summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/arm/msm/msm_thermal.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,osm.txt15
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi.txt16
-rw-r--r--Documentation/devicetree/bindings/display/msm/sde.txt472
-rw-r--r--Documentation/devicetree/bindings/drm/msm/sde-dsi.txt96
-rw-r--r--Documentation/devicetree/bindings/drm/msm/sde-wb.txt23
-rw-r--r--Documentation/devicetree/bindings/fb/mdss-dp.txt7
-rw-r--r--Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt1
-rw-r--r--Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt31
-rw-r--r--Documentation/devicetree/bindings/sound/qcom-audio-dev.txt24
-rw-r--r--Documentation/devicetree/bindings/sound/wcd_codec.txt9
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3.txt2
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi5
-rw-r--r--arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts6
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-jdi-1080p-video.dtsi28
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi54
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-sharp-dualmipi-wqxga-video.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-sim-video.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-toshiba-720p-video.dtsi100
-rw-r--r--arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-2800mah.dtsi81
-rw-r--r--arch/arm/boot/dts/qcom/msm-audio.dtsi32
-rw-r--r--arch/arm/boot/dts/qcom/msm-pm660.dtsi7
-rw-r--r--arch/arm/boot/dts/qcom/msm-pmi8998.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/msm-smb138x.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-cdp.dtsi57
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-dtp.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-fluid.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-liquid.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi12
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-mtp.dtsi39
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi352
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-sde.dtsi546
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-v2.dtsi6
-rw-r--r--arch/arm/boot/dts/qcom/msm8996.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-camera.dtsi20
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-cdp.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-interposer-sdm660-cdp.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-interposer-sdm660-mtp.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-mdss.dtsi7
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-mtp.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-qrd.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-v2-camera.dtsi14
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-v2.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom/msm8998.dtsi20
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-cdp.dtsi25
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-mtp.dtsi25
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-pm.dtsi774
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-rumi.dts21
-rw-r--r--arch/arm/boot/dts/qcom/sdm630.dtsi245
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-audio.dtsi7
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-cdp.dtsi45
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-common.dtsi83
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-gpu.dtsi419
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi73
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-mdss.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-mtp.dtsi33
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi76
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-pm660a-cdp.dts12
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-pm660a-mtp.dts12
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-qrd.dts22
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-qrd.dtsi57
-rw-r--r--arch/arm/boot/dts/qcom/sdm660.dtsi57
-rw-r--r--arch/arm/configs/sdm660-perf_defconfig5
-rw-r--r--arch/arm/configs/sdm660_defconfig5
-rw-r--r--arch/arm64/configs/msm-perf_defconfig8
-rw-r--r--arch/arm64/configs/msm_defconfig8
-rw-r--r--arch/arm64/configs/msmcortex-perf_defconfig4
-rw-r--r--arch/arm64/configs/msmcortex_defconfig3
-rw-r--r--arch/arm64/configs/sdm660-perf_defconfig5
-rw-r--r--arch/arm64/configs/sdm660_defconfig5
-rw-r--r--drivers/android/Kconfig12
-rw-r--r--drivers/android/binder.c1007
-rw-r--r--drivers/char/adsprpc.c302
-rw-r--r--drivers/char/adsprpc_compat.c4
-rw-r--r--drivers/char/adsprpc_shared.h1
-rw-r--r--drivers/char/diag/diagfwd_bridge.c14
-rw-r--r--drivers/clk/msm/clock-mmss-8998.c3
-rw-r--r--drivers/clk/msm/clock-osm.c16
-rw-r--r--drivers/clk/qcom/clk-cpu-osm.c9
-rw-r--r--drivers/crypto/msm/ice.c15
-rw-r--r--drivers/crypto/msm/ota_crypto.c6
-rw-r--r--drivers/crypto/msm/qce50.c5
-rw-r--r--drivers/crypto/msm/qcedev.c4
-rw-r--r--drivers/crypto/msm/qcrypto.c6
-rw-r--r--drivers/gpu/drm/Kconfig3
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c10
-rw-r--r--drivers/gpu/drm/msm/Kconfig36
-rw-r--r--drivers/gpu/drm/msm/Makefile127
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c169
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h133
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.c727
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.h214
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c2302
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h489
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h578
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c1512
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h192
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_defs.h372
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_display.c2588
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_display.h336
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c114
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_display_test.h31
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_drm.c515
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_drm.h83
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_hw.h39
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_panel.c1998
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_panel.h203
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy.c859
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy.h196
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h164
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c858
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c9
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h10
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c36
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_format.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.h4
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c483
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c747
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h219
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c61
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c24
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h3
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h60
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h21
-rw-r--r--drivers/gpu/drm/msm/msm_prop.c662
-rw-r--r--drivers/gpu/drm/msm/msm_prop.h391
-rw-r--r--drivers/gpu/drm/msm/msm_smmu.c502
-rw-r--r--drivers/gpu/drm/msm/sde/sde_backlight.c103
-rw-r--r--drivers/gpu/drm/msm/sde/sde_backlight.h18
-rw-r--r--drivers/gpu/drm/msm/sde/sde_color_processing.c990
-rw-r--r--drivers/gpu/drm/msm/sde/sde_color_processing.h95
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.c624
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.h298
-rw-r--r--drivers/gpu/drm/msm/sde/sde_core_irq.c460
-rw-r--r--drivers/gpu/drm/msm/sde/sde_core_irq.h138
-rw-r--r--drivers/gpu/drm/msm/sde/sde_core_perf.c610
-rw-r--r--drivers/gpu/drm/msm/sde/sde_core_perf.h124
-rw-r--r--drivers/gpu/drm/msm/sde/sde_crtc.c1693
-rw-r--r--drivers/gpu/drm/msm/sde/sde_crtc.h289
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder.c1334
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder.h122
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys.h406
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c712
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c872
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c1096
-rw-r--r--drivers/gpu/drm/msm/sde/sde_fence.c232
-rw-r--r--drivers/gpu/drm/msm/sde/sde_fence.h177
-rw-r--r--drivers/gpu/drm/msm/sde/sde_formats.c996
-rw-r--r--drivers/gpu/drm/msm/sde/sde_formats.h107
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_catalog.c1998
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_catalog.h716
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h134
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_cdm.c342
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_cdm.h127
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_color_processing.h18
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c453
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h78
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_ctl.c461
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_ctl.h186
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_dspp.c120
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_dspp.h183
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_hwio.h0
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_interrupts.c991
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_interrupts.h257
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_intf.c342
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_intf.h133
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_lm.c207
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_lm.h102
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_mdss.h443
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_pingpong.c168
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_pingpong.h123
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_sspp.c943
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_sspp.h467
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_top.c268
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_top.h170
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_util.c92
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_util.h55
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_vbif.c165
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_vbif.h90
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_wb.c224
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_wb.h105
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hwio.h59
-rw-r--r--drivers/gpu/drm/msm/sde/sde_irq.c166
-rw-r--r--drivers/gpu/drm/msm/sde/sde_irq.h59
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms.c1208
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms.h371
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms_utils.c153
-rw-r--r--drivers/gpu/drm/msm/sde/sde_plane.c2400
-rw-r--r--drivers/gpu/drm/msm/sde/sde_plane.h101
-rw-r--r--drivers/gpu/drm/msm/sde/sde_rm.c1262
-rw-r--r--drivers/gpu/drm/msm/sde/sde_rm.h201
-rw-r--r--drivers/gpu/drm/msm/sde/sde_trace.h195
-rw-r--r--drivers/gpu/drm/msm/sde/sde_vbif.c284
-rw-r--r--drivers/gpu/drm/msm/sde/sde_vbif.h51
-rw-r--r--drivers/gpu/drm/msm/sde/sde_wb.c745
-rw-r--r--drivers/gpu/drm/msm/sde/sde_wb.h321
-rw-r--r--drivers/gpu/drm/msm/sde_dbg.h62
-rw-r--r--drivers/gpu/drm/msm/sde_dbg_evtlog.c326
-rw-r--r--drivers/gpu/drm/msm/sde_power_handle.c924
-rw-r--r--drivers/gpu/drm/msm/sde_power_handle.h229
-rw-r--r--drivers/gpu/msm/adreno.c22
-rw-r--r--drivers/gpu/msm/adreno.h4
-rw-r--r--drivers/gpu/msm/adreno_a5xx.c1
-rw-r--r--drivers/gpu/msm/kgsl_iommu.c9
-rw-r--r--drivers/hwtracing/coresight/coresight.c5
-rw-r--r--drivers/iio/adc/qcom-rradc.c94
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c24
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c9
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_base.c15
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_base.h2
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_core.c6
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r1_pipe.c4
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c2
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c13
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_common.c5
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_dcvs.c5
-rw-r--r--drivers/mfd/wcd9xxx-utils.c4
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_utils_aio.c7
-rw-r--r--drivers/mmc/host/cmdq_hci.c46
-rw-r--r--drivers/mmc/host/cmdq_hci.h8
-rw-r--r--drivers/mmc/host/sdhci-msm-ice.c301
-rw-r--r--drivers/mmc/host/sdhci-msm-ice.h36
-rw-r--r--drivers/mmc/host/sdhci-msm.c33
-rw-r--r--drivers/mmc/host/sdhci-msm.h4
-rw-r--r--drivers/mmc/host/sdhci.c27
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/net/ethernet/msm/ecm_ipa.c269
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig20
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c53
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c207
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c19
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c24
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c700
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c92
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h24
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h4
-rw-r--r--drivers/platform/msm/gsi/gsi.c10
-rw-r--r--drivers/platform/msm/gsi/gsi_reg.h4
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa.c14
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_i.h2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_utils.c189
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c17
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h12
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c17
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h9
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h6
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c8
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h4
-rw-r--r--drivers/power/power_supply_sysfs.c6
-rw-r--r--drivers/power/reset/msm-poweroff.c3
-rw-r--r--drivers/power/supply/qcom/Makefile6
-rw-r--r--drivers/power/supply/qcom/battery.c756
-rw-r--r--drivers/power/supply/qcom/fg-core.h10
-rw-r--r--drivers/power/supply/qcom/fg-memif.c74
-rw-r--r--drivers/power/supply/qcom/fg-util.c24
-rw-r--r--drivers/power/supply/qcom/qpnp-fg-gen3.c423
-rw-r--r--drivers/power/supply/qcom/qpnp-qnovo.c1
-rw-r--r--drivers/power/supply/qcom/qpnp-smb2.c146
-rw-r--r--drivers/power/supply/qcom/smb-lib.c332
-rw-r--r--drivers/power/supply/qcom/smb-lib.h12
-rw-r--r--drivers/power/supply/qcom/smb1351-charger.c13
-rw-r--r--drivers/power/supply/qcom/smb135x-charger.c6
-rw-r--r--drivers/power/supply/qcom/smb138x-charger.c12
-rw-r--r--drivers/soc/qcom/Kconfig10
-rw-r--r--drivers/soc/qcom/glink_smem_native_xprt.c9
-rw-r--r--drivers/soc/qcom/glink_ssr.c3
-rw-r--r--drivers/soc/qcom/icnss.c35
-rw-r--r--drivers/soc/qcom/kryo-l2-accessors.c6
-rw-r--r--drivers/soc/qcom/peripheral-loader.c8
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_tal_glink.c112
-rw-r--r--drivers/spmi/spmi-pmic-arb.c24
-rw-r--r--drivers/thermal/msm-tsens.c2
-rw-r--r--drivers/thermal/msm_lmh_dcvs.c21
-rw-r--r--drivers/thermal/msm_thermal.c199
-rw-r--r--drivers/usb/dwc3/core.c15
-rw-r--r--drivers/usb/dwc3/core.h8
-rw-r--r--drivers/usb/dwc3/debugfs.c5
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c38
-rw-r--r--drivers/usb/dwc3/ep0.c14
-rw-r--r--drivers/usb/dwc3/gadget.c7
-rw-r--r--drivers/usb/dwc3/gadget.h8
-rw-r--r--drivers/usb/dwc3/host.c6
-rw-r--r--drivers/usb/gadget/Kconfig11
-rw-r--r--drivers/usb/gadget/function/Makefile2
-rw-r--r--drivers/usb/gadget/function/f_ccid.c1105
-rw-r--r--drivers/usb/gadget/function/f_ccid.h83
-rw-r--r--drivers/usb/host/xhci-plat.c14
-rw-r--r--drivers/usb/pd/policy_engine.c292
-rw-r--r--drivers/usb/pd/qpnp-pdphy.c17
-rw-r--r--drivers/usb/pd/usbpd.h26
-rw-r--r--drivers/video/fbdev/msm/mdp3_ctrl.c8
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.c133
-rw-r--r--drivers/video/fbdev/msm/mdss_dp.h26
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_util.c45
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_util.h17
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.c1
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_ctl.c15
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c51
-rw-r--r--fs/ext4/crypto.c4
-rw-r--r--fs/jbd2/journal.c2
-rw-r--r--include/drm/drm_crtc.h2
-rw-r--r--include/linux/mdss_io_util.h3
-rw-r--r--include/linux/msm_gsi.h21
-rw-r--r--include/linux/power_supply.h17
-rw-r--r--include/linux/qdsp6v2/apr_tal.h3
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/usb/ccid_desc.h112
-rw-r--r--include/linux/usb/xhci_pdriver.h4
-rw-r--r--include/soc/qcom/icnss.h2
-rw-r--r--include/sound/apr_audio-v2.h10
-rw-r--r--include/sound/cpe_core.h4
-rw-r--r--include/sound/q6adm-v2.h4
-rw-r--r--include/sound/q6lsm.h71
-rw-r--r--include/trace/events/sched.h97
-rw-r--r--include/uapi/drm/Kbuild2
-rw-r--r--include/uapi/drm/drm_fourcc.h8
-rw-r--r--include/uapi/drm/drm_mode.h2
-rw-r--r--include/uapi/drm/msm_drm.h54
-rw-r--r--include/uapi/drm/msm_drm_pp.h82
-rw-r--r--include/uapi/drm/sde_drm.h298
-rw-r--r--include/uapi/linux/android/binder.h104
-rw-r--r--include/uapi/media/msm_vidc.h2
-rw-r--r--include/uapi/sound/lsm_params.h51
-rw-r--r--kernel/sched/boost.c9
-rw-r--r--kernel/sched/core.c11
-rw-r--r--kernel/sched/fair.c29
-rw-r--r--kernel/sched/hmp.c320
-rw-r--r--kernel/sched/rt.c20
-rw-r--r--kernel/sched/sched.h31
-rw-r--r--net/rmnet_data/rmnet_data_config.c7
-rw-r--r--net/rmnet_data/rmnet_data_config.h4
-rw-r--r--net/rmnet_data/rmnet_data_handlers.c75
-rw-r--r--net/wireless/util.c2
-rw-r--r--sound/core/pcm_lib.c3
-rw-r--r--sound/soc/codecs/msm_sdw/msm-sdw-tables.c3
-rw-r--r--sound/soc/codecs/msm_sdw/msm_sdw.h4
-rw-r--r--sound/soc/codecs/msm_sdw/msm_sdw_cdc.c280
-rw-r--r--sound/soc/codecs/wcd-mbhc-v2.c8
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c8
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-routing.h16
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x.c284
-rw-r--r--sound/soc/codecs/wcd9xxx-resmgr-v2.c3
-rw-r--r--sound/soc/codecs/wcd_cpe_core.c26
-rw-r--r--sound/soc/codecs/wsa881x-regmap.c5
-rw-r--r--sound/soc/codecs/wsa881x.c6
-rw-r--r--sound/soc/codecs/wsa881x.h3
-rw-r--r--sound/soc/msm/msm-cpe-lsm.c252
-rw-r--r--sound/soc/msm/msm-dai-fe.c82
-rw-r--r--sound/soc/msm/msm8998.c4
-rw-r--r--sound/soc/msm/qdsp6v2/audio_cal_utils.c4
-rw-r--r--sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c4
-rw-r--r--sound/soc/msm/qdsp6v2/msm-lsm-client.c523
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c1419
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h9
-rw-r--r--sound/soc/msm/qdsp6v2/q6adm.c63
-rw-r--r--sound/soc/msm/qdsp6v2/q6asm.c30
-rw-r--r--sound/soc/msm/qdsp6v2/q6lsm.c293
-rw-r--r--sound/soc/msm/qdsp6v2/q6voice.c6
-rw-r--r--sound/soc/msm/sdm660-common.c128
-rw-r--r--sound/soc/msm/sdm660-common.h1
-rw-r--r--sound/soc/msm/sdm660-internal.c30
376 files changed, 62656 insertions, 3431 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt b/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt
index c1e2d3b2ba11..c2bc68a57bc7 100644
--- a/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt
+++ b/Documentation/devicetree/bindings/arm/msm/msm_thermal.txt
@@ -105,7 +105,9 @@ Optional properties
- clock-names: The list of clocks needed for thermal module. Must be
- "osm" for LMH DCVS
- clocks: The phandle to the clocks.
-
+- qcom,cxip-lm-enable: If this optional property is defined with a non-zero value,
+			it enables the CXIP_LM hardware feature. If the value is zero,
+			it disables the CXIP_LM hardware feature.
Optional child nodes
- qcom,pmic-opt-curr-temp: Threshold temperature for requesting optimum current (request
dual phase) for rails with PMIC, in degC. If this property exists,
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
index be8f27d87738..38c9fe749abb 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -10,8 +10,9 @@ Properties:
Usage: required
Value type: <string>
Definition: must be "qcom,cpu-clock-osm-msm8998-v1",
- "qcom,cpu-clock-osm-msm8998-v2" or
- "qcom,clk-cpu-osm".
+ "qcom,cpu-clock-osm-msm8998-v2",
+ "qcom,clk-cpu-osm" or
+ "qcom,clk-cpu-osm-sdm630".
- reg
Usage: required
@@ -281,6 +282,16 @@ Properties:
performance mode with a total of 4 tuples corresponding
to each supported performance mode.
+- qcom,pwrcl-apcs-mem-acc-threshold-voltage
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the highest MEM ACC threshold voltage in
+ microvolts for the Power cluster. This voltage is
+ used to determine which MEM ACC setting is used for the
+ highest frequencies. If specified, the voltage must match
+ the MEM ACC threshold voltage specified for the
+ corresponding CPRh device.
+
- qcom,perfcl-apcs-mem-acc-val
Usage: required if qcom,osm-no-tz is specified
Value type: <prop-encoded-array>
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
index f344b9e49198..ae2278fb3d1c 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
@@ -69,6 +69,20 @@ Required properties:
Optional properties:
- qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
regulator is wanted.
+- qcom,mdss-mdp-transfer-time-us: Specifies the dsi transfer time for command mode
+ panels in microseconds. Driver uses this number to adjust
+ the clock rate according to the expected transfer time.
+ Increasing this value would slow down the mdp processing
+ and can result in slower performance.
+ Decreasing this value can speed up the mdp processing,
+ but this can also impact power consumption.
+		As a rule, this time should not be higher than the
+		time that would be expected when processing at the
+		dsi link rate, since that is the maximum transfer
+		time that can be achieved anyway.
+ If ping pong split is enabled, this time should not be higher
+ than two times the dsi link rate time.
+ If the property is not specified, then the default value is 14000 us.
Example:
mdss_dsi0: qcom,mdss_dsi@fd922800 {
@@ -105,6 +119,8 @@ Example:
qcom,master-dsi;
qcom,sync-dual-dsi;
+ qcom,mdss-mdp-transfer-time-us = <12000>;
+
pinctrl-names = "default", "sleep";
pinctrl-0 = <&mdss_dsi_active>;
pinctrl-1 = <&mdss_dsi_suspend>;
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
new file mode 100644
index 000000000000..c9e7d7423d7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -0,0 +1,472 @@
+Qualcomm Technologies, Inc. SDE KMS
+
+Snapdragon Display Engine implements Linux DRM/KMS APIs to drive user
+interface to different panel interfaces. SDE driver is the core of
+display subsystem which manages all data paths to different panel interfaces.
+
+Required properties
+- compatible: Must be "qcom,sde-kms"
+- reg: Offset and length of the register set for the device.
+- reg-names : Names to refer to register sets related to this device
+- clocks: List of Phandles for clock device nodes
+ needed by the device.
+- clock-names: List of clock names needed by the device.
+- mmagic-supply: Phandle for mmagic mdss supply regulator device node.
+- vdd-supply: Phandle for vdd regulator device node.
+- interrupt-parent: Must be core interrupt controller.
+- interrupts: Interrupt associated with MDSS.
+- interrupt-controller: Mark the device node as an interrupt controller.
+- #interrupt-cells: Should be one. The first cell is interrupt number.
+- iommus: Specifies the SID's used by this context bank.
+- qcom,sde-sspp-type: Array of strings for SDE source surface pipes type information.
+ A source pipe can be "vig", "rgb", "dma" or "cursor" type.
+ Number of types defined should match the number of offsets
+ defined in property: qcom,sde-sspp-off.
+- qcom,sde-sspp-off: Array of offset for SDE source surface pipes. The offsets
+ are calculated from register "mdp_phys" defined in
+ reg property + "sde-off". The number of offsets defined here should
+ reflect the amount of pipes that can be active in SDE for
+ this configuration.
+- qcom,sde-sspp-xin-id: Array of VBIF clients ids (xins) corresponding
+ to the respective source pipes. Number of xin ids
+ defined should match the number of offsets
+ defined in property: qcom,sde-sspp-off.
+- qcom,sde-ctl-off: Array of offset addresses for the available ctl
+ hw blocks within SDE, these offsets are
+ calculated from register "mdp_phys" defined in
+ reg property. The number of ctl offsets defined
+ here should reflect the number of control paths
+ that can be configured concurrently on SDE for
+ this configuration.
+- qcom,sde-wb-off: Array of offset addresses for the programmable
+ writeback blocks within SDE.
+- qcom,sde-wb-xin-id: Array of VBIF clients ids (xins) corresponding
+ to the respective writeback. Number of xin ids
+ defined should match the number of offsets
+ defined in property: qcom,sde-wb-off.
+- qcom,sde-mixer-off: Array of offset addresses for the available
+ mixer blocks that can drive data to panel
+ interfaces. These offsets are calculated from
+ register "mdp_phys" defined in reg property.
+ The number of offsets defined should reflect the
+ amount of mixers that can drive data to a panel
+ interface.
+- qcom,sde-dspp-off: Array of offset addresses for the available dspp
+ blocks. These offsets are calculated from
+ register "mdp_phys" defined in reg property.
+- qcom,sde-pp-off: Array of offset addresses for the available
+ pingpong blocks. These offsets are calculated
+ from register "mdp_phys" defined in reg property.
+- qcom,sde-pp-slave: Array of flags indicating whether each ping pong
+ block may be configured as a pp slave.
+- qcom,sde-intf-off: Array of offset addresses for the available SDE
+ interface blocks that can drive data to a
+ panel controller. The offsets are calculated
+ from "mdp_phys" defined in reg property. The number
+ of offsets defined should reflect the number of
+ programmable interface blocks available in hardware.
+
+Optional properties:
+- clock-rate: List of clock rates in Hz.
+- clock-max-rate: List of maximum clock rate in Hz that this device supports.
+- qcom,platform-supply-entries: A node that lists the elements of the supply. There
+ can be more than one instance of this binding,
+ in which case the entry would be appended with
+ the supply entry index.
+ e.g. qcom,platform-supply-entry@0
+ -- reg: offset and length of the register set for the device.
+ -- qcom,supply-name: name of the supply (vdd/vdda/vddio)
+ -- qcom,supply-min-voltage: minimum voltage level (uV)
+ -- qcom,supply-max-voltage: maximum voltage level (uV)
+ -- qcom,supply-enable-load: load drawn (uA) from enabled supply
+ -- qcom,supply-disable-load: load drawn (uA) from disabled supply
+ -- qcom,supply-pre-on-sleep: time to sleep (ms) before turning on
+ -- qcom,supply-post-on-sleep: time to sleep (ms) after turning on
+ -- qcom,supply-pre-off-sleep: time to sleep (ms) before turning off
+ -- qcom,supply-post-off-sleep: time to sleep (ms) after turning off
+- qcom,sde-sspp-src-size: A u32 value indicates the address range for each sspp.
+- qcom,sde-mixer-size: A u32 value indicates the address range for each mixer.
+- qcom,sde-ctl-size: A u32 value indicates the address range for each ctl.
+- qcom,sde-dspp-size: A u32 value indicates the address range for each dspp.
+- qcom,sde-intf-size: A u32 value indicates the address range for each intf.
+- qcom,sde-dsc-size: A u32 value indicates the address range for each dsc.
+- qcom,sde-cdm-size: A u32 value indicates the address range for each cdm.
+- qcom,sde-pp-size: A u32 value indicates the address range for each pingpong.
+- qcom,sde-wb-size: A u32 value indicates the address range for each writeback.
+- qcom,sde-len: A u32 entry for SDE address range.
+- qcom,sde-intf-max-prefetch-lines: Array of u32 values for max prefetch lines on
+ each interface.
+- qcom,sde-sspp-linewidth: A u32 value indicates the max sspp line width.
+- qcom,sde-mixer-linewidth: A u32 value indicates the max mixer line width.
+- qcom,sde-wb-linewidth: A u32 value indicates the max writeback line width.
+- qcom,sde-sspp-scale-size: A u32 value indicates the scaling block size on sspp.
+- qcom,sde-mixer-blendstages: A u32 value indicates the max mixer blend stages for
+ alpha blending.
+- qcom,sde-qseed-type: A string entry indicates qseed support on sspp and wb.
+ It supports "qseedv3" and "qseedv2" entries for qseed
+ type. By default "qseedv2" is used if this optional property
+ is not defined.
+- qcom,sde-csc-type: A string entry indicates csc support on sspp and wb.
+ It supports "csc" and "csc-10bit" entries for csc
+ type.
+- qcom,sde-highest-bank-bit: A u32 property to indicate GPU/Camera/Video highest memory
+ bank bit used for tile format buffers.
+- qcom,sde-panic-per-pipe: Boolean property to indicate if panic signal
+ control feature is available on each source pipe.
+- qcom,sde-has-src-split: Boolean property to indicate if source split
+ feature is available or not.
+- qcom,sde-has-mixer-gc: Boolean property to indicate if mixer has gamma correction
+ feature available or not.
+- qcom,sde-has-cdp: Boolean property to indicate if cdp feature is
+ available or not.
+- qcom,sde-sspp-clk-ctrl: Array of offsets describing clk control
+ offsets for dynamic clock gating. 1st value
+ in the array represents offset of the control
+ register. 2nd value represents bit offset within
+ control register. Number of offsets defined should
+ match the number of offsets defined in
+ property: qcom,sde-sspp-off
+- qcom,sde-sspp-clk-status: Array of offsets describing clk status
+ offsets for dynamic clock gating. 1st value
+ in the array represents offset of the status
+ register. 2nd value represents bit offset within
+ control register. Number of offsets defined should
+ match the number of offsets defined in
+ property: qcom,sde-sspp-off.
+- qcom,sde-sspp-danger-lut: A 3 cell property, with a format of <linear, tile, nrt>,
+ indicating the danger luts on sspp.
+- qcom,sde-sspp-safe-lut: A 3 cell property, with a format of <linear, tile, nrt>,
+ indicating the safe luts on sspp.
+- qcom,sde-sspp-max-rects: Array of u32 values indicating maximum rectangles supported
+ on each sspp. This property is for multirect feature support.
+ Number of offsets defined should match the number of
+ offsets defined in property: qcom,sde-sspp-off.
+- qcom,sde-intf-type: Array of string provides the interface type information.
+ Possible string values
+ "dsi" - dsi display interface
+ "dp" - Display Port interface
+ "hdmi" - HDMI display interface
+ An interface is considered as "none" if interface type
+ is not defined.
+- qcom,sde-off: SDE offset from "mdp_phys" defined in reg property.
+- qcom,sde-cdm-off: Array of offset addresses for the available
+ cdm blocks. These offsets will be calculated from
+ register "mdp_phys" defined in reg property.
+- qcom,sde-vbif-off: Array of offset addresses for the available
+ vbif blocks. These offsets will be calculated from
+ register "vbif_phys" defined in reg property.
+- qcom,sde-vbif-size: A u32 value indicates the vbif block address range.
+- qcom,sde-te-off: A u32 offset indicates the te block offset on pingpong.
+ This offset is 0x0 by default.
+- qcom,sde-te2-off: A u32 offset indicates the te2 block offset on pingpong.
+- qcom,sde-te-size: A u32 value indicates the te block address range.
+- qcom,sde-te2-size: A u32 value indicates the te2 block address range.
+- qcom,sde-dsc-off: A u32 offset indicates the dsc block offset on pingpong.
+- qcom,sde-sspp-vig-blocks: A node that lists the blocks inside the VIG hardware. The
+ block entries will contain the offset and version (if needed)
+ of each feature block. The presence of a block entry
+ indicates that the SSPP VIG contains that feature hardware.
+ e.g. qcom,sde-sspp-vig-blocks
+ -- qcom,sde-vig-csc-off: offset of CSC hardware
+ -- qcom,sde-vig-qseed-off: offset of QSEED hardware
+ -- qcom,sde-vig-pcc: offset and version of PCC hardware
+ -- qcom,sde-vig-hsic: offset and version of global PA adjustment
+ -- qcom,sde-vig-memcolor: offset and version of PA memcolor hardware
+- qcom,sde-sspp-rgb-blocks: A node that lists the blocks inside the RGB hardware. The
+ block entries will contain the offset and version (if needed)
+ of each feature block. The presence of a block entry
+ indicates that the SSPP RGB contains that feature hardware.
+ e.g. qcom,sde-sspp-vig-blocks
+ -- qcom,sde-rgb-scaler-off: offset of RGB scaler hardware
+ -- qcom,sde-rgb-pcc: offset and version of PCC hardware
+- qcom,sde-dspp-blocks: A node that lists the blocks inside the DSPP hardware. The
+ block entries will contain the offset and version of each
+ feature block. The presence of a block entry indicates that
+ the DSPP contains that feature hardware.
+ e.g. qcom,sde-dspp-blocks
+ -- qcom,sde-dspp-pcc: offset and version of PCC hardware
+ -- qcom,sde-dspp-gc: offset and version of GC hardware
+ -- qcom,sde-dspp-hsic: offset and version of global PA adjustment
+ -- qcom,sde-dspp-memcolor: offset and version of PA memcolor hardware
+ -- qcom,sde-dspp-sixzone: offset and version of PA sixzone hardware
+ -- qcom,sde-dspp-gamut: offset and version of Gamut mapping hardware
+ -- qcom,sde-dspp-dither: offset and version of dither hardware
+ -- qcom,sde-dspp-hist: offset and version of histogram hardware
+ -- qcom,sde-dspp-vlut: offset and version of PA vLUT hardware
+- qcom,sde-mixer-blocks: A node that lists the blocks inside the layer mixer hardware. The
+ block entries will contain the offset and version (if needed)
+ of each feature block. The presence of a block entry
+ indicates that the layer mixer contains that feature hardware.
+ e.g. qcom,sde-mixer-blocks
+ -- qcom,sde-mixer-gc: offset and version of mixer GC hardware
+- qcom,sde-dspp-ad-off: Array of u32 offsets indicate the ad block offset from the
+ DSPP offset. Since AD hardware is represented as part of
+ DSPP block, the AD offsets must be offset from the
+ corresponding DSPP base.
+- qcom,sde-dspp-ad-version: A u32 value indicating the version of the AD hardware
+- qcom,sde-vbif-id: Array of vbif ids corresponding to the
+ offsets defined in property: qcom,sde-vbif-off.
+- qcom,sde-vbif-default-ot-rd-limit: A u32 value indicates the default read OT limit
+- qcom,sde-vbif-default-ot-wr-limit: A u32 value indicates the default write OT limit
+- qcom,sde-vbif-dynamic-ot-rd-limit: A series of 2 cell property, with a format
+ of (pps, OT limit), where pps is pixel per second and
+ OT limit is the read limit to apply if the given
+ pps is not exceeded.
+- qcom,sde-vbif-dynamic-ot-wr-limit: A series of 2 cell property, with a format
+ of (pps, OT limit), where pps is pixel per second and
+ OT limit is the write limit to apply if the given
+ pps is not exceeded.
+- qcom,sde-wb-id: Array of writeback ids corresponding to the
+ offsets defined in property: qcom,sde-wb-off.
+- qcom,sde-wb-clk-ctrl: Array of 2 cell property describing clk control
+ offsets for dynamic clock gating. 1st value
+ in the array represents offset of the control
+ register. 2nd value represents bit offset within
+ control register. Number of offsets defined should
+ match the number of offsets defined in
+ property: qcom,sde-wb-off
+- qcom,sde-dram-channels: This represents the number of channels in the
+ Bus memory controller.
+- qcom,sde-num-nrt-paths: Integer property represents the number of non-realtime
+ paths in each Bus Scaling Usecase. This value depends on
+ number of AXI ports that are dedicated to non-realtime VBIF
+ for particular chipset.
+ These paths must be defined after rt-paths in
+ "qcom,msm-bus,vectors-KBps" vector request.
+- qcom,sde-max-bw-low-kbps: This value indicates the max bandwidth in Kbps
+ that can be supported without underflow.
+ This is a low bandwidth threshold which should
+ be applied in most scenarios to be safe from
+ underflows when unable to satisfy bandwidth
+ requirements.
+- qcom,sde-max-bw-high-kbps: This value indicates the max bandwidth in Kbps
+ that can be supported without underflow.
+ This is a high bandwidth threshold which can be
+ applied in scenarios where panel interface can
+ be more tolerant to memory latency such as
+ command mode panels.
+
+Bus Scaling Subnodes:
+- qcom,sde-reg-bus: Property to provide Bus scaling for register access for
+ mdss blocks.
+- qcom,sde-data-bus: Property to provide Bus scaling for data bus access for
+ mdss blocks.
+
+Bus Scaling Data:
+- qcom,msm-bus,name: String property describing client name.
+- qcom,msm-bus,num-cases: This is the number of Bus Scaling use cases
+ defined in the vectors property.
+- qcom,msm-bus,num-paths: This represents the number of paths in each
+ Bus Scaling Usecase.
+- qcom,msm-bus,vectors-KBps: * A series of 4 cell properties, with a format
+ of (src, dst, ab, ib) which is defined at
+ Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+ * Current values of src & dst are defined at
+ include/linux/msm-bus-board.h
+
+
+Please refer to ../../interrupt-controller/interrupts.txt for a general
+description of interrupt bindings.
+
+Example:
+ mdss_mdp: qcom,mdss_mdp@900000 {
+ compatible = "qcom,sde-kms";
+ reg = <0x00900000 0x90000>,
+ <0x009b0000 0x1040>,
+ <0x009b8000 0x1040>;
+ reg-names = "mdp_phys",
+ "vbif_phys",
+ "vbif_nrt_phys";
+ clocks = <&clock_mmss clk_mdss_ahb_clk>,
+ <&clock_mmss clk_mdss_axi_clk>,
+ <&clock_mmss clk_mdp_clk_src>,
+ <&clock_mmss clk_mdss_mdp_vote_clk>,
+ <&clock_mmss clk_smmu_mdp_axi_clk>,
+ <&clock_mmss clk_mmagic_mdss_axi_clk>,
+ <&clock_mmss clk_mdss_vsync_clk>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "core_clk_src",
+ "core_clk",
+ "iommu_clk",
+ "mmagic_clk",
+ "vsync_clk";
+ clock-rate = <0>, <0>, <0>;
+ clock-max-rate= <0 320000000 0>;
+ mmagic-supply = <&gdsc_mmagic_mdss>;
+ vdd-supply = <&gdsc_mdss>;
+ interrupt-parent = <&intc>;
+ interrupts = <0 83 0>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ iommus = <&mdp_smmu 0>;
+
+ qcom,sde-off = <0x1000>;
+ qcom,sde-ctl-off = <0x00002000 0x00002200 0x00002400
+ 0x00002600 0x00002800>;
+ qcom,sde-mixer-off = <0x00045000 0x00046000
+ 0x00047000 0x0004a000>;
+ qcom,sde-dspp-off = <0x00055000 0x00057000>;
+ qcom,sde-dspp-ad-off = <0x24000 0x22800>;
+ qcom,sde-dspp-ad-version = <0x00030000>;
+ qcom,sde-wb-off = <0x00066000>;
+ qcom,sde-wb-xin-id = <6>;
+ qcom,sde-intf-off = <0x0006b000 0x0006b800
+ 0x0006c000 0x0006c800>;
+ qcom,sde-intf-type = "none", "dsi", "dsi", "hdmi";
+ qcom,sde-pp-off = <0x00071000 0x00071800
+ 0x00072000 0x00072800>;
+ qcom,sde-pp-slave = <0x0 0x0 0x0 0x0>;
+ qcom,sde-cdm-off = <0x0007a200>;
+ qcom,sde-dsc-off = <0x00081000 0x00081400>;
+ qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
+
+ qcom,sde-sspp-type = "vig", "vig", "vig",
+ "vig", "rgb", "rgb",
+ "rgb", "rgb", "dma",
+ "dma", "cursor", "cursor";
+
+ qcom,sde-sspp-off = <0x00005000 0x00007000 0x00009000
+ 0x0000b000 0x00015000 0x00017000
+ 0x00019000 0x0001b000 0x00025000
+ 0x00027000 0x00035000 0x00037000>;
+
+ qcom,sde-sspp-xin-id = <0 4 8
+ 12 1 5
+ 9 13 2
+ 10 7 7>;
+
+ /* offsets are relative to "mdp_phys + qcom,sde-off */
+ qcom,sde-sspp-clk-ctrl = <0x2ac 0>, <0x2b4 0>, <0x2bc 0>,
+ <0x2c4 0>, <0x2ac 4>, <0x2b4 4>, <0x2bc 4>,
+ <0x2c4 4>, <0x2ac 8>, <0x2b4 8>, <0x3a8 16>,
+ <0x3b0 16>;
+ qcom,sde-sspp-clk-status = <0x2ac 0>, <0x2b4 0>, <0x2bc 0>,
+ <0x2c4 0>, <0x2ac 4>, <0x2b4 4>, <0x2bc 4>,
+ <0x2c4 4>, <0x2ac 8>, <0x2b4 8>, <0x3a8 16>,
+ <0x3b0 16>;
+ qcom,sde-mixer-linewidth = <2560>;
+ qcom,sde-sspp-linewidth = <2560>;
+ qcom,sde-mixer-blendstages = <0x7>;
+ qcom,sde-highest-bank-bit = <0x2>;
+ qcom,sde-panic-per-pipe;
+ qcom,sde-has-cdp;
+ qcom,sde-has-src-split;
+ qcom,sde-sspp-src-size = <0x100>;
+ qcom,sde-mixer-size = <0x100>;
+ qcom,sde-ctl-size = <0x100>;
+ qcom,sde-dspp-size = <0x100>;
+ qcom,sde-intf-size = <0x100>;
+ qcom,sde-dsc-size = <0x100>;
+ qcom,sde-cdm-size = <0x100>;
+ qcom,sde-pp-size = <0x100>;
+ qcom,sde-wb-size = <0x100>;
+ qcom,sde-len = <0x100>;
+ qcom,sde-wb-linewidth = <2560>;
+ qcom,sde-sspp-scale-size = <0x100>;
+ qcom,sde-mixer-blendstages = <0x8>;
+ qcom,sde-qseed-type = "qseedv2";
+ qcom,sde-highest-bank-bit = <15>;
+ qcom,sde-has-mixer-gc;
+ qcom,sde-sspp-max-rects = <1 1 1 1
+ 1 1 1 1
+ 1 1
+ 1 1>;
+ qcom,sde-te-off = <0x100>;
+ qcom,sde-te2-off = <0x100>;
+ qcom,sde-te-size = <0xffff>;
+ qcom,sde-te2-size = <0xffff>;
+
+ qcom,sde-wb-id = <2>;
+ qcom,sde-wb-clk-ctrl = <0x2bc 16>;
+
+ qcom,sde-sspp-danger-lut = <0x000f 0xffff 0x0000>;
+ qcom,sde-sspp-safe-lut = <0xfffc 0xff00 0xffff>;
+
+ qcom,sde-vbif-off = <0 0>;
+ qcom,sde-vbif-id = <0 1>;
+ qcom,sde-vbif-default-ot-rd-limit = <32>;
+ qcom,sde-vbif-default-ot-wr-limit = <16>;
+ qcom,sde-vbif-dynamic-ot-rd-limit = <62208000 2>,
+ <124416000 4>, <248832000 16>;
+ qcom,sde-vbif-dynamic-ot-wr-limit = <62208000 2>,
+ <124416000 4>, <248832000 16>;
+
+ qcom,sde-dram-channels = <2>;
+ qcom,sde-num-nrt-paths = <1>;
+
+ qcom,sde-max-bw-high-kbps = <9000000>;
+ qcom,sde-max-bw-low-kbps = <9000000>;
+
+ qcom,sde-sspp-vig-blocks {
+ qcom,sde-vig-csc-off = <0x320>;
+ qcom,sde-vig-qseed-off = <0x200>;
+ /* Offset from vig top, version of HSIC */
+ qcom,sde-vig-hsic = <0x200 0x00010000>;
+ qcom,sde-vig-memcolor = <0x200 0x00010000>;
+ qcom,sde-vig-pcc = <0x1780 0x00010000>;
+ };
+
+ qcom,sde-sspp-rgb-blocks {
+ qcom,sde-rgb-scaler-off = <0x200>;
+ qcom,sde-rgb-pcc = <0x380 0x00010000>;
+ };
+
+ qcom,sde-dspp-blocks {
+ qcom,sde-dspp-pcc = <0x1700 0x00010000>;
+ qcom,sde-dspp-gc = <0x17c0 0x00010000>;
+ qcom,sde-dspp-hsic = <0x0 0x00010000>;
+ qcom,sde-dspp-memcolor = <0x0 0x00010000>;
+ qcom,sde-dspp-sixzone = <0x0 0x00010000>;
+ qcom,sde-dspp-gamut = <0x1600 0x00010000>;
+ qcom,sde-dspp-dither = <0x0 0x00010000>;
+ qcom,sde-dspp-hist = <0x0 0x00010000>;
+ qcom,sde-dspp-vlut = <0x0 0x00010000>;
+ };
+
+ qcom,sde-mixer-blocks {
+ qcom,sde-mixer-gc = <0x3c0 0x00010000>;
+ };
+
+ qcom,platform-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,platform-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdd";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ qcom,supply-pre-on-sleep = <0>;
+ qcom,supply-post-on-sleep = <0>;
+ qcom,supply-pre-off-sleep = <0>;
+ qcom,supply-post-off-sleep = <0>;
+ };
+ };
+
+ qcom,sde-data-bus {
+ qcom,msm-bus,name = "mdss_sde";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <3>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>, <23 512 0 0>, <25 512 0 0>,
+ <22 512 0 6400000>, <23 512 0 6400000>,
+ <25 512 0 6400000>,
+ <22 512 0 6400000>, <23 512 0 6400000>,
+ <25 512 0 6400000>;
+ };
+
+ qcom,sde-reg-bus {
+ /* Reg Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_reg";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>,
+ <1 590 0 160000>,
+ <1 590 0 320000>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
new file mode 100644
index 000000000000..48a2c6c78297
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/msm/sde-dsi.txt
@@ -0,0 +1,96 @@
+Qualcomm Technologies, Inc.
+
+mdss-dsi is the master DSI device which supports multiple DSI host controllers
+that are compatible with MIPI display serial interface specification.
+
+DSI Controller:
+Required properties:
+- compatible: Should be "qcom,dsi-ctrl-hw-v<version>". Supported
+ versions include 1.4 and 2.0.
+ eg: qcom,dsi-ctrl-hw-v1.4, qcom,dsi-ctrl-hw-v2.0
+ And for dsi phy driver:
+ qcom,dsi-phy-v1.0, qcom,dsi-phy-v2.0, qcom,dsi-phy-v3.0,
+ qcom,dsi-phy-v4.0
+- reg: Base address and length of DSI controller's memory
+ mapped regions.
+- reg-names: A list of strings that name the list of regs.
+ "dsi_ctrl" - DSI controller memory region.
+ "mmss_misc" - MMSS misc memory region.
+- cell-index: Specifies the controller instance.
+- clocks: Clocks required for DSI controller operation.
+- clock-names: Names of the clocks corresponding to handles. Following
+ clocks are required:
+ "mdp_core_clk"
+ "iface_clk"
+ "core_mmss_clk"
+ "bus_clk"
+ "byte_clk"
+ "pixel_clk"
+ "core_clk"
+ "byte_clk_rcg"
+ "pixel_clk_rcg"
+- gdsc-supply: phandle to gdsc regulator node.
+- vdda-supply: phandle to vdda regulator node.
+- vcca-supply: phandle to vcca regulator node.
+- interrupt-parent phandle to the interrupt parent device node.
+- interrupts: The interrupt signal from the DSI block.
+
+Bus Scaling Data:
+- qcom,msm-bus,name: String property describing MDSS client.
+- qcom,msm-bus,num-cases: This is the number of bus scaling use cases
+ defined in the vectors property. This must be
+ set to <2> for MDSS DSI driver where use-case 0
+ is used to remove BW votes from the system. Use
+ case 1 is used to generate bandwidth requests
+ when sending command packets.
+- qcom,msm-bus,num-paths: This represents number of paths in each bus
+ scaling usecase. This value depends on number of
+ AXI master ports dedicated to MDSS for
+ particular chipset.
+- qcom,msm-bus,vectors-KBps: A series of 4 cell properties, with a format
+ of (src, dst, ab, ib) which is defined at
+ Documentation/devicetree/bindings/arm/msm/msm_bus.txt.
+ DSI driver should always set average bandwidth
+ (ab) to 0 and always use instantaneous
+ bandwidth(ib) values.
+
+Optional properties:
+- label: String to describe controller.
+- qcom,platform-te-gpio: Specifies the gpio used for TE.
+- qcom,dsi-display-active: Current active display
+- qcom,dsi-ctrl: handle to dsi controller device
+- qcom,dsi-phy: handle to dsi phy device
+- qcom,dsi-manager: Specifies dsi manager is present
+- qcom,dsi-display: Specifies dsi display is present
+- qcom,hdmi-display: Specifies hdmi is present
+- qcom,dp-display: Specifies dp is present
+- qcom,<type>-supply-entries: A node that lists the elements of the supply used by
+ a particular "type" of DSI module. The module "types"
+ can be "core", "ctrl", and "phy". Within the same type,
+ there can be more than one instance of this binding,
+ in which case the entry would be appended with the
+ supply entry index.
+ e.g. qcom,ctrl-supply-entry@0
+ -- qcom,supply-name: name of the supply (vdd/vdda/vddio)
+ -- qcom,supply-min-voltage: minimum voltage level (uV)
+ -- qcom,supply-max-voltage: maximum voltage level (uV)
+ -- qcom,supply-enable-load: load drawn (uA) from enabled supply
+ -- qcom,supply-disable-load: load drawn (uA) from disabled supply
+ -- qcom,supply-pre-on-sleep: time to sleep (ms) before turning on
+ -- qcom,supply-post-on-sleep: time to sleep (ms) after turning on
+ -- qcom,supply-pre-off-sleep: time to sleep (ms) before turning off
+ -- qcom,supply-post-off-sleep: time to sleep (ms) after turning off
+- qcom,mdss-mdp-transfer-time-us: Specifies the dsi transfer time for command mode
+ panels in microseconds. Driver uses this number to adjust
+ the clock rate according to the expected transfer time.
+ Increasing this value would slow down the mdp processing
+ and can result in slower performance.
+ Decreasing this value can speed up the mdp processing,
+ but this can also impact power consumption.
+ As a rule this time should not be higher than the time
+ that would be expected with the processing at the
+ dsi link rate since anyway this would be the maximum
+ transfer time that could be achieved.
+ If ping pong split is enabled, this time should not be higher
+ than two times the dsi link rate time.
+ If the property is not specified, then the default value is 14000 us. \ No newline at end of file
diff --git a/Documentation/devicetree/bindings/drm/msm/sde-wb.txt b/Documentation/devicetree/bindings/drm/msm/sde-wb.txt
new file mode 100644
index 000000000000..863b334e438a
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/msm/sde-wb.txt
@@ -0,0 +1,23 @@
+QTI Snapdragon Display Engine (SDE) writeback display
+
+Required properties:
+- compatible: "qcom,wb-display"
+
+Optional properties:
+- cell-index: Index of writeback device instance.
+ Default to 0 if not specified.
+- label: String to describe this writeback display.
+ Default to "unknown" if not specified.
+
+Example:
+
+/ {
+ ...
+
+ sde_wb: qcom,wb-display {
+ compatible = "qcom,wb-display";
+ cell-index = <2>;
+ label = "wb_display";
+ };
+
+};
diff --git a/Documentation/devicetree/bindings/fb/mdss-dp.txt b/Documentation/devicetree/bindings/fb/mdss-dp.txt
index 27516d3b54a5..7bf7b9bacb60 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dp.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dp.txt
@@ -27,6 +27,7 @@ Required properties
- qcom,aux-en-gpio: Specifies the aux-channel enable gpio.
- qcom,aux-sel-gpio: Specifies the aux-channel select gpio.
- qcom,usbplug-cc-gpio: Specifies the usbplug orientation gpio.
+- qcom,aux-cfg-settings: An array that specifies the DP AUX configuration settings.
Optional properties:
- qcom,<type>-supply-entries: A node that lists the elements of the supply used by the
@@ -51,6 +52,8 @@ Optional properties:
- pinctrl-<0..n>: Lists phandles each pointing to the pin configuration node within a pin
controller. These pin configurations are installed in the pinctrl
device node. Refer to pinctrl-bindings.txt
+- qcom,logical2physical-lane-map: An array that specifies the DP logical to physical lane map setting.
+- qcom,phy-register-offset: An integer specifying the offset value of DP PHY register space.
Example:
mdss_dp_ctrl: qcom,dp_ctrl@c990000 {
@@ -83,6 +86,10 @@ Example:
"core_aux_clk", "core_cfg_ahb_clk", "ctrl_link_clk",
"ctrl_link_iface_clk", "ctrl_crypto_clk", "ctrl_pixel_clk";
+ qcom,aux-cfg-settings = [00 13 00 10 0a 26 0a 03 8b 03];
+ qcom,logical2physical-lane-map = [02 03 01 00];
+ qcom,phy-register-offset = <0x4>;
+
qcom,core-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index b676efe97b8b..4fd0c2ecbc6e 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -628,6 +628,7 @@ Example:
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = < 15>;
+ qcom,mdss-brightness-max-level = <255>;
qcom,mdss-dsi-interleave-mode = <0>;
qcom,mdss-dsi-panel-type = "dsi_video_mode";
qcom,mdss-dsi-te-check-enable;
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
index 221657780178..8adfeebb1580 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -99,10 +99,10 @@ First Level Node - FG Gen3 device
- qcom,fg-delta-soc-thr
Usage: optional
Value type: <u32>
- Definition: Percentage of monotonic SOC increase upon which the delta
- SOC interrupt will be triggered. If this property is not
- specified, then the default value will be 1. Possible
- values are in the range of 0 to 12.
+ Definition: Percentage of SOC increase upon which the delta monotonic &
+ battery SOC interrupts will be triggered. If this property
+ is not specified, then the default value will be 1.
+ Possible values are in the range of 0 to 12.
- qcom,fg-recharge-soc-thr
Usage: optional
@@ -156,12 +156,12 @@ First Level Node - FG Gen3 device
- qcom,cycle-counter-en
Usage: optional
- Value type: <bool>
+ Value type: <empty>
Definition: Enables the cycle counter feature.
- qcom,fg-force-load-profile
Usage: optional
- Value type: <bool>
+ Value type: <empty>
Definition: If set, battery profile will be force loaded if the profile
loaded earlier by bootloader doesn't match with the profile
available in the device tree.
@@ -229,13 +229,13 @@ First Level Node - FG Gen3 device
Definition: Battery temperature delta interrupt threshold. Possible
values are: 2, 4, 6 and 10. Unit is in Kelvin.
-- qcom,hold-soc-while-full:
+- qcom,hold-soc-while-full
Usage: optional
- Value type: <bool>
+ Value type: <empty>
Definition: A boolean property that when defined holds SOC at 100% when
the battery is full.
-- qcom,ki-coeff-soc-dischg:
+- qcom,ki-coeff-soc-dischg
Usage: optional
Value type: <prop-encoded-array>
Definition: Array of monotonic SOC threshold values to change the ki
@@ -243,7 +243,7 @@ First Level Node - FG Gen3 device
This should be defined in the ascending order and in the
range of 0-100. Array limit is set to 3.
-- qcom,ki-coeff-med-dischg:
+- qcom,ki-coeff-med-dischg
Usage: optional
Value type: <prop-encoded-array>
Definition: Array of ki coefficient values for medium discharge current
@@ -254,7 +254,7 @@ First Level Node - FG Gen3 device
is specified to make it fully functional. Value has no
unit. Allowed range is 0 to 62200 in micro units.
-- qcom,ki-coeff-hi-dischg:
+- qcom,ki-coeff-hi-dischg
Usage: optional
Value type: <prop-encoded-array>
Definition: Array of ki coefficient values for high discharge current
@@ -311,6 +311,15 @@ First Level Node - FG Gen3 device
148438 (14.84 %) will be used. Lowest possible value is
1954 (0.19 %).
+- qcom,fg-auto-recharge-soc
+ Usage: optional
+ Value type: <empty>
+ Definition: A boolean property when defined will configure automatic
+ recharge SOC threshold. If not specified, automatic
+ recharge voltage threshold will be configured. This has
+ to be configured in conjunction with the charger side
+ configuration for proper functionality.
+
==========================================================
Second Level Nodes - Peripherals managed by FG Gen3 driver
==========================================================
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index fc2c89bfbbd5..6f0d99d560cd 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -834,17 +834,6 @@ Example:
Required properties:
- compatible : "qcom,msm8974-audio-taiko"
- qcom,model : The user-visible name of this sound card.
-- reg : Offset and length of the register region(s) for MI2S/PCM MUX
-- reg-names : Register region name(s) referenced in reg above
- Required register resource entries are:
- "lpaif_pri_mode_muxsel": Physical address of MUX to select between
- Primary PCM and Primary MI2S
- "lpaif_sec_mode_muxsel": Physical address of MUX to select between
- Secondary PCM and Secondary MI2S
- "lpaif_tert_mode_muxsel": Physical address of MUX to select between
- Primary PCM and Tertiary MI2S
- "lpaif_quat_mode_muxsel": Physical address of MUX to select between
- Secondary PCM and Quarternary MI2S
- qcom,audio-routing : A list of the connections between audio components.
Each entry is a pair of strings, the first being the connection's sink,
the second being the connection's source.
@@ -882,6 +871,19 @@ Required properties:
codec dai names should match to that of the phandle order given
in "asoc-codec".
Optional properties:
+- reg : Offset and length of the register region(s) for MI2S/PCM MUX.
+ Not applicable for all targets.
+- reg-names : Register region name(s) referenced in reg above.
+ Not applicable for all targets.
+ Required register resource entries are:
+ "lpaif_pri_mode_muxsel": Physical address of MUX to select between
+ Primary PCM and Primary MI2S
+ "lpaif_sec_mode_muxsel": Physical address of MUX to select between
+ Secondary PCM and Secondary MI2S
+ "lpaif_tert_mode_muxsel": Physical address of MUX to select between
+ Primary PCM and Tertiary MI2S
+ "lpaif_quat_mode_muxsel": Physical address of MUX to select between
+ Secondary PCM and Quarternary MI2S
- qcom,hdmi-audio-rx: specifies if HDMI audio support is enabled or not.
- qcom,ext-ult-spk-amp-gpio : GPIO for enabling of speaker path amplifier.
diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt
index fb2b3654dcc7..b7a81efb768c 100644
--- a/Documentation/devicetree/bindings/sound/wcd_codec.txt
+++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt
@@ -1,4 +1,6 @@
-WCD audio CODEC
+* Qualcomm Technologies WCD Audio Codec
+
+This node models the Qualcomm Technologies Audio HW WCD Codecs
Required properties:
@@ -512,6 +514,10 @@ Required properties:
which is also existing driver WSA881x that represents
soundwire slave devices.
+Optional Properties:
+ - qcom,cache-always : Boolean. When defined in a WSA slave device
+                       node, all of its registers are treated as cacheable.
+
Example:
msm_sdw_codec: qcom,msm-sdw-codec@152c1000 {
@@ -529,6 +535,7 @@ msm_sdw_codec: qcom,msm-sdw-codec@152c1000 {
compatible = "qcom,wsa881x";
reg = <0x00 0x20170212>;
qcom,spkr-sd-n-gpio = <&tlmm 80 0>;
+ qcom,cache-always;
};
};
};
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 3136687adb57..ddca4c39e2de 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -60,6 +60,7 @@ Optional properties:
- snps,num-normal-evt-buffs: If present, specifies number of normal event buffers. Default is 1.
- snps,num-gsi-evt-buffs: If present, specifies number of GSI based hardware accelerated event buffers.
1 event buffer is needed per h/w accelerated endpoint.
+ - xhci-imod-value: Interrupt moderation interval for host mode (in increments of 250nsec).
This is usually a subnode to DWC3 glue to which it is connected.
@@ -74,4 +75,5 @@ dwc3@4a030000 {
tx-fifo-resize;
snps,usb3-u1u2-disable;
snps,num-gsi-evt-buffs = <0x2>;
+ xhci-imod-value = <4000>;
};
diff --git a/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi b/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
index 70156b1f8493..533861b4422a 100644
--- a/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
+++ b/arch/arm/boot/dts/qcom/apq8096-auto-dragonboard.dtsi
@@ -325,7 +325,7 @@
};
};
-#include "msm8996-mdss-panels.dtsi"
+#include "msm8996-sde-display.dtsi"
&dsi_hx8379a_fwvga_truly_vid {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
diff --git a/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi b/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi
index a7482bcce112..bfc6f210a0bb 100644
--- a/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi
+++ b/arch/arm/boot/dts/qcom/apq8096-dragonboard.dtsi
@@ -325,7 +325,7 @@
};
};
-#include "msm8996-mdss-panels.dtsi"
+#include "msm8996-sde-display.dtsi"
&dsi_hx8379a_fwvga_truly_vid {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
@@ -341,9 +341,6 @@
qcom,mdss-pref-prim-intf = "dsi";
};
-&mdss_dsi {
- hw-config = "single_dsi";
-};
&mdss_dsi0 {
qcom,dsi-pref-prim-pan = <&dsi_hx8379a_fwvga_truly_vid>;
diff --git a/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts b/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts
index bc18fb54400f..78fdba4fdb9b 100644
--- a/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts
+++ b/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,10 @@
status = "disabled";
};
+&msm_ath10k_wlan {
+	status = "ok";
+};
+
&mdss_mdp {
qcom,mdss-pref-prim-intf = "hdmi";
};
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-jdi-1080p-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-jdi-1080p-video.dtsi
index cecd8d3cf2a0..6f3f63d27d70 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-jdi-1080p-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-jdi-1080p-video.dtsi
@@ -10,8 +10,14 @@
* GNU General Public License for more details.
*/
+/*---------------------------------------------------------------------------
+ * This file is autogenerated file using gcdb parser. Please do not edit it.
+ * Update input XML file to add a new entry or update variable in this file
+ * VERSION = "1.0"
+ *---------------------------------------------------------------------------
+ */
&mdss_mdp {
- dsi_jdi_1080_vid: qcom,mdss_dsi_jdi_1080p_video {
+ dsi_jdi_1080p_video: qcom,mdss_dsi_jdi_1080p_video {
qcom,mdss-dsi-panel-name = "jdi 1080p video mode dsi panel";
qcom,mdss-dsi-panel-type = "dsi_video_mode";
qcom,mdss-dsi-panel-framerate = <60>;
@@ -31,19 +37,21 @@
qcom,mdss-dsi-v-top-border = <0>;
qcom,mdss-dsi-v-bottom-border = <0>;
qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-color-order = "rgb_swap_rgb";
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 55 00
- 15 01 00 00 00 00 02 53 2C
- 15 01 00 00 00 00 02 35 00
- 05 01 00 00 78 00 02 29 00
- 05 01 00 00 78 00 02 11 00];
+ 15 01 00 00 00 00 02 53 2C
+ 15 01 00 00 00 00 02 35 00
+ 05 01 00 00 78 00 02 29 00
+ 05 01 00 00 78 00 02 11 00];
qcom,mdss-dsi-off-command = [05 01 00 00 02 00 02 28 00
05 01 00 00 79 00 02 10 00];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <0>;
qcom,mdss-dsi-traffic-mode = "burst_mode";
+ qcom,mdss-dsi-lane-map = "lane_map_0123";
qcom,mdss-dsi-bllp-eof-power-mode;
qcom,mdss-dsi-bllp-power-mode;
qcom,mdss-dsi-lane-0-state;
@@ -51,9 +59,9 @@
qcom,mdss-dsi-lane-2-state;
qcom,mdss-dsi-lane-3-state;
qcom,mdss-dsi-panel-timings =
- [e7 36 24 00 66 6a 2a 3a 2d 03 04 00];
- qcom,mdss-dsi-t-clk-post = <0x04>;
- qcom,mdss-dsi-t-clk-pre = <0x1b>;
+ [ce 2e 1e 00 5a 5c 24 30 24 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x0d>;
+ qcom,mdss-dsi-t-clk-pre = <0x2f>;
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
@@ -61,6 +69,8 @@
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-pan-physical-width-dimension = <61>;
- qcom,mdss-pan-physical-height-dimension = <110>;
+		qcom,mdss-pan-physical-height-dimension = <110>;
+ qcom,mdss-dsi-tx-eot-append;
+ qcom,ulps-enabled;
};
};
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
index aeeaaa7ca6fb..ebd73ceaa8ce 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
@@ -61,35 +61,31 @@
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-te-pin;
qcom,ulps-enabled;
- qcom,mdss-dsi-panel-hdr-enabled;
- qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
- 17000 15500 30000 8000 3000>;
- qcom,mdss-dsi-panel-peak-brightness = <4200000>;
- qcom,mdss-dsi-panel-blackness-level = <3230>;
- qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 ba 03
- 15 01 00 00 00 00 02 e5 01
- 15 01 00 00 00 00 02 35 00
- 15 01 00 00 00 00 02 bb 10
- 15 01 00 00 00 00 02 b0 03
- 15 01 00 00 00 00 02 ff e0
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 6b 3d
- 15 01 00 00 00 00 02 6c 3d
- 15 01 00 00 00 00 02 6d 3d
- 15 01 00 00 00 00 02 6e 3d
- 15 01 00 00 00 00 02 6f 3d
- 15 01 00 00 00 00 02 35 02
- 15 01 00 00 00 00 02 36 72
- 15 01 00 00 00 00 02 37 10
- 15 01 00 00 00 00 02 08 c0
- 15 01 00 00 00 00 02 ff 24
- 15 01 00 00 00 00 02 fb 01
- 15 01 00 00 00 00 02 c6 06
- 15 01 00 00 00 00 02 ff 10
- 05 01 00 00 78 00 02 11 00
- 05 01 00 00 32 00 02 29 00];
+ qcom,mdss-dsi-on-command = [15 01 00 00 10 00 02 ff 10
+ 15 01 00 00 10 00 02 fb 01
+ 15 01 00 00 10 00 02 ba 03
+ 15 01 00 00 10 00 02 e5 01
+ 15 01 00 00 10 00 02 35 00
+ 15 01 00 00 10 00 02 bb 10
+ 15 01 00 00 10 00 02 b0 03
+ 15 01 00 00 10 00 02 ff e0
+ 15 01 00 00 10 00 02 fb 01
+ 15 01 00 00 10 00 02 6b 3d
+ 15 01 00 00 10 00 02 6c 3d
+ 15 01 00 00 10 00 02 6d 3d
+ 15 01 00 00 10 00 02 6e 3d
+ 15 01 00 00 10 00 02 6f 3d
+ 15 01 00 00 10 00 02 35 02
+ 15 01 00 00 10 00 02 36 72
+ 15 01 00 00 10 00 02 37 10
+ 15 01 00 00 10 00 02 08 c0
+ 15 01 00 00 10 00 02 ff 24
+ 15 01 00 00 10 00 02 fb 01
+ 15 01 00 00 10 00 02 c6 06
+ 15 01 00 00 10 00 02 9d 30 /* Enable IMGSWAP */
+ 15 01 00 00 10 00 02 ff 10
+ 05 01 00 00 a0 00 02 11 00
+ 05 01 00 00 a0 00 02 29 00];
qcom,mdss-dsi-off-command = [05 01 00 00 0a 00 02 28 00
05 01 00 00 3c 00 02 10 00];
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
index d125a5783f9e..f2f77e3ed9cb 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -201,6 +201,7 @@
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-dsi-tx-eot-append;
qcom,config-select = <&dsi_dual_nt35597_truly_video_config0>;
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
index 68dabd2fe41c..401cb21b4ada 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
@@ -13,7 +13,9 @@
&mdss_mdp {
dsi_sharp_1080_cmd: qcom,mdss_dsi_sharp_1080p_cmd {
qcom,mdss-dsi-panel-name = "sharp 1080p cmd mode dsi panel";
+ qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+ qcom,mdss-dsi-panel-destination = "display_1";
qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-panel-clockrate = <850000000>;
qcom,mdss-dsi-virtual-channel-id = <0>;
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dualmipi-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dualmipi-wqxga-video.dtsi
index 2a5b8a248730..94620f007dd9 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-sharp-dualmipi-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-sharp-dualmipi-wqxga-video.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,7 +31,7 @@
qcom,mdss-dsi-border-color = <0>;
qcom,mdss-dsi-on-command = [05 01 00 00 a0 00 02 11 00
05 01 00 00 02 00 02 29 00];
- qcom,mdss-dsi-off-command = [05 01 00 00 02 00 02 28 00
+ qcom,mdss-dsi-pre-off-command = [05 01 00 00 02 00 02 28 00
05 01 00 00 a0 00 02 10 00];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-sim-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-sim-video.dtsi
index 36e3022e4d1f..e5a5ee8f08d9 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-sim-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-sim-video.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,9 +19,9 @@
qcom,mdss-dsi-stream = <0>;
qcom,mdss-dsi-panel-width = <640>;
qcom,mdss-dsi-panel-height = <480>;
- qcom,mdss-dsi-h-front-porch = <6>;
- qcom,mdss-dsi-h-back-porch = <6>;
- qcom,mdss-dsi-h-pulse-width = <2>;
+ qcom,mdss-dsi-h-front-porch = <8>;
+ qcom,mdss-dsi-h-back-porch = <8>;
+ qcom,mdss-dsi-h-pulse-width = <8>;
qcom,mdss-dsi-h-sync-skew = <0>;
qcom,mdss-dsi-v-back-porch = <6>;
qcom,mdss-dsi-v-front-porch = <6>;
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-toshiba-720p-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-toshiba-720p-video.dtsi
new file mode 100644
index 000000000000..191a3fba8ce6
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/dsi-panel-toshiba-720p-video.dtsi
@@ -0,0 +1,100 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_tosh_720_vid: qcom,mdss_dsi_toshiba_720p_video {
+ qcom,mdss-dsi-panel-name = "toshiba 720p video mode dsi panel";
+ qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
+ qcom,mdss-dsi-panel-type = "dsi_video_mode";
+ qcom,mdss-dsi-panel-destination = "display_1";
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-panel-width = <720>;
+ qcom,mdss-dsi-panel-height = <1280>;
+ qcom,mdss-dsi-h-front-porch = <144>;
+ qcom,mdss-dsi-h-back-porch = <32>;
+ qcom,mdss-dsi-h-pulse-width = <12>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <3>;
+ qcom,mdss-dsi-v-front-porch = <9>;
+ qcom,mdss-dsi-v-pulse-width = <4>;
+ qcom,mdss-dsi-h-left-border = <0>;
+ qcom,mdss-dsi-h-right-border = <0>;
+ qcom,mdss-dsi-v-top-border = <0>;
+ qcom,mdss-dsi-v-bottom-border = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-on-command = [23 01 00 00 0a 00 02 b0 00
+ 23 01 00 00 0a 00 02 b2 00
+ 23 01 00 00 0a 00 02 b3 0c
+ 23 01 00 00 0a 00 02 b4 02
+ 29 01 00 00 00 00 06 c0 40 02 7f c8 08
+ 29 01 00 00 00 00 10 c1 00 a8 00 00 00
+ 00 00 9d 08 27 00 00 00 00 00
+ 29 01 00 00 00 00 06 c2 00 00 09 00 00
+ 23 01 00 00 0a 00 02 c3 04
+ 29 01 00 00 00 00 04 c4 4d 83 00
+ 29 01 00 00 00 00 0b c6 12 00 08 71 00
+ 00 00 80 00 04
+ 23 01 00 00 0a 00 02 c7 22
+ 29 01 00 00 00 00 05 c8 4c 0c 0c 0c
+ 29 01 00 00 00 00 0e c9 00 40 00 16 32
+ 2e 3a 43 3e 3c 45 79 3f
+ 29 01 00 00 00 00 0e ca 00 46 1a 23 21
+ 1c 25 31 2d 49 5f 7f 3f
+ 29 01 00 00 00 00 0e cb 00 4c 20 3a 42
+ 40 47 4b 42 3e 46 7e 3f
+ 29 01 00 00 00 00 0e cc 00 41 19 21 1d
+ 14 18 1f 1d 25 3f 73 3f
+ 29 01 00 00 00 00 0e cd 23 79 5a 5f 57
+ 4c 51 51 45 3f 4b 7f 3f
+ 29 01 00 00 00 00 0e ce 00 40 14 20 1a
+ 0e 0e 13 08 00 05 46 1c
+ 29 01 00 00 00 00 04 d0 6a 64 01
+ 29 01 00 00 00 00 03 d1 77 d4
+ 23 01 00 00 0a 00 02 d3 33
+ 29 01 00 00 00 00 03 d5 0f 0f
+ 29 01 00 00 00 00 07 d8 34 64 23 25 62 32
+ 29 01 00 00 00 00 0c de 10 7b 11 0a 00
+ 00 00 00 00 00 00
+ 29 01 00 00 00 00 09 fd 04 55 53 00 70 ff 10 73
+ 23 01 00 00 0a 00 02 e2 00
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 32 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 32 00 02 28 00
+ 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-h-sync-pulse = <0>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-lane-3-state;
+ qcom,mdss-dsi-panel-timings = [b0 23 1b 00 94 93 1e 25
+ 15 03 04 00];
+ qcom,mdss-dsi-t-clk-post = <0x04>;
+ qcom,mdss-dsi-t-clk-pre = <0x1b>;
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update = "dfps_suspend_resume_mode";
+ qcom,mdss-dsi-reset-sequence = <1 20>, <0 200>, <1 20>;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-2800mah.dtsi b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-2800mah.dtsi
new file mode 100644
index 000000000000..a83d860cd697
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-2800mah.dtsi
@@ -0,0 +1,81 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+qcom,ascent_2800mah {
+ /* #Ascent_860_82912_0000_2800mAh_averaged_MasterSlave_Jan11th2017*/
+ qcom,max-voltage-uv = <4350000>;
+ qcom,fg-cc-cv-threshold-mv = <4340>;
+ qcom,fastchg-current-ma = <2800>;
+ qcom,batt-id-kohm = <20>;
+ qcom,battery-beta = <3450>;
+ qcom,battery-type = "ascent_2800mah_averaged_masterslave_jan11th2017";
+ qcom,checksum = <0x0110>;
+ qcom,gui-version = "PMI8998GUI - 2.0.0.54";
+ qcom,fg-profile-data = [
+ 21 21 F5 0D
+ 82 0B 6E 05
+ 0C 1D 5F FA
+ 74 06 97 01
+ 0E 18 F7 22
+ A8 45 B1 52
+ 76 00 00 00
+ 0E 00 00 00
+ 00 00 3D C4
+ 6E CD 2A CB
+ 21 00 08 00
+ 28 D3 2E E5
+ 0E 06 BA F3
+ 59 E3 22 12
+ 08 E5 54 32
+ 22 06 09 20
+ 27 00 14 00
+ 4B 20 F6 04
+ CF 0A 04 06
+ 25 1D B7 FA
+ DD F4 BB 06
+ FE 18 E1 22
+ 73 45 32 53
+ 5F 00 00 00
+ 0E 00 00 00
+ 00 00 D5 D5
+ 9C CC 8E D3
+ 1A 00 00 00
+ 6E EA 2E E5
+ 6E 06 A9 00
+ 6D F5 73 0B
+ 2A 02 61 1B
+ B1 33 CC FF
+ 07 10 00 00
+ 14 0B 99 45
+ 1A 00 40 00
+ 7D 01 0A FA
+ FF 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ 00 00 00 00
+ ];
+};
diff --git a/arch/arm/boot/dts/qcom/msm-audio.dtsi b/arch/arm/boot/dts/qcom/msm-audio.dtsi
index e34cf0426887..fc0e828eb2bc 100644
--- a/arch/arm/boot/dts/qcom/msm-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-audio.dtsi
@@ -593,16 +593,6 @@
qcom,mi2s-audio-intf;
qcom,auxpcm-audio-intf;
qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
-
- reg = <0x1508a000 0x4>,
- <0x1508b000 0x4>,
- <0x1508c000 0x4>,
- <0x1508d000 0x4>;
- reg-names = "lpaif_pri_mode_muxsel",
- "lpaif_sec_mode_muxsel",
- "lpaif_tert_mode_muxsel",
- "lpaif_quat_mode_muxsel";
-
qcom,audio-routing =
"AIF4 VI", "MCLK",
"RX_BIAS", "MCLK",
@@ -705,16 +695,6 @@
qcom,mi2s-audio-intf;
qcom,auxpcm-audio-intf;
qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
-
- reg = <0x1508a000 0x4>,
- <0x1508b000 0x4>,
- <0x1508c000 0x4>,
- <0x1508d000 0x4>;
- reg-names = "lpaif_pri_mode_muxsel",
- "lpaif_sec_mode_muxsel",
- "lpaif_tert_mode_muxsel",
- "lpaif_quat_mode_muxsel";
-
qcom,audio-routing =
"RX_BIAS", "MCLK",
"MADINPUT", "MCLK",
@@ -810,21 +790,10 @@
status = "disabled";
compatible = "qcom,sdm660-asoc-snd";
qcom,model = "sdm660-snd-card";
- qcom,wsa-disable;
qcom,wcn-btfm;
qcom,mi2s-audio-intf;
qcom,auxpcm-audio-intf;
qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
-
- reg = <0x1508a000 0x4>,
- <0x1508b000 0x4>,
- <0x1508c000 0x4>,
- <0x1508d000 0x4>;
- reg-names = "lpaif_pri_mode_muxsel",
- "lpaif_sec_mode_muxsel",
- "lpaif_tert_mode_muxsel",
- "lpaif_quat_mode_muxsel";
-
qcom,msm-mclk-freq = <9600000>;
qcom,msm-mbhc-hphl-swh = <1>;
qcom,msm-mbhc-gnd-swh = <1>;
@@ -834,7 +803,6 @@
qcom,cdc-pdm-gpios = <&cdc_pdm_gpios>;
qcom,cdc-comp-gpios = <&cdc_comp_gpios>;
qcom,cdc-dmic-gpios = <&cdc_dmic_gpios>;
- qcom,cdc-sdw-gpios = <&cdc_sdw_gpios>;
qcom,audio-routing =
"RX_BIAS", "INT_MCLK0",
"SPK_RX_BIAS", "INT_MCLK0",
diff --git a/arch/arm/boot/dts/qcom/msm-pm660.dtsi b/arch/arm/boot/dts/qcom/msm-pm660.dtsi
index e8e773a33622..1154c28bb9ea 100644
--- a/arch/arm/boot/dts/qcom/msm-pm660.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pm660.dtsi
@@ -497,6 +497,7 @@
#address-cells = <1>;
#size-cells = <0>;
#io-channel-cells = <1>;
+ qcom,pmic-revid = <&pm660_revid>;
};
pm660_fg: qpnp,fg {
@@ -517,8 +518,10 @@
reg = <0x4000 0x100>;
interrupts = <0x0 0x40 0x0 IRQ_TYPE_EDGE_BOTH>,
<0x0 0x40 0x1 IRQ_TYPE_EDGE_BOTH>,
- <0x0 0x40 0x2 IRQ_TYPE_EDGE_BOTH>,
- <0x0 0x40 0x3 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x40 0x2
+ IRQ_TYPE_EDGE_RISING>,
+ <0x0 0x40 0x3
+ IRQ_TYPE_EDGE_RISING>,
<0x0 0x40 0x4 IRQ_TYPE_EDGE_BOTH>,
<0x0 0x40 0x5
IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi b/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi
index be47b6483288..9218f8dc583c 100644
--- a/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pmi8998.dtsi
@@ -333,8 +333,10 @@
reg = <0x4000 0x100>;
interrupts = <0x2 0x40 0x0 IRQ_TYPE_EDGE_BOTH>,
<0x2 0x40 0x1 IRQ_TYPE_EDGE_BOTH>,
- <0x2 0x40 0x2 IRQ_TYPE_EDGE_BOTH>,
- <0x2 0x40 0x3 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x40 0x2
+ IRQ_TYPE_EDGE_RISING>,
+ <0x2 0x40 0x3
+ IRQ_TYPE_EDGE_RISING>,
<0x2 0x40 0x4 IRQ_TYPE_EDGE_BOTH>,
<0x2 0x40 0x5
IRQ_TYPE_EDGE_RISING>,
diff --git a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
index e6e04f19d7ea..2aa730898f75 100644
--- a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
@@ -57,8 +57,8 @@
die_temp@2 {
reg = <2>;
- qcom,scale = <(-1032)>;
- qcom,offset = <344125>;
+ qcom,scale = <(-1306)>;
+ qcom,offset = <397904>;
};
batt_i@3 {
diff --git a/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi
index 165c7de039e5..6fafb8b38d06 100644
--- a/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi
@@ -340,6 +340,7 @@
};
};
+#include "msm8996-sde-display.dtsi"
#include "msm8996-mdss-panels.dtsi"
&mdss_mdp {
@@ -350,6 +351,21 @@
hw-config = "split_dsi";
};
+&mdss_hdmi_tx {
+ pinctrl-names = "hdmi_hpd_active", "hdmi_ddc_active", "hdmi_cec_active",
+ "hdmi_active", "hdmi_sleep";
+ pinctrl-0 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_suspend
+ &mdss_hdmi_cec_suspend>;
+ pinctrl-1 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_active
+ &mdss_hdmi_cec_suspend>;
+ pinctrl-2 = <&mdss_hdmi_hpd_active &mdss_hdmi_cec_active
+ &mdss_hdmi_ddc_suspend>;
+ pinctrl-3 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_active
+ &mdss_hdmi_cec_active>;
+ pinctrl-4 = <&mdss_hdmi_hpd_suspend &mdss_hdmi_ddc_suspend
+ &mdss_hdmi_cec_suspend>;
+};
+
&mdss_dsi0 {
qcom,dsi-pref-prim-pan = <&dsi_dual_sharp_video>;
pinctrl-names = "mdss_default", "mdss_sleep";
@@ -370,19 +386,8 @@
qcom,platform-bklight-en-gpio = <&pm8994_gpios 14 0>;
};
-&mdss_hdmi_tx {
- pinctrl-names = "hdmi_hpd_active", "hdmi_ddc_active", "hdmi_cec_active",
- "hdmi_active", "hdmi_sleep";
- pinctrl-0 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_suspend
- &mdss_hdmi_cec_suspend>;
- pinctrl-1 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_active
- &mdss_hdmi_cec_suspend>;
- pinctrl-2 = <&mdss_hdmi_hpd_active &mdss_hdmi_cec_active
- &mdss_hdmi_ddc_suspend>;
- pinctrl-3 = <&mdss_hdmi_hpd_active &mdss_hdmi_ddc_active
- &mdss_hdmi_cec_active>;
- pinctrl-4 = <&mdss_hdmi_hpd_suspend &mdss_hdmi_ddc_suspend
- &mdss_hdmi_cec_suspend>;
+&ibb_regulator {
+ qcom,qpnp-ibb-discharge-resistor = <32>;
};
&labibb {
@@ -390,11 +395,24 @@
qcom,qpnp-labibb-mode = "lcd";
};
+&dsi_tosh_720_vid {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,cont-splash-enabled;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply_vdd_no_labibb>;
+ qcom,platform-reset-gpio = <&tlmm 8 0>;
+ qcom,platform-bklight-en-gpio = <&pm8994_gpios 14 0>;
+ qcom,5v-boost-gpio = <&pmi8994_gpios 8 0>;
+};
+
&dsi_dual_sharp_video {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,platform-reset-gpio = <&tlmm 8 0>;
+ qcom,platform-bklight-en-gpio = <&pm8994_gpios 14 0>;
};
&dsi_dual_nt35597_video {
@@ -402,6 +420,7 @@
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,platform-reset-gpio = <&tlmm 8 0>;
};
&dsi_dual_nt35597_cmd {
@@ -411,6 +430,7 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
qcom,partial-update-enabled = "single_roi";
qcom,panel-roi-alignment = <720 128 720 64 720 64>;
+ qcom,platform-reset-gpio = <&tlmm 8 0>;
};
&dsi_nt35950_4k_dsc_cmd {
@@ -502,6 +522,17 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
+&dsi_jdi_1080p_video {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,cont-splash-enabled;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply_no_labibb>;
+ qcom,platform-reset-gpio = <&tlmm 8 0>;
+ qcom,platform-bklight-en-gpio = <&pm8994_gpios 14 0>;
+ qcom,5v-boost-gpio = <&pmi8994_gpios 8 0>;
+};
+
&pm8994_gpios {
gpio@c700 { /* GPIO 8 - WLAN_EN */
qcom,mode = <1>; /* Digital output*/
diff --git a/arch/arm/boot/dts/qcom/msm8996-dtp.dtsi b/arch/arm/boot/dts/qcom/msm8996-dtp.dtsi
index 5c62766b1a26..c2667b49fedb 100644
--- a/arch/arm/boot/dts/qcom/msm8996-dtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-dtp.dtsi
@@ -11,7 +11,7 @@
*/
#include "msm8996-pinctrl.dtsi"
-#include "msm8996-mdss-panels.dtsi"
+#include "msm8996-sde-display.dtsi"
#include "msm8996-camera-sensor-dtp.dtsi"
#include "msm8996-wsa881x.dtsi"
@@ -467,10 +467,6 @@
status = "disabled";
};
-&mdss_dsi {
- hw-config = "split_dsi";
-};
-
&mdss_dsi0 {
qcom,dsi-pref-prim-pan = <&dsi_r69007_wqxga_cmd>;
pinctrl-names = "mdss_default", "mdss_sleep";
diff --git a/arch/arm/boot/dts/qcom/msm8996-fluid.dtsi b/arch/arm/boot/dts/qcom/msm8996-fluid.dtsi
index baecf4b8574e..86bc8099c4d6 100644
--- a/arch/arm/boot/dts/qcom/msm8996-fluid.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-fluid.dtsi
@@ -587,6 +587,7 @@
status = "ok";
};
+#include "msm8996-sde-display.dtsi"
#include "msm8996-mdss-panels.dtsi"
&mdss_mdp {
diff --git a/arch/arm/boot/dts/qcom/msm8996-liquid.dtsi b/arch/arm/boot/dts/qcom/msm8996-liquid.dtsi
index dae7306cdd07..571e67a7dd93 100644
--- a/arch/arm/boot/dts/qcom/msm8996-liquid.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-liquid.dtsi
@@ -294,16 +294,12 @@
};
};
-#include "msm8996-mdss-panels.dtsi"
+#include "msm8996-sde-display.dtsi"
&mdss_mdp {
qcom,mdss-pref-prim-intf = "dsi";
};
-&mdss_dsi {
- hw-config = "split_dsi";
-};
-
&mdss_dsi0 {
qcom,dsi-pref-prim-pan = <&dsi_dual_jdi_4k_nofbc_video>;
pinctrl-names = "mdss_default", "mdss_sleep";
diff --git a/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi
index bfb85274846f..18a0f29e4d8a 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mdss-panels.dtsi
@@ -30,7 +30,9 @@
#include "dsi-panel-sharp-dualmipi-1080p-120hz.dtsi"
#include "dsi-panel-sharp-1080p-cmd.dtsi"
#include "dsi-panel-sharp-dsc-4k-video.dtsi"
+#include "dsi-panel-toshiba-720p-video.dtsi"
#include "dsi-panel-sharp-dsc-4k-cmd.dtsi"
+#include "dsi-panel-jdi-1080p-video.dtsi"
&soc {
dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@@ -127,6 +129,16 @@
qcom,supply-disable-load = <80>;
qcom,supply-post-on-sleep = <20>;
};
+
+ qcom,panel-supply-entry@1 {
+ reg = <1>;
+ qcom,supply-name = "vdd";
+ qcom,supply-min-voltage = <3000000>;
+ qcom,supply-max-voltage = <3000000>;
+ qcom,supply-enable-load = <857000>;
+ qcom,supply-disable-load = <0>;
+ qcom,supply-post-on-sleep = <0>;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
index 27d3eea5bc20..ab10a71d1fd7 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
@@ -336,12 +336,17 @@
};
};
+#include "msm8996-sde-display.dtsi"
#include "msm8996-mdss-panels.dtsi"
&mdss_mdp {
qcom,mdss-pref-prim-intf = "dsi";
};
+&mdss_hdmi {
+ status = "ok";
+};
+
&mdss_dsi {
hw-config = "split_dsi";
};
@@ -366,23 +371,44 @@
qcom,platform-bklight-en-gpio = <&pm8994_gpios 14 0>;
};
+&ibb_regulator {
+ qcom,qpnp-ibb-discharge-resistor = <32>;
+};
+
&labibb {
status = "ok";
qcom,qpnp-labibb-mode = "lcd";
};
+&dsi_tosh_720_vid {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,cont-splash-enabled;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply_vdd_no_labibb>;
+ qcom,platform-reset-gpio = <&tlmm 8 0>;
+ qcom,platform-bklight-en-gpio = <&pm8994_gpios 14 0>;
+ qcom,5v-boost-gpio = <&pmi8994_gpios 8 0>;
+};
+
&dsi_dual_sharp_video {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,platform-reset-gpio = <&tlmm 8 0>;
+ qcom,platform-bklight-en-gpio = <&pm8994_gpios 14 0>;
};
&dsi_sharp_1080_cmd {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,cont-splash-enabled;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,platform-reset-gpio = <&tlmm 8 0>;
+ qcom,platform-bklight-en-gpio = <&pm8994_gpios 14 0>;
+ qcom,5v-boost-gpio = <&pmi8994_gpios 8 0>;
};
&dsi_dual_nt35597_video {
@@ -390,6 +416,7 @@
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,platform-reset-gpio = <&tlmm 8 0>;
};
&dsi_dual_nt35597_cmd {
@@ -399,6 +426,7 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
qcom,partial-update-enabled = "single_roi";
qcom,panel-roi-alignment = <720 128 720 64 720 64>;
+ qcom,platform-reset-gpio = <&tlmm 8 0>;
};
&dsi_nt35950_4k_dsc_cmd {
@@ -483,6 +511,17 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
+&dsi_jdi_1080p_video {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,cont-splash-enabled;
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply_no_labibb>;
+ qcom,platform-reset-gpio = <&tlmm 8 0>;
+ qcom,platform-bklight-en-gpio = <&pm8994_gpios 14 0>;
+ qcom,5v-boost-gpio = <&pmi8994_gpios 8 0>;
+};
+
/{
mtp_batterydata: qcom,battery-data {
qcom,batt-id-range-pct = <15>;
diff --git a/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi
new file mode 100644
index 000000000000..3f81da7c3ebc
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8996-sde-display.dtsi
@@ -0,0 +1,352 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi-panel-toshiba-720p-video.dtsi"
+#include "dsi-panel-sharp-dualmipi-wqxga-video.dtsi"
+#include "dsi-panel-nt35597-dualmipi-wqxga-video.dtsi"
+#include "dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi"
+#include "dsi-panel-nt35597-dsc-wqxga-video.dtsi"
+#include "dsi-panel-jdi-dualmipi-video.dtsi"
+#include "dsi-panel-jdi-dualmipi-cmd.dtsi"
+#include "dsi-panel-jdi-4k-dualmipi-video-nofbc.dtsi"
+#include "dsi-panel-sim-video.dtsi"
+#include "dsi-panel-sim-dualmipi-video.dtsi"
+#include "dsi-panel-sim-cmd.dtsi"
+#include "dsi-panel-sim-dualmipi-cmd.dtsi"
+#include "dsi-panel-nt35597-dsc-wqxga-cmd.dtsi"
+#include "dsi-panel-hx8379a-truly-fwvga-video.dtsi"
+#include "dsi-panel-r69007-dualdsi-wqxga-cmd.dtsi"
+#include "dsi-panel-jdi-1080p-video.dtsi"
+#include "dsi-panel-sharp-1080p-cmd.dtsi"
+
+&soc {
+ dsi_panel_pwr_supply: dsi_panel_pwr_supply {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,panel-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vddio";
+ qcom,supply-min-voltage = <1800000>;
+ qcom,supply-max-voltage = <1800000>;
+ qcom,supply-enable-load = <62000>;
+ qcom,supply-disable-load = <80>;
+ qcom,supply-post-on-sleep = <20>;
+ };
+
+ qcom,panel-supply-entry@1 {
+ reg = <1>;
+ qcom,supply-name = "lab";
+ qcom,supply-min-voltage = <4600000>;
+ qcom,supply-max-voltage = <6000000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+
+ qcom,panel-supply-entry@2 {
+ reg = <2>;
+ qcom,supply-name = "ibb";
+ qcom,supply-min-voltage = <4600000>;
+ qcom,supply-max-voltage = <6000000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ qcom,supply-post-on-sleep = <20>;
+ };
+ };
+
+ dsi_panel_pwr_supply_no_labibb: dsi_panel_pwr_supply_no_labibb {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,panel-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vddio";
+ qcom,supply-min-voltage = <1800000>;
+ qcom,supply-max-voltage = <1800000>;
+ qcom,supply-enable-load = <62000>;
+ qcom,supply-disable-load = <80>;
+ qcom,supply-post-on-sleep = <20>;
+ };
+ };
+
+ dsi_panel_pwr_supply_vdd_no_labibb: dsi_panel_pwr_supply_vdd_no_labibb {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,panel-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vddio";
+ qcom,supply-min-voltage = <1800000>;
+ qcom,supply-max-voltage = <1800000>;
+ qcom,supply-enable-load = <62000>;
+ qcom,supply-disable-load = <80>;
+ qcom,supply-post-on-sleep = <20>;
+ };
+
+ qcom,panel-supply-entry@1 {
+ reg = <1>;
+ qcom,supply-name = "vdd";
+ qcom,supply-min-voltage = <3000000>;
+ qcom,supply-max-voltage = <3000000>;
+ qcom,supply-enable-load = <857000>;
+ qcom,supply-disable-load = <0>;
+ qcom,supply-post-on-sleep = <0>;
+ };
+ };
+
+ dsi_dual_sharp_video_1: qcom,dsi-display@0 {
+ compatible = "qcom,dsi-display";
+ label = "dsi_dual_sharp_video";
+ qcom,display-type = "primary";
+
+ qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+ qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+ clocks = <&clock_mmss clk_ext_byte0_clk_src>,
+ <&clock_mmss clk_ext_pclk0_clk_src>;
+ clock-names = "src_byte_clk", "src_pixel_clk";
+
+ pinctrl-names = "panel_active", "panel_suspend";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&tlmm 8 0>;
+
+ qcom,dsi-panel = <&dsi_dual_sharp_video>;
+ vddio-supply = <&pm8994_l14>;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+ qcom,dsi-display-active;
+ };
+
+ single_dsi_sim_vid: qcom,dsi-display@1 {
+ compatible = "qcom,dsi-display";
+ label = "single_dsi_sim";
+ qcom,display-type = "primary";
+
+ qcom,dsi-ctrl = <&mdss_dsi0>;
+ qcom,dsi-phy = <&mdss_dsi_phy0>;
+ clocks = <&clock_mmss clk_ext_byte0_clk_src>,
+ <&clock_mmss clk_ext_pclk0_clk_src>;
+ clock-names = "src_byte_clk", "src_pixel_clk";
+
+ pinctrl-names = "panel_active", "panel_suspend";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+
+ qcom,dsi-panel = <&dsi_sim_vid>;
+ vddio-supply = <&pm8994_l14>;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+ };
+
+ dsi_toshiba_720p_vid: qcom,dsi-display@2 {
+ compatible = "qcom,dsi-display";
+ label = "single_dsi_toshiba_720p";
+ qcom,display-type = "primary";
+
+ qcom,dsi-ctrl = <&mdss_dsi0>;
+ qcom,dsi-phy = <&mdss_dsi_phy0>;
+ clocks = <&clock_mmss clk_ext_byte0_clk_src>,
+ <&clock_mmss clk_ext_pclk0_clk_src>;
+ clock-names = "src_byte_clk", "src_pixel_clk";
+
+ pinctrl-names = "panel_active", "panel_suspend";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+
+ qcom,dsi-panel = <&dsi_tosh_720_vid>;
+ vddio-supply = <&pm8994_l14>;
+ vdd-supply = <&pm8994_l19>;
+ };
+
+ dsi_jdi_1080p_vid: qcom,dsi-display@3 {
+ compatible = "qcom,dsi-display";
+ label = "single_dsi_jdi_1080p";
+ qcom,display-type = "primary";
+
+ qcom,dsi-ctrl = <&mdss_dsi0>;
+ qcom,dsi-phy = <&mdss_dsi_phy0>;
+ clocks = <&clock_mmss clk_ext_byte0_clk_src>,
+ <&clock_mmss clk_ext_pclk0_clk_src>;
+ clock-names = "src_byte_clk", "src_pixel_clk";
+
+ pinctrl-names = "panel_active", "panel_suspend";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+
+ qcom,dsi-panel = <&dsi_jdi_1080p_video>;
+ vddio-supply = <&pm8994_l14>;
+ };
+
+ dsi_sharp_fhd_cmd: qcom,dsi-display@4 {
+ compatible = "qcom,dsi-display";
+ label = "single_dsi_sharp_1080p";
+ qcom,display-type = "primary";
+
+ qcom,dsi-ctrl = <&mdss_dsi0>;
+ qcom,dsi-phy = <&mdss_dsi_phy0>;
+ clocks = <&clock_mmss clk_ext_byte0_clk_src>,
+ <&clock_mmss clk_ext_pclk0_clk_src>;
+ clock-names = "src_byte_clk", "src_pixel_clk";
+
+ pinctrl-names = "panel_active", "panel_suspend";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+
+ qcom,dsi-panel = <&dsi_sharp_1080_cmd>;
+ vddio-supply = <&pm8994_l14>;
+ vdd-supply = <&pm8994_l19>;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+ };
+
+ sde_wb: qcom,wb-display@0 {
+ compatible = "qcom,wb-display";
+ cell-index = <0>;
+ label = "wb_display";
+ };
+
+	dsi_dual_nt35597_cmd_1: qcom,dsi-display@5 {
+		compatible = "qcom,dsi-display";
+		label = "dsi_dual_nt35597_cmd";
+		qcom,display-type = "primary";
+
+		/* dsi1/dsi0 swapped due to IMGSWAP */
+		qcom,dsi-ctrl = <&mdss_dsi1 &mdss_dsi0>;
+		qcom,dsi-phy = <&mdss_dsi_phy1 &mdss_dsi_phy0>;
+		clocks = <&clock_mmss clk_ext_byte0_clk_src>,
+			<&clock_mmss clk_ext_pclk0_clk_src>;
+		clock-names = "src_byte_clk", "src_pixel_clk";
+
+		pinctrl-names = "panel_active", "panel_suspend";
+		pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+		pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+		qcom,platform-te-gpio = <&tlmm 10 0>;
+
+		qcom,dsi-panel = <&dsi_dual_nt35597_cmd>;
+		vddio-supply = <&pm8994_l14>;
+		lab-supply = <&lab_regulator>;
+		ibb-supply = <&ibb_regulator>;
+	};
+
+ dsi_dual_nt35597_video_1: qcom,dsi-display@6 {
+ compatible = "qcom,dsi-display";
+ label = "dsi_dual_nt35597_video";
+ qcom,display-type = "primary";
+
+ qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
+ qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+ clocks = <&clock_mmss clk_ext_byte0_clk_src>,
+ <&clock_mmss clk_ext_pclk0_clk_src>;
+ clock-names = "src_byte_clk", "src_pixel_clk";
+
+ pinctrl-names = "panel_active", "panel_suspend";
+ pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
+ pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+
+ qcom,dsi-panel = <&dsi_dual_nt35597_video>;
+ vddio-supply = <&pm8994_l14>;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+ };
+};
+
+&mdss_mdp {
+ connectors = <&dsi_dual_sharp_video_1
+ &sde_wb>;
+};
+
+&dsi_dual_sharp_video {
+ qcom,mdss-dsi-panel-timings-8996 = [23 20 06 09 05 03 04 a0
+ 23 20 06 09 05 03 04 a0
+ 23 20 06 09 05 03 04 a0
+ 23 20 06 09 05 03 04 a0
+ 23 2e 06 08 05 03 04 a0];
+};
+
+&dsi_dual_jdi_cmd {
+ qcom,mdss-dsi-panel-timings-8996 = [22 1e 06 08 04 03 04 a0
+ 22 1e 06 08 04 03 04 a0
+ 22 1e 06 08 04 03 04 a0
+ 22 1e 06 08 04 03 04 a0
+ 22 2c 05 08 04 03 04 a0];
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "te_signal_check";
+};
+
+&dsi_dual_jdi_video {
+ qcom,mdss-dsi-panel-timings-8996 = [22 1e 06 08 04 03 04 a0
+ 22 1e 06 08 04 03 04 a0
+ 22 1e 06 08 04 03 04 a0
+ 22 1e 06 08 04 03 04 a0
+ 22 2c 05 08 04 03 04 a0];
+};
+
+&dsi_dual_nt35597_video {
+ qcom,mdss-dsi-panel-timings-8996 = [23 1e 07 08 05 03 04 a0
+ 23 1e 07 08 05 03 04 a0
+ 23 1e 07 08 05 03 04 a0
+ 23 1e 07 08 05 03 04 a0
+ 23 18 07 08 04 03 04 a0];
+};
+
+&dsi_dual_nt35597_cmd {
+ qcom,mdss-dsi-panel-timings-8996 = [23 1e 07 08 05 03 04 a0
+ 23 1e 07 08 05 03 04 a0
+ 23 1e 07 08 05 03 04 a0
+ 23 1e 07 08 05 03 04 a0
+ 23 18 07 08 04 03 04 a0];
+};
+
+&dsi_nt35597_dsc_video {
+ qcom,mdss-dsi-panel-timings-8996 = [20 1d 05 07 03 03 04 a0
+ 20 1d 05 07 03 03 04 a0
+ 20 1d 05 07 03 03 04 a0
+ 20 1d 05 07 03 03 04 a0
+ 20 12 05 06 03 13 04 a0];
+};
+
+&dsi_nt35597_dsc_cmd {
+ qcom,mdss-dsi-panel-timings-8996 = [20 1d 05 07 03 03 04 a0
+ 20 1d 05 07 03 03 04 a0
+ 20 1d 05 07 03 03 04 a0
+ 20 1d 05 07 03 03 04 a0
+ 20 12 05 06 03 13 04 a0];
+};
+
+&dsi_dual_jdi_4k_nofbc_video {
+ qcom,mdss-dsi-panel-timings-8996 = [
+ 2c 27 0e 10 0a 03 04 a0
+ 2c 27 0e 10 0a 03 04 a0
+ 2c 27 0e 10 0a 03 04 a0
+ 2c 27 0e 10 0a 03 04 a0
+ 2c 32 0e 0f 0a 03 04 a0];
+};
+
+&dsi_hx8379a_fwvga_truly_vid {
+ qcom,mdss-dsi-panel-timings-8996 = [23 20 06 09 05 03 04 a0
+ 23 20 06 09 05 03 04 a0
+ 23 20 06 09 05 03 04 a0
+ 23 20 06 09 05 03 04 a0
+ 23 2e 06 08 05 03 04 a0];
+};
+
+&dsi_r69007_wqxga_cmd {
+ qcom,mdss-dsi-panel-timings-8996 = [23 1f 07 09 05 03 04 a0
+ 23 1f 07 09 05 03 04 a0
+ 23 1f 07 09 05 03 04 a0
+ 23 1f 07 09 05 03 04 a0
+ 23 19 08 08 05 03 04 a0];
+};
diff --git a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
new file mode 100644
index 000000000000..8aebac3b0e22
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
@@ -0,0 +1,546 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ mdss_mdp: qcom,mdss_mdp@900000 {
+ compatible = "qcom,sde-kms";
+ reg = <0x00900000 0x90000>,
+ <0x009b0000 0x1040>,
+ <0x009b8000 0x1040>;
+ reg-names = "mdp_phys",
+ "vbif_phys",
+ "vbif_nrt_phys";
+
+ /* clock and supply entries */
+ clocks = <&clock_mmss clk_mdss_ahb_clk>,
+ <&clock_mmss clk_mdss_axi_clk>,
+ <&clock_mmss clk_mdp_clk_src>,
+ <&clock_mmss clk_mdss_mdp_vote_clk>,
+ <&clock_mmss clk_smmu_mdp_axi_clk>,
+ <&clock_mmss clk_smmu_mdp_ahb_clk>,
+ <&clock_mmss clk_smmu_rot_axi_clk>,
+ <&clock_mmss clk_smmu_rot_ahb_clk>,
+ <&clock_mmss clk_mmagic_mdss_axi_clk>,
+ <&clock_mmss clk_mdss_vsync_clk>;
+ clock-names = "iface_clk",
+ "bus_clk",
+ "core_clk_src",
+ "core_clk",
+ "iommu_mdp_axi_clk",
+ "iommu_mdp_ahb_clk",
+ "iommu_rot_axi_clk",
+ "iommu_rot_ahb_clk",
+ "mmagic_clk",
+ "vsync_clk";
+		clock-rate = <0 0 412500000 412500000 0 0 0 0 0 0>;
+		clock-max-rate = <0 0 412500000 412500000 0 0 0 0 0 0>;
+
+ /* interrupt config */
+ interrupt-parent = <&intc>;
+ interrupts = <0 83 0>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ iommus = <&mdp_smmu 0>;
+
+ /* hw blocks */
+ qcom,sde-off = <0x1000>;
+ qcom,sde-ctl-off = <0x2000 0x2200 0x2400
+ 0x2600 0x2800>;
+ qcom,sde-mixer-off = <0x45000 0x46000 0x47000
+ 0x48000 0x49000 0x4a000>;
+ qcom,sde-dspp-off = <0x55000 0x57000>;
+ qcom,sde-dspp-ad-off = <0x24000 0x22800>;
+ qcom,sde-dspp-ad-version = <0x00030000>;
+ qcom,sde-wb-off = <0x66000>;
+ qcom,sde-wb-id = <2>;
+ qcom,sde-wb-xin-id = <6>;
+ qcom,sde-wb-clk-ctrl = <0x2bc 16>;
+ qcom,sde-intf-off = <0x6b000 0x6b800
+ 0x6c000 0x6c800>;
+ qcom,sde-intf-type = "none", "dsi", "dsi", "hdmi";
+ qcom,sde-pp-off = <0x71000 0x71800
+ 0x72000 0x72800 0x73000>;
+ qcom,sde-pp-slave = <0x0 0x0 0x0 0x0 0x1>;
+ qcom,sde-te2-off = <0x2000 0x2000 0x0 0x0 0x0>;
+ qcom,sde-cdm-off = <0x7a200>;
+ qcom,sde-dsc-off = <0x10000 0x10000 0x0 0x0 0x0>;
+ qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
+
+ qcom,sde-sspp-type = "vig", "vig", "vig",
+ "vig", "rgb", "rgb",
+ "rgb", "rgb", "dma",
+ "dma", "cursor", "cursor";
+
+ qcom,sde-sspp-off = <0x5000 0x7000 0x9000
+ 0xb000 0x15000 0x17000
+ 0x19000 0x1b000 0x25000
+ 0x27000 0x35000 0x37000>;
+
+ qcom,sde-sspp-xin-id = <0 4 8
+ 12 1 5
+ 9 13 2
+ 10 7 7>;
+
+		/* offsets are relative to "mdp_phys" + "qcom,sde-off" */
+ qcom,sde-sspp-clk-ctrl = <0x2ac 0>, <0x2b4 0>, <0x2bc 0>,
+ <0x2c4 0>, <0x2ac 4>, <0x2b4 4>, <0x2bc 4>,
+ <0x2c4 4>, <0x2ac 8>, <0x2b4 8>, <0x3a8 16>,
+ <0x3b0 16>;
+ qcom,sde-qseed-type = "qseedv2";
+ qcom,sde-csc-type = "csc";
+ qcom,sde-mixer-linewidth = <2560>;
+ qcom,sde-sspp-linewidth = <2560>;
+ qcom,sde-mixer-blendstages = <0x7>;
+ qcom,sde-highest-bank-bit = <0x2>;
+ qcom,sde-panic-per-pipe;
+ qcom,sde-has-cdp;
+ qcom,sde-has-src-split;
+ qcom,sde-max-bw-low-kbps = <9600000>;
+ qcom,sde-max-bw-high-kbps = <9600000>;
+ qcom,sde-dram-channels = <2>;
+ qcom,sde-num-nrt-paths = <1>;
+
+ qcom,sde-sspp-danger-lut = <0x000f 0xffff 0x0000>;
+ qcom,sde-sspp-safe-lut = <0xfffc 0xff00 0xffff>;
+
+ qcom,sde-vbif-off = <0 0>;
+ qcom,sde-vbif-size = <0x1040>;
+ qcom,sde-vbif-id = <0 1>;
+ qcom,sde-vbif-default-ot-rd-limit = <32>;
+ qcom,sde-vbif-default-ot-wr-limit = <16>;
+ qcom,sde-vbif-dynamic-ot-rd-limit = <62208000 2>,
+ <124416000 4>, <248832000 16>;
+ qcom,sde-vbif-dynamic-ot-wr-limit = <62208000 2>,
+ <124416000 4>, <248832000 16>;
+
+ mmagic-supply = <&gdsc_mmagic_mdss>;
+ vdd-supply = <&gdsc_mdss>;
+
+ qcom,sde-sspp-vig-blocks {
+ qcom,sde-vig-csc-off = <0x320>;
+ qcom,sde-vig-qseed-off = <0x200>;
+ /* Offset from vig top, version of HSIC */
+ qcom,sde-vig-hsic = <0x200 0x00010007>;
+ qcom,sde-vig-memcolor = <0x200 0x00010007>;
+ qcom,sde-vig-pcc = <0x1780 0x00010007>;
+ };
+
+ qcom,sde-sspp-rgb-blocks {
+ qcom,sde-rgb-scaler-off = <0x200>;
+ qcom,sde-rgb-pcc = <0x380 0x00010007>;
+ };
+
+ qcom,sde-dspp-blocks {
+ qcom,sde-dspp-pcc = <0x1700 0x00010007>;
+ qcom,sde-dspp-gc = <0x17c0 0x00010007>;
+ qcom,sde-dspp-hsic = <0x0 0x00010007>;
+ qcom,sde-dspp-memcolor = <0x0 0x00010007>;
+ qcom,sde-dspp-sixzone = <0x0 0x00010007>;
+ qcom,sde-dspp-gamut = <0x1600 0x00010007>;
+ qcom,sde-dspp-dither = <0x0 0x00010007>;
+ qcom,sde-dspp-hist = <0x0 0x00010007>;
+ qcom,sde-dspp-vlut = <0x0 0x00010007>;
+ };
+
+ qcom,sde-mixer-blocks {
+ qcom,sde-mixer-gc = <0x3c0 0x00010007>;
+ };
+
+ qcom,platform-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,platform-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "mmagic";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+
+ qcom,platform-supply-entry@1 {
+ reg = <1>;
+ qcom,supply-name = "vdd";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+
+ smmu_mdp_unsec: qcom,smmu_mdp_unsec_cb {
+ compatible = "qcom,smmu_mdp_unsec";
+ iommus = <&mdp_smmu 0>;
+ };
+
+ smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
+ compatible = "qcom,smmu_rot_unsec";
+ iommus = <&rot_smmu 0>;
+ };
+
+ smmu_mdp_sec: qcom,smmu_mdp_sec_cb {
+ compatible = "qcom,smmu_mdp_sec";
+ iommus = <&mdp_smmu 1>;
+ };
+
+ smmu_rot_sec: qcom,smmu_rot_sec_cb {
+ compatible = "qcom,smmu_rot_sec";
+ iommus = <&rot_smmu 1>;
+ };
+
+ /* data and reg bus scale settings */
+ qcom,sde-data-bus {
+ qcom,msm-bus,name = "mdss_sde";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <3>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>, <23 512 0 0>, <25 512 0 0>,
+ <22 512 0 6400000>, <23 512 0 6400000>,
+ <25 512 0 6400000>,
+ <22 512 0 6400000>, <23 512 0 6400000>,
+ <25 512 0 6400000>;
+ };
+
+ qcom,sde-reg-bus {
+ qcom,msm-bus,name = "mdss_reg";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>,
+ <1 590 0 160000>,
+ <1 590 0 320000>;
+ };
+ };
+
+ mdss_dsi0: qcom,mdss_dsi_ctrl0@994000 {
+ compatible = "qcom,dsi-ctrl-hw-v1.4";
+ label = "dsi-ctrl-0";
+ cell-index = <0>;
+ reg = <0x994000 0x400>,
+ <0x828000 0x108>;
+ reg-names = "dsi_ctrl", "mmss_misc";
+
+ gdsc-supply = <&gdsc_mdss>;
+ vdda-supply = <&pm8994_l2>;
+ vcca-supply = <&pm8994_l28>;
+
+ clocks = <&clock_mmss clk_mdss_mdp_vote_clk>,
+ <&clock_mmss clk_mdss_ahb_clk>,
+ <&clock_mmss clk_mmss_misc_ahb_clk>,
+ <&clock_mmss clk_mdss_axi_clk>,
+ <&clock_mmss clk_mdss_byte0_clk>,
+ <&clock_mmss clk_mdss_pclk0_clk>,
+ <&clock_mmss clk_mdss_esc0_clk>,
+ <&clock_mmss clk_byte0_clk_src>,
+ <&clock_mmss clk_pclk0_clk_src>;
+
+ clock-names = "mdp_core_clk", "iface_clk",
+ "core_mmss_clk", "bus_clk",
+ "byte_clk", "pixel_clk", "core_clk",
+ "byte_clk_rcg", "pixel_clk_rcg";
+
+ /* axi bus scale settings */
+ qcom,msm-bus,name = "mdss_dsi0";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>,
+ <22 512 0 1000>;
+
+ interrupt-parent = <&mdss_mdp>;
+ interrupts = <4 0>;
+ qcom,core-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,core-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+
+ qcom,ctrl-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ctrl-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vcca";
+ qcom,supply-min-voltage = <925000>;
+ qcom,supply-max-voltage = <925000>;
+ qcom,supply-enable-load = <17000>;
+ qcom,supply-disable-load = <32>;
+ };
+
+			qcom,ctrl-supply-entry@1 {
+				reg = <1>;
+				qcom,supply-name = "vdda";
+				qcom,supply-min-voltage = <1250000>;
+				qcom,supply-max-voltage = <1250000>;
+				qcom,supply-enable-load = <18160>;
+				qcom,supply-disable-load = <1>;
+			};
+
+ };
+ };
+
+ mdss_dsi1: qcom,mdss_dsi_ctrl1@996000 {
+ compatible = "qcom,dsi-ctrl-hw-v1.4";
+ label = "dsi-ctrl-1";
+ cell-index = <1>;
+ reg = <0x996000 0x400>,
+ <0x828000 0x108>;
+ reg-names = "dsi_ctrl", "mmss_misc";
+
+ gdsc-supply = <&gdsc_mdss>;
+ vdda-supply = <&pm8994_l2>;
+ vcca-supply = <&pm8994_l28>;
+
+ clocks = <&clock_mmss clk_mdss_mdp_vote_clk>,
+ <&clock_mmss clk_mdss_ahb_clk>,
+ <&clock_mmss clk_mmss_misc_ahb_clk>,
+ <&clock_mmss clk_mdss_axi_clk>,
+ <&clock_mmss clk_mdss_byte1_clk>,
+ <&clock_mmss clk_mdss_pclk1_clk>,
+ <&clock_mmss clk_mdss_esc1_clk>,
+ <&clock_mmss clk_byte1_clk_src>,
+ <&clock_mmss clk_pclk1_clk_src>;
+ clock-names = "mdp_core_clk", "iface_clk",
+ "core_mmss_clk", "bus_clk",
+ "byte_clk", "pixel_clk", "core_clk",
+ "byte_clk_rcg", "pixel_clk_rcg";
+
+ /* axi bus scale settings */
+ qcom,msm-bus,name = "mdss_dsi1";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>,
+ <22 512 0 1000>;
+
+ interrupt-parent = <&mdss_mdp>;
+ interrupts = <5 0>;
+ qcom,core-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,core-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+
+ qcom,ctrl-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ctrl-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdda";
+ qcom,supply-min-voltage = <1250000>;
+ qcom,supply-max-voltage = <1250000>;
+ qcom,supply-enable-load = <18160>;
+ qcom,supply-disable-load = <1>;
+ };
+
+			qcom,ctrl-supply-entry@1 {
+				reg = <1>;
+				qcom,supply-name = "vcca";
+				qcom,supply-min-voltage = <925000>;
+				qcom,supply-max-voltage = <925000>;
+				qcom,supply-enable-load = <18050>;
+				qcom,supply-disable-load = <32>;
+			};
+ };
+ };
+
+ mdss_dsi_phy0: qcom,mdss_dsi_phy0@994400 {
+ compatible = "qcom,dsi-phy-v4.0";
+ label = "dsi-phy-0";
+ cell-index = <0>;
+ reg = <0x994400 0x588>;
+ reg-names = "dsi_phy";
+
+ gdsc-supply = <&gdsc_mdss>;
+ vdda-supply = <&pm8994_l2>;
+
+ clocks = <&clock_mmss clk_mdss_mdp_vote_clk>,
+ <&clock_mmss clk_mdss_ahb_clk>,
+ <&clock_mmss clk_mmss_misc_ahb_clk>,
+ <&clock_mmss clk_mdss_axi_clk>;
+ clock-names = "mdp_core_clk", "iface_clk",
+ "core_mmss_clk", "bus_clk";
+
+ qcom,platform-strength-ctrl = [ff 06
+ ff 06
+ ff 06
+ ff 06
+ ff 00];
+ qcom,platform-regulator-settings = [1d
+ 1d 1d 1d 1d];
+ qcom,platform-lane-config = [00 00 10 0f
+ 00 00 10 0f
+ 00 00 10 0f
+ 00 00 10 0f
+ 00 00 10 8f];
+
+ qcom,core-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,core-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+
+ qcom,phy-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,phy-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdda";
+ qcom,supply-min-voltage = <1250000>;
+ qcom,supply-max-voltage = <1250000>;
+ qcom,supply-enable-load = <2500>;
+ qcom,supply-disable-load = <1>;
+ };
+ };
+ };
+
+ mdss_dsi_phy1: qcom,mdss_dsi_phy1@996400 {
+ compatible = "qcom,dsi-phy-v4.0";
+ label = "dsi-phy-1";
+ cell-index = <1>;
+ reg = <0x996400 0x588>;
+ reg-names = "dsi_phy";
+
+ gdsc-supply = <&gdsc_mdss>;
+ vdda-supply = <&pm8994_l2>;
+
+ clocks = <&clock_mmss clk_mdss_mdp_vote_clk>,
+ <&clock_mmss clk_mdss_ahb_clk>,
+ <&clock_mmss clk_mmss_misc_ahb_clk>,
+ <&clock_mmss clk_mdss_axi_clk>;
+ clock-names = "mdp_core_clk", "iface_clk",
+ "core_mmss_clk", "bus_clk";
+
+ qcom,platform-strength-ctrl = [ff 06
+ ff 06
+ ff 06
+ ff 06
+ ff 00];
+ qcom,platform-regulator-settings = [1d
+ 1d 1d 1d 1d];
+ qcom,platform-lane-config = [00 00 10 0f
+ 00 00 10 0f
+ 00 00 10 0f
+ 00 00 10 0f
+ 00 00 10 8f];
+
+ qcom,core-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,core-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+
+ qcom,phy-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,phy-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "vdda";
+ qcom,supply-min-voltage = <1250000>;
+ qcom,supply-max-voltage = <1250000>;
+ qcom,supply-enable-load = <2500>;
+ qcom,supply-disable-load = <1>;
+ };
+ };
+ };
+
+ mdss_hdmi: qcom,hdmi_tx@9a0000 {
+ compatible = "qcom,hdmi-tx-8996";
+
+ reg = <0x009a0000 0x50c>,
+ <0x00070000 0x6158>,
+ <0x009e0000 0xfff>;
+ reg-names = "core_physical",
+ "qfprom_physical",
+ "hdcp_physical";
+ clocks = <&clock_mmss clk_mdss_mdp_vote_clk>,
+ <&clock_mmss clk_mdss_ahb_clk>,
+ <&clock_mmss clk_mdss_hdmi_clk>,
+ <&clock_mmss clk_mdss_hdmi_ahb_clk>,
+ <&clock_mmss clk_mdss_extpclk_clk>;
+ clock-names =
+ "mdp_core_clk",
+ "iface_clk",
+ "core_clk",
+ "alt_iface_clk",
+ "extp_clk";
+ interrupt-parent = <&mdss_mdp>;
+ interrupts = <8 0>;
+ hpd-gdsc-supply = <&gdsc_mdss>;
+ qcom,hdmi-tx-hpd-gpio = <&pm8994_mpps 4 0>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&mdss_hdmi_hpd_active
+ &mdss_hdmi_ddc_active
+ &mdss_hdmi_cec_active>;
+ pinctrl-1 = <&mdss_hdmi_hpd_suspend
+ &mdss_hdmi_ddc_suspend
+ &mdss_hdmi_cec_suspend>;
+
+ hdmi_audio: qcom,msm-hdmi-audio-rx {
+ compatible = "qcom,msm-hdmi-audio-codec-rx";
+ };
+ };
+};
+
+/* dummy nodes for compatibility with 8996 mdss dtsi */
+&soc {
+ mdss_dsi: qcom,mdss_dsi_dummy {
+ /* dummy node for backward compatibility */
+ };
+
+ mdss_hdmi_tx: qcom,mdss_hdmi_tx_dummy {
+ /* dummy node for backward compatibility */
+ };
+
+ mdss_fb2: qcom,mdss_fb2_dummy {
+ /* dummy node for backward compatibility */
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm8996-v2.dtsi b/arch/arm/boot/dts/qcom/msm8996-v2.dtsi
index d3c262f42ace..9725bc3ee530 100644
--- a/arch/arm/boot/dts/qcom/msm8996-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-v2.dtsi
@@ -480,7 +480,11 @@
gdsc-venus-supply = <&gdsc_venus>;
};
-&mdss_dsi {
+&mdss_hdmi {
+ hpd-gdsc-venus-supply = <&gdsc_venus>;
+};
+
+&mdss_dsi0 {
gdsc-venus-supply = <&gdsc_venus>;
qcom,core-supply-entries {
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/qcom/msm8996.dtsi b/arch/arm/boot/dts/qcom/msm8996.dtsi
index f69c388fbbef..49eafeaa5d70 100644
--- a/arch/arm/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996.dtsi
@@ -237,7 +237,7 @@
};
#include "msm8996-ion.dtsi"
-#include "msm8996-mdss.dtsi"
+#include "msm8996-sde.dtsi"
#include "msm8996-mdss-pll.dtsi"
#include "msm8996-smp2p.dtsi"
#include "msm8996-ipcrouter.dtsi"
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera.dtsi
index f8dae210bc4e..c35ea886408d 100644
--- a/arch/arm/boot/dts/qcom/msm8998-camera.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-camera.dtsi
@@ -51,8 +51,8 @@
"csi_src_clk", "csi_clk", "cphy_csid_clk",
"csiphy_timer_src_clk", "csiphy_timer_clk",
"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
- qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 200000000 0
- 0 256000000 0>;
+ qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+ 0 274290000 0>;
status = "ok";
};
@@ -86,8 +86,8 @@
"csi_src_clk", "csi_clk", "cphy_csid_clk",
"csiphy_timer_src_clk", "csiphy_timer_clk",
"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
- qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 200000000 0
- 0 256000000 0>;
+ qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+ 0 274290000 0>;
status = "ok";
};
@@ -121,8 +121,8 @@
"csi_src_clk", "csi_clk", "cphy_csid_clk",
"csiphy_timer_src_clk", "csiphy_timer_clk",
"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
- qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 200000000 0
- 0 256000000 0>;
+ qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+ 0 274290000 0>;
status = "ok";
};
@@ -159,7 +159,7 @@
"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
"csi_pix_clk", "cphy_csid_clk";
- qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000
+ qcom,clock-rates = <0 0 0 0 0 0 0 274290000 274290000
0 0 0 0 0>;
status = "ok";
};
@@ -197,7 +197,7 @@
"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
"csi_pix_clk", "cphy_csid_clk";
- qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000
+ qcom,clock-rates = <0 0 0 0 0 0 0 274290000 274290000
0 0 0 0 0>;
status = "ok";
};
@@ -235,7 +235,7 @@
"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
"csi_pix_clk", "cphy_csid_clk";
- qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000
+ qcom,clock-rates = <0 0 0 0 0 0 0 274290000 274290000
0 0 0 0 0>;
status = "ok";
};
@@ -273,7 +273,7 @@
"ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
"csi_clk", "csi_ahb_clk", "csi_rdi_clk",
"csi_pix_clk", "cphy_csid_clk";
- qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000
+ qcom,clock-rates = <0 0 0 0 0 0 0 274290000 274290000
0 0 0 0 0>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi
index dff374962e02..f91b29bca493 100644
--- a/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi
@@ -331,7 +331,7 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
-&dsi_jdi_1080_vid {
+&dsi_jdi_1080p_video {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-interposer-sdm660-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8998-interposer-sdm660-cdp.dtsi
index 4bf3dc08ab3e..40bb8727cc30 100644
--- a/arch/arm/boot/dts/qcom/msm8998-interposer-sdm660-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-interposer-sdm660-cdp.dtsi
@@ -311,7 +311,7 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
-&dsi_jdi_1080_vid {
+&dsi_jdi_1080p_video {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-interposer-sdm660-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8998-interposer-sdm660-mtp.dtsi
index a9306475e24e..d652b456cb1c 100644
--- a/arch/arm/boot/dts/qcom/msm8998-interposer-sdm660-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-interposer-sdm660-mtp.dtsi
@@ -336,7 +336,7 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
-&dsi_jdi_1080_vid {
+&dsi_jdi_1080p_video {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
index bfe29ff56413..d0d13332595a 100644
--- a/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-mdss-panels.dtsi
@@ -174,7 +174,7 @@
qcom,mdss-dsi-t-clk-pre = <0x22>;
};
-&dsi_jdi_1080_vid {
+&dsi_jdi_1080p_video {
qcom,mdss-dsi-panel-timings = [00 1a 06 06 0a 11 05 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x07>;
qcom,mdss-dsi-t-clk-pre = <0x28>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi b/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
index 845c96eb5ef4..3f13cdc34892 100644
--- a/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-mdss.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -126,7 +126,7 @@
<0x012ac 0xc0000ccc>,
<0x012b4 0xc0000ccc>,
<0x012bc 0x00cccccc>,
- <0x012c4 0x000000cc>,
+ <0x012c4 0x0000cccc>,
<0x013a8 0x0cccc0c0>,
<0x013b0 0xccccc0c0>,
<0x013b8 0xcccc0000>,
@@ -500,6 +500,9 @@
qcom,msm_ext_disp = <&msm_ext_disp>;
+ qcom,aux-cfg-settings = [00 13 00 10 0a 26 0a 03 8b 03];
+ qcom,logical2physical-lane-map = [02 03 01 00];
+
qcom,core-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi
index a0e56f630eb7..4aadd4802b51 100644
--- a/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi
@@ -364,7 +364,7 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
-&dsi_jdi_1080_vid {
+&dsi_jdi_1080p_video {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi
index af533bbfbc83..fb69a793a680 100644
--- a/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi
@@ -352,7 +352,7 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
-&dsi_jdi_1080_vid {
+&dsi_jdi_1080p_video {
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-v2-camera.dtsi b/arch/arm/boot/dts/qcom/msm8998-v2-camera.dtsi
index fdc452a47a46..93da11e66799 100644
--- a/arch/arm/boot/dts/qcom/msm8998-v2-camera.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-v2-camera.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -42,8 +42,8 @@
"csi_src_clk", "csi_clk", "cphy_csid_clk",
"csiphy_timer_src_clk", "csiphy_timer_clk",
"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
- qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 200000000 0
- 0 256000000 0>;
+ qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+ 0 274290000 0>;
status = "ok";
};
@@ -77,8 +77,8 @@
"csi_src_clk", "csi_clk", "cphy_csid_clk",
"csiphy_timer_src_clk", "csiphy_timer_clk",
"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
- qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 200000000 0
- 0 256000000 0>;
+ qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+ 0 274290000 0>;
status = "ok";
};
@@ -112,8 +112,8 @@
"csi_src_clk", "csi_clk", "cphy_csid_clk",
"csiphy_timer_src_clk", "csiphy_timer_clk",
"camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk";
- qcom,clock-rates = <0 0 0 0 0 0 256000000 0 0 200000000 0
- 0 256000000 0>;
+ qcom,clock-rates = <0 0 0 0 0 0 274290000 0 0 200000000 0
+ 0 274290000 0>;
status = "ok";
};
diff --git a/arch/arm/boot/dts/qcom/msm8998-v2.dtsi b/arch/arm/boot/dts/qcom/msm8998-v2.dtsi
index 1d8fe225c9af..b6ddd549efe5 100644
--- a/arch/arm/boot/dts/qcom/msm8998-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-v2.dtsi
@@ -44,6 +44,7 @@
qcom,acdextint0-val = <0x2cf9ae8 0x2cf9ae8>;
qcom,acdextint1-val = <0x2cf9afe 0x2cf9afe>;
qcom,acdautoxfer-val = <0x00000015 0x00000015>;
+ qcom,pwrcl-apcs-mem-acc-threshold-voltage = <852000>;
qcom,perfcl-apcs-mem-acc-threshold-voltage = <852000>;
qcom,apm-threshold-voltage = <800000>;
@@ -452,6 +453,8 @@
qcom,cpr-aging-ref-voltage = <1056000>;
qcom,apm-threshold-voltage = <800000>;
qcom,apm-hysteresis-voltage = <0>;
+ qcom,mem-acc-threshold-voltage = <852000>;
+ qcom,mem-acc-crossover-voltage = <852000>;
};
&apc0_pwrcl_vreg {
diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi
index 30679791006b..2e41f3a3567d 100644
--- a/arch/arm/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998.dtsi
@@ -3041,7 +3041,25 @@
};
- qcom,icnss@18800000 {
+ msm_ath10k_wlan: qcom,msm_ath10k_wlan@18000000 {
+ status = "disabled";
+ compatible = "qcom,wcn3990-wifi";
+ interrupts =
+ <0 413 0 /* CE0 */ >,
+ <0 414 0 /* CE1 */ >,
+ <0 415 0 /* CE2 */ >,
+ <0 416 0 /* CE3 */ >,
+ <0 417 0 /* CE4 */ >,
+ <0 418 0 /* CE5 */ >,
+ <0 420 0 /* CE6 */ >,
+ <0 421 0 /* CE7 */ >,
+ <0 422 0 /* CE8 */ >,
+ <0 423 0 /* CE9 */ >,
+ <0 424 0 /* CE10 */ >,
+ <0 425 0 /* CE11 */ >;
+ };
+
+ qcom,icnss@18800000 {
compatible = "qcom,icnss";
reg = <0x18800000 0x800000>,
<0xa0000000 0x10000000>,
diff --git a/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi b/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi
index e2344063ce16..af288ff26d06 100644
--- a/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi
@@ -21,6 +21,31 @@
pinctrl-0 = <&uart_console_active>;
};
+&pm660l_wled {
+ qcom,led-strings-list = [01 02];
+};
+
+&ufsphy1 {
+ vdda-phy-supply = <&pm660l_l1>;
+ vdda-pll-supply = <&pm660_l10>;
+ vddp-ref-clk-supply = <&pm660_l1>;
+ vdda-phy-max-microamp = <51400>;
+ vdda-pll-max-microamp = <14200>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+ status = "ok";
+};
+
+&ufs1 {
+ vdd-hba-supply = <&gdsc_ufs>;
+ vdd-hba-fixed-regulator;
+ vcc-supply = <&pm660l_l4>;
+ vccq2-supply = <&pm660_l8>;
+ vcc-max-microamp = <500000>;
+ vccq2-max-microamp = <600000>;
+ status = "ok";
+};
+
&soc {
};
diff --git a/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi
index 7a4e36ddda9e..a47f8419f41a 100644
--- a/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi
@@ -26,10 +26,35 @@
pinctrl-0 = <&uart_console_active>;
};
+&ufsphy1 {
+ vdda-phy-supply = <&pm660l_l1>;
+ vdda-pll-supply = <&pm660_l10>;
+ vddp-ref-clk-supply = <&pm660_l1>;
+ vdda-phy-max-microamp = <51400>;
+ vdda-pll-max-microamp = <14200>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+ status = "ok";
+};
+
+&ufs1 {
+ vdd-hba-supply = <&gdsc_ufs>;
+ vdd-hba-fixed-regulator;
+ vcc-supply = <&pm660l_l4>;
+ vccq2-supply = <&pm660_l8>;
+ vcc-max-microamp = <500000>;
+ vccq2-max-microamp = <600000>;
+ status = "ok";
+};
+
&mem_client_3_size {
qcom,peripheral-size = <0x500000>;
};
+&pm660l_wled {
+ qcom,led-strings-list = [01 02];
+};
+
&soc {
};
diff --git a/arch/arm/boot/dts/qcom/sdm630-pm.dtsi b/arch/arm/boot/dts/qcom/sdm630-pm.dtsi
new file mode 100644
index 000000000000..093eadab0413
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdm630-pm.dtsi
@@ -0,0 +1,774 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+&soc {
+	qcom,spm@17812000 {
+ compatible = "qcom,spm-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x17812000 0x1000>;
+ qcom,name = "gold-l2"; /* Gold L2 SAW */
+ qcom,saw2-ver-reg = <0xfd0>;
+ qcom,cpu-vctl-list = <&CPU0 &CPU1 &CPU2 &CPU3>;
+ qcom,vctl-timeout-us = <500>;
+ qcom,vctl-port = <0x0>;
+ qcom,phase-port = <0x1>;
+ qcom,saw2-avs-ctl = <0x1010031>;
+ qcom,saw2-avs-limit = <0x4580458>;
+ qcom,pfm-port = <0x2>;
+ };
+
+	qcom,spm@17912000 {
+ compatible = "qcom,spm-v2";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x17912000 0x1000>;
+ qcom,name = "silver-l2"; /* Silver L2 SAW */
+ qcom,saw2-ver-reg = <0xfd0>;
+ qcom,cpu-vctl-list = <&CPU4 &CPU5 &CPU6 &CPU7>;
+ qcom,vctl-timeout-us = <500>;
+ qcom,vctl-port = <0x0>;
+ qcom,phase-port = <0x1>;
+ qcom,saw2-avs-ctl = <0x1010031>;
+ qcom,saw2-avs-limit = <0x4580458>;
+ qcom,pfm-port = <0x2>;
+ };
+
+ qcom,lpm-levels {
+ compatible = "qcom,lpm-levels";
+ qcom,use-psci;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,pm-cluster@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ label = "system";
+ qcom,spm-device-names = "cci";
+ qcom,psci-mode-shift = <8>;
+ qcom,psci-mode-mask = <0xf>;
+
+ qcom,pm-cluster-level@0{
+ reg = <0>;
+ label = "system-active";
+ qcom,psci-mode = <0x0>;
+ qcom,latency-us = <100>;
+ qcom,ss-power = <725>;
+ qcom,energy-overhead = <85000>;
+ qcom,time-overhead = <120>;
+ };
+
+ qcom,pm-cluster-level@1{ /* E3 */
+ reg = <1>;
+ label = "system-pc";
+ qcom,psci-mode = <0x3>;
+ qcom,latency-us = <350>;
+ qcom,ss-power = <530>;
+ qcom,energy-overhead = <160000>;
+ qcom,time-overhead = <550>;
+ qcom,min-child-idx = <3>;
+ qcom,is-reset;
+ qcom,notify-rpm;
+ };
+
+ qcom,pm-cluster@0{
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ label = "pwr";
+ qcom,spm-device-names = "l2";
+ qcom,cpu = <&CPU4 &CPU5 &CPU6 &CPU7>;
+ qcom,psci-mode-shift = <4>;
+ qcom,psci-mode-mask = <0xf>;
+
+ qcom,pm-cluster-level@0{ /* D1 */
+ reg = <0>;
+ label = "pwr-l2-active";
+ qcom,psci-mode = <0x1>;
+ qcom,latency-us = <40>;
+ qcom,ss-power = <740>;
+ qcom,energy-overhead = <65000>;
+ qcom,time-overhead = <85>;
+ };
+ qcom,pm-cluster-level@1{ /* D2D */
+ reg = <1>;
+ label = "pwr-l2-dynret";
+ qcom,psci-mode = <0x2>;
+ qcom,latency-us = <60>;
+ qcom,ss-power = <700>;
+ qcom,energy-overhead = <85000>;
+ qcom,time-overhead = <85>;
+ qcom,min-child-idx = <1>;
+ };
+
+ qcom,pm-cluster-level@2{ /* D2E */
+ reg = <2>;
+ label = "pwr-l2-ret";
+ qcom,psci-mode = <0x3>;
+ qcom,latency-us = <100>;
+ qcom,ss-power = <640>;
+ qcom,energy-overhead = <135000>;
+ qcom,time-overhead = <85>;
+ qcom,min-child-idx = <2>;
+ };
+
+ qcom,pm-cluster-level@3{ /* D4 */
+ reg = <3>;
+ label = "pwr-l2-pc";
+ qcom,psci-mode = <0x4>;
+ qcom,latency-us = <700>;
+ qcom,ss-power = <450>;
+ qcom,energy-overhead = <210000>;
+ qcom,time-overhead = <11500>;
+ qcom,min-child-idx = <2>;
+ qcom,is-reset;
+ };
+
+ qcom,pm-cpu {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,psci-mode-shift = <0>;
+ qcom,psci-mode-mask = <0xf>;
+
+ qcom,pm-cpu-level@0 { /* C1 */
+ reg = <0>;
+ qcom,spm-cpu-mode = "wfi";
+ qcom,psci-cpu-mode = <0x1>;
+ qcom,latency-us = <20>;
+ qcom,ss-power = <750>;
+ qcom,energy-overhead = <32000>;
+ qcom,time-overhead = <60>;
+ };
+
+ qcom,pm-cpu-level@1 { /* C2D */
+ reg = <1>;
+ qcom,psci-cpu-mode = <2>;
+ qcom,spm-cpu-mode = "ret";
+ qcom,latency-us = <40>;
+ qcom,ss-power = <730>;
+ qcom,energy-overhead = <85500>;
+ qcom,time-overhead = <110>;
+ };
+
+ qcom,pm-cpu-level@2 { /* C3 */
+ reg = <2>;
+ qcom,spm-cpu-mode = "pc";
+ qcom,psci-cpu-mode = <0x3>;
+ qcom,latency-us = <80>;
+ qcom,ss-power = <700>;
+ qcom,energy-overhead = <126480>;
+ qcom,time-overhead = <160>;
+ qcom,is-reset;
+ };
+ };
+ };
+
+ qcom,pm-cluster@1{
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ label = "perf";
+ qcom,spm-device-names = "l2";
+ qcom,cpu = <&CPU0 &CPU1 &CPU2 &CPU3>;
+ qcom,psci-mode-shift = <4>;
+ qcom,psci-mode-mask = <0xf>;
+
+ qcom,pm-cluster-level@0{ /* D1 */
+ reg = <0>;
+ label = "perf-l2-active";
+ qcom,psci-mode = <0x1>;
+ qcom,latency-us = <40>;
+ qcom,ss-power = <740>;
+ qcom,energy-overhead = <70000>;
+ qcom,time-overhead = <80>;
+ };
+
+ qcom,pm-cluster-level@1{ /* D2D */
+ reg = <1>;
+ label = "perf-l2-dynret";
+ qcom,psci-mode = <2>;
+ qcom,latency-us = <60>;
+ qcom,ss-power = <700>;
+ qcom,energy-overhead = <85000>;
+ qcom,time-overhead = <85>;
+ qcom,min-child-idx = <1>;
+ };
+
+ qcom,pm-cluster-level@2{ /* D2E */
+ reg = <2>;
+ label = "perf-l2-ret";
+ qcom,psci-mode = <3>;
+ qcom,latency-us = <100>;
+ qcom,ss-power = <640>;
+ qcom,energy-overhead = <135000>;
+ qcom,time-overhead = <85>;
+ qcom,min-child-idx = <2>;
+ };
+
+ qcom,pm-cluster-level@3{ /* D4 */
+ reg = <3>;
+ label = "perf-l2-pc";
+ qcom,psci-mode = <0x4>;
+ qcom,latency-us = <800>;
+ qcom,ss-power = <450>;
+ qcom,energy-overhead = <240000>;
+ qcom,time-overhead = <11500>;
+ qcom,min-child-idx = <2>;
+ qcom,is-reset;
+ };
+
+ qcom,pm-cpu {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ qcom,psci-mode-shift = <0>;
+ qcom,psci-mode-mask = <0xf>;
+
+ qcom,pm-cpu-level@0 { /* C1 */
+ reg = <0>;
+ qcom,spm-cpu-mode = "wfi";
+ qcom,psci-cpu-mode = <0x1>;
+ qcom,latency-us = <25>;
+ qcom,ss-power = <750>;
+ qcom,energy-overhead = <37000>;
+ qcom,time-overhead = <50>;
+ };
+
+ qcom,pm-cpu-level@1 { /* C2D */
+ reg = <1>;
+ qcom,psci-cpu-mode = <2>;
+ qcom,spm-cpu-mode = "ret";
+ qcom,latency-us = <40>;
+ qcom,ss-power = <730>;
+ qcom,energy-overhead = <85500>;
+ qcom,time-overhead = <110>;
+ };
+
+ qcom,pm-cpu-level@2 { /* C3 */
+ reg = <2>;
+ qcom,spm-cpu-mode = "pc";
+ qcom,psci-cpu-mode = <0x3>;
+ qcom,latency-us = <80>;
+ qcom,ss-power = <700>;
+ qcom,energy-overhead = <136480>;
+ qcom,time-overhead = <160>;
+ qcom,is-reset;
+ };
+ };
+ };
+ };
+ };
+
+ qcom,rpm-stats@200000 {
+ compatible = "qcom,rpm-stats";
+ reg = <0x200000 0x1000>,
+ <0x290014 0x4>,
+ <0x29001c 0x4>;
+ reg-names = "phys_addr_base",
+ "offset_addr",
+ "heap_phys_addrbase";
+ qcom,sleep-stats-version = <2>;
+ };
+
+ qcom,rpm-rail-stats@200000 {
+ compatible = "qcom,rpm-rail-stats";
+ reg = <0x200000 0x100>,
+ <0x29000c 0x4>;
+ reg-names = "phys_addr_base",
+ "offset_addr";
+ };
+
+ qcom,rpm-log@200000 {
+ compatible = "qcom,rpm-log";
+ reg = <0x200000 0x4000>,
+ <0x290018 0x4>;
+ qcom,rpm-addr-phys = <0x200000>;
+ qcom,offset-version = <4>;
+ qcom,offset-page-buffer-addr = <36>;
+ qcom,offset-log-len = <40>;
+ qcom,offset-log-len-mask = <44>;
+ qcom,offset-page-indices = <56>;
+ };
+
+ qcom,rpm-master-stats@778150 {
+ compatible = "qcom,rpm-master-stats";
+ reg = <0x778150 0x5000>;
+ qcom,masters = "APSS", "MPSS", "ADSP", "CDSP", "TZ";
+ qcom,master-stats-version = <2>;
+ qcom,master-offset = <4096>;
+ };
+
+	rpm_msg_ram: memory@200000 {
+ compatible = "qcom,rpm-msg-ram";
+ reg = <0x200000 0x1000>,
+ <0x290000 0x1000>;
+ };
+
+	rpm_code_ram: rpm-memory@778000 {
+ compatible = "qcom,rpm-code-ram";
+ reg = <0x778000 0x5000>;
+ };
+
+ qcom,system-stats {
+ compatible = "qcom,system-stats";
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+ qcom,rpm-code-ram = <&rpm_code_ram>;
+ qcom,masters = "APSS", "MPSS", "ADSP", "CDSP", "TZ";
+ };
+
+ qcom,mpm@7781b8 {
+ compatible = "qcom,mpm-v2";
+ reg = <0x7781b8 0x1000>, /* MSM_RPM_MPM_BASE 4K */
+ <0x17911008 0x4>; /* MSM_APCS_GCC_BASE 4K */
+ reg-names = "vmpm", "ipc";
+ interrupts = <GIC_SPI 171 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clock_rpmcc CXO_LPM_CLK>;
+ clock-names = "xo";
+ qcom,num-mpm-irqs = <96>;
+
+ qcom,ipc-bit-offset = <1>;
+
+ qcom,gic-parent = <&intc>;
+ qcom,gic-map =
+ <0x02 216>, /* tsens1_tsens_upper_lower_int */
+ <0x31 212>, /* usb30_power_event_irq */
+ <0x34 275>, /* qmp_usb3_lfps_rxterm_irq_cx */
+ <0x4f 379>, /* qusb2phy_intr */
+ <0x57 358>, /* ee0_apps_hlos_spmi_periph_irq */
+ <0x5b 519>, /* lpass_pmu_tmr_timeout_irq_cx */
+ <0xff 16>, /* APC[0-7]_qgicQTmrHypPhysIrptReq */
+ <0xff 17>, /* APC[0-7]_qgicQTmrSecPhysIrptReq */
+ <0xff 18>, /* APC[0-7]_qgicQTmrNonSecPhysIrptReq */
+ <0xff 19>, /* APC[0-7]_qgicQTmrVirtIrptReq */
+ <0xff 20>, /* APC[0-7]_dbgCommRxFull */
+ <0xff 21>, /* APC[0-7]_dbgCommTxEmpty */
+ <0xff 22>, /* APC[0-7]_qgicPerfMonIrptReq */
+ <0xff 23>, /* corespm_vote_int[0-7] */
+ <0xff 24>, /* APC[0-3]_qgicExtFaultIrptReq */
+ <0xff 28>, /* qgicWakeupSync[0-7] */
+ <0xff 29>, /* APCC_cti_SPI_intx[0-7] */
+ <0xff 30>, /* APCC_cti_SPI_inty[0-7] */
+ <0xff 32>, /* l2spm_vote_int[0] */
+ <0xff 33>, /* l2spm_vote_int[1] */
+ <0xff 34>, /* APCC_qgicL2ErrorIrptReq */
+ <0xff 35>, /* WDT_barkInt */
+ <0xff 36>, /* WDT_biteExpired */
+ <0xff 39>, /* QTMR_qgicFrm0VirtIrq */
+ <0xff 40>, /* QTMR_qgicFrm0PhyIrq */
+ <0xff 41>, /* QTMR_qgicFrm1PhyIrq */
+ <0xff 42>, /* QTMR_qgicFrm2PhyIrq */
+ <0xff 43>, /* QTMR_qgicFrm3PhyIrq */
+ <0xff 44>, /* QTMR_qgicFrm4PhyIrq */
+ <0xff 45>, /* QTMR_qgicFrm5PhyIrq */
+ <0xff 46>, /* QTMR_qgicFrm6PhyIrq */
+ <0xff 47>, /* rbif_Irq[0] */
+ <0xff 48>, /* rbif_Irq[1] */
+ <0xff 49>, /* rbif_Irq[2] */
+ <0xff 50>, /* rbif_Irq[3] */
+ <0xff 51>, /* rbif_Irq[4] */
+ <0xff 52>, /* cci_spm_vote_summary_int */
+ <0xff 54>, /* nERRORIRQ */
+ <0xff 55>, /* nEVNTCNTOVERFLOW_cci */
+ <0xff 56>, /* QTMR_qgicFrm0VirtIrq */
+ <0xff 57>, /* QTMR_qgicFrm0PhyIrq */
+ <0xff 58>, /* QTMR_qgicFrm1PhyIrq */
+ <0xff 59>, /* QTMR_qgicFrm2PhyIrq */
+ <0xff 60>, /* QTMR_qgicFrm3PhyIrq */
+ <0xff 61>, /* QTMR_qgicFrm4PhyIrq */
+ <0xff 62>, /* QTMR_qgicFrm5PhyIrq */
+ <0xff 63>, /* QTMR_qgicFrm6PhyIrq */
+ <0xff 64>, /* wakeup_counter_irq_OR */
+ <0xff 65>, /* APC[0-3]_vs_alarm */
+ <0xff 66>, /* apc1_vs_alarm */
+ <0xff 67>, /* o_pwr_osm_irq */
+ <0xff 68>, /* o_perf_osm_irq */
+ <0xff 69>, /* o_pwr_dcvsh_interrupt */
+ <0xff 70>, /* o_perf_dcvsh_interrupt */
+ <0xff 73>, /* L2_EXTERRIRQ_C0 */
+ <0xff 74>, /* L2_EXTERRIRQ_C1 */
+ <0xff 75>, /* L2_INTERRIRQ_C0 */
+ <0xff 76>, /* L2_INTERRIRQ_C1 */
+ <0xff 77>, /* L2SPM_svicInt[0] */
+ <0xff 78>, /* L2SPM_svicInt[1] */
+ <0xff 79>, /* L2SPM_svicIntSwDone[0] */
+ <0xff 80>, /* L2SPM_svicIntSwDone[1] */
+ <0xff 81>, /* l2_avs_err[0] */
+ <0xff 82>, /* l2_avs_err[1] */
+ <0xff 83>, /* l2_avs_ack[0] */
+ <0xff 84>, /* l2_avs_ack[1] */
+ <0xff 98>, /* o_qm_interrupt */
+ <0xff 100>, /* camss_vbif_1_irpt */
+ <0xff 101>, /* processor_1_user_int */
+ <0xff 102>, /* processor_1_kernel_int */
+ <0xff 106>, /* dir_conn_irq_lpa_dsp[2] */
+ <0xff 107>, /* dir_conn_irq_lpa_dsp[1] */
+ <0xff 109>, /* camss_vbif_0_irpt */
+ <0xff 110>, /* csiphy_0_irq */
+ <0xff 111>, /* csiphy_1_irq */
+ <0xff 112>, /* csiphy_2_irq */
+ <0xff 115>, /* mdss_irq */
+ <0xff 116>, /* mdss_vbif_irpt */
+ <0xff 117>, /* dir_conn_irq_lpa_dsp[0] */
+ <0xff 119>, /* lpass_irq_out_apcs[11] */
+ <0xff 122>, /* o_pimem_tpdm_bc_irq_ofsat */
+ <0xff 123>, /* o_pimem_tpdm_tc_irq_ofsat */
+ <0xff 124>, /* dir_conn_irq_sensors[1] */
+ <0xff 125>, /* dir_conn_irq_sensors[0] */
+ <0xff 127>, /* peripheral_irq[2] */
+ <0xff 128>, /* peripheral_irq[3] */
+ <0xff 129>, /* peripheral_irq[4] */
+ <0xff 130>, /* peripheral_irq[5] */
+ <0xff 133>, /* peripheral_irq[2] */
+ <0xff 134>, /* peripheral_irq[3] */
+ <0xff 135>, /* peripheral_irq[4] */
+ <0xff 136>, /* peripheral_irq[5] */
+ <0xff 139>, /* peripheral_irq[0] */
+ <0xff 140>, /* peripheral_irq[1] */
+ <0xff 142>, /* sdcc_irq[0] */
+ <0xff 143>, /* sdcc_irq[1] */
+ <0xff 144>, /* sdcc_pwr_cmd_irq */
+ <0xff 145>, /* peripheral_irq[0] */
+ <0xff 146>, /* peripheral_irq[1] */
+ <0xff 148>, /* osmmu_CIrpt[4] */
+ <0xff 149>, /* osmmu_CIrpt[5] */
+ <0xff 150>, /* sdio_wakeup_irq */
+ <0xff 151>, /* acvremoval_int */
+ <0xff 152>, /* trs_int */
+ <0xff 155>, /* dir_conn_irq_lpa_dsp[5] */
+ <0xff 156>, /* dir_conn_irq_lpa_dsp[4] */
+ <0xff 157>, /* sdcc_irq[0] */
+ <0xff 158>, /* sdcc_irq[1] */
+ <0xff 159>, /* lpass_irq_out_apcs[39] */
+ <0xff 160>, /* lpass_irq_out_apcs[38] */
+ <0xff 163>, /* usb30_ctrl_irq[0] */
+ <0xff 164>, /* usb30_bam_irq[0] */
+ <0xff 165>, /* usb30_hs_phy_irq */
+ <0xff 166>, /* o_lm_int_2qgic */
+ <0xff 169>, /* lpass_irq_out_apcs[33] */
+ <0xff 172>, /* dcvs_int[6] */
+ <0xff 173>, /* dcvs_int[7] */
+ <0xff 184>, /* dir_conn_irq_lpa_dsp[3] */
+ <0xff 185>, /* camss_vbif_2_irpt */
+ <0xff 186>, /* mnoc_obs_mainfault */
+ <0xff 188>, /* lpass_irq_out_apcs[00] */
+ <0xff 189>, /* lpass_irq_out_apcs[01] */
+ <0xff 190>, /* lpass_irq_out_apcs[02] */
+ <0xff 191>, /* lpass_irq_out_apcs[03] */
+ <0xff 192>, /* lpass_irq_out_apcs[04] */
+ <0xff 193>, /* lpass_irq_out_apcs[05] */
+ <0xff 194>, /* lpass_irq_out_apcs[06] */
+ <0xff 195>, /* lpass_irq_out_apcs[07] */
+ <0xff 196>, /* lpass_irq_out_apcs[08] */
+ <0xff 197>, /* lpass_irq_out_apcs[09] */
+ <0xff 199>, /* qdss_usb_trace_bam_irq[0] */
+ <0xff 200>, /* rpm_ipc[4] */
+ <0xff 201>, /* rpm_ipc[5] */
+ <0xff 202>, /* rpm_ipc[6] */
+ <0xff 203>, /* rpm_ipc[7] */
+ <0xff 204>, /* rpm_ipc[20] */
+ <0xff 205>, /* rpm_ipc[21] */
+ <0xff 206>, /* rpm_ipc[22] */
+ <0xff 207>, /* rpm_ipc[23] */
+ <0xff 208>, /* lpi_dir_conn_irq_apps[0] */
+ <0xff 209>, /* lpi_dir_conn_irq_apps[1] */
+ <0xff 210>, /* lpi_dir_conn_irq_apps[2] */
+ <0xff 213>, /* secure_wdog_bark_irq */
+ <0xff 214>, /* tsens1_tsens_max_min_int */
+ <0xff 215>, /* o_bimc_intr[0] */
+ <0xff 217>, /* o_ocimem_nonsec_irq */
+ <0xff 218>, /* cpr_irq[1] */
+ <0xff 219>, /* lpass_irq_out_vmm[00] */
+ <0xff 220>, /* spmi_protocol_irq */
+ <0xff 221>, /* lpass_irq_out_vmm[01] */
+ <0xff 222>, /* lpass_irq_out_vmm[02] */
+ <0xff 223>, /* spdm_offline_irq */
+ <0xff 224>, /* spdm_realtime_irq[1] */
+ <0xff 225>, /* snoc_obs_mainFault */
+ <0xff 226>, /* cnoc_obs_mainFault */
+ <0xff 227>, /* o_tcsr_xpu3_sec_summary_intr */
+ <0xff 228>, /* o_tcsr_xpu3_non_sec_summary_intr */
+ <0xff 229>, /* o_timeout_slave_hmss_summary_intr */
+ <0xff 230>, /* o_tcsr_vmidmt_client_sec_summary_intr */
+ <0xff 231>, /* o_tcsr_vmidmt_client_nsec_summary_intr */
+ <0xff 232>, /* o_tcsr_vmidmt_cfg_sec_summary_intr */
+ <0xff 233>, /* o_tcsr_vmidmt_cfg_non_sec_summary_intr */
+ <0xff 234>, /* lpass_irq_out_vmm[03] */
+ <0xff 235>, /* cpr_irq[0] */
+ <0xff 236>, /* crypto_core_irq[0] */
+ <0xff 237>, /* crypto_core_irq[1] */
+ <0xff 238>, /* crypto_bam_irq[0] */
+ <0xff 239>, /* crypto_bam_irq[1] */
+ <0xff 240>, /* summary_irq_hmss */
+ <0xff 241>, /* dir_conn_irq_hmss[7] */
+ <0xff 242>, /* dir_conn_irq_hmss[6] */
+ <0xff 243>, /* dir_conn_irq_hmss[5] */
+ <0xff 244>, /* dir_conn_irq_hmss[4] */
+ <0xff 245>, /* dir_conn_irq_hmss[3] */
+ <0xff 246>, /* dir_conn_irq_hmss[2] */
+ <0xff 247>, /* dir_conn_irq_hmss[1] */
+ <0xff 248>, /* dir_conn_irq_hmss[0] */
+ <0xff 249>, /* summary_irq_hmss_tz */
+ <0xff 250>, /* cpr_irq[3] */
+ <0xff 251>, /* cpr_irq[2] */
+ <0xff 252>, /* cpr_irq[1] */
+ <0xff 253>, /* sdcc_pwr_cmd_irq */
+ <0xff 254>, /* sdio_wakeup_irq */
+ <0xff 255>, /* cpr_irq[0] */
+ <0xff 256>, /* lpass_irq_out_apcs[34] */
+ <0xff 257>, /* lpass_irq_out_apcs[35] */
+ <0xff 258>, /* lpass_irq_out_apcs[21] */
+ <0xff 261>, /* o_tcsr_mmu_nsgcfglrpt_summary_intr */
+ <0xff 262>, /* o_tcsr_mmu_gcfglrpt_summary_intr */
+ <0xff 263>, /* o_tcsr_mmu_nsglrpt_summary_intr */
+ <0xff 264>, /* o_tcsr_mmu_glrpt_summary_intr */
+ <0xff 265>, /* vbif_irpt */
+ <0xff 266>, /* lpass_irq_out_apcs[20] */
+ <0xff 267>, /* lpass_irq_out_apcs[19] */
+ <0xff 269>, /* rpm_wdog_expired_irq */
+ <0xff 270>, /* bam_irq[0] */
+ <0xff 271>, /* bam_irq[0] */
+ <0xff 276>, /* mmss_bimc_smmu_cirpt[4] */
+ <0xff 277>, /* mmss_bimc_smmu_cirpt[5] */
+ <0xff 278>, /* usb30_ctrl_irq[1] */
+ <0xff 279>, /* mmss_bimc_smmu_cirpt[6] */
+ <0xff 280>, /* mmss_bimc_smmu_cirpt[7] */
+ <0xff 281>, /* mmss_bimc_smmu_cirpt[8] */
+ <0xff 282>, /* mmss_bimc_smmu_cirpt[9] */
+ <0xff 283>, /* mmss_bimc_smmu_cirpt[10] */
+ <0xff 284>, /* mmss_bimc_smmu_cirpt[11] */
+ <0xff 285>, /* mmss_bimc_smmu_cirpt[12] */
+ <0xff 286>, /* mmss_bimc_smmu_cirpt[13] */
+ <0xff 287>, /* mmss_bimc_smmu_cirpt[14] */
+ <0xff 288>, /* mmss_bimc_smmu_cirpt[15] */
+ <0xff 289>, /* ufs_ice_sec_level_irq */
+ <0xff 291>, /* lpass_irq_out_apcs[18] */
+ <0xff 292>, /* mmss_bimc_smmu_cirpt[16] */
+ <0xff 293>, /* mmss_bimc_smmu_cirpt[17] */
+ <0xff 294>, /* mmss_bimc_smmu_cirpt[18] */
+ <0xff 295>, /* mmss_bimc_smmu_cirpt[0] */
+ <0xff 296>, /* mmss_bimc_smmu_pmirpt */
+ <0xff 297>, /* ufs_intrq */
+ <0xff 298>, /* mmss_bimc_smmu_cirpt[1] */
+ <0xff 299>, /* mmss_bimc_smmu_cirpt[2] */
+ <0xff 300>, /* mmss_bimc_smmu_cirpt[3] */
+ <0xff 301>, /* lpass_irq_out_apcs[17] */
+ <0xff 302>, /* qdss_etrbytecnt_irq */
+ <0xff 303>, /* lpass_irq_out_apcs[16] */
+ <0xff 304>, /* mmss_bimc_smmu_cirpt[19] */
+ <0xff 305>, /* mmss_bimc_smmu_cirpt[20] */
+ <0xff 306>, /* mmss_bimc_smmu_cirpt[21] */
+ <0xff 307>, /* mmss_bimc_smmu_cirpt[22] */
+ <0xff 308>, /* mmss_bimc_smmu_cirpt[23] */
+ <0xff 316>, /* lpass_irq_out_apcs[13] */
+ <0xff 317>, /* rbif_irq */
+ <0xff 318>, /* gpu_cc_gpu_cx_gds_hw_ctrl_irq_out */
+ <0xff 319>, /* venus0_irq */
+ <0xff 323>, /* lpass_irq_out_apcs[14] */
+ <0xff 324>, /* lpass_irq_out_apcs[15] */
+ <0xff 325>, /* camss_irq18 */
+ <0xff 326>, /* camss_irq0 */
+ <0xff 327>, /* camss_irq1 */
+ <0xff 328>, /* camss_irq2 */
+ <0xff 329>, /* camss_irq3 */
+ <0xff 330>, /* camss_irq4 */
+ <0xff 331>, /* camss_irq5 */
+ <0xff 332>, /* GC_SYS_irq[0] */
+ <0xff 333>, /* GC_SYS_irq[1] */
+ <0xff 334>, /* GC_SYS_irq[2] */
+ <0xff 335>, /* GC_SYS_irq[3] */
+ <0xff 336>, /* camss_irq13 */
+ <0xff 337>, /* camss_irq14 */
+ <0xff 338>, /* camss_irq15 */
+ <0xff 339>, /* camss_irq16 */
+ <0xff 340>, /* camss_irq17 */
+ <0xff 341>, /* camss_irq6 */
+ <0xff 342>, /* lpass_irq_out_apcs[36] */
+ <0xff 345>, /* camss_irq7 */
+ <0xff 346>, /* camss_irq8 */
+ <0xff 347>, /* camss_irq9 */
+ <0xff 348>, /* camss_irq10 */
+ <0xff 350>, /* camss_irq12 */
+ <0xff 351>, /* lpass_irq_out_apcs[12] */
+ <0xff 357>, /* o_pimem_nonfatal_irq */
+ <0xff 359>, /* ee1_apps_trustzone_spmi_periph_irq */
+ <0xff 360>, /* o_pimem_fatal_irq */
+ <0xff 361>, /* osmmu_CIrpt[0] */
+ <0xff 362>, /* osmmu_CIrpt[1] */
+ <0xff 363>, /* osmmu_CIrpt[2] */
+ <0xff 364>, /* osmmu_CIrpt[3] */
+ <0xff 365>, /* ipa_irq[0] */
+ <0xff 366>, /* osmmu_PMIrpt */
+ <0xff 381>, /* osmmu_CIrpt[6] */
+ <0xff 382>, /* osmmu_CIrpt[7] */
+ <0xff 385>, /* osmmu_CIrpt[12] */
+ <0xff 386>, /* osmmu_CIrpt[13] */
+ <0xff 387>, /* osmmu_CIrpt[14] */
+ <0xff 388>, /* osmmu_CIrpt[15] */
+ <0xff 389>, /* osmmu_CIrpt[16] */
+ <0xff 390>, /* osmmu_CIrpt[17] */
+ <0xff 391>, /* osmmu_CIrpt[18] */
+ <0xff 392>, /* osmmu_CIrpt[19] */
+ <0xff 393>, /* o_dcc_crc_fail_int */
+ <0xff 404>, /* aggre2noc_obs_mainFault */
+ <0xff 405>, /* osmmu_CIrpt[0] */
+ <0xff 406>, /* osmmu_CIrpt[1] */
+ <0xff 407>, /* osmmu_CIrpt[2] */
+ <0xff 408>, /* osmmu_CIrpt[3] */
+ <0xff 409>, /* osmmu_CIrpt[4] */
+ <0xff 410>, /* osmmu_CIrpt[5] */
+ <0xff 411>, /* o_dcc_task_done_int */
+ <0xff 412>, /* vsense_apps_alarm_irq */
+ <0xff 413>, /* osmmu_PMIrpt */
+ <0xff 414>, /* channel0_apps_hlos_trans_done_irq */
+ <0xff 415>, /* channel1_apps_trustzone_trans_done_irq */
+ <0xff 416>, /* rpm_ipc[28] */
+ <0xff 417>, /* rpm_ipc[29] */
+ <0xff 418>, /* rpm_ipc[30] */
+ <0xff 419>, /* rpm_ipc[31] */
+ <0xff 423>, /* lpass_irq_out_apcs[40] */
+ <0xff 424>, /* ipa_irq[2] */
+ <0xff 425>, /* lpass_irq_out_apcs[22] */
+ <0xff 426>, /* lpass_irq_out_apcs[23] */
+ <0xff 427>, /* lpass_irq_out_apcs[24] */
+ <0xff 428>, /* lpass_irq_out_apcs[25] */
+ <0xff 429>, /* lpass_irq_out_apcs[26] */
+ <0xff 430>, /* lpass_irq_out_apcs[27] */
+ <0xff 431>, /* lpass_irq_out_apcs[28] */
+ <0xff 432>, /* lpass_irq_out_apcs[29] */
+ <0xff 433>, /* lpass_irq_out_apcs[30] */
+ <0xff 434>, /* lpass_irq_out_apcs[31] */
+ <0xff 435>, /* lpass_irq_out_apcs[32] */
+ <0xff 436>, /* lpass_irq_out_apcs[37] */
+ <0xff 445>, /* wcss_apss_ce_intr[0] */
+ <0xff 446>, /* wcss_apss_ce_intr[1] */
+ <0xff 447>, /* wcss_apss_ce_intr[2] */
+ <0xff 448>, /* wcss_apss_ce_intr[3] */
+ <0xff 449>, /* wcss_apss_ce_intr[4] */
+ <0xff 450>, /* wcss_apss_ce_intr[5] */
+ <0xff 452>, /* wcss_apss_ce_intr[6] */
+ <0xff 453>, /* wcss_apss_ce_intr[7] */
+ <0xff 454>, /* wcss_apss_ce_intr[8] */
+ <0xff 455>, /* wcss_apss_ce_intr[9] */
+ <0xff 456>, /* wcss_apss_ce_intr[10] */
+ <0xff 457>, /* wcss_apss_ce_intr[11] */
+ <0xff 458>, /* wcss_apss_status_intr */
+ <0xff 462>, /* tsens1_tsens_critical_int */
+ <0xff 464>, /* ipa_bam_irq[0] */
+ <0xff 465>, /* ipa_bam_irq[2] */
+ <0xff 466>, /* ssc_uart_int */
+ <0xff 468>, /* cri_cm_irq_tz */
+ <0xff 469>, /* cri_cm_irq_hyp */
+ <0xff 471>, /* mmss_bimc_smmu_gds_hw_ctrl_irq_out */
+ <0xff 472>, /* gcc_gds_hw_ctrl_irq_out */
+ <0xff 474>, /* osmmu_CIrpt[20] */
+ <0xff 475>, /* osmmu_CIrpt[21] */
+ <0xff 476>, /* osmmu_CIrpt[22] */
+ <0xff 477>, /* tsens0_tsens_critical_int */
+ <0xff 478>, /* tsens0_tsens_max_min_int */
+ <0xff 479>, /* osmmu_CIrpt[23] */
+ <0xff 480>, /* q6_wdog_expired_irq */
+ <0xff 481>, /* mss_ipc_out_irq[4] */
+ <0xff 482>, /* mss_ipc_out_irq[5] */
+ <0xff 483>, /* mss_ipc_out_irq[6] */
+ <0xff 484>, /* mss_ipc_out_irq[7] */
+ <0xff 485>, /* mss_ipc_out_irq[28] */
+ <0xff 486>, /* mss_ipc_out_irq[29] */
+ <0xff 487>, /* mss_ipc_out_irq[30] */
+ <0xff 488>, /* mss_ipc_out_irq[31] */
+ <0xff 490>, /* tsens0_tsens_upper_lower_int */
+ <0xff 491>, /* qspi_irq0 */
+ <0xff 492>, /* sdcc_ice_sec_level_irq */
+ <0xff 494>, /* osmmu_CIrpt[6] */
+ <0xff 495>, /* osmmu_CIrpt[7] */
+ <0xff 496>, /* osmmu_CIrpt[8] */
+ <0xff 497>, /* osmmu_CIrpt[9] */
+ <0xff 498>, /* osmmu_CIrpt[10] */
+ <0xff 499>, /* osmmu_CIrpt[11] */
+ <0xff 500>, /* osmmu_CIrpt[24] */
+ <0xff 501>, /* osmmu_CIrpt[25] */
+ <0xff 503>, /* o_bimc_intr[1] */
+ <0xff 504>, /* osmmu_CIrpt[26] */
+ <0xff 505>, /* osmmu_CIrpt[27] */
+ <0xff 506>, /* osmmu_CIrpt[28] */
+ <0xff 516>, /* lpass_irq_out_apcs[41] */
+ <0xff 517>, /* lpass_irq_out_apcs[42] */
+ <0xff 520>; /* lpass_irq_out_apcs[45] */
+
+ qcom,gpio-parent = <&tlmm>;
+ qcom,gpio-map =
+ <3 1>,
+ <4 5>,
+ <5 9>,
+ <6 10>,
+ <7 66>,
+ <8 22>,
+ <9 25>,
+ <10 28>,
+ <11 58>,
+ <13 41>,
+ <14 43>,
+ <15 40>,
+ <16 42>,
+ <17 46>,
+ <18 50>,
+ <19 44>,
+ <21 56>,
+ <22 45>,
+ <23 68>,
+ <24 69>,
+ <25 70>,
+ <26 71>,
+ <27 72>,
+ <28 73>,
+ <29 64>,
+ <30 2>,
+ <31 13>,
+ <32 111>,
+ <33 74>,
+ <34 75>,
+ <35 76>,
+ <36 82>,
+ <37 17>,
+ <38 77>,
+ <39 47>,
+ <40 54>,
+ <41 48>,
+ <42 101>,
+ <43 49>,
+ <44 51>,
+ <45 86>,
+ <46 90>,
+ <47 91>,
+ <48 52>,
+ <50 55>,
+ <51 6>,
+ <53 65>,
+ <55 67>,
+ <56 83>,
+ <57 84>,
+ <58 85>,
+ <59 87>,
+ <63 21>,
+ <64 78>,
+ <65 113>,
+ <66 60>,
+ <67 98>,
+ <68 30>,
+ <70 31>,
+ <71 29>,
+ <76 107>,
+ <83 109>,
+ <84 103>,
+ <85 105>;
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdm630-rumi.dts b/arch/arm/boot/dts/qcom/sdm630-rumi.dts
index 2ea1af4da90c..018429a4ebb1 100644
--- a/arch/arm/boot/dts/qcom/sdm630-rumi.dts
+++ b/arch/arm/boot/dts/qcom/sdm630-rumi.dts
@@ -67,6 +67,27 @@
pinctrl-0 = <&uart_console_active>;
};
+&ufsphy1 {
+ vdda-phy-supply = <&pm660l_l1>;
+ vdda-pll-supply = <&pm660_l10>;
+ vddp-ref-clk-supply = <&pm660_l1>;
+ vdda-phy-max-microamp = <51400>;
+ vdda-pll-max-microamp = <14200>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+ status = "ok";
+};
+
+&ufs1 {
+ vdd-hba-supply = <&gdsc_ufs>;
+ vdd-hba-fixed-regulator;
+ vcc-supply = <&pm660l_l4>;
+ vccq2-supply = <&pm660_l8>;
+ vcc-max-microamp = <500000>;
+ vccq2-max-microamp = <600000>;
+ status = "ok";
+};
+
&clock_gcc {
compatible = "qcom,dummycc";
clock-output-names = "gcc_clocks";
diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi
index 9aa83c07ebac..86e788dc81a0 100644
--- a/arch/arm/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630.dtsi
@@ -17,6 +17,7 @@
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/regulator/qcom,rpm-smd-regulator.h>
+#include <dt-bindings/clock/qcom,cpu-osm.h>
/ {
model = "Qualcomm Technologies, Inc. SDM630";
@@ -293,7 +294,7 @@
alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
reusable;
alignment = <0x0 0x400000>;
- size = <0x0 0x400000>;
+ size = <0x0 0xa00000>;
};
qseecom_mem: qseecom_region {
@@ -671,6 +672,7 @@
qcom,pet-time = <10000>;
qcom,ipi-ping;
qcom,wakeup-enable;
+ qcom,scandump-size = <0x40000>;
};
uartblsp1dm1: serial@0c170000 {
@@ -939,6 +941,105 @@
qcom,ipa-advertise-sg-support;
};
+ clock_cpu: qcom,clk-cpu-630@179c0000 {
+ compatible = "qcom,clk-cpu-osm-sdm630";
+ status = "disabled";
+ reg = <0x179c0000 0x4000>, <0x17916000 0x1000>,
+ <0x17816000 0x1000>, <0x179d1000 0x1000>,
+ <0x00784130 0x8>;
+ reg-names = "osm", "pwrcl_pll", "perfcl_pll",
+ "apcs_common", "perfcl_efuse";
+
+ interrupts = <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
+ <GIC_SPI 36 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "pwrcl-irq", "perfcl-irq";
+
+ qcom,pwrcl-speedbin0-v0 =
+ < 300000000 0x0004000f 0x01200020 0x1 1 >,
+ < 614400000 0x05040020 0x03200020 0x1 2 >,
+ < 883200000 0x0404002e 0x04250025 0x1 3 >,
+ < 1094400000 0x04040039 0x052e002e 0x2 4 >,
+ < 1382400000 0x04040048 0x07390039 0x2 5 >,
+ < 1536000000 0x04040050 0x08400040 0x3 6 >,
+ < 1728000000 0x0404005a 0x09480048 0x3 7 >,
+ < 1843200000 0x04040060 0x094c004c 0x3 8 >;
+
+ qcom,perfcl-speedbin0-v0 =
+ < 300000000 0x0004000f 0x01200020 0x1 1 >,
+ < 787200000 0x05040029 0x04200020 0x1 2 >,
+ < 1113600000 0x0404003a 0x052e002e 0x1 3 >,
+ < 1344000000 0x04040046 0x07380038 0x2 4 >,
+ < 1516800000 0x0404004f 0x073f003f 0x2 5 >,
+ < 1670400000 0x04040057 0x08450045 0x2 6 >,
+ < 1881600000 0x04040062 0x094e004e 0x3 7 >,
+ < 2016000000 0x04040069 0x0a540054 0x3 8 >,
+ < 2150400000 0x04040070 0x0b590059 0x3 9 >,
+ < 2380800000 0x0404007c 0x0c630063 0x3 10 >;
+
+ qcom,perfcl-speedbin1-v0 =
+ < 300000000 0x0004000f 0x01200020 0x1 1 >,
+ < 787200000 0x05040029 0x04200020 0x1 2 >,
+ < 1113600000 0x0404003a 0x052e002e 0x1 3 >,
+ < 1344000000 0x04040046 0x07380038 0x2 4 >,
+ < 1516800000 0x0404004f 0x073f003f 0x2 5 >,
+ < 1670400000 0x04040057 0x08450045 0x2 6 >,
+ < 1881600000 0x04040062 0x094e004e 0x3 7 >,
+ < 2016000000 0x04040069 0x0a540054 0x3 8 >,
+ < 2150400000 0x04040070 0x0b590059 0x3 8 >,
+ < 2208000000 0x04040073 0x0b5c005c 0x3 10 >;
+
+ qcom,perfcl-speedbin2-v0 =
+ < 300000000 0x0004000f 0x01200020 0x1 1 >,
+ < 787200000 0x05040029 0x04200020 0x1 2 >,
+ < 1113600000 0x0404003a 0x052e002e 0x1 3 >,
+ < 1344000000 0x04040046 0x07380038 0x2 4 >,
+ < 1516800000 0x0404004f 0x073f003f 0x2 5 >,
+ < 1670400000 0x04040057 0x08450045 0x2 6 >,
+ < 1881600000 0x04040062 0x094e004e 0x3 7 >,
+ < 2016000000 0x04040069 0x0a540054 0x3 8 >,
+ < 2150400000 0x04040070 0x0b590059 0x3 9 >,
+ < 2380800000 0x0404007c 0x0c630063 0x3 10 >,
+ < 2515200000 0x04040083 0x0d680068 0x3 11 >;
+
+ qcom,up-timer = <1000 1000>;
+ qcom,down-timer = <1000 1000>;
+ qcom,pc-override-index = <0 0>;
+ qcom,set-ret-inactive;
+ qcom,enable-llm-freq-vote;
+ qcom,llm-freq-up-timer = <327675 327675>;
+ qcom,llm-freq-down-timer = <327675 327675>;
+ qcom,enable-llm-volt-vote;
+ qcom,llm-volt-up-timer = <327675 327675>;
+ qcom,llm-volt-down-timer = <327675 327675>;
+ qcom,cc-reads = <10>;
+ qcom,cc-delay = <5>;
+ qcom,cc-factor = <100>;
+ qcom,osm-clk-rate = <200000000>;
+ qcom,xo-clk-rate = <19200000>;
+
+ qcom,l-val-base = <0x17916004 0x17816004>;
+ qcom,apcs-itm-present = <0x179d143c 0x179d143c>;
+ qcom,apcs-pll-user-ctl = <0x1791600c 0x1781600c>;
+ qcom,apcs-cfg-rcgr = <0x17911054 0x17811054>;
+ qcom,apcs-cmd-rcgr = <0x17911050 0x17811050>;
+ qcom,apm-mode-ctl = <0x179d0004 0x179d0010>;
+ qcom,apm-ctrl-status = <0x179d000c 0x179d0018>;
+
+ qcom,apm-threshold-voltage = <872000>;
+ qcom,boost-fsm-en;
+ qcom,safe-fsm-en;
+ qcom,ps-fsm-en;
+ qcom,droop-fsm-en;
+ qcom,wfx-fsm-en;
+ qcom,pc-fsm-en;
+
+ clock-names = "aux_clk", "xo_a";
+ clocks = <&clock_gcc HMSS_GPLL0_CLK_SRC>,
+ <&clock_rpmcc RPM_XO_A_CLK_SRC>;
+
+ #clock-cells = <1>;
+ };
+
qcom,ipc-spinlock@1f40000 {
compatible = "qcom,ipc-spinlock-sfpb";
reg = <0x1f40000 0x8000>;
@@ -956,6 +1057,41 @@
qcom,mpu-enabled;
};
+ qcom,msm-adsprpc-mem {
+ compatible = "qcom,msm-adsprpc-mem-region";
+ memory-region = <&adsp_mem>;
+ };
+
+ qcom,msm_fastrpc {
+ compatible = "qcom,msm-fastrpc-adsp";
+ qcom,fastrpc-glink;
+
+ qcom,msm_fastrpc_compute_cb1 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 3>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb2 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 7>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb3 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 8>;
+ dma-coherent;
+ };
+ qcom,msm_fastrpc_compute_cb4 {
+ compatible = "qcom,msm-fastrpc-compute-cb";
+ label = "adsprpc-smd";
+ iommus = <&lpass_q6_smmu 9>;
+ dma-coherent;
+ };
+ };
+
dcc: dcc@10b3000 {
compatible = "qcom,dcc";
reg = <0x10b3000 0x1000>,
@@ -1378,6 +1514,108 @@
0x178a80b8 0x178b80b8>;
};
+ jtag_fuse: jtagfuse@786040 {
+ compatible = "qcom,jtag-fuse-v4";
+ reg = <0x786040 0x8>;
+ reg-names = "fuse-base";
+ };
+
+ jtag_mm0: jtagmm@7840000 {
+ compatible = "qcom,jtagv8-mm";
+ reg = <0x7840000 0x1000>;
+ reg-names = "etm-base";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ qcom,coresight-jtagmm-cpu = <&CPU4>;
+ };
+
+ jtag_mm1: jtagmm@7940000 {
+ compatible = "qcom,jtagv8-mm";
+ reg = <0x7940000 0x1000>;
+ reg-names = "etm-base";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ qcom,coresight-jtagmm-cpu = <&CPU5>;
+ };
+
+ jtag_mm2: jtagmm@7a40000 {
+ compatible = "qcom,jtagv8-mm";
+ reg = <0x7a40000 0x1000>;
+ reg-names = "etm-base";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ qcom,coresight-jtagmm-cpu = <&CPU6>;
+ };
+
+ jtag_mm3: jtagmm@7b40000 {
+ compatible = "qcom,jtagv8-mm";
+ reg = <0x7b40000 0x1000>;
+ reg-names = "etm-base";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ qcom,coresight-jtagmm-cpu = <&CPU7>;
+ };
+
+ jtag_mm4: jtagmm@7c40000 {
+ compatible = "qcom,jtagv8-mm";
+ reg = <0x7c40000 0x1000>;
+ reg-names = "etm-base";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ qcom,coresight-jtagmm-cpu = <&CPU0>;
+ };
+
+ jtag_mm5: jtagmm@7d40000 {
+ compatible = "qcom,jtagv8-mm";
+ reg = <0x7d40000 0x1000>;
+ reg-names = "etm-base";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ qcom,coresight-jtagmm-cpu = <&CPU1>;
+ };
+
+ jtag_mm6: jtagmm@7e40000 {
+ compatible = "qcom,jtagv8-mm";
+ reg = <0x7e40000 0x1000>;
+ reg-names = "etm-base";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ qcom,coresight-jtagmm-cpu = <&CPU2>;
+ };
+
+ jtag_mm7: jtagmm@7f40000 {
+ compatible = "qcom,jtagv8-mm";
+ reg = <0x7f40000 0x1000>;
+ reg-names = "etm-base";
+
+ clocks = <&clock_rpmcc RPM_QDSS_CLK>,
+ <&clock_rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "core_clk", "core_a_clk";
+
+ qcom,coresight-jtagmm-cpu = <&CPU3>;
+ };
+
spmi_bus: qcom,spmi@800f000 {
compatible = "qcom,spmi-pmic-arb";
reg = <0x800f000 0x1000>,
@@ -1521,6 +1759,7 @@
#include "sdm660-common.dtsi"
#include "msm-arm-smmu-630.dtsi"
#include "sdm660-camera.dtsi"
+#include "sdm630-pm.dtsi"
&gdsc_usb30 {
status = "ok";
@@ -1588,3 +1827,7 @@
&gdsc_gpu_cx {
status = "ok";
};
+
+&blsp2_uart1_hs {
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660-audio.dtsi b/arch/arm/boot/dts/qcom/sdm660-audio.dtsi
index 6f9a6f9ee946..e1244a497201 100644
--- a/arch/arm/boot/dts/qcom/sdm660-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-audio.dtsi
@@ -89,7 +89,7 @@
wcd_spi_0: wcd_spi {
compatible = "qcom,wcd-spi-v2";
- qcom,master-bus-num = <10>;
+ qcom,master-bus-num = <7>;
qcom,chip-select = <0>;
qcom,max-frequency = <24000000>;
qcom,mem-base-addr = <0x100000>;
@@ -217,6 +217,7 @@
reg = <0x152c1000 0x0>;
interrupts = <0 161 0>;
interrupt-names = "swr_master_irq";
+ qcom,cdc-sdw-gpios = <&cdc_sdw_gpios>;
swr_master {
compatible = "qcom,swr-wcd";
@@ -227,24 +228,28 @@
compatible = "qcom,wsa881x";
reg = <0x0 0x20170211>;
qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
+ qcom,cache-always;
};
wsa881x_212_en: wsa881x_en@20170212 {
compatible = "qcom,wsa881x";
reg = <0x0 0x20170212>;
qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
+ qcom,cache-always;
};
wsa881x_213_en: wsa881x_en@21170213 {
compatible = "qcom,wsa881x";
reg = <0x0 0x21170213>;
qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
+ qcom,cache-always;
};
wsa881x_214_en: wsa881x_en@21170214 {
compatible = "qcom,wsa881x";
reg = <0x0 0x21170214>;
qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
+ qcom,cache-always;
};
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi b/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi
index 0b82fbf7ffbc..5bdca492fee2 100644
--- a/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi
@@ -42,6 +42,39 @@
status = "ok";
};
+&pm660_gpios {
+ /* GPIO 4 (NFC_CLK_REQ) */
+ gpio@c300 {
+ qcom,mode = <0>;
+ qcom,vin-sel = <1>;
+ qcom,src-sel = <0>;
+ qcom,master-en = <1>;
+ status = "okay";
+ };
+};
+
+&i2c_6 { /* BLSP1 QUP6 (NFC) */
+ status = "okay";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 28 0x00>;
+ qcom,nq-ven = <&tlmm 29 0x00>;
+ qcom,nq-firm = <&tlmm 30 0x00>;
+ qcom,nq-clkreq = <&pm660_gpios 4 0x00>;
+ qcom,nq-esepwr = <&tlmm 31 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK3";
+ interrupts = <28 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+ clocks = <&clock_rpmcc RPM_LN_BB_CLK3_PIN>;
+ clock-names = "ref_clk";
+ };
+};
+
&mdss_mdp {
qcom,mdss-pref-prim-intf = "dsi";
};
@@ -139,6 +172,18 @@
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
+&dsi_truly_1080_vid {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+};
+
+&dsi_truly_1080_cmd {
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,partial-update-enabled = "single_roi";
+ qcom,panel-roi-alignment = <2 2 4 2 1080 2>;
+};
+
&sdhc_1 {
/* device core power supply */
vdd-supply = <&pm660l_l4>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-common.dtsi b/arch/arm/boot/dts/qcom/sdm660-common.dtsi
index a5e66f38df3c..5a0997faf133 100644
--- a/arch/arm/boot/dts/qcom/sdm660-common.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-common.dtsi
@@ -11,6 +11,89 @@
*/
&soc {
+ ufsphy1: ufsphy@1da7000 {
+ compatible = "qcom,ufs-phy-qmp-v3-660";
+ reg = <0x1da7000 0xdb8>;
+ reg-names = "phy_mem";
+ #phy-cells = <0>;
+ clock-names = "ref_clk_src",
+ "ref_clk",
+ "ref_aux_clk";
+ clocks = <&clock_rpmcc RPM_LN_BB_CLK1>,
+ <&clock_gcc GCC_UFS_CLKREF_CLK>,
+ <&clock_gcc GCC_UFS_PHY_AUX_CLK>;
+ status = "disabled";
+ };
+
+ ufs1: ufshc@1da4000 {
+ compatible = "qcom,ufshc";
+ reg = <0x1da4000 0x3000>;
+ interrupts = <0 265 0>;
+ phys = <&ufsphy1>;
+ phy-names = "ufsphy";
+
+ clock-names =
+ "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro",
+ "core_clk_ice",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk";
+ clocks =
+ <&clock_gcc GCC_UFS_AXI_CLK>,
+ <&clock_gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&clock_gcc GCC_UFS_AHB_CLK>,
+ <&clock_gcc GCC_UFS_UNIPRO_CORE_CLK>,
+ <&clock_gcc GCC_UFS_ICE_CORE_CLK>,
+ <&clock_rpmcc RPM_LN_BB_CLK1>,
+ <&clock_gcc GCC_UFS_TX_SYMBOL_0_CLK>,
+ <&clock_gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+ freq-table-hz =
+ <50000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <37500000 150000000>,
+ <75000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ lanes-per-direction = <1>;
+
+ qcom,msm-bus,name = "ufs1";
+ qcom,msm-bus,num-cases = <12>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,vectors-KBps =
+ <95 512 0 0>, <1 650 0 0>, /* No vote */
+ <95 512 922 0>, <1 650 1000 0>, /* PWM G1 */
+ <95 512 1844 0>, <1 650 1000 0>, /* PWM G2 */
+ <95 512 3688 0>, <1 650 1000 0>, /* PWM G3 */
+ <95 512 7376 0>, <1 650 1000 0>, /* PWM G4 */
+ <95 512 127796 0>, <1 650 1000 0>, /* HS G1 RA */
+ <95 512 255591 0>, <1 650 1000 0>, /* HS G2 RA */
+ <95 512 2097152 0>, <1 650 102400 0>, /* HS G3 RA */
+ <95 512 149422 0>, <1 650 1000 0>, /* HS G1 RB */
+ <95 512 298189 0>, <1 650 1000 0>, /* HS G2 RB */
+ <95 512 2097152 0>, <1 650 102400 0>, /* HS G3 RB */
+ <95 512 7643136 0>, <1 650 307200 0>; /* Max. bandwidth */
+ qcom,bus-vector-names = "MIN",
+ "PWM_G1_L1", "PWM_G2_L1", "PWM_G3_L1", "PWM_G4_L1",
+ "HS_RA_G1_L1", "HS_RA_G2_L1", "HS_RA_G3_L1",
+ "HS_RB_G1_L1", "HS_RB_G2_L1", "HS_RB_G3_L1",
+ "MAX";
+
+ qcom,pm-qos-cpu-groups = <0x0F 0xF0>;
+ qcom,pm-qos-cpu-group-latency-us = <26 26>;
+ qcom,pm-qos-default-cpu = <0>;
+
+ resets = <&clock_gcc GCC_UFS_BCR>;
+ reset-names = "core_reset";
+
+ status = "disabled";
+ };
+
usb3: ssusb@a800000 {
compatible = "qcom,dwc-usb3-msm";
reg = <0x0a800000 0xfc100>,
diff --git a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
index 1e62a2423e38..dedc0c99d2e5 100644
--- a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
@@ -58,8 +58,9 @@
label = "kgsl-3d0";
compatible = "qcom,kgsl-3d0", "qcom,kgsl-3d";
status = "ok";
- reg = <0x5000000 0x40000>;
- reg-names = "kgsl_3d0_reg_memory";
+ reg = <0x5000000 0x40000
+ 0x780000 0x6220>;
+ reg-names = "kgsl_3d0_reg_memory", "qfprom_memory";
interrupts = <0 300 0>;
interrupt-names = "kgsl_3d0_irq";
qcom,id = <0>;
@@ -123,7 +124,6 @@
qcom,pm-qos-wakeup-latency = <349>;
/* Quirks */
- qcom,gpu-quirk-two-pass-use-wfi;
qcom,gpu-quirk-dp2clockgating-disable;
qcom,gpu-quirk-lmloadkill-disable;
@@ -136,6 +136,8 @@
/* Context aware jump target power level */
qcom,ca-target-pwrlevel = <4>;
+ qcom,gpu-speed-bin = <0x41a0 0x1fe00000 21>;
+
/* GPU Mempools */
qcom,gpu-mempools {
#address-cells= <1>;
@@ -156,92 +158,349 @@
};
};
- /* Power levels */
- qcom,gpu-pwrlevels {
+ /*
+ * Speed-bin zero is default speed bin.
+ * For rest of the speed bins, speed-bin value
+	 * is calculated as FMAX/4.8 MHz rounded up to zero
+ * decimal places.
+ */
+ qcom,gpu-pwrlevel-bins {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "qcom,gpu-pwrlevels";
-
- /* TURBO */
- qcom,gpu-pwrlevel@0 {
- reg = <0>;
- qcom,gpu-freq = <750000000>;
- qcom,bus-freq = <13>;
- qcom,bus-min = <12>;
- qcom,bus-max = <13>;
- };
-
- /* TURBO */
- qcom,gpu-pwrlevel@1 {
- reg = <1>;
- qcom,gpu-freq = <700000000>;
- qcom,bus-freq = <11>;
- qcom,bus-min = <11>;
- qcom,bus-max = <13>;
- };
-
- /* NOM_L1 */
- qcom,gpu-pwrlevel@2 {
- reg = <2>;
- qcom,gpu-freq = <647000000>;
- qcom,bus-freq = <11>;
- qcom,bus-min = <10>;
- qcom,bus-max = <12>;
- };
-
- /* NOM */
- qcom,gpu-pwrlevel@3 {
- reg = <3>;
- qcom,gpu-freq = <588000000>;
- qcom,bus-freq = <10>;
- qcom,bus-min = <9>;
- qcom,bus-max = <12>;
- };
-
- /* SVS_L1 */
- qcom,gpu-pwrlevel@4 {
- reg = <4>;
- qcom,gpu-freq = <465000000>;
- qcom,bus-freq = <9>;
- qcom,bus-min = <8>;
- qcom,bus-max = <11>;
- };
-
- /* SVS */
- qcom,gpu-pwrlevel@5 {
- reg = <5>;
- qcom,gpu-freq = <370000000>;
- qcom,bus-freq = <8>;
- qcom,bus-min = <6>;
- qcom,bus-max = <9>;
+ compatible="qcom,gpu-pwrlevel-bins";
+
+ qcom,gpu-pwrlevels-0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <0>;
+
+ qcom,initial-pwrlevel = <6>;
+
+ /* TURBO */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <750000000>;
+ qcom,bus-freq = <13>;
+ qcom,bus-min = <12>;
+ qcom,bus-max = <13>;
+ };
+
+ /* TURBO */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <700000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <11>;
+ qcom,bus-max = <13>;
+ };
+
+ /* NOM_L1 */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <647000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <10>;
+ qcom,bus-max = <12>;
+ };
+
+ /* NOM */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <588000000>;
+ qcom,bus-freq = <10>;
+ qcom,bus-min = <9>;
+ qcom,bus-max = <12>;
+ };
+
+ /* SVS_L1 */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <465000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <370000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <6>;
+ qcom,bus-max = <9>;
+ };
+
+ /* Low SVS */
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <266000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <6>;
+ };
+
+ /* Min SVS */
+ qcom,gpu-pwrlevel@7 {
+ reg = <7>;
+ qcom,gpu-freq = <160000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <5>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@8 {
+ reg = <8>;
+ qcom,gpu-freq = <19200000>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
};
- /* Low SVS */
- qcom,gpu-pwrlevel@6 {
- reg = <6>;
- qcom,gpu-freq = <266000000>;
- qcom,bus-freq = <3>;
- qcom,bus-min = <3>;
- qcom,bus-max = <6>;
+ qcom,gpu-pwrlevels-1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <157>;
+
+ qcom,initial-pwrlevel = <6>;
+
+ /* TURBO */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <750000000>;
+ qcom,bus-freq = <13>;
+ qcom,bus-min = <12>;
+ qcom,bus-max = <13>;
+ };
+
+ /* TURBO */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <700000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <11>;
+ qcom,bus-max = <13>;
+ };
+
+ /* NOM_L1 */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <647000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <10>;
+ qcom,bus-max = <12>;
+ };
+
+ /* NOM */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <588000000>;
+ qcom,bus-freq = <10>;
+ qcom,bus-min = <9>;
+ qcom,bus-max = <12>;
+ };
+
+ /* SVS_L1 */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <465000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <370000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <6>;
+ qcom,bus-max = <9>;
+ };
+
+ /* Low SVS */
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <266000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <6>;
+ };
+
+ /* Min SVS */
+ qcom,gpu-pwrlevel@7 {
+ reg = <7>;
+ qcom,gpu-freq = <160000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <5>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@8 {
+ reg = <8>;
+ qcom,gpu-freq = <19200000>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
};
- /* Min SVS */
- qcom,gpu-pwrlevel@7 {
- reg = <7>;
- qcom,gpu-freq = <160000000>;
- qcom,bus-freq = <3>;
- qcom,bus-min = <3>;
- qcom,bus-max = <5>;
+ qcom,gpu-pwrlevels-2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <146>;
+
+ qcom,initial-pwrlevel = <5>;
+
+ /* TURBO */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <700000000>;
+ qcom,bus-freq = <13>;
+ qcom,bus-min = <12>;
+ qcom,bus-max = <13>;
+ };
+
+ /* NOM_L1 */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <647000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <10>;
+ qcom,bus-max = <12>;
+ };
+
+ /* NOM */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <588000000>;
+ qcom,bus-freq = <10>;
+ qcom,bus-min = <9>;
+ qcom,bus-max = <12>;
+ };
+
+ /* SVS_L1 */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <465000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <370000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <6>;
+ qcom,bus-max = <9>;
+ };
+
+ /* Low SVS */
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <266000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <6>;
+ };
+
+ /* Min SVS */
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <160000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <5>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@7 {
+ reg = <7>;
+ qcom,gpu-freq = <19200000>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
};
- /* XO */
- qcom,gpu-pwrlevel@8 {
- reg = <8>;
- qcom,gpu-freq = <19200000>;
- qcom,bus-freq = <0>;
- qcom,bus-min = <0>;
- qcom,bus-max = <0>;
+ qcom,gpu-pwrlevels-3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <135>;
+
+ qcom,initial-pwrlevel = <4>;
+
+ /* NOM_L1 */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <647000000>;
+ qcom,bus-freq = <13>;
+ qcom,bus-min = <12>;
+ qcom,bus-max = <13>;
+ };
+
+ /* NOM */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <588000000>;
+ qcom,bus-freq = <10>;
+ qcom,bus-min = <9>;
+ qcom,bus-max = <12>;
+ };
+
+ /* SVS_L1 */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <465000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <11>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <370000000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <6>;
+ qcom,bus-max = <9>;
+ };
+
+ /* Low SVS */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <266000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <6>;
+ };
+
+ /* Min SVS */
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <160000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <5>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <19200000>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
index da41c9127d90..a0cc0a6180e5 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
@@ -22,6 +22,8 @@
#include "dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi"
#include "dsi-panel-nt35695b-truly-fhd-video.dtsi"
#include "dsi-panel-nt35695b-truly-fhd-cmd.dtsi"
+#include "dsi-panel-truly-1080p-cmd.dtsi"
+#include "dsi-panel-truly-1080p-video.dtsi"
&soc {
dsi_panel_pwr_supply: dsi_panel_pwr_supply {
@@ -56,9 +58,58 @@
qcom,supply-post-on-sleep = <10>;
};
};
-};
-&soc {
+ dsi_panel_pwr_supply_labibb_amoled:
+ dsi_panel_pwr_supply_labibb_amoled {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,panel-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "wqhd-vddio";
+ qcom,supply-min-voltage = <1880000>;
+ qcom,supply-max-voltage = <1950000>;
+ qcom,supply-enable-load = <32000>;
+ qcom,supply-disable-load = <80>;
+ };
+
+ qcom,panel-supply-entry@1 {
+ reg = <1>;
+ qcom,supply-name = "vdda-3p3";
+ qcom,supply-min-voltage = <3300000>;
+ qcom,supply-max-voltage = <3300000>;
+ qcom,supply-enable-load = <13200>;
+ qcom,supply-disable-load = <80>;
+ };
+
+ qcom,panel-supply-entry@2 {
+ reg = <2>;
+ qcom,supply-name = "lab";
+ qcom,supply-min-voltage = <4600000>;
+ qcom,supply-max-voltage = <6100000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+
+ qcom,panel-supply-entry@3 {
+ reg = <3>;
+ qcom,supply-name = "ibb";
+ qcom,supply-min-voltage = <4000000>;
+ qcom,supply-max-voltage = <6300000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+
+ qcom,panel-supply-entry@4 {
+ reg = <4>;
+ qcom,supply-name = "oledb";
+ qcom,supply-min-voltage = <5000000>;
+ qcom,supply-max-voltage = <8100000>;
+ qcom,supply-enable-load = <100000>;
+ qcom,supply-disable-load = <100>;
+ };
+ };
+
dsi_panel_pwr_supply_no_labibb: dsi_panel_pwr_supply_no_labibb {
#address-cells = <1>;
#size-cells = <0>;
@@ -80,6 +131,8 @@
23 1e 07 08 05 03 04 a0
23 1e 07 08 05 03 04 a0
23 18 07 08 04 03 04 a0];
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "bta_check";
};
&dsi_dual_nt35597_truly_cmd {
@@ -157,3 +210,19 @@
23 1e 07 08 05 03 04 a0
23 18 07 08 04 03 04 a0];
};
+
+&dsi_truly_1080_vid {
+ qcom,mdss-dsi-panel-timings-phy-v2 = [23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1a 08 09 05 03 04 a0];
+};
+
+&dsi_truly_1080_cmd {
+ qcom,mdss-dsi-panel-timings-phy-v2 = [23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1e 08 09 05 03 04 a0
+ 23 1a 08 09 05 03 04 a0];
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi b/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
index 1e7f5ea6efc5..32cf55a99ac0 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
@@ -46,8 +46,9 @@
/* VBIF QoS remapper settings*/
qcom,mdss-vbif-qos-rt-setting = <1 2 2 2>;
- qcom,vbif-settings = <0x00ac 0x00000040>,
- <0x00d0 0x00001010>; /* v1 only */
+ qcom,mdss-vbif-qos-nrt-setting = <1 1 1 1>;
+ qcom,vbif-settings = <0x00ac 0x00008040>,
+ <0x00d0 0x00002828>;
qcom,mdss-has-panic-ctrl;
qcom,mdss-per-pipe-panic-luts = <0x000f>,
@@ -127,7 +128,7 @@
<0x012ac 0xc0000ccc>,
<0x012b4 0xc0000ccc>,
<0x012bc 0x00cccccc>,
- <0x012c4 0x000000cc>,
+ <0x012c4 0x0000cccc>,
<0x013a8 0x0cccc0c0>,
<0x013b0 0xccccc0c0>,
<0x013b8 0xcccc0000>,
@@ -355,6 +356,7 @@
qcom,timing-db-mode;
wqhd-vddio-supply = <&pm660_l11>;
+ vdda-3p3-supply = <&pm660l_l6>;
lab-supply = <&lcdb_ldo_vreg>;
ibb-supply = <&lcdb_ncp_vreg>;
qcom,mdss-mdp = <&mdss_mdp>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
index b35f66abfde5..b666d846ca04 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
@@ -47,6 +47,39 @@
status = "ok";
};
+&pm660_gpios {
+ /* GPIO 4 (NFC_CLK_REQ) */
+ gpio@c300 {
+ qcom,mode = <0>;
+ qcom,vin-sel = <1>;
+ qcom,src-sel = <0>;
+ qcom,master-en = <1>;
+ status = "okay";
+ };
+};
+
+&i2c_6 { /* BLSP1 QUP6 (NFC) */
+ status = "okay";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 28 0x00>;
+ qcom,nq-ven = <&tlmm 29 0x00>;
+ qcom,nq-firm = <&tlmm 30 0x00>;
+ qcom,nq-clkreq = <&pm660_gpios 4 0x00>;
+ qcom,nq-esepwr = <&tlmm 31 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK3";
+ interrupts = <28 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+ clocks = <&clock_rpmcc RPM_LN_BB_CLK3_PIN>;
+ clock-names = "ref_clk";
+ };
+};
+
&mdss_mdp {
qcom,mdss-pref-prim-intf = "dsi";
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi
index 2c207873aa0b..dbc98a97fc8d 100644
--- a/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi
@@ -369,6 +369,68 @@
};
};
+ nfc {
+ nfc_int_active: nfc_int_active {
+ /* active state */
+ mux {
+ /* GPIO 28 NFC Read Interrupt */
+ pins = "gpio28";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio28";
+ drive-strength = <2>; /* 2 MA */
+ bias-pull-up;
+ };
+ };
+
+ nfc_int_suspend: nfc_int_suspend {
+ /* sleep state */
+ mux {
+ /* GPIO 28 NFC Read Interrupt */
+ pins = "gpio28";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio28";
+ drive-strength = <2>; /* 2 MA */
+ bias-pull-up;
+ };
+ };
+
+ nfc_enable_active: nfc_enable_active {
+ /* active state */
+ mux {
+ /* 29: NFC ENABLE 31:ESE Enable */
+ pins = "gpio29", "gpio31";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio29", "gpio31";
+ drive-strength = <2>; /* 2 MA */
+ bias-pull-up;
+ };
+ };
+
+ nfc_enable_suspend: nfc_enable_suspend {
+ /* sleep state */
+ mux {
+ /* 29: NFC ENABLE 31:ESE Enable */
+ pins = "gpio29", "gpio31";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio29", "gpio31";
+ drive-strength = <2>; /* 2 MA */
+ bias-disable;
+ };
+ };
+ };
+
i2c_7 {
i2c_7_active: i2c_7_active {
mux {
@@ -692,7 +754,7 @@
config {
pins = "gpio24";
drive-strength = <2>;
- bias-pull-down;
+ bias-bus-hold;
};
};
@@ -704,8 +766,8 @@
config {
pins = "gpio24";
- drive-strength = <16>;
- bias-disable;
+ drive-strength = <2>;
+ bias-bus-hold;
};
};
};
@@ -719,8 +781,8 @@
config {
pins = "gpio25";
- drive-strength = <2>;
- bias-pull-down;
+ drive-strength = <4>;
+ bias-bus-hold;
};
};
@@ -732,8 +794,8 @@
config {
pins = "gpio25";
- drive-strength = <16>;
- bias-disable;
+ drive-strength = <4>;
+ bias-bus-hold;
};
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-pm660a-cdp.dts b/arch/arm/boot/dts/qcom/sdm660-pm660a-cdp.dts
index 0ce1ab440e92..335f454d2bba 100644
--- a/arch/arm/boot/dts/qcom/sdm660-pm660a-cdp.dts
+++ b/arch/arm/boot/dts/qcom/sdm660-pm660a-cdp.dts
@@ -24,3 +24,15 @@
qcom,board-id = <1 0>;
qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>;
};
+
+&mdss_dsi0 {
+ oledb-supply = <&pm660a_oledb>;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+};
+
+&mdss_dsi1 {
+ oledb-supply = <&pm660a_oledb>;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660-pm660a-mtp.dts b/arch/arm/boot/dts/qcom/sdm660-pm660a-mtp.dts
index 25f7c1ba969c..a783060d0155 100644
--- a/arch/arm/boot/dts/qcom/sdm660-pm660a-mtp.dts
+++ b/arch/arm/boot/dts/qcom/sdm660-pm660a-mtp.dts
@@ -24,3 +24,15 @@
qcom,board-id = <8 0>;
qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>;
};
+
+&mdss_dsi0 {
+ oledb-supply = <&pm660a_oledb>;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+};
+
+&mdss_dsi1 {
+ oledb-supply = <&pm660a_oledb>;
+ lab-supply = <&lab_regulator>;
+ ibb-supply = <&ibb_regulator>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660-qrd.dts b/arch/arm/boot/dts/qcom/sdm660-qrd.dts
index 5d888c9d039f..9ad1bed8bbfa 100644
--- a/arch/arm/boot/dts/qcom/sdm660-qrd.dts
+++ b/arch/arm/boot/dts/qcom/sdm660-qrd.dts
@@ -64,25 +64,3 @@
&pm660l_wled {
qcom,led-strings-list = [00 01];
};
-
-&tasha_snd {
- qcom,model = "sdm660-tasha-skus-snd-card";
- qcom,audio-routing =
- "AIF4 VI", "MCLK",
- "RX_BIAS", "MCLK",
- "MADINPUT", "MCLK",
- "AMIC2", "MIC BIAS2",
- "MIC BIAS2", "Headset Mic",
- "DMIC0", "MIC BIAS1",
- "MIC BIAS1", "Digital Mic0",
- "DMIC2", "MIC BIAS3",
- "MIC BIAS3", "Digital Mic2",
- "DMIC4", "MIC BIAS3",
- "MIC BIAS3", "Digital Mic4",
- "SpkrLeft IN", "SPK1 OUT";
- qcom,msm-mbhc-hphl-swh = <1>;
- /delete-property/ qcom,us-euro-gpios;
- qcom,wsa-max-devs = <1>;
- qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_213>;
- qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
-};
diff --git a/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
index 57e547085cf9..f0d13b3455ab 100644
--- a/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
@@ -12,6 +12,7 @@
#include "sdm660-pinctrl.dtsi"
#include "sdm660-camera-sensor-qrd.dtsi"
+#include "sdm660-external-codec.dtsi"
/ {
};
@@ -118,6 +119,37 @@
qcom,src-sel = <0>;
qcom,out-strength = <1>;
};
+
+ /* GPIO 4 (NFC_CLK_REQ) */
+ gpio@c300 {
+ qcom,mode = <0>;
+ qcom,vin-sel = <1>;
+ qcom,src-sel = <0>;
+ qcom,master-en = <1>;
+ status = "okay";
+ };
+};
+
+&i2c_6 { /* BLSP1 QUP6 (NFC) */
+ status = "okay";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 28 0x00>;
+ qcom,nq-ven = <&tlmm 29 0x00>;
+ qcom,nq-firm = <&tlmm 30 0x00>;
+ qcom,nq-clkreq = <&pm660_gpios 4 0x00>;
+ qcom,nq-esepwr = <&tlmm 31 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK3";
+ interrupts = <28 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+ clocks = <&clock_rpmcc RPM_LN_BB_CLK3_PIN>;
+ clock-names = "ref_clk";
+ };
};
&pm660l_gpios {
@@ -222,3 +254,28 @@
qcom,fg-jeita-thresholds = <0 5 55 55>;
qcom,fg-cutoff-voltage = <3700>;
};
+
+&tasha_snd {
+ qcom,model = "sdm660-tasha-skus-snd-card";
+ qcom,audio-routing =
+ "AIF4 VI", "MCLK",
+ "RX_BIAS", "MCLK",
+ "MADINPUT", "MCLK",
+ "AMIC2", "MIC BIAS2",
+ "MIC BIAS2", "Headset Mic",
+ "DMIC0", "MIC BIAS1",
+ "MIC BIAS1", "Digital Mic0",
+ "DMIC2", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic2",
+ "DMIC4", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic4",
+ "SpkrLeft IN", "SPK1 OUT";
+ qcom,msm-mbhc-hphl-swh = <1>;
+ /delete-property/ qcom,us-euro-gpios;
+ /delete-property/ qcom,hph-en0-gpio;
+ /delete-property/ qcom,hph-en1-gpio;
+ qcom,wsa-max-devs = <1>;
+ qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_213>;
+ qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
+};
+
diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi
index 7c3877e5bda1..2a7ef9dcd4cd 100644
--- a/arch/arm/boot/dts/qcom/sdm660.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660.dtsi
@@ -315,7 +315,7 @@
alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
reusable;
alignment = <0x0 0x400000>;
- size = <0x0 0x400000>;
+ size = <0x0 0xa00000>;
};
qseecom_mem: qseecom_region {
@@ -356,7 +356,7 @@
qca,bt-vdd-pa-supply = <&pm660_l6_pin_ctrl>;
qca,bt-vdd-ldo-supply = <&pm660_l19_pin_ctrl>;
qca,bt-chip-pwd-supply = <&pm660l_bob_pin1>;
- clocks = <&clock_rpmcc RPM_RF_CLK1>;
+ clocks = <&clock_rpmcc RPM_RF_CLK1_PIN>;
clock-names = "rf_clk1";
qca,bt-vdd-core-voltage-level = <1800000 1900000>;
@@ -562,6 +562,8 @@
reg-names = "tsens_physical", "tsens_eeprom_physical";
interrupts = <0 184 0>, <0 430 0>;
interrupt-names = "tsens-upper-lower", "tsens-critical";
+ qcom,client-id = <0 1 2 3 4 5 6 7 8 9 10 11 12 13>;
+ qcom,sensor-id = <0 10 11 4 5 6 7 8 13 1 3 12 9 2>;
qcom,sensors = <14>;
qcom,slope = <3200 3200 3200 3200 3200 3200 3200 3200
3200 3200 3200 3200 3200 3200>;
@@ -726,7 +728,7 @@
clock-names = "osm";
clocks = <&clock_cpu PWRCL_CLK>;
-
+ qcom,cxip-lm-enable = <1>;
qcom,vdd-restriction-temp = <5>;
qcom,vdd-restriction-temp-hysteresis = <10>;
@@ -1249,6 +1251,51 @@
< 2457600 >;
};
+ ufs_ice: ufsice@1db0000 {
+ compatible = "qcom,ice";
+ reg = <0x1db0000 0x8000>;
+ qcom,enable-ice-clk;
+ clock-names = "ufs_core_clk", "bus_clk",
+ "iface_clk", "ice_core_clk";
+ clocks = <&clock_gcc GCC_UFS_AXI_CLK>,
+ <&clock_gcc GCC_UFS_CLKREF_CLK>,
+ <&clock_gcc GCC_UFS_AHB_CLK>,
+ <&clock_gcc GCC_UFS_ICE_CORE_CLK>;
+ qcom,op-freq-hz = <0>, <0>, <0>, <300000000>;
+ vdd-hba-supply = <&gdsc_ufs>;
+ qcom,msm-bus,name = "ufs_ice_noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <1 650 0 0>, /* No vote */
+ <1 650 1000 0>; /* Max. bandwidth */
+ qcom,bus-vector-names = "MIN",
+ "MAX";
+ qcom,instance-type = "ufs";
+ };
+
+ sdcc1_ice: sdcc1ice@c0c8000 {
+ compatible = "qcom,ice";
+ reg = <0xc0c8000 0x8000>;
+ qcom,enable-ice-clk;
+ clock-names = "ice_core_clk_src", "ice_core_clk",
+ "bus_clk", "iface_clk";
+ clocks = <&clock_gcc SDCC1_ICE_CORE_CLK_SRC>,
+ <&clock_gcc GCC_SDCC1_ICE_CORE_CLK>,
+ <&clock_gcc GCC_SDCC1_APPS_CLK>,
+ <&clock_gcc GCC_SDCC1_AHB_CLK>;
+ qcom,op-freq-hz = <300000000>, <0>, <0>, <0>;
+ qcom,msm-bus,name = "sdcc_ice_noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <78 512 0 0>, /* No vote */
+ <78 512 1000 0>; /* Max. bandwidth */
+ qcom,bus-vector-names = "MIN",
+ "MAX";
+ qcom,instance-type = "sdcc";
+ };
+
sdhc_1: sdhci@c0c4000 {
compatible = "qcom,sdhci-msm-v5";
reg = <0xc0c4000 0x1000>, <0xc0c5000 0x1000>;
@@ -1259,6 +1306,7 @@
qcom,bus-width = <8>;
qcom,large-address-bus;
+ sdhc-msm-crypto = <&sdcc1_ice>;
qcom,devfreq,freq-table = <50000000 200000000>;
@@ -1509,7 +1557,7 @@
dcc: dcc@10b3000 {
compatible = "qcom,dcc";
reg = <0x10b3000 0x1000>,
- <0x10b4000 0x2000>;
+ <0x10b4000 0x800>;
reg-names = "dcc-base", "dcc-ram-base";
clocks = <&clock_gcc GCC_DCC_AHB_CLK>;
@@ -2127,6 +2175,7 @@
interrupts = <0 265 0>;
phys = <&ufsphy1>;
phy-names = "ufsphy";
+ ufs-qcom-crypto = <&ufs_ice>;
clock-names =
"core_clk",
diff --git a/arch/arm/configs/sdm660-perf_defconfig b/arch/arm/configs/sdm660-perf_defconfig
index b8df922b52fc..0028dbbb5c68 100644
--- a/arch/arm/configs/sdm660-perf_defconfig
+++ b/arch/arm/configs/sdm660-perf_defconfig
@@ -254,11 +254,15 @@ CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
CONFIG_NETDEVICES=y
CONFIG_BONDING=y
CONFIG_DUMMY=y
@@ -448,6 +452,7 @@ CONFIG_MMC_TEST=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/configs/sdm660_defconfig b/arch/arm/configs/sdm660_defconfig
index fd5340a45aa2..c2ceac22f15f 100644
--- a/arch/arm/configs/sdm660_defconfig
+++ b/arch/arm/configs/sdm660_defconfig
@@ -253,11 +253,15 @@ CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
CONFIG_NETDEVICES=y
CONFIG_BONDING=y
CONFIG_DUMMY=y
@@ -449,6 +453,7 @@ CONFIG_MMC_TEST=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
diff --git a/arch/arm64/configs/msm-perf_defconfig b/arch/arm64/configs/msm-perf_defconfig
index 56bbe8054264..f184c571331a 100644
--- a/arch/arm64/configs/msm-perf_defconfig
+++ b/arch/arm64/configs/msm-perf_defconfig
@@ -78,6 +78,7 @@ CONFIG_XFRM_USER=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_VERBOSE=y
@@ -344,7 +345,6 @@ CONFIG_THERMAL_TSENS8974=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_WCD9335_CODEC=y
-CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_RPM_SMD=y
CONFIG_REGULATOR_QPNP=y
@@ -394,13 +394,15 @@ CONFIG_MSM_VIDC_VMEM=y
CONFIG_MSM_VIDC_GOVERNORS=y
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_QCOM_KGSL=y
-CONFIG_FB=y
+CONFIG_DRM=y
CONFIG_FB_MSM=y
CONFIG_FB_MSM_MDSS=y
CONFIG_FB_MSM_MDSS_WRITEBACK=y
CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=m
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -516,7 +518,6 @@ CONFIG_MSM_IPC_ROUTER_MHI_XPRT=y
CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SPM=y
-CONFIG_QCOM_SCM=y
CONFIG_QCOM_SCM_XPU=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
@@ -560,7 +561,6 @@ CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
diff --git a/arch/arm64/configs/msm_defconfig b/arch/arm64/configs/msm_defconfig
index c39a9311e056..a7d2b895e08c 100644
--- a/arch/arm64/configs/msm_defconfig
+++ b/arch/arm64/configs/msm_defconfig
@@ -78,6 +78,7 @@ CONFIG_XFRM_USER=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_VERBOSE=y
@@ -331,7 +332,6 @@ CONFIG_THERMAL_TSENS8974=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_WCD9335_CODEC=y
-CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_RPM_SMD=y
CONFIG_REGULATOR_QPNP=y
@@ -382,13 +382,15 @@ CONFIG_MSM_VIDC_VMEM=y
CONFIG_MSM_VIDC_GOVERNORS=y
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_QCOM_KGSL=y
-CONFIG_FB=y
+CONFIG_DRM=y
CONFIG_FB_MSM=y
CONFIG_FB_MSM_MDSS=y
CONFIG_FB_MSM_MDSS_WRITEBACK=y
CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=m
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -517,7 +519,6 @@ CONFIG_MSM_IPC_ROUTER_MHI_XPRT=y
CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SPM=y
-CONFIG_QCOM_SCM=y
CONFIG_QCOM_SCM_XPU=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
@@ -567,7 +568,6 @@ CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index d71cfed7614d..f4a7d9107f36 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -87,6 +87,7 @@ CONFIG_XFRM_USER=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_VERBOSE=y
@@ -456,6 +457,7 @@ CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_GSI=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_CCID=y
CONFIG_MMC=y
CONFIG_MMC_PERF_PROFILING=y
CONFIG_MMC_CLKGATE=y
@@ -567,6 +569,7 @@ CONFIG_PWM_QPNP=y
CONFIG_ARM_GIC_V3_ACL=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder"
CONFIG_MSM_TZ_LOG=y
CONFIG_SENSORS_SSC=y
CONFIG_EXT2_FS=y
@@ -599,6 +602,7 @@ CONFIG_DEBUG_RODATA=y
CONFIG_DEBUG_ALIGN_RODATA=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_EVENT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
CONFIG_CORESIGHT_QCOM_REPLICATOR=y
CONFIG_CORESIGHT_STM=y
CONFIG_CORESIGHT_HWEVENT=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index cbff6b3a5b74..d0fdb7206d5b 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -85,6 +85,7 @@ CONFIG_XFRM_USER=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_VERBOSE=y
@@ -457,6 +458,7 @@ CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_GSI=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_CCID=y
CONFIG_MMC=y
CONFIG_MMC_PERF_PROFILING=y
CONFIG_MMC_PARANOID_SD_INIT=y
@@ -586,6 +588,7 @@ CONFIG_ARM_GIC_V3_ACL=y
CONFIG_PHY_XGENE=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder"
CONFIG_MSM_TZ_LOG=y
CONFIG_SENSORS_SSC=y
CONFIG_EXT2_FS=y
diff --git a/arch/arm64/configs/sdm660-perf_defconfig b/arch/arm64/configs/sdm660-perf_defconfig
index 27b445e30a49..d8a62f634ecc 100644
--- a/arch/arm64/configs/sdm660-perf_defconfig
+++ b/arch/arm64/configs/sdm660-perf_defconfig
@@ -249,11 +249,15 @@ CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
CONFIG_NETDEVICES=y
CONFIG_BONDING=y
CONFIG_DUMMY=y
@@ -476,6 +480,7 @@ CONFIG_MMC_TEST=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_LEDS_QPNP=y
CONFIG_LEDS_QPNP_FLASH_V2=y
diff --git a/arch/arm64/configs/sdm660_defconfig b/arch/arm64/configs/sdm660_defconfig
index 9dac8be9d37d..bc633548bbc3 100644
--- a/arch/arm64/configs/sdm660_defconfig
+++ b/arch/arm64/configs/sdm660_defconfig
@@ -250,11 +250,15 @@ CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_DM_REQ_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_ANDROID_VERITY=y
CONFIG_NETDEVICES=y
CONFIG_BONDING=y
CONFIG_DUMMY=y
@@ -478,6 +482,7 @@ CONFIG_MMC_TEST=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
CONFIG_MMC_CQ_HCI=y
CONFIG_LEDS_QPNP=y
CONFIG_LEDS_QPNP_FLASH_V2=y
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index bdfc6c6f4f5a..a82fc022d34b 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -19,6 +19,18 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.
+config ANDROID_BINDER_DEVICES
+ string "Android Binder devices"
+ depends on ANDROID_BINDER_IPC
+ default "binder"
+ ---help---
+ Default value for the binder.devices parameter.
+
+ The binder.devices parameter is a comma-separated list of strings
+ that specifies the names of the binder device nodes that will be
+ created. Each binder device has its own context manager, and is
+ therefore logically separated from the other devices.
+
config ANDROID_BINDER_IPC_32BIT
bool
depends on !64BIT && ANDROID_BINDER_IPC
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a3f458fd2238..c8c11de7d44c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -50,14 +50,13 @@ static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);
+static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
-static struct binder_node *binder_context_mgr_node;
-static kuid_t binder_context_mgr_uid = INVALID_UID;
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;
@@ -116,6 +115,9 @@ module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+module_param_named(devices, binder_devices_param, charp, S_IRUGO);
+
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
@@ -146,6 +148,17 @@ module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
binder_stop_on_user_error = 2; \
} while (0)
+#define to_flat_binder_object(hdr) \
+ container_of(hdr, struct flat_binder_object, hdr)
+
+#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
+
+#define to_binder_buffer_object(hdr) \
+ container_of(hdr, struct binder_buffer_object, hdr)
+
+#define to_binder_fd_array_object(hdr) \
+ container_of(hdr, struct binder_fd_array_object, hdr)
+
enum binder_stat_types {
BINDER_STAT_PROC,
BINDER_STAT_THREAD,
@@ -159,7 +172,7 @@ enum binder_stat_types {
struct binder_stats {
int br[_IOC_NR(BR_FAILED_REPLY) + 1];
- int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+ int bc[_IOC_NR(BC_REPLY_SG) + 1];
int obj_created[BINDER_STAT_COUNT];
int obj_deleted[BINDER_STAT_COUNT];
};
@@ -187,6 +200,7 @@ struct binder_transaction_log_entry {
int to_node;
int data_size;
int offsets_size;
+ const char *context_name;
};
struct binder_transaction_log {
int next;
@@ -211,6 +225,18 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
return e;
}
+struct binder_context {
+ struct binder_node *binder_context_mgr_node;
+ kuid_t binder_context_mgr_uid;
+ const char *name;
+};
+
+struct binder_device {
+ struct hlist_node hlist;
+ struct miscdevice miscdev;
+ struct binder_context context;
+};
+
struct binder_work {
struct list_head entry;
enum {
@@ -283,6 +309,7 @@ struct binder_buffer {
struct binder_node *target_node;
size_t data_size;
size_t offsets_size;
+ size_t extra_buffers_size;
uint8_t data[0];
};
@@ -326,6 +353,7 @@ struct binder_proc {
int ready_threads;
long default_priority;
struct dentry *debugfs_entry;
+ struct binder_context *context;
};
enum {
@@ -720,7 +748,9 @@ err_no_vma:
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
size_t data_size,
- size_t offsets_size, int is_async)
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
{
struct rb_node *n = proc->free_buffers.rb_node;
struct binder_buffer *buffer;
@@ -728,7 +758,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
struct rb_node *best_fit = NULL;
void *has_page_addr;
void *end_page_addr;
- size_t size;
+ size_t size, data_offsets_size;
if (proc->vma == NULL) {
pr_err("%d: binder_alloc_buf, no vma\n",
@@ -736,15 +766,20 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
return NULL;
}
- size = ALIGN(data_size, sizeof(void *)) +
+ data_offsets_size = ALIGN(data_size, sizeof(void *)) +
ALIGN(offsets_size, sizeof(void *));
- if (size < data_size || size < offsets_size) {
+ if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
proc->pid, data_size, offsets_size);
return NULL;
}
-
+ size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
+ if (size < data_offsets_size || size < extra_buffers_size) {
+ binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
+ proc->pid, extra_buffers_size);
+ return NULL;
+ }
if (is_async &&
proc->free_async_space < size + sizeof(struct binder_buffer)) {
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
@@ -813,6 +848,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
proc->pid, size, buffer);
buffer->data_size = data_size;
buffer->offsets_size = offsets_size;
+ buffer->extra_buffers_size = extra_buffers_size;
buffer->async_transaction = is_async;
if (is_async) {
proc->free_async_space -= size + sizeof(struct binder_buffer);
@@ -887,7 +923,8 @@ static void binder_free_buf(struct binder_proc *proc,
buffer_size = binder_buffer_size(proc, buffer);
size = ALIGN(buffer->data_size, sizeof(void *)) +
- ALIGN(buffer->offsets_size, sizeof(void *));
+ ALIGN(buffer->offsets_size, sizeof(void *)) +
+ ALIGN(buffer->extra_buffers_size, sizeof(void *));
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_free_buf %p size %zd buffer_size %zd\n",
@@ -1001,8 +1038,10 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
if (internal) {
if (target_list == NULL &&
node->internal_strong_refs == 0 &&
- !(node == binder_context_mgr_node &&
- node->has_strong_ref)) {
+ !(node->proc &&
+ node == node->proc->context->
+ binder_context_mgr_node &&
+ node->has_strong_ref)) {
pr_err("invalid inc strong node for %d\n",
node->debug_id);
return -EINVAL;
@@ -1103,6 +1142,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
struct binder_ref *ref, *new_ref;
+ struct binder_context *context = proc->context;
while (*p) {
parent = *p;
@@ -1125,7 +1165,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+ new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
if (ref->desc > new_ref->desc)
@@ -1312,11 +1352,158 @@ static void binder_send_failed_reply(struct binder_transaction *t,
}
}
+/**
+ * binder_validate_object() - checks for a valid metadata object in a buffer.
+ * @buffer: binder_buffer that we're parsing.
+ * @offset: offset in the buffer at which to validate an object.
+ *
+ * Return: If there's a valid metadata object at @offset in @buffer, the
+ * size of that object. Otherwise, it returns zero.
+ */
+static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
+{
+ /* Check if we can read a header first */
+ struct binder_object_header *hdr;
+ size_t object_size = 0;
+
+ if (offset > buffer->data_size - sizeof(*hdr) ||
+ buffer->data_size < sizeof(*hdr) ||
+ !IS_ALIGNED(offset, sizeof(u32)))
+ return 0;
+
+ /* Ok, now see if we can read a complete object. */
+ hdr = (struct binder_object_header *)(buffer->data + offset);
+ switch (hdr->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER:
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE:
+ object_size = sizeof(struct flat_binder_object);
+ break;
+ case BINDER_TYPE_FD:
+ object_size = sizeof(struct binder_fd_object);
+ break;
+ case BINDER_TYPE_PTR:
+ object_size = sizeof(struct binder_buffer_object);
+ break;
+ case BINDER_TYPE_FDA:
+ object_size = sizeof(struct binder_fd_array_object);
+ break;
+ default:
+ return 0;
+ }
+ if (offset <= buffer->data_size - object_size &&
+ buffer->data_size >= object_size)
+ return object_size;
+ else
+ return 0;
+}
+
+/**
+ * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
+ * @b: binder_buffer containing the object
+ * @index: index in offset array at which the binder_buffer_object is
+ * located
+ * @start: points to the start of the offset array
+ * @num_valid: the number of valid offsets in the offset array
+ *
+ * Return: If @index is within the valid range of the offset array
+ * described by @start and @num_valid, and if there's a valid
+ * binder_buffer_object at the offset found in index @index
+ * of the offset array, that object is returned. Otherwise,
+ * %NULL is returned.
+ * Note that the offset found in index @index itself is not
+ * verified; this function assumes that @num_valid elements
+ * from @start were previously verified to have valid offsets.
+ */
+static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
+ binder_size_t index,
+ binder_size_t *start,
+ binder_size_t num_valid)
+{
+ struct binder_buffer_object *buffer_obj;
+ binder_size_t *offp;
+
+ if (index >= num_valid)
+ return NULL;
+
+ offp = start + index;
+ buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
+ if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
+ return NULL;
+
+ return buffer_obj;
+}
+
+/**
+ * binder_validate_fixup() - validates pointer/fd fixups happen in order.
+ * @b: transaction buffer
+ * @objects_start start of objects buffer
+ * @buffer: binder_buffer_object in which to fix up
+ * @offset: start offset in @buffer to fix up
+ * @last_obj: last binder_buffer_object that we fixed up in
+ * @last_min_offset: minimum fixup offset in @last_obj
+ *
+ * Return: %true if a fixup in buffer @buffer at offset @offset is
+ * allowed.
+ *
+ * For safety reasons, we only allow fixups inside a buffer to happen
+ * at increasing offsets; additionally, we only allow fixup on the last
+ * buffer object that was verified, or one of its parents.
+ *
+ * Example of what is allowed:
+ *
+ * A
+ * B (parent = A, offset = 0)
+ * C (parent = A, offset = 16)
+ * D (parent = C, offset = 0)
+ * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
+ *
+ * Examples of what is not allowed:
+ *
+ * Decreasing offsets within the same parent:
+ * A
+ * C (parent = A, offset = 16)
+ * B (parent = A, offset = 0) // decreasing offset within A
+ *
+ * Referring to a parent that wasn't the last object or any of its parents:
+ * A
+ * B (parent = A, offset = 0)
+ * C (parent = A, offset = 0)
+ * C (parent = A, offset = 16)
+ * D (parent = B, offset = 0) // B is not A or any of A's parents
+ */
+static bool binder_validate_fixup(struct binder_buffer *b,
+ binder_size_t *objects_start,
+ struct binder_buffer_object *buffer,
+ binder_size_t fixup_offset,
+ struct binder_buffer_object *last_obj,
+ binder_size_t last_min_offset)
+{
+ if (!last_obj) {
+ /* Nothing to fix up in */
+ return false;
+ }
+
+ while (last_obj != buffer) {
+ /*
+ * Safe to retrieve the parent of last_obj, since it
+ * was already previously verified by the driver.
+ */
+ if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
+ return false;
+ last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
+ last_obj = (struct binder_buffer_object *)
+ (b->data + *(objects_start + last_obj->parent));
+ }
+ return (fixup_offset >= last_min_offset);
+}
+
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_buffer *buffer,
binder_size_t *failed_at)
{
- binder_size_t *offp, *off_end;
+ binder_size_t *offp, *off_start, *off_end;
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -1327,28 +1514,30 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0);
- offp = (binder_size_t *)(buffer->data +
- ALIGN(buffer->data_size, sizeof(void *)));
+ off_start = (binder_size_t *)(buffer->data +
+ ALIGN(buffer->data_size, sizeof(void *)));
if (failed_at)
off_end = failed_at;
else
- off_end = (void *)offp + buffer->offsets_size;
- for (; offp < off_end; offp++) {
- struct flat_binder_object *fp;
+ off_end = (void *)off_start + buffer->offsets_size;
+ for (offp = off_start; offp < off_end; offp++) {
+ struct binder_object_header *hdr;
+ size_t object_size = binder_validate_object(buffer, *offp);
- if (*offp > buffer->data_size - sizeof(*fp) ||
- buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(u32))) {
- pr_err("transaction release %d bad offset %lld, size %zd\n",
+ if (object_size == 0) {
+ pr_err("transaction release %d bad object at offset %lld, size %zd\n",
debug_id, (u64)*offp, buffer->data_size);
continue;
}
- fp = (struct flat_binder_object *)(buffer->data + *offp);
- switch (fp->type) {
+ hdr = (struct binder_object_header *)(buffer->data + *offp);
+ switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
- struct binder_node *node = binder_get_node(proc, fp->binder);
+ struct flat_binder_object *fp;
+ struct binder_node *node;
+ fp = to_flat_binder_object(hdr);
+ node = binder_get_node(proc, fp->binder);
if (node == NULL) {
pr_err("transaction release %d bad node %016llx\n",
debug_id, (u64)fp->binder);
@@ -1357,12 +1546,17 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx\n",
node->debug_id, (u64)node->ptr);
- binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+ binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
+ 0);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle,
- fp->type == BINDER_TYPE_HANDLE);
+ struct flat_binder_object *fp;
+ struct binder_ref *ref;
+
+ fp = to_flat_binder_object(hdr);
+ ref = binder_get_ref(proc, fp->handle,
+ hdr->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
pr_err("transaction release %d bad handle %d\n",
@@ -1372,32 +1566,348 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d (node %d)\n",
ref->debug_id, ref->desc, ref->node->debug_id);
- binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+ binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
} break;
- case BINDER_TYPE_FD:
+ case BINDER_TYPE_FD: {
+ struct binder_fd_object *fp = to_binder_fd_object(hdr);
+
binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %d\n", fp->handle);
+ " fd %d\n", fp->fd);
if (failed_at)
- task_close_fd(proc, fp->handle);
+ task_close_fd(proc, fp->fd);
+ } break;
+ case BINDER_TYPE_PTR:
+ /*
+ * Nothing to do here, this will get cleaned up when the
+ * transaction buffer gets freed
+ */
break;
-
+ case BINDER_TYPE_FDA: {
+ struct binder_fd_array_object *fda;
+ struct binder_buffer_object *parent;
+ uintptr_t parent_buffer;
+ u32 *fd_array;
+ size_t fd_index;
+ binder_size_t fd_buf_size;
+
+ fda = to_binder_fd_array_object(hdr);
+ parent = binder_validate_ptr(buffer, fda->parent,
+ off_start,
+ offp - off_start);
+ if (!parent) {
+ pr_err("transaction release %d bad parent offset",
+ debug_id);
+ continue;
+ }
+ /*
+ * Since the parent was already fixed up, convert it
+ * back to kernel address space to access it
+ */
+ parent_buffer = parent->buffer -
+ proc->user_buffer_offset;
+
+ fd_buf_size = sizeof(u32) * fda->num_fds;
+ if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+ pr_err("transaction release %d invalid number of fds (%lld)\n",
+ debug_id, (u64)fda->num_fds);
+ continue;
+ }
+ if (fd_buf_size > parent->length ||
+ fda->parent_offset > parent->length - fd_buf_size) {
+ /* No space for all file descriptors here. */
+ pr_err("transaction release %d not enough space for %lld fds in buffer\n",
+ debug_id, (u64)fda->num_fds);
+ continue;
+ }
+ fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+ for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
+ task_close_fd(proc, fd_array[fd_index]);
+ } break;
default:
pr_err("transaction release %d bad object type %x\n",
- debug_id, fp->type);
+ debug_id, hdr->type);
break;
}
}
}
+static int binder_translate_binder(struct flat_binder_object *fp,
+ struct binder_transaction *t,
+ struct binder_thread *thread)
+{
+ struct binder_node *node;
+ struct binder_ref *ref;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+
+ node = binder_get_node(proc, fp->binder);
+ if (!node) {
+ node = binder_new_node(proc, fp->binder, fp->cookie);
+ if (!node)
+ return -ENOMEM;
+
+ node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ }
+ if (fp->cookie != node->cookie) {
+ binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
+ proc->pid, thread->pid, (u64)fp->binder,
+ node->debug_id, (u64)fp->cookie,
+ (u64)node->cookie);
+ return -EINVAL;
+ }
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
+ return -EPERM;
+
+ ref = binder_get_ref_for_node(target_proc, node);
+ if (!ref)
+ return -EINVAL;
+
+ if (fp->hdr.type == BINDER_TYPE_BINDER)
+ fp->hdr.type = BINDER_TYPE_HANDLE;
+ else
+ fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
+ fp->binder = 0;
+ fp->handle = ref->desc;
+ fp->cookie = 0;
+ binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
+
+ trace_binder_transaction_node_to_ref(t, node, ref);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " node %d u%016llx -> ref %d desc %d\n",
+ node->debug_id, (u64)node->ptr,
+ ref->debug_id, ref->desc);
+
+ return 0;
+}
+
+static int binder_translate_handle(struct flat_binder_object *fp,
+ struct binder_transaction *t,
+ struct binder_thread *thread)
+{
+ struct binder_ref *ref;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+
+ ref = binder_get_ref(proc, fp->handle,
+ fp->hdr.type == BINDER_TYPE_HANDLE);
+ if (!ref) {
+ binder_user_error("%d:%d got transaction with invalid handle, %d\n",
+ proc->pid, thread->pid, fp->handle);
+ return -EINVAL;
+ }
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
+ return -EPERM;
+
+ if (ref->node->proc == target_proc) {
+ if (fp->hdr.type == BINDER_TYPE_HANDLE)
+ fp->hdr.type = BINDER_TYPE_BINDER;
+ else
+ fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
+ fp->binder = ref->node->ptr;
+ fp->cookie = ref->node->cookie;
+ binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
+ 0, NULL);
+ trace_binder_transaction_ref_to_node(t, ref);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> node %d u%016llx\n",
+ ref->debug_id, ref->desc, ref->node->debug_id,
+ (u64)ref->node->ptr);
+ } else {
+ struct binder_ref *new_ref;
+
+ new_ref = binder_get_ref_for_node(target_proc, ref->node);
+ if (!new_ref)
+ return -EINVAL;
+
+ fp->binder = 0;
+ fp->handle = new_ref->desc;
+ fp->cookie = 0;
+ binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
+ NULL);
+ trace_binder_transaction_ref_to_ref(t, ref, new_ref);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> ref %d desc %d (node %d)\n",
+ ref->debug_id, ref->desc, new_ref->debug_id,
+ new_ref->desc, ref->node->debug_id);
+ }
+ return 0;
+}
+
+static int binder_translate_fd(int fd,
+ struct binder_transaction *t,
+ struct binder_thread *thread,
+ struct binder_transaction *in_reply_to)
+{
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+ int target_fd;
+ struct file *file;
+ int ret;
+ bool target_allows_fd;
+
+ if (in_reply_to)
+ target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
+ else
+ target_allows_fd = t->buffer->target_node->accept_fds;
+ if (!target_allows_fd) {
+ binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
+ proc->pid, thread->pid,
+ in_reply_to ? "reply" : "transaction",
+ fd);
+ ret = -EPERM;
+ goto err_fd_not_accepted;
+ }
+
+ file = fget(fd);
+ if (!file) {
+ binder_user_error("%d:%d got transaction with invalid fd, %d\n",
+ proc->pid, thread->pid, fd);
+ ret = -EBADF;
+ goto err_fget;
+ }
+ ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
+ if (ret < 0) {
+ ret = -EPERM;
+ goto err_security;
+ }
+
+ target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+ if (target_fd < 0) {
+ ret = -ENOMEM;
+ goto err_get_unused_fd;
+ }
+ task_fd_install(target_proc, target_fd, file);
+ trace_binder_transaction_fd(t, fd, target_fd);
+ binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
+ fd, target_fd);
+
+ return target_fd;
+
+err_get_unused_fd:
+err_security:
+ fput(file);
+err_fget:
+err_fd_not_accepted:
+ return ret;
+}
+
+static int binder_translate_fd_array(struct binder_fd_array_object *fda,
+ struct binder_buffer_object *parent,
+ struct binder_transaction *t,
+ struct binder_thread *thread,
+ struct binder_transaction *in_reply_to)
+{
+ binder_size_t fdi, fd_buf_size, num_installed_fds;
+ int target_fd;
+ uintptr_t parent_buffer;
+ u32 *fd_array;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+
+ fd_buf_size = sizeof(u32) * fda->num_fds;
+ if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
+ binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
+ proc->pid, thread->pid, (u64)fda->num_fds);
+ return -EINVAL;
+ }
+ if (fd_buf_size > parent->length ||
+ fda->parent_offset > parent->length - fd_buf_size) {
+ /* No space for all file descriptors here. */
+ binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
+ proc->pid, thread->pid, (u64)fda->num_fds);
+ return -EINVAL;
+ }
+ /*
+ * Since the parent was already fixed up, convert it
+ * back to the kernel address space to access it
+ */
+ parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+ fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+ if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
+ binder_user_error("%d:%d parent offset not aligned correctly.\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+ for (fdi = 0; fdi < fda->num_fds; fdi++) {
+ target_fd = binder_translate_fd(fd_array[fdi], t, thread,
+ in_reply_to);
+ if (target_fd < 0)
+ goto err_translate_fd_failed;
+ fd_array[fdi] = target_fd;
+ }
+ return 0;
+
+err_translate_fd_failed:
+ /*
+ * Failed to allocate fd or security error, free fds
+ * installed so far.
+ */
+ num_installed_fds = fdi;
+ for (fdi = 0; fdi < num_installed_fds; fdi++)
+ task_close_fd(target_proc, fd_array[fdi]);
+ return target_fd;
+}
+
+static int binder_fixup_parent(struct binder_transaction *t,
+ struct binder_thread *thread,
+ struct binder_buffer_object *bp,
+ binder_size_t *off_start,
+ binder_size_t num_valid,
+ struct binder_buffer_object *last_fixup_obj,
+ binder_size_t last_fixup_min_off)
+{
+ struct binder_buffer_object *parent;
+ u8 *parent_buffer;
+ struct binder_buffer *b = t->buffer;
+ struct binder_proc *proc = thread->proc;
+ struct binder_proc *target_proc = t->to_proc;
+
+ if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
+ return 0;
+
+ parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
+ if (!parent) {
+ binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+
+ if (!binder_validate_fixup(b, off_start,
+ parent, bp->parent_offset,
+ last_fixup_obj,
+ last_fixup_min_off)) {
+ binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+
+ if (parent->length < sizeof(binder_uintptr_t) ||
+ bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
+ /* No space for a pointer here! */
+ binder_user_error("%d:%d got transaction with invalid parent offset\n",
+ proc->pid, thread->pid);
+ return -EINVAL;
+ }
+ parent_buffer = (u8 *)(parent->buffer -
+ target_proc->user_buffer_offset);
+ *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
+
+ return 0;
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
- struct binder_transaction_data *tr, int reply)
+ struct binder_transaction_data *tr, int reply,
+ binder_size_t extra_buffers_size)
{
+ int ret;
struct binder_transaction *t;
struct binder_work *tcomplete;
- binder_size_t *offp, *off_end;
+ binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
+ u8 *sg_bufp, *sg_buf_end;
struct binder_proc *target_proc;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -1406,6 +1916,9 @@ static void binder_transaction(struct binder_proc *proc,
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
uint32_t return_error;
+ struct binder_buffer_object *last_fixup_obj = NULL;
+ binder_size_t last_fixup_min_off = 0;
+ struct binder_context *context = proc->context;
e = binder_transaction_log_add(&binder_transaction_log);
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
@@ -1414,6 +1927,7 @@ static void binder_transaction(struct binder_proc *proc,
e->target_handle = tr->target.handle;
e->data_size = tr->data_size;
e->offsets_size = tr->offsets_size;
+ e->context_name = proc->context->name;
if (reply) {
in_reply_to = thread->transaction_stack;
@@ -1466,7 +1980,7 @@ static void binder_transaction(struct binder_proc *proc,
}
target_node = ref->node;
} else {
- target_node = binder_context_mgr_node;
+ target_node = context->binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
goto err_no_context_mgr_node;
@@ -1533,20 +2047,22 @@ static void binder_transaction(struct binder_proc *proc,
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
+ "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_thread->pid,
(u64)tr->data.ptr.buffer,
(u64)tr->data.ptr.offsets,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ (u64)extra_buffers_size);
else
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
+ "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
proc->pid, thread->pid, t->debug_id,
target_proc->pid, target_node->debug_id,
(u64)tr->data.ptr.buffer,
(u64)tr->data.ptr.offsets,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ (u64)extra_buffers_size);
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
@@ -1562,7 +2078,8 @@ static void binder_transaction(struct binder_proc *proc,
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_buf(target_proc, tr->data_size,
- tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+ tr->offsets_size, extra_buffers_size,
+ !reply && (t->flags & TF_ONE_WAY));
if (t->buffer == NULL) {
return_error = BR_FAILED_REPLY;
goto err_binder_alloc_buf_failed;
@@ -1575,8 +2092,9 @@ static void binder_transaction(struct binder_proc *proc,
if (target_node)
binder_inc_node(target_node, 1, 0, NULL);
- offp = (binder_size_t *)(t->buffer->data +
- ALIGN(tr->data_size, sizeof(void *)));
+ off_start = (binder_size_t *)(t->buffer->data +
+ ALIGN(tr->data_size, sizeof(void *)));
+ offp = off_start;
if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size)) {
@@ -1598,175 +2116,138 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
- off_end = (void *)offp + tr->offsets_size;
+ if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
+ binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
+ proc->pid, thread->pid,
+ (u64)extra_buffers_size);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_offset;
+ }
+ off_end = (void *)off_start + tr->offsets_size;
+ sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
+ sg_buf_end = sg_bufp + extra_buffers_size;
off_min = 0;
for (; offp < off_end; offp++) {
- struct flat_binder_object *fp;
+ struct binder_object_header *hdr;
+ size_t object_size = binder_validate_object(t->buffer, *offp);
- if (*offp > t->buffer->data_size - sizeof(*fp) ||
- *offp < off_min ||
- t->buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(u32))) {
- binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
+ if (object_size == 0 || *offp < off_min) {
+ binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
proc->pid, thread->pid, (u64)*offp,
(u64)off_min,
- (u64)(t->buffer->data_size -
- sizeof(*fp)));
+ (u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
goto err_bad_offset;
}
- fp = (struct flat_binder_object *)(t->buffer->data + *offp);
- off_min = *offp + sizeof(struct flat_binder_object);
- switch (fp->type) {
+
+ hdr = (struct binder_object_header *)(t->buffer->data + *offp);
+ off_min = *offp + object_size;
+ switch (hdr->type) {
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
- struct binder_ref *ref;
- struct binder_node *node = binder_get_node(proc, fp->binder);
+ struct flat_binder_object *fp;
- if (node == NULL) {
- node = binder_new_node(proc, fp->binder, fp->cookie);
- if (node == NULL) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_new_node_failed;
- }
- node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
- }
- if (fp->cookie != node->cookie) {
- binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
- proc->pid, thread->pid,
- (u64)fp->binder, node->debug_id,
- (u64)fp->cookie, (u64)node->cookie);
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
- }
- if (security_binder_transfer_binder(proc->tsk,
- target_proc->tsk)) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
- }
- ref = binder_get_ref_for_node(target_proc, node);
- if (ref == NULL) {
+ fp = to_flat_binder_object(hdr);
+ ret = binder_translate_binder(fp, t, thread);
+ if (ret < 0) {
return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
+ goto err_translate_failed;
}
- if (fp->type == BINDER_TYPE_BINDER)
- fp->type = BINDER_TYPE_HANDLE;
- else
- fp->type = BINDER_TYPE_WEAK_HANDLE;
- fp->binder = 0;
- fp->handle = ref->desc;
- fp->cookie = 0;
- binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
- &thread->todo);
-
- trace_binder_transaction_node_to_ref(t, node, ref);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " node %d u%016llx -> ref %d desc %d\n",
- node->debug_id, (u64)node->ptr,
- ref->debug_id, ref->desc);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle,
- fp->type == BINDER_TYPE_HANDLE);
+ struct flat_binder_object *fp;
- if (ref == NULL) {
- binder_user_error("%d:%d got transaction with invalid handle, %d\n",
- proc->pid,
- thread->pid, fp->handle);
+ fp = to_flat_binder_object(hdr);
+ ret = binder_translate_handle(fp, t, thread);
+ if (ret < 0) {
return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_failed;
- }
- if (security_binder_transfer_binder(proc->tsk,
- target_proc->tsk)) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_failed;
- }
- if (ref->node->proc == target_proc) {
- if (fp->type == BINDER_TYPE_HANDLE)
- fp->type = BINDER_TYPE_BINDER;
- else
- fp->type = BINDER_TYPE_WEAK_BINDER;
- fp->binder = ref->node->ptr;
- fp->cookie = ref->node->cookie;
- binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
- trace_binder_transaction_ref_to_node(t, ref);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d -> node %d u%016llx\n",
- ref->debug_id, ref->desc, ref->node->debug_id,
- (u64)ref->node->ptr);
- } else {
- struct binder_ref *new_ref;
-
- new_ref = binder_get_ref_for_node(target_proc, ref->node);
- if (new_ref == NULL) {
- return_error = BR_FAILED_REPLY;
- goto err_binder_get_ref_for_node_failed;
- }
- fp->binder = 0;
- fp->handle = new_ref->desc;
- fp->cookie = 0;
- binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
- trace_binder_transaction_ref_to_ref(t, ref,
- new_ref);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d -> ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, new_ref->debug_id,
- new_ref->desc, ref->node->debug_id);
+ goto err_translate_failed;
}
} break;
case BINDER_TYPE_FD: {
- int target_fd;
- struct file *file;
-
- if (reply) {
- if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
- binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
- proc->pid, thread->pid, fp->handle);
- return_error = BR_FAILED_REPLY;
- goto err_fd_not_allowed;
- }
- } else if (!target_node->accept_fds) {
- binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
- proc->pid, thread->pid, fp->handle);
+ struct binder_fd_object *fp = to_binder_fd_object(hdr);
+ int target_fd = binder_translate_fd(fp->fd, t, thread,
+ in_reply_to);
+
+ if (target_fd < 0) {
return_error = BR_FAILED_REPLY;
- goto err_fd_not_allowed;
+ goto err_translate_failed;
}
-
- file = fget(fp->handle);
- if (file == NULL) {
- binder_user_error("%d:%d got transaction with invalid fd, %d\n",
- proc->pid, thread->pid, fp->handle);
+ fp->pad_binder = 0;
+ fp->fd = target_fd;
+ } break;
+ case BINDER_TYPE_FDA: {
+ struct binder_fd_array_object *fda =
+ to_binder_fd_array_object(hdr);
+ struct binder_buffer_object *parent =
+ binder_validate_ptr(t->buffer, fda->parent,
+ off_start,
+ offp - off_start);
+ if (!parent) {
+ binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
+ proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
- goto err_fget_failed;
+ goto err_bad_parent;
}
- if (security_binder_transfer_file(proc->tsk,
- target_proc->tsk,
- file) < 0) {
- fput(file);
+ if (!binder_validate_fixup(t->buffer, off_start,
+ parent, fda->parent_offset,
+ last_fixup_obj,
+ last_fixup_min_off)) {
+ binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
+ proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
- goto err_get_unused_fd_failed;
+ goto err_bad_parent;
}
- target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
- if (target_fd < 0) {
- fput(file);
+ ret = binder_translate_fd_array(fda, parent, t, thread,
+ in_reply_to);
+ if (ret < 0) {
return_error = BR_FAILED_REPLY;
- goto err_get_unused_fd_failed;
+ goto err_translate_failed;
}
- task_fd_install(target_proc, target_fd, file);
- trace_binder_transaction_fd(t, fp->handle, target_fd);
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %d -> %d\n", fp->handle, target_fd);
- /* TODO: fput? */
- fp->binder = 0;
- fp->handle = target_fd;
+ last_fixup_obj = parent;
+ last_fixup_min_off =
+ fda->parent_offset + sizeof(u32) * fda->num_fds;
+ } break;
+ case BINDER_TYPE_PTR: {
+ struct binder_buffer_object *bp =
+ to_binder_buffer_object(hdr);
+ size_t buf_left = sg_buf_end - sg_bufp;
+
+ if (bp->length > buf_left) {
+ binder_user_error("%d:%d got transaction with too large buffer\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_offset;
+ }
+ if (copy_from_user(sg_bufp,
+ (const void __user *)(uintptr_t)
+ bp->buffer, bp->length)) {
+ binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_copy_data_failed;
+ }
+ /* Fixup buffer pointer to target proc address space */
+ bp->buffer = (uintptr_t)sg_bufp +
+ target_proc->user_buffer_offset;
+ sg_bufp += ALIGN(bp->length, sizeof(u64));
+
+ ret = binder_fixup_parent(t, thread, bp, off_start,
+ offp - off_start,
+ last_fixup_obj,
+ last_fixup_min_off);
+ if (ret < 0) {
+ return_error = BR_FAILED_REPLY;
+ goto err_translate_failed;
+ }
+ last_fixup_obj = bp;
+ last_fixup_min_off = 0;
} break;
-
default:
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
- proc->pid, thread->pid, fp->type);
+ proc->pid, thread->pid, hdr->type);
return_error = BR_FAILED_REPLY;
goto err_bad_object_type;
}
@@ -1804,14 +2285,10 @@ static void binder_transaction(struct binder_proc *proc,
}
return;
-err_get_unused_fd_failed:
-err_fget_failed:
-err_fd_not_allowed:
-err_binder_get_ref_for_node_failed:
-err_binder_get_ref_failed:
-err_binder_new_node_failed:
+err_translate_failed:
err_bad_object_type:
err_bad_offset:
+err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
@@ -1855,6 +2332,7 @@ static int binder_thread_write(struct binder_proc *proc,
binder_size_t *consumed)
{
uint32_t cmd;
+ struct binder_context *context = proc->context;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
@@ -1881,10 +2359,10 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
- if (target == 0 && binder_context_mgr_node &&
+ if (target == 0 && context->binder_context_mgr_node &&
(cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
ref = binder_get_ref_for_node(proc,
- binder_context_mgr_node);
+ context->binder_context_mgr_node);
if (ref->desc != target) {
binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
proc->pid, thread->pid,
@@ -2029,6 +2507,17 @@ static int binder_thread_write(struct binder_proc *proc,
break;
}
+ case BC_TRANSACTION_SG:
+ case BC_REPLY_SG: {
+ struct binder_transaction_data_sg tr;
+
+ if (copy_from_user(&tr, ptr, sizeof(tr)))
+ return -EFAULT;
+ ptr += sizeof(tr);
+ binder_transaction(proc, thread, &tr.transaction_data,
+ cmd == BC_REPLY_SG, tr.buffers_size);
+ break;
+ }
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
@@ -2036,7 +2525,8 @@ static int binder_thread_write(struct binder_proc *proc,
if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
- binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+ binder_transaction(proc, thread, &tr,
+ cmd == BC_REPLY, 0);
break;
}
@@ -2789,9 +3279,11 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
+ struct binder_context *context = proc->context;
+
kuid_t curr_euid = current_euid();
- if (binder_context_mgr_node != NULL) {
+ if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
goto out;
@@ -2799,27 +3291,27 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
ret = security_binder_set_context_mgr(proc->tsk);
if (ret < 0)
goto out;
- if (uid_valid(binder_context_mgr_uid)) {
- if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
+ if (uid_valid(context->binder_context_mgr_uid)) {
+ if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
from_kuid(&init_user_ns, curr_euid),
from_kuid(&init_user_ns,
- binder_context_mgr_uid));
+ context->binder_context_mgr_uid));
ret = -EPERM;
goto out;
}
} else {
- binder_context_mgr_uid = curr_euid;
+ context->binder_context_mgr_uid = curr_euid;
}
- binder_context_mgr_node = binder_new_node(proc, 0, 0);
- if (binder_context_mgr_node == NULL) {
+ context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
+ if (!context->binder_context_mgr_node) {
ret = -ENOMEM;
goto out;
}
- binder_context_mgr_node->local_weak_refs++;
- binder_context_mgr_node->local_strong_refs++;
- binder_context_mgr_node->has_strong_ref = 1;
- binder_context_mgr_node->has_weak_ref = 1;
+ context->binder_context_mgr_node->local_weak_refs++;
+ context->binder_context_mgr_node->local_strong_refs++;
+ context->binder_context_mgr_node->has_strong_ref = 1;
+ context->binder_context_mgr_node->has_weak_ref = 1;
out:
return ret;
}
@@ -3044,6 +3536,7 @@ err_bad_arg:
static int binder_open(struct inode *nodp, struct file *filp)
{
struct binder_proc *proc;
+ struct binder_device *binder_dev;
binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
current->group_leader->pid, current->pid);
@@ -3056,6 +3549,9 @@ static int binder_open(struct inode *nodp, struct file *filp)
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
+ binder_dev = container_of(filp->private_data, struct binder_device,
+ miscdev);
+ proc->context = &binder_dev->context;
binder_lock(__func__);
@@ -3071,8 +3567,17 @@ static int binder_open(struct inode *nodp, struct file *filp)
char strbuf[11];
snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+ /*
+ * proc debug entries are shared between contexts, so
+ * this will fail if the process tries to open the driver
+ * again with a different context. The printing code will
+ * anyway print all contexts that a given PID has, so this
+ * is not a problem.
+ */
proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
- binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
+ binder_debugfs_dir_entry_proc,
+ (void *)(unsigned long)proc->pid,
+ &binder_proc_fops);
}
return 0;
@@ -3165,6 +3670,7 @@ static int binder_node_release(struct binder_node *node, int refs)
static void binder_deferred_release(struct binder_proc *proc)
{
struct binder_transaction *t;
+ struct binder_context *context = proc->context;
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, buffers,
active_transactions, page_count;
@@ -3174,11 +3680,12 @@ static void binder_deferred_release(struct binder_proc *proc)
hlist_del(&proc->proc_node);
- if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+ if (context->binder_context_mgr_node &&
+ context->binder_context_mgr_node->proc == proc) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%s: %d context_mgr_node gone\n",
__func__, proc->pid);
- binder_context_mgr_node = NULL;
+ context->binder_context_mgr_node = NULL;
}
threads = 0;
@@ -3471,6 +3978,7 @@ static void print_binder_proc(struct seq_file *m,
size_t header_pos;
seq_printf(m, "proc %d\n", proc->pid);
+ seq_printf(m, "context %s\n", proc->context->name);
header_pos = m->count;
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
@@ -3540,7 +4048,9 @@ static const char * const binder_command_strings[] = {
"BC_EXIT_LOOPER",
"BC_REQUEST_DEATH_NOTIFICATION",
"BC_CLEAR_DEATH_NOTIFICATION",
- "BC_DEAD_BINDER_DONE"
+ "BC_DEAD_BINDER_DONE",
+ "BC_TRANSACTION_SG",
+ "BC_REPLY_SG",
};
static const char * const binder_objstat_strings[] = {
@@ -3595,6 +4105,7 @@ static void print_binder_proc_stats(struct seq_file *m,
int count, strong, weak;
seq_printf(m, "proc %d\n", proc->pid);
+ seq_printf(m, "context %s\n", proc->context->name);
count = 0;
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
count++;
@@ -3702,23 +4213,18 @@ static int binder_transactions_show(struct seq_file *m, void *unused)
static int binder_proc_show(struct seq_file *m, void *unused)
{
struct binder_proc *itr;
- struct binder_proc *proc = m->private;
+ int pid = (unsigned long)m->private;
int do_lock = !binder_debug_no_lock;
- bool valid_proc = false;
if (do_lock)
binder_lock(__func__);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
- if (itr == proc) {
- valid_proc = true;
- break;
+ if (itr->pid == pid) {
+ seq_puts(m, "binder proc state:\n");
+ print_binder_proc(m, itr, 1);
}
}
- if (valid_proc) {
- seq_puts(m, "binder proc state:\n");
- print_binder_proc(m, proc, 1);
- }
if (do_lock)
binder_unlock(__func__);
return 0;
@@ -3728,11 +4234,11 @@ static void print_binder_transaction_log_entry(struct seq_file *m,
struct binder_transaction_log_entry *e)
{
seq_printf(m,
- "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+ "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
e->debug_id, (e->call_type == 2) ? "reply" :
((e->call_type == 1) ? "async" : "call "), e->from_proc,
- e->from_thread, e->to_proc, e->to_thread, e->to_node,
- e->target_handle, e->data_size, e->offsets_size);
+ e->from_thread, e->to_proc, e->to_thread, e->context_name,
+ e->to_node, e->target_handle, e->data_size, e->offsets_size);
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
@@ -3760,20 +4266,44 @@ static const struct file_operations binder_fops = {
.release = binder_release,
};
-static struct miscdevice binder_miscdev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "binder",
- .fops = &binder_fops
-};
-
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
+static int __init init_binder_device(const char *name)
+{
+ int ret;
+ struct binder_device *binder_device;
+
+ binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
+ if (!binder_device)
+ return -ENOMEM;
+
+ binder_device->miscdev.fops = &binder_fops;
+ binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
+ binder_device->miscdev.name = name;
+
+ binder_device->context.binder_context_mgr_uid = INVALID_UID;
+ binder_device->context.name = name;
+
+ ret = misc_register(&binder_device->miscdev);
+ if (ret < 0) {
+ kfree(binder_device);
+ return ret;
+ }
+
+ hlist_add_head(&binder_device->hlist, &binder_devices);
+
+ return ret;
+}
+
static int __init binder_init(void)
{
int ret;
+ char *device_name, *device_names;
+ struct binder_device *device;
+ struct hlist_node *tmp;
binder_deferred_workqueue = create_singlethread_workqueue("binder");
if (!binder_deferred_workqueue)
@@ -3783,7 +4313,7 @@ static int __init binder_init(void)
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
binder_debugfs_dir_entry_root);
- ret = misc_register(&binder_miscdev);
+
if (binder_debugfs_dir_entry_root) {
debugfs_create_file("state",
S_IRUGO,
@@ -3811,6 +4341,37 @@ static int __init binder_init(void)
&binder_transaction_log_failed,
&binder_transaction_log_fops);
}
+
+ /*
+ * Copy the module_parameter string, because we don't want to
+ * tokenize it in-place.
+ */
+ device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
+ if (!device_names) {
+ ret = -ENOMEM;
+ goto err_alloc_device_names_failed;
+ }
+ strcpy(device_names, binder_devices_param);
+
+ while ((device_name = strsep(&device_names, ","))) {
+ ret = init_binder_device(device_name);
+ if (ret)
+ goto err_init_binder_device_failed;
+ }
+
+ return ret;
+
+err_init_binder_device_failed:
+ hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
+ misc_deregister(&device->miscdev);
+ hlist_del(&device->hlist);
+ kfree(device);
+ }
+err_alloc_device_names_failed:
+ debugfs_remove_recursive(binder_debugfs_dir_entry_root);
+
+ destroy_workqueue(binder_deferred_workqueue);
+
return ret;
}
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 95dca75efde9..fe3af13f0d38 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -53,8 +53,10 @@
#define TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID 0x0D
#define TZ_PIL_AUTH_QDSP6_PROC 1
#define ADSP_MMAP_HEAP_ADDR 4
+#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
#define FASTRPC_ENOSUCH 39
#define VMID_SSC_Q6 5
+#define VMID_ADSP_Q6 6
#define DEBUGFS_SIZE 1024
#define RPC_TIMEOUT (5 * HZ)
@@ -179,6 +181,7 @@ struct fastrpc_ctx_lst {
};
struct fastrpc_smmu {
+ struct device *dev;
struct dma_iommu_mapping *mapping;
int cb;
int enabled;
@@ -354,7 +357,7 @@ static void fastrpc_buf_free(struct fastrpc_buf *buf, int cache)
hyp_assign_phys(buf->phys, buf_page_size(buf->size),
srcVM, 2, destVM, destVMperm, 1);
}
- dma_free_coherent(fl->sctx->dev, buf->size, buf->virt,
+ dma_free_coherent(fl->sctx->smmu.dev, buf->size, buf->virt,
buf->phys);
}
kfree(buf);
@@ -380,7 +383,8 @@ static void fastrpc_buf_list_free(struct fastrpc_file *fl)
static void fastrpc_mmap_add(struct fastrpc_mmap *map)
{
- if (map->flags == ADSP_MMAP_HEAP_ADDR) {
+ if (map->flags == ADSP_MMAP_HEAP_ADDR ||
+ map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
struct fastrpc_apps *me = &gfa;
spin_lock(&me->hlock);
@@ -401,7 +405,8 @@ static int fastrpc_mmap_find(struct fastrpc_file *fl, int fd, uintptr_t va,
struct fastrpc_apps *me = &gfa;
struct fastrpc_mmap *match = 0, *map;
struct hlist_node *n;
- if (mflags == ADSP_MMAP_HEAP_ADDR) {
+ if (mflags == ADSP_MMAP_HEAP_ADDR ||
+ mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
spin_lock(&me->hlock);
hlist_for_each_entry_safe(map, n, &me->maps, hn) {
if (va >= map->va &&
@@ -505,7 +510,8 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map)
if (!map)
return;
fl = map->fl;
- if (map->flags == ADSP_MMAP_HEAP_ADDR) {
+ if (map->flags == ADSP_MMAP_HEAP_ADDR ||
+ map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
spin_lock(&me->hlock);
map->refs--;
if (!map->refs)
@@ -520,11 +526,8 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map)
}
if (map->refs > 0)
return;
- if (map->secure)
- sess = fl->secsctx;
- else
- sess = fl->sctx;
- if (map->flags == ADSP_MMAP_HEAP_ADDR) {
+ if (map->flags == ADSP_MMAP_HEAP_ADDR ||
+ map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
DEFINE_DMA_ATTRS(attrs);
if (me->dev == NULL) {
@@ -541,11 +544,16 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map)
int destVM[1] = {VMID_HLOS};
int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+ if (map->secure)
+ sess = fl->secsctx;
+ else
+ sess = fl->sctx;
+
if (!IS_ERR_OR_NULL(map->handle))
ion_free(fl->apps->client, map->handle);
if (sess->smmu.enabled) {
if (map->size || map->phys)
- msm_dma_unmap_sg(sess->dev,
+ msm_dma_unmap_sg(sess->smmu.dev,
map->table->sgl,
map->table->nents, DMA_BIDIRECTIONAL,
map->buf);
@@ -598,7 +606,8 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
map->fl = fl;
map->fd = fd;
map->attr = attr;
- if (mflags == ADSP_MMAP_HEAP_ADDR) {
+ if (mflags == ADSP_MMAP_HEAP_ADDR ||
+ mflags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
map->apps = me;
map->fl = 0;
VERIFY(err, !dma_alloc_memory(&region_start, len));
@@ -606,6 +615,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
goto bail;
map->phys = (uintptr_t)region_start;
map->size = len;
+ map->va = map->phys;
} else {
VERIFY(err, !IS_ERR_OR_NULL(map->handle =
ion_import_dma_buf(fl->apps->client, fd)));
@@ -637,7 +647,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
if (err)
goto bail;
VERIFY(err, !IS_ERR_OR_NULL(map->attach =
- dma_buf_attach(map->buf, sess->dev)));
+ dma_buf_attach(map->buf, sess->smmu.dev)));
if (err)
goto bail;
VERIFY(err, !IS_ERR_OR_NULL(map->table =
@@ -656,7 +666,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
dma_set_attr(DMA_ATTR_FORCE_COHERENT, &attrs);
VERIFY(err, map->table->nents ==
- msm_dma_map_sg_attrs(sess->dev,
+ msm_dma_map_sg_attrs(sess->smmu.dev,
map->table->sgl, map->table->nents,
DMA_BIDIRECTIONAL, map->buf, &attrs));
if (err)
@@ -686,8 +696,8 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd, unsigned attr,
if (err)
goto bail;
}
+ map->va = va;
}
- map->va = va;
map->len = len;
fastrpc_mmap_add(map);
@@ -732,12 +742,12 @@ static int fastrpc_buf_alloc(struct fastrpc_file *fl, ssize_t size,
buf->virt = 0;
buf->phys = 0;
buf->size = size;
- buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
+ buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
(void *)&buf->phys, GFP_KERNEL);
if (IS_ERR_OR_NULL(buf->virt)) {
/* free cache and retry */
fastrpc_buf_list_free(fl);
- buf->virt = dma_alloc_coherent(fl->sctx->dev, buf->size,
+ buf->virt = dma_alloc_coherent(fl->sctx->smmu.dev, buf->size,
(void *)&buf->phys, GFP_KERNEL);
VERIFY(err, !IS_ERR_OR_NULL(buf->virt));
}
@@ -1483,6 +1493,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
return err;
}
+static int fastrpc_channel_open(struct fastrpc_file *fl);
static int fastrpc_init_process(struct fastrpc_file *fl,
struct fastrpc_ioctl_init_attrs *uproc)
{
@@ -1491,6 +1502,10 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
struct fastrpc_ioctl_init *init = &uproc->init;
struct smq_phy_page pages[1];
struct fastrpc_mmap *file = 0, *mem = 0;
+
+ VERIFY(err, !fastrpc_channel_open(fl));
+ if (err)
+ goto bail;
if (init->flags == FASTRPC_INIT_ATTACH) {
remote_arg_t ra[1];
int tgid = current->tgid;
@@ -1573,6 +1588,56 @@ static int fastrpc_init_process(struct fastrpc_file *fl,
FASTRPC_MODE_PARALLEL, 1, &ioctl)));
if (err)
goto bail;
+ } else if (init->flags == FASTRPC_INIT_CREATE_STATIC) {
+ int srcVM[1] = {VMID_HLOS};
+ int destVM[1] = {VMID_ADSP_Q6};
+ int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+ remote_arg_t ra[3];
+ uint64_t phys = 0;
+ ssize_t size;
+ int fds[3];
+ char *proc_name = (unsigned char *)init->file;
+ struct {
+ int pgid;
+ int namelen;
+ int pageslen;
+ } inbuf;
+ inbuf.pgid = current->tgid;
+ inbuf.namelen = strlen(proc_name)+1;
+ inbuf.pageslen = 1;
+ VERIFY(err, !fastrpc_mmap_create(fl, -1, 0, init->mem,
+ init->memlen, ADSP_MMAP_HEAP_ADDR, &mem));
+ phys = mem->phys;
+ size = mem->size;
+ VERIFY(err, !hyp_assign_phys(phys, (uint64_t)size,
+ srcVM, 1, destVM, destVMperm, 1));
+ if (err)
+ goto bail;
+
+ ra[0].buf.pv = (void *)&inbuf;
+ ra[0].buf.len = sizeof(inbuf);
+ fds[0] = 0;
+
+ ra[1].buf.pv = (void *)proc_name;
+ ra[1].buf.len = inbuf.namelen;
+ fds[1] = 0;
+
+ pages[0].addr = phys;
+ pages[0].size = size;
+
+ ra[2].buf.pv = (void *)pages;
+ ra[2].buf.len = sizeof(*pages);
+ fds[2] = 0;
+ ioctl.inv.handle = 1;
+
+ ioctl.inv.sc = REMOTE_SCALARS_MAKE(8, 3, 0);
+ ioctl.inv.pra = ra;
+ ioctl.fds = 0;
+ ioctl.attrs = 0;
+ VERIFY(err, !(err = fastrpc_internal_invoke(fl,
+ FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+ if (err)
+ goto bail;
} else {
err = -ENOTTY;
}
@@ -1591,6 +1656,9 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl)
remote_arg_t ra[1];
int tgid = 0;
+ VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS);
+ if (err)
+ goto bail;
VERIFY(err, fl->apps->channel[fl->cid].chan != 0);
if (err)
goto bail;
@@ -1662,8 +1730,17 @@ static int fastrpc_mmap_on_dsp(struct fastrpc_file *fl, uint32_t flags,
desc.arginfo = SCM_ARGS(3);
err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
TZ_PIL_PROTECT_MEM_SUBSYS_ID), &desc);
- }
+ } else if (flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ int srcVM[1] = {VMID_HLOS};
+ int destVM[1] = {VMID_ADSP_Q6};
+ int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+ VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
+ srcVM, 1, destVM, destVMperm, 1));
+ if (err)
+ goto bail;
+ }
bail:
return err;
}
@@ -1671,34 +1748,46 @@ bail:
static int fastrpc_munmap_on_dsp_rh(struct fastrpc_file *fl,
struct fastrpc_mmap *map)
{
- struct fastrpc_ioctl_invoke_attrs ioctl;
- struct scm_desc desc = {0};
- remote_arg_t ra[1];
int err = 0;
- struct {
- uint8_t skey;
- } routargs;
+ int srcVM[1] = {VMID_ADSP_Q6};
+ int destVM[1] = {VMID_HLOS};
+ int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+ if (map->flags == ADSP_MMAP_HEAP_ADDR) {
+ struct fastrpc_ioctl_invoke_attrs ioctl;
+ struct scm_desc desc = {0};
+ remote_arg_t ra[1];
+ int err = 0;
+ struct {
+ uint8_t skey;
+ } routargs;
- ra[0].buf.pv = (void *)&routargs;
- ra[0].buf.len = sizeof(routargs);
+ ra[0].buf.pv = (void *)&routargs;
+ ra[0].buf.len = sizeof(routargs);
- ioctl.inv.handle = 1;
- ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
- ioctl.inv.pra = ra;
- ioctl.fds = 0;
- ioctl.attrs = 0;
+ ioctl.inv.handle = 1;
+ ioctl.inv.sc = REMOTE_SCALARS_MAKE(7, 0, 1);
+ ioctl.inv.pra = ra;
+ ioctl.fds = 0;
+ ioctl.attrs = 0;
- VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
- FASTRPC_MODE_PARALLEL, 1, &ioctl)));
- if (err)
- goto bail;
- desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
- desc.args[1] = map->phys;
- desc.args[2] = map->size;
- desc.args[3] = routargs.skey;
- desc.arginfo = SCM_ARGS(4);
- err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
- TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
+ VERIFY(err, 0 == (err = fastrpc_internal_invoke(fl,
+ FASTRPC_MODE_PARALLEL, 1, &ioctl)));
+ if (err)
+ goto bail;
+ desc.args[0] = TZ_PIL_AUTH_QDSP6_PROC;
+ desc.args[1] = map->phys;
+ desc.args[2] = map->size;
+ desc.args[3] = routargs.skey;
+ desc.arginfo = SCM_ARGS(4);
+ err = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+ TZ_PIL_CLEAR_PROTECT_MEM_SUBSYS_ID), &desc);
+ } else if (map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
+ VERIFY(err, !hyp_assign_phys(map->phys, (uint64_t)map->size,
+ srcVM, 1, destVM, destVMperm, 1));
+ if (err)
+ goto bail;
+ }
bail:
return err;
@@ -1715,7 +1804,8 @@ static int fastrpc_munmap_on_dsp(struct fastrpc_file *fl,
uintptr_t vaddrout;
ssize_t size;
} inargs;
- if (map->flags == ADSP_MMAP_HEAP_ADDR) {
+ if (map->flags == ADSP_MMAP_HEAP_ADDR ||
+ map->flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
VERIFY(err, !fastrpc_munmap_on_dsp_rh(fl, map));
if (err)
goto bail;
@@ -2225,42 +2315,20 @@ static const struct file_operations debugfs_fops = {
.open = fastrpc_debugfs_open,
.read = fastrpc_debugfs_read,
};
-
-static int fastrpc_device_open(struct inode *inode, struct file *filp)
+static int fastrpc_channel_open(struct fastrpc_file *fl)
{
- int cid = MINOR(inode->i_rdev);
- struct dentry *debugfs_file;
- int err = 0;
struct fastrpc_apps *me = &gfa;
- struct fastrpc_file *fl = 0;
-
- VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
- if (err)
- return err;
-
- filp->private_data = fl;
+ int cid, err = 0;
mutex_lock(&me->smd_mutex);
- debugfs_file = debugfs_create_file(current->comm, 0644, debugfs_root,
- fl, &debugfs_fops);
- context_list_ctor(&fl->clst);
- spin_lock_init(&fl->hlock);
- INIT_HLIST_HEAD(&fl->maps);
- INIT_HLIST_HEAD(&fl->bufs);
- INIT_HLIST_NODE(&fl->hn);
- fl->tgid = current->tgid;
- fl->apps = me;
- fl->cid = cid;
- if (debugfs_file != NULL)
- fl->debugfs_file = debugfs_file;
- memset(&fl->perf, 0, sizeof(fl->perf));
-
- VERIFY(err, !fastrpc_session_alloc_locked(&me->channel[cid], 0,
- &fl->sctx));
+ VERIFY(err, fl && fl->sctx);
+ if (err)
+ goto bail;
+ cid = fl->cid;
+ VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
if (err)
goto bail;
- fl->cid = cid;
fl->ssrcount = me->channel[cid].ssrcount;
if ((kref_get_unless_zero(&me->channel[cid].kref) == 0) ||
(me->channel[cid].chan == 0)) {
@@ -2286,25 +2354,58 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
me->channel[cid].ssrcount;
}
}
- spin_lock(&me->hlock);
- hlist_add_head(&fl->hn, &me->drivers);
- spin_unlock(&me->hlock);
bail:
mutex_unlock(&me->smd_mutex);
-
- if (err && fl)
- fastrpc_device_release(inode, filp);
return err;
}
+static int fastrpc_device_open(struct inode *inode, struct file *filp)
+{
+ int err = 0;
+ struct fastrpc_file *fl = 0;
+ struct fastrpc_apps *me = &gfa;
+
+ VERIFY(err, fl = kzalloc(sizeof(*fl), GFP_KERNEL));
+ if (err)
+ return err;
+
+ context_list_ctor(&fl->clst);
+ spin_lock_init(&fl->hlock);
+ INIT_HLIST_HEAD(&fl->maps);
+ INIT_HLIST_HEAD(&fl->bufs);
+ INIT_HLIST_NODE(&fl->hn);
+ fl->tgid = current->tgid;
+ fl->apps = me;
+ fl->mode = FASTRPC_MODE_SERIAL;
+ fl->cid = -1;
+ filp->private_data = fl;
+ spin_lock(&me->hlock);
+ hlist_add_head(&fl->hn, &me->drivers);
+ spin_unlock(&me->hlock);
+ return 0;
+}
+
static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
{
int err = 0;
+ uint32_t cid;
- VERIFY(err, fl && fl->sctx);
+ VERIFY(err, fl != 0);
if (err)
goto bail;
+ if (fl->cid == -1) {
+ cid = *info;
+ VERIFY(err, cid < NUM_CHANNELS);
+ if (err)
+ goto bail;
+ fl->cid = cid;
+ fl->ssrcount = fl->apps->channel[cid].ssrcount;
+ VERIFY(err, !fastrpc_session_alloc_locked(
+ &fl->apps->channel[cid], 0, &fl->sctx));
+ if (err)
+ goto bail;
+ }
*info = (fl->sctx->smmu.enabled ? 1 : 0);
bail:
return err;
@@ -2405,6 +2506,9 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int ioctl_num,
goto bail;
break;
case FASTRPC_IOCTL_GETINFO:
+ VERIFY(err, 0 == copy_from_user(&info, param, sizeof(info)));
+ if (err)
+ goto bail;
VERIFY(err, 0 == (err = fastrpc_get_info(fl, &info)));
if (err)
goto bail;
@@ -2554,7 +2658,7 @@ static int fastrpc_cb_probe(struct device *dev)
VERIFY(err, !arm_iommu_attach_device(dev, sess->smmu.mapping));
if (err)
goto bail;
- sess->dev = dev;
+ sess->smmu.dev = dev;
sess->smmu.enabled = 1;
chan->sesscount++;
debugfs_global_file = debugfs_create_file("global", 0644, debugfs_root,
@@ -2619,10 +2723,10 @@ static int fastrpc_cb_legacy_probe(struct device *dev)
if (err)
goto bail;
first_sess = &chan->session[chan->sesscount];
- first_sess->dev = msm_iommu_get_ctx(name);
+ first_sess->smmu.dev = msm_iommu_get_ctx(name);
VERIFY(err, !IS_ERR_OR_NULL(first_sess->smmu.mapping =
arm_iommu_create_mapping(
- msm_iommu_get_bus(first_sess->dev),
+ msm_iommu_get_bus(first_sess->smmu.dev),
range[0], range[1])));
if (err)
goto bail;
@@ -2636,7 +2740,7 @@ static int fastrpc_cb_legacy_probe(struct device *dev)
goto bail;
sess = &chan->session[chan->sesscount];
sess->smmu.cb = sids[i];
- sess->dev = first_sess->dev;
+ sess->smmu.dev = first_sess->smmu.dev;
sess->smmu.enabled = 1;
sess->smmu.mapping = first_sess->smmu.mapping;
chan->sesscount++;
@@ -2696,9 +2800,9 @@ static void fastrpc_deinit(void)
}
for (j = 0; j < NUM_SESSIONS; j++) {
struct fastrpc_session_ctx *sess = &chan->session[j];
- if (sess->smmu.enabled) {
- arm_iommu_detach_device(sess->dev);
- sess->dev = 0;
+ if (sess->smmu.dev) {
+ arm_iommu_detach_device(sess->smmu.dev);
+ sess->smmu.dev = 0;
}
if (sess->smmu.mapping) {
arm_iommu_release_mapping(sess->smmu.mapping);
@@ -2720,6 +2824,7 @@ static struct platform_driver fastrpc_driver = {
static int __init fastrpc_device_init(void)
{
struct fastrpc_apps *me = &gfa;
+ struct device *dev = 0;
int err = 0, i;
memset(me, 0, sizeof(*me));
@@ -2736,7 +2841,7 @@ static int __init fastrpc_device_init(void)
cdev_init(&me->cdev, &fops);
me->cdev.owner = THIS_MODULE;
VERIFY(err, 0 == cdev_add(&me->cdev, MKDEV(MAJOR(me->dev_no), 0),
- NUM_CHANNELS));
+ 1));
if (err)
goto cdev_init_bail;
me->class = class_create(THIS_MODULE, "fastrpc");
@@ -2744,15 +2849,14 @@ static int __init fastrpc_device_init(void)
if (err)
goto class_create_bail;
me->compat = (NULL == fops.compat_ioctl) ? 0 : 1;
+ dev = device_create(me->class, NULL,
+ MKDEV(MAJOR(me->dev_no), 0),
+ NULL, gcinfo[0].name);
+ VERIFY(err, !IS_ERR_OR_NULL(dev));
+ if (err)
+ goto device_create_bail;
for (i = 0; i < NUM_CHANNELS; i++) {
- if (!gcinfo[i].name)
- continue;
- me->channel[i].dev = device_create(me->class, NULL,
- MKDEV(MAJOR(me->dev_no), i),
- NULL, gcinfo[i].name);
- VERIFY(err, !IS_ERR(me->channel[i].dev));
- if (err)
- goto device_create_bail;
+ me->channel[i].dev = dev;
me->channel[i].ssrcount = 0;
me->channel[i].prevssrcount = 0;
me->channel[i].ramdumpenabled = 0;
@@ -2771,12 +2875,12 @@ static int __init fastrpc_device_init(void)
return 0;
device_create_bail:
for (i = 0; i < NUM_CHANNELS; i++) {
- if (IS_ERR_OR_NULL(me->channel[i].dev))
- continue;
- device_destroy(me->class, MKDEV(MAJOR(me->dev_no), i));
- subsys_notif_unregister_notifier(me->channel[i].handle,
- &me->channel[i].nb);
+ if (me->channel[i].handle)
+ subsys_notif_unregister_notifier(me->channel[i].handle,
+ &me->channel[i].nb);
}
+ if (!IS_ERR_OR_NULL(dev))
+ device_destroy(me->class, MKDEV(MAJOR(me->dev_no), 0));
class_destroy(me->class);
class_create_bail:
cdev_del(&me->cdev);
diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c
index f7e84dd55606..fcd6d1142618 100644
--- a/drivers/char/adsprpc_compat.c
+++ b/drivers/char/adsprpc_compat.c
@@ -391,6 +391,10 @@ long compat_fastrpc_device_ioctl(struct file *filp, unsigned int cmd,
sizeof(*info))));
if (err)
return -EFAULT;
+ err = get_user(u, info32);
+ err |= put_user(u, info);
+ if (err)
+ return err;
ret = filp->f_op->unlocked_ioctl(filp, FASTRPC_IOCTL_GETINFO,
(unsigned long)info);
if (ret)
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index f686b0a1d6fa..2a66b11bf179 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -53,6 +53,7 @@
/* INIT a new process or attach to guestos */
#define FASTRPC_INIT_ATTACH 0
#define FASTRPC_INIT_CREATE 1
+#define FASTRPC_INIT_CREATE_STATIC 2
/* Retrives number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
diff --git a/drivers/char/diag/diagfwd_bridge.c b/drivers/char/diag/diagfwd_bridge.c
index 0462a64614f3..3342984eb795 100644
--- a/drivers/char/diag/diagfwd_bridge.c
+++ b/drivers/char/diag/diagfwd_bridge.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -90,6 +90,18 @@ static int diagfwd_bridge_mux_disconnect(int id, int mode)
{
if (id < 0 || id >= NUM_REMOTE_DEV)
return -EINVAL;
+
+ if ((mode == DIAG_USB_MODE &&
+ driver->logging_mode == DIAG_MEMORY_DEVICE_MODE) ||
+ (mode == DIAG_MEMORY_DEVICE_MODE &&
+ driver->logging_mode == DIAG_USB_MODE)) {
+ /*
+ * Don't close the MHI channels when usb is disconnected
+ * and a process is running in memory device mode.
+ */
+ return 0;
+ }
+
if (bridge_info[id].dev_ops && bridge_info[id].dev_ops->close)
bridge_info[id].dev_ops->close(bridge_info[id].ctxt);
return 0;
diff --git a/drivers/clk/msm/clock-mmss-8998.c b/drivers/clk/msm/clock-mmss-8998.c
index eb543010c17b..a72de47c34c0 100644
--- a/drivers/clk/msm/clock-mmss-8998.c
+++ b/drivers/clk/msm/clock-mmss-8998.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -263,6 +263,7 @@ static struct clk_freq_tbl ftbl_csi_clk_src[] = {
static struct clk_freq_tbl ftbl_csi_clk_src_vq[] = {
F_MM( 164571429, mmpll10_pll_out, 3.5, 0, 0),
F_MM( 256000000, mmpll4_pll_out, 3, 0, 0),
+ F_MM( 274290000, mmpll7_pll_out, 3.5, 0, 0),
F_MM( 300000000, mmsscc_gpll0, 2, 0, 0),
F_MM( 384000000, mmpll4_pll_out, 2, 0, 0),
F_MM( 576000000, mmpll10_pll_out, 1, 0, 0),
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index 79e8a7d8eb00..1639b1b7f94b 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -83,6 +83,7 @@ enum clk_osm_trace_packet_id {
#define VERSION_REG 0x0
#define OSM_TABLE_SIZE 40
+#define MAX_VIRTUAL_CORNER (OSM_TABLE_SIZE - 1)
#define MAX_CLUSTER_CNT 2
#define CORE_COUNT_VAL(val) ((val & GENMASK(18, 16)) >> 16)
#define SINGLE_CORE 1
@@ -1662,6 +1663,14 @@ static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
break;
}
}
+
+ /*
+ * This assumes the OSM table uses corners
+ * 0 to MAX_VIRTUAL_CORNER - 1.
+ */
+ if (!c->mem_acc_threshold_vc)
+ c->mem_acc_threshold_vc =
+ MAX_VIRTUAL_CORNER;
}
return 0;
@@ -3232,9 +3241,10 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
return rc;
}
- rc = clk_osm_resolve_crossover_corners(&pwrcl_clk, pdev, NULL);
+ rc = clk_osm_resolve_crossover_corners(&pwrcl_clk, pdev,
+ "qcom,pwrcl-apcs-mem-acc-threshold-voltage");
if (rc)
- dev_info(&pdev->dev, "No APM crossover corner programmed\n");
+ dev_info(&pdev->dev, "No MEM-ACC crossover corner programmed\n");
rc = clk_osm_resolve_crossover_corners(&perfcl_clk, pdev,
"qcom,perfcl-apcs-mem-acc-threshold-voltage");
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index 6d13adf7a6ee..e88d70f07a1c 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -3034,6 +3034,7 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
{
int rc = 0, cpu, i;
int speedbin = 0, pvs_ver = 0;
+ bool is_sdm630 = 0;
u32 pte_efuse;
int num_clks = ARRAY_SIZE(osm_qcom_clk_hws);
struct clk *clk;
@@ -3306,6 +3307,13 @@ static int clk_cpu_osm_driver_probe(struct platform_device *pdev)
"Failed to enable clock for cpu %d\n", cpu);
}
+ is_sdm630 = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,clk-cpu-osm-sdm630");
+ if (is_sdm630) {
+ pwrcl_boot_rate = 1382400000;
+ perfcl_boot_rate = 1670400000;
+ }
+
/* Set final boot rate */
rc = clk_set_rate(pwrcl_clk.hw.clk, pwrcl_boot_rate);
if (rc) {
@@ -3348,6 +3356,7 @@ exit:
static const struct of_device_id match_table[] = {
{ .compatible = "qcom,clk-cpu-osm" },
+ { .compatible = "qcom,clk-cpu-osm-sdm630" },
{}
};
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 027a0d0bd0b8..2788c0add14a 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1608,7 +1608,18 @@ static int enable_ice_setup(struct ice_device *ice_dev)
out_clocks:
qcom_ice_enable_clocks(ice_dev, false);
out_reg:
- regulator_disable(ice_dev->reg);
+ if (ice_dev->is_regulator_available) {
+ if (qcom_ice_get_vreg(ice_dev)) {
+ pr_err("%s: Could not get regulator\n", __func__);
+ goto out;
+ }
+ ret = regulator_disable(ice_dev->reg);
+ if (ret) {
+ pr_err("%s:%pK: Could not disable regulator\n",
+ __func__, ice_dev);
+ goto out;
+ }
+ }
out:
return ret;
}
diff --git a/drivers/crypto/msm/ota_crypto.c b/drivers/crypto/msm/ota_crypto.c
index 9b4a001bec95..674913cb20bf 100644
--- a/drivers/crypto/msm/ota_crypto.c
+++ b/drivers/crypto/msm/ota_crypto.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2014,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -880,8 +880,8 @@ static ssize_t _debug_stats_read(struct file *file, char __user *buf,
int len;
len = _disp_stats();
-
- rc = simple_read_from_buffer((void __user *) buf, len,
+ if (len <= count)
+ rc = simple_read_from_buffer((void __user *) buf, len,
ppos, (void *) _debug_read_buf, len);
return rc;
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
index 8d17ea89e266..ee7e735761e2 100644
--- a/drivers/crypto/msm/qce50.c
+++ b/drivers/crypto/msm/qce50.c
@@ -4967,6 +4967,11 @@ int qce_aead_req(void *handle, struct qce_req *q_req)
else
q_req->cryptlen = areq->cryptlen - authsize;
+ if (q_req->cryptlen > UINT_MAX - areq->assoclen) {
+ pr_err("Integer overflow on total aead req length.\n");
+ return -EINVAL;
+ }
+
totallen = q_req->cryptlen + areq->assoclen;
if (pce_dev->support_cmd_dscr) {
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index a629c621648c..5ce87a6edcc3 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1987,9 +1987,9 @@ static ssize_t _debug_stats_read(struct file *file, char __user *buf,
len = _disp_stats(qcedev);
- rc = simple_read_from_buffer((void __user *) buf, len,
+ if (len <= count)
+ rc = simple_read_from_buffer((void __user *) buf, len,
ppos, (void *) _debug_read_buf, len);
-
return rc;
}
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
index a898dbcbd0ca..893b0b6da6b8 100644
--- a/drivers/crypto/msm/qcrypto.c
+++ b/drivers/crypto/msm/qcrypto.c
@@ -1,6 +1,6 @@
/* Qualcomm Crypto driver
*
- * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -5419,9 +5419,9 @@ static ssize_t _debug_stats_read(struct file *file, char __user *buf,
len = _disp_stats(qcrypto);
- rc = simple_read_from_buffer((void __user *) buf, len,
+ if (len <= count)
+ rc = simple_read_from_buffer((void __user *) buf, len,
ppos, (void *) _debug_read_buf, len);
-
return rc;
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c4bf9a1cf4a6..f4554b39d5d9 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -8,6 +8,7 @@ menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
select HDMI
+ select FB
select FB_CMDLINE
select I2C
select I2C_ALGOBIT
@@ -52,7 +53,7 @@ config DRM_FBDEV_EMULATION
depends on DRM
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
- default y
+ default n
help
Choose this option if you have a need for the legacy fbdev
support. Note that this support also provides the linux console
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 2d5ca8eec13a..e944b0c456ed 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -335,7 +335,7 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
return -EINVAL;
memset(packet, 0, sizeof(*packet));
- packet->header[0] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f);
+ packet->header[2] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f);
/* TODO: compute ECC if hardware support is not available */
@@ -347,16 +347,16 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
* and 2.
*/
if (mipi_dsi_packet_format_is_long(msg->type)) {
- packet->header[1] = (msg->tx_len >> 0) & 0xff;
- packet->header[2] = (msg->tx_len >> 8) & 0xff;
+ packet->header[0] = (msg->tx_len >> 0) & 0xff;
+ packet->header[1] = (msg->tx_len >> 8) & 0xff;
packet->payload_length = msg->tx_len;
packet->payload = msg->tx_buf;
} else {
const u8 *tx = msg->tx_buf;
- packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0;
- packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0;
+ packet->header[0] = (msg->tx_len > 0) ? tx[0] : 0;
+ packet->header[1] = (msg->tx_len > 1) ? tx[1] : 0;
}
packet->size = sizeof(packet->header) + packet->payload_length;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 84d3ec98e6b9..afd94a1e85d3 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,7 +3,7 @@ config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
- depends on OF && COMMON_CLK
+ depends on OF
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
@@ -33,6 +33,18 @@ config DRM_MSM_DSI
Choose this option if you have a need for MIPI DSI connector
support.
+config DRM_MSM_DSI_STAGING
+ bool "Enable new DSI driver support in MSM DRM driver"
+ depends on DRM_MSM
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ default y
+ help
+ Choose this option if you need MIPI DSI connector support on MSM
+ which conforms to DRM. MIPI stands for Mobile Industry Processor
+ Interface and DSI stands for Display Serial Interface which powers
+ the primary display of your mobile device.
+
config DRM_MSM_DSI_PLL
bool "Enable DSI PLL driver in MSM DRM"
depends on DRM_MSM_DSI && COMMON_CLK
@@ -54,3 +66,25 @@ config DRM_MSM_DSI_20NM_PHY
default y
help
Choose this option if the 20nm DSI PHY is used on the platform.
+
+config DRM_MSM_MDP4
+ tristate "MSM MDP4 DRM driver"
+ depends on DRM_MSM
+ default n
+ help
+ Choose this option if MSM MDP4 revision support is needed in DRM/KMS.
+
+config DRM_MSM_HDCP
+ tristate "HDCP for MSM DRM"
+ depends on DRM_MSM
+ default n
+ help
+ Choose this option if HDCP support is needed in the DRM/KMS driver.
+
+config DRM_SDE_WB
+ bool "Enable Writeback support in SDE DRM"
+ depends on DRM_MSM
+ default y
+ help
+ Choose this option for writeback connector support.
+
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 1c90290be716..4ca16fc01e1c 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,11 +1,10 @@
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging
ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
+ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android
+ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
+ccflags-y += -Idrivers/gpu/drm/msm/sde
-msm-y := \
- adreno/adreno_device.o \
- adreno/adreno_gpu.o \
- adreno/a3xx_gpu.o \
- adreno/a4xx_gpu.o \
+msm_drm-y := \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -23,13 +22,6 @@ msm-y := \
edp/edp_phy.o \
mdp/mdp_format.o \
mdp/mdp_kms.o \
- mdp/mdp4/mdp4_crtc.o \
- mdp/mdp4/mdp4_dtv_encoder.o \
- mdp/mdp4/mdp4_lcdc_encoder.o \
- mdp/mdp4/mdp4_lvds_connector.o \
- mdp/mdp4/mdp4_irq.o \
- mdp/mdp4/mdp4_kms.o \
- mdp/mdp4/mdp4_plane.o \
mdp/mdp5/mdp5_cfg.o \
mdp/mdp5/mdp5_ctl.o \
mdp/mdp5/mdp5_crtc.o \
@@ -38,6 +30,91 @@ msm-y := \
mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
+ sde/sde_crtc.o \
+ sde/sde_encoder.o \
+ sde/sde_encoder_phys_vid.o \
+ sde/sde_encoder_phys_cmd.o \
+ sde/sde_irq.o \
+ sde/sde_core_irq.o \
+ sde/sde_core_perf.o \
+ sde/sde_rm.o \
+ sde/sde_kms_utils.o \
+ sde/sde_kms.o \
+ sde/sde_plane.o \
+ sde/sde_connector.o \
+ sde/sde_backlight.o \
+ sde/sde_color_processing.o \
+ sde/sde_vbif.o \
+ sde_dbg_evtlog.o
+
+# use drm gpu driver only if qcom_kgsl driver not available
+ifneq ($(CONFIG_QCOM_KGSL),y)
+msm_drm-y += adreno/adreno_device.o \
+ adreno/adreno_gpu.o \
+ adreno/a3xx_gpu.o \
+ adreno/a4xx_gpu.o
+endif
+
+msm_drm-$(CONFIG_DRM_MSM_MDP4) += mdp/mdp4/mdp4_crtc.o \
+ mdp/mdp4/mdp4_dtv_encoder.o \
+ mdp/mdp4/mdp4_lcdc_encoder.o \
+ mdp/mdp4/mdp4_lvds_connector.o \
+ mdp/mdp4/mdp4_irq.o \
+ mdp/mdp4/mdp4_kms.o \
+ mdp/mdp4/mdp4_plane.o
+
+msm_drm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
+msm_drm-$(CONFIG_SYNC) += sde/sde_fence.o
+msm_drm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+
+msm_drm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+ dsi/dsi_cfg.o \
+ dsi/dsi_host.o \
+ dsi/dsi_manager.o \
+ dsi/phy/dsi_phy.o \
+ dsi/dsi_manager.o \
+ mdp/mdp5/mdp5_cmd_encoder.o
+
+msm_drm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
+msm_drm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
+
+msm_drm-$(CONFIG_DRM_MSM_DSI_STAGING) += dsi-staging/dsi_phy.o \
+ dsi-staging/dsi_clk_pwr.o \
+ dsi-staging/dsi_phy.o \
+ dsi-staging/dsi_phy_hw_v4_0.o \
+ dsi-staging/dsi_ctrl_hw_1_4.o \
+ dsi-staging/dsi_ctrl.o \
+ dsi-staging/dsi_catalog.o \
+ dsi-staging/dsi_drm.o \
+ dsi-staging/dsi_display.o \
+ dsi-staging/dsi_panel.o \
+ dsi-staging/dsi_display_test.o
+
+msm_drm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
+ dsi/pll/dsi_pll_28nm.o
+
+msm_drm-$(CONFIG_DRM_MSM) += \
+ sde/sde_hw_catalog.o \
+ sde/sde_hw_cdm.o \
+ sde/sde_hw_dspp.o \
+ sde/sde_hw_intf.o \
+ sde/sde_hw_lm.o \
+ sde/sde_hw_ctl.o \
+ sde/sde_hw_util.o \
+ sde/sde_hw_sspp.o \
+ sde/sde_hw_wb.o \
+ sde/sde_hw_pingpong.o \
+ sde/sde_hw_top.o \
+ sde/sde_hw_interrupts.o \
+ sde/sde_hw_vbif.o \
+ sde/sde_formats.o \
+ sde_power_handle.o \
+ sde/sde_hw_color_processing_v1_7.o
+
+msm_drm-$(CONFIG_DRM_SDE_WB) += sde/sde_wb.o \
+ sde/sde_encoder_phys_wb.o
+
+msm_drm-$(CONFIG_DRM_MSM) += \
msm_atomic.o \
msm_drv.o \
msm_fb.o \
@@ -46,26 +123,10 @@ msm-y := \
msm_gem_submit.o \
msm_gpu.o \
msm_iommu.o \
+ msm_smmu.o \
msm_perf.o \
msm_rd.o \
- msm_ringbuffer.o
-
-msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
-
-msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
- dsi/dsi_cfg.o \
- dsi/dsi_host.o \
- dsi/dsi_manager.o \
- dsi/phy/dsi_phy.o \
- mdp/mdp5/mdp5_cmd_encoder.o
-
-msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
-msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
-
-ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
-msm-y += dsi/pll/dsi_pll.o
-msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
-endif
+ msm_ringbuffer.o \
+ msm_prop.o
-obj-$(CONFIG_DRM_MSM) += msm.o
+obj-$(CONFIG_DRM_MSM) += msm_drm.o
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
new file mode 100644
index 000000000000..06027a963be1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "msm-dsi-catalog:[%s] " fmt, __func__
+#include <linux/errno.h>
+
+#include "dsi_catalog.h"
+
+/**
+ * dsi_catalog_14_init() - catalog init for dsi controller v1.4
+ */
+static void dsi_catalog_14_init(struct dsi_ctrl_hw *ctrl)
+{
+ ctrl->ops.host_setup = dsi_ctrl_hw_14_host_setup;
+ ctrl->ops.setup_lane_map = dsi_ctrl_hw_14_setup_lane_map;
+ ctrl->ops.video_engine_en = dsi_ctrl_hw_14_video_engine_en;
+ ctrl->ops.video_engine_setup = dsi_ctrl_hw_14_video_engine_setup;
+ ctrl->ops.set_video_timing = dsi_ctrl_hw_14_set_video_timing;
+ ctrl->ops.cmd_engine_setup = dsi_ctrl_hw_14_cmd_engine_setup;
+ ctrl->ops.setup_cmd_stream = dsi_ctrl_hw_14_setup_cmd_stream;
+ ctrl->ops.ctrl_en = dsi_ctrl_hw_14_ctrl_en;
+ ctrl->ops.cmd_engine_en = dsi_ctrl_hw_14_cmd_engine_en;
+ ctrl->ops.phy_sw_reset = dsi_ctrl_hw_14_phy_sw_reset;
+ ctrl->ops.soft_reset = dsi_ctrl_hw_14_soft_reset;
+ ctrl->ops.kickoff_command = dsi_ctrl_hw_14_kickoff_command;
+ ctrl->ops.kickoff_fifo_command = dsi_ctrl_hw_14_kickoff_fifo_command;
+ ctrl->ops.reset_cmd_fifo = dsi_ctrl_hw_14_reset_cmd_fifo;
+ ctrl->ops.trigger_command_dma = dsi_ctrl_hw_14_trigger_command_dma;
+ ctrl->ops.ulps_request = dsi_ctrl_hw_14_ulps_request;
+ ctrl->ops.ulps_exit = dsi_ctrl_hw_14_ulps_exit;
+ ctrl->ops.clear_ulps_request = dsi_ctrl_hw_14_clear_ulps_request;
+ ctrl->ops.get_lanes_in_ulps = dsi_ctrl_hw_14_get_lanes_in_ulps;
+ ctrl->ops.clamp_enable = dsi_ctrl_hw_14_clamp_enable;
+ ctrl->ops.clamp_disable = dsi_ctrl_hw_14_clamp_disable;
+ ctrl->ops.get_interrupt_status = dsi_ctrl_hw_14_get_interrupt_status;
+ ctrl->ops.get_error_status = dsi_ctrl_hw_14_get_error_status;
+ ctrl->ops.clear_error_status = dsi_ctrl_hw_14_clear_error_status;
+ ctrl->ops.clear_interrupt_status =
+ dsi_ctrl_hw_14_clear_interrupt_status;
+ ctrl->ops.enable_status_interrupts =
+ dsi_ctrl_hw_14_enable_status_interrupts;
+ ctrl->ops.enable_error_interrupts =
+ dsi_ctrl_hw_14_enable_error_interrupts;
+ ctrl->ops.video_test_pattern_setup =
+ dsi_ctrl_hw_14_video_test_pattern_setup;
+ ctrl->ops.cmd_test_pattern_setup =
+ dsi_ctrl_hw_14_cmd_test_pattern_setup;
+ ctrl->ops.test_pattern_enable = dsi_ctrl_hw_14_test_pattern_enable;
+ ctrl->ops.trigger_cmd_test_pattern =
+ dsi_ctrl_hw_14_trigger_cmd_test_pattern;
+ ctrl->ops.reg_dump_to_buffer = dsi_ctrl_hw_14_reg_dump_to_buffer;
+}
+
+/**
+ * dsi_catalog_20_init() - catalog init for dsi controller v2.0
+ */
+static void dsi_catalog_20_init(struct dsi_ctrl_hw *ctrl)
+{
+ set_bit(DSI_CTRL_CPHY, ctrl->feature_map);
+}
+
+/**
+ * dsi_catalog_ctrl_setup() - return catalog info for dsi controller
+ * @ctrl: Pointer to DSI controller hw object.
+ * @version: DSI controller version.
+ * @index: DSI controller instance ID.
+ *
+ * This function sets up the catalog information in the dsi_ctrl_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_ctrl_version version,
+ u32 index)
+{
+ int rc = 0;
+
+ if (version == DSI_CTRL_VERSION_UNKNOWN ||
+ version >= DSI_CTRL_VERSION_MAX) {
+ pr_err("Unsupported version: %d\n", version);
+ return -ENOTSUPP;
+ }
+
+ ctrl->index = index;
+ set_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map);
+ set_bit(DSI_CTRL_CMD_TPG, ctrl->feature_map);
+ set_bit(DSI_CTRL_VARIABLE_REFRESH_RATE, ctrl->feature_map);
+ set_bit(DSI_CTRL_DYNAMIC_REFRESH, ctrl->feature_map);
+ set_bit(DSI_CTRL_DESKEW_CALIB, ctrl->feature_map);
+ set_bit(DSI_CTRL_DPHY, ctrl->feature_map);
+
+ switch (version) {
+ case DSI_CTRL_VERSION_1_4:
+ dsi_catalog_14_init(ctrl);
+ break;
+ case DSI_CTRL_VERSION_2_0:
+ dsi_catalog_20_init(ctrl);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return rc;
+}
+
+/**
+ * dsi_catalog_phy_4_0_init() - catalog init for DSI PHY v4.0
+ */
+static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy)
+{
+ phy->ops.regulator_enable = dsi_phy_hw_v4_0_regulator_enable;
+ phy->ops.regulator_disable = dsi_phy_hw_v4_0_regulator_disable;
+ phy->ops.enable = dsi_phy_hw_v4_0_enable;
+ phy->ops.disable = dsi_phy_hw_v4_0_disable;
+ phy->ops.calculate_timing_params =
+ dsi_phy_hw_v4_0_calculate_timing_params;
+}
+
+/**
+ * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware
+ * @ctrl: Pointer to DSI PHY hw object.
+ * @version: DSI PHY version.
+ * @index: DSI PHY instance ID.
+ *
+ * This function sets up the catalog information in the dsi_phy_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
+ enum dsi_phy_version version,
+ u32 index)
+{
+ int rc = 0;
+
+ if (version == DSI_PHY_VERSION_UNKNOWN ||
+ version >= DSI_PHY_VERSION_MAX) {
+ pr_err("Unsupported version: %d\n", version);
+ return -ENOTSUPP;
+ }
+
+ phy->index = index;
+ set_bit(DSI_PHY_DPHY, phy->feature_map);
+
+ switch (version) {
+ case DSI_PHY_VERSION_4_0:
+ dsi_catalog_phy_4_0_init(phy);
+ break;
+ case DSI_PHY_VERSION_1_0:
+ case DSI_PHY_VERSION_2_0:
+ case DSI_PHY_VERSION_3_0:
+ default:
+ return -ENOTSUPP;
+ }
+
+ return rc;
+}
+
+
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
new file mode 100644
index 000000000000..98bd9b039f09
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CATALOG_H_
+#define _DSI_CATALOG_H_
+
+#include "dsi_ctrl_hw.h"
+#include "dsi_phy_hw.h"
+
+/**
+ * dsi_catalog_ctrl_setup() - return catalog info for dsi controller
+ * @ctrl: Pointer to DSI controller hw object.
+ * @version: DSI controller version.
+ * @index: DSI controller instance ID.
+ *
+ * This function sets up the catalog information in the dsi_ctrl_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_ctrl_version version,
+ u32 index);
+
+/**
+ * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware
+ * @phy:     Pointer to DSI PHY hw object.
+ * @version: DSI PHY version.
+ * @index: DSI PHY instance ID.
+ *
+ * This function sets up the catalog information in the dsi_phy_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
+ enum dsi_phy_version version,
+ u32 index);
+
+/* Definitions for 4.0 PHY hardware driver */
+void dsi_phy_hw_v4_0_regulator_enable(struct dsi_phy_hw *phy,
+ struct dsi_phy_per_lane_cfgs *cfg);
+void dsi_phy_hw_v4_0_regulator_disable(struct dsi_phy_hw *phy);
+void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
+void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy);
+int dsi_phy_hw_v4_0_calculate_timing_params(struct dsi_phy_hw *phy,
+ struct dsi_mode_info *mode,
+ struct dsi_host_common_cfg *cfg,
+ struct dsi_phy_per_lane_cfgs
+ *timing);
+
+/* Definitions for 1.4 controller hardware driver */
+void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *config);
+void dsi_ctrl_hw_14_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
+void dsi_ctrl_hw_14_video_engine_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_video_engine_cfg *cfg);
+void dsi_ctrl_hw_14_set_video_timing(struct dsi_ctrl_hw *ctrl,
+ struct dsi_mode_info *mode);
+
+void dsi_ctrl_hw_14_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_cmd_engine_cfg *cfg);
+
+void dsi_ctrl_hw_14_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on);
+void dsi_ctrl_hw_14_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
+
+void dsi_ctrl_hw_14_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
+ u32 width_in_pixels,
+ u32 h_stride,
+ u32 height_in_lines,
+ u32 vc_id);
+void dsi_ctrl_hw_14_phy_sw_reset(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_soft_reset(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
+ struct dsi_lane_mapping *lane_map);
+void dsi_ctrl_hw_14_kickoff_command(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_info *cmd,
+ u32 flags);
+
+void dsi_ctrl_hw_14_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+ u32 flags);
+void dsi_ctrl_hw_14_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_trigger_command_dma(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes);
+void dsi_ctrl_hw_14_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes);
+void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes);
+u32 dsi_ctrl_hw_14_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool enable_ulps);
+
+void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool disable_ulps);
+u32 dsi_ctrl_hw_14_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints);
+void dsi_ctrl_hw_14_enable_status_interrupts(struct dsi_ctrl_hw *ctrl,
+ u32 ints);
+
+u64 dsi_ctrl_hw_14_get_error_status(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors);
+void dsi_ctrl_hw_14_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
+ u64 errors);
+
+void dsi_ctrl_hw_14_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val);
+void dsi_ctrl_hw_14_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val,
+ u32 stream_id);
+void dsi_ctrl_hw_14_test_pattern_enable(struct dsi_ctrl_hw *ctrl, bool enable);
+void dsi_ctrl_hw_14_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
+ u32 stream_id);
+ssize_t dsi_ctrl_hw_14_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
+ char *buf,
+ u32 size);
+#endif /* _DSI_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.c
new file mode 100644
index 000000000000..7def847f6f2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.c
@@ -0,0 +1,727 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "dsi_clk_pwr.h"
+
+#define INC_REFCOUNT(s, start_func) \
+ ({ \
+ int rc = 0; \
+ if ((s)->refcount == 0) { \
+ rc = start_func(s); \
+ if (rc) \
+ pr_err("failed to enable, rc = %d\n", rc); \
+ } \
+ (s)->refcount++; \
+ rc; \
+ })
+
+#define DEC_REFCOUNT(s, stop_func) \
+ ({ \
+ int rc = 0; \
+ if ((s)->refcount == 0) { \
+ pr_err("unbalanced refcount\n"); \
+ } else { \
+ (s)->refcount--; \
+ if ((s)->refcount == 0) { \
+ rc = stop_func(s); \
+ if (rc) \
+ pr_err("disable failed, rc=%d\n", rc); \
+ } \
+ } \
+ rc; \
+ })
+
+static int dsi_core_clk_start(struct dsi_core_clk_info *clks)
+{
+ int rc = 0;
+
+ rc = clk_prepare_enable(clks->mdp_core_clk);
+ if (rc) {
+ pr_err("failed to enable mdp_core_clk, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = clk_prepare_enable(clks->iface_clk);
+ if (rc) {
+ pr_err("failed to enable iface_clk, rc=%d\n", rc);
+ goto error_disable_core_clk;
+ }
+
+ rc = clk_prepare_enable(clks->bus_clk);
+ if (rc) {
+ pr_err("failed to enable bus_clk, rc=%d\n", rc);
+ goto error_disable_iface_clk;
+ }
+
+ rc = clk_prepare_enable(clks->core_mmss_clk);
+ if (rc) {
+ pr_err("failed to enable core_mmss_clk, rc=%d\n", rc);
+ goto error_disable_bus_clk;
+ }
+
+ return rc;
+
+error_disable_bus_clk:
+ clk_disable_unprepare(clks->bus_clk);
+error_disable_iface_clk:
+ clk_disable_unprepare(clks->iface_clk);
+error_disable_core_clk:
+ clk_disable_unprepare(clks->mdp_core_clk);
+error:
+ return rc;
+}
+
+static int dsi_core_clk_stop(struct dsi_core_clk_info *clks)
+{
+ clk_disable_unprepare(clks->core_mmss_clk);
+ clk_disable_unprepare(clks->bus_clk);
+ clk_disable_unprepare(clks->iface_clk);
+ clk_disable_unprepare(clks->mdp_core_clk);
+
+ return 0;
+}
+
+static int dsi_link_clk_set_rate(struct dsi_link_clk_info *l_clks)
+{
+ int rc = 0;
+
+ rc = clk_set_rate(l_clks->esc_clk, l_clks->esc_clk_rate);
+ if (rc) {
+ pr_err("clk_set_rate failed for esc_clk rc = %d\n", rc);
+ goto error;
+ }
+
+ rc = clk_set_rate(l_clks->byte_clk, l_clks->byte_clk_rate);
+ if (rc) {
+ pr_err("clk_set_rate failed for byte_clk rc = %d\n", rc);
+ goto error;
+ }
+
+ rc = clk_set_rate(l_clks->pixel_clk, l_clks->pixel_clk_rate);
+ if (rc) {
+ pr_err("clk_set_rate failed for pixel_clk rc = %d\n", rc);
+ goto error;
+ }
+error:
+ return rc;
+}
+
+static int dsi_link_clk_prepare(struct dsi_link_clk_info *l_clks)
+{
+ int rc = 0;
+
+ rc = clk_prepare(l_clks->esc_clk);
+ if (rc) {
+ pr_err("Failed to prepare dsi esc clk, rc=%d\n", rc);
+ goto esc_clk_err;
+ }
+
+ rc = clk_prepare(l_clks->byte_clk);
+ if (rc) {
+ pr_err("Failed to prepare dsi byte clk, rc=%d\n", rc);
+ goto byte_clk_err;
+ }
+
+ rc = clk_prepare(l_clks->pixel_clk);
+ if (rc) {
+ pr_err("Failed to prepare dsi pixel clk, rc=%d\n", rc);
+ goto pixel_clk_err;
+ }
+
+ return rc;
+
+pixel_clk_err:
+ clk_unprepare(l_clks->byte_clk);
+byte_clk_err:
+ clk_unprepare(l_clks->esc_clk);
+esc_clk_err:
+ return rc;
+}
+
+static void dsi_link_clk_unprepare(struct dsi_link_clk_info *l_clks)
+{
+ clk_unprepare(l_clks->pixel_clk);
+ clk_unprepare(l_clks->byte_clk);
+ clk_unprepare(l_clks->esc_clk);
+}
+
+static int dsi_link_clk_enable(struct dsi_link_clk_info *l_clks)
+{
+ int rc = 0;
+
+ rc = clk_enable(l_clks->esc_clk);
+ if (rc) {
+ pr_err("Failed to enable dsi esc clk, rc=%d\n", rc);
+ goto esc_clk_err;
+ }
+
+ rc = clk_enable(l_clks->byte_clk);
+ if (rc) {
+ pr_err("Failed to enable dsi byte clk, rc=%d\n", rc);
+ goto byte_clk_err;
+ }
+
+ rc = clk_enable(l_clks->pixel_clk);
+ if (rc) {
+ pr_err("Failed to enable dsi pixel clk, rc=%d\n", rc);
+ goto pixel_clk_err;
+ }
+
+ return rc;
+
+pixel_clk_err:
+ clk_disable(l_clks->byte_clk);
+byte_clk_err:
+ clk_disable(l_clks->esc_clk);
+esc_clk_err:
+ return rc;
+}
+
+static void dsi_link_clk_disable(struct dsi_link_clk_info *l_clks)
+{
+ clk_disable(l_clks->esc_clk);
+ clk_disable(l_clks->pixel_clk);
+ clk_disable(l_clks->byte_clk);
+}
+
+/**
+ * dsi_link_clk_start() - enable dsi link clocks
+ */
+static int dsi_link_clk_start(struct dsi_link_clk_info *clks)
+{
+ int rc = 0;
+
+ if (clks->set_new_rate) {
+ rc = dsi_link_clk_set_rate(clks);
+ if (rc) {
+ pr_err("failed to set clk rates, rc = %d\n", rc);
+ goto error;
+ } else {
+ clks->set_new_rate = false;
+ }
+ }
+
+ rc = dsi_link_clk_prepare(clks);
+ if (rc) {
+ pr_err("failed to prepare link clks, rc = %d\n", rc);
+ goto error;
+ }
+
+ rc = dsi_link_clk_enable(clks);
+ if (rc) {
+ pr_err("failed to enable link clks, rc = %d\n", rc);
+ goto error_unprepare;
+ }
+
+ pr_debug("Link clocks are enabled\n");
+ return rc;
+error_unprepare:
+ dsi_link_clk_unprepare(clks);
+error:
+ return rc;
+}
+
+/**
+ * dsi_link_clk_stop() - Stop DSI link clocks.
+ */
+static int dsi_link_clk_stop(struct dsi_link_clk_info *clks)
+{
+ dsi_link_clk_disable(clks);
+ dsi_link_clk_unprepare(clks);
+
+ pr_debug("Link clocks disabled\n");
+
+ return 0;
+}
+
+/*
+ * dsi_pwr_parse_supply_node() - parse power supply node from root device node
+ */
+static int dsi_pwr_parse_supply_node(struct device_node *root,
+ struct dsi_regulator_info *regs)
+{
+ int rc = 0;
+ int i = 0;
+ u32 tmp = 0;
+ struct device_node *node = NULL;
+
+ for_each_child_of_node(root, node) {
+ const char *st = NULL;
+
+ rc = of_property_read_string(node, "qcom,supply-name", &st);
+ if (rc) {
+ pr_err("failed to read name, rc = %d\n", rc);
+ goto error;
+ }
+
+ snprintf(regs->vregs[i].vreg_name,
+ ARRAY_SIZE(regs->vregs[i].vreg_name),
+ "%s", st);
+
+ rc = of_property_read_u32(node, "qcom,supply-min-voltage",
+ &tmp);
+ if (rc) {
+ pr_err("failed to read min voltage, rc = %d\n", rc);
+ goto error;
+ }
+ regs->vregs[i].min_voltage = tmp;
+
+ rc = of_property_read_u32(node, "qcom,supply-max-voltage",
+ &tmp);
+ if (rc) {
+ pr_err("failed to read max voltage, rc = %d\n", rc);
+ goto error;
+ }
+ regs->vregs[i].max_voltage = tmp;
+
+ rc = of_property_read_u32(node, "qcom,supply-enable-load",
+ &tmp);
+ if (rc) {
+ pr_err("failed to read enable load, rc = %d\n", rc);
+ goto error;
+ }
+ regs->vregs[i].enable_load = tmp;
+
+ rc = of_property_read_u32(node, "qcom,supply-disable-load",
+ &tmp);
+ if (rc) {
+ pr_err("failed to read disable load, rc = %d\n", rc);
+ goto error;
+ }
+ regs->vregs[i].disable_load = tmp;
+
+ /* Optional values */
+ rc = of_property_read_u32(node, "qcom,supply-pre-on-sleep",
+ &tmp);
+ if (rc) {
+ pr_debug("pre-on-sleep not specified\n");
+ rc = 0;
+ } else {
+ regs->vregs[i].pre_on_sleep = tmp;
+ }
+
+ rc = of_property_read_u32(node, "qcom,supply-pre-off-sleep",
+ &tmp);
+ if (rc) {
+ pr_debug("pre-off-sleep not specified\n");
+ rc = 0;
+ } else {
+ regs->vregs[i].pre_off_sleep = tmp;
+ }
+
+ rc = of_property_read_u32(node, "qcom,supply-post-on-sleep",
+ &tmp);
+ if (rc) {
+ pr_debug("post-on-sleep not specified\n");
+ rc = 0;
+ } else {
+ regs->vregs[i].post_on_sleep = tmp;
+ }
+
+ rc = of_property_read_u32(node, "qcom,supply-post-off-sleep",
+ &tmp);
+ if (rc) {
+ pr_debug("post-off-sleep not specified\n");
+ rc = 0;
+ } else {
+ regs->vregs[i].post_off_sleep = tmp;
+ }
+
+		pr_debug("[%s] minv=%d maxv=%d, en_load=%d, dis_load=%d\n",
+			 regs->vregs[i].vreg_name,
+			 regs->vregs[i].min_voltage,
+			 regs->vregs[i].max_voltage,
+			 regs->vregs[i].enable_load,
+			 regs->vregs[i].disable_load);
+		++i;
+ }
+
+error:
+ return rc;
+}
+
+/**
+ * dsi_pwr_enable_vregs() - enable/disable regulators
+ */
+static int dsi_pwr_enable_vregs(struct dsi_regulator_info *regs, bool enable)
+{
+ int rc = 0, i = 0;
+ struct dsi_vreg *vreg;
+ int num_of_v = 0;
+
+ if (enable) {
+ for (i = 0; i < regs->count; i++) {
+ vreg = &regs->vregs[i];
+ if (vreg->pre_on_sleep)
+ msleep(vreg->pre_on_sleep);
+
+ rc = regulator_set_load(vreg->vreg,
+ vreg->enable_load);
+ if (rc < 0) {
+ pr_err("Setting optimum mode failed for %s\n",
+ vreg->vreg_name);
+ goto error;
+ }
+ num_of_v = regulator_count_voltages(vreg->vreg);
+ if (num_of_v > 0) {
+ rc = regulator_set_voltage(vreg->vreg,
+ vreg->min_voltage,
+ vreg->max_voltage);
+ if (rc) {
+ pr_err("Set voltage(%s) fail, rc=%d\n",
+ vreg->vreg_name, rc);
+ goto error_disable_opt_mode;
+ }
+ }
+
+ rc = regulator_enable(vreg->vreg);
+ if (rc) {
+ pr_err("enable failed for %s, rc=%d\n",
+ vreg->vreg_name, rc);
+ goto error_disable_voltage;
+ }
+
+ if (vreg->post_on_sleep)
+ msleep(vreg->post_on_sleep);
+ }
+ } else {
+ for (i = (regs->count - 1); i >= 0; i--) {
+ if (regs->vregs[i].pre_off_sleep)
+ msleep(regs->vregs[i].pre_off_sleep);
+
+ (void)regulator_set_load(regs->vregs[i].vreg,
+ regs->vregs[i].disable_load);
+ (void)regulator_disable(regs->vregs[i].vreg);
+
+ if (regs->vregs[i].post_off_sleep)
+ msleep(regs->vregs[i].post_off_sleep);
+ }
+ }
+
+ return 0;
+error_disable_opt_mode:
+ (void)regulator_set_load(regs->vregs[i].vreg,
+ regs->vregs[i].disable_load);
+
+error_disable_voltage:
+ if (num_of_v > 0)
+ (void)regulator_set_voltage(regs->vregs[i].vreg,
+ 0, regs->vregs[i].max_voltage);
+error:
+ for (i--; i >= 0; i--) {
+ if (regs->vregs[i].pre_off_sleep)
+ msleep(regs->vregs[i].pre_off_sleep);
+
+ (void)regulator_set_load(regs->vregs[i].vreg,
+ regs->vregs[i].disable_load);
+
+ num_of_v = regulator_count_voltages(regs->vregs[i].vreg);
+ if (num_of_v > 0)
+ (void)regulator_set_voltage(regs->vregs[i].vreg,
+ 0, regs->vregs[i].max_voltage);
+
+ (void)regulator_disable(regs->vregs[i].vreg);
+
+ if (regs->vregs[i].post_off_sleep)
+ msleep(regs->vregs[i].post_off_sleep);
+ }
+
+ return rc;
+}
+
+/**
+ * dsi_clk_pwr_of_get_vreg_data - Parse regulator supply information
+ * @of_node:     Device of node to parse for supply information.
+ * @regs:        Pointer where regulator information will be copied to.
+ * @supply_name: Name of the supply node.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_pwr_of_get_vreg_data(struct device_node *of_node,
+ struct dsi_regulator_info *regs,
+ char *supply_name)
+{
+ int rc = 0;
+ struct device_node *supply_root_node = NULL;
+
+ if (!of_node || !regs) {
+ pr_err("Bad params\n");
+ return -EINVAL;
+ }
+
+ regs->count = 0;
+ supply_root_node = of_get_child_by_name(of_node, supply_name);
+ if (!supply_root_node) {
+ supply_root_node = of_parse_phandle(of_node, supply_name, 0);
+ if (!supply_root_node) {
+ pr_err("No supply entry present for %s\n", supply_name);
+ return -EINVAL;
+ }
+ }
+
+ regs->count = of_get_available_child_count(supply_root_node);
+ if (regs->count == 0) {
+ pr_err("No vregs defined for %s\n", supply_name);
+ return -EINVAL;
+ }
+
+ regs->vregs = kcalloc(regs->count, sizeof(*regs->vregs), GFP_KERNEL);
+ if (!regs->vregs) {
+ regs->count = 0;
+ return -ENOMEM;
+ }
+
+ rc = dsi_pwr_parse_supply_node(supply_root_node, regs);
+ if (rc) {
+ pr_err("failed to parse supply node for %s, rc = %d\n",
+ supply_name, rc);
+
+ kfree(regs->vregs);
+ regs->vregs = NULL;
+ regs->count = 0;
+ }
+
+ return rc;
+}
+
+/**
+ * dsi_clk_pwr_get_dt_vreg_data - parse regulator supply information
+ * @dev: Device whose of_node needs to be parsed.
+ * @regs: Pointer where regulator information will be copied to.
+ * @supply_name: Name of the supply node.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_pwr_get_dt_vreg_data(struct device *dev,
+ struct dsi_regulator_info *regs,
+ char *supply_name)
+{
+ int rc = 0;
+ struct device_node *of_node = NULL;
+ struct device_node *supply_node = NULL;
+ struct device_node *supply_root_node = NULL;
+
+ if (!dev || !regs) {
+ pr_err("Bad params\n");
+ return -EINVAL;
+ }
+
+ of_node = dev->of_node;
+ regs->count = 0;
+ supply_root_node = of_get_child_by_name(of_node, supply_name);
+ if (!supply_root_node) {
+ supply_root_node = of_parse_phandle(of_node, supply_name, 0);
+ if (!supply_root_node) {
+ pr_err("No supply entry present for %s\n", supply_name);
+ return -EINVAL;
+ }
+ }
+
+ for_each_child_of_node(supply_root_node, supply_node)
+ regs->count++;
+
+ if (regs->count == 0) {
+ pr_err("No vregs defined for %s\n", supply_name);
+ return -EINVAL;
+ }
+
+ regs->vregs = devm_kcalloc(dev, regs->count, sizeof(*regs->vregs),
+ GFP_KERNEL);
+ if (!regs->vregs) {
+ regs->count = 0;
+ return -ENOMEM;
+ }
+
+ rc = dsi_pwr_parse_supply_node(supply_root_node, regs);
+ if (rc) {
+ pr_err("failed to parse supply node for %s, rc = %d\n",
+ supply_name, rc);
+ devm_kfree(dev, regs->vregs);
+ regs->vregs = NULL;
+ regs->count = 0;
+ }
+
+ return rc;
+}
+
+/**
+ * dsi_pwr_enable_regulator() - enable a set of regulators
+ * @regs: Pointer to set of regulators to enable or disable.
+ * @enable: Enable/Disable regulators.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_pwr_enable_regulator(struct dsi_regulator_info *regs, bool enable)
+{
+ int rc = 0;
+
+ if (enable) {
+ if (regs->refcount == 0) {
+ rc = dsi_pwr_enable_vregs(regs, true);
+ if (rc)
+ pr_err("failed to enable regulators\n");
+ }
+ regs->refcount++;
+ } else {
+ if (regs->refcount == 0) {
+ pr_err("Unbalanced regulator off\n");
+ } else {
+ regs->refcount--;
+ if (regs->refcount == 0) {
+ rc = dsi_pwr_enable_vregs(regs, false);
+ if (rc)
+ pr_err("failed to disable vregs\n");
+ }
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * dsi_clk_enable_core_clks() - enable DSI core clocks
+ * @clks: DSI core clock information.
+ * @enable: enable/disable DSI core clocks.
+ *
+ * A ref count is maintained, so caller should make sure disable and enable
+ * calls are balanced.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_enable_core_clks(struct dsi_core_clk_info *clks, bool enable)
+{
+ int rc = 0;
+
+ if (enable)
+ rc = INC_REFCOUNT(clks, dsi_core_clk_start);
+ else
+ rc = DEC_REFCOUNT(clks, dsi_core_clk_stop);
+
+ return rc;
+}
+
+/**
+ * dsi_clk_enable_link_clks() - enable DSI link clocks
+ * @clks: DSI link clock information.
+ * @enable: enable/disable DSI link clocks.
+ *
+ * A ref count is maintained, so caller should make sure disable and enable
+ * calls are balanced.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_enable_link_clks(struct dsi_link_clk_info *clks, bool enable)
+{
+ int rc = 0;
+
+ if (enable)
+ rc = INC_REFCOUNT(clks, dsi_link_clk_start);
+ else
+ rc = DEC_REFCOUNT(clks, dsi_link_clk_stop);
+
+ return rc;
+}
+
+/**
+ * dsi_clk_set_link_frequencies() - set frequencies for link clks
+ * @clks: Link clock information
+ * @pixel_clk: pixel clock frequency in KHz.
+ * @byte_clk: Byte clock frequency in KHz.
+ * @esc_clk: Escape clock frequency in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_link_frequencies(struct dsi_link_clk_info *clks,
+ u64 pixel_clk,
+ u64 byte_clk,
+ u64 esc_clk)
+{
+ int rc = 0;
+
+ clks->pixel_clk_rate = pixel_clk;
+ clks->byte_clk_rate = byte_clk;
+ clks->esc_clk_rate = esc_clk;
+ clks->set_new_rate = true;
+
+ return rc;
+}
+
+/**
+ * dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock
+ * @clks: DSI link clock information.
+ * @pixel_clk: Pixel clock rate in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_pixel_clk_rate(struct dsi_link_clk_info *clks, u64 pixel_clk)
+{
+ int rc = 0;
+
+ rc = clk_set_rate(clks->pixel_clk, pixel_clk);
+ if (rc)
+ pr_err("failed to set clk rate for pixel clk, rc=%d\n", rc);
+ else
+ clks->pixel_clk_rate = pixel_clk;
+
+ return rc;
+}
+
+/**
+ * dsi_clk_set_byte_clk_rate() - set frequency for byte clock
+ * @clks: DSI link clock information.
+ * @byte_clk: Byte clock rate in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_byte_clk_rate(struct dsi_link_clk_info *clks, u64 byte_clk)
+{
+ int rc = 0;
+
+ rc = clk_set_rate(clks->byte_clk, byte_clk);
+ if (rc)
+ pr_err("failed to set clk rate for byte clk, rc=%d\n", rc);
+ else
+ clks->byte_clk_rate = byte_clk;
+
+ return rc;
+}
+
+/**
+ * dsi_clk_update_parent() - update parent clocks for specified clock
+ * @parent: link clock pair which are set as parent.
+ * @child: link clock pair whose parent has to be set.
+ */
+int dsi_clk_update_parent(struct dsi_clk_link_set *parent,
+ struct dsi_clk_link_set *child)
+{
+ int rc = 0;
+
+ rc = clk_set_parent(child->byte_clk, parent->byte_clk);
+ if (rc) {
+ pr_err("failed to set byte clk parent\n");
+ goto error;
+ }
+
+ rc = clk_set_parent(child->pixel_clk, parent->pixel_clk);
+ if (rc) {
+ pr_err("failed to set pixel clk parent\n");
+ goto error;
+ }
+error:
+ return rc;
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.h b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.h
new file mode 100644
index 000000000000..223ca4ec4290
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CLK_PWR_H_
+#define _DSI_CLK_PWR_H_
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+
+/**
+ * struct dsi_vreg - regulator information for DSI regulators
+ * @vreg: Handle to the regulator.
+ * @vreg_name: Regulator name.
+ * @min_voltage: Minimum voltage in uV.
+ * @max_voltage: Maximum voltage in uV.
+ * @enable_load: Load, in uA, when enabled.
+ * @disable_load: Load, in uA, when disabled.
+ * @pre_on_sleep: Sleep, in ms, before enabling the regulator.
+ * @post_on_sleep: Sleep, in ms, after enabling the regulator.
+ * @pre_off_sleep: Sleep, in ms, before disabling the regulator.
+ * @post_off_sleep: Sleep, in ms, after disabling the regulator.
+ */
+struct dsi_vreg {
+ struct regulator *vreg;
+ char vreg_name[32];
+ u32 min_voltage;
+ u32 max_voltage;
+ u32 enable_load;
+ u32 disable_load;
+ u32 pre_on_sleep;
+ u32 post_on_sleep;
+ u32 pre_off_sleep;
+ u32 post_off_sleep;
+};
+
+/**
+ * struct dsi_regulator_info - set of vregs that are turned on/off together.
+ * @vregs: Array of dsi_vreg structures.
+ * @count: Number of vregs.
+ * @refcount: Reference counting for enabling.
+ */
+struct dsi_regulator_info {
+ struct dsi_vreg *vregs;
+ u32 count;
+ u32 refcount;
+};
+
+/**
+ * struct dsi_core_clk_info - Core clock information for DSI hardware
+ * @mdp_core_clk: Handle to MDP core clock.
+ * @iface_clk: Handle to MDP interface clock.
+ * @core_mmss_clk: Handle to MMSS core clock.
+ * @bus_clk: Handle to bus clock.
+ * @refcount: Reference count for core clocks.
+ * @clk_state: Current clock state.
+ */
+struct dsi_core_clk_info {
+ struct clk *mdp_core_clk;
+ struct clk *iface_clk;
+ struct clk *core_mmss_clk;
+ struct clk *bus_clk;
+
+ u32 refcount;
+ u32 clk_state;
+};
+
+/**
+ * struct dsi_link_clk_info - Link clock information for DSI hardware.
+ * @byte_clk: Handle to DSI byte clock.
+ * @byte_clk_rate: Frequency of DSI byte clock in KHz.
+ * @pixel_clk: Handle to DSI pixel clock.
+ * @pixel_clk_rate: Frequency of DSI pixel clock in KHz.
+ * @esc_clk: Handle to DSI escape clock.
+ * @esc_clk_rate: Frequency of DSI escape clock in KHz.
+ * @refcount: Reference count for link clocks.
+ * @clk_state: Current clock state.
+ * @set_new_rate: private flag used by clock utility.
+ */
+struct dsi_link_clk_info {
+ struct clk *byte_clk;
+ u64 byte_clk_rate;
+
+ struct clk *pixel_clk;
+ u64 pixel_clk_rate;
+
+ struct clk *esc_clk;
+ u64 esc_clk_rate;
+
+ u32 refcount;
+ u32 clk_state;
+ bool set_new_rate;
+};
+
+/**
+ * struct dsi_clk_link_set - Pair of clock handles to describe link clocks
+ * @byte_clk: Handle to DSi byte clock.
+ * @pixel_clk: Handle to DSI pixel clock.
+ */
+struct dsi_clk_link_set {
+ struct clk *byte_clk;
+ struct clk *pixel_clk;
+};
+
+/**
+ * dsi_clk_pwr_of_get_vreg_data - parse regulator supply information
+ * @of_node: Device of node to parse for supply information.
+ * @regs: Pointer where regulator information will be copied to.
+ * @supply_name: Name of the supply node.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_pwr_of_get_vreg_data(struct device_node *of_node,
+ struct dsi_regulator_info *regs,
+ char *supply_name);
+
+/**
+ * dsi_clk_pwr_get_dt_vreg_data - parse regulator supply information
+ * @dev: Device whose of_node needs to be parsed.
+ * @regs: Pointer where regulator information will be copied to.
+ * @supply_name: Name of the supply node.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_pwr_get_dt_vreg_data(struct device *dev,
+ struct dsi_regulator_info *regs,
+ char *supply_name);
+
+/**
+ * dsi_pwr_enable_regulator() - enable a set of regulators
+ * @regs: Pointer to set of regulators to enable or disable.
+ * @enable: Enable/Disable regulators.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_pwr_enable_regulator(struct dsi_regulator_info *regs, bool enable);
+
+/**
+ * dsi_clk_enable_core_clks() - enable DSI core clocks
+ * @clks: DSI core clock information.
+ * @enable: enable/disable DSI core clocks.
+ *
+ * A ref count is maintained, so caller should make sure disable and enable
+ * calls are balanced.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_enable_core_clks(struct dsi_core_clk_info *clks, bool enable);
+
+/**
+ * dsi_clk_enable_link_clks() - enable DSI link clocks
+ * @clks: DSI link clock information.
+ * @enable: enable/disable DSI link clocks.
+ *
+ * A ref count is maintained, so caller should make sure disable and enable
+ * calls are balanced.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_enable_link_clks(struct dsi_link_clk_info *clks, bool enable);
+
+/**
+ * dsi_clk_set_link_frequencies() - set frequencies for link clks
+ * @clks: Link clock information
+ * @pixel_clk: pixel clock frequency in KHz.
+ * @byte_clk: Byte clock frequency in KHz.
+ * @esc_clk: Escape clock frequency in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_link_frequencies(struct dsi_link_clk_info *clks,
+ u64 pixel_clk,
+ u64 byte_clk,
+ u64 esc_clk);
+
+/**
+ * dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock
+ * @clks: DSI link clock information.
+ * @pixel_clk: Pixel clock rate in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_pixel_clk_rate(struct dsi_link_clk_info *clks, u64 pixel_clk);
+
+/**
+ * dsi_clk_set_byte_clk_rate() - set frequency for byte clock
+ * @clks: DSI link clock information.
+ * @byte_clk: Byte clock rate in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_byte_clk_rate(struct dsi_link_clk_info *clks, u64 byte_clk);
+
+/**
+ * dsi_clk_update_parent() - update parent clocks for specified clock
+ * @parent: link clock pair which are set as parent.
+ * @child: link clock pair whose parent has to be set.
+ */
+int dsi_clk_update_parent(struct dsi_clk_link_set *parent,
+ struct dsi_clk_link_set *child);
+#endif /* _DSI_CLK_PWR_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
new file mode 100644
index 000000000000..b8520aadbc0c
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -0,0 +1,2302 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "dsi-ctrl:[%s] " fmt, __func__
+
+#include <linux/of_device.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include <linux/of_irq.h>
+#include <video/mipi_display.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_gpu.h"
+#include "dsi_ctrl.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_clk_pwr.h"
+#include "dsi_catalog.h"
+
+#define DSI_CTRL_DEFAULT_LABEL "MDSS DSI CTRL"
+
+#define DSI_CTRL_TX_TO_MS 200
+
+#define TO_ON_OFF(x) ((x) ? "ON" : "OFF")
+/**
+ * enum dsi_ctrl_driver_ops - controller operations checked against the
+ *			      driver state machine (dsi_ctrl_check_state())
+ *			      and recorded by dsi_ctrl_update_state().
+ */
+enum dsi_ctrl_driver_ops {
+	DSI_CTRL_OP_POWER_STATE_CHANGE,	/* vreg/core-clk/link-clk transitions */
+	DSI_CTRL_OP_CMD_ENGINE,		/* command engine on/off */
+	DSI_CTRL_OP_VID_ENGINE,		/* video engine on/off */
+	DSI_CTRL_OP_HOST_ENGINE,	/* host controller on/off */
+	DSI_CTRL_OP_CMD_TX,		/* command transfer */
+	DSI_CTRL_OP_ULPS_TOGGLE,	/* ultra-low-power state entry/exit */
+	DSI_CTRL_OP_CLAMP_TOGGLE,	/* PHY lane clamp enable/disable */
+	DSI_CTRL_OP_SET_CLK_SOURCE,	/* byte/pixel clock parent selection */
+	DSI_CTRL_OP_HOST_INIT,		/* host init/deinit */
+	DSI_CTRL_OP_TPG,		/* test pattern generator */
+	DSI_CTRL_OP_PHY_SW_RESET,	/* PHY software reset */
+	DSI_CTRL_OP_ASYNC_TIMING,	/* seamless timing update */
+	DSI_CTRL_OP_MAX
+};
+
+/* Node type for the global list of probed DSI controllers. */
+struct dsi_ctrl_list_item {
+	struct dsi_ctrl *ctrl;
+	struct list_head list;
+};
+
+/* All probed controllers; protected by dsi_ctrl_list_lock. */
+static LIST_HEAD(dsi_ctrl_list);
+static DEFINE_MUTEX(dsi_ctrl_list_lock);
+
+static const enum dsi_ctrl_version dsi_ctrl_v1_4 = DSI_CTRL_VERSION_1_4;
+static const enum dsi_ctrl_version dsi_ctrl_v2_0 = DSI_CTRL_VERSION_2_0;
+
+/* The of_match data selects the hardware catalog version per compatible. */
+static const struct of_device_id msm_dsi_of_match[] = {
+	{
+		.compatible = "qcom,dsi-ctrl-hw-v1.4",
+		.data = &dsi_ctrl_v1_4,
+	},
+	{
+		.compatible = "qcom,dsi-ctrl-hw-v2.0",
+		.data = &dsi_ctrl_v2_0,
+	},
+	{}
+};
+
+/**
+ * debugfs_state_info_read() - debugfs read handler for the "state_info" node
+ * @file:  opened debugfs file; private_data holds the dsi_ctrl instance.
+ * @buff:  user buffer receiving the textual state dump.
+ * @count: size of @buff in bytes.
+ * @ppos:  file position; any non-zero offset terminates the single-shot read.
+ *
+ * Return: number of bytes copied to user space or a negative error code.
+ */
+static ssize_t debugfs_state_info_read(struct file *file,
+				       char __user *buff,
+				       size_t count,
+				       loff_t *ppos)
+{
+	struct dsi_ctrl *dsi_ctrl = file->private_data;
+	char *buf;
+	u32 len = 0;
+
+	if (!dsi_ctrl)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(SZ_4K, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/*
+	 * Use scnprintf() rather than snprintf(): snprintf() returns the
+	 * would-be length, so once len exceeded SZ_4K the (SZ_4K - len)
+	 * size argument would underflow and subsequent calls plus the
+	 * final copy_to_user() could run past the 4K buffer. scnprintf()
+	 * returns the bytes actually written, keeping len <= SZ_4K.
+	 */
+	/* Dump current state */
+	len += scnprintf((buf + len), (SZ_4K - len), "Current State:\n");
+	len += scnprintf((buf + len), (SZ_4K - len),
+			"\tPOWER_STATUS = %s\n\tCORE_CLOCK = %s\n",
+			TO_ON_OFF(dsi_ctrl->current_state.pwr_enabled),
+			TO_ON_OFF(dsi_ctrl->current_state.core_clk_enabled));
+	len += scnprintf((buf + len), (SZ_4K - len),
+			"\tLINK_CLOCK = %s\n\tULPS_STATUS = %s\n",
+			TO_ON_OFF(dsi_ctrl->current_state.link_clk_enabled),
+			TO_ON_OFF(dsi_ctrl->current_state.ulps_enabled));
+	len += scnprintf((buf + len), (SZ_4K - len),
+			"\tCLAMP_STATUS = %s\n\tCTRL_ENGINE = %s\n",
+			TO_ON_OFF(dsi_ctrl->current_state.clamp_enabled),
+			TO_ON_OFF(dsi_ctrl->current_state.controller_state));
+	len += scnprintf((buf + len), (SZ_4K - len),
+			"\tVIDEO_ENGINE = %s\n\tCOMMAND_ENGINE = %s\n",
+			TO_ON_OFF(dsi_ctrl->current_state.vid_engine_state),
+			TO_ON_OFF(dsi_ctrl->current_state.cmd_engine_state));
+
+	/* Dump clock information */
+	len += scnprintf((buf + len), (SZ_4K - len), "\nClock Info:\n");
+	len += scnprintf((buf + len), (SZ_4K - len),
+			"\tBYTE_CLK = %llu, PIXEL_CLK = %llu, ESC_CLK = %llu\n",
+			dsi_ctrl->clk_info.link_clks.byte_clk_rate,
+			dsi_ctrl->clk_info.link_clks.pixel_clk_rate,
+			dsi_ctrl->clk_info.link_clks.esc_clk_rate);
+
+	/* Never copy more than the caller asked for. */
+	if (len > count)
+		len = count;
+
+	if (copy_to_user(buff, buf, len)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+	kfree(buf);
+	return len;
+}
+
+/**
+ * debugfs_reg_dump_read() - debugfs read handler for the "reg_dump" node
+ * @file:  opened debugfs file; private_data holds the dsi_ctrl instance.
+ * @buff:  user buffer receiving the register dump.
+ * @count: size of @buff in bytes.
+ * @ppos:  file position; any non-zero offset terminates the single-shot read.
+ *
+ * A register dump is only possible while the core clocks are on; otherwise
+ * an explanatory message is returned instead.
+ *
+ * Return: number of bytes copied to user space or a negative error code.
+ */
+static ssize_t debugfs_reg_dump_read(struct file *file,
+				     char __user *buff,
+				     size_t count,
+				     loff_t *ppos)
+{
+	struct dsi_ctrl *dsi_ctrl = file->private_data;
+	char *buf;
+	u32 len = 0;
+
+	if (!dsi_ctrl)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(SZ_4K, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (dsi_ctrl->current_state.core_clk_enabled) {
+		len = dsi_ctrl->hw.ops.reg_dump_to_buffer(&dsi_ctrl->hw,
+							  buf,
+							  SZ_4K);
+	} else {
+		len = scnprintf((buf + len), (SZ_4K - len),
+				"Core clocks are not turned on, cannot read\n");
+	}
+
+	/*
+	 * Clamp defensively: trust neither the hw dump length nor the
+	 * caller's buffer size beyond what was actually allocated.
+	 */
+	if (len > SZ_4K)
+		len = SZ_4K;
+	if (len > count)
+		len = count;
+
+	if (copy_to_user(buff, buf, len)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+	kfree(buf);
+	return len;
+}
+
+/* debugfs "state_info": textual driver/hw state dump. */
+static const struct file_operations state_info_fops = {
+	.open = simple_open,
+	.read = debugfs_state_info_read,
+};
+
+/* debugfs "reg_dump": raw register dump (requires core clocks on). */
+static const struct file_operations reg_dump_fops = {
+	.open = simple_open,
+	.read = debugfs_reg_dump_read,
+};
+
+/**
+ * dsi_ctrl_debugfs_init() - create debugfs nodes for the controller
+ * @dsi_ctrl: DSI controller handle.
+ * @parent:   parent debugfs directory.
+ *
+ * Creates <parent>/<name>/{state_info,reg_dump}. On any failure the whole
+ * directory is removed again.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int dsi_ctrl_debugfs_init(struct dsi_ctrl *dsi_ctrl,
+				 struct dentry *parent)
+{
+	int rc = 0;
+	struct dentry *dir, *state_file, *reg_dump;
+
+	dir = debugfs_create_dir(dsi_ctrl->name, parent);
+	if (IS_ERR_OR_NULL(dir)) {
+		rc = PTR_ERR(dir);
+		pr_err("[DSI_%d] debugfs create dir failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	state_file = debugfs_create_file("state_info",
+					 0444,
+					 dir,
+					 dsi_ctrl,
+					 &state_info_fops);
+	if (IS_ERR_OR_NULL(state_file)) {
+		rc = PTR_ERR(state_file);
+		pr_err("[DSI_%d] state file failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error_remove_dir;
+	}
+
+	reg_dump = debugfs_create_file("reg_dump",
+				       0444,
+				       dir,
+				       dsi_ctrl,
+				       &reg_dump_fops);
+	if (IS_ERR_OR_NULL(reg_dump)) {
+		rc = PTR_ERR(reg_dump);
+		pr_err("[DSI_%d] reg dump file failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error_remove_dir;
+	}
+
+	dsi_ctrl->debugfs_root = dir;
+
+	/*
+	 * Bug fix: without this return the success path fell through to
+	 * error_remove_dir and immediately destroyed the directory that
+	 * was just created and published in debugfs_root.
+	 */
+	return rc;
+
+error_remove_dir:
+	debugfs_remove(dir);
+error:
+	return rc;
+}
+
+/*
+ * Tear down the controller's debugfs directory.
+ * NOTE(review): debugfs_remove() removes only the given dentry;
+ * debugfs_remove_recursive() may be intended for a directory with
+ * children — confirm against the target kernel version.
+ */
+static int dsi_ctrl_debugfs_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+	debugfs_remove(dsi_ctrl->debugfs_root);
+	return 0;
+}
+
+/**
+ * dsi_ctrl_check_state() - validate an operation against the current state
+ * @dsi_ctrl: DSI controller handle.
+ * @op:       driver operation about to be performed.
+ * @op_state: target state/argument of the operation.
+ *
+ * Pure query: never modifies the state (dsi_ctrl_update_state() does that
+ * once the operation has succeeded). Rejects no-op transitions and
+ * operations whose power/engine prerequisites are not satisfied.
+ *
+ * Return: 0 if the operation is allowed, -EINVAL if it conflicts with the
+ * current state, -ENOTSUPP for an unknown op.
+ */
+static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
+				enum dsi_ctrl_driver_ops op,
+				u32 op_state)
+{
+	int rc = 0;
+	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
+
+	switch (op) {
+	case DSI_CTRL_OP_POWER_STATE_CHANGE:
+		if (state->power_state == op_state) {
+			pr_debug("[%d] No change in state, pwr_state=%d\n",
+				 dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if (state->power_state == DSI_CTRL_POWER_LINK_CLK_ON) {
+			/* Link clocks may only drop while all engines are off. */
+			if ((state->cmd_engine_state == DSI_CTRL_ENGINE_ON) ||
+			    (state->vid_engine_state == DSI_CTRL_ENGINE_ON) ||
+			    (state->controller_state == DSI_CTRL_ENGINE_ON)) {
+				/*
+				 * NOTE(review): format says "op=%d" but
+				 * op_state is passed here (other cases pass
+				 * op) — confirm which was intended.
+				 */
+				pr_debug("[%d]State error: op=%d: %d, %d, %d\n",
+					 dsi_ctrl->index,
+					 op_state,
+					 state->cmd_engine_state,
+					 state->vid_engine_state,
+					 state->controller_state);
+				rc = -EINVAL;
+			}
+		}
+		break;
+	case DSI_CTRL_OP_CMD_ENGINE:
+		/* Requires link clocks on and host engine running. */
+		if (state->cmd_engine_state == op_state) {
+			pr_debug("[%d] No change in state, cmd_state=%d\n",
+				 dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+			   (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+			pr_debug("[%d]State error: op=%d: %d, %d\n",
+				 dsi_ctrl->index,
+				 op,
+				 state->power_state,
+				 state->controller_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_VID_ENGINE:
+		/* Same prerequisites as the command engine. */
+		if (state->vid_engine_state == op_state) {
+			pr_debug("[%d] No change in state, cmd_state=%d\n",
+				 dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+			   (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+			pr_debug("[%d]State error: op=%d: %d, %d\n",
+				 dsi_ctrl->index,
+				 op,
+				 state->power_state,
+				 state->controller_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_HOST_ENGINE:
+		/*
+		 * Host engine needs link clocks; it may not be turned off
+		 * while either the cmd or vid engine is still running.
+		 */
+		if (state->controller_state == op_state) {
+			pr_debug("[%d] No change in state, ctrl_state=%d\n",
+				 dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if (state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) {
+			pr_debug("[%d]State error (link is off): op=%d:, %d\n",
+				 dsi_ctrl->index,
+				 op_state,
+				 state->power_state);
+			rc = -EINVAL;
+		} else if ((op_state == DSI_CTRL_ENGINE_OFF) &&
+			   ((state->cmd_engine_state != DSI_CTRL_ENGINE_OFF) ||
+			    (state->vid_engine_state != DSI_CTRL_ENGINE_OFF))) {
+			pr_debug("[%d]State error (eng on): op=%d: %d, %d\n",
+				 dsi_ctrl->index,
+				 op_state,
+				 state->cmd_engine_state,
+				 state->vid_engine_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_CMD_TX:
+		/* Command TX needs link clocks, host init and cmd engine on. */
+		if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+		    (state->host_initialized != true) ||
+		    (state->cmd_engine_state != DSI_CTRL_ENGINE_ON)) {
+			pr_debug("[%d]State error: op=%d: %d, %d, %d\n",
+				 dsi_ctrl->index,
+				 op,
+				 state->power_state,
+				 state->host_initialized,
+				 state->cmd_engine_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_HOST_INIT:
+		/* Host (de)init is done with core clocks on, link clocks off. */
+		if (state->host_initialized == op_state) {
+			pr_debug("[%d] No change in state, host_init=%d\n",
+				 dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if (state->power_state != DSI_CTRL_POWER_CORE_CLK_ON) {
+			pr_debug("[%d]State error: op=%d: %d\n",
+				 dsi_ctrl->index, op, state->power_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_ULPS_TOGGLE:
+		if (state->ulps_enabled == op_state) {
+			pr_debug("[%d] No change in state, ulps_enabled=%d\n",
+				 dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+			   (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+			pr_debug("[%d]State error: op=%d: %d, %d\n",
+				 dsi_ctrl->index,
+				 op,
+				 state->power_state,
+				 state->controller_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_CLAMP_TOGGLE:
+		if (state->clamp_enabled == op_state) {
+			pr_debug("[%d] No change in state, clamp_enabled=%d\n",
+				 dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+			   (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+			pr_debug("[%d]State error: op=%d: %d, %d\n",
+				 dsi_ctrl->index,
+				 op,
+				 state->power_state,
+				 state->controller_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_SET_CLK_SOURCE:
+		/* Clock parents cannot be switched while link clocks run. */
+		if (state->power_state == DSI_CTRL_POWER_LINK_CLK_ON) {
+			pr_debug("[%d] State error: op=%d: %d\n",
+				 dsi_ctrl->index,
+				 op,
+				 state->power_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_TPG:
+		if (state->tpg_enabled == op_state) {
+			pr_debug("[%d] No change in state, tpg_enabled=%d\n",
+				 dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		} else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+			   (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+			pr_debug("[%d]State error: op=%d: %d, %d\n",
+				 dsi_ctrl->index,
+				 op,
+				 state->power_state,
+				 state->controller_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_PHY_SW_RESET:
+		/* PHY reset requires core clocks on, link clocks off. */
+		if (state->power_state != DSI_CTRL_POWER_CORE_CLK_ON) {
+			pr_debug("[%d]State error: op=%d: %d\n",
+				 dsi_ctrl->index, op, state->power_state);
+			rc = -EINVAL;
+		}
+		break;
+	case DSI_CTRL_OP_ASYNC_TIMING:
+		/* Seamless timing update is only valid in the given vid state. */
+		if (state->vid_engine_state != op_state) {
+			pr_err("[%d] Unexpected engine state vid_state=%d\n",
+			       dsi_ctrl->index, op_state);
+			rc = -EINVAL;
+		}
+		break;
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * dsi_ctrl_update_state() - record a successfully performed operation
+ * @dsi_ctrl: DSI controller handle.
+ * @op:       operation that was performed.
+ * @op_state: target state/argument of the operation.
+ *
+ * Counterpart of dsi_ctrl_check_state(): callers validate first, perform
+ * the hardware operation, then record the new state here. CMD_TX and
+ * PHY_SW_RESET leave the tracked state unchanged.
+ */
+static void dsi_ctrl_update_state(struct dsi_ctrl *dsi_ctrl,
+				  enum dsi_ctrl_driver_ops op,
+				  u32 op_state)
+{
+	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
+
+	switch (op) {
+	case DSI_CTRL_OP_POWER_STATE_CHANGE:
+		/*
+		 * Derive the individual pwr/core/link flags from the single
+		 * power state; each level implies all lower ones are on.
+		 */
+		state->power_state = op_state;
+		if (op_state == DSI_CTRL_POWER_OFF) {
+			state->pwr_enabled = false;
+			state->core_clk_enabled = false;
+			state->link_clk_enabled = false;
+		} else if (op_state == DSI_CTRL_POWER_VREG_ON) {
+			state->pwr_enabled = true;
+			state->core_clk_enabled = false;
+			state->link_clk_enabled = false;
+		} else if (op_state == DSI_CTRL_POWER_CORE_CLK_ON) {
+			state->pwr_enabled = true;
+			state->core_clk_enabled = true;
+			state->link_clk_enabled = false;
+		} else if (op_state == DSI_CTRL_POWER_LINK_CLK_ON) {
+			state->pwr_enabled = true;
+			state->core_clk_enabled = true;
+			state->link_clk_enabled = true;
+		}
+		break;
+	case DSI_CTRL_OP_CMD_ENGINE:
+		state->cmd_engine_state = op_state;
+		break;
+	case DSI_CTRL_OP_VID_ENGINE:
+		state->vid_engine_state = op_state;
+		break;
+	case DSI_CTRL_OP_HOST_ENGINE:
+		state->controller_state = op_state;
+		break;
+	case DSI_CTRL_OP_ULPS_TOGGLE:
+		state->ulps_enabled = (op_state == 1) ? true : false;
+		break;
+	case DSI_CTRL_OP_CLAMP_TOGGLE:
+		state->clamp_enabled = (op_state == 1) ? true : false;
+		break;
+	case DSI_CTRL_OP_SET_CLK_SOURCE:
+		state->clk_source_set = (op_state == 1) ? true : false;
+		break;
+	case DSI_CTRL_OP_HOST_INIT:
+		state->host_initialized = (op_state == 1) ? true : false;
+		break;
+	case DSI_CTRL_OP_TPG:
+		state->tpg_enabled = (op_state == 1) ? true : false;
+		break;
+	case DSI_CTRL_OP_CMD_TX:
+	case DSI_CTRL_OP_PHY_SW_RESET:
+	default:
+		/* Stateless operations: nothing to record. */
+		break;
+	}
+}
+
+/**
+ * dsi_ctrl_init_regmap() - map the controller register regions
+ * @pdev: platform device providing the "dsi_ctrl" and "mmss_misc" resources.
+ * @ctrl: DSI controller handle to populate (hw.base, hw.mmss_misc_base).
+ *
+ * Both mappings are device-managed (msm_ioremap), so no explicit unmap
+ * is required on the error path.
+ *
+ * Return: 0 on success, negative error code if either mapping fails.
+ */
+static int dsi_ctrl_init_regmap(struct platform_device *pdev,
+				struct dsi_ctrl *ctrl)
+{
+	int rc = 0;
+	void __iomem *ptr;
+
+	ptr = msm_ioremap(pdev, "dsi_ctrl", ctrl->name);
+	if (IS_ERR(ptr)) {
+		rc = PTR_ERR(ptr);
+		return rc;
+	}
+
+	ctrl->hw.base = ptr;
+	pr_debug("[%s] map dsi_ctrl registers to %p\n", ctrl->name,
+		 ctrl->hw.base);
+
+	ptr = msm_ioremap(pdev, "mmss_misc", ctrl->name);
+	if (IS_ERR(ptr)) {
+		rc = PTR_ERR(ptr);
+		return rc;
+	}
+
+	ctrl->hw.mmss_misc_base = ptr;
+	pr_debug("[%s] map mmss_misc registers to %p\n", ctrl->name,
+		 ctrl->hw.mmss_misc_base);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_clocks_deinit() - release all clock handles
+ * @ctrl: DSI controller handle.
+ *
+ * Puts every non-NULL core, link and RCG clock obtained in
+ * dsi_ctrl_clocks_init() and zeroes the structures so the function is
+ * safe to call from a partially-initialized state (the init error path).
+ *
+ * Return: always 0.
+ */
+static int dsi_ctrl_clocks_deinit(struct dsi_ctrl *ctrl)
+{
+	struct dsi_core_clk_info *core = &ctrl->clk_info.core_clks;
+	struct dsi_link_clk_info *link = &ctrl->clk_info.link_clks;
+	struct dsi_clk_link_set *rcg = &ctrl->clk_info.rcg_clks;
+
+	if (core->mdp_core_clk)
+		devm_clk_put(&ctrl->pdev->dev, core->mdp_core_clk);
+	if (core->iface_clk)
+		devm_clk_put(&ctrl->pdev->dev, core->iface_clk);
+	if (core->core_mmss_clk)
+		devm_clk_put(&ctrl->pdev->dev, core->core_mmss_clk);
+	if (core->bus_clk)
+		devm_clk_put(&ctrl->pdev->dev, core->bus_clk);
+
+	memset(core, 0x0, sizeof(*core));
+
+	if (link->byte_clk)
+		devm_clk_put(&ctrl->pdev->dev, link->byte_clk);
+	if (link->pixel_clk)
+		devm_clk_put(&ctrl->pdev->dev, link->pixel_clk);
+	if (link->esc_clk)
+		devm_clk_put(&ctrl->pdev->dev, link->esc_clk);
+
+	memset(link, 0x0, sizeof(*link));
+
+	if (rcg->byte_clk)
+		devm_clk_put(&ctrl->pdev->dev, rcg->byte_clk);
+	if (rcg->pixel_clk)
+		devm_clk_put(&ctrl->pdev->dev, rcg->pixel_clk);
+
+	memset(rcg, 0x0, sizeof(*rcg));
+
+	return 0;
+}
+
+/**
+ * dsi_ctrl_clocks_init() - acquire core, link and RCG clock handles
+ * @pdev: platform device to look the clocks up on.
+ * @ctrl: DSI controller handle to populate.
+ *
+ * On any failure every clock acquired so far is released via
+ * dsi_ctrl_clocks_deinit() (which tolerates NULL entries).
+ *
+ * Return: 0 on success, negative error code from devm_clk_get() otherwise.
+ */
+static int dsi_ctrl_clocks_init(struct platform_device *pdev,
+				struct dsi_ctrl *ctrl)
+{
+	int rc = 0;
+	struct dsi_core_clk_info *core = &ctrl->clk_info.core_clks;
+	struct dsi_link_clk_info *link = &ctrl->clk_info.link_clks;
+	struct dsi_clk_link_set *rcg = &ctrl->clk_info.rcg_clks;
+
+	core->mdp_core_clk = devm_clk_get(&pdev->dev, "mdp_core_clk");
+	if (IS_ERR(core->mdp_core_clk)) {
+		rc = PTR_ERR(core->mdp_core_clk);
+		pr_err("failed to get mdp_core_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->iface_clk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(core->iface_clk)) {
+		rc = PTR_ERR(core->iface_clk);
+		pr_err("failed to get iface_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->core_mmss_clk = devm_clk_get(&pdev->dev, "core_mmss_clk");
+	if (IS_ERR(core->core_mmss_clk)) {
+		rc = PTR_ERR(core->core_mmss_clk);
+		pr_err("failed to get core_mmss_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (IS_ERR(core->bus_clk)) {
+		rc = PTR_ERR(core->bus_clk);
+		pr_err("failed to get bus_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	link->byte_clk = devm_clk_get(&pdev->dev, "byte_clk");
+	if (IS_ERR(link->byte_clk)) {
+		rc = PTR_ERR(link->byte_clk);
+		pr_err("failed to get byte_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	link->pixel_clk = devm_clk_get(&pdev->dev, "pixel_clk");
+	if (IS_ERR(link->pixel_clk)) {
+		rc = PTR_ERR(link->pixel_clk);
+		pr_err("failed to get pixel_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	/* DT names the escape clock "core_clk". */
+	link->esc_clk = devm_clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(link->esc_clk)) {
+		rc = PTR_ERR(link->esc_clk);
+		pr_err("failed to get esc_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rcg->byte_clk = devm_clk_get(&pdev->dev, "byte_clk_rcg");
+	if (IS_ERR(rcg->byte_clk)) {
+		rc = PTR_ERR(rcg->byte_clk);
+		pr_err("failed to get byte_clk_rcg, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rcg->pixel_clk = devm_clk_get(&pdev->dev, "pixel_clk_rcg");
+	if (IS_ERR(rcg->pixel_clk)) {
+		rc = PTR_ERR(rcg->pixel_clk);
+		pr_err("failed to get pixel_clk_rcg, rc=%d\n", rc);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	dsi_ctrl_clocks_deinit(ctrl);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_supplies_deinit() - release regulator handles and vreg arrays
+ * @ctrl: DSI controller handle.
+ *
+ * Puts every regulator acquired in dsi_ctrl_supplies_init() and frees the
+ * vreg descriptor arrays parsed from devicetree.
+ *
+ * Return: always 0.
+ */
+static int dsi_ctrl_supplies_deinit(struct dsi_ctrl *ctrl)
+{
+	int i = 0;
+	int rc = 0;
+	struct dsi_regulator_info *regs;
+
+	regs = &ctrl->pwr_info.digital;
+	for (i = 0; i < regs->count; i++) {
+		if (!regs->vregs[i].vreg)
+			pr_err("vreg is NULL, should not reach here\n");
+		else
+			devm_regulator_put(regs->vregs[i].vreg);
+	}
+
+	regs = &ctrl->pwr_info.host_pwr;
+	for (i = 0; i < regs->count; i++) {
+		if (!regs->vregs[i].vreg)
+			pr_err("vreg is NULL, should not reach here\n");
+		else
+			devm_regulator_put(regs->vregs[i].vreg);
+	}
+
+	/*
+	 * Bug fix: these guards were inverted ("if (!...vregs)"), so the
+	 * arrays were only "freed" when they were already NULL and were
+	 * leaked in the normal case.
+	 */
+	if (ctrl->pwr_info.host_pwr.vregs) {
+		devm_kfree(&ctrl->pdev->dev, ctrl->pwr_info.host_pwr.vregs);
+		ctrl->pwr_info.host_pwr.vregs = NULL;
+		ctrl->pwr_info.host_pwr.count = 0;
+	}
+
+	if (ctrl->pwr_info.digital.vregs) {
+		devm_kfree(&ctrl->pdev->dev, ctrl->pwr_info.digital.vregs);
+		ctrl->pwr_info.digital.vregs = NULL;
+		ctrl->pwr_info.digital.count = 0;
+	}
+
+	return rc;
+}
+
+/**
+ * dsi_ctrl_supplies_init() - parse and acquire the controller's regulators
+ * @pdev: platform device whose DT node lists the supplies.
+ * @ctrl: DSI controller handle to populate.
+ *
+ * Parses "qcom,core-supply-entries" (digital/GDSC) and
+ * "qcom,ctrl-supply-entries" (host analog power) and acquires a regulator
+ * handle for every entry. Unwinds fully on failure.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int dsi_ctrl_supplies_init(struct platform_device *pdev,
+				  struct dsi_ctrl *ctrl)
+{
+	int rc = 0;
+	int i = 0;
+	struct dsi_regulator_info *regs;
+	struct regulator *vreg = NULL;
+
+	rc = dsi_clk_pwr_get_dt_vreg_data(&pdev->dev,
+					  &ctrl->pwr_info.digital,
+					  "qcom,core-supply-entries");
+	if (rc) {
+		pr_err("failed to get digital supply, rc = %d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_clk_pwr_get_dt_vreg_data(&pdev->dev,
+					  &ctrl->pwr_info.host_pwr,
+					  "qcom,ctrl-supply-entries");
+	if (rc) {
+		pr_err("failed to get host power supplies, rc = %d\n", rc);
+		goto error_digital;
+	}
+
+	regs = &ctrl->pwr_info.digital;
+	for (i = 0; i < regs->count; i++) {
+		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
+		if (IS_ERR(vreg)) {
+			pr_err("failed to get %s regulator\n",
+			       regs->vregs[i].vreg_name);
+			rc = PTR_ERR(vreg);
+			goto error_host_pwr;
+		}
+		regs->vregs[i].vreg = vreg;
+	}
+
+	regs = &ctrl->pwr_info.host_pwr;
+	for (i = 0; i < regs->count; i++) {
+		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
+		if (IS_ERR(vreg)) {
+			pr_err("failed to get %s regulator\n",
+			       regs->vregs[i].vreg_name);
+			/* Put only the host_pwr regulators acquired so far. */
+			for (--i; i >= 0; i--)
+				devm_regulator_put(regs->vregs[i].vreg);
+			rc = PTR_ERR(vreg);
+			goto error_digital_put;
+		}
+		regs->vregs[i].vreg = vreg;
+	}
+
+	return rc;
+
+error_digital_put:
+	regs = &ctrl->pwr_info.digital;
+	for (i = 0; i < regs->count; i++)
+		devm_regulator_put(regs->vregs[i].vreg);
+error_host_pwr:
+	devm_kfree(&pdev->dev, ctrl->pwr_info.host_pwr.vregs);
+	ctrl->pwr_info.host_pwr.vregs = NULL;
+	ctrl->pwr_info.host_pwr.count = 0;
+error_digital:
+	devm_kfree(&pdev->dev, ctrl->pwr_info.digital.vregs);
+	ctrl->pwr_info.digital.vregs = NULL;
+	ctrl->pwr_info.digital.count = 0;
+error:
+	return rc;
+}
+
+/**
+ * dsi_ctrl_axi_bus_client_init() - register an AXI bus-scaling client
+ * @pdev: platform device carrying the bus-scale DT data.
+ * @ctrl: DSI controller handle.
+ *
+ * The caller (probe) treats failure as non-fatal; bandwidth voting is
+ * simply unavailable then.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int dsi_ctrl_axi_bus_client_init(struct platform_device *pdev,
+					struct dsi_ctrl *ctrl)
+{
+	int rc = 0;
+	struct dsi_ctrl_bus_scale_info *bus = &ctrl->axi_bus_info;
+
+	bus->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (IS_ERR_OR_NULL(bus->bus_scale_table)) {
+		rc = PTR_ERR(bus->bus_scale_table);
+		pr_err("msm_bus_cl_get_pdata() failed, rc = %d\n", rc);
+		bus->bus_scale_table = NULL;
+		return rc;
+	}
+
+	bus->bus_handle = msm_bus_scale_register_client(bus->bus_scale_table);
+	if (!bus->bus_handle) {
+		rc = -EINVAL;
+		pr_err("failed to register axi bus client\n");
+	}
+
+	return rc;
+}
+
+/* Unregister the AXI bus-scaling client, if one was registered. */
+static int dsi_ctrl_axi_bus_client_deinit(struct dsi_ctrl *ctrl)
+{
+	struct dsi_ctrl_bus_scale_info *bus = &ctrl->axi_bus_info;
+
+	if (bus->bus_handle) {
+		msm_bus_scale_unregister_client(bus->bus_handle);
+
+		bus->bus_handle = 0;
+	}
+
+	return 0;
+}
+
+/**
+ * dsi_ctrl_validate_panel_info() - sanity-check a host configuration
+ * @dsi_ctrl: DSI controller handle.
+ * @config:   host configuration to validate.
+ *
+ * Return: 0 if the panel mode is valid and at least one data lane is
+ * enabled, -EINVAL otherwise.
+ */
+static int dsi_ctrl_validate_panel_info(struct dsi_ctrl *dsi_ctrl,
+					struct dsi_host_config *config)
+{
+	int rc = 0;
+	struct dsi_host_common_cfg *host_cfg = &config->common_config;
+
+	if (config->panel_mode >= DSI_OP_MODE_MAX) {
+		pr_err("Invalid dsi operation mode (%d)\n", config->panel_mode);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	/*
+	 * (DSI_CLOCK_LANE - 1) masks out everything but the data-lane bits;
+	 * assumes DSI_CLOCK_LANE is the power-of-two bit just above the
+	 * data-lane bits — confirm against the dsi_data_lanes definition.
+	 */
+	if ((host_cfg->data_lanes & (DSI_CLOCK_LANE - 1)) == 0) {
+		pr_err("No data lanes are enabled\n");
+		rc = -EINVAL;
+		goto err;
+	}
+err:
+	return rc;
+}
+
+/**
+ * dsi_ctrl_update_link_freqs() - derive and program link clock frequencies
+ * @dsi_ctrl: DSI controller handle.
+ * @config:   host configuration (timing, lane map, escape clock rate).
+ *
+ * Computes the total bit rate from the video timing, splits it across the
+ * enabled data lanes and programs the pixel/byte/escape clocks.
+ * bpp is hard-coded to 3 bytes per pixel (RGB888 assumed — TODO confirm
+ * against the destination pixel format).
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int dsi_ctrl_update_link_freqs(struct dsi_ctrl *dsi_ctrl,
+				      struct dsi_host_config *config)
+{
+	int rc = 0;
+	u32 num_of_lanes = 0;
+	u32 bpp = 3;
+	u64 h_period, v_period, bit_rate, pclk_rate, bit_rate_per_lane,
+	    byte_clk_rate;
+	struct dsi_host_common_cfg *host_cfg = &config->common_config;
+	struct dsi_mode_info *timing = &config->video_timing;
+
+	if (host_cfg->data_lanes & DSI_DATA_LANE_0)
+		num_of_lanes++;
+	if (host_cfg->data_lanes & DSI_DATA_LANE_1)
+		num_of_lanes++;
+	if (host_cfg->data_lanes & DSI_DATA_LANE_2)
+		num_of_lanes++;
+	if (host_cfg->data_lanes & DSI_DATA_LANE_3)
+		num_of_lanes++;
+
+	/*
+	 * Guard the do_div() below: with no data lane enabled the lane
+	 * division would be a division by zero.
+	 */
+	if (num_of_lanes == 0) {
+		pr_err("[%d] no data lanes enabled\n", dsi_ctrl->index);
+		return -EINVAL;
+	}
+
+	h_period = DSI_H_TOTAL(timing);
+	v_period = DSI_V_TOTAL(timing);
+
+	/* bpp is bytes/pixel here, hence the extra factor of 8 for bits. */
+	bit_rate = h_period * v_period * timing->refresh_rate * bpp * 8;
+	bit_rate_per_lane = bit_rate;
+	do_div(bit_rate_per_lane, num_of_lanes);
+	pclk_rate = bit_rate;
+	do_div(pclk_rate, (8 * bpp));
+	byte_clk_rate = bit_rate_per_lane;
+	do_div(byte_clk_rate, 8);
+	pr_debug("bit_clk_rate = %llu, bit_clk_rate_per_lane = %llu\n",
+		 bit_rate, bit_rate_per_lane);
+	pr_debug("byte_clk_rate = %llu, pclk_rate = %llu\n",
+		 byte_clk_rate, pclk_rate);
+
+	rc = dsi_clk_set_link_frequencies(&dsi_ctrl->clk_info.link_clks,
+					  pclk_rate,
+					  byte_clk_rate,
+					  config->esc_clk_rate_hz);
+	if (rc)
+		pr_err("Failed to update link frequencies\n");
+
+	return rc;
+}
+
+/**
+ * dsi_ctrl_enable_supplies() - power the controller's regulators on/off
+ * @dsi_ctrl: DSI controller handle.
+ * @enable:   true to power up, false to power down.
+ *
+ * Power-up order: host analog supplies first, then the digital GDSC;
+ * power-down reverses this. A failed GDSC enable rolls the host supplies
+ * back so no regulator is left dangling.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int dsi_ctrl_enable_supplies(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+	int rc = 0;
+
+	if (enable) {
+		rc = dsi_pwr_enable_regulator(&dsi_ctrl->pwr_info.host_pwr,
+					      true);
+		if (rc) {
+			pr_err("failed to enable host power regs, rc=%d\n", rc);
+			goto error;
+		}
+
+		rc = dsi_pwr_enable_regulator(&dsi_ctrl->pwr_info.digital,
+					      true);
+		if (rc) {
+			pr_err("failed to enable gdsc, rc=%d\n", rc);
+			/* Roll back host supplies; result intentionally ignored. */
+			(void)dsi_pwr_enable_regulator(
+						&dsi_ctrl->pwr_info.host_pwr,
+						false
+						);
+			goto error;
+		}
+	} else {
+		rc = dsi_pwr_enable_regulator(&dsi_ctrl->pwr_info.digital,
+					      false);
+		if (rc) {
+			pr_err("failed to disable gdsc, rc=%d\n", rc);
+			goto error;
+		}
+
+		rc = dsi_pwr_enable_regulator(&dsi_ctrl->pwr_info.host_pwr,
+					      false);
+		if (rc) {
+			pr_err("failed to disable host power regs, rc=%d\n",
+			       rc);
+			goto error;
+		}
+	}
+error:
+	return rc;
+}
+
+/**
+ * dsi_ctrl_vote_for_bandwidth() - refcounted AXI bandwidth vote
+ * @dsi_ctrl: DSI controller handle.
+ * @on:       true to add a vote, false to drop one.
+ *
+ * The bus request is only updated on the 0->1 and 1->0 refcount edges;
+ * unbalanced "off" calls are logged and ignored. Callers must keep
+ * on/off calls balanced.
+ *
+ * Return: 0 on success, negative error code from the bus driver.
+ */
+static int dsi_ctrl_vote_for_bandwidth(struct dsi_ctrl *dsi_ctrl, bool on)
+{
+	int rc = 0;
+	bool changed = false;
+	struct dsi_ctrl_bus_scale_info *axi_bus = &dsi_ctrl->axi_bus_info;
+
+	if (on) {
+		if (axi_bus->refcount == 0)
+			changed = true;
+
+		axi_bus->refcount++;
+	} else {
+		if (axi_bus->refcount != 0) {
+			axi_bus->refcount--;
+
+			if (axi_bus->refcount == 0)
+				changed = true;
+		} else {
+			pr_err("bus bw votes are not balanced\n");
+		}
+	}
+
+	if (changed) {
+		rc = msm_bus_scale_client_update_request(axi_bus->bus_handle,
+							 on ? 1 : 0);
+		if (rc)
+			pr_err("bus scale client update failed, rc=%d\n", rc);
+	}
+
+	return rc;
+}
+
+/**
+ * dsi_ctrl_copy_and_pad_cmd() - flatten a DSI packet into a padded buffer
+ * @dsi_ctrl: DSI controller handle (used for the devm allocation).
+ * @packet:   assembled MIPI DSI packet (header + payload).
+ * @buffer:   out: devm-allocated buffer; ownership passes to the caller,
+ *            who frees it with devm_kfree().
+ * @size:     out: buffer length in bytes (packet size rounded up to 4).
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+static int dsi_ctrl_copy_and_pad_cmd(struct dsi_ctrl *dsi_ctrl,
+				     const struct mipi_dsi_packet *packet,
+				     u8 **buffer,
+				     u32 *size)
+{
+	int rc = 0;
+	u8 *buf = NULL;
+	u32 len, i;
+
+	len = packet->size;
+	len += 0x3; len &= ~0x03; /* Align to 32 bits */
+
+	buf = devm_kzalloc(&dsi_ctrl->pdev->dev, len * sizeof(u8), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Header, then payload, then 0xFF padding up to the aligned length. */
+	for (i = 0; i < len; i++) {
+		if (i >= packet->size)
+			buf[i] = 0xFF;
+		else if (i < sizeof(packet->header))
+			buf[i] = packet->header[i];
+		else
+			buf[i] = packet->payload[i - sizeof(packet->header)];
+	}
+
+	/*
+	 * NOTE(review): bits 6/7 of the fourth header byte are repurposed
+	 * here — presumably DMA flags ("has payload" / "last command")
+	 * consumed by the command-DMA engine; confirm against the HW docs.
+	 */
+	if (packet->payload_length > 0)
+		buf[3] |= BIT(6);
+
+	buf[3] |= BIT(7);
+	*buffer = buf;
+	*size = len;
+
+	return rc;
+}
+
+/**
+ * dsi_message_tx() - transmit a DSI message through the command FIFO
+ * @dsi_ctrl: DSI controller handle.
+ * @msg:      MIPI DSI message to send.
+ * @flags:    DSI_CTRL_CMD_* flags; only FIFO_STORE transfers are supported.
+ *
+ * Unless DSI_CTRL_CMD_DEFER_TRIGGER is set, the transfer is triggered
+ * immediately and completion is polled (up to ~10ms).
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
+			  const struct mipi_dsi_msg *msg,
+			  u32 flags)
+{
+	int rc = 0;
+	struct mipi_dsi_packet packet;
+	struct dsi_ctrl_cmd_dma_fifo_info cmd;
+	u32 hw_flags = 0;
+	u32 length = 0;
+	u8 *buffer = NULL;
+
+	if (!(flags & DSI_CTRL_CMD_FIFO_STORE)) {
+		pr_err("Memory DMA is not supported, use FIFO\n");
+		/*
+		 * Bug fix: rc was left at 0 here, so an unsupported
+		 * transfer mode was reported as success without anything
+		 * having been transmitted.
+		 */
+		rc = -ENOTSUPP;
+		goto error;
+	}
+
+	rc = mipi_dsi_create_packet(&packet, msg);
+	if (rc) {
+		pr_err("Failed to create message packet, rc=%d\n", rc);
+		goto error;
+	}
+
+	if (flags & DSI_CTRL_CMD_FIFO_STORE) {
+		rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl,
+					       &packet,
+					       &buffer,
+					       &length);
+		if (rc) {
+			pr_err("[%s] failed to copy message, rc=%d\n",
+			       dsi_ctrl->name, rc);
+			goto error;
+		}
+		cmd.command = (u32 *)buffer;
+		cmd.size = length;
+		cmd.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ?
+				   true : false;
+		cmd.is_master = (flags & DSI_CTRL_CMD_BROADCAST_MASTER) ?
+				true : false;
+		cmd.use_lpm = (msg->flags & MIPI_DSI_MSG_USE_LPM) ?
+			      true : false;
+	}
+
+	hw_flags |= (flags & DSI_CTRL_CMD_DEFER_TRIGGER) ?
+		    DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER : 0;
+
+	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER))
+		reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
+
+	if (flags & DSI_CTRL_CMD_FIFO_STORE)
+		dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
+						      &cmd,
+						      hw_flags);
+
+	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
+		u32 retry = 10;
+		u32 status = 0;
+		u64 error = 0;
+		u32 mask = (DSI_CMD_MODE_DMA_DONE);
+
+		/* Poll for DMA-done, clearing status/error as we go. */
+		while ((status == 0) && (retry > 0)) {
+			udelay(1000);
+			status = dsi_ctrl->hw.ops.get_interrupt_status(
+								&dsi_ctrl->hw);
+			error = dsi_ctrl->hw.ops.get_error_status(
+								&dsi_ctrl->hw);
+			status &= mask;
+			retry--;
+			dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
+								status);
+			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+							    error);
+		}
+		pr_debug("INT STATUS = %x, retry = %d\n", status, retry);
+		if (retry == 0) {
+			pr_err("[DSI_%d]Command transfer failed\n",
+			       dsi_ctrl->index);
+			/*
+			 * Bug fix: the timeout previously returned 0,
+			 * reporting a failed transfer as success.
+			 */
+			rc = -ETIMEDOUT;
+		}
+
+		dsi_ctrl->hw.ops.reset_cmd_fifo(&dsi_ctrl->hw);
+	}
+error:
+	if (buffer)
+		devm_kfree(&dsi_ctrl->pdev->dev, buffer);
+	return rc;
+}
+
+/**
+ * dsi_set_max_return_size() - send a MAX_RETURN_PACKET_SIZE command
+ * @dsi_ctrl: DSI controller handle.
+ * @rx_msg:   read message whose virtual channel is reused.
+ * @size:     maximum return packet size in bytes (sent little-endian).
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int dsi_set_max_return_size(struct dsi_ctrl *dsi_ctrl,
+				   const struct mipi_dsi_msg *rx_msg,
+				   u32 size)
+{
+	int rc = 0;
+	u8 tx[2] = { (u8)(size & 0xFF), (u8)(size >> 8) };
+	struct mipi_dsi_msg msg = {
+		.channel = rx_msg->channel,
+		.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
+		.tx_len = 2,
+		.tx_buf = tx,
+	};
+
+	rc = dsi_message_tx(dsi_ctrl, &msg, 0x0);
+	if (rc)
+		pr_err("failed to send max return size packet, rc=%d\n", rc);
+
+	return rc;
+}
+
+/**
+ * dsi_message_rx() - perform a DSI read transaction
+ * @dsi_ctrl: DSI controller handle.
+ * @msg:      MIPI DSI read message (rx_len selects short vs long read).
+ * @flags:    DSI_CTRL_CMD_* flags forwarded to dsi_message_tx().
+ *
+ * Sets the maximum return packet size and transmits the read request,
+ * looping for multi-packet long reads.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int dsi_message_rx(struct dsi_ctrl *dsi_ctrl,
+			  const struct mipi_dsi_msg *msg,
+			  u32 flags)
+{
+	int rc = 0;
+	u32 rd_pkt_size;
+	u32 total_read_len;
+	u32 bytes_read = 0, tot_bytes_read = 0;
+	u32 current_read_len;
+	bool short_resp = false;
+	bool read_done = false;
+
+	if (msg->rx_len <= 2) {
+		/* Fits a short read response (data in the ack packet). */
+		short_resp = true;
+		rd_pkt_size = msg->rx_len;
+		total_read_len = 4;
+	} else {
+		short_resp = false;
+		current_read_len = 10;
+		if (msg->rx_len < current_read_len)
+			rd_pkt_size = msg->rx_len;
+		else
+			rd_pkt_size = current_read_len;
+
+		total_read_len = current_read_len + 6;
+	}
+
+	while (!read_done) {
+		rc = dsi_set_max_return_size(dsi_ctrl, msg, rd_pkt_size);
+		if (rc) {
+			pr_err("Failed to set max return packet size, rc=%d\n",
+			       rc);
+			goto error;
+		}
+
+		rc = dsi_message_tx(dsi_ctrl, msg, flags);
+		if (rc) {
+			pr_err("Message transmission failed, rc=%d\n", rc);
+			goto error;
+		}
+
+		/*
+		 * FIXME: reading the RX FIFO into msg->rx_buf is not yet
+		 * implemented, so bytes_read is never updated. Bail out
+		 * rather than spinning forever on multi-packet reads
+		 * (previously this loop never terminated for
+		 * rx_len > rd_pkt_size).
+		 */
+		if (!short_resp && bytes_read == 0) {
+			pr_err("RX FIFO read not implemented, aborting\n");
+			rc = -ENOTSUPP;
+			goto error;
+		}
+
+		tot_bytes_read += bytes_read;
+		if (short_resp)
+			read_done = true;
+		else if (msg->rx_len <= tot_bytes_read)
+			read_done = true;
+	}
+error:
+	return rc;
+}
+
+
+/**
+ * dsi_enable_ulps() - request ULPS entry on the active lanes
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * In command mode the data lanes are included; the clock lane is always
+ * requested. Verifies via the hardware status that every requested lane
+ * actually entered ULPS.
+ *
+ * Return: 0 on success, -EIO if any requested lane failed to enter ULPS.
+ */
+static int dsi_enable_ulps(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+	/*
+	 * Bug fix: lanes was previously uninitialized, so in video mode
+	 * (the branch below not taken) the "lanes |= DSI_CLOCK_LANE"
+	 * read an indeterminate value (undefined behavior) and could
+	 * request ULPS on random lanes.
+	 */
+	u32 lanes = 0;
+	u32 ulps_lanes;
+
+	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE)
+		lanes = dsi_ctrl->host_config.common_config.data_lanes;
+
+	lanes |= DSI_CLOCK_LANE;
+	dsi_ctrl->hw.ops.ulps_request(&dsi_ctrl->hw, lanes);
+
+	ulps_lanes = dsi_ctrl->hw.ops.get_lanes_in_ulps(&dsi_ctrl->hw);
+
+	if ((lanes & ulps_lanes) != lanes) {
+		pr_err("Failed to enter ULPS, request=0x%x, actual=0x%x\n",
+		       lanes, ulps_lanes);
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+/**
+ * dsi_disable_ulps() - bring the active lanes out of ULPS
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Only lanes the hardware reports as currently in ULPS are exited.
+ * After the mandated 1ms wait the exit request is cleared and the lane
+ * status re-checked.
+ *
+ * Return: 0 on success, -EIO if any lane remains stuck in ULPS.
+ */
+static int dsi_disable_ulps(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+	u32 ulps_lanes, lanes = 0;
+
+	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE)
+		lanes = dsi_ctrl->host_config.common_config.data_lanes;
+
+	lanes |= DSI_CLOCK_LANE;
+	ulps_lanes = dsi_ctrl->hw.ops.get_lanes_in_ulps(&dsi_ctrl->hw);
+
+	if ((lanes & ulps_lanes) != lanes)
+		pr_err("Mismatch between lanes in ULPS\n");
+
+	/* Only exit lanes that are actually in ULPS. */
+	lanes &= ulps_lanes;
+
+	dsi_ctrl->hw.ops.ulps_exit(&dsi_ctrl->hw, lanes);
+
+	/* 1 ms delay is recommended by specification */
+	udelay(1000);
+
+	dsi_ctrl->hw.ops.clear_ulps_request(&dsi_ctrl->hw, lanes);
+
+	ulps_lanes = dsi_ctrl->hw.ops.get_lanes_in_ulps(&dsi_ctrl->hw);
+	if (ulps_lanes & lanes) {
+		pr_err("Lanes (0x%x) stuck in ULPS\n", ulps_lanes);
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+/**
+ * dsi_ctrl_drv_state_init() - seed the driver state machine
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Everything starts "off"; splash_enabled is a placeholder for a future
+ * continuous-splash handover path where the bootloader left the display
+ * running (TODO: currently hard-coded false).
+ *
+ * Return: always 0.
+ */
+static int dsi_ctrl_drv_state_init(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+	bool splash_enabled = false;
+	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
+
+	if (!splash_enabled) {
+		state->power_state = DSI_CTRL_POWER_OFF;
+		state->cmd_engine_state = DSI_CTRL_ENGINE_OFF;
+		state->vid_engine_state = DSI_CTRL_ENGINE_OFF;
+		state->pwr_enabled = false;
+		state->core_clk_enabled = false;
+		state->link_clk_enabled = false;
+		state->ulps_enabled = false;
+		state->clamp_enabled = false;
+		state->clk_source_set = false;
+	}
+
+	return rc;
+}
+
+/* Release the controller IRQ acquired during interrupt setup. */
+int dsi_ctrl_intr_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+	struct dsi_ctrl_interrupts *ints = &dsi_ctrl->int_info;
+
+	devm_free_irq(&dsi_ctrl->pdev->dev, ints->irq, dsi_ctrl);
+
+	return 0;
+}
+
+/* Drop the IOVA mapping and free the TX command GEM buffer, if any. */
+static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+	if (dsi_ctrl->tx_cmd_buf) {
+		msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, 0);
+
+		msm_gem_free_object(dsi_ctrl->tx_cmd_buf);
+		dsi_ctrl->tx_cmd_buf = NULL;
+	}
+
+	return 0;
+}
+
+/**
+ * dsi_ctrl_buffer_init() - allocate the 4K TX command GEM buffer
+ * @dsi_ctrl: DSI controller handle (drm_dev must be set).
+ *
+ * Allocates an uncached GEM object, maps it to an IOVA and checks the
+ * hardware's 8-byte alignment requirement for command DMA.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int dsi_ctrl_buffer_init(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+	u32 iova = 0;
+
+	dsi_ctrl->tx_cmd_buf = msm_gem_new(dsi_ctrl->drm_dev,
+					   SZ_4K,
+					   MSM_BO_UNCACHED);
+
+	if (IS_ERR(dsi_ctrl->tx_cmd_buf)) {
+		rc = PTR_ERR(dsi_ctrl->tx_cmd_buf);
+		pr_err("failed to allocate gem, rc=%d\n", rc);
+		dsi_ctrl->tx_cmd_buf = NULL;
+		goto error;
+	}
+
+	dsi_ctrl->cmd_buffer_size = SZ_4K;
+
+	rc = msm_gem_get_iova(dsi_ctrl->tx_cmd_buf, 0, &iova);
+	if (rc) {
+		pr_err("failed to get iova, rc=%d\n", rc);
+		(void)dsi_ctrl_buffer_deinit(dsi_ctrl);
+		goto error;
+	}
+
+	/* Command DMA requires an 8-byte-aligned buffer address. */
+	if (iova & 0x07) {
+		pr_err("Tx command buffer is not 8 byte aligned\n");
+		rc = -ENOTSUPP;
+		(void)dsi_ctrl_buffer_deinit(dsi_ctrl);
+		goto error;
+	}
+error:
+	return rc;
+}
+
+/**
+ * dsi_enable_io_clamp() - enable/disable the PHY lane clamps
+ * @dsi_ctrl: DSI controller handle.
+ * @enable:   true to clamp, false to release.
+ *
+ * The current ULPS state is passed down so the hardware layer can pick
+ * the matching clamp configuration. Clock lane is always included; data
+ * lanes only in command mode.
+ *
+ * Return: always 0.
+ */
+static int dsi_enable_io_clamp(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+	bool en_ulps = dsi_ctrl->current_state.ulps_enabled;
+	u32 lanes = 0;
+
+	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE)
+		lanes = dsi_ctrl->host_config.common_config.data_lanes;
+
+	lanes |= DSI_CLOCK_LANE;
+
+	if (enable)
+		dsi_ctrl->hw.ops.clamp_enable(&dsi_ctrl->hw, lanes, en_ulps);
+	else
+		dsi_ctrl->hw.ops.clamp_disable(&dsi_ctrl->hw, lanes, en_ulps);
+
+	return 0;
+}
+
+/**
+ * dsi_ctrl_dev_probe() - platform probe for a DSI controller instance
+ * @pdev: platform device.
+ *
+ * Maps registers, acquires clocks and supplies, binds the hardware
+ * catalog matching the compatible's version and publishes the controller
+ * on the global list. AXI bus client registration failure is non-fatal
+ * (bandwidth voting simply becomes unavailable).
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int dsi_ctrl_dev_probe(struct platform_device *pdev)
+{
+	struct dsi_ctrl *dsi_ctrl;
+	struct dsi_ctrl_list_item *item;
+	const struct of_device_id *id;
+	enum dsi_ctrl_version version;
+	u32 index = 0;
+	int rc = 0;
+
+	id = of_match_node(msm_dsi_of_match, pdev->dev.of_node);
+	if (!id)
+		return -ENODEV;
+
+	version = *(enum dsi_ctrl_version *)id->data;
+
+	item = devm_kzalloc(&pdev->dev, sizeof(*item), GFP_KERNEL);
+	if (!item)
+		return -ENOMEM;
+
+	dsi_ctrl = devm_kzalloc(&pdev->dev, sizeof(*dsi_ctrl), GFP_KERNEL);
+	if (!dsi_ctrl)
+		return -ENOMEM;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &index);
+	if (rc) {
+		pr_debug("cell index not set, default to 0\n");
+		index = 0;
+	}
+
+	dsi_ctrl->index = index;
+
+	dsi_ctrl->name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!dsi_ctrl->name)
+		dsi_ctrl->name = DSI_CTRL_DEFAULT_LABEL;
+
+	rc = dsi_ctrl_init_regmap(pdev, dsi_ctrl);
+	if (rc) {
+		pr_err("Failed to parse register information, rc = %d\n", rc);
+		goto fail;
+	}
+
+	rc = dsi_ctrl_clocks_init(pdev, dsi_ctrl);
+	if (rc) {
+		pr_err("Failed to parse clock information, rc = %d\n", rc);
+		goto fail;
+	}
+
+	rc = dsi_ctrl_supplies_init(pdev, dsi_ctrl);
+	if (rc) {
+		pr_err("Failed to parse voltage supplies, rc = %d\n", rc);
+		goto fail_clks;
+	}
+
+	dsi_ctrl->version = version;
+	rc = dsi_catalog_ctrl_setup(&dsi_ctrl->hw, dsi_ctrl->version,
+				    dsi_ctrl->index);
+	if (rc) {
+		pr_err("Catalog does not support version (%d)\n",
+		       dsi_ctrl->version);
+		goto fail_supplies;
+	}
+
+	rc = dsi_ctrl_axi_bus_client_init(pdev, dsi_ctrl);
+	if (rc)
+		pr_err("failed to init axi bus client, rc = %d\n", rc);
+
+	item->ctrl = dsi_ctrl;
+
+	/*
+	 * Bug fix: finish initializing the controller (lock, pdev,
+	 * drvdata) *before* publishing it on the global list; previously
+	 * another thread walking dsi_ctrl_list could grab an
+	 * uninitialized ctrl_lock.
+	 */
+	mutex_init(&dsi_ctrl->ctrl_lock);
+	dsi_ctrl->pdev = pdev;
+	platform_set_drvdata(pdev, dsi_ctrl);
+
+	mutex_lock(&dsi_ctrl_list_lock);
+	list_add(&item->list, &dsi_ctrl_list);
+	mutex_unlock(&dsi_ctrl_list_lock);
+
+	pr_debug("Probe successful for %s\n", dsi_ctrl->name);
+
+	return 0;
+
+fail_supplies:
+	(void)dsi_ctrl_supplies_deinit(dsi_ctrl);
+fail_clks:
+	(void)dsi_ctrl_clocks_deinit(dsi_ctrl);
+fail:
+	return rc;
+}
+
+static int dsi_ctrl_dev_remove(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct dsi_ctrl *dsi_ctrl;
+ struct list_head *pos, *tmp;
+
+ dsi_ctrl = platform_get_drvdata(pdev);
+
+ mutex_lock(&dsi_ctrl_list_lock);
+ list_for_each_safe(pos, tmp, &dsi_ctrl_list) {
+ struct dsi_ctrl_list_item *n = list_entry(pos,
+ struct dsi_ctrl_list_item,
+ list);
+ if (n->ctrl == dsi_ctrl) {
+ list_del(&n->list);
+ break;
+ }
+ }
+ mutex_unlock(&dsi_ctrl_list_lock);
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+ rc = dsi_ctrl_axi_bus_client_deinit(dsi_ctrl);
+ if (rc)
+ pr_err("failed to deinitialize axi bus client, rc = %d\n", rc);
+
+ rc = dsi_ctrl_supplies_deinit(dsi_ctrl);
+ if (rc)
+ pr_err("failed to deinitialize voltage supplies, rc=%d\n", rc);
+
+ rc = dsi_ctrl_clocks_deinit(dsi_ctrl);
+ if (rc)
+ pr_err("failed to deinitialize clocks, rc=%d\n", rc);
+
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+ mutex_destroy(&dsi_ctrl->ctrl_lock);
+ devm_kfree(&pdev->dev, dsi_ctrl);
+
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
/* Platform driver binding for "drm_dsi_ctrl" compatible DT nodes. */
static struct platform_driver dsi_ctrl_driver = {
	.probe = dsi_ctrl_dev_probe,
	.remove = dsi_ctrl_dev_remove,
	.driver = {
		.name = "drm_dsi_ctrl",
		.of_match_table = msm_dsi_of_match,
	},
};
+
+/**
+ * dsi_ctrl_get() - get a dsi_ctrl handle from an of_node
+ * @of_node: of_node of the DSI controller.
+ *
+ * Gets the DSI controller handle for the corresponding of_node. The ref count
+ * is incremented to one and all subsequent gets will fail until the original
+ * clients calls a put.
+ *
+ * Return: DSI Controller handle.
+ */
+struct dsi_ctrl *dsi_ctrl_get(struct device_node *of_node)
+{
+ struct list_head *pos, *tmp;
+ struct dsi_ctrl *ctrl = NULL;
+
+ mutex_lock(&dsi_ctrl_list_lock);
+ list_for_each_safe(pos, tmp, &dsi_ctrl_list) {
+ struct dsi_ctrl_list_item *n;
+
+ n = list_entry(pos, struct dsi_ctrl_list_item, list);
+ if (n->ctrl->pdev->dev.of_node == of_node) {
+ ctrl = n->ctrl;
+ break;
+ }
+ }
+ mutex_unlock(&dsi_ctrl_list_lock);
+
+ if (!ctrl) {
+ pr_err("Device with of node not found\n");
+ ctrl = ERR_PTR(-EPROBE_DEFER);
+ return ctrl;
+ }
+
+ mutex_lock(&ctrl->ctrl_lock);
+ if (ctrl->refcount == 1) {
+ pr_err("[%s] Device in use\n", ctrl->name);
+ ctrl = ERR_PTR(-EBUSY);
+ } else {
+ ctrl->refcount++;
+ }
+ mutex_unlock(&ctrl->ctrl_lock);
+ return ctrl;
+}
+
+/**
+ * dsi_ctrl_put() - releases a dsi controller handle.
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Releases the DSI controller. Driver will clean up all resources and puts back
+ * the DSI controller into reset state.
+ */
+void dsi_ctrl_put(struct dsi_ctrl *dsi_ctrl)
+{
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ if (dsi_ctrl->refcount == 0)
+ pr_err("Unbalanced dsi_ctrl_put call\n");
+ else
+ dsi_ctrl->refcount--;
+
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+}
+
+/**
+ * dsi_ctrl_drv_init() - initialize dsi controller driver.
+ * @dsi_ctrl: DSI controller handle.
+ * @parent: Parent directory for debug fs.
+ *
+ * Initializes DSI controller driver. Driver should be initialized after
+ * dsi_ctrl_get() succeeds.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_drv_init(struct dsi_ctrl *dsi_ctrl, struct dentry *parent)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl || !parent) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+ rc = dsi_ctrl_drv_state_init(dsi_ctrl);
+ if (rc) {
+ pr_err("Failed to initialize driver state, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = dsi_ctrl_debugfs_init(dsi_ctrl, parent);
+ if (rc) {
+ pr_err("[DSI_%d] failed to init debug fs, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto error;
+ }
+
+error:
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
+ * dsi_ctrl_drv_deinit() - de-initializes dsi controller driver
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Releases all resources acquired by dsi_ctrl_drv_init().
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_drv_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ rc = dsi_ctrl_debugfs_deinit(dsi_ctrl);
+ if (rc)
+ pr_err("failed to release debugfs root, rc=%d\n", rc);
+
+ rc = dsi_ctrl_buffer_deinit(dsi_ctrl);
+ if (rc)
+ pr_err("Failed to free cmd buffers, rc=%d\n", rc);
+
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
+ * dsi_ctrl_phy_sw_reset() - perform a PHY software reset
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Performs a PHY software reset on the DSI controller. Reset should be done
+ * when the controller power state is DSI_CTRL_POWER_CORE_CLK_ON and the PHY is
+ * not enabled.
+ *
+ * This function will fail if driver is in any other state.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+ rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_PHY_SW_RESET, 0x0);
+ if (rc) {
+ pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto error;
+ }
+
+ dsi_ctrl->hw.ops.phy_sw_reset(&dsi_ctrl->hw);
+
+ pr_debug("[DSI_%d] PHY soft reset done\n", dsi_ctrl->index);
+ dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_PHY_SW_RESET, 0x0);
+error:
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
+ * dsi_ctrl_seamless_timing_update() - update only controller timing
+ * @dsi_ctrl: DSI controller handle.
+ * @timing: New DSI timing info
+ *
+ * Updates host timing values to conduct a seamless transition to new timing
+ * For example, to update the porch values in a dynamic fps switch.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_async_timing_update(struct dsi_ctrl *dsi_ctrl,
+ struct dsi_mode_info *timing)
+{
+ struct dsi_mode_info *host_mode;
+ int rc = 0;
+
+ if (!dsi_ctrl || !timing) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_ASYNC_TIMING,
+ DSI_CTRL_ENGINE_ON);
+ if (rc) {
+ pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto exit;
+ }
+
+ host_mode = &dsi_ctrl->host_config.video_timing;
+ memcpy(host_mode, timing, sizeof(*host_mode));
+
+ dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw, host_mode);
+
+exit:
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
/**
 * dsi_ctrl_host_init() - Initialize DSI host hardware.
 * @dsi_ctrl: DSI controller handle.
 *
 * Initializes DSI controller hardware with host configuration provided by
 * dsi_ctrl_update_host_config(). Initialization can be performed only during
 * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
 * performed.
 *
 * Return: error code.
 */
int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl)
{
	int rc = 0;

	if (!dsi_ctrl) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	mutex_lock(&dsi_ctrl->ctrl_lock);
	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
	if (rc) {
		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
		       dsi_ctrl->index, rc);
		goto error;
	}

	/* Program lane mapping and common host parameters first. */
	dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
					&dsi_ctrl->host_config.lane_map);

	dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
				    &dsi_ctrl->host_config.common_config);

	/* Then set up the mode-specific engine (command vs video). */
	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
		dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
					&dsi_ctrl->host_config.common_config,
					&dsi_ctrl->host_config.u.cmd_engine);

		/*
		 * NOTE(review): h_active * 3 appears to assume 3 bytes per
		 * pixel (RGB888) for the stream width — TODO confirm this
		 * holds for other pixel formats.
		 */
		dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
				dsi_ctrl->host_config.video_timing.h_active,
				dsi_ctrl->host_config.video_timing.h_active * 3,
				dsi_ctrl->host_config.video_timing.v_active,
				0x0);
	} else {
		dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
					&dsi_ctrl->host_config.common_config,
					&dsi_ctrl->host_config.u.video_engine);
		dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw,
				&dsi_ctrl->host_config.video_timing);
	}

	/*
	 * Interrupts are programmed with an empty mask (0x0) here —
	 * presumably leaving all sources disabled until explicitly needed;
	 * confirm against the catalog's enable_*_interrupts semantics.
	 */
	dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
	dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0);

	/* Perform a soft reset before enabling dsi controller */
	dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
	pr_debug("[DSI_%d]Host initialization complete\n", dsi_ctrl->index);
	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
error:
	mutex_unlock(&dsi_ctrl->ctrl_lock);
	return rc;
}
+
+/**
+ * dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * De-initializes DSI controller hardware. It can be performed only during
+ * DSI_CTRL_POWER_CORE_CLK_ON state after LINK clocks have been turned off.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x0);
+ if (rc) {
+ pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ pr_err("driver state check failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ pr_debug("[DSI_%d] Host deinitization complete\n", dsi_ctrl->index);
+ dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x0);
+error:
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
+ * dsi_ctrl_update_host_config() - update dsi host configuration
+ * @dsi_ctrl: DSI controller handle.
+ * @config: DSI host configuration.
+ * @flags: dsi_mode_flags modifying the behavior
+ *
+ * Updates driver with new Host configuration to use for host initialization.
+ * This function call will only update the software context. The stored
+ * configuration information will be used when the host is initialized.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_update_host_config(struct dsi_ctrl *ctrl,
+ struct dsi_host_config *config,
+ int flags)
+{
+ int rc = 0;
+
+ if (!ctrl || !config) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ctrl->ctrl_lock);
+
+ rc = dsi_ctrl_validate_panel_info(ctrl, config);
+ if (rc) {
+ pr_err("panel validation failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ if (!(flags & DSI_MODE_FLAG_SEAMLESS)) {
+ rc = dsi_ctrl_update_link_freqs(ctrl, config);
+ if (rc) {
+ pr_err("[%s] failed to update link frequencies, rc=%d\n",
+ ctrl->name, rc);
+ goto error;
+ }
+ }
+
+ pr_debug("[DSI_%d]Host config updated\n", ctrl->index);
+ memcpy(&ctrl->host_config, config, sizeof(ctrl->host_config));
+error:
+ mutex_unlock(&ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
+ * dsi_ctrl_validate_timing() - validate a video timing configuration
+ * @dsi_ctrl: DSI controller handle.
+ * @timing: Pointer to timing data.
+ *
+ * Driver will validate if the timing configuration is supported on the
+ * controller hardware.
+ *
+ * Return: error code if timing is not supported.
+ */
+int dsi_ctrl_validate_timing(struct dsi_ctrl *dsi_ctrl,
+ struct dsi_mode_info *mode)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl || !mode) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+ return rc;
+}
+
+/**
+ * dsi_ctrl_cmd_transfer() - Transfer commands on DSI link
+ * @dsi_ctrl: DSI controller handle.
+ * @msg: Message to transfer on DSI link.
+ * @flags: Modifiers for message transfer.
+ *
+ * Command transfer can be done only when command engine is enabled. The
+ * transfer API will block until either the command transfer finishes or
+ * the timeout value is reached. If the trigger is deferred, it will return
+ * without triggering the transfer. Command parameters are programmed to
+ * hardware.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl,
+ const struct mipi_dsi_msg *msg,
+ u32 flags)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl || !msg) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CMD_TX, 0x0);
+ if (rc) {
+ pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto error;
+ }
+
+ rc = dsi_ctrl_vote_for_bandwidth(dsi_ctrl, true);
+ if (rc) {
+ pr_err("bandwidth request failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ if (flags & DSI_CTRL_CMD_READ) {
+ rc = dsi_message_rx(dsi_ctrl, msg, flags);
+ if (rc)
+ pr_err("read message failed, rc=%d\n", rc);
+ } else {
+ rc = dsi_message_tx(dsi_ctrl, msg, flags);
+ if (rc)
+ pr_err("command msg transfer failed, rc = %d\n", rc);
+ }
+
+ dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CMD_TX, 0x0);
+
+ (void)dsi_ctrl_vote_for_bandwidth(dsi_ctrl, false);
+error:
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
+ * dsi_ctrl_cmd_tx_trigger() - Trigger a deferred command.
+ * @dsi_ctrl: DSI controller handle.
+ * @flags: Modifiers.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
+{
+ int rc = 0;
+ u32 status = 0;
+ u32 mask = (DSI_CMD_MODE_DMA_DONE);
+
+ if (!dsi_ctrl) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
+
+ dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
+
+ if ((flags & DSI_CTRL_CMD_BROADCAST) &&
+ (flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
+ u32 retry = 10;
+
+ while ((status == 0) && (retry > 0)) {
+ udelay(1000);
+ status = dsi_ctrl->hw.ops.get_interrupt_status(
+ &dsi_ctrl->hw);
+ status &= mask;
+ retry--;
+ dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
+ status);
+ }
+ pr_debug("INT STATUS = %x, retry = %d\n", status, retry);
+ if (retry == 0)
+ pr_err("[DSI_%d]Command transfer failed\n",
+ dsi_ctrl->index);
+ }
+
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
/**
 * dsi_ctrl_set_power_state() - set power state for dsi controller
 * @dsi_ctrl: DSI controller handle.
 * @state: Power state.
 *
 * Moves the controller to @state by enabling or disabling regulators,
 * core clocks and link clocks. Each DSI_CTRL_POWER_* level implies the
 * ones below it (LINK_CLK_ON => CORE_CLK_ON => VREG_ON). Resources are
 * enabled top-down (supplies, core clocks, link clocks) and disabled
 * bottom-up, and each is only toggled when the requested level differs
 * from the currently recorded driver state. Power state can be changed
 * only when Controller, Video and Command engines are turned off.
 *
 * Return: error code.
 */
int dsi_ctrl_set_power_state(struct dsi_ctrl *dsi_ctrl,
			     enum dsi_power_state state)
{
	int rc = 0;
	bool core_clk_enable = false;
	bool link_clk_enable = false;
	bool reg_enable = false;
	struct dsi_ctrl_state_info *drv_state;

	if (!dsi_ctrl || (state >= DSI_CTRL_POWER_MAX)) {
		pr_err("Invalid Params\n");
		return -EINVAL;
	}

	mutex_lock(&dsi_ctrl->ctrl_lock);

	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_POWER_STATE_CHANGE,
				  state);
	if (rc) {
		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
		       dsi_ctrl->index, rc);
		goto error;
	}

	/* Map the requested power level to the set of required resources. */
	if (state == DSI_CTRL_POWER_LINK_CLK_ON)
		reg_enable = core_clk_enable = link_clk_enable = true;
	else if (state == DSI_CTRL_POWER_CORE_CLK_ON)
		reg_enable = core_clk_enable = true;
	else if (state == DSI_CTRL_POWER_VREG_ON)
		reg_enable = true;

	drv_state = &dsi_ctrl->current_state;

	/* Enable path: supplies before core clocks before link clocks. */
	if ((reg_enable) && (reg_enable != drv_state->pwr_enabled)) {
		rc = dsi_ctrl_enable_supplies(dsi_ctrl, true);
		if (rc) {
			pr_err("[%d]failed to enable voltage supplies, rc=%d\n",
			       dsi_ctrl->index, rc);
			goto error;
		}
	}

	if ((core_clk_enable) &&
	    (core_clk_enable != drv_state->core_clk_enabled)) {
		rc = dsi_clk_enable_core_clks(&dsi_ctrl->clk_info.core_clks,
					      true);
		if (rc) {
			pr_err("[%d] failed to enable core clocks, rc=%d\n",
			       dsi_ctrl->index, rc);
			goto error;
		}
	}

	/*
	 * Link clocks are toggled in either direction here: the second
	 * argument is the desired on/off state, so this handles both
	 * enabling and disabling when the recorded state differs.
	 */
	if (link_clk_enable != drv_state->link_clk_enabled) {
		rc = dsi_clk_enable_link_clks(&dsi_ctrl->clk_info.link_clks,
					      link_clk_enable);
		if (rc) {
			pr_err("[%d] failed to enable link clocks, rc=%d\n",
			       dsi_ctrl->index, rc);
			goto error;
		}
	}

	/* Disable path: core clocks before supplies (reverse order). */
	if ((!core_clk_enable) &&
	    (core_clk_enable != drv_state->core_clk_enabled)) {
		rc = dsi_clk_enable_core_clks(&dsi_ctrl->clk_info.core_clks,
					      false);
		if (rc) {
			pr_err("[%d] failed to disable core clocks, rc=%d\n",
			       dsi_ctrl->index, rc);
			goto error;
		}
	}

	if ((!reg_enable) && (reg_enable != drv_state->pwr_enabled)) {
		rc = dsi_ctrl_enable_supplies(dsi_ctrl, false);
		if (rc) {
			pr_err("[%d]failed to disable vreg supplies, rc=%d\n",
			       dsi_ctrl->index, rc);
			goto error;
		}
	}

	pr_debug("[DSI_%d] Power state updated to %d\n", dsi_ctrl->index,
		 state);
	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_POWER_STATE_CHANGE, state);
error:
	mutex_unlock(&dsi_ctrl->ctrl_lock);
	return rc;
}
+
+/**
+ * dsi_ctrl_set_tpg_state() - enable/disable test pattern on the controller
+ * @dsi_ctrl: DSI controller handle.
+ * @on: enable/disable test pattern.
+ *
+ * Test pattern can be enabled only after Video engine (for video mode panels)
+ * or command engine (for cmd mode panels) is enabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_TPG, on);
+ if (rc) {
+ pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto error;
+ }
+
+ if (on) {
+ if (dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) {
+ dsi_ctrl->hw.ops.video_test_pattern_setup(&dsi_ctrl->hw,
+ DSI_TEST_PATTERN_INC,
+ 0xFFFF);
+ } else {
+ dsi_ctrl->hw.ops.cmd_test_pattern_setup(
+ &dsi_ctrl->hw,
+ DSI_TEST_PATTERN_INC,
+ 0xFFFF,
+ 0x0);
+ }
+ }
+ dsi_ctrl->hw.ops.test_pattern_enable(&dsi_ctrl->hw, on);
+
+ pr_debug("[DSI_%d]Set test pattern state=%d\n", dsi_ctrl->index, on);
+ dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_TPG, on);
+error:
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
+ * dsi_ctrl_set_host_engine_state() - set host engine state
+ * @dsi_ctrl: DSI Controller handle.
+ * @state: Engine state.
+ *
+ * Host engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON and cmd, video engines are disabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_host_engine_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_engine_state state)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
+ if (rc) {
+ pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto error;
+ }
+
+ if (state == DSI_CTRL_ENGINE_ON)
+ dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, true);
+ else
+ dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw, false);
+
+ pr_debug("[DSI_%d] Set host engine state = %d\n", dsi_ctrl->index,
+ state);
+ dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
+error:
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
+ * dsi_ctrl_set_cmd_engine_state() - set command engine state
+ * @dsi_ctrl: DSI Controller handle.
+ * @state: Engine state.
+ *
+ * Command engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_cmd_engine_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_engine_state state)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
+ if (rc) {
+ pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto error;
+ }
+
+ if (state == DSI_CTRL_ENGINE_ON)
+ dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw, true);
+ else
+ dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw, false);
+
+ pr_debug("[DSI_%d] Set cmd engine state = %d\n", dsi_ctrl->index,
+ state);
+ dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
+error:
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
+ * dsi_ctrl_set_vid_engine_state() - set video engine state
+ * @dsi_ctrl: DSI Controller handle.
+ * @state: Engine state.
+ *
+ * Video engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_vid_engine_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_engine_state state)
+{
+ int rc = 0;
+ bool on;
+
+ if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_VID_ENGINE, state);
+ if (rc) {
+ pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto error;
+ }
+
+ on = (state == DSI_CTRL_ENGINE_ON) ? true : false;
+ dsi_ctrl->hw.ops.video_engine_en(&dsi_ctrl->hw, on);
+
+ /* perform a reset when turning off video engine */
+ if (!on)
+ dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
+
+ pr_debug("[DSI_%d] Set video engine state = %d\n", dsi_ctrl->index,
+ state);
+ dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_VID_ENGINE, state);
+error:
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
+ * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: enable/disable ULPS.
+ *
+ * ULPS can be enabled/disabled after DSI host engine is turned on.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_ULPS_TOGGLE, enable);
+ if (rc) {
+ pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto error;
+ }
+
+ if (enable)
+ rc = dsi_enable_ulps(dsi_ctrl);
+ else
+ rc = dsi_disable_ulps(dsi_ctrl);
+
+ if (rc) {
+ pr_err("[DSI_%d] Ulps state change(%d) failed, rc=%d\n",
+ dsi_ctrl->index, enable, rc);
+ goto error;
+ }
+
+ pr_debug("[DSI_%d] ULPS state = %d\n", dsi_ctrl->index, enable);
+ dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_ULPS_TOGGLE, enable);
+error:
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
+ * dsi_ctrl_set_clamp_state() - set clamp state for DSI phy
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: enable/disable clamping.
+ *
+ * Clamps can be enabled/disabled while DSI contoller is still turned on.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_clamp_state(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+ int rc = 0;
+
+ if (!dsi_ctrl) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CLAMP_TOGGLE, enable);
+ if (rc) {
+ pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto error;
+ }
+
+ rc = dsi_enable_io_clamp(dsi_ctrl, enable);
+ if (rc) {
+ pr_err("[DSI_%d] Failed to enable IO clamp\n", dsi_ctrl->index);
+ goto error;
+ }
+
+ pr_debug("[DSI_%d] Clamp state = %d\n", dsi_ctrl->index, enable);
+ dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CLAMP_TOGGLE, enable);
+error:
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
+ * dsi_ctrl_set_clock_source() - set clock source fpr dsi link clocks
+ * @dsi_ctrl: DSI controller handle.
+ * @source_clks: Source clocks for DSI link clocks.
+ *
+ * Clock source should be changed while link clocks are disabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_clock_source(struct dsi_ctrl *dsi_ctrl,
+ struct dsi_clk_link_set *source_clks)
+{
+ int rc = 0;
+ u32 op_state = 0;
+
+ if (!dsi_ctrl || !source_clks) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_ctrl->ctrl_lock);
+
+ if (source_clks->pixel_clk && source_clks->byte_clk)
+ op_state = 1;
+
+ rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_SET_CLK_SOURCE,
+ op_state);
+ if (rc) {
+ pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto error;
+ }
+
+ rc = dsi_clk_update_parent(source_clks, &dsi_ctrl->clk_info.rcg_clks);
+ if (rc) {
+ pr_err("[DSI_%d]Failed to update link clk parent, rc=%d\n",
+ dsi_ctrl->index, rc);
+ (void)dsi_clk_update_parent(&dsi_ctrl->clk_info.pll_op_clks,
+ &dsi_ctrl->clk_info.rcg_clks);
+ goto error;
+ }
+
+ dsi_ctrl->clk_info.pll_op_clks.byte_clk = source_clks->byte_clk;
+ dsi_ctrl->clk_info.pll_op_clks.pixel_clk = source_clks->pixel_clk;
+
+ pr_debug("[DSI_%d] Source clocks are updated\n", dsi_ctrl->index);
+
+ dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_SET_CLK_SOURCE, op_state);
+
+error:
+ mutex_unlock(&dsi_ctrl->ctrl_lock);
+ return rc;
+}
+
+/**
+ * dsi_ctrl_drv_register() - register platform driver for dsi controller
+ */
+void dsi_ctrl_drv_register(void)
+{
+ platform_driver_register(&dsi_ctrl_driver);
+}
+
/**
 * dsi_ctrl_drv_unregister() - unregister platform driver
 *
 * Counterpart of dsi_ctrl_drv_register(); removes the platform driver
 * binding for all DSI controller devices.
 */
void dsi_ctrl_drv_unregister(void)
{
	platform_driver_unregister(&dsi_ctrl_driver);
}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
new file mode 100644
index 000000000000..993a35cbf84a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -0,0 +1,489 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_H_
+#define _DSI_CTRL_H_
+
+#include <linux/debugfs.h>
+
+#include "dsi_defs.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_clk_pwr.h"
+#include "drm_mipi_dsi.h"
+
+/*
+ * DSI Command transfer modifiers
+ * @DSI_CTRL_CMD_READ: The current transfer involves reading data.
+ * @DSI_CTRL_CMD_BROADCAST: The current transfer needs to be done in
+ * broadcast mode to multiple slaves.
+ * @DSI_CTRL_CMD_BROADCAST_MASTER: This controller is the master and the slaves
+ * sync to this trigger.
+ * @DSI_CTRL_CMD_DEFER_TRIGGER: Defer the command trigger to later.
+ * @DSI_CTRL_CMD_FIFO_STORE: Use FIFO for command transfer in place of
+ * reading data from memory.
+ */
+#define DSI_CTRL_CMD_READ 0x1
+#define DSI_CTRL_CMD_BROADCAST 0x2
+#define DSI_CTRL_CMD_BROADCAST_MASTER 0x4
+#define DSI_CTRL_CMD_DEFER_TRIGGER 0x8
+#define DSI_CTRL_CMD_FIFO_STORE 0x10
+
+/**
+ * enum dsi_power_state - defines power states for dsi controller.
+ * @DSI_CTRL_POWER_OFF: DSI controller is powered down.
+ * @DSI_CTRL_POWER_VREG_ON: Digital and analog supplies for DSI controller
+ * are powered on.
+ * @DSI_CTRL_POWER_CORE_CLK_ON: DSI core clocks for register access are enabled.
+ * @DSI_CTRL_POWER_LINK_CLK_ON: DSI link clocks for link transfer are enabled.
+ * @DSI_CTRL_POWER_MAX: Maximum value.
+ */
+enum dsi_power_state {
+ DSI_CTRL_POWER_OFF = 0,
+ DSI_CTRL_POWER_VREG_ON,
+ DSI_CTRL_POWER_CORE_CLK_ON,
+ DSI_CTRL_POWER_LINK_CLK_ON,
+ DSI_CTRL_POWER_MAX,
+};
+
+/**
+ * enum dsi_engine_state - define engine status for dsi controller.
+ * @DSI_CTRL_ENGINE_OFF: Engine is turned off.
+ * @DSI_CTRL_ENGINE_ON: Engine is turned on.
+ * @DSI_CTRL_ENGINE_MAX: Maximum value.
+ */
+enum dsi_engine_state {
+ DSI_CTRL_ENGINE_OFF = 0,
+ DSI_CTRL_ENGINE_ON,
+ DSI_CTRL_ENGINE_MAX,
+};
+
+/**
+ * struct dsi_ctrl_power_info - digital and analog power supplies for dsi host
+ * @digital: Digital power supply required to turn on DSI controller hardware.
+ * @host_pwr: Analog power supplies required to turn on DSI controller hardware.
+ *		Even though DSI controller itself does not require an analog
+ * power supply, supplies required for PLL can be defined here to
+ * allow proper control over these supplies.
+ */
+struct dsi_ctrl_power_info {
+ struct dsi_regulator_info digital;
+ struct dsi_regulator_info host_pwr;
+};
+
+/**
+ * struct dsi_ctrl_clk_info - clock information for DSI controller
+ * @core_clks: Core clocks needed to access DSI controller registers.
+ * @link_clks: Link clocks required to transmit data over DSI link.
+ * @rcg_clks: Root clock generation clocks generated in MMSS_CC. The
+ * output of the PLL is set as parent for these root
+ * clocks. These clocks are specific to controller
+ * instance.
+ * @mux_clks: Mux clocks used for Dynamic refresh feature.
+ * @ext_clks: External byte/pixel clocks from the MMSS block. These
+ * clocks are set as parent to rcg clocks.
+ * @pll_op_clks: TODO:
+ * @shadow_clks: TODO:
+ */
+struct dsi_ctrl_clk_info {
+ /* Clocks parsed from DT */
+ struct dsi_core_clk_info core_clks;
+ struct dsi_link_clk_info link_clks;
+ struct dsi_clk_link_set rcg_clks;
+
+ /* Clocks set by DSI Manager */
+ struct dsi_clk_link_set mux_clks;
+ struct dsi_clk_link_set ext_clks;
+ struct dsi_clk_link_set pll_op_clks;
+ struct dsi_clk_link_set shadow_clks;
+};
+
+/**
+ * struct dsi_ctrl_bus_scale_info - Bus scale info for msm-bus bandwidth voting
+ * @bus_scale_table: Bus scale voting usecases.
+ * @bus_handle: Handle used for voting bandwidth.
+ * @refcount: reference count.
+ */
+struct dsi_ctrl_bus_scale_info {
+ struct msm_bus_scale_pdata *bus_scale_table;
+ u32 bus_handle;
+ u32 refcount;
+};
+
+/**
+ * struct dsi_ctrl_state_info - current driver state information
+ * @power_state: Controller power state.
+ * @cmd_engine_state: Status of DSI command engine.
+ * @vid_engine_state: Status of DSI video engine.
+ * @controller_state: Status of DSI Controller engine.
+ * @pwr_enabled: Set to true, if voltage supplies are enabled.
+ * @core_clk_enabled: Set to true, if core clocks are enabled.
+ * @link_clk_enabled:       Set to true, if link clocks are enabled.
+ * @ulps_enabled: Set to true, if lanes are in ULPS state.
+ * @clamp_enabled: Set to true, if PHY output is clamped.
+ * @clk_source_set: Set to true, if parent is set for DSI link clocks.
+ */
+struct dsi_ctrl_state_info {
+ enum dsi_power_state power_state;
+ enum dsi_engine_state cmd_engine_state;
+ enum dsi_engine_state vid_engine_state;
+ enum dsi_engine_state controller_state;
+ bool pwr_enabled;
+ bool core_clk_enabled;
+ bool link_clk_enabled;
+ bool ulps_enabled;
+ bool clamp_enabled;
+ bool clk_source_set;
+ bool host_initialized;
+ bool tpg_enabled;
+};
+
+/**
+ * struct dsi_ctrl_interrupts - define interrupt information
+ * @irq: IRQ id for the DSI controller.
+ * @intr_lock: Spinlock to protect access to interrupt registers.
+ * @interrupt_status: Status interrupts which need to be serviced.
+ * @error_status:           Error interrupts which need to be serviced.
+ * @interrupts_enabled: Status interrupts which are enabled.
+ * @errors_enabled: Error interrupts which are enabled.
+ * @cmd_dma_done: Completion signal for DSI_CMD_MODE_DMA_DONE interrupt
+ * @vid_frame_done: Completion signal for DSI_VIDEO_MODE_FRAME_DONE int.
+ * @cmd_frame_done: Completion signal for DSI_CMD_FRAME_DONE interrupt.
+ * @interrupt_done_work: Work item for servicing status interrupts.
+ * @error_status_work: Work item for servicing error interrupts.
+ */
+struct dsi_ctrl_interrupts {
+ u32 irq;
+ spinlock_t intr_lock; /* protects access to interrupt registers */
+ u32 interrupt_status;
+ u64 error_status;
+
+ u32 interrupts_enabled;
+ u64 errors_enabled;
+
+ struct completion cmd_dma_done;
+ struct completion vid_frame_done;
+ struct completion cmd_frame_done;
+
+ struct work_struct interrupt_done_work;
+ struct work_struct error_status_work;
+};
+
+/**
+ * struct dsi_ctrl - DSI controller object
+ * @pdev: Pointer to platform device.
+ * @index: Instance id.
+ * @name: Name of the controller instance.
+ * @refcount: ref counter.
+ * @ctrl_lock: Mutex for hardware and object access.
+ * @drm_dev: Pointer to DRM device.
+ * @version: DSI controller version.
+ * @hw: DSI controller hardware object.
+ * @current_state:       Current driver and hardware state.
+ * @int_info: Interrupt information.
+ * @clk_info: Clock information.
+ * @pwr_info: Power information.
+ * @axi_bus_info: AXI bus information.
+ * @host_config: Current host configuration.
+ * @tx_cmd_buf: Tx command buffer.
+ * @cmd_buffer_size: Size of command buffer.
+ * @debugfs_root: Root for debugfs entries.
+ */
+struct dsi_ctrl {
+ struct platform_device *pdev;
+ u32 index;
+ const char *name;
+ u32 refcount;
+ struct mutex ctrl_lock;
+ struct drm_device *drm_dev;
+
+ enum dsi_ctrl_version version;
+ struct dsi_ctrl_hw hw;
+
+ /* Current state */
+ struct dsi_ctrl_state_info current_state;
+
+ struct dsi_ctrl_interrupts int_info;
+ /* Clock and power states */
+ struct dsi_ctrl_clk_info clk_info;
+ struct dsi_ctrl_power_info pwr_info;
+ struct dsi_ctrl_bus_scale_info axi_bus_info;
+
+ struct dsi_host_config host_config;
+ /* Command tx and rx */
+ struct drm_gem_object *tx_cmd_buf;
+ u32 cmd_buffer_size;
+
+ /* Debug Information */
+ struct dentry *debugfs_root;
+
+};
+
+/**
+ * dsi_ctrl_get() - get a dsi_ctrl handle from an of_node
+ * @of_node: of_node of the DSI controller.
+ *
+ * Gets the DSI controller handle for the corresponding of_node. The ref count
+ * is incremented to one and all subsequent gets will fail until the original
+ * clients calls a put.
+ *
+ * Return: DSI Controller handle.
+ */
+struct dsi_ctrl *dsi_ctrl_get(struct device_node *of_node);
+
+/**
+ * dsi_ctrl_put() - releases a dsi controller handle.
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Releases the DSI controller. Driver will clean up all resources and puts back
+ * the DSI controller into reset state.
+ */
+void dsi_ctrl_put(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_drv_init() - initialize dsi controller driver.
+ * @dsi_ctrl: DSI controller handle.
+ * @parent: Parent directory for debug fs.
+ *
+ * Initializes DSI controller driver. Driver should be initialized after
+ * dsi_ctrl_get() succeeds.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_drv_init(struct dsi_ctrl *dsi_ctrl, struct dentry *parent);
+
+/**
+ * dsi_ctrl_drv_deinit() - de-initializes dsi controller driver
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Releases all resources acquired by dsi_ctrl_drv_init().
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_drv_deinit(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_validate_timing() - validate a video timing configuration
+ * @dsi_ctrl: DSI controller handle.
+ * @timing: Pointer to timing data.
+ *
+ * Driver will validate if the timing configuration is supported on the
+ * controller hardware.
+ *
+ * Return: error code if timing is not supported.
+ */
+int dsi_ctrl_validate_timing(struct dsi_ctrl *dsi_ctrl,
+ struct dsi_mode_info *timing);
+
+/**
+ * dsi_ctrl_update_host_config() - update dsi host configuration
+ * @dsi_ctrl: DSI controller handle.
+ * @config: DSI host configuration.
+ * @flags: dsi_mode_flags modifying the behavior
+ *
+ * Updates driver with new Host configuration to use for host initialization.
+ * This function call will only update the software context. The stored
+ * configuration information will be used when the host is initialized.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_update_host_config(struct dsi_ctrl *dsi_ctrl,
+ struct dsi_host_config *config,
+ int flags);
+
+/**
+ * dsi_ctrl_async_timing_update() - update only controller timing
+ * @dsi_ctrl: DSI controller handle.
+ * @timing: New DSI timing info
+ *
+ * Updates host timing values to asynchronously transition to new timing
+ * For example, to update the porch values in a seamless/dynamic fps switch.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_async_timing_update(struct dsi_ctrl *dsi_ctrl,
+ struct dsi_mode_info *timing);
+
+/**
+ * dsi_ctrl_phy_sw_reset() - perform a PHY software reset
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Performs a PHY software reset on the DSI controller. Reset should be done
+ * when the controller power state is DSI_CTRL_POWER_CORE_CLK_ON and the PHY is
+ * not enabled.
+ *
+ * This function will fail if driver is in any other state.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_host_init() - Initialize DSI host hardware.
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Initializes DSI controller hardware with host configuration provided by
+ * dsi_ctrl_update_host_config(). Initialization can be performed only during
+ * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
+ * performed.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * De-initializes DSI controller hardware. It can be performed only during
+ * DSI_CTRL_POWER_CORE_CLK_ON state after LINK clocks have been turned off.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_set_tpg_state() - enable/disable test pattern on the controller
+ * @dsi_ctrl: DSI controller handle.
+ * @on: enable/disable test pattern.
+ *
+ * Test pattern can be enabled only after Video engine (for video mode panels)
+ * or command engine (for cmd mode panels) is enabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on);
+
+/**
+ * dsi_ctrl_cmd_transfer() - Transfer commands on DSI link
+ * @dsi_ctrl: DSI controller handle.
+ * @msg: Message to transfer on DSI link.
+ * @flags: Modifiers for message transfer.
+ *
+ * Command transfer can be done only when command engine is enabled. The
+ * transfer API will block until either the command transfer finishes or
+ * value is reached. If the trigger is deferred, it will return without
+ * triggering the transfer. Command parameters are programmed to hardware.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl,
+ const struct mipi_dsi_msg *msg,
+ u32 flags);
+
+/**
+ * dsi_ctrl_cmd_tx_trigger() - Trigger a deferred command.
+ * @dsi_ctrl: DSI controller handle.
+ * @flags: Modifiers.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags);
+
+/**
+ * dsi_ctrl_set_power_state() - set power state for dsi controller
+ * @dsi_ctrl: DSI controller handle.
+ * @state: Power state.
+ *
+ * Set power state for DSI controller. Power state can be changed only when
+ * Controller, Video and Command engines are turned off.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_power_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_power_state state);
+
+/**
+ * dsi_ctrl_set_cmd_engine_state() - set command engine state
+ * @dsi_ctrl: DSI Controller handle.
+ * @state: Engine state.
+ *
+ * Command engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_cmd_engine_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_engine_state state);
+
+/**
+ * dsi_ctrl_set_vid_engine_state() - set video engine state
+ * @dsi_ctrl: DSI Controller handle.
+ * @state: Engine state.
+ *
+ * Video engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_vid_engine_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_engine_state state);
+
+/**
+ * dsi_ctrl_set_host_engine_state() - set host engine state
+ * @dsi_ctrl: DSI Controller handle.
+ * @state: Engine state.
+ *
+ * Host engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON and cmd, video engines are disabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_host_engine_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_engine_state state);
+
+/**
+ * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: enable/disable ULPS.
+ *
+ * ULPS can be enabled/disabled after DSI host engine is turned on.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable);
+
+/**
+ * dsi_ctrl_set_clamp_state() - set clamp state for DSI phy
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: enable/disable clamping.
+ *
+ * Clamps can be enabled/disabled while DSI controller is still turned on.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_clamp_state(struct dsi_ctrl *dsi_ctrl, bool enable);
+
+/**
+ * dsi_ctrl_set_clock_source() - set clock source for dsi link clocks
+ * @dsi_ctrl: DSI controller handle.
+ * @source_clks: Source clocks for DSI link clocks.
+ *
+ * Clock source should be changed while link clocks are disabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_clock_source(struct dsi_ctrl *dsi_ctrl,
+ struct dsi_clk_link_set *source_clks);
+
+/**
+ * dsi_ctrl_drv_register() - register platform driver for dsi controller
+ */
+void dsi_ctrl_drv_register(void);
+
+/**
+ * dsi_ctrl_drv_unregister() - unregister platform driver
+ */
+void dsi_ctrl_drv_unregister(void);
+
+#endif /* _DSI_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
new file mode 100644
index 000000000000..b81cdaf4ba02
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -0,0 +1,578 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_HW_H_
+#define _DSI_CTRL_HW_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+
+#include "dsi_defs.h"
+
+/**
+ * Modifier flag for command transmission. If this flag is set, command
+ * information is programmed to hardware and transmission is not triggered.
+ * Caller should call the trigger_command_dma() to start the transmission. This
+ * flag is valid for kickoff_command() and kickoff_fifo_command() operations.
+ */
+#define DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER 0x1
+
+/**
+ * enum dsi_ctrl_version - version of the dsi host controller
+ * @DSI_CTRL_VERSION_UNKNOWN: Unknown controller version
+ * @DSI_CTRL_VERSION_1_4: DSI host v1.4 controller
+ * @DSI_CTRL_VERSION_2_0: DSI host v2.0 controller
+ * @DSI_CTRL_VERSION_MAX: max version
+ */
+enum dsi_ctrl_version {
+ DSI_CTRL_VERSION_UNKNOWN,
+ DSI_CTRL_VERSION_1_4,
+ DSI_CTRL_VERSION_2_0,
+ DSI_CTRL_VERSION_MAX
+};
+
+/**
+ * enum dsi_ctrl_hw_features - features supported by dsi host controller
+ * @DSI_CTRL_VIDEO_TPG: Test pattern support for video mode.
+ * @DSI_CTRL_CMD_TPG: Test pattern support for command mode.
+ * @DSI_CTRL_VARIABLE_REFRESH_RATE: variable panel timing
+ * @DSI_CTRL_DYNAMIC_REFRESH: variable pixel clock rate
+ * @DSI_CTRL_NULL_PACKET_INSERTION: NULL packet insertion
+ * @DSI_CTRL_DESKEW_CALIB: Deskew calibration support
+ * @DSI_CTRL_DPHY: Controller support for DPHY
+ * @DSI_CTRL_CPHY: Controller support for CPHY
+ * @DSI_CTRL_MAX_FEATURES:
+ */
+enum dsi_ctrl_hw_features {
+ DSI_CTRL_VIDEO_TPG,
+ DSI_CTRL_CMD_TPG,
+ DSI_CTRL_VARIABLE_REFRESH_RATE,
+ DSI_CTRL_DYNAMIC_REFRESH,
+ DSI_CTRL_NULL_PACKET_INSERTION,
+ DSI_CTRL_DESKEW_CALIB,
+ DSI_CTRL_DPHY,
+ DSI_CTRL_CPHY,
+ DSI_CTRL_MAX_FEATURES
+};
+
+/**
+ * enum dsi_test_pattern - test pattern type
+ * @DSI_TEST_PATTERN_FIXED: Test pattern is fixed, based on init value.
+ * @DSI_TEST_PATTERN_INC: Incremental test pattern, base on init value.
+ * @DSI_TEST_PATTERN_POLY: Pattern generated from polynomial and init val.
+ * @DSI_TEST_PATTERN_MAX:
+ */
+enum dsi_test_pattern {
+ DSI_TEST_PATTERN_FIXED = 0,
+ DSI_TEST_PATTERN_INC,
+ DSI_TEST_PATTERN_POLY,
+ DSI_TEST_PATTERN_MAX
+};
+
+/**
+ * enum dsi_status_int_type - status interrupts generated by DSI controller
+ * @DSI_CMD_MODE_DMA_DONE: Command mode DMA packets are sent out.
+ * @DSI_CMD_STREAM0_FRAME_DONE: A frame of command mode stream0 is sent out.
+ * @DSI_CMD_STREAM1_FRAME_DONE: A frame of command mode stream1 is sent out.
+ * @DSI_CMD_STREAM2_FRAME_DONE: A frame of command mode stream2 is sent out.
+ * @DSI_VIDEO_MODE_FRAME_DONE: A frame of video mode stream is sent out.
+ * @DSI_BTA_DONE: A BTA is completed.
+ * @DSI_CMD_FRAME_DONE: A frame of selected command mode stream is
+ * sent out by MDP.
+ * @DSI_DYN_REFRESH_DONE: The dynamic refresh operation has completed.
+ * @DSI_DESKEW_DONE: The deskew calibration operation has completed
+ * @DSI_DYN_BLANK_DMA_DONE:     The dynamic blanking DMA operation has
+ * completed.
+ */
+enum dsi_status_int_type {
+ DSI_CMD_MODE_DMA_DONE = BIT(0),
+ DSI_CMD_STREAM0_FRAME_DONE = BIT(1),
+ DSI_CMD_STREAM1_FRAME_DONE = BIT(2),
+ DSI_CMD_STREAM2_FRAME_DONE = BIT(3),
+ DSI_VIDEO_MODE_FRAME_DONE = BIT(4),
+ DSI_BTA_DONE = BIT(5),
+ DSI_CMD_FRAME_DONE = BIT(6),
+ DSI_DYN_REFRESH_DONE = BIT(7),
+ DSI_DESKEW_DONE = BIT(8),
+ DSI_DYN_BLANK_DMA_DONE = BIT(9)
+};
+
+/**
+ * enum dsi_error_int_type - error interrupts generated by DSI controller
+ * @DSI_RDBK_SINGLE_ECC_ERR: Single bit ECC error in read packet.
+ * @DSI_RDBK_MULTI_ECC_ERR: Multi bit ECC error in read packet.
+ * @DSI_RDBK_CRC_ERR: CRC error in read packet.
+ * @DSI_RDBK_INCOMPLETE_PKT: Incomplete read packet.
+ * @DSI_PERIPH_ERROR_PKT:         Error packet returned from peripheral.
+ * @DSI_LP_RX_TIMEOUT: Low power reverse transmission timeout.
+ * @DSI_HS_TX_TIMEOUT: High speed forward transmission timeout.
+ * @DSI_BTA_TIMEOUT: BTA timeout.
+ * @DSI_PLL_UNLOCK: PLL has unlocked.
+ * @DSI_DLN0_ESC_ENTRY_ERR: Incorrect LP Rx escape entry.
+ * @DSI_DLN0_ESC_SYNC_ERR: LP Rx data is not byte aligned.
+ * @DSI_DLN0_LP_CONTROL_ERR: Incorrect LP Rx state sequence.
+ * @DSI_PENDING_HS_TX_TIMEOUT: Pending High-speed transfer timeout.
+ * @DSI_INTERLEAVE_OP_CONTENTION: Interleave operation contention.
+ * @DSI_CMD_DMA_FIFO_UNDERFLOW: Command mode DMA FIFO underflow.
+ * @DSI_CMD_MDP_FIFO_UNDERFLOW: Command MDP FIFO underflow (failed to
+ * receive one complete line from MDP).
+ * @DSI_DLN0_HS_FIFO_OVERFLOW: High speed FIFO for data lane 0 overflows.
+ * @DSI_DLN1_HS_FIFO_OVERFLOW: High speed FIFO for data lane 1 overflows.
+ * @DSI_DLN2_HS_FIFO_OVERFLOW: High speed FIFO for data lane 2 overflows.
+ * @DSI_DLN3_HS_FIFO_OVERFLOW: High speed FIFO for data lane 3 overflows.
+ * @DSI_DLN0_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 0 underflows.
+ * @DSI_DLN1_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 1 underflows.
+ * @DSI_DLN2_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 2 underflows.
+ * @DSI_DLN3_HS_FIFO_UNDERFLOW:   High speed FIFO for data lane 3 underflows.
+ * @DSI_DLN0_LP0_CONTENTION: PHY level contention while lane 0 is low.
+ * @DSI_DLN1_LP0_CONTENTION: PHY level contention while lane 1 is low.
+ * @DSI_DLN2_LP0_CONTENTION: PHY level contention while lane 2 is low.
+ * @DSI_DLN3_LP0_CONTENTION: PHY level contention while lane 3 is low.
+ * @DSI_DLN0_LP1_CONTENTION: PHY level contention while lane 0 is high.
+ * @DSI_DLN1_LP1_CONTENTION: PHY level contention while lane 1 is high.
+ * @DSI_DLN2_LP1_CONTENTION: PHY level contention while lane 2 is high.
+ * @DSI_DLN3_LP1_CONTENTION: PHY level contention while lane 3 is high.
+ */
+enum dsi_error_int_type {
+ DSI_RDBK_SINGLE_ECC_ERR = BIT(0),
+ DSI_RDBK_MULTI_ECC_ERR = BIT(1),
+ DSI_RDBK_CRC_ERR = BIT(2),
+ DSI_RDBK_INCOMPLETE_PKT = BIT(3),
+ DSI_PERIPH_ERROR_PKT = BIT(4),
+ DSI_LP_RX_TIMEOUT = BIT(5),
+ DSI_HS_TX_TIMEOUT = BIT(6),
+ DSI_BTA_TIMEOUT = BIT(7),
+ DSI_PLL_UNLOCK = BIT(8),
+ DSI_DLN0_ESC_ENTRY_ERR = BIT(9),
+ DSI_DLN0_ESC_SYNC_ERR = BIT(10),
+ DSI_DLN0_LP_CONTROL_ERR = BIT(11),
+ DSI_PENDING_HS_TX_TIMEOUT = BIT(12),
+ DSI_INTERLEAVE_OP_CONTENTION = BIT(13),
+ DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(14),
+ DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(15),
+ DSI_DLN0_HS_FIFO_OVERFLOW = BIT(16),
+ DSI_DLN1_HS_FIFO_OVERFLOW = BIT(17),
+ DSI_DLN2_HS_FIFO_OVERFLOW = BIT(18),
+ DSI_DLN3_HS_FIFO_OVERFLOW = BIT(19),
+ DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(20),
+ DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(21),
+ DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(22),
+ DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(23),
+ DSI_DLN0_LP0_CONTENTION = BIT(24),
+ DSI_DLN1_LP0_CONTENTION = BIT(25),
+ DSI_DLN2_LP0_CONTENTION = BIT(26),
+ DSI_DLN3_LP0_CONTENTION = BIT(27),
+ DSI_DLN0_LP1_CONTENTION = BIT(28),
+ DSI_DLN1_LP1_CONTENTION = BIT(29),
+ DSI_DLN2_LP1_CONTENTION = BIT(30),
+ DSI_DLN3_LP1_CONTENTION = BIT(31),
+};
+
+/**
+ * struct dsi_ctrl_cmd_dma_info - command buffer information
+ * @offset: IOMMU VA for command buffer address.
+ * @length: Length of the command buffer.
+ * @en_broadcast: Enable broadcast mode if set to true.
+ * @is_master: Is master in broadcast mode.
+ * @use_lpm: Use low power mode for command transmission.
+ */
+struct dsi_ctrl_cmd_dma_info {
+ u32 offset;
+ u32 length;
+ bool en_broadcast;
+ bool is_master;
+ bool use_lpm;
+};
+
+/**
+ * struct dsi_ctrl_cmd_dma_fifo_info - command payload to be sent using FIFO
+ * @command: VA for command buffer.
+ * @size: Size of the command buffer.
+ * @en_broadcast: Enable broadcast mode if set to true.
+ * @is_master: Is master in broadcast mode.
+ * @use_lpm: Use low power mode for command transmission.
+ */
+struct dsi_ctrl_cmd_dma_fifo_info {
+ u32 *command;
+ u32 size;
+ bool en_broadcast;
+ bool is_master;
+ bool use_lpm;
+};
+
+struct dsi_ctrl_hw;
+
+/**
+ * struct dsi_ctrl_hw_ops - operations supported by dsi host hardware
+ */
+struct dsi_ctrl_hw_ops {
+
+ /**
+ * host_setup() - Setup DSI host configuration
+ * @ctrl: Pointer to controller host hardware.
+ * @config: Configuration for DSI host controller
+ */
+ void (*host_setup)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *config);
+
+ /**
+ * video_engine_en() - enable DSI video engine
+ * @ctrl: Pointer to controller host hardware.
+	 * @on:        Enable/disable video engine.
+ */
+ void (*video_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+ /**
+ * video_engine_setup() - Setup dsi host controller for video mode
+ * @ctrl: Pointer to controller host hardware.
+ * @common_cfg: Common configuration parameters.
+ * @cfg: Video mode configuration.
+ *
+ * Set up DSI video engine with a specific configuration. Controller and
+ * video engine are not enabled as part of this function.
+ */
+ void (*video_engine_setup)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_video_engine_cfg *cfg);
+
+ /**
+ * set_video_timing() - set up the timing for video frame
+ * @ctrl: Pointer to controller host hardware.
+ * @mode: Video mode information.
+ *
+ * Set up the video timing parameters for the DSI video mode operation.
+ */
+ void (*set_video_timing)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_mode_info *mode);
+
+ /**
+ * cmd_engine_setup() - setup dsi host controller for command mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @common_cfg: Common configuration parameters.
+ * @cfg: Command mode configuration.
+ *
+ * Setup DSI CMD engine with a specific configuration. Controller and
+ * command engine are not enabled as part of this function.
+ */
+ void (*cmd_engine_setup)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_cmd_engine_cfg *cfg);
+
+ /**
+ * setup_cmd_stream() - set up parameters for command pixel streams
+ * @ctrl: Pointer to controller host hardware.
+ * @width_in_pixels: Width of the stream in pixels.
+ * @h_stride: Horizontal stride in bytes.
+	 * @height_in_lines:  Number of lines in the stream.
+ * @vc_id: stream_id.
+ *
+ * Setup parameters for command mode pixel stream size.
+ */
+ void (*setup_cmd_stream)(struct dsi_ctrl_hw *ctrl,
+ u32 width_in_pixels,
+ u32 h_stride,
+ u32 height_in_lines,
+ u32 vc_id);
+
+ /**
+ * ctrl_en() - enable DSI controller engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @on: turn on/off the DSI controller engine.
+ */
+ void (*ctrl_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+ /**
+ * cmd_engine_en() - enable DSI controller command engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @on: Turn on/off the DSI command engine.
+ */
+ void (*cmd_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+ /**
+ * phy_sw_reset() - perform a soft reset on the PHY.
+ * @ctrl: Pointer to the controller host hardware.
+ */
+ void (*phy_sw_reset)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * soft_reset() - perform a soft reset on DSI controller
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * The video, command and controller engines will be disable before the
+ * reset is triggered. These engines will not be enabled after the reset
+ * is complete. Caller must re-enable the engines.
+ *
+ * If the reset is done while MDP timing engine is turned on, the video
+	 * engine should be re-enabled only during the vertical blanking time.
+ */
+ void (*soft_reset)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * setup_lane_map() - setup mapping between logical and physical lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lane_map: Structure defining the mapping between DSI logical
+ * lanes and physical lanes.
+ */
+ void (*setup_lane_map)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_lane_mapping *lane_map);
+
+ /**
+ * kickoff_command() - transmits commands stored in memory
+ * @ctrl: Pointer to the controller host hardware.
+ * @cmd: Command information.
+ * @flags: Modifiers for command transmission.
+ *
+ * The controller hardware is programmed with address and size of the
+ * command buffer. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+ void (*kickoff_command)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_info *cmd,
+ u32 flags);
+
+ /**
+ * kickoff_fifo_command() - transmits a command using FIFO in dsi
+ * hardware.
+ * @ctrl: Pointer to the controller host hardware.
+ * @cmd: Command information.
+ * @flags: Modifiers for command transmission.
+ *
+ * The controller hardware FIFO is programmed with command header and
+ * payload. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+ void (*kickoff_fifo_command)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+ u32 flags);
+
+ void (*reset_cmd_fifo)(struct dsi_ctrl_hw *ctrl);
+ /**
+ * trigger_command_dma() - trigger transmission of command buffer.
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * This trigger can be only used if there was a prior call to
+	 * kickoff_command() or kickoff_fifo_command() with
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
+ */
+ void (*trigger_command_dma)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * get_cmd_read_data() - get data read from the peripheral
+ * @ctrl: Pointer to the controller host hardware.
+ * @rd_buf: Buffer where data will be read into.
+ * @total_read_len: Number of bytes to read.
+ */
+ u32 (*get_cmd_read_data)(struct dsi_ctrl_hw *ctrl,
+ u8 *rd_buf,
+ u32 total_read_len);
+
+ /**
+ * ulps_request() - request ulps entry for specified lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes) which need
+ * to enter ULPS.
+ *
+ * Caller should check if lanes are in ULPS mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+ void (*ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+ /**
+ * ulps_exit() - exit ULPS on specified lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes) which need
+ * to exit ULPS.
+ *
+ * Caller should check if lanes are in active mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+ void (*ulps_exit)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+ /**
+ * clear_ulps_request() - clear ulps request once all lanes are active
+ * @ctrl: Pointer to controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes).
+ *
+ * ULPS request should be cleared after the lanes have exited ULPS.
+ */
+ void (*clear_ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+ /**
+ * get_lanes_in_ulps() - returns the list of lanes in ULPS mode
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
+ * state. If 0 is returned, all the lanes are active.
+ *
+ * Return: List of lanes in ULPS state.
+ */
+ u32 (*get_lanes_in_ulps)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * clamp_enable() - enable DSI clamps to keep PHY driving a stable link
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes which need to be clamped.
+ * @enable_ulps: TODO:??
+ */
+ void (*clamp_enable)(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool enable_ulps);
+
+ /**
+ * clamp_disable() - disable DSI clamps
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes which need to have clamps released.
+ * @disable_ulps: TODO:??
+ */
+ void (*clamp_disable)(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool disable_ulps);
+
+ /**
+ * get_interrupt_status() - returns the interrupt status
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of interrupts(enum dsi_status_int_type) that
+ * are active. This list does not include any error interrupts. Caller
+ * should call get_error_status for error interrupts.
+ *
+ * Return: List of active interrupts.
+ */
+ u32 (*get_interrupt_status)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * clear_interrupt_status() - clears the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @ints: List of interrupts to be cleared.
+ */
+ void (*clear_interrupt_status)(struct dsi_ctrl_hw *ctrl, u32 ints);
+
+ /**
+ * enable_status_interrupts() - enable the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @ints: List of interrupts to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set ints to 0.
+ */
+ void (*enable_status_interrupts)(struct dsi_ctrl_hw *ctrl, u32 ints);
+
+ /**
+ * get_error_status() - returns the error status
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of errors(enum dsi_error_int_type) that are
+ * active. This list does not include any status interrupts. Caller
+ * should call get_interrupt_status for status interrupts.
+ *
+ * Return: List of active error interrupts.
+ */
+ u64 (*get_error_status)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * clear_error_status() - clears the specified errors
+ * @ctrl: Pointer to the controller host hardware.
+ * @errors: List of errors to be cleared.
+ */
+ void (*clear_error_status)(struct dsi_ctrl_hw *ctrl, u64 errors);
+
+ /**
+ * enable_error_interrupts() - enable the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @errors: List of errors to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set errors to 0.
+ */
+ void (*enable_error_interrupts)(struct dsi_ctrl_hw *ctrl, u64 errors);
+
+ /**
+ * video_test_pattern_setup() - setup test pattern engine for video mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @type: Type of test pattern.
+ * @init_val: Initial value to use for generating test pattern.
+ */
+ void (*video_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val);
+
+ /**
+ * cmd_test_pattern_setup() - setup test patttern engine for cmd mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @type: Type of test pattern.
+ * @init_val: Initial value to use for generating test pattern.
+ * @stream_id: Stream Id on which packets are generated.
+ */
+ void (*cmd_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val,
+ u32 stream_id);
+
+ /**
+ * test_pattern_enable() - enable test pattern engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @enable: Enable/Disable test pattern engine.
+ */
+ void (*test_pattern_enable)(struct dsi_ctrl_hw *ctrl, bool enable);
+
+ /**
+ * trigger_cmd_test_pattern() - trigger a command mode frame update with
+ * test pattern
+ * @ctrl: Pointer to the controller host hardware.
+ * @stream_id: Stream on which frame update is sent.
+ */
+ void (*trigger_cmd_test_pattern)(struct dsi_ctrl_hw *ctrl,
+ u32 stream_id);
+
+ ssize_t (*reg_dump_to_buffer)(struct dsi_ctrl_hw *ctrl,
+ char *buf,
+ u32 size);
+};
+
+/**
+ * struct dsi_ctrl_hw - DSI controller hardware object specific to an instance
+ * @base: VA for the DSI controller base address.
+ * @length: Length of the DSI controller register map.
+ * @mmss_misc_base: VA of the mmss_misc register block used by the
+ * clamp_enable/clamp_disable operations.
+ * @mmss_misc_length: Length of the mmss_misc register map.
+ * @index: Instance ID of the controller.
+ * @feature_map: Features supported by the DSI controller.
+ * @ops: Function pointers to the operations supported by the
+ * controller.
+ * @supported_interrupts: Status interrupts supported by this controller;
+ * presumably an ORed list of enum dsi_status_int_type
+ * values — TODO confirm.
+ * @supported_errors: Error interrupts supported by this controller;
+ * presumably an ORed list of enum dsi_error_int_type
+ * values — TODO confirm.
+ */
+struct dsi_ctrl_hw {
+ void __iomem *base;
+ u32 length;
+ void __iomem *mmss_misc_base;
+ u32 mmss_misc_length;
+ u32 index;
+
+ /* features */
+ DECLARE_BITMAP(feature_map, DSI_CTRL_MAX_FEATURES);
+ struct dsi_ctrl_hw_ops ops;
+
+ /* capabilities */
+ u32 supported_interrupts;
+ u64 supported_errors;
+};
+
+#endif /* _DSI_CTRL_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
new file mode 100644
index 000000000000..ca04eedd6af1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
@@ -0,0 +1,1512 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "dsi-hw:" fmt
+#include <linux/delay.h>
+
+#include "dsi_ctrl_hw.h"
+#include "dsi_ctrl_reg_1_4.h"
+#include "dsi_hw.h"
+
+#define MMSS_MISC_CLAMP_REG_OFF 0x0014
+
+/*
+ * Destination pixel-format lookup tables, indexed by enum dsi_pixel_format.
+ * Values are the DST_FORMAT register-field encodings for command mode and
+ * video mode respectively. Unsupported formats default to RGB888.
+ */
+static const u8 cmd_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
+ 0x6, 0x7, 0x8, 0x8, 0x0, 0x3, 0x4 };
+static const u8 video_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
+ 0x0, 0x1, 0x2, 0x3, 0x3, 0x3, 0x3 };
+
+
+/**
+ * dsi_setup_trigger_controls() - setup dsi trigger configurations
+ * @ctrl: Pointer to the controller host hardware.
+ * @cfg: DSI host configuration that is common to both video and
+ * command modes.
+ */
+static void dsi_setup_trigger_controls(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *cfg)
+{
+ static const u8 trigger_map[DSI_TRIGGER_MAX] = {
+ 0x0, 0x2, 0x1, 0x4, 0x5, 0x6 };
+ u32 val;
+
+ /* BIT(31): TE from external pin; [2:0]: DMA trigger; [6:4]: MDP trigger */
+ val = (cfg->te_mode == DSI_TE_ON_EXT_PIN) ? BIT(31) : 0;
+ val |= trigger_map[cfg->dma_cmd_trigger] & 0x7;
+ val |= (trigger_map[cfg->mdp_cmd_trigger] & 0x7) << 4;
+ DSI_W32(ctrl, DSI_TRIG_CTRL, val);
+}
+
+/**
+ * dsi_ctrl_hw_14_host_setup() - setup dsi host configuration
+ * @ctrl: Pointer to the controller host hardware.
+ * @cfg: DSI host configuration that is common to both video and
+ * command modes.
+ */
+void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *cfg)
+{
+ u32 val;
+
+ dsi_setup_trigger_controls(ctrl, cfg);
+
+ /* Clock timing: t_clk_post in [13:8], t_clk_pre in [5:0] */
+ val = ((cfg->t_clk_post & 0x3F) << 8) | (cfg->t_clk_pre & 0x3F);
+ DSI_W32(ctrl, DSI_CLKOUT_TIMING_CTRL, val);
+
+ /* EOT packet control: append on TX (bit 0), ignore on RX (bit 4) */
+ val = (cfg->append_tx_eot ? BIT(0) : 0) |
+ (cfg->ignore_rx_eot ? BIT(4) : 0);
+ DSI_W32(ctrl, DSI_EOT_PACKET_CTRL, val);
+
+ /* Turn on dsi clocks */
+ DSI_W32(ctrl, DSI_CLK_CTRL, 0x23F);
+
+ /* DSI_CTRL: CRC/ECC checks, clock lane and active data lanes */
+ val = (cfg->en_crc_check ? BIT(24) : 0) |
+ (cfg->en_ecc_check ? BIT(20) : 0) |
+ BIT(8); /* Clock lane */
+ if (cfg->data_lanes & DSI_DATA_LANE_0)
+ val |= BIT(4);
+ if (cfg->data_lanes & DSI_DATA_LANE_1)
+ val |= BIT(5);
+ if (cfg->data_lanes & DSI_DATA_LANE_2)
+ val |= BIT(6);
+ if (cfg->data_lanes & DSI_DATA_LANE_3)
+ val |= BIT(7);
+
+ DSI_W32(ctrl, DSI_CTRL, val);
+
+ pr_debug("[DSI_%d]Host configuration complete\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_phy_sw_reset() - perform a soft reset on the PHY.
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Asserts the PHY software reset bit, holds it for 1ms, then releases it.
+ */
+void dsi_ctrl_hw_14_phy_sw_reset(struct dsi_ctrl_hw *ctrl)
+{
+ /* Assert reset and give the PHY time to latch it */
+ DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x1);
+ udelay(1000);
+ /* Release reset; short settle delay — TODO confirm values against HPG */
+ DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x0);
+ udelay(100);
+
+ pr_debug("[DSI_%d] phy sw reset done\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_soft_reset() - perform a soft reset on DSI controller
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * The video, command and controller engines are disabled before the
+ * reset is triggered, and the saved DSI_CTRL value (including any
+ * engine-enable bits that were set) is written back afterwards.
+ *
+ * NOTE(review): the original comment claimed the engines stay disabled
+ * and the caller must re-enable them, but the code restores the
+ * pre-reset DSI_CTRL value — confirm which behavior is intended.
+ *
+ * If the reset is done while MDP timing engine is turned on, the video
+ * engine should be re-enabled only during the vertical blanking time.
+ */
+void dsi_ctrl_hw_14_soft_reset(struct dsi_ctrl_hw *ctrl)
+{
+ u32 reg = 0;
+ u32 reg_ctrl = 0;
+
+ /* Clear DSI_EN, VIDEO_MODE_EN, CMD_MODE_EN */
+ reg_ctrl = DSI_R32(ctrl, DSI_CTRL);
+ DSI_W32(ctrl, DSI_CTRL, reg_ctrl & ~0x7);
+
+ /* Force enable PCLK, BYTECLK, AHBM_HCLK */
+ reg = DSI_R32(ctrl, DSI_CLK_CTRL);
+ reg |= 0x23F;
+ DSI_W32(ctrl, DSI_CLK_CTRL, reg);
+
+ /* Trigger soft reset */
+ DSI_W32(ctrl, DSI_SOFT_RESET, 0x1);
+ udelay(1);
+ DSI_W32(ctrl, DSI_SOFT_RESET, 0x0);
+
+ /* Disable force clock on.
+ * NOTE(review): BIT(20)/BIT(11) are not part of the 0x23F mask set
+ * above; they come from the initially read value — confirm the
+ * intended force-on bits.
+ */
+ reg &= ~(BIT(20) | BIT(11));
+ DSI_W32(ctrl, DSI_CLK_CTRL, reg);
+
+ /* Restore the saved DSI_CTRL value */
+ DSI_W32(ctrl, DSI_CTRL, reg_ctrl);
+ pr_debug("[DSI_%d] ctrl soft reset done\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_set_video_timing() - set up the timing for video frame
+ * @ctrl: Pointer to controller host hardware.
+ * @mode: Video mode information.
+ *
+ * Set up the video timing parameters for the DSI video mode operation.
+ * Each ACTIVE/SYNC register packs the end position in bits [31:16] and
+ * the start position in bits [15:0].
+ */
+void dsi_ctrl_hw_14_set_video_timing(struct dsi_ctrl_hw *ctrl,
+ struct dsi_mode_info *mode)
+{
+ u32 reg = 0;
+ u32 hs_start = 0;
+ u32 hs_end, active_h_start, active_h_end, h_total;
+ u32 vs_start = 0, vs_end = 0;
+ u32 vpos_start = 0, vpos_end, active_v_start, active_v_end, v_total;
+
+ /* Positions derived from sync width + porches; totals are 0-based */
+ hs_end = mode->h_sync_width;
+ active_h_start = mode->h_sync_width + mode->h_back_porch;
+ active_h_end = active_h_start + mode->h_active;
+ h_total = (mode->h_sync_width + mode->h_back_porch + mode->h_active +
+ mode->h_front_porch) - 1;
+
+ vpos_end = mode->v_sync_width;
+ active_v_start = mode->v_sync_width + mode->v_back_porch;
+ active_v_end = active_v_start + mode->v_active;
+ v_total = (mode->v_sync_width + mode->v_back_porch + mode->v_active +
+ mode->v_front_porch) - 1;
+
+ reg = ((active_h_end & 0xFFFF) << 16) | (active_h_start & 0xFFFF);
+ DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_H, reg);
+
+ reg = ((active_v_end & 0xFFFF) << 16) | (active_v_start & 0xFFFF);
+ DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_V, reg);
+
+ reg = ((v_total & 0xFFFF) << 16) | (h_total & 0xFFFF);
+ DSI_W32(ctrl, DSI_VIDEO_MODE_TOTAL, reg);
+
+ reg = ((hs_end & 0xFFFF) << 16) | (hs_start & 0xFFFF);
+ DSI_W32(ctrl, DSI_VIDEO_MODE_HSYNC, reg);
+
+ reg = ((vs_end & 0xFFFF) << 16) | (vs_start & 0xFFFF);
+ DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC, reg);
+
+ reg = ((vpos_end & 0xFFFF) << 16) | (vpos_start & 0xFFFF);
+ DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC_VPOS, reg);
+
+ /* TODO: HS TIMER value? (magic 0x3FD08 — confirm against HPG) */
+ DSI_W32(ctrl, DSI_HS_TIMER_CTRL, 0x3FD08);
+ DSI_W32(ctrl, DSI_MISR_VIDEO_CTRL, 0x10100);
+ /* Timing flush — presumably applies the double-buffered timing
+ * values (see DSI_DSI_TIMING_DB_MODE in video_engine_setup); confirm.
+ */
+ DSI_W32(ctrl, DSI_DSI_TIMING_FLUSH, 0x1);
+ pr_debug("[DSI_%d] ctrl video parameters updated\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_setup_cmd_stream() - set up parameters for command pixel
+ * streams
+ * @ctrl: Pointer to controller host hardware.
+ * @width_in_pixels: Width of the stream in pixels.
+ * @h_stride: Horizontal stride in bytes.
+ * @height_in_lines: Number of lines in the stream.
+ * @vc_id: Virtual channel id of the stream.
+ *
+ * Setup parameters for command mode pixel stream size. The same
+ * configuration is written to both MDP stream 0 and stream 1.
+ */
+void dsi_ctrl_hw_14_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
+ u32 width_in_pixels,
+ u32 h_stride,
+ u32 height_in_lines,
+ u32 vc_id)
+{
+ u32 reg = 0;
+
+ /* Stride programmed as bytes + 1 — hardware quirk; confirm with HPG */
+ reg = (h_stride + 1) << 16;
+ reg |= (vc_id & 0x3) << 8;
+ reg |= 0x39; /* packet data type (0x39 = DCS Long Write) */
+ DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_CTRL, reg);
+ DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_CTRL, reg);
+
+ /* Height in [31:16], width in [15:0] */
+ reg = (height_in_lines << 16) | width_in_pixels;
+ DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_TOTAL, reg);
+ DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_TOTAL, reg);
+}
+
+/**
+ * dsi_ctrl_hw_14_video_engine_setup() - Setup dsi host controller for video
+ * mode
+ * @ctrl: Pointer to controller host hardware.
+ * @common_cfg: Common configuration parameters.
+ * @cfg: Video mode configuration.
+ *
+ * Set up DSI video engine with a specific configuration. Controller and
+ * video engine are not enabled as part of this function.
+ */
+void dsi_ctrl_hw_14_video_engine_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_video_engine_cfg *cfg)
+{
+ u32 reg = 0;
+
+ /* VIDEO_MODE_CTRL: LP11/interleave options, traffic mode, VC, format */
+ reg |= (cfg->last_line_interleave_en ? BIT(31) : 0);
+ reg |= (cfg->pulse_mode_hsa_he ? BIT(28) : 0);
+ reg |= (cfg->hfp_lp11_en ? BIT(24) : 0);
+ reg |= (cfg->hbp_lp11_en ? BIT(20) : 0);
+ reg |= (cfg->hsa_lp11_en ? BIT(16) : 0);
+ reg |= (cfg->eof_bllp_lp11_en ? BIT(15) : 0);
+ reg |= (cfg->bllp_lp11_en ? BIT(12) : 0);
+ reg |= (cfg->traffic_mode & 0x3) << 8;
+ reg |= (cfg->vc_id & 0x3);
+ reg |= (video_mode_format_map[common_cfg->dst_format] & 0x3) << 4;
+ DSI_W32(ctrl, DSI_VIDEO_MODE_CTRL, reg);
+
+ /* Component swap order and per-color bit swaps */
+ reg = (common_cfg->swap_mode & 0x7) << 12;
+ reg |= (common_cfg->bit_swap_red ? BIT(0) : 0);
+ reg |= (common_cfg->bit_swap_green ? BIT(4) : 0);
+ reg |= (common_cfg->bit_swap_blue ? BIT(8) : 0);
+ DSI_W32(ctrl, DSI_VIDEO_MODE_DATA_CTRL, reg);
+ /* Enable Timing double buffering */
+ DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x1);
+
+
+ pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_cmd_engine_setup() - setup dsi host controller for command
+ * mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @common_cfg: Common configuration parameters.
+ * @cfg: Command mode configuration.
+ *
+ * Setup DSI CMD engine with a specific configuration. Controller and
+ * command engine are not enabled as part of this function.
+ */
+void dsi_ctrl_hw_14_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_cmd_engine_cfg *cfg)
+{
+ u32 reg = 0;
+
+ /* Packet interleave count, bit swaps and destination pixel format */
+ reg = (cfg->max_cmd_packets_interleave & 0xF) << 20;
+ reg |= (common_cfg->bit_swap_red ? BIT(4) : 0);
+ reg |= (common_cfg->bit_swap_green ? BIT(8) : 0);
+ reg |= (common_cfg->bit_swap_blue ? BIT(12) : 0);
+ reg |= cmd_mode_format_map[common_cfg->dst_format];
+ DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_CTRL, reg);
+
+ /* Read-modify-write: set BIT(16) — purpose not clear from here;
+ * confirm against the MDP_CTRL2 register spec.
+ */
+ reg = DSI_R32(ctrl, DSI_COMMAND_MODE_MDP_CTRL2);
+ reg |= BIT(16);
+ DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_CTRL2, reg);
+
+ /* DCS write-memory start/continue command bytes for MDP updates */
+ reg = cfg->wr_mem_start & 0xFF;
+ reg |= (cfg->wr_mem_continue & 0xFF) << 8;
+ reg |= (cfg->insert_dcs_command ? BIT(16) : 0);
+ DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL, reg);
+
+ pr_debug("[DSI_%d] Cmd engine setup done\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_video_engine_en() - enable DSI video engine
+ * @ctrl: Pointer to controller host hardware.
+ * @on: Enable/disable the video engine.
+ */
+void dsi_ctrl_hw_14_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+ /* VIDEO_MODE_EN is bit 1 of DSI_CTRL; read-modify-write it */
+ u32 val = DSI_R32(ctrl, DSI_CTRL) & ~BIT(1);
+
+ if (on)
+ val |= BIT(1);
+ DSI_W32(ctrl, DSI_CTRL, val);
+
+ pr_debug("[DSI_%d] Video engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * dsi_ctrl_hw_14_ctrl_en() - enable DSI controller engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @on: turn on/off the DSI controller engine.
+ */
+void dsi_ctrl_hw_14_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+ /* DSI_EN is bit 0 of DSI_CTRL; read-modify-write it */
+ u32 val = DSI_R32(ctrl, DSI_CTRL) & ~BIT(0);
+
+ if (on)
+ val |= BIT(0);
+ DSI_W32(ctrl, DSI_CTRL, val);
+
+ pr_debug("[DSI_%d] Controller engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * dsi_ctrl_hw_14_cmd_engine_en() - enable DSI controller command engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @on: Turn on/off the DSI command engine.
+ */
+void dsi_ctrl_hw_14_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+ /* CMD_MODE_EN is bit 2 of DSI_CTRL; read-modify-write it */
+ u32 val = DSI_R32(ctrl, DSI_CTRL) & ~BIT(2);
+
+ if (on)
+ val |= BIT(2);
+ DSI_W32(ctrl, DSI_CTRL, val);
+
+ pr_debug("[DSI_%d] command engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * dsi_ctrl_hw_14_setup_lane_map() - setup mapping between logical and
+ * physical lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lane_map: Structure defining the mapping between DSI logical
+ * lanes and physical lanes.
+ */
+void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
+ struct dsi_lane_mapping *lane_map)
+{
+ u32 val;
+ /* Encode the permutation as a 4-digit decimal number for matching */
+ u32 encoded = (lane_map->physical_lane0 * 1000) +
+ (lane_map->physical_lane1 * 100) +
+ (lane_map->physical_lane2 * 10) +
+ lane_map->physical_lane3;
+
+ switch (encoded) {
+ case 3012:
+ val = 1;
+ break;
+ case 2301:
+ val = 2;
+ break;
+ case 1230:
+ val = 3;
+ break;
+ case 321:
+ val = 4;
+ break;
+ case 1032:
+ val = 5;
+ break;
+ case 2103:
+ val = 6;
+ break;
+ case 3210:
+ val = 7;
+ break;
+ case 123: /* identity mapping 0123 */
+ default: /* unrecognized permutations fall back to identity */
+ val = 0;
+ break;
+ }
+
+ DSI_W32(ctrl, DSI_LANE_SWAP_CTRL, val);
+
+ pr_debug("[DSI_%d] Lane swap setup complete\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_kickoff_command() - transmits commands stored in memory
+ * @ctrl: Pointer to the controller host hardware.
+ * @cmd: Command information.
+ * @flags: Modifiers for command transmission.
+ *
+ * The controller hardware is programmed with address and size of the
+ * command buffer. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+void dsi_ctrl_hw_14_kickoff_command(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_info *cmd,
+ u32 flags)
+{
+ u32 reg = 0;
+
+ /*
+ * Mirror en_broadcast/is_master/use_lpm into bits 31/30/26 of
+ * COMMAND_MODE_DMA_CTRL; bit 28 selects EMBEDDED_MODE.
+ */
+ reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
+ if (cmd->en_broadcast)
+ reg |= BIT(31);
+ else
+ reg &= ~BIT(31);
+
+ if (cmd->is_master)
+ reg |= BIT(30);
+ else
+ reg &= ~BIT(30);
+
+ if (cmd->use_lpm)
+ reg |= BIT(26);
+ else
+ reg &= ~BIT(26);
+
+ reg |= BIT(28);
+ DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
+
+ /* DMA_CMD_LENGTH is a 24-bit field */
+ DSI_W32(ctrl, DSI_DMA_CMD_OFFSET, cmd->offset);
+ DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->length & 0xFFFFFF));
+
+ /* wait for writes to complete before kick off */
+ wmb();
+
+ if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
+ DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+}
+
+/**
+ * dsi_ctrl_hw_14_kickoff_fifo_command() - transmits a command using FIFO in
+ * dsi hardware.
+ * @ctrl: Pointer to the controller host hardware.
+ * @cmd: Command information.
+ * @flags: Modifiers for command transmission.
+ *
+ * The controller hardware FIFO is programmed with command header and
+ * payload. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+void dsi_ctrl_hw_14_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+ u32 flags)
+{
+ u32 reg = 0, i = 0;
+ u32 *ptr = cmd->command;
+
+ /*
+ * Set CMD_DMA_TPG_EN, TPG_DMA_FIFO_MODE and
+ * CMD_DMA_PATTERN_SEL = custom pattern stored in TPG DMA FIFO
+ */
+ reg = (BIT(1) | BIT(2) | (0x3 << 16));
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+ /*
+ * Program the FIFO with command buffer. Hardware requires an extra
+ * DWORD (set to zero) if the length of command buffer is odd DWORDS.
+ * NOTE(review): assumes cmd->size is a multiple of 4 bytes — confirm.
+ */
+ for (i = 0; i < cmd->size; i += 4) {
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, *ptr);
+ ptr++;
+ }
+
+ if ((cmd->size / 4) & 0x1)
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, 0);
+
+ /*
+ * Mirror en_broadcast/is_master/use_lpm into bits 31/30/26 of
+ * COMMAND_MODE_DMA_CTRL; bit 28 selects EMBEDDED_MODE. Keep this in
+ * sync with dsi_ctrl_hw_14_kickoff_command().
+ */
+ reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
+ if (cmd->en_broadcast)
+ reg |= BIT(31);
+ else
+ reg &= ~BIT(31);
+
+ if (cmd->is_master)
+ reg |= BIT(30);
+ else
+ reg &= ~BIT(30);
+
+ if (cmd->use_lpm)
+ reg |= BIT(26);
+ else
+ reg &= ~BIT(26);
+
+ reg |= BIT(28);
+
+ DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
+
+ /*
+ * DMA_CMD_LENGTH is a 24-bit field (see kickoff_command()); the
+ * previous 0xFFFFFFFF mask was a no-op on a u32 value.
+ */
+ DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->size & 0xFFFFFF));
+ /* Finish writes before command trigger */
+ wmb();
+
+ if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
+ DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+
+ pr_debug("[DSI_%d]size=%d, trigger = %d\n",
+ ctrl->index, cmd->size,
+ (flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER) ? false : true);
+}
+
+/**
+ * dsi_ctrl_hw_14_reset_cmd_fifo() - reset the TPG DMA FIFO used for
+ * FIFO-based command transmission
+ * @ctrl: Pointer to the controller host hardware.
+ */
+void dsi_ctrl_hw_14_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl)
+{
+ /* disable cmd dma tpg */
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, 0x0);
+
+ /* Pulse the FIFO reset bit */
+ DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x1);
+ udelay(1);
+ DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x0);
+}
+
+/**
+ * dsi_ctrl_hw_14_trigger_command_dma() - trigger transmission of command
+ * buffer.
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * This trigger can be only used if there was a prior call to
+ * kickoff_command() or kickoff_fifo_command() with
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
+ */
+void dsi_ctrl_hw_14_trigger_command_dma(struct dsi_ctrl_hw *ctrl)
+{
+ DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+ pr_debug("[DSI_%d] CMD DMA triggered\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_get_cmd_read_data() - get data read from the peripheral
+ * @ctrl: Pointer to the controller host hardware.
+ * @rd_buf: Buffer where data will be read into.
+ * @read_offset: Offset of this chunk within the overall read response.
+ * @total_read_len: Number of bytes to read.
+ *
+ * NOTE(review): rx_byte and pkt_size are hard-coded to 0 below, so cnt
+ * is always 0, the RDBK register loop never executes and reg[] is
+ * consumed uninitialized. This looks like incomplete staging code —
+ * these values should presumably come from the read request; confirm
+ * before relying on this path.
+ *
+ * return: number of bytes read.
+ */
+u32 dsi_ctrl_hw_14_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
+ u8 *rd_buf,
+ u32 read_offset,
+ u32 total_read_len)
+{
+ u32 *lp, *temp, data;
+ int i, j = 0, cnt;
+ u32 read_cnt;
+ u32 rx_byte = 0;
+ u32 repeated_bytes = 0;
+ u8 reg[16];
+ u32 pkt_size = 0;
+ int buf_offset = read_offset;
+
+ /* NOTE(review): lp is computed but never used */
+ lp = (u32 *)rd_buf;
+ temp = (u32 *)reg;
+ /* Number of 32-bit RDBK registers covering rx_byte bytes (max 4) */
+ cnt = (rx_byte + 3) >> 2;
+
+ if (cnt > 4)
+ cnt = 4;
+
+ if (rx_byte == 4)
+ read_cnt = 4;
+ else
+ read_cnt = pkt_size + 6;
+
+ /*
+ * The readback FIFO holds at most 16 bytes; when more was read,
+ * skip bytes already delivered in a previous chunk.
+ */
+ if (read_cnt > 16) {
+ int bytes_shifted;
+
+ bytes_shifted = read_cnt - 16;
+ repeated_bytes = buf_offset - bytes_shifted;
+ }
+
+ /* Drain RDBK_DATA registers, converting from network byte order */
+ for (i = cnt - 1; i >= 0; i--) {
+ data = DSI_R32(ctrl, DSI_RDBK_DATA0 + i*4);
+ *temp++ = ntohl(data);
+ }
+
+ for (i = repeated_bytes; i < 16; i++)
+ rd_buf[j++] = reg[i];
+
+ pr_debug("[DSI_%d] Read %d bytes\n", ctrl->index, j);
+ return j;
+}
+/**
+ * dsi_ctrl_hw_14_ulps_request() - request ulps entry for specified lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes) which need
+ * to enter ULPS.
+ *
+ * Caller should check if lanes are in ULPS mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+ u32 val = 0;
+
+ /* ULPS request bits: data lanes at 0-3, clock lane at 4 */
+ val |= (lanes & DSI_DATA_LANE_0) ? BIT(0) : 0;
+ val |= (lanes & DSI_DATA_LANE_1) ? BIT(1) : 0;
+ val |= (lanes & DSI_DATA_LANE_2) ? BIT(2) : 0;
+ val |= (lanes & DSI_DATA_LANE_3) ? BIT(3) : 0;
+ val |= (lanes & DSI_CLOCK_LANE) ? BIT(4) : 0;
+
+ DSI_W32(ctrl, DSI_LANE_CTRL, val);
+
+ pr_debug("[DSI_%d] ULPS requested for lanes 0x%x\n", ctrl->index,
+ lanes);
+}
+
+/**
+ * dsi_ctrl_hw_14_ulps_exit() - exit ULPS on specified lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes) which need
+ * to exit ULPS.
+ *
+ * Caller should check if lanes are in active mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+void dsi_ctrl_hw_14_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+ /* Preserve pending request bits; exit bits sit at offsets 8-12 */
+ u32 val = DSI_R32(ctrl, DSI_LANE_CTRL);
+
+ val |= (lanes & DSI_DATA_LANE_0) ? BIT(8) : 0;
+ val |= (lanes & DSI_DATA_LANE_1) ? BIT(9) : 0;
+ val |= (lanes & DSI_DATA_LANE_2) ? BIT(10) : 0;
+ val |= (lanes & DSI_DATA_LANE_3) ? BIT(11) : 0;
+ val |= (lanes & DSI_CLOCK_LANE) ? BIT(12) : 0;
+
+ DSI_W32(ctrl, DSI_LANE_CTRL, val);
+
+ pr_debug("[DSI_%d] ULPS exit request for lanes=0x%x\n",
+ ctrl->index, lanes);
+}
+
+/**
+ * dsi_ctrl_hw_14_clear_ulps_request() - clear ulps request once all lanes
+ * are active
+ * @ctrl: Pointer to controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes).
+ *
+ * ULPS request should be cleared after the lanes have exited ULPS.
+ */
+void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+ u32 reg = 0;
+
+ reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+ /* NOTE(review): clock-lane request is cleared unconditionally,
+ * unlike the data lanes which honor @lanes — confirm intent.
+ */
+ reg &= ~BIT(4); /* clock lane */
+ if (lanes & DSI_DATA_LANE_0)
+ reg &= ~BIT(0);
+ if (lanes & DSI_DATA_LANE_1)
+ reg &= ~BIT(1);
+ if (lanes & DSI_DATA_LANE_2)
+ reg &= ~BIT(2);
+ if (lanes & DSI_DATA_LANE_3)
+ reg &= ~BIT(3);
+
+ DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+ /*
+ * HPG recommends separate writes for clearing ULPS_REQUEST and
+ * ULPS_EXIT.
+ */
+ DSI_W32(ctrl, DSI_LANE_CTRL, 0x0);
+
+ pr_debug("[DSI_%d] ULPS request cleared\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_get_lanes_in_ulps() - returns the list of lanes in ULPS
+ * mode
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
+ * state. If 0 is returned, all the lanes are active.
+ *
+ * Return: List of lanes in ULPS state.
+ */
+u32 dsi_ctrl_hw_14_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl)
+{
+ u32 status = DSI_R32(ctrl, DSI_LANE_STATUS);
+ u32 lanes = 0;
+
+ /* A cleared status bit means the corresponding lane is in ULPS */
+ lanes |= (status & BIT(8)) ? 0 : DSI_DATA_LANE_0;
+ lanes |= (status & BIT(9)) ? 0 : DSI_DATA_LANE_1;
+ lanes |= (status & BIT(10)) ? 0 : DSI_DATA_LANE_2;
+ lanes |= (status & BIT(11)) ? 0 : DSI_DATA_LANE_3;
+ lanes |= (status & BIT(12)) ? 0 : DSI_CLOCK_LANE;
+
+ pr_debug("[DSI_%d] lanes in ulps = 0x%x\n", ctrl->index, lanes);
+ return lanes;
+}
+
+/**
+ * dsi_ctrl_hw_14_clamp_enable() - enable DSI clamps to keep PHY driving a
+ * stable link
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes which need to be clamped.
+ * @enable_ulps: Also assert the adjacent per-lane ULPS bit alongside
+ * each clamp bit — exact semantics TODO: confirm with HPG.
+ */
+void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool enable_ulps)
+{
+ u32 clamp_reg = 0;
+ u32 bit_shift = 0;
+ u32 reg = 0;
+
+ /* Controller 1 uses the upper 16 bits of the shared clamp register */
+ if (ctrl->index == 1)
+ bit_shift = 16;
+
+ /* Odd bit = clamp, adjacent even bit = ULPS state for each lane */
+ if (lanes & DSI_CLOCK_LANE) {
+ clamp_reg |= BIT(9);
+ if (enable_ulps)
+ clamp_reg |= BIT(8);
+ }
+
+ if (lanes & DSI_DATA_LANE_0) {
+ clamp_reg |= BIT(7);
+ if (enable_ulps)
+ clamp_reg |= BIT(6);
+ }
+
+ if (lanes & DSI_DATA_LANE_1) {
+ clamp_reg |= BIT(5);
+ if (enable_ulps)
+ clamp_reg |= BIT(4);
+ }
+
+ if (lanes & DSI_DATA_LANE_2) {
+ clamp_reg |= BIT(3);
+ if (enable_ulps)
+ clamp_reg |= BIT(2);
+ }
+
+ if (lanes & DSI_DATA_LANE_3) {
+ clamp_reg |= BIT(1);
+ if (enable_ulps)
+ clamp_reg |= BIT(0);
+ }
+
+ clamp_reg |= BIT(15); /* Enable clamp */
+
+ reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+ reg |= (clamp_reg << bit_shift);
+ DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+
+ /* Enable PHY reset skip (cleared again in clamp_disable).
+ * NOTE(review): BIT(30) is not shifted for controller 1 — confirm.
+ */
+ reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+ reg |= BIT(30);
+ DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+ pr_debug("[DSI_%d] Clamps enabled for lanes=0x%x\n", ctrl->index,
+ lanes);
+}
+
+/**
+ * dsi_ctrl_hw_14_clamp_disable() - disable DSI clamps
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes which need to have clamps released.
+ * @disable_ulps: Also clear the adjacent per-lane ULPS bit alongside
+ * each clamp bit — exact semantics TODO: confirm with HPG.
+ */
+void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool disable_ulps)
+{
+ u32 clamp_reg = 0;
+ u32 bit_shift = 0;
+ u32 reg = 0;
+
+ /* Controller 1 uses the upper 16 bits of the shared clamp register */
+ if (ctrl->index == 1)
+ bit_shift = 16;
+
+ /* Build the same per-lane mask as clamp_enable, then clear it below */
+ if (lanes & DSI_CLOCK_LANE) {
+ clamp_reg |= BIT(9);
+ if (disable_ulps)
+ clamp_reg |= BIT(8);
+ }
+
+ if (lanes & DSI_DATA_LANE_0) {
+ clamp_reg |= BIT(7);
+ if (disable_ulps)
+ clamp_reg |= BIT(6);
+ }
+
+ if (lanes & DSI_DATA_LANE_1) {
+ clamp_reg |= BIT(5);
+ if (disable_ulps)
+ clamp_reg |= BIT(4);
+ }
+
+ if (lanes & DSI_DATA_LANE_2) {
+ clamp_reg |= BIT(3);
+ if (disable_ulps)
+ clamp_reg |= BIT(2);
+ }
+
+ if (lanes & DSI_DATA_LANE_3) {
+ clamp_reg |= BIT(1);
+ if (disable_ulps)
+ clamp_reg |= BIT(0);
+ }
+
+ clamp_reg |= BIT(15); /* Enable clamp */
+ clamp_reg <<= bit_shift;
+
+ /* Disable PHY reset skip */
+ reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+ reg &= ~BIT(30);
+ DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+ reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+ reg &= ~(clamp_reg);
+ DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+ pr_debug("[DSI_%d] Disable clamps for lanes=%d\n", ctrl->index, lanes);
+}
+
+/**
+ * dsi_ctrl_hw_14_get_interrupt_status() - returns the interrupt status
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of interrupts(enum dsi_status_int_type) that
+ * are active. This list does not include any error interrupts. Caller
+ * should call get_error_status for error interrupts.
+ *
+ * Return: List of active interrupts.
+ */
+u32 dsi_ctrl_hw_14_get_interrupt_status(struct dsi_ctrl_hw *ctrl)
+{
+ u32 reg = 0;
+ u32 ints = 0;
+
+ reg = DSI_R32(ctrl, DSI_INT_CTRL);
+
+ /* Translate DSI_INT_CTRL status bits into dsi_status_int_type flags */
+ if (reg & BIT(0))
+ ints |= DSI_CMD_MODE_DMA_DONE;
+ if (reg & BIT(8))
+ ints |= DSI_CMD_FRAME_DONE;
+ if (reg & BIT(10))
+ ints |= DSI_CMD_STREAM0_FRAME_DONE;
+ if (reg & BIT(12))
+ ints |= DSI_CMD_STREAM1_FRAME_DONE;
+ if (reg & BIT(14))
+ ints |= DSI_CMD_STREAM2_FRAME_DONE;
+ if (reg & BIT(16))
+ ints |= DSI_VIDEO_MODE_FRAME_DONE;
+ if (reg & BIT(20))
+ ints |= DSI_BTA_DONE;
+ if (reg & BIT(28))
+ ints |= DSI_DYN_REFRESH_DONE;
+ if (reg & BIT(30))
+ ints |= DSI_DESKEW_DONE;
+
+ pr_debug("[DSI_%d] Interrupt status = 0x%x, INT_CTRL=0x%x\n",
+ ctrl->index, ints, reg);
+ return ints;
+}
+
+/**
+ * dsi_ctrl_hw_14_clear_interrupt_status() - clears the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @ints: List of interrupts to be cleared.
+ */
+void dsi_ctrl_hw_14_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints)
+{
+ u32 val = 0;
+
+ /* Translate dsi_status_int_type flags back to DSI_INT_CTRL bits */
+ val |= (ints & DSI_CMD_MODE_DMA_DONE) ? BIT(0) : 0;
+ val |= (ints & DSI_CMD_FRAME_DONE) ? BIT(8) : 0;
+ val |= (ints & DSI_CMD_STREAM0_FRAME_DONE) ? BIT(10) : 0;
+ val |= (ints & DSI_CMD_STREAM1_FRAME_DONE) ? BIT(12) : 0;
+ val |= (ints & DSI_CMD_STREAM2_FRAME_DONE) ? BIT(14) : 0;
+ val |= (ints & DSI_VIDEO_MODE_FRAME_DONE) ? BIT(16) : 0;
+ val |= (ints & DSI_BTA_DONE) ? BIT(20) : 0;
+ val |= (ints & DSI_DYN_REFRESH_DONE) ? BIT(28) : 0;
+ val |= (ints & DSI_DESKEW_DONE) ? BIT(30) : 0;
+
+ DSI_W32(ctrl, DSI_INT_CTRL, val);
+
+ pr_debug("[DSI_%d] Clear interrupts, ints = 0x%x, INT_CTRL=0x%x\n",
+ ctrl->index, ints, val);
+}
+
+/**
+ * dsi_ctrl_hw_14_enable_status_interrupts() - enable the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @ints: List of interrupts to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set ints to 0.
+ */
+void dsi_ctrl_hw_14_enable_status_interrupts(struct dsi_ctrl_hw *ctrl, u32 ints)
+{
+ u32 reg = 0;
+
+ /* Do not change value of DSI_ERROR_MASK bit */
+ reg |= (DSI_R32(ctrl, DSI_INT_CTRL) & BIT(25));
+ /* Enable bits sit one position above the corresponding status bits */
+ if (ints & DSI_CMD_MODE_DMA_DONE)
+ reg |= BIT(1);
+ if (ints & DSI_CMD_FRAME_DONE)
+ reg |= BIT(9);
+ if (ints & DSI_CMD_STREAM0_FRAME_DONE)
+ reg |= BIT(11);
+ if (ints & DSI_CMD_STREAM1_FRAME_DONE)
+ reg |= BIT(13);
+ if (ints & DSI_CMD_STREAM2_FRAME_DONE)
+ reg |= BIT(15);
+ if (ints & DSI_VIDEO_MODE_FRAME_DONE)
+ reg |= BIT(17);
+ if (ints & DSI_BTA_DONE)
+ reg |= BIT(21);
+ if (ints & DSI_DYN_REFRESH_DONE)
+ reg |= BIT(29);
+ if (ints & DSI_DESKEW_DONE)
+ reg |= BIT(31);
+
+ DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+ pr_debug("[DSI_%d] Enable interrupts 0x%x, INT_CTRL=0x%x\n",
+ ctrl->index, ints, reg);
+}
+
+/**
+ * get_error_status() - returns the error status
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of errors(enum dsi_error_int_type) that are
+ * active. This list does not include any status interrupts. Caller
+ * should call get_interrupt_status for status interrupts.
+ *
+ * Return: List of active error interrupts.
+ */
+u64 dsi_ctrl_hw_14_get_error_status(struct dsi_ctrl_hw *ctrl)
+{
+	u32 dln0_phy_err;
+	u32 fifo_status;
+	u32 ack_error;
+	u32 timeout_errors;
+	u32 clk_error;
+	u32 dsi_status;
+	u64 errors = 0;
+
+	/* Escape-mode entry/sync and LP contention errors from the PHY */
+	dln0_phy_err = DSI_R32(ctrl, DSI_DLN0_PHY_ERR);
+	if (dln0_phy_err & BIT(0))
+		errors |= DSI_DLN0_ESC_ENTRY_ERR;
+	if (dln0_phy_err & BIT(4))
+		errors |= DSI_DLN0_ESC_SYNC_ERR;
+	if (dln0_phy_err & BIT(8))
+		errors |= DSI_DLN0_LP_CONTROL_ERR;
+	if (dln0_phy_err & BIT(12))
+		errors |= DSI_DLN0_LP0_CONTENTION;
+	if (dln0_phy_err & BIT(16))
+		errors |= DSI_DLN0_LP1_CONTENTION;
+
+	/* Per-lane HS FIFO over/underflow plus CMD-path FIFO underflow */
+	fifo_status = DSI_R32(ctrl, DSI_FIFO_STATUS);
+	if (fifo_status & BIT(7))
+		errors |= DSI_CMD_MDP_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(10))
+		errors |= DSI_CMD_DMA_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(18))
+		errors |= DSI_DLN0_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(19))
+		errors |= DSI_DLN0_HS_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(22))
+		errors |= DSI_DLN1_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(23))
+		errors |= DSI_DLN1_HS_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(26))
+		errors |= DSI_DLN2_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(27))
+		errors |= DSI_DLN2_HS_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(30))
+		errors |= DSI_DLN3_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(31))
+		errors |= DSI_DLN3_HS_FIFO_UNDERFLOW;
+
+	/* Readback/acknowledge errors reported by the panel link */
+	ack_error = DSI_R32(ctrl, DSI_ACK_ERR_STATUS);
+	if (ack_error & BIT(16))
+		errors |= DSI_RDBK_SINGLE_ECC_ERR;
+	if (ack_error & BIT(17))
+		errors |= DSI_RDBK_MULTI_ECC_ERR;
+	if (ack_error & BIT(20))
+		errors |= DSI_RDBK_CRC_ERR;
+	if (ack_error & BIT(23))
+		errors |= DSI_RDBK_INCOMPLETE_PKT;
+	if (ack_error & BIT(24))
+		errors |= DSI_PERIPH_ERROR_PKT;
+
+	/* Link transaction timeouts (HS TX, LP RX, BTA) */
+	timeout_errors = DSI_R32(ctrl, DSI_TIMEOUT_STATUS);
+	if (timeout_errors & BIT(0))
+		errors |= DSI_HS_TX_TIMEOUT;
+	if (timeout_errors & BIT(4))
+		errors |= DSI_LP_RX_TIMEOUT;
+	if (timeout_errors & BIT(8))
+		errors |= DSI_BTA_TIMEOUT;
+
+	clk_error = DSI_R32(ctrl, DSI_CLK_STATUS);
+	if (clk_error & BIT(16))
+		errors |= DSI_PLL_UNLOCK;
+
+	dsi_status = DSI_R32(ctrl, DSI_STATUS);
+	if (dsi_status & BIT(31))
+		errors |= DSI_INTERLEAVE_OP_CONTENTION;
+
+	pr_debug("[DSI_%d] Error status = 0x%llx, phy=0x%x, fifo=0x%x",
+		 ctrl->index, errors, dln0_phy_err, fifo_status);
+	pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
+		 ctrl->index, ack_error, timeout_errors, clk_error, dsi_status);
+	return errors;
+}
+
+/**
+ * clear_error_status() - clears the specified errors
+ * @ctrl: Pointer to the controller host hardware.
+ * @errors: List of errors to be cleared.
+ *
+ * Translates each requested logical error back to its status-register
+ * bit (inverse of get_error_status()) and writes the per-register clear
+ * masks, then pokes DSI_INT_CTRL.
+ */
+void dsi_ctrl_hw_14_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors)
+{
+	u32 dln0_phy_err = 0;
+	u32 fifo_status = 0;
+	u32 ack_error = 0;
+	u32 timeout_error = 0;
+	u32 clk_error = 0;
+	u32 dsi_status = 0;
+	u32 int_ctrl = 0;
+
+	/* Readback/acknowledge error bits */
+	if (errors & DSI_RDBK_SINGLE_ECC_ERR)
+		ack_error |= BIT(16);
+	if (errors & DSI_RDBK_MULTI_ECC_ERR)
+		ack_error |= BIT(17);
+	if (errors & DSI_RDBK_CRC_ERR)
+		ack_error |= BIT(20);
+	if (errors & DSI_RDBK_INCOMPLETE_PKT)
+		ack_error |= BIT(23);
+	if (errors & DSI_PERIPH_ERROR_PKT)
+		ack_error |= BIT(24);
+
+	/* Link timeout bits */
+	if (errors & DSI_LP_RX_TIMEOUT)
+		timeout_error |= BIT(4);
+	if (errors & DSI_HS_TX_TIMEOUT)
+		timeout_error |= BIT(0);
+	if (errors & DSI_BTA_TIMEOUT)
+		timeout_error |= BIT(8);
+
+	if (errors & DSI_PLL_UNLOCK)
+		clk_error |= BIT(16);
+
+	/* PHY lane-0 escape/contention error bits */
+	if (errors & DSI_DLN0_LP0_CONTENTION)
+		dln0_phy_err |= BIT(12);
+	if (errors & DSI_DLN0_LP1_CONTENTION)
+		dln0_phy_err |= BIT(16);
+	if (errors & DSI_DLN0_ESC_ENTRY_ERR)
+		dln0_phy_err |= BIT(0);
+	if (errors & DSI_DLN0_ESC_SYNC_ERR)
+		dln0_phy_err |= BIT(4);
+	if (errors & DSI_DLN0_LP_CONTROL_ERR)
+		dln0_phy_err |= BIT(8);
+
+	/* FIFO over/underflow bits */
+	if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW)
+		fifo_status |= BIT(10);
+	if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW)
+		fifo_status |= BIT(7);
+	if (errors & DSI_DLN0_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(18);
+	if (errors & DSI_DLN1_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(22);
+	if (errors & DSI_DLN2_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(26);
+	if (errors & DSI_DLN3_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(30);
+	if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(19);
+	if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(23);
+	if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(27);
+	if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(31);
+
+	if (errors & DSI_INTERLEAVE_OP_CONTENTION)
+		dsi_status |= BIT(31);
+
+	DSI_W32(ctrl, DSI_DLN0_PHY_ERR, dln0_phy_err);
+	DSI_W32(ctrl, DSI_FIFO_STATUS, fifo_status);
+	DSI_W32(ctrl, DSI_ACK_ERR_STATUS, ack_error);
+	DSI_W32(ctrl, DSI_TIMEOUT_STATUS, timeout_error);
+	DSI_W32(ctrl, DSI_CLK_STATUS, clk_error);
+	DSI_W32(ctrl, DSI_STATUS, dsi_status);
+
+	/*
+	 * BIT(24) in DSI_INT_CTRL is presumably the latched error-interrupt
+	 * status (write-1-to-clear) - TODO confirm against the HW spec.
+	 */
+	int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
+	int_ctrl |= BIT(24);
+	DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
+	pr_debug("[DSI_%d] clear errors = 0x%llx, phy=0x%x, fifo=0x%x",
+		 ctrl->index, errors, dln0_phy_err, fifo_status);
+	pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
+		 ctrl->index, ack_error, timeout_error, clk_error, dsi_status);
+}
+
+/**
+ * enable_error_interrupts() - enable the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @errors: List of errors to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set errors to 0.
+ */
+void dsi_ctrl_hw_14_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
+					    u64 errors)
+{
+	u32 int_ctrl = 0;
+	/*
+	 * In DSI_ERR_INT_MASK0 a set bit MASKS the corresponding error
+	 * source. Start from the fully-masked default (0x7FFF3BFF is
+	 * presumably the hardware reset value - TODO confirm) and clear
+	 * the mask bit for every requested error below.
+	 */
+	u32 int_mask0 = 0x7FFF3BFF;
+
+	/* BIT(25) of DSI_INT_CTRL globally gates error interrupt delivery */
+	int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
+	if (errors)
+		int_ctrl |= BIT(25);
+	else
+		int_ctrl &= ~BIT(25);
+
+	/* Readback/acknowledge errors */
+	if (errors & DSI_RDBK_SINGLE_ECC_ERR)
+		int_mask0 &= ~BIT(0);
+	if (errors & DSI_RDBK_MULTI_ECC_ERR)
+		int_mask0 &= ~BIT(1);
+	if (errors & DSI_RDBK_CRC_ERR)
+		int_mask0 &= ~BIT(2);
+	if (errors & DSI_RDBK_INCOMPLETE_PKT)
+		int_mask0 &= ~BIT(3);
+	if (errors & DSI_PERIPH_ERROR_PKT)
+		int_mask0 &= ~BIT(4);
+
+	/* Link timeouts */
+	if (errors & DSI_LP_RX_TIMEOUT)
+		int_mask0 &= ~BIT(5);
+	if (errors & DSI_HS_TX_TIMEOUT)
+		int_mask0 &= ~BIT(6);
+	if (errors & DSI_BTA_TIMEOUT)
+		int_mask0 &= ~BIT(7);
+
+	if (errors & DSI_PLL_UNLOCK)
+		int_mask0 &= ~BIT(28);
+
+	/* PHY lane-0 escape/contention errors */
+	if (errors & DSI_DLN0_LP0_CONTENTION)
+		int_mask0 &= ~BIT(24);
+	if (errors & DSI_DLN0_LP1_CONTENTION)
+		int_mask0 &= ~BIT(25);
+	if (errors & DSI_DLN0_ESC_ENTRY_ERR)
+		int_mask0 &= ~BIT(21);
+	if (errors & DSI_DLN0_ESC_SYNC_ERR)
+		int_mask0 &= ~BIT(22);
+	if (errors & DSI_DLN0_LP_CONTROL_ERR)
+		int_mask0 &= ~BIT(23);
+
+	/* FIFO over/underflow errors */
+	if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(9);
+	if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(11);
+	if (errors & DSI_DLN0_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(16);
+	if (errors & DSI_DLN1_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(17);
+	if (errors & DSI_DLN2_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(18);
+	if (errors & DSI_DLN3_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(19);
+	if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(26);
+	if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(27);
+	if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(29);
+	if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(30);
+
+	if (errors & DSI_INTERLEAVE_OP_CONTENTION)
+		int_mask0 &= ~BIT(8);
+
+	DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
+	DSI_W32(ctrl, DSI_ERR_INT_MASK0, int_mask0);
+
+	pr_debug("[DSI_%d] enable errors = 0x%llx, int_mask0=0x%x\n",
+		 ctrl->index, errors, int_mask0);
+}
+
+/**
+ * video_test_pattern_setup() - setup test pattern engine for video mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @type: Type of test pattern.
+ * @init_val: Initial value to use for generating test pattern.
+ *
+ * Configures but does not start the TPG; see test_pattern_enable().
+ */
+void dsi_ctrl_hw_14_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+					     enum dsi_test_pattern type,
+					     u32 init_val)
+{
+	u32 reg = 0;
+
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, init_val);
+
+	/*
+	 * Video pattern select lives in bits [5:4] of GEN_CTRL:
+	 * 0x2 = fixed, 0x1 = incrementing, 0x0 = polynomial (which only
+	 * needs its seed programmed, hence no GEN_CTRL bits below).
+	 */
+	switch (type) {
+	case DSI_TEST_PATTERN_FIXED:
+		reg |= (0x2 << 4);
+		break;
+	case DSI_TEST_PATTERN_INC:
+		reg |= (0x1 << 4);
+		break;
+	case DSI_TEST_PATTERN_POLY:
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_POLY, 0xF0F0F);
+		break;
+	default:
+		break;
+	}
+
+	/* Magic TPG config values - presumably default pattern routing; TODO confirm */
+	DSI_W32(ctrl, DSI_TPG_MAIN_CONTROL, 0x100);
+	DSI_W32(ctrl, DSI_TPG_VIDEO_CONFIG, 0x5);
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+	pr_debug("[DSI_%d] Video test pattern setup done\n", ctrl->index);
+}
+
+/**
+ * cmd_test_pattern_setup() - setup test patttern engine for cmd mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @type: Type of test pattern.
+ * @init_val: Initial value to use for generating test pattern.
+ * @stream_id: Stream Id on which packets are generated.
+ *
+ * Configures but does not trigger the TPG for one MDP command stream;
+ * stream ids other than 0-2 are rejected without touching hardware.
+ */
+void dsi_ctrl_hw_14_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+					   enum dsi_test_pattern type,
+					   u32 init_val,
+					   u32 stream_id)
+{
+	u32 reg = 0;
+	u32 init_offset;
+	u32 poly_offset;
+	u32 pattern_sel_shift;
+
+	/* Each stream has its own init/poly registers and GEN_CTRL field */
+	switch (stream_id) {
+	case 0:
+		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0;
+		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY;
+		pattern_sel_shift = 8;
+		break;
+	case 1:
+		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1;
+		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY;
+		pattern_sel_shift = 12;
+		break;
+	case 2:
+		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2;
+		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY;
+		pattern_sel_shift = 20;
+		break;
+	default:
+		return;
+	}
+
+	DSI_W32(ctrl, init_offset, init_val);
+
+	/* 0x2 = fixed, 0x1 = incrementing, 0x0 (no bits set) = polynomial */
+	switch (type) {
+	case DSI_TEST_PATTERN_FIXED:
+		reg |= (0x2 << pattern_sel_shift);
+		break;
+	case DSI_TEST_PATTERN_INC:
+		reg |= (0x1 << pattern_sel_shift);
+		break;
+	case DSI_TEST_PATTERN_POLY:
+		DSI_W32(ctrl, poly_offset, 0xF0F0F);
+		break;
+	default:
+		break;
+	}
+
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+	pr_debug("[DSI_%d] Cmd test pattern setup done\n", ctrl->index);
+}
+
+/**
+ * test_pattern_enable() - enable test pattern engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @enable: Enable/Disable test pattern engine.
+ */
+void dsi_ctrl_hw_14_test_pattern_enable(struct dsi_ctrl_hw *ctrl,
+					bool enable)
+{
+	u32 val;
+
+	/* Read-modify-write bit 0 only; pattern configuration is untouched */
+	val = DSI_R32(ctrl, DSI_TEST_PATTERN_GEN_CTRL) & ~BIT(0);
+	if (enable)
+		val |= BIT(0);
+
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, val);
+
+	pr_debug("[DSI_%d] Test pattern enable=%d\n", ctrl->index, enable);
+}
+
+/**
+ * trigger_cmd_test_pattern() - trigger a command mode frame update with
+ *				test pattern
+ * @ctrl: Pointer to the controller host hardware.
+ * @stream_id: Stream on which frame update is sent.
+ */
+void dsi_ctrl_hw_14_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
+					     u32 stream_id)
+{
+	/* One self-clearing trigger register per supported stream (0-2) */
+	if (stream_id == 0)
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER, 0x1);
+	else if (stream_id == 1)
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER, 0x1);
+	else if (stream_id == 2)
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER, 0x1);
+	/* other stream ids are silently ignored, as before */
+
+	pr_debug("[DSI_%d] Cmd Test pattern trigger\n", ctrl->index);
+}
+
+#define DUMP_REG_VALUE(off) "\t%-30s: 0x%08x\n", #off, DSI_R32(ctrl, off)
+/**
+ * dsi_ctrl_hw_14_reg_dump_to_buffer() - dump controller registers to a buffer
+ * @ctrl: Pointer to the controller host hardware.
+ * @buf:  Output buffer for the formatted name/value dump.
+ * @size: Size of @buf in bytes.
+ *
+ * Formats one line per configuration register into @buf. Output is
+ * truncated (never overflowing @buf) if the buffer is too small.
+ *
+ * Return: Number of bytes written to @buf, always <= @size.
+ */
+ssize_t dsi_ctrl_hw_14_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
+					  char *buf,
+					  u32 size)
+{
+	u32 len = 0;
+
+	/*
+	 * Use scnprintf() rather than snprintf(): scnprintf() returns the
+	 * number of bytes actually written, so 'len' can never exceed
+	 * 'size'. With snprintf() the return value is the would-be length,
+	 * so on truncation 'len' could pass 'size' and the unsigned
+	 * (size - len) would underflow to a huge bound, overflowing 'buf'.
+	 */
+#define DUMP_REG(off) \
+	(len += scnprintf((buf + len), (size - len), DUMP_REG_VALUE(off)))
+
+	len += scnprintf((buf + len), (size - len), "CONFIGURATION REGS:\n");
+
+	DUMP_REG(DSI_HW_VERSION);
+	DUMP_REG(DSI_CTRL);
+	DUMP_REG(DSI_STATUS);
+	DUMP_REG(DSI_FIFO_STATUS);
+	DUMP_REG(DSI_VIDEO_MODE_CTRL);
+	DUMP_REG(DSI_VIDEO_MODE_SYNC_DATATYPE);
+	DUMP_REG(DSI_VIDEO_MODE_PIXEL_DATATYPE);
+	DUMP_REG(DSI_VIDEO_MODE_BLANKING_DATATYPE);
+	DUMP_REG(DSI_VIDEO_MODE_DATA_CTRL);
+	DUMP_REG(DSI_VIDEO_MODE_ACTIVE_H);
+	DUMP_REG(DSI_VIDEO_MODE_ACTIVE_V);
+	DUMP_REG(DSI_VIDEO_MODE_TOTAL);
+	DUMP_REG(DSI_VIDEO_MODE_HSYNC);
+	DUMP_REG(DSI_VIDEO_MODE_VSYNC);
+	DUMP_REG(DSI_VIDEO_MODE_VSYNC_VPOS);
+	DUMP_REG(DSI_COMMAND_MODE_DMA_CTRL);
+	DUMP_REG(DSI_COMMAND_MODE_MDP_CTRL);
+	DUMP_REG(DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL);
+	DUMP_REG(DSI_DMA_CMD_OFFSET);
+	DUMP_REG(DSI_DMA_CMD_LENGTH);
+	DUMP_REG(DSI_DMA_FIFO_CTRL);
+	DUMP_REG(DSI_DMA_NULL_PACKET_DATA);
+	DUMP_REG(DSI_COMMAND_MODE_MDP_STREAM0_CTRL);
+	DUMP_REG(DSI_COMMAND_MODE_MDP_STREAM0_TOTAL);
+	DUMP_REG(DSI_COMMAND_MODE_MDP_STREAM1_CTRL);
+	DUMP_REG(DSI_COMMAND_MODE_MDP_STREAM1_TOTAL);
+	DUMP_REG(DSI_ACK_ERR_STATUS);
+	DUMP_REG(DSI_RDBK_DATA0);
+	DUMP_REG(DSI_RDBK_DATA1);
+	DUMP_REG(DSI_RDBK_DATA2);
+	DUMP_REG(DSI_RDBK_DATA3);
+	DUMP_REG(DSI_RDBK_DATATYPE0);
+	DUMP_REG(DSI_RDBK_DATATYPE1);
+	DUMP_REG(DSI_TRIG_CTRL);
+	DUMP_REG(DSI_EXT_MUX);
+	DUMP_REG(DSI_EXT_MUX_TE_PULSE_DETECT_CTRL);
+	DUMP_REG(DSI_CMD_MODE_DMA_SW_TRIGGER);
+	DUMP_REG(DSI_CMD_MODE_MDP_SW_TRIGGER);
+	DUMP_REG(DSI_CMD_MODE_BTA_SW_TRIGGER);
+	DUMP_REG(DSI_RESET_SW_TRIGGER);
+	DUMP_REG(DSI_LANE_STATUS);
+	DUMP_REG(DSI_LANE_CTRL);
+	DUMP_REG(DSI_LANE_SWAP_CTRL);
+	DUMP_REG(DSI_DLN0_PHY_ERR);
+	DUMP_REG(DSI_LP_TIMER_CTRL);
+	DUMP_REG(DSI_HS_TIMER_CTRL);
+	DUMP_REG(DSI_TIMEOUT_STATUS);
+	DUMP_REG(DSI_CLKOUT_TIMING_CTRL);
+	DUMP_REG(DSI_EOT_PACKET);
+	DUMP_REG(DSI_EOT_PACKET_CTRL);
+	DUMP_REG(DSI_GENERIC_ESC_TX_TRIGGER);
+	DUMP_REG(DSI_ERR_INT_MASK0);
+	DUMP_REG(DSI_INT_CTRL);
+	DUMP_REG(DSI_SOFT_RESET);
+	DUMP_REG(DSI_CLK_CTRL);
+	DUMP_REG(DSI_CLK_STATUS);
+	DUMP_REG(DSI_PHY_SW_RESET);
+	DUMP_REG(DSI_AXI2AHB_CTRL);
+	DUMP_REG(DSI_COMMAND_MODE_MDP_CTRL2);
+	DUMP_REG(DSI_COMMAND_MODE_MDP_STREAM2_CTRL);
+	DUMP_REG(DSI_COMMAND_MODE_MDP_STREAM2_TOTAL);
+	DUMP_REG(DSI_VBIF_CTRL);
+	DUMP_REG(DSI_AES_CTRL);
+	DUMP_REG(DSI_RDBK_DATA_CTRL);
+	DUMP_REG(DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2);
+	DUMP_REG(DSI_TPG_DMA_FIFO_STATUS);
+	DUMP_REG(DSI_TPG_DMA_FIFO_WRITE_TRIGGER);
+	DUMP_REG(DSI_DSI_TIMING_FLUSH);
+	DUMP_REG(DSI_DSI_TIMING_DB_MODE);
+	DUMP_REG(DSI_TPG_DMA_FIFO_RESET);
+	DUMP_REG(DSI_VERSION);
+
+#undef DUMP_REG
+
+	/* was pr_err("LLENGTH ...") - a debug aid, not an error condition */
+	pr_debug("LENGTH = %d\n", len);
+	return len;
+}
+
+
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h
new file mode 100644
index 000000000000..028ad46664a7
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_REG_H_
+#define _DSI_CTRL_REG_H_
+
+#define DSI_HW_VERSION (0x0000)
+#define DSI_CTRL (0x0004)
+#define DSI_STATUS (0x0008)
+#define DSI_FIFO_STATUS (0x000C)
+#define DSI_VIDEO_MODE_CTRL (0x0010)
+#define DSI_VIDEO_MODE_SYNC_DATATYPE (0x0014)
+#define DSI_VIDEO_MODE_PIXEL_DATATYPE (0x0018)
+#define DSI_VIDEO_MODE_BLANKING_DATATYPE (0x001C)
+#define DSI_VIDEO_MODE_DATA_CTRL (0x0020)
+#define DSI_VIDEO_MODE_ACTIVE_H (0x0024)
+#define DSI_VIDEO_MODE_ACTIVE_V (0x0028)
+#define DSI_VIDEO_MODE_TOTAL (0x002C)
+#define DSI_VIDEO_MODE_HSYNC (0x0030)
+#define DSI_VIDEO_MODE_VSYNC (0x0034)
+#define DSI_VIDEO_MODE_VSYNC_VPOS (0x0038)
+#define DSI_COMMAND_MODE_DMA_CTRL (0x003C)
+#define DSI_COMMAND_MODE_MDP_CTRL (0x0040)
+#define DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL (0x0044)
+#define DSI_DMA_CMD_OFFSET (0x0048)
+#define DSI_DMA_CMD_LENGTH (0x004C)
+#define DSI_DMA_FIFO_CTRL (0x0050)
+#define DSI_DMA_NULL_PACKET_DATA (0x0054)
+#define DSI_COMMAND_MODE_MDP_STREAM0_CTRL (0x0058)
+#define DSI_COMMAND_MODE_MDP_STREAM0_TOTAL (0x005C)
+#define DSI_COMMAND_MODE_MDP_STREAM1_CTRL (0x0060)
+#define DSI_COMMAND_MODE_MDP_STREAM1_TOTAL (0x0064)
+#define DSI_ACK_ERR_STATUS (0x0068)
+#define DSI_RDBK_DATA0 (0x006C)
+#define DSI_RDBK_DATA1 (0x0070)
+#define DSI_RDBK_DATA2 (0x0074)
+#define DSI_RDBK_DATA3 (0x0078)
+#define DSI_RDBK_DATATYPE0 (0x007C)
+#define DSI_RDBK_DATATYPE1 (0x0080)
+#define DSI_TRIG_CTRL (0x0084)
+#define DSI_EXT_MUX (0x0088)
+#define DSI_EXT_MUX_TE_PULSE_DETECT_CTRL (0x008C)
+#define DSI_CMD_MODE_DMA_SW_TRIGGER (0x0090)
+#define DSI_CMD_MODE_MDP_SW_TRIGGER (0x0094)
+#define DSI_CMD_MODE_BTA_SW_TRIGGER (0x0098)
+#define DSI_RESET_SW_TRIGGER (0x009C)
+#define DSI_MISR_CMD_CTRL (0x00A0)
+#define DSI_MISR_VIDEO_CTRL (0x00A4)
+#define DSI_LANE_STATUS (0x00A8)
+#define DSI_LANE_CTRL (0x00AC)
+#define DSI_LANE_SWAP_CTRL (0x00B0)
+#define DSI_DLN0_PHY_ERR (0x00B4)
+#define DSI_LP_TIMER_CTRL (0x00B8)
+#define DSI_HS_TIMER_CTRL (0x00BC)
+#define DSI_TIMEOUT_STATUS (0x00C0)
+#define DSI_CLKOUT_TIMING_CTRL (0x00C4)
+#define DSI_EOT_PACKET (0x00C8)
+#define DSI_EOT_PACKET_CTRL (0x00CC)
+#define DSI_GENERIC_ESC_TX_TRIGGER (0x00D0)
+#define DSI_CAM_BIST_CTRL (0x00D4)
+#define DSI_CAM_BIST_FRAME_SIZE (0x00D8)
+#define DSI_CAM_BIST_BLOCK_SIZE (0x00DC)
+#define DSI_CAM_BIST_FRAME_CONFIG (0x00E0)
+#define DSI_CAM_BIST_LSFR_CTRL (0x00E4)
+#define DSI_CAM_BIST_LSFR_INIT (0x00E8)
+#define DSI_CAM_BIST_START (0x00EC)
+#define DSI_CAM_BIST_STATUS (0x00F0)
+#define DSI_ERR_INT_MASK0 (0x010C)
+#define DSI_INT_CTRL (0x0110)
+#define DSI_IOBIST_CTRL (0x0114)
+#define DSI_SOFT_RESET (0x0118)
+#define DSI_CLK_CTRL (0x011C)
+#define DSI_CLK_STATUS (0x0120)
+#define DSI_PHY_SW_RESET (0x012C)
+#define DSI_AXI2AHB_CTRL (0x0130)
+#define DSI_MISR_CMD_MDP0_32BIT (0x0134)
+#define DSI_MISR_CMD_MDP1_32BIT (0x0138)
+#define DSI_MISR_CMD_DMA_32BIT (0x013C)
+#define DSI_MISR_VIDEO_32BIT (0x0140)
+#define DSI_LANE_MISR_CTRL (0x0144)
+#define DSI_LANE0_MISR (0x0148)
+#define DSI_LANE1_MISR (0x014C)
+#define DSI_LANE2_MISR (0x0150)
+#define DSI_LANE3_MISR (0x0154)
+#define DSI_TEST_PATTERN_GEN_CTRL (0x015C)
+#define DSI_TEST_PATTERN_GEN_VIDEO_POLY (0x0160)
+#define DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL (0x0164)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY (0x0168)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0 (0x016C)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY (0x0170)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1 (0x0174)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_POLY (0x0178)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL (0x017C)
+#define DSI_TEST_PATTERN_GEN_VIDEO_ENABLE (0x0180)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER (0x0184)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER (0x0188)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2             (0x018C)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY          (0x0190)
+#define DSI_COMMAND_MODE_MDP_IDLE_CTRL                     (0x0194)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER           (0x0198)
+#define DSI_TPG_MAIN_CONTROL (0x019C)
+#define DSI_TPG_MAIN_CONTROL2 (0x01A0)
+#define DSI_TPG_VIDEO_CONFIG (0x01A4)
+#define DSI_TPG_COMPONENT_LIMITS (0x01A8)
+#define DSI_TPG_RECTANGLE (0x01AC)
+#define DSI_TPG_BLACK_WHITE_PATTERN_FRAMES (0x01B0)
+#define DSI_TPG_RGB_MAPPING (0x01B4)
+#define DSI_COMMAND_MODE_MDP_CTRL2 (0x01B8)
+#define DSI_COMMAND_MODE_MDP_STREAM2_CTRL (0x01BC)
+#define DSI_COMMAND_MODE_MDP_STREAM2_TOTAL (0x01C0)
+#define DSI_MISR_CMD_MDP2_8BIT (0x01C4)
+#define DSI_MISR_CMD_MDP2_32BIT (0x01C8)
+#define DSI_VBIF_CTRL (0x01CC)
+#define DSI_AES_CTRL (0x01D0)
+#define DSI_RDBK_DATA_CTRL (0x01D4)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2 (0x01D8)
+#define DSI_TPG_DMA_FIFO_STATUS (0x01DC)
+#define DSI_TPG_DMA_FIFO_WRITE_TRIGGER (0x01E0)
+#define DSI_DSI_TIMING_FLUSH (0x01E4)
+#define DSI_DSI_TIMING_DB_MODE (0x01E8)
+#define DSI_TPG_DMA_FIFO_RESET (0x01EC)
+#define DSI_SCRATCH_REGISTER_0 (0x01F0)
+#define DSI_VERSION (0x01F4)
+#define DSI_SCRATCH_REGISTER_1 (0x01F8)
+#define DSI_SCRATCH_REGISTER_2 (0x01FC)
+#define DSI_DYNAMIC_REFRESH_CTRL (0x0200)
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY (0x0204)
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2 (0x0208)
+#define DSI_DYNAMIC_REFRESH_PLL_DELAY (0x020C)
+#define DSI_DYNAMIC_REFRESH_STATUS (0x0210)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL0 (0x0214)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL1 (0x0218)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL2 (0x021C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL3 (0x0220)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL4 (0x0224)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL5 (0x0228)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL6 (0x022C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL7 (0x0230)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL8 (0x0234)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL9 (0x0238)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL10 (0x023C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL11 (0x0240)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL12 (0x0244)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL13 (0x0248)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL14 (0x024C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15 (0x0250)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL16 (0x0254)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL17 (0x0258)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL18 (0x025C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19 (0x0260)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20 (0x0264)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21 (0x0268)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22 (0x026C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23 (0x0270)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24 (0x0274)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25 (0x0278)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26 (0x027C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27 (0x0280)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28 (0x0284)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29 (0x0288)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL30 (0x028C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL31 (0x0290)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR (0x0294)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2 (0x0298)
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL (0x02A0)
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL2 (0x02A4)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL (0x02A8)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL2 (0x02AC)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL3 (0x02B0)
+#define DSI_COMMAND_MODE_NULL_INSERTION_CTRL (0x02B4)
+#define DSI_READ_BACK_DISABLE_STATUS (0x02B8)
+#define DSI_DESKEW_CTRL (0x02BC)
+#define DSI_DESKEW_DELAY_CTRL (0x02C0)
+#define DSI_DESKEW_SW_TRIGGER (0x02C4)
+#define DSI_SECURE_DISPLAY_STATUS (0x02CC)
+#define DSI_SECURE_DISPLAY_BLOCK_COMMAND_COLOR (0x02D0)
+#define DSI_SECURE_DISPLAY_BLOCK_VIDEO_COLOR (0x02D4)
+
+
+#endif /* _DSI_CTRL_REG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
new file mode 100644
index 000000000000..2caa32ea8f0c
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_DEFS_H_
+#define _DSI_DEFS_H_
+
+#include <linux/types.h>
+
+/* Total horizontal size of one line in pixels, including blanking */
+#define DSI_H_TOTAL(t) (((t)->h_active) + ((t)->h_back_porch) + \
+			((t)->h_sync_width) + ((t)->h_front_porch))
+
+/* Total vertical size of one frame in lines, including blanking */
+#define DSI_V_TOTAL(t) (((t)->v_active) + ((t)->v_back_porch) + \
+			((t)->v_sync_width) + ((t)->v_front_porch))
+
+/**
+ * enum dsi_pixel_format - DSI pixel formats
+ * @DSI_PIXEL_FORMAT_RGB565:       16 bpp, 5-6-5 component packing.
+ * @DSI_PIXEL_FORMAT_RGB666:       18 bpp, tightly packed.
+ * @DSI_PIXEL_FORMAT_RGB666_LOOSE: 18 bpp, loosely packed (presumably the
+ *                                 MIPI DSI byte-aligned variant - confirm).
+ * @DSI_PIXEL_FORMAT_RGB888:       24 bpp, 8 bits per component.
+ * @DSI_PIXEL_FORMAT_RGB111:       3 bpp, 1 bit per component.
+ * @DSI_PIXEL_FORMAT_RGB332:       8 bpp, 3-3-2 component packing.
+ * @DSI_PIXEL_FORMAT_RGB444:       12 bpp, 4 bits per component.
+ * @DSI_PIXEL_FORMAT_MAX:          Number of formats; not a valid format.
+ */
+enum dsi_pixel_format {
+	DSI_PIXEL_FORMAT_RGB565 = 0,
+	DSI_PIXEL_FORMAT_RGB666,
+	DSI_PIXEL_FORMAT_RGB666_LOOSE,
+	DSI_PIXEL_FORMAT_RGB888,
+	DSI_PIXEL_FORMAT_RGB111,
+	DSI_PIXEL_FORMAT_RGB332,
+	DSI_PIXEL_FORMAT_RGB444,
+	DSI_PIXEL_FORMAT_MAX
+};
+
+/**
+ * enum dsi_op_mode - dsi operation mode
+ * @DSI_OP_VIDEO_MODE: DSI video mode operation
+ * @DSI_OP_CMD_MODE: DSI Command mode operation
+ * @DSI_OP_MODE_MAX:
+ */
+enum dsi_op_mode {
+ DSI_OP_VIDEO_MODE = 0,
+ DSI_OP_CMD_MODE,
+ DSI_OP_MODE_MAX
+};
+
+/**
+ * enum dsi_mode_flags - flags to signal other drm components via private flags
+ * @DSI_MODE_FLAG_SEAMLESS: Seamless transition requested by user
+ * @DSI_MODE_FLAG_DFPS: Seamless transition is DynamicFPS
+ * @DSI_MODE_FLAG_VBLANK_PRE_MODESET: Transition needs VBLANK before Modeset
+ */
+enum dsi_mode_flags {
+ DSI_MODE_FLAG_SEAMLESS = BIT(0),
+ DSI_MODE_FLAG_DFPS = BIT(1),
+ DSI_MODE_FLAG_VBLANK_PRE_MODESET = BIT(2)
+};
+
+/**
+ * enum dsi_data_lanes - dsi physical lanes
+ * @DSI_DATA_LANE_0: Physical lane 0
+ * @DSI_DATA_LANE_1: Physical lane 1
+ * @DSI_DATA_LANE_2: Physical lane 2
+ * @DSI_DATA_LANE_3: Physical lane 3
+ * @DSI_CLOCK_LANE: Physical clock lane
+ */
+enum dsi_data_lanes {
+ DSI_DATA_LANE_0 = BIT(0),
+ DSI_DATA_LANE_1 = BIT(1),
+ DSI_DATA_LANE_2 = BIT(2),
+ DSI_DATA_LANE_3 = BIT(3),
+ DSI_CLOCK_LANE = BIT(4)
+};
+
+/**
+ * enum dsi_logical_lane - dsi logical lanes
+ * @DSI_LOGICAL_LANE_0: Logical lane 0
+ * @DSI_LOGICAL_LANE_1: Logical lane 1
+ * @DSI_LOGICAL_LANE_2: Logical lane 2
+ * @DSI_LOGICAL_LANE_3: Logical lane 3
+ * @DSI_LOGICAL_CLOCK_LANE: Clock lane
+ * @DSI_LANE_MAX: Maximum lanes supported
+ */
+enum dsi_logical_lane {
+ DSI_LOGICAL_LANE_0 = 0,
+ DSI_LOGICAL_LANE_1,
+ DSI_LOGICAL_LANE_2,
+ DSI_LOGICAL_LANE_3,
+ DSI_LOGICAL_CLOCK_LANE,
+ DSI_LANE_MAX
+};
+
+/**
+ * enum dsi_trigger_type - dsi trigger type
+ * @DSI_TRIGGER_NONE: No trigger.
+ * @DSI_TRIGGER_TE: TE trigger.
+ * @DSI_TRIGGER_SEOF: Start or End of frame.
+ * @DSI_TRIGGER_SW: Software trigger.
+ * @DSI_TRIGGER_SW_SEOF: Software trigger and start/end of frame.
+ * @DSI_TRIGGER_SW_TE: Software and TE triggers.
+ * @DSI_TRIGGER_MAX: Max trigger values.
+ */
+enum dsi_trigger_type {
+ DSI_TRIGGER_NONE = 0,
+ DSI_TRIGGER_TE,
+ DSI_TRIGGER_SEOF,
+ DSI_TRIGGER_SW,
+ DSI_TRIGGER_SW_SEOF,
+ DSI_TRIGGER_SW_TE,
+ DSI_TRIGGER_MAX
+};
+
+/**
+ * enum dsi_color_swap_mode - color swap mode
+ * @DSI_COLOR_SWAP_RGB:
+ * @DSI_COLOR_SWAP_RBG:
+ * @DSI_COLOR_SWAP_BGR:
+ * @DSI_COLOR_SWAP_BRG:
+ * @DSI_COLOR_SWAP_GRB:
+ * @DSI_COLOR_SWAP_GBR:
+ */
+enum dsi_color_swap_mode {
+ DSI_COLOR_SWAP_RGB = 0,
+ DSI_COLOR_SWAP_RBG,
+ DSI_COLOR_SWAP_BGR,
+ DSI_COLOR_SWAP_BRG,
+ DSI_COLOR_SWAP_GRB,
+ DSI_COLOR_SWAP_GBR
+};
+
+/**
+ * enum dsi_dfps_type - Dynamic FPS support type
+ * @DSI_DFPS_NONE:           Dynamic FPS is not supported.
+ * @DSI_DFPS_SUSPEND_RESUME: FPS change via a suspend/resume cycle
+ *                           (NOTE(review): inferred from name - confirm).
+ * @DSI_DFPS_IMMEDIATE_CLK:  Immediate FPS change via link clock update.
+ * @DSI_DFPS_IMMEDIATE_HFP:  Immediate FPS change via HFP adjustment.
+ * @DSI_DFPS_IMMEDIATE_VFP:  Immediate FPS change via VFP adjustment.
+ * @DSI_DFPS_MAX:            Number of DFPS modes; not a valid mode.
+ */
+enum dsi_dfps_type {
+	DSI_DFPS_NONE = 0,
+	DSI_DFPS_SUSPEND_RESUME,
+	DSI_DFPS_IMMEDIATE_CLK,
+	DSI_DFPS_IMMEDIATE_HFP,
+	DSI_DFPS_IMMEDIATE_VFP,
+	DSI_DFPS_MAX
+};
+
+/**
+ * enum dsi_phy_type - DSI phy types
+ * @DSI_PHY_TYPE_DPHY:
+ * @DSI_PHY_TYPE_CPHY:
+ */
+enum dsi_phy_type {
+ DSI_PHY_TYPE_DPHY,
+ DSI_PHY_TYPE_CPHY
+};
+
+/**
+ * enum dsi_te_mode - dsi te source
+ * @DSI_TE_ON_DATA_LINK: TE read from DSI link
+ * @DSI_TE_ON_EXT_PIN: TE signal on an external GPIO
+ */
+enum dsi_te_mode {
+ DSI_TE_ON_DATA_LINK = 0,
+ DSI_TE_ON_EXT_PIN,
+};
+
+/**
+ * enum dsi_video_traffic_mode - video mode pixel transmission type
+ * @DSI_VIDEO_TRAFFIC_SYNC_PULSES: Non-burst mode with sync pulses.
+ * @DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS: Non-burst mode with sync start events.
+ * @DSI_VIDEO_TRAFFIC_BURST_MODE: Burst mode using sync start events.
+ */
+enum dsi_video_traffic_mode {
+ DSI_VIDEO_TRAFFIC_SYNC_PULSES = 0,
+ DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS,
+ DSI_VIDEO_TRAFFIC_BURST_MODE,
+};
+
+/**
+ * struct dsi_mode_info - video mode information dsi frame
+ * @h_active:        Active width of one frame in pixels.
+ * @h_back_porch:    Horizontal back porch in pixels.
+ * @h_sync_width:    HSYNC width in pixels.
+ * @h_front_porch:   Horizontal front porch in pixels.
+ * @h_skew:          Horizontal skew in pixels (NOTE(review): semantics not
+ *                   visible here - confirm with consumers of this struct).
+ * @h_sync_polarity: Polarity of HSYNC (false is active low).
+ * @v_active:        Active height of one frame in lines.
+ * @v_back_porch:    Vertical back porch in lines.
+ * @v_sync_width:    VSYNC width in lines.
+ * @v_front_porch:   Vertical front porch in lines.
+ * @v_sync_polarity: Polarity of VSYNC (false is active low).
+ * @refresh_rate:    Refresh rate in Hz.
+ */
+struct dsi_mode_info {
+	u32 h_active;
+	u32 h_back_porch;
+	u32 h_sync_width;
+	u32 h_front_porch;
+	u32 h_skew;
+	bool h_sync_polarity;
+
+	u32 v_active;
+	u32 v_back_porch;
+	u32 v_sync_width;
+	u32 v_front_porch;
+	bool v_sync_polarity;
+
+	u32 refresh_rate;
+};
+
+/**
+ * struct dsi_lane_mapping - Mapping between DSI logical and physical lanes
+ * @physical_lane0: Logical lane to which physical lane 0 is mapped.
+ * @physical_lane1: Logical lane to which physical lane 1 is mapped.
+ * @physical_lane2: Logical lane to which physical lane 2 is mapped.
+ * @physical_lane3: Logical lane to which physical lane 3 is mapped.
+ */
+struct dsi_lane_mapping {
+ enum dsi_logical_lane physical_lane0;
+ enum dsi_logical_lane physical_lane1;
+ enum dsi_logical_lane physical_lane2;
+ enum dsi_logical_lane physical_lane3;
+};
+
+/**
+ * struct dsi_host_common_cfg - Host configuration common to video and cmd mode
+ * @dst_format:          Destination pixel format.
+ * @data_lanes:          Physical data lanes to be enabled.
+ * @en_crc_check:        Enable CRC checks.
+ * @en_ecc_check:        Enable ECC checks.
+ * @te_mode:             Source for TE signalling.
+ * @mdp_cmd_trigger:     MDP frame update trigger for command mode.
+ * @dma_cmd_trigger:     Command DMA trigger.
+ * @cmd_trigger_stream:  Command mode stream to trigger.
+ * @swap_mode:           Color component swap mode.
+ * @bit_swap_red:        Is red color bit swapped.
+ * @bit_swap_green:      Is green color bit swapped.
+ * @bit_swap_blue:       Is blue color bit swapped.
+ * @t_clk_post:          Number of byte clock cycles that the transmitter shall
+ *                       continue sending after last data lane has transitioned
+ *                       to LP mode.
+ * @t_clk_pre:           Number of byte clock cycles that the high speed clock
+ *                       shall be driven prior to data lane transitions from LP
+ *                       to HS mode.
+ * @ignore_rx_eot:       Ignore Rx EOT packets if set to true.
+ * @append_tx_eot:       Append EOT packets for forward transmissions if set to
+ *                       true.
+ */
+struct dsi_host_common_cfg {
+ enum dsi_pixel_format dst_format;
+ enum dsi_data_lanes data_lanes;
+ bool en_crc_check;
+ bool en_ecc_check;
+ enum dsi_te_mode te_mode;
+ enum dsi_trigger_type mdp_cmd_trigger;
+ enum dsi_trigger_type dma_cmd_trigger;
+ u32 cmd_trigger_stream;
+ enum dsi_color_swap_mode swap_mode;
+ bool bit_swap_red;
+ bool bit_swap_green;
+ bool bit_swap_blue;
+ u32 t_clk_post;
+ u32 t_clk_pre;
+ bool ignore_rx_eot;
+ bool append_tx_eot;
+};
+
+/**
+ * struct dsi_video_engine_cfg - DSI video engine configuration
+ * @last_line_interleave_en: Allow command mode op interleaved on last line of
+ *                           video stream.
+ * @pulse_mode_hsa_he:       Send HSA and HE following VS/VE packet if set to
+ *                           true.
+ * @hfp_lp11_en:             Enter low power stop mode (LP-11) during HFP.
+ * @hbp_lp11_en:             Enter low power stop mode (LP-11) during HBP.
+ * @hsa_lp11_en:             Enter low power stop mode (LP-11) during HSA.
+ * @eof_bllp_lp11_en:        Enter low power stop mode (LP-11) during BLLP of
+ *                           last line of a frame.
+ * @bllp_lp11_en:            Enter low power stop mode (LP-11) during BLLP.
+ * @traffic_mode:            Traffic mode for video stream.
+ * @vc_id:                   Virtual channel identifier.
+ */
+struct dsi_video_engine_cfg {
+ bool last_line_interleave_en;
+ bool pulse_mode_hsa_he;
+ bool hfp_lp11_en;
+ bool hbp_lp11_en;
+ bool hsa_lp11_en;
+ bool eof_bllp_lp11_en;
+ bool bllp_lp11_en;
+ enum dsi_video_traffic_mode traffic_mode;
+ u32 vc_id;
+};
+
+/**
+ * struct dsi_cmd_engine_cfg - DSI command engine configuration
+ * @max_cmd_packets_interleave: Maximum number of command mode RGB packets to
+ *                              send with in one horizontal blanking period
+ *                              of the video mode frame.
+ * @wr_mem_start:               DCS command for write_memory_start.
+ * @wr_mem_continue:            DCS command for write_memory_continue.
+ * @insert_dcs_command:         Insert DCS command as first byte of payload
+ *                              of the pixel data.
+ * @mdp_transfer_time_us:       Specifies the mdp transfer time for command
+ *                              mode panels in microseconds.
+ */
+struct dsi_cmd_engine_cfg {
+ u32 max_cmd_packets_interleave;
+ u32 wr_mem_start;
+ u32 wr_mem_continue;
+ bool insert_dcs_command;
+ u32 mdp_transfer_time_us;
+};
+
+/**
+ * struct dsi_host_config - DSI host configuration parameters.
+ * @panel_mode:       Operation mode for panel (video or cmd mode).
+ * @common_config:    Host configuration common to both Video and Cmd mode.
+ * @u:                Engine configuration, selected by @panel_mode:
+ *                    @u.video_engine for video mode, @u.cmd_engine for
+ *                    command mode.
+ * @esc_clk_rate_hz:  Esc clock frequency in Hz.
+ * @bit_clk_rate_hz:  Bit clock frequency in Hz.
+ * @video_timing:     Video timing information of a frame.
+ * @lane_map:         Mapping between logical and physical lanes.
+ */
+struct dsi_host_config {
+ enum dsi_op_mode panel_mode;
+ struct dsi_host_common_cfg common_config;
+ union {
+  struct dsi_video_engine_cfg video_engine;
+  struct dsi_cmd_engine_cfg cmd_engine;
+ } u;
+ u64 esc_clk_rate_hz;
+ u64 bit_clk_rate_hz;
+ struct dsi_mode_info video_timing;
+ struct dsi_lane_mapping lane_map;
+};
+
+/**
+ * struct dsi_display_mode - specifies mode for dsi display
+ * @timing:        Timing parameters for the panel.
+ * @pixel_clk_khz: Pixel clock in Khz.
+ * @panel_mode:    Panel operation mode.
+ * @flags:         Additional flags.
+ */
+struct dsi_display_mode {
+ struct dsi_mode_info timing;
+ u32 pixel_clk_khz;
+ enum dsi_op_mode panel_mode;
+
+ u32 flags;
+};
+
+#endif /* _DSI_DEFS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
new file mode 100644
index 000000000000..5a166a4bae93
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -0,0 +1,2588 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "msm-dsi-display:[%s] " fmt, __func__
+
+#include <linux/list.h>
+#include <linux/of.h>
+
+#include "msm_drv.h"
+#include "dsi_display.h"
+#include "dsi_panel.h"
+#include "dsi_ctrl.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_drm.h"
+
+#define to_dsi_display(x) container_of(x, struct dsi_display, host)
+
+static DEFINE_MUTEX(dsi_display_list_lock);
+static LIST_HEAD(dsi_display_list);
+
+static const struct of_device_id dsi_display_dt_match[] = {
+ {.compatible = "qcom,dsi-display"},
+ {}
+};
+
+static struct dsi_display *main_display;
+
+/**
+ * dsi_display_set_backlight() - set backlight level on the attached panel
+ * @display: Opaque handle, expected to be a struct dsi_display pointer.
+ * @bl_lvl:  Backlight level to apply.
+ *
+ * Return: 0 on success, -EINVAL for a NULL display, or the error code
+ * returned by dsi_panel_set_backlight().
+ */
+int dsi_display_set_backlight(void *display, u32 bl_lvl)
+{
+ struct dsi_display *dsi_display = display;
+ struct dsi_panel *panel;
+ int rc = 0;
+
+ if (dsi_display == NULL)
+  return -EINVAL;
+
+ panel = dsi_display->panel;
+
+ rc = dsi_panel_set_backlight(panel, bl_lvl);
+ if (rc)
+  pr_err("unable to set backlight\n");
+
+ return rc;
+}
+
+/*
+ * debugfs read handler: dumps display name, resolution, per-controller
+ * ctrl/phy names and the clock master into a 4K scratch buffer and copies
+ * it to user space. Subsequent reads (non-zero *ppos) return 0 (EOF).
+ *
+ * NOTE(review): 'len' is never clamped to SZ_4K. snprintf() returns the
+ * would-be length, so with enough controllers len could exceed SZ_4K and
+ * (SZ_4K - len) would underflow as an unsigned size — confirm ctrl_count
+ * keeps the total output well under 4K.
+ */
+static ssize_t debugfs_dump_info_read(struct file *file,
+         char __user *buff,
+         size_t count,
+         loff_t *ppos)
+{
+ struct dsi_display *display = file->private_data;
+ char *buf;
+ u32 len = 0;
+ int i;
+
+ if (!display)
+  return -ENODEV;
+
+ if (*ppos)
+  return 0;
+
+ buf = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!buf)
+  return -ENOMEM;
+
+ len += snprintf(buf + len, (SZ_4K - len), "name = %s\n", display->name);
+ len += snprintf(buf + len, (SZ_4K - len),
+   "\tResolution = %dx%d\n",
+   display->config.video_timing.h_active,
+   display->config.video_timing.v_active);
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  len += snprintf(buf + len, (SZ_4K - len),
+    "\tCTRL_%d:\n\t\tctrl = %s\n\t\tphy = %s\n",
+    i, display->ctrl[i].ctrl->name,
+    display->ctrl[i].phy->name);
+ }
+
+ len += snprintf(buf + len, (SZ_4K - len),
+   "\tPanel = %s\n", display->panel->name);
+
+ len += snprintf(buf + len, (SZ_4K - len),
+   "\tClock master = %s\n",
+   display->ctrl[display->clk_master_idx].ctrl->name);
+
+ if (copy_to_user(buff, buf, len)) {
+  kfree(buf);
+  return -EFAULT;
+ }
+
+ *ppos += len;
+
+ kfree(buf);
+ return len;
+}
+
+
+/* File operations for the per-display "dump_info" debugfs entry. */
+static const struct file_operations dump_info_fops = {
+ .open = simple_open,
+ .read = debugfs_dump_info_read,
+};
+
+/*
+ * Create the per-display debugfs directory and its "dump_info" file.
+ * On any failure the partially created directory is removed.
+ *
+ * NOTE(review): debugfs_create_dir()/debugfs_create_file() can return NULL
+ * (e.g. when debugfs is disabled); IS_ERR_OR_NULL() catches that, but
+ * PTR_ERR(NULL) is 0, so the function would return 0 despite failing —
+ * confirm callers tolerate display->root being unset in that case.
+ */
+static int dsi_display_debugfs_init(struct dsi_display *display)
+{
+ int rc = 0;
+ struct dentry *dir, *dump_file;
+
+ dir = debugfs_create_dir(display->name, NULL);
+ if (IS_ERR_OR_NULL(dir)) {
+  rc = PTR_ERR(dir);
+  pr_err("[%s] debugfs create dir failed, rc = %d\n",
+         display->name, rc);
+  goto error;
+ }
+
+ dump_file = debugfs_create_file("dump_info",
+     0444,
+     dir,
+     display,
+     &dump_info_fops);
+ if (IS_ERR_OR_NULL(dump_file)) {
+  rc = PTR_ERR(dump_file);
+  pr_err("[%s] debugfs create file failed, rc=%d\n",
+         display->name, rc);
+  goto error_remove_dir;
+ }
+
+ display->root = dir;
+ return rc;
+error_remove_dir:
+ debugfs_remove(dir);
+error:
+ return rc;
+}
+
+/* Tear down the display's debugfs directory and everything below it. */
+static int dsi_display_debugfs_deinit(struct dsi_display *display)
+{
+ debugfs_remove_recursive(display->root);
+
+ return 0;
+}
+
+/*
+ * For split-DSI (more than one controller), each controller drives only a
+ * fraction of the panel width, so divide the horizontal timing parameters
+ * and the pixel clock by the controller count. Single-controller displays
+ * are left untouched.
+ */
+static void adjust_timing_by_ctrl_count(const struct dsi_display *display,
+     struct dsi_display_mode *mode)
+{
+ if (display->ctrl_count > 1) {
+  mode->timing.h_active /= display->ctrl_count;
+  mode->timing.h_front_porch /= display->ctrl_count;
+  mode->timing.h_sync_width /= display->ctrl_count;
+  mode->timing.h_back_porch /= display->ctrl_count;
+  mode->timing.h_skew /= display->ctrl_count;
+  mode->pixel_clk_khz /= display->ctrl_count;
+ }
+}
+
+/*
+ * Move every controller to the VREG_ON power state. On failure the
+ * controllers already powered are unwound back to POWER_OFF (errors on the
+ * unwind path are intentionally ignored).
+ */
+static int dsi_display_ctrl_power_on(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ /* Sequence does not matter for split dsi usecases */
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl)
+   continue;
+
+  rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+           DSI_CTRL_POWER_VREG_ON);
+  if (rc) {
+   pr_err("[%s] Failed to set power state, rc=%d\n",
+          ctrl->ctrl->name, rc);
+   goto error;
+  }
+ }
+
+ return rc;
+error:
+ for (i = i - 1; i >= 0; i--) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl)
+   continue;
+  (void)dsi_ctrl_set_power_state(ctrl->ctrl, DSI_CTRL_POWER_OFF);
+ }
+ return rc;
+}
+
+/*
+ * Move every controller to the POWER_OFF state. Stops at the first failure
+ * and returns its error code; controllers already powered off stay off.
+ */
+static int dsi_display_ctrl_power_off(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ /* Sequence does not matter for split dsi usecases */
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl)
+   continue;
+
+  rc = dsi_ctrl_set_power_state(ctrl->ctrl, DSI_CTRL_POWER_OFF);
+  if (rc) {
+   pr_err("[%s] Failed to power off, rc=%d\n",
+          ctrl->ctrl->name, rc);
+   goto error;
+  }
+ }
+error:
+ return rc;
+}
+
+/*
+ * Power on the PHY of every controller, unwinding already-enabled PHYs on
+ * failure.
+ *
+ * NOTE(review): the forward loop skips entries with a NULL ctrl->ctrl but
+ * then operates on ctrl->phy, while the unwind loop (and phy_power_off)
+ * gate on ctrl->phy. dsi_display_parse_dt() enforces phy_count ==
+ * ctrl_count, so the pointers should be populated in lockstep — confirm
+ * and consider gating both loops on ctrl->phy for consistency.
+ */
+static int dsi_display_phy_power_on(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ /* Sequence does not matter for split dsi usecases */
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl)
+   continue;
+
+  rc = dsi_phy_set_power_state(ctrl->phy, true);
+  if (rc) {
+   pr_err("[%s] Failed to set power state, rc=%d\n",
+          ctrl->phy->name, rc);
+   goto error;
+  }
+ }
+
+ return rc;
+error:
+ for (i = i - 1; i >= 0; i--) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->phy)
+   continue;
+  (void)dsi_phy_set_power_state(ctrl->phy, false);
+ }
+ return rc;
+}
+
+/*
+ * Power off the PHY of every controller; stops at the first failure.
+ *
+ * NOTE(review): the failure print uses ctrl->ctrl->name although the
+ * operation that failed is on ctrl->phy — ctrl->phy->name would identify
+ * the failing device more accurately.
+ */
+static int dsi_display_phy_power_off(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ /* Sequence does not matter for split dsi usecases */
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->phy)
+   continue;
+
+  rc = dsi_phy_set_power_state(ctrl->phy, false);
+  if (rc) {
+   pr_err("[%s] Failed to power off, rc=%d\n",
+          ctrl->ctrl->name, rc);
+   goto error;
+  }
+ }
+error:
+ return rc;
+}
+
+/*
+ * Enable core clocks: the clock-master controller first (it sources the
+ * clock), then the remaining controllers. On a slave failure the master is
+ * dropped back to the VREG_ON state.
+ */
+static int dsi_display_ctrl_core_clk_on(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ /*
+  * In case of split DSI usecases, the clock for master controller should
+  * be enabled before the other controller. Master controller in the
+  * clock context refers to the controller that sources the clock.
+  */
+
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+
+ rc = dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_CORE_CLK_ON);
+ if (rc) {
+  pr_err("[%s] failed to turn on clocks, rc=%d\n",
+         display->name, rc);
+  goto error;
+ }
+
+ /* Turn on rest of the controllers */
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl || (ctrl == m_ctrl))
+   continue;
+
+  rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+           DSI_CTRL_POWER_CORE_CLK_ON);
+  if (rc) {
+   pr_err("[%s] failed to turn on clock, rc=%d\n",
+          display->name, rc);
+   goto error_disable_master;
+  }
+ }
+ return rc;
+error_disable_master:
+ (void)dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_VREG_ON);
+error:
+ return rc;
+}
+
+/*
+ * Enable link clocks: set the clock source and LINK_CLK_ON state on the
+ * clock master first, then on every other controller. On failure the
+ * master is dropped back to the CORE_CLK_ON state.
+ */
+static int dsi_display_ctrl_link_clk_on(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ /*
+  * In case of split DSI usecases, the clock for master controller should
+  * be enabled before the other controller. Master controller in the
+  * clock context refers to the controller that sources the clock.
+  */
+
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+
+ rc = dsi_ctrl_set_clock_source(m_ctrl->ctrl,
+        &display->clock_info.src_clks);
+ if (rc) {
+  pr_err("[%s] failed to set source clocks for master, rc=%d\n",
+         display->name, rc);
+  goto error;
+ }
+
+ rc = dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_LINK_CLK_ON);
+ if (rc) {
+  pr_err("[%s] failed to turn on clocks, rc=%d\n",
+         display->name, rc);
+  goto error;
+ }
+
+ /* Turn on rest of the controllers */
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl || (ctrl == m_ctrl))
+   continue;
+
+  rc = dsi_ctrl_set_clock_source(ctrl->ctrl,
+         &display->clock_info.src_clks);
+  if (rc) {
+   pr_err("[%s] failed to set source clocks, rc=%d\n",
+          display->name, rc);
+   goto error_disable_master;
+  }
+
+  rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+           DSI_CTRL_POWER_LINK_CLK_ON);
+  if (rc) {
+   pr_err("[%s] failed to turn on clock, rc=%d\n",
+          display->name, rc);
+   goto error_disable_master;
+  }
+ }
+ return rc;
+error_disable_master:
+ (void)dsi_ctrl_set_power_state(m_ctrl->ctrl,
+           DSI_CTRL_POWER_CORE_CLK_ON);
+error:
+ return rc;
+}
+
+/*
+ * Disable core clocks: slaves first, master last. "Off" is expressed as a
+ * transition back to the lower DSI_CTRL_POWER_VREG_ON state in the ctrl
+ * power state machine. Failures are logged but do not stop the loop.
+ */
+static int dsi_display_ctrl_core_clk_off(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ /*
+  * In case of split DSI usecases, clock for slave DSI controllers should
+  * be disabled first before disabling clock for master controller. Slave
+  * controllers in the clock context refer to controller which source
+  * clock from another controller.
+  */
+
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl || (ctrl == m_ctrl))
+   continue;
+
+  rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+           DSI_CTRL_POWER_VREG_ON);
+  if (rc) {
+   pr_err("[%s] failed to turn off clock, rc=%d\n",
+          display->name, rc);
+  }
+ }
+
+ rc = dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_VREG_ON);
+ if (rc)
+  pr_err("[%s] failed to turn off clocks, rc=%d\n",
+         display->name, rc);
+
+ return rc;
+}
+
+/*
+ * Disable link clocks: slaves first, master last, by dropping each
+ * controller back to the DSI_CTRL_POWER_CORE_CLK_ON state. Failures are
+ * logged but do not stop the loop.
+ */
+static int dsi_display_ctrl_link_clk_off(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ /*
+  * In case of split DSI usecases, clock for slave DSI controllers should
+  * be disabled first before disabling clock for master controller. Slave
+  * controllers in the clock context refer to controller which source
+  * clock from another controller.
+  */
+
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl || (ctrl == m_ctrl))
+   continue;
+
+  rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+           DSI_CTRL_POWER_CORE_CLK_ON);
+  if (rc) {
+   pr_err("[%s] failed to turn off clock, rc=%d\n",
+          display->name, rc);
+  }
+ }
+ rc = dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_CORE_CLK_ON);
+ if (rc)
+  pr_err("[%s] failed to turn off clocks, rc=%d\n",
+         display->name, rc);
+ return rc;
+}
+
+/*
+ * Initialize the host hardware of every controller; on failure the hosts
+ * already initialized are de-initialized in reverse order.
+ */
+static int dsi_display_ctrl_init(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ for (i = 0 ; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  rc = dsi_ctrl_host_init(ctrl->ctrl);
+  if (rc) {
+   pr_err("[%s] failed to init host_%d, rc=%d\n",
+          display->name, i, rc);
+   goto error_host_deinit;
+  }
+ }
+
+ return 0;
+error_host_deinit:
+ for (i = i - 1; i >= 0; i--) {
+  ctrl = &display->ctrl[i];
+  (void)dsi_ctrl_host_deinit(ctrl->ctrl);
+ }
+ return rc;
+}
+
+/*
+ * De-initialize the host hardware of every controller. Failures are
+ * logged but the loop continues; the last error (if any) is returned.
+ */
+static int dsi_display_ctrl_deinit(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ for (i = 0 ; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  rc = dsi_ctrl_host_deinit(ctrl->ctrl);
+  if (rc) {
+   pr_err("[%s] failed to deinit host_%d, rc=%d\n",
+          display->name, i, rc);
+  }
+ }
+
+ return rc;
+}
+
+/*
+ * Reference-counted enable of the command engine on all controllers.
+ * The command master is enabled first; on a slave failure the master is
+ * turned back off and the refcount stays unchanged.
+ */
+static int dsi_display_cmd_engine_enable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ if (display->cmd_engine_refcount > 0) {
+  display->cmd_engine_refcount++;
+  return 0;
+ }
+
+ m_ctrl = &display->ctrl[display->cmd_master_idx];
+
+ rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
+ if (rc) {
+  pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+         display->name, rc);
+  goto error;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl || (ctrl == m_ctrl))
+   continue;
+
+  rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
+         DSI_CTRL_ENGINE_ON);
+  if (rc) {
+   pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+          display->name, rc);
+   goto error_disable_master;
+  }
+ }
+
+ display->cmd_engine_refcount++;
+ return rc;
+error_disable_master:
+ (void)dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+error:
+ return rc;
+}
+
+/*
+ * Reference-counted disable of the command engine, mirroring
+ * dsi_display_cmd_engine_enable(). Slaves are turned off first, the
+ * command master last; the refcount is forced to zero on exit from the
+ * final disable, even when a controller reports an error.
+ */
+static int dsi_display_cmd_engine_disable(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+	if (display->cmd_engine_refcount == 0) {
+		pr_err("[%s] Invalid refcount\n", display->name);
+		return 0;
+	} else if (display->cmd_engine_refcount > 1) {
+		display->cmd_engine_refcount--;
+		return 0;
+	}
+
+	m_ctrl = &display->ctrl[display->cmd_master_idx];
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
+						   DSI_CTRL_ENGINE_OFF);
+		if (rc)
+			pr_err("[%s] failed to disable cmd engine, rc=%d\n",
+			       display->name, rc);
+	}
+
+	rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+	if (rc) {
+		pr_err("[%s] failed to disable cmd engine, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+error:
+	display->cmd_engine_refcount = 0;
+	return rc;
+}
+
+/*
+ * Enable the host engine on all controllers, command master first; on a
+ * slave failure the master is turned back off.
+ */
+static int dsi_display_ctrl_host_enable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ m_ctrl = &display->ctrl[display->cmd_master_idx];
+
+ rc = dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
+ if (rc) {
+  pr_err("[%s] failed to enable host engine, rc=%d\n",
+         display->name, rc);
+  goto error;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl || (ctrl == m_ctrl))
+   continue;
+
+  rc = dsi_ctrl_set_host_engine_state(ctrl->ctrl,
+          DSI_CTRL_ENGINE_ON);
+  if (rc) {
+   pr_err("[%s] failed to enable sl host engine, rc=%d\n",
+          display->name, rc);
+   goto error_disable_master;
+  }
+ }
+
+ return rc;
+error_disable_master:
+ (void)dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+error:
+ return rc;
+}
+
+/*
+ * Disable the host engine on all controllers: slaves first (errors only
+ * logged), the command master last (its error is returned).
+ */
+static int dsi_display_ctrl_host_disable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ m_ctrl = &display->ctrl[display->cmd_master_idx];
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl || (ctrl == m_ctrl))
+   continue;
+
+  rc = dsi_ctrl_set_host_engine_state(ctrl->ctrl,
+          DSI_CTRL_ENGINE_OFF);
+  if (rc)
+   pr_err("[%s] failed to disable host engine, rc=%d\n",
+          display->name, rc);
+ }
+
+ rc = dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+ if (rc) {
+  pr_err("[%s] failed to disable host engine, rc=%d\n",
+         display->name, rc);
+  goto error;
+ }
+
+error:
+ return rc;
+}
+
+/*
+ * Enable the video engine on all controllers, video master first; on a
+ * slave failure the master is turned back off.
+ */
+static int dsi_display_vid_engine_enable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ m_ctrl = &display->ctrl[display->video_master_idx];
+
+ rc = dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
+ if (rc) {
+  pr_err("[%s] failed to enable vid engine, rc=%d\n",
+         display->name, rc);
+  goto error;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl || (ctrl == m_ctrl))
+   continue;
+
+  rc = dsi_ctrl_set_vid_engine_state(ctrl->ctrl,
+         DSI_CTRL_ENGINE_ON);
+  if (rc) {
+   pr_err("[%s] failed to enable vid engine, rc=%d\n",
+          display->name, rc);
+   goto error_disable_master;
+  }
+ }
+
+ return rc;
+error_disable_master:
+ (void)dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+error:
+ return rc;
+}
+
+/*
+ * Disable the video engine on all controllers: slaves first (errors only
+ * logged), the video master last ("mvid" in the log = master vid engine).
+ */
+static int dsi_display_vid_engine_disable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ m_ctrl = &display->ctrl[display->video_master_idx];
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl || (ctrl == m_ctrl))
+   continue;
+
+  rc = dsi_ctrl_set_vid_engine_state(ctrl->ctrl,
+         DSI_CTRL_ENGINE_OFF);
+  if (rc)
+   pr_err("[%s] failed to disable vid engine, rc=%d\n",
+          display->name, rc);
+ }
+
+ rc = dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+ if (rc)
+  pr_err("[%s] failed to disable mvid engine, rc=%d\n",
+         display->name, rc);
+
+ return rc;
+}
+
+/*
+ * Enable the DSI PHYs. The clock master's PHY is enabled first with PLL
+ * source NATIVE (split DSI) or STANDALONE (single), the others with
+ * NON_NATIVE. On a slave failure the master PHY is disabled again.
+ */
+static int dsi_display_phy_enable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+ enum dsi_phy_pll_source m_src = DSI_PLL_SOURCE_STANDALONE;
+
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+ if (display->ctrl_count > 1)
+  m_src = DSI_PLL_SOURCE_NATIVE;
+
+ rc = dsi_phy_enable(m_ctrl->phy,
+       &display->config,
+       m_src,
+       true);
+ if (rc) {
+  pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
+         display->name, rc);
+  goto error;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl || (ctrl == m_ctrl))
+   continue;
+
+  rc = dsi_phy_enable(ctrl->phy,
+        &display->config,
+        DSI_PLL_SOURCE_NON_NATIVE,
+        true);
+  if (rc) {
+   pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
+          display->name, rc);
+   goto error_disable_master;
+  }
+ }
+
+ return rc;
+
+error_disable_master:
+ (void)dsi_phy_disable(m_ctrl->phy);
+error:
+ return rc;
+}
+
+/*
+ * Disable the DSI PHYs: slaves first (errors only logged), the clock
+ * master's PHY last (its error is returned).
+ */
+static int dsi_display_phy_disable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl || (ctrl == m_ctrl))
+   continue;
+
+  rc = dsi_phy_disable(ctrl->phy);
+  if (rc)
+   pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
+          display->name, rc);
+ }
+
+ rc = dsi_phy_disable(m_ctrl->phy);
+ if (rc)
+  pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
+         display->name, rc);
+
+ return rc;
+}
+
+/* Placeholder: wake the display before a command transfer. Currently a
+ * no-op that always succeeds.
+ */
+static int dsi_display_wake_up(struct dsi_display *display)
+{
+ return 0;
+}
+
+/*
+ * Broadcast one command to all controllers in split DSI. Two phases:
+ * 1) queue the command in every controller's FIFO with a deferred trigger
+ *    (the command master additionally flagged BROADCAST_MASTER);
+ * 2) trigger the slaves, then trigger the master last so the transfer
+ *    starts simultaneously on the link.
+ */
+static int dsi_display_broadcast_cmd(struct dsi_display *display,
+         const struct mipi_dsi_msg *msg)
+{
+ int rc = 0;
+ u32 flags, m_flags;
+ struct dsi_display_ctrl *ctrl, *m_ctrl;
+ int i;
+
+ m_flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_BROADCAST_MASTER |
+     DSI_CTRL_CMD_DEFER_TRIGGER | DSI_CTRL_CMD_FIFO_STORE);
+ flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_DEFER_TRIGGER |
+   DSI_CTRL_CMD_FIFO_STORE);
+
+ /*
+  * 1. Setup commands in FIFO
+  * 2. Trigger commands
+  */
+ m_ctrl = &display->ctrl[display->cmd_master_idx];
+ rc = dsi_ctrl_cmd_transfer(m_ctrl->ctrl, msg, m_flags);
+ if (rc) {
+  pr_err("[%s] cmd transfer failed on master,rc=%d\n",
+         display->name, rc);
+  goto error;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (ctrl == m_ctrl)
+   continue;
+
+  rc = dsi_ctrl_cmd_transfer(ctrl->ctrl, msg, flags);
+  if (rc) {
+   pr_err("[%s] cmd transfer failed, rc=%d\n",
+          display->name, rc);
+   goto error;
+  }
+
+  rc = dsi_ctrl_cmd_tx_trigger(ctrl->ctrl,
+          DSI_CTRL_CMD_BROADCAST);
+  if (rc) {
+   pr_err("[%s] cmd trigger failed, rc=%d\n",
+          display->name, rc);
+   goto error;
+  }
+ }
+
+ rc = dsi_ctrl_cmd_tx_trigger(m_ctrl->ctrl,
+         (DSI_CTRL_CMD_BROADCAST_MASTER |
+          DSI_CTRL_CMD_BROADCAST));
+ if (rc) {
+  pr_err("[%s] cmd trigger failed for master, rc=%d\n",
+         display->name, rc);
+  goto error;
+ }
+
+error:
+ return rc;
+}
+
+/*
+ * Issue a PHY software reset through every controller, command master
+ * first. Stops at the first failure.
+ */
+static int dsi_display_phy_sw_reset(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ m_ctrl = &display->ctrl[display->cmd_master_idx];
+
+ rc = dsi_ctrl_phy_sw_reset(m_ctrl->ctrl);
+ if (rc) {
+  pr_err("[%s] failed to reset phy, rc=%d\n", display->name, rc);
+  goto error;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+  ctrl = &display->ctrl[i];
+  if (!ctrl->ctrl || (ctrl == m_ctrl))
+   continue;
+
+  rc = dsi_ctrl_phy_sw_reset(ctrl->ctrl);
+  if (rc) {
+   pr_err("[%s] failed to reset phy, rc=%d\n",
+          display->name, rc);
+   goto error;
+  }
+ }
+
+error:
+ return rc;
+}
+
+/* mipi_dsi_host_ops .attach callback — nothing to do for this driver. */
+static int dsi_host_attach(struct mipi_dsi_host *host,
+      struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+/* mipi_dsi_host_ops .detach callback — nothing to do for this driver. */
+static int dsi_host_detach(struct mipi_dsi_host *host,
+      struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+/*
+ * mipi_dsi_host_ops .transfer callback: wake the display, enable the
+ * command engine, send @msg (broadcast in split DSI, direct single-ctrl
+ * FIFO transfer otherwise), then release the command engine.
+ *
+ * Return: 0 on success or a negative error code. (The byte count is not
+ * reported; callers treat non-negative as success.)
+ */
+static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
+				 const struct mipi_dsi_msg *msg)
+{
+	struct dsi_display *display;
+	int rc = 0;
+
+	if (!host || !msg) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	/* Derive the display only after validating host. */
+	display = to_dsi_display(host);
+
+	rc = dsi_display_wake_up(display);
+	if (rc) {
+		pr_err("[%s] failed to wake up display, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	rc = dsi_display_cmd_engine_enable(display);
+	if (rc) {
+		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	if (display->ctrl_count > 1) {
+		rc = dsi_display_broadcast_cmd(display, msg);
+		if (rc) {
+			pr_err("[%s] cmd broadcast failed, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_cmd_engine;
+		}
+	} else {
+		rc = dsi_ctrl_cmd_transfer(display->ctrl[0].ctrl, msg,
+					   DSI_CTRL_CMD_FIFO_STORE);
+		if (rc) {
+			pr_err("[%s] cmd transfer failed, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_cmd_engine;
+		}
+	}
+error_disable_cmd_engine:
+	(void)dsi_display_cmd_engine_disable(display);
+error:
+	return rc;
+}
+
+
+/* Host operations registered with the MIPI DSI framework. */
+static struct mipi_dsi_host_ops dsi_host_ops = {
+ .attach = dsi_host_attach,
+ .detach = dsi_host_detach,
+ .transfer = dsi_host_transfer,
+};
+
+/* Register this display's embedded mipi_dsi_host with the framework. */
+static int dsi_display_mipi_host_init(struct dsi_display *display)
+{
+ int rc = 0;
+ struct mipi_dsi_host *host = &display->host;
+
+ host->dev = &display->pdev->dev;
+ host->ops = &dsi_host_ops;
+
+ rc = mipi_dsi_host_register(host);
+ if (rc) {
+  pr_err("[%s] failed to register mipi dsi host, rc=%d\n",
+         display->name, rc);
+  goto error;
+ }
+
+error:
+ return rc;
+}
+/* Unregister the mipi_dsi_host and clear its dev/ops pointers. */
+static int dsi_display_mipi_host_deinit(struct dsi_display *display)
+{
+ int rc = 0;
+ struct mipi_dsi_host *host = &display->host;
+
+ mipi_dsi_host_unregister(host);
+
+ host->dev = NULL;
+ host->ops = NULL;
+
+ return rc;
+}
+
+/*
+ * Release every source/mux/shadow clock handle previously acquired by
+ * dsi_display_clocks_init(), NULL-ing each pointer. Safe to call on a
+ * partially initialized clock_info.
+ */
+static int dsi_display_clocks_deinit(struct dsi_display *display)
+{
+ int rc = 0;
+ struct dsi_clk_link_set *src = &display->clock_info.src_clks;
+ struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
+ struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
+
+ if (src->byte_clk) {
+  devm_clk_put(&display->pdev->dev, src->byte_clk);
+  src->byte_clk = NULL;
+ }
+
+ if (src->pixel_clk) {
+  devm_clk_put(&display->pdev->dev, src->pixel_clk);
+  src->pixel_clk = NULL;
+ }
+
+ if (mux->byte_clk) {
+  devm_clk_put(&display->pdev->dev, mux->byte_clk);
+  mux->byte_clk = NULL;
+ }
+
+ if (mux->pixel_clk) {
+  devm_clk_put(&display->pdev->dev, mux->pixel_clk);
+  mux->pixel_clk = NULL;
+ }
+
+ if (shadow->byte_clk) {
+  devm_clk_put(&display->pdev->dev, shadow->byte_clk);
+  shadow->byte_clk = NULL;
+ }
+
+ if (shadow->pixel_clk) {
+  devm_clk_put(&display->pdev->dev, shadow->pixel_clk);
+  shadow->pixel_clk = NULL;
+ }
+
+ return rc;
+}
+
+/*
+ * Acquire the display's clock handles. src_byte_clk and src_pixel_clk are
+ * mandatory; failing to get either releases everything and returns the
+ * error. The mux_* and shadow_* clocks are only needed for dynamic
+ * refresh, so a failure there is non-critical: acquisition stops and the
+ * function still reports success.
+ */
+static int dsi_display_clocks_init(struct dsi_display *display)
+{
+	int rc = 0;
+	struct dsi_clk_link_set *src = &display->clock_info.src_clks;
+	struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
+	struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
+
+	src->byte_clk = devm_clk_get(&display->pdev->dev, "src_byte_clk");
+	if (IS_ERR_OR_NULL(src->byte_clk)) {
+		rc = PTR_ERR(src->byte_clk);
+		src->byte_clk = NULL;
+		pr_err("failed to get src_byte_clk, rc=%d\n", rc);
+		goto error;
+	}
+
+	src->pixel_clk = devm_clk_get(&display->pdev->dev, "src_pixel_clk");
+	if (IS_ERR_OR_NULL(src->pixel_clk)) {
+		rc = PTR_ERR(src->pixel_clk);
+		src->pixel_clk = NULL;
+		pr_err("failed to get src_pixel_clk, rc=%d\n", rc);
+		goto error;
+	}
+
+	/*
+	 * The clocks below are required only for dynamic refresh use cases.
+	 * If any of them is missing, skip the rest and report success.
+	 */
+	mux->byte_clk = devm_clk_get(&display->pdev->dev, "mux_byte_clk");
+	if (IS_ERR_OR_NULL(mux->byte_clk)) {
+		rc = PTR_ERR(mux->byte_clk);
+		pr_err("failed to get mux_byte_clk, rc=%d\n", rc);
+		mux->byte_clk = NULL;
+		rc = 0;
+		goto done;
+	}
+
+	mux->pixel_clk = devm_clk_get(&display->pdev->dev, "mux_pixel_clk");
+	if (IS_ERR_OR_NULL(mux->pixel_clk)) {
+		rc = PTR_ERR(mux->pixel_clk);
+		mux->pixel_clk = NULL;
+		pr_err("failed to get mux_pixel_clk, rc=%d\n", rc);
+		rc = 0;
+		goto done;
+	}
+
+	shadow->byte_clk = devm_clk_get(&display->pdev->dev, "shadow_byte_clk");
+	if (IS_ERR_OR_NULL(shadow->byte_clk)) {
+		rc = PTR_ERR(shadow->byte_clk);
+		shadow->byte_clk = NULL;
+		pr_err("failed to get shadow_byte_clk, rc=%d\n", rc);
+		rc = 0;
+		goto done;
+	}
+
+	shadow->pixel_clk = devm_clk_get(&display->pdev->dev,
+					 "shadow_pixel_clk");
+	if (IS_ERR_OR_NULL(shadow->pixel_clk)) {
+		rc = PTR_ERR(shadow->pixel_clk);
+		shadow->pixel_clk = NULL;
+		pr_err("failed to get shadow_pixel_clk, rc=%d\n", rc);
+		rc = 0;
+		goto done;
+	}
+
+done:
+	return 0;
+error:
+	(void)dsi_display_clocks_deinit(display);
+	return rc;
+}
+
+/*
+ * Populate the logical-to-physical lane map. Despite the "parse" name,
+ * this currently hardcodes the identity mapping (lane N -> logical lane N)
+ * rather than reading it from device tree.
+ */
+static int dsi_display_parse_lane_map(struct dsi_display *display)
+{
+ int rc = 0;
+
+ display->lane_map.physical_lane0 = DSI_LOGICAL_LANE_0;
+ display->lane_map.physical_lane1 = DSI_LOGICAL_LANE_1;
+ display->lane_map.physical_lane2 = DSI_LOGICAL_LANE_2;
+ display->lane_map.physical_lane3 = DSI_LOGICAL_LANE_3;
+ return rc;
+}
+
+/*
+ * Parse the display's device-tree node: collect up to
+ * MAX_DSI_CTRLS_PER_DISPLAY "qcom,dsi-ctrl" phandles, the matching
+ * "qcom,dsi-phy" phandles (counts must be equal), the "qcom,dsi-panel"
+ * phandle, and the lane map. Returns -ENODEV when any required phandle
+ * is missing.
+ *
+ * NOTE(review): of_parse_phandle() takes a reference on each returned
+ * node and there is no of_node_put() on the error paths (or a matching
+ * release elsewhere in this file's visible portion) — confirm the
+ * references are dropped at teardown.
+ */
+static int dsi_display_parse_dt(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ u32 phy_count = 0;
+ struct device_node *of_node;
+
+ /* Parse controllers */
+ for (i = 0; i < MAX_DSI_CTRLS_PER_DISPLAY; i++) {
+  of_node = of_parse_phandle(display->pdev->dev.of_node,
+        "qcom,dsi-ctrl", i);
+  if (!of_node) {
+   if (!i) {
+    pr_err("No controllers present\n");
+    return -ENODEV;
+   }
+   break;
+  }
+
+  display->ctrl[i].ctrl_of_node = of_node;
+  display->ctrl_count++;
+ }
+
+ /* Parse Phys */
+ for (i = 0; i < MAX_DSI_CTRLS_PER_DISPLAY; i++) {
+  of_node = of_parse_phandle(display->pdev->dev.of_node,
+        "qcom,dsi-phy", i);
+  if (!of_node) {
+   if (!i) {
+    pr_err("No PHY devices present\n");
+    rc = -ENODEV;
+    goto error;
+   }
+   break;
+  }
+
+  display->ctrl[i].phy_of_node = of_node;
+  phy_count++;
+ }
+
+ if (phy_count != display->ctrl_count) {
+  pr_err("Number of controllers does not match PHYs\n");
+  rc = -ENODEV;
+  goto error;
+ }
+
+ of_node = of_parse_phandle(display->pdev->dev.of_node,
+       "qcom,dsi-panel", 0);
+ if (!of_node) {
+  pr_err("No Panel device present\n");
+  rc = -ENODEV;
+  goto error;
+ } else {
+  display->panel_of = of_node;
+ }
+
+ rc = dsi_display_parse_lane_map(display);
+ if (rc) {
+  pr_err("Lane map not found, rc=%d\n", rc);
+  goto error;
+ }
+error:
+ return rc;
+}
+
+/**
+ * dsi_display_res_init() - acquire controller, PHY, panel and clock handles
+ * @display: Display handle with DT nodes already parsed.
+ *
+ * Takes a reference on each DSI controller and PHY, gets the panel device
+ * and initializes clock handles. On failure, references taken so far are
+ * released in reverse.
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+static int dsi_display_res_init(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ ctrl->ctrl = dsi_ctrl_get(ctrl->ctrl_of_node);
+ if (IS_ERR_OR_NULL(ctrl->ctrl)) {
+ rc = PTR_ERR(ctrl->ctrl);
+ pr_err("failed to get dsi controller, rc=%d\n", rc);
+ ctrl->ctrl = NULL;
+ goto error_ctrl_put;
+ }
+
+ ctrl->phy = dsi_phy_get(ctrl->phy_of_node);
+ if (IS_ERR_OR_NULL(ctrl->phy)) {
+ rc = PTR_ERR(ctrl->phy);
+ pr_err("failed to get phy controller, rc=%d\n", rc);
+ /* drop this slot's ctrl ref; cleanup loop covers i-1..0 */
+ dsi_ctrl_put(ctrl->ctrl);
+ ctrl->phy = NULL;
+ goto error_ctrl_put;
+ }
+ }
+
+ display->panel = dsi_panel_get(&display->pdev->dev, display->panel_of);
+ if (IS_ERR_OR_NULL(display->panel)) {
+ rc = PTR_ERR(display->panel);
+ pr_err("failed to get panel, rc=%d\n", rc);
+ display->panel = NULL;
+ goto error_ctrl_put;
+ }
+
+ /*
+ * NOTE(review): the panel reference is not released on the clock
+ * failure path below — presumably dsi_panel_get() is devm-managed;
+ * confirm against dsi_panel.c.
+ */
+ rc = dsi_display_clocks_init(display);
+ if (rc) {
+ pr_err("Failed to parse clock data, rc=%d\n", rc);
+ goto error_ctrl_put;
+ }
+
+ return 0;
+error_ctrl_put:
+ for (i = i - 1; i >= 0; i--) {
+ ctrl = &display->ctrl[i];
+ dsi_ctrl_put(ctrl->ctrl);
+ dsi_phy_put(ctrl->phy);
+ }
+ return rc;
+}
+
+/* Release the resources acquired by dsi_display_res_init(). */
+static int dsi_display_res_deinit(struct dsi_display *display)
+{
+ int i;
+ int rc;
+
+ rc = dsi_display_clocks_deinit(display);
+ if (rc)
+ pr_err("clocks deinit failed, rc=%d\n", rc);
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ struct dsi_display_ctrl *ctrl = &display->ctrl[i];
+
+ dsi_phy_put(ctrl->phy);
+ dsi_ctrl_put(ctrl->ctrl);
+ }
+
+ return rc;
+}
+
+/*
+ * A mode may only be set while every controller is powered down (at most
+ * VREGs on), link clocks are off and the PHYs are disabled.
+ */
+static int dsi_display_validate_mode_set(struct dsi_display *display,
+ struct dsi_display_mode *mode,
+ u32 flags)
+{
+ int i;
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ struct dsi_display_ctrl *ctrl = &display->ctrl[i];
+
+ if (ctrl->power_state > DSI_CTRL_POWER_VREG_ON ||
+ ctrl->phy_enabled)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * dsi_display_is_seamless_dfps_possible() - check timings allow seamless DFPS
+ * @display: Display handle; current mode is read from display->panel->mode.
+ * @tgt: Candidate target mode (per-controller timing).
+ * @dfps_type: DFPS method; only the matching front porch may differ.
+ *
+ * Every timing field must match the current mode except the front porch
+ * adjusted by the chosen DFPS method. Identical refresh rates are rejected
+ * because there would be nothing to switch.
+ *
+ * Return: true when a seamless switch to @tgt is possible.
+ */
+static bool dsi_display_is_seamless_dfps_possible(
+ const struct dsi_display *display,
+ const struct dsi_display_mode *tgt,
+ const enum dsi_dfps_type dfps_type)
+{
+ struct dsi_display_mode *cur;
+
+ if (!display || !tgt) {
+ pr_err("Invalid params\n");
+ return false;
+ }
+
+ cur = &display->panel->mode;
+
+ if (cur->timing.h_active != tgt->timing.h_active) {
+ pr_debug("timing.h_active differs %d %d\n",
+ cur->timing.h_active, tgt->timing.h_active);
+ return false;
+ }
+
+ if (cur->timing.h_back_porch != tgt->timing.h_back_porch) {
+ pr_debug("timing.h_back_porch differs %d %d\n",
+ cur->timing.h_back_porch,
+ tgt->timing.h_back_porch);
+ return false;
+ }
+
+ if (cur->timing.h_sync_width != tgt->timing.h_sync_width) {
+ pr_debug("timing.h_sync_width differs %d %d\n",
+ cur->timing.h_sync_width,
+ tgt->timing.h_sync_width);
+ return false;
+ }
+
+ /* HFP may differ only when DFPS adjusts the horizontal front porch. */
+ if (cur->timing.h_front_porch != tgt->timing.h_front_porch) {
+ pr_debug("timing.h_front_porch differs %d %d\n",
+ cur->timing.h_front_porch,
+ tgt->timing.h_front_porch);
+ if (dfps_type != DSI_DFPS_IMMEDIATE_HFP)
+ return false;
+ }
+
+ if (cur->timing.h_skew != tgt->timing.h_skew) {
+ pr_debug("timing.h_skew differs %d %d\n",
+ cur->timing.h_skew,
+ tgt->timing.h_skew);
+ return false;
+ }
+
+ /* skip polarity comparison */
+
+ if (cur->timing.v_active != tgt->timing.v_active) {
+ pr_debug("timing.v_active differs %d %d\n",
+ cur->timing.v_active,
+ tgt->timing.v_active);
+ return false;
+ }
+
+ if (cur->timing.v_back_porch != tgt->timing.v_back_porch) {
+ pr_debug("timing.v_back_porch differs %d %d\n",
+ cur->timing.v_back_porch,
+ tgt->timing.v_back_porch);
+ return false;
+ }
+
+ if (cur->timing.v_sync_width != tgt->timing.v_sync_width) {
+ pr_debug("timing.v_sync_width differs %d %d\n",
+ cur->timing.v_sync_width,
+ tgt->timing.v_sync_width);
+ return false;
+ }
+
+ /* VFP may differ only when DFPS adjusts the vertical front porch. */
+ if (cur->timing.v_front_porch != tgt->timing.v_front_porch) {
+ pr_debug("timing.v_front_porch differs %d %d\n",
+ cur->timing.v_front_porch,
+ tgt->timing.v_front_porch);
+ if (dfps_type != DSI_DFPS_IMMEDIATE_VFP)
+ return false;
+ }
+
+ /* skip polarity comparison */
+
+ /* Equal refresh rates: nothing to change, so not a DFPS switch. */
+ if (cur->timing.refresh_rate == tgt->timing.refresh_rate) {
+ pr_debug("timing.refresh_rate identical %d %d\n",
+ cur->timing.refresh_rate,
+ tgt->timing.refresh_rate);
+ return false;
+ }
+
+ /* Informational only: pixel clock stays fixed for porch-based DFPS. */
+ if (cur->pixel_clk_khz != tgt->pixel_clk_khz)
+ pr_debug("pixel_clk_khz differs %d %d\n",
+ cur->pixel_clk_khz, tgt->pixel_clk_khz);
+
+ if (cur->panel_mode != tgt->panel_mode) {
+ pr_debug("panel_mode differs %d %d\n",
+ cur->panel_mode, tgt->panel_mode);
+ return false;
+ }
+
+ /* Flag differences are logged but do not block the switch. */
+ if (cur->flags != tgt->flags)
+ pr_debug("flags differs %d %d\n", cur->flags, tgt->flags);
+
+ return true;
+}
+
+/**
+ * dsi_display_dfps_update() - seamlessly switch the panel refresh rate
+ * @display: Display handle.
+ * @dsi_mode: Target mode carrying the new timing/refresh rate.
+ *
+ * Pushes the new timing to the clock-master controller first, then to the
+ * remaining controllers, and finally caches @dsi_mode as the panel's
+ * current mode.
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+static int dsi_display_dfps_update(struct dsi_display *display,
+ struct dsi_display_mode *dsi_mode)
+{
+ struct dsi_mode_info *timing;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+ struct dsi_display_mode *panel_mode;
+ struct dsi_dfps_capabilities dfps_caps;
+ int rc = 0;
+ int i;
+
+ if (!display || !dsi_mode) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+ timing = &dsi_mode->timing;
+
+ dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
+ if (!dfps_caps.dfps_support) {
+ pr_err("dfps not supported\n");
+ return -ENOTSUPP;
+ }
+
+ if (dfps_caps.type == DSI_DFPS_IMMEDIATE_CLK) {
+ pr_err("dfps clock method not supported\n");
+ return -ENOTSUPP;
+ }
+
+ /* For split DSI, update the clock master first */
+
+ pr_debug("configuring seamless dynamic fps\n\n");
+
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+ rc = dsi_ctrl_async_timing_update(m_ctrl->ctrl, timing);
+ if (rc) {
+ /* Fix: report the master index; 'i' was uninitialized here. */
+ pr_err("[%s] failed to dfps update host_%d, rc=%d\n",
+ display->name, display->clk_master_idx, rc);
+ goto error;
+ }
+
+ /* Update the rest of the controllers */
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl || (ctrl == m_ctrl))
+ continue;
+
+ rc = dsi_ctrl_async_timing_update(ctrl->ctrl, timing);
+ if (rc) {
+ pr_err("[%s] failed to dfps update host_%d, rc=%d\n",
+ display->name, i, rc);
+ goto error;
+ }
+ }
+
+ /* Cache the newly applied mode as the panel's current mode. */
+ panel_mode = &display->panel->mode;
+ memcpy(panel_mode, dsi_mode, sizeof(*panel_mode));
+
+error:
+ return rc;
+}
+
+/**
+ * dsi_display_dfps_calc_front_porch() - derive a front porch for a new fps
+ * @clk_hz: Current pixel clock in Hz (held constant).
+ * @new_fps: Requested refresh rate.
+ * @a_total: Total pixels/lines of the dimension NOT being adjusted.
+ * @b_total: Total pixels/lines of the dimension being adjusted.
+ * @b_fp: Current front porch of the adjusted dimension.
+ * @b_fp_out: Output: recalculated front porch.
+ *
+ * Return: Zero on success, -EINVAL on bad input or a negative result.
+ */
+static int dsi_display_dfps_calc_front_porch(
+ u64 clk_hz,
+ u32 new_fps,
+ u32 a_total,
+ u32 b_total,
+ u32 b_fp,
+ u32 *b_fp_out)
+{
+ s32 b_fp_new;
+
+ if (!b_fp_out) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!a_total || !new_fps) {
+ pr_err("Invalid pixel total or new fps in mode request\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Keep clock and the other porches constant, use the new fps and
+ * solve for the front porch:
+ * clk = hor * ver * fps
+ * b_fp = clk / (a_total * fps) - (b_total - b_fp_old)
+ */
+ b_fp_new = (clk_hz / (a_total * new_fps)) - (b_total - b_fp);
+
+ pr_debug("clk %llu fps %u a %u b %u b_fp %u new_fp %d\n",
+ clk_hz, new_fps, a_total, b_total, b_fp, b_fp_new);
+
+ if (b_fp_new < 0) {
+ /* Fix: was "calcluated%d" (typo, missing space). */
+ pr_err("Invalid new_hfp calculated %d\n", b_fp_new);
+ return -EINVAL;
+ }
+
+ /*
+ * TODO: To differentiate from clock method when communicating to the
+ * other components, perhaps we should set clk here to original value
+ */
+ *b_fp_out = b_fp_new;
+
+ return 0;
+}
+
+/**
+ * dsi_display_get_dfps_timing() - adjust a mode's front porch for its fps
+ * @display: Display handle.
+ * @adj_mode: In/out mode; the relevant front porch is rewritten so the
+ * requested refresh rate is reached at a fixed pixel clock.
+ *
+ * Return: Zero on success, negative errno when seamless DFPS is not
+ * possible for @adj_mode.
+ */
+static int dsi_display_get_dfps_timing(struct dsi_display *display,
+ struct dsi_display_mode *adj_mode)
+{
+ struct dsi_dfps_capabilities dfps_caps;
+ struct dsi_display_mode per_ctrl_mode;
+ struct dsi_mode_info *timing;
+ struct dsi_ctrl *m_ctrl;
+ u64 clk_hz;
+
+ int rc = 0;
+
+ if (!display || !adj_mode) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+ m_ctrl = display->ctrl[display->clk_master_idx].ctrl;
+
+ dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
+ if (!dfps_caps.dfps_support) {
+ pr_err("dfps not supported by panel\n");
+ return -EINVAL;
+ }
+
+ /* Compare per-controller timings, not the full-panel timing. */
+ per_ctrl_mode = *adj_mode;
+ adjust_timing_by_ctrl_count(display, &per_ctrl_mode);
+
+ if (!dsi_display_is_seamless_dfps_possible(display,
+ &per_ctrl_mode, dfps_caps.type)) {
+ pr_err("seamless dynamic fps not supported for mode\n");
+ return -EINVAL;
+ }
+
+ /* TODO: Remove this direct reference to the dsi_ctrl */
+ clk_hz = m_ctrl->clk_info.link_clks.pixel_clk_rate;
+ timing = &per_ctrl_mode.timing;
+
+ switch (dfps_caps.type) {
+ case DSI_DFPS_IMMEDIATE_VFP:
+ rc = dsi_display_dfps_calc_front_porch(
+ clk_hz,
+ timing->refresh_rate,
+ DSI_H_TOTAL(timing),
+ DSI_V_TOTAL(timing),
+ timing->v_front_porch,
+ &adj_mode->timing.v_front_porch);
+ break;
+
+ case DSI_DFPS_IMMEDIATE_HFP:
+ rc = dsi_display_dfps_calc_front_porch(
+ clk_hz,
+ timing->refresh_rate,
+ DSI_V_TOTAL(timing),
+ DSI_H_TOTAL(timing),
+ timing->h_front_porch,
+ &adj_mode->timing.h_front_porch);
+ /* Scale the per-controller HFP back to full-panel width. */
+ if (!rc)
+ adj_mode->timing.h_front_porch *= display->ctrl_count;
+ break;
+
+ default:
+ pr_err("Unsupported DFPS mode %d\n", dfps_caps.type);
+ rc = -ENOTSUPP;
+ }
+
+ return rc;
+}
+
+/**
+ * dsi_display_validate_mode_seamless() - check/fix up a seamless mode switch
+ * @display: Display handle.
+ * @adj_mode: Mode to validate; on success the DFPS flags are set on it.
+ *
+ * NOTE(review): declared bool but returns the int 'rc' from
+ * dsi_display_get_dfps_timing(): 0 (false) means seamless IS possible,
+ * non-zero (true) means it is not. Callers treat the value as an error
+ * code; note also that the invalid-params path returns false ("success"
+ * to callers) — consider changing the return type to int.
+ */
+static bool dsi_display_validate_mode_seamless(struct dsi_display *display,
+ struct dsi_display_mode *adj_mode)
+{
+ int rc = 0;
+
+ if (!display || !adj_mode) {
+ pr_err("Invalid params\n");
+ return false;
+ }
+
+ /* Currently the only seamless transition is dynamic fps */
+ rc = dsi_display_get_dfps_timing(display, adj_mode);
+ if (rc) {
+ pr_debug("Dynamic FPS not supported for seamless\n");
+ } else {
+ pr_debug("Mode switch is seamless Dynamic FPS\n");
+ adj_mode->flags |= DSI_MODE_FLAG_DFPS |
+ DSI_MODE_FLAG_VBLANK_PRE_MODESET;
+ }
+
+ return rc;
+}
+
+/**
+ * dsi_display_set_mode_sub() - apply a validated mode to panel and ctrls
+ * @display: Display handle.
+ * @mode: Mode to apply (already adjusted per controller).
+ * @flags: Validation flags (unused in this function).
+ *
+ * Builds the host config for @mode, performs the seamless DFPS update when
+ * requested, then pushes the config to every controller.
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+static int dsi_display_set_mode_sub(struct dsi_display *display,
+ struct dsi_display_mode *mode,
+ u32 flags)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ rc = dsi_panel_get_host_cfg_for_mode(display->panel,
+ mode,
+ &display->config);
+ if (rc) {
+ pr_err("[%s] failed to get host config for mode, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ memcpy(&display->config.lane_map, &display->lane_map,
+ sizeof(display->lane_map));
+
+ if (mode->flags & DSI_MODE_FLAG_DFPS) {
+ rc = dsi_display_dfps_update(display, mode);
+ if (rc) {
+ pr_err("[%s]DSI dfps update failed, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ rc = dsi_ctrl_update_host_config(ctrl->ctrl, &display->config,
+ mode->flags);
+ if (rc) {
+ pr_err("[%s] failed to update ctrl config, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ }
+error:
+ return rc;
+}
+
+/**
+ * _dsi_display_dev_init - initializes the display device
+ * Initialization will acquire references to the resources required for the
+ * display hardware to function.
+ * @display: Handle to the display
+ * Returns: Zero on success
+ */
+static int _dsi_display_dev_init(struct dsi_display *display)
+{
+ int rc;
+
+ if (!display) {
+ pr_err("invalid display\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+
+ /* Resolve DT phandles first, then take references on the devices. */
+ rc = dsi_display_parse_dt(display);
+ if (rc)
+ pr_err("[%s] failed to parse dt, rc=%d\n", display->name, rc);
+
+ if (!rc) {
+ rc = dsi_display_res_init(display);
+ if (rc)
+ pr_err("[%s] failed to initialize resources, rc=%d\n",
+ display->name, rc);
+ }
+
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+/**
+ * _dsi_display_dev_deinit - deinitializes the display device
+ * All the resources acquired during device init will be released.
+ * @display: Handle to the display
+ * Returns: Zero on success
+ */
+static int _dsi_display_dev_deinit(struct dsi_display *display)
+{
+ int rc;
+
+ if (!display) {
+ pr_err("invalid display\n");
+ return -EINVAL;
+ }
+
+ /* Drop every resource reference taken by _dsi_display_dev_init(). */
+ mutex_lock(&display->display_lock);
+ rc = dsi_display_res_deinit(display);
+ if (rc)
+ pr_err("[%s] failed to deinitialize resource, rc=%d\n",
+ display->name, rc);
+ mutex_unlock(&display->display_lock);
+
+ return rc;
+}
+
+/**
+ * dsi_display_bind - bind dsi device with controlling device
+ * @dev: Pointer to base of platform device
+ * @master: Pointer to container of drm device
+ * @data: Pointer to private data
+ * Returns: Zero on success
+ */
+static int dsi_display_bind(struct device *dev,
+ struct device *master,
+ void *data)
+{
+ struct dsi_display_ctrl *display_ctrl;
+ struct drm_device *drm;
+ struct dsi_display *display;
+ struct platform_device *pdev = to_platform_device(dev);
+ int i, rc = 0;
+
+ if (!dev || !pdev || !master) {
+ pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
+ dev, pdev, master);
+ return -EINVAL;
+ }
+
+ drm = dev_get_drvdata(master);
+ display = platform_get_drvdata(pdev);
+ if (!drm || !display) {
+ pr_err("invalid param(s), drm %pK, display %pK\n",
+ drm, display);
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+
+ /* Expose debug entries before bringing up the sub-devices. */
+ rc = dsi_display_debugfs_init(display);
+ if (rc) {
+ pr_err("[%s] debugfs init failed, rc=%d\n", display->name, rc);
+ goto error;
+ }
+
+ /* Bring up each controller/PHY driver pair. */
+ for (i = 0; i < display->ctrl_count; i++) {
+ display_ctrl = &display->ctrl[i];
+
+ rc = dsi_ctrl_drv_init(display_ctrl->ctrl, display->root);
+ if (rc) {
+ pr_err("[%s] failed to initialize ctrl[%d], rc=%d\n",
+ display->name, i, rc);
+ goto error_ctrl_deinit;
+ }
+
+ rc = dsi_phy_drv_init(display_ctrl->phy);
+ if (rc) {
+ pr_err("[%s] Failed to initialize phy[%d], rc=%d\n",
+ display->name, i, rc);
+ (void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
+ goto error_ctrl_deinit;
+ }
+ }
+
+ rc = dsi_display_mipi_host_init(display);
+ if (rc) {
+ pr_err("[%s] failed to initialize mipi host, rc=%d\n",
+ display->name, rc);
+ goto error_ctrl_deinit;
+ }
+
+ rc = dsi_panel_drv_init(display->panel, &display->host);
+ if (rc) {
+ if (rc != -EPROBE_DEFER)
+ pr_err("[%s] failed to initialize panel driver, rc=%d\n",
+ display->name, rc);
+ goto error_host_deinit;
+ }
+
+ rc = dsi_panel_get_mode_count(display->panel, &display->num_of_modes);
+ if (rc) {
+ pr_err("[%s] failed to get mode count, rc=%d\n",
+ display->name, rc);
+ goto error_panel_deinit;
+ }
+
+ display->drm_dev = drm;
+ goto error;
+
+error_panel_deinit:
+ (void)dsi_panel_drv_deinit(display->panel);
+error_host_deinit:
+ (void)dsi_display_mipi_host_deinit(display);
+error_ctrl_deinit:
+ /* Unwind only the ctrl/PHY pairs initialized so far. */
+ for (i = i - 1; i >= 0; i--) {
+ display_ctrl = &display->ctrl[i];
+ (void)dsi_phy_drv_deinit(display_ctrl->phy);
+ (void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
+ }
+ (void)dsi_display_debugfs_deinit(display);
+error:
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+/**
+ * dsi_display_unbind - unbind dsi from controlling device
+ * @dev: Pointer to base of platform device
+ * @master: Pointer to container of drm device
+ * @data: Pointer to private data
+ */
+static void dsi_display_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ struct dsi_display_ctrl *display_ctrl;
+ struct dsi_display *display;
+ struct platform_device *pdev = to_platform_device(dev);
+ int i, rc = 0;
+
+ if (!dev || !pdev) {
+ pr_err("invalid param(s)\n");
+ return;
+ }
+
+ display = platform_get_drvdata(pdev);
+ if (!display) {
+ pr_err("invalid display\n");
+ return;
+ }
+
+ mutex_lock(&display->display_lock);
+
+ /* Best-effort teardown: failures are logged and teardown continues. */
+ rc = dsi_panel_drv_deinit(display->panel);
+ if (rc)
+ pr_err("[%s] failed to deinit panel driver, rc=%d\n",
+ display->name, rc);
+
+ rc = dsi_display_mipi_host_deinit(display);
+ if (rc)
+ pr_err("[%s] failed to deinit mipi hosts, rc=%d\n",
+ display->name,
+ rc);
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ display_ctrl = &display->ctrl[i];
+
+ rc = dsi_phy_drv_deinit(display_ctrl->phy);
+ if (rc)
+ pr_err("[%s] failed to deinit phy%d driver, rc=%d\n",
+ display->name, i, rc);
+
+ rc = dsi_ctrl_drv_deinit(display_ctrl->ctrl);
+ if (rc)
+ pr_err("[%s] failed to deinit ctrl%d driver, rc=%d\n",
+ display->name, i, rc);
+ }
+ (void)dsi_display_debugfs_deinit(display);
+
+ mutex_unlock(&display->display_lock);
+}
+
+/* Hooks used by the component framework to tie the display to the DRM dev. */
+static const struct component_ops dsi_display_comp_ops = {
+ .bind = dsi_display_bind,
+ .unbind = dsi_display_unbind,
+};
+
+/* Platform driver; probe/remove are non-static, declared in the header. */
+static struct platform_driver dsi_display_driver = {
+ .probe = dsi_display_dev_probe,
+ .remove = dsi_display_dev_remove,
+ .driver = {
+ .name = "msm-dsi-display",
+ .of_match_table = dsi_display_dt_match,
+ },
+};
+
+/**
+ * dsi_display_dev_probe() - platform probe for a DSI display device
+ * @pdev: Platform device carrying the display DT node.
+ *
+ * Allocates the display, registers it on the global list and, for the
+ * active display, initializes it and adds the component.
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+int dsi_display_dev_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct dsi_display *display;
+
+ if (!pdev || !pdev->dev.of_node) {
+ pr_err("pdev not found\n");
+ return -ENODEV;
+ }
+
+ display = devm_kzalloc(&pdev->dev, sizeof(*display), GFP_KERNEL);
+ if (!display)
+ return -ENOMEM;
+
+ display->name = of_get_property(pdev->dev.of_node, "label", NULL);
+
+ display->is_active = of_property_read_bool(pdev->dev.of_node,
+ "qcom,dsi-display-active");
+
+ display->display_type = of_get_property(pdev->dev.of_node,
+ "qcom,display-type", NULL);
+ if (!display->display_type)
+ display->display_type = "unknown";
+
+ mutex_init(&display->display_lock);
+
+ display->pdev = pdev;
+ platform_set_drvdata(pdev, display);
+ mutex_lock(&dsi_display_list_lock);
+ list_add(&display->list, &dsi_display_list);
+ mutex_unlock(&dsi_display_list_lock);
+
+ if (display->is_active) {
+ main_display = display;
+ rc = _dsi_display_dev_init(display);
+ if (rc) {
+ pr_err("device init failed, rc=%d\n", rc);
+ goto error_remove;
+ }
+
+ rc = component_add(&pdev->dev, &dsi_display_comp_ops);
+ if (rc) {
+ pr_err("component add failed, rc=%d\n", rc);
+ goto error_deinit;
+ }
+ }
+ return 0;
+
+error_deinit:
+ (void)_dsi_display_dev_deinit(display);
+error_remove:
+ /* Fix: don't leave a half-initialized display on the global list. */
+ main_display = NULL;
+ mutex_lock(&dsi_display_list_lock);
+ list_del(&display->list);
+ mutex_unlock(&dsi_display_list_lock);
+ platform_set_drvdata(pdev, NULL);
+ return rc;
+}
+
+int dsi_display_dev_remove(struct platform_device *pdev)
+{
+ struct dsi_display *display, *pos, *tmp;
+
+ if (!pdev) {
+ pr_err("Invalid device\n");
+ return -EINVAL;
+ }
+
+ display = platform_get_drvdata(pdev);
+
+ (void)_dsi_display_dev_deinit(display);
+
+ /* Drop the entry from the global display list, if present. */
+ mutex_lock(&dsi_display_list_lock);
+ list_for_each_entry_safe(pos, tmp, &dsi_display_list, list) {
+ if (pos != display)
+ continue;
+ list_del(&display->list);
+ break;
+ }
+ mutex_unlock(&dsi_display_list_lock);
+
+ platform_set_drvdata(pdev, NULL);
+ devm_kfree(&pdev->dev, display);
+ return 0;
+}
+
+/* Count every display currently registered on the global list. */
+int dsi_display_get_num_of_displays(void)
+{
+ struct dsi_display *pos;
+ int count = 0;
+
+ mutex_lock(&dsi_display_list_lock);
+ list_for_each_entry(pos, &dsi_display_list, list)
+ count++;
+ mutex_unlock(&dsi_display_list_lock);
+
+ return count;
+}
+
+/* Fill @display_array with active displays; returns the number stored. */
+int dsi_display_get_active_displays(void **display_array, u32 max_display_count)
+{
+ struct dsi_display *pos;
+ int count = 0;
+
+ if (!display_array || !max_display_count) {
+ if (!display_array)
+ pr_err("invalid params\n");
+ return 0;
+ }
+
+ mutex_lock(&dsi_display_list_lock);
+ list_for_each_entry(pos, &dsi_display_list, list) {
+ if (count >= max_display_count) {
+ pr_err("capping display count to %d\n", count);
+ break;
+ }
+ if (pos->is_active)
+ display_array[count++] = pos;
+ }
+ mutex_unlock(&dsi_display_list_lock);
+
+ return count;
+}
+
+/* Look up a display by label; NULL when no display matches. */
+struct dsi_display *dsi_display_get_display_by_name(const char *name)
+{
+ struct dsi_display *found = NULL, *pos;
+
+ mutex_lock(&dsi_display_list_lock);
+ /* No early break: the last matching entry encountered wins. */
+ list_for_each_entry(pos, &dsi_display_list, list) {
+ if (strcmp(name, pos->name) == 0)
+ found = pos;
+ }
+ mutex_unlock(&dsi_display_list_lock);
+
+ return found;
+}
+
+/* Set or clear the display's active flag under the display lock. */
+void dsi_display_set_active_state(struct dsi_display *display, bool is_active)
+{
+ mutex_lock(&display->display_lock);
+ display->is_active = is_active;
+ mutex_unlock(&display->display_lock);
+}
+
+/**
+ * dsi_display_drm_bridge_init() - create and register the DRM bridge
+ * @display: Display handle (must already be bound to a DRM device).
+ * @enc: Encoder the bridge attaches to.
+ *
+ * Return: Zero on success (including the already-initialized case),
+ * negative errno on failure.
+ */
+int dsi_display_drm_bridge_init(struct dsi_display *display,
+ struct drm_encoder *enc)
+{
+ int rc = 0;
+ struct dsi_bridge *bridge;
+ struct msm_drm_private *priv = NULL;
+
+ if (!display || !display->drm_dev || !enc) {
+ pr_err("invalid param(s)\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+ priv = display->drm_dev->dev_private;
+
+ if (!priv) {
+ pr_err("Private data is not present\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (display->bridge) {
+ /* Already set up; treated as success (rc stays 0). */
+ pr_err("display is already initialized\n");
+ goto error;
+ }
+
+ bridge = dsi_drm_bridge_init(display, display->drm_dev, enc);
+ if (IS_ERR_OR_NULL(bridge)) {
+ /* Fix: PTR_ERR(NULL) is 0 — don't report success for NULL. */
+ rc = bridge ? PTR_ERR(bridge) : -EINVAL;
+ pr_err("[%s] bridge init failed, %d\n", display->name, rc);
+ goto error;
+ }
+
+ display->bridge = bridge;
+ priv->bridges[priv->num_bridges++] = &bridge->base;
+
+error:
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+/* Tear down the DRM bridge created by dsi_display_drm_bridge_init(). */
+int dsi_display_drm_bridge_deinit(struct dsi_display *display)
+{
+ if (!display) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+ dsi_drm_bridge_cleanup(display->bridge);
+ display->bridge = NULL;
+ mutex_unlock(&display->display_lock);
+
+ return 0;
+}
+
+/**
+ * dsi_display_get_info() - fill MSM display info from the panel properties
+ * @info: Output info structure.
+ * @disp: Opaque pointer to a struct dsi_display.
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+int dsi_display_get_info(struct msm_display_info *info, void *disp)
+{
+ struct dsi_display *display;
+ struct dsi_panel_phy_props phy_props;
+ int i, rc;
+
+ if (!info || !disp) {
+ pr_err("invalid params\n");
+ return -EINVAL;
+ }
+ display = disp;
+
+ mutex_lock(&display->display_lock);
+ rc = dsi_panel_get_phy_props(display->panel, &phy_props);
+ if (rc) {
+ pr_err("[%s] failed to get panel phy props, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ info->intf_type = DRM_MODE_CONNECTOR_DSI;
+
+ /* Each controller drives one horizontal tile of the panel. */
+ info->num_of_h_tiles = display->ctrl_count;
+ for (i = 0; i < info->num_of_h_tiles; i++)
+ info->h_tile_instance[i] = display->ctrl[i].ctrl->index;
+
+ info->is_connected = true;
+ info->width_mm = phy_props.panel_width_mm;
+ info->height_mm = phy_props.panel_height_mm;
+ /* TODO: hard-coded limits; derive from hardware caps if possible. */
+ info->max_width = 1920;
+ info->max_height = 1080;
+ info->compression = MSM_DISPLAY_COMPRESS_NONE;
+
+ switch (display->panel->mode.panel_mode) {
+ case DSI_OP_VIDEO_MODE:
+ info->capabilities |= MSM_DISPLAY_CAP_VID_MODE;
+ break;
+ case DSI_OP_CMD_MODE:
+ info->capabilities |= MSM_DISPLAY_CAP_CMD_MODE;
+ break;
+ default:
+ /* Fix: was "unknwown" (typo). */
+ pr_err("unknown dsi panel mode %d\n",
+ display->panel->mode.panel_mode);
+ break;
+ }
+error:
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+/**
+ * dsi_display_get_modes() - enumerate panel modes, expanded per DFPS rate
+ * @display: Display handle.
+ * @modes: Output array, or NULL to query only the count.
+ * @count: In/out mode count; set when @modes is NULL, consumed otherwise.
+ *
+ * Each panel mode is duplicated once per supported refresh rate
+ * (min..max) when DFPS is available, and the horizontal timings/clock are
+ * scaled by the controller count for split displays.
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+int dsi_display_get_modes(struct dsi_display *display,
+ struct dsi_display_mode *modes,
+ u32 *count)
+{
+ int rc = 0;
+ int i;
+ struct dsi_dfps_capabilities dfps_caps;
+ int num_dfps_rates;
+
+ if (!display || !count) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+
+ rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
+ if (rc) {
+ pr_err("[%s] failed to get dfps caps from panel\n",
+ display->name);
+ goto error;
+ }
+
+ num_dfps_rates = !dfps_caps.dfps_support ? 1 :
+ dfps_caps.max_refresh_rate -
+ dfps_caps.min_refresh_rate + 1;
+
+ if (!modes) {
+ /* Inflate num_of_modes by fps in dfps */
+ *count = display->num_of_modes * num_dfps_rates;
+ goto error;
+ }
+
+ for (i = 0; i < *count; i++) {
+ /* Insert the dfps "sub-modes" between main panel modes */
+ int panel_mode_idx = i / num_dfps_rates;
+
+ rc = dsi_panel_get_mode(display->panel, panel_mode_idx, modes);
+ if (rc) {
+ pr_err("[%s] failed to get mode from panel\n",
+ display->name);
+ goto error;
+ }
+
+ if (dfps_caps.dfps_support) {
+ modes->timing.refresh_rate = dfps_caps.min_refresh_rate
+ + (i % num_dfps_rates);
+ modes->pixel_clk_khz = (DSI_H_TOTAL(&modes->timing) *
+ DSI_V_TOTAL(&modes->timing) *
+ modes->timing.refresh_rate) / 1000;
+ }
+
+ /* Scale per-controller timing up to the full panel width. */
+ if (display->ctrl_count > 1) { /* TODO: remove if */
+ modes->timing.h_active *= display->ctrl_count;
+ modes->timing.h_front_porch *= display->ctrl_count;
+ modes->timing.h_sync_width *= display->ctrl_count;
+ modes->timing.h_back_porch *= display->ctrl_count;
+ modes->timing.h_skew *= display->ctrl_count;
+ modes->pixel_clk_khz *= display->ctrl_count;
+ }
+
+ modes++;
+ }
+
+error:
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+/**
+ * dsi_display_validate_mode() - validate a mode against panel, ctrl and PHY
+ * @display: Display handle.
+ * @mode: Mode to validate. When DSI_VALIDATE_FLAG_ALLOW_ADJUST and
+ * DSI_MODE_FLAG_SEAMLESS are both set, @mode's timing and flags
+ * may be fixed up in place for a seamless DFPS switch.
+ * @flags: DSI_VALIDATE_FLAG_* modifiers.
+ *
+ * Return: Zero when the mode is usable, negative errno otherwise.
+ */
+int dsi_display_validate_mode(struct dsi_display *display,
+ struct dsi_display_mode *mode,
+ u32 flags)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+ struct dsi_display_mode adj_mode;
+
+ if (!display || !mode) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+
+ /* Validate against per-controller timing, not the full panel. */
+ adj_mode = *mode;
+ adjust_timing_by_ctrl_count(display, &adj_mode);
+
+ rc = dsi_panel_validate_mode(display->panel, &adj_mode);
+ if (rc) {
+ pr_err("[%s] panel mode validation failed, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ rc = dsi_ctrl_validate_timing(ctrl->ctrl, &adj_mode.timing);
+ if (rc) {
+ pr_err("[%s] ctrl mode validation failed, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ rc = dsi_phy_validate_mode(ctrl->phy, &adj_mode.timing);
+ if (rc) {
+ pr_err("[%s] phy mode validation failed, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+ }
+
+ /* Seamless fixup intentionally operates on the caller's @mode. */
+ if ((flags & DSI_VALIDATE_FLAG_ALLOW_ADJUST) &&
+ (mode->flags & DSI_MODE_FLAG_SEAMLESS)) {
+ rc = dsi_display_validate_mode_seamless(display, mode);
+ if (rc) {
+ pr_err("[%s] seamless not possible rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+ }
+
+error:
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+/* Validate the power state, then apply @mode to the display pipeline. */
+int dsi_display_set_mode(struct dsi_display *display,
+ struct dsi_display_mode *mode,
+ u32 flags)
+{
+ struct dsi_display_mode adj_mode;
+ int rc;
+
+ if (!display || !mode) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+
+ /* Work on a per-controller copy; the caller's mode stays untouched. */
+ adj_mode = *mode;
+ adjust_timing_by_ctrl_count(display, &adj_mode);
+
+ rc = dsi_display_validate_mode_set(display, &adj_mode, flags);
+ if (rc)
+ pr_err("[%s] mode cannot be set\n", display->name);
+
+ if (!rc) {
+ rc = dsi_display_set_mode_sub(display, &adj_mode, flags);
+ if (rc)
+ pr_err("[%s] failed to set mode\n", display->name);
+ }
+
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+/* Enable/disable the test pattern generator on every controller. */
+int dsi_display_set_tpg_state(struct dsi_display *display, bool enable)
+{
+ int i;
+ int rc;
+
+ if (!display) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ rc = dsi_ctrl_set_tpg_state(display->ctrl[i].ctrl, enable);
+ if (rc) {
+ pr_err("[%s] failed to set tpg state for host_%d\n",
+ display->name, i);
+ return rc;
+ }
+ }
+
+ display->is_tpg_enabled = enable;
+ return 0;
+}
+
+/**
+ * dsi_display_prepare() - power up the display pipeline up to panel prepare
+ * @display: Display handle.
+ *
+ * Sequence: panel pre-prepare -> ctrl/PHY power -> core clocks -> PHY
+ * reset + enable -> ctrl init -> link clocks -> host engine -> panel
+ * prepare. Any failure unwinds the steps already taken, in reverse.
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+int dsi_display_prepare(struct dsi_display *display)
+{
+ int rc = 0;
+
+ if (!display) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+
+ rc = dsi_panel_pre_prepare(display->panel);
+ if (rc) {
+ pr_err("[%s] panel pre-prepare failed, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ rc = dsi_display_ctrl_power_on(display);
+ if (rc) {
+ pr_err("[%s] failed to power on dsi controllers, rc=%d\n",
+ display->name, rc);
+ goto error_panel_post_unprep;
+ }
+
+ rc = dsi_display_phy_power_on(display);
+ if (rc) {
+ pr_err("[%s] failed to power on dsi phy, rc = %d\n",
+ display->name, rc);
+ goto error_ctrl_pwr_off;
+ }
+
+ rc = dsi_display_ctrl_core_clk_on(display);
+ if (rc) {
+ pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
+ display->name, rc);
+ goto error_phy_pwr_off;
+ }
+
+ /* PHY must be reset before it is enabled. */
+ rc = dsi_display_phy_sw_reset(display);
+ if (rc) {
+ pr_err("[%s] failed to reset phy, rc=%d\n", display->name, rc);
+ goto error_ctrl_clk_off;
+ }
+
+ rc = dsi_display_phy_enable(display);
+ if (rc) {
+ pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
+ display->name, rc);
+ goto error_ctrl_clk_off;
+ }
+
+ rc = dsi_display_ctrl_init(display);
+ if (rc) {
+ pr_err("[%s] failed to setup DSI controller, rc=%d\n",
+ display->name, rc);
+ goto error_phy_disable;
+ }
+
+ rc = dsi_display_ctrl_link_clk_on(display);
+ if (rc) {
+ pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
+ display->name, rc);
+ goto error_ctrl_deinit;
+ }
+
+ rc = dsi_display_ctrl_host_enable(display);
+ if (rc) {
+ pr_err("[%s] failed to enable DSI host, rc=%d\n",
+ display->name, rc);
+ goto error_ctrl_link_off;
+ }
+
+ rc = dsi_panel_prepare(display->panel);
+ if (rc) {
+ pr_err("[%s] panel prepare failed, rc=%d\n", display->name, rc);
+ goto error_host_engine_off;
+ }
+
+ goto error;
+
+/* Unwind labels: each undoes the step above it, in reverse order. */
+error_host_engine_off:
+ (void)dsi_display_ctrl_host_disable(display);
+error_ctrl_link_off:
+ (void)dsi_display_ctrl_link_clk_off(display);
+error_ctrl_deinit:
+ (void)dsi_display_ctrl_deinit(display);
+error_phy_disable:
+ (void)dsi_display_phy_disable(display);
+error_ctrl_clk_off:
+ (void)dsi_display_ctrl_core_clk_off(display);
+error_phy_pwr_off:
+ (void)dsi_display_phy_power_off(display);
+error_ctrl_pwr_off:
+ (void)dsi_display_ctrl_power_off(display);
+error_panel_post_unprep:
+ (void)dsi_panel_post_unprepare(display->panel);
+error:
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+/**
+ * dsi_display_enable() - enable the panel and the mode-specific engine
+ * @display: Display handle (must have been prepared first).
+ *
+ * Enables the panel, then the video or command engine depending on the
+ * configured panel mode. On engine failure the panel is disabled again.
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+int dsi_display_enable(struct dsi_display *display)
+{
+ int rc = 0;
+
+ if (!display) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+
+ rc = dsi_panel_enable(display->panel);
+ if (rc) {
+ pr_err("[%s] failed to enable DSI panel, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
+ rc = dsi_display_vid_engine_enable(display);
+ if (rc) {
+ pr_err("[%s]failed to enable DSI video engine, rc=%d\n",
+ display->name, rc);
+ goto error_disable_panel;
+ }
+ } else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
+ rc = dsi_display_cmd_engine_enable(display);
+ if (rc) {
+ pr_err("[%s]failed to enable DSI cmd engine, rc=%d\n",
+ display->name, rc);
+ goto error_disable_panel;
+ }
+ } else {
+ pr_err("[%s] Invalid configuration\n", display->name);
+ rc = -EINVAL;
+ goto error_disable_panel;
+ }
+
+ goto error;
+
+error_disable_panel:
+ (void)dsi_panel_disable(display->panel);
+error:
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+/* Run the panel's post-enable sequence under the display lock. */
+int dsi_display_post_enable(struct dsi_display *display)
+{
+ int rc;
+
+ if (!display) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+ rc = dsi_panel_post_enable(display->panel);
+ if (rc)
+ pr_err("[%s] panel post-enable failed, rc=%d\n",
+ display->name, rc);
+ mutex_unlock(&display->display_lock);
+
+ return rc;
+}
+
+/* Run the panel's pre-disable sequence under the display lock. */
+int dsi_display_pre_disable(struct dsi_display *display)
+{
+ int rc;
+
+ if (!display) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+ rc = dsi_panel_pre_disable(display->panel);
+ if (rc)
+ pr_err("[%s] panel pre-disable failed, rc=%d\n",
+ display->name, rc);
+ mutex_unlock(&display->display_lock);
+
+ return rc;
+}
+
+/**
+ * dsi_display_disable() - disable the panel and the active engine
+ * @display: Display handle.
+ *
+ * Best-effort teardown: each step logs its own failure and the sequence
+ * continues, so the returned rc is that of the last step executed.
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+int dsi_display_disable(struct dsi_display *display)
+{
+ int rc = 0;
+
+ if (!display) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+
+ rc = dsi_display_wake_up(display);
+ if (rc)
+ pr_err("[%s] display wake up failed, rc=%d\n",
+ display->name, rc);
+
+ rc = dsi_panel_disable(display->panel);
+ if (rc)
+ pr_err("[%s] failed to disable DSI panel, rc=%d\n",
+ display->name, rc);
+
+ if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
+ rc = dsi_display_vid_engine_disable(display);
+ if (rc)
+ pr_err("[%s]failed to disable DSI vid engine, rc=%d\n",
+ display->name, rc);
+ } else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
+ rc = dsi_display_cmd_engine_disable(display);
+ if (rc)
+ pr_err("[%s]failed to disable DSI cmd engine, rc=%d\n",
+ display->name, rc);
+ } else {
+ pr_err("[%s] Invalid configuration\n", display->name);
+ rc = -EINVAL;
+ }
+
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+/**
+ * dsi_display_unprepare() - power down the pipeline (reverse of prepare)
+ * @display: Display handle.
+ *
+ * Best-effort teardown in the reverse order of dsi_display_prepare().
+ * Each step logs its own failure and teardown continues; rc is
+ * overwritten by every step, so the last step's rc is returned.
+ *
+ * Return: Zero on success, negative errno on failure of the last step.
+ */
+int dsi_display_unprepare(struct dsi_display *display)
+{
+ int rc = 0;
+
+ if (!display) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+
+ rc = dsi_display_wake_up(display);
+ if (rc)
+ pr_err("[%s] display wake up failed, rc=%d\n",
+ display->name, rc);
+
+ rc = dsi_panel_unprepare(display->panel);
+ if (rc)
+ pr_err("[%s] panel unprepare failed, rc=%d\n",
+ display->name, rc);
+
+ rc = dsi_display_ctrl_host_disable(display);
+ if (rc)
+ pr_err("[%s] failed to disable DSI host, rc=%d\n",
+ display->name, rc);
+
+ rc = dsi_display_ctrl_link_clk_off(display);
+ if (rc)
+ pr_err("[%s] failed to disable Link clocks, rc=%d\n",
+ display->name, rc);
+
+ rc = dsi_display_ctrl_deinit(display);
+ if (rc)
+ pr_err("[%s] failed to deinit controller, rc=%d\n",
+ display->name, rc);
+
+ rc = dsi_display_phy_disable(display);
+ if (rc)
+ pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
+ display->name, rc);
+
+ rc = dsi_display_ctrl_core_clk_off(display);
+ if (rc)
+ pr_err("[%s] failed to disable DSI clocks, rc=%d\n",
+ display->name, rc);
+
+ rc = dsi_display_phy_power_off(display);
+ if (rc)
+ pr_err("[%s] failed to power off PHY, rc=%d\n",
+ display->name, rc);
+
+ rc = dsi_display_ctrl_power_off(display);
+ if (rc)
+ pr_err("[%s] failed to power DSI vregs, rc=%d\n",
+ display->name, rc);
+
+ rc = dsi_panel_post_unprepare(display->panel);
+ if (rc)
+ pr_err("[%s] panel post-unprepare failed, rc=%d\n",
+ display->name, rc);
+
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+/* Register the PHY and ctrl sub-drivers before the display driver. */
+static int __init dsi_display_register(void)
+{
+ dsi_phy_drv_register();
+ dsi_ctrl_drv_register();
+ return platform_driver_register(&dsi_display_driver);
+}
+
+/* Unregister in the reverse order of registration. */
+static void __exit dsi_display_unregister(void)
+{
+ platform_driver_unregister(&dsi_display_driver);
+ dsi_ctrl_drv_unregister();
+ dsi_phy_drv_unregister();
+}
+
+module_init(dsi_display_register);
+module_exit(dsi_display_unregister);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
new file mode 100644
index 000000000000..b77bf268dbd1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_DISPLAY_H_
+#define _DSI_DISPLAY_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+#include "msm_drv.h"
+#include "dsi_defs.h"
+#include "dsi_ctrl.h"
+#include "dsi_phy.h"
+#include "dsi_panel.h"
+
+#define MAX_DSI_CTRLS_PER_DISPLAY 2
+
+/*
+ * DSI Validate Mode modifiers
+ * @DSI_VALIDATE_FLAG_ALLOW_ADJUST: Allow mode validation to also do fixup
+ */
+#define DSI_VALIDATE_FLAG_ALLOW_ADJUST 0x1
+
+/**
+ * enum dsi_display_type - enumerates DSI display types
+ * @DSI_DISPLAY_SINGLE: A panel connected on a single DSI interface.
+ * @DSI_DISPLAY_EXT_BRIDGE: A bridge is connected between panel and DSI host.
+ * It utilizes a single DSI interface.
+ * @DSI_DISPLAY_SPLIT: A panel that utilizes more than one DSI
+ * interfaces.
+ * @DSI_DISPLAY_SPLIT_EXT_BRIDGE: A bridge is present between panel and DSI
+ * host. It utilizes more than one DSI interface.
+ */
+enum dsi_display_type {
+ DSI_DISPLAY_SINGLE = 0,
+ DSI_DISPLAY_EXT_BRIDGE,
+ DSI_DISPLAY_SPLIT,
+ DSI_DISPLAY_SPLIT_EXT_BRIDGE,
+ DSI_DISPLAY_MAX,
+};
+
+/**
+ * struct dsi_display_ctrl - dsi ctrl/phy information for the display
+ * @ctrl: Handle to the DSI controller device.
+ * @ctrl_of_node: pHandle to the DSI controller device.
+ * @dsi_ctrl_idx: DSI controller instance id.
+ * @power_state: Current power state of the DSI controller.
+ * @phy: Handle to the DSI PHY device.
+ * @phy_of_node: pHandle to the DSI PHY device.
+ * @phy_enabled: PHY power status.
+ */
+struct dsi_display_ctrl {
+ /* controller info */
+ struct dsi_ctrl *ctrl;
+ struct device_node *ctrl_of_node;
+ u32 dsi_ctrl_idx;
+
+ enum dsi_power_state power_state;
+
+ /* phy info */
+ struct msm_dsi_phy *phy;
+ struct device_node *phy_of_node;
+
+ bool phy_enabled;
+};
+
+/**
+ * struct dsi_display_clk_info - dsi display clock source information
+ * @src_clks: Source clocks for DSI display.
+ * @mux_clks: Mux clocks used for DFPS.
+ * @shadow_clks: Used for DFPS.
+ */
+struct dsi_display_clk_info {
+ struct dsi_clk_link_set src_clks;
+ struct dsi_clk_link_set mux_clks;
+ struct dsi_clk_link_set shadow_clks;
+};
+
+/**
+ * struct dsi_display - dsi display information
+ * @pdev: Pointer to platform device.
+ * @drm_dev: DRM device associated with the display.
+ * @name: Name of the display.
+ * @display_type: Display type as defined in device tree.
+ * @list: List pointer.
+ * @is_active: Is display active.
+ * @display_lock: Mutex for dsi_display interface.
+ * @ctrl_count: Number of DSI interfaces required by panel.
+ * @ctrl: Controller information for DSI display.
+ * @panel: Handle to DSI panel.
+ * @panel_of: pHandle to DSI panel.
+ * @type: DSI display type.
+ * @clk_master_idx: The master controller for controlling clocks. This is an
+ * index into the ctrl[MAX_DSI_CTRLS_PER_DISPLAY] array.
+ * @cmd_master_idx: The master controller for sending DSI commands to panel.
+ * @video_master_idx: The master controller for enabling video engine.
+ * @clock_info: Clock sourcing for DSI display.
+ * @lane_map: Lane mapping between DSI host and Panel.
+ * @num_of_modes: Number of modes supported by display.
+ * @is_tpg_enabled: TPG state.
+ * @host: DRM MIPI DSI Host.
+ * @connector: Pointer to DRM connector object.
+ * @bridge: Pointer to DRM bridge object.
+ * @cmd_engine_refcount: Reference count enforcing single instance of cmd eng
+ * @root: Debugfs root directory
+ */
+struct dsi_display {
+ struct platform_device *pdev;
+ struct drm_device *drm_dev;
+
+ const char *name;
+ const char *display_type;
+ struct list_head list;
+ bool is_active;
+ struct mutex display_lock;
+
+ u32 ctrl_count;
+ struct dsi_display_ctrl ctrl[MAX_DSI_CTRLS_PER_DISPLAY];
+
+ /* panel info */
+ struct dsi_panel *panel;
+ struct device_node *panel_of;
+
+ enum dsi_display_type type;
+ u32 clk_master_idx;
+ u32 cmd_master_idx;
+ u32 video_master_idx;
+
+ struct dsi_display_clk_info clock_info;
+ struct dsi_host_config config;
+ struct dsi_lane_mapping lane_map;
+ u32 num_of_modes;
+ bool is_tpg_enabled;
+
+ struct mipi_dsi_host host;
+ struct dsi_bridge *bridge;
+ u32 cmd_engine_refcount;
+
+ /* DEBUG FS */
+ struct dentry *root;
+};
+
+int dsi_display_dev_probe(struct platform_device *pdev);
+int dsi_display_dev_remove(struct platform_device *pdev);
+
+/**
+ * dsi_display_get_num_of_displays() - returns number of display devices
+ * supported.
+ *
+ * Return: number of displays.
+ */
+int dsi_display_get_num_of_displays(void);
+
+/**
+ * dsi_display_get_active_displays - returns pointers for active display devices
+ * @display_array: Pointer to display array to be filled
+ * @max_display_count: Size of display_array
+ * @Returns: Number of display entries filled
+ */
+int dsi_display_get_active_displays(void **display_array,
+ u32 max_display_count);
+
+/**
+ * dsi_display_get_display_by_name()- finds display by name
+ * @index: name of the display.
+ *
+ * Return: handle to the display or error code.
+ */
+struct dsi_display *dsi_display_get_display_by_name(const char *name);
+
+/**
+ * dsi_display_set_active_state() - sets the state of the display
+ * @display: Handle to display.
+ * @is_active: state
+ */
+void dsi_display_set_active_state(struct dsi_display *display, bool is_active);
+
+/**
+ * dsi_display_drm_bridge_init() - initializes DRM bridge object for DSI
+ * @display: Handle to the display.
+ * @encoder: Pointer to the encoder object which is connected to the
+ * display.
+ *
+ * Return: error code.
+ */
+int dsi_display_drm_bridge_init(struct dsi_display *display,
+ struct drm_encoder *enc);
+
+/**
+ * dsi_display_drm_bridge_deinit() - destroys DRM bridge for the display
+ * @display: Handle to the display.
+ *
+ * Return: error code.
+ */
+int dsi_display_drm_bridge_deinit(struct dsi_display *display);
+
+/**
+ * dsi_display_get_info() - returns the display properties
+ * @info: Pointer to the structure where info is stored.
+ * @disp: Handle to the display.
+ *
+ * Return: error code.
+ */
+int dsi_display_get_info(struct msm_display_info *info, void *disp);
+
+/**
+ * dsi_display_get_modes() - get modes supported by display
+ * @display: Handle to display.
+ * @modes; Pointer to array of modes. Memory allocated should be
+ * big enough to store (count * struct dsi_display_mode)
+ * elements. If modes pointer is NULL, number of modes will
+ * be stored in the memory pointed to by count.
+ * @count: If modes is NULL, number of modes will be stored. If
+ * not, mode information will be copied (number of modes
+ * copied will be equal to *count).
+ *
+ * Return: error code.
+ */
+int dsi_display_get_modes(struct dsi_display *display,
+ struct dsi_display_mode *modes,
+ u32 *count);
+
+/**
+ * dsi_display_validate_mode() - validates if mode is supported by display
+ * @display: Handle to display.
+ * @mode: Mode to be validated.
+ * @flags: Modifier flags.
+ *
+ * Return: 0 if supported or error code.
+ */
+int dsi_display_validate_mode(struct dsi_display *display,
+ struct dsi_display_mode *mode,
+ u32 flags);
+
+/**
+ * dsi_display_set_mode() - Set mode on the display.
+ * @display: Handle to display.
+ * @mode: mode to be set.
+ * @flags: Modifier flags.
+ *
+ * Return: error code.
+ */
+int dsi_display_set_mode(struct dsi_display *display,
+ struct dsi_display_mode *mode,
+ u32 flags);
+
+/**
+ * dsi_display_prepare() - prepare display
+ * @display: Handle to display.
+ *
+ * Prepare will perform power up sequences for the host and panel hardware.
+ * Power and clock resources might be turned on (depending on the panel mode).
+ * The video engine is not enabled.
+ *
+ * Return: error code.
+ */
+int dsi_display_prepare(struct dsi_display *display);
+
+/**
+ * dsi_display_enable() - enable display
+ * @display: Handle to display.
+ *
+ * Enable will turn on the host engine and the panel. At the end of the enable
+ * function, Host and panel hardware are ready to accept pixel data from
+ * upstream.
+ *
+ * Return: error code.
+ */
+int dsi_display_enable(struct dsi_display *display);
+
+/**
+ * dsi_display_post_enable() - perform post enable operations.
+ * @display: Handle to display.
+ *
+ * Some panels might require some commands to be sent after pixel data
+ * transmission has started. Such commands are sent as part of the post_enable
+ * function.
+ *
+ * Return: error code.
+ */
+int dsi_display_post_enable(struct dsi_display *display);
+
+/**
+ * dsi_display_pre_disable() - perform pre disable operations.
+ * @display: Handle to display.
+ *
+ * If a panel requires commands to be sent before pixel data transmission is
+ * stopped, those can be sent as part of pre_disable.
+ *
+ * Return: error code.
+ */
+int dsi_display_pre_disable(struct dsi_display *display);
+
+/**
+ * dsi_display_disable() - disable panel and host hardware.
+ * @display: Handle to display.
+ *
+ * Disable host and panel hardware and pixel data transmission can not continue.
+ *
+ * Return: error code.
+ */
+int dsi_display_disable(struct dsi_display *display);
+
+/**
+ * dsi_display_unprepare() - power off display hardware.
+ * @display: Handle to display.
+ *
+ * Host and panel hardware is turned off. Panel will be in reset state at the
+ * end of the function.
+ *
+ * Return: error code.
+ */
+int dsi_display_unprepare(struct dsi_display *display);
+
+/**
+ * dsi_display_set_tpg_state() - enable/disable the DSI test pattern generator
+ * @display: Handle to display.
+ * @enable:  True to enable TPG output.
+ *
+ * Return: error code.
+ */
+int dsi_display_set_tpg_state(struct dsi_display *display, bool enable);
+
+/**
+ * dsi_display_clock_gate() - enable/disable DSI clock gating
+ * @display: Handle to display.
+ * @enable:  Clock gating state.
+ *
+ * Return: error code.
+ */
+int dsi_display_clock_gate(struct dsi_display *display, bool enable);
+/*
+ * NOTE(review): "dispaly" is a typo carried in the exported symbol name;
+ * renaming would break callers, so it is only flagged here.
+ */
+int dsi_dispaly_static_frame(struct dsi_display *display, bool enable);
+
+/**
+ * dsi_display_set_backlight() - set panel backlight level
+ * @display: Opaque display handle (struct dsi_display *).
+ * @bl_lvl:  Backlight level to apply.
+ *
+ * Return: error code.
+ */
+int dsi_display_set_backlight(void *display, u32 bl_lvl);
+#endif /* _DSI_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c
new file mode 100644
index 000000000000..93fb041399e2
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "dsi_display_test.h"
+
+/*
+ * TODO(stub): mode dumping is not implemented yet; both parameters are
+ * currently unused. Kept so callers have a stable hook point.
+ */
+static void dsi_display_test_dump_modes(struct dsi_display_mode *mode, u32
+ count)
+{
+}
+
+/*
+ * dsi_display_test_work() - one-shot smoke test of the display bring-up path
+ *
+ * Enumerates the supported modes, sets the first one, then runs the
+ * prepare/enable sequence. On enable failure the display is unprepared
+ * again so hardware is not left half powered.
+ */
+static void dsi_display_test_work(struct work_struct *work)
+{
+ struct dsi_display_test *test;
+ struct dsi_display *display;
+ struct dsi_display_mode *modes;
+ u32 count = 0;
+ u32 size = 0;
+ int rc = 0;
+
+ test = container_of(work, struct dsi_display_test, test_work);
+
+ display = test->display;
+
+ /* First call with NULL modes only fetches the mode count. */
+ rc = dsi_display_get_modes(display, NULL, &count);
+ if (rc) {
+ pr_err("failed to get modes count, rc=%d\n", rc);
+ goto test_fail;
+ }
+
+ /*
+ * Fix: guard against a zero mode count; the original would
+ * kzalloc(0) and then dereference modes[0] below.
+ */
+ if (!count) {
+ pr_err("no modes reported by display\n");
+ goto test_fail;
+ }
+
+ size = count * sizeof(*modes);
+ modes = kzalloc(size, GFP_KERNEL);
+ if (!modes) {
+ rc = -ENOMEM;
+ goto test_fail;
+ }
+
+ rc = dsi_display_get_modes(display, modes, &count);
+ if (rc) {
+ pr_err("failed to get modes, rc=%d\n", rc);
+ goto test_fail_free_modes;
+ }
+
+ dsi_display_test_dump_modes(modes, count);
+
+ rc = dsi_display_set_mode(display, &modes[0], 0x0);
+ if (rc) {
+ pr_err("failed to set mode, rc=%d\n", rc);
+ goto test_fail_free_modes;
+ }
+
+ rc = dsi_display_prepare(display);
+ if (rc) {
+ pr_err("failed to prepare display, rc=%d\n", rc);
+ goto test_fail_free_modes;
+ }
+
+ rc = dsi_display_enable(display);
+ if (rc) {
+ pr_err("failed to enable display, rc=%d\n", rc);
+ goto test_fail_unprep_disp;
+ }
+ /*
+ * NOTE(review): modes is intentionally not freed on success in the
+ * original; presumably the display retains no pointer into it —
+ * confirm against dsi_display_set_mode before freeing here.
+ */
+ return;
+
+test_fail_unprep_disp:
+ /*
+ * Fix: the original label only logged and never actually called
+ * dsi_display_unprepare(), leaving the display prepared on an
+ * enable failure.
+ */
+ rc = dsi_display_unprepare(display);
+ if (rc)
+ pr_err("failed to unprep display, rc=%d\n", rc);
+
+test_fail_free_modes:
+ kfree(modes);
+test_fail:
+ return;
+}
+
+/**
+ * dsi_display_test_init() - run the one-shot DSI display bring-up test
+ * @display: Display to exercise; must be non-NULL.
+ *
+ * The test runs at most once per boot. The work item is invoked
+ * synchronously and never queued on a workqueue, so the context can be
+ * freed as soon as the call returns.
+ *
+ * Return: 0 on success (or if already run), negative errno on failure.
+ */
+int dsi_display_test_init(struct dsi_display *display)
+{
+ static int done;
+ struct dsi_display_test *test;
+
+ if (done)
+ return 0;
+
+ if (!display) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Fix: mark 'done' only after argument validation; the original
+ * set it first, so one early invalid call permanently disabled
+ * the test for all later valid callers.
+ */
+ done = 1;
+
+ test = kzalloc(sizeof(*test), GFP_KERNEL);
+ if (!test)
+ return -ENOMEM;
+
+ test->display = display;
+ INIT_WORK(&test->test_work, dsi_display_test_work);
+
+ dsi_display_test_work(&test->test_work);
+
+ /* Fix: the context was leaked in the original; the work ran
+ * synchronously above and holds no further references.
+ */
+ kfree(test);
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.h
new file mode 100644
index 000000000000..e36569854ab1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_DISPLAY_TEST_H_
+#define _DSI_DISPLAY_TEST_H_
+
+#include "dsi_display.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_ctrl.h"
+
+struct dsi_display_test {
+ struct dsi_display *display;
+
+ struct work_struct test_work;
+};
+
+int dsi_display_test_init(struct dsi_display *display);
+
+
+#endif /* _DSI_DISPLAY_TEST_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
new file mode 100644
index 000000000000..a1adecf81cc0
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -0,0 +1,515 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#define pr_fmt(fmt) "dsi-drm:[%s] " fmt, __func__
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
+
+#include "msm_kms.h"
+#include "sde_connector.h"
+#include "dsi_drm.h"
+
+#define to_dsi_bridge(x) container_of((x), struct dsi_bridge, base)
+#define to_dsi_state(x) container_of((x), struct dsi_connector_state, base)
+
+/*
+ * convert_to_dsi_mode() - translate a DRM display mode into DSI timing
+ *
+ * DRM expresses timings as cumulative positions (display < sync_start <
+ * sync_end < total); DSI wants discrete porch/sync widths. Note the
+ * back porch must be computed before the sync width, since the sync
+ * width formula consumes it.
+ */
+static void convert_to_dsi_mode(const struct drm_display_mode *drm_mode,
+ struct dsi_display_mode *dsi_mode)
+{
+ memset(dsi_mode, 0, sizeof(*dsi_mode));
+
+ dsi_mode->timing.h_active = drm_mode->hdisplay;
+ /* back porch = pixels from end of sync to end of line */
+ dsi_mode->timing.h_back_porch = drm_mode->htotal - drm_mode->hsync_end;
+ /* sync width = total - (front edge of sync + back porch) */
+ dsi_mode->timing.h_sync_width = drm_mode->htotal -
+ (drm_mode->hsync_start + dsi_mode->timing.h_back_porch);
+ dsi_mode->timing.h_front_porch = drm_mode->hsync_start -
+ drm_mode->hdisplay;
+ dsi_mode->timing.h_skew = drm_mode->hskew;
+
+ /* Same decomposition for the vertical axis. */
+ dsi_mode->timing.v_active = drm_mode->vdisplay;
+ dsi_mode->timing.v_back_porch = drm_mode->vtotal - drm_mode->vsync_end;
+ dsi_mode->timing.v_sync_width = drm_mode->vtotal -
+ (drm_mode->vsync_start + dsi_mode->timing.v_back_porch);
+
+ dsi_mode->timing.v_front_porch = drm_mode->vsync_start -
+ drm_mode->vdisplay;
+
+ dsi_mode->timing.refresh_rate = drm_mode->vrefresh;
+
+ dsi_mode->pixel_clk_khz = drm_mode->clock;
+ dsi_mode->panel_mode = 0; /* TODO: Panel Mode */
+
+ /* Carry the MSM private mode flags over to DSI flags. */
+ if (msm_is_mode_seamless(drm_mode))
+ dsi_mode->flags |= DSI_MODE_FLAG_SEAMLESS;
+ if (msm_is_mode_dynamic_fps(drm_mode))
+ dsi_mode->flags |= DSI_MODE_FLAG_DFPS;
+ if (msm_needs_vblank_pre_modeset(drm_mode))
+ dsi_mode->flags |= DSI_MODE_FLAG_VBLANK_PRE_MODESET;
+}
+
+static void convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
+ struct drm_display_mode *drm_mode)
+{
+ memset(drm_mode, 0, sizeof(*drm_mode));
+
+ drm_mode->hdisplay = dsi_mode->timing.h_active;
+ drm_mode->hsync_start = drm_mode->hdisplay +
+ dsi_mode->timing.h_front_porch;
+ drm_mode->hsync_end = drm_mode->hsync_start +
+ dsi_mode->timing.h_sync_width;
+ drm_mode->htotal = drm_mode->hsync_end + dsi_mode->timing.h_back_porch;
+ drm_mode->hskew = dsi_mode->timing.h_skew;
+
+ drm_mode->vdisplay = dsi_mode->timing.v_active;
+ drm_mode->vsync_start = drm_mode->vdisplay +
+ dsi_mode->timing.v_front_porch;
+ drm_mode->vsync_end = drm_mode->vsync_start +
+ dsi_mode->timing.v_sync_width;
+ drm_mode->vtotal = drm_mode->vsync_end + dsi_mode->timing.v_back_porch;
+
+ drm_mode->vrefresh = dsi_mode->timing.refresh_rate;
+ drm_mode->clock = dsi_mode->pixel_clk_khz;
+
+ if (dsi_mode->flags & DSI_MODE_FLAG_SEAMLESS)
+ drm_mode->flags |= DRM_MODE_FLAG_SEAMLESS;
+ if (dsi_mode->flags & DSI_MODE_FLAG_DFPS)
+ drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS;
+ if (dsi_mode->flags & DSI_MODE_FLAG_VBLANK_PRE_MODESET)
+ drm_mode->private_flags |= MSM_MODE_FLAG_VBLANK_PRE_MODESET;
+
+ drm_mode_set_name(drm_mode);
+}
+
+/*
+ * dsi_bridge_attach() - drm_bridge_funcs.attach callback
+ *
+ * Nothing to wire up yet; only logs the attachment.
+ *
+ * Return: 0 on success, -EINVAL if @bridge is NULL.
+ */
+static int dsi_bridge_attach(struct drm_bridge *bridge)
+{
+ struct dsi_bridge *c_bridge;
+
+ if (!bridge) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Fix: compute the container pointer only after the NULL check;
+ * the original applied container_of() to a possibly-NULL pointer
+ * before validating it.
+ */
+ c_bridge = to_dsi_bridge(bridge);
+
+ pr_debug("[%d] attached\n", c_bridge->id);
+
+ return 0;
+}
+
+/*
+ * dsi_bridge_pre_enable() - drm_bridge_funcs.pre_enable callback
+ *
+ * Applies the cached mode, then runs the prepare + enable sequence.
+ * Seamless transitions skip power sequencing entirely. Errors cannot
+ * be propagated (void callback); they are only logged.
+ */
+static void dsi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ int rc = 0;
+ struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+
+ if (!bridge) {
+ pr_err("Invalid params\n");
+ return;
+ }
+
+ /* By this point mode should have been validated through mode_fixup */
+ rc = dsi_display_set_mode(c_bridge->display,
+ &(c_bridge->dsi_mode), 0x0);
+ if (rc) {
+ pr_err("[%d] failed to perform a mode set, rc=%d\n",
+ c_bridge->id, rc);
+ return;
+ }
+
+ /* Seamless switch: hardware already running, skip power-up. */
+ if (c_bridge->dsi_mode.flags & DSI_MODE_FLAG_SEAMLESS) {
+ pr_debug("[%d] seamless pre-enable\n", c_bridge->id);
+ return;
+ }
+
+ rc = dsi_display_prepare(c_bridge->display);
+ if (rc) {
+ pr_err("[%d] DSI display prepare failed, rc=%d\n",
+ c_bridge->id, rc);
+ return;
+ }
+
+ rc = dsi_display_enable(c_bridge->display);
+ if (rc) {
+ pr_err("[%d] DSI display enable failed, rc=%d\n",
+ c_bridge->id, rc);
+ /* Unwind prepare so hardware is not left half powered. */
+ (void)dsi_display_unprepare(c_bridge->display);
+ }
+}
+
+/*
+ * dsi_bridge_enable() - drm_bridge_funcs.enable callback
+ *
+ * Runs the post-enable stage (panel commands sent after pixel data has
+ * started). Seamless transitions are a no-op. Errors are logged only.
+ */
+static void dsi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct dsi_bridge *c_bridge;
+ int rc;
+
+ if (!bridge) {
+ pr_err("Invalid params\n");
+ return;
+ }
+
+ c_bridge = to_dsi_bridge(bridge);
+
+ if (c_bridge->dsi_mode.flags & DSI_MODE_FLAG_SEAMLESS) {
+ pr_debug("[%d] seamless enable\n", c_bridge->id);
+ return;
+ }
+
+ rc = dsi_display_post_enable(c_bridge->display);
+ if (rc)
+ pr_err("[%d] DSI display post enabled failed, rc=%d\n",
+ c_bridge->id, rc);
+}
+
+/*
+ * dsi_bridge_disable() - drm_bridge_funcs.disable callback
+ *
+ * Sends any pre-disable panel commands while pixel data is still
+ * flowing. Errors are logged only (void callback).
+ */
+static void dsi_bridge_disable(struct drm_bridge *bridge)
+{
+ struct dsi_bridge *c_bridge;
+ int rc;
+
+ if (!bridge) {
+ pr_err("Invalid params\n");
+ return;
+ }
+
+ c_bridge = to_dsi_bridge(bridge);
+ rc = dsi_display_pre_disable(c_bridge->display);
+ if (rc)
+ pr_err("[%d] DSI display pre disable failed, rc=%d\n",
+ c_bridge->id, rc);
+}
+
+/*
+ * dsi_bridge_post_disable() - drm_bridge_funcs.post_disable callback
+ *
+ * Disables host/panel and then powers off the hardware. The unprepare
+ * step is skipped if disable fails, mirroring the enable path's
+ * pairing of prepare/enable.
+ */
+static void dsi_bridge_post_disable(struct drm_bridge *bridge)
+{
+ int rc = 0;
+ struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+
+ if (!bridge) {
+ pr_err("Invalid params\n");
+ return;
+ }
+
+ rc = dsi_display_disable(c_bridge->display);
+ if (rc) {
+ pr_err("[%d] DSI display disable failed, rc=%d\n",
+ c_bridge->id, rc);
+ return;
+ }
+
+ rc = dsi_display_unprepare(c_bridge->display);
+ if (rc) {
+ pr_err("[%d] DSI display unprepare failed, rc=%d\n",
+ c_bridge->id, rc);
+ return;
+ }
+}
+
+/*
+ * dsi_bridge_mode_set() - drm_bridge_funcs.mode_set callback
+ *
+ * Caches the adjusted mode (converted to DSI timing) on the bridge for
+ * use by pre_enable. The panel's own cmd/video operating mode always
+ * overrides whatever the converted mode carried.
+ *
+ * NOTE(review): assumes c_bridge->display->panel is non-NULL here —
+ * confirm panel probe ordering before relying on this path.
+ */
+static void dsi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+
+ if (!bridge || !mode || !adjusted_mode) {
+ pr_err("Invalid params\n");
+ return;
+ }
+
+ memset(&(c_bridge->dsi_mode), 0x0, sizeof(struct dsi_display_mode));
+ convert_to_dsi_mode(adjusted_mode, &(c_bridge->dsi_mode));
+
+ pr_debug("note: using panel cmd/vid mode instead of user val\n");
+ c_bridge->dsi_mode.panel_mode =
+ c_bridge->display->panel->mode.panel_mode;
+}
+
+static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ int rc = 0;
+ bool ret = true;
+ struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+ struct dsi_display_mode dsi_mode;
+
+ if (!bridge || !mode || !adjusted_mode) {
+ pr_err("Invalid params\n");
+ return false;
+ }
+
+ convert_to_dsi_mode(mode, &dsi_mode);
+
+ rc = dsi_display_validate_mode(c_bridge->display, &dsi_mode,
+ DSI_VALIDATE_FLAG_ALLOW_ADJUST);
+ if (rc) {
+ pr_err("[%d] mode is not valid, rc=%d\n", c_bridge->id, rc);
+ ret = false;
+ } else {
+ convert_to_drm_mode(&dsi_mode, adjusted_mode);
+ }
+
+ return ret;
+}
+
+static const struct drm_bridge_funcs dsi_bridge_ops = {
+ .attach = dsi_bridge_attach,
+ .mode_fixup = dsi_bridge_mode_fixup,
+ .pre_enable = dsi_bridge_pre_enable,
+ .enable = dsi_bridge_enable,
+ .disable = dsi_bridge_disable,
+ .post_disable = dsi_bridge_post_disable,
+ .mode_set = dsi_bridge_mode_set,
+};
+
+int dsi_conn_post_init(struct drm_connector *connector,
+ void *info,
+ void *display)
+{
+ struct dsi_display *dsi_display = display;
+ struct dsi_panel *panel;
+
+ if (!info || !dsi_display)
+ return -EINVAL;
+
+ sde_kms_info_add_keystr(info,
+ "display type", dsi_display->display_type);
+
+ switch (dsi_display->type) {
+ case DSI_DISPLAY_SINGLE:
+ sde_kms_info_add_keystr(info, "display config",
+ "single display");
+ break;
+ case DSI_DISPLAY_EXT_BRIDGE:
+ sde_kms_info_add_keystr(info, "display config", "ext bridge");
+ break;
+ case DSI_DISPLAY_SPLIT:
+ sde_kms_info_add_keystr(info, "display config",
+ "split display");
+ break;
+ case DSI_DISPLAY_SPLIT_EXT_BRIDGE:
+ sde_kms_info_add_keystr(info, "display config",
+ "split ext bridge");
+ break;
+ default:
+ pr_debug("invalid display type:%d\n", dsi_display->type);
+ break;
+ }
+
+ if (!dsi_display->panel) {
+ pr_debug("invalid panel data\n");
+ goto end;
+ }
+
+ panel = dsi_display->panel;
+ sde_kms_info_add_keystr(info, "panel name", panel->name);
+
+ switch (panel->mode.panel_mode) {
+ case DSI_OP_VIDEO_MODE:
+ sde_kms_info_add_keystr(info, "panel mode", "video");
+ break;
+ case DSI_OP_CMD_MODE:
+ sde_kms_info_add_keystr(info, "panel mode", "command");
+ sde_kms_info_add_keyint(info, "mdp_transfer_time_us",
+ panel->cmd_config.mdp_transfer_time_us);
+ break;
+ default:
+ pr_debug("invalid panel type:%d\n", panel->mode.panel_mode);
+ break;
+ }
+ sde_kms_info_add_keystr(info, "dfps support",
+ panel->dfps_caps.dfps_support ? "true" : "false");
+
+ switch (panel->phy_props.rotation) {
+ case DSI_PANEL_ROTATE_NONE:
+ sde_kms_info_add_keystr(info, "panel orientation", "none");
+ break;
+ case DSI_PANEL_ROTATE_H_FLIP:
+ sde_kms_info_add_keystr(info, "panel orientation", "horz flip");
+ break;
+ case DSI_PANEL_ROTATE_V_FLIP:
+ sde_kms_info_add_keystr(info, "panel orientation", "vert flip");
+ break;
+ default:
+ pr_debug("invalid panel rotation:%d\n",
+ panel->phy_props.rotation);
+ break;
+ }
+
+ switch (panel->bl_config.type) {
+ case DSI_BACKLIGHT_PWM:
+ sde_kms_info_add_keystr(info, "backlight type", "pwm");
+ break;
+ case DSI_BACKLIGHT_WLED:
+ sde_kms_info_add_keystr(info, "backlight type", "wled");
+ break;
+ case DSI_BACKLIGHT_DCS:
+ sde_kms_info_add_keystr(info, "backlight type", "dcs");
+ break;
+ default:
+ pr_debug("invalid panel backlight type:%d\n",
+ panel->bl_config.type);
+ break;
+ }
+
+end:
+ return 0;
+}
+
+/*
+ * dsi_conn_detect() - sde_connector detect callback
+ *
+ * Panels without hot-plug capability are always reported connected;
+ * hot-pluggable ones report their live connection state. Also updates
+ * the connector's physical dimensions from the display info.
+ */
+enum drm_connector_status dsi_conn_detect(struct drm_connector *conn,
+ bool force,
+ void *display)
+{
+ enum drm_connector_status status = connector_status_unknown;
+ struct msm_display_info info;
+ int rc;
+
+ if (!conn || !display)
+ return status;
+
+ /* get display dsi_info */
+ memset(&info, 0x0, sizeof(info));
+ rc = dsi_display_get_info(&info, display);
+ if (rc) {
+ pr_err("failed to get display info, rc=%d\n", rc);
+ return connector_status_disconnected;
+ }
+
+ if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+ status = (info.is_connected ? connector_status_connected :
+ connector_status_disconnected);
+ else
+ status = connector_status_connected;
+
+ conn->display_info.width_mm = info.width_mm;
+ conn->display_info.height_mm = info.height_mm;
+
+ return status;
+}
+
+/*
+ * dsi_connector_get_modes() - sde_connector get_modes callback
+ *
+ * Queries the display's mode list, converts each mode to a DRM mode
+ * and registers it on the connector via drm_mode_probed_add().
+ *
+ * Return: number of modes added (0 on failure paths that clear count;
+ * -ENOMEM through the u32 'count' when duplication fails).
+ */
+int dsi_connector_get_modes(struct drm_connector *connector,
+ void *display)
+{
+ u32 count = 0;
+ u32 size = 0;
+ /*
+ * Fix: must start as NULL — the first 'goto error' below reaches
+ * kfree(modes) before any assignment, which in the original freed
+ * an uninitialized stack pointer.
+ */
+ struct dsi_display_mode *modes = NULL;
+ struct drm_display_mode drm_mode;
+ int rc, i;
+
+ if (sde_connector_get_panel(connector)) {
+ /*
+ * TODO: If drm_panel is attached, query modes from the panel.
+ * This is complicated in split dsi cases because panel is not
+ * attached to both connectors.
+ */
+ goto end;
+ }
+
+ /* First call with NULL modes only retrieves the count. */
+ rc = dsi_display_get_modes(display, NULL, &count);
+ if (rc) {
+ pr_err("failed to get num of modes, rc=%d\n", rc);
+ goto error;
+ }
+
+ size = count * sizeof(*modes);
+ modes = kzalloc(size, GFP_KERNEL);
+ if (!modes) {
+ count = 0;
+ goto end;
+ }
+
+ rc = dsi_display_get_modes(display, modes, &count);
+ if (rc) {
+ pr_err("failed to get modes, rc=%d\n", rc);
+ count = 0;
+ goto error;
+ }
+
+ for (i = 0; i < count; i++) {
+ struct drm_display_mode *m;
+
+ memset(&drm_mode, 0x0, sizeof(drm_mode));
+ convert_to_drm_mode(&modes[i], &drm_mode);
+ m = drm_mode_duplicate(connector->dev, &drm_mode);
+ if (!m) {
+ pr_err("failed to add mode %ux%u\n",
+ drm_mode.hdisplay,
+ drm_mode.vdisplay);
+ /*
+ * NOTE(review): -ENOMEM stored in a u32 relies on
+ * two's-complement wrap to survive the int return;
+ * consider making 'count' a local int instead.
+ */
+ count = -ENOMEM;
+ goto error;
+ }
+ m->width_mm = connector->display_info.width_mm;
+ m->height_mm = connector->display_info.height_mm;
+ drm_mode_probed_add(connector, m);
+ }
+error:
+ kfree(modes);
+end:
+ pr_debug("MODE COUNT =%d\n\n", count);
+ return count;
+}
+
+/*
+ * dsi_conn_mode_valid() - sde_connector mode_valid callback
+ *
+ * Converts the DRM mode to DSI timing and asks the display layer to
+ * validate it (with adjustment allowed).
+ */
+enum drm_mode_status dsi_conn_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display)
+{
+ struct dsi_display_mode dsi_mode;
+ int rc;
+
+ if (!connector || !mode) {
+ pr_err("Invalid params\n");
+ return MODE_ERROR;
+ }
+
+ convert_to_dsi_mode(mode, &dsi_mode);
+
+ rc = dsi_display_validate_mode(display, &dsi_mode,
+ DSI_VALIDATE_FLAG_ALLOW_ADJUST);
+ if (!rc)
+ return MODE_OK;
+
+ pr_err("mode not supported, rc=%d\n", rc);
+ return MODE_BAD;
+}
+
+/*
+ * dsi_drm_bridge_init() - allocate and attach a DSI drm_bridge
+ *
+ * Allocates the bridge wrapper, hooks up dsi_bridge_ops and attaches
+ * it to the DRM device, linking it into @encoder.
+ *
+ * NOTE(review): bridge->id is never assigned and stays 0 (kzalloc);
+ * log messages keyed on [%d] will show 0 for every bridge.
+ *
+ * Return: bridge handle on success, ERR_PTR on failure (never NULL).
+ */
+struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
+ struct drm_device *dev,
+ struct drm_encoder *encoder)
+{
+ int rc = 0;
+ struct dsi_bridge *bridge;
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ bridge->display = display;
+ bridge->base.funcs = &dsi_bridge_ops;
+ bridge->base.encoder = encoder;
+
+ rc = drm_bridge_attach(dev, &bridge->base);
+ if (rc) {
+ pr_err("failed to attach bridge, rc=%d\n", rc);
+ goto error_free_bridge;
+ }
+
+ encoder->bridge = &bridge->base;
+ return bridge;
+error_free_bridge:
+ kfree(bridge);
+error:
+ return ERR_PTR(rc);
+}
+
+/*
+ * dsi_drm_bridge_cleanup() - detach and free a DSI drm_bridge
+ *
+ * Unlinks the bridge from its encoder (if any) and releases it.
+ * Safe to call with NULL.
+ */
+void dsi_drm_bridge_cleanup(struct dsi_bridge *bridge)
+{
+ struct drm_encoder *encoder;
+
+ if (!bridge)
+ return;
+
+ encoder = bridge->base.encoder;
+ if (encoder)
+ encoder->bridge = NULL;
+
+ kfree(bridge);
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
new file mode 100644
index 000000000000..934899bd2068
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_DRM_H_
+#define _DSI_DRM_H_
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "msm_drv.h"
+
+#include "dsi_display.h"
+
+struct dsi_bridge {
+ struct drm_bridge base;
+ u32 id;
+
+ struct dsi_display *display;
+ struct dsi_display_mode dsi_mode;
+};
+
+/**
+ * dsi_conn_post_init - callback to perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to sde connector info structure
+ * @display: Pointer to private display handle
+ * Returns: Zero on success
+ */
+int dsi_conn_post_init(struct drm_connector *connector,
+ void *info,
+ void *display);
+
+/**
+ * dsi_conn_detect - callback to determine if connector is connected
+ * @connector: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ * Returns: Connector 'is connected' status
+ */
+enum drm_connector_status dsi_conn_detect(struct drm_connector *conn,
+ bool force,
+ void *display);
+
+/**
+ * dsi_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * Returns: Number of modes added
+ */
+int dsi_connector_get_modes(struct drm_connector *connector,
+ void *display);
+
+/**
+ * dsi_conn_mode_valid - callback to determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status dsi_conn_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display);
+
+struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
+ struct drm_device *dev,
+ struct drm_encoder *encoder);
+
+void dsi_drm_bridge_cleanup(struct dsi_bridge *bridge);
+
+#endif /* _DSI_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
new file mode 100644
index 000000000000..01535c02a7f8
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_HW_H_
+#define _DSI_HW_H_
+#include <linux/io.h>
+
+#define DSI_R32(dsi_hw, off) readl_relaxed((dsi_hw)->base + (off))
+#define DSI_W32(dsi_hw, off, val) \
+ do {\
+ pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
+ (dsi_hw)->index, #off, val); \
+ writel_relaxed((val), (dsi_hw)->base + (off)); \
+ } while (0)
+
+#define DSI_MMSS_MISC_R32(dsi_hw, off) \
+ readl_relaxed((dsi_hw)->mmss_misc_base + (off))
+#define DSI_MMSS_MISC_W32(dsi_hw, off, val) \
+ do {\
+ pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
+ (dsi_hw)->index, #off, val); \
+ writel_relaxed((val), (dsi_hw)->mmss_misc_base + (off)); \
+ } while (0)
+
+#define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off))
+#define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off))
+
+#endif /* _DSI_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
new file mode 100644
index 000000000000..a7a39e685d4d
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -0,0 +1,1998 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#include "dsi_panel.h"
+#include "dsi_ctrl_hw.h"
+
+#define DSI_PANEL_DEFAULT_LABEL "Default dsi panel"
+
+#define DEFAULT_MDP_TRANSFER_TIME 14000
+
+/*
+ * dsi_panel_vreg_get() - acquire every regulator listed in panel->power_info
+ * @panel: panel whose supply table was filled by DT parsing
+ *
+ * On failure, regulators already acquired are released in reverse order
+ * and their handles cleared.
+ *
+ * Return: 0 on success, regulator-core error code otherwise.
+ */
+static int dsi_panel_vreg_get(struct dsi_panel *panel)
+{
+	int rc = 0;
+	int i;
+	struct regulator *vreg = NULL;
+
+	for (i = 0; i < panel->power_info.count; i++) {
+		vreg = devm_regulator_get(panel->parent,
+					  panel->power_info.vregs[i].vreg_name);
+		/* PTR_ERR_OR_ZERO() replaces the deprecated PTR_RET() */
+		rc = PTR_ERR_OR_ZERO(vreg);
+		if (rc) {
+			pr_err("failed to get %s regulator\n",
+			       panel->power_info.vregs[i].vreg_name);
+			goto error_put;
+		}
+		panel->power_info.vregs[i].vreg = vreg;
+	}
+
+	return rc;
+error_put:
+	/* unwind only the handles acquired so far, newest first */
+	for (i = i - 1; i >= 0; i--) {
+		devm_regulator_put(panel->power_info.vregs[i].vreg);
+		panel->power_info.vregs[i].vreg = NULL;
+	}
+	return rc;
+}
+
+/* Release all regulator handles acquired by dsi_panel_vreg_get(). */
+static int dsi_panel_vreg_put(struct dsi_panel *panel)
+{
+	int idx;
+
+	/* release in reverse acquisition order */
+	for (idx = panel->power_info.count - 1; idx >= 0; idx--)
+		devm_regulator_put(panel->power_info.vregs[idx].vreg);
+
+	return 0;
+}
+
+/*
+ * Request every panel-owned GPIO (reset, display-enable, backlight-enable).
+ * Invalid (unconfigured) GPIOs are skipped; on failure, GPIOs already
+ * taken are freed before returning the error.
+ */
+static int dsi_panel_gpio_request(struct dsi_panel *panel)
+{
+	int rc;
+	struct dsi_panel_reset_config *cfg = &panel->reset_config;
+
+	if (gpio_is_valid(cfg->reset_gpio)) {
+		rc = gpio_request(cfg->reset_gpio, "reset_gpio");
+		if (rc) {
+			pr_err("request for reset_gpio failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (gpio_is_valid(cfg->disp_en_gpio)) {
+		rc = gpio_request(cfg->disp_en_gpio, "disp_en_gpio");
+		if (rc) {
+			pr_err("request for disp_en_gpio failed, rc=%d\n", rc);
+			goto free_reset;
+		}
+	}
+
+	if (gpio_is_valid(panel->bl_config.en_gpio)) {
+		rc = gpio_request(panel->bl_config.en_gpio, "bklt_en_gpio");
+		if (rc) {
+			pr_err("request for bklt_en_gpio failed, rc=%d\n", rc);
+			goto free_disp_en;
+		}
+	}
+
+	return 0;
+
+free_disp_en:
+	if (gpio_is_valid(cfg->disp_en_gpio))
+		gpio_free(cfg->disp_en_gpio);
+free_reset:
+	if (gpio_is_valid(cfg->reset_gpio))
+		gpio_free(cfg->reset_gpio);
+	return rc;
+}
+
+/* Free every GPIO taken in dsi_panel_gpio_request(); checks mirror it. */
+static int dsi_panel_gpio_release(struct dsi_panel *panel)
+{
+	struct dsi_panel_reset_config *cfg = &panel->reset_config;
+
+	if (gpio_is_valid(cfg->reset_gpio))
+		gpio_free(cfg->reset_gpio);
+
+	if (gpio_is_valid(cfg->disp_en_gpio))
+		gpio_free(cfg->disp_en_gpio);
+
+	if (gpio_is_valid(panel->bl_config.en_gpio))
+		gpio_free(panel->bl_config.en_gpio);
+
+	return 0;
+}
+
+/*
+ * dsi_panel_reset() - drive the panel enable/reset GPIO power-up sequence
+ * @panel: panel to bring out of reset
+ *
+ * Asserts the display-enable GPIO (if configured), walks the DT-provided
+ * reset level/delay sequence on the reset GPIO, then asserts the
+ * backlight-enable GPIO.
+ *
+ * Return: 0 on success, gpiolib error otherwise.
+ */
+static int dsi_panel_reset(struct dsi_panel *panel)
+{
+	int rc = 0;
+	struct dsi_panel_reset_config *r_config = &panel->reset_config;
+	int i;
+
+	if (gpio_is_valid(r_config->disp_en_gpio)) {
+		/*
+		 * Fix: the original drove bl_config.en_gpio here although
+		 * the guard checks disp_en_gpio — assert the display-enable
+		 * GPIO itself.
+		 */
+		rc = gpio_direction_output(r_config->disp_en_gpio, 1);
+		if (rc) {
+			pr_err("unable to set dir for disp gpio rc=%d\n", rc);
+			goto exit;
+		}
+	}
+
+	if (r_config->count) {
+		rc = gpio_direction_output(r_config->reset_gpio,
+					   r_config->sequence[0].level);
+		if (rc) {
+			pr_err("unable to set dir for rst gpio rc=%d\n", rc);
+			goto exit;
+		}
+	}
+
+	for (i = 0; i < r_config->count; i++) {
+		gpio_set_value(r_config->reset_gpio,
+			       r_config->sequence[i].level);
+
+		if (r_config->sequence[i].sleep_ms)
+			/* give usleep_range a real window (timers-howto) */
+			usleep_range(r_config->sequence[i].sleep_ms * 1000,
+				     r_config->sequence[i].sleep_ms * 1000
+				     + 100);
+	}
+
+	if (gpio_is_valid(panel->bl_config.en_gpio)) {
+		rc = gpio_direction_output(panel->bl_config.en_gpio, 1);
+		if (rc)
+			pr_err("unable to set dir for bklt gpio rc=%d\n", rc);
+	}
+exit:
+	return rc;
+}
+
+/* Switch the panel pinctrl between its "active" and "suspend" states. */
+static int dsi_panel_set_pinctrl_state(struct dsi_panel *panel, bool enable)
+{
+	struct pinctrl_state *target = enable ? panel->pinctrl.active :
+						panel->pinctrl.suspend;
+	int rc = pinctrl_select_state(panel->pinctrl.pinctrl, target);
+
+	if (rc)
+		pr_err("[%s] failed to set pin state, rc=%d\n", panel->name,
+		       rc);
+
+	return rc;
+}
+
+
+/*
+ * dsi_panel_power_on() - power-up sequence: regulators -> pinctrl -> reset
+ * @panel: panel to power on
+ *
+ * On any step failing, the previously completed steps are unwound in
+ * reverse order; unwind return values are deliberately ignored
+ * (the (void) casts) so the original error code is preserved.
+ *
+ * Return: 0 on success, error code of the first failing step.
+ */
+static int dsi_panel_power_on(struct dsi_panel *panel)
+{
+ int rc = 0;
+
+ rc = dsi_pwr_enable_regulator(&panel->power_info, true);
+ if (rc) {
+ pr_err("[%s] failed to enable vregs, rc=%d\n", panel->name, rc);
+ goto exit;
+ }
+
+ rc = dsi_panel_set_pinctrl_state(panel, true);
+ if (rc) {
+ pr_err("[%s] failed to set pinctrl, rc=%d\n", panel->name, rc);
+ goto error_disable_vregs;
+ }
+
+ rc = dsi_panel_reset(panel);
+ if (rc) {
+ pr_err("[%s] failed to reset panel, rc=%d\n", panel->name, rc);
+ goto error_disable_gpio;
+ }
+
+ goto exit;
+
+error_disable_gpio:
+ if (gpio_is_valid(panel->reset_config.disp_en_gpio))
+ gpio_set_value(panel->reset_config.disp_en_gpio, 0);
+
+ if (gpio_is_valid(panel->bl_config.en_gpio))
+ gpio_set_value(panel->bl_config.en_gpio, 0);
+
+ (void)dsi_panel_set_pinctrl_state(panel, false);
+
+error_disable_vregs:
+ (void)dsi_pwr_enable_regulator(&panel->power_info, false);
+
+exit:
+ return rc;
+}
+
+/*
+ * dsi_panel_power_off() - power-down sequence: GPIOs -> pinctrl -> vregs
+ * @panel: panel to power off
+ *
+ * Best-effort: each step is attempted even if an earlier one failed, so
+ * the last failing step's code is what gets returned.
+ *
+ * Return: 0 on success, error code of the last failing step.
+ */
+static int dsi_panel_power_off(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (gpio_is_valid(panel->reset_config.disp_en_gpio))
+		gpio_set_value(panel->reset_config.disp_en_gpio, 0);
+
+	if (gpio_is_valid(panel->reset_config.reset_gpio))
+		gpio_set_value(panel->reset_config.reset_gpio, 0);
+
+	rc = dsi_panel_set_pinctrl_state(panel, false);
+	if (rc) {
+		pr_err("[%s] failed set pinctrl state, rc=%d\n", panel->name,
+		       rc);
+	}
+
+	rc = dsi_pwr_enable_regulator(&panel->power_info, false);
+	if (rc)
+		/* fix: this is the disable path; the old text said "enable" */
+		pr_err("[%s] failed to disable vregs, rc=%d\n",
+		       panel->name, rc);
+
+	return rc;
+}
+/*
+ * dsi_panel_tx_cmd_set() - transmit one parsed DT command set to the panel
+ * @panel: panel whose mipi host ops perform the transfer
+ * @type: index into panel->cmd_sets[] selecting which set to send
+ *
+ * Sends each packet in order through the host's transfer op, honouring
+ * the per-packet post_wait_ms delay. An empty set is a silent no-op.
+ *
+ * Return: 0 on success (or empty set), negative transfer error otherwise.
+ */
+static int dsi_panel_tx_cmd_set(struct dsi_panel *panel,
+ enum dsi_cmd_set_type type)
+{
+ int rc = 0, i = 0;
+ ssize_t len;
+ struct dsi_cmd_desc *cmds = panel->cmd_sets[type].cmds;
+ u32 count = panel->cmd_sets[type].count;
+ enum dsi_cmd_set_state state = panel->cmd_sets[type].state;
+ const struct mipi_dsi_host_ops *ops = panel->host->ops;
+
+ if (count == 0) {
+ pr_debug("[%s] No commands to be sent for state(%d)\n",
+ panel->name, type);
+ goto error;
+ }
+
+ for (i = 0; i < count; i++) {
+ /* TODO: handle last command */
+ if (state == DSI_CMD_SET_STATE_LP)
+ cmds->msg.flags |= MIPI_DSI_MSG_USE_LPM;
+ /* NOTE(review): the |= above permanently sets the flag on the
+ * cached descriptor; harmless while a set's state never
+ * changes — confirm if HS/LP can vary between transmissions. */
+
+ len = ops->transfer(panel->host, &cmds->msg);
+ if (len < 0) {
+ rc = len;
+ pr_err("failed to set cmds(%d), rc=%d\n", type, rc);
+ goto error;
+ }
+ if (cmds->post_wait_ms)
+ msleep(cmds->post_wait_ms);
+ cmds++;
+ }
+error:
+ return rc;
+}
+
+/* Drop the pinctrl handle obtained in dsi_panel_pinctrl_init(). */
+static int dsi_panel_pinctrl_deinit(struct dsi_panel *panel)
+{
+	devm_pinctrl_put(panel->pinctrl.pinctrl);
+	return 0;
+}
+
+/*
+ * Look up the pinctrl handle plus its "panel_active" and "panel_suspend"
+ * states from the parent device.
+ */
+static int dsi_panel_pinctrl_init(struct dsi_panel *panel)
+{
+	int rc;
+
+	/* TODO: pinctrl is defined in dsi dt node */
+	panel->pinctrl.pinctrl = devm_pinctrl_get(panel->parent);
+	if (IS_ERR_OR_NULL(panel->pinctrl.pinctrl)) {
+		rc = PTR_ERR(panel->pinctrl.pinctrl);
+		pr_err("failed to get pinctrl, rc=%d\n", rc);
+		return rc;
+	}
+
+	panel->pinctrl.active = pinctrl_lookup_state(panel->pinctrl.pinctrl,
+						     "panel_active");
+	if (IS_ERR_OR_NULL(panel->pinctrl.active)) {
+		rc = PTR_ERR(panel->pinctrl.active);
+		pr_err("failed to get pinctrl active state, rc=%d\n", rc);
+		return rc;
+	}
+
+	panel->pinctrl.suspend = pinctrl_lookup_state(panel->pinctrl.pinctrl,
+						      "panel_suspend");
+	if (IS_ERR_OR_NULL(panel->pinctrl.suspend)) {
+		rc = PTR_ERR(panel->pinctrl.suspend);
+		pr_err("failed to get pinctrl suspend state, rc=%d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_LEDS_TRIGGERS
+/*
+ * dsi_panel_led_bl_register() - bind the WLED backlight to an LED trigger
+ * @panel: owning panel (used for log prefixes)
+ * @bl: backlight config whose ->wled trigger handle is filled here
+ *
+ * Return: 0 on success, -EINVAL if trigger registration failed,
+ * -EPROBE_DEFER if no LED classdev has attached to the trigger yet.
+ */
+static int dsi_panel_led_bl_register(struct dsi_panel *panel,
+ struct dsi_backlight_config *bl)
+{
+ int rc = 0;
+
+ led_trigger_register_simple("bkl-trigger", &bl->wled);
+
+ /* LED APIs don't tell us directly whether a classdev has yet
+ * been registered to service this trigger. Until classdev is
+ * registered, calling led_trigger has no effect, and doesn't
+ * fail. Classdevs are associated with any registered triggers
+ * when they do register, but that is too late for FBCon.
+ * Check the cdev list directly and defer if appropriate.
+ */
+ if (!bl->wled) {
+ pr_err("[%s] backlight registration failed\n", panel->name);
+ rc = -EINVAL;
+ } else {
+ read_lock(&bl->wled->leddev_list_lock);
+ if (list_empty(&bl->wled->led_cdevs))
+ rc = -EPROBE_DEFER;
+ read_unlock(&bl->wled->leddev_list_lock);
+
+ if (rc) {
+ pr_info("[%s] backlight %s not ready, defer probe\n",
+ panel->name, bl->wled->name);
+ led_trigger_unregister_simple(bl->wled);
+ }
+ }
+
+ return rc;
+}
+#else
+/* Stub: without CONFIG_LEDS_TRIGGERS there is nothing to register. */
+static int dsi_panel_led_bl_register(struct dsi_panel *panel,
+ struct dsi_backlight_config *bl)
+{
+ return 0;
+}
+#endif
+
+/*
+ * dsi_panel_set_backlight() - apply a backlight level
+ * @panel: panel whose backlight is adjusted
+ * @bl_lvl: requested brightness value, forwarded to the LED trigger
+ *
+ * Only the WLED (LED-trigger based) backlight type is handled.
+ *
+ * Return: 0 on success, -ENOTSUPP for any other backlight type.
+ */
+int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl)
+{
+	struct dsi_backlight_config *bl = &panel->bl_config;
+	int rc = 0;
+
+	if (bl->type == DSI_BACKLIGHT_WLED) {
+		led_trigger_event(bl->wled, bl_lvl);
+	} else {
+		pr_err("Backlight type(%d) not supported\n", bl->type);
+		rc = -ENOTSUPP;
+	}
+
+	return rc;
+}
+
+/* Register the backlight backend that matches the configured type. */
+static int dsi_panel_bl_register(struct dsi_panel *panel)
+{
+	struct dsi_backlight_config *bl = &panel->bl_config;
+	int rc;
+
+	switch (bl->type) {
+	case DSI_BACKLIGHT_WLED:
+		rc = dsi_panel_led_bl_register(panel, bl);
+		break;
+	default:
+		pr_err("Backlight type(%d) not supported\n", bl->type);
+		rc = -ENOTSUPP;
+		break;
+	}
+
+	return rc;
+}
+
+/* Tear down whatever dsi_panel_bl_register() set up for this type. */
+static int dsi_panel_bl_unregister(struct dsi_panel *panel)
+{
+	struct dsi_backlight_config *bl = &panel->bl_config;
+	int rc = 0;
+
+	switch (bl->type) {
+	case DSI_BACKLIGHT_WLED:
+		led_trigger_unregister_simple(bl->wled);
+		break;
+	default:
+		pr_err("Backlight type(%d) not supported\n", bl->type);
+		rc = -ENOTSUPP;
+		break;
+	}
+
+	return rc;
+}
+/*
+ * dsi_panel_parse_timing() - read the display timing block from DT
+ * @mode: timing structure to fill
+ * @of_node: panel device-tree node
+ *
+ * All properties are mandatory except qcom,mdss-dsi-h-sync-skew.
+ *
+ * Return: 0 on success, OF read error if a required property is missing.
+ */
+static int dsi_panel_parse_timing(struct dsi_mode_info *mode,
+				  struct device_node *of_node)
+{
+	int rc = 0;
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-framerate",
+				  &mode->refresh_rate);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-panel-framerate, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-width",
+				  &mode->h_active);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-panel-width, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-front-porch",
+				  &mode->h_front_porch);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-h-front-porch, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-back-porch",
+				  &mode->h_back_porch);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-h-back-porch, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-pulse-width",
+				  &mode->h_sync_width);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-h-pulse-width, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-sync-skew",
+				  &mode->h_skew);
+	if (rc)
+		/* optional property: log at debug level, not error */
+		pr_debug("qcom,mdss-dsi-h-sync-skew is not defined, rc=%d\n",
+			 rc);
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-height",
+				  &mode->v_active);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-panel-height, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-v-back-porch",
+				  &mode->v_back_porch);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-v-back-porch, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-v-front-porch",
+				  &mode->v_front_porch);
+	if (rc) {
+		/* fix: this message used to name the back-porch property */
+		pr_err("failed to read qcom,mdss-dsi-v-front-porch, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-v-pulse-width",
+				  &mode->v_sync_width);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-v-pulse-width, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+/*
+ * Map the DT bpp value onto a DSI pixel format; unknown bpp values fall
+ * back to RGB888, and 18bpp honours the optional loose-packing property.
+ */
+static int dsi_panel_parse_pixel_format(struct dsi_host_common_cfg *host,
+					struct device_node *of_node,
+					const char *name)
+{
+	u32 bpp = 0;
+	enum dsi_pixel_format fmt = DSI_PIXEL_FORMAT_RGB888;
+	int rc;
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-bpp", &bpp);
+	if (rc) {
+		pr_err("[%s] failed to read qcom,mdss-dsi-bpp, rc=%d\n",
+		       name, rc);
+		return rc;
+	}
+
+	switch (bpp) {
+	case 3:
+		fmt = DSI_PIXEL_FORMAT_RGB111;
+		break;
+	case 8:
+		fmt = DSI_PIXEL_FORMAT_RGB332;
+		break;
+	case 12:
+		fmt = DSI_PIXEL_FORMAT_RGB444;
+		break;
+	case 16:
+		fmt = DSI_PIXEL_FORMAT_RGB565;
+		break;
+	case 18: {
+		/* packed RGB666 unless DT explicitly asks for loose */
+		const char *packing = of_get_property(of_node,
+					"qcom,mdss-dsi-pixel-packing", NULL);
+
+		fmt = (packing && !strcmp(packing, "loose")) ?
+			DSI_PIXEL_FORMAT_RGB666_LOOSE :
+			DSI_PIXEL_FORMAT_RGB666;
+		break;
+	}
+	case 24:
+	default:
+		fmt = DSI_PIXEL_FORMAT_RGB888;
+		break;
+	}
+
+	host->dst_format = fmt;
+	return 0;
+}
+
+/*
+ * Build the enabled-data-lane mask from the four per-lane DT booleans.
+ * At least one lane must be enabled.
+ */
+static int dsi_panel_parse_lane_states(struct dsi_host_common_cfg *host,
+				       struct device_node *of_node,
+				       const char *name)
+{
+	static const char * const lane_props[] = {
+		"qcom,mdss-dsi-lane-0-state",
+		"qcom,mdss-dsi-lane-1-state",
+		"qcom,mdss-dsi-lane-2-state",
+		"qcom,mdss-dsi-lane-3-state",
+	};
+	static const u32 lane_masks[] = {
+		DSI_DATA_LANE_0,
+		DSI_DATA_LANE_1,
+		DSI_DATA_LANE_2,
+		DSI_DATA_LANE_3,
+	};
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < 4; i++)
+		if (of_property_read_bool(of_node, lane_props[i]))
+			host->data_lanes |= lane_masks[i];
+
+	if (host->data_lanes == 0) {
+		pr_err("[%s] No data lanes are enabled, rc=%d\n", name, rc);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Parse qcom,mdss-dsi-color-order into a channel swap mode; absent
+ * property falls back to RGB, unknown values are rejected.
+ */
+static int dsi_panel_parse_color_swap(struct dsi_host_common_cfg *host,
+				      struct device_node *of_node,
+				      const char *name)
+{
+	const char *order = of_get_property(of_node,
+					    "qcom,mdss-dsi-color-order", NULL);
+
+	/* bit swap on color channel is not defined in dt */
+	host->bit_swap_red = false;
+	host->bit_swap_green = false;
+	host->bit_swap_blue = false;
+
+	if (!order) {
+		pr_debug("[%s] Falling back to default color order\n", name);
+		host->swap_mode = DSI_COLOR_SWAP_RGB;
+		return 0;
+	}
+
+	if (!strcmp(order, "rgb_swap_rgb")) {
+		host->swap_mode = DSI_COLOR_SWAP_RGB;
+	} else if (!strcmp(order, "rgb_swap_rbg")) {
+		host->swap_mode = DSI_COLOR_SWAP_RBG;
+	} else if (!strcmp(order, "rgb_swap_brg")) {
+		host->swap_mode = DSI_COLOR_SWAP_BRG;
+	} else if (!strcmp(order, "rgb_swap_grb")) {
+		host->swap_mode = DSI_COLOR_SWAP_GRB;
+	} else if (!strcmp(order, "rgb_swap_gbr")) {
+		host->swap_mode = DSI_COLOR_SWAP_GBR;
+	} else {
+		pr_err("[%s] Unrecognized color order-%s\n", name, order);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * dsi_panel_parse_triggers() - parse MDP and DMA command trigger types
+ * @host: host-common config to fill
+ * @of_node: panel device-tree node
+ * @name: panel name for log prefixes
+ *
+ * Both properties are optional and default to software trigger.
+ *
+ * Return: 0 on success, -EINVAL for unrecognized trigger strings.
+ */
+static int dsi_panel_parse_triggers(struct dsi_host_common_cfg *host,
+				    struct device_node *of_node,
+				    const char *name)
+{
+	const char *trig;
+	int rc = 0;
+
+	trig = of_get_property(of_node, "qcom,mdss-dsi-mdp-trigger", NULL);
+	if (trig) {
+		if (!strcmp(trig, "none")) {
+			host->mdp_cmd_trigger = DSI_TRIGGER_NONE;
+		} else if (!strcmp(trig, "trigger_te")) {
+			host->mdp_cmd_trigger = DSI_TRIGGER_TE;
+		} else if (!strcmp(trig, "trigger_sw")) {
+			host->mdp_cmd_trigger = DSI_TRIGGER_SW;
+		} else if (!strcmp(trig, "trigger_sw_te")) {
+			host->mdp_cmd_trigger = DSI_TRIGGER_SW_TE;
+		} else {
+			pr_err("[%s] Unrecognized mdp trigger type (%s)\n",
+			       name, trig);
+			rc = -EINVAL;
+		}
+
+	} else {
+		pr_debug("[%s] Falling back to default MDP trigger\n",
+			 name);
+		host->mdp_cmd_trigger = DSI_TRIGGER_SW;
+	}
+
+	trig = of_get_property(of_node, "qcom,mdss-dsi-dma-trigger", NULL);
+	if (trig) {
+		if (!strcmp(trig, "none")) {
+			host->dma_cmd_trigger = DSI_TRIGGER_NONE;
+		} else if (!strcmp(trig, "trigger_te")) {
+			host->dma_cmd_trigger = DSI_TRIGGER_TE;
+		} else if (!strcmp(trig, "trigger_sw")) {
+			host->dma_cmd_trigger = DSI_TRIGGER_SW;
+		} else if (!strcmp(trig, "trigger_sw_seof")) {
+			host->dma_cmd_trigger = DSI_TRIGGER_SW_SEOF;
+		} else if (!strcmp(trig, "trigger_sw_te")) {
+			host->dma_cmd_trigger = DSI_TRIGGER_SW_TE;
+		} else {
+			/* fix: this branch used to say "mdp trigger" */
+			pr_err("[%s] Unrecognized dma trigger type (%s)\n",
+			       name, trig);
+			rc = -EINVAL;
+		}
+
+	} else {
+		/* fix: this branch used to say "MDP trigger" */
+		pr_debug("[%s] Falling back to default DMA trigger\n", name);
+		host->dma_cmd_trigger = DSI_TRIGGER_SW;
+	}
+
+	return rc;
+}
+
+/*
+ * Parse optional host-common tweaks (clock lane timing, EOT handling).
+ * Missing properties fall back to fixed defaults; never fails.
+ */
+static int dsi_panel_parse_misc_host_config(struct dsi_host_common_cfg *host,
+					    struct device_node *of_node,
+					    const char *name)
+{
+	u32 val = 0;
+
+	if (of_property_read_u32(of_node, "qcom,mdss-dsi-t-clk-post", &val)) {
+		pr_debug("[%s] Fallback to default t_clk_post value\n", name);
+		host->t_clk_post = 0x03;
+	} else {
+		host->t_clk_post = val;
+		pr_debug("[%s] t_clk_post = %d\n", name, val);
+	}
+
+	val = 0;
+	if (of_property_read_u32(of_node, "qcom,mdss-dsi-t-clk-pre", &val)) {
+		pr_debug("[%s] Fallback to default t_clk_pre value\n", name);
+		host->t_clk_pre = 0x24;
+	} else {
+		host->t_clk_pre = val;
+		pr_debug("[%s] t_clk_pre = %d\n", name, val);
+	}
+
+	host->ignore_rx_eot = of_property_read_bool(of_node,
+						"qcom,mdss-dsi-rx-eot-ignore");
+	host->append_tx_eot = of_property_read_bool(of_node,
+						"qcom,mdss-dsi-tx-eot-append");
+
+	return 0;
+}
+
+/* Parse all host-common sub-configs; stops at the first failing parser. */
+static int dsi_panel_parse_host_config(struct dsi_panel *panel,
+				       struct device_node *of_node)
+{
+	int rc;
+
+	rc = dsi_panel_parse_pixel_format(&panel->host_config, of_node,
+					  panel->name);
+	if (rc) {
+		pr_err("[%s] failed to get pixel format, rc=%d\n",
+		       panel->name, rc);
+		return rc;
+	}
+
+	rc = dsi_panel_parse_lane_states(&panel->host_config, of_node,
+					 panel->name);
+	if (rc) {
+		pr_err("[%s] failed to parse lane states, rc=%d\n",
+		       panel->name, rc);
+		return rc;
+	}
+
+	rc = dsi_panel_parse_color_swap(&panel->host_config, of_node,
+					panel->name);
+	if (rc) {
+		pr_err("[%s] failed to parse color swap config, rc=%d\n",
+		       panel->name, rc);
+		return rc;
+	}
+
+	rc = dsi_panel_parse_triggers(&panel->host_config, of_node,
+				      panel->name);
+	if (rc) {
+		pr_err("[%s] failed to parse triggers, rc=%d\n",
+		       panel->name, rc);
+		return rc;
+	}
+
+	rc = dsi_panel_parse_misc_host_config(&panel->host_config, of_node,
+					      panel->name);
+	if (rc) {
+		pr_err("[%s] failed to parse misc host config, rc=%d\n",
+		       panel->name, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * dsi_panel_parse_dfps_caps() - parse dynamic-FPS support from DT
+ * @dfps_caps: capability struct to fill
+ * @of_node: panel device-tree node
+ * @name: panel name for log prefixes
+ *
+ * If qcom,mdss-dsi-pan-enable-dynamic-fps is absent the feature is simply
+ * marked unsupported. Otherwise the update type and min refresh rate are
+ * mandatory; the max rate falls back to the base panel framerate.
+ *
+ * Return: 0 on success, -EINVAL on missing/unrecognized mandatory values.
+ */
+static int dsi_panel_parse_dfps_caps(struct dsi_dfps_capabilities *dfps_caps,
+ struct device_node *of_node,
+ const char *name)
+{
+ int rc = 0;
+ bool supported = false;
+ const char *type;
+ u32 val = 0;
+
+ supported = of_property_read_bool(of_node,
+ "qcom,mdss-dsi-pan-enable-dynamic-fps");
+
+ if (!supported) {
+ pr_debug("[%s] DFPS is not supported\n", name);
+ dfps_caps->dfps_support = false;
+ } else {
+
+ type = of_get_property(of_node,
+ "qcom,mdss-dsi-pan-fps-update",
+ NULL);
+ if (!type) {
+ pr_err("[%s] dfps type not defined\n", name);
+ rc = -EINVAL;
+ goto error;
+ } else if (!strcmp(type, "dfps_suspend_resume_mode")) {
+ dfps_caps->type = DSI_DFPS_SUSPEND_RESUME;
+ } else if (!strcmp(type, "dfps_immediate_clk_mode")) {
+ dfps_caps->type = DSI_DFPS_IMMEDIATE_CLK;
+ } else if (!strcmp(type, "dfps_immediate_porch_mode_hfp")) {
+ dfps_caps->type = DSI_DFPS_IMMEDIATE_HFP;
+ } else if (!strcmp(type, "dfps_immediate_porch_mode_vfp")) {
+ dfps_caps->type = DSI_DFPS_IMMEDIATE_VFP;
+ } else {
+ pr_err("[%s] dfps type is not recognized\n", name);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ rc = of_property_read_u32(of_node,
+ "qcom,mdss-dsi-min-refresh-rate",
+ &val);
+ if (rc) {
+ pr_err("[%s] Min refresh rate is not defined\n", name);
+ rc = -EINVAL;
+ goto error;
+ }
+ dfps_caps->min_refresh_rate = val;
+
+ rc = of_property_read_u32(of_node,
+ "qcom,mdss-dsi-max-refresh-rate",
+ &val);
+ if (rc) {
+ /* no explicit max: reuse the panel's base framerate */
+ pr_debug("[%s] Using default refresh rate\n", name);
+ rc = of_property_read_u32(of_node,
+ "qcom,mdss-dsi-panel-framerate",
+ &val);
+ if (rc) {
+ pr_err("[%s] max refresh rate is not defined\n",
+ name);
+ rc = -EINVAL;
+ goto error;
+ }
+ }
+ dfps_caps->max_refresh_rate = val;
+
+ if (dfps_caps->min_refresh_rate > dfps_caps->max_refresh_rate) {
+ pr_err("[%s] min rate > max rate\n", name);
+ rc = -EINVAL;
+ }
+
+ pr_debug("[%s] DFPS is supported %d-%d, mode %d\n", name,
+ dfps_caps->min_refresh_rate,
+ dfps_caps->max_refresh_rate,
+ dfps_caps->type);
+ dfps_caps->dfps_support = true;
+ }
+
+error:
+ return rc;
+}
+
+/*
+ * dsi_panel_parse_video_host_config() - parse video-engine DT options
+ * @cfg: video engine config to fill
+ * @of_node: panel device-tree node
+ * @name: panel name for log prefixes
+ *
+ * Every property is optional with a documented fallback; only malformed
+ * values (bad h-sync-pulse number, unknown traffic mode) fail the parse.
+ *
+ * Return: 0 on success, -EINVAL on malformed values.
+ */
+static int dsi_panel_parse_video_host_config(struct dsi_video_engine_cfg *cfg,
+					     struct device_node *of_node,
+					     const char *name)
+{
+	int rc = 0;
+	const char *traffic_mode;
+	u32 vc_id = 0;
+	u32 val = 0;
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-sync-pulse", &val);
+	if (rc) {
+		pr_debug("[%s] fallback to default h-sync-pulse\n", name);
+		cfg->pulse_mode_hsa_he = false;
+		rc = 0; /* optional property; absence is not an error */
+	} else if (val == 1) {
+		cfg->pulse_mode_hsa_he = true;
+	} else if (val == 0) {
+		cfg->pulse_mode_hsa_he = false;
+	} else {
+		pr_err("[%s] Unrecognized value for mdss-dsi-h-sync-pulse\n",
+		       name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	cfg->hfp_lp11_en = of_property_read_bool(of_node,
+					"qcom,mdss-dsi-hfp-power-mode");
+
+	cfg->hbp_lp11_en = of_property_read_bool(of_node,
+					"qcom,mdss-dsi-hbp-power-mode");
+
+	cfg->hsa_lp11_en = of_property_read_bool(of_node,
+					"qcom,mdss-dsi-hsa-power-mode");
+
+	cfg->last_line_interleave_en = of_property_read_bool(of_node,
+					"qcom,mdss-dsi-last-line-interleave");
+
+	cfg->eof_bllp_lp11_en = of_property_read_bool(of_node,
+					"qcom,mdss-dsi-bllp-eof-power-mode");
+
+	cfg->bllp_lp11_en = of_property_read_bool(of_node,
+					"qcom,mdss-dsi-bllp-power-mode");
+
+	traffic_mode = of_get_property(of_node,
+				       "qcom,mdss-dsi-traffic-mode",
+				       NULL);
+	if (!traffic_mode) {
+		pr_debug("[%s] Falling back to default traffic mode\n", name);
+		cfg->traffic_mode = DSI_VIDEO_TRAFFIC_SYNC_PULSES;
+	} else if (!strcmp(traffic_mode, "non_burst_sync_pulse")) {
+		cfg->traffic_mode = DSI_VIDEO_TRAFFIC_SYNC_PULSES;
+	} else if (!strcmp(traffic_mode, "non_burst_sync_event")) {
+		cfg->traffic_mode = DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS;
+	} else if (!strcmp(traffic_mode, "burst_mode")) {
+		cfg->traffic_mode = DSI_VIDEO_TRAFFIC_BURST_MODE;
+	} else {
+		pr_err("[%s] Unrecognized traffic mode-%s\n", name,
+		       traffic_mode);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-virtual-channel-id",
+				  &vc_id);
+	if (rc) {
+		pr_debug("[%s] Fallback to default vc id\n", name);
+		cfg->vc_id = 0;
+		/*
+		 * Fix: rc used to leak out non-zero here, failing the whole
+		 * parse whenever this optional property was absent.
+		 */
+		rc = 0;
+	} else {
+		cfg->vc_id = vc_id;
+	}
+
+error:
+	return rc;
+}
+
+/*
+ * dsi_panel_parse_cmd_host_config() - parse command-engine DT options
+ * @cfg: command engine config to fill
+ * @of_node: panel device-tree node
+ * @name: panel name for log prefixes
+ *
+ * All properties are optional with fixed fallbacks; only a malformed
+ * te-dcs-command value fails the parse.
+ *
+ * Return: 0 on success, -EINVAL on malformed values.
+ */
+static int dsi_panel_parse_cmd_host_config(struct dsi_cmd_engine_cfg *cfg,
+					   struct device_node *of_node,
+					   const char *name)
+{
+	u32 val = 0;
+	int rc = 0;
+
+	if (of_property_read_u32(of_node, "qcom,mdss-dsi-wr-mem-start",
+				 &val)) {
+		pr_debug("[%s] Fallback to default wr-mem-start\n", name);
+		cfg->wr_mem_start = 0x2C;
+	} else {
+		cfg->wr_mem_start = val;
+	}
+
+	val = 0;
+	if (of_property_read_u32(of_node, "qcom,mdss-dsi-wr-mem-continue",
+				 &val)) {
+		pr_debug("[%s] Fallback to default wr-mem-continue\n", name);
+		cfg->wr_mem_continue = 0x3C;
+	} else {
+		cfg->wr_mem_continue = val;
+	}
+
+	/* TODO: fix following */
+	cfg->max_cmd_packets_interleave = 0;
+
+	val = 0;
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-te-dcs-command",
+				  &val);
+	if (rc) {
+		pr_debug("[%s] fallback to default te-dcs-cmd\n", name);
+		cfg->insert_dcs_command = true;
+		/*
+		 * Fix: rc used to leak out non-zero here, failing the whole
+		 * parse whenever this optional property was absent.
+		 */
+		rc = 0;
+	} else if (val == 1) {
+		cfg->insert_dcs_command = true;
+	} else if (val == 0) {
+		cfg->insert_dcs_command = false;
+	} else {
+		pr_err("[%s] Unrecognized value for mdss-dsi-te-dcs-command\n",
+		       name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (of_property_read_u32(of_node, "qcom,mdss-mdp-transfer-time-us",
+				 &val)) {
+		pr_debug("[%s] Fallback to default transfer-time-us\n", name);
+		cfg->mdp_transfer_time_us = DEFAULT_MDP_TRANSFER_TIME;
+	} else {
+		cfg->mdp_transfer_time_us = val;
+	}
+
+error:
+	return rc;
+}
+
+/*
+ * Determine video vs command mode from DT (defaulting to video) and
+ * parse the matching engine configuration.
+ */
+static int dsi_panel_parse_panel_mode(struct dsi_panel *panel,
+				      struct device_node *of_node)
+{
+	enum dsi_op_mode panel_mode = DSI_OP_VIDEO_MODE;
+	const char *mode;
+	int rc;
+
+	mode = of_get_property(of_node, "qcom,mdss-dsi-panel-type", NULL);
+	if (!mode) {
+		pr_debug("[%s] Fallback to default panel mode\n", panel->name);
+	} else if (!strcmp(mode, "dsi_video_mode")) {
+		panel_mode = DSI_OP_VIDEO_MODE;
+	} else if (!strcmp(mode, "dsi_cmd_mode")) {
+		panel_mode = DSI_OP_CMD_MODE;
+	} else {
+		pr_err("[%s] Unrecognized panel type-%s\n", panel->name, mode);
+		return -EINVAL;
+	}
+
+	if (panel_mode == DSI_OP_VIDEO_MODE) {
+		rc = dsi_panel_parse_video_host_config(&panel->video_config,
+						       of_node,
+						       panel->name);
+		if (rc) {
+			pr_err("[%s] Failed to parse video host cfg, rc=%d\n",
+			       panel->name, rc);
+			return rc;
+		}
+	} else {
+		rc = dsi_panel_parse_cmd_host_config(&panel->cmd_config,
+						     of_node,
+						     panel->name);
+		if (rc) {
+			pr_err("[%s] Failed to parse cmd host config, rc=%d\n",
+			       panel->name, rc);
+			return rc;
+		}
+	}
+
+	panel->mode.panel_mode = panel_mode;
+	return 0;
+}
+
+/*
+ * Parse physical panel dimensions (optional, default 0) and orientation
+ * (optional, default no rotation; unknown strings rejected).
+ */
+static int dsi_panel_parse_phy_props(struct dsi_panel_phy_props *props,
+				     struct device_node *of_node,
+				     const char *name)
+{
+	u32 dim = 0;
+	const char *orientation;
+
+	if (of_property_read_u32(of_node,
+				 "qcom,mdss-pan-physical-width-dimension",
+				 &dim)) {
+		pr_debug("[%s] Physical panel width is not defined\n", name);
+		props->panel_width_mm = 0;
+	} else {
+		props->panel_width_mm = dim;
+	}
+
+	if (of_property_read_u32(of_node,
+				 "qcom,mdss-pan-physical-height-dimension",
+				 &dim)) {
+		pr_debug("[%s] Physical panel height is not defined\n", name);
+		props->panel_height_mm = 0;
+	} else {
+		props->panel_height_mm = dim;
+	}
+
+	orientation = of_get_property(of_node,
+				      "qcom,mdss-dsi-panel-orientation",
+				      NULL);
+	if (!orientation) {
+		props->rotation = DSI_PANEL_ROTATE_NONE;
+	} else if (!strcmp(orientation, "180")) {
+		props->rotation = DSI_PANEL_ROTATE_HV_FLIP;
+	} else if (!strcmp(orientation, "hflip")) {
+		props->rotation = DSI_PANEL_ROTATE_H_FLIP;
+	} else if (!strcmp(orientation, "vflip")) {
+		props->rotation = DSI_PANEL_ROTATE_V_FLIP;
+	} else {
+		pr_err("[%s] Unrecognized panel rotation-%s\n", name,
+		       orientation);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+/*
+ * DT property names for each command set, indexed by enum
+ * dsi_cmd_set_type: cmd_set_prop_map[i] is the command payload property,
+ * cmd_set_state_map[i] its matching "-state" (LP/HS) property.
+ * NOTE(review): one pair is asymmetric — "post-panel-on-command" vs
+ * "post-on-command-state" — confirm against the DT bindings that this is
+ * intentional and not a typo in one of the tables.
+ */
+const char *cmd_set_prop_map[DSI_CMD_SET_MAX] = {
+ "qcom,mdss-dsi-pre-on-command",
+ "qcom,mdss-dsi-on-command",
+ "qcom,mdss-dsi-post-panel-on-command",
+ "qcom,mdss-dsi-pre-off-command",
+ "qcom,mdss-dsi-off-command",
+ "qcom,mdss-dsi-post-off-command",
+ "qcom,mdss-dsi-pre-res-switch",
+ "qcom,mdss-dsi-res-switch",
+ "qcom,mdss-dsi-post-res-switch",
+ "qcom,cmd-to-video-mode-switch-commands",
+ "qcom,cmd-to-video-mode-post-switch-commands",
+ "qcom,video-to-cmd-mode-switch-commands",
+ "qcom,video-to-cmd-mode-post-switch-commands",
+ "qcom,mdss-dsi-panel-status-command",
+};
+
+const char *cmd_set_state_map[DSI_CMD_SET_MAX] = {
+ "qcom,mdss-dsi-pre-on-command-state",
+ "qcom,mdss-dsi-on-command-state",
+ "qcom,mdss-dsi-post-on-command-state",
+ "qcom,mdss-dsi-pre-off-command-state",
+ "qcom,mdss-dsi-off-command-state",
+ "qcom,mdss-dsi-post-off-command-state",
+ "qcom,mdss-dsi-pre-res-switch-state",
+ "qcom,mdss-dsi-res-switch-state",
+ "qcom,mdss-dsi-post-res-switch-state",
+ "qcom,cmd-to-video-mode-switch-commands-state",
+ "qcom,cmd-to-video-mode-post-switch-commands-state",
+ "qcom,video-to-cmd-mode-switch-commands-state",
+ "qcom,video-to-cmd-mode-post-switch-commands-state",
+ "qcom,mdss-dsi-panel-status-command-state",
+};
+
+/*
+ * dsi_panel_get_cmd_pkt_count() - count DSI command packets in a DT blob
+ * @data: raw property payload; each packet is a 7-byte header
+ *        (type, last, channel, ack, wait, len_hi, len_lo) plus payload
+ * @length: total size of @data in bytes
+ * @cnt: out parameter, number of complete packets found
+ *
+ * Trailing bytes shorter than one header are silently ignored.
+ *
+ * Return: 0 on success, -EINVAL if a length field overruns @data.
+ */
+static int dsi_panel_get_cmd_pkt_count(const char *data, u32 length, u32 *cnt)
+{
+	const u32 cmd_set_min_size = 7;
+	u32 count = 0;
+	u32 packet_length;
+	u32 tmp;
+
+	while (length >= cmd_set_min_size) {
+		packet_length = cmd_set_min_size;
+		/*
+		 * Mask the length bytes to u8: on targets where plain char
+		 * is signed, values >= 0x80 would otherwise sign-extend.
+		 */
+		tmp = (((u8)data[5] << 8) | ((u8)data[6]));
+		packet_length += tmp;
+		if (packet_length > length) {
+			pr_err("FORMAT ERROR\n");
+			return -EINVAL;
+		}
+		length -= packet_length;
+		data += packet_length;
+		count++;
+	}
+
+	*cnt = count;
+	return 0;
+}
+
+/*
+ * dsi_panel_create_cmd_packets() - decode @count packets from a DT blob
+ * @data: raw property payload (validated by dsi_panel_get_cmd_pkt_count)
+ * @length: total size of @data in bytes
+ * @count: number of packets to decode into @cmd
+ * @cmd: caller-allocated array of @count descriptors to fill
+ *
+ * Each packet's payload is copied into a fresh kzalloc'd buffer owned by
+ * cmd[i].msg.tx_buf; on failure all buffers allocated so far are freed.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+static int dsi_panel_create_cmd_packets(const char *data,
+					u32 length,
+					u32 count,
+					struct dsi_cmd_desc *cmd)
+{
+	int rc = 0;
+	int i, j;
+	u8 *payload;
+
+	for (i = 0; i < count; i++) {
+		u32 size;
+
+		cmd[i].msg.type = data[0];
+		cmd[i].last_command = (data[1] == 1 ? true : false);
+		cmd[i].msg.channel = data[2];
+		cmd[i].msg.flags |= (data[3] == 1 ? MIPI_DSI_MSG_REQ_ACK : 0);
+		cmd[i].post_wait_ms = data[4];
+		/* mask to u8 so a high length byte cannot sign-extend */
+		cmd[i].msg.tx_len = (((u8)data[5] << 8) | ((u8)data[6]));
+
+		size = cmd[i].msg.tx_len * sizeof(u8);
+
+		payload = kzalloc(size, GFP_KERNEL);
+		if (!payload) {
+			rc = -ENOMEM;
+			goto error_free_payloads;
+		}
+
+		for (j = 0; j < cmd[i].msg.tx_len; j++)
+			payload[j] = data[7 + j];
+
+		cmd[i].msg.tx_buf = payload;
+		data += (7 + cmd[i].msg.tx_len);
+	}
+
+	return rc;
+error_free_payloads:
+	/*
+	 * Fix: 'cmd' is never advanced in the loop, so the original
+	 * 'cmd--' walked below the array base; free by index instead.
+	 */
+	for (i = i - 1; i >= 0; i--)
+		kfree(cmd[i].msg.tx_buf);
+
+	return rc;
+}
+
+/* Free every packet payload in @set, then the descriptor array itself. */
+static void dsi_panel_destroy_cmd_packets(struct dsi_panel_cmd_set *set)
+{
+	u32 idx;
+
+	for (idx = 0; idx < set->count; idx++)
+		kfree(set->cmds[idx].msg.tx_buf);
+
+	kfree(set->cmds);
+}
+
+/*
+ * dsi_panel_parse_cmd_sets_sub() - parse one command set from DT
+ * @cmd: command set to fill (cmds array is allocated here)
+ * @type: which set, selects the property names via the maps above
+ * @of_node: panel device-tree node
+ *
+ * Return: 0 on success, -ENOTSUPP if the property is absent, -ENOMEM or
+ * -EINVAL on malformed content. On failure nothing remains allocated.
+ */
+static int dsi_panel_parse_cmd_sets_sub(struct dsi_panel_cmd_set *cmd,
+					enum dsi_cmd_set_type type,
+					struct device_node *of_node)
+{
+	int rc = 0;
+	u32 length = 0;
+	u32 size;
+	const char *data;
+	const char *state;
+	u32 packet_count = 0;
+
+	data = of_get_property(of_node, cmd_set_prop_map[type], &length);
+	if (!data) {
+		pr_err("%s commands not defined\n", cmd_set_prop_map[type]);
+		rc = -ENOTSUPP;
+		goto error;
+	}
+
+	rc = dsi_panel_get_cmd_pkt_count(data, length, &packet_count);
+	if (rc) {
+		pr_err("commands failed, rc=%d\n", rc);
+		goto error;
+	}
+	pr_debug("[%s] packet-count=%d, %d\n", cmd_set_prop_map[type],
+		 packet_count, length);
+
+	size = packet_count * sizeof(*cmd->cmds);
+	cmd->cmds = kzalloc(size, GFP_KERNEL);
+	if (!cmd->cmds) {
+		rc = -ENOMEM;
+		goto error;
+	}
+	cmd->count = packet_count;
+
+	rc = dsi_panel_create_cmd_packets(data, length, packet_count,
+					  cmd->cmds);
+	if (rc) {
+		pr_err("Failed to create cmd packets, rc=%d\n", rc);
+		goto error_free_mem;
+	}
+
+	state = of_get_property(of_node, cmd_set_state_map[type], NULL);
+	if (!state || !strcmp(state, "dsi_lp_mode")) {
+		cmd->state = DSI_CMD_SET_STATE_LP;
+	} else if (!strcmp(state, "dsi_hs_mode")) {
+		cmd->state = DSI_CMD_SET_STATE_HS;
+	} else {
+		pr_err("[%s] Command state unrecognized-%s\n",
+		       cmd_set_state_map[type], state);
+		/*
+		 * Fix: rc used to stay 0 here so the caller saw success, and
+		 * only the array was freed, leaking every packet payload.
+		 */
+		rc = -EINVAL;
+		dsi_panel_destroy_cmd_packets(cmd);
+		cmd->cmds = NULL;
+		cmd->count = 0;
+		goto error;
+	}
+
+	return rc;
+error_free_mem:
+	/* create_cmd_packets() already freed its partial payloads */
+	kfree(cmd->cmds);
+	cmd->cmds = NULL;
+	cmd->count = 0;
+error:
+	return rc;
+
+}
+
+/*
+ * Parse every command set; individual sets are allowed to be missing or
+ * malformed (logged), so the overall parse always succeeds.
+ */
+static int dsi_panel_parse_cmd_sets(struct dsi_panel *panel,
+				    struct device_node *of_node)
+{
+	u32 i;
+
+	for (i = DSI_CMD_SET_PRE_ON; i < DSI_CMD_SET_MAX; i++) {
+		struct dsi_panel_cmd_set *set = &panel->cmd_sets[i];
+
+		set->type = i;
+		if (dsi_panel_parse_cmd_sets_sub(set, i, of_node))
+			pr_err("[%s] failed to parse set %d\n", panel->name, i);
+	}
+
+	return 0;
+}
+
+/*
+ * dsi_panel_parse_reset_sequence() - parse the reset GPIO level/delay list
+ * @panel: panel whose reset_config is filled
+ * @of_node: panel device-tree node
+ *
+ * The property is a flat u32 array of <level sleep_ms> pairs; an odd
+ * element count is rejected. On success panel->reset_config.sequence
+ * owns a kzalloc'd array of count pairs.
+ *
+ * Return: 0 on success, -EINVAL/-ENOMEM on failure.
+ */
+static int dsi_panel_parse_reset_sequence(struct dsi_panel *panel,
+					  struct device_node *of_node)
+{
+	int rc = 0;
+	int i;
+	u32 length = 0;
+	u32 count = 0;
+	u32 size = 0;
+	u32 *arr_32 = NULL;
+	const u32 *arr;
+	struct dsi_reset_seq *seq;
+
+	arr = of_get_property(of_node, "qcom,mdss-dsi-reset-sequence",
+			      &length);
+	if (!arr) {
+		pr_err("[%s] dsi-reset-sequence not found\n", panel->name);
+		rc = -EINVAL;
+		goto error;
+	}
+	if (length & 0x1) {
+		pr_err("[%s] syntax error for dsi-reset-sequence\n",
+		       panel->name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/* fix: this diagnostic was left at pr_err() */
+	pr_debug("RESET SEQ LENGTH = %d\n", length);
+	length = length / sizeof(u32);
+
+	size = length * sizeof(u32);
+
+	arr_32 = kzalloc(size, GFP_KERNEL);
+	if (!arr_32) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	rc = of_property_read_u32_array(of_node,
+					"qcom,mdss-dsi-reset-sequence",
+					arr_32, length);
+	if (rc) {
+		/* fix: message used to read "dso-reset-seqience" */
+		pr_err("[%s] cannot read qcom,mdss-dsi-reset-sequence\n",
+		       panel->name);
+		goto error_free_arr_32;
+	}
+
+	count = length / 2;
+	size = count * sizeof(*seq);
+	seq = kzalloc(size, GFP_KERNEL);
+	if (!seq) {
+		rc = -ENOMEM;
+		goto error_free_arr_32;
+	}
+
+	panel->reset_config.sequence = seq;
+	panel->reset_config.count = count;
+
+	for (i = 0; i < length; i += 2) {
+		seq->level = arr_32[i];
+		seq->sleep_ms = arr_32[i + 1];
+		seq++;
+	}
+
+error_free_arr_32:
+	kfree(arr_32);
+error:
+	return rc;
+}
+
+/*
+ * dsi_panel_parse_power_cfg() - read the panel regulator supply entries
+ * @parent:  parent device (unused here, kept for symmetry with callers).
+ * @panel:   panel handle; power_info is filled in on success.
+ * @of_node: panel device-tree node.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int dsi_panel_parse_power_cfg(struct device *parent,
+				     struct dsi_panel *panel,
+				     struct device_node *of_node)
+{
+	int rc;
+
+	rc = dsi_clk_pwr_of_get_vreg_data(of_node, &panel->power_info,
+					  "qcom,panel-supply-entries");
+	if (rc)
+		pr_err("[%s] failed to parse vregs\n", panel->name);
+
+	return rc;
+}
+
+/*
+ * dsi_panel_parse_gpios() - parse reset/enable GPIOs and the reset sequence
+ * @panel:   panel handle.
+ * @of_node: panel device-tree node.
+ *
+ * The reset GPIO is mandatory. The display-enable GPIO is optional and is
+ * looked up first as "qcom,5v-boost-gpio", then as "qcom,platform-en-gpio".
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int dsi_panel_parse_gpios(struct dsi_panel *panel,
+				 struct device_node *of_node)
+{
+	int rc = 0;
+
+	panel->reset_config.reset_gpio = of_get_named_gpio(of_node,
+					      "qcom,platform-reset-gpio",
+					      0);
+	if (!gpio_is_valid(panel->reset_config.reset_gpio)) {
+		/* set rc before logging so the message reports the real code */
+		rc = -EINVAL;
+		pr_err("[%s] failed get reset gpio, rc=%d\n", panel->name, rc);
+		goto error;
+	}
+
+	panel->reset_config.disp_en_gpio = of_get_named_gpio(of_node,
+					      "qcom,5v-boost-gpio",
+					      0);
+	if (!gpio_is_valid(panel->reset_config.disp_en_gpio)) {
+		pr_debug("[%s] 5v-boost-gpio is not set, rc=%d\n",
+			 panel->name, rc);
+		panel->reset_config.disp_en_gpio = of_get_named_gpio(of_node,
+					      "qcom,platform-en-gpio",
+					      0);
+		if (!gpio_is_valid(panel->reset_config.disp_en_gpio)) {
+			pr_debug("[%s] platform-en-gpio is not set, rc=%d\n",
+				 panel->name, rc);
+		}
+	}
+
+	/* TODO: release memory */
+	rc = dsi_panel_parse_reset_sequence(panel, of_node);
+	if (rc) {
+		pr_err("[%s] failed to parse reset sequence, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+/*
+ * dsi_panel_parse_bl_pwm_config() - parse PWM backlight parameters
+ * @config:  backlight configuration to populate.
+ * @of_node: panel device-tree node.
+ *
+ * Reads the PMIC PWM bank, period, PMI routing flag and PWM GPIO. The bank,
+ * frequency and GPIO are mandatory for PWM-controlled backlights.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int dsi_panel_parse_bl_pwm_config(struct dsi_backlight_config *config,
+				     struct device_node *of_node)
+{
+	int rc = 0;
+	u32 val;
+
+	rc = of_property_read_u32(of_node, "qcom,dsi-bl-pmic-bank-select",
+				  &val);
+	if (rc) {
+		pr_err("bl-pmic-bank-select is not defined, rc=%d\n", rc);
+		goto error;
+	}
+	config->pwm_pmic_bank = val;
+
+	rc = of_property_read_u32(of_node, "qcom,dsi-bl-pmic-pwm-frequency",
+				  &val);
+	if (rc) {
+		/* fixed copy-paste: this property is the PWM frequency */
+		pr_err("bl-pmic-pwm-frequency is not defined, rc=%d\n", rc);
+		goto error;
+	}
+	config->pwm_period_usecs = val;
+
+	config->pwm_pmi_control = of_property_read_bool(of_node,
+						"qcom,mdss-dsi-bl-pwm-pmi");
+
+	config->pwm_gpio = of_get_named_gpio(of_node,
+					     "qcom,mdss-dsi-pwm-gpio",
+					     0);
+	if (!gpio_is_valid(config->pwm_gpio)) {
+		pr_err("pwm gpio is invalid\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+/*
+ * dsi_panel_parse_bl_config() - parse backlight control configuration
+ * @panel:   panel handle; bl_config is populated.
+ * @of_node: panel device-tree node.
+ *
+ * Determines the backlight control type (PWM/WLED/DCS), the min/max
+ * backlight and brightness levels (with defaults when unspecified), the
+ * PWM parameters when applicable, and the mandatory backlight-enable GPIO.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int dsi_panel_parse_bl_config(struct dsi_panel *panel,
+				     struct device_node *of_node)
+{
+	int rc = 0;
+	const char *bl_type;
+	u32 val = 0;
+
+	bl_type = of_get_property(of_node,
+				  "qcom,mdss-dsi-bl-pmic-control-type",
+				  NULL);
+	if (!bl_type) {
+		panel->bl_config.type = DSI_BACKLIGHT_UNKNOWN;
+	} else if (!strcmp(bl_type, "bl_ctrl_pwm")) {
+		panel->bl_config.type = DSI_BACKLIGHT_PWM;
+	} else if (!strcmp(bl_type, "bl_ctrl_wled")) {
+		panel->bl_config.type = DSI_BACKLIGHT_WLED;
+	} else if (!strcmp(bl_type, "bl_ctrl_dcs")) {
+		panel->bl_config.type = DSI_BACKLIGHT_DCS;
+	} else {
+		pr_debug("[%s] bl-pmic-control-type unknown-%s\n",
+			 panel->name, bl_type);
+		panel->bl_config.type = DSI_BACKLIGHT_UNKNOWN;
+	}
+
+	/* optional properties below: missing values fall back to defaults */
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-bl-min-level", &val);
+	if (rc) {
+		pr_debug("[%s] bl-min-level unspecified, defaulting to zero\n",
+			 panel->name);
+		panel->bl_config.bl_min_level = 0;
+	} else {
+		panel->bl_config.bl_min_level = val;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-bl-max-level", &val);
+	if (rc) {
+		pr_debug("[%s] bl-max-level unspecified, defaulting to max level\n",
+			 panel->name);
+		panel->bl_config.bl_max_level = MAX_BL_LEVEL;
+	} else {
+		panel->bl_config.bl_max_level = val;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-brightness-max-level",
+				  &val);
+	if (rc) {
+		pr_debug("[%s] brigheness-max-level unspecified, defaulting to 255\n",
+			 panel->name);
+		panel->bl_config.brightness_max_level = 255;
+	} else {
+		panel->bl_config.brightness_max_level = val;
+	}
+
+	if (panel->bl_config.type == DSI_BACKLIGHT_PWM) {
+		rc = dsi_panel_parse_bl_pwm_config(&panel->bl_config, of_node);
+		if (rc) {
+			pr_err("[%s] failed to parse pwm config, rc=%d\n",
+			       panel->name, rc);
+			goto error;
+		}
+	}
+
+	panel->bl_config.en_gpio = of_get_named_gpio(of_node,
+					      "qcom,platform-bklight-en-gpio",
+					      0);
+	if (!gpio_is_valid(panel->bl_config.en_gpio)) {
+		/* NOTE(review): rc here is stale from the last read - the log
+		 * value may be misleading; rc is set to -EINVAL just after.
+		 */
+		pr_err("[%s] failed get bklt gpio, rc=%d\n", panel->name, rc);
+		rc = -EINVAL;
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+/*
+ * dsi_panel_get() - allocate and initialize a panel from its DT node
+ * @parent:  parent device, stored in the panel for later use.
+ * @of_node: panel device-tree node.
+ *
+ * Parses timing, host config, panel mode, dfps caps, physical properties,
+ * command sets, power config, GPIOs and backlight config. dfps/power/gpio/
+ * backlight parse failures are logged but not fatal; timing/host/mode/phy/
+ * cmd-set failures abort the probe.
+ *
+ * Return: panel pointer on success, ERR_PTR(-errno) on failure. The caller
+ * releases it with dsi_panel_put().
+ */
+struct dsi_panel *dsi_panel_get(struct device *parent,
+				struct device_node *of_node)
+{
+	struct dsi_panel *panel;
+	int rc = 0;
+
+	panel = kzalloc(sizeof(*panel), GFP_KERNEL);
+	if (!panel)
+		return ERR_PTR(-ENOMEM);
+
+	panel->name = of_get_property(of_node, "qcom,mdss-dsi-panel-name",
+				      NULL);
+	if (!panel->name)
+		panel->name = DSI_PANEL_DEFAULT_LABEL;
+
+	rc = dsi_panel_parse_timing(&panel->mode.timing, of_node);
+	if (rc) {
+		pr_err("failed to parse panel timing, rc=%d\n", rc);
+		goto error;
+	}
+
+	panel->mode.pixel_clk_khz = (DSI_H_TOTAL(&panel->mode.timing) *
+				    DSI_V_TOTAL(&panel->mode.timing) *
+				    panel->mode.timing.refresh_rate) / 1000;
+	rc = dsi_panel_parse_host_config(panel, of_node);
+	if (rc) {
+		pr_err("failed to parse host configuration, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_panel_parse_panel_mode(panel, of_node);
+	if (rc) {
+		pr_err("failed to parse panel mode configuration, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_panel_parse_dfps_caps(&panel->dfps_caps, of_node, panel->name);
+	if (rc)
+		pr_err("failed to parse dfps configuration, rc=%d\n", rc);
+
+	rc = dsi_panel_parse_phy_props(&panel->phy_props, of_node, panel->name);
+	if (rc) {
+		pr_err("failed to parse panel physical dimension, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_panel_parse_cmd_sets(panel, of_node);
+	if (rc) {
+		pr_err("failed to parse command sets, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_panel_parse_power_cfg(parent, panel, of_node);
+	if (rc)
+		pr_err("failed to parse power config, rc=%d\n", rc);
+
+	rc = dsi_panel_parse_gpios(panel, of_node);
+	if (rc)
+		pr_err("failed to parse panel gpios, rc=%d\n", rc);
+
+	rc = dsi_panel_parse_bl_config(panel, of_node);
+	if (rc)
+		pr_err("failed to parse backlight config, rc=%d\n", rc);
+
+	panel->panel_of_node = of_node;
+	drm_panel_init(&panel->drm_panel);
+	mutex_init(&panel->panel_lock);
+	panel->parent = parent;
+	return panel;
+error:
+	/* NOTE(review): sub-allocations made by earlier successful parse
+	 * steps (cmd sets, reset sequence) appear to leak here - confirm.
+	 */
+	kfree(panel);
+	return ERR_PTR(rc);
+}
+
+/*
+ * dsi_panel_put() - release a panel obtained from dsi_panel_get()
+ * @panel: panel handle to destroy.
+ *
+ * Frees all parsed command-set packets, then the panel itself.
+ */
+void dsi_panel_put(struct dsi_panel *panel)
+{
+	u32 i;
+
+	for (i = 0; i < DSI_CMD_SET_MAX; i++)
+		dsi_panel_destroy_cmd_packets(&panel->cmd_sets[i]);
+
+	/* TODO: more free */
+	kfree(panel);
+}
+
+/*
+ * dsi_panel_drv_init() - acquire runtime resources for the panel
+ * @panel: panel handle.
+ * @host:  MIPI DSI host the panel is attached to.
+ *
+ * Acquires regulators, pinctrl, GPIOs and registers the backlight, in that
+ * order, rolling back already-acquired resources on failure. The acquire
+ * order is mirrored by dsi_panel_drv_deinit().
+ *
+ * Return: 0 on success, negative errno on failure (-EPROBE_DEFER is
+ * propagated silently from backlight registration).
+ */
+int dsi_panel_drv_init(struct dsi_panel *panel,
+		       struct mipi_dsi_host *host)
+{
+	int rc = 0;
+	struct mipi_dsi_device *dev;
+
+	if (!panel || !host) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	dev = &panel->mipi_device;
+
+	dev->host = host;
+	/*
+	 * We dont have device structure since panel is not a device node.
+	 * When using drm panel framework, the device is probed when the host is
+	 * create.
+	 */
+	dev->channel = 0;
+	dev->lanes = 4;
+
+	panel->host = host;
+	rc = dsi_panel_vreg_get(panel);
+	if (rc) {
+		pr_err("[%s] Failed to get panel regulators, rc=%d\n",
+		       panel->name, rc);
+		goto exit;
+	}
+
+	rc = dsi_panel_pinctrl_init(panel);
+	if (rc) {
+		pr_err("[%s] failed to init pinctrl, rc=%d\n", panel->name, rc);
+		goto error_vreg_put;
+	}
+
+	rc = dsi_panel_gpio_request(panel);
+	if (rc) {
+		pr_err("[%s] failed to request gpios, rc=%d\n", panel->name,
+		       rc);
+		goto error_pinctrl_deinit;
+	}
+
+	rc = dsi_panel_bl_register(panel);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			pr_err("[%s] failed to register backlight, rc=%d\n",
+			       panel->name, rc);
+		goto error_gpio_release;
+	}
+
+	goto exit;
+
+error_gpio_release:
+	(void)dsi_panel_gpio_release(panel);
+error_pinctrl_deinit:
+	(void)dsi_panel_pinctrl_deinit(panel);
+error_vreg_put:
+	(void)dsi_panel_vreg_put(panel);
+exit:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/*
+ * dsi_panel_drv_deinit() - release resources acquired by dsi_panel_drv_init()
+ * @panel: panel handle.
+ *
+ * All teardown steps are attempted even if one fails; each failure is
+ * logged. rc is overwritten by each step, so the return value reflects
+ * only the last step (regulator release).
+ *
+ * Return: 0 on success, negative errno from the last failing step.
+ */
+int dsi_panel_drv_deinit(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	rc = dsi_panel_bl_unregister(panel);
+	if (rc)
+		pr_err("[%s] failed to unregister backlight, rc=%d\n",
+		       panel->name, rc);
+
+	rc = dsi_panel_gpio_release(panel);
+	if (rc)
+		pr_err("[%s] failed to release gpios, rc=%d\n", panel->name,
+		       rc);
+
+	rc = dsi_panel_pinctrl_deinit(panel);
+	if (rc)
+		pr_err("[%s] failed to deinit gpios, rc=%d\n", panel->name,
+		       rc);
+
+	rc = dsi_panel_vreg_put(panel);
+	if (rc)
+		pr_err("[%s] failed to put regs, rc=%d\n", panel->name, rc);
+
+	panel->host = NULL;
+	memset(&panel->mipi_device, 0x0, sizeof(panel->mipi_device));
+
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/*
+ * dsi_panel_validate_mode() - validate a display mode against the panel
+ *
+ * Stub: currently accepts every mode unconditionally.
+ */
+int dsi_panel_validate_mode(struct dsi_panel *panel,
+			    struct dsi_display_mode *mode)
+{
+	return 0;
+}
+
+/*
+ * dsi_panel_get_mode_count() - report the number of supported display modes
+ * @panel: panel handle.
+ * @count: output; set to the number of modes (currently always 1).
+ *
+ * Return: 0 on success, -EINVAL on bad arguments.
+ */
+int dsi_panel_get_mode_count(struct dsi_panel *panel, u32 *count)
+{
+	if (!panel || !count) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	/* TODO: DT format has not been decided for multiple modes. */
+	*count = 1;
+	mutex_unlock(&panel->panel_lock);
+
+	return 0;
+}
+
+/*
+ * dsi_panel_get_phy_props() - copy out the panel's physical properties
+ * @panel:     panel handle.
+ * @phy_props: output buffer receiving a copy of panel->phy_props.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments.
+ */
+int dsi_panel_get_phy_props(struct dsi_panel *panel,
+			    struct dsi_panel_phy_props *phy_props)
+{
+	if (!panel || !phy_props) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	memcpy(phy_props, &panel->phy_props, sizeof(*phy_props));
+	mutex_unlock(&panel->panel_lock);
+
+	return 0;
+}
+
+/*
+ * dsi_panel_get_dfps_caps() - copy out the panel's dynamic-fps capabilities
+ * @panel:     panel handle.
+ * @dfps_caps: output buffer receiving a copy of panel->dfps_caps.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments.
+ */
+int dsi_panel_get_dfps_caps(struct dsi_panel *panel,
+			    struct dsi_dfps_capabilities *dfps_caps)
+{
+	if (!panel || !dfps_caps) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	memcpy(dfps_caps, &panel->dfps_caps, sizeof(*dfps_caps));
+	mutex_unlock(&panel->panel_lock);
+
+	return 0;
+}
+
+/*
+ * dsi_panel_get_mode() - copy out the display mode at the given index
+ * @panel: panel handle.
+ * @index: mode index; only 0 is supported today.
+ * @mode:  output buffer receiving a copy of panel->mode.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, -ENOTSUPP for index > 0.
+ */
+int dsi_panel_get_mode(struct dsi_panel *panel,
+			u32 index,
+			struct dsi_display_mode *mode)
+{
+	int rc = 0;
+
+	if (!panel || !mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	if (index != 0)
+		rc = -ENOTSUPP; /* TODO: Support more than one mode */
+	else
+		memcpy(mode, &panel->mode, sizeof(*mode));
+
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/*
+ * dsi_panel_get_host_cfg_for_mode() - build a host config for a display mode
+ * @panel:  panel handle.
+ * @mode:   display mode providing the video timing and operating mode.
+ * @config: output host configuration.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments.
+ */
+int dsi_panel_get_host_cfg_for_mode(struct dsi_panel *panel,
+				    struct dsi_display_mode *mode,
+				    struct dsi_host_config *config)
+{
+	int rc = 0;
+
+	if (!panel || !mode || !config) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	/* NOTE(review): panel_mode is taken from panel->mode while the engine
+	 * config below switches on the caller-supplied mode - confirm this
+	 * asymmetry is intentional.
+	 */
+	config->panel_mode = panel->mode.panel_mode;
+	memcpy(&config->common_config, &panel->host_config,
+	       sizeof(config->common_config));
+
+	if (mode->panel_mode == DSI_OP_VIDEO_MODE) {
+		memcpy(&config->u.video_engine, &panel->video_config,
+		       sizeof(config->u.video_engine));
+	} else {
+		memcpy(&config->u.cmd_engine, &panel->cmd_config,
+		       sizeof(config->u.cmd_engine));
+	}
+
+	memcpy(&config->video_timing, &mode->timing,
+	       sizeof(config->video_timing));
+
+	/* fixed 19.2 MHz escape clock */
+	config->esc_clk_rate_hz = 19200000;
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/*
+ * dsi_panel_pre_prepare() - power the panel up ahead of link initialization
+ * @panel: panel handle.
+ *
+ * Skipped entirely when lp11_init is set; in that case power-up is
+ * deferred to dsi_panel_prepare() (after the link reaches LP11).
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int dsi_panel_pre_prepare(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	/* If LP11_INIT is set, panel will be powered up during prepare() */
+	if (panel->lp11_init)
+		goto error;
+
+	rc = dsi_panel_power_on(panel);
+	if (rc) {
+		pr_err("[%s] Panel power on failed, rc=%d\n", panel->name, rc);
+		goto error;
+	}
+
+error:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/*
+ * dsi_panel_prepare() - run the panel's pre-on sequence
+ * @panel: panel handle.
+ *
+ * Powers the panel on first when lp11_init is set (power-up was deferred
+ * from pre_prepare), then sends the DSI_CMD_SET_PRE_ON command set.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int dsi_panel_prepare(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	if (panel->lp11_init) {
+		rc = dsi_panel_power_on(panel);
+		if (rc) {
+			pr_err("[%s] panel power on failed, rc=%d\n",
+			       panel->name, rc);
+			goto error;
+		}
+	}
+
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_PRE_ON);
+	if (rc) {
+		pr_err("[%s] failed to send DSI_CMD_SET_PRE_ON cmds, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+
+error:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/*
+ * dsi_panel_enable() - send the panel-on command set
+ * @panel: panel handle.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int dsi_panel_enable(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_ON);
+	if (rc) {
+		pr_err("[%s] failed to send DSI_CMD_SET_ON cmds, rc=%d\n",
+		       panel->name, rc);
+	}
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/*
+ * dsi_panel_post_enable() - send the post-on command set
+ * @panel: panel handle.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int dsi_panel_post_enable(struct dsi_panel *panel)
+{
+	int rc;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_POST_ON);
+	if (rc)
+		pr_err("[%s] failed to send DSI_CMD_SET_POST_ON cmds, rc=%d\n",
+		       panel->name, rc);
+	mutex_unlock(&panel->panel_lock);
+
+	return rc;
+}
+
+/*
+ * dsi_panel_pre_disable() - send the pre-off command set
+ * @panel: panel handle.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int dsi_panel_pre_disable(struct dsi_panel *panel)
+{
+	int rc;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_PRE_OFF);
+	if (rc)
+		pr_err("[%s] failed to send DSI_CMD_SET_PRE_OFF cmds, rc=%d\n",
+		       panel->name, rc);
+	mutex_unlock(&panel->panel_lock);
+
+	return rc;
+}
+
+/*
+ * dsi_panel_disable() - send the panel-off command set
+ * @panel: panel handle.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int dsi_panel_disable(struct dsi_panel *panel)
+{
+	int rc;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_OFF);
+	if (rc)
+		pr_err("[%s] failed to send DSI_CMD_SET_OFF cmds, rc=%d\n",
+		       panel->name, rc);
+	mutex_unlock(&panel->panel_lock);
+
+	return rc;
+}
+
+/*
+ * dsi_panel_unprepare() - send the post-off command set and power down
+ * @panel: panel handle.
+ *
+ * Powers the panel off here only when lp11_init is set; otherwise power-off
+ * happens in dsi_panel_post_unprepare().
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int dsi_panel_unprepare(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_POST_OFF);
+	if (rc) {
+		pr_err("[%s] failed to send DSI_CMD_SET_POST_OFF cmds, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+
+	if (panel->lp11_init) {
+		rc = dsi_panel_power_off(panel);
+		if (rc) {
+			pr_err("[%s] panel power off failed, rc=%d\n",
+			       panel->name, rc);
+			goto error;
+		}
+	}
+error:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/*
+ * dsi_panel_post_unprepare() - final power-off after link teardown
+ * @panel: panel handle.
+ *
+ * Powers the panel off unless lp11_init is set (in which case power-off was
+ * already done in dsi_panel_unprepare()).
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int dsi_panel_post_unprepare(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	if (!panel->lp11_init) {
+		rc = dsi_panel_power_off(panel);
+		if (rc) {
+			pr_err("[%s] panel power off failed, rc=%d\n",
+			       panel->name, rc);
+			goto error;
+		}
+	}
+error:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
new file mode 100644
index 000000000000..4d21a4cf6428
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_PANEL_H_
+#define _DSI_PANEL_H_
+
+#include <linux/of_device.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/leds.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_mipi_dsi.h>
+
+#include "dsi_defs.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_clk_pwr.h"
+
+#define MAX_BL_LEVEL 4096
+
+enum dsi_panel_rotation {
+ DSI_PANEL_ROTATE_NONE = 0,
+ DSI_PANEL_ROTATE_HV_FLIP,
+ DSI_PANEL_ROTATE_H_FLIP,
+ DSI_PANEL_ROTATE_V_FLIP
+};
+
+enum dsi_cmd_set_type {
+ DSI_CMD_SET_PRE_ON = 0,
+ DSI_CMD_SET_ON,
+ DSI_CMD_SET_POST_ON,
+ DSI_CMD_SET_PRE_OFF,
+ DSI_CMD_SET_OFF,
+ DSI_CMD_SET_POST_OFF,
+ DSI_CMD_SET_PRE_RES_SWITCH,
+ DSI_CMD_SET_RES_SWITCH,
+ DSI_CMD_SET_POST_RES_SWITCH,
+ DSI_CMD_SET_CMD_TO_VID_SWITCH,
+ DSI_CMD_SET_POST_CMD_TO_VID_SWITCH,
+ DSI_CMD_SET_VID_TO_CMD_SWITCH,
+ DSI_CMD_SET_POST_VID_TO_CMD_SWITCH,
+ DSI_CMD_SET_PANEL_STATUS,
+ DSI_CMD_SET_MAX
+};
+
+enum dsi_cmd_set_state {
+ DSI_CMD_SET_STATE_LP = 0,
+ DSI_CMD_SET_STATE_HS,
+ DSI_CMD_SET_STATE_MAX
+};
+
+enum dsi_backlight_type {
+ DSI_BACKLIGHT_PWM = 0,
+ DSI_BACKLIGHT_WLED,
+ DSI_BACKLIGHT_DCS,
+ DSI_BACKLIGHT_UNKNOWN,
+ DSI_BACKLIGHT_MAX,
+};
+
+struct dsi_dfps_capabilities {
+ bool dfps_support;
+ enum dsi_dfps_type type;
+ u32 min_refresh_rate;
+ u32 max_refresh_rate;
+};
+
+struct dsi_pinctrl_info {
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *active;
+ struct pinctrl_state *suspend;
+};
+
+struct dsi_panel_phy_props {
+ u32 panel_width_mm;
+ u32 panel_height_mm;
+ enum dsi_panel_rotation rotation;
+};
+
+struct dsi_cmd_desc {
+ struct mipi_dsi_msg msg;
+ bool last_command;
+ u32 post_wait_ms;
+};
+
+struct dsi_panel_cmd_set {
+ enum dsi_cmd_set_type type;
+ enum dsi_cmd_set_state state;
+ u32 count;
+ struct dsi_cmd_desc *cmds;
+};
+
+struct dsi_backlight_config {
+ enum dsi_backlight_type type;
+
+ u32 bl_min_level;
+ u32 bl_max_level;
+ u32 brightness_max_level;
+
+ int en_gpio;
+ /* PWM params */
+ bool pwm_pmi_control;
+ u32 pwm_pmic_bank;
+ u32 pwm_period_usecs;
+ int pwm_gpio;
+
+ /* WLED params */
+ struct led_trigger *wled;
+ struct backlight_device *bd;
+};
+
+struct dsi_reset_seq {
+ u32 level;
+ u32 sleep_ms;
+};
+
+struct dsi_panel_reset_config {
+ struct dsi_reset_seq *sequence;
+ u32 count;
+
+ int reset_gpio;
+ int disp_en_gpio;
+};
+
+/*
+ * struct dsi_panel - runtime state for one DSI panel
+ *
+ * Groups everything parsed from the panel device-tree node plus the
+ * handles acquired at drv_init time. All accessors serialize on
+ * panel_lock.
+ */
+struct dsi_panel {
+	const char *name;			/* DT label or default */
+	struct device_node *panel_of_node;
+	struct mipi_dsi_device mipi_device;
+
+	struct mutex panel_lock;		/* guards all fields below */
+	struct drm_panel drm_panel;
+	struct mipi_dsi_host *host;
+	struct device *parent;
+
+	struct dsi_host_common_cfg host_config;
+	struct dsi_video_engine_cfg video_config;
+	struct dsi_cmd_engine_cfg cmd_config;
+
+	struct dsi_dfps_capabilities dfps_caps;
+
+	struct dsi_panel_cmd_set cmd_sets[DSI_CMD_SET_MAX];
+	struct dsi_panel_phy_props phy_props;
+
+	struct dsi_regulator_info power_info;
+	struct dsi_display_mode mode;
+
+	struct dsi_backlight_config bl_config;
+	struct dsi_panel_reset_config reset_config;
+	struct dsi_pinctrl_info pinctrl;
+
+	/* when set, power-up/down is deferred until the link is in LP11 */
+	bool lp11_init;
+};
+
+struct dsi_panel *dsi_panel_get(struct device *parent,
+ struct device_node *of_node);
+void dsi_panel_put(struct dsi_panel *panel);
+
+int dsi_panel_drv_init(struct dsi_panel *panel, struct mipi_dsi_host *host);
+int dsi_panel_drv_deinit(struct dsi_panel *panel);
+
+int dsi_panel_get_mode_count(struct dsi_panel *panel, u32 *count);
+int dsi_panel_get_mode(struct dsi_panel *panel,
+ u32 index,
+ struct dsi_display_mode *mode);
+int dsi_panel_validate_mode(struct dsi_panel *panel,
+ struct dsi_display_mode *mode);
+int dsi_panel_get_host_cfg_for_mode(struct dsi_panel *panel,
+ struct dsi_display_mode *mode,
+ struct dsi_host_config *config);
+
+int dsi_panel_get_phy_props(struct dsi_panel *panel,
+ struct dsi_panel_phy_props *phy_props);
+int dsi_panel_get_dfps_caps(struct dsi_panel *panel,
+ struct dsi_dfps_capabilities *dfps_caps);
+
+int dsi_panel_pre_prepare(struct dsi_panel *panel);
+
+int dsi_panel_prepare(struct dsi_panel *panel);
+
+int dsi_panel_enable(struct dsi_panel *panel);
+
+int dsi_panel_post_enable(struct dsi_panel *panel);
+
+int dsi_panel_pre_disable(struct dsi_panel *panel);
+
+int dsi_panel_disable(struct dsi_panel *panel);
+
+int dsi_panel_unprepare(struct dsi_panel *panel);
+
+int dsi_panel_post_unprepare(struct dsi_panel *panel);
+
+int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl);
+#endif /* _DSI_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
new file mode 100644
index 000000000000..1ccbbe7df573
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -0,0 +1,859 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "msm-dsi-phy:[%s] " fmt, __func__
+
+#include <linux/of_device.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include <linux/list.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_gpu.h"
+#include "dsi_phy.h"
+#include "dsi_phy_hw.h"
+#include "dsi_clk_pwr.h"
+#include "dsi_catalog.h"
+
+#define DSI_PHY_DEFAULT_LABEL "MDSS PHY CTRL"
+
+struct dsi_phy_list_item {
+ struct msm_dsi_phy *phy;
+ struct list_head list;
+};
+
+static LIST_HEAD(dsi_phy_list);
+static DEFINE_MUTEX(dsi_phy_list_lock);
+
+static const struct dsi_ver_spec_info dsi_phy_v1_0 = {
+ .version = DSI_PHY_VERSION_1_0,
+ .lane_cfg_count = 4,
+ .strength_cfg_count = 2,
+ .regulator_cfg_count = 1,
+ .timing_cfg_count = 8,
+};
+static const struct dsi_ver_spec_info dsi_phy_v2_0 = {
+ .version = DSI_PHY_VERSION_2_0,
+ .lane_cfg_count = 4,
+ .strength_cfg_count = 2,
+ .regulator_cfg_count = 1,
+ .timing_cfg_count = 8,
+};
+static const struct dsi_ver_spec_info dsi_phy_v3_0 = {
+ .version = DSI_PHY_VERSION_3_0,
+ .lane_cfg_count = 4,
+ .strength_cfg_count = 2,
+ .regulator_cfg_count = 1,
+ .timing_cfg_count = 8,
+};
+static const struct dsi_ver_spec_info dsi_phy_v4_0 = {
+ .version = DSI_PHY_VERSION_4_0,
+ .lane_cfg_count = 4,
+ .strength_cfg_count = 2,
+ .regulator_cfg_count = 1,
+ .timing_cfg_count = 8,
+};
+
+static const struct of_device_id msm_dsi_phy_of_match[] = {
+ { .compatible = "qcom,dsi-phy-v1.0",
+ .data = &dsi_phy_v1_0,},
+ { .compatible = "qcom,dsi-phy-v2.0",
+ .data = &dsi_phy_v2_0,},
+ { .compatible = "qcom,dsi-phy-v3.0",
+ .data = &dsi_phy_v3_0,},
+ { .compatible = "qcom,dsi-phy-v4.0",
+ .data = &dsi_phy_v4_0,},
+ {}
+};
+
+/*
+ * dsi_phy_regmap_init() - map the "dsi_phy" register region
+ * @pdev: platform device providing the resource.
+ * @phy:  phy handle; hw.base is set on success.
+ *
+ * Return: 0 on success, negative errno from msm_ioremap() on failure.
+ */
+static int dsi_phy_regmap_init(struct platform_device *pdev,
+				  struct msm_dsi_phy *phy)
+{
+	int rc = 0;
+	void __iomem *ptr;
+
+	ptr = msm_ioremap(pdev, "dsi_phy", phy->name);
+	if (IS_ERR(ptr)) {
+		rc = PTR_ERR(ptr);
+		return rc;
+	}
+
+	phy->hw.base = ptr;
+
+	pr_debug("[%s] map dsi_phy registers to %p\n", phy->name, phy->hw.base);
+
+	return rc;
+}
+
+/*
+ * dsi_phy_regmap_deinit() - counterpart of dsi_phy_regmap_init()
+ *
+ * No explicit unmap here - presumably the mapping is device-managed by
+ * msm_ioremap(); confirm against its implementation.
+ */
+static int dsi_phy_regmap_deinit(struct msm_dsi_phy *phy)
+{
+	pr_debug("[%s] unmap registers\n", phy->name);
+	return 0;
+}
+
+/*
+ * dsi_phy_clocks_deinit() - release clocks acquired by dsi_phy_clocks_init()
+ * @phy: phy handle.
+ *
+ * Puts each clock that was obtained and zeroes the core clock info so the
+ * function is safe to call on a partially initialized structure.
+ *
+ * Return: always 0.
+ */
+static int dsi_phy_clocks_deinit(struct msm_dsi_phy *phy)
+{
+	int rc = 0;
+	struct dsi_core_clk_info *core = &phy->clks.core_clks;
+
+	if (core->mdp_core_clk)
+		devm_clk_put(&phy->pdev->dev, core->mdp_core_clk);
+	if (core->iface_clk)
+		devm_clk_put(&phy->pdev->dev, core->iface_clk);
+	if (core->core_mmss_clk)
+		devm_clk_put(&phy->pdev->dev, core->core_mmss_clk);
+	if (core->bus_clk)
+		devm_clk_put(&phy->pdev->dev, core->bus_clk);
+
+	memset(core, 0x0, sizeof(*core));
+
+	return rc;
+}
+
+/*
+ * dsi_phy_clocks_init() - acquire the PHY's core clocks
+ * @pdev: platform device to get clocks from.
+ * @phy:  phy handle; clks.core_clks is populated.
+ *
+ * On any failure, clocks acquired so far are released via
+ * dsi_phy_clocks_deinit().
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int dsi_phy_clocks_init(struct platform_device *pdev,
+			       struct msm_dsi_phy *phy)
+{
+	int rc = 0;
+	struct dsi_core_clk_info *core = &phy->clks.core_clks;
+
+	core->mdp_core_clk = devm_clk_get(&pdev->dev, "mdp_core_clk");
+	if (IS_ERR(core->mdp_core_clk)) {
+		rc = PTR_ERR(core->mdp_core_clk);
+		pr_err("failed to get mdp_core_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->iface_clk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(core->iface_clk)) {
+		rc = PTR_ERR(core->iface_clk);
+		pr_err("failed to get iface_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->core_mmss_clk = devm_clk_get(&pdev->dev, "core_mmss_clk");
+	if (IS_ERR(core->core_mmss_clk)) {
+		rc = PTR_ERR(core->core_mmss_clk);
+		pr_err("failed to get core_mmss_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (IS_ERR(core->bus_clk)) {
+		rc = PTR_ERR(core->bus_clk);
+		pr_err("failed to get bus_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	return rc;
+fail:
+	dsi_phy_clocks_deinit(phy);
+	return rc;
+}
+
+/*
+ * dsi_phy_supplies_init() - acquire the PHY's power supplies
+ * @pdev: platform device to get regulators from.
+ * @phy:  phy handle; pwr_info.digital and pwr_info.phy_pwr are populated.
+ *
+ * The digital rail is hard-wired to a single "gdsc" supply; the phy power
+ * rails are parsed from "qcom,phy-supply-entries". On failure, everything
+ * acquired so far is released.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int dsi_phy_supplies_init(struct platform_device *pdev,
+				 struct msm_dsi_phy *phy)
+{
+	int rc = 0;
+	int i = 0;
+	struct dsi_regulator_info *regs;
+	struct regulator *vreg = NULL;
+
+	regs = &phy->pwr_info.digital;
+	regs->vregs = devm_kzalloc(&pdev->dev, sizeof(struct dsi_vreg),
+				   GFP_KERNEL);
+	if (!regs->vregs) {
+		/* was: goto with rc == 0, silently reporting success */
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	regs->count = 1;
+	snprintf(regs->vregs->vreg_name,
+		 ARRAY_SIZE(regs->vregs[i].vreg_name),
+		 "%s", "gdsc");
+
+	rc = dsi_clk_pwr_get_dt_vreg_data(&pdev->dev,
+					  &phy->pwr_info.phy_pwr,
+					  "qcom,phy-supply-entries");
+	if (rc) {
+		pr_err("failed to get host power supplies, rc = %d\n", rc);
+		goto error_digital;
+	}
+
+	regs = &phy->pwr_info.digital;
+	for (i = 0; i < regs->count; i++) {
+		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
+		rc = PTR_RET(vreg);
+		if (rc) {
+			pr_err("failed to get %s regulator\n",
+			       regs->vregs[i].vreg_name);
+			goto error_host_pwr;
+		}
+		regs->vregs[i].vreg = vreg;
+	}
+
+	regs = &phy->pwr_info.phy_pwr;
+	for (i = 0; i < regs->count; i++) {
+		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
+		rc = PTR_RET(vreg);
+		if (rc) {
+			pr_err("failed to get %s regulator\n",
+			       regs->vregs[i].vreg_name);
+			for (--i; i >= 0; i--)
+				devm_regulator_put(regs->vregs[i].vreg);
+			goto error_digital_put;
+		}
+		regs->vregs[i].vreg = vreg;
+	}
+
+	return rc;
+
+error_digital_put:
+	regs = &phy->pwr_info.digital;
+	for (i = 0; i < regs->count; i++)
+		devm_regulator_put(regs->vregs[i].vreg);
+error_host_pwr:
+	devm_kfree(&pdev->dev, phy->pwr_info.phy_pwr.vregs);
+	phy->pwr_info.phy_pwr.vregs = NULL;
+	phy->pwr_info.phy_pwr.count = 0;
+error_digital:
+	devm_kfree(&pdev->dev, phy->pwr_info.digital.vregs);
+	phy->pwr_info.digital.vregs = NULL;
+	phy->pwr_info.digital.count = 0;
+error:
+	return rc;
+}
+
+/*
+ * dsi_phy_supplies_deinit() - release supplies from dsi_phy_supplies_init()
+ * @phy: phy handle.
+ *
+ * Puts every regulator in both rails, then frees and zeroes the vreg
+ * arrays. NULL vreg entries indicate an internal inconsistency and are
+ * logged rather than dereferenced.
+ *
+ * Return: always 0.
+ */
+static int dsi_phy_supplies_deinit(struct msm_dsi_phy *phy)
+{
+	int i = 0;
+	int rc = 0;
+	struct dsi_regulator_info *regs;
+
+	regs = &phy->pwr_info.digital;
+	for (i = 0; i < regs->count; i++) {
+		if (!regs->vregs[i].vreg)
+			pr_err("vreg is NULL, should not reach here\n");
+		else
+			devm_regulator_put(regs->vregs[i].vreg);
+	}
+
+	regs = &phy->pwr_info.phy_pwr;
+	for (i = 0; i < regs->count; i++) {
+		if (!regs->vregs[i].vreg)
+			pr_err("vreg is NULL, should not reach here\n");
+		else
+			devm_regulator_put(regs->vregs[i].vreg);
+	}
+
+	if (phy->pwr_info.phy_pwr.vregs) {
+		devm_kfree(&phy->pdev->dev, phy->pwr_info.phy_pwr.vregs);
+		phy->pwr_info.phy_pwr.vregs = NULL;
+		phy->pwr_info.phy_pwr.count = 0;
+	}
+	if (phy->pwr_info.digital.vregs) {
+		devm_kfree(&phy->pdev->dev, phy->pwr_info.digital.vregs);
+		phy->pwr_info.digital.vregs = NULL;
+		phy->pwr_info.digital.count = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * dsi_phy_parse_dt_per_lane_cfgs() - read a per-lane byte-array DT property
+ * @pdev:     platform device whose of_node carries the property.
+ * @cfg:      destination; cfg->count_per_lane must be set by the caller and
+ *            determines the expected property length.
+ * @property: DT property name.
+ *
+ * The property must contain exactly DSI_LANE_MAX * count_per_lane bytes,
+ * laid out lane-major; they are copied into cfg->lane[][].
+ *
+ * Return: 0 on success, -EINVAL if the property is absent or mis-sized.
+ */
+static int dsi_phy_parse_dt_per_lane_cfgs(struct platform_device *pdev,
+					  struct dsi_phy_per_lane_cfgs *cfg,
+					  char *property)
+{
+	int rc = 0, i = 0, j = 0;
+	const u8 *data;
+	u32 len = 0;
+
+	data = of_get_property(pdev->dev.of_node, property, &len);
+	if (!data) {
+		pr_err("Unable to read Phy %s settings\n", property);
+		return -EINVAL;
+	}
+
+	if (len != DSI_LANE_MAX * cfg->count_per_lane) {
+		pr_err("incorrect phy %s settings, exp=%d, act=%d\n",
+		       property, (DSI_LANE_MAX * cfg->count_per_lane), len);
+		return -EINVAL;
+	}
+
+	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+		for (j = 0; j < cfg->count_per_lane; j++) {
+			cfg->lane[i][j] = *data;
+			data++;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * dsi_phy_settings_init() - parse version-specific PHY settings from DT
+ * @pdev: platform device whose of_node carries the properties.
+ * @phy:  phy handle; cfg.lanecfg/strength/regulators are populated and
+ *        cfg.timing.count_per_lane is set (timing values come from the
+ *        panel later).
+ *
+ * Return: 0 on success, negative errno on failure. On failure all
+ * count_per_lane fields are reset so a partial config is never used.
+ */
+static int dsi_phy_settings_init(struct platform_device *pdev,
+				 struct msm_dsi_phy *phy)
+{
+	int rc = 0;
+	struct dsi_phy_per_lane_cfgs *lane = &phy->cfg.lanecfg;
+	struct dsi_phy_per_lane_cfgs *strength = &phy->cfg.strength;
+	struct dsi_phy_per_lane_cfgs *timing = &phy->cfg.timing;
+	struct dsi_phy_per_lane_cfgs *regs = &phy->cfg.regulators;
+
+	lane->count_per_lane = phy->ver_info->lane_cfg_count;
+	rc = dsi_phy_parse_dt_per_lane_cfgs(pdev, lane,
+					    "qcom,platform-lane-config");
+	if (rc) {
+		pr_err("failed to parse lane cfgs, rc=%d\n", rc);
+		goto err;
+	}
+
+	strength->count_per_lane = phy->ver_info->strength_cfg_count;
+	rc = dsi_phy_parse_dt_per_lane_cfgs(pdev, strength,
+					    "qcom,platform-strength-ctrl");
+	if (rc) {
+		pr_err("failed to parse strength ctrl, rc=%d\n", rc);
+		goto err;
+	}
+
+	regs->count_per_lane = phy->ver_info->regulator_cfg_count;
+	rc = dsi_phy_parse_dt_per_lane_cfgs(pdev, regs,
+					    "qcom,platform-regulator-settings");
+	if (rc) {
+		pr_err("failed to parse regulator settings, rc=%d\n", rc);
+		goto err;
+	}
+
+	/* Actual timing values are dependent on panel */
+	timing->count_per_lane = phy->ver_info->timing_cfg_count;
+
+	/* was missing: success previously fell through and wiped the counts */
+	return rc;
+
+err:
+	lane->count_per_lane = 0;
+	strength->count_per_lane = 0;
+	regs->count_per_lane = 0;
+	timing->count_per_lane = 0;
+	return rc;
+}
+
+/*
+ * dsi_phy_settings_deinit() - clear all parsed PHY settings
+ * @phy: phy handle.
+ *
+ * Return: always 0.
+ */
+static int dsi_phy_settings_deinit(struct msm_dsi_phy *phy)
+{
+	memset(&phy->cfg.lanecfg, 0x0, sizeof(phy->cfg.lanecfg));
+	memset(&phy->cfg.strength, 0x0, sizeof(phy->cfg.strength));
+	memset(&phy->cfg.timing, 0x0, sizeof(phy->cfg.timing));
+	memset(&phy->cfg.regulators, 0x0, sizeof(phy->cfg.regulators));
+	return 0;
+}
+
+/*
+ * dsi_phy_driver_probe() - probe one DSI PHY instance
+ * @pdev: platform device matched against msm_dsi_phy_of_match.
+ *
+ * Resolves the version-specific parameters from the compatible string,
+ * maps registers, acquires clocks and supplies, binds the catalog ops and
+ * parses DT settings, rolling back in reverse order on failure. On success
+ * the PHY is added to the global dsi_phy_list.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int dsi_phy_driver_probe(struct platform_device *pdev)
+{
+	struct msm_dsi_phy *dsi_phy;
+	struct dsi_phy_list_item *item;
+	const struct of_device_id *id;
+	const struct dsi_ver_spec_info *ver_info;
+	int rc = 0;
+	u32 index = 0;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("pdev not found\n");
+		return -ENODEV;
+	}
+
+	id = of_match_node(msm_dsi_phy_of_match, pdev->dev.of_node);
+	if (!id)
+		return -ENODEV;
+
+	ver_info = id->data;
+
+	item = devm_kzalloc(&pdev->dev, sizeof(*item), GFP_KERNEL);
+	if (!item)
+		return -ENOMEM;
+
+
+	dsi_phy = devm_kzalloc(&pdev->dev, sizeof(*dsi_phy), GFP_KERNEL);
+	if (!dsi_phy) {
+		devm_kfree(&pdev->dev, item);
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &index);
+	if (rc) {
+		pr_debug("cell index not set, default to 0\n");
+		index = 0;
+	}
+
+	dsi_phy->index = index;
+
+	dsi_phy->name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!dsi_phy->name)
+		dsi_phy->name = DSI_PHY_DEFAULT_LABEL;
+
+	pr_debug("Probing %s device\n", dsi_phy->name);
+
+	rc = dsi_phy_regmap_init(pdev, dsi_phy);
+	if (rc) {
+		pr_err("Failed to parse register information, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rc = dsi_phy_clocks_init(pdev, dsi_phy);
+	if (rc) {
+		pr_err("failed to parse clock information, rc = %d\n", rc);
+		goto fail_regmap;
+	}
+
+	rc = dsi_phy_supplies_init(pdev, dsi_phy);
+	if (rc) {
+		pr_err("failed to parse voltage supplies, rc = %d\n", rc);
+		goto fail_clks;
+	}
+
+	rc = dsi_catalog_phy_setup(&dsi_phy->hw, ver_info->version,
+				   dsi_phy->index);
+	if (rc) {
+		pr_err("Catalog does not support version (%d)\n",
+		       ver_info->version);
+		goto fail_supplies;
+	}
+
+	dsi_phy->ver_info = ver_info;
+	rc = dsi_phy_settings_init(pdev, dsi_phy);
+	if (rc) {
+		pr_err("Failed to parse phy setting, rc=%d\n", rc);
+		goto fail_supplies;
+	}
+
+	item->phy = dsi_phy;
+
+	mutex_lock(&dsi_phy_list_lock);
+	list_add(&item->list, &dsi_phy_list);
+	mutex_unlock(&dsi_phy_list_lock);
+
+	mutex_init(&dsi_phy->phy_lock);
+	/** TODO: initialize debugfs */
+	dsi_phy->pdev = pdev;
+	platform_set_drvdata(pdev, dsi_phy);
+	pr_debug("Probe successful for %s\n", dsi_phy->name);
+	return 0;
+
+fail_supplies:
+	(void)dsi_phy_supplies_deinit(dsi_phy);
+fail_clks:
+	(void)dsi_phy_clocks_deinit(dsi_phy);
+fail_regmap:
+	(void)dsi_phy_regmap_deinit(dsi_phy);
+fail:
+	devm_kfree(&pdev->dev, dsi_phy);
+	devm_kfree(&pdev->dev, item);
+	return rc;
+}
+
+static int dsi_phy_driver_remove(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct msm_dsi_phy *phy;
+ struct list_head *pos, *tmp;
+
+ /* Validate pdev before dereferencing it via platform_get_drvdata() */
+ if (!pdev) {
+ pr_err("Invalid device\n");
+ return -EINVAL;
+ }
+
+ phy = platform_get_drvdata(pdev);
+ if (!phy) {
+ pr_err("Invalid device\n");
+ return -EINVAL;
+ }
+
+ /* Unpublish the PHY so no new clients can look it up */
+ mutex_lock(&dsi_phy_list_lock);
+ list_for_each_safe(pos, tmp, &dsi_phy_list) {
+ struct dsi_phy_list_item *n;
+
+ n = list_entry(pos, struct dsi_phy_list_item, list);
+ if (n->phy == phy) {
+ list_del(&n->list);
+ devm_kfree(&pdev->dev, n);
+ break;
+ }
+ }
+ mutex_unlock(&dsi_phy_list_lock);
+
+ /* Tear down in reverse order of probe; log but do not abort on error */
+ mutex_lock(&phy->phy_lock);
+ rc = dsi_phy_settings_deinit(phy);
+ if (rc)
+ pr_err("failed to deinitialize phy settings, rc=%d\n", rc);
+
+ rc = dsi_phy_supplies_deinit(phy);
+ if (rc)
+ pr_err("failed to deinitialize voltage supplies, rc=%d\n", rc);
+
+ rc = dsi_phy_clocks_deinit(phy);
+ if (rc)
+ pr_err("failed to deinitialize clocks, rc=%d\n", rc);
+
+ rc = dsi_phy_regmap_deinit(phy);
+ if (rc)
+ pr_err("failed to deinitialize regmap, rc=%d\n", rc);
+ mutex_unlock(&phy->phy_lock);
+
+ mutex_destroy(&phy->phy_lock);
+ devm_kfree(&pdev->dev, phy);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+/* Platform driver bound to "qcom,dsi-phy-*" nodes via msm_dsi_phy_of_match */
+static struct platform_driver dsi_phy_platform_driver = {
+ .probe = dsi_phy_driver_probe,
+ .remove = dsi_phy_driver_remove,
+ .driver = {
+ .name = "dsi_phy",
+ .of_match_table = msm_dsi_phy_of_match,
+ },
+};
+
+/*
+ * dsi_phy_enable_hw() - program the PHY hardware: regulators first, then the
+ * lane/timing/strength configuration. Both ops are optional per catalog.
+ */
+static void dsi_phy_enable_hw(struct msm_dsi_phy *phy)
+{
+ if (phy->hw.ops.regulator_enable)
+ phy->hw.ops.regulator_enable(&phy->hw, &phy->cfg.regulators);
+
+ if (phy->hw.ops.enable)
+ phy->hw.ops.enable(&phy->hw, &phy->cfg);
+}
+
+/*
+ * dsi_phy_disable_hw() - reverse of dsi_phy_enable_hw(): disable the PHY
+ * before dropping its regulators. Both ops are optional per catalog.
+ */
+static void dsi_phy_disable_hw(struct msm_dsi_phy *phy)
+{
+ if (phy->hw.ops.disable)
+ phy->hw.ops.disable(&phy->hw);
+
+ if (phy->hw.ops.regulator_disable)
+ phy->hw.ops.regulator_disable(&phy->hw);
+}
+
+/**
+ * dsi_phy_get() - get a dsi phy handle from device node
+ * @of_node: device node for dsi phy controller
+ *
+ * Gets the DSI PHY handle for the corresponding of_node. The ref count is
+ * incremented to one all subsequents get will fail until the original client
+ * calls a put.
+ *
+ * Return: DSI PHY handle or an error code.
+ */
+struct msm_dsi_phy *dsi_phy_get(struct device_node *of_node)
+{
+ struct dsi_phy_list_item *item;
+ struct msm_dsi_phy *phy = NULL;
+
+ /*
+  * Pure lookup, nothing is removed from the list here, so the plain
+  * (non-_safe) entry iterator is the right tool.
+  */
+ mutex_lock(&dsi_phy_list_lock);
+ list_for_each_entry(item, &dsi_phy_list, list) {
+ if (item->phy->pdev->dev.of_node == of_node) {
+ phy = item->phy;
+ break;
+ }
+ }
+ mutex_unlock(&dsi_phy_list_lock);
+
+ if (!phy) {
+ pr_err("Device with of node not found\n");
+ phy = ERR_PTR(-EPROBE_DEFER);
+ return phy;
+ }
+
+ /* Exclusive ownership: only one client may hold the PHY at a time */
+ mutex_lock(&phy->phy_lock);
+ if (phy->refcount > 0) {
+ pr_err("[PHY_%d] Device under use\n", phy->index);
+ phy = ERR_PTR(-EINVAL);
+ } else {
+ phy->refcount++;
+ }
+ mutex_unlock(&phy->phy_lock);
+ return phy;
+}
+
+/**
+ * dsi_phy_put() - release dsi phy handle
+ * @dsi_phy: DSI PHY handle.
+ *
+ * Release the DSI PHY hardware. Driver will clean up all resources and puts
+ * back the DSI PHY into reset state.
+ */
+void dsi_phy_put(struct msm_dsi_phy *dsi_phy)
+{
+ /* Guard against NULL, consistent with the other public entry points */
+ if (!dsi_phy) {
+ pr_err("Invalid params\n");
+ return;
+ }
+
+ mutex_lock(&dsi_phy->phy_lock);
+
+ if (dsi_phy->refcount == 0)
+ pr_err("Unbalanced dsi_phy_put call\n");
+ else
+ dsi_phy->refcount--;
+
+ mutex_unlock(&dsi_phy->phy_lock);
+}
+
+/**
+ * dsi_phy_drv_init() - initialize dsi phy driver
+ * @dsi_phy: DSI PHY handle.
+ *
+ * Initializes DSI PHY driver. Should be called after dsi_phy_get().
+ *
+ * Return: error code.
+ */
+int dsi_phy_drv_init(struct msm_dsi_phy *dsi_phy)
+{
+ /* No per-client initialization is needed yet; kept for API symmetry */
+ return 0;
+}
+
+/**
+ * dsi_phy_drv_deinit() - de-initialize dsi phy driver
+ * @dsi_phy: DSI PHY handle.
+ *
+ * Release all resources acquired by dsi_phy_drv_init().
+ *
+ * Return: error code.
+ */
+int dsi_phy_drv_deinit(struct msm_dsi_phy *dsi_phy)
+{
+ /* Nothing to release; dsi_phy_drv_init() acquires no resources */
+ return 0;
+}
+
+/**
+ * dsi_phy_validate_mode() - validate a display mode
+ * @dsi_phy: DSI PHY handle.
+ * @mode: Mode information.
+ *
+ * Validation will fail if the mode cannot be supported by the PHY driver or
+ * hardware.
+ *
+ * Return: error code.
+ */
+int dsi_phy_validate_mode(struct msm_dsi_phy *dsi_phy,
+   struct dsi_mode_info *mode)
+{
+ int rc = 0;
+
+ if (!dsi_phy || !mode) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_phy->phy_lock);
+
+ /* Actual mode validation is not implemented yet; accepts every mode */
+ pr_debug("[PHY_%d] Skipping validation\n", dsi_phy->index);
+
+ mutex_unlock(&dsi_phy->phy_lock);
+ return rc;
+}
+
+/**
+ * dsi_phy_set_power_state() - enable/disable dsi phy power supplies
+ * @dsi_phy: DSI PHY handle.
+ * @enable: Boolean flag to enable/disable.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable)
+{
+ int rc = 0;
+
+ if (!dsi_phy) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dsi_phy->phy_lock);
+
+ /* Requesting the current state is a no-op (returns 0) */
+ if (enable == dsi_phy->power_state) {
+ pr_err("[PHY_%d] No state change\n", dsi_phy->index);
+ goto error;
+ }
+
+ if (enable) {
+ /* Power up: digital supply first, then analog PHY supply */
+ rc = dsi_pwr_enable_regulator(&dsi_phy->pwr_info.digital, true);
+ if (rc) {
+ pr_err("failed to enable digital regulator\n");
+ goto error;
+ }
+ rc = dsi_pwr_enable_regulator(&dsi_phy->pwr_info.phy_pwr, true);
+ if (rc) {
+ pr_err("failed to enable phy power\n");
+ /* Roll back the digital supply on partial failure */
+ (void)dsi_pwr_enable_regulator(
+ &dsi_phy->pwr_info.digital,
+ false
+ );
+ goto error;
+ }
+ } else {
+ /* Power down in reverse order: analog first, then digital */
+ rc = dsi_pwr_enable_regulator(&dsi_phy->pwr_info.phy_pwr,
+       false);
+ if (rc) {
+ pr_err("failed to disable phy power\n");
+ goto error;
+ }
+ rc = dsi_pwr_enable_regulator(&dsi_phy->pwr_info.digital,
+       false);
+ if (rc) {
+ pr_err("failed to disable digital regulator\n");
+ goto error;
+ }
+ }
+
+ dsi_phy->power_state = enable;
+error:
+ mutex_unlock(&dsi_phy->phy_lock);
+ return rc;
+}
+
+/**
+ * dsi_phy_enable() - enable DSI PHY hardware
+ * @phy: DSI PHY handle.
+ * @config: DSI host configuration.
+ * @pll_source: Source PLL for PHY clock.
+ * @skip_validation: Validation will not be performed on parameters.
+ *
+ * Validates and enables DSI PHY.
+ *
+ * Return: error code.
+ */
+int dsi_phy_enable(struct msm_dsi_phy *phy,
+    struct dsi_host_config *config,
+    enum dsi_phy_pll_source pll_source,
+    bool skip_validation)
+{
+ int rc = 0;
+ int clk_rc;
+
+ if (!phy || !config) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&phy->phy_lock);
+
+ if (!skip_validation)
+ pr_debug("[PHY_%d] TODO: perform validation\n", phy->index);
+
+ /* Core clocks must be on for PHY register access */
+ rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, true);
+ if (rc) {
+ pr_err("failed to enable core clocks, rc=%d\n", rc);
+ goto error;
+ }
+
+ memcpy(&phy->mode, &config->video_timing, sizeof(phy->mode));
+ phy->data_lanes = config->common_config.data_lanes;
+ phy->dst_format = config->common_config.dst_format;
+ phy->lane_map = config->lane_map;
+ phy->cfg.pll_source = pll_source;
+
+ /* Guard the op like the other ops; the catalog may not provide it */
+ if (phy->hw.ops.calculate_timing_params) {
+ rc = phy->hw.ops.calculate_timing_params(&phy->hw,
+ &phy->mode,
+ &config->common_config,
+ &phy->cfg.timing);
+ if (rc) {
+ pr_err("[%s] failed to set timing, rc=%d\n",
+        phy->name, rc);
+ goto error_disable_clks;
+ }
+ }
+
+ dsi_phy_enable_hw(phy);
+
+error_disable_clks:
+ /*
+  * Always drop the core clocks, but never let a successful disable
+  * overwrite an earlier failure in rc.
+  */
+ clk_rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, false);
+ if (clk_rc) {
+ pr_err("failed to disable clocks, skip phy disable\n");
+ if (!rc)
+ rc = clk_rc;
+ }
+error:
+ mutex_unlock(&phy->phy_lock);
+ return rc;
+}
+
+/**
+ * dsi_phy_disable() - disable DSI PHY hardware.
+ * @phy: DSI PHY handle.
+ *
+ * Return: error code.
+ */
+int dsi_phy_disable(struct msm_dsi_phy *phy)
+{
+ int rc = 0;
+
+ if (!phy) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&phy->phy_lock);
+
+ /* Core clocks must be on to reach the PHY registers */
+ rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, true);
+ if (rc) {
+ pr_err("failed to enable core clocks, rc=%d\n", rc);
+ goto error;
+ }
+
+ dsi_phy_disable_hw(phy);
+
+ rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, false);
+ if (rc) {
+ pr_err("failed to disable core clocks, rc=%d\n", rc);
+ goto error;
+ }
+
+error:
+ mutex_unlock(&phy->phy_lock);
+ return rc;
+}
+
+/**
+ * dsi_phy_set_timing_params() - timing parameters for the panel
+ * @phy: DSI PHY handle
+ * @timing: array holding timing params.
+ * @size: size of the array.
+ *
+ * When PHY timing calculator is not implemented, this array will be used to
+ * pass PHY timing information.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
+       u8 *timing, u32 size)
+{
+ struct dsi_phy_per_lane_cfgs *cfgs;
+ int lane, idx;
+ int rc = 0;
+
+ if (!phy || !timing || !size) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&phy->phy_lock);
+
+ cfgs = &phy->cfg.timing;
+ /* The flat array must hold exactly count_per_lane values per lane */
+ if (size != (DSI_LANE_MAX * cfgs->count_per_lane)) {
+ pr_err("Unexpected timing array size %d\n", size);
+ rc = -EINVAL;
+ } else {
+ for (lane = DSI_LOGICAL_LANE_0; lane < DSI_LANE_MAX; lane++)
+ for (idx = 0; idx < cfgs->count_per_lane; idx++)
+ cfgs->lane[lane][idx] =
+ timing[lane * cfgs->count_per_lane + idx];
+ }
+ mutex_unlock(&phy->phy_lock);
+ return rc;
+}
+
+void dsi_phy_drv_register(void)
+{
+ /* NOTE(review): registration failure is silently ignored here */
+ platform_driver_register(&dsi_phy_platform_driver);
+}
+
+void dsi_phy_drv_unregister(void)
+{
+ platform_driver_unregister(&dsi_phy_platform_driver);
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
new file mode 100644
index 000000000000..6c31bfa3ea00
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DSI_PHY_H_
+#define _DSI_PHY_H_
+
+#include "dsi_defs.h"
+#include "dsi_clk_pwr.h"
+#include "dsi_phy_hw.h"
+
+/**
+ * struct dsi_ver_spec_info - version-specific PHY parameter counts
+ * @version:             DSI PHY version.
+ * @lane_cfg_count:      Number of lane configuration values.
+ * @strength_cfg_count:  Number of strength configuration values.
+ * @regulator_cfg_count: Number of regulator configuration values.
+ * @timing_cfg_count:    Number of timing configuration values.
+ */
+struct dsi_ver_spec_info {
+ enum dsi_phy_version version;
+ u32 lane_cfg_count;
+ u32 strength_cfg_count;
+ u32 regulator_cfg_count;
+ u32 timing_cfg_count;
+};
+
+/**
+ * struct dsi_phy_clk_info - clock information for DSI controller
+ * @core_clks: Core clocks needed to access PHY registers.
+ */
+struct dsi_phy_clk_info {
+ struct dsi_core_clk_info core_clks;
+};
+
+/**
+ * struct dsi_phy_power_info - digital and analog power supplies for DSI PHY
+ * @digital: Digital power supply for DSI PHY.
+ * @phy_pwr: Analog power supplies for DSI PHY to work.
+ */
+struct dsi_phy_power_info {
+ struct dsi_regulator_info digital;
+ struct dsi_regulator_info phy_pwr;
+};
+
+/**
+ * struct msm_dsi_phy - DSI PHY object
+ * @pdev:                Pointer to platform device.
+ * @index:               Instance id.
+ * @name:                Name of the PHY instance.
+ * @refcount:            Reference count.
+ * @phy_lock:            Mutex for hardware and object access.
+ * @ver_info:            Version specific phy parameters.
+ * @hw:                  DSI PHY hardware object.
+ * @clks:                Clocks needed for PHY register access.
+ * @pwr_info:            Digital and analog power supplies for the PHY.
+ * @cfg:                 DSI phy configuration.
+ * @power_state:         True if PHY is powered on.
+ * @mode:                Current mode.
+ * @data_lanes:          Number of data lanes used.
+ * @dst_format:          Destination format.
+ * @lane_map:            Map between logical and physical lanes.
+ */
+struct msm_dsi_phy {
+ struct platform_device *pdev;
+ int index;
+ const char *name;
+ u32 refcount;
+ struct mutex phy_lock;
+
+ const struct dsi_ver_spec_info *ver_info;
+ struct dsi_phy_hw hw;
+
+ struct dsi_phy_clk_info clks;
+ struct dsi_phy_power_info pwr_info;
+
+ struct dsi_phy_cfg cfg;
+
+ bool power_state;
+ struct dsi_mode_info mode;
+ enum dsi_data_lanes data_lanes;
+ enum dsi_pixel_format dst_format;
+ struct dsi_lane_mapping lane_map;
+};
+
+/**
+ * dsi_phy_get() - get a dsi phy handle from device node
+ * @of_node: device node for dsi phy controller
+ *
+ * Gets the DSI PHY handle for the corresponding of_node. The ref count is
+ * incremented to one all subsequents get will fail until the original client
+ * calls a put.
+ *
+ * Return: DSI PHY handle or an error code.
+ */
+struct msm_dsi_phy *dsi_phy_get(struct device_node *of_node);
+
+/**
+ * dsi_phy_put() - release dsi phy handle
+ * @dsi_phy: DSI PHY handle.
+ *
+ * Release the DSI PHY hardware. Driver will clean up all resources and puts
+ * back the DSI PHY into reset state.
+ */
+void dsi_phy_put(struct msm_dsi_phy *dsi_phy);
+
+/**
+ * dsi_phy_drv_init() - initialize dsi phy driver
+ * @dsi_phy: DSI PHY handle.
+ *
+ * Initializes DSI PHY driver. Should be called after dsi_phy_get().
+ *
+ * Return: error code.
+ */
+int dsi_phy_drv_init(struct msm_dsi_phy *dsi_phy);
+
+/**
+ * dsi_phy_drv_deinit() - de-initialize dsi phy driver
+ * @dsi_phy: DSI PHY handle.
+ *
+ * Release all resources acquired by dsi_phy_drv_init().
+ *
+ * Return: error code.
+ */
+int dsi_phy_drv_deinit(struct msm_dsi_phy *dsi_phy);
+
+/**
+ * dsi_phy_validate_mode() - validate a display mode
+ * @dsi_phy: DSI PHY handle.
+ * @mode: Mode information.
+ *
+ * Validation will fail if the mode cannot be supported by the PHY driver or
+ * hardware.
+ *
+ * Return: error code.
+ */
+int dsi_phy_validate_mode(struct msm_dsi_phy *dsi_phy,
+ struct dsi_mode_info *mode);
+
+/**
+ * dsi_phy_set_power_state() - enable/disable dsi phy power supplies
+ * @dsi_phy: DSI PHY handle.
+ * @enable: Boolean flag to enable/disable.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable);
+
+/**
+ * dsi_phy_enable() - enable DSI PHY hardware
+ * @dsi_phy: DSI PHY handle.
+ * @config: DSI host configuration.
+ * @pll_source: Source PLL for PHY clock.
+ * @skip_validation: Validation will not be performed on parameters.
+ *
+ * Validates and enables DSI PHY.
+ *
+ * Return: error code.
+ */
+int dsi_phy_enable(struct msm_dsi_phy *dsi_phy,
+ struct dsi_host_config *config,
+ enum dsi_phy_pll_source pll_source,
+ bool skip_validation);
+
+/**
+ * dsi_phy_disable() - disable DSI PHY hardware.
+ * @phy: DSI PHY handle.
+ *
+ * Return: error code.
+ */
+int dsi_phy_disable(struct msm_dsi_phy *phy);
+
+/**
+ * dsi_phy_set_timing_params() - timing parameters for the panel
+ * @phy: DSI PHY handle
+ * @timing: array holding timing params.
+ * @size: size of the array.
+ *
+ * When PHY timing calculator is not implemented, this array will be used to
+ * pass PHY timing information.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
+ u8 *timing, u32 size);
+
+/**
+ * dsi_phy_drv_register() - register platform driver for dsi phy
+ */
+void dsi_phy_drv_register(void);
+
+/**
+ * dsi_phy_drv_unregister() - unregister platform driver
+ */
+void dsi_phy_drv_unregister(void);
+
+#endif /* _DSI_PHY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
new file mode 100644
index 000000000000..5edfd5e62738
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DSI_PHY_HW_H_
+#define _DSI_PHY_HW_H_
+
+#include "dsi_defs.h"
+
+#define DSI_MAX_SETTINGS 8
+
+/**
+ * enum dsi_phy_version - DSI PHY version enumeration
+ * @DSI_PHY_VERSION_UNKNOWN: Unknown version.
+ * @DSI_PHY_VERSION_1_0: 28nm-HPM.
+ * @DSI_PHY_VERSION_2_0: 28nm-LPM.
+ * @DSI_PHY_VERSION_3_0: 20nm.
+ * @DSI_PHY_VERSION_4_0: 14nm.
+ * @DSI_PHY_VERSION_MAX:
+ */
+enum dsi_phy_version {
+ DSI_PHY_VERSION_UNKNOWN,
+ DSI_PHY_VERSION_1_0, /* 28nm-HPM */
+ DSI_PHY_VERSION_2_0, /* 28nm-LPM */
+ DSI_PHY_VERSION_3_0, /* 20nm */
+ DSI_PHY_VERSION_4_0, /* 14nm */
+ DSI_PHY_VERSION_MAX
+};
+
+/**
+ * enum dsi_phy_hw_features - features supported by DSI PHY hardware
+ * @DSI_PHY_DPHY:         Supports DPHY
+ * @DSI_PHY_CPHY:         Supports CPHY
+ * @DSI_PHY_MAX_FEATURES: Number of features; sizes the feature bitmap.
+ */
+enum dsi_phy_hw_features {
+ DSI_PHY_DPHY,
+ DSI_PHY_CPHY,
+ DSI_PHY_MAX_FEATURES
+};
+
+/**
+ * enum dsi_phy_pll_source - pll clock source for PHY.
+ * @DSI_PLL_SOURCE_STANDALONE: Clock is sourced from native PLL and is not
+ * shared by other PHYs.
+ * @DSI_PLL_SOURCE_NATIVE: Clock is sourced from native PLL and is
+ * shared by other PHYs.
+ * @DSI_PLL_SOURCE_NON_NATIVE: Clock is sourced from other PHYs.
+ * @DSI_PLL_SOURCE_MAX:
+ */
+enum dsi_phy_pll_source {
+ DSI_PLL_SOURCE_STANDALONE = 0,
+ DSI_PLL_SOURCE_NATIVE,
+ DSI_PLL_SOURCE_NON_NATIVE,
+ DSI_PLL_SOURCE_MAX
+};
+
+/**
+ * struct dsi_phy_per_lane_cfgs - Holds register values for PHY parameters
+ * @lane: A set of maximum 8 values for each lane.
+ * @count_per_lane: Number of values per each lane.
+ */
+struct dsi_phy_per_lane_cfgs {
+ u8 lane[DSI_LANE_MAX][DSI_MAX_SETTINGS];
+ u32 count_per_lane;
+};
+
+/**
+ * struct dsi_phy_cfg - DSI PHY configuration
+ * @lanecfg: Lane configuration settings.
+ * @strength: Strength settings for lanes.
+ * @timing: Timing parameters for lanes.
+ * @regulators: Regulator settings for lanes.
+ * @pll_source: PLL source.
+ */
+struct dsi_phy_cfg {
+ struct dsi_phy_per_lane_cfgs lanecfg;
+ struct dsi_phy_per_lane_cfgs strength;
+ struct dsi_phy_per_lane_cfgs timing;
+ struct dsi_phy_per_lane_cfgs regulators;
+ enum dsi_phy_pll_source pll_source;
+};
+
+struct dsi_phy_hw;
+
+/**
+ * struct dsi_phy_hw_ops - Operations for DSI PHY hardware.
+ * @regulator_enable: Enable PHY regulators.
+ * @regulator_disable: Disable PHY regulators.
+ * @enable: Enable PHY.
+ * @disable: Disable PHY.
+ * @calculate_timing_params: Calculate PHY timing params from mode information
+ */
+struct dsi_phy_hw_ops {
+ /**
+ * regulator_enable() - enable regulators for DSI PHY
+ * @phy: Pointer to DSI PHY hardware object.
+ * @reg_cfg: Regulator configuration for all DSI lanes.
+ */
+ void (*regulator_enable)(struct dsi_phy_hw *phy,
+ struct dsi_phy_per_lane_cfgs *reg_cfg);
+
+ /**
+ * regulator_disable() - disable regulators
+ * @phy: Pointer to DSI PHY hardware object.
+ */
+ void (*regulator_disable)(struct dsi_phy_hw *phy);
+
+ /**
+ * enable() - Enable PHY hardware
+ * @phy: Pointer to DSI PHY hardware object.
+ * @cfg: Per lane configurations for timing, strength and lane
+ * configurations.
+ */
+ void (*enable)(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
+
+ /**
+ * disable() - Disable PHY hardware
+ * @phy: Pointer to DSI PHY hardware object.
+ */
+ void (*disable)(struct dsi_phy_hw *phy);
+
+ /**
+ * calculate_timing_params() - calculates timing parameters.
+ * @phy: Pointer to DSI PHY hardware object.
+ * @mode: Mode information for which timing has to be calculated.
+ * @config: DSI host configuration for this mode.
+ * @timing: Timing parameters for each lane which will be returned.
+ */
+ int (*calculate_timing_params)(struct dsi_phy_hw *phy,
+ struct dsi_mode_info *mode,
+ struct dsi_host_common_cfg *config,
+ struct dsi_phy_per_lane_cfgs *timing);
+};
+
+/**
+ * struct dsi_phy_hw - DSI phy hardware object specific to an instance
+ * @base: VA for the DSI PHY base address.
+ * @length: Length of the DSI PHY register base map.
+ * @index: Instance ID of the controller.
+ * @version: DSI PHY version.
+ * @feature_map: Features supported by DSI PHY.
+ * @ops: Function pointer to PHY operations.
+ */
+struct dsi_phy_hw {
+ void __iomem *base;
+ u32 length;
+ u32 index;
+
+ enum dsi_phy_version version;
+
+ DECLARE_BITMAP(feature_map, DSI_PHY_MAX_FEATURES);
+ struct dsi_phy_hw_ops ops;
+};
+
+#endif /* _DSI_PHY_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
new file mode 100644
index 000000000000..512352d96f98
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
@@ -0,0 +1,858 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "dsi-phy-hw:" fmt
+#include <linux/math64.h>
+#include <linux/delay.h>
+#include "dsi_hw.h"
+#include "dsi_phy_hw.h"
+
+#define DSIPHY_CMN_REVISION_ID0 0x0000
+#define DSIPHY_CMN_REVISION_ID1 0x0004
+#define DSIPHY_CMN_REVISION_ID2 0x0008
+#define DSIPHY_CMN_REVISION_ID3 0x000C
+#define DSIPHY_CMN_CLK_CFG0 0x0010
+#define DSIPHY_CMN_CLK_CFG1 0x0014
+#define DSIPHY_CMN_GLBL_TEST_CTRL 0x0018
+#define DSIPHY_CMN_CTRL_0 0x001C
+#define DSIPHY_CMN_CTRL_1 0x0020
+#define DSIPHY_CMN_CAL_HW_TRIGGER 0x0024
+#define DSIPHY_CMN_CAL_SW_CFG0 0x0028
+#define DSIPHY_CMN_CAL_SW_CFG1 0x002C
+#define DSIPHY_CMN_CAL_SW_CFG2 0x0030
+#define DSIPHY_CMN_CAL_HW_CFG0 0x0034
+#define DSIPHY_CMN_CAL_HW_CFG1 0x0038
+#define DSIPHY_CMN_CAL_HW_CFG2 0x003C
+#define DSIPHY_CMN_CAL_HW_CFG3 0x0040
+#define DSIPHY_CMN_CAL_HW_CFG4 0x0044
+#define DSIPHY_CMN_PLL_CNTRL 0x0048
+#define DSIPHY_CMN_LDO_CNTRL 0x004C
+
+#define DSIPHY_CMN_REGULATOR_CAL_STATUS0 0x0064
+#define DSIPHY_CMN_REGULATOR_CAL_STATUS1 0x0068
+
+/* n = 0..3 for data lanes and n = 4 for clock lane */
+#define DSIPHY_DLNX_CFG0(n) (0x100 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG1(n) (0x104 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG2(n) (0x108 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG3(n) (0x10C + ((n) * 0x80))
+#define DSIPHY_DLNX_TEST_DATAPATH(n) (0x110 + ((n) * 0x80))
+#define DSIPHY_DLNX_TEST_STR(n) (0x114 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_4(n) (0x118 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_5(n) (0x11C + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_6(n) (0x120 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_7(n) (0x124 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_8(n) (0x128 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_9(n) (0x12C + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_10(n) (0x130 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_11(n) (0x134 + ((n) * 0x80))
+#define DSIPHY_DLNX_STRENGTH_CTRL_0(n) (0x138 + ((n) * 0x80))
+#define DSIPHY_DLNX_STRENGTH_CTRL_1(n) (0x13C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_POLY(n) (0x140 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SEED0(n) (0x144 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SEED1(n) (0x148 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_HEAD(n) (0x14C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SOT(n) (0x150 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL0(n) (0x154 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL1(n) (0x158 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL2(n) (0x15C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL3(n) (0x160 + ((n) * 0x80))
+#define DSIPHY_DLNX_VREG_CNTRL(n) (0x164 + ((n) * 0x80))
+#define DSIPHY_DLNX_HSTX_STR_STATUS(n) (0x168 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS0(n) (0x16C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS1(n) (0x170 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS2(n) (0x174 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS3(n) (0x178 + ((n) * 0x80))
+#define DSIPHY_DLNX_MISR_STATUS(n) (0x17C + ((n) * 0x80))
+
+#define DSIPHY_PLL_CLKBUFLR_EN 0x041C
+#define DSIPHY_PLL_PLL_BANDGAP 0x0508
+
+/**
+ * struct timing_entry - Calculated values for each timing parameter.
+ * @mipi_min:  Minimum value allowed by the MIPI D-PHY spec (ns domain).
+ * @mipi_max:  Maximum value allowed by the MIPI D-PHY spec (ns domain).
+ * @rec_min:   Minimum recommended register-domain value.
+ * @rec_max:   Maximum recommended register-domain value.
+ * @rec:       Chosen recommended value within [rec_min, rec_max].
+ * @reg_value: Value to be programmed in register.
+ */
+struct timing_entry {
+ s32 mipi_min;
+ s32 mipi_max;
+ s32 rec_min;
+ s32 rec_max;
+ s32 rec;
+ u8 reg_value;
+};
+
+/**
+ * struct phy_timing_desc - Timing parameters for DSI PHY.
+ *
+ * One timing_entry per D-PHY timing parameter: clock-lane parameters
+ * (clk_*), data-lane HS parameters (hs_*) and turnaround parameters (ta_*).
+ */
+struct phy_timing_desc {
+ struct timing_entry clk_prepare;
+ struct timing_entry clk_zero;
+ struct timing_entry clk_trail;
+ struct timing_entry hs_prepare;
+ struct timing_entry hs_zero;
+ struct timing_entry hs_trail;
+ struct timing_entry hs_rqst;
+ struct timing_entry hs_rqst_clk;
+ struct timing_entry hs_exit;
+ struct timing_entry ta_go;
+ struct timing_entry ta_sure;
+ struct timing_entry ta_set;
+ struct timing_entry clk_post;
+ struct timing_entry clk_pre;
+};
+
+/**
+ * struct phy_clk_params - Clock parameters for PHY timing calculations.
+ * @bitclk_mbps:   HS bit clock rate in Mbps.
+ * @escclk_numer:  Escape clock numerator.
+ * @escclk_denom:  Escape clock denominator.
+ * @tlpx_numer_ns: TLPX numerator in nanoseconds.
+ * @treot_ns:      EOT (end-of-transmission) time in nanoseconds.
+ */
+struct phy_clk_params {
+ u32 bitclk_mbps;
+ u32 escclk_numer;
+ u32 escclk_denom;
+ u32 tlpx_numer_ns;
+ u32 treot_ns;
+};
+
+/**
+ * regulator_enable() - enable regulators for DSI PHY
+ * @phy:      Pointer to DSI PHY hardware object.
+ * @reg_cfg:  Regulator configuration for all DSI lanes.
+ *
+ * Programs each lane's VREG control register with the first configured
+ * value for that lane.
+ */
+void dsi_phy_hw_v4_0_regulator_enable(struct dsi_phy_hw *phy,
+       struct dsi_phy_per_lane_cfgs *reg_cfg)
+{
+ int i;
+
+ for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
+ DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(i), reg_cfg->lane[i][0]);
+
+ /* make sure all values are written to hardware */
+ wmb();
+
+ pr_debug("[DSI_%d] Phy regulators enabled\n", phy->index);
+}
+
+/**
+ * regulator_disable() - disable regulators
+ * @phy:      Pointer to DSI PHY hardware object.
+ *
+ * No register programming is required to disable regulators on v4.0
+ * hardware; this only logs the transition.
+ */
+void dsi_phy_hw_v4_0_regulator_disable(struct dsi_phy_hw *phy)
+{
+ pr_debug("[DSI_%d] Phy regulators disabled\n", phy->index);
+}
+
+/**
+ * enable() - Enable PHY hardware
+ * @phy:      Pointer to DSI PHY hardware object.
+ * @cfg:      Per lane configurations for timing, strength and lane
+ *	      configurations.
+ *
+ * Programs per-lane config/timing/strength registers, pulses CTRL_1 to
+ * latch the settings, then selects the PLL clock-buffer routing based on
+ * the configured PLL source.
+ */
+void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy,
+     struct dsi_phy_cfg *cfg)
+{
+ int i;
+ struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
+ u32 data;
+
+ DSI_W32(phy, DSIPHY_CMN_LDO_CNTRL, 0x1C);
+
+ DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0x1);
+ /* Program lane config, timing and strength for each lane (incl. clk) */
+ for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+
+ DSI_W32(phy, DSIPHY_DLNX_CFG0(i), cfg->lanecfg.lane[i][0]);
+ DSI_W32(phy, DSIPHY_DLNX_CFG1(i), cfg->lanecfg.lane[i][1]);
+ DSI_W32(phy, DSIPHY_DLNX_CFG2(i), cfg->lanecfg.lane[i][2]);
+ DSI_W32(phy, DSIPHY_DLNX_CFG3(i), cfg->lanecfg.lane[i][3]);
+
+ DSI_W32(phy, DSIPHY_DLNX_TEST_STR(i), 0x88);
+
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_4(i), timing->lane[i][0]);
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_5(i), timing->lane[i][1]);
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_6(i), timing->lane[i][2]);
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_7(i), timing->lane[i][3]);
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_8(i), timing->lane[i][4]);
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_9(i), timing->lane[i][5]);
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_10(i), timing->lane[i][6]);
+ DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_11(i), timing->lane[i][7]);
+
+ DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_0(i),
+ cfg->strength.lane[i][0]);
+ DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_1(i),
+ cfg->strength.lane[i][1]);
+ }
+
+ /* make sure all values are written to hardware before enabling phy */
+ wmb();
+
+ /* Pulse CTRL_1 to latch the lane settings (100us per HW requirement) */
+ DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x80);
+ udelay(100);
+ DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x00);
+
+ data = DSI_R32(phy, DSIPHY_CMN_GLBL_TEST_CTRL);
+
+ /* Route the PLL clock buffers according to the PLL source */
+ switch (cfg->pll_source) {
+ case DSI_PLL_SOURCE_STANDALONE:
+ DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x01);
+ data &= ~BIT(2);
+ break;
+ case DSI_PLL_SOURCE_NATIVE:
+ DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x03);
+ data &= ~BIT(2);
+ break;
+ case DSI_PLL_SOURCE_NON_NATIVE:
+ DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x00);
+ data |= BIT(2);
+ break;
+ default:
+ break;
+ }
+
+ DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, data);
+
+ /* Enable bias current for pll1 during split display case */
+ if (cfg->pll_source == DSI_PLL_SOURCE_NON_NATIVE)
+ DSI_W32(phy, DSIPHY_PLL_PLL_BANDGAP, 0x3);
+
+ pr_debug("[DSI_%d]Phy enabled ", phy->index);
+}
+
+/**
+ * disable() - Disable PHY hardware
+ * @phy:      Pointer to DSI PHY hardware object.
+ *
+ * Clears the PLL clock-buffer enables, the global test control and the
+ * main PHY control register.
+ */
+void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy)
+{
+ DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0);
+ DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0);
+ DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0);
+ pr_debug("[DSI_%d]Phy disabled ", phy->index);
+}
+
+/* Bits per pixel indexed by enum dsi_pixel_format */
+static const u32 bits_per_pixel[DSI_PIXEL_FORMAT_MAX] = {
+ 16, 18, 18, 24, 3, 8, 12 };
+
+/**
+ * calc_clk_prepare - calculates prepare timing params for clk lane.
+ * @clk_params:          Input clock parameters.
+ * @desc:                Timing descriptor; clk_prepare entry is filled in.
+ * @actual_frac:         Returned fractional part of the actual value.
+ * @actual_intermediate: Returned integer part of the actual value.
+ *
+ * Return: 0 on success, -EINVAL if the recommended value overflows 8 bits.
+ */
+static int calc_clk_prepare(struct phy_clk_params *clk_params,
+     struct phy_timing_desc *desc,
+     s32 *actual_frac,
+     s64 *actual_intermediate)
+{
+ u32 const min_prepare_frac = 50;
+ u64 const multiplier = BIT(20);
+
+ struct timing_entry *t = &desc->clk_prepare;
+ int rc = 0;
+ u64 dividend, temp, temp_multiple;
+ s32 frac = 0;
+ s64 intermediate;
+ s64 clk_prep_actual;
+
+ /* rec = rec_min + min_prepare_frac% of the [rec_min, rec_max] range */
+ dividend = ((t->rec_max - t->rec_min) * min_prepare_frac * multiplier);
+ temp = roundup(div_s64(dividend, 100), multiplier);
+ temp += (t->rec_min * multiplier);
+ t->rec = div_s64(temp, multiplier);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec value for clk_prepare\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ /* calculate theoretical value */
+ temp_multiple = 8 * t->reg_value * clk_params->tlpx_numer_ns
+ * multiplier;
+ intermediate = div_s64(temp_multiple, clk_params->bitclk_mbps);
+ div_s64_rem(temp_multiple, clk_params->bitclk_mbps, &frac);
+ clk_prep_actual = div_s64((intermediate + frac), multiplier);
+
+ pr_debug("CLK_PREPARE:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d",
+  t->mipi_min, t->mipi_max, t->rec_min, t->rec_max);
+ pr_debug(" reg_value=%d, actual=%lld\n", t->reg_value, clk_prep_actual);
+
+ *actual_frac = frac;
+ *actual_intermediate = intermediate;
+
+ return rc;
+}
+
+/**
+ * calc_clk_zero - calculates zero timing params for clk lane.
+ * @clk_params:          Input clock parameters.
+ * @desc:                Timing descriptor; clk_zero entry is filled in.
+ * @actual_frac:         Fractional part from calc_clk_prepare().
+ * @actual_intermediate: Integer part from calc_clk_prepare().
+ *
+ * Return: 0 on success, -EINVAL if the recommended value overflows 8 bits.
+ */
+static int calc_clk_zero(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ s32 actual_frac,
+ s64 actual_intermediate)
+{
+ u32 const clk_zero_min_frac = 2;
+ u64 const multiplier = BIT(20);
+
+ int rc = 0;
+ struct timing_entry *t = &desc->clk_zero;
+ s64 mipi_min, rec_temp1, rec_temp2, rec_temp3, rec_min;
+
+ /* mipi_min = 300ns minus the actual clk_prepare time */
+ mipi_min = ((300 * multiplier) - (actual_intermediate + actual_frac));
+ t->mipi_min = div_s64(mipi_min, multiplier);
+
+ rec_temp1 = div_s64((mipi_min * clk_params->bitclk_mbps),
+     clk_params->tlpx_numer_ns);
+ rec_temp2 = (rec_temp1 - (11 * multiplier));
+ rec_temp3 = roundup(div_s64(rec_temp2, 8), multiplier);
+ rec_min = (div_s64(rec_temp3, multiplier) - 3);
+ t->rec_min = rec_min;
+ t->rec_max = ((t->rec_min > 255) ? 511 : 255);
+
+ t->rec = DIV_ROUND_UP(
+ (((t->rec_max - t->rec_min) * clk_zero_min_frac) +
+  (t->rec_min * 100)),
+ 100);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec value for clk_zero\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("CLK_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+  t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+  t->reg_value);
+ return rc;
+}
+
+/**
+ * calc_clk_trail - calculates prepare trail params for clk lane.
+ */
+static int calc_clk_trail(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ s64 *teot_clk_lane)
+{
+ u64 const multiplier = BIT(20);
+ u32 const phy_timing_frac = 30;
+
+ int rc = 0;
+ struct timing_entry *t = &desc->clk_trail;
+ u64 temp_multiple;
+ s32 frac;
+ s64 mipi_max_tr, rec_temp1, rec_temp2, rec_temp3, mipi_max;
+ s64 teot_clk_lane1;
+
+ temp_multiple = div_s64(
+ (12 * multiplier * clk_params->tlpx_numer_ns),
+ clk_params->bitclk_mbps);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+
+ mipi_max_tr = ((105 * multiplier) +
+ (temp_multiple + frac));
+ teot_clk_lane1 = div_s64(mipi_max_tr, multiplier);
+
+ mipi_max = (mipi_max_tr - (clk_params->treot_ns * multiplier));
+ t->mipi_max = div_s64(mipi_max, multiplier);
+
+ temp_multiple = div_s64(
+ (t->mipi_min * multiplier * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ rec_temp1 = temp_multiple + frac + (3 * multiplier);
+ rec_temp2 = div_s64(rec_temp1, 8);
+ rec_temp3 = roundup(rec_temp2, multiplier);
+
+ t->rec_min = div_s64(rec_temp3, multiplier);
+
+ /* recommended max */
+ rec_temp1 = div_s64((mipi_max * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+ rec_temp2 = rec_temp1 + (3 * multiplier);
+ rec_temp3 = rec_temp2 / 8;
+ t->rec_max = div_s64(rec_temp3, multiplier);
+
+ t->rec = DIV_ROUND_UP(
+ (((t->rec_max - t->rec_min) * phy_timing_frac) +
+ (t->rec_min * 100)),
+ 100);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec value for clk_trail\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ *teot_clk_lane = teot_clk_lane1;
+ pr_debug("CLK_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+ return rc;
+
+}
+
+/**
+ * calc_hs_prepare - calculates prepare timing params for data lanes in HS.
+ */
+static int calc_hs_prepare(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ u64 *temp_mul)
+{
+ u64 const multiplier = BIT(20);
+ u32 const min_prepare_frac = 50;
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_prepare;
+ u64 temp_multiple, dividend, temp;
+ s32 frac;
+ s64 rec_temp1, rec_temp2, mipi_max, mipi_min;
+ u32 low_clk_multiplier = 0;
+
+ if (clk_params->bitclk_mbps <= 120)
+ low_clk_multiplier = 2;
+ /* mipi min */
+ temp_multiple = div_s64((4 * multiplier * clk_params->tlpx_numer_ns),
+ clk_params->bitclk_mbps);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ mipi_min = (40 * multiplier) + (temp_multiple + frac);
+ t->mipi_min = div_s64(mipi_min, multiplier);
+
+ /* mipi_max */
+ temp_multiple = div_s64(
+ (6 * multiplier * clk_params->tlpx_numer_ns),
+ clk_params->bitclk_mbps);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ mipi_max = (85 * multiplier) + temp_multiple;
+ t->mipi_max = div_s64(mipi_max, multiplier);
+
+ /* recommended min */
+ temp_multiple = div_s64((mipi_min * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+ temp_multiple -= (low_clk_multiplier * multiplier);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ rec_temp1 = roundup(((temp_multiple + frac) / 8), multiplier);
+ t->rec_min = div_s64(rec_temp1, multiplier);
+
+ /* recommended max */
+ temp_multiple = div_s64((mipi_max * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+ temp_multiple -= (low_clk_multiplier * multiplier);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ rec_temp2 = rounddown((temp_multiple / 8), multiplier);
+ t->rec_max = div_s64(rec_temp2, multiplier);
+
+ /* register value */
+ dividend = ((rec_temp2 - rec_temp1) * min_prepare_frac);
+ temp = roundup(div_u64(dividend, 100), multiplier);
+ t->rec = div_s64((temp + rec_temp1), multiplier);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec value for hs_prepare\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ temp_multiple = div_s64(
+ (8 * (temp + rec_temp1) * clk_params->tlpx_numer_ns),
+ clk_params->bitclk_mbps);
+
+ *temp_mul = temp_multiple;
+ pr_debug("HS_PREP:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+ return rc;
+}
+
+/**
+ * calc_hs_zero - calculates zero timing params for data lanes in HS.
+ */
+static int calc_hs_zero(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ u64 temp_multiple)
+{
+ u32 const hs_zero_min_frac = 10;
+ u64 const multiplier = BIT(20);
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_zero;
+ s64 rec_temp1, rec_temp2, rec_temp3, mipi_min;
+ s64 rec_min;
+
+ mipi_min = div_s64((10 * clk_params->tlpx_numer_ns * multiplier),
+ clk_params->bitclk_mbps);
+ rec_temp1 = (145 * multiplier) + mipi_min - temp_multiple;
+ t->mipi_min = div_s64(rec_temp1, multiplier);
+
+ /* recommended min */
+ rec_temp1 = div_s64((rec_temp1 * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+ rec_temp2 = rec_temp1 - (11 * multiplier);
+ rec_temp3 = roundup((rec_temp2 / 8), multiplier);
+ rec_min = rec_temp3 - (3 * multiplier);
+ t->rec_min = div_s64(rec_min, multiplier);
+ t->rec_max = ((t->rec_min > 255) ? 511 : 255);
+
+ t->rec = DIV_ROUND_UP(
+ (((t->rec_max - t->rec_min) * hs_zero_min_frac) +
+ (t->rec_min * 100)),
+ 100);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec value for hs_zero\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_hs_trail - calculates trail timing params for data lanes in HS.
+ */
+static int calc_hs_trail(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ u64 teot_clk_lane)
+{
+ u32 const phy_timing_frac = 30;
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_trail;
+ s64 rec_temp1;
+
+ t->mipi_min = 60 +
+ mult_frac(clk_params->tlpx_numer_ns, 4,
+ clk_params->bitclk_mbps);
+
+ t->mipi_max = teot_clk_lane - clk_params->treot_ns;
+
+ t->rec_min = DIV_ROUND_UP(
+ ((t->mipi_min * clk_params->bitclk_mbps) +
+ (3 * clk_params->tlpx_numer_ns)),
+ (8 * clk_params->tlpx_numer_ns));
+
+ rec_temp1 = ((t->mipi_max * clk_params->bitclk_mbps) +
+ (3 * clk_params->tlpx_numer_ns));
+ t->rec_max = (rec_temp1 / (8 * clk_params->tlpx_numer_ns));
+ rec_temp1 = DIV_ROUND_UP(
+ ((t->rec_max - t->rec_min) * phy_timing_frac),
+ 100);
+ t->rec = rec_temp1 + t->rec_min;
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec value for hs_trail\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_hs_rqst - calculates rqst timing params for data lanes in HS.
+ */
+static int calc_hs_rqst(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_rqst;
+
+ t->rec = DIV_ROUND_UP(
+ ((t->mipi_min * clk_params->bitclk_mbps) -
+ (8 * clk_params->tlpx_numer_ns)),
+ (8 * clk_params->tlpx_numer_ns));
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec value for hs_rqst, %d\n", t->rec);
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_RQST:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_hs_exit - calculates exit timing params for data lanes in HS.
+ */
+static int calc_hs_exit(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ u32 const hs_exit_min_frac = 10;
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_exit;
+
+ t->rec_min = (DIV_ROUND_UP(
+ (t->mipi_min * clk_params->bitclk_mbps),
+ (8 * clk_params->tlpx_numer_ns)) - 1);
+
+ t->rec = DIV_ROUND_UP(
+ (((t->rec_max - t->rec_min) * hs_exit_min_frac) +
+ (t->rec_min * 100)),
+ 100);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec value for hs_exit\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_EXIT:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_hs_rqst_clk - calculates rqst timing params for clock lane.
+ */
+static int calc_hs_rqst_clk(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_rqst_clk;
+
+ t->rec = DIV_ROUND_UP(
+ ((t->mipi_min * clk_params->bitclk_mbps) -
+ (8 * clk_params->tlpx_numer_ns)),
+ (8 * clk_params->tlpx_numer_ns));
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec value for hs_rqst_clk\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_RQST_CLK:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
+/**
+ * dsi_phy_calc_timing_params - calculates timing parameters for a given bit clock
+ */
+static int dsi_phy_calc_timing_params(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ int rc = 0;
+ s32 actual_frac = 0;
+ s64 actual_intermediate = 0;
+ u64 temp_multiple;
+ s64 teot_clk_lane;
+
+ rc = calc_clk_prepare(clk_params, desc, &actual_frac,
+ &actual_intermediate);
+ if (rc) {
+ pr_err("clk_prepare calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_clk_zero(clk_params, desc, actual_frac, actual_intermediate);
+ if (rc) {
+ pr_err("clk_zero calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_clk_trail(clk_params, desc, &teot_clk_lane);
+ if (rc) {
+ pr_err("clk_trail calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_hs_prepare(clk_params, desc, &temp_multiple);
+ if (rc) {
+ pr_err("hs_prepare calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_hs_zero(clk_params, desc, temp_multiple);
+ if (rc) {
+ pr_err("hs_zero calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_hs_trail(clk_params, desc, teot_clk_lane);
+ if (rc) {
+ pr_err("hs_trail calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_hs_rqst(clk_params, desc);
+ if (rc) {
+ pr_err("hs_rqst calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_hs_exit(clk_params, desc);
+ if (rc) {
+ pr_err("hs_exit calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = calc_hs_rqst_clk(clk_params, desc);
+ if (rc) {
+ pr_err("hs_rqst_clk calculations failed, rc=%d\n", rc);
+ goto error;
+ }
+error:
+ return rc;
+}
+
+/**
+ * calculate_timing_params() - calculates timing parameters.
+ * @phy: Pointer to DSI PHY hardware object.
+ * @mode: Mode information for which timing has to be calculated.
+ * @config: DSI host configuration for this mode.
+ * @timing: Timing parameters for each lane which will be returned.
+ */
+int dsi_phy_hw_v4_0_calculate_timing_params(struct dsi_phy_hw *phy,
+ struct dsi_mode_info *mode,
+ struct dsi_host_common_cfg *host,
+ struct dsi_phy_per_lane_cfgs *timing)
+{
+ /* constants */
+ u32 const esc_clk_mhz = 192; /* TODO: esc clock is hardcoded */
+ u32 const esc_clk_mmss_cc_prediv = 10;
+ u32 const tlpx_numer = 1000;
+ u32 const tr_eot = 20;
+ u32 const clk_prepare_spec_min = 38;
+ u32 const clk_prepare_spec_max = 95;
+ u32 const clk_trail_spec_min = 60;
+ u32 const hs_exit_spec_min = 100;
+ u32 const hs_exit_reco_max = 255;
+ u32 const hs_rqst_spec_min = 50;
+
+ /* local vars */
+ int rc = 0;
+ int i;
+ u32 h_total, v_total;
+ u64 inter_num;
+ u32 num_of_lanes = 0;
+ u32 bpp;
+ u64 x, y;
+ struct phy_timing_desc desc;
+ struct phy_clk_params clk_params = {0};
+
+ memset(&desc, 0x0, sizeof(desc));
+ h_total = DSI_H_TOTAL(mode);
+ v_total = DSI_V_TOTAL(mode);
+
+ bpp = bits_per_pixel[host->dst_format];
+
+ inter_num = bpp * mode->refresh_rate;
+
+ if (host->data_lanes & DSI_DATA_LANE_0)
+ num_of_lanes++;
+ if (host->data_lanes & DSI_DATA_LANE_1)
+ num_of_lanes++;
+ if (host->data_lanes & DSI_DATA_LANE_2)
+ num_of_lanes++;
+ if (host->data_lanes & DSI_DATA_LANE_3)
+ num_of_lanes++;
+
+
+ x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
+ y = rounddown(x, 1);
+
+ clk_params.bitclk_mbps = rounddown(mult_frac(y, 1, 1000000), 1);
+ clk_params.escclk_numer = esc_clk_mhz;
+ clk_params.escclk_denom = esc_clk_mmss_cc_prediv;
+ clk_params.tlpx_numer_ns = tlpx_numer;
+ clk_params.treot_ns = tr_eot;
+
+
+ /* Setup default parameters */
+ desc.clk_prepare.mipi_min = clk_prepare_spec_min;
+ desc.clk_prepare.mipi_max = clk_prepare_spec_max;
+ desc.clk_trail.mipi_min = clk_trail_spec_min;
+ desc.hs_exit.mipi_min = hs_exit_spec_min;
+ desc.hs_exit.rec_max = hs_exit_reco_max;
+
+ desc.clk_prepare.rec_min = DIV_ROUND_UP(
+ (desc.clk_prepare.mipi_min * clk_params.bitclk_mbps),
+ (8 * clk_params.tlpx_numer_ns)
+ );
+
+ desc.clk_prepare.rec_max = rounddown(
+ mult_frac((desc.clk_prepare.mipi_max * clk_params.bitclk_mbps),
+ 1, (8 * clk_params.tlpx_numer_ns)),
+ 1);
+
+ desc.hs_rqst.mipi_min = hs_rqst_spec_min;
+ desc.hs_rqst_clk.mipi_min = hs_rqst_spec_min;
+
+ pr_debug("BIT CLOCK = %d, tlpx_numer_ns=%d, treot_ns=%d\n",
+ clk_params.bitclk_mbps, clk_params.tlpx_numer_ns,
+ clk_params.treot_ns);
+ rc = dsi_phy_calc_timing_params(&clk_params, &desc);
+ if (rc) {
+ pr_err("Timing calc failed, rc=%d\n", rc);
+ goto error;
+ }
+
+
+ for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+ timing->lane[i][0] = desc.hs_exit.reg_value;
+
+ if (i == DSI_LOGICAL_CLOCK_LANE)
+ timing->lane[i][1] = desc.clk_zero.reg_value;
+ else
+ timing->lane[i][1] = desc.hs_zero.reg_value;
+
+ if (i == DSI_LOGICAL_CLOCK_LANE)
+ timing->lane[i][2] = desc.clk_prepare.reg_value;
+ else
+ timing->lane[i][2] = desc.hs_prepare.reg_value;
+
+ if (i == DSI_LOGICAL_CLOCK_LANE)
+ timing->lane[i][3] = desc.clk_trail.reg_value;
+ else
+ timing->lane[i][3] = desc.hs_trail.reg_value;
+
+ if (i == DSI_LOGICAL_CLOCK_LANE)
+ timing->lane[i][4] = desc.hs_rqst_clk.reg_value;
+ else
+ timing->lane[i][4] = desc.hs_rqst.reg_value;
+
+ timing->lane[i][5] = 0x3;
+ timing->lane[i][6] = 0x4;
+ timing->lane[i][7] = 0xA0;
+ pr_debug("[%d][%d %d %d %d %d]\n", i, timing->lane[i][0],
+ timing->lane[i][1],
+ timing->lane[i][2],
+ timing->lane[i][3],
+ timing->lane[i][4]);
+ }
+ timing->count_per_lane = 8;
+
+error:
+ return rc;
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 1f4a95eeb348..ba5921149ac3 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -56,7 +56,7 @@ static irqreturn_t hdmi_irq(int irq, void *dev_id)
/* Process HDCP: */
if (hdmi->hdcp_ctrl)
- hdmi_hdcp_irq(hdmi->hdcp_ctrl);
+ hdmi_hdcp_ctrl_irq(hdmi->hdcp_ctrl);
/* TODO audio.. */
@@ -75,7 +75,8 @@ static void hdmi_destroy(struct hdmi *hdmi)
flush_workqueue(hdmi->workq);
destroy_workqueue(hdmi->workq);
}
- hdmi_hdcp_destroy(hdmi);
+
+ hdmi_hdcp_ctrl_destroy(hdmi);
if (phy)
phy->funcs->destroy(phy);
@@ -228,7 +229,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
goto fail;
}
- hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi);
+ hdmi->hdcp_ctrl = hdmi_hdcp_ctrl_init(hdmi);
if (IS_ERR(hdmi->hdcp_ctrl)) {
dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
hdmi->hdcp_ctrl = NULL;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index d0e663192d01..e22ddcd51248 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -187,10 +187,10 @@ struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
/*
* hdcp
*/
-struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi);
-void hdmi_hdcp_destroy(struct hdmi *hdmi);
-void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
-void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
-void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+struct hdmi_hdcp_ctrl *hdmi_hdcp_ctrl_init(struct hdmi *hdmi);
+void hdmi_hdcp_ctrl_destroy(struct hdmi *hdmi);
+void hdmi_hdcp_ctrl_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_ctrl_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_ctrl_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
#endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 92b69ae8caf9..5b6a90abd108 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -106,7 +106,7 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
hdmi_set_mode(hdmi, true);
if (hdmi->hdcp_ctrl)
- hdmi_hdcp_on(hdmi->hdcp_ctrl);
+ hdmi_hdcp_ctrl_on(hdmi->hdcp_ctrl);
}
static void hdmi_bridge_enable(struct drm_bridge *bridge)
@@ -124,7 +124,7 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
struct hdmi_phy *phy = hdmi->phy;
if (hdmi->hdcp_ctrl)
- hdmi_hdcp_off(hdmi->hdcp_ctrl);
+ hdmi_hdcp_ctrl_off(hdmi->hdcp_ctrl);
DBG("power down");
hdmi_set_mode(hdmi, false);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
index 1dc9c34eb0df..e56a8675c0a4 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#include "hdmi.h"
#include <linux/qcom_scm.h>
+#ifdef CONFIG_DRM_MSM_HDCP
#define HDCP_REG_ENABLE 0x01
#define HDCP_REG_DISABLE 0x00
#define HDCP_PORT_ADDR 0x74
@@ -202,7 +203,7 @@ static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
return ret;
}
-void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void hdmi_hdcp_ctrl_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg_val, hdcp_int_status;
@@ -1310,7 +1311,7 @@ end:
}
}
-void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void hdmi_hdcp_ctrl_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg_val;
@@ -1335,7 +1336,7 @@ void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
}
-void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void hdmi_hdcp_ctrl_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
unsigned long flags;
@@ -1399,7 +1400,7 @@ void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
DBG("HDCP: Off");
}
-struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
+struct hdmi_hdcp_ctrl *hdmi_hdcp_ctrl_init(struct hdmi *hdmi)
{
struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
@@ -1428,10 +1429,33 @@ struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
return hdcp_ctrl;
}
-void hdmi_hdcp_destroy(struct hdmi *hdmi)
+void hdmi_hdcp_ctrl_destroy(struct hdmi *hdmi)
{
if (hdmi && hdmi->hdcp_ctrl) {
kfree(hdmi->hdcp_ctrl);
hdmi->hdcp_ctrl = NULL;
}
}
+
+#else
+struct hdmi_hdcp_ctrl *hdmi_hdcp_ctrl_init(struct hdmi *hdmi)
+{
+ return NULL;
+}
+
+void hdmi_hdcp_ctrl_destroy(struct hdmi *hdmi)
+{
+}
+
+void hdmi_hdcp_ctrl_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+}
+
+void hdmi_hdcp_ctrl_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+}
+
+void hdmi_hdcp_ctrl_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+}
+#endif
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index b532faa8026d..f7aebf5516ce 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -595,7 +595,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdelay(16);
if (config->platform.iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
+ mmu = msm_smmu_new(&pdev->dev,
+ MSM_SMMU_DOMAIN_UNSECURE);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
dev_err(dev->dev, "failed to init iommu: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
index 1c2caffc97e4..d2fa72815833 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -165,7 +165,11 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
return i;
}
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
+const struct msm_format *mdp_get_format(
+ struct msm_kms *kms,
+ uint32_t format,
+ const uint64_t *modifiers,
+ uint32_t modifiers_len)
{
int i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 303130320748..0d0723d32a03 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -98,7 +98,9 @@ struct mdp_format {
#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+const struct msm_format *mdp_get_format(struct msm_kms *kms,
+ uint32_t format, const uint64_t *modifiers,
+ uint32_t modifiers_len);
/* MDP capabilities */
#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 7eb253bc24df..d8791155236c 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -25,10 +26,9 @@ struct msm_commit {
uint32_t fence;
struct msm_fence_cb fence_cb;
uint32_t crtc_mask;
+ struct kthread_work commit_work;
};
-static void fence_cb(struct msm_fence_cb *cb);
-
/* block until specified crtcs are no longer pending update, and
* atomically mark them as pending update
*/
@@ -59,75 +59,351 @@ static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
spin_unlock(&priv->pending_crtcs_event.lock);
}
-static struct msm_commit *commit_init(struct drm_atomic_state *state)
+static void commit_destroy(struct msm_commit *commit)
+{
+ end_atomic(commit->dev->dev_private, commit->crtc_mask);
+ kfree(commit);
+}
+
+static void msm_atomic_wait_for_commit_done(
+ struct drm_device *dev,
+ struct drm_atomic_state *old_state,
+ int modeset_flags)
{
- struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
+ struct drm_crtc *crtc;
+ struct msm_drm_private *priv = old_state->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ int ncrtcs = old_state->dev->mode_config.num_crtc;
+ int i;
- if (!c)
- return NULL;
+ for (i = 0; i < ncrtcs; i++) {
+ int private_flags;
- c->dev = state->dev;
- c->state = state;
+ crtc = old_state->crtcs[i];
- /* TODO we might need a way to indicate to run the cb on a
- * different wq so wait_for_vblanks() doesn't block retiring
- * bo's..
- */
- INIT_FENCE_CB(&c->fence_cb, fence_cb);
+ if (!crtc || !crtc->state || !crtc->state->enable)
+ continue;
+
+ /* If specified, only wait if requested flag is true */
+ private_flags = crtc->state->adjusted_mode.private_flags;
+ if (modeset_flags && !(modeset_flags & private_flags))
+ continue;
+
+ /* Legacy cursor ioctls are completely unsynced, and userspace
+ * relies on that (by doing tons of cursor updates). */
+ if (old_state->legacy_cursor_update)
+ continue;
- return c;
+ if (kms->funcs->wait_for_crtc_commit_done)
+ kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+ }
}
-static void commit_destroy(struct msm_commit *c)
+static void
+msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
{
- end_atomic(c->dev->dev_private, c->crtc_mask);
- kfree(c);
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ const struct drm_encoder_helper_funcs *funcs;
+ struct drm_encoder *encoder;
+ struct drm_crtc_state *old_crtc_state;
+ unsigned int crtc_idx;
+
+ /*
+ * Shut down everything that's in the changeset and currently
+ * still on. So need to check the old, saved state.
+ */
+ if (!old_conn_state->crtc)
+ continue;
+
+ crtc_idx = drm_crtc_index(old_conn_state->crtc);
+ old_crtc_state = old_state->crtc_states[crtc_idx];
+
+ if (!old_crtc_state->active ||
+ !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
+ continue;
+
+ encoder = old_conn_state->best_encoder;
+
+ /* We shouldn't get this far if we didn't previously have
+ * an encoder.. but WARN_ON() rather than explode.
+ */
+ if (WARN_ON(!encoder))
+ continue;
+
+ if (msm_is_mode_seamless(
+ &connector->encoder->crtc->state->mode))
+ continue;
+
+ funcs = encoder->helper_private;
+
+ DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call disable hooks twice.
+ */
+ drm_bridge_disable(encoder->bridge);
+
+ /* Right function depends upon target state. */
+ if (connector->state->crtc && funcs->prepare)
+ funcs->prepare(encoder);
+ else if (funcs->disable)
+ funcs->disable(encoder);
+ else
+ funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ drm_bridge_post_disable(encoder->bridge);
+ }
+
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
+
+ /* Shut down everything that needs a full modeset. */
+ if (!drm_atomic_crtc_needs_modeset(crtc->state))
+ continue;
+
+ if (!old_crtc_state->active)
+ continue;
+
+ if (msm_is_mode_seamless(&crtc->state->mode))
+ continue;
+
+ funcs = crtc->helper_private;
+
+ DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
+ crtc->base.id);
+
+ /* Right function depends upon target state. */
+ if (crtc->state->enable && funcs->prepare)
+ funcs->prepare(crtc);
+ else if (funcs->disable)
+ funcs->disable(crtc);
+ else
+ funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ }
}
-static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
+static void
+msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ int i;
+
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
+
+ if (!crtc->state->mode_changed)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (crtc->state->enable && funcs->mode_set_nofb) {
+ DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
+ crtc->base.id);
+
+ funcs->mode_set_nofb(crtc);
+ }
+ }
+
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ const struct drm_encoder_helper_funcs *funcs;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_encoder *encoder;
+ struct drm_display_mode *mode, *adjusted_mode;
+
+ if (!connector->state->best_encoder)
+ continue;
+
+ encoder = connector->state->best_encoder;
+ funcs = encoder->helper_private;
+ new_crtc_state = connector->state->crtc->state;
+ mode = &new_crtc_state->mode;
+ adjusted_mode = &new_crtc_state->adjusted_mode;
+
+ if (!new_crtc_state->mode_changed)
+ continue;
+
+ DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call mode_set hooks twice.
+ */
+ if (funcs->mode_set)
+ funcs->mode_set(encoder, mode, adjusted_mode);
+
+ drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
+ }
+}
+
+/**
+ * msm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function shuts down all the outputs that need to be shut down and
+ * prepares them (if required) with the new mode.
+ *
+ * For compatibility with legacy crtc helpers this should be called before
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since planes updates then only happen when the CRTC is actually enabled.
+ */
+static void msm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ msm_disable_outputs(dev, old_state);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
+
+ msm_crtc_set_mode(dev, old_state);
+}
+
+/**
+ * msm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function enables all the outputs with the new configuration which had to
+ * be turned off for the update.
+ *
+ * For compatibility with legacy crtc helpers this should be called after
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since planes updates then only happen when the CRTC is actually enabled.
+ */
+static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
struct drm_crtc *crtc;
- struct msm_drm_private *priv = old_state->dev->dev_private;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
- int ncrtcs = old_state->dev->mode_config.num_crtc;
+ int bridge_enable_count = 0;
int i;
- for (i = 0; i < ncrtcs; i++) {
- crtc = old_state->crtcs[i];
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
- if (!crtc)
+ /* Need to filter out CRTCs where only planes change. */
+ if (!drm_atomic_crtc_needs_modeset(crtc->state))
continue;
- if (!crtc->state->enable)
+ if (!crtc->state->active)
continue;
- /* Legacy cursor ioctls are completely unsynced, and userspace
- * relies on that (by doing tons of cursor updates). */
- if (old_state->legacy_cursor_update)
+ if (msm_is_mode_seamless(&crtc->state->mode))
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (crtc->state->enable) {
+ DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
+ crtc->base.id);
+
+ if (funcs->enable)
+ funcs->enable(crtc);
+ else
+ funcs->commit(crtc);
+ }
+ }
+
+ /* ensure bridge/encoder updates happen on same vblank */
+ msm_atomic_wait_for_commit_done(dev, old_state,
+ MSM_MODE_FLAG_VBLANK_PRE_MODESET);
+
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ const struct drm_encoder_helper_funcs *funcs;
+ struct drm_encoder *encoder;
+
+ if (!connector->state->best_encoder)
+ continue;
+
+ if (!connector->state->crtc->state->active ||
+ !drm_atomic_crtc_needs_modeset(
+ connector->state->crtc->state))
+ continue;
+
+ encoder = connector->state->best_encoder;
+ funcs = encoder->helper_private;
+
+ DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call enable hooks twice.
+ */
+ drm_bridge_pre_enable(encoder->bridge);
+ ++bridge_enable_count;
+
+ if (funcs->enable)
+ funcs->enable(encoder);
+ else
+ funcs->commit(encoder);
+ }
+
+ if (kms->funcs->commit) {
+ DRM_DEBUG_ATOMIC("triggering commit\n");
+ kms->funcs->commit(kms, old_state);
+ }
+
+ /* If no bridges were pre_enabled, skip iterating over them again */
+ if (bridge_enable_count == 0)
+ return;
+
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ struct drm_encoder *encoder;
+
+ if (!connector->state->best_encoder)
+ continue;
+
+ if (!connector->state->crtc->state->active ||
+ !drm_atomic_crtc_needs_modeset(
+ connector->state->crtc->state))
continue;
- kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+ encoder = connector->state->best_encoder;
+
+ DRM_DEBUG_ATOMIC("bridge enable enabling [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ drm_bridge_enable(encoder->bridge);
}
}
/* The (potentially) asynchronous part of the commit. At this point
* nothing can fail short of armageddon.
*/
-static void complete_commit(struct msm_commit *c)
+static void complete_commit(struct msm_commit *commit)
{
- struct drm_atomic_state *state = c->state;
+ struct drm_atomic_state *state = commit->state;
struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
kms->funcs->prepare_commit(kms, state);
- drm_atomic_helper_commit_modeset_disables(dev, state);
+ msm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state, false);
- drm_atomic_helper_commit_modeset_enables(dev, state);
+ msm_atomic_helper_commit_modeset_enables(dev, state);
/* NOTE: _wait_for_vblanks() only waits for vblank on
* enabled CRTCs. So we end up faulting when disabling
@@ -142,7 +418,7 @@ static void complete_commit(struct msm_commit *c)
* not be critical path)
*/
- msm_atomic_wait_for_commit_done(dev, state);
+ msm_atomic_wait_for_commit_done(dev, state, 0);
drm_atomic_helper_cleanup_planes(dev, state);
@@ -150,38 +426,97 @@ static void complete_commit(struct msm_commit *c)
drm_atomic_state_free(state);
- commit_destroy(c);
+ commit_destroy(commit);
}
static void fence_cb(struct msm_fence_cb *cb)
{
- struct msm_commit *c =
+ struct msm_commit *commit =
container_of(cb, struct msm_commit, fence_cb);
- complete_commit(c);
+ complete_commit(commit);
}
-static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
+/*
+ * Kthread-worker callback for asynchronous commits: recover the owning
+ * msm_commit from its embedded commit_work and run the completion path
+ * on the display thread.
+ */
+static void _msm_drm_commit_work_cb(struct kthread_work *work)
 {
- struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
- c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+ struct msm_commit *commit = NULL;
+
+ if (!work) {
+ DRM_ERROR("%s: Invalid commit work data!\n", __func__);
+ return;
+ }
+
+ commit = container_of(work, struct msm_commit, commit_work);
+
+ complete_commit(commit);
 }
-int msm_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *state)
+static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
- int ret;
+ struct msm_commit *commit = kzalloc(sizeof(*commit), GFP_KERNEL);
- /*
- * msm ->atomic_check can update ->mode_changed for pixel format
- * changes, hence must be run before we check the modeset changes.
+ if (!commit) {
+ DRM_ERROR("invalid commit\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ commit->dev = state->dev;
+ commit->state = state;
+
+ /* TODO we might need a way to indicate to run the cb on a
+ * different wq so wait_for_vblanks() doesn't block retiring
+ * bo's..
*/
- ret = drm_atomic_helper_check_planes(dev, state);
- if (ret)
- return ret;
+ INIT_FENCE_CB(&commit->fence_cb, fence_cb);
+ init_kthread_work(&commit->commit_work, _msm_drm_commit_work_cb);
- ret = drm_atomic_helper_check_modeset(dev, state);
- if (ret)
- return ret;
+ return commit;
+}
+
+/*
+ * Raise the commit's wait-fence to cover the read fence of @fb's plane-0
+ * backing bo, so the commit only proceeds once rendering into that
+ * framebuffer has completed.
+ */
+static void commit_set_fence(struct msm_commit *commit,
+ struct drm_framebuffer *fb)
+{
+ struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
+ commit->fence = max(commit->fence,
+ msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+}
+
+/* Start display thread function */
+/*
+ * msm_atomic_commit_dispatch - queue an async atomic commit onto the
+ * per-crtc display kthread worker.
+ * @dev: drm device
+ * @state: atomic state being committed
+ * @commit: commit tracker whose commit_work is queued
+ *
+ * Returns 0 only when the work was queued on a live display thread;
+ * otherwise -EINVAL.  Per the TODO below, only the first matching crtc
+ * in @state is dispatched (single-crtc-per-commit assumption).
+ */
+static int msm_atomic_commit_dispatch(struct drm_device *dev,
+ struct drm_atomic_state *state, struct msm_commit *commit)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL;
+ struct drm_crtc_state *crtc_state = NULL;
+ int ret = -EINVAL, i = 0, j = 0;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ for (j = 0; j < priv->num_crtcs; j++) {
+ if (priv->disp_thread[j].crtc_id ==
+ crtc->base.id) {
+ if (priv->disp_thread[j].thread) {
+ queue_kthread_work(
+ &priv->disp_thread[j].worker,
+ &commit->commit_work);
+ /* only return zero if work is
+ * queued successfully.
+ */
+ ret = 0;
+ } else {
+ DRM_ERROR(" Error for crtc_id: %d\n",
+ priv->disp_thread[j].crtc_id);
+ }
+ break;
+ }
+ }
+ /*
+ * TODO: handle cases where there will be more than
+ * one crtc per commit cycle. Remove this check then.
+ * Current assumption is there will be only one crtc
+ * per commit cycle.
+ */
+ if (j < priv->num_crtcs)
+ break;
+ }
 return ret;
 }
@@ -192,9 +527,8 @@ int msm_atomic_check(struct drm_device *dev,
* @state: the driver state object
* @async: asynchronous commit
*
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement asynchronous commits.
+ * This function commits with drm_atomic_helper_check() pre-validated state
+ * object. This can still fail when e.g. the framebuffer reservation fails.
*
* RETURNS
* Zero for success or -errno.
@@ -202,19 +536,21 @@ int msm_atomic_check(struct drm_device *dev,
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async)
{
+ struct msm_drm_private *priv = dev->dev_private;
int nplanes = dev->mode_config.num_total_plane;
int ncrtcs = dev->mode_config.num_crtc;
ktime_t timeout;
- struct msm_commit *c;
+ struct msm_commit *commit;
int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
return ret;
- c = commit_init(state);
- if (!c) {
- ret = -ENOMEM;
+ commit = commit_init(state);
+ if (IS_ERR_OR_NULL(commit)) {
+ ret = PTR_ERR(commit);
+ DRM_ERROR("commit_init failed: %d\n", ret);
goto error;
}
@@ -225,7 +561,7 @@ int msm_atomic_commit(struct drm_device *dev,
struct drm_crtc *crtc = state->crtcs[i];
if (!crtc)
continue;
- c->crtc_mask |= (1 << drm_crtc_index(crtc));
+ commit->crtc_mask |= (1 << drm_crtc_index(crtc));
}
/*
@@ -239,16 +575,17 @@ int msm_atomic_commit(struct drm_device *dev,
continue;
if ((plane->state->fb != new_state->fb) && new_state->fb)
- add_fb(c, new_state->fb);
+ commit_set_fence(commit, new_state->fb);
}
/*
* Wait for pending updates on any of the same crtc's and then
* mark our set of crtc's as busy:
*/
- ret = start_atomic(dev->dev_private, c->crtc_mask);
+ ret = start_atomic(dev->dev_private, commit->crtc_mask);
if (ret) {
- kfree(c);
+ DRM_ERROR("start_atomic failed: %d\n", ret);
+ commit_destroy(commit);
goto error;
}
@@ -261,6 +598,16 @@ int msm_atomic_commit(struct drm_device *dev,
drm_atomic_helper_swap_state(dev, state);
/*
+ * Provide the driver a chance to prepare for output fences. This is
+ * done after the point of no return, but before asynchronous commits
+ * are dispatched to work queues, so that the fence preparation is
+ * finished before the .atomic_commit returns.
+ */
+ if (priv && priv->kms && priv->kms->funcs &&
+ priv->kms->funcs->prepare_fence)
+ priv->kms->funcs->prepare_fence(priv->kms, state);
+
+ /*
* Everything below can be run asynchronously without the need to grab
* any modeset locks at all under one conditions: It must be guaranteed
* that the asynchronous work has either been cancelled (if the driver
@@ -277,16 +624,22 @@ int msm_atomic_commit(struct drm_device *dev,
*/
if (async) {
- msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
+ ret = msm_atomic_commit_dispatch(dev, state, commit);
+ if (ret) {
+ DRM_ERROR("%s: atomic commit failed\n", __func__);
+ drm_atomic_state_free(state);
+ commit_destroy(commit);
+ goto error;
+ }
return 0;
}
timeout = ktime_add_ms(ktime_get(), 1000);
/* uninterruptible wait */
- msm_wait_fence(dev, c->fence, &timeout, false);
+ msm_wait_fence(dev, commit->fence, &timeout, false);
- complete_commit(c);
+ complete_commit(commit);
return 0;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b88ce514eb8e..5d04b0384215 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -15,9 +16,13 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/of_address.h>
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
+#include "sde_wb.h"
+
+#define TEARDOWN_DEADLOCK_RETRY_MAX 5
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
@@ -29,7 +34,7 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.output_poll_changed = msm_fb_output_poll_changed,
- .atomic_check = msm_atomic_check,
+ .atomic_check = drm_atomic_helper_check,
.atomic_commit = msm_atomic_commit,
};
@@ -46,6 +51,29 @@ int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
return idx;
}
+/*
+ * msm_unregister_mmu - remove the most recently registered mmu.
+ *
+ * Only reverse-order (LIFO) teardown is supported: @mmu must be the
+ * last entry in priv->mmus.  A mismatch is logged and ignored rather
+ * than corrupting the table.
+ */
+void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ int idx;
+
+ if (priv->num_mmus <= 0) {
+ dev_err(dev->dev, "invalid num mmus %d\n", priv->num_mmus);
+ return;
+ }
+
+ idx = priv->num_mmus - 1;
+
+ /* only support reverse-order deallocation */
+ if (priv->mmus[idx] != mmu) {
+ dev_err(dev->dev, "unexpected mmu at idx %d\n", idx);
+ return;
+ }
+
+ --priv->num_mmus;
+ priv->mmus[idx] = 0;
+}
+
+
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -99,6 +127,11 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
return ptr;
}
+/*
+ * Counterpart to msm_ioremap(): release a devm-managed register mapping
+ * before the device itself is torn down.
+ */
+void msm_iounmap(struct platform_device *pdev, void __iomem *addr)
+{
+ devm_iounmap(&pdev->dev, addr);
+}
+
void msm_writel(u32 data, void __iomem *addr)
{
if (reglog)
@@ -120,7 +153,7 @@ struct vblank_event {
bool enable;
};
-static void vblank_ctrl_worker(struct work_struct *work)
+static void vblank_ctrl_worker(struct kthread_work *work)
{
struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
struct msm_vblank_ctrl, work);
@@ -168,7 +201,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
- queue_work(priv->wq, &vbl_ctrl->work);
+ queue_kthread_work(&priv->disp_thread[crtc_id].worker, &vbl_ctrl->work);
return 0;
}
@@ -180,21 +213,32 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
static int msm_unload(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = dev->platformdev;
struct msm_kms *kms = priv->kms;
struct msm_gpu *gpu = priv->gpu;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev, *tmp;
+ int i;
/* We must cancel and cleanup any pending vblank enable/disable
* work before drm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
- cancel_work_sync(&vbl_ctrl->work);
+ flush_kthread_work(&vbl_ctrl->work);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
kfree(vbl_ev);
}
+ /* clean up display commit worker threads */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ if (priv->disp_thread[i].thread) {
+ flush_kthread_worker(&priv->disp_thread[i].worker);
+ kthread_stop(priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+ }
+
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
drm_vblank_cleanup(dev);
@@ -226,6 +270,11 @@ static int msm_unload(struct drm_device *dev)
priv->vram.paddr, &attrs);
}
+ sde_evtlog_destroy();
+
+ sde_power_client_destroy(&priv->phandle, priv->pclient);
+ sde_power_resource_deinit(pdev, &priv->phandle);
+
component_unbind_all(dev->dev, dev);
dev->dev_private = NULL;
@@ -235,13 +284,20 @@ static int msm_unload(struct drm_device *dev)
return 0;
}
+#define KMS_MDP4 0
+#define KMS_MDP5 1
+#define KMS_SDE 2
+
static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
static const struct of_device_id match_types[] = { {
.compatible = "qcom,mdss_mdp",
- .data = (void *)5,
- }, {
+ .data = (void *)KMS_MDP5,
+ },
+ {
+ .compatible = "qcom,sde-kms",
+ .data = (void *)KMS_SDE,
/* end node */
} };
struct device *dev = &pdev->dev;
@@ -250,11 +306,9 @@ static int get_mdp_ver(struct platform_device *pdev)
if (match)
return (int)(unsigned long)match->data;
#endif
- return 4;
+ return KMS_MDP4;
}
-#include <linux/of_address.h>
-
static int msm_init_vram(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -330,12 +384,32 @@ static int msm_init_vram(struct drm_device *dev)
return ret;
}
+#ifdef CONFIG_OF
+static int msm_component_bind_all(struct device *dev,
+ struct drm_device *drm_dev)
+{
+ int ret;
+
+ ret = component_bind_all(dev, drm_dev);
+ if (ret)
+ DRM_ERROR("component_bind_all failed: %d\n", ret);
+
+ return ret;
+}
+#else
+static int msm_component_bind_all(struct device *dev,
+ struct drm_device *drm_dev)
+{
+ return 0;
+}
+#endif
+
static int msm_load(struct drm_device *dev, unsigned long flags)
{
struct platform_device *pdev = dev->platformdev;
struct msm_drm_private *priv;
struct msm_kms *kms;
- int ret;
+ int ret, i;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
@@ -345,22 +419,36 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = priv;
- priv->wq = alloc_ordered_workqueue("msm", 0);
+ priv->wq = alloc_ordered_workqueue("msm_drm", 0);
init_waitqueue_head(&priv->fence_event);
init_waitqueue_head(&priv->pending_crtcs_event);
+ INIT_LIST_HEAD(&priv->client_event_list);
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->fence_cbs);
INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
- INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+ init_kthread_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
spin_lock_init(&priv->vblank_ctrl.lock);
drm_mode_config_init(dev);
platform_set_drvdata(pdev, dev);
+ ret = sde_power_resource_init(pdev, &priv->phandle);
+ if (ret) {
+ pr_err("sde power resource init failed\n");
+ goto fail;
+ }
+
+ priv->pclient = sde_power_client_create(&priv->phandle, "sde");
+ if (IS_ERR_OR_NULL(priv->pclient)) {
+ pr_err("sde power client create failed\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
/* Bind all our sub-components: */
- ret = component_bind_all(dev->dev, dev);
+ ret = msm_component_bind_all(dev->dev, dev);
if (ret)
return ret;
@@ -368,13 +456,22 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto fail;
+ ret = sde_evtlog_init(dev->primary->debugfs_root);
+ if (ret) {
+ dev_err(dev->dev, "failed to init evtlog: %d\n", ret);
+ goto fail;
+ }
+
switch (get_mdp_ver(pdev)) {
- case 4:
+ case KMS_MDP4:
kms = mdp4_kms_init(dev);
break;
- case 5:
+ case KMS_MDP5:
kms = mdp5_kms_init(dev);
break;
+ case KMS_SDE:
+ kms = sde_kms_init(dev);
+ break;
default:
kms = ERR_PTR(-ENODEV);
break;
@@ -387,15 +484,16 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
* and (for example) use dmabuf/prime to share buffers with
* imx drm driver on iMX5
*/
+ priv->kms = NULL;
dev_err(dev->dev, "failed to load kms\n");
ret = PTR_ERR(kms);
goto fail;
}
priv->kms = kms;
+ pm_runtime_enable(dev->dev);
- if (kms) {
- pm_runtime_enable(dev->dev);
+ if (kms && kms->funcs && kms->funcs->hw_init) {
ret = kms->funcs->hw_init(kms);
if (ret) {
dev_err(dev->dev, "kms hw init failed: %d\n", ret);
@@ -403,6 +501,29 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
}
}
+ /* initialize commit thread structure */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
+ init_kthread_worker(&priv->disp_thread[i].worker);
+ priv->disp_thread[i].dev = dev;
+ priv->disp_thread[i].thread =
+ kthread_run(kthread_worker_fn,
+ &priv->disp_thread[i].worker,
+ "crtc_commit:%d",
+ priv->disp_thread[i].crtc_id);
+
+ if (IS_ERR(priv->disp_thread[i].thread)) {
+ dev_err(dev->dev, "failed to create kthread\n");
+ priv->disp_thread[i].thread = NULL;
+ /* clean up previously created threads if any */
+ for (i -= 1; i >= 0; i--) {
+ kthread_stop(priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+ goto fail;
+ }
+ }
+
dev->mode_config.funcs = &mode_config_funcs;
ret = drm_vblank_init(dev, priv->num_crtcs);
@@ -430,6 +551,15 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto fail;
+ /* perform subdriver post initialization */
+ if (kms && kms->funcs && kms->funcs->postinit) {
+ ret = kms->funcs->postinit(kms);
+ if (ret) {
+ dev_err(dev->dev, "kms post init failed: %d\n", ret);
+ goto fail;
+ }
+ }
+
drm_kms_helper_poll_init(dev);
return 0;
@@ -439,6 +569,11 @@ fail:
return ret;
}
+#ifdef CONFIG_QCOM_KGSL
+static void load_gpu(struct drm_device *dev)
+{
+}
+#else
static void load_gpu(struct drm_device *dev)
{
static DEFINE_MUTEX(init_lock);
@@ -451,6 +586,7 @@ static void load_gpu(struct drm_device *dev)
mutex_unlock(&init_lock);
}
+#endif
static int msm_open(struct drm_device *dev, struct drm_file *file)
{
@@ -467,17 +603,34 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = ctx;
+ if (dev && dev->dev_private) {
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms;
+
+ kms = priv->kms;
+ if (kms && kms->funcs && kms->funcs->postopen)
+ kms->funcs->postopen(kms, file);
+ }
return 0;
}
static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_file_private *ctx = file->driver_priv;
struct msm_kms *kms = priv->kms;
- if (kms)
+ if (kms && kms->funcs && kms->funcs->preclose)
kms->funcs->preclose(kms, file);
+}
+
+static void msm_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_kms *kms = priv->kms;
+
+ if (kms && kms->funcs && kms->funcs->postclose)
+ kms->funcs->postclose(kms, file);
mutex_lock(&dev->struct_mutex);
if (ctx == priv->lastctx)
@@ -487,11 +640,126 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)
kfree(ctx);
}
+/*
+ * Build and commit an atomic state that disables every non-primary plane
+ * and every crtc on @dev (primary planes are handled implicitly by the
+ * crtc disable via __drm_atomic_helper_set_config with a NULL mode).
+ * Individual plane/crtc disable failures are logged but do not abort
+ * the commit; the final drm_atomic_commit() result is returned so the
+ * caller can retry on -EDEADLK.
+ */
+static int msm_disable_all_modes_commit(
+ struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ unsigned plane_mask;
+ int ret;
+
+ plane_mask = 0;
+ drm_for_each_plane(plane, dev) {
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ plane_state->rotation = BIT(DRM_ROTATE_0);
+
+ plane->old_fb = plane->fb;
+ plane_mask |= 1 << drm_plane_index(plane);
+
+ /* disable non-primary: */
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ continue;
+
+ DRM_DEBUG("disabling plane %d\n", plane->base.id);
+
+ ret = __drm_atomic_helper_disable_plane(plane, plane_state);
+ if (ret != 0)
+ DRM_ERROR("error %d disabling plane %d\n", ret,
+ plane->base.id);
+ }
+
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_mode_set mode_set;
+
+ memset(&mode_set, 0, sizeof(struct drm_mode_set));
+ mode_set.crtc = crtc;
+
+ DRM_DEBUG("disabling crtc %d\n", crtc->base.id);
+
+ ret = __drm_atomic_helper_set_config(&mode_set, state);
+ if (ret != 0)
+ DRM_ERROR("error %d disabling crtc %d\n", ret,
+ crtc->base.id);
+ }
+
+ DRM_DEBUG("committing disables\n");
+ ret = drm_atomic_commit(state);
+
+fail:
+ /* always release the old_fb references taken above */
+ drm_atomic_clean_old_fb(dev, plane_mask, ret);
+ DRM_DEBUG("disables result %d\n", ret);
+ return ret;
+}
+
+/**
+ * msm_clear_all_modes - disables all planes and crtcs via an atomic commit
+ * based on restore_fbdev_mode_atomic in drm_fb_helper.c
+ * @dev: device pointer
+ * @Return: 0 on success, otherwise -error
+ */
+static int msm_disable_all_modes(struct drm_device *dev)
+{
+ struct drm_atomic_state *state;
+ int ret, i;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+ /* retry on modeset-lock deadlock, bounded by
+ * TEARDOWN_DEADLOCK_RETRY_MAX attempts
+ */
+ for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
+ ret = msm_disable_all_modes_commit(dev, state);
+ if (ret != -EDEADLK)
+ break;
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+ }
+
+ /* on successful atomic commit state ownership transfers to framework */
+ if (ret != 0)
+ drm_atomic_state_free(state);
+
+ return ret;
+}
+
static void msm_lastclose(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- if (priv->fbdev)
+ struct msm_kms *kms = priv->kms;
+ int i;
+
+ /*
+ * clean up vblank disable immediately as this is the last close.
+ */
+ for (i = 0; i < dev->num_crtcs; i++) {
+ struct drm_vblank_crtc *vblank = &dev->vblank[i];
+ struct timer_list *disable_timer = &vblank->disable_timer;
+
+ if (del_timer_sync(disable_timer))
+ disable_timer->function(disable_timer->data);
+ }
+
+ /* wait for pending vblank requests to be executed by worker thread */
+ flush_workqueue(priv->wq);
+
+ if (priv->fbdev) {
drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
+ } else {
+ drm_modeset_lock_all(dev);
+ msm_disable_all_modes(dev);
+ drm_modeset_unlock_all(dev);
+ if (kms && kms->funcs && kms->funcs->lastclose)
+ kms->funcs->lastclose(kms);
+ }
}
static irqreturn_t msm_irq(int irq, void *arg)
@@ -927,6 +1195,362 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
return msm_wait_fence(dev, args->fence, &timeout, true);
}
+/*
+ * Validate a register/deregister event request.  Returns 0 when the
+ * (event, object) pair is supported — currently VBLANK/HISTOGRAM/AD on
+ * CRTC objects only — and also fills req->index with the crtc index as
+ * a side effect.  Returns -ENOENT for an unknown object, -EINVAL for an
+ * unsupported combination; callers treat any nonzero as unsupported.
+ */
+static int msm_event_supported(struct drm_device *dev,
+ struct drm_msm_event_req *req)
+{
+ int ret = -EINVAL;
+ struct drm_mode_object *arg_obj;
+ struct drm_crtc *crtc;
+
+ arg_obj = drm_mode_object_find(dev, req->object_id, req->object_type);
+ if (!arg_obj)
+ return -ENOENT;
+
+ if (arg_obj->type == DRM_MODE_OBJECT_CRTC) {
+ crtc = obj_to_crtc(arg_obj);
+ req->index = drm_crtc_index(crtc);
+ }
+
+ switch (req->event) {
+ case DRM_EVENT_VBLANK:
+ case DRM_EVENT_HISTOGRAM:
+ case DRM_EVENT_AD:
+ if (arg_obj->type == DRM_MODE_OBJECT_CRTC)
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Destroy callback installed on pending vblank events
+ * (see msm_enable_vblank_event).  Invoked after userspace has read the
+ * event: if the owning client is still registered for VBLANK or AD
+ * events, re-arm the same event on dev->vblank_event_list (taking a new
+ * vblank reference) instead of freeing it.  For DRM_EVENT_AD, user_data
+ * carries a remaining-vsync count that is decremented per delivery and
+ * stops the re-arming when it reaches zero.
+ */
+static void msm_vblank_read_cb(struct drm_pending_event *e)
+{
+ struct drm_pending_vblank_event *vblank;
+ struct msm_drm_private *priv;
+ struct drm_file *file_priv;
+ struct drm_device *dev;
+ struct msm_drm_event *v;
+ int ret = 0;
+ bool need_vblank = false;
+
+ if (!e) {
+ DRM_ERROR("invalid pending event payload\n");
+ return;
+ }
+
+ vblank = container_of(e, struct drm_pending_vblank_event, base);
+ file_priv = vblank->base.file_priv;
+ dev = (file_priv && file_priv->minor) ? file_priv->minor->dev : NULL;
+ priv = (dev) ? dev->dev_private : NULL;
+ if (!priv) {
+ DRM_ERROR("invalid msm private\n");
+ return;
+ }
+
+ list_for_each_entry(v, &priv->client_event_list, base.link) {
+ if (v->base.file_priv != file_priv ||
+ (v->event.type != DRM_EVENT_VBLANK &&
+ v->event.type != DRM_EVENT_AD))
+ continue;
+ need_vblank = true;
+ /**
+ * User-space client requests for N vsyncs when event
+ * requested is DRM_EVENT_AD. Once the count reaches zero,
+ * notify stop requesting for additional vsync's.
+ */
+ if (v->event.type == DRM_EVENT_AD) {
+ if (vblank->event.user_data)
+ vblank->event.user_data--;
+ need_vblank = (vblank->event.user_data) ? true : false;
+ }
+ break;
+ }
+
+ if (!need_vblank) {
+ kfree(vblank);
+ } else {
+ ret = drm_vblank_get(dev, vblank->pipe);
+ if (!ret) {
+ list_add(&vblank->base.link, &dev->vblank_event_list);
+ } else {
+ DRM_ERROR("vblank enable failed ret %d\n", ret);
+ kfree(vblank);
+ }
+ }
+}
+
+/*
+ * Allocate a pending DRM_EVENT_VBLANK event for @file on crtc
+ * @req->index, take a vblank reference, and queue it on
+ * dev->vblank_event_list.  The event's destroy hook
+ * (msm_vblank_read_cb) re-arms it as long as the client stays
+ * registered.  req->client_context is echoed back in user_data.
+ *
+ * NOTE(review): the err_unlock paths return without a matching
+ * drm_vblank_put() after a successful drm_vblank_get() — verify the
+ * vblank refcount balance on these error paths.
+ */
+static int msm_enable_vblank_event(struct drm_device *dev,
+ struct drm_msm_event_req *req, struct drm_file *file)
+{
+ struct drm_pending_vblank_event *e;
+ int ret = 0;
+ unsigned long flags;
+ struct drm_vblank_crtc *vblank;
+
+ if (WARN_ON(req->index >= dev->num_crtcs))
+ return -EINVAL;
+
+ vblank = &dev->vblank[req->index];
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->pipe = req->index;
+ e->base.pid = current->pid;
+ e->event.base.type = DRM_EVENT_VBLANK;
+ e->event.base.length = sizeof(e->event);
+ e->event.user_data = req->client_context;
+ e->base.event = &e->event.base;
+ e->base.file_priv = file;
+ e->base.destroy = msm_vblank_read_cb;
+
+ ret = drm_vblank_get(dev, e->pipe);
+ if (ret) {
+ DRM_ERROR("failed to enable the vblank\n");
+ goto free;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (!vblank->enabled) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ if (file->event_space < sizeof(e->event)) {
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+ file->event_space -= sizeof(e->event);
+ list_add_tail(&e->base.link, &dev->vblank_event_list);
+err_unlock:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+free:
+ if (ret)
+ kfree(e);
+ return ret;
+}
+
+/*
+ * Dispatch an event-enable request by type.  AD and VBLANK both map to
+ * the vblank machinery; anything else is rejected with -EINVAL.
+ */
+static int msm_enable_event(struct drm_device *dev,
+ struct drm_msm_event_req *req, struct drm_file *file)
+{
+ int ret = -EINVAL;
+
+ switch (req->event) {
+ case DRM_EVENT_AD:
+ case DRM_EVENT_VBLANK:
+ ret = msm_enable_vblank_event(dev, req, file);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Remove and free every pending vblank event on dev->vblank_event_list
+ * that belongs to @file on pipe @req->index, dropping one vblank
+ * reference per removed event.  Always returns 0.
+ */
+static int msm_disable_vblank_event(struct drm_device *dev,
+ struct drm_msm_event_req *req,
+ struct drm_file *file)
+{
+ struct drm_pending_vblank_event *e, *t;
+
+ list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ if (e->pipe != req->index || file != e->base.file_priv)
+ continue;
+ list_del(&e->base.link);
+ drm_vblank_put(dev, req->index);
+ kfree(e);
+ }
+ return 0;
+}
+
+/*
+ * Dispatch an event-disable request by type; mirrors msm_enable_event.
+ * Unsupported event types return -EINVAL.
+ */
+static int msm_disable_event(struct drm_device *dev,
+ struct drm_msm_event_req *req,
+ struct drm_file *file)
+{
+ int ret = -EINVAL;
+
+ switch (req->event) {
+ case DRM_EVENT_AD:
+ case DRM_EVENT_VBLANK:
+ ret = msm_disable_vblank_event(dev, req, file);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+
+/*
+ * DRM_MSM_REGISTER_EVENT ioctl: register @file for a crtc event.
+ * Validates the request, rejects duplicates per (file, event,
+ * object_id), records the registration on priv->client_event_list
+ * under dev->event_lock, then enables event generation.  On enable
+ * failure the registration is unwound.  Returns 0 or -errno.
+ */
+static int msm_ioctl_register_event(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_event_req *req_event = data;
+ struct msm_drm_event *client;
+ struct msm_drm_event *v;
+ unsigned long flag = 0;
+ bool dup_request = false;
+ int ret = 0;
+
+ if (msm_event_supported(dev, req_event)) {
+ DRM_ERROR("unsupported event %x object %x object id %d\n",
+ req_event->event, req_event->object_type,
+ req_event->object_id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flag);
+ list_for_each_entry(v, &priv->client_event_list, base.link) {
+ if (v->base.file_priv != file)
+ continue;
+ if (v->event.type == req_event->event &&
+ v->info.object_id == req_event->object_id) {
+ DRM_ERROR("duplicate request for event %x obj id %d\n",
+ v->event.type, v->info.object_id);
+ dup_request = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flag);
+
+ if (dup_request)
+ return -EINVAL;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->base.file_priv = file;
+ client->base.pid = current->pid;
+ client->base.event = &client->event;
+ /* registration entries are plain allocations; destroy == kfree */
+ client->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+ client->event.type = req_event->event;
+ memcpy(&client->info, req_event, sizeof(client->info));
+
+ spin_lock_irqsave(&dev->event_lock, flag);
+ list_add_tail(&client->base.link, &priv->client_event_list);
+ spin_unlock_irqrestore(&dev->event_lock, flag);
+
+ ret = msm_enable_event(dev, req_event, file);
+ if (ret) {
+ DRM_ERROR("failed to enable event %x object %x object id %d\n",
+ req_event->event, req_event->object_type,
+ req_event->object_id);
+ spin_lock_irqsave(&dev->event_lock, flag);
+ list_del(&client->base.link);
+ spin_unlock_irqrestore(&dev->event_lock, flag);
+ kfree(client);
+ }
+ return ret;
+}
+
+/*
+ * DRM_MSM_DEREGISTER_EVENT ioctl: undo msm_ioctl_register_event.
+ * Disables event generation and removes the matching registration
+ * entry (if any) under dev->event_lock.  Deregistering an event that
+ * was never registered is not an error; returns 0 unless the request
+ * itself is unsupported.
+ */
+static int msm_ioctl_deregister_event(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_event_req *req_event = data;
+ struct msm_drm_event *client = NULL;
+ struct msm_drm_event *v, *vt;
+ unsigned long flag = 0;
+
+ if (msm_event_supported(dev, req_event)) {
+ DRM_ERROR("unsupported event %x object %x object id %d\n",
+ req_event->event, req_event->object_type,
+ req_event->object_id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flag);
+ msm_disable_event(dev, req_event, file);
+ list_for_each_entry_safe(v, vt, &priv->client_event_list, base.link) {
+ if (v->event.type == req_event->event &&
+ v->info.object_id == req_event->object_id &&
+ v->base.file_priv == file) {
+ client = v;
+ list_del(&client->base.link);
+ client->base.destroy(&client->base);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flag);
+
+ return 0;
+}
+
+/*
+ * msm_send_crtc_notification - fan out a crtc event to every registered
+ * client.
+ * @crtc: source crtc (matched against each registration's object_id)
+ * @event: event header; event->length is the payload size in bytes
+ * @payload: payload copied into each per-client notification
+ *
+ * For each matching registration, allocates (GFP_ATOMIC, under
+ * dev->event_lock) a notification carrying the payload, charges the
+ * client's event_space, queues it on the file's event_list and wakes
+ * readers.  Clients without enough event_space are skipped with an
+ * error log; allocation failures are skipped silently.
+ */
+void msm_send_crtc_notification(struct drm_crtc *crtc,
+ struct drm_event *event, u8 *payload)
+{
+ struct drm_device *dev = NULL;
+ struct msm_drm_private *priv = NULL;
+ unsigned long flags;
+ struct msm_drm_event *notify, *v;
+ int len = 0;
+
+ if (!crtc || !event || !event->length || !payload) {
+ DRM_ERROR("err param crtc %pK event %pK len %d payload %pK\n",
+ crtc, event, ((event) ? (event->length) : -1),
+ payload);
+ return;
+ }
+ dev = crtc->dev;
+ priv = (dev) ? dev->dev_private : NULL;
+ if (!dev || !priv) {
+ DRM_ERROR("invalid dev %pK priv %pK\n", dev, priv);
+ return;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_for_each_entry(v, &priv->client_event_list, base.link) {
+ if (v->event.type != event->type ||
+ crtc->base.id != v->info.object_id)
+ continue;
+ len = event->length + sizeof(struct drm_msm_event_resp);
+ if (v->base.file_priv->event_space < len) {
+ DRM_ERROR("Insufficient space to notify\n");
+ continue;
+ }
+ notify = kzalloc(len, GFP_ATOMIC);
+ if (!notify)
+ continue;
+ notify->base.file_priv = v->base.file_priv;
+ notify->base.event = &notify->event;
+ notify->base.pid = v->base.pid;
+ notify->base.destroy =
+ (void (*)(struct drm_pending_event *)) kfree;
+ notify->event.type = v->event.type;
+ notify->event.length = len;
+ list_add(&notify->base.link,
+ &notify->base.file_priv->event_list);
+ notify->base.file_priv->event_space -= len;
+ memcpy(&notify->info, &v->info, sizeof(notify->info));
+ memcpy(notify->data, payload, event->length);
+ wake_up_interruptible(&notify->base.file_priv->event_wait);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/*
+ * fops .release replacement for drm_release: before handing off to the
+ * core, tear down every event registration still owned by the closing
+ * file (disable generation, unlink, destroy) under dev->event_lock so
+ * no notification can target a dead drm_file.
+ */
+int msm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_minor *minor = file_priv->minor;
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_event *v, *vt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_for_each_entry_safe(v, vt, &priv->client_event_list, base.link) {
+ if (v->base.file_priv != file_priv)
+ continue;
+ list_del(&v->base.link);
+ msm_disable_event(dev, &v->info, file_priv);
+ v->base.destroy(&v->base);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return drm_release(inode, filp);
+}
+
static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -935,6 +1559,11 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(SDE_WB_CONFIG, sde_wb_config, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_REGISTER_EVENT, msm_ioctl_register_event,
+ DRM_UNLOCKED|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_DEREGISTER_EVENT, msm_ioctl_deregister_event,
+ DRM_UNLOCKED|DRM_CONTROL_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
@@ -946,7 +1575,7 @@ static const struct vm_operations_struct vm_ops = {
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = drm_open,
- .release = drm_release,
+ .release = msm_release,
.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
@@ -968,6 +1597,7 @@ static struct drm_driver msm_driver = {
.unload = msm_unload,
.open = msm_open,
.preclose = msm_preclose,
+ .postclose = msm_postclose,
.lastclose = msm_lastclose,
.set_busid = drm_platform_set_busid,
.irq_handler = msm_irq,
@@ -1000,7 +1630,7 @@ static struct drm_driver msm_driver = {
.ioctls = msm_ioctls,
.num_ioctls = DRM_MSM_NUM_IOCTLS,
.fops = &fops,
- .name = "msm",
+ .name = "msm_drm",
.desc = "MSM Snapdragon DRM",
.date = "20130625",
.major = 1,
@@ -1031,6 +1661,27 @@ static const struct dev_pm_ops msm_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};
+static int msm_drm_bind(struct device *dev)
+{
+ int ret;
+
+ ret = drm_platform_init(&msm_driver, to_platform_device(dev));
+ if (ret)
+ DRM_ERROR("drm_platform_init failed: %d\n", ret);
+
+ return ret;
+}
+
+static void msm_drm_unbind(struct device *dev)
+{
+ drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+}
+
+static const struct component_master_ops msm_drm_ops = {
+ .bind = msm_drm_bind,
+ .unbind = msm_drm_unbind,
+};
+
/*
* Componentized driver support:
*/
@@ -1062,27 +1713,31 @@ static int add_components(struct device *dev, struct component_match **matchptr,
return 0;
}
-#else
-static int compare_dev(struct device *dev, void *data)
+
+static int msm_add_master_component(struct device *dev,
+ struct component_match *match)
{
- return dev == data;
+ int ret;
+
+ ret = component_master_add_with_match(dev, &msm_drm_ops, match);
+ if (ret)
+ DRM_ERROR("component add match failed: %d\n", ret);
+
+ return ret;
}
-#endif
-static int msm_drm_bind(struct device *dev)
+#else
+static int compare_dev(struct device *dev, void *data)
{
- return drm_platform_init(&msm_driver, to_platform_device(dev));
+ return dev == data;
}
-static void msm_drm_unbind(struct device *dev)
+static int msm_add_master_component(struct device *dev,
+ struct component_match *match)
{
- drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+ return 0;
}
-
-static const struct component_master_ops msm_drm_ops = {
- .bind = msm_drm_bind,
- .unbind = msm_drm_unbind,
-};
+#endif
/*
* Platform driver:
@@ -1090,7 +1745,9 @@ static const struct component_master_ops msm_drm_ops = {
static int msm_pdev_probe(struct platform_device *pdev)
{
+ int ret;
struct component_match *match = NULL;
+
#ifdef CONFIG_OF
add_components(&pdev->dev, &match, "connectors");
add_components(&pdev->dev, &match, "gpus");
@@ -1120,15 +1777,16 @@ static int msm_pdev_probe(struct platform_device *pdev)
component_match_add(&pdev->dev, &match, compare_dev, dev);
}
#endif
-
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
+ ret = msm_add_master_component(&pdev->dev, match);
+
+ return ret;
}
static int msm_pdev_remove(struct platform_device *pdev)
{
+ msm_drm_unbind(&pdev->dev);
component_master_del(&pdev->dev, &msm_drm_ops);
-
return 0;
}
@@ -1140,6 +1798,7 @@ static const struct platform_device_id msm_id[] = {
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdp" }, /* mdp4 */
{ .compatible = "qcom,mdss_mdp" }, /* mdp5 */
+ { .compatible = "qcom,sde-kms" }, /* sde */
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
@@ -1148,13 +1807,23 @@ static struct platform_driver msm_platform_driver = {
.probe = msm_pdev_probe,
.remove = msm_pdev_remove,
.driver = {
- .name = "msm",
+ .name = "msm_drm",
.of_match_table = dt_match,
.pm = &msm_pm_ops,
},
.id_table = msm_id,
};
+#ifdef CONFIG_QCOM_KGSL
+void __init adreno_register(void)
+{
+}
+
+void __exit adreno_unregister(void)
+{
+}
+#endif
+
static int __init msm_drm_register(void)
{
DBG("init");
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 3be7a56b14f1..a2678882d57a 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -31,7 +32,9 @@
#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/of_graph.h>
+#include <linux/mdss_io_util.h>
#include <asm/sizes.h>
+#include <linux/kthread.h>
#ifndef CONFIG_OF
#include <mach/board.h>
@@ -48,6 +51,12 @@
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>
+#include "sde_power_handle.h"
+
+#define GET_MAJOR_REV(rev) ((rev) >> 28)
+#define GET_MINOR_REV(rev) (((rev) >> 16) & 0xFFF)
+#define GET_STEP_REV(rev) ((rev) & 0xFFFF)
+
struct msm_kms;
struct msm_gpu;
struct msm_mmu;
@@ -55,7 +64,12 @@ struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
-#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
+#define NUM_DOMAINS 4 /* one for KMS, then one per gpu core (?) */
+#define MAX_CRTCS 8
+#define MAX_PLANES 12
+#define MAX_ENCODERS 8
+#define MAX_BRIDGES 8
+#define MAX_CONNECTORS 8
struct msm_file_private {
/* currently we don't do anything useful with this.. but when
@@ -66,22 +80,181 @@ struct msm_file_private {
};
enum msm_mdp_plane_property {
- PLANE_PROP_ZPOS,
+ /* blob properties, always put these first */
+ PLANE_PROP_SCALER_V1,
+ PLANE_PROP_SCALER_V2,
+ PLANE_PROP_CSC_V1,
+ PLANE_PROP_INFO,
+ PLANE_PROP_SCALER_LUT_ED,
+ PLANE_PROP_SCALER_LUT_CIR,
+ PLANE_PROP_SCALER_LUT_SEP,
+ PLANE_PROP_SKIN_COLOR,
+ PLANE_PROP_SKY_COLOR,
+ PLANE_PROP_FOLIAGE_COLOR,
+
+ /* # of blob properties */
+ PLANE_PROP_BLOBCOUNT,
+
+ /* range properties */
+ PLANE_PROP_ZPOS = PLANE_PROP_BLOBCOUNT,
PLANE_PROP_ALPHA,
- PLANE_PROP_PREMULTIPLIED,
- PLANE_PROP_MAX_NUM
+ PLANE_PROP_COLOR_FILL,
+ PLANE_PROP_H_DECIMATE,
+ PLANE_PROP_V_DECIMATE,
+ PLANE_PROP_INPUT_FENCE,
+ PLANE_PROP_HUE_ADJUST,
+ PLANE_PROP_SATURATION_ADJUST,
+ PLANE_PROP_VALUE_ADJUST,
+ PLANE_PROP_CONTRAST_ADJUST,
+
+ /* enum/bitmask properties */
+ PLANE_PROP_ROTATION,
+ PLANE_PROP_BLEND_OP,
+ PLANE_PROP_SRC_CONFIG,
+
+ /* total # of properties */
+ PLANE_PROP_COUNT
+};
+
+enum msm_mdp_crtc_property {
+ CRTC_PROP_INFO,
+
+ /* # of blob properties */
+ CRTC_PROP_BLOBCOUNT,
+
+ /* range properties */
+ CRTC_PROP_INPUT_FENCE_TIMEOUT = CRTC_PROP_BLOBCOUNT,
+ CRTC_PROP_OUTPUT_FENCE,
+ CRTC_PROP_OUTPUT_FENCE_OFFSET,
+ CRTC_PROP_CORE_CLK,
+ CRTC_PROP_CORE_AB,
+ CRTC_PROP_CORE_IB,
+
+ /* total # of properties */
+ CRTC_PROP_COUNT
+};
+
+enum msm_mdp_conn_property {
+ /* blob properties, always put these first */
+ CONNECTOR_PROP_SDE_INFO,
+
+ /* # of blob properties */
+ CONNECTOR_PROP_BLOBCOUNT,
+
+ /* range properties */
+ CONNECTOR_PROP_OUT_FB = CONNECTOR_PROP_BLOBCOUNT,
+ CONNECTOR_PROP_RETIRE_FENCE,
+ CONNECTOR_PROP_DST_X,
+ CONNECTOR_PROP_DST_Y,
+ CONNECTOR_PROP_DST_W,
+ CONNECTOR_PROP_DST_H,
+
+ /* enum/bitmask properties */
+ CONNECTOR_PROP_TOPOLOGY_NAME,
+ CONNECTOR_PROP_TOPOLOGY_CONTROL,
+
+ /* total # of properties */
+ CONNECTOR_PROP_COUNT
};
struct msm_vblank_ctrl {
- struct work_struct work;
+ struct kthread_work work;
struct list_head event_list;
spinlock_t lock;
};
+#define MAX_H_TILES_PER_DISPLAY 2
+
+/**
+ * enum msm_display_compression - compression method used for pixel stream
+ * @MSM_DISPLAY_COMPRESS_NONE: Pixel data is not compressed
+ * @MSM_DISPLAY_COMPRESS_DSC: DSC compression is used
+ * @MSM_DISPLAY_COMPRESS_FBC: FBC compression is used
+ */
+enum msm_display_compression {
+ MSM_DISPLAY_COMPRESS_NONE,
+ MSM_DISPLAY_COMPRESS_DSC,
+ MSM_DISPLAY_COMPRESS_FBC,
+};
+
+/**
+ * enum msm_display_caps - features/capabilities supported by displays
+ * @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported
+ * @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
+ * @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported
+ * @MSM_DISPLAY_CAP_EDID: EDID supported
+ */
+enum msm_display_caps {
+ MSM_DISPLAY_CAP_VID_MODE = BIT(0),
+ MSM_DISPLAY_CAP_CMD_MODE = BIT(1),
+ MSM_DISPLAY_CAP_HOT_PLUG = BIT(2),
+ MSM_DISPLAY_CAP_EDID = BIT(3),
+};
+
+/**
+ * struct msm_display_info - defines display properties
+ * @intf_type: DRM_MODE_CONNECTOR_ display type
+ * @capabilities: Bitmask of display flags
+ * @num_of_h_tiles: Number of horizontal tiles in case of split interface
+ * @h_tile_instance: Controller instance used per tile. Number of elements is
+ * based on num_of_h_tiles
+ * @is_connected: Set to true if display is connected
+ * @width_mm: Physical width
+ * @height_mm: Physical height
+ * @max_width: Max width of display. In case of hot pluggable display
+ * this is max width supported by controller
+ * @max_height: Max height of display. In case of hot pluggable display
+ * this is max height supported by controller
+ * @compression: Compression supported by the display
+ */
+struct msm_display_info {
+ int intf_type;
+ uint32_t capabilities;
+
+ uint32_t num_of_h_tiles;
+ uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
+
+ bool is_connected;
+
+ unsigned int width_mm;
+ unsigned int height_mm;
+
+ uint32_t max_width;
+ uint32_t max_height;
+
+ enum msm_display_compression compression;
+};
+
+/**
+ * struct msm_drm_event - defines custom event notification struct
+ * @base: base object required for event notification by DRM framework.
+ * @event: event object required for event notification by DRM framework.
+ * @info: contains information of DRM object for which events has been
+ * requested.
+ * @data: memory location which contains response payload for event.
+ */
+struct msm_drm_event {
+ struct drm_pending_event base;
+ struct drm_event event;
+ struct drm_msm_event_req info;
+ u8 data[];
+};
+
+/* Commit thread specific structure */
+struct msm_drm_commit {
+ struct drm_device *dev;
+ struct task_struct *thread;
+ unsigned int crtc_id;
+ struct kthread_worker worker;
+};
+
struct msm_drm_private {
struct msm_kms *kms;
+ struct sde_power_handle phandle;
+ struct sde_power_client *pclient;
+
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
@@ -128,22 +301,29 @@ struct msm_drm_private {
struct msm_mmu *mmus[NUM_DOMAINS];
unsigned int num_planes;
- struct drm_plane *planes[8];
+ struct drm_plane *planes[MAX_PLANES];
unsigned int num_crtcs;
- struct drm_crtc *crtcs[8];
+ struct drm_crtc *crtcs[MAX_CRTCS];
+
+ struct msm_drm_commit disp_thread[MAX_CRTCS];
unsigned int num_encoders;
- struct drm_encoder *encoders[8];
+ struct drm_encoder *encoders[MAX_ENCODERS];
unsigned int num_bridges;
- struct drm_bridge *bridges[8];
+ struct drm_bridge *bridges[MAX_BRIDGES];
unsigned int num_connectors;
- struct drm_connector *connectors[8];
+ struct drm_connector *connectors[MAX_CONNECTORS];
/* Properties */
- struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
+ struct drm_property *plane_property[PLANE_PROP_COUNT];
+ struct drm_property *crtc_property[CRTC_PROP_COUNT];
+ struct drm_property *conn_property[CONNECTOR_PROP_COUNT];
+
+ /* Color processing properties for the crtc */
+ struct drm_property **cp_property;
/* VRAM carveout, used when no IOMMU: */
struct {
@@ -156,6 +336,9 @@ struct msm_drm_private {
} vram;
struct msm_vblank_ctrl vblank_ctrl;
+
+ /* list of clients waiting for events */
+ struct list_head client_event_list;
};
struct msm_format {
@@ -176,12 +359,11 @@ void __msm_fence_worker(struct work_struct *work);
(_cb)->func = _func; \
} while (0)
-int msm_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *state);
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
int msm_wait_fence(struct drm_device *dev, uint32_t fence,
ktime_t *timeout, bool interruptible);
@@ -264,6 +446,15 @@ enum msm_dsi_encoder_id {
MSM_DSI_CMD_ENCODER_ID = 1,
MSM_DSI_ENCODER_NUM = 2
};
+
+/**
+ * msm_send_crtc_notification - notify user-space clients of crtc events.
+ * @crtc: crtc that is generating the event.
+ * @event: event that needs to be notified.
+ * @payload: payload for the event.
+ */
+void msm_send_crtc_notification(struct drm_crtc *crtc,
+ struct drm_event *event, u8 *payload);
#ifdef CONFIG_DRM_MSM_DSI
void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
@@ -301,6 +492,7 @@ static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {}
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname);
+void msm_iounmap(struct platform_device *dev, void __iomem *addr);
void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
@@ -331,5 +523,4 @@ static inline int align_pitch(int width, int bpp)
/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
-
#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 121713281417..dca4de382581 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -133,8 +133,7 @@ struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- return msm_fb->format;
+ return fb ? (to_msm_framebuffer(fb))->format : NULL;
}
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
@@ -175,18 +174,20 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
struct msm_framebuffer *msm_fb = NULL;
struct drm_framebuffer *fb;
const struct msm_format *format;
- int ret, i, n;
+ int ret, i, num_planes;
unsigned int hsub, vsub;
+ bool is_modified = false;
DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
- n = drm_format_num_planes(mode_cmd->pixel_format);
+ num_planes = drm_format_num_planes(mode_cmd->pixel_format);
hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
- format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
+ format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
+ mode_cmd->modifier, num_planes);
if (!format) {
dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
@@ -204,27 +205,53 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->format = format;
- if (n > ARRAY_SIZE(msm_fb->planes)) {
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ for (i = 0; i < ARRAY_SIZE(mode_cmd->modifier); i++) {
+ if (mode_cmd->modifier[i]) {
+ is_modified = true;
+ break;
+ }
+ }
+ }
+
+ if (num_planes > ARRAY_SIZE(msm_fb->planes)) {
ret = -EINVAL;
goto fail;
}
- for (i = 0; i < n; i++) {
- unsigned int width = mode_cmd->width / (i ? hsub : 1);
- unsigned int height = mode_cmd->height / (i ? vsub : 1);
- unsigned int min_size;
-
- min_size = (height - 1) * mode_cmd->pitches[i]
- + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
- + mode_cmd->offsets[i];
-
- if (bos[i]->size < min_size) {
+ if (is_modified) {
+ if (!kms->funcs->check_modified_format) {
+ dev_err(dev->dev, "can't check modified fb format\n");
ret = -EINVAL;
goto fail;
+ } else {
+ ret = kms->funcs->check_modified_format(
+ kms, msm_fb->format, mode_cmd, bos);
+ if (ret)
+ goto fail;
+ }
+ } else {
+ for (i = 0; i < num_planes; i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+ unsigned int cpp;
+
+ cpp = drm_format_plane_cpp(mode_cmd->pixel_format, i);
+
+ min_size = (height - 1) * mode_cmd->pitches[i]
+ + width * cpp
+ + mode_cmd->offsets[i];
+
+ if (bos[i]->size < min_size) {
+ ret = -EINVAL;
+ goto fail;
+ }
}
+ }
+ for (i = 0; i < num_planes; i++)
msm_fb->planes[i] = bos[i];
- }
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index c76cc853b08a..6fa56abf0c78 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -295,16 +295,23 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
if (iommu_present(&platform_bus_type)) {
struct msm_mmu *mmu = priv->mmus[id];
- uint32_t offset;
if (WARN_ON(!mmu))
return -EINVAL;
- offset = (uint32_t)mmap_offset(obj);
- ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
- obj->size, IOMMU_READ | IOMMU_WRITE);
- msm_obj->domain[id].iova = offset;
+ if (obj->import_attach && mmu->funcs->map_dma_buf) {
+ ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt,
+ obj->import_attach->dmabuf,
+ DMA_BIDIRECTIONAL);
+ if (ret) {
+ DRM_ERROR("Unable to map dma buf\n");
+ return ret;
+ }
+ }
+ msm_obj->domain[id].iova =
+ sg_dma_address(msm_obj->sgt->sgl);
} else {
+ WARN_ONCE(1, "physical address being used\n");
msm_obj->domain[id].iova = physaddr(obj);
}
}
@@ -524,8 +531,11 @@ void msm_gem_free_object(struct drm_gem_object *obj)
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
struct msm_mmu *mmu = priv->mmus[id];
if (mmu && msm_obj->domain[id].iova) {
- uint32_t offset = msm_obj->domain[id].iova;
- mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
+ if (obj->import_attach && mmu->funcs->unmap_dma_buf) {
+ mmu->funcs->unmap_dma_buf(mmu, msm_obj->sgt,
+ obj->import_attach->dmabuf,
+ DMA_BIDIRECTIONAL);
+ }
}
}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 6fc59bfeedeb..2e4ae6b1c5d0 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -53,8 +53,7 @@ struct msm_gem_object {
void *vaddr;
struct {
- // XXX
- uint32_t iova;
+ dma_addr_t iova;
} domain[NUM_DOMAINS];
/* normally (resv == &_resv) except for imported bo's */
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 9bcabaada179..2ab50919f514 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -25,6 +26,15 @@
#define MAX_PLANE 4
+/**
+ * Device Private DRM Mode Flags
+ * drm_mode->private_flags
+ */
+/* Connector has interpreted seamless transition request as dynamic fps */
+#define MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS (1<<0)
+/* Transition to new mode requires a wait-for-vblank before the modeset */
+#define MSM_MODE_FLAG_VBLANK_PRE_MODESET (1<<1)
+
/* As there are different display controller blocks depending on the
* snapdragon version, the kms support is split out and the appropriate
* implementation is loaded at runtime. The kms module is responsible
@@ -33,6 +43,7 @@
struct msm_kms_funcs {
/* hw initialization: */
int (*hw_init)(struct msm_kms *kms);
+ int (*postinit)(struct msm_kms *kms);
/* irq handling: */
void (*irq_preinstall)(struct msm_kms *kms);
int (*irq_postinstall)(struct msm_kms *kms);
@@ -41,21 +52,38 @@ struct msm_kms_funcs {
int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
/* modeset, bracketing atomic_commit(): */
- void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
- void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+ void (*prepare_fence)(struct msm_kms *kms,
+ struct drm_atomic_state *state);
+ void (*prepare_commit)(struct msm_kms *kms,
+ struct drm_atomic_state *state);
+ void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+ void (*complete_commit)(struct msm_kms *kms,
+ struct drm_atomic_state *state);
/* functions to wait for atomic commit completed on each CRTC */
void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
struct drm_crtc *crtc);
+ /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
+ const struct msm_format *(*get_format)(struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t *modifiers,
+ const uint32_t modifiers_len);
+ /* do format checking on format modified through fb_cmd2 modifiers */
+ int (*check_modified_format)(const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
/* misc: */
- const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
int (*set_split_display)(struct msm_kms *kms,
struct drm_encoder *encoder,
struct drm_encoder *slave_encoder,
bool is_cmd_mode);
+ void (*postopen)(struct msm_kms *kms, struct drm_file *file);
/* cleanup: */
void (*preclose)(struct msm_kms *kms, struct drm_file *file);
+ void (*postclose)(struct msm_kms *kms, struct drm_file *file);
+ void (*lastclose)(struct msm_kms *kms);
void (*destroy)(struct msm_kms *kms);
};
@@ -74,7 +102,33 @@ static inline void msm_kms_init(struct msm_kms *kms,
kms->funcs = funcs;
}
+#ifdef CONFIG_DRM_MSM_MDP4
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
+#else
+static inline
+struct msm_kms *mdp4_kms_init(struct drm_device *dev) { return NULL; };
+#endif
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+struct msm_kms *sde_kms_init(struct drm_device *dev);
+
+/**
+ * Mode Set Utility Functions
+ */
+static inline bool msm_is_mode_seamless(const struct drm_display_mode *mode)
+{
+ return (mode->flags & DRM_MODE_FLAG_SEAMLESS);
+}
+
+static inline bool msm_is_mode_dynamic_fps(const struct drm_display_mode *mode)
+{
+ return ((mode->flags & DRM_MODE_FLAG_SEAMLESS) &&
+ (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS));
+}
+
+static inline bool msm_needs_vblank_pre_modeset(
+ const struct drm_display_mode *mode)
+{
+ return (mode->private_flags & MSM_MODE_FLAG_VBLANK_PRE_MODESET);
+}
#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 7cd88d9dc155..cbf0d4593522 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -20,6 +20,17 @@
#include <linux/iommu.h>
+struct msm_mmu;
+struct msm_gpu;
+
+enum msm_mmu_domain_type {
+ MSM_SMMU_DOMAIN_UNSECURE,
+ MSM_SMMU_DOMAIN_NRT_UNSECURE,
+ MSM_SMMU_DOMAIN_SECURE,
+ MSM_SMMU_DOMAIN_NRT_SECURE,
+ MSM_SMMU_DOMAIN_MAX,
+};
+
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
@@ -27,6 +38,14 @@ struct msm_mmu_funcs {
unsigned len, int prot);
int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
unsigned len);
+ int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
+ enum dma_data_direction dir);
+ void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
+ enum dma_data_direction dir);
+ int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
+ struct dma_buf *dma_buf, int dir);
+ void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
+ struct dma_buf *dma_buf, int dir);
void (*destroy)(struct msm_mmu *mmu);
};
@@ -44,5 +63,7 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
+struct msm_mmu *msm_smmu_new(struct device *dev,
+ enum msm_mmu_domain_type domain);
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c
new file mode 100644
index 000000000000..5a9e472ea59b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_prop.c
@@ -0,0 +1,662 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_prop.h"
+
+void msm_property_init(struct msm_property_info *info,
+ struct drm_mode_object *base,
+ struct drm_device *dev,
+ struct drm_property **property_array,
+ struct msm_property_data *property_data,
+ uint32_t property_count,
+ uint32_t blob_count,
+ uint32_t state_size)
+{
+ int i;
+
+ /* prevent access if any of these are NULL */
+ if (!base || !dev || !property_array || !property_data) {
+ property_count = 0;
+ blob_count = 0;
+
+ DRM_ERROR("invalid arguments, forcing zero properties\n");
+ return;
+ }
+
+ /* can't have more blob properties than total properties */
+ if (blob_count > property_count) {
+ blob_count = property_count;
+
+ DBG("Capping number of blob properties to %d", blob_count);
+ }
+
+ if (!info) {
+ DRM_ERROR("info pointer is NULL\n");
+ } else {
+ info->base = base;
+ info->dev = dev;
+ info->property_array = property_array;
+ info->property_data = property_data;
+ info->property_count = property_count;
+ info->blob_count = blob_count;
+ info->install_request = 0;
+ info->install_count = 0;
+ info->recent_idx = 0;
+ info->is_active = false;
+ info->state_size = state_size;
+ info->state_cache_size = 0;
+ mutex_init(&info->property_lock);
+
+ memset(property_data,
+ 0,
+ sizeof(struct msm_property_data) *
+ property_count);
+ INIT_LIST_HEAD(&info->dirty_list);
+
+ for (i = 0; i < property_count; ++i)
+ INIT_LIST_HEAD(&property_data[i].dirty_node);
+ }
+}
+
+void msm_property_destroy(struct msm_property_info *info)
+{
+ if (!info)
+ return;
+
+ /* reset dirty list */
+ INIT_LIST_HEAD(&info->dirty_list);
+
+ /* free state cache */
+ while (info->state_cache_size > 0)
+ kfree(info->state_cache[--(info->state_cache_size)]);
+
+ mutex_destroy(&info->property_lock);
+}
+
+int msm_property_pop_dirty(struct msm_property_info *info)
+{
+ struct list_head *item;
+ int rc = 0;
+
+ if (!info) {
+ DRM_ERROR("invalid info\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&info->property_lock);
+ if (list_empty(&info->dirty_list)) {
+ rc = -EAGAIN;
+ } else {
+ item = info->dirty_list.next;
+ list_del_init(item);
+ rc = container_of(item, struct msm_property_data, dirty_node)
+ - info->property_data;
+ DRM_DEBUG_KMS("property %d dirty\n", rc);
+ }
+ mutex_unlock(&info->property_lock);
+
+ return rc;
+}
+
+/**
+ * _msm_property_set_dirty_no_lock - flag given property as being dirty
+ * This function doesn't mutex protect the
+ * dirty linked list.
+ * @info: Pointer to property info container struct
+ * @property_idx: Property index
+ */
+static void _msm_property_set_dirty_no_lock(
+ struct msm_property_info *info,
+ uint32_t property_idx)
+{
+ if (!info || property_idx >= info->property_count) {
+ DRM_ERROR("invalid argument(s), info %pK, idx %u\n",
+ info, property_idx);
+ return;
+ }
+
+ /* avoid re-inserting if already dirty */
+ if (!list_empty(&info->property_data[property_idx].dirty_node)) {
+ DRM_DEBUG_KMS("property %u already dirty\n", property_idx);
+ return;
+ }
+
+ list_add_tail(&info->property_data[property_idx].dirty_node,
+ &info->dirty_list);
+}
+
+/**
+ * _msm_property_install_integer - install standard drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ * @force_dirty: Whether or not to filter 'dirty' status on unchanged values
+ */
+static void _msm_property_install_integer(struct msm_property_info *info,
+ const char *name, int flags, uint64_t min, uint64_t max,
+ uint64_t init, uint32_t property_idx, bool force_dirty)
+{
+ struct drm_property **prop;
+
+ if (!info)
+ return;
+
+ ++info->install_request;
+
+ if (!name || (property_idx >= info->property_count)) {
+ DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
+ } else {
+ prop = &info->property_array[property_idx];
+ /*
+ * Properties need to be attached to each drm object that
+ * uses them, but only need to be created once
+ */
+ if (*prop == 0) {
+ *prop = drm_property_create_range(info->dev,
+ flags, name, min, max);
+ if (*prop == 0)
+ DRM_ERROR("create %s property failed\n", name);
+ }
+
+ /* save init value for later */
+ info->property_data[property_idx].default_value = init;
+ info->property_data[property_idx].force_dirty = force_dirty;
+
+ /* always attach property, if created */
+ if (*prop) {
+ drm_object_attach_property(info->base, *prop, init);
+ ++info->install_count;
+ }
+ }
+}
+
+void msm_property_install_range(struct msm_property_info *info,
+ const char *name, int flags, uint64_t min, uint64_t max,
+ uint64_t init, uint32_t property_idx)
+{
+ _msm_property_install_integer(info, name, flags,
+ min, max, init, property_idx, false);
+}
+
+void msm_property_install_volatile_range(struct msm_property_info *info,
+ const char *name, int flags, uint64_t min, uint64_t max,
+ uint64_t init, uint32_t property_idx)
+{
+ _msm_property_install_integer(info, name, flags,
+ min, max, init, property_idx, true);
+}
+
+void msm_property_install_rotation(struct msm_property_info *info,
+ unsigned int supported_rotations, uint32_t property_idx)
+{
+ struct drm_property **prop;
+
+ if (!info)
+ return;
+
+ ++info->install_request;
+
+ if (property_idx >= info->property_count) {
+ DRM_ERROR("invalid property index %d\n", property_idx);
+ } else {
+ prop = &info->property_array[property_idx];
+ /*
+ * Properties need to be attached to each drm object that
+ * uses them, but only need to be created once
+ */
+ if (*prop == 0) {
+ *prop = drm_mode_create_rotation_property(info->dev,
+ supported_rotations);
+ if (*prop == 0)
+ DRM_ERROR("create rotation property failed\n");
+ }
+
+ /* save init value for later */
+ info->property_data[property_idx].default_value = 0;
+ info->property_data[property_idx].force_dirty = false;
+
+ /* always attach property, if created */
+ if (*prop) {
+ drm_object_attach_property(info->base, *prop, 0);
+ ++info->install_count;
+ }
+ }
+}
+
+void msm_property_install_enum(struct msm_property_info *info,
+ const char *name, int flags, int is_bitmask,
+ const struct drm_prop_enum_list *values, int num_values,
+ uint32_t property_idx)
+{
+ struct drm_property **prop;
+
+ if (!info)
+ return;
+
+ ++info->install_request;
+
+ if (!name || !values || !num_values ||
+ (property_idx >= info->property_count)) {
+ DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
+ } else {
+ prop = &info->property_array[property_idx];
+ /*
+ * Properties need to be attached to each drm object that
+ * uses them, but only need to be created once
+ */
+ if (*prop == 0) {
+ /* 'bitmask' is a special type of 'enum' */
+ if (is_bitmask)
+ *prop = drm_property_create_bitmask(info->dev,
+ DRM_MODE_PROP_BITMASK | flags,
+ name, values, num_values, -1);
+ else
+ *prop = drm_property_create_enum(info->dev,
+ DRM_MODE_PROP_ENUM | flags,
+ name, values, num_values);
+ if (*prop == 0)
+ DRM_ERROR("create %s property failed\n", name);
+ }
+
+ /* save init value for later */
+ info->property_data[property_idx].default_value = 0;
+ info->property_data[property_idx].force_dirty = false;
+
+ /* always attach property, if created */
+ if (*prop) {
+ drm_object_attach_property(info->base, *prop, 0);
+ ++info->install_count;
+ }
+ }
+}
+
+void msm_property_install_blob(struct msm_property_info *info,
+ const char *name, int flags, uint32_t property_idx)
+{
+ struct drm_property **prop;
+
+ if (!info)
+ return;
+
+ ++info->install_request;
+
+ if (!name || (property_idx >= info->blob_count)) {
+ DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
+ } else {
+ prop = &info->property_array[property_idx];
+ /*
+ * Properties need to be attached to each drm object that
+ * uses them, but only need to be created once
+ */
+ if (*prop == 0) {
+ /* use 'create' for blob property place holder */
+ *prop = drm_property_create(info->dev,
+ DRM_MODE_PROP_BLOB | flags, name, 0);
+ if (*prop == 0)
+ DRM_ERROR("create %s property failed\n", name);
+ }
+
+ /* save init value for later */
+ info->property_data[property_idx].default_value = 0;
+ info->property_data[property_idx].force_dirty = true;
+
+ /* always attach property, if created */
+ if (*prop) {
+ drm_object_attach_property(info->base, *prop, -1);
+ ++info->install_count;
+ }
+ }
+}
+
+int msm_property_install_get_status(struct msm_property_info *info)
+{
+ int rc = -ENOMEM;
+
+ if (info && (info->install_request == info->install_count))
+ rc = 0;
+
+ return rc;
+}
+
+/* Map a drm_property pointer back to its index in info->property_array. */
+int msm_property_index(struct msm_property_info *info,
+ struct drm_property *property)
+{
+ uint32_t count;
+ int32_t idx;
+ int rc = -EINVAL;
+
+ if (!info || !property) {
+ DRM_ERROR("invalid argument(s)\n");
+ } else {
+ /*
+ * Linear search, but start from last found index. This will
+ * help if any single property is accessed multiple times in a
+ * row. Ideally, we could keep a list of properties sorted in
+ * the order of most recent access, but that may be overkill
+ * for now.
+ */
+ mutex_lock(&info->property_lock);
+ idx = info->recent_idx;
+ count = info->property_count;
+ while (count) {
+ --count;
+
+ /* stop searching on match */
+ if (info->property_array[idx] == property) {
+ /* remember hit to speed up repeated lookups */
+ info->recent_idx = idx;
+ rc = idx;
+ break;
+ }
+
+ /* move to next valid index, wrapping at the front */
+ if (--idx < 0)
+ idx = info->property_count - 1;
+ }
+ mutex_unlock(&info->property_lock);
+ }
+
+ return rc;
+}
+
+/*
+ * Atomic-set helper: resolves the property index, takes a reference on any
+ * incoming blob (dropping the previously cached one), caches the value, and
+ * marks the property dirty when the value changed or force_dirty is set.
+ */
+int msm_property_atomic_set(struct msm_property_info *info,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs,
+ struct drm_property *property, uint64_t val)
+{
+ struct drm_property_blob *blob;
+ int property_idx, rc = -EINVAL;
+
+ /* msm_property_index safely handles NULL info/property */
+ property_idx = msm_property_index(info, property);
+ if (!info || (property_idx == -EINVAL) || !property_values) {
+ DRM_DEBUG("Invalid argument(s)\n");
+ } else {
+ /* extra handling for incoming properties */
+ mutex_lock(&info->property_lock);
+ if ((property->flags & DRM_MODE_PROP_BLOB) &&
+ (property_idx < info->blob_count) &&
+ property_blobs) {
+ /* DRM lookup also takes a reference */
+ blob = drm_property_lookup_blob(info->dev,
+ (uint32_t)val);
+ if (!blob) {
+ DRM_ERROR("blob not found\n");
+ val = 0;
+ } else {
+ DBG("Blob %u saved", blob->base.id);
+ val = blob->base.id;
+
+ /* save blob - need to clear previous ref */
+ if (property_blobs[property_idx])
+ drm_property_unreference_blob(
+ property_blobs[property_idx]);
+ property_blobs[property_idx] = blob;
+ }
+ }
+
+ /* update value and flag as dirty (force_dirty always dirties) */
+ if (property_values[property_idx] != val ||
+ info->property_data[property_idx].force_dirty) {
+ property_values[property_idx] = val;
+ _msm_property_set_dirty_no_lock(info, property_idx);
+
+ DBG("%s - %lld", property->name, val);
+ }
+ mutex_unlock(&info->property_lock);
+ rc = 0;
+ }
+
+ return rc;
+}
+
+/* Atomic-get helper: returns the locally cached value for 'property'. */
+int msm_property_atomic_get(struct msm_property_info *info,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs,
+ struct drm_property *property, uint64_t *val)
+{
+ int idx;
+
+ /* resolve the cache slot for this drm property */
+ idx = msm_property_index(info, property);
+ if (!info || (idx == -EINVAL) || !property_values || !val) {
+ DRM_DEBUG("Invalid argument(s)\n");
+ return -EINVAL;
+ }
+
+ /* read the cached value under the property lock */
+ mutex_lock(&info->property_lock);
+ *val = property_values[idx];
+ mutex_unlock(&info->property_lock);
+
+ return 0;
+}
+
+/* Allocate a state object, reusing one from the small per-info cache. */
+void *msm_property_alloc_state(struct msm_property_info *info)
+{
+ void *state = NULL;
+
+ if (!info) {
+ DRM_ERROR("invalid property info\n");
+ return NULL;
+ }
+
+ /* prefer a recycled object from the cache */
+ mutex_lock(&info->property_lock);
+ if (info->state_cache_size)
+ state = info->state_cache[--(info->state_cache_size)];
+ mutex_unlock(&info->property_lock);
+
+ /* cache miss: allocate fresh, outside the lock */
+ if (!state && info->state_size)
+ state = kmalloc(info->state_size, GFP_KERNEL);
+
+ if (!state)
+ DRM_ERROR("failed to allocate state\n");
+
+ return state;
+}
+
+/**
+ * _msm_property_free_state - helper function for freeing local state objects
+ * @info: Pointer to property info container struct
+ * @st: Pointer to state object
+ */
+static void _msm_property_free_state(struct msm_property_info *info, void *st)
+{
+ bool recycled = false;
+
+ if (!info || !st)
+ return;
+
+ /* return the object to the cache when there is room */
+ mutex_lock(&info->property_lock);
+ if (info->state_cache_size < MSM_PROP_STATE_CACHE_SIZE) {
+ info->state_cache[info->state_cache_size++] = st;
+ recycled = true;
+ }
+ mutex_unlock(&info->property_lock);
+
+ /* cache full: release the memory instead */
+ if (!recycled)
+ kfree(st);
+}
+
+/*
+ * Reset a state object and its caches to default values. Mostly used to
+ * initialize newly created state objects.
+ */
+void msm_property_reset_state(struct msm_property_info *info, void *state,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs)
+{
+ uint32_t i;
+
+ if (!info) {
+ DRM_ERROR("invalid property info\n");
+ return;
+ }
+
+ if (state)
+ memset(state, 0, info->state_size);
+
+ /*
+ * Assign default property values. This helper is mostly used
+ * to initialize newly created state objects.
+ */
+ if (property_values)
+ for (i = 0; i < info->property_count; ++i)
+ property_values[i] =
+ info->property_data[i].default_value;
+
+ /*
+ * NOTE(review): existing blob pointers are overwritten without
+ * dropping their references; only pass freshly created (or already
+ * released) state here - confirm at call sites.
+ */
+ if (property_blobs)
+ for (i = 0; i < info->blob_count; ++i)
+ property_blobs[i] = NULL;
+}
+
+/*
+ * Duplicate-state helper: shallow-copies the state object, then takes an
+ * extra reference on each cached blob since two states now point at them.
+ */
+void msm_property_duplicate_state(struct msm_property_info *info,
+ void *old_state, void *state,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs)
+{
+ uint32_t i;
+
+ if (!info || !old_state || !state) {
+ DRM_ERROR("invalid argument(s)\n");
+ return;
+ }
+
+ memcpy(state, old_state, info->state_size);
+
+ /*
+ * NOTE(review): property_blobs is presumably the NEW state's blob
+ * array (populated by the memcpy above) - confirm at call sites.
+ */
+ if (property_blobs) {
+ /* add ref count for blobs */
+ for (i = 0; i < info->blob_count; ++i)
+ if (property_blobs[i])
+ drm_property_reference_blob(property_blobs[i]);
+ }
+}
+
+/*
+ * Destroy-state helper: drops this state's blob references, then recycles
+ * or frees the state object via _msm_property_free_state.
+ */
+void msm_property_destroy_state(struct msm_property_info *info, void *state,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs)
+{
+ uint32_t i;
+
+ if (!info || !state) {
+ DRM_ERROR("invalid argument(s)\n");
+ return;
+ }
+ if (property_blobs) {
+ /* remove ref count for blobs */
+ for (i = 0; i < info->blob_count; ++i)
+ if (property_blobs[i])
+ drm_property_unreference_blob(
+ property_blobs[i]);
+ }
+
+ _msm_property_free_state(info, state);
+}
+
+/*
+ * Return the cached data pointer (and optionally the length) for a blob
+ * property, or NULL when no blob is cached at that index.
+ */
+void *msm_property_get_blob(struct msm_property_info *info,
+ struct drm_property_blob **property_blobs,
+ size_t *byte_len,
+ uint32_t property_idx)
+{
+ struct drm_property_blob *blob;
+ size_t len = 0;
+ void *rc = NULL;
+
+ if (!info || !property_blobs || (property_idx >= info->blob_count)) {
+ DRM_ERROR("invalid argument(s)\n");
+ } else {
+ blob = property_blobs[property_idx];
+ if (blob) {
+ len = blob->length;
+ rc = &blob->data;
+ }
+ }
+
+ /* always report a length, zero on failure */
+ if (byte_len)
+ *byte_len = len;
+
+ return rc;
+}
+
+/*
+ * Create a new blob from blob_data (when provided), point the drm object's
+ * blob property at it, and swap it into *blob_reference, releasing the
+ * previous blob. A NULL/empty blob_data clears the property.
+ */
+int msm_property_set_blob(struct msm_property_info *info,
+ struct drm_property_blob **blob_reference,
+ void *blob_data,
+ size_t byte_len,
+ uint32_t property_idx)
+{
+ struct drm_property_blob *blob = NULL;
+ int rc = -EINVAL;
+
+ if (!info || !blob_reference || (property_idx >= info->blob_count)) {
+ DRM_ERROR("invalid argument(s)\n");
+ } else {
+ /* create blob */
+ if (blob_data && byte_len) {
+ blob = drm_property_create_blob(info->dev,
+ byte_len,
+ blob_data);
+ if (IS_ERR_OR_NULL(blob)) {
+ rc = PTR_ERR(blob);
+ DRM_ERROR("failed to create blob, %d\n", rc);
+ goto exit;
+ }
+ }
+
+ /* update drm object; 0 clears the blob id */
+ rc = drm_object_property_set_value(info->base,
+ info->property_array[property_idx],
+ blob ? blob->base.id : 0);
+ if (rc) {
+ DRM_ERROR("failed to set blob to property\n");
+ /* drop the just-created blob on failure */
+ if (blob)
+ drm_property_unreference_blob(blob);
+ goto exit;
+ }
+
+ /* update local reference, releasing the old blob */
+ if (*blob_reference)
+ drm_property_unreference_blob(*blob_reference);
+ *blob_reference = blob;
+ }
+
+exit:
+ return rc;
+}
+
+/*
+ * Update a non-blob property on the drm object and its cached value.
+ * Intended for properties created with DRM_MODE_PROP_IMMUTABLE; for those,
+ * the new value also becomes the default.
+ */
+int msm_property_set_property(struct msm_property_info *info,
+ uint64_t *property_values,
+ uint32_t property_idx,
+ uint64_t val)
+{
+ int rc = -EINVAL;
+
+ /* blob-range indices are rejected; use msm_property_set_blob */
+ if (!info || (property_idx >= info->property_count) ||
+ property_idx < info->blob_count || !property_values) {
+ DRM_ERROR("invalid argument(s)\n");
+ } else {
+ struct drm_property *drm_prop;
+
+ mutex_lock(&info->property_lock);
+
+ /* update cached value (validated non-NULL above) */
+ property_values[property_idx] = val;
+
+ /* update the new default value for immutables */
+ drm_prop = info->property_array[property_idx];
+ if (drm_prop->flags & DRM_MODE_PROP_IMMUTABLE)
+ info->property_data[property_idx].default_value = val;
+
+ mutex_unlock(&info->property_lock);
+
+ /* update drm object */
+ rc = drm_object_property_set_value(info->base, drm_prop, val);
+ if (rc)
+ DRM_ERROR("failed set property value, idx %d rc %d\n",
+ property_idx, rc);
+ }
+
+ return rc;
+}
+
diff --git a/drivers/gpu/drm/msm/msm_prop.h b/drivers/gpu/drm/msm/msm_prop.h
new file mode 100644
index 000000000000..dbe28bdf5638
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_prop.h
@@ -0,0 +1,391 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_PROP_H_
+#define _MSM_PROP_H_
+
+#include <linux/list.h>
+#include "msm_drv.h"
+
+/* number of freed state objects cached per info struct to limit alloc churn */
+#define MSM_PROP_STATE_CACHE_SIZE 2
+
+/**
+ * struct msm_property_data - opaque structure for tracking per
+ * drm-object per property stuff
+ * @default_value: Default property value for this drm object
+ * @dirty_node: Linked list node to track if property is dirty or not
+ * @force_dirty: Always dirty property on incoming sets, rather than checking
+ * for modified values
+ */
+struct msm_property_data {
+ uint64_t default_value;
+ struct list_head dirty_node;
+ bool force_dirty;
+};
+
+/**
+ * struct msm_property_info: Structure for property/state helper functions
+ * @base: Pointer to base drm object (plane/crtc/etc.)
+ * @dev: Pointer to drm device object
+ * @property_array: Pointer to array for storing created property objects
+ * @property_data: Pointer to array for storing private property data
+ * @property_count: Total number of properties
+ * @blob_count: Total number of blob properties, should be <= count
+ * @install_request: Total number of property 'install' requests
+ * @install_count: Total number of successful 'install' requests
+ * @recent_idx: Index of property most recently accessed by set/get
+ * @dirty_list: List of all properties that have been 'atomic_set' but not
+ * yet cleared with 'msm_property_pop_dirty'
+ * @is_active: Whether or not drm component properties are 'active'
+ * @state_cache: Cache of local states, to prevent alloc/free thrashing
+ * @state_size: Size of local state structures
+ * @state_cache_size: Number of state structures currently stored in state_cache
+ * @property_lock: Mutex to protect local variables
+ */
+struct msm_property_info {
+ struct drm_mode_object *base;
+ struct drm_device *dev;
+
+ struct drm_property **property_array;
+ struct msm_property_data *property_data;
+ uint32_t property_count;
+ uint32_t blob_count;
+ uint32_t install_request;
+ uint32_t install_count;
+
+ int32_t recent_idx;
+
+ struct list_head dirty_list;
+ bool is_active;
+
+ void *state_cache[MSM_PROP_STATE_CACHE_SIZE];
+ uint32_t state_size;
+ int32_t state_cache_size;
+ /* guards recent_idx, dirty_list, is_active and the state cache */
+ struct mutex property_lock;
+};
+
+/**
+ * msm_property_get_default - query default value of a property
+ * @info: Pointer to property info container struct
+ * @property_idx: Property index
+ * Returns: Default value for specified property, or zero when info is
+ * invalid or the index is out of range
+ */
+static inline
+uint64_t msm_property_get_default(struct msm_property_info *info,
+ uint32_t property_idx)
+{
+ uint64_t value = 0;
+
+ if (!info)
+ return 0;
+
+ /* read the cached default under the property lock */
+ mutex_lock(&info->property_lock);
+ if (property_idx < info->property_count)
+ value = info->property_data[property_idx].default_value;
+ mutex_unlock(&info->property_lock);
+
+ return value;
+}
+
+/**
+ * msm_property_set_is_active - set overall 'active' status for all properties
+ * @info: Pointer to property info container struct
+ * @is_active: New 'is active' status
+ */
+static inline
+void msm_property_set_is_active(struct msm_property_info *info, bool is_active)
+{
+ if (!info)
+ return;
+
+ /* update the shared flag under the property lock */
+ mutex_lock(&info->property_lock);
+ info->is_active = is_active;
+ mutex_unlock(&info->property_lock);
+}
+
+/**
+ * msm_property_get_is_active - query property 'is active' status
+ * @info: Pointer to property info container struct
+ * Returns: Current 'is active' status, false when info is invalid
+ */
+static inline
+bool msm_property_get_is_active(struct msm_property_info *info)
+{
+ bool active = false;
+
+ if (!info)
+ return false;
+
+ /* read the shared flag under the property lock */
+ mutex_lock(&info->property_lock);
+ active = info->is_active;
+ mutex_unlock(&info->property_lock);
+
+ return active;
+}
+
+/**
+ * msm_property_pop_dirty - determine next dirty property and clear
+ * its dirty flag
+ * @info: Pointer to property info container struct
+ * Returns: Valid msm property index on success,
+ * -EAGAIN if no dirty properties are available
+ * Property indices returned from this function are similar
+ * to those returned by the msm_property_index function.
+ */
+int msm_property_pop_dirty(struct msm_property_info *info);
+
+/**
+ * msm_property_init - initialize property info structure
+ * @info: Pointer to property info container struct
+ * @base: Pointer to base drm object (plane/crtc/etc.)
+ * @dev: Pointer to drm device object
+ * @property_array: Pointer to array for storing created property objects
+ * @property_data: Pointer to array for storing private property data
+ * @property_count: Total number of properties
+ * @blob_count: Total number of blob properties, should be <= count
+ * @state_size: Size of local state object
+ */
+void msm_property_init(struct msm_property_info *info,
+ struct drm_mode_object *base,
+ struct drm_device *dev,
+ struct drm_property **property_array,
+ struct msm_property_data *property_data,
+ uint32_t property_count,
+ uint32_t blob_count,
+ uint32_t state_size);
+
+/**
+ * msm_property_destroy - destroy helper info structure
+ *
+ * @info: Pointer to property info container struct
+ */
+void msm_property_destroy(struct msm_property_info *info);
+
+/**
+ * msm_property_install_range - install standard drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_range(struct msm_property_info *info,
+ const char *name,
+ int flags,
+ uint64_t min,
+ uint64_t max,
+ uint64_t init,
+ uint32_t property_idx);
+
+/**
+ * msm_property_install_volatile_range - install drm range property
+ * This function is similar to msm_property_install_range, but assumes
+ * that the property is meant for holding user pointers or descriptors
+ * that may reference volatile data without having an updated value.
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_volatile_range(struct msm_property_info *info,
+ const char *name,
+ int flags,
+ uint64_t min,
+ uint64_t max,
+ uint64_t init,
+ uint32_t property_idx);
+
+/**
+ * msm_property_install_rotation - install standard drm rotation property
+ * @info: Pointer to property info container struct
+ * @supported_rotations: Bitmask of supported rotation values (see
+ * drm_mode_create_rotation_property for more details)
+ * @property_idx: Property index
+ */
+void msm_property_install_rotation(struct msm_property_info *info,
+ unsigned int supported_rotations,
+ uint32_t property_idx);
+
+/**
+ * msm_property_install_enum - install standard drm enum/bitmask property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @is_bitmask: Set to non-zero to create a bitmask property, rather than an
+ * enumeration one
+ * @values: Array of allowable enumeration/bitmask values
+ * @num_values: Size of values array
+ * @property_idx: Property index
+ */
+void msm_property_install_enum(struct msm_property_info *info,
+ const char *name,
+ int flags,
+ int is_bitmask,
+ const struct drm_prop_enum_list *values,
+ int num_values,
+ uint32_t property_idx);
+
+/**
+ * msm_property_install_blob - install standard drm blob property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Extra flags for property creation
+ * @property_idx: Property index
+ */
+void msm_property_install_blob(struct msm_property_info *info,
+ const char *name,
+ int flags,
+ uint32_t property_idx);
+
+/**
+ * msm_property_install_get_status - query overall status of property additions
+ * @info: Pointer to property info container struct
+ * Returns: Zero if previous property install calls were all successful
+ */
+int msm_property_install_get_status(struct msm_property_info *info);
+
+/**
+ * msm_property_index - determine property index from drm_property ptr
+ * @info: Pointer to property info container struct
+ * @property: Incoming property pointer
+ * Returns: Valid property index, or -EINVAL on error
+ */
+int msm_property_index(struct msm_property_info *info,
+ struct drm_property *property);
+
+/**
+ * msm_property_atomic_set - helper function for atomic property set callback
+ * @info: Pointer to property info container struct
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ * @property: Incoming property pointer
+ * @val: Incoming property value
+ * Returns: Zero on success
+ */
+int msm_property_atomic_set(struct msm_property_info *info,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs,
+ struct drm_property *property,
+ uint64_t val);
+
+/**
+ * msm_property_atomic_get - helper function for atomic property get callback
+ * @info: Pointer to property info container struct
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ * @property: Incoming property pointer
+ * @val: Pointer to variable for receiving property value
+ * Returns: Zero on success
+ */
+int msm_property_atomic_get(struct msm_property_info *info,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs,
+ struct drm_property *property,
+ uint64_t *val);
+
+/**
+ * msm_property_alloc_state - helper function for allocating local state objects
+ * @info: Pointer to property info container struct
+ */
+void *msm_property_alloc_state(struct msm_property_info *info);
+
+/**
+ * msm_property_reset_state - helper function for state reset callback
+ * @info: Pointer to property info container struct
+ * @state: Pointer to local state structure
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ */
+void msm_property_reset_state(struct msm_property_info *info,
+ void *state,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs);
+
+/**
+ * msm_property_duplicate_state - helper function for duplicate state cb
+ * @info: Pointer to property info container struct
+ * @old_state: Pointer to original state structure
+ * @state: Pointer to newly created state structure
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ */
+void msm_property_duplicate_state(struct msm_property_info *info,
+ void *old_state,
+ void *state,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs);
+
+/**
+ * msm_property_destroy_state - helper function for destroy state cb
+ * @info: Pointer to property info container struct
+ * @state: Pointer to local state structure
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ */
+void msm_property_destroy_state(struct msm_property_info *info,
+ void *state,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs);
+
+/**
+ * msm_property_get_blob - obtain cached data pointer for drm blob property
+ * @info: Pointer to property info container struct
+ * @property_blobs: Pointer to property blobs cache array
+ * @byte_len: Optional pointer to variable for accepting blob size
+ * @property_idx: Property index
+ * Returns: Pointer to blob data
+ */
+void *msm_property_get_blob(struct msm_property_info *info,
+ struct drm_property_blob **property_blobs,
+ size_t *byte_len,
+ uint32_t property_idx);
+
+/**
+ * msm_property_set_blob - update blob property on a drm object
+ * This function updates the blob property value of the given drm object. Its
+ * intended use is to update blob properties that have been created with the
+ * DRM_MODE_PROP_IMMUTABLE flag set.
+ * @info: Pointer to property info container struct
+ * @blob_reference: Reference to a pointer that holds the created data blob
+ * @blob_data: Pointer to blob data
+ * @byte_len: Length of blob data, in bytes
+ * @property_idx: Property index
+ * Returns: Zero on success
+ */
+int msm_property_set_blob(struct msm_property_info *info,
+ struct drm_property_blob **blob_reference,
+ void *blob_data,
+ size_t byte_len,
+ uint32_t property_idx);
+
+/**
+ * msm_property_set_property - update property on a drm object
+ * This function updates the property value of the given drm object. Its
+ * intended use is to update properties that have been created with the
+ * DRM_MODE_PROP_IMMUTABLE flag set.
+ * Note: This function cannot be called on a blob.
+ * @info: Pointer to property info container struct
+ * @property_values: Pointer to property values cache array
+ * @property_idx: Property index
+ * @val: value of the property to set
+ * Returns: Zero on success
+ */
+int msm_property_set_property(struct msm_property_info *info,
+ uint64_t *property_values,
+ uint32_t property_idx,
+ uint64_t val);
+
+#endif /* _MSM_PROP_H_ */
+
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
new file mode 100644
index 000000000000..f29c1df46691
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/msm_dma_iommu_mapping.h>
+
+#include <asm/dma-iommu.h>
+#include <soc/qcom/secure_buffer.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
+#ifndef SZ_4G
+#define SZ_4G (((size_t) SZ_1G) * 4)
+#endif
+
+/* per-context-bank client: the device attached to one dma-iommu mapping */
+struct msm_smmu_client {
+ struct device *dev;
+ struct dma_iommu_mapping *mmu_mapping;
+ bool domain_attached;
+};
+
+/* msm_mmu wrapper binding a client (and its platform device) to the ops */
+struct msm_smmu {
+ struct msm_mmu base;
+ struct device *client_dev;
+ struct msm_smmu_client *client;
+};
+
+/* static description of one SMMU domain: label, VA range and security */
+struct msm_smmu_domain {
+ const char *label;
+ size_t va_start;
+ size_t va_size;
+ bool secure;
+};
+
+#define to_msm_smmu(x) container_of(x, struct msm_smmu, base)
+#define msm_smmu_to_client(smmu) (smmu->client)
+
+/* forward declaration; defined after msm_smmu_new below */
+static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
+ const struct msm_smmu_domain *domain);
+
+/*
+ * Attach the client device to its pre-created dma-iommu mapping.
+ * 'names' and 'cnt' are unused by this SMMU implementation.
+ */
+static int msm_smmu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ int rc = 0;
+
+ if (!client) {
+ pr_err("undefined smmu client\n");
+ return -EINVAL;
+ }
+
+ /* domain attach only once */
+ if (client->domain_attached)
+ return 0;
+
+ rc = arm_iommu_attach_device(client->dev,
+ client->mmu_mapping);
+ if (rc) {
+ dev_err(client->dev, "iommu attach dev failed (%d)\n",
+ rc);
+ return rc;
+ }
+
+ client->domain_attached = true;
+
+ dev_dbg(client->dev, "iommu domain attached\n");
+
+ return 0;
+}
+
+/*
+ * Detach the client device from its iommu domain, if attached.
+ * 'names' and 'cnt' are unused by this SMMU implementation.
+ */
+static void msm_smmu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+
+ if (!client) {
+ pr_err("undefined smmu client\n");
+ return;
+ }
+
+ /* no-op when nothing is attached */
+ if (!client->domain_attached)
+ return;
+
+ arm_iommu_detach_device(client->dev);
+ client->domain_attached = false;
+ dev_dbg(client->dev, "iommu domain detached\n");
+}
+
+/*
+ * Map a scatter-gather table into the iommu domain starting at 'iova'.
+ * On partial failure, every segment mapped so far is rolled back.
+ */
+static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
+ struct sg_table *sgt, unsigned len, int prot)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ struct iommu_domain *domain;
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ unsigned int i, j;
+ int ret;
+
+ if (!client)
+ return -ENODEV;
+
+ domain = client->mmu_mapping->domain;
+ if (!domain || !sgt)
+ return -EINVAL;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ /*
+ * NOTE(review): physical address is truncated to u32 here;
+ * confirm all mappable memory sits below 4GB on this target.
+ */
+ u32 pa = sg_phys(sg) - sg->offset;
+ size_t bytes = sg->length + sg->offset;
+
+ VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+
+ ret = iommu_map(domain, da, pa, bytes, prot);
+ if (ret)
+ goto fail;
+
+ da += bytes;
+ }
+
+ return 0;
+
+fail:
+ /* roll back the 'i' segments that were successfully mapped */
+ da = iova;
+
+ for_each_sg(sgt->sgl, sg, i, j) {
+ size_t bytes = sg->length + sg->offset;
+
+ iommu_unmap(domain, da, bytes);
+ da += bytes;
+ }
+ return ret;
+}
+
+/* DMA-map a scatter-gather table through the client device. */
+static int msm_smmu_map_sg(struct msm_mmu *mmu, struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ int ret;
+
+ /* guard against an undefined client, as the other mmu ops do */
+ if (!client)
+ return -ENODEV;
+
+ /* dma_map_sg returns the number of mapped entries; 0 on failure */
+ ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents, dir);
+ if (ret != sgt->nents)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* DMA-unmap a scatter-gather table previously mapped via msm_smmu_map_sg. */
+static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+
+ /* guard against an undefined client, as the other mmu ops do */
+ if (!client)
+ return;
+
+ dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
+}
+
+/*
+ * Unmap a scatter-gather table from the iommu domain starting at 'iova'.
+ * Returns 0 on success or a negative errno.
+ */
+static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
+ struct sg_table *sgt, unsigned len)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ struct iommu_domain *domain;
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ int i;
+
+ if (!client)
+ return -ENODEV;
+
+ domain = client->mmu_mapping->domain;
+ if (!domain || !sgt)
+ return -EINVAL;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = sg->length + sg->offset;
+ size_t unmapped;
+
+ /*
+ * iommu_unmap returns the number of bytes actually unmapped;
+ * a short count is an error, not a byte count to pass up.
+ */
+ unmapped = iommu_unmap(domain, da, bytes);
+ if (unmapped < bytes)
+ return -EINVAL;
+
+ VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+
+ WARN_ON(!PAGE_ALIGNED(bytes));
+
+ da += bytes;
+ }
+
+ return 0;
+}
+
+/*
+ * Tear down the smmu wrapper, unregistering the client platform device
+ * when one was created (the unsecure domain reuses the parent device and
+ * leaves client_dev NULL, so the pdev is only derived when valid).
+ */
+static void msm_smmu_destroy(struct msm_mmu *mmu)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+
+ if (smmu->client_dev)
+ platform_device_unregister(
+ to_platform_device(smmu->client_dev));
+ kfree(smmu);
+}
+
+/* Lazily DMA-map a dma-buf's scatter-gather table for the client device. */
+static int msm_smmu_map_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
+ struct dma_buf *dma_buf, int dir)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ int ret;
+
+ /* guard against an undefined client, as the other mmu ops do */
+ if (!client)
+ return -ENODEV;
+
+ /* returns the number of mapped entries, like dma_map_sg */
+ ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl, sgt->nents, dir,
+ dma_buf);
+ if (ret != sgt->nents) {
+ DRM_ERROR("dma map sg failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+
+/* DMA-unmap a dma-buf previously mapped via msm_smmu_map_dma_buf. */
+static void msm_smmu_unmap_dma_buf(struct msm_mmu *mmu, struct sg_table *sgt,
+ struct dma_buf *dma_buf, int dir)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+
+ /* guard against an undefined client, as the other mmu ops do */
+ if (!client)
+ return;
+
+ msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir, dma_buf);
+}
+
+/* msm_mmu callback table for SMMU-backed address spaces */
+static const struct msm_mmu_funcs funcs = {
+ .attach = msm_smmu_attach,
+ .detach = msm_smmu_detach,
+ .map = msm_smmu_map,
+ .map_sg = msm_smmu_map_sg,
+ .unmap_sg = msm_smmu_unmap_sg,
+ .unmap = msm_smmu_unmap,
+ .map_dma_buf = msm_smmu_map_dma_buf,
+ .unmap_dma_buf = msm_smmu_unmap_dma_buf,
+ .destroy = msm_smmu_destroy,
+};
+
+/*
+ * Domain descriptors: secure domains span the full 4GB VA range; unsecure
+ * domains reserve the first 128K (guard against NULL-iova mappings).
+ */
+static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
+ [MSM_SMMU_DOMAIN_UNSECURE] = {
+ .label = "mdp_ns",
+ .va_start = SZ_128K,
+ .va_size = SZ_4G - SZ_128K,
+ .secure = false,
+ },
+ [MSM_SMMU_DOMAIN_SECURE] = {
+ .label = "mdp_s",
+ .va_start = 0,
+ .va_size = SZ_4G,
+ .secure = true,
+ },
+ [MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
+ .label = "rot_ns",
+ .va_start = SZ_128K,
+ .va_size = SZ_4G - SZ_128K,
+ .secure = false,
+ },
+ [MSM_SMMU_DOMAIN_NRT_SECURE] = {
+ .label = "rot_s",
+ .va_start = 0,
+ .va_size = SZ_4G,
+ .secure = true,
+ },
+};
+
+/* each compatible string maps 1:1 to a domain descriptor above */
+static const struct of_device_id msm_smmu_dt_match[] = {
+ { .compatible = "qcom,smmu_mdp_unsec",
+ .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] },
+ { .compatible = "qcom,smmu_mdp_sec",
+ .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_SECURE] },
+ { .compatible = "qcom,smmu_rot_unsec",
+ .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_UNSECURE] },
+ { .compatible = "qcom,smmu_rot_sec",
+ .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_SECURE] },
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_smmu_dt_match);
+
+/*
+ * Create (or resolve) the device backing the requested SMMU domain.
+ * Returns the child platform device for probed domains, NULL for the
+ * unsecure domain (which maps directly on the parent 'dev'), or an
+ * ERR_PTR on failure.
+ */
+static struct device *msm_smmu_device_create(struct device *dev,
+ enum msm_mmu_domain_type domain,
+ struct msm_smmu *smmu)
+{
+ struct device_node *child;
+ struct platform_device *pdev;
+ int i;
+ const char *compat = NULL;
+
+ /* find the compatible string registered for this domain */
+ for (i = 0; i < ARRAY_SIZE(msm_smmu_dt_match); i++) {
+ if (msm_smmu_dt_match[i].data == &msm_smmu_domains[domain]) {
+ compat = msm_smmu_dt_match[i].compatible;
+ break;
+ }
+ }
+
+ if (!compat) {
+ DRM_ERROR("unable to find matching domain for %d\n", domain);
+ return ERR_PTR(-ENOENT);
+ }
+ DRM_INFO("found domain %d compat: %s\n", domain, compat);
+
+ if (domain == MSM_SMMU_DOMAIN_UNSECURE) {
+ int rc;
+
+ /* unsecure domain: no child pdev, map on the parent device */
+ smmu->client = devm_kzalloc(dev,
+ sizeof(struct msm_smmu_client), GFP_KERNEL);
+ if (!smmu->client)
+ return ERR_PTR(-ENOMEM);
+
+ smmu->client->dev = dev;
+
+ rc = _msm_smmu_create_mapping(msm_smmu_to_client(smmu),
+ msm_smmu_dt_match[i].data);
+ if (rc) {
+ devm_kfree(dev, smmu->client);
+ smmu->client = NULL;
+ return ERR_PTR(rc);
+ }
+
+ /* NULL (not an error) signals "uses the parent device" */
+ return NULL;
+ }
+
+ /* other domains: spawn the child platform device; its probe
+ * (msm_smmu_probe) creates the mapping and sets drvdata */
+ child = of_find_compatible_node(dev->of_node, NULL, compat);
+ if (!child) {
+ DRM_ERROR("unable to find compatible node for %s\n", compat);
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_platform_device_create(child, NULL, dev);
+ if (!pdev) {
+ DRM_ERROR("unable to create smmu platform dev for domain %d\n",
+ domain);
+ return ERR_PTR(-ENODEV);
+ }
+
+ smmu->client = platform_get_drvdata(pdev);
+
+ return &pdev->dev;
+}
+
+/*
+ * Allocate an msm_mmu instance backed by the requested SMMU domain.
+ * Returns a valid msm_mmu pointer or an ERR_PTR.
+ */
+struct msm_mmu *msm_smmu_new(struct device *dev,
+ enum msm_mmu_domain_type domain)
+{
+ struct msm_smmu *smmu;
+ struct device *client_dev;
+
+ smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
+ if (!smmu)
+ return ERR_PTR(-ENOMEM);
+
+ /* client_dev may legitimately be NULL (unsecure domain) on success */
+ client_dev = msm_smmu_device_create(dev, domain, smmu);
+ if (IS_ERR(client_dev)) {
+ kfree(smmu);
+ /* client_dev is non-NULL here (IS_ERR), so it is returned */
+ return (void *)client_dev ? : ERR_PTR(-ENODEV);
+ }
+
+ smmu->client_dev = client_dev;
+ msm_mmu_init(&smmu->base, dev, &funcs);
+
+ return &smmu->base;
+}
+
+/*
+ * Create the dma-iommu mapping for a client according to its domain
+ * descriptor, tagging the domain with the secure VMID when required.
+ */
+static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
+ const struct msm_smmu_domain *domain)
+{
+ int rc;
+
+ client->mmu_mapping = arm_iommu_create_mapping(&platform_bus_type,
+ domain->va_start, domain->va_size);
+ if (IS_ERR(client->mmu_mapping)) {
+ dev_err(client->dev,
+ "iommu create mapping failed for domain=%s\n",
+ domain->label);
+ return PTR_ERR(client->mmu_mapping);
+ }
+
+ if (domain->secure) {
+ int secure_vmid = VMID_CP_PIXEL;
+
+ /* mark the domain as secure content-protected pixel data */
+ rc = iommu_domain_set_attr(client->mmu_mapping->domain,
+ DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
+ if (rc) {
+ dev_err(client->dev, "couldn't set secure pix vmid\n");
+ goto error;
+ }
+ }
+
+ DRM_INFO("Created domain %s [%zx,%zx] secure=%d\n",
+ domain->label, domain->va_start, domain->va_size,
+ domain->secure);
+
+ return 0;
+
+error:
+ /* undo the mapping created above */
+ arm_iommu_release_mapping(client->mmu_mapping);
+ return rc;
+}
+
+/**
+ * msm_smmu_probe()
+ * @pdev: platform device
+ *
+ * Each smmu context acts as a separate device and the context banks are
+ * configured with a VA range.
+ * Registers the clks as each context bank has its own clks, for which voting
+ * has to be done every time before using that context bank.
+ */
+static int msm_smmu_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct msm_smmu_client *client;
+ const struct msm_smmu_domain *domain;
+ int rc;
+
+ match = of_match_device(msm_smmu_dt_match, &pdev->dev);
+ if (!match || !match->data) {
+ dev_err(&pdev->dev, "probe failed as match data is invalid\n");
+ return -EINVAL;
+ }
+
+ /* match->data validated non-NULL above */
+ domain = match->data;
+
+ DRM_INFO("probing device %s\n", match->compatible);
+
+ client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->dev = &pdev->dev;
+
+ rc = _msm_smmu_create_mapping(client, domain);
+ if (rc)
+ return rc;
+
+ /* publish drvdata only once the mapping actually exists */
+ platform_set_drvdata(pdev, client);
+
+ return 0;
+}
+
+/* Detach (if needed) and release the client's iommu mapping on removal. */
+static int msm_smmu_remove(struct platform_device *pdev)
+{
+ struct msm_smmu_client *client;
+
+ client = platform_get_drvdata(pdev);
+ if (client->domain_attached) {
+ arm_iommu_detach_device(client->dev);
+ client->domain_attached = false;
+ }
+ arm_iommu_release_mapping(client->mmu_mapping);
+
+ return 0;
+}
+
+/* platform driver for the per-context-bank SMMU child devices */
+static struct platform_driver msm_smmu_driver = {
+ .probe = msm_smmu_probe,
+ .remove = msm_smmu_remove,
+ .driver = {
+ .name = "msmdrm_smmu",
+ .of_match_table = msm_smmu_dt_match,
+ },
+};
+
+static int __init msm_smmu_driver_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&msm_smmu_driver);
+ if (ret)
+ pr_err("mdss_smmu_register_driver() failed!\n");
+
+ return ret;
+}
+module_init(msm_smmu_driver_init);
+
+static void __exit msm_smmu_driver_cleanup(void)
+{
+ platform_driver_unregister(&msm_smmu_driver);
+}
+module_exit(msm_smmu_driver_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU driver");
diff --git a/drivers/gpu/drm/msm/sde/sde_backlight.c b/drivers/gpu/drm/msm/sde/sde_backlight.c
new file mode 100644
index 000000000000..9034eeb944fe
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_backlight.c
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_connector.h"
+#include <linux/backlight.h>
+#include "dsi_drm.h"
+
+#define SDE_BRIGHT_TO_BL(out, v, bl_max, max_bright) do {\
+ out = (2 * (v) * (bl_max) + max_bright);\
+ do_div(out, 2 * max_bright);\
+} while (0)
+
+/*
+ * Backlight-class update_status hook: translate the UI brightness in
+ * bd->props into a panel backlight level and push it to the connector's
+ * set_backlight op.  Always returns 0 (errors from the op are ignored).
+ */
+static int sde_backlight_device_update_status(struct backlight_device *bd)
+{
+	int brightness;
+	struct drm_connector *connector;
+	struct dsi_display *display;
+	struct sde_connector *c_conn;
+	int bl_lvl;
+
+	brightness = bd->props.brightness;
+
+	/* force the panel dark while blanked or suspended */
+	if ((bd->props.power != FB_BLANK_UNBLANK) ||
+			(bd->props.state & BL_CORE_FBBLANK) ||
+			(bd->props.state & BL_CORE_SUSPENDED))
+		brightness = 0;
+
+	connector = bl_get_data(bd);
+	c_conn = to_sde_connector(connector);
+	display = (struct dsi_display *) c_conn->display;
+	/* clamp to the panel's maximum backlight level */
+	if (brightness > display->panel->bl_config.bl_max_level)
+		brightness = display->panel->bl_config.bl_max_level;
+
+	/* This maps UI brightness into driver backlight level with
+	 * rounding
+	 */
+	SDE_BRIGHT_TO_BL(bl_lvl, brightness,
+			display->panel->bl_config.bl_max_level,
+			display->panel->bl_config.brightness_max_level);
+
+	/* never round a non-zero request all the way down to "off" */
+	if (!bl_lvl && brightness)
+		bl_lvl = 1;
+
+	if (c_conn->ops.set_backlight)
+		c_conn->ops.set_backlight(c_conn->display, bl_lvl);
+
+	return 0;
+}
+
+static int sde_backlight_device_get_brightness(struct backlight_device *bd)
+{
+ return 0;
+}
+
+static const struct backlight_ops sde_backlight_device_ops = {
+ .update_status = sde_backlight_device_update_status,
+ .get_brightness = sde_backlight_device_get_brightness,
+};
+
+/**
+ * sde_backlight_setup - register a backlight class device for a connector
+ * @connector: DRM connector to attach the backlight to
+ *
+ * Only DSI connectors get a backlight device; any other connector type
+ * falls through the switch and returns 0 silently.
+ *
+ * NOTE(review): the returned backlight_device is not stored anywhere
+ * visible here, so there appears to be no matching
+ * backlight_device_unregister() on teardown — confirm against the
+ * connector destroy path.
+ *
+ * Return: 0 on success or non-DSI connector, -EINVAL on NULL connector,
+ * -ENODEV if registration fails.
+ */
+int sde_backlight_setup(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	struct backlight_device *bd;
+	struct backlight_properties props;
+	struct dsi_display *display;
+	struct dsi_backlight_config *bl_config;
+
+	if (!connector)
+		return -EINVAL;
+
+	c_conn = to_sde_connector(connector);
+	memset(&props, 0, sizeof(props));
+	props.type = BACKLIGHT_RAW;
+	props.power = FB_BLANK_UNBLANK;
+
+	switch (c_conn->connector_type) {
+	case DRM_MODE_CONNECTOR_DSI:
+		display = (struct dsi_display *) c_conn->display;
+		bl_config = &display->panel->bl_config;
+		/* start at full brightness; userspace scales from here */
+		props.max_brightness = bl_config->brightness_max_level;
+		props.brightness = bl_config->brightness_max_level;
+		bd = backlight_device_register("sde-backlight",
+				connector->kdev,
+				connector,
+				&sde_backlight_device_ops, &props);
+		if (IS_ERR(bd)) {
+			pr_err("Failed to register backlight: %ld\n",
+					PTR_ERR(bd));
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_backlight.h b/drivers/gpu/drm/msm/sde/sde_backlight.h
new file mode 100644
index 000000000000..1ea130592302
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_backlight.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_BACKLIGHT_H_
+#define _SDE_BACKLIGHT_H_
+
+int sde_backlight_setup(struct drm_connector *connector);
+
+#endif /* _SDE_BACKLIGHT_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
new file mode 100644
index 000000000000..ef7492817983
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -0,0 +1,990 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <drm/msm_drm_pp.h>
+#include "sde_color_processing.h"
+#include "sde_kms.h"
+#include "sde_crtc.h"
+#include "sde_hw_dspp.h"
+#include "sde_hw_lm.h"
+
+struct sde_cp_node {
+ u32 property_id;
+ u32 prop_flags;
+ u32 feature;
+ void *blob_ptr;
+ uint64_t prop_val;
+ const struct sde_pp_blk *pp_blk;
+ struct list_head feature_list;
+ struct list_head active_list;
+ struct list_head dirty_list;
+ bool is_dspp_feature;
+};
+
+struct sde_cp_prop_attach {
+ struct drm_crtc *crtc;
+ struct drm_property *prop;
+ struct sde_cp_node *prop_node;
+ u32 feature;
+ uint64_t val;
+};
+
+static void dspp_pcc_install_property(struct drm_crtc *crtc);
+
+static void dspp_hsic_install_property(struct drm_crtc *crtc);
+
+static void dspp_ad_install_property(struct drm_crtc *crtc);
+
+static void dspp_vlut_install_property(struct drm_crtc *crtc);
+
+typedef void (*dspp_prop_install_func_t)(struct drm_crtc *crtc);
+
+static dspp_prop_install_func_t dspp_prop_install_func[SDE_DSPP_MAX];
+
+#define setup_dspp_prop_install_funcs(func) \
+do { \
+ func[SDE_DSPP_PCC] = dspp_pcc_install_property; \
+ func[SDE_DSPP_HSIC] = dspp_hsic_install_property; \
+ func[SDE_DSPP_AD] = dspp_ad_install_property; \
+ func[SDE_DSPP_VLUT] = dspp_vlut_install_property; \
+} while (0)
+
+typedef void (*lm_prop_install_func_t)(struct drm_crtc *crtc);
+
+static lm_prop_install_func_t lm_prop_install_func[SDE_MIXER_MAX];
+
+static void lm_gc_install_property(struct drm_crtc *crtc);
+
+#define setup_lm_prop_install_funcs(func) \
+ (func[SDE_MIXER_GC] = lm_gc_install_property)
+
+enum {
+ /* Append new DSPP features before SDE_CP_CRTC_DSPP_MAX */
+ /* DSPP Features start */
+ SDE_CP_CRTC_DSPP_IGC,
+ SDE_CP_CRTC_DSPP_PCC,
+ SDE_CP_CRTC_DSPP_GC,
+ SDE_CP_CRTC_DSPP_HUE,
+ SDE_CP_CRTC_DSPP_SAT,
+ SDE_CP_CRTC_DSPP_VAL,
+ SDE_CP_CRTC_DSPP_CONT,
+ SDE_CP_CRTC_DSPP_MEMCOLOR,
+ SDE_CP_CRTC_DSPP_SIXZONE,
+ SDE_CP_CRTC_DSPP_GAMUT,
+ SDE_CP_CRTC_DSPP_DITHER,
+ SDE_CP_CRTC_DSPP_HIST,
+ SDE_CP_CRTC_DSPP_AD,
+ SDE_CP_CRTC_DSPP_VLUT,
+ SDE_CP_CRTC_DSPP_MAX,
+ /* DSPP features end */
+
+ /* Append new LM features before SDE_CP_CRTC_MAX_FEATURES */
+ /* LM feature start*/
+ SDE_CP_CRTC_LM_GC,
+ /* LM feature end*/
+
+ SDE_CP_CRTC_MAX_FEATURES,
+};
+
+#define INIT_PROP_ATTACH(p, crtc, prop, node, feature, val) \
+ do { \
+ (p)->crtc = crtc; \
+ (p)->prop = prop; \
+ (p)->prop_node = node; \
+ (p)->feature = feature; \
+ (p)->val = val; \
+ } while (0)
+
+/**
+ * sde_cp_get_hw_payload - extract the HW payload for a feature node
+ * @prop_node: feature node (blob- or range-backed)
+ * @hw_cfg: output config; zeroed first, then filled with len/payload
+ * @feature_enabled: set true when the feature carries an active payload
+ *
+ * Blob properties use the blob's data directly.  Range properties either
+ * pass the raw 64-bit value (no local blob) or, when a local blob was
+ * created for them, use the blob data gated by a non-zero prop_val.
+ */
+static void sde_cp_get_hw_payload(struct sde_cp_node *prop_node,
+				  struct sde_hw_cp_cfg *hw_cfg,
+				  bool *feature_enabled)
+{
+
+	struct drm_property_blob *blob = NULL;
+
+	memset(hw_cfg, 0, sizeof(*hw_cfg));
+	*feature_enabled = false;
+
+	blob = prop_node->blob_ptr;
+	if (prop_node->prop_flags & DRM_MODE_PROP_BLOB) {
+		if (blob) {
+			hw_cfg->len = blob->length;
+			hw_cfg->payload = blob->data;
+			*feature_enabled = true;
+		}
+	} else if (prop_node->prop_flags & DRM_MODE_PROP_RANGE) {
+		/* Check if local blob is Set */
+		if (!blob) {
+			/* no local blob: the 64-bit value itself is payload */
+			hw_cfg->len = sizeof(prop_node->prop_val);
+			if (prop_node->prop_val)
+				hw_cfg->payload = &prop_node->prop_val;
+		} else {
+			/* local blob: prop_val acts as an enable switch */
+			hw_cfg->len = (prop_node->prop_val) ? blob->length :
+					0;
+			hw_cfg->payload = (prop_node->prop_val) ? blob->data
+						: NULL;
+		}
+		if (prop_node->prop_val)
+			*feature_enabled = true;
+	} else {
+		DRM_ERROR("property type is not supported\n");
+	}
+}
+
+static int sde_cp_disable_crtc_blob_property(struct sde_cp_node *prop_node)
+{
+ struct drm_property_blob *blob = prop_node->blob_ptr;
+
+ if (!blob)
+ return -EINVAL;
+ drm_property_unreference_blob(blob);
+ prop_node->blob_ptr = NULL;
+ return 0;
+}
+
+static int sde_cp_create_local_blob(struct drm_crtc *crtc, u32 feature, int len)
+{
+ int ret = -EINVAL;
+ bool found = false;
+ struct sde_cp_node *prop_node = NULL;
+ struct drm_property_blob *blob_ptr;
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+
+ list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
+ if (prop_node->feature == feature) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found || prop_node->prop_flags & DRM_MODE_PROP_BLOB) {
+ DRM_ERROR("local blob create failed prop found %d flags %d\n",
+ found, prop_node->prop_flags);
+ return ret;
+ }
+
+ blob_ptr = drm_property_create_blob(crtc->dev, len, NULL);
+ ret = (IS_ERR_OR_NULL(blob_ptr)) ? PTR_ERR(blob_ptr) : 0;
+ if (!ret)
+ prop_node->blob_ptr = blob_ptr;
+
+ return ret;
+}
+
+static void sde_cp_destroy_local_blob(struct sde_cp_node *prop_node)
+{
+ if (!(prop_node->prop_flags & DRM_MODE_PROP_BLOB) &&
+ prop_node->blob_ptr)
+ drm_property_unreference_blob(prop_node->blob_ptr);
+}
+
+/**
+ * sde_cp_handle_range_property - accept a new value for a range property
+ * @prop_node: feature node; may own a local blob
+ * @val: new value; when a local blob exists and val != 0, val is treated
+ *       as a userspace pointer to blob->length bytes of payload
+ *
+ * NOTE(review): copy_from_user() returns the number of bytes NOT copied,
+ * not an errno, so the DRM_ERROR below logs that residual count before
+ * it is overwritten with -EFAULT — confirm this is the intended message.
+ *
+ * Return: 0 on success, -EFAULT if the user copy fails.
+ */
+static int sde_cp_handle_range_property(struct sde_cp_node *prop_node,
+					uint64_t val)
+{
+	int ret = 0;
+	struct drm_property_blob *blob_ptr = prop_node->blob_ptr;
+
+	if (!blob_ptr) {
+		prop_node->prop_val = val;
+		return 0;
+	}
+
+	if (!val) {
+		prop_node->prop_val = 0;
+		return 0;
+	}
+
+	ret = copy_from_user(blob_ptr->data, (void *)val, blob_ptr->length);
+	if (ret) {
+		DRM_ERROR("failed to get the property info ret %d", ret);
+		ret = -EFAULT;
+	} else {
+		prop_node->prop_val = val;
+	}
+
+	return ret;
+}
+
+static int sde_cp_disable_crtc_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ struct sde_cp_node *prop_node)
+{
+ int ret = -EINVAL;
+
+ if (property->flags & DRM_MODE_PROP_BLOB)
+ ret = sde_cp_disable_crtc_blob_property(prop_node);
+ else if (property->flags & DRM_MODE_PROP_RANGE)
+ ret = sde_cp_handle_range_property(prop_node, 0);
+ return ret;
+}
+
+/**
+ * sde_cp_enable_crtc_blob_property - resolve and adopt a blob payload
+ * @crtc: owning crtc (used for the DRM device blob lookup)
+ * @prop_node: feature node that will hold the blob reference
+ * @val: blob id supplied by userspace
+ *
+ * Return: 0 on success, -EINVAL when the blob id does not resolve.
+ */
+static int sde_cp_enable_crtc_blob_property(struct drm_crtc *crtc,
+					       struct sde_cp_node *prop_node,
+					       uint64_t val)
+{
+	struct drm_property_blob *blob = NULL;
+
+	/**
+	 * For non-blob based properties add support to create a blob
+	 * using the val and store the blob_ptr in prop_node.
+	 */
+	blob = drm_property_lookup_blob(crtc->dev, val);
+	if (!blob) {
+		DRM_ERROR("invalid blob id %lld\n", val);
+		return -EINVAL;
+	}
+	/* Release reference to existing payload of the property */
+	if (prop_node->blob_ptr)
+		drm_property_unreference_blob(prop_node->blob_ptr);
+
+	prop_node->blob_ptr = blob;
+	return 0;
+}
+
+static int sde_cp_enable_crtc_property(struct drm_crtc *crtc,
+ struct drm_property *property,
+ struct sde_cp_node *prop_node,
+ uint64_t val)
+{
+ int ret = -EINVAL;
+
+ if (property->flags & DRM_MODE_PROP_BLOB)
+ ret = sde_cp_enable_crtc_blob_property(crtc, prop_node, val);
+ else if (property->flags & DRM_MODE_PROP_RANGE)
+ ret = sde_cp_handle_range_property(prop_node, val);
+ return ret;
+}
+
+static struct sde_kms *get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+
+ return to_sde_kms(priv->kms);
+}
+
+static void sde_cp_crtc_prop_attach(struct sde_cp_prop_attach *prop_attach)
+{
+
+ struct sde_crtc *sde_crtc = to_sde_crtc(prop_attach->crtc);
+
+ drm_object_attach_property(&prop_attach->crtc->base,
+ prop_attach->prop, prop_attach->val);
+
+ INIT_LIST_HEAD(&prop_attach->prop_node->active_list);
+ INIT_LIST_HEAD(&prop_attach->prop_node->dirty_list);
+
+ prop_attach->prop_node->property_id = prop_attach->prop->base.id;
+ prop_attach->prop_node->prop_flags = prop_attach->prop->flags;
+ prop_attach->prop_node->feature = prop_attach->feature;
+
+ if (prop_attach->feature < SDE_CP_CRTC_DSPP_MAX)
+ prop_attach->prop_node->is_dspp_feature = true;
+ else
+ prop_attach->prop_node->is_dspp_feature = false;
+
+ list_add(&prop_attach->prop_node->feature_list,
+ &sde_crtc->feature_list);
+}
+
+void sde_cp_crtc_init(struct drm_crtc *crtc)
+{
+ struct sde_crtc *sde_crtc = NULL;
+
+ if (!crtc) {
+ DRM_ERROR("invalid crtc %pK\n", crtc);
+ return;
+ }
+
+ sde_crtc = to_sde_crtc(crtc);
+ if (!sde_crtc) {
+ DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+ return;
+ }
+
+ INIT_LIST_HEAD(&sde_crtc->active_list);
+ INIT_LIST_HEAD(&sde_crtc->dirty_list);
+ INIT_LIST_HEAD(&sde_crtc->feature_list);
+}
+
+static void sde_cp_crtc_install_immutable_property(struct drm_crtc *crtc,
+ char *name,
+ u32 feature)
+{
+ struct drm_property *prop;
+ struct sde_cp_node *prop_node = NULL;
+ struct msm_drm_private *priv;
+ struct sde_cp_prop_attach prop_attach;
+ uint64_t val = 0;
+
+ if (feature >= SDE_CP_CRTC_MAX_FEATURES) {
+ DRM_ERROR("invalid feature %d max %d\n", feature,
+ SDE_CP_CRTC_MAX_FEATURES);
+ return;
+ }
+
+ prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+ if (!prop_node)
+ return;
+
+ priv = crtc->dev->dev_private;
+ prop = priv->cp_property[feature];
+
+ if (!prop) {
+ prop = drm_property_create(crtc->dev, DRM_MODE_PROP_IMMUTABLE,
+ name, 0);
+ if (!prop) {
+ DRM_ERROR("property create failed: %s\n", name);
+ kfree(prop_node);
+ return;
+ }
+ priv->cp_property[feature] = prop;
+ }
+
+ INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+ feature, val);
+ sde_cp_crtc_prop_attach(&prop_attach);
+}
+
+static void sde_cp_crtc_install_range_property(struct drm_crtc *crtc,
+ char *name,
+ u32 feature,
+ uint64_t min, uint64_t max,
+ uint64_t val)
+{
+ struct drm_property *prop;
+ struct sde_cp_node *prop_node = NULL;
+ struct msm_drm_private *priv;
+ struct sde_cp_prop_attach prop_attach;
+
+ if (feature >= SDE_CP_CRTC_MAX_FEATURES) {
+ DRM_ERROR("invalid feature %d max %d\n", feature,
+ SDE_CP_CRTC_MAX_FEATURES);
+ return;
+ }
+
+ prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+ if (!prop_node)
+ return;
+
+ priv = crtc->dev->dev_private;
+ prop = priv->cp_property[feature];
+
+ if (!prop) {
+ prop = drm_property_create_range(crtc->dev, 0, name, min, max);
+ if (!prop) {
+ DRM_ERROR("property create failed: %s\n", name);
+ kfree(prop_node);
+ return;
+ }
+ priv->cp_property[feature] = prop;
+ }
+
+ INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+ feature, val);
+
+ sde_cp_crtc_prop_attach(&prop_attach);
+}
+
+static void sde_cp_crtc_create_blob_property(struct drm_crtc *crtc, char *name,
+ u32 feature)
+{
+ struct drm_property *prop;
+ struct sde_cp_node *prop_node = NULL;
+ struct msm_drm_private *priv;
+ uint64_t val = 0;
+ struct sde_cp_prop_attach prop_attach;
+
+ if (feature >= SDE_CP_CRTC_MAX_FEATURES) {
+ DRM_ERROR("invalid feature %d max %d\n", feature,
+ SDE_CP_CRTC_MAX_FEATURES);
+ return;
+ }
+
+ prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+ if (!prop_node)
+ return;
+
+ priv = crtc->dev->dev_private;
+ prop = priv->cp_property[feature];
+
+ if (!prop) {
+ prop = drm_property_create(crtc->dev,
+ DRM_MODE_PROP_BLOB, name, 0);
+ if (!prop) {
+ DRM_ERROR("property create failed: %s\n", name);
+ kfree(prop_node);
+ return;
+ }
+ priv->cp_property[feature] = prop;
+ }
+
+ INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+ feature, val);
+
+ sde_cp_crtc_prop_attach(&prop_attach);
+}
+
+/**
+ * sde_cp_crtc_setfeature - program one dirty color feature on all mixers
+ * @prop_node: feature node carrying the payload (blob or range value)
+ * @sde_crtc: crtc whose mixers/DSPPs get programmed
+ *
+ * Dispatches the feature payload to the matching hw_dspp/hw_lm setup op
+ * on every mixer of the crtc.  On success the node is added to or removed
+ * from the active list depending on whether the payload enables the
+ * feature; it is always dropped from the dirty list afterwards.
+ */
+static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
+				   struct sde_crtc *sde_crtc)
+{
+	struct sde_hw_cp_cfg hw_cfg;
+	struct sde_hw_mixer *hw_lm;
+	struct sde_hw_dspp *hw_dspp;
+	u32 num_mixers = sde_crtc->num_mixers;
+	int i = 0;
+	bool feature_enabled = false;
+	int ret = 0;
+
+	sde_cp_get_hw_payload(prop_node, &hw_cfg, &feature_enabled);
+
+	for (i = 0; i < num_mixers && !ret; i++) {
+		hw_lm = sde_crtc->mixers[i].hw_lm;
+		hw_dspp = sde_crtc->mixers[i].hw_dspp;
+
+		switch (prop_node->feature) {
+		case SDE_CP_CRTC_DSPP_VLUT:
+			if (!hw_dspp || !hw_dspp->ops.setup_vlut) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_vlut(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_PCC:
+			if (!hw_dspp || !hw_dspp->ops.setup_pcc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_pcc(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_IGC:
+			if (!hw_dspp || !hw_dspp->ops.setup_igc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_igc(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_GC:
+			if (!hw_dspp || !hw_dspp->ops.setup_gc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_gc(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_HUE:
+			if (!hw_dspp || !hw_dspp->ops.setup_hue) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_hue(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_SAT:
+			if (!hw_dspp || !hw_dspp->ops.setup_sat) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_sat(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_VAL:
+			if (!hw_dspp || !hw_dspp->ops.setup_val) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_val(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_CONT:
+			if (!hw_dspp || !hw_dspp->ops.setup_cont) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_cont(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_MEMCOLOR:
+			/*
+			 * Bug fix: the check lacked braces, so "continue"
+			 * executed unconditionally and setup_pa_memcolor
+			 * was unreachable.  Braced to match every other
+			 * case in this switch.
+			 */
+			if (!hw_dspp || !hw_dspp->ops.setup_pa_memcolor) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_pa_memcolor(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_SIXZONE:
+			if (!hw_dspp || !hw_dspp->ops.setup_sixzone) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_sixzone(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_GAMUT:
+			if (!hw_dspp || !hw_dspp->ops.setup_gamut) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_gamut(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_LM_GC:
+			if (!hw_lm || !hw_lm->ops.setup_gc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_lm->ops.setup_gc(hw_lm, &hw_cfg);
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	if (ret) {
+		DRM_ERROR("failed to %s feature %d\n",
+			((feature_enabled) ? "enable" : "disable"),
+			prop_node->feature);
+		return;
+	}
+
+	if (feature_enabled) {
+		DRM_DEBUG_DRIVER("Add feature to active list %d\n",
+				 prop_node->property_id);
+		list_add_tail(&prop_node->active_list, &sde_crtc->active_list);
+	} else {
+		DRM_DEBUG_DRIVER("remove feature from active list %d\n",
+				 prop_node->property_id);
+		list_del_init(&prop_node->active_list);
+	}
+	/* Programming of feature done remove from dirty list */
+	list_del_init(&prop_node->dirty_list);
+}
+
+/**
+ * sde_cp_crtc_apply_properties - program all dirty features and set flush
+ * @crtc: crtc whose dirty color-processing features are committed
+ *
+ * Walks the dirty list, programs each feature via sde_cp_crtc_setfeature()
+ * and then arms the DSPP/LM flush bits on every mixer's CTL.
+ *
+ * NOTE(review): flush_mask is never reset inside the mixer loop and
+ * update_pending_flush() is called even when no DSPP bit was gathered,
+ * so a mask from one mixer/branch can leak into the next call — confirm
+ * whether update_pending_flush() ORing a stale mask is intended.
+ */
+void sde_cp_crtc_apply_properties(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = NULL;
+	bool set_dspp_flush = false, set_lm_flush = false;
+	struct sde_cp_node *prop_node = NULL, *n = NULL;
+	struct sde_hw_ctl *ctl;
+	uint32_t flush_mask = 0;
+	u32 num_mixers = 0, i = 0;
+
+	if (!crtc || !crtc->dev) {
+		DRM_ERROR("invalid crtc %pK dev %pK\n", crtc,
+			  (crtc ? crtc->dev : NULL));
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	num_mixers = sde_crtc->num_mixers;
+	if (!num_mixers) {
+		DRM_DEBUG_DRIVER("no mixers for this crtc\n");
+		return;
+	}
+
+	/* Check if dirty list is empty for early return */
+	if (list_empty(&sde_crtc->dirty_list)) {
+		DRM_DEBUG_DRIVER("Dirty list is empty\n");
+		return;
+	}
+
+	list_for_each_entry_safe(prop_node, n, &sde_crtc->dirty_list,
+				 dirty_list) {
+		sde_cp_crtc_setfeature(prop_node, sde_crtc);
+		/* Set the flush flag to true */
+		if (prop_node->is_dspp_feature)
+			set_dspp_flush = true;
+		else
+			set_lm_flush = true;
+	}
+
+	for (i = 0; i < num_mixers; i++) {
+		ctl = sde_crtc->mixers[i].hw_ctl;
+		if (!ctl)
+			continue;
+		if (set_dspp_flush && ctl->ops.get_bitmask_dspp
+				&& sde_crtc->mixers[i].hw_dspp)
+			ctl->ops.get_bitmask_dspp(ctl,
+					&flush_mask,
+					sde_crtc->mixers[i].hw_dspp->idx);
+			ctl->ops.update_pending_flush(ctl, flush_mask);
+		if (set_lm_flush && ctl->ops.get_bitmask_mixer
+				&& sde_crtc->mixers[i].hw_lm)
+			flush_mask = ctl->ops.get_bitmask_mixer(ctl,
+					sde_crtc->mixers[i].hw_lm->idx);
+			ctl->ops.update_pending_flush(ctl, flush_mask);
+	}
+}
+
+/**
+ * sde_cp_crtc_install_properties - create/attach CP properties for a crtc
+ * @crtc: crtc to attach the DSPP/LM color-processing properties to
+ *
+ * DRM properties are global, so they are created once (first crtc) and
+ * cached in priv->cp_property for reuse by later crtcs; per-crtc prop
+ * nodes are still allocated each time via the install functions.
+ */
+void sde_cp_crtc_install_properties(struct drm_crtc *crtc)
+{
+	struct sde_kms *kms = NULL;
+	struct sde_crtc *sde_crtc = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	unsigned long features = 0;
+	int i = 0;
+	struct msm_drm_private *priv;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		DRM_ERROR("invalid crtc %pK dev %pK\n",
+		       crtc, ((crtc) ? crtc->dev : NULL));
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	kms = get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		DRM_ERROR("invalid sde kms %pK catalog %pK sde_crtc %pK\n",
+		       kms, ((kms) ? kms->catalog : NULL), sde_crtc);
+		return;
+	}
+
+	/**
+	 * Function can be called during the atomic_check with test_only flag
+	 * and actual commit. Allocate properties only if feature list is
+	 * empty during the atomic_check with test_only flag.
+	 */
+	if (!list_empty(&sde_crtc->feature_list))
+		return;
+
+	catalog = kms->catalog;
+	priv = crtc->dev->dev_private;
+	/**
+	 * DSPP/LM properties are global to all the CRTCS.
+	 * Properties are created for first CRTC and re-used for later
+	 * crtcs.
+	 */
+	if (!priv->cp_property) {
+		/* NOTE(review): sizeof(priv->cp_property) is the size of
+		 * the pointer, not of an array element — presumably the
+		 * elements are pointers too so the sizes coincide; confirm
+		 * against the cp_property declaration.
+		 */
+		priv->cp_property = kzalloc((sizeof(priv->cp_property) *
+				SDE_CP_CRTC_MAX_FEATURES), GFP_KERNEL);
+		setup_dspp_prop_install_funcs(dspp_prop_install_func);
+		setup_lm_prop_install_funcs(lm_prop_install_func);
+	}
+	if (!priv->cp_property)
+		return;
+
+	if (!catalog->dspp_count)
+		goto lm_property;
+
+	/* Check for all the DSPP properties and attach it to CRTC */
+	features = catalog->dspp[0].features;
+	for (i = 0; i < SDE_DSPP_MAX; i++) {
+		if (!test_bit(i, &features))
+			continue;
+		if (dspp_prop_install_func[i])
+			dspp_prop_install_func[i](crtc);
+	}
+
+lm_property:
+	if (!catalog->mixer_count)
+		return;
+
+	/* Check for all the LM properties and attach it to CRTC */
+	features = catalog->mixer[0].features;
+	for (i = 0; i < SDE_MIXER_MAX; i++) {
+		if (!test_bit(i, &features))
+			continue;
+		if (lm_prop_install_func[i])
+			lm_prop_install_func[i](crtc);
+	}
+}
+
+/**
+ * sde_cp_crtc_set_property - atomic set-property handler for CP features
+ * @crtc: target crtc
+ * @property: DRM property being set
+ * @val: new value (0 disables; otherwise blob id or range value)
+ *
+ * Validates that the virtual crtc has real mixers (and DSPPs for DSPP
+ * features) attached, updates the node's payload, and marks the node
+ * dirty for the next commit.  Unknown properties return 0 untouched.
+ *
+ * Return: 0 on success or unknown property, negative errno on failure.
+ */
+int sde_cp_crtc_set_property(struct drm_crtc *crtc,
+				struct drm_property *property,
+				uint64_t val)
+{
+	struct sde_cp_node *prop_node = NULL;
+	struct sde_crtc *sde_crtc = NULL;
+	int ret = 0, i = 0, dspp_cnt, lm_cnt;
+	u8 found = 0;
+
+	if (!crtc || !property) {
+		DRM_ERROR("invalid crtc %pK property %pK\n", crtc, property);
+		return -EINVAL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
+		if (property->base.id == prop_node->property_id) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		return 0;
+	/**
+	 * sde_crtc is virtual ensure that hardware has been attached to the
+	 * crtc. Check LM and dspp counts based on whether feature is a
+	 * dspp/lm feature.
+	 */
+	if (!sde_crtc->num_mixers ||
+	    sde_crtc->num_mixers > ARRAY_SIZE(sde_crtc->mixers)) {
+		DRM_ERROR("Invalid mixer config act cnt %d max cnt %ld\n",
+			sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
+		return -EINVAL;
+	}
+
+	dspp_cnt = 0;
+	lm_cnt = 0;
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		if (sde_crtc->mixers[i].hw_dspp)
+			dspp_cnt++;
+		if (sde_crtc->mixers[i].hw_lm)
+			lm_cnt++;
+	}
+
+	if (prop_node->is_dspp_feature && dspp_cnt < sde_crtc->num_mixers) {
+		DRM_ERROR("invalid dspp cnt %d mixer cnt %d\n", dspp_cnt,
+			sde_crtc->num_mixers);
+		return -EINVAL;
+	} else if (lm_cnt < sde_crtc->num_mixers) {
+		DRM_ERROR("invalid lm cnt %d mixer cnt %d\n", lm_cnt,
+			sde_crtc->num_mixers);
+		return -EINVAL;
+	}
+	/* remove the property from dirty list */
+	list_del_init(&prop_node->dirty_list);
+
+	if (!val)
+		ret = sde_cp_disable_crtc_property(crtc, property, prop_node);
+	else
+		ret = sde_cp_enable_crtc_property(crtc, property,
+						  prop_node, val);
+
+	if (!ret) {
+		/* remove the property from active list */
+		list_del_init(&prop_node->active_list);
+		/* Mark the feature as dirty */
+		list_add_tail(&prop_node->dirty_list, &sde_crtc->dirty_list);
+	}
+	return ret;
+}
+
+/**
+ * sde_cp_crtc_get_property - atomic get-property handler for CP features
+ * @crtc: target crtc
+ * @property: DRM property being queried
+ * @val: out-param; set to the cached range value, or 0 when the property
+ *       is not found (blob properties also report their cached prop_val)
+ *
+ * Return: 0 always (unsupported properties read back as 0), -EINVAL on
+ * NULL arguments.
+ */
+int sde_cp_crtc_get_property(struct drm_crtc *crtc,
+			     struct drm_property *property, uint64_t *val)
+{
+	struct sde_cp_node *prop_node = NULL;
+	struct sde_crtc *sde_crtc = NULL;
+
+	if (!crtc || !property || !val) {
+		DRM_ERROR("invalid crtc %pK property %pK val %pK\n",
+			  crtc, property, val);
+		return -EINVAL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return -EINVAL;
+	}
+	/* Return 0 if property is not supported */
+	*val = 0;
+	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
+		if (property->base.id == prop_node->property_id) {
+			*val = prop_node->prop_val;
+			break;
+		}
+	}
+	return 0;
+}
+
+void sde_cp_crtc_destroy_properties(struct drm_crtc *crtc)
+{
+ struct sde_crtc *sde_crtc = NULL;
+ struct sde_cp_node *prop_node = NULL, *n = NULL;
+
+ if (!crtc) {
+ DRM_ERROR("invalid crtc %pK\n", crtc);
+ return;
+ }
+
+ sde_crtc = to_sde_crtc(crtc);
+ if (!sde_crtc) {
+ DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+ return;
+ }
+
+ list_for_each_entry_safe(prop_node, n, &sde_crtc->feature_list,
+ feature_list) {
+ if (prop_node->prop_flags & DRM_MODE_PROP_BLOB
+ && prop_node->blob_ptr)
+ drm_property_unreference_blob(prop_node->blob_ptr);
+
+ list_del_init(&prop_node->active_list);
+ list_del_init(&prop_node->dirty_list);
+ list_del_init(&prop_node->feature_list);
+ sde_cp_destroy_local_blob(prop_node);
+ kfree(prop_node);
+ }
+
+ INIT_LIST_HEAD(&sde_crtc->active_list);
+ INIT_LIST_HEAD(&sde_crtc->dirty_list);
+ INIT_LIST_HEAD(&sde_crtc->feature_list);
+}
+
+void sde_cp_crtc_suspend(struct drm_crtc *crtc)
+{
+ struct sde_crtc *sde_crtc = NULL;
+ struct sde_cp_node *prop_node = NULL, *n = NULL;
+
+ if (!crtc) {
+ DRM_ERROR("crtc %pK\n", crtc);
+ return;
+ }
+ sde_crtc = to_sde_crtc(crtc);
+ if (!sde_crtc) {
+ DRM_ERROR("sde_crtc %pK\n", sde_crtc);
+ return;
+ }
+
+ list_for_each_entry_safe(prop_node, n, &sde_crtc->active_list,
+ active_list) {
+ list_add_tail(&prop_node->dirty_list, &sde_crtc->dirty_list);
+ list_del_init(&prop_node->active_list);
+ }
+}
+
+void sde_cp_crtc_resume(struct drm_crtc *crtc)
+{
+ /* placeholder for operations needed during resume */
+}
+
+static void dspp_pcc_install_property(struct drm_crtc *crtc)
+{
+ char feature_name[256];
+ struct sde_kms *kms = NULL;
+ struct sde_mdss_cfg *catalog = NULL;
+ u32 version;
+
+ kms = get_kms(crtc);
+ catalog = kms->catalog;
+
+ version = catalog->dspp[0].sblk->pcc.version >> 16;
+ snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+ "SDE_DSPP_PCC_V", version);
+ switch (version) {
+ case 1:
+ sde_cp_crtc_create_blob_property(crtc, feature_name,
+ SDE_CP_CRTC_DSPP_PCC);
+ break;
+ default:
+ DRM_ERROR("version %d not supported\n", version);
+ break;
+ }
+}
+
+static void dspp_hsic_install_property(struct drm_crtc *crtc)
+{
+ char feature_name[256];
+ struct sde_kms *kms = NULL;
+ struct sde_mdss_cfg *catalog = NULL;
+ u32 version;
+
+ kms = get_kms(crtc);
+ catalog = kms->catalog;
+ version = catalog->dspp[0].sblk->hsic.version >> 16;
+ switch (version) {
+ case 1:
+ snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+ "SDE_DSPP_HUE_V", version);
+ sde_cp_crtc_install_range_property(crtc, feature_name,
+ SDE_CP_CRTC_DSPP_HUE, 0, U32_MAX, 0);
+ break;
+ default:
+ DRM_ERROR("version %d not supported\n", version);
+ break;
+ }
+}
+
+static void dspp_vlut_install_property(struct drm_crtc *crtc)
+{
+ char feature_name[256];
+ struct sde_kms *kms = NULL;
+ struct sde_mdss_cfg *catalog = NULL;
+ u32 version;
+
+ kms = get_kms(crtc);
+ catalog = kms->catalog;
+ version = catalog->dspp[0].sblk->vlut.version >> 16;
+ snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+ "SDE_DSPP_VLUT_V", version);
+ switch (version) {
+ case 1:
+ sde_cp_crtc_install_range_property(crtc, feature_name,
+ SDE_CP_CRTC_DSPP_VLUT, 0, U64_MAX, 0);
+ sde_cp_create_local_blob(crtc,
+ SDE_CP_CRTC_DSPP_VLUT,
+ sizeof(struct drm_msm_pa_vlut));
+ break;
+ default:
+ DRM_ERROR("version %d not supported\n", version);
+ break;
+ }
+}
+
+static void dspp_ad_install_property(struct drm_crtc *crtc)
+{
+ char feature_name[256];
+ struct sde_kms *kms = NULL;
+ struct sde_mdss_cfg *catalog = NULL;
+ u32 version;
+
+ kms = get_kms(crtc);
+ catalog = kms->catalog;
+ version = catalog->dspp[0].sblk->ad.version >> 16;
+ snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+ "SDE_DSPP_AD_V", version);
+ switch (version) {
+ case 3:
+ sde_cp_crtc_install_immutable_property(crtc,
+ feature_name, SDE_CP_CRTC_DSPP_AD);
+ break;
+ default:
+ DRM_ERROR("version %d not supported\n", version);
+ break;
+ }
+}
+
+static void lm_gc_install_property(struct drm_crtc *crtc)
+{
+ char feature_name[256];
+ struct sde_kms *kms = NULL;
+ struct sde_mdss_cfg *catalog = NULL;
+ u32 version;
+
+ kms = get_kms(crtc);
+ catalog = kms->catalog;
+ version = catalog->mixer[0].sblk->gc.version >> 16;
+ snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+ "SDE_LM_GC_V", version);
+ switch (version) {
+ case 1:
+ sde_cp_crtc_create_blob_property(crtc, feature_name,
+ SDE_CP_CRTC_LM_GC);
+ break;
+ default:
+ DRM_ERROR("version %d not supported\n", version);
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.h b/drivers/gpu/drm/msm/sde/sde_color_processing.h
new file mode 100644
index 000000000000..bf954ec6a8e7
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_COLOR_PROCESSING_H
+#define _SDE_COLOR_PROCESSING_H
+#include <drm/drm_crtc.h>
+
+/*
+ * PA MEMORY COLOR types
+ * @MEMCOLOR_SKIN Skin memory color type
+ * @MEMCOLOR_SKY Sky memory color type
+ * @MEMCOLOR_FOLIAGE Foliage memory color type
+ */
+enum sde_memcolor_type {
+ MEMCOLOR_SKIN = 0,
+ MEMCOLOR_SKY,
+ MEMCOLOR_FOLIAGE
+};
+
+/**
+ * sde_cp_crtc_init(): Initialize color processing lists for a crtc.
+ * Should be called during crtc initialization.
+ * @crtc: Pointer to sde_crtc.
+ */
+void sde_cp_crtc_init(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_install_properties(): Installs the color processing
+ * properties for a crtc.
+ * Should be called during crtc initialization.
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_install_properties(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_destroy_properties: Destroys color processing
+ * properties for a crtc.
+ * should be called during crtc de-initialization.
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_destroy_properties(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_set_property: Set a color processing property
+ * for a crtc.
+ * Should be during atomic set property.
+ * @crtc: Pointer to crtc.
+ * @property: Property that needs to enabled/disabled.
+ * @val: Value of property.
+ */
+int sde_cp_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val);
+
+/**
+ * sde_cp_crtc_apply_properties: Enable/disable properties
+ * for a crtc.
+ * Should be called during atomic commit call.
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_apply_properties(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_get_property: Get value of color processing property
+ * for a crtc.
+ * Should be during atomic get property.
+ * @crtc: Pointer to crtc.
+ * @property: Property that needs to enabled/disabled.
+ * @val: Value of property.
+ *
+ */
+int sde_cp_crtc_get_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t *val);
+
+/**
+ * sde_cp_crtc_suspend: Suspend the crtc features
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_suspend(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_resume: Resume the crtc features
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_resume(struct drm_crtc *crtc);
+#endif /*_SDE_COLOR_PROCESSING_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
new file mode 100644
index 000000000000..ac9997c238cd
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -0,0 +1,624 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "sde-drm:[%s] " fmt, __func__
+#include "msm_drv.h"
+
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "sde_backlight.h"
+
+static const struct drm_prop_enum_list e_topology_name[] = {
+ {SDE_RM_TOPOLOGY_UNKNOWN, "sde_unknown"},
+ {SDE_RM_TOPOLOGY_SINGLEPIPE, "sde_singlepipe"},
+ {SDE_RM_TOPOLOGY_DUALPIPE, "sde_dualpipe"},
+ {SDE_RM_TOPOLOGY_PPSPLIT, "sde_ppsplit"},
+ {SDE_RM_TOPOLOGY_DUALPIPEMERGE, "sde_dualpipemerge"}
+};
+static const struct drm_prop_enum_list e_topology_control[] = {
+ {SDE_RM_TOPCTL_RESERVE_LOCK, "reserve_lock"},
+ {SDE_RM_TOPCTL_RESERVE_CLEAR, "reserve_clear"},
+ {SDE_RM_TOPCTL_DSPP, "dspp"},
+ {SDE_RM_TOPCTL_FORCE_TILING, "force_tiling"},
+ {SDE_RM_TOPCTL_PPSPLIT, "ppsplit"}
+};
+
+int sde_connector_get_info(struct drm_connector *connector,
+ struct msm_display_info *info)
+{
+ struct sde_connector *c_conn;
+
+ if (!connector || !info) {
+ SDE_ERROR("invalid argument(s), conn %pK, info %pK\n",
+ connector, info);
+ return -EINVAL;
+ }
+
+ c_conn = to_sde_connector(connector);
+
+ if (!c_conn->display || !c_conn->ops.get_info) {
+ SDE_ERROR("display info not supported for %pK\n",
+ c_conn->display);
+ return -EINVAL;
+ }
+
+ return c_conn->ops.get_info(info, c_conn->display);
+}
+
+static void sde_connector_destroy(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn;
+
+ if (!connector) {
+ SDE_ERROR("invalid connector\n");
+ return;
+ }
+
+ c_conn = to_sde_connector(connector);
+
+ if (c_conn->blob_caps)
+ drm_property_unreference_blob(c_conn->blob_caps);
+ msm_property_destroy(&c_conn->property_info);
+
+ drm_connector_unregister(connector);
+ sde_fence_deinit(&c_conn->retire_fence);
+ drm_connector_cleanup(connector);
+ kfree(c_conn);
+}
+
+/**
+ * _sde_connector_destroy_fb - clean up connector state's out_fb buffer
+ * @c_conn: Pointer to sde connector structure
+ * @c_state: Pointer to sde connector state structure
+ */
+static void _sde_connector_destroy_fb(struct sde_connector *c_conn,
+ struct sde_connector_state *c_state)
+{
+ if (!c_state || !c_state->out_fb) {
+ SDE_ERROR("invalid state %pK\n", c_state);
+ return;
+ }
+
+ msm_framebuffer_cleanup(c_state->out_fb,
+ c_state->mmu_id);
+ drm_framebuffer_unreference(c_state->out_fb);
+ c_state->out_fb = NULL;
+
+ if (c_conn) {
+ c_state->property_values[CONNECTOR_PROP_OUT_FB] =
+ msm_property_get_default(&c_conn->property_info,
+ CONNECTOR_PROP_OUT_FB);
+ } else {
+ c_state->property_values[CONNECTOR_PROP_OUT_FB] = ~0;
+ }
+}
+
+static void sde_connector_atomic_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ struct sde_connector *c_conn = NULL;
+ struct sde_connector_state *c_state = NULL;
+
+ if (!state) {
+ SDE_ERROR("invalid state\n");
+ return;
+ }
+
+ /*
+ * The base DRM framework currently always passes in a NULL
+ * connector pointer. This is not correct, but attempt to
+ * handle that case as much as possible.
+ */
+ if (connector)
+ c_conn = to_sde_connector(connector);
+ c_state = to_sde_connector_state(state);
+
+ if (c_state->out_fb)
+ _sde_connector_destroy_fb(c_conn, c_state);
+
+ if (!c_conn) {
+ kfree(c_state);
+ } else {
+ /* destroy value helper */
+ msm_property_destroy_state(&c_conn->property_info, c_state,
+ c_state->property_values, 0);
+ }
+}
+
+static void sde_connector_atomic_reset(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	struct sde_connector_state *c_state;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return;
+	}
+
+	c_conn = to_sde_connector(connector);
+
+	if (connector->state) {
+		sde_connector_atomic_destroy_state(connector, connector->state);
+		connector->state = NULL;
+	}
+
+	c_state = msm_property_alloc_state(&c_conn->property_info);
+	if (!c_state) {
+		SDE_ERROR("state alloc failed\n");
+		return;
+	}
+
+	/* reset value helper, zero out state structure and reset properties */
+	msm_property_reset_state(&c_conn->property_info, c_state,
+			c_state->property_values, 0);
+
+	c_state->base.connector = connector;
+	connector->state = &c_state->base;
+}
+
+static struct drm_connector_state *
+sde_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn;
+ struct sde_connector_state *c_state, *c_oldstate;
+ int rc;
+
+ if (!connector || !connector->state) {
+ SDE_ERROR("invalid connector %pK\n", connector);
+ return NULL;
+ }
+
+ c_conn = to_sde_connector(connector);
+ c_oldstate = to_sde_connector_state(connector->state);
+ c_state = msm_property_alloc_state(&c_conn->property_info);
+ if (!c_state) {
+ SDE_ERROR("state alloc failed\n");
+ return NULL;
+ }
+
+ /* duplicate value helper */
+ msm_property_duplicate_state(&c_conn->property_info,
+ c_oldstate, c_state, c_state->property_values, 0);
+
+ /* additional handling for drm framebuffer objects */
+ if (c_state->out_fb) {
+ drm_framebuffer_reference(c_state->out_fb);
+ rc = msm_framebuffer_prepare(c_state->out_fb,
+ c_state->mmu_id);
+ if (rc)
+ SDE_ERROR("failed to prepare fb, %d\n", rc);
+ }
+
+ return &c_state->base;
+}
+
+static int sde_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct sde_connector *c_conn;
+ struct sde_connector_state *c_state;
+ int idx, rc;
+
+ if (!connector || !state || !property) {
+ SDE_ERROR("invalid argument(s), conn %pK, state %pK, prp %pK\n",
+ connector, state, property);
+ return -EINVAL;
+ }
+
+ c_conn = to_sde_connector(connector);
+ c_state = to_sde_connector_state(state);
+
+ /* generic property handling */
+ rc = msm_property_atomic_set(&c_conn->property_info,
+ c_state->property_values, 0, property, val);
+ if (rc)
+ goto end;
+
+ /* connector-specific property handling */
+ idx = msm_property_index(&c_conn->property_info, property);
+
+ if (idx == CONNECTOR_PROP_OUT_FB) {
+ /* clear old fb, if present */
+ if (c_state->out_fb)
+ _sde_connector_destroy_fb(c_conn, c_state);
+
+ /* convert fb val to drm framebuffer and prepare it */
+ c_state->out_fb =
+ drm_framebuffer_lookup(connector->dev, val);
+ if (!c_state->out_fb) {
+ SDE_ERROR("failed to look up fb %lld\n", val);
+ rc = -EFAULT;
+ } else {
+ if (c_state->out_fb->flags & DRM_MODE_FB_SECURE)
+ c_state->mmu_id =
+ c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE];
+ else
+ c_state->mmu_id =
+ c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+
+ rc = msm_framebuffer_prepare(c_state->out_fb,
+ c_state->mmu_id);
+ if (rc)
+ SDE_ERROR("prep fb failed, %d\n", rc);
+ }
+ }
+
+ if (idx == CONNECTOR_PROP_TOPOLOGY_CONTROL) {
+ rc = sde_rm_check_property_topctl(val);
+ if (rc)
+ SDE_ERROR("invalid topology_control: 0x%llX\n", val);
+ }
+
+ /* check for custom property handling */
+ if (!rc && c_conn->ops.set_property) {
+ rc = c_conn->ops.set_property(connector,
+ state,
+ idx,
+ val,
+ c_conn->display);
+
+ /* potentially clean up out_fb if rc != 0 */
+ if ((idx == CONNECTOR_PROP_OUT_FB) && rc)
+ _sde_connector_destroy_fb(c_conn, c_state);
+ }
+end:
+ return rc;
+}
+
+static int sde_connector_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ if (!connector) {
+ SDE_ERROR("invalid connector\n");
+ return -EINVAL;
+ }
+
+ return sde_connector_atomic_set_property(connector,
+ connector->state, property, val);
+}
+
+static int sde_connector_atomic_get_property(struct drm_connector *connector,
+		const struct drm_connector_state *state,
+		struct drm_property *property,
+		uint64_t *val)
+{
+	struct sde_connector *c_conn;
+	struct sde_connector_state *c_state;
+	int idx, rc = -EINVAL;
+
+	if (!connector || !state || !property) {
+		SDE_ERROR("invalid argument(s), conn %pK, state %pK, prp %pK\n",
+				connector, state, property);
+		return -EINVAL;
+	}
+
+	c_conn = to_sde_connector(connector);
+	c_state = to_sde_connector_state(state);
+
+	idx = msm_property_index(&c_conn->property_info, property);
+	if (idx == CONNECTOR_PROP_RETIRE_FENCE)
+		/*
+		 * Set a fence offset if not a virtual connector, so that the
+		 * fence signals after one additional commit rather than at the
+		 * end of the current one.
+		 */
+		rc = sde_fence_create(&c_conn->retire_fence, val,
+			c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
+	else
+		/* get cached property value */
+		rc = msm_property_atomic_get(&c_conn->property_info,
+				c_state->property_values, 0, property, val);
+
+	/* allow for custom override */
+	if (c_conn->ops.get_property)
+		rc = c_conn->ops.get_property(connector,
+				(struct drm_connector_state *)state,
+				idx,
+				val,
+				c_conn->display);
+	return rc;
+}
+
+void sde_connector_prepare_fence(struct drm_connector *connector)
+{
+ if (!connector) {
+ SDE_ERROR("invalid connector\n");
+ return;
+ }
+
+ sde_fence_prepare(&to_sde_connector(connector)->retire_fence);
+}
+
+void sde_connector_complete_commit(struct drm_connector *connector)
+{
+ if (!connector) {
+ SDE_ERROR("invalid connector\n");
+ return;
+ }
+
+ /* signal connector's retire fence */
+ sde_fence_signal(&to_sde_connector(connector)->retire_fence, 0);
+}
+
+static enum drm_connector_status
+sde_connector_detect(struct drm_connector *connector, bool force)
+{
+ enum drm_connector_status status = connector_status_unknown;
+ struct sde_connector *c_conn;
+
+ if (!connector) {
+ SDE_ERROR("invalid connector\n");
+ return status;
+ }
+
+ c_conn = to_sde_connector(connector);
+
+ if (c_conn->ops.detect)
+ status = c_conn->ops.detect(connector,
+ force,
+ c_conn->display);
+
+ return status;
+}
+
+static const struct drm_connector_funcs sde_connector_ops = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .reset = sde_connector_atomic_reset,
+ .detect = sde_connector_detect,
+ .destroy = sde_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_duplicate_state = sde_connector_atomic_duplicate_state,
+ .atomic_destroy_state = sde_connector_atomic_destroy_state,
+ .atomic_set_property = sde_connector_atomic_set_property,
+ .atomic_get_property = sde_connector_atomic_get_property,
+ .set_property = sde_connector_set_property,
+};
+
+static int sde_connector_get_modes(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn;
+
+ if (!connector) {
+ SDE_ERROR("invalid connector\n");
+ return 0;
+ }
+
+ c_conn = to_sde_connector(connector);
+ if (!c_conn->ops.get_modes) {
+ SDE_DEBUG("missing get_modes callback\n");
+ return 0;
+ }
+
+ return c_conn->ops.get_modes(connector, c_conn->display);
+}
+
+static enum drm_mode_status
+sde_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct sde_connector *c_conn;
+
+ if (!connector || !mode) {
+ SDE_ERROR("invalid argument(s), conn %pK, mode %pK\n",
+ connector, mode);
+ return MODE_ERROR;
+ }
+
+ c_conn = to_sde_connector(connector);
+
+ if (c_conn->ops.mode_valid)
+ return c_conn->ops.mode_valid(connector, mode, c_conn->display);
+
+ /* assume all modes okay by default */
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+sde_connector_best_encoder(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return NULL;
+	}
+	c_conn = to_sde_connector(connector);
+	/*
+	 * This is true for now, revisit this code when multiple encoders are
+	 * supported.
+	 */
+	return c_conn->encoder;
+}
+
+static const struct drm_connector_helper_funcs sde_connector_helper_ops = {
+ .get_modes = sde_connector_get_modes,
+ .mode_valid = sde_connector_mode_valid,
+ .best_encoder = sde_connector_best_encoder,
+};
+
+struct drm_connector *sde_connector_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ struct drm_panel *panel,
+ void *display,
+ const struct sde_connector_ops *ops,
+ int connector_poll,
+ int connector_type)
+{
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ struct sde_kms_info *info;
+ struct sde_connector *c_conn = NULL;
+ int rc;
+
+ if (!dev || !dev->dev_private || !encoder) {
+ SDE_ERROR("invalid argument(s), dev %pK, enc %pK\n",
+ dev, encoder);
+ return ERR_PTR(-EINVAL);
+ }
+
+ priv = dev->dev_private;
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms reference\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ c_conn = kzalloc(sizeof(*c_conn), GFP_KERNEL);
+ if (!c_conn) {
+ SDE_ERROR("failed to alloc sde connector\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ rc = drm_connector_init(dev,
+ &c_conn->base,
+ &sde_connector_ops,
+ connector_type);
+ if (rc)
+ goto error_free_conn;
+
+ c_conn->connector_type = connector_type;
+ c_conn->encoder = encoder;
+ c_conn->panel = panel;
+ c_conn->display = display;
+
+ /* cache mmu_id's for later */
+ sde_kms = to_sde_kms(priv->kms);
+ if (sde_kms->vbif[VBIF_NRT]) {
+ c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
+ sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+ c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
+ sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+ } else {
+ c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
+ sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
+ c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
+ sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+ }
+
+ if (ops)
+ c_conn->ops = *ops;
+
+ c_conn->base.helper_private = &sde_connector_helper_ops;
+ c_conn->base.polled = connector_poll;
+ c_conn->base.interlace_allowed = 0;
+ c_conn->base.doublescan_allowed = 0;
+
+ snprintf(c_conn->name,
+ SDE_CONNECTOR_NAME_SIZE,
+ "conn%u",
+ c_conn->base.base.id);
+
+ rc = sde_fence_init(&c_conn->retire_fence, c_conn->name,
+ c_conn->base.base.id);
+ if (rc) {
+ SDE_ERROR("failed to init fence, %d\n", rc);
+ goto error_cleanup_conn;
+ }
+
+ rc = drm_connector_register(&c_conn->base);
+ if (rc) {
+ SDE_ERROR("failed to register drm connector, %d\n", rc);
+ goto error_cleanup_fence;
+ }
+
+ rc = drm_mode_connector_attach_encoder(&c_conn->base, encoder);
+ if (rc) {
+ SDE_ERROR("failed to attach encoder to connector, %d\n", rc);
+ goto error_unregister_conn;
+ }
+
+ if (c_conn->ops.set_backlight) {
+ rc = sde_backlight_setup(&c_conn->base);
+ if (rc) {
+ pr_err("failed to setup backlight, rc=%d\n", rc);
+ goto error_unregister_conn;
+ }
+ }
+
+ /* create properties */
+ msm_property_init(&c_conn->property_info, &c_conn->base.base, dev,
+ priv->conn_property, c_conn->property_data,
+ CONNECTOR_PROP_COUNT, CONNECTOR_PROP_BLOBCOUNT,
+ sizeof(struct sde_connector_state));
+
+ if (c_conn->ops.post_init) {
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ SDE_ERROR("failed to allocate info buffer\n");
+ rc = -ENOMEM;
+ goto error_unregister_conn;
+ }
+
+ sde_kms_info_reset(info);
+ rc = c_conn->ops.post_init(&c_conn->base, info, display);
+ if (rc) {
+ SDE_ERROR("post-init failed, %d\n", rc);
+ kfree(info);
+ goto error_unregister_conn;
+ }
+
+ msm_property_install_blob(&c_conn->property_info,
+ "capabilities",
+ DRM_MODE_PROP_IMMUTABLE,
+ CONNECTOR_PROP_SDE_INFO);
+
+ msm_property_set_blob(&c_conn->property_info,
+ &c_conn->blob_caps,
+ SDE_KMS_INFO_DATA(info),
+ SDE_KMS_INFO_DATALEN(info),
+ CONNECTOR_PROP_SDE_INFO);
+ kfree(info);
+ }
+
+ msm_property_install_range(&c_conn->property_info, "RETIRE_FENCE",
+ 0x0, 0, INR_OPEN_MAX, 0, CONNECTOR_PROP_RETIRE_FENCE);
+
+ /* enum/bitmask properties */
+ msm_property_install_enum(&c_conn->property_info, "topology_name",
+ DRM_MODE_PROP_IMMUTABLE, 0, e_topology_name,
+ ARRAY_SIZE(e_topology_name),
+ CONNECTOR_PROP_TOPOLOGY_NAME);
+ msm_property_install_enum(&c_conn->property_info, "topology_control",
+ 0, 1, e_topology_control,
+ ARRAY_SIZE(e_topology_control),
+ CONNECTOR_PROP_TOPOLOGY_CONTROL);
+
+ rc = msm_property_install_get_status(&c_conn->property_info);
+ if (rc) {
+ SDE_ERROR("failed to create one or more properties\n");
+ goto error_destroy_property;
+ }
+
+ SDE_DEBUG("connector %d attach encoder %d\n",
+ c_conn->base.base.id, encoder->base.id);
+
+ priv->connectors[priv->num_connectors++] = &c_conn->base;
+
+ return &c_conn->base;
+
+error_destroy_property:
+ if (c_conn->blob_caps)
+ drm_property_unreference_blob(c_conn->blob_caps);
+ msm_property_destroy(&c_conn->property_info);
+error_unregister_conn:
+ drm_connector_unregister(&c_conn->base);
+error_cleanup_fence:
+ sde_fence_deinit(&c_conn->retire_fence);
+error_cleanup_conn:
+ drm_connector_cleanup(&c_conn->base);
+error_free_conn:
+ kfree(c_conn);
+
+ return ERR_PTR(rc);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
new file mode 100644
index 000000000000..9580282291db
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -0,0 +1,298 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_CONNECTOR_H_
+#define _SDE_CONNECTOR_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_panel.h>
+
+#include "msm_drv.h"
+#include "msm_prop.h"
+#include "sde_kms.h"
+#include "sde_fence.h"
+
+#define SDE_CONNECTOR_NAME_SIZE 16
+
+struct sde_connector;
+struct sde_connector_state;
+
+/**
+ * struct sde_connector_ops - callback functions for generic sde connector
+ * Individual callbacks documented below.
+ */
+struct sde_connector_ops {
+ /**
+ * post_init - perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to sde connector info structure
+ * @display: Pointer to private display handle
+ * Returns: Zero on success
+ */
+ int (*post_init)(struct drm_connector *connector,
+ void *info,
+ void *display);
+
+ /**
+ * detect - determine if connector is connected
+ * @connector: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ * Returns: Connector 'is connected' status
+ */
+ enum drm_connector_status (*detect)(struct drm_connector *connector,
+ bool force,
+ void *display);
+
+ /**
+ * get_modes - add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * Returns: Number of modes added
+ */
+ int (*get_modes)(struct drm_connector *connector,
+ void *display);
+
+ /**
+ * mode_valid - determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ * Returns: Validity status for specified mode
+ */
+ enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display);
+
+ /**
+ * set_property - set property value
+ * @connector: Pointer to drm connector structure
+ * @state: Pointer to drm connector state structure
+ * @property_index: DRM property index
+ * @value: Incoming property value
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+ int (*set_property)(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t value,
+ void *display);
+
+ /**
+ * get_property - get property value
+ * @connector: Pointer to drm connector structure
+ * @state: Pointer to drm connector state structure
+ * @property_index: DRM property index
+ * @value: Pointer to variable for accepting property value
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+ int (*get_property)(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t *value,
+ void *display);
+
+ /**
+ * get_info - get display information
+ * @info: Pointer to msm display info structure
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+ int (*get_info)(struct msm_display_info *info, void *display);
+
+ int (*set_backlight)(void *display, u32 bl_lvl);
+};
+
+/**
+ * struct sde_connector - local sde connector structure
+ * @base: Base drm connector structure
+ * @connector_type: Set to one of DRM_MODE_CONNECTOR_ types
+ * @encoder: Pointer to preferred drm encoder
+ * @panel: Pointer to drm panel, if present
+ * @display: Pointer to private display data structure
+ * @mmu_secure: MMU id for secure buffers
+ * @mmu_unsecure: MMU id for unsecure buffers
+ * @name: ASCII name of connector
+ * @retire_fence: Retire fence reference
+ * @ops: Local callback function pointer table
+ * @property_info: Private structure for generic property handling
+ * @property_data: Array of private data for generic property handling
+ * @blob_caps: Pointer to blob structure for 'capabilities' property
+ */
+struct sde_connector {
+ struct drm_connector base;
+
+ int connector_type;
+
+ struct drm_encoder *encoder;
+ struct drm_panel *panel;
+ void *display;
+
+ int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+
+ char name[SDE_CONNECTOR_NAME_SIZE];
+
+ struct sde_fence retire_fence;
+ struct sde_connector_ops ops;
+
+ struct msm_property_info property_info;
+ struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
+ struct drm_property_blob *blob_caps;
+};
+
+/**
+ * to_sde_connector - convert drm_connector pointer to sde connector pointer
+ * @X: Pointer to drm_connector structure
+ * Returns: Pointer to sde_connector structure
+ */
+#define to_sde_connector(x) container_of((x), struct sde_connector, base)
+
+/**
+ * sde_connector_get_display - get sde connector's private display pointer
+ * @C: Pointer to drm connector structure
+ * Returns: Pointer to associated private display structure
+ */
+#define sde_connector_get_display(C) \
+ ((C) ? to_sde_connector((C))->display : 0)
+
+/**
+ * sde_connector_get_panel - get sde connector's private panel pointer
+ * @C: Pointer to drm connector structure
+ * Returns: Pointer to associated private display structure
+ */
+#define sde_connector_get_panel(C) \
+ ((C) ? to_sde_connector((C))->panel : 0)
+
+/**
+ * sde_connector_get_encoder - get sde connector's private encoder pointer
+ * @C: Pointer to drm connector structure
+ * Returns: Pointer to associated private encoder structure
+ */
+#define sde_connector_get_encoder(C) \
+ ((C) ? to_sde_connector((C))->encoder : 0)
+
+/**
+ * sde_connector_get_propinfo - get sde connector's property info pointer
+ * @C: Pointer to drm connector structure
+ * Returns: Pointer to associated private property info structure
+ */
+#define sde_connector_get_propinfo(C) \
+ ((C) ? &to_sde_connector((C))->property_info : 0)
+
+/**
+ * struct sde_connector_state - private connector status structure
+ * @base: Base drm connector structure
+ * @out_fb: Pointer to output frame buffer, if applicable
+ * @mmu_id: MMU ID for accessing frame buffer objects, if applicable
+ * @property_values: Local cache of current connector property values
+ */
+struct sde_connector_state {
+ struct drm_connector_state base;
+ struct drm_framebuffer *out_fb;
+ int mmu_id;
+ uint64_t property_values[CONNECTOR_PROP_COUNT];
+};
+
+/**
+ * to_sde_connector_state - convert drm_connector_state pointer to
+ * sde connector state pointer
+ * @X: Pointer to drm_connector_state structure
+ * Returns: Pointer to sde_connector_state structure
+ */
+#define to_sde_connector_state(x) \
+ container_of((x), struct sde_connector_state, base)
+
+/**
+ * sde_connector_get_property - query integer value of connector property
+ * @S: Pointer to drm connector state
+ * @X: Property index, from enum msm_mdp_connector_property
+ * Returns: Integer value of requested property
+ */
+#define sde_connector_get_property(S, X) \
+ ((S) && ((X) < CONNECTOR_PROP_COUNT) ? \
+ (to_sde_connector_state((S))->property_values[(X)]) : 0)
+
+/**
+ * sde_connector_get_property_values - retrieve property values cache
+ * @S: Pointer to drm connector state
+ * Returns: Integer value of requested property
+ */
+#define sde_connector_get_property_values(S) \
+ ((S) ? (to_sde_connector_state((S))->property_values) : 0)
+
+/**
+ * sde_connector_get_out_fb - query out_fb value from sde connector state
+ * @S: Pointer to drm connector state
+ * Returns: Output fb associated with specified connector state
+ */
+#define sde_connector_get_out_fb(S) \
+ ((S) ? to_sde_connector_state((S))->out_fb : 0)
+
+/**
+ * sde_connector_get_topology_name - helper accessor to retrieve topology_name
+ * @connector: pointer to drm connector
+ * Returns: value of the CONNECTOR_PROP_TOPOLOGY_NAME property or 0
+ */
+static inline uint64_t sde_connector_get_topology_name(
+ struct drm_connector *connector)
+{
+ if (!connector || !connector->state)
+ return 0;
+ return sde_connector_get_property(connector->state,
+ CONNECTOR_PROP_TOPOLOGY_NAME);
+}
+
+/**
+ * sde_connector_init - create drm connector object for a given display
+ * @dev: Pointer to drm device struct
+ * @encoder: Pointer to associated encoder
+ * @panel: Pointer to associated panel, can be NULL
+ * @display: Pointer to associated display object
+ * @ops: Pointer to callback operations function table
+ * @connector_poll: Set to appropriate DRM_CONNECTOR_POLL_ setting
+ * @connector_type: Set to appropriate DRM_MODE_CONNECTOR_ type
+ * Returns: Pointer to newly created drm connector struct
+ */
+struct drm_connector *sde_connector_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ struct drm_panel *panel,
+ void *display,
+ const struct sde_connector_ops *ops,
+ int connector_poll,
+ int connector_type);
+
+/**
+ * sde_connector_prepare_fence - prepare fence support for current commit
+ * @connector: Pointer to drm connector object
+ */
+void sde_connector_prepare_fence(struct drm_connector *connector);
+
+/**
+ * sde_connector_complete_commit - signal completion of current commit
+ * @connector: Pointer to drm connector object
+ */
+void sde_connector_complete_commit(struct drm_connector *connector);
+
+/**
+ * sde_connector_get_info - query display specific information
+ * @connector: Pointer to drm connector object
+ * @info: Pointer to msm display information structure
+ * Returns: Zero on success
+ */
+int sde_connector_get_info(struct drm_connector *connector,
+ struct msm_display_info *info);
+
+#endif /* _SDE_CONNECTOR_H_ */
+
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
new file mode 100644
index 000000000000..b2853e874d92
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -0,0 +1,460 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "sde_core_irq.h"
+#include "sde_power_handle.h"
+
+/**
+ * sde_core_irq_callback_handler - dispatch core interrupts
+ * @arg: private data of callback handler
+ * @irq_idx: interrupt index
+ */
+static void sde_core_irq_callback_handler(void *arg, int irq_idx)
+{
+ struct sde_kms *sde_kms = arg;
+ struct sde_irq *irq_obj = &sde_kms->irq_obj;
+ struct sde_irq_callback *cb;
+ unsigned long irq_flags;
+
+ SDE_DEBUG("irq_idx=%d\n", irq_idx);
+
+ /*
+  * NOTE(review): this emptiness check runs without cb_lock held; it is
+  * purely diagnostic, but a racing unregister could make it misreport.
+  */
+ if (list_empty(&irq_obj->irq_cb_tbl[irq_idx]))
+ SDE_ERROR("irq_idx=%d has no registered callback\n", irq_idx);
+
+ /* per-irq fire counter, exposed via the core_irq debugfs node */
+ atomic_inc(&irq_obj->irq_counts[irq_idx]);
+
+ /*
+  * Perform registered function callback
+  */
+ spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+ list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
+ if (cb->func)
+ cb->func(cb->arg, irq_idx);
+ spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+ /*
+  * Clear pending interrupt status in HW.
+  * NOTE: sde_core_irq_callback_handler is protected by top-level
+  * spinlock, so it is safe to clear any interrupt status here.
+  */
+ sde_kms->hw_intr->ops.clear_interrupt_status(
+ sde_kms->hw_intr,
+ irq_idx);
+}
+
+/* Map (intr_type, instance) to a flat irq_idx via the HW interrupt table. */
+int sde_core_irq_idx_lookup(struct sde_kms *sde_kms,
+ enum sde_intr_type intr_type, u32 instance_idx)
+{
+ if (!sde_kms || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.irq_idx_lookup)
+ return -EINVAL;
+
+ return sde_kms->hw_intr->ops.irq_idx_lookup(intr_type,
+ instance_idx);
+}
+
+/**
+ * _sde_core_irq_enable - enable core interrupt given by the index
+ * @sde_kms: Pointer to sde kms context
+ * @irq_idx: interrupt index
+ */
+static int _sde_core_irq_enable(struct sde_kms *sde_kms, int irq_idx)
+{
+ unsigned long irq_flags;
+ int ret = 0;
+
+ if (!sde_kms || !sde_kms->hw_intr ||
+ !sde_kms->irq_obj.enable_counts ||
+ !sde_kms->irq_obj.irq_counts) {
+ SDE_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+ SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
+ atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+
+ spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+ /* event log records the count *before* this increment */
+ SDE_EVT32(irq_idx,
+ atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+ /* only the 0 -> 1 transition touches the HW enable register */
+ if (atomic_inc_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 1) {
+ ret = sde_kms->hw_intr->ops.enable_irq(
+ sde_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ SDE_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ irq_idx);
+
+ SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+
+ /* empty callback list but interrupt is enabled */
+ if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]))
+ SDE_ERROR("irq_idx=%d enabled with no callback\n",
+ irq_idx);
+ }
+ spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+ return ret;
+}
+
+int sde_core_irq_enable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
+{
+ int i;
+ int ret = 0;
+
+ if (!sde_kms || !irq_idxs || !irq_count) {
+ SDE_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ /*
+  * Stops at the first failure; interrupts already enabled in this
+  * batch are NOT rolled back — callers must disable them on error.
+  */
+ for (i = 0; (i < irq_count) && !ret; i++)
+ ret = _sde_core_irq_enable(sde_kms, irq_idxs[i]);
+
+ return ret;
+}
+
+/**
+ * _sde_core_irq_disable - disable core interrupt given by the index
+ * @sde_kms: Pointer to sde kms context
+ * @irq_idx: interrupt index
+ */
+static int _sde_core_irq_disable(struct sde_kms *sde_kms, int irq_idx)
+{
+ unsigned long irq_flags;
+ int ret = 0;
+
+ if (!sde_kms || !sde_kms->hw_intr || !sde_kms->irq_obj.enable_counts) {
+ SDE_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+ SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
+ atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+
+ spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+ SDE_EVT32(irq_idx,
+ atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+ /*
+  * Last reference turns the HW interrupt off. NOTE(review): there is
+  * no guard against decrementing below zero on an unbalanced disable —
+  * confirm callers always pair enable/disable.
+  */
+ if (atomic_dec_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 0) {
+ ret = sde_kms->hw_intr->ops.disable_irq(
+ sde_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ SDE_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+ irq_idx);
+ SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+ }
+ spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+ return ret;
+}
+
+int sde_core_irq_disable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
+{
+ int i;
+ int ret = 0;
+
+ if (!sde_kms || !irq_idxs || !irq_count) {
+ SDE_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ /* stops at the first failure; earlier entries remain disabled */
+ for (i = 0; (i < irq_count) && !ret; i++)
+ ret = _sde_core_irq_disable(sde_kms, irq_idxs[i]);
+
+ return ret;
+}
+
+/* Read (and optionally clear) the raw HW status for one irq_idx. */
+u32 sde_core_irq_read(struct sde_kms *sde_kms, int irq_idx, bool clear)
+{
+ /* returns 0 ("no irq") when the handle or HW op is unavailable */
+ if (!sde_kms || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.get_interrupt_status)
+ return 0;
+
+ return sde_kms->hw_intr->ops.get_interrupt_status(sde_kms->hw_intr,
+ irq_idx, clear);
+}
+
+/*
+ * Register @register_irq_cb on @irq_idx. The callback node is moved (not
+ * copied), so it may safely be re-registered to a different irq_idx.
+ */
+int sde_core_irq_register_callback(struct sde_kms *sde_kms, int irq_idx,
+ struct sde_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ /*
+  * hw_intr must be validated here: it is dereferenced by the
+  * irq_idx range check below.
+  */
+ if (!sde_kms || !sde_kms->hw_intr || !register_irq_cb ||
+ !register_irq_cb->func ||
+ !sde_kms->irq_obj.irq_cb_tbl) {
+ SDE_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+ SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ SDE_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+ SDE_EVT32(irq_idx, register_irq_cb);
+ /* del_init first so a node already on some list is moved, not corrupted */
+ list_del_init(&register_irq_cb->list);
+ list_add_tail(&register_irq_cb->list,
+ &sde_kms->irq_obj.irq_cb_tbl[irq_idx]);
+ spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+ return 0;
+}
+
+/*
+ * Unregister @register_irq_cb from @irq_idx; warns if the interrupt is
+ * still enabled after the last callback goes away.
+ */
+int sde_core_irq_unregister_callback(struct sde_kms *sde_kms, int irq_idx,
+ struct sde_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ /*
+  * hw_intr must be validated here: it is dereferenced by the
+  * irq_idx range check below.
+  */
+ if (!sde_kms || !sde_kms->hw_intr || !register_irq_cb ||
+ !register_irq_cb->func ||
+ !sde_kms->irq_obj.irq_cb_tbl) {
+ SDE_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+ SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ SDE_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+ SDE_EVT32(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ /* empty callback list but interrupt is still enabled */
+ if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
+ atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]))
+ SDE_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
+ spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+ return 0;
+}
+
+/* Clear any latched interrupt status across all HW interrupt sources. */
+static void sde_clear_all_irqs(struct sde_kms *sde_kms)
+{
+ if (!sde_kms || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.clear_all_irqs)
+ return;
+
+ sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);
+}
+
+/* Mask every HW interrupt source; silently no-ops if HW is unavailable. */
+static void sde_disable_all_irqs(struct sde_kms *sde_kms)
+{
+ if (!sde_kms || !sde_kms->hw_intr ||
+ !sde_kms->hw_intr->ops.disable_all_irqs)
+ return;
+
+ sde_kms->hw_intr->ops.disable_all_irqs(sde_kms->hw_intr);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Generate the <prefix>_open() and read-only <prefix>_fops boilerplate for
+ * a single_open() debugfs file backed by an existing <prefix>_show().
+ */
+#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
+
+/*
+ * Dump fire count, enable refcount, and callback count for every irq_idx
+ * that has any activity. Backs the "core_irq" debugfs node.
+ */
+static int sde_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+ struct sde_irq *irq_obj = s->private;
+ struct sde_irq_callback *cb;
+ unsigned long irq_flags;
+ int i, irq_count, enable_count, cb_count;
+
+ /* irq_counts is read below, so it must be validated here too */
+ if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl ||
+ !irq_obj->irq_counts) {
+ SDE_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ for (i = 0; i < irq_obj->total_irqs; i++) {
+ /* snapshot all three counters under cb_lock for consistency */
+ spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
+ cb_count = 0;
+ irq_count = atomic_read(&irq_obj->irq_counts[i]);
+ enable_count = atomic_read(&irq_obj->enable_counts[i]);
+ list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
+ cb_count++;
+ spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
+
+ if (irq_count || enable_count || cb_count)
+ seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
+ i, irq_count, enable_count, cb_count);
+ }
+
+ return 0;
+}
+
+DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_core_irq);
+
+/* Create the "core_irq" debugfs node under @parent; always returns 0. */
+static int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
+ struct dentry *parent)
+{
+ sde_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0644,
+ parent, &sde_kms->irq_obj,
+ &sde_debugfs_core_irq_fops);
+
+ return 0;
+}
+
+/* Remove the "core_irq" debugfs node and drop the cached dentry. */
+static void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
+{
+ debugfs_remove(sde_kms->irq_obj.debugfs_file);
+ sde_kms->irq_obj.debugfs_file = NULL;
+}
+
+#else
+/* No-op stubs when CONFIG_DEBUG_FS is disabled. */
+static int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
+ struct dentry *parent)
+{
+ return 0;
+}
+
+static void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
+{
+}
+#endif
+
+/*
+ * Quiesce all HW interrupts and allocate the per-irq callback/refcount
+ * tables. Must run before any client registers a callback.
+ */
+void sde_core_irq_preinstall(struct sde_kms *sde_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!sde_kms) {
+ SDE_ERROR("invalid sde_kms\n");
+ return;
+ } else if (!sde_kms->dev) {
+ SDE_ERROR("invalid drm device\n");
+ return;
+ } else if (!sde_kms->dev->dev_private) {
+ SDE_ERROR("invalid device private\n");
+ return;
+ }
+ priv = sde_kms->dev->dev_private;
+
+ /* power up just long enough to reset HW interrupt state */
+ sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+ sde_clear_all_irqs(sde_kms);
+ sde_disable_all_irqs(sde_kms);
+ sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+ spin_lock_init(&sde_kms->irq_obj.cb_lock);
+
+ /* Create irq callbacks for all possible irq_idx */
+ sde_kms->irq_obj.total_irqs = sde_kms->hw_intr->irq_idx_tbl_size;
+ sde_kms->irq_obj.irq_cb_tbl = kcalloc(sde_kms->irq_obj.total_irqs,
+ sizeof(struct list_head), GFP_KERNEL);
+ sde_kms->irq_obj.enable_counts = kcalloc(sde_kms->irq_obj.total_irqs,
+ sizeof(atomic_t), GFP_KERNEL);
+ sde_kms->irq_obj.irq_counts = kcalloc(sde_kms->irq_obj.total_irqs,
+ sizeof(atomic_t), GFP_KERNEL);
+ /* bail out on allocation failure instead of dereferencing NULL below */
+ if (!sde_kms->irq_obj.irq_cb_tbl || !sde_kms->irq_obj.enable_counts ||
+ !sde_kms->irq_obj.irq_counts) {
+ SDE_ERROR("failed to allocate irq tables\n");
+ kfree(sde_kms->irq_obj.irq_cb_tbl);
+ kfree(sde_kms->irq_obj.enable_counts);
+ kfree(sde_kms->irq_obj.irq_counts);
+ sde_kms->irq_obj.irq_cb_tbl = NULL;
+ sde_kms->irq_obj.enable_counts = NULL;
+ sde_kms->irq_obj.irq_counts = NULL;
+ sde_kms->irq_obj.total_irqs = 0;
+ return;
+ }
+ for (i = 0; i < sde_kms->irq_obj.total_irqs; i++) {
+ INIT_LIST_HEAD(&sde_kms->irq_obj.irq_cb_tbl[i]);
+ atomic_set(&sde_kms->irq_obj.enable_counts[i], 0);
+ atomic_set(&sde_kms->irq_obj.irq_counts[i], 0);
+ }
+
+ sde_debugfs_core_irq_init(sde_kms, sde_kms->debugfs_root);
+}
+
+/* Nothing to do after install; kept to satisfy the kms irq hook set. */
+int sde_core_irq_postinstall(struct sde_kms *sde_kms)
+{
+ return 0;
+}
+
+/*
+ * Tear down the irq bookkeeping: warn about leftover enables/callbacks,
+ * quiesce HW, then free the tables allocated by preinstall.
+ */
+void sde_core_irq_uninstall(struct sde_kms *sde_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!sde_kms) {
+ SDE_ERROR("invalid sde_kms\n");
+ return;
+ } else if (!sde_kms->dev) {
+ SDE_ERROR("invalid drm device\n");
+ return;
+ } else if (!sde_kms->dev->dev_private) {
+ SDE_ERROR("invalid device private\n");
+ return;
+ }
+ priv = sde_kms->dev->dev_private;
+
+ sde_debugfs_core_irq_destroy(sde_kms);
+
+ sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+ /* clients should have unregistered by now; flag any stragglers */
+ for (i = 0; i < sde_kms->irq_obj.total_irqs; i++)
+ if (atomic_read(&sde_kms->irq_obj.enable_counts[i]) ||
+ !list_empty(&sde_kms->irq_obj.irq_cb_tbl[i]))
+ SDE_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+ sde_clear_all_irqs(sde_kms);
+ sde_disable_all_irqs(sde_kms);
+ sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+ kfree(sde_kms->irq_obj.irq_cb_tbl);
+ kfree(sde_kms->irq_obj.enable_counts);
+ kfree(sde_kms->irq_obj.irq_counts);
+ sde_kms->irq_obj.irq_cb_tbl = NULL;
+ sde_kms->irq_obj.enable_counts = NULL;
+ sde_kms->irq_obj.irq_counts = NULL;
+ sde_kms->irq_obj.total_irqs = 0;
+}
+
+/*
+ * Top-level interrupt handler. NOTE(review): no NULL validation here —
+ * presumably the hot path relies on the caller guaranteeing a valid
+ * sde_kms/hw_intr; confirm against the registering code.
+ */
+irqreturn_t sde_core_irq(struct sde_kms *sde_kms)
+{
+ /*
+  * Read interrupt status from all sources. Interrupt status are
+  * stored within hw_intr.
+  * Function will also clear the interrupt status after reading.
+  * Individual interrupt status bit will only get stored if it
+  * is enabled.
+  */
+ sde_kms->hw_intr->ops.get_interrupt_statuses(sde_kms->hw_intr);
+
+ /*
+  * Dispatch to HW driver to handle interrupt lookup that is being
+  * fired. When matching interrupt is located, HW driver will call to
+  * sde_core_irq_callback_handler with the irq_idx from the lookup table.
+  * sde_core_irq_callback_handler will perform the registered function
+  * callback, and do the interrupt status clearing once the registered
+  * callback is finished.
+  */
+ sde_kms->hw_intr->ops.dispatch_irqs(
+ sde_kms->hw_intr,
+ sde_core_irq_callback_handler,
+ sde_kms);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.h b/drivers/gpu/drm/msm/sde/sde_core_irq.h
new file mode 100644
index 000000000000..92642e73daa8
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.h
@@ -0,0 +1,138 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_CORE_IRQ_H__
+#define __SDE_CORE_IRQ_H__
+
+#include "sde_kms.h"
+#include "sde_hw_interrupts.h"
+
+/**
+ * sde_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @sde_kms: SDE handle
+ * @return: none
+ */
+void sde_core_irq_preinstall(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_postinstall - perform post-installation of core IRQ handler
+ * @sde_kms: SDE handle
+ * @return: 0 if success; error code otherwise
+ */
+int sde_core_irq_postinstall(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_uninstall - uninstall core IRQ handler
+ * @sde_kms: SDE handle
+ * @return: none
+ */
+void sde_core_irq_uninstall(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq - core IRQ handler
+ * @sde_kms: SDE handle
+ * @return: interrupt handling status
+ */
+irqreturn_t sde_core_irq(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_idx_lookup - IRQ helper function for lookup irq_idx from HW
+ * interrupt mapping table.
+ * @sde_kms: SDE handle
+ * @intr_type: SDE HW interrupt type for lookup
+ * @instance_idx: SDE HW block instance defined in sde_hw_mdss.h
+ * @return: irq_idx or -EINVAL when fail to lookup
+ */
+int sde_core_irq_idx_lookup(
+ struct sde_kms *sde_kms,
+ enum sde_intr_type intr_type,
+ uint32_t instance_idx);
+
+/**
+ * sde_core_irq_enable - IRQ helper function for enabling one or more IRQs
+ * @sde_kms: SDE handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 for success enabling IRQ, otherwise failure
+ *
+ * This function increments count on each enable and decrements on each
+ * disable. Interrupts is enabled if count is 0 before increment.
+ */
+int sde_core_irq_enable(
+ struct sde_kms *sde_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * sde_core_irq_disable - IRQ helper function for disabling one of more IRQs
+ * @sde_kms: SDE handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 for success disabling IRQ, otherwise failure
+ *
+ * This function increments count on each enable and decrements on each
+ * disable. Interrupts is disabled if count is 0 after decrement.
+ */
+int sde_core_irq_disable(
+ struct sde_kms *sde_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * sde_core_irq_read - IRQ helper function for reading IRQ status
+ * @sde_kms: SDE handle
+ * @irq_idx: irq index
+ * @clear: True to clear the irq after read
+ * @return: non-zero if irq detected; otherwise no irq detected
+ */
+u32 sde_core_irq_read(
+ struct sde_kms *sde_kms,
+ int irq_idx,
+ bool clear);
+
+/**
+ * sde_core_irq_register_callback - For registering callback function on IRQ
+ * interrupt
+ * @sde_kms: SDE handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing callback function
+ * and argument. Passing NULL for irq_cb will unregister
+ * the callback for the given irq_idx
+ * This must exist until un-registration.
+ * @return: 0 for success registering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int sde_core_irq_register_callback(
+ struct sde_kms *sde_kms,
+ int irq_idx,
+ struct sde_irq_callback *irq_cb);
+
+/**
+ * sde_core_irq_unregister_callback - For unregistering callback function on IRQ
+ * interrupt
+ * @sde_kms: SDE handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing callback function
+ * and argument. Passing NULL for irq_cb will unregister
+ * the callback for the given irq_idx
+ * This must match with registration.
+ * @return: 0 for success registering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int sde_core_irq_unregister_callback(
+ struct sde_kms *sde_kms,
+ int irq_idx,
+ struct sde_irq_callback *irq_cb);
+
+#endif /* __SDE_CORE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
new file mode 100644
index 000000000000..0ba644d5519d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -0,0 +1,610 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "msm_prop.h"
+
+#include "sde_kms.h"
+#include "sde_fence.h"
+#include "sde_formats.h"
+#include "sde_hw_sspp.h"
+#include "sde_trace.h"
+#include "sde_crtc.h"
+#include "sde_plane.h"
+#include "sde_encoder.h"
+#include "sde_wb.h"
+#include "sde_core_perf.h"
+#include "sde_trace.h"
+
+/*
+ * Resolve the sde_kms handle from a crtc, or NULL on any broken link.
+ * NOTE(review): assumes @crtc itself is non-NULL — callers validate it.
+ */
+static struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv;
+
+ if (!crtc->dev || !crtc->dev->dev_private) {
+ SDE_ERROR("invalid device\n");
+ return NULL;
+ }
+
+ priv = crtc->dev->dev_private;
+ if (!priv || !priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ return to_sde_kms(priv->kms);
+}
+
+/* Thin alias: "powered on" for perf purposes means the crtc is enabled. */
+static bool _sde_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
+{
+ return sde_crtc_is_enabled(crtc);
+}
+
+/*
+ * Return true if any powered-on crtc on the same device drives a
+ * video-mode interface; used to pick the stricter bandwidth threshold.
+ */
+static bool _sde_core_video_mode_intf_connected(struct drm_crtc *crtc)
+{
+ struct drm_crtc *tmp_crtc;
+
+ /* use the bool literal rather than 0 for a bool-returning function */
+ if (!crtc)
+ return false;
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if ((sde_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
+ _sde_core_perf_crtc_is_power_on(tmp_crtc)) {
+ SDE_DEBUG("video interface connected crtc:%d\n",
+ tmp_crtc->base.id);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*
+ * Atomic-check hook: reject a crtc state whose aggregate AB bandwidth
+ * (this crtc's request plus all other powered-on RT crtcs' current votes)
+ * exceeds the catalog threshold. Returns 0 on success, -E2BIG on overflow.
+ */
+int sde_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ u32 bw, threshold;
+ u64 bw_sum_of_intfs = 0;
+ bool is_video_mode;
+ struct sde_crtc_state *sde_cstate;
+ struct drm_crtc *tmp_crtc;
+ struct sde_kms *kms;
+
+ if (!crtc || !state) {
+ SDE_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _sde_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ SDE_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ /* we only need bandwidth check on real-time clients (interfaces) */
+ if (sde_crtc_is_wb(crtc))
+ return 0;
+
+ sde_cstate = to_sde_crtc_state(state);
+
+ bw_sum_of_intfs = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+
+ /* add every other powered-on RT crtc's committed vote */
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
+ sde_crtc_is_rt(tmp_crtc) && tmp_crtc != crtc) {
+ struct sde_crtc_state *tmp_cstate =
+ to_sde_crtc_state(tmp_crtc->state);
+
+ bw_sum_of_intfs += tmp_cstate->cur_perf.bw_ctl;
+ }
+ }
+
+ /* convert bandwidth to kb */
+ bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+ SDE_DEBUG("calculated bandwidth=%uk\n", bw);
+
+ /* video-mode interfaces get the stricter (low) threshold */
+ is_video_mode = sde_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
+ threshold = (is_video_mode ||
+ _sde_core_video_mode_intf_connected(crtc)) ?
+ kms->catalog->perf.max_bw_low : kms->catalog->perf.max_bw_high;
+
+ SDE_DEBUG("final threshold bw limit = %d\n", threshold);
+
+ if (!threshold) {
+ sde_cstate->cur_perf.bw_ctl = 0;
+ SDE_ERROR("no bandwidth limits specified\n");
+ return -E2BIG;
+ } else if (bw > threshold) {
+ sde_cstate->cur_perf.bw_ctl = 0;
+ SDE_DEBUG("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
+ return -E2BIG;
+ }
+
+ return 0;
+}
+
+/*
+ * Fill @perf from the crtc state's AB/IB/clock properties. @kms is
+ * currently unused but kept in the signature for symmetry with callers.
+ */
+static void _sde_core_perf_calc_crtc(struct sde_kms *kms,
+ struct drm_crtc *crtc,
+ struct sde_core_perf_params *perf)
+{
+ struct sde_crtc_state *sde_cstate;
+
+ sde_cstate = to_sde_crtc_state(crtc->state);
+ memset(perf, 0, sizeof(struct sde_core_perf_params));
+
+ perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);
+ perf->max_per_pipe_ib =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);
+ perf->core_clk_rate =
+ sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
+
+ SDE_DEBUG("crtc=%d clk_rate=%u ib=%llu ab=%llu\n",
+ crtc->base.id, perf->core_clk_rate,
+ perf->max_per_pipe_ib, perf->bw_ctl);
+}
+
+/*
+ * Sum the committed AB votes (and fold the max IB into @perf) across all
+ * powered-on crtcs matching the client class. @core_clk is unused here.
+ */
+static u64 _sde_core_perf_crtc_calc_client_vote(struct sde_kms *kms,
+ struct drm_crtc *crtc, struct sde_core_perf_params *perf,
+ bool nrt_client, u32 core_clk)
+{
+ u64 bw_sum_of_intfs = 0;
+ struct drm_crtc *tmp_crtc;
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ /*
+  * Check the power state of the crtc being iterated (tmp_crtc),
+  * not the caller's crtc, so only active crtcs are aggregated.
+  * RT pass (nrt_client == false) counts every client; NRT pass
+  * counts only NRT clients.
+  */
+ if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
+ (!nrt_client || sde_crtc_is_nrt(tmp_crtc))) {
+ struct sde_crtc_state *sde_cstate =
+ to_sde_crtc_state(tmp_crtc->state);
+
+ perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+ sde_cstate->cur_perf.max_per_pipe_ib);
+
+ bw_sum_of_intfs += sde_cstate->cur_perf.bw_ctl;
+
+ SDE_DEBUG("crtc=%d bw=%llu\n",
+ tmp_crtc->base.id,
+ sde_cstate->cur_perf.bw_ctl);
+ }
+ }
+
+ return bw_sum_of_intfs;
+}
+
+/*
+ * Push one client class' AB/IB vote to the data bus; AB is floored at the
+ * debugfs-tunable minimum bus vote.
+ */
+static void _sde_core_perf_crtc_update_client_vote(struct sde_kms *kms,
+ struct sde_core_perf_params *params, bool nrt_client, u64 bw_vote)
+{
+ struct msm_drm_private *priv = kms->dev->dev_private;
+ u64 bus_ab_quota, bus_ib_quota;
+
+ bus_ab_quota = max(bw_vote, kms->perf.perf_tune.min_bus_vote);
+ bus_ib_quota = params->max_per_pipe_ib;
+
+ SDE_ATRACE_INT("bus_quota", bus_ib_quota);
+ sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
+ nrt_client ? SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT :
+ SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+ bus_ab_quota, bus_ib_quota);
+ SDE_DEBUG("client:%s ab=%llu ib=%llu\n", nrt_client ? "nrt" : "rt",
+ bus_ab_quota, bus_ib_quota);
+}
+
+/*
+ * Recompute and apply bus votes for the client classes affected by @crtc:
+ * the NRT vote when the crtc is NRT, and the RT vote for RT or writeback.
+ */
+static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
+ struct drm_crtc *crtc, u32 core_clk)
+{
+ u64 bw_sum_of_rt_intfs = 0, bw_sum_of_nrt_intfs = 0;
+ struct sde_core_perf_params params = {0};
+
+ SDE_ATRACE_BEGIN(__func__);
+
+ /*
+  * non-real time client
+  */
+ if (sde_crtc_is_nrt(crtc)) {
+ bw_sum_of_nrt_intfs = _sde_core_perf_crtc_calc_client_vote(
+ kms, crtc, &params, true, core_clk);
+ _sde_core_perf_crtc_update_client_vote(kms, &params, true,
+ bw_sum_of_nrt_intfs);
+ }
+
+ /*
+  * real time client
+  */
+ if (!sde_crtc_is_nrt(crtc) ||
+ sde_crtc_is_wb(crtc)) {
+ bw_sum_of_rt_intfs = _sde_core_perf_crtc_calc_client_vote(kms,
+ crtc, &params, false, core_clk);
+ _sde_core_perf_crtc_update_client_vote(kms, &params, false,
+ bw_sum_of_rt_intfs);
+ }
+
+ SDE_ATRACE_END(__func__);
+}
+
+/**
+ * @sde_core_perf_crtc_release_bw() - request zero bandwidth
+ * @crtc - pointer to a crtc
+ *
+ * Function checks a state variable for the crtc, if all pending commit
+ * requests are done, meaning no more bandwidth is needed, release
+ * bandwidth request.
+ */
+void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
+{
+ struct drm_crtc *tmp_crtc;
+ struct sde_crtc_state *sde_cstate;
+ struct sde_kms *kms;
+
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+
+ kms = _sde_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ SDE_ERROR("invalid kms\n");
+ return;
+ }
+
+ sde_cstate = to_sde_crtc_state(crtc->state);
+
+ /* only do this for command panel or writeback */
+ if ((sde_crtc_get_intf_mode(crtc) != INTF_MODE_CMD) &&
+ (sde_crtc_get_intf_mode(crtc) != INTF_MODE_WB_LINE))
+ return;
+
+ /*
+  * If video interface present, cmd panel bandwidth cannot be
+  * released.
+  */
+ if (sde_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
+ sde_crtc_get_intf_mode(tmp_crtc) ==
+ INTF_MODE_VIDEO)
+ return;
+ }
+
+ /* Release the bandwidth */
+ if (kms->perf.enable_bw_release) {
+ trace_sde_cmd_release_bw(crtc->base.id);
+ /* zero both committed and pending votes before re-voting */
+ sde_cstate->cur_perf.bw_ctl = 0;
+ sde_cstate->new_perf.bw_ctl = 0;
+ SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+ _sde_core_perf_crtc_update_bus(kms, crtc, 0);
+ }
+}
+
+/*
+ * Snap a requested rate to a supported core clock level.
+ * NOTE(review): clk_round_rate() returns long but this returns int —
+ * rates above INT_MAX would truncate; confirm the clock plan stays below.
+ */
+static int _sde_core_select_clk_lvl(struct sde_kms *kms,
+ u32 clk_rate)
+{
+ return clk_round_rate(kms->perf.core_clk, clk_rate);
+}
+
+/*
+ * Take the max of every powered-on crtc's committed clock request and
+ * round it to a supported rate. NOTE(review): the rate is rounded both
+ * inside the loop and again via _sde_core_select_clk_lvl — the inner
+ * rounding looks redundant.
+ */
+static u32 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms)
+{
+ u32 clk_rate = 0;
+ struct drm_crtc *crtc;
+ struct sde_crtc_state *sde_cstate;
+ int ncrtc = 0;
+
+ drm_for_each_crtc(crtc, kms->dev) {
+ if (_sde_core_perf_crtc_is_power_on(crtc)) {
+ sde_cstate = to_sde_crtc_state(crtc->state);
+ clk_rate = max(sde_cstate->cur_perf.core_clk_rate,
+ clk_rate);
+ clk_rate = clk_round_rate(kms->perf.core_clk, clk_rate);
+ }
+ ncrtc++;
+ }
+ clk_rate = _sde_core_select_clk_lvl(kms, clk_rate);
+
+ SDE_DEBUG("clk:%u ncrtc:%d\n", clk_rate, ncrtc);
+
+ return clk_rate;
+}
+
+/*
+ * Apply the crtc's perf votes (bus bandwidth, then core clock) for a
+ * commit, end-of-commit, or stop event. Bandwidth is voted before the
+ * clock is raised so the bus can sustain the new rate.
+ * @params_changed: non-zero on a new commit (votes may go up);
+ *                  zero at end of commit/stop (votes may only go down).
+ * @stop_req:       true to force everything to zero.
+ */
+void sde_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req)
+{
+ struct sde_core_perf_params *new, *old;
+ int update_bus = 0, update_clk = 0;
+ u32 clk_rate = 0;
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *sde_cstate;
+ int ret;
+ struct msm_drm_private *priv;
+ struct sde_kms *kms;
+
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+
+ kms = _sde_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ SDE_ERROR("invalid kms\n");
+ return;
+ }
+ priv = kms->dev->dev_private;
+
+ sde_crtc = to_sde_crtc(crtc);
+ sde_cstate = to_sde_crtc_state(crtc->state);
+
+ SDE_DEBUG("crtc:%d stop_req:%d core_clk:%u\n",
+ crtc->base.id, stop_req, kms->perf.core_clk_rate);
+
+ SDE_ATRACE_BEGIN(__func__);
+
+ old = &sde_cstate->cur_perf;
+ new = &sde_cstate->new_perf;
+
+ if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
+ if (params_changed)
+ _sde_core_perf_calc_crtc(kms, crtc, new);
+
+ /*
+  * cases for bus bandwidth update.
+  * 1. new bandwidth vote or writeback output vote
+  * are higher than current vote for update request.
+  * 2. new bandwidth vote or writeback output vote are
+  * lower than current vote at end of commit or stop.
+  */
+ if ((params_changed && ((new->bw_ctl > old->bw_ctl))) ||
+ (!params_changed && ((new->bw_ctl < old->bw_ctl)))) {
+ SDE_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+ crtc->base.id, params_changed, new->bw_ctl,
+ old->bw_ctl);
+ old->bw_ctl = new->bw_ctl;
+ old->max_per_pipe_ib = new->max_per_pipe_ib;
+ update_bus = 1;
+ }
+
+ /* same up-on-commit / down-on-complete policy for the clock */
+ if ((params_changed &&
+ (new->core_clk_rate > old->core_clk_rate)) ||
+ (!params_changed &&
+ (new->core_clk_rate < old->core_clk_rate))) {
+ old->core_clk_rate = new->core_clk_rate;
+ update_clk = 1;
+ }
+ } else {
+ SDE_DEBUG("crtc=%d disable\n", crtc->base.id);
+ /* crtc going down: drop every vote to zero */
+ memset(old, 0, sizeof(*old));
+ memset(new, 0, sizeof(*new));
+ update_bus = 1;
+ update_clk = 1;
+ }
+
+ /*
+  * Calculate mdp clock before bandwidth calculation. If traffic shaper
+  * is enabled and clock increased, the bandwidth calculation can
+  * use the new clock for the rotator bw calculation.
+  */
+ if (update_clk)
+ clk_rate = _sde_core_perf_get_core_clk_rate(kms);
+
+ if (update_bus)
+ _sde_core_perf_crtc_update_bus(kms, crtc, clk_rate);
+
+ /*
+  * Update the clock after bandwidth vote to ensure
+  * bandwidth is available before clock rate is increased.
+  */
+ if (update_clk) {
+ SDE_ATRACE_INT(kms->perf.clk_name, clk_rate);
+ SDE_EVT32(kms->dev, stop_req, clk_rate);
+ ret = sde_power_clk_set_rate(&priv->phandle,
+ kms->perf.clk_name, clk_rate);
+ if (ret) {
+ SDE_ERROR("failed to set %s clock rate %u\n",
+ kms->perf.clk_name, clk_rate);
+ goto end;
+ }
+
+ kms->perf.core_clk_rate = clk_rate;
+ SDE_DEBUG("update clk rate = %d HZ\n", clk_rate);
+ }
+
+end:
+ SDE_ATRACE_END(__func__);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * debugfs "perf_mode" write: non-zero pins the driver at max clock and
+ * max bus vote; zero restores normal dynamic perf tuning.
+ */
+static ssize_t _sde_core_perf_mode_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct sde_core_perf *perf = file->private_data;
+ struct sde_perf_cfg *cfg;
+ int perf_mode = 0;
+ char buf[10];
+
+ if (!perf)
+ return -ENODEV;
+
+ /* dereference catalog only after the NULL check above */
+ cfg = &perf->catalog->perf;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtoint(buf, 0, &perf_mode))
+ return -EFAULT;
+
+ if (perf_mode) {
+ /* run the driver with max clk and BW vote */
+ perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
+ perf->perf_tune.min_bus_vote =
+ (u64) cfg->max_bw_high * 1000;
+ } else {
+ /* reset the perf tune params to 0 */
+ perf->perf_tune.min_core_clk = 0;
+ perf->perf_tune.min_bus_vote = 0;
+ }
+ return count;
+}
+
+/* debugfs "perf_mode" read: report the current min clk / min bus floor. */
+static ssize_t _sde_core_perf_mode_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct sde_core_perf *perf = file->private_data;
+ int len = 0;
+ char buf[40] = {'\0'};
+
+ if (!perf)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf), "min_mdp_clk %lu min_bus_vote %llu\n",
+ perf->perf_tune.min_core_clk,
+ perf->perf_tune.min_bus_vote);
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ /* user buffer must be able to hold the whole worst-case line */
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+/* file_operations backing the read/write "perf_mode" debugfs node */
+static const struct file_operations sde_core_perf_mode_fops = {
+ .open = simple_open,
+ .read = _sde_core_perf_mode_read,
+ .write = _sde_core_perf_mode_write,
+};
+
+/* Remove the "core_perf" debugfs tree and clear the cached root dentry. */
+static void sde_debugfs_core_perf_destroy(struct sde_core_perf *perf)
+{
+ debugfs_remove_recursive(perf->debugfs_root);
+ perf->debugfs_root = NULL;
+}
+
+/*
+ * Create the "core_perf" debugfs tree: tunables for clock rate, bandwidth
+ * thresholds, bw-release control, and the perf_mode pin switch.
+ */
+static int sde_debugfs_core_perf_init(struct sde_core_perf *perf,
+ struct dentry *parent)
+{
+ struct sde_mdss_cfg *catalog = perf->catalog;
+ struct msm_drm_private *priv;
+
+ /* sanity-check the kms link; the handle itself is not needed here */
+ priv = perf->dev->dev_private;
+ if (!priv || !priv->kms) {
+ SDE_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ perf->debugfs_root = debugfs_create_dir("core_perf", parent);
+ if (!perf->debugfs_root) {
+ SDE_ERROR("failed to create core perf debugfs\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_u64("max_core_clk_rate", 0644, perf->debugfs_root,
+ &perf->max_core_clk_rate);
+ debugfs_create_u32("core_clk_rate", 0644, perf->debugfs_root,
+ &perf->core_clk_rate);
+ debugfs_create_u32("enable_bw_release", 0644, perf->debugfs_root,
+ (u32 *)&perf->enable_bw_release);
+ debugfs_create_u32("threshold_low", 0644, perf->debugfs_root,
+ (u32 *)&catalog->perf.max_bw_low);
+ debugfs_create_u32("threshold_high", 0644, perf->debugfs_root,
+ (u32 *)&catalog->perf.max_bw_high);
+ debugfs_create_file("perf_mode", 0644, perf->debugfs_root,
+ (u32 *)perf, &sde_core_perf_mode_fops);
+
+ return 0;
+}
+#else
+/* No-op stubs when CONFIG_DEBUG_FS is disabled. */
+static void sde_debugfs_core_perf_destroy(struct sde_core_perf *perf)
+{
+}
+
+static int sde_debugfs_core_perf_init(struct sde_core_perf *perf,
+ struct dentry *parent)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Tear down the perf context: remove debugfs, release the lock, and
+ * clear all cached references (pointers are borrowed, not owned).
+ */
+void sde_core_perf_destroy(struct sde_core_perf *perf)
+{
+ if (!perf) {
+ SDE_ERROR("invalid parameters\n");
+ return;
+ }
+
+ sde_debugfs_core_perf_destroy(perf);
+ perf->max_core_clk_rate = 0;
+ perf->core_clk = NULL;
+ mutex_destroy(&perf->perf_lock);
+ perf->clk_name = NULL;
+ perf->phandle = NULL;
+ perf->catalog = NULL;
+ perf->dev = NULL;
+}
+
+/*
+ * Initialize the perf context: cache the borrowed handles, resolve the
+ * core clock and its max rate, and create the debugfs tunables.
+ * Returns 0 on success, -EINVAL on bad arguments, -ENODEV on clock
+ * lookup failure (context is cleaned up via sde_core_perf_destroy).
+ */
+int sde_core_perf_init(struct sde_core_perf *perf,
+ struct drm_device *dev,
+ struct sde_mdss_cfg *catalog,
+ struct sde_power_handle *phandle,
+ struct sde_power_client *pclient,
+ char *clk_name,
+ struct dentry *debugfs_parent)
+{
+ /* dev is dereferenced later (debugfs init); validate it like the rest */
+ if (!perf || !dev || !catalog || !phandle || !pclient ||
+ !clk_name || !debugfs_parent) {
+ SDE_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ perf->dev = dev;
+ perf->catalog = catalog;
+ perf->phandle = phandle;
+ perf->pclient = pclient;
+ perf->clk_name = clk_name;
+ mutex_init(&perf->perf_lock);
+
+ perf->core_clk = sde_power_clk_get_clk(phandle, clk_name);
+ if (!perf->core_clk) {
+ SDE_ERROR("invalid core clk\n");
+ goto err;
+ }
+
+ perf->max_core_clk_rate = sde_power_clk_get_max_rate(phandle, clk_name);
+ if (!perf->max_core_clk_rate) {
+ SDE_ERROR("invalid max core clk rate\n");
+ goto err;
+ }
+
+ sde_debugfs_core_perf_init(perf, debugfs_parent);
+
+ return 0;
+
+err:
+ sde_core_perf_destroy(perf);
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.h b/drivers/gpu/drm/msm/sde/sde_core_perf.h
new file mode 100644
index 000000000000..e5dd9b6e6704
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.h
@@ -0,0 +1,124 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_CORE_PERF_H__
+#define __SDE_CORE_PERF_H__
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "sde_hw_catalog.h"
+#include "sde_power_handle.h"
+
+/**
+ * struct sde_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct sde_core_perf_params {
+ /* NOTE(review): bandwidth fields appear to carry bus-scaling vote
+ * values (ib/ab style) - confirm units against the voting code
+ */
+ u64 max_per_pipe_ib;
+ u64 bw_ctl;
+ u32 core_clk_rate;
+};
+
+/**
+ * struct sde_core_perf_tune - definition of performance tuning control
+ * @min_core_clk: minimum core clock
+ * @min_bus_vote: minimum bus vote
+ */
+struct sde_core_perf_tune {
+ unsigned long min_core_clk;
+ u64 min_bus_vote;
+};
+
+/**
+ * struct sde_core_perf - definition of core performance context
+ * @dev: Pointer to drm device
+ * @debugfs_root: top level debug folder
+ * @perf_lock: serialization lock for this context
+ * @catalog: Pointer to catalog configuration
+ * @phandle: Pointer to power handler
+ * @pclient: Pointer to power client
+ * @clk_name: core clock name
+ * @core_clk: Pointer to core clock structure
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ */
+struct sde_core_perf {
+ struct drm_device *dev;
+ struct dentry *debugfs_root;
+ struct mutex perf_lock;
+ struct sde_mdss_cfg *catalog;
+ struct sde_power_handle *phandle;
+ struct sde_power_client *pclient;
+ char *clk_name;
+ struct clk *core_clk;
+ u32 core_clk_rate;
+ u64 max_core_clk_rate;
+ struct sde_core_perf_tune perf_tune;
+ u32 enable_bw_release;
+};
+
+/**
+ * sde_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int sde_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+/**
+ * sde_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * @stop_req: true if this is a stop request
+ */
+void sde_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req);
+
+/**
+ * sde_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc);
+
+/**
+ * sde_core_perf_destroy - destroy the given core performance context
+ * @perf: Pointer to core performance context
+ */
+void sde_core_perf_destroy(struct sde_core_perf *perf);
+
+/**
+ * sde_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @dev: Pointer to drm device
+ * @catalog: Pointer to catalog
+ * @phandle: Pointer to power handle
+ * @pclient: Pointer to power client
+ * @clk_name: core clock name
+ * @debugfs_parent: Pointer to parent debugfs
+ */
+int sde_core_perf_init(struct sde_core_perf *perf,
+ struct drm_device *dev,
+ struct sde_mdss_cfg *catalog,
+ struct sde_power_handle *phandle,
+ struct sde_power_client *pclient,
+ char *clk_name,
+ struct dentry *debugfs_parent);
+
+#endif /* __SDE_CORE_PERF_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
new file mode 100644
index 000000000000..05e6da14cec0
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -0,0 +1,1693 @@
+/*
+ * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/sort.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <uapi/drm/sde_drm.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
+
+#include "sde_kms.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_ctl.h"
+#include "sde_crtc.h"
+#include "sde_plane.h"
+#include "sde_color_processing.h"
+#include "sde_encoder.h"
+#include "sde_connector.h"
+#include "sde_power_handle.h"
+#include "sde_core_perf.h"
+
+/* default input fence timeout, in ms */
+#define SDE_CRTC_INPUT_FENCE_TIMEOUT 2000
+
+/*
+ * The default input fence timeout is 2 seconds while max allowed
+ * range is 10 seconds. Any value above 10 seconds adds glitches beyond
+ * tolerance limit.
+ */
+#define SDE_CRTC_MAX_INPUT_FENCE_TIMEOUT 10000
+
+/* layer mixer index on sde_crtc */
+#define LEFT_MIXER 0
+#define RIGHT_MIXER 1
+
+/* Resolve the sde_kms instance owning the given crtc via drm private data. */
+static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv;
+
+	priv = crtc->dev->dev_private;
+	return to_sde_kms(priv->kms);
+}
+
+/*
+ * sde_crtc_destroy - release all resources held by a sde crtc
+ * @crtc: Pointer to drm crtc (may be NULL; treated as a no-op)
+ *
+ * Frees properties, debugfs entries, fence context, the drm core state,
+ * and finally the sde_crtc container itself.
+ */
+static void sde_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+
+	SDE_DEBUG("\n");
+
+	if (!crtc)
+		return;
+
+	/* derive the container only after validating crtc; the original
+	 * computed it from a potentially NULL pointer before the check
+	 */
+	sde_crtc = to_sde_crtc(crtc);
+
+	if (sde_crtc->blob_info)
+		drm_property_unreference_blob(sde_crtc->blob_info);
+	msm_property_destroy(&sde_crtc->property_info);
+	sde_cp_crtc_destroy_properties(crtc);
+
+	debugfs_remove_recursive(sde_crtc->debugfs_root);
+	mutex_destroy(&sde_crtc->crtc_lock);
+	sde_fence_deinit(&sde_crtc->output_fence);
+
+	drm_crtc_cleanup(crtc);
+	kfree(sde_crtc);
+}
+
+/* Validate a mode change: a seamless transition is only legal on a crtc
+ * that is already enabled and whose active state is not changing.
+ */
+static bool sde_crtc_mode_fixup(struct drm_crtc *crtc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	bool seamless_blocked;
+
+	SDE_DEBUG("\n");
+
+	seamless_blocked = msm_is_mode_seamless(adjusted_mode) &&
+			(!crtc->enabled || crtc->state->active_changed);
+	if (seamless_blocked) {
+		SDE_ERROR("crtc state prevents seamless transition\n");
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * _sde_crtc_setup_blend_cfg - program mixer blend registers for one plane
+ * @mixer: Pointer to the mixer (lm/ctl pair) being configured
+ * @pstate: Pointer to sde plane state supplying ALPHA/BLEND_OP properties
+ * @format: Pointer to the plane format (per-pixel alpha capability)
+ */
+static void _sde_crtc_setup_blend_cfg(struct sde_crtc_mixer *mixer,
+	struct sde_plane_state *pstate, struct sde_format *format)
+{
+	uint32_t blend_op, fg_alpha, bg_alpha;
+	uint32_t blend_type;
+	struct sde_hw_mixer *lm = mixer->hw_lm;
+
+	/* default to opaque blending */
+	fg_alpha = sde_plane_get_property(pstate, PLANE_PROP_ALPHA);
+	bg_alpha = 0xFF - fg_alpha;
+	blend_op = SDE_BLEND_FG_ALPHA_FG_CONST | SDE_BLEND_BG_ALPHA_BG_CONST;
+	blend_type = sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP);
+
+	SDE_DEBUG("blend type:0x%x blend alpha:0x%x\n", blend_type, fg_alpha);
+
+	switch (blend_type) {
+
+	case SDE_DRM_BLEND_OP_OPAQUE:
+		blend_op = SDE_BLEND_FG_ALPHA_FG_CONST |
+			SDE_BLEND_BG_ALPHA_BG_CONST;
+		break;
+
+	case SDE_DRM_BLEND_OP_PREMULTIPLIED:
+		if (format->alpha_enable) {
+			blend_op = SDE_BLEND_FG_ALPHA_FG_CONST |
+				SDE_BLEND_BG_ALPHA_FG_PIXEL;
+			if (fg_alpha != 0xff) {
+				bg_alpha = fg_alpha;
+				blend_op |= SDE_BLEND_BG_MOD_ALPHA |
+					SDE_BLEND_BG_INV_MOD_ALPHA;
+			} else {
+				blend_op |= SDE_BLEND_BG_INV_ALPHA;
+			}
+		}
+		break;
+
+	case SDE_DRM_BLEND_OP_COVERAGE:
+		if (format->alpha_enable) {
+			blend_op = SDE_BLEND_FG_ALPHA_FG_PIXEL |
+				SDE_BLEND_BG_ALPHA_FG_PIXEL;
+			if (fg_alpha != 0xff) {
+				bg_alpha = fg_alpha;
+				blend_op |= SDE_BLEND_FG_MOD_ALPHA |
+					SDE_BLEND_FG_INV_MOD_ALPHA |
+					SDE_BLEND_BG_MOD_ALPHA |
+					SDE_BLEND_BG_INV_MOD_ALPHA;
+			} else {
+				blend_op |= SDE_BLEND_BG_INV_ALPHA;
+			}
+		}
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+
+	lm->ops.setup_blend_config(lm, pstate->stage, fg_alpha,
+		bg_alpha, blend_op);
+	/* fixed: the original format string contained a stray escaped quote
+	 * plus a backslash line-continuation that spliced source indentation
+	 * into the log output; use adjacent string literals instead
+	 */
+	SDE_DEBUG("format 0x%x, alpha_enable %u fg alpha:0x%x bg alpha:0x%x "
+		"blend_op:0x%x\n", format->base.pixel_format,
+		format->alpha_enable, fg_alpha, bg_alpha, blend_op);
+}
+
+/*
+ * _sde_crtc_blend_setup_mixer - stage every plane of the crtc onto the left
+ * and/or right layer mixer and program per-plane blend configuration.
+ * @crtc: Pointer to drm crtc
+ * @sde_crtc: Pointer to sde crtc holding the shared stage_cfg
+ * @mixer: Mixer array indexed by LEFT_MIXER/RIGHT_MIXER
+ *
+ * A plane starting right of the split boundary is staged on the right LM;
+ * a left-LM plane that crosses the boundary is additionally staged on the
+ * right LM so both halves are rendered.
+ */
+static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
+ struct sde_crtc *sde_crtc, struct sde_crtc_mixer *mixer)
+{
+ struct drm_plane *plane;
+
+ struct sde_plane_state *pstate = NULL;
+ struct sde_format *format;
+ struct sde_hw_ctl *ctl = mixer->hw_ctl;
+ struct sde_hw_stage_cfg *stage_cfg = &sde_crtc->stage_cfg;
+
+ u32 flush_mask = 0, crtc_split_width;
+ uint32_t lm_idx = LEFT_MIXER, idx;
+ bool bg_alpha_enable[CRTC_DUAL_MIXERS] = {false};
+ bool lm_right = false;
+ /* per-stage slot counters, kept separately for each mixer */
+ int left_crtc_zpos_cnt[SDE_STAGE_MAX + 1] = {0};
+ int right_crtc_zpos_cnt[SDE_STAGE_MAX + 1] = {0};
+
+ crtc_split_width = get_crtc_split_width(crtc);
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+
+ pstate = to_sde_plane_state(plane->state);
+
+ flush_mask = ctl->ops.get_bitmask_sspp(ctl,
+ sde_plane_pipe(plane));
+
+ /* always stage plane on either left or right lm */
+ if (plane->state->crtc_x >= crtc_split_width) {
+ lm_idx = RIGHT_MIXER;
+ idx = right_crtc_zpos_cnt[pstate->stage]++;
+ } else {
+ lm_idx = LEFT_MIXER;
+ idx = left_crtc_zpos_cnt[pstate->stage]++;
+ }
+
+ /* stage plane on right LM if it crosses the boundary */
+ lm_right = (lm_idx == LEFT_MIXER) &&
+ (plane->state->crtc_x + plane->state->crtc_w >
+ crtc_split_width);
+
+ stage_cfg->stage[lm_idx][pstate->stage][idx] =
+ sde_plane_pipe(plane);
+ mixer[lm_idx].flush_mask |= flush_mask;
+
+ SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
+ crtc->base.id,
+ pstate->stage,
+ plane->base.id,
+ sde_plane_pipe(plane) - SSPP_VIG0,
+ plane->state->fb ?
+ plane->state->fb->base.id : -1);
+
+ format = to_sde_format(msm_framebuffer_format(pstate->base.fb));
+
+ /* blend config update */
+ if (pstate->stage != SDE_STAGE_BASE) {
+ _sde_crtc_setup_blend_cfg(mixer + lm_idx, pstate,
+ format);
+
+ /* an opaque plane over an alpha base resets the op mode */
+ if (bg_alpha_enable[lm_idx] && !format->alpha_enable)
+ mixer[lm_idx].mixer_op_mode = 0;
+ else
+ mixer[lm_idx].mixer_op_mode |=
+ 1 << pstate->stage;
+ } else if (format->alpha_enable) {
+ bg_alpha_enable[lm_idx] = true;
+ }
+
+ /* repeat staging/blend setup on the right LM for split planes */
+ if (lm_right) {
+ idx = right_crtc_zpos_cnt[pstate->stage]++;
+ stage_cfg->stage[RIGHT_MIXER][pstate->stage][idx] =
+ sde_plane_pipe(plane);
+ mixer[RIGHT_MIXER].flush_mask |= flush_mask;
+
+ /* blend config update */
+ if (pstate->stage != SDE_STAGE_BASE) {
+ _sde_crtc_setup_blend_cfg(mixer + RIGHT_MIXER,
+ pstate, format);
+
+ if (bg_alpha_enable[RIGHT_MIXER] &&
+ !format->alpha_enable)
+ mixer[RIGHT_MIXER].mixer_op_mode = 0;
+ else
+ mixer[RIGHT_MIXER].mixer_op_mode |=
+ 1 << pstate->stage;
+ } else if (format->alpha_enable) {
+ bg_alpha_enable[RIGHT_MIXER] = true;
+ }
+ }
+ }
+}
+
+/**
+ * _sde_crtc_blend_setup - configure crtc mixers
+ * @crtc: Pointer to drm crtc structure
+ *
+ * Clears all blend stages, restages every plane via
+ * _sde_crtc_blend_setup_mixer(), then programs alpha-out, flush masks and
+ * blend stages for each active mixer.
+ */
+static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
+{
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+ struct sde_crtc_mixer *mixer = sde_crtc->mixers;
+ struct sde_hw_ctl *ctl;
+ struct sde_hw_mixer *lm;
+
+ int i;
+
+ SDE_DEBUG("%s\n", sde_crtc->name);
+
+ if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+ SDE_ERROR("invalid number mixers: %d\n", sde_crtc->num_mixers);
+ return;
+ }
+
+ /* reset per-mixer software state before restaging */
+ for (i = 0; i < sde_crtc->num_mixers; i++) {
+ if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
+ SDE_ERROR("invalid lm or ctl assigned to mixer\n");
+ return;
+ }
+ mixer[i].mixer_op_mode = 0;
+ mixer[i].flush_mask = 0;
+ if (mixer[i].hw_ctl->ops.clear_all_blendstages)
+ mixer[i].hw_ctl->ops.clear_all_blendstages(
+ mixer[i].hw_ctl);
+ }
+
+ /* initialize stage cfg */
+ memset(&sde_crtc->stage_cfg, 0, sizeof(struct sde_hw_stage_cfg));
+
+ _sde_crtc_blend_setup_mixer(crtc, sde_crtc, mixer);
+
+ /* push the computed configuration into the hardware blocks */
+ for (i = 0; i < sde_crtc->num_mixers; i++) {
+ ctl = mixer[i].hw_ctl;
+ lm = mixer[i].hw_lm;
+
+ lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
+
+ mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
+ mixer[i].hw_lm->idx);
+
+ /* stage config flush mask */
+ ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+
+ SDE_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+ mixer[i].hw_lm->idx - LM_0,
+ mixer[i].mixer_op_mode,
+ ctl->idx - CTL_0,
+ mixer[i].flush_mask);
+
+ ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+ &sde_crtc->stage_cfg, i);
+ }
+}
+
+/*
+ * sde_crtc_prepare_commit - pre-commit bookkeeping for a crtc
+ * @crtc: Pointer to drm crtc
+ * @old_state: Previous crtc state (unused here)
+ *
+ * Records which connectors are attached to this crtc, marks the state
+ * real-time if any non-writeback connector is present, caches the intf
+ * mode from the first connector's encoder, and prepares the fences.
+ */
+void sde_crtc_prepare_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *cstate;
+ struct drm_connector *conn;
+
+ if (!crtc || !crtc->state) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+
+ sde_crtc = to_sde_crtc(crtc);
+ cstate = to_sde_crtc_state(crtc->state);
+ SDE_EVT32(DRMID(crtc));
+
+ /* identify connectors attached to this crtc */
+ cstate->is_rt = false;
+ cstate->num_connectors = 0;
+
+ drm_for_each_connector(conn, crtc->dev)
+ if (conn->state && conn->state->crtc == crtc &&
+ cstate->num_connectors < MAX_CONNECTORS) {
+ cstate->connectors[cstate->num_connectors++] = conn;
+ sde_connector_prepare_fence(conn);
+
+ /* writeback (virtual) connectors are non-real-time */
+ if (conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+ cstate->is_rt = true;
+ }
+
+ /* interface mode is taken from the first attached encoder only */
+ if (cstate->num_connectors > 0 && cstate->connectors[0]->encoder)
+ cstate->intf_mode = sde_encoder_get_intf_mode(
+ cstate->connectors[0]->encoder);
+ else
+ cstate->intf_mode = INTF_MODE_NONE;
+
+ /* prepare main output fence */
+ sde_fence_prepare(&sde_crtc->output_fence);
+}
+
+/* Report whether the crtc currently drives a real-time (non-writeback)
+ * display. Invalid input conservatively reports true.
+ */
+bool sde_crtc_is_rt(struct drm_crtc *crtc)
+{
+	struct sde_crtc_state *cstate;
+
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid crtc or state\n");
+		return true;
+	}
+
+	cstate = to_sde_crtc_state(crtc->state);
+	return cstate->is_rt;
+}
+
+/* if file!=NULL, this is preclose potential cancel-flip path */
+static void _sde_crtc_complete_flip(struct drm_crtc *crtc,
+ struct drm_file *file)
+{
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ /* event_lock protects sde_crtc->event against the vblank irq path */
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = sde_crtc->event;
+ if (event) {
+ /* if regular vblank case (!file) or if cancel-flip from
+ * preclose on file that requested flip, then send the
+ * event:
+ */
+ if (!file || (event->base.file_priv == file)) {
+ sde_crtc->event = NULL;
+ DRM_DEBUG_VBL("%s: send event: %pK\n",
+ sde_crtc->name, event);
+ SDE_EVT32(DRMID(crtc));
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/* Vblank interrupt callback: maintain debugfs statistics and forward the
+ * event to the drm core.
+ */
+static void sde_crtc_vblank_cb(void *data)
+{
+	struct drm_crtc *crtc = data;
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+
+	/* keep statistics on vblank callback - with auto reset via debugfs */
+	if (!ktime_equal(sde_crtc->vblank_cb_time, ktime_set(0, 0)))
+		sde_crtc->vblank_cb_count++;
+	else
+		sde_crtc->vblank_cb_time = ktime_get();
+
+	drm_crtc_handle_vblank(crtc);
+	DRM_DEBUG_VBL("crtc%d\n", crtc->base.id);
+	SDE_EVT32_IRQ(DRMID(crtc));
+}
+
+/*
+ * sde_crtc_frame_event_work - deferred (kthread) handler for frame events
+ * @work: Pointer to the embedded kthread work item
+ *
+ * Decrements the crtc frame_pending count on DONE/ERROR events; when the
+ * last pending frame retires, drops the data bus bandwidth vote and
+ * releases perf bandwidth. The fevent is recycled onto the free list.
+ */
+static void sde_crtc_frame_event_work(struct kthread_work *work)
+{
+ struct msm_drm_private *priv;
+ struct sde_crtc_frame_event *fevent;
+ struct drm_crtc *crtc;
+ struct sde_crtc *sde_crtc;
+ struct sde_kms *sde_kms;
+ unsigned long flags;
+
+ if (!work) {
+ SDE_ERROR("invalid work handle\n");
+ return;
+ }
+
+ fevent = container_of(work, struct sde_crtc_frame_event, work);
+ if (!fevent->crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+
+ crtc = fevent->crtc;
+ sde_crtc = to_sde_crtc(crtc);
+
+ sde_kms = _sde_crtc_get_kms(crtc);
+ if (!sde_kms) {
+ SDE_ERROR("invalid kms handle\n");
+ return;
+ }
+ priv = sde_kms->dev->dev_private;
+
+ SDE_DEBUG("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+ ktime_to_ns(fevent->ts));
+
+ if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
+ fevent->event == SDE_ENCODER_FRAME_EVENT_ERROR) {
+
+ if (atomic_read(&sde_crtc->frame_pending) < 1) {
+ /* this should not happen */
+ SDE_ERROR("crtc%d ts:%lld invalid frame_pending:%d\n",
+ crtc->base.id,
+ ktime_to_ns(fevent->ts),
+ atomic_read(&sde_crtc->frame_pending));
+ SDE_EVT32(DRMID(crtc), fevent->event, 0);
+ } else if (atomic_dec_return(&sde_crtc->frame_pending) == 0) {
+ /* release bandwidth and other resources */
+ SDE_DEBUG("crtc%d ts:%lld last pending\n",
+ crtc->base.id,
+ ktime_to_ns(fevent->ts));
+ SDE_EVT32(DRMID(crtc), fevent->event, 1);
+ sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+ sde_kms->core_client, false);
+ sde_core_perf_crtc_release_bw(crtc);
+ } else {
+ SDE_EVT32(DRMID(crtc), fevent->event, 2);
+ }
+ } else {
+ SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
+ ktime_to_ns(fevent->ts),
+ fevent->event);
+ SDE_EVT32(DRMID(crtc), fevent->event, 3);
+ }
+
+ /* return the event container to the free list for reuse */
+ spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+ list_add_tail(&fevent->list, &sde_crtc->frame_event_list);
+ spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+}
+
+/*
+ * sde_crtc_frame_event_cb - encoder frame event callback (may be irq context)
+ * @data: Pointer to the drm crtc registered with the encoder
+ * @event: SDE_ENCODER_FRAME_EVENT_* code
+ *
+ * Grabs a free event container and queues sde_crtc_frame_event_work() on the
+ * per-crtc display thread; logs an overflow if the free list is exhausted.
+ */
+static void sde_crtc_frame_event_cb(void *data, u32 event)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *)data;
+ struct sde_crtc *sde_crtc;
+ struct msm_drm_private *priv;
+ struct sde_crtc_frame_event *fevent;
+ unsigned long flags;
+ int pipe_id;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ SDE_ERROR("invalid parameters\n");
+ return;
+ }
+ sde_crtc = to_sde_crtc(crtc);
+ priv = crtc->dev->dev_private;
+ pipe_id = drm_crtc_index(crtc);
+
+ SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+ SDE_EVT32(DRMID(crtc), event);
+
+ /* pop a free event container; spinlock since callers may be in irq */
+ spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+ fevent = list_first_entry_or_null(&sde_crtc->frame_event_list,
+ struct sde_crtc_frame_event, list);
+ if (fevent)
+ list_del_init(&fevent->list);
+ spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+
+ if (!fevent) {
+ /* event dropped: all containers are already queued */
+ SDE_ERROR("crtc%d event %d overflow\n",
+ crtc->base.id, event);
+ SDE_EVT32(DRMID(crtc), event);
+ return;
+ }
+
+ fevent->event = event;
+ fevent->crtc = crtc;
+ fevent->ts = ktime_get();
+ queue_kthread_work(&priv->disp_thread[pipe_id].worker, &fevent->work);
+}
+
+/* Post-commit hook: signal the crtc output fence and let every attached
+ * connector finalize its own commit bookkeeping.
+ */
+void sde_crtc_complete_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	int idx;
+
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+	SDE_EVT32(DRMID(crtc));
+
+	/* signal output fence(s) at end of commit */
+	sde_fence_signal(&sde_crtc->output_fence, 0);
+
+	for (idx = 0; idx < cstate->num_connectors; idx++)
+		sde_connector_complete_commit(cstate->connectors[idx]);
+}
+
+/**
+ * _sde_crtc_set_input_fence_timeout - update ns version of in fence timeout
+ * @cstate: Pointer to sde crtc state
+ */
+static void _sde_crtc_set_input_fence_timeout(struct sde_crtc_state *cstate)
+{
+	u64 timeout_ms;
+
+	if (!cstate) {
+		SDE_ERROR("invalid cstate\n");
+		return;
+	}
+
+	/* property value is in milliseconds; cache a nanosecond copy */
+	timeout_ms = sde_crtc_get_property(cstate,
+			CRTC_PROP_INPUT_FENCE_TIMEOUT);
+	cstate->input_fence_timeout_ns = timeout_ms * NSEC_PER_MSEC;
+}
+
+/**
+ * _sde_crtc_wait_for_fences - wait for incoming framebuffer sync fences
+ * @crtc: Pointer to CRTC object
+ *
+ * Waits sequentially on each plane's input fence, bounded overall by the
+ * crtc state's input_fence_timeout_ns. Once the budget is exhausted,
+ * remaining planes are still polled with wait_ms == 0 so each can report
+ * its own timeout.
+ */
+static void _sde_crtc_wait_for_fences(struct drm_crtc *crtc)
+{
+ struct drm_plane *plane = NULL;
+ uint32_t wait_ms = 1;
+ ktime_t kt_end, kt_wait;
+
+ SDE_DEBUG("\n");
+
+ if (!crtc || !crtc->state) {
+ SDE_ERROR("invalid crtc/state %pK\n", crtc);
+ return;
+ }
+
+ /* use monotonic timer to limit total fence wait time */
+ kt_end = ktime_add_ns(ktime_get(),
+ to_sde_crtc_state(crtc->state)->input_fence_timeout_ns);
+
+ /*
+ * Wait for fences sequentially, as all of them need to be signalled
+ * before we can proceed.
+ *
+ * Limit total wait time to INPUT_FENCE_TIMEOUT, but still call
+ * sde_plane_wait_input_fence with wait_ms == 0 after the timeout so
+ * that each plane can check its fence status and react appropriately
+ * if its fence has timed out.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ if (wait_ms) {
+ /* determine updated wait time */
+ kt_wait = ktime_sub(kt_end, ktime_get());
+ if (ktime_compare(kt_wait, ktime_set(0, 0)) >= 0)
+ wait_ms = ktime_to_ms(kt_wait);
+ else
+ wait_ms = 0;
+ }
+ sde_plane_wait_input_fence(plane, wait_ms);
+ }
+}
+
+/*
+ * _sde_crtc_setup_mixer_for_encoder - bind the LM/CTL/DSPP blocks reserved
+ * by one encoder into the crtc's mixer array.
+ * @crtc: Pointer to drm crtc
+ * @enc: Pointer to the encoder whose resource-manager reservations are read
+ *
+ * Fewer CTLs than LMs is legal: a later LM without its own CTL reuses the
+ * previous valid CTL. DSPP blocks are optional and may be NULL.
+ */
+static void _sde_crtc_setup_mixer_for_encoder(
+ struct drm_crtc *crtc,
+ struct drm_encoder *enc)
+{
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+ struct sde_kms *sde_kms = _sde_crtc_get_kms(crtc);
+ struct sde_rm *rm = &sde_kms->rm;
+ struct sde_crtc_mixer *mixer;
+ struct sde_hw_ctl *last_valid_ctl = NULL;
+ int i;
+ struct sde_rm_hw_iter lm_iter, ctl_iter, dspp_iter;
+
+ sde_rm_init_hw_iter(&lm_iter, enc->base.id, SDE_HW_BLK_LM);
+ sde_rm_init_hw_iter(&ctl_iter, enc->base.id, SDE_HW_BLK_CTL);
+ sde_rm_init_hw_iter(&dspp_iter, enc->base.id, SDE_HW_BLK_DSPP);
+
+ /* Set up all the mixers and ctls reserved by this encoder */
+ for (i = sde_crtc->num_mixers; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
+ mixer = &sde_crtc->mixers[i];
+
+ if (!sde_rm_get_hw(rm, &lm_iter))
+ break;
+ mixer->hw_lm = (struct sde_hw_mixer *)lm_iter.hw;
+
+ /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
+ if (!sde_rm_get_hw(rm, &ctl_iter)) {
+ SDE_DEBUG("no ctl assigned to lm %d, using previous\n",
+ mixer->hw_lm->idx - LM_0);
+ mixer->hw_ctl = last_valid_ctl;
+ } else {
+ mixer->hw_ctl = (struct sde_hw_ctl *)ctl_iter.hw;
+ last_valid_ctl = mixer->hw_ctl;
+ }
+
+ /* Shouldn't happen, mixers are always >= ctls */
+ if (!mixer->hw_ctl) {
+ SDE_ERROR("no valid ctls found for lm %d\n",
+ mixer->hw_lm->idx - LM_0);
+ return;
+ }
+
+ /* Dspp may be null */
+ (void) sde_rm_get_hw(rm, &dspp_iter);
+ mixer->hw_dspp = (struct sde_hw_dspp *)dspp_iter.hw;
+
+ mixer->encoder = enc;
+
+ sde_crtc->num_mixers++;
+ SDE_DEBUG("setup mixer %d: lm %d\n",
+ i, mixer->hw_lm->idx - LM_0);
+ SDE_DEBUG("setup mixer %d: ctl %d\n",
+ i, mixer->hw_ctl->idx - CTL_0);
+ }
+}
+
+/* Rebuild the crtc's mixer array from the reservations of every encoder
+ * currently attached to it.
+ */
+static void _sde_crtc_setup_mixers(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	struct drm_encoder *enc;
+
+	sde_crtc->num_mixers = 0;
+	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
+
+	mutex_lock(&sde_crtc->crtc_lock);
+	/* Check for mixers on all encoders attached to this crtc */
+	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+		if (enc->crtc == crtc)
+			_sde_crtc_setup_mixer_for_encoder(crtc, enc);
+	}
+	mutex_unlock(&sde_crtc->crtc_lock);
+}
+
+/*
+ * sde_crtc_atomic_begin - atomic helper "begin" hook
+ * @crtc: Pointer to drm crtc
+ * @old_state: Previous crtc state (unused here)
+ *
+ * Lazily assigns mixers, latches the pending vblank event, clears stale
+ * flush masks, then programs blending and color processing.
+ */
+static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct sde_crtc *sde_crtc;
+ struct drm_device *dev;
+ unsigned long flags;
+ u32 i;
+
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+
+ if (!crtc->state->enable) {
+ SDE_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+ sde_crtc = to_sde_crtc(crtc);
+ dev = crtc->dev;
+
+ if (!sde_crtc->num_mixers)
+ _sde_crtc_setup_mixers(crtc);
+
+ /* a still-pending event here means the previous one was never sent */
+ if (sde_crtc->event) {
+ WARN_ON(sde_crtc->event);
+ } else {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ sde_crtc->event = crtc->state->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ /* Reset flush mask from previous commit */
+ for (i = 0; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
+ struct sde_hw_ctl *ctl = sde_crtc->mixers[i].hw_ctl;
+
+ if (ctl)
+ ctl->ops.clear_pending_flush(ctl);
+ }
+
+ /*
+ * If no mixers have been allocated in sde_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!sde_crtc->num_mixers))
+ return;
+
+ _sde_crtc_blend_setup(crtc);
+ sde_cp_crtc_apply_properties(crtc);
+
+ /*
+ * PP_DONE irq is only used by command mode for now.
+ * It is better to request pending before FLUSH and START trigger
+ * to make sure no pp_done irq missed.
+ * This is safe because no pp_done will happen before SW trigger
+ * in command mode.
+ */
+}
+
+/*
+ * sde_crtc_atomic_flush - atomic helper "flush" hook
+ * @crtc: Pointer to drm crtc
+ * @old_crtc_state: Previous crtc state (unused here)
+ *
+ * Waits for plane input fences, updates perf votes, and flushes each
+ * plane; actual hardware kickoff is triggered later by the outer layer
+ * via sde_crtc_commit_kickoff().
+ */
+static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct sde_crtc *sde_crtc;
+ struct drm_device *dev;
+ struct drm_plane *plane;
+ unsigned long flags;
+
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+
+ if (!crtc->state->enable) {
+ SDE_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+ sde_crtc = to_sde_crtc(crtc);
+
+ dev = crtc->dev;
+
+ /* the event may already have been latched by atomic_begin */
+ if (sde_crtc->event) {
+ SDE_DEBUG("already received sde_crtc->event\n");
+ } else {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ sde_crtc->event = crtc->state->event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ /*
+ * If no mixers has been allocated in sde_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!sde_crtc->num_mixers))
+ return;
+
+ /* wait for acquire fences before anything else is done */
+ _sde_crtc_wait_for_fences(crtc);
+
+ /* update performance setting before crtc kickoff */
+ sde_core_perf_crtc_update(crtc, 1, false);
+
+ /*
+ * Final plane updates: Give each plane a chance to complete all
+ * required writes/flushing before crtc's "flush
+ * everything" call below.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc)
+ sde_plane_flush(plane);
+
+ /* Kickoff will be scheduled by outer layer */
+}
+
+/**
+ * sde_crtc_destroy_state - state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ *
+ * Releases the drm core portion of the state first, then frees the
+ * property-value storage managed by the msm_property helper.
+ */
+static void sde_crtc_destroy_state(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+
+	if (!crtc || !state) {
+		SDE_ERROR("invalid argument(s)\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(state);
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+	__drm_atomic_helper_crtc_destroy_state(crtc, state);
+
+	/* destroy value helper */
+	msm_property_destroy_state(&sde_crtc->property_info, cstate,
+			cstate->property_values, cstate->property_blobs);
+}
+
+/*
+ * sde_crtc_commit_kickoff - trigger hardware flush/start for a commit
+ * @crtc: Pointer to drm crtc
+ *
+ * Prepares each attached encoder, accounts the outstanding frame, votes
+ * data bus bandwidth on the first pending frame, then kicks off every
+ * encoder. Frames retire via sde_crtc_frame_event_work().
+ */
+void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ struct sde_crtc *sde_crtc;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+
+ if (!crtc) {
+ SDE_ERROR("invalid argument\n");
+ return;
+ }
+ dev = crtc->dev;
+ sde_crtc = to_sde_crtc(crtc);
+ sde_kms = _sde_crtc_get_kms(crtc);
+ priv = sde_kms->dev->dev_private;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ /*
+ * Encoder will flush/start now, unless it has a tx pending.
+ * If so, it may delay and flush at an irq event (e.g. ppdone)
+ */
+ sde_encoder_prepare_for_kickoff(encoder);
+ }
+
+ if (atomic_read(&sde_crtc->frame_pending) > 2) {
+ /* framework allows only 1 outstanding + current */
+ SDE_ERROR("crtc%d invalid frame pending\n",
+ crtc->base.id);
+ SDE_EVT32(DRMID(crtc), 0);
+ return;
+ } else if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
+ /* acquire bandwidth and other resources */
+ SDE_DEBUG("crtc%d first commit\n", crtc->base.id);
+ SDE_EVT32(DRMID(crtc), 1);
+ sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+ sde_kms->core_client, true);
+ } else {
+ SDE_DEBUG("crtc%d commit\n", crtc->base.id);
+ SDE_EVT32(DRMID(crtc), 2);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ sde_encoder_kickoff(encoder);
+ }
+}
+
+/**
+ * sde_crtc_duplicate_state - state duplicate hook
+ * @crtc: Pointer to drm crtc structure
+ * @Returns: Pointer to new drm_crtc_state structure, or NULL on failure
+ *
+ * Allocates a new sde crtc state, copies the custom property values from
+ * the current state, then lets the drm helper copy the base state.
+ */
+static struct drm_crtc_state *sde_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *cstate, *old_cstate;
+
+ if (!crtc || !crtc->state) {
+ SDE_ERROR("invalid argument(s)\n");
+ return NULL;
+ }
+
+ sde_crtc = to_sde_crtc(crtc);
+ old_cstate = to_sde_crtc_state(crtc->state);
+ cstate = msm_property_alloc_state(&sde_crtc->property_info);
+ if (!cstate) {
+ SDE_ERROR("failed to allocate state\n");
+ return NULL;
+ }
+
+ /* duplicate value helper */
+ msm_property_duplicate_state(&sde_crtc->property_info,
+ old_cstate, cstate,
+ cstate->property_values, cstate->property_blobs);
+
+ /* duplicate base helper */
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
+
+ return &cstate->base;
+}
+
+/**
+ * sde_crtc_reset - reset hook for CRTCs
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ * @crtc: Pointer to drm crtc structure
+ *
+ * On allocation failure the crtc is left with a NULL state pointer.
+ */
+static void sde_crtc_reset(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	/* remove previous state, if present */
+	if (crtc->state) {
+		sde_crtc_destroy_state(crtc, crtc->state);
+		/* use NULL, not integer 0, for the pointer reset */
+		crtc->state = NULL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = msm_property_alloc_state(&sde_crtc->property_info);
+	if (!cstate) {
+		SDE_ERROR("failed to allocate state\n");
+		return;
+	}
+
+	/* reset value helper */
+	msm_property_reset_state(&sde_crtc->property_info, cstate,
+			cstate->property_values, cstate->property_blobs);
+
+	/* cache the input fence timeout in ns for this fresh state */
+	_sde_crtc_set_input_fence_timeout(cstate);
+
+	cstate->base.crtc = crtc;
+	crtc->state = &cstate->base;
+}
+
+/*
+ * sde_crtc_disable - crtc disable hook
+ * @crtc: Pointer to drm crtc
+ *
+ * Force-releases any leftover vblank references and pending-frame
+ * bandwidth votes (both indicate an error if non-zero here), updates perf
+ * with a stop request, unregisters encoder callbacks, and drops the mixer
+ * assignments.
+ */
+static void sde_crtc_disable(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv;
+ struct sde_crtc *sde_crtc;
+ struct drm_encoder *encoder;
+ struct sde_kms *sde_kms;
+
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+ sde_crtc = to_sde_crtc(crtc);
+ sde_kms = _sde_crtc_get_kms(crtc);
+ priv = sde_kms->dev->dev_private;
+
+ SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+ mutex_lock(&sde_crtc->crtc_lock);
+ SDE_EVT32(DRMID(crtc));
+
+ /* leftover vblank references indicate an unbalanced enable/disable */
+ if (atomic_read(&sde_crtc->vblank_refcount)) {
+ SDE_ERROR("crtc%d invalid vblank refcount\n",
+ crtc->base.id);
+ SDE_EVT32(DRMID(crtc));
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ sde_encoder_register_vblank_callback(encoder, NULL,
+ NULL);
+ }
+ atomic_set(&sde_crtc->vblank_refcount, 0);
+ }
+
+ if (atomic_read(&sde_crtc->frame_pending)) {
+ /* release bandwidth and other resources */
+ SDE_ERROR("crtc%d invalid frame pending\n",
+ crtc->base.id);
+ SDE_EVT32(DRMID(crtc));
+ sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+ sde_kms->core_client, false);
+ sde_core_perf_crtc_release_bw(crtc);
+ atomic_set(&sde_crtc->frame_pending, 0);
+ }
+
+ /* stop request: drops this crtc's perf votes */
+ sde_core_perf_crtc_update(crtc, 0, true);
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ sde_encoder_register_frame_event_callback(encoder, NULL, NULL);
+ }
+
+ memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
+ sde_crtc->num_mixers = 0;
+ mutex_unlock(&sde_crtc->crtc_lock);
+}
+
+/*
+ * sde_crtc_enable - crtc enable hook
+ * @crtc: Pointer to drm crtc
+ *
+ * Registers the frame event callback on each attached encoder and
+ * programs the output dimensions of every assigned layer mixer from the
+ * adjusted mode.
+ */
+static void sde_crtc_enable(struct drm_crtc *crtc)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_mixer *mixer;
+ struct sde_hw_mixer *lm;
+ struct drm_display_mode *mode;
+ /* NOTE(review): cfg is only partially assigned below; confirm
+ * sde_hw_mixer_cfg has no other fields read by setup_mixer_out
+ */
+ struct sde_hw_mixer_cfg cfg;
+ struct drm_encoder *encoder;
+ int i;
+
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return;
+ }
+
+ SDE_DEBUG("crtc%d\n", crtc->base.id);
+ SDE_EVT32(DRMID(crtc));
+
+ sde_crtc = to_sde_crtc(crtc);
+ mixer = sde_crtc->mixers;
+
+ if (WARN_ON(!crtc->state))
+ return;
+
+ mode = &crtc->state->adjusted_mode;
+
+ drm_mode_debug_printmodeline(mode);
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ sde_encoder_register_frame_event_callback(encoder,
+ sde_crtc_frame_event_cb, (void *)crtc);
+ }
+
+ /* mixer 0 is the left half (or full width); any further is right */
+ for (i = 0; i < sde_crtc->num_mixers; i++) {
+ lm = mixer[i].hw_lm;
+ cfg.out_width = sde_crtc_mixer_width(sde_crtc, mode);
+ cfg.out_height = mode->vdisplay;
+ cfg.right_mixer = (i == 0) ? false : true;
+ cfg.flags = 0;
+ lm->ops.setup_mixer_out(lm, &cfg);
+ }
+}
+
/*
 * struct plane_state - local pairing of a plane's sde and drm state, used
 * for zpos sorting and stage validation during crtc atomic check
 * @sde_pstate: SDE-private plane state
 * @drm_pstate: base drm plane state
 * @stage: blend stage, initially the zpos property value; may be
 *         renumbered to a compacted stage index for non-custom clients
 */
struct plane_state {
	struct sde_plane_state *sde_pstate;
	struct drm_plane_state *drm_pstate;

	int stage;
};
+
+static int pstate_cmp(const void *a, const void *b)
+{
+ struct plane_state *pa = (struct plane_state *)a;
+ struct plane_state *pb = (struct plane_state *)b;
+ int rc = 0;
+ int pa_zpos, pb_zpos;
+
+ pa_zpos = sde_plane_get_property(pa->sde_pstate, PLANE_PROP_ZPOS);
+ pb_zpos = sde_plane_get_property(pb->sde_pstate, PLANE_PROP_ZPOS);
+
+ if (pa_zpos != pb_zpos)
+ rc = pa_zpos - pb_zpos;
+ else
+ rc = pa->drm_pstate->crtc_x - pb->drm_pstate->crtc_x;
+
+ return rc;
+}
+
+static int sde_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct sde_crtc *sde_crtc;
+ struct plane_state pstates[SDE_STAGE_MAX * 2];
+
+ struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+
+ int cnt = 0, rc = 0, mixer_width, i, z_pos;
+ int left_crtc_zpos_cnt[SDE_STAGE_MAX] = {0};
+ int right_crtc_zpos_cnt[SDE_STAGE_MAX] = {0};
+
+ if (!crtc) {
+ SDE_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ if (!state->enable || !state->active) {
+ SDE_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
+ crtc->base.id, state->enable, state->active);
+ return 0;
+ }
+
+ sde_crtc = to_sde_crtc(crtc);
+ mode = &state->adjusted_mode;
+ SDE_DEBUG("%s: check", sde_crtc->name);
+
+ mixer_width = sde_crtc_mixer_width(sde_crtc, mode);
+
+ /* get plane state for all drm planes associated with crtc state */
+ drm_atomic_crtc_state_for_each_plane(plane, state) {
+ pstate = drm_atomic_get_plane_state(state->state, plane);
+ if (IS_ERR_OR_NULL(pstate)) {
+ rc = PTR_ERR(pstate);
+ SDE_ERROR("%s: failed to get plane%d state, %d\n",
+ sde_crtc->name, plane->base.id, rc);
+ goto end;
+ }
+ if (cnt >= ARRAY_SIZE(pstates))
+ continue;
+
+ pstates[cnt].sde_pstate = to_sde_plane_state(pstate);
+ pstates[cnt].drm_pstate = pstate;
+ pstates[cnt].stage = sde_plane_get_property(
+ pstates[cnt].sde_pstate, PLANE_PROP_ZPOS);
+ cnt++;
+
+ if (CHECK_LAYER_BOUNDS(pstate->crtc_y, pstate->crtc_h,
+ mode->vdisplay) ||
+ CHECK_LAYER_BOUNDS(pstate->crtc_x, pstate->crtc_w,
+ mode->hdisplay)) {
+ SDE_ERROR("invalid vertical/horizontal destination\n");
+ SDE_ERROR("y:%d h:%d vdisp:%d x:%d w:%d hdisp:%d\n",
+ pstate->crtc_y, pstate->crtc_h, mode->vdisplay,
+ pstate->crtc_x, pstate->crtc_w, mode->hdisplay);
+ rc = -E2BIG;
+ goto end;
+ }
+ }
+
+ if (!sde_is_custom_client()) {
+ int stage_old = pstates[0].stage;
+
+ /* assign mixer stages based on sorted zpos property */
+ sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+ z_pos = 0;
+ for (i = 0; i < cnt; i++) {
+ if (stage_old != pstates[i].stage)
+ ++z_pos;
+ stage_old = pstates[i].stage;
+ pstates[i].stage = z_pos;
+ }
+ }
+
+ for (i = 0; i < cnt; i++) {
+ z_pos = pstates[i].stage;
+
+ /* verify z_pos setting before using it */
+ if (z_pos >= SDE_STAGE_MAX - SDE_STAGE_0) {
+ SDE_ERROR("> %d plane stages assigned\n",
+ SDE_STAGE_MAX - SDE_STAGE_0);
+ rc = -EINVAL;
+ goto end;
+ } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
+ if (left_crtc_zpos_cnt[z_pos] == 2) {
+ SDE_ERROR("> 2 plane @ stage%d on left\n",
+ z_pos);
+ rc = -EINVAL;
+ goto end;
+ }
+ left_crtc_zpos_cnt[z_pos]++;
+ } else {
+ if (right_crtc_zpos_cnt[z_pos] == 2) {
+ SDE_ERROR("> 2 plane @ stage%d on right\n",
+ z_pos);
+ rc = -EINVAL;
+ goto end;
+ }
+ right_crtc_zpos_cnt[z_pos]++;
+ }
+ pstates[i].sde_pstate->stage = z_pos + SDE_STAGE_0;
+ SDE_DEBUG("%s: zpos %d", sde_crtc->name, z_pos);
+ }
+
+ rc = sde_core_perf_crtc_check(crtc, state);
+ if (rc) {
+ SDE_ERROR("crtc%d failed performance check %d\n",
+ crtc->base.id, rc);
+ goto end;
+ }
+
+end:
+ return rc;
+}
+
/*
 * sde_crtc_vblank - enable or disable vblank callbacks for this crtc
 * @crtc: Pointer to drm crtc
 * @en: true to request vblank enable, false to release
 *
 * Maintains vblank_refcount; encoder vblank callbacks are only
 * registered on the 0->1 transition and deregistered on the 1->0
 * transition. Intermediate transitions only adjust the count.
 *
 * Returns: 0 on success, -EINVAL on unbalanced disable
 */
int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
	struct drm_encoder *encoder;
	struct drm_device *dev = crtc->dev;

	/*
	 * NOTE(review): the disable path does a separate atomic_read()
	 * before atomic_dec_return(); concurrent callers could race between
	 * the two. Presumably all callers hold a higher-level lock — verify.
	 */
	if (en && atomic_inc_return(&sde_crtc->vblank_refcount) == 1) {
		SDE_DEBUG("crtc%d vblank enable\n", crtc->base.id);
	} else if (!en && atomic_read(&sde_crtc->vblank_refcount) < 1) {
		SDE_ERROR("crtc%d invalid vblank disable\n", crtc->base.id);
		return -EINVAL;
	} else if (!en && atomic_dec_return(&sde_crtc->vblank_refcount) == 0) {
		SDE_DEBUG("crtc%d vblank disable\n", crtc->base.id);
	} else {
		/* refcount moved but no 0<->1 edge: nothing to (de)register */
		SDE_DEBUG("crtc%d vblank %s refcount:%d\n",
				crtc->base.id,
				en ? "enable" : "disable",
				atomic_read(&sde_crtc->vblank_refcount));
		return 0;
	}

	/* propagate the edge to every encoder attached to this crtc */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;

		SDE_EVT32(DRMID(crtc), en);

		if (en)
			sde_encoder_register_vblank_callback(encoder,
					sde_crtc_vblank_cb, (void *)crtc);
		else
			sde_encoder_register_vblank_callback(encoder, NULL,
					NULL);
	}

	return 0;
}
+
/*
 * sde_crtc_cancel_pending_flip - signal any pending flip event owned by
 * the given drm file (called on lastclose so the client is not left
 * waiting on an event that will never arrive)
 * @crtc: Pointer to drm crtc
 * @file: drm file handle whose pending flip should be completed
 */
void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);

	SDE_DEBUG("%s: cancel: %p\n", sde_crtc->name, file);
	_sde_crtc_complete_flip(crtc, file);
}
+
+/**
+ * sde_crtc_install_properties - install all drm properties for crtc
+ * @crtc: Pointer to drm crtc structure
+ */
+static void sde_crtc_install_properties(struct drm_crtc *crtc,
+ struct sde_mdss_cfg *catalog)
+{
+ struct sde_crtc *sde_crtc;
+ struct drm_device *dev;
+ struct sde_kms_info *info;
+ struct sde_kms *sde_kms;
+
+ SDE_DEBUG("\n");
+
+ if (!crtc || !catalog) {
+ SDE_ERROR("invalid crtc or catalog\n");
+ return;
+ }
+
+ sde_crtc = to_sde_crtc(crtc);
+ dev = crtc->dev;
+ sde_kms = _sde_crtc_get_kms(crtc);
+
+ info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
+ if (!info) {
+ SDE_ERROR("failed to allocate info memory\n");
+ return;
+ }
+
+ /* range properties */
+ msm_property_install_range(&sde_crtc->property_info,
+ "input_fence_timeout", 0x0, 0, SDE_CRTC_MAX_INPUT_FENCE_TIMEOUT,
+ SDE_CRTC_INPUT_FENCE_TIMEOUT, CRTC_PROP_INPUT_FENCE_TIMEOUT);
+
+ msm_property_install_range(&sde_crtc->property_info, "output_fence",
+ 0x0, 0, INR_OPEN_MAX, 0x0, CRTC_PROP_OUTPUT_FENCE);
+
+ msm_property_install_range(&sde_crtc->property_info,
+ "output_fence_offset", 0x0, 0, 1, 0,
+ CRTC_PROP_OUTPUT_FENCE_OFFSET);
+
+ msm_property_install_range(&sde_crtc->property_info,
+ "core_clk", 0x0, 0, U64_MAX,
+ sde_kms->perf.max_core_clk_rate,
+ CRTC_PROP_CORE_CLK);
+ msm_property_install_range(&sde_crtc->property_info,
+ "core_ab", 0x0, 0, U64_MAX,
+ SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA,
+ CRTC_PROP_CORE_AB);
+ msm_property_install_range(&sde_crtc->property_info,
+ "core_ib", 0x0, 0, U64_MAX,
+ SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA,
+ CRTC_PROP_CORE_IB);
+
+ msm_property_install_blob(&sde_crtc->property_info, "capabilities",
+ DRM_MODE_PROP_IMMUTABLE, CRTC_PROP_INFO);
+ sde_kms_info_reset(info);
+
+ sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion);
+ sde_kms_info_add_keyint(info, "max_linewidth",
+ catalog->max_mixer_width);
+ sde_kms_info_add_keyint(info, "max_blendstages",
+ catalog->max_mixer_blendstages);
+ if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED2)
+ sde_kms_info_add_keystr(info, "qseed_type", "qseed2");
+ if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3)
+ sde_kms_info_add_keystr(info, "qseed_type", "qseed3");
+ sde_kms_info_add_keyint(info, "has_src_split", catalog->has_src_split);
+ if (catalog->perf.max_bw_low)
+ sde_kms_info_add_keyint(info, "max_bandwidth_low",
+ catalog->perf.max_bw_low);
+ if (catalog->perf.max_bw_high)
+ sde_kms_info_add_keyint(info, "max_bandwidth_high",
+ catalog->perf.max_bw_high);
+ if (sde_kms->perf.max_core_clk_rate)
+ sde_kms_info_add_keyint(info, "max_mdp_clk",
+ sde_kms->perf.max_core_clk_rate);
+ msm_property_set_blob(&sde_crtc->property_info, &sde_crtc->blob_info,
+ info->data, info->len, CRTC_PROP_INFO);
+
+ kfree(info);
+}
+
+/**
+ * sde_crtc_atomic_set_property - atomically set a crtc drm property
+ * @crtc: Pointer to drm crtc structure
+ * @state: Pointer to drm crtc state structure
+ * @property: Pointer to targeted drm property
+ * @val: Updated property value
+ * @Returns: Zero on success
+ */
+static int sde_crtc_atomic_set_property(struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct sde_crtc *sde_crtc;
+ struct sde_crtc_state *cstate;
+ int idx, ret = -EINVAL;
+
+ if (!crtc || !state || !property) {
+ SDE_ERROR("invalid argument(s)\n");
+ } else {
+ sde_crtc = to_sde_crtc(crtc);
+ cstate = to_sde_crtc_state(state);
+ ret = msm_property_atomic_set(&sde_crtc->property_info,
+ cstate->property_values, cstate->property_blobs,
+ property, val);
+ if (!ret) {
+ idx = msm_property_index(&sde_crtc->property_info,
+ property);
+ if (idx == CRTC_PROP_INPUT_FENCE_TIMEOUT)
+ _sde_crtc_set_input_fence_timeout(cstate);
+ } else {
+ ret = sde_cp_crtc_set_property(crtc,
+ property, val);
+ }
+ if (ret)
+ DRM_ERROR("failed to set the property\n");
+ }
+
+ return ret;
+}
+
/**
 * sde_crtc_set_property - set a crtc drm property
 * @crtc: Pointer to drm crtc structure
 * @property: Pointer to targeted drm property
 * @val: Updated property value
 * @Returns: Zero on success
 *
 * Legacy (non-atomic) entry point; simply forwards to the atomic
 * variant using the crtc's current state.
 */
static int sde_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	SDE_DEBUG("\n");

	return sde_crtc_atomic_set_property(crtc, crtc->state, property, val);
}
+
/**
 * sde_crtc_atomic_get_property - retrieve a crtc drm property
 * @crtc: Pointer to drm crtc structure
 * @state: Pointer to drm crtc state structure
 * @property: Pointer to targeted drm property
 * @val: Pointer to variable for receiving property value
 * @Returns: Zero on success
 *
 * Note: reading CRTC_PROP_OUTPUT_FENCE is deliberately side-effecting -
 * it creates a new release fence fd and returns it through @val rather
 * than reading a stored value.
 */
static int sde_crtc_atomic_get_property(struct drm_crtc *crtc,
		const struct drm_crtc_state *state,
		struct drm_property *property,
		uint64_t *val)
{
	struct sde_crtc *sde_crtc;
	struct sde_crtc_state *cstate;
	int i, ret = -EINVAL;

	if (!crtc || !state) {
		SDE_ERROR("invalid argument(s)\n");
	} else {
		sde_crtc = to_sde_crtc(crtc);
		cstate = to_sde_crtc_state(state);
		i = msm_property_index(&sde_crtc->property_info, property);
		if (i == CRTC_PROP_OUTPUT_FENCE) {
			/* fence fires "offset" frames after the commit */
			int offset = sde_crtc_get_property(cstate,
					CRTC_PROP_OUTPUT_FENCE_OFFSET);

			ret = sde_fence_create(
					&sde_crtc->output_fence, val, offset);
			if (ret)
				SDE_ERROR("fence create failed\n");
		} else {
			/* generic tables first, then color processing */
			ret = msm_property_atomic_get(&sde_crtc->property_info,
					cstate->property_values,
					cstate->property_blobs, property, val);
			if (ret)
				ret = sde_cp_crtc_get_property(crtc,
					property, val);
		}
		if (ret)
			DRM_ERROR("get property failed\n");
	}
	return ret;
}
+
+#ifdef CONFIG_DEBUG_FS
/*
 * _sde_debugfs_status_show - seq_file show handler for the "status"
 * debugfs node; dumps mode, mixer assignment, per-plane state, and a
 * vblank-derived fps estimate for this crtc
 * @s: seq_file handle; s->private holds the struct sde_crtc
 * @data: unused
 *
 * Returns: 0 on success, -EINVAL on bad seq_file
 */
static int _sde_debugfs_status_show(struct seq_file *s, void *data)
{
	struct sde_crtc *sde_crtc;
	struct sde_plane_state *pstate = NULL;
	struct sde_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;

	int i, out_width;

	if (!s || !s->private)
		return -EINVAL;

	sde_crtc = s->private;
	crtc = &sde_crtc->base;

	/* hold crtc_lock so mixers/planes don't change mid-dump */
	mutex_lock(&sde_crtc->crtc_lock);
	mode = &crtc->state->adjusted_mode;
	out_width = sde_crtc_mixer_width(sde_crtc, mode);

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	/* mixer/ctl assignment section */
	for (i = 0; i < sde_crtc->num_mixers; ++i) {
		m = &sde_crtc->mixers[i];
		if (!m->hw_lm)
			seq_printf(s, "\tmixer[%d] has no lm\n", i);
		else if (!m->hw_ctl)
			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
		else
			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
				m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
				out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	/* per-plane section: stage, framebuffer layout, src/dst rects */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_sde_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u bpp:%d\n",
				fb->base.id, (char *) &fb->pixel_format,
				fb->width, fb->height, fb->bits_per_pixel);

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->modifier); i++)
				seq_printf(s, "modifier[%d]:%8llu ", i,
							fb->modifier[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_puts(s, "\n");
	}

	/* fps estimate over the interval since the last read of this node */
	if (sde_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), sde_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? DIV_ROUND_CLOSEST(
				sde_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums\n",
				fps,
				sde_crtc->vblank_cb_count,
				ktime_to_ms(diff));

		/* reset time & count for next measurement */
		sde_crtc->vblank_cb_count = 0;
		sde_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	seq_printf(s, "vblank_refcount:%d\n",
			atomic_read(&sde_crtc->vblank_refcount));

	mutex_unlock(&sde_crtc->crtc_lock);

	return 0;
}
+
/* debugfs open handler: bind the seq_file to the crtc stored in i_private */
static int _sde_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _sde_debugfs_status_show, inode->i_private);
}
+#endif
+
/* crtc save hook: currently only the color processing state is suspended */
static void sde_crtc_suspend(struct drm_crtc *crtc)
{
	sde_cp_crtc_suspend(crtc);
}
+
/* crtc restore hook: currently only the color processing state is resumed */
static void sde_crtc_resume(struct drm_crtc *crtc)
{
	sde_cp_crtc_resume(crtc);
}
+
/* drm_crtc_funcs: atomic helpers plus SDE-specific property/state hooks */
static const struct drm_crtc_funcs sde_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = sde_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = sde_crtc_set_property,
	.atomic_set_property = sde_crtc_atomic_set_property,
	.atomic_get_property = sde_crtc_atomic_get_property,
	.reset = sde_crtc_reset,
	.atomic_duplicate_state = sde_crtc_duplicate_state,
	.atomic_destroy_state = sde_crtc_destroy_state,
	.save = sde_crtc_suspend,
	.restore = sde_crtc_resume,
};
+
/* drm_crtc_helper_funcs: modeset/commit pipeline callbacks for this crtc */
static const struct drm_crtc_helper_funcs sde_crtc_helper_funcs = {
	.mode_fixup = sde_crtc_mode_fixup,
	.disable = sde_crtc_disable,
	.enable = sde_crtc_enable,
	.atomic_check = sde_crtc_atomic_check,
	.atomic_begin = sde_crtc_atomic_begin,
	.atomic_flush = sde_crtc_atomic_flush,
};
+
+#ifdef CONFIG_DEBUG_FS
/*
 * DEFINE_SDE_DEBUGFS_SEQ_FOPS - generate a single_open()-based open
 * handler and file_operations table for a seq_file show function named
 * __prefix##_show; the resulting fops is named __prefix##_fops
 */
#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix) \
static int __prefix ## _open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, __prefix ## _show, inode->i_private); \
} \
static const struct file_operations __prefix ## _fops = { \
	.owner = THIS_MODULE, \
	.open = __prefix ## _open, \
	.release = single_release, \
	.read = seq_read, \
	.llseek = seq_lseek, \
}
+
/*
 * sde_crtc_debugfs_state_show - seq_file show handler for the "state"
 * debugfs node; dumps the current sde_crtc_state including the committed
 * performance numbers
 * @s: seq_file handle; s->private holds the struct drm_crtc
 * @v: unused
 */
static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);

	seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
	seq_printf(s, "is_rt: %d\n", cstate->is_rt);
	seq_printf(s, "intf_mode: %d\n", cstate->intf_mode);
	seq_printf(s, "bw_ctl: %llu\n", cstate->cur_perf.bw_ctl);
	seq_printf(s, "core_clk_rate: %u\n", cstate->cur_perf.core_clk_rate);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
			cstate->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_crtc_debugfs_state);
+
+static void _sde_crtc_init_debugfs(struct sde_crtc *sde_crtc,
+ struct sde_kms *sde_kms)
+{
+ static const struct file_operations debugfs_status_fops = {
+ .open = _sde_debugfs_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+
+ if (sde_crtc && sde_kms) {
+ sde_crtc->debugfs_root = debugfs_create_dir(sde_crtc->name,
+ sde_debugfs_get_root(sde_kms));
+ if (sde_crtc->debugfs_root) {
+ /* don't error check these */
+ debugfs_create_file("status", S_IRUGO,
+ sde_crtc->debugfs_root,
+ sde_crtc, &debugfs_status_fops);
+ debugfs_create_file("state", S_IRUGO | S_IWUSR,
+ sde_crtc->debugfs_root,
+ &sde_crtc->base,
+ &sde_crtc_debugfs_state_fops);
+ }
+ }
+}
+#else
/* no-op stub when CONFIG_DEBUG_FS is disabled */
static void _sde_crtc_init_debugfs(struct sde_crtc *sde_crtc,
		struct sde_kms *sde_kms)
{
}
+#endif
+
+/* initialize crtc */
+struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
+{
+ struct drm_crtc *crtc = NULL;
+ struct sde_crtc *sde_crtc = NULL;
+ struct msm_drm_private *priv = NULL;
+ struct sde_kms *kms = NULL;
+ int i;
+
+ priv = dev->dev_private;
+ kms = to_sde_kms(priv->kms);
+
+ sde_crtc = kzalloc(sizeof(*sde_crtc), GFP_KERNEL);
+ if (!sde_crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = &sde_crtc->base;
+ crtc->dev = dev;
+ atomic_set(&sde_crtc->vblank_refcount, 0);
+
+ spin_lock_init(&sde_crtc->spin_lock);
+ atomic_set(&sde_crtc->frame_pending, 0);
+
+ INIT_LIST_HEAD(&sde_crtc->frame_event_list);
+ for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) {
+ INIT_LIST_HEAD(&sde_crtc->frame_events[i].list);
+ list_add(&sde_crtc->frame_events[i].list,
+ &sde_crtc->frame_event_list);
+ init_kthread_work(&sde_crtc->frame_events[i].work,
+ sde_crtc_frame_event_work);
+ }
+
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &sde_crtc_funcs);
+
+ drm_crtc_helper_add(crtc, &sde_crtc_helper_funcs);
+ plane->crtc = crtc;
+
+ /* save user friendly CRTC name for later */
+ snprintf(sde_crtc->name, SDE_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
+
+ /* initialize output fence support */
+ mutex_init(&sde_crtc->crtc_lock);
+ sde_fence_init(&sde_crtc->output_fence, sde_crtc->name, crtc->base.id);
+
+ /* initialize debugfs support */
+ _sde_crtc_init_debugfs(sde_crtc, kms);
+
+ /* create CRTC properties */
+ msm_property_init(&sde_crtc->property_info, &crtc->base, dev,
+ priv->crtc_property, sde_crtc->property_data,
+ CRTC_PROP_COUNT, CRTC_PROP_BLOBCOUNT,
+ sizeof(struct sde_crtc_state));
+
+ sde_crtc_install_properties(crtc, kms->catalog);
+
+ /* Install color processing properties */
+ sde_cp_crtc_init(crtc);
+ sde_cp_crtc_install_properties(crtc);
+
+ SDE_DEBUG("%s: successfully initialized crtc\n", sde_crtc->name);
+ return crtc;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
new file mode 100644
index 000000000000..25a93e882e6d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SDE_CRTC_H_
+#define _SDE_CRTC_H_
+
+#include "drm_crtc.h"
+#include "msm_prop.h"
+#include "sde_fence.h"
+#include "sde_kms.h"
+#include "sde_core_perf.h"
+
+#define SDE_CRTC_NAME_SIZE 12
+
+/* define the maximum number of in-flight frame events */
+#define SDE_CRTC_FRAME_EVENT_SIZE 2
+
/**
 * struct sde_crtc_mixer: stores the map for each virtual pipeline in the CRTC
 * @hw_lm: LM HW Driver context
 * @hw_ctl: CTL Path HW driver context
 * @hw_dspp: DSPP HW driver context
 * @encoder: Encoder attached to this lm & ctl
 * @mixer_op_mode: mixer blending operation mode
 * @flush_mask: mixer flush mask for ctl, mixer and pipe
 */
struct sde_crtc_mixer {
	struct sde_hw_mixer *hw_lm;
	struct sde_hw_ctl *hw_ctl;
	struct sde_hw_dspp *hw_dspp;
	struct drm_encoder *encoder;
	u32 mixer_op_mode;
	u32 flush_mask;
};
+
/**
 * struct sde_crtc_frame_event: stores crtc frame event for crtc processing
 * @work: base work structure (queued to a kthread worker)
 * @crtc: Pointer to crtc handling this event
 * @list: event list node; entries live on the crtc's free list when idle
 * @ts: timestamp at queue entry
 * @event: event identifier
 */
struct sde_crtc_frame_event {
	struct kthread_work work;
	struct drm_crtc *crtc;
	struct list_head list;
	ktime_t ts;
	u32 event;
};
+
/**
 * struct sde_crtc - virtualized CRTC data structure
 * @base          : Base drm crtc structure
 * @name          : ASCII description of this crtc
 * @num_ctls      : Number of ctl paths in use
 * @num_mixers    : Number of mixers in use
 * @mixers        : List of active mixers
 * @event         : Pointer to last received drm vblank event. If there is a
 *                  pending vblank event, this will be non-null.
 * @vsync_count   : Running count of received vsync events
 * @property_info : Opaque structure for generic property support
 * @property_data : Array of default values for generic property support
 * @blob_info     : Blob backing the immutable "capabilities" property
 * @output_fence  : Output/release fence context for this crtc
 * @stage_cfg     : H/w mixer stage configuration
 * @debugfs_root  : Parent of debugfs node
 * @vblank_cb_count : count of vblank callback since last reset
 * @vblank_cb_time  : ktime at vblank count reset
 * @vblank_refcount : reference count for vblank enable request
 * @feature_list  : list of color processing features supported on a crtc
 * @active_list   : list of color processing features are active
 * @dirty_list    : list of color processing features are dirty
 * @crtc_lock     : crtc lock around create, destroy and access.
 * @frame_pending : Whether or not an update is pending
 * @frame_events  : static allocation of in-flight frame events
 * @frame_event_list : available frame event list
 * @spin_lock     : spin lock for frame event, transaction status, etc...
 */
struct sde_crtc {
	struct drm_crtc base;
	char name[SDE_CRTC_NAME_SIZE];

	/* HW Resources reserved for the crtc */
	u32 num_ctls;
	u32 num_mixers;
	struct sde_crtc_mixer mixers[CRTC_DUAL_MIXERS];

	struct drm_pending_vblank_event *event;
	u32 vsync_count;

	struct msm_property_info property_info;
	struct msm_property_data property_data[CRTC_PROP_COUNT];
	struct drm_property_blob *blob_info;

	/* output fence support */
	struct sde_fence output_fence;

	struct sde_hw_stage_cfg stage_cfg;
	struct dentry *debugfs_root;

	u32 vblank_cb_count;
	ktime_t vblank_cb_time;
	atomic_t vblank_refcount;

	struct list_head feature_list;
	struct list_head active_list;
	struct list_head dirty_list;

	struct mutex crtc_lock;

	atomic_t frame_pending;
	struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
	struct list_head frame_event_list;
	spinlock_t spin_lock;
};
+
+#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
+
/**
 * struct sde_crtc_state - sde container for atomic crtc state
 * @base: Base drm crtc state structure
 * @connectors : Currently associated drm connectors
 * @num_connectors: Number of associated drm connectors
 * @is_rt : Whether or not the current commit contains RT connectors
 * @intf_mode : Interface mode of the primary connector
 * @property_values: Current crtc property values
 * @input_fence_timeout_ns : Cached input fence timeout, in ns
 * @property_blobs: Reference pointers for blob properties
 * @cur_perf: Current committed performance state (bw/clock, see
 *            sde_core_perf)
 * @new_perf: Performance state staged for the upcoming commit
 */
struct sde_crtc_state {
	struct drm_crtc_state base;

	struct drm_connector *connectors[MAX_CONNECTORS];
	int num_connectors;
	bool is_rt;
	enum sde_intf_mode intf_mode;

	uint64_t property_values[CRTC_PROP_COUNT];
	uint64_t input_fence_timeout_ns;
	struct drm_property_blob *property_blobs[CRTC_PROP_COUNT];

	struct sde_core_perf_params cur_perf;
	struct sde_core_perf_params new_perf;
};
+
+#define to_sde_crtc_state(x) \
+ container_of(x, struct sde_crtc_state, base)
+
+/**
+ * sde_crtc_get_property - query integer value of crtc property
+ * @S: Pointer to crtc state
+ * @X: Property index, from enum msm_mdp_crtc_property
+ * Returns: Integer value of requested property
+ */
+#define sde_crtc_get_property(S, X) \
+ ((S) && ((X) < CRTC_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
+
+static inline int sde_crtc_mixer_width(struct sde_crtc *sde_crtc,
+ struct drm_display_mode *mode)
+{
+ if (!sde_crtc || !mode)
+ return 0;
+
+ return sde_crtc->num_mixers == CRTC_DUAL_MIXERS ?
+ mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay;
+}
+
+static inline uint32_t get_crtc_split_width(struct drm_crtc *crtc)
+{
+ struct drm_display_mode *mode;
+ struct sde_crtc *sde_crtc;
+
+ if (!crtc)
+ return 0;
+
+ sde_crtc = to_sde_crtc(crtc);
+ mode = &crtc->state->adjusted_mode;
+ return sde_crtc_mixer_width(sde_crtc, mode);
+}
+
+/**
+ * sde_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int sde_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * sde_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void sde_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * sde_crtc_prepare_commit - callback to prepare for output fences
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void sde_crtc_prepare_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state);
+
+/**
+ * sde_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void sde_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state);
+
+/**
+ * sde_crtc_init - create a new crtc object
+ * @dev: sde device
+ * @plane: base plane
+ * @Return: new crtc object or error
+ */
+struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+
+/**
+ * sde_crtc_cancel_pending_flip - complete flip for clients on lastclose
+ * @crtc: Pointer to drm crtc object
+ * @file: client to cancel's file handle
+ */
+void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
+
+/**
+ * sde_crtc_is_rt - query whether real time connectors are present on the crtc
+ * @crtc: Pointer to drm crtc structure
+ * Returns: True if a connector is present with real time constraints
+ */
+bool sde_crtc_is_rt(struct drm_crtc *crtc);
+
+/**
+ * sde_crtc_get_intf_mode - get interface mode of the given crtc
+ * @crtc: Pointert to crtc
+ */
+static inline enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+ struct sde_crtc_state *cstate =
+ crtc ? to_sde_crtc_state(crtc->state) : NULL;
+
+ return cstate ? cstate->intf_mode : INTF_MODE_NONE;
+}
+
/**
 * sde_crtc_is_wb - check if writeback is primary output of this crtc
 * @crtc: Pointer to crtc
 */
static inline bool sde_crtc_is_wb(struct drm_crtc *crtc)
{
	struct sde_crtc_state *cstate =
			crtc ? to_sde_crtc_state(crtc->state) : NULL;

	return cstate ? (cstate->intf_mode == INTF_MODE_WB_LINE) : false;
}
+
/**
 * sde_crtc_is_nrt - check if primary output of this crtc is non-realtime client
 * @crtc: Pointer to crtc
 *
 * Currently only writeback outputs are treated as non-realtime.
 */
static inline bool sde_crtc_is_nrt(struct drm_crtc *crtc)
{
	return sde_crtc_is_wb(crtc);
}
+
+/**
+ * sde_crtc_is_enabled - check if sde crtc is enabled or not
+ * @crtc: Pointer to crtc
+ */
+static inline bool sde_crtc_is_enabled(struct drm_crtc *crtc)
+{
+ return crtc ? crtc->enabled : false;
+}
+
+#endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
new file mode 100644
index 000000000000..8cffb03fdfbb
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -0,0 +1,1334 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_ctl.h"
+#include "sde_formats.h"
+#include "sde_encoder_phys.h"
+#include "sde_color_processing.h"
+
+#define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_ENC(e, fmt, ...) SDE_ERROR("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+/* timeout in frames waiting for frame done */
+#define SDE_ENCODER_FRAME_DONE_TIMEOUT 60
+
+/*
+ * Two to anticipate panels that can do cmd/vid dynamic switching
+ * plan is to create all possible physical encoder types, and switch between
+ * them at runtime
+ */
+#define NUM_PHYS_ENCODER_TYPES 2
+
+#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
+ (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
+
+#define MAX_CHANNELS_PER_ENC 2
+
+/**
+ * struct sde_encoder_virt - virtual encoder. Container of one or more physical
+ * encoders. Virtual encoder manages one "logical" display. Physical
+ * encoders manage one intf block, tied to a specific panel/sub-panel.
+ * Virtual encoder defers as much as possible to the physical encoders.
+ * Virtual encoder registers itself with the DRM Framework as the encoder.
+ * @base: drm_encoder base class for registration with DRM
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @bus_scaling_client: Client handle to the bus scaling interface
+ * @display_num_of_h_tiles: Number of horizontal tiles of the display,
+ * copied from the display info at setup time
+ * @num_phys_encs: Actual number of physical encoders contained.
+ * @phys_encs: Container of physical encoders managed.
+ * @cur_master: Pointer to the current master in this mode. Optimization
+ * Only valid after enable. Cleared as disable.
+ * @hw_pp: Handle to the pingpong blocks used for the display. No.
+ * pingpong blocks can be different than num_phys_encs.
+ * @crtc_vblank_cb: Callback into the upper layer / CRTC for
+ * notification of the VBLANK
+ * @crtc_vblank_cb_data: Data from upper layer for VBLANK notification
+ * @debugfs_root: Debug file system root file node
+ * @enc_lock: Lock around physical encoder create/destroy and
+ * access.
+ * @frame_busy_mask: Bitmask tracking which phys_enc we are still
+ * busy processing current command.
+ * Bit0 = phys_encs[0] etc.
+ * @crtc_frame_event_cb: callback handler for frame event
+ * @crtc_frame_event_cb_data: callback handler private data
+ * @crtc_frame_event: callback event
+ * @frame_done_timeout: frame done timeout in Hz
+ * @frame_done_timer: watchdog timer for frame done event
+ */
+struct sde_encoder_virt {
+ struct drm_encoder base;
+ spinlock_t enc_spinlock;
+ uint32_t bus_scaling_client;
+
+ uint32_t display_num_of_h_tiles;
+
+ unsigned int num_phys_encs;
+ struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
+ struct sde_encoder_phys *cur_master;
+ struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+
+ void (*crtc_vblank_cb)(void *);
+ void *crtc_vblank_cb_data;
+
+ struct dentry *debugfs_root;
+ struct mutex enc_lock;
+ DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
+ void (*crtc_frame_event_cb)(void *, u32 event);
+ void *crtc_frame_event_cb_data;
+ u32 crtc_frame_event;
+
+ atomic_t frame_done_timeout;
+ struct timer_list frame_done_timer;
+};
+
+#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
+
+/*
+ * sde_encoder_get_hw_resources - aggregate the hw resources used by all
+ * physical encoders of this virtual encoder into @hw_res. The per-phys
+ * queries are expected to be without overlap.
+ */
+void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc,
+ struct sde_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ int i = 0;
+
+ if (!hw_res || !drm_enc || !conn_state) {
+ SDE_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
+ drm_enc != 0, hw_res != 0, conn_state != 0);
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ SDE_DEBUG_ENC(sde_enc, "\n");
+
+ /* Query resources used by phys encs, expected to be without overlap */
+ memset(hw_res, 0, sizeof(*hw_res));
+ hw_res->display_num_of_h_tiles = sde_enc->display_num_of_h_tiles;
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->ops.get_hw_resources)
+ phys->ops.get_hw_resources(phys, hw_res, conn_state);
+ }
+}
+
+/*
+ * sde_encoder_destroy - destroy all contained physical encoders, then
+ * clean up the drm encoder, debugfs nodes and the virtual encoder itself.
+ */
+void sde_encoder_destroy(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ int i = 0;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ SDE_DEBUG_ENC(sde_enc, "\n");
+
+ mutex_lock(&sde_enc->enc_lock);
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->ops.destroy) {
+ phys->ops.destroy(phys);
+ --sde_enc->num_phys_encs;
+ sde_enc->phys_encs[i] = NULL;
+ }
+ }
+
+ /* any leftover phys encs indicate a destroy-op mismatch at init */
+ if (sde_enc->num_phys_encs)
+ SDE_ERROR_ENC(sde_enc, "expected 0 num_phys_encs not %d\n",
+ sde_enc->num_phys_encs);
+ sde_enc->num_phys_encs = 0;
+ mutex_unlock(&sde_enc->enc_lock);
+
+ drm_encoder_cleanup(drm_enc);
+ debugfs_remove_recursive(sde_enc->debugfs_root);
+ mutex_destroy(&sde_enc->enc_lock);
+
+ kfree(sde_enc);
+}
+
+/*
+ * sde_encoder_helper_split_config - program the split-pipe / pp-split
+ * topology registers in the MDP top block for the given physical encoder
+ * and interface. Master/solo encoders program split_pipe; slave encoders
+ * program pp_split using the master's pingpong index.
+ */
+void sde_encoder_helper_split_config(
+ struct sde_encoder_phys *phys_enc,
+ enum sde_intf interface)
+{
+ struct sde_encoder_virt *sde_enc;
+ struct split_pipe_cfg cfg = { 0 };
+ struct sde_hw_mdp *hw_mdptop;
+ enum sde_rm_topology_name topology;
+
+ if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+ SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(phys_enc->parent);
+ hw_mdptop = phys_enc->hw_mdptop;
+ /* split is enabled whenever this encoder is not driving the panel alone */
+ cfg.en = phys_enc->split_role != ENC_ROLE_SOLO;
+ cfg.mode = phys_enc->intf_mode;
+ cfg.intf = interface;
+
+ if (cfg.en && phys_enc->ops.needs_single_flush &&
+ phys_enc->ops.needs_single_flush(phys_enc))
+ cfg.split_flush_en = true;
+
+ topology = sde_connector_get_topology_name(phys_enc->connector);
+ if (topology == SDE_RM_TOPOLOGY_PPSPLIT)
+ cfg.pp_split_slave = cfg.intf;
+ else
+ cfg.pp_split_slave = INTF_MAX;
+
+ if (phys_enc->split_role != ENC_ROLE_SLAVE) {
+ /* master/solo encoder */
+ SDE_DEBUG_ENC(sde_enc, "enable %d\n", cfg.en);
+
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ } else {
+ /*
+ * slave encoder
+ * - determine split index from master index,
+ * assume master is first pp
+ */
+ cfg.pp_split_index = sde_enc->hw_pp[0]->idx - PINGPONG_0;
+ SDE_DEBUG_ENC(sde_enc, "master using pp%d\n",
+ cfg.pp_split_index);
+
+ if (hw_mdptop->ops.setup_pp_split)
+ hw_mdptop->ops.setup_pp_split(hw_mdptop, &cfg);
+ }
+}
+
+/*
+ * sde_encoder_virt_atomic_check - drm_encoder_helper atomic_check hook.
+ * Delegates mode validation to each physical encoder (atomic_check if
+ * implemented, otherwise mode_fixup), then reserves hw resources in
+ * test-only mode via the resource manager.
+ */
+static int sde_encoder_virt_atomic_check(
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct sde_encoder_virt *sde_enc;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ const struct drm_display_mode *mode;
+ struct drm_display_mode *adj_mode;
+ int i = 0;
+ int ret = 0;
+
+ if (!drm_enc || !crtc_state || !conn_state) {
+ SDE_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
+ drm_enc != 0, crtc_state != 0, conn_state != 0);
+ return -EINVAL;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ SDE_DEBUG_ENC(sde_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
+ mode = &crtc_state->mode;
+ adj_mode = &crtc_state->adjusted_mode;
+ SDE_EVT32(DRMID(drm_enc));
+
+ /* perform atomic check on the first physical encoder (master) */
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->ops.atomic_check)
+ ret = phys->ops.atomic_check(phys, crtc_state,
+ conn_state);
+ else if (phys && phys->ops.mode_fixup)
+ if (!phys->ops.mode_fixup(phys, mode, adj_mode))
+ ret = -EINVAL;
+
+ if (ret) {
+ SDE_ERROR_ENC(sde_enc,
+ "mode unsupported, phys idx %d\n", i);
+ break;
+ }
+ }
+
+ /* Reserve dynamic resources now. Indicating AtomicTest phase */
+ if (!ret)
+ ret = sde_rm_reserve(&sde_kms->rm, drm_enc, crtc_state,
+ conn_state, true);
+
+ if (!ret)
+ drm_mode_set_crtcinfo(adj_mode, 0);
+
+ SDE_EVT32(DRMID(drm_enc), adj_mode->flags, adj_mode->private_flags);
+
+ return ret;
+}
+
+/*
+ * sde_encoder_virt_mode_set - drm_encoder_helper mode_set hook.
+ * Finds the connector attached to this encoder, commits the hw
+ * reservation made during atomic check, binds reserved pingpong blocks
+ * to the physical encoders, and forwards mode_set to each of them.
+ */
+static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct sde_encoder_virt *sde_enc;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ struct list_head *connector_list;
+ struct drm_connector *conn = NULL, *conn_iter;
+ struct sde_rm_hw_iter pp_iter;
+ int i = 0, ret;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ SDE_DEBUG_ENC(sde_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
+ connector_list = &sde_kms->dev->mode_config.connector_list;
+
+ SDE_EVT32(DRMID(drm_enc));
+
+ /* linear scan: encoders have at most one attached connector */
+ list_for_each_entry(conn_iter, connector_list, head)
+ if (conn_iter->encoder == drm_enc)
+ conn = conn_iter;
+
+ if (!conn) {
+ SDE_ERROR_ENC(sde_enc, "failed to find attached connector\n");
+ return;
+ } else if (!conn->state) {
+ SDE_ERROR_ENC(sde_enc, "invalid connector state\n");
+ return;
+ }
+
+ /* Reserve dynamic resources now. Indicating non-AtomicTest phase */
+ ret = sde_rm_reserve(&sde_kms->rm, drm_enc, drm_enc->crtc->state,
+ conn->state, false);
+ if (ret) {
+ SDE_ERROR_ENC(sde_enc,
+ "failed to reserve hw resources, %d\n", ret);
+ return;
+ }
+
+ /* collect the pingpong blocks reserved for this encoder */
+ sde_rm_init_hw_iter(&pp_iter, drm_enc->base.id, SDE_HW_BLK_PINGPONG);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ sde_enc->hw_pp[i] = NULL;
+ if (!sde_rm_get_hw(&sde_kms->rm, &pp_iter))
+ break;
+ sde_enc->hw_pp[i] = (struct sde_hw_pingpong *) pp_iter.hw;
+ }
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys) {
+ if (!sde_enc->hw_pp[i]) {
+ SDE_ERROR_ENC(sde_enc,
+ "invalid pingpong block for the encoder\n");
+ return;
+ }
+ phys->hw_pp = sde_enc->hw_pp[i];
+ phys->connector = conn->state->connector;
+ if (phys->ops.mode_set)
+ phys->ops.mode_set(phys, mode, adj_mode);
+ }
+ }
+}
+
+/*
+ * sde_encoder_virt_enable - drm_encoder_helper enable hook.
+ * Votes for power resources, resets per-phys counters, enables all
+ * non-master physical encoders first, then enables the master last so
+ * slave interfaces are configured before the master starts the display.
+ */
+static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ int i = 0;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ } else if (!drm_enc->dev) {
+ SDE_ERROR("invalid dev\n");
+ return;
+ } else if (!drm_enc->dev->dev_private) {
+ SDE_ERROR("invalid dev_private\n");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
+
+ SDE_DEBUG_ENC(sde_enc, "\n");
+ SDE_EVT32(DRMID(drm_enc));
+
+ sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+
+ sde_enc->cur_master = NULL;
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys) {
+ atomic_set(&phys->vsync_cnt, 0);
+ atomic_set(&phys->underrun_cnt, 0);
+
+ if (phys->ops.is_master && phys->ops.is_master(phys)) {
+ SDE_DEBUG_ENC(sde_enc,
+ "master is now idx %d\n", i);
+ sde_enc->cur_master = phys;
+ } else if (phys->ops.enable) {
+ phys->ops.enable(phys);
+ }
+ }
+ }
+
+ /* master is enabled last; i == num_phys_encs here */
+ if (!sde_enc->cur_master)
+ SDE_ERROR("virt encoder has no master! num_phys %d\n", i);
+ else if (sde_enc->cur_master->ops.enable)
+ sde_enc->cur_master->ops.enable(sde_enc->cur_master);
+}
+
+/*
+ * sde_encoder_virt_disable - drm_encoder_helper disable hook.
+ * Cancels any pending frame-done watchdog, disables the slave physical
+ * encoders first and the master last (mirroring the enable order),
+ * releases the hw reservation and drops the power vote.
+ */
+static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ int i = 0;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ } else if (!drm_enc->dev) {
+ SDE_ERROR("invalid dev\n");
+ return;
+ } else if (!drm_enc->dev->dev_private) {
+ SDE_ERROR("invalid dev_private\n");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ SDE_DEBUG_ENC(sde_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
+
+ SDE_EVT32(DRMID(drm_enc));
+
+ if (atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
+ SDE_ERROR("enc%d timeout pending\n", drm_enc->base.id);
+ del_timer_sync(&sde_enc->frame_done_timer);
+ }
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys) {
+ /*
+ * guard the is_master op before calling it, like the
+ * enable path does; a phys without is_master is
+ * treated as a non-master and disabled here
+ */
+ if (phys->ops.disable &&
+ !(phys->ops.is_master &&
+ phys->ops.is_master(phys)))
+ phys->ops.disable(phys);
+ phys->connector = NULL;
+ atomic_set(&phys->vsync_cnt, 0);
+ atomic_set(&phys->underrun_cnt, 0);
+ }
+ }
+
+ /* master is disabled last */
+ if (sde_enc->cur_master && sde_enc->cur_master->ops.disable)
+ sde_enc->cur_master->ops.disable(sde_enc->cur_master);
+
+ sde_enc->cur_master = NULL;
+ SDE_DEBUG_ENC(sde_enc, "cleared master\n");
+
+ sde_rm_release(&sde_kms->rm, drm_enc);
+
+ sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+}
+
+/* drm_encoder_helper hooks for mode set / enable / disable / atomic check */
+static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = {
+ .mode_set = sde_encoder_virt_mode_set,
+ .disable = sde_encoder_virt_disable,
+ .enable = sde_encoder_virt_enable,
+ .atomic_check = sde_encoder_virt_atomic_check,
+};
+
+/* drm_encoder base vtable; only destroy is needed */
+static const struct drm_encoder_funcs sde_encoder_funcs = {
+ .destroy = sde_encoder_destroy,
+};
+
+/*
+ * sde_encoder_get_intf - look up the intf block id in the catalog matching
+ * the given interface type and controller id; INTF_MAX if no match.
+ */
+static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog,
+ enum sde_intf_type type, u32 controller_id)
+{
+ int i = 0;
+
+ for (i = 0; i < catalog->intf_count; i++) {
+ if (catalog->intf[i].type == type
+ && catalog->intf[i].controller_id == controller_id) {
+ return catalog->intf[i].id;
+ }
+ }
+
+ return INTF_MAX;
+}
+
+/*
+ * sde_encoder_get_wb - look up the writeback block id for the given
+ * controller id; WB_MAX if out of range. The type parameter is currently
+ * unused (kept for symmetry with sde_encoder_get_intf).
+ */
+static enum sde_wb sde_encoder_get_wb(struct sde_mdss_cfg *catalog,
+ enum sde_intf_type type, u32 controller_id)
+{
+ if (controller_id < catalog->wb_count)
+ return catalog->wb[controller_id].id;
+
+ return WB_MAX;
+}
+
+/*
+ * sde_encoder_vblank_callback - vblank notification from a physical
+ * encoder (irq context); forwards to the registered crtc callback under
+ * the encoder spinlock and bumps the per-phys vsync counter.
+ */
+static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc,
+ struct sde_encoder_phys *phy_enc)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ unsigned long lock_flags;
+
+ if (!drm_enc || !phy_enc)
+ return;
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+ if (sde_enc->crtc_vblank_cb)
+ sde_enc->crtc_vblank_cb(sde_enc->crtc_vblank_cb_data);
+ spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+
+ atomic_inc(&phy_enc->vsync_cnt);
+}
+
+/*
+ * sde_encoder_underrun_callback - underrun notification from a physical
+ * encoder; counts the event and logs it to the event trace.
+ */
+static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc,
+ struct sde_encoder_phys *phy_enc)
+{
+ if (!phy_enc)
+ return;
+
+ atomic_inc(&phy_enc->underrun_cnt);
+ SDE_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt));
+}
+
+/*
+ * sde_encoder_register_vblank_callback - install (or clear, with a NULL
+ * vbl_cb) the crtc vblank callback and toggle the vblank irq on every
+ * physical encoder accordingly.
+ */
+void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
+ void (*vbl_cb)(void *), void *vbl_data)
+{
+ struct sde_encoder_virt *sde_enc;
+ unsigned long lock_flags;
+ bool enable;
+ int i;
+
+ /* validate before deriving the container pointer */
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ enable = vbl_cb ? true : false;
+
+ SDE_DEBUG_ENC(sde_enc, "\n");
+ SDE_EVT32(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+ sde_enc->crtc_vblank_cb = vbl_cb;
+ sde_enc->crtc_vblank_cb_data = vbl_data;
+ spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->ops.control_vblank_irq)
+ phys->ops.control_vblank_irq(phys, enable);
+ }
+}
+
+/*
+ * sde_encoder_register_frame_event_callback - install (or clear, with a
+ * NULL frame_event_cb) the crtc frame-event callback under the encoder
+ * spinlock.
+ */
+void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
+ void (*frame_event_cb)(void *, u32 event),
+ void *frame_event_cb_data)
+{
+ struct sde_encoder_virt *sde_enc;
+ unsigned long lock_flags;
+ bool enable;
+
+ /* validate before deriving the container pointer */
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ enable = frame_event_cb ? true : false;
+
+ SDE_DEBUG_ENC(sde_enc, "\n");
+ SDE_EVT32(DRMID(drm_enc), enable, 0);
+
+ spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+ sde_enc->crtc_frame_event_cb = frame_event_cb;
+ sde_enc->crtc_frame_event_cb_data = frame_event_cb_data;
+ spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+}
+
+/*
+ * sde_encoder_frame_done_callback - a physical encoder reports frame
+ * completion; clear its bit in frame_busy_mask and, once all phys encs
+ * are idle, cancel the frame-done watchdog and notify the crtc with the
+ * accumulated event flags.
+ */
+static void sde_encoder_frame_done_callback(
+ struct drm_encoder *drm_enc,
+ struct sde_encoder_phys *ready_phys, u32 event)
+{
+ struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
+ unsigned int i;
+
+ /* One of the physical encoders has become idle */
+ for (i = 0; i < sde_enc->num_phys_encs; i++)
+ if (sde_enc->phys_encs[i] == ready_phys) {
+ clear_bit(i, sde_enc->frame_busy_mask);
+ sde_enc->crtc_frame_event |= event;
+ SDE_EVT32(DRMID(drm_enc), i,
+ sde_enc->frame_busy_mask[0]);
+ }
+
+ if (!sde_enc->frame_busy_mask[0]) {
+ atomic_set(&sde_enc->frame_done_timeout, 0);
+ del_timer(&sde_enc->frame_done_timer);
+
+ if (sde_enc->crtc_frame_event_cb)
+ sde_enc->crtc_frame_event_cb(
+ sde_enc->crtc_frame_event_cb_data,
+ sde_enc->crtc_frame_event);
+ }
+}
+
+/**
+ * _sde_encoder_trigger_flush - trigger flush for a physical encoder
+ * drm_enc: Pointer to drm encoder structure
+ * phys: Pointer to physical encoder structure
+ * extra_flush_bits: Additional bit mask to include in flush trigger
+ */
+static inline void _sde_encoder_trigger_flush(struct drm_encoder *drm_enc,
+ struct sde_encoder_phys *phys, uint32_t extra_flush_bits)
+{
+ struct sde_hw_ctl *ctl;
+ int pending_kickoff_cnt;
+
+ if (!drm_enc || !phys) {
+ SDE_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
+ drm_enc != 0, phys != 0);
+ return;
+ }
+
+ ctl = phys->hw_ctl;
+ if (!ctl || !ctl->ops.trigger_flush) {
+ SDE_ERROR("missing trigger cb\n");
+ return;
+ }
+
+ /* count the kickoff before flushing so frame-done accounting matches */
+ pending_kickoff_cnt = sde_encoder_phys_inc_pending(phys);
+ SDE_EVT32(DRMID(&to_sde_encoder_virt(drm_enc)->base),
+ phys->intf_idx, pending_kickoff_cnt);
+
+ /* fold any accumulated split-flush bits into this ctl's flush mask */
+ if (extra_flush_bits && ctl->ops.update_pending_flush)
+ ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+
+ ctl->ops.trigger_flush(ctl);
+ SDE_EVT32(DRMID(drm_enc), ctl->idx);
+}
+
+/**
+ * _sde_encoder_trigger_start - trigger start for a physical encoder
+ * phys: Pointer to physical encoder structure
+ */
+static inline void _sde_encoder_trigger_start(struct sde_encoder_phys *phys)
+{
+ if (!phys) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+
+ /* skip disabled encoders; they have no frame to start */
+ if (phys->ops.trigger_start && phys->enable_state != SDE_ENC_DISABLED)
+ phys->ops.trigger_start(phys);
+}
+
+/*
+ * sde_encoder_helper_trigger_start - default trigger_start helper for
+ * physical encoders: kicks the ctl block's start trigger and logs the
+ * ctl index used (-1 if no trigger was issued).
+ */
+void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc)
+{
+ struct sde_hw_ctl *ctl;
+ int ctl_idx = -1;
+
+ if (!phys_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+
+ ctl = phys_enc->hw_ctl;
+ if (ctl && ctl->ops.trigger_start) {
+ ctl->ops.trigger_start(ctl);
+ ctl_idx = ctl->idx;
+ }
+
+ /* phys_enc already validated above; only parent needs checking */
+ if (phys_enc->parent)
+ SDE_EVT32(DRMID(phys_enc->parent), ctl_idx);
+}
+
+/*
+ * sde_encoder_helper_wait_event_timeout - wait for @cnt to reach zero on
+ * @wq, retrying spurious early wakeups until the wall-clock deadline
+ * computed from @timeout_ms has passed.
+ * Returns the last wait_event_timeout() result (0 on timeout, otherwise
+ * remaining jiffies).
+ */
+int sde_encoder_helper_wait_event_timeout(
+ int32_t drm_id,
+ int32_t hw_id,
+ wait_queue_head_t *wq,
+ atomic_t *cnt,
+ s64 timeout_ms)
+{
+ int rc = 0;
+ s64 expected_time = ktime_to_ms(ktime_get()) + timeout_ms;
+ /* don't shadow the kernel-global 'jiffies' symbol */
+ s64 wait_time_jiffies = msecs_to_jiffies(timeout_ms);
+ s64 time;
+
+ do {
+ rc = wait_event_timeout(*wq, atomic_read(cnt) == 0,
+ wait_time_jiffies);
+ time = ktime_to_ms(ktime_get());
+
+ SDE_EVT32(drm_id, hw_id, rc, time, expected_time,
+ atomic_read(cnt));
+ /* If we timed out, counter is valid and time is less, wait again */
+ } while (atomic_read(cnt) && (rc == 0) && (time < expected_time));
+
+ return rc;
+}
+
+/**
+ * _sde_encoder_kickoff_phys - handle physical encoder kickoff
+ * Iterate through the physical encoders and perform consolidated flush
+ * and/or control start triggering as needed. This is done in the virtual
+ * encoder rather than the individual physical ones in order to handle
+ * use cases that require visibility into multiple physical encoders at
+ * a time.
+ * sde_enc: Pointer to virtual encoder structure
+ */
+static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
+{
+ struct sde_hw_ctl *ctl;
+ uint32_t i, pending_flush;
+ unsigned long lock_flags;
+
+ if (!sde_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+
+ pending_flush = 0x0;
+ sde_enc->crtc_frame_event = 0;
+
+ /* update pending counts and trigger kickoff ctl flush atomically */
+ spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+
+ /* don't perform flush/start operations for slave encoders */
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (!phys || phys->enable_state == SDE_ENC_DISABLED)
+ continue;
+
+ ctl = phys->hw_ctl;
+ if (!ctl)
+ continue;
+
+ /* mark busy before flush; cleared in the frame-done callback */
+ set_bit(i, sde_enc->frame_busy_mask);
+
+ if (!phys->ops.needs_single_flush ||
+ !phys->ops.needs_single_flush(phys))
+ _sde_encoder_trigger_flush(&sde_enc->base, phys, 0x0);
+ else if (ctl->ops.get_pending_flush)
+ pending_flush |= ctl->ops.get_pending_flush(ctl);
+ }
+
+ /* for split flush, combine pending flush masks and send to master */
+ if (pending_flush && sde_enc->cur_master) {
+ _sde_encoder_trigger_flush(
+ &sde_enc->base,
+ sde_enc->cur_master,
+ pending_flush);
+ }
+
+ _sde_encoder_trigger_start(sde_enc->cur_master);
+
+ spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+}
+
+/*
+ * sde_encoder_prepare_for_kickoff - give each physical encoder a chance
+ * to prepare for the next kickoff (may include waiting on the previous
+ * one to finish).
+ */
+void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc;
+ struct sde_encoder_phys *phys;
+ unsigned int i;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ SDE_DEBUG_ENC(sde_enc, "\n");
+ SDE_EVT32(DRMID(drm_enc));
+
+ /* prepare for next kickoff, may include waiting on previous kickoff */
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ phys = sde_enc->phys_encs[i];
+ if (phys && phys->ops.prepare_for_kickoff)
+ phys->ops.prepare_for_kickoff(phys);
+ }
+}
+
+/*
+ * sde_encoder_kickoff - arm the frame-done watchdog, trigger the
+ * consolidated flush/start across all physical encoders, then let each
+ * phys enc handle post-kickoff work.
+ */
+void sde_encoder_kickoff(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc;
+ struct sde_encoder_phys *phys;
+ unsigned int i;
+ u32 vrefresh;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ SDE_DEBUG_ENC(sde_enc, "\n");
+
+ /* guard against a zero vrefresh in the adjusted mode (div-by-zero) */
+ vrefresh = drm_enc->crtc->state->adjusted_mode.vrefresh;
+ if (!vrefresh)
+ vrefresh = 60;
+
+ /* timeout is SDE_ENCODER_FRAME_DONE_TIMEOUT frames, converted to ms */
+ atomic_set(&sde_enc->frame_done_timeout,
+ SDE_ENCODER_FRAME_DONE_TIMEOUT * 1000 / vrefresh);
+ mod_timer(&sde_enc->frame_done_timer, jiffies +
+ ((atomic_read(&sde_enc->frame_done_timeout) * HZ) / 1000));
+
+ /* All phys encs are ready to go, trigger the kickoff */
+ _sde_encoder_kickoff_phys(sde_enc);
+
+ /* allow phys encs to handle any post-kickoff business */
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ phys = sde_enc->phys_encs[i];
+ if (phys && phys->ops.handle_post_kickoff)
+ phys->ops.handle_post_kickoff(phys);
+ }
+}
+
+/*
+ * _sde_encoder_status_show - debugfs "status" seq_file show: print per-phys
+ * intf index, vsync/underrun counters and interface mode.
+ */
+static int _sde_encoder_status_show(struct seq_file *s, void *data)
+{
+ struct sde_encoder_virt *sde_enc;
+ int i;
+
+ if (!s || !s->private)
+ return -EINVAL;
+
+ sde_enc = s->private;
+
+ mutex_lock(&sde_enc->enc_lock);
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (!phys)
+ continue;
+
+ seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
+ phys->intf_idx - INTF_0,
+ atomic_read(&phys->vsync_cnt),
+ atomic_read(&phys->underrun_cnt));
+
+ switch (phys->intf_mode) {
+ case INTF_MODE_VIDEO:
+ seq_puts(s, "mode: video\n");
+ break;
+ case INTF_MODE_CMD:
+ seq_puts(s, "mode: command\n");
+ break;
+ case INTF_MODE_WB_BLOCK:
+ seq_puts(s, "mode: wb block\n");
+ break;
+ case INTF_MODE_WB_LINE:
+ seq_puts(s, "mode: wb line\n");
+ break;
+ default:
+ seq_puts(s, "mode: ???\n");
+ break;
+ }
+ }
+ mutex_unlock(&sde_enc->enc_lock);
+
+ return 0;
+}
+
+/* debugfs open hook: bind the status show routine to this encoder */
+static int _sde_encoder_debugfs_status_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, _sde_encoder_status_show, inode->i_private);
+}
+
+/*
+ * _sde_set_misr_params - update a phys enc's MISR map: set the enable
+ * flag, clamp frame_count to the CRC batch capacity, and reset all CRC
+ * state when disabling.
+ */
+static void _sde_set_misr_params(struct sde_encoder_phys *phys, u32 enable,
+ u32 frame_count)
+{
+ int j;
+
+ if (!phys->misr_map)
+ return;
+
+ phys->misr_map->enable = enable;
+
+ /*
+ * frame_count is unsigned so it can never be negative; the original
+ * "frame_count <= 0" branch was unreachable. Simply clamp to the
+ * batch capacity.
+ */
+ if (frame_count <= SDE_CRC_BATCH_SIZE)
+ phys->misr_map->frame_count = frame_count;
+ else
+ phys->misr_map->frame_count = SDE_CRC_BATCH_SIZE;
+
+ if (!enable) {
+ phys->misr_map->last_idx = 0;
+ phys->misr_map->frame_count = 0;
+ for (j = 0; j < SDE_CRC_BATCH_SIZE; j++)
+ phys->misr_map->crc_value[j] = 0;
+ }
+}
+
+/*
+ * _sde_encoder_misr_set - debugfs write handler: parse "<enable>
+ * <frame_count>" from userspace and program MISR on every phys enc that
+ * supports it.
+ */
+static ssize_t _sde_encoder_misr_set(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct sde_encoder_virt *sde_enc;
+ struct drm_encoder *drm_enc;
+ int i = 0;
+ char buf[10];
+ u32 enable, frame_count;
+
+ drm_enc = file->private_data;
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ /*
+ * bound the copy to the local buffer, keeping room for the NUL
+ * terminator; the original copied 'count' bytes unchecked into
+ * buf[10] and then wrote buf[count], overflowing the stack
+ */
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+ return -EFAULT;
+
+ mutex_lock(&sde_enc->enc_lock);
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (!phys || !phys->misr_map || !phys->ops.setup_misr)
+ continue;
+
+ _sde_set_misr_params(phys, enable, frame_count);
+ phys->ops.setup_misr(phys, phys->misr_map);
+ }
+ mutex_unlock(&sde_enc->enc_lock);
+ return count;
+}
+
+/*
+ * _sde_encoder_misr_read - debugfs read handler: format the collected
+ * MISR CRC values of every phys enc into a text buffer and copy it to
+ * userspace.
+ */
+static ssize_t _sde_encoder_misr_read(
+ struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct sde_encoder_virt *sde_enc;
+ struct drm_encoder *drm_enc;
+ int i = 0, j = 0, len = 0;
+ char buf[512] = {'\0'};
+
+ if (*ppos)
+ return 0;
+
+ drm_enc = file->private_data;
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ mutex_lock(&sde_enc->enc_lock);
+ for (i = 0; i < sde_enc->num_phys_encs && len < sizeof(buf); i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+ struct sde_misr_params *misr_map;
+
+ if (!phys || !phys->misr_map)
+ continue;
+
+ misr_map = phys->misr_map;
+
+ /*
+ * pass the *remaining* space to snprintf; the original
+ * passed sizeof(buf) every time, allowing writes past the
+ * end of buf once len grew
+ */
+ len += snprintf(buf + len, sizeof(buf) - len, "INTF%d\n", i);
+ for (j = 0; j < SDE_CRC_BATCH_SIZE &&
+ len < sizeof(buf); j++)
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "%x\n", misr_map->crc_value[j]);
+ }
+ /* drop the lock before any return path; the original leaked it */
+ mutex_unlock(&sde_enc->enc_lock);
+
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+/*
+ * _sde_encoder_init_debugfs - create the per-encoder debugfs directory
+ * with "status" and "misr_data" nodes; failures are ignored since
+ * debugfs is best-effort.
+ */
+static void _sde_encoder_init_debugfs(struct drm_encoder *drm_enc,
+ struct sde_encoder_virt *sde_enc, struct sde_kms *sde_kms)
+{
+ static const struct file_operations debugfs_status_fops = {
+ .open = _sde_encoder_debugfs_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+
+ static const struct file_operations debugfs_misr_fops = {
+ .open = simple_open,
+ .read = _sde_encoder_misr_read,
+ .write = _sde_encoder_misr_set,
+ };
+
+ char name[SDE_NAME_SIZE];
+
+ if (!drm_enc || !sde_enc || !sde_kms) {
+ SDE_ERROR("invalid encoder or kms\n");
+ return;
+ }
+
+ snprintf(name, SDE_NAME_SIZE, "encoder%u", drm_enc->base.id);
+
+ /* create overall sub-directory for the encoder */
+ sde_enc->debugfs_root = debugfs_create_dir(name,
+ sde_debugfs_get_root(sde_kms));
+ if (sde_enc->debugfs_root) {
+ /* don't error check these */
+ debugfs_create_file("status", S_IRUGO | S_IWUSR,
+ sde_enc->debugfs_root, sde_enc, &debugfs_status_fops);
+
+ debugfs_create_file("misr_data", S_IRUGO | S_IWUSR,
+ sde_enc->debugfs_root, drm_enc, &debugfs_misr_fops);
+
+ }
+}
+
+/*
+ * sde_encoder_virt_add_phys_encs - create video and/or command mode
+ * physical encoders per the display capability flags and append them to
+ * the virtual encoder's phys_encs array.
+ */
+static int sde_encoder_virt_add_phys_encs(
+ u32 display_caps,
+ struct sde_encoder_virt *sde_enc,
+ struct sde_enc_phys_init_params *params)
+{
+ struct sde_encoder_phys *enc = NULL;
+
+ SDE_DEBUG_ENC(sde_enc, "\n");
+
+ /*
+ * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
+ * in this function, check up-front.
+ */
+ if (sde_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
+ ARRAY_SIZE(sde_enc->phys_encs)) {
+ SDE_ERROR_ENC(sde_enc, "too many physical encoders %d\n",
+ sde_enc->num_phys_encs);
+ return -EINVAL;
+ }
+
+ if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
+ enc = sde_encoder_phys_vid_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ SDE_ERROR_ENC(sde_enc, "failed to init vid enc: %ld\n",
+ PTR_ERR(enc));
+ return enc == 0 ? -EINVAL : PTR_ERR(enc);
+ }
+
+ sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+ ++sde_enc->num_phys_encs;
+ }
+
+ if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
+ enc = sde_encoder_phys_cmd_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ SDE_ERROR_ENC(sde_enc, "failed to init cmd enc: %ld\n",
+ PTR_ERR(enc));
+ return enc == 0 ? -EINVAL : PTR_ERR(enc);
+ }
+
+ sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+ ++sde_enc->num_phys_encs;
+ }
+
+ return 0;
+}
+
+/*
+ * sde_encoder_virt_add_phys_enc_wb - create a single writeback physical
+ * encoder and append it to the virtual encoder's phys_encs array.
+ */
+static int sde_encoder_virt_add_phys_enc_wb(struct sde_encoder_virt *sde_enc,
+ struct sde_enc_phys_init_params *params)
+{
+ struct sde_encoder_phys *enc = NULL;
+
+ if (!sde_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ SDE_DEBUG_ENC(sde_enc, "\n");
+
+ if (sde_enc->num_phys_encs + 1 >= ARRAY_SIZE(sde_enc->phys_encs)) {
+ SDE_ERROR_ENC(sde_enc, "too many physical encoders %d\n",
+ sde_enc->num_phys_encs);
+ return -EINVAL;
+ }
+
+ enc = sde_encoder_phys_wb_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ SDE_ERROR_ENC(sde_enc, "failed to init wb enc: %ld\n",
+ PTR_ERR(enc));
+ return enc == 0 ? -EINVAL : PTR_ERR(enc);
+ }
+
+ sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+ ++sde_enc->num_phys_encs;
+
+ return 0;
+}
+
+/*
+ * sde_encoder_setup_display - translate the display info into a drm
+ * encoder mode, resolve the intf/wb hardware block for each horizontal
+ * tile, and create the corresponding physical encoders.
+ */
+static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
+ struct sde_kms *sde_kms,
+ struct msm_display_info *disp_info,
+ int *drm_enc_mode)
+{
+ int ret = 0;
+ int i = 0;
+ enum sde_intf_type intf_type;
+ struct sde_encoder_virt_ops parent_ops = {
+ sde_encoder_vblank_callback,
+ sde_encoder_underrun_callback,
+ sde_encoder_frame_done_callback,
+ };
+ struct sde_enc_phys_init_params phys_params;
+
+ if (!sde_enc || !sde_kms) {
+ SDE_ERROR("invalid arg(s), enc %d kms %d\n",
+ sde_enc != 0, sde_kms != 0);
+ return -EINVAL;
+ }
+
+ memset(&phys_params, 0, sizeof(phys_params));
+ phys_params.sde_kms = sde_kms;
+ phys_params.parent = &sde_enc->base;
+ phys_params.parent_ops = parent_ops;
+ phys_params.enc_spinlock = &sde_enc->enc_spinlock;
+
+ SDE_DEBUG("\n");
+
+ if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
+ *drm_enc_mode = DRM_MODE_ENCODER_DSI;
+ intf_type = INTF_DSI;
+ } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
+ *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+ intf_type = INTF_HDMI;
+ } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_VIRTUAL) {
+ *drm_enc_mode = DRM_MODE_ENCODER_VIRTUAL;
+ intf_type = INTF_WB;
+ } else {
+ SDE_ERROR_ENC(sde_enc, "unsupported display interface type\n");
+ return -EINVAL;
+ }
+
+ WARN_ON(disp_info->num_of_h_tiles < 1);
+
+ sde_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
+
+ SDE_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
+
+ mutex_lock(&sde_enc->enc_lock);
+ for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
+ /*
+ * Left-most tile is at index 0, content is controller id
+ * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
+ * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
+ */
+ u32 controller_id = disp_info->h_tile_instance[i];
+
+ if (disp_info->num_of_h_tiles > 1) {
+ if (i == 0)
+ phys_params.split_role = ENC_ROLE_MASTER;
+ else
+ phys_params.split_role = ENC_ROLE_SLAVE;
+ } else {
+ phys_params.split_role = ENC_ROLE_SOLO;
+ }
+
+ SDE_DEBUG("h_tile_instance %d = %d, split_role %d\n",
+ i, controller_id, phys_params.split_role);
+
+ if (intf_type == INTF_WB) {
+ phys_params.intf_idx = INTF_MAX;
+ phys_params.wb_idx = sde_encoder_get_wb(
+ sde_kms->catalog,
+ intf_type, controller_id);
+ if (phys_params.wb_idx == WB_MAX) {
+ SDE_ERROR_ENC(sde_enc,
+ "could not get wb: type %d, id %d\n",
+ intf_type, controller_id);
+ ret = -EINVAL;
+ }
+ } else {
+ phys_params.wb_idx = WB_MAX;
+ phys_params.intf_idx = sde_encoder_get_intf(
+ sde_kms->catalog, intf_type,
+ controller_id);
+ if (phys_params.intf_idx == INTF_MAX) {
+ /* fix copy-pasted "wb" wording for the intf path */
+ SDE_ERROR_ENC(sde_enc,
+ "could not get intf: type %d, id %d\n",
+ intf_type, controller_id);
+ ret = -EINVAL;
+ }
+ }
+
+ if (!ret) {
+ if (intf_type == INTF_WB)
+ ret = sde_encoder_virt_add_phys_enc_wb(sde_enc,
+ &phys_params);
+ else
+ ret = sde_encoder_virt_add_phys_encs(
+ disp_info->capabilities,
+ sde_enc,
+ &phys_params);
+ if (ret)
+ SDE_ERROR_ENC(sde_enc,
+ "failed to add phys encs\n");
+ }
+ }
+ mutex_unlock(&sde_enc->enc_lock);
+
+ return ret;
+}
+
+/*
+ * sde_encoder_frame_done_timeout - timer callback fired when a committed
+ *	frame never signalled frame-done. Reports a frame-error event to
+ *	the crtc unless the timeout raced with a legitimate completion.
+ * @data: drm_encoder pointer, cast through the timer's unsigned long
+ */
+static void sde_encoder_frame_done_timeout(unsigned long data)
+{
+	struct drm_encoder *drm_enc = (struct drm_encoder *) data;
+	struct sde_encoder_virt *sde_enc;
+
+	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	/* only derive the virtual encoder once drm_enc is known valid */
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	if (!sde_enc->frame_busy_mask[0] || !sde_enc->crtc_frame_event_cb) {
+		/* no frame in flight or no consumer: spurious timer */
+		SDE_DEBUG("enc%d invalid timeout\n", drm_enc->base.id);
+		SDE_EVT32(DRMID(drm_enc),
+				sde_enc->frame_busy_mask[0], 0);
+		return;
+	} else if (!atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
+		/* frame-done handler already consumed the timeout token */
+		SDE_ERROR("enc%d invalid timeout\n", drm_enc->base.id);
+		SDE_EVT32(DRMID(drm_enc), 0, 1);
+		return;
+	}
+
+	SDE_EVT32(DRMID(drm_enc), 0, 2);
+	sde_enc->crtc_frame_event_cb(sde_enc->crtc_frame_event_cb_data,
+			SDE_ENCODER_FRAME_EVENT_ERROR);
+}
+
+/*
+ * sde_encoder_init - initialize virtual encoder object
+ * @dev: Pointer to drm device structure
+ * @disp_info: Pointer to display information structure
+ * Returns: Pointer to newly created drm encoder, or ERR_PTR on failure
+ */
+struct drm_encoder *sde_encoder_init(
+		struct drm_device *dev,
+		struct msm_display_info *disp_info)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct sde_kms *sde_kms = to_sde_kms(priv->kms);
+	struct drm_encoder *drm_enc = NULL;
+	struct sde_encoder_virt *sde_enc = NULL;
+	int drm_enc_mode = DRM_MODE_ENCODER_NONE;
+	int ret = 0;
+
+	sde_enc = kzalloc(sizeof(*sde_enc), GFP_KERNEL);
+	if (!sde_enc) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	mutex_init(&sde_enc->enc_lock);
+	ret = sde_encoder_setup_display(sde_enc, sde_kms, disp_info,
+			&drm_enc_mode);
+	if (ret)
+		goto fail;
+
+	sde_enc->cur_master = NULL;
+	spin_lock_init(&sde_enc->enc_spinlock);
+	drm_enc = &sde_enc->base;
+	drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_enc_mode);
+	drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs);
+
+	atomic_set(&sde_enc->frame_done_timeout, 0);
+	setup_timer(&sde_enc->frame_done_timer, sde_encoder_frame_done_timeout,
+			(unsigned long) sde_enc);
+
+	_sde_encoder_init_debugfs(drm_enc, sde_enc, sde_kms);
+
+	SDE_DEBUG_ENC(sde_enc, "created\n");
+
+	return drm_enc;
+
+fail:
+	SDE_ERROR("failed to create encoder\n");
+	if (drm_enc)
+		sde_encoder_destroy(drm_enc);
+	else
+		/*
+		 * not yet registered with drm core: free directly to avoid
+		 * leaking sde_enc when setup_display fails.
+		 * TODO(review): phys encs partially added by setup_display
+		 * may still need teardown here - confirm against
+		 * sde_encoder_destroy.
+		 */
+		kfree(sde_enc);
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * sde_encoder_wait_for_commit_done - block until every physical encoder
+ *	has flushed the current frame to hardware, collecting MISR data
+ *	for any phys enc that has it enabled.
+ * @drm_enc: Pointer to drm encoder structure
+ * Returns: 0 on success, first non-zero phys enc wait status otherwise
+ */
+int sde_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	int idx;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	for (idx = 0; idx < sde_enc->num_phys_encs; idx++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[idx];
+		int rc;
+
+		if (!phys)
+			continue;
+
+		if (phys->ops.wait_for_commit_done) {
+			rc = phys->ops.wait_for_commit_done(phys);
+			if (rc)
+				return rc;
+		}
+
+		if (phys->ops.collect_misr && phys->misr_map &&
+				phys->misr_map->enable)
+			phys->ops.collect_misr(phys, phys->misr_map);
+	}
+
+	return 0;
+}
+
+/*
+ * sde_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ * Returns: interface mode of the current master if set, otherwise the
+ *	mode of the first populated phys enc, else INTF_MODE_NONE
+ */
+enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder)
+{
+	struct sde_encoder_virt *sde_enc;
+	int idx;
+
+	if (!encoder) {
+		SDE_ERROR("invalid encoder\n");
+		return INTF_MODE_NONE;
+	}
+
+	sde_enc = to_sde_encoder_virt(encoder);
+
+	/* prefer the active master when one has been selected */
+	if (sde_enc->cur_master)
+		return sde_enc->cur_master->intf_mode;
+
+	for (idx = 0; idx < sde_enc->num_phys_encs; idx++)
+		if (sde_enc->phys_encs[idx])
+			return sde_enc->phys_encs[idx]->intf_mode;
+
+	return INTF_MODE_NONE;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
new file mode 100644
index 000000000000..82576b479bf8
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __SDE_ENCODER_H__
+#define __SDE_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+
+#include "msm_prop.h"
+#include "sde_hw_mdss.h"
+
+#define SDE_ENCODER_FRAME_EVENT_DONE BIT(0)
+#define SDE_ENCODER_FRAME_EVENT_ERROR BIT(1)
+
+/**
+ * Encoder functions and data types
+ * @intfs:	Interfaces this encoder is using, INTF_MODE_NONE if unused
+ * @wbs:	Writebacks this encoder is using, INTF_MODE_NONE if unused
+ * @needs_cdm:	Encoder requests a CDM based on pixel format conversion needs
+ * @display_num_of_h_tiles: Number of horizontal tiles in use by the display
+ */
+struct sde_encoder_hw_resources {
+ enum sde_intf_mode intfs[INTF_MAX];
+ enum sde_intf_mode wbs[WB_MAX];
+ bool needs_cdm;
+ u32 display_num_of_h_tiles;
+};
+
+/**
+ * sde_encoder_get_hw_resources - Populate table of required hardware resources
+ * @encoder: encoder pointer
+ * @hw_res: resource table to populate with encoder required resources
+ * @conn_state: report hw reqs based on this proposed connector state
+ */
+void sde_encoder_get_hw_resources(struct drm_encoder *encoder,
+ struct sde_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state);
+
+/**
+ * sde_encoder_register_vblank_callback - provide callback to encoder that
+ * will be called on the next vblank.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister and disable IRQs
+ * @data: user data provided to callback
+ */
+void sde_encoder_register_vblank_callback(struct drm_encoder *encoder,
+ void (*cb)(void *), void *data);
+
+/**
+ * sde_encoder_register_frame_event_callback - provide callback to encoder that
+ * will be called after the request is complete, or other events.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister
+ * @data: user data provided to callback
+ */
+void sde_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+ void (*cb)(void *, u32), void *data);
+
+/**
+ * sde_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ * path (i.e. ctl flush and start) at next appropriate time.
+ * Immediately: if no previous commit is outstanding.
+ * Delayed: Block until next trigger can be issued.
+ * @encoder: encoder pointer
+ */
+void sde_encoder_prepare_for_kickoff(struct drm_encoder *encoder);
+
+/**
+ * sde_encoder_kickoff - trigger a double buffer flip of the ctl path
+ * (i.e. ctl flush and start) immediately.
+ * @encoder: encoder pointer
+ */
+void sde_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * sde_encoder_wait_for_commit_done - Wait for hardware to have flushed the
+ *	current pending frames to hardware at a vblank or ctl_start
+ *	Encoders will map this differently depending on irqs
+ *	vid mode -> vsync_irq
+ * @drm_encoder: encoder pointer
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int sde_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder);
+
+/*
+ * sde_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
+enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder);
+
+/**
+ * sde_encoder_init - initialize virtual encoder object
+ * @dev: Pointer to drm device structure
+ * @disp_info: Pointer to display information structure
+ * Returns: Pointer to newly created drm encoder
+ */
+struct drm_encoder *sde_encoder_init(
+ struct drm_device *dev,
+ struct msm_display_info *disp_info);
+
+/**
+ * sde_encoder_destroy - destroy previously initialized virtual encoder
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+void sde_encoder_destroy(struct drm_encoder *drm_enc);
+
+#endif /* __SDE_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
new file mode 100644
index 000000000000..ed4b7be34281
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -0,0 +1,406 @@
+/*
+ * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDE_ENCODER_PHYS_H__
+#define __SDE_ENCODER_PHYS_H__
+
+#include <linux/jiffies.h>
+
+#include "sde_kms.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_pingpong.h"
+#include "sde_hw_ctl.h"
+#include "sde_hw_top.h"
+#include "sde_hw_wb.h"
+#include "sde_hw_cdm.h"
+#include "sde_encoder.h"
+#include "sde_connector.h"
+
+#define SDE_ENCODER_NAME_MAX 16
+
+/* wait for at most 2 vsync for lowest refresh rate (24hz) */
+#define KICKOFF_TIMEOUT_MS 84
+#define KICKOFF_TIMEOUT_JIFFIES msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
+
+/**
+ * enum sde_enc_split_role - Role this physical encoder will play in a
+ * split-panel configuration, where one panel is master, and others slaves.
+ * Masters have extra responsibilities, like managing the VBLANK IRQ.
+ * @ENC_ROLE_SOLO: This is the one and only panel. This encoder is master.
+ * @ENC_ROLE_MASTER: This encoder is the master of a split panel config.
+ * @ENC_ROLE_SLAVE: This encoder is not the master of a split panel config.
+ */
+enum sde_enc_split_role {
+ ENC_ROLE_SOLO,
+ ENC_ROLE_MASTER,
+ ENC_ROLE_SLAVE
+};
+
+struct sde_encoder_phys;
+
+/**
+ * struct sde_encoder_virt_ops - Interface the containing virtual encoder
+ * provides for the physical encoders to use to callback.
+ * @handle_vblank_virt: Notify virtual encoder of vblank IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_frame_done: Notify virtual encoder that this phys encoder
+ * completes last request frame.
+ */
+struct sde_encoder_virt_ops {
+ void (*handle_vblank_virt)(struct drm_encoder *,
+ struct sde_encoder_phys *phys);
+ void (*handle_underrun_virt)(struct drm_encoder *,
+ struct sde_encoder_phys *phys);
+ void (*handle_frame_done)(struct drm_encoder *,
+ struct sde_encoder_phys *phys, u32 event);
+};
+
+/**
+ * struct sde_encoder_phys_ops - Interface the physical encoders provide to
+ * the containing virtual encoder.
+ * @is_master: Whether this phys_enc is the current master
+ * encoder. Can be switched at enable time. Based
+ * on split_role and current mode (CMD/VID).
+ * @mode_fixup: DRM Call. Fixup a DRM mode.
+ * @mode_set: DRM Call. Set a DRM mode.
+ * This likely caches the mode, for use at enable.
+ * @enable: DRM Call. Enable a DRM mode.
+ * @disable: DRM Call. Disable mode.
+ * @atomic_check: DRM Call. Atomic check new DRM state.
+ * @destroy: DRM Call. Destroy and release resources.
+ * @get_hw_resources: Populate the structure with the hardware
+ * resources that this phys_enc is using.
+ * Expect no overlap between phys_encs.
+ * @control_vblank_irq:		Register/Deregister for VBLANK IRQ
+ * @wait_for_commit_done: Wait for hardware to have flushed the
+ * current pending frames to hardware
+ * @prepare_for_kickoff: Do any work necessary prior to a kickoff
+ * For CMD encoder, may wait for previous tx done
+ * @handle_post_kickoff: Do any work necessary post-kickoff work
+ * @trigger_start: Process start event on physical encoder
+ * @needs_single_flush: Whether encoder slaves need to be flushed
+ * @setup_misr: Sets up MISR, enable and disables based on sysfs
+ * @collect_misr: Collects MISR data on frame update
+ */
+
+struct sde_encoder_phys_ops {
+ bool (*is_master)(struct sde_encoder_phys *encoder);
+ bool (*mode_fixup)(struct sde_encoder_phys *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*mode_set)(struct sde_encoder_phys *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*enable)(struct sde_encoder_phys *encoder);
+ void (*disable)(struct sde_encoder_phys *encoder);
+ int (*atomic_check)(struct sde_encoder_phys *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+ void (*destroy)(struct sde_encoder_phys *encoder);
+ void (*get_hw_resources)(struct sde_encoder_phys *encoder,
+ struct sde_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state);
+ int (*control_vblank_irq)(struct sde_encoder_phys *enc, bool enable);
+ int (*wait_for_commit_done)(struct sde_encoder_phys *phys_enc);
+ void (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc);
+ void (*handle_post_kickoff)(struct sde_encoder_phys *phys_enc);
+ void (*trigger_start)(struct sde_encoder_phys *phys_enc);
+ bool (*needs_single_flush)(struct sde_encoder_phys *phys_enc);
+
+ void (*setup_misr)(struct sde_encoder_phys *phys_encs,
+ struct sde_misr_params *misr_map);
+ void (*collect_misr)(struct sde_encoder_phys *phys_enc,
+ struct sde_misr_params *misr_map);
+};
+
+/**
+ * enum sde_enc_enable_state - current enabled state of the physical encoder
+ * @SDE_ENC_DISABLED: Encoder is disabled
+ * @SDE_ENC_ENABLING: Encoder transitioning to enabled
+ * Events bounding transition are encoder type specific
+ * @SDE_ENC_ENABLED: Encoder is enabled
+ */
+enum sde_enc_enable_state {
+ SDE_ENC_DISABLED,
+ SDE_ENC_ENABLING,
+ SDE_ENC_ENABLED
+};
+
+/**
+ * enum sde_intr_idx - sde encoder interrupt index
+ * @INTR_IDX_VSYNC: Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_RDPTR: Readpointer done interrupt for cmd mode panel
+ */
+enum sde_intr_idx {
+ INTR_IDX_VSYNC,
+ INTR_IDX_PINGPONG,
+ INTR_IDX_UNDERRUN,
+ INTR_IDX_RDPTR,
+ INTR_IDX_MAX,
+};
+
+/**
+ * struct sde_encoder_phys - physical encoder that drives a single INTF block
+ * tied to a specific panel / sub-panel. Abstract type, sub-classed by
+ * phys_vid or phys_cmd for video mode or command mode encs respectively.
+ * @parent: Pointer to the containing virtual encoder
+ * @connector: If a mode is set, cached pointer to the active connector
+ * @ops: Operations exposed to the virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @hw_mdptop: Hardware interface to the top registers
+ * @hw_ctl: Hardware interface to the ctl registers
+ * @hw_cdm: Hardware interface to the cdm registers
+ * @cdm_cfg: Chroma-down hardware configuration
+ * @hw_pp: Hardware interface to the ping pong registers
+ * @sde_kms: Pointer to the sde_kms top level
+ * @cached_mode: DRM mode cached at mode_set time, acted on in enable
+ * @misr_map: Interface for setting and collecting MISR data
+ * @enabled: Whether the encoder has enabled and running a mode
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_mode: Interface mode
+ * @intf_idx: Interface index on sde hardware
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enable_state: Enable state tracking
+ * @vblank_refcount: Reference count of vblank request
+ * @vsync_cnt: Vsync count for the physical encoder
+ * @underrun_cnt: Underrun count for the physical encoder
+ * @pending_kickoff_cnt: Atomic counter tracking the number of kickoffs
+ * vs. the number of done/vblank irqs. Should hover
+ * between 0-2 Incremented when a new kickoff is
+ * scheduled. Decremented in irq handler
+ * @pending_kickoff_wq: Wait queue for blocking until kickoff completes
+ */
+struct sde_encoder_phys {
+ struct drm_encoder *parent;
+ struct drm_connector *connector;
+ struct sde_encoder_phys_ops ops;
+ struct sde_encoder_virt_ops parent_ops;
+ struct sde_hw_mdp *hw_mdptop;
+ struct sde_hw_ctl *hw_ctl;
+ struct sde_hw_cdm *hw_cdm;
+ struct sde_hw_cdm_cfg cdm_cfg;
+ struct sde_hw_pingpong *hw_pp;
+ struct sde_kms *sde_kms;
+ struct drm_display_mode cached_mode;
+ struct sde_misr_params *misr_map;
+ enum sde_enc_split_role split_role;
+ enum sde_intf_mode intf_mode;
+ enum sde_intf intf_idx;
+ spinlock_t *enc_spinlock;
+ enum sde_enc_enable_state enable_state;
+ atomic_t vblank_refcount;
+ atomic_t vsync_cnt;
+ atomic_t underrun_cnt;
+ atomic_t pending_kickoff_cnt;
+ wait_queue_head_t pending_kickoff_wq;
+};
+
+/* Account for one newly scheduled kickoff; returns the updated count */
+static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
+{
+	return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
+
+/**
+ * struct sde_encoder_phys_vid - sub-class of sde_encoder_phys to handle video
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @irq_idx: IRQ interface lookup index
+ * @irq_cb: interrupt callback
+ * @hw_intf: Hardware interface to the intf registers
+ */
+struct sde_encoder_phys_vid {
+ struct sde_encoder_phys base;
+ int irq_idx[INTR_IDX_MAX];
+ struct sde_irq_callback irq_cb[INTR_IDX_MAX];
+ struct sde_hw_intf *hw_intf;
+};
+
+/**
+ * struct sde_encoder_phys_cmd - sub-class of sde_encoder_phys to handle command
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @intf_idx: Intf Block index used by this phys encoder
+ * @stream_sel: Stream selection for multi-stream interfaces
+ * @pp_rd_ptr_irq_idx: IRQ signifying panel's frame read pointer
+ * For CMD encoders, VBLANK is driven by the PP RD Done IRQ
+ * @pp_tx_done_irq_idx: IRQ signifying frame transmission to panel complete
+ * @irq_cb: interrupt callback
+ */
+struct sde_encoder_phys_cmd {
+ struct sde_encoder_phys base;
+ int intf_idx;
+ int stream_sel;
+ int irq_idx[INTR_IDX_MAX];
+ struct sde_irq_callback irq_cb[INTR_IDX_MAX];
+};
+
+/**
+ * struct sde_encoder_phys_wb - sub-class of sde_encoder_phys to handle
+ * writeback specific operations
+ * @base: Baseclass physical encoder structure
+ * @hw_wb: Hardware interface to the wb registers
+ * @irq_idx: IRQ interface lookup index
+ * @wbdone_timeout: Timeout value for writeback done in msec
+ * @bypass_irqreg: Bypass irq register/unregister if non-zero
+ * @wbdone_complete: for wbdone irq synchronization
+ * @wb_cfg: Writeback hardware configuration
+ * @intf_cfg: Interface hardware configuration
+ * @wb_roi: Writeback region-of-interest
+ * @wb_fmt: Writeback pixel format
+ * @frame_count: Counter of completed writeback operations
+ * @kickoff_count: Counter of issued writeback operations
+ * @mmu_id: mmu identifier for non-secure/secure domain
+ * @wb_dev: Pointer to writeback device
+ * @start_time: Start time of writeback latest request
+ * @end_time: End time of writeback latest request
+ * @wb_name: Name of this writeback device
+ * @debugfs_root: Root entry of writeback debugfs
+ */
+struct sde_encoder_phys_wb {
+ struct sde_encoder_phys base;
+ struct sde_hw_wb *hw_wb;
+ int irq_idx;
+ struct sde_irq_callback irq_cb;
+ u32 wbdone_timeout;
+ u32 bypass_irqreg;
+ struct completion wbdone_complete;
+ struct sde_hw_wb_cfg wb_cfg;
+ struct sde_hw_intf_cfg intf_cfg;
+ struct sde_rect wb_roi;
+ const struct sde_format *wb_fmt;
+ u32 frame_count;
+ u32 kickoff_count;
+ int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+ struct sde_wb_device *wb_dev;
+ ktime_t start_time;
+ ktime_t end_time;
+#ifdef CONFIG_DEBUG_FS
+ char wb_name[SDE_ENCODER_NAME_MAX];
+ struct dentry *debugfs_root;
+#endif
+};
+
+/**
+ * struct sde_enc_phys_init_params - initialization parameters for phys encs
+ * @sde_kms: Pointer to the sde_kms top level
+ * @parent: Pointer to the containing virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_idx: Interface index this phys_enc will control
+ * @wb_idx: Writeback index this phys_enc will control
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ */
+struct sde_enc_phys_init_params {
+ struct sde_kms *sde_kms;
+ struct drm_encoder *parent;
+ struct sde_encoder_virt_ops parent_ops;
+ enum sde_enc_split_role split_role;
+ enum sde_intf intf_idx;
+ enum sde_wb wb_idx;
+ spinlock_t *enc_spinlock;
+};
+
+/**
+ * sde_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct sde_encoder_phys *sde_encoder_phys_vid_init(
+ struct sde_enc_phys_init_params *p);
+
+/**
+ * sde_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct sde_encoder_phys *sde_encoder_phys_cmd_init(
+ struct sde_enc_phys_init_params *p);
+
+/**
+ * sde_encoder_phys_wb_init - Construct a new writeback physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+#ifdef CONFIG_DRM_SDE_WB
+struct sde_encoder_phys *sde_encoder_phys_wb_init(
+ struct sde_enc_phys_init_params *p);
+#else
+static inline
+struct sde_encoder_phys *sde_encoder_phys_wb_init(
+ struct sde_enc_phys_init_params *p)
+{
+ return NULL;
+}
+#endif
+
+void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
+ struct drm_framebuffer *fb, const struct sde_format *format,
+ struct sde_rect *wb_roi);
+
+/**
+ * sde_encoder_helper_trigger_start - control start helper function
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc);
+
+/**
+ * sde_encoder_helper_wait_event_timeout - wait for event with timeout
+ * taking into account that jiffies may jump between reads leading to
+ * incorrectly detected timeouts. Prevent failure in this scenario by
+ * making sure that elapsed time during wait is valid.
+ * @drm_id: drm object id for logging
+ * @hw_id: hw instance id for logging
+ * @wq: wait queue structure
+ * @cnt: atomic counter to wait on
+ * @timeout_ms: timeout value in milliseconds
+ */
+int sde_encoder_helper_wait_event_timeout(
+ int32_t drm_id,
+ int32_t hw_id,
+ wait_queue_head_t *wq,
+ atomic_t *cnt,
+ s64 timeout_ms);
+
+
+/*
+ * sde_encoder_helper_get_3d_blend_mode - choose the 3D mux blend mode.
+ * A solo encoder driving a dual-pipe-merge topology interleaves rows via
+ * the 3D mux; every other configuration uses no 3D blending.
+ * NOTE(review): assumes phys_enc (and its connector) are valid - no NULL
+ * check is performed here; confirm all callers guarantee this.
+ */
+static inline enum sde_3d_blend_mode sde_encoder_helper_get_3d_blend_mode(
+		struct sde_encoder_phys *phys_enc)
+{
+	enum sde_rm_topology_name topology;
+
+	topology = sde_connector_get_topology_name(phys_enc->connector);
+	if (phys_enc->split_role == ENC_ROLE_SOLO &&
+			topology == SDE_RM_TOPOLOGY_DUALPIPEMERGE)
+		return BLEND_3D_H_ROW_INT;
+
+	return BLEND_3D_NONE;
+}
+
+/**
+ * sde_encoder_helper_split_config - split display configuration helper function
+ * This helper function may be used by physical encoders to configure
+ * the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum sde_intf setting
+ */
+void sde_encoder_helper_split_config(
+ struct sde_encoder_phys *phys_enc,
+ enum sde_intf interface);
+
+#endif /* __SDE_ENCODER_PHYS_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
new file mode 100644
index 000000000000..76d6fe0e3023
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -0,0 +1,712 @@
+/*
+ * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "sde_encoder_phys.h"
+#include "sde_hw_interrupts.h"
+#include "sde_core_irq.h"
+#include "sde_formats.h"
+
+#define SDE_DEBUG_CMDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_CMDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_sde_encoder_phys_cmd(x) \
+ container_of(x, struct sde_encoder_phys_cmd, base)
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels In the future, may want to allow panels to override
+ * these default values
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
+
+/* Solo and split-master encoders both act as the master phys enc */
+static inline bool sde_encoder_phys_cmd_is_master(
+		struct sde_encoder_phys *phys_enc)
+{
+	return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+/*
+ * sde_encoder_phys_cmd_mode_fixup - DRM mode-fixup hook for cmd mode.
+ * No adjustment is required for command mode panels; always accepts the
+ * proposed mode.
+ */
+static bool sde_encoder_phys_cmd_mode_fixup(
+		struct sde_encoder_phys *phys_enc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	if (phys_enc)
+		SDE_DEBUG_CMDENC(to_sde_encoder_phys_cmd(phys_enc), "\n");
+	return true;
+}
+
+/*
+ * sde_encoder_phys_cmd_mode_set - DRM mode-set hook for cmd mode.
+ * Caches the adjusted mode for use at enable time and retrieves the
+ * previously reserved CTL hardware block from the resource manager.
+ */
+static void sde_encoder_phys_cmd_mode_set(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct sde_encoder_phys_cmd *cmd_enc;
+	struct sde_rm *rm;
+	struct sde_rm_hw_iter iter;
+	int i, instance;
+
+	if (!phys_enc || !mode || !adj_mode) {
+		SDE_ERROR("invalid arg(s), enc %d mode %d adj_mode %d\n",
+				phys_enc != 0, mode != 0, adj_mode != 0);
+		return;
+	}
+
+	/* only dereference phys_enc after it is known to be valid */
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	rm = &phys_enc->sde_kms->rm;
+
+	phys_enc->cached_mode = *adj_mode;
+	SDE_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
+	drm_mode_debug_printmodeline(adj_mode);
+
+	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+	/* Retrieve previously allocated HW Resources. Shouldn't fail */
+	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
+	for (i = 0; i <= instance; i++) {
+		if (sde_rm_get_hw(rm, &iter))
+			phys_enc->hw_ctl = (struct sde_hw_ctl *)iter.hw;
+	}
+
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		SDE_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
+				PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+}
+
+/*
+ * sde_encoder_phys_cmd_pp_tx_done_irq - pingpong tx-done IRQ handler.
+ * Runs in IRQ context. Reports frame-done to the parent virtual encoder,
+ * decrements the pending-kickoff count (clamped at zero) and wakes any
+ * commit thread blocked in the kickoff wait queue.
+ */
+static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+	unsigned long lock_flags;
+	int new_cnt;
+
+	if (!cmd_enc)
+		return;
+
+	phys_enc = &cmd_enc->base;
+
+	/* notify all synchronous clients first, then asynchronous clients */
+	if (phys_enc->parent_ops.handle_frame_done)
+		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
+				phys_enc, SDE_ENCODER_FRAME_EVENT_DONE);
+
+	/* spinlock orders the count update against concurrent kickoffs */
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0, new_cnt);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+/*
+ * sde_encoder_phys_cmd_pp_rd_ptr_irq - pingpong rd-ptr IRQ handler.
+ * Runs in IRQ context. For command mode panels the read-pointer event
+ * is forwarded to the parent as the vblank notification.
+ */
+static void sde_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+
+	if (!cmd_enc)
+		return;
+
+	/* derive the base only after the NULL check above */
+	phys_enc = &cmd_enc->base;
+
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+				phys_enc);
+}
+
+/*
+ * _sde_encoder_phys_is_ppsplit_slave - true only when this phys enc is
+ * the slave half of a ping-pong-split topology.
+ */
+static bool _sde_encoder_phys_is_ppsplit_slave(
+		struct sde_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return false;
+
+	return sde_connector_get_topology_name(phys_enc->connector) ==
+			SDE_RM_TOPOLOGY_PPSPLIT &&
+			phys_enc->split_role == ENC_ROLE_SLAVE;
+}
+
+/*
+ * _sde_encoder_phys_cmd_wait_for_idle - wait for the previous kickoff to
+ * retire (pending_kickoff_cnt to drop) before the next one is scheduled.
+ * On a wait timeout, re-reads the raw pingpong IRQ status to distinguish
+ * a missed interrupt (frame done, retire it here and return success)
+ * from a genuine stall (report SDE_ENCODER_FRAME_EVENT_ERROR upward).
+ * Returns: 0 on success or recovery, -EWOULDBLOCK if the encoder is
+ * disabled, -ETIMEDOUT on a real timeout.
+ */
+static int _sde_encoder_phys_cmd_wait_for_idle(
+		struct sde_encoder_phys *phys_enc)
+{
+	/* NOTE(review): container_of computed before the NULL check below;
+	 * cmd_enc is not dereferenced until phys_enc is validated.
+	 */
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	u32 irq_status;
+	int ret;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	/* slave encoder doesn't enable for ppsplit */
+	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
+		return 0;
+
+	/* return EWOULDBLOCK since we know the wait isn't necessary */
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR_CMDENC(cmd_enc, "encoder is disabled\n");
+		return -EWOULDBLOCK;
+	}
+
+	/* wait for previous kickoff to complete */
+	ret = sde_encoder_helper_wait_event_timeout(
+			DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			&phys_enc->pending_kickoff_wq,
+			&phys_enc->pending_kickoff_cnt,
+			KICKOFF_TIMEOUT_MS);
+	if (ret <= 0) {
+		/* timed out: check whether the IRQ simply never fired */
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				INTR_IDX_PINGPONG, true);
+		if (irq_status) {
+			/* frame actually completed; run the handler now */
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			SDE_DEBUG_CMDENC(cmd_enc,
+					"pp:%d done but irq not triggered\n",
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			sde_encoder_phys_cmd_pp_tx_done_irq(cmd_enc,
+					INTR_IDX_PINGPONG);
+			ret = 0;
+		} else {
+			/* genuine stall: propagate a frame error upward */
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			SDE_ERROR_CMDENC(cmd_enc, "pp:%d kickoff timed out\n",
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			if (phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+						phys_enc->parent, phys_enc,
+						SDE_ENCODER_FRAME_EVENT_ERROR);
+			ret = -ETIMEDOUT;
+		}
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/*
+ * sde_encoder_phys_cmd_underrun_irq - underrun IRQ handler.
+ * Runs in IRQ context; forwards the underrun event to the parent
+ * virtual encoder for accounting.
+ */
+static void sde_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+
+	if (!cmd_enc)
+		return;
+
+	phys_enc = &cmd_enc->base;
+	if (phys_enc->parent_ops.handle_underrun_virt)
+		phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent,
+			phys_enc);
+}
+
+/*
+ * sde_encoder_phys_cmd_register_irq - look up, register and enable one of
+ * the encoder's interrupts.
+ * @phys_enc: physical encoder owning the IRQ
+ * @intr_type: sde interrupt type to look up against this encoder's pingpong
+ * @idx: slot in cmd_enc->irq_idx/irq_cb (enum sde_intr_idx)
+ * @irq_func: handler invoked in IRQ context
+ * @irq_name: human-readable name for log messages
+ * Returns: 0 on success, negative error code otherwise. On an enable
+ * failure the callback is unregistered again and the slot marked invalid.
+ */
+static int sde_encoder_phys_cmd_register_irq(struct sde_encoder_phys *phys_enc,
+	enum sde_intr_type intr_type, int idx,
+	void (*irq_func)(void *, int), const char *irq_name)
+{
+	/* NOTE(review): container_of computed before the NULL check below;
+	 * cmd_enc is not dereferenced until phys_enc is validated.
+	 */
+	struct sde_encoder_phys_cmd *cmd_enc =
+			to_sde_encoder_phys_cmd(phys_enc);
+	int ret = 0;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	cmd_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
+			intr_type, phys_enc->hw_pp->idx);
+	if (cmd_enc->irq_idx[idx] < 0) {
+		SDE_ERROR_CMDENC(cmd_enc,
+			"failed to lookup IRQ index for %s with pp=%d\n",
+			irq_name,
+			phys_enc->hw_pp->idx - PINGPONG_0);
+		return -EINVAL;
+	}
+
+	cmd_enc->irq_cb[idx].func = irq_func;
+	cmd_enc->irq_cb[idx].arg = cmd_enc;
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
+			cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
+	if (ret) {
+		SDE_ERROR_CMDENC(cmd_enc,
+			"failed to register IRQ callback %s\n",
+			irq_name);
+		return ret;
+	}
+
+	ret = sde_core_irq_enable(phys_enc->sde_kms, &cmd_enc->irq_idx[idx], 1);
+	if (ret) {
+		SDE_ERROR_CMDENC(cmd_enc,
+			"failed to enable IRQ for %s, pp %d, irq_idx %d\n",
+			irq_name,
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			cmd_enc->irq_idx[idx]);
+		cmd_enc->irq_idx[idx] = -EINVAL;
+
+		/* Unregister callback on IRQ enable failure */
+		sde_core_irq_unregister_callback(phys_enc->sde_kms,
+				cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
+		return ret;
+	}
+
+	SDE_DEBUG_CMDENC(cmd_enc, "registered IRQ %s for pp %d, irq_idx %d\n",
+			irq_name,
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			cmd_enc->irq_idx[idx]);
+
+	return ret;
+}
+
+/*
+ * sde_encoder_phys_cmd_unregister_irq - disable and drop one registered IRQ
+ * @phys_enc: physical encoder whose irq_idx/irq_cb tables hold the slot
+ * @idx: local interrupt slot (INTR_IDX_*) previously registered
+ *
+ * Mirrors sde_encoder_phys_cmd_register_irq(): disable first, then
+ * unregister the callback. Always returns 0 (even on invalid input).
+ * NOTE(review): cmd_enc is derived via container_of before the NULL
+ * check; this is only pointer arithmetic, no dereference happens until
+ * after the check.
+ */
+static int sde_encoder_phys_cmd_unregister_irq(
+ struct sde_encoder_phys *phys_enc, int idx)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ sde_core_irq_disable(phys_enc->sde_kms, &cmd_enc->irq_idx[idx], 1);
+ sde_core_irq_unregister_callback(phys_enc->sde_kms,
+ cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
+
+ SDE_DEBUG_CMDENC(cmd_enc, "unregistered IRQ for pp %d, irq_idx %d\n",
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ cmd_enc->irq_idx[idx]);
+
+ return 0;
+}
+
+/*
+ * sde_encoder_phys_cmd_tearcheck_config - program and enable the pingpong
+ * tearcheck block from the cached display mode and the vsync clock rate.
+ *
+ * vsync_count is vsync_clk / (vtotal * vrefresh); sync_cfg_height is set
+ * near the register maximum so the external panel TE always arrives
+ * before the hw-generated one (see inline comments below).
+ * NOTE(review): cmd_enc and mode are derived before the NULL check on
+ * phys_enc; pointer arithmetic only, no dereference until afterwards.
+ */
+static void sde_encoder_phys_cmd_tearcheck_config(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ struct sde_hw_tear_check tc_cfg = { 0 };
+ struct drm_display_mode *mode = &phys_enc->cached_mode;
+ bool tc_enable = true;
+ u32 vsync_hz;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+
+ if (!phys_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+
+ SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ /* both setup and enable ops are needed; bail if either is missing */
+ if (!phys_enc->hw_pp->ops.setup_tearcheck ||
+ !phys_enc->hw_pp->ops.enable_tearcheck) {
+ SDE_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+ return;
+ }
+
+ sde_kms = phys_enc->sde_kms;
+ priv = sde_kms->dev->dev_private;
+ /*
+ * TE default: dsi byte clock calculated base on 70 fps;
+ * around 14 ms to complete a kickoff cycle if te disabled;
+ * vclk_line base on 60 fps; write is faster than read;
+ * init == start == rdptr;
+ *
+ * vsync_count is ratio of MDP VSYNC clock frequency to LCD panel
+ * frequency divided by the no. of rows (lines) in the LCDpanel.
+ */
+ vsync_hz = sde_power_clk_get_rate(&priv->phandle, "vsync_clk");
+ if (!vsync_hz) {
+ SDE_DEBUG_CMDENC(cmd_enc, "invalid vsync clock rate\n");
+ return;
+ }
+
+ tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
+ tc_cfg.hw_vsync_mode = 1;
+
+ /*
+ * By setting sync_cfg_height to near max register value, we essentially
+ * disable sde hw generated TE signal, since hw TE will arrive first.
+ * Only caveat is if due to error, we hit wrap-around.
+ */
+ tc_cfg.sync_cfg_height = 0xFFF0;
+ tc_cfg.vsync_init_val = mode->vdisplay;
+ tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+ tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+ /* start and rd_ptr irq positions are just past the active region */
+ tc_cfg.start_pos = mode->vdisplay;
+ tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+
+ SDE_DEBUG_CMDENC(cmd_enc,
+ "tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+ mode->vtotal, mode->vrefresh);
+ SDE_DEBUG_CMDENC(cmd_enc,
+ "tc %d enable %u start_pos %u rd_ptr_irq %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
+ tc_cfg.rd_ptr_irq);
+ SDE_DEBUG_CMDENC(cmd_enc,
+ "tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+ tc_cfg.vsync_count, tc_cfg.vsync_init_val);
+ SDE_DEBUG_CMDENC(cmd_enc,
+ "tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
+ tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
+
+ phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
+ phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
+}
+
+/*
+ * sde_encoder_phys_cmd_pingpong_config - route this encoder's interface
+ * through the CTL block in command mode (intf/stream/3d-mux selection),
+ * then program the tearcheck block for the cached mode.
+ */
+static void sde_encoder_phys_cmd_pingpong_config(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ struct sde_hw_intf_cfg intf_cfg = { 0 };
+
+ if (!phys_enc || !phys_enc->hw_ctl ||
+ !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ SDE_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+ return;
+ }
+
+ SDE_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+ intf_cfg.intf = cmd_enc->intf_idx;
+ intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
+ intf_cfg.stream_sel = cmd_enc->stream_sel;
+ intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+
+ sde_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+/*
+ * In a PP-split topology both pingpongs feed one interface, so the two
+ * physical encoders must be flushed together with a single CTL flush.
+ */
+static bool sde_encoder_phys_cmd_needs_single_flush(
+		struct sde_encoder_phys *phys_enc)
+{
+	return phys_enc &&
+		(sde_connector_get_topology_name(phys_enc->connector) ==
+		 SDE_RM_TOPOLOGY_PPSPLIT);
+}
+
+/*
+ * sde_encoder_phys_cmd_control_vblank_irq - refcounted enable/disable of
+ * the pingpong read-pointer interrupt, which serves as the vblank source
+ * in command mode.
+ * @enable: true takes a vblank reference, false drops one
+ *
+ * Only the first enable registers the IRQ and only the last disable
+ * unregisters it; slave encoders never report vblank.
+ * Return: 0 on success, negative errno from register/unregister.
+ */
+static int sde_encoder_phys_cmd_control_vblank_irq(
+ struct sde_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ int ret = 0;
+
+ if (!phys_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ /* Slave encoders don't report vblank */
+ if (!sde_encoder_phys_cmd_is_master(phys_enc))
+ goto end;
+
+ SDE_DEBUG_CMDENC(cmd_enc, "[%pS] enable=%d/%d\n",
+ __builtin_return_address(0),
+ enable, atomic_read(&phys_enc->vblank_refcount));
+
+ SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+ enable, atomic_read(&phys_enc->vblank_refcount));
+
+ /* register on the 0->1 transition, unregister on 1->0 */
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = sde_encoder_phys_cmd_register_irq(phys_enc,
+ SDE_IRQ_TYPE_PING_PONG_RD_PTR,
+ INTR_IDX_RDPTR,
+ sde_encoder_phys_cmd_pp_rd_ptr_irq,
+ "pp_rd_ptr");
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = sde_encoder_phys_cmd_unregister_irq(phys_enc,
+ INTR_IDX_RDPTR);
+
+end:
+ if (ret)
+ SDE_ERROR_CMDENC(cmd_enc,
+ "control vblank irq error %d, enable %d\n",
+ ret, enable);
+
+ return ret;
+}
+
+/*
+ * sde_encoder_phys_cmd_enable - bring up the command-mode encoder: split
+ * config, pingpong/tearcheck programming, IRQ registration, then staging
+ * of the interface bit in the pending flush mask.
+ *
+ * A ppsplit slave skips IRQ registration entirely (jumps straight to
+ * update_flush); IRQ registration failures unwind in reverse order.
+ */
+static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ struct sde_hw_ctl *ctl;
+ u32 flush_mask;
+ int ret;
+
+ if (!phys_enc || !phys_enc->hw_ctl) {
+ SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ return;
+ }
+ SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (phys_enc->enable_state == SDE_ENC_ENABLED) {
+ SDE_ERROR("already enabled\n");
+ return;
+ }
+
+ sde_encoder_helper_split_config(phys_enc, cmd_enc->intf_idx);
+
+ sde_encoder_phys_cmd_pingpong_config(phys_enc);
+
+ if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
+ goto update_flush;
+
+ /* Both master and slave need to register for pp_tx_done */
+ ret = sde_encoder_phys_cmd_register_irq(phys_enc,
+ SDE_IRQ_TYPE_PING_PONG_COMP,
+ INTR_IDX_PINGPONG,
+ sde_encoder_phys_cmd_pp_tx_done_irq,
+ "pp_tx_done");
+ if (ret)
+ return;
+
+ ret = sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+ if (ret) {
+ sde_encoder_phys_cmd_unregister_irq(phys_enc,
+ INTR_IDX_PINGPONG);
+ return;
+ }
+
+ ret = sde_encoder_phys_cmd_register_irq(phys_enc,
+ SDE_IRQ_TYPE_INTF_UNDER_RUN,
+ INTR_IDX_UNDERRUN,
+ sde_encoder_phys_cmd_underrun_irq,
+ "underrun");
+ if (ret) {
+ /* unwind previously registered IRQs in reverse order */
+ sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+ sde_encoder_phys_cmd_unregister_irq(phys_enc,
+ INTR_IDX_PINGPONG);
+ return;
+ }
+
+update_flush:
+ ctl = phys_enc->hw_ctl;
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, cmd_enc->intf_idx);
+ ctl->ops.update_pending_flush(ctl, flush_mask);
+ phys_enc->enable_state = SDE_ENC_ENABLED;
+
+ SDE_DEBUG_CMDENC(cmd_enc, "update pending flush ctl %d flush_mask %x\n",
+ ctl->idx - CTL_0, flush_mask);
+}
+
+/*
+ * sde_encoder_phys_cmd_disable - wait for outstanding kickoffs to idle,
+ * then tear down the IRQs registered in _enable() in reverse order.
+ * A non-zero vblank refcount at the end indicates unbalanced
+ * enable/disable calls from above and is logged as an error.
+ */
+static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ int ret;
+
+ if (!phys_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+ SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+ SDE_ERROR_CMDENC(cmd_enc, "already disabled\n");
+ return;
+ }
+
+ SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0);
+
+ /* ppsplit slave registered no IRQs, so nothing to wait for or drop */
+ if (!_sde_encoder_phys_is_ppsplit_slave(phys_enc)) {
+ ret = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (ret) {
+ /* discard stuck kickoffs so teardown can proceed */
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ SDE_ERROR_CMDENC(cmd_enc,
+ "pp %d failed wait for idle, %d\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, ret);
+ SDE_EVT32(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0, ret);
+ }
+
+ sde_encoder_phys_cmd_unregister_irq(
+ phys_enc, INTR_IDX_UNDERRUN);
+ sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+ sde_encoder_phys_cmd_unregister_irq(
+ phys_enc, INTR_IDX_PINGPONG);
+ }
+
+ phys_enc->enable_state = SDE_ENC_DISABLED;
+
+ if (atomic_read(&phys_enc->vblank_refcount))
+ SDE_ERROR("enc:%d role:%d invalid vblank refcount %d\n",
+ phys_enc->parent->base.id,
+ phys_enc->split_role,
+ atomic_read(&phys_enc->vblank_refcount));
+}
+
+/* Release the command-mode physical encoder allocated by _init(). */
+static void sde_encoder_phys_cmd_destroy(struct sde_encoder_phys *phys_enc)
+{
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	kfree(to_sde_encoder_phys_cmd(phys_enc));
+}
+
+/*
+ * sde_encoder_phys_cmd_get_hw_resources - mark this encoder's interface
+ * as operating in command mode in the caller-provided resource table.
+ */
+static void sde_encoder_phys_cmd_get_hw_resources(
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_encoder_phys_cmd *cmd_enc;
+
+	/*
+	 * Also validate hw_res before writing through it; the video-mode
+	 * counterpart already checks it, the original here did not.
+	 */
+	if (!phys_enc || !hw_res) {
+		SDE_ERROR("invalid arg(s), enc %d hw_res %d\n",
+				phys_enc != 0, hw_res != 0);
+		return;
+	}
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	SDE_DEBUG_CMDENC(cmd_enc, "\n");
+	hw_res->intfs[cmd_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+}
+
+static int sde_encoder_phys_cmd_wait_for_commit_done(
+		struct sde_encoder_phys *phys_enc)
+{
+	/*
+	 * Nothing to wait on: ctl_start commits the transaction to hw and
+	 * the tearcheck block takes over from there, so command mode has
+	 * no analogue of the video-mode wait-for-vsync at commit time.
+	 */
+	return 0;
+}
+
+/*
+ * sde_encoder_phys_cmd_prepare_for_kickoff - serialize kickoffs: if a
+ * previous frame is still outstanding, block until it completes before
+ * this one proceeds. On timeout the stale pending count is discarded so
+ * the new frame is not blocked forever.
+ */
+static void sde_encoder_phys_cmd_prepare_for_kickoff(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_cmd *cmd_enc =
+ to_sde_encoder_phys_cmd(phys_enc);
+ int ret;
+
+ if (!phys_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+ SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+ SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0);
+
+ /*
+ * Mark kickoff request as outstanding. If there are more than one,
+ * outstanding, then we have to wait for the previous one to complete
+ */
+ ret = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (ret) {
+ /* force pending_kickoff_cnt 0 to discard failed kickoff */
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ SDE_EVT32(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ SDE_ERROR("failed wait_for_idle: %d\n", ret);
+ }
+}
+
+/* Populate the physical-encoder ops table with command-mode handlers. */
+static void sde_encoder_phys_cmd_init_ops(
+		struct sde_encoder_phys_ops *ops)
+{
+	/* identity / topology queries */
+	ops->is_master = sde_encoder_phys_cmd_is_master;
+	ops->needs_single_flush = sde_encoder_phys_cmd_needs_single_flush;
+	ops->get_hw_resources = sde_encoder_phys_cmd_get_hw_resources;
+
+	/* modeset */
+	ops->mode_set = sde_encoder_phys_cmd_mode_set;
+	ops->mode_fixup = sde_encoder_phys_cmd_mode_fixup;
+
+	/* lifecycle */
+	ops->enable = sde_encoder_phys_cmd_enable;
+	ops->disable = sde_encoder_phys_cmd_disable;
+	ops->destroy = sde_encoder_phys_cmd_destroy;
+
+	/* vblank and frame kickoff */
+	ops->control_vblank_irq = sde_encoder_phys_cmd_control_vblank_irq;
+	ops->wait_for_commit_done = sde_encoder_phys_cmd_wait_for_commit_done;
+	ops->prepare_for_kickoff = sde_encoder_phys_cmd_prepare_for_kickoff;
+	ops->trigger_start = sde_encoder_helper_trigger_start;
+}
+
+/*
+ * sde_encoder_phys_cmd_init - allocate and initialize a command-mode
+ * physical encoder
+ * @p: init parameters (kms handle, interface index, parent ops, locks)
+ *
+ * Return: pointer to the embedded sde_encoder_phys on success,
+ * ERR_PTR(-errno) on failure (never NULL, so IS_ERR() checks work).
+ */
+struct sde_encoder_phys *sde_encoder_phys_cmd_init(
+		struct sde_enc_phys_init_params *p)
+{
+	struct sde_encoder_phys *phys_enc = NULL;
+	struct sde_encoder_phys_cmd *cmd_enc = NULL;
+	struct sde_hw_mdp *hw_mdp;
+	int i, ret = 0;
+
+	if (!p) {
+		SDE_ERROR("invalid init params\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	SDE_DEBUG("intf %d\n", p->intf_idx - INTF_0);
+
+	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+	if (!cmd_enc) {
+		ret = -ENOMEM;
+		SDE_ERROR("failed to allocate\n");
+		goto fail;
+	}
+	phys_enc = &cmd_enc->base;
+
+	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		/*
+		 * PTR_ERR(NULL) is 0 and ERR_PTR(0) is NULL, which IS_ERR()
+		 * callers would treat as success; map a NULL mdp-top block
+		 * to a real error code instead.
+		 */
+		ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL;
+		SDE_ERROR("failed to get mdptop\n");
+		goto fail_mdp_init;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+
+	cmd_enc->intf_idx = p->intf_idx;
+	phys_enc->intf_idx = p->intf_idx;
+
+	sde_encoder_phys_cmd_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->sde_kms = p->sde_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_CMD;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	cmd_enc->stream_sel = 0;
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+	for (i = 0; i < INTR_IDX_MAX; i++)
+		INIT_LIST_HEAD(&cmd_enc->irq_cb[i].list);
+	atomic_set(&phys_enc->vblank_refcount, 0);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+
+	SDE_DEBUG_CMDENC(cmd_enc, "created\n");
+
+	return phys_enc;
+
+fail_mdp_init:
+	kfree(cmd_enc);
+fail:
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
new file mode 100644
index 000000000000..e61ff97d2ca4
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -0,0 +1,872 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "sde_encoder_phys.h"
+#include "sde_hw_interrupts.h"
+#include "sde_core_irq.h"
+#include "sde_formats.h"
+
+#define SDE_DEBUG_VIDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_VIDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_sde_encoder_phys_vid(x) \
+ container_of(x, struct sde_encoder_phys_vid, base)
+
+/* Every split role except the explicit slave acts as master. */
+static bool sde_encoder_phys_vid_is_master(
+		struct sde_encoder_phys *phys_enc)
+{
+	return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+/*
+ * drm_mode_to_intf_timing_params - convert a DRM display mode into the
+ * porch/pulse representation programmed into the INTF timing generator.
+ * @vid_enc: encoder, used to special-case the interface type
+ * @mode: source DRM mode
+ * @timing: output, fully overwritten
+ *
+ * Polarity fields are 1 for active-low sync (DRM N*SYNC flags) and are
+ * forced active-high for DSI, which cannot handle active-low syncs.
+ */
+static void drm_mode_to_intf_timing_params(
+ const struct sde_encoder_phys_vid *vid_enc,
+ const struct drm_display_mode *mode,
+ struct intf_timing_params *timing)
+{
+ memset(timing, 0, sizeof(*timing));
+ /*
+ * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+ * Active Region Front Porch Sync Back Porch
+ * <-----------------><------------><-----><----------->
+ * <- [hv]display --->
+ * <--------- [hv]sync_start ------>
+ * <----------------- [hv]sync_end ------->
+ * <---------------------------- [hv]total ------------->
+ */
+ timing->width = mode->hdisplay; /* active width */
+ timing->height = mode->vdisplay; /* active height */
+ timing->xres = timing->width;
+ timing->yres = timing->height;
+ timing->h_back_porch = mode->htotal - mode->hsync_end;
+ timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+ timing->v_back_porch = mode->vtotal - mode->vsync_end;
+ timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+ timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+ timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+ timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+ timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+ timing->border_clr = 0;
+ timing->underflow_clr = 0xff;
+ timing->hsync_skew = mode->hskew;
+
+ /* DSI controller cannot handle active-low sync signals. */
+ if (vid_enc->hw_intf->cap->type == INTF_DSI) {
+ timing->hsync_polarity = 0;
+ timing->vsync_polarity = 0;
+ }
+
+ /*
+ * For edp only:
+ * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+ * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+ */
+ /*
+ * if (vid_enc->hw->cap->type == INTF_EDP) {
+ * display_v_start += mode->htotal - mode->hsync_start;
+ * display_v_end -= mode->hsync_start - mode->hdisplay;
+ * }
+ */
+}
+
+/* htotal = active pixels + hbp + hfp + hsync pulse width. */
+static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+{
+	return timing->xres + timing->h_back_porch +
+		timing->h_front_porch + timing->hsync_pulse_width;
+}
+
+/* vtotal = active lines + vbp + vfp + vsync pulse width. */
+static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+{
+	return timing->yres + timing->v_back_porch +
+		timing->v_front_porch + timing->vsync_pulse_width;
+}
+
+/*
+ * programmable_fetch_get_num_lines:
+ * Number of fetch lines in vertical front porch
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have very large VFP, however we only need a total number of
+ * lines based on the chip worst case latencies.
+ */
+static u32 programmable_fetch_get_num_lines(
+		struct sde_encoder_phys_vid *vid_enc,
+		const struct intf_timing_params *timing)
+{
+	u32 worst_case_needed_lines =
+	    vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+	u32 start_of_frame_lines =
+	    timing->v_back_porch + timing->vsync_pulse_width;
+	u32 needed_vfp_lines = 0;
+	u32 actual_vfp_lines = 0;
+
+	/* Fetch must be outside active lines, otherwise undefined. */
+	if (start_of_frame_lines >= worst_case_needed_lines) {
+		SDE_DEBUG_VIDENC(vid_enc,
+				"prog fetch is not needed, large vbp+vsw\n");
+	} else {
+		/*
+		 * Compute the shortfall only on this path: the previous
+		 * unconditional subtraction wrapped around (u32 underflow)
+		 * whenever vbp+vsw already covered the worst case, producing
+		 * a huge bogus needed_vfp_lines in the debug print below.
+		 */
+		needed_vfp_lines =
+			worst_case_needed_lines - start_of_frame_lines;
+		if (timing->v_front_porch < needed_vfp_lines) {
+			/* Fetch needed, but not enough porch in panel config */
+			pr_warn_once
+			    ("low vbp+vfp may lead to perf issues in some cases\n");
+			SDE_DEBUG_VIDENC(vid_enc,
+				"less vfp than fetch req, using entire vfp\n");
+			actual_vfp_lines = timing->v_front_porch;
+		} else {
+			SDE_DEBUG_VIDENC(vid_enc,
+				"room in vfp for needed prefetch\n");
+			actual_vfp_lines = needed_vfp_lines;
+		}
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc,
+		"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+		timing->v_front_porch, timing->v_back_porch,
+		timing->vsync_pulse_width);
+	SDE_DEBUG_VIDENC(vid_enc,
+		"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+		worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+	return actual_vfp_lines;
+}
+
+/*
+ * programmable_fetch_config: Programs HW to prefetch lines by offsetting
+ * the start of fetch into the vertical front porch for cases where the
+ * vsync pulse width and vertical back porch time is insufficient
+ *
+ * Gets # of lines to pre-fetch, then calculate VSYNC counter value.
+ * HW layer requires VSYNC counter of first pixel of tgt VFP line.
+ *
+ * @timing: Pointer to the intf timing information for the requested mode
+ */
+static void programmable_fetch_config(struct sde_encoder_phys *phys_enc,
+ const struct intf_timing_params *timing)
+{
+ struct sde_encoder_phys_vid *vid_enc =
+ to_sde_encoder_phys_vid(phys_enc);
+ struct intf_prog_fetch f = { 0 };
+ u32 vfp_fetch_lines = 0;
+ u32 horiz_total = 0;
+ u32 vert_total = 0;
+ u32 vfp_fetch_start_vsync_counter = 0;
+ unsigned long lock_flags;
+
+ if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+ return;
+
+ /* f stays zeroed (fetch disabled) when no prefetch lines are needed */
+ vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+ if (vfp_fetch_lines) {
+ vert_total = get_vertical_total(timing);
+ horiz_total = get_horizontal_total(timing);
+ /* vsync counter of the first pixel of the target VFP line */
+ vfp_fetch_start_vsync_counter =
+ (vert_total - vfp_fetch_lines) * horiz_total + 1;
+ f.enable = 1;
+ f.fetch_start = vfp_fetch_start_vsync_counter;
+ }
+
+ SDE_DEBUG_VIDENC(vid_enc,
+ "vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+ vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+ /* serialize the register write against other spinlock holders */
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
+
+static bool sde_encoder_phys_vid_mode_fixup(
+		struct sde_encoder_phys *phys_enc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	/*
+	 * Accept the mode untouched: modifying it here has consequences
+	 * when it comes back through the atomic pipeline.
+	 */
+	if (!phys_enc)
+		return true;
+
+	SDE_DEBUG_VIDENC(to_sde_encoder_phys_vid(phys_enc), "\n");
+	return true;
+}
+
+/*
+ * sde_encoder_phys_vid_setup_timing_engine - program the INTF timing
+ * generator and CTL intf config from the cached mode.
+ *
+ * In split roles the horizontal timings are halved, since each INTF
+ * drives half of the panel. The timing-gen and intf-cfg writes are done
+ * under the encoder spinlock (also taken in the vblank ISR) so they are
+ * applied atomically with respect to interrupts.
+ */
+static void sde_encoder_phys_vid_setup_timing_engine(
+ struct sde_encoder_phys *phys_enc)
+{
+ struct sde_encoder_phys_vid *vid_enc;
+ struct drm_display_mode mode;
+ struct intf_timing_params timing_params = { 0 };
+ const struct sde_format *fmt = NULL;
+ u32 fmt_fourcc = DRM_FORMAT_RGB888;
+ unsigned long lock_flags;
+ struct sde_hw_intf_cfg intf_cfg = { 0 };
+
+ if (!phys_enc || !phys_enc->hw_ctl ||
+ !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ SDE_ERROR("invalid encoder %d\n", phys_enc != 0);
+ return;
+ }
+
+ /* local copy: split handling below modifies the mode */
+ mode = phys_enc->cached_mode;
+ vid_enc = to_sde_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+ SDE_ERROR("timing engine setup is not supported\n");
+ return;
+ }
+
+ SDE_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+ drm_mode_debug_printmodeline(&mode);
+
+ if (phys_enc->split_role != ENC_ROLE_SOLO) {
+ mode.hdisplay >>= 1;
+ mode.htotal >>= 1;
+ mode.hsync_start >>= 1;
+ mode.hsync_end >>= 1;
+
+ SDE_DEBUG_VIDENC(vid_enc,
+ "split_role %d, halve horizontal %d %d %d %d\n",
+ phys_enc->split_role,
+ mode.hdisplay, mode.htotal,
+ mode.hsync_start, mode.hsync_end);
+ }
+
+ drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+
+ fmt = sde_get_sde_format(fmt_fourcc);
+ SDE_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+
+ intf_cfg.intf = vid_enc->hw_intf->idx;
+ intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_VID;
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+ &timing_params, fmt);
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ programmable_fetch_config(phys_enc, &timing_params);
+}
+
+/*
+ * sde_encoder_phys_vid_vblank_irq - vsync ISR: notify the parent encoder,
+ * retire one pending kickoff (never decrementing below zero), and wake
+ * any thread blocked waiting for the kickoff to complete.
+ */
+static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+ struct sde_encoder_phys_vid *vid_enc = arg;
+ struct sde_encoder_phys *phys_enc;
+ unsigned long lock_flags;
+ int new_cnt;
+
+ if (!vid_enc)
+ return;
+
+ phys_enc = &vid_enc->base;
+ if (phys_enc->parent_ops.handle_vblank_virt)
+ phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ /* atomic_add_unless stops at 0 so a spurious vblank can't underflow */
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
+ new_cnt);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+/* IRQ callback: forward an interface underrun event to the parent encoder. */
+static void sde_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_vid *vid_enc = arg;
+
+	if (vid_enc) {
+		struct sde_encoder_phys *phys = &vid_enc->base;
+
+		if (phys->parent_ops.handle_underrun_virt)
+			phys->parent_ops.handle_underrun_virt(phys->parent,
+					phys);
+	}
+}
+
+/* Split (non-solo) video topologies must share a single CTL flush. */
+static bool sde_encoder_phys_vid_needs_single_flush(
+		struct sde_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return false;
+
+	return phys_enc->split_role != ENC_ROLE_SOLO;
+}
+
+/*
+ * sde_encoder_phys_vid_register_irq - look up, register and enable one IRQ
+ * @phys_enc: physical encoder
+ * @intr_type: hw interrupt type (e.g. SDE_IRQ_TYPE_INTF_VSYNC)
+ * @idx: local slot (INTR_IDX_*) into the encoder's irq_idx/irq_cb tables
+ * @irq_func: callback invoked from the core irq dispatcher
+ * @irq_name: human readable name, used only in log messages
+ *
+ * Return: 0 on success, negative errno on lookup/register/enable failure.
+ * On any failure the slot is left unregistered and irq_idx invalidated.
+ */
+static int sde_encoder_phys_vid_register_irq(struct sde_encoder_phys *phys_enc,
+	enum sde_intr_type intr_type, int idx,
+	void (*irq_func)(void *, int), const char *irq_name)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+	int ret = 0;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	vid_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
+			intr_type, vid_enc->hw_intf->idx);
+	if (vid_enc->irq_idx[idx] < 0) {
+		SDE_ERROR_VIDENC(vid_enc,
+			"failed to lookup IRQ index for %s type:%d\n", irq_name,
+			intr_type);
+		return -EINVAL;
+	}
+
+	vid_enc->irq_cb[idx].func = irq_func;
+	vid_enc->irq_cb[idx].arg = vid_enc;
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
+			vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
+	if (ret) {
+		SDE_ERROR_VIDENC(vid_enc,
+			"failed to register IRQ callback for %s\n", irq_name);
+		return ret;
+	}
+
+	ret = sde_core_irq_enable(phys_enc->sde_kms, &vid_enc->irq_idx[idx], 1);
+	if (ret) {
+		SDE_ERROR_VIDENC(vid_enc,
+			"enable IRQ for intr:%s failed, irq_idx %d\n",
+			irq_name, vid_enc->irq_idx[idx]);
+
+		/*
+		 * Unregister the callback while irq_idx[idx] still holds the
+		 * valid core index. The previous code invalidated irq_idx
+		 * first and then unregistered with -EINVAL, so the callback
+		 * was never actually removed.
+		 */
+		sde_core_irq_unregister_callback(phys_enc->sde_kms,
+				vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
+		vid_enc->irq_idx[idx] = -EINVAL;
+		return ret;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc, "registered irq %s idx: %d\n",
+			irq_name, vid_enc->irq_idx[idx]);
+
+	return ret;
+}
+
+/*
+ * sde_encoder_phys_vid_unregister_irq - disable and drop one registered IRQ
+ * @phys_enc: physical encoder whose irq_idx/irq_cb tables hold the slot
+ * @idx: local interrupt slot (INTR_IDX_*) previously registered
+ *
+ * Mirrors sde_encoder_phys_vid_register_irq(): disable first, then
+ * unregister the callback. Always returns 0, even for a NULL encoder.
+ */
+static int sde_encoder_phys_vid_unregister_irq(
+ struct sde_encoder_phys *phys_enc, int idx)
+{
+ struct sde_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc) {
+ SDE_ERROR("invalid encoder\n");
+ goto end;
+ }
+
+ vid_enc = to_sde_encoder_phys_vid(phys_enc);
+ sde_core_irq_disable(phys_enc->sde_kms, &vid_enc->irq_idx[idx], 1);
+
+ sde_core_irq_unregister_callback(phys_enc->sde_kms,
+ vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
+
+ SDE_DEBUG_VIDENC(vid_enc, "unregistered %d\n", vid_enc->irq_idx[idx]);
+
+end:
+ return 0;
+}
+
+/*
+ * sde_encoder_phys_vid_mode_set - cache the adjusted mode and fetch the
+ * CTL block previously reserved for this display in the resource
+ * manager (iterator instance 0 for master/solo, instance 1 for slave).
+ */
+static void sde_encoder_phys_vid_mode_set(
+ struct sde_encoder_phys *phys_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct sde_rm *rm;
+ struct sde_rm_hw_iter iter;
+ int i, instance;
+ struct sde_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc || !phys_enc->sde_kms) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+
+ rm = &phys_enc->sde_kms->rm;
+ vid_enc = to_sde_encoder_phys_vid(phys_enc);
+ phys_enc->cached_mode = *adj_mode;
+ SDE_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+ drm_mode_debug_printmodeline(adj_mode);
+
+ instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+ /* Retrieve previously allocated HW Resources. Shouldn't fail */
+ sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
+ for (i = 0; i <= instance; i++) {
+ if (sde_rm_get_hw(rm, &iter))
+ phys_enc->hw_ctl = (struct sde_hw_ctl *)iter.hw;
+ }
+ if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+ SDE_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
+ PTR_ERR(phys_enc->hw_ctl));
+ phys_enc->hw_ctl = NULL;
+ return;
+ }
+}
+
+/*
+ * sde_encoder_phys_vid_control_vblank_irq - refcounted enable/disable of
+ * the INTF vsync interrupt used as the vblank source in video mode.
+ * @enable: true takes a vblank reference, false drops one
+ *
+ * Only the first enable registers the IRQ and only the last disable
+ * unregisters it; slave encoders never report vblank.
+ * Return: 0 on success, negative errno from register/unregister.
+ */
+static int sde_encoder_phys_vid_control_vblank_irq(
+ struct sde_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ struct sde_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ vid_enc = to_sde_encoder_phys_vid(phys_enc);
+
+ /* Slave encoders don't report vblank */
+ if (!sde_encoder_phys_vid_is_master(phys_enc))
+ return 0;
+
+ SDE_DEBUG_VIDENC(vid_enc, "[%pS] enable=%d/%d\n",
+ __builtin_return_address(0),
+ enable, atomic_read(&phys_enc->vblank_refcount));
+
+ SDE_EVT32(DRMID(phys_enc->parent), enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
+ /* register on the 0->1 transition, unregister on 1->0 */
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = sde_encoder_phys_vid_register_irq(phys_enc,
+ SDE_IRQ_TYPE_INTF_VSYNC,
+ INTR_IDX_VSYNC,
+ sde_encoder_phys_vid_vblank_irq, "vsync_irq");
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = sde_encoder_phys_vid_unregister_irq(phys_enc,
+ INTR_IDX_VSYNC);
+
+ if (ret)
+ SDE_ERROR_VIDENC(vid_enc,
+ "control vblank irq error %d, enable %d\n",
+ ret, enable);
+
+ return ret;
+}
+
+/*
+ * sde_encoder_phys_vid_enable - bring up the video-mode encoder: vote for
+ * data-bus bandwidth, program split config and the timing engine,
+ * register vsync/underrun IRQs, and stage the interface flush bit.
+ * The actual ctl flush and timing-engine enable are triggered later by
+ * the framework.
+ */
+static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
+{
+ struct msm_drm_private *priv;
+ struct sde_encoder_phys_vid *vid_enc;
+ struct sde_hw_intf *intf;
+ struct sde_hw_ctl *ctl;
+ u32 flush_mask = 0;
+ int ret;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+ !phys_enc->parent->dev->dev_private) {
+ SDE_ERROR("invalid encoder/device\n");
+ return;
+ }
+ priv = phys_enc->parent->dev->dev_private;
+
+ vid_enc = to_sde_encoder_phys_vid(phys_enc);
+ intf = vid_enc->hw_intf;
+ ctl = phys_enc->hw_ctl;
+ if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+ SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
+ vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+ return;
+ }
+
+ SDE_DEBUG_VIDENC(vid_enc, "\n");
+
+ if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ return;
+
+ sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+ phys_enc->sde_kms->core_client, true);
+
+ sde_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+
+ sde_encoder_phys_vid_setup_timing_engine(phys_enc);
+ ret = sde_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+ if (ret)
+ goto end;
+
+ ret = sde_encoder_phys_vid_register_irq(phys_enc,
+ SDE_IRQ_TYPE_INTF_UNDER_RUN,
+ INTR_IDX_UNDERRUN,
+ sde_encoder_phys_vid_underrun_irq, "underrun");
+ if (ret) {
+ /* drop the vblank reference taken just above */
+ sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+ goto end;
+ }
+
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+ ctl->ops.update_pending_flush(ctl, flush_mask);
+
+ SDE_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
+ ctl->idx - CTL_0, flush_mask);
+
+ /* ctl_flush & timing engine enable will be triggered by framework */
+ if (phys_enc->enable_state == SDE_ENC_DISABLED)
+ phys_enc->enable_state = SDE_ENC_ENABLING;
+
+end:
+ return;
+}
+
+/* Release the video-mode physical encoder allocated at init time. */
+static void sde_encoder_phys_vid_destroy(struct sde_encoder_phys *phys_enc)
+{
+	if (phys_enc) {
+		struct sde_encoder_phys_vid *vid_enc =
+				to_sde_encoder_phys_vid(phys_enc);
+
+		SDE_DEBUG_VIDENC(vid_enc, "\n");
+		kfree(vid_enc);
+	} else {
+		SDE_ERROR("invalid encoder\n");
+	}
+}
+
+/* Report this encoder's interface as video mode in the hw resource table. */
+static void sde_encoder_phys_vid_get_hw_resources(
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_encoder_phys_vid *venc;
+
+	if (!phys_enc || !hw_res) {
+		SDE_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
+				phys_enc != 0, hw_res != 0, conn_state != 0);
+		return;
+	}
+
+	venc = to_sde_encoder_phys_vid(phys_enc);
+	if (!venc->hw_intf) {
+		SDE_ERROR("invalid arg(s), hw_intf\n");
+		return;
+	}
+
+	SDE_DEBUG_VIDENC(venc, "\n");
+	hw_res->intfs[venc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+}
+
+/*
+ * sde_encoder_phys_vid_wait_for_vblank - wait for the pending kickoff
+ * vblank to arrive.
+ * @phys_enc: physical encoder
+ * @notify: when true, report frame-done (or frame-error) to the parent
+ *
+ * Slave encoders do not generate vblank and report DONE immediately.
+ * On wait timeout the vsync interrupt status is polled once to recover
+ * from a missed interrupt before declaring a frame error.
+ *
+ * Return: 0 on success/recovery, -EWOULDBLOCK when not enabled,
+ * -ETIMEDOUT when the vblank never arrived.
+ */
+static int sde_encoder_phys_vid_wait_for_vblank(
+		struct sde_encoder_phys *phys_enc, bool notify)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+			to_sde_encoder_phys_vid(phys_enc);
+	u32 irq_status;
+	int ret;
+
+	if (!sde_encoder_phys_vid_is_master(phys_enc)) {
+		/* always signal done for slave video encoder */
+		if (notify && phys_enc->parent_ops.handle_frame_done)
+			phys_enc->parent_ops.handle_frame_done(
+					phys_enc->parent, phys_enc,
+					SDE_ENCODER_FRAME_EVENT_DONE);
+		return 0;
+	}
+
+	if (phys_enc->enable_state != SDE_ENC_ENABLED) {
+		SDE_ERROR("encoder not enabled\n");
+		return -EWOULDBLOCK;
+	}
+
+	SDE_EVT32(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
+			SDE_EVTLOG_FUNC_ENTRY);
+
+	/* Wait for kickoff to complete */
+	ret = sde_encoder_helper_wait_event_timeout(
+			DRMID(phys_enc->parent),
+			vid_enc->hw_intf->idx - INTF_0,
+			&phys_enc->pending_kickoff_wq,
+			&phys_enc->pending_kickoff_cnt,
+			KICKOFF_TIMEOUT_MS);
+	if (ret <= 0) {
+		/*
+		 * Poll using the looked-up core interrupt index, not the
+		 * local INTR_IDX_VSYNC slot number the original passed,
+		 * which indexed an unrelated interrupt.
+		 */
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				vid_enc->irq_idx[INTR_IDX_VSYNC], true);
+		if (irq_status) {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					vid_enc->hw_intf->idx - INTF_0);
+			SDE_DEBUG_VIDENC(vid_enc, "done, irq not triggered\n");
+			if (notify && phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+						phys_enc->parent, phys_enc,
+						SDE_ENCODER_FRAME_EVENT_DONE);
+			/* run the handler manually to retire the kickoff */
+			sde_encoder_phys_vid_vblank_irq(vid_enc,
+					INTR_IDX_VSYNC);
+			ret = 0;
+		} else {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					vid_enc->hw_intf->idx - INTF_0);
+			SDE_ERROR_VIDENC(vid_enc, "kickoff timed out\n");
+			if (notify && phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+						phys_enc->parent, phys_enc,
+						SDE_ENCODER_FRAME_EVENT_ERROR);
+			ret = -ETIMEDOUT;
+		}
+	} else {
+		if (notify && phys_enc->parent_ops.handle_frame_done)
+			phys_enc->parent_ops.handle_frame_done(
+					phys_enc->parent, phys_enc,
+					SDE_ENCODER_FRAME_EVENT_DONE);
+		ret = 0;
+	}
+
+	/*
+	 * Propagate the computed result; the original unconditionally
+	 * returned 0, silently discarding the -ETIMEDOUT set above.
+	 */
+	return ret;
+}
+
+/*
+ * sde_encoder_phys_vid_wait_for_commit_done - block until commit latches
+ * @phys_enc: pointer to the physical encoder
+ *
+ * A commit is complete once the following vblank arrives; notify the
+ * parent encoder of the outcome.
+ */
+static int sde_encoder_phys_vid_wait_for_commit_done(
+		struct sde_encoder_phys *phys_enc)
+{
+	return sde_encoder_phys_vid_wait_for_vblank(phys_enc, true);
+}
+
+/*
+ * sde_encoder_phys_vid_disable - stop the video timing engine
+ * @phys_enc: pointer to the physical encoder
+ *
+ * Disables the timing engine under the encoder spinlock, then (master
+ * only) waits one vblank so the disable latches in hardware before the
+ * vsync source itself goes away, and finally drops the bandwidth vote.
+ */
+static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
+{
+	struct msm_drm_private *priv;
+	struct sde_encoder_phys_vid *vid_enc;
+	unsigned long lock_flags;
+	int ret;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+			!phys_enc->parent->dev->dev_private) {
+		SDE_ERROR("invalid encoder/device\n");
+		return;
+	}
+	priv = phys_enc->parent->dev->dev_private;
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
+				vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+		return;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
+
+	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+		return;
+
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR("already disabled\n");
+		return;
+	}
+
+	/* stop the timing engine; master also arms the pending counter
+	 * so the vblank wait below has something to wait on
+	 */
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+	if (sde_encoder_phys_vid_is_master(phys_enc))
+		sde_encoder_phys_inc_pending(phys_enc);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	/*
+	 * Wait for a vsync so we know the ENABLE=0 latched before
+	 * the (connector) source of the vsync's gets disabled,
+	 * otherwise we end up in a funny state if we re-enable
+	 * before the disable latches, which results that some of
+	 * the settings changes for the new modeset (like new
+	 * scanout buffer) don't latch properly..
+	 */
+	if (sde_encoder_phys_vid_is_master(phys_enc)) {
+		ret = sde_encoder_phys_vid_wait_for_vblank(phys_enc, false);
+		if (ret) {
+			/* reset the counter so a later enable starts clean */
+			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+			SDE_ERROR_VIDENC(vid_enc,
+					"failure waiting for disable: %d\n",
+					ret);
+			SDE_EVT32(DRMID(phys_enc->parent),
+					vid_enc->hw_intf->idx - INTF_0, ret);
+		}
+		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+	}
+
+	/* drop data-bus bandwidth vote; presumably paired with the enable
+	 * path - TODO confirm against sde_encoder_phys_vid_enable
+	 */
+	sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+			phys_enc->sde_kms->core_client, false);
+
+	/* all vblank references should be gone once disabled */
+	if (atomic_read(&phys_enc->vblank_refcount))
+		SDE_ERROR_VIDENC(vid_enc, "invalid vblank refcount %d\n",
+				atomic_read(&phys_enc->vblank_refcount));
+
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+}
+
+/*
+ * sde_encoder_phys_vid_handle_post_kickoff - post-kickoff processing
+ * @phys_enc: pointer to the physical encoder
+ *
+ * Video mode must flush CTL before enabling the timing engine, so a
+ * first-time (ENABLING) encoder turns on its interface here, after the
+ * kickoff flush has been issued.
+ */
+static void sde_encoder_phys_vid_handle_post_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	unsigned long lock_flags;
+	struct sde_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	SDE_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
+
+	/* nothing to do unless this is the first kickoff after enable */
+	if (phys_enc->enable_state != SDE_ENC_ENABLING)
+		return;
+
+	SDE_EVT32(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0);
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+	phys_enc->enable_state = SDE_ENC_ENABLED;
+}
+
+/*
+ * sde_encoder_phys_vid_setup_misr - configure MISR on the interface
+ * @phys_enc: pointer to the physical encoder
+ * @misr_map: MISR configuration to program
+ *
+ * No-op when the interface does not implement MISR setup.
+ */
+static void sde_encoder_phys_vid_setup_misr(struct sde_encoder_phys *phys_enc,
+		struct sde_misr_params *misr_map)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+			to_sde_encoder_phys_vid(phys_enc);
+	struct sde_hw_intf *intf;
+
+	if (!vid_enc)
+		return;
+
+	intf = vid_enc->hw_intf;
+	if (intf && intf->ops.setup_misr)
+		intf->ops.setup_misr(intf, misr_map);
+}
+
+/*
+ * sde_encoder_phys_vid_collect_misr - read back MISR signature
+ * @phys_enc: pointer to the physical encoder
+ * @misr_map: destination for the collected MISR values
+ *
+ * No-op when the interface does not implement MISR collection.
+ */
+static void sde_encoder_phys_vid_collect_misr(struct sde_encoder_phys *phys_enc,
+		struct sde_misr_params *misr_map)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+			to_sde_encoder_phys_vid(phys_enc);
+	struct sde_hw_intf *intf;
+
+	if (!vid_enc)
+		return;
+
+	intf = vid_enc->hw_intf;
+	if (intf && intf->ops.collect_misr)
+		intf->ops.collect_misr(intf, misr_map);
+}
+
+/*
+ * sde_encoder_phys_vid_init_ops - populate the physical encoder ops table
+ * @ops: ops structure to fill with the video-mode implementations
+ */
+static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
+{
+	/* topology / role queries */
+	ops->is_master = sde_encoder_phys_vid_is_master;
+	ops->needs_single_flush = sde_encoder_phys_vid_needs_single_flush;
+
+	/* mode programming and enable/disable lifecycle */
+	ops->mode_fixup = sde_encoder_phys_vid_mode_fixup;
+	ops->mode_set = sde_encoder_phys_vid_mode_set;
+	ops->enable = sde_encoder_phys_vid_enable;
+	ops->disable = sde_encoder_phys_vid_disable;
+	ops->destroy = sde_encoder_phys_vid_destroy;
+
+	/* resource and irq management */
+	ops->get_hw_resources = sde_encoder_phys_vid_get_hw_resources;
+	ops->control_vblank_irq = sde_encoder_phys_vid_control_vblank_irq;
+
+	/* frame kickoff / completion */
+	ops->wait_for_commit_done = sde_encoder_phys_vid_wait_for_commit_done;
+	ops->handle_post_kickoff = sde_encoder_phys_vid_handle_post_kickoff;
+
+	/* debug: misr support */
+	ops->setup_misr = sde_encoder_phys_vid_setup_misr;
+	ops->collect_misr = sde_encoder_phys_vid_collect_misr;
+}
+
+/*
+ * sde_encoder_phys_vid_init - create a video-mode physical encoder
+ * @p: init parameters (kms handle, interface index, parent, locks)
+ *
+ * Permanently binds the matching hw_intf to the encoder; all other
+ * resources are allocated at atomic commit time by use case.
+ *
+ * Return: new encoder on success, ERR_PTR on failure.
+ */
+struct sde_encoder_phys *sde_encoder_phys_vid_init(
+		struct sde_enc_phys_init_params *p)
+{
+	struct sde_encoder_phys *phys_enc = NULL;
+	struct sde_encoder_phys_vid *vid_enc = NULL;
+	struct sde_rm_hw_iter iter;
+	struct sde_hw_mdp *hw_mdp;
+	int i, ret = 0;
+
+	if (!p) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
+	if (!vid_enc) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	phys_enc = &vid_enc->base;
+
+	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		/*
+		 * PTR_ERR(NULL) is 0; map a NULL mdp to -EINVAL so the
+		 * fail path never returns ERR_PTR(0) (i.e. NULL) to the
+		 * caller as if it were a valid encoder.
+		 */
+		ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL;
+		SDE_ERROR("failed to get mdptop\n");
+		goto fail;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+	phys_enc->intf_idx = p->intf_idx;
+
+	/**
+	 * hw_intf resource permanently assigned to this encoder
+	 * Other resources allocated at atomic commit time by use case
+	 */
+	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_INTF);
+	while (sde_rm_get_hw(&p->sde_kms->rm, &iter)) {
+		struct sde_hw_intf *hw_intf = (struct sde_hw_intf *)iter.hw;
+
+		if (hw_intf->idx == p->intf_idx) {
+			vid_enc->hw_intf = hw_intf;
+			break;
+		}
+	}
+
+	if (!vid_enc->hw_intf) {
+		ret = -EINVAL;
+		SDE_ERROR("failed to get hw_intf\n");
+		goto fail;
+	}
+
+	/* misr support is optional; init proceeds without it */
+	phys_enc->misr_map = kzalloc(sizeof(struct sde_misr_params),
+			GFP_KERNEL);
+	if (!phys_enc->misr_map)
+		SDE_ERROR("sde misr map allocation failed\n");
+
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
+
+	sde_encoder_phys_vid_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->sde_kms = p->sde_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_VIDEO;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	for (i = 0; i < INTR_IDX_MAX; i++)
+		INIT_LIST_HEAD(&vid_enc->irq_cb[i].list);
+	atomic_set(&phys_enc->vblank_refcount, 0);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+
+	SDE_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+
+	return phys_enc;
+
+fail:
+	SDE_ERROR("failed to create encoder\n");
+	if (vid_enc)
+		sde_encoder_phys_vid_destroy(phys_enc);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
new file mode 100644
index 000000000000..9943e3906df0
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -0,0 +1,1096 @@
+/*
+ * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/debugfs.h>
+
+#include "sde_encoder_phys.h"
+#include "sde_formats.h"
+#include "sde_hw_top.h"
+#include "sde_hw_interrupts.h"
+#include "sde_core_irq.h"
+#include "sde_wb.h"
+#include "sde_vbif.h"
+
+#define to_sde_encoder_phys_wb(x) \
+ container_of(x, struct sde_encoder_phys_wb, base)
+
+/* parenthesize every use of the macro argument to survive expression args */
+#define WBID(wb_enc) ((wb_enc) ? ((wb_enc)->wb_dev->wb_idx) : -1)
+
+/**
+ * sde_encoder_phys_wb_is_master - report wb always as master encoder
+ * @phys_enc: Pointer to physical encoder (unused)
+ *
+ * Return: true - a writeback encoder never takes a slave role
+ */
+static bool sde_encoder_phys_wb_is_master(struct sde_encoder_phys *phys_enc)
+{
+	return true;
+}
+
+/**
+ * sde_encoder_phys_wb_get_intr_type - get interrupt type based on block mode
+ * @hw_wb: Pointer to h/w writeback driver
+ *
+ * Return: rotator-completion irq for block-mode capable writeback blocks,
+ * wfd-completion irq otherwise
+ */
+static enum sde_intr_type sde_encoder_phys_wb_get_intr_type(
+		struct sde_hw_wb *hw_wb)
+{
+	if (hw_wb->caps->features & BIT(SDE_WB_BLOCK_MODE))
+		return SDE_IRQ_TYPE_WB_ROT_COMP;
+
+	return SDE_IRQ_TYPE_WB_WFD_COMP;
+}
+
+/**
+ * sde_encoder_phys_wb_set_ot_limit - set OT limit for writeback interface
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Builds a VBIF outstanding-transaction request from the writeback block
+ * capabilities and the current output ROI, then hands it to the VBIF
+ * driver.
+ */
+static void sde_encoder_phys_wb_set_ot_limit(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct sde_vbif_set_ot_params ot_params;
+
+	memset(&ot_params, 0, sizeof(ot_params));
+	ot_params.xin_id = hw_wb->caps->xin_id;
+	ot_params.num = hw_wb->idx - WB_0;
+	ot_params.width = wb_enc->wb_roi.w;
+	ot_params.height = wb_enc->wb_roi.h;
+	ot_params.is_wfd = true;
+	ot_params.frame_rate = phys_enc->cached_mode.vrefresh;
+	ot_params.vbif_idx = hw_wb->caps->vbif_idx;
+	ot_params.clk_ctrl = hw_wb->caps->clk_ctrl;
+	/* writeback is a write client on the bus, not a read client */
+	ot_params.rd = false;
+
+	sde_vbif_set_ot_limit(phys_enc->sde_kms, &ot_params);
+}
+
+/**
+ * sde_encoder_phys_wb_set_traffic_shaper - set traffic shaper for writeback
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Traffic shaping is only used by the rotator path, so the WFD writeback
+ * encoder always leaves it disabled in its hardware configuration.
+ */
+static void sde_encoder_phys_wb_set_traffic_shaper(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+
+	/* traffic shaper is only enabled for rotator */
+	wb_enc->wb_cfg.ts_cfg.en = false;
+}
+
+/**
+ * sde_encoder_phys_setup_cdm - setup chroma down block
+ * @phys_enc: Pointer to physical encoder
+ * @fb: Pointer to output framebuffer (currently unused in this body)
+ * @format: Output format
+ * @wb_roi: Pointer to output region of interest
+ *
+ * Disables the CDM for non-YUV output; for YUV output, derives the
+ * chroma down-sampling configuration from the format and programs and
+ * enables the CDM block.
+ *
+ * NOTE(review): not declared static although only this file appears to
+ * use it - confirm whether it is referenced externally.
+ */
+void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
+		struct drm_framebuffer *fb, const struct sde_format *format,
+		struct sde_rect *wb_roi)
+{
+	struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm;
+	struct sde_hw_cdm_cfg *cdm_cfg = &phys_enc->cdm_cfg;
+	int ret;
+
+	/* RGB output needs no chroma down-sampling; make sure CDM is off */
+	if (!SDE_FORMAT_IS_YUV(format)) {
+		SDE_DEBUG("[cdm_disable fmt:%x]\n",
+				format->base.pixel_format);
+
+		if (hw_cdm && hw_cdm->ops.disable)
+			hw_cdm->ops.disable(hw_cdm);
+
+		return;
+	}
+
+	memset(cdm_cfg, 0, sizeof(struct sde_hw_cdm_cfg));
+
+	cdm_cfg->output_width = wb_roi->w;
+	cdm_cfg->output_height = wb_roi->h;
+	cdm_cfg->output_fmt = format;
+	cdm_cfg->output_type = CDM_CDWN_OUTPUT_WB;
+	cdm_cfg->output_bit_depth = SDE_FORMAT_IS_DX(format) ?
+		CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
+
+	/* enable 10 bit logic */
+	switch (cdm_cfg->output_fmt->chroma_sample) {
+	case SDE_CHROMA_RGB:
+		/* no chroma subsampling needed */
+		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+		break;
+	case SDE_CHROMA_H2V1:
+		/* horizontal-only subsampling (e.g. 4:2:2) */
+		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+		break;
+	case SDE_CHROMA_420:
+		/* both directions subsampled */
+		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
+		break;
+	case SDE_CHROMA_H1V2:
+	default:
+		/* fall back to pass-through rather than guessing */
+		SDE_ERROR("unsupported chroma sampling type\n");
+		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+		break;
+	}
+
+	SDE_DEBUG("[cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
+			cdm_cfg->output_width,
+			cdm_cfg->output_height,
+			cdm_cfg->output_fmt->base.pixel_format,
+			cdm_cfg->output_type,
+			cdm_cfg->output_bit_depth,
+			cdm_cfg->h_cdwn_type,
+			cdm_cfg->v_cdwn_type);
+
+	if (hw_cdm && hw_cdm->ops.setup_cdwn) {
+		ret = hw_cdm->ops.setup_cdwn(hw_cdm, cdm_cfg);
+		if (ret < 0) {
+			SDE_ERROR("failed to setup CDM %d\n", ret);
+			return;
+		}
+	}
+
+	if (hw_cdm && hw_cdm->ops.enable) {
+		ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
+		if (ret < 0) {
+			SDE_ERROR("failed to enable CDM %d\n", ret);
+			return;
+		}
+	}
+}
+
+/**
+ * sde_encoder_phys_wb_setup_fb - setup output framebuffer
+ * @phys_enc: Pointer to physical encoder
+ * @fb: Pointer to output framebuffer
+ * @wb_roi: Pointer to output region of interest
+ *
+ * Translates the framebuffer and ROI into a sde_hw_wb_cfg and programs
+ * the writeback block (ROI, output format, output address).
+ */
+static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
+		struct drm_framebuffer *fb, struct sde_rect *wb_roi)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb;
+	struct sde_hw_wb_cfg *wb_cfg;
+	const struct msm_format *format;
+	int ret, mmu_id;
+
+	/*
+	 * NOTE(review): wb_enc is computed via container_of before this
+	 * check - that is pointer arithmetic only; nothing is dereferenced
+	 * until after the guard, so the ordering is safe. fb is not
+	 * validated here - presumably guaranteed by the caller; confirm.
+	 */
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	hw_wb = wb_enc->hw_wb;
+	wb_cfg = &wb_enc->wb_cfg;
+	memset(wb_cfg, 0, sizeof(struct sde_hw_wb_cfg));
+
+	wb_cfg->intf_mode = phys_enc->intf_mode;
+	/* secure framebuffers map through the secure IOMMU domain */
+	wb_cfg->is_secure = (fb->flags & DRM_MODE_FB_SECURE) ? true : false;
+	mmu_id = (wb_cfg->is_secure) ?
+			wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] :
+			wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+
+	SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure);
+
+	format = msm_framebuffer_format(fb);
+	if (!format) {
+		SDE_DEBUG("invalid format for fb\n");
+		return;
+	}
+
+	wb_cfg->dest.format = sde_get_sde_format_ext(
+			format->pixel_format,
+			fb->modifier,
+			drm_format_num_planes(fb->pixel_format));
+	if (!wb_cfg->dest.format) {
+		/* this error should be detected during atomic_check */
+		SDE_ERROR("failed to get format %x\n", format->pixel_format);
+		return;
+	}
+	wb_cfg->roi = *wb_roi;
+
+	if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) {
+		/* hw can offset into the buffer: program full fb plus ROI */
+		ret = sde_format_populate_layout(mmu_id, fb, &wb_cfg->dest);
+		if (ret) {
+			SDE_DEBUG("failed to populate layout %d\n", ret);
+			return;
+		}
+		wb_cfg->dest.width = fb->width;
+		wb_cfg->dest.height = fb->height;
+		wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
+	} else {
+		/* no hw ROI offset: bake the ROI into the plane addresses */
+		ret = sde_format_populate_layout_with_roi(mmu_id, fb, wb_roi,
+				&wb_cfg->dest);
+		if (ret) {
+			/* this error should be detected during atomic_check */
+			SDE_DEBUG("failed to populate layout %d\n", ret);
+			return;
+		}
+	}
+
+	/*
+	 * Swap chroma plane addresses for Cb-first planar layouts -
+	 * presumably the hw expects the opposite chroma ordering; confirm
+	 * against the wb block documentation.
+	 */
+	if ((wb_cfg->dest.format->fetch_planes == SDE_PLANE_PLANAR) &&
+			(wb_cfg->dest.format->element[0] == C1_B_Cb))
+		swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]);
+
+	SDE_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n",
+			wb_cfg->dest.plane_addr[0],
+			wb_cfg->dest.plane_addr[1],
+			wb_cfg->dest.plane_addr[2],
+			wb_cfg->dest.plane_addr[3]);
+	SDE_DEBUG("[fb_stride:%8.8x,%8.8x,%8.8x,%8.8x]\n",
+			wb_cfg->dest.plane_pitch[0],
+			wb_cfg->dest.plane_pitch[1],
+			wb_cfg->dest.plane_pitch[2],
+			wb_cfg->dest.plane_pitch[3]);
+
+	if (hw_wb->ops.setup_roi)
+		hw_wb->ops.setup_roi(hw_wb, wb_cfg);
+
+	if (hw_wb->ops.setup_outformat)
+		hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
+
+	if (hw_wb->ops.setup_outaddress)
+		hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
+}
+
+/**
+ * sde_encoder_phys_wb_setup_cdp - setup chroma down prefetch block
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Programs the CTL interface configuration for this writeback path:
+ * no physical interface, writeback block only.
+ */
+static void sde_encoder_phys_wb_setup_cdp(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_ctl *hw_ctl = phys_enc->hw_ctl;
+	struct sde_hw_intf_cfg *intf_cfg = &wb_enc->intf_cfg;
+
+	memset(intf_cfg, 0, sizeof(*intf_cfg));
+
+	intf_cfg->intf = SDE_NONE;
+	intf_cfg->wb = wb_enc->hw_wb->idx;
+	intf_cfg->mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
+
+	if (hw_ctl && hw_ctl->ops.setup_intf_cfg)
+		hw_ctl->ops.setup_intf_cfg(hw_ctl, intf_cfg);
+}
+
+/**
+ * sde_encoder_phys_wb_atomic_check - verify and fixup given atomic states
+ * @phys_enc: Pointer to physical encoder
+ * @crtc_state: Pointer to CRTC atomic state
+ * @conn_state: Pointer to connector atomic state
+ *
+ * Validates connector status, output framebuffer/format capabilities,
+ * and the ROI against the display mode and writeback line-width limits.
+ *
+ * Return: 0 on success, -EINVAL on any invalid configuration.
+ */
+static int sde_encoder_phys_wb_atomic_check(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	const struct sde_wb_cfg *wb_cfg = hw_wb->caps;
+	struct drm_framebuffer *fb;
+	const struct sde_format *fmt;
+	struct sde_rect wb_roi;
+	const struct drm_display_mode *mode = &crtc_state->mode;
+	int rc;
+
+	SDE_DEBUG("[atomic_check:%d,%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode->base.id, mode->name,
+			mode->hdisplay, mode->vdisplay);
+
+	if (!conn_state || !conn_state->connector) {
+		SDE_ERROR("invalid connector state\n");
+		return -EINVAL;
+	} else if (conn_state->connector->status !=
+			connector_status_connected) {
+		SDE_ERROR("connector not connected %d\n",
+				conn_state->connector->status);
+		return -EINVAL;
+	}
+
+	memset(&wb_roi, 0, sizeof(struct sde_rect));
+
+	rc = sde_wb_connector_state_get_output_roi(conn_state, &wb_roi);
+	if (rc) {
+		SDE_ERROR("failed to get roi %d\n", rc);
+		return rc;
+	}
+
+	SDE_DEBUG("[roi:%u,%u,%u,%u]\n", wb_roi.x, wb_roi.y,
+			wb_roi.w, wb_roi.h);
+
+	fb = sde_wb_connector_state_get_output_fb(conn_state);
+	if (!fb) {
+		SDE_ERROR("no output framebuffer\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
+			fb->width, fb->height);
+
+	fmt = sde_get_sde_format_ext(fb->pixel_format, fb->modifier,
+			drm_format_num_planes(fb->pixel_format));
+	if (!fmt) {
+		SDE_ERROR("unsupported output pixel format:%x\n",
+				fb->pixel_format);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("[fb_fmt:%x,%llx]\n", fb->pixel_format,
+			fb->modifier[0]);
+
+	/* YUV output requires the wb block to advertise YUV support */
+	if (SDE_FORMAT_IS_YUV(fmt) &&
+			!(wb_cfg->features & BIT(SDE_WB_YUV_CONFIG))) {
+		SDE_ERROR("invalid output format %x\n", fmt->base.pixel_format);
+		return -EINVAL;
+	}
+
+	/* compressed (UBWC) output requires matching wb capability */
+	if (SDE_FORMAT_IS_UBWC(fmt) &&
+			!(wb_cfg->features & BIT(SDE_WB_UBWC_1_0))) {
+		SDE_ERROR("invalid output format %x\n", fmt->base.pixel_format);
+		return -EINVAL;
+	}
+
+	/*
+	 * Force a full modeset when the CDM requirement flips (YUV output
+	 * without a CDM reserved, or vice versa) so resources get
+	 * re-reserved.
+	 */
+	if (SDE_FORMAT_IS_YUV(fmt) != !!phys_enc->hw_cdm)
+		crtc_state->mode_changed = true;
+
+	if (wb_roi.w && wb_roi.h) {
+		/* explicit ROI: must match the mode and fit in the fb */
+		if (wb_roi.w != mode->hdisplay) {
+			SDE_ERROR("invalid roi w=%d, mode w=%d\n", wb_roi.w,
+					mode->hdisplay);
+			return -EINVAL;
+		} else if (wb_roi.h != mode->vdisplay) {
+			SDE_ERROR("invalid roi h=%d, mode h=%d\n", wb_roi.h,
+					mode->vdisplay);
+			return -EINVAL;
+		} else if (wb_roi.x + wb_roi.w > fb->width) {
+			SDE_ERROR("invalid roi x=%d, w=%d, fb w=%d\n",
+					wb_roi.x, wb_roi.w, fb->width);
+			return -EINVAL;
+		} else if (wb_roi.y + wb_roi.h > fb->height) {
+			SDE_ERROR("invalid roi y=%d, h=%d, fb h=%d\n",
+					wb_roi.y, wb_roi.h, fb->height);
+			return -EINVAL;
+		} else if (wb_roi.w > wb_cfg->sblk->maxlinewidth) {
+			SDE_ERROR("invalid roi w=%d, maxlinewidth=%u\n",
+					wb_roi.w, wb_cfg->sblk->maxlinewidth);
+			return -EINVAL;
+		}
+	} else {
+		/* empty ROI: the whole fb must match the mode exactly */
+		if (wb_roi.x || wb_roi.y) {
+			SDE_ERROR("invalid roi x=%d, y=%d\n",
+					wb_roi.x, wb_roi.y);
+			return -EINVAL;
+		} else if (fb->width != mode->hdisplay) {
+			SDE_ERROR("invalid fb w=%d, mode w=%d\n", fb->width,
+					mode->hdisplay);
+			return -EINVAL;
+		} else if (fb->height != mode->vdisplay) {
+			SDE_ERROR("invalid fb h=%d, mode h=%d\n", fb->height,
+					mode->vdisplay);
+			return -EINVAL;
+		} else if (fb->width > wb_cfg->sblk->maxlinewidth) {
+			SDE_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
+					fb->width, wb_cfg->sblk->maxlinewidth);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * sde_encoder_phys_wb_flush - flush hardware update
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Collects the CTL flush bits for the writeback block (plus the CDM
+ * when one is in use) and adds them to the pending flush mask; the
+ * actual flush trigger happens later in the kickoff path.
+ */
+static void sde_encoder_phys_wb_flush(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct sde_hw_ctl *hw_ctl = phys_enc->hw_ctl;
+	struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm;
+	u32 flush_mask = 0;
+
+	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+	/* no CTL means nothing to flush (e.g. mode_set failed) */
+	if (!hw_ctl) {
+		SDE_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0);
+		return;
+	}
+
+	if (hw_ctl->ops.get_bitmask_wb)
+		hw_ctl->ops.get_bitmask_wb(hw_ctl, &flush_mask, hw_wb->idx);
+
+	if (hw_ctl->ops.get_bitmask_cdm && hw_cdm)
+		hw_ctl->ops.get_bitmask_cdm(hw_ctl, &flush_mask, hw_cdm->idx);
+
+	if (hw_ctl->ops.update_pending_flush)
+		hw_ctl->ops.update_pending_flush(hw_ctl, flush_mask);
+
+	SDE_DEBUG("Flushing CTL_ID %d, flush_mask %x, WB %d\n",
+			hw_ctl->idx - CTL_0, flush_mask, hw_wb->idx - WB_0);
+}
+
+/**
+ * sde_encoder_phys_wb_setup - setup writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Resolves the output framebuffer/ROI/format from the writeback device
+ * and programs OT limit, traffic shaper, CDM, framebuffer and CTL
+ * interface configuration for the upcoming frame.
+ */
+static void sde_encoder_phys_wb_setup(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct drm_display_mode mode = phys_enc->cached_mode;
+	struct drm_framebuffer *fb;
+	struct sde_rect *wb_roi = &wb_enc->wb_roi;
+
+	SDE_DEBUG("[mode_set:%d,%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode.base.id, mode.name,
+			mode.hdisplay, mode.vdisplay);
+
+	memset(wb_roi, 0, sizeof(struct sde_rect));
+
+	fb = sde_wb_get_output_fb(wb_enc->wb_dev);
+	if (!fb) {
+		SDE_DEBUG("no output framebuffer\n");
+		return;
+	}
+
+	SDE_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
+			fb->width, fb->height);
+
+	/* an empty ROI defaults to the full framebuffer */
+	sde_wb_get_output_roi(wb_enc->wb_dev, wb_roi);
+	if (wb_roi->w == 0 || wb_roi->h == 0) {
+		wb_roi->x = 0;
+		wb_roi->y = 0;
+		wb_roi->w = fb->width;
+		wb_roi->h = fb->height;
+	}
+
+	SDE_DEBUG("[roi:%u,%u,%u,%u]\n", wb_roi->x, wb_roi->y,
+			wb_roi->w, wb_roi->h);
+
+	wb_enc->wb_fmt = sde_get_sde_format_ext(fb->pixel_format, fb->modifier,
+			drm_format_num_planes(fb->pixel_format));
+	if (!wb_enc->wb_fmt) {
+		SDE_ERROR("unsupported output pixel format: %d\n",
+				fb->pixel_format);
+		return;
+	}
+
+	SDE_DEBUG("[fb_fmt:%x,%llx]\n", fb->pixel_format,
+			fb->modifier[0]);
+
+	sde_encoder_phys_wb_set_ot_limit(phys_enc);
+
+	sde_encoder_phys_wb_set_traffic_shaper(phys_enc);
+
+	sde_encoder_phys_setup_cdm(phys_enc, fb, wb_enc->wb_fmt, wb_roi);
+
+	sde_encoder_phys_wb_setup_fb(phys_enc, fb, wb_roi);
+
+	sde_encoder_phys_wb_setup_cdp(phys_enc);
+}
+
+/**
+ * sde_encoder_phys_wb_unregister_irq - unregister writeback interrupt handler
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Return: always 0
+ */
+static int sde_encoder_phys_wb_unregister_irq(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+
+	/* nothing to undo when irq registration is bypassed */
+	if (wb_enc->bypass_irqreg)
+		return 0;
+
+	/* disable first, then drop the callback registration */
+	sde_core_irq_disable(phys_enc->sde_kms, &wb_enc->irq_idx, 1);
+	sde_core_irq_unregister_callback(phys_enc->sde_kms, wb_enc->irq_idx,
+			&wb_enc->irq_cb);
+
+	SDE_DEBUG("un-register IRQ for wb %d, irq_idx=%d\n",
+			hw_wb->idx - WB_0,
+			wb_enc->irq_idx);
+
+	return 0;
+}
+
+/**
+ * sde_encoder_phys_wb_done_irq - writeback interrupt handler
+ * @arg: Pointer to writeback encoder
+ * @irq_idx: interrupt index
+ *
+ * Forwards frame-done and vblank events to the parent encoder and
+ * completes the wbdone waiters.
+ */
+static void sde_encoder_phys_wb_done_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_wb *wb_enc = arg;
+	struct sde_encoder_phys *phys_enc = &wb_enc->base;
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+
+	SDE_DEBUG("[wb:%d,%u]\n", hw_wb->idx - WB_0,
+			wb_enc->frame_count);
+
+	if (phys_enc->parent_ops.handle_frame_done)
+		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
+				phys_enc, SDE_ENCODER_FRAME_EVENT_DONE);
+
+	/*
+	 * Guard against an unpopulated callback, consistent with the
+	 * frame_done check above; previously this was invoked
+	 * unconditionally.
+	 */
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+				phys_enc);
+
+	complete_all(&wb_enc->wbdone_complete);
+}
+
+/**
+ * sde_encoder_phys_wb_register_irq - register writeback interrupt handler
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Looks up the WB_DONE irq index, registers the completion callback and
+ * enables the interrupt. On enable failure the callback registration is
+ * rolled back.
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int sde_encoder_phys_wb_register_irq(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct sde_irq_callback *irq_cb = &wb_enc->irq_cb;
+	enum sde_intr_type intr_type;
+	int ret = 0;
+
+	if (wb_enc->bypass_irqreg)
+		return 0;
+
+	intr_type = sde_encoder_phys_wb_get_intr_type(hw_wb);
+	wb_enc->irq_idx = sde_core_irq_idx_lookup(phys_enc->sde_kms,
+			intr_type, hw_wb->idx);
+	if (wb_enc->irq_idx < 0) {
+		SDE_ERROR(
+			"failed to lookup IRQ index for WB_DONE with wb=%d\n",
+			hw_wb->idx - WB_0);
+		return -EINVAL;
+	}
+
+	irq_cb->func = sde_encoder_phys_wb_done_irq;
+	irq_cb->arg = wb_enc;
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
+			wb_enc->irq_idx, irq_cb);
+	if (ret) {
+		SDE_ERROR("failed to register IRQ callback WB_DONE\n");
+		return ret;
+	}
+
+	ret = sde_core_irq_enable(phys_enc->sde_kms, &wb_enc->irq_idx, 1);
+	if (ret) {
+		SDE_ERROR(
+			"failed to enable IRQ for WB_DONE, wb %d, irq_idx=%d\n",
+			hw_wb->idx - WB_0,
+			wb_enc->irq_idx);
+
+		/*
+		 * Unregister the callback with the still-valid index BEFORE
+		 * clearing it; the previous ordering passed -EINVAL to the
+		 * unregister call and leaked the registration.
+		 */
+		sde_core_irq_unregister_callback(phys_enc->sde_kms,
+				wb_enc->irq_idx, irq_cb);
+		wb_enc->irq_idx = -EINVAL;
+		return ret;
+	}
+
+	SDE_DEBUG("registered IRQ for wb %d, irq_idx=%d\n",
+			hw_wb->idx - WB_0,
+			wb_enc->irq_idx);
+
+	return ret;
+}
+
+/**
+ * sde_encoder_phys_wb_mode_set - set display mode
+ * @phys_enc: Pointer to physical encoder
+ * @mode: Pointer to requested display mode
+ * @adj_mode: Pointer to adjusted display mode
+ *
+ * Caches the adjusted mode and retrieves the CTL (mandatory) and CDM
+ * (optional) hardware blocks previously reserved for this encoder.
+ */
+static void sde_encoder_phys_wb_mode_set(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_rm *rm = &phys_enc->sde_kms->rm;
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct sde_rm_hw_iter iter;
+	int i, instance;
+
+	phys_enc->cached_mode = *adj_mode;
+	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+	SDE_DEBUG("[mode_set_cache:%d,%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode->base.id,
+			mode->name, mode->hdisplay, mode->vdisplay);
+
+	phys_enc->hw_ctl = NULL;
+	phys_enc->hw_cdm = NULL;
+
+	/* Retrieve previously allocated HW Resources. CTL shouldn't fail */
+	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
+	for (i = 0; i <= instance; i++) {
+		sde_rm_get_hw(rm, &iter);
+		if (i == instance)
+			phys_enc->hw_ctl = (struct sde_hw_ctl *) iter.hw;
+	}
+
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		SDE_ERROR("failed init ctl: %ld\n", PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+
+	/* CDM is optional */
+	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CDM);
+	for (i = 0; i <= instance; i++) {
+		sde_rm_get_hw(rm, &iter);
+		if (i == instance)
+			phys_enc->hw_cdm = (struct sde_hw_cdm *) iter.hw;
+	}
+
+	if (IS_ERR(phys_enc->hw_cdm)) {
+		SDE_ERROR("CDM required but not allocated: %ld\n",
+				PTR_ERR(phys_enc->hw_cdm));
+		phys_enc->hw_ctl = NULL;
+		/*
+		 * Clear the error pointer too: later code only NULL-checks
+		 * hw_cdm before dereferencing its ops, and an ERR_PTR would
+		 * pass that check.
+		 */
+		phys_enc->hw_cdm = NULL;
+	}
+}
+
+/**
+ * sde_encoder_phys_wb_wait_for_commit_done - wait until request is committed
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Blocks on the wbdone completion with a timeout; on timeout, re-reads
+ * the irq status to catch the case where the interrupt fired but the
+ * handler was starved, and runs the handler manually. Always tears down
+ * the irq registration and updates frame accounting afterwards.
+ *
+ * Return: 0 on success, -EWOULDBLOCK if not enabled, -ETIMEDOUT on a
+ * real timeout.
+ */
+static int sde_encoder_phys_wb_wait_for_commit_done(
+		struct sde_encoder_phys *phys_enc)
+{
+	/* jiffies remaining from wait_for_completion_timeout; 0 = timeout */
+	unsigned long ret;
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	u32 irq_status;
+	u64 wb_time = 0;
+	int rc = 0;
+
+	/* Return EWOULDBLOCK since we know the wait isn't necessary */
+	if (WARN_ON(phys_enc->enable_state != SDE_ENC_ENABLED))
+		return -EWOULDBLOCK;
+
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->frame_count);
+
+	ret = wait_for_completion_timeout(&wb_enc->wbdone_complete,
+			KICKOFF_TIMEOUT_JIFFIES);
+
+	if (!ret) {
+		SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc),
+				wb_enc->frame_count);
+
+		/* distinguish a starved handler from a missing interrupt */
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				wb_enc->irq_idx, true);
+		if (irq_status) {
+			SDE_DEBUG("wb:%d done but irq not triggered\n",
+					wb_enc->wb_dev->wb_idx - WB_0);
+			sde_encoder_phys_wb_done_irq(wb_enc, wb_enc->irq_idx);
+		} else {
+			SDE_ERROR("wb:%d kickoff timed out\n",
+					wb_enc->wb_dev->wb_idx - WB_0);
+			if (phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+						phys_enc->parent, phys_enc,
+						SDE_ENCODER_FRAME_EVENT_ERROR);
+			rc = -ETIMEDOUT;
+		}
+	}
+
+	sde_encoder_phys_wb_unregister_irq(phys_enc);
+
+	if (!rc)
+		wb_enc->end_time = ktime_get();
+
+	/* once operation is done, disable traffic shaper */
+	if (wb_enc->wb_cfg.ts_cfg.en && wb_enc->hw_wb &&
+			wb_enc->hw_wb->ops.setup_trafficshaper) {
+		wb_enc->wb_cfg.ts_cfg.en = false;
+		wb_enc->hw_wb->ops.setup_trafficshaper(
+				wb_enc->hw_wb, &wb_enc->wb_cfg);
+	}
+
+	/* remove vote for iommu/clk/bus */
+	/* NOTE(review): frame_count advances even on timeout - presumably
+	 * intentional so accounting matches kickoff_count; confirm
+	 */
+	wb_enc->frame_count++;
+
+	if (!rc) {
+		wb_time = (u64)ktime_to_us(wb_enc->end_time) -
+				(u64)ktime_to_us(wb_enc->start_time);
+		SDE_DEBUG("wb:%d took %llu us\n",
+			wb_enc->wb_dev->wb_idx - WB_0, wb_time);
+	}
+
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->frame_count,
+			wb_time);
+
+	return rc;
+}
+
+/**
+ * sde_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Re-arms the wbdone completion, registers the WB_DONE irq, programs
+ * the writeback hardware for the frame and queues the CTL flush bits.
+ */
+static void sde_encoder_phys_wb_prepare_for_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	int ret;
+
+	SDE_DEBUG("[wb:%d,%u]\n", wb_enc->hw_wb->idx - WB_0,
+			wb_enc->kickoff_count);
+
+	/* re-arm before the irq can fire for this frame */
+	reinit_completion(&wb_enc->wbdone_complete);
+
+	ret = sde_encoder_phys_wb_register_irq(phys_enc);
+	if (ret) {
+		SDE_ERROR("failed to register irq %d\n", ret);
+		return;
+	}
+
+	wb_enc->kickoff_count++;
+
+	/* set OT limit & enable traffic shaper */
+	sde_encoder_phys_wb_setup(phys_enc);
+
+	sde_encoder_phys_wb_flush(phys_enc);
+
+	/* vote for iommu/clk/bus */
+	wb_enc->start_time = ktime_get();
+
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->kickoff_count);
+}
+
+/**
+ * sde_encoder_phys_wb_handle_post_kickoff - post-kickoff processing
+ * @phys_enc: Pointer to physical encoder
+ *
+ * No hardware programming is needed after a writeback kickoff; this
+ * only emits trace events.
+ */
+static void sde_encoder_phys_wb_handle_post_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+
+	SDE_DEBUG("[wb:%d]\n", wb_enc->hw_wb->idx - WB_0);
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc));
+}
+
+/**
+ * sde_encoder_phys_wb_enable - enable writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Locates the writeback connector bound to this encoder and caches its
+ * writeback device handle.
+ */
+static void sde_encoder_phys_wb_enable(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct drm_device *dev;
+	struct drm_connector *connector;
+	struct drm_connector *wb_connector = NULL;
+
+	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+	if (!wb_enc->base.parent || !wb_enc->base.parent->dev) {
+		SDE_ERROR("invalid drm device\n");
+		return;
+	}
+	dev = wb_enc->base.parent->dev;
+
+	/* find associated writeback connector */
+	mutex_lock(&dev->mode_config.mutex);
+	drm_for_each_connector(connector, phys_enc->parent->dev) {
+		if (connector->encoder == phys_enc->parent) {
+			wb_connector = connector;
+			break;
+		}
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+
+	/*
+	 * Track the match explicitly: the list cursor does not point at a
+	 * valid connector when the loop completes without a match, so the
+	 * previous post-loop connector->encoder check read invalid memory.
+	 */
+	if (!wb_connector) {
+		SDE_ERROR("failed to find writeback connector\n");
+		return;
+	}
+	wb_enc->wb_dev = sde_wb_connector_get_wb(wb_connector);
+
+	phys_enc->enable_state = SDE_ENC_ENABLED;
+}
+
+/**
+ * sde_encoder_phys_wb_disable - disable writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Drains any outstanding frame before disabling, and shuts down the CDM
+ * if one was in use.
+ */
+static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+
+	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR("encoder is already disabled\n");
+		return;
+	}
+
+	/* a kicked-off frame that has not completed yet must be drained */
+	if (wb_enc->frame_count != wb_enc->kickoff_count) {
+		SDE_DEBUG("[wait_for_done: wb:%d, frame:%u, kickoff:%u]\n",
+				hw_wb->idx - WB_0, wb_enc->frame_count,
+				wb_enc->kickoff_count);
+		sde_encoder_phys_wb_wait_for_commit_done(phys_enc);
+	}
+
+	if (phys_enc->hw_cdm && phys_enc->hw_cdm->ops.disable) {
+		SDE_DEBUG_DRIVER("[cdm_disable]\n");
+		phys_enc->hw_cdm->ops.disable(phys_enc->hw_cdm);
+	}
+
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+}
+
+/**
+ * sde_encoder_phys_wb_get_hw_resources - get hardware resources
+ * @phys_enc: Pointer to physical encoder
+ * @hw_res: Pointer to encoder resources
+ * @conn_state: Pointer to connector state holding the output framebuffer
+ *
+ * Reports the writeback interface mode used by this encoder and whether
+ * a CDM block is required (i.e. the output format is YUV).
+ */
+static void sde_encoder_phys_wb_get_hw_resources(
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_encoder_phys_wb *wb_enc;
+	struct sde_hw_wb *hw_wb;
+	struct drm_framebuffer *fb;
+	const struct sde_format *fmt;
+
+	/* validate arguments before converting to the wb container */
+	if (!phys_enc || !hw_res) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	wb_enc = to_sde_encoder_phys_wb(phys_enc);
+
+	fb = sde_wb_connector_state_get_output_fb(conn_state);
+	if (!fb) {
+		SDE_ERROR("no output framebuffer\n");
+		return;
+	}
+
+	fmt = sde_get_sde_format_ext(fb->pixel_format, fb->modifier,
+			drm_format_num_planes(fb->pixel_format));
+	if (!fmt) {
+		SDE_ERROR("unsupported output pixel format:%d\n",
+				fb->pixel_format);
+		return;
+	}
+
+	hw_wb = wb_enc->hw_wb;
+	hw_res->wbs[hw_wb->idx - WB_0] = phys_enc->intf_mode;
+	/* YUV output requires the chroma-down (CDM) block */
+	hw_res->needs_cdm = SDE_FORMAT_IS_YUV(fmt);
+	SDE_DEBUG("[wb:%d] intf_mode=%d needs_cdm=%d\n", hw_wb->idx - WB_0,
+			hw_res->wbs[hw_wb->idx - WB_0],
+			hw_res->needs_cdm);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * sde_encoder_phys_wb_init_debugfs - initialize writeback encoder debugfs
+ * @phys_enc: Pointer to physical encoder
+ * @kms: Pointer to SDE KMS object
+ * Returns: Zero on success, or negative error code
+ */
+static int sde_encoder_phys_wb_init_debugfs(
+		struct sde_encoder_phys *phys_enc, struct sde_kms *kms)
+{
+	struct sde_encoder_phys_wb *wb_enc;
+
+	/* validate phys_enc before converting to the wb container */
+	if (!phys_enc || !kms)
+		return -EINVAL;
+
+	wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	if (!wb_enc->hw_wb)
+		return -EINVAL;
+
+	snprintf(wb_enc->wb_name, ARRAY_SIZE(wb_enc->wb_name), "encoder_wb%d",
+			wb_enc->hw_wb->idx - WB_0);
+
+	wb_enc->debugfs_root =
+		debugfs_create_dir(wb_enc->wb_name,
+				sde_debugfs_get_root(kms));
+	if (!wb_enc->debugfs_root) {
+		SDE_ERROR("failed to create debugfs\n");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_u32("wbdone_timeout", S_IRUGO | S_IWUSR,
+			wb_enc->debugfs_root, &wb_enc->wbdone_timeout)) {
+		SDE_ERROR("failed to create debugfs/wbdone_timeout\n");
+		goto fail_remove;
+	}
+
+	if (!debugfs_create_u32("bypass_irqreg", S_IRUGO | S_IWUSR,
+			wb_enc->debugfs_root, &wb_enc->bypass_irqreg)) {
+		SDE_ERROR("failed to create debugfs/bypass_irqreg\n");
+		goto fail_remove;
+	}
+
+	return 0;
+
+fail_remove:
+	/* don't leak the directory when a file entry fails to create */
+	debugfs_remove_recursive(wb_enc->debugfs_root);
+	wb_enc->debugfs_root = NULL;
+	return -ENOMEM;
+}
+
+/**
+ * sde_encoder_phys_wb_destroy_debugfs - destroy writeback encoder debugfs
+ * @phys_enc: Pointer to physical encoder; NULL is tolerated
+ */
+static void sde_encoder_phys_wb_destroy_debugfs(
+ struct sde_encoder_phys *phys_enc)
+{
+ /* container_of arithmetic only; no dereference before the NULL check */
+ struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+
+ if (!phys_enc)
+ return;
+
+ /* removes the encoder_wb%d directory and every entry beneath it */
+ debugfs_remove_recursive(wb_enc->debugfs_root);
+}
+#else
+static int sde_encoder_phys_wb_init_debugfs(
+ struct sde_encoder_phys *phys_enc, struct sde_kms *kms)
+{
+ /* debugfs disabled: nothing to create */
+ return 0;
+}
+static void sde_encoder_phys_wb_destroy_debugfs(
+ struct sde_encoder_phys *phys_enc)
+{
+ /* debugfs disabled: nothing to remove */
+}
+#endif
+
+/**
+ * sde_encoder_phys_wb_destroy - destroy writeback encoder
+ * @phys_enc: Pointer to physical encoder; NULL is tolerated
+ */
+static void sde_encoder_phys_wb_destroy(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc;
+
+	/* check the argument before dereferencing the wb container */
+	if (!phys_enc)
+		return;
+
+	wb_enc = to_sde_encoder_phys_wb(phys_enc);
+
+	SDE_DEBUG("[wb:%d]\n", wb_enc->hw_wb->idx - WB_0);
+
+	sde_encoder_phys_wb_destroy_debugfs(phys_enc);
+
+	kfree(wb_enc);
+}
+
+/**
+ * sde_encoder_phys_wb_init_ops - initialize writeback operations
+ * @ops: Pointer to encoder operation table
+ *
+ * Fills the physical-encoder function table with the writeback
+ * implementations; trigger_start reuses the shared encoder helper.
+ */
+static void sde_encoder_phys_wb_init_ops(struct sde_encoder_phys_ops *ops)
+{
+ ops->is_master = sde_encoder_phys_wb_is_master;
+ ops->mode_set = sde_encoder_phys_wb_mode_set;
+ ops->enable = sde_encoder_phys_wb_enable;
+ ops->disable = sde_encoder_phys_wb_disable;
+ ops->destroy = sde_encoder_phys_wb_destroy;
+ ops->atomic_check = sde_encoder_phys_wb_atomic_check;
+ ops->get_hw_resources = sde_encoder_phys_wb_get_hw_resources;
+ ops->wait_for_commit_done = sde_encoder_phys_wb_wait_for_commit_done;
+ ops->prepare_for_kickoff = sde_encoder_phys_wb_prepare_for_kickoff;
+ ops->handle_post_kickoff = sde_encoder_phys_wb_handle_post_kickoff;
+ ops->trigger_start = sde_encoder_helper_trigger_start;
+}
+
+/**
+ * sde_encoder_phys_wb_init - initialize writeback encoder
+ * @p: Pointer to init info structure with initialization params
+ * Returns: Pointer to new physical encoder, or ERR_PTR on failure
+ */
+struct sde_encoder_phys *sde_encoder_phys_wb_init(
+		struct sde_enc_phys_init_params *p)
+{
+	struct sde_encoder_phys *phys_enc;
+	struct sde_encoder_phys_wb *wb_enc;
+	struct sde_hw_mdp *hw_mdp;
+	int ret = 0;
+
+	SDE_DEBUG("\n");
+
+	if (!p || !p->sde_kms) {
+		SDE_ERROR("invalid init params\n");
+		ret = -EINVAL;
+		goto fail_alloc;
+	}
+
+	wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
+	if (!wb_enc) {
+		ret = -ENOMEM;
+		goto fail_alloc;
+	}
+	wb_enc->irq_idx = -EINVAL;
+	wb_enc->wbdone_timeout = KICKOFF_TIMEOUT_MS;
+	init_completion(&wb_enc->wbdone_complete);
+
+	phys_enc = &wb_enc->base;
+
+	/* prefer the non-real-time SMMU contexts when an NRT VBIF exists */
+	if (p->sde_kms->vbif[VBIF_NRT]) {
+		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
+			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
+			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+	} else {
+		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
+			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
+		wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
+			p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+	}
+
+	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		/* PTR_ERR(NULL) is 0; never return ERR_PTR(0) == NULL */
+		ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL;
+		SDE_ERROR("failed to init hw_top: %d\n", ret);
+		goto fail_mdp_init;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+
+	/*
+	 * hw_wb resource permanently assigned to this encoder
+	 * Other resources allocated at atomic commit time by use case
+	 */
+	if (p->wb_idx != SDE_NONE) {
+		struct sde_rm_hw_iter iter;
+
+		sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_WB);
+		while (sde_rm_get_hw(&p->sde_kms->rm, &iter)) {
+			struct sde_hw_wb *hw_wb = (struct sde_hw_wb *)iter.hw;
+
+			if (hw_wb->idx == p->wb_idx) {
+				wb_enc->hw_wb = hw_wb;
+				break;
+			}
+		}
+
+		if (!wb_enc->hw_wb) {
+			ret = -EINVAL;
+			SDE_ERROR("failed to init hw_wb%d\n", p->wb_idx - WB_0);
+			goto fail_wb_init;
+		}
+	} else {
+		ret = -EINVAL;
+		SDE_ERROR("invalid wb_idx\n");
+		goto fail_wb_check;
+	}
+
+	sde_encoder_phys_wb_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->sde_kms = p->sde_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_WB_LINE;
+	phys_enc->intf_idx = p->intf_idx;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	INIT_LIST_HEAD(&wb_enc->irq_cb.list);
+
+	ret = sde_encoder_phys_wb_init_debugfs(phys_enc, p->sde_kms);
+	if (ret) {
+		SDE_ERROR("failed to init debugfs %d\n", ret);
+		goto fail_debugfs_init;
+	}
+
+	SDE_DEBUG("Created sde_encoder_phys_wb for wb %d\n",
+			wb_enc->hw_wb->idx - WB_0);
+
+	return phys_enc;
+
+fail_debugfs_init:
+fail_wb_init:
+fail_wb_check:
+fail_mdp_init:
+	kfree(wb_enc);
+fail_alloc:
+	return ERR_PTR(ret);
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
new file mode 100644
index 000000000000..6db6f989006f
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -0,0 +1,232 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <sync.h>
+#include <sw_sync.h>
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "sde_fence.h"
+
+/* look up the sync fence for fd and take a reference; NULL if fd invalid */
+void *sde_sync_get(uint64_t fd)
+{
+ /* force signed compare, fdget accepts an int argument */
+ return (signed int)fd >= 0 ? sync_fence_fdget(fd) : NULL;
+}
+
+/* drop the reference acquired by sde_sync_get; NULL fence is a no-op */
+void sde_sync_put(void *fence)
+{
+ if (fence)
+ sync_fence_put(fence);
+}
+
+/* wait for the fence to signal, bounded by timeout_ms; -EINVAL on NULL */
+int sde_sync_wait(void *fence, long timeout_ms)
+{
+ if (!fence)
+ return -EINVAL;
+ return sync_fence_wait(fence, timeout_ms);
+}
+
+/*
+ * Pack up to the first four characters of the fence's name into a
+ * big-endian 32-bit value; returns 0 for a NULL fence.
+ */
+uint32_t sde_sync_get_name_prefix(void *fence)
+{
+	const char *name;
+	uint32_t prefix = 0;
+	uint32_t idx = 0;
+
+	if (!fence)
+		return 0;
+
+	name = ((struct sync_fence *)fence)->name;
+	while (idx < sizeof(uint32_t) && name[idx] != '\0') {
+		prefix = (prefix << CHAR_BIT) | name[idx];
+		idx++;
+	}
+
+	return prefix;
+}
+
+#if IS_ENABLED(CONFIG_SW_SYNC)
+/**
+ * _sde_fence_create_fd - create fence object and return an fd for it
+ * This function is NOT thread-safe.
+ * @timeline: Timeline to associate with fence
+ * @name: Name for fence
+ * @val: Timeline value at which to signal the fence
+ * Return: File descriptor on success, or error code on error
+ */
+static int _sde_fence_create_fd(void *timeline, const char *name, uint32_t val)
+{
+ struct sync_pt *sync_pt;
+ struct sync_fence *fence;
+ signed int fd = -EINVAL;
+
+ if (!timeline) {
+ SDE_ERROR("invalid timeline\n");
+ goto exit;
+ }
+
+ if (!name)
+ name = "sde_fence";
+
+ /* create sync point */
+ sync_pt = sw_sync_pt_create(timeline, val);
+ if (sync_pt == NULL) {
+ SDE_ERROR("failed to create sync point, %s\n", name);
+ goto exit;
+ }
+
+ /* create fence */
+ fence = sync_fence_create(name, sync_pt);
+ if (fence == NULL) {
+ /* fence creation failed: the sync point is still ours to free */
+ sync_pt_free(sync_pt);
+ SDE_ERROR("couldn't create fence, %s\n", name);
+ goto exit;
+ }
+
+ /* create fd; on failure drop the fence reference we hold */
+ fd = get_unused_fd_flags(0);
+ if (fd < 0) {
+ SDE_ERROR("failed to get_unused_fd_flags(), %s\n", name);
+ sync_fence_put(fence);
+ goto exit;
+ }
+
+ sync_fence_install(fence, fd);
+exit:
+ return fd;
+}
+
+/**
+ * SDE_FENCE_TIMELINE_NAME - macro for accessing s/w timeline's name
+ * @fence: Pointer to sde fence structure
+ * Returns: Pointer to timeline name string
+ */
+#define SDE_FENCE_TIMELINE_NAME(fence) \
+ (((struct sw_sync_timeline *)fence->timeline)->obj.name)
+
+int sde_fence_init(struct sde_fence *fence,
+ const char *name,
+ uint32_t drm_id)
+{
+ if (!fence) {
+ SDE_ERROR("invalid argument(s)\n");
+ return -EINVAL;
+ }
+
+ /* timeline name falls back to "sde" when none was supplied */
+ fence->timeline = sw_sync_timeline_create(name ? name : "sde");
+ if (!fence->timeline) {
+ SDE_ERROR("failed to create timeline\n");
+ return -ENOMEM;
+ }
+
+ /* counters start equal: nothing committed, nothing signalled yet */
+ fence->commit_count = 0;
+ fence->done_count = 0;
+ fence->drm_id = drm_id;
+
+ mutex_init(&fence->fence_lock);
+ return 0;
+
+}
+
+void sde_fence_deinit(struct sde_fence *fence)
+{
+ if (!fence) {
+ SDE_ERROR("invalid fence\n");
+ return;
+ }
+
+ mutex_destroy(&fence->fence_lock);
+ /* timeline may be NULL if sde_fence_init failed part-way */
+ if (fence->timeline)
+ sync_timeline_destroy(fence->timeline);
+}
+
+int sde_fence_prepare(struct sde_fence *fence)
+{
+ if (!fence) {
+ SDE_ERROR("invalid fence\n");
+ return -EINVAL;
+ }
+
+ /* account for the new commit; fences created later signal against it */
+ mutex_lock(&fence->fence_lock);
+ ++fence->commit_count;
+ SDE_EVT32(fence->drm_id, fence->commit_count, fence->done_count);
+ mutex_unlock(&fence->fence_lock);
+ return 0;
+}
+
+int sde_fence_create(struct sde_fence *fence, uint64_t *val, int offset)
+{
+ uint32_t trigger_value;
+ int fd, rc = -EINVAL;
+
+ if (!fence || !fence->timeline || !val) {
+ SDE_ERROR("invalid argument(s), fence %pK, pval %pK\n",
+ fence, val);
+ } else {
+ /*
+ * Allow created fences to have a constant offset with respect
+ * to the timeline. This allows us to delay the fence signalling
+ * w.r.t. the commit completion (e.g., an offset of +1 would
+ * cause fences returned during a particular commit to signal
+ * after an additional delay of one commit, rather than at the
+ * end of the current one).
+ */
+ mutex_lock(&fence->fence_lock);
+ trigger_value = fence->commit_count + (int32_t)offset;
+ fd = _sde_fence_create_fd(fence->timeline,
+ SDE_FENCE_TIMELINE_NAME(fence),
+ trigger_value);
+ *val = fd;
+
+ SDE_EVT32(fence->drm_id, trigger_value, fd);
+ mutex_unlock(&fence->fence_lock);
+
+ /* a non-negative fd indicates success */
+ if (fd >= 0)
+ rc = 0;
+ }
+
+ return rc;
+}
+
+void sde_fence_signal(struct sde_fence *fence, bool is_error)
+{
+ if (!fence || !fence->timeline) {
+ SDE_ERROR("invalid fence, %pK\n", fence);
+ return;
+ }
+
+ mutex_lock(&fence->fence_lock);
+ /* done_count must never overtake commit_count */
+ if ((fence->done_count - fence->commit_count) < 0)
+ ++fence->done_count;
+ else
+ SDE_ERROR("detected extra signal attempt!\n");
+
+ /*
+ * Always advance 'done' counter,
+ * but only advance timeline if !error
+ */
+ if (!is_error) {
+ int32_t val;
+
+ /* advance the s/w timeline by however far it lags done_count */
+ val = fence->done_count;
+ val -= ((struct sw_sync_timeline *)
+ fence->timeline)->value;
+ if (val < 0)
+ SDE_ERROR("invalid value\n");
+ else
+ sw_sync_timeline_inc(fence->timeline, (int)val);
+ }
+
+ SDE_EVT32(fence->drm_id, fence->done_count,
+ ((struct sw_sync_timeline *) fence->timeline)->value);
+
+ mutex_unlock(&fence->fence_lock);
+}
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h
new file mode 100644
index 000000000000..113d16b916f7
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_fence.h
@@ -0,0 +1,177 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_FENCE_H_
+#define _SDE_FENCE_H_
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+
+#ifndef CHAR_BIT
+#define CHAR_BIT 8 /* define this if limits.h not available */
+#endif
+
+#ifdef CONFIG_SYNC
+/**
+ * sde_sync_get - Query sync fence object from a file handle
+ *
+ * On success, this function also increments the refcount of the sync fence
+ *
+ * @fd: Integer sync fence handle
+ *
+ * Return: Pointer to sync fence object, or NULL
+ */
+void *sde_sync_get(uint64_t fd);
+
+/**
+ * sde_sync_put - Releases a sync fence object acquired by @sde_sync_get
+ *
+ * This function decrements the sync fence's reference count; the object will
+ * be released if the reference count goes to zero.
+ *
+ * @fence: Pointer to sync fence
+ */
+void sde_sync_put(void *fence);
+
+/**
+ * sde_sync_wait - wait for a sync fence object to signal
+ *
+ * @fence: Pointer to sync fence
+ * @timeout_ms: Time to wait, in milliseconds. Waits forever if timeout_ms < 0
+ *
+ * Return: Zero on success, or -ETIME on timeout
+ */
+int sde_sync_wait(void *fence, long timeout_ms);
+
+/**
+ * sde_sync_get_name_prefix - get integer representation of fence name prefix
+ * @fence: Pointer to opaque fence structure
+ *
+ * Return: 32-bit integer containing first 4 characters of fence name,
+ * big-endian notation
+ */
+uint32_t sde_sync_get_name_prefix(void *fence);
+#else
+static inline void *sde_sync_get(uint64_t fd)
+{
+ return NULL;
+}
+
+static inline void sde_sync_put(void *fence)
+{
+}
+
+static inline int sde_sync_wait(void *fence, long timeout_ms)
+{
+ return 0;
+}
+
+static inline uint32_t sde_sync_get_name_prefix(void *fence)
+{
+ return 0x0;
+}
+#endif
+
+/**
+ * struct sde_fence - output fence container structure
+ * @timeline: Pointer to fence timeline
+ * @commit_count: Number of detected commits since bootup
+ * @done_count: Number of completed commits since bootup
+ * @drm_id: ID number of owning DRM Object
+ * @fence_lock: Mutex object to protect local fence variables
+ */
+struct sde_fence {
+ void *timeline;
+ int32_t commit_count;
+ int32_t done_count;
+ uint32_t drm_id;
+ struct mutex fence_lock;
+};
+
+#if IS_ENABLED(CONFIG_SW_SYNC)
+/**
+ * sde_fence_init - initialize fence object
+ * @fence: Pointer to crtc fence object
+ * @drm_id: ID number of owning DRM Object
+ * @name: Timeline name
+ * Returns: Zero on success
+ */
+int sde_fence_init(struct sde_fence *fence,
+ const char *name,
+ uint32_t drm_id);
+
+/**
+ * sde_fence_deinit - deinit fence container
+ * @fence: Pointer fence container
+ */
+void sde_fence_deinit(struct sde_fence *fence);
+
+/**
+ * sde_fence_prepare - prepare to return fences for current commit
+ * @fence: Pointer fence container
+ * Returns: Zero on success
+ */
+int sde_fence_prepare(struct sde_fence *fence);
+
+/**
+ * sde_fence_create - create output fence object
+ * @fence: Pointer fence container
+ * @val: Pointer to output value variable, fence fd will be placed here
+ * @offset: Fence signal commit offset, e.g., +1 to signal on next commit
+ * Returns: Zero on success
+ */
+int sde_fence_create(struct sde_fence *fence, uint64_t *val, int offset);
+
+/**
+ * sde_fence_signal - advance fence timeline to signal outstanding fences
+ * @fence: Pointer fence container
+ * @is_error: Set to non-zero if the commit didn't complete successfully
+ */
+void sde_fence_signal(struct sde_fence *fence, bool is_error);
+#else
+static inline int sde_fence_init(struct sde_fence *fence,
+ const char *name,
+ uint32_t drm_id)
+{
+ /* do nothing */
+ return 0;
+}
+
+static inline void sde_fence_deinit(struct sde_fence *fence)
+{
+ /* do nothing */
+}
+
+/* match the int return of the CONFIG_SW_SYNC implementation */
+static inline int sde_fence_prepare(struct sde_fence *fence)
+{
+	/* do nothing */
+	return 0;
+}
+
+static inline int sde_fence_get(struct sde_fence *fence, uint64_t *val)
+{
+ return -EINVAL;
+}
+
+static inline void sde_fence_signal(struct sde_fence *fence, bool is_error)
+{
+ /* do nothing */
+}
+
+static inline int sde_fence_create(struct sde_fence *fence, uint64_t *val,
+ int offset)
+{
+ return 0;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
+
+#endif /* _SDE_FENCE_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
new file mode 100644
index 000000000000..41180f5dec12
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -0,0 +1,996 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <uapi/drm/drm_fourcc.h>
+
+#include "sde_kms.h"
+#include "sde_formats.h"
+
+#define SDE_UBWC_META_MACRO_W_H 16
+#define SDE_UBWC_META_BLOCK_SIZE 256
+#define SDE_MAX_IMG_WIDTH 0x3FFF
+#define SDE_MAX_IMG_HEIGHT 0x3FFF
+
+/**
+ * SDE supported format packing, bpp, and other format
+ * information.
+ * SDE currently only supports interleaved RGB formats
+ * UBWC support for a pixel format is indicated by the flag,
+ * there is additional meta data plane for such formats
+ */
+
+#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \
+bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = SDE_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = flg, \
+ .num_planes = np \
+}
+
+#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \
+alpha, chroma, count, bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3)}, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = count, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = flg, \
+ .num_planes = np \
+}
+
+#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = flg, \
+ .num_planes = np \
+}
+
+#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \
+flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_PLANAR, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 1, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = flg, \
+ .num_planes = np \
+}
+
+static const struct sde_format sde_format_map[] = {
+ INTERLEAVED_RGB_FMT(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 3, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 3, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ PSEUDO_YUV_FMT(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV16,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ SDE_CHROMA_H2V1, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV61,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ SDE_CHROMA_H2V1, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(VYUY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
+ false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(UYVY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
+ false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YUYV,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YVYU,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ PLANAR_YUV_FMT(YUV420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C1_B_Cb, C2_R_Cr,
+ false, SDE_CHROMA_420, 1, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 3),
+
+ PLANAR_YUV_FMT(YVU420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C2_R_Cr, C1_B_Cb,
+ false, SDE_CHROMA_420, 1, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 3),
+};
+
+/*
+ * UBWC formats table:
+ * This table holds the UBWC formats supported.
+ * If a compression ratio needs to be used for this or any other format,
+ * the data will be passed by user-space.
+ */
+static const struct sde_format sde_format_map_ubwc[] = {
+	INTERLEAVED_RGB_FMT(RGB565,
+		0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+		false, 2, 0,
+		SDE_FETCH_UBWC, 2),
+
+	INTERLEAVED_RGB_FMT(RGBA8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, 0,
+		SDE_FETCH_UBWC, 2),
+
+	INTERLEAVED_RGB_FMT(RGBX8888,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 4, 0,
+		SDE_FETCH_UBWC, 2),
+
+	INTERLEAVED_RGB_FMT(RGBA1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		true, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_UBWC, 2),
+
+	/*
+	 * X formats carry no alpha: disable alpha here to match the
+	 * linear RGBX1010102 entry (was incorrectly set to true).
+	 */
+	INTERLEAVED_RGB_FMT(RGBX1010102,
+		COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+		false, 4, SDE_FORMAT_FLAG_DX,
+		SDE_FETCH_UBWC, 2),
+
+	PSEUDO_YUV_FMT(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
+		SDE_FETCH_UBWC, 4),
+};
+
+/* _sde_get_v_h_subsample_rate - Get subsample rates for all formats we support
+ * Note: Not using the drm_format_*_subsampling since we have formats
+ */
+static void _sde_get_v_h_subsample_rate(
+		enum sde_chroma_samp_type chroma_sample,
+		uint32_t *v_sample,
+		uint32_t *h_sample)
+{
+	uint32_t v = 1;
+	uint32_t h = 1;
+
+	if (!v_sample || !h_sample)
+		return;
+
+	/* default of 1/1 covers RGB and any unrecognized sampling */
+	if (chroma_sample == SDE_CHROMA_H2V1) {
+		h = 2;
+	} else if (chroma_sample == SDE_CHROMA_H1V2) {
+		v = 2;
+	} else if (chroma_sample == SDE_CHROMA_420) {
+		v = 2;
+		h = 2;
+	}
+
+	*v_sample = v;
+	*h_sample = h;
+}
+
+/*
+ * _sde_format_get_plane_sizes_ubwc - fill @layout with per-plane pitch and
+ * size for a UBWC (compressed) format. Each plane size is aligned to 4096
+ * bytes. Only NV12 and the listed RGB fourcc codes are supported; any other
+ * format returns -EINVAL.
+ */
+static int _sde_format_get_plane_sizes_ubwc(
+ const struct sde_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct sde_hw_fmt_layout *layout)
+{
+ int i;
+
+ memset(layout, 0, sizeof(struct sde_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ if (fmt->base.pixel_format == DRM_FORMAT_NV12) {
+ uint32_t y_stride_alignment, uv_stride_alignment;
+ uint32_t y_height_alignment, uv_height_alignment;
+ uint32_t y_tile_width = 32;
+ uint32_t y_tile_height = 8;
+ uint32_t uv_tile_width = y_tile_width / 2;
+ uint32_t uv_tile_height = y_tile_height;
+ uint32_t y_bpp_numer = 1, y_bpp_denom = 1;
+ uint32_t uv_bpp_numer = 1, uv_bpp_denom = 1;
+
+ /*
+ * NOTE(review): alignment and tile parameters below look like
+ * hardware-specific UBWC constants for NV12 — confirm against
+ * the target MDSS revision before changing.
+ */
+ y_stride_alignment = 128;
+ uv_stride_alignment = 64;
+ y_height_alignment = 32;
+ uv_height_alignment = 32;
+ y_bpp_numer = 1;
+ /* UV pitch is doubled: Cb and Cr samples share one plane */
+ uv_bpp_numer = 2;
+ y_bpp_denom = 1;
+ uv_bpp_denom = 1;
+
+ /* NV12 UBWC uses 2 bitstream planes + 2 metadata planes */
+ layout->num_planes = 4;
+ /* Y bitstream stride and plane size */
+ layout->plane_pitch[0] = ALIGN(width, y_stride_alignment);
+ layout->plane_pitch[0] = (layout->plane_pitch[0] * y_bpp_numer)
+ / y_bpp_denom;
+ layout->plane_size[0] = ALIGN(layout->plane_pitch[0] *
+ ALIGN(height, y_height_alignment), 4096);
+
+ /* CbCr bitstream stride and plane size */
+ layout->plane_pitch[1] = ALIGN(width / 2, uv_stride_alignment);
+ layout->plane_pitch[1] = (layout->plane_pitch[1] * uv_bpp_numer)
+ / uv_bpp_denom;
+ layout->plane_size[1] = ALIGN(layout->plane_pitch[1] *
+ ALIGN(height / 2, uv_height_alignment), 4096);
+
+ /* Y meta data stride and plane size (one byte per Y tile) */
+ layout->plane_pitch[2] = ALIGN(
+ DIV_ROUND_UP(width, y_tile_width), 64);
+ layout->plane_size[2] = ALIGN(layout->plane_pitch[2] *
+ ALIGN(DIV_ROUND_UP(height, y_tile_height), 16), 4096);
+
+ /* CbCr meta data stride and plane size */
+ layout->plane_pitch[3] = ALIGN(
+ DIV_ROUND_UP(width / 2, uv_tile_width), 64);
+ layout->plane_size[3] = ALIGN(layout->plane_pitch[3] *
+ ALIGN(DIV_ROUND_UP(height / 2, uv_tile_height), 16),
+ 4096);
+
+ } else if (fmt->base.pixel_format == DRM_FORMAT_RGBA8888 ||
+ fmt->base.pixel_format == DRM_FORMAT_RGBX8888 ||
+ fmt->base.pixel_format == DRM_FORMAT_RGBA1010102 ||
+ fmt->base.pixel_format == DRM_FORMAT_RGBX1010102 ||
+ fmt->base.pixel_format == DRM_FORMAT_RGB565) {
+ uint32_t stride_alignment, aligned_bitstream_width;
+
+ if (fmt->base.pixel_format == DRM_FORMAT_RGB565)
+ stride_alignment = 128;
+ else
+ stride_alignment = 64;
+ /* RGB UBWC: bitstream plane + metadata plane (plane[1] unused) */
+ layout->num_planes = 3;
+
+ /* Nothing in plane[1] */
+
+ /* RGB bitstream stride and plane size */
+ aligned_bitstream_width = ALIGN(width, stride_alignment);
+ layout->plane_pitch[0] = aligned_bitstream_width * fmt->bpp;
+ layout->plane_size[0] = ALIGN(fmt->bpp * aligned_bitstream_width
+ * ALIGN(height, 16), 4096);
+
+ /* RGB meta data stride and plane size */
+ layout->plane_pitch[2] = ALIGN(DIV_ROUND_UP(
+ aligned_bitstream_width, 16), 64);
+ layout->plane_size[2] = ALIGN(layout->plane_pitch[2] *
+ ALIGN(DIV_ROUND_UP(height, 4), 16), 4096);
+ } else {
+ DRM_ERROR("UBWC format not supported for fmt:0x%X\n",
+ fmt->base.pixel_format);
+ return -EINVAL;
+ }
+
+ /* unused plane_size entries are zero from the memset above */
+ for (i = 0; i < SDE_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
+
+/*
+ * _sde_format_get_plane_sizes_linear - fill @layout with per-plane pitch and
+ * size for an uncompressed (linear) format. Returns -EINVAL when the image
+ * dimensions are not a multiple of the format's chroma subsample factors.
+ */
+static int _sde_format_get_plane_sizes_linear(
+ const struct sde_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct sde_hw_fmt_layout *layout)
+{
+ int i;
+
+ memset(layout, 0, sizeof(struct sde_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ /* Due to memset above, only need to set planes of interest */
+ if (fmt->fetch_planes == SDE_PLANE_INTERLEAVED) {
+ /* single packed plane, bpp bytes per pixel */
+ layout->num_planes = 1;
+ layout->plane_size[0] = width * height * layout->format->bpp;
+ layout->plane_pitch[0] = width * layout->format->bpp;
+ } else {
+ uint32_t v_subsample, h_subsample;
+ uint32_t chroma_samp;
+
+ chroma_samp = fmt->chroma_sample;
+ _sde_get_v_h_subsample_rate(chroma_samp, &v_subsample,
+ &h_subsample);
+
+ if (width % h_subsample || height % v_subsample) {
+ DRM_ERROR("mismatch in subsample vs dimensions\n");
+ return -EINVAL;
+ }
+
+ /* plane 0 = luma, plane 1 = subsampled chroma */
+ layout->plane_pitch[0] = width;
+ layout->plane_pitch[1] = width / h_subsample;
+ layout->plane_size[0] = layout->plane_pitch[0] * height;
+ layout->plane_size[1] = layout->plane_pitch[1] *
+ (height / v_subsample);
+
+ if (fmt->fetch_planes == SDE_PLANE_PSEUDO_PLANAR) {
+ /* Cb and Cr interleaved in one plane: double plane 1 */
+ layout->num_planes = 2;
+ layout->plane_size[1] *= 2;
+ layout->plane_pitch[1] *= 2;
+ } else {
+ /* planar: separate Cb and Cr planes of equal size */
+ layout->num_planes = 3;
+ layout->plane_size[2] = layout->plane_size[1];
+ layout->plane_pitch[2] = layout->plane_pitch[1];
+ }
+ }
+
+ for (i = 0; i < SDE_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
+
+/*
+ * _sde_format_get_plane_sizes - validate arguments and dispatch to the UBWC
+ * or linear plane-size helper based on the format's fetch mode.
+ * Return: 0 on success, -EINVAL on bad pointers, -ERANGE on oversize image.
+ */
+static int _sde_format_get_plane_sizes(
+ const struct sde_format *fmt,
+ const uint32_t w,
+ const uint32_t h,
+ struct sde_hw_fmt_layout *layout)
+{
+ if (!layout || !fmt) {
+ DRM_ERROR("invalid pointer\n");
+ return -EINVAL;
+ }
+
+ if ((w > SDE_MAX_IMG_WIDTH) || (h > SDE_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ if (SDE_FORMAT_IS_UBWC(fmt))
+ return _sde_format_get_plane_sizes_ubwc(fmt, w, h, layout);
+
+ return _sde_format_get_plane_sizes_linear(fmt, w, h, layout);
+}
+
+/*
+ * _sde_format_populate_addrs_ubwc - derive the four SDE plane addresses of a
+ * UBWC buffer from the framebuffer base iova, using the plane sizes already
+ * computed in @layout. The UBWC buffer packs meta and bitstream data in a
+ * different order than the SDE plane programming expects (see diagrams).
+ * Return: 0 on success, -EINVAL on bad pointers, -EFAULT if no base iova.
+ */
+static int _sde_format_populate_addrs_ubwc(
+ int mmu_id,
+ struct drm_framebuffer *fb,
+ struct sde_hw_fmt_layout *layout)
+{
+ /*
+ * NOTE(review): iova kept in a 32-bit value; assumes SMMU mappings
+ * for these buffers fit in 32 bits — confirm for the target SoC.
+ */
+ uint32_t base_addr;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid pointers\n");
+ return -EINVAL;
+ }
+
+ base_addr = msm_framebuffer_iova(fb, mmu_id, 0);
+ if (!base_addr) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+
+ /* Per-format logic for verifying active planes */
+ if (SDE_FORMAT_IS_YUV(layout->format)) {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** SDE PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | Y meta | ** | Y bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Y bitstream | ** | CbCr bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Cbcr metadata | ** | Y meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | CbCr bitstream | ** | CbCr meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /************************************************/
+
+ /* configure Y bitstream plane (follows Y meta in buffer) */
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+
+ /* configure CbCr bitstream plane (after Y meta+data+CbCr meta) */
+ layout->plane_addr[1] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2] + layout->plane_size[3];
+
+ /* configure Y metadata plane (first region of the buffer) */
+ layout->plane_addr[2] = base_addr;
+
+ /* configure CbCr metadata plane */
+ layout->plane_addr[3] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2];
+
+ } else {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** SDE PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | RGB meta | ** | RGB bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | RGB bitstream | ** | NONE | */
+ /* | data | ** | | */
+ /* ------------------- ** -------------------- */
+ /* ** | RGB meta | */
+ /* ** | plane | */
+ /* ** -------------------- */
+ /************************************************/
+
+ /* RGB: bitstream follows metadata; plane 1 and 3 unused */
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+ layout->plane_addr[1] = 0;
+ layout->plane_addr[2] = base_addr;
+ layout->plane_addr[3] = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * _sde_format_populate_addrs_linear - verify framebuffer pitches against the
+ * expected layout, then fetch the per-plane iovas for a linear format.
+ * Return: 0 on success, -EINVAL on bad pointers or pitch mismatch, -EFAULT
+ * if any plane address cannot be retrieved.
+ */
+static int _sde_format_populate_addrs_linear(
+ int mmu_id,
+ struct drm_framebuffer *fb,
+ struct sde_hw_fmt_layout *layout)
+{
+ unsigned int i;
+
+ /* guard against bad pointers, consistent with the UBWC path */
+ if (!fb || !layout) {
+ DRM_ERROR("invalid pointers\n");
+ return -EINVAL;
+ }
+
+ /* Can now check the pitches given vs pitches expected */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (layout->plane_pitch[i] != fb->pitches[i]) {
+ DRM_ERROR("plane %u expected pitch %u, fb %u\n",
+ i, layout->plane_pitch[i], fb->pitches[i]);
+ return -EINVAL;
+ }
+ }
+
+ /* Populate addresses for simple formats here */
+ for (i = 0; i < layout->num_planes; ++i) {
+ layout->plane_addr[i] = msm_framebuffer_iova(fb, mmu_id, i);
+ if (!layout->plane_addr[i]) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * sde_format_populate_layout - compute plane sizes for the fb's format and
+ * populate the plane addresses from the fb's gem objects.
+ * Return: negative error code on failure, -EAGAIN when the call succeeded
+ * but every plane address is unchanged from what @layout already held, and
+ * 0 when new addresses were populated.
+ */
+int sde_format_populate_layout(
+ int mmu_id,
+ struct drm_framebuffer *fb,
+ struct sde_hw_fmt_layout *layout)
+{
+ uint32_t plane_addr[SDE_MAX_PLANES];
+ int i, ret;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ if ((fb->width > SDE_MAX_IMG_WIDTH) ||
+ (fb->height > SDE_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ layout->format = to_sde_format(msm_framebuffer_format(fb));
+
+ /* Populate the plane sizes etc via get_format */
+ ret = _sde_format_get_plane_sizes(layout->format, fb->width, fb->height,
+ layout);
+ if (ret)
+ return ret;
+
+ /* snapshot current addresses to detect whether anything changes */
+ for (i = 0; i < SDE_MAX_PLANES; ++i)
+ plane_addr[i] = layout->plane_addr[i];
+
+ /* Populate the addresses given the fb */
+ if (SDE_FORMAT_IS_UBWC(layout->format))
+ ret = _sde_format_populate_addrs_ubwc(mmu_id, fb, layout);
+ else
+ ret = _sde_format_populate_addrs_linear(mmu_id, fb, layout);
+
+ /* check if anything changed */
+ if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+/*
+ * _sde_format_calc_offset_linear - advance the plane addresses in @source by
+ * the byte offset of pixel (@x, @y), honoring chroma subsampling for the
+ * chroma planes. No-op when x and y are both zero.
+ */
+static void _sde_format_calc_offset_linear(struct sde_hw_fmt_layout *source,
+ u32 x, u32 y)
+{
+ if ((x == 0) && (y == 0))
+ return;
+
+ /* row offset applies to plane 0 for every layout */
+ source->plane_addr[0] += y * source->plane_pitch[0];
+
+ if (source->num_planes == 1) {
+ /* interleaved: x advances by bytes-per-pixel */
+ source->plane_addr[0] += x * source->format->bpp;
+ } else {
+ uint32_t xoff, yoff;
+ uint32_t v_subsample = 1;
+ uint32_t h_subsample = 1;
+
+ _sde_get_v_h_subsample_rate(source->format->chroma_sample,
+ &v_subsample, &h_subsample);
+
+ xoff = x / h_subsample;
+ yoff = y / v_subsample;
+
+ /* luma plane assumed 1 byte per sample */
+ source->plane_addr[0] += x;
+ source->plane_addr[1] += xoff +
+ (yoff * source->plane_pitch[1]);
+ if (source->num_planes == 2) /* pseudo planar */
+ /* interleaved CbCr: 2 bytes per sample, so add xoff again */
+ source->plane_addr[1] += xoff;
+ else /* planar */
+ source->plane_addr[2] += xoff +
+ (yoff * source->plane_pitch[2]);
+ }
+}
+
+/*
+ * sde_format_populate_layout_with_roi - populate @layout from @fb, then crop
+ * it to @roi. Cropping is only supported for linear formats; for non-linear
+ * formats the roi must match the full fb. @roi may be NULL (no crop).
+ * Return: 0 on success, negative error code on failure.
+ */
+int sde_format_populate_layout_with_roi(
+ int mmu_id,
+ struct drm_framebuffer *fb,
+ struct sde_rect *roi,
+ struct sde_hw_fmt_layout *layout)
+{
+ int ret;
+
+ ret = sde_format_populate_layout(mmu_id, fb, layout);
+ if (ret || !roi)
+ return ret;
+
+ if (!roi->w || !roi->h || (roi->x + roi->w > fb->width) ||
+ (roi->y + roi->h > fb->height)) {
+ DRM_ERROR("invalid roi=[%d,%d,%d,%d], fb=[%u,%u]\n",
+ roi->x, roi->y, roi->w, roi->h,
+ fb->width, fb->height);
+ ret = -EINVAL;
+ } else if (SDE_FORMAT_IS_LINEAR(layout->format)) {
+ /* shift plane addresses to the roi origin and shrink extents */
+ _sde_format_calc_offset_linear(layout, roi->x, roi->y);
+ layout->width = roi->w;
+ layout->height = roi->h;
+ } else if (roi->x || roi->y || (roi->w != fb->width) ||
+ (roi->h != fb->height)) {
+ DRM_ERROR("non-linear layout with roi not supported\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * sde_format_check_modified_format - verify that the gem buffers supplied
+ * for a modified (e.g. UBWC) framebuffer are present and large enough in
+ * total to hold the computed layout.
+ * Return: 0 on success, negative error code on failure.
+ */
+int sde_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos)
+{
+ int ret, i, num_base_fmt_planes;
+ const struct sde_format *fmt;
+ struct sde_hw_fmt_layout layout;
+ uint32_t bos_total_size = 0;
+
+ if (!msm_fmt || !cmd || !bos) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ fmt = to_sde_format(msm_fmt);
+ num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
+
+ ret = _sde_format_get_plane_sizes(fmt, cmd->width, cmd->height,
+ &layout);
+ if (ret)
+ return ret;
+
+ /*
+ * NOTE(review): if several planes reference the same gem object its
+ * size is counted once per plane here — confirm callers pass distinct
+ * objects per plane, or the total-size check is too permissive.
+ */
+ for (i = 0; i < num_base_fmt_planes; i++) {
+ if (!bos[i]) {
+ DRM_ERROR("invalid handle for plane %d\n", i);
+ return -EINVAL;
+ }
+ bos_total_size += bos[i]->size;
+ }
+
+ if (bos_total_size < layout.total_size) {
+ DRM_ERROR("buffers total size too small %u expected %u\n",
+ bos_total_size, layout.total_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * sde_get_sde_format_ext - look up the sde_format for a fourcc code plus an
+ * optional format modifier. Supports exactly zero or one distinct modifier:
+ * all planes must carry the same value. Modifier 0 selects the linear map,
+ * DRM_FORMAT_MOD_QCOM_COMPRESSED selects the UBWC map.
+ * Return: matching format, or NULL if unsupported.
+ */
+const struct sde_format *sde_get_sde_format_ext(
+ const uint32_t format,
+ const uint64_t *modifiers,
+ const uint32_t modifiers_len)
+{
+ uint32_t i = 0;
+ uint64_t mod0 = 0;
+ const struct sde_format *fmt = NULL;
+ const struct sde_format *map = NULL;
+ ssize_t map_size = 0;
+
+ /*
+ * Currently only support exactly zero or one modifier.
+ * All planes used must specify the same modifier.
+ */
+ if (modifiers_len && !modifiers) {
+ DRM_ERROR("invalid modifiers array\n");
+ return NULL;
+ } else if (modifiers && modifiers_len && modifiers[0]) {
+ mod0 = modifiers[0];
+ DBG("plane format modifier 0x%llX", mod0);
+ /* reject per-plane modifier mismatches */
+ for (i = 1; i < modifiers_len; i++) {
+ if (modifiers[i] != mod0) {
+ DRM_ERROR("bad fmt mod 0x%llX on plane %d\n",
+ modifiers[i], i);
+ return NULL;
+ }
+ }
+ }
+
+ /* pick the lookup table that matches the modifier */
+ switch (mod0) {
+ case 0:
+ map = sde_format_map;
+ map_size = ARRAY_SIZE(sde_format_map);
+ break;
+ case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+ map = sde_format_map_ubwc;
+ map_size = ARRAY_SIZE(sde_format_map_ubwc);
+ DBG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED", format);
+ break;
+ default:
+ DRM_ERROR("unsupported format modifier %llX\n", mod0);
+ return NULL;
+ }
+
+ /* linear scan: tables are small and this is not a hot path */
+ for (i = 0; i < map_size; i++) {
+ if (format == map[i].base.pixel_format) {
+ fmt = &map[i];
+ break;
+ }
+ }
+
+ if (fmt == NULL)
+ DRM_ERROR("unsupported fmt 0x%X modifier 0x%llX\n",
+ format, mod0);
+ else
+ DBG("fmt %s mod 0x%llX ubwc %d yuv %d",
+ drm_get_format_name(format), mod0,
+ SDE_FORMAT_IS_UBWC(fmt),
+ SDE_FORMAT_IS_YUV(fmt));
+
+ return fmt;
+}
+
+/*
+ * sde_get_msm_format - msm_kms lookup callback; thin wrapper that returns
+ * the msm_format base of the matching sde_format, or NULL if unsupported.
+ * @kms is unused here but required by the callback signature.
+ */
+const struct msm_format *sde_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t *modifiers,
+ const uint32_t modifiers_len)
+{
+ const struct sde_format *fmt = sde_get_sde_format_ext(format,
+ modifiers, modifiers_len);
+ if (fmt)
+ return &fmt->base;
+ return NULL;
+}
+
+/*
+ * sde_populate_formats - copy fourcc codes (and optionally modifiers) from a
+ * zero-terminated extended format list into the caller's arrays. When
+ * @pixel_modifiers is NULL, consecutive duplicate fourcc codes are collapsed
+ * to a single entry.
+ * Return: number of array elements populated.
+ */
+uint32_t sde_populate_formats(
+ const struct sde_format_extended *format_list,
+ uint32_t *pixel_formats,
+ uint64_t *pixel_modifiers,
+ uint32_t pixel_formats_max)
+{
+ uint32_t i, fourcc_format;
+
+ if (!format_list || !pixel_formats)
+ return 0;
+
+ /* list is terminated by a zero fourcc_format entry */
+ for (i = 0, fourcc_format = 0;
+ format_list->fourcc_format && i < pixel_formats_max;
+ ++format_list) {
+ /* verify if listed format is in sde_format_map? */
+
+ /* optionally return modified formats */
+ if (pixel_modifiers) {
+ /* assume same modifier for all fb planes */
+ pixel_formats[i] = format_list->fourcc_format;
+ pixel_modifiers[i++] = format_list->modifier;
+ } else {
+ /* assume base formats grouped together */
+ if (fourcc_format != format_list->fourcc_format) {
+ fourcc_format = format_list->fourcc_format;
+ pixel_formats[i++] = fourcc_format;
+ }
+ }
+ }
+
+ return i;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
new file mode 100644
index 000000000000..5dcdfbb653ed
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -0,0 +1,107 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_FORMATS_H
+#define _SDE_FORMATS_H
+
+#include <drm/drm_fourcc.h>
+#include "sde_hw_mdss.h"
+
+/**
+ * sde_get_sde_format_ext() - Returns sde format structure pointer.
+ * @format: DRM FourCC Code
+ * @modifiers: format modifier array from client, one per plane
+ * @modifiers_len: number of planes and array size for plane_modifiers
+ */
+const struct sde_format *sde_get_sde_format_ext(
+ const uint32_t format,
+ const uint64_t *modifiers,
+ const uint32_t modifiers_len);
+
+#define sde_get_sde_format(f) sde_get_sde_format_ext(f, NULL, 0)
+
+/**
+ * sde_get_msm_format - get the msm_format base of an sde_format; callback
+ * function registered with the msm_kms layer
+ * @kms: kms driver
+ * @format: DRM FourCC Code
+ * @modifiers: format modifier array from client, one per plane
+ * @modifiers_len: number of planes and array size for plane_modifiers
+ */
+const struct msm_format *sde_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t *modifiers,
+ const uint32_t modifiers_len);
+
+/**
+ * sde_populate_formats - populate the given array with fourcc codes supported
+ * @format_list: pointer to list of possible formats
+ * @pixel_formats: array to populate with fourcc codes
+ * @pixel_modifiers: array to populate with drm modifiers, can be NULL
+ * @pixel_formats_max: length of pixel formats array
+ * Return: number of elements populated
+ */
+uint32_t sde_populate_formats(
+ const struct sde_format_extended *format_list,
+ uint32_t *pixel_formats,
+ uint64_t *pixel_modifiers,
+ uint32_t pixel_formats_max);
+
+/**
+ * sde_format_check_modified_format - validate format and buffers for
+ * sde non-standard, i.e. modified format
+ * @kms: kms driver
+ * @msm_fmt: pointer to the msm_fmt base pointer of an sde_format
+ * @cmd: fb_cmd2 structure user request
+ * @bos: gem buffer object list
+ *
+ * Return: error code on failure, 0 on success
+ */
+int sde_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
+
+/**
+ * sde_format_populate_layout - populate the given format layout based on
+ * mmu, fb, and format found in the fb
+ * @mmu_id: mmu id handle
+ * @fb: framebuffer pointer
+ * @fmtl: format layout structure to populate
+ *
+ * Return: error code on failure, -EAGAIN if success but the addresses
+ * are the same as before or 0 if new addresses were populated
+ */
+int sde_format_populate_layout(
+ int mmu_id,
+ struct drm_framebuffer *fb,
+ struct sde_hw_fmt_layout *fmtl);
+
+/**
+ * sde_format_populate_layout_with_roi - populate the given format layout
+ * based on mmu, fb, roi, and format found in the fb
+ * @mmu_id: mmu id handle
+ * @fb: framebuffer pointer
+ * @roi: region of interest (optional)
+ * @fmtl: format layout structure to populate
+ *
+ * Return: error code on failure, 0 on success
+ */
+int sde_format_populate_layout_with_roi(
+ int mmu_id,
+ struct drm_framebuffer *fb,
+ struct sde_rect *roi,
+ struct sde_hw_fmt_layout *fmtl);
+
+#endif /*_SDE_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
new file mode 100644
index 000000000000..31a6d985c38f
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -0,0 +1,1998 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_catalog_format.h"
+#include "sde_kms.h"
+
+/*************************************************************
+ * MACRO DEFINITION
+ *************************************************************/
+
+/**
+ * Max hardware block in certain hardware. For ex: sspp pipes
+ * can have QSEED, pcc, igc, pa, csc, etc. This count is max
+ * 12 based on software design. It should be increased if any of the
+ * hardware block has more subblocks.
+ */
+#define MAX_SDE_HW_BLK 12
+
+/* each entry will have register address and bit offset in that register */
+#define MAX_BIT_OFFSET 2
+
+/* default line width for sspp */
+#define DEFAULT_SDE_LINE_WIDTH 2048
+
+/* max mixer blend stages */
+#define DEFAULT_SDE_MIXER_BLENDSTAGES 7
+
+/* max bank bit for macro tile and ubwc format */
+#define DEFAULT_SDE_HIGHEST_BANK_BIT 15
+
+/* default hardware block size if dtsi entry is not present */
+#define DEFAULT_SDE_HW_BLOCK_LEN 0x100
+
+/* default rects for multi rect case */
+#define DEFAULT_SDE_SSPP_MAX_RECTS 1
+
+/* total number of intf - dp, dsi, hdmi */
+#define INTF_COUNT 3
+
+#define MAX_SSPP_UPSCALE 20
+#define MAX_SSPP_DOWNSCALE 4
+#define SSPP_UNITY_SCALE 1
+
+#define MAX_HORZ_DECIMATION 4
+#define MAX_VERT_DECIMATION 4
+
+#define MAX_SPLIT_DISPLAY_CTL 2
+#define MAX_PP_SPLIT_DISPLAY_CTL 1
+
+#define MDSS_BASE_OFFSET 0x0
+
+#define ROT_LM_OFFSET 3
+#define LINE_LM_OFFSET 5
+#define LINE_MODE_WB_OFFSET 2
+
+/* maximum XIN halt timeout in usec */
+#define VBIF_XIN_HALT_TIMEOUT 0x4000
+
+#define DEFAULT_CREQ_LUT_NRT 0x0
+#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
+
+/* access property value based on prop_type and hardware index */
+#define PROP_VALUE_ACCESS(p, i, j) ((p + i)->value[j])
+
+/*
+ * access element within PROP_TYPE_BIT_OFFSET_ARRAYs based on prop_type,
+ * hardware index and offset array index
+ */
+#define PROP_BITVALUE_ACCESS(p, i, j, k) ((p + i)->bit_value[j][k])
+
+/*************************************************************
+ * DTSI PROPERTY INDEX
+ *************************************************************/
+enum {
+ HW_OFF,
+ HW_LEN,
+ HW_PROP_MAX,
+};
+
+enum sde_prop {
+ SDE_OFF,
+ SDE_LEN,
+ SSPP_LINEWIDTH,
+ MIXER_LINEWIDTH,
+ MIXER_BLEND,
+ WB_LINEWIDTH,
+ BANK_BIT,
+ QSEED_TYPE,
+ CSC_TYPE,
+ PANIC_PER_PIPE,
+ CDP,
+ SRC_SPLIT,
+ SDE_PROP_MAX,
+};
+
+enum {
+ PERF_MAX_BW_LOW,
+ PERF_MAX_BW_HIGH,
+ PERF_PROP_MAX,
+};
+
+enum {
+ SSPP_OFF,
+ SSPP_SIZE,
+ SSPP_TYPE,
+ SSPP_XIN,
+ SSPP_CLK_CTRL,
+ SSPP_CLK_STATUS,
+ SSPP_DANGER,
+ SSPP_SAFE,
+ SSPP_MAX_RECTS,
+ SSPP_SCALE_SIZE,
+ SSPP_VIG_BLOCKS,
+ SSPP_RGB_BLOCKS,
+ SSPP_PROP_MAX,
+};
+
+enum {
+ VIG_QSEED_OFF,
+ VIG_CSC_OFF,
+ VIG_HSIC_PROP,
+ VIG_MEMCOLOR_PROP,
+ VIG_PCC_PROP,
+ VIG_PROP_MAX,
+};
+
+enum {
+ RGB_SCALER_OFF,
+ RGB_PCC_PROP,
+ RGB_PROP_MAX,
+};
+
+enum {
+ INTF_OFF,
+ INTF_LEN,
+ INTF_PREFETCH,
+ INTF_TYPE,
+ INTF_PROP_MAX,
+};
+
+enum {
+ PP_OFF,
+ PP_LEN,
+ TE_OFF,
+ TE_LEN,
+ TE2_OFF,
+ TE2_LEN,
+ DSC_OFF,
+ DSC_LEN,
+ PP_SLAVE,
+ PP_PROP_MAX,
+};
+
+enum {
+ DSPP_OFF,
+ DSPP_SIZE,
+ DSPP_BLOCKS,
+ DSPP_PROP_MAX,
+};
+
+enum {
+ DSPP_IGC_PROP,
+ DSPP_PCC_PROP,
+ DSPP_GC_PROP,
+ DSPP_HSIC_PROP,
+ DSPP_MEMCOLOR_PROP,
+ DSPP_SIXZONE_PROP,
+ DSPP_GAMUT_PROP,
+ DSPP_DITHER_PROP,
+ DSPP_HIST_PROP,
+ DSPP_VLUT_PROP,
+ DSPP_BLOCKS_PROP_MAX,
+};
+
+enum {
+ AD_OFF,
+ AD_VERSION,
+ AD_PROP_MAX,
+};
+
+enum {
+ MIXER_OFF,
+ MIXER_LEN,
+ MIXER_BLOCKS,
+ MIXER_PROP_MAX,
+};
+
+enum {
+ MIXER_GC_PROP,
+ MIXER_BLOCKS_PROP_MAX,
+};
+
+enum {
+ WB_OFF,
+ WB_LEN,
+ WB_ID,
+ WB_XIN_ID,
+ WB_CLK_CTRL,
+ WB_PROP_MAX,
+};
+
+enum {
+ VBIF_OFF,
+ VBIF_LEN,
+ VBIF_ID,
+ VBIF_DEFAULT_OT_RD_LIMIT,
+ VBIF_DEFAULT_OT_WR_LIMIT,
+ VBIF_DYNAMIC_OT_RD_LIMIT,
+ VBIF_DYNAMIC_OT_WR_LIMIT,
+ VBIF_PROP_MAX,
+};
+
+/*************************************************************
+ * dts property definition
+ *************************************************************/
+enum prop_type {
+ PROP_TYPE_BOOL,
+ PROP_TYPE_U32,
+ PROP_TYPE_U32_ARRAY,
+ PROP_TYPE_STRING,
+ PROP_TYPE_STRING_ARRAY,
+ PROP_TYPE_BIT_OFFSET_ARRAY,
+ PROP_TYPE_NODE,
+};
+
+struct sde_prop_type {
+ /* use property index from enum property for readability purpose */
+ u8 id;
+ /* it should be property name based on dtsi documentation */
+ char *prop_name;
+ /**
+ * if property is marked mandatory then it will fail parsing
+ * when property is not present
+ */
+ u32 is_mandatory;
+ /* property type based on "enum prop_type" */
+ enum prop_type type;
+};
+
+struct sde_prop_value {
+ u32 value[MAX_SDE_HW_BLK];
+ u32 bit_value[MAX_SDE_HW_BLK][MAX_BIT_OFFSET];
+};
+
+/*************************************************************
+ * dts property list
+ *************************************************************/
+static struct sde_prop_type sde_prop[] = {
+ {SDE_OFF, "qcom,sde-off", true, PROP_TYPE_U32},
+ {SDE_LEN, "qcom,sde-len", false, PROP_TYPE_U32},
+ {SSPP_LINEWIDTH, "qcom,sde-sspp-linewidth", false, PROP_TYPE_U32},
+ {MIXER_LINEWIDTH, "qcom,sde-mixer-linewidth", false, PROP_TYPE_U32},
+ {MIXER_BLEND, "qcom,sde-mixer-blendstages", false, PROP_TYPE_U32},
+ {WB_LINEWIDTH, "qcom,sde-wb-linewidth", false, PROP_TYPE_U32},
+ {BANK_BIT, "qcom,sde-highest-bank-bit", false, PROP_TYPE_U32},
+ {QSEED_TYPE, "qcom,sde-qseed-type", false, PROP_TYPE_STRING},
+ {CSC_TYPE, "qcom,sde-csc-type", false, PROP_TYPE_STRING},
+ {PANIC_PER_PIPE, "qcom,sde-panic-per-pipe", false, PROP_TYPE_BOOL},
+ {CDP, "qcom,sde-has-cdp", false, PROP_TYPE_BOOL},
+ {SRC_SPLIT, "qcom,sde-has-src-split", false, PROP_TYPE_BOOL},
+};
+
+static struct sde_prop_type sde_perf_prop[] = {
+ {PERF_MAX_BW_LOW, "qcom,sde-max-bw-low-kbps", false, PROP_TYPE_U32},
+ {PERF_MAX_BW_HIGH, "qcom,sde-max-bw-high-kbps", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type sspp_prop[] = {
+ {SSPP_OFF, "qcom,sde-sspp-off", true, PROP_TYPE_U32_ARRAY},
+ {SSPP_SIZE, "qcom,sde-sspp-src-size", false, PROP_TYPE_U32},
+ {SSPP_TYPE, "qcom,sde-sspp-type", true, PROP_TYPE_STRING_ARRAY},
+ {SSPP_XIN, "qcom,sde-sspp-xin-id", true, PROP_TYPE_U32_ARRAY},
+ {SSPP_CLK_CTRL, "qcom,sde-sspp-clk-ctrl", false,
+ PROP_TYPE_BIT_OFFSET_ARRAY},
+ {SSPP_CLK_STATUS, "qcom,sde-sspp-clk-status", false,
+ PROP_TYPE_BIT_OFFSET_ARRAY},
+ {SSPP_DANGER, "qcom,sde-sspp-danger-lut", false, PROP_TYPE_U32_ARRAY},
+ {SSPP_SAFE, "qcom,sde-sspp-safe-lut", false, PROP_TYPE_U32_ARRAY},
+ {SSPP_MAX_RECTS, "qcom,sde-sspp-max-rects", false, PROP_TYPE_U32_ARRAY},
+ {SSPP_SCALE_SIZE, "qcom,sde-sspp-scale-size", false, PROP_TYPE_U32},
+ {SSPP_VIG_BLOCKS, "qcom,sde-sspp-vig-blocks", false, PROP_TYPE_NODE},
+ {SSPP_RGB_BLOCKS, "qcom,sde-sspp-rgb-blocks", false, PROP_TYPE_NODE},
+};
+
+static struct sde_prop_type vig_prop[] = {
+ {VIG_QSEED_OFF, "qcom,sde-vig-qseed-off", false, PROP_TYPE_U32},
+ {VIG_CSC_OFF, "qcom,sde-vig-csc-off", false, PROP_TYPE_U32},
+ {VIG_HSIC_PROP, "qcom,sde-vig-hsic", false, PROP_TYPE_U32_ARRAY},
+ {VIG_MEMCOLOR_PROP, "qcom,sde-vig-memcolor", false,
+ PROP_TYPE_U32_ARRAY},
+ {VIG_PCC_PROP, "qcom,sde-vig-pcc", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type rgb_prop[] = {
+ {RGB_SCALER_OFF, "qcom,sde-rgb-scaler-off", false, PROP_TYPE_U32},
+ {RGB_PCC_PROP, "qcom,sde-rgb-pcc", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type ctl_prop[] = {
+ {HW_OFF, "qcom,sde-ctl-off", true, PROP_TYPE_U32_ARRAY},
+ {HW_LEN, "qcom,sde-ctl-size", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type mixer_prop[] = {
+ {MIXER_OFF, "qcom,sde-mixer-off", true, PROP_TYPE_U32_ARRAY},
+ {MIXER_LEN, "qcom,sde-mixer-size", false, PROP_TYPE_U32},
+ {MIXER_BLOCKS, "qcom,sde-mixer-blocks", false, PROP_TYPE_NODE},
+};
+
+static struct sde_prop_type mixer_blocks_prop[] = {
+ {MIXER_GC_PROP, "qcom,sde-mixer-gc", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type dspp_prop[] = {
+ {DSPP_OFF, "qcom,sde-dspp-off", true, PROP_TYPE_U32_ARRAY},
+ {DSPP_SIZE, "qcom,sde-dspp-size", false, PROP_TYPE_U32},
+ {DSPP_BLOCKS, "qcom,sde-dspp-blocks", false, PROP_TYPE_NODE},
+};
+
+static struct sde_prop_type dspp_blocks_prop[] = {
+ {DSPP_IGC_PROP, "qcom,sde-dspp-igc", false, PROP_TYPE_U32_ARRAY},
+ {DSPP_PCC_PROP, "qcom,sde-dspp-pcc", false, PROP_TYPE_U32_ARRAY},
+ {DSPP_GC_PROP, "qcom,sde-dspp-gc", false, PROP_TYPE_U32_ARRAY},
+ {DSPP_HSIC_PROP, "qcom,sde-dspp-hsic", false, PROP_TYPE_U32_ARRAY},
+ {DSPP_MEMCOLOR_PROP, "qcom,sde-dspp-memcolor", false,
+ PROP_TYPE_U32_ARRAY},
+ {DSPP_SIXZONE_PROP, "qcom,sde-dspp-sixzone", false,
+ PROP_TYPE_U32_ARRAY},
+ {DSPP_GAMUT_PROP, "qcom,sde-dspp-gamut", false, PROP_TYPE_U32_ARRAY},
+ {DSPP_DITHER_PROP, "qcom,sde-dspp-dither", false, PROP_TYPE_U32_ARRAY},
+ {DSPP_HIST_PROP, "qcom,sde-dspp-hist", false, PROP_TYPE_U32_ARRAY},
+ {DSPP_VLUT_PROP, "qcom,sde-dspp-vlut", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type ad_prop[] = {
+ {AD_OFF, "qcom,sde-dspp-ad-off", false, PROP_TYPE_U32_ARRAY},
+ {AD_VERSION, "qcom,sde-dspp-ad-version", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type pp_prop[] = {
+ {PP_OFF, "qcom,sde-pp-off", true, PROP_TYPE_U32_ARRAY},
+ {PP_LEN, "qcom,sde-pp-size", false, PROP_TYPE_U32},
+ {TE_OFF, "qcom,sde-te-off", false, PROP_TYPE_U32_ARRAY},
+ {TE_LEN, "qcom,sde-te-size", false, PROP_TYPE_U32},
+ {TE2_OFF, "qcom,sde-te2-off", false, PROP_TYPE_U32_ARRAY},
+ {TE2_LEN, "qcom,sde-te2-size", false, PROP_TYPE_U32},
+ {DSC_OFF, "qcom,sde-dsc-off", false, PROP_TYPE_U32_ARRAY},
+ {DSC_LEN, "qcom,sde-dsc-size", false, PROP_TYPE_U32},
+ {PP_SLAVE, "qcom,sde-pp-slave", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type cdm_prop[] = {
+ {HW_OFF, "qcom,sde-cdm-off", false, PROP_TYPE_U32_ARRAY},
+ {HW_LEN, "qcom,sde-cdm-size", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type intf_prop[] = {
+ {INTF_OFF, "qcom,sde-intf-off", true, PROP_TYPE_U32_ARRAY},
+ {INTF_LEN, "qcom,sde-intf-size", false, PROP_TYPE_U32},
+ {INTF_PREFETCH, "qcom,sde-intf-max-prefetch-lines", false,
+ PROP_TYPE_U32_ARRAY},
+ {INTF_TYPE, "qcom,sde-intf-type", false, PROP_TYPE_STRING_ARRAY},
+};
+
+static struct sde_prop_type wb_prop[] = {
+ {WB_OFF, "qcom,sde-wb-off", true, PROP_TYPE_U32_ARRAY},
+ {WB_LEN, "qcom,sde-wb-size", false, PROP_TYPE_U32},
+ {WB_ID, "qcom,sde-wb-id", true, PROP_TYPE_U32_ARRAY},
+ {WB_XIN_ID, "qcom,sde-wb-xin-id", false, PROP_TYPE_U32_ARRAY},
+ {WB_CLK_CTRL, "qcom,sde-wb-clk-ctrl", false,
+ PROP_TYPE_BIT_OFFSET_ARRAY},
+};
+
+static struct sde_prop_type vbif_prop[] = {
+ {VBIF_OFF, "qcom,sde-vbif-off", true, PROP_TYPE_U32_ARRAY},
+ {VBIF_LEN, "qcom,sde-vbif-size", false, PROP_TYPE_U32},
+ {VBIF_ID, "qcom,sde-vbif-id", false, PROP_TYPE_U32_ARRAY},
+ {VBIF_DEFAULT_OT_RD_LIMIT, "qcom,sde-vbif-default-ot-rd-limit", false,
+ PROP_TYPE_U32},
+ {VBIF_DEFAULT_OT_WR_LIMIT, "qcom,sde-vbif-default-ot-wr-limit", false,
+ PROP_TYPE_U32},
+ {VBIF_DYNAMIC_OT_RD_LIMIT, "qcom,sde-vbif-dynamic-ot-rd-limit", false,
+ PROP_TYPE_U32_ARRAY},
+ {VBIF_DYNAMIC_OT_WR_LIMIT, "qcom,sde-vbif-dynamic-ot-wr-limit", false,
+ PROP_TYPE_U32_ARRAY},
+};
+
+/*************************************************************
+ * static API list
+ *************************************************************/
+/*
+ * _parse_dt_u32_handler - read a u32 array property of @len elements into
+ * @offsets, logging at error or debug level depending on @mandatory.
+ * Return: 0 on success, negative error code from the OF layer on failure.
+ */
+static int _parse_dt_u32_handler(struct device_node *np,
+ char *prop_name, u32 *offsets, int len, bool mandatory)
+{
+ int rc = of_property_read_u32_array(np, prop_name, offsets, len);
+
+ if (rc && mandatory)
+ SDE_ERROR("mandatory prop: %s u32 array read len:%d\n",
+ prop_name, len);
+ else if (rc)
+ SDE_DEBUG("optional prop: %s u32 array read len:%d\n",
+ prop_name, len);
+
+ return rc;
+}
+
+/*
+ * _parse_dt_bit_offset - parse a property containing (register offset,
+ * bit offset) u32 pairs into prop_value at @prop_index. A trailing unpaired
+ * element is silently dropped (len is rounded down to an even count).
+ * Return: 0 on success or optional-missing, -EINVAL if a mandatory
+ * property is absent.
+ */
+static int _parse_dt_bit_offset(struct device_node *np,
+ char *prop_name, struct sde_prop_value *prop_value, u32 prop_index,
+ u32 count, bool mandatory)
+{
+ int rc = 0, len, i, j;
+ const u32 *arr;
+
+ arr = of_get_property(np, prop_name, &len);
+ if (arr) {
+ len /= sizeof(u32);
+ /* force an even element count: values come in pairs */
+ len &= ~0x1;
+ for (i = 0, j = 0; i < len; j++) {
+ /* raw DT cells are big-endian; convert each one */
+ PROP_BITVALUE_ACCESS(prop_value, prop_index, j, 0) =
+ be32_to_cpu(arr[i]);
+ i++;
+ PROP_BITVALUE_ACCESS(prop_value, prop_index, j, 1) =
+ be32_to_cpu(arr[i]);
+ i++;
+ }
+ } else {
+ if (mandatory) {
+ SDE_ERROR("error mandatory property '%s' not found\n",
+ prop_name);
+ rc = -EINVAL;
+ } else {
+ SDE_DEBUG("error optional property '%s' not found\n",
+ prop_name);
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * _validate_dt_entry - validate a table of DT properties against the node:
+ * count array elements, confirm mandatory entries exist, and reconcile each
+ * array count with the block offset count (when @off_count is requested).
+ * @prop_count: output, per-property element counts
+ * @off_count: optional output, count of entries in the offset property
+ * (sde_prop[0]); pass NULL when no offset reconciliation is needed
+ * Return: 0 on success, -EINVAL on mandatory-property failures.
+ */
+static int _validate_dt_entry(struct device_node *np,
+ struct sde_prop_type *sde_prop, u32 prop_size, int *prop_count,
+ int *off_count)
+{
+ int rc = 0, i, val = 0;
+ struct device_node *snp = NULL;
+
+ if (off_count) {
+ *off_count = of_property_count_u32_elems(np,
+ sde_prop[0].prop_name);
+ if ((*off_count > MAX_BLOCKS) || (*off_count < 0)) {
+ if (sde_prop[0].is_mandatory) {
+ SDE_ERROR("invalid hw offset prop name:%s\"\
+ count: %d\n",
+ sde_prop[0].prop_name, *off_count);
+ rc = -EINVAL;
+ }
+ *off_count = 0;
+ return rc;
+ }
+ }
+
+ for (i = 0; i < prop_size; i++) {
+ switch (sde_prop[i].type) {
+ case PROP_TYPE_U32:
+ rc = of_property_read_u32(np, sde_prop[i].prop_name,
+ &val);
+ break;
+ case PROP_TYPE_U32_ARRAY:
+ prop_count[i] = of_property_count_u32_elems(np,
+ sde_prop[i].prop_name);
+ if (prop_count[i] < 0)
+ rc = prop_count[i];
+ break;
+ case PROP_TYPE_STRING_ARRAY:
+ prop_count[i] = of_property_count_strings(np,
+ sde_prop[i].prop_name);
+ if (prop_count[i] < 0)
+ rc = prop_count[i];
+ break;
+ case PROP_TYPE_BIT_OFFSET_ARRAY:
+ /*
+ * of_get_property() only writes the length when the
+ * property exists; reset 'val' first so a missing
+ * property yields count 0 instead of stale/garbage
+ * data from a previous iteration.
+ */
+ val = 0;
+ of_get_property(np, sde_prop[i].prop_name, &val);
+ prop_count[i] = val / (MAX_BIT_OFFSET * sizeof(u32));
+ break;
+ case PROP_TYPE_NODE:
+ snp = of_get_child_by_name(np,
+ sde_prop[i].prop_name);
+ if (!snp)
+ rc = -EINVAL;
+ break;
+ default:
+ SDE_DEBUG("invalid property type:%d\n",
+ sde_prop[i].type);
+ break;
+ }
+ SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\
+ prop_count:%d\n", i, sde_prop[i].prop_name,
+ sde_prop[i].type, prop_count[i]);
+
+ if (rc && sde_prop[i].is_mandatory &&
+ ((sde_prop[i].type == PROP_TYPE_U32) ||
+ (sde_prop[i].type == PROP_TYPE_NODE))) {
+ SDE_ERROR("prop:%s not present\n",
+ sde_prop[i].prop_name);
+ goto end;
+ } else if (sde_prop[i].type == PROP_TYPE_U32 ||
+ sde_prop[i].type == PROP_TYPE_BOOL ||
+ sde_prop[i].type == PROP_TYPE_NODE) {
+ /* scalar/bool/node entries need no count matching */
+ rc = 0;
+ continue;
+ }
+
+ if (off_count && (prop_count[i] != *off_count) &&
+ sde_prop[i].is_mandatory) {
+ SDE_ERROR("prop:%s count:%d is different compared to \"\
+ offset array:%d\n", sde_prop[i].prop_name,
+ prop_count[i], *off_count);
+ rc = -EINVAL;
+ goto end;
+ } else if (off_count && prop_count[i] != *off_count) {
+ /* optional mismatch: ignore the property entirely */
+ SDE_DEBUG("prop:%s count:%d is different compared to \"\
+ offset array:%d\n", sde_prop[i].prop_name,
+ prop_count[i], *off_count);
+ rc = 0;
+ prop_count[i] = 0;
+ }
+ if (!off_count && prop_count[i] < 0) {
+ prop_count[i] = 0;
+ if (sde_prop[i].is_mandatory) {
+ SDE_ERROR("prop:%s count:%d is negative\n",
+ sde_prop[i].prop_name, prop_count[i]);
+ rc = -EINVAL;
+ } else {
+ rc = 0;
+ SDE_DEBUG("prop:%s count:%d is negative\n",
+ sde_prop[i].prop_name, prop_count[i]);
+ }
+ }
+ }
+
+end:
+ return rc;
+}
+
+static int _read_dt_entry(struct device_node *np,
+ struct sde_prop_type *sde_prop, u32 prop_size, int *prop_count,
+ bool *prop_exists,
+ struct sde_prop_value *prop_value)
+{
+ int rc = 0, i, j;
+
+ for (i = 0; i < prop_size; i++) {
+ prop_exists[i] = true;
+ switch (sde_prop[i].type) {
+ case PROP_TYPE_U32:
+ rc = of_property_read_u32(np, sde_prop[i].prop_name,
+ &PROP_VALUE_ACCESS(prop_value, i, 0));
+ SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\
+ value:0x%x\n", i, sde_prop[i].prop_name,
+ sde_prop[i].type,
+ PROP_VALUE_ACCESS(prop_value, i, 0));
+ if (rc)
+ prop_exists[i] = false;
+ break;
+ case PROP_TYPE_BOOL:
+ PROP_VALUE_ACCESS(prop_value, i, 0) =
+ of_property_read_bool(np,
+ sde_prop[i].prop_name);
+ SDE_DEBUG("prop id:%d prop name:%s prop type:%d \"\
+ value:0x%x\n", i, sde_prop[i].prop_name,
+ sde_prop[i].type,
+ PROP_VALUE_ACCESS(prop_value, i, 0));
+ break;
+ case PROP_TYPE_U32_ARRAY:
+ rc = _parse_dt_u32_handler(np, sde_prop[i].prop_name,
+ &PROP_VALUE_ACCESS(prop_value, i, 0),
+ prop_count[i], sde_prop[i].is_mandatory);
+ if (rc && sde_prop[i].is_mandatory) {
+ SDE_ERROR("%s prop validation success but \"\
+ read failed\n", sde_prop[i].prop_name);
+ prop_exists[i] = false;
+ goto end;
+ } else {
+ if (rc)
+ prop_exists[i] = false;
+ /* only for debug purpose */
+ SDE_DEBUG("prop id:%d prop name:%s prop \"\
+ type:%d", i, sde_prop[i].prop_name,
+ sde_prop[i].type);
+ for (j = 0; j < prop_count[i]; j++)
+ SDE_DEBUG(" value[%d]:0x%x ", j,
+ PROP_VALUE_ACCESS(prop_value, i,
+ j));
+ SDE_DEBUG("\n");
+ }
+ break;
+ case PROP_TYPE_BIT_OFFSET_ARRAY:
+ rc = _parse_dt_bit_offset(np, sde_prop[i].prop_name,
+ prop_value, i, prop_count[i],
+ sde_prop[i].is_mandatory);
+ if (rc && sde_prop[i].is_mandatory) {
+ SDE_ERROR("%s prop validation success but \"\
+ read failed\n", sde_prop[i].prop_name);
+ prop_exists[i] = false;
+ goto end;
+ } else {
+ if (rc)
+ prop_exists[i] = false;
+ SDE_DEBUG("prop id:%d prop name:%s prop \"\
+ type:%d", i, sde_prop[i].prop_name,
+ sde_prop[i].type);
+ for (j = 0; j < prop_count[i]; j++)
+ SDE_DEBUG(" count[%d]: bit:0x%x \"\
+ off:0x%x \n", j,
+ PROP_BITVALUE_ACCESS(prop_value,
+ i, j, 0),
+ PROP_BITVALUE_ACCESS(prop_value,
+ i, j, 1));
+ SDE_DEBUG("\n");
+ }
+ break;
+ case PROP_TYPE_NODE:
+ /* Node will be parsed in calling function */
+ rc = 0;
+ break;
+ default:
+ SDE_DEBUG("invalid property type:%d\n",
+ sde_prop[i].type);
+ break;
+ }
+ rc = 0;
+ }
+
+end:
+ return rc;
+}
+
+static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
+ struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+ bool *prop_exists, struct sde_prop_value *prop_value, u32 *vig_count)
+{
+ sblk->maxupscale = MAX_SSPP_UPSCALE;
+ sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
+ sspp->id = SSPP_VIG0 + *vig_count;
+ sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count;
+ sblk->format_list = plane_formats_yuv;
+ set_bit(SDE_SSPP_QOS, &sspp->features);
+ (*vig_count)++;
+
+ if (!prop_value)
+ return;
+
+ if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) {
+ set_bit(SDE_SSPP_SCALER_QSEED2, &sspp->features);
+ sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2;
+ sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+ VIG_QSEED_OFF, 0);
+ } else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
+ set_bit(SDE_SSPP_SCALER_QSEED3, &sspp->features);
+ sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
+ sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+ VIG_QSEED_OFF, 0);
+ }
+
+ sblk->csc_blk.id = SDE_SSPP_CSC;
+ if (sde_cfg->csc_type == SDE_SSPP_CSC) {
+ set_bit(SDE_SSPP_CSC, &sspp->features);
+ sblk->csc_blk.base = PROP_VALUE_ACCESS(prop_value,
+ VIG_CSC_OFF, 0);
+ } else if (sde_cfg->csc_type == SDE_SSPP_CSC_10BIT) {
+ set_bit(SDE_SSPP_CSC_10BIT, &sspp->features);
+ sblk->csc_blk.base = PROP_VALUE_ACCESS(prop_value,
+ VIG_CSC_OFF, 0);
+ }
+
+ sblk->hsic_blk.id = SDE_SSPP_HSIC;
+ if (prop_exists[VIG_HSIC_PROP]) {
+ sblk->hsic_blk.base = PROP_VALUE_ACCESS(prop_value,
+ VIG_HSIC_PROP, 0);
+ sblk->hsic_blk.version = PROP_VALUE_ACCESS(prop_value,
+ VIG_HSIC_PROP, 1);
+ sblk->hsic_blk.len = 0;
+ set_bit(SDE_SSPP_HSIC, &sspp->features);
+ }
+
+ sblk->memcolor_blk.id = SDE_SSPP_MEMCOLOR;
+ if (prop_exists[VIG_MEMCOLOR_PROP]) {
+ sblk->memcolor_blk.base = PROP_VALUE_ACCESS(prop_value,
+ VIG_MEMCOLOR_PROP, 0);
+ sblk->memcolor_blk.version = PROP_VALUE_ACCESS(prop_value,
+ VIG_MEMCOLOR_PROP, 1);
+ sblk->memcolor_blk.len = 0;
+ set_bit(SDE_SSPP_MEMCOLOR, &sspp->features);
+ }
+
+ sblk->pcc_blk.id = SDE_SSPP_PCC;
+ if (prop_exists[VIG_PCC_PROP]) {
+ sblk->pcc_blk.base = PROP_VALUE_ACCESS(prop_value,
+ VIG_PCC_PROP, 0);
+ sblk->pcc_blk.version = PROP_VALUE_ACCESS(prop_value,
+ VIG_PCC_PROP, 1);
+ sblk->pcc_blk.len = 0;
+ set_bit(SDE_SSPP_PCC, &sspp->features);
+ }
+}
+
+static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
+ struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+ bool *prop_exists, struct sde_prop_value *prop_value, u32 *rgb_count)
+{
+ sblk->maxupscale = MAX_SSPP_UPSCALE;
+ sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
+ sspp->id = SSPP_RGB0 + *rgb_count;
+ sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + *rgb_count;
+ sblk->format_list = plane_formats;
+ set_bit(SDE_SSPP_QOS, &sspp->features);
+ (*rgb_count)++;
+
+ if (!prop_value)
+ return;
+
+ if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) {
+ set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
+ sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2;
+ sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+ RGB_SCALER_OFF, 0);
+ } else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
+ set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
+ sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
+ sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+ RGB_SCALER_OFF, 0);
+ }
+
+ sblk->pcc_blk.id = SDE_SSPP_PCC;
+ if (prop_exists[RGB_PCC_PROP]) {
+ sblk->pcc_blk.base = PROP_VALUE_ACCESS(prop_value,
+ RGB_PCC_PROP, 0);
+ sblk->pcc_blk.version = PROP_VALUE_ACCESS(prop_value,
+ RGB_PCC_PROP, 1);
+ sblk->pcc_blk.len = 0;
+ set_bit(SDE_SSPP_PCC, &sspp->features);
+ }
+}
+
+static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg,
+ struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+ struct sde_prop_value *prop_value, u32 *cursor_count)
+{
+ set_bit(SDE_SSPP_CURSOR, &sspp->features);
+ sblk->maxupscale = SSPP_UNITY_SCALE;
+ sblk->maxdwnscale = SSPP_UNITY_SCALE;
+ sspp->id = SSPP_CURSOR0 + *cursor_count;
+ sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count;
+ sblk->format_list = plane_formats;
+ (*cursor_count)++;
+}
+
+static void _sde_sspp_setup_dma(struct sde_mdss_cfg *sde_cfg,
+ struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+ struct sde_prop_value *prop_value, u32 *dma_count)
+{
+ sblk->maxupscale = SSPP_UNITY_SCALE;
+ sblk->maxdwnscale = SSPP_UNITY_SCALE;
+ sspp->id = SSPP_DMA0 + *dma_count;
+ sspp->clk_ctrl = SDE_CLK_CTRL_DMA0 + *dma_count;
+ sblk->format_list = plane_formats;
+ set_bit(SDE_SSPP_QOS, &sspp->features);
+ (*dma_count)++;
+}
+
/*
 * sde_sspp_parse_dt - parse source pipe (SSPP) catalog entries from DT
 * @np: device node containing the SSPP properties
 * @sde_cfg: catalog to populate (sspp_count and per-pipe cfg/sub-blocks)
 *
 * Validates and reads the common SSPP property table plus the separately
 * counted danger/safe LUT arrays, then the optional "vig" and "rgb"
 * feature child nodes, and finally fills one sde_sspp_cfg per hardware
 * offset found. Sub-block allocations made here are released by catalog
 * deinit on the error paths.
 *
 * Return: 0 on success, negative errno on validation/allocation failure.
 */
static int sde_sspp_parse_dt(struct device_node *np,
	struct sde_mdss_cfg *sde_cfg)
{
	int rc, prop_count[SSPP_PROP_MAX], off_count, i, j;
	int vig_prop_count[VIG_PROP_MAX], rgb_prop_count[RGB_PROP_MAX];
	bool prop_exists[SSPP_PROP_MAX], vig_prop_exists[VIG_PROP_MAX];
	bool rgb_prop_exists[RGB_PROP_MAX];
	struct sde_prop_value *prop_value = NULL;
	struct sde_prop_value *vig_prop_value = NULL, *rgb_prop_value = NULL;
	const char *type;
	struct sde_sspp_cfg *sspp;
	struct sde_sspp_sub_blks *sblk;
	u32 vig_count = 0, dma_count = 0, rgb_count = 0, cursor_count = 0;
	u32 danger_count = 0, safe_count = 0;
	struct device_node *snp = NULL;

	prop_value = kzalloc(SSPP_PROP_MAX *
			sizeof(struct sde_prop_value), GFP_KERNEL);
	if (!prop_value) {
		rc = -ENOMEM;
		goto end;
	}

	/* validate the common table; off_count is the number of pipes */
	rc = _validate_dt_entry(np, sspp_prop, ARRAY_SIZE(sspp_prop),
		prop_count, &off_count);
	if (rc)
		goto end;

	/* danger/safe LUT arrays are sized independently of the pipe count */
	rc = _validate_dt_entry(np, &sspp_prop[SSPP_DANGER], 1,
		&prop_count[SSPP_DANGER], &danger_count);
	if (rc)
		goto end;

	rc = _validate_dt_entry(np, &sspp_prop[SSPP_SAFE], 1,
		&prop_count[SSPP_SAFE], &safe_count);
	if (rc)
		goto end;

	rc = _read_dt_entry(np, sspp_prop, ARRAY_SIZE(sspp_prop), prop_count,
		prop_exists, prop_value);
	if (rc)
		goto end;

	sde_cfg->sspp_count = off_count;

	/* get vig feature dt properties if they exist */
	snp = of_get_child_by_name(np, sspp_prop[SSPP_VIG_BLOCKS].prop_name);
	if (snp) {
		vig_prop_value = kzalloc(VIG_PROP_MAX *
			sizeof(struct sde_prop_value), GFP_KERNEL);
		if (!vig_prop_value) {
			rc = -ENOMEM;
			goto end;
		}
		rc = _validate_dt_entry(snp, vig_prop, ARRAY_SIZE(vig_prop),
			vig_prop_count, NULL);
		if (rc)
			goto end;
		rc = _read_dt_entry(snp, vig_prop, ARRAY_SIZE(vig_prop),
				vig_prop_count, vig_prop_exists,
				vig_prop_value);
	}

	/* get rgb feature dt properties if they exist */
	snp = of_get_child_by_name(np, sspp_prop[SSPP_RGB_BLOCKS].prop_name);
	if (snp) {
		rgb_prop_value = kzalloc(RGB_PROP_MAX *
					sizeof(struct sde_prop_value),
					GFP_KERNEL);
		if (!rgb_prop_value) {
			rc = -ENOMEM;
			goto end;
		}
		rc = _validate_dt_entry(snp, rgb_prop, ARRAY_SIZE(rgb_prop),
			rgb_prop_count, NULL);
		if (rc)
			goto end;
		rc = _read_dt_entry(snp, rgb_prop, ARRAY_SIZE(rgb_prop),
				rgb_prop_count, rgb_prop_exists,
				rgb_prop_value);
	}

	for (i = 0; i < off_count; i++) {
		sspp = sde_cfg->sspp + i;
		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
		if (!sblk) {
			rc = -ENOMEM;
			/* catalog deinit will release the allocated blocks */
			goto end;
		}
		sspp->sblk = sblk;

		sspp->base = PROP_VALUE_ACCESS(prop_value, SSPP_OFF, i);
		sblk->maxlinewidth = sde_cfg->max_sspp_linewidth;

		set_bit(SDE_SSPP_SRC, &sspp->features);
		sblk->src_blk.id = SDE_SSPP_SRC;

		/*
		 * NOTE(review): return value is not checked; if the type
		 * string list is missing or shorter than off_count, @type
		 * is used uninitialized by the strcmp calls below - verify
		 * the DT always provides one type per pipe.
		 */
		of_property_read_string_index(np,
				sspp_prop[SSPP_TYPE].prop_name, i, &type);
		if (!strcmp(type, "vig")) {
			_sde_sspp_setup_vig(sde_cfg, sspp, sblk,
				vig_prop_exists, vig_prop_value, &vig_count);
		} else if (!strcmp(type, "rgb")) {
			_sde_sspp_setup_rgb(sde_cfg, sspp, sblk,
				rgb_prop_exists, rgb_prop_value, &rgb_count);
		} else if (!strcmp(type, "cursor")) {
			/* No prop values for cursor pipes */
			_sde_sspp_setup_cursor(sde_cfg, sspp, sblk, NULL,
				&cursor_count);
		} else if (!strcmp(type, "dma")) {
			/* No prop values for DMA pipes */
			_sde_sspp_setup_dma(sde_cfg, sspp, sblk, NULL,
				&dma_count);
		} else {
			SDE_ERROR("invalid sspp type:%s\n", type);
			rc = -EINVAL;
			goto end;
		}

		sblk->maxhdeciexp = MAX_HORZ_DECIMATION;
		sblk->maxvdeciexp = MAX_VERT_DECIMATION;

		sspp->xin_id = PROP_VALUE_ACCESS(prop_value, SSPP_XIN, i);
		/* danger/safe LUTs are shared: indices 0..2, not per-pipe */
		sblk->danger_lut_linear =
			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 0);
		sblk->danger_lut_tile =
			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 1);
		sblk->danger_lut_nrt =
			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 2);
		sblk->safe_lut_linear =
			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 0);
		sblk->safe_lut_tile =
			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 1);
		sblk->safe_lut_nrt =
			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 2);
		sblk->creq_lut_nrt = DEFAULT_CREQ_LUT_NRT;
		sblk->pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE;
		/* single src length shared by all pipes (index 0) */
		sblk->src_blk.len = PROP_VALUE_ACCESS(prop_value, SSPP_SIZE, 0);

		/* mirror this pipe's clock control into every MDP top block */
		for (j = 0; j < sde_cfg->mdp_count; j++) {
			sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].reg_off =
				PROP_BITVALUE_ACCESS(prop_value,
						SSPP_CLK_CTRL, i, 0);
			sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].bit_off =
				PROP_BITVALUE_ACCESS(prop_value,
						SSPP_CLK_CTRL, i, 1);
		}

		SDE_DEBUG(
			"xin:%d danger:%x/%x/%x safe:%x/%x/%x creq:%x ram:%d clk%d:%x/%d\n",
			sspp->xin_id,
			sblk->danger_lut_linear,
			sblk->danger_lut_tile,
			sblk->danger_lut_nrt,
			sblk->safe_lut_linear,
			sblk->safe_lut_tile,
			sblk->safe_lut_nrt,
			sblk->creq_lut_nrt,
			sblk->pixel_ram_size,
			sspp->clk_ctrl,
			sde_cfg->mdp[0].clk_ctrls[sspp->clk_ctrl].reg_off,
			sde_cfg->mdp[0].clk_ctrls[sspp->clk_ctrl].bit_off);
	}

end:
	kfree(prop_value);
	kfree(vig_prop_value);
	kfree(rgb_prop_value);
	return rc;
}
+
+static int sde_ctl_parse_dt(struct device_node *np,
+ struct sde_mdss_cfg *sde_cfg)
+{
+ int rc, prop_count[HW_PROP_MAX], i;
+ bool prop_exists[HW_PROP_MAX];
+ struct sde_prop_value *prop_value = NULL;
+ struct sde_ctl_cfg *ctl;
+ u32 off_count;
+
+ if (!sde_cfg) {
+ SDE_ERROR("invalid argument input param\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ prop_value = kzalloc(HW_PROP_MAX *
+ sizeof(struct sde_prop_value), GFP_KERNEL);
+ if (!prop_value) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rc = _validate_dt_entry(np, ctl_prop, ARRAY_SIZE(ctl_prop), prop_count,
+ &off_count);
+ if (rc)
+ goto end;
+
+ sde_cfg->ctl_count = off_count;
+
+ rc = _read_dt_entry(np, ctl_prop, ARRAY_SIZE(ctl_prop), prop_count,
+ prop_exists, prop_value);
+ if (rc)
+ goto end;
+
+ for (i = 0; i < off_count; i++) {
+ ctl = sde_cfg->ctl + i;
+ ctl->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
+ ctl->id = CTL_0 + i;
+
+ if (i < MAX_SPLIT_DISPLAY_CTL)
+ set_bit(SDE_CTL_SPLIT_DISPLAY, &ctl->features);
+ if (i < MAX_PP_SPLIT_DISPLAY_CTL)
+ set_bit(SDE_CTL_PINGPONG_SPLIT, &ctl->features);
+ }
+
+end:
+ kfree(prop_value);
+ return rc;
+}
+
/*
 * sde_mixer_parse_dt - parse layer mixer (LM) catalog entries from DT
 * @np: device node containing the mixer properties
 * @sde_cfg: catalog to populate; pingpong_count and dspp_count must
 *	already be set by their respective parsers
 *
 * Fills one sde_lm_cfg per hardware offset, wires each mixer to a
 * pingpong/DSPP where available, and reads the optional mixer feature
 * child node (GC). Sub-block allocations are released by catalog deinit
 * on error paths.
 *
 * Return: 0 on success, negative errno on validation/alloc failure.
 */
static int sde_mixer_parse_dt(struct device_node *np,
	struct sde_mdss_cfg *sde_cfg)
{
	int rc, prop_count[MIXER_PROP_MAX], i;
	int blocks_prop_count[MIXER_BLOCKS_PROP_MAX];
	bool prop_exists[MIXER_PROP_MAX];
	bool blocks_prop_exists[MIXER_BLOCKS_PROP_MAX];
	struct sde_prop_value *prop_value = NULL, *blocks_prop_value = NULL;
	u32 off_count, max_blendstages;
	/* per-stage blend register offsets inside one mixer */
	u32 blend_reg_base[] = {0x20, 0x50, 0x80, 0xb0, 0x230, 0x260, 0x290};
	/*
	 * Peer mixer for split-display pairing, indexed by mixer number;
	 * 0 means no peer. NOTE(review): only 6 entries - assumes
	 * off_count never exceeds ARRAY_SIZE(lm_pair_mask); verify against
	 * the supported targets.
	 */
	u32 lm_pair_mask[] = {LM_1, LM_0, LM_5, 0x0, 0x0, LM_2};
	struct sde_lm_cfg *mixer;
	struct sde_lm_sub_blks *sblk;
	int pp_count, dspp_count;
	u32 pp_idx, dspp_idx;
	struct device_node *snp = NULL;

	if (!sde_cfg) {
		SDE_ERROR("invalid argument input param\n");
		rc = -EINVAL;
		goto end;
	}
	max_blendstages = sde_cfg->max_mixer_blendstages;

	prop_value = kzalloc(MIXER_PROP_MAX *
			sizeof(struct sde_prop_value), GFP_KERNEL);
	if (!prop_value) {
		rc = -ENOMEM;
		goto end;
	}

	rc = _validate_dt_entry(np, mixer_prop, ARRAY_SIZE(mixer_prop),
		prop_count, &off_count);
	if (rc)
		goto end;

	sde_cfg->mixer_count = off_count;

	rc = _read_dt_entry(np, mixer_prop, ARRAY_SIZE(mixer_prop), prop_count,
		prop_exists, prop_value);
	if (rc)
		goto end;

	pp_count = sde_cfg->pingpong_count;
	dspp_count = sde_cfg->dspp_count;

	/* get mixer feature dt properties if they exist */
	snp = of_get_child_by_name(np, mixer_prop[MIXER_BLOCKS].prop_name);
	if (snp) {
		blocks_prop_value = kzalloc(MIXER_BLOCKS_PROP_MAX *
				MAX_SDE_HW_BLK * sizeof(struct sde_prop_value),
				GFP_KERNEL);
		if (!blocks_prop_value) {
			rc = -ENOMEM;
			goto end;
		}
		rc = _validate_dt_entry(snp, mixer_blocks_prop,
			ARRAY_SIZE(mixer_blocks_prop), blocks_prop_count, NULL);
		if (rc)
			goto end;
		rc = _read_dt_entry(snp, mixer_blocks_prop,
			ARRAY_SIZE(mixer_blocks_prop),
			blocks_prop_count, blocks_prop_exists,
			blocks_prop_value);
	}

	for (i = 0, pp_idx = 0, dspp_idx = 0; i < off_count; i++) {
		mixer = sde_cfg->mixer + i;
		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
		if (!sblk) {
			rc = -ENOMEM;
			/* catalog deinit will release the allocated blocks */
			goto end;
		}
		mixer->sblk = sblk;

		mixer->base = PROP_VALUE_ACCESS(prop_value, MIXER_OFF, i);
		/* one shared length (index 0) applies to all mixers */
		mixer->len = PROP_VALUE_ACCESS(prop_value, MIXER_LEN, 0);
		mixer->id = LM_0 + i;
		if (!prop_exists[MIXER_LEN])
			mixer->len = DEFAULT_SDE_HW_BLOCK_LEN;

		if (lm_pair_mask[i])
			mixer->lm_pair_mask = 1 << lm_pair_mask[i];

		sblk->maxblendstages = max_blendstages;
		sblk->maxwidth = sde_cfg->max_mixer_width;
		/* copy only as many stage offsets as the HW supports */
		memcpy(sblk->blendstage_base, blend_reg_base, sizeof(u32) *
			min_t(u32, MAX_BLOCKS, min_t(u32,
			ARRAY_SIZE(blend_reg_base), max_blendstages)));
		if (sde_cfg->has_src_split)
			set_bit(SDE_MIXER_SOURCESPLIT, &mixer->features);

		/*
		 * Mixers in [ROT_LM_OFFSET, LINE_LM_OFFSET) serve rotation
		 * and get no pingpong/DSPP; the rest consume them in order
		 * until the counts run out.
		 */
		if ((i < ROT_LM_OFFSET) || (i >= LINE_LM_OFFSET)) {
			mixer->pingpong = pp_count > 0 ? pp_idx + PINGPONG_0
				: PINGPONG_MAX;
			mixer->dspp = dspp_count > 0 ? dspp_idx + DSPP_0
				: DSPP_MAX;
			pp_count--;
			dspp_count--;
			pp_idx++;
			dspp_idx++;
		} else {
			mixer->pingpong = PINGPONG_MAX;
			mixer->dspp = DSPP_MAX;
		}

		sblk->gc.id = SDE_MIXER_GC;
		if (blocks_prop_value && blocks_prop_exists[MIXER_GC_PROP]) {
			sblk->gc.base = PROP_VALUE_ACCESS(blocks_prop_value,
					MIXER_GC_PROP, 0);
			sblk->gc.version = PROP_VALUE_ACCESS(blocks_prop_value,
					MIXER_GC_PROP, 1);
			sblk->gc.len = 0;
			set_bit(SDE_MIXER_GC, &mixer->features);
		}
	}

end:
	kfree(prop_value);
	kfree(blocks_prop_value);
	return rc;
}
+
+static int sde_intf_parse_dt(struct device_node *np,
+ struct sde_mdss_cfg *sde_cfg)
+{
+ int rc, prop_count[INTF_PROP_MAX], i;
+ struct sde_prop_value *prop_value = NULL;
+ bool prop_exists[INTF_PROP_MAX];
+ u32 off_count;
+ u32 dsi_count = 0, none_count = 0, hdmi_count = 0, dp_count = 0;
+ const char *type;
+ struct sde_intf_cfg *intf;
+
+ if (!sde_cfg) {
+ SDE_ERROR("invalid argument\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ prop_value = kzalloc(INTF_PROP_MAX *
+ sizeof(struct sde_prop_value), GFP_KERNEL);
+ if (!prop_value) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rc = _validate_dt_entry(np, intf_prop, ARRAY_SIZE(intf_prop),
+ prop_count, &off_count);
+ if (rc)
+ goto end;
+
+ sde_cfg->intf_count = off_count;
+
+ rc = _read_dt_entry(np, intf_prop, ARRAY_SIZE(intf_prop), prop_count,
+ prop_exists, prop_value);
+ if (rc)
+ goto end;
+
+ for (i = 0; i < off_count; i++) {
+ intf = sde_cfg->intf + i;
+ intf->base = PROP_VALUE_ACCESS(prop_value, INTF_OFF, i);
+ intf->len = PROP_VALUE_ACCESS(prop_value, INTF_LEN, 0);
+ intf->id = INTF_0 + i;
+ if (!prop_exists[INTF_LEN])
+ intf->len = DEFAULT_SDE_HW_BLOCK_LEN;
+
+ intf->prog_fetch_lines_worst_case =
+ PROP_VALUE_ACCESS(prop_value, INTF_PREFETCH, i);
+
+ of_property_read_string_index(np,
+ intf_prop[INTF_TYPE].prop_name, i, &type);
+ if (!strcmp(type, "dsi")) {
+ intf->type = INTF_DSI;
+ intf->controller_id = dsi_count;
+ dsi_count++;
+ } else if (!strcmp(type, "hdmi")) {
+ intf->type = INTF_HDMI;
+ intf->controller_id = hdmi_count;
+ hdmi_count++;
+ } else if (!strcmp(type, "dp")) {
+ intf->type = INTF_DP;
+ intf->controller_id = dp_count;
+ dp_count++;
+ } else {
+ intf->type = INTF_NONE;
+ intf->controller_id = none_count;
+ none_count++;
+ }
+ }
+
+end:
+ kfree(prop_value);
+ return rc;
+}
+
/*
 * sde_wb_parse_dt - parse writeback (WB) catalog entries from DT
 * @np: device node containing the WB properties
 * @sde_cfg: catalog to populate; mdp_count must already be set so the
 *	clock controls can be mirrored into each MDP top block
 *
 * Fills one sde_wb_cfg per hardware offset. WB ids come from the DT
 * WB_ID property (not the loop index), and the same id selects the
 * clock control slot. Sub-block allocations are released by catalog
 * deinit on error paths.
 *
 * Return: 0 on success, negative errno on validation/alloc failure.
 */
static int sde_wb_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
{
	int rc, prop_count[WB_PROP_MAX], i, j;
	struct sde_prop_value *prop_value = NULL;
	bool prop_exists[WB_PROP_MAX];
	u32 off_count;
	struct sde_wb_cfg *wb;
	struct sde_wb_sub_blocks *sblk;

	if (!sde_cfg) {
		SDE_ERROR("invalid argument\n");
		rc = -EINVAL;
		goto end;
	}

	prop_value = kzalloc(WB_PROP_MAX *
			sizeof(struct sde_prop_value), GFP_KERNEL);
	if (!prop_value) {
		rc = -ENOMEM;
		goto end;
	}

	rc = _validate_dt_entry(np, wb_prop, ARRAY_SIZE(wb_prop), prop_count,
		&off_count);
	if (rc)
		goto end;

	sde_cfg->wb_count = off_count;

	rc = _read_dt_entry(np, wb_prop, ARRAY_SIZE(wb_prop), prop_count,
		prop_exists, prop_value);
	if (rc)
		goto end;

	for (i = 0; i < off_count; i++) {
		wb = sde_cfg->wb + i;
		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
		if (!sblk) {
			rc = -ENOMEM;
			/* catalog deinit will release the allocated blocks */
			goto end;
		}
		wb->sblk = sblk;

		wb->base = PROP_VALUE_ACCESS(prop_value, WB_OFF, i);
		/* id and clk_ctrl are both derived from the DT WB_ID value */
		wb->id = WB_0 + PROP_VALUE_ACCESS(prop_value, WB_ID, i);
		wb->clk_ctrl = SDE_CLK_CTRL_WB0 +
			PROP_VALUE_ACCESS(prop_value, WB_ID, i);
		wb->xin_id = PROP_VALUE_ACCESS(prop_value, WB_XIN_ID, i);
		/* all writeback engines sit on the non-realtime VBIF */
		wb->vbif_idx = VBIF_NRT;
		/* single shared length (index 0) applies to all WBs */
		wb->len = PROP_VALUE_ACCESS(prop_value, WB_LEN, 0);
		wb->format_list = wb2_formats;
		if (!prop_exists[WB_LEN])
			wb->len = DEFAULT_SDE_HW_BLOCK_LEN;
		sblk->maxlinewidth = sde_cfg->max_wb_linewidth;

		/* ids at or above LINE_MODE_WB_OFFSET support line mode */
		if (wb->id >= LINE_MODE_WB_OFFSET)
			set_bit(SDE_WB_LINE_MODE, &wb->features);
		else
			set_bit(SDE_WB_BLOCK_MODE, &wb->features);
		set_bit(SDE_WB_TRAFFIC_SHAPER, &wb->features);
		set_bit(SDE_WB_YUV_CONFIG, &wb->features);

		/* mirror this WB's clock control into every MDP top block */
		for (j = 0; j < sde_cfg->mdp_count; j++) {
			sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].reg_off =
				PROP_BITVALUE_ACCESS(prop_value,
						WB_CLK_CTRL, i, 0);
			sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].bit_off =
				PROP_BITVALUE_ACCESS(prop_value,
						WB_CLK_CTRL, i, 1);
		}

		SDE_DEBUG(
			"wb:%d xin:%d vbif:%d clk%d:%x/%d\n",
			wb->id - WB_0,
			wb->xin_id,
			wb->vbif_idx,
			wb->clk_ctrl,
			sde_cfg->mdp[0].clk_ctrls[wb->clk_ctrl].reg_off,
			sde_cfg->mdp[0].clk_ctrls[wb->clk_ctrl].bit_off);
	}

end:
	kfree(prop_value);
	return rc;
}
+
+static void _sde_dspp_setup_blocks(struct sde_mdss_cfg *sde_cfg,
+ struct sde_dspp_cfg *dspp, struct sde_dspp_sub_blks *sblk,
+ bool *prop_exists, struct sde_prop_value *prop_value)
+{
+ sblk->igc.id = SDE_DSPP_IGC;
+ if (prop_exists[DSPP_IGC_PROP]) {
+ sblk->igc.base = PROP_VALUE_ACCESS(prop_value,
+ DSPP_IGC_PROP, 0);
+ sblk->igc.version = PROP_VALUE_ACCESS(prop_value,
+ DSPP_IGC_PROP, 1);
+ sblk->igc.len = 0;
+ set_bit(SDE_DSPP_IGC, &dspp->features);
+ }
+
+ sblk->pcc.id = SDE_DSPP_PCC;
+ if (prop_exists[DSPP_PCC_PROP]) {
+ sblk->pcc.base = PROP_VALUE_ACCESS(prop_value,
+ DSPP_PCC_PROP, 0);
+ sblk->pcc.version = PROP_VALUE_ACCESS(prop_value,
+ DSPP_PCC_PROP, 1);
+ sblk->pcc.len = 0;
+ set_bit(SDE_DSPP_PCC, &dspp->features);
+ }
+
+ sblk->gc.id = SDE_DSPP_GC;
+ if (prop_exists[DSPP_GC_PROP]) {
+ sblk->gc.base = PROP_VALUE_ACCESS(prop_value, DSPP_GC_PROP, 0);
+ sblk->gc.version = PROP_VALUE_ACCESS(prop_value,
+ DSPP_GC_PROP, 1);
+ sblk->gc.len = 0;
+ set_bit(SDE_DSPP_GC, &dspp->features);
+ }
+
+ sblk->gamut.id = SDE_DSPP_GAMUT;
+ if (prop_exists[DSPP_GAMUT_PROP]) {
+ sblk->gamut.base = PROP_VALUE_ACCESS(prop_value,
+ DSPP_GAMUT_PROP, 0);
+ sblk->gamut.version = PROP_VALUE_ACCESS(prop_value,
+ DSPP_GAMUT_PROP, 1);
+ sblk->gamut.len = 0;
+ set_bit(SDE_DSPP_GAMUT, &dspp->features);
+ }
+
+ sblk->dither.id = SDE_DSPP_DITHER;
+ if (prop_exists[DSPP_DITHER_PROP]) {
+ sblk->dither.base = PROP_VALUE_ACCESS(prop_value,
+ DSPP_DITHER_PROP, 0);
+ sblk->dither.version = PROP_VALUE_ACCESS(prop_value,
+ DSPP_DITHER_PROP, 1);
+ sblk->dither.len = 0;
+ set_bit(SDE_DSPP_DITHER, &dspp->features);
+ }
+
+ sblk->hist.id = SDE_DSPP_HIST;
+ if (prop_exists[DSPP_HIST_PROP]) {
+ sblk->hist.base = PROP_VALUE_ACCESS(prop_value,
+ DSPP_HIST_PROP, 0);
+ sblk->hist.version = PROP_VALUE_ACCESS(prop_value,
+ DSPP_HIST_PROP, 1);
+ sblk->hist.len = 0;
+ set_bit(SDE_DSPP_HIST, &dspp->features);
+ }
+
+ sblk->hsic.id = SDE_DSPP_HSIC;
+ if (prop_exists[DSPP_HSIC_PROP]) {
+ sblk->hsic.base = PROP_VALUE_ACCESS(prop_value,
+ DSPP_HSIC_PROP, 0);
+ sblk->hsic.version = PROP_VALUE_ACCESS(prop_value,
+ DSPP_HSIC_PROP, 1);
+ sblk->hsic.len = 0;
+ set_bit(SDE_DSPP_HSIC, &dspp->features);
+ }
+
+ sblk->memcolor.id = SDE_DSPP_MEMCOLOR;
+ if (prop_exists[DSPP_MEMCOLOR_PROP]) {
+ sblk->memcolor.base = PROP_VALUE_ACCESS(prop_value,
+ DSPP_MEMCOLOR_PROP, 0);
+ sblk->memcolor.version = PROP_VALUE_ACCESS(prop_value,
+ DSPP_MEMCOLOR_PROP, 1);
+ sblk->memcolor.len = 0;
+ set_bit(SDE_DSPP_MEMCOLOR, &dspp->features);
+ }
+
+ sblk->sixzone.id = SDE_DSPP_SIXZONE;
+ if (prop_exists[DSPP_SIXZONE_PROP]) {
+ sblk->sixzone.base = PROP_VALUE_ACCESS(prop_value,
+ DSPP_SIXZONE_PROP, 0);
+ sblk->sixzone.version = PROP_VALUE_ACCESS(prop_value,
+ DSPP_SIXZONE_PROP, 1);
+ sblk->sixzone.len = 0;
+ set_bit(SDE_DSPP_SIXZONE, &dspp->features);
+ }
+
+ sblk->vlut.id = SDE_DSPP_VLUT;
+ if (prop_exists[DSPP_VLUT_PROP]) {
+ sblk->vlut.base = PROP_VALUE_ACCESS(prop_value,
+ DSPP_VLUT_PROP, 0);
+ sblk->vlut.version = PROP_VALUE_ACCESS(prop_value,
+ DSPP_VLUT_PROP, 1);
+ sblk->sixzone.len = 0;
+ set_bit(SDE_DSPP_VLUT, &dspp->features);
+ }
+}
+
/*
 * sde_dspp_parse_dt - parse destination surface processor (DSPP) entries
 * @np: device node containing the DSPP properties
 * @sde_cfg: catalog to populate (dspp_count and per-DSPP cfg/sub-blocks)
 *
 * Reads the base DSPP table, the assertive-display (AD) table, and the
 * optional DSPP blocks child node, then fills one sde_dspp_cfg per
 * hardware offset. Sub-block allocations are released by catalog deinit
 * on error paths.
 *
 * Return: 0 on success, negative errno on validation/alloc failure.
 */
static int sde_dspp_parse_dt(struct device_node *np,
	struct sde_mdss_cfg *sde_cfg)
{
	int rc, prop_count[DSPP_PROP_MAX], i;
	int ad_prop_count[AD_PROP_MAX];
	bool prop_exists[DSPP_PROP_MAX], ad_prop_exists[AD_PROP_MAX];
	bool blocks_prop_exists[DSPP_BLOCKS_PROP_MAX];
	struct sde_prop_value *ad_prop_value = NULL;
	int blocks_prop_count[DSPP_BLOCKS_PROP_MAX];
	struct sde_prop_value *prop_value = NULL, *blocks_prop_value = NULL;
	u32 off_count, ad_off_count;
	struct sde_dspp_cfg *dspp;
	struct sde_dspp_sub_blks *sblk;
	struct device_node *snp = NULL;

	if (!sde_cfg) {
		SDE_ERROR("invalid argument\n");
		rc = -EINVAL;
		goto end;
	}

	prop_value = kzalloc(DSPP_PROP_MAX *
			sizeof(struct sde_prop_value), GFP_KERNEL);
	if (!prop_value) {
		rc = -ENOMEM;
		goto end;
	}

	rc = _validate_dt_entry(np, dspp_prop, ARRAY_SIZE(dspp_prop),
		prop_count, &off_count);
	if (rc)
		goto end;

	sde_cfg->dspp_count = off_count;

	rc = _read_dt_entry(np, dspp_prop, ARRAY_SIZE(dspp_prop), prop_count,
		prop_exists, prop_value);
	if (rc)
		goto end;

	/* Parse AD dtsi entries */
	ad_prop_value = kzalloc(AD_PROP_MAX *
			sizeof(struct sde_prop_value), GFP_KERNEL);
	if (!ad_prop_value) {
		rc = -ENOMEM;
		goto end;
	}
	rc = _validate_dt_entry(np, ad_prop, ARRAY_SIZE(ad_prop),
		ad_prop_count, &ad_off_count);
	if (rc)
		goto end;
	rc = _read_dt_entry(np, ad_prop, ARRAY_SIZE(ad_prop), ad_prop_count,
		ad_prop_exists, ad_prop_value);
	if (rc)
		goto end;

	/* get DSPP feature dt properties if they exist */
	snp = of_get_child_by_name(np, dspp_prop[DSPP_BLOCKS].prop_name);
	if (snp) {
		blocks_prop_value = kzalloc(DSPP_BLOCKS_PROP_MAX *
				MAX_SDE_HW_BLK * sizeof(struct sde_prop_value),
				GFP_KERNEL);
		if (!blocks_prop_value) {
			rc = -ENOMEM;
			goto end;
		}
		rc = _validate_dt_entry(snp, dspp_blocks_prop,
			ARRAY_SIZE(dspp_blocks_prop), blocks_prop_count, NULL);
		if (rc)
			goto end;
		rc = _read_dt_entry(snp, dspp_blocks_prop,
			ARRAY_SIZE(dspp_blocks_prop), blocks_prop_count,
			blocks_prop_exists, blocks_prop_value);
		if (rc)
			goto end;
	}

	for (i = 0; i < off_count; i++) {
		dspp = sde_cfg->dspp + i;
		dspp->base = PROP_VALUE_ACCESS(prop_value, DSPP_OFF, i);
		dspp->id = DSPP_0 + i;

		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
		if (!sblk) {
			rc = -ENOMEM;
			/* catalog deinit will release the allocated blocks */
			goto end;
		}
		dspp->sblk = sblk;

		/* feature sub-blocks only exist if the child node parsed */
		if (blocks_prop_value)
			_sde_dspp_setup_blocks(sde_cfg, dspp, sblk,
					blocks_prop_exists, blocks_prop_value);

		/* AD instances may be fewer than DSPPs; only map the first
		 * ad_off_count of them (shared AD version at index 0)
		 */
		sblk->ad.id = SDE_DSPP_AD;
		if (ad_prop_value && (i < ad_off_count) &&
		    ad_prop_exists[AD_OFF]) {
			sblk->ad.base = PROP_VALUE_ACCESS(ad_prop_value,
				AD_OFF, i);
			sblk->ad.version = PROP_VALUE_ACCESS(ad_prop_value,
				AD_VERSION, 0);
			set_bit(SDE_DSPP_AD, &dspp->features);
		}
	}

end:
	kfree(prop_value);
	kfree(ad_prop_value);
	kfree(blocks_prop_value);
	return rc;
}
+
+static int sde_cdm_parse_dt(struct device_node *np,
+ struct sde_mdss_cfg *sde_cfg)
+{
+ int rc, prop_count[HW_PROP_MAX], i;
+ struct sde_prop_value *prop_value = NULL;
+ bool prop_exists[HW_PROP_MAX];
+ u32 off_count;
+ struct sde_cdm_cfg *cdm;
+
+ if (!sde_cfg) {
+ SDE_ERROR("invalid argument\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ prop_value = kzalloc(HW_PROP_MAX *
+ sizeof(struct sde_prop_value), GFP_KERNEL);
+ if (!prop_value) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rc = _validate_dt_entry(np, cdm_prop, ARRAY_SIZE(cdm_prop), prop_count,
+ &off_count);
+ if (rc)
+ goto end;
+
+ sde_cfg->cdm_count = off_count;
+
+ rc = _read_dt_entry(np, cdm_prop, ARRAY_SIZE(cdm_prop), prop_count,
+ prop_exists, prop_value);
+ if (rc)
+ goto end;
+
+ for (i = 0; i < off_count; i++) {
+ cdm = sde_cfg->cdm + i;
+ cdm->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
+ cdm->id = CDM_0 + i;
+ cdm->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
+
+ /* intf3 and wb2 for cdm block */
+ cdm->wb_connect = sde_cfg->wb_count ? BIT(WB_2) : BIT(31);
+ cdm->intf_connect = sde_cfg->intf_count ? BIT(INTF_3) : BIT(31);
+ }
+
+end:
+ kfree(prop_value);
+ return rc;
+}
+
/*
 * sde_vbif_parse_dt - parse VBIF blocks and outstanding-transaction limits
 * @np: device node containing the VBIF properties
 * @sde_cfg: catalog to populate (vbif_count and per-VBIF cfg)
 *
 * Besides the base VBIF table, the dynamic OT read/write limit arrays
 * are validated separately since they carry their own element counts:
 * each table is a flat list of <pps, ot_limit> u32 pairs. The per-VBIF
 * OT-limit feature bit is set if any default or dynamic limit exists.
 * Dynamic table allocations are released by catalog deinit on error.
 *
 * Return: 0 on success, negative errno on validation/alloc failure.
 */
static int sde_vbif_parse_dt(struct device_node *np,
	struct sde_mdss_cfg *sde_cfg)
{
	int rc, prop_count[VBIF_PROP_MAX], i, j, k;
	struct sde_prop_value *prop_value = NULL;
	bool prop_exists[VBIF_PROP_MAX];
	u32 off_count, vbif_len, rd_len = 0, wr_len = 0;
	struct sde_vbif_cfg *vbif;

	if (!sde_cfg) {
		SDE_ERROR("invalid argument\n");
		rc = -EINVAL;
		goto end;
	}

	prop_value = kzalloc(VBIF_PROP_MAX *
			sizeof(struct sde_prop_value), GFP_KERNEL);
	if (!prop_value) {
		rc = -ENOMEM;
		goto end;
	}

	rc = _validate_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop),
		prop_count, &off_count);
	if (rc)
		goto end;

	/* the OT limit arrays have their own independent lengths */
	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_RD_LIMIT], 1,
		&prop_count[VBIF_DYNAMIC_OT_RD_LIMIT], &rd_len);
	if (rc)
		goto end;

	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_WR_LIMIT], 1,
		&prop_count[VBIF_DYNAMIC_OT_WR_LIMIT], &wr_len);
	if (rc)
		goto end;

	sde_cfg->vbif_count = off_count;

	rc = _read_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop), prop_count,
		prop_exists, prop_value);
	if (rc)
		goto end;

	/* single shared length (index 0) applies to all VBIF instances */
	vbif_len = PROP_VALUE_ACCESS(prop_value, VBIF_LEN, 0);
	if (!prop_exists[VBIF_LEN])
		vbif_len = DEFAULT_SDE_HW_BLOCK_LEN;

	for (i = 0; i < off_count; i++) {
		vbif = sde_cfg->vbif + i;
		vbif->base = PROP_VALUE_ACCESS(prop_value, VBIF_OFF, i);
		vbif->len = vbif_len;
		/* id comes from the DT VBIF_ID value, not the loop index */
		vbif->id = VBIF_0 + PROP_VALUE_ACCESS(prop_value, VBIF_ID, i);

		SDE_DEBUG("vbif:%d\n", vbif->id - VBIF_0);

		vbif->xin_halt_timeout = VBIF_XIN_HALT_TIMEOUT;

		vbif->default_ot_rd_limit = PROP_VALUE_ACCESS(prop_value,
				VBIF_DEFAULT_OT_RD_LIMIT, 0);
		SDE_DEBUG("default_ot_rd_limit=%u\n",
				vbif->default_ot_rd_limit);

		vbif->default_ot_wr_limit = PROP_VALUE_ACCESS(prop_value,
				VBIF_DEFAULT_OT_WR_LIMIT, 0);
		SDE_DEBUG("default_ot_wr_limit=%u\n",
				vbif->default_ot_wr_limit);

		/* each dynamic entry is a <pps, ot_limit> pair */
		vbif->dynamic_ot_rd_tbl.count =
				prop_count[VBIF_DYNAMIC_OT_RD_LIMIT] / 2;
		SDE_DEBUG("dynamic_ot_rd_tbl.count=%u\n",
				vbif->dynamic_ot_rd_tbl.count);
		if (vbif->dynamic_ot_rd_tbl.count) {
			vbif->dynamic_ot_rd_tbl.cfg = kcalloc(
				vbif->dynamic_ot_rd_tbl.count,
				sizeof(struct sde_vbif_dynamic_ot_cfg),
				GFP_KERNEL);
			if (!vbif->dynamic_ot_rd_tbl.cfg) {
				rc = -ENOMEM;
				goto end;
			}
		}

		for (j = 0, k = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
			vbif->dynamic_ot_rd_tbl.cfg[j].pps = (u64)
				PROP_VALUE_ACCESS(prop_value,
				VBIF_DYNAMIC_OT_RD_LIMIT, k++);
			vbif->dynamic_ot_rd_tbl.cfg[j].ot_limit =
				PROP_VALUE_ACCESS(prop_value,
				VBIF_DYNAMIC_OT_RD_LIMIT, k++);
			SDE_DEBUG("dynamic_ot_rd_tbl[%d].cfg=<%llu %u>\n", j,
				vbif->dynamic_ot_rd_tbl.cfg[j].pps,
				vbif->dynamic_ot_rd_tbl.cfg[j].ot_limit);
		}

		vbif->dynamic_ot_wr_tbl.count =
				prop_count[VBIF_DYNAMIC_OT_WR_LIMIT] / 2;
		SDE_DEBUG("dynamic_ot_wr_tbl.count=%u\n",
				vbif->dynamic_ot_wr_tbl.count);
		if (vbif->dynamic_ot_wr_tbl.count) {
			vbif->dynamic_ot_wr_tbl.cfg = kcalloc(
				vbif->dynamic_ot_wr_tbl.count,
				sizeof(struct sde_vbif_dynamic_ot_cfg),
				GFP_KERNEL);
			if (!vbif->dynamic_ot_wr_tbl.cfg) {
				rc = -ENOMEM;
				goto end;
			}
		}

		for (j = 0, k = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
			vbif->dynamic_ot_wr_tbl.cfg[j].pps = (u64)
				PROP_VALUE_ACCESS(prop_value,
				VBIF_DYNAMIC_OT_WR_LIMIT, k++);
			vbif->dynamic_ot_wr_tbl.cfg[j].ot_limit =
				PROP_VALUE_ACCESS(prop_value,
				VBIF_DYNAMIC_OT_WR_LIMIT, k++);
			SDE_DEBUG("dynamic_ot_wr_tbl[%d].cfg=<%llu %u>\n", j,
				vbif->dynamic_ot_wr_tbl.cfg[j].pps,
				vbif->dynamic_ot_wr_tbl.cfg[j].ot_limit);
		}

		/* advertise OT limiting if any default or dynamic limit set */
		if (vbif->default_ot_rd_limit || vbif->default_ot_wr_limit ||
				vbif->dynamic_ot_rd_tbl.count ||
				vbif->dynamic_ot_wr_tbl.count)
			set_bit(SDE_VBIF_QOS_OTLIM, &vbif->features);
	}

end:
	kfree(prop_value);
	return rc;
}
+
/*
 * sde_pp_parse_dt - parse pingpong hardware block entries from device tree
 * @np:      device tree node holding the SDE catalog properties
 * @sde_cfg: catalog structure to populate with pingpong configuration
 *
 * Populates sde_cfg->pingpong_count and one sde_pingpong_cfg entry (plus a
 * heap-allocated sub-block descriptor) per offset found in the DT.
 * Returns 0 on success or a negative error code. Sub-blocks allocated
 * before a failure are released later by sde_hw_catalog_deinit().
 */
static int sde_pp_parse_dt(struct device_node *np, struct sde_mdss_cfg *sde_cfg)
{
	int rc, prop_count[PP_PROP_MAX], i;
	struct sde_prop_value *prop_value = NULL;
	bool prop_exists[PP_PROP_MAX];
	u32 off_count;
	struct sde_pingpong_cfg *pp;
	struct sde_pingpong_sub_blks *sblk;

	if (!sde_cfg) {
		SDE_ERROR("invalid argument\n");
		rc = -EINVAL;
		goto end;
	}

	/* scratch storage for every parsed pingpong property */
	prop_value = kzalloc(PP_PROP_MAX *
			sizeof(struct sde_prop_value), GFP_KERNEL);
	if (!prop_value) {
		rc = -ENOMEM;
		goto end;
	}

	/* off_count = number of pingpong instances described in DT */
	rc = _validate_dt_entry(np, pp_prop, ARRAY_SIZE(pp_prop), prop_count,
		&off_count);
	if (rc)
		goto end;

	sde_cfg->pingpong_count = off_count;

	rc = _read_dt_entry(np, pp_prop, ARRAY_SIZE(pp_prop), prop_count,
		prop_exists, prop_value);
	if (rc)
		goto end;

	for (i = 0; i < off_count; i++) {
		pp = sde_cfg->pingpong + i;
		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
		if (!sblk) {
			rc = -ENOMEM;
			/* catalog deinit will release the allocated blocks */
			goto end;
		}
		pp->sblk = sblk;

		pp->base = PROP_VALUE_ACCESS(prop_value, PP_OFF, i);
		pp->id = PINGPONG_0 + i;
		/* single shared length (index 0) applies to every instance */
		pp->len = PROP_VALUE_ACCESS(prop_value, PP_LEN, 0);

		/* tear-check sub-block is assumed present on every pingpong */
		sblk->te.base = PROP_VALUE_ACCESS(prop_value, TE_OFF, i);
		sblk->te.id = SDE_PINGPONG_TE;
		set_bit(SDE_PINGPONG_TE, &pp->features);

		/* non-zero TE2 offset implies split-fifo capability */
		sblk->te2.base = PROP_VALUE_ACCESS(prop_value, TE2_OFF, i);
		if (sblk->te2.base) {
			sblk->te2.id = SDE_PINGPONG_TE2;
			set_bit(SDE_PINGPONG_TE2, &pp->features);
			set_bit(SDE_PINGPONG_SPLIT, &pp->features);
		}

		if (PROP_VALUE_ACCESS(prop_value, PP_SLAVE, i))
			set_bit(SDE_PINGPONG_SLAVE, &pp->features);

		/* optional display stream compression sub-block */
		sblk->dsc.base = PROP_VALUE_ACCESS(prop_value, DSC_OFF, i);
		if (sblk->dsc.base) {
			sblk->dsc.id = SDE_PINGPONG_DSC;
			set_bit(SDE_PINGPONG_DSC, &pp->features);
		}
	}

end:
	kfree(prop_value);
	return rc;
}
+
+static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
+{
+ int rc, len, prop_count[SDE_PROP_MAX];
+ struct sde_prop_value *prop_value = NULL;
+ bool prop_exists[SDE_PROP_MAX];
+ const char *type;
+
+ if (!cfg) {
+ SDE_ERROR("invalid argument\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ prop_value = kzalloc(SDE_PROP_MAX *
+ sizeof(struct sde_prop_value), GFP_KERNEL);
+ if (!prop_value) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rc = _validate_dt_entry(np, sde_prop, ARRAY_SIZE(sde_prop), prop_count,
+ &len);
+ if (rc)
+ goto end;
+
+ rc = _read_dt_entry(np, sde_prop, ARRAY_SIZE(sde_prop), prop_count,
+ prop_exists, prop_value);
+ if (rc)
+ goto end;
+
+ cfg->mdss_count = 1;
+ cfg->mdss[0].base = MDSS_BASE_OFFSET;
+ cfg->mdss[0].id = MDP_TOP;
+
+ cfg->mdp_count = 1;
+ cfg->mdp[0].id = MDP_TOP;
+ cfg->mdp[0].base = PROP_VALUE_ACCESS(prop_value, SDE_OFF, 0);
+ cfg->mdp[0].len = PROP_VALUE_ACCESS(prop_value, SDE_LEN, 0);
+ if (!prop_exists[SDE_LEN])
+ cfg->mdp[0].len = DEFAULT_SDE_HW_BLOCK_LEN;
+
+ cfg->max_sspp_linewidth = PROP_VALUE_ACCESS(prop_value,
+ SSPP_LINEWIDTH, 0);
+ if (!prop_exists[SSPP_LINEWIDTH])
+ cfg->max_sspp_linewidth = DEFAULT_SDE_LINE_WIDTH;
+
+ cfg->max_mixer_width = PROP_VALUE_ACCESS(prop_value,
+ MIXER_LINEWIDTH, 0);
+ if (!prop_exists[MIXER_LINEWIDTH])
+ cfg->max_mixer_width = DEFAULT_SDE_LINE_WIDTH;
+
+ cfg->max_mixer_blendstages = PROP_VALUE_ACCESS(prop_value,
+ MIXER_BLEND, 0);
+ if (!prop_exists[MIXER_BLEND])
+ cfg->max_mixer_blendstages = DEFAULT_SDE_MIXER_BLENDSTAGES;
+
+ cfg->max_wb_linewidth = PROP_VALUE_ACCESS(prop_value, WB_LINEWIDTH, 0);
+ if (!prop_exists[WB_LINEWIDTH])
+ cfg->max_wb_linewidth = DEFAULT_SDE_LINE_WIDTH;
+
+ cfg->mdp[0].highest_bank_bit = PROP_VALUE_ACCESS(prop_value,
+ BANK_BIT, 0);
+ if (!prop_exists[BANK_BIT])
+ cfg->mdp[0].highest_bank_bit = DEFAULT_SDE_HIGHEST_BANK_BIT;
+
+ rc = of_property_read_string(np, sde_prop[QSEED_TYPE].prop_name, &type);
+ if (!rc && !strcmp(type, "qseedv3"))
+ cfg->qseed_type = SDE_SSPP_SCALER_QSEED3;
+ else if (!rc && !strcmp(type, "qseedv2"))
+ cfg->qseed_type = SDE_SSPP_SCALER_QSEED2;
+
+ rc = of_property_read_string(np, sde_prop[CSC_TYPE].prop_name, &type);
+ if (!rc && !strcmp(type, "csc"))
+ cfg->csc_type = SDE_SSPP_CSC;
+ else if (!rc && !strcmp(type, "csc-10bit"))
+ cfg->csc_type = SDE_SSPP_CSC_10BIT;
+
+ cfg->has_src_split = PROP_VALUE_ACCESS(prop_value, SRC_SPLIT, 0);
+end:
+ kfree(prop_value);
+ return rc;
+}
+
+static int sde_perf_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
+{
+ int rc, len, prop_count[PERF_PROP_MAX];
+ struct sde_prop_value *prop_value = NULL;
+ bool prop_exists[PERF_PROP_MAX];
+
+ if (!cfg) {
+ SDE_ERROR("invalid argument\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ prop_value = kzalloc(SDE_PROP_MAX *
+ sizeof(struct sde_prop_value), GFP_KERNEL);
+ if (!prop_value) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rc = _validate_dt_entry(np, sde_perf_prop, ARRAY_SIZE(sde_perf_prop),
+ prop_count, &len);
+ if (rc)
+ goto freeprop;
+
+ rc = _read_dt_entry(np, sde_perf_prop, ARRAY_SIZE(sde_perf_prop),
+ prop_count, prop_exists, prop_value);
+ if (rc)
+ goto freeprop;
+
+ cfg->perf.max_bw_low =
+ PROP_VALUE_ACCESS(prop_value, PERF_MAX_BW_LOW, 0);
+ cfg->perf.max_bw_high =
+ PROP_VALUE_ACCESS(prop_value, PERF_MAX_BW_HIGH, 0);
+
+freeprop:
+ kfree(prop_value);
+end:
+ return rc;
+}
+
+static void sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
+{
+ switch (hw_rev) {
+ case SDE_HW_VER_170:
+ case SDE_HW_VER_171:
+ case SDE_HW_VER_172:
+ /* update msm8996 target here */
+ break;
+ case SDE_HW_VER_300:
+ case SDE_HW_VER_400:
+ /* update cobalt and skunk target here */
+ break;
+ }
+}
+
+void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
+{
+ int i;
+
+ if (!sde_cfg)
+ return;
+
+ for (i = 0; i < sde_cfg->sspp_count; i++)
+ kfree(sde_cfg->sspp[i].sblk);
+
+ for (i = 0; i < sde_cfg->mixer_count; i++)
+ kfree(sde_cfg->mixer[i].sblk);
+
+ for (i = 0; i < sde_cfg->wb_count; i++)
+ kfree(sde_cfg->wb[i].sblk);
+
+ for (i = 0; i < sde_cfg->dspp_count; i++)
+ kfree(sde_cfg->dspp[i].sblk);
+
+ for (i = 0; i < sde_cfg->pingpong_count; i++)
+ kfree(sde_cfg->pingpong[i].sblk);
+
+ for (i = 0; i < sde_cfg->vbif_count; i++) {
+ kfree(sde_cfg->vbif[i].dynamic_ot_rd_tbl.cfg);
+ kfree(sde_cfg->vbif[i].dynamic_ot_wr_tbl.cfg);
+ }
+ kfree(sde_cfg);
+}
+
/*************************************************************
 * hardware catalog init
 *************************************************************/

/*
 * sde_hw_catalog_init - build the SDE hardware catalog from device tree
 * @dev:    drm device whose of_node carries the SDE properties
 * @hw_rev: hardware revision supplied by the caller
 *
 * Allocates the catalog and runs every per-block DT parser in dependency
 * order (mixers need dspp/pp mappings; cdm needs intf/wb mappings).
 *
 * NOTE(review): allocation failure returns ERR_PTR(-ENOMEM) while parse
 * failures return NULL -- callers must handle both conventions; confirm
 * whether this mix is intentional.
 */
struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
{
	int rc;
	struct sde_mdss_cfg *sde_cfg;
	struct device_node *np = dev->dev->of_node;

	sde_cfg = kzalloc(sizeof(*sde_cfg), GFP_KERNEL);
	if (!sde_cfg)
		return ERR_PTR(-ENOMEM);

	sde_cfg->hwversion = hw_rev;

	/* top-level properties must be parsed before any block parser */
	rc = sde_parse_dt(np, sde_cfg);
	if (rc)
		goto end;

	rc = sde_ctl_parse_dt(np, sde_cfg);
	if (rc)
		goto end;

	rc = sde_sspp_parse_dt(np, sde_cfg);
	if (rc)
		goto end;

	rc = sde_dspp_parse_dt(np, sde_cfg);
	if (rc)
		goto end;

	rc = sde_pp_parse_dt(np, sde_cfg);
	if (rc)
		goto end;

	/* mixer parsing should be done after dspp and pp for mapping setup */
	rc = sde_mixer_parse_dt(np, sde_cfg);
	if (rc)
		goto end;

	rc = sde_intf_parse_dt(np, sde_cfg);
	if (rc)
		goto end;

	rc = sde_wb_parse_dt(np, sde_cfg);
	if (rc)
		goto end;

	/* cdm parsing should be done after intf and wb for mapping setup */
	rc = sde_cdm_parse_dt(np, sde_cfg);
	if (rc)
		goto end;

	rc = sde_vbif_parse_dt(np, sde_cfg);
	if (rc)
		goto end;

	rc = sde_perf_parse_dt(np, sde_cfg);
	if (rc)
		goto end;

	/* apply per-revision overrides on top of the parsed data */
	sde_hardware_caps(sde_cfg, hw_rev);

	return sde_cfg;

end:
	/* releases everything allocated by the parsers above */
	sde_hw_catalog_deinit(sde_cfg);
	return NULL;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
new file mode 100644
index 000000000000..a8f9169aaf35
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -0,0 +1,716 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_CATALOG_H
+#define _SDE_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <linux/msm-bus.h>
+#include <drm/drmP.h>
+
/**
 * Max hardware block count: For ex: max 12 SSPP pipes or
 * 5 ctl paths. In all cases, it can have max 12 hardware blocks
 * based on current design
 */
#define MAX_BLOCKS	12

/*
 * Pack a hardware revision into one 32-bit value:
 * major in bits [31:28], minor in bits [27:16], step in bits [15:0].
 * Arguments are parenthesized so expression arguments expand safely.
 */
#define SDE_HW_VER(MAJOR, MINOR, STEP) ((((MAJOR) & 0xF) << 28)    |\
		(((MINOR) & 0xFFF) << 16)  |\
		((STEP) & 0xFFFF))

/* extract individual fields from a packed SDE_HW_VER() revision */
#define SDE_HW_MAJOR(rev)		((rev) >> 28)
#define SDE_HW_MINOR(rev)		(((rev) >> 16) & 0xFFF)
#define SDE_HW_STEP(rev)		((rev) & 0xFFFF)
#define SDE_HW_MAJOR_MINOR(rev)		((rev) >> 16)

#define IS_SDE_MAJOR_MINOR_SAME(rev1, rev2) \
	(SDE_HW_MAJOR_MINOR((rev1)) == SDE_HW_MAJOR_MINOR((rev2)))

#define SDE_HW_VER_170	SDE_HW_VER(1, 7, 0) /* 8996 v1.0 */
#define SDE_HW_VER_171	SDE_HW_VER(1, 7, 1) /* 8996 v2.0 */
#define SDE_HW_VER_172	SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */
#define SDE_HW_VER_300	SDE_HW_VER(3, 0, 0) /* cobalt v1.0 */
#define SDE_HW_VER_400	SDE_HW_VER(4, 0, 0) /* msmskunk v1.0 */

#define IS_MSMSKUNK_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_400)

/* maximum source image dimensions (14-bit width/height) */
#define MAX_IMG_WIDTH 0x3fff
#define MAX_IMG_HEIGHT 0x3fff

#define CRTC_DUAL_MIXERS	2

/* pack/unpack a color-processing block version as major.minor */
#define SDE_COLOR_PROCESS_VER(MAJOR, MINOR) \
		((((MAJOR) & 0xFFFF) << 16) | (((MINOR) & 0xFFFF)))
#define SDE_COLOR_PROCESS_MAJOR(version) (((version) & 0xFFFF0000) >> 16)
#define SDE_COLOR_PROCESS_MINOR(version) ((version) & 0xFFFF)
+
/**
 * MDP TOP BLOCK features (bit positions in the block's features mask)
 * @SDE_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
 * @SDE_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
 * @SDE_MDP_BWC, MDSS HW supports Bandwidth compression.
 * @SDE_MDP_UBWC_1_0, This chipsets supports Universal Bandwidth
 *                    compression initial revision
 * @SDE_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5
 * @SDE_MDP_CDP, Client driven prefetch
 * @SDE_MDP_MAX Maximum value
 */
enum {
	SDE_MDP_PANIC_PER_PIPE = 0x1,
	SDE_MDP_10BIT_SUPPORT,
	SDE_MDP_BWC,
	SDE_MDP_UBWC_1_0,
	SDE_MDP_UBWC_1_5,
	SDE_MDP_CDP,
	SDE_MDP_MAX
};

/**
 * SSPP sub-blocks/features (bit positions in the pipe's features mask)
 * @SDE_SSPP_SRC Src and fetch part of the pipes,
 * @SDE_SSPP_SCALER_QSEED2, QSEED2 algorithm support
 * @SDE_SSPP_SCALER_QSEED3, QSEED3 algorithm support
 * @SDE_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
 * @SDE_SSPP_CSC, Support of Color space conversion
 * @SDE_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
 * @SDE_SSPP_HSIC, Global HSIC control
 * @SDE_SSPP_MEMCOLOR Memory Color Support
 * @SDE_SSPP_IGC, Inverse gamma correction
 * @SDE_SSPP_PCC, Color correction support
 * @SDE_SSPP_CURSOR, SSPP can be used as a cursor layer
 * @SDE_SSPP_QOS, SSPP support QoS control, danger/safe/creq
 * @SDE_SSPP_MAX maximum value
 */
enum {
	SDE_SSPP_SRC = 0x1,
	SDE_SSPP_SCALER_QSEED2,
	SDE_SSPP_SCALER_QSEED3,
	SDE_SSPP_SCALER_RGB,
	SDE_SSPP_CSC,
	SDE_SSPP_CSC_10BIT,
	SDE_SSPP_HSIC,
	SDE_SSPP_MEMCOLOR,
	SDE_SSPP_IGC,
	SDE_SSPP_PCC,
	SDE_SSPP_CURSOR,
	SDE_SSPP_QOS,
	SDE_SSPP_MAX
};

/**
 * MIXER sub-blocks/features
 * @SDE_MIXER_LAYER Layer mixer layer blend configuration,
 * @SDE_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
 * @SDE_MIXER_GC Gamma correction block
 * @SDE_MIXER_MAX maximum value
 */
enum {
	SDE_MIXER_LAYER = 0x1,
	SDE_MIXER_SOURCESPLIT,
	SDE_MIXER_GC,
	SDE_MIXER_MAX
};
+
/**
 * DSPP sub-blocks
 * @SDE_DSPP_IGC DSPP Inverse gamma correction block
 * @SDE_DSPP_PCC Panel color correction block
 * @SDE_DSPP_GC Gamma correction block
 * @SDE_DSPP_HSIC Global HSIC block
 * @SDE_DSPP_MEMCOLOR Memory Color block
 * @SDE_DSPP_SIXZONE Six zone block
 * @SDE_DSPP_GAMUT Gamut block
 * @SDE_DSPP_DITHER Dither block
 * @SDE_DSPP_HIST Histogram block
 * @SDE_DSPP_VLUT PA VLUT block
 * @SDE_DSPP_AD AD block
 * @SDE_DSPP_MAX maximum value
 */
enum {
	SDE_DSPP_IGC = 0x1,
	SDE_DSPP_PCC,
	SDE_DSPP_GC,
	SDE_DSPP_HSIC,
	SDE_DSPP_MEMCOLOR,
	SDE_DSPP_SIXZONE,
	SDE_DSPP_GAMUT,
	SDE_DSPP_DITHER,
	SDE_DSPP_HIST,
	SDE_DSPP_VLUT,
	SDE_DSPP_AD,
	SDE_DSPP_MAX
};

/**
 * PINGPONG sub-blocks
 * @SDE_PINGPONG_TE Tear check block
 * @SDE_PINGPONG_TE2 Additional tear check block for split pipes
 * @SDE_PINGPONG_SPLIT PP block supports split fifo
 * @SDE_PINGPONG_SLAVE PP block is a suitable slave for split fifo
 * @SDE_PINGPONG_DSC, Display stream compression blocks
 * @SDE_PINGPONG_MAX maximum value
 */
enum {
	SDE_PINGPONG_TE = 0x1,
	SDE_PINGPONG_TE2,
	SDE_PINGPONG_SPLIT,
	SDE_PINGPONG_SLAVE,
	SDE_PINGPONG_DSC,
	SDE_PINGPONG_MAX
};

/**
 * CTL sub-blocks
 * @SDE_CTL_SPLIT_DISPLAY CTL supports video mode split display
 * @SDE_CTL_PINGPONG_SPLIT CTL supports pingpong split
 * @SDE_CTL_MAX maximum value
 */
enum {
	SDE_CTL_SPLIT_DISPLAY = 0x1,
	SDE_CTL_PINGPONG_SPLIT,
	SDE_CTL_MAX
};
+
/**
 * WB sub-blocks and features
 * @SDE_WB_LINE_MODE Writeback module supports line/linear mode
 * @SDE_WB_BLOCK_MODE Writeback module supports block mode read
 * @SDE_WB_ROTATE rotation support, this is available if writeback
 *                supports block mode read (alias of SDE_WB_BLOCK_MODE)
 * @SDE_WB_CSC Writeback color conversion block support
 * @SDE_WB_CHROMA_DOWN, Writeback chroma down block,
 * @SDE_WB_DOWNSCALE, Writeback integer downscaler,
 * @SDE_WB_DITHER, Dither block
 * @SDE_WB_TRAFFIC_SHAPER, Writeback traffic shaper block
 * @SDE_WB_UBWC_1_0, Writeback Universal bandwidth compression 1.0
 *                   support
 * @SDE_WB_YUV_CONFIG Writeback supports output of YUV colorspace
 * @SDE_WB_PIPE_ALPHA Writeback supports pipe alpha
 * @SDE_WB_XY_ROI_OFFSET Writeback supports x/y-offset of out ROI in
 *                       the destination image
 * @SDE_WB_MAX maximum value
 *
 * NOTE(review): an SDE_WB_UBWC_1_5 feature was documented here but has
 * no enum value below -- confirm whether it should be added.
 */
enum {
	SDE_WB_LINE_MODE = 0x1,
	SDE_WB_BLOCK_MODE,
	SDE_WB_ROTATE = SDE_WB_BLOCK_MODE,
	SDE_WB_CSC,
	SDE_WB_CHROMA_DOWN,
	SDE_WB_DOWNSCALE,
	SDE_WB_DITHER,
	SDE_WB_TRAFFIC_SHAPER,
	SDE_WB_UBWC_1_0,
	SDE_WB_YUV_CONFIG,
	SDE_WB_PIPE_ALPHA,
	SDE_WB_XY_ROI_OFFSET,
	SDE_WB_MAX
};

/**
 * VBIF sub-blocks and features
 * @SDE_VBIF_QOS_OTLIM VBIF supports OT Limit
 * @SDE_VBIF_MAX maximum value
 */
enum {
	SDE_VBIF_QOS_OTLIM = 0x1,
	SDE_VBIF_MAX
};
+
/**
 * MACRO SDE_HW_BLK_INFO - information of HW blocks inside SDE
 * @id:       enum identifying this block
 * @base:     register base offset to mdss
 * @len:      length of hardware block
 * @features: bit mask identifying sub-blocks/features
 *
 * The expansion deliberately omits the final semicolon (matching
 * SDE_HW_SUBBLK_INFO) so that `SDE_HW_BLK_INFO;` inside a struct does
 * not produce a stray empty declaration; the old trailing backslash
 * also silently continued the macro onto the following source line.
 */
#define SDE_HW_BLK_INFO \
	u32 id; \
	u32 base; \
	u32 len; \
	unsigned long features
+
/**
 * MACRO SDE_HW_SUBBLK_INFO - information of HW sub-block inside SDE
 * @id:   enum identifying this sub-block
 * @base: offset of this sub-block relative to the block
 *        offset
 * @len:  register block length of this sub-block
 *
 * Expansion omits the final semicolon; users write `SDE_HW_SUBBLK_INFO;`.
 */
#define SDE_HW_SUBBLK_INFO \
	u32 id; \
	u32 base; \
	u32 len
+
/**
 * struct sde_src_blk: SSPP part of the source pipes
 * @info: HW register and features supported by this sub-blk
 */
struct sde_src_blk {
	SDE_HW_SUBBLK_INFO;
};

/**
 * struct sde_scaler_blk: Scaler information
 * @info: HW register and features supported by this sub-blk
 * @version: qseed block revision
 */
struct sde_scaler_blk {
	SDE_HW_SUBBLK_INFO;
	u32 version;
};

/**
 * struct sde_csc_blk: Color space converter sub-block information
 * @info: HW register and features supported by this sub-blk
 */
struct sde_csc_blk {
	SDE_HW_SUBBLK_INFO;
};

/**
 * struct sde_pp_blk : Pixel processing sub-blk information
 * @info: HW register and features supported by this sub-blk
 * @version: HW Algorithm version
 */
struct sde_pp_blk {
	SDE_HW_SUBBLK_INFO;
	u32 version;
};

/**
 * struct sde_format_extended - define sde specific pixel format+modifier
 * @fourcc_format: Base FOURCC pixel format code
 * @modifier: 64-bit drm format modifier, same modifier must be applied to all
 *            framebuffer planes
 */
struct sde_format_extended {
	uint32_t fourcc_format;
	uint64_t modifier;
};
+
/**
 * struct sde_sspp_sub_blks : SSPP sub-blocks
 * @maxlinewidth: max line width supported by this pipe
 * @danger_lut_linear: LUT to generate danger signals for linear format
 * @safe_lut_linear: LUT to generate safe signals for linear format
 * @danger_lut_tile: LUT to generate danger signals for tile format
 * @safe_lut_tile: LUT to generate safe signals for tile format
 * @danger_lut_nrt: LUT to generate danger signals for non-realtime use case
 * @safe_lut_nrt: LUT to generate safe signals for non-realtime use case
 * @creq_lut_nrt: LUT to generate creq signals for non-realtime use case
 * @creq_vblank: creq priority during vertical blanking
 * @danger_vblank: danger priority during vertical blanking
 * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
 * @maxdwnscale: max downscale ratio supported (without DECIMATION)
 * @maxupscale: max upscale ratio supported
 * @maxhdeciexp: max horizontal decimation supported (2^value)
 * @maxvdeciexp: max vertical decimation supported (2^value)
 * @src_blk: source/fetch sub-block info
 * @scaler_blk: scaler sub-block info
 * @csc_blk: color space converter sub-block info
 * @hsic_blk: HSIC control sub-block info
 * @memcolor_blk: memory color sub-block info
 * @pcc_blk: color correction sub-block info
 * @igc_blk: inverse gamma correction sub-block info
 * @format_list: Pointer to list of supported formats
 */
struct sde_sspp_sub_blks {
	u32 maxlinewidth;
	u32 danger_lut_linear;
	u32 safe_lut_linear;
	u32 danger_lut_tile;
	u32 safe_lut_tile;
	u32 danger_lut_nrt;
	u32 safe_lut_nrt;
	u32 creq_lut_nrt;
	u32 creq_vblank;
	u32 danger_vblank;
	u32 pixel_ram_size;
	u32 maxdwnscale;
	u32 maxupscale;
	u32 maxhdeciexp; /* max decimation is 2^value */
	u32 maxvdeciexp; /* max decimation is 2^value */
	struct sde_src_blk src_blk;
	struct sde_scaler_blk scaler_blk;
	struct sde_pp_blk csc_blk;
	struct sde_pp_blk hsic_blk;
	struct sde_pp_blk memcolor_blk;
	struct sde_pp_blk pcc_blk;
	struct sde_pp_blk igc_blk;

	const struct sde_format_extended *format_list;
};
+
/**
 * struct sde_lm_sub_blks: information of mixer block
 * @maxwidth: Max pixel width supported by this mixer
 * @maxblendstages: Max number of blend-stages supported
 * @blendstage_base: Blend-stage register base offset
 * @gc: gamma correction block
 */
struct sde_lm_sub_blks {
	u32 maxwidth;
	u32 maxblendstages;
	u32 blendstage_base[MAX_BLOCKS];
	struct sde_pp_blk gc;
};

/**
 * struct sde_dspp_sub_blks: per-DSPP pixel-processing sub-block info,
 * one entry for each feature named in the SDE_DSPP_* enum
 */
struct sde_dspp_sub_blks {
	struct sde_pp_blk igc;
	struct sde_pp_blk pcc;
	struct sde_pp_blk gc;
	struct sde_pp_blk hsic;
	struct sde_pp_blk memcolor;
	struct sde_pp_blk sixzone;
	struct sde_pp_blk gamut;
	struct sde_pp_blk dither;
	struct sde_pp_blk hist;
	struct sde_pp_blk ad;
	struct sde_pp_blk vlut;
};

/**
 * struct sde_pingpong_sub_blks: tear-check (te/te2) and DSC sub-blocks
 * of a pingpong instance
 */
struct sde_pingpong_sub_blks {
	struct sde_pp_blk te;
	struct sde_pp_blk te2;
	struct sde_pp_blk dsc;
};

/**
 * struct sde_wb_sub_blocks: writeback sub-block limits
 * @maxlinewidth: max line width supported by this writeback block
 */
struct sde_wb_sub_blocks {
	u32 maxlinewidth;
};

/* base MDSS wrapper block: common id/base/len/features only */
struct sde_mdss_base_cfg {
	SDE_HW_BLK_INFO;
};
+
/**
 * sde_clk_ctrl_type - Defines top level clock control signals
 */
enum sde_clk_ctrl_type {
	SDE_CLK_CTRL_NONE,
	SDE_CLK_CTRL_VIG0,
	SDE_CLK_CTRL_VIG1,
	SDE_CLK_CTRL_VIG2,
	SDE_CLK_CTRL_VIG3,
	SDE_CLK_CTRL_VIG4,
	SDE_CLK_CTRL_RGB0,
	SDE_CLK_CTRL_RGB1,
	SDE_CLK_CTRL_RGB2,
	SDE_CLK_CTRL_RGB3,
	SDE_CLK_CTRL_DMA0,
	SDE_CLK_CTRL_DMA1,
	SDE_CLK_CTRL_CURSOR0,
	SDE_CLK_CTRL_CURSOR1,
	SDE_CLK_CTRL_WB0,
	SDE_CLK_CTRL_WB1,
	SDE_CLK_CTRL_WB2,
	SDE_CLK_CTRL_MAX,
};

/**
 * struct sde_clk_ctrl_reg : Clock control register
 * @reg_off: register offset
 * @bit_off: bit offset
 */
struct sde_clk_ctrl_reg {
	u32 reg_off;
	u32 bit_off;
};

/**
 * struct sde_mdp_cfg : MDP TOP-BLK instance info
 * @id: index identifying this block
 * @base: register base offset to mdss
 * @features: bit mask identifying sub-blocks/features
 * @highest_bank_bit: UBWC parameter
 * @clk_ctrls: clock control register definition, one per
 *             sde_clk_ctrl_type signal
 */
struct sde_mdp_cfg {
	SDE_HW_BLK_INFO;
	u32 highest_bank_bit;
	struct sde_clk_ctrl_reg clk_ctrls[SDE_CLK_CTRL_MAX];
};
+
/**
 * struct sde_ctl_cfg : CTL (control path) instance info
 * @id: index identifying this block
 * @base: register base offset to mdss
 * @features: bit mask identifying sub-blocks/features (SDE_CTL_*)
 */
struct sde_ctl_cfg {
	SDE_HW_BLK_INFO;
};
+
/**
 * struct sde_sspp_cfg - information of source pipes
 * @id: index identifying this block
 * @base: register offset of this block
 * @features: bit mask identifying sub-blocks/features
 * @sblk: SSPP sub-blocks information
 * @xin_id: bus client identifier
 * @clk_ctrl: clock control identifier
 */
struct sde_sspp_cfg {
	SDE_HW_BLK_INFO;
	const struct sde_sspp_sub_blks *sblk;
	u32 xin_id;
	enum sde_clk_ctrl_type clk_ctrl;
};

/**
 * struct sde_lm_cfg - information of layer mixer blocks
 * @id: index identifying this block
 * @base: register offset of this block
 * @features: bit mask identifying sub-blocks/features
 * @sblk: LM Sub-blocks information
 * @dspp: ID of connected DSPP, DSPP_MAX if unsupported
 * @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
 * @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
 */
struct sde_lm_cfg {
	SDE_HW_BLK_INFO;
	const struct sde_lm_sub_blks *sblk;
	u32 dspp;
	u32 pingpong;
	unsigned long lm_pair_mask;
};

/**
 * struct sde_dspp_cfg - information of DSPP blocks
 * @id: enum identifying this block
 * @base: register offset of this block
 * @features: bit mask identifying sub-blocks/features
 *            supported by this block
 * @sblk: sub-blocks information
 */
struct sde_dspp_cfg {
	SDE_HW_BLK_INFO;
	const struct sde_dspp_sub_blks *sblk;
};

/**
 * struct sde_pingpong_cfg - information of PING-PONG blocks
 * @id: enum identifying this block
 * @base: register offset of this block
 * @features: bit mask identifying sub-blocks/features
 * @sblk: sub-blocks information
 */
struct sde_pingpong_cfg {
	SDE_HW_BLK_INFO;
	const struct sde_pingpong_sub_blks *sblk;
};

/**
 * struct sde_cdm_cfg - information of chroma down blocks
 * @id: enum identifying this block
 * @base: register offset of this block
 * @features: bit mask identifying sub-blocks/features
 * @intf_connect: Bitmask of INTF IDs this CDM can connect to
 * @wb_connect: Bitmask of Writeback IDs this CDM can connect to
 */
struct sde_cdm_cfg {
	SDE_HW_BLK_INFO;
	unsigned long intf_connect;
	unsigned long wb_connect;
};

/**
 * struct sde_intf_cfg - information of timing engine blocks
 * @id: enum identifying this block
 * @base: register offset of this block
 * @features: bit mask identifying sub-blocks/features
 * @type: Interface type (DSI, DP, HDMI)
 * @controller_id: Controller Instance ID in case of multiple of intf type
 * @prog_fetch_lines_worst_case: Worst case latency num lines needed
 *                               to prefetch
 */
struct sde_intf_cfg {
	SDE_HW_BLK_INFO;
	u32 type; /* interface type */
	u32 controller_id;
	u32 prog_fetch_lines_worst_case;
};
+
/**
 * struct sde_wb_cfg - information of writeback blocks
 * @id: enum identifying this block
 * @base: register offset of this block
 * @features: bit mask identifying sub-blocks/features
 * @sblk: sub-block information
 * @format_list: Pointer to list of supported formats
 * @vbif_idx: vbif identifier
 * @xin_id: client interface identifier
 * @clk_ctrl: clock control identifier
 */
struct sde_wb_cfg {
	SDE_HW_BLK_INFO;
	const struct sde_wb_sub_blocks *sblk;
	const struct sde_format_extended *format_list;
	u32 vbif_idx;
	u32 xin_id;
	enum sde_clk_ctrl_type clk_ctrl;
};

/**
 * struct sde_vbif_dynamic_ot_cfg - dynamic OT setting
 * @pps: pixels per second
 * @ot_limit: OT limit to use up to specified pixel per second
 */
struct sde_vbif_dynamic_ot_cfg {
	u64 pps;
	u32 ot_limit;
};

/**
 * struct sde_vbif_dynamic_ot_tbl - dynamic OT setting table
 * @count: length of cfg
 * @cfg: pointer to array of configuration settings with
 *       ascending requirements
 */
struct sde_vbif_dynamic_ot_tbl {
	u32 count;
	struct sde_vbif_dynamic_ot_cfg *cfg;
};

/**
 * struct sde_vbif_cfg - information of VBIF blocks
 * @id: enum identifying this block
 * @base: register offset of this block
 * @features: bit mask identifying sub-blocks/features
 * @default_ot_rd_limit: default OT read limit
 * @default_ot_wr_limit: default OT write limit
 * @xin_halt_timeout: maximum time (in usec) for xin to halt
 * @dynamic_ot_rd_tbl: dynamic OT read configuration table
 * @dynamic_ot_wr_tbl: dynamic OT write configuration table
 */
struct sde_vbif_cfg {
	SDE_HW_BLK_INFO;
	u32 default_ot_rd_limit;
	u32 default_ot_wr_limit;
	u32 xin_halt_timeout;
	struct sde_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
	struct sde_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
};

/**
 * struct sde_perf_cfg - performance control settings
 * @max_bw_low: low threshold of maximum bandwidth (kbps)
 * @max_bw_high: high threshold of maximum bandwidth (kbps)
 */
struct sde_perf_cfg {
	u32 max_bw_low;
	u32 max_bw_high;
};
+
/**
 * struct sde_mdss_cfg - information of MDSS HW
 * This is the main catalog data structure representing
 * this HW version. Contains number of instances,
 * register offsets, capabilities of the all MDSS HW sub-blocks.
 *
 * @hwversion            packed SDE_HW_VER() hardware revision
 * @max_sspp_linewidth   max source pipe line width support.
 * @max_mixer_width      max layer mixer line width support.
 * @max_mixer_blendstages max layer mixer blend stages or
 *                       supported z order
 * @max_wb_linewidth     max writeback line width support.
 * @highest_bank_bit     highest memory bit setting for tile buffers.
 * @qseed_type           qseed2 or qseed3 support.
 * @csc_type             csc or csc_10bit support.
 * @has_src_split        source split feature status
 * @has_cdp              Client driver prefetch feature status
 *
 * Each hardware block type below is stored as a valid-entry count
 * followed by a fixed-size array of per-instance configurations.
 */
struct sde_mdss_cfg {
	u32 hwversion;

	u32 max_sspp_linewidth;
	u32 max_mixer_width;
	u32 max_mixer_blendstages;
	u32 max_wb_linewidth;
	u32 highest_bank_bit;
	u32 qseed_type;
	u32 csc_type;
	bool has_src_split;
	bool has_cdp;

	u32 mdss_count;
	struct sde_mdss_base_cfg mdss[MAX_BLOCKS];

	u32 mdp_count;
	struct sde_mdp_cfg mdp[MAX_BLOCKS];

	u32 ctl_count;
	struct sde_ctl_cfg ctl[MAX_BLOCKS];

	u32 sspp_count;
	struct sde_sspp_cfg sspp[MAX_BLOCKS];

	u32 mixer_count;
	struct sde_lm_cfg mixer[MAX_BLOCKS];

	u32 dspp_count;
	struct sde_dspp_cfg dspp[MAX_BLOCKS];

	u32 pingpong_count;
	struct sde_pingpong_cfg pingpong[MAX_BLOCKS];

	u32 cdm_count;
	struct sde_cdm_cfg cdm[MAX_BLOCKS];

	u32 intf_count;
	struct sde_intf_cfg intf[MAX_BLOCKS];

	u32 wb_count;
	struct sde_wb_cfg wb[MAX_BLOCKS];

	u32 vbif_count;
	struct sde_vbif_cfg vbif[MAX_BLOCKS];
	/* Add additional block data structures here */

	struct sde_perf_cfg perf;
};

/**
 * struct sde_mdss_hw_cfg_handler - per-revision catalog constructor entry
 * @major: major hardware revision this handler covers
 * @minor: minor hardware revision this handler covers
 * @cfg_init: constructor returning the catalog for the given revision
 */
struct sde_mdss_hw_cfg_handler {
	u32 major;
	u32 minor;
	struct sde_mdss_cfg* (*cfg_init)(u32);
};
+
/*
 * Access Macros - shorthand accessors for catalog block arrays
 *
 * NOTE(review): BLK_VIG/BLK_RGB/BLK_DMA/BLK_CURSOR/BLK_AD reference
 * members (vig, rgb, dma, cursor, ad) that struct sde_mdss_cfg does not
 * declare; they will fail to compile if used -- confirm intended fields.
 */
#define BLK_MDP(s) ((s)->mdp)
#define BLK_CTL(s) ((s)->ctl)
#define BLK_VIG(s) ((s)->vig)
#define BLK_RGB(s) ((s)->rgb)
#define BLK_DMA(s) ((s)->dma)
#define BLK_CURSOR(s) ((s)->cursor)
#define BLK_MIXER(s) ((s)->mixer)
#define BLK_DSPP(s) ((s)->dspp)
#define BLK_PINGPONG(s) ((s)->pingpong)
#define BLK_CDM(s) ((s)->cdm)
#define BLK_INTF(s) ((s)->intf)
#define BLK_WB(s) ((s)->wb)
#define BLK_AD(s) ((s)->ad)

/**
 * sde_hw_catalog_init - sde hardware catalog init API parses dtsi property
 * and stores all parsed offset, hardware capabilities in config structure.
 * @dev: drm device node.
 * @hw_rev: caller needs provide the hardware revision before parsing.
 *
 * Return: parsed sde config structure
 */
struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev);

/**
 * sde_hw_catalog_deinit - sde hardware catalog cleanup
 * @sde_cfg: pointer returned from init function
 */
void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg);

#endif /* _SDE_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
new file mode 100644
index 000000000000..296694422653
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
@@ -0,0 +1,134 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+
/*
 * plane_formats - RGB pixel formats (plus QCOM-compressed variants of
 * RGBA8888/RGBX8888/RGB565) advertised for planes without YUV support.
 * Zero-terminated.
 */
static const struct sde_format_extended plane_formats[] = {
	{DRM_FORMAT_ARGB8888, 0},
	{DRM_FORMAT_ABGR8888, 0},
	{DRM_FORMAT_RGBA8888, 0},
	{DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
	{DRM_FORMAT_BGRA8888, 0},
	{DRM_FORMAT_XRGB8888, 0},
	{DRM_FORMAT_RGBX8888, 0},
	{DRM_FORMAT_BGRX8888, 0},
	{DRM_FORMAT_XBGR8888, 0},
	{DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
	{DRM_FORMAT_RGB888, 0},
	{DRM_FORMAT_BGR888, 0},
	{DRM_FORMAT_RGB565, 0},
	{DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
	{DRM_FORMAT_BGR565, 0},
	{DRM_FORMAT_ARGB1555, 0},
	{DRM_FORMAT_ABGR1555, 0},
	{DRM_FORMAT_RGBA5551, 0},
	{DRM_FORMAT_BGRA5551, 0},
	{DRM_FORMAT_XRGB1555, 0},
	{DRM_FORMAT_XBGR1555, 0},
	{DRM_FORMAT_RGBX5551, 0},
	{DRM_FORMAT_BGRX5551, 0},
	{DRM_FORMAT_ARGB4444, 0},
	{DRM_FORMAT_ABGR4444, 0},
	{DRM_FORMAT_RGBA4444, 0},
	{DRM_FORMAT_BGRA4444, 0},
	{DRM_FORMAT_XRGB4444, 0},
	{DRM_FORMAT_XBGR4444, 0},
	{DRM_FORMAT_RGBX4444, 0},
	{DRM_FORMAT_BGRX4444, 0},
	{0, 0},
};
+
/*
 * plane_formats_yuv - superset of the RGB formats above plus YUV
 * packed/semi-planar/planar formats, for YUV-capable planes.
 * Zero-terminated.
 */
static const struct sde_format_extended plane_formats_yuv[] = {
	{DRM_FORMAT_ARGB8888, 0},
	{DRM_FORMAT_ABGR8888, 0},
	{DRM_FORMAT_RGBA8888, 0},
	{DRM_FORMAT_BGRX8888, 0},
	{DRM_FORMAT_RGBA8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
	{DRM_FORMAT_BGRA8888, 0},
	{DRM_FORMAT_XRGB8888, 0},
	{DRM_FORMAT_XBGR8888, 0},
	{DRM_FORMAT_RGBX8888, 0},
	{DRM_FORMAT_RGBX8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
	{DRM_FORMAT_RGB888, 0},
	{DRM_FORMAT_BGR888, 0},
	{DRM_FORMAT_RGB565, 0},
	{DRM_FORMAT_RGB565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
	{DRM_FORMAT_BGR565, 0},
	{DRM_FORMAT_ARGB1555, 0},
	{DRM_FORMAT_ABGR1555, 0},
	{DRM_FORMAT_RGBA5551, 0},
	{DRM_FORMAT_BGRA5551, 0},
	{DRM_FORMAT_XRGB1555, 0},
	{DRM_FORMAT_XBGR1555, 0},
	{DRM_FORMAT_RGBX5551, 0},
	{DRM_FORMAT_BGRX5551, 0},
	{DRM_FORMAT_ARGB4444, 0},
	{DRM_FORMAT_ABGR4444, 0},
	{DRM_FORMAT_RGBA4444, 0},
	{DRM_FORMAT_BGRA4444, 0},
	{DRM_FORMAT_XRGB4444, 0},
	{DRM_FORMAT_XBGR4444, 0},
	{DRM_FORMAT_RGBX4444, 0},
	{DRM_FORMAT_BGRX4444, 0},

	{DRM_FORMAT_NV12, 0},
	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
	{DRM_FORMAT_NV21, 0},
	{DRM_FORMAT_NV16, 0},
	{DRM_FORMAT_NV61, 0},
	{DRM_FORMAT_VYUY, 0},
	{DRM_FORMAT_UYVY, 0},
	{DRM_FORMAT_YUYV, 0},
	{DRM_FORMAT_YVYU, 0},
	{DRM_FORMAT_YUV420, 0},
	{DRM_FORMAT_YVU420, 0},
	{0, 0},
};
+
/*
 * wb2_formats - output formats supported by writeback block 2:
 * linear RGB variants followed by a small set of YUV formats.
 * Zero-terminated.
 */
static const struct sde_format_extended wb2_formats[] = {
	{DRM_FORMAT_RGB565, 0},
	{DRM_FORMAT_RGB888, 0},
	{DRM_FORMAT_ARGB8888, 0},
	{DRM_FORMAT_RGBA8888, 0},
	{DRM_FORMAT_XRGB8888, 0},
	{DRM_FORMAT_RGBX8888, 0},
	{DRM_FORMAT_ARGB1555, 0},
	{DRM_FORMAT_RGBA5551, 0},
	{DRM_FORMAT_XRGB1555, 0},
	{DRM_FORMAT_RGBX5551, 0},
	{DRM_FORMAT_ARGB4444, 0},
	{DRM_FORMAT_RGBA4444, 0},
	{DRM_FORMAT_RGBX4444, 0},
	{DRM_FORMAT_XRGB4444, 0},

	{DRM_FORMAT_BGR565, 0},
	{DRM_FORMAT_BGR888, 0},
	{DRM_FORMAT_ABGR8888, 0},
	{DRM_FORMAT_BGRA8888, 0},
	{DRM_FORMAT_BGRX8888, 0},
	{DRM_FORMAT_XBGR8888, 0},
	{DRM_FORMAT_ABGR1555, 0},
	{DRM_FORMAT_BGRA5551, 0},
	{DRM_FORMAT_XBGR1555, 0},
	{DRM_FORMAT_BGRX5551, 0},
	{DRM_FORMAT_ABGR4444, 0},
	{DRM_FORMAT_BGRA4444, 0},
	{DRM_FORMAT_BGRX4444, 0},
	{DRM_FORMAT_XBGR4444, 0},

	{DRM_FORMAT_YUV420, 0},
	{DRM_FORMAT_NV12, 0},
	{DRM_FORMAT_NV16, 0},
	{DRM_FORMAT_YUYV, 0},

	{0, 0},
};
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
new file mode 100644
index 000000000000..c7cbb93bece4
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
@@ -0,0 +1,342 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_cdm.h"
+
+#define CDM_CSC_10_OPMODE 0x000
+#define CDM_CSC_10_BASE 0x004
+
+#define CDM_CDWN2_OP_MODE 0x100
+#define CDM_CDWN2_CLAMP_OUT 0x104
+#define CDM_CDWN2_PARAMS_3D_0 0x108
+#define CDM_CDWN2_PARAMS_3D_1 0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0 0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1 0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2 0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
+#define CDM_CDWN2_COEFF_COSITE_V 0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C
+#define CDM_CDWN2_OUT_SIZE 0x130
+
+#define CDM_HDMI_PACK_OP_MODE 0x200
+#define CDM_CSC_10_MATRIX_COEFF_0 0x004
+
+/**
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ *
+ * All four tables below are pure lookup data written to the CDWN2
+ * coefficient registers; declare them const so they live in read-only
+ * memory and cannot be modified by mistake.
+ */
+static const u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/**
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static const u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/**
+ * Vertical coefficients for cosite chroma downscale
+ */
+static const u32 cosite_v_coeff[] = {0x00080004};
+
+/**
+ * Vertical coefficients for offsite chroma downscale
+ */
+static const u32 offsite_v_coeff[] = {0x00060002};
+
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
+/*
+ * NOTE(review): initializer order assumed to match struct sde_csc_cfg
+ * (3x3 matrix, pre-bias, post-bias, pre-clamp, post-clamp) -- confirm
+ * against sde_hw_mdss.h.  Not const because sde_hw_cdm_ops.setup_csc_data
+ * takes a non-const struct sde_csc_cfg pointer.
+ */
+static struct sde_csc_cfg rgb2yuv_cfg = {
+	{
+		0x0083, 0x0102, 0x0032,
+		0x1fb5, 0x1f6c, 0x00e1,
+		0x00e1, 0x1f45, 0x1fdc
+	},
+	{ 0x00, 0x00, 0x00 },
+	{ 0x0040, 0x0200, 0x0200 },
+	{ 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+	{ 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
+
+/*
+ * Find the catalog entry for the requested CDM instance and fill in its
+ * register block mapping.  Returns ERR_PTR(-EINVAL) when the id is not
+ * present in the catalog.
+ */
+static struct sde_cdm_cfg *_cdm_offset(enum sde_cdm cdm,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	struct sde_cdm_cfg *cfg;
+	int i;
+
+	for (i = 0; i < m->cdm_count; i++) {
+		cfg = &m->cdm[i];
+		if (cfg->id != cdm)
+			continue;
+
+		b->base_off = addr;
+		b->blk_off = cfg->base;
+		b->hwversion = m->hwversion;
+		b->log_mask = SDE_DBG_MASK_CDM;
+		return cfg;
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Program the 10-bit CSC sub-block of the CDM: the 3x3 matrix (two s16
+ * coefficients packed per 32-bit register), the pre/post clamp limit
+ * pairs, and the pre/post bias values, all at fixed offsets from
+ * CDM_CSC_10_MATRIX_COEFF_0.  Register write order follows the hardware
+ * layout and is kept as-is.
+ */
+static void sde_hw_cdm_setup_csc_10bit(struct sde_hw_cdm *ctx,
+		struct sde_csc_cfg *data)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 csc_reg_off = CDM_CSC_10_MATRIX_COEFF_0;
+	u32 val;
+
+	/* matrix coeff */
+	val = data->csc_mv[0] | (data->csc_mv[1] << 16);
+	SDE_REG_WRITE(c, csc_reg_off, val);
+	val = data->csc_mv[2] | (data->csc_mv[3] << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0x4, val);
+	val = data->csc_mv[4] | (data->csc_mv[5] << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0x8, val);
+	val = data->csc_mv[6] | (data->csc_mv[7] << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0xc, val);
+	val = data->csc_mv[8];
+	SDE_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+	/* Pre clamp */
+	val = (data->csc_pre_lv[0] << 16) | data->csc_pre_lv[1];
+	SDE_REG_WRITE(c, csc_reg_off + 0x14, val);
+	val = (data->csc_pre_lv[2] << 16) | data->csc_pre_lv[3];
+	SDE_REG_WRITE(c, csc_reg_off + 0x18, val);
+	val = (data->csc_pre_lv[4] << 16) | data->csc_pre_lv[5];
+	SDE_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+	/* Post clamp */
+	val = (data->csc_post_lv[0] << 16) | data->csc_post_lv[1];
+	SDE_REG_WRITE(c, csc_reg_off + 0x20, val);
+	val = (data->csc_post_lv[2] << 16) | data->csc_post_lv[3];
+	SDE_REG_WRITE(c, csc_reg_off + 0x24, val);
+	val = (data->csc_post_lv[4] << 16) | data->csc_post_lv[5];
+	SDE_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+	/* Pre-Bias */
+	SDE_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+	/* Post-Bias */
+	SDE_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
+}
+
+/*
+ * Configure the CDWN2 (chroma down-sampling) sub-block: output bit depth,
+ * horizontal and vertical down-sample methods (with filter coefficients
+ * where applicable), output size and clamp.
+ *
+ * Fixes over the original: the COSITE/OFFSITE case comments wrongly said
+ * "Average"; the vertical-type default case now logs an error just like
+ * the horizontal path instead of failing silently.
+ *
+ * Return: 0 on success, -EINVAL for an unknown down-sample type.
+ */
+static int sde_hw_cdm_setup_cdwn(struct sde_hw_cdm *ctx,
+		struct sde_hw_cdm_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 opmode = 0;
+	u32 out_size = 0;
+
+	if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
+		opmode &= ~BIT(7);
+	else
+		opmode |= BIT(7);
+
+	/* ENABLE DWNS_H bit */
+	opmode |= BIT(1);
+
+	switch (cfg->h_cdwn_type) {
+	case CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_H field */
+		opmode &= ~(0x18);
+		/* CLEAR DWNS_H bit */
+		opmode &= ~BIT(1);
+		break;
+	case CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_H field (pixel drop is 0) */
+		opmode &= ~(0x18);
+		break;
+	case CDM_CDWN_AVG:
+		/* Set METHOD_H field (average is 0x1) */
+		opmode &= ~(0x18);
+		opmode |= (0x1 << 0x3);
+		break;
+	case CDM_CDWN_COSITE:
+		/* Set METHOD_H field (co-site is 0x2) */
+		opmode &= ~(0x18);
+		opmode |= (0x2 << 0x3);
+		/* Co-site horizontal coefficients */
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+				cosite_h_coeff[0]);
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+				cosite_h_coeff[1]);
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+				cosite_h_coeff[2]);
+		break;
+	case CDM_CDWN_OFFSITE:
+		/* Set METHOD_H field (off-site is 0x3) */
+		opmode &= ~(0x18);
+		opmode |= (0x3 << 0x3);
+
+		/* Off-site horizontal coefficients */
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+				offsite_h_coeff[0]);
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+				offsite_h_coeff[1]);
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+				offsite_h_coeff[2]);
+		break;
+	default:
+		pr_err("%s invalid horz down sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	/* ENABLE DWNS_V bit */
+	opmode |= BIT(2);
+
+	switch (cfg->v_cdwn_type) {
+	case CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_V field */
+		opmode &= ~(0x60);
+		/* CLEAR DWNS_V bit */
+		opmode &= ~BIT(2);
+		break;
+	case CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_V field (pixel drop is 0) */
+		opmode &= ~(0x60);
+		break;
+	case CDM_CDWN_AVG:
+		/* Set METHOD_V field (average is 0x1) */
+		opmode &= ~(0x60);
+		opmode |= (0x1 << 0x5);
+		break;
+	case CDM_CDWN_COSITE:
+		/* Set METHOD_V field (co-site is 0x2) */
+		opmode &= ~(0x60);
+		opmode |= (0x2 << 0x5);
+		/* Co-site vertical coefficients */
+		SDE_REG_WRITE(c,
+				CDM_CDWN2_COEFF_COSITE_V,
+				cosite_v_coeff[0]);
+		break;
+	case CDM_CDWN_OFFSITE:
+		/* Set METHOD_V field (off-site is 0x3) */
+		opmode &= ~(0x60);
+		opmode |= (0x3 << 0x5);
+
+		/* Off-site vertical coefficients */
+		SDE_REG_WRITE(c,
+				CDM_CDWN2_COEFF_OFFSITE_V,
+				offsite_v_coeff[0]);
+		break;
+	default:
+		pr_err("%s invalid vert down sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+		opmode |= BIT(0); /* EN CDWN module */
+	else
+		opmode &= ~BIT(0);
+
+	out_size = (cfg->output_width & 0xFFFF) |
+		((cfg->output_height & 0xFFFF) << 16);
+	SDE_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+	SDE_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
+	SDE_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
+			((0x3FF << 16) | 0x0));
+
+	return 0;
+}
+
+/**
+ * sde_hw_cdm_enable - enable the CDM output packer
+ * @ctx: CDM context
+ * @cdm: desired configuration; output_fmt must be a YUV format, and for
+ *       HDMI output only H1V2 chroma siting is supported by the packer
+ *
+ * Routes the CDM output to either the interface (HDMI) or the writeback
+ * path via the MDP top block and programs the CSC10 opmode and the HDMI
+ * pack opmode registers.
+ *
+ * Return: 0 on success, -EINVAL on NULL arguments or unsupported format
+ */
+int sde_hw_cdm_enable(struct sde_hw_cdm *ctx,
+		struct sde_hw_cdm_cfg *cdm)
+{
+	struct sde_hw_blk_reg_map *c;
+	const struct sde_format *fmt;
+	struct cdm_output_cfg cdm_cfg = { 0 };
+	u32 opmode = 0;
+	u32 csc = 0;
+
+	/* validate inputs before any dereference */
+	if (!ctx || !cdm || !cdm->output_fmt)
+		return -EINVAL;
+
+	c = &ctx->hw;
+	fmt = cdm->output_fmt;
+
+	if (!SDE_FORMAT_IS_YUV(fmt))
+		return -EINVAL;
+
+	if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+		if (fmt->chroma_sample != SDE_CHROMA_H1V2)
+			return -EINVAL; /* unsupported format */
+		opmode = BIT(0);
+		opmode |= (fmt->chroma_sample << 1);
+		cdm_cfg.intf_en = true;
+	} else {
+		opmode = 0;
+		cdm_cfg.wb_en = true;
+	}
+
+	/*
+	 * CSC10 opmode: set bits 0 and 2, clear bit 1.
+	 * NOTE(review): bit semantics (enable / direction / clamp) are
+	 * assumed from the CSC_10 programming model -- confirm vs HW spec.
+	 */
+	csc |= BIT(2);
+	csc &= ~BIT(1);
+	csc |= BIT(0);
+
+	if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+		ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+
+	SDE_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+	SDE_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+	return 0;
+}
+
+/**
+ * sde_hw_cdm_disable - put the CDM block into bypass
+ * @ctx: CDM context; a NULL pointer is ignored
+ *
+ * Writes an all-zero cdm_output_cfg through the MDP top block, which
+ * disables both the interface and writeback routing of the CDM.
+ */
+void sde_hw_cdm_disable(struct sde_hw_cdm *ctx)
+{
+	struct cdm_output_cfg cdm_cfg = { 0 };
+
+	if (!ctx)
+		return;
+
+	if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+		ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+}
+
+/*
+ * Populate the CDM function table.  The 'features' capability mask is
+ * accepted for forward compatibility but no op is feature-conditional yet.
+ */
+static void _setup_cdm_ops(struct sde_hw_cdm_ops *ops,
+		unsigned long features)
+{
+	ops->setup_cdwn = sde_hw_cdm_setup_cdwn;
+	ops->setup_csc_data = sde_hw_cdm_setup_csc_10bit;
+	ops->disable = sde_hw_cdm_disable;
+	ops->enable = sde_hw_cdm_enable;
+}
+
+/*
+ * Allocate and initialize a CDM driver context for the given instance.
+ * Returns an ERR_PTR on allocation failure or unknown catalog id.
+ */
+struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m,
+		struct sde_hw_mdp *hw_mdp)
+{
+	struct sde_cdm_cfg *hw_cap;
+	struct sde_hw_cdm *cdm;
+
+	cdm = kzalloc(sizeof(*cdm), GFP_KERNEL);
+	if (!cdm)
+		return ERR_PTR(-ENOMEM);
+
+	hw_cap = _cdm_offset(idx, m, addr, &cdm->hw);
+	if (IS_ERR_OR_NULL(hw_cap)) {
+		kfree(cdm);
+		return ERR_PTR(-EINVAL);
+	}
+
+	cdm->idx = idx;
+	cdm->cdm_hw_cap = hw_cap;
+	cdm->hw_mdp = hw_mdp;
+	_setup_cdm_ops(&cdm->ops, cdm->cdm_hw_cap->features);
+
+	/*
+	 * Default initialization for the chroma down module: program the
+	 * limited-range RGB-to-YUV CSC coefficients.
+	 */
+	sde_hw_cdm_setup_csc_10bit(cdm, &rgb2yuv_cfg);
+
+	return cdm;
+}
+
+/**
+ * sde_hw_cdm_destroy - free a context allocated by sde_hw_cdm_init
+ * @cdm: context to free; kfree() safely ignores a NULL pointer
+ */
+void sde_hw_cdm_destroy(struct sde_hw_cdm *cdm)
+{
+	kfree(cdm);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.h b/drivers/gpu/drm/msm/sde/sde_hw_cdm.h
new file mode 100644
index 000000000000..264b8a418573
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.h
@@ -0,0 +1,127 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_CDM_H
+#define _SDE_HW_CDM_H
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_top.h"
+
+struct sde_hw_cdm;
+
+/**
+ * struct sde_hw_cdm_cfg - requested configuration of the chroma down block
+ * @output_width:     output width in pixels
+ * @output_height:    output height in pixels
+ * @output_bit_depth: one of enum sde_hw_cdwn_output_bit_depth
+ * @h_cdwn_type:      horizontal method, one of enum sde_hw_cdwn_type
+ * @v_cdwn_type:      vertical method, one of enum sde_hw_cdwn_type
+ * @output_fmt:       destination pixel format
+ * @output_type:      one of enum sde_hw_cdwn_output_type (HDMI/writeback)
+ * @flags:            client-defined flags (unused in this file)
+ */
+struct sde_hw_cdm_cfg {
+	u32 output_width;
+	u32 output_height;
+	u32 output_bit_depth;
+	u32 h_cdwn_type;
+	u32 v_cdwn_type;
+	const struct sde_format *output_fmt;
+	u32 output_type;
+	int flags;
+};
+
+/**
+ * enum sde_hw_cdwn_type - chroma down-sampling method
+ * @CDM_CDWN_DISABLE:    down-sampling disabled
+ * @CDM_CDWN_PIXEL_DROP: drop samples (method field 0)
+ * @CDM_CDWN_AVG:        averaging filter (method field 1)
+ * @CDM_CDWN_COSITE:     co-sited filter coefficients (method field 2)
+ * @CDM_CDWN_OFFSITE:    off-sited filter coefficients (method field 3)
+ */
+enum sde_hw_cdwn_type {
+	CDM_CDWN_DISABLE,
+	CDM_CDWN_PIXEL_DROP,
+	CDM_CDWN_AVG,
+	CDM_CDWN_COSITE,
+	CDM_CDWN_OFFSITE,
+};
+
+/**
+ * enum sde_hw_cdwn_output_type - destination of the CDM output
+ * @CDM_CDWN_OUTPUT_HDMI: route to the HDMI interface path
+ * @CDM_CDWN_OUTPUT_WB:   route to the writeback path
+ */
+enum sde_hw_cdwn_output_type {
+	CDM_CDWN_OUTPUT_HDMI,
+	CDM_CDWN_OUTPUT_WB,
+};
+
+/**
+ * enum sde_hw_cdwn_output_bit_depth - output component depth
+ * @CDM_CDWN_OUTPUT_8BIT:  8 bits per component
+ * @CDM_CDWN_OUTPUT_10BIT: 10 bits per component
+ */
+enum sde_hw_cdwn_output_bit_depth {
+	CDM_CDWN_OUTPUT_8BIT,
+	CDM_CDWN_OUTPUT_10BIT,
+};
+
+/**
+ * struct sde_hw_cdm_ops : Interface to the chroma down Hw driver functions
+ *                         Assumption is these functions will be called after
+ *                         clocks are enabled
+ * @setup_csc_data: Programs the csc matrix
+ * @setup_cdwn:     Sets up the chroma down sub module
+ * @enable:         Enables the output to interface and programs the
+ *                  output packer
+ * @disable:        Puts the cdm in bypass mode
+ */
+struct sde_hw_cdm_ops {
+	/**
+	 * Programs the CSC matrix for conversion from RGB space to YUV space,
+	 * it is optional to call this function as this matrix is automatically
+	 * set during initialization, user should call this if it wants
+	 * to program a different matrix than default matrix.
+	 * @cdm: Pointer to the chroma down context structure
+	 * @data Pointer to CSC configuration data
+	 */
+	void (*setup_csc_data)(struct sde_hw_cdm *cdm,
+			struct sde_csc_cfg *data);
+
+	/**
+	 * Programs the Chroma downsample part.
+	 * @cdm Pointer to chroma down context
+	 * @cfg Pointer to the desired down-sample configuration
+	 */
+	int (*setup_cdwn)(struct sde_hw_cdm *cdm,
+			struct sde_hw_cdm_cfg *cfg);
+
+	/**
+	 * Enable the CDM module
+	 * @cdm Pointer to chroma down context
+	 * @cfg Pointer to the desired output configuration
+	 */
+	int (*enable)(struct sde_hw_cdm *cdm,
+			struct sde_hw_cdm_cfg *cfg);
+
+	/**
+	 * Disable the CDM module
+	 * @cdm Pointer to chroma down context
+	 */
+	void (*disable)(struct sde_hw_cdm *cdm);
+};
+
+/**
+ * struct sde_hw_cdm - CDM driver context, created by sde_hw_cdm_init()
+ * @hw:         register block mapping for this CDM instance
+ * @cdm_hw_cap: catalog capabilities of this instance
+ * @idx:        hardware instance id
+ * @hw_mdp:     MDP top driver, used for output routing
+ * @ops:        function table populated by _setup_cdm_ops()
+ */
+struct sde_hw_cdm {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* chroma down */
+	const struct sde_cdm_cfg *cdm_hw_cap;
+	enum sde_cdm idx;
+
+	/* mdp top hw driver */
+	struct sde_hw_mdp *hw_mdp;
+
+	/* ops */
+	struct sde_hw_cdm_ops ops;
+};
+
+/**
+ * sde_hw_cdm_init - initializes the cdm hw driver object.
+ * should be called once before accessing every cdm.
+ * @idx: cdm index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ * @hw_mdp: pointer to mdp top hw driver object
+ *
+ * Return: valid context pointer on success, ERR_PTR on failure; callers
+ * must check with IS_ERR().
+ */
+struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m,
+		struct sde_hw_mdp *hw_mdp);
+
+/**
+ * sde_hw_cdm_destroy - destroys CDM driver context
+ * @cdm: pointer to CDM driver context
+ */
+void sde_hw_cdm_destroy(struct sde_hw_cdm *cdm);
+
+#endif /*_SDE_HW_CDM_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h b/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h
new file mode 100644
index 000000000000..a30e1a52b046
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_COLOR_PROCESSING_H
+#define _SDE_HW_COLOR_PROCESSING_H
+
+#include "sde_hw_color_processing_v1_7.h"
+
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
new file mode 100644
index 000000000000..f1f66f37ba6a
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
@@ -0,0 +1,453 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/msm_drm_pp.h>
+#include "sde_hw_color_processing_v1_7.h"
+
+#define PA_HUE_VIG_OFF 0x110
+#define PA_SAT_VIG_OFF 0x114
+#define PA_VAL_VIG_OFF 0x118
+#define PA_CONT_VIG_OFF 0x11C
+
+#define PA_HUE_DSPP_OFF 0x238
+#define PA_SAT_DSPP_OFF 0x23C
+#define PA_VAL_DSPP_OFF 0x240
+#define PA_CONT_DSPP_OFF 0x244
+
+#define PA_LUTV_DSPP_OFF 0x1400
+#define PA_LUT_SWAP_OFF 0x234
+
+#define PA_HUE_MASK 0xFFF
+#define PA_SAT_MASK 0xFFFF
+#define PA_VAL_MASK 0xFF
+#define PA_CONT_MASK 0xFF
+
+#define MEMCOL_PWL0_OFF 0x88
+#define MEMCOL_PWL0_MASK 0xFFFF07FF
+#define MEMCOL_PWL1_OFF 0x8C
+#define MEMCOL_PWL1_MASK 0xFFFFFFFF
+#define MEMCOL_HUE_REGION_OFF 0x90
+#define MEMCOL_HUE_REGION_MASK 0x7FF07FF
+#define MEMCOL_SAT_REGION_OFF 0x94
+#define MEMCOL_SAT_REGION_MASK 0xFFFFFF
+#define MEMCOL_VAL_REGION_OFF 0x98
+#define MEMCOL_VAL_REGION_MASK 0xFFFFFF
+#define MEMCOL_P0_LEN 0x14
+#define MEMCOL_P1_LEN 0x8
+#define MEMCOL_PWL2_OFF 0x218
+#define MEMCOL_PWL2_MASK 0xFFFFFFFF
+#define MEMCOL_BLEND_GAIN_OFF 0x21C
+#define MEMCOL_PWL_HOLD_OFF 0x214
+
+#define VIG_OP_PA_EN BIT(4)
+#define VIG_OP_PA_SKIN_EN BIT(5)
+#define VIG_OP_PA_FOL_EN BIT(6)
+#define VIG_OP_PA_SKY_EN BIT(7)
+#define VIG_OP_PA_HUE_EN BIT(25)
+#define VIG_OP_PA_SAT_EN BIT(26)
+#define VIG_OP_PA_VAL_EN BIT(27)
+#define VIG_OP_PA_CONT_EN BIT(28)
+
+#define DSPP_OP_SZ_VAL_EN BIT(31)
+#define DSPP_OP_SZ_SAT_EN BIT(30)
+#define DSPP_OP_SZ_HUE_EN BIT(29)
+#define DSPP_OP_PA_HUE_EN BIT(25)
+#define DSPP_OP_PA_SAT_EN BIT(26)
+#define DSPP_OP_PA_VAL_EN BIT(27)
+#define DSPP_OP_PA_CONT_EN BIT(28)
+#define DSPP_OP_PA_EN BIT(20)
+#define DSPP_OP_PA_LUTV_EN BIT(19)
+#define DSPP_OP_PA_SKIN_EN BIT(5)
+#define DSPP_OP_PA_FOL_EN BIT(6)
+#define DSPP_OP_PA_SKY_EN BIT(7)
+
+#define REG_MASK(n) ((BIT(n)) - 1)
+
+#define PA_VIG_DISABLE_REQUIRED(x) \
+ !((x) & (VIG_OP_PA_SKIN_EN | VIG_OP_PA_SKY_EN | \
+ VIG_OP_PA_FOL_EN | VIG_OP_PA_HUE_EN | \
+ VIG_OP_PA_SAT_EN | VIG_OP_PA_VAL_EN | \
+ VIG_OP_PA_CONT_EN))
+
+
+#define PA_DSPP_DISABLE_REQUIRED(x) \
+ !((x) & (DSPP_OP_PA_SKIN_EN | DSPP_OP_PA_SKY_EN | \
+ DSPP_OP_PA_FOL_EN | DSPP_OP_PA_HUE_EN | \
+ DSPP_OP_PA_SAT_EN | DSPP_OP_PA_VAL_EN | \
+ DSPP_OP_PA_CONT_EN | DSPP_OP_PA_LUTV_EN))
+
+#define DSPP_OP_PCC_ENABLE BIT(0)
+#define PCC_OP_MODE_OFF 0
+#define PCC_CONST_COEFF_OFF 4
+#define PCC_R_COEFF_OFF 0x10
+#define PCC_G_COEFF_OFF 0x1C
+#define PCC_B_COEFF_OFF 0x28
+#define PCC_RG_COEFF_OFF 0x34
+#define PCC_RB_COEFF_OFF 0x40
+#define PCC_GB_COEFF_OFF 0x4C
+#define PCC_RGB_COEFF_OFF 0x58
+#define PCC_CONST_COEFF_MASK 0xFFFF
+#define PCC_COEFF_MASK 0x3FFFF
+
+#define SSPP 0
+#define DSPP 1
+
+/*
+ * Program the PA hue register of an SSPP (VIG) or DSPP instance and keep
+ * the opmode enable bits consistent: a zero hue clears the hue enable
+ * and, if no other PA sub-feature remains active, the global PA enable
+ * bit as well.
+ */
+static void __setup_pa_hue(struct sde_hw_blk_reg_map *hw,
+		const struct sde_pp_blk *blk, uint32_t hue,
+		int location)
+{
+	u32 base = blk->base;
+	u32 offset = (location == DSPP) ? PA_HUE_DSPP_OFF : PA_HUE_VIG_OFF;
+	u32 op_hue_en = (location == DSPP) ? DSPP_OP_PA_HUE_EN :
+					VIG_OP_PA_HUE_EN;
+	u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+	u32 disable_req;
+	u32 opmode;
+
+	SDE_REG_WRITE(hw, base + offset, hue & PA_HUE_MASK);
+
+	opmode = SDE_REG_READ(hw, base);
+
+	if (!hue) {
+		opmode &= ~op_hue_en;
+		disable_req = (location == DSPP) ?
+			PA_DSPP_DISABLE_REQUIRED(opmode) :
+			PA_VIG_DISABLE_REQUIRED(opmode);
+		if (disable_req)
+			opmode &= ~op_pa_en;
+	} else {
+		opmode |= op_hue_en | op_pa_en;
+	}
+
+	SDE_REG_WRITE(hw, base, opmode);
+}
+
+/* SSPP entry point: cfg carries a bare uint32_t hue value. */
+void sde_setup_pipe_pa_hue_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+	__setup_pa_hue(&ctx->hw, &ctx->cap->sblk->hsic_blk,
+			*((uint32_t *)cfg), SSPP);
+}
+
+/* DSPP entry point: cfg carries a bare uint32_t hue value. */
+void sde_setup_dspp_pa_hue_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	__setup_pa_hue(&ctx->hw, &ctx->cap->sblk->hsic,
+			*((uint32_t *)cfg), DSPP);
+}
+
+/*
+ * Program the PA saturation register; zero disables the sub-feature and,
+ * when no other PA sub-feature is active, the global PA enable bit too.
+ */
+static void __setup_pa_sat(struct sde_hw_blk_reg_map *hw,
+		const struct sde_pp_blk *blk, uint32_t sat,
+		int location)
+{
+	u32 base = blk->base;
+	u32 offset = (location == DSPP) ? PA_SAT_DSPP_OFF : PA_SAT_VIG_OFF;
+	u32 op_sat_en = (location == DSPP) ?
+		DSPP_OP_PA_SAT_EN : VIG_OP_PA_SAT_EN;
+	u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+	u32 disable_req;
+	u32 opmode;
+
+	SDE_REG_WRITE(hw, base + offset, sat & PA_SAT_MASK);
+
+	opmode = SDE_REG_READ(hw, base);
+
+	if (!sat) {
+		opmode &= ~op_sat_en;
+		disable_req = (location == DSPP) ?
+			PA_DSPP_DISABLE_REQUIRED(opmode) :
+			PA_VIG_DISABLE_REQUIRED(opmode);
+		if (disable_req)
+			opmode &= ~op_pa_en;
+	} else {
+		opmode |= op_sat_en | op_pa_en;
+	}
+
+	SDE_REG_WRITE(hw, base, opmode);
+}
+
+/* SSPP entry point: cfg carries a bare uint32_t saturation value. */
+void sde_setup_pipe_pa_sat_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+	__setup_pa_sat(&ctx->hw, &ctx->cap->sblk->hsic_blk,
+			*((uint32_t *)cfg), SSPP);
+}
+
+/*
+ * Program the PA value (brightness) register; zero disables the
+ * sub-feature and, when no other PA sub-feature is active, the global PA
+ * enable bit too.
+ */
+static void __setup_pa_val(struct sde_hw_blk_reg_map *hw,
+		const struct sde_pp_blk *blk, uint32_t value,
+		int location)
+{
+	u32 base = blk->base;
+	u32 offset = (location == DSPP) ? PA_VAL_DSPP_OFF : PA_VAL_VIG_OFF;
+	u32 op_val_en = (location == DSPP) ?
+		DSPP_OP_PA_VAL_EN : VIG_OP_PA_VAL_EN;
+	u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+	u32 disable_req;
+	u32 opmode;
+
+	SDE_REG_WRITE(hw, base + offset, value & PA_VAL_MASK);
+
+	opmode = SDE_REG_READ(hw, base);
+
+	if (!value) {
+		opmode &= ~op_val_en;
+		disable_req = (location == DSPP) ?
+			PA_DSPP_DISABLE_REQUIRED(opmode) :
+			PA_VIG_DISABLE_REQUIRED(opmode);
+		if (disable_req)
+			opmode &= ~op_pa_en;
+	} else {
+		opmode |= op_val_en | op_pa_en;
+	}
+
+	SDE_REG_WRITE(hw, base, opmode);
+}
+
+/* SSPP entry point: cfg carries a bare uint32_t value-adjust. */
+void sde_setup_pipe_pa_val_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+	__setup_pa_val(&ctx->hw, &ctx->cap->sblk->hsic_blk,
+			*((uint32_t *)cfg), SSPP);
+}
+
+/*
+ * Program the PA contrast register; zero disables the sub-feature and,
+ * when no other PA sub-feature is active, the global PA enable bit too.
+ */
+static void __setup_pa_cont(struct sde_hw_blk_reg_map *hw,
+		const struct sde_pp_blk *blk, uint32_t contrast,
+		int location)
+{
+	u32 base = blk->base;
+	u32 offset = (location == DSPP) ? PA_CONT_DSPP_OFF : PA_CONT_VIG_OFF;
+	u32 op_cont_en = (location == DSPP) ? DSPP_OP_PA_CONT_EN :
+		VIG_OP_PA_CONT_EN;
+	u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+	u32 disable_req;
+	u32 opmode;
+
+	SDE_REG_WRITE(hw, base + offset, contrast & PA_CONT_MASK);
+
+	opmode = SDE_REG_READ(hw, base);
+
+	if (!contrast) {
+		opmode &= ~op_cont_en;
+		disable_req = (location == DSPP) ?
+			PA_DSPP_DISABLE_REQUIRED(opmode) :
+			PA_VIG_DISABLE_REQUIRED(opmode);
+		if (disable_req)
+			opmode &= ~op_pa_en;
+	} else {
+		opmode |= op_cont_en | op_pa_en;
+	}
+
+	SDE_REG_WRITE(hw, base, opmode);
+}
+
+/* SSPP entry point: cfg carries a bare uint32_t contrast value. */
+void sde_setup_pipe_pa_cont_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+	__setup_pa_cont(&ctx->hw, &ctx->cap->sblk->hsic_blk,
+			*((uint32_t *)cfg), SSPP);
+}
+
+/*
+ * Program the SSPP memory-color (skin/sky/foliage) PWL registers.  A NULL
+ * cfg disables the selected region's enable bit and, when no PA
+ * sub-feature remains active, the global PA enable as well.
+ * NOTE(review): the two register strides (MEMCOL_P0_LEN / MEMCOL_P1_LEN)
+ * are assumed to match the per-region register layout -- confirm vs spec.
+ */
+void sde_setup_pipe_pa_memcol_v1_7(struct sde_hw_pipe *ctx,
+				   enum sde_memcolor_type type,
+				   void *cfg)
+{
+	struct drm_msm_memcol *mc = cfg;
+	u32 base = ctx->cap->sblk->memcolor_blk.base;
+	u32 off, op, mc_en, hold = 0;
+	u32 mc_i = 0;
+
+	/* map the region type to its enable bit and register index */
+	switch (type) {
+	case MEMCOLOR_SKIN:
+		mc_en = VIG_OP_PA_SKIN_EN;
+		mc_i = 0;
+		break;
+	case MEMCOLOR_SKY:
+		mc_en = VIG_OP_PA_SKY_EN;
+		mc_i = 1;
+		break;
+	case MEMCOLOR_FOLIAGE:
+		mc_en = VIG_OP_PA_FOL_EN;
+		mc_i = 2;
+		break;
+	default:
+		DRM_ERROR("Invalid memory color type %d\n", type);
+		return;
+	}
+
+	/* NULL payload means "disable this region" */
+	op = SDE_REG_READ(&ctx->hw, base);
+	if (!mc) {
+		op &= ~mc_en;
+		if (PA_VIG_DISABLE_REQUIRED(op))
+			op &= ~VIG_OP_PA_EN;
+		SDE_REG_WRITE(&ctx->hw, base, op);
+		return;
+	}
+
+	/* first register group: PWL points 0/1 and HSV region bounds */
+	off = base + (mc_i * MEMCOL_P0_LEN);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL0_OFF),
+		      mc->color_adjust_p0 & MEMCOL_PWL0_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL1_OFF),
+		      mc->color_adjust_p1 & MEMCOL_PWL1_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_HUE_REGION_OFF),
+		      mc->hue_region & MEMCOL_HUE_REGION_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_SAT_REGION_OFF),
+		      mc->sat_region & MEMCOL_SAT_REGION_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_VAL_REGION_OFF),
+		      mc->val_region & MEMCOL_VAL_REGION_MASK);
+
+	/* second register group: PWL point 2 and blend gain */
+	off = base + (mc_i * MEMCOL_P1_LEN);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL2_OFF),
+		      mc->color_adjust_p2 & MEMCOL_PWL2_MASK);
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_BLEND_GAIN_OFF), mc->blend_gain);
+
+	/* per-region nibble: sat hold in bits [1:0], val hold in [3:2] */
+	hold = SDE_REG_READ(&ctx->hw, off + MEMCOL_PWL_HOLD_OFF);
+	hold &= ~(0xF << (mc_i * 4));
+	hold |= ((mc->sat_hold & 0x3) << (mc_i * 4));
+	hold |= ((mc->val_hold & 0x3) << ((mc_i * 4) + 2));
+	SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL_HOLD_OFF), hold);
+
+	op |= VIG_OP_PA_EN | mc_en;
+	SDE_REG_WRITE(&ctx->hw, base, op);
+}
+
+/**
+ * sde_setup_dspp_pcc_v1_7 - program the DSPP polynomial color correction
+ * @ctx: DSPP context
+ * @cfg: struct sde_hw_cp_cfg wrapper; a NULL payload disables PCC,
+ *       otherwise payload must be a struct drm_msm_pcc of matching length
+ *
+ * Fixes over the original: the DRM_ERROR format string was mangled by an
+ * escaped-quote line continuation (embedding a literal quote and tabs in
+ * the log message) and leaked kernel addresses via %p (sibling functions
+ * use %pK); the computed-but-never-used io address local was removed.
+ */
+void sde_setup_dspp_pcc_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	struct sde_hw_cp_cfg *hw_cfg = cfg;
+	struct drm_msm_pcc *pcc;
+	u32 base;
+
+	if (!hw_cfg || (hw_cfg->payload && hw_cfg->len != sizeof(*pcc))) {
+		DRM_ERROR("hw %pK payload %pK payloadsize %d exp size %zd\n",
+			  hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
+			  ((hw_cfg) ? hw_cfg->len : 0), sizeof(*pcc));
+		return;
+	}
+	base = ctx->cap->sblk->pcc.base;
+
+	/* Turn off feature */
+	if (!hw_cfg->payload) {
+		SDE_REG_WRITE(&ctx->hw, base, PCC_OP_MODE_OFF);
+		return;
+	}
+	DRM_DEBUG_DRIVER("Enable PCC feature\n");
+	pcc = hw_cfg->payload;
+
+	/* constant terms */
+	SDE_REG_WRITE(&ctx->hw, base + PCC_CONST_COEFF_OFF,
+		      pcc->r.c & PCC_CONST_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_CONST_COEFF_OFF + 4,
+		      pcc->g.c & PCC_CONST_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_CONST_COEFF_OFF + 8,
+		      pcc->b.c & PCC_CONST_COEFF_MASK);
+
+	/* linear terms */
+	SDE_REG_WRITE(&ctx->hw, base + PCC_R_COEFF_OFF,
+		      pcc->r.r & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_R_COEFF_OFF + 4,
+		      pcc->g.r & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_R_COEFF_OFF + 8,
+		      pcc->b.r & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, base + PCC_G_COEFF_OFF,
+		      pcc->r.g & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_G_COEFF_OFF + 4,
+		      pcc->g.g & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_G_COEFF_OFF + 8,
+		      pcc->b.g & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, base + PCC_B_COEFF_OFF,
+		      pcc->r.b & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_B_COEFF_OFF + 4,
+		      pcc->g.b & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_B_COEFF_OFF + 8,
+		      pcc->b.b & PCC_COEFF_MASK);
+
+	/* cross terms */
+	SDE_REG_WRITE(&ctx->hw, base + PCC_RG_COEFF_OFF,
+		      pcc->r.rg & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_RG_COEFF_OFF + 4,
+		      pcc->g.rg & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_RG_COEFF_OFF + 8,
+		      pcc->b.rg & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, base + PCC_RB_COEFF_OFF,
+		      pcc->r.rb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_RB_COEFF_OFF + 4,
+		      pcc->g.rb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_RB_COEFF_OFF + 8,
+		      pcc->b.rb & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, base + PCC_GB_COEFF_OFF,
+		      pcc->r.gb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_GB_COEFF_OFF + 4,
+		      pcc->g.gb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_GB_COEFF_OFF + 8,
+		      pcc->b.gb & PCC_COEFF_MASK);
+
+	/* third-order term */
+	SDE_REG_WRITE(&ctx->hw, base + PCC_RGB_COEFF_OFF,
+		      pcc->r.rgb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_RGB_COEFF_OFF + 4,
+		      pcc->g.rgb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, base + PCC_RGB_COEFF_OFF + 8,
+		      pcc->b.rgb & PCC_COEFF_MASK);
+
+	/* enable last, after all coefficients are in place */
+	SDE_REG_WRITE(&ctx->hw, base, DSPP_OP_PCC_ENABLE);
+}
+
+/*
+ * Program the DSPP PA vLUT table.  A NULL payload disables the feature;
+ * otherwise the LUT entries are packed two-per-register (10 bits each,
+ * low halfword and high halfword) and the LUT swap bit is set so the
+ * hardware picks up the new table.
+ * NOTE(review): the write loop consumes val[] two entries per iteration
+ * and assumes ARRAY_SIZE(payload->val) is even -- confirm against
+ * struct drm_msm_pa_vlut in msm_drm_pp.h.
+ */
+void sde_setup_dspp_pa_vlut_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	struct drm_msm_pa_vlut *payload = NULL;
+	struct sde_hw_cp_cfg *hw_cfg = cfg;
+	u32 base = ctx->cap->sblk->vlut.base;
+	u32 offset = base + PA_LUTV_DSPP_OFF;
+	u32 op_mode, tmp;
+	int i = 0, j = 0;
+
+	if (!hw_cfg || (hw_cfg->payload && hw_cfg->len !=
+			sizeof(struct drm_msm_pa_vlut))) {
+		DRM_ERROR("hw %pK payload %pK payloadsize %d exp size %zd\n",
+			  hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
+			  ((hw_cfg) ? hw_cfg->len : 0),
+			  sizeof(struct drm_msm_pa_vlut));
+		return;
+	}
+	op_mode = SDE_REG_READ(&ctx->hw, base);
+	if (!hw_cfg->payload) {
+		DRM_DEBUG_DRIVER("Disable vlut feature\n");
+		/**
+		 * In the PA_VLUT disable case, remove PA_VLUT enable bit(19)
+		 * first, then check whether any other PA sub-features are
+		 * enabled or not. If none of the sub-features are enabled,
+		 * remove the PA global enable bit(20).
+		 */
+		op_mode &= ~((u32)DSPP_OP_PA_LUTV_EN);
+		if (PA_DSPP_DISABLE_REQUIRED(op_mode))
+			op_mode &= ~((u32)DSPP_OP_PA_EN);
+		SDE_REG_WRITE(&ctx->hw, base, op_mode);
+		return;
+	}
+	payload = hw_cfg->payload;
+	DRM_DEBUG_DRIVER("Enable vlut feature flags %llx\n", payload->flags);
+	for (i = 0, j = 0; i < ARRAY_SIZE(payload->val); i += 2, j += 4) {
+		tmp = (payload->val[i] & REG_MASK(10)) |
+			((payload->val[i + 1] & REG_MASK(10)) << 16);
+		SDE_REG_WRITE(&ctx->hw, (offset + j),
+			tmp);
+	}
+	SDE_REG_WRITE(&ctx->hw, (base + PA_LUT_SWAP_OFF), 1);
+	op_mode |= DSPP_OP_PA_EN | DSPP_OP_PA_LUTV_EN;
+	SDE_REG_WRITE(&ctx->hw, base, op_mode);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
new file mode 100644
index 000000000000..0f9bc0e38322
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_COLOR_PROCESSING_V1_7_H
+#define _SDE_HW_COLOR_PROCESSING_V1_7_H
+
+#include "sde_hw_sspp.h"
+#include "sde_hw_dspp.h"
+
+/**
+ * sde_setup_pipe_pa_hue_v1_7 - setup SSPP hue feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to hue data
+ */
+void sde_setup_pipe_pa_hue_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_sat_v1_7 - setup SSPP saturation feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to saturation data
+ */
+void sde_setup_pipe_pa_sat_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_val_v1_7 - setup SSPP value feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to value data
+ */
+void sde_setup_pipe_pa_val_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_cont_v1_7 - setup SSPP contrast feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to contrast data
+ */
+void sde_setup_pipe_pa_cont_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_memcol_v1_7 - setup SSPP memory color in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @type: Memory color type (Skin, sky, or foliage)
+ * @cfg: Pointer to memory color config data
+ */
+void sde_setup_pipe_pa_memcol_v1_7(struct sde_hw_pipe *ctx,
+ enum sde_memcolor_type type,
+ void *cfg);
+
+/**
+ * sde_setup_dspp_pcc_v1_7 - setup DSPP PCC feature in v1.7 hardware
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to PCC data
+ */
+void sde_setup_dspp_pcc_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * sde_setup_dspp_pa_hue_v1_7 - setup DSPP hue feature in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to hue data
+ */
+void sde_setup_dspp_pa_hue_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * sde_setup_dspp_pa_vlut_v1_7 - setup DSPP PA vLUT feature in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to vLUT data
+ */
+void sde_setup_dspp_pa_vlut_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
new file mode 100644
index 000000000000..56d9f2a4a9b8
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -0,0 +1,461 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include "sde_hwio.h"
+#include "sde_hw_ctl.h"
+
+#define CTL_LAYER(lm) \
+ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT(lm) \
+ (0x40 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT2(lm) \
+ (0x70 + (((lm) - LM_0) * 0x004))
+#define CTL_TOP 0x014
+#define CTL_FLUSH 0x018
+#define CTL_START 0x01C
+#define CTL_SW_RESET 0x030
+#define CTL_LAYER_EXTN_OFFSET 0x40
+
+#define SDE_REG_RESET_TIMEOUT_COUNT 20
+
+/**
+ * _ctl_offset - look up the catalog entry for a CTL block and fill its reg map
+ * @ctl: CTL block enum id to look up
+ * @m: pointer to mdss catalog data
+ * @addr: mapped register io address of MDP
+ * @b: register map populated on success
+ *
+ * Returns the matching catalog entry, or ERR_PTR(-EINVAL) when @ctl is not
+ * in the catalog. A failed lookup is not a memory error, so -EINVAL is the
+ * correct code (and matches the convention used by _dspp_offset).
+ */
+static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->ctl_count; i++) {
+		if (ctl == m->ctl[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->ctl[i].base;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_CTL;
+			return &m->ctl[i];
+		}
+	}
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _mixer_stages - look up the max blend stages supported by layer mixer @lm
+ * @mixer: mixer hardware capability table
+ * @count: number of entries in @mixer
+ * @lm: layer mixer enum id
+ *
+ * Returns the mixer's maxblendstages, or -EINVAL when @lm is not found.
+ */
+static int _mixer_stages(const struct sde_lm_cfg *mixer, int count,
+		enum sde_lm lm)
+{
+	int idx;
+
+	for (idx = 0; idx < count; idx++) {
+		if (mixer[idx].id == lm)
+			return mixer[idx].sblk->maxblendstages;
+	}
+
+	return -EINVAL;
+}
+
+/* Kick off the ctl path for SW-controlled interfaces (DSI cmd mode, WB) */
+static inline void sde_hw_ctl_trigger_start(struct sde_hw_ctl *ctx)
+{
+	SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+/* Reset the cached pending flush mask; no hardware access */
+static inline void sde_hw_ctl_clear_pending_flush(struct sde_hw_ctl *ctx)
+{
+	ctx->pending_flush_mask = 0x0;
+}
+
+/* OR @flushbits into the cached pending flush mask; no hardware access */
+static inline void sde_hw_ctl_update_pending_flush(struct sde_hw_ctl *ctx,
+		u32 flushbits)
+{
+	ctx->pending_flush_mask |= flushbits;
+}
+
+/* Return the cached pending flush mask (0 for a NULL ctx); no HW access */
+static u32 sde_hw_ctl_get_pending_flush(struct sde_hw_ctl *ctx)
+{
+	if (!ctx)
+		return 0x0;
+
+	return ctx->pending_flush_mask;
+}
+
+/* Commit the accumulated pending flush mask to the CTL_FLUSH register */
+static inline void sde_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx)
+{
+	SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+
+/**
+ * sde_hw_ctl_get_bitmask_sspp - map a source pipe id to its CTL_FLUSH bit
+ * @ctx: ctl path ctx pointer (unused in the mapping)
+ * @sspp: source pipe enum id
+ *
+ * Returns the flush bit for @sspp, or 0 for an unrecognized pipe. The bit
+ * positions are fixed by hardware and are not contiguous per pipe class.
+ */
+static inline uint32_t sde_hw_ctl_get_bitmask_sspp(struct sde_hw_ctl *ctx,
+	enum sde_sspp sspp)
+{
+	uint32_t flushbits = 0;
+
+	switch (sspp) {
+	case SSPP_VIG0:
+		flushbits =  BIT(0);
+		break;
+	case SSPP_VIG1:
+		flushbits = BIT(1);
+		break;
+	case SSPP_VIG2:
+		flushbits = BIT(2);
+		break;
+	case SSPP_VIG3:
+		flushbits = BIT(18);
+		break;
+	case SSPP_RGB0:
+		flushbits = BIT(3);
+		break;
+	case SSPP_RGB1:
+		flushbits = BIT(4);
+		break;
+	case SSPP_RGB2:
+		flushbits = BIT(5);
+		break;
+	case SSPP_RGB3:
+		flushbits = BIT(19);
+		break;
+	case SSPP_DMA0:
+		flushbits = BIT(11);
+		break;
+	case SSPP_DMA1:
+		flushbits = BIT(12);
+		break;
+	case SSPP_DMA2:
+		flushbits = BIT(24);
+		break;
+	case SSPP_DMA3:
+		flushbits = BIT(25);
+		break;
+	case SSPP_CURSOR0:
+		flushbits = BIT(22);
+		break;
+	case SSPP_CURSOR1:
+		flushbits = BIT(23);
+		break;
+	default:
+		break;
+	}
+
+	return flushbits;
+}
+
+/**
+ * sde_hw_ctl_get_bitmask_mixer - map a layer mixer id to its CTL_FLUSH bits
+ * @ctx: ctl path ctx pointer (unused in the mapping)
+ * @lm: layer mixer enum id
+ *
+ * Returns the mixer's flush bit OR'd with BIT(17) (CTL path flush).
+ * NOTE(review): the default case returns -EINVAL from a uint32_t function,
+ * which a caller sees as a large positive mask (0xFFFFFFEA); confirm all
+ * callers pass a valid @lm or validate the return before use.
+ */
+static inline uint32_t sde_hw_ctl_get_bitmask_mixer(struct sde_hw_ctl *ctx,
+	enum sde_lm lm)
+{
+	uint32_t flushbits = 0;
+
+	switch (lm) {
+	case LM_0:
+		flushbits = BIT(6);
+		break;
+	case LM_1:
+		flushbits = BIT(7);
+		break;
+	case LM_2:
+		flushbits = BIT(8);
+		break;
+	case LM_3:
+		flushbits = BIT(9);
+		break;
+	case LM_4:
+		flushbits = BIT(10);
+		break;
+	case LM_5:
+		flushbits = BIT(20);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	flushbits |= BIT(17); /* CTL */
+
+	return flushbits;
+}
+
+/**
+ * sde_hw_ctl_get_bitmask_dspp - OR a DSPP's CTL_FLUSH bit into @flushbits
+ * @ctx: ctl path ctx pointer (unused)
+ * @flushbits: accumulator updated in place
+ * @dspp: DSPP enum id
+ *
+ * Returns 0 on success, -EINVAL for an unsupported DSPP id.
+ */
+static inline int sde_hw_ctl_get_bitmask_dspp(struct sde_hw_ctl *ctx,
+		u32 *flushbits, enum sde_dspp dspp)
+{
+	switch (dspp) {
+	case DSPP_0:
+		*flushbits |= BIT(13);
+		break;
+	case DSPP_1:
+		*flushbits |= BIT(14);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * sde_hw_ctl_get_bitmask_intf - OR an interface's CTL_FLUSH bit into @flushbits
+ * @ctx: ctl path ctx pointer (unused)
+ * @flushbits: accumulator updated in place
+ * @intf: interface enum id
+ *
+ * Returns 0 on success, -EINVAL for an unsupported interface. Note the bit
+ * order is reversed relative to the interface index (INTF_0 -> BIT(31)).
+ */
+static inline int sde_hw_ctl_get_bitmask_intf(struct sde_hw_ctl *ctx,
+		u32 *flushbits, enum sde_intf intf)
+{
+	switch (intf) {
+	case INTF_0:
+		*flushbits |= BIT(31);
+		break;
+	case INTF_1:
+		*flushbits |= BIT(30);
+		break;
+	case INTF_2:
+		*flushbits |= BIT(29);
+		break;
+	case INTF_3:
+		*flushbits |= BIT(28);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * sde_hw_ctl_get_bitmask_wb - OR the writeback CTL_FLUSH bit into @flushbits
+ * @ctx: ctl path ctx pointer (unused)
+ * @flushbits: accumulator updated in place
+ * @wb: writeback enum id
+ *
+ * All writeback blocks share the single flush bit BIT(16).
+ * Returns 0 on success, -EINVAL for an unsupported writeback id.
+ */
+static inline int sde_hw_ctl_get_bitmask_wb(struct sde_hw_ctl *ctx,
+		u32 *flushbits, enum sde_wb wb)
+{
+	switch (wb) {
+	case WB_0:
+	case WB_1:
+	case WB_2:
+		*flushbits |= BIT(16);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * sde_hw_ctl_get_bitmask_cdm - OR the CDM CTL_FLUSH bit into @flushbits
+ * @ctx: ctl path ctx pointer (unused)
+ * @flushbits: accumulator updated in place
+ * @cdm: CDM enum id
+ *
+ * Returns 0 on success, -EINVAL for an unsupported CDM id.
+ */
+static inline int sde_hw_ctl_get_bitmask_cdm(struct sde_hw_ctl *ctx,
+		u32 *flushbits, enum sde_cdm cdm)
+{
+	switch (cdm) {
+	case CDM_0:
+		*flushbits |= BIT(26);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * sde_hw_ctl_reset_control - issue a ctl path SW reset and poll completion
+ * @ctx: ctl path ctx pointer
+ *
+ * Writes CTL_SW_RESET, then polls up to SDE_REG_RESET_TIMEOUT_COUNT times
+ * (sleeping 20-50 us between reads) for hardware to clear the bit.
+ * Returns 0 when the reset completes, -EINVAL on timeout.
+ */
+static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int count = SDE_REG_RESET_TIMEOUT_COUNT;
+	int reset;
+
+	SDE_REG_WRITE(c, CTL_SW_RESET, 0x1);
+
+	for (; count > 0; count--) {
+		/* insert small delay to avoid spinning the cpu while waiting */
+		usleep_range(20, 50);
+		reset = SDE_REG_READ(c, CTL_SW_RESET);
+		if (reset == 0)
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * sde_hw_ctl_clear_all_blendstages - set all blend stages to disabled
+ * @ctx: ctl path ctx pointer
+ *
+ * Zeroes the LAYER, LAYER_EXT and LAYER_EXT2 registers of every mixer so
+ * that no stale stage assignment survives reconfiguration. EXT2 must be
+ * cleared too: sde_hw_ctl_setup_blendstage programs DMA2/DMA3 stages there,
+ * and the original code left it untouched.
+ */
+static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int i;
+
+	for (i = 0; i < ctx->mixer_count; i++) {
+		SDE_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
+		SDE_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
+		SDE_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
+	}
+}
+
+/**
+ * sde_hw_ctl_setup_blendstage - program mixer-to-pipe blend stage assignment
+ * @ctx: ctl path ctx pointer
+ * @lm: layer mixer enumeration
+ * @stage_cfg: blend stage configuration (pipe id per stage per slot)
+ * @index: mixer pair index into @stage_cfg (must be < CRTC_DUAL_MIXERS)
+ *
+ * Encodes each staged pipe into the LAYER / LAYER_EXT / LAYER_EXT2 register
+ * triplet for @lm. BORDER_OUT (BIT(24)) is always enabled.
+ *
+ * Bug fix: 'stages' was declared u8, so the 'stages < 0' check against the
+ * -EINVAL return of _mixer_stages could never fire (unsigned compare).
+ * Declared int so an unknown mixer id is rejected as intended.
+ */
+static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
+	enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg, u32 index)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 mixercfg, mixercfg_ext, mix, ext, mixercfg_ext2;
+	int i, j;
+	int stages;
+	int pipes_per_stage;
+
+	if (index >= CRTC_DUAL_MIXERS)
+		return;
+
+	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+	if (stages < 0)
+		return;
+
+	if (test_bit(SDE_MIXER_SOURCESPLIT,
+		&ctx->mixer_hw_caps->features))
+		pipes_per_stage = PIPES_PER_STAGE;
+	else
+		pipes_per_stage = 1;
+
+	mixercfg = BIT(24); /* always set BORDER_OUT */
+	mixercfg_ext = 0;
+	mixercfg_ext2 = 0;
+
+	for (i = 0; i <= stages; i++) {
+		/* overflow to ext register if 'i + 1 > 7' */
+		mix = (i + 1) & 0x7;
+		ext = i >= 7;
+
+		for (j = 0 ; j < pipes_per_stage; j++) {
+			switch (stage_cfg->stage[index][i][j]) {
+			case SSPP_VIG0:
+				mixercfg |= mix << 0;
+				mixercfg_ext |= ext << 0;
+				break;
+			case SSPP_VIG1:
+				mixercfg |= mix << 3;
+				mixercfg_ext |= ext << 2;
+				break;
+			case SSPP_VIG2:
+				mixercfg |= mix << 6;
+				mixercfg_ext |= ext << 4;
+				break;
+			case SSPP_VIG3:
+				mixercfg |= mix << 26;
+				mixercfg_ext |= ext << 6;
+				break;
+			case SSPP_RGB0:
+				mixercfg |= mix << 9;
+				mixercfg_ext |= ext << 8;
+				break;
+			case SSPP_RGB1:
+				mixercfg |= mix << 12;
+				mixercfg_ext |= ext << 10;
+				break;
+			case SSPP_RGB2:
+				mixercfg |= mix << 15;
+				mixercfg_ext |= ext << 12;
+				break;
+			case SSPP_RGB3:
+				mixercfg |= mix << 29;
+				mixercfg_ext |= ext << 14;
+				break;
+			case SSPP_DMA0:
+				mixercfg |= mix << 18;
+				mixercfg_ext |= ext << 16;
+				break;
+			case SSPP_DMA1:
+				mixercfg |= mix << 21;
+				mixercfg_ext |= ext << 18;
+				break;
+			case SSPP_DMA2:
+				/* DMA2/DMA3 use 4-bit fields in EXT2 only */
+				mix = (i + 1) & 0xf;
+				mixercfg_ext2 |= mix << 0;
+				break;
+			case SSPP_DMA3:
+				mix = (i + 1) & 0xf;
+				mixercfg_ext2 |= mix << 4;
+				break;
+			case SSPP_CURSOR0:
+				mixercfg_ext |= ((i + 1) & 0xF) << 20;
+				break;
+			case SSPP_CURSOR1:
+				mixercfg_ext |= ((i + 1) & 0xF) << 26;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+	SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+	SDE_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+}
+
+/**
+ * sde_hw_ctl_intf_cfg - program CTL_TOP with the output interface routing
+ * @ctx: ctl path ctx pointer
+ * @cfg: interface config structure pointer
+ *
+ * Packs the interface id, writeback selection, 3D mux mode and the
+ * command/video mode selection into a single CTL_TOP register write.
+ * An unknown intf_mode_sel is logged and nothing is written.
+ */
+static void sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx,
+		struct sde_hw_intf_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 intf_cfg = 0;
+
+	intf_cfg |= (cfg->intf & 0xF) << 4;
+
+	if (cfg->wb)
+		intf_cfg |= (cfg->wb & 0x3) + 2;
+
+	if (cfg->mode_3d) {
+		/* enable the 3D mux; hardware field encodes (mode - 1) */
+		intf_cfg |= BIT(19);
+		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
+	}
+
+	switch (cfg->intf_mode_sel) {
+	case SDE_CTL_MODE_SEL_VID:
+		intf_cfg &= ~BIT(17);
+		intf_cfg &= ~(0x3 << 15);
+		break;
+	case SDE_CTL_MODE_SEL_CMD:
+		intf_cfg |= BIT(17);
+		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
+		break;
+	default:
+		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
+		return;
+	}
+
+	SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
+/**
+ * _setup_ctl_ops - populate the ctl path function table
+ * @ops: ops structure to fill
+ * @cap: ctl feature capability flags (currently unused; all ops installed
+ *       unconditionally)
+ */
+static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
+		unsigned long cap)
+{
+	ops->clear_pending_flush = sde_hw_ctl_clear_pending_flush;
+	ops->update_pending_flush = sde_hw_ctl_update_pending_flush;
+	ops->get_pending_flush = sde_hw_ctl_get_pending_flush;
+	ops->trigger_flush = sde_hw_ctl_trigger_flush;
+	ops->trigger_start = sde_hw_ctl_trigger_start;
+	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
+	ops->reset = sde_hw_ctl_reset_control;
+	ops->clear_all_blendstages = sde_hw_ctl_clear_all_blendstages;
+	ops->setup_blendstage = sde_hw_ctl_setup_blendstage;
+	ops->get_bitmask_sspp = sde_hw_ctl_get_bitmask_sspp;
+	ops->get_bitmask_mixer = sde_hw_ctl_get_bitmask_mixer;
+	ops->get_bitmask_dspp = sde_hw_ctl_get_bitmask_dspp;
+	ops->get_bitmask_intf = sde_hw_ctl_get_bitmask_intf;
+	ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm;
+	ops->get_bitmask_wb = sde_hw_ctl_get_bitmask_wb;
+}
+
+/**
+ * sde_hw_ctl_init - allocate and initialize a ctl path driver object
+ * @idx: ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m: pointer to mdss catalog data
+ *
+ * Returns the new context on success, or ERR_PTR on allocation or catalog
+ * lookup failure. The caller owns the context and must release it with
+ * sde_hw_ctl_destroy().
+ */
+struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_ctl *c;
+	struct sde_ctl_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _ctl_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		pr_err("failed to create sde_hw_ctl %d\n", idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->caps = cfg;
+	_setup_ctl_ops(&c->ops, c->caps->features);
+	c->idx = idx;
+	c->mixer_count = m->mixer_count;
+	c->mixer_hw_caps = m->mixer;
+
+	return c;
+}
+
+/* Free a context allocated by sde_hw_ctl_init; NULL is a no-op (kfree) */
+void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx)
+{
+	kfree(ctx);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
new file mode 100644
index 000000000000..2fb7b377e51d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -0,0 +1,186 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_CTL_H
+#define _SDE_HW_CTL_H
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+#include "sde_hw_catalog.h"
+
+/**
+ * sde_ctl_mode_sel: Interface mode selection
+ * SDE_CTL_MODE_SEL_VID: Video mode interface
+ * SDE_CTL_MODE_SEL_CMD: Command mode interface
+ */
+enum sde_ctl_mode_sel {
+ SDE_CTL_MODE_SEL_VID = 0,
+ SDE_CTL_MODE_SEL_CMD
+};
+
+struct sde_hw_ctl;
+/**
+ * struct sde_hw_stage_cfg - blending stage cfg
+ * @stage: SSPP id staged at each [mixer pair][blend stage][pipe slot];
+ *	entries that do not name a known pipe are ignored when programmed
+ */
+struct sde_hw_stage_cfg {
+	enum sde_sspp stage[CRTC_DUAL_MIXERS][SDE_STAGE_MAX][PIPES_PER_STAGE];
+};
+
+/**
+ * struct sde_hw_intf_cfg :Describes how the SDE writes data to output interface
+ * @intf : Interface id
+ * @wb: Writeback id
+ * @mode_3d: 3d mux configuration
+ * @intf_mode_sel: Interface mode, cmd / vid
+ * @stream_sel: Stream selection for multi-stream interfaces
+ */
+struct sde_hw_intf_cfg {
+ enum sde_intf intf;
+ enum sde_wb wb;
+ enum sde_3d_blend_mode mode_3d;
+ enum sde_ctl_mode_sel intf_mode_sel;
+ int stream_sel;
+};
+
+/**
+ * struct sde_hw_ctl_ops - Interface to the wb Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_ctl_ops {
+	/**
+	 * kickoff hw operation for Sw controlled interfaces
+	 * DSI cmd mode and WB interface are SW controlled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_start)(struct sde_hw_ctl *ctx);
+
+	/**
+	 * Clear the value of the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*clear_pending_flush)(struct sde_hw_ctl *ctx);
+
+	/**
+	 * Query the value of the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	u32 (*get_pending_flush)(struct sde_hw_ctl *ctx);
+
+	/**
+	 * OR in the given flushbits to the cached pending_flush_mask
+	 * No effect on hardware
+	 * @ctx       : ctl path ctx pointer
+	 * @flushbits : module flushmask
+	 */
+	void (*update_pending_flush)(struct sde_hw_ctl *ctx,
+		u32 flushbits);
+
+	/**
+	 * Write the value of the pending_flush_mask to hardware
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*trigger_flush)(struct sde_hw_ctl *ctx);
+
+	/**
+	 * Setup ctl_path interface config
+	 * @ctx
+	 * @cfg    : interface config structure pointer
+	 */
+	void (*setup_intf_cfg)(struct sde_hw_ctl *ctx,
+		struct sde_hw_intf_cfg *cfg);
+
+	/**
+	 * Issue a ctl path SW reset and wait for completion.
+	 * Returns 0 on success, negative error on timeout.
+	 * @c : ctl path ctx pointer
+	 */
+	int (*reset)(struct sde_hw_ctl *c);
+
+	/* Return the CTL_FLUSH bit for a source pipe (0 if unknown) */
+	uint32_t (*get_bitmask_sspp)(struct sde_hw_ctl *ctx,
+		enum sde_sspp blk);
+
+	/* Return the CTL_FLUSH bits (mixer + CTL path) for a layer mixer */
+	uint32_t (*get_bitmask_mixer)(struct sde_hw_ctl *ctx,
+		enum sde_lm blk);
+
+	/* OR the block's flush bit into *flushbits; 0 or -EINVAL */
+	int (*get_bitmask_dspp)(struct sde_hw_ctl *ctx,
+		u32 *flushbits,
+		enum sde_dspp blk);
+
+	/* OR the interface's flush bit into *flushbits; 0 or -EINVAL */
+	int (*get_bitmask_intf)(struct sde_hw_ctl *ctx,
+		u32 *flushbits,
+		enum sde_intf blk);
+
+	/* OR the CDM flush bit into *flushbits; 0 or -EINVAL */
+	int (*get_bitmask_cdm)(struct sde_hw_ctl *ctx,
+		u32 *flushbits,
+		enum sde_cdm blk);
+
+	/* OR the writeback flush bit into *flushbits; 0 or -EINVAL */
+	int (*get_bitmask_wb)(struct sde_hw_ctl *ctx,
+		u32 *flushbits,
+		enum sde_wb blk);
+
+	/**
+	 * Set all blend stages to disabled
+	 * @ctx       : ctl path ctx pointer
+	 */
+	void (*clear_all_blendstages)(struct sde_hw_ctl *ctx);
+
+	/**
+	 * Configure layer mixer to pipe configuration
+	 * @ctx       : ctl path ctx pointer
+	 * @lm        : layer mixer enumeration
+	 * @cfg       : blend stage configuration
+	 */
+	void (*setup_blendstage)(struct sde_hw_ctl *ctx,
+		enum sde_lm lm, struct sde_hw_stage_cfg *cfg, u32 index);
+};
+
+/**
+ * struct sde_hw_ctl : CTL PATH driver object
+ * @hw: block register map object
+ * @idx: control path index
+ * @ctl_hw_caps: control path capabilities
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @ops: operation list
+ */
+struct sde_hw_ctl {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* ctl path */
+ int idx;
+ const struct sde_ctl_cfg *caps;
+ int mixer_count;
+ const struct sde_lm_cfg *mixer_hw_caps;
+ u32 pending_flush_mask;
+
+ /* ops */
+ struct sde_hw_ctl_ops ops;
+};
+
+/**
+ * sde_hw_ctl_init(): Initializes the ctl_path hw driver object.
+ * should be called before accessing every ctl path registers.
+ * @idx: ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_ctl_destroy(): Destroys ctl driver context
+ * should be called to free the context
+ */
+void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx);
+
+#endif /*_SDE_HW_CTL_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
new file mode 100644
index 000000000000..d6250b07b4f0
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -0,0 +1,120 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <drm/msm_drm_pp.h>
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_dspp.h"
+#include "sde_hw_color_processing.h"
+
+/**
+ * _dspp_offset - look up the catalog entry for a DSPP and fill its reg map
+ * @dspp: DSPP enum id to look up
+ * @m: pointer to mdss catalog data
+ * @addr: mapped register io address of MDP
+ * @b: register map populated on success
+ *
+ * Returns the matching catalog entry, or ERR_PTR(-EINVAL) if not found.
+ */
+static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->dspp_count; i++) {
+		if (dspp == m->dspp[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->dspp[i].base;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_DSPP;
+			return &m->dspp[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/* Stub: dspp histogram setup not implemented yet */
+void sde_dspp_setup_histogram(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+/* Stub: dspp histogram readback not implemented yet */
+void sde_dspp_read_histogram(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+/* Stub: dspp IGC update not implemented yet */
+void sde_dspp_update_igc(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+/* Stub: dspp sharpening setup not implemented yet */
+void sde_dspp_setup_sharpening(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+/* Stub: danger/safe LUT setup not implemented yet */
+void sde_dspp_setup_danger_safe(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+/* Stub: dspp dither setup not implemented yet */
+void sde_dspp_setup_dither(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+/**
+ * _setup_dspp_ops - install version-matched color processing ops
+ * @c: dspp context
+ * @features: catalog feature bit mask for this dspp
+ *
+ * For each supported feature bit, installs the v1.7 implementation when the
+ * catalog reports sub-block version 1.7; otherwise the op stays NULL.
+ * Adds the missing 'break' at the end of the SDE_DSPP_VLUT case — it was
+ * silently falling through into default (harmless today, but fragile if a
+ * case is ever added between them).
+ */
+static void _setup_dspp_ops(struct sde_hw_dspp *c, unsigned long features)
+{
+	int i = 0;
+
+	for (i = 0; i < SDE_DSPP_MAX; i++) {
+		if (!test_bit(i, &features))
+			continue;
+		switch (i) {
+		case SDE_DSPP_PCC:
+			if (c->cap->sblk->pcc.version ==
+				(SDE_COLOR_PROCESS_VER(0x1, 0x7)))
+				c->ops.setup_pcc = sde_setup_dspp_pcc_v1_7;
+			break;
+		case SDE_DSPP_HSIC:
+			if (c->cap->sblk->hsic.version ==
+				(SDE_COLOR_PROCESS_VER(0x1, 0x7)))
+				c->ops.setup_hue = sde_setup_dspp_pa_hue_v1_7;
+			break;
+		case SDE_DSPP_VLUT:
+			if (c->cap->sblk->vlut.version ==
+				(SDE_COLOR_PROCESS_VER(0x1, 0x7))) {
+				c->ops.setup_vlut = sde_setup_dspp_pa_vlut_v1_7;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/**
+ * sde_hw_dspp_init - allocate and initialize a dspp hw driver object
+ * @idx: DSPP index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m: pointer to mdss catalog data
+ *
+ * Returns the new context, or ERR_PTR on allocation or lookup failure.
+ * Caller must release the context with sde_hw_dspp_destroy().
+ */
+struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
+			void __iomem *addr,
+			struct sde_mdss_cfg *m)
+{
+	struct sde_hw_dspp *c;
+	struct sde_dspp_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _dspp_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_dspp_ops(c, c->cap->features);
+
+	return c;
+}
+
+/* Free a context allocated by sde_hw_dspp_init; NULL is a no-op (kfree) */
+void sde_hw_dspp_destroy(struct sde_hw_dspp *dspp)
+{
+	kfree(dspp);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
new file mode 100644
index 000000000000..6e6ad2f8d0e5
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -0,0 +1,183 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_DSPP_H
+#define _SDE_HW_DSPP_H
+
+struct sde_hw_dspp;
+
+/**
+ * struct sde_hw_dspp_ops - interface to the dspp hardware driver functions
+ * Caller must call the init function to get the dspp context for each dspp
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_dspp_ops {
+ /**
+ * setup_histogram - setup dspp histogram
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_histogram)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * read_histogram - read dspp histogram
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*read_histogram)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_igc - update dspp igc
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_igc)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_pa - setup dspp pa
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pa)(struct sde_hw_dspp *dspp, void *cfg);
+
+ /**
+ * setup_pcc - setup dspp pcc
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pcc)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_sharpening - setup dspp sharpening
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_sharpening)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_pa_memcolor - setup dspp memcolor
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pa_memcolor)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_sixzone - setup dspp six zone
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_sixzone)(struct sde_hw_dspp *dspp, void *cfg);
+
+ /**
+ * setup_danger_safe - setup danger safe LUTS
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_danger_safe)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_dither - setup dspp dither
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_dither)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_hue - setup dspp PA hue
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_hue)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_sat - setup dspp PA saturation
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_sat)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_val - setup dspp PA value
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_val)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_cont - setup dspp PA contrast
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_cont)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_vlut - setup dspp PA VLUT
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_vlut)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_gc - update dspp gc
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_gc)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_gamut - update dspp gamut
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_gamut)(struct sde_hw_dspp *ctx, void *cfg);
+};
+
+/**
+ * struct sde_hw_dspp - dspp description
+ * @hw: block register map object (base offset, block offset, hw version)
+ * @idx: DSPP index
+ * @cap: pointer to this dspp's catalog capability entry
+ * @ops: operations possible for this dspp (populated per feature/version)
+ */
+struct sde_hw_dspp {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* dspp */
+	enum sde_dspp idx;
+	const struct sde_dspp_cfg *cap;
+
+	/* Ops */
+	struct sde_hw_dspp_ops ops;
+};
+
+/**
+ * sde_hw_dspp_init - initializes the dspp hw driver object.
+ * should be called once before accessing every dspp.
+ * @idx: DSPP index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ */
+struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_dspp_destroy(): Destroys DSPP driver context
+ * @dspp: Pointer to DSPP driver context
+ */
+void sde_hw_dspp_destroy(struct sde_hw_dspp *dspp);
+
+#endif /*_SDE_HW_DSPP_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_hwio.h b/drivers/gpu/drm/msm/sde/sde_hw_hwio.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_hwio.h
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
new file mode 100644
index 000000000000..49930365d989
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -0,0 +1,991 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include "sde_kms.h"
+#include "sde_hw_interrupts.h"
+#include "sde_hw_util.h"
+#include "sde_hw_mdss.h"
+
+/**
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. to the MDSS base
+ */
+#define HW_INTR_STATUS 0x0010
+#define MDP_SSPP_TOP0_OFF 0x1000
+#define MDP_INTF_0_OFF 0x6B000
+#define MDP_INTF_1_OFF 0x6B800
+#define MDP_INTF_2_OFF 0x6C000
+#define MDP_INTF_3_OFF 0x6C800
+#define MDP_INTF_4_OFF 0x6D000
+
+/**
+ * WB interrupt status bit definitions
+ */
+#define SDE_INTR_WB_0_DONE BIT(0)
+#define SDE_INTR_WB_1_DONE BIT(1)
+#define SDE_INTR_WB_2_DONE BIT(4)
+
+/**
+ * WDOG timer interrupt status bit definitions
+ */
+#define SDE_INTR_WD_TIMER_0_DONE BIT(2)
+#define SDE_INTR_WD_TIMER_1_DONE BIT(3)
+#define SDE_INTR_WD_TIMER_2_DONE BIT(5)
+#define SDE_INTR_WD_TIMER_3_DONE BIT(6)
+#define SDE_INTR_WD_TIMER_4_DONE BIT(7)
+
+/**
+ * Pingpong interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_DONE BIT(8)
+#define SDE_INTR_PING_PONG_1_DONE BIT(9)
+#define SDE_INTR_PING_PONG_2_DONE BIT(10)
+#define SDE_INTR_PING_PONG_3_DONE BIT(11)
+#define SDE_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define SDE_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define SDE_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define SDE_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define SDE_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define SDE_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define SDE_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define SDE_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+
+/**
+ * Interface interrupt status bit definitions
+ */
+#define SDE_INTR_INTF_0_UNDERRUN BIT(24)
+#define SDE_INTR_INTF_1_UNDERRUN BIT(26)
+#define SDE_INTR_INTF_2_UNDERRUN BIT(28)
+#define SDE_INTR_INTF_3_UNDERRUN BIT(30)
+#define SDE_INTR_INTF_0_VSYNC BIT(25)
+#define SDE_INTR_INTF_1_VSYNC BIT(27)
+#define SDE_INTR_INTF_2_VSYNC BIT(29)
+#define SDE_INTR_INTF_3_VSYNC BIT(31)
+
+/**
+ * Pingpong Secondary interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
+#define SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define SDE_INTR_PING_PONG_S0_RD_PTR BIT(8)
+#define SDE_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
+#define SDE_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
+
+/**
+ * Pingpong TEAR detection interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
+#define SDE_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
+#define SDE_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
+#define SDE_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
+
+/**
+ * Pingpong TE detection interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_TE_DETECTED BIT(24)
+#define SDE_INTR_PING_PONG_1_TE_DETECTED BIT(25)
+#define SDE_INTR_PING_PONG_2_TE_DETECTED BIT(26)
+#define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27)
+
+/**
+ * Concurrent WB overflow interrupt status bit definitions
+ */
+#define SDE_INTR_CWB_2_OVERFLOW BIT(14)
+#define SDE_INTR_CWB_3_OVERFLOW BIT(15)
+
+/**
+ * Histogram VIG done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_VIG_0_DONE BIT(0)
+#define SDE_INTR_HIST_VIG_1_DONE BIT(4)
+#define SDE_INTR_HIST_VIG_2_DONE BIT(8)
+#define SDE_INTR_HIST_VIG_3_DONE BIT(10)
+
+/**
+ * Histogram VIG reset Sequence done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
+#define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
+#define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
+#define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
+
+/**
+ * Histogram DSPP done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_DSPP_0_DONE BIT(12)
+#define SDE_INTR_HIST_DSPP_1_DONE BIT(16)
+#define SDE_INTR_HIST_DSPP_2_DONE BIT(20)
+#define SDE_INTR_HIST_DSPP_3_DONE BIT(22)
+
+/**
+ * Histogram DSPP reset Sequence done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
+#define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
+#define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
+#define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
+
+/**
+ * INTF interrupt status bit definitions
+ */
+#define SDE_INTR_VIDEO_INTO_STATIC BIT(0)
+#define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1)
+#define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2)
+#define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
+#define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4)
+#define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
+#define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6)
+#define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
+#define SDE_INTR_PROG_LINE BIT(8)
+
+/**
+ * struct sde_intr_reg - array of SDE register sets
+ * @clr_off: offset to CLEAR reg
+ * @en_off: offset to ENABLE reg
+ * @status_off: offset to STATUS reg
+ */
+struct sde_intr_reg {
+ u32 clr_off;
+ u32 en_off;
+ u32 status_off;
+};
+
+/**
+ * struct sde_irq_type - maps each irq with i/f
+ * @intr_type: type of interrupt listed in sde_intr_type
+ * @instance_idx: instance index of the associated HW block in SDE
+ * @irq_mask: corresponding bit in the interrupt status reg
+ * @reg_idx: which reg set to use
+ */
+struct sde_irq_type {
+ u32 intr_type;
+ u32 instance_idx;
+ u32 irq_mask;
+ u32 reg_idx;
+};
+
+/**
+ * List of SDE interrupt registers
+ *
+ * Entry order matters: sde_irq_map's reg_idx field indexes this array
+ * (0 = top-level INTR, 1 = INTR2, 2 = HIST, then INTF_0..INTF_4).
+ */
+static const struct sde_intr_reg sde_intr_set[] = {
+	{
+		MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+		MDP_SSPP_TOP0_OFF+INTR_EN,
+		MDP_SSPP_TOP0_OFF+INTR_STATUS
+	},
+	{
+		MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+		MDP_SSPP_TOP0_OFF+INTR2_EN,
+		MDP_SSPP_TOP0_OFF+INTR2_STATUS
+	},
+	{
+		MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+		MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+		MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+	},
+	{
+		MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_0_OFF+INTF_INTR_EN,
+		MDP_INTF_0_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_1_OFF+INTF_INTR_EN,
+		MDP_INTF_1_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_2_OFF+INTF_INTR_EN,
+		MDP_INTF_2_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_3_OFF+INTF_INTR_EN,
+		MDP_INTF_3_OFF+INTF_INTR_STATUS
+	},
+	{
+		MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+		MDP_INTF_4_OFF+INTF_INTR_EN,
+		MDP_INTF_4_OFF+INTF_INTR_STATUS
+	}
+};
+
/*
 * IRQ mapping table - used to look up an irq_idx in this table that has
 * a matching interface type and instance index.
 *
 * The array index doubles as the irq_idx returned by irq_idx_lookup and
 * consumed by the enable/disable/clear/status ops. Each entry records
 * the status register it belongs to (reg_idx into sde_intr_set) and its
 * bit mask within that register; RESERVED entries carry a zero mask and
 * can never match a fired status bit.
 */
static const struct sde_irq_type sde_irq_map[] = {
	/* BEGIN MAP_RANGE: 0-31, INTR */
	/* irq_idx: 0-3 */
	{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, 0},
	{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, 0},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, 0},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, 0},
	/* irq_idx: 4-7 */
	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, 0},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, 0},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, 0},
	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, 0},
	/* irq_idx: 8-11 */
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
		SDE_INTR_PING_PONG_0_DONE, 0},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
		SDE_INTR_PING_PONG_1_DONE, 0},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
		SDE_INTR_PING_PONG_2_DONE, 0},
	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
		SDE_INTR_PING_PONG_3_DONE, 0},
	/* irq_idx: 12-15 */
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
		SDE_INTR_PING_PONG_0_RD_PTR, 0},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
		SDE_INTR_PING_PONG_1_RD_PTR, 0},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
		SDE_INTR_PING_PONG_2_RD_PTR, 0},
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
		SDE_INTR_PING_PONG_3_RD_PTR, 0},
	/* irq_idx: 16-19 */
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
		SDE_INTR_PING_PONG_0_WR_PTR, 0},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
		SDE_INTR_PING_PONG_1_WR_PTR, 0},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
		SDE_INTR_PING_PONG_2_WR_PTR, 0},
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
		SDE_INTR_PING_PONG_3_WR_PTR, 0},
	/* irq_idx: 20-23 */
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
		SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
		SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
		SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
		SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
	/* irq_idx: 24-27 */
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, 0},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, 0},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, 0},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, 0},
	/* irq_idx: 28-31 */
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, 0},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, 0},
	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, 0},
	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, 0},

	/* BEGIN MAP_RANGE: 32-63, INTR2 */
	/* irq_idx: 32-35 */
	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	/* irq_idx: 36-39 */
	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_WR_PTR, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	/* irq_idx: 40-43 */
	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_RD_PTR, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	/* irq_idx: 44-47 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, 1},
	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, 1},
	/* irq_idx: 48-51 */
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
		SDE_INTR_PING_PONG_0_TEAR_DETECTED, 1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
		SDE_INTR_PING_PONG_1_TEAR_DETECTED, 1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
		SDE_INTR_PING_PONG_2_TEAR_DETECTED, 1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
		SDE_INTR_PING_PONG_3_TEAR_DETECTED, 1},
	/* irq_idx: 52-55 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	/* irq_idx: 56-59 */
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
		SDE_INTR_PING_PONG_0_TE_DETECTED, 1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
		SDE_INTR_PING_PONG_1_TE_DETECTED, 1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
		SDE_INTR_PING_PONG_2_TE_DETECTED, 1},
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
		SDE_INTR_PING_PONG_3_TE_DETECTED, 1},
	/* irq_idx: 60-63 */
	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
		SDE_INTR_PING_PONG_S0_TE_DETECTED, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},

	/*
	 * BEGIN MAP_RANGE: 64-99 HIST
	 *
	 * NOTE(review): this range contains 36 entries (nine groups of
	 * four), 4 more than the 32-slot-per-register window that the
	 * dispatcher assumes (start_idx = reg_idx * 32). The original
	 * labels repeated "68-71" and were off by 4 from there on; they
	 * are renumbered below to the actual array positions. Every
	 * range that follows is therefore shifted 4 past its 32-aligned
	 * window boundary. Dispatch still matches on the per-entry
	 * reg_idx, but the intended table size should be confirmed
	 * against the hardware interrupt map.
	 */
	/* irq_idx: 64-67 */
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, 2},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
		SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	/* irq_idx: 68-71 */
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, 2},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
		SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	/* irq_idx: 72-75 */
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, 2},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
		SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, 2},
	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
		SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
	/* irq_idx: 76-79 */
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, 2},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
		SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	/* irq_idx: 80-83 */
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, 2},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
		SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	/* irq_idx: 84-87 */
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, 2},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
		SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, 2},
	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
		SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
	/* irq_idx: 88-91 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	/* irq_idx: 92-95 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	/* irq_idx: 96-99 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},

	/* BEGIN MAP_RANGE: 100-131 INTF_0_INTR (labelled 96-127 upstream) */
	/* irq_idx: 100-103 */
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
		SDE_INTR_VIDEO_INTO_STATIC, 3},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
		SDE_INTR_VIDEO_OUTOF_STATIC, 3},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
		SDE_INTR_DSICMD_0_INTO_STATIC, 3},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, 3},
	/* irq_idx: 104-107 */
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
		SDE_INTR_DSICMD_1_INTO_STATIC, 3},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, 3},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
		SDE_INTR_DSICMD_2_INTO_STATIC, 3},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, 3},
	/* irq_idx: 108-111 */
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_0, SDE_INTR_PROG_LINE, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	/* irq_idx: 112-115 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	/* irq_idx: 116-119 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	/* irq_idx: 120-123 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	/* irq_idx: 124-127 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	/* irq_idx: 128-131 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},

	/* BEGIN MAP_RANGE: 132-163 INTF_1_INTR (labelled 128-159 upstream) */
	/* irq_idx: 132-135 */
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
		SDE_INTR_VIDEO_INTO_STATIC, 4},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
		SDE_INTR_VIDEO_OUTOF_STATIC, 4},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
		SDE_INTR_DSICMD_0_INTO_STATIC, 4},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, 4},
	/* irq_idx: 136-139 */
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
		SDE_INTR_DSICMD_1_INTO_STATIC, 4},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, 4},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
		SDE_INTR_DSICMD_2_INTO_STATIC, 4},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, 4},
	/* irq_idx: 140-143 */
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_1, SDE_INTR_PROG_LINE, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	/* irq_idx: 144-147 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	/* irq_idx: 148-151 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	/* irq_idx: 152-155 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	/* irq_idx: 156-159 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	/* irq_idx: 160-163 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},

	/* BEGIN MAP_RANGE: 164-195 INTF_2_INTR (labelled 160-191 upstream) */
	/* irq_idx: 164-167 */
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
		SDE_INTR_VIDEO_INTO_STATIC, 5},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
		SDE_INTR_VIDEO_OUTOF_STATIC, 5},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
		SDE_INTR_DSICMD_0_INTO_STATIC, 5},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, 5},
	/* irq_idx: 168-171 */
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
		SDE_INTR_DSICMD_1_INTO_STATIC, 5},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, 5},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
		SDE_INTR_DSICMD_2_INTO_STATIC, 5},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, 5},
	/* irq_idx: 172-175 */
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_2, SDE_INTR_PROG_LINE, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	/* irq_idx: 176-179 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	/* irq_idx: 180-183 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	/* irq_idx: 184-187 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	/* irq_idx: 188-191 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	/* irq_idx: 192-195 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},

	/* BEGIN MAP_RANGE: 196-227 INTF_3_INTR (labelled 192-223 upstream) */
	/* irq_idx: 196-199 */
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
		SDE_INTR_VIDEO_INTO_STATIC, 6},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
		SDE_INTR_VIDEO_OUTOF_STATIC, 6},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
		SDE_INTR_DSICMD_0_INTO_STATIC, 6},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, 6},
	/* irq_idx: 200-203 */
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
		SDE_INTR_DSICMD_1_INTO_STATIC, 6},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, 6},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
		SDE_INTR_DSICMD_2_INTO_STATIC, 6},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, 6},
	/* irq_idx: 204-207 */
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_3, SDE_INTR_PROG_LINE, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	/* irq_idx: 208-211 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	/* irq_idx: 212-215 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	/* irq_idx: 216-219 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	/* irq_idx: 220-223 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	/* irq_idx: 224-227 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},

	/* BEGIN MAP_RANGE: 228-259 INTF_4_INTR (labelled 224-255 upstream) */
	/* irq_idx: 228-231 */
	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
		SDE_INTR_VIDEO_INTO_STATIC, 7},
	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
		SDE_INTR_VIDEO_OUTOF_STATIC, 7},
	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
		SDE_INTR_DSICMD_0_INTO_STATIC, 7},
	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
		SDE_INTR_DSICMD_0_OUTOF_STATIC, 7},
	/* irq_idx: 232-235 */
	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
		SDE_INTR_DSICMD_1_INTO_STATIC, 7},
	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
		SDE_INTR_DSICMD_1_OUTOF_STATIC, 7},
	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
		SDE_INTR_DSICMD_2_INTO_STATIC, 7},
	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
		SDE_INTR_DSICMD_2_OUTOF_STATIC, 7},
	/* irq_idx: 236-239 */
	{ SDE_IRQ_TYPE_PROG_LINE, INTF_4, SDE_INTR_PROG_LINE, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	/* irq_idx: 240-243 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	/* irq_idx: 244-247 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	/* irq_idx: 248-251 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	/* irq_idx: 252-255 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	/* irq_idx: 256-259 */
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
};
+
+static int sde_hw_intr_irqidx_lookup(enum sde_intr_type intr_type,
+ u32 instance_idx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sde_irq_map); i++) {
+ if (intr_type == sde_irq_map[i].intr_type &&
+ instance_idx == sde_irq_map[i].instance_idx)
+ return i;
+ }
+
+ pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
+ intr_type, instance_idx);
+ return -EINVAL;
+}
+
/*
 * sde_hw_intr_set_mask - overwrite one interrupt register with an exact
 * mask value; any previous register contents are replaced, and the
 * cached masks (cache_irq_mask) are deliberately not touched.
 */
static void sde_hw_intr_set_mask(struct sde_hw_intr *intr, uint32_t reg_off,
		uint32_t mask)
{
	SDE_REG_WRITE(&intr->hw, reg_off, mask);
}
+
/*
 * sde_hw_intr_dispatch_irq - fan the saved interrupt status bits out to
 * a callback.
 * @intr: hardware interrupt handle
 * @cbfunc: invoked as cbfunc(arg, irq_idx) for every fired interrupt;
 *          the callback is responsible for clearing the interrupt
 *          status. May be NULL, in which case the status is cleared
 *          here via ops.clear_interrupt_status instead.
 * @arg: opaque pointer handed back to @cbfunc
 *
 * Consumes intr->save_irq_status[], which must have been populated by a
 * prior get_interrupt_statuses call.
 */
static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
		void (*cbfunc)(void *, int),
		void *arg)
{
	int reg_idx;
	int irq_idx;
	int start_idx;
	int end_idx;
	u32 irq_status;
	unsigned long irq_flags;

	/*
	 * The dispatcher will save the IRQ status before calling here.
	 * Now need to go through each IRQ status and find matching
	 * irq lookup index.
	 */
	spin_lock_irqsave(&intr->status_lock, irq_flags);
	for (reg_idx = 0; reg_idx < ARRAY_SIZE(sde_intr_set); reg_idx++) {
		irq_status = intr->save_irq_status[reg_idx];

		/*
		 * Each Interrupt register has a range of 32 indexes, and
		 * that is static for sde_irq_map.
		 * NOTE(review): sde_irq_map's HIST block appears to hold
		 * 36 entries, so later windows are offset by 4; the
		 * reg_idx check below keeps dispatch correct regardless,
		 * but confirm the intended table layout.
		 */
		start_idx = reg_idx * 32;
		end_idx = start_idx + 32;

		/*
		 * Search through matching intr status from irq map.
		 * start_idx and end_idx defined the search range in
		 * the sde_irq_map. Stop early once every saved status
		 * bit has been consumed.
		 */
		for (irq_idx = start_idx;
				(irq_idx < end_idx) && irq_status;
				irq_idx++)
			if ((irq_status & sde_irq_map[irq_idx].irq_mask) &&
				(sde_irq_map[irq_idx].reg_idx == reg_idx)) {
				/*
				 * Once a match on irq mask, perform a callback
				 * to the given cbfunc. cbfunc will take care
				 * the interrupt status clearing. If cbfunc is
				 * not provided, then the interrupt clearing
				 * is here.
				 */
				if (cbfunc)
					cbfunc(arg, irq_idx);
				else
					intr->ops.clear_interrupt_status(
							intr, irq_idx);

				/*
				 * When callback finish, clear the irq_status
				 * with the matching mask. Once irq_status
				 * is all cleared, the search can be stopped.
				 */
				irq_status &= ~sde_irq_map[irq_idx].irq_mask;
			}
	}
	spin_unlock_irqrestore(&intr->status_lock, irq_flags);
}
+
+static int sde_hw_intr_enable_irq(struct sde_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ const struct sde_intr_reg *reg;
+ const struct sde_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &sde_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &sde_intr_set[reg_idx];
+
+ spin_lock_irqsave(&intr->mask_lock, irq_flags);
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if (cache_irq_mask & irq->irq_mask) {
+ dbgstr = "SDE IRQ already set:";
+ } else {
+ dbgstr = "SDE IRQ enabled:";
+
+ cache_irq_mask |= irq->irq_mask;
+ /* Cleaning any pending interrupt */
+ SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+ /* Enabling interrupts with the new mask */
+ SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+ spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int sde_hw_intr_disable_irq(struct sde_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ const struct sde_intr_reg *reg;
+ const struct sde_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &sde_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &sde_intr_set[reg_idx];
+
+ spin_lock_irqsave(&intr->mask_lock, irq_flags);
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if ((cache_irq_mask & irq->irq_mask) == 0) {
+ dbgstr = "SDE IRQ is already cleared:";
+ } else {
+ dbgstr = "SDE IRQ mask disable:";
+
+ cache_irq_mask &= ~irq->irq_mask;
+ /* Disable interrupts based on the new mask */
+ SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+ /* Cleaning any pending interrupt */
+ SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+ spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off, 0xffffffff);
+
+ return 0;
+}
+
+static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[i].en_off, 0x00000000);
+
+ return 0;
+}
+
+static int sde_hw_intr_get_valid_interrupts(struct sde_hw_intr *intr,
+ uint32_t *mask)
+{
+ *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
+ | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+ return 0;
+}
+
/*
 * sde_hw_intr_get_interrupt_sources - read HW_INTR_STATUS, the top-level
 * bitmask of which SDE sub-blocks currently assert the interrupt line.
 * Always returns 0.
 */
static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
		uint32_t *sources)
{
	*sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);
	return 0;
}
+
/*
 * sde_hw_intr_get_interrupt_statuses - latch and acknowledge all fired
 * interrupts.
 *
 * For each status register: read the raw status, read the enable mask,
 * write the raw status back to the clear register (ack), then keep only
 * the enabled bits in save_irq_status[] for dispatch_irqs to consume.
 * The ack uses the raw, unmasked status so bits that fired while
 * disabled are also cleared in hardware.
 */
static void sde_hw_intr_get_interrupt_statuses(struct sde_hw_intr *intr)
{
	int i;
	u32 enable_mask;
	unsigned long irq_flags;

	spin_lock_irqsave(&intr->status_lock, irq_flags);
	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++) {
		/* Read interrupt status */
		intr->save_irq_status[i] = SDE_REG_READ(&intr->hw,
				sde_intr_set[i].status_off);

		/* Read enable mask */
		enable_mask = SDE_REG_READ(&intr->hw, sde_intr_set[i].en_off);

		/* and clear the interrupt */
		if (intr->save_irq_status[i])
			SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off,
					intr->save_irq_status[i]);

		/* Finally update IRQ status based on enable mask */
		intr->save_irq_status[i] &= enable_mask;
	}
	spin_unlock_irqrestore(&intr->status_lock, irq_flags);
}
+
+static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
+ int irq_idx)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&intr->mask_lock, irq_flags);
+
+ reg_idx = sde_irq_map[irq_idx].reg_idx;
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
+ sde_irq_map[irq_idx].irq_mask);
+
+ spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+}
+
+static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
+ int irq_idx, bool clear)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ u32 intr_status;
+
+ spin_lock_irqsave(&intr->mask_lock, irq_flags);
+
+ reg_idx = sde_irq_map[irq_idx].reg_idx;
+ intr_status = SDE_REG_READ(&intr->hw,
+ sde_intr_set[reg_idx].status_off) &
+ sde_irq_map[irq_idx].irq_mask;
+ if (intr_status && clear)
+ SDE_REG_WRITE(&intr->hw, sde_intr_set[irq_idx].clr_off,
+ intr_status);
+
+ spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+
+ return intr_status;
+}
+
+static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
+{
+ ops->set_mask = sde_hw_intr_set_mask;
+ ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
+ ops->enable_irq = sde_hw_intr_enable_irq;
+ ops->disable_irq = sde_hw_intr_disable_irq;
+ ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
+ ops->clear_all_irqs = sde_hw_intr_clear_irqs;
+ ops->disable_all_irqs = sde_hw_intr_disable_irqs;
+ ops->get_valid_interrupts = sde_hw_intr_get_valid_interrupts;
+ ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
+ ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
+ ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
+ ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
+}
+
+static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
+ void __iomem *addr, struct sde_hw_blk_reg_map *hw)
+{
+ if (m->mdp_count == 0)
+ return NULL;
+
+ hw->base_off = addr;
+ hw->blk_off = m->mdss[0].base;
+ hw->hwversion = m->hwversion;
+ return &m->mdss[0];
+}
+
+struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_intr *intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ struct sde_mdss_base_cfg *cfg;
+
+ if (!intr)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = __intr_offset(m, addr, &intr->hw);
+ if (!cfg) {
+ kfree(intr);
+ return ERR_PTR(-EINVAL);
+ }
+ __setup_intr_ops(&intr->ops);
+
+ intr->irq_idx_tbl_size = ARRAY_SIZE(sde_irq_map);
+
+ intr->cache_irq_mask = kcalloc(ARRAY_SIZE(sde_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->cache_irq_mask == NULL) {
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ intr->save_irq_status = kcalloc(ARRAY_SIZE(sde_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->save_irq_status == NULL) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&intr->mask_lock);
+ spin_lock_init(&intr->status_lock);
+
+ return intr;
+}
+
+void sde_hw_intr_destroy(struct sde_hw_intr *intr)
+{
+ if (intr) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr->save_irq_status);
+ kfree(intr);
+ }
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
new file mode 100644
index 000000000000..261ef64c0065
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
@@ -0,0 +1,257 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_INTERRUPTS_H
+#define _SDE_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_util.h"
+#include "sde_hw_mdss.h"
+
/*
 * Interrupt source status bits — presumably the bit layout of the
 * top-level HW_INTR_STATUS register (see get_interrupt_sources /
 * get_valid_interrupts); confirm against the MDSS register map.
 */
#define IRQ_SOURCE_MDP		BIT(0)
#define IRQ_SOURCE_DSI0		BIT(4)
#define IRQ_SOURCE_DSI1		BIT(5)
#define IRQ_SOURCE_HDMI		BIT(8)
#define IRQ_SOURCE_EDP		BIT(12)
#define IRQ_SOURCE_MHL		BIT(16)
+
/**
 * sde_intr_type - HW Interrupt Type
 * @SDE_IRQ_TYPE_WB_ROT_COMP:		WB rotator done
 * @SDE_IRQ_TYPE_WB_WFD_COMP:		WB WFD done
 * @SDE_IRQ_TYPE_PING_PONG_COMP:	PingPong done
 * @SDE_IRQ_TYPE_PING_PONG_RD_PTR:	PingPong read pointer
 * @SDE_IRQ_TYPE_PING_PONG_WR_PTR:	PingPong write pointer
 * @SDE_IRQ_TYPE_PING_PONG_AUTO_REF:	PingPong auto refresh
 * @SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK:	PingPong Tear check
 * @SDE_IRQ_TYPE_PING_PONG_TE_CHECK:	PingPong TE detection
 * @SDE_IRQ_TYPE_INTF_UNDER_RUN:	INTF underrun
 * @SDE_IRQ_TYPE_INTF_VSYNC:		INTF VSYNC
 * @SDE_IRQ_TYPE_CWB_OVERFLOW:		Concurrent WB overflow
 * @SDE_IRQ_TYPE_HIST_VIG_DONE:		VIG Histogram done
 * @SDE_IRQ_TYPE_HIST_VIG_RSTSEQ:	VIG Histogram reset
 * @SDE_IRQ_TYPE_HIST_DSPP_DONE:	DSPP Histogram done
 * @SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ:	DSPP Histogram reset
 * @SDE_IRQ_TYPE_WD_TIMER:		Watchdog timer
 * @SDE_IRQ_TYPE_SFI_VIDEO_IN:		Video static frame INTR into static
 * @SDE_IRQ_TYPE_SFI_VIDEO_OUT:		Video static frame INTR out-of static
 * @SDE_IRQ_TYPE_SFI_CMD_0_IN:		DSI CMD0 static frame INTR into static
 * @SDE_IRQ_TYPE_SFI_CMD_0_OUT:		DSI CMD0 static frame INTR out-of static
 * @SDE_IRQ_TYPE_SFI_CMD_1_IN:		DSI CMD1 static frame INTR into static
 * @SDE_IRQ_TYPE_SFI_CMD_1_OUT:		DSI CMD1 static frame INTR out-of static
 * @SDE_IRQ_TYPE_SFI_CMD_2_IN:		DSI CMD2 static frame INTR into static
 * @SDE_IRQ_TYPE_SFI_CMD_2_OUT:		DSI CMD2 static frame INTR out-of static
 * @SDE_IRQ_TYPE_PROG_LINE:		Programmable Line interrupt
 * @SDE_IRQ_TYPE_RESERVED:		Reserved for expansion; used to mark
 *					unused slots in the irq mapping table
 *					(zero mask, never matches)
 */
enum sde_intr_type {
	SDE_IRQ_TYPE_WB_ROT_COMP,
	SDE_IRQ_TYPE_WB_WFD_COMP,
	SDE_IRQ_TYPE_PING_PONG_COMP,
	SDE_IRQ_TYPE_PING_PONG_RD_PTR,
	SDE_IRQ_TYPE_PING_PONG_WR_PTR,
	SDE_IRQ_TYPE_PING_PONG_AUTO_REF,
	SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK,
	SDE_IRQ_TYPE_PING_PONG_TE_CHECK,
	SDE_IRQ_TYPE_INTF_UNDER_RUN,
	SDE_IRQ_TYPE_INTF_VSYNC,
	SDE_IRQ_TYPE_CWB_OVERFLOW,
	SDE_IRQ_TYPE_HIST_VIG_DONE,
	SDE_IRQ_TYPE_HIST_VIG_RSTSEQ,
	SDE_IRQ_TYPE_HIST_DSPP_DONE,
	SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ,
	SDE_IRQ_TYPE_WD_TIMER,
	SDE_IRQ_TYPE_SFI_VIDEO_IN,
	SDE_IRQ_TYPE_SFI_VIDEO_OUT,
	SDE_IRQ_TYPE_SFI_CMD_0_IN,
	SDE_IRQ_TYPE_SFI_CMD_0_OUT,
	SDE_IRQ_TYPE_SFI_CMD_1_IN,
	SDE_IRQ_TYPE_SFI_CMD_1_OUT,
	SDE_IRQ_TYPE_SFI_CMD_2_IN,
	SDE_IRQ_TYPE_SFI_CMD_2_OUT,
	SDE_IRQ_TYPE_PROG_LINE,
	SDE_IRQ_TYPE_RESERVED,
};
+
struct sde_hw_intr;

/**
 * Interrupt operations vtable, populated by sde_hw_intr_init.
 */
struct sde_hw_intr_ops {
	/**
	 * set_mask - Programs the given interrupt register with the
	 *	given interrupt mask. Register value will get overwritten.
	 * @intr:	HW interrupt handle
	 * @reg:	MDSS HW register offset
	 * @irqmask:	IRQ mask value
	 */
	void (*set_mask)(
			struct sde_hw_intr *intr,
			uint32_t reg,
			uint32_t irqmask);

	/**
	 * irq_idx_lookup - Lookup IRQ index on the HW interrupt type
	 *	Used for all irq related ops
	 * @intr_type:		Interrupt type defined in sde_intr_type
	 * @instance_idx:	HW interrupt block instance
	 * @return:		irq_idx or -EINVAL for lookup fail
	 */
	int (*irq_idx_lookup)(
			enum sde_intr_type intr_type,
			u32 instance_idx);

	/**
	 * enable_irq - Enable IRQ based on lookup IRQ index
	 * @intr:	HW interrupt handle
	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
	 * @return:	0 for success, otherwise failure
	 */
	int (*enable_irq)(
			struct sde_hw_intr *intr,
			int irq_idx);

	/**
	 * disable_irq - Disable IRQ based on lookup IRQ index
	 * @intr:	HW interrupt handle
	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
	 * @return:	0 for success, otherwise failure
	 */
	int (*disable_irq)(
			struct sde_hw_intr *intr,
			int irq_idx);

	/**
	 * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
	 *	any asserted IRQs). Useful during reset.
	 * @intr:	HW interrupt handle
	 * @return:	0 for success, otherwise failure
	 */
	int (*clear_all_irqs)(
			struct sde_hw_intr *intr);

	/**
	 * disable_all_irqs - Disables all the interrupts. Useful during reset.
	 * @intr:	HW interrupt handle
	 * @return:	0 for success, otherwise failure
	 */
	int (*disable_all_irqs)(
			struct sde_hw_intr *intr);

	/**
	 * dispatch_irqs - IRQ dispatcher will call the given callback
	 *	function when a matching interrupt status bit is
	 *	found in the irq mapping table (the callback owns
	 *	clearing the status; when NULL the status is
	 *	cleared by the dispatcher itself).
	 * @intr:	HW interrupt handle
	 * @cbfunc:	Callback function pointer
	 * @arg:	Argument to pass back during callback
	 */
	void (*dispatch_irqs)(
			struct sde_hw_intr *intr,
			void (*cbfunc)(void *arg, int irq_idx),
			void *arg);

	/**
	 * get_interrupt_statuses - Gets and store value from all interrupt
	 *	status registers that are currently fired
	 *	(also acknowledges them in hardware).
	 * @intr:	HW interrupt handle
	 */
	void (*get_interrupt_statuses)(
			struct sde_hw_intr *intr);

	/**
	 * clear_interrupt_status - Clears HW interrupt status based on given
	 *	lookup IRQ index.
	 * @intr:	HW interrupt handle
	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
	 */
	void (*clear_interrupt_status)(
			struct sde_hw_intr *intr,
			int irq_idx);

	/**
	 * get_interrupt_status - Gets HW interrupt status, and clear if set,
	 *	based on given lookup IRQ index.
	 * @intr:	HW interrupt handle
	 * @irq_idx:	Lookup irq index return from irq_idx_lookup
	 * @clear:	True to clear irq after read
	 * @return:	status register contents masked to this irq's bits
	 */
	u32 (*get_interrupt_status)(
			struct sde_hw_intr *intr,
			int irq_idx,
			bool clear);

	/**
	 * get_valid_interrupts - Gets a mask of all valid interrupt sources
	 *	within SDE. These are actually status bits
	 *	within interrupt registers that specify the
	 *	source of the interrupt in IRQs. For example,
	 *	valid interrupt sources can be MDP, DSI,
	 *	HDMI etc.
	 * @intr:	HW interrupt handle
	 * @mask:	Returning the interrupt source MASK
	 * @return:	0 for success, otherwise failure
	 */
	int (*get_valid_interrupts)(
			struct sde_hw_intr *intr,
			uint32_t *mask);

	/**
	 * get_interrupt_sources - Gets the bitmask of the SDE interrupt
	 *	source that are currently fired.
	 * @intr:	HW interrupt handle
	 * @sources:	Returning the SDE interrupt source status bit mask
	 * @return:	0 for success, otherwise failure
	 */
	int (*get_interrupt_sources)(
			struct sde_hw_intr *intr,
			uint32_t *sources);
};
+
/**
 * struct sde_hw_intr: hw interrupts handling data structure
 * @hw:               virtual address mapping
 * @ops:              function pointer mapping for IRQ handling
 * @cache_irq_mask:   array of IRQ enable masks reg storage created during
 *                    init; one entry per interrupt status register
 * @save_irq_status:  array of IRQ status reg storage created during init;
 *                    one entry per interrupt status register, filled by
 *                    ops.get_interrupt_statuses
 * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
 * @mask_lock:        spinlock for accessing IRQ mask
 * @status_lock:      spinlock for accessing IRQ status
 */
struct sde_hw_intr {
	struct sde_hw_blk_reg_map hw;
	struct sde_hw_intr_ops ops;
	u32 *cache_irq_mask;
	u32 *save_irq_status;
	u32 irq_idx_tbl_size;
	spinlock_t mask_lock;
	spinlock_t status_lock;
};
+
/**
 * sde_hw_intr_init(): Initializes the interrupts hw object
 * @addr: mapped register io address of MDP
 * @m : pointer to mdss catalog data
 * @return: interrupts hw object, or an ERR_PTR-encoded error on
 *          failure; check with IS_ERR(), never against NULL
 */
struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
		struct sde_mdss_cfg *m);

/**
 * sde_hw_intr_destroy(): Cleanup interrupts hw object
 * @intr: pointer to interrupts hw object (NULL is a no-op)
 */
void sde_hw_intr_destroy(struct sde_hw_intr *intr);
#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
new file mode 100644
index 000000000000..3b34719e9971
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -0,0 +1,342 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_intf.h"
+
+#define INTF_TIMING_ENGINE_EN 0x000
+#define INTF_CONFIG 0x004
+#define INTF_HSYNC_CTL 0x008
+#define INTF_VSYNC_PERIOD_F0 0x00C
+#define INTF_VSYNC_PERIOD_F1 0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0 0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1 0x018
+#define INTF_DISPLAY_V_START_F0 0x01C
+#define INTF_DISPLAY_V_START_F1 0x020
+#define INTF_DISPLAY_V_END_F0 0x024
+#define INTF_DISPLAY_V_END_F1 0x028
+#define INTF_ACTIVE_V_START_F0 0x02C
+#define INTF_ACTIVE_V_START_F1 0x030
+#define INTF_ACTIVE_V_END_F0 0x034
+#define INTF_ACTIVE_V_END_F1 0x038
+#define INTF_DISPLAY_HCTL 0x03C
+#define INTF_ACTIVE_HCTL 0x040
+#define INTF_BORDER_COLOR 0x044
+#define INTF_UNDERFLOW_COLOR 0x048
+#define INTF_HSYNC_SKEW 0x04C
+#define INTF_POLARITY_CTL 0x050
+#define INTF_TEST_CTL 0x054
+#define INTF_TP_COLOR0 0x058
+#define INTF_TP_COLOR1 0x05C
+#define INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define INTF_FRAME_COUNT 0x0AC
+#define INTF_LINE_COUNT 0x0B0
+
+#define INTF_DEFLICKER_CONFIG 0x0F0
+#define INTF_DEFLICKER_STRNG_COEFF 0x0F4
+#define INTF_DEFLICKER_WEAK_COEFF 0x0F8
+
+#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
+#define INTF_PANEL_FORMAT 0x090
+#define INTF_TPG_ENABLE 0x100
+#define INTF_TPG_MAIN_CONTROL 0x104
+#define INTF_TPG_VIDEO_CONFIG 0x108
+#define INTF_TPG_COMPONENT_LIMITS 0x10C
+#define INTF_TPG_RECTANGLE 0x110
+#define INTF_TPG_INITIAL_VALUE 0x114
+#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
+#define INTF_TPG_RGB_MAPPING 0x11C
+#define INTF_PROG_FETCH_START 0x170
+
+/*
+ * Note: INTF_FRAME_LINE_COUNT_EN/INTF_FRAME_COUNT/INTF_LINE_COUNT were
+ * previously redefined here; the definitions above (0x0A8/0x0AC/0x0B0)
+ * are the single source of truth.
+ */
+
+#define INTF_MISR_CTRL 0x180
+#define INTF_MISR_SIGNATURE 0x184
+
+#define MISR_FRAME_COUNT_MASK 0xFF
+#define MISR_CTRL_ENABLE BIT(8)
+#define MISR_CTRL_STATUS BIT(9)
+#define MISR_CTRL_STATUS_CLEAR BIT(10)
+#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)
+
+/*
+ * _intf_offset() - resolve the catalog entry for an interface id and
+ * fill in the register mapping of its block.
+ * @intf: interface id to look up
+ * @m: mdss catalog data
+ * @addr: mapped register io address of MDP
+ * @b: register map to populate on success
+ *
+ * Entries of type INTF_NONE are placeholders and never match.
+ * Return: catalog entry on success, ERR_PTR(-EINVAL) if not found.
+ */
+static struct sde_intf_cfg *_intf_offset(enum sde_intf intf,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->intf_count; i++) {
+		struct sde_intf_cfg *cfg = &m->intf[i];
+
+		if (cfg->id != intf || cfg->type == INTF_NONE)
+			continue;
+
+		b->base_off = addr;
+		b->blk_off = cfg->base;
+		b->hwversion = m->hwversion;
+		b->log_mask = SDE_DBG_MASK_INTF;
+		return cfg;
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/*
+ * sde_hw_intf_setup_timing_engine() - program the video timing generator
+ * @ctx: interface context
+ * @p: timing parameters (porches, sync pulse widths, panel xres/yres and
+ *     active width/height)
+ * @fmt: output pixel format
+ *
+ * Horizontal quantities are in pixel clocks; vertical quantities are in
+ * lines and are converted to pixel-clock units by multiplying with the
+ * horizontal sync period before being written to the *_V_* registers.
+ */
+static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
+		const struct intf_timing_params *p,
+		const struct sde_format *fmt)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 hsync_period, vsync_period;
+	u32 display_v_start, display_v_end;
+	u32 hsync_start_x, hsync_end_x;
+	u32 active_h_start, active_h_end;
+	u32 active_v_start, active_v_end;
+	u32 active_hctl, display_hctl, hsync_ctl;
+	u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
+	u32 panel_format;
+	u32 intf_cfg;
+
+	/* read interface_cfg */
+	intf_cfg = SDE_REG_READ(c, INTF_CONFIG);
+	/* totals include active size, both porches and the sync pulse */
+	hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+	p->h_front_porch;
+	vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+	p->v_front_porch;
+
+	/* vertical start/end in pixel clocks from start of frame */
+	display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+	hsync_period) + p->hsync_skew;
+	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+	p->hsync_skew - 1;
+
+	/* eDP shifts the display window past the first line's horizontal
+	 * blanking and trims the last line's front porch
+	 */
+	if (ctx->cap->type == INTF_EDP) {
+		display_v_start += p->hsync_pulse_width + p->h_back_porch;
+		display_v_end -= p->h_front_porch;
+	}
+
+	hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+	hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+	/* active region is programmed only when it differs from the full
+	 * panel resolution; 0/0 leaves the active window disabled
+	 */
+	if (p->width != p->xres) {
+		active_h_start = hsync_start_x;
+		active_h_end = active_h_start + p->xres - 1;
+	} else {
+		active_h_start = 0;
+		active_h_end = 0;
+	}
+
+	if (p->height != p->yres) {
+		active_v_start = display_v_start;
+		active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+	} else {
+		active_v_start = 0;
+		active_v_end = 0;
+	}
+
+	if (active_h_end) {
+		active_hctl = (active_h_end << 16) | active_h_start;
+		intf_cfg |= BIT(29);	/* ACTIVE_H_ENABLE */
+	} else {
+		active_hctl = 0;
+	}
+
+	if (active_v_end)
+		intf_cfg |= BIT(30);	/* ACTIVE_V_ENABLE */
+
+	/* start/pulse in low half-word, end/period in high half-word */
+	hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+	den_polarity = 0;
+	/* HDMI: active-low sync below 720 lines — NOTE(review): presumably
+	 * matching CEA-861 mode conventions; confirm against HDMI spec
+	 */
+	if (ctx->cap->type == INTF_HDMI) {
+		hsync_polarity = p->yres >= 720 ? 0 : 1;
+		vsync_polarity = p->yres >= 720 ? 0 : 1;
+	} else {
+		hsync_polarity = 0;
+		vsync_polarity = 0;
+	}
+	polarity_ctl = (den_polarity << 2) | /*  DEN Polarity  */
+		(vsync_polarity << 1) | /* VSYNC Polarity */
+		(hsync_polarity << 0); /* HSYNC Polarity */
+
+	/* pack per-component bit widths plus fixed config bits (0x21 << 8) */
+	if (!SDE_FORMAT_IS_YUV(fmt))
+		panel_format = (fmt->bits[C0_G_Y] |
+				(fmt->bits[C1_B_Cb] << 2) |
+				(fmt->bits[C2_R_Cr] << 4) |
+				(0x21 << 8));
+	else
+		/* Interface treats all the pixel data in RGB888 format */
+		panel_format = (COLOR_8BIT |
+				(COLOR_8BIT << 2) |
+				(COLOR_8BIT << 4) |
+				(0x21 << 8));
+
+	SDE_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+	SDE_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
+	SDE_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+			p->vsync_pulse_width * hsync_period);
+	SDE_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+	SDE_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
+	SDE_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
+	SDE_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl);
+	SDE_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
+	SDE_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
+	SDE_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+	SDE_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
+	SDE_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+	SDE_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+	/* enable both frame and line counters */
+	SDE_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+	SDE_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+	SDE_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+}
+
+/*
+ * sde_hw_intf_enable_timing_engine() - start or stop the timing engine
+ * @intf: interface context
+ * @enable: non-zero to enable, zero to disable
+ */
+static void sde_hw_intf_enable_timing_engine(
+		struct sde_hw_intf *intf,
+		u8 enable)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+	u32 val = enable ? 1 : 0;
+
+	/* Note: Display interface select is handled in top block hw layer */
+	SDE_REG_WRITE(c, INTF_TIMING_ENGINE_EN, val);
+}
+
+/*
+ * sde_hw_intf_setup_prg_fetch() - enable/disable programmable fetch
+ * @intf: interface context
+ * @fetch: enable flag and fetch start position (vsync counter value)
+ */
+static void sde_hw_intf_setup_prg_fetch(
+		struct sde_hw_intf *intf,
+		const struct intf_prog_fetch *fetch)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+	int cfg = SDE_REG_READ(c, INTF_CONFIG);
+
+	/*
+	 * Fetch should always be outside the active lines. If the fetching
+	 * is programmed within active region, hardware behavior is unknown.
+	 */
+	if (!fetch->enable) {
+		cfg &= ~BIT(31);
+	} else {
+		cfg |= BIT(31);
+		SDE_REG_WRITE(c, INTF_PROG_FETCH_START, fetch->fetch_start);
+	}
+
+	SDE_REG_WRITE(c, INTF_CONFIG, cfg);
+}
+
+/*
+ * sde_hw_intf_get_status() - report timing engine state and counters
+ * @intf: interface context
+ * @s: output status; counters are zeroed when the engine is disabled
+ */
+static void sde_hw_intf_get_status(
+		struct sde_hw_intf *intf,
+		struct intf_status *s)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+	u32 en = SDE_REG_READ(c, INTF_TIMING_ENGINE_EN);
+
+	/* counters are only meaningful while the timing engine runs */
+	s->is_en = en;
+	s->frame_count = en ? SDE_REG_READ(c, INTF_FRAME_COUNT) : 0;
+	s->line_count = en ? SDE_REG_READ(c, INTF_LINE_COUNT) : 0;
+}
+
+/*
+ * sde_hw_intf_set_misr() - arm or disarm MISR signature collection
+ * @intf: interface context
+ * @misr_map: MISR state; NULL is a no-op. When enable is set the MISR
+ *            is armed with a one-frame count, otherwise it is disabled.
+ */
+static void sde_hw_intf_set_misr(struct sde_hw_intf *intf,
+		struct sde_misr_params *misr_map)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+	u32 config;
+
+	if (!misr_map)
+		return;
+
+	/* reset any previously latched signature */
+	SDE_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+	/* Clear data */
+	wmb();
+
+	config = misr_map->enable ?
+			((MISR_FRAME_COUNT_MASK & 1) | MISR_CTRL_ENABLE) : 0;
+	SDE_REG_WRITE(c, INTF_MISR_CTRL, config);
+}
+
+/*
+ * sde_hw_intf_collect_misr() - read one MISR signature sample
+ * @intf: interface context
+ * @misr_map: MISR collection state; NULL is a no-op. One CRC is appended
+ *            per call while collection is enabled and within bounds;
+ *            last_idx advances on every call regardless.
+ *
+ * Collection is automatically disabled once last_idx passes the batch
+ * size, so crc_value[] can never be overrun.
+ */
+static void sde_hw_intf_collect_misr(struct sde_hw_intf *intf,
+		struct sde_misr_params *misr_map)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+
+	if (!misr_map)
+		return;
+
+	if (misr_map->enable) {
+		/* store only within both the requested frame count and the
+		 * fixed batch buffer bounds
+		 */
+		if (misr_map->last_idx < misr_map->frame_count &&
+			misr_map->last_idx < SDE_CRC_BATCH_SIZE)
+			misr_map->crc_value[misr_map->last_idx] =
+				SDE_REG_READ(c, INTF_MISR_SIGNATURE);
+	}
+
+	/* logical AND: enable is a boolean, not a bit mask */
+	misr_map->enable = misr_map->enable &&
+			(misr_map->last_idx <= SDE_CRC_BATCH_SIZE);
+
+	misr_map->last_idx++;
+}
+
+/*
+ * _setup_intf_ops() - populate the interface function table.
+ * @ops: ops structure to fill in
+ * @cap: feature capability flags; currently unused, all ops are
+ *       installed unconditionally
+ */
+static void _setup_intf_ops(struct sde_hw_intf_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_timing_gen = sde_hw_intf_setup_timing_engine;
+	ops->setup_prg_fetch  = sde_hw_intf_setup_prg_fetch;
+	ops->get_status = sde_hw_intf_get_status;
+	ops->enable_timing = sde_hw_intf_enable_timing_engine;
+	ops->setup_misr = sde_hw_intf_set_misr;
+	ops->collect_misr = sde_hw_intf_collect_misr;
+}
+
+/*
+ * sde_hw_intf_init() - allocate and initialize an INTF driver context.
+ * @idx: interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m: pointer to mdss catalog data
+ *
+ * Return: context pointer on success (caller owns it and must release it
+ * with sde_hw_intf_destroy()), ERR_PTR on failure.
+ */
+struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_intf_cfg *cfg;
+	struct sde_hw_intf *c;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	/* resolve the catalog entry and map the register block */
+	cfg = _intf_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		pr_err("failed to create sde_hw_intf %d\n", idx);
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* record capabilities and install the feature-based ops table */
+	c->idx = idx;
+	c->cap = cfg;
+	c->mdss = m;
+	_setup_intf_ops(&c->ops, c->cap->features);
+
+	return c;
+}
+
+/*
+ * sde_hw_intf_destroy() - release an INTF driver context.
+ * @intf: context allocated by sde_hw_intf_init(); may be NULL
+ */
+void sde_hw_intf_destroy(struct sde_hw_intf *intf)
+{
+	/* kfree() tolerates NULL, no guard needed */
+	kfree(intf);
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
new file mode 100644
index 000000000000..f4a01cb64d7f
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
@@ -0,0 +1,133 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_INTF_H
+#define _SDE_HW_INTF_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_intf;
+
+/* Batch size of frames for collecting MISR data */
+#define SDE_CRC_BATCH_SIZE 16
+
+/**
+ * struct sde_misr_params : Interface for getting and setting MISR data
+ * Assumption is these functions will be called after clocks are enabled
+ * @ enable : enables/disables MISR
+ * @ frame_count : represents number of frames for which MISR is enabled
+ * @ last_idx: number of frames for which MISR data is collected
+ * @ crc_value: stores the collected MISR data
+ */
+struct sde_misr_params {
+ bool enable;
+ u32 frame_count;
+ u32 last_idx;
+ u32 crc_value[SDE_CRC_BATCH_SIZE];
+};
+
+/* intf timing settings */
+struct intf_timing_params {
+ u32 width; /* active width */
+ u32 height; /* active height */
+ u32 xres; /* Display panel width */
+ u32 yres; /* Display panel height */
+
+ u32 h_back_porch;
+ u32 h_front_porch;
+ u32 v_back_porch;
+ u32 v_front_porch;
+ u32 hsync_pulse_width;
+ u32 vsync_pulse_width;
+ u32 hsync_polarity;
+ u32 vsync_polarity;
+ u32 border_clr;
+ u32 underflow_clr;
+ u32 hsync_skew;
+};
+
+struct intf_prog_fetch {
+ u8 enable;
+ /* vsync counter for the front porch pixel line */
+ u32 fetch_start;
+};
+
+struct intf_status {
+ u8 is_en; /* interface timing engine is enabled or not */
+ u32 frame_count; /* frame count since timing engine enabled */
+ u32 line_count; /* current line count including blanking */
+};
+
+/**
+ * struct sde_hw_intf_ops : Interface to the interface Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @ setup_timing_gen : programs the timing engine
+ * @ setup_prog_fetch : enables/disables the programmable fetch logic
+ * @ enable_timing: enable/disable timing engine
+ * @ get_status: returns if timing engine is enabled or not
+ * @ setup_misr: enables/disables MISR in HW register
+ * @ collect_misr: reads and stores MISR data from HW register
+ */
+struct sde_hw_intf_ops {
+ void (*setup_timing_gen)(struct sde_hw_intf *intf,
+ const struct intf_timing_params *p,
+ const struct sde_format *fmt);
+
+ void (*setup_prg_fetch)(struct sde_hw_intf *intf,
+ const struct intf_prog_fetch *fetch);
+
+ void (*enable_timing)(struct sde_hw_intf *intf,
+ u8 enable);
+
+ void (*get_status)(struct sde_hw_intf *intf,
+ struct intf_status *status);
+
+ void (*setup_misr)(struct sde_hw_intf *intf,
+ struct sde_misr_params *misr_map);
+
+ void (*collect_misr)(struct sde_hw_intf *intf,
+ struct sde_misr_params *misr_map);
+};
+
+/**
+ * struct sde_hw_intf : INTF driver context
+ * @hw: block register map object
+ * @idx: interface index this context drives
+ * @cap: interface catalog entry (type, base, features)
+ * @mdss: pointer to the mdss catalog data
+ * @ops: function table installed by sde_hw_intf_init()
+ */
+struct sde_hw_intf {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* intf */
+	enum sde_intf idx;
+	const struct sde_intf_cfg *cap;
+	const struct sde_mdss_cfg *mdss;
+
+	/* ops */
+	struct sde_hw_intf_ops ops;
+};
+
+/**
+ * sde_hw_intf_init(): Initializes the intf driver for the passed
+ * interface idx.
+ * @idx: interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_intf_destroy(): Destroys INTF driver context
+ * @intf: Pointer to INTF driver context
+ */
+void sde_hw_intf_destroy(struct sde_hw_intf *intf);
+
+#endif /*_SDE_HW_INTF_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
new file mode 100644
index 000000000000..365b9b17715d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -0,0 +1,207 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_catalog.h"
+#include "sde_hwio.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_mdss.h"
+
+#define LM_OP_MODE 0x00
+#define LM_OUT_SIZE 0x04
+#define LM_BORDER_COLOR_0 0x08
+#define LM_BORDER_COLOR_1 0x010
+
+/* These register are offset to mixer base + stage base */
+#define LM_BLEND0_OP 0x00
+#define LM_BLEND0_CONST_ALPHA 0x04
+#define LM_BLEND0_FG_ALPHA 0x04
+#define LM_BLEND0_BG_ALPHA 0x08
+
+/*
+ * _lm_offset() - resolve the catalog entry for a layer mixer id and fill
+ * in the register mapping of its block.
+ * @mixer: mixer id to look up
+ * @m: mdss catalog data
+ * @addr: mapped register io address of MDP
+ * @b: register map to populate on success
+ *
+ * Return: catalog entry on success, ERR_PTR(-EINVAL) if not found.
+ */
+static struct sde_lm_cfg *_lm_offset(enum sde_lm mixer,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->mixer_count; i++) {
+		if (mixer == m->mixer[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->mixer[i].base;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_LM;
+			return &m->mixer[i];
+		}
+	}
+
+	/*
+	 * An unknown mixer id is an invalid argument, not an allocation
+	 * failure; return -EINVAL (was -ENOMEM) for consistency with the
+	 * other _*_offset() helpers such as _intf_offset().
+	 */
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be setup
+ * @ctx: mixer ctx contains the mixer to be programmed
+ * @stage: stage index to setup
+ *
+ * Return: non-negative register offset, or -EINVAL for the base stage
+ * or a stage beyond the mixer's blend stage count.
+ */
+static inline int _stage_offset(struct sde_hw_mixer *ctx, enum sde_stage stage)
+{
+	const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
+
+	/* the base stage has no blend registers of its own */
+	if (stage == SDE_STAGE_BASE || stage > sblk->maxblendstages)
+		return -EINVAL;
+
+	return sblk->blendstage_base[stage - 1];
+}
+
+/*
+ * sde_hw_lm_setup_out() - program mixer output size and split mode
+ * @ctx: mixer context
+ * @mixer: output configuration (dimensions, right-mixer flag)
+ */
+static void sde_hw_lm_setup_out(struct sde_hw_mixer *ctx,
+		struct sde_hw_mixer_cfg *mixer)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 op_mode = SDE_REG_READ(c, LM_OP_MODE);
+
+	/* output dimensions: height in the upper half-word, width below */
+	SDE_REG_WRITE(c, LM_OUT_SIZE,
+			(mixer->out_height << 16) | mixer->out_width);
+
+	/* SPLIT_LEFT_RIGHT */
+	if (mixer->right_mixer)
+		op_mode |= BIT(31);
+	else
+		op_mode &= ~BIT(31);
+	SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+/*
+ * sde_hw_lm_setup_border_color() - program mixer border color
+ * @ctx: mixer context
+ * @color: four 12-bit color components
+ * @border_en: when zero, the registers are left untouched
+ */
+static void sde_hw_lm_setup_border_color(struct sde_hw_mixer *ctx,
+		struct sde_mdss_color *color,
+		u8 border_en)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 lo, hi;
+
+	if (!border_en)
+		return;
+
+	/* each register packs two 12-bit components, 16 bits apart */
+	lo = (color->color_0 & 0xFFF) | ((color->color_1 & 0xFFF) << 16);
+	hi = (color->color_2 & 0xFFF) | ((color->color_3 & 0xFFF) << 16);
+	SDE_REG_WRITE(c, LM_BORDER_COLOR_0, lo);
+	SDE_REG_WRITE(c, LM_BORDER_COLOR_1, hi);
+}
+
+/*
+ * sde_hw_lm_setup_blend_config_msmskunk() - stage blend setup for the
+ * MSMSKUNK register layout, where the fg and bg constant alphas share a
+ * single CONST_ALPHA register (fg in bits 23:16, bg in bits 7:0).
+ * @ctx: mixer context
+ * @stage: blend stage to program; SDE_STAGE_BASE is skipped
+ * @fg_alpha: foreground constant alpha (low 8 bits used)
+ * @bg_alpha: background constant alpha (low 8 bits used)
+ * @blend_op: blend operation bit mask (SDE_BLEND_* flags)
+ */
+static void sde_hw_lm_setup_blend_config_msmskunk(struct sde_hw_mixer *ctx,
+	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int stage_off;
+	u32 const_alpha;
+
+	if (stage == SDE_STAGE_BASE)
+		return;
+
+	stage_off = _stage_offset(ctx, stage);
+	if (WARN_ON(stage_off < 0))
+		return;
+
+	const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+	SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+/*
+ * sde_hw_lm_setup_blend_config() - stage blend setup for targets with
+ * separate FG_ALPHA and BG_ALPHA registers.
+ * @ctx: mixer context
+ * @stage: blend stage to program; SDE_STAGE_BASE is skipped
+ * @fg_alpha: foreground constant alpha
+ * @bg_alpha: background constant alpha
+ * @blend_op: blend operation bit mask (SDE_BLEND_* flags)
+ */
+static void sde_hw_lm_setup_blend_config(struct sde_hw_mixer *ctx,
+	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int off;
+
+	/* nothing to program for the base stage */
+	if (stage == SDE_STAGE_BASE)
+		return;
+
+	off = _stage_offset(ctx, stage);
+	if (WARN_ON(off < 0))
+		return;
+
+	SDE_REG_WRITE(c, LM_BLEND0_FG_ALPHA + off, fg_alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_BG_ALPHA + off, bg_alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_OP + off, blend_op);
+}
+
+/*
+ * sde_hw_lm_setup_color3() - select alpha source per stage
+ * @ctx: mixer context
+ * @mixer_op_mode: new op-mode bits; the top two control bits (31:30)
+ *                 of the existing register value are preserved
+ */
+static void sde_hw_lm_setup_color3(struct sde_hw_mixer *ctx,
+	uint32_t mixer_op_mode)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int op_mode = SDE_REG_READ(c, LM_OP_MODE);
+
+	/* keep bits 31:30 from hw, replace everything else */
+	op_mode &= BIT(31) | BIT(30);
+	op_mode |= mixer_op_mode;
+
+	SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+/*
+ * sde_hw_lm_gc() - gamma correction setup; intentionally empty stub so
+ * the ops table always carries a valid function pointer.
+ */
+static void sde_hw_lm_gc(struct sde_hw_mixer *mixer,
+			void *cfg)
+{
+}
+
+/*
+ * _setup_mixer_ops() - populate the mixer function table.
+ * @m: mdss catalog, used to pick the hw-revision specific blend setup
+ * @ops: ops structure to fill in
+ * @cap: feature capability flags; currently unused
+ */
+static void _setup_mixer_ops(struct sde_mdss_cfg *m,
+		struct sde_hw_lm_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_mixer_out = sde_hw_lm_setup_out;
+	/* MSMSKUNK packs both constant alphas into one register */
+	if (IS_MSMSKUNK_TARGET(m->hwversion))
+		ops->setup_blend_config = sde_hw_lm_setup_blend_config_msmskunk;
+	else
+		ops->setup_blend_config = sde_hw_lm_setup_blend_config;
+	ops->setup_alpha_out = sde_hw_lm_setup_color3;
+	ops->setup_border_color = sde_hw_lm_setup_border_color;
+	ops->setup_gc = sde_hw_lm_gc;
+}
+
+/*
+ * sde_hw_lm_init() - allocate and initialize a mixer driver context.
+ * @idx: mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m: pointer to mdss catalog data
+ *
+ * Return: context pointer on success (release with sde_hw_lm_destroy()),
+ * ERR_PTR on failure.
+ */
+struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_mixer *c;
+	struct sde_lm_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _lm_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		/* log the failure, consistent with sde_hw_intf_init() */
+		pr_err("failed to create sde_hw_mixer %d\n", idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_mixer_ops(m, &c->ops, c->cap->features);
+
+	/*
+	 * Perform any default initialization for the sspp blocks
+	 */
+	return c;
+}
+
+/*
+ * sde_hw_lm_destroy() - release a mixer driver context.
+ * @lm: context allocated by sde_hw_lm_init(); may be NULL
+ */
+void sde_hw_lm_destroy(struct sde_hw_mixer *lm)
+{
+	/* kfree() tolerates NULL, no guard needed */
+	kfree(lm);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.h b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
new file mode 100644
index 000000000000..7318c18ddaba
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_LM_H
+#define _SDE_HW_LM_H
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_mixer;
+
+/**
+ * struct sde_hw_mixer_cfg : mixer output configuration
+ * @out_width: mixer output width in pixels
+ * @out_height: mixer output height in pixels
+ * @right_mixer: true if this mixer drives the right side of a
+ *               split-left-right pair (sets OP_MODE bit 31)
+ * @flags: extra configuration flags; semantics defined by callers —
+ *         NOTE(review): no user visible in this file, confirm usage
+ */
+struct sde_hw_mixer_cfg {
+	u32 out_width;
+	u32 out_height;
+	bool right_mixer;
+	int flags;
+};
+
+struct sde_hw_color3_cfg {
+ u8 keep_fg[SDE_STAGE_MAX];
+};
+
+/**
+ *
+ * struct sde_hw_lm_ops : Interface to the mixer Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_lm_ops {
+ /*
+ * Sets up mixer output width and height
+ * and border color if enabled
+ */
+ void (*setup_mixer_out)(struct sde_hw_mixer *ctx,
+ struct sde_hw_mixer_cfg *cfg);
+
+ /*
+ * Alpha blending configuration
+ * for the specified stage
+ */
+ void (*setup_blend_config)(struct sde_hw_mixer *ctx, uint32_t stage,
+ uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
+
+ /*
+ * Alpha color component selection from either fg or bg
+ */
+ void (*setup_alpha_out)(struct sde_hw_mixer *ctx, uint32_t mixer_op);
+
+ /**
+ * setup_border_color : enable/disable border color
+ */
+ void (*setup_border_color)(struct sde_hw_mixer *ctx,
+ struct sde_mdss_color *color,
+ u8 border_en);
+ /**
+ * setup_gc : enable/disable gamma correction feature
+ */
+ void (*setup_gc)(struct sde_hw_mixer *mixer,
+ void *cfg);
+
+};
+
+/**
+ * struct sde_hw_mixer : layer mixer driver context
+ * @hw: block register map object
+ * @idx: mixer index this context drives
+ * @cap: mixer catalog entry (base, features, sub-blocks)
+ * @mdp: top-level MDP catalog entry
+ * @ctl: associated control path catalog entry
+ * @ops: function table installed by sde_hw_lm_init()
+ */
+struct sde_hw_mixer {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* lm */
+	enum sde_lm idx;
+	const struct sde_lm_cfg *cap;
+	const struct sde_mdp_cfg *mdp;
+	const struct sde_ctl_cfg *ctl;
+
+	/* ops */
+	struct sde_hw_lm_ops ops;
+};
+
+/**
+ * sde_hw_lm_init(): Initializes the mixer hw driver object.
+ * should be called once before accessing every mixer.
+ * @idx: mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_lm_destroy(): Destroys layer mixer driver context
+ * @lm: Pointer to LM driver context
+ */
+void sde_hw_lm_destroy(struct sde_hw_mixer *lm);
+
+#endif /*_SDE_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
new file mode 100644
index 000000000000..dcba248d27b0
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -0,0 +1,443 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_MDSS_H
+#define _SDE_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define SDE_NONE 0
+
+#ifndef SDE_CSC_MATRIX_COEFF_SIZE
+#define SDE_CSC_MATRIX_COEFF_SIZE 9
+#endif
+
+#ifndef SDE_CSC_CLAMP_SIZE
+#define SDE_CSC_CLAMP_SIZE 6
+#endif
+
+#ifndef SDE_CSC_BIAS_SIZE
+#define SDE_CSC_BIAS_SIZE 3
+#endif
+
+#ifndef SDE_MAX_PLANES
+#define SDE_MAX_PLANES 4
+#endif
+
+#define PIPES_PER_STAGE 2
+#ifndef SDE_MAX_DE_CURVES
+#define SDE_MAX_DE_CURVES 3
+#endif
+
+#define SDE_FORMAT_FLAG_YUV (1 << 0)
+#define SDE_FORMAT_FLAG_DX (1 << 1)
+
+#define SDE_FORMAT_IS_YUV(X) ((X)->flag & SDE_FORMAT_FLAG_YUV)
+#define SDE_FORMAT_IS_DX(X) ((X)->flag & SDE_FORMAT_FLAG_DX)
+#define SDE_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == SDE_FETCH_LINEAR)
+#define SDE_FORMAT_IS_UBWC(X) ((X)->fetch_mode == SDE_FETCH_UBWC)
+
+#define SDE_BLEND_FG_ALPHA_FG_CONST (0 << 0)
+#define SDE_BLEND_FG_ALPHA_BG_CONST (1 << 0)
+#define SDE_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
+#define SDE_BLEND_FG_ALPHA_BG_PIXEL (3 << 0)
+#define SDE_BLEND_FG_INV_ALPHA (1 << 2)
+#define SDE_BLEND_FG_MOD_ALPHA (1 << 3)
+#define SDE_BLEND_FG_INV_MOD_ALPHA (1 << 4)
+#define SDE_BLEND_FG_TRANSP_EN (1 << 5)
+#define SDE_BLEND_BG_ALPHA_FG_CONST (0 << 8)
+#define SDE_BLEND_BG_ALPHA_BG_CONST (1 << 8)
+#define SDE_BLEND_BG_ALPHA_FG_PIXEL (2 << 8)
+#define SDE_BLEND_BG_ALPHA_BG_PIXEL (3 << 8)
+#define SDE_BLEND_BG_INV_ALPHA (1 << 10)
+#define SDE_BLEND_BG_MOD_ALPHA (1 << 11)
+#define SDE_BLEND_BG_INV_MOD_ALPHA (1 << 12)
+#define SDE_BLEND_BG_TRANSP_EN (1 << 13)
+
+enum sde_hw_blk_type {
+ SDE_HW_BLK_TOP = 0,
+ SDE_HW_BLK_SSPP,
+ SDE_HW_BLK_LM,
+ SDE_HW_BLK_DSPP,
+ SDE_HW_BLK_CTL,
+ SDE_HW_BLK_CDM,
+ SDE_HW_BLK_PINGPONG,
+ SDE_HW_BLK_INTF,
+ SDE_HW_BLK_WB,
+ SDE_HW_BLK_MAX,
+};
+
+enum sde_mdp {
+ MDP_TOP = 0x1,
+ MDP_MAX,
+};
+
+enum sde_sspp {
+ SSPP_NONE,
+ SSPP_VIG0,
+ SSPP_VIG1,
+ SSPP_VIG2,
+ SSPP_VIG3,
+ SSPP_RGB0,
+ SSPP_RGB1,
+ SSPP_RGB2,
+ SSPP_RGB3,
+ SSPP_DMA0,
+ SSPP_DMA1,
+ SSPP_DMA2,
+ SSPP_DMA3,
+ SSPP_CURSOR0,
+ SSPP_CURSOR1,
+ SSPP_MAX
+};
+
+enum sde_sspp_type {
+ SSPP_TYPE_VIG,
+ SSPP_TYPE_RGB,
+ SSPP_TYPE_DMA,
+ SSPP_TYPE_CURSOR,
+ SSPP_TYPE_MAX
+};
+
+enum sde_lm {
+ LM_0 = 1,
+ LM_1,
+ LM_2,
+ LM_3,
+ LM_4,
+ LM_5,
+ LM_6,
+ LM_MAX
+};
+
+enum sde_stage {
+ SDE_STAGE_BASE = 0,
+ SDE_STAGE_0,
+ SDE_STAGE_1,
+ SDE_STAGE_2,
+ SDE_STAGE_3,
+ SDE_STAGE_4,
+ SDE_STAGE_5,
+ SDE_STAGE_6,
+ SDE_STAGE_MAX
+};
+enum sde_dspp {
+ DSPP_0 = 1,
+ DSPP_1,
+ DSPP_2,
+ DSPP_3,
+ DSPP_MAX
+};
+
+enum sde_ctl {
+ CTL_0 = 1,
+ CTL_1,
+ CTL_2,
+ CTL_3,
+ CTL_4,
+ CTL_MAX
+};
+
+enum sde_cdm {
+ CDM_0 = 1,
+ CDM_1,
+ CDM_MAX
+};
+
+enum sde_pingpong {
+ PINGPONG_0 = 1,
+ PINGPONG_1,
+ PINGPONG_2,
+ PINGPONG_3,
+ PINGPONG_4,
+ PINGPONG_S0,
+ PINGPONG_MAX
+};
+
+enum sde_intf {
+ INTF_0 = 1,
+ INTF_1,
+ INTF_2,
+ INTF_3,
+ INTF_4,
+ INTF_5,
+ INTF_6,
+ INTF_MAX
+};
+
+enum sde_intf_type {
+ INTF_NONE = 0x0,
+ INTF_DSI = 0x1,
+ INTF_HDMI = 0x3,
+ INTF_LCDC = 0x5,
+ INTF_EDP = 0x9,
+ INTF_DP = 0xa,
+ INTF_TYPE_MAX,
+
+ /* virtual interfaces */
+ INTF_WB = 0x100,
+};
+
+enum sde_intf_mode {
+ INTF_MODE_NONE = 0,
+ INTF_MODE_CMD,
+ INTF_MODE_VIDEO,
+ INTF_MODE_WB_BLOCK,
+ INTF_MODE_WB_LINE,
+ INTF_MODE_MAX
+};
+
+enum sde_wb {
+ WB_0 = 1,
+ WB_1,
+ WB_2,
+ WB_3,
+ WB_MAX
+};
+
+enum sde_ad {
+ AD_0 = 0x1,
+ AD_1,
+ AD_MAX
+};
+
+enum sde_cwb {
+ CWB_0 = 0x1,
+ CWB_1,
+ CWB_2,
+ CWB_3,
+ CWB_MAX
+};
+
+enum sde_wd_timer {
+ WD_TIMER_0 = 0x1,
+ WD_TIMER_1,
+ WD_TIMER_2,
+ WD_TIMER_3,
+ WD_TIMER_4,
+ WD_TIMER_5,
+ WD_TIMER_MAX
+};
+
+enum sde_vbif {
+ VBIF_0,
+ VBIF_1,
+ VBIF_MAX,
+ VBIF_RT = VBIF_0,
+ VBIF_NRT = VBIF_1
+};
+
+enum sde_iommu_domain {
+ SDE_IOMMU_DOMAIN_UNSECURE,
+ SDE_IOMMU_DOMAIN_SECURE,
+ SDE_IOMMU_DOMAIN_MAX
+};
+
+/**
+ * SDE HW,Component order color map
+ */
+enum {
+ C0_G_Y = 0,
+ C1_B_Cb = 1,
+ C2_R_Cr = 2,
+ C3_ALPHA = 3
+};
+
+/**
+ * enum sde_plane_type - defines how the color component pixel packing
+ * @SDE_PLANE_INTERLEAVED : Color components in single plane
+ * @SDE_PLANE_PLANAR : Color component in separate planes
+ * @SDE_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
+ */
+enum sde_plane_type {
+ SDE_PLANE_INTERLEAVED,
+ SDE_PLANE_PLANAR,
+ SDE_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum sde_chroma_samp_type - chroma sub-samplng type
+ * @SDE_CHROMA_RGB : No chroma subsampling
+ * @SDE_CHROMA_H2V1 : Chroma pixels are horizontally subsampled
+ * @SDE_CHROMA_H1V2 : Chroma pixels are vertically subsampled
+ * @SDE_CHROMA_420 : 420 subsampling
+ */
+enum sde_chroma_samp_type {
+ SDE_CHROMA_RGB,
+ SDE_CHROMA_H2V1,
+ SDE_CHROMA_H1V2,
+ SDE_CHROMA_420
+};
+
+/**
+ * sde_fetch_type - Defines How SDE HW fetches data
+ * @SDE_FETCH_LINEAR : fetch is line by line
+ * @SDE_FETCH_TILE : fetches data in Z order from a tile
+ * @SDE_FETCH_UBWC : fetch and decompress data
+ */
+enum sde_fetch_type {
+ SDE_FETCH_LINEAR,
+ SDE_FETCH_TILE,
+ SDE_FETCH_UBWC
+};
+
+/**
+ * Value of enum chosen to fit the number of bits
+ * expected by the HW programming.
+ */
+enum {
+ COLOR_ALPHA_1BIT = 0,
+ COLOR_ALPHA_4BIT = 1,
+ COLOR_4BIT = 0,
+ COLOR_5BIT = 1, /* No 5-bit Alpha */
+ COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
+ COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
+};
+
+/**
+ * enum sde_3d_blend_mode
+ * Desribes how the 3d data is blended
+ * @BLEND_3D_NONE : 3d blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : vertical row interleaving
+ * @BLEND_3D_COL_INT : column interleaving
+ * @BLEND_3D_MAX :
+ */
+enum sde_3d_blend_mode {
+ BLEND_3D_NONE = 0,
+ BLEND_3D_FRAME_INT,
+ BLEND_3D_H_ROW_INT,
+ BLEND_3D_V_ROW_INT,
+ BLEND_3D_COL_INT,
+ BLEND_3D_MAX
+};
+
+/** struct sde_format - defines the format configuration which
+ * allows SDE HW to correctly fetch and decode the format
+ * @base: base msm_format struture containing fourcc code
+ * @fetch_planes: how the color components are packed in pixel format
+ * @element: element color ordering
+ * @bits: element bit widths
+ * @chroma_sample: chroma sub-samplng type
+ * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
+ * @unpack_tight: 0 for loose, 1 for tight
+ * @unpack_count: 0 = 1 component, 1 = 2 component
+ * @bpp: bytes per pixel
+ * @alpha_enable: whether the format has an alpha channel
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @is_yuv: is format a yuv variant
+ * @flag: usage bit flags
+ * @tile_width: format tile width
+ * @tile_height: format tile height
+ */
+struct sde_format {
+ struct msm_format base;
+ enum sde_plane_type fetch_planes;
+ u8 element[SDE_MAX_PLANES];
+ u8 bits[SDE_MAX_PLANES];
+ enum sde_chroma_samp_type chroma_sample;
+ u8 unpack_align_msb;
+ u8 unpack_tight;
+ u8 unpack_count;
+ u8 bpp;
+ u8 alpha_enable;
+ u8 num_planes;
+ enum sde_fetch_type fetch_mode;
+ u32 flag;
+ u16 tile_width;
+ u16 tile_height;
+};
+#define to_sde_format(x) container_of(x, struct sde_format, base)
+
+/**
+ * struct sde_hw_fmt_layout - format information of the source pixel data
+ * @format: pixel format parameters
+ * @num_planes: number of planes (including meta data planes)
+ * @width: image width
+ * @height: image height
+ * @total_size: total size in bytes
+ * @plane_addr: address of each plane
+ * @plane_size: length of each plane
+ * @plane_pitch: pitch of each plane
+ */
+struct sde_hw_fmt_layout {
+ const struct sde_format *format;
+ uint32_t num_planes;
+ uint32_t width;
+ uint32_t height;
+ uint32_t total_size;
+ uint32_t plane_addr[SDE_MAX_PLANES];
+ uint32_t plane_size[SDE_MAX_PLANES];
+ uint32_t plane_pitch[SDE_MAX_PLANES];
+};
+
+struct sde_rect {
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+};
+
+struct sde_csc_cfg {
+ /* matrix coefficients in S15.16 format */
+ uint32_t csc_mv[SDE_CSC_MATRIX_COEFF_SIZE];
+ uint32_t csc_pre_bv[SDE_CSC_BIAS_SIZE];
+ uint32_t csc_post_bv[SDE_CSC_BIAS_SIZE];
+ uint32_t csc_pre_lv[SDE_CSC_CLAMP_SIZE];
+ uint32_t csc_post_lv[SDE_CSC_CLAMP_SIZE];
+};
+
+/**
+ * struct sde_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct sde_mdss_color {
+ u32 color_0;
+ u32 color_1;
+ u32 color_2;
+ u32 color_3;
+};
+
+/*
+ * Define bit masks for h/w logging.
+ */
+#define SDE_DBG_MASK_NONE (1 << 0)
+#define SDE_DBG_MASK_CDM (1 << 1)
+#define SDE_DBG_MASK_DSPP (1 << 2)
+#define SDE_DBG_MASK_INTF (1 << 3)
+#define SDE_DBG_MASK_LM (1 << 4)
+#define SDE_DBG_MASK_CTL (1 << 5)
+#define SDE_DBG_MASK_PINGPONG (1 << 6)
+#define SDE_DBG_MASK_SSPP (1 << 7)
+#define SDE_DBG_MASK_WB (1 << 8)
+#define SDE_DBG_MASK_TOP (1 << 9)
+#define SDE_DBG_MASK_VBIF (1 << 10)
+
+/**
+ * struct sde_hw_cp_cfg: hardware dspp/lm feature payload.
+ * @payload: Feature specific payload.
+ * @len: Length of the payload.
+ */
+struct sde_hw_cp_cfg {
+ void *payload;
+ u32 len;
+};
+
+#endif /* _SDE_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
new file mode 100644
index 000000000000..837edeeba4c6
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
@@ -0,0 +1,168 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_pingpong.h"
+
+#define PP_TEAR_CHECK_EN 0x000
+#define PP_SYNC_CONFIG_VSYNC 0x004
+#define PP_SYNC_CONFIG_HEIGHT 0x008
+#define PP_SYNC_WRCOUNT 0x00C
+#define PP_VSYNC_INIT_VAL 0x010
+#define PP_INT_COUNT_VAL 0x014
+#define PP_SYNC_THRESH 0x018
+#define PP_START_POS 0x01C
+#define PP_RD_PTR_IRQ 0x020
+#define PP_WR_PTR_IRQ 0x024
+#define PP_OUT_LINE_COUNT 0x028
+#define PP_LINE_COUNT 0x02C
+#define PP_AUTOREFRESH_CONFIG 0x030
+
+#define PP_FBC_MODE 0x034
+#define PP_FBC_BUDGET_CTL 0x038
+#define PP_FBC_LOSSY_MODE 0x03C
+#define PP_DSC_MODE 0x0a0
+#define PP_DCE_DATA_IN_SWAP 0x0ac
+#define PP_DCE_DATA_OUT_SWAP 0x0c8
+
+/*
+ * Look up the catalog entry for pingpong block @pp and, on a match,
+ * fill in the register map @b. Returns the catalog entry on success,
+ * ERR_PTR(-EINVAL) when the id is not present in the catalog.
+ */
+static struct sde_pingpong_cfg *_pingpong_offset(enum sde_pingpong pp,
+ struct sde_mdss_cfg *m,
+ void __iomem *addr,
+ struct sde_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->pingpong_count; i++) {
+ struct sde_pingpong_cfg *cfg = &m->pingpong[i];
+
+ if (cfg->id != pp)
+ continue;
+
+ /* matched: populate the block register map */
+ b->base_off = addr;
+ b->blk_off = cfg->base;
+ b->hwversion = m->hwversion;
+ b->log_mask = SDE_DBG_MASK_PINGPONG;
+ return cfg;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+/*
+ * sde_hw_pp_setup_te_config - program the pingpong tear check block
+ * @pp: pingpong context
+ * @te: tear check configuration
+ *
+ * Enables the vsync counter (BIT(19)), optionally selects hardware
+ * vsync mode (BIT(20)), and programs sync height, vsync init value,
+ * read pointer irq position, start position and the start/continue
+ * thresholds. PP_SYNC_WRCOUNT is initialized just past
+ * start_pos + sync_threshold_start. Always returns 0.
+ */
+static int sde_hw_pp_setup_te_config(struct sde_hw_pingpong *pp,
+ struct sde_hw_tear_check *te)
+{
+ struct sde_hw_blk_reg_map *c = &pp->hw;
+ int cfg;
+
+ cfg = BIT(19); /* VSYNC_COUNTER_EN */
+ if (te->hw_vsync_mode)
+ cfg |= BIT(20);
+
+ cfg |= te->vsync_count;
+
+ SDE_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ SDE_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+ SDE_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+ SDE_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+ SDE_REG_WRITE(c, PP_START_POS, te->start_pos);
+ SDE_REG_WRITE(c, PP_SYNC_THRESH,
+ ((te->sync_threshold_continue << 16) |
+ te->sync_threshold_start));
+ SDE_REG_WRITE(c, PP_SYNC_WRCOUNT,
+ (te->start_pos + te->sync_threshold_start + 1));
+
+ return 0;
+}
+
+/*
+ * Arm or disarm hardware autorefresh: BIT(31) enables it, the low
+ * bits carry the frame interval. Always returns 0.
+ */
+int sde_hw_pp_setup_autorefresh_config(struct sde_hw_pingpong *pp,
+ struct sde_hw_autorefresh *cfg)
+{
+ u32 val = 0;
+
+ if (cfg->enable)
+ val = BIT(31) | cfg->frame_count;
+
+ SDE_REG_WRITE(&pp->hw, PP_AUTOREFRESH_CONFIG, val);
+
+ return 0;
+}
+
+/* Stub: DSC compression programming is not implemented yet. */
+int sde_hw_pp_setup_dsc_compression(struct sde_hw_pingpong *pp,
+ struct sde_hw_dsc_cfg *cfg)
+{
+ return 0;
+}
+/* Enable or disable the tear check block; always returns 0. */
+int sde_hw_pp_enable_te(struct sde_hw_pingpong *pp, bool enable)
+{
+ struct sde_hw_blk_reg_map *c = &pp->hw;
+
+ SDE_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
+ return 0;
+}
+
+/*
+ * Read back the vsync state of the pingpong block into @info.
+ * Always returns 0.
+ */
+int sde_hw_pp_get_vsync_info(struct sde_hw_pingpong *pp,
+ struct sde_hw_pp_vsync_info *info)
+{
+ struct sde_hw_blk_reg_map *c = &pp->hw;
+ u32 val;
+
+ /* low 16 bits hold the programmed vsync init value */
+ val = SDE_REG_READ(c, PP_VSYNC_INIT_VAL);
+ info->init_val = val & 0xffff;
+
+ /* PP_INT_COUNT_VAL packs vsync count (hi 16) and line count (lo 16) */
+ val = SDE_REG_READ(c, PP_INT_COUNT_VAL);
+ info->vsync_count = (val & 0xffff0000) >> 16;
+ info->line_count = val & 0xffff;
+
+ return 0;
+}
+
+/*
+ * Populate the pingpong op table. All pingpong blocks currently get the
+ * same op set; @cap is accepted for future feature gating.
+ */
+static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_tearcheck = sde_hw_pp_setup_te_config;
+ ops->enable_tearcheck = sde_hw_pp_enable_te;
+ ops->get_vsync_info = sde_hw_pp_get_vsync_info;
+ ops->setup_autorefresh = sde_hw_pp_setup_autorefresh_config;
+ ops->setup_dsc = sde_hw_pp_setup_dsc_compression;
+}
+
+struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_pingpong *c;
+ struct sde_pingpong_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ /* resolve the catalog entry and fill in the c->hw register map */
+ cfg = _pingpong_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* assign ops based on the capabilities reported by the catalog */
+ c->idx = idx;
+ c->pingpong_hw_cap = cfg;
+ _setup_pingpong_ops(&c->ops, c->pingpong_hw_cap->features);
+
+ return c;
+}
+
+/* Free a context allocated by sde_hw_pingpong_init (NULL-safe). */
+void sde_hw_pingpong_destroy(struct sde_hw_pingpong *pp)
+{
+ kfree(pp);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
new file mode 100644
index 000000000000..fc3bea54b485
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
@@ -0,0 +1,123 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_PINGPONG_H
+#define _SDE_HW_PINGPONG_H
+
+struct sde_hw_pingpong;
+
+struct sde_hw_tear_check {
+ /*
+ * This is ratio of MDP VSYNC clk freq(Hz) to
+ * refresh rate divided by no of lines
+ */
+ u32 vsync_count;
+ u32 sync_cfg_height; /* programmed into PP_SYNC_CONFIG_HEIGHT */
+ u32 vsync_init_val; /* programmed into PP_VSYNC_INIT_VAL */
+ u32 sync_threshold_start; /* low half of PP_SYNC_THRESH */
+ u32 sync_threshold_continue; /* high half of PP_SYNC_THRESH */
+ u32 start_pos; /* programmed into PP_START_POS */
+ u32 rd_ptr_irq; /* programmed into PP_RD_PTR_IRQ */
+ u8 hw_vsync_mode; /* non-zero selects hardware vsync mode */
+};
+
+struct sde_hw_autorefresh {
+ bool enable; /* arm autorefresh (BIT(31) of the config register) */
+ u32 frame_count; /* frame interval, low bits of the config register */
+};
+
+/* Snapshot of pingpong vsync state, filled by the get_vsync_info op */
+struct sde_hw_pp_vsync_info {
+ u32 init_val; /* value of rd pointer at vsync edge */
+ u32 vsync_count; /* mdp clocks to complete one line */
+ u32 line_count; /* current line count */
+};
+
+/* DSC compression configuration passed to the setup_dsc op */
+struct sde_hw_dsc_cfg {
+ u8 enable;
+};
+
+/**
+ *
+ * struct sde_hw_pingpong_ops : Interface to the pingpong Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_tearcheck : program tear check configuration
+ * @enable_tearcheck : enable/disable the tear check block
+ * @get_vsync_info : retrieve vsync and line counter values
+ * @setup_autorefresh : configure and enable the autorefresh config
+ * @setup_dsc : program the dsc compression block
+ */
+struct sde_hw_pingpong_ops {
+ /**
+ * enables vsync generation and sets up init value of
+ * read pointer and programs the tear check configuration
+ */
+ int (*setup_tearcheck)(struct sde_hw_pingpong *pp,
+ struct sde_hw_tear_check *cfg);
+
+ /**
+ * enables tear check block
+ */
+ int (*enable_tearcheck)(struct sde_hw_pingpong *pp,
+ bool enable);
+
+ /**
+ * provides the programmed and current
+ * line_count
+ */
+ int (*get_vsync_info)(struct sde_hw_pingpong *pp,
+ struct sde_hw_pp_vsync_info *info);
+
+ /**
+ * configure and enable the autorefresh config
+ */
+ int (*setup_autorefresh)(struct sde_hw_pingpong *pp,
+ struct sde_hw_autorefresh *cfg);
+
+ /**
+ * Program the dsc compression block
+ */
+ int (*setup_dsc)(struct sde_hw_pingpong *pp,
+ struct sde_hw_dsc_cfg *cfg);
+};
+
+/**
+ * struct sde_hw_pingpong - pingpong block driver context
+ * @hw: block register map
+ * @idx: pingpong index this context was initialized for
+ * @pingpong_hw_cap: catalog capability entry for this block
+ * @ops: pingpong function table (see struct sde_hw_pingpong_ops)
+ */
+struct sde_hw_pingpong {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* pingpong */
+ enum sde_pingpong idx;
+ const struct sde_pingpong_cfg *pingpong_hw_cap;
+
+ /* ops */
+ struct sde_hw_pingpong_ops ops;
+};
+
+/**
+ * sde_hw_pingpong_init - initializes the pingpong driver for the passed
+ * pingpong idx.
+ * @idx: Pingpong index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ * Returns: Error code or allocated sde_hw_pingpong context
+ */
+struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_pingpong_destroy - destroys pingpong driver context
+ * should be called to free the context
+ * @pp: Pointer to PP driver context returned by sde_hw_pingpong_init
+ */
+void sde_hw_pingpong_destroy(struct sde_hw_pingpong *pp);
+
+#endif /*_SDE_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
new file mode 100644
index 000000000000..882a1c84e9a2
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -0,0 +1,943 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_sspp.h"
+#include "sde_hw_color_processing.h"
+
+#define SDE_FETCH_CONFIG_RESET_VALUE 0x00000087
+
+/* SDE_SSPP_SRC */
+#define SSPP_SRC_SIZE 0x00
+#define SSPP_SRC_XY 0x08
+#define SSPP_OUT_SIZE 0x0c
+#define SSPP_OUT_XY 0x10
+#define SSPP_SRC0_ADDR 0x14
+#define SSPP_SRC1_ADDR 0x18
+#define SSPP_SRC2_ADDR 0x1C
+#define SSPP_SRC3_ADDR 0x20
+#define SSPP_SRC_YSTRIDE0 0x24
+#define SSPP_SRC_YSTRIDE1 0x28
+#define SSPP_SRC_FORMAT 0x30
+#define SSPP_SRC_UNPACK_PATTERN 0x34
+#define SSPP_SRC_OP_MODE 0x38
+#define MDSS_MDP_OP_DEINTERLACE BIT(22)
+
+#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
+#define MDSS_MDP_OP_IGC_EN BIT(16)
+#define MDSS_MDP_OP_FLIP_UD BIT(14)
+#define MDSS_MDP_OP_FLIP_LR BIT(13)
+#define MDSS_MDP_OP_BWC_EN BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
+
+#define SSPP_SRC_CONSTANT_COLOR 0x3c
+#define SSPP_FETCH_CONFIG 0x048
+#define SSPP_DANGER_LUT 0x60
+#define SSPP_SAFE_LUT 0x64
+#define SSPP_CREQ_LUT 0x68
+#define SSPP_QOS_CTRL 0x6C
+#define SSPP_DECIMATION_CONFIG 0xB4
+#define SSPP_SRC_ADDR_SW_STATUS 0x70
+#define SSPP_SW_PIX_EXT_C0_LR 0x100
+#define SSPP_SW_PIX_EXT_C0_TB 0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR 0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB 0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS 0x118
+#define SSPP_SW_PIX_EXT_C3_LR 0x120
+#define SSPP_SW_PIX_EXT_C3_TB 0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128
+#define SSPP_UBWC_ERROR_STATUS 0x138
+#define SSPP_VIG_OP_MODE 0x0
+#define SSPP_VIG_CSC_10_OP_MODE 0x0
+
+/* SSPP_QOS_CTRL */
+#define SSPP_QOS_CTRL_VBLANK_EN BIT(16)
+#define SSPP_QOS_CTRL_DANGER_SAFE_EN BIT(0)
+#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF 4
+#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF 20
+
+/* SDE_SSPP_SCALER_QSEED2 */
+#define SCALE_CONFIG 0x04
+#define COMP0_3_PHASE_STEP_X 0x10
+#define COMP0_3_PHASE_STEP_Y 0x14
+#define COMP1_2_PHASE_STEP_X 0x18
+#define COMP1_2_PHASE_STEP_Y 0x1c
+#define COMP0_3_INIT_PHASE_X 0x20
+#define COMP0_3_INIT_PHASE_Y 0x24
+#define COMP1_2_INIT_PHASE_X 0x28
+#define COMP1_2_INIT_PHASE_Y 0x2C
+#define VIG_0_QSEED2_SHARP 0x30
+
+/* SDE_SSPP_SCALER_QSEED3 */
+#define QSEED3_HW_VERSION 0x00
+#define QSEED3_OP_MODE 0x04
+#define QSEED3_RGB2Y_COEFF 0x08
+#define QSEED3_PHASE_INIT 0x0C
+#define QSEED3_PHASE_STEP_Y_H 0x10
+#define QSEED3_PHASE_STEP_Y_V 0x14
+#define QSEED3_PHASE_STEP_UV_H 0x18
+#define QSEED3_PHASE_STEP_UV_V 0x1C
+#define QSEED3_PRELOAD 0x20
+#define QSEED3_DE_SHARPEN 0x24
+#define QSEED3_DE_SHARPEN_CTL 0x28
+#define QSEED3_DE_SHAPE_CTL 0x2C
+#define QSEED3_DE_THRESHOLD 0x30
+#define QSEED3_DE_ADJUST_DATA_0 0x34
+#define QSEED3_DE_ADJUST_DATA_1 0x38
+#define QSEED3_DE_ADJUST_DATA_2 0x3C
+#define QSEED3_SRC_SIZE_Y_RGB_A 0x40
+#define QSEED3_SRC_SIZE_UV 0x44
+#define QSEED3_DST_SIZE 0x48
+#define QSEED3_COEF_LUT_CTRL 0x4C
+#define QSEED3_COEF_LUT_SWAP_BIT 0
+#define QSEED3_COEF_LUT_DIR_BIT 1
+#define QSEED3_COEF_LUT_Y_CIR_BIT 2
+#define QSEED3_COEF_LUT_UV_CIR_BIT 3
+#define QSEED3_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3_BUFFER_CTRL 0x50
+#define QSEED3_CLK_CTRL0 0x54
+#define QSEED3_CLK_CTRL1 0x58
+#define QSEED3_CLK_STATUS 0x5C
+#define QSEED3_MISR_CTRL 0x70
+#define QSEED3_MISR_SIGNATURE_0 0x74
+#define QSEED3_MISR_SIGNATURE_1 0x78
+#define QSEED3_PHASE_INIT_Y_H 0x90
+#define QSEED3_PHASE_INIT_Y_V 0x94
+#define QSEED3_PHASE_INIT_UV_H 0x98
+#define QSEED3_PHASE_INIT_UV_V 0x9C
+#define QSEED3_COEF_LUT 0x100
+#define QSEED3_FILTERS 5
+#define QSEED3_LUT_REGIONS 4
+#define QSEED3_CIRCULAR_LUTS 9
+#define QSEED3_SEPARABLE_LUTS 10
+#define QSEED3_LUT_SIZE 60
+#define QSEED3_ENABLE 2
+#define QSEED3_DIR_LUT_SIZE (200 * sizeof(u32))
+#define QSEED3_CIR_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
+#define QSEED3_SEP_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
+
+/*
+ * Definitions for ViG op modes
+ */
+#define VIG_OP_CSC_DST_DATAFMT BIT(19)
+#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
+#define VIG_OP_CSC_EN BIT(17)
+#define VIG_OP_MEM_PROT_CONT BIT(15)
+#define VIG_OP_MEM_PROT_VAL BIT(14)
+#define VIG_OP_MEM_PROT_SAT BIT(13)
+#define VIG_OP_MEM_PROT_HUE BIT(12)
+#define VIG_OP_HIST BIT(8)
+#define VIG_OP_SKY_COL BIT(7)
+#define VIG_OP_FOIL BIT(6)
+#define VIG_OP_SKIN_COL BIT(5)
+#define VIG_OP_PA_EN BIT(4)
+#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
+#define VIG_OP_MEM_PROT_BLEND BIT(1)
+
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN BIT(0)
+#define CSC_10BIT_OFFSET 4
+
+/*
+ * _sspp_subblk_offset - resolve the register offset of a SSPP sub block
+ * @ctx: pipe context (may be NULL; rejected with -EINVAL)
+ * @s_id: sub block id (SDE_SSPP_*)
+ * @idx: output, register offset of the sub block
+ *
+ * Return: 0 on success, -EINVAL for NULL ctx or an unknown sub block id.
+ *
+ * Fix: the original initialized sblk from ctx->cap->sblk BEFORE the
+ * NULL check on ctx, dereferencing a possibly-NULL pointer; the
+ * dereference now happens only after validation.
+ */
+static inline int _sspp_subblk_offset(struct sde_hw_pipe *ctx,
+ int s_id,
+ u32 *idx)
+{
+ int rc = 0;
+ const struct sde_sspp_sub_blks *sblk;
+
+ if (!ctx)
+ return -EINVAL;
+
+ sblk = ctx->cap->sblk;
+
+ switch (s_id) {
+ case SDE_SSPP_SRC:
+ *idx = sblk->src_blk.base;
+ break;
+ case SDE_SSPP_SCALER_QSEED2:
+ case SDE_SSPP_SCALER_QSEED3:
+ case SDE_SSPP_SCALER_RGB:
+ *idx = sblk->scaler_blk.base;
+ break;
+ case SDE_SSPP_CSC:
+ case SDE_SSPP_CSC_10BIT:
+ *idx = sblk->csc_blk.base;
+ break;
+ case SDE_SSPP_HSIC:
+ *idx = sblk->hsic_blk.base;
+ break;
+ case SDE_SSPP_PCC:
+ *idx = sblk->pcc_blk.base;
+ break;
+ case SDE_SSPP_MEMCOLOR:
+ *idx = sblk->memcolor_blk.base;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+/*
+ * _sspp_setup_opmode - read-modify-write of the VIG opmode register
+ * @ctx: pipe context
+ * @mask: opmode bits to change
+ * @en: non-zero sets the bits in @mask, zero clears them
+ *
+ * No-op unless the pipe advertises both the QSEED2 scaler and CSC
+ * features and a valid scaler sub block offset is found.
+ */
+static void _sspp_setup_opmode(struct sde_hw_pipe *ctx,
+ u32 mask, u8 en)
+{
+ u32 idx;
+ u32 opmode;
+
+ if (!test_bit(SDE_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+ _sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) ||
+ !test_bit(SDE_SSPP_CSC, &ctx->cap->features))
+ return;
+
+ opmode = SDE_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
+
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ SDE_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
+}
+
+/*
+ * Set or clear @mask in the CSC 10-bit opmode register of the pipe.
+ * Silently returns when the CSC sub block offset cannot be resolved.
+ */
+static void _sspp_setup_csc10_opmode(struct sde_hw_pipe *ctx,
+ u32 mask, u8 en)
+{
+ u32 offset;
+ u32 val;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_CSC_10BIT, &offset))
+ return;
+
+ /* read-modify-write so unrelated opmode bits are preserved */
+ val = SDE_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + offset);
+ val = en ? (val | mask) : (val & ~mask);
+ SDE_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + offset, val);
+}
+
+/**
+ * sde_hw_sspp_setup_format - program source pixel format, flip and
+ * security attributes for a pipe
+ * @ctx: pipe context
+ * @fmt: source format description
+ * @flags: SDE_SSPP_* flags (flip, rot90, solid fill, secure session)
+ */
+static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
+ const struct sde_format *fmt, u32 flags)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 chroma_samp, unpack, src_format;
+ u32 secure = 0;
+ u32 opmode = 0;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !fmt)
+ return;
+
+ c = &ctx->hw;
+ /* preserve opmode, clearing only the bits programmed below */
+ opmode = SDE_REG_READ(c, SSPP_SRC_OP_MODE + idx);
+ opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
+ MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
+
+ if (flags & SDE_SSPP_SECURE_OVERLAY_SESSION)
+ secure = 0xF;
+
+ if (flags & SDE_SSPP_FLIP_LR)
+ opmode |= MDSS_MDP_OP_FLIP_LR;
+ if (flags & SDE_SSPP_FLIP_UD)
+ opmode |= MDSS_MDP_OP_FLIP_UD;
+
+ /* 90-degree rotated sources swap H2V1/H1V2 chroma sampling */
+ chroma_samp = fmt->chroma_sample;
+ if (flags & SDE_SSPP_SOURCE_ROTATED_90) {
+ if (chroma_samp == SDE_CHROMA_H2V1)
+ chroma_samp = SDE_CHROMA_H1V2;
+ else if (chroma_samp == SDE_CHROMA_H1V2)
+ chroma_samp = SDE_CHROMA_H2V1;
+ }
+
+ src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+ (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+ (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+ if (flags & SDE_SSPP_ROT_90)
+ src_format |= BIT(11); /* ROT90 */
+
+ if (fmt->alpha_enable && fmt->fetch_planes == SDE_PLANE_INTERLEAVED)
+ src_format |= BIT(8); /* SRCC3_EN */
+
+ if (flags & SDE_SSPP_SOLID_FILL)
+ src_format |= BIT(22); /* SOLID_FILL */
+
+ unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) | (fmt->element[0] << 0);
+ src_format |= ((fmt->unpack_count - 1) << 12) |
+ (fmt->unpack_tight << 17) |
+ (fmt->unpack_align_msb << 18) |
+ ((fmt->bpp - 1) << 9);
+
+ /* non-linear (tiled/compressed) fetch needs the fetch config too */
+ if (fmt->fetch_mode != SDE_FETCH_LINEAR) {
+ if (SDE_FORMAT_IS_UBWC(fmt))
+ opmode |= MDSS_MDP_OP_BWC_EN;
+ src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */
+ SDE_REG_WRITE(c, SSPP_FETCH_CONFIG,
+ SDE_FETCH_CONFIG_RESET_VALUE |
+ ctx->highest_bank_bit << 18);
+ }
+
+ opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+ /* if this is YUV pixel format, enable CSC */
+ if (SDE_FORMAT_IS_YUV(fmt))
+ src_format |= BIT(15);
+
+ if (SDE_FORMAT_IS_DX(fmt))
+ src_format |= BIT(14);
+
+ /* update scaler opmode, if appropriate */
+ if (test_bit(SDE_SSPP_CSC, &ctx->cap->features))
+ _sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+ SDE_FORMAT_IS_YUV(fmt));
+ else if (test_bit(SDE_SSPP_CSC_10BIT, &ctx->cap->features))
+ _sspp_setup_csc10_opmode(ctx,
+ VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+ SDE_FORMAT_IS_YUV(fmt));
+
+ SDE_REG_WRITE(c, SSPP_SRC_FORMAT + idx, src_format);
+ SDE_REG_WRITE(c, SSPP_SRC_UNPACK_PATTERN + idx, unpack);
+ SDE_REG_WRITE(c, SSPP_SRC_OP_MODE + idx, opmode);
+ SDE_REG_WRITE(c, SSPP_SRC_ADDR_SW_STATUS + idx, secure);
+
+ /* clear previous UBWC error */
+ SDE_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
+}
+
+/*
+ * sde_hw_sspp_setup_pe_config - program software pixel extension
+ * override registers for all color components
+ * @ctx: pipe context
+ * @pe_ext: per-component fetch/repeat and ROI configuration
+ *
+ * Fix: the C3 top/bottom register was written with lr_pe[3] (a
+ * copy-paste of the left/right value); it now correctly receives
+ * tb_pe[3].
+ */
+static void sde_hw_sspp_setup_pe_config(struct sde_hw_pipe *ctx,
+ struct sde_hw_pixel_ext *pe_ext)
+{
+ struct sde_hw_blk_reg_map *c;
+ u8 color;
+ u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+ const u32 bytemask = 0xff;
+ const u32 shortmask = 0xffff;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !pe_ext)
+ return;
+
+ c = &ctx->hw;
+
+ /* program SW pixel extension override for all pipes*/
+ for (color = 0; color < SDE_MAX_PLANES; color++) {
+ /* color 2 has the same set of registers as color 1 */
+ if (color == 2)
+ continue;
+
+ lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+ ((pe_ext->right_rpt[color] & bytemask) << 16)|
+ ((pe_ext->left_ftch[color] & bytemask) << 8)|
+ (pe_ext->left_rpt[color] & bytemask);
+
+ tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+ ((pe_ext->btm_rpt[color] & bytemask) << 16)|
+ ((pe_ext->top_ftch[color] & bytemask) << 8)|
+ (pe_ext->top_rpt[color] & bytemask);
+
+ /* total pixels = ROI plus extension on each side */
+ tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+ pe_ext->num_ext_pxls_top[color] +
+ pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+ ((pe_ext->roi_w[color] +
+ pe_ext->num_ext_pxls_left[color] +
+ pe_ext->num_ext_pxls_right[color]) & shortmask);
+ }
+
+ /* color 0 */
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
+ tot_req_pixels[0]);
+
+ /* color 1 and color 2 */
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
+ tot_req_pixels[1]);
+
+ /* color 3 */
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
+ SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+ tot_req_pixels[3]);
+}
+
+/*
+ * _sde_hw_sspp_setup_scaler - program the legacy QSEED2 scaler
+ * @ctx: pipe context
+ * @sspp: unused for QSEED2 (kept for the setup_scaler op signature)
+ * @pe: pixel extension config providing filters and phase values
+ * @scaler_cfg: unused for QSEED2
+ *
+ * Enables horizontal/vertical scaling only when a valid filter
+ * (< SDE_SCALE_FILTER_MAX) is configured for any component.
+ */
+static void _sde_hw_sspp_setup_scaler(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cfg *sspp,
+ struct sde_hw_pixel_ext *pe,
+ void *scaler_cfg)
+{
+ struct sde_hw_blk_reg_map *c;
+ int config_h = 0x0;
+ int config_v = 0x0;
+ u32 idx;
+
+ (void)sspp;
+ (void)scaler_cfg;
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) || !pe)
+ return;
+
+ c = &ctx->hw;
+
+ /* enable scaler(s) if valid filter set */
+ if (pe->horz_filter[SDE_SSPP_COMP_0] < SDE_SCALE_FILTER_MAX)
+ config_h |= pe->horz_filter[SDE_SSPP_COMP_0] << 8;
+ if (pe->horz_filter[SDE_SSPP_COMP_1_2] < SDE_SCALE_FILTER_MAX)
+ config_h |= pe->horz_filter[SDE_SSPP_COMP_1_2] << 12;
+ if (pe->horz_filter[SDE_SSPP_COMP_3] < SDE_SCALE_FILTER_MAX)
+ config_h |= pe->horz_filter[SDE_SSPP_COMP_3] << 16;
+
+ /* BIT(0): horizontal scaling enable */
+ if (config_h)
+ config_h |= BIT(0);
+
+ if (pe->vert_filter[SDE_SSPP_COMP_0] < SDE_SCALE_FILTER_MAX)
+ config_v |= pe->vert_filter[SDE_SSPP_COMP_0] << 10;
+ if (pe->vert_filter[SDE_SSPP_COMP_1_2] < SDE_SCALE_FILTER_MAX)
+ config_v |= pe->vert_filter[SDE_SSPP_COMP_1_2] << 14;
+ if (pe->vert_filter[SDE_SSPP_COMP_3] < SDE_SCALE_FILTER_MAX)
+ config_v |= pe->vert_filter[SDE_SSPP_COMP_3] << 18;
+
+ /* BIT(1): vertical scaling enable */
+ if (config_v)
+ config_v |= BIT(1);
+
+ SDE_REG_WRITE(c, SCALE_CONFIG + idx, config_h | config_v);
+ SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_X + idx,
+ pe->init_phase_x[SDE_SSPP_COMP_0]);
+ SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_Y + idx,
+ pe->init_phase_y[SDE_SSPP_COMP_0]);
+ SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_X + idx,
+ pe->phase_step_x[SDE_SSPP_COMP_0]);
+ SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_Y + idx,
+ pe->phase_step_y[SDE_SSPP_COMP_0]);
+
+ SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_X + idx,
+ pe->init_phase_x[SDE_SSPP_COMP_1_2]);
+ SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_Y + idx,
+ pe->init_phase_y[SDE_SSPP_COMP_1_2]);
+ SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_X + idx,
+ pe->phase_step_x[SDE_SSPP_COMP_1_2]);
+ SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_Y + idx,
+ pe->phase_step_y[SDE_SSPP_COMP_1_2]);
+}
+
+/*
+ * _sde_hw_sspp_setup_scaler3_lut - load QSEED3 coefficient LUTs
+ * @ctx: pipe context
+ * @scaler3_cfg: scaler config holding LUT pointers, indices and flags
+ *
+ * Selects up to five LUTs (directional, Y/UV circular, Y/UV separable)
+ * based on lut_flag bits, validating each buffer length and index
+ * before use, then streams each selected LUT into the four register
+ * regions described by the offset table below. A final write of
+ * BIT(0) to QSEED3_COEF_LUT_CTRL requests the LUT swap.
+ */
+static void _sde_hw_sspp_setup_scaler3_lut(struct sde_hw_pipe *ctx,
+ struct sde_hw_scaler3_cfg *scaler3_cfg)
+{
+ u32 idx;
+ int i, j, filter;
+ int config_lut = 0x0;
+ unsigned long lut_flags;
+ u32 lut_addr, lut_offset, lut_len;
+ u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+ /* per-filter: {dword-count >> 2, register offset} for each region */
+ static const uint32_t offset[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+ {{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+ {{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+ {{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+ {{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+ {{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+ };
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx) ||
+ !scaler3_cfg)
+ return;
+
+ lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+ if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
+ (scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
+ lut[0] = scaler3_cfg->dir_lut;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[1] = scaler3_cfg->cir_lut +
+ scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[2] = scaler3_cfg->cir_lut +
+ scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[3] = scaler3_cfg->sep_lut +
+ scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[4] = scaler3_cfg->sep_lut +
+ scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+
+ /* stream each selected LUT into its register regions */
+ if (config_lut) {
+ for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+ if (!lut[filter])
+ continue;
+ lut_offset = 0;
+ for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+ lut_addr = QSEED3_COEF_LUT + idx
+ + offset[filter][i][1];
+ lut_len = offset[filter][i][0] << 2;
+ for (j = 0; j < lut_len; j++) {
+ SDE_REG_WRITE(&ctx->hw,
+ lut_addr,
+ (lut[filter])[lut_offset++]);
+ lut_addr += 4;
+ }
+ }
+ }
+ }
+
+ if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+ SDE_REG_WRITE(&ctx->hw, QSEED3_COEF_LUT_CTRL + idx, BIT(0));
+
+}
+
+/*
+ * _sde_hw_sspp_setup_scaler3_de - program the QSEED3 detail enhancer
+ * @ctx: pipe context
+ * @de_cfg: detail enhancer configuration; ignored when not enabled
+ *
+ * Packs sharpen levels, limit/precision/clip controls, thresholds and
+ * the three adjust curves into their respective registers.
+ */
+static void _sde_hw_sspp_setup_scaler3_de(struct sde_hw_pipe *ctx,
+ struct sde_hw_scaler3_de_cfg *de_cfg)
+{
+ u32 idx;
+ u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
+ u32 adjust_a, adjust_b, adjust_c;
+ struct sde_hw_blk_reg_map *hw;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx) || !de_cfg)
+ return;
+
+ if (!de_cfg->enable)
+ return;
+
+ hw = &ctx->hw;
+ sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
+ ((de_cfg->sharpen_level2 & 0x1FF) << 16);
+
+ sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
+ ((de_cfg->prec_shift & 0x7) << 13) |
+ ((de_cfg->clip & 0x7) << 16);
+
+ shape_ctl = (de_cfg->thr_quiet & 0xFF) |
+ ((de_cfg->thr_dieout & 0x3FF) << 16);
+
+ de_thr = (de_cfg->thr_low & 0x3FF) |
+ ((de_cfg->thr_high & 0x3FF) << 16);
+
+ /* each adjust register packs three 10-bit values */
+ adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
+ ((de_cfg->adjust_a[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_a[2] & 0x3FF) << 20);
+
+ adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
+ ((de_cfg->adjust_b[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_b[2] & 0x3FF) << 20);
+
+ adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
+ ((de_cfg->adjust_c[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_c[2] & 0x3FF) << 20);
+
+ SDE_REG_WRITE(hw, QSEED3_DE_SHARPEN + idx, sharp_lvl);
+ SDE_REG_WRITE(hw, QSEED3_DE_SHARPEN_CTL + idx, sharp_ctl);
+ SDE_REG_WRITE(hw, QSEED3_DE_SHAPE_CTL + idx, shape_ctl);
+ SDE_REG_WRITE(hw, QSEED3_DE_THRESHOLD + idx, de_thr);
+ SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_0 + idx, adjust_a);
+ SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_1 + idx, adjust_b);
+ SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_2 + idx, adjust_c);
+
+}
+
+/*
+ * _sde_hw_sspp_setup_scaler3 - program the QSEED3 scaler
+ * @ctx: pipe context
+ * @sspp: pipe configuration (provides the source format)
+ * @pe: unused for QSEED3 (kept for the setup_scaler op signature)
+ * @scaler_cfg: opaque pointer to struct sde_hw_scaler3_cfg
+ *
+ * Disables the scaler when scaler3_cfg->enable is clear; otherwise
+ * programs op mode, detail enhancer, LUTs, phase, preload and sizes.
+ *
+ * Fix: the NULL checks for ctx/ctx->cap/ctx->cap->sblk now precede
+ * the call to _sspp_subblk_offset(); the original evaluated that call
+ * (which receives ctx) before checking ctx for NULL.
+ */
+static void _sde_hw_sspp_setup_scaler3(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cfg *sspp,
+ struct sde_hw_pixel_ext *pe,
+ void *scaler_cfg)
+{
+ u32 idx;
+ u32 op_mode = 0;
+ u32 phase_init, preload, src_y_rgb, src_uv, dst;
+ struct sde_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
+
+ (void)pe;
+ if (!ctx || !ctx->cap || !ctx->cap->sblk || !sspp || !scaler3_cfg
+ || _sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx))
+ return;
+
+ if (!scaler3_cfg->enable) {
+ SDE_REG_WRITE(&ctx->hw, QSEED3_OP_MODE + idx, 0x0);
+ return;
+ }
+
+ op_mode |= BIT(0); /* scaler enable */
+ op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
+
+ if (SDE_FORMAT_IS_YUV(sspp->layout.format)) {
+ op_mode |= BIT(12);
+ op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
+ }
+
+ if (!SDE_FORMAT_IS_DX(sspp->layout.format))
+ op_mode |= BIT(14);
+
+ op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
+ op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
+
+ preload =
+ ((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
+ ((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
+ ((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
+ ((scaler3_cfg->preload_y[1] & 0x7F) << 24);
+
+ src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
+
+ src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
+
+ dst = (scaler3_cfg->dst_width & 0x1FFFF) |
+ ((scaler3_cfg->dst_height & 0x1FFFF) << 16);
+
+ if (scaler3_cfg->de.enable) {
+ _sde_hw_sspp_setup_scaler3_de(ctx, &scaler3_cfg->de);
+ op_mode |= BIT(8); /* detail enhancer enable */
+ }
+
+ if (scaler3_cfg->lut_flag)
+ _sde_hw_sspp_setup_scaler3_lut(ctx, scaler3_cfg);
+
+ /* phase init register layout differs between scaler versions */
+ if (ctx->cap->sblk->scaler_blk.version == 0x1002) {
+ if (sspp->layout.format->alpha_enable) {
+ op_mode |= BIT(10);
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
+ }
+ phase_init =
+ ((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
+ ((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
+ ((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
+ ((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
+ SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT + idx, phase_init);
+ } else {
+ if (sspp->layout.format->alpha_enable) {
+ op_mode |= BIT(10);
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
+ }
+ SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_Y_H + idx,
+ scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
+ SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_Y_V + idx,
+ scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
+ SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_UV_H + idx,
+ scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
+ SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_UV_V + idx,
+ scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
+ }
+
+ SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_Y_H + idx,
+ scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
+
+ SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_Y_V + idx,
+ scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
+
+ SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_UV_H + idx,
+ scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
+
+ SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_UV_V + idx,
+ scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
+
+ SDE_REG_WRITE(&ctx->hw, QSEED3_PRELOAD + idx, preload);
+
+ SDE_REG_WRITE(&ctx->hw, QSEED3_SRC_SIZE_Y_RGB_A + idx, src_y_rgb);
+
+ SDE_REG_WRITE(&ctx->hw, QSEED3_SRC_SIZE_UV + idx, src_uv);
+
+ SDE_REG_WRITE(&ctx->hw, QSEED3_DST_SIZE + idx, dst);
+
+ SDE_REG_WRITE(&ctx->hw, QSEED3_OP_MODE + idx, op_mode);
+}
+
+/**
+ * sde_hw_sspp_setup_rects - program source/destination rectangles,
+ * strides, decimation and (optionally) scaler and pixel extension
+ * @ctx: pipe context
+ * @cfg: pipe configuration (src/dst rects, plane layout, decimation)
+ * @pe_ext: optional pixel extension override
+ * @scale_cfg: opaque scaler config forwarded to the setup_scaler op
+ */
+static void sde_hw_sspp_setup_rects(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cfg *cfg,
+ struct sde_hw_pixel_ext *pe_ext,
+ void *scale_cfg)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+ u32 decimation = 0;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !cfg)
+ return;
+
+ c = &ctx->hw;
+
+ /* program pixel extension override */
+ if (pe_ext)
+ sde_hw_sspp_setup_pe_config(ctx, pe_ext);
+
+ /* src and dest rect programming: y/h in high half, x/w in low half */
+ src_xy = (cfg->src_rect.y << 16) | (cfg->src_rect.x);
+ src_size = (cfg->src_rect.h << 16) | (cfg->src_rect.w);
+ dst_xy = (cfg->dst_rect.y << 16) | (cfg->dst_rect.x);
+ dst_size = (cfg->dst_rect.h << 16) | (cfg->dst_rect.w);
+
+ ystride0 = (cfg->layout.plane_pitch[0]) |
+ (cfg->layout.plane_pitch[1] << 16);
+ ystride1 = (cfg->layout.plane_pitch[2]) |
+ (cfg->layout.plane_pitch[3] << 16);
+
+ /* program scaler, phase registers, if pipes supporting scaling */
+ if (ctx->cap->features & SDE_SSPP_SCALER) {
+ /* program decimation */
+ decimation = ((1 << cfg->horz_decimation) - 1) << 8;
+ decimation |= ((1 << cfg->vert_decimation) - 1);
+ ctx->ops.setup_scaler(ctx, cfg, pe_ext, scale_cfg);
+ }
+
+ /* rectangle register programming; decimation stays 0 otherwise */
+ SDE_REG_WRITE(c, SSPP_SRC_SIZE + idx, src_size);
+ SDE_REG_WRITE(c, SSPP_SRC_XY + idx, src_xy);
+ SDE_REG_WRITE(c, SSPP_OUT_SIZE + idx, dst_size);
+ SDE_REG_WRITE(c, SSPP_OUT_XY + idx, dst_xy);
+
+ SDE_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+ SDE_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+ SDE_REG_WRITE(c, SSPP_DECIMATION_CONFIG + idx, decimation);
+}
+
+/*
+ * Program the per-plane source buffer addresses of the pipe.
+ * SSPP_SRC0..3_ADDR are consecutive 32-bit registers.
+ */
+static void sde_hw_sspp_setup_sourceaddress(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cfg *cfg)
+{
+ u32 offset;
+ int plane;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &offset))
+ return;
+
+ for (plane = 0; plane < ARRAY_SIZE(cfg->layout.plane_addr); plane++)
+ SDE_REG_WRITE(&ctx->hw,
+ SSPP_SRC0_ADDR + offset + plane * 0x4,
+ cfg->layout.plane_addr[plane]);
+}
+
+/* Program the pipe CSC block with the given coefficient set. */
+static void sde_hw_sspp_setup_csc(struct sde_hw_pipe *ctx,
+ struct sde_csc_cfg *data)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_CSC, &idx) || !data)
+ return;
+
+ /* 10-bit CSC registers sit CSC_10BIT_OFFSET past the block base */
+ if (test_bit(SDE_SSPP_CSC_10BIT, &ctx->cap->features))
+ idx += CSC_10BIT_OFFSET;
+
+ sde_hw_csc_setup(&ctx->hw, idx, data);
+}
+
+/*
+ * Program QSEED2 sharpening: strength plus edge/smooth/noise
+ * thresholds in four consecutive registers. No-op for pipes without
+ * the QSEED2 scaler feature.
+ */
+static void sde_hw_sspp_setup_sharpening(struct sde_hw_pipe *ctx,
+ struct sde_hw_sharp_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) || !cfg ||
+ !test_bit(SDE_SSPP_SCALER_QSEED2, &ctx->cap->features))
+ return;
+
+ c = &ctx->hw;
+
+ SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx, cfg->strength);
+ SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0x4, cfg->edge_thr);
+ SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0x8, cfg->smooth_thr);
+ SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0xC, cfg->noise_thr);
+}
+
+/* program the constant fill color used when the pipe runs in solid-fill mode */
+static void sde_hw_sspp_setup_solidfill(struct sde_hw_pipe *ctx, u32 color)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+ return;
+
+ SDE_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
+}
+
+/* program the fill-level to danger/safe signal lookup tables */
+static void sde_hw_sspp_setup_danger_safe_lut(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+ return;
+
+ SDE_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
+ SDE_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
+}
+
+/* program the fill-level to CREQ priority lookup table */
+static void sde_hw_sspp_setup_creq_lut(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+ return;
+
+ SDE_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+}
+
+/*
+ * Program the pipe QoS control register: optional vblank-time creq/danger
+ * levels and the danger/safe signal generation enable.
+ */
+static void sde_hw_sspp_setup_qos_ctrl(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+ u32 qos_ctrl = 0;
+
+ if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+ return;
+
+ if (cfg->vblank_en) {
+ /* creq and danger levels applied during vertical blanking */
+ qos_ctrl |= ((cfg->creq_vblank &
+ SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
+ SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
+ qos_ctrl |= ((cfg->danger_vblank &
+ SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
+ SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
+ qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
+ }
+
+ if (cfg->danger_safe_en)
+ qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
+
+ SDE_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
+}
+
+/*
+ * Populate the pipe's ops table based on the feature bits from the catalog.
+ * Ops for features the hardware lacks are left NULL (except setup_scaler,
+ * which always gets a v3 or legacy implementation).
+ */
+static void _setup_layer_ops(struct sde_hw_pipe *c,
+ unsigned long features)
+{
+ if (test_bit(SDE_SSPP_SRC, &features)) {
+ c->ops.setup_format = sde_hw_sspp_setup_format;
+ c->ops.setup_rects = sde_hw_sspp_setup_rects;
+ c->ops.setup_sourceaddress = sde_hw_sspp_setup_sourceaddress;
+ c->ops.setup_solidfill = sde_hw_sspp_setup_solidfill;
+ }
+ if (test_bit(SDE_SSPP_QOS, &features)) {
+ c->ops.setup_danger_safe_lut =
+ sde_hw_sspp_setup_danger_safe_lut;
+ c->ops.setup_creq_lut = sde_hw_sspp_setup_creq_lut;
+ c->ops.setup_qos_ctrl = sde_hw_sspp_setup_qos_ctrl;
+ }
+
+ /* one CSC op serves both legacy and 10-bit blocks; offset differs */
+ if (test_bit(SDE_SSPP_CSC, &features) ||
+ test_bit(SDE_SSPP_CSC_10BIT, &features))
+ c->ops.setup_csc = sde_hw_sspp_setup_csc;
+
+ if (test_bit(SDE_SSPP_SCALER_QSEED2, &features))
+ c->ops.setup_sharpening = sde_hw_sspp_setup_sharpening;
+
+ /* scaler op is always assigned; callers gate on SDE_SSPP_SCALER */
+ if (test_bit(SDE_SSPP_SCALER_QSEED3, &features))
+ c->ops.setup_scaler = _sde_hw_sspp_setup_scaler3;
+ else
+ c->ops.setup_scaler = _sde_hw_sspp_setup_scaler;
+
+ if (test_bit(SDE_SSPP_HSIC, &features)) {
+ /* TODO: add version based assignment here as inline or macro */
+ if (c->cap->sblk->hsic_blk.version ==
+ (SDE_COLOR_PROCESS_VER(0x1, 0x7))) {
+ c->ops.setup_pa_hue = sde_setup_pipe_pa_hue_v1_7;
+ c->ops.setup_pa_sat = sde_setup_pipe_pa_sat_v1_7;
+ c->ops.setup_pa_val = sde_setup_pipe_pa_val_v1_7;
+ c->ops.setup_pa_cont = sde_setup_pipe_pa_cont_v1_7;
+ }
+ }
+
+ if (test_bit(SDE_SSPP_MEMCOLOR, &features)) {
+ if (c->cap->sblk->memcolor_blk.version ==
+ (SDE_COLOR_PROCESS_VER(0x1, 0x7)))
+ c->ops.setup_pa_memcolor =
+ sde_setup_pipe_pa_memcol_v1_7;
+ }
+}
+
+/*
+ * _sspp_offset - look up the catalog entry for @sspp and fill in the
+ * register map @b (base, block offset, hw version, debug log mask).
+ * Returns the catalog entry on success, ERR_PTR(-EINVAL) on bad arguments
+ * or an id not present in the catalog.
+ */
+static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp,
+ void __iomem *addr,
+ struct sde_mdss_cfg *catalog,
+ struct sde_hw_blk_reg_map *b)
+{
+ int i;
+
+ if ((sspp < SSPP_MAX) && catalog && addr && b) {
+ for (i = 0; i < catalog->sspp_count; i++) {
+ if (sspp == catalog->sspp[i].id) {
+ b->base_off = addr;
+ b->blk_off = catalog->sspp[i].base;
+ b->hwversion = catalog->hwversion;
+ b->log_mask = SDE_DBG_MASK_SSPP;
+ return &catalog->sspp[i];
+ }
+ }
+ }
+
+ /*
+ * A failed lookup is an invalid-argument error, not an allocation
+ * failure; report -EINVAL for consistency with _top_offset().
+ */
+ return ERR_PTR(-EINVAL);
+}
+
+/*
+ * sde_hw_sspp_init - allocate and initialize an SSPP driver context.
+ * Returns ERR_PTR(-ENOMEM) on allocation failure or ERR_PTR(-EINVAL) when
+ * @idx is not found in the catalog. Release with sde_hw_sspp_destroy().
+ */
+struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *catalog)
+{
+ struct sde_hw_pipe *ctx;
+ struct sde_sspp_cfg *cfg;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _sspp_offset(idx, addr, catalog, &ctx->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(ctx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ ctx->idx = idx;
+ ctx->cap = cfg;
+ _setup_layer_ops(ctx, ctx->cap->features);
+ ctx->highest_bank_bit = catalog->mdp[0].highest_bank_bit;
+
+ return ctx;
+}
+
+/* free an SSPP context; kfree(NULL) is a no-op, so a NULL ctx is safe */
+void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx)
+{
+ kfree(ctx);
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
new file mode 100644
index 000000000000..743f5e72d1a8
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -0,0 +1,467 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_SSPP_H
+#define _SDE_HW_SSPP_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+#include "sde_formats.h"
+#include "sde_color_processing.h"
+
+struct sde_hw_pipe;
+
+/**
+ * Flags
+ */
+#define SDE_SSPP_SECURE_OVERLAY_SESSION 0x1
+#define SDE_SSPP_FLIP_LR 0x2
+#define SDE_SSPP_FLIP_UD 0x4
+#define SDE_SSPP_SOURCE_ROTATED_90 0x8
+#define SDE_SSPP_ROT_90 0x10
+#define SDE_SSPP_SOLID_FILL 0x20
+
+/**
+ * Define all scaler feature bits in catalog
+ */
+#define SDE_SSPP_SCALER ((1UL << SDE_SSPP_SCALER_RGB) | \
+ (1UL << SDE_SSPP_SCALER_QSEED2) | \
+ (1UL << SDE_SSPP_SCALER_QSEED3))
+
+/**
+ * Component indices
+ */
+enum {
+ SDE_SSPP_COMP_0,
+ SDE_SSPP_COMP_1_2,
+ SDE_SSPP_COMP_2,
+ SDE_SSPP_COMP_3,
+
+ SDE_SSPP_COMP_MAX
+};
+
+enum {
+ SDE_FRAME_LINEAR,
+ SDE_FRAME_TILE_A4X,
+ SDE_FRAME_TILE_A5X,
+};
+
+enum sde_hw_filter {
+ SDE_SCALE_FILTER_NEAREST = 0,
+ SDE_SCALE_FILTER_BIL,
+ SDE_SCALE_FILTER_PCMN,
+ SDE_SCALE_FILTER_CA,
+ SDE_SCALE_FILTER_MAX
+};
+
+struct sde_hw_sharp_cfg {
+ u32 strength;
+ u32 edge_thr;
+ u32 smooth_thr;
+ u32 noise_thr;
+};
+
+struct sde_hw_pixel_ext {
+ /* scaling factors are enabled for this input layer */
+ uint8_t enable_pxl_ext;
+
+ int init_phase_x[SDE_MAX_PLANES];
+ int phase_step_x[SDE_MAX_PLANES];
+ int init_phase_y[SDE_MAX_PLANES];
+ int phase_step_y[SDE_MAX_PLANES];
+
+ /*
+ * Number of pixels extension in left, right, top and bottom direction
+ * for all color components. This pixel value for each color component
+ * should be sum of fetch + repeat pixels.
+ */
+ int num_ext_pxls_left[SDE_MAX_PLANES];
+ int num_ext_pxls_right[SDE_MAX_PLANES];
+ int num_ext_pxls_top[SDE_MAX_PLANES];
+ int num_ext_pxls_btm[SDE_MAX_PLANES];
+
+ /*
+ * Number of pixels needs to be overfetched in left, right, top and
+ * bottom directions from source image for scaling.
+ */
+ int left_ftch[SDE_MAX_PLANES];
+ int right_ftch[SDE_MAX_PLANES];
+ int top_ftch[SDE_MAX_PLANES];
+ int btm_ftch[SDE_MAX_PLANES];
+
+ /*
+ * Number of pixels needs to be repeated in left, right, top and
+ * bottom directions for scaling.
+ */
+ int left_rpt[SDE_MAX_PLANES];
+ int right_rpt[SDE_MAX_PLANES];
+ int top_rpt[SDE_MAX_PLANES];
+ int btm_rpt[SDE_MAX_PLANES];
+
+ uint32_t roi_w[SDE_MAX_PLANES];
+ uint32_t roi_h[SDE_MAX_PLANES];
+
+ /*
+ * Filter type to be used for scaling in horizontal and vertical
+ * directions
+ */
+ enum sde_hw_filter horz_filter[SDE_MAX_PLANES];
+ enum sde_hw_filter vert_filter[SDE_MAX_PLANES];
+
+};
+
+/**
+ * struct sde_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
+ * @enable: detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @ clip: clip shift
+ * @ limit: limit value
+ * @ thr_quiet: quiet threshold
+ * @ thr_dieout: dieout threshold
+ * @ thr_low: low threshold
+ * @ thr_high: high threshold
+ * @ prec_shift: precision shift
+ * @ adjust_a: A-coefficients for mapping curve
+ * @ adjust_b: B-coefficients for mapping curve
+ * @ adjust_c: C-coefficients for mapping curve
+ */
+struct sde_hw_scaler3_de_cfg {
+ u32 enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[SDE_MAX_DE_CURVES];
+ int16_t adjust_b[SDE_MAX_DE_CURVES];
+ int16_t adjust_c[SDE_MAX_DE_CURVES];
+};
+
+/**
+ * struct sde_hw_scaler3_cfg : QSEEDv3 configuration
+ * @enable: scaler enable
+ * @dir_en: direction detection block enable
+ * @ init_phase_x: horizontal initial phase
+ * @ phase_step_x: horizontal phase step
+ * @ init_phase_y: vertical initial phase
+ * @ phase_step_y: vertical phase step
+ * @ preload_x: horizontal preload value
+ * @ preload_y: vertical preload value
+ * @ src_width: source width
+ * @ src_height: source height
+ * @ dst_width: destination width
+ * @ dst_height: destination height
+ * @ y_rgb_filter_cfg: y/rgb plane filter configuration
+ * @ uv_filter_cfg: uv plane filter configuration
+ * @ alpha_filter_cfg: alpha filter configuration
+ * @ blend_cfg: blend coefficients configuration
+ * @ lut_flag: scaler LUT update flags
+ * 0x1 swap LUT bank
+ * 0x2 update 2D filter LUT
+ * 0x4 update y circular filter LUT
+ * 0x8 update uv circular filter LUT
+ * 0x10 update y separable filter LUT
+ * 0x20 update uv separable filter LUT
+ * @ dir_lut_idx: 2D filter LUT index
+ * @ y_rgb_cir_lut_idx: y circular filter LUT index
+ * @ uv_cir_lut_idx: uv circular filter LUT index
+ * @ y_rgb_sep_lut_idx: y separable filter LUT index
+ * @ uv_sep_lut_idx: uv separable filter LUT index
+ * @ dir_lut: pointer to 2D LUT
+ * @ cir_lut: pointer to circular filter LUT
+ * @ sep_lut: pointer to separable filter LUT
+ * @ de: detail enhancer configuration
+ */
+struct sde_hw_scaler3_cfg {
+ u32 enable;
+ u32 dir_en;
+ int32_t init_phase_x[SDE_MAX_PLANES];
+ int32_t phase_step_x[SDE_MAX_PLANES];
+ int32_t init_phase_y[SDE_MAX_PLANES];
+ int32_t phase_step_y[SDE_MAX_PLANES];
+
+ u32 preload_x[SDE_MAX_PLANES];
+ u32 preload_y[SDE_MAX_PLANES];
+ u32 src_width[SDE_MAX_PLANES];
+ u32 src_height[SDE_MAX_PLANES];
+
+ u32 dst_width;
+ u32 dst_height;
+
+ u32 y_rgb_filter_cfg;
+ u32 uv_filter_cfg;
+ u32 alpha_filter_cfg;
+ u32 blend_cfg;
+
+ u32 lut_flag;
+ u32 dir_lut_idx;
+
+ u32 y_rgb_cir_lut_idx;
+ u32 uv_cir_lut_idx;
+ u32 y_rgb_sep_lut_idx;
+ u32 uv_sep_lut_idx;
+ u32 *dir_lut;
+ size_t dir_len;
+ u32 *cir_lut;
+ size_t cir_len;
+ u32 *sep_lut;
+ size_t sep_len;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct sde_hw_scaler3_de_cfg de;
+};
+
+/**
+ * struct sde_hw_pipe_cfg : Pipe description
+ * @layout: format layout information for programming buffer to hardware
+ * @src_rect: src ROI, caller takes into account the different operations
+ * such as decimation, flip etc to program this field
+ * @dst_rect: destination ROI.
+ * @ horz_decimation : horizontal decimation factor( 0, 2, 4, 8, 16)
+ * @ vert_decimation : vertical decimation factor( 0, 2, 4, 8, 16)
+ * 2: Read 1 line/pixel drop 1 line/pixel
+ * 4: Read 1 line/pixel drop 3 lines/pixels
+ * 8: Read 1 line/pixel drop 7 lines/pixels
+ * 16: Read 1 line/pixel drop 15 line/pixels
+ */
+struct sde_hw_pipe_cfg {
+ struct sde_hw_fmt_layout layout;
+ struct sde_rect src_rect;
+ struct sde_rect dst_rect;
+ u8 horz_decimation;
+ u8 vert_decimation;
+};
+
+/**
+ * struct sde_hw_pipe_qos_cfg : Source pipe QoS configuration
+ * @danger_lut: LUT for generate danger level based on fill level
+ * @safe_lut: LUT for generate safe level based on fill level
+ * @creq_lut: LUT for generate creq level based on fill level
+ * @creq_vblank: creq value generated to vbif during vertical blanking
+ * @danger_vblank: danger value generated during vertical blanking
+ * @vblank_en: enable creq_vblank and danger_vblank during vblank
+ * @danger_safe_en: enable danger safe generation
+ */
+struct sde_hw_pipe_qos_cfg {
+ u32 danger_lut;
+ u32 safe_lut;
+ u32 creq_lut;
+ u32 creq_vblank;
+ u32 danger_vblank;
+ bool vblank_en;
+ bool danger_safe_en;
+};
+
+/**
+ * struct sde_hw_sspp_ops - interface to the SSPP Hw driver functions
+ * Caller must call the init function to get the pipe context for each pipe
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_sspp_ops {
+ /**
+ * setup_format - setup pixel format cropping rectangle, flip
+ * @ctx: Pointer to pipe context
+ * @fmt: Pointer to format description structure
+ * @flags: Extra flags for format config
+ */
+ void (*setup_format)(struct sde_hw_pipe *ctx,
+ const struct sde_format *fmt, u32 flags);
+
+ /**
+ * setup_rects - setup pipe ROI rectangles
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @pe_ext: Pointer to pixel ext settings
+ * @scale_cfg: Pointer to scaler settings
+ */
+ void (*setup_rects)(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cfg *cfg,
+ struct sde_hw_pixel_ext *pe_ext,
+ void *scale_cfg);
+
+ /**
+ * setup_sourceaddress - setup pipe source addresses
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ */
+ void (*setup_sourceaddress)(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cfg *cfg);
+
+ /**
+ * setup_csc - setup color space conversion
+ * @ctx: Pointer to pipe context
+ * @data: Pointer to config structure
+ */
+ void (*setup_csc)(struct sde_hw_pipe *ctx, struct sde_csc_cfg *data);
+
+ /**
+ * setup_solidfill - enable/disable colorfill
+ * @ctx: Pointer to pipe context
+ * @const_color: Fill color value
+ * @flags: Pipe flags
+ */
+ void (*setup_solidfill)(struct sde_hw_pipe *ctx, u32 color);
+
+ /**
+ * setup_sharpening - setup sharpening
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to config structure
+ */
+ void (*setup_sharpening)(struct sde_hw_pipe *ctx,
+ struct sde_hw_sharp_cfg *cfg);
+
+
+ /**
+ * setup_pa_hue(): Setup source hue adjustment
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to hue data
+ */
+ void (*setup_pa_hue)(struct sde_hw_pipe *ctx, void *cfg);
+
+ /**
+ * setup_pa_sat(): Setup source saturation adjustment
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to saturation data
+ */
+ void (*setup_pa_sat)(struct sde_hw_pipe *ctx, void *cfg);
+
+ /**
+ * setup_pa_val(): Setup source value adjustment
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to value data
+ */
+ void (*setup_pa_val)(struct sde_hw_pipe *ctx, void *cfg);
+
+ /**
+ * setup_pa_cont(): Setup source contrast adjustment
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer contrast data
+ */
+ void (*setup_pa_cont)(struct sde_hw_pipe *ctx, void *cfg);
+
+ /**
+ * setup_pa_memcolor - setup source color processing
+ * @ctx: Pointer to pipe context
+ * @type: Memcolor type (Skin, sky or foliage)
+ * @cfg: Pointer to memory color config data
+ */
+ void (*setup_pa_memcolor)(struct sde_hw_pipe *ctx,
+ enum sde_memcolor_type type, void *cfg);
+
+ /**
+ * setup_igc - setup inverse gamma correction
+ * @ctx: Pointer to pipe context
+ */
+ void (*setup_igc)(struct sde_hw_pipe *ctx);
+
+ /**
+ * setup_danger_safe_lut - setup danger safe LUTs
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_danger_safe_lut)(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_creq_lut - setup CREQ LUT
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_creq_lut)(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_qos_ctrl - setup QoS control
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_qos_ctrl)(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_histogram - setup histograms
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to histogram configuration
+ */
+ void (*setup_histogram)(struct sde_hw_pipe *ctx,
+ void *cfg);
+
+ /**
+ * setup_scaler - setup scaler
+ * @ctx: Pointer to pipe context
+ * @pipe_cfg: Pointer to pipe configuration
+ * @pe_cfg: Pointer to pixel extension configuration
+ * @scaler_cfg: Pointer to scaler configuration
+ */
+ void (*setup_scaler)(struct sde_hw_pipe *ctx,
+ struct sde_hw_pipe_cfg *pipe_cfg,
+ struct sde_hw_pixel_ext *pe_cfg,
+ void *scaler_cfg);
+};
+
+/**
+ * struct sde_hw_pipe - pipe description
+ * @base_off: mdp register mapped offset
+ * @blk_off: pipe offset relative to mdss offset
+ * @length length of register block offset
+ * @hwversion mdss hw version number
+ * @idx: pipe index
+ * @type : pipe type, VIG/DMA/RGB/CURSOR, certain operations are not
+ * supported for each pipe type
+ * @pipe_hw_cap: pointer to layer_cfg
+ * @highest_bank_bit:
+ * @ops: pointer to operations possible for this pipe
+ */
+struct sde_hw_pipe {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* Pipe */
+ enum sde_sspp idx;
+ const struct sde_sspp_cfg *cap;
+ u32 highest_bank_bit;
+
+ /* Ops */
+ struct sde_hw_sspp_ops ops;
+};
+
+/**
+ * sde_hw_sspp_init - initializes the sspp hw driver object.
+ * Should be called once before accessing every pipe.
+ * @idx: Pipe index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @catalog : Pointer to mdss catalog data
+ */
+struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *catalog);
+
+/**
+ * sde_hw_sspp_destroy(): Destroys SSPP driver context
+ * should be called during Hw pipe cleanup.
+ * @ctx: Pointer to SSPP driver context returned by sde_hw_sspp_init
+ */
+void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx);
+
+#endif /*_SDE_HW_SSPP_H */
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
new file mode 100644
index 000000000000..1a5d469e6e7e
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -0,0 +1,268 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_top.h"
+
+#define SSPP_SPARE 0x28
+
+#define FLD_SPLIT_DISPLAY_CMD BIT(1)
+#define FLD_SMART_PANEL_FREE_RUN BIT(2)
+#define FLD_INTF_1_SW_TRG_MUX BIT(4)
+#define FLD_INTF_2_SW_TRG_MUX BIT(8)
+#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
+
+#define DANGER_STATUS 0x360
+#define SAFE_STATUS 0x364
+
+#define TE_LINE_INTERVAL 0x3F4
+
+#define TRAFFIC_SHAPER_EN BIT(31)
+#define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
+#define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
+#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
+
+/*
+ * Program the split-display control registers for dual-pipe panels.
+ * Registers are not double buffered; callers must invoke this before
+ * enabling timing control. Disables split display when cfg->en is false.
+ */
+static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
+ struct split_pipe_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 upper_pipe = 0;
+ u32 lower_pipe = 0;
+
+ /* validate arguments before dereferencing mdp */
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->en) {
+ if (cfg->mode == INTF_MODE_CMD) {
+ lower_pipe = FLD_SPLIT_DISPLAY_CMD;
+ /* interface controlling sw trigger */
+ if (cfg->intf == INTF_2)
+ lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
+ else
+ lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
+
+ /*
+ * free run for pp split
+ * NOTE(review): plain '=' discards the CMD and
+ * TRG_MUX bits set above — confirm '|=' was not
+ * intended here.
+ */
+ if (cfg->pp_split_slave != INTF_MAX)
+ lower_pipe = FLD_SMART_PANEL_FREE_RUN;
+
+ upper_pipe = lower_pipe;
+ } else {
+ /* video mode: cross-mux the sw triggers */
+ if (cfg->intf == INTF_2) {
+ lower_pipe = FLD_INTF_1_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_2_SW_TRG_MUX;
+ } else {
+ lower_pipe = FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_1_SW_TRG_MUX;
+ }
+ }
+ }
+
+ SDE_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
+ SDE_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+ SDE_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+ SDE_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
+}
+
+/*
+ * Configure the ping-pong split (PPB) registers. When enabled, the active
+ * PPB (selected by cfg->pp_split_index) is programmed with the slave
+ * interface mux and horizontal split; the other PPB is cleared.
+ */
+static void sde_hw_setup_pp_split(struct sde_hw_mdp *mdp,
+ struct split_pipe_cfg *cfg)
+{
+ u32 ppb_config = 0x0;
+ u32 ppb_control = 0x0;
+
+ if (!mdp || !cfg)
+ return;
+
+ if (cfg->en && cfg->pp_split_slave != INTF_MAX) {
+ /* mux slave interface (1-based) into bits [20..] */
+ ppb_config |= (cfg->pp_split_slave - INTF_0 + 1) << 20;
+ ppb_config |= BIT(16); /* split enable */
+ ppb_control = BIT(5); /* horz split*/
+ }
+ if (cfg->pp_split_index) {
+ SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, 0x0);
+ SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, 0x0);
+ SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, ppb_config);
+ SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, ppb_control);
+ } else {
+ SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, ppb_config);
+ SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, ppb_control);
+ SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, 0x0);
+ SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, 0x0);
+ }
+}
+
+/*
+ * Select the CDM output data path: writeback (bit 24) takes precedence
+ * over interface (bit 19); both disabled clears the mux.
+ */
+static void sde_hw_setup_cdm_output(struct sde_hw_mdp *mdp,
+ struct cdm_output_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 out_ctl = 0;
+
+ /* guard against NULL, consistent with the other top-level helpers */
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->wb_en)
+ out_ctl |= BIT(24);
+ else if (cfg->intf_en)
+ out_ctl |= BIT(19);
+
+ SDE_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
+}
+
+/*
+ * Force a block clock on or off via its catalog-described control bit.
+ * Returns true if the clock was actually forced on by this call (i.e. the
+ * bit was previously clear), so the caller knows to undo it later.
+ */
+static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp,
+ enum sde_clk_ctrl_type clk_ctrl, bool enable)
+{
+ struct sde_hw_blk_reg_map *c = &mdp->hw;
+ u32 reg_off, bit_off;
+ u32 reg_val, new_val;
+ bool clk_forced_on;
+
+ if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX)
+ return false;
+
+ /* register offset and bit position come from the catalog */
+ reg_off = mdp->cap->clk_ctrls[clk_ctrl].reg_off;
+ bit_off = mdp->cap->clk_ctrls[clk_ctrl].bit_off;
+
+ reg_val = SDE_REG_READ(c, reg_off);
+
+ if (enable)
+ new_val = reg_val | BIT(bit_off);
+ else
+ new_val = reg_val & ~BIT(bit_off);
+
+ SDE_REG_WRITE(c, reg_off, new_val);
+
+ /* forced on now only if it was not already forced on before */
+ clk_forced_on = !(reg_val & BIT(bit_off));
+
+ return clk_forced_on;
+}
+
+
+/*
+ * Decode the DANGER_STATUS register into per-client 2-bit danger levels.
+ * Only WB_2 has a hardware status field; the other writeback slots read 0.
+ */
+static void sde_hw_get_danger_status(struct sde_hw_mdp *mdp,
+ struct sde_danger_safe_status *status)
+{
+ struct sde_hw_blk_reg_map *c = &mdp->hw;
+ u32 value;
+
+ value = SDE_REG_READ(c, DANGER_STATUS);
+ status->mdp = (value >> 0) & 0x3;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+ status->wb[WB_0] = 0;
+ status->wb[WB_1] = 0;
+ status->wb[WB_2] = (value >> 2) & 0x3;
+ status->wb[WB_3] = 0;
+}
+
+/*
+ * Decode the SAFE_STATUS register into per-client 1-bit safe flags.
+ * Same bit layout as DANGER_STATUS but single-bit fields; only WB_2
+ * has a hardware status field.
+ */
+static void sde_hw_get_safe_status(struct sde_hw_mdp *mdp,
+ struct sde_danger_safe_status *status)
+{
+ struct sde_hw_blk_reg_map *c = &mdp->hw;
+ u32 value;
+
+ value = SDE_REG_READ(c, SAFE_STATUS);
+ status->mdp = (value >> 0) & 0x1;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+ status->wb[WB_0] = 0;
+ status->wb[WB_1] = 0;
+ status->wb[WB_2] = (value >> 2) & 0x1;
+ status->wb[WB_3] = 0;
+}
+
+/*
+ * Populate the mdp top ops table. The @cap feature mask is currently
+ * unused; all ops are assigned unconditionally.
+ */
+static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_split_pipe = sde_hw_setup_split_pipe;
+ ops->setup_pp_split = sde_hw_setup_pp_split;
+ ops->setup_cdm_output = sde_hw_setup_cdm_output;
+ ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
+ ops->get_danger_status = sde_hw_get_danger_status;
+ ops->get_safe_status = sde_hw_get_safe_status;
+}
+
+/*
+ * _top_offset - look up the catalog entry for @mdp and fill in the
+ * register map @b. Returns ERR_PTR(-EINVAL) when the id is not found.
+ */
+static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp,
+ const struct sde_mdss_cfg *m,
+ void __iomem *addr,
+ struct sde_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->mdp_count; i++) {
+ if (mdp == m->mdp[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->mdp[i].base;
+ b->hwversion = m->hwversion;
+ b->log_mask = SDE_DBG_MASK_TOP;
+ return &m->mdp[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+/*
+ * sde_hw_mdptop_init - allocate and initialize an mdp top driver context.
+ * Returns ERR_PTR(-ENOMEM) on allocation failure or ERR_PTR(-EINVAL) when
+ * @idx is not found in the catalog. Release with sde_hw_mdp_destroy().
+ */
+struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
+ void __iomem *addr,
+ const struct sde_mdss_cfg *m)
+{
+ struct sde_hw_mdp *mdp;
+ const struct sde_mdp_cfg *cfg;
+
+ mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+ if (!mdp)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _top_offset(idx, m, addr, &mdp->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(mdp);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Assign ops
+ */
+ mdp->idx = idx;
+ mdp->cap = cfg;
+ _setup_mdp_ops(&mdp->ops, mdp->cap->features);
+
+ /*
+ * Perform any default initialization for the intf
+ */
+
+ return mdp;
+}
+
+/* free an mdp top context; kfree(NULL) is a no-op, so a NULL mdp is safe */
+void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp)
+{
+ kfree(mdp);
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
new file mode 100644
index 000000000000..780d051e7408
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -0,0 +1,170 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_TOP_H
+#define _SDE_HW_TOP_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of byte per clk
+ * @bpc_numer : numerator of byte per clk
+ */
+struct traffic_shaper_cfg {
+ bool en;
+ bool rd_client;
+ u32 client_id;
+ u32 bpc_denom;
+ u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en : Enable/disable dual pipe configuration
+ * @mode : Panel interface mode
+ * @intf : Interface id for main control path
+ * @pp_split_slave: Slave interface for ping pong split, INTF_MAX to disable
+ * @pp_split_index: Ping pong index for ping pong split
+ * @split_flush_en: Allows both the paths to be flushed when master path is
+ * flushed
+ */
+struct split_pipe_cfg {
+ bool en;
+ enum sde_intf_mode mode;
+ enum sde_intf intf;
+ enum sde_intf pp_split_slave;
+ u32 pp_split_index;
+ bool split_flush_en;
+};
+
+/**
+ * struct cdm_output_cfg: output configuration for cdm
+ * @wb_en : enable/disable writeback output
+ * @intf_en : enable/disable interface output
+ */
+struct cdm_output_cfg {
+ bool wb_en;
+ bool intf_en;
+};
+
+/**
+ * struct sde_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ * @wb: writeback output status
+ */
+struct sde_danger_safe_status {
+ u8 mdp;
+ u8 sspp[SSPP_MAX];
+ u8 wb[WB_MAX];
+};
+
+/**
+ * struct sde_hw_mdp_ops - interface to the MDP TOP Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_pp_split : Programs the pp split control registers
+ * @setup_cdm_output : programs cdm control
+ * @setup_traffic_shaper : programs traffic shaper control
+ */
+struct sde_hw_mdp_ops {
+ /** setup_split_pipe() : Registers are not double buffered, this
+ * function should be called before timing control enable
+ * @mdp : mdp top context driver
+ * @cfg : upper and lower part of pipe configuration
+ */
+ void (*setup_split_pipe)(struct sde_hw_mdp *mdp,
+ struct split_pipe_cfg *p);
+
+ /** setup_pp_split() : Configure pp split related registers
+ * @mdp : mdp top context driver
+ * @cfg : upper and lower part of pipe configuration
+ */
+ void (*setup_pp_split)(struct sde_hw_mdp *mdp,
+ struct split_pipe_cfg *cfg);
+
+ /**
+ * setup_cdm_output() : Setup selection control of the cdm data path
+ * @mdp : mdp top context driver
+ * @cfg : cdm output configuration
+ */
+ void (*setup_cdm_output)(struct sde_hw_mdp *mdp,
+ struct cdm_output_cfg *cfg);
+
+ /**
+ * setup_traffic_shaper() : Setup traffic shaper control
+ * @mdp : mdp top context driver
+ * @cfg : traffic shaper configuration
+ */
+ void (*setup_traffic_shaper)(struct sde_hw_mdp *mdp,
+ struct traffic_shaper_cfg *cfg);
+
+ /**
+ * setup_clk_force_ctrl - set clock force control
+ * @mdp: mdp top context driver
+ * @clk_ctrl: clock to be controlled
+ * @enable: force on enable
+ * @return: if the clock is forced-on by this function
+ */
+ bool (*setup_clk_force_ctrl)(struct sde_hw_mdp *mdp,
+ enum sde_clk_ctrl_type clk_ctrl, bool enable);
+
+ /**
+ * get_danger_status - get danger status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_danger_status)(struct sde_hw_mdp *mdp,
+ struct sde_danger_safe_status *status);
+
+ /**
+ * get_safe_status - get safe status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_safe_status)(struct sde_hw_mdp *mdp,
+ struct sde_danger_safe_status *status);
+};
+
+struct sde_hw_mdp {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* intf */
+ enum sde_mdp idx;
+ const struct sde_mdp_cfg *cap;
+
+ /* ops */
+ struct sde_hw_mdp_ops ops;
+};
+
+/**
+ * sde_hw_mdptop_init - initializes the mdp top driver for the passed index
+ * @idx: Interface index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ */
+struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
+ void __iomem *addr,
+ const struct sde_mdss_cfg *m);
+
+void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp);
+
+#endif /*_SDE_HW_TOP_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.c b/drivers/gpu/drm/msm/sde/sde_hw_util.c
new file mode 100644
index 000000000000..6f52f31a7569
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_util.c
@@ -0,0 +1,92 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+/* using a file static variables for debugfs access */
+static u32 sde_hw_util_log_mask = SDE_DBG_MASK_NONE;
+
+/**
+ * sde_reg_write - write a 32-bit value to a block-relative register offset
+ * @c:       block register map providing the base and block offsets
+ * @reg_off: register offset relative to the block
+ * @val:     value to write
+ * @name:    register name, used only for debug logging
+ *
+ * Debug logging is gated by the file-static log mask (debugfs controlled).
+ * No locking is performed here; callers serialize access if required.
+ */
+void sde_reg_write(struct sde_hw_blk_reg_map *c,
+		u32 reg_off,
+		u32 val,
+		const char *name)
+{
+	/* don't need to mutex protect this */
+	if (c->log_mask & sde_hw_util_log_mask)
+		SDE_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
+				name, c->blk_off + reg_off, val);
+	writel_relaxed(val, c->base_off + c->blk_off + reg_off);
+}
+
+/*
+ * Read a 32-bit register at a block-relative offset.
+ * NOTE(review): return type is int while readl_relaxed() yields a u32;
+ * a register value with bit 31 set becomes negative here - consider
+ * returning u32 in a follow-up.
+ */
+int sde_reg_read(struct sde_hw_blk_reg_map *c, u32 reg_off)
+{
+	return readl_relaxed(c->base_off + c->blk_off + reg_off);
+}
+
+/* expose the file-static log mask so debugfs can hook it directly */
+u32 *sde_hw_util_get_log_mask_ptr(void)
+{
+	return &sde_hw_util_log_mask;
+}
+
+/**
+ * sde_hw_csc_setup - program a CSC (color space conversion) register block
+ * @c:           block register map of the owning hardware block
+ * @csc_reg_off: offset of the CSC register group within the block
+ * @data:        matrix coefficients, pre/post clamp and bias values
+ *
+ * Layout programmed (offsets relative to @csc_reg_off):
+ *   0x00-0x10: 3x3 matrix, two S4.9 coefficients packed per register
+ *   0x14-0x1c: pre-clamp high/low pairs, 0x20-0x28: post-clamp pairs
+ *   0x2c-0x34: pre-bias,  0x38-0x40: post-bias
+ */
+void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c,
+		u32 csc_reg_off,
+		struct sde_csc_cfg *data)
+{
+	/* shift right by 7: converts S15.16 fixed point to S4.9 */
+	static const u32 matrix_shift = 7;
+	u32 val;
+
+	/* matrix coeff - convert S15.16 to S4.9 */
+	val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off, val);
+	val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0x4, val);
+	val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0x8, val);
+	val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0xc, val);
+	/* last (ninth) coefficient occupies a register of its own */
+	val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF;
+	SDE_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+	/* Pre clamp */
+	val = (data->csc_pre_lv[0] << 8) | data->csc_pre_lv[1];
+	SDE_REG_WRITE(c, csc_reg_off + 0x14, val);
+	val = (data->csc_pre_lv[2] << 8) | data->csc_pre_lv[3];
+	SDE_REG_WRITE(c, csc_reg_off + 0x18, val);
+	val = (data->csc_pre_lv[4] << 8) | data->csc_pre_lv[5];
+	SDE_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+	/* Post clamp */
+	val = (data->csc_post_lv[0] << 8) | data->csc_post_lv[1];
+	SDE_REG_WRITE(c, csc_reg_off + 0x20, val);
+	val = (data->csc_post_lv[2] << 8) | data->csc_post_lv[3];
+	SDE_REG_WRITE(c, csc_reg_off + 0x24, val);
+	val = (data->csc_post_lv[4] << 8) | data->csc_post_lv[5];
+	SDE_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+	/* Pre-Bias */
+	SDE_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+	/* Post-Bias */
+	SDE_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.h b/drivers/gpu/drm/msm/sde/sde_hw_util.h
new file mode 100644
index 000000000000..a4d8be9de907
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_util.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_UTIL_H
+#define _SDE_HW_UTIL_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "sde_hw_mdss.h"
+
+/*
+ * This is the common struct maintained by each sub block
+ * for mapping the register offsets in this block to the
+ * absolute IO address
+ * @base_off:  mdp register mapped offset
+ * @blk_off:   pipe offset relative to mdss offset
+ * @length:    length of register block offset
+ * @hwversion: mdss hw version number
+ * @log_mask:  per-block log mask, compared against the debugfs-controlled
+ *             global mask to gate register-write logging
+ */
+struct sde_hw_blk_reg_map {
+	void __iomem *base_off;
+	u32 blk_off;
+	u32 length;
+	u32 hwversion;
+	u32 log_mask;
+};
+
+/* returns a pointer to the global register-log mask (debugfs hook) */
+u32 *sde_hw_util_get_log_mask_ptr(void);
+
+/* write @val to the block-relative @reg_off; @name is used for debug logs */
+void sde_reg_write(struct sde_hw_blk_reg_map *c,
+		u32 reg_off,
+		u32 val,
+		const char *name);
+int sde_reg_read(struct sde_hw_blk_reg_map *c, u32 reg_off);
+
+/* stringify the offset macro name so logs show symbolic register names */
+#define SDE_REG_WRITE(c, off, val) sde_reg_write(c, off, val, #off)
+#define SDE_REG_READ(c, off) sde_reg_read(c, off)
+
+/* NOTE(review): no definition of sde_hw_util_get_dir() exists in
+ * sde_hw_util.c - confirm it is implemented elsewhere or drop this decl.
+ */
+void *sde_hw_util_get_dir(void);
+
+void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c,
+		u32 csc_reg_off,
+		struct sde_csc_cfg *data);
+
+#endif /* _SDE_HW_UTIL_H */
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
new file mode 100644
index 000000000000..76473fa879c5
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
@@ -0,0 +1,165 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_vbif.h"
+
+/* VBIF register offsets, relative to the VBIF block base */
+#define VBIF_VERSION			0x0000
+#define VBIF_CLK_FORCE_CTRL0		0x0008
+#define VBIF_CLK_FORCE_CTRL1		0x000C
+#define VBIF_QOS_REMAP_00		0x0020
+#define VBIF_QOS_REMAP_01		0x0024
+#define VBIF_QOS_REMAP_10		0x0028
+#define VBIF_QOS_REMAP_11		0x002C
+/* NOTE(review): "GATHTER" is a typo for "GATHER"; left as-is since other
+ * parts of this patch series may already reference the misspelled name.
+ */
+#define VBIF_WRITE_GATHTER_EN		0x00AC
+/* per-client transaction limit registers: 4 clients x 8 bits each */
+#define VBIF_IN_RD_LIM_CONF0		0x00B0
+#define VBIF_IN_RD_LIM_CONF1		0x00B4
+#define VBIF_IN_RD_LIM_CONF2		0x00B8
+#define VBIF_IN_WR_LIM_CONF0		0x00C0
+#define VBIF_IN_WR_LIM_CONF1		0x00C4
+#define VBIF_IN_WR_LIM_CONF2		0x00C8
+#define VBIF_OUT_RD_LIM_CONF0		0x00D0
+#define VBIF_OUT_WR_LIM_CONF0		0x00D4
+#define VBIF_XIN_HALT_CTRL0		0x0200
+#define VBIF_XIN_HALT_CTRL1		0x0204
+
+/**
+ * sde_hw_set_limit_conf - set the outstanding transaction limit for a client
+ * @vbif:   vbif context driver
+ * @xin_id: client interface identifier (4 clients per register, 8 bits each)
+ * @rd:     true to program the read limit, false for the write limit
+ * @limit:  outstanding transaction limit (8-bit field)
+ */
+static void sde_hw_set_limit_conf(struct sde_hw_vbif *vbif,
+		u32 xin_id, bool rd, u32 limit)
+{
+	struct sde_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+	u32 reg_off;
+	u32 bit_off;
+
+	if (rd)
+		reg_off = VBIF_IN_RD_LIM_CONF0;
+	else
+		reg_off = VBIF_IN_WR_LIM_CONF0;
+
+	reg_off += (xin_id / 4) * 4;
+	bit_off = (xin_id % 4) * 8;
+	reg_val = SDE_REG_READ(c, reg_off);
+	reg_val &= ~(0xFF << bit_off);
+	/*
+	 * mask the limit to its 8-bit field so an out-of-range value
+	 * cannot corrupt the neighbouring clients in the same register
+	 */
+	reg_val |= (limit & 0xFF) << bit_off;
+	SDE_REG_WRITE(c, reg_off, reg_val);
+}
+
+/**
+ * sde_hw_get_limit_conf - read back a client's outstanding transaction limit
+ * @vbif:   vbif context driver
+ * @xin_id: client interface identifier (4 clients per register, 8 bits each)
+ * @rd:     true for the read limit, false for the write limit
+ * @return: the 8-bit limit currently programmed for this client
+ */
+static u32 sde_hw_get_limit_conf(struct sde_hw_vbif *vbif,
+		u32 xin_id, bool rd)
+{
+	struct sde_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+	u32 reg_off;
+	u32 bit_off;
+	u32 limit;
+
+	if (rd)
+		reg_off = VBIF_IN_RD_LIM_CONF0;
+	else
+		reg_off = VBIF_IN_WR_LIM_CONF0;
+
+	reg_off += (xin_id / 4) * 4;
+	bit_off = (xin_id % 4) * 8;
+	reg_val = SDE_REG_READ(c, reg_off);
+	limit = (reg_val >> bit_off) & 0xFF;
+
+	return limit;
+}
+
+/**
+ * sde_hw_set_halt_ctrl - request or release a halt for one VBIF client
+ * @vbif:   vbif context driver
+ * @xin_id: client interface identifier (one bit per client in CTRL0)
+ * @enable: true to request halt, false to release
+ *
+ * Read-modify-write of XIN_HALT_CTRL0 with no lock; callers serialize
+ * access if multiple contexts can program halt control concurrently.
+ */
+static void sde_hw_set_halt_ctrl(struct sde_hw_vbif *vbif,
+		u32 xin_id, bool enable)
+{
+	struct sde_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+
+	reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL0);
+
+	if (enable)
+		reg_val |= BIT(xin_id);
+	else
+		reg_val &= ~BIT(xin_id);
+
+	SDE_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
+}
+
+/**
+ * sde_hw_get_halt_ctrl - query halt status for one VBIF client
+ * @vbif:   vbif context driver
+ * @xin_id: client interface identifier
+ * @return: true if the client's halt bit is set
+ *
+ * Note this reads XIN_HALT_CTRL1 (the hardware status/ack register),
+ * not the CTRL0 register written by sde_hw_set_halt_ctrl().
+ */
+static bool sde_hw_get_halt_ctrl(struct sde_hw_vbif *vbif,
+		u32 xin_id)
+{
+	struct sde_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+
+	reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL1);
+
+	return (reg_val & BIT(xin_id)) ? true : false;
+}
+
+/*
+ * Populate the VBIF function table.
+ * @cap is currently unused: all ops are installed unconditionally; it is
+ * kept so feature-gated ops can be added without changing the signature.
+ */
+static void _setup_vbif_ops(struct sde_hw_vbif_ops *ops,
+		unsigned long cap)
+{
+	ops->set_limit_conf = sde_hw_set_limit_conf;
+	ops->get_limit_conf = sde_hw_get_limit_conf;
+	ops->set_halt_ctrl = sde_hw_set_halt_ctrl;
+	ops->get_halt_ctrl = sde_hw_get_halt_ctrl;
+}
+
+/*
+ * Look up the catalog entry for @vbif and fill in the register map @b.
+ * Returns the matching catalog entry, or ERR_PTR(-EINVAL) if @vbif is
+ * not present in the catalog.
+ * NOTE(review): b->length is not populated here - confirm whether any
+ * consumer relies on it.
+ */
+static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif,
+		const struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->vbif_count; i++) {
+		if (vbif == m->vbif[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->vbif[i].base;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_VBIF;
+			return &m->vbif[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * sde_hw_vbif_init - initializes the vbif driver for the passed interface idx
+ * @idx:  VBIF index for which driver object is required
+ * @addr: mapped register io address of MDSS
+ * @m:    pointer to mdss catalog data
+ * @return: driver object, or ERR_PTR on lookup/allocation failure
+ */
+struct sde_hw_vbif *sde_hw_vbif_init(enum sde_vbif idx,
+		void __iomem *addr,
+		const struct sde_mdss_cfg *m)
+{
+	struct sde_hw_vbif *c;
+	const struct sde_vbif_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	/* _top_offset() returns a valid pointer or ERR_PTR, never NULL */
+	cfg = _top_offset(idx, m, addr, &c->hw);
+	if (IS_ERR(cfg)) {
+		kfree(c);
+		/* propagate the lookup error instead of hard-coding -EINVAL */
+		return ERR_CAST(cfg);
+	}
+
+	/*
+	 * Assign ops
+	 */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_vbif_ops(&c->ops, c->cap->features);
+
+	return c;
+}
+
+/* free a driver object returned by sde_hw_vbif_init; NULL is a no-op */
+void sde_hw_vbif_destroy(struct sde_hw_vbif *vbif)
+{
+	kfree(vbif);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
new file mode 100644
index 000000000000..de7fac0ed8f2
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
@@ -0,0 +1,90 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_VBIF_H
+#define _SDE_HW_VBIF_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_vbif;
+
+/**
+ * struct sde_hw_vbif_ops : Interface to the VBIF hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_vbif_ops {
+ /**
+ * set_limit_conf - set transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @limit: outstanding transaction limit
+ */
+ void (*set_limit_conf)(struct sde_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit);
+
+ /**
+ * get_limit_conf - get transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @return: outstanding transaction limit
+ */
+ u32 (*get_limit_conf)(struct sde_hw_vbif *vbif,
+ u32 xin_id, bool rd);
+
+ /**
+ * set_halt_ctrl - set halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @enable: halt control enable
+ */
+ void (*set_halt_ctrl)(struct sde_hw_vbif *vbif,
+ u32 xin_id, bool enable);
+
+ /**
+ * get_halt_ctrl - get halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @return: halt control enable
+ */
+ bool (*get_halt_ctrl)(struct sde_hw_vbif *vbif,
+ u32 xin_id);
+};
+
+/**
+ * struct sde_hw_vbif - VBIF hardware block driver object
+ * @hw:  block register map used by the register accessors
+ * @idx: VBIF instance index from the hardware catalog
+ * @cap: catalog capability entry for this instance
+ * @ops: function table installed by sde_hw_vbif_init()
+ */
+struct sde_hw_vbif {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* vbif */
+	enum sde_vbif idx;
+	const struct sde_vbif_cfg *cap;
+
+	/* ops */
+	struct sde_hw_vbif_ops ops;
+};
+
+/**
+ * sde_hw_vbif_init - initializes the vbif driver for the passed interface idx
+ * @idx: Interface index for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ * @m: Pointer to mdss catalog data
+ */
+struct sde_hw_vbif *sde_hw_vbif_init(enum sde_vbif idx,
+ void __iomem *addr,
+ const struct sde_mdss_cfg *m);
+
+void sde_hw_vbif_destroy(struct sde_hw_vbif *vbif);
+
+#endif /*_SDE_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
new file mode 100644
index 000000000000..426e9991a6b5
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
@@ -0,0 +1,224 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_wb.h"
+#include "sde_formats.h"
+
+/* WB (writeback) register offsets, relative to the WB block base.
+ * Note: a duplicate definition of WB_DST_YSTRIDE1 has been removed.
+ */
+#define WB_DST_FORMAT			0x000
+#define WB_DST_OP_MODE			0x004
+#define WB_DST_PACK_PATTERN		0x008
+#define WB_DST0_ADDR			0x00C
+#define WB_DST1_ADDR			0x010
+#define WB_DST2_ADDR			0x014
+#define WB_DST3_ADDR			0x018
+#define WB_DST_YSTRIDE0			0x01C
+#define WB_DST_YSTRIDE1			0x020
+#define WB_DST_DITHER_BITDEPTH		0x024
+#define WB_DST_MATRIX_ROW0		0x030
+#define WB_DST_MATRIX_ROW1		0x034
+#define WB_DST_MATRIX_ROW2		0x038
+#define WB_DST_MATRIX_ROW3		0x03C
+#define WB_DST_WRITE_CONFIG		0x048
+#define WB_ROTATION_DNSCALER		0x050
+#define WB_ROTATOR_PIPE_DOWNSCALER	0x054
+#define WB_N16_INIT_PHASE_X_C03		0x060
+#define WB_N16_INIT_PHASE_X_C12		0x064
+#define WB_N16_INIT_PHASE_Y_C03		0x068
+#define WB_N16_INIT_PHASE_Y_C12		0x06C
+#define WB_OUT_SIZE			0x074
+#define WB_ALPHA_X_VALUE		0x078
+#define WB_CSC_BASE			0x260
+#define WB_DST_ADDR_SW_STATUS		0x2B0
+#define WB_CDP_CTRL			0x2B4
+#define WB_OUT_IMAGE_SIZE		0x2C0
+#define WB_OUT_XY			0x2C4
+
+/*
+ * Look up the catalog entry for @wb and fill in the register map @b.
+ * Returns the matching catalog entry, or ERR_PTR(-EINVAL) if @wb is not
+ * present in the catalog.
+ */
+static struct sde_wb_cfg *_wb_offset(enum sde_wb wb,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->wb_count; i++) {
+		if (wb == m->wb[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->wb[i].base;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_WB;
+			return &m->wb[i];
+		}
+	}
+	return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Program the four destination plane base addresses for the writeback
+ * output buffer (unused planes carry whatever value is in plane_addr[]).
+ */
+static void sde_hw_wb_setup_outaddress(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cfg *data)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+	SDE_REG_WRITE(c, WB_DST0_ADDR, data->dest.plane_addr[0]);
+	SDE_REG_WRITE(c, WB_DST1_ADDR, data->dest.plane_addr[1]);
+	SDE_REG_WRITE(c, WB_DST2_ADDR, data->dest.plane_addr[2]);
+	SDE_REG_WRITE(c, WB_DST3_ADDR, data->dest.plane_addr[3]);
+}
+
+/*
+ * Program the writeback destination format: pixel format/pack pattern,
+ * plane strides, output size, UBWC write config, secure flag and CDP.
+ * Left byte-identical to the original register sequence; only comments
+ * were added, since the bit packing is hardware-defined.
+ */
+static void sde_hw_wb_setup_format(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cfg *data)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	const struct sde_format *fmt = data->dest.format;
+	u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
+	u32 write_config = 0;
+	u32 opmode = 0;
+	u32 dst_addr_sw = 0;
+	u32 cdp_settings = 0x0;
+
+	chroma_samp = fmt->chroma_sample;
+
+	/* pack chroma sampling, plane count and per-component bit depths */
+	dst_format = (chroma_samp << 23) |
+			(fmt->fetch_planes << 19) |
+			(fmt->bits[C3_ALPHA] << 6) |
+			(fmt->bits[C2_R_Cr] << 4) |
+			(fmt->bits[C1_B_Cb] << 2) |
+			(fmt->bits[C0_G_Y] << 0);
+
+	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
+		dst_format |= BIT(8); /* DSTC3_EN */
+		if (!fmt->alpha_enable ||
+				!(ctx->caps->features & BIT(SDE_WB_PIPE_ALPHA)))
+			dst_format |= BIT(14); /* DST_ALPHA_X */
+	}
+
+	if (SDE_FORMAT_IS_YUV(fmt) &&
+			(ctx->caps->features & BIT(SDE_WB_YUV_CONFIG)))
+		dst_format |= BIT(15);
+
+	if (SDE_FORMAT_IS_DX(fmt))
+		dst_format |= BIT(21);
+
+	/* component ordering within a pixel */
+	pattern = (fmt->element[3] << 24) |
+			(fmt->element[2] << 16) |
+			(fmt->element[1] << 8) |
+			(fmt->element[0] << 0);
+
+	dst_format |= (fmt->unpack_align_msb << 18) |
+			(fmt->unpack_tight << 17) |
+			((fmt->unpack_count - 1) << 12) |
+			((fmt->bpp - 1) << 9);
+
+	/* two plane pitches packed per stride register */
+	ystride0 = data->dest.plane_pitch[0] |
+			(data->dest.plane_pitch[1] << 16);
+	ystride1 = data->dest.plane_pitch[2] |
+			(data->dest.plane_pitch[3] << 16);
+
+	/* prefer the ROI dimensions when a non-empty ROI is configured */
+	if (data->roi.h && data->roi.w)
+		outsize = (data->roi.h << 16) | data->roi.w;
+	else
+		outsize = (data->dest.height << 16) | data->dest.width;
+
+	if (SDE_FORMAT_IS_UBWC(fmt)) {
+		opmode |= BIT(0);
+		dst_format |= BIT(31);
+		if (ctx->highest_bank_bit)
+			write_config |= (ctx->highest_bank_bit << 8);
+		if (fmt->base.pixel_format == DRM_FORMAT_RGB565)
+			write_config |= 0x8;
+	}
+
+	if (data->is_secure)
+		dst_addr_sw |= BIT(0);
+
+	SDE_REG_WRITE(c, WB_ALPHA_X_VALUE, 0xFF);
+	SDE_REG_WRITE(c, WB_DST_FORMAT, dst_format);
+	SDE_REG_WRITE(c, WB_DST_OP_MODE, opmode);
+	SDE_REG_WRITE(c, WB_DST_PACK_PATTERN, pattern);
+	SDE_REG_WRITE(c, WB_DST_YSTRIDE0, ystride0);
+	SDE_REG_WRITE(c, WB_DST_YSTRIDE1, ystride1);
+	SDE_REG_WRITE(c, WB_OUT_SIZE, outsize);
+	SDE_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
+	SDE_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
+
+	/* Enable CDP */
+	cdp_settings = BIT(0);
+
+	if (!SDE_FORMAT_IS_LINEAR(fmt))
+		cdp_settings |= BIT(1);
+
+	/* Enable 64 transactions if line mode*/
+	if (data->intf_mode == INTF_MODE_WB_LINE)
+		cdp_settings |= BIT(3);
+
+	SDE_REG_WRITE(c, WB_CDP_CTRL, cdp_settings);
+}
+
+/*
+ * Program the writeback region of interest: full destination image size,
+ * ROI origin (x,y) and ROI dimensions. Only installed as an op when the
+ * catalog advertises SDE_WB_XY_ROI_OFFSET.
+ */
+static void sde_hw_wb_roi(struct sde_hw_wb *ctx, struct sde_hw_wb_cfg *wb)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 image_size, out_size, out_xy;
+
+	image_size = (wb->dest.height << 16) | wb->dest.width;
+	out_xy = (wb->roi.y << 16) | wb->roi.x;
+	out_size = (wb->roi.h << 16) | wb->roi.w;
+
+	SDE_REG_WRITE(c, WB_OUT_IMAGE_SIZE, image_size);
+	SDE_REG_WRITE(c, WB_OUT_XY, out_xy);
+	SDE_REG_WRITE(c, WB_OUT_SIZE, out_size);
+}
+
+/*
+ * Populate the writeback function table; setup_roi is only installed
+ * when the hardware supports XY ROI offsets.
+ */
+static void _setup_wb_ops(struct sde_hw_wb_ops *ops,
+		unsigned long features)
+{
+	ops->setup_outaddress = sde_hw_wb_setup_outaddress;
+	ops->setup_outformat = sde_hw_wb_setup_format;
+
+	if (test_bit(SDE_WB_XY_ROI_OFFSET, &features))
+		ops->setup_roi = sde_hw_wb_roi;
+}
+
+/**
+ * sde_hw_wb_init - initializes and returns a writeback hw driver object
+ * @idx:    wb_path index for which driver object is required
+ * @addr:   mapped register io address of MDP
+ * @m:      pointer to mdss catalog data
+ * @hw_mdp: pointer to mdp top hw driver object
+ * @return: driver object, or ERR_PTR on lookup/allocation failure
+ */
+struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m,
+		struct sde_hw_mdp *hw_mdp)
+{
+	struct sde_hw_wb *c;
+	struct sde_wb_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _wb_offset(idx, m, addr, &c->hw);
+	if (IS_ERR(cfg)) {
+		WARN(1, "Unable to find wb idx=%d\n", idx);
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->caps = cfg;
+	_setup_wb_ops(&c->ops, c->caps->features);
+	/* NOTE(review): assumes at least one MDP entry in the catalog -
+	 * confirm m->mdp_count >= 1 is guaranteed by the caller
+	 */
+	c->highest_bank_bit = m->mdp[0].highest_bank_bit;
+	c->hw_mdp = hw_mdp;
+
+	return c;
+}
+
+/* free a driver object returned by sde_hw_wb_init; NULL is a no-op */
+void sde_hw_wb_destroy(struct sde_hw_wb *hw_wb)
+{
+	kfree(hw_wb);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.h b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
new file mode 100644
index 000000000000..52a5ee5b06a5
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_WB_H
+#define _SDE_HW_WB_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_top.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_wb;
+
+struct sde_hw_wb_cfg {
+ struct sde_hw_fmt_layout dest;
+ enum sde_intf_mode intf_mode;
+ struct traffic_shaper_cfg ts_cfg;
+ struct sde_rect roi;
+ bool is_secure;
+};
+
+/**
+ *
+ * struct sde_hw_wb_ops : Interface to the wb Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_wb_ops {
+ void (*setup_csc_data)(struct sde_hw_wb *ctx,
+ struct sde_csc_cfg *data);
+
+ void (*setup_outaddress)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_outformat)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_rotator)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_dither)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_cdwn)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_trafficshaper)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_roi)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+};
+
+/**
+ * struct sde_hw_wb : WB driver object
+ * @struct sde_hw_blk_reg_map *hw;
+ * @idx
+ * @wb_hw_caps
+ * @ops
+ * @highest_bank_bit: GPU highest memory bank bit used
+ * @hw_mdp: MDP top level hardware block
+ */
+struct sde_hw_wb {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* wb path */
+ int idx;
+ const struct sde_wb_cfg *caps;
+
+ /* ops */
+ struct sde_hw_wb_ops ops;
+
+ u32 highest_bank_bit;
+
+ struct sde_hw_mdp *hw_mdp;
+};
+
+/**
+ * sde_hw_wb_init(): Initializes and return writeback hw driver object.
+ * @idx: wb_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ * @hw_mdp: pointer to mdp top hw driver object
+ */
+struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m,
+ struct sde_hw_mdp *hw_mdp);
+
+/**
+ * sde_hw_wb_destroy(): Destroy writeback hw driver object.
+ * @hw_wb: Pointer to writeback hw driver object
+ */
+void sde_hw_wb_destroy(struct sde_hw_wb *hw_wb);
+
+#endif /*_SDE_HW_WB_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hwio.h b/drivers/gpu/drm/msm/sde/sde_hwio.h
new file mode 100644
index 000000000000..c95bace3a004
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hwio.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HWIO_H
+#define _SDE_HWIO_H
+
+#include "sde_hw_util.h"
+
+/**
+ * MDP TOP block Register and bit fields and defines
+ */
+#define DISP_INTF_SEL 0x004
+#define INTR_EN 0x010
+#define INTR_STATUS 0x014
+#define INTR_CLEAR 0x018
+#define INTR2_EN 0x008
+#define INTR2_STATUS 0x00c
+#define INTR2_CLEAR 0x02c
+#define HIST_INTR_EN 0x01c
+#define HIST_INTR_STATUS 0x020
+#define HIST_INTR_CLEAR 0x024
+#define INTF_INTR_EN 0x1C0
+#define INTF_INTR_STATUS 0x1C4
+#define INTF_INTR_CLEAR 0x1C8
+#define SPLIT_DISPLAY_EN 0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN 0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN 0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN 0x308
+#define PPB0_CNTL 0x330
+#define PPB0_CONFIG 0x334
+#define PPB1_CNTL 0x338
+#define PPB1_CONFIG 0x33C
+#define HW_EVENTS_CTL 0x37C
+#define CLK_CTRL3 0x3A8
+#define CLK_STATUS3 0x3AC
+#define CLK_CTRL4 0x3B0
+#define CLK_STATUS4 0x3B4
+#define CLK_CTRL5 0x3B8
+#define CLK_STATUS5 0x3BC
+#define CLK_CTRL7 0x3D0
+#define CLK_STATUS7 0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL 0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL 0x3F4
+#define INTF_SW_RESET_MASK 0x3FC
+#define MDP_OUT_CTL_0 0x410
+#define MDP_VSYNC_SEL 0x414
+#define DCE_SEL 0x450
+
+#endif /*_SDE_HWIO_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
new file mode 100644
index 000000000000..909d6df38260
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -0,0 +1,166 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "sde_irq.h"
+#include "sde_core_irq.h"
+
+/**
+ * sde_irq - MDSS level IRQ dispatcher
+ * @kms: pointer to kms context
+ * @return: IRQ_HANDLED
+ *
+ * Handles the MDP source directly via sde_core_irq(); all remaining
+ * pending sources are routed to the virtual irq_domain clients.
+ */
+irqreturn_t sde_irq(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	u32 interrupts;
+
+	sde_kms->hw_intr->ops.get_interrupt_sources(sde_kms->hw_intr,
+			&interrupts);
+
+	/*
+	 * Taking care of MDP interrupt
+	 */
+	if (interrupts & IRQ_SOURCE_MDP) {
+		interrupts &= ~IRQ_SOURCE_MDP;
+		sde_core_irq(sde_kms);
+	}
+
+	/*
+	 * Routing all other interrupts to external drivers
+	 */
+	while (interrupts) {
+		irq_hw_number_t hwirq = fls(interrupts) - 1;
+
+		generic_handle_irq(irq_find_mapping(
+				sde_kms->irq_controller.domain, hwirq));
+		/*
+		 * use an unsigned constant: "1 << 31" is undefined
+		 * behavior for a signed int when hwirq == 31
+		 */
+		interrupts &= ~(1U << hwirq);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * irq_chip mask callback: clear the hwirq's bit in the software
+ * enabled_mask. The memory barriers pair with readers of the mask
+ * on other CPUs.
+ */
+static void sde_hw_irq_mask(struct irq_data *irqd)
+{
+	struct sde_kms *sde_kms;
+
+	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+		SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
+		return;
+	}
+	sde_kms = irq_data_get_irq_chip_data(irqd);
+
+	smp_mb__before_atomic();
+	clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+	smp_mb__after_atomic();
+}
+
+/*
+ * irq_chip unmask callback: set the hwirq's bit in the software
+ * enabled_mask (mirror of sde_hw_irq_mask).
+ */
+static void sde_hw_irq_unmask(struct irq_data *irqd)
+{
+	struct sde_kms *sde_kms;
+
+	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+		SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
+		return;
+	}
+	sde_kms = irq_data_get_irq_chip_data(irqd);
+
+	smp_mb__before_atomic();
+	set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+	smp_mb__after_atomic();
+}
+
+/* irq_chip for the MDSS-level virtual interrupts; mask/unmask only
+ * update the software enabled_mask, no hardware register is touched
+ */
+static struct irq_chip sde_hw_irq_chip = {
+	.name = "sde",
+	.irq_mask = sde_hw_irq_mask,
+	.irq_unmask = sde_hw_irq_unmask,
+};
+
+/*
+ * irq_domain map callback: bind a newly mapped virq to the sde irq_chip
+ * and stash the sde_kms pointer as chip data for the mask/unmask ops.
+ */
+static int sde_hw_irqdomain_map(struct irq_domain *domain,
+		unsigned int irq, irq_hw_number_t hwirq)
+{
+	struct sde_kms *sde_kms;
+	int rc;
+
+	if (!domain || !domain->host_data) {
+		SDE_ERROR("invalid parameters domain %d\n", domain != 0);
+		return -EINVAL;
+	}
+	sde_kms = domain->host_data;
+
+	irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
+	rc = irq_set_chip_data(irq, sde_kms);
+
+	return rc;
+}
+
+/* const: irq_domain_add_linear() takes a const ops pointer, and making
+ * the table read-only moves it into rodata
+ */
+static const struct irq_domain_ops sde_hw_irqdomain_ops = {
+	.map = sde_hw_irqdomain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+/**
+ * sde_irq_preinstall - create the MDSS-level virtual irq_domain
+ * @kms: pointer to kms context
+ *
+ * Registers a 32-entry linear irq_domain and resets the software
+ * enable mask before delegating to sde_core_irq_preinstall().
+ */
+void sde_irq_preinstall(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms;
+	struct device *dev;
+	struct irq_domain *domain;
+
+	/* validate kms before conversion, consistent with the siblings */
+	if (!kms) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+	sde_kms = to_sde_kms(kms);
+
+	if (!sde_kms->dev || !sde_kms->dev->dev) {
+		pr_err("invalid device handles\n");
+		return;
+	}
+	dev = sde_kms->dev->dev;
+
+	domain = irq_domain_add_linear(dev->of_node, 32,
+			&sde_hw_irqdomain_ops, sde_kms);
+	if (!domain) {
+		pr_err("failed to add irq_domain\n");
+		return;
+	}
+
+	sde_kms->irq_controller.enabled_mask = 0;
+	sde_kms->irq_controller.domain = domain;
+
+	sde_core_irq_preinstall(sde_kms);
+}
+
+/**
+ * sde_irq_postinstall - perform post-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: 0 if success; error code otherwise
+ */
+int sde_irq_postinstall(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms;
+	int rc;
+
+	/* check kms BEFORE converting it - the original validated after use */
+	if (!kms) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+	sde_kms = to_sde_kms(kms);
+
+	rc = sde_core_irq_postinstall(sde_kms);
+
+	return rc;
+}
+
+/**
+ * sde_irq_uninstall - uninstall MDSS IRQ handler and tear down the domain
+ * @kms: pointer to kms context
+ */
+void sde_irq_uninstall(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms;
+
+	/* check kms BEFORE converting it - the original validated after use */
+	if (!kms) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+	sde_kms = to_sde_kms(kms);
+
+	sde_core_irq_uninstall(sde_kms);
+
+	if (sde_kms->irq_controller.domain) {
+		irq_domain_remove(sde_kms->irq_controller.domain);
+		sde_kms->irq_controller.domain = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.h b/drivers/gpu/drm/msm/sde/sde_irq.h
new file mode 100644
index 000000000000..e10900719f3f
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_irq.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_IRQ_H__
+#define __SDE_IRQ_H__
+
+#include <linux/kernel.h>
+#include <linux/irqdomain.h>
+
+#include "msm_kms.h"
+
+/**
+ * sde_irq_controller - define MDSS level interrupt controller context
+ * @enabled_mask: enable status of MDSS level interrupt
+ * @domain: interrupt domain of this controller
+ */
+struct sde_irq_controller {
+ unsigned long enabled_mask;
+ struct irq_domain *domain;
+};
+
+/**
+ * sde_irq_preinstall - perform pre-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: none
+ */
+void sde_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * sde_irq_postinstall - perform post-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: 0 if success; error code otherwise
+ */
+int sde_irq_postinstall(struct msm_kms *kms);
+
+/**
+ * sde_irq_uninstall - uninstall MDSS IRQ handler
+ * @drm_dev: pointer to kms context
+ * @return: none
+ */
+void sde_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * sde_irq - MDSS level IRQ handler
+ * @kms: pointer to kms context
+ * @return: interrupt handling status
+ */
+irqreturn_t sde_irq(struct msm_kms *kms);
+
+#endif /* __SDE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
new file mode 100644
index 000000000000..afe90d16e31d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -0,0 +1,1208 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
+#include "dsi_display.h"
+#include "dsi_drm.h"
+#include "sde_wb.h"
+
+#include "sde_kms.h"
+#include "sde_core_irq.h"
+#include "sde_formats.h"
+#include "sde_hw_vbif.h"
+#include "sde_vbif.h"
+#include "sde_encoder.h"
+#include "sde_plane.h"
+#include "sde_crtc.h"
+
+#define CREATE_TRACE_POINTS
+#include "sde_trace.h"
+
+static const char * const iommu_ports[] = {
+ "mdp_0",
+};
+
+/**
+ * Controls size of event log buffer. Specified as a power of 2.
+ */
+#define SDE_EVTLOG_SIZE 1024
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/hw_log_mask
+ *
+ * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
+ */
+#define SDE_DEBUGFS_DIR "msm_sde"
+#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+/**
+ * sdecustom - enable certain driver customizations for sde clients
+ * Enabling this modifies the standard DRM behavior slightly and assumes
+ * that the clients have specific knowledge about the modifications that
+ * are involved, so don't enable this unless you know what you're doing.
+ *
+ * Parts of the driver that are affected by this setting may be located by
+ * searching for invocations of the 'sde_is_custom_client()' function.
+ *
+ * This is enabled by default.
+ */
+static bool sdecustom = true;
+module_param(sdecustom, bool, 0400);
+MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
+
+static int sde_kms_hw_init(struct msm_kms *kms);
+static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
+
+bool sde_is_custom_client(void)
+{
+ return sdecustom;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * _sde_danger_signal_status - dump danger or safe signal status for the
+ * MDP top block, all SSPP pipes and all writeback blocks into a seq_file.
+ * @s: seq_file to print into (private data is the sde_kms pointer)
+ * @danger_status: true to query danger status, false for safe status
+ * Returns: 0 always; bad arguments are logged and produce no output
+ */
+static int _sde_danger_signal_status(struct seq_file *s,
+		bool danger_status)
+{
+	struct sde_kms *kms = (struct sde_kms *)s->private;
+	struct msm_drm_private *priv;
+	struct sde_danger_safe_status status;
+	int i;
+
+	if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+		SDE_ERROR("invalid arg(s)\n");
+		return 0;
+	}
+
+	priv = kms->dev->dev_private;
+	memset(&status, 0, sizeof(struct sde_danger_safe_status));
+
+	/* clocks must be on while touching the status registers */
+	sde_power_resource_enable(&priv->phandle, kms->core_client, true);
+	if (danger_status) {
+		seq_puts(s, "\nDanger signal status:\n");
+		if (kms->hw_mdp->ops.get_danger_status)
+			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+					&status);
+	} else {
+		seq_puts(s, "\nSafe signal status:\n");
+		/* fix: the safe branch must query safe status, not danger */
+		if (kms->hw_mdp->ops.get_safe_status)
+			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+					&status);
+	}
+	sde_power_resource_enable(&priv->phandle, kms->core_client, false);
+
+	seq_printf(s, "MDP : 0x%x\n", status.mdp);
+
+	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+		seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
+				status.sspp[i]);
+	seq_puts(s, "\n");
+
+	for (i = WB_0; i < WB_MAX; i++)
+		seq_printf(s, "WB%d : 0x%x \t", i - WB_0,
+				status.wb[i]);
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+/*
+ * DEFINE_SDE_DEBUGFS_SEQ_FOPS - seq_file boilerplate generator.
+ * Defines <prefix>_open() and <prefix>_fops around an existing
+ * <prefix>_show() callback via single_open()/single_release().
+ */
+#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+	return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+	.owner = THIS_MODULE, \
+	.open = __prefix ## _open, \
+	.release = single_release, \
+	.read = seq_read, \
+	.llseek = seq_lseek, \
+}
+
+/* seq_file show callback: danger signal status dump */
+static int sde_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+	return _sde_danger_signal_status(s, true);
+}
+DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_danger_stats);
+
+/* seq_file show callback: safe signal status dump */
+static int sde_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+	return _sde_danger_signal_status(s, false);
+}
+DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_safe_stats);
+
+/* Remove the "danger" debugfs subtree created by sde_debugfs_danger_init() */
+static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
+{
+	debugfs_remove_recursive(sde_kms->debugfs_danger);
+	sde_kms->debugfs_danger = NULL;
+}
+
+/*
+ * Create the "danger" debugfs directory under @parent with two read-only
+ * status nodes (danger_status, safe_status).
+ * Returns: 0 on success, -EINVAL if the directory cannot be created
+ */
+static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
+		struct dentry *parent)
+{
+	sde_kms->debugfs_danger = debugfs_create_dir("danger",
+			parent);
+	if (!sde_kms->debugfs_danger) {
+		SDE_ERROR("failed to create danger debugfs\n");
+		return -EINVAL;
+	}
+
+	/* both files hand the sde_kms pointer to the seq_file callbacks */
+	debugfs_create_file("danger_status", 0644, sde_kms->debugfs_danger,
+			sde_kms, &sde_debugfs_danger_stats_fops);
+	debugfs_create_file("safe_status", 0644, sde_kms->debugfs_danger,
+			sde_kms, &sde_debugfs_safe_stats_fops);
+
+	return 0;
+}
+
+/*
+ * _sde_debugfs_show_regset32 - seq_file show callback that hex-dumps the
+ * 32-bit register range described by the sde_debugfs_regset32 in
+ * s->private, 16 bytes per row, with sde core clocks held on around the
+ * reads. Always returns 0; failures are reported into the seq_file.
+ */
+static int _sde_debugfs_show_regset32(struct seq_file *s, void *data)
+{
+	struct sde_debugfs_regset32 *regset;
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	void __iomem *base;
+	uint32_t i, addr;
+
+	if (!s || !s->private)
+		return 0;
+
+	regset = s->private;
+
+	sde_kms = regset->sde_kms;
+	if (!sde_kms || !sde_kms->mmio)
+		return 0;
+
+	dev = sde_kms->dev;
+	if (!dev)
+		return 0;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return 0;
+
+	base = sde_kms->mmio + regset->offset;
+
+	/* insert padding spaces, if needed, so a non-16-byte-aligned
+	 * start still lines up with the row header below
+	 */
+	if (regset->offset & 0xF) {
+		seq_printf(s, "[%x]", regset->offset & ~0xF);
+		for (i = 0; i < (regset->offset & 0xF); i += 4)
+			seq_puts(s, "         ");
+	}
+
+	if (sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, true)) {
+		seq_puts(s, "failed to enable sde clocks\n");
+		return 0;
+	}
+
+	/* main register output: "[addr]" header every 16 bytes */
+	for (i = 0; i < regset->blk_len; i += 4) {
+		addr = regset->offset + i;
+		if ((addr & 0xF) == 0x0)
+			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+		seq_printf(s, " %08x", readl_relaxed(base + i));
+	}
+	seq_puts(s, "\n");
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+	return 0;
+}
+
+/* open handler wiring a regset32 (inode private data) into seq_file */
+static int sde_debugfs_open_regset32(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, _sde_debugfs_show_regset32, inode->i_private);
+}
+
+/* read-only file_operations shared by all regset32 debugfs nodes */
+static const struct file_operations sde_fops_regset32 = {
+	.open =		sde_debugfs_open_regset32,
+	.read =		seq_read,
+	.llseek =	seq_lseek,
+	.release =	single_release,
+};
+
+/*
+ * Populate @regset with the register range [offset, offset+length) and
+ * the owning kms handle; silently ignores a NULL regset.
+ */
+void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
+		uint32_t offset, uint32_t length, struct sde_kms *sde_kms)
+{
+	if (regset) {
+		regset->offset = offset;
+		regset->blk_len = length;
+		regset->sde_kms = sde_kms;
+	}
+}
+
+/*
+ * Create a debugfs file exposing a pre-populated regset32 as a register
+ * hex dump. Returns the dentry pointer, or NULL on bad arguments.
+ * Note: @regset must stay alive as long as the debugfs node exists.
+ */
+void *sde_debugfs_create_regset32(const char *name, umode_t mode,
+		void *parent, struct sde_debugfs_regset32 *regset)
+{
+	if (!name || !regset || !regset->sde_kms || !regset->blk_len)
+		return NULL;
+
+	/* make sure offset is a multiple of 4 */
+	regset->offset = round_down(regset->offset, 4);
+
+	return debugfs_create_file(name, mode, parent,
+			regset, &sde_fops_regset32);
+}
+
+/* Return the kms debugfs root dentry, or NULL if @sde_kms is NULL */
+void *sde_debugfs_get_root(struct sde_kms *sde_kms)
+{
+	/* fix: use NULL, not 0, for the pointer-valued miss case */
+	return sde_kms ? sde_kms->debugfs_root : NULL;
+}
+
+/*
+ * _sde_debugfs_init - create the sde debugfs hierarchy: the hw log mask
+ * node, a common "debug" directory, and the danger/vbif sub-nodes.
+ * Uses the drm primary minor's debugfs root when available.
+ * Returns: 0 on success, -EINVAL on bad argument
+ */
+static int _sde_debugfs_init(struct sde_kms *sde_kms)
+{
+	void *p;
+
+	p = sde_hw_util_get_log_mask_ptr();
+
+	if (!sde_kms || !p)
+		return -EINVAL;
+
+	if (sde_kms->dev && sde_kms->dev->primary)
+		sde_kms->debugfs_root = sde_kms->dev->primary->debugfs_root;
+	else
+		/* fix: pass NULL, not 0, as the parent dentry pointer */
+		sde_kms->debugfs_root = debugfs_create_dir(SDE_DEBUGFS_DIR,
+				NULL);
+
+	/* allow debugfs_root to be NULL */
+	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME,
+			0644, sde_kms->debugfs_root, p);
+
+	/* create common folder for debug information */
+	sde_kms->debugfs_debug = debugfs_create_dir("debug",
+			sde_kms->debugfs_root);
+	if (!sde_kms->debugfs_debug)
+		SDE_ERROR("failed to create debugfs debug directory\n");
+
+	sde_debugfs_danger_init(sde_kms, sde_kms->debugfs_debug);
+	sde_debugfs_vbif_init(sde_kms, sde_kms->debugfs_debug);
+
+	return 0;
+}
+
+/* Tear down the full sde debugfs hierarchy; safe to call repeatedly */
+static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
+{
+	/* don't need to NULL check debugfs_root */
+	if (sde_kms) {
+		sde_debugfs_vbif_destroy(sde_kms);
+		sde_debugfs_danger_destroy(sde_kms);
+		debugfs_remove_recursive(sde_kms->debugfs_debug);
+		/* fix: clear pointer members with NULL, not 0 */
+		sde_kms->debugfs_debug = NULL;
+		debugfs_remove_recursive(sde_kms->debugfs_root);
+		sde_kms->debugfs_root = NULL;
+	}
+}
+#else
+/* no-op stubs when CONFIG_DEBUG_FS is disabled */
+/* fix: match the real sde_debugfs_danger_destroy() signature, which
+ * takes only the sde_kms pointer (no parent dentry)
+ */
+static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
+{
+}
+
+static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
+		struct dentry *parent)
+{
+	return 0;
+}
+#endif
+
+/*
+ * Enable vblank interrupts on @crtc. Power/clocks are enabled first so
+ * the crtc can touch its interrupt registers.
+ * Returns: result of sde_crtc_vblank()
+ */
+static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+
+	return sde_crtc_vblank(crtc, true);
+}
+
+/*
+ * Disable vblank interrupts on @crtc, then release the power vote taken
+ * in sde_kms_enable_vblank() (mirror order of the enable path).
+ */
+static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+
+	sde_crtc_vblank(crtc, false);
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+}
+
+/*
+ * Take a power/clock vote for the duration of an atomic commit; the
+ * matching release is in sde_kms_complete_commit().
+ */
+static void sde_kms_prepare_commit(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+}
+
+/* Kick off the committed frame on every active crtc in the state */
+static void sde_kms_commit(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i;
+
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		/* only active crtcs have hardware to flush */
+		if (crtc->state->active) {
+			SDE_EVT32(DRMID(crtc));
+			sde_crtc_commit_kickoff(crtc);
+		}
+	}
+}
+
+/*
+ * Finish an atomic commit: signal per-crtc completion (fences etc.) and
+ * drop the power vote taken in sde_kms_prepare_commit().
+ */
+static void sde_kms_complete_commit(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i;
+
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
+		sde_crtc_complete_commit(crtc, old_crtc_state);
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
+}
+
+/*
+ * Block until the pending commit on @crtc has reached the hardware, by
+ * waiting on every encoder currently attached to the crtc. A no-op for
+ * disabled or inactive crtcs.
+ */
+static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
+		struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct drm_device *dev;
+	int ret;
+
+	if (!kms || !crtc || !crtc->state) {
+		SDE_ERROR("invalid params\n");
+		return;
+	}
+
+	/* fix: dereference crtc only after the NULL check above */
+	dev = crtc->dev;
+
+	if (!crtc->state->enable) {
+		SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
+		return;
+	}
+
+	if (!crtc->state->active) {
+		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+		return;
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+		/*
+		 * Wait post-flush if necessary to delay before plane_cleanup
+		 * For example, wait for vsync in case of video mode panels
+		 * This should be a no-op for command mode panels
+		 */
+		SDE_EVT32(DRMID(crtc));
+		ret = sde_encoder_wait_for_commit_done(encoder);
+		if (ret && ret != -EWOULDBLOCK) {
+			SDE_ERROR("wait for commit done returned %d\n", ret);
+			break;
+		}
+	}
+}
+
+/*
+ * Prepare per-crtc output fences ahead of a commit. Takes the global
+ * connection_mutex under the commit's acquire context, retrying on
+ * -EDEADLK per the drm modeset-lock backoff protocol.
+ */
+static void sde_kms_prepare_fence(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i, rc;
+
+	if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
+		SDE_ERROR("invalid argument(s)\n");
+		return;
+	}
+
+retry:
+	/* attempt to acquire ww mutex for connection */
+	rc = drm_modeset_lock(&old_state->dev->mode_config.connection_mutex,
+			       old_state->acquire_ctx);
+
+	if (rc == -EDEADLK) {
+		/* deadlock against another acquire ctx: back off and retry */
+		drm_modeset_backoff(old_state->acquire_ctx);
+		goto retry;
+	}
+
+	/* old_state actually contains updated crtc pointers */
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
+		sde_crtc_prepare_commit(crtc, old_crtc_state);
+}
+
+/**
+ * _sde_kms_get_displays - query for underlying display handles and cache them
+ * @sde_kms: Pointer to sde kms structure
+ * Returns: Zero on success
+ */
+static int _sde_kms_get_displays(struct sde_kms *sde_kms)
+{
+	int rc = -ENOMEM;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde kms\n");
+		return -EINVAL;
+	}
+
+	/* dsi */
+	sde_kms->dsi_displays = NULL;
+	sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
+	if (sde_kms->dsi_display_count) {
+		sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
+				sizeof(void *),
+				GFP_KERNEL);
+		if (!sde_kms->dsi_displays) {
+			SDE_ERROR("failed to allocate dsi displays\n");
+			goto exit_deinit_dsi;
+		}
+		/* count may shrink to the number of active displays */
+		sde_kms->dsi_display_count =
+			dsi_display_get_active_displays(sde_kms->dsi_displays,
+					sde_kms->dsi_display_count);
+	}
+
+	/* wb */
+	sde_kms->wb_displays = NULL;
+	sde_kms->wb_display_count = sde_wb_get_num_of_displays();
+	if (sde_kms->wb_display_count) {
+		sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
+				sizeof(void *),
+				GFP_KERNEL);
+		if (!sde_kms->wb_displays) {
+			SDE_ERROR("failed to allocate wb displays\n");
+			goto exit_deinit_wb;
+		}
+		sde_kms->wb_display_count =
+			wb_display_get_displays(sde_kms->wb_displays,
+					sde_kms->wb_display_count);
+	}
+	return 0;
+
+	/* unwind in reverse order; kfree(NULL) is a safe no-op */
+exit_deinit_wb:
+	kfree(sde_kms->wb_displays);
+	sde_kms->wb_display_count = 0;
+	sde_kms->wb_displays = NULL;
+
+exit_deinit_dsi:
+	kfree(sde_kms->dsi_displays);
+	sde_kms->dsi_display_count = 0;
+	sde_kms->dsi_displays = NULL;
+	return rc;
+}
+
+/**
+ * _sde_kms_release_displays - release cache of underlying display handles
+ * @sde_kms: Pointer to sde kms structure
+ */
+static void _sde_kms_release_displays(struct sde_kms *sde_kms)
+{
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde kms\n");
+		return;
+	}
+
+	/* frees only the handle arrays, not the displays themselves */
+	kfree(sde_kms->wb_displays);
+	sde_kms->wb_displays = NULL;
+	sde_kms->wb_display_count = 0;
+
+	kfree(sde_kms->dsi_displays);
+	sde_kms->dsi_displays = NULL;
+	sde_kms->dsi_display_count = 0;
+}
+
+/**
+ * _sde_kms_setup_displays - create encoders, bridges and connectors
+ *                           for underlying displays
+ * @dev: Pointer to drm device structure
+ * @priv: Pointer to private drm device data
+ * @sde_kms: Pointer to sde kms structure
+ * Returns: Zero on success
+ *
+ * Per-display failures are logged and skipped (best effort), so a bad
+ * panel does not prevent the remaining displays from probing.
+ */
+static int _sde_kms_setup_displays(struct drm_device *dev,
+		struct msm_drm_private *priv,
+		struct sde_kms *sde_kms)
+{
+	static const struct sde_connector_ops dsi_ops = {
+		.post_init =  dsi_conn_post_init,
+		.detect =     dsi_conn_detect,
+		.get_modes =  dsi_connector_get_modes,
+		.mode_valid = dsi_conn_mode_valid,
+		.get_info =   dsi_display_get_info,
+		.set_backlight = dsi_display_set_backlight
+	};
+	static const struct sde_connector_ops wb_ops = {
+		.post_init =    sde_wb_connector_post_init,
+		.detect =       sde_wb_connector_detect,
+		.get_modes =    sde_wb_connector_get_modes,
+		.set_property = sde_wb_connector_set_property,
+		.get_info =     sde_wb_get_info,
+	};
+	struct msm_display_info info;
+	struct drm_encoder *encoder;
+	void *display, *connector;
+	int i, max_encoders;
+	int rc = 0;
+
+	if (!dev || !priv || !sde_kms) {
+		SDE_ERROR("invalid argument(s)\n");
+		return -EINVAL;
+	}
+
+	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count;
+	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
+		max_encoders = ARRAY_SIZE(priv->encoders);
+		SDE_ERROR("capping number of displays to %d", max_encoders);
+	}
+
+	/* dsi: encoder -> bridge -> connector, unwinding on each failure */
+	for (i = 0; i < sde_kms->dsi_display_count &&
+		priv->num_encoders < max_encoders; ++i) {
+		display = sde_kms->dsi_displays[i];
+		encoder = NULL;
+
+		memset(&info, 0x0, sizeof(info));
+		rc = dsi_display_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("dsi get_info %d failed\n", i);
+			continue;
+		}
+
+		encoder = sde_encoder_init(dev, &info);
+		if (IS_ERR_OR_NULL(encoder)) {
+			SDE_ERROR("encoder init failed for dsi %d\n", i);
+			continue;
+		}
+
+		rc = dsi_display_drm_bridge_init(display, encoder);
+		if (rc) {
+			SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		connector = sde_connector_init(dev,
+					encoder,
+					0,
+					display,
+					&dsi_ops,
+					DRM_CONNECTOR_POLL_HPD,
+					DRM_MODE_CONNECTOR_DSI);
+		if (connector) {
+			priv->encoders[priv->num_encoders++] = encoder;
+		} else {
+			SDE_ERROR("dsi %d connector init failed\n", i);
+			dsi_display_drm_bridge_deinit(display);
+			sde_encoder_destroy(encoder);
+		}
+	}
+
+	/* wb: same pattern as dsi, with writeback-specific ops */
+	for (i = 0; i < sde_kms->wb_display_count &&
+		priv->num_encoders < max_encoders; ++i) {
+		display = sde_kms->wb_displays[i];
+		encoder = NULL;
+
+		memset(&info, 0x0, sizeof(info));
+		rc = sde_wb_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("wb get_info %d failed\n", i);
+			continue;
+		}
+
+		encoder = sde_encoder_init(dev, &info);
+		if (IS_ERR_OR_NULL(encoder)) {
+			SDE_ERROR("encoder init failed for wb %d\n", i);
+			continue;
+		}
+
+		rc = sde_wb_drm_init(display, encoder);
+		if (rc) {
+			SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		connector = sde_connector_init(dev,
+				encoder,
+				0,
+				display,
+				&wb_ops,
+				DRM_CONNECTOR_POLL_HPD,
+				DRM_MODE_CONNECTOR_VIRTUAL);
+		if (connector) {
+			priv->encoders[priv->num_encoders++] = encoder;
+		} else {
+			SDE_ERROR("wb %d connector init failed\n", i);
+			sde_wb_drm_deinit(display);
+			sde_encoder_destroy(encoder);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Destroy all drm mode objects (crtcs, planes, connectors, encoders)
+ * owned by this kms instance and release the cached display handles.
+ */
+static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde_kms\n");
+		return;
+	} else if (!sde_kms->dev) {
+		SDE_ERROR("invalid dev\n");
+		return;
+	} else if (!sde_kms->dev->dev_private) {
+		SDE_ERROR("invalid dev_private\n");
+		return;
+	}
+	priv = sde_kms->dev->dev_private;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+	priv->num_crtcs = 0;
+
+	for (i = 0; i < priv->num_planes; i++)
+		priv->planes[i]->funcs->destroy(priv->planes[i]);
+	priv->num_planes = 0;
+
+	for (i = 0; i < priv->num_connectors; i++)
+		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+	priv->num_connectors = 0;
+
+	for (i = 0; i < priv->num_encoders; i++)
+		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+	priv->num_encoders = 0;
+
+	_sde_kms_release_displays(sde_kms);
+}
+
+/*
+ * _sde_kms_drm_obj_init - create all drm mode objects for this kms:
+ * encoders/connectors for each display, one plane per sspp (primary
+ * planes first), and one crtc per encoder. On any failure everything
+ * created so far is torn down via _sde_kms_drm_obj_destroy().
+ * Returns: 0 on success, negative error code otherwise
+ */
+static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
+{
+	struct drm_device *dev;
+	struct drm_plane *primary_planes[MAX_PLANES], *plane;
+	struct drm_crtc *crtc;
+
+	struct msm_drm_private *priv;
+	struct sde_mdss_cfg *catalog;
+
+	int primary_planes_idx, i, ret;
+	int max_crtc_count, max_plane_count;
+
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
+		SDE_ERROR("invalid sde_kms\n");
+		return -EINVAL;
+	}
+
+	dev = sde_kms->dev;
+	priv = dev->dev_private;
+	catalog = sde_kms->catalog;
+
+	/*
+	 * Query for underlying display drivers, and create connectors,
+	 * bridges and encoders for them.
+	 */
+	if (!_sde_kms_get_displays(sde_kms))
+		(void)_sde_kms_setup_displays(dev, priv, sde_kms);
+
+	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
+	max_plane_count = min_t(u32, catalog->sspp_count, MAX_PLANES);
+
+	/* Create the planes */
+	primary_planes_idx = 0;
+	for (i = 0; i < max_plane_count; i++) {
+		bool primary = true;
+
+		/* cursor pipes and surplus pipes become overlay planes */
+		if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
+			|| primary_planes_idx >= max_crtc_count)
+			primary = false;
+
+		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
+				(1UL << max_crtc_count) - 1);
+		if (IS_ERR(plane)) {
+			SDE_ERROR("sde_plane_init failed\n");
+			ret = PTR_ERR(plane);
+			goto fail;
+		}
+		priv->planes[priv->num_planes++] = plane;
+
+		if (primary)
+			primary_planes[primary_planes_idx++] = plane;
+	}
+
+	max_crtc_count = min(max_crtc_count, primary_planes_idx);
+
+	/* Create one CRTC per encoder */
+	for (i = 0; i < max_crtc_count; i++) {
+		crtc = sde_crtc_init(dev, primary_planes[i]);
+		if (IS_ERR(crtc)) {
+			ret = PTR_ERR(crtc);
+			goto fail;
+		}
+		priv->crtcs[priv->num_crtcs++] = crtc;
+	}
+
+	if (sde_is_custom_client()) {
+		/* All CRTCs are compatible with all planes */
+		for (i = 0; i < priv->num_planes; i++)
+			priv->planes[i]->possible_crtcs =
+				(1 << priv->num_crtcs) - 1;
+	}
+
+	/* All CRTCs are compatible with all encoders */
+	for (i = 0; i < priv->num_encoders; i++)
+		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+	return 0;
+fail:
+	_sde_kms_drm_obj_destroy(sde_kms);
+	return ret;
+}
+
+/*
+ * Post-init hook: runs after drm device registration; currently only
+ * permits the drm vblank timer to disable vblank interrupts when idle.
+ * Returns: 0 on success, -EINVAL on bad kms pointer
+ */
+static int sde_kms_postinit(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev;
+
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
+		SDE_ERROR("invalid sde_kms\n");
+		return -EINVAL;
+	}
+
+	dev = sde_kms->dev;
+
+	/*
+	 * Allow vblank interrupt to be disabled by drm vblank timer.
+	 */
+	dev->vblank_disable_allowed = true;
+
+	return 0;
+}
+
+/* Pixel clock pass-through: sde accepts any requested rate unchanged */
+static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
+		struct drm_encoder *encoder)
+{
+	return rate;
+}
+
+/*
+ * _sde_kms_hw_destroy - release all hardware resources acquired in
+ * sde_kms_hw_init(), in reverse acquisition order. Every teardown step
+ * is guarded and pointers are cleared, so this is safe to call from
+ * partial-init error paths and more than once during shutdown.
+ */
+static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
+		struct platform_device *pdev)
+{
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!sde_kms || !pdev)
+		return;
+
+	dev = sde_kms->dev;
+	if (!dev)
+		return;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return;
+
+	if (sde_kms->hw_intr)
+		sde_hw_intr_destroy(sde_kms->hw_intr);
+	sde_kms->hw_intr = NULL;
+
+	_sde_kms_release_displays(sde_kms);
+
+	/* safe to call these more than once during shutdown */
+	_sde_debugfs_destroy(sde_kms);
+	_sde_kms_mmu_destroy(sde_kms);
+	sde_core_perf_destroy(&sde_kms->perf);
+
+	if (sde_kms->catalog) {
+		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
+			u32 vbif_idx = sde_kms->catalog->vbif[i].id;
+
+			if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
+				sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
+		}
+	}
+
+	if (sde_kms->rm_init)
+		sde_rm_destroy(&sde_kms->rm);
+	sde_kms->rm_init = false;
+
+	if (sde_kms->catalog)
+		sde_hw_catalog_deinit(sde_kms->catalog);
+	sde_kms->catalog = NULL;
+
+	if (sde_kms->core_client)
+		sde_power_client_destroy(&priv->phandle, sde_kms->core_client);
+	sde_kms->core_client = NULL;
+
+	if (sde_kms->vbif[VBIF_NRT])
+		msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
+	sde_kms->vbif[VBIF_NRT] = NULL;
+
+	if (sde_kms->vbif[VBIF_RT])
+		msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
+	sde_kms->vbif[VBIF_RT] = NULL;
+
+	if (sde_kms->mmio)
+		msm_iounmap(pdev, sde_kms->mmio);
+	sde_kms->mmio = NULL;
+}
+
+/* Final kms destructor: tear down hardware state and free the kms */
+static void sde_kms_destroy(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+	if (!dev) {
+		SDE_ERROR("invalid device\n");
+		return;
+	}
+
+	_sde_kms_hw_destroy(sde_kms, dev->platformdev);
+	kfree(sde_kms);
+}
+
+/*
+ * drm file preclose hook: cancel any page flips still pending on behalf
+ * of the closing file so its events are not delivered after close.
+ */
+static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	unsigned i;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		sde_crtc_cancel_pending_flip(priv->crtcs[i], file);
+}
+
+/* SDE implementation of the msm_kms function table */
+static const struct msm_kms_funcs kms_funcs = {
+	.hw_init         = sde_kms_hw_init,
+	.postinit        = sde_kms_postinit,
+	.irq_preinstall  = sde_irq_preinstall,
+	.irq_postinstall = sde_irq_postinstall,
+	.irq_uninstall   = sde_irq_uninstall,
+	.irq             = sde_irq,
+	.preclose        = sde_kms_preclose,
+	.prepare_fence   = sde_kms_prepare_fence,
+	.prepare_commit  = sde_kms_prepare_commit,
+	.commit          = sde_kms_commit,
+	.complete_commit = sde_kms_complete_commit,
+	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
+	.enable_vblank   = sde_kms_enable_vblank,
+	.disable_vblank  = sde_kms_disable_vblank,
+	.check_modified_format = sde_format_check_modified_format,
+	.get_format      = sde_get_msm_format,
+	.round_pixclk    = sde_kms_round_pixclk,
+	.destroy         = sde_kms_destroy,
+};
+
+/* the caller api needs to turn on clock before calling it */
+/* caches the hardware revision read from offset 0x0 of the MDP region */
+static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
+{
+	sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
+}
+
+/*
+ * Detach, unregister and destroy every mmu created by
+ * _sde_kms_mmu_init(), iterating in reverse order. Safe to call on a
+ * partially initialized array (NULL slots are skipped).
+ * Returns: 0 always
+ */
+static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
+{
+	struct msm_mmu *mmu;
+	int i;
+
+	for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) {
+		if (!sde_kms->mmu[i])
+			continue;
+
+		mmu = sde_kms->mmu[i];
+		msm_unregister_mmu(sde_kms->dev, mmu);
+		mmu->funcs->detach(mmu, (const char **)iommu_ports,
+				ARRAY_SIZE(iommu_ports));
+		mmu->funcs->destroy(mmu);
+		/* fix: clear the pointer slot with NULL, not 0 */
+		sde_kms->mmu[i] = NULL;
+		sde_kms->mmu_id[i] = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * _sde_kms_mmu_init - create, attach and register an smmu context for
+ * each msm smmu domain. A missing domain is tolerated (logged and
+ * skipped); an attach/register failure unwinds everything created so
+ * far via _sde_kms_mmu_destroy().
+ * Returns: 0 on success, negative error code otherwise
+ */
+static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
+{
+	struct msm_mmu *mmu;
+	int i, ret;
+
+	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+		mmu = msm_smmu_new(sde_kms->dev->dev, i);
+		if (IS_ERR(mmu)) {
+			/* MMU's can be optional depending on platform */
+			ret = PTR_ERR(mmu);
+			DRM_INFO("failed to init iommu id %d: rc: %d\n", i,
+					ret);
+			continue;
+		}
+
+		ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
+				ARRAY_SIZE(iommu_ports));
+		if (ret) {
+			SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
+			mmu->funcs->destroy(mmu);
+			goto fail;
+		}
+
+		sde_kms->mmu_id[i] = msm_register_mmu(sde_kms->dev, mmu);
+		if (sde_kms->mmu_id[i] < 0) {
+			ret = sde_kms->mmu_id[i];
+			SDE_ERROR("failed to register sde iommu %d: %d\n",
+					i, ret);
+			mmu->funcs->detach(mmu, (const char **)iommu_ports,
+					ARRAY_SIZE(iommu_ports));
+			goto fail;
+		}
+
+		sde_kms->mmu[i] = mmu;
+	}
+
+	return 0;
+fail:
+	_sde_kms_mmu_destroy(sde_kms);
+
+	return ret;
+}
+
+/*
+ * sde_kms_hw_init - main kms bring-up: map register regions, create the
+ * power client, read the hardware catalog, and construct the resource
+ * manager, vbif, mmu, debugfs, perf and drm mode objects. All failure
+ * paths funnel through _sde_kms_hw_destroy() which tolerates partial
+ * initialization.
+ * @kms: pointer to kms context
+ * Returns: 0 on success, negative error code otherwise
+ */
+static int sde_kms_hw_init(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	int i, rc = -EINVAL;
+
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		goto end;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+	if (!dev || !dev->platformdev) {
+		SDE_ERROR("invalid device\n");
+		goto end;
+	}
+
+	priv = dev->dev_private;
+	if (!priv) {
+		SDE_ERROR("invalid private data\n");
+		goto end;
+	}
+
+	sde_kms->mmio = msm_ioremap(dev->platformdev, "mdp_phys", "SDE");
+	if (IS_ERR(sde_kms->mmio)) {
+		rc = PTR_ERR(sde_kms->mmio);
+		SDE_ERROR("mdp register memory map failed: %d\n", rc);
+		sde_kms->mmio = NULL;
+		goto error;
+	}
+	/* NOTE(review): %p prints a raw kernel address; consider %pK */
+	DRM_INFO("mapped mdp address space @%p\n", sde_kms->mmio);
+
+	sde_kms->vbif[VBIF_RT] = msm_ioremap(dev->platformdev,
+			"vbif_phys", "VBIF");
+	if (IS_ERR(sde_kms->vbif[VBIF_RT])) {
+		rc = PTR_ERR(sde_kms->vbif[VBIF_RT]);
+		SDE_ERROR("vbif register memory map failed: %d\n", rc);
+		sde_kms->vbif[VBIF_RT] = NULL;
+		goto error;
+	}
+
+	/* non-realtime vbif is optional on some targets */
+	sde_kms->vbif[VBIF_NRT] = msm_ioremap(dev->platformdev,
+			"vbif_nrt_phys", "VBIF_NRT");
+	if (IS_ERR(sde_kms->vbif[VBIF_NRT])) {
+		sde_kms->vbif[VBIF_NRT] = NULL;
+		SDE_DEBUG("VBIF NRT is not defined");
+	}
+
+	/* NOTE(review): PTR_ERR() yields 0 for a plain NULL, so the
+	 * IS_ERR_OR_NULL sites below would return success on a NULL
+	 * result - confirm these constructors never return NULL
+	 */
+	sde_kms->core_client = sde_power_client_create(&priv->phandle, "core");
+	if (IS_ERR_OR_NULL(sde_kms->core_client)) {
+		rc = PTR_ERR(sde_kms->core_client);
+		SDE_ERROR("sde power client create failed: %d\n", rc);
+		sde_kms->core_client = NULL;
+		goto error;
+	}
+
+	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+		true);
+	if (rc) {
+		SDE_ERROR("resource enable failed: %d\n", rc);
+		goto error;
+	}
+
+	_sde_kms_core_hw_rev_init(sde_kms);
+
+	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);
+
+	sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
+	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
+		rc = PTR_ERR(sde_kms->catalog);
+		SDE_ERROR("catalog init failed: %d\n", rc);
+		sde_kms->catalog = NULL;
+		goto power_error;
+	}
+
+	rc = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio,
+			sde_kms->dev);
+	if (rc) {
+		SDE_ERROR("rm init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	sde_kms->rm_init = true;
+
+	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
+	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
+		rc = PTR_ERR(sde_kms->hw_mdp);
+		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
+		sde_kms->hw_mdp = NULL;
+		goto power_error;
+	}
+
+	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
+		u32 vbif_idx = sde_kms->catalog->vbif[i].id;
+
+		/* fix: store at hw_vbif[vbif_idx], matching the error
+		 * check below and the cleanup in _sde_kms_hw_destroy()
+		 */
+		sde_kms->hw_vbif[vbif_idx] = sde_hw_vbif_init(vbif_idx,
+				sde_kms->vbif[vbif_idx], sde_kms->catalog);
+		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
+			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
+			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
+			sde_kms->hw_vbif[vbif_idx] = NULL;
+			goto power_error;
+		}
+	}
+
+	/*
+	 * Now we need to read the HW catalog and initialize resources such as
+	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
+	 */
+	rc = _sde_kms_mmu_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	/*
+	 * NOTE: Calling sde_debugfs_init here so that the drm_minor device for
+	 * 'primary' is already created.
+	 */
+	rc = _sde_debugfs_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("sde_debugfs init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
+			&priv->phandle, priv->pclient, "core_clk_src",
+			sde_kms->debugfs_debug);
+	if (rc) {
+		SDE_ERROR("failed to init perf %d\n", rc);
+		goto perf_err;
+	}
+
+	/*
+	 * _sde_kms_drm_obj_init should create the DRM related objects
+	 * i.e. CRTCs, planes, encoders, connectors and so forth
+	 */
+	rc = _sde_kms_drm_obj_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("modeset init failed: %d\n", rc);
+		goto drm_obj_init_err;
+	}
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	/*
+	 * max crtc width is equal to the max mixer width * 2 and max height is
+	 * is 4K
+	 */
+	dev->mode_config.max_width = sde_kms->catalog->max_mixer_width * 2;
+	dev->mode_config.max_height = 4096;
+
+	/*
+	 * Support format modifiers for compression etc.
+	 */
+	dev->mode_config.allow_fb_modifiers = true;
+
+	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
+		rc = PTR_ERR(sde_kms->hw_intr);
+		SDE_ERROR("hw_intr init failed: %d\n", rc);
+		sde_kms->hw_intr = NULL;
+		goto hw_intr_init_err;
+	}
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+	return 0;
+
+hw_intr_init_err:
+	_sde_kms_drm_obj_destroy(sde_kms);
+drm_obj_init_err:
+	sde_core_perf_destroy(&sde_kms->perf);
+perf_err:
+power_error:
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+error:
+	_sde_kms_hw_destroy(sde_kms, dev->platformdev);
+end:
+	return rc;
+}
+
+/*
+ * sde_kms_init - allocate and minimally initialize the sde kms object;
+ * hardware bring-up is deferred to the .hw_init callback (kms_funcs).
+ * @dev: drm device this kms belongs to
+ * Returns: base msm_kms pointer, or ERR_PTR on failure
+ */
+struct msm_kms *sde_kms_init(struct drm_device *dev)
+{
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!dev || !dev->dev_private) {
+		SDE_ERROR("drm device node invalid\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	priv = dev->dev_private;
+
+	sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
+	if (!sde_kms) {
+		SDE_ERROR("failed to allocate sde kms\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	msm_kms_init(&sde_kms->base, &kms_funcs);
+	sde_kms->dev = dev;
+
+	return &sde_kms->base;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
new file mode 100644
index 000000000000..bf127ffe9eb6
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __SDE_KMS_H__
+#define __SDE_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "sde_dbg.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_ctl.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_interrupts.h"
+#include "sde_hw_wb.h"
+#include "sde_hw_top.h"
+#include "sde_rm.h"
+#include "sde_power_handle.h"
+#include "sde_irq.h"
+#include "sde_core_perf.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * SDE_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define SDE_DEBUG(fmt, ...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_KMS)) \
+ drm_ut_debug_printk(__func__, fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * SDE_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define SDE_DEBUG_DRIVER(fmt, ...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+ drm_ut_debug_printk(__func__, fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define SDE_ERROR(fmt, ...) pr_err("[sde error]" fmt, ##__VA_ARGS__)
+
+#define POPULATE_RECT(rect, a, b, c, d, Q16_flag) \
+ do { \
+ (rect)->x = (Q16_flag) ? (a) >> 16 : (a); \
+ (rect)->y = (Q16_flag) ? (b) >> 16 : (b); \
+ (rect)->w = (Q16_flag) ? (c) >> 16 : (c); \
+ (rect)->h = (Q16_flag) ? (d) >> 16 : (d); \
+ } while (0)
+
+#define CHECK_LAYER_BOUNDS(offset, size, max_size) \
+ (((size) > (max_size)) || ((offset) > ((max_size) - (size))))
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ * This macro is similar to the standard ktime_compare() function, but
+ * attempts to also handle ktime overflows.
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+ ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
+
+#define SDE_NAME_SIZE 12
+
+/*
+ * struct sde_irq_callback - IRQ callback handlers
+ * @list: list to callback
+ * @func: intr handler
+ * @arg: argument for the handler
+ */
+struct sde_irq_callback {
+ struct list_head list;
+ void (*func)(void *arg, int irq_idx);
+ void *arg;
+};
+
/**
 * struct sde_irq: IRQ structure contains callback registration info
 * @total_irqs: total number of irq_idx obtained from HW interrupts mapping
 * @irq_cb_tbl: array of IRQ callbacks setting
 * @enable_counts: array of IRQ enable counts
 * @irq_counts: array of IRQ counts (for statistics)
 * @cb_lock: callback lock
 * @debugfs_file: debugfs file for irq statistics
 */
struct sde_irq {
	u32 total_irqs;
	struct list_head *irq_cb_tbl;
	atomic_t *enable_counts;
	atomic_t *irq_counts;
	spinlock_t cb_lock;
	struct dentry *debugfs_file;
};
+
/**
 * struct sde_kms - SDE-specific KMS state wrapping the generic msm_kms base;
 * recover from a struct msm_kms pointer with to_sde_kms().
 */
struct sde_kms {
	struct msm_kms base;
	struct drm_device *dev;
	/* hardware core revision */
	int core_rev;
	/* parsed hardware capability catalog (see sde_hw_catalog.h) */
	struct sde_mdss_cfg *catalog;

	/* per-SMMU-domain mmu handles and their ids */
	struct msm_mmu *mmu[MSM_SMMU_DOMAIN_MAX];
	int mmu_id[MSM_SMMU_DOMAIN_MAX];
	/* client handle passed to sde_power_resource_enable() for core votes */
	struct sde_power_client *core_client;

	/* directory entry for debugfs */
	void *debugfs_root;
	struct dentry *debugfs_debug;
	struct dentry *debugfs_danger;
	struct dentry *debugfs_vbif;

	/* io/register spaces: */
	void __iomem *mmio, *vbif[VBIF_MAX];

	struct regulator *vdd;
	struct regulator *mmagic;
	struct regulator *venus;

	struct sde_irq_controller irq_controller;

	/* hw interrupt handle and callback bookkeeping (struct sde_irq) */
	struct sde_hw_intr *hw_intr;
	struct sde_irq irq_obj;

	/* core performance management state */
	struct sde_core_perf perf;

	/* resource manager; rm_init tracks whether it was initialized */
	struct sde_rm rm;
	bool rm_init;

	struct sde_hw_vbif *hw_vbif[VBIF_MAX];
	struct sde_hw_mdp *hw_mdp;
	/* opaque display handles; the counts give the array lengths */
	int dsi_display_count;
	void **dsi_displays;
	int wb_display_count;
	void **wb_displays;

	/* NOTE(review): presumably gates danger/safe debugfs control - confirm */
	bool has_danger_ctrl;
};
+
+struct vsync_info {
+ u32 frame_count;
+ u32 line_count;
+};
+
+#define to_sde_kms(x) container_of(x, struct sde_kms, base)
+
+/**
+ * sde_is_custom_client - whether or not to enable non-standard customizations
+ *
+ * Return: Whether or not the 'sdeclient' module parameter was set on boot up
+ */
+bool sde_is_custom_client(void);
+
+/**
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at,
+ *
+ * Documentation/filesystems/debugfs.txt
+ *
+ * @sde_debugfs_setup_regset32: Initialize data for sde_debugfs_create_regset32
+ * @sde_debugfs_create_regset32: Create 32-bit register dump file
+ * @sde_debugfs_get_root: Get root dentry for SDE_KMS's debugfs node
+ */
+
+/**
+ * Companion structure for sde_debugfs_create_regset32. Do not initialize the
+ * members of this structure explicitly; use sde_debugfs_setup_regset32 instead.
+ */
+struct sde_debugfs_regset32 {
+ uint32_t offset;
+ uint32_t blk_len;
+ struct sde_kms *sde_kms;
+};
+
+/**
+ * sde_debugfs_setup_regset32 - Initialize register block definition for debugfs
+ * This function is meant to initialize sde_debugfs_regset32 structures for use
+ * with sde_debugfs_create_regset32.
+ * @regset: opaque register definition structure
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @sde_kms: pointer to sde kms structure
+ */
+void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
+ uint32_t offset, uint32_t length, struct sde_kms *sde_kms);
+
+/**
+ * sde_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets do not need to be provided. The 'read' function simply outputs
+ * sequential register values over a specified range.
+ *
+ * Similar to the related debugfs_create_regset32 API, the structure pointed to
+ * by regset needs to persist for the lifetime of the created file. The calling
+ * code is responsible for initialization/management of this structure.
+ *
+ * The structure pointed to by regset is meant to be opaque. Please use
+ * sde_debugfs_setup_regset32 to initialize it.
+ *
+ * @name: File name within debugfs
+ * @mode: File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @regset: Pointer to persistent register block definition
+ *
+ * Return: dentry pointer for newly created file, use either debugfs_remove()
+ * or debugfs_remove_recursive() (on a parent directory) to remove the
+ * file
+ */
+void *sde_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent, struct sde_debugfs_regset32 *regset);
+
+/**
+ * sde_debugfs_get_root - Return root directory entry for SDE's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @sde_kms: Pointer to SDE's KMS structure
+ *
+ * Return: dentry pointer for SDE's debugfs location
+ */
+void *sde_debugfs_get_root(struct sde_kms *sde_kms);
+
+/**
+ * SDE info management functions
+ * These functions/definitions allow for building up a 'sde_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define SDE_KMS_INFO_MAX_SIZE 4096
+
+/**
+ * struct sde_kms_info - connector information structure container
+ * @data: Array of information character data
+ * @len: Current length of information data
+ * @staged_len: Temporary data buffer length, commit to
+ * len using sde_kms_info_stop
+ * @start: Whether or not a partial data entry was just started
+ */
+struct sde_kms_info {
+ char data[SDE_KMS_INFO_MAX_SIZE];
+ uint32_t len;
+ uint32_t staged_len;
+ bool start;
+};
+
+/**
+ * SDE_KMS_INFO_DATA - Macro for accessing sde_kms_info data bytes
+ * @S: Pointer to sde_kms_info structure
+ * Returns: Pointer to byte data
+ */
+#define SDE_KMS_INFO_DATA(S) ((S) ? ((struct sde_kms_info *)(S))->data : 0)
+
+/**
+ * SDE_KMS_INFO_DATALEN - Macro for accessing sde_kms_info data length
+ * @S: Pointer to sde_kms_info structure
+ * Returns: Size of available byte data
+ */
+#define SDE_KMS_INFO_DATALEN(S) ((S) ? ((struct sde_kms_info *)(S))->len : 0)
+
+/**
+ * sde_kms_info_reset - reset sde_kms_info structure
+ * @info: Pointer to sde_kms_info structure
+ */
+void sde_kms_info_reset(struct sde_kms_info *info);
+
+/**
+ * sde_kms_info_add_keyint - add integer value to 'sde_kms_info'
+ * @info: Pointer to sde_kms_info structure
+ * @key: Pointer to key string
+ * @value: Signed 32-bit integer value
+ */
+void sde_kms_info_add_keyint(struct sde_kms_info *info,
+ const char *key,
+ int32_t value);
+
+/**
+ * sde_kms_info_add_keystr - add string value to 'sde_kms_info'
+ * @info: Pointer to sde_kms_info structure
+ * @key: Pointer to key string
+ * @value: Pointer to string value
+ */
+void sde_kms_info_add_keystr(struct sde_kms_info *info,
+ const char *key,
+ const char *value);
+
+/**
+ * sde_kms_info_start - begin adding key to 'sde_kms_info'
+ * Usage:
+ * sde_kms_info_start(key)
+ * sde_kms_info_append(val_1)
+ * ...
+ * sde_kms_info_append(val_n)
+ * sde_kms_info_stop
+ * @info: Pointer to sde_kms_info structure
+ * @key: Pointer to key string
+ */
+void sde_kms_info_start(struct sde_kms_info *info,
+ const char *key);
+
+/**
+ * sde_kms_info_append - append value string to 'sde_kms_info'
+ * Usage:
+ * sde_kms_info_start(key)
+ * sde_kms_info_append(val_1)
+ * ...
+ * sde_kms_info_append(val_n)
+ * sde_kms_info_stop
+ * @info: Pointer to sde_kms_info structure
+ * @str: Pointer to partial value string
+ */
+void sde_kms_info_append(struct sde_kms_info *info,
+ const char *str);
+
+/**
+ * sde_kms_info_append_format - append format code string to 'sde_kms_info'
+ * Usage:
+ * sde_kms_info_start(key)
+ * sde_kms_info_append_format(fourcc, modifier)
+ * ...
+ * sde_kms_info_stop
+ * @info: Pointer to sde_kms_info structure
+ * @pixel_format: FOURCC format code
+ * @modifier: 64-bit drm format modifier
+ */
+void sde_kms_info_append_format(struct sde_kms_info *info,
+ uint32_t pixel_format,
+ uint64_t modifier);
+
+/**
+ * sde_kms_info_stop - finish adding key to 'sde_kms_info'
+ * Usage:
+ * sde_kms_info_start(key)
+ * sde_kms_info_append(val_1)
+ * ...
+ * sde_kms_info_append(val_n)
+ * sde_kms_info_stop
+ * @info: Pointer to sde_kms_info structure
+ */
+void sde_kms_info_stop(struct sde_kms_info *info);
+
+/**
+ * Vblank enable/disable functions
+ */
+int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+#endif /* __SDE_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_kms_utils.c b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
new file mode 100644
index 000000000000..6e29c09deb40
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
@@ -0,0 +1,153 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "sde-kms_utils:[%s] " fmt, __func__
+
+#include "sde_kms.h"
+
+void sde_kms_info_reset(struct sde_kms_info *info)
+{
+ if (info) {
+ info->len = 0;
+ info->staged_len = 0;
+ }
+}
+
+void sde_kms_info_add_keyint(struct sde_kms_info *info,
+ const char *key,
+ int32_t value)
+{
+ uint32_t len;
+
+ if (info && key) {
+ len = snprintf(info->data + info->len,
+ SDE_KMS_INFO_MAX_SIZE - info->len,
+ "%s=%d\n",
+ key,
+ value);
+
+ /* check if snprintf truncated the string */
+ if ((info->len + len) < SDE_KMS_INFO_MAX_SIZE)
+ info->len += len;
+ }
+}
+
+void sde_kms_info_add_keystr(struct sde_kms_info *info,
+ const char *key,
+ const char *value)
+{
+ uint32_t len;
+
+ if (info && key && value) {
+ len = snprintf(info->data + info->len,
+ SDE_KMS_INFO_MAX_SIZE - info->len,
+ "%s=%s\n",
+ key,
+ value);
+
+ /* check if snprintf truncated the string */
+ if ((info->len + len) < SDE_KMS_INFO_MAX_SIZE)
+ info->len += len;
+ }
+}
+
/**
 * sde_kms_info_start - stage the "key=" prefix of a multi-part entry
 * @info: Pointer to sde_kms_info structure; NULL is a no-op
 * @key: Pointer to key string; NULL is a no-op
 *
 * Writes "key=" after the committed data and records the staged length.
 * Values are then added with sde_kms_info_append*() and the whole entry is
 * committed to 'len' by sde_kms_info_stop().
 */
void sde_kms_info_start(struct sde_kms_info *info,
		const char *key)
{
	uint32_t len;

	if (info && key) {
		len = snprintf(info->data + info->len,
				SDE_KMS_INFO_MAX_SIZE - info->len,
				"%s=",
				key);

		/* NOTE(review): 'start' is set even when snprintf truncates
		 * and staged_len below is left unchanged - confirm this is
		 * the intended overflow behavior.
		 */
		info->start = true;

		/* check if snprintf truncated the string */
		if ((info->len + len) < SDE_KMS_INFO_MAX_SIZE)
			info->staged_len = info->len + len;
	}
}
+
+void sde_kms_info_append(struct sde_kms_info *info,
+ const char *str)
+{
+ uint32_t len;
+
+ if (info) {
+ len = snprintf(info->data + info->staged_len,
+ SDE_KMS_INFO_MAX_SIZE - info->staged_len,
+ "%s",
+ str);
+
+ /* check if snprintf truncated the string */
+ if ((info->staged_len + len) < SDE_KMS_INFO_MAX_SIZE) {
+ info->staged_len += len;
+ info->start = false;
+ }
+ }
+}
+
/**
 * sde_kms_info_append_format - append a pixel-format token to a staged entry
 * @info: Pointer to sde_kms_info structure; NULL is a no-op
 * @pixel_format: FOURCC code, emitted as its four ASCII characters
 * @modifier: 64-bit drm format modifier; when non-zero the token becomes
 *            "<fourcc>/<hi>/<lo>" where hi is the top 8 bits and lo the low
 *            56 bits of the modifier, both in hex
 *
 * Tokens after the first are separated by a leading space; 'start' tracks
 * whether this is the first token since sde_kms_info_start().
 */
void sde_kms_info_append_format(struct sde_kms_info *info,
		uint32_t pixel_format,
		uint64_t modifier)
{
	uint32_t len;

	if (!info)
		return;

	if (modifier) {
		/* fourcc plus split modifier for non-linear layouts */
		len = snprintf(info->data + info->staged_len,
				SDE_KMS_INFO_MAX_SIZE - info->staged_len,
				info->start ?
				"%c%c%c%c/%llX/%llX" : " %c%c%c%c/%llX/%llX",
				(pixel_format >> 0) & 0xFF,
				(pixel_format >> 8) & 0xFF,
				(pixel_format >> 16) & 0xFF,
				(pixel_format >> 24) & 0xFF,
				(modifier >> 56) & 0xFF,
				modifier & ((1ULL << 56) - 1));
	} else {
		/* bare fourcc characters when no modifier applies */
		len = snprintf(info->data + info->staged_len,
				SDE_KMS_INFO_MAX_SIZE - info->staged_len,
				info->start ?
				"%c%c%c%c" : " %c%c%c%c",
				(pixel_format >> 0) & 0xFF,
				(pixel_format >> 8) & 0xFF,
				(pixel_format >> 16) & 0xFF,
				(pixel_format >> 24) & 0xFF);
	}

	/* check if snprintf truncated the string */
	if ((info->staged_len + len) < SDE_KMS_INFO_MAX_SIZE) {
		info->staged_len += len;
		info->start = false;
	}
}
+
+void sde_kms_info_stop(struct sde_kms_info *info)
+{
+ uint32_t len;
+
+ if (info) {
+ /* insert final delimiter */
+ len = snprintf(info->data + info->staged_len,
+ SDE_KMS_INFO_MAX_SIZE - info->staged_len,
+ "\n");
+
+ /* check if snprintf truncated the string */
+ if ((info->staged_len + len) < SDE_KMS_INFO_MAX_SIZE)
+ info->len = info->staged_len + len;
+ }
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
new file mode 100644
index 000000000000..3ca74926cfac
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -0,0 +1,2400 @@
+/*
+ * Copyright (C) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <uapi/drm/sde_drm.h>
+#include <uapi/drm/msm_drm_pp.h>
+
+#include "msm_prop.h"
+
+#include "sde_kms.h"
+#include "sde_fence.h"
+#include "sde_formats.h"
+#include "sde_hw_sspp.h"
+#include "sde_trace.h"
+#include "sde_crtc.h"
+#include "sde_vbif.h"
+#include "sde_plane.h"
+#include "sde_color_processing.h"
+
+#define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_PLANE(pl, fmt, ...) SDE_ERROR("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
+#define PHASE_STEP_SHIFT 21
+#define PHASE_STEP_UNIT_SCALE ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL 15
+
+#define SHARP_STRENGTH_DEFAULT 32
+#define SHARP_EDGE_THR_DEFAULT 112
+#define SHARP_SMOOTH_THR_DEFAULT 8
+#define SHARP_NOISE_THR_DEFAULT 2
+
+#define SDE_NAME_SIZE 12
+
+#define SDE_PLANE_COLOR_FILL_FLAG BIT(31)
+
+/* dirty bits for update function */
+#define SDE_PLANE_DIRTY_RECTS 0x1
+#define SDE_PLANE_DIRTY_FORMAT 0x2
+#define SDE_PLANE_DIRTY_SHARPEN 0x4
+#define SDE_PLANE_DIRTY_ALL 0xFFFFFFFF
+
+/**
+ * enum sde_plane_qos - Different qos configurations for each pipe
+ *
+ * @SDE_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @SDE_PLANE_QOS_VBLANK_AMORTIZE: Enables Amortization within pipe.
+ * this configuration is mutually exclusive from VBLANK_CTRL.
+ * @SDE_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum sde_plane_qos {
+ SDE_PLANE_QOS_VBLANK_CTRL = BIT(0),
+ SDE_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
+ SDE_PLANE_QOS_PANIC_CTRL = BIT(2),
+};
+
/*
 * struct sde_plane - local sde plane structure
 * @base: Embedded drm plane object; recover with to_sde_plane()
 * @mmu_id: SMMU domain id passed to sde_format_populate_layout()
 * @lock: Plane-level mutex
 * @pipe: SSPP pipe id backing this plane
 * @features: capabilities from catalog
 * @nformats: Number of valid entries in @formats
 * @formats: Supported pixel format list
 * @pipe_hw: Hardware pipe handle; register programming goes through its ops
 * @pipe_cfg: Current source/destination rectangle and layout configuration
 * @sharp_cfg: Sharpening configuration
 * @scaler3_cfg: QSEED3 scaler config; LUT blobs resolved from properties
 * @pipe_qos_cfg: Danger/safe/creq QoS configuration written to hardware
 * @color_fill: Color fill value (SDE_PLANE_COLOR_FILL_FLAG marks it active)
 * @is_error: Set when an input-fence wait times out or fails
 * @is_rt_pipe: True for real-time pipes; false selects the NRT QoS LUTs
 *              and disables vblank/danger-safe QoS control
 * @pixel_ext: Pixel extension configuration
 * @pixel_ext_usr: True when the pixel extension came from user config
 * @csc_cfg: Decoded user configuration for csc
 * @csc_usr_ptr: Points to csc_cfg if valid user config available
 * @csc_ptr: Points to sde_csc_cfg structure to use for current
 * @pipe_sblk: Catalog sub-block caps (LUT defaults, pixel RAM size, etc.)
 * @pipe_name: Printable pipe name
 * @property_info: Property management helper state
 * @property_data: Per-property metadata
 * @blob_info: Property blob for plane information
 * @debugfs_root: Plane debugfs directory
 * @debugfs_src: Register dump range for the source block
 * @debugfs_scaler: Register dump range for the scaler block
 * @debugfs_csc: Register dump range for the csc block
 */
struct sde_plane {
	struct drm_plane base;

	int mmu_id;

	struct mutex lock;

	enum sde_sspp pipe;
	uint32_t features;      /* capabilities from catalog */
	uint32_t nformats;
	uint32_t formats[64];

	struct sde_hw_pipe *pipe_hw;
	struct sde_hw_pipe_cfg pipe_cfg;
	struct sde_hw_sharp_cfg sharp_cfg;
	struct sde_hw_scaler3_cfg *scaler3_cfg;
	struct sde_hw_pipe_qos_cfg pipe_qos_cfg;
	uint32_t color_fill;
	bool is_error;
	bool is_rt_pipe;

	struct sde_hw_pixel_ext pixel_ext;
	bool pixel_ext_usr;

	struct sde_csc_cfg csc_cfg;
	struct sde_csc_cfg *csc_usr_ptr;
	struct sde_csc_cfg *csc_ptr;

	const struct sde_sspp_sub_blks *pipe_sblk;

	char pipe_name[SDE_NAME_SIZE];

	struct msm_property_info property_info;
	struct msm_property_data property_data[PLANE_PROP_COUNT];
	struct drm_property_blob *blob_info;

	/* debugfs related stuff */
	struct dentry *debugfs_root;
	struct sde_debugfs_regset32 debugfs_src;
	struct sde_debugfs_regset32 debugfs_scaler;
	struct sde_debugfs_regset32 debugfs_csc;
};
+
+#define to_sde_plane(x) container_of(x, struct sde_plane, base)
+
+static bool sde_plane_enabled(struct drm_plane_state *state)
+{
+ return state && state->fb && state->crtc;
+}
+
/**
 * _sde_plane_calc_fill_level - calculate fill level of the given source format
 * @plane: Pointer to drm plane
 * @fmt: Pointer to source buffer format
 * @src_width: width of source buffer
 *
 * The fill level is derived from the pipe's fixed pixel RAM size, the source
 * width and the format's bytes-per-pixel; pseudo-planar NV12 gets half the
 * RAM budget, other pseudo-planar formats the full budget, and everything
 * else double.
 *
 * Return: fill level corresponding to the source buffer/format or 0 if error
 */
static inline int _sde_plane_calc_fill_level(struct drm_plane *plane,
		const struct sde_format *fmt, u32 src_width)
{
	struct sde_plane *psde;
	u32 fixed_buff_size;
	u32 total_fl;

	if (!plane || !fmt) {
		SDE_ERROR("invalid arguments\n");
		return 0;
	}

	psde = to_sde_plane(plane);
	fixed_buff_size = psde->pipe_sblk->pixel_ram_size;

	/* NOTE(review): the "+ 32" padding term appears in every branch;
	 * presumably hardware fetch overhead - confirm against HW docs.
	 */
	if (fmt->fetch_planes == SDE_PLANE_PSEUDO_PLANAR) {
		if (fmt->chroma_sample == SDE_CHROMA_420) {
			/* NV12 */
			total_fl = (fixed_buff_size / 2) /
				((src_width + 32) * fmt->bpp);
		} else {
			/* non NV12 */
			total_fl = (fixed_buff_size) /
				((src_width + 32) * fmt->bpp);
		}
	} else {
		total_fl = (fixed_buff_size * 2) /
			((src_width + 32) * fmt->bpp);
	}

	SDE_DEBUG("plane%u: pnum:%d fmt:%x w:%u fl:%u\n",
			plane->base.id, psde->pipe - SSPP_VIG0,
			fmt->base.pixel_format, src_width, total_fl);

	return total_fl;
}
+
+/**
+ * _sde_plane_get_qos_lut_linear - get linear LUT mapping
+ * @total_fl: fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+static inline u32 _sde_plane_get_qos_lut_linear(u32 total_fl)
+{
+ u32 qos_lut;
+
+ if (total_fl <= 4)
+ qos_lut = 0x1B;
+ else if (total_fl <= 5)
+ qos_lut = 0x5B;
+ else if (total_fl <= 6)
+ qos_lut = 0x15B;
+ else if (total_fl <= 7)
+ qos_lut = 0x55B;
+ else if (total_fl <= 8)
+ qos_lut = 0x155B;
+ else if (total_fl <= 9)
+ qos_lut = 0x555B;
+ else if (total_fl <= 10)
+ qos_lut = 0x1555B;
+ else if (total_fl <= 11)
+ qos_lut = 0x5555B;
+ else if (total_fl <= 12)
+ qos_lut = 0x15555B;
+ else
+ qos_lut = 0x55555B;
+
+ return qos_lut;
+}
+
+/**
+ * _sde_plane_get_qos_lut_macrotile - get macrotile LUT mapping
+ * @total_fl: fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+static inline u32 _sde_plane_get_qos_lut_macrotile(u32 total_fl)
+{
+ u32 qos_lut;
+
+ if (total_fl <= 10)
+ qos_lut = 0x1AAff;
+ else if (total_fl <= 11)
+ qos_lut = 0x5AAFF;
+ else if (total_fl <= 12)
+ qos_lut = 0x15AAFF;
+ else
+ qos_lut = 0x55AAFF;
+
+ return qos_lut;
+}
+
/**
 * _sde_plane_set_qos_lut - set QoS (creq) LUT of the given plane
 * @plane: Pointer to drm plane
 * @fb: Pointer to framebuffer associated with the given plane
 *
 * Non-realtime pipes take the fixed NRT LUT from the catalog; realtime pipes
 * compute a fill level from the current format/width and pick the linear or
 * macrotile LUT accordingly.  The result is cached in pipe_qos_cfg and
 * written through the pipe's setup_creq_lut op (silently skipped when the
 * hardware block lacks that op).
 */
static void _sde_plane_set_qos_lut(struct drm_plane *plane,
		struct drm_framebuffer *fb)
{
	struct sde_plane *psde;
	const struct sde_format *fmt = NULL;
	u32 qos_lut;
	u32 total_fl = 0;

	if (!plane || !fb) {
		SDE_ERROR("invalid arguments plane %d fb %d\n",
				plane != 0, fb != 0);
		return;
	}

	psde = to_sde_plane(plane);

	if (!psde->pipe_hw || !psde->pipe_sblk) {
		SDE_ERROR("invalid arguments\n");
		return;
	} else if (!psde->pipe_hw->ops.setup_creq_lut) {
		/* hardware block has no creq LUT - nothing to program */
		return;
	}

	if (!psde->is_rt_pipe) {
		qos_lut = psde->pipe_sblk->creq_lut_nrt;
	} else {
		/* realtime: derive the LUT from format and fill level */
		fmt = sde_get_sde_format_ext(
				fb->pixel_format,
				fb->modifier,
				drm_format_num_planes(fb->pixel_format));
		total_fl = _sde_plane_calc_fill_level(plane, fmt,
				psde->pipe_cfg.src_rect.w);

		if (SDE_FORMAT_IS_LINEAR(fmt))
			qos_lut = _sde_plane_get_qos_lut_linear(total_fl);
		else
			qos_lut = _sde_plane_get_qos_lut_macrotile(total_fl);
	}

	psde->pipe_qos_cfg.creq_lut = qos_lut;

	trace_sde_perf_set_qos_luts(psde->pipe - SSPP_VIG0,
			(fmt) ? fmt->base.pixel_format : 0,
			psde->is_rt_pipe, total_fl, qos_lut,
			(fmt) ? SDE_FORMAT_IS_LINEAR(fmt) : 0);

	SDE_DEBUG("plane%u: pnum:%d fmt:%x rt:%d fl:%u lut:0x%x\n",
			plane->base.id,
			psde->pipe - SSPP_VIG0,
			(fmt) ? fmt->base.pixel_format : 0,
			psde->is_rt_pipe, total_fl, qos_lut);

	psde->pipe_hw->ops.setup_creq_lut(psde->pipe_hw, &psde->pipe_qos_cfg);
}
+
/**
 * _sde_plane_set_danger_lut - set danger/safe LUTs of the given plane
 * @plane: Pointer to drm plane
 * @fb: Pointer to framebuffer associated with the given plane
 *
 * Non-realtime pipes take the fixed NRT danger/safe LUTs from the catalog;
 * realtime pipes choose between the linear and tile variants based on the
 * framebuffer format.  Values are cached in pipe_qos_cfg and written through
 * the pipe's setup_danger_safe_lut op (silently skipped when absent).
 */
static void _sde_plane_set_danger_lut(struct drm_plane *plane,
		struct drm_framebuffer *fb)
{
	struct sde_plane *psde;
	const struct sde_format *fmt = NULL;
	u32 danger_lut, safe_lut;

	if (!plane || !fb) {
		SDE_ERROR("invalid arguments\n");
		return;
	}

	psde = to_sde_plane(plane);

	if (!psde->pipe_hw || !psde->pipe_sblk) {
		SDE_ERROR("invalid arguments\n");
		return;
	} else if (!psde->pipe_hw->ops.setup_danger_safe_lut) {
		/* hardware block has no danger/safe LUT - nothing to do */
		return;
	}

	if (!psde->is_rt_pipe) {
		danger_lut = psde->pipe_sblk->danger_lut_nrt;
		safe_lut = psde->pipe_sblk->safe_lut_nrt;
	} else {
		/* realtime: linear vs tile LUTs, chosen by the fb format */
		fmt = sde_get_sde_format_ext(
				fb->pixel_format,
				fb->modifier,
				drm_format_num_planes(fb->pixel_format));

		if (SDE_FORMAT_IS_LINEAR(fmt)) {
			danger_lut = psde->pipe_sblk->danger_lut_linear;
			safe_lut = psde->pipe_sblk->safe_lut_linear;
		} else {
			danger_lut = psde->pipe_sblk->danger_lut_tile;
			safe_lut = psde->pipe_sblk->safe_lut_tile;
		}
	}

	psde->pipe_qos_cfg.danger_lut = danger_lut;
	psde->pipe_qos_cfg.safe_lut = safe_lut;

	trace_sde_perf_set_danger_luts(psde->pipe - SSPP_VIG0,
			(fmt) ? fmt->base.pixel_format : 0,
			(fmt) ? fmt->fetch_mode : 0,
			psde->pipe_qos_cfg.danger_lut,
			psde->pipe_qos_cfg.safe_lut);

	SDE_DEBUG("plane%u: pnum:%d fmt:%x mode:%d luts[0x%x, 0x%x]\n",
		plane->base.id,
		psde->pipe - SSPP_VIG0,
		fmt ? fmt->base.pixel_format : 0,
		fmt ? fmt->fetch_mode : -1,
		psde->pipe_qos_cfg.danger_lut,
		psde->pipe_qos_cfg.safe_lut);

	psde->pipe_hw->ops.setup_danger_safe_lut(psde->pipe_hw,
			&psde->pipe_qos_cfg);
}
+
/**
 * _sde_plane_set_qos_ctrl - set QoS control of the given plane
 * @plane: Pointer to drm plane
 * @enable: true to enable QoS control
 * @flags: QoS control mode (enum sde_plane_qos)
 *
 * Updates the cached pipe_qos_cfg according to @flags and writes it through
 * the pipe's setup_qos_ctrl op.  AMORTIZE overrules a previously requested
 * VBLANK_CTRL, and non-realtime pipes always get vblank and danger-safe
 * control forced off regardless of @flags.
 */
static void _sde_plane_set_qos_ctrl(struct drm_plane *plane,
	bool enable, u32 flags)
{
	struct sde_plane *psde;

	if (!plane) {
		SDE_ERROR("invalid arguments\n");
		return;
	}

	psde = to_sde_plane(plane);

	if (!psde->pipe_hw || !psde->pipe_sblk) {
		SDE_ERROR("invalid arguments\n");
		return;
	} else if (!psde->pipe_hw->ops.setup_qos_ctrl) {
		/* hardware block has no qos control - nothing to program */
		return;
	}

	if (flags & SDE_PLANE_QOS_VBLANK_CTRL) {
		psde->pipe_qos_cfg.creq_vblank = psde->pipe_sblk->creq_vblank;
		psde->pipe_qos_cfg.danger_vblank =
				psde->pipe_sblk->danger_vblank;
		psde->pipe_qos_cfg.vblank_en = enable;
	}

	if (flags & SDE_PLANE_QOS_VBLANK_AMORTIZE) {
		/* this feature overrules previous VBLANK_CTRL */
		psde->pipe_qos_cfg.vblank_en = false;
		psde->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
	}

	if (flags & SDE_PLANE_QOS_PANIC_CTRL)
		psde->pipe_qos_cfg.danger_safe_en = enable;

	/* non-realtime pipes never use vblank or danger-safe control */
	if (!psde->is_rt_pipe) {
		psde->pipe_qos_cfg.vblank_en = false;
		psde->pipe_qos_cfg.danger_safe_en = false;
	}

	SDE_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
		plane->base.id,
		psde->pipe - SSPP_VIG0,
		psde->pipe_qos_cfg.danger_safe_en,
		psde->pipe_qos_cfg.vblank_en,
		psde->pipe_qos_cfg.creq_vblank,
		psde->pipe_qos_cfg.danger_vblank,
		psde->is_rt_pipe);

	psde->pipe_hw->ops.setup_qos_ctrl(psde->pipe_hw,
			&psde->pipe_qos_cfg);
}
+
/**
 * sde_plane_danger_signal_ctrl - toggle danger-safe signalling for a plane
 * @plane: Pointer to drm plane (must have a valid drm device)
 * @enable: true to enable the panic/danger-safe control
 *
 * No-op (still returns success) for non-realtime pipes.  Holds a power vote
 * around the register programming.
 *
 * Return: 0 on success, -EINVAL on invalid arguments
 */
int sde_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
{
	struct sde_plane *psde;
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;

	if (!plane || !plane->dev) {
		SDE_ERROR("invalid arguments\n");
		return -EINVAL;
	}

	priv = plane->dev->dev_private;
	if (!priv || !priv->kms) {
		SDE_ERROR("invalid KMS reference\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(priv->kms);
	psde = to_sde_plane(plane);

	if (!psde->is_rt_pipe)
		goto end;

	/* vote power up, program, vote power down */
	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);

	_sde_plane_set_qos_ctrl(plane, enable, SDE_PLANE_QOS_PANIC_CTRL);

	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);

end:
	return 0;
}
+
/**
 * _sde_plane_set_ot_limit - set OT (outstanding transaction) limit for a plane
 * @plane: Pointer to drm plane
 * @crtc: Pointer to drm crtc; its mode's vrefresh feeds the OT calculation
 *
 * Builds a VBIF OT parameter block from the current pipe configuration
 * (read path, realtime VBIF) and hands it to sde_vbif_set_ot_limit().
 */
static void _sde_plane_set_ot_limit(struct drm_plane *plane,
		struct drm_crtc *crtc)
{
	struct sde_plane *psde;
	struct sde_vbif_set_ot_params ot_params;
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;

	if (!plane || !plane->dev || !crtc) {
		SDE_ERROR("invalid arguments plane %d crtc %d\n",
				plane != 0, crtc != 0);
		return;
	}

	priv = plane->dev->dev_private;
	if (!priv || !priv->kms) {
		SDE_ERROR("invalid KMS reference\n");
		return;
	}

	sde_kms = to_sde_kms(priv->kms);
	psde = to_sde_plane(plane);
	if (!psde->pipe_hw) {
		SDE_ERROR("invalid pipe reference\n");
		return;
	}

	memset(&ot_params, 0, sizeof(ot_params));
	ot_params.xin_id = psde->pipe_hw->cap->xin_id;
	ot_params.num = psde->pipe_hw->idx - SSPP_NONE;
	ot_params.width = psde->pipe_cfg.src_rect.w;
	ot_params.height = psde->pipe_cfg.src_rect.h;
	/* writeback (non-realtime) pipes are flagged as wfd */
	ot_params.is_wfd = !psde->is_rt_pipe;
	ot_params.frame_rate = crtc->mode.vrefresh;
	ot_params.vbif_idx = VBIF_RT;
	ot_params.clk_ctrl = psde->pipe_hw->cap->clk_ctrl;
	/* source pipes fetch, so this is a read-direction limit */
	ot_params.rd = true;

	sde_vbif_set_ot_limit(sde_kms, &ot_params);
}
+
/**
 * _sde_plane_set_input_fence - cache an input fence in the plane state
 * @psde: Pointer to SDE plane object
 * @pstate: Pointer to SDE plane state
 * @fd: Fence fd from the input-fence property
 *
 * Drops the reference on any previously cached fence and stores the new
 * fence pointer resolved from @fd for sde_plane_wait_input_fence() to use.
 */
static void _sde_plane_set_input_fence(struct sde_plane *psde,
		struct sde_plane_state *pstate, uint64_t fd)
{
	if (!psde || !pstate) {
		SDE_ERROR("invalid arg(s), plane %d state %d\n",
				psde != 0, pstate != 0);
		return;
	}

	/* clear previous reference */
	if (pstate->input_fence)
		sde_sync_put(pstate->input_fence);

	/* get fence pointer for later */
	pstate->input_fence = sde_sync_get(fd);

	SDE_DEBUG_PLANE(psde, "0x%llX\n", fd);
}
+
/**
 * sde_plane_wait_input_fence - wait for the plane's cached input fence
 * @plane: Pointer to drm plane with a valid plane state
 * @wait_ms: Maximum time to wait, in milliseconds
 *
 * Waits on the fence cached by _sde_plane_set_input_fence(), if any.
 * A timeout or wait error marks the plane with is_error so later commit
 * handling can react.
 *
 * Return: 0 when there is no fence or it signaled, -ETIME on timeout,
 * -EINVAL on invalid plane/state, or another negative sde_sync_wait() error.
 */
int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms)
{
	struct sde_plane *psde;
	struct sde_plane_state *pstate;
	uint32_t prefix;
	void *input_fence;
	int ret = -EINVAL;

	if (!plane) {
		SDE_ERROR("invalid plane\n");
	} else if (!plane->state) {
		SDE_ERROR_PLANE(to_sde_plane(plane), "invalid state\n");
	} else {
		psde = to_sde_plane(plane);
		pstate = to_sde_plane_state(plane->state);
		input_fence = pstate->input_fence;

		if (input_fence) {
			prefix = sde_sync_get_name_prefix(input_fence);
			ret = sde_sync_wait(input_fence, wait_ms);

			/* log outcome (negated so errors show positive) */
			SDE_EVT32(DRMID(plane), -ret, prefix);

			switch (ret) {
			case 0:
				SDE_DEBUG_PLANE(psde, "signaled\n");
				break;
			case -ETIME:
				SDE_ERROR_PLANE(psde, "%ums timeout on %08X\n",
						wait_ms, prefix);
				psde->is_error = true;
				break;
			default:
				SDE_ERROR_PLANE(psde, "error %d on %08X\n",
						ret, prefix);
				psde->is_error = true;
				break;
			}
		} else {
			/* no fence configured counts as success */
			ret = 0;
		}
	}
	return ret;
}
+
/**
 * _sde_plane_set_scanout - program source buffer addresses for the pipe
 * @plane: Pointer to drm plane
 * @pstate: Pointer to SDE plane state
 * @pipe_cfg: Pointer to pipe config whose layout is (re)populated
 * @fb: Pointer to framebuffer to scan out
 *
 * Resolves the fb layout through the plane's SMMU domain and, on success,
 * writes the source addresses via the pipe's setup_sourceaddress op.
 * -EAGAIN from the layout helper means the addresses are unchanged, so
 * programming is skipped.
 */
static inline void _sde_plane_set_scanout(struct drm_plane *plane,
		struct sde_plane_state *pstate,
		struct sde_hw_pipe_cfg *pipe_cfg,
		struct drm_framebuffer *fb)
{
	struct sde_plane *psde;
	int ret;

	if (!plane || !pstate || !pipe_cfg || !fb) {
		SDE_ERROR(
			"invalid arg(s), plane %d state %d cfg %d fb %d\n",
			plane != 0, pstate != 0, pipe_cfg != 0, fb != 0);
		return;
	}

	psde = to_sde_plane(plane);
	if (!psde->pipe_hw) {
		SDE_ERROR_PLANE(psde, "invalid pipe_hw\n");
		return;
	}

	ret = sde_format_populate_layout(psde->mmu_id, fb, &pipe_cfg->layout);
	if (ret == -EAGAIN)
		SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
	else if (ret)
		SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
	else if (psde->pipe_hw->ops.setup_sourceaddress)
		psde->pipe_hw->ops.setup_sourceaddress(psde->pipe_hw, pipe_cfg);
}
+
/**
 * _sde_plane_setup_scaler3_lut - resolve QSEED3 LUT blobs from properties
 * @psde: Pointer to SDE plane object; scaler3_cfg must be valid
 * @pstate: Pointer to SDE plane state holding the property blobs
 *
 * Fetches the directional (ED), circular (CIR) and separable (SEP) LUT
 * blobs from the plane properties and stores their pointers and lengths
 * into the scaler3 config.
 *
 * Return: 0 on success, -ENODATA if any of the three LUTs is missing
 */
static int _sde_plane_setup_scaler3_lut(struct sde_plane *psde,
		struct sde_plane_state *pstate)
{
	struct sde_hw_scaler3_cfg *cfg = psde->scaler3_cfg;
	int ret = 0;

	cfg->dir_lut = msm_property_get_blob(
			&psde->property_info,
			pstate->property_blobs, &cfg->dir_len,
			PLANE_PROP_SCALER_LUT_ED);
	cfg->cir_lut = msm_property_get_blob(
			&psde->property_info,
			pstate->property_blobs, &cfg->cir_len,
			PLANE_PROP_SCALER_LUT_CIR);
	cfg->sep_lut = msm_property_get_blob(
			&psde->property_info,
			pstate->property_blobs, &cfg->sep_len,
			PLANE_PROP_SCALER_LUT_SEP);
	/* all three LUTs are required for a usable scaler3 config */
	if (!cfg->dir_lut || !cfg->cir_lut || !cfg->sep_lut)
		ret = -ENODATA;
	return ret;
}
+
/**
 * _sde_plane_setup_scaler3 - configure the QSEED3 scaler
 * @psde: Pointer to SDE plane object
 * @src_w/@src_h: Source dimensions
 * @dst_w/@dst_h: Destination dimensions
 * @scale_cfg: Pointer to scaler3 configuration to fill
 * @fmt: Pointer to format definition
 * @chroma_subsmpl_h/@chroma_subsmpl_v: Chroma subsampling factors
 *
 * NOTE(review): intentionally an empty stub in this version - scaler3
 * programming is not implemented, so callers get no configuration here.
 */
static void _sde_plane_setup_scaler3(struct sde_plane *psde,
		uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
		struct sde_hw_scaler3_cfg *scale_cfg,
		const struct sde_format *fmt,
		uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
{
}
+
/**
 * _sde_plane_setup_scaler2 - determine default scaler phase steps/filter type
 * @psde: Pointer to SDE plane object
 * @src: Source size
 * @dst: Destination size
 * @phase_steps: Pointer to output array for phase steps
 * @filter: Pointer to output array for filter type
 * @fmt: Pointer to format definition
 * @chroma_subsampling: Subsampling amount for chroma channel; used as a
 *                      divisor, so it must be non-zero (not validated here)
 *
 * Phase steps are computed in Q21 fixed point; the scaler filters are
 * selected only when scaling or a YUV format requires them, otherwise all
 * filters are set to SDE_SCALE_FILTER_MAX (scaler disabled).
 *
 * Returns: 0 on success
 */
static int _sde_plane_setup_scaler2(struct sde_plane *psde,
		uint32_t src, uint32_t dst, uint32_t *phase_steps,
		enum sde_hw_filter *filter, const struct sde_format *fmt,
		uint32_t chroma_subsampling)
{
	if (!psde || !phase_steps || !filter || !fmt) {
		SDE_ERROR(
			"invalid arg(s), plane %d phase %d filter %d fmt %d\n",
			psde != 0, phase_steps != 0, filter != 0, fmt != 0);
		return -EINVAL;
	}

	/* calculate phase steps, leave init phase as zero */
	phase_steps[SDE_SSPP_COMP_0] =
		mult_frac(1 << PHASE_STEP_SHIFT, src, dst);
	phase_steps[SDE_SSPP_COMP_1_2] =
		phase_steps[SDE_SSPP_COMP_0] / chroma_subsampling;
	phase_steps[SDE_SSPP_COMP_2] = phase_steps[SDE_SSPP_COMP_1_2];
	phase_steps[SDE_SSPP_COMP_3] = phase_steps[SDE_SSPP_COMP_0];

	/* calculate scaler config, if necessary */
	if (SDE_FORMAT_IS_YUV(fmt) || src != dst) {
		/* bilinear when upscaling, PCMN when downscaling */
		filter[SDE_SSPP_COMP_3] =
			(src <= dst) ? SDE_SCALE_FILTER_BIL :
			SDE_SCALE_FILTER_PCMN;

		if (SDE_FORMAT_IS_YUV(fmt)) {
			filter[SDE_SSPP_COMP_0] = SDE_SCALE_FILTER_CA;
			filter[SDE_SSPP_COMP_1_2] = filter[SDE_SSPP_COMP_3];
		} else {
			filter[SDE_SSPP_COMP_0] = filter[SDE_SSPP_COMP_3];
			filter[SDE_SSPP_COMP_1_2] =
				SDE_SCALE_FILTER_NEAREST;
		}
	} else {
		/* disable scaler */
		filter[SDE_SSPP_COMP_0] = SDE_SCALE_FILTER_MAX;
		filter[SDE_SSPP_COMP_1_2] = SDE_SCALE_FILTER_MAX;
		filter[SDE_SSPP_COMP_3] = SDE_SCALE_FILTER_MAX;
	}
	return 0;
}
+
+/**
+ * _sde_plane_setup_pixel_ext - determine default pixel extension values
+ * @psde: Pointer to SDE plane object
+ * @src: Source size
+ * @dst: Destination size
+ * @decimated_src: Source size after decimation, if any
+ * @phase_steps: Pointer to output array for phase steps
+ * @out_src: Output array for pixel extension values
+ * @out_edge1: Output array for pixel extension first edge
+ * @out_edge2: Output array for pixel extension second edge
+ * @filter: Pointer to array for filter type
+ * @fmt: Pointer to format definition
+ * @chroma_subsampling: Subsampling amount for chroma channel
+ * @post_compare: Whether to use the chroma-subsampled source size for the
+ *                unity/upscale/downscale comparisons
+ */
+static void _sde_plane_setup_pixel_ext(struct sde_plane *psde,
+ uint32_t src, uint32_t dst, uint32_t decimated_src,
+ uint32_t *phase_steps, uint32_t *out_src, int *out_edge1,
+ int *out_edge2, enum sde_hw_filter *filter,
+ const struct sde_format *fmt, uint32_t chroma_subsampling,
+ bool post_compare)
+{
+ int64_t edge1, edge2, caf;
+ uint32_t src_work;
+ int i, tmp;
+
+ /* silently no-op on any NULL argument rather than logging an error */
+ if (psde && phase_steps && out_src && out_edge1 &&
+ out_edge2 && filter && fmt) {
+ /* handle CAF for YUV formats */
+ if (SDE_FORMAT_IS_YUV(fmt) && *filter == SDE_SCALE_FILTER_CA)
+ caf = PHASE_STEP_UNIT_SCALE;
+ else
+ caf = 0;
+
+ for (i = 0; i < SDE_MAX_PLANES; i++) {
+ /* chroma components operate on the subsampled width */
+ src_work = decimated_src;
+ if (i == SDE_SSPP_COMP_1_2 || i == SDE_SSPP_COMP_2)
+ src_work /= chroma_subsampling;
+ if (post_compare)
+ src = src_work;
+ if (!SDE_FORMAT_IS_YUV(fmt) && (src == dst)) {
+ /* unity */
+ edge1 = 0;
+ edge2 = 0;
+ } else if (dst >= src) {
+ /* upscale */
+ edge1 = (1 << PHASE_RESIDUAL);
+ edge1 -= caf;
+ edge2 = (1 << PHASE_RESIDUAL);
+ edge2 += (dst - 1) * *(phase_steps + i);
+ edge2 -= (src_work - 1) * PHASE_STEP_UNIT_SCALE;
+ edge2 += caf;
+ edge2 = -(edge2);
+ } else {
+ /* downscale */
+ edge1 = 0;
+ edge2 = (dst - 1) * *(phase_steps + i);
+ edge2 -= (src_work - 1) * PHASE_STEP_UNIT_SCALE;
+ edge2 += *(phase_steps + i);
+ edge2 = -(edge2);
+ }
+
+ /* only enable CAF for luma plane */
+ caf = 0;
+
+ /* populate output arrays */
+ *(out_src + i) = src_work;
+
+ /* edge updates taken from __pxl_extn_helper */
+ /* NOTE(review): tmp is a signed int receiving a
+ * uint32_t cast of a 64-bit edge; assumes the edge
+ * magnitudes fit in 32 bits -- confirm
+ */
+ if (edge1 >= 0) {
+ tmp = (uint32_t)edge1;
+ tmp >>= PHASE_STEP_SHIFT;
+ *(out_edge1 + i) = -tmp;
+ } else {
+ /* round the negative edge up to a whole step */
+ tmp = (uint32_t)(-edge1);
+ *(out_edge1 + i) =
+ (tmp + PHASE_STEP_UNIT_SCALE - 1) >>
+ PHASE_STEP_SHIFT;
+ }
+ if (edge2 >= 0) {
+ tmp = (uint32_t)edge2;
+ tmp >>= PHASE_STEP_SHIFT;
+ *(out_edge2 + i) = -tmp;
+ } else {
+ tmp = (uint32_t)(-edge2);
+ *(out_edge2 + i) =
+ (tmp + PHASE_STEP_UNIT_SCALE - 1) >>
+ PHASE_STEP_SHIFT;
+ }
+ }
+ }
+}
+
+/*
+ * _sde_plane_setup_csc - select the CSC configuration for the plane
+ *
+ * Chooses, in priority order: a user-supplied override, the 10-bit default
+ * table when the pipe advertises SDE_SSPP_CSC_10BIT, otherwise the 8-bit
+ * default table. Only sets psde->csc_ptr; the hardware is programmed later
+ * in sde_plane_flush().
+ */
+static inline void _sde_plane_setup_csc(struct sde_plane *psde)
+{
+ /* default 8-bit YUV -> RGB table (601 limited range, per naming) */
+ static const struct sde_csc_cfg sde_csc_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+ };
+ /* 10-bit variant: identical matrix, wider bias/clamp ranges */
+ static const struct sde_csc_cfg sde_csc10_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+ };
+
+ if (!psde) {
+ SDE_ERROR("invalid plane\n");
+ return;
+ }
+
+ /* revert to kernel default if override not available */
+ if (psde->csc_usr_ptr)
+ psde->csc_ptr = psde->csc_usr_ptr;
+ else if (BIT(SDE_SSPP_CSC_10BIT) & psde->features)
+ psde->csc_ptr = (struct sde_csc_cfg *)&sde_csc10_YUV2RGB_601L;
+ else
+ psde->csc_ptr = (struct sde_csc_cfg *)&sde_csc_YUV2RGB_601L;
+
+ SDE_DEBUG_PLANE(psde, "using 0x%X 0x%X 0x%X...\n",
+ psde->csc_ptr->csc_mv[0],
+ psde->csc_ptr->csc_mv[1],
+ psde->csc_ptr->csc_mv[2]);
+}
+
+/*
+ * sde_color_process_plane_setup - program HSIC and memory-color blocks
+ * @plane: Pointer to drm plane structure
+ *
+ * Reads the hue/saturation/value/contrast and skin/sky/foliage memory-color
+ * properties from the current plane state and pushes them to the pipe via
+ * the corresponding optional hardware ops.
+ */
+static void sde_color_process_plane_setup(struct drm_plane *plane)
+{
+ struct sde_plane *psde;
+ struct sde_plane_state *pstate;
+ uint32_t hue, saturation, value, contrast;
+ struct drm_msm_memcol *memcol = NULL;
+ size_t memcol_sz = 0;
+
+ /* guard inputs; sibling setup functions validate the same way */
+ if (!plane || !plane->state) {
+ SDE_ERROR("invalid plane or plane state\n");
+ return;
+ }
+
+ psde = to_sde_plane(plane);
+ pstate = to_sde_plane_state(plane->state);
+
+ if (!psde->pipe_hw) {
+ SDE_ERROR_PLANE(psde, "invalid pipe_hw\n");
+ return;
+ }
+
+ hue = (uint32_t) sde_plane_get_property(pstate, PLANE_PROP_HUE_ADJUST);
+ if (psde->pipe_hw->ops.setup_pa_hue)
+ psde->pipe_hw->ops.setup_pa_hue(psde->pipe_hw, &hue);
+ saturation = (uint32_t) sde_plane_get_property(pstate,
+ PLANE_PROP_SATURATION_ADJUST);
+ if (psde->pipe_hw->ops.setup_pa_sat)
+ psde->pipe_hw->ops.setup_pa_sat(psde->pipe_hw, &saturation);
+ value = (uint32_t) sde_plane_get_property(pstate,
+ PLANE_PROP_VALUE_ADJUST);
+ if (psde->pipe_hw->ops.setup_pa_val)
+ psde->pipe_hw->ops.setup_pa_val(psde->pipe_hw, &value);
+ contrast = (uint32_t) sde_plane_get_property(pstate,
+ PLANE_PROP_CONTRAST_ADJUST);
+ if (psde->pipe_hw->ops.setup_pa_cont)
+ psde->pipe_hw->ops.setup_pa_cont(psde->pipe_hw, &contrast);
+
+ if (psde->pipe_hw->ops.setup_pa_memcolor) {
+ /* Skin memory color setup */
+ memcol = msm_property_get_blob(&psde->property_info,
+ pstate->property_blobs,
+ &memcol_sz,
+ PLANE_PROP_SKIN_COLOR);
+ psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
+ MEMCOLOR_SKIN, memcol);
+
+ /* Sky memory color setup */
+ memcol = msm_property_get_blob(&psde->property_info,
+ pstate->property_blobs,
+ &memcol_sz,
+ PLANE_PROP_SKY_COLOR);
+ psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
+ MEMCOLOR_SKY, memcol);
+
+ /* Foliage memory color setup */
+ memcol = msm_property_get_blob(&psde->property_info,
+ pstate->property_blobs,
+ &memcol_sz,
+ PLANE_PROP_FOLIAGE_COLOR);
+ psde->pipe_hw->ops.setup_pa_memcolor(psde->pipe_hw,
+ MEMCOLOR_FOLIAGE, memcol);
+ }
+}
+
+/*
+ * _sde_plane_setup_scaler - configure decimation, scaler and pixel extension
+ * @psde: Pointer to SDE plane object
+ * @fmt: Pointer to format definition for the current job
+ * @pstate: Pointer to SDE plane state; _sde_plane_color_fill passes 0 here.
+ *          NOTE(review): pstate is reported in the error message but never
+ *          NULL-checked before being handed to sde_plane_get_property --
+ *          assumes that helper tolerates a NULL state; confirm.
+ */
+static void _sde_plane_setup_scaler(struct sde_plane *psde,
+ const struct sde_format *fmt,
+ struct sde_plane_state *pstate)
+{
+ struct sde_hw_pixel_ext *pe;
+ uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+
+ if (!psde || !fmt) {
+ SDE_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
+ psde != 0, fmt != 0, pstate != 0);
+ return;
+ }
+
+ pe = &(psde->pixel_ext);
+
+ psde->pipe_cfg.horz_decimation =
+ sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE);
+ psde->pipe_cfg.vert_decimation =
+ sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
+
+ /* don't chroma subsample if decimating */
+ chroma_subsmpl_h = psde->pipe_cfg.horz_decimation ? 1 :
+ drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
+ chroma_subsmpl_v = psde->pipe_cfg.vert_decimation ? 1 :
+ drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
+
+ /* update scaler */
+ if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+ int error;
+
+ /* QSEED3: fall back to default config when LUT blobs are
+ * missing or the user did not supply pixel-ext data
+ */
+ error = _sde_plane_setup_scaler3_lut(psde, pstate);
+ if (error || !psde->pixel_ext_usr) {
+ /* calculate default config for QSEED3 */
+ _sde_plane_setup_scaler3(psde,
+ psde->pipe_cfg.src_rect.w,
+ psde->pipe_cfg.src_rect.h,
+ psde->pipe_cfg.dst_rect.w,
+ psde->pipe_cfg.dst_rect.h,
+ psde->scaler3_cfg, fmt,
+ chroma_subsmpl_h, chroma_subsmpl_v);
+ }
+ } else if (!psde->pixel_ext_usr) {
+ uint32_t deci_dim, i;
+
+ /* calculate default configuration for QSEED2 */
+ memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
+
+ SDE_DEBUG_PLANE(psde, "default config\n");
+ deci_dim = DECIMATED_DIMENSION(psde->pipe_cfg.src_rect.w,
+ psde->pipe_cfg.horz_decimation);
+ _sde_plane_setup_scaler2(psde,
+ deci_dim,
+ psde->pipe_cfg.dst_rect.w,
+ pe->phase_step_x,
+ pe->horz_filter, fmt, chroma_subsmpl_h);
+
+ /* YUV source width must be even for the pixel extension */
+ if (SDE_FORMAT_IS_YUV(fmt))
+ deci_dim &= ~0x1;
+ _sde_plane_setup_pixel_ext(psde, psde->pipe_cfg.src_rect.w,
+ psde->pipe_cfg.dst_rect.w, deci_dim,
+ pe->phase_step_x,
+ pe->roi_w,
+ pe->num_ext_pxls_left,
+ pe->num_ext_pxls_right, pe->horz_filter, fmt,
+ chroma_subsmpl_h, 0);
+
+ deci_dim = DECIMATED_DIMENSION(psde->pipe_cfg.src_rect.h,
+ psde->pipe_cfg.vert_decimation);
+ _sde_plane_setup_scaler2(psde,
+ deci_dim,
+ psde->pipe_cfg.dst_rect.h,
+ pe->phase_step_y,
+ pe->vert_filter, fmt, chroma_subsmpl_v);
+ _sde_plane_setup_pixel_ext(psde, psde->pipe_cfg.src_rect.h,
+ psde->pipe_cfg.dst_rect.h, deci_dim,
+ pe->phase_step_y,
+ pe->roi_h,
+ pe->num_ext_pxls_top,
+ pe->num_ext_pxls_btm, pe->vert_filter, fmt,
+ chroma_subsmpl_v, 1);
+
+ /* positive ext counts become repeats, negative become fetches */
+ for (i = 0; i < SDE_MAX_PLANES; i++) {
+ if (pe->num_ext_pxls_left[i] >= 0)
+ pe->left_rpt[i] = pe->num_ext_pxls_left[i];
+ else
+ pe->left_ftch[i] = pe->num_ext_pxls_left[i];
+
+ if (pe->num_ext_pxls_right[i] >= 0)
+ pe->right_rpt[i] = pe->num_ext_pxls_right[i];
+ else
+ pe->right_ftch[i] = pe->num_ext_pxls_right[i];
+
+ if (pe->num_ext_pxls_top[i] >= 0)
+ pe->top_rpt[i] = pe->num_ext_pxls_top[i];
+ else
+ pe->top_ftch[i] = pe->num_ext_pxls_top[i];
+
+ if (pe->num_ext_pxls_btm[i] >= 0)
+ pe->btm_rpt[i] = pe->num_ext_pxls_btm[i];
+ else
+ pe->btm_ftch[i] = pe->num_ext_pxls_btm[i];
+ }
+ }
+}
+
+/**
+ * _sde_plane_color_fill - enables color fill on plane
+ * @psde: Pointer to SDE plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success, -EINVAL on invalid plane pointers
+ */
+static int _sde_plane_color_fill(struct sde_plane *psde,
+ uint32_t color, uint32_t alpha)
+{
+ const struct sde_format *fmt;
+
+ if (!psde) {
+ SDE_ERROR("invalid plane\n");
+ return -EINVAL;
+ }
+
+ if (!psde->pipe_hw) {
+ SDE_ERROR_PLANE(psde, "invalid plane h/w pointer\n");
+ return -EINVAL;
+ }
+
+ SDE_DEBUG_PLANE(psde, "\n");
+
+ /*
+ * select fill format to match user property expectation,
+ * h/w only supports RGB variants
+ */
+ fmt = sde_get_sde_format(DRM_FORMAT_ABGR8888);
+
+ /* update sspp */
+ if (fmt && psde->pipe_hw->ops.setup_solidfill) {
+ psde->pipe_hw->ops.setup_solidfill(psde->pipe_hw,
+ (color & 0xFFFFFF) | ((alpha & 0xFF) << 24));
+
+ /* override scaler/decimation if solid fill */
+ psde->pipe_cfg.src_rect.x = 0;
+ psde->pipe_cfg.src_rect.y = 0;
+ psde->pipe_cfg.src_rect.w = psde->pipe_cfg.dst_rect.w;
+ psde->pipe_cfg.src_rect.h = psde->pipe_cfg.dst_rect.h;
+
+ /*
+ * no plane state in this path: pass NULL (was integer 0).
+ * NOTE(review): assumes the property helpers reached via
+ * _sde_plane_setup_scaler tolerate a NULL state -- confirm.
+ */
+ _sde_plane_setup_scaler(psde, fmt, NULL);
+
+ if (psde->pipe_hw->ops.setup_format)
+ psde->pipe_hw->ops.setup_format(psde->pipe_hw,
+ fmt, SDE_SSPP_SOLID_FILL);
+
+ if (psde->pipe_hw->ops.setup_rects)
+ psde->pipe_hw->ops.setup_rects(psde->pipe_hw,
+ &psde->pipe_cfg, &psde->pixel_ext,
+ psde->scaler3_cfg);
+ }
+
+ return 0;
+}
+
+/*
+ * _sde_plane_mode_set - program the pipe hardware from dirty plane state
+ * @plane: Pointer to drm plane structure
+ * @state: Pointer to the plane state being applied; callers pass
+ *         plane->state (note pstate below is derived from plane->state
+ *         while crtc/fb/rects are read from @state)
+ *
+ * Pops all dirty properties to build the dirty mask, then programs
+ * scanout, rects/scaler, format/CSC, color processing, sharpening and
+ * QoS as indicated by the mask.
+ *
+ * Returns: 0 on success, -EINVAL on invalid arguments
+ */
+static int _sde_plane_mode_set(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ uint32_t nplanes, src_flags;
+ struct sde_plane *psde;
+ struct sde_plane_state *pstate;
+ const struct sde_format *fmt;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct sde_rect src, dst;
+ bool q16_data = true;
+ int idx;
+
+ if (!plane) {
+ SDE_ERROR("invalid plane\n");
+ return -EINVAL;
+ } else if (!plane->state) {
+ SDE_ERROR("invalid plane state\n");
+ return -EINVAL;
+ }
+
+ psde = to_sde_plane(plane);
+ pstate = to_sde_plane_state(plane->state);
+
+ crtc = state->crtc;
+ fb = state->fb;
+ if (!crtc || !fb) {
+ SDE_ERROR_PLANE(psde, "invalid crtc %d or fb %d\n",
+ crtc != 0, fb != 0);
+ return -EINVAL;
+ }
+ fmt = to_sde_format(msm_framebuffer_format(fb));
+ nplanes = fmt->num_planes;
+
+ /* determine what needs to be refreshed */
+ while ((idx = msm_property_pop_dirty(&psde->property_info)) >= 0) {
+ switch (idx) {
+ case PLANE_PROP_SCALER_V1:
+ case PLANE_PROP_SCALER_V2:
+ case PLANE_PROP_H_DECIMATE:
+ case PLANE_PROP_V_DECIMATE:
+ case PLANE_PROP_SRC_CONFIG:
+ case PLANE_PROP_ZPOS:
+ pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+ break;
+ case PLANE_PROP_CSC_V1:
+ pstate->dirty |= SDE_PLANE_DIRTY_FORMAT;
+ break;
+ case PLANE_PROP_COLOR_FILL:
+ /* potentially need to refresh everything */
+ pstate->dirty = SDE_PLANE_DIRTY_ALL;
+ break;
+ case PLANE_PROP_ROTATION:
+ pstate->dirty |= SDE_PLANE_DIRTY_FORMAT;
+ break;
+ case PLANE_PROP_INFO:
+ case PLANE_PROP_ALPHA:
+ case PLANE_PROP_INPUT_FENCE:
+ case PLANE_PROP_BLEND_OP:
+ /* no special action required */
+ break;
+ default:
+ /* unknown property, refresh everything */
+ pstate->dirty |= SDE_PLANE_DIRTY_ALL;
+ SDE_ERROR("executing full mode set, prp_idx %d\n", idx);
+ break;
+ }
+ }
+
+ if (pstate->dirty & SDE_PLANE_DIRTY_RECTS)
+ memset(&(psde->pipe_cfg), 0, sizeof(struct sde_hw_pipe_cfg));
+
+ /* scanout addresses are refreshed on every call */
+ _sde_plane_set_scanout(plane, pstate, &psde->pipe_cfg, fb);
+
+ /* early out if nothing dirty */
+ if (!pstate->dirty)
+ return 0;
+ pstate->pending = true;
+
+ psde->is_rt_pipe = sde_crtc_is_rt(crtc);
+ _sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
+
+ /* update roi config */
+ if (pstate->dirty & SDE_PLANE_DIRTY_RECTS) {
+ /* src is Q16 fixed point; dst is integer */
+ POPULATE_RECT(&src, state->src_x, state->src_y,
+ state->src_w, state->src_h, q16_data);
+ POPULATE_RECT(&dst, state->crtc_x, state->crtc_y,
+ state->crtc_w, state->crtc_h, !q16_data);
+
+ SDE_DEBUG_PLANE(psde,
+ "FB[%u] %u,%u,%ux%u->crtc%u %d,%d,%ux%u, %s ubwc %d\n",
+ fb->base.id, src.x, src.y, src.w, src.h,
+ crtc->base.id, dst.x, dst.y, dst.w, dst.h,
+ drm_get_format_name(fmt->base.pixel_format),
+ SDE_FORMAT_IS_UBWC(fmt));
+
+ /* deinterlace: fetch every other line (doubled pitch) */
+ if (sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) &
+ BIT(SDE_DRM_DEINTERLACE)) {
+ SDE_DEBUG_PLANE(psde, "deinterlace\n");
+ for (idx = 0; idx < SDE_MAX_PLANES; ++idx)
+ psde->pipe_cfg.layout.plane_pitch[idx] <<= 1;
+ src.h /= 2;
+ src.y = DIV_ROUND_UP(src.y, 2);
+ src.y &= ~0x1;
+ }
+
+ psde->pipe_cfg.src_rect = src;
+ psde->pipe_cfg.dst_rect = dst;
+
+ /* check for color fill */
+ psde->color_fill = (uint32_t)sde_plane_get_property(pstate,
+ PLANE_PROP_COLOR_FILL);
+ if (psde->color_fill & SDE_PLANE_COLOR_FILL_FLAG) {
+ /* skip remaining processing on color fill */
+ pstate->dirty = 0x0;
+ } else if (psde->pipe_hw->ops.setup_rects) {
+ /* NOTE(review): psde->pipe_hw is dereferenced here
+ * without a NULL check -- confirm init guarantees it
+ */
+ _sde_plane_setup_scaler(psde, fmt, pstate);
+
+ psde->pipe_hw->ops.setup_rects(psde->pipe_hw,
+ &psde->pipe_cfg, &psde->pixel_ext,
+ psde->scaler3_cfg);
+ }
+ }
+
+ if ((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) &&
+ psde->pipe_hw->ops.setup_format) {
+ src_flags = 0x0;
+ SDE_DEBUG_PLANE(psde, "rotation 0x%llX\n",
+ sde_plane_get_property(pstate, PLANE_PROP_ROTATION));
+ if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION) &
+ BIT(DRM_REFLECT_X))
+ src_flags |= SDE_SSPP_FLIP_LR;
+ if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION) &
+ BIT(DRM_REFLECT_Y))
+ src_flags |= SDE_SSPP_FLIP_UD;
+
+ /* update format */
+ psde->pipe_hw->ops.setup_format(psde->pipe_hw, fmt, src_flags);
+
+ /* update csc */
+ if (SDE_FORMAT_IS_YUV(fmt))
+ _sde_plane_setup_csc(psde);
+ else
+ psde->csc_ptr = 0;
+ }
+
+ sde_color_process_plane_setup(plane);
+
+ /* update sharpening */
+ if ((pstate->dirty & SDE_PLANE_DIRTY_SHARPEN) &&
+ psde->pipe_hw->ops.setup_sharpening) {
+ psde->sharp_cfg.strength = SHARP_STRENGTH_DEFAULT;
+ psde->sharp_cfg.edge_thr = SHARP_EDGE_THR_DEFAULT;
+ psde->sharp_cfg.smooth_thr = SHARP_SMOOTH_THR_DEFAULT;
+ psde->sharp_cfg.noise_thr = SHARP_NOISE_THR_DEFAULT;
+
+ psde->pipe_hw->ops.setup_sharpening(psde->pipe_hw,
+ &psde->sharp_cfg);
+ }
+
+ _sde_plane_set_qos_lut(plane, fb);
+ _sde_plane_set_danger_lut(plane, fb);
+
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ _sde_plane_set_qos_ctrl(plane, true, SDE_PLANE_QOS_PANIC_CTRL);
+ _sde_plane_set_ot_limit(plane, crtc);
+ }
+
+ /* clear dirty */
+ pstate->dirty = 0x0;
+
+ return 0;
+}
+
+/*
+ * sde_plane_prepare_fb - map the incoming framebuffer for this pipe
+ * @plane: Pointer to drm plane structure
+ * @new_state: Pointer to the plane state being prepared
+ *
+ * Returns: 0 when there is no fb, otherwise the result of the mapping
+ */
+static int sde_plane_prepare_fb(struct drm_plane *plane,
+ const struct drm_plane_state *new_state)
+{
+ struct sde_plane *psde = to_sde_plane(plane);
+
+ /* nothing to map when no framebuffer is attached */
+ if (!new_state->fb)
+ return 0;
+
+ SDE_DEBUG_PLANE(psde, "FB[%u]\n", new_state->fb->base.id);
+ return msm_framebuffer_prepare(new_state->fb, psde->mmu_id);
+}
+
+/*
+ * sde_plane_cleanup_fb - unmap the framebuffer of a retired plane state
+ * @plane: Pointer to drm plane structure
+ * @old_state: Pointer to the plane state being cleaned up
+ */
+static void sde_plane_cleanup_fb(struct drm_plane *plane,
+ const struct drm_plane_state *old_state)
+{
+ struct sde_plane *psde;
+ struct drm_framebuffer *fb;
+
+ psde = plane ? to_sde_plane(plane) : NULL;
+ fb = old_state ? old_state->fb : NULL;
+
+ /* nothing to release without a framebuffer */
+ if (!fb)
+ return;
+
+ SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
+ msm_framebuffer_cleanup(fb, psde->mmu_id);
+}
+
+/*
+ * _sde_plane_atomic_check_mode_changed - derive dirty flags from state delta
+ * @psde: Pointer to SDE plane object
+ * @state: Pointer to the new plane state
+ * @old_state: Pointer to the previous plane state
+ *
+ * Compares new vs old state (enable, rects, fb format/modifiers/pitches/
+ * offsets) and accumulates the appropriate SDE_PLANE_DIRTY_* bits.
+ *
+ * Fix: the modifier/pitch/offset debug messages previously used a stray
+ * escaped quote plus a backslash line-continuation inside the string
+ * literal, which embedded a '"' character and the next line's leading
+ * whitespace into the log output. They are now single clean literals.
+ */
+static void _sde_plane_atomic_check_mode_changed(struct sde_plane *psde,
+ struct drm_plane_state *state,
+ struct drm_plane_state *old_state)
+{
+ struct sde_plane_state *pstate = to_sde_plane_state(state);
+
+ /* no need to check it again */
+ if (pstate->dirty == SDE_PLANE_DIRTY_ALL)
+ return;
+
+ if (!sde_plane_enabled(state) || !sde_plane_enabled(old_state)
+ || psde->is_error) {
+ SDE_DEBUG_PLANE(psde,
+ "enabling/disabling full modeset required\n");
+ pstate->dirty |= SDE_PLANE_DIRTY_ALL;
+ } else if (to_sde_plane_state(old_state)->pending) {
+ SDE_DEBUG_PLANE(psde, "still pending\n");
+ pstate->dirty |= SDE_PLANE_DIRTY_ALL;
+ } else if (state->src_w != old_state->src_w ||
+ state->src_h != old_state->src_h ||
+ state->src_x != old_state->src_x ||
+ state->src_y != old_state->src_y) {
+ SDE_DEBUG_PLANE(psde, "src rect updated\n");
+ pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+ } else if (state->crtc_w != old_state->crtc_w ||
+ state->crtc_h != old_state->crtc_h ||
+ state->crtc_x != old_state->crtc_x ||
+ state->crtc_y != old_state->crtc_y) {
+ SDE_DEBUG_PLANE(psde, "crtc rect updated\n");
+ pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+ }
+
+ if (!state->fb || !old_state->fb) {
+ SDE_DEBUG_PLANE(psde, "can't compare fb handles\n");
+ } else if (state->fb->pixel_format != old_state->fb->pixel_format) {
+ SDE_DEBUG_PLANE(psde, "format change\n");
+ pstate->dirty |= SDE_PLANE_DIRTY_FORMAT | SDE_PLANE_DIRTY_RECTS;
+ } else {
+ uint64_t *new_mods = state->fb->modifier;
+ uint64_t *old_mods = old_state->fb->modifier;
+ uint32_t *new_pitches = state->fb->pitches;
+ uint32_t *old_pitches = old_state->fb->pitches;
+ uint32_t *new_offset = state->fb->offsets;
+ uint32_t *old_offset = old_state->fb->offsets;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(state->fb->modifier); i++) {
+ if (new_mods[i] != old_mods[i]) {
+ SDE_DEBUG_PLANE(psde,
+ "format modifiers change plane:%d new_mode:%llu old_mode:%llu\n",
+ i, new_mods[i], old_mods[i]);
+ pstate->dirty |= SDE_PLANE_DIRTY_FORMAT |
+ SDE_PLANE_DIRTY_RECTS;
+ break;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(state->fb->pitches); i++) {
+ if (new_pitches[i] != old_pitches[i]) {
+ SDE_DEBUG_PLANE(psde,
+ "pitches change plane:%d old_pitches:%u new_pitches:%u\n",
+ i, old_pitches[i], new_pitches[i]);
+ pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+ break;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(state->fb->offsets); i++) {
+ if (new_offset[i] != old_offset[i]) {
+ SDE_DEBUG_PLANE(psde,
+ "offset change plane:%d old_offset:%u new_offset:%u\n",
+ i, old_offset[i], new_offset[i]);
+ pstate->dirty |= SDE_PLANE_DIRTY_FORMAT |
+ SDE_PLANE_DIRTY_RECTS;
+ break;
+ }
+ }
+ }
+}
+
+/*
+ * sde_plane_atomic_check - validate a proposed plane state
+ * @plane: Pointer to drm plane structure
+ * @state: Pointer to the proposed plane state
+ *
+ * Validates source/destination rects, decimation, YUV constraints and
+ * scaler limits against the pipe's catalog capabilities, then computes
+ * the dirty flags for a valid state via
+ * _sde_plane_atomic_check_mode_changed().
+ *
+ * Returns: 0 on success, -EINVAL for invalid configuration, -E2BIG when
+ * a size or scaling limit is exceeded
+ */
+static int sde_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ int ret = 0;
+ struct sde_plane *psde;
+ struct sde_plane_state *pstate;
+ const struct sde_format *fmt;
+ struct sde_rect src, dst;
+ uint32_t deci_w, deci_h, src_deci_w, src_deci_h;
+ uint32_t max_upscale, max_downscale, min_src_size, max_linewidth;
+ bool q16_data = true;
+
+ if (!plane || !state) {
+ SDE_ERROR("invalid arg(s), plane %d state %d\n",
+ plane != 0, state != 0);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ psde = to_sde_plane(plane);
+ pstate = to_sde_plane_state(state);
+
+ if (!psde->pipe_sblk) {
+ SDE_ERROR_PLANE(psde, "invalid catalog\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ deci_w = sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE);
+ deci_h = sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
+
+ /* src values are in Q16 fixed point, convert to integer */
+ POPULATE_RECT(&src, state->src_x, state->src_y, state->src_w,
+ state->src_h, q16_data);
+ POPULATE_RECT(&dst, state->crtc_x, state->crtc_y, state->crtc_w,
+ state->crtc_h, !q16_data);
+
+ src_deci_w = DECIMATED_DIMENSION(src.w, deci_w);
+ src_deci_h = DECIMATED_DIMENSION(src.h, deci_h);
+
+ max_upscale = psde->pipe_sblk->maxupscale;
+ max_downscale = psde->pipe_sblk->maxdwnscale;
+ max_linewidth = psde->pipe_sblk->maxlinewidth;
+
+ SDE_DEBUG_PLANE(psde, "check %d -> %d\n",
+ sde_plane_enabled(plane->state), sde_plane_enabled(state));
+
+ /* disabled planes skip all size/scaling validation */
+ if (!sde_plane_enabled(state))
+ goto modeset_update;
+
+ fmt = to_sde_format(msm_framebuffer_format(state->fb));
+
+ /* YUV needs even dimensions, hence the larger minimum */
+ min_src_size = SDE_FORMAT_IS_YUV(fmt) ? 2 : 1;
+
+ if (SDE_FORMAT_IS_YUV(fmt) &&
+ (!(psde->features & SDE_SSPP_SCALER) ||
+ !(psde->features & (BIT(SDE_SSPP_CSC)
+ | BIT(SDE_SSPP_CSC_10BIT))))) {
+ SDE_ERROR_PLANE(psde,
+ "plane doesn't have scaler/csc for yuv\n");
+ ret = -EINVAL;
+
+ /* check src bounds */
+ } else if (state->fb->width > MAX_IMG_WIDTH ||
+ state->fb->height > MAX_IMG_HEIGHT ||
+ src.w < min_src_size || src.h < min_src_size ||
+ CHECK_LAYER_BOUNDS(src.x, src.w, state->fb->width) ||
+ CHECK_LAYER_BOUNDS(src.y, src.h, state->fb->height)) {
+ SDE_ERROR_PLANE(psde, "invalid source %u, %u, %ux%u\n",
+ src.x, src.y, src.w, src.h);
+ ret = -E2BIG;
+
+ /* valid yuv image */
+ } else if (SDE_FORMAT_IS_YUV(fmt) && ((src.x & 0x1) || (src.y & 0x1) ||
+ (src.w & 0x1) || (src.h & 0x1))) {
+ SDE_ERROR_PLANE(psde, "invalid yuv source %u, %u, %ux%u\n",
+ src.x, src.y, src.w, src.h);
+ ret = -EINVAL;
+
+ /* min dst support */
+ } else if (dst.w < 0x1 || dst.h < 0x1) {
+ SDE_ERROR_PLANE(psde, "invalid dest rect %u, %u, %ux%u\n",
+ dst.x, dst.y, dst.w, dst.h);
+ ret = -EINVAL;
+
+ /* decimation validation */
+ } else if (deci_w || deci_h) {
+ if ((deci_w > psde->pipe_sblk->maxhdeciexp) ||
+ (deci_h > psde->pipe_sblk->maxvdeciexp)) {
+ SDE_ERROR_PLANE(psde,
+ "too much decimation requested\n");
+ ret = -EINVAL;
+ } else if (fmt->fetch_mode != SDE_FETCH_LINEAR) {
+ SDE_ERROR_PLANE(psde,
+ "decimation requires linear fetch\n");
+ ret = -EINVAL;
+ }
+
+ } else if (!(psde->features & SDE_SSPP_SCALER) &&
+ ((src.w != dst.w) || (src.h != dst.h))) {
+ SDE_ERROR_PLANE(psde,
+ "pipe doesn't support scaling %ux%u->%ux%u\n",
+ src.w, src.h, dst.w, dst.h);
+ ret = -EINVAL;
+
+ /* check decimated source width */
+ } else if (src_deci_w > max_linewidth) {
+ SDE_ERROR_PLANE(psde,
+ "invalid src w:%u, deci w:%u, line w:%u\n",
+ src.w, src_deci_w, max_linewidth);
+ ret = -E2BIG;
+
+ /* check max scaler capability */
+ } else if (((src_deci_w * max_upscale) < dst.w) ||
+ ((src_deci_h * max_upscale) < dst.h) ||
+ ((dst.w * max_downscale) < src_deci_w) ||
+ ((dst.h * max_downscale) < src_deci_h)) {
+ SDE_ERROR_PLANE(psde,
+ "too much scaling requested %ux%u->%ux%u\n",
+ src_deci_w, src_deci_h, dst.w, dst.h);
+ ret = -E2BIG;
+ }
+
+modeset_update:
+ if (!ret)
+ _sde_plane_atomic_check_mode_changed(psde, state, plane->state);
+exit:
+ return ret;
+}
+
+/**
+ * sde_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void sde_plane_flush(struct drm_plane *plane)
+{
+ struct sde_plane *psde;
+
+ if (!plane) {
+ SDE_ERROR("invalid plane\n");
+ return;
+ }
+
+ psde = to_sde_plane(plane);
+
+ /*
+ * These updates have to be done immediately before the plane flush
+ * timing, and may not be moved to the atomic_update/mode_set functions.
+ */
+ if (psde->is_error) {
+ /* force white frame with 0% alpha pipe output on error */
+ _sde_plane_color_fill(psde, 0xFFFFFF, 0x0);
+ } else if (psde->color_fill & SDE_PLANE_COLOR_FILL_FLAG) {
+ /* force 100% alpha */
+ _sde_plane_color_fill(psde, psde->color_fill, 0xFF);
+ } else if (psde->pipe_hw && psde->csc_ptr &&
+ psde->pipe_hw->ops.setup_csc) {
+ psde->pipe_hw->ops.setup_csc(psde->pipe_hw, psde->csc_ptr);
+ }
+
+ /* flag h/w flush complete */
+ if (plane->state)
+ to_sde_plane_state(plane->state)->pending = false;
+}
+
+/*
+ * sde_plane_atomic_update - apply the committed plane state to hardware
+ * @plane: Pointer to drm plane structure
+ * @old_state: Pointer to the previous plane state (unused here)
+ */
+static void sde_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct sde_plane *psde;
+ struct drm_plane_state *state;
+
+ if (!plane) {
+ SDE_ERROR("invalid plane\n");
+ return;
+ }
+ if (!plane->state) {
+ SDE_ERROR("invalid plane state\n");
+ return;
+ }
+
+ psde = to_sde_plane(plane);
+ psde->is_error = false;
+ state = plane->state;
+
+ SDE_DEBUG_PLANE(psde, "\n");
+
+ /* a disabled plane only needs its pending flag raised */
+ if (!sde_plane_enabled(state)) {
+ to_sde_plane_state(state)->pending = true;
+ return;
+ }
+
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(_sde_plane_mode_set(plane, state) < 0);
+}
+
+
+/*
+ * _sde_plane_install_properties - register all drm properties for a plane
+ * @plane: Pointer to drm plane structure
+ * @catalog: Pointer to the SDE hardware catalog
+ *
+ * Installs zpos/alpha/fence/decimation/scaler/csc/HSIC/rotation/blend/
+ * src_config/color_fill properties plus an immutable "capabilities" info
+ * blob and optional memory-color blobs, gated on the pipe's feature bits.
+ *
+ * NOTE(review): to_sde_plane(plane) is evaluated in the initializer before
+ * the !plane check below -- harmless if to_sde_plane is pure pointer
+ * arithmetic, but confirm it does not dereference plane.
+ */
+/* helper to install properties which are common to planes and crtcs */
+static void _sde_plane_install_properties(struct drm_plane *plane,
+ struct sde_mdss_cfg *catalog)
+{
+ static const struct drm_prop_enum_list e_blend_op[] = {
+ {SDE_DRM_BLEND_OP_NOT_DEFINED, "not_defined"},
+ {SDE_DRM_BLEND_OP_OPAQUE, "opaque"},
+ {SDE_DRM_BLEND_OP_PREMULTIPLIED, "premultiplied"},
+ {SDE_DRM_BLEND_OP_COVERAGE, "coverage"}
+ };
+ static const struct drm_prop_enum_list e_src_config[] = {
+ {SDE_DRM_DEINTERLACE, "deinterlace"}
+ };
+ const struct sde_format_extended *format_list;
+ struct sde_kms_info *info;
+ struct sde_plane *psde = to_sde_plane(plane);
+ int zpos_max = 255;
+ int zpos_def = 0;
+ char feature_name[256];
+
+ if (!plane || !psde) {
+ SDE_ERROR("invalid plane\n");
+ return;
+ } else if (!psde->pipe_hw || !psde->pipe_sblk) {
+ SDE_ERROR("invalid plane, pipe_hw %d pipe_sblk %d\n",
+ psde->pipe_hw != 0, psde->pipe_sblk != 0);
+ return;
+ } else if (!catalog) {
+ SDE_ERROR("invalid catalog\n");
+ return;
+ }
+
+ /* custom clients get the full blend-stage range; others get a
+ * default zpos above the primary plane
+ */
+ if (sde_is_custom_client()) {
+ if (catalog->mixer_count && catalog->mixer &&
+ catalog->mixer[0].sblk->maxblendstages) {
+ zpos_max = catalog->mixer[0].sblk->maxblendstages - 1;
+ if (zpos_max > SDE_STAGE_MAX - SDE_STAGE_0 - 1)
+ zpos_max = SDE_STAGE_MAX - SDE_STAGE_0 - 1;
+ }
+ } else if (plane->type != DRM_PLANE_TYPE_PRIMARY) {
+ /* reserve zpos == 0 for primary planes */
+ zpos_def = drm_plane_index(plane) + 1;
+ }
+
+ msm_property_install_range(&psde->property_info, "zpos",
+ 0x0, 0, zpos_max, zpos_def, PLANE_PROP_ZPOS);
+
+ msm_property_install_range(&psde->property_info, "alpha",
+ 0x0, 0, 255, 255, PLANE_PROP_ALPHA);
+
+ /* linux default file descriptor range on each process */
+ msm_property_install_range(&psde->property_info, "input_fence",
+ 0x0, 0, INR_OPEN_MAX, 0, PLANE_PROP_INPUT_FENCE);
+
+ if (psde->pipe_sblk->maxhdeciexp) {
+ msm_property_install_range(&psde->property_info, "h_decimate",
+ 0x0, 0, psde->pipe_sblk->maxhdeciexp, 0,
+ PLANE_PROP_H_DECIMATE);
+ }
+
+ if (psde->pipe_sblk->maxvdeciexp) {
+ msm_property_install_range(&psde->property_info, "v_decimate",
+ 0x0, 0, psde->pipe_sblk->maxvdeciexp, 0,
+ PLANE_PROP_V_DECIMATE);
+ }
+
+ /* QSEED3 pipes take scaler_v2 + LUT blobs; others take scaler_v1 */
+ if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+ msm_property_install_volatile_range(&psde->property_info,
+ "scaler_v2", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V2);
+ msm_property_install_blob(&psde->property_info, "lut_ed", 0,
+ PLANE_PROP_SCALER_LUT_ED);
+ msm_property_install_blob(&psde->property_info, "lut_cir", 0,
+ PLANE_PROP_SCALER_LUT_CIR);
+ msm_property_install_blob(&psde->property_info, "lut_sep", 0,
+ PLANE_PROP_SCALER_LUT_SEP);
+ } else if (psde->features & SDE_SSPP_SCALER) {
+ msm_property_install_volatile_range(&psde->property_info,
+ "scaler_v1", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V1);
+ }
+
+ if (psde->features & BIT(SDE_SSPP_CSC)) {
+ msm_property_install_volatile_range(&psde->property_info,
+ "csc_v1", 0x0, 0, ~0, 0, PLANE_PROP_CSC_V1);
+ }
+
+ /* HSIC property names carry the hardware block version suffix */
+ if (psde->features & BIT(SDE_SSPP_HSIC)) {
+ snprintf(feature_name, sizeof(feature_name), "%s%d",
+ "SDE_SSPP_HUE_V",
+ psde->pipe_sblk->hsic_blk.version >> 16);
+ msm_property_install_range(&psde->property_info,
+ feature_name, 0, 0, 0xFFFFFFFF, 0,
+ PLANE_PROP_HUE_ADJUST);
+ snprintf(feature_name, sizeof(feature_name), "%s%d",
+ "SDE_SSPP_SATURATION_V",
+ psde->pipe_sblk->hsic_blk.version >> 16);
+ msm_property_install_range(&psde->property_info,
+ feature_name, 0, 0, 0xFFFFFFFF, 0,
+ PLANE_PROP_SATURATION_ADJUST);
+ snprintf(feature_name, sizeof(feature_name), "%s%d",
+ "SDE_SSPP_VALUE_V",
+ psde->pipe_sblk->hsic_blk.version >> 16);
+ msm_property_install_range(&psde->property_info,
+ feature_name, 0, 0, 0xFFFFFFFF, 0,
+ PLANE_PROP_VALUE_ADJUST);
+ snprintf(feature_name, sizeof(feature_name), "%s%d",
+ "SDE_SSPP_CONTRAST_V",
+ psde->pipe_sblk->hsic_blk.version >> 16);
+ msm_property_install_range(&psde->property_info,
+ feature_name, 0, 0, 0xFFFFFFFF, 0,
+ PLANE_PROP_CONTRAST_ADJUST);
+ }
+
+ /* standard properties */
+ msm_property_install_rotation(&psde->property_info,
+ BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y), PLANE_PROP_ROTATION);
+
+ msm_property_install_enum(&psde->property_info, "blend_op", 0x0, 0,
+ e_blend_op, ARRAY_SIZE(e_blend_op), PLANE_PROP_BLEND_OP);
+
+ msm_property_install_enum(&psde->property_info, "src_config", 0x0, 1,
+ e_src_config, ARRAY_SIZE(e_src_config), PLANE_PROP_SRC_CONFIG);
+
+ if (psde->pipe_hw->ops.setup_solidfill)
+ msm_property_install_range(&psde->property_info, "color_fill",
+ 0, 0, 0xFFFFFFFF, 0, PLANE_PROP_COLOR_FILL);
+
+ info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
+ if (!info) {
+ SDE_ERROR("failed to allocate info memory\n");
+ return;
+ }
+
+ /* advertise pipe capabilities to userspace via an immutable blob */
+ msm_property_install_blob(&psde->property_info, "capabilities",
+ DRM_MODE_PROP_IMMUTABLE, PLANE_PROP_INFO);
+ sde_kms_info_reset(info);
+
+ format_list = psde->pipe_sblk->format_list;
+ if (format_list) {
+ sde_kms_info_start(info, "pixel_formats");
+ while (format_list->fourcc_format) {
+ sde_kms_info_append_format(info,
+ format_list->fourcc_format,
+ format_list->modifier);
+ ++format_list;
+ }
+ sde_kms_info_stop(info);
+ }
+
+ sde_kms_info_add_keyint(info, "max_linewidth",
+ psde->pipe_sblk->maxlinewidth);
+ sde_kms_info_add_keyint(info, "max_upscale",
+ psde->pipe_sblk->maxupscale);
+ sde_kms_info_add_keyint(info, "max_downscale",
+ psde->pipe_sblk->maxdwnscale);
+ sde_kms_info_add_keyint(info, "max_horizontal_deci",
+ psde->pipe_sblk->maxhdeciexp);
+ sde_kms_info_add_keyint(info, "max_vertical_deci",
+ psde->pipe_sblk->maxvdeciexp);
+ msm_property_set_blob(&psde->property_info, &psde->blob_info,
+ info->data, info->len, PLANE_PROP_INFO);
+
+ kfree(info);
+
+ if (psde->features & BIT(SDE_SSPP_MEMCOLOR)) {
+ snprintf(feature_name, sizeof(feature_name), "%s%d",
+ "SDE_SSPP_SKIN_COLOR_V",
+ psde->pipe_sblk->memcolor_blk.version >> 16);
+ msm_property_install_blob(&psde->property_info, feature_name, 0,
+ PLANE_PROP_SKIN_COLOR);
+ snprintf(feature_name, sizeof(feature_name), "%s%d",
+ "SDE_SSPP_SKY_COLOR_V",
+ psde->pipe_sblk->memcolor_blk.version >> 16);
+ msm_property_install_blob(&psde->property_info, feature_name, 0,
+ PLANE_PROP_SKY_COLOR);
+ snprintf(feature_name, sizeof(feature_name), "%s%d",
+ "SDE_SSPP_FOLIAGE_COLOR_V",
+ psde->pipe_sblk->memcolor_blk.version >> 16);
+ msm_property_install_blob(&psde->property_info, feature_name, 0,
+ PLANE_PROP_FOLIAGE_COLOR);
+ }
+}
+
+/*
+ * _sde_plane_set_csc_v1 - ingest a user-supplied CSC_V1 configuration
+ * @psde: Pointer to SDE plane object
+ * @usr_ptr: Userspace pointer to a struct sde_drm_csc_v1, or NULL to clear
+ */
+static inline void _sde_plane_set_csc_v1(struct sde_plane *psde, void *usr_ptr)
+{
+ struct sde_drm_csc_v1 csc_v1;
+ int idx;
+
+ if (!psde) {
+ SDE_ERROR("invalid plane\n");
+ return;
+ }
+
+ /* drop any previous override until a new copy succeeds */
+ psde->csc_usr_ptr = NULL;
+ if (!usr_ptr) {
+ SDE_DEBUG_PLANE(psde, "csc data removed\n");
+ return;
+ }
+
+ if (copy_from_user(&csc_v1, usr_ptr, sizeof(csc_v1))) {
+ SDE_ERROR_PLANE(psde, "failed to copy csc data\n");
+ return;
+ }
+
+ /* populate from user space; coefficients drop their low 16 bits */
+ for (idx = 0; idx < SDE_CSC_MATRIX_COEFF_SIZE; idx++)
+ psde->csc_cfg.csc_mv[idx] = csc_v1.ctm_coeff[idx] >> 16;
+ for (idx = 0; idx < SDE_CSC_BIAS_SIZE; idx++) {
+ psde->csc_cfg.csc_pre_bv[idx] = csc_v1.pre_bias[idx];
+ psde->csc_cfg.csc_post_bv[idx] = csc_v1.post_bias[idx];
+ }
+ for (idx = 0; idx < SDE_CSC_CLAMP_SIZE; idx++) {
+ psde->csc_cfg.csc_pre_lv[idx] = csc_v1.pre_clamp[idx];
+ psde->csc_cfg.csc_post_lv[idx] = csc_v1.post_clamp[idx];
+ }
+ psde->csc_usr_ptr = &psde->csc_cfg;
+}
+
+/*
+ * _sde_plane_set_scaler_v1 - cache a user-space v1 scaler config
+ * @psde: Pointer to SDE plane object
+ * @usr: User-space pointer to a struct sde_drm_scaler_v1, or NULL to clear
+ *
+ * Copies phase/filter and pixel-extension settings into psde->pixel_ext
+ * and sets psde->pixel_ext_usr when the full copy succeeds; any failure
+ * (or NULL input) leaves pixel_ext_usr false.
+ */
+static inline void _sde_plane_set_scaler_v1(struct sde_plane *psde, void *usr)
+{
+ struct sde_drm_scaler_v1 scale_v1;
+ struct sde_hw_pixel_ext *pe;
+ int i;
+
+ if (!psde) {
+ SDE_ERROR("invalid plane\n");
+ return;
+ }
+
+ /* clear first so a failed copy below leaves user scaling disabled */
+ psde->pixel_ext_usr = false;
+ if (!usr) {
+ SDE_DEBUG_PLANE(psde, "scale data removed\n");
+ return;
+ }
+
+ if (copy_from_user(&scale_v1, usr, sizeof(scale_v1))) {
+ SDE_ERROR_PLANE(psde, "failed to copy scale data\n");
+ return;
+ }
+
+ /* populate from user space */
+ pe = &(psde->pixel_ext);
+ memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
+ /* per-component phase and filter selection */
+ for (i = 0; i < SDE_MAX_PLANES; i++) {
+ pe->init_phase_x[i] = scale_v1.init_phase_x[i];
+ pe->phase_step_x[i] = scale_v1.phase_step_x[i];
+ pe->init_phase_y[i] = scale_v1.init_phase_y[i];
+ pe->phase_step_y[i] = scale_v1.phase_step_y[i];
+
+ pe->horz_filter[i] = scale_v1.horz_filter[i];
+ pe->vert_filter[i] = scale_v1.vert_filter[i];
+ }
+ /* per-component pixel-extension fetch/repeat and ROI settings */
+ for (i = 0; i < SDE_MAX_PLANES; i++) {
+ pe->left_ftch[i] = scale_v1.pe.left_ftch[i];
+ pe->right_ftch[i] = scale_v1.pe.right_ftch[i];
+ pe->left_rpt[i] = scale_v1.pe.left_rpt[i];
+ pe->right_rpt[i] = scale_v1.pe.right_rpt[i];
+ pe->roi_w[i] = scale_v1.pe.num_ext_pxls_lr[i];
+
+ pe->top_ftch[i] = scale_v1.pe.top_ftch[i];
+ pe->btm_ftch[i] = scale_v1.pe.btm_ftch[i];
+ pe->top_rpt[i] = scale_v1.pe.top_rpt[i];
+ pe->btm_rpt[i] = scale_v1.pe.btm_rpt[i];
+ pe->roi_h[i] = scale_v1.pe.num_ext_pxls_tb[i];
+ }
+
+ psde->pixel_ext_usr = true;
+
+ SDE_DEBUG_PLANE(psde, "user property data copied\n");
+}
+
+/*
+ * _sde_plane_set_scaler_v2 - cache a user-space QSEED3 scaler config
+ * @psde: Pointer to SDE plane object
+ * @pstate: Pointer to SDE plane state (currently unused)
+ * @usr: User-space pointer to a struct sde_drm_scaler_v2, or NULL to clear
+ *
+ * Copies the scaler3 and pixel-extension settings into psde->scaler3_cfg
+ * and psde->pixel_ext; pixel_ext_usr is set only when the full copy
+ * succeeds.
+ */
+static inline void _sde_plane_set_scaler_v2(struct sde_plane *psde,
+ struct sde_plane_state *pstate, void *usr)
+{
+ struct sde_drm_scaler_v2 scale_v2;
+ struct sde_hw_pixel_ext *pe;
+ int i;
+ struct sde_hw_scaler3_cfg *cfg;
+
+ if (!psde) {
+ SDE_ERROR("invalid plane\n");
+ return;
+ }
+
+ cfg = psde->scaler3_cfg;
+ psde->pixel_ext_usr = false;
+ if (!usr) {
+ SDE_DEBUG_PLANE(psde, "scale data removed\n");
+ return;
+ }
+
+ /*
+ * scaler3_cfg is only allocated in sde_plane_init() for pipes with
+ * the SDE_SSPP_SCALER_QSEED3 feature; reject the property here
+ * instead of dereferencing a NULL cfg pointer below.
+ */
+ if (!cfg) {
+ SDE_ERROR_PLANE(psde, "QSEED3 scaler not supported\n");
+ return;
+ }
+
+ if (copy_from_user(&scale_v2, usr, sizeof(scale_v2))) {
+ SDE_ERROR_PLANE(psde, "failed to copy scale data\n");
+ return;
+ }
+
+ /* populate from user space */
+ pe = &(psde->pixel_ext);
+ memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
+ cfg->enable = scale_v2.enable;
+ cfg->dir_en = scale_v2.dir_en;
+ /* per-component phase, preload and source dimensions */
+ for (i = 0; i < SDE_MAX_PLANES; i++) {
+ cfg->init_phase_x[i] = scale_v2.init_phase_x[i];
+ cfg->phase_step_x[i] = scale_v2.phase_step_x[i];
+ cfg->init_phase_y[i] = scale_v2.init_phase_y[i];
+ cfg->phase_step_y[i] = scale_v2.phase_step_y[i];
+
+ cfg->preload_x[i] = scale_v2.preload_x[i];
+ cfg->preload_y[i] = scale_v2.preload_y[i];
+ cfg->src_width[i] = scale_v2.src_width[i];
+ cfg->src_height[i] = scale_v2.src_height[i];
+ }
+ cfg->dst_width = scale_v2.dst_width;
+ cfg->dst_height = scale_v2.dst_height;
+
+ cfg->y_rgb_filter_cfg = scale_v2.y_rgb_filter_cfg;
+ cfg->uv_filter_cfg = scale_v2.uv_filter_cfg;
+ cfg->alpha_filter_cfg = scale_v2.alpha_filter_cfg;
+ cfg->blend_cfg = scale_v2.blend_cfg;
+
+ /* scaler LUT selection */
+ cfg->lut_flag = scale_v2.lut_flag;
+ cfg->dir_lut_idx = scale_v2.dir_lut_idx;
+ cfg->y_rgb_cir_lut_idx = scale_v2.y_rgb_cir_lut_idx;
+ cfg->uv_cir_lut_idx = scale_v2.uv_cir_lut_idx;
+ cfg->y_rgb_sep_lut_idx = scale_v2.y_rgb_sep_lut_idx;
+ cfg->uv_sep_lut_idx = scale_v2.uv_sep_lut_idx;
+
+ /* detail enhancer settings */
+ cfg->de.enable = scale_v2.de.enable;
+ cfg->de.sharpen_level1 = scale_v2.de.sharpen_level1;
+ cfg->de.sharpen_level2 = scale_v2.de.sharpen_level2;
+ cfg->de.clip = scale_v2.de.clip;
+ cfg->de.limit = scale_v2.de.limit;
+ cfg->de.thr_quiet = scale_v2.de.thr_quiet;
+ cfg->de.thr_dieout = scale_v2.de.thr_dieout;
+ cfg->de.thr_low = scale_v2.de.thr_low;
+ cfg->de.thr_high = scale_v2.de.thr_high;
+ cfg->de.prec_shift = scale_v2.de.prec_shift;
+ for (i = 0; i < SDE_MAX_DE_CURVES; i++) {
+ cfg->de.adjust_a[i] = scale_v2.de.adjust_a[i];
+ cfg->de.adjust_b[i] = scale_v2.de.adjust_b[i];
+ cfg->de.adjust_c[i] = scale_v2.de.adjust_c[i];
+ }
+ /* per-component pixel-extension fetch/repeat and ROI settings */
+ for (i = 0; i < SDE_MAX_PLANES; i++) {
+ pe->left_ftch[i] = scale_v2.pe.left_ftch[i];
+ pe->right_ftch[i] = scale_v2.pe.right_ftch[i];
+ pe->left_rpt[i] = scale_v2.pe.left_rpt[i];
+ pe->right_rpt[i] = scale_v2.pe.right_rpt[i];
+ pe->roi_w[i] = scale_v2.pe.num_ext_pxls_lr[i];
+
+ pe->top_ftch[i] = scale_v2.pe.top_ftch[i];
+ pe->btm_ftch[i] = scale_v2.pe.btm_ftch[i];
+ pe->top_rpt[i] = scale_v2.pe.top_rpt[i];
+ pe->btm_rpt[i] = scale_v2.pe.btm_rpt[i];
+ pe->roi_h[i] = scale_v2.pe.num_ext_pxls_tb[i];
+ }
+ psde->pixel_ext_usr = true;
+
+ SDE_DEBUG_PLANE(psde, "user property data copied\n");
+}
+
+/*
+ * sde_plane_atomic_set_property - atomic set-property callback
+ * @plane: Pointer to DRM plane object
+ * @state: Pointer to the plane state being built
+ * @property: Pointer to the DRM property being set
+ * @val: New property value (a user-space pointer for blob-like props)
+ *
+ * Stores the value via the msm_property helper, then runs per-property
+ * side effects (fence lookup, CSC/scaler user-data caching).
+ * Returns: 0 on success, -EINVAL on bad args or unknown property
+ */
+static int sde_plane_atomic_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val)
+{
+ struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
+ struct sde_plane_state *pstate;
+ int idx, ret = -EINVAL;
+
+ SDE_DEBUG_PLANE(psde, "\n");
+
+ if (!plane) {
+ SDE_ERROR("invalid plane\n");
+ } else if (!state) {
+ SDE_ERROR_PLANE(psde, "invalid state\n");
+ } else {
+ pstate = to_sde_plane_state(state);
+ ret = msm_property_atomic_set(&psde->property_info,
+ pstate->property_values, pstate->property_blobs,
+ property, val);
+ if (!ret) {
+ /* apply side effects for the properties that need
+ * more than a cached value
+ */
+ idx = msm_property_index(&psde->property_info,
+ property);
+ switch (idx) {
+ case PLANE_PROP_INPUT_FENCE:
+ _sde_plane_set_input_fence(psde, pstate, val);
+ break;
+ case PLANE_PROP_CSC_V1:
+ /* val carries a user-space pointer here */
+ _sde_plane_set_csc_v1(psde, (void *)val);
+ break;
+ case PLANE_PROP_SCALER_V1:
+ _sde_plane_set_scaler_v1(psde, (void *)val);
+ break;
+ case PLANE_PROP_SCALER_V2:
+ _sde_plane_set_scaler_v2(psde, pstate,
+ (void *)val);
+ break;
+ default:
+ /* nothing to do */
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * sde_plane_set_property - legacy (non-atomic) set-property callback
+ *
+ * Thin wrapper that applies the property against the plane's current
+ * state via the atomic path.
+ */
+static int sde_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val)
+{
+ SDE_DEBUG("\n");
+
+ return sde_plane_atomic_set_property(plane,
+ plane->state, property, val);
+}
+
+/*
+ * sde_plane_atomic_get_property - atomic get-property callback
+ * @plane: Pointer to DRM plane object
+ * @state: Pointer to the plane state to read from
+ * @property: Pointer to the DRM property being queried
+ * @val: Output location for the cached property value
+ *
+ * Returns: 0 on success, -EINVAL on bad args or unknown property
+ */
+static int sde_plane_atomic_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
+ struct sde_plane_state *pstate;
+ int ret = -EINVAL;
+
+ if (!plane) {
+ SDE_ERROR("invalid plane\n");
+ } else if (!state) {
+ SDE_ERROR("invalid state\n");
+ } else {
+ SDE_DEBUG_PLANE(psde, "\n");
+ pstate = to_sde_plane_state(state);
+ ret = msm_property_atomic_get(&psde->property_info,
+ pstate->property_values, pstate->property_blobs,
+ property, val);
+ }
+
+ return ret;
+}
+
+/*
+ * sde_plane_destroy - DRM plane destroy callback
+ * @plane: Pointer to DRM plane object
+ *
+ * Tears down QoS control, debugfs entries, property bookkeeping and the
+ * underlying SSPP hardware object, then frees the sde_plane allocation
+ * (including the QSEED3 scaler config, if one was allocated).
+ */
+static void sde_plane_destroy(struct drm_plane *plane)
+{
+ struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
+
+ SDE_DEBUG_PLANE(psde, "\n");
+
+ if (psde) {
+ _sde_plane_set_qos_ctrl(plane, false, SDE_PLANE_QOS_PANIC_CTRL);
+
+ debugfs_remove_recursive(psde->debugfs_root);
+
+ if (psde->blob_info)
+ drm_property_unreference_blob(psde->blob_info);
+ msm_property_destroy(&psde->property_info);
+ mutex_destroy(&psde->lock);
+
+ drm_plane_helper_disable(plane);
+
+ /* this will destroy the states as well */
+ drm_plane_cleanup(plane);
+
+ if (psde->pipe_hw)
+ sde_hw_sspp_destroy(psde->pipe_hw);
+
+ /*
+ * scaler3_cfg is allocated in sde_plane_init() for QSEED3
+ * pipes and was previously leaked here; kfree(NULL) is a
+ * no-op for pipes without it.
+ */
+ kfree(psde->scaler3_cfg);
+
+ kfree(psde);
+ }
+}
+
+/*
+ * sde_plane_destroy_state - DRM plane state destroy callback
+ * @plane: Pointer to DRM plane object
+ * @state: Pointer to the plane state being destroyed
+ *
+ * Drops the framebuffer and input-fence references held by the state,
+ * then releases the msm_property-managed state allocation.
+ */
+static void sde_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct sde_plane *psde;
+ struct sde_plane_state *pstate;
+
+ if (!plane || !state) {
+ SDE_ERROR("invalid arg(s), plane %d state %d\n",
+ plane != 0, state != 0);
+ return;
+ }
+
+ psde = to_sde_plane(plane);
+ pstate = to_sde_plane_state(state);
+
+ SDE_DEBUG_PLANE(psde, "\n");
+
+ /* remove ref count for frame buffers */
+ if (state->fb)
+ drm_framebuffer_unreference(state->fb);
+
+ /* remove ref count for fence */
+ if (pstate->input_fence)
+ sde_sync_put(pstate->input_fence);
+
+ /* destroy value helper */
+ msm_property_destroy_state(&psde->property_info, pstate,
+ pstate->property_values, pstate->property_blobs);
+}
+
+/*
+ * sde_plane_duplicate_state - DRM plane state duplicate callback
+ * @plane: Pointer to DRM plane object
+ *
+ * Clones the current plane state (values, blobs, fb reference) into a
+ * freshly allocated state. The input fence is deliberately NOT carried
+ * over - it is reset to the property default so each commit supplies
+ * its own fence. Dirty/pending flags start cleared.
+ * Returns: pointer to the new base state, or NULL on failure
+ */
+static struct drm_plane_state *
+sde_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct sde_plane *psde;
+ struct sde_plane_state *pstate;
+ struct sde_plane_state *old_state;
+ uint64_t input_fence_default;
+
+ if (!plane) {
+ SDE_ERROR("invalid plane\n");
+ return NULL;
+ } else if (!plane->state) {
+ SDE_ERROR("invalid plane state\n");
+ return NULL;
+ }
+
+ old_state = to_sde_plane_state(plane->state);
+ psde = to_sde_plane(plane);
+ pstate = msm_property_alloc_state(&psde->property_info);
+ if (!pstate) {
+ SDE_ERROR_PLANE(psde, "failed to allocate state\n");
+ return NULL;
+ }
+
+ SDE_DEBUG_PLANE(psde, "\n");
+
+ /* duplicate value helper */
+ msm_property_duplicate_state(&psde->property_info, old_state, pstate,
+ pstate->property_values, pstate->property_blobs);
+
+ /* add ref count for frame buffer */
+ if (pstate->base.fb)
+ drm_framebuffer_reference(pstate->base.fb);
+
+ /* clear out any input fence */
+ pstate->input_fence = 0;
+ input_fence_default = msm_property_get_default(
+ &psde->property_info, PLANE_PROP_INPUT_FENCE);
+ msm_property_set_property(&psde->property_info, pstate->property_values,
+ PLANE_PROP_INPUT_FENCE, input_fence_default);
+
+ pstate->dirty = 0x0;
+ pstate->pending = false;
+
+ return &pstate->base;
+}
+
+/*
+ * sde_plane_reset - DRM plane reset callback
+ * @plane: Pointer to DRM plane object
+ *
+ * Frees any existing plane state and installs a freshly allocated state
+ * initialized to the property defaults.
+ */
+static void sde_plane_reset(struct drm_plane *plane)
+{
+ struct sde_plane *psde;
+ struct sde_plane_state *pstate;
+
+ if (!plane) {
+ SDE_ERROR("invalid plane\n");
+ return;
+ }
+
+ psde = to_sde_plane(plane);
+ SDE_DEBUG_PLANE(psde, "\n");
+
+ /* remove previous state, if present */
+ if (plane->state) {
+ sde_plane_destroy_state(plane, plane->state);
+ plane->state = 0;
+ }
+
+ pstate = msm_property_alloc_state(&psde->property_info);
+ if (!pstate) {
+ SDE_ERROR_PLANE(psde, "failed to allocate state\n");
+ return;
+ }
+
+ /* reset value helper */
+ msm_property_reset_state(&psde->property_info, pstate,
+ pstate->property_values, pstate->property_blobs);
+
+ pstate->base.plane = plane;
+
+ plane->state = &pstate->base;
+}
+
+/* DRM plane vtable: atomic helpers plus SDE-specific property handling */
+static const struct drm_plane_funcs sde_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = sde_plane_destroy,
+ .set_property = sde_plane_set_property,
+ .atomic_set_property = sde_plane_atomic_set_property,
+ .atomic_get_property = sde_plane_atomic_get_property,
+ .reset = sde_plane_reset,
+ .atomic_duplicate_state = sde_plane_duplicate_state,
+ .atomic_destroy_state = sde_plane_destroy_state,
+};
+
+/* atomic helper hooks for fb prep/cleanup and check/update phases */
+static const struct drm_plane_helper_funcs sde_plane_helper_funcs = {
+ .prepare_fb = sde_plane_prepare_fb,
+ .cleanup_fb = sde_plane_cleanup_fb,
+ .atomic_check = sde_plane_atomic_check,
+ .atomic_update = sde_plane_atomic_update,
+};
+
+/* return the SSPP id backing @plane, or SSPP_NONE if @plane is NULL */
+enum sde_sspp sde_plane_pipe(struct drm_plane *plane)
+{
+ if (!plane)
+ return SSPP_NONE;
+
+ return to_sde_plane(plane)->pipe;
+}
+
+/*
+ * _sde_plane_danger_read - debugfs read for the "disable_danger" node
+ *
+ * Reports the inverse of kms->has_danger_ctrl, i.e. "1" means danger
+ * signalling is currently disabled. Single-shot read: a non-zero *ppos
+ * returns EOF.
+ */
+static ssize_t _sde_plane_danger_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct sde_kms *kms = file->private_data;
+ struct sde_mdss_cfg *cfg = kms->catalog;
+ int len = 0;
+ char buf[40] = {'\0'};
+
+ if (!cfg)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ /* requires the caller to supply a buffer of at least sizeof(buf) */
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+/*
+ * _sde_plane_set_danger_state - toggle danger signalling on active planes
+ * @kms: Pointer to SDE kms object
+ * @enable: true to enable danger/panic signalling, false to disable
+ *
+ * Walks all planes on the device; planes with both an fb and a state are
+ * considered active and get the new setting, others are only logged.
+ */
+static void _sde_plane_set_danger_state(struct sde_kms *kms, bool enable)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, kms->dev) {
+ if (plane->fb && plane->state) {
+ sde_plane_danger_signal_ctrl(plane, enable);
+ SDE_DEBUG("plane:%d img:%dx%d ",
+ plane->base.id, plane->fb->width,
+ plane->fb->height);
+ SDE_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->crtc_w, plane->state->crtc_h);
+ } else {
+ SDE_DEBUG("Inactive plane:%d\n", plane->base.id);
+ }
+ }
+}
+
+/*
+ * _sde_plane_danger_write - debugfs write for the "disable_danger" node
+ *
+ * Accepts an integer: non-zero disables danger signalling on all active
+ * planes, zero re-enables it. Note the ordering: on disable the planes
+ * are updated before the flag is cleared, on enable the flag is set
+ * before the planes are updated - presumably so new planes observing
+ * has_danger_ctrl never race to a stale value; verify against callers.
+ */
+static ssize_t _sde_plane_danger_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct sde_kms *kms = file->private_data;
+ struct sde_mdss_cfg *cfg = kms->catalog;
+ int disable_panic;
+ char buf[10];
+
+ if (!cfg)
+ return -EFAULT;
+
+ /* leave room for the NUL terminator appended below */
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtoint(buf, 0, &disable_panic))
+ return -EFAULT;
+
+ if (disable_panic) {
+ /* Disable panic signal for all active pipes */
+ SDE_DEBUG("Disabling danger:\n");
+ _sde_plane_set_danger_state(kms, false);
+ kms->has_danger_ctrl = false;
+ } else {
+ /* Enable panic signal for all active pipes */
+ SDE_DEBUG("Enabling danger:\n");
+ kms->has_danger_ctrl = true;
+ _sde_plane_set_danger_state(kms, true);
+ }
+
+ return count;
+}
+
+/* file ops for the per-device "disable_danger" debugfs node */
+static const struct file_operations sde_plane_danger_enable = {
+ .open = simple_open,
+ .read = _sde_plane_danger_read,
+ .write = _sde_plane_danger_write,
+};
+
+/*
+ * _sde_plane_init_debugfs - create debugfs entries for a plane
+ * @psde: Pointer to SDE plane object
+ * @kms: Pointer to SDE kms object
+ *
+ * Creates a per-pipe directory containing feature flags, register dump
+ * regsets for the src/scaler/csc sub-blocks, catalog values and the
+ * shared "disable_danger" control. All failures are intentionally
+ * ignored - debugfs is best-effort.
+ */
+static void _sde_plane_init_debugfs(struct sde_plane *psde, struct sde_kms *kms)
+{
+ const struct sde_sspp_sub_blks *sblk = 0;
+ const struct sde_sspp_cfg *cfg = 0;
+
+ if (psde && psde->pipe_hw)
+ cfg = psde->pipe_hw->cap;
+ if (cfg)
+ sblk = cfg->sblk;
+
+ if (kms && sblk) {
+ /* create overall sub-directory for the pipe */
+ psde->debugfs_root =
+ debugfs_create_dir(psde->pipe_name,
+ sde_debugfs_get_root(kms));
+ if (psde->debugfs_root) {
+ /* don't error check these */
+ debugfs_create_x32("features", S_IRUGO | S_IWUSR,
+ psde->debugfs_root, &psde->features);
+
+ /* add register dump support */
+ sde_debugfs_setup_regset32(&psde->debugfs_src,
+ sblk->src_blk.base + cfg->base,
+ sblk->src_blk.len,
+ kms);
+ sde_debugfs_create_regset32("src_blk", S_IRUGO,
+ psde->debugfs_root, &psde->debugfs_src);
+
+ sde_debugfs_setup_regset32(&psde->debugfs_scaler,
+ sblk->scaler_blk.base + cfg->base,
+ sblk->scaler_blk.len,
+ kms);
+ sde_debugfs_create_regset32("scaler_blk", S_IRUGO,
+ psde->debugfs_root,
+ &psde->debugfs_scaler);
+
+ sde_debugfs_setup_regset32(&psde->debugfs_csc,
+ sblk->csc_blk.base + cfg->base,
+ sblk->csc_blk.len,
+ kms);
+ sde_debugfs_create_regset32("csc_blk", S_IRUGO,
+ psde->debugfs_root, &psde->debugfs_csc);
+
+ /* casts drop const on catalog fields for debugfs;
+ * the x32 nodes below are intentionally writable
+ */
+ debugfs_create_u32("xin_id",
+ S_IRUGO,
+ psde->debugfs_root,
+ (u32 *) &cfg->xin_id);
+ debugfs_create_u32("clk_ctrl",
+ S_IRUGO,
+ psde->debugfs_root,
+ (u32 *) &cfg->clk_ctrl);
+ debugfs_create_x32("creq_vblank",
+ S_IRUGO | S_IWUSR,
+ psde->debugfs_root,
+ (u32 *) &sblk->creq_vblank);
+ debugfs_create_x32("danger_vblank",
+ S_IRUGO | S_IWUSR,
+ psde->debugfs_root,
+ (u32 *) &sblk->danger_vblank);
+
+ debugfs_create_file("disable_danger",
+ S_IRUGO | S_IWUSR,
+ psde->debugfs_root,
+ kms, &sde_plane_danger_enable);
+ }
+ }
+}
+
+/**
+ * sde_plane_init - create and initialize a DRM plane for the given SSPP
+ * @dev: Pointer to DRM device
+ * @pipe: sde hardware pipe identifier
+ * @primary_plane: true if this pipe should back a primary plane
+ * @possible_crtcs: bitmask of crtcs that may drive this plane
+ *
+ * Returns: pointer to the new drm_plane on success, ERR_PTR on failure
+ */
+struct drm_plane *sde_plane_init(struct drm_device *dev,
+ uint32_t pipe, bool primary_plane,
+ unsigned long possible_crtcs)
+{
+ struct drm_plane *plane = NULL;
+ struct sde_plane *psde;
+ struct msm_drm_private *priv;
+ struct sde_kms *kms;
+ enum drm_plane_type type;
+ int ret = -EINVAL;
+
+ if (!dev) {
+ SDE_ERROR("[%u]device is NULL\n", pipe);
+ goto exit;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ SDE_ERROR("[%u]private data is NULL\n", pipe);
+ goto exit;
+ }
+
+ if (!priv->kms) {
+ SDE_ERROR("[%u]invalid KMS reference\n", pipe);
+ goto exit;
+ }
+ kms = to_sde_kms(priv->kms);
+
+ if (!kms->catalog) {
+ SDE_ERROR("[%u]invalid catalog reference\n", pipe);
+ goto exit;
+ }
+
+ /* create and zero local structure */
+ psde = kzalloc(sizeof(*psde), GFP_KERNEL);
+ if (!psde) {
+ SDE_ERROR("[%u]failed to allocate local plane struct\n", pipe);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* cache local stuff for later */
+ plane = &psde->base;
+ psde->pipe = pipe;
+ psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
+
+ /* initialize underlying h/w driver */
+ psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, kms->catalog);
+ if (IS_ERR(psde->pipe_hw)) {
+ SDE_ERROR("[%u]SSPP init failed\n", pipe);
+ ret = PTR_ERR(psde->pipe_hw);
+ goto clean_plane;
+ } else if (!psde->pipe_hw->cap || !psde->pipe_hw->cap->sblk) {
+ SDE_ERROR("[%u]SSPP init returned invalid cfg\n", pipe);
+ goto clean_sspp;
+ }
+
+ /* cache features mask for later */
+ psde->features = psde->pipe_hw->cap->features;
+ psde->pipe_sblk = psde->pipe_hw->cap->sblk;
+ if (!psde->pipe_sblk) {
+ SDE_ERROR("[%u]invalid sblk\n", pipe);
+ goto clean_sspp;
+ }
+
+ /* scaler3_cfg is only needed by QSEED3-capable pipes */
+ if (psde->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+ psde->scaler3_cfg = kzalloc(sizeof(struct sde_hw_scaler3_cfg),
+ GFP_KERNEL);
+ if (!psde->scaler3_cfg) {
+ SDE_ERROR("[%u]failed to allocate scale struct\n",
+ pipe);
+ ret = -ENOMEM;
+ goto clean_sspp;
+ }
+ }
+
+ /* add plane to DRM framework */
+ psde->nformats = sde_populate_formats(psde->pipe_sblk->format_list,
+ psde->formats,
+ 0,
+ ARRAY_SIZE(psde->formats));
+
+ if (!psde->nformats) {
+ SDE_ERROR("[%u]no valid formats for plane\n", pipe);
+ goto clean_sspp;
+ }
+
+ if (psde->features & BIT(SDE_SSPP_CURSOR))
+ type = DRM_PLANE_TYPE_CURSOR;
+ else if (primary_plane)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+ ret = drm_universal_plane_init(dev, plane, possible_crtcs,
+ &sde_plane_funcs, psde->formats, psde->nformats, type);
+ if (ret)
+ goto clean_sspp;
+
+ /* success! finalize initialization */
+ drm_plane_helper_add(plane, &sde_plane_helper_funcs);
+
+ msm_property_init(&psde->property_info, &plane->base, dev,
+ priv->plane_property, psde->property_data,
+ PLANE_PROP_COUNT, PLANE_PROP_BLOBCOUNT,
+ sizeof(struct sde_plane_state));
+
+ _sde_plane_install_properties(plane, kms->catalog);
+
+ /* save user friendly pipe name for later */
+ snprintf(psde->pipe_name, SDE_NAME_SIZE, "plane%u", plane->base.id);
+
+ mutex_init(&psde->lock);
+
+ _sde_plane_init_debugfs(psde, kms);
+
+ DRM_INFO("%s created for pipe %u\n", psde->pipe_name, pipe);
+ return plane;
+
+clean_sspp:
+ /* psde is always valid here: only reached after allocation succeeds */
+ if (psde->pipe_hw)
+ sde_hw_sspp_destroy(psde->pipe_hw);
+
+ /* kfree(NULL) is a no-op, no need to guard scaler3_cfg */
+ kfree(psde->scaler3_cfg);
+clean_plane:
+ kfree(psde);
+exit:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
new file mode 100644
index 000000000000..1514f633c61e
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SDE_PLANE_H_
+#define _SDE_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "msm_prop.h"
+#include "sde_hw_mdss.h"
+
+/**
+ * struct sde_plane_state: Define sde extension of drm plane state object
+ * @base: base drm plane state object
+ * @property_values: cached plane property values
+ * @property_blobs: blob properties
+ * @input_fence: dereferenced input fence pointer
+ * @stage: assigned by crtc blender
+ * @dirty: bitmask for which pipe h/w config functions need to be updated
+ * @pending: whether the current update is still pending
+ */
+struct sde_plane_state {
+ struct drm_plane_state base;
+ uint64_t property_values[PLANE_PROP_COUNT];
+ struct drm_property_blob *property_blobs[PLANE_PROP_BLOBCOUNT];
+ void *input_fence;
+ enum sde_stage stage;
+ uint32_t dirty;
+ bool pending;
+};
+
+#define to_sde_plane_state(x) \
+ container_of(x, struct sde_plane_state, base)
+
+/**
+ * sde_plane_get_property - Query integer value of plane property
+ * @S: Pointer to plane state
+ * @X: Property index, from enum msm_mdp_plane_property
+ * Returns: Integer value of requested property
+ */
+#define sde_plane_get_property(S, X) \
+ ((S) && ((X) < PLANE_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
+
+/**
+ * sde_plane_pipe - return sspp identifier for the given plane
+ * @plane: Pointer to DRM plane object
+ * Returns: sspp identifier of the given plane
+ */
+enum sde_sspp sde_plane_pipe(struct drm_plane *plane);
+
+/**
+ * sde_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void sde_plane_flush(struct drm_plane *plane);
+
+/**
+ * sde_plane_init - create new sde plane for the given pipe
+ * @dev: Pointer to DRM device
+ * @pipe: sde hardware pipe identifier
+ * @primary_plane: true if this pipe is primary plane for crtc
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ */
+struct drm_plane *sde_plane_init(struct drm_device *dev,
+ uint32_t pipe, bool primary_plane,
+ unsigned long possible_crtcs);
+
+/**
+ * sde_plane_wait_input_fence - wait for input fence object
+ * @plane: Pointer to DRM plane object
+ * @wait_ms: Wait timeout value
+ * Returns: Zero on success
+ */
+int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
+
+/**
+ * sde_plane_color_fill - enables color fill on plane
+ * @plane: Pointer to DRM plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int sde_plane_color_fill(struct drm_plane *plane,
+ uint32_t color, uint32_t alpha);
+
+#endif /* _SDE_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
new file mode 100644
index 000000000000..1d27b27d265c
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -0,0 +1,1262 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
+#include "sde_kms.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_ctl.h"
+#include "sde_hw_cdm.h"
+#include "sde_hw_dspp.h"
+#include "sde_hw_pingpong.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_wb.h"
+#include "sde_encoder.h"
+#include "sde_connector.h"
+
+#define RESERVED_BY_OTHER(h, r) \
+ ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
+
+#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK))
+#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_CLEAR))
+#define RM_RQ_DSPP(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DSPP))
+#define RM_RQ_PPSPLIT(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_PPSPLIT))
+#define RM_RQ_FORCE_TILING(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_FORCE_TILING))
+
+/**
+ * struct sde_rm_requirements - Reservation requirements parameter bundle
+ * @top_name: DRM<->HW topology use case user is trying to enable
+ * @top_ctrl: Bitmask of SDE_RM_TOPCTL_* user requests (see RM_RQ_* macros)
+ * @num_lm: Number of layer mixers needed in the use case
+ * @num_ctl: Number of control paths needed in the use case
+ * @needs_split_display: Whether the use case requires split display
+ * @hw_res: Hardware resources required as reported by the encoders
+ */
+struct sde_rm_requirements {
+ enum sde_rm_topology_name top_name;
+ uint64_t top_ctrl;
+ int num_lm;
+ int num_ctl;
+ bool needs_split_display;
+ struct sde_encoder_hw_resources hw_res;
+};
+
+/**
+ * struct sde_rm_rsvp - Use Case Reservation tagging structure
+ * Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
+ * By using as a tag, rather than lists of pointers to HW blocks used
+ * we can avoid some list management since we don't know how many blocks
+ * of each type a given use case may require.
+ * @list: List head for list of all reservations
+ * @seq: Global RSVP sequence number for debugging, especially for
+ * differentiating differenct allocations for same encoder.
+ * @enc_id: Reservations are tracked by Encoder DRM object ID.
+ * CRTCs may be connected to multiple Encoders.
+ * An encoder or connector id identifies the display path.
+ * @topology: DRM<->HW topology use case
+ */
+struct sde_rm_rsvp {
+ struct list_head list;
+ uint32_t seq;
+ uint32_t enc_id;
+ enum sde_rm_topology_name topology;
+};
+
+/**
+ * struct sde_rm_hw_blk - hardware block tracking list member
+ * @list: List head for list of all hardware blocks tracking items
+ * @rsvp: Pointer to use case reservation if reserved by a client
+ * @rsvp_nxt: Temporary pointer used during reservation to the incoming
+ * request. Will be swapped into rsvp if proposal is accepted
+ * @type: Type of hardware block this structure tracks
+ * @type_name: Printable name of the block type, used for logging
+ * @id: Hardware ID number, within it's own space, ie. LM_X
+ * @catalog: Pointer to the hardware catalog entry for this block
+ * @hw: Pointer to the hardware register access object for this block
+ */
+struct sde_rm_hw_blk {
+ struct list_head list;
+ struct sde_rm_rsvp *rsvp;
+ struct sde_rm_rsvp *rsvp_nxt;
+ enum sde_hw_blk_type type;
+ const char *type_name;
+ uint32_t id;
+ void *catalog;
+ void *hw;
+};
+
+/**
+ * sde_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging
+ */
+enum sde_rm_dbg_rsvp_stage {
+ SDE_RM_STAGE_BEGIN,
+ SDE_RM_STAGE_AFTER_CLEAR,
+ SDE_RM_STAGE_AFTER_RSVPNEXT,
+ SDE_RM_STAGE_FINAL
+};
+
+/*
+ * _sde_rm_print_rsvps - dump all reservations and tagged blocks
+ * @rm: Pointer to resource manager structure
+ * @stage: Which step of the reservation sequence this dump is from
+ *
+ * Logs every reservation, then every hw block that is tagged by a
+ * current or proposed (rsvp_nxt) reservation, to both the debug log
+ * and the event log.
+ */
+static void _sde_rm_print_rsvps(
+ struct sde_rm *rm,
+ enum sde_rm_dbg_rsvp_stage stage)
+{
+ struct sde_rm_rsvp *rsvp;
+ struct sde_rm_hw_blk *blk;
+ enum sde_hw_blk_type type;
+
+ SDE_DEBUG("%d\n", stage);
+
+ list_for_each_entry(rsvp, &rm->rsvps, list) {
+ SDE_DEBUG("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
+ rsvp->enc_id, rsvp->topology);
+ SDE_EVT32(stage, rsvp->seq, rsvp->enc_id, rsvp->topology);
+ }
+
+ for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ /* skip blocks that no reservation touches */
+ if (!blk->rsvp && !blk->rsvp_nxt)
+ continue;
+
+ SDE_DEBUG("%d rsvp[s%ue%u->s%ue%u] %s %d\n", stage,
+ (blk->rsvp) ? blk->rsvp->seq : 0,
+ (blk->rsvp) ? blk->rsvp->enc_id : 0,
+ (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
+ (blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
+ blk->type_name, blk->id);
+
+ SDE_EVT32(stage,
+ (blk->rsvp) ? blk->rsvp->seq : 0,
+ (blk->rsvp) ? blk->rsvp->enc_id : 0,
+ (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
+ (blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
+ blk->type, blk->id);
+ }
+ }
+}
+
+/* accessor for the singleton MDP TOP hw object owned by the rm */
+struct sde_hw_mdp *sde_rm_get_mdp(struct sde_rm *rm)
+{
+ return rm->hw_mdp;
+}
+
+/* reset @iter to begin a fresh hw-block walk for @enc_id and @type */
+void sde_rm_init_hw_iter(
+ struct sde_rm_hw_iter *iter,
+ uint32_t enc_id,
+ enum sde_hw_blk_type type)
+{
+ *iter = (struct sde_rm_hw_iter) {
+ .enc_id = enc_id,
+ .type = type,
+ };
+}
+
+/*
+ * sde_rm_get_hw - advance a hw-block iterator to the next match
+ * @rm: Pointer to resource manager structure
+ * @i: Iterator previously set up with sde_rm_init_hw_iter()
+ *
+ * An enc_id of 0 matches every block of the requested type; otherwise
+ * only blocks reserved for that encoder match. On a match i->hw points
+ * at the block's hardware object.
+ * Returns: true if a matching block was found, false at end or on error
+ */
+bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+{
+ struct list_head *blk_list;
+
+ if (!rm || !i || i->type >= SDE_HW_BLK_MAX) {
+ SDE_ERROR("invalid rm\n");
+ return false;
+ }
+
+ i->hw = NULL;
+ blk_list = &rm->hw_blks[i->type];
+
+ /* catch callers that keep iterating after the list was exhausted */
+ if (i->blk && (&i->blk->list == blk_list)) {
+ SDE_ERROR("attempt resume iteration past last\n");
+ return false;
+ }
+
+ /* resume from the previous position, or start at the list head */
+ i->blk = list_prepare_entry(i->blk, blk_list, list);
+
+ list_for_each_entry_continue(i->blk, blk_list, list) {
+ struct sde_rm_rsvp *rsvp = i->blk->rsvp;
+
+ if (i->blk->type != i->type) {
+ SDE_ERROR("found incorrect block type %d on %d list\n",
+ i->blk->type, i->type);
+ return false;
+ }
+
+ if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+ i->hw = i->blk->hw;
+ SDE_DEBUG("found type %d %s id %d for enc %d\n",
+ i->type, i->blk->type_name, i->blk->id,
+ i->enc_id);
+ return true;
+ }
+ }
+
+ SDE_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
+
+ return false;
+}
+
+/*
+ * _sde_rm_hw_destroy - destroy a hardware object by block type
+ * @type: Type of the hardware block
+ * @hw: Opaque pointer previously returned by the matching *_init()
+ *
+ * Dispatches to the per-block destroy routine; SSPP/TOP are logged as
+ * errors because they are never placed on the rm's hw_blks lists.
+ */
+static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
+{
+ switch (type) {
+ case SDE_HW_BLK_LM:
+ sde_hw_lm_destroy(hw);
+ break;
+ case SDE_HW_BLK_DSPP:
+ sde_hw_dspp_destroy(hw);
+ break;
+ case SDE_HW_BLK_CTL:
+ sde_hw_ctl_destroy(hw);
+ break;
+ case SDE_HW_BLK_CDM:
+ sde_hw_cdm_destroy(hw);
+ break;
+ case SDE_HW_BLK_PINGPONG:
+ sde_hw_pingpong_destroy(hw);
+ break;
+ case SDE_HW_BLK_INTF:
+ sde_hw_intf_destroy(hw);
+ break;
+ case SDE_HW_BLK_WB:
+ sde_hw_wb_destroy(hw);
+ break;
+ case SDE_HW_BLK_SSPP:
+ /* SSPPs are not managed by the resource manager */
+ case SDE_HW_BLK_TOP:
+ /* Top is a singleton, not managed in hw_blks list */
+ case SDE_HW_BLK_MAX:
+ default:
+ SDE_ERROR("unsupported block type %d\n", type);
+ break;
+ }
+}
+
+/*
+ * sde_rm_destroy - free all rm-owned reservations and hardware objects
+ * @rm: Pointer to resource manager structure
+ *
+ * Releases every reservation, every tracked hw block (and its hardware
+ * object), and finally the MDP TOP singleton.
+ * Returns: 0 on success, -EINVAL if rm is NULL
+ */
+int sde_rm_destroy(struct sde_rm *rm)
+{
+
+ struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
+ struct sde_rm_hw_blk *hw_cur, *hw_nxt;
+ enum sde_hw_blk_type type;
+
+ if (!rm) {
+ SDE_ERROR("invalid rm\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
+ list_del(&rsvp_cur->list);
+ kfree(rsvp_cur);
+ }
+
+
+ for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+ list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
+ list) {
+ list_del(&hw_cur->list);
+ _sde_rm_hw_destroy(hw_cur->type, hw_cur->hw);
+ kfree(hw_cur);
+ }
+ }
+
+ sde_hw_mdp_destroy(rm->hw_mdp);
+ rm->hw_mdp = NULL;
+
+ return 0;
+}
+
+/*
+ * _sde_rm_hw_blk_create - init a hw object and add a tracking item for it
+ * @rm: Pointer to resource manager structure
+ * @cat: Pointer to hardware catalog
+ * @mmio: Mapped register io address
+ * @type: Type of hardware block
+ * @id: Hardware block id (within its own id space)
+ * @hw_catalog_info: Pointer to the block's catalog entry
+ *
+ * Initializes the hardware object for the given block type/id and
+ * appends a tracking entry to the rm's per-type list. On tracking
+ * allocation failure the hardware object is destroyed again.
+ * Returns: 0 on success, negative errno on failure
+ */
+static int _sde_rm_hw_blk_create(
+ struct sde_rm *rm,
+ struct sde_mdss_cfg *cat,
+ void *mmio,
+ enum sde_hw_blk_type type,
+ uint32_t id,
+ void *hw_catalog_info)
+{
+ struct sde_rm_hw_blk *blk;
+ struct sde_hw_mdp *hw_mdp;
+ const char *name;
+ void *hw;
+
+ /* CDM and WB need the mdptop handle at init time */
+ hw_mdp = rm->hw_mdp;
+
+ switch (type) {
+ case SDE_HW_BLK_LM:
+ hw = sde_hw_lm_init(id, mmio, cat);
+ name = "lm";
+ break;
+ case SDE_HW_BLK_DSPP:
+ hw = sde_hw_dspp_init(id, mmio, cat);
+ name = "dspp";
+ break;
+ case SDE_HW_BLK_CTL:
+ hw = sde_hw_ctl_init(id, mmio, cat);
+ name = "ctl";
+ break;
+ case SDE_HW_BLK_CDM:
+ hw = sde_hw_cdm_init(id, mmio, cat, hw_mdp);
+ name = "cdm";
+ break;
+ case SDE_HW_BLK_PINGPONG:
+ hw = sde_hw_pingpong_init(id, mmio, cat);
+ name = "pp";
+ break;
+ case SDE_HW_BLK_INTF:
+ hw = sde_hw_intf_init(id, mmio, cat);
+ name = "intf";
+ break;
+ case SDE_HW_BLK_WB:
+ hw = sde_hw_wb_init(id, mmio, cat, hw_mdp);
+ name = "wb";
+ break;
+ case SDE_HW_BLK_SSPP:
+ /* SSPPs are not managed by the resource manager */
+ case SDE_HW_BLK_TOP:
+ /* Top is a singleton, not managed in hw_blks list */
+ case SDE_HW_BLK_MAX:
+ default:
+ SDE_ERROR("unsupported block type %d\n", type);
+ return -EINVAL;
+ }
+
+ if (IS_ERR_OR_NULL(hw)) {
+ SDE_ERROR("failed hw object creation: type %d, err %ld\n",
+ type, PTR_ERR(hw));
+ return -EFAULT;
+ }
+
+ blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+ if (!blk) {
+ /* don't leak the hw object if tracking allocation fails */
+ _sde_rm_hw_destroy(type, hw);
+ return -ENOMEM;
+ }
+
+ blk->type_name = name;
+ blk->type = type;
+ blk->id = id;
+ blk->catalog = hw_catalog_info;
+ blk->hw = hw;
+ list_add_tail(&blk->list, &rm->hw_blks[type]);
+
+ return 0;
+}
+
+/**
+ * sde_rm_init - interrogate the hw catalog and create tracking objects for
+ *	all hardware blocks (implementation of sde_rm_init() in sde_rm.h)
+ * @rm: SDE Resource Manager handle, cleared and (re)initialized here
+ * @cat: pointer to the hardware catalog describing available blocks
+ * @mmio: mapped register io address of MDP
+ * @dev: device handle for event logging purposes, cached in @rm
+ * @Return: 0 on success, negative errno otherwise; all partially created
+ *	objects are torn down via sde_rm_destroy() on the failure path
+ */
+int sde_rm_init(struct sde_rm *rm,
+ struct sde_mdss_cfg *cat,
+ void *mmio,
+ struct drm_device *dev)
+{
+ int rc, i;
+ enum sde_hw_blk_type type;
+
+ if (!rm || !cat || !mmio || !dev) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ /* Clear, setup lists */
+ memset(rm, 0, sizeof(*rm));
+ INIT_LIST_HEAD(&rm->rsvps);
+ for (type = 0; type < SDE_HW_BLK_MAX; type++)
+ INIT_LIST_HEAD(&rm->hw_blks[type]);
+
+ /* Cache device handle for event logging (struct sde_rm::dev) */
+ rm->dev = dev;
+
+ /* Some of the sub-blocks require an mdptop to be created */
+ rm->hw_mdp = sde_hw_mdptop_init(MDP_TOP, mmio, cat);
+ if (IS_ERR_OR_NULL(rm->hw_mdp)) {
+ /*
+ * PTR_ERR(NULL) evaluates to 0, so map a NULL return to a
+ * real error code; otherwise this function could return
+ * "success" with no mdp top object.
+ */
+ rc = rm->hw_mdp ? PTR_ERR(rm->hw_mdp) : -EINVAL;
+ rm->hw_mdp = NULL;
+ SDE_ERROR("failed: mdp hw not available\n");
+ goto fail;
+ }
+
+ /* Interrogate HW catalog and create tracking items for hw blocks */
+ for (i = 0; i < cat->mixer_count; i++) {
+ struct sde_lm_cfg *lm = &cat->mixer[i];
+
+ if (lm->pingpong == PINGPONG_MAX) {
+ SDE_DEBUG("skip mixer %d without pingpong\n", lm->id);
+ continue;
+ }
+
+ rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_LM,
+ cat->mixer[i].id, &cat->mixer[i]);
+ if (rc) {
+ SDE_ERROR("failed: lm hw not available\n");
+ goto fail;
+ }
+
+ if (!rm->lm_max_width) {
+ rm->lm_max_width = lm->sblk->maxwidth;
+ } else if (rm->lm_max_width != lm->sblk->maxwidth) {
+ /*
+ * Don't expect to have hw where lm max widths differ.
+ * If found, take the min.
+ */
+ SDE_ERROR("unsupported: lm maxwidth differs\n");
+ if (rm->lm_max_width > lm->sblk->maxwidth)
+ rm->lm_max_width = lm->sblk->maxwidth;
+ }
+ }
+
+ for (i = 0; i < cat->dspp_count; i++) {
+ rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_DSPP,
+ cat->dspp[i].id, &cat->dspp[i]);
+ if (rc) {
+ SDE_ERROR("failed: dspp hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->pingpong_count; i++) {
+ rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_PINGPONG,
+ cat->pingpong[i].id, &cat->pingpong[i]);
+ if (rc) {
+ SDE_ERROR("failed: pp hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->intf_count; i++) {
+ if (cat->intf[i].type == INTF_NONE) {
+ SDE_DEBUG("skip intf %d with type none\n", i);
+ continue;
+ }
+
+ rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_INTF,
+ cat->intf[i].id, &cat->intf[i]);
+ if (rc) {
+ SDE_ERROR("failed: intf hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->wb_count; i++) {
+ rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_WB,
+ cat->wb[i].id, &cat->wb[i]);
+ if (rc) {
+ SDE_ERROR("failed: wb hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->ctl_count; i++) {
+ rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CTL,
+ cat->ctl[i].id, &cat->ctl[i]);
+ if (rc) {
+ SDE_ERROR("failed: ctl hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->cdm_count; i++) {
+ rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CDM,
+ cat->cdm[i].id, &cat->cdm[i]);
+ if (rc) {
+ SDE_ERROR("failed: cdm hw not available\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ sde_rm_destroy(rm);
+
+ return rc;
+}
+
+/**
+ * _sde_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
+ * proposed use case requirements, incl. hardwired dependent blocks like
+ * pingpong, and dspp.
+ * @rm: sde resource manager handle
+ * @rsvp: reservation currently being created
+ * @reqs: proposed use case requirements
+ * @lm: proposed layer mixer, function checks if lm, and all other hardwired
+ * blocks connected to the lm (pp, dspp) are available and appropriate
+ * @dspp: output parameter, dspp block attached to the layer mixer.
+ * NULL if dspp was not available, or not matching requirements.
+ * @pp: output parameter, pingpong block attached to the layer mixer.
+ * NULL if pingpong was not available, or not matching requirements.
+ * @primary_lm: if non-null, this function check if lm is compatible primary_lm
+ * as well as satisfying all other requirements
+ * @Return: true if lm matches all requirements, false otherwise
+ */
+static bool _sde_rm_check_lm_and_get_connected_blks(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ struct sde_rm_requirements *reqs,
+ struct sde_rm_hw_blk *lm,
+ struct sde_rm_hw_blk **dspp,
+ struct sde_rm_hw_blk **pp,
+ struct sde_rm_hw_blk *primary_lm)
+{
+ struct sde_lm_cfg *lm_cfg = (struct sde_lm_cfg *)lm->catalog;
+ struct sde_pingpong_cfg *pp_cfg;
+ struct sde_rm_hw_iter iter;
+
+ *dspp = NULL;
+ *pp = NULL;
+
+ SDE_DEBUG("check lm %d: dspp %d pp %d\n", lm_cfg->id, lm_cfg->dspp,
+ lm_cfg->pingpong);
+
+ /* Check if this layer mixer is a peer of the proposed primary LM */
+ if (primary_lm) {
+ struct sde_lm_cfg *prim_lm_cfg =
+ (struct sde_lm_cfg *)primary_lm->catalog;
+
+ if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+ SDE_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
+ prim_lm_cfg->id);
+ return false;
+ }
+ }
+
+ /* Matches user requirements? DSPP presence must match the request */
+ if ((RM_RQ_DSPP(reqs) && lm_cfg->dspp == DSPP_MAX) ||
+ (!RM_RQ_DSPP(reqs) && lm_cfg->dspp != DSPP_MAX)) {
+ SDE_DEBUG("dspp req mismatch lm %d reqdspp %d, lm->dspp %d\n",
+ lm_cfg->id, (bool)(RM_RQ_DSPP(reqs)),
+ lm_cfg->dspp);
+ return false;
+ }
+
+ /* Already reserved? */
+ if (RESERVED_BY_OTHER(lm, rsvp)) {
+ SDE_DEBUG("lm %d already reserved\n", lm_cfg->id);
+ return false;
+ }
+
+ /* Resolve the hardwired dspp attached to this lm, if any */
+ if (lm_cfg->dspp != DSPP_MAX) {
+ sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSPP);
+ while (sde_rm_get_hw(rm, &iter)) {
+ if (iter.blk->id == lm_cfg->dspp) {
+ *dspp = iter.blk;
+ break;
+ }
+ }
+
+ if (!*dspp) {
+ SDE_DEBUG("lm %d failed to retrieve dspp %d\n", lm->id,
+ lm_cfg->dspp);
+ return false;
+ }
+
+ /*
+ * NOTE(review): *dspp is left non-NULL on this failure path,
+ * unlike the later failure paths which reset it; callers
+ * treat a false return as total failure, so this appears
+ * benign — confirm.
+ */
+ if (RESERVED_BY_OTHER(*dspp, rsvp)) {
+ SDE_DEBUG("lm %d dspp %d already reserved\n",
+ lm->id, (*dspp)->id);
+ return false;
+ }
+ }
+
+ /* Resolve the hardwired pingpong attached to this lm */
+ sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_PINGPONG);
+ while (sde_rm_get_hw(rm, &iter)) {
+ if (iter.blk->id == lm_cfg->pingpong) {
+ *pp = iter.blk;
+ break;
+ }
+ }
+
+ if (!*pp) {
+ SDE_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
+ return false;
+ }
+
+ if (RESERVED_BY_OTHER(*pp, rsvp)) {
+ SDE_DEBUG("lm %d pp %d already reserved\n", lm->id,
+ (*pp)->id);
+ *dspp = NULL;
+ return false;
+ }
+
+ /* PPSPLIT topology additionally requires a split-capable pingpong */
+ pp_cfg = (struct sde_pingpong_cfg *)((*pp)->catalog);
+ if ((reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT) &&
+ !(test_bit(SDE_PINGPONG_SPLIT, &pp_cfg->features))) {
+ SDE_DEBUG("pp %d doesn't support ppsplit\n", pp_cfg->id);
+ *dspp = NULL;
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * _sde_rm_reserve_lms - reserve reqs->num_lm layer mixers, plus their
+ *	hardwired pingpong/dspp blocks, tagging each with rsvp via rsvp_nxt
+ * @rm: sde resource manager handle
+ * @rsvp: reservation currently being created
+ * @reqs: proposed use case requirements (num_lm, topology, DSPP request)
+ * @Return: 0 on success, -EINVAL for an invalid num_lm, -ENAVAIL when no
+ *	suitable mixer set (or, for PPSPLIT, no free slave pingpong) exists
+ */
+static int _sde_rm_reserve_lms(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ struct sde_rm_requirements *reqs)
+
+{
+ struct sde_rm_hw_blk *lm[MAX_BLOCKS];
+ struct sde_rm_hw_blk *dspp[MAX_BLOCKS];
+ struct sde_rm_hw_blk *pp[MAX_BLOCKS];
+ struct sde_rm_hw_iter iter_i, iter_j;
+ int lm_count = 0;
+ int i, rc = 0;
+
+ if (!reqs->num_lm) {
+ SDE_ERROR("invalid number of lm: %d\n", reqs->num_lm);
+ return -EINVAL;
+ }
+
+ /* Find a primary mixer */
+ sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
+ while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_i)) {
+ /* reset candidate set for each new primary mixer attempt */
+ memset(&lm, 0, sizeof(lm));
+ memset(&dspp, 0, sizeof(dspp));
+ memset(&pp, 0, sizeof(pp));
+
+ lm_count = 0;
+ lm[lm_count] = iter_i.blk;
+
+ if (!_sde_rm_check_lm_and_get_connected_blks(rm, rsvp, reqs,
+ lm[lm_count], &dspp[lm_count], &pp[lm_count],
+ NULL))
+ continue;
+
+ ++lm_count;
+
+ /* Valid primary mixer found, find matching peers */
+ sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);
+
+ while (lm_count != reqs->num_lm && sde_rm_get_hw(rm, &iter_j)) {
+ if (iter_i.blk == iter_j.blk)
+ continue;
+
+ if (!_sde_rm_check_lm_and_get_connected_blks(rm, rsvp,
+ reqs, iter_j.blk, &dspp[lm_count],
+ &pp[lm_count], iter_i.blk))
+ continue;
+
+ lm[lm_count] = iter_j.blk;
+ ++lm_count;
+ }
+ }
+
+ if (lm_count != reqs->num_lm) {
+ SDE_DEBUG("unable to find appropriate mixers\n");
+ return -ENAVAIL;
+ }
+
+ /* Candidate set is complete: tag all blocks with the reservation */
+ for (i = 0; i < ARRAY_SIZE(lm); i++) {
+ if (!lm[i])
+ break;
+
+ lm[i]->rsvp_nxt = rsvp;
+ pp[i]->rsvp_nxt = rsvp;
+ if (dspp[i])
+ dspp[i]->rsvp_nxt = rsvp;
+
+ SDE_EVT32(lm[i]->type, rsvp->enc_id, lm[i]->id, pp[i]->id,
+ dspp[i] ? dspp[i]->id : 0);
+ }
+
+ if (reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT) {
+ /* reserve a free PINGPONG_SLAVE block */
+ rc = -ENAVAIL;
+ sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
+ while (sde_rm_get_hw(rm, &iter_i)) {
+ struct sde_pingpong_cfg *pp_cfg =
+ (struct sde_pingpong_cfg *)
+ (iter_i.blk->catalog);
+
+ if (!(test_bit(SDE_PINGPONG_SLAVE, &pp_cfg->features)))
+ continue;
+ if (RESERVED_BY_OTHER(iter_i.blk, rsvp))
+ continue;
+
+ iter_i.blk->rsvp_nxt = rsvp;
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * _sde_rm_reserve_ctls - reserve reqs->num_ctl CTL blocks matching the
+ *	split-display / ppsplit capabilities required by the use case
+ * @rm: sde resource manager handle
+ * @rsvp: reservation currently being created
+ * @reqs: proposed use case requirements (num_ctl, topology, split display)
+ * @Return: 0 on success, -ENAVAIL when not enough matching CTLs are free
+ */
+static int _sde_rm_reserve_ctls(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ struct sde_rm_requirements *reqs)
+{
+ struct sde_rm_hw_blk *ctls[MAX_BLOCKS];
+ struct sde_rm_hw_iter iter;
+ int i = 0;
+
+ memset(&ctls, 0, sizeof(ctls));
+
+ sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);
+ while (sde_rm_get_hw(rm, &iter)) {
+ unsigned long caps;
+ bool has_split_display, has_ppsplit;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ continue;
+
+ caps = ((struct sde_ctl_cfg *)iter.blk->catalog)->features;
+ has_split_display = BIT(SDE_CTL_SPLIT_DISPLAY) & caps;
+ has_ppsplit = BIT(SDE_CTL_PINGPONG_SPLIT) & caps;
+
+ SDE_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, caps);
+
+ /* capability must match exactly, so low-resource CTLs are
+ * not consumed by use cases that don't need them */
+ if (reqs->needs_split_display != has_split_display)
+ continue;
+
+ if (reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT && !has_ppsplit)
+ continue;
+
+ ctls[i] = iter.blk;
+ SDE_DEBUG("ctl %d match\n", iter.blk->id);
+
+ if (++i == reqs->num_ctl)
+ break;
+ }
+
+ if (i != reqs->num_ctl)
+ return -ENAVAIL;
+
+ /* All candidates found; tag them with the reservation */
+ for (i = 0; i < ARRAY_SIZE(ctls) && i < reqs->num_ctl; i++) {
+ ctls[i]->rsvp_nxt = rsvp;
+ SDE_EVT32(ctls[i]->type, rsvp->enc_id, ctls[i]->id);
+ }
+
+ return 0;
+}
+
+/**
+ * _sde_rm_reserve_cdm - reserve a CDM block that is wired to the given
+ *	interface or writeback block
+ * @rm: sde resource manager handle
+ * @rsvp: reservation currently being created
+ * @id: hardware id of the INTF or WB the CDM must connect to
+ * @type: SDE_HW_BLK_INTF or SDE_HW_BLK_WB, selects which connect mask of
+ *	the CDM catalog entry is consulted
+ * @Return: 0 on success, -ENAVAIL when no free, connectable CDM exists
+ */
+static int _sde_rm_reserve_cdm(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ uint32_t id,
+ enum sde_hw_blk_type type)
+{
+ struct sde_rm_hw_iter iter;
+ struct sde_cdm_cfg *cdm;
+
+ sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CDM);
+ while (sde_rm_get_hw(rm, &iter)) {
+ bool match = false;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ continue;
+
+ cdm = (struct sde_cdm_cfg *)(iter.blk->catalog);
+
+ /* catalog connect masks say which intfs/wbs this cdm reaches */
+ if (type == SDE_HW_BLK_INTF && id != INTF_MAX)
+ match = test_bit(id, &cdm->intf_connect);
+ else if (type == SDE_HW_BLK_WB && id != WB_MAX)
+ match = test_bit(id, &cdm->wb_connect);
+
+ SDE_DEBUG("type %d id %d, cdm intfs %lu wbs %lu match %d\n",
+ type, id, cdm->intf_connect, cdm->wb_connect,
+ match);
+
+ if (!match)
+ continue;
+
+ iter.blk->rsvp_nxt = rsvp;
+ SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
+ break;
+ }
+
+ /* iter.hw is NULL when the loop above exhausted the list */
+ if (!iter.hw) {
+ SDE_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
+ return -ENAVAIL;
+ }
+
+ return 0;
+}
+
+/**
+ * _sde_rm_reserve_intf_or_wb - reserve a specific INTF or WB block by id,
+ *	and optionally a CDM connected to it
+ * @rm: sde resource manager handle
+ * @rsvp: reservation currently being created
+ * @id: hardware id of the block to reserve
+ * @type: SDE_HW_BLK_INTF or SDE_HW_BLK_WB
+ * @needs_cdm: when true, also reserve a CDM wired to this block
+ * @Return: 0 on success, -ENAVAIL if the block (or a CDM) is already taken,
+ *	-EINVAL if no block with this id exists
+ */
+static int _sde_rm_reserve_intf_or_wb(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ uint32_t id,
+ enum sde_hw_blk_type type,
+ bool needs_cdm)
+{
+ struct sde_rm_hw_iter iter;
+ int ret = 0;
+
+ /* Find the block entry in the rm, and note the reservation */
+ sde_rm_init_hw_iter(&iter, 0, type);
+ while (sde_rm_get_hw(rm, &iter)) {
+ if (iter.blk->id != id)
+ continue;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+ SDE_ERROR("type %d id %d already reserved\n", type, id);
+ return -ENAVAIL;
+ }
+
+ iter.blk->rsvp_nxt = rsvp;
+ SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
+ break;
+ }
+
+ /* Shouldn't happen since wbs / intfs are fixed at probe */
+ if (!iter.hw) {
+ SDE_ERROR("couldn't find type %d id %d\n", type, id);
+ return -EINVAL;
+ }
+
+ /* Expected only one intf or wb will request cdm */
+ if (needs_cdm)
+ ret = _sde_rm_reserve_cdm(rm, rsvp, id, type);
+
+ return ret;
+}
+
+/**
+ * _sde_rm_reserve_intf_related_hw - reserve every INTF and WB the encoder
+ *	reported as in-use via its hw resources, plus attached CDMs
+ * @rm: sde resource manager handle
+ * @rsvp: reservation currently being created
+ * @hw_res: encoder hw resources; entries equal to INTF_MODE_NONE are unused
+ * @Return: 0 on success, first error from _sde_rm_reserve_intf_or_wb
+ */
+static int _sde_rm_reserve_intf_related_hw(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ struct sde_encoder_hw_resources *hw_res)
+{
+ int i, ret = 0;
+ u32 id;
+
+ /* array index maps to hw id offset from INTF_0 */
+ for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
+ if (hw_res->intfs[i] == INTF_MODE_NONE)
+ continue;
+ id = i + INTF_0;
+ ret = _sde_rm_reserve_intf_or_wb(rm, rsvp, id,
+ SDE_HW_BLK_INTF, hw_res->needs_cdm);
+ if (ret)
+ return ret;
+ }
+
+ /* array index maps to hw id offset from WB_0 */
+ for (i = 0; i < ARRAY_SIZE(hw_res->wbs); i++) {
+ if (hw_res->wbs[i] == INTF_MODE_NONE)
+ continue;
+ id = i + WB_0;
+ ret = _sde_rm_reserve_intf_or_wb(rm, rsvp, id,
+ SDE_HW_BLK_WB, hw_res->needs_cdm);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * _sde_rm_make_next_rsvp - build the proposed reservation: tag all required
+ *	hw blocks (LM/PP/DSPP, CTL, INTF/WB/CDM) with rsvp via rsvp_nxt
+ * @rm: sde resource manager handle
+ * @enc: DRM encoder this reservation belongs to
+ * @crtc_state: proposed CRTC state (unused here, part of the rsvp pipeline)
+ * @conn_state: proposed connector state (unused here)
+ * @rsvp: reservation object to fill in and link into rm->rsvps
+ * @reqs: previously populated use case requirements; may be relaxed here
+ *	(DSPP fallback) when the first LM pass fails
+ * @Return: 0 on success, negative errno when any block class is unavailable
+ */
+static int _sde_rm_make_next_rsvp(
+ struct sde_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct sde_rm_rsvp *rsvp,
+ struct sde_rm_requirements *reqs)
+{
+ int ret;
+
+ /* Create reservation info, tag reserved blocks with it as we go */
+ rsvp->seq = ++rm->rsvp_next_seq;
+ rsvp->enc_id = enc->base.id;
+ rsvp->topology = reqs->top_name;
+ list_add_tail(&rsvp->list, &rm->rsvps);
+
+ /*
+ * Assign LMs and blocks whose usage is tied to them: DSPP & Pingpong.
+ * Do assignment preferring to give away low-resource mixers first:
+ * - Check mixers without DSPPs
+ * - Only then allow to grab from mixers with DSPP capability
+ */
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs);
+ if (ret && !RM_RQ_DSPP(reqs)) {
+ reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs);
+ }
+
+ if (ret) {
+ SDE_ERROR("unable to find appropriate mixers\n");
+ return ret;
+ }
+
+ /*
+ * Do assignment preferring to give away low-resource CTLs first:
+ * - Check mixers without Split Display
+ * - Only then allow to grab from CTLs with split display capability
+ *
+ * Fix: the return value was previously discarded, so a CTL
+ * reservation failure was silently ignored (ret stayed 0).
+ */
+ ret = _sde_rm_reserve_ctls(rm, rsvp, reqs);
+ if (ret && !reqs->needs_split_display) {
+ reqs->needs_split_display = true;
+ ret = _sde_rm_reserve_ctls(rm, rsvp, reqs);
+ }
+ if (ret) {
+ SDE_ERROR("unable to find appropriate CTL\n");
+ return ret;
+ }
+
+ /* Assign INTFs, WBs, and blks whose usage is tied to them: CTL & CDM */
+ ret = _sde_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+/**
+ * _sde_rm_populate_requirements - derive topology, LM/CTL counts and split
+ *	display needs from the connector properties and encoder hw resources
+ * @rm: sde resource manager handle (provides cached lm_max_width)
+ * @enc: DRM encoder to query for hw resources
+ * @crtc_state: proposed CRTC state; mode width drives pipe-merge decisions
+ * @conn_state: proposed connector state; provides TOPOLOGY_CONTROL bits
+ * @reqs: output, fully overwritten with the computed requirements
+ * @Return: 0 on success, -EINVAL for unsupported mixer counts
+ */
+static int _sde_rm_populate_requirements(
+ struct sde_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct sde_rm_requirements *reqs)
+{
+ const struct drm_display_mode *mode = &crtc_state->mode;
+
+ /**
+ * DRM<->HW Topologies
+ *
+ * Name: SINGLEPIPE
+ * Description: 1 LM, 1 PP, 1 INTF
+ * Condition: 1 DRM Encoder w/ 1 Display Tiles (Default)
+ *
+ * Name: DUALPIPE
+ * Description: 2 LM, 2 PP, 2 INTF
+ * Condition: 1 DRM Encoder w/ 2 Display Tiles
+ *
+ * Name: PPSPLIT
+ * Description: 1 LM, 1 PP + 1 Slave PP, 2 INTF
+ * Condition:
+ * 1 DRM Encoder w/ 2 Display Tiles
+ * topology_control & SDE_TOPREQ_PPSPLIT
+ *
+ * Name: DUALPIPEMERGE
+ * Description: 2 LM, 2 PP, 3DMux, 1 INTF
+ * Condition:
+ * 1 DRM Encoder w/ 1 Display Tiles
+ * display_info.max_width >= layer_mixer.max_width
+ *
+ * Name: DUALPIPEMERGE
+ * Description: 2 LM, 2 PP, 3DMux, 1 INTF
+ * Condition:
+ * 1 DRM Encoder w/ 1 Display Tiles
+ * display_info.max_width <= layer_mixer.max_width
+ * topology_control & SDE_TOPREQ_FORCE_TILING
+ */
+
+ memset(reqs, 0, sizeof(*reqs));
+
+ reqs->top_ctrl = sde_connector_get_property(conn_state,
+ CONNECTOR_PROP_TOPOLOGY_CONTROL);
+ sde_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
+
+ /* Base assumption is LMs = h_tiles, conditions below may override */
+ reqs->num_lm = reqs->hw_res.display_num_of_h_tiles;
+
+ if (reqs->num_lm == 2) {
+ if (RM_RQ_PPSPLIT(reqs)) {
+ /* user requests serving dual display with 1 lm */
+ reqs->top_name = SDE_RM_TOPOLOGY_PPSPLIT;
+ reqs->num_lm = 1;
+ reqs->num_ctl = 1;
+ reqs->needs_split_display = true;
+ } else {
+ /* dual display, serve with 2 lms */
+ reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPE;
+ reqs->num_ctl = 2;
+ reqs->needs_split_display = true;
+ }
+
+ } else if (reqs->num_lm == 1) {
+ if (mode->hdisplay > rm->lm_max_width) {
+ /* wide display, must split across 2 lm and merge */
+ reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPEMERGE;
+ reqs->num_lm = 2;
+ reqs->num_ctl = 1;
+ reqs->needs_split_display = false;
+ } else if (RM_RQ_FORCE_TILING(reqs)) {
+ /* thin display, but user requests 2 lm and merge */
+ reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPEMERGE;
+ reqs->num_lm = 2;
+ reqs->num_ctl = 1;
+ reqs->needs_split_display = false;
+ } else {
+ /* thin display, serve with only 1 lm */
+ reqs->top_name = SDE_RM_TOPOLOGY_SINGLEPIPE;
+ reqs->num_ctl = 1;
+ reqs->needs_split_display = false;
+ }
+
+ } else {
+ /* Currently no configurations with # LM > 2 */
+ SDE_ERROR("unsupported # of mixers %d\n", reqs->num_lm);
+ return -EINVAL;
+ }
+
+ /* (removed a duplicated num_lm/topology_name debug print) */
+ SDE_DEBUG("top_ctrl 0x%llX num_h_tiles %d\n", reqs->top_ctrl,
+ reqs->hw_res.display_num_of_h_tiles);
+ SDE_DEBUG("display_max_width %d rm->lm_max_width %d\n",
+ mode->hdisplay, rm->lm_max_width);
+ SDE_DEBUG("num_lm %d num_ctl %d topology_name %d\n", reqs->num_lm,
+ reqs->num_ctl, reqs->top_name);
+ SDE_EVT32(mode->hdisplay, rm->lm_max_width, reqs->num_lm,
+ reqs->top_ctrl, reqs->top_name, reqs->num_ctl);
+
+ return 0;
+}
+
+/**
+ * _sde_rm_get_rsvp - find the reservation belonging to a given encoder
+ * @rm: sde resource manager handle
+ * @enc: DRM encoder whose reservation is looked up by enc->base.id
+ * @Return: matching reservation, or NULL when none exists / bad params
+ */
+static struct sde_rm_rsvp *_sde_rm_get_rsvp(
+ struct sde_rm *rm,
+ struct drm_encoder *enc)
+{
+ struct sde_rm_rsvp *i;
+
+ if (!rm || !enc) {
+ SDE_ERROR("invalid params\n");
+ return NULL;
+ }
+
+ /* list_for_each_entry() handles the empty list naturally; the
+ * previous explicit list_empty() pre-check was redundant */
+ list_for_each_entry(i, &rm->rsvps, list)
+ if (i->enc_id == enc->base.id)
+ return i;
+
+ return NULL;
+}
+
+/**
+ * _sde_rm_get_connector - locate the DRM connector currently driven by the
+ *	given encoder
+ * @enc: DRM encoder to search for
+ * @Return: connector whose ->encoder matches, or NULL when none is attached
+ */
+static struct drm_connector *_sde_rm_get_connector(
+ struct drm_encoder *enc)
+{
+ struct drm_connector *connector;
+
+ /* walk the device's mode_config connector list */
+ list_for_each_entry(connector, &enc->dev->mode_config.connector_list,
+ head) {
+ if (connector->encoder == enc)
+ return connector;
+ }
+
+ return NULL;
+}
+
+/**
+ * _sde_rm_release_rsvp - release resources and release a reservation
+ * @rm: KMS handle
+ * @rsvp: RSVP pointer to release and release resources for; freed here
+ * @conn: connector whose TOPOLOGY_NAME property is reset to UNKNOWN.
+ *	NOTE(review): dereferenced unconditionally (conn->state), so callers
+ *	must pass a non-NULL connector — confirm all call sites.
+ */
+void _sde_rm_release_rsvp(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ struct drm_connector *conn)
+{
+ struct sde_rm_rsvp *rsvp_c, *rsvp_n;
+ struct sde_rm_hw_blk *blk;
+ enum sde_hw_blk_type type;
+
+ if (!rsvp)
+ return;
+
+ SDE_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
+
+ /* unlink the reservation from the rm's list, if present */
+ list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
+ if (rsvp == rsvp_c) {
+ list_del(&rsvp_c->list);
+ break;
+ }
+ }
+
+ /* clear both active and pending tags referencing this rsvp */
+ for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->rsvp == rsvp) {
+ blk->rsvp = NULL;
+ SDE_DEBUG("rel rsvp %d enc %d %s %d\n",
+ rsvp->seq, rsvp->enc_id,
+ blk->type_name, blk->id);
+ }
+ if (blk->rsvp_nxt == rsvp) {
+ blk->rsvp_nxt = NULL;
+ SDE_DEBUG("rel rsvp_nxt %d enc %d %s %d\n",
+ rsvp->seq, rsvp->enc_id,
+ blk->type_name, blk->id);
+ }
+ }
+ }
+
+ kfree(rsvp);
+
+ (void) msm_property_set_property(
+ sde_connector_get_propinfo(conn),
+ sde_connector_get_property_values(conn->state),
+ CONNECTOR_PROP_TOPOLOGY_NAME,
+ SDE_RM_TOPOLOGY_UNKNOWN);
+}
+
+/*
+ * Implements sde_rm_release() declared in sde_rm.h: drop all HW block
+ * reservations held by this encoder, unless the connector requested
+ * RESERVE_LOCK via the topology control property.
+ */
+void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
+{
+ struct sde_rm_rsvp *rsvp;
+ struct drm_connector *conn;
+ uint64_t top_ctrl;
+
+ if (!rm || !enc) {
+ SDE_ERROR("invalid params\n");
+ return;
+ }
+
+ rsvp = _sde_rm_get_rsvp(rm, enc);
+ if (!rsvp) {
+ SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+ return;
+ }
+
+ conn = _sde_rm_get_connector(enc);
+ if (!conn) {
+ SDE_ERROR("failed to get connector for enc %d\n", enc->base.id);
+ return;
+ }
+
+ top_ctrl = sde_connector_get_property(conn->state,
+ CONNECTOR_PROP_TOPOLOGY_CONTROL);
+
+ /* locked reservations persist across release requests */
+ if (top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK)) {
+ SDE_DEBUG("rsvp[s%de%d] not releasing locked resources\n",
+ rsvp->seq, rsvp->enc_id);
+ } else {
+ SDE_DEBUG("release rsvp[s%de%d]\n", rsvp->seq,
+ rsvp->enc_id);
+ _sde_rm_release_rsvp(rm, rsvp, conn);
+ }
+}
+
+/**
+ * _sde_rm_commit_rsvp - publish the topology name to the connector and
+ *	promote pending (rsvp_nxt) tags on all hw blocks to active (rsvp)
+ * @rm: sde resource manager handle
+ * @rsvp: reservation being committed; freed on failure
+ * @conn_state: connector state whose TOPOLOGY_NAME property is updated
+ * @Return: 0 on success; error from msm_property_set_property otherwise,
+ *	in which case the reservation has been released
+ */
+static int _sde_rm_commit_rsvp(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ struct drm_connector_state *conn_state)
+{
+ struct sde_rm_hw_blk *blk;
+ enum sde_hw_blk_type type;
+ int ret = 0;
+
+ ret = msm_property_set_property(
+ sde_connector_get_propinfo(conn_state->connector),
+ sde_connector_get_property_values(conn_state),
+ CONNECTOR_PROP_TOPOLOGY_NAME,
+ rsvp->topology);
+ if (ret) {
+ /*
+ * rsvp is freed by the release (which also clears this
+ * rsvp's rsvp_nxt tags); return early so no stale pending
+ * tags are promoted and rsvp is not dereferenced again.
+ */
+ _sde_rm_release_rsvp(rm, rsvp, conn_state->connector);
+ return ret;
+ }
+
+ /* Swap next rsvp to be the active */
+ for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->rsvp_nxt) {
+ blk->rsvp = blk->rsvp_nxt;
+ blk->rsvp_nxt = NULL;
+ }
+ }
+ }
+
+ SDE_DEBUG("rsrv enc %d topology %d\n", rsvp->enc_id,
+ rsvp->topology);
+ SDE_EVT32(rsvp->enc_id, rsvp->topology);
+
+ return 0;
+}
+
+/*
+ * Validate a proposed topology control bitmask before it is applied:
+ * PPSPLIT and FORCE_TILING are mutually exclusive requests.
+ */
+int sde_rm_check_property_topctl(uint64_t val)
+{
+ const uint64_t incompatible = BIT(SDE_RM_TOPCTL_FORCE_TILING) |
+ BIT(SDE_RM_TOPCTL_PPSPLIT);
+
+ if ((val & incompatible) == incompatible) {
+ SDE_ERROR("ppsplit & force_tiling are incompatible\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Implements sde_rm_reserve() declared in sde_rm.h. Builds a proposed
+ * ("next") reservation transactionally: on failure or in plain test-only
+ * mode the proposal is discarded, otherwise it replaces the current one.
+ */
+int sde_rm_reserve(
+ struct sde_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ bool test_only)
+{
+ struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
+ struct sde_rm_requirements reqs;
+ int ret;
+
+ if (!rm || !enc || !crtc_state || !conn_state) {
+ SDE_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ /* Check if this is just a page-flip */
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return 0;
+
+ SDE_DEBUG("reserving hw for conn %d enc %d crtc %d test_only %d\n",
+ conn_state->connector->base.id, enc->base.id,
+ crtc_state->crtc->base.id, test_only);
+ SDE_EVT32(enc->base.id, conn_state->connector->base.id);
+
+ _sde_rm_print_rsvps(rm, SDE_RM_STAGE_BEGIN);
+
+ ret = _sde_rm_populate_requirements(rm, enc, crtc_state,
+ conn_state, &reqs);
+ if (ret) {
+ SDE_ERROR("failed to populate hw requirements\n");
+ return ret;
+ }
+
+ /*
+ * We only support one active reservation per-hw-block. But to implement
+ * transactional semantics for test-only, and for allowing failure while
+ * modifying your existing reservation, over the course of this
+ * function we can have two reservations:
+ * Current: Existing reservation
+ * Next: Proposed reservation. The proposed reservation may fail, or may
+ * be discarded if in test-only mode.
+ * If reservation is successful, and we're not in test-only, then we
+ * replace the current with the next.
+ */
+ rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
+ if (!rsvp_nxt)
+ return -ENOMEM;
+
+ rsvp_cur = _sde_rm_get_rsvp(rm, enc);
+
+ /*
+ * User can request that we clear out any reservation during the
+ * atomic_check phase by using this CLEAR bit
+ */
+ if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
+ SDE_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
+ rsvp_cur->seq, rsvp_cur->enc_id);
+ _sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+ rsvp_cur = NULL;
+ _sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_CLEAR);
+ }
+
+ /* Check the proposed reservation, store it in hw's "next" field */
+ ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+ rsvp_nxt, &reqs);
+
+ _sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_RSVPNEXT);
+
+ /* _sde_rm_release_rsvp() frees the rsvp passed to it */
+ if (ret) {
+ SDE_ERROR("failed to reserve hw resources: %d\n", ret);
+ _sde_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ } else if (test_only && !RM_RQ_LOCK(&reqs)) {
+ /*
+ * Normally, if test_only, test the reservation and then undo
+ * However, if the user requests LOCK, then keep the reservation
+ * made during the atomic_check phase.
+ */
+ SDE_DEBUG("test_only: discard test rsvp[s%de%d]\n",
+ rsvp_nxt->seq, rsvp_nxt->enc_id);
+ _sde_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ } else {
+ if (test_only && RM_RQ_LOCK(&reqs))
+ SDE_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
+ rsvp_nxt->seq, rsvp_nxt->enc_id);
+
+ _sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+
+ ret = _sde_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+ }
+
+ _sde_rm_print_rsvps(rm, SDE_RM_STAGE_FINAL);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
new file mode 100644
index 000000000000..855b12ce8150
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDE_RM_H__
+#define __SDE_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "sde_hw_top.h"
+
+/**
+ * enum sde_rm_topology_name - HW resource use case in use by connector
+ * @SDE_RM_TOPOLOGY_UNKNOWN: No topology in use currently
+ * @SDE_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
+ * @SDE_RM_TOPOLOGY_PPSPLIT: 1 LM, 2 PPs, 2 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPEMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
+ */
+enum sde_rm_topology_name {
+ SDE_RM_TOPOLOGY_UNKNOWN = 0,
+ SDE_RM_TOPOLOGY_SINGLEPIPE,
+ SDE_RM_TOPOLOGY_DUALPIPE,
+ SDE_RM_TOPOLOGY_PPSPLIT,
+ SDE_RM_TOPOLOGY_DUALPIPEMERGE,
+};
+
+/**
+ * enum sde_rm_topology_control - HW resource use case in use by connector
+ * @SDE_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
+ * test, reserve the resources for this display.
+ * Normal behavior would not impact the reservation
+ * list during the AtomicTest phase.
+ * @SDE_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
+ * release any reservation held by this display.
+ * Normal behavior would not impact the
+ * reservation list during the AtomicTest phase.
+ * @SDE_RM_TOPCTL_DSPP: Require layer mixers with DSPP capabilities
+ * @SDE_RM_TOPCTL_FORCE_TILING: Require kernel to split across multiple layer
+ * mixers, despite width fitting within capability
+ * of a single layer mixer.
+ * @SDE_RM_TOPCTL_PPSPLIT: Require kernel to use pingpong split pipe
+ * configuration instead of dual pipe.
+ */
+enum sde_rm_topology_control {
+ SDE_RM_TOPCTL_RESERVE_LOCK,
+ SDE_RM_TOPCTL_RESERVE_CLEAR,
+ SDE_RM_TOPCTL_DSPP,
+ SDE_RM_TOPCTL_FORCE_TILING,
+ SDE_RM_TOPCTL_PPSPLIT,
+};
+
+/**
+ * struct sde_rm - SDE dynamic hardware resource manager
+ * @dev: device handle for event logging purposes
+ * @rsvps: list of hardware reservations by each crtc->encoder->connector
+ * @hw_blks: array of lists of hardware resources present in the system, one
+ * list per type of hardware block. Entries are allocated by
+ * sde_rm_init() and freed by sde_rm_destroy().
+ * @hw_mdp: hardware object for mdp_top
+ * @lm_max_width: cached layer mixer maximum width
+ * @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ */
+struct sde_rm {
+ struct drm_device *dev;
+ struct list_head rsvps;
+ struct list_head hw_blks[SDE_HW_BLK_MAX];
+ struct sde_hw_mdp *hw_mdp;
+ uint32_t lm_max_width;
+ uint32_t rsvp_next_seq;
+};
+
+/**
+ * struct sde_rm_hw_blk - resource manager internal structure
+ * forward declaration for single iterator definition without void pointer
+ */
+struct sde_rm_hw_blk;
+
+/**
+ * struct sde_rm_hw_iter - iterator for use with sde_rm
+ * @hw: sde_hw object requested, or NULL on failure
+ * @blk: sde_rm internal block representation. Clients ignore. Used as iterator.
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+struct sde_rm_hw_iter {
+ void *hw;
+ struct sde_rm_hw_blk *blk;
+ uint32_t enc_id;
+ enum sde_hw_blk_type type;
+};
+
+/**
+ * sde_rm_init - Read hardware catalog and create reservation tracking objects
+ * for all HW blocks.
+ * @rm: SDE Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mmio: mapped register io address of MDP
+ * @dev: device handle for event logging purposes
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_init(struct sde_rm *rm,
+ struct sde_mdss_cfg *cat,
+ void *mmio,
+ struct drm_device *dev);
+
+/**
+ * sde_rm_destroy - Free all memory allocated by sde_rm_init
+ * @rm: SDE Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_destroy(struct sde_rm *rm);
+
+/**
+ * sde_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ * the use connections and user requirements, specified through related
+ * topology control properties, and reserve hardware blocks to that
+ * display chain.
+ * HW blocks can then be accessed through sde_rm_get_* functions.
+ * HW Reservations should be released via sde_rm_release_hw.
+ * @rm: SDE Resource Manager handle
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @conn_state: Proposed Atomic DRM Connector State handle
+ * @test_only: Atomic-Test phase, discard results (unless property overrides)
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_reserve(struct sde_rm *rm,
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ bool test_only);
+
+/**
+ * sde_rm_release - Given the encoder for the display chain, release any
+ * HW blocks previously reserved for that use case.
+ * @rm: SDE Resource Manager handle
+ * @enc: DRM Encoder handle
+ */
+void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc);
+
+/**
+ * sde_rm_get_mdp - Retrieve HW block for MDP TOP.
+ * This is never reserved, and is usable by any display.
+ * @rm: SDE Resource Manager handle
+ * @Return: Pointer to hw block or NULL
+ */
+struct sde_hw_mdp *sde_rm_get_mdp(struct sde_rm *rm);
+
+/**
+ * sde_rm_init_hw_iter - setup given iterator for new iteration over hw list
+ * using sde_rm_get_hw
+ * @iter: iter object to initialize
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+void sde_rm_init_hw_iter(
+ struct sde_rm_hw_iter *iter,
+ uint32_t enc_id,
+ enum sde_hw_blk_type type);
+/**
+ * sde_rm_get_hw - retrieve reserved hw object given encoder and hw type
+ * Meant to do a single pass through the hardware list to iteratively
+ * retrieve hardware blocks of a given type for a given encoder.
+ * Initialize an iterator object.
+ * Set hw block type of interest. Set encoder id of interest, 0 for any.
+ * Function returns first hw of type for that encoder.
+ * Subsequent calls will return the next reserved hw of that type in-order.
+ * Iterator HW pointer will be null on failure to find hw.
+ * @rm: SDE Resource Manager handle
+ * @iter: iterator object
+ * @Return: true on match found, false on no match found
+ */
+bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *iter);
+
+/**
+ * sde_rm_check_property_topctl - validate property bitmask before it is set
+ * @val: user's proposed topology control bitmask
+ * @Return: 0 on success or error
+ */
+int sde_rm_check_property_topctl(uint64_t val);
+
+#endif /* __SDE_RM_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
new file mode 100644
index 000000000000..2a4e6b59a08c
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_trace.h
@@ -0,0 +1,195 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_SDE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _SDE_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sde
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE sde_trace
+
+TRACE_EVENT(sde_perf_set_qos_luts,
+ TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+ u32 lut, bool linear),
+ TP_ARGS(pnum, fmt, rt, fl, lut, linear),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(bool, rt)
+ __field(u32, fl)
+ __field(u32, lut)
+ __field(bool, linear)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->rt = rt;
+ __entry->fl = fl;
+ __entry->lut = lut;
+ __entry->linear = linear;
+ ),
+ TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%x lin=%d",
+ __entry->pnum, __entry->fmt,
+ __entry->rt, __entry->fl,
+ __entry->lut, __entry->linear)
+);
+
+TRACE_EVENT(sde_perf_set_danger_luts,
+ TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+ u32 safe_lut),
+ TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(u32, mode)
+ __field(u32, danger_lut)
+ __field(u32, safe_lut)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->mode = mode;
+ __entry->danger_lut = danger_lut;
+ __entry->safe_lut = safe_lut;
+ ),
+ TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+ __entry->pnum, __entry->fmt,
+ __entry->mode, __entry->danger_lut,
+ __entry->safe_lut)
+);
+
+/* per-client OT limit programming event */
+TRACE_EVENT(sde_perf_set_ot,
+	TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+	TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+	TP_STRUCT__entry(
+			__field(u32, pnum)
+			__field(u32, xin_id)
+			__field(u32, rd_lim)
+			__field(u32, vbif_idx)
+	),
+	TP_fast_assign(
+			__entry->pnum = pnum;
+			__entry->xin_id = xin_id;
+			__entry->rd_lim = rd_lim;
+			__entry->vbif_idx = vbif_idx;
+	),
+	TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+			__entry->pnum, __entry->xin_id, __entry->rd_lim,
+			__entry->vbif_idx)
+);
+
+/* bus bandwidth (ab/ib quota) request event */
+TRACE_EVENT(sde_perf_update_bus,
+	TP_PROTO(int client, unsigned long long ab_quota,
+	unsigned long long ib_quota),
+	TP_ARGS(client, ab_quota, ib_quota),
+	TP_STRUCT__entry(
+			__field(int, client)
+			__field(u64, ab_quota)
+			__field(u64, ib_quota)
+	),
+	TP_fast_assign(
+			__entry->client = client;
+			__entry->ab_quota = ab_quota;
+			__entry->ib_quota = ib_quota;
+	),
+	TP_printk("Request client:%d ab=%llu ib=%llu",
+			__entry->client,
+			__entry->ab_quota,
+			__entry->ib_quota)
+);
+
+
+TRACE_EVENT(sde_cmd_release_bw,
+ TP_PROTO(u32 crtc_id),
+ TP_ARGS(crtc_id),
+ TP_STRUCT__entry(
+ __field(u32, crtc_id)
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ ),
+ TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+/* systrace-compatible begin/end marker ("B"/"E") event */
+TRACE_EVENT(sde_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(trace_name, name)
+			__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(trace_name, name);
+			__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+		__entry->pid, __get_str(trace_name))
+);
+
+/* systrace-compatible counter event */
+TRACE_EVENT(sde_trace_counter,
+	TP_PROTO(int pid, char *name, int value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(counter_name, name)
+			__field(int, value)
+	),
+	TP_fast_assign(
+			/*
+			 * Record the pid the caller passed instead of
+			 * hard-coding current->tgid; the only caller
+			 * (SDE_ATRACE_INT) passes current->tgid, so
+			 * behavior is unchanged and the argument is
+			 * no longer silently ignored.
+			 */
+			__entry->pid = pid;
+			__assign_str(counter_name, name);
+			__entry->value = value;
+	),
+	TP_printk("%d|%s|%d", __entry->pid,
+			__get_str(counter_name), __entry->value)
+);
+
+/* generic tagged event-log entry with two 64-bit payload values */
+TRACE_EVENT(sde_evtlog,
+	TP_PROTO(const char *tag, u32 tag_id, u64 value1, u64 value2),
+	TP_ARGS(tag, tag_id, value1, value2),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(evtlog_tag, tag)
+			__field(u32, tag_id)
+			__field(u64, value1)
+			__field(u64, value2)
+	),
+	TP_fast_assign(
+			__entry->pid = current->tgid;
+			__assign_str(evtlog_tag, tag);
+			__entry->tag_id = tag_id;
+			__entry->value1 = value1;
+			__entry->value2 = value2;
+	),
+	TP_printk("%d|%s:%d|%llu|%llu", __entry->pid, __get_str(evtlog_tag),
+			__entry->tag_id, __entry->value1, __entry->value2)
+);
+
+#define SDE_ATRACE_END(name) trace_sde_mark_write(current->tgid, name, 0)
+#define SDE_ATRACE_BEGIN(name) trace_sde_mark_write(current->tgid, name, 1)
+#define SDE_ATRACE_FUNC() SDE_ATRACE_BEGIN(__func__)
+
+#define SDE_ATRACE_INT(name, value) \
+ trace_sde_trace_counter(current->tgid, name, value)
+
+#endif /* _SDE_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
new file mode 100644
index 000000000000..b114840d741c
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -0,0 +1,284 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include "sde_vbif.h"
+#include "sde_hw_vbif.h"
+#include "sde_trace.h"
+
+/**
+ * _sde_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif: Pointer to hardware vbif driver
+ * @xin_id: Client interface identifier
+ * @return: 0 if success; error code otherwise
+ *
+ * Polls get_halt_ctrl until the client reports halted or the
+ * catalog-provided xin_halt_timeout (microseconds) expires.
+ */
+static int _sde_vbif_wait_for_xin_halt(struct sde_hw_vbif *vbif, u32 xin_id)
+{
+	ktime_t timeout;
+	bool status;
+	int rc;
+
+	if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+		SDE_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		return -EINVAL;
+	}
+
+	timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+	for (;;) {
+		status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+		if (status)
+			break;
+		if (ktime_compare_safe(ktime_get(), timeout) > 0) {
+			/* one final read so a halt that lands right at the
+			 * deadline is not reported as a timeout
+			 */
+			status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+			break;
+		}
+		/* back off between polls; halt is expected to be quick */
+		usleep_range(501, 1000);
+	}
+
+	if (!status) {
+		rc = -ETIMEDOUT;
+		SDE_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
+				vbif->idx - VBIF_0, xin_id);
+	} else {
+		rc = 0;
+		SDE_DEBUG("VBIF %d client %d is halted\n",
+				vbif->idx - VBIF_0, xin_id);
+	}
+
+	return rc;
+}
+
+/**
+ * _sde_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @ot_lim: Pointer to OT limit to be modified
+ * @params: Pointer to usecase parameters
+ *
+ * Overwrites *ot_lim with the first dynamic-table entry whose pps
+ * threshold covers width * height * frame_rate; leaves *ot_lim
+ * untouched when no entry matches or dynamic OT is not applicable.
+ * NOTE(review): dereferences vbif->cap without a NULL check - callers
+ * in this file validate it first; confirm for any new caller.
+ */
+static void _sde_vbif_apply_dynamic_ot_limit(struct sde_hw_vbif *vbif,
+		u32 *ot_lim, struct sde_vbif_set_ot_params *params)
+{
+	u64 pps;
+	const struct sde_vbif_dynamic_ot_tbl *tbl;
+	u32 i;
+
+	if (!vbif || !(vbif->cap->features & BIT(SDE_VBIF_QOS_OTLIM)))
+		return;
+
+	/* Dynamic OT setting done only for WFD */
+	if (!params->is_wfd)
+		return;
+
+	/* pixels per second for this use case (u64 to avoid overflow) */
+	pps = params->frame_rate;
+	pps *= params->width;
+	pps *= params->height;
+
+	tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+			&vbif->cap->dynamic_ot_wr_tbl;
+
+	/* take the first bucket that fits; assumes table is sorted by
+	 * ascending pps - TODO confirm against catalog population
+	 */
+	for (i = 0; i < tbl->count; i++) {
+		if (pps <= tbl->cfg[i].pps) {
+			*ot_lim = tbl->cfg[i].ot_limit;
+			break;
+		}
+	}
+
+	SDE_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+			vbif->idx - VBIF_0, params->xin_id,
+			params->width, params->height, params->frame_rate,
+			pps, *ot_lim);
+}
+
+/**
+ * _sde_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @params: Pointer to usecase parameters
+ * @return: OT limit to program, or 0 when nothing needs programming
+ */
+static u32 _sde_vbif_get_ot_limit(struct sde_hw_vbif *vbif,
+	struct sde_vbif_set_ot_params *params)
+{
+	u32 ot_lim = 0;
+	u32 val;
+
+	if (!vbif || !vbif->cap) {
+		SDE_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		/*
+		 * Return type is u32, so a negative errno here would be
+		 * misread by the caller (which masks with 0xFF) as a
+		 * valid OT limit; 0 safely means "do not configure".
+		 */
+		return 0;
+	}
+
+	/* start from the catalog default for the transfer direction */
+	if (vbif->cap->default_ot_wr_limit && !params->rd)
+		ot_lim = vbif->cap->default_ot_wr_limit;
+	else if (vbif->cap->default_ot_rd_limit && params->rd)
+		ot_lim = vbif->cap->default_ot_rd_limit;
+
+	/*
+	 * If default ot is not set from dt/catalog,
+	 * then do not configure it.
+	 */
+	if (ot_lim == 0)
+		goto exit;
+
+	/* Modify the limits if the target and the use case requires it */
+	_sde_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+	/* skip reprogramming when the hardware already holds this limit;
+	 * vbif was validated above, so no need to re-check it here
+	 */
+	if (vbif->ops.get_limit_conf) {
+		val = vbif->ops.get_limit_conf(vbif,
+				params->xin_id, params->rd);
+		if (val == ot_lim)
+			ot_lim = 0;
+	}
+
+exit:
+	SDE_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+			vbif->idx - VBIF_0, params->xin_id, ot_lim);
+	return ot_lim;
+}
+
+/**
+ * sde_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @sde_kms: SDE kms handle used to resolve the target vbif and MDP top
+ * @params: Pointer to usecase parameters (vbif index, xin id, geometry)
+ *
+ * Note this function would block waiting for bus halt.
+ */
+void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
+		struct sde_vbif_set_ot_params *params)
+{
+	struct sde_hw_vbif *vbif = NULL;
+	struct sde_hw_mdp *mdp;
+	bool forced_on = false;
+	u32 ot_lim;
+	int ret, i;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+	mdp = sde_kms->hw_mdp;
+
+	/* map the requested vbif index onto a hardware instance */
+	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
+		if (sde_kms->hw_vbif[i] &&
+				sde_kms->hw_vbif[i]->idx == params->vbif_idx)
+			vbif = sde_kms->hw_vbif[i];
+	}
+
+	if (!vbif || !mdp) {
+		SDE_DEBUG("invalid arguments vbif %d mdp %d\n",
+				vbif != 0, mdp != 0);
+		return;
+	}
+
+	/* all three ops are required to (re)program an OT limit */
+	if (!mdp->ops.setup_clk_force_ctrl ||
+			!vbif->ops.set_limit_conf ||
+			!vbif->ops.set_halt_ctrl)
+		return;
+
+	/* 0 means no limit configured or hardware already up to date */
+	ot_lim = _sde_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+	if (ot_lim == 0)
+		goto exit;
+
+	trace_sde_perf_set_ot(params->num, params->xin_id, ot_lim,
+		params->vbif_idx);
+
+	/* force the client clock on so the halt handshake can complete */
+	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+	vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+	/* halt/unhalt the client around the limit change */
+	vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+	ret = _sde_vbif_wait_for_xin_halt(vbif, params->xin_id);
+	if (ret)
+		SDE_EVT32(vbif->idx, params->xin_id);
+
+	vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+	if (forced_on)
+		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+exit:
+	return;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * sde_debugfs_vbif_destroy - remove the vbif debugfs hierarchy
+ * @sde_kms: SDE kms handle owning the debugfs directory
+ */
+void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
+{
+	debugfs_remove_recursive(sde_kms->debugfs_vbif);
+	sde_kms->debugfs_vbif = NULL;
+}
+
+/**
+ * sde_debugfs_vbif_init - expose vbif catalog settings under debugfs
+ * @sde_kms: SDE kms handle
+ * @debugfs_root: Debugfs root directory
+ * Returns: 0 on success; -EINVAL when directory creation fails
+ *
+ * NOTE(review): @debugfs_root is currently unused; entries are created
+ * under sde_kms->debugfs_root instead - confirm which root is intended.
+ * NOTE(review): "features" is created 0644 (writable) while every other
+ * node is S_IRUGO - confirm the writability is intentional.
+ */
+int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root)
+{
+	char vbif_name[32];
+	struct dentry *debugfs_vbif;
+	int i, j;
+
+	sde_kms->debugfs_vbif = debugfs_create_dir("vbif",
+			sde_kms->debugfs_root);
+	if (!sde_kms->debugfs_vbif) {
+		SDE_ERROR("failed to create vbif debugfs\n");
+		return -EINVAL;
+	}
+
+	/* one sub-directory per vbif instance, named by catalog id */
+	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
+		struct sde_vbif_cfg *vbif = &sde_kms->catalog->vbif[i];
+
+		snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+		debugfs_vbif = debugfs_create_dir(vbif_name,
+				sde_kms->debugfs_vbif);
+
+		debugfs_create_u32("features", 0644, debugfs_vbif,
+				(u32 *)&vbif->features);
+
+		debugfs_create_u32("xin_halt_timeout", S_IRUGO, debugfs_vbif,
+				(u32 *)&vbif->xin_halt_timeout);
+
+		debugfs_create_u32("default_rd_ot_limit", S_IRUGO, debugfs_vbif,
+				(u32 *)&vbif->default_ot_rd_limit);
+
+		debugfs_create_u32("default_wr_ot_limit", S_IRUGO, debugfs_vbif,
+				(u32 *)&vbif->default_ot_wr_limit);
+
+		/* per-entry pps threshold and OT limit for the read table */
+		for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+			struct sde_vbif_dynamic_ot_cfg *cfg =
+					&vbif->dynamic_ot_rd_tbl.cfg[j];
+
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_rd_%d_pps", j);
+			debugfs_create_u64(vbif_name, S_IRUGO, debugfs_vbif,
+					(u64 *)&cfg->pps);
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_rd_%d_ot_limit", j);
+			debugfs_create_u32(vbif_name, S_IRUGO, debugfs_vbif,
+					(u32 *)&cfg->ot_limit);
+		}
+
+		/* per-entry pps threshold and OT limit for the write table */
+		for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+			struct sde_vbif_dynamic_ot_cfg *cfg =
+					&vbif->dynamic_ot_wr_tbl.cfg[j];
+
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_wr_%d_pps", j);
+			debugfs_create_u64(vbif_name, S_IRUGO, debugfs_vbif,
+					(u64 *)&cfg->pps);
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_wr_%d_ot_limit", j);
+			debugfs_create_u32(vbif_name, S_IRUGO, debugfs_vbif,
+					(u32 *)&cfg->ot_limit);
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.h b/drivers/gpu/drm/msm/sde/sde_vbif.h
new file mode 100644
index 000000000000..33f16a867a60
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_VBIF_H__
+#define __SDE_VBIF_H__
+
+#include "sde_kms.h"
+
+/**
+ * struct sde_vbif_set_ot_params - OT limit request for one vbif client
+ * @xin_id: Client interface identifier
+ * @num: Pipe/client number, used for tracing
+ * @width: Frame width in pixels
+ * @height: Frame height in pixels
+ * @frame_rate: Frame rate, frames per second
+ * @rd: True for a read client, false for a write client
+ * @is_wfd: True when the use case is WFD (enables dynamic OT)
+ * @vbif_idx: VBIF hardware instance index
+ * @clk_ctrl: Clock control identifier used for clock force-on
+ */
+struct sde_vbif_set_ot_params {
+	u32 xin_id;
+	u32 num;
+	u32 width;
+	u32 height;
+	u32 frame_rate;
+	bool rd;
+	bool is_wfd;
+	u32 vbif_idx;
+	u32 clk_ctrl;
+};
+
+/**
+ * sde_vbif_set_ot_limit - set OT limit for vbif client
+ * @sde_kms: SDE handler
+ * @params: Pointer to OT configuration parameters
+ */
+void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
+ struct sde_vbif_set_ot_params *params);
+
+#ifdef CONFIG_DEBUG_FS
+int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root);
+void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms);
+#else
+static inline int sde_debugfs_vbif_init(struct sde_kms *sde_kms,
+ struct dentry *debugfs_root)
+{
+ return 0;
+}
+static inline void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
+{
+}
+#endif
+#endif /* __SDE_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
new file mode 100644
index 000000000000..647cb5891153
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -0,0 +1,745 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_kms.h"
+#include "sde_kms.h"
+#include "sde_wb.h"
+#include "sde_formats.h"
+
+/* maximum display mode resolution if not available from catalog */
+#define SDE_WB_MODE_MAX_WIDTH 4096
+#define SDE_WB_MODE_MAX_HEIGHT 4096
+
+/* Serialization lock for sde_wb_list */
+static DEFINE_MUTEX(sde_wb_list_lock);
+
+/* List of all writeback devices installed */
+static LIST_HEAD(sde_wb_list);
+
+/**
+ * sde_wb_is_format_valid - check if given format/modifier is supported
+ * @wb_dev: Pointer to writeback device
+ * @pixel_format: Fourcc pixel format
+ * @format_modifier: Format modifier
+ * Returns: true if valid; false otherwise
+ */
+static bool sde_wb_is_format_valid(struct sde_wb_device *wb_dev,
+		u32 pixel_format, u64 format_modifier)
+{
+	const struct sde_format_extended *fmts = wb_dev->wb_cfg->format_list;
+	int i;
+
+	/* no catalog format list means nothing is supported */
+	if (!fmts)
+		return false;
+
+	/* the list is terminated by a zero fourcc entry */
+	for (i = 0; fmts[i].fourcc_format; i++)
+		if ((fmts[i].modifier == format_modifier) &&
+				(fmts[i].fourcc_format == pixel_format))
+			return true;
+
+	return false;
+}
+
+/* report the cached hot-plug status of the writeback "connector" */
+enum drm_connector_status
+sde_wb_connector_detect(struct drm_connector *connector,
+		bool force,
+		void *display)
+{
+	struct sde_wb_device *wb_dev = display;
+
+	SDE_DEBUG("\n");
+
+	/* no device context means the state cannot be determined */
+	return wb_dev ? wb_dev->detect_status : connector_status_unknown;
+}
+
+/**
+ * sde_wb_connector_get_modes - populate drm modes for the wb connector
+ * @connector: Pointer to drm connector
+ * @display: Pointer to sde_wb_device
+ * Returns: number of modes added to the connector
+ *
+ * Uses the user-supplied mode list when one was cached via
+ * sde_wb_connector_set_modes; otherwise falls back to standard
+ * no-EDID modes bounded by the catalog max line width.
+ */
+int sde_wb_connector_get_modes(struct drm_connector *connector, void *display)
+{
+	struct sde_wb_device *wb_dev;
+	int num_modes = 0;
+
+	if (!connector || !display)
+		return 0;
+
+	wb_dev = display;
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+	if (wb_dev->count_modes && wb_dev->modes) {
+		struct drm_display_mode *mode;
+		int i, ret;
+
+		for (i = 0; i < wb_dev->count_modes; i++) {
+			mode = drm_mode_create(connector->dev);
+			if (!mode) {
+				SDE_ERROR("failed to create mode\n");
+				break;
+			}
+			ret = drm_mode_convert_umode(mode,
+					&wb_dev->modes[i]);
+			if (ret) {
+				SDE_ERROR("failed to convert mode %d\n", ret);
+				/* don't leak the mode we failed to fill in */
+				drm_mode_destroy(connector->dev, mode);
+				break;
+			}
+
+			drm_mode_probed_add(connector, mode);
+			num_modes++;
+		}
+	} else {
+		u32 max_width = (wb_dev->wb_cfg && wb_dev->wb_cfg->sblk) ?
+				wb_dev->wb_cfg->sblk->maxlinewidth :
+				SDE_WB_MODE_MAX_WIDTH;
+
+		num_modes = drm_add_modes_noedid(connector, max_width,
+				SDE_WB_MODE_MAX_HEIGHT);
+	}
+	mutex_unlock(&wb_dev->wb_lock);
+	return num_modes;
+}
+
+/* return the output framebuffer attached to a virtual connector state,
+ * or NULL when the state does not belong to a writeback connector
+ */
+struct drm_framebuffer *
+sde_wb_connector_state_get_output_fb(struct drm_connector_state *state)
+{
+	bool is_wb_state = state && state->connector &&
+			(state->connector->connector_type ==
+					DRM_MODE_CONNECTOR_VIRTUAL);
+
+	if (!is_wb_state) {
+		SDE_ERROR("invalid params\n");
+		return NULL;
+	}
+
+	SDE_DEBUG("\n");
+
+	return sde_connector_get_out_fb(state);
+}
+
+/* fill *roi with the DST_X/Y/W/H connector properties of a writeback
+ * connector state; -EINVAL for anything that is not a virtual connector
+ */
+int sde_wb_connector_state_get_output_roi(struct drm_connector_state *state,
+		struct sde_rect *roi)
+{
+	bool is_wb_state = roi && state && state->connector &&
+			(state->connector->connector_type ==
+					DRM_MODE_CONNECTOR_VIRTUAL);
+
+	if (!is_wb_state) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	roi->x = sde_connector_get_property(state, CONNECTOR_PROP_DST_X);
+	roi->y = sde_connector_get_property(state, CONNECTOR_PROP_DST_Y);
+	roi->w = sde_connector_get_property(state, CONNECTOR_PROP_DST_W);
+	roi->h = sde_connector_get_property(state, CONNECTOR_PROP_DST_H);
+
+	return 0;
+}
+
+/**
+ * sde_wb_connector_set_modes - set writeback modes and connection status
+ * @wb_dev: Pointer to write back device
+ * @count_modes: Count of modes
+ * @modes: Pointer to writeback mode requested (user-space buffer)
+ * @connected: Connection status requested
+ * Returns: 0 if success; error code otherwise
+ *
+ * Called with wb_dev->wb_lock held by sde_wb_config - TODO confirm
+ * no other caller appears without the lock.
+ */
+static
+int sde_wb_connector_set_modes(struct sde_wb_device *wb_dev,
+		u32 count_modes, struct drm_mode_modeinfo __user *modes,
+		bool connected)
+{
+	int ret = 0;
+
+	if (!wb_dev || !wb_dev->connector ||
+			(wb_dev->connector->connector_type !=
+			 DRM_MODE_CONNECTOR_VIRTUAL)) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	if (connected) {
+		SDE_DEBUG("connect\n");
+
+		/* replace any previously cached mode list */
+		if (wb_dev->modes) {
+			wb_dev->count_modes = 0;
+
+			kfree(wb_dev->modes);
+			wb_dev->modes = NULL;
+		}
+
+		if (count_modes && modes) {
+			wb_dev->modes = kcalloc(count_modes,
+					sizeof(struct drm_mode_modeinfo),
+					GFP_KERNEL);
+			if (!wb_dev->modes) {
+				SDE_ERROR("invalid params\n");
+				ret = -ENOMEM;
+				goto error;
+			}
+
+			/* copy the caller-provided modes from user space */
+			if (copy_from_user(wb_dev->modes, modes,
+					count_modes *
+					sizeof(struct drm_mode_modeinfo))) {
+				SDE_ERROR("failed to copy modes\n");
+				kfree(wb_dev->modes);
+				wb_dev->modes = NULL;
+				ret = -EFAULT;
+				goto error;
+			}
+
+			wb_dev->count_modes = count_modes;
+		}
+
+		wb_dev->detect_status = connector_status_connected;
+	} else {
+		SDE_DEBUG("disconnect\n");
+
+		/* free cached modes; detect will report disconnected */
+		if (wb_dev->modes) {
+			wb_dev->count_modes = 0;
+
+			kfree(wb_dev->modes);
+			wb_dev->modes = NULL;
+		}
+
+		wb_dev->detect_status = connector_status_disconnected;
+	}
+
+error:
+	return ret;
+}
+
+/**
+ * sde_wb_connector_set_property - validate a connector property update
+ * @connector: Pointer to drm connector
+ * @state: Pointer to connector state being updated
+ * @property_index: Property index of enum used by sde_connector
+ * @value: Proposed value (unused; the fb is read back from @state)
+ * @display: Pointer to sde_wb_device
+ * Returns: 0 if acceptable; -EINVAL otherwise
+ *
+ * Only CONNECTOR_PROP_OUT_FB is checked here: the output framebuffer
+ * format/modifier must appear in the writeback catalog format list.
+ */
+int sde_wb_connector_set_property(struct drm_connector *connector,
+		struct drm_connector_state *state,
+		int property_index,
+		uint64_t value,
+		void *display)
+{
+	struct sde_wb_device *wb_dev = display;
+	struct drm_framebuffer *out_fb;
+	int rc = 0;
+
+	SDE_DEBUG("\n");
+
+	if (state && (property_index == CONNECTOR_PROP_OUT_FB)) {
+		const struct sde_format *sde_format;
+
+		/* no fb attached yet is not an error */
+		out_fb = sde_connector_get_out_fb(state);
+		if (!out_fb)
+			goto done;
+
+		sde_format = sde_get_sde_format_ext(out_fb->pixel_format,
+				out_fb->modifier,
+				drm_format_num_planes(out_fb->pixel_format));
+		if (!sde_format) {
+			SDE_ERROR("failed to get sde format\n");
+			rc = -EINVAL;
+			goto done;
+		}
+
+		/* only modifier[0] is compared against the catalog list */
+		if (!sde_wb_is_format_valid(wb_dev, out_fb->pixel_format,
+				out_fb->modifier[0])) {
+			SDE_ERROR("unsupported writeback format 0x%x/0x%llx\n",
+					out_fb->pixel_format,
+					out_fb->modifier[0]);
+			rc = -EINVAL;
+			goto done;
+		}
+	}
+
+done:
+	return rc;
+}
+
+/* describe the writeback interface to the generic display layer */
+int sde_wb_get_info(struct msm_display_info *info, void *display)
+{
+	struct sde_wb_device *wb_dev = display;
+	u32 line_width = SDE_WB_MODE_MAX_WIDTH;
+
+	if (!wb_dev || !info) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	/* prefer the catalog line width when it is available */
+	if (wb_dev->wb_cfg && wb_dev->wb_cfg->sblk)
+		line_width = wb_dev->wb_cfg->sblk->maxlinewidth;
+
+	info->intf_type = DRM_MODE_CONNECTOR_VIRTUAL;
+	info->num_of_h_tiles = 1;
+	info->h_tile_instance[0] = sde_wb_get_index(display);
+	info->is_connected = true;
+	info->capabilities = MSM_DISPLAY_CAP_HOT_PLUG | MSM_DISPLAY_CAP_EDID;
+	info->max_width = line_width;
+	info->max_height = SDE_WB_MODE_MAX_HEIGHT;
+	info->compression = MSM_DISPLAY_COMPRESS_NONE;
+	return 0;
+}
+
+/**
+ * sde_wb_connector_post_init - attach properties and capability info
+ * @connector: Pointer to drm connector
+ * @info: Pointer to sde_kms_info buffer to populate
+ * @display: Pointer to sde_wb_device
+ * Returns: 0 on success; -EINVAL on invalid arguments
+ *
+ * NOTE(review): wb_cfg is validated at entry, so the later
+ * "wb_dev->wb_cfg &&" test is redundant; also sblk is dereferenced for
+ * maxlinewidth without a NULL check - confirm the catalog always
+ * populates sblk.
+ */
+int sde_wb_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display)
+{
+	struct sde_connector *c_conn;
+	struct sde_wb_device *wb_dev = display;
+	const struct sde_format_extended *format_list;
+
+	if (!connector || !info || !display || !wb_dev->wb_cfg) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	c_conn = to_sde_connector(connector);
+	wb_dev->connector = connector;
+	wb_dev->detect_status = connector_status_connected;
+	format_list = wb_dev->wb_cfg->format_list;
+
+	/*
+	 * Add extra connector properties
+	 */
+	msm_property_install_range(&c_conn->property_info, "FB_ID",
+			0x0, 0, ~0, ~0, CONNECTOR_PROP_OUT_FB);
+	msm_property_install_range(&c_conn->property_info, "DST_X",
+			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_X);
+	msm_property_install_range(&c_conn->property_info, "DST_Y",
+			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_Y);
+	msm_property_install_range(&c_conn->property_info, "DST_W",
+			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_W);
+	msm_property_install_range(&c_conn->property_info, "DST_H",
+			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_H);
+
+	/*
+	 * Populate info buffer
+	 */
+	if (format_list) {
+		sde_kms_info_start(info, "pixel_formats");
+		while (format_list->fourcc_format) {
+			sde_kms_info_append_format(info,
+					format_list->fourcc_format,
+					format_list->modifier);
+			++format_list;
+		}
+		sde_kms_info_stop(info);
+	}
+
+	sde_kms_info_add_keyint(info,
+			"wb_intf_index",
+			wb_dev->wb_idx - WB_0);
+
+	sde_kms_info_add_keyint(info,
+			"maxlinewidth",
+			wb_dev->wb_cfg->sblk->maxlinewidth);
+
+	sde_kms_info_start(info, "features");
+	if (wb_dev->wb_cfg && (wb_dev->wb_cfg->features & SDE_WB_UBWC_1_0))
+		sde_kms_info_append(info, "wb_ubwc");
+	sde_kms_info_stop(info);
+
+	return 0;
+}
+
+/* fetch the current output framebuffer under the device lock */
+struct drm_framebuffer *sde_wb_get_output_fb(struct sde_wb_device *wb_dev)
+{
+	struct drm_framebuffer *out_fb = NULL;
+
+	if (!wb_dev || !wb_dev->connector) {
+		SDE_ERROR("invalid params\n");
+		return NULL;
+	}
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+	out_fb = sde_wb_connector_state_get_output_fb(
+			wb_dev->connector->state);
+	mutex_unlock(&wb_dev->wb_lock);
+
+	return out_fb;
+}
+
+/* fetch the current output ROI under the device lock */
+int sde_wb_get_output_roi(struct sde_wb_device *wb_dev, struct sde_rect *roi)
+{
+	int rc = -EINVAL;
+
+	if (!roi || !wb_dev || !wb_dev->connector) {
+		SDE_ERROR("invalid params\n");
+		return rc;
+	}
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+	rc = sde_wb_connector_state_get_output_roi(
+			wb_dev->connector->state, roi);
+	mutex_unlock(&wb_dev->wb_lock);
+
+	return rc;
+}
+
+/* count the writeback devices currently registered on the global list */
+u32 sde_wb_get_num_of_displays(void)
+{
+	struct sde_wb_device *entry;
+	u32 dev_count = 0;
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&sde_wb_list_lock);
+	list_for_each_entry(entry, &sde_wb_list, wb_list)
+		dev_count++;
+	mutex_unlock(&sde_wb_list_lock);
+
+	return dev_count;
+}
+
+/* copy up to max_display_count writeback device pointers into
+ * display_array; returns the number of entries written
+ */
+int wb_display_get_displays(void **display_array, u32 max_display_count)
+{
+	struct sde_wb_device *entry;
+	int filled = 0;
+
+	SDE_DEBUG("\n");
+
+	if (!display_array || !max_display_count) {
+		/* a zero count is a silent no-op; a NULL array is an error */
+		if (!display_array)
+			SDE_ERROR("invalid param\n");
+		return 0;
+	}
+
+	mutex_lock(&sde_wb_list_lock);
+	list_for_each_entry(entry, &sde_wb_list, wb_list) {
+		if (filled >= max_display_count)
+			break;
+		display_array[filled++] = entry;
+	}
+	mutex_unlock(&sde_wb_list_lock);
+
+	return filled;
+}
+
+/**
+ * sde_wb_config - handle writeback configuration ioctl
+ * @drm_dev: Pointer to drm device
+ * @data: Pointer to struct sde_drm_wb_cfg from user space
+ * @file_priv: Pointer to drm file (unused)
+ * Returns: 0 on success; -EINVAL on bad arguments, -ENOENT when the
+ * connector or writeback device cannot be found, or the error from
+ * sde_wb_connector_set_modes
+ */
+int sde_wb_config(struct drm_device *drm_dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct sde_drm_wb_cfg *config = data;
+	struct sde_wb_device *wb_dev = NULL;
+	struct sde_wb_device *curr;
+	struct drm_connector *connector;
+	uint32_t flags;
+	uint32_t connector_id;
+	uint32_t count_modes;
+	uint64_t modes;
+	int rc;
+
+	if (!drm_dev || !data) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	flags = config->flags;
+	connector_id = config->connector_id;
+	count_modes = config->count_modes;
+	modes = config->modes;
+
+	connector = drm_connector_find(drm_dev, connector_id);
+	if (!connector) {
+		SDE_ERROR("failed to find connector\n");
+		rc = -ENOENT;
+		goto fail;
+	}
+
+	/* map the connector back onto its writeback device */
+	mutex_lock(&sde_wb_list_lock);
+	list_for_each_entry(curr, &sde_wb_list, wb_list) {
+		if (curr->connector == connector) {
+			wb_dev = curr;
+			break;
+		}
+	}
+	mutex_unlock(&sde_wb_list_lock);
+
+	if (!wb_dev) {
+		SDE_ERROR("failed to find wb device\n");
+		rc = -ENOENT;
+		goto fail;
+	}
+
+	mutex_lock(&wb_dev->wb_lock);
+
+	rc = sde_wb_connector_set_modes(wb_dev, count_modes,
+		(struct drm_mode_modeinfo __user *) (uintptr_t) modes,
+		(flags & SDE_DRM_WB_CFG_FLAGS_CONNECTED) ? true : false);
+
+	mutex_unlock(&wb_dev->wb_lock);
+
+	/* propagate the (dis)connect as a hot-plug event */
+	drm_helper_hpd_irq_event(drm_dev);
+fail:
+	return rc;
+}
+
+/**
+ * _sde_wb_dev_init - perform device initialization
+ * @wb_dev: Pointer to writeback device
+ * Returns: 0 on success; -EINVAL on invalid arguments
+ */
+static int _sde_wb_dev_init(struct sde_wb_device *wb_dev)
+{
+	if (!wb_dev) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	/* no hardware resources to set up yet */
+	return 0;
+}
+
+/**
+ * _sde_wb_dev_deinit - perform device de-initialization
+ * @wb_dev: Pointer to writeback device
+ * Returns: 0 on success; -EINVAL on invalid arguments
+ */
+static int _sde_wb_dev_deinit(struct sde_wb_device *wb_dev)
+{
+	if (!wb_dev) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	/* nothing allocated by _sde_wb_dev_init needs teardown yet */
+	return 0;
+}
+
+/**
+ * sde_wb_bind - bind writeback device with controlling device
+ * @dev: Pointer to base of platform device
+ * @master: Pointer to container of drm device
+ * @data: Pointer to private data
+ * Returns: Zero on success
+ *
+ * Only caches the drm device handle; all other setup happens in
+ * sde_wb_drm_init once the encoder exists.
+ */
+static int sde_wb_bind(struct device *dev, struct device *master, void *data)
+{
+	struct sde_wb_device *wb_dev;
+
+	if (!dev || !master) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	wb_dev = platform_get_drvdata(to_platform_device(dev));
+	if (!wb_dev) {
+		SDE_ERROR("invalid wb device\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+	wb_dev->drm_dev = dev_get_drvdata(master);
+	mutex_unlock(&wb_dev->wb_lock);
+
+	return 0;
+}
+
+/**
+ * sde_wb_unbind - unbind writeback from controlling device
+ * @dev: Pointer to base of platform device
+ * @master: Pointer to container of drm device
+ * @data: Pointer to private data
+ */
+static void sde_wb_unbind(struct device *dev,
+		struct device *master, void *data)
+{
+	struct sde_wb_device *wb_dev;
+
+	if (!dev) {
+		SDE_ERROR("invalid params\n");
+		return;
+	}
+
+	wb_dev = platform_get_drvdata(to_platform_device(dev));
+	if (!wb_dev) {
+		SDE_ERROR("invalid wb device\n");
+		return;
+	}
+
+	SDE_DEBUG("\n");
+
+	/* drop the cached drm device reference taken at bind time */
+	mutex_lock(&wb_dev->wb_lock);
+	wb_dev->drm_dev = NULL;
+	mutex_unlock(&wb_dev->wb_lock);
+}
+
+static const struct component_ops sde_wb_comp_ops = {
+ .bind = sde_wb_bind,
+ .unbind = sde_wb_unbind,
+};
+
+/**
+ * sde_wb_drm_init - perform DRM initialization
+ * @wb_dev: Pointer to writeback device
+ * @encoder: Pointer to associated encoder
+ * Returns: 0 on success; -EINVAL on invalid arguments
+ */
+int sde_wb_drm_init(struct sde_wb_device *wb_dev, struct drm_encoder *encoder)
+{
+	int rc = 0;
+
+	if (!wb_dev || !wb_dev->drm_dev || !encoder) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+
+	/* resolve the catalog entry for this writeback instance */
+	if (wb_dev->drm_dev->dev_private) {
+		struct msm_drm_private *priv = wb_dev->drm_dev->dev_private;
+		struct sde_kms *sde_kms = to_sde_kms(priv->kms);
+
+		if (wb_dev->index < sde_kms->catalog->wb_count) {
+			wb_dev->wb_idx = sde_kms->catalog->wb[wb_dev->index].id;
+			wb_dev->wb_cfg = &sde_kms->catalog->wb[wb_dev->index];
+		}
+	}
+
+	/* NOTE(review): drm_dev is re-pointed at encoder->dev here -
+	 * confirm this always matches the device cached at bind time
+	 */
+	wb_dev->drm_dev = encoder->dev;
+	wb_dev->encoder = encoder;
+	mutex_unlock(&wb_dev->wb_lock);
+	return rc;
+}
+
+/**
+ * sde_wb_drm_deinit - release DRM-side writeback state
+ * @wb_dev: Pointer to writeback device
+ * Returns: 0 on success; -EINVAL on invalid arguments
+ */
+int sde_wb_drm_deinit(struct sde_wb_device *wb_dev)
+{
+	if (!wb_dev) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	/* nothing allocated by sde_wb_drm_init needs teardown yet */
+	return 0;
+}
+
+/**
+ * sde_wb_probe - load writeback module
+ * @pdev: Pointer to platform device
+ * Returns: 0 on success; negative errno on failure
+ */
+static int sde_wb_probe(struct platform_device *pdev)
+{
+	struct sde_wb_device *wb_dev;
+	int ret;
+
+	wb_dev = devm_kzalloc(&pdev->dev, sizeof(*wb_dev), GFP_KERNEL);
+	if (!wb_dev)
+		return -ENOMEM;
+
+	SDE_DEBUG("\n");
+
+	ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
+			&wb_dev->index);
+	if (ret) {
+		SDE_DEBUG("cell index not set, default to 0\n");
+		wb_dev->index = 0;
+	}
+
+	wb_dev->name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!wb_dev->name) {
+		SDE_DEBUG("label not set, default to unknown\n");
+		wb_dev->name = "unknown";
+	}
+
+	wb_dev->wb_idx = SDE_NONE;
+
+	mutex_init(&wb_dev->wb_lock);
+	platform_set_drvdata(pdev, wb_dev);
+
+	mutex_lock(&sde_wb_list_lock);
+	list_add(&wb_dev->wb_list, &sde_wb_list);
+	mutex_unlock(&sde_wb_list_lock);
+
+	/*
+	 * Propagate the init error instead of the stale of_property
+	 * result; previously a failed init could return success.
+	 */
+	ret = _sde_wb_dev_init(wb_dev);
+	if (!ret) {
+		ret = component_add(&pdev->dev, &sde_wb_comp_ops);
+		if (ret)
+			pr_err("component add failed\n");
+	}
+
+	if (ret) {
+		/* undo registration so a failed probe leaves no stale entry */
+		mutex_lock(&sde_wb_list_lock);
+		list_del(&wb_dev->wb_list);
+		mutex_unlock(&sde_wb_list_lock);
+		platform_set_drvdata(pdev, NULL);
+	}
+
+	return ret;
+}
+
+/**
+ * sde_wb_remove - unload writeback module
+ * @pdev: Pointer to platform device
+ * Returns: 0 always
+ */
+static int sde_wb_remove(struct platform_device *pdev)
+{
+	struct sde_wb_device *wb_dev;
+	struct sde_wb_device *curr, *next;
+
+	wb_dev = platform_get_drvdata(pdev);
+	if (!wb_dev)
+		return 0;
+
+	SDE_DEBUG("\n");
+
+	(void)_sde_wb_dev_deinit(wb_dev);
+
+	/* remove this device from the global writeback list */
+	mutex_lock(&sde_wb_list_lock);
+	list_for_each_entry_safe(curr, next, &sde_wb_list, wb_list) {
+		if (curr == wb_dev) {
+			list_del(&wb_dev->wb_list);
+			break;
+		}
+	}
+	mutex_unlock(&sde_wb_list_lock);
+
+	/* free any cached user-mode list before freeing the device */
+	kfree(wb_dev->modes);
+	mutex_destroy(&wb_dev->wb_lock);
+
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, wb_dev);
+
+	return 0;
+}
+
+/* device tree match table for writeback display nodes */
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,wb-display"},
+	{}
+};
+
+static struct platform_driver sde_wb_driver = {
+	.probe = sde_wb_probe,
+	.remove = sde_wb_remove,
+	.driver = {
+		.name = "sde_wb",
+		.of_match_table = dt_match,
+	},
+};
+
+/* register/unregister the writeback platform driver at module load/unload */
+static int __init sde_wb_register(void)
+{
+	return platform_driver_register(&sde_wb_driver);
+}
+
+static void __exit sde_wb_unregister(void)
+{
+	platform_driver_unregister(&sde_wb_driver);
+}
+
+module_init(sde_wb_register);
+module_exit(sde_wb_unregister);
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.h b/drivers/gpu/drm/msm/sde/sde_wb.h
new file mode 100644
index 000000000000..4e335956db55
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_wb.h
@@ -0,0 +1,321 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_WB_H__
+#define __SDE_WB_H__
+
+#include <linux/platform_device.h>
+
+#include "msm_kms.h"
+#include "sde_kms.h"
+#include "sde_connector.h"
+
+/**
+ * struct sde_wb_device - Writeback device context
+ * @drm_dev: Pointer to controlling DRM device
+ * @index: Index of hardware instance from device tree
+ * @wb_idx: Writeback identifier of enum sde_wb
+ * @wb_cfg: Writeback configuration catalog
+ * @name: Name of writeback device from device tree
+ * @detect_status: Connector detection status reported to DRM
+ * @wb_list:	List of all writeback devices
+ * @wb_lock:	Serialization lock for writeback context structure
+ * @connector: Connector associated with writeback device
+ * @encoder: Encoder associated with writeback device
+ * @count_modes: Length of writeback connector modes array
+ * @modes: Writeback connector modes array
+ */
+struct sde_wb_device {
+ struct drm_device *drm_dev;
+
+ u32 index;
+ u32 wb_idx;
+ struct sde_wb_cfg *wb_cfg;
+ const char *name;
+
+ struct list_head wb_list;
+ struct mutex wb_lock;
+
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ enum drm_connector_status detect_status;
+ u32 count_modes;
+ struct drm_mode_modeinfo *modes;
+};
+
+/**
+ * sde_wb_get_index - get device index of the given writeback device
+ * @wb_dev: Pointer to writeback device
+ * Returns: Index of hardware instance
+ */
+static inline
+int sde_wb_get_index(struct sde_wb_device *wb_dev)
+{
+ return wb_dev ? wb_dev->index : -1;
+}
+
+#ifdef CONFIG_DRM_SDE_WB
+/**
+ * sde_wb_get_output_fb - get framebuffer in current atomic state
+ * @wb_dev: Pointer to writeback device
+ * Returns: Pointer to framebuffer
+ */
+struct drm_framebuffer *sde_wb_get_output_fb(struct sde_wb_device *wb_dev);
+
+/**
+ * sde_wb_get_output_roi - get region-of-interest in current atomic state
+ * @wb_dev: Pointer to writeback device
+ * @roi: Pointer to region of interest
+ * Returns: 0 if success; error code otherwise
+ */
+int sde_wb_get_output_roi(struct sde_wb_device *wb_dev, struct sde_rect *roi);
+
+/**
+ * sde_wb_get_num_of_displays - get total number of writeback devices
+ * Returns: Number of writeback devices
+ */
+u32 sde_wb_get_num_of_displays(void);
+
+/**
+ * wb_display_get_displays - returns pointers for supported display devices
+ * @display_array: Pointer to display array to be filled
+ * @max_display_count: Size of display_array
+ * @Returns: Number of display entries filled
+ */
+int wb_display_get_displays(void **display_array, u32 max_display_count);
+
+void sde_wb_set_active_state(struct sde_wb_device *wb_dev, bool is_active);
+bool sde_wb_is_active(struct sde_wb_device *wb_dev);
+
+/**
+ * sde_wb_drm_init - perform DRM initialization
+ * @wb_dev: Pointer to writeback device
+ * @encoder: Pointer to associated encoder
+ * Returns: 0 if success; error code otherwise
+ */
+int sde_wb_drm_init(struct sde_wb_device *wb_dev, struct drm_encoder *encoder);
+
+/**
+ * sde_wb_drm_deinit - perform DRM de-initialization
+ * @wb_dev: Pointer to writeback device
+ * Returns: 0 if success; error code otherwise
+ */
+int sde_wb_drm_deinit(struct sde_wb_device *wb_dev);
+
+/**
+ * sde_wb_config - setup connection status and available drm modes of the
+ * given writeback connector
+ * @drm_dev: Pointer to DRM device
+ * @data: Pointer to writeback configuration
+ * @file_priv: Pointer file private data
+ * Returns: 0 if success; error code otherwise
+ *
+ * This function will initiate hot-plug detection event.
+ */
+int sde_wb_config(struct drm_device *drm_dev, void *data,
+ struct drm_file *file_priv);
+
+/**
+ * sde_wb_connector_post_init - perform writeback specific initialization
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to connector info
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+int sde_wb_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display);
+
+/**
+ * sde_wb_connector_detect - perform writeback connection status detection
+ * @connector: Pointer to connector
+ * @force: Indicate force detection
+ * @display: Pointer to writeback device
+ * Returns: connector status
+ */
+enum drm_connector_status
+sde_wb_connector_detect(struct drm_connector *connector,
+ bool force,
+ void *display);
+
+/**
+ * sde_wb_connector_get_modes - get display modes of connector
+ * @connector: Pointer to connector
+ * @display: Pointer to writeback device
+ * Returns: Number of modes
+ *
+ * If display modes are not specified in writeback configuration IOCTL, this
+ * function will install default EDID modes up to maximum resolution support.
+ */
+int sde_wb_connector_get_modes(struct drm_connector *connector, void *display);
+
+/**
+ * sde_wb_connector_set_property - set atomic connector property
+ * @connector: Pointer to drm connector structure
+ * @state: Pointer to drm connector state structure
+ * @property_index: DRM property index
+ * @value: Incoming property value
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+int sde_wb_connector_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t value,
+ void *display);
+
+/**
+ * sde_wb_get_info - retrieve writeback 'display' information
+ * @info: Pointer to display info structure
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+int sde_wb_get_info(struct msm_display_info *info, void *display);
+
+/**
+ * sde_wb_connector_get_wb - retrieve writeback device of the given connector
+ * @connector: Pointer to drm connector
+ * Returns: Pointer to writeback device on success; NULL otherwise
+ */
+static inline
+struct sde_wb_device *sde_wb_connector_get_wb(struct drm_connector *connector)
+{
+ if (!connector ||
+ (connector->connector_type != DRM_MODE_CONNECTOR_VIRTUAL)) {
+ SDE_ERROR("invalid params\n");
+ return NULL;
+ }
+
+ return sde_connector_get_display(connector);
+}
+
+/**
+ * sde_wb_connector_state_get_output_fb - get framebuffer of given state
+ * @state: Pointer to connector state
+ * Returns: Pointer to framebuffer
+ */
+struct drm_framebuffer *
+sde_wb_connector_state_get_output_fb(struct drm_connector_state *state);
+
+/**
+ * sde_wb_connector_state_get_output_roi - get roi from given atomic state
+ * @state: Pointer to atomic state
+ * @roi: Pointer to region of interest
+ * Returns: 0 if success; error code otherwise
+ */
+int sde_wb_connector_state_get_output_roi(struct drm_connector_state *state,
+ struct sde_rect *roi);
+
+#else
+static inline
+struct drm_framebuffer *sde_wb_get_output_fb(struct sde_wb_device *wb_dev)
+{
+ return NULL;
+}
+static inline
+int sde_wb_get_output_roi(struct sde_wb_device *wb_dev, struct sde_rect *roi)
+{
+ return 0;
+}
+static inline
+u32 sde_wb_get_num_of_displays(void)
+{
+ return 0;
+}
+static inline
+int wb_display_get_displays(void **display_array, u32 max_display_count)
+{
+ return 0;
+}
+static inline
+void sde_wb_set_active_state(struct sde_wb_device *wb_dev, bool is_active)
+{
+}
+static inline
+bool sde_wb_is_active(struct sde_wb_device *wb_dev)
+{
+ return false;
+}
+static inline
+int sde_wb_drm_init(struct sde_wb_device *wb_dev, struct drm_encoder *encoder)
+{
+ return 0;
+}
+static inline
+int sde_wb_drm_deinit(struct sde_wb_device *wb_dev)
+{
+ return 0;
+}
+static inline
+int sde_wb_config(struct drm_device *drm_dev, void *data,
+ struct drm_file *file_priv)
+{
+ return 0;
+}
+static inline
+int sde_wb_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display)
+{
+ return 0;
+}
+static inline
+enum drm_connector_status
+sde_wb_connector_detect(struct drm_connector *connector,
+ bool force,
+ void *display)
+{
+ return connector_status_disconnected;
+}
+static inline
+int sde_wb_connector_get_modes(struct drm_connector *connector, void *display)
+{
+ return -EINVAL;
+}
+static inline
+int sde_wb_connector_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t value,
+ void *display)
+{
+ return 0;
+}
+static inline
+int sde_wb_get_info(struct msm_display_info *info, void *display)
+{
+ return 0;
+}
+static inline
+struct sde_wb_device *sde_wb_connector_get_wb(struct drm_connector *connector)
+{
+ return NULL;
+}
+
+static inline
+struct drm_framebuffer *
+sde_wb_connector_state_get_output_fb(struct drm_connector_state *state)
+{
+ return NULL;
+}
+
+static inline
+int sde_wb_connector_state_get_output_roi(struct drm_connector_state *state,
+ struct sde_rect *roi)
+{
+ return 0;
+}
+
+#endif
+#endif /* __SDE_WB_H__ */
+
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
new file mode 100644
index 000000000000..271c41f05ce5
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -0,0 +1,62 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef SDE_DBG_H_
+#define SDE_DBG_H_
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
+#define SDE_EVTLOG_DATA_LIMITER (-1)
+#define SDE_EVTLOG_FUNC_ENTRY 0x1111
+#define SDE_EVTLOG_FUNC_EXIT 0x2222
+
+#define SDE_DBG_DUMP_DATA_LIMITER (NULL)
+
+enum sde_dbg_evtlog_flag {
+ SDE_EVTLOG_DEFAULT = BIT(0),
+ SDE_EVTLOG_IRQ = BIT(1),
+ SDE_EVTLOG_ALL = BIT(7)
+};
+
+/**
+ * SDE_EVT32 - Write an list of 32bit values as an event into the event log
+ * ... - variable arguments
+ */
+#define SDE_EVT32(...) sde_evtlog(__func__, __LINE__, SDE_EVTLOG_DEFAULT, \
+ ##__VA_ARGS__, SDE_EVTLOG_DATA_LIMITER)
+#define SDE_EVT32_IRQ(...) sde_evtlog(__func__, __LINE__, SDE_EVTLOG_IRQ, \
+ ##__VA_ARGS__, SDE_EVTLOG_DATA_LIMITER)
+
+#define SDE_DBG_DUMP(...) \
+ sde_dbg_dump(false, __func__, ##__VA_ARGS__, \
+ SDE_DBG_DUMP_DATA_LIMITER)
+
+#define SDE_DBG_DUMP_WQ(...) \
+ sde_dbg_dump(true, __func__, ##__VA_ARGS__, \
+ SDE_DBG_DUMP_DATA_LIMITER)
+
+#if defined(CONFIG_DEBUG_FS)
+
+int sde_evtlog_init(struct dentry *debugfs_root);
+void sde_evtlog_destroy(void);
+void sde_evtlog(const char *name, int line, int flag, ...);
+void sde_dbg_dump(bool queue, const char *name, ...);
+#else
+static inline int sde_evtlog_init(struct dentry *debugfs_root) { return 0; }
+static inline void sde_evtlog(const char *name, int line, int flag, ...) {}
+static inline void sde_evtlog_destroy(void) { }
+static inline void sde_dbg_dump(bool queue, const char *name, ...) {}
+#endif
+
+#endif /* SDE_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
new file mode 100644
index 000000000000..72832776659d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
@@ -0,0 +1,326 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "sde_evtlog:[%s] " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+
+#include "sde_dbg.h"
+#include "sde_trace.h"
+
+#ifdef CONFIG_DRM_SDE_EVTLOG_DEBUG
+#define SDE_EVTLOG_DEFAULT_ENABLE 1
+#else
+#define SDE_EVTLOG_DEFAULT_ENABLE 0
+#endif
+
+#define SDE_DBG_DEFAULT_PANIC 1
+
+/*
+ * evtlog will print this number of entries when it is called through
+ * sysfs node or panic. This prevents kernel log from evtlog message
+ * flood.
+ */
+#define SDE_EVTLOG_PRINT_ENTRY 256
+
+/*
+ * evtlog keeps this number of entries in memory for debug purpose. This
+ * number must be greater than print entry to prevent out of bound evtlog
+ * entry array access.
+ */
+#define SDE_EVTLOG_ENTRY (SDE_EVTLOG_PRINT_ENTRY * 4)
+#define SDE_EVTLOG_MAX_DATA 15
+#define SDE_EVTLOG_BUF_MAX 512
+#define SDE_EVTLOG_BUF_ALIGN 32
+
+DEFINE_SPINLOCK(sde_evtloglock);
+
+struct tlog {
+ u32 counter;
+ s64 time;
+ const char *name;
+ int line;
+ u32 data[SDE_EVTLOG_MAX_DATA];
+ u32 data_cnt;
+ int pid;
+};
+
+static struct sde_dbg_evtlog {
+ struct tlog logs[SDE_EVTLOG_ENTRY];
+ u32 first;
+ u32 last;
+ u32 curr;
+ struct dentry *evtlog;
+ u32 evtlog_enable;
+ u32 panic_on_err;
+ struct work_struct evtlog_dump_work;
+ bool work_panic;
+} sde_dbg_evtlog;
+
+static inline bool sde_evtlog_is_enabled(u32 flag)
+{
+ return (flag & sde_dbg_evtlog.evtlog_enable) ||
+ (flag == SDE_EVTLOG_ALL && sde_dbg_evtlog.evtlog_enable);
+}
+
+void sde_evtlog(const char *name, int line, int flag, ...)
+{
+ unsigned long flags;
+ int i, val = 0;
+ va_list args;
+ struct tlog *log;
+
+ if (!sde_evtlog_is_enabled(flag))
+ return;
+
+ spin_lock_irqsave(&sde_evtloglock, flags);
+ log = &sde_dbg_evtlog.logs[sde_dbg_evtlog.curr];
+ log->time = ktime_to_us(ktime_get());
+ log->name = name;
+ log->line = line;
+ log->data_cnt = 0;
+ log->pid = current->pid;
+
+ va_start(args, flag);
+ for (i = 0; i < SDE_EVTLOG_MAX_DATA; i++) {
+
+ val = va_arg(args, int);
+ if (val == SDE_EVTLOG_DATA_LIMITER)
+ break;
+
+ log->data[i] = val;
+ }
+ va_end(args);
+ log->data_cnt = i;
+ sde_dbg_evtlog.curr = (sde_dbg_evtlog.curr + 1) % SDE_EVTLOG_ENTRY;
+ sde_dbg_evtlog.last++;
+
+ trace_sde_evtlog(name, line, i > 0 ? log->data[0] : 0,
+ i > 1 ? log->data[1] : 0);
+
+ spin_unlock_irqrestore(&sde_evtloglock, flags);
+}
+
+/* always dump the last entries which are not dumped yet */
+static bool _sde_evtlog_dump_calc_range(void)
+{
+ static u32 next;
+ bool need_dump = true;
+ unsigned long flags;
+ struct sde_dbg_evtlog *evtlog = &sde_dbg_evtlog;
+
+ spin_lock_irqsave(&sde_evtloglock, flags);
+
+ evtlog->first = next;
+
+ if (evtlog->last == evtlog->first) {
+ need_dump = false;
+ goto dump_exit;
+ }
+
+ if (evtlog->last < evtlog->first) {
+ evtlog->first %= SDE_EVTLOG_ENTRY;
+ if (evtlog->last < evtlog->first)
+ evtlog->last += SDE_EVTLOG_ENTRY;
+ }
+
+ if ((evtlog->last - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) {
+ pr_warn("evtlog buffer overflow before dump: %d\n",
+ evtlog->last - evtlog->first);
+ evtlog->first = evtlog->last - SDE_EVTLOG_PRINT_ENTRY;
+ }
+ next = evtlog->first + 1;
+
+dump_exit:
+ spin_unlock_irqrestore(&sde_evtloglock, flags);
+
+ return need_dump;
+}
+
+static ssize_t sde_evtlog_dump_entry(char *evtlog_buf, ssize_t evtlog_buf_size)
+{
+ int i;
+ ssize_t off = 0;
+ struct tlog *log, *prev_log;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sde_evtloglock, flags);
+
+ log = &sde_dbg_evtlog.logs[sde_dbg_evtlog.first %
+ SDE_EVTLOG_ENTRY];
+
+ prev_log = &sde_dbg_evtlog.logs[(sde_dbg_evtlog.first - 1) %
+ SDE_EVTLOG_ENTRY];
+
+ off = snprintf((evtlog_buf + off), (evtlog_buf_size - off), "%s:%-4d",
+ log->name, log->line);
+
+ if (off < SDE_EVTLOG_BUF_ALIGN) {
+ memset((evtlog_buf + off), 0x20, (SDE_EVTLOG_BUF_ALIGN - off));
+ off = SDE_EVTLOG_BUF_ALIGN;
+ }
+
+ off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
+ "=>[%-8d:%-11llu:%9llu][%-4d]:", sde_dbg_evtlog.first,
+ log->time, (log->time - prev_log->time), log->pid);
+
+ for (i = 0; i < log->data_cnt; i++)
+ off += snprintf((evtlog_buf + off), (evtlog_buf_size - off),
+ "%x ", log->data[i]);
+
+ off += snprintf((evtlog_buf + off), (evtlog_buf_size - off), "\n");
+
+ spin_unlock_irqrestore(&sde_evtloglock, flags);
+
+ return off;
+}
+
+static void _sde_evtlog_dump_all(void)
+{
+ char evtlog_buf[SDE_EVTLOG_BUF_MAX];
+
+ while (_sde_evtlog_dump_calc_range()) {
+ sde_evtlog_dump_entry(evtlog_buf, SDE_EVTLOG_BUF_MAX);
+ pr_info("%s", evtlog_buf);
+ }
+}
+
+static void _sde_dump_array(bool dead, const char *name)
+{
+ _sde_evtlog_dump_all();
+
+ if (dead && sde_dbg_evtlog.panic_on_err)
+ panic(name);
+}
+
+static void _sde_dump_work(struct work_struct *work)
+{
+ _sde_dump_array(sde_dbg_evtlog.work_panic, "evtlog_workitem");
+}
+
+void sde_dbg_dump(bool queue, const char *name, ...)
+{
+ int i;
+ bool dead = false;
+ va_list args;
+ char *blk_name = NULL;
+
+ if (!sde_evtlog_is_enabled(SDE_EVTLOG_DEFAULT))
+ return;
+
+ if (queue && work_pending(&sde_dbg_evtlog.evtlog_dump_work))
+ return;
+
+ va_start(args, name);
+ for (i = 0; i < SDE_EVTLOG_MAX_DATA; i++) {
+ blk_name = va_arg(args, char*);
+ if (IS_ERR_OR_NULL(blk_name))
+ break;
+
+ if (!strcmp(blk_name, "panic"))
+ dead = true;
+ }
+ va_end(args);
+
+ if (queue) {
+ /* schedule work to dump later */
+ sde_dbg_evtlog.work_panic = dead;
+ schedule_work(&sde_dbg_evtlog.evtlog_dump_work);
+ } else {
+ _sde_dump_array(dead, name);
+ }
+}
+
+static int sde_evtlog_dump_open(struct inode *inode, struct file *file)
+{
+ /* non-seekable */
+ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ ssize_t len = 0;
+ char evtlog_buf[SDE_EVTLOG_BUF_MAX];
+
+ if (_sde_evtlog_dump_calc_range()) {
+ len = sde_evtlog_dump_entry(evtlog_buf, SDE_EVTLOG_BUF_MAX);
+ if (copy_to_user(buff, evtlog_buf, len))
+ return -EFAULT;
+ *ppos += len;
+ }
+
+ return len;
+}
+
+static ssize_t sde_evtlog_dump_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ _sde_evtlog_dump_all();
+
+ if (sde_dbg_evtlog.panic_on_err)
+ panic("sde");
+
+ return count;
+}
+
+static const struct file_operations sde_evtlog_fops = {
+ .open = sde_evtlog_dump_open,
+ .read = sde_evtlog_dump_read,
+ .write = sde_evtlog_dump_write,
+};
+
+int sde_evtlog_init(struct dentry *debugfs_root)
+{
+ int i;
+
+ sde_dbg_evtlog.evtlog = debugfs_create_dir("evt_dbg", debugfs_root);
+ if (IS_ERR_OR_NULL(sde_dbg_evtlog.evtlog)) {
+ pr_err("debugfs_create_dir fail, error %ld\n",
+ PTR_ERR(sde_dbg_evtlog.evtlog));
+ sde_dbg_evtlog.evtlog = NULL;
+ return -ENODEV;
+ }
+
+ INIT_WORK(&sde_dbg_evtlog.evtlog_dump_work, _sde_dump_work);
+ sde_dbg_evtlog.work_panic = false;
+
+ for (i = 0; i < SDE_EVTLOG_ENTRY; i++)
+ sde_dbg_evtlog.logs[i].counter = i;
+
+ debugfs_create_file("dump", 0644, sde_dbg_evtlog.evtlog, NULL,
+ &sde_evtlog_fops);
+ debugfs_create_u32("enable", 0644, sde_dbg_evtlog.evtlog,
+ &sde_dbg_evtlog.evtlog_enable);
+ debugfs_create_u32("panic", 0644, sde_dbg_evtlog.evtlog,
+ &sde_dbg_evtlog.panic_on_err);
+
+ sde_dbg_evtlog.evtlog_enable = SDE_EVTLOG_DEFAULT_ENABLE;
+ sde_dbg_evtlog.panic_on_err = SDE_DBG_DEFAULT_PANIC;
+
+ pr_info("evtlog_status: enable:%d, panic:%d\n",
+ sde_dbg_evtlog.evtlog_enable, sde_dbg_evtlog.panic_on_err);
+
+ return 0;
+}
+
+void sde_evtlog_destroy(void)
+{
+ debugfs_remove(sde_dbg_evtlog.evtlog);
+}
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
new file mode 100644
index 000000000000..3c82a261e3fb
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -0,0 +1,924 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d]: " fmt, __func__, __LINE__
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/mdss_io_util.h>
+
+#include "sde_power_handle.h"
+#include "sde_trace.h"
+
+struct sde_power_client *sde_power_client_create(
+ struct sde_power_handle *phandle, char *client_name)
+{
+ struct sde_power_client *client;
+ static u32 id;
+
+ if (!client_name || !phandle) {
+ pr_err("client name is null or invalid power data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ client = kzalloc(sizeof(struct sde_power_client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&phandle->phandle_lock);
+ strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+ client->usecase_ndx = VOTE_INDEX_DISABLE;
+ client->id = id;
+ pr_debug("client %s created:%pK id :%d\n", client_name,
+ client, id);
+ id++;
+ list_add(&client->list, &phandle->power_client_clist);
+ mutex_unlock(&phandle->phandle_lock);
+
+ return client;
+}
+
+void sde_power_client_destroy(struct sde_power_handle *phandle,
+ struct sde_power_client *client)
+{
+ if (!client || !phandle) {
+ pr_err("reg bus vote: invalid client handle\n");
+ } else {
+ pr_debug("bus vote client %s destroyed:%pK id:%u\n",
+ client->name, client, client->id);
+ mutex_lock(&phandle->phandle_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&phandle->phandle_lock);
+ kfree(client);
+ }
+}
+
+static int sde_power_parse_dt_supply(struct platform_device *pdev,
+ struct dss_module_power *mp)
+{
+ int i = 0, rc = 0;
+ u32 tmp = 0;
+ struct device_node *of_node = NULL, *supply_root_node = NULL;
+ struct device_node *supply_node = NULL;
+
+ if (!pdev || !mp) {
+ pr_err("invalid input param pdev:%pK mp:%pK\n", pdev, mp);
+ return -EINVAL;
+ }
+
+ of_node = pdev->dev.of_node;
+
+ mp->num_vreg = 0;
+ supply_root_node = of_get_child_by_name(of_node,
+ "qcom,platform-supply-entries");
+ if (!supply_root_node) {
+ pr_debug("no supply entry present\n");
+ return rc;
+ }
+
+ for_each_child_of_node(supply_root_node, supply_node)
+ mp->num_vreg++;
+
+ if (mp->num_vreg == 0) {
+ pr_debug("no vreg\n");
+ return rc;
+ }
+
+ pr_debug("vreg found. count=%d\n", mp->num_vreg);
+ mp->vreg_config = devm_kzalloc(&pdev->dev, sizeof(struct dss_vreg) *
+ mp->num_vreg, GFP_KERNEL);
+ if (!mp->vreg_config) {
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ for_each_child_of_node(supply_root_node, supply_node) {
+
+ const char *st = NULL;
+
+ rc = of_property_read_string(supply_node,
+ "qcom,supply-name", &st);
+ if (rc) {
+ pr_err("error reading name. rc=%d\n", rc);
+ goto error;
+ }
+
+ strlcpy(mp->vreg_config[i].vreg_name, st,
+ sizeof(mp->vreg_config[i].vreg_name));
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-min-voltage", &tmp);
+ if (rc) {
+ pr_err("error reading min volt. rc=%d\n", rc);
+ goto error;
+ }
+ mp->vreg_config[i].min_voltage = tmp;
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-max-voltage", &tmp);
+ if (rc) {
+ pr_err("error reading max volt. rc=%d\n", rc);
+ goto error;
+ }
+ mp->vreg_config[i].max_voltage = tmp;
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-enable-load", &tmp);
+ if (rc) {
+ pr_err("error reading enable load. rc=%d\n", rc);
+ goto error;
+ }
+ mp->vreg_config[i].enable_load = tmp;
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-disable-load", &tmp);
+ if (rc) {
+ pr_err("error reading disable load. rc=%d\n", rc);
+ goto error;
+ }
+ mp->vreg_config[i].disable_load = tmp;
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-pre-on-sleep", &tmp);
+ if (rc)
+ pr_debug("error reading supply pre sleep value. rc=%d\n",
+ rc);
+
+ mp->vreg_config[i].pre_on_sleep = (!rc ? tmp : 0);
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-pre-off-sleep", &tmp);
+ if (rc)
+ pr_debug("error reading supply pre sleep value. rc=%d\n",
+ rc);
+
+ mp->vreg_config[i].pre_off_sleep = (!rc ? tmp : 0);
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-post-on-sleep", &tmp);
+ if (rc)
+ pr_debug("error reading supply post sleep value. rc=%d\n",
+ rc);
+
+ mp->vreg_config[i].post_on_sleep = (!rc ? tmp : 0);
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-post-off-sleep", &tmp);
+ if (rc)
+ pr_debug("error reading supply post sleep value. rc=%d\n",
+ rc);
+
+ mp->vreg_config[i].post_off_sleep = (!rc ? tmp : 0);
+
+ pr_debug("%s min=%d, max=%d, enable=%d, disable=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
+ mp->vreg_config[i].vreg_name,
+ mp->vreg_config[i].min_voltage,
+ mp->vreg_config[i].max_voltage,
+ mp->vreg_config[i].enable_load,
+ mp->vreg_config[i].disable_load,
+ mp->vreg_config[i].pre_on_sleep,
+ mp->vreg_config[i].post_on_sleep,
+ mp->vreg_config[i].pre_off_sleep,
+ mp->vreg_config[i].post_off_sleep);
+ ++i;
+
+ rc = 0;
+ }
+
+ return rc;
+
+error:
+ if (mp->vreg_config) {
+ devm_kfree(&pdev->dev, mp->vreg_config);
+ mp->vreg_config = NULL;
+ mp->num_vreg = 0;
+ }
+
+ return rc;
+}
+
+static int sde_power_parse_dt_clock(struct platform_device *pdev,
+ struct dss_module_power *mp)
+{
+ u32 i = 0, rc = 0;
+ const char *clock_name;
+ u32 clock_rate = 0;
+ u32 clock_max_rate = 0;
+ int num_clk = 0;
+
+ if (!pdev || !mp) {
+ pr_err("invalid input param pdev:%pK mp:%pK\n", pdev, mp);
+ return -EINVAL;
+ }
+
+ mp->num_clk = 0;
+ num_clk = of_property_count_strings(pdev->dev.of_node,
+ "clock-names");
+ if (num_clk <= 0) {
+ pr_debug("clocks are not defined\n");
+ goto clk_err;
+ }
+
+ mp->num_clk = num_clk;
+ mp->clk_config = devm_kzalloc(&pdev->dev,
+ sizeof(struct dss_clk) * num_clk, GFP_KERNEL);
+ if (!mp->clk_config) {
+ rc = -ENOMEM;
+ mp->num_clk = 0;
+ goto clk_err;
+ }
+
+ for (i = 0; i < num_clk; i++) {
+ of_property_read_string_index(pdev->dev.of_node, "clock-names",
+ i, &clock_name);
+ strlcpy(mp->clk_config[i].clk_name, clock_name,
+ sizeof(mp->clk_config[i].clk_name));
+
+ of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
+ i, &clock_rate);
+ mp->clk_config[i].rate = clock_rate;
+
+ if (!clock_rate)
+ mp->clk_config[i].type = DSS_CLK_AHB;
+ else
+ mp->clk_config[i].type = DSS_CLK_PCLK;
+
+ clock_max_rate = 0;
+ of_property_read_u32_index(pdev->dev.of_node, "clock-max-rate",
+ i, &clock_max_rate);
+ mp->clk_config[i].max_rate = clock_max_rate;
+ }
+
+clk_err:
+ return rc;
+}
+
+#ifdef CONFIG_QCOM_BUS_SCALING
+
+#define MAX_AXI_PORT_COUNT 3
+
+static int _sde_power_data_bus_set_quota(
+ struct sde_power_data_bus_handle *pdbus,
+ u64 ab_quota_rt, u64 ab_quota_nrt,
+ u64 ib_quota_rt, u64 ib_quota_nrt)
+{
+ int new_uc_idx;
+ u64 ab_quota[MAX_AXI_PORT_COUNT] = {0, 0};
+ u64 ib_quota[MAX_AXI_PORT_COUNT] = {0, 0};
+ int rc;
+
+ if (pdbus->data_bus_hdl < 1) {
+ pr_err("invalid bus handle %d\n", pdbus->data_bus_hdl);
+ return -EINVAL;
+ }
+
+ if (!ab_quota_rt && !ab_quota_nrt && !ib_quota_rt && !ib_quota_nrt) {
+ new_uc_idx = 0;
+ } else {
+ int i;
+ struct msm_bus_vectors *vect = NULL;
+ struct msm_bus_scale_pdata *bw_table =
+ pdbus->data_bus_scale_table;
+ u32 nrt_axi_port_cnt = pdbus->nrt_axi_port_cnt;
+ u32 total_axi_port_cnt = pdbus->axi_port_cnt;
+ u32 rt_axi_port_cnt = total_axi_port_cnt - nrt_axi_port_cnt;
+ int match_cnt = 0;
+
+ if (!bw_table || !total_axi_port_cnt ||
+ total_axi_port_cnt > MAX_AXI_PORT_COUNT) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ if (pdbus->bus_channels) {
+ ib_quota_rt = div_u64(ib_quota_rt,
+ pdbus->bus_channels);
+ ib_quota_nrt = div_u64(ib_quota_nrt,
+ pdbus->bus_channels);
+ }
+
+ if (nrt_axi_port_cnt) {
+
+ ab_quota_rt = div_u64(ab_quota_rt, rt_axi_port_cnt);
+ ab_quota_nrt = div_u64(ab_quota_nrt, nrt_axi_port_cnt);
+
+ for (i = 0; i < total_axi_port_cnt; i++) {
+ if (i < rt_axi_port_cnt) {
+ ab_quota[i] = ab_quota_rt;
+ ib_quota[i] = ib_quota_rt;
+ } else {
+ ab_quota[i] = ab_quota_nrt;
+ ib_quota[i] = ib_quota_nrt;
+ }
+ }
+ } else {
+ ab_quota[0] = div_u64(ab_quota_rt + ab_quota_nrt,
+ total_axi_port_cnt);
+ ib_quota[0] = ib_quota_rt + ib_quota_nrt;
+
+ for (i = 1; i < total_axi_port_cnt; i++) {
+ ab_quota[i] = ab_quota[0];
+ ib_quota[i] = ib_quota[0];
+ }
+ }
+
+ for (i = 0; i < total_axi_port_cnt; i++) {
+ vect = &bw_table->usecase
+ [pdbus->curr_bw_uc_idx].vectors[i];
+ /* avoid performing updates for small changes */
+ if ((ab_quota[i] == vect->ab) &&
+ (ib_quota[i] == vect->ib))
+ match_cnt++;
+ }
+
+ if (match_cnt == total_axi_port_cnt) {
+ pr_debug("skip BW vote\n");
+ return 0;
+ }
+
+ new_uc_idx = (pdbus->curr_bw_uc_idx %
+ (bw_table->num_usecases - 1)) + 1;
+
+ for (i = 0; i < total_axi_port_cnt; i++) {
+ vect = &bw_table->usecase[new_uc_idx].vectors[i];
+ vect->ab = ab_quota[i];
+ vect->ib = ib_quota[i];
+
+ pr_debug("uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
+ new_uc_idx, (i < rt_axi_port_cnt) ? "rt" : "nrt"
+ , i, vect->ab, vect->ib);
+ }
+ }
+ pdbus->curr_bw_uc_idx = new_uc_idx;
+ pdbus->ao_bw_uc_idx = new_uc_idx;
+
+ if ((pdbus->bus_ref_cnt == 0) && pdbus->curr_bw_uc_idx) {
+ rc = 0;
+ } else { /* vote BW if bus_bw_cnt > 0 or uc_idx is zero */
+ SDE_ATRACE_BEGIN("msm_bus_scale_req");
+ rc = msm_bus_scale_client_update_request(pdbus->data_bus_hdl,
+ new_uc_idx);
+ SDE_ATRACE_END("msm_bus_scale_req");
+ }
+ return rc;
+}
+
+int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
+ struct sde_power_client *pclient,
+ int bus_client, u64 ab_quota, u64 ib_quota)
+{
+ int rc = 0;
+ int i;
+ u64 total_ab_rt = 0, total_ib_rt = 0;
+ u64 total_ab_nrt = 0, total_ib_nrt = 0;
+ struct sde_power_client *client;
+
+ if (!phandle || !pclient ||
+ bus_client >= SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX) {
+ pr_err("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&phandle->phandle_lock);
+
+ pclient->ab[bus_client] = ab_quota;
+ pclient->ib[bus_client] = ib_quota;
+ trace_sde_perf_update_bus(bus_client, ab_quota, ib_quota);
+
+ list_for_each_entry(client, &phandle->power_client_clist, list) {
+ for (i = 0; i < SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX; i++) {
+ if (i == SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT) {
+ total_ab_nrt += client->ab[i];
+ total_ib_nrt += client->ib[i];
+ } else {
+ total_ab_rt += client->ab[i];
+ total_ib_rt = max(total_ib_rt, client->ib[i]);
+ }
+ }
+ }
+
+ rc = _sde_power_data_bus_set_quota(&phandle->data_bus_handle,
+ total_ab_rt, total_ab_nrt,
+ total_ib_rt, total_ib_nrt);
+
+ mutex_unlock(&phandle->phandle_lock);
+
+ return rc;
+}
+
+static void sde_power_data_bus_unregister(
+ struct sde_power_data_bus_handle *pdbus)
+{
+ if (pdbus->data_bus_hdl) {
+ msm_bus_scale_unregister_client(pdbus->data_bus_hdl);
+ pdbus->data_bus_hdl = 0;
+ }
+}
+
+static int sde_power_data_bus_parse(struct platform_device *pdev,
+ struct sde_power_data_bus_handle *pdbus)
+{
+ struct device_node *node;
+ int rc = 0;
+ int paths;
+
+ pdbus->bus_channels = 1;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,sde-dram-channels", &pdbus->bus_channels);
+ if (rc) {
+ pr_debug("number of channels property not specified\n");
+ rc = 0;
+ }
+
+ pdbus->nrt_axi_port_cnt = 0;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,sde-num-nrt-paths",
+ &pdbus->nrt_axi_port_cnt);
+ if (rc) {
+ pr_debug("number of axi port property not specified\n");
+ rc = 0;
+ }
+
+ node = of_get_child_by_name(pdev->dev.of_node, "qcom,sde-data-bus");
+ if (node) {
+ rc = of_property_read_u32(node,
+ "qcom,msm-bus,num-paths", &paths);
+ if (rc) {
+ pr_err("Error. qcom,msm-bus,num-paths not found\n");
+ return rc;
+ }
+ pdbus->axi_port_cnt = paths;
+
+ pdbus->data_bus_scale_table =
+ msm_bus_pdata_from_node(pdev, node);
+ if (IS_ERR_OR_NULL(pdbus->data_bus_scale_table)) {
+ pr_err("reg bus handle parsing failed\n");
+ rc = PTR_ERR(pdbus->data_bus_scale_table);
+ goto end;
+ }
+ pdbus->data_bus_hdl = msm_bus_scale_register_client(
+ pdbus->data_bus_scale_table);
+ if (!pdbus->data_bus_hdl) {
+ pr_err("data_bus_client register failed\n");
+ rc = -EINVAL;
+ goto end;
+ }
+ pr_debug("register data_bus_hdl=%x\n", pdbus->data_bus_hdl);
+
+ /*
+ * Following call will not result in actual vote rather update
+ * the current index and ab/ib value. When continuous splash
+ * is enabled, actual vote will happen when splash handoff is
+ * done.
+ */
+ return _sde_power_data_bus_set_quota(pdbus,
+ SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA,
+ SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA,
+ SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA,
+ SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA);
+ }
+
+end:
+ return rc;
+}
+
+static int sde_power_reg_bus_parse(struct platform_device *pdev,
+ struct sde_power_handle *phandle)
+{
+ struct device_node *node;
+ struct msm_bus_scale_pdata *bus_scale_table;
+ int rc = 0;
+
+ node = of_get_child_by_name(pdev->dev.of_node, "qcom,sde-reg-bus");
+ if (node) {
+ bus_scale_table = msm_bus_pdata_from_node(pdev, node);
+ if (IS_ERR_OR_NULL(bus_scale_table)) {
+ pr_err("reg bus handle parsing failed\n");
+ rc = PTR_ERR(bus_scale_table);
+ goto end;
+ }
+ phandle->reg_bus_hdl = msm_bus_scale_register_client(
+ bus_scale_table);
+ if (!phandle->reg_bus_hdl) {
+ pr_err("reg_bus_client register failed\n");
+ rc = -EINVAL;
+ goto end;
+ }
+ pr_debug("register reg_bus_hdl=%x\n", phandle->reg_bus_hdl);
+ }
+
+end:
+ return rc;
+}
+
+static void sde_power_reg_bus_unregister(u32 reg_bus_hdl)
+{
+ if (reg_bus_hdl)
+ msm_bus_scale_unregister_client(reg_bus_hdl);
+}
+
+static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx)
+{
+ int rc = 0;
+
+ if (reg_bus_hdl)
+ rc = msm_bus_scale_client_update_request(reg_bus_hdl,
+ usecase_ndx);
+ if (rc)
+ pr_err("failed to set reg bus vote rc=%d\n", rc);
+
+ return rc;
+}
+#else
+static int sde_power_data_bus_parse(struct platform_device *pdev,
+ struct sde_power_handle *phandle)
+{
+ return 0;
+}
+
+static void sde_power_data_bus_unregister(u32 reg_bus_hdl)
+{
+}
+
+int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
+ struct sde_power_client *pclient,
+ int bus_client, u64 ab_quota, u64 ib_quota)
+{
+ return 0;
+}
+
+static int sde_power_reg_bus_parse(struct platform_device *pdev,
+ struct sde_power_handle *phandle)
+{
+ return 0;
+}
+
+static void sde_power_reg_bus_unregister(u32 reg_bus_hdl)
+{
+}
+
+static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx)
+{
+ return 0;
+}
+#endif
+
+void sde_power_data_bus_bandwidth_ctrl(struct sde_power_handle *phandle,
+ struct sde_power_client *pclient, int enable)
+{
+ struct sde_power_data_bus_handle *pdbus;
+ int changed = 0;
+
+ if (!phandle || !pclient) {
+ pr_err("invalid power/client handle\n");
+ return;
+ }
+
+ pdbus = &phandle->data_bus_handle;
+
+ mutex_lock(&phandle->phandle_lock);
+ if (enable) {
+ if (pdbus->bus_ref_cnt == 0)
+ changed++;
+ pdbus->bus_ref_cnt++;
+ } else {
+ if (pdbus->bus_ref_cnt) {
+ pdbus->bus_ref_cnt--;
+ if (pdbus->bus_ref_cnt == 0)
+ changed++;
+ } else {
+ pr_debug("Can not be turned off\n");
+ }
+ }
+
+ pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
+ __builtin_return_address(0), current->group_leader->comm,
+ pdbus->bus_ref_cnt, changed, enable);
+
+ if (changed) {
+ SDE_ATRACE_INT("data_bus_ctrl", enable);
+
+ if (!enable) {
+ if (!pdbus->handoff_pending) {
+ msm_bus_scale_client_update_request(
+ pdbus->data_bus_hdl, 0);
+ pdbus->ao_bw_uc_idx = 0;
+ }
+ } else {
+ msm_bus_scale_client_update_request(
+ pdbus->data_bus_hdl,
+ pdbus->curr_bw_uc_idx);
+ }
+ }
+
+ mutex_unlock(&phandle->phandle_lock);
+}
+
+int sde_power_resource_init(struct platform_device *pdev,
+ struct sde_power_handle *phandle)
+{
+ int rc = 0;
+ struct dss_module_power *mp;
+
+ if (!phandle || !pdev) {
+ pr_err("invalid input param\n");
+ rc = -EINVAL;
+ goto end;
+ }
+ mp = &phandle->mp;
+ phandle->dev = &pdev->dev;
+
+ rc = sde_power_parse_dt_clock(pdev, mp);
+ if (rc) {
+ pr_err("device clock parsing failed\n");
+ goto end;
+ }
+
+ rc = sde_power_parse_dt_supply(pdev, mp);
+ if (rc) {
+ pr_err("device vreg supply parsing failed\n");
+ goto parse_vreg_err;
+ }
+
+ rc = msm_dss_config_vreg(&pdev->dev,
+ mp->vreg_config, mp->num_vreg, 1);
+ if (rc) {
+ pr_err("vreg config failed rc=%d\n", rc);
+ goto vreg_err;
+ }
+
+ rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, mp->num_clk);
+ if (rc) {
+ pr_err("clock get failed rc=%d\n", rc);
+ goto clk_err;
+ }
+
+ rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+ if (rc) {
+ pr_err("clock set rate failed rc=%d\n", rc);
+ goto bus_err;
+ }
+
+ rc = sde_power_reg_bus_parse(pdev, phandle);
+ if (rc) {
+ pr_err("register bus parse failed rc=%d\n", rc);
+ goto bus_err;
+ }
+
+ rc = sde_power_data_bus_parse(pdev, &phandle->data_bus_handle);
+ if (rc) {
+ pr_err("register data bus parse failed rc=%d\n", rc);
+ goto data_bus_err;
+ }
+
+ INIT_LIST_HEAD(&phandle->power_client_clist);
+ mutex_init(&phandle->phandle_lock);
+
+ return rc;
+
+data_bus_err:
+ sde_power_reg_bus_unregister(phandle->reg_bus_hdl);
+bus_err:
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+clk_err:
+ msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
+vreg_err:
+ devm_kfree(&pdev->dev, mp->vreg_config);
+ mp->num_vreg = 0;
+parse_vreg_err:
+ devm_kfree(&pdev->dev, mp->clk_config);
+ mp->num_clk = 0;
+end:
+ return rc;
+}
+
+void sde_power_resource_deinit(struct platform_device *pdev,
+ struct sde_power_handle *phandle)
+{
+ struct dss_module_power *mp;
+
+ if (!phandle || !pdev) {
+ pr_err("invalid input param\n");
+ return;
+ }
+ mp = &phandle->mp;
+
+ sde_power_data_bus_unregister(&phandle->data_bus_handle);
+
+ sde_power_reg_bus_unregister(phandle->reg_bus_hdl);
+
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+
+ msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
+
+ if (mp->clk_config)
+ devm_kfree(&pdev->dev, mp->clk_config);
+
+ if (mp->vreg_config)
+ devm_kfree(&pdev->dev, mp->vreg_config);
+
+ mp->num_vreg = 0;
+ mp->num_clk = 0;
+}
+
+int sde_power_resource_enable(struct sde_power_handle *phandle,
+ struct sde_power_client *pclient, bool enable)
+{
+ int rc = 0;
+ bool changed = false;
+ u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
+ struct sde_power_client *client;
+ struct dss_module_power *mp;
+
+ if (!phandle || !pclient) {
+ pr_err("invalid input argument\n");
+ return -EINVAL;
+ }
+
+ mp = &phandle->mp;
+
+ mutex_lock(&phandle->phandle_lock);
+ if (enable)
+ pclient->refcount++;
+ else if (pclient->refcount)
+ pclient->refcount--;
+
+ if (pclient->refcount)
+ pclient->usecase_ndx = VOTE_INDEX_LOW;
+ else
+ pclient->usecase_ndx = VOTE_INDEX_DISABLE;
+
+ list_for_each_entry(client, &phandle->power_client_clist, list) {
+ if (client->usecase_ndx < VOTE_INDEX_MAX &&
+ client->usecase_ndx > max_usecase_ndx)
+ max_usecase_ndx = client->usecase_ndx;
+ }
+
+ if (phandle->current_usecase_ndx != max_usecase_ndx) {
+ changed = true;
+ prev_usecase_ndx = phandle->current_usecase_ndx;
+ phandle->current_usecase_ndx = max_usecase_ndx;
+ }
+
+ pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
+ __builtin_return_address(0), changed, max_usecase_ndx,
+ pclient->name, pclient->id, enable, pclient->refcount);
+
+ if (!changed)
+ goto end;
+
+ if (enable) {
+ rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
+ if (rc) {
+ pr_err("failed to enable vregs rc=%d\n", rc);
+ goto vreg_err;
+ }
+
+ rc = sde_power_reg_bus_update(phandle->reg_bus_hdl,
+ max_usecase_ndx);
+ if (rc) {
+ pr_err("failed to set reg bus vote rc=%d\n", rc);
+ goto reg_bus_hdl_err;
+ }
+
+ rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+ if (rc) {
+ pr_err("clock enable failed rc:%d\n", rc);
+ goto clk_err;
+ }
+ } else {
+ msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+
+ sde_power_reg_bus_update(phandle->reg_bus_hdl,
+ max_usecase_ndx);
+
+ msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
+ }
+
+end:
+ mutex_unlock(&phandle->phandle_lock);
+ return rc;
+
+clk_err:
+ sde_power_reg_bus_update(phandle->reg_bus_hdl, prev_usecase_ndx);
+reg_bus_hdl_err:
+ msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
+vreg_err:
+ phandle->current_usecase_ndx = prev_usecase_ndx;
+ mutex_unlock(&phandle->phandle_lock);
+ return rc;
+}
+
+int sde_power_clk_set_rate(struct sde_power_handle *phandle, char *clock_name,
+ u64 rate)
+{
+ int i, rc = -EINVAL;
+ struct dss_module_power *mp;
+
+ if (!phandle) {
+ pr_err("invalid input power handle\n");
+ return -EINVAL;
+ }
+ mp = &phandle->mp;
+
+ for (i = 0; i < mp->num_clk; i++) {
+ if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
+ if (mp->clk_config[i].max_rate &&
+ (rate > mp->clk_config[i].max_rate))
+ rate = mp->clk_config[i].max_rate;
+
+ mp->clk_config[i].rate = rate;
+ rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+u64 sde_power_clk_get_rate(struct sde_power_handle *phandle, char *clock_name)
+{
+ int i;
+ struct dss_module_power *mp;
+ u64 rate = -EINVAL;
+
+ if (!phandle) {
+ pr_err("invalid input power handle\n");
+ return -EINVAL;
+ }
+ mp = &phandle->mp;
+
+ for (i = 0; i < mp->num_clk; i++) {
+ if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
+ rate = clk_get_rate(mp->clk_config[i].clk);
+ break;
+ }
+ }
+
+ return rate;
+}
+
+u64 sde_power_clk_get_max_rate(struct sde_power_handle *phandle,
+ char *clock_name)
+{
+ int i;
+ struct dss_module_power *mp;
+ u64 rate = 0;
+
+ if (!phandle) {
+ pr_err("invalid input power handle\n");
+ return 0;
+ }
+ mp = &phandle->mp;
+
+ for (i = 0; i < mp->num_clk; i++) {
+ if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
+ rate = mp->clk_config[i].max_rate;
+ break;
+ }
+ }
+
+ return rate;
+}
+
+struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle,
+ char *clock_name)
+{
+ int i;
+ struct dss_module_power *mp;
+ struct clk *clk = NULL;
+
+ if (!phandle) {
+ pr_err("invalid input power handle\n");
+ return 0;
+ }
+ mp = &phandle->mp;
+
+ for (i = 0; i < mp->num_clk; i++) {
+ if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
+ clk = mp->clk_config[i].clk;
+ break;
+ }
+ }
+
+ return clk;
+}
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
new file mode 100644
index 000000000000..b982d1704312
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -0,0 +1,229 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_POWER_HANDLE_H_
+#define _SDE_POWER_HANDLE_H_
+
+#define MAX_CLIENT_NAME_LEN 128
+
+#define SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA 2000000000
+#define SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA 2000000000
+
+/**
+ * mdss_bus_vote_type: register bus vote type
+ * VOTE_INDEX_DISABLE: removes the client vote
+ * VOTE_INDEX_LOW: keeps the lowest vote for register bus
+ * VOTE_INDEX_MAX: invalid
+ */
+enum mdss_bus_vote_type {
+ VOTE_INDEX_DISABLE,
+ VOTE_INDEX_LOW,
+ VOTE_INDEX_MAX,
+};
+
+/**
+ * enum sde_power_handle_data_bus_client - type of axi bus clients
+ * @SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
+ * @SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
+ * @SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
+ */
+enum sde_power_handle_data_bus_client {
+ SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+ SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+ SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX
+};
+
+/**
+ * struct sde_power_client: stores the power client for sde driver
+ * @name: name of the client
+ * @usecase_ndx: current regs bus vote type
+ * @refcount: current refcount if multiple modules are using same
+ * same client for enable/disable. Power module will
+ * aggregate the refcount and vote accordingly for this
+ * client.
+ * @id: assigned during create. helps for debugging.
+ * @list: list to attach power handle master list
+ * @ab: arbitrated bandwidth for each bus client
+ * @ib: instantaneous bandwidth for each bus client
+ */
+struct sde_power_client {
+ char name[MAX_CLIENT_NAME_LEN];
+ short usecase_ndx;
+ short refcount;
+ u32 id;
+ struct list_head list;
+ u64 ab[SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+ u64 ib[SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+};
+
+/**
+ * struct sde_power_data_handle: power handle struct for data bus
+ * @data_bus_scale_table: pointer to bus scaling table
+ * @data_bus_hdl: current data bus handle
+ * @axi_port_cnt: number of rt axi ports
+ * @nrt_axi_port_cnt: number of nrt axi ports
+ * @bus_channels: number of memory bus channels
+ * @curr_bw_uc_idx: current use case index of data bus
+ * @ao_bw_uc_idx: active only use case index of data bus
+ * @bus_ref_cnt: reference count of data bus enable request
+ * @handoff_pending: True to indicate if bootloader hand-over is pending
+ */
+struct sde_power_data_bus_handle {
+ struct msm_bus_scale_pdata *data_bus_scale_table;
+ u32 data_bus_hdl;
+ u32 axi_port_cnt;
+ u32 nrt_axi_port_cnt;
+ u32 bus_channels;
+ u32 curr_bw_uc_idx;
+ u32 ao_bw_uc_idx;
+ u32 bus_ref_cnt;
+ int handoff_pending;
+};
+
+/**
+ * struct sde_power_handle: power handle main struct
+ * @mp: module power for clock and regulator
+ * @client_clist: master list to store all clients
+ * @phandle_lock: lock to synchronize the enable/disable
+ * @dev: pointer to device structure
+ * @usecase_ndx: current usecase index
+ * @reg_bus_hdl: current register bus handle
+ * @data_bus_handle: context structure for data bus control
+ */
+struct sde_power_handle {
+ struct dss_module_power mp;
+ struct list_head power_client_clist;
+ struct mutex phandle_lock;
+ struct device *dev;
+ u32 current_usecase_ndx;
+ u32 reg_bus_hdl;
+ struct sde_power_data_bus_handle data_bus_handle;
+};
+
+/**
+ * sde_power_resource_init() - initializes the sde power handle
+ * @pdev: platform device to search the power resources
+ * @pdata: power handle to store the power resources
+ *
+ * Return: error code.
+ */
+int sde_power_resource_init(struct platform_device *pdev,
+ struct sde_power_handle *pdata);
+
+/**
+ * sde_power_resource_deinit() - release the sde power handle
+ * @pdev: platform device for power resources
+ * @pdata: power handle containing the resources
+ *
+ * Return: error code.
+ */
+void sde_power_resource_deinit(struct platform_device *pdev,
+ struct sde_power_handle *pdata);
+
+/**
+ * sde_power_client_create() - create the client on power handle
+ * @pdata: power handle containing the resources
+ * @client_name: new client name for registration
+ *
+ * Return: error code.
+ */
+struct sde_power_client *sde_power_client_create(struct sde_power_handle *pdata,
+ char *client_name);
+
+/**
+ * sde_power_client_destroy() - destroy the client on power handle
+ * @pdata: power handle containing the resources
+ * @client_name: new client name for registration
+ *
+ * Return: none
+ */
+void sde_power_client_destroy(struct sde_power_handle *phandle,
+ struct sde_power_client *client);
+
+/**
+ * sde_power_resource_enable() - enable/disable the power resources
+ * @pdata: power handle containing the resources
+ * @client: client information to enable/disable its vote
+ * @enable: boolean request for enable/disable
+ *
+ * Return: error code.
+ */
+int sde_power_resource_enable(struct sde_power_handle *pdata,
+ struct sde_power_client *pclient, bool enable);
+
+/**
+ * sde_power_clk_set_rate() - set the clock rate
+ * @pdata: power handle containing the resources
+ * @clock_name: clock name which needs rate update.
+ * @rate: Requested rate.
+ *
+ * Return: error code.
+ */
+int sde_power_clk_set_rate(struct sde_power_handle *pdata, char *clock_name,
+ u64 rate);
+
+/**
+ * sde_power_clk_get_rate() - get the clock rate
+ * @pdata: power handle containing the resources
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate
+ */
+u64 sde_power_clk_get_rate(struct sde_power_handle *pdata, char *clock_name);
+
+/**
+ * sde_power_clk_get_max_rate() - get the maximum clock rate
+ * @pdata: power handle containing the resources
+ * @clock_name: clock name to get the max rate.
+ *
+ * Return: maximum clock rate or 0 if not found.
+ */
+u64 sde_power_clk_get_max_rate(struct sde_power_handle *pdata,
+ char *clock_name);
+
+/**
+ * sde_power_clk_get_clk() - get the clock
+ * @pdata: power handle containing the resources
+ * @clock_name: clock name to get the clk pointer.
+ *
+ * Return: Pointer to clock
+ */
+struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle,
+ char *clock_name);
+
+/**
+ * sde_power_data_bus_set_quota() - set data bus quota for power client
+ * @phandle: power handle containing the resources
+ * @client: client information to set quota
+ * @bus_client: real-time or non-real-time bus client
+ * @ab_quota: arbitrated bus bandwidth
+ * @ib_quota: instantaneous bus bandwidth
+ *
+ * Return: zero if success, or error code otherwise
+ */
+int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
+ struct sde_power_client *pclient,
+ int bus_client, u64 ab_quota, u64 ib_quota);
+
+/**
+ * sde_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
+ * @phandle: power handle containing the resources
+ * @client: client information to bandwidth control
+ * @enable: true to enable bandwidth for data base
+ *
+ * Return: none
+ */
+void sde_power_data_bus_bandwidth_ctrl(struct sde_power_handle *phandle,
+ struct sde_power_client *pclient, int enable);
+
+#endif /* _SDE_POWER_HANDLE_H_ */
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 11226472d801..0f582cf35e6b 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -592,6 +592,9 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
unsigned int status = 0, tmp, int_bit;
int i;
+ atomic_inc(&adreno_dev->pending_irq_refcnt);
+ smp_mb__after_atomic();
+
adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);
/*
@@ -630,6 +633,10 @@ static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
adreno_writereg(adreno_dev, ADRENO_REG_RBBM_INT_CLEAR_CMD,
int_bit);
+ smp_mb__before_atomic();
+ atomic_dec(&adreno_dev->pending_irq_refcnt);
+ smp_mb__after_atomic();
+
return ret;
}
@@ -2030,7 +2037,18 @@ inline unsigned int adreno_irq_pending(struct adreno_device *adreno_dev)
adreno_readreg(adreno_dev, ADRENO_REG_RBBM_INT_0_STATUS, &status);
- return (status & gpudev->irq->mask) ? 1 : 0;
+ /*
+ * IRQ handler clears the RBBM INT0 status register immediately
+ * entering the ISR before actually serving the interrupt because
+ * of this we can't rely only on RBBM INT0 status only.
+ * Use pending_irq_refcnt along with RBBM INT0 to correctly
+ * determine whether any IRQ is pending or not.
+ */
+ if ((status & gpudev->irq->mask) ||
+ atomic_read(&adreno_dev->pending_irq_refcnt))
+ return 1;
+ else
+ return 0;
}
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 2c8345aadc07..f8c9b00d3f39 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -351,6 +351,7 @@ struct adreno_gpu_core {
* @ram_cycles_lo: Number of DDR clock cycles for the monitor session
* @perfctr_pwr_lo: Number of cycles VBIF is stalled by DDR
* @halt: Atomic variable to check whether the GPU is currently halted
+ * @pending_irq_refcnt: Atomic variable to keep track of running IRQ handlers
* @ctx_d_debugfs: Context debugfs node
* @pwrctrl_flag: Flag to hold adreno specific power attributes
* @profile_buffer: Memdesc holding the drawobj profiling buffer
@@ -408,6 +409,7 @@ struct adreno_device {
unsigned int starved_ram_lo;
unsigned int perfctr_pwr_lo;
atomic_t halt;
+ atomic_t pending_irq_refcnt;
struct dentry *ctx_d_debugfs;
unsigned long pwrctrl_flag;
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 0aab38ccc703..15c4b9427f8e 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -159,6 +159,7 @@ static const struct {
{ adreno_is_a530, a530_efuse_leakage },
{ adreno_is_a530, a530_efuse_speed_bin },
{ adreno_is_a505, a530_efuse_speed_bin },
+ { adreno_is_a512, a530_efuse_speed_bin },
};
static void a5xx_check_features(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index d997cdd2cc7e..6c667cb62896 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -809,6 +809,13 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
ptname = MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ?
KGSL_MMU_GLOBAL_PT : tid;
+ /*
+ * Trace needs to be logged before searching the faulting
+ * address in free list as it takes quite long time in
+ * search and delays the trace unnecessarily.
+ */
+ trace_kgsl_mmu_pagefault(ctx->kgsldev, addr,
+ ptname, write ? "write" : "read");
if (test_bit(KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE,
&adreno_dev->ft_pf_policy))
@@ -845,8 +852,6 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
}
}
- trace_kgsl_mmu_pagefault(ctx->kgsldev, addr,
- ptname, write ? "write" : "read");
/*
* We do not want the h/w to resume fetching data from an iommu
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index c34599c0594d..9b0cc7baca73 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -309,6 +309,9 @@ static int coresight_build_paths(struct coresight_device *csdev,
int i, ret = -EINVAL;
struct coresight_connection *conn;
+ if (!csdev)
+ return ret;
+
list_add(&csdev->path_link, path);
if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index b7504fdd380f..57145ea72e90 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -165,10 +165,19 @@
#define FAB_ID_GF 0x30
#define FAB_ID_SMIC 0x11
+#define FAB_ID_660_GF 0x0
+#define FAB_ID_660_TSMC 0x2
+#define FAB_ID_660_MX 0x3
#define FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV 1303168
#define FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C 3784
#define FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV 1338433
#define FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C 3655
+#define FG_ADC_RR_CHG_TEMP_660_GF_OFFSET_UV 1309001
+#define FG_RR_CHG_TEMP_660_GF_SLOPE_UV_PER_C 3403
+#define FG_ADC_RR_CHG_TEMP_660_SMIC_OFFSET_UV 1295898
+#define FG_RR_CHG_TEMP_660_SMIC_SLOPE_UV_PER_C 3596
+#define FG_ADC_RR_CHG_TEMP_660_MGNA_OFFSET_UV 1314779
+#define FG_RR_CHG_TEMP_660_MGNA_SLOPE_UV_PER_C 3496
#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC 25000
#define FG_ADC_RR_CHG_THRESHOLD_SCALE 4
@@ -388,23 +397,70 @@ static int rradc_post_process_die_temp(struct rradc_chip *chip,
return 0;
}
+static int rradc_get_660_fab_coeff(struct rradc_chip *chip,
+ int64_t *offset, int64_t *slope)
+{
+ switch (chip->pmic_fab_id->fab_id) {
+ case FAB_ID_660_GF:
+ *offset = FG_ADC_RR_CHG_TEMP_660_GF_OFFSET_UV;
+ *slope = FG_RR_CHG_TEMP_660_GF_SLOPE_UV_PER_C;
+ break;
+ case FAB_ID_660_TSMC:
+ *offset = FG_ADC_RR_CHG_TEMP_660_SMIC_OFFSET_UV;
+ *slope = FG_RR_CHG_TEMP_660_SMIC_SLOPE_UV_PER_C;
+ break;
+ default:
+ *offset = FG_ADC_RR_CHG_TEMP_660_MGNA_OFFSET_UV;
+ *slope = FG_RR_CHG_TEMP_660_MGNA_SLOPE_UV_PER_C;
+ }
+
+ return 0;
+}
+
+static int rradc_get_8998_fab_coeff(struct rradc_chip *chip,
+ int64_t *offset, int64_t *slope)
+{
+ switch (chip->pmic_fab_id->fab_id) {
+ case FAB_ID_GF:
+ *offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
+ *slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ break;
+ case FAB_ID_SMIC:
+ *offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
+ *slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rradc_post_process_chg_temp_hot(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
int *result_millidegc)
{
int64_t uv = 0, offset = 0, slope = 0;
+ int rc = 0;
if (chip->revid_dev_node) {
- switch (chip->pmic_fab_id->fab_id) {
- case FAB_ID_GF:
- offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
- slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ switch (chip->pmic_fab_id->pmic_subtype) {
+ case PM660_SUBTYPE:
+ rc = rradc_get_660_fab_coeff(chip, &offset, &slope);
+ if (rc < 0) {
+ pr_err("Unable to get fab id coefficients\n");
+ return -EINVAL;
+ }
break;
- case FAB_ID_SMIC:
- offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
- slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ case PMI8998_SUBTYPE:
+ rc = rradc_get_8998_fab_coeff(chip, &offset, &slope);
+ if (rc < 0) {
+ pr_err("Unable to get fab id coefficients\n");
+ return -EINVAL;
+ }
break;
default:
+ pr_err("No PMIC subtype found\n");
return -EINVAL;
}
} else {
@@ -444,18 +500,26 @@ static int rradc_post_process_chg_temp(struct rradc_chip *chip,
int *result_millidegc)
{
int64_t uv = 0, offset = 0, slope = 0;
+ int rc = 0;
if (chip->revid_dev_node) {
- switch (chip->pmic_fab_id->fab_id) {
- case FAB_ID_GF:
- offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
- slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ switch (chip->pmic_fab_id->pmic_subtype) {
+ case PM660_SUBTYPE:
+ rc = rradc_get_660_fab_coeff(chip, &offset, &slope);
+ if (rc < 0) {
+ pr_err("Unable to get fab id coefficients\n");
+ return -EINVAL;
+ }
break;
- case FAB_ID_SMIC:
- offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
- slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ case PMI8998_SUBTYPE:
+ rc = rradc_get_8998_fab_coeff(chip, &offset, &slope);
+ if (rc < 0) {
+ pr_err("Unable to get fab id coefficients\n");
+ return -EINVAL;
+ }
break;
default:
+ pr_err("No PMIC subtype found\n");
return -EINVAL;
}
} else {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index dd7d1c836809..1ddf51407884 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -846,6 +846,7 @@ void msm_isp_increment_frame_id(struct vfe_device *vfe_dev,
enum msm_vfe_dual_hw_type dual_hw_type;
enum msm_vfe_dual_hw_ms_type ms_type;
unsigned long flags;
+ int i;
struct master_slave_resource_info *ms_res =
&vfe_dev->common_data->ms_resource;
@@ -869,8 +870,27 @@ void msm_isp_increment_frame_id(struct vfe_device *vfe_dev,
src_info->dual_hw_ms_info.index)) {
pr_err("Frame out of sync on vfe %d\n",
vfe_dev->pdev->id);
- msm_isp_halt_send_error(vfe_dev,
- ISP_EVENT_BUF_FATAL_ERROR);
+ /*
+ * set this isp as async mode to force
+ *it sync again at the next sof
+ */
+ src_info->dual_hw_ms_info.sync_state =
+ MSM_ISP_DUAL_CAM_ASYNC;
+ /*
+ * set the other isp as async mode to force
+ * it sync again at the next sof
+ */
+ for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) {
+ if (ms_res->src_info[i] == NULL)
+ continue;
+ if (src_info == ms_res->src_info[i] ||
+ ms_res->src_info[i]->
+ active == 0)
+ continue;
+ ms_res->src_info[i]->dual_hw_ms_info.
+ sync_state =
+ MSM_ISP_DUAL_CAM_ASYNC;
+ }
}
ms_res->src_sof_mask |= (1 <<
src_info->dual_hw_ms_info.index);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
index e3022b2dbe34..55a743737c59 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csid/msm_csid.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -58,6 +58,7 @@
#define TRUE 1
#define FALSE 0
+#define MAX_LANE_COUNT 4
#define CSID_TIMEOUT msecs_to_jiffies(100)
#undef CDBG
@@ -284,6 +285,12 @@ static int msm_csid_config(struct csid_device *csid_dev,
csid_params->lane_assign);
CDBG("%s csid_params phy_sel = %d\n", __func__,
csid_params->phy_sel);
+ if ((csid_params->lane_cnt == 0) ||
+ (csid_params->lane_cnt > MAX_LANE_COUNT)) {
+ pr_err("%s:%d invalid lane count = %d\n",
+ __func__, __LINE__, csid_params->lane_cnt);
+ return -EINVAL;
+ }
csid_dev->csid_lane_cnt = csid_params->lane_cnt;
rc = msm_csid_reset(csid_dev);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
index 9048d54bed38..1f92186feeef 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
@@ -30,6 +30,7 @@
#include "sde_rotator_base.h"
#include "sde_rotator_util.h"
#include "sde_rotator_trace.h"
+#include "sde_rotator_debug.h"
static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
{
@@ -237,6 +238,8 @@ static u32 get_ot_limit(u32 reg_off, u32 bit_off,
exit:
SDEROT_DBG("ot_lim=%d\n", ot_lim);
+ SDEROT_EVTLOG(params->width, params->height, params->fmt, params->fps,
+ ot_lim);
return ot_lim;
}
@@ -248,6 +251,7 @@ void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params)
params->reg_off_vbif_lim_conf;
u32 bit_off_vbif_lim_conf = (params->xin_id % 4) * 8;
u32 reg_val;
+ u32 sts;
bool forced_on;
ot_lim = get_ot_limit(
@@ -258,6 +262,16 @@ void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params)
if (ot_lim == 0)
goto exit;
+ if (params->rotsts_base && params->rotsts_busy_mask) {
+ sts = readl_relaxed(params->rotsts_base);
+ if (sts & params->rotsts_busy_mask) {
+ SDEROT_ERR(
+ "Rotator still busy, should not modify VBIF\n");
+ SDEROT_EVTLOG_TOUT_HANDLER(
+ "rot", "vbif_dbg_bus", "panic");
+ }
+ }
+
trace_rot_perf_set_ot(params->num, params->xin_id, ot_lim);
forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
@@ -283,6 +297,7 @@ void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params)
force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
params->reg_off_mdp_clk_ctrl, false);
+ SDEROT_EVTLOG(params->num, params->xin_id, ot_lim);
exit:
return;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index c04e71f459d1..a7c1e890758e 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -39,6 +39,8 @@ struct sde_mdp_set_ot_params {
u32 reg_off_vbif_lim_conf;
u32 reg_off_mdp_clk_ctrl;
u32 bit_off_mdp_clk_ctrl;
+ char __iomem *rotsts_base;
+ u32 rotsts_busy_mask;
};
enum sde_bus_vote_type {
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 29215c1a5910..e9988400b729 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -1237,14 +1237,16 @@ static int sde_rotator_calc_perf(struct sde_rot_mgr *mgr,
perf->rdot_limit = sde_mdp_get_ot_limit(
config->input.width, config->input.height,
- config->input.format, max_fps, true);
+ config->input.format, config->frame_rate, true);
perf->wrot_limit = sde_mdp_get_ot_limit(
config->input.width, config->input.height,
- config->input.format, max_fps, false);
+ config->input.format, config->frame_rate, false);
SDEROT_DBG("clk:%lu, rdBW:%d, wrBW:%d, rdOT:%d, wrOT:%d\n",
perf->clk_rate, read_bw, write_bw, perf->rdot_limit,
perf->wrot_limit);
+ SDEROT_EVTLOG(perf->clk_rate, read_bw, write_bw, perf->rdot_limit,
+ perf->wrot_limit);
return 0;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_pipe.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_pipe.c
index 3d84389513f1..5f886d7f1af2 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_pipe.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_pipe.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -350,7 +350,7 @@ static int sde_mdp_src_addr_setup(struct sde_mdp_pipe *pipe,
static void sde_mdp_set_ot_limit_pipe(struct sde_mdp_pipe *pipe)
{
- struct sde_mdp_set_ot_params ot_params;
+ struct sde_mdp_set_ot_params ot_params = {0,};
ot_params.xin_id = pipe->xin_id;
ot_params.num = pipe->num;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c
index f9dc34167c59..863dfb09ad0f 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c
@@ -402,7 +402,7 @@ static int sde_mdp_wb_wait4comp(struct sde_mdp_ctl *ctl, void *arg)
static void sde_mdp_set_ot_limit_wb(struct sde_mdp_writeback_ctx *ctx)
{
- struct sde_mdp_set_ot_params ot_params;
+ struct sde_mdp_set_ot_params ot_params = {0,};
ot_params.xin_id = ctx->xin_id;
ot_params.num = ctx->wb_num;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 3bb8399da4bf..d7fb167ab49f 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -339,6 +339,8 @@ static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
*/
static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
{
+ struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+
SDEROT_ERR(
"op_mode = %x, int_en = %x, int_status = %x\n",
SDE_ROTREG_READ(rot->mdss_base,
@@ -370,6 +372,10 @@ static void sde_hw_rotator_dump_status(struct sde_hw_rotator *rot)
"UBWC decode status = %x, UBWC encode status = %x\n",
SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS),
SDE_ROTREG_READ(rot->mdss_base, ROT_WB_UBWC_ERROR_STATUS));
+
+ SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
+ SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
+ SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));
}
/**
@@ -1689,7 +1695,8 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
item->input.width, item->input.height,
item->output.width, item->output.height,
entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr,
- item->input.format, item->output.format);
+ item->input.format, item->output.format,
+ entry->perf->config.frame_rate);
if (mdata->default_ot_rd_limit) {
struct sde_mdp_set_ot_params ot_params;
@@ -1708,6 +1715,8 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
ot_params.fmt = ctx->is_traffic_shaping ?
SDE_PIX_FMT_ABGR_8888 :
entry->perf->config.input.format;
+ ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
+ ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
sde_mdp_set_ot_limit(&ot_params);
}
@@ -1728,6 +1737,8 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
ot_params.fmt = ctx->is_traffic_shaping ?
SDE_PIX_FMT_ABGR_8888 :
entry->perf->config.input.format;
+ ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
+ ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
sde_mdp_set_ot_limit(&ot_params);
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index c5b4872b8e23..50ea4a200dfa 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1087,7 +1087,8 @@ static void handle_event_change(enum hal_command_response cmd, void *data)
rc = msm_comm_g_ctrl_for_id(inst,
V4L2_CID_MPEG_VIDC_VIDEO_CONTINUE_DATA_TRANSFER);
- if (!IS_ERR_VALUE(rc) && rc == true) {
+ if ((!IS_ERR_VALUE(rc) && rc == true) ||
+ is_thumbnail_session(inst)) {
event = V4L2_EVENT_SEQ_CHANGED_SUFFICIENT;
if (msm_comm_get_stream_output_mode(inst) ==
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
index 3cd1c38f8f37..65f70d901f2a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -570,7 +570,8 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
inst->dcvs.extra_buffer_count = 0;
if (!IS_VALID_DCVS_SESSION(num_mbs_per_frame,
- res->dcvs_limit[inst->session_type].min_mbpf)) {
+ res->dcvs_limit[inst->session_type].min_mbpf) ||
+ (inst->flags & VIDC_THUMBNAIL)) {
inst->dcvs.extra_buffer_count = 0;
is_dcvs_supported = false;
goto dcvs_decision_done;
diff --git a/drivers/mfd/wcd9xxx-utils.c b/drivers/mfd/wcd9xxx-utils.c
index 909e2f77a43e..7a035cc45a18 100644
--- a/drivers/mfd/wcd9xxx-utils.c
+++ b/drivers/mfd/wcd9xxx-utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -669,7 +669,7 @@ int wcd9xxx_reset(struct device *dev)
return -EINVAL;
}
- value = msm_cdc_get_gpio_state(wcd9xxx->wcd_rst_np);
+ value = msm_cdc_pinctrl_get_state(wcd9xxx->wcd_rst_np);
if (value > 0) {
wcd9xxx->avoid_cdc_rstlow = 1;
return 0;
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
index c963280e5bf5..b7af80854420 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
@@ -1,6 +1,6 @@
/* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
- * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -119,7 +119,10 @@ static int audio_aio_ion_lookup_vaddr(struct q6audio_aio *audio, void *addr,
list_for_each_entry(region_elt, &audio->ion_region_queue, list) {
if (addr >= region_elt->vaddr &&
addr < region_elt->vaddr + region_elt->len &&
- addr + len <= region_elt->vaddr + region_elt->len) {
+ addr + len <= region_elt->vaddr + region_elt->len &&
+ addr + len > addr) {
+ /* to avoid integer addition overflow */
+
/* offset since we could pass vaddr inside a registerd
* ion buffer
*/
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index d712f29da9f1..a7546471e780 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -345,6 +345,7 @@ static int cmdq_enable(struct mmc_host *mmc)
{
int err = 0;
u32 cqcfg;
+ u32 cqcap = 0;
bool dcmd_enable;
struct cmdq_host *cq_host = mmc_cmdq_private(mmc);
@@ -373,6 +374,18 @@ static int cmdq_enable(struct mmc_host *mmc)
cqcfg = ((cq_host->caps & CMDQ_TASK_DESC_SZ_128 ? CQ_TASK_DESC_SZ : 0) |
(dcmd_enable ? CQ_DCMD : 0));
+ cqcap = cmdq_readl(cq_host, CQCAP);
+ if (cqcap & CQCAP_CS) {
+ /*
+ * In case host controller supports cryptographic operations
+ * then, it uses 128bit task descriptor. Upper 64 bits of task
+ * descriptor would be used to pass crypto specific information.
+ */
+ cq_host->caps |= CMDQ_CAP_CRYPTO_SUPPORT |
+ CMDQ_TASK_DESC_SZ_128;
+ cqcfg |= CQ_ICE_ENABLE;
+ }
+
cmdq_writel(cq_host, cqcfg, CQCFG);
/* enable CQ_HOST */
cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) | CQ_ENABLE,
@@ -688,6 +701,30 @@ static void cmdq_prep_dcmd_desc(struct mmc_host *mmc,
upper_32_bits(*task_desc));
}
+static inline
+void cmdq_prep_crypto_desc(struct cmdq_host *cq_host, u64 *task_desc,
+ u64 ice_ctx)
+{
+ u64 *ice_desc = NULL;
+
+ if (cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) {
+ /*
+ * Get the address of ice context for the given task descriptor.
+ * ice context is present in the upper 64bits of task descriptor
+ * ice_conext_base_address = task_desc + 8-bytes
+ */
+ ice_desc = (__le64 __force *)((u8 *)task_desc +
+ CQ_TASK_DESC_TASK_PARAMS_SIZE);
+ memset(ice_desc, 0, CQ_TASK_DESC_ICE_PARAMS_SIZE);
+
+ /*
+ * Assign upper 64bits data of task descriptor with ice context
+ */
+ if (ice_ctx)
+ *ice_desc = cpu_to_le64(ice_ctx);
+ }
+}
+
static void cmdq_pm_qos_vote(struct sdhci_host *host, struct mmc_request *mrq)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -711,6 +748,7 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq)
u32 tag = mrq->cmdq_req->tag;
struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
struct sdhci_host *host = mmc_priv(mmc);
+ u64 ice_ctx = 0;
if (!cq_host->enabled) {
pr_err("%s: CMDQ host not enabled yet !!!\n",
@@ -730,7 +768,7 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq)
}
if (cq_host->ops->crypto_cfg) {
- err = cq_host->ops->crypto_cfg(mmc, mrq, tag);
+ err = cq_host->ops->crypto_cfg(mmc, mrq, tag, &ice_ctx);
if (err) {
pr_err("%s: failed to configure crypto: err %d tag %d\n",
mmc_hostname(mmc), err, tag);
@@ -743,6 +781,9 @@ static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq)
cmdq_prep_task_desc(mrq, &data, 1,
(mrq->cmdq_req->cmdq_req_flags & QBR));
*task_desc = cpu_to_le64(data);
+
+ cmdq_prep_crypto_desc(cq_host, task_desc, ice_ctx);
+
cmdq_log_task_desc_history(cq_host, *task_desc, false);
err = cmdq_prep_tran_desc(mrq, cq_host, tag);
@@ -787,7 +828,8 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG);
cmdq_runtime_pm_put(cq_host);
- if (cq_host->ops->crypto_cfg_reset)
+ if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) &&
+ cq_host->ops->crypto_cfg_reset)
cq_host->ops->crypto_cfg_reset(mmc, tag);
mrq->done(mrq);
}
diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h
index e1cbb126f411..05c924ae0935 100644
--- a/drivers/mmc/host/cmdq_hci.h
+++ b/drivers/mmc/host/cmdq_hci.h
@@ -18,11 +18,13 @@
#define CQVER 0x00
/* capabilities */
#define CQCAP 0x04
+#define CQCAP_CS (1 << 28)
/* configuration */
#define CQCFG 0x08
#define CQ_DCMD 0x00001000
#define CQ_TASK_DESC_SZ 0x00000100
#define CQ_ENABLE 0x00000001
+#define CQ_ICE_ENABLE 0x00000002
/* control */
#define CQCTL 0x0C
@@ -144,6 +146,9 @@
#define CQ_VENDOR_CFG 0x100
#define CMDQ_SEND_STATUS_TRIGGER (1 << 31)
+#define CQ_TASK_DESC_TASK_PARAMS_SIZE 8
+#define CQ_TASK_DESC_ICE_PARAMS_SIZE 8
+
struct task_history {
u64 task;
bool is_dcmd;
@@ -161,6 +166,7 @@ struct cmdq_host {
u32 dcmd_slot;
u32 caps;
#define CMDQ_TASK_DESC_SZ_128 0x1
+#define CMDQ_CAP_CRYPTO_SUPPORT 0x2
u32 quirks;
#define CMDQ_QUIRK_SHORT_TXFR_DESC_SZ 0x1
@@ -208,7 +214,7 @@ struct cmdq_host_ops {
void (*enhanced_strobe_mask)(struct mmc_host *mmc, bool set);
int (*reset)(struct mmc_host *mmc);
int (*crypto_cfg)(struct mmc_host *mmc, struct mmc_request *mrq,
- u32 slot);
+ u32 slot, u64 *ice_ctx);
void (*crypto_cfg_reset)(struct mmc_host *mmc, unsigned int slot);
void (*post_cqe_halt)(struct mmc_host *mmc);
};
diff --git a/drivers/mmc/host/sdhci-msm-ice.c b/drivers/mmc/host/sdhci-msm-ice.c
index 2ef459582aae..a6ef06aa6f1d 100644
--- a/drivers/mmc/host/sdhci-msm-ice.c
+++ b/drivers/mmc/host/sdhci-msm-ice.c
@@ -58,6 +58,52 @@ out:
return ice_vops;
}
+static
+void sdhci_msm_enable_ice_hci(struct sdhci_host *host, bool enable)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ u32 config = 0;
+ u32 ice_cap = 0;
+
+ /*
+ * Enable the cryptographic support inside SDHC.
+ * This is a global config which needs to be enabled
+ * all the time.
+ * Only when it is enabled, the ICE_HCI capability
+ * will get reflected in CQCAP register.
+ */
+ config = readl_relaxed(host->ioaddr + HC_VENDOR_SPECIFIC_FUNC4);
+
+ if (enable)
+ config &= ~DISABLE_CRYPTO;
+ else
+ config |= DISABLE_CRYPTO;
+ writel_relaxed(config, host->ioaddr + HC_VENDOR_SPECIFIC_FUNC4);
+
+ /*
+ * CQCAP register is in different register space from above
+ * ice global enable register. So a mb() is required to ensure
+ * above write gets completed before reading the CQCAP register.
+ */
+ mb();
+
+ /*
+ * Check if ICE HCI capability support is present
+ * If present, enable it.
+ */
+ ice_cap = readl_relaxed(msm_host->cryptoio + ICE_CQ_CAPABILITIES);
+ if (ice_cap & ICE_HCI_SUPPORT) {
+ config = readl_relaxed(msm_host->cryptoio + ICE_CQ_CONFIG);
+
+ if (enable)
+ config |= CRYPTO_GENERAL_ENABLE;
+ else
+ config &= ~CRYPTO_GENERAL_ENABLE;
+ writel_relaxed(config, msm_host->cryptoio + ICE_CQ_CONFIG);
+ }
+}
+
int sdhci_msm_ice_get_dev(struct sdhci_host *host)
{
struct device *sdhc_dev;
@@ -96,6 +142,37 @@ int sdhci_msm_ice_get_dev(struct sdhci_host *host)
return 0;
}
+static
+int sdhci_msm_ice_pltfm_init(struct sdhci_msm_host *msm_host)
+{
+ struct resource *ice_memres = NULL;
+ struct platform_device *pdev = msm_host->pdev;
+ int err = 0;
+
+ if (!msm_host->ice_hci_support)
+ goto out;
+ /*
+ * ICE HCI registers are present in cmdq register space.
+ * So map the cmdq mem for accessing ICE HCI registers.
+ */
+ ice_memres = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "cmdq_mem");
+ if (!ice_memres) {
+ dev_err(&pdev->dev, "Failed to get iomem resource for ice\n");
+ err = -EINVAL;
+ goto out;
+ }
+ msm_host->cryptoio = devm_ioremap(&pdev->dev,
+ ice_memres->start,
+ resource_size(ice_memres));
+ if (!msm_host->cryptoio) {
+ dev_err(&pdev->dev, "Failed to remap registers\n");
+ err = -ENOMEM;
+ }
+out:
+ return err;
+}
+
int sdhci_msm_ice_init(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -103,6 +180,13 @@ int sdhci_msm_ice_init(struct sdhci_host *host)
int err = 0;
if (msm_host->ice.vops->init) {
+ err = sdhci_msm_ice_pltfm_init(msm_host);
+ if (err)
+ goto out;
+
+ if (msm_host->ice_hci_support)
+ sdhci_msm_enable_ice_hci(host, true);
+
err = msm_host->ice.vops->init(msm_host->ice.pdev,
msm_host,
sdhci_msm_ice_error_cb);
@@ -110,6 +194,8 @@ int sdhci_msm_ice_init(struct sdhci_host *host)
pr_err("%s: ice init err %d\n",
mmc_hostname(host->mmc), err);
sdhci_msm_ice_print_regs(host);
+ if (msm_host->ice_hci_support)
+ sdhci_msm_enable_ice_hci(host, false);
goto out;
}
msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE;
@@ -125,60 +211,47 @@ void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot)
host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot);
}
-int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
- u32 slot)
+static
+int sdhci_msm_ice_get_cfg(struct sdhci_msm_host *msm_host, struct request *req,
+ unsigned int *bypass, short *key_index)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_msm_host *msm_host = pltfm_host->priv;
int err = 0;
struct ice_data_setting ice_set;
- sector_t lba = 0;
- unsigned int ctrl_info_val = 0;
- unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
- struct request *req;
- if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
- pr_err("%s: ice is in invalid state %d\n",
- mmc_hostname(host->mmc), msm_host->ice.state);
- return -EINVAL;
- }
-
- BUG_ON(!mrq);
memset(&ice_set, 0, sizeof(struct ice_data_setting));
- req = mrq->req;
- if (req) {
- lba = req->__sector;
- if (msm_host->ice.vops->config_start) {
- err = msm_host->ice.vops->config_start(
- msm_host->ice.pdev,
- req, &ice_set, false);
- if (err) {
- pr_err("%s: ice config failed %d\n",
- mmc_hostname(host->mmc), err);
- return err;
- }
+ if (msm_host->ice.vops->config_start) {
+ err = msm_host->ice.vops->config_start(
+ msm_host->ice.pdev,
+ req, &ice_set, false);
+ if (err) {
+ pr_err("%s: ice config failed %d\n",
+ mmc_hostname(msm_host->mmc), err);
+ return err;
}
- /* if writing data command */
- if (rq_data_dir(req) == WRITE)
- bypass = ice_set.encr_bypass ?
- SDHCI_MSM_ICE_ENABLE_BYPASS :
- SDHCI_MSM_ICE_DISABLE_BYPASS;
- /* if reading data command */
- else if (rq_data_dir(req) == READ)
- bypass = ice_set.decr_bypass ?
- SDHCI_MSM_ICE_ENABLE_BYPASS :
- SDHCI_MSM_ICE_DISABLE_BYPASS;
- pr_debug("%s: %s: slot %d encr_bypass %d bypass %d decr_bypass %d key_index %d\n",
- mmc_hostname(host->mmc),
- (rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
- slot, ice_set.encr_bypass, bypass,
- ice_set.decr_bypass,
- ice_set.crypto_data.key_index);
}
+ /* if writing data command */
+ if (rq_data_dir(req) == WRITE)
+ *bypass = ice_set.encr_bypass ?
+ SDHCI_MSM_ICE_ENABLE_BYPASS :
+ SDHCI_MSM_ICE_DISABLE_BYPASS;
+ /* if reading data command */
+ else if (rq_data_dir(req) == READ)
+ *bypass = ice_set.decr_bypass ?
+ SDHCI_MSM_ICE_ENABLE_BYPASS :
+ SDHCI_MSM_ICE_DISABLE_BYPASS;
+ *key_index = ice_set.crypto_data.key_index;
+ return err;
+}
+
+static
+void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba,
+ u32 slot, unsigned int bypass, short key_index)
+{
+ unsigned int ctrl_info_val = 0;
/* Configure ICE index */
ctrl_info_val =
- (ice_set.crypto_data.key_index &
+ (key_index &
MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX)
<< OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX;
@@ -199,9 +272,145 @@ int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n + 16 * slot);
writel_relaxed(ctrl_info_val,
host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot);
+ /* Ensure ICE registers are configured before issuing SDHCI request */
+ mb();
+}
+static inline
+void sdhci_msm_ice_hci_update_cmdq_cfg(u64 dun, unsigned int bypass,
+ short key_index, u64 *ice_ctx)
+{
+ /*
+ * The naming convention got changed between ICE2.0 and ICE3.0
+ * registers fields. Below is the equivalent names for
+ * ICE3.0 Vs ICE2.0:
+ * Data Unit Number(DUN) == Logical Base address(LBA)
+ * Crypto Configuration index (CCI) == Key Index
+ * Crypto Enable (CE) == !BYPASS
+ */
+ if (ice_ctx)
+ *ice_ctx = DATA_UNIT_NUM(dun) |
+ CRYPTO_CONFIG_INDEX(key_index) |
+ CRYPTO_ENABLE(!bypass);
+}
+
+static
+void sdhci_msm_ice_hci_update_noncq_cfg(struct sdhci_host *host,
+ u64 dun, unsigned int bypass, short key_index)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ unsigned int crypto_params = 0;
+ /*
+ * The naming convention got changed between ICE2.0 and ICE3.0
+ * register fields. Below are the equivalent names for
+ * ICE3.0 Vs ICE2.0:
+ * Data Unit Number(DUN) == Logical Base address(LBA)
+ * Crypto Configuration index (CCI) == Key Index
+ * Crypto Enable (CE) == !BYPASS
+ */
+ /* Configure ICE bypass mode */
+ crypto_params |=
+ (!bypass & MASK_SDHCI_MSM_ICE_HCI_PARAM_CE)
+ << OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE;
+ /* Configure Crypto Configure Index (CCI) */
+ crypto_params |= (key_index &
+ MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI)
+ << OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI;
+
+ writel_relaxed((crypto_params & 0xFFFFFFFF),
+ msm_host->cryptoio + ICE_NONCQ_CRYPTO_PARAMS);
+
+ /* Update DUN */
+ writel_relaxed((dun & 0xFFFFFFFF),
+ msm_host->cryptoio + ICE_NONCQ_CRYPTO_DUN);
/* Ensure ICE registers are configured before issuing SDHCI request */
mb();
+}
+
+int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+ u32 slot)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+ short key_index = 0;
+ sector_t lba = 0;
+ unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
+ struct request *req;
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ WARN_ON(!mrq);
+ if (!mrq)
+ return -EINVAL;
+ req = mrq->req;
+ if (req) {
+ lba = req->__sector;
+ err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index);
+ if (err)
+ return err;
+ pr_debug("%s: %s: slot %d bypass %d key_index %d\n",
+ mmc_hostname(host->mmc),
+ (rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
+ slot, bypass, key_index);
+ }
+
+ if (msm_host->ice_hci_support) {
+ /* For ICE HCI / ICE3.0 */
+ sdhci_msm_ice_hci_update_noncq_cfg(host, lba, bypass,
+ key_index);
+ } else {
+ /* For ICE versions earlier to ICE3.0 */
+ sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index);
+ }
+ return 0;
+}
+
+int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
+ struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+ short key_index;
+ sector_t lba = 0;
+ unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
+ struct request *req;
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ WARN_ON(!mrq);
+ if (!mrq)
+ return -EINVAL;
+ req = mrq->req;
+ if (req) {
+ lba = req->__sector;
+ err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index);
+ if (err)
+ return err;
+ pr_debug("%s: %s: slot %d bypass %d key_index %d\n",
+ mmc_hostname(host->mmc),
+ (rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
+ slot, bypass, key_index);
+ }
+
+ if (msm_host->ice_hci_support) {
+ /* For ICE HCI / ICE3.0 */
+ sdhci_msm_ice_hci_update_cmdq_cfg(lba, bypass, key_index,
+ ice_ctx);
+ } else {
+ /* For ICE versions earlier to ICE3.0 */
+ sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index);
+ }
return 0;
}
@@ -227,6 +436,10 @@ int sdhci_msm_ice_reset(struct sdhci_host *host)
}
}
+ /* If ICE HCI support is present then re-enable it */
+ if (msm_host->ice_hci_support)
+ sdhci_msm_enable_ice_hci(host, true);
+
if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
pr_err("%s: ice is in invalid state after reset %d\n",
mmc_hostname(host->mmc), msm_host->ice.state);
diff --git a/drivers/mmc/host/sdhci-msm-ice.h b/drivers/mmc/host/sdhci-msm-ice.h
index 1c4266330290..d8d640437522 100644
--- a/drivers/mmc/host/sdhci-msm-ice.h
+++ b/drivers/mmc/host/sdhci-msm-ice.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -37,11 +37,27 @@
#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n 0x308
#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n 0x30C
+/* ICE3.0 registers which got added in cmdq reg space */
+#define ICE_CQ_CAPABILITIES 0x04
+#define ICE_HCI_SUPPORT (1 << 28)
+#define ICE_CQ_CONFIG 0x08
+#define CRYPTO_GENERAL_ENABLE (1 << 1)
+#define ICE_NONCQ_CRYPTO_PARAMS 0x70
+#define ICE_NONCQ_CRYPTO_DUN 0x74
+
+/* ICE3.0 registers which got added in hc reg space */
+#define HC_VENDOR_SPECIFIC_FUNC4 0x260
+#define DISABLE_CRYPTO (1 << 15)
+#define HC_VENDOR_SPECIFIC_ICE_CTRL 0x800
+#define ICE_SW_RST_EN (1 << 0)
+
/* SDHCI MSM ICE CTRL Info register offset */
enum {
OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS = 0,
- OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX = 0x1,
- OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU = 0x6,
+ OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX = 1,
+ OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU = 6,
+ OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI = 0,
+ OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE = 8,
};
/* SDHCI MSM ICE CTRL Info register masks */
@@ -49,6 +65,8 @@ enum {
MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS = 0x1,
MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX = 0x1F,
MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU = 0x7,
+ MASK_SDHCI_MSM_ICE_HCI_PARAM_CE = 0x1,
+ MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI = 0xff
};
/* SDHCI MSM ICE encryption/decryption bypass state */
@@ -76,12 +94,19 @@ enum {
SDHCI_MSM_ICE_STATE_SUSPENDED = 2,
};
+/* crypto context fields in cmdq data command task descriptor */
+#define DATA_UNIT_NUM(x) (((u64)(x) & 0xFFFFFFFF) << 0)
+#define CRYPTO_CONFIG_INDEX(x) (((u64)(x) & 0xFF) << 32)
+#define CRYPTO_ENABLE(x) (((u64)(x) & 0x1) << 47)
+
#ifdef CONFIG_MMC_SDHCI_MSM_ICE
int sdhci_msm_ice_get_dev(struct sdhci_host *host);
int sdhci_msm_ice_init(struct sdhci_host *host);
void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot);
int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
u32 slot);
+int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
+ struct mmc_request *mrq, u32 slot, u64 *ice_ctx);
int sdhci_msm_ice_reset(struct sdhci_host *host);
int sdhci_msm_ice_resume(struct sdhci_host *host);
int sdhci_msm_ice_suspend(struct sdhci_host *host);
@@ -113,6 +138,11 @@ inline int sdhci_msm_ice_cfg(struct sdhci_host *host,
{
return 0;
}
+inline int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
+ struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
+{
+ return 0;
+}
inline int sdhci_msm_ice_reset(struct sdhci_host *host)
{
return 0;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 466e0a2c8483..f43f22503aa3 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2906,7 +2906,24 @@ out:
return rc;
}
+static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ if (atomic_read(&msm_host->controller_clock)) {
+ if (!IS_ERR(msm_host->clk))
+ clk_disable_unprepare(msm_host->clk);
+ if (!IS_ERR(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
+ if (!IS_ERR(msm_host->ice_clk))
+ clk_disable_unprepare(msm_host->ice_clk);
+ sdhci_msm_bus_voting(host, 0);
+ atomic_set(&msm_host->controller_clock, 0);
+ pr_debug("%s: %s: disabled controller clock\n",
+ mmc_hostname(host->mmc), __func__);
+ }
+}
static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
{
@@ -3362,8 +3379,14 @@ void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
struct sdhci_msm_host *msm_host = pltfm_host->priv;
/* Set ICE core to be reset in sync with SDHC core */
- if (msm_host->ice.pdev)
- writel_relaxed(1, host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
+ if (msm_host->ice.pdev) {
+ if (msm_host->ice_hci_support)
+ writel_relaxed(1, host->ioaddr +
+ HC_VENDOR_SPECIFIC_ICE_CTRL);
+ else
+ writel_relaxed(1,
+ host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
+ }
sdhci_reset(host, mask);
}
@@ -3974,6 +3997,7 @@ static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
static struct sdhci_ops sdhci_msm_ops = {
.crypto_engine_cfg = sdhci_msm_ice_cfg,
+ .crypto_engine_cmdq_cfg = sdhci_msm_ice_cmdq_cfg,
.crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
.crypto_engine_reset = sdhci_msm_ice_reset,
.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
@@ -4099,6 +4123,9 @@ static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
/* keep track of the value in SDHCI_CAPABILITIES */
msm_host->caps_0 = caps;
+
+ if ((major == 1) && (minor >= 0x6b))
+ msm_host->ice_hci_support = true;
}
#ifdef CONFIG_MMC_CQ_HCI
@@ -4848,7 +4875,7 @@ static int sdhci_msm_suspend(struct device *dev)
}
ret = sdhci_msm_runtime_suspend(dev);
out:
-
+ sdhci_msm_disable_controller_clock(host);
if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
if (sdio_cfg)
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
index 6f96ea97bddc..c26636198a22 100644
--- a/drivers/mmc/host/sdhci-msm.h
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -174,6 +174,8 @@ struct sdhci_msm_ice_data {
struct sdhci_msm_host {
struct platform_device *pdev;
void __iomem *core_mem; /* MSM SDCC mapped address */
+ void __iomem *cryptoio; /* ICE HCI mapped address */
+ bool ice_hci_support;
int pwr_irq; /* power irq */
struct clk *clk; /* main SD/MMC bus clock */
struct clk *pclk; /* SDHC peripheral bus clock */
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 3fd564388720..43853306a6bb 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -3632,14 +3632,35 @@ static void sdhci_cmdq_clear_set_dumpregs(struct mmc_host *mmc, bool set)
host->ops->clear_set_dumpregs(host, set);
}
static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc,
- struct mmc_request *mrq, u32 slot)
+ struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
{
struct sdhci_host *host = mmc_priv(mmc);
+ int err = 0;
if (!host->is_crypto_en)
return 0;
- return sdhci_crypto_cfg(host, mrq, slot);
+ if (host->crypto_reset_reqd && host->ops->crypto_engine_reset) {
+ err = host->ops->crypto_engine_reset(host);
+ if (err) {
+ pr_err("%s: crypto reset failed\n",
+ mmc_hostname(host->mmc));
+ goto out;
+ }
+ host->crypto_reset_reqd = false;
+ }
+
+ if (host->ops->crypto_engine_cmdq_cfg) {
+ err = host->ops->crypto_engine_cmdq_cfg(host, mrq,
+ slot, ice_ctx);
+ if (err) {
+ pr_err("%s: failed to configure crypto\n",
+ mmc_hostname(host->mmc));
+ goto out;
+ }
+ }
+out:
+ return err;
}
static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot)
@@ -3702,7 +3723,7 @@ static void sdhci_cmdq_clear_set_dumpregs(struct mmc_host *mmc, bool set)
}
static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc,
- struct mmc_request *mrq, u32 slot)
+ struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
{
return 0;
}
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index e5419b42a444..c4bbdd80f29c 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -657,6 +657,8 @@ struct sdhci_ops {
int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
int (*crypto_engine_cfg)(struct sdhci_host *host,
struct mmc_request *mrq, u32 slot);
+ int (*crypto_engine_cmdq_cfg)(struct sdhci_host *host,
+ struct mmc_request *mrq, u32 slot, u64 *ice_ctx);
int (*crypto_engine_reset)(struct sdhci_host *host);
void (*crypto_cfg_reset)(struct sdhci_host *host, unsigned int slot);
void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
diff --git a/drivers/net/ethernet/msm/ecm_ipa.c b/drivers/net/ethernet/msm/ecm_ipa.c
index c7c4da22139d..0f25932d8855 100644
--- a/drivers/net/ethernet/msm/ecm_ipa.c
+++ b/drivers/net/ethernet/msm/ecm_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -9,7 +9,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-
#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
@@ -106,10 +105,6 @@ enum ecm_ipa_operation {
* struct ecm_ipa_dev - main driver context parameters
* @net: network interface struct implemented by this driver
* @directory: debugfs directory for various debuging switches
- * @tx_enable: flag that enable/disable Tx path to continue to IPA
- * @rx_enable: flag that enable/disable Rx path to continue to IPA
- * @rm_enable: flag that enable/disable Resource manager request prior to Tx
- * @dma_enable: flag that allow on-the-fly DMA mode for IPA
* @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table
* @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table
* @usb_to_ipa_hdl: save handle for IPA pipe operations
@@ -129,10 +124,6 @@ enum ecm_ipa_operation {
*/
struct ecm_ipa_dev {
struct net_device *net;
- u32 tx_enable;
- u32 rx_enable;
- u32 rm_enable;
- bool dma_enable;
struct dentry *directory;
uint32_t eth_ipv4_hdr_hdl;
uint32_t eth_ipv6_hdr_hdl;
@@ -167,32 +158,16 @@ static void ecm_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net);
static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx);
static void ecm_ipa_destory_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx);
-static bool rx_filter(struct sk_buff *skb);
-static bool tx_filter(struct sk_buff *skb);
-static bool rm_enabled(struct ecm_ipa_dev *ecm_ipa_ctx);
static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx);
static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx);
static netdev_tx_t ecm_ipa_start_xmit(struct sk_buff *skb,
struct net_device *net);
-static int ecm_ipa_debugfs_stall_open(struct inode *inode,
- struct file *file);
-static ssize_t ecm_ipa_debugfs_stall_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file);
-static ssize_t ecm_ipa_debugfs_enable_write_dma(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-static int ecm_ipa_debugfs_dma_open(struct inode *inode, struct file *file);
-static ssize_t ecm_ipa_debugfs_enable_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
-static ssize_t ecm_ipa_debugfs_enable_read(struct file *file,
- char __user *ubuf, size_t count, loff_t *ppos);
static ssize_t ecm_ipa_debugfs_atomic_read(struct file *file,
char __user *ubuf, size_t count, loff_t *ppos);
static int ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx);
static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx);
static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl);
-static int ecm_ipa_ep_registers_dma_cfg(u32 usb_to_ipa_hdl,
- enum ipa_client_type prod_client);
static int ecm_ipa_set_device_ethernet_addr(u8 *dev_ethaddr,
u8 device_ethaddr[]);
static enum ecm_ipa_state ecm_ipa_next_state(enum ecm_ipa_state current_state,
@@ -210,22 +185,11 @@ static const struct net_device_ops ecm_ipa_netdev_ops = {
.ndo_get_stats = ecm_ipa_get_stats,
};
-const struct file_operations ecm_ipa_debugfs_dma_ops = {
- .open = ecm_ipa_debugfs_dma_open,
- .read = ecm_ipa_debugfs_enable_read,
- .write = ecm_ipa_debugfs_enable_write_dma,
-};
-
const struct file_operations ecm_ipa_debugfs_atomic_ops = {
.open = ecm_ipa_debugfs_atomic_open,
.read = ecm_ipa_debugfs_atomic_read,
};
-const struct file_operations ecm_ipa_debugfs_stall_ops = {
- .open = ecm_ipa_debugfs_stall_open,
- .write = ecm_ipa_debugfs_stall_write,
-};
-
static void ecm_ipa_msg_free_cb(void *buff, u32 len, u32 type)
{
kfree(buff);
@@ -286,9 +250,6 @@ int ecm_ipa_init(struct ecm_ipa_params *params)
ECM_IPA_DEBUG("ecm_ipa_ctx (private) = %p\n", ecm_ipa_ctx);
ecm_ipa_ctx->net = net;
- ecm_ipa_ctx->tx_enable = true;
- ecm_ipa_ctx->rx_enable = true;
- ecm_ipa_ctx->rm_enable = true;
ecm_ipa_ctx->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
ecm_ipa_ctx->outstanding_low = DEFAULT_OUTSTANDING_LOW;
atomic_set(&ecm_ipa_ctx->outstanding_pkts, 0);
@@ -604,12 +565,6 @@ static netdev_tx_t ecm_ipa_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (unlikely(tx_filter(skb))) {
- dev_kfree_skb_any(skb);
- ECM_IPA_DEBUG("packet got filtered out on Tx path\n");
- status = NETDEV_TX_OK;
- goto out;
- }
ret = resource_request(ecm_ipa_ctx);
if (ret) {
ECM_IPA_DEBUG("Waiting to resource\n");
@@ -683,11 +638,6 @@ static void ecm_ipa_packet_receive_notify(void *priv,
skb->dev = ecm_ipa_ctx->net;
skb->protocol = eth_type_trans(skb, ecm_ipa_ctx->net);
- if (rx_filter(skb)) {
- ECM_IPA_DEBUG("packet got filtered out on Rx path\n");
- dev_kfree_skb_any(skb);
- return;
- }
result = netif_rx(skb);
if (result)
@@ -1150,42 +1100,15 @@ static void ecm_ipa_destory_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx)
ECM_IPA_LOG_EXIT();
}
-static bool rx_filter(struct sk_buff *skb)
-{
- struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(skb->dev);
- return !ecm_ipa_ctx->rx_enable;
-}
-
-static bool tx_filter(struct sk_buff *skb)
-{
- struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(skb->dev);
- return !ecm_ipa_ctx->tx_enable;
-}
-
-static bool rm_enabled(struct ecm_ipa_dev *ecm_ipa_ctx)
-{
- return ecm_ipa_ctx->rm_enable;
-}
-
static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx)
{
- int result = 0;
-
- if (!rm_enabled(ecm_ipa_ctx))
- goto out;
- result = ipa_rm_inactivity_timer_request_resource(
- IPA_RM_RESOURCE_STD_ECM_PROD);
-out:
- return result;
+ return ipa_rm_inactivity_timer_request_resource(
+ IPA_RM_RESOURCE_STD_ECM_PROD);
}
static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx)
{
- if (!rm_enabled(ecm_ipa_ctx))
- goto out;
ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_STD_ECM_PROD);
-out:
- return;
}
/**
@@ -1258,45 +1181,6 @@ static void ecm_ipa_tx_timeout(struct net_device *net)
net->stats.tx_errors++;
}
-static int ecm_ipa_debugfs_stall_open(struct inode *inode,
- struct file *file)
-{
- ECM_IPA_LOG_ENTRY();
-
- ECM_IPA_LOG_EXIT();
-
- return 0;
-}
-
-static ssize_t ecm_ipa_debugfs_stall_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- u32 cmdq_cfg_mmio_phy = 0xFD4E3038;
- void *cmdq_cfg_mmio_virt;
- int result;
- bool val = 0;
-
- ECM_IPA_LOG_ENTRY();
-
- file->private_data = &val;
- result = ecm_ipa_debugfs_enable_write(file, buf, count, ppos);
-
- cmdq_cfg_mmio_virt = ioremap(cmdq_cfg_mmio_phy, sizeof(u32));
- if (!cmdq_cfg_mmio_virt) {
- ECM_IPA_ERROR("fail on mmio for cmdq_cfg_mmio_phy=0x%x",
- cmdq_cfg_mmio_phy);
- return result;
- }
-
- iowrite32(val, cmdq_cfg_mmio_virt);
- ECM_IPA_DEBUG("Value %d was written to cfgq", val);
-
- ECM_IPA_LOG_EXIT();
-
- return result;
-
-}
-
static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file)
{
struct ecm_ipa_dev *ecm_ipa_ctx = inode->i_private;
@@ -1306,78 +1190,6 @@ static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t ecm_ipa_debugfs_enable_write_dma(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- struct ecm_ipa_dev *ecm_ipa_ctx = file->private_data;
- int result;
- ECM_IPA_LOG_ENTRY();
- file->private_data = &ecm_ipa_ctx->dma_enable;
- result = ecm_ipa_debugfs_enable_write(file, buf, count, ppos);
- if (ecm_ipa_ctx->dma_enable)
- ecm_ipa_ep_registers_dma_cfg(ecm_ipa_ctx->usb_to_ipa_hdl,
- ecm_ipa_ctx->ipa_to_usb_client);
- else
- ecm_ipa_ep_registers_cfg(ecm_ipa_ctx->usb_to_ipa_hdl,
- ecm_ipa_ctx->usb_to_ipa_hdl);
- ECM_IPA_LOG_EXIT();
- return result;
-}
-
-static int ecm_ipa_debugfs_dma_open(struct inode *inode, struct file *file)
-{
- struct ecm_ipa_dev *ecm_ipa_ctx = inode->i_private;
- ECM_IPA_LOG_ENTRY();
- file->private_data = ecm_ipa_ctx;
- ECM_IPA_LOG_EXIT();
- return 0;
-}
-
-static ssize_t ecm_ipa_debugfs_enable_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
-{
- unsigned long missing;
- char input;
- bool *enable = file->private_data;
- if (count != sizeof(input) + 1) {
- ECM_IPA_ERROR("wrong input length(%zd)\n", count);
- return -EINVAL;
- }
- if (!buf) {
- ECM_IPA_ERROR("Bad argument\n");
- return -EINVAL;
- }
- missing = copy_from_user(&input, buf, 1);
- if (missing)
- return -EFAULT;
- ECM_IPA_DEBUG("input received %c\n", input);
- *enable = input - '0';
- ECM_IPA_DEBUG("value was set to %d\n", *enable);
- return count;
-}
-
-static ssize_t ecm_ipa_debugfs_enable_read(struct file *file,
- char __user *ubuf, size_t count, loff_t *ppos)
-{
- int nbytes;
- int size = 0;
- int ret;
- loff_t pos;
- u8 enable_str[sizeof(char)*3] = {0};
- bool *enable = file->private_data;
- pos = *ppos;
- nbytes = scnprintf(enable_str, sizeof(enable_str), "%d\n", *enable);
- ret = simple_read_from_buffer(ubuf, count, ppos, enable_str, nbytes);
- if (ret < 0) {
- ECM_IPA_ERROR("simple_read_from_buffer problem\n");
- return ret;
- }
- size += ret;
- count -= nbytes;
- *ppos = pos + size;
- return size;
-}
-
static ssize_t ecm_ipa_debugfs_atomic_read(struct file *file,
char __user *ubuf, size_t count, loff_t *ppos)
{
@@ -1394,7 +1206,6 @@ static int ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx)
{
const mode_t flags_read_write = S_IRUGO | S_IWUGO;
const mode_t flags_read_only = S_IRUGO;
- const mode_t flags_write_only = S_IWUGO;
struct dentry *file;
ECM_IPA_LOG_ENTRY();
@@ -1407,24 +1218,6 @@ static int ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx)
ECM_IPA_ERROR("could not create debugfs directory entry\n");
goto fail_directory;
}
- file = debugfs_create_bool("tx_enable", flags_read_write,
- ecm_ipa_ctx->directory, &ecm_ipa_ctx->tx_enable);
- if (!file) {
- ECM_IPA_ERROR("could not create debugfs tx file\n");
- goto fail_file;
- }
- file = debugfs_create_bool("rx_enable", flags_read_write,
- ecm_ipa_ctx->directory, &ecm_ipa_ctx->rx_enable);
- if (!file) {
- ECM_IPA_ERROR("could not create debugfs rx file\n");
- goto fail_file;
- }
- file = debugfs_create_bool("rm_enable", flags_read_write,
- ecm_ipa_ctx->directory, &ecm_ipa_ctx->rm_enable);
- if (!file) {
- ECM_IPA_ERROR("could not create debugfs rm file\n");
- goto fail_file;
- }
file = debugfs_create_u8("outstanding_high", flags_read_write,
ecm_ipa_ctx->directory, &ecm_ipa_ctx->outstanding_high);
if (!file) {
@@ -1437,13 +1230,6 @@ static int ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx)
ECM_IPA_ERROR("could not create outstanding_low file\n");
goto fail_file;
}
- file = debugfs_create_file("dma_enable", flags_read_write,
- ecm_ipa_ctx->directory,
- ecm_ipa_ctx, &ecm_ipa_debugfs_dma_ops);
- if (!file) {
- ECM_IPA_ERROR("could not create debugfs dma file\n");
- goto fail_file;
- }
file = debugfs_create_file("outstanding", flags_read_only,
ecm_ipa_ctx->directory,
ecm_ipa_ctx, &ecm_ipa_debugfs_atomic_ops);
@@ -1452,14 +1238,7 @@ static int ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx)
goto fail_file;
}
- file = debugfs_create_file("stall_ipa_rx_proc", flags_write_only,
- ecm_ipa_ctx->directory,
- ecm_ipa_ctx, &ecm_ipa_debugfs_stall_ops);
- if (!file) {
- ECM_IPA_ERROR("could not create stall_ipa_rx_proc file\n");
- goto fail_file;
- }
-
+ ECM_IPA_DEBUG("debugfs entries were created\n");
ECM_IPA_LOG_EXIT();
return 0;
@@ -1523,46 +1302,6 @@ out:
}
/**
- * ecm_ipa_ep_registers_dma_cfg() - configure the USB endpoints for ECM
- * DMA
- * @usb_to_ipa_hdl: handle received from ipa_connect
- *
- * This function will override the previous configuration
- * which is needed for cores that does not support blocks logic
- * Note that client handles are the actual pipe index
- */
-static int ecm_ipa_ep_registers_dma_cfg(u32 usb_to_ipa_hdl,
- enum ipa_client_type prod_client)
-{
- int result = 0;
- struct ipa_ep_cfg_mode cfg_mode;
- u32 apps_to_ipa_hdl = 2;
-
- ECM_IPA_LOG_ENTRY();
-
- memset(&cfg_mode, 0 , sizeof(cfg_mode));
- cfg_mode.mode = IPA_DMA;
- cfg_mode.dst = prod_client;
- result = ipa_cfg_ep_mode(apps_to_ipa_hdl, &cfg_mode);
- if (result) {
- ECM_IPA_ERROR("failed to configure Apps to IPA\n");
- goto out;
- }
- memset(&cfg_mode, 0 , sizeof(cfg_mode));
- cfg_mode.mode = IPA_DMA;
- cfg_mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
- result = ipa_cfg_ep_mode(usb_to_ipa_hdl, &cfg_mode);
- if (result) {
- ECM_IPA_ERROR("failed to configure USB to IPA\n");
- goto out;
- }
- ECM_IPA_DEBUG("end-point registers successfully configured\n");
-out:
- ECM_IPA_LOG_EXIT();
- return result;
-}
-
-/**
* ecm_ipa_set_device_ethernet_addr() - set device etherenet address
* @dev_ethaddr: device etherenet address
*
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index db1ca629cbd6..f0c831b4b3d9 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -21,6 +21,26 @@ config ATH10K_AHB
---help---
This module adds support for AHB bus
+config ATH10K_TARGET_SNOC
+ tristate "Atheros ath10k SNOC support"
+ depends on ATH10K
+ ---help---
+ This module adds support for the Integrated WCN3990 WLAN module,
+	  WCN3990 has an integrated 802.11ac chipset with SNOC bus interface.
+ This module also adds support to register the WCN3990 wlan module
+ with MAC80211 network subsystem.
+
+config ATH10K_SNOC
+ bool "Enable/disable Atheros ath10k SNOC bus interface support"
+ depends on ATH10K
+ depends on ATH10K_TARGET_SNOC
+ ---help---
+	  This module adds support for WLAN SNOC bus registration, WLAN
+ copy engine configuration for the WCN3990 chipset, WLAN hardware
+ shadow register configuration, create host to target communication
+ interface to interact with WLAN firmware, WLAN module interface
+ control and data receive(RX)/transmit(TX) control.
+
config ATH10K_DEBUG
bool "Atheros ath10k debugging"
depends on ATH10K
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 930fadd940d8..25b23bf2c8e6 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -24,6 +24,9 @@ ath10k_core-$(CONFIG_PM) += wow.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
ce.o
+obj-$(CONFIG_ATH10K_TARGET_SNOC) += ath10k_snoc.o
+ath10k_snoc-y += snoc.o \
+ ce.o
ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index a0dd6bd8165c..e5213de8a686 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -298,10 +298,22 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
if (flags & CE_SEND_FLAG_GATHER)
desc_flags |= CE_DESC_FLAGS_GATHER;
+
if (flags & CE_SEND_FLAG_BYTE_SWAP)
desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
- sdesc.addr = __cpu_to_le32(buffer);
+ if (QCA_REV_WCN3990(ar)) {
+ flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
+ sdesc.addr_lo = __cpu_to_le32(buffer);
+ sdesc.addr_hi = flags;
+ if (flags & CE_SEND_FLAG_GATHER)
+ sdesc.addr_hi |= CE_WCN3990_DESC_FLAGS_GATHER;
+ else
+ sdesc.addr_hi &= ~CE_WCN3990_DESC_FLAGS_GATHER;
+ } else {
+ sdesc.addr = __cpu_to_le32(buffer);
+ }
+
sdesc.nbytes = __cpu_to_le16(nbytes);
sdesc.flags = __cpu_to_le16(desc_flags);
@@ -410,7 +422,13 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
return -ENOSPC;
- desc->addr = __cpu_to_le32(paddr);
+ if (QCA_REV_WCN3990(ar)) {
+ desc->addr = paddr;
+ desc->addr &= CE_DESC_37BIT_ADDR_MASK;
+ } else {
+ desc->addr = __cpu_to_le32(paddr);
+ }
+
desc->nbytes = 0;
dest_ring->per_transfer_context[write_index] = ctx;
@@ -735,8 +753,12 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
int ce_id;
u32 intr_summary;
+ struct ath10k_ce_pipe *ce_state;
- intr_summary = CE_INTERRUPT_SUMMARY(ar);
+ if (ar->target_version == ATH10K_HW_WCN3990)
+ intr_summary = 0xFFF;
+ else
+ intr_summary = CE_INTERRUPT_SUMMARY(ar);
for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
if (intr_summary & (1 << ce_id))
@@ -745,8 +767,11 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
/* no intr pending on this CE */
continue;
- ath10k_ce_per_engine_service(ar, ce_id);
+ ce_state = ((struct ath10k_ce_pipe *)ar->ce_states + ce_id);
+ if (ce_state->send_cb || ce_state->recv_cb)
+ ath10k_ce_per_engine_service(ar, ce_id);
}
+
}
/*
@@ -798,6 +823,26 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar)
((struct ath10k_ce_pipe *)ar->ce_states + ce_id));
}
+void ath10k_ce_enable_per_ce_interrupts(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 offset;
+ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ offset = HOST_IE_ADDRESS + ctrl_addr;
+ ar->bus_write32(ar, offset, 1);
+ ar->bus_read32(ar, offset);
+}
+
+void ath10k_ce_disable_per_ce_interrupts(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 offset;
+ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ offset = HOST_IE_ADDRESS + ctrl_addr;
+ ar->bus_write32(ar, offset, 0);
+ ar->bus_read32(ar, offset);
+}
+
static int ath10k_ce_init_src_ring(struct ath10k *ar,
unsigned int ce_id,
const struct ce_attr *attr)
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 3c6dba648574..1b49db14d387 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -38,6 +38,10 @@ struct ath10k_ce_pipe;
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
+#define CE_WCN3990_DESC_FLAGS_GATHER BIT(31)
+
+#define CE_DESC_FLAGS_GET_MASK 0x1F
+#define CE_DESC_37BIT_ADDR_MASK 0x1FFFFFFFFF
/* Following desc flags are used in QCA99X0 */
#define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2)
@@ -54,7 +58,13 @@ struct ce_desc {
};
#else
struct ce_desc {
- __le64 addr;
+ union {
+ __le64 addr;
+ struct {
+ __le32 addr_lo;
+ __le32 addr_hi;
+ };
+ };
u16 nbytes; /* length in register map */
u16 flags; /* fw_metadata_high */
u32 toeplitz_hash_result;
@@ -239,6 +249,8 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_disable_interrupts(struct ath10k *ar);
void ath10k_ce_enable_interrupts(struct ath10k *ar);
+void ath10k_ce_disable_per_ce_interrupts(struct ath10k *ar, unsigned int ce_id);
+void ath10k_ce_enable_per_ce_interrupts(struct ath10k *ar, unsigned int ce_id);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index bf77ac66c79e..871329c79a46 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2013, 2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -305,6 +305,21 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
},
+ {
+ .id = ATH10K_HW_WCN3990,
+ .dev_id = 0,
+ .name = "wcn3990 hw1.0",
+ .continuous_frag_desc = true,
+ .tx_chain_mask = 0x7,
+ .rx_chain_mask = 0x7,
+ .max_spatial_stream = 4,
+ .fw = {
+ .dir = WCN3990_HW_1_0_FW_DIR,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &wcn3990_ops,
+ .decap_align_bytes = 1,
+ },
};
static const char *const ath10k_core_fw_feature_str[] = {
@@ -1263,12 +1278,14 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
data += ie_len;
}
- if (!fw_file->firmware_data ||
- !fw_file->firmware_len) {
- ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
- ar->hw_params.fw.dir, name);
- ret = -ENOMEDIUM;
- goto err;
+ if (ar->is_bmi) {
+ if (!fw_file->firmware_data ||
+ !fw_file->firmware_len) {
+ ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
+ ar->hw_params.fw.dir, name);
+ ret = -ENOMEDIUM;
+ goto err;
+ }
}
return 0;
@@ -1282,8 +1299,10 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
{
int ret;
- /* calibration file is optional, don't check for any errors */
- ath10k_fetch_cal_file(ar);
+ if (ar->is_bmi) {
+ /* calibration file is optional, don't check for any errors */
+ ath10k_fetch_cal_file(ar);
+ }
ar->fw_api = 5;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
@@ -1574,8 +1593,6 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
{
struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
- init_fw_param(ar, &ar->normal_mode_fw.fw_file);
-
if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) &&
!test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
@@ -1800,40 +1817,44 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
ar->running_fw = fw;
- ath10k_bmi_start(ar);
+ if (ar->is_bmi) {
- if (ath10k_init_configure_target(ar)) {
- status = -EINVAL;
- goto err;
- }
+ ath10k_bmi_start(ar);
- status = ath10k_download_cal_data(ar);
- if (status)
- goto err;
+ if (ath10k_init_configure_target(ar)) {
+ status = -EINVAL;
+ goto err;
+ }
- /* Some of of qca988x solutions are having global reset issue
- * during target initialization. Bypassing PLL setting before
- * downloading firmware and letting the SoC run on REF_CLK is
- * fixing the problem. Corresponding firmware change is also needed
- * to set the clock source once the target is initialized.
- */
- if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
- ar->running_fw->fw_file.fw_features)) {
- status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
- if (status) {
- ath10k_err(ar, "could not write to skip_clock_init: %d\n",
- status);
+ status = ath10k_download_cal_data(ar);
+ if (status)
goto err;
+
+		/* Some of the qca988x solutions are having global reset issue
+ * during target initialization. Bypassing PLL setting before
+ * downloading firmware and letting the SoC run on REF_CLK is
+ * fixing the problem. Corresponding firmware change is also
+ * needed to set the clock source once the target is
+ * initialized.
+ */
+ if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
+ ar->running_fw->fw_file.fw_features)) {
+ status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
+ if (status) {
+ ath10k_err(ar, "skip_clock_init failed: %d\n",
+ status);
+ goto err;
+ }
}
- }
- status = ath10k_download_fw(ar);
- if (status)
- goto err;
+ status = ath10k_download_fw(ar);
+ if (status)
+ goto err;
- status = ath10k_init_uart(ar);
- if (status)
- goto err;
+ status = ath10k_init_uart(ar);
+ if (status)
+ goto err;
+ }
ar->htc.htc_ops.target_send_suspend_complete =
ath10k_send_suspend_complete;
@@ -1844,9 +1865,11 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err;
}
- status = ath10k_bmi_done(ar);
- if (status)
- goto err;
+ if (ar->is_bmi) {
+ status = ath10k_bmi_done(ar);
+ if (status)
+ goto err;
+ }
status = ath10k_wmi_attach(ar);
if (status) {
@@ -1968,10 +1991,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
* possible to implicitly make it correct by creating a dummy vdev and
* then deleting it.
*/
- status = ath10k_core_reset_rx_filter(ar);
- if (status) {
- ath10k_err(ar, "failed to reset rx filter: %d\n", status);
- goto err_hif_stop;
+ if (!QCA_REV_WCN3990(ar)) {
+ status = ath10k_core_reset_rx_filter(ar);
+ if (status) {
+ ath10k_err(ar, "failed to reset rx filter: %d\n",
+ status);
+ goto err_hif_stop;
+ }
}
/* If firmware indicates Full Rx Reorder support it must be used in a
@@ -2076,16 +2102,17 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
return ret;
}
- memset(&target_info, 0, sizeof(target_info));
- ret = ath10k_bmi_get_target_info(ar, &target_info);
- if (ret) {
- ath10k_err(ar, "could not get target info (%d)\n", ret);
- goto err_power_down;
+ if (ar->is_bmi) {
+ memset(&target_info, 0, sizeof(target_info));
+ ret = ath10k_bmi_get_target_info(ar, &target_info);
+ if (ret) {
+ ath10k_err(ar, "could not get target info (%d)\n", ret);
+ goto err_power_down;
+ }
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
}
- ar->target_version = target_info.version;
- ar->hw->wiphy->hw_version = target_info.version;
-
ret = ath10k_init_hw_params(ar);
if (ret) {
ath10k_err(ar, "could not get hw params (%d)\n", ret);
@@ -2099,35 +2126,37 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
}
BUILD_BUG_ON(sizeof(ar->hw->wiphy->fw_version) !=
- sizeof(ar->normal_mode_fw.fw_file.fw_version));
- memcpy(ar->hw->wiphy->fw_version, ar->normal_mode_fw.fw_file.fw_version,
+ sizeof(ar->normal_mode_fw.fw_file.fw_version));
+ memcpy(ar->hw->wiphy->fw_version,
+ ar->normal_mode_fw.fw_file.fw_version,
sizeof(ar->hw->wiphy->fw_version));
-
ath10k_debug_print_hwfw_info(ar);
- ret = ath10k_core_pre_cal_download(ar);
- if (ret) {
- /* pre calibration data download is not necessary
- * for all the chipsets. Ignore failures and continue.
- */
- ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "could not load pre cal data: %d\n", ret);
- }
+ if (ar->is_bmi) {
+ ret = ath10k_core_pre_cal_download(ar);
+ if (ret) {
+ /* pre calibration data download is not necessary
+ * for all the chipsets. Ignore failures and continue.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "could not load pre cal data: %d\n", ret);
+ }
- ret = ath10k_core_get_board_id_from_otp(ar);
- if (ret && ret != -EOPNOTSUPP) {
- ath10k_err(ar, "failed to get board id from otp: %d\n",
- ret);
- goto err_free_firmware_files;
- }
+ ret = ath10k_core_get_board_id_from_otp(ar);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_err(ar, "failed to get board id from otp: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
- ret = ath10k_core_fetch_board_file(ar);
- if (ret) {
- ath10k_err(ar, "failed to fetch board file: %d\n", ret);
- goto err_free_firmware_files;
- }
+ ret = ath10k_core_fetch_board_file(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+ goto err_free_firmware_files;
+ }
- ath10k_debug_print_board_info(ar);
+ ath10k_debug_print_board_info(ar);
+ }
ret = ath10k_core_init_firmware_features(ar);
if (ret) {
@@ -2136,11 +2165,14 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_free_firmware_files;
}
- ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file);
- if (ret) {
- ath10k_err(ar, "failed to initialize code swap segment: %d\n",
- ret);
- goto err_free_firmware_files;
+ if (ar->is_bmi) {
+ ret = ath10k_swap_code_seg_init(ar,
+ &ar->normal_mode_fw.fw_file);
+ if (ret) {
+ ath10k_err(ar, "failed to init code swap segment: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
}
mutex_lock(&ar->conf_mutex);
@@ -2285,6 +2317,12 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ar->hif.ops = hif_ops;
ar->hif.bus = bus;
+ /* By default, assume bmi is set, as most of the existing
+ * chip sets are based on this, set to false explicitly
+ * when current chip set does not support.
+ */
+ ar->is_bmi = true;
+
switch (hw_rev) {
case ATH10K_HW_QCA988X:
case ATH10K_HW_QCA9887:
@@ -2309,6 +2347,13 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ar->regs = &qca4019_regs;
ar->hw_values = &qca4019_values;
break;
+ case ATH10K_HW_WCN3990:
+ ar->regs = &wcn3990_regs;
+ ar->hw_values = &wcn3990_values;
+ /* WCN3990 chip set is non bmi based */
+ ar->is_bmi = false;
+ ar->fw_flags = &wcn3990_fw_flags;
+ break;
default:
ath10k_err(ar, "unsupported core hardware revision %d\n",
hw_rev);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 9eede4266721..9310de85f2a0 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2013, 2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -661,6 +661,10 @@ enum ath10k_tx_pause_reason {
ATH10K_TX_PAUSE_MAX,
};
+struct fw_flag {
+ u32 flags;
+};
+
struct ath10k_fw_file {
const struct firmware *firmware;
@@ -919,6 +923,9 @@ struct ath10k {
u32 (*bus_read32)(void *ar, u32 offset);
spinlock_t ce_lock; /* lock for CE access */
void *ce_states;
+ struct fw_flag *fw_flags;
+ /* set for bmi chip sets */
+ bool is_bmi;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 9e79d52e18cf..861446a41066 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -26,7 +26,7 @@ struct ath10k_hif_sg_item {
u16 transfer_id;
void *transfer_context; /* NULL = tx completion callback not called */
void *vaddr; /* for debugging mostly */
- u32 paddr;
+ dma_addr_t paddr;
u16 len;
};
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 78df1d1ad67c..42e97d99a3d1 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -37,7 +37,7 @@
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static struct sk_buff *
-ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
+ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
struct ath10k_skb_rxcb *rxcb;
@@ -130,13 +130,16 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
rxcb = ATH10K_SKB_RXCB(skb);
rxcb->paddr = paddr;
htt->rx_ring.netbufs_ring[idx] = skb;
- htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
+ if (QCA_REV_WCN3990(htt->ar))
+ htt->rx_ring.paddrs_ring[idx] = __cpu_to_le64(paddr);
+ else
+ htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
htt->rx_ring.fill_cnt++;
if (htt->rx_ring.in_ord_rx) {
hash_add(htt->rx_ring.skb_table,
&ATH10K_SKB_RXCB(skb)->hlist,
- (u32)paddr);
+ paddr);
}
num--;
@@ -382,7 +385,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
}
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
- u32 paddr)
+ u64 paddr)
{
struct ath10k *ar = htt->ar;
struct ath10k_skb_rxcb *rxcb;
@@ -417,7 +420,7 @@ static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
struct sk_buff *msdu;
int msdu_count;
bool is_offload;
- u32 paddr;
+ u64 paddr;
lockdep_assert_held(&htt->rx_ring.lock);
@@ -427,6 +430,8 @@ static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
while (msdu_count--) {
#ifdef CONFIG_ATH10K_SNOC
paddr = __le32_to_cpu(msdu_desc->msdu_paddr_lo);
+ paddr |= ((u64)(msdu_desc->msdu_paddr_hi &
+ HTT_WCN3990_PADDR_MASK) << 32);
#else
paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
#endif
@@ -938,7 +943,7 @@ static void ath10k_process_rx(struct ath10k *ar,
*status = *rx_status;
ath10k_dbg(ar, ATH10K_DBG_DATA,
- "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
@@ -1566,7 +1571,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
}
num_msdus = skb_queue_len(&amsdu);
- ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 4332a27dfa9a..0cd1068b0beb 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015, 2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -216,6 +216,10 @@ const struct ath10k_hw_values wcn3990_values = {
.ce_desc_meta_data_lsb = 4,
};
/* Firmware flag bitmask for WCN3990.
 * NOTE(review): 0x82E is a magic value; it appears to be consumed as raw
 * scan_ctrl_flags in the WMI-TLV start-scan path (ar->fw_flags->flags) —
 * confirm against the firmware interface documentation.
 */
struct fw_flag wcn3990_fw_flags = {
	.flags = 0x82E,
};
+
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
{
@@ -267,3 +271,5 @@ static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
const struct ath10k_hw_ops qca99x0_ops = {
.rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
};
+
+const struct ath10k_hw_ops wcn3990_ops = {0};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index b7d9c23e17fa..2e7d90ef53f2 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2013,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -128,6 +128,10 @@ enum qca9377_chip_id_rev {
#define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234
+/* WCN3990 1.0 definitions */
+#define WCN3990_HW_1_0_DEV_VERSION ATH10K_HW_WCN3990
+#define WCN3990_HW_1_0_FW_DIR "/etc/firmware"
+
#define ATH10K_FW_API2_FILE "firmware-2.bin"
#define ATH10K_FW_API3_FILE "firmware-3.bin"
@@ -265,6 +269,8 @@ extern const struct ath10k_hw_regs qca99x0_regs;
extern const struct ath10k_hw_regs qca4019_regs;
extern const struct ath10k_hw_regs wcn3990_regs;
+extern struct fw_flag wcn3990_fw_flags;
+
struct ath10k_hw_values {
u32 rtc_state_val_on;
u8 ce_count;
@@ -432,6 +438,7 @@ struct ath10k_hw_ops {
extern const struct ath10k_hw_ops qca988x_ops;
extern const struct ath10k_hw_ops qca99x0_ops;
+extern const struct ath10k_hw_ops wcn3990_ops;
static inline int
ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 7a5bfeea8f3d..2b372ae63b60 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2013, 2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -2798,11 +2798,13 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
arvif->def_wep_key_idx = -1;
- ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
- if (ret) {
- ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
- arvif->vdev_id, ret);
- return;
+ if (!QCA_REV_WCN3990(ar)) {
+ ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
}
arvif->is_up = false;
@@ -3444,7 +3446,9 @@ ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
return ATH10K_MAC_TX_HTT;
case ATH10K_HW_TXRX_MGMT:
if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
- ar->running_fw->fw_file.fw_features))
+ ar->running_fw->fw_file.fw_features) ||
+ test_bit(WMI_SERVICE_MGMT_TX_WMI,
+ ar->wmi.svc_map))
return ATH10K_MAC_TX_WMI_MGMT;
else if (ar->htt.target_version_major >= 3)
return ATH10K_MAC_TX_HTT;
@@ -7987,8 +7991,10 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
- if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
- ar->hw->netdev_features = NETIF_F_HW_CSUM;
+ if (!QCA_REV_WCN3990(ar)) {
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+ }
if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
/* Init ath dfs pattern detector */
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 0c250f8d45ce..072e008900e6 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -859,6 +859,8 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
case ATH10K_HW_QCA4019:
val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
break;
+ default:
+ break;
}
val |= 0x100000 | (addr & 0xfffff);
@@ -1363,8 +1365,8 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
for (i = 0; i < n_items - 1; i++) {
ath10k_dbg(ar, ATH10K_DBG_PCI,
- "pci tx item %d paddr 0x%08x len %d n_items %d\n",
- i, items[i].paddr, items[i].len, n_items);
+ "pci tx item %d paddr %pad len %d n_items %d\n",
+ i, &items[i].paddr, items[i].len, n_items);
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
items[i].vaddr, items[i].len);
@@ -1381,8 +1383,8 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
/* `i` is equal to `n_items -1` after for() */
ath10k_dbg(ar, ATH10K_DBG_PCI,
- "pci tx item %d paddr 0x%08x len %d n_items %d\n",
- i, items[i].paddr, items[i].len, n_items);
+ "pci tx item %d paddr %pad len %d n_items %d\n",
+ i, &items[i].paddr, items[i].len, n_items);
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
items[i].vaddr, items[i].len);
@@ -1591,6 +1593,8 @@ void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
* to mask irq/MSI.
*/
break;
+ default:
+ break;
}
}
@@ -1617,6 +1621,8 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
* to unmask irq/MSI.
*/
break;
+ default:
+ break;
}
}
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 44d9e6c053a6..487c243e18ec 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1,4 +1,6 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -12,6 +14,11 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+
+#include "core.h"
#include "debug.h"
#include "hif.h"
#include "htc.h"
@@ -20,7 +27,31 @@
#include <soc/qcom/icnss.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#define WCN3990_MAX_IRQ 12
+const char *ce_name[WCN3990_MAX_IRQ] = {
+ "WLAN_CE_0",
+ "WLAN_CE_1",
+ "WLAN_CE_2",
+ "WLAN_CE_3",
+ "WLAN_CE_4",
+ "WLAN_CE_5",
+ "WLAN_CE_6",
+ "WLAN_CE_7",
+ "WLAN_CE_8",
+ "WLAN_CE_9",
+ "WLAN_CE_10",
+ "WLAN_CE_11",
+};
+
+#define ATH10K_SNOC_TARGET_WAIT 3000
+#define ATH10K_SNOC_NUM_WARM_RESET_ATTEMPTS 3
+#define SNOC_HIF_POWER_DOWN_DELAY 30
+static void ath10k_snoc_buffer_cleanup(struct ath10k *ar);
+static int ath10k_snoc_init_irq(struct ath10k *ar);
+static int ath10k_snoc_deinit_irq(struct ath10k *ar);
+static int ath10k_snoc_request_irq(struct ath10k *ar);
+static void ath10k_snoc_free_irq(struct ath10k *ar);
static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
@@ -366,13 +397,31 @@ static struct service_to_pipe target_service_to_ce_map_wlan[] = {
},
};
+#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
+#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
+
+static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = { };
+
/* Write a 32-bit value to a device register at @offset.
 * The write is silently dropped if the bus private area is not set up.
 */
void ath10k_snoc_write32(void *ar, u32 offset, u32 value)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv((struct ath10k *)ar);

	if (ar_snoc)
		iowrite32(value, ar_snoc->mem + offset);
}
/* Read a 32-bit device register at @offset.
 * NOTE(review): -EINVAL is truncated into the unsigned return value when
 * the private area is missing; callers cannot distinguish it from
 * register data — confirm whether a dedicated sentinel is intended.
 */
u32 ath10k_snoc_read32(void *ar, u32 offset)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv((struct ath10k *)ar);

	if (!ar_snoc)
		return -EINVAL;

	return ioread32(ar_snoc->mem + offset);
}
@@ -456,7 +505,6 @@ static void ath10k_snoc_rx_post(struct ath10k *ar)
/* Timer callback: retry posting RX buffers that could not be posted
 * earlier (e.g. due to transient allocation failure).
 */
static void ath10k_snoc_rx_replenish_retry(unsigned long ptr)
{
	ath10k_snoc_rx_post((struct ath10k *)ptr);
}
@@ -540,7 +588,6 @@ static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
struct sk_buff *skb;
while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
- /* no need to call tx completion for NULL pointers */
if (!skb)
continue;
@@ -558,58 +605,608 @@ static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
/* Completion callback for the HTT RX (target->host) pipe. */
static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
{
	/* CE4 polling needs to be done whenever CE pipe which transports
	 * HTT Rx (target->host) is processed.
	 */
	ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
	ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
}
/* Queue a scatter-gather list of @n_items buffers on TX pipe @pipe_id.
 * All items are posted atomically under ce_lock: if any send fails, the
 * already-posted descriptors are reverted so the ring is unchanged.
 * Returns 0 on success, -ENOBUFS if the ring lacks space, or the
 * ath10k_ce_send_nolock() error.
 */
static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				 struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_snoc_pipe *snoc_pipe;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	if (!ar_snoc)
		return -EINVAL;

	snoc_pipe = &ar_snoc->pipe_info[pipe_id];
	ce_pipe = snoc_pipe->ce_hdl;
	src_ring = ce_pipe->src_ring;
	spin_lock_bh(&ar_snoc->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* Make sure the whole list fits before posting anything. */
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	/* All but the last item carry the GATHER flag so the CE treats
	 * the list as one logical transfer.
	 */
	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_SNOC,
			   "snoc tx item %d paddr %pad len %d n_items %d\n",
			   i, &items[i].paddr, items[i].len, n_items);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* Final item: no GATHER flag, which completes the transfer. */
	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "snoc tx item %d paddr %pad len %d n_items %d\n",
		   i, &items[i].paddr, items[i].len, n_items);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ar_snoc->ce_lock);
	return 0;

err:
	/* Roll back every descriptor posted by this call. */
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ar_snoc->ce_lock);
	return err;
}
/* Return the number of free source-ring entries on TX pipe @pipe. */
static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = ar_snoc->pipe_info[pipe].ce_hdl;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ce_pipe);
}
/* Reap TX completions on @pipe. Unless @force is set, this is skipped
 * while more than half of the source ring is still free.
 */
static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
						int force)
{
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");

	if (!force) {
		int free_entries =
			ath10k_snoc_hif_get_free_queue_number(ar, pipe);

		if (free_entries > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}

	ath10k_ce_per_engine_service(ar, pipe);
}
+
/* Stop every per-CE tasklet and the RX replenish retry timer.
 * del_timer_sync() means this must not run from the timer's own context.
 */
static void ath10k_snoc_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_snoc->pipe_info[i].intr);

	del_timer_sync(&ar_snoc->rx_post_retry);
}
+
/* De-initialize every copy engine pipe (counterpart of init_pipes). */
static void ath10k_snoc_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}
+
/* Release NAPI and CE resources on teardown. */
static void ath10k_snoc_release_resource(struct ath10k *ar)
{
	netif_napi_del(&ar->napi);
	ath10k_snoc_ce_deinit(ar);
}
/* Translate an HTC @service_id into its uplink (host->target) and
 * downlink (target->host) CE pipe numbers using the static service map.
 * Returns 0 on success or -ENOENT if the service lacks a complete
 * UL+DL mapping.
 */
static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
					       u16 service_id,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			/* target -> host */
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			/* host -> target */
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			/* Same pipe number serves both directions. */
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}
/* Report the control-service (RSVD_CTRL) UL/DL pipes as the defaults. */
static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
					     u8 *ul_pipe, u8 *dl_pipe)
{
	ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");

	/* The control service is always mapped, so the lookup cannot fail. */
	ath10k_snoc_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_RSVD_CTRL,
					    ul_pipe, dl_pipe);
}
+
/* Mask all copy-engine interrupts. */
static void ath10k_snoc_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
}
+
/* Unmask all copy-engine interrupts. */
static void ath10k_snoc_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
}
+
/* Free every RX skb still parked in the pipe's destination ring,
 * unmapping its DMA buffer first. Used during interface teardown.
 */
static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = snoc_pipe->hif_ce_state;
	ce_pipe = snoc_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	/* TX-only pipes have no destination ring. */
	if (!ce_ring)
		return;

	/* Unused pipes never had buffers posted. */
	if (!snoc_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
+
+static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+{
+ struct ath10k *ar;
+ struct ath10k_snoc *ar_snoc;
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct sk_buff *skb;
+ int i;
+
+ ar = snoc_pipe->hif_ce_state;
+ ar_snoc = ath10k_snoc_priv(ar);
+ ce_pipe = snoc_pipe->ce_hdl;
+ ce_ring = ce_pipe->src_ring;
+
+ if (!ce_ring)
+ return;
+
+ if (!snoc_pipe->buf_sz)
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
+
+ ath10k_htc_tx_completion_handler(ar, skb);
+ }
+}
+
+static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int pipe_num;
+
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+ struct ath10k_snoc_pipe *pipe_info;
+
+ pipe_info = &ar_snoc->pipe_info[pipe_num];
+ ath10k_snoc_rx_pipe_cleanup(pipe_info);
+ ath10k_snoc_tx_pipe_cleanup(pipe_info);
+ }
+}
+
/* Quiesce deferred work, then drop all pending CE buffers. */
static void ath10k_snoc_flush(struct ath10k *ar)
{
	ath10k_snoc_kill_tasklet(ar);
	ath10k_snoc_buffer_cleanup(ar);
}
static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
	/* Defensive: stop is a no-op without a device. */
	if (!ar)
		return;
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
	/* Mask CE interrupts first so no new completions get scheduled,
	 * then drain tasklets/rings and park NAPI.
	 */
	ath10k_snoc_irq_disable(ar);
	ath10k_snoc_flush(ar);
	napi_synchronize(&ar->napi);
	napi_disable(&ar->napi);
}
+
+static int ath10k_snoc_alloc_pipes(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_pipe *pipe;
+ int i, ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ar_snoc->pipe_info[i];
+ pipe->ce_hdl = &ar_snoc->ce_states[i];
+ pipe->pipe_num = i;
+ pipe->hif_ce_state = ar;
+
+ ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+ if (ret) {
+ ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+
+ pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
+ }
+
+ return 0;
+}
+
/* Free all copy-engine pipes (counterpart of alloc_pipes). */
static void ath10k_snoc_free_pipes(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_free_pipe(ar, i);
}
+
/* (Re-)initialize every CE pipe from the host CE configuration.
 * Returns 0 on success or the first ath10k_ce_init_pipe() error.
 */
static int ath10k_snoc_init_pipes(struct ath10k *ar)
{
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
		if (ret) {
			ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}
	}

	return 0;
}
static void ath10k_snoc_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
	/* Grace period before asking the platform (icnss) driver to turn
	 * the WLAN hardware off.
	 * NOTE(review): the delay looks empirical — confirm it is required.
	 */
	msleep(SNOC_HIF_POWER_DOWN_DELAY);
	icnss_wlan_disable(ICNSS_OFF);
}
+
+static void ath10k_snoc_ce_tasklet(unsigned long ptr)
+{
+ struct ath10k_snoc_pipe *pipe = (struct ath10k_snoc_pipe *)ptr;
+ struct ath10k_snoc *ar_snoc = pipe->ar_snoc;
+
+ ath10k_ce_per_engine_service(ar_snoc->ar, pipe->pipe_num);
+}
+
+int ath10k_snoc_get_ce_id(struct ath10k *ar, int irq)
+{
+ int i;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ for (i = 0; i < CE_COUNT_MAX; i++) {
+ if (ar_snoc->ce_irqs[i] == irq)
+ return i;
+ }
+ ath10k_err(ar, "No matching CE id for irq %d\n", irq);
+
+ return -EINVAL;
+}
+
/* Hard-IRQ handler shared by all CE interrupt lines: defer the real
 * work to NAPI. Note this masks *all* CE interrupts, not just the one
 * that fired; they are re-enabled from the NAPI poll once the budget
 * is not exhausted.
 */
static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int ce_id = ath10k_snoc_get_ce_id(ar, irq);

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
		ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
			    ce_id);
		return IRQ_HANDLED;
	}

	ath10k_snoc_irq_disable(ar);
	napi_schedule(&ar->napi);

	return IRQ_HANDLED;
}
+
+static int ath10k_snoc_request_irq(struct ath10k *ar)
+{
+ int ret, id;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int irqflags = IRQF_TRIGGER_RISING;
+
+ for (id = 0; id < CE_COUNT_MAX; id++) {
+ ret = request_irq(ar_snoc->ce_irqs[id],
+ ath10k_snoc_per_engine_handler,
+ irqflags, ce_name[id], ar);
+ if (ret) {
+ ath10k_err(ar,
+ "%s: cannot register CE %d irq handler, ret = %d",
+ __func__, id, ret);
+ free_irq(ar_snoc->ce_irqs[id], ar);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
/* Release every registered CE interrupt line. */
static void ath10k_snoc_free_irq(struct ath10k *ar)
{
	int id;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	for (id = 0; id < CE_COUNT_MAX; id++)
		free_irq(ar_snoc->ce_irqs[id], ar);
}
+
/* Prepare one service tasklet per CE pipe; each tasklet receives its
 * own pipe_info as context.
 */
static void ath10k_snoc_init_irq_tasklets(struct ath10k *ar)
{
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
	int i;

	for (i = 0; i < CE_COUNT; i++) {
		ar_snoc->pipe_info[i].ar_snoc = ar_snoc;
		tasklet_init(&ar_snoc->pipe_info[i].intr,
			     ath10k_snoc_ce_tasklet,
			     (unsigned long)&ar_snoc->pipe_info[i]);
	}
}
+
/* IRQ "init" only prepares the per-pipe tasklets; the actual
 * request_irq() calls happen later in ath10k_snoc_request_irq().
 */
static int ath10k_snoc_init_irq(struct ath10k *ar)
{
	ath10k_snoc_init_irq_tasklets(ar);
	return 0;
}
+
/* Counterpart of init_irq: just masks all CE interrupts. */
static int ath10k_snoc_deinit_irq(struct ath10k *ar)
{
	ath10k_snoc_irq_disable(ar);
	return 0;
}
+
/* Query the platform (icnss) driver for the SoC register window and id,
 * and cache them in the snoc private area.
 * Returns 0 on success or the icnss_get_soc_info() error.
 */
static int ath10k_snoc_get_soc_info(struct ath10k *ar)
{
	int ret;
	struct icnss_soc_info soc_info;
	struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);

	memset(&soc_info, 0, sizeof(soc_info));

	ret = icnss_get_soc_info(&soc_info);
	if (ret < 0) {
		ath10k_err(ar, "%s: icnss_get_soc_info error = %d",
			   __func__, ret);
		return ret;
	}

	ar_snoc->mem = soc_info.v_addr;
	ar_snoc->mem_pa = soc_info.p_addr;

	/* The SoC id doubles as the target version for WCN3990. */
	ar_snoc->target_info.soc_version = soc_info.soc_id;
	ar_snoc->target_info.target_version = soc_info.soc_id;
	ar_snoc->target_info.target_revision = 0;

	/* NOTE(review): %pS does a kernel-symbol lookup — for an
	 * ioremapped address %p/%pK looks more appropriate; confirm.
	 */
	ath10k_dbg(ar, ATH10K_DBG_SNOC,
		   "%s: mem = %pS mem_pa = %pad soc ver=%x tgt ver=%x\n",
		   __func__, ar_snoc->mem, &ar_snoc->mem_pa,
		   ar_snoc->target_info.soc_version,
		   ar_snoc->target_info.target_version);

	return 0;
}
+
+static int ath10k_snoc_wlan_enable(struct ath10k *ar)
+{
+ struct icnss_wlan_enable_cfg cfg;
+ int pipe_num, i;
+ struct ath10k_ce_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
+ struct ce_tgt_pipe_cfg *tmp_tgt_cfg;
+ struct ce_svc_pipe_cfg *tmp_svc_cfg;
+
+ for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
+ tgt_cfg[pipe_num].pipe_num =
+ target_ce_config_wlan[pipe_num].pipenum;
+ tgt_cfg[pipe_num].pipe_dir =
+ target_ce_config_wlan[pipe_num].pipedir;
+ tgt_cfg[pipe_num].nentries =
+ target_ce_config_wlan[pipe_num].nentries;
+ tgt_cfg[pipe_num].nbytes_max =
+ target_ce_config_wlan[pipe_num].nbytes_max;
+ tgt_cfg[pipe_num].flags =
+ target_ce_config_wlan[pipe_num].flags;
+ tgt_cfg[pipe_num].reserved = 0;
+ }
+
+ cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
+ sizeof(struct ce_tgt_pipe_cfg);
+ cfg.ce_tgt_cfg = (struct ce_tgt_pipe_cfg *)
+ &tgt_cfg;
+ cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
+ sizeof(struct ce_svc_pipe_cfg);
+ cfg.ce_svc_cfg = (struct ce_svc_pipe_cfg *)
+ &target_service_to_ce_map_wlan;
+ cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map);
+ cfg.shadow_reg_cfg = (struct icnss_shadow_reg_cfg *)
+ &target_shadow_reg_cfg_map;
+
+ for (i = 0; i < cfg.num_ce_tgt_cfg; i++)
+ tmp_tgt_cfg = cfg.ce_tgt_cfg + i;
+
+ for (i = 0; i < cfg.num_ce_svc_pipe_cfg; i++)
+ tmp_svc_cfg = cfg.ce_svc_cfg + i;
+
+ return icnss_wlan_enable(&cfg, ICNSS_MISSION, "5.1.0.26N");
+}
+
/* Push the CE configuration to the platform driver; logs on failure.
 * Returns 0 on success or the negative ath10k_snoc_wlan_enable() error.
 */
static int ath10k_snoc_bus_configure(struct ath10k *ar)
{
	int ret;

	ret = ath10k_snoc_wlan_enable(ar);
	if (ret >= 0)
		return 0;

	ath10k_err(ar, "%s: ath10k_snoc_bus_configure error = %d",
		   __func__, ret);

	return ret;
}
static int ath10k_snoc_hif_start(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
	/* Unmask CE interrupts, then prime the RX rings with buffers. */
	ath10k_snoc_irq_enable(ar);
	ath10k_snoc_rx_post(ar);
	return 0;
}
+
+static int ath10k_snoc_claim(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ ath10k_snoc_get_soc_info(ar);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot snoc_mem 0x%p\n", ar_snoc->mem);
+
return 0;
}
static int ath10k_snoc_hif_power_up(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
		   __func__, ar->state);

	/* On a restart (state already ON) re-apply the CE/service config.
	 * NOTE(review): a bus-configure failure is logged but not treated
	 * as fatal here — confirm this is intentional.
	 */
	if (ar->state == ATH10K_STATE_ON) {
		ret = ath10k_snoc_bus_configure(ar);
		if (ret)
			ath10k_err(ar, "failed to configure bus: %d\n", ret);
	}
	ret = ath10k_snoc_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_sleep;
	}

	napi_enable(&ar->napi);
	return 0;

err_sleep:
	return ret;
}
+
/* NAPI poll: service all CE rings, then run HTT TX/RX completion work
 * within @budget. CE interrupts (masked in the hard-IRQ handler) are
 * re-enabled only when the budget was not exhausted.
 */
static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
{
	struct ath10k *ar = container_of(ctx, struct ath10k, napi);
	int done = 0;

	ath10k_ce_per_engine_service_any(ar);

	done = ath10k_htt_txrx_compl_task(ar, budget);

	if (done < budget) {
		napi_complete(ctx);
		ath10k_snoc_irq_enable(ar);
	}

	return done;
}
+
+static int ath10k_snoc_resource_init(struct ath10k *ar)
+{
+ int i, ret = 0;
+ struct resource *res;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ for (i = 0; i < CE_COUNT; i++) {
+ res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
+ if (!res) {
+ ath10k_err(ar, "Fail to get IRQ-%d\n", i);
+ ret = -ENODEV;
+ goto out;
+ } else {
+ ar_snoc->ce_irqs[i] = res->start;
+ }
+ }
+
+out:
+ return ret;
}
static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
@@ -628,11 +1225,12 @@ static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
static int ath10k_snoc_probe(struct platform_device *pdev)
{
- int ret = 0;
+ int ret;
struct ath10k *ar;
struct ath10k_snoc *ar_snoc;
enum ath10k_hw_rev hw_rev;
struct device *dev;
+ u32 chip_id;
dev = &pdev->dev;
hw_rev = ATH10K_HW_WCN3990;
@@ -643,8 +1241,83 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
dev_err(dev, "failed to allocate core\n");
return -ENOMEM;
}
+
+ ar_snoc = ath10k_snoc_priv(ar);
+ if (!ar_snoc)
+ return -EINVAL;
+ ar_snoc->dev = pdev;
+ platform_set_drvdata(pdev, ar);
+ ar_snoc->ar = ar;
+
+ spin_lock_init(&ar_snoc->ce_lock);
+ ar->bus_write32 = ath10k_snoc_write32;
+ ar->bus_read32 = ath10k_snoc_read32;
+ ar->ce_lock = ar_snoc->ce_lock;
+ ar->ce_states = ar_snoc->ce_states;
+ ath10k_snoc_resource_init(ar);
+
+ ar->target_version = ATH10K_HW_WCN3990;
+ ar->hw->wiphy->hw_version = ATH10K_HW_WCN3990;
+ setup_timer(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry,
+ (unsigned long)ar);
+
+ ret = ath10k_snoc_claim(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to claim device: %d\n", ret);
+ goto err_core_destroy;
+ }
+ ret = ath10k_snoc_bus_configure(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to configure bus: %d\n", ret);
+ goto err_core_destroy;
+ }
+
+ ret = ath10k_snoc_alloc_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
+ ret);
+ goto err_core_destroy;
+ }
+
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
+ ATH10K_NAPI_BUDGET);
+
+ ret = ath10k_snoc_init_irq(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to init irqs: %d\n", ret);
+ goto err_free_pipes;
+ }
+
+ ret = ath10k_snoc_request_irq(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request irqs: %d\n", ret);
+ goto err_deinit_irq;
+ }
+
+ chip_id = ar_snoc->target_info.soc_version;
+ /* chip id needs to be retrieved from platform driver */
+ ret = ath10k_core_register(ar, chip_id);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
+ goto err_free_irq;
+ }
ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 probed\n", __func__);
+ return 0;
+
+err_free_irq:
+ ath10k_snoc_free_irq(ar);
+ ath10k_snoc_kill_tasklet(ar);
+
+err_deinit_irq:
+ ath10k_snoc_deinit_irq(ar);
+
+err_free_pipes:
+ ath10k_snoc_free_pipes(ar);
+
+err_core_destroy:
+ ath10k_core_destroy(ar);
+
return ret;
}
@@ -659,7 +1332,14 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
if (!ar_snoc)
return -EINVAL;
+ ath10k_core_unregister(ar);
+ ath10k_snoc_free_irq(ar);
+ ath10k_snoc_kill_tasklet(ar);
+ ath10k_snoc_deinit_irq(ar);
+ ath10k_snoc_release_resource(ar);
+ ath10k_snoc_free_pipes(ar);
ath10k_core_destroy(ar);
+
ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 removed\n", __func__);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index 1db8b532f4fa..7c8a3ca8fabf 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -106,7 +106,7 @@ struct ath10k_target_info {
* @is_driver_probed: flag to indicate driver state
*/
struct ath10k_snoc {
- struct device *dev;
+ struct platform_device *dev;
struct ath10k *ar;
void __iomem *mem;
dma_addr_t mem_pa;
@@ -118,6 +118,7 @@ struct ath10k_snoc {
spinlock_t ce_lock;
struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
struct timer_list rx_post_retry;
+ u32 ce_irqs[CE_COUNT_MAX];
u32 *vaddr_rri_on_ddr;
bool is_driver_probed;
};
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index c9a8bb1186f2..9817c89cd76d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014, 2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -377,6 +377,7 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct sk_buff *skb;
int ret;
+ u32 mgmt_tx_cmdid;
if (!ar->wmi.ops->gen_mgmt_tx)
return -EOPNOTSUPP;
@@ -385,7 +386,12 @@ ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
if (IS_ERR(skb))
return PTR_ERR(skb);
- ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
+ if (QCA_REV_WCN3990(ar))
+ mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_send_cmdid;
+ else
+ mgmt_tx_cmdid = ar->wmi.cmd->mgmt_tx_cmdid;
+
+ ret = ath10k_wmi_cmd_send(ar, skb, mgmt_tx_cmdid);
if (ret)
return ret;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 7a8f9cfd8253..b0f3e9b9ef6f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014, 2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -1512,10 +1512,11 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
cmd->ie_len = __cpu_to_le32(arg->ie_len);
cmd->num_probes = __cpu_to_le32(3);
- /* FIXME: There are some scan flag inconsistencies across firmwares,
- * e.g. WMI-TLV inverts the logic behind the following flag.
- */
- cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
+ if (QCA_REV_WCN3990(ar))
+ cmd->common.scan_ctrl_flags = ar->fw_flags->flags;
+ else
+ cmd->common.scan_ctrl_flags ^=
+ __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
ptr += sizeof(*tlv);
ptr += sizeof(*cmd);
@@ -2458,6 +2459,84 @@ ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
}
static struct sk_buff *
+ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+ struct wmi_tlv_mgmt_tx_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ void *ptr;
+ int len;
+ u32 buf_len = (msdu->len < WMI_TX_DL_FRM_LEN) ? msdu->len :
+ WMI_TX_DL_FRM_LEN;
+ u16 fc;
+ struct ath10k_vif *arvif;
+ dma_addr_t mgmt_frame_dma;
+ u32 vdev_id;
+
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ if (cb->vif) {
+ arvif = (void *)cb->vif->drv_priv;
+ vdev_id = arvif->vdev_id;
+ } else {
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(*cmd) + buf_len;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ len += IEEE80211_CCMP_MIC_LEN;
+ buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
+
+ len += sizeof(*tlv);
+ len = round_up(len, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
+ tlv->len = __cpu_to_le16(sizeof(cmd->hdr));
+ cmd = (void *)tlv->value;
+ cmd->hdr.vdev_id = vdev_id;
+ cmd->hdr.desc_id = 0;
+ cmd->hdr.chanfreq = 0;
+ cmd->hdr.buf_len = __cpu_to_le32(buf_len);
+ cmd->hdr.frame_len = __cpu_to_le32(msdu->len);
+ mgmt_frame_dma = dma_map_single(arvif->ar->dev, msdu->data,
+ msdu->len, DMA_TO_DEVICE);
+ if (!mgmt_frame_dma)
+ return ERR_PTR(-ENOMEM);
+
+ cmd->hdr.paddr_lo = (uint32_t)(mgmt_frame_dma & 0xffffffff);
+ cmd->hdr.paddr_hi = (uint32_t)(upper_32_bits(mgmt_frame_dma) &
+ HTT_WCN3990_PADDR_MASK);
+ cmd->data_len = buf_len;
+ cmd->data_tag = 0x11;
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(sizeof(msdu->len));
+ memcpy(cmd->buf, msdu->data, buf_len);
+
+ return skb;
+}
+
+static struct sk_buff *
ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type,
u32 delay_ms)
@@ -3196,6 +3275,7 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
.bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
.prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
.mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
+ .mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
.prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
.addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
.addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
@@ -3622,6 +3702,7 @@ static const struct wmi_ops wmi_hl_1_0_ops = {
.gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
.gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
.gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_tlv_op_gen_mgmt_tx,
.gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
.gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
.gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
@@ -3646,6 +3727,7 @@ void ath10k_wmi_hl_1_0_attach(struct ath10k *ar)
ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
ar->wmi.ops = &wmi_hl_1_0_ops;
+ ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
}
/* TLV init */
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 496fee5d489b..2b8318010a35 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2014, 2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -22,6 +22,7 @@
#define WMI_TLV_CMD_UNSUPPORTED 0
#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+#define WMI_TX_DL_FRM_LEN 64
enum wmi_tlv_grp_id {
WMI_TLV_GRP_START = 0x3,
@@ -132,6 +133,7 @@ enum wmi_tlv_cmd_id {
WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
WMI_TLV_MGMT_TX_CMDID,
WMI_TLV_PRB_TMPL_CMDID,
+ WMI_TLV_MGMT_TX_SEND_CMD,
WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
WMI_TLV_ADDBA_SEND_CMDID,
WMI_TLV_ADDBA_STATUS_CMDID,
@@ -891,6 +893,8 @@ enum wmi_tlv_tag {
WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,
WMI_TLV_TAG_STRUCT_HL_1_0_SVC_OFFSET = 176,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_CMD = 0x1A6,
+
WMI_TLV_TAG_MAX
};
@@ -1152,6 +1156,8 @@ wmi_hl_1_0_svc_map(const __le32 *in, unsigned long *out, size_t len)
WMI_SERVICE_FORCE_FW_HANG, len);
SVCMAP(WMI_HL_10_SERVICE_RX_FULL_REORDER,
WMI_SERVICE_RX_FULL_REORDER, len);
+ SVCMAP(WMI_HL_10_SERVICE_MGMT_TX_WMI,
+ WMI_SERVICE_MGMT_TX_WMI, len);
}
static inline void
@@ -1827,4 +1833,20 @@ struct wmi_tlv_tx_pause_ev {
void ath10k_wmi_tlv_attach(struct ath10k *ar);
void ath10k_wmi_hl_1_0_attach(struct ath10k *ar);
+struct wmi_tlv_mgmt_tx_hdr {
+ __le32 vdev_id;
+ __le32 desc_id;
+ __le32 chanfreq;
+ __le32 paddr_lo;
+ __le32 paddr_hi;
+ __le32 frame_len;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_tlv_mgmt_tx_cmd {
+ struct wmi_tlv_mgmt_tx_hdr hdr;
+ __le16 data_len;
+ __le16 data_tag;
+ u8 buf[0];
+} __packed;
#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 1b243c899bef..74398b45e365 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2013, 2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -184,6 +184,7 @@ enum wmi_service {
WMI_SERVICE_TX_MODE_PUSH_ONLY,
WMI_SERVICE_TX_MODE_PUSH_PULL,
WMI_SERVICE_TX_MODE_DYNAMIC,
+ WMI_SERVICE_MGMT_TX_WMI,
/* keep last */
WMI_SERVICE_MAX,
@@ -720,6 +721,7 @@ struct wmi_cmd_map {
u32 bcn_filter_rx_cmdid;
u32 prb_req_filter_rx_cmdid;
u32 mgmt_tx_cmdid;
+ u32 mgmt_tx_send_cmdid;
u32 prb_tmpl_cmdid;
u32 addba_clear_resp_cmdid;
u32 addba_send_cmdid;
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index fd241f6b62da..58c1704a875c 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -2733,6 +2733,16 @@ int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
}
EXPORT_SYMBOL(gsi_enable_fw);
+void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
+ unsigned long *size)
+{
+ if (base_offset)
+ *base_offset = GSI_GSI_INST_RAM_BASE_OFFS;
+ if (size)
+ *size = GSI_GSI_INST_RAM_SIZE;
+}
+EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size);
+
static int msm_gsi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h
index fa1e84896f73..1acaf74a0968 100644
--- a/drivers/platform/msm/gsi/gsi_reg.h
+++ b/drivers/platform/msm/gsi/gsi_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1838,5 +1838,7 @@
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+#define GSI_GSI_INST_RAM_BASE_OFFS 0x4000
+#define GSI_GSI_INST_RAM_SIZE 0x4000
#endif /* __GSI_REG_H__ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 2da3b0ddca8f..cd946fff31a9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1935,7 +1935,7 @@ static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
}
retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
- 0, &mem);
+ 0, &mem, true);
if (retval) {
IPAERR("failed to generate flt single tbl empty img\n");
goto free_cmd_pyld;
@@ -2042,7 +2042,7 @@ static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
retval = ipahal_rt_generate_empty_img(
modem_rt_index_hi - modem_rt_index_lo + 1,
- lcl_hdr_sz, lcl_hdr_sz, &mem);
+ lcl_hdr_sz, lcl_hdr_sz, &mem, true);
if (retval) {
IPAERR("fail generate empty rt img\n");
return -ENOMEM;
@@ -2514,7 +2514,7 @@ int _ipa_init_rt4_v3(void)
rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
- &mem);
+ &mem, false);
if (rc) {
IPAERR("fail generate empty v4 rt img\n");
return rc;
@@ -2581,7 +2581,7 @@ int _ipa_init_rt6_v3(void)
rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
- &mem);
+ &mem, false);
if (rc) {
IPAERR("fail generate empty v6 rt img\n");
return rc;
@@ -2642,7 +2642,7 @@ int _ipa_init_flt4_v3(void)
rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
IPA_MEM_PART(v4_flt_hash_size),
IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
- &mem);
+ &mem, false);
if (rc) {
IPAERR("fail generate empty v4 flt img\n");
return rc;
@@ -2702,7 +2702,7 @@ int _ipa_init_flt6_v3(void)
rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
IPA_MEM_PART(v6_flt_hash_size),
IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
- &mem);
+ &mem, false);
if (rc) {
IPAERR("fail generate empty v6 flt img\n");
return rc;
@@ -4021,7 +4021,7 @@ static int ipa3_trigger_fw_loading_mdms(void)
IPADBG("FWs are available for loading\n");
- result = ipa3_load_fws(fw);
+ result = ipa3_load_fws(fw, ipa3_res.transport_mem_base);
if (result) {
IPAERR("IPA FWs loading has failed\n");
release_firmware(fw);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index b3ce52424fec..7feb1c1ce178 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -2036,7 +2036,7 @@ int ipa3_uc_panic_notifier(struct notifier_block *this,
unsigned long event, void *ptr);
void ipa3_inc_acquire_wakelock(void);
void ipa3_dec_release_wakelock(void);
-int ipa3_load_fws(const struct firmware *firmware);
+int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base);
int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data);
const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
int ipa_gsi_ch20_wa(void);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 683777d4cacd..3b909acdd823 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3581,75 +3581,164 @@ end_sequence:
return res;
}
+static int ipa3_load_single_fw(const struct firmware *firmware,
+ const struct elf32_phdr *phdr)
+{
+ uint32_t *fw_mem_base;
+ int index;
+ const uint32_t *elf_data_ptr;
+
+ if (phdr->p_offset > firmware->size) {
+ IPAERR("Invalid ELF: offset=%u is beyond elf_size=%zu\n",
+ phdr->p_offset, firmware->size);
+ return -EINVAL;
+ }
+ if ((firmware->size - phdr->p_offset) < phdr->p_filesz) {
+ IPAERR("Invalid ELF: offset=%u filesz=%u elf_size=%zu\n",
+ phdr->p_offset, phdr->p_filesz, firmware->size);
+ return -EINVAL;
+ }
+
+ if (phdr->p_memsz % sizeof(uint32_t)) {
+ IPAERR("FW mem size %u doesn't align to 32bit\n",
+ phdr->p_memsz);
+ return -EFAULT;
+ }
+
+ if (phdr->p_filesz > phdr->p_memsz) {
+ IPAERR("FW image too big src_size=%u dst_size=%u\n",
+ phdr->p_filesz, phdr->p_memsz);
+ return -EFAULT;
+ }
+
+ fw_mem_base = ioremap(phdr->p_vaddr, phdr->p_memsz);
+ if (!fw_mem_base) {
+ IPAERR("Failed to map 0x%x for the size of %u\n",
+ phdr->p_vaddr, phdr->p_memsz);
+ return -ENOMEM;
+ }
+
+ /* Set the entire region to 0s */
+ memset(fw_mem_base, 0, phdr->p_memsz);
+
+ elf_data_ptr = (uint32_t *)(firmware->data + phdr->p_offset);
+
+ /* Write the FW */
+ for (index = 0; index < phdr->p_filesz/sizeof(uint32_t); index++) {
+ writel_relaxed(*elf_data_ptr, &fw_mem_base[index]);
+ elf_data_ptr++;
+ }
+
+ iounmap(fw_mem_base);
+
+ return 0;
+}
+
/**
* ipa3_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
*
* @firmware: Structure which contains the FW data from the user space.
+ * @gsi_mem_base: GSI base address
*
* Return value: 0 on success, negative otherwise
*
*/
-int ipa3_load_fws(const struct firmware *firmware)
+int ipa3_load_fws(const struct firmware *firmware, phys_addr_t gsi_mem_base)
{
const struct elf32_hdr *ehdr;
const struct elf32_phdr *phdr;
- const uint8_t *elf_phdr_ptr;
- uint32_t *elf_data_ptr;
- int phdr_idx, index;
- uint32_t *fw_mem_base;
-
- ehdr = (struct elf32_hdr *) firmware->data;
-
- elf_phdr_ptr = firmware->data + sizeof(*ehdr);
+ unsigned long gsi_iram_ofst;
+ unsigned long gsi_iram_size;
+ phys_addr_t ipa_reg_mem_base;
+ u32 ipa_reg_ofst;
+ int rc;
+
+ if (!gsi_mem_base) {
+ IPAERR("Invalid GSI base address\n");
+ return -EINVAL;
+ }
- for (phdr_idx = 0; phdr_idx < ehdr->e_phnum; phdr_idx++) {
- /*
- * The ELF program header will contain the starting
- * address to which the firmware needs to copied.
- */
- phdr = (struct elf32_phdr *)elf_phdr_ptr;
+ ipa_assert_on(!firmware);
+ /* One program header per FW image: GSI, DPS and HPS */
+ if (firmware->size < (sizeof(*ehdr) + 3 * sizeof(*phdr))) {
+ IPAERR("Missing ELF and Program headers firmware size=%zu\n",
+ firmware->size);
+ return -EINVAL;
+ }
- /*
- * p_vaddr will contain the starting address to which the
- * FW needs to be loaded.
- * p_memsz will contain the size of the IRAM.
- * p_filesz will contain the size of the FW image.
- */
- fw_mem_base = ioremap(phdr->p_vaddr, phdr->p_memsz);
- if (!fw_mem_base) {
- IPAERR("Failed to map 0x%x for the size of %u\n",
- phdr->p_vaddr, phdr->p_memsz);
- return -ENOMEM;
- }
+ ehdr = (struct elf32_hdr *) firmware->data;
+ ipa_assert_on(!ehdr);
+ if (ehdr->e_phnum != 3) {
+ IPAERR("Unexpected number of ELF program headers\n");
+ return -EINVAL;
+ }
+ phdr = (struct elf32_phdr *)(firmware->data + sizeof(*ehdr));
- /* Set the entire region to 0s */
- memset(fw_mem_base, 0, phdr->p_memsz);
+ /*
+ * Each ELF program header represents a FW image and contains:
+ * p_vaddr : The starting address to which the FW needs to loaded.
+ * p_memsz : The size of the IRAM (where the image loaded)
+ * p_filesz: The size of the FW image embedded inside the ELF
+ * p_offset: Absolute offset to the image from the head of the ELF
+ */
- /*
- * p_offset will contain and absolute offset from the beginning
- * of the ELF file.
- */
- elf_data_ptr = (uint32_t *)
- ((uint8_t *)firmware->data + phdr->p_offset);
+ /* Load GSI FW image */
+ gsi_get_inst_ram_offset_and_size(&gsi_iram_ofst, &gsi_iram_size);
+ if (phdr->p_vaddr != (gsi_mem_base + gsi_iram_ofst)) {
+ IPAERR(
+ "Invalid GSI FW img load addr vaddr=0x%x gsi_mem_base=%pa gsi_iram_ofst=0x%lx\n"
+ , phdr->p_vaddr, &gsi_mem_base, gsi_iram_ofst);
+ return -EINVAL;
+ }
+ if (phdr->p_memsz > gsi_iram_size) {
+ IPAERR("Invalid GSI FW img size memsz=%d gsi_iram_size=%lu\n",
+ phdr->p_memsz, gsi_iram_size);
+ return -EINVAL;
+ }
+ rc = ipa3_load_single_fw(firmware, phdr);
+ if (rc)
+ return rc;
- if (phdr->p_memsz % sizeof(uint32_t)) {
- IPAERR("FW size %u doesn't align to 32bit\n",
- phdr->p_memsz);
- return -EFAULT;
- }
+ phdr++;
+ ipa_reg_mem_base = ipa3_ctx->ipa_wrapper_base + ipahal_get_reg_base();
- /* Write the FW */
- for (index = 0; index < phdr->p_filesz/sizeof(uint32_t);
- index++) {
- writel_relaxed(*elf_data_ptr, &fw_mem_base[index]);
- elf_data_ptr++;
- }
+ /* Load IPA DPS FW image */
+ ipa_reg_ofst = ipahal_get_reg_ofst(IPA_DPS_SEQUENCER_FIRST);
+ if (phdr->p_vaddr != (ipa_reg_mem_base + ipa_reg_ofst)) {
+ IPAERR(
+ "Invalid IPA DPS img load addr vaddr=0x%x ipa_reg_mem_base=%pa ipa_reg_ofst=%u\n"
+ , phdr->p_vaddr, &ipa_reg_mem_base, ipa_reg_ofst);
+ return -EINVAL;
+ }
+ if (phdr->p_memsz > ipahal_get_dps_img_mem_size()) {
+ IPAERR("Invalid IPA DPS img size memsz=%d dps_mem_size=%u\n",
+ phdr->p_memsz, ipahal_get_dps_img_mem_size());
+ return -EINVAL;
+ }
+ rc = ipa3_load_single_fw(firmware, phdr);
+ if (rc)
+ return rc;
- iounmap(fw_mem_base);
+ phdr++;
- elf_phdr_ptr = elf_phdr_ptr + sizeof(*phdr);
+ /* Load IPA HPS FW image */
+ ipa_reg_ofst = ipahal_get_reg_ofst(IPA_HPS_SEQUENCER_FIRST);
+ if (phdr->p_vaddr != (ipa_reg_mem_base + ipa_reg_ofst)) {
+ IPAERR(
+ "Invalid IPA HPS img load addr vaddr=0x%x ipa_reg_mem_base=%pa ipa_reg_ofst=%u\n"
+ , phdr->p_vaddr, &ipa_reg_mem_base, ipa_reg_ofst);
+ return -EINVAL;
+ }
+ if (phdr->p_memsz > ipahal_get_hps_img_mem_size()) {
+ IPAERR("Invalid IPA HPS img size memsz=%d hps_mem_size=%u\n",
+ phdr->p_memsz, ipahal_get_hps_img_mem_size());
+ return -EINVAL;
}
- IPADBG("IPA FWs (GSI FW, HPS and DPS) were loaded\n");
+ rc = ipa3_load_single_fw(firmware, phdr);
+ if (rc)
+ return rc;
+
+ IPADBG("IPA FWs (GSI FW, DPS and HPS) loaded successfully\n");
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 2bc179d5a33c..bc9d45b254b3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1257,6 +1257,21 @@ int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type)
return res;
}
+/*
+ * Get IPA Data Processing Star image memory size at IPA SRAM
+ */
+u32 ipahal_get_dps_img_mem_size(void)
+{
+ return IPA_HW_DPS_IMG_MEM_SIZE_V3_0;
+}
+
+/*
+ * Get IPA Header Processing Star image memory size at IPA SRAM
+ */
+u32 ipahal_get_hps_img_mem_size(void)
+{
+ return IPA_HW_HPS_IMG_MEM_SIZE_V3_0;
+}
int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
struct device *ipa_pdev)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index 654977511814..154045fe4f56 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -634,6 +634,16 @@ int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
*/
int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type);
+/*
+ * Get IPA Data Processing Star image memory size at IPA SRAM
+ */
+u32 ipahal_get_dps_img_mem_size(void);
+
+/*
+ * Get IPA Header Processing Star image memory size at IPA SRAM
+ */
+u32 ipahal_get_hps_img_mem_size(void);
+
int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
struct device *ipa_pdev);
void ipahal_destroy(void);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
index 67b3cb301f1f..72cc4764e7aa 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2553,16 +2553,19 @@ u32 ipahal_get_low_rule_id(void)
* @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
* @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
* @mem: mem object that points to DMA mem representing the hdr structure
+ * @atomic: should DMA allocation be executed with atomic flag
*/
int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
- u32 nhash_hdr_size, struct ipa_mem_buffer *mem)
+ u32 nhash_hdr_size, struct ipa_mem_buffer *mem, bool atomic)
{
int i;
u64 addr;
struct ipahal_fltrt_obj *obj;
+ int flag;
IPAHAL_DBG("Entry\n");
+ flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
if (!tbls_num || !nhash_hdr_size || !mem) {
@@ -2589,7 +2592,7 @@ int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
mem->size = tbls_num * obj->tbl_hdr_width;
mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
- &mem->phys_base, GFP_KERNEL);
+ &mem->phys_base, flag);
if (!mem->base) {
IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
return -ENOMEM;
@@ -2615,18 +2618,22 @@ int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
* should be: bit0->EP0, bit1->EP1
* If bitmap is zero -> create tbl without bitmap entry
* @mem: mem object that points to DMA mem representing the hdr structure
+ * @atomic: should DMA allocation be executed with atomic flag
*/
int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
- u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem)
+ u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem,
+ bool atomic)
{
int flt_spc;
u64 flt_bitmap;
int i;
u64 addr;
struct ipahal_fltrt_obj *obj;
+ int flag;
IPAHAL_DBG("Entry - ep_bitmap 0x%llx\n", ep_bitmap);
+ flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
if (!tbls_num || !nhash_hdr_size || !mem) {
@@ -2667,7 +2674,7 @@ int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
if (ep_bitmap)
mem->size += obj->tbl_hdr_width;
mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
- &mem->phys_base, GFP_KERNEL);
+ &mem->phys_base, flag);
if (!mem->base) {
IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
return -ENOMEM;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
index ee2704d62b50..3ee883b6fb20 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -171,9 +171,10 @@ u32 ipahal_get_low_rule_id(void);
* @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
* @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
* @mem: mem object that points to DMA mem representing the hdr structure
+ * @atomic: should DMA allocation be executed with atomic flag
*/
int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
- u32 nhash_hdr_size, struct ipa_mem_buffer *mem);
+ u32 nhash_hdr_size, struct ipa_mem_buffer *mem, bool atomic);
/*
* ipahal_flt_generate_empty_img() - Generate empty filter image
@@ -185,9 +186,11 @@ int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
* @ep_bitmap: Bitmap representing the EP that has flt tables. The format
* should be: bit0->EP0, bit1->EP1
* @mem: mem object that points to DMA mem representing the hdr structure
+ * @atomic: should DMA allocation be executed with atomic flag
*/
int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
- u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem);
+ u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem,
+ bool atomic);
/*
* ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index 4c4b6661e8fc..d6a496e56861 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -546,4 +546,8 @@ struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq {
struct ipa_hw_hdr_proc_ctx_tlv end;
};
+/* IPA HW DPS/HPS image memory sizes */
+#define IPA_HW_DPS_IMG_MEM_SIZE_V3_0 128
+#define IPA_HW_HPS_IMG_MEM_SIZE_V3_0 320
+
#endif /* _IPAHAL_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 347a8c418ebb..ed0d0032af8d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1119,6 +1119,12 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
[IPA_HW_v3_0][IPA_QSB_MAX_READS] = {
ipareg_construct_qsb_max_reads, ipareg_parse_dummy,
0x00000078, 0},
+ [IPA_HW_v3_0][IPA_DPS_SEQUENCER_FIRST] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0001e000, 0},
+ [IPA_HW_v3_0][IPA_HPS_SEQUENCER_FIRST] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0001e080, 0},
/* IPAv3.1 */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index 5f1e3fe410b1..0fc2a65f5e99 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -83,6 +83,8 @@ enum ipahal_reg_name {
IPA_QSB_MAX_WRITES,
IPA_QSB_MAX_READS,
IPA_TX_CFG,
+ IPA_DPS_SEQUENCER_FIRST,
+ IPA_HPS_SEQUENCER_FIRST,
IPA_REG_MAX,
};
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index e99bf1ee5ad2..3b2ea6a6a5ed 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -46,7 +46,7 @@ static ssize_t power_supply_show_property(struct device *dev,
static char *type_text[] = {
"Unknown", "Battery", "UPS", "Mains", "USB", "USB_DCP",
"USB_CDP", "USB_ACA", "USB_HVDCP", "USB_HVDCP_3", "USB_PD",
- "Wireless", "BMS", "USB_Parallel", "Wipower",
+ "Wireless", "BMS", "Parallel", "Main", "Wipower",
"TYPEC", "TYPEC_UFP", "TYPEC_DFP"
};
static char *status_text[] = {
@@ -275,11 +275,13 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(charger_temp),
POWER_SUPPLY_ATTR(charger_temp_max),
POWER_SUPPLY_ATTR(parallel_disable),
- POWER_SUPPLY_ATTR(parallel_percent),
POWER_SUPPLY_ATTR(pe_start),
POWER_SUPPLY_ATTR(set_ship_mode),
POWER_SUPPLY_ATTR(soc_reporting_ready),
POWER_SUPPLY_ATTR(debug_battery),
+ POWER_SUPPLY_ATTR(fcc_delta),
+ POWER_SUPPLY_ATTR(icl_reduction),
+ POWER_SUPPLY_ATTR(parallel_mode),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 38e822de34a7..a30ed90d6e92 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -530,7 +530,7 @@ static int msm_restart_probe(struct platform_device *pdev)
pr_err("unable to map imem KASLR offset\n");
}
- if (kaslr_imem_addr && scm_is_secure_device()) {
+ if (kaslr_imem_addr) {
__raw_writel(0xdead4ead, kaslr_imem_addr);
__raw_writel(KASLR_OFFSET_BIT_MASK &
(kimage_vaddr - KIMAGE_VADDR), kaslr_imem_addr + 4);
@@ -602,7 +602,6 @@ skip_sysfs_create:
scm_deassert_ps_hold_supported = true;
#ifdef CONFIG_QCOM_DLOAD_MODE
- download_mode = scm_is_secure_device();
set_dload_mode(download_mode);
if (!download_mode)
scm_disable_sdi();
diff --git a/drivers/power/supply/qcom/Makefile b/drivers/power/supply/qcom/Makefile
index 0126d2d0a18e..912acb0c788a 100644
--- a/drivers/power/supply/qcom/Makefile
+++ b/drivers/power/supply/qcom/Makefile
@@ -6,6 +6,6 @@ obj-$(CONFIG_SMB1351_USB_CHARGER) += smb1351-charger.o pmic-voter.o
obj-$(CONFIG_MSM_BCL_CTL) += msm_bcl.o
obj-$(CONFIG_MSM_BCL_PERIPHERAL_CTL) += bcl_peripheral.o
obj-$(CONFIG_BATTERY_BCL) += battery_current_limit.o
-obj-$(CONFIG_QPNP_SMB2) += qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o
-obj-$(CONFIG_SMB138X_CHARGER) += smb138x-charger.o smb-lib.o pmic-voter.o storm-watch.o
-obj-$(CONFIG_QPNP_QNOVO) += qpnp-qnovo.o
+obj-$(CONFIG_QPNP_SMB2) += qpnp-smb2.o smb-lib.o pmic-voter.o storm-watch.o battery.o
+obj-$(CONFIG_SMB138X_CHARGER) += smb138x-charger.o smb-lib.o pmic-voter.o storm-watch.o battery.o
+obj-$(CONFIG_QPNP_QNOVO) += qpnp-qnovo.o battery.o
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
new file mode 100644
index 000000000000..34add97b55d2
--- /dev/null
+++ b/drivers/power/supply/qcom/battery.c
@@ -0,0 +1,756 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "QCOM-BATT: %s: " fmt, __func__
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/qpnp/qpnp-revid.h>
+#include <linux/printk.h>
+#include <linux/pm_wakeup.h>
+#include <linux/slab.h>
+#include "pmic-voter.h"
+
+#define DRV_MAJOR_VERSION	1
+#define DRV_MINOR_VERSION	0
+
+/* Client names used when voting on the votables below */
+#define CHG_STATE_VOTER			"CHG_STATE_VOTER"
+#define TAPER_END_VOTER			"TAPER_END_VOTER"
+#define PL_TAPER_EARLY_BAD_VOTER	"PL_TAPER_EARLY_BAD_VOTER"
+#define PARALLEL_PSY_VOTER		"PARALLEL_PSY_VOTER"
+
+/* Per-driver state for main/parallel charge-current splitting */
+struct pl_data {
+	int			pl_mode;	/* POWER_SUPPLY_PARALLEL_* from pl_psy */
+	int			slave_pct;	/* % of current given to the slave */
+	int			taper_pct;	/* residual % applied during taper */
+	int			slave_fcc_ua;	/* last FCC programmed on the slave */
+	struct votable		*fcc_votable;	/* fast charge current (min wins) */
+	struct votable		*fv_votable;	/* float voltage (max wins) */
+	struct votable		*pl_disable_votable;	/* any voter disables parallel */
+	struct votable		*pl_awake_votable;	/* any voter holds wakeup source */
+	struct work_struct	status_change_work;
+	struct delayed_work	pl_taper_work;
+	struct power_supply	*main_psy;
+	struct power_supply	*pl_psy;
+	struct power_supply	*batt_psy;
+	int			settled_ua;	/* last AICL-settled input current */
+	int			charge_type;	/* last seen battery charge type */
+	struct class		qcom_batt_class;
+	struct wakeup_source	*pl_ws;
+	struct notifier_block	nb;
+};
+
+/* Singleton published for pl_deinit(); set during init */
+struct pl_data *the_chip;
+
+enum print_reason {
+	PR_PARALLEL	= BIT(0),
+};
+
+/* Bitmask of enum print_reason values; tunable via module param */
+static int debug_mask;
+module_param_named(debug_mask, debug_mask, int, S_IRUSR | S_IWUSR);
+
+/* Log at info level when the reason bit is enabled, debug level otherwise */
+#define pl_dbg(chip, reason, fmt, ...)				\
+	do {								\
+		if (debug_mask & (reason))				\
+			pr_info(fmt, ##__VA_ARGS__);	\
+		else							\
+			pr_debug(fmt, ##__VA_ARGS__);	\
+	} while (0)
+
+/* Indices into pl_attributes[] */
+enum {
+	VER = 0,
+	SLAVE_PCT,
+};
+
+/*******
+ * ICL *
+********/
+/*
+ * Split the AICL-settled input current between the main and parallel
+ * chargers. Only meaningful in USBIN-USBIN mode; the slave receives
+ * (slave_pct - 10)% of the settled current, the remainder stays with
+ * main via the ICL_REDUCTION property. When parallel is disabled the
+ * split is 0 uA, restoring full current to main.
+ */
+static void split_settled(struct pl_data *chip)
+{
+	int slave_icl_pct;
+	int slave_ua = 0;
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	/* TODO some parallel chargers do not have a fine ICL resolution. For
+	 * them implement a psy interface which returns the closest lower ICL
+	 * for desired split
+	 */
+
+	if (chip->pl_mode != POWER_SUPPLY_PARALLEL_USBIN_USBIN)
+		return;
+
+	if (!chip->main_psy)
+		return;
+
+	if (!get_effective_result_locked(chip->pl_disable_votable)) {
+		/* read the aicl settled value */
+		rc = power_supply_get_property(chip->main_psy,
+				POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+			return;
+		}
+		chip->settled_ua = pval.intval;
+		/* slave gets 10 percent points less for ICL */
+		slave_icl_pct = max(0, chip->slave_pct - 10);
+		slave_ua = (chip->settled_ua * slave_icl_pct) / 100;
+	}
+
+	/* ICL_REDUCTION on main could be 0mA when pl is disabled */
+	pval.intval = slave_ua;
+	rc = power_supply_set_property(chip->main_psy,
+			POWER_SUPPLY_PROP_ICL_REDUCTION, &pval);
+	if (rc < 0) {
+		/* was a copy-pasted "slave suspend" message; this sets ICL */
+		pr_err("Couldn't set ICL reduction rc=%d\n", rc);
+		return;
+	}
+
+	/* set parallel's ICL could be 0mA when pl is disabled */
+	pval.intval = slave_ua;
+	rc = power_supply_set_property(chip->pl_psy,
+			POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't set parallel icl, rc=%d\n", rc);
+		return;
+	}
+}
+
+/* sysfs 'version' (RO): report this driver's major.minor version */
+static ssize_t version_show(struct class *c, struct class_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+			DRV_MAJOR_VERSION, DRV_MINOR_VERSION);
+}
+
+/*************
+* SLAVE PCT *
+**************/
+/* sysfs 'parallel_pct' read: current percentage routed to the slave */
+static ssize_t slave_pct_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", chip->slave_pct);
+}
+
+/*
+ * sysfs 'parallel_pct' write: update the slave percentage and re-run
+ * the FCC/FV elections plus the settled-ICL split so the new ratio
+ * takes effect immediately.
+ * NOTE(review): val is not range-checked; values > 100 are accepted
+ * as-is — confirm whether clamping to [0, 100] is intended.
+ */
+static ssize_t slave_pct_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct pl_data *chip = container_of(c, struct pl_data,
+			qcom_batt_class);
+	unsigned long val;
+
+	if (kstrtoul(ubuf, 10, &val))
+		return -EINVAL;
+
+	chip->slave_pct = val;
+	rerun_election(chip->fcc_votable);
+	rerun_election(chip->fv_votable);
+	split_settled(chip);
+
+	return count;
+}
+
+/* Attributes exposed under /sys/class/qcom-battery/ */
+static struct class_attribute pl_attributes[] = {
+	[VER]		= __ATTR_RO(version),
+	[SLAVE_PCT]	= __ATTR(parallel_pct, S_IRUGO | S_IWUSR,
+					slave_pct_show, slave_pct_store),
+	__ATTR_NULL,
+};
+
+/***********
+ * TAPER *
+************/
+#define MINIMUM_PARALLEL_FCC_UA	500000
+#define PL_TAPER_WORK_DELAY_MS	100
+#define TAPER_RESIDUAL_PCT	75
+/*
+ * Taper loop: while the master reports taper charging, repeatedly cut
+ * the slave's share (taper_pct *= 75%) and re-run the FCC election,
+ * polling every PL_TAPER_WORK_DELAY_MS. Stops (and disables parallel)
+ * once the slave's FCC falls below 500 mA, or when the master returns
+ * to fast charge. Holds the PL_AWAKE vote for the duration of a
+ * reduction cycle.
+ */
+static void pl_taper_work(struct work_struct *work)
+{
+	struct pl_data *chip = container_of(work, struct pl_data,
+						pl_taper_work.work);
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	/* exit immediately if parallel is disabled */
+	if (get_effective_result(chip->pl_disable_votable)) {
+		pl_dbg(chip, PR_PARALLEL, "terminating parallel not in progress\n");
+		goto done;
+	}
+
+	pl_dbg(chip, PR_PARALLEL, "entering parallel taper work slave_fcc = %d\n",
+			chip->slave_fcc_ua);
+	if (chip->slave_fcc_ua < MINIMUM_PARALLEL_FCC_UA) {
+		pl_dbg(chip, PR_PARALLEL, "terminating parallel's share lower than 500mA\n");
+		vote(chip->pl_disable_votable, TAPER_END_VOTER, true, 0);
+		goto done;
+	}
+
+	rc = power_supply_get_property(chip->batt_psy,
+			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		goto done;
+	}
+
+	chip->charge_type = pval.intval;
+	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+		pl_dbg(chip, PR_PARALLEL, "master is taper charging; reducing slave FCC\n");
+
+		vote(chip->pl_awake_votable, TAPER_END_VOTER, true, 0);
+		/* Reduce the taper percent by 25 percent */
+		chip->taper_pct = chip->taper_pct * TAPER_RESIDUAL_PCT / 100;
+		rerun_election(chip->fcc_votable);
+		pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work after %d ms\n",
+				PL_TAPER_WORK_DELAY_MS);
+		schedule_delayed_work(&chip->pl_taper_work,
+				msecs_to_jiffies(PL_TAPER_WORK_DELAY_MS));
+		/* keep the awake vote held until the next iteration */
+		return;
+	}
+
+	/*
+	 * Master back to Fast Charge, get out of this round of taper reduction
+	 */
+	pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
+
+done:
+	vote(chip->pl_awake_votable, TAPER_END_VOTER, false, 0);
+}
+
+/*********
+ * FCC *
+**********/
+#define EFFICIENCY_PCT	80
+#define MICRO_5V	5000000
+/*
+ * Divide total_ua of fast-charge current between master and slave.
+ * The slave receives slave_pct% of the (HW-compensated) total, further
+ * scaled by taper_pct; the master takes the remainder. In MID-MID mode
+ * the slave is additionally capped by the input power available from
+ * the AICL-settled current at 5 V with 80% conversion efficiency,
+ * normalized by the effective float voltage.
+ */
+static void split_fcc(struct pl_data *chip, int total_ua,
+			int *master_ua, int *slave_ua)
+{
+	int rc, effective_total_ua, slave_limited_ua, hw_cc_delta_ua = 0,
+		aicl_settled_ua, input_limited_fcc_ua;
+	union power_supply_propval pval = {0, };
+
+	/* HW may report a charge-current compensation delta; default 0 */
+	rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_FCC_DELTA, &pval);
+	if (rc < 0)
+		hw_cc_delta_ua = 0;
+	else
+		hw_cc_delta_ua = pval.intval;
+
+	input_limited_fcc_ua = INT_MAX;
+	if (chip->pl_mode == POWER_SUPPLY_PARALLEL_MID_MID) {
+		rc = power_supply_get_property(chip->main_psy,
+				POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+				&pval);
+		if (rc < 0)
+			aicl_settled_ua = 0;
+		else
+			aicl_settled_ua = pval.intval;
+
+		/* input-power bound: I_in * 5V * eff / V_float */
+		input_limited_fcc_ua = div64_s64(
+			(s64)aicl_settled_ua * MICRO_5V * EFFICIENCY_PCT,
+			(s64)get_effective_result(chip->fv_votable)
+						* 100);
+	}
+
+	effective_total_ua = max(0, total_ua + hw_cc_delta_ua);
+	slave_limited_ua = min(effective_total_ua, input_limited_fcc_ua);
+	*slave_ua = (slave_limited_ua * chip->slave_pct) / 100;
+	*slave_ua = (*slave_ua * chip->taper_pct) / 100;
+	*master_ua = max(0, total_ua - *slave_ua);
+}
+
+/*
+ * FCC votable callback: program the elected total fast-charge current.
+ * When parallel is unavailable or disabled, main takes the full FCC;
+ * otherwise the total is split between main and the slave via
+ * split_fcc(). The slave is programmed before main so the combined
+ * current never exceeds the elected total.
+ */
+static int pl_fcc_vote_callback(struct votable *votable, void *data,
+			int total_fcc_ua, const char *client)
+{
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+	int rc, master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
+
+	if (total_fcc_ua < 0)
+		return 0;
+
+	if (!chip->main_psy)
+		return 0;
+
+	if (chip->pl_mode == POWER_SUPPLY_PARALLEL_NONE
+	    || get_effective_result_locked(chip->pl_disable_votable)) {
+		pval.intval = total_fcc_ua;
+		rc = power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+				&pval);
+		if (rc < 0)
+			pr_err("Couldn't set main fcc, rc=%d\n", rc);
+		return rc;
+	}
+
+	split_fcc(chip, total_fcc_ua, &master_fcc_ua, &slave_fcc_ua);
+	pval.intval = slave_fcc_ua;
+	rc = power_supply_set_property(chip->pl_psy,
+			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't set parallel fcc, rc=%d\n", rc);
+		return rc;
+	}
+
+	chip->slave_fcc_ua = slave_fcc_ua;
+
+	pval.intval = master_fcc_ua;
+	rc = power_supply_set_property(chip->main_psy,
+			POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &pval);
+	if (rc < 0) {
+		pr_err("Could not set main fcc, rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * total_fcc_ua == 0 passes the "< 0" guard above; skip the
+	 * distribution print in that case to avoid dividing by zero.
+	 */
+	if (total_fcc_ua > 0)
+		pl_dbg(chip, PR_PARALLEL,
+			"master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
+			master_fcc_ua, slave_fcc_ua,
+			(master_fcc_ua * 100) / total_fcc_ua,
+			(slave_fcc_ua * 100) / total_fcc_ua);
+
+	return 0;
+}
+
+#define PARALLEL_FLOAT_VOLTAGE_DELTA_UV 50000
+/*
+ * FV votable callback: program the elected float voltage on main and,
+ * when a parallel charger exists, on the slave with a +50 mV margin so
+ * the slave does not terminate before the master.
+ */
+static int pl_fv_vote_callback(struct votable *votable, void *data,
+			int fv_uv, const char *client)
+{
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+	int rc = 0;
+
+	if (fv_uv < 0)
+		return 0;
+
+	if (!chip->main_psy)
+		return 0;
+
+	pval.intval = fv_uv;
+	rc = power_supply_set_property(chip->main_psy,
+			POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't set main fv, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (chip->pl_mode != POWER_SUPPLY_PARALLEL_NONE) {
+		pval.intval = fv_uv + PARALLEL_FLOAT_VOLTAGE_DELTA_UV;
+		rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't set float on parallel rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * PL_DISABLE votable callback (SET_ANY: any voter disables parallel).
+ * Enable path: re-run FV/FCC elections, unsuspend the slave, redo the
+ * USBIN split, and kick the taper worker if the master is already in
+ * taper. Disable path: zero the split, suspend the slave (if present)
+ * and re-run the elections so main absorbs the full current. Resets
+ * settled_ua and taper_pct on every transition.
+ */
+static int pl_disable_vote_callback(struct votable *votable,
+		void *data, int pl_disable, const char *client)
+{
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	chip->settled_ua = 0;
+	chip->taper_pct = 100;
+
+	if (!pl_disable) { /* enable */
+		rerun_election(chip->fv_votable);
+		rerun_election(chip->fcc_votable);
+		/*
+		 * Enable will be called with a valid pl_psy always. The
+		 * PARALLEL_PSY_VOTER keeps it disabled unless a pl_psy
+		 * is seen.
+		 */
+		pval.intval = 0;
+		rc = power_supply_set_property(chip->pl_psy,
+				POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+		if (rc < 0)
+			pr_err("Couldn't change slave suspend state rc=%d\n",
+				rc);
+
+		if (chip->pl_mode == POWER_SUPPLY_PARALLEL_USBIN_USBIN)
+			split_settled(chip);
+		/*
+		 * we could have been enabled while in taper mode,
+		 *  start the taper work if so
+		 */
+		rc = power_supply_get_property(chip->batt_psy,
+				       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		} else {
+			if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+				pl_dbg(chip, PR_PARALLEL,
+					"pl enabled in Taper scheduing work\n");
+				schedule_delayed_work(&chip->pl_taper_work, 0);
+			}
+		}
+	} else {
+		if (chip->pl_mode == POWER_SUPPLY_PARALLEL_USBIN_USBIN)
+			split_settled(chip);
+
+		/* pl_psy may be NULL while in the disable branch */
+		if (chip->pl_psy) {
+			pval.intval = 1;
+			rc = power_supply_set_property(chip->pl_psy,
+					POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
+			if (rc < 0)
+				pr_err("Couldn't change slave suspend state rc=%d\n",
+					rc);
+		}
+		rerun_election(chip->fcc_votable);
+		rerun_election(chip->fv_votable);
+	}
+
+	pl_dbg(chip, PR_PARALLEL, "parallel charging %s\n",
+		   pl_disable ? "disabled" : "enabled");
+
+	return 0;
+}
+
+/*
+ * PL_AWAKE votable callback: hold the wakeup source while any client
+ * votes awake (e.g. during taper reduction), release it otherwise.
+ */
+static int pl_awake_vote_callback(struct votable *votable,
+			void *data, int awake, const char *client)
+{
+	struct pl_data *chip = data;
+
+	if (awake)
+		__pm_stay_awake(chip->pl_ws);
+	else
+		__pm_relax(chip->pl_ws);
+
+	pr_debug("client: %s awake: %d\n", client, awake);
+	return 0;
+}
+
+/* Lazily resolve the "main" power supply; true once it is cached. */
+static bool is_main_available(struct pl_data *chip)
+{
+	if (chip->main_psy)
+		return true;
+
+	chip->main_psy = power_supply_get_by_name("main");
+
+	return chip->main_psy != NULL;
+}
+
+/* Lazily resolve the "battery" power supply; true once it is cached. */
+static bool is_batt_available(struct pl_data *chip)
+{
+	if (chip->batt_psy)
+		return true;
+
+	chip->batt_psy = power_supply_get_by_name("battery");
+
+	return chip->batt_psy != NULL;
+}
+
+/*
+ * Lazily resolve the "parallel" power supply. On first discovery,
+ * read its PARALLEL_MODE and lift the PARALLEL_PSY_VOTER disable vote
+ * so parallel charging becomes eligible.
+ */
+static bool is_parallel_available(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (chip->pl_psy)
+		return true;
+
+	chip->pl_psy = power_supply_get_by_name("parallel");
+	if (!chip->pl_psy)
+		return false;
+
+	rc = power_supply_get_property(chip->pl_psy,
+			       POWER_SUPPLY_PROP_PARALLEL_MODE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get parallel mode from parallel rc=%d\n",
+				rc);
+		return false;
+	}
+	/*
+	 * Note that pl_mode is only updated to anything other than a _NONE
+	 * only after pl_psy is found. IOW pl_mode != _NONE implies that
+	 * pl_psy is present and valid
+	 */
+	chip->pl_mode = pval.intval;
+	vote(chip->pl_disable_votable, PARALLEL_PSY_VOTER, false, 0);
+
+	return true;
+}
+
+/*
+ * React to the master's charge-type transitions:
+ *  - neither fast nor taper: disable parallel via CHG_STATE_VOTER and
+ *    reset taper state;
+ *  - fast -> taper: kick off the taper-reduction worker;
+ *  - fast or taper: lift the CHG_STATE_VOTER disable vote.
+ * The last observed type is cached in chip->charge_type.
+ */
+static void handle_main_charge_type(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	rc = power_supply_get_property(chip->batt_psy,
+			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get batt charge type rc=%d\n", rc);
+		return;
+	}
+
+	/* a state that is neither fast nor taper disables parallel */
+	if ((pval.intval != POWER_SUPPLY_CHARGE_TYPE_FAST)
+		&& (pval.intval != POWER_SUPPLY_CHARGE_TYPE_TAPER)) {
+		vote(chip->pl_disable_votable, CHG_STATE_VOTER, true, 0);
+		chip->taper_pct = 100;
+		vote(chip->pl_disable_votable, TAPER_END_VOTER, false, 0);
+		vote(chip->pl_disable_votable, PL_TAPER_EARLY_BAD_VOTER,
+				false, 0);
+		chip->charge_type = pval.intval;
+		return;
+	}
+
+	/* handle taper charge entry */
+	if (chip->charge_type == POWER_SUPPLY_CHARGE_TYPE_FAST
+		&& (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER)) {
+		chip->charge_type = pval.intval;
+		pl_dbg(chip, PR_PARALLEL, "taper entry scheduling work\n");
+		schedule_delayed_work(&chip->pl_taper_work, 0);
+		return;
+	}
+
+	/* handle fast/taper charge entry */
+	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER
+			|| pval.intval == POWER_SUPPLY_CHARGE_TYPE_FAST) {
+		pl_dbg(chip, PR_PARALLEL, "chg_state enabling parallel\n");
+		vote(chip->pl_disable_votable, CHG_STATE_VOTER, false, 0);
+		chip->charge_type = pval.intval;
+		return;
+	}
+
+	/* remember the new state only if it isn't any of the above */
+	chip->charge_type = pval.intval;
+}
+
+/*
+ * Re-split the input current if the AICL-settled value changed since
+ * the last split. Only applies when parallel is enabled and running in
+ * USBIN-USBIN mode.
+ */
+static void handle_settled_aicl_split(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (!get_effective_result(chip->pl_disable_votable)
+			&& chip->pl_mode == POWER_SUPPLY_PARALLEL_USBIN_USBIN) {
+		/*
+		 * call aicl split only when USBIN_USBIN and enabled
+		 * and if aicl changed
+		 */
+		rc = power_supply_get_property(chip->main_psy,
+				       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+				       &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+			return;
+		}
+		if (chip->settled_ua != pval.intval) {
+			chip->settled_ua = pval.intval;
+			split_settled(chip);
+		}
+	}
+}
+
+/*
+ * The slave should never reach taper itself (its float voltage is set
+ * 50 mV above the master's). If it does, treat it as an anomaly and
+ * permanently disable parallel via PL_TAPER_EARLY_BAD_VOTER.
+ */
+static void handle_parallel_in_taper(struct pl_data *chip)
+{
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (get_effective_result_locked(chip->pl_disable_votable))
+		return;
+
+	if (!chip->pl_psy)
+		return;
+
+	rc = power_supply_get_property(chip->pl_psy,
+			       POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get pl charge type rc=%d\n", rc);
+		return;
+	}
+
+	/*
+	 * if parallel is seen in taper mode ever, that is an anomaly and
+	 * we disable parallel charger
+	 */
+	if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
+		vote(chip->pl_disable_votable, PL_TAPER_EARLY_BAD_VOTER,
+				true, 0);
+		return;
+	}
+}
+
+/*
+ * Deferred handler for power-supply change notifications: resolve the
+ * main/battery/parallel supplies, then evaluate all state transitions.
+ * Bails out until both "main" and "battery" supplies exist.
+ */
+static void status_change_work(struct work_struct *work)
+{
+	struct pl_data *chip = container_of(work,
+			struct pl_data, status_change_work);
+
+	if (!is_main_available(chip))
+		return;
+
+	if (!is_batt_available(chip))
+		return;
+
+	/* best-effort: parallel may appear later */
+	is_parallel_available(chip);
+
+	handle_main_charge_type(chip);
+	handle_settled_aicl_split(chip);
+	handle_parallel_in_taper(chip);
+}
+
+/*
+ * Power-supply notifier: schedule status_change_work when the
+ * "parallel" or "battery" supply reports a property change. Runs in
+ * notifier context, hence the deferral to a workqueue.
+ */
+static int pl_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct pl_data *chip = container_of(nb, struct pl_data, nb);
+
+	if (ev != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if ((strcmp(psy->desc->name, "parallel") == 0)
+	    || (strcmp(psy->desc->name, "battery") == 0))
+		schedule_work(&chip->status_change_work);
+
+	return NOTIFY_OK;
+}
+
+/* Register pl_notifier_call for power-supply events; 0 on success. */
+static int pl_register_notifier(struct pl_data *chip)
+{
+	int rc;
+
+	chip->nb.notifier_call = pl_notifier_call;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/*
+ * Evaluate the initial charger state synchronously by invoking the
+ * status-change handler once; always returns 0.
+ */
+static int pl_determine_initial_status(struct pl_data *chip)
+{
+	status_change_work(&chip->status_change_work);
+	return 0;
+}
+
+/*
+ * One-time setup: allocate pl_data, create the FCC/FV/PL_DISABLE/
+ * PL_AWAKE votables, register the power-supply notifier, determine the
+ * initial state and expose the "qcom-battery" sysfs class. On success
+ * the chip pointer is published in the_chip for pl_deinit().
+ */
+static int pl_init(void)
+{
+	struct pl_data *chip;
+	int rc = 0;
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+	chip->slave_pct = 50;
+
+	chip->pl_ws = wakeup_source_register("qcom-battery");
+	if (!chip->pl_ws) {
+		/* was falling through with rc == 0, reporting success */
+		rc = -ENOMEM;
+		goto cleanup;
+	}
+
+	chip->fcc_votable = create_votable("FCC", VOTE_MIN,
+					pl_fcc_vote_callback,
+					chip);
+	if (IS_ERR(chip->fcc_votable)) {
+		rc = PTR_ERR(chip->fcc_votable);
+		goto release_wakeup_source;
+	}
+
+	chip->fv_votable = create_votable("FV", VOTE_MAX,
+					pl_fv_vote_callback,
+					chip);
+	if (IS_ERR(chip->fv_votable)) {
+		rc = PTR_ERR(chip->fv_votable);
+		goto destroy_votable;
+	}
+
+	chip->pl_disable_votable = create_votable("PL_DISABLE", VOTE_SET_ANY,
+					pl_disable_vote_callback,
+					chip);
+	if (IS_ERR(chip->pl_disable_votable)) {
+		rc = PTR_ERR(chip->pl_disable_votable);
+		goto destroy_votable;
+	}
+	/* keep parallel disabled until a charge state and pl_psy are seen */
+	vote(chip->pl_disable_votable, CHG_STATE_VOTER, true, 0);
+	vote(chip->pl_disable_votable, TAPER_END_VOTER, false, 0);
+	vote(chip->pl_disable_votable, PARALLEL_PSY_VOTER, true, 0);
+
+	chip->pl_awake_votable = create_votable("PL_AWAKE", VOTE_SET_ANY,
+					pl_awake_vote_callback,
+					chip);
+	if (IS_ERR(chip->pl_awake_votable)) {
+		/* was PTR_ERR(chip->pl_disable_votable): copy-paste bug */
+		rc = PTR_ERR(chip->pl_awake_votable);
+		goto destroy_votable;
+	}
+
+	INIT_WORK(&chip->status_change_work, status_change_work);
+	INIT_DELAYED_WORK(&chip->pl_taper_work, pl_taper_work);
+
+	rc = pl_register_notifier(chip);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		/* registration failed: must not unregister the notifier */
+		goto destroy_votable;
+	}
+
+	rc = pl_determine_initial_status(chip);
+	if (rc < 0) {
+		pr_err("Couldn't determine initial status rc=%d\n", rc);
+		goto unreg_notifier;
+	}
+
+	/* were comma operators; semicolons intended */
+	chip->qcom_batt_class.name = "qcom-battery";
+	chip->qcom_batt_class.owner = THIS_MODULE;
+	chip->qcom_batt_class.class_attrs = pl_attributes;
+
+	rc = class_register(&chip->qcom_batt_class);
+	if (rc < 0) {
+		pr_err("couldn't register pl_data sysfs class rc = %d\n", rc);
+		goto unreg_notifier;
+	}
+
+	/* publish only after full success so pl_deinit() sees valid state */
+	the_chip = chip;
+
+	return rc;
+
+unreg_notifier:
+	power_supply_unreg_notifier(&chip->nb);
+destroy_votable:
+	destroy_votable(chip->pl_awake_votable);
+	destroy_votable(chip->pl_disable_votable);
+	destroy_votable(chip->fv_votable);
+	destroy_votable(chip->fcc_votable);
+release_wakeup_source:
+	wakeup_source_unregister(chip->pl_ws);
+cleanup:
+	kfree(chip);
+	return rc;
+}
+
+/*
+ * Module teardown: release everything pl_init() acquired. Guards
+ * against the_chip never having been published (init failed), which
+ * previously caused a NULL dereference here.
+ */
+static void pl_deinit(void)
+{
+	struct pl_data *chip = the_chip;
+
+	if (!chip)
+		return;
+
+	/* unregister the sysfs class registered by pl_init() */
+	class_unregister(&chip->qcom_batt_class);
+	power_supply_unreg_notifier(&chip->nb);
+	destroy_votable(chip->pl_awake_votable);
+	destroy_votable(chip->pl_disable_votable);
+	destroy_votable(chip->fv_votable);
+	destroy_votable(chip->fcc_votable);
+	wakeup_source_unregister(chip->pl_ws);
+	kfree(chip);
+	the_chip = NULL;
+}
+
+module_init(pl_init);
+module_exit(pl_deinit);	/* was missing the terminating semicolon */
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. parallel battery charging driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 07bde30524ac..0d3fcc2ede86 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -156,7 +156,8 @@ enum fg_sram_param_id {
FG_SRAM_ESR_TIMER_CHG_INIT,
FG_SRAM_SYS_TERM_CURR,
FG_SRAM_CHG_TERM_CURR,
- FG_SRAM_DELTA_SOC_THR,
+ FG_SRAM_DELTA_MSOC_THR,
+ FG_SRAM_DELTA_BSOC_THR,
FG_SRAM_RECHARGE_SOC_THR,
FG_SRAM_RECHARGE_VBATT_THR,
FG_SRAM_KI_COEFF_MED_DISCHG,
@@ -205,6 +206,7 @@ enum wa_flags {
struct fg_dt_props {
bool force_load_profile;
bool hold_soc_while_full;
+ bool auto_recharge_soc;
int cutoff_volt_mv;
int empty_volt_mv;
int vbatt_low_thr_mv;
@@ -322,6 +324,7 @@ struct fg_chip {
struct mutex bus_lock;
struct mutex sram_rw_lock;
struct mutex batt_avg_lock;
+ struct mutex charge_full_lock;
u32 batt_soc_base;
u32 batt_info_base;
u32 mem_if_base;
@@ -335,6 +338,9 @@ struct fg_chip {
int last_soc;
int last_batt_temp;
int health;
+ int maint_soc;
+ int delta_soc;
+ int last_msoc;
bool profile_available;
bool profile_loaded;
bool battery_missing;
@@ -345,6 +351,7 @@ struct fg_chip {
bool esr_fcc_ctrl_en;
bool soc_reporting_ready;
bool esr_flt_cold_temp_en;
+ bool bsoc_delta_irq_en;
struct completion soc_update;
struct completion soc_ready;
struct delayed_work profile_load_work;
@@ -369,6 +376,7 @@ struct fg_log_buffer {
/* transaction parameters */
struct fg_trans {
struct fg_chip *chip;
+ struct mutex fg_dfs_lock; /* Prevent thread concurrency */
struct fg_log_buffer *log;
u32 cnt;
u16 addr;
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
index a98ff7d765e3..2dc76182ed15 100644
--- a/drivers/power/supply/qcom/fg-memif.c
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -289,7 +289,7 @@ static int fg_check_iacs_ready(struct fg_chip *chip)
}
if (!tries) {
- pr_err("IACS_RDY not set\n");
+ pr_err("IACS_RDY not set, opr_sts: %d\n", ima_opr_sts);
/* check for error condition */
rc = fg_clear_ima_errors_if_any(chip, false);
if (rc < 0) {
@@ -544,9 +544,9 @@ static int fg_get_beat_count(struct fg_chip *chip, u8 *count)
int fg_interleaved_mem_read(struct fg_chip *chip, u16 address, u8 offset,
u8 *val, int len)
{
- int rc = 0;
+ int rc = 0, ret;
u8 start_beat_count, end_beat_count, count = 0;
- bool retry_once = false;
+ bool retry = false;
if (offset > 3) {
pr_err("offset too large %d\n", offset);
@@ -554,10 +554,18 @@ int fg_interleaved_mem_read(struct fg_chip *chip, u16 address, u8 offset,
}
retry:
+ if (count >= RETRY_COUNT) {
+ pr_err("Tried %d times\n", RETRY_COUNT);
+ retry = false;
+ goto out;
+ }
+
rc = fg_interleaved_mem_config(chip, val, address, offset, len,
FG_READ);
if (rc < 0) {
pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+ count++;
+ retry = true;
goto out;
}
@@ -565,18 +573,21 @@ retry:
rc = fg_get_beat_count(chip, &start_beat_count);
if (rc < 0) {
pr_err("failed to read beat count rc=%d\n", rc);
+ count++;
+ retry = true;
goto out;
}
/* read data */
rc = __fg_interleaved_mem_read(chip, address, offset, val, len);
if (rc < 0) {
- if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
- count++;
+ count++;
+ if (rc == -EAGAIN) {
pr_err("IMA access failed retry_count = %d\n", count);
goto retry;
}
pr_err("failed to read SRAM address rc = %d\n", rc);
+ retry = true;
goto out;
}
@@ -584,28 +595,31 @@ retry:
rc = fg_get_beat_count(chip, &end_beat_count);
if (rc < 0) {
pr_err("failed to read beat count rc=%d\n", rc);
+ count++;
+ retry = true;
goto out;
}
fg_dbg(chip, FG_SRAM_READ, "Start beat_count = %x End beat_count = %x\n",
start_beat_count, end_beat_count);
- if (start_beat_count != end_beat_count && !retry_once) {
+ if (start_beat_count != end_beat_count) {
fg_dbg(chip, FG_SRAM_READ, "Beat count(%d/%d) do not match - retry transaction\n",
start_beat_count, end_beat_count);
- retry_once = true;
+ count++;
+ retry = true;
}
out:
/* Release IMA access */
- rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+ ret = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT, 0);
- if (rc < 0) {
- pr_err("failed to reset IMA access bit rc = %d\n", rc);
- return rc;
+ if (rc < 0 && ret < 0) {
+ pr_err("failed to reset IMA access bit ret = %d\n", ret);
+ return ret;
}
- if (retry_once) {
- retry_once = false;
+ if (retry) {
+ retry = false;
goto retry;
}
@@ -615,8 +629,9 @@ out:
int fg_interleaved_mem_write(struct fg_chip *chip, u16 address, u8 offset,
u8 *val, int len, bool atomic_access)
{
- int rc = 0;
+ int rc = 0, ret;
u8 start_beat_count, end_beat_count, count = 0;
+ bool retry = false;
if (offset > 3) {
pr_err("offset too large %d\n", offset);
@@ -624,10 +639,18 @@ int fg_interleaved_mem_write(struct fg_chip *chip, u16 address, u8 offset,
}
retry:
+ if (count >= RETRY_COUNT) {
+ pr_err("Tried %d times\n", RETRY_COUNT);
+ retry = false;
+ goto out;
+ }
+
rc = fg_interleaved_mem_config(chip, val, address, offset, len,
FG_WRITE);
if (rc < 0) {
pr_err("failed to configure SRAM for IMA rc = %d\n", rc);
+ count++;
+ retry = true;
goto out;
}
@@ -635,18 +658,21 @@ retry:
rc = fg_get_beat_count(chip, &start_beat_count);
if (rc < 0) {
pr_err("failed to read beat count rc=%d\n", rc);
+ count++;
+ retry = true;
goto out;
}
/* write data */
rc = __fg_interleaved_mem_write(chip, address, offset, val, len);
if (rc < 0) {
+ count++;
if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
- count++;
pr_err("IMA access failed retry_count = %d\n", count);
goto retry;
}
pr_err("failed to write SRAM address rc = %d\n", rc);
+ retry = true;
goto out;
}
@@ -654,6 +680,8 @@ retry:
rc = fg_get_beat_count(chip, &end_beat_count);
if (rc < 0) {
pr_err("failed to read beat count rc=%d\n", rc);
+ count++;
+ retry = true;
goto out;
}
@@ -662,11 +690,19 @@ retry:
start_beat_count, end_beat_count);
out:
/* Release IMA access */
- rc = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
+ ret = fg_masked_write(chip, MEM_IF_MEM_INTF_CFG(chip),
MEM_ACCESS_REQ_BIT | IACS_SLCT_BIT, 0);
- if (rc < 0)
- pr_err("failed to reset IMA access bit rc = %d\n", rc);
+ if (rc < 0 && ret < 0) {
+ pr_err("failed to reset IMA access bit ret = %d\n", ret);
+ return ret;
+ }
+
+ if (retry) {
+ retry = false;
+ goto retry;
+ }
+ /* Return the error we got before releasing memory access */
return rc;
}
diff --git a/drivers/power/supply/qcom/fg-util.c b/drivers/power/supply/qcom/fg-util.c
index 41d2af0fbdc6..d5c4f8ffaac3 100644
--- a/drivers/power/supply/qcom/fg-util.c
+++ b/drivers/power/supply/qcom/fg-util.c
@@ -486,6 +486,7 @@ static int fg_sram_dfs_open(struct inode *inode, struct file *file)
trans->addr = dbgfs_data.addr;
trans->chip = dbgfs_data.chip;
trans->offset = trans->addr;
+ mutex_init(&trans->fg_dfs_lock);
file->private_data = trans;
return 0;
@@ -497,6 +498,7 @@ static int fg_sram_dfs_close(struct inode *inode, struct file *file)
if (trans && trans->log && trans->data) {
file->private_data = NULL;
+ mutex_destroy(&trans->fg_dfs_lock);
devm_kfree(trans->chip->dev, trans->log);
devm_kfree(trans->chip->dev, trans->data);
devm_kfree(trans->chip->dev, trans);
@@ -648,10 +650,13 @@ static ssize_t fg_sram_dfs_reg_read(struct file *file, char __user *buf,
size_t ret;
size_t len;
+ mutex_lock(&trans->fg_dfs_lock);
/* Is the the log buffer empty */
if (log->rpos >= log->wpos) {
- if (get_log_data(trans) <= 0)
- return 0;
+ if (get_log_data(trans) <= 0) {
+ len = 0;
+ goto unlock_mutex;
+ }
}
len = min(count, log->wpos - log->rpos);
@@ -659,7 +664,8 @@ static ssize_t fg_sram_dfs_reg_read(struct file *file, char __user *buf,
ret = copy_to_user(buf, &log->data[log->rpos], len);
if (ret == len) {
pr_err("error copy sram register values to user\n");
- return -EFAULT;
+ len = -EFAULT;
+ goto unlock_mutex;
}
/* 'ret' is the number of bytes not copied */
@@ -667,6 +673,9 @@ static ssize_t fg_sram_dfs_reg_read(struct file *file, char __user *buf,
*ppos += len;
log->rpos += len;
+
+unlock_mutex:
+ mutex_unlock(&trans->fg_dfs_lock);
return len;
}
@@ -691,10 +700,13 @@ static ssize_t fg_sram_dfs_reg_write(struct file *file, const char __user *buf,
struct fg_trans *trans = file->private_data;
u32 address = trans->addr;
+ mutex_lock(&trans->fg_dfs_lock);
/* Make a copy of the user data */
kbuf = kmalloc(count + 1, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
ret = copy_from_user(kbuf, buf, count);
if (ret == count) {
@@ -744,6 +756,8 @@ static ssize_t fg_sram_dfs_reg_write(struct file *file, const char __user *buf,
free_buf:
kfree(kbuf);
+unlock_mutex:
+ mutex_unlock(&trans->fg_dfs_lock);
return ret;
}
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index a441ff310e9f..12f3d448c891 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -48,8 +48,10 @@
#define KI_COEFF_HI_DISCHG_OFFSET 0
#define KI_COEFF_LOW_DISCHG_WORD 10
#define KI_COEFF_LOW_DISCHG_OFFSET 2
-#define DELTA_SOC_THR_WORD 12
-#define DELTA_SOC_THR_OFFSET 3
+#define DELTA_MSOC_THR_WORD 12
+#define DELTA_MSOC_THR_OFFSET 3
+#define DELTA_BSOC_THR_WORD 13
+#define DELTA_BSOC_THR_OFFSET 2
#define RECHARGE_SOC_THR_WORD 14
#define RECHARGE_SOC_THR_OFFSET 0
#define CHG_TERM_CURR_WORD 14
@@ -113,8 +115,10 @@
#define KI_COEFF_MED_DISCHG_v2_OFFSET 0
#define KI_COEFF_HI_DISCHG_v2_WORD 10
#define KI_COEFF_HI_DISCHG_v2_OFFSET 1
-#define DELTA_SOC_THR_v2_WORD 13
-#define DELTA_SOC_THR_v2_OFFSET 0
+#define DELTA_BSOC_THR_v2_WORD 12
+#define DELTA_BSOC_THR_v2_OFFSET 3
+#define DELTA_MSOC_THR_v2_WORD 13
+#define DELTA_MSOC_THR_v2_OFFSET 0
#define RECHARGE_SOC_THR_v2_WORD 14
#define RECHARGE_SOC_THR_v2_OFFSET 1
#define CHG_TERM_CURR_v2_WORD 15
@@ -143,6 +147,8 @@ static void fg_encode_current(struct fg_sram_param *sp,
static void fg_encode_default(struct fg_sram_param *sp,
enum fg_sram_param_id id, int val, u8 *buf);
+static struct fg_irq_info fg_irqs[FG_IRQ_MAX];
+
#define PARAM(_id, _addr_word, _addr_byte, _len, _num, _den, _offset, \
_enc, _dec) \
[FG_SRAM_##_id] = { \
@@ -188,8 +194,10 @@ static struct fg_sram_param pmi8998_v1_sram_params[] = {
1000000, 122070, 0, fg_encode_current, NULL),
PARAM(CHG_TERM_CURR, CHG_TERM_CURR_WORD, CHG_TERM_CURR_OFFSET, 1,
100000, 390625, 0, fg_encode_current, NULL),
- PARAM(DELTA_SOC_THR, DELTA_SOC_THR_WORD, DELTA_SOC_THR_OFFSET, 1, 2048,
- 100, 0, fg_encode_default, NULL),
+ PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_WORD, DELTA_MSOC_THR_OFFSET, 1,
+ 2048, 100, 0, fg_encode_default, NULL),
+ PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_WORD, DELTA_BSOC_THR_OFFSET, 1,
+ 2048, 100, 0, fg_encode_default, NULL),
PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_WORD, RECHARGE_SOC_THR_OFFSET,
1, 256, 100, 0, fg_encode_default, NULL),
PARAM(ESR_TIMER_DISCHG_MAX, ESR_TIMER_DISCHG_MAX_WORD,
@@ -248,8 +256,10 @@ static struct fg_sram_param pmi8998_v2_sram_params[] = {
1000000, 122070, 0, fg_encode_current, NULL),
PARAM(CHG_TERM_CURR, CHG_TERM_CURR_v2_WORD, CHG_TERM_CURR_v2_OFFSET, 1,
100000, 390625, 0, fg_encode_current, NULL),
- PARAM(DELTA_SOC_THR, DELTA_SOC_THR_v2_WORD, DELTA_SOC_THR_v2_OFFSET, 1,
- 2048, 100, 0, fg_encode_default, NULL),
+ PARAM(DELTA_MSOC_THR, DELTA_MSOC_THR_v2_WORD, DELTA_MSOC_THR_v2_OFFSET,
+ 1, 2048, 100, 0, fg_encode_default, NULL),
+ PARAM(DELTA_BSOC_THR, DELTA_BSOC_THR_v2_WORD, DELTA_BSOC_THR_v2_OFFSET,
+ 1, 2048, 100, 0, fg_encode_default, NULL),
PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_v2_WORD,
RECHARGE_SOC_THR_v2_OFFSET, 1, 256, 100, 0, fg_encode_default,
NULL),
@@ -684,6 +694,20 @@ static int fg_get_msoc_raw(struct fg_chip *chip, int *val)
return 0;
}
+#define FULL_CAPACITY 100
+#define FULL_SOC_RAW 255
+static int fg_get_msoc(struct fg_chip *chip, int *msoc)
+{
+ int rc;
+
+ rc = fg_get_msoc_raw(chip, msoc);
+ if (rc < 0)
+ return rc;
+
+ *msoc = DIV_ROUND_CLOSEST(*msoc * FULL_CAPACITY, FULL_SOC_RAW);
+ return 0;
+}
+
static bool is_batt_empty(struct fg_chip *chip)
{
u8 status;
@@ -705,7 +729,7 @@ static bool is_batt_empty(struct fg_chip *chip)
return false;
}
- rc = fg_get_msoc_raw(chip, &msoc);
+ rc = fg_get_msoc(chip, &msoc);
if (!rc)
pr_warn("batt_soc_rt_sts: %x vbatt: %d uV msoc:%d\n", status,
vbatt_uv, msoc);
@@ -770,9 +794,8 @@ static bool is_debug_batt_id(struct fg_chip *chip)
return false;
}
-#define FULL_CAPACITY 100
-#define FULL_SOC_RAW 255
#define DEBUG_BATT_SOC 67
+#define BATT_MISS_SOC 50
#define EMPTY_SOC 0
static int fg_get_prop_capacity(struct fg_chip *chip, int *val)
{
@@ -783,6 +806,16 @@ static int fg_get_prop_capacity(struct fg_chip *chip, int *val)
return 0;
}
+ if (chip->fg_restarting) {
+ *val = chip->last_soc;
+ return 0;
+ }
+
+ if (chip->battery_missing) {
+ *val = BATT_MISS_SOC;
+ return 0;
+ }
+
if (is_batt_empty(chip)) {
*val = EMPTY_SOC;
return 0;
@@ -793,11 +826,14 @@ static int fg_get_prop_capacity(struct fg_chip *chip, int *val)
return 0;
}
- rc = fg_get_msoc_raw(chip, &msoc);
+ rc = fg_get_msoc(chip, &msoc);
if (rc < 0)
return rc;
- *val = DIV_ROUND_CLOSEST(msoc * FULL_CAPACITY, FULL_SOC_RAW);
+ if (chip->delta_soc > 0)
+ *val = chip->maint_soc;
+ else
+ *val = msoc;
return 0;
}
@@ -981,6 +1017,38 @@ static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
/* Other functions HERE */
+static void fg_notify_charger(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0, };
+ int rc;
+
+ if (!chip->batt_psy)
+ return;
+
+ if (!chip->profile_available)
+ return;
+
+ prop.intval = chip->bp.float_volt_uv;
+ rc = power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX, &prop);
+ if (rc < 0) {
+ pr_err("Error in setting voltage_max property on batt_psy, rc=%d\n",
+ rc);
+ return;
+ }
+
+ prop.intval = chip->bp.fastchg_curr_ma * 1000;
+ rc = power_supply_set_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &prop);
+ if (rc < 0) {
+ pr_err("Error in setting constant_charge_current_max property on batt_psy, rc=%d\n",
+ rc);
+ return;
+ }
+
+ fg_dbg(chip, FG_STATUS, "Notified charger on float voltage and FCC\n");
+}
+
static int fg_awake_cb(struct votable *votable, void *data, int awake,
const char *client)
{
@@ -995,14 +1063,18 @@ static int fg_awake_cb(struct votable *votable, void *data, int awake,
return 0;
}
-static bool is_charger_available(struct fg_chip *chip)
+static bool batt_psy_initialized(struct fg_chip *chip)
{
- if (!chip->batt_psy)
- chip->batt_psy = power_supply_get_by_name("battery");
+ if (chip->batt_psy)
+ return true;
+ chip->batt_psy = power_supply_get_by_name("battery");
if (!chip->batt_psy)
return false;
+ /* batt_psy is initialized, set the fcc and fv */
+ fg_notify_charger(chip);
+
return true;
}
@@ -1350,6 +1422,36 @@ static int fg_adjust_ki_coeff_dischg(struct fg_chip *chip)
return 0;
}
+static int fg_set_recharge_voltage(struct fg_chip *chip, int voltage_mv)
+{
+ u8 buf;
+ int rc;
+
+ if (chip->dt.auto_recharge_soc)
+ return 0;
+
+ /* This configuration is available only for pmicobalt v2.0 and above */
+ if (chip->wa_flags & PMI8998_V1_REV_WA)
+ return 0;
+
+ fg_dbg(chip, FG_STATUS, "Setting recharge voltage to %dmV\n",
+ voltage_mv);
+ fg_encode(chip->sp, FG_SRAM_RECHARGE_VBATT_THR, voltage_mv, &buf);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_RECHARGE_VBATT_THR].addr_word,
+ chip->sp[FG_SRAM_RECHARGE_VBATT_THR].addr_byte,
+ &buf, chip->sp[FG_SRAM_RECHARGE_VBATT_THR].len,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing recharge_vbatt_thr, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+#define AUTO_RECHG_VOLT_LOW_LIMIT_MV 3700
static int fg_charge_full_update(struct fg_chip *chip)
{
union power_supply_propval prop = {0, };
@@ -1359,14 +1461,25 @@ static int fg_charge_full_update(struct fg_chip *chip)
if (!chip->dt.hold_soc_while_full)
return 0;
- if (!is_charger_available(chip))
+ if (!batt_psy_initialized(chip))
return 0;
+ mutex_lock(&chip->charge_full_lock);
+ if (!chip->charge_done && chip->bsoc_delta_irq_en) {
+ disable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
+ disable_irq_nosync(fg_irqs[BSOC_DELTA_IRQ].irq);
+ chip->bsoc_delta_irq_en = false;
+ } else if (chip->charge_done && !chip->bsoc_delta_irq_en) {
+ enable_irq(fg_irqs[BSOC_DELTA_IRQ].irq);
+ enable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
+ chip->bsoc_delta_irq_en = true;
+ }
+
rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
&prop);
if (rc < 0) {
pr_err("Error in getting battery health, rc=%d\n", rc);
- return rc;
+ goto out;
}
chip->health = prop.intval;
@@ -1376,33 +1489,77 @@ static int fg_charge_full_update(struct fg_chip *chip)
rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &bsoc);
if (rc < 0) {
pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
- return rc;
+ goto out;
}
/* We need 2 most significant bytes here */
bsoc = (u32)bsoc >> 16;
- rc = fg_get_prop_capacity(chip, &msoc);
+ rc = fg_get_msoc(chip, &msoc);
if (rc < 0) {
- pr_err("Error in getting capacity, rc=%d\n", rc);
- return rc;
+ pr_err("Error in getting msoc, rc=%d\n", rc);
+ goto out;
}
- fg_dbg(chip, FG_STATUS, "msoc: %d health: %d status: %d\n", msoc,
- chip->health, chip->charge_status);
- if (chip->charge_done) {
- if (msoc >= 99 && chip->health == POWER_SUPPLY_HEALTH_GOOD)
+ fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n",
+ msoc, bsoc, chip->health, chip->charge_status,
+ chip->charge_full);
+ if (chip->charge_done && !chip->charge_full) {
+ if (msoc >= 99 && chip->health == POWER_SUPPLY_HEALTH_GOOD) {
+ fg_dbg(chip, FG_STATUS, "Setting charge_full to true\n");
chip->charge_full = true;
- else
+ /*
+ * Lower the recharge voltage so that VBAT_LT_RECHG
+ * signal will not be asserted soon.
+ */
+ rc = fg_set_recharge_voltage(chip,
+ AUTO_RECHG_VOLT_LOW_LIMIT_MV);
+ if (rc < 0) {
+ pr_err("Error in reducing recharge voltage, rc=%d\n",
+ rc);
+ goto out;
+ }
+ } else {
fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
msoc);
- } else if ((bsoc >> 8) <= recharge_soc) {
- fg_dbg(chip, FG_STATUS, "bsoc: %d recharge_soc: %d\n",
- bsoc >> 8, recharge_soc);
+ }
+ } else if ((bsoc >> 8) <= recharge_soc && chip->charge_full) {
+ chip->delta_soc = FULL_CAPACITY - msoc;
+
+ /*
+ * We're spreading out the delta SOC over every 10% change
+ * in monotonic SOC. We cannot spread more than 9% in the
+ * range of 0-100 skipping the first 10%.
+ */
+ if (chip->delta_soc > 9) {
+ chip->delta_soc = 0;
+ chip->maint_soc = 0;
+ } else {
+ chip->maint_soc = FULL_CAPACITY;
+ chip->last_msoc = msoc;
+ }
+
chip->charge_full = false;
+
+ /*
+ * Raise the recharge voltage so that VBAT_LT_RECHG signal
+	 * will be asserted as soon as the battery SOC drops below
+ * the recharge SOC threshold.
+ */
+ rc = fg_set_recharge_voltage(chip,
+ chip->dt.recharge_volt_thr_mv);
+ if (rc < 0) {
+ pr_err("Error in setting recharge voltage, rc=%d\n",
+ rc);
+ goto out;
+ }
+ fg_dbg(chip, FG_STATUS, "bsoc: %d recharge_soc: %d delta_soc: %d\n",
+ bsoc >> 8, recharge_soc, chip->delta_soc);
+ } else {
+ goto out;
}
if (!chip->charge_full)
- return 0;
+ goto out;
/*
* During JEITA conditions, charge_full can happen early. FULL_SOC
@@ -1413,18 +1570,20 @@ static int fg_charge_full_update(struct fg_chip *chip)
FG_IMA_ATOMIC);
if (rc < 0) {
pr_err("failed to write full_soc rc=%d\n", rc);
- return rc;
+ goto out;
}
rc = fg_sram_write(chip, MONOTONIC_SOC_WORD, MONOTONIC_SOC_OFFSET,
full_soc, 2, FG_IMA_ATOMIC);
if (rc < 0) {
pr_err("failed to write monotonic_soc rc=%d\n", rc);
- return rc;
+ goto out;
}
fg_dbg(chip, FG_STATUS, "Set charge_full to true @ soc %d\n", msoc);
- return 0;
+out:
+ mutex_unlock(&chip->charge_full_lock);
+ return rc;
}
#define RCONN_CONFIG_BIT BIT(0)
@@ -1503,13 +1662,16 @@ static int fg_rconn_config(struct fg_chip *chip)
static int fg_set_recharge_soc(struct fg_chip *chip, int recharge_soc)
{
- u8 buf[4];
+ u8 buf;
int rc;
- fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR, recharge_soc, buf);
+ if (!chip->dt.auto_recharge_soc)
+ return 0;
+
+ fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR, recharge_soc, &buf);
rc = fg_sram_write(chip,
chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_word,
- chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_byte, buf,
+ chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_byte, &buf,
chip->sp[FG_SRAM_RECHARGE_SOC_THR].len, FG_IMA_DEFAULT);
if (rc < 0) {
pr_err("Error in writing recharge_soc_thr, rc=%d\n", rc);
@@ -1523,6 +1685,9 @@ static int fg_adjust_recharge_soc(struct fg_chip *chip)
{
int rc, msoc, recharge_soc, new_recharge_soc = 0;
+ if (!chip->dt.auto_recharge_soc)
+ return 0;
+
recharge_soc = chip->dt.recharge_soc_thr;
/*
* If the input is present and charging had been terminated, adjust
@@ -1532,13 +1697,12 @@ static int fg_adjust_recharge_soc(struct fg_chip *chip)
if (is_input_present(chip) && !chip->recharge_soc_adjusted
&& chip->charge_done) {
		/* Get monotonic SOC scaled to a percentage for calculation */
- rc = fg_get_msoc_raw(chip, &msoc);
+ rc = fg_get_msoc(chip, &msoc);
if (rc < 0) {
pr_err("Error in getting msoc, rc=%d\n", rc);
return rc;
}
- msoc = DIV_ROUND_CLOSEST(msoc * FULL_CAPACITY, FULL_SOC_RAW);
/* Adjust the recharge_soc threshold */
new_recharge_soc = msoc - (FULL_CAPACITY - recharge_soc);
} else if (chip->recharge_soc_adjusted && (!is_input_present(chip)
@@ -1723,7 +1887,7 @@ static void status_change_work(struct work_struct *work)
union power_supply_propval prop = {0, };
int rc;
- if (!is_charger_available(chip)) {
+ if (!batt_psy_initialized(chip)) {
fg_dbg(chip, FG_STATUS, "Charger not available?!\n");
goto out;
}
@@ -2026,7 +2190,6 @@ wait:
goto wait;
} else if (rc <= 0) {
pr_err("wait for soc_ready timed out rc=%d\n", rc);
- goto out;
}
rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
@@ -2040,37 +2203,6 @@ out:
return rc;
}
-static void fg_notify_charger(struct fg_chip *chip)
-{
- union power_supply_propval prop = {0, };
- int rc;
-
- if (!is_charger_available(chip)) {
- pr_warn("Charger not available yet?\n");
- return;
- }
-
- prop.intval = chip->bp.float_volt_uv;
- rc = power_supply_set_property(chip->batt_psy,
- POWER_SUPPLY_PROP_VOLTAGE_MAX, &prop);
- if (rc < 0) {
- pr_err("Error in setting voltage_max property on batt_psy, rc=%d\n",
- rc);
- return;
- }
-
- prop.intval = chip->bp.fastchg_curr_ma * 1000;
- rc = power_supply_set_property(chip->batt_psy,
- POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, &prop);
- if (rc < 0) {
- pr_err("Error in setting constant_charge_current_max property on batt_psy, rc=%d\n",
- rc);
- return;
- }
-
- fg_dbg(chip, FG_STATUS, "Notified charger on float voltage and FCC\n");
-}
-
static void profile_load_work(struct work_struct *work)
{
struct fg_chip *chip = container_of(work,
@@ -2136,6 +2268,7 @@ done:
rc);
}
+ batt_psy_initialized(chip);
fg_notify_charger(chip);
chip->profile_loaded = true;
chip->soc_reporting_ready = true;
@@ -2279,7 +2412,6 @@ reschedule:
msecs_to_jiffies(BATT_AVG_POLL_PERIOD_MS));
}
-#define DECI_TAU_SCALE 13
#define HOURS_TO_SECONDS 3600
#define OCV_SLOPE_UV 10869
#define MILLI_UNIT 1000
@@ -2288,7 +2420,7 @@ static int fg_get_time_to_full(struct fg_chip *chip, int *val)
{
int rc, ibatt_avg, vbatt_avg, rbatt, msoc, ocv_cc2cv, full_soc,
act_cap_uah;
- s32 i_cc2cv, soc_cc2cv, ln_val;
+ s32 i_cc2cv, soc_cc2cv, ln_val, centi_tau_scale;
s64 t_predicted_cc = 0, t_predicted_cv = 0;
if (chip->bp.float_volt_uv <= 0) {
@@ -2296,7 +2428,7 @@ static int fg_get_time_to_full(struct fg_chip *chip, int *val)
return -ENODATA;
}
- if (!is_charger_available(chip)) {
+ if (!batt_psy_initialized(chip)) {
fg_dbg(chip, FG_TTF, "charger is not available\n");
return -ENODATA;
}
@@ -2411,15 +2543,20 @@ skip_cc_estimate:
/* CV estimate starts here */
if (chip->charge_type >= POWER_SUPPLY_CHARGE_TYPE_TAPER)
- ln_val = ibatt_avg / abs(chip->dt.sys_term_curr_ma);
+ ln_val = ibatt_avg / (abs(chip->dt.sys_term_curr_ma) + 200);
else
- ln_val = i_cc2cv / abs(chip->dt.sys_term_curr_ma);
+ ln_val = i_cc2cv / (abs(chip->dt.sys_term_curr_ma) + 200);
+
+ if (msoc < 95)
+ centi_tau_scale = 100;
+ else
+ centi_tau_scale = 20 * (100 - msoc);
fg_dbg(chip, FG_TTF, "ln_in=%d\n", ln_val);
rc = fg_lerp(fg_ln_table, ARRAY_SIZE(fg_ln_table), ln_val, &ln_val);
fg_dbg(chip, FG_TTF, "ln_out=%d\n", ln_val);
t_predicted_cv = div_s64((s64)act_cap_uah * rbatt, MICRO_UNIT);
- t_predicted_cv = div_s64(t_predicted_cv * DECI_TAU_SCALE, 10);
+ t_predicted_cv = div_s64(t_predicted_cv * centi_tau_scale, 100);
t_predicted_cv = div_s64(t_predicted_cv * ln_val, MILLI_UNIT);
t_predicted_cv = div_s64(t_predicted_cv * HOURS_TO_SECONDS, MICRO_UNIT);
fg_dbg(chip, FG_TTF, "t_predicted_cv=%lld\n", t_predicted_cv);
@@ -2473,6 +2610,48 @@ static int fg_get_time_to_empty(struct fg_chip *chip, int *val)
return 0;
}
+static int fg_update_maint_soc(struct fg_chip *chip)
+{
+ int rc = 0, msoc;
+
+ mutex_lock(&chip->charge_full_lock);
+ if (chip->delta_soc <= 0)
+ goto out;
+
+ rc = fg_get_msoc(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting msoc, rc=%d\n", rc);
+ goto out;
+ }
+
+ if (msoc > chip->maint_soc) {
+ /*
+ * When the monotonic SOC goes above maintenance SOC, we should
+ * stop showing the maintenance SOC.
+ */
+ chip->delta_soc = 0;
+ chip->maint_soc = 0;
+ } else if (msoc <= chip->last_msoc) {
+ /* MSOC is decreasing. Decrease maintenance SOC as well */
+ chip->maint_soc -= 1;
+ if (!(msoc % 10)) {
+ /*
+ * Reduce the maintenance SOC additionally by 1 whenever
+ * it crosses a SOC multiple of 10.
+ */
+ chip->maint_soc -= 1;
+ chip->delta_soc -= 1;
+ }
+ }
+
+ fg_dbg(chip, FG_IRQ, "msoc: %d last_msoc: %d maint_soc: %d delta_soc: %d\n",
+ msoc, chip->last_msoc, chip->maint_soc, chip->delta_soc);
+ chip->last_msoc = msoc;
+out:
+ mutex_unlock(&chip->charge_full_lock);
+ return rc;
+}
+
/* PSY CALLBACKS STAY HERE */
static int fg_psy_get_property(struct power_supply *psy,
@@ -2484,13 +2663,13 @@ static int fg_psy_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CAPACITY:
- if (chip->fg_restarting)
- pval->intval = chip->last_soc;
- else
- rc = fg_get_prop_capacity(chip, &pval->intval);
+ rc = fg_get_prop_capacity(chip, &pval->intval);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- rc = fg_get_battery_voltage(chip, &pval->intval);
+ if (chip->battery_missing)
+ pval->intval = 3700000;
+ else
+ rc = fg_get_battery_voltage(chip, &pval->intval);
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
rc = fg_get_battery_current(chip, &pval->intval);
@@ -2743,15 +2922,27 @@ static int fg_hw_init(struct fg_chip *chip)
}
if (chip->dt.delta_soc_thr > 0 && chip->dt.delta_soc_thr < 100) {
- fg_encode(chip->sp, FG_SRAM_DELTA_SOC_THR,
+ fg_encode(chip->sp, FG_SRAM_DELTA_MSOC_THR,
chip->dt.delta_soc_thr, buf);
rc = fg_sram_write(chip,
- chip->sp[FG_SRAM_DELTA_SOC_THR].addr_word,
- chip->sp[FG_SRAM_DELTA_SOC_THR].addr_byte,
- buf, chip->sp[FG_SRAM_DELTA_SOC_THR].len,
+ chip->sp[FG_SRAM_DELTA_MSOC_THR].addr_word,
+ chip->sp[FG_SRAM_DELTA_MSOC_THR].addr_byte,
+ buf, chip->sp[FG_SRAM_DELTA_MSOC_THR].len,
FG_IMA_DEFAULT);
if (rc < 0) {
- pr_err("Error in writing delta_soc_thr, rc=%d\n", rc);
+ pr_err("Error in writing delta_msoc_thr, rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_encode(chip->sp, FG_SRAM_DELTA_BSOC_THR,
+ chip->dt.delta_soc_thr, buf);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_DELTA_BSOC_THR].addr_word,
+ chip->sp[FG_SRAM_DELTA_BSOC_THR].addr_byte,
+ buf, chip->sp[FG_SRAM_DELTA_BSOC_THR].len,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing delta_bsoc_thr, rc=%d\n", rc);
return rc;
}
}
@@ -2764,18 +2955,11 @@ static int fg_hw_init(struct fg_chip *chip)
}
}
- /* This configuration is available only for pmicobalt v2.0 and above */
- if (!(chip->wa_flags & PMI8998_V1_REV_WA) &&
- chip->dt.recharge_volt_thr_mv > 0) {
- fg_encode(chip->sp, FG_SRAM_RECHARGE_VBATT_THR,
- chip->dt.recharge_volt_thr_mv, buf);
- rc = fg_sram_write(chip,
- chip->sp[FG_SRAM_RECHARGE_VBATT_THR].addr_word,
- chip->sp[FG_SRAM_RECHARGE_VBATT_THR].addr_byte,
- buf, chip->sp[FG_SRAM_RECHARGE_VBATT_THR].len,
- FG_IMA_DEFAULT);
+ if (chip->dt.recharge_volt_thr_mv > 0) {
+ rc = fg_set_recharge_voltage(chip,
+ chip->dt.recharge_volt_thr_mv);
if (rc < 0) {
- pr_err("Error in writing recharge_vbatt_thr, rc=%d\n",
+ pr_err("Error in setting recharge_voltage, rc=%d\n",
rc);
return rc;
}
@@ -3004,7 +3188,7 @@ static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data)
if (rc < 0)
pr_err("Error in configuring ESR filter rc:%d\n", rc);
- if (!is_charger_available(chip)) {
+ if (!batt_psy_initialized(chip)) {
chip->last_batt_temp = batt_temp;
return IRQ_HANDLED;
}
@@ -3042,7 +3226,20 @@ static irqreturn_t fg_soc_update_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t fg_delta_soc_irq_handler(int irq, void *data)
+static irqreturn_t fg_delta_bsoc_irq_handler(int irq, void *data)
+{
+ struct fg_chip *chip = data;
+ int rc;
+
+ fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+ rc = fg_charge_full_update(chip);
+ if (rc < 0)
+ pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fg_delta_msoc_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
int rc;
@@ -3062,7 +3259,11 @@ static irqreturn_t fg_delta_soc_irq_handler(int irq, void *data)
if (rc < 0)
pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
- if (is_charger_available(chip))
+ rc = fg_update_maint_soc(chip);
+ if (rc < 0)
+ pr_err("Error in updating maint_soc, rc=%d\n", rc);
+
+ if (batt_psy_initialized(chip))
power_supply_changed(chip->batt_psy);
return IRQ_HANDLED;
@@ -3073,7 +3274,7 @@ static irqreturn_t fg_empty_soc_irq_handler(int irq, void *data)
struct fg_chip *chip = data;
fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
- if (is_charger_available(chip))
+ if (batt_psy_initialized(chip))
power_supply_changed(chip->batt_psy);
return IRQ_HANDLED;
@@ -3116,12 +3317,13 @@ static struct fg_irq_info fg_irqs[FG_IRQ_MAX] = {
},
[MSOC_DELTA_IRQ] = {
.name = "msoc-delta",
- .handler = fg_delta_soc_irq_handler,
+ .handler = fg_delta_msoc_irq_handler,
.wakeable = true,
},
[BSOC_DELTA_IRQ] = {
.name = "bsoc-delta",
- .handler = fg_dummy_irq_handler,
+ .handler = fg_delta_bsoc_irq_handler,
+ .wakeable = true,
},
[SOC_READY_IRQ] = {
.name = "soc-ready",
@@ -3479,6 +3681,9 @@ static int fg_parse_dt(struct fg_chip *chip)
else
chip->dt.recharge_volt_thr_mv = temp;
+ chip->dt.auto_recharge_soc = of_property_read_bool(node,
+ "qcom,fg-auto-recharge-soc");
+
rc = of_property_read_u32(node, "qcom,fg-rsense-sel", &temp);
if (rc < 0)
chip->dt.rsense_sel = SRC_SEL_BATFET_SMB;
@@ -3681,6 +3886,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
mutex_init(&chip->cyc_ctr.lock);
mutex_init(&chip->cl.lock);
mutex_init(&chip->batt_avg_lock);
+ mutex_init(&chip->charge_full_lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
@@ -3736,6 +3942,13 @@ static int fg_gen3_probe(struct platform_device *pdev)
if (fg_irqs[SOC_UPDATE_IRQ].irq)
disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
+ /* Keep BSOC_DELTA_IRQ irq disabled until we require it */
+ if (fg_irqs[BSOC_DELTA_IRQ].irq) {
+ disable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
+ disable_irq_nosync(fg_irqs[BSOC_DELTA_IRQ].irq);
+ chip->bsoc_delta_irq_en = false;
+ }
+
rc = fg_debugfs_create(chip);
if (rc < 0) {
dev_err(chip->dev, "Error in creating debugfs entries, rc:%d\n",
diff --git a/drivers/power/supply/qcom/qpnp-qnovo.c b/drivers/power/supply/qcom/qpnp-qnovo.c
index 7bc90fbf2929..c4cc541a0a3e 100644
--- a/drivers/power/supply/qcom/qpnp-qnovo.c
+++ b/drivers/power/supply/qcom/qpnp-qnovo.c
@@ -630,6 +630,7 @@ static ssize_t enable_show(struct class *c, struct class_attribute *attr,
int val;
val = get_client_vote(chip->disable_votable, USER_VOTER);
+ val = !val;
return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
}
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 8dc93f42b059..dea932ae37ad 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -479,7 +479,7 @@ static int smb2_usb_get_prop(struct power_supply *psy,
rc = smblib_get_pe_start(chg, val);
break;
default:
- pr_err("get prop %d is not supported\n", psp);
+ pr_err("get prop %d is not supported in usb\n", psp);
rc = -EINVAL;
break;
}
@@ -574,6 +574,118 @@ static int smb2_init_usb_psy(struct smb2 *chip)
return 0;
}
+/*****************************
+ * USB MAIN PSY REGISTRATION *
+ *****************************/
+
+static enum power_supply_property smb2_usb_main_props[] = {
+ POWER_SUPPLY_PROP_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_ICL_REDUCTION,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_TYPE,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+ POWER_SUPPLY_PROP_FCC_DELTA,
+ /*
+ * TODO move the TEMP and TEMP_MAX properties here,
+ * and update the thermal balancer to look here
+ */
+};
+
+static int smb2_usb_main_get_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct smb2 *chip = power_supply_get_drvdata(psy);
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_ICL_REDUCTION:
+ val->intval = chg->icl_reduction_ua;
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ rc = smblib_get_charge_param(chg, &chg->param.fcc,
+ &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_TYPE:
+ val->intval = POWER_SUPPLY_TYPE_MAIN;
+ break;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED:
+ rc = smblib_get_prop_input_current_settled(chg, val);
+ break;
+ case POWER_SUPPLY_PROP_FCC_DELTA:
+ rc = smblib_get_prop_fcc_delta(chg, val);
+ break;
+ default:
+ pr_err("get prop %d is not supported in usb-main\n", psp);
+ rc = -EINVAL;
+ break;
+ }
+ if (rc < 0) {
+ pr_debug("Couldn't get prop %d rc = %d\n", psp, rc);
+ return -ENODATA;
+ }
+ return 0;
+}
+
+static int smb2_usb_main_set_prop(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct smb2 *chip = power_supply_get_drvdata(psy);
+ struct smb_charger *chg = &chip->chg;
+ int rc = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX:
+ rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
+ break;
+ case POWER_SUPPLY_PROP_ICL_REDUCTION:
+ chg->icl_reduction_ua = val->intval;
+ rc = rerun_election(chg->usb_icl_votable);
+ break;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
+ break;
+ default:
+ pr_err("set prop %d is not supported\n", psp);
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static const struct power_supply_desc usb_main_psy_desc = {
+ .name = "main",
+ .type = POWER_SUPPLY_TYPE_MAIN,
+ .properties = smb2_usb_main_props,
+ .num_properties = ARRAY_SIZE(smb2_usb_main_props),
+ .get_property = smb2_usb_main_get_prop,
+ .set_property = smb2_usb_main_set_prop,
+};
+
+static int smb2_init_usb_main_psy(struct smb2 *chip)
+{
+ struct power_supply_config usb_main_cfg = {};
+ struct smb_charger *chg = &chip->chg;
+
+ usb_main_cfg.drv_data = chip;
+ usb_main_cfg.of_node = chg->dev->of_node;
+ chg->usb_main_psy = devm_power_supply_register(chg->dev,
+ &usb_main_psy_desc,
+ &usb_main_cfg);
+ if (IS_ERR(chg->usb_main_psy)) {
+ pr_err("Couldn't register USB main power supply\n");
+ return PTR_ERR(chg->usb_main_psy);
+ }
+
+ return 0;
+}
+
/*************************
* DC PSY REGISTRATION *
*************************/
@@ -701,7 +813,6 @@ static enum power_supply_property smb2_batt_props[] = {
POWER_SUPPLY_PROP_STEP_CHARGING_STEP,
POWER_SUPPLY_PROP_CHARGE_DONE,
POWER_SUPPLY_PROP_PARALLEL_DISABLE,
- POWER_SUPPLY_PROP_PARALLEL_PERCENT,
POWER_SUPPLY_PROP_SET_SHIP_MODE,
};
@@ -775,9 +886,6 @@ static int smb2_batt_get_prop(struct power_supply *psy,
val->intval = get_client_vote(chg->pl_disable_votable,
USER_VOTER);
break;
- case POWER_SUPPLY_PROP_PARALLEL_PERCENT:
- val->intval = chg->pl.slave_pct;
- break;
case POWER_SUPPLY_PROP_SET_SHIP_MODE:
/* Not in ship mode as long as device is active */
val->intval = 0;
@@ -815,12 +923,6 @@ static int smb2_batt_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
vote(chg->pl_disable_votable, USER_VOTER, (bool)val->intval, 0);
break;
- case POWER_SUPPLY_PROP_PARALLEL_PERCENT:
- if (val->intval < 0 || val->intval > 100)
- return -EINVAL;
- chg->pl.slave_pct = val->intval;
- rerun_election(chg->fcc_votable);
- break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
vote(chg->fv_votable, DEFAULT_VOTER, true, val->intval);
break;
@@ -848,7 +950,6 @@ static int smb2_batt_prop_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
case POWER_SUPPLY_PROP_CAPACITY:
case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
- case POWER_SUPPLY_PROP_PARALLEL_PERCENT:
return 1;
default:
break;
@@ -1226,12 +1327,6 @@ static int smb2_init_hw(struct smb2 *chip)
}
/* votes must be cast before configuring software control */
- vote(chg->pl_disable_votable,
- PL_INDIRECT_VOTER, true, 0);
- vote(chg->pl_disable_votable,
- CHG_STATE_VOTER, true, 0);
- vote(chg->pl_disable_votable,
- PARALLEL_PSY_VOTER, true, 0);
vote(chg->usb_suspend_votable,
DEFAULT_VOTER, chip->dt.no_battery, 0);
vote(chg->dc_suspend_votable,
@@ -1821,7 +1916,6 @@ static int smb2_probe(struct platform_device *pdev)
struct smb2 *chip;
struct smb_charger *chg;
int rc = 0;
- u8 stat;
union power_supply_propval val;
int usb_present, batt_present, batt_health, batt_charge_type;
@@ -1912,20 +2006,12 @@ static int smb2_probe(struct platform_device *pdev)
goto cleanup;
}
- rc = smblib_read(chg, SHDN_CMD_REG, &stat);
+ rc = smb2_init_usb_main_psy(chip);
if (rc < 0) {
- pr_err("Couldn't read MISC_SHDN_CMD_REG rc=%d\n", rc);
- return rc;
- }
-
- if (stat) {
- pr_err("bad charger part; faking USB insertion\n");
- chip->bad_part = true;
- power_supply_changed(chg->usb_psy);
- return 0;
+ pr_err("Couldn't initialize usb psy rc=%d\n", rc);
+ goto cleanup;
}
- chg->pl.slave_pct = 50;
rc = smb2_init_batt_psy(chip);
if (rc < 0) {
pr_err("Couldn't initialize batt psy rc=%d\n", rc);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 2f612884fe0e..3e939c5ffba1 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -39,7 +39,6 @@
static bool is_secure(struct smb_charger *chg, int addr)
{
-
if (addr == SHIP_MODE_REG)
return true;
/* assume everything above 0xA0 is secure */
@@ -152,36 +151,6 @@ static int smblib_get_jeita_cc_delta(struct smb_charger *chg, int *cc_delta_ua)
return 0;
}
-static void smblib_split_fcc(struct smb_charger *chg, int total_ua,
- int *master_ua, int *slave_ua)
-{
- int rc, jeita_cc_delta_ua, step_cc_delta_ua, effective_total_ua,
- slave_limited_ua, hw_cc_delta_ua = 0;
-
- rc = smblib_get_step_cc_delta(chg, &step_cc_delta_ua);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
- step_cc_delta_ua = 0;
- } else {
- hw_cc_delta_ua = step_cc_delta_ua;
- }
-
- rc = smblib_get_jeita_cc_delta(chg, &jeita_cc_delta_ua);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get jeita cc delta rc=%d\n", rc);
- jeita_cc_delta_ua = 0;
- } else if (jeita_cc_delta_ua < 0) {
- /* HW will take the min between JEITA and step charge */
- hw_cc_delta_ua = min(hw_cc_delta_ua, jeita_cc_delta_ua);
- }
-
- effective_total_ua = max(0, total_ua + hw_cc_delta_ua);
- slave_limited_ua = min(effective_total_ua, chg->input_limited_fcc_ua);
- *slave_ua = (slave_limited_ua * chg->pl.slave_pct) / 100;
- *slave_ua = (*slave_ua * chg->pl.taper_pct) / 100;
- *master_ua = max(0, total_ua - *slave_ua);
-}
-
/********************
* REGISTER GETTERS *
********************/
@@ -536,10 +505,8 @@ static int smblib_notifier_call(struct notifier_block *nb,
schedule_work(&chg->bms_update_work);
}
- if (!chg->pl.psy && !strcmp(psy->desc->name, "parallel")) {
+ if (!chg->pl.psy && !strcmp(psy->desc->name, "parallel"))
chg->pl.psy = psy;
- schedule_work(&chg->pl_detect_work);
- }
return NOTIFY_OK;
}
@@ -600,8 +567,6 @@ static void smblib_uusb_removal(struct smb_charger *chg)
/* reset both usbin current and voltage votes */
vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
- /* reset taper_end voter here */
- vote(chg->pl_disable_votable, TAPER_END_VOTER, false, 0);
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
@@ -729,84 +694,6 @@ static int smblib_fcc_max_vote_callback(struct votable *votable, void *data,
return vote(chg->fcc_votable, FCC_MAX_RESULT_VOTER, true, fcc_ua);
}
-static int smblib_fcc_vote_callback(struct votable *votable, void *data,
- int total_fcc_ua, const char *client)
-{
- struct smb_charger *chg = data;
- union power_supply_propval pval = {0, };
- int rc, master_fcc_ua = total_fcc_ua, slave_fcc_ua = 0;
-
- if (total_fcc_ua < 0)
- return 0;
-
- if (chg->mode == PARALLEL_MASTER
- && !get_effective_result_locked(chg->pl_disable_votable)) {
- smblib_split_fcc(chg, total_fcc_ua, &master_fcc_ua,
- &slave_fcc_ua);
-
- /*
- * parallel charger is not disabled, implying that
- * chg->pl.psy exists
- */
- pval.intval = slave_fcc_ua;
- rc = power_supply_set_property(chg->pl.psy,
- POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
- if (rc < 0) {
- smblib_err(chg, "Could not set parallel fcc, rc=%d\n",
- rc);
- return rc;
- }
-
- chg->pl.slave_fcc_ua = slave_fcc_ua;
- }
-
- rc = smblib_set_charge_param(chg, &chg->param.fcc, master_fcc_ua);
- if (rc < 0) {
- smblib_err(chg, "Couldn't set master fcc rc=%d\n", rc);
- return rc;
- }
-
- smblib_dbg(chg, PR_PARALLEL, "master_fcc=%d slave_fcc=%d distribution=(%d/%d)\n",
- master_fcc_ua, slave_fcc_ua,
- (master_fcc_ua * 100) / total_fcc_ua,
- (slave_fcc_ua * 100) / total_fcc_ua);
-
- return 0;
-}
-
-#define PARALLEL_FLOAT_VOLTAGE_DELTA_UV 50000
-static int smblib_fv_vote_callback(struct votable *votable, void *data,
- int fv_uv, const char *client)
-{
- struct smb_charger *chg = data;
- union power_supply_propval pval = {0, };
- int rc = 0;
-
- if (fv_uv < 0) {
- smblib_dbg(chg, PR_MISC, "No Voter\n");
- return 0;
- }
-
- rc = smblib_set_charge_param(chg, &chg->param.fv, fv_uv);
- if (rc < 0) {
- smblib_err(chg, "Couldn't set floating voltage rc=%d\n", rc);
- return rc;
- }
-
- if (chg->mode == PARALLEL_MASTER && chg->pl.psy) {
- pval.intval = fv_uv + PARALLEL_FLOAT_VOLTAGE_DELTA_UV;
- rc = power_supply_set_property(chg->pl.psy,
- POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
- if (rc < 0) {
- smblib_err(chg,
- "Couldn't set float on parallel rc=%d\n", rc);
- return rc;
- }
- }
-
- return 0;
-}
-
#define USBIN_25MA 25000
#define USBIN_100MA 100000
#define USBIN_150MA 150000
@@ -947,35 +834,6 @@ static int smblib_awake_vote_callback(struct votable *votable, void *data,
return 0;
}
-static int smblib_pl_disable_vote_callback(struct votable *votable, void *data,
- int pl_disable, const char *client)
-{
- struct smb_charger *chg = data;
- union power_supply_propval pval = {0, };
- int rc;
-
- if (chg->mode != PARALLEL_MASTER || !chg->pl.psy)
- return 0;
-
- chg->pl.taper_pct = 100;
- rerun_election(chg->fv_votable);
- rerun_election(chg->fcc_votable);
-
- pval.intval = pl_disable;
- rc = power_supply_set_property(chg->pl.psy,
- POWER_SUPPLY_PROP_INPUT_SUSPEND, &pval);
- if (rc < 0) {
- smblib_err(chg,
- "Couldn't change slave suspend state rc=%d\n", rc);
- return rc;
- }
-
- smblib_dbg(chg, PR_PARALLEL, "parallel charging %s\n",
- pl_disable ? "disabled" : "enabled");
-
- return 0;
-}
-
static int smblib_chg_disable_vote_callback(struct votable *votable, void *data,
int chg_disable, const char *client)
{
@@ -2359,6 +2217,39 @@ int smblib_set_prop_ship_mode(struct smb_charger *chg,
return rc;
}
+/************************
+ * USB MAIN PSY GETTERS *
+ ************************/
+int smblib_get_prop_fcc_delta(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc, jeita_cc_delta_ua, step_cc_delta_ua, hw_cc_delta_ua = 0;
+
+ rc = smblib_get_step_cc_delta(chg, &step_cc_delta_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get step cc delta rc=%d\n", rc);
+ step_cc_delta_ua = 0;
+ } else {
+ hw_cc_delta_ua = step_cc_delta_ua;
+ }
+
+ rc = smblib_get_jeita_cc_delta(chg, &jeita_cc_delta_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get jeita cc delta rc=%d\n", rc);
+ jeita_cc_delta_ua = 0;
+ } else if (jeita_cc_delta_ua < 0) {
+ /* HW will take the min between JEITA and step charge */
+ hw_cc_delta_ua = min(hw_cc_delta_ua, jeita_cc_delta_ua);
+ }
+
+ val->intval = hw_cc_delta_ua;
+ return 0;
+}
+
+/************************
+ * USB MAIN PSY SETTERS *
+ ************************/
+
int smblib_reg_block_update(struct smb_charger *chg,
struct reg_info *entry)
{
@@ -2623,35 +2514,6 @@ unlock:
return IRQ_HANDLED;
}
-static void smblib_pl_handle_chg_state_change(struct smb_charger *chg, u8 stat)
-{
- bool pl_enabled;
-
- if (chg->mode != PARALLEL_MASTER)
- return;
-
- pl_enabled = !get_effective_result_locked(chg->pl_disable_votable);
- switch (stat) {
- case FAST_CHARGE:
- case FULLON_CHARGE:
- vote(chg->pl_disable_votable, CHG_STATE_VOTER, false, 0);
- break;
- case TAPER_CHARGE:
- if (pl_enabled) {
- cancel_delayed_work_sync(&chg->pl_taper_work);
- schedule_delayed_work(&chg->pl_taper_work, 0);
- }
- break;
- case TERMINATE_CHARGE:
- case INHIBIT_CHARGE:
- case DISABLE_CHARGE:
- vote(chg->pl_disable_votable, TAPER_END_VOTER, false, 0);
- break;
- default:
- break;
- }
-}
-
irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
{
struct smb_irq_data *irq_data = data;
@@ -2669,7 +2531,6 @@ irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
}
stat = stat & BATTERY_CHARGER_STATUS_MASK;
- smblib_pl_handle_chg_state_change(chg, stat);
power_supply_changed(chg->batt_psy);
return IRQ_HANDLED;
}
@@ -2819,16 +2680,15 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
}
#define USB_WEAK_INPUT_UA 1400000
-#define EFFICIENCY_PCT 80
irqreturn_t smblib_handle_icl_change(int irq, void *data)
{
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
- int rc, icl_ua;
+ int rc, settled_ua;
smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
- rc = smblib_get_charge_param(chg, &chg->param.icl_stat, &icl_ua);
+ rc = smblib_get_charge_param(chg, &chg->param.icl_stat, &settled_ua);
if (rc < 0) {
smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
return IRQ_HANDLED;
@@ -2837,15 +2697,10 @@ irqreturn_t smblib_handle_icl_change(int irq, void *data)
if (chg->mode != PARALLEL_MASTER)
return IRQ_HANDLED;
- chg->input_limited_fcc_ua = div64_s64(
- (s64)icl_ua * MICRO_5V * EFFICIENCY_PCT,
- (s64)get_effective_result(chg->fv_votable) * 100);
-
- if (!get_effective_result(chg->pl_disable_votable))
- rerun_election(chg->fcc_votable);
+ power_supply_changed(chg->usb_main_psy);
vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER,
- icl_ua >= USB_WEAK_INPUT_UA, 0);
+ settled_ua >= USB_WEAK_INPUT_UA, 0);
return IRQ_HANDLED;
}
@@ -3063,12 +2918,9 @@ static void typec_source_removal(struct smb_charger *chg)
{
int rc;
- vote(chg->pl_disable_votable, TYPEC_SRC_VOTER, true, 0);
/* reset both usbin current and voltage votes */
vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
- /* reset taper_end voter here */
- vote(chg->pl_disable_votable, TAPER_END_VOTER, false, 0);
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
@@ -3101,7 +2953,6 @@ static void typec_source_removal(struct smb_charger *chg)
static void typec_source_insertion(struct smb_charger *chg)
{
- vote(chg->pl_disable_votable, TYPEC_SRC_VOTER, false, 0);
}
static void typec_sink_insertion(struct smb_charger *chg)
@@ -3435,58 +3286,6 @@ static void step_soc_req_work(struct work_struct *work)
step_charge_soc_update(chg, pval.intval);
}
-static void smblib_pl_detect_work(struct work_struct *work)
-{
- struct smb_charger *chg = container_of(work, struct smb_charger,
- pl_detect_work);
-
- vote(chg->pl_disable_votable, PARALLEL_PSY_VOTER, false, 0);
-}
-
-#define MINIMUM_PARALLEL_FCC_UA 500000
-#define PL_TAPER_WORK_DELAY_MS 100
-#define TAPER_RESIDUAL_PCT 75
-static void smblib_pl_taper_work(struct work_struct *work)
-{
- struct smb_charger *chg = container_of(work, struct smb_charger,
- pl_taper_work.work);
- union power_supply_propval pval = {0, };
- int rc;
-
- smblib_dbg(chg, PR_PARALLEL, "starting parallel taper work\n");
- if (chg->pl.slave_fcc_ua < MINIMUM_PARALLEL_FCC_UA) {
- smblib_dbg(chg, PR_PARALLEL, "parallel taper is done\n");
- vote(chg->pl_disable_votable, TAPER_END_VOTER, true, 0);
- goto done;
- }
-
- rc = smblib_get_prop_batt_charge_type(chg, &pval);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get batt charge type rc=%d\n", rc);
- goto done;
- }
-
- if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
- smblib_dbg(chg, PR_PARALLEL, "master is taper charging; reducing slave FCC\n");
- vote(chg->awake_votable, PL_TAPER_WORK_RUNNING_VOTER, true, 0);
- /* Reduce the taper percent by 25 percent */
- chg->pl.taper_pct = chg->pl.taper_pct
- * TAPER_RESIDUAL_PCT / 100;
- rerun_election(chg->fcc_votable);
- schedule_delayed_work(&chg->pl_taper_work,
- msecs_to_jiffies(PL_TAPER_WORK_DELAY_MS));
- return;
- }
-
- /*
- * Master back to Fast Charge, get out of this round of taper reduction
- */
- smblib_dbg(chg, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
-
-done:
- vote(chg->awake_votable, PL_TAPER_WORK_RUNNING_VOTER, false, 0);
-}
-
static void clear_hdc_work(struct work_struct *work)
{
struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -3567,6 +3366,25 @@ static int smblib_create_votables(struct smb_charger *chg)
{
int rc = 0;
+ chg->fcc_votable = find_votable("FCC");
+ if (!chg->fcc_votable) {
+ rc = -EPROBE_DEFER;
+ return rc;
+ }
+
+ chg->fv_votable = find_votable("FV");
+ if (!chg->fv_votable) {
+ rc = -EPROBE_DEFER;
+ return rc;
+ }
+
+ chg->pl_disable_votable = find_votable("PL_DISABLE");
+ if (!chg->pl_disable_votable) {
+ rc = -EPROBE_DEFER;
+ return rc;
+ }
+ vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
+
chg->usb_suspend_votable = create_votable("USB_SUSPEND", VOTE_SET_ANY,
smblib_usb_suspend_vote_callback,
chg);
@@ -3591,22 +3409,6 @@ static int smblib_create_votables(struct smb_charger *chg)
return rc;
}
- chg->fcc_votable = create_votable("FCC", VOTE_MIN,
- smblib_fcc_vote_callback,
- chg);
- if (IS_ERR(chg->fcc_votable)) {
- rc = PTR_ERR(chg->fcc_votable);
- return rc;
- }
-
- chg->fv_votable = create_votable("FV", VOTE_MAX,
- smblib_fv_vote_callback,
- chg);
- if (IS_ERR(chg->fv_votable)) {
- rc = PTR_ERR(chg->fv_votable);
- return rc;
- }
-
chg->usb_icl_votable = create_votable("USB_ICL", VOTE_MIN,
smblib_usb_icl_vote_callback,
chg);
@@ -3646,14 +3448,6 @@ static int smblib_create_votables(struct smb_charger *chg)
return rc;
}
- chg->pl_disable_votable = create_votable("PL_DISABLE", VOTE_SET_ANY,
- smblib_pl_disable_vote_callback,
- chg);
- if (IS_ERR(chg->pl_disable_votable)) {
- rc = PTR_ERR(chg->pl_disable_votable);
- return rc;
- }
-
chg->chg_disable_votable = create_votable("CHG_DISABLE", VOTE_SET_ANY,
smblib_chg_disable_vote_callback,
chg);
@@ -3710,10 +3504,6 @@ static void smblib_destroy_votables(struct smb_charger *chg)
destroy_votable(chg->dc_suspend_votable);
if (chg->fcc_max_votable)
destroy_votable(chg->fcc_max_votable);
- if (chg->fcc_votable)
- destroy_votable(chg->fcc_votable);
- if (chg->fv_votable)
- destroy_votable(chg->fv_votable);
if (chg->usb_icl_votable)
destroy_votable(chg->usb_icl_votable);
if (chg->dc_icl_votable)
@@ -3724,8 +3514,6 @@ static void smblib_destroy_votables(struct smb_charger *chg)
destroy_votable(chg->pd_allowed_votable);
if (chg->awake_votable)
destroy_votable(chg->awake_votable);
- if (chg->pl_disable_votable)
- destroy_votable(chg->pl_disable_votable);
if (chg->chg_disable_votable)
destroy_votable(chg->chg_disable_votable);
if (chg->pl_enable_votable_indirect)
@@ -3755,10 +3543,8 @@ int smblib_init(struct smb_charger *chg)
mutex_init(&chg->write_lock);
mutex_init(&chg->otg_overcurrent_lock);
INIT_WORK(&chg->bms_update_work, bms_update_work);
- INIT_WORK(&chg->pl_detect_work, smblib_pl_detect_work);
INIT_WORK(&chg->rdstd_cc2_detach_work, rdstd_cc2_detach_work);
INIT_DELAYED_WORK(&chg->hvdcp_detect_work, smblib_hvdcp_detect_work);
- INIT_DELAYED_WORK(&chg->pl_taper_work, smblib_pl_taper_work);
INIT_DELAYED_WORK(&chg->step_soc_req_work, step_soc_req_work);
INIT_DELAYED_WORK(&chg->clear_hdc_work, clear_hdc_work);
chg->fake_capacity = -EINVAL;
@@ -3781,10 +3567,6 @@ int smblib_init(struct smb_charger *chg)
chg->bms_psy = power_supply_get_by_name("bms");
chg->pl.psy = power_supply_get_by_name("parallel");
- if (chg->pl.psy)
- vote(chg->pl_disable_votable, PARALLEL_PSY_VOTER,
- false, 0);
-
break;
case PARALLEL_SLAVE:
break;
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index a4121224b121..b3fce23c6508 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -32,7 +32,6 @@ enum print_reason {
#define DCP_VOTER "DCP_VOTER"
#define USB_PSY_VOTER "USB_PSY_VOTER"
#define PL_TAPER_WORK_RUNNING_VOTER "PL_TAPER_WORK_RUNNING_VOTER"
-#define PARALLEL_PSY_VOTER "PARALLEL_PSY_VOTER"
#define PL_INDIRECT_VOTER "PL_INDIRECT_VOTER"
#define USBIN_I_VOTER "USBIN_I_VOTER"
#define USBIN_V_VOTER "USBIN_V_VOTER"
@@ -128,9 +127,6 @@ struct smb_params {
struct parallel_params {
struct power_supply *psy;
- int slave_pct;
- int taper_pct;
- int slave_fcc_ua;
};
struct smb_iio {
@@ -170,6 +166,7 @@ struct smb_charger {
struct power_supply *dc_psy;
struct power_supply *bms_psy;
struct power_supply_desc usb_psy_desc;
+ struct power_supply *usb_main_psy;
/* notifiers */
struct notifier_block nb;
@@ -202,11 +199,9 @@ struct smb_charger {
/* work */
struct work_struct bms_update_work;
- struct work_struct pl_detect_work;
struct work_struct rdstd_cc2_detach_work;
struct delayed_work hvdcp_detect_work;
struct delayed_work ps_change_timeout_work;
- struct delayed_work pl_taper_work;
struct delayed_work step_soc_req_work;
struct delayed_work clear_hdc_work;
@@ -226,7 +221,6 @@ struct smb_charger {
bool is_hdc;
bool chg_done;
bool micro_usb_mode;
- int input_limited_fcc_ua;
bool otg_en;
bool vconn_en;
int otg_attempts;
@@ -240,6 +234,8 @@ struct smb_charger {
/* extcon for VBUS / ID notification to USB for uUSB */
struct extcon_dev *extcon;
bool usb_ever_removed;
+
+ int icl_reduction_ua;
};
int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
@@ -386,6 +382,8 @@ int smblib_set_prop_ship_mode(struct smb_charger *chg,
const union power_supply_propval *val);
void smblib_suspend_on_debug_battery(struct smb_charger *chg);
int smblib_rerun_apsd_if_required(struct smb_charger *chg);
+int smblib_get_prop_fcc_delta(struct smb_charger *chg,
+ union power_supply_propval *val);
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c
index e9d8c0e08447..0c2943c7f2df 100644
--- a/drivers/power/supply/qcom/smb1351-charger.c
+++ b/drivers/power/supply/qcom/smb1351-charger.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1414,6 +1414,7 @@ static enum power_supply_property smb1351_parallel_properties[] = {
POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_PARALLEL_MODE,
};
static int smb1351_parallel_set_chg_present(struct smb1351_charger *chip,
@@ -1667,6 +1668,12 @@ static int smb1351_parallel_get_property(struct power_supply *psy,
else
val->intval = 0;
break;
+ case POWER_SUPPLY_PROP_PARALLEL_MODE:
+ if (chip->parallel_charger_present)
+ val->intval = POWER_SUPPLY_PARALLEL_USBIN_USBIN;
+ else
+ val->intval = POWER_SUPPLY_PARALLEL_NONE;
+ break;
default:
return -EINVAL;
}
@@ -3158,8 +3165,8 @@ static int smb1351_parallel_charger_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
- chip->parallel_psy_d.name = "usb-parallel";
- chip->parallel_psy_d.type = POWER_SUPPLY_TYPE_USB_PARALLEL;
+ chip->parallel_psy_d.name = "parallel";
+ chip->parallel_psy_d.type = POWER_SUPPLY_TYPE_PARALLEL;
chip->parallel_psy_d.get_property = smb1351_parallel_get_property;
chip->parallel_psy_d.set_property = smb1351_parallel_set_property;
chip->parallel_psy_d.properties = smb1351_parallel_properties;
diff --git a/drivers/power/supply/qcom/smb135x-charger.c b/drivers/power/supply/qcom/smb135x-charger.c
index 65d4ae56ff83..08af01544590 100644
--- a/drivers/power/supply/qcom/smb135x-charger.c
+++ b/drivers/power/supply/qcom/smb135x-charger.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -4378,8 +4378,8 @@ static int smb135x_parallel_charger_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
- chip->parallel_psy_d.name = "usb-parallel";
- chip->parallel_psy_d.type = POWER_SUPPLY_TYPE_USB_PARALLEL;
+ chip->parallel_psy_d.name = "parallel";
+ chip->parallel_psy_d.type = POWER_SUPPLY_TYPE_PARALLEL;
chip->parallel_psy_d.get_property = smb135x_parallel_get_property;
chip->parallel_psy_d.set_property = smb135x_parallel_set_property;
chip->parallel_psy_d.properties = smb135x_parallel_properties;
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index c836e780fc86..ae15fef6c3a6 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -409,11 +409,12 @@ static enum power_supply_property smb138x_parallel_props[] = {
POWER_SUPPLY_PROP_PIN_ENABLED,
POWER_SUPPLY_PROP_INPUT_SUSPEND,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
- POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGER_TEMP,
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_PARALLEL_MODE,
};
static int smb138x_parallel_get_prop(struct power_supply *psy,
@@ -447,7 +448,7 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
break;
- case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
rc = smblib_get_charge_param(chg, &chg->param.fcc,
&val->intval);
break;
@@ -463,6 +464,9 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_MODEL_NAME:
val->strval = "smb138x";
break;
+ case POWER_SUPPLY_PROP_PARALLEL_MODE:
+ val->intval = POWER_SUPPLY_PARALLEL_MID_MID;
+ break;
default:
pr_err("parallel power supply get prop %d not supported\n",
prop);
@@ -516,7 +520,7 @@ static int smb138x_parallel_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
break;
- case POWER_SUPPLY_PROP_CURRENT_MAX:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
break;
case POWER_SUPPLY_PROP_BUCK_FREQ:
@@ -540,7 +544,7 @@ static int smb138x_parallel_prop_is_writeable(struct power_supply *psy,
static const struct power_supply_desc parallel_psy_desc = {
.name = "parallel",
- .type = POWER_SUPPLY_TYPE_USB_PARALLEL,
+ .type = POWER_SUPPLY_TYPE_PARALLEL,
.properties = smb138x_parallel_props,
.num_properties = ARRAY_SIZE(smb138x_parallel_props),
.get_property = smb138x_parallel_get_prop,
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 86e29199b2b1..4a315d8f5534 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -178,6 +178,16 @@ config MSM_QMI_INTERFACE
to perform QMI message marshaling and transport them over IPC
Router.
+config MSM_L2_IA_DEBUG
+ bool "Enable MSM L2 Indirect Access Debug"
+ depends on DEBUG_FS
+ default n
+ help
+ This option enables L2 indirect access debug
+ capability. It exposes L2 indirect access
+ debugfs interface to get/set data, address,
+ and target cpus.
+
config MSM_RPM_SMD
bool "RPM driver using SMD protocol"
help
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index f2d2aece6846..14cf10b92122 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -678,10 +678,15 @@ static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id,
} else if (intent->data == NULL) {
if (einfo->intentless) {
intent->data = kmalloc(cmd.frag_size, GFP_ATOMIC);
- if (!intent->data)
+ if (!intent->data) {
err = true;
- else
+ GLINK_ERR(
+ "%s: atomic alloc fail ch %d liid %d size %d\n",
+ __func__, rcid, intent_id,
+ cmd.frag_size);
+ } else {
intent->intent_size = cmd.frag_size;
+ }
} else {
GLINK_ERR(
"%s: intent for ch %d liid %d has no data buff\n",
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
index 4d94e6446505..5e2dbc8b1d20 100644
--- a/drivers/soc/qcom/glink_ssr.c
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -339,6 +339,7 @@ void close_ch_worker(struct work_struct *work)
BUG_ON(!ss_info->cb_data);
kfree(ss_info->cb_data);
+ ss_info->cb_data = NULL;
kfree(close_work);
}
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 873416944b19..16ee98d8e4e0 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -146,7 +146,8 @@ enum icnss_debug_quirks {
FW_REJUVENATE_ENABLE,
};
-#define ICNSS_QUIRKS_DEFAULT BIT(VBATT_DISABLE)
+#define ICNSS_QUIRKS_DEFAULT (BIT(VBATT_DISABLE) | \
+ BIT(FW_REJUVENATE_ENABLE))
unsigned long quirks = ICNSS_QUIRKS_DEFAULT;
module_param(quirks, ulong, 0600);
@@ -1234,7 +1235,7 @@ out:
return ret;
}
-static int wlfw_ini_send_sync_msg(bool enable_fw_log)
+static int wlfw_ini_send_sync_msg(uint8_t fw_log_mode)
{
int ret;
struct wlfw_ini_req_msg_v01 req;
@@ -1244,14 +1245,14 @@ static int wlfw_ini_send_sync_msg(bool enable_fw_log)
if (!penv || !penv->wlfw_clnt)
return -ENODEV;
- icnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log: %d\n",
- penv->state, enable_fw_log);
+ icnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log_mode: %d\n",
+ penv->state, fw_log_mode);
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
req.enablefwlog_valid = 1;
- req.enablefwlog = enable_fw_log;
+ req.enablefwlog = fw_log_mode;
req_desc.max_msg_len = WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN;
req_desc.msg_id = QMI_WLFW_INI_REQ_V01;
@@ -1266,14 +1267,14 @@ static int wlfw_ini_send_sync_msg(bool enable_fw_log)
ret = qmi_send_req_wait(penv->wlfw_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp), WLFW_TIMEOUT_MS);
if (ret < 0) {
- icnss_pr_err("Send INI req failed fw_log: %d, ret: %d\n",
- enable_fw_log, ret);
+ icnss_pr_err("Send INI req failed fw_log_mode: %d, ret: %d\n",
+ fw_log_mode, ret);
goto out;
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- icnss_pr_err("QMI INI request rejected, fw_log:%d result:%d error:%d\n",
- enable_fw_log, resp.resp.result, resp.resp.error);
+ icnss_pr_err("QMI INI request rejected, fw_log_mode:%d result:%d error:%d\n",
+ fw_log_mode, resp.resp.result, resp.resp.error);
ret = resp.resp.result;
goto out;
}
@@ -1469,7 +1470,7 @@ static int wlfw_dynamic_feature_mask_send_sync_msg(struct icnss_priv *priv,
if (!test_bit(FW_REJUVENATE_ENABLE, &quirks)) {
icnss_pr_dbg("FW rejuvenate is disabled from quirks\n");
- dynamic_feature_mask &= ~QMI_WLFW_FW_REJUVENATE_V01;
+ return 0;
}
icnss_pr_dbg("Sending dynamic feature mask request, val 0x%llx, state: 0x%lx\n",
@@ -2517,21 +2518,19 @@ int icnss_get_soc_info(struct icnss_soc_info *info)
}
EXPORT_SYMBOL(icnss_get_soc_info);
-int icnss_set_fw_debug_mode(bool enable_fw_log)
+int icnss_set_fw_log_mode(uint8_t fw_log_mode)
{
int ret;
- icnss_pr_dbg("%s FW debug mode",
- enable_fw_log ? "Enalbing" : "Disabling");
+ icnss_pr_dbg("FW log mode: %u\n", fw_log_mode);
- ret = wlfw_ini_send_sync_msg(enable_fw_log);
+ ret = wlfw_ini_send_sync_msg(fw_log_mode);
if (ret)
- icnss_pr_err("Fail to send ini, ret = %d, fw_log: %d\n", ret,
- enable_fw_log);
-
+ icnss_pr_err("Fail to send ini, ret = %d, fw_log_mode: %u\n",
+ ret, fw_log_mode);
return ret;
}
-EXPORT_SYMBOL(icnss_set_fw_debug_mode);
+EXPORT_SYMBOL(icnss_set_fw_log_mode);
int icnss_athdiag_read(struct device *dev, uint32_t offset,
uint32_t mem_type, uint32_t data_len,
diff --git a/drivers/soc/qcom/kryo-l2-accessors.c b/drivers/soc/qcom/kryo-l2-accessors.c
index a945f9e0ba40..1d81074d7b81 100644
--- a/drivers/soc/qcom/kryo-l2-accessors.c
+++ b/drivers/soc/qcom/kryo-l2-accessors.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -80,7 +80,7 @@ u64 get_l2_indirect_reg(u64 reg)
}
EXPORT_SYMBOL(get_l2_indirect_reg);
-#if defined(CONFIG_DEBUG_FS)
+#if defined(CONFIG_MSM_L2_IA_DEBUG)
static u32 debug_addr;
static int debug_target_cpu;
@@ -180,4 +180,4 @@ static int l2_ia_debug_init(void)
}
late_initcall(l2_ia_debug_init);
-#endif /* CONFIG_DEBUG_FS */
+#endif /* CONFIG_MSM_L2_IA_DEBUG */
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index eaeb8d4776fe..2244c64d28af 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -171,7 +171,11 @@ int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
ret = do_elf_ramdump(ramdump_dev, ramdump_segs, count);
kfree(ramdump_segs);
- if (!ret && desc->subsys_vmid > 0)
+ if (ret)
+ pil_err(desc, "%s: Ramdump collection failed for subsys %s rc:%d\n",
+ __func__, desc->name, ret);
+
+ if (desc->subsys_vmid > 0)
ret = pil_assign_mem_to_subsys(desc, priv->region_start,
(priv->region_end - priv->region_start));
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
index 45ac48eb2241..19974b61ec1c 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 The Linux Foundation.
+/* Copyright (c) 2016-2017 The Linux Foundation.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -17,7 +17,6 @@
#include <linux/uaccess.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
-#include <linux/list.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/errno.h>
@@ -34,16 +33,10 @@
#define APR_MAXIMUM_NUM_OF_RETRIES 2
struct apr_tx_buf {
- struct list_head list;
struct apr_pkt_priv pkt_priv;
char buf[APR_MAX_BUF];
};
-struct apr_buf_list {
- struct list_head list;
- spinlock_t lock;
-};
-
struct link_state {
uint32_t dest;
void *handle;
@@ -52,7 +45,6 @@ struct link_state {
};
static struct link_state link_state[APR_DEST_MAX];
-static struct apr_buf_list buf_list;
static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = {
{
@@ -68,44 +60,37 @@ static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = {
static struct apr_svc_ch_dev
apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX];
-static struct apr_tx_buf *apr_get_free_buf(int len)
+static struct apr_tx_buf *apr_alloc_buf(int len)
{
- struct apr_tx_buf *tx_buf;
- unsigned long flags;
if (len > APR_MAX_BUF) {
pr_err("%s: buf too large [%d]\n", __func__, len);
return ERR_PTR(-EINVAL);
}
- spin_lock_irqsave(&buf_list.lock, flags);
- if (list_empty(&buf_list.list)) {
- spin_unlock_irqrestore(&buf_list.lock, flags);
- pr_err("%s: No buf available\n", __func__);
- return ERR_PTR(-ENOMEM);
- }
-
- tx_buf = list_first_entry(&buf_list.list, struct apr_tx_buf, list);
- list_del(&tx_buf->list);
- spin_unlock_irqrestore(&buf_list.lock, flags);
-
- return tx_buf;
+ return kzalloc(sizeof(struct apr_tx_buf), GFP_ATOMIC);
}
-static void apr_buf_add_tail(const void *buf)
+static void apr_free_buf(const void *ptr)
{
- struct apr_tx_buf *list;
- unsigned long flags;
- if (!buf)
+ struct apr_pkt_priv *apr_pkt_priv = (struct apr_pkt_priv *)ptr;
+ struct apr_tx_buf *tx_buf;
+
+ if (!apr_pkt_priv) {
+ pr_err("%s: Invalid apr_pkt_priv\n", __func__);
return;
+ }
- spin_lock_irqsave(&buf_list.lock, flags);
- list = container_of((void *)buf, struct apr_tx_buf, buf);
- list_add_tail(&list->list, &buf_list.list);
- spin_unlock_irqrestore(&buf_list.lock, flags);
+ if (apr_pkt_priv->pkt_owner == APR_PKT_OWNER_DRIVER) {
+ tx_buf = container_of((void *)apr_pkt_priv,
+ struct apr_tx_buf, pkt_priv);
+ pr_debug("%s: Freeing buffer %pK", __func__, tx_buf);
+ kfree(tx_buf);
+ }
}
+
static int __apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
struct apr_pkt_priv *pkt_priv, int len)
{
@@ -137,7 +122,7 @@ int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
return -EINVAL;
if (pkt_priv->pkt_owner == APR_PKT_OWNER_DRIVER) {
- tx_buf = apr_get_free_buf(len);
+ tx_buf = apr_alloc_buf(len);
if (IS_ERR_OR_NULL(tx_buf)) {
rc = -EINVAL;
goto exit;
@@ -160,7 +145,7 @@ int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
if (rc < 0) {
pr_err("%s: Unable to send the packet, rc:%d\n", __func__, rc);
if (pkt_priv->pkt_owner == APR_PKT_OWNER_DRIVER)
- apr_buf_add_tail(pkt_data);
+ kfree(tx_buf);
}
exit:
return rc;
@@ -189,39 +174,17 @@ void apr_tal_notify_rx(void *handle, const void *priv, const void *pkt_priv,
static void apr_tal_notify_tx_abort(void *handle, const void *priv,
const void *pkt_priv)
{
- struct apr_pkt_priv *apr_pkt_priv_ptr =
- (struct apr_pkt_priv *)pkt_priv;
- struct apr_tx_buf *list_node;
-
- if (!apr_pkt_priv_ptr) {
- pr_err("%s: Invalid pkt_priv\n", __func__);
- return;
- }
-
- pr_debug("%s: tx_abort received for apr_pkt_priv_ptr:%pK\n",
- __func__, apr_pkt_priv_ptr);
-
- if (apr_pkt_priv_ptr->pkt_owner == APR_PKT_OWNER_DRIVER) {
- list_node = container_of(apr_pkt_priv_ptr,
- struct apr_tx_buf, pkt_priv);
- apr_buf_add_tail(list_node->buf);
- }
+ pr_debug("%s: tx_abort received for pkt_priv:%pK\n",
+ __func__, pkt_priv);
+ apr_free_buf(pkt_priv);
}
void apr_tal_notify_tx_done(void *handle, const void *priv,
const void *pkt_priv, const void *ptr)
{
- struct apr_pkt_priv *apr_pkt_priv = (struct apr_pkt_priv *)pkt_priv;
-
- if (!pkt_priv || !ptr) {
- pr_err("%s: Invalid pkt_priv or ptr\n", __func__);
- return;
- }
-
- pr_debug("%s: tx_done received\n", __func__);
-
- if (apr_pkt_priv->pkt_owner == APR_PKT_OWNER_DRIVER)
- apr_buf_add_tail(ptr);
+ pr_debug("%s: tx_done received for pkt_priv:%pK\n",
+ __func__, pkt_priv);
+ apr_free_buf(pkt_priv);
}
bool apr_tal_notify_rx_intent_req(void *handle, const void *priv,
@@ -457,8 +420,6 @@ static struct glink_link_info lpass_link_info = {
static int __init apr_tal_init(void)
{
int i, j, k;
- struct apr_tx_buf *buf;
- struct list_head *ptr, *next;
for (i = 0; i < APR_DL_MAX; i++) {
for (j = 0; j < APR_DEST_MAX; j++) {
@@ -474,21 +435,6 @@ static int __init apr_tal_init(void)
for (i = 0; i < APR_DEST_MAX; i++)
init_waitqueue_head(&link_state[i].wait);
- spin_lock_init(&buf_list.lock);
- INIT_LIST_HEAD(&buf_list.list);
- for (i = 0; i < APR_NUM_OF_TX_BUF; i++) {
- buf = kzalloc(sizeof(struct apr_tx_buf), GFP_KERNEL);
- if (!buf) {
- pr_err("%s: Unable to allocate tx buf\n", __func__);
- goto tx_buf_alloc_fail;
- }
-
- INIT_LIST_HEAD(&buf->list);
- spin_lock(&buf_list.lock);
- list_add_tail(&buf->list, &buf_list.list);
- spin_unlock(&buf_list.lock);
- }
-
link_state[APR_DEST_MODEM].link_state = GLINK_LINK_STATE_DOWN;
link_state[APR_DEST_MODEM].handle =
glink_register_link_state_cb(&mpss_link_info, NULL);
@@ -502,13 +448,5 @@ static int __init apr_tal_init(void)
pr_err("%s: Unable to register lpass link state\n", __func__);
return 0;
-
-tx_buf_alloc_fail:
- list_for_each_safe(ptr, next, &buf_list.list) {
- buf = list_entry(ptr, struct apr_tx_buf, list);
- list_del(&buf->list);
- kfree(buf);
- }
- return -ENOMEM;
}
device_initcall(apr_tal_init);
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 6c8154d126b2..a5bfeab596ac 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -533,18 +533,6 @@ static void cleanup_irq(struct spmi_pmic_arb *pa, u16 apid, int id)
"cleanup_irq apid=%d sid=0x%x per=0x%x irq=%d\n",
apid, sid, per, id);
writel_relaxed(irq_mask, pa->intr + pa->ver_ops->irq_clear(apid));
-
- if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
- (per << 8) + QPNPINT_REG_LATCHED_CLR, &irq_mask, 1))
- dev_err_ratelimited(&pa->spmic->dev,
- "failed to ack irq_mask = 0x%x for ppid = %x\n",
- irq_mask, ppid);
-
- if (pmic_arb_write_cmd(pa->spmic, SPMI_CMD_EXT_WRITEL, sid,
- (per << 8) + QPNPINT_REG_EN_CLR, &irq_mask, 1))
- dev_err_ratelimited(&pa->spmic->dev,
- "failed to ack irq_mask = 0x%x for ppid = %x\n",
- irq_mask, ppid);
}
static void periph_interrupt(struct spmi_pmic_arb *pa, u16 apid, bool show)
@@ -721,6 +709,17 @@ static struct irq_chip pmic_arb_irqchip = {
| IRQCHIP_SKIP_SET_WAKE,
};
+static void qpnpint_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d)
+{
+ u8 irq = HWIRQ_IRQ(d->hwirq);
+ u8 buf;
+
+ buf = BIT(irq);
+ qpnpint_spmi_write(d, QPNPINT_REG_EN_CLR, &buf, 1);
+ qpnpint_spmi_write(d, QPNPINT_REG_LATCHED_CLR, &buf, 1);
+}
+
static int qpnpint_irq_domain_dt_translate(struct irq_domain *d,
struct device_node *controller,
const u32 *intspec,
@@ -1186,6 +1185,7 @@ static const struct pmic_arb_ver_ops pmic_arb_v5 = {
static const struct irq_domain_ops pmic_arb_irq_domain_ops = {
.map = qpnpint_irq_domain_map,
.xlate = qpnpint_irq_domain_dt_translate,
+ .activate = qpnpint_irq_domain_activate,
};
static void spmi_pmic_arb_resume(void)
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index b7733e90fc8b..82a8e4e200ba 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -363,6 +363,7 @@ static int32_t get_tsens_sensor_for_client_id(struct tsens_tm_device *tmdev,
if (!strcmp(id->compatible, "qcom,msm8996-tsens") ||
(!strcmp(id->compatible, "qcom,msm8998-tsens")) ||
+ (!strcmp(id->compatible, "qcom,sdm660-tsens")) ||
(!strcmp(id->compatible, "qcom,msmhamster-tsens"))) {
while (i < tmdev->tsens_num_sensor && !id_found) {
if (tmdev->sensor[i].sensor_client_id ==
@@ -492,6 +493,7 @@ int tsens_get_hw_id_mapping(int thermal_sensor_num, int *sensor_client_id)
if (!strcmp(id->compatible, "qcom,msm8996-tsens") ||
(!strcmp(id->compatible, "qcom,msm8998-tsens")) ||
+ (!strcmp(id->compatible, "qcom,sdm660-tsens")) ||
(!strcmp(id->compatible, "qcom,msmhamster-tsens"))) {
/* Assign client id's that is used to get the
* controller and hw_sensor details
diff --git a/drivers/thermal/msm_lmh_dcvs.c b/drivers/thermal/msm_lmh_dcvs.c
index 7758750516f8..4ebfc713cb28 100644
--- a/drivers/thermal/msm_lmh_dcvs.c
+++ b/drivers/thermal/msm_lmh_dcvs.c
@@ -68,6 +68,8 @@
_max = (_val) & 0x3FF; \
_max *= 19200; \
} while (0)
+#define FREQ_KHZ_TO_HZ(_val) ((_val) * 1000)
+#define FREQ_HZ_TO_KHZ(_val) ((_val) / 1000)
enum lmh_hw_trips {
LIMITS_TRIP_LO,
@@ -114,6 +116,7 @@ static uint32_t msm_lmh_mitigation_notify(struct msm_lmh_dcvs_hw *hw)
uint32_t max_limit = 0, val = 0;
struct device *cpu_dev = NULL;
unsigned long freq_val;
+ struct dev_pm_opp *opp_entry;
val = readl_relaxed(hw->osm_hw_reg);
dcvsh_get_frequency(val, max_limit);
@@ -124,11 +127,23 @@ static uint32_t msm_lmh_mitigation_notify(struct msm_lmh_dcvs_hw *hw)
goto notify_exit;
}
- freq_val = max_limit;
+ freq_val = FREQ_KHZ_TO_HZ(max_limit);
rcu_read_lock();
- dev_pm_opp_find_freq_floor(cpu_dev, &freq_val);
+ opp_entry = dev_pm_opp_find_freq_floor(cpu_dev, &freq_val);
+ /*
+ * Hardware mitigation frequency can be lower than the lowest
+ * possible CPU frequency. In that case freq floor call will
+ * fail with -ERANGE and we need to match to the lowest
+ * frequency using freq_ceil.
+ */
+ if (IS_ERR(opp_entry) && PTR_ERR(opp_entry) == -ERANGE) {
+ opp_entry = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_val);
+ if (IS_ERR(opp_entry))
+ dev_err(cpu_dev, "frequency:%lu. opp error:%ld\n",
+ freq_val, PTR_ERR(opp_entry));
+ }
rcu_read_unlock();
- max_limit = freq_val;
+ max_limit = FREQ_HZ_TO_KHZ(freq_val);
sched_update_cpu_freq_min_max(&hw->core_map, 0, max_limit);
trace_lmh_dcvs_freq(cpumask_first(&hw->core_map), max_limit);
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index f685892edd39..7c75f740a204 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -49,6 +49,7 @@
#include <linux/suspend.h>
#include <linux/uaccess.h>
#include <linux/uio_driver.h>
+#include <linux/io.h>
#include <asm/cacheflush.h>
@@ -88,6 +89,20 @@
#define HOTPLUG_RETRY_INTERVAL_MS 100
#define UIO_VERSION "1.0"
+#define CXIP_LM_BASE_ADDRESS 0x1FE5000
+#define CXIP_LM_ADDRESS_SIZE 0x68
+#define CXIP_LM_VOTE_STATUS 0x40
+#define CXIP_LM_BYPASS 0x44
+#define CXIP_LM_VOTE_CLEAR 0x48
+#define CXIP_LM_VOTE_SET 0x4c
+#define CXIP_LM_FEATURE_EN 0x50
+#define CXIP_LM_DISABLE_VAL 0x0
+#define CXIP_LM_BYPASS_VAL 0xFF00
+#define CXIP_LM_THERM_VOTE_VAL 0x80
+#define CXIP_LM_THERM_SENS_ID 8
+#define CXIP_LM_THERM_SENS_HIGH 90
+#define CXIP_LM_THERM_SENS_LOW 75
+
#define VALIDATE_AND_SET_MASK(_node, _key, _mask, _cpu) \
do { \
if (of_property_read_bool(_node, _key)) \
@@ -179,6 +194,7 @@ static bool gfx_warm_phase_ctrl_enabled;
static bool cx_phase_ctrl_enabled;
static bool vdd_mx_enabled;
static bool therm_reset_enabled;
+static bool cxip_lm_enabled;
static bool online_core;
static bool cluster_info_probed;
static bool cluster_info_nodes_called;
@@ -207,6 +223,7 @@ static bool tsens_temp_print;
static uint32_t bucket;
static cpumask_t throttling_mask;
static int tsens_scaling_factor = SENSOR_SCALING_FACTOR;
+static void __iomem *cxip_lm_reg_base;
static LIST_HEAD(devices_list);
static LIST_HEAD(thresholds_list);
@@ -301,6 +318,7 @@ enum msm_thresh_list {
MSM_GFX_PHASE_CTRL_HOT,
MSM_OCR,
MSM_VDD_MX_RESTRICTION,
+ MSM_THERM_CXIP_LM,
MSM_LIST_MAX_NR,
};
@@ -495,6 +513,9 @@ static ssize_t thermal_config_debugfs_write(struct file *file,
} \
} while (0)
+#define CXIP_LM_CLIENTS_STATUS() \
+ readl_relaxed(cxip_lm_reg_base + CXIP_LM_VOTE_STATUS)
+
static void uio_init(struct platform_device *pdev)
{
int ret = 0;
@@ -2895,6 +2916,76 @@ static void therm_reset_notify(struct therm_threshold *thresh_data)
thresh_data->threshold);
}
+static void cxip_lm_therm_vote_apply(bool vote)
+{
+ static bool prev_vote;
+
+ if (prev_vote == vote)
+ return;
+
+ prev_vote = vote;
+ writel_relaxed(CXIP_LM_THERM_VOTE_VAL,
+ cxip_lm_reg_base +
+ (vote ? CXIP_LM_VOTE_SET : CXIP_LM_VOTE_CLEAR));
+
+ pr_debug("%s vote for cxip_lm. Agg.vote:0x%x\n",
+ vote ? "Applied" : "Cleared", CXIP_LM_CLIENTS_STATUS());
+}
+
+static int do_cxip_lm(void)
+{
+ int temp = 0, ret = 0;
+
+ if (!cxip_lm_enabled)
+ return ret;
+
+ ret = therm_get_temp(
+ thresh[MSM_THERM_CXIP_LM].thresh_list->sensor_id,
+ thresh[MSM_THERM_CXIP_LM].thresh_list->id_type,
+ &temp);
+ if (ret) {
+ pr_err("Unable to read TSENS sensor:%d, err:%d\n",
+ thresh[MSM_THERM_CXIP_LM].thresh_list->sensor_id, ret);
+ return ret;
+ }
+
+ if (temp >= CXIP_LM_THERM_SENS_HIGH)
+ cxip_lm_therm_vote_apply(true);
+ else if (temp <= CXIP_LM_THERM_SENS_LOW)
+ cxip_lm_therm_vote_apply(false);
+
+ return ret;
+}
+
+static void therm_cxip_lm_notify(struct therm_threshold *trig_thresh)
+{
+ if (!cxip_lm_enabled)
+ return;
+
+ if (!trig_thresh) {
+ pr_err("Invalid input\n");
+ return;
+ }
+
+ switch (trig_thresh->trip_triggered) {
+ case THERMAL_TRIP_CONFIGURABLE_HI:
+ cxip_lm_therm_vote_apply(true);
+ break;
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
+ cxip_lm_therm_vote_apply(false);
+ break;
+ default:
+ pr_err("Invalid trip type\n");
+ break;
+ }
+
+ if (trig_thresh->cur_state != trig_thresh->trip_triggered) {
+ sensor_mgr_set_threshold(trig_thresh->sensor_id,
+ trig_thresh->threshold);
+ trig_thresh->cur_state = trig_thresh->trip_triggered;
+ }
+}
+
static void retry_hotplug(struct work_struct *work)
{
mutex_lock(&core_control_mutex);
@@ -3524,6 +3615,7 @@ static void check_temp(struct work_struct *work)
goto reschedule;
}
do_core_control(temp);
+ do_cxip_lm();
do_vdd_mx();
do_psm();
do_gfx_phase_cond();
@@ -3554,6 +3646,12 @@ static int __ref msm_thermal_cpu_callback(struct notifier_block *nfb,
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
+ /*
+ * Apply LMH freq cap vote, which was requested when the
+ * core was offline.
+ */
+ if (lmh_dcvs_available)
+ msm_lmh_dcvs_update(cpu);
if (!cpumask_test_and_set_cpu(cpu, cpus_previously_online))
pr_debug("Total prev cores online tracked %u\n",
cpumask_weight(cpus_previously_online));
@@ -4554,6 +4652,13 @@ static void thermal_monitor_init(void)
!(convert_to_zone_id(&thresh[MSM_VDD_MX_RESTRICTION])))
therm_set_threshold(&thresh[MSM_VDD_MX_RESTRICTION]);
+ if (cxip_lm_enabled &&
+ !(convert_to_zone_id(&thresh[MSM_THERM_CXIP_LM]))) {
+ /* To handle if temp > HIGH */
+ do_cxip_lm();
+ therm_set_threshold(&thresh[MSM_THERM_CXIP_LM]);
+ }
+
init_exit:
return;
}
@@ -6223,6 +6328,74 @@ fetch_mitig_exit:
return err;
}
+static void thermal_cxip_lm_disable(void)
+{
+ THERM_MITIGATION_DISABLE(cxip_lm_enabled, MSM_THERM_CXIP_LM);
+ cxip_lm_therm_vote_apply(false);
+}
+
+static int probe_cxip_lm(struct device_node *node,
+ struct msm_thermal_data *data,
+ struct platform_device *pdev)
+{
+ char *key = NULL;
+ int ret = 0;
+ u32 val = 0;
+
+ key = "qcom,cxip-lm-enable";
+ ret = of_property_read_u32(node, key, &val);
+ if (ret) {
+ cxip_lm_enabled = false;
+ return -EINVAL;
+ }
+ cxip_lm_enabled = val ? true : false;
+
+ cxip_lm_reg_base = devm_ioremap(&pdev->dev,
+ CXIP_LM_BASE_ADDRESS, CXIP_LM_ADDRESS_SIZE);
+ if (!cxip_lm_reg_base) {
+ pr_err("cxip_lm reg remap failed\n");
+ ret = -ENOMEM;
+ goto PROBE_CXIP_LM_EXIT;
+ }
+
+ /* If it is disable request, disable and exit */
+ if (!cxip_lm_enabled) {
+ writel_relaxed(CXIP_LM_DISABLE_VAL,
+ cxip_lm_reg_base + CXIP_LM_FEATURE_EN);
+ devm_iounmap(&pdev->dev, cxip_lm_reg_base);
+ return 0;
+ }
+
+ /* Set bypass clients bits */
+ writel_relaxed(CXIP_LM_BYPASS_VAL, cxip_lm_reg_base + CXIP_LM_BYPASS);
+
+ ret = sensor_mgr_init_threshold(&thresh[MSM_THERM_CXIP_LM],
+ CXIP_LM_THERM_SENS_ID, CXIP_LM_THERM_SENS_HIGH,
+ CXIP_LM_THERM_SENS_LOW, therm_cxip_lm_notify);
+ if (ret) {
+ pr_err("cxip_lm sensor init failed\n");
+ goto PROBE_CXIP_LM_EXIT;
+ }
+
+ snprintf(mit_config[MSM_THERM_CXIP_LM].config_name,
+ MAX_DEBUGFS_CONFIG_LEN, "cxip_lm");
+ mit_config[MSM_THERM_CXIP_LM].disable_config
+ = thermal_cxip_lm_disable;
+
+PROBE_CXIP_LM_EXIT:
+ if (ret) {
+ if (cxip_lm_reg_base)
+ devm_iounmap(&pdev->dev,
+ cxip_lm_reg_base);
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s err=%d. KTM continues\n",
+ __func__, node->full_name, key, ret);
+ cxip_lm_enabled = false;
+ }
+
+ return ret;
+}
+
static void probe_sensor_info(struct device_node *node,
struct msm_thermal_data *data, struct platform_device *pdev)
{
@@ -6991,6 +7164,19 @@ static void thermal_phase_ctrl_config_read(struct seq_file *m, void *data)
msm_thermal_info.gfx_sensor);
}
+static void thermal_cxip_lm_config_read(struct seq_file *m, void *data)
+{
+ if (cxip_lm_enabled) {
+ seq_puts(m, "\n-----CX IPEAK LM-----\n");
+ seq_printf(m, "threshold:%d degC\n",
+ CXIP_LM_THERM_SENS_HIGH);
+ seq_printf(m, "threshold clear:%d degC\n",
+ CXIP_LM_THERM_SENS_LOW);
+ seq_printf(m, "tsens sensor:tsens_tz_sensor%d\n",
+ CXIP_LM_THERM_SENS_ID);
+ }
+}
+
static void thermal_disable_all_mitigation(void)
{
thermal_cpu_freq_mit_disable();
@@ -7003,6 +7189,7 @@ static void thermal_disable_all_mitigation(void)
thermal_cx_phase_ctrl_mit_disable();
thermal_gfx_phase_warm_ctrl_mit_disable();
thermal_gfx_phase_crit_ctrl_mit_disable();
+ thermal_cxip_lm_disable();
}
static void enable_config(int config_id)
@@ -7029,6 +7216,9 @@ static void enable_config(int config_id)
case MSM_VDD_MX_RESTRICTION:
vdd_mx_enabled = 1;
break;
+ case MSM_THERM_CXIP_LM:
+ cxip_lm_enabled = 1;
+ break;
case MSM_LIST_MAX_NR + HOTPLUG_CONFIG:
hotplug_enabled = 1;
break;
@@ -7132,6 +7322,7 @@ static int thermal_config_debugfs_read(struct seq_file *m, void *data)
thermal_psm_config_read(m, data);
thermal_ocr_config_read(m, data);
thermal_phase_ctrl_config_read(m, data);
+ thermal_cxip_lm_config_read(m, data);
return 0;
}
@@ -7220,6 +7411,7 @@ static int msm_thermal_dev_probe(struct platform_device *pdev)
probe_cx_phase_ctrl(node, &data, pdev);
probe_gfx_phase_ctrl(node, &data, pdev);
probe_therm_reset(node, &data, pdev);
+ probe_cxip_lm(node, &data, pdev);
update_cpu_topology(&pdev->dev);
ret = fetch_cpu_mitigaiton_info(&data, pdev);
if (ret) {
@@ -7299,6 +7491,11 @@ static int msm_thermal_dev_exit(struct platform_device *inp_dev)
&thresh[MSM_VDD_MX_RESTRICTION]);
kfree(thresh[MSM_VDD_MX_RESTRICTION].thresh_list);
}
+ if (cxip_lm_enabled) {
+ sensor_mgr_remove_threshold(
+ &thresh[MSM_THERM_CXIP_LM]);
+ kfree(thresh[MSM_THERM_CXIP_LM].thresh_list);
+ }
kfree(thresh);
thresh = NULL;
}
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 5aae319198fa..2ac9b28e036f 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -178,6 +178,11 @@ static int dwc3_core_reset(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
reg &= ~DWC3_GUSB3PIPECTL_DELAYP1TRANS;
+
+ /* core exits U1/U2/U3 only in PHY power state P1/P2/P3 respectively */
+ if (dwc->revision <= DWC3_REVISION_310A)
+ reg |= DWC3_GUSB3PIPECTL_UX_EXIT_IN_PX;
+
dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
dwc3_notify_event(dwc, DWC3_CONTROLLER_RESET_EVENT, 0);
@@ -659,8 +664,10 @@ int dwc3_core_init(struct dwc3 *dwc)
/* Handle USB2.0-only core configuration */
if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) ==
DWC3_GHWPARAMS3_SSPHY_IFC_DIS) {
- if (dwc->maximum_speed == USB_SPEED_SUPER)
- dwc->maximum_speed = USB_SPEED_HIGH;
+ if (dwc->max_hw_supp_speed == USB_SPEED_SUPER) {
+ dwc->max_hw_supp_speed = USB_SPEED_HIGH;
+ dwc->maximum_speed = dwc->max_hw_supp_speed;
+ }
}
ret = dwc3_core_reset(dwc);
@@ -1084,6 +1091,7 @@ static int dwc3_probe(struct platform_device *pdev)
hird_threshold = 12;
dwc->maximum_speed = usb_get_maximum_speed(dev);
+ dwc->max_hw_supp_speed = dwc->maximum_speed;
dwc->dr_mode = usb_get_dr_mode(dev);
dwc->has_lpm_erratum = device_property_read_bool(dev,
@@ -1157,6 +1165,7 @@ static int dwc3_probe(struct platform_device *pdev)
if (pdata) {
dwc->maximum_speed = pdata->maximum_speed;
+ dwc->max_hw_supp_speed = dwc->maximum_speed;
dwc->has_lpm_erratum = pdata->has_lpm_erratum;
if (pdata->lpm_nyet_threshold)
lpm_nyet_threshold = pdata->lpm_nyet_threshold;
@@ -1190,7 +1199,7 @@ static int dwc3_probe(struct platform_device *pdev)
/* default to superspeed if no maximum_speed passed */
if (dwc->maximum_speed == USB_SPEED_UNKNOWN)
- dwc->maximum_speed = USB_SPEED_SUPER;
+ dwc->max_hw_supp_speed = dwc->maximum_speed = USB_SPEED_SUPER;
dwc->lpm_nyet_threshold = lpm_nyet_threshold;
dwc->tx_de_emphasis = tx_de_emphasis;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index c2cdfd1a823b..9de5e06430e1 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -213,6 +213,7 @@
/* Global USB3 PIPE Control Register */
#define DWC3_GUSB3PIPECTL_PHYSOFTRST (1 << 31)
#define DWC3_GUSB3PIPECTL_U2SSINP3OK (1 << 29)
+#define DWC3_GUSB3PIPECTL_UX_EXIT_IN_PX (1 << 27)
#define DWC3_GUSB3PIPECTL_REQP1P2P3 (1 << 24)
#define DWC3_GUSB3PIPECTL_DEP1P2P3(n) ((n) << 19)
#define DWC3_GUSB3PIPECTL_DEP1P2P3_MASK DWC3_GUSB3PIPECTL_DEP1P2P3(7)
@@ -757,7 +758,8 @@ struct dwc3_scratchpad_array {
* @nr_scratch: number of scratch buffers
* @num_event_buffers: calculated number of event buffers
* @u1u2: only used on revisions <1.83a for workaround
- * @maximum_speed: maximum speed requested (mainly for testing purposes)
+ * @maximum_speed: maximum speed to operate as requested by sw
+ * @max_hw_supp_speed: maximum speed supported by hw design
* @revision: revision register contents
* @dr_mode: requested mode of operation
* @usb2_phy: pointer to USB2 PHY
@@ -888,6 +890,7 @@ struct dwc3 {
u32 u1;
u32 u1u2;
u32 maximum_speed;
+ u32 max_hw_supp_speed;
/*
* All 3.1 IP version constants are greater than the 3.0 IP
@@ -917,6 +920,7 @@ struct dwc3 {
#define DWC3_REVISION_260A 0x5533260a
#define DWC3_REVISION_270A 0x5533270a
#define DWC3_REVISION_280A 0x5533280a
+#define DWC3_REVISION_310A 0x5533310a
/*
* NOTICE: we're using bit 31 as a "is usb 3.1" flag. This is really
@@ -1017,6 +1021,8 @@ struct dwc3 {
unsigned irq_event_count[MAX_INTR_STATS];
unsigned irq_dbg_index;
+ unsigned long l1_remote_wakeup_cnt;
+
wait_queue_head_t wait_linkstate;
};
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 4b4978043d50..068b03a35bd5 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -1180,9 +1180,12 @@ static int dwc3_gadget_int_events_show(struct seq_file *s, void *unused)
seq_printf(s, "%d\t", dwc->bh_completion_time[i]);
seq_putc(s, '\n');
- seq_printf(s, "t_pwr evt irq : %lld\t",
+ seq_printf(s, "t_pwr evt irq : %lld\n",
ktime_to_us(dwc->t_pwr_evt_irq));
+ seq_printf(s, "l1_remote_wakeup_cnt : %lu\n",
+ dwc->l1_remote_wakeup_cnt);
+
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
}
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 1e252febc783..211e1945962c 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -2608,6 +2608,8 @@ static int dwc3_msm_id_notifier(struct notifier_block *nb,
speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
dwc->maximum_speed = (speed <= 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
+ if (dwc->maximum_speed > dwc->max_hw_supp_speed)
+ dwc->maximum_speed = dwc->max_hw_supp_speed;
if (mdwc->id_state != id) {
mdwc->id_state = id;
@@ -2649,6 +2651,8 @@ static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
dwc->maximum_speed = (speed <= 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
+ if (dwc->maximum_speed > dwc->max_hw_supp_speed)
+ dwc->maximum_speed = dwc->max_hw_supp_speed;
mdwc->vbus_active = event;
if (dwc->is_drd && !mdwc->in_restart) {
@@ -2750,6 +2754,39 @@ static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(mode);
+
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ usb_speed_string(dwc->max_hw_supp_speed));
+}
+
+static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ enum usb_device_speed req_speed = USB_SPEED_UNKNOWN;
+
+ if (sysfs_streq(buf, "high"))
+ req_speed = USB_SPEED_HIGH;
+ else if (sysfs_streq(buf, "super"))
+ req_speed = USB_SPEED_SUPER;
+
+ if (req_speed != USB_SPEED_UNKNOWN &&
+ req_speed != dwc->max_hw_supp_speed) {
+ dwc->maximum_speed = dwc->max_hw_supp_speed = req_speed;
+ schedule_work(&mdwc->restart_usb_work);
+ }
+
+ return count;
+}
+static DEVICE_ATTR_RW(speed);
+
static void msm_dwc3_perf_vote_work(struct work_struct *w);
static int dwc3_msm_probe(struct platform_device *pdev)
@@ -3093,6 +3130,7 @@ static int dwc3_msm_probe(struct platform_device *pdev)
}
device_create_file(&pdev->dev, &dev_attr_mode);
+ device_create_file(&pdev->dev, &dev_attr_speed);
host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
if (!dwc->is_drd && host_mode) {
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 2b910e09a80a..9cd87513619c 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -236,6 +236,8 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
unsigned long flags;
int ret;
+ enum dwc3_link_state link_state;
+ u32 reg;
spin_lock_irqsave(&dwc->lock, flags);
if (!dep->endpoint.desc) {
@@ -252,6 +254,18 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
goto out;
}
+ /* if link stats is in L1 initiate remote wakeup before queuing req */
+ if (dwc->speed != DWC3_DSTS_SUPERSPEED) {
+ link_state = dwc3_get_link_state(dwc);
+ /* in HS this link state is same as L1 */
+ if (link_state == DWC3_LINK_STATE_U2) {
+ dwc->l1_remote_wakeup_cnt++;
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_ULSTCHNG_RECOVERY;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ }
+ }
+
dwc3_trace(trace_dwc3_ep0,
"queueing request %pK to %s length %d state '%s'",
request, dep->name, request->length,
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index e2440b7efc58..88350e61f3bd 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1317,13 +1317,6 @@ static int dwc3_gadget_wakeup(struct usb_gadget *g)
return 0;
}
-static inline enum dwc3_link_state dwc3_get_link_state(struct dwc3 *dwc)
-{
- u32 reg;
- reg = dwc3_readl(dwc->regs, DWC3_DSTS);
- return DWC3_DSTS_USBLNKST(reg);
-}
-
static bool dwc3_gadget_is_suspended(struct dwc3 *dwc)
{
if (atomic_read(&dwc->in_lpm) ||
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 3abd6379164e..a21962c8f513 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -84,6 +84,14 @@ static inline void dwc3_gadget_move_request_queued(struct dwc3_request *req)
list_move_tail(&req->list, &dep->req_queued);
}
+static inline enum dwc3_link_state dwc3_get_link_state(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+ return DWC3_DSTS_USBLNKST(reg);
+}
+
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status);
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index d03678a02185..7f1ae5cf9909 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -25,6 +25,7 @@ int dwc3_host_init(struct dwc3 *dwc)
struct platform_device *xhci;
struct usb_xhci_pdata pdata;
int ret;
+ struct device_node *node = dwc->dev->of_node;
xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
if (!xhci) {
@@ -52,6 +53,11 @@ int dwc3_host_init(struct dwc3 *dwc)
pdata.usb3_lpm_capable = dwc->usb3_lpm_capable;
+ ret = of_property_read_u32(node, "xhci-imod-value",
+ &pdata.imod_interval);
+ if (ret)
+ pdata.imod_interval = 0; /* use default xhci.c value */
+
ret = platform_device_add_data(xhci, &pdata, sizeof(pdata));
if (ret) {
dev_err(dwc->dev, "couldn't add platform data to xHCI device\n");
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b826f926b205..0787e36742aa 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -229,6 +229,9 @@ config USB_F_CDEV
config USB_F_QDSS
tristate
+config USB_F_CCID
+ tristate
+
choice
tristate "USB Gadget Drivers"
default USB_ETH
@@ -562,6 +565,14 @@ config USB_CONFIGFS_F_QDSS
help
USB QDSS function driver to get hwtracing related data over USB.
+config USB_CONFIGFS_F_CCID
+ bool "USB CCID function"
+ select USB_F_CCID
+ depends on USB_CONFIGFS
+ help
+ USB CCID function driver creates a transport layer between the
+ userspace CCID component and the Windows Host.
+
source "drivers/usb/gadget/legacy/Kconfig"
endchoice
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 511909fb78f6..7a64b24b8bf6 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -64,3 +64,5 @@ usb_f_qcrndis-y := f_qc_rndis.o u_data_ipa.o
obj-$(CONFIG_USB_F_QCRNDIS) += usb_f_qcrndis.o
usb_f_rmnet_bam-y := f_rmnet.o u_ctrl_qti.o
obj-$(CONFIG_USB_F_RMNET_BAM) += usb_f_rmnet_bam.o
+usb_f_ccid-y := f_ccid.o
+obj-$(CONFIG_USB_F_CCID) += usb_f_ccid.o
diff --git a/drivers/usb/gadget/function/f_ccid.c b/drivers/usb/gadget/function/f_ccid.c
new file mode 100644
index 000000000000..28ac8d0010d8
--- /dev/null
+++ b/drivers/usb/gadget/function/f_ccid.c
@@ -0,0 +1,1105 @@
+/*
+ * f_ccid.c -- CCID function Driver
+ *
+ * Copyright (c) 2011, 2013, 2017 The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/usb/ccid_desc.h>
+#include <linux/usb/composite.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+
+#include "f_ccid.h"
+
+#define BULK_IN_BUFFER_SIZE sizeof(struct ccid_bulk_in_header)
+#define BULK_OUT_BUFFER_SIZE sizeof(struct ccid_bulk_out_header)
+#define CTRL_BUF_SIZE 4
+#define FUNCTION_NAME "ccid"
+#define MAX_INST_NAME_LEN 40
+#define CCID_CTRL_DEV_NAME "ccid_ctrl"
+#define CCID_BULK_DEV_NAME "ccid_bulk"
+#define CCID_NOTIFY_INTERVAL 5
+#define CCID_NOTIFY_MAXPACKET 4
+
+/* number of tx requests to allocate */
+#define TX_REQ_MAX 4
+
+struct ccid_ctrl_dev {
+ atomic_t opened;
+ struct list_head tx_q;
+ wait_queue_head_t tx_wait_q;
+ unsigned char buf[CTRL_BUF_SIZE];
+ int tx_ctrl_done;
+ struct miscdevice ccid_ctrl_device;
+};
+
+struct ccid_bulk_dev {
+ atomic_t error;
+ atomic_t opened;
+ atomic_t rx_req_busy;
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ struct usb_request *rx_req;
+ int rx_done;
+ struct list_head tx_idle;
+ struct miscdevice ccid_bulk_device;
+};
+
+struct ccid_opts {
+ struct usb_function_instance func_inst;
+ struct f_ccid *ccid;
+};
+
+struct f_ccid {
+ struct usb_function function;
+ int ifc_id;
+ spinlock_t lock;
+ atomic_t online;
+ /* usb eps*/
+ struct usb_ep *notify;
+ struct usb_ep *in;
+ struct usb_ep *out;
+ struct usb_request *notify_req;
+ struct ccid_ctrl_dev ctrl_dev;
+ struct ccid_bulk_dev bulk_dev;
+ int dtr_state;
+};
+
+static inline struct f_ccid *ctrl_dev_to_ccid(struct ccid_ctrl_dev *d)
+{
+ return container_of(d, struct f_ccid, ctrl_dev);
+}
+
+static inline struct f_ccid *bulk_dev_to_ccid(struct ccid_bulk_dev *d)
+{
+ return container_of(d, struct f_ccid, bulk_dev);
+}
+
+/* Interface Descriptor: */
+static struct usb_interface_descriptor ccid_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_CSCID,
+ .bInterfaceSubClass = 0,
+ .bInterfaceProtocol = 0,
+};
+/* CCID Class Descriptor */
+static struct usb_ccid_class_descriptor ccid_class_desc = {
+ .bLength = sizeof(ccid_class_desc),
+ .bDescriptorType = CCID_DECRIPTOR_TYPE,
+ .bcdCCID = CCID1_10,
+ .bMaxSlotIndex = 0,
+ /* This value indicates what voltages the CCID can supply to slots */
+ .bVoltageSupport = VOLTS_3_0,
+ .dwProtocols = PROTOCOL_TO,
+ /* Default ICC clock frequency in KHz */
+ .dwDefaultClock = 3580,
+ /* Maximum supported ICC clock frequency in KHz */
+ .dwMaximumClock = 3580,
+ .bNumClockSupported = 0,
+ /* Default ICC I/O data rate in bps */
+ .dwDataRate = 9600,
+ /* Maximum supported ICC I/O data rate in bps */
+ .dwMaxDataRate = 9600,
+ .bNumDataRatesSupported = 0,
+ .dwMaxIFSD = 0,
+ .dwSynchProtocols = 0,
+ .dwMechanical = 0,
+ /* This value indicates what intelligent features the CCID has */
+ .dwFeatures = CCID_FEATURES_EXC_SAPDU |
+ CCID_FEATURES_AUTO_PNEGO |
+ CCID_FEATURES_AUTO_BAUD |
+ CCID_FEATURES_AUTO_CLOCK |
+ CCID_FEATURES_AUTO_VOLT |
+ CCID_FEATURES_AUTO_ACTIV |
+ CCID_FEATURES_AUTO_PCONF,
+ /* extended APDU level Message Length */
+ .dwMaxCCIDMessageLength = 0x200,
+ .bClassGetResponse = 0x0,
+ .bClassEnvelope = 0x0,
+ .wLcdLayout = 0,
+ .bPINSupport = 0,
+ .bMaxCCIDBusySlots = 1
+};
+/* Full speed support: */
+static struct usb_endpoint_descriptor ccid_fs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(CCID_NOTIFY_MAXPACKET),
+ .bInterval = 1 << CCID_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor ccid_fs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+static struct usb_endpoint_descriptor ccid_fs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(64),
+};
+
+static struct usb_descriptor_header *ccid_fs_descs[] = {
+ (struct usb_descriptor_header *) &ccid_interface_desc,
+ (struct usb_descriptor_header *) &ccid_class_desc,
+ (struct usb_descriptor_header *) &ccid_fs_notify_desc,
+ (struct usb_descriptor_header *) &ccid_fs_in_desc,
+ (struct usb_descriptor_header *) &ccid_fs_out_desc,
+ NULL,
+};
+
+/* High speed support: */
+static struct usb_endpoint_descriptor ccid_hs_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(CCID_NOTIFY_MAXPACKET),
+ .bInterval = CCID_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_endpoint_descriptor ccid_hs_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor ccid_hs_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *ccid_hs_descs[] = {
+ (struct usb_descriptor_header *) &ccid_interface_desc,
+ (struct usb_descriptor_header *) &ccid_class_desc,
+ (struct usb_descriptor_header *) &ccid_hs_notify_desc,
+ (struct usb_descriptor_header *) &ccid_hs_in_desc,
+ (struct usb_descriptor_header *) &ccid_hs_out_desc,
+ NULL,
+};
+
+static inline struct f_ccid *func_to_ccid(struct usb_function *f)
+{
+ return container_of(f, struct f_ccid, function);
+}
+
+static void ccid_req_put(struct f_ccid *ccid_dev, struct list_head *head,
+ struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ccid_dev->lock, flags);
+ list_add_tail(&req->list, head);
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+}
+
+static struct usb_request *ccid_req_get(struct f_ccid *ccid_dev,
+ struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req = NULL;
+
+ spin_lock_irqsave(&ccid_dev->lock, flags);
+ if (!list_empty(head)) {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&ccid_dev->lock, flags);
+ return req;
+}
+
+static void ccid_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ switch (req->status) {
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ case 0:
+ break;
+ default:
+ pr_err("CCID notify ep error %d\n", req->status);
+ }
+}
+
+static void ccid_bulk_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_ccid *ccid_dev = req->context;
+ struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+
+ if (req->status != 0)
+ atomic_set(&bulk_dev->error, 1);
+
+ ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
+ wake_up(&bulk_dev->write_wq);
+}
+
+static void ccid_bulk_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+ struct f_ccid *ccid_dev = req->context;
+ struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+ if (req->status != 0)
+ atomic_set(&bulk_dev->error, 1);
+
+ bulk_dev->rx_done = 1;
+ wake_up(&bulk_dev->read_wq);
+}
+
+static struct usb_request *
+ccid_request_alloc(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+ struct usb_request *req;
+
+ req = usb_ep_alloc_request(ep, kmalloc_flags);
+
+ if (req != NULL) {
+ req->length = len;
+ req->buf = kmalloc(len, kmalloc_flags);
+ if (req->buf == NULL) {
+ usb_ep_free_request(ep, req);
+ req = NULL;
+ }
+ }
+
+ return req ? req : ERR_PTR(-ENOMEM);
+}
+
+static void ccid_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+static int
+ccid_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+ struct f_ccid *ccid_dev = container_of(f, struct f_ccid, function);
+ struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
+ struct usb_composite_dev *cdev = f->config->cdev;
+ struct usb_request *req = cdev->req;
+ int ret = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+
+ if (!atomic_read(&ccid_dev->online))
+ return -ENOTCONN;
+
+ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+ case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | CCIDGENERICREQ_ABORT:
+ if (w_length != 0)
+ goto invalid;
+ ctrl_dev->buf[0] = CCIDGENERICREQ_ABORT;
+ ctrl_dev->buf[1] = w_value & 0xFF;
+ ctrl_dev->buf[2] = (w_value >> 8) & 0xFF;
+ ctrl_dev->buf[3] = 0x00;
+ ctrl_dev->tx_ctrl_done = 1;
+ wake_up(&ctrl_dev->tx_wait_q);
+ ret = 0;
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | CCIDGENERICREQ_GET_CLOCK_FREQUENCIES:
+ *(u32 *) req->buf =
+ cpu_to_le32(ccid_class_desc.dwDefaultClock);
+ ret = min_t(u32, w_length,
+ sizeof(ccid_class_desc.dwDefaultClock));
+ break;
+
+ case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+ | CCIDGENERICREQ_GET_DATA_RATES:
+ *(u32 *) req->buf = cpu_to_le32(ccid_class_desc.dwDataRate);
+ ret = min_t(u32, w_length, sizeof(ccid_class_desc.dwDataRate));
+ break;
+
+ default:
+invalid:
+ pr_debug("invalid control req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ }
+
+ /* respond with data transfer or status phase? */
+ if (ret >= 0) {
+ pr_debug("ccid req%02x.%02x v%04x i%04x l%d\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+ req->length = ret;
+ ret = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (ret < 0)
+ pr_err("ccid ep0 enqueue err %d\n", ret);
+ }
+
+ return ret;
+}
+
+/*
+ * ccid_function_disable() - called when the host deconfigures the function.
+ *
+ * Disables all three endpoints, frees their requests, marks the device
+ * offline and wakes every waiter blocked in the char-device handlers so
+ * they can bail out with -ENODEV.
+ */
+static void ccid_function_disable(struct usb_function *f)
+{
+	struct f_ccid *ccid_dev = func_to_ccid(f);
+	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
+	struct usb_request *req;
+
+	/* Disable endpoints */
+	usb_ep_disable(ccid_dev->notify);
+	usb_ep_disable(ccid_dev->in);
+	usb_ep_disable(ccid_dev->out);
+	/* Free endpoint related requests */
+	ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
+	/* rx_req may still be owned by ccid_bulk_read(); that path frees it */
+	if (!atomic_read(&bulk_dev->rx_req_busy))
+		ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
+	while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
+		ccid_request_free(req, ccid_dev->in);
+
+	ccid_dev->dtr_state = 0;
+	atomic_set(&ccid_dev->online, 0);
+	/* Wake up threads */
+	wake_up(&bulk_dev->write_wq);
+	wake_up(&bulk_dev->read_wq);
+	wake_up(&ctrl_dev->tx_wait_q);
+
+}
+
+/*
+ * ccid_function_set_alt() - host selected our (only) interface setting.
+ *
+ * Allocates the notify request, the single bulk-OUT rx request and
+ * TX_REQ_MAX bulk-IN requests, then configures and enables the three
+ * endpoints for the current connection speed, finally marking the device
+ * online.  On any failure everything acquired so far is unwound in
+ * reverse order.  Returns 0 on success or a negative errno.
+ *
+ * NOTE(review): allocations use GFP_ATOMIC — presumably because set_alt
+ * may run in the gadget's interrupt context; confirm against the UDC.
+ */
+static int
+ccid_function_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_ccid *ccid_dev = func_to_ccid(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+	struct usb_request *req;
+	int ret = 0;
+	int i;
+
+	ccid_dev->notify_req = ccid_request_alloc(ccid_dev->notify,
+			sizeof(struct usb_ccid_notification), GFP_ATOMIC);
+	if (IS_ERR(ccid_dev->notify_req)) {
+		pr_err("%s: unable to allocate memory for notify req\n",
+				__func__);
+		return PTR_ERR(ccid_dev->notify_req);
+	}
+	ccid_dev->notify_req->complete = ccid_notify_complete;
+	ccid_dev->notify_req->context = ccid_dev;
+
+	/* now allocate requests for our endpoints */
+	req = ccid_request_alloc(ccid_dev->out, (unsigned)BULK_OUT_BUFFER_SIZE,
+			GFP_ATOMIC);
+	if (IS_ERR(req)) {
+		pr_err("%s: unable to allocate memory for out req\n",
+				__func__);
+		ret = PTR_ERR(req);
+		goto free_notify;
+	}
+	req->complete = ccid_bulk_complete_out;
+	req->context = ccid_dev;
+	bulk_dev->rx_req = req;
+
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = ccid_request_alloc(ccid_dev->in,
+				(unsigned)BULK_IN_BUFFER_SIZE,
+				GFP_ATOMIC);
+		if (IS_ERR(req)) {
+			pr_err("%s: unable to allocate memory for in req\n",
+					__func__);
+			ret = PTR_ERR(req);
+			goto free_bulk_out;
+		}
+		req->complete = ccid_bulk_complete_in;
+		req->context = ccid_dev;
+		ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
+	}
+
+	/* choose the descriptors and enable endpoints */
+	ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->notify);
+	if (ret) {
+		ccid_dev->notify->desc = NULL;
+		pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
+				__func__, ccid_dev->notify->name, ret);
+		goto free_bulk_in;
+	}
+	ret = usb_ep_enable(ccid_dev->notify);
+	if (ret) {
+		pr_err("%s: usb ep#%s enable failed, err#%d\n",
+				__func__, ccid_dev->notify->name, ret);
+		goto free_bulk_in;
+	}
+	ccid_dev->notify->driver_data = ccid_dev;
+
+	ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->in);
+	if (ret) {
+		ccid_dev->in->desc = NULL;
+		pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
+			__func__, ccid_dev->in->name, ret);
+		goto disable_ep_notify;
+	}
+	ret = usb_ep_enable(ccid_dev->in);
+	if (ret) {
+		pr_err("%s: usb ep#%s enable failed, err#%d\n",
+				__func__, ccid_dev->in->name, ret);
+		goto disable_ep_notify;
+	}
+
+	ret = config_ep_by_speed(cdev->gadget, f, ccid_dev->out);
+	if (ret) {
+		ccid_dev->out->desc = NULL;
+		pr_err("%s: config_ep_by_speed failed for ep#%s, err#%d\n",
+			__func__, ccid_dev->out->name, ret);
+		goto disable_ep_in;
+	}
+	ret = usb_ep_enable(ccid_dev->out);
+	if (ret) {
+		pr_err("%s: usb ep#%s enable failed, err#%d\n",
+				__func__, ccid_dev->out->name, ret);
+		goto disable_ep_in;
+	}
+	ccid_dev->dtr_state = 1;
+	atomic_set(&ccid_dev->online, 1);
+	return ret;
+
+/* unwind in reverse acquisition order */
+disable_ep_in:
+	usb_ep_disable(ccid_dev->in);
+disable_ep_notify:
+	usb_ep_disable(ccid_dev->notify);
+	ccid_dev->notify->driver_data = NULL;
+free_bulk_in:
+	while ((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)))
+		ccid_request_free(req, ccid_dev->in);
+free_bulk_out:
+	ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
+free_notify:
+	ccid_request_free(ccid_dev->notify_req, ccid_dev->notify);
+	return ret;
+}
+
+/*
+ * ccid_function_unbind() - free the descriptor copies made in bind().
+ * Endpoint requests are released in set_alt()/disable(), not here.
+ */
+static void ccid_function_unbind(struct usb_configuration *c,
+					struct usb_function *f)
+{
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->fs_descriptors);
+
+}
+
+/*
+ * ccid_function_bind() - allocate the interface id, claim the three
+ * endpoints and build the FS (and, on dual-speed gadgets, HS) descriptor
+ * lists.  Returns 0 on success or a negative errno; on failure every
+ * endpoint claimed so far is released.
+ *
+ * Fixes vs. original: the error labels dereferenced ccid_dev->out /
+ * ccid_dev->in exactly when the corresponding usb_ep_autoconfig() had
+ * failed and the pointer was still NULL, and f->fs_descriptors leaked
+ * when the HS descriptor copy failed.
+ */
+static int ccid_function_bind(struct usb_configuration *c,
+					struct usb_function *f)
+{
+	struct f_ccid *ccid_dev = func_to_ccid(f);
+	struct usb_ep *ep;
+	struct usb_composite_dev *cdev = c->cdev;
+	int ret = -ENODEV;
+
+	ccid_dev->ifc_id = usb_interface_id(c, f);
+	if (ccid_dev->ifc_id < 0) {
+		pr_err("%s: unable to allocate ifc id, err:%d",
+				__func__, ccid_dev->ifc_id);
+		return ccid_dev->ifc_id;
+	}
+	ccid_interface_desc.bInterfaceNumber = ccid_dev->ifc_id;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_notify_desc);
+	if (!ep) {
+		pr_err("%s: usb epnotify autoconfig failed\n", __func__);
+		return -ENODEV;
+	}
+	ccid_dev->notify = ep;
+	ep->driver_data = cdev;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_in_desc);
+	if (!ep) {
+		pr_err("%s: usb epin autoconfig failed\n", __func__);
+		ret = -ENODEV;
+		goto ep_auto_in_fail;
+	}
+	ccid_dev->in = ep;
+	ep->driver_data = cdev;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &ccid_fs_out_desc);
+	if (!ep) {
+		pr_err("%s: usb epout autoconfig failed\n", __func__);
+		ret = -ENODEV;
+		goto ep_auto_out_fail;
+	}
+	ccid_dev->out = ep;
+	ep->driver_data = cdev;
+
+	f->fs_descriptors = usb_copy_descriptors(ccid_fs_descs);
+	if (!f->fs_descriptors)
+		goto ep_auto_out_fail;
+
+	if (gadget_is_dualspeed(cdev->gadget)) {
+		ccid_hs_in_desc.bEndpointAddress =
+				ccid_fs_in_desc.bEndpointAddress;
+		ccid_hs_out_desc.bEndpointAddress =
+				ccid_fs_out_desc.bEndpointAddress;
+		ccid_hs_notify_desc.bEndpointAddress =
+				ccid_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(ccid_hs_descs);
+		if (!f->hs_descriptors) {
+			/* don't leak the FS copy made just above */
+			usb_free_descriptors(f->fs_descriptors);
+			goto ep_auto_out_fail;
+		}
+	}
+
+	pr_debug("%s: CCID %s Speed, IN:%s OUT:%s\n", __func__,
+			gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
+			ccid_dev->in->name, ccid_dev->out->name);
+
+	return 0;
+
+ep_auto_out_fail:
+	/* out/in are NULL when their own autoconfig failed; guard the deref */
+	if (ccid_dev->out) {
+		ccid_dev->out->driver_data = NULL;
+		ccid_dev->out = NULL;
+	}
+ep_auto_in_fail:
+	if (ccid_dev->in) {
+		ccid_dev->in->driver_data = NULL;
+		ccid_dev->in = NULL;
+	}
+
+	return ret;
+}
+
+/*
+ * ccid_bulk_open() - open handler for the bulk misc char device.
+ *
+ * Allows a single opener at a time and only while the function is
+ * configured (online).  Rebinds fp->private_data from the miscdevice to
+ * the owning f_ccid so read/write can reach the full state.
+ *
+ * NOTE(review): the opened check-then-set pair is not atomic as a unit —
+ * two racing opens could presumably both pass; confirm whether the misc
+ * core serializes open for this driver.
+ */
+static int ccid_bulk_open(struct inode *ip, struct file *fp)
+{
+	struct ccid_bulk_dev *bulk_dev = container_of(fp->private_data,
+						struct ccid_bulk_dev,
+						ccid_bulk_device);
+	struct f_ccid *ccid_dev = bulk_dev_to_ccid(bulk_dev);
+	unsigned long flags;
+
+	pr_debug("ccid_bulk_open\n");
+	if (!atomic_read(&ccid_dev->online)) {
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+
+	if (atomic_read(&bulk_dev->opened)) {
+		pr_debug("%s: bulk device is already opened\n", __func__);
+		return -EBUSY;
+	}
+	atomic_set(&bulk_dev->opened, 1);
+	/* clear the error latch */
+	atomic_set(&bulk_dev->error, 0);
+	spin_lock_irqsave(&ccid_dev->lock, flags);
+	fp->private_data = ccid_dev;
+	spin_unlock_irqrestore(&ccid_dev->lock, flags);
+
+	return 0;
+}
+
+/* Release handler: simply drop the single-opener claim. */
+static int ccid_bulk_release(struct inode *ip, struct file *fp)
+{
+	struct f_ccid *ccid_dev = fp->private_data;
+	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+
+	pr_debug("ccid_bulk_release\n");
+	atomic_set(&bulk_dev->opened, 0);
+	return 0;
+}
+
+/*
+ * ccid_bulk_read() - read one bulk-OUT transfer from the host.
+ *
+ * Queues the single rx request on the OUT endpoint, blocks until the
+ * completion handler sets rx_done (or an error / disconnect occurs), and
+ * copies the payload to user space.  Zero-length packets are requeued
+ * transparently.  Returns bytes read or a negative errno.
+ *
+ * Fix vs. original: the offline check after requeue_req returned -ENODEV
+ * while still holding ccid_dev->lock, leaking the spinlock with IRQs
+ * disabled.
+ */
+static ssize_t ccid_bulk_read(struct file *fp, char __user *buf,
+				size_t count, loff_t *pos)
+{
+	struct f_ccid *ccid_dev = fp->private_data;
+	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+	struct usb_request *req;
+	int r = count, xfer;
+	int ret;
+	unsigned long flags;
+
+	pr_debug("ccid_bulk_read(%zu)\n", count);
+
+	if (count > BULK_OUT_BUFFER_SIZE) {
+		pr_err("%s: max_buffer_size:%zu given_pkt_size:%zu\n",
+				__func__, BULK_OUT_BUFFER_SIZE, count);
+		return -ENOMEM;
+	}
+
+	if (atomic_read(&bulk_dev->error)) {
+		r = -EIO;
+		pr_err("%s bulk_dev_error\n", __func__);
+		goto done;
+	}
+
+requeue_req:
+	spin_lock_irqsave(&ccid_dev->lock, flags);
+	if (!atomic_read(&ccid_dev->online)) {
+		/* must drop the lock before bailing out */
+		spin_unlock_irqrestore(&ccid_dev->lock, flags);
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+	/* queue a request */
+	req = bulk_dev->rx_req;
+	req->length = count;
+	bulk_dev->rx_done = 0;
+	spin_unlock_irqrestore(&ccid_dev->lock, flags);
+	ret = usb_ep_queue(ccid_dev->out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		pr_err("%s usb ep queue failed\n", __func__);
+		atomic_set(&bulk_dev->error, 1);
+		goto done;
+	}
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(bulk_dev->read_wq, bulk_dev->rx_done ||
+					atomic_read(&bulk_dev->error) ||
+					!atomic_read(&ccid_dev->online));
+	if (ret < 0) {
+		atomic_set(&bulk_dev->error, 1);
+		r = ret;
+		usb_ep_dequeue(ccid_dev->out, req);
+		goto done;
+	}
+	if (!atomic_read(&bulk_dev->error)) {
+		spin_lock_irqsave(&ccid_dev->lock, flags);
+		if (!atomic_read(&ccid_dev->online)) {
+			spin_unlock_irqrestore(&ccid_dev->lock, flags);
+			pr_debug("%s: USB cable not connected\n", __func__);
+			r = -ENODEV;
+			goto done;
+		}
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0) {
+			spin_unlock_irqrestore(&ccid_dev->lock, flags);
+			goto requeue_req;
+		}
+		xfer = (req->actual < count) ? req->actual : count;
+		/* tell disable() we own rx_req while copying to user space */
+		atomic_set(&bulk_dev->rx_req_busy, 1);
+		spin_unlock_irqrestore(&ccid_dev->lock, flags);
+
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+
+		spin_lock_irqsave(&ccid_dev->lock, flags);
+		atomic_set(&bulk_dev->rx_req_busy, 0);
+		if (!atomic_read(&ccid_dev->online)) {
+			/* disable() skipped the free while we were busy */
+			ccid_request_free(bulk_dev->rx_req, ccid_dev->out);
+			spin_unlock_irqrestore(&ccid_dev->lock, flags);
+			pr_debug("%s: USB cable not connected\n", __func__);
+			r = -ENODEV;
+			goto done;
+		} else {
+			r = xfer;
+		}
+		spin_unlock_irqrestore(&ccid_dev->lock, flags);
+	} else {
+		r = -EIO;
+	}
+done:
+	pr_debug("ccid_bulk_read returning %d\n", r);
+	return r;
+}
+
+/*
+ * ccid_bulk_write() - send one bulk-IN transfer to the host.
+ *
+ * Waits for an idle tx request, copies the user buffer into it and queues
+ * it on the IN endpoint.  Returns bytes queued or a negative errno.
+ *
+ * Fix vs. original: the usb_ep_queue() error path unlocked ccid_dev->lock
+ * inside the !online branch and then unlocked it again unconditionally —
+ * a double spin_unlock_irqrestore().
+ */
+static ssize_t ccid_bulk_write(struct file *fp, const char __user *buf,
+				 size_t count, loff_t *pos)
+{
+	struct f_ccid *ccid_dev = fp->private_data;
+	struct ccid_bulk_dev *bulk_dev = &ccid_dev->bulk_dev;
+	struct usb_request *req = NULL;
+	int r = count;
+	int ret;
+	unsigned long flags;
+
+	pr_debug("ccid_bulk_write(%zu)\n", count);
+
+	if (!atomic_read(&ccid_dev->online)) {
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!count) {
+		pr_err("%s: zero length ctrl pkt\n", __func__);
+		return -ENODEV;
+	}
+	if (count > BULK_IN_BUFFER_SIZE) {
+		pr_err("%s: max_buffer_size:%zu given_pkt_size:%zu\n",
+				__func__, BULK_IN_BUFFER_SIZE, count);
+		return -ENOMEM;
+	}
+
+
+	/* get an idle tx request to use */
+	ret = wait_event_interruptible(bulk_dev->write_wq,
+		((req = ccid_req_get(ccid_dev, &bulk_dev->tx_idle)) ||
+		 atomic_read(&bulk_dev->error)));
+
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+
+	if (atomic_read(&bulk_dev->error)) {
+		pr_err(" %s dev->error\n", __func__);
+		r = -EIO;
+		goto done;
+	}
+	if (copy_from_user(req->buf, buf, count)) {
+		if (!atomic_read(&ccid_dev->online)) {
+			pr_debug("%s: USB cable not connected\n",
+					__func__);
+			ccid_request_free(req, ccid_dev->in);
+			r = -ENODEV;
+		} else {
+			/* return the request to the idle pool */
+			ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
+			r = -EFAULT;
+		}
+		goto done;
+	}
+	req->length = count;
+	ret = usb_ep_queue(ccid_dev->in, req, GFP_KERNEL);
+	if (ret < 0) {
+		pr_debug("ccid_bulk_write: xfer error %d\n", ret);
+		atomic_set(&bulk_dev->error, 1);
+		ccid_req_put(ccid_dev, &bulk_dev->tx_idle, req);
+		r = -EIO;
+		spin_lock_irqsave(&ccid_dev->lock, flags);
+		if (!atomic_read(&ccid_dev->online)) {
+			spin_unlock_irqrestore(&ccid_dev->lock, flags);
+			pr_debug("%s: USB cable not connected\n",
+					__func__);
+			while ((req = ccid_req_get(ccid_dev,
+						&bulk_dev->tx_idle)))
+				ccid_request_free(req, ccid_dev->in);
+			r = -ENODEV;
+		} else {
+			spin_unlock_irqrestore(&ccid_dev->lock, flags);
+		}
+		goto done;
+	}
+done:
+	pr_debug("ccid_bulk_write returning %d\n", r);
+	return r;
+}
+
+/* File operations for the bulk transfer misc device (/dev/ccid_bulk). */
+static const struct file_operations ccid_bulk_fops = {
+	.owner = THIS_MODULE,
+	.read = ccid_bulk_read,
+	.write = ccid_bulk_write,
+	.open = ccid_bulk_open,
+	.release = ccid_bulk_release,
+};
+
+/*
+ * ccid_bulk_device_init() - set up wait queues, the idle tx list and
+ * register the bulk misc char device.  Returns 0 or a negative errno.
+ */
+static int ccid_bulk_device_init(struct f_ccid *dev)
+{
+	int ret;
+	struct ccid_bulk_dev *bulk_dev = &dev->bulk_dev;
+
+	init_waitqueue_head(&bulk_dev->read_wq);
+	init_waitqueue_head(&bulk_dev->write_wq);
+	INIT_LIST_HEAD(&bulk_dev->tx_idle);
+
+	bulk_dev->ccid_bulk_device.name = CCID_BULK_DEV_NAME;
+	bulk_dev->ccid_bulk_device.fops = &ccid_bulk_fops;
+	bulk_dev->ccid_bulk_device.minor = MISC_DYNAMIC_MINOR;
+
+	ret = misc_register(&bulk_dev->ccid_bulk_device);
+	if (ret) {
+		pr_err("%s: failed to register misc device\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * ccid_ctrl_open() - open handler for the control misc char device.
+ * Single opener, only while online; rebinds fp->private_data to f_ccid.
+ */
+static int ccid_ctrl_open(struct inode *inode, struct file *fp)
+{
+	struct ccid_ctrl_dev *ctrl_dev = container_of(fp->private_data,
+						struct ccid_ctrl_dev,
+						ccid_ctrl_device);
+	struct f_ccid *ccid_dev = ctrl_dev_to_ccid(ctrl_dev);
+	unsigned long flags;
+
+	if (!atomic_read(&ccid_dev->online)) {
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+	if (atomic_read(&ctrl_dev->opened)) {
+		pr_debug("%s: ctrl device is already opened\n", __func__);
+		return -EBUSY;
+	}
+	atomic_set(&ctrl_dev->opened, 1);
+	spin_lock_irqsave(&ccid_dev->lock, flags);
+	fp->private_data = ccid_dev;
+	spin_unlock_irqrestore(&ccid_dev->lock, flags);
+
+	return 0;
+}
+
+
+/* Release handler: drop the single-opener claim on the ctrl device. */
+static int ccid_ctrl_release(struct inode *inode, struct file *fp)
+{
+	struct f_ccid *ccid_dev = fp->private_data;
+	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
+
+	atomic_set(&ctrl_dev->opened, 0);
+
+	return 0;
+}
+
+/*
+ * ccid_ctrl_read() - deliver the last latched class control request.
+ *
+ * Blocks until ccid_function_setup() stores a request in ctrl_dev->buf
+ * and sets tx_ctrl_done, then copies up to CTRL_BUF_SIZE bytes to user
+ * space.
+ *
+ * NOTE(review): returns the (clamped) requested 'count' regardless of
+ * how many bytes setup() actually latched — callers presumably always
+ * read CTRL_BUF_SIZE; confirm against the user-space client.
+ */
+static ssize_t ccid_ctrl_read(struct file *fp, char __user *buf,
+		      size_t count, loff_t *ppos)
+{
+	struct f_ccid *ccid_dev = fp->private_data;
+	struct ccid_ctrl_dev *ctrl_dev = &ccid_dev->ctrl_dev;
+	int ret = 0;
+
+	if (!atomic_read(&ccid_dev->online)) {
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+	if (count > CTRL_BUF_SIZE)
+		count = CTRL_BUF_SIZE;
+
+	ret = wait_event_interruptible(ctrl_dev->tx_wait_q,
+					 ctrl_dev->tx_ctrl_done);
+	if (ret < 0)
+		return ret;
+	ctrl_dev->tx_ctrl_done = 0;
+
+	if (!atomic_read(&ccid_dev->online)) {
+		pr_debug("%s: USB cable not connected\n", __func__);
+		return -ENODEV;
+	}
+	ret = copy_to_user(buf, ctrl_dev->buf, count);
+	if (ret)
+		return -EFAULT;
+
+	return count;
+}
+
+/*
+ * ccid_ctrl_ioctl() - user-space control of the CCID notify endpoint.
+ *
+ * CCID_NOTIFY_CARD / CCID_NOTIFY_HWERROR copy a notification from user
+ * space and queue it on the interrupt-IN endpoint (2- and 4-byte messages
+ * respectively); CCID_READ_DTR reports the current DTR state.
+ *
+ * Fix vs. original: an unrecognized command fell through the switch and
+ * queued notify_req with stale contents/length; now returns -ENOTTY.
+ */
+static long
+ccid_ctrl_ioctl(struct file *fp, unsigned cmd, u_long arg)
+{
+	struct f_ccid *ccid_dev = fp->private_data;
+	struct usb_request *req = ccid_dev->notify_req;
+	struct usb_ccid_notification *ccid_notify = req->buf;
+	void __user *argp = (void __user *)arg;
+	int ret = 0;
+
+	switch (cmd) {
+	case CCID_NOTIFY_CARD:
+		if (copy_from_user(ccid_notify, argp,
+				sizeof(struct usb_ccid_notification)))
+			return -EFAULT;
+		req->length = 2;
+		break;
+	case CCID_NOTIFY_HWERROR:
+		if (copy_from_user(ccid_notify, argp,
+				sizeof(struct usb_ccid_notification)))
+			return -EFAULT;
+		req->length = 4;
+		break;
+	case CCID_READ_DTR:
+		if (copy_to_user((int *)arg, &ccid_dev->dtr_state, sizeof(int)))
+			return -EFAULT;
+		return 0;
+	default:
+		return -ENOTTY;
+	}
+	ret = usb_ep_queue(ccid_dev->notify, ccid_dev->notify_req, GFP_KERNEL);
+	if (ret < 0) {
+		pr_err("ccid notify ep enqueue error %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+/* File operations for the control misc device (/dev/ccid_ctrl). */
+static const struct file_operations ccid_ctrl_fops = {
+	.owner = THIS_MODULE,
+	.open = ccid_ctrl_open,
+	.release = ccid_ctrl_release,
+	.read = ccid_ctrl_read,
+	.unlocked_ioctl = ccid_ctrl_ioctl,
+};
+
+/*
+ * ccid_ctrl_device_init() - set up the ctrl wait queue/list and register
+ * the control misc char device.  Returns 0 or a negative errno.
+ */
+static int ccid_ctrl_device_init(struct f_ccid *dev)
+{
+	int ret;
+	struct ccid_ctrl_dev *ctrl_dev = &dev->ctrl_dev;
+
+	INIT_LIST_HEAD(&ctrl_dev->tx_q);
+	init_waitqueue_head(&ctrl_dev->tx_wait_q);
+
+	ctrl_dev->ccid_ctrl_device.name = CCID_CTRL_DEV_NAME;
+	ctrl_dev->ccid_ctrl_device.fops = &ccid_ctrl_fops;
+	ctrl_dev->ccid_ctrl_device.minor = MISC_DYNAMIC_MINOR;
+
+	ret = misc_register(&ctrl_dev->ccid_ctrl_device);
+	if (ret) {
+		pr_err("%s: failed to register misc device\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* No-op: f_ccid is owned by the instance and freed in ccid_free_inst(). */
+static void ccid_free_func(struct usb_function *f)
+{
+	pr_debug("%s\n", __func__);
+}
+
+/* Populate the usb_function callbacks for this CCID instance. */
+static int ccid_bind_config(struct f_ccid *ccid_dev)
+{
+	pr_debug("ccid_bind_config\n");
+
+	ccid_dev->function.name = FUNCTION_NAME;
+	ccid_dev->function.fs_descriptors = ccid_fs_descs;
+	ccid_dev->function.hs_descriptors = ccid_hs_descs;
+	ccid_dev->function.bind = ccid_function_bind;
+	ccid_dev->function.unbind = ccid_function_unbind;
+	ccid_dev->function.set_alt = ccid_function_set_alt;
+	ccid_dev->function.setup = ccid_function_setup;
+	ccid_dev->function.disable = ccid_function_disable;
+	ccid_dev->function.free_func = ccid_free_func;
+
+	return 0;
+}
+
+/*
+ * ccid_setup() - allocate the f_ccid state and register both misc char
+ * devices.  Returns the new device or an ERR_PTR; on failure anything
+ * registered so far is torn down.
+ */
+static struct f_ccid *ccid_setup(void)
+{
+	struct f_ccid *ccid_dev;
+	int ret;
+
+	ccid_dev = kzalloc(sizeof(*ccid_dev), GFP_KERNEL);
+	if (!ccid_dev) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	spin_lock_init(&ccid_dev->lock);
+
+	ret = ccid_ctrl_device_init(ccid_dev);
+	if (ret) {
+		pr_err("%s: ccid_ctrl_device_init failed, err:%d\n",
+				__func__, ret);
+		goto err_ctrl_init;
+	}
+	ret = ccid_bulk_device_init(ccid_dev);
+	if (ret) {
+		pr_err("%s: ccid_bulk_device_init failed, err:%d\n",
+				__func__, ret);
+		goto err_bulk_init;
+	}
+
+	return ccid_dev;
+err_bulk_init:
+	misc_deregister(&ccid_dev->ctrl_dev.ccid_ctrl_device);
+err_ctrl_init:
+	kfree(ccid_dev);
+error:
+	pr_err("ccid gadget driver failed to initialize\n");
+	return ERR_PTR(ret);
+}
+
+/* Map a configfs item back to its enclosing ccid_opts. */
+static inline struct ccid_opts *to_ccid_opts(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct ccid_opts,
+			    func_inst.group);
+}
+
+/* configfs release: drop the reference on the function instance. */
+static void ccid_attr_release(struct config_item *item)
+{
+	struct ccid_opts *opts = to_ccid_opts(item);
+
+	usb_put_function_instance(&opts->func_inst);
+}
+
+/* configfs plumbing for the "ccid" function directory. */
+static struct configfs_item_operations ccid_item_ops = {
+	.release = ccid_attr_release,
+};
+
+static struct config_item_type ccid_func_type = {
+	.ct_item_ops = &ccid_item_ops,
+	.ct_owner = THIS_MODULE,
+};
+
+/*
+ * ccid_set_inst_name() - configfs instance creation hook; validates the
+ * name length and allocates the backing f_ccid via ccid_setup().
+ */
+static int ccid_set_inst_name(struct usb_function_instance *fi,
+	const char *name)
+{
+	int name_len;
+	struct f_ccid *ccid;
+	struct ccid_opts *opts = container_of(fi, struct ccid_opts, func_inst);
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	ccid = ccid_setup();
+	if (IS_ERR(ccid))
+		return PTR_ERR(ccid);
+
+	opts->ccid = ccid;
+
+	return 0;
+}
+
+/*
+ * ccid_free_inst() - instance teardown: deregister both misc devices and
+ * free the f_ccid state allocated in ccid_setup(), then the opts.
+ */
+static void ccid_free_inst(struct usb_function_instance *f)
+{
+	struct ccid_opts *opts = container_of(f, struct ccid_opts, func_inst);
+
+	if (!opts->ccid)
+		return;
+
+	misc_deregister(&opts->ccid->ctrl_dev.ccid_ctrl_device);
+	misc_deregister(&opts->ccid->bulk_dev.ccid_bulk_device);
+
+	kfree(opts->ccid);
+	kfree(opts);
+}
+
+
+/* Allocate a configfs function instance and wire up its callbacks. */
+static struct usb_function_instance *ccid_alloc_inst(void)
+{
+	struct ccid_opts *opts;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return ERR_PTR(-ENOMEM);
+
+	opts->func_inst.set_inst_name = ccid_set_inst_name;
+	opts->func_inst.free_func_inst = ccid_free_inst;
+	config_group_init_type_name(&opts->func_inst.group, "",
+				    &ccid_func_type);
+
+	return &opts->func_inst;
+}
+
+/* Bind the instance's f_ccid callbacks and hand out its usb_function. */
+static struct usb_function *ccid_alloc(struct usb_function_instance *fi)
+{
+	struct ccid_opts *opts;
+	int ret;
+
+	opts = container_of(fi, struct ccid_opts, func_inst);
+
+	ret = ccid_bind_config(opts->ccid);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return &opts->ccid->function;
+}
+
+/* Register the "ccid" usb_function with the configfs gadget framework. */
+DECLARE_USB_FUNCTION_INIT(ccid, ccid_alloc_inst, ccid_alloc);
+MODULE_DESCRIPTION("USB CCID function Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/function/f_ccid.h b/drivers/usb/gadget/function/f_ccid.h
new file mode 100644
index 000000000000..42a7ebbbccfc
--- /dev/null
+++ b/drivers/usb/gadget/function/f_ccid.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2011, 2017 The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ */
+
+#ifndef __F_CCID_H
+#define __F_CCID_H
+
+#define PROTOCOL_TO 0x01
+#define PROTOCOL_T1 0x02
+#define ABDATA_SIZE 512
+
+/* define for dwFeatures for Smart Card Device Class Descriptors */
+/* No special characteristics */
+#define CCID_FEATURES_NADA 0x00000000
+/* Automatic parameter configuration based on ATR data */
+#define CCID_FEATURES_AUTO_PCONF 0x00000002
+/* Automatic activation of ICC on inserting */
+#define CCID_FEATURES_AUTO_ACTIV 0x00000004
+/* Automatic ICC voltage selection */
+#define CCID_FEATURES_AUTO_VOLT 0x00000008
+/* Automatic ICC clock frequency change */
+#define CCID_FEATURES_AUTO_CLOCK 0x00000010
+/* Automatic baud rate change */
+#define CCID_FEATURES_AUTO_BAUD 0x00000020
+/*Automatic parameters negotiation made by the CCID */
+#define CCID_FEATURES_AUTO_PNEGO 0x00000040
+/* Automatic PPS made by the CCID according to the active parameters */
+#define CCID_FEATURES_AUTO_PPS 0x00000080
+/* CCID can set ICC in clock stop mode */
+#define CCID_FEATURES_ICCSTOP 0x00000100
+/* NAD value other than 00 accepted (T=1 protocol in use) */
+#define CCID_FEATURES_NAD 0x00000200
+/* Automatic IFSD exchange as first exchange (T=1 protocol in use) */
+#define CCID_FEATURES_AUTO_IFSD 0x00000400
+/* TPDU level exchanges with CCID */
+#define CCID_FEATURES_EXC_TPDU 0x00010000
+/* Short APDU level exchange with CCID */
+#define CCID_FEATURES_EXC_SAPDU 0x00020000
+/* Short and Extended APDU level exchange with CCID */
+#define CCID_FEATURES_EXC_APDU 0x00040000
+/* USB Wake up signaling supported on card insertion and removal */
+#define CCID_FEATURES_WAKEUP 0x00100000
+
+#define CCID_NOTIFY_CARD _IOW('C', 1, struct usb_ccid_notification)
+#define CCID_NOTIFY_HWERROR _IOW('C', 2, struct usb_ccid_notification)
+#define CCID_READ_DTR _IOR('C', 3, int)
+
+struct usb_ccid_notification {
+ unsigned char buf[4];
+} __packed;
+
+struct ccid_bulk_in_header {
+	unsigned char bMessageType;
+	u32 wLength; /* CCID dwLength is 4 bytes; 'unsigned long' is 8 on 64-bit */
+	unsigned char bSlot;
+	unsigned char bSeq;
+	unsigned char bStatus;
+	unsigned char bError;
+	unsigned char bSpecific;
+	unsigned char abData[ABDATA_SIZE];
+	unsigned char bSizeToSend;
+} __packed;
+
+struct ccid_bulk_out_header {
+	unsigned char bMessageType;
+	u32 wLength; /* CCID dwLength is 4 bytes; 'unsigned long' is 8 on 64-bit */
+	unsigned char bSlot;
+	unsigned char bSeq;
+	unsigned char bSpecific_0;
+	unsigned char bSpecific_1;
+	unsigned char bSpecific_2;
+	unsigned char APDU[ABDATA_SIZE];
+} __packed;
+#endif
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 1221a80e0bdc..05d96fd8c07c 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -138,6 +138,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
struct clk *clk;
int ret;
int irq;
+ u32 temp, imod;
+ unsigned long flags;
if (usb_disabled())
return -ENODEV;
@@ -256,6 +258,18 @@ static int xhci_plat_probe(struct platform_device *pdev)
device_wakeup_enable(&xhci->shared_hcd->self.root_hub->dev);
+ /* override imod interval if specified */
+ if (pdata && pdata->imod_interval) {
+ imod = pdata->imod_interval & ER_IRQ_INTERVAL_MASK;
+ spin_lock_irqsave(&xhci->lock, flags);
+ temp = readl_relaxed(&xhci->ir_set->irq_control);
+ temp &= ~ER_IRQ_INTERVAL_MASK;
+ temp |= imod;
+ writel_relaxed(temp, &xhci->ir_set->irq_control);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ dev_dbg(&pdev->dev, "%s: imod set to %u\n", __func__, imod);
+ }
+
ret = device_create_file(&pdev->dev, &dev_attr_config_imod);
if (ret)
dev_err(&pdev->dev, "%s: unable to create imod sysfs entry\n",
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index d0ee0c9d6430..ed33743c9062 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -187,18 +187,23 @@ static void *usbpd_ipc_log;
#define PD_MAX_MSG_ID 7
-#define PD_MSG_HDR(type, dr, pr, id, cnt) \
- (((type) & 0xF) | ((dr) << 5) | (1 << 6) | \
+#define PD_MSG_HDR(type, dr, pr, id, cnt, rev) \
+ (((type) & 0xF) | ((dr) << 5) | (rev << 6) | \
((pr) << 8) | ((id) << 9) | ((cnt) << 12))
#define PD_MSG_HDR_COUNT(hdr) (((hdr) >> 12) & 7)
#define PD_MSG_HDR_TYPE(hdr) ((hdr) & 0xF)
#define PD_MSG_HDR_ID(hdr) (((hdr) >> 9) & 7)
+#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
#define PD_RDO_FIXED(obj, gb, mismatch, usb_comm, no_usb_susp, curr1, curr2) \
(((obj) << 28) | ((gb) << 27) | ((mismatch) << 26) | \
((usb_comm) << 25) | ((no_usb_susp) << 24) | \
((curr1) << 10) | (curr2))
+#define PD_RDO_AUGMENTED(obj, mismatch, usb_comm, no_usb_susp, volt, curr) \
+ (((obj) << 28) | ((mismatch) << 26) | ((usb_comm) << 25) | \
+ ((no_usb_susp) << 24) | ((volt) << 9) | (curr))
+
#define PD_RDO_OBJ_POS(rdo) ((rdo) >> 28 & 7)
#define PD_RDO_GIVEBACK(rdo) ((rdo) >> 27 & 1)
#define PD_RDO_MISMATCH(rdo) ((rdo) >> 26 & 1)
@@ -206,11 +211,14 @@ static void *usbpd_ipc_log;
#define PD_RDO_NO_USB_SUSP(rdo) ((rdo) >> 24 & 1)
#define PD_RDO_FIXED_CURR(rdo) ((rdo) >> 10 & 0x3FF)
#define PD_RDO_FIXED_CURR_MINMAX(rdo) ((rdo) & 0x3FF)
+#define PD_RDO_PROG_VOLTAGE(rdo) ((rdo) >> 9 & 0x7FF)
+#define PD_RDO_PROG_CURR(rdo) ((rdo) & 0x7F)
#define PD_SRC_PDO_TYPE(pdo) (((pdo) >> 30) & 3)
#define PD_SRC_PDO_TYPE_FIXED 0
#define PD_SRC_PDO_TYPE_BATTERY 1
#define PD_SRC_PDO_TYPE_VARIABLE 2
+#define PD_SRC_PDO_TYPE_AUGMENTED 3
#define PD_SRC_PDO_FIXED_PR_SWAP(pdo) (((pdo) >> 29) & 1)
#define PD_SRC_PDO_FIXED_USB_SUSP(pdo) (((pdo) >> 28) & 1)
@@ -225,6 +233,11 @@ static void *usbpd_ipc_log;
#define PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo) (((pdo) >> 10) & 0x3FF)
#define PD_SRC_PDO_VAR_BATT_MAX(pdo) ((pdo) & 0x3FF)
+#define PD_APDO_PPS(pdo) (((pdo) >> 28) & 3)
+#define PD_APDO_MAX_VOLT(pdo) (((pdo) >> 17) & 0xFF)
+#define PD_APDO_MIN_VOLT(pdo) (((pdo) >> 8) & 0xFF)
+#define PD_APDO_MAX_CURR(pdo) ((pdo) & 0x7F)
+
/* Vendor Defined Messages */
#define MAX_CRC_RECEIVE_TIME 9 /* ~(2 * tReceive_max(1.1ms) * # retry 4) */
#define MAX_VDM_RESPONSE_TIME 60 /* 2 * tVDMSenderResponse_max(30ms) */
@@ -253,6 +266,9 @@ static void *usbpd_ipc_log;
#define ID_HDR_VID 0x05c6 /* qcom */
#define PROD_VDO_PID 0x0a00 /* TBD */
+static bool check_vsafe0v = true;
+module_param(check_vsafe0v, bool, S_IRUSR | S_IWUSR);
+
static int min_sink_current = 900;
module_param(min_sink_current, int, S_IRUSR | S_IWUSR);
@@ -310,6 +326,7 @@ struct usbpd {
enum power_supply_typec_power_role forced_pr;
bool vbus_present;
+ enum pd_spec_rev spec_rev;
enum data_role current_dr;
enum power_role current_pr;
bool in_pr_swap;
@@ -448,7 +465,7 @@ static int pd_send_msg(struct usbpd *pd, u8 hdr_type, const u32 *data,
u16 hdr;
hdr = PD_MSG_HDR(hdr_type, pd->current_dr, pd->current_pr,
- pd->tx_msgid, num_data);
+ pd->tx_msgid, num_data, pd->spec_rev);
ret = pd_phy_write(hdr, (u8 *)data, num_data * sizeof(u32), type, 15);
/* TODO figure out timeout. based on tReceive=1.1ms x nRetryCount? */
@@ -462,31 +479,48 @@ static int pd_send_msg(struct usbpd *pd, u8 hdr_type, const u32 *data,
return 0;
}
-static int pd_select_pdo(struct usbpd *pd, int pdo_pos)
+static int pd_select_pdo(struct usbpd *pd, int pdo_pos, int uv, int ua)
{
int curr;
int max_current;
bool mismatch = false;
+ u8 type;
u32 pdo = pd->received_pdos[pdo_pos - 1];
- /* TODO: handle variable/battery types */
- if (PD_SRC_PDO_TYPE(pdo) != PD_SRC_PDO_TYPE_FIXED) {
- usbpd_err(&pd->dev, "Non-fixed PDOs currently unsupported\n");
- return -ENOTSUPP;
- }
+ type = PD_SRC_PDO_TYPE(pdo);
+ if (type == PD_SRC_PDO_TYPE_FIXED) {
+ curr = max_current = PD_SRC_PDO_FIXED_MAX_CURR(pdo) * 10;
- curr = max_current = PD_SRC_PDO_FIXED_MAX_CURR(pdo) * 10;
+ /*
+ * Check if the PDO has enough current, otherwise set the
+ * Capability Mismatch flag
+ */
+ if (curr < min_sink_current) {
+ mismatch = true;
+ max_current = min_sink_current;
+ }
- /*
- * Check if the PDO has enough current, otherwise set the
- * Capability Mismatch flag
- */
- if (curr < min_sink_current) {
- mismatch = true;
- max_current = min_sink_current;
- }
+ pd->requested_voltage =
+ PD_SRC_PDO_FIXED_VOLTAGE(pdo) * 50 * 1000;
+ pd->rdo = PD_RDO_FIXED(pdo_pos, 0, mismatch, 1, 1, curr / 10,
+ max_current / 10);
+ } else if (type == PD_SRC_PDO_TYPE_AUGMENTED) {
+ if ((uv / 100000) > PD_APDO_MAX_VOLT(pdo) ||
+ (uv / 100000) < PD_APDO_MIN_VOLT(pdo) ||
+ (ua / 50000) > PD_APDO_MAX_CURR(pdo) || (ua < 0)) {
+ usbpd_err(&pd->dev, "uv (%d) and ua (%d) out of range of APDO\n",
+ uv, ua);
+ return -EINVAL;
+ }
- pd->requested_voltage = PD_SRC_PDO_FIXED_VOLTAGE(pdo) * 50 * 1000;
+ curr = ua / 1000;
+ pd->requested_voltage = uv;
+ pd->rdo = PD_RDO_AUGMENTED(pdo_pos, mismatch, 1, 1,
+ uv / 20000, ua / 50000);
+ } else {
+ usbpd_err(&pd->dev, "Only Fixed or Programmable PDOs supported\n");
+ return -ENOTSUPP;
+ }
/* Can't sink more than 5V if VCONN is sourced from the VBUS input */
if (pd->vconn_enabled && !pd->vconn_is_external &&
@@ -495,8 +529,6 @@ static int pd_select_pdo(struct usbpd *pd, int pdo_pos)
pd->requested_current = curr;
pd->requested_pdo = pdo_pos;
- pd->rdo = PD_RDO_FIXED(pdo_pos, 0, mismatch, 1, 1, curr / 10,
- max_current / 10);
return 0;
}
@@ -520,7 +552,7 @@ static int pd_eval_src_caps(struct usbpd *pd)
POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED, &val);
/* Select the first PDO (vSafe5V) immediately. */
- pd_select_pdo(pd, 1);
+ pd_select_pdo(pd, 1, 0, 0);
return 0;
}
@@ -609,6 +641,12 @@ static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type,
return;
}
+ /* if spec rev differs (i.e. is older), update PHY */
+ if (PD_MSG_HDR_REV(header) < pd->spec_rev) {
+ pd->spec_rev = PD_MSG_HDR_REV(header);
+ pd_phy_update_spec_rev(pd->spec_rev);
+ }
+
rx_msg = kzalloc(sizeof(*rx_msg), GFP_KERNEL);
if (!rx_msg)
return;
@@ -650,7 +688,8 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
.msg_rx_cb = phy_msg_received,
.shutdown_cb = phy_shutdown,
.frame_filter_val = FRAME_FILTER_EN_SOP |
- FRAME_FILTER_EN_HARD_RESET
+ FRAME_FILTER_EN_HARD_RESET,
+ .spec_rev = USBPD_REV_20,
};
union power_supply_propval val = {0};
unsigned long flags;
@@ -688,6 +727,8 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
+ /* support only PD 2.0 as a source */
+ pd->spec_rev = USBPD_REV_20;
pd_reset_protocol(pd);
if (!pd->in_pr_swap) {
@@ -698,6 +739,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
phy_params.data_role = pd->current_dr;
phy_params.power_role = pd->current_pr;
+ phy_params.spec_rev = pd->spec_rev;
ret = pd_phy_open(&phy_params);
if (ret) {
@@ -709,6 +751,8 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
}
pd->pd_phy_opened = true;
+ } else {
+ pd_phy_update_spec_rev(pd->spec_rev);
}
pd->current_state = PE_SRC_SEND_CAPABILITIES;
@@ -840,6 +884,11 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
if (!val.intval)
break;
+ /*
+ * support up to PD 3.0 as a sink; if source is 2.0,
+ * phy_msg_received() will handle the downgrade.
+ */
+ pd->spec_rev = USBPD_REV_30;
pd_reset_protocol(pd);
if (!pd->in_pr_swap) {
@@ -850,6 +899,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
phy_params.data_role = pd->current_dr;
phy_params.power_role = pd->current_pr;
+ phy_params.spec_rev = pd->spec_rev;
ret = pd_phy_open(&phy_params);
if (ret) {
@@ -861,6 +911,8 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
}
pd->pd_phy_opened = true;
+ } else {
+ pd_phy_update_spec_rev(pd->spec_rev);
}
pd->current_voltage = pd->requested_voltage = 5000000;
@@ -1390,6 +1442,41 @@ static void vconn_swap(struct usbpd *pd)
}
}
+static int enable_vbus(struct usbpd *pd)
+{
+ union power_supply_propval val = {0};
+ int count = 100;
+ int ret;
+
+ if (!check_vsafe0v)
+ goto enable_reg;
+
+ /*
+ * Check to make sure there's no lingering charge on
+ * VBUS before enabling it as a source. If so poll here
+ * until it goes below VSafe0V (0.8V) before proceeding.
+ */
+ while (count--) {
+ ret = power_supply_get_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
+ if (ret || val.intval <= 800000)
+ break;
+ usleep_range(20000, 30000);
+ }
+
+ if (count < 99)
+ msleep(100); /* need to wait an additional tCCDebounce */
+
+enable_reg:
+ ret = regulator_enable(pd->vbus);
+ if (ret)
+ usbpd_err(&pd->dev, "Unable to enable vbus (%d)\n", ret);
+ else
+ pd->vbus_enabled = true;
+
+ return ret;
+}
+
static inline void rx_msg_cleanup(struct usbpd *pd)
{
struct rx_msg *msg, *tmp;
@@ -1403,6 +1490,15 @@ static inline void rx_msg_cleanup(struct usbpd *pd)
spin_unlock_irqrestore(&pd->rx_lock, flags);
}
+/* For PD 3.0, check SinkTxOk before allowing initiating AMS */
+static inline bool is_sink_tx_ok(struct usbpd *pd)
+{
+ if (pd->spec_rev == USBPD_REV_30)
+ return pd->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH;
+
+ return true;
+}
+
/* Handles current state and determines transitions */
static void usbpd_sm(struct work_struct *w)
{
@@ -1541,12 +1637,7 @@ static void usbpd_sm(struct work_struct *w)
if (pd->current_pr == PR_SINK) {
usbpd_set_state(pd, PE_SNK_STARTUP);
} else if (pd->current_pr == PR_SRC) {
- ret = regulator_enable(pd->vbus);
- if (ret)
- usbpd_err(&pd->dev, "Unable to enable vbus\n");
- else
- pd->vbus_enabled = true;
-
+ enable_vbus(pd);
if (!pd->vconn_enabled &&
pd->typec_mode ==
POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE) {
@@ -1718,11 +1809,7 @@ static void usbpd_sm(struct work_struct *w)
msleep(SRC_RECOVER_TIME);
pd->vbus_enabled = false;
- ret = regulator_enable(pd->vbus);
- if (ret)
- usbpd_err(&pd->dev, "Unable to enable vbus\n");
- else
- pd->vbus_enabled = true;
+ enable_vbus(pd);
if (pd->vconn_enabled) {
ret = regulator_enable(pd->vconn);
@@ -1981,7 +2068,7 @@ static void usbpd_sm(struct work_struct *w)
vconn_swap(pd);
} else if (IS_DATA(rx_msg, MSG_VDM)) {
handle_vdm_rx(pd, rx_msg);
- } else if (pd->send_pr_swap) {
+ } else if (pd->send_pr_swap && is_sink_tx_ok(pd)) {
pd->send_pr_swap = false;
ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG);
if (ret) {
@@ -1992,7 +2079,7 @@ static void usbpd_sm(struct work_struct *w)
pd->current_state = PE_PRS_SNK_SRC_SEND_SWAP;
kick_sm(pd, SENDER_RESPONSE_TIME);
- } else if (pd->send_dr_swap) {
+ } else if (pd->send_dr_swap && is_sink_tx_ok(pd)) {
pd->send_dr_swap = false;
ret = pd_send_msg(pd, MSG_DR_SWAP, NULL, 0, SOP_MSG);
if (ret) {
@@ -2003,7 +2090,7 @@ static void usbpd_sm(struct work_struct *w)
pd->current_state = PE_DRS_SEND_DR_SWAP;
kick_sm(pd, SENDER_RESPONSE_TIME);
- } else {
+ } else if (is_sink_tx_ok(pd)) {
handle_vdm_tx(pd);
}
break;
@@ -2145,12 +2232,7 @@ static void usbpd_sm(struct work_struct *w)
/* fall-through */
case PE_PRS_SNK_SRC_SOURCE_ON:
- ret = regulator_enable(pd->vbus);
- if (ret)
- usbpd_err(&pd->dev, "Unable to enable vbus\n");
- else
- pd->vbus_enabled = true;
-
+ enable_vbus(pd);
msleep(200); /* allow time VBUS ramp-up, must be < tNewSrc */
ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
@@ -2302,6 +2384,12 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
usbpd_info(&pd->dev, "Type-C Source (%s) connected\n",
src_current(typec_mode));
+ /* if waiting for SinkTxOk to start an AMS */
+ if (pd->spec_rev == USBPD_REV_30 &&
+ typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH &&
+ (pd->send_pr_swap || pd->send_dr_swap || pd->vdm_tx))
+ break;
+
if (pd->current_pr == PR_SINK)
return 0;
@@ -2439,6 +2527,12 @@ static int usbpd_dr_set_property(struct dual_role_phy_instance *dual_role,
return -EAGAIN;
}
+ if (pd->current_state == PE_SNK_READY &&
+ !is_sink_tx_ok(pd)) {
+ usbpd_err(&pd->dev, "Rp indicates SinkTxNG\n");
+ return -EAGAIN;
+ }
+
reinit_completion(&pd->swap_complete);
pd->send_dr_swap = true;
kick_sm(pd, 0);
@@ -2484,6 +2578,12 @@ static int usbpd_dr_set_property(struct dual_role_phy_instance *dual_role,
return -EAGAIN;
}
+ if (pd->current_state == PE_SNK_READY &&
+ !is_sink_tx_ok(pd)) {
+ usbpd_err(&pd->dev, "Rp indicates SinkTxNG\n");
+ return -EAGAIN;
+ }
+
reinit_completion(&pd->swap_complete);
pd->send_pr_swap = true;
kick_sm(pd, 0);
@@ -2683,18 +2783,27 @@ static ssize_t pdo_h_show(struct device *dev, struct device_attribute *attr,
"\tMax Voltage:%d (mV)\n"
"\tMin Voltage:%d (mV)\n"
"\tMax Power:%d (mW)\n",
- PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo),
- PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo),
- PD_SRC_PDO_VAR_BATT_MAX(pdo));
+ PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo) * 50,
+ PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo) * 50,
+ PD_SRC_PDO_VAR_BATT_MAX(pdo) * 250);
} else if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_VARIABLE) {
cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
"\tVariable supply\n"
"\tMax Voltage:%d (mV)\n"
"\tMin Voltage:%d (mV)\n"
"\tMax Current:%d (mA)\n",
- PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo),
- PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo),
- PD_SRC_PDO_VAR_BATT_MAX(pdo));
+ PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo) * 50,
+ PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo) * 50,
+ PD_SRC_PDO_VAR_BATT_MAX(pdo) * 10);
+ } else if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_AUGMENTED) {
+ cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+ "\tProgrammable Power supply\n"
+ "\tMax Voltage:%d (mV)\n"
+ "\tMin Voltage:%d (mV)\n"
+ "\tMax Current:%d (mA)\n",
+ PD_APDO_MAX_VOLT(pdo) * 100,
+ PD_APDO_MIN_VOLT(pdo) * 100,
+ PD_APDO_MAX_CURR(pdo) * 50);
} else {
cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
"Invalid PDO\n");
@@ -2745,17 +2854,18 @@ static ssize_t select_pdo_store(struct device *dev,
{
struct usbpd *pd = dev_get_drvdata(dev);
int src_cap_id;
- int pdo;
+ int pdo, uv = 0, ua = 0;
int ret;
/* Only allowed if we are already in explicit sink contract */
- if (pd->current_state != PE_SNK_READY) {
+ if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
usbpd_err(&pd->dev, "select_pdo: Cannot select new PDO yet\n");
return -EBUSY;
}
- if (sscanf(buf, "%d %d\n", &src_cap_id, &pdo) != 2) {
- usbpd_err(&pd->dev, "select_pdo: Must specify <src cap id> <PDO>\n");
+ ret = sscanf(buf, "%d %d %d %d", &src_cap_id, &pdo, &uv, &ua);
+ if (ret != 2 && ret != 4) {
+ usbpd_err(&pd->dev, "select_pdo: Must specify <src cap id> <PDO> [<uV> <uA>]\n");
return -EINVAL;
}
@@ -2770,7 +2880,7 @@ static ssize_t select_pdo_store(struct device *dev,
return -EINVAL;
}
- ret = pd_select_pdo(pd, pdo);
+ ret = pd_select_pdo(pd, pdo, uv, ua);
if (ret)
return ret;
@@ -2793,27 +2903,66 @@ static ssize_t rdo_show(struct device *dev, struct device_attribute *attr,
{
struct usbpd *pd = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "Request Data Object: %08x\n\n"
- "Obj Pos:%d\n"
- "Giveback:%d\n"
- "Capability Mismatch:%d\n"
- "USB Communications Capable:%d\n"
- "No USB Suspend:%d\n"
- "Operating Current/Power:%d (mA) / %d (mW)\n"
- "%s Current/Power:%d (mA) / %d (mW)\n",
- pd->rdo,
+ /* dump the RDO as a hex string */
+ return snprintf(buf, PAGE_SIZE, "%08x\n", pd->rdo);
+}
+static DEVICE_ATTR_RO(rdo);
+
+static ssize_t rdo_h_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usbpd *pd = dev_get_drvdata(dev);
+ int pos = PD_RDO_OBJ_POS(pd->rdo);
+ int type = PD_SRC_PDO_TYPE(pd->received_pdos[pos]);
+ int len;
+
+ len = scnprintf(buf, PAGE_SIZE, "Request Data Object\n"
+ "\tObj Pos:%d\n"
+ "\tGiveback:%d\n"
+ "\tCapability Mismatch:%d\n"
+ "\tUSB Communications Capable:%d\n"
+ "\tNo USB Suspend:%d\n",
PD_RDO_OBJ_POS(pd->rdo),
PD_RDO_GIVEBACK(pd->rdo),
PD_RDO_MISMATCH(pd->rdo),
PD_RDO_USB_COMM(pd->rdo),
- PD_RDO_NO_USB_SUSP(pd->rdo),
- PD_RDO_FIXED_CURR(pd->rdo) * 10,
- PD_RDO_FIXED_CURR(pd->rdo) * 250,
- PD_RDO_GIVEBACK(pd->rdo) ? "Min" : "Max",
- PD_RDO_FIXED_CURR_MINMAX(pd->rdo) * 10,
- PD_RDO_FIXED_CURR_MINMAX(pd->rdo) * 250);
+ PD_RDO_NO_USB_SUSP(pd->rdo));
+
+ switch (type) {
+ case PD_SRC_PDO_TYPE_FIXED:
+ case PD_SRC_PDO_TYPE_VARIABLE:
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "(Fixed/Variable)\n"
+ "\tOperating Current:%d (mA)\n"
+ "\t%s Current:%d (mA)\n",
+ PD_RDO_FIXED_CURR(pd->rdo) * 10,
+ PD_RDO_GIVEBACK(pd->rdo) ? "Min" : "Max",
+ PD_RDO_FIXED_CURR_MINMAX(pd->rdo) * 10);
+ break;
+
+ case PD_SRC_PDO_TYPE_BATTERY:
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "(Battery)\n"
+ "\tOperating Power:%d (mW)\n"
+ "\t%s Power:%d (mW)\n",
+ PD_RDO_FIXED_CURR(pd->rdo) * 250,
+ PD_RDO_GIVEBACK(pd->rdo) ? "Min" : "Max",
+ PD_RDO_FIXED_CURR_MINMAX(pd->rdo) * 250);
+ break;
+
+ case PD_SRC_PDO_TYPE_AUGMENTED:
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "(Programmable)\n"
+ "\tOutput Voltage:%d (mV)\n"
+ "\tOperating Current:%d (mA)\n",
+ PD_RDO_PROG_VOLTAGE(pd->rdo) * 20,
+ PD_RDO_PROG_CURR(pd->rdo) * 50);
+ break;
+ }
+
+ return len;
}
-static DEVICE_ATTR_RO(rdo);
+static DEVICE_ATTR_RO(rdo_h);
static ssize_t hard_reset_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
@@ -2849,6 +2998,7 @@ static struct attribute *usbpd_attrs[] = {
&dev_attr_pdos[6].attr,
&dev_attr_select_pdo.attr,
&dev_attr_rdo.attr,
+ &dev_attr_rdo_h.attr,
&dev_attr_hard_reset.attr,
NULL,
};
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
index 4ecc24c6be11..63fad28fa721 100644
--- a/drivers/usb/pd/qpnp-pdphy.c
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -34,6 +34,7 @@
#define USB_PDPHY_MSG_CONFIG 0x40
#define MSG_CONFIG_PORT_DATA_ROLE BIT(3)
#define MSG_CONFIG_PORT_POWER_ROLE BIT(2)
+#define MSG_CONFIG_SPEC_REV_MASK (BIT(1) | BIT(0))
#define USB_PDPHY_EN_CONTROL 0x46
#define CONTROL_ENABLE BIT(0)
@@ -331,6 +332,16 @@ int pd_phy_update_roles(enum data_role dr, enum power_role pr)
((dr == DR_DFP ? MSG_CONFIG_PORT_DATA_ROLE : 0) |
(pr == PR_SRC ? MSG_CONFIG_PORT_POWER_ROLE : 0)));
}
+EXPORT_SYMBOL(pd_phy_update_roles);
+
+int pd_phy_update_spec_rev(enum pd_spec_rev rev)
+{
+ struct usb_pdphy *pdphy = __pdphy;
+
+ return pdphy_masked_write(pdphy, USB_PDPHY_MSG_CONFIG,
+ MSG_CONFIG_SPEC_REV_MASK, rev);
+}
+EXPORT_SYMBOL(pd_phy_update_spec_rev);
int pd_phy_open(struct pd_phy_params *params)
{
@@ -366,6 +377,10 @@ int pd_phy_open(struct pd_phy_params *params)
if (ret)
return ret;
+ ret = pd_phy_update_spec_rev(params->spec_rev);
+ if (ret)
+ return ret;
+
ret = pdphy_reg_write(pdphy, USB_PDPHY_EN_CONTROL, 0);
if (ret)
return ret;
diff --git a/drivers/usb/pd/usbpd.h b/drivers/usb/pd/usbpd.h
index fd3f2f33bbc7..b2663add7f3c 100644
--- a/drivers/usb/pd/usbpd.h
+++ b/drivers/usb/pd/usbpd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -51,18 +51,24 @@ enum pd_msg_type {
SOPII_MSG,
};
+enum pd_spec_rev {
+ USBPD_REV_20 = 1,
+ USBPD_REV_30 = 2,
+};
+
/* enable msg and signal to be received by phy */
#define FRAME_FILTER_EN_SOP BIT(0)
#define FRAME_FILTER_EN_HARD_RESET BIT(5)
struct pd_phy_params {
- void (*signal_cb)(struct usbpd *pd, enum pd_sig_type type);
- void (*msg_rx_cb)(struct usbpd *pd, enum pd_msg_type type,
- u8 *buf, size_t len);
- void (*shutdown_cb)(struct usbpd *pd);
- enum data_role data_role;
+ void (*signal_cb)(struct usbpd *pd, enum pd_sig_type type);
+ void (*msg_rx_cb)(struct usbpd *pd, enum pd_msg_type type,
+ u8 *buf, size_t len);
+ void (*shutdown_cb)(struct usbpd *pd);
+ enum data_role data_role;
enum power_role power_role;
- u8 frame_filter_val;
+ u8 frame_filter_val;
+ u8 spec_rev;
};
#if IS_ENABLED(CONFIG_QPNP_USB_PDPHY)
@@ -71,6 +77,7 @@ int pd_phy_signal(enum pd_sig_type type, unsigned int timeout_ms);
int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
enum pd_msg_type type, unsigned int timeout_ms);
int pd_phy_update_roles(enum data_role dr, enum power_role pr);
+int pd_phy_update_spec_rev(enum pd_spec_rev rev);
void pd_phy_close(void);
#else
static inline int pd_phy_open(struct pd_phy_params *params)
@@ -94,6 +101,11 @@ static inline int pd_phy_update_roles(enum data_role dr, enum power_role pr)
return -ENODEV;
}
+static inline int pd_phy_update_spec_rev(enum pd_spec_rev rev)
+{
+ return -ENODEV;
+}
+
static inline void pd_phy_close(void)
{
}
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c
index da6c68d43b53..fc89a2ea772e 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.c
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2147,8 +2147,10 @@ static int mdp3_ctrl_lut_config(struct msm_fb_data_type *mfd,
dma = mdp3_session->dma;
- if (cfg->cmap.start + cfg->cmap.len > MDP_LUT_SIZE) {
- pr_err("Invalid arguments\n");
+ if ((cfg->cmap.start > MDP_LUT_SIZE) ||
+ (cfg->cmap.len > MDP_LUT_SIZE) ||
+ (cfg->cmap.start + cfg->cmap.len > MDP_LUT_SIZE)) {
+ pr_err("Invalid arguments.\n");
return -EINVAL;
}
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index 93b436eab5ab..af9dc7ce1730 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -129,6 +129,40 @@ static int mdss_dp_is_clk_prefix(const char *clk_prefix, const char *clk_name)
return !strncmp(clk_name, clk_prefix, strlen(clk_prefix));
}
+static int mdss_dp_parse_prop(struct platform_device *pdev,
+ struct mdss_dp_drv_pdata *dp_drv)
+{
+ int len = 0, i = 0;
+ const char *data;
+
+ data = of_get_property(pdev->dev.of_node,
+ "qcom,aux-cfg-settings", &len);
+ if ((!data) || (len != AUX_CFG_LEN)) {
+ pr_err("%s:%d, Unable to read DP AUX CFG settings",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < len; i++)
+ dp_drv->aux_cfg[i] = data[i];
+
+ data = of_get_property(pdev->dev.of_node,
+ "qcom,logical2physical-lane-map", &len);
+ if ((!data) || (len != DP_MAX_PHY_LN)) {
+ pr_debug("%s:%d, lane mapping not defined, use default",
+ __func__, __LINE__);
+ dp_drv->l_map[DP_PHY_LN0] = DP_ML0;
+ dp_drv->l_map[DP_PHY_LN1] = DP_ML1;
+ dp_drv->l_map[DP_PHY_LN2] = DP_ML2;
+ dp_drv->l_map[DP_PHY_LN3] = DP_ML3;
+ } else {
+ for (i = 0; i < len; i++)
+ dp_drv->l_map[i] = data[i];
+ }
+
+ return 0;
+}
+
static int mdss_dp_init_clk_power_data(struct device *dev,
struct mdss_dp_drv_pdata *pdata)
{
@@ -304,7 +338,25 @@ static int mdss_dp_clk_init(struct mdss_dp_drv_pdata *dp_drv,
goto ctrl_get_error;
}
+ dp_drv->pixel_clk_rcg = devm_clk_get(dev, "pixel_clk_rcg");
+ if (IS_ERR(dp_drv->pixel_clk_rcg)) {
+ pr_debug("%s: Unable to get DP pixel clk RCG\n",
+ __func__);
+ dp_drv->pixel_clk_rcg = NULL;
+ }
+
+ dp_drv->pixel_parent = devm_clk_get(dev,
+ "pixel_parent");
+ if (IS_ERR(dp_drv->pixel_parent)) {
+ pr_debug("%s: Unable to get DP pixel RCG parent\n",
+ __func__);
+ dp_drv->pixel_parent = NULL;
+ }
} else {
+ if (dp_drv->pixel_parent)
+ devm_clk_put(dev, dp_drv->pixel_parent);
+ if (dp_drv->pixel_clk_rcg)
+ devm_clk_put(dev, dp_drv->pixel_clk_rcg);
msm_dss_put_clk(ctrl_power_data->clk_config,
ctrl_power_data->num_clk);
msm_dss_put_clk(core_power_data->clk_config,
@@ -1151,10 +1203,9 @@ static inline void mdss_dp_ack_state(struct mdss_dp_drv_pdata *dp, int val)
* given usb plug orientation.
*/
static int mdss_dp_get_lane_mapping(struct mdss_dp_drv_pdata *dp,
- enum plug_orientation orientation,
- struct lane_mapping *lane_map)
+ enum plug_orientation orientation, char *lane_map)
{
- int ret = 0;
+ int ret = 0, i = 0, j = 0;
pr_debug("enter: orientation = %d\n", orientation);
@@ -1164,22 +1215,35 @@ static int mdss_dp_get_lane_mapping(struct mdss_dp_drv_pdata *dp,
goto exit;
}
- /* Set the default lane mapping */
- lane_map->lane0 = 2;
- lane_map->lane1 = 3;
- lane_map->lane2 = 1;
- lane_map->lane3 = 0;
-
+ /* For flip case, swap phy lanes with ML0 and ML3, ML1 and ML2 */
if (orientation == ORIENTATION_CC2) {
- lane_map->lane0 = 1;
- lane_map->lane1 = 0;
- lane_map->lane2 = 2;
- lane_map->lane3 = 3;
+ for (i = 0; i < DP_MAX_PHY_LN; i++) {
+ if (dp->l_map[i] == DP_ML0) {
+ for (j = 0; j < DP_MAX_PHY_LN; j++) {
+ if (dp->l_map[j] == DP_ML3) {
+ lane_map[i] = DP_ML3;
+ lane_map[j] = DP_ML0;
+ break;
+ }
+ }
+ } else if (dp->l_map[i] == DP_ML1) {
+ for (j = 0; j < DP_MAX_PHY_LN; j++) {
+ if (dp->l_map[j] == DP_ML2) {
+ lane_map[i] = DP_ML2;
+ lane_map[j] = DP_ML1;
+ break;
+ }
+ }
+ }
+ }
+ } else {
+ /* Normal orientation */
+ for (i = 0; i < DP_MAX_PHY_LN; i++)
+ lane_map[i] = dp->l_map[i];
}
pr_debug("lane0 = %d, lane1 = %d, lane2 =%d, lane3 =%d\n",
- lane_map->lane0, lane_map->lane1, lane_map->lane2,
- lane_map->lane3);
+ lane_map[0], lane_map[1], lane_map[2], lane_map[3]);
exit:
return ret;
@@ -1212,6 +1276,9 @@ static int mdss_dp_enable_mainlink_clocks(struct mdss_dp_drv_pdata *dp)
{
int ret = 0;
+ if (dp->pixel_clk_rcg && dp->pixel_parent)
+ clk_set_parent(dp->pixel_clk_rcg, dp->pixel_parent);
+
mdss_dp_set_clock_rate(dp, "ctrl_link_clk",
(dp->link_rate * DP_LINK_RATE_MULTIPLIER) / DP_KHZ_TO_HZ);
@@ -1248,9 +1315,9 @@ static void mdss_dp_disable_mainlink_clocks(struct mdss_dp_drv_pdata *dp_drv)
* configuration, output format and sink/panel timing information.
*/
static void mdss_dp_configure_source_params(struct mdss_dp_drv_pdata *dp,
- struct lane_mapping *lane_map)
+ char *lane_map)
{
- mdss_dp_ctrl_lane_mapping(&dp->ctrl_io, *lane_map);
+ mdss_dp_ctrl_lane_mapping(&dp->ctrl_io, lane_map);
mdss_dp_fill_link_cfg(dp);
mdss_dp_mainlink_ctrl(&dp->ctrl_io, true);
mdss_dp_config_ctrl(dp);
@@ -1318,7 +1385,7 @@ end:
static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv, bool lt_needed)
{
int ret = 0;
- struct lane_mapping ln_map;
+ char ln_map[4];
/* wait until link training is completed */
pr_debug("enter, lt_needed=%s\n", lt_needed ? "true" : "false");
@@ -1331,13 +1398,14 @@ static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv, bool lt_needed)
dp_init_panel_info(dp_drv, dp_drv->vic);
ret = mdss_dp_get_lane_mapping(dp_drv, dp_drv->orientation,
- &ln_map);
+ ln_map);
if (ret)
goto exit_loop;
mdss_dp_phy_share_lane_config(&dp_drv->phy_io,
dp_drv->orientation,
- dp_drv->dpcd.max_lane_count);
+ dp_drv->dpcd.max_lane_count,
+ dp_drv->phy_reg_offset);
if (lt_needed) {
/*
@@ -1352,7 +1420,7 @@ static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv, bool lt_needed)
goto exit_loop;
}
- mdss_dp_configure_source_params(dp_drv, &ln_map);
+ mdss_dp_configure_source_params(dp_drv, ln_map);
reinit_completion(&dp_drv->idle_comp);
@@ -1385,7 +1453,7 @@ exit_loop:
int mdss_dp_on_hpd(struct mdss_dp_drv_pdata *dp_drv)
{
int ret = 0;
- struct lane_mapping ln_map;
+ char ln_map[4];
/* wait until link training is completed */
mutex_lock(&dp_drv->train_mutex);
@@ -1404,7 +1472,7 @@ int mdss_dp_on_hpd(struct mdss_dp_drv_pdata *dp_drv)
}
mdss_dp_hpd_configure(&dp_drv->ctrl_io, true);
- ret = mdss_dp_get_lane_mapping(dp_drv, dp_drv->orientation, &ln_map);
+ ret = mdss_dp_get_lane_mapping(dp_drv, dp_drv->orientation, ln_map);
if (ret)
goto exit;
@@ -1419,7 +1487,7 @@ int mdss_dp_on_hpd(struct mdss_dp_drv_pdata *dp_drv)
}
mdss_dp_phy_share_lane_config(&dp_drv->phy_io, dp_drv->orientation,
- dp_drv->dpcd.max_lane_count);
+ dp_drv->dpcd.max_lane_count, dp_drv->phy_reg_offset);
ret = mdss_dp_enable_mainlink_clocks(dp_drv);
if (ret)
@@ -1427,7 +1495,7 @@ int mdss_dp_on_hpd(struct mdss_dp_drv_pdata *dp_drv)
reinit_completion(&dp_drv->idle_comp);
- mdss_dp_configure_source_params(dp_drv, &ln_map);
+ mdss_dp_configure_source_params(dp_drv, ln_map);
if (dp_drv->psm_enabled) {
ret = mdss_dp_aux_send_psm_request(dp_drv, false);
@@ -1689,7 +1757,8 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata)
mdss_dp_get_ctrl_hw_version(&dp_drv->ctrl_io),
mdss_dp_get_phy_hw_version(&dp_drv->phy_io));
- mdss_dp_phy_aux_setup(&dp_drv->phy_io);
+ mdss_dp_phy_aux_setup(&dp_drv->phy_io, dp_drv->aux_cfg,
+ dp_drv->phy_reg_offset);
mdss_dp_irq_enable(dp_drv);
dp_drv->dp_initialized = true;
@@ -2743,6 +2812,11 @@ static int mdss_retrieve_dp_ctrl_resources(struct platform_device *pdev,
return rc;
}
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,phy-register-offset", &dp_drv->phy_reg_offset);
+ if (rc)
+ dp_drv->phy_reg_offset = 0;
+
rc = msm_dss_ioremap_byname(pdev, &dp_drv->tcsr_reg_io,
"tcsr_regs");
if (rc) {
@@ -3704,6 +3778,13 @@ static int mdss_dp_probe(struct platform_device *pdev)
goto probe_err;
}
+ ret = mdss_dp_parse_prop(pdev, dp_drv);
+ if (ret) {
+ DEV_ERR("DP properties parsing failed.ret=%d\n",
+ ret);
+ goto probe_err;
+ }
+
ret = mdss_dp_irq_setup(dp_drv);
if (ret)
goto probe_err;
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index bf74a8a4d7df..d6f5d160aef2 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -36,6 +36,8 @@
#define AUX_CMD_MAX 16
#define AUX_CMD_I2C_MAX 128
+#define AUX_CFG_LEN 10
+
#define EDP_PORT_MAX 1
#define EDP_SINK_CAP_LEN 16
@@ -460,6 +462,7 @@ struct mdss_dp_drv_pdata {
struct dss_io_data dp_cc_io;
struct dss_io_data qfprom_io;
struct dss_io_data hdcp_io;
+ u32 phy_reg_offset;
int base_size;
unsigned char *mmss_cc_base;
bool override_config;
@@ -486,6 +489,10 @@ struct mdss_dp_drv_pdata {
struct edp_edid edid;
struct dpcd_cap dpcd;
+ /* DP Pixel clock RCG and PLL parent */
+ struct clk *pixel_clk_rcg;
+ struct clk *pixel_parent;
+
/* regulators */
struct dss_module_power power_data[DP_MAX_PM];
struct dp_pinctrl_res pin_res;
@@ -536,6 +543,10 @@ struct mdss_dp_drv_pdata {
struct mdss_dp_event_data dp_event;
struct task_struct *ev_thread;
+ /* dt settings */
+ char l_map[4];
+ u32 aux_cfg[AUX_CFG_LEN];
+
struct workqueue_struct *workq;
struct delayed_work hdcp_cb_work;
spinlock_t lock;
@@ -554,6 +565,21 @@ struct mdss_dp_drv_pdata {
struct list_head attention_head;
};
+enum dp_phy_lane_num {
+ DP_PHY_LN0 = 0,
+ DP_PHY_LN1 = 1,
+ DP_PHY_LN2 = 2,
+ DP_PHY_LN3 = 3,
+ DP_MAX_PHY_LN = 4,
+};
+
+enum dp_mainlink_lane_num {
+ DP_ML0 = 0,
+ DP_ML1 = 1,
+ DP_ML2 = 2,
+ DP_ML3 = 3,
+};
+
enum dp_lane_count {
DP_LANE_COUNT_1 = 1,
DP_LANE_COUNT_2 = 2,
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.c b/drivers/video/fbdev/msm/mdss_dp_util.c
index 1dcf83f094c1..f89b86f72b52 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.c
+++ b/drivers/video/fbdev/msm/mdss_dp_util.c
@@ -859,31 +859,38 @@ void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
pr_debug("dp_tu=0x%x\n", dp_tu);
}
-void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io,
- struct lane_mapping l_map)
+void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io, char *l_map)
{
u8 bits_per_lane = 2;
- u32 lane_map = ((l_map.lane0 << (bits_per_lane * 0))
- | (l_map.lane1 << (bits_per_lane * 1))
- | (l_map.lane2 << (bits_per_lane * 2))
- | (l_map.lane3 << (bits_per_lane * 3)));
+ u32 lane_map = ((l_map[0] << (bits_per_lane * 0))
+ | (l_map[1] << (bits_per_lane * 1))
+ | (l_map[2] << (bits_per_lane * 2))
+ | (l_map[3] << (bits_per_lane * 3)));
pr_debug("%s: lane mapping reg = 0x%x\n", __func__, lane_map);
writel_relaxed(lane_map,
ctrl_io->base + DP_LOGICAL2PHYSCIAL_LANE_MAPPING);
}
-void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io)
+void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io, u32 *aux_cfg,
+ u32 phy_reg_offset)
{
- writel_relaxed(0x3d, phy_io->base + DP_PHY_PD_CTL);
- writel_relaxed(0x13, phy_io->base + DP_PHY_AUX_CFG1);
- writel_relaxed(0x10, phy_io->base + DP_PHY_AUX_CFG3);
- writel_relaxed(0x0a, phy_io->base + DP_PHY_AUX_CFG4);
- writel_relaxed(0x26, phy_io->base + DP_PHY_AUX_CFG5);
- writel_relaxed(0x0a, phy_io->base + DP_PHY_AUX_CFG6);
- writel_relaxed(0x03, phy_io->base + DP_PHY_AUX_CFG7);
- writel_relaxed(0x8b, phy_io->base + DP_PHY_AUX_CFG8);
- writel_relaxed(0x03, phy_io->base + DP_PHY_AUX_CFG9);
- writel_relaxed(0x1f, phy_io->base + DP_PHY_AUX_INTERRUPT_MASK);
+ void __iomem *adjusted_phy_io_base = phy_io->base + phy_reg_offset;
+
+ writel_relaxed(0x3d, adjusted_phy_io_base + DP_PHY_PD_CTL);
+
+ /* DP AUX CFG register programming */
+ writel_relaxed(aux_cfg[0], adjusted_phy_io_base + DP_PHY_AUX_CFG0);
+ writel_relaxed(aux_cfg[1], adjusted_phy_io_base + DP_PHY_AUX_CFG1);
+ writel_relaxed(aux_cfg[2], adjusted_phy_io_base + DP_PHY_AUX_CFG2);
+ writel_relaxed(aux_cfg[3], adjusted_phy_io_base + DP_PHY_AUX_CFG3);
+ writel_relaxed(aux_cfg[4], adjusted_phy_io_base + DP_PHY_AUX_CFG4);
+ writel_relaxed(aux_cfg[5], adjusted_phy_io_base + DP_PHY_AUX_CFG5);
+ writel_relaxed(aux_cfg[6], adjusted_phy_io_base + DP_PHY_AUX_CFG6);
+ writel_relaxed(aux_cfg[7], adjusted_phy_io_base + DP_PHY_AUX_CFG7);
+ writel_relaxed(aux_cfg[8], adjusted_phy_io_base + DP_PHY_AUX_CFG8);
+ writel_relaxed(aux_cfg[9], adjusted_phy_io_base + DP_PHY_AUX_CFG9);
+
+ writel_relaxed(0x1f, adjusted_phy_io_base + DP_PHY_AUX_INTERRUPT_MASK);
}
int mdss_dp_irq_setup(struct mdss_dp_drv_pdata *dp_drv)
@@ -1036,14 +1043,14 @@ u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp)
}
void mdss_dp_phy_share_lane_config(struct dss_io_data *phy_io,
- u8 orientation, u8 ln_cnt)
+ u8 orientation, u8 ln_cnt, u32 phy_reg_offset)
{
u32 info = 0x0;
info |= (ln_cnt & 0x0F);
info |= ((orientation & 0x0F) << 4);
pr_debug("Shared Info = 0x%x\n", info);
- writel_relaxed(info, phy_io->base + DP_PHY_SPARE0);
+ writel_relaxed(info, phy_io->base + phy_reg_offset + DP_PHY_SPARE0);
}
void mdss_dp_config_audio_acr_ctrl(struct dss_io_data *ctrl_io, char link_rate)
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.h b/drivers/video/fbdev/msm/mdss_dp_util.h
index cb62d145960f..b3b15a3579fa 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.h
+++ b/drivers/video/fbdev/msm/mdss_dp_util.h
@@ -206,13 +206,6 @@
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x01C)
#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x020)
-struct lane_mapping {
- char lane0;
- char lane1;
- char lane2;
- char lane3;
-};
-
struct edp_cmd {
char read; /* 1 == read, 0 == write */
char i2c; /* 1 == i2c cmd, 0 == native cmd */
@@ -292,12 +285,12 @@ void mdss_dp_assert_phy_reset(struct dss_io_data *ctrl_io, bool assert);
void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
u8 ln_cnt, u32 res, struct mdss_panel_info *pinfo);
void mdss_dp_config_misc(struct mdss_dp_drv_pdata *dp, u32 bd, u32 cc);
-void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io);
+void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io, u32 *aux_cfg,
+ u32 phy_reg_offset);
void mdss_dp_hpd_configure(struct dss_io_data *ctrl_io, bool enable);
void mdss_dp_aux_ctrl(struct dss_io_data *ctrl_io, bool enable);
void mdss_dp_mainlink_ctrl(struct dss_io_data *ctrl_io, bool enable);
-void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io,
- struct lane_mapping l_map);
+void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io, char *l_map);
int mdss_dp_mainlink_ready(struct mdss_dp_drv_pdata *dp, u32 which);
void mdss_dp_timing_cfg(struct dss_io_data *ctrl_io,
struct mdss_panel_info *pinfo);
@@ -311,10 +304,8 @@ void mdss_dp_sw_config_msa(struct dss_io_data *ctrl_io,
void mdss_dp_usbpd_ext_capabilities(struct usbpd_dp_capabilities *dp_cap);
void mdss_dp_usbpd_ext_dp_status(struct usbpd_dp_status *dp_status);
u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp);
-void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io,
- struct lane_mapping l_map);
void mdss_dp_phy_share_lane_config(struct dss_io_data *phy_io,
- u8 orientation, u8 ln_cnt);
+ u8 orientation, u8 ln_cnt, u32 phy_reg_offset);
void mdss_dp_config_audio_acr_ctrl(struct dss_io_data *ctrl_io,
char link_rate);
void mdss_dp_audio_setup_sdps(struct dss_io_data *ctrl_io, u32 num_of_channels);
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index fbbcc16f48b5..37a3876d3570 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -2138,6 +2138,7 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
set_bit(MDSS_CAPS_3D_MUX_UNDERRUN_RECOVERY_SUPPORTED,
mdata->mdss_caps_map);
set_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map);
+ set_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map);
set_bit(MDSS_CAPS_MDP_VOTE_CLK_NOT_SUPPORTED,
mdata->mdss_caps_map);
mdss_mdp_init_default_prefill_factors(mdata);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index ffbf156e9eed..acb356fc681a 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -3614,6 +3614,7 @@ int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
u32 width, height;
int split_fb, rc = 0;
u32 max_mixer_width;
+ bool dsc_merge_enabled = 0;
struct mdss_panel_info *pinfo;
if (!ctl || !ctl->panel_data) {
@@ -3738,15 +3739,15 @@ int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
ctl->mixer_right = NULL;
}
- if (ctl->mixer_right) {
- if (!is_dsc_compression(pinfo) ||
- (pinfo->dsc_enc_total == 1))
- ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
- MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
- } else {
+ dsc_merge_enabled = is_dsc_compression(pinfo) &&
+ (pinfo->dsc_enc_total == 2);
+
+ if (ctl->mixer_right && (!dsc_merge_enabled))
+ ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+ MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
+ else
ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT);
- }
return 0;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
index e1d2a947a77f..5b284e624c7f 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
@@ -125,6 +125,48 @@ static inline void mdp_wb_write(struct mdss_mdp_writeback_ctx *ctx,
writel_relaxed(val, ctx->base + reg);
}
+static void mdss_mdp_qos_vbif_remapper_setup_wb(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_writeback_ctx *ctx)
+{
+ u32 mask, reg_val, reg_val_lvl, reg_high, i, vbif_qos;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ bool is_nrt_vbif = (ctl->mixer_left && ctl->mixer_left->rotator_mode);
+
+ if (!mdata->vbif_nrt_qos)
+ return;
+
+ if (test_bit(MDSS_QOS_REMAPPER, mdata->mdss_qos_map)) {
+ mutex_lock(&mdata->reg_lock);
+ for (i = 0; i < mdata->npriority_lvl; i++) {
+ reg_high = ((ctx->xin_id & 0x8) >> 3) * 4 + (i * 8);
+
+ reg_val = MDSS_VBIF_READ(mdata,
+ MDSS_VBIF_QOS_RP_REMAP_BASE +
+ reg_high, is_nrt_vbif);
+ reg_val_lvl = MDSS_VBIF_READ(mdata,
+ MDSS_VBIF_QOS_LVL_REMAP_BASE + reg_high,
+ is_nrt_vbif);
+
+ mask = 0x3 << (ctx->xin_id * 4);
+ vbif_qos = mdata->vbif_nrt_qos[i];
+
+ reg_val &= ~(mask);
+ reg_val |= vbif_qos << (ctx->xin_id * 4);
+
+ reg_val_lvl &= ~(mask);
+ reg_val_lvl |= vbif_qos << (ctx->xin_id * 4);
+
+ pr_debug("idx:%d xin:%d reg:0x%x val:0x%x lvl:0x%x\n",
+ i, ctx->xin_id, reg_high, reg_val, reg_val_lvl);
+ MDSS_VBIF_WRITE(mdata, MDSS_VBIF_QOS_RP_REMAP_BASE +
+ reg_high, reg_val, is_nrt_vbif);
+ MDSS_VBIF_WRITE(mdata, MDSS_VBIF_QOS_LVL_REMAP_BASE +
+ reg_high, reg_val_lvl, is_nrt_vbif);
+ }
+ mutex_unlock(&mdata->reg_lock);
+ }
+}
+
static void mdss_mdp_set_qos_wb(struct mdss_mdp_ctl *ctl,
struct mdss_mdp_writeback_ctx *ctx)
{
@@ -133,12 +175,15 @@ static void mdss_mdp_set_qos_wb(struct mdss_mdp_ctl *ctl,
struct mdss_overlay_private *mdp5_data;
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
- if (false == test_bit(MDSS_QOS_WB_QOS, mdata->mdss_qos_map))
- return;
-
mdp5_data = mfd_to_mdp5_data(ctl->mfd);
cwb = &mdp5_data->cwb;
+ if (!cwb->valid)
+ mdss_mdp_qos_vbif_remapper_setup_wb(ctl, ctx);
+
+ if (false == test_bit(MDSS_QOS_WB_QOS, mdata->mdss_qos_map))
+ return;
+
if (cwb->valid)
wb_qos_setup = QOS_LUT_CWB_READ;
else
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index 1bb67391225a..664f42c850b7 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -34,6 +34,7 @@
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>
+#include <linux/namei.h>
#include "ext4_extents.h"
#include "xattr.h"
@@ -487,6 +488,9 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
struct ext4_crypt_info *ci;
int dir_has_key, cached_with_key;
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
dir = dget_parent(dentry);
if (!ext4_encrypted_inode(d_inode(dir))) {
dput(dir);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 624a57a9c4aa..c6a499b7547e 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -275,11 +275,11 @@ loop:
goto loop;
end_loop:
- write_unlock(&journal->j_state_lock);
del_timer_sync(&journal->j_commit_timer);
journal->j_task = NULL;
wake_up(&journal->j_wait_done_commit);
jbd_debug(1, "Journal thread exiting.\n");
+ write_unlock(&journal->j_state_lock);
return 0;
}
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3f0c6909dda1..aab1530661a5 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -61,7 +61,7 @@ struct drm_mode_object {
struct drm_object_properties *properties;
};
-#define DRM_OBJECT_MAX_PROPERTY 24
+#define DRM_OBJECT_MAX_PROPERTY 64
struct drm_object_properties {
int count, atomic_count;
/* NOTE: if we ever start dynamically destroying properties (ie.
diff --git a/include/linux/mdss_io_util.h b/include/linux/mdss_io_util.h
index 6ad21e887877..5b2587b28737 100644
--- a/include/linux/mdss_io_util.h
+++ b/include/linux/mdss_io_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -77,6 +77,7 @@ struct dss_clk {
char clk_name[32];
enum dss_clk_type type;
unsigned long rate;
+ unsigned long max_rate;
};
struct dss_module_power {
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index fb2607dd365b..6037fbf00a23 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1040,6 +1040,19 @@ int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
*/
int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size);
+/**
+ * gsi_get_inst_ram_offset_and_size - Peripheral should call this function
+ * to get instruction RAM base address offset and size. Peripheral typically
+ * uses this info to load GSI FW into the IRAM.
+ *
+ * @base_offset:[OUT] - IRAM base offset address
+ * @size: [OUT] - IRAM size
+ *
+ * @Return none
+ */
+void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
+ unsigned long *size);
+
/*
* Here is a typical sequence of calls
*
@@ -1227,9 +1240,15 @@ static inline int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
{
return -GSI_STATUS_UNSUPPORTED_OP;
}
+
static inline int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
{
return -GSI_STATUS_UNSUPPORTED_OP;
}
+
+static inline void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
+ unsigned long *size)
+{
+}
#endif
#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index de35fe9441fe..8f95c91c059a 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -104,6 +104,12 @@ enum {
POWER_SUPPLY_DP_DM_ICL_UP = 12,
};
+enum {
+ POWER_SUPPLY_PARALLEL_NONE,
+ POWER_SUPPLY_PARALLEL_USBIN_USBIN,
+ POWER_SUPPLY_PARALLEL_MID_MID,
+};
+
enum power_supply_property {
/* Properties of type `int' */
POWER_SUPPLY_PROP_STATUS = 0,
@@ -224,11 +230,13 @@ enum power_supply_property {
POWER_SUPPLY_PROP_CHARGER_TEMP,
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
POWER_SUPPLY_PROP_PARALLEL_DISABLE,
- POWER_SUPPLY_PROP_PARALLEL_PERCENT,
POWER_SUPPLY_PROP_PE_START,
POWER_SUPPLY_PROP_SET_SHIP_MODE,
POWER_SUPPLY_PROP_SOC_REPORTING_READY,
POWER_SUPPLY_PROP_DEBUG_BATTERY,
+ POWER_SUPPLY_PROP_FCC_DELTA,
+ POWER_SUPPLY_PROP_ICL_REDUCTION,
+ POWER_SUPPLY_PROP_PARALLEL_MODE,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
@@ -252,9 +260,10 @@ enum power_supply_type {
POWER_SUPPLY_TYPE_USB_PD, /* Power Delivery */
POWER_SUPPLY_TYPE_WIRELESS, /* Accessory Charger Adapters */
POWER_SUPPLY_TYPE_BMS, /* Battery Monitor System */
- POWER_SUPPLY_TYPE_USB_PARALLEL, /* USB Parallel Path */
- POWER_SUPPLY_TYPE_WIPOWER, /* Wipower */
- POWER_SUPPLY_TYPE_TYPEC, /*Type-C */
+ POWER_SUPPLY_TYPE_PARALLEL, /* Parallel Path */
+ POWER_SUPPLY_TYPE_MAIN, /* Main Path */
+ POWER_SUPPLY_TYPE_WIPOWER, /* Wipower */
+ POWER_SUPPLY_TYPE_TYPEC, /* Type-C */
POWER_SUPPLY_TYPE_UFP, /* Type-C UFP */
POWER_SUPPLY_TYPE_DFP, /* TYpe-C DFP */
};
diff --git a/include/linux/qdsp6v2/apr_tal.h b/include/linux/qdsp6v2/apr_tal.h
index c2c49dd748de..bf324064960b 100644
--- a/include/linux/qdsp6v2/apr_tal.h
+++ b/include/linux/qdsp6v2/apr_tal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2011, 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -32,7 +32,6 @@
#if defined(CONFIG_MSM_QDSP6_APRV2_GLINK) || \
defined(CONFIG_MSM_QDSP6_APRV3_GLINK)
#define APR_MAX_BUF 512
-#define APR_NUM_OF_TX_BUF 30
#else
#define APR_MAX_BUF 8092
#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0d1d21e9f081..aca5c5694e09 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -335,8 +335,6 @@ enum task_event {
enum migrate_types {
GROUP_TO_RQ,
RQ_TO_GROUP,
- RQ_TO_RQ,
- GROUP_TO_GROUP,
};
#include <linux/spinlock.h>
diff --git a/include/linux/usb/ccid_desc.h b/include/linux/usb/ccid_desc.h
new file mode 100644
index 000000000000..9a0c72650cd2
--- /dev/null
+++ b/include/linux/usb/ccid_desc.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2011, 2017 The Linux Foundation. All rights reserved.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details
+ */
+
+#ifndef __LINUX_USB_CCID_DESC_H
+#define __LINUX_USB_CCID_DESC_H
+
+/*CCID specification version 1.10*/
+#define CCID1_10 0x0110
+
+#define SMART_CARD_DEVICE_CLASS 0x0B
+/* Smart Card Device Class Descriptor Type */
+#define CCID_DECRIPTOR_TYPE 0x21
+
+/* Table 5.3-1 Summary of CCID Class Specific Request */
+#define CCIDGENERICREQ_ABORT 0x01
+#define CCIDGENERICREQ_GET_CLOCK_FREQUENCIES 0x02
+#define CCIDGENERICREQ_GET_DATA_RATES 0x03
+
+/* 6.1 Command Pipe, Bulk-OUT Messages */
+#define PC_TO_RDR_ICCPOWERON 0x62
+#define PC_TO_RDR_ICCPOWEROFF 0x63
+#define PC_TO_RDR_GETSLOTSTATUS 0x65
+#define PC_TO_RDR_XFRBLOCK 0x6F
+#define PC_TO_RDR_GETPARAMETERS 0x6C
+#define PC_TO_RDR_RESETPARAMETERS 0x6D
+#define PC_TO_RDR_SETPARAMETERS 0x61
+#define PC_TO_RDR_ESCAPE 0x6B
+#define PC_TO_RDR_ICCCLOCK 0x6E
+#define PC_TO_RDR_T0APDU 0x6A
+#define PC_TO_RDR_SECURE 0x69
+#define PC_TO_RDR_MECHANICAL 0x71
+#define PC_TO_RDR_ABORT 0x72
+#define PC_TO_RDR_SETDATARATEANDCLOCKFREQUENCY 0x73
+
+/* 6.2 Response Pipe, Bulk-IN Messages */
+#define RDR_TO_PC_DATABLOCK 0x80
+#define RDR_TO_PC_SLOTSTATUS 0x81
+#define RDR_TO_PC_PARAMETERS 0x82
+#define RDR_TO_PC_ESCAPE 0x83
+#define RDR_TO_PC_DATARATEANDCLOCKFREQUENCY 0x84
+
+/* 6.3 Interrupt-IN Messages */
+#define RDR_TO_PC_NOTIFYSLOTCHANGE 0x50
+#define RDR_TO_PC_HARDWAREERROR 0x51
+
+/* Table 6.2-2 Slot error register when bmCommandStatus = 1 */
+#define CMD_ABORTED 0xFF
+#define ICC_MUTE 0xFE
+#define XFR_PARITY_ERROR 0xFD
+#define XFR_OVERRUN 0xFC
+#define HW_ERROR 0xFB
+#define BAD_ATR_TS 0xF8
+#define BAD_ATR_TCK 0xF7
+#define ICC_PROTOCOL_NOT_SUPPORTED 0xF6
+#define ICC_CLASS_NOT_SUPPORTED 0xF5
+#define PROCEDURE_BYTE_CONFLICT 0xF4
+#define DEACTIVATED_PROTOCOL 0xF3
+#define BUSY_WITH_AUTO_SEQUENCE 0xF2
+#define PIN_TIMEOUT 0xF0
+#define PIN_CANCELLED 0xEF
+#define CMD_SLOT_BUSY 0xE0
+
+/* CCID rev 1.1, p.27 */
+#define VOLTS_AUTO 0x00
+#define VOLTS_5_0 0x01
+#define VOLTS_3_0 0x02
+#define VOLTS_1_8 0x03
+
+/* 6.3.1 RDR_to_PC_NotifySlotChange */
+#define ICC_NOT_PRESENT 0x00
+#define ICC_PRESENT 0x01
+#define ICC_CHANGE 0x02
+#define ICC_INSERTED_EVENT (ICC_PRESENT+ICC_CHANGE)
+
+/* Identifies the length of type of subordinate descriptors of a CCID device
+ * Table 5.1-1 Smart Card Device Class descriptors
+ */
+struct usb_ccid_class_descriptor {
+ unsigned char bLength;
+ unsigned char bDescriptorType;
+ unsigned short bcdCCID;
+ unsigned char bMaxSlotIndex;
+ unsigned char bVoltageSupport;
+ unsigned long dwProtocols;
+ unsigned long dwDefaultClock;
+ unsigned long dwMaximumClock;
+ unsigned char bNumClockSupported;
+ unsigned long dwDataRate;
+ unsigned long dwMaxDataRate;
+ unsigned char bNumDataRatesSupported;
+ unsigned long dwMaxIFSD;
+ unsigned long dwSynchProtocols;
+ unsigned long dwMechanical;
+ unsigned long dwFeatures;
+ unsigned long dwMaxCCIDMessageLength;
+ unsigned char bClassGetResponse;
+ unsigned char bClassEnvelope;
+ unsigned short wLcdLayout;
+ unsigned char bPINSupport;
+ unsigned char bMaxCCIDBusySlots;
+} __packed;
+#endif
diff --git a/include/linux/usb/xhci_pdriver.h b/include/linux/usb/xhci_pdriver.h
index 376654b5b0f7..a44b53c33e75 100644
--- a/include/linux/usb/xhci_pdriver.h
+++ b/include/linux/usb/xhci_pdriver.h
@@ -19,9 +19,13 @@
* @usb3_lpm_capable: determines if this xhci platform supports USB3
* LPM capability
*
+ * @imod_interval: minimum inter-interrupt interval. Specified in
+ * 250nsec increments.
+ *
*/
struct usb_xhci_pdata {
unsigned usb3_lpm_capable:1;
+ unsigned imod_interval;
};
#endif /* __USB_CORE_XHCI_PDRIVER_H */
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index 7770f06b5e08..6b567d7a08d3 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -104,7 +104,7 @@ extern int icnss_ce_request_irq(unsigned int ce_id,
irqreturn_t (*handler)(int, void *),
unsigned long flags, const char *name, void *ctx);
extern int icnss_get_ce_id(int irq);
-extern int icnss_set_fw_debug_mode(bool enable_fw_log);
+extern int icnss_set_fw_log_mode(uint8_t fw_log_mode);
extern int icnss_athdiag_read(struct device *dev, uint32_t offset,
uint32_t mem_type, uint32_t data_len,
uint8_t *output);
diff --git a/include/sound/apr_audio-v2.h b/include/sound/apr_audio-v2.h
index a047a33334d2..1f8bba7e9ab7 100644
--- a/include/sound/apr_audio-v2.h
+++ b/include/sound/apr_audio-v2.h
@@ -42,6 +42,8 @@ struct param_outband {
#define ADM_MATRIX_ID_AUDIO_TX 1
#define ADM_MATRIX_ID_COMPRESSED_AUDIO_RX 2
+
+#define ADM_MATRIX_ID_LISTEN_TX 4
/* Enumeration for an audio Tx matrix ID.*/
#define ADM_MATRIX_ID_AUDIOX 1
@@ -9044,6 +9046,7 @@ struct asm_aptx_dec_fmt_blk_v2 {
#define LSM_SESSION_EVENT_DETECTION_STATUS_V2 (0x00012B01)
#define LSM_DATA_EVENT_READ_DONE (0x00012B02)
#define LSM_DATA_EVENT_STATUS (0x00012B03)
+#define LSM_SESSION_EVENT_DETECTION_STATUS_V3 (0x00012B04)
#define LSM_MODULE_ID_VOICE_WAKEUP (0x00012C00)
#define LSM_PARAM_ID_ENDPOINT_DETECT_THRESHOLD (0x00012C01)
@@ -9056,6 +9059,12 @@ struct asm_aptx_dec_fmt_blk_v2 {
#define LSM_PARAM_ID_LAB_ENABLE (0x00012C09)
#define LSM_PARAM_ID_LAB_CONFIG (0x00012C0A)
#define LSM_MODULE_ID_FRAMEWORK (0x00012C0E)
+#define LSM_PARAM_ID_SWMAD_CFG (0x00012C18)
+#define LSM_PARAM_ID_SWMAD_MODEL (0x00012C19)
+#define LSM_PARAM_ID_SWMAD_ENABLE (0x00012C1A)
+#define LSM_PARAM_ID_POLLING_ENABLE (0x00012C1B)
+#define LSM_PARAM_ID_MEDIA_FMT (0x00012C1E)
+#define LSM_PARAM_ID_FWK_MODE_CONFIG (0x00012C27)
/* HW MAD specific */
#define AFE_MODULE_HW_MAD (0x00010230)
@@ -10172,6 +10181,7 @@ enum {
COMPRESSED_PASSTHROUGH,
COMPRESSED_PASSTHROUGH_CONVERT,
COMPRESSED_PASSTHROUGH_DSD,
+ LISTEN,
};
#define AUDPROC_MODULE_ID_COMPRESSED_MUTE 0x00010770
diff --git a/include/sound/cpe_core.h b/include/sound/cpe_core.h
index 323a63fd6238..846cf819b9e5 100644
--- a/include/sound/cpe_core.h
+++ b/include/sound/cpe_core.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -162,7 +162,7 @@ struct wcd_cpe_lsm_ops {
int (*lsm_set_one_param)(void *core_handle,
struct cpe_lsm_session *session,
struct lsm_params_info *p_info,
- void *data, enum LSM_PARAM_TYPE param_type);
+ void *data, uint32_t param_type);
void (*lsm_get_snd_model_offset)
(void *core_handle, struct cpe_lsm_session *,
size_t *offset);
diff --git a/include/sound/q6adm-v2.h b/include/sound/q6adm-v2.h
index 8c7da3b9838d..25376315dd20 100644
--- a/include/sound/q6adm-v2.h
+++ b/include/sound/q6adm-v2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -96,7 +96,7 @@ int adm_unmap_rtac_block(uint32_t *mem_map_handle);
int adm_close(int port, int topology, int perf_mode);
int adm_matrix_map(int path, struct route_payload payload_map,
- int perf_mode);
+ int perf_mode, uint32_t passthr_mode);
int adm_connect_afe_port(int mode, int session_id, int port_id);
diff --git a/include/sound/q6lsm.h b/include/sound/q6lsm.h
index fb848bc70873..4805246766d6 100644
--- a/include/sound/q6lsm.h
+++ b/include/sound/q6lsm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -21,6 +21,10 @@
#define MAX_NUM_CONFIDENCE 20
+#define ADM_LSM_PORT_ID 0xADCB
+
+#define LSM_MAX_NUM_CHANNELS 8
+
typedef void (*lsm_app_cb)(uint32_t opcode, uint32_t token,
uint32_t *payload, void *priv);
@@ -49,11 +53,12 @@ struct lsm_lab_buffer {
uint32_t mem_map_handle;
};
-struct lsm_lab_hw_params {
+struct lsm_hw_params {
u16 sample_rate;
u16 sample_size;
u32 buf_sz;
u32 period_count;
+ u16 num_chs;
};
struct lsm_client {
@@ -79,8 +84,12 @@ struct lsm_client {
bool lab_enable;
bool lab_started;
struct lsm_lab_buffer *lab_buffer;
- struct lsm_lab_hw_params hw_params;
+ struct lsm_hw_params hw_params;
bool use_topology;
+ int session_state;
+ bool poll_enable;
+ int perf_mode;
+ uint32_t event_mode;
};
struct lsm_stream_cmd_open_tx {
@@ -134,6 +143,27 @@ struct lsm_param_connect_to_port {
uint16_t reserved;
} __packed;
+struct lsm_param_poll_enable {
+ struct lsm_param_payload_common common;
+ uint32_t minor_version;
+ /* indicates to voice wakeup that HW MAD/SW polling is enabled or not */
+ uint32_t polling_enable;
+} __packed;
+
+struct lsm_param_fwk_mode_cfg {
+ struct lsm_param_payload_common common;
+ uint32_t minor_version;
+ uint32_t mode;
+} __packed;
+
+struct lsm_param_media_fmt {
+ struct lsm_param_payload_common common;
+ uint32_t minor_version;
+ uint32_t sample_rate;
+ uint16_t num_channels;
+ uint16_t bit_width;
+ uint8_t channel_mapping[LSM_MAX_NUM_CHANNELS];
+} __packed;
/*
* This param cannot be sent in this format.
@@ -163,11 +193,22 @@ struct lsm_cmd_set_params_conf {
struct lsm_param_min_confidence_levels conf_payload;
} __packed;
-struct lsm_cmd_set_opmode_connectport {
+struct lsm_cmd_set_params_opmode {
+ struct apr_hdr msg_hdr;
+ struct lsm_set_params_hdr params_hdr;
+ struct lsm_param_op_mode op_mode;
+} __packed;
+
+struct lsm_cmd_set_connectport {
+ struct apr_hdr msg_hdr;
+ struct lsm_set_params_hdr params_hdr;
+ struct lsm_param_connect_to_port connect_to_port;
+} __packed;
+
+struct lsm_cmd_poll_enable {
struct apr_hdr msg_hdr;
struct lsm_set_params_hdr params_hdr;
- struct lsm_param_connect_to_port connect_to_port;
- struct lsm_param_op_mode op_mode;
+ struct lsm_param_poll_enable poll_enable;
} __packed;
struct lsm_param_epd_thres {
@@ -250,6 +291,19 @@ struct lsm_cmd_read_done {
uint32_t flags;
} __packed;
+struct lsm_cmd_set_fwk_mode_cfg {
+ struct apr_hdr msg_hdr;
+ struct lsm_set_params_hdr params_hdr;
+ struct lsm_param_fwk_mode_cfg fwk_mode_cfg;
+} __packed;
+
+struct lsm_cmd_set_media_fmt {
+ struct apr_hdr msg_hdr;
+ struct lsm_set_params_hdr params_hdr;
+ struct lsm_param_media_fmt media_fmt;
+} __packed;
+
+
struct lsm_client *q6lsm_client_alloc(lsm_app_cb cb, void *priv);
void q6lsm_client_free(struct lsm_client *client);
int q6lsm_open(struct lsm_client *client, uint16_t app_id);
@@ -274,8 +328,11 @@ int q6lsm_read(struct lsm_client *client, struct lsm_cmd_read *read);
int q6lsm_lab_buffer_alloc(struct lsm_client *client, bool alloc);
int q6lsm_set_one_param(struct lsm_client *client,
struct lsm_params_info *p_info, void *data,
- enum LSM_PARAM_TYPE param_type);
+ uint32_t param_type);
void q6lsm_sm_set_param_data(struct lsm_client *client,
struct lsm_params_info *p_info,
size_t *offset);
+int q6lsm_set_port_connected(struct lsm_client *client);
+int q6lsm_set_fwk_mode_cfg(struct lsm_client *client, uint32_t event_mode);
+int q6lsm_set_media_fmt_params(struct lsm_client *client);
#endif /* __Q6LSM_H__ */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9d58d703527c..0cd236442864 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -292,6 +292,55 @@ const char *__window_print(struct trace_seq *p, const u32 *buf, int buf_len)
return ret;
}
+
+static inline s64 __rq_update_sum(struct rq *rq, bool curr, bool new)
+{
+ if (curr)
+ if (new)
+ return rq->nt_curr_runnable_sum;
+ else
+ return rq->curr_runnable_sum;
+ else
+ if (new)
+ return rq->nt_prev_runnable_sum;
+ else
+ return rq->prev_runnable_sum;
+}
+
+static inline s64 __grp_update_sum(struct rq *rq, bool curr, bool new)
+{
+ if (curr)
+ if (new)
+ return rq->grp_time.nt_curr_runnable_sum;
+ else
+ return rq->grp_time.curr_runnable_sum;
+ else
+ if (new)
+ return rq->grp_time.nt_prev_runnable_sum;
+ else
+ return rq->grp_time.prev_runnable_sum;
+}
+
+static inline s64
+__get_update_sum(struct rq *rq, enum migrate_types migrate_type,
+ bool src, bool new, bool curr)
+{
+ switch (migrate_type) {
+ case RQ_TO_GROUP:
+ if (src)
+ return __rq_update_sum(rq, curr, new);
+ else
+ return __grp_update_sum(rq, curr, new);
+ case GROUP_TO_RQ:
+ if (src)
+ return __grp_update_sum(rq, curr, new);
+ else
+ return __rq_update_sum(rq, curr, new);
+ default:
+ WARN_ON_ONCE(1);
+ return -1;
+ }
+}
#endif
TRACE_EVENT(sched_update_task_ravg,
@@ -534,17 +583,13 @@ TRACE_EVENT(sched_update_pred_demand,
TRACE_EVENT(sched_migration_update_sum,
- TP_PROTO(struct task_struct *p, enum migrate_types migrate_type, struct migration_sum_data *d),
+ TP_PROTO(struct task_struct *p, enum migrate_types migrate_type, struct rq *rq),
- TP_ARGS(p, migrate_type, d),
+ TP_ARGS(p, migrate_type, rq),
TP_STRUCT__entry(
__field(int, tcpu )
__field(int, pid )
- __field( u64, cs )
- __field( u64, ps )
- __field( s64, nt_cs )
- __field( s64, nt_ps )
__field(enum migrate_types, migrate_type )
__field( s64, src_cs )
__field( s64, src_ps )
@@ -560,30 +605,22 @@ TRACE_EVENT(sched_migration_update_sum,
__entry->tcpu = task_cpu(p);
__entry->pid = p->pid;
__entry->migrate_type = migrate_type;
- __entry->src_cs = d->src_rq ?
- d->src_rq->curr_runnable_sum :
- d->src_cpu_time->curr_runnable_sum;
- __entry->src_ps = d->src_rq ?
- d->src_rq->prev_runnable_sum :
- d->src_cpu_time->prev_runnable_sum;
- __entry->dst_cs = d->dst_rq ?
- d->dst_rq->curr_runnable_sum :
- d->dst_cpu_time->curr_runnable_sum;
- __entry->dst_ps = d->dst_rq ?
- d->dst_rq->prev_runnable_sum :
- d->dst_cpu_time->prev_runnable_sum;
- __entry->src_nt_cs = d->src_rq ?
- d->src_rq->nt_curr_runnable_sum :
- d->src_cpu_time->nt_curr_runnable_sum;
- __entry->src_nt_ps = d->src_rq ?
- d->src_rq->nt_prev_runnable_sum :
- d->src_cpu_time->nt_prev_runnable_sum;
- __entry->dst_nt_cs = d->dst_rq ?
- d->dst_rq->nt_curr_runnable_sum :
- d->dst_cpu_time->nt_curr_runnable_sum;
- __entry->dst_nt_ps = d->dst_rq ?
- d->dst_rq->nt_prev_runnable_sum :
- d->dst_cpu_time->nt_prev_runnable_sum;
+ __entry->src_cs = __get_update_sum(rq, migrate_type,
+ true, false, true);
+ __entry->src_ps = __get_update_sum(rq, migrate_type,
+ true, false, false);
+ __entry->dst_cs = __get_update_sum(rq, migrate_type,
+ false, false, true);
+ __entry->dst_ps = __get_update_sum(rq, migrate_type,
+ false, false, false);
+ __entry->src_nt_cs = __get_update_sum(rq, migrate_type,
+ true, true, true);
+ __entry->src_nt_ps = __get_update_sum(rq, migrate_type,
+ true, true, false);
+ __entry->dst_nt_cs = __get_update_sum(rq, migrate_type,
+ false, true, true);
+ __entry->dst_nt_ps = __get_update_sum(rq, migrate_type,
+ false, true, false);
),
TP_printk("pid %d task_cpu %d migrate_type %s src_cs %llu src_ps %llu dst_cs %lld dst_ps %lld src_nt_cs %llu src_nt_ps %llu dst_nt_cs %lld dst_nt_ps %lld",
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild
index 38d437096c35..c3c78a0d0052 100644
--- a/include/uapi/drm/Kbuild
+++ b/include/uapi/drm/Kbuild
@@ -18,3 +18,5 @@ header-y += via_drm.h
header-y += vmwgfx_drm.h
header-y += msm_drm.h
header-y += virtgpu_drm.h
+header-y += sde_drm.h
+header-y += msm_drm_pp.h
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 0b69a7753558..53d7c80f5eb0 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -229,4 +229,12 @@
*/
#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
+/*
+ * Qualcomm Compressed Format
+ *
+ * Refers to a compressed variant of the base format.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1)
+
#endif /* DRM_FOURCC_H */
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 6c11ca401de8..09c22caf34dd 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -72,6 +72,7 @@
#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
+#define DRM_MODE_FLAG_SEAMLESS (1<<19)
/* DPMS flags */
@@ -354,6 +355,7 @@ struct drm_mode_fb_cmd {
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */
+#define DRM_MODE_FB_SECURE (1<<2) /* for secure framebuffers */
struct drm_mode_fb_cmd2 {
__u32 fb_id;
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 75a232b9a970..fd1be42188cd 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -20,6 +20,7 @@
#include <stddef.h>
#include <drm/drm.h>
+#include <drm/sde_drm.h>
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints:
@@ -196,6 +197,39 @@ struct drm_msm_wait_fence {
struct drm_msm_timespec timeout; /* in */
};
+/**
+ * struct drm_msm_event_req - Payload to event enable/disable ioctls.
+ * @object_id: DRM object id. Ex: for crtc pass crtc id.
+ * @object_type: DRM object type. Ex: for crtc set it to DRM_MODE_OBJECT_CRTC.
+ * @event: Event for which notification is being enabled/disabled.
+ * Ex: for Histogram set - DRM_EVENT_HISTOGRAM.
+ * @client_context: Opaque pointer that will be returned during event response
+ * notification.
+ * @index: Object index(ex: crtc index), optional for user-space to set.
+ * Driver will override value based on object_id and object_type.
+ */
+struct drm_msm_event_req {
+ __u32 object_id;
+ __u32 object_type;
+ __u32 event;
+ __u64 client_context;
+ __u32 index;
+};
+
+/**
+ * struct drm_msm_event_resp - payload returned when read is called for
+ * custom notifications.
+ * @base: Event type and length of complete notification payload.
+ * @info: Contains information about the DRM object that raised this event.
+ * @data: Custom payload that driver returns for event type.
+ * size of data = base.length - (sizeof(base) + sizeof(info))
+ */
+struct drm_msm_event_resp {
+ struct drm_event base;
+ struct drm_msm_event_req info;
+ __u8 data[];
+};
+
#define DRM_MSM_GET_PARAM 0x00
/* placeholder:
#define DRM_MSM_SET_PARAM 0x01
@@ -206,7 +240,18 @@ struct drm_msm_wait_fence {
#define DRM_MSM_GEM_CPU_FINI 0x05
#define DRM_MSM_GEM_SUBMIT 0x06
#define DRM_MSM_WAIT_FENCE 0x07
-#define DRM_MSM_NUM_IOCTLS 0x08
+#define DRM_SDE_WB_CONFIG 0x08
+#define DRM_MSM_REGISTER_EVENT 0x09
+#define DRM_MSM_DEREGISTER_EVENT 0x0A
+#define DRM_MSM_NUM_IOCTLS 0x0B
+
+/**
+ * Currently DRM framework supports only VSYNC event.
+ * Starting the custom events at 0xff to provide space for DRM
+ * framework to add new events.
+ */
+#define DRM_EVENT_HISTOGRAM 0xff
+#define DRM_EVENT_AD 0x100
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
@@ -215,5 +260,10 @@ struct drm_msm_wait_fence {
#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
-
+#define DRM_IOCTL_SDE_WB_CONFIG \
+ DRM_IOW((DRM_COMMAND_BASE + DRM_SDE_WB_CONFIG), struct sde_drm_wb_cfg)
+#define DRM_IOCTL_MSM_REGISTER_EVENT DRM_IOW((DRM_COMMAND_BASE + \
+ DRM_MSM_REGISTER_EVENT), struct drm_msm_event_req)
+#define DRM_IOCTL_MSM_DEREGISTER_EVENT DRM_IOW((DRM_COMMAND_BASE + \
+ DRM_MSM_DEREGISTER_EVENT), struct drm_msm_event_req)
#endif /* __MSM_DRM_H__ */
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
new file mode 100644
index 000000000000..9ed3a13953ef
--- /dev/null
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -0,0 +1,82 @@
+#ifndef _MSM_DRM_PP_H_
+#define _MSM_DRM_PP_H_
+
+#include <drm/drm.h>
+
+/**
+ * struct drm_msm_pcc_coeff - PCC coefficient structure for each color
+ * component.
+ * @c: constant coefficient.
+ * @r: red coefficient.
+ * @g: green coefficient.
+ * @b: blue coefficient.
+ * @rg: red green coefficient.
+ * @gb: green blue coefficient.
+ * @rb: red blue coefficient.
+ * @rgb: red green blue coefficient.
+ */
+
+struct drm_msm_pcc_coeff {
+ __u32 c;
+ __u32 r;
+ __u32 g;
+ __u32 b;
+ __u32 rg;
+ __u32 gb;
+ __u32 rb;
+ __u32 rgb;
+};
+
+/**
+ * struct drm_msm_pcc - pcc feature structure
+ * flags: for customizing operations
+ * r: red coefficients.
+ * g: green coefficients.
+ * b: blue coefficients.
+ */
+
+struct drm_msm_pcc {
+ __u64 flags;
+ struct drm_msm_pcc_coeff r;
+ struct drm_msm_pcc_coeff g;
+ struct drm_msm_pcc_coeff b;
+};
+
+/* struct drm_msm_pa_vlut - picture adjustment vLUT structure
+ * flags: for customizing vlut operation
+ * val: vLUT values
+ */
+#define PA_VLUT_SIZE 256
+struct drm_msm_pa_vlut {
+ __u64 flags;
+ __u32 val[PA_VLUT_SIZE];
+};
+
+/* struct drm_msm_memcol - Memory color feature structure.
+ * Skin, sky, foliage features are supported.
+ * @prot_flags: Bit mask for enabling protection feature.
+ * @color_adjust_p0: Adjustment curve.
+ * @color_adjust_p1: Adjustment curve.
+ * @color_adjust_p2: Adjustment curve.
+ * @blend_gain: Blend gain weightage from other PA features.
+ * @sat_hold: Saturation hold value.
+ * @val_hold: Value hold info.
+ * @hue_region: Hue qualifier.
+ * @sat_region: Saturation qualifier.
+ * @val_region: Value qualifier.
+ */
+#define DRM_MSM_MEMCOL
+struct drm_msm_memcol {
+ __u64 prot_flags;
+ __u32 color_adjust_p0;
+ __u32 color_adjust_p1;
+ __u32 color_adjust_p2;
+ __u32 blend_gain;
+ __u32 sat_hold;
+ __u32 val_hold;
+ __u32 hue_region;
+ __u32 sat_region;
+ __u32 val_region;
+};
+
+#endif /* _MSM_DRM_PP_H_ */
diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h
new file mode 100644
index 000000000000..c7bed3b1ccf3
--- /dev/null
+++ b/include/uapi/drm/sde_drm.h
@@ -0,0 +1,298 @@
+#ifndef _SDE_DRM_H_
+#define _SDE_DRM_H_
+
+/* Total number of supported color planes */
+#define SDE_MAX_PLANES 4
+
+/* Total number of parameterized detail enhancer mapping curves */
+#define SDE_MAX_DE_CURVES 3
+
+ /* Y/RGB and UV filter configuration */
+#define FILTER_EDGE_DIRECTED_2D 0x0
+#define FILTER_CIRCULAR_2D 0x1
+#define FILTER_SEPARABLE_1D 0x2
+#define FILTER_BILINEAR 0x3
+
+/* Alpha filters */
+#define FILTER_ALPHA_DROP_REPEAT 0x0
+#define FILTER_ALPHA_BILINEAR 0x1
+#define FILTER_ALPHA_2D 0x3
+
+/* Blend filters */
+#define FILTER_BLEND_CIRCULAR_2D 0x0
+#define FILTER_BLEND_SEPARABLE_1D 0x1
+
+/* LUT configuration flags */
+#define SCALER_LUT_SWAP 0x1
+#define SCALER_LUT_DIR_WR 0x2
+#define SCALER_LUT_Y_CIR_WR 0x4
+#define SCALER_LUT_UV_CIR_WR 0x8
+#define SCALER_LUT_Y_SEP_WR 0x10
+#define SCALER_LUT_UV_SEP_WR 0x20
+
+/**
+ * Blend operations for "blend_op" property
+ *
+ * @SDE_DRM_BLEND_OP_NOT_DEFINED: No blend operation defined for the layer.
+ * @SDE_DRM_BLEND_OP_OPAQUE: Apply a constant blend operation. The layer
+ * would appear opaque in case fg plane alpha
+ * is 0xff.
+ * @SDE_DRM_BLEND_OP_PREMULTIPLIED: Apply source over blend rule. Layer already
+ * has alpha pre-multiplication done. If the fg
+ * plane alpha is less than 0xff, apply
+ * modulation as well. This operation is
+ * intended on layers having alpha channel.
+ * @SDE_DRM_BLEND_OP_COVERAGE: Apply source over blend rule. Layer is not
+ * alpha pre-multiplied. Apply
+ * pre-multiplication. If fg plane alpha is
+ * less than 0xff, apply modulation as well.
+ * @SDE_DRM_BLEND_OP_MAX: Used to track maximum blend operation
+ * possible by mdp.
+ */
+#define SDE_DRM_BLEND_OP_NOT_DEFINED 0
+#define SDE_DRM_BLEND_OP_OPAQUE 1
+#define SDE_DRM_BLEND_OP_PREMULTIPLIED 2
+#define SDE_DRM_BLEND_OP_COVERAGE 3
+#define SDE_DRM_BLEND_OP_MAX 4
+
+/**
+ * Bit masks for "src_config" property
+ * construct bitmask via (1UL << SDE_DRM_<flag>)
+ */
+#define SDE_DRM_DEINTERLACE 0 /* Specifies interlaced input */
+
+/* DRM bitmasks are restricted to 0..63 */
+#define SDE_DRM_BITMASK_COUNT 64
+
+/**
+ * struct sde_drm_pix_ext_v1 - version 1 of pixel ext structure
+ * @num_ext_pxls_lr: Number of total horizontal pixels
+ * @num_ext_pxls_tb: Number of total vertical lines
+ * @left_ftch: Number of extra pixels to overfetch from left
+ * @right_ftch: Number of extra pixels to overfetch from right
+ * @top_ftch: Number of extra lines to overfetch from top
+ * @btm_ftch: Number of extra lines to overfetch from bottom
+ * @left_rpt: Number of extra pixels to repeat from left
+ * @right_rpt: Number of extra pixels to repeat from right
+ * @top_rpt: Number of extra lines to repeat from top
+ * @btm_rpt: Number of extra lines to repeat from bottom
+ */
+struct sde_drm_pix_ext_v1 {
+ /*
+ * Number of pixels ext in left, right, top and bottom direction
+ * for all color components.
+ */
+ int32_t num_ext_pxls_lr[SDE_MAX_PLANES];
+ int32_t num_ext_pxls_tb[SDE_MAX_PLANES];
+
+ /*
+ * Number of pixels needs to be overfetched in left, right, top
+ * and bottom directions from source image for scaling.
+ */
+ int32_t left_ftch[SDE_MAX_PLANES];
+ int32_t right_ftch[SDE_MAX_PLANES];
+ int32_t top_ftch[SDE_MAX_PLANES];
+ int32_t btm_ftch[SDE_MAX_PLANES];
+ /*
+ * Number of pixels needs to be repeated in left, right, top and
+ * bottom directions for scaling.
+ */
+ int32_t left_rpt[SDE_MAX_PLANES];
+ int32_t right_rpt[SDE_MAX_PLANES];
+ int32_t top_rpt[SDE_MAX_PLANES];
+ int32_t btm_rpt[SDE_MAX_PLANES];
+
+};
+
+/**
+ * struct sde_drm_scaler_v1 - version 1 of struct sde_drm_scaler
+ * @lr: Pixel extension settings for left/right
+ * @tb: Pixel extension settings for top/bottom
+ * @init_phase_x: Initial scaler phase values for x
+ * @phase_step_x: Phase step values for x
+ * @init_phase_y: Initial scaler phase values for y
+ * @phase_step_y: Phase step values for y
+ * @horz_filter: Horizontal filter array
+ * @vert_filter: Vertical filter array
+ */
+struct sde_drm_scaler_v1 {
+ /*
+ * Pix ext settings
+ */
+ struct sde_drm_pix_ext_v1 pe;
+ /*
+ * Phase settings
+ */
+ int32_t init_phase_x[SDE_MAX_PLANES];
+ int32_t phase_step_x[SDE_MAX_PLANES];
+ int32_t init_phase_y[SDE_MAX_PLANES];
+ int32_t phase_step_y[SDE_MAX_PLANES];
+
+ /*
+ * Filter type to be used for scaling in horizontal and vertical
+ * directions
+ */
+ uint32_t horz_filter[SDE_MAX_PLANES];
+ uint32_t vert_filter[SDE_MAX_PLANES];
+};
+
+/**
+ * struct sde_drm_de_v1 - version 1 of detail enhancer structure
+ * @enable: Enables/disables detail enhancer
+ * @sharpen_level1: Sharpening strength for noise
+ * @sharpen_level2: Sharpening strength for context
+ * @clip: Clip coefficient
+ * @limit: Detail enhancer limit factor
+ * @thr_quiet: Quiet zone threshold
+ * @thr_dieout: Die-out zone threshold
+ * @thr_low: Linear zone left threshold
+ * @thr_high: Linear zone right threshold
+ * @prec_shift: Detail enhancer precision
+ * @adjust_a: Mapping curves A coefficients
+ * @adjust_b: Mapping curves B coefficients
+ * @adjust_c: Mapping curves C coefficients
+ */
+struct sde_drm_de_v1 {
+ uint32_t enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[SDE_MAX_DE_CURVES];
+ int16_t adjust_b[SDE_MAX_DE_CURVES];
+ int16_t adjust_c[SDE_MAX_DE_CURVES];
+};
+
+/**
+ * struct sde_drm_scaler_v2 - version 2 of struct sde_drm_scaler
+ * @enable: Scaler enable
+ * @dir_en: Detail enhancer enable
+ * @pe: Pixel extension settings
+ * @horz_decimate: Horizontal decimation factor
+ * @vert_decimate: Vertical decimation factor
+ * @init_phase_x: Initial scaler phase values for x
+ * @phase_step_x: Phase step values for x
+ * @init_phase_y: Initial scaler phase values for y
+ * @phase_step_y: Phase step values for y
+ * @preload_x: Horizontal preload value
+ * @preload_y: Vertical preload value
+ * @src_width: Source width
+ * @src_height: Source height
+ * @dst_width: Destination width
+ * @dst_height: Destination height
+ * @y_rgb_filter_cfg: Y/RGB plane filter configuration
+ * @uv_filter_cfg: UV plane filter configuration
+ * @alpha_filter_cfg: Alpha filter configuration
+ * @blend_cfg: Selection of blend coefficients
+ * @lut_flag: LUT configuration flags
+ * @dir_lut_idx: 2d 4x4 LUT index
+ * @y_rgb_cir_lut_idx: Y/RGB circular LUT index
+ * @uv_cir_lut_idx: UV circular LUT index
+ * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
+ * @uv_sep_lut_idx: UV separable LUT index
+ * @de: Detail enhancer settings
+ */
+struct sde_drm_scaler_v2 {
+ /*
+ * General definitions
+ */
+ uint32_t enable;
+ uint32_t dir_en;
+
+ /*
+ * Pix ext settings
+ */
+ struct sde_drm_pix_ext_v1 pe;
+
+ /*
+ * Decimation settings
+ */
+ uint32_t horz_decimate;
+ uint32_t vert_decimate;
+
+ /*
+ * Phase settings
+ */
+ int32_t init_phase_x[SDE_MAX_PLANES];
+ int32_t phase_step_x[SDE_MAX_PLANES];
+ int32_t init_phase_y[SDE_MAX_PLANES];
+ int32_t phase_step_y[SDE_MAX_PLANES];
+
+ uint32_t preload_x[SDE_MAX_PLANES];
+ uint32_t preload_y[SDE_MAX_PLANES];
+ uint32_t src_width[SDE_MAX_PLANES];
+ uint32_t src_height[SDE_MAX_PLANES];
+
+ uint32_t dst_width;
+ uint32_t dst_height;
+
+ uint32_t y_rgb_filter_cfg;
+ uint32_t uv_filter_cfg;
+ uint32_t alpha_filter_cfg;
+ uint32_t blend_cfg;
+
+ uint32_t lut_flag;
+ uint32_t dir_lut_idx;
+
+ /* for Y(RGB) and UV planes*/
+ uint32_t y_rgb_cir_lut_idx;
+ uint32_t uv_cir_lut_idx;
+ uint32_t y_rgb_sep_lut_idx;
+ uint32_t uv_sep_lut_idx;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct sde_drm_de_v1 de;
+};
+
+
+/*
+ * Define constants for struct sde_drm_csc
+ */
+#define SDE_CSC_MATRIX_COEFF_SIZE 9
+#define SDE_CSC_CLAMP_SIZE 6
+#define SDE_CSC_BIAS_SIZE 3
+
+/**
+ * struct sde_drm_csc_v1 - version 1 of struct sde_drm_csc
+ * @ctm_coeff: Matrix coefficients, in S31.32 format
+ * @pre_bias: Pre-bias array values
+ * @post_bias: Post-bias array values
+ * @pre_clamp: Pre-clamp array values
+ * @post_clamp: Post-clamp array values
+ */
+struct sde_drm_csc_v1 {
+ int64_t ctm_coeff[SDE_CSC_MATRIX_COEFF_SIZE];
+ uint32_t pre_bias[SDE_CSC_BIAS_SIZE];
+ uint32_t post_bias[SDE_CSC_BIAS_SIZE];
+ uint32_t pre_clamp[SDE_CSC_CLAMP_SIZE];
+ uint32_t post_clamp[SDE_CSC_CLAMP_SIZE];
+};
+
+/* Writeback Config version definition */
+#define SDE_DRM_WB_CFG 0x1
+
+/* SDE_DRM_WB_CONFIG_FLAGS - Writeback configuration flags */
+#define SDE_DRM_WB_CFG_FLAGS_CONNECTED (1<<0)
+
+/**
+ * struct sde_drm_wb_cfg - Writeback configuration structure
+ * @flags: see DRM_MSM_WB_CONFIG_FLAGS
+ * @connector_id: writeback connector identifier
+ * @count_modes: Count of modes in modes_ptr
+ * @modes: Pointer to struct drm_mode_modeinfo
+ */
+struct sde_drm_wb_cfg {
+ uint32_t flags;
+ uint32_t connector_id;
+ uint32_t count_modes;
+ uint64_t modes;
+};
+
+#endif /* _SDE_DRM_H_ */
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 41420e341e75..51f891fb1b18 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -33,6 +33,8 @@ enum {
BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+ BINDER_TYPE_FDA = B_PACK_CHARS('f', 'd', 'a', B_TYPE_LARGE),
+ BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};
enum {
@@ -48,6 +50,14 @@ typedef __u64 binder_size_t;
typedef __u64 binder_uintptr_t;
#endif
+/**
+ * struct binder_object_header - header shared by all binder metadata objects.
+ * @type: type of the object
+ */
+struct binder_object_header {
+ __u32 type;
+};
+
/*
* This is the flattened representation of a Binder object for transfer
* between processes. The 'offsets' supplied as part of a binder transaction
@@ -56,9 +66,8 @@ typedef __u64 binder_uintptr_t;
* between processes.
*/
struct flat_binder_object {
- /* 8 bytes for large_flat_header. */
- __u32 type;
- __u32 flags;
+ struct binder_object_header hdr;
+ __u32 flags;
/* 8 bytes of data. */
union {
@@ -70,6 +79,84 @@ struct flat_binder_object {
binder_uintptr_t cookie;
};
+/**
+ * struct binder_fd_object - describes a filedescriptor to be fixed up.
+ * @hdr: common header structure
+ * @pad_flags: padding to remain compatible with old userspace code
+ * @pad_binder: padding to remain compatible with old userspace code
+ * @fd: file descriptor
+ * @cookie: opaque data, used by user-space
+ */
+struct binder_fd_object {
+ struct binder_object_header hdr;
+ __u32 pad_flags;
+ union {
+ binder_uintptr_t pad_binder;
+ __u32 fd;
+ };
+
+ binder_uintptr_t cookie;
+};
+
+/* struct binder_buffer_object - object describing a userspace buffer
+ * @hdr: common header structure
+ * @flags: one or more BINDER_BUFFER_* flags
+ * @buffer: address of the buffer
+ * @length: length of the buffer
+ * @parent: index in offset array pointing to parent buffer
+ * @parent_offset: offset in @parent pointing to this buffer
+ *
+ * A binder_buffer object represents an object that the
+ * binder kernel driver can copy verbatim to the target
+ * address space. A buffer itself may be pointed to from
+ * within another buffer, meaning that the pointer inside
+ * that other buffer needs to be fixed up as well. This
+ * can be done by setting the BINDER_BUFFER_FLAG_HAS_PARENT
+ * flag in @flags, by setting @parent buffer to the index
+ * in the offset array pointing to the parent binder_buffer_object,
+ * and by setting @parent_offset to the offset in the parent buffer
+ * at which the pointer to this buffer is located.
+ */
+struct binder_buffer_object {
+ struct binder_object_header hdr;
+ __u32 flags;
+ binder_uintptr_t buffer;
+ binder_size_t length;
+ binder_size_t parent;
+ binder_size_t parent_offset;
+};
+
+enum {
+ BINDER_BUFFER_FLAG_HAS_PARENT = 0x01,
+};
+
+/* struct binder_fd_array_object - object describing an array of fds in a buffer
+ * @hdr: common header structure
+ * @num_fds: number of file descriptors in the buffer
+ * @parent: index in offset array to buffer holding the fd array
+ * @parent_offset: start offset of fd array in the buffer
+ *
+ * A binder_fd_array object represents an array of file
+ * descriptors embedded in a binder_buffer_object. It is
+ * different from a regular binder_buffer_object because it
+ * describes a list of file descriptors to fix up, not an opaque
+ * blob of memory, and hence the kernel needs to treat it differently.
+ *
+ * An example of how this would be used is with Android's
+ * native_handle_t object, which is a struct with a list of integers
+ * and a list of file descriptors. The native_handle_t struct itself
+ * will be represented by a struct binder_buffer_object, whereas the
+ * embedded list of file descriptors is represented by a
+ * struct binder_fd_array_object with that binder_buffer_object as
+ * a parent.
+ */
+struct binder_fd_array_object {
+ struct binder_object_header hdr;
+ binder_size_t num_fds;
+ binder_size_t parent;
+ binder_size_t parent_offset;
+};
+
/*
* On 64-bit platforms where user code may run in 32-bits the driver must
* translate the buffer (and local binder) addresses appropriately.
@@ -162,6 +249,11 @@ struct binder_transaction_data {
} data;
};
+struct binder_transaction_data_sg {
+ struct binder_transaction_data transaction_data;
+ binder_size_t buffers_size;
+};
+
struct binder_ptr_cookie {
binder_uintptr_t ptr;
binder_uintptr_t cookie;
@@ -346,6 +438,12 @@ enum binder_driver_command_protocol {
/*
* void *: cookie
*/
+
+ BC_TRANSACTION_SG = _IOW('c', 17, struct binder_transaction_data_sg),
+ BC_REPLY_SG = _IOW('c', 18, struct binder_transaction_data_sg),
+ /*
+ * binder_transaction_data_sg: the sent command.
+ */
};
#endif /* _UAPI_LINUX_BINDER_H */
diff --git a/include/uapi/media/msm_vidc.h b/include/uapi/media/msm_vidc.h
index b259bdef8a93..e9370cb660b2 100644
--- a/include/uapi/media/msm_vidc.h
+++ b/include/uapi/media/msm_vidc.h
@@ -245,7 +245,7 @@ enum msm_vidc_extradata_type {
MSM_VIDC_EXTRADATA_DIGITAL_ZOOM = 0x07000010,
#define MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO \
MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO
- MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO = 0x070000011,
+ MSM_VIDC_EXTRADATA_VPX_COLORSPACE_INFO = 0x00000014,
MSM_VIDC_EXTRADATA_MULTISLICE_INFO = 0x7F100000,
MSM_VIDC_EXTRADATA_NUM_CONCEALED_MB = 0x7F100001,
MSM_VIDC_EXTRADATA_INDEX = 0x7F100002,
diff --git a/include/uapi/sound/lsm_params.h b/include/uapi/sound/lsm_params.h
index eafdc117413a..9ca5930475ba 100644
--- a/include/uapi/sound/lsm_params.h
+++ b/include/uapi/sound/lsm_params.h
@@ -1,6 +1,9 @@
#ifndef _UAPI_LSM_PARAMS_H__
#define _UAPI_LSM_PARAMS_H__
+#define LSM_POLLING_ENABLE_SUPPORT
+#define LSM_EVENT_TIMESTAMP_MODE_SUPPORT
+
#include <linux/types.h>
#include <sound/asound.h>
@@ -18,6 +21,19 @@
#define LSM_OUT_TRANSFER_MODE_RT (0)
#define LSM_OUT_TRANSFER_MODE_FTRT (1)
+#define LSM_ENDPOINT_DETECT_THRESHOLD (0)
+#define LSM_OPERATION_MODE (1)
+#define LSM_GAIN (2)
+#define LSM_MIN_CONFIDENCE_LEVELS (3)
+#define LSM_REG_SND_MODEL (4)
+#define LSM_DEREG_SND_MODEL (5)
+#define LSM_CUSTOM_PARAMS (6)
+#define LSM_POLLING_ENABLE (7)
+#define LSM_PARAMS_MAX (LSM_POLLING_ENABLE + 1)
+
+#define LSM_EVENT_NON_TIME_STAMP_MODE (0)
+#define LSM_EVENT_TIME_STAMP_MODE (1)
+
enum lsm_app_id {
LSM_VOICE_WAKEUP_APP_ID = 1,
LSM_VOICE_WAKEUP_APP_ID_V2 = 2,
@@ -35,18 +51,6 @@ enum lsm_vw_status {
LSM_VOICE_WAKEUP_STATUS_REJECTED
};
-enum LSM_PARAM_TYPE {
- LSM_ENDPOINT_DETECT_THRESHOLD = 0,
- LSM_OPERATION_MODE,
- LSM_GAIN,
- LSM_MIN_CONFIDENCE_LEVELS,
- LSM_REG_SND_MODEL,
- LSM_DEREG_SND_MODEL,
- LSM_CUSTOM_PARAMS,
- /* driver ioctl will parse only so many params */
- LSM_PARAMS_MAX,
-};
-
/*
* Data for LSM_ENDPOINT_DETECT_THRESHOLD param_type
* @epd_begin: Begin threshold
@@ -75,6 +79,14 @@ struct snd_lsm_gain {
__u16 gain;
};
+/*
+ * Data for LSM_POLLING_ENABLE param_type
+ * @poll_en: Polling enable or disable
+ */
+struct snd_lsm_poll_enable {
+ bool poll_en;
+};
+
struct snd_lsm_sound_model_v2 {
__u8 __user *data;
@@ -95,11 +107,20 @@ struct snd_lsm_event_status {
__u8 payload[0];
};
+struct snd_lsm_event_status_v3 {
+ __u32 timestamp_lsw;
+ __u32 timestamp_msw;
+ __u16 status;
+ __u16 payload_size;
+ __u8 payload[0];
+};
+
struct snd_lsm_detection_params {
__u8 *conf_level;
enum lsm_detection_mode detect_mode;
__u8 num_confidence_levels;
bool detect_failure;
+ bool poll_enable;
};
/*
@@ -122,7 +143,7 @@ struct lsm_params_info {
__u32 param_id;
__u32 param_size;
__u8 __user *param_data;
- enum LSM_PARAM_TYPE param_type;
+ uint32_t param_type;
};
/*
@@ -171,5 +192,9 @@ struct snd_lsm_output_format_cfg {
struct snd_lsm_module_params)
#define SNDRV_LSM_OUT_FORMAT_CFG _IOW('U', 0x0C, \
struct snd_lsm_output_format_cfg)
+#define SNDRV_LSM_SET_PORT _IO('U', 0x0D)
+#define SNDRV_LSM_SET_FWK_MODE_CONFIG _IOW('U', 0x0E, uint32_t)
+#define SNDRV_LSM_EVENT_STATUS_V3 _IOW('U', 0x0F, \
+ struct snd_lsm_event_status_v3)
#endif
diff --git a/kernel/sched/boost.c b/kernel/sched/boost.c
index fcfda385b74a..5bdd51b1e55e 100644
--- a/kernel/sched/boost.c
+++ b/kernel/sched/boost.c
@@ -156,9 +156,6 @@ void sched_boost_parse_dt(void)
struct device_node *sn;
const char *boost_policy;
- if (!sched_enable_hmp)
- return;
-
sn = of_find_node_by_path("/sched-hmp");
if (!sn)
return;
@@ -175,9 +172,6 @@ int sched_set_boost(int type)
{
int ret = 0;
- if (!sched_enable_hmp)
- return -EINVAL;
-
mutex_lock(&boost_mutex);
if (verify_boost_params(sysctl_sched_boost, type))
@@ -197,9 +191,6 @@ int sched_boost_handler(struct ctl_table *table, int write,
unsigned int *data = (unsigned int *)table->data;
unsigned int old_val;
- if (!sched_enable_hmp)
- return -EINVAL;
-
mutex_lock(&boost_mutex);
old_val = *data;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 519aee32e122..3fcadbae663d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3025,8 +3025,9 @@ void sched_exec(void)
unsigned long flags;
int dest_cpu, curr_cpu;
- if (sched_enable_hmp)
- return;
+#ifdef CONFIG_SCHED_HMP
+ return;
+#endif
raw_spin_lock_irqsave(&p->pi_lock, flags);
curr_cpu = task_cpu(p);
@@ -8215,8 +8216,9 @@ void __init sched_init(void)
int i, j;
unsigned long alloc_size = 0, ptr;
- if (sched_enable_hmp)
- pr_info("HMP scheduling enabled.\n");
+#ifdef CONFIG_SCHED_HMP
+ pr_info("HMP scheduling enabled.\n");
+#endif
BUG_ON(num_possible_cpus() > BITS_PER_LONG);
@@ -8362,6 +8364,7 @@ void __init sched_init(void)
rq->cluster = &init_cluster;
rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+ memset(&rq->grp_time, 0, sizeof(struct group_cpu_time));
rq->old_busy_time = 0;
rq->old_estimated_time = 0;
rq->old_busy_time_group = 0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ac4c3f1d144a..6f68b0e19c4a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3332,9 +3332,9 @@ void _inc_hmp_sched_stats_fair(struct rq *rq,
* inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg called
* from inc_cfs_rq_hmp_stats() have similar checks), we gain a bit on
* efficiency by short-circuiting for_each_sched_entity() loop when
- * !sched_enable_hmp || sched_disable_window_stats
+ * sched_disable_window_stats
*/
- if (!sched_enable_hmp || sched_disable_window_stats)
+ if (sched_disable_window_stats)
return;
for_each_sched_entity(se) {
@@ -3357,7 +3357,7 @@ _dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p, int change_cra)
struct sched_entity *se = &p->se;
/* See comment on efficiency in _inc_hmp_sched_stats_fair */
- if (!sched_enable_hmp || sched_disable_window_stats)
+ if (sched_disable_window_stats)
return;
for_each_sched_entity(se) {
@@ -3482,8 +3482,7 @@ static inline int migration_needed(struct task_struct *p, int cpu)
int nice;
struct related_thread_group *grp;
- if (!sched_enable_hmp || p->state != TASK_RUNNING ||
- p->nr_cpus_allowed == 1)
+ if (p->state != TASK_RUNNING || p->nr_cpus_allowed == 1)
return 0;
/* No need to migrate task that is about to be throttled */
@@ -7024,8 +7023,9 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
int want_affine = 0;
int sync = wake_flags & WF_SYNC;
- if (sched_enable_hmp)
- return select_best_cpu(p, prev_cpu, 0, sync);
+#ifdef CONFIG_SCHED_HMP
+ return select_best_cpu(p, prev_cpu, 0, sync);
+#endif
if (sd_flag & SD_BALANCE_WAKE)
want_affine = (!wake_wide(p) && task_fits_max(p, cpu) &&
@@ -9313,8 +9313,9 @@ static struct rq *find_busiest_queue(struct lb_env *env,
unsigned long busiest_load = 0, busiest_capacity = 1;
int i;
- if (sched_enable_hmp)
- return find_busiest_queue_hmp(env, group);
+#ifdef CONFIG_SCHED_HMP
+ return find_busiest_queue_hmp(env, group);
+#endif
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
unsigned long capacity, wl;
@@ -10120,8 +10121,9 @@ static inline int find_new_ilb(int type)
{
int ilb;
- if (sched_enable_hmp)
- return find_new_hmp_ilb(type);
+#ifdef CONFIG_SCHED_HMP
+ return find_new_hmp_ilb(type);
+#endif
ilb = cpumask_first(nohz.idle_cpus_mask);
@@ -10496,8 +10498,9 @@ static inline int _nohz_kick_needed(struct rq *rq, int cpu, int *type)
if (likely(!atomic_read(&nohz.nr_cpus)))
return 0;
- if (sched_enable_hmp)
- return _nohz_kick_needed_hmp(rq, cpu, type);
+#ifdef CONFIG_SCHED_HMP
+ return _nohz_kick_needed_hmp(rq, cpu, type);
+#endif
if (time_before(now, nohz.next_balance))
return 0;
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 95125c5518e2..1c0defb34ae1 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -28,8 +28,7 @@ const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
"TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE",
"IRQ_UPDATE"};
-const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP",
- "RQ_TO_RQ", "GROUP_TO_GROUP"};
+const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP"};
static ktime_t ktime_last;
static bool sched_ktime_suspended;
@@ -616,19 +615,6 @@ int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
return 0;
}
-int __init set_sched_enable_hmp(char *str)
-{
- int enable_hmp = 0;
-
- get_option(&str, &enable_hmp);
-
- sched_enable_hmp = !!enable_hmp;
-
- return 0;
-}
-
-early_param("sched_enable_hmp", set_sched_enable_hmp);
-
/* Clear any HMP scheduler related requests pending from or on cpu */
void clear_hmp_request(int cpu)
{
@@ -870,9 +856,6 @@ unsigned int max_task_load(void)
return sched_ravg_window;
}
-/* Use this knob to turn on or off HMP-aware task placement logic */
-unsigned int __read_mostly sched_enable_hmp;
-
/* A cpu can no longer accommodate more tasks if:
*
* rq->nr_running > sysctl_sched_spill_nr_run ||
@@ -1245,7 +1228,7 @@ unlock:
void inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
{
- if (!sched_enable_hmp || sched_disable_window_stats)
+ if (sched_disable_window_stats)
return;
if (is_big_task(p))
@@ -1254,7 +1237,7 @@ void inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
void dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
{
- if (!sched_enable_hmp || sched_disable_window_stats)
+ if (sched_disable_window_stats)
return;
if (is_big_task(p))
@@ -1323,7 +1306,7 @@ void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
u64 new_task_load;
u64 old_task_load;
- if (!sched_enable_hmp || sched_disable_window_stats)
+ if (sched_disable_window_stats)
return;
old_task_load = scale_load_to_cpu(task_load(p), task_cpu(p));
@@ -1433,9 +1416,6 @@ int sched_window_update_handler(struct ctl_table *table, int write,
unsigned int *data = (unsigned int *)table->data;
unsigned int old_val;
- if (!sched_enable_hmp)
- return -EINVAL;
-
mutex_lock(&policy_mutex);
old_val = *data;
@@ -1471,9 +1451,6 @@ int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
unsigned int *data = (unsigned int *)table->data;
int update_task_count = 0;
- if (!sched_enable_hmp)
- return 0;
-
/*
* The policy mutex is acquired with cpu_hotplug.lock
* held from cpu_up()->cpufreq_governor_interactive()->
@@ -1713,45 +1690,19 @@ static inline unsigned int load_to_freq(struct rq *rq, u64 load)
return freq;
}
-static inline struct group_cpu_time *
-_group_cpu_time(struct related_thread_group *grp, int cpu);
-
-/*
- * Return load from all related group in given cpu.
- * Caller must ensure that related_thread_group_lock is held.
- */
-static void _group_load_in_cpu(int cpu, u64 *grp_load, u64 *new_grp_load)
-{
- struct related_thread_group *grp;
-
- for_each_related_thread_group(grp) {
- struct group_cpu_time *cpu_time;
-
- cpu_time = _group_cpu_time(grp, cpu);
- *grp_load += cpu_time->prev_runnable_sum;
- if (new_grp_load)
- *new_grp_load += cpu_time->nt_prev_runnable_sum;
- }
-}
-
/*
* Return load from all related groups in given frequency domain.
- * Caller must ensure that related_thread_group_lock is held.
*/
static void group_load_in_freq_domain(struct cpumask *cpus,
u64 *grp_load, u64 *new_grp_load)
{
- struct related_thread_group *grp;
int j;
- for_each_related_thread_group(grp) {
- for_each_cpu(j, cpus) {
- struct group_cpu_time *cpu_time;
+ for_each_cpu(j, cpus) {
+ struct rq *rq = cpu_rq(j);
- cpu_time = _group_cpu_time(grp, j);
- *grp_load += cpu_time->prev_runnable_sum;
- *new_grp_load += cpu_time->nt_prev_runnable_sum;
- }
+ *grp_load += rq->grp_time.prev_runnable_sum;
+ *new_grp_load += rq->grp_time.nt_prev_runnable_sum;
}
}
@@ -1776,9 +1727,6 @@ static int send_notification(struct rq *rq, int check_pred, int check_groups)
int rc = 0;
u64 group_load = 0, new_load = 0;
- if (!sched_enable_hmp)
- return 0;
-
if (check_pred) {
u64 prev = rq->old_busy_time;
u64 predicted = rq->hmp_stats.pred_demands_sum;
@@ -1796,20 +1744,18 @@ static int send_notification(struct rq *rq, int check_pred, int check_groups)
if (freq_required < cur_freq + sysctl_sched_pred_alert_freq)
return 0;
} else {
- read_lock_irqsave(&related_thread_group_lock, flags);
/*
* Protect from concurrent update of rq->prev_runnable_sum and
* group cpu load
*/
- raw_spin_lock(&rq->lock);
+ raw_spin_lock_irqsave(&rq->lock, flags);
if (check_groups)
- _group_load_in_cpu(cpu_of(rq), &group_load, NULL);
+ group_load = rq->grp_time.prev_runnable_sum;
new_load = rq->prev_runnable_sum + group_load;
new_load = freq_policy_load(rq, new_load);
- raw_spin_unlock(&rq->lock);
- read_unlock_irqrestore(&related_thread_group_lock, flags);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
cur_freq = load_to_freq(rq, rq->old_busy_time);
freq_required = load_to_freq(rq, new_load);
@@ -2283,6 +2229,31 @@ static void rollover_task_window(struct task_struct *p, bool full_window)
}
}
+static void rollover_cpu_window(struct rq *rq, bool full_window)
+{
+ u64 curr_sum = rq->curr_runnable_sum;
+ u64 nt_curr_sum = rq->nt_curr_runnable_sum;
+ u64 grp_curr_sum = rq->grp_time.curr_runnable_sum;
+ u64 grp_nt_curr_sum = rq->grp_time.nt_curr_runnable_sum;
+
+ if (unlikely(full_window)) {
+ curr_sum = 0;
+ nt_curr_sum = 0;
+ grp_curr_sum = 0;
+ grp_nt_curr_sum = 0;
+ }
+
+ rq->prev_runnable_sum = curr_sum;
+ rq->nt_prev_runnable_sum = nt_curr_sum;
+ rq->grp_time.prev_runnable_sum = grp_curr_sum;
+ rq->grp_time.nt_prev_runnable_sum = grp_nt_curr_sum;
+
+ rq->curr_runnable_sum = 0;
+ rq->nt_curr_runnable_sum = 0;
+ rq->grp_time.curr_runnable_sum = 0;
+ rq->grp_time.nt_curr_runnable_sum = 0;
+}
+
/*
* Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
*/
@@ -2299,8 +2270,6 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
u64 *prev_runnable_sum = &rq->prev_runnable_sum;
u64 *nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
u64 *nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
- int flip_counters = 0;
- int prev_sum_reset = 0;
bool new_task;
struct related_thread_group *grp;
int cpu = rq->cpu;
@@ -2315,51 +2284,6 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
new_task = is_new_task(p);
- grp = p->grp;
- if (grp && sched_freq_aggregate) {
- /* cpu_time protected by rq_lock */
- struct group_cpu_time *cpu_time =
- _group_cpu_time(grp, cpu_of(rq));
-
- curr_runnable_sum = &cpu_time->curr_runnable_sum;
- prev_runnable_sum = &cpu_time->prev_runnable_sum;
-
- nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
- nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
-
- if (cpu_time->window_start != rq->window_start) {
- int nr_windows;
-
- delta = rq->window_start - cpu_time->window_start;
- nr_windows = div64_u64(delta, window_size);
- if (nr_windows > 1)
- prev_sum_reset = 1;
-
- cpu_time->window_start = rq->window_start;
- flip_counters = 1;
- }
-
- if (p_is_curr_task && new_window) {
- u64 curr_sum = rq->curr_runnable_sum;
- u64 nt_curr_sum = rq->nt_curr_runnable_sum;
-
- if (full_window)
- curr_sum = nt_curr_sum = 0;
-
- rq->prev_runnable_sum = curr_sum;
- rq->nt_prev_runnable_sum = nt_curr_sum;
-
- rq->curr_runnable_sum = 0;
- rq->nt_curr_runnable_sum = 0;
- }
- } else {
- if (p_is_curr_task && new_window) {
- flip_counters = 1;
- if (full_window)
- prev_sum_reset = 1;
- }
- }
-
/*
* Handle per-task window rollover. We don't care about the idle
* task or exiting tasks.
@@ -2369,26 +2293,25 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
rollover_task_window(p, full_window);
}
- if (flip_counters) {
- u64 curr_sum = *curr_runnable_sum;
- u64 nt_curr_sum = *nt_curr_runnable_sum;
+ if (p_is_curr_task && new_window) {
+ rollover_cpu_window(rq, full_window);
+ rollover_top_tasks(rq, full_window);
+ }
- if (prev_sum_reset)
- curr_sum = nt_curr_sum = 0;
+ if (!account_busy_for_cpu_time(rq, p, irqtime, event))
+ goto done;
- *prev_runnable_sum = curr_sum;
- *nt_prev_runnable_sum = nt_curr_sum;
+ grp = p->grp;
+ if (grp && sched_freq_aggregate) {
+ struct group_cpu_time *cpu_time = &rq->grp_time;
- *curr_runnable_sum = 0;
- *nt_curr_runnable_sum = 0;
+ curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ prev_runnable_sum = &cpu_time->prev_runnable_sum;
- if (p_is_curr_task)
- rollover_top_tasks(rq, full_window);
+ nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
}
- if (!account_busy_for_cpu_time(rq, p, irqtime, event))
- goto done;
-
if (!new_window) {
/*
* account_busy_for_cpu_time() = 1 so busy time needs
@@ -2905,7 +2828,7 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
done:
trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime,
rq->cc.cycles, rq->cc.time,
- _group_cpu_time(p->grp, cpu_of(rq)));
+ p->grp ? &rq->grp_time : NULL);
p->ravg.mark_start = wallclock;
}
@@ -3012,7 +2935,7 @@ void set_window_start(struct rq *rq)
{
static int sync_cpu_available;
- if (rq->window_start || !sched_enable_hmp)
+ if (rq->window_start)
return;
if (!sync_cpu_available) {
@@ -3063,7 +2986,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
u64 start_ts = sched_ktime_clock();
int reason = WINDOW_CHANGE;
unsigned int old = 0, new = 0;
- struct related_thread_group *grp;
local_irq_save(flags);
@@ -3081,19 +3003,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
read_unlock(&tasklist_lock);
- list_for_each_entry(grp, &active_related_thread_groups, list) {
- int j;
-
- for_each_possible_cpu(j) {
- struct group_cpu_time *cpu_time;
- /* Protected by rq lock */
- cpu_time = _group_cpu_time(grp, j);
- memset(cpu_time, 0, sizeof(struct group_cpu_time));
- if (window_start)
- cpu_time->window_start = window_start;
- }
- }
-
if (window_size) {
sched_ravg_window = window_size * TICK_NSEC;
set_hmp_defaults();
@@ -3109,6 +3018,7 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
rq->window_start = window_start;
rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+ memset(&rq->grp_time, 0, sizeof(struct group_cpu_time));
for (i = 0; i < NUM_TRACKED_WINDOWS; i++) {
memset(&rq->load_subs[i], 0,
sizeof(struct load_subtractions));
@@ -3204,9 +3114,6 @@ static inline u64 freq_policy_load(struct rq *rq, u64 load)
return load;
}
-static inline void
-sync_window_start(struct rq *rq, struct group_cpu_time *cpu_time);
-
void sched_get_cpus_busy(struct sched_load *busy,
const struct cpumask *query_cpus)
{
@@ -3223,7 +3130,6 @@ void sched_get_cpus_busy(struct sched_load *busy,
unsigned int window_size;
u64 max_prev_sum = 0;
int max_busy_cpu = cpumask_first(query_cpus);
- struct related_thread_group *grp;
u64 total_group_load = 0, total_ngload = 0;
bool aggregate_load = false;
struct sched_cluster *cluster = cpu_cluster(cpumask_first(query_cpus));
@@ -3233,8 +3139,6 @@ void sched_get_cpus_busy(struct sched_load *busy,
local_irq_save(flags);
- read_lock(&related_thread_group_lock);
-
/*
* This function could be called in timer context, and the
* current task may have been executing for a long time. Ensure
@@ -3287,15 +3191,6 @@ void sched_get_cpus_busy(struct sched_load *busy,
raw_spin_unlock(&cluster->load_lock);
- for_each_related_thread_group(grp) {
- for_each_cpu(cpu, query_cpus) {
- /* Protected by rq_lock */
- struct group_cpu_time *cpu_time =
- _group_cpu_time(grp, cpu);
- sync_window_start(cpu_rq(cpu), cpu_time);
- }
- }
-
group_load_in_freq_domain(
&cpu_rq(max_busy_cpu)->freq_domain_cpumask,
&total_group_load, &total_ngload);
@@ -3316,7 +3211,8 @@ void sched_get_cpus_busy(struct sched_load *busy,
ngload[i] = total_ngload;
}
} else {
- _group_load_in_cpu(cpu, &group_load[i], &ngload[i]);
+ group_load[i] = rq->grp_time.prev_runnable_sum;
+ ngload[i] = rq->grp_time.nt_prev_runnable_sum;
}
load[i] += group_load[i];
@@ -3341,8 +3237,6 @@ skip_early:
for_each_cpu(cpu, query_cpus)
raw_spin_unlock(&(cpu_rq(cpu))->lock);
- read_unlock(&related_thread_group_lock);
-
local_irq_restore(flags);
i = 0;
@@ -3620,7 +3514,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
bool new_task;
struct related_thread_group *grp;
- if (!sched_enable_hmp || (!p->on_rq && p->state != TASK_WAKING))
+ if (!p->on_rq && p->state != TASK_WAKING)
return;
if (exiting_task(p)) {
@@ -3659,18 +3553,17 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
if (grp && sched_freq_aggregate) {
struct group_cpu_time *cpu_time;
- cpu_time = _group_cpu_time(grp, cpu_of(src_rq));
+ cpu_time = &src_rq->grp_time;
src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
- cpu_time = _group_cpu_time(grp, cpu_of(dest_rq));
+ cpu_time = &dest_rq->grp_time;
dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
- sync_window_start(dest_rq, cpu_time);
if (p->ravg.curr_window) {
*src_curr_runnable_sum -= p->ravg.curr_window;
@@ -3799,61 +3692,6 @@ void set_preferred_cluster(struct related_thread_group *grp)
#define DEFAULT_CGROUP_COLOC_ID 1
-static inline void free_group_cputime(struct related_thread_group *grp)
-{
- free_percpu(grp->cpu_time);
-}
-
-static int alloc_group_cputime(struct related_thread_group *grp)
-{
- int i;
- struct group_cpu_time *cpu_time;
- int cpu = raw_smp_processor_id();
- struct rq *rq = cpu_rq(cpu);
- u64 window_start = rq->window_start;
-
- grp->cpu_time = alloc_percpu_gfp(struct group_cpu_time, GFP_ATOMIC);
- if (!grp->cpu_time)
- return -ENOMEM;
-
- for_each_possible_cpu(i) {
- cpu_time = per_cpu_ptr(grp->cpu_time, i);
- memset(cpu_time, 0, sizeof(struct group_cpu_time));
- cpu_time->window_start = window_start;
- }
-
- return 0;
-}
-
-/*
- * A group's window_start may be behind. When moving it forward, flip prev/curr
- * counters. When moving forward > 1 window, prev counter is set to 0
- */
-static inline void
-sync_window_start(struct rq *rq, struct group_cpu_time *cpu_time)
-{
- u64 delta;
- int nr_windows;
- u64 curr_sum = cpu_time->curr_runnable_sum;
- u64 nt_curr_sum = cpu_time->nt_curr_runnable_sum;
-
- delta = rq->window_start - cpu_time->window_start;
- if (!delta)
- return;
-
- nr_windows = div64_u64(delta, sched_ravg_window);
- if (nr_windows > 1)
- curr_sum = nt_curr_sum = 0;
-
- cpu_time->prev_runnable_sum = curr_sum;
- cpu_time->curr_runnable_sum = 0;
-
- cpu_time->nt_prev_runnable_sum = nt_curr_sum;
- cpu_time->nt_curr_runnable_sum = 0;
-
- cpu_time->window_start = rq->window_start;
-}
-
/*
* Task's cpu usage is accounted in:
* rq->curr/prev_runnable_sum, when its ->grp is NULL
@@ -3871,7 +3709,6 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
- struct migration_sum_data d;
int migrate_type;
int cpu = cpu_of(rq);
bool new_task;
@@ -3886,15 +3723,10 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
new_task = is_new_task(p);
- /* cpu_time protected by related_thread_group_lock, grp->lock rq_lock */
- cpu_time = _group_cpu_time(grp, cpu);
+ cpu_time = &rq->grp_time;
if (event == ADD_TASK) {
- sync_window_start(rq, cpu_time);
migrate_type = RQ_TO_GROUP;
- d.src_rq = rq;
- d.src_cpu_time = NULL;
- d.dst_rq = NULL;
- d.dst_cpu_time = cpu_time;
+
src_curr_runnable_sum = &rq->curr_runnable_sum;
dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
src_prev_runnable_sum = &rq->prev_runnable_sum;
@@ -3919,17 +3751,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
} else {
migrate_type = GROUP_TO_RQ;
- d.src_rq = NULL;
- d.src_cpu_time = cpu_time;
- d.dst_rq = rq;
- d.dst_cpu_time = NULL;
- /*
- * In case of REM_TASK, cpu_time->window_start would be
- * uptodate, because of the update_task_ravg() we called
- * above on the moving task. Hence no need for
- * sync_window_start()
- */
src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
dst_curr_runnable_sum = &rq->curr_runnable_sum;
src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
@@ -3975,7 +3797,7 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
p->ravg.curr_window_cpu[cpu] = p->ravg.curr_window;
p->ravg.prev_window_cpu[cpu] = p->ravg.prev_window;
- trace_sched_migration_update_sum(p, migrate_type, &d);
+ trace_sched_migration_update_sum(p, migrate_type, rq);
BUG_ON((s64)*src_curr_runnable_sum < 0);
BUG_ON((s64)*src_prev_runnable_sum < 0);
@@ -3983,18 +3805,6 @@ static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
}
-static inline struct group_cpu_time *
-task_group_cpu_time(struct task_struct *p, int cpu)
-{
- return _group_cpu_time(rcu_dereference(p->grp), cpu);
-}
-
-static inline struct group_cpu_time *
-_group_cpu_time(struct related_thread_group *grp, int cpu)
-{
- return grp ? per_cpu_ptr(grp->cpu_time, cpu) : NULL;
-}
-
static inline struct related_thread_group*
lookup_related_thread_group(unsigned int group_id)
{
@@ -4014,12 +3824,6 @@ int alloc_related_thread_groups(void)
goto err;
}
- if (alloc_group_cputime(grp)) {
- kfree(grp);
- ret = -ENOMEM;
- goto err;
- }
-
grp->id = i;
INIT_LIST_HEAD(&grp->tasks);
INIT_LIST_HEAD(&grp->list);
@@ -4034,7 +3838,6 @@ err:
for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
grp = lookup_related_thread_group(i);
if (grp) {
- free_group_cputime(grp);
kfree(grp);
related_thread_groups[i] = NULL;
} else {
@@ -4418,9 +4221,6 @@ static int register_sched_callback(void)
{
int ret;
- if (!sched_enable_hmp)
- return 0;
-
ret = cpufreq_register_notifier(&notifier_policy_block,
CPUFREQ_POLICY_NOTIFIER);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 3fe00d6fa335..b72352bbd752 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1406,6 +1406,7 @@ static void yield_task_rt(struct rq *rq)
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);
+#ifdef CONFIG_SCHED_HMP
static int
select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
{
@@ -1419,6 +1420,7 @@ select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
return cpu;
}
+#endif
static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
@@ -1426,8 +1428,9 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
struct task_struct *curr;
struct rq *rq;
- if (sched_enable_hmp)
- return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
+#ifdef CONFIG_SCHED_HMP
+ return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
+#endif
/* For anything but wake ups, just return the task_cpu */
if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
@@ -1796,14 +1799,6 @@ static int find_lowest_rq_hmp(struct task_struct *task)
return best_cpu;
}
-
-#else /* CONFIG_SCHED_HMP */
-
-static int find_lowest_rq_hmp(struct task_struct *task)
-{
- return -1;
-}
-
#endif /* CONFIG_SCHED_HMP */
static int find_lowest_rq(struct task_struct *task)
@@ -1813,8 +1808,9 @@ static int find_lowest_rq(struct task_struct *task)
int this_cpu = smp_processor_id();
int cpu = task_cpu(task);
- if (sched_enable_hmp)
- return find_lowest_rq_hmp(task);
+#ifdef CONFIG_SCHED_HMP
+ return find_lowest_rq_hmp(task);
+#endif
/* Make sure the mask is initialized first */
if (unlikely(!lowest_mask))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d907eeb297a3..a91f4cc1d8d3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -366,6 +366,13 @@ struct load_subtractions {
u64 new_subs;
};
+struct group_cpu_time {
+ u64 curr_runnable_sum;
+ u64 prev_runnable_sum;
+ u64 nt_curr_runnable_sum;
+ u64 nt_prev_runnable_sum;
+};
+
struct sched_cluster {
raw_spinlock_t load_lock;
struct list_head list;
@@ -407,12 +414,6 @@ struct related_thread_group {
struct sched_cluster *preferred_cluster;
struct rcu_head rcu;
u64 last_update;
- struct group_cpu_time __percpu *cpu_time; /* one per cluster */
-};
-
-struct migration_sum_data {
- struct rq *src_rq, *dst_rq;
- struct group_cpu_time *src_cpu_time, *dst_cpu_time;
};
extern struct list_head cluster_head;
@@ -776,6 +777,7 @@ struct rq {
u64 prev_runnable_sum;
u64 nt_curr_runnable_sum;
u64 nt_prev_runnable_sum;
+ struct group_cpu_time grp_time;
struct load_subtractions load_subs[NUM_TRACKED_WINDOWS];
DECLARE_BITMAP_ARRAY(top_tasks_bitmap,
NUM_TRACKED_WINDOWS, NUM_LOAD_INDICES);
@@ -1083,7 +1085,6 @@ enum sched_boost_policy {
extern struct mutex policy_mutex;
extern unsigned int sched_ravg_window;
extern unsigned int sched_disable_window_stats;
-extern unsigned int sched_enable_hmp;
extern unsigned int max_possible_freq;
extern unsigned int min_max_freq;
extern unsigned int pct_task_load(struct task_struct *p);
@@ -1127,7 +1128,6 @@ extern void update_cluster_topology(void);
extern void note_task_waking(struct task_struct *p, u64 wallclock);
extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
extern void init_clusters(void);
-extern int __init set_sched_enable_hmp(char *str);
extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
extern unsigned int max_task_load(void);
extern void sched_account_irqtime(int cpu, struct task_struct *curr,
@@ -1257,7 +1257,7 @@ inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
{
u32 task_load;
- if (!sched_enable_hmp || sched_disable_window_stats)
+ if (sched_disable_window_stats)
return;
task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
@@ -1272,7 +1272,7 @@ dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
{
u32 task_load;
- if (!sched_enable_hmp || sched_disable_window_stats)
+ if (sched_disable_window_stats)
return;
task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
@@ -1290,7 +1290,7 @@ fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
struct task_struct *p, s64 task_load_delta,
s64 pred_demand_delta)
{
- if (!sched_enable_hmp || sched_disable_window_stats)
+ if (sched_disable_window_stats)
return;
stats->cumulative_runnable_avg += task_load_delta;
@@ -1350,14 +1350,6 @@ check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
extern void notify_migration(int src_cpu, int dest_cpu,
bool src_cpu_dead, struct task_struct *p);
-struct group_cpu_time {
- u64 curr_runnable_sum;
- u64 prev_runnable_sum;
- u64 nt_curr_runnable_sum;
- u64 nt_prev_runnable_sum;
- u64 window_start;
-};
-
/* Is frequency of two cpus synchronized with each other? */
static inline int same_freq_domain(int src_cpu, int dst_cpu)
{
@@ -1667,7 +1659,6 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
static inline void add_new_task_to_grp(struct task_struct *new) {}
-#define sched_enable_hmp 0
#define PRED_DEMAND_DELTA (0)
static inline void
diff --git a/net/rmnet_data/rmnet_data_config.c b/net/rmnet_data/rmnet_data_config.c
index ebce455b645d..fb4c60fc2203 100644
--- a/net/rmnet_data/rmnet_data_config.c
+++ b/net/rmnet_data/rmnet_data_config.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -540,6 +540,11 @@ void rmnet_config_netlink_msg_handler(struct sk_buff *skb)
nlmsg_header = (struct nlmsghdr *) skb->data;
rmnet_header = (struct rmnet_nl_msg_s *) nlmsg_data(nlmsg_header);
+ if (!nlmsg_header->nlmsg_pid ||
+ (nlmsg_header->nlmsg_len < sizeof(struct nlmsghdr) +
+ sizeof(struct rmnet_nl_msg_s)))
+ return;
+
LOGL("Netlink message pid=%d, seq=%d, length=%d, rmnet_type=%d",
nlmsg_header->nlmsg_pid,
nlmsg_header->nlmsg_seq,
diff --git a/net/rmnet_data/rmnet_data_config.h b/net/rmnet_data/rmnet_data_config.h
index 78329c38b364..f19fbb378111 100644
--- a/net/rmnet_data/rmnet_data_config.h
+++ b/net/rmnet_data/rmnet_data_config.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2014, 2016-2017 The Linux Foundation.
+ * All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -40,6 +41,7 @@ struct rmnet_logical_ep_conf_s {
uint8_t rmnet_mode;
uint8_t mux_id;
struct timespec flush_time;
+ unsigned int flush_byte_count;
struct net_device *egress_dev;
};
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
index 35b94e9da0d9..cef9369eace5 100644
--- a/net/rmnet_data/rmnet_data_handlers.c
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -51,6 +51,22 @@ long gro_flush_time __read_mostly = 10000L;
module_param(gro_flush_time, long, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(gro_flush_time, "Flush GRO when spaced more than this");
+unsigned int gro_min_byte_thresh __read_mostly = 7500;
+module_param(gro_min_byte_thresh, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(gro_min_byte_thresh, "Min byte thresh to change flush time");
+
+unsigned int dynamic_gro_on __read_mostly = 1;
+module_param(dynamic_gro_on, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dynamic_gro_on, "Toggle to turn on dynamic gro logic");
+
+unsigned int upper_flush_time __read_mostly = 15000;
+module_param(upper_flush_time, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(upper_flush_time, "Upper limit on flush time");
+
+unsigned int upper_byte_limit __read_mostly = 10500;
+module_param(upper_byte_limit, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(upper_byte_limit, "Upper byte limit");
+
#define RMNET_DATA_IP_VERSION_4 0x40
#define RMNET_DATA_IP_VERSION_6 0x60
@@ -233,7 +249,8 @@ static int rmnet_check_skb_can_gro(struct sk_buff *skb)
* ratio.
*/
static void rmnet_optional_gro_flush(struct napi_struct *napi,
- struct rmnet_logical_ep_conf_s *ep)
+ struct rmnet_logical_ep_conf_s *ep,
+ unsigned int skb_size)
{
struct timespec curr_time, diff;
@@ -242,12 +259,58 @@ static void rmnet_optional_gro_flush(struct napi_struct *napi,
if (unlikely(ep->flush_time.tv_sec == 0)) {
getnstimeofday(&ep->flush_time);
+ ep->flush_byte_count = 0;
} else {
getnstimeofday(&(curr_time));
diff = timespec_sub(curr_time, ep->flush_time);
- if ((diff.tv_sec > 0) || (diff.tv_nsec > gro_flush_time)) {
+ ep->flush_byte_count += skb_size;
+
+ if (dynamic_gro_on) {
+ if ((!(diff.tv_sec > 0) || diff.tv_nsec <=
+ gro_flush_time) &&
+ ep->flush_byte_count >=
+ gro_min_byte_thresh) {
+ /* Processed many bytes in a small time window.
+ * No longer need to flush so often and we can
+ * increase our byte limit
+ */
+ gro_flush_time = upper_flush_time;
+ gro_min_byte_thresh = upper_byte_limit;
+ } else if ((diff.tv_sec > 0 ||
+ diff.tv_nsec > gro_flush_time) &&
+ ep->flush_byte_count <
+ gro_min_byte_thresh) {
+ /* We have not hit our time limit and we are not
+ * receiving many bytes. Demote ourselves to the
+ * lowest limits and flush
+ */
+ napi_gro_flush(napi, false);
+ getnstimeofday(&ep->flush_time);
+ ep->flush_byte_count = 0;
+ gro_flush_time = 10000L;
+ gro_min_byte_thresh = 7500L;
+ } else if ((diff.tv_sec > 0 ||
+ diff.tv_nsec > gro_flush_time) &&
+ ep->flush_byte_count >=
+ gro_min_byte_thresh) {
+ /* Above byte and time limit, therefore we can
+ * move/maintain our limits to be the max
+ * and flush
+ */
+ napi_gro_flush(napi, false);
+ getnstimeofday(&ep->flush_time);
+ ep->flush_byte_count = 0;
+ gro_flush_time = upper_flush_time;
+ gro_min_byte_thresh = upper_byte_limit;
+ }
+ /* else, below time limit and below
+ * byte thresh, so change nothing
+ */
+ } else if (diff.tv_sec > 0 ||
+ diff.tv_nsec >= gro_flush_time) {
napi_gro_flush(napi, false);
getnstimeofday(&ep->flush_time);
+ ep->flush_byte_count = 0;
}
}
}
@@ -267,6 +330,7 @@ static rx_handler_result_t __rmnet_deliver_skb(struct sk_buff *skb,
{
struct napi_struct *napi = NULL;
gro_result_t gro_res;
+ unsigned int skb_size;
trace___rmnet_deliver_skb(skb);
switch (ep->rmnet_mode) {
@@ -290,9 +354,12 @@ static rx_handler_result_t __rmnet_deliver_skb(struct sk_buff *skb,
(skb->dev->features & NETIF_F_GRO)) {
napi = get_current_napi_context();
if (napi != NULL) {
+ skb_size = skb->len;
gro_res = napi_gro_receive(napi, skb);
trace_rmnet_gro_downlink(gro_res);
- rmnet_optional_gro_flush(napi, ep);
+ rmnet_optional_gro_flush(
+ napi, ep,
+ skb_size);
} else {
WARN_ONCE(1, "current napi is NULL\n");
netif_receive_skb(skb);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index ef394e8a42bc..00f5cededd38 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1080,7 +1080,7 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
58500000,
65000000,
78000000,
- 0,
+ 86500000,
},
{ 13500000,
27000000,
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 328b4a43f09a..b73133885384 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -375,7 +375,8 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
* the elapsed time to detect xruns.
*/
jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
- if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
+ if ((jdelta < runtime->hw_ptr_buffer_jiffies / 2) ||
+ (runtime->hw_ptr_buffer_jiffies <= 0))
goto no_delta_check;
hdelta = jdelta - delta * HZ / runtime->rate;
xrun_threshold = runtime->hw_ptr_buffer_jiffies / 2 + 1;
diff --git a/sound/soc/codecs/msm_sdw/msm-sdw-tables.c b/sound/soc/codecs/msm_sdw/msm-sdw-tables.c
index 767b9052a7da..4cbdb728ef41 100644
--- a/sound/soc/codecs/msm_sdw/msm-sdw-tables.c
+++ b/sound/soc/codecs/msm_sdw/msm-sdw-tables.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -105,6 +105,7 @@ const u8 msm_sdw_page_map[MSM_SDW_MAX_REGISTER] = {
[MSM_SDW_TOP_TOP_CFG1] = 0xd,
[MSM_SDW_TOP_RX_I2S_CTL] = 0xd,
[MSM_SDW_TOP_TX_I2S_CTL] = 0xd,
+ [MSM_SDW_TOP_I2S_CLK] = 0xd,
[MSM_SDW_TOP_RX7_PATH_INPUT0_MUX] = 0xd,
[MSM_SDW_TOP_RX7_PATH_INPUT1_MUX] = 0xd,
[MSM_SDW_TOP_RX8_PATH_INPUT0_MUX] = 0xd,
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw.h b/sound/soc/codecs/msm_sdw/msm_sdw.h
index d464c5064635..8e7612c85455 100644
--- a/sound/soc/codecs/msm_sdw/msm_sdw.h
+++ b/sound/soc/codecs/msm_sdw/msm_sdw.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -100,6 +100,7 @@ struct msm_sdw_priv {
int (*multi_reg_write)(struct msm_sdw_priv *msm_sdw, const void *data,
size_t count);
struct snd_soc_codec *codec;
+ struct device_node *sdw_gpio_p; /* used by pinctrl API */
/* SoundWire data structure */
struct msm_sdw_ctrl_data *sdw_ctrl_data;
int nr;
@@ -120,6 +121,7 @@ struct msm_sdw_priv {
struct mutex sdw_write_lock;
struct mutex sdw_clk_lock;
int sdw_clk_users;
+ int sdw_mclk_users;
int sdw_irq;
int int_mclk1_rsc_ref;
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
index c8f7b05aef87..850238764d87 100644
--- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <linux/init.h>
#include <linux/platform_device.h>
+#include <linux/mfd/msm-cdc-pinctrl.h>
#include <linux/printk.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
@@ -50,13 +51,15 @@
static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
static struct snd_soc_dai_driver msm_sdw_dai[];
static bool initial_boot = true;
+static bool is_ssr_en;
+static bool skip_irq = true;
static int msm_sdw_config_ear_spkr_gain(struct snd_soc_codec *codec,
int event, int gain_reg);
static int msm_sdw_config_compander(struct snd_soc_codec *, int, int);
-static int msm_sdw_mclk_enable(struct snd_soc_codec *codec,
+static int msm_sdw_mclk_enable(struct msm_sdw_priv *msm_sdw,
int mclk_enable, bool dapm);
-static int msm_int_enable_sdw_cdc_clk(struct snd_soc_codec *codec,
+static int msm_int_enable_sdw_cdc_clk(struct msm_sdw_priv *msm_sdw,
int enable, bool dapm);
enum {
@@ -199,39 +202,10 @@ static int msm_enable_sdw_npl_clk(struct msm_sdw_priv *msm_sdw, int enable)
return ret;
}
-/**
- * msm_sdw_gpio_cb - Register callback by machine driver for sdw gpio.
- *
- * @sdw_cdc_gpio_fn: Function pointer to trigger for enable/disable sdw gpios.
- * @codec: sdw codec instance.
- *
- */
-void msm_sdw_gpio_cb(
- int (*sdw_cdc_gpio_fn)(bool enable, struct snd_soc_codec *codec),
- struct snd_soc_codec *codec)
-{
- struct msm_sdw_priv *msm_sdw;
-
- if (!codec) {
- pr_err("%s:NULL codec pointer!\n", __func__);
- return;
- }
- msm_sdw = snd_soc_codec_get_drvdata(codec);
- msm_sdw->sdw_cdc_gpio_fn = sdw_cdc_gpio_fn;
-}
-EXPORT_SYMBOL(msm_sdw_gpio_cb);
-
-static int msm_int_enable_sdw_cdc_clk(struct snd_soc_codec *codec,
+static int msm_int_enable_sdw_cdc_clk(struct msm_sdw_priv *msm_sdw,
int enable, bool dapm)
{
int ret = 0;
- struct msm_sdw_priv *msm_sdw;
-
- if (!codec) {
- pr_err("%s:NULL codec pointer\n", __func__);
- return -EINVAL;
- }
- msm_sdw = snd_soc_codec_get_drvdata(codec);
mutex_lock(&msm_sdw->cdc_int_mclk1_mutex);
dev_dbg(msm_sdw->dev, "%s: enable %d mclk1 ref counter %d\n",
@@ -297,7 +271,6 @@ static void msm_disable_int_mclk1(struct work_struct *work)
dev_dbg(msm_sdw->dev, "%s: mclk1_enabled %d mclk1_rsc_ref %d\n",
__func__, msm_sdw->int_mclk1_enabled,
msm_sdw->int_mclk1_rsc_ref);
-
if (msm_sdw->int_mclk1_enabled == true
&& msm_sdw->int_mclk1_rsc_ref == 0) {
dev_dbg(msm_sdw->dev, "Disable the mclk1\n");
@@ -321,29 +294,23 @@ static int msm_int_mclk1_event(struct snd_soc_dapm_widget *w,
struct msm_sdw_priv *msm_sdw = snd_soc_codec_get_drvdata(codec);
int ret = 0;
- mutex_lock(&msm_sdw->cdc_int_mclk1_mutex);
dev_dbg(msm_sdw->dev, "%s: event = %d\n", __func__, event);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
/* enable the codec mclk config */
- msm_int_enable_sdw_cdc_clk(codec, 1, true);
- msm_sdw_mclk_enable(codec, 1, true);
- if (msm_sdw->sdw_cdc_gpio_fn)
- msm_sdw->sdw_cdc_gpio_fn(true, codec);
+ msm_int_enable_sdw_cdc_clk(msm_sdw, 1, true);
+ msm_sdw_mclk_enable(msm_sdw, 1, true);
break;
case SND_SOC_DAPM_POST_PMD:
/* disable the codec mclk config */
- if (msm_sdw->sdw_cdc_gpio_fn)
- msm_sdw->sdw_cdc_gpio_fn(false, codec);
- msm_sdw_mclk_enable(codec, 0, true);
- msm_int_enable_sdw_cdc_clk(codec, 0, true);
+ msm_sdw_mclk_enable(msm_sdw, 0, true);
+ msm_int_enable_sdw_cdc_clk(msm_sdw, 0, true);
break;
default:
dev_err(msm_sdw->dev,
"%s: invalid DAPM event %d\n", __func__, event);
ret = -EINVAL;
}
- mutex_unlock(&msm_sdw->cdc_int_mclk1_mutex);
return ret;
}
@@ -353,7 +320,8 @@ static int msm_sdw_ahb_write_device(struct msm_sdw_priv *msm_sdw,
u32 temp = (u32)(*value) & 0x000000FF;
if (!msm_sdw->dev_up) {
- dev_dbg(msm_sdw->dev, "%s: q6 not ready\n", __func__);
+ dev_err_ratelimited(msm_sdw->dev, "%s: q6 not ready\n",
+ __func__);
return 0;
}
@@ -367,7 +335,8 @@ static int msm_sdw_ahb_read_device(struct msm_sdw_priv *msm_sdw,
u32 temp;
if (!msm_sdw->dev_up) {
- dev_dbg(msm_sdw->dev, "%s: q6 not ready\n", __func__);
+ dev_err_ratelimited(msm_sdw->dev, "%s: q6 not ready\n",
+ __func__);
return 0;
}
@@ -454,6 +423,8 @@ static int __msm_sdw_reg_write(struct msm_sdw_priv *msm_sdw, unsigned short reg,
&((u8 *)src)[i]);
unlock_exit:
mutex_unlock(&msm_sdw->cdc_int_mclk1_mutex);
+ dev_dbg(msm_sdw->dev, "Write 0x%x val 0x%02x\n",
+ reg, (u32)(*(u32 *)src));
return ret;
}
@@ -592,6 +563,9 @@ static int msm_sdwm_handle_irq(void *handle,
}
msm_sdw = (struct msm_sdw_priv *) handle;
+ if (skip_irq)
+ return ret;
+
if (action) {
ret = request_threaded_irq(msm_sdw->sdw_irq, NULL,
swrm_irq_handler,
@@ -634,6 +608,55 @@ static void msm_sdw_codec_hd2_control(struct snd_soc_codec *codec,
}
}
+static int msm_sdw_enable_swr(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct msm_sdw_priv *msm_sdw;
+ int i, ch_cnt;
+
+ msm_sdw = snd_soc_codec_get_drvdata(codec);
+
+ if (!msm_sdw->nr)
+ return 0;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (!(strnstr(w->name, "RX4", sizeof("RX4 MIX"))) &&
+ !msm_sdw->rx_4_count)
+ msm_sdw->rx_4_count++;
+ if (!(strnstr(w->name, "RX5", sizeof("RX5 MIX"))) &&
+ !msm_sdw->rx_5_count)
+ msm_sdw->rx_5_count++;
+ ch_cnt = msm_sdw->rx_4_count + msm_sdw->rx_5_count;
+
+ for (i = 0; i < msm_sdw->nr; i++) {
+ swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
+ SWR_DEVICE_UP, NULL);
+ swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
+ SWR_SET_NUM_RX_CH, &ch_cnt);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (!(strnstr(w->name, "RX4", sizeof("RX4 MIX"))) &&
+ msm_sdw->rx_4_count)
+ msm_sdw->rx_4_count--;
+ if (!(strnstr(w->name, "RX5", sizeof("RX5 MIX"))) &&
+ msm_sdw->rx_5_count)
+ msm_sdw->rx_5_count--;
+ ch_cnt = msm_sdw->rx_4_count + msm_sdw->rx_5_count;
+
+ for (i = 0; i < msm_sdw->nr; i++)
+ swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
+ SWR_SET_NUM_RX_CH, &ch_cnt);
+ break;
+ }
+ dev_dbg(msm_sdw->dev, "%s: current swr ch cnt: %d\n",
+ __func__, msm_sdw->rx_4_count + msm_sdw->rx_5_count);
+
+ return 0;
+}
+
static int msm_sdw_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
@@ -643,7 +666,6 @@ static int msm_sdw_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
u16 gain_reg;
u16 reg;
int val;
- int i, ch_cnt;
int offset_val = 0;
dev_dbg(codec->dev, "%s %d %s\n", __func__, event, w->name);
@@ -665,21 +687,6 @@ static int msm_sdw_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, reg, 0x10, 0x10);
msm_sdw_codec_hd2_control(codec, reg, event);
snd_soc_update_bits(codec, reg, 1 << 0x5, 1 << 0x5);
- /* Reset if needed */
- if (!(strcmp(w->name, "RX INT4 INTERP")) &&
- !msm_sdw->rx_4_count)
- msm_sdw->rx_4_count++;
- if (!(strcmp(w->name, "RX INT5 INTERP")) &&
- !msm_sdw->rx_5_count)
- msm_sdw->rx_5_count++;
- ch_cnt = msm_sdw->rx_4_count + msm_sdw->rx_5_count;
-
- for (i = 0; i < msm_sdw->nr; i++) {
- swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
- SWR_DEVICE_UP, NULL);
- swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
- SWR_SET_NUM_RX_CH, &ch_cnt);
- }
break;
case SND_SOC_DAPM_POST_PMU:
msm_sdw_config_compander(codec, w->shift, event);
@@ -705,6 +712,7 @@ static int msm_sdw_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
val += offset_val;
snd_soc_write(codec, gain_reg, val);
msm_sdw_config_ear_spkr_gain(codec, event, gain_reg);
+ snd_soc_update_bits(codec, reg, 0x10, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
snd_soc_update_bits(codec, reg, 1 << 0x5, 0 << 0x5);
@@ -733,17 +741,6 @@ static int msm_sdw_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
snd_soc_write(codec, gain_reg, val);
}
msm_sdw_config_ear_spkr_gain(codec, event, gain_reg);
- if (!(strcmp(w->name, "RX INT4 INTERP")) &&
- msm_sdw->rx_4_count)
- msm_sdw->rx_4_count--;
- if (!(strcmp(w->name, "RX INT5 INTERP")) &&
- msm_sdw->rx_5_count)
- msm_sdw->rx_5_count--;
- ch_cnt = msm_sdw->rx_4_count + msm_sdw->rx_5_count;
-
- for (i = 0; i < msm_sdw->nr; i++)
- swrm_wcd_notify(msm_sdw->sdw_ctrl_data[i].sdw_pdev,
- SWR_SET_NUM_RX_CH, &ch_cnt);
break;
};
@@ -989,39 +986,35 @@ static int msm_sdw_vi_feed_mixer_put(struct snd_kcontrol *kcontrol,
return 0;
}
-static int msm_sdw_mclk_enable(struct snd_soc_codec *codec,
+static int msm_sdw_mclk_enable(struct msm_sdw_priv *msm_sdw,
int mclk_enable, bool dapm)
{
- struct msm_sdw_priv *msm_sdw;
-
- if (!codec) {
- pr_err("%s:NULL codec pointer\n", __func__);
- return -EINVAL;
- }
- msm_sdw = snd_soc_codec_get_drvdata(codec);
-
- mutex_lock(&msm_sdw->cdc_int_mclk1_mutex);
- dev_dbg(codec->dev, "%s: mclk_enable = %u, dapm = %d\n",
- __func__, mclk_enable, dapm);
+ dev_dbg(msm_sdw->dev, "%s: mclk_enable = %u, dapm = %d clk_users= %d\n",
+ __func__, mclk_enable, dapm, msm_sdw->sdw_mclk_users);
if (mclk_enable) {
- snd_soc_update_bits(codec,
- MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL,
- 0x01, 0x01);
- snd_soc_update_bits(codec,
+ msm_sdw->sdw_mclk_users++;
+ if (msm_sdw->sdw_mclk_users == 1) {
+ regmap_update_bits(msm_sdw->regmap,
+ MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL,
+ 0x01, 0x01);
+ regmap_update_bits(msm_sdw->regmap,
MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL,
0x01, 0x01);
- /* 9.6MHz MCLK, set value 0x00 if other frequency */
- snd_soc_update_bits(codec,
+ /* 9.6MHz MCLK, set value 0x00 if other frequency */
+ regmap_update_bits(msm_sdw->regmap,
MSM_SDW_TOP_FREQ_MCLK, 0x01, 0x01);
+ }
} else {
- snd_soc_update_bits(codec,
- MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL,
- 0x01, 0x00);
- snd_soc_update_bits(codec,
- MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL,
- 0x01, 0x00);
+ msm_sdw->sdw_mclk_users--;
+ if (msm_sdw->sdw_mclk_users == 0) {
+ regmap_update_bits(msm_sdw->regmap,
+ MSM_SDW_CLK_RST_CTRL_FS_CNT_CONTROL,
+ 0x01, 0x00);
+ regmap_update_bits(msm_sdw->regmap,
+ MSM_SDW_CLK_RST_CTRL_MCLK_CONTROL,
+ 0x01, 0x00);
+ }
}
- mutex_unlock(&msm_sdw->cdc_int_mclk1_mutex);
return 0;
}
EXPORT_SYMBOL(msm_sdw_mclk_enable);
@@ -1193,10 +1186,13 @@ static int msm_sdw_swrm_clock(void *handle, bool enable)
if (enable) {
msm_sdw->sdw_clk_users++;
if (msm_sdw->sdw_clk_users == 1) {
- msm_enable_sdw_npl_clk(msm_sdw, true);
+ msm_int_enable_sdw_cdc_clk(msm_sdw, 1, true);
+ msm_sdw_mclk_enable(msm_sdw, 1, true);
regmap_update_bits(msm_sdw->regmap,
- MSM_SDW_CLK_RST_CTRL_SWR_CONTROL,
- 0x01, 0x01);
+ MSM_SDW_CLK_RST_CTRL_SWR_CONTROL, 0x01, 0x01);
+ msm_enable_sdw_npl_clk(msm_sdw, true);
+ msm_cdc_pinctrl_select_active_state(
+ msm_sdw->sdw_gpio_p);
}
} else {
msm_sdw->sdw_clk_users--;
@@ -1204,7 +1200,10 @@ static int msm_sdw_swrm_clock(void *handle, bool enable)
regmap_update_bits(msm_sdw->regmap,
MSM_SDW_CLK_RST_CTRL_SWR_CONTROL,
0x01, 0x00);
+ msm_sdw_mclk_enable(msm_sdw, 0, true);
+ msm_int_enable_sdw_cdc_clk(msm_sdw, 0, true);
msm_enable_sdw_npl_clk(msm_sdw, false);
+ msm_cdc_pinctrl_select_sleep_state(msm_sdw->sdw_gpio_p);
}
}
dev_dbg(msm_sdw->dev, "%s: swrm clock users %d\n",
@@ -1319,7 +1318,7 @@ static ssize_t msm_sdw_codec_version_read(struct snd_info_entry *entry,
switch (msm_sdw->version) {
case MSM_SDW_VERSION_1_0:
- len = snprintf(buffer, sizeof(buffer), "MSM_SDW_CDC_1_0\n");
+ len = snprintf(buffer, sizeof(buffer), "SDW-CDC_1_0\n");
break;
default:
len = snprintf(buffer, sizeof(buffer), "VER_UNDEFINED\n");
@@ -1355,10 +1354,10 @@ int msm_sdw_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
msm_sdw = snd_soc_codec_get_drvdata(codec);
card = codec->component.card;
msm_sdw->entry = snd_register_module_info(codec_root->module,
- "msm_sdw",
+ "152c1000.msm-sdw-codec",
codec_root);
if (!msm_sdw->entry) {
- dev_dbg(codec->dev, "%s: failed to create msm_sdw entry\n",
+ dev_err(codec->dev, "%s: failed to create msm_sdw entry\n",
__func__);
return -ENOMEM;
}
@@ -1367,7 +1366,7 @@ int msm_sdw_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
"version",
msm_sdw->entry);
if (!version_entry) {
- dev_dbg(codec->dev, "%s: failed to create msm_sdw version entry\n",
+ dev_err(codec->dev, "%s: failed to create msm_sdw version entry\n",
__func__);
return -ENOMEM;
}
@@ -1485,14 +1484,18 @@ static const struct snd_soc_dapm_widget msm_sdw_dapm_widgets[] = {
SND_SOC_DAPM_MIXER("AIF1_VI_SDW Mixer", SND_SOC_NOPM, AIF1_SDW_VIFEED,
0, aif1_vi_mixer, ARRAY_SIZE(aif1_vi_mixer)),
- SND_SOC_DAPM_MUX("RX4 MIX1 INP1", SND_SOC_NOPM, 0, 0,
- &rx4_mix1_inp1_mux),
- SND_SOC_DAPM_MUX("RX4 MIX1 INP2", SND_SOC_NOPM, 0, 0,
- &rx4_mix1_inp2_mux),
- SND_SOC_DAPM_MUX("RX5 MIX1 INP1", SND_SOC_NOPM, 0, 0,
- &rx5_mix1_inp1_mux),
- SND_SOC_DAPM_MUX("RX5 MIX1 INP2", SND_SOC_NOPM, 0, 0,
- &rx5_mix1_inp2_mux),
+ SND_SOC_DAPM_MUX_E("RX4 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx4_mix1_inp1_mux, msm_sdw_enable_swr,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX_E("RX4 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx4_mix1_inp2_mux, msm_sdw_enable_swr,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX_E("RX5 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx5_mix1_inp1_mux, msm_sdw_enable_swr,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX_E("RX5 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx5_mix1_inp2_mux, msm_sdw_enable_swr,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_MIXER("RX4 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
SND_SOC_DAPM_MIXER("RX5 MIX1", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -1588,6 +1591,24 @@ static const struct msm_sdw_reg_mask_val msm_sdw_reg_init[] = {
{MSM_SDW_BOOST1_BOOST_CFG1, 0x3F, 0x12},
{MSM_SDW_BOOST1_BOOST_CFG2, 0x1C, 0x08},
{MSM_SDW_COMPANDER8_CTL7, 0x1E, 0x18},
+ {MSM_SDW_BOOST0_BOOST_CTL, 0x70, 0x50},
+ {MSM_SDW_BOOST1_BOOST_CTL, 0x70, 0x50},
+ {MSM_SDW_RX7_RX_PATH_CFG1, 0x08, 0x08},
+ {MSM_SDW_RX8_RX_PATH_CFG1, 0x08, 0x08},
+ {MSM_SDW_TOP_TOP_CFG1, 0x02, 0x02},
+ {MSM_SDW_TOP_TOP_CFG1, 0x01, 0x01},
+ {MSM_SDW_TX9_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+ {MSM_SDW_TX10_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+ {MSM_SDW_TX11_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+ {MSM_SDW_TX12_SPKR_PROT_PATH_CFG0, 0x01, 0x01},
+ {MSM_SDW_COMPANDER7_CTL3, 0x80, 0x80},
+ {MSM_SDW_COMPANDER8_CTL3, 0x80, 0x80},
+ {MSM_SDW_COMPANDER7_CTL7, 0x01, 0x01},
+ {MSM_SDW_COMPANDER8_CTL7, 0x01, 0x01},
+ {MSM_SDW_RX7_RX_PATH_CFG0, 0x01, 0x01},
+ {MSM_SDW_RX8_RX_PATH_CFG0, 0x01, 0x01},
+ {MSM_SDW_RX7_RX_PATH_MIX_CFG, 0x01, 0x01},
+ {MSM_SDW_RX8_RX_PATH_MIX_CFG, 0x01, 0x01},
};
static void msm_sdw_init_reg(struct snd_soc_codec *codec)
@@ -1654,14 +1675,17 @@ static int msm_sdw_codec_probe(struct snd_soc_codec *codec)
msm_sdw->spkr_gain_offset = RX_GAIN_OFFSET_0_DB;
msm_sdw_init_reg(codec);
msm_sdw->version = MSM_SDW_VERSION_1_0;
- msm_sdw->dev_up = true;
- msm_sdw->service_nb.notifier_call = msm_sdw_notifier_service_cb;
- ret = audio_notifier_register("msm_sdw", AUDIO_NOTIFIER_ADSP_DOMAIN,
- &msm_sdw->service_nb);
- if (ret < 0)
- dev_err(msm_sdw->dev,
- "%s: Audio notifier register failed ret = %d\n",
- __func__, ret);
+
+ if (is_ssr_en) {
+ msm_sdw->service_nb.notifier_call = msm_sdw_notifier_service_cb;
+ ret = audio_notifier_register("msm_sdw",
+ AUDIO_NOTIFIER_ADSP_DOMAIN,
+ &msm_sdw->service_nb);
+ if (ret < 0)
+ dev_err(msm_sdw->dev,
+ "%s: Audio notifier register failed ret = %d\n",
+ __func__, ret);
+ }
return 0;
}
@@ -1801,15 +1825,11 @@ static int msm_sdw_probe(struct platform_device *pdev)
if (!msm_sdw)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, msm_sdw);
+ msm_sdw->dev_up = true;
msm_sdw->dev = &pdev->dev;
INIT_WORK(&msm_sdw->msm_sdw_add_child_devices_work,
msm_sdw_add_child_devices);
- mutex_init(&msm_sdw->sdw_read_lock);
- mutex_init(&msm_sdw->sdw_write_lock);
- mutex_init(&msm_sdw->sdw_clk_lock);
-
- mutex_init(&msm_sdw->codec_mutex);
msm_sdw->sdw_plat_data.handle = (void *) msm_sdw;
msm_sdw->sdw_plat_data.read = msm_sdw_swrm_read;
msm_sdw->sdw_plat_data.write = msm_sdw_swrm_write;
@@ -1823,6 +1843,9 @@ static int msm_sdw_probe(struct platform_device *pdev)
__func__, "reg");
goto err_sdw_cdc;
}
+
+ msm_sdw->sdw_gpio_p = of_parse_phandle(pdev->dev.of_node,
+ "qcom,cdc-sdw-gpios", 0);
msm_sdw->sdw_base = ioremap(msm_sdw->sdw_base_addr,
MSM_SDW_MAX_REGISTER);
msm_sdw->read_dev = __msm_sdw_reg_read;
@@ -1873,6 +1896,11 @@ static int msm_sdw_probe(struct platform_device *pdev)
msm_disable_int_mclk1);
mutex_init(&msm_sdw->cdc_int_mclk1_mutex);
mutex_init(&msm_sdw->sdw_npl_clk_mutex);
+ mutex_init(&msm_sdw->io_lock);
+ mutex_init(&msm_sdw->sdw_read_lock);
+ mutex_init(&msm_sdw->sdw_write_lock);
+ mutex_init(&msm_sdw->sdw_clk_lock);
+ mutex_init(&msm_sdw->codec_mutex);
schedule_work(&msm_sdw->msm_sdw_add_child_devices_work);
dev_dbg(&pdev->dev, "%s: msm_sdw driver probe done\n", __func__);
diff --git a/sound/soc/codecs/wcd-mbhc-v2.c b/sound/soc/codecs/wcd-mbhc-v2.c
index cb201899b4b8..8454ebfc6216 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.c
+++ b/sound/soc/codecs/wcd-mbhc-v2.c
@@ -1610,12 +1610,8 @@ static void wcd_mbhc_swch_irq_handler(struct wcd_mbhc *mbhc)
WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
wcd_mbhc_report_plug(mbhc, 0, SND_JACK_LINEOUT);
} else if (mbhc->current_plug == MBHC_PLUG_TYPE_ANC_HEADPHONE) {
- mbhc->mbhc_cb->irq_control(codec,
- mbhc->intr_ids->mbhc_hs_rem_intr,
- false);
- mbhc->mbhc_cb->irq_control(codec,
- mbhc->intr_ids->mbhc_hs_ins_intr,
- false);
+ wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_REM, false);
+ wcd_mbhc_hs_elec_irq(mbhc, WCD_MBHC_ELEC_HS_INS, false);
WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_DETECTION_TYPE,
0);
WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ELECT_SCHMT_ISRC, 0);
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
index aa180fa3159f..e791bf07ec67 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -607,8 +607,6 @@ static void wcd_cntl_do_shutdown(struct wcd_dsp_cntl *cntl)
/* Disable WDOG */
snd_soc_update_bits(codec, WCD934X_CPE_SS_WDOG_CFG,
0x3F, 0x01);
- snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
- 0x04, 0x00);
/* Put WDSP in reset state */
snd_soc_update_bits(codec, WCD934X_CPE_SS_CPE_CTL,
@@ -633,11 +631,7 @@ static int wcd_cntl_do_boot(struct wcd_dsp_cntl *cntl)
if (cntl->debug_mode) {
snd_soc_update_bits(codec, WCD934X_CPE_SS_WDOG_CFG,
0x3F, 0x01);
- snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
- 0x04, 0x00);
} else {
- snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
- 0x04, 0x04);
snd_soc_update_bits(codec, WCD934X_CPE_SS_WDOG_CFG,
0x3F, 0x21);
}
diff --git a/sound/soc/codecs/wcd934x/wcd934x-routing.h b/sound/soc/codecs/wcd934x/wcd934x-routing.h
index 8ca4c07a3327..cd165af43eab 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-routing.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-routing.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -914,9 +914,23 @@ const struct snd_soc_dapm_route tavil_audio_map[] = {
{"ANC OUT EAR Enable", "Switch", "ADC MUX11"},
{"RX INT0 MIX2", NULL, "ANC OUT EAR Enable"},
+ {"ANC OUT HPHL Enable", "Switch", "ADC MUX10"},
+ {"ANC OUT HPHL Enable", "Switch", "ADC MUX11"},
+ {"RX INT1 MIX2", NULL, "ANC OUT HPHL Enable"},
+
+ {"ANC OUT HPHR Enable", "Switch", "ADC MUX12"},
+ {"ANC OUT HPHR Enable", "Switch", "ADC MUX13"},
+ {"RX INT2 MIX2", NULL, "ANC OUT HPHR Enable"},
+
{"ANC EAR PA", NULL, "RX INT0 DAC"},
{"ANC EAR", NULL, "ANC EAR PA"},
+ {"ANC HPHL PA", NULL, "RX INT1 DAC"},
+ {"ANC HPHL", NULL, "ANC HPHL PA"},
+
+ {"ANC HPHR PA", NULL, "RX INT2 DAC"},
+ {"ANC HPHR", NULL, "ANC HPHR PA"},
+
{"ANC OUT EAR SPKR Enable", "Switch", "ADC MUX10"},
{"ANC OUT EAR SPKR Enable", "Switch", "ADC MUX11"},
{"RX INT7 MIX2", NULL, "ANC OUT EAR SPKR Enable"},
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 0030b1fcf773..5b300a668489 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -176,6 +176,10 @@ enum {
AUDIO_NOMINAL,
HPH_PA_DELAY,
CLSH_Z_CONFIG,
+ ANC_MIC_AMIC1,
+ ANC_MIC_AMIC2,
+ ANC_MIC_AMIC3,
+ ANC_MIC_AMIC4,
};
enum {
@@ -510,6 +514,7 @@ module_param(tx_unmute_delay, int,
S_IRUGO | S_IWUSR | S_IWGRP);
MODULE_PARM_DESC(tx_unmute_delay, "delay to unmute the tx path");
+static void tavil_codec_set_tx_hold(struct snd_soc_codec *, u16, bool);
/* Hold instance to soundwire platform device */
struct tavil_swr_ctrl_data {
@@ -998,14 +1003,30 @@ static int tavil_put_anc_func(struct snd_kcontrol *kcontrol,
snd_soc_dapm_enable_pin(dapm, "ANC EAR PA");
snd_soc_dapm_enable_pin(dapm, "ANC EAR");
snd_soc_dapm_enable_pin(dapm, "ANC SPK1 PA");
+ snd_soc_dapm_enable_pin(dapm, "ANC HPHL PA");
+ snd_soc_dapm_enable_pin(dapm, "ANC HPHR PA");
+ snd_soc_dapm_enable_pin(dapm, "ANC HPHL");
+ snd_soc_dapm_enable_pin(dapm, "ANC HPHR");
snd_soc_dapm_disable_pin(dapm, "EAR PA");
snd_soc_dapm_disable_pin(dapm, "EAR");
+ snd_soc_dapm_disable_pin(dapm, "HPHL PA");
+ snd_soc_dapm_disable_pin(dapm, "HPHR PA");
+ snd_soc_dapm_disable_pin(dapm, "HPHL");
+ snd_soc_dapm_disable_pin(dapm, "HPHR");
} else {
snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
snd_soc_dapm_disable_pin(dapm, "ANC EAR");
snd_soc_dapm_disable_pin(dapm, "ANC SPK1 PA");
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHL PA");
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHR PA");
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
snd_soc_dapm_enable_pin(dapm, "EAR PA");
snd_soc_dapm_enable_pin(dapm, "EAR");
+ snd_soc_dapm_enable_pin(dapm, "HPHL");
+ snd_soc_dapm_enable_pin(dapm, "HPHR");
+ snd_soc_dapm_enable_pin(dapm, "HPHL PA");
+ snd_soc_dapm_enable_pin(dapm, "HPHR PA");
}
mutex_unlock(&tavil->codec_mutex);
@@ -1122,16 +1143,56 @@ static int tavil_codec_enable_anc(struct snd_soc_dapm_widget *w,
}
/* Rate converter clk enable and set bypass mode */
- snd_soc_update_bits(codec, WCD934X_CDC_ANC0_RC_COMMON_CTL,
- 0x05, 0x05);
+ if (!strcmp(w->name, "RX INT0 DAC") ||
+ !strcmp(w->name, "RX INT1 DAC") ||
+ !strcmp(w->name, "ANC SPK1 PA")) {
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_ANC0_RC_COMMON_CTL,
+ 0x05, 0x05);
+ if (!strcmp(w->name, "RX INT1 DAC")) {
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_ANC0_FIFO_COMMON_CTL,
+ 0x66, 0x66);
+ }
+ } else if (!strcmp(w->name, "RX INT2 DAC")) {
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_ANC1_RC_COMMON_CTL,
+ 0x05, 0x05);
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_ANC1_FIFO_COMMON_CTL,
+ 0x66, 0x66);
+ }
+ if (!strcmp(w->name, "RX INT1 DAC"))
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_ANC0_CLK_RESET_CTL, 0x08, 0x08);
+ else if (!strcmp(w->name, "RX INT2 DAC"))
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_ANC1_CLK_RESET_CTL, 0x08, 0x08);
+
if (!hwdep_cal)
release_firmware(fw);
break;
+
+ case SND_SOC_DAPM_POST_PMU:
+ if (!strcmp(w->name, "ANC HPHL PA") ||
+ !strcmp(w->name, "ANC HPHR PA")) {
+ /* Remove ANC Rx from reset */
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_ANC0_CLK_RESET_CTL,
+ 0x08, 0x00);
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_ANC1_CLK_RESET_CTL,
+ 0x08, 0x00);
+ }
+
+ break;
+
case SND_SOC_DAPM_POST_PMD:
snd_soc_update_bits(codec, WCD934X_CDC_ANC0_RC_COMMON_CTL,
0x05, 0x00);
if (!strcmp(w->name, "ANC EAR PA") ||
- !strcmp(w->name, "ANC SPK1 PA")) {
+ !strcmp(w->name, "ANC SPK1 PA") ||
+ !strcmp(w->name, "ANC HPHL PA")) {
snd_soc_update_bits(codec, WCD934X_CDC_ANC0_MODE_1_CTL,
0x30, 0x00);
msleep(50);
@@ -1146,6 +1207,21 @@ static int tavil_codec_enable_anc(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec,
WCD934X_CDC_ANC0_CLK_RESET_CTL,
0x38, 0x00);
+ } else if (!strcmp(w->name, "ANC HPHR PA")) {
+ snd_soc_update_bits(codec, WCD934X_CDC_ANC1_MODE_1_CTL,
+ 0x30, 0x00);
+ msleep(50);
+ snd_soc_update_bits(codec, WCD934X_CDC_ANC1_MODE_1_CTL,
+ 0x01, 0x00);
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_ANC1_CLK_RESET_CTL,
+ 0x38, 0x38);
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_ANC1_CLK_RESET_CTL,
+ 0x07, 0x00);
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_ANC1_CLK_RESET_CTL,
+ 0x38, 0x00);
}
break;
}
@@ -1893,6 +1969,18 @@ static void tavil_codec_override(struct snd_soc_codec *codec, int mode,
}
}
+static void tavil_codec_clear_anc_tx_hold(struct tavil_priv *tavil)
+{
+ if (test_and_clear_bit(ANC_MIC_AMIC1, &tavil->status_mask))
+ tavil_codec_set_tx_hold(tavil->codec, WCD934X_ANA_AMIC1, false);
+ if (test_and_clear_bit(ANC_MIC_AMIC2, &tavil->status_mask))
+ tavil_codec_set_tx_hold(tavil->codec, WCD934X_ANA_AMIC2, false);
+ if (test_and_clear_bit(ANC_MIC_AMIC3, &tavil->status_mask))
+ tavil_codec_set_tx_hold(tavil->codec, WCD934X_ANA_AMIC3, false);
+ if (test_and_clear_bit(ANC_MIC_AMIC4, &tavil->status_mask))
+ tavil_codec_set_tx_hold(tavil->codec, WCD934X_ANA_AMIC4, false);
+}
+
static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
@@ -1900,6 +1988,7 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
+ int ret = 0;
dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
@@ -1908,6 +1997,11 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
if (TAVIL_IS_1_0(tavil->wcd9xxx))
snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
0x06, (0x03 << 1));
+
+ if ((!(strcmp(w->name, "ANC HPHR PA"))) &&
+ (test_bit(HPH_PA_DELAY, &tavil->status_mask)))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0xC0, 0xC0);
+
set_bit(HPH_PA_DELAY, &tavil->status_mask);
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01)) {
@@ -1917,6 +2011,16 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
}
break;
case SND_SOC_DAPM_POST_PMU:
+ if ((!(strcmp(w->name, "ANC HPHR PA")))) {
+ if ((snd_soc_read(codec, WCD934X_ANA_HPH) & 0xC0)
+ != 0xC0)
+ /*
+ * If PA_EN is not set (potentially in ANC case)
+ * then do nothing for POST_PMU and let left
+ * channel handle everything.
+ */
+ break;
+ }
/*
* 7ms sleep is required after PA is enabled as per
* HW requirement. If compander is disabled, then
@@ -1929,6 +2033,12 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
usleep_range(7000, 7100);
clear_bit(HPH_PA_DELAY, &tavil->status_mask);
}
+ if (tavil->anc_func) {
+ /* Clear Tx FE HOLD if both PAs are enabled */
+ if ((snd_soc_read(tavil->codec, WCD934X_ANA_HPH) &
+ 0xC0) == 0xC0)
+ tavil_codec_clear_anc_tx_hold(tavil);
+ }
snd_soc_update_bits(codec, WCD934X_HPH_R_TEST, 0x01, 0x01);
@@ -1951,6 +2061,34 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
(snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01))
snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2,
0x04, 0x00);
+ if (!(strcmp(w->name, "ANC HPHR PA"))) {
+ pr_debug("%s:Do everything needed for left channel\n",
+ __func__);
+ /* Do everything needed for left channel */
+ snd_soc_update_bits(codec, WCD934X_HPH_L_TEST,
+ 0x01, 0x01);
+
+ /* Remove mute */
+ snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CTL,
+ 0x10, 0x00);
+
+ /* Remove mix path mute if it is enabled */
+ if ((snd_soc_read(codec,
+ WCD934X_CDC_RX1_RX_PATH_MIX_CTL)) &
+ 0x10)
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_RX1_RX_PATH_MIX_CTL,
+ 0x10, 0x00);
+
+ if (dsd_conf && (snd_soc_read(codec,
+ WCD934X_CDC_DSD0_PATH_CTL) &
+ 0x01))
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_DSD0_CFG2,
+ 0x04, 0x00);
+ /* Remove ANC Rx from reset */
+ ret = tavil_codec_enable_anc(w, kcontrol, event);
+ }
tavil_codec_override(codec, tavil->hph_mode, event);
break;
case SND_SOC_DAPM_PRE_PMD:
@@ -1967,6 +2105,8 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
0x10, 0x10);
snd_soc_update_bits(codec, WCD934X_CDC_RX2_RX_PATH_MIX_CTL,
0x10, 0x10);
+ if (!(strcmp(w->name, "ANC HPHR PA")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
/*
@@ -1984,10 +2124,16 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
if (TAVIL_IS_1_0(tavil->wcd9xxx))
snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
0x06, 0x0);
+ if (!(strcmp(w->name, "ANC HPHR PA"))) {
+ ret = tavil_codec_enable_anc(w, kcontrol, event);
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_RX2_RX_PATH_CFG0,
+ 0x10, 0x00);
+ }
break;
};
- return 0;
+ return ret;
}
static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
@@ -1997,6 +2143,7 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
+ int ret = 0;
dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);
@@ -2005,6 +2152,10 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
if (TAVIL_IS_1_0(tavil->wcd9xxx))
snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
0x06, (0x03 << 1));
+ if ((!(strcmp(w->name, "ANC HPHL PA"))) &&
+ (test_bit(HPH_PA_DELAY, &tavil->status_mask)))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH,
+ 0xC0, 0xC0);
set_bit(HPH_PA_DELAY, &tavil->status_mask);
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01)) {
@@ -2014,6 +2165,16 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
}
break;
case SND_SOC_DAPM_POST_PMU:
+ if (!(strcmp(w->name, "ANC HPHL PA"))) {
+ if ((snd_soc_read(codec, WCD934X_ANA_HPH) & 0xC0)
+ != 0xC0)
+ /*
+ * If PA_EN is not set (potentially in ANC
+ * case) then do nothing for POST_PMU and
+ * let right channel handle everything.
+ */
+ break;
+ }
/*
* 7ms sleep is required after PA is enabled as per
* HW requirement. If compander is disabled, then
@@ -2026,6 +2187,13 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
usleep_range(7000, 7100);
clear_bit(HPH_PA_DELAY, &tavil->status_mask);
}
+ if (tavil->anc_func) {
+ /* Clear Tx FE HOLD if both PAs are enabled */
+ if ((snd_soc_read(tavil->codec, WCD934X_ANA_HPH) &
+ 0xC0) == 0xC0)
+ tavil_codec_clear_anc_tx_hold(tavil);
+ }
+
snd_soc_update_bits(codec, WCD934X_HPH_L_TEST, 0x01, 0x01);
/* Remove Mute on primary path */
snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_CTL,
@@ -2046,6 +2214,33 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
(snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01))
snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2,
0x04, 0x00);
+ if (!(strcmp(w->name, "ANC HPHL PA"))) {
+ pr_debug("%s:Do everything needed for right channel\n",
+ __func__);
+
+ /* Do everything needed for right channel */
+ snd_soc_update_bits(codec, WCD934X_HPH_R_TEST,
+ 0x01, 0x01);
+
+ /* Remove mute */
+ snd_soc_update_bits(codec, WCD934X_CDC_RX2_RX_PATH_CTL,
+ 0x10, 0x00);
+
+ /* Remove mix path mute if it is enabled */
+ if ((snd_soc_read(codec,
+ WCD934X_CDC_RX2_RX_PATH_MIX_CTL)) &
+ 0x10)
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_RX2_RX_PATH_MIX_CTL,
+ 0x10, 0x00);
+ if (dsd_conf && (snd_soc_read(codec,
+ WCD934X_CDC_DSD1_PATH_CTL) & 0x01))
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_DSD1_CFG2,
+ 0x04, 0x00);
+ /* Remove ANC Rx from reset */
+ ret = tavil_codec_enable_anc(w, kcontrol, event);
+ }
tavil_codec_override(codec, tavil->hph_mode, event);
break;
case SND_SOC_DAPM_PRE_PMD:
@@ -2063,6 +2258,9 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
0x10, 0x10);
snd_soc_update_bits(codec, WCD934X_CDC_RX1_RX_PATH_MIX_CTL,
0x10, 0x10);
+ if (!(strcmp(w->name, "ANC HPHL PA")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH,
+ 0x80, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
/*
@@ -2080,10 +2278,15 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
if (TAVIL_IS_1_0(tavil->wcd9xxx))
snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
0x06, 0x0);
+ if (!(strcmp(w->name, "ANC HPHL PA"))) {
+ ret = tavil_codec_enable_anc(w, kcontrol, event);
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_RX1_RX_PATH_CFG0, 0x10, 0x00);
+ }
break;
};
- return 0;
+ return ret;
}
static int tavil_codec_enable_lineout_pa(struct snd_soc_dapm_widget *w,
@@ -2190,12 +2393,18 @@ static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
int hph_mode = tavil->hph_mode;
u8 dem_inp;
struct tavil_dsd_config *dsd_conf = tavil->dsd_config;
+ int ret = 0;
dev_dbg(codec->dev, "%s wname: %s event: %d hph_mode: %d\n", __func__,
w->name, event, hph_mode);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ if (tavil->anc_func) {
+ ret = tavil_codec_enable_anc(w, kcontrol, event);
+ /* 40 msec delay is needed to avoid click and pop */
+ msleep(40);
+ }
/* Read DEM INP Select */
dem_inp = snd_soc_read(codec, WCD934X_CDC_RX2_RX_PATH_SEC0) &
0x03;
@@ -2226,6 +2435,10 @@ static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
WCD_CLSH_EVENT_PRE_DAC,
WCD_CLSH_STATE_HPHR,
hph_mode);
+ if (tavil->anc_func)
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_RX2_RX_PATH_CFG0,
+ 0x10, 0x10);
break;
case SND_SOC_DAPM_POST_PMD:
/* 1000us required as per HW requirement */
@@ -2269,6 +2482,11 @@ static int tavil_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ if (tavil->anc_func) {
+ ret = tavil_codec_enable_anc(w, kcontrol, event);
+ /* 40 msec delay is needed to avoid click and pop */
+ msleep(40);
+ }
/* Read DEM INP Select */
dem_inp = snd_soc_read(codec, WCD934X_CDC_RX1_RX_PATH_SEC0) &
0x03;
@@ -2300,6 +2518,11 @@ static int tavil_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
WCD_CLSH_STATE_HPHL,
hph_mode);
+ if (tavil->anc_func)
+ snd_soc_update_bits(codec,
+ WCD934X_CDC_RX1_RX_PATH_CFG0,
+ 0x10, 0x10);
+
ret = tavil_mbhc_get_impedance(tavil->mbhc,
&impedl, &impedr);
if (!ret) {
@@ -2619,6 +2842,8 @@ static int __tavil_codec_enable_mad(struct snd_soc_codec *codec, bool enable)
/* Undo reset for MAD */
snd_soc_update_bits(codec, WCD934X_CPE_SS_MAD_CTL,
0x02, 0x00);
+ snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+ 0x04, 0x04);
} else {
snd_soc_update_bits(codec, WCD934X_SOC_MAD_AUDIO_CTL_2,
0x03, 0x00);
@@ -2628,6 +2853,8 @@ static int __tavil_codec_enable_mad(struct snd_soc_codec *codec, bool enable)
/* Turn off MAD clk */
snd_soc_update_bits(codec, WCD934X_CPE_SS_MAD_CTL,
0x01, 0x00);
+ snd_soc_update_bits(codec, WCD934X_CODEC_RPM_CLK_MCLK_CFG,
+ 0x04, 0x00);
}
done:
return rc;
@@ -3180,6 +3407,15 @@ int tavil_codec_enable_interp_clk(struct snd_soc_codec *codec,
}
EXPORT_SYMBOL(tavil_codec_enable_interp_clk);
+static int tavil_anc_out_switch_cb(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ tavil_codec_enable_interp_clk(codec, event, w->shift);
+
+ return 0;
+}
static int tavil_codec_set_idle_detect_thr(struct snd_soc_codec *codec,
int interp, int path_type)
{
@@ -3634,8 +3870,8 @@ static int tavil_codec_tx_adc_cfg(struct snd_soc_dapm_widget *w,
{
int adc_mux_n = w->shift;
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct tavil_priv *tavil = snd_soc_codec_get_drvdata(codec);
int amic_n;
- u16 amic_reg;
dev_dbg(codec->dev, "%s: event: %d\n", __func__, event);
@@ -3643,8 +3879,13 @@ static int tavil_codec_tx_adc_cfg(struct snd_soc_dapm_widget *w,
case SND_SOC_DAPM_POST_PMU:
amic_n = tavil_codec_find_amic_input(codec, adc_mux_n);
if (amic_n) {
- amic_reg = WCD934X_ANA_AMIC1 + amic_n - 1;
- tavil_codec_set_tx_hold(codec, amic_reg, false);
+ /*
+ * Prevent ANC Rx pop by leaving Tx FE in HOLD
+ * state until PA is up. Track AMIC being used
+ * so we can release the HOLD later.
+ */
+ set_bit(ANC_MIC_AMIC1 + amic_n - 1,
+ &tavil->status_mask);
}
break;
default:
@@ -6467,6 +6708,12 @@ static const struct snd_kcontrol_new anc_ear_spkr_switch =
static const struct snd_kcontrol_new anc_spkr_pa_switch =
SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+static const struct snd_kcontrol_new anc_hphl_pa_switch =
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
+static const struct snd_kcontrol_new anc_hphr_pa_switch =
+ SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
+
static const struct snd_kcontrol_new mad_cpe1_switch =
SOC_DAPM_SINGLE("Switch", SND_SOC_NOPM, 0, 1, 0);
@@ -7106,6 +7353,14 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
SND_SOC_DAPM_PGA_E("ANC SPK1 PA", SND_SOC_NOPM, 0, 0, NULL, 0,
tavil_codec_enable_spkr_anc,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("ANC HPHL PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+ tavil_codec_enable_hphl_pa,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("ANC HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
+ tavil_codec_enable_hphr_pa,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_OUTPUT("EAR"),
SND_SOC_DAPM_OUTPUT("HPHL"),
@@ -7115,6 +7370,8 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
SND_SOC_DAPM_OUTPUT("SPK1 OUT"),
SND_SOC_DAPM_OUTPUT("SPK2 OUT"),
SND_SOC_DAPM_OUTPUT("ANC EAR"),
+ SND_SOC_DAPM_OUTPUT("ANC HPHL"),
+ SND_SOC_DAPM_OUTPUT("ANC HPHR"),
SND_SOC_DAPM_SWITCH("ANC OUT EAR Enable", SND_SOC_NOPM, 0, 0,
&anc_ear_switch),
@@ -7123,6 +7380,13 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
SND_SOC_DAPM_SWITCH("ANC SPKR PA Enable", SND_SOC_NOPM, 0, 0,
&anc_spkr_pa_switch),
+ SND_SOC_DAPM_SWITCH_E("ANC OUT HPHL Enable", SND_SOC_NOPM, INTERP_HPHL,
+ 0, &anc_hphl_pa_switch, tavil_anc_out_switch_cb,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_SWITCH_E("ANC OUT HPHR Enable", SND_SOC_NOPM, INTERP_HPHR,
+ 0, &anc_hphr_pa_switch, tavil_anc_out_switch_cb,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
SND_SOC_DAPM_SUPPLY("RX_BIAS", SND_SOC_NOPM, 0, 0,
tavil_codec_enable_rx_bias,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
@@ -9026,6 +9290,10 @@ static int tavil_soc_codec_probe(struct snd_soc_codec *codec)
mutex_lock(&tavil->codec_mutex);
snd_soc_dapm_disable_pin(dapm, "ANC EAR PA");
snd_soc_dapm_disable_pin(dapm, "ANC EAR");
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHL PA");
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHR PA");
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHL");
+ snd_soc_dapm_disable_pin(dapm, "ANC HPHR");
snd_soc_dapm_enable_pin(dapm, "ANC SPK1 PA");
mutex_unlock(&tavil->codec_mutex);
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.c b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
index 71edded182e0..2d9a5101dd95 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
@@ -263,9 +263,6 @@ static int wcd_resmgr_enable_clk_mclk(struct wcd9xxx_resmgr_v2 *resmgr)
WCD93XX_CDC_CLK_RST_CTRL_MCLK_CONTROL,
0x01, 0x01);
wcd_resmgr_codec_reg_update_bits(resmgr,
- WCD934X_CODEC_RPM_CLK_MCLK_CFG,
- 0x04, 0x04);
- wcd_resmgr_codec_reg_update_bits(resmgr,
WCD93XX_CDC_CLK_RST_CTRL_MCLK_CONTROL,
0x01, 0x01);
wcd_resmgr_codec_reg_update_bits(resmgr,
diff --git a/sound/soc/codecs/wcd_cpe_core.c b/sound/soc/codecs/wcd_cpe_core.c
index 2088698392de..2082c356203d 100644
--- a/sound/soc/codecs/wcd_cpe_core.c
+++ b/sound/soc/codecs/wcd_cpe_core.c
@@ -3029,7 +3029,7 @@ err_ret:
static int wcd_cpe_set_one_param(void *core_handle,
struct cpe_lsm_session *session, struct lsm_params_info *p_info,
- void *data, enum LSM_PARAM_TYPE param_type)
+ void *data, uint32_t param_type)
{
struct wcd_cpe_core *core = core_handle;
int rc = 0;
@@ -3044,25 +3044,9 @@ static int wcd_cpe_set_one_param(void *core_handle,
rc = wcd_cpe_send_param_epd_thres(core, session,
data, &ids);
break;
- case LSM_OPERATION_MODE: {
- struct cpe_lsm_ids connectport_ids;
-
- rc = wcd_cpe_send_param_opmode(core, session,
- data, &ids);
- if (rc)
- break;
-
- connectport_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
- connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
-
- rc = wcd_cpe_send_param_connectport(core, session, NULL,
- &connectport_ids, CPE_AFE_PORT_1_TX);
- if (rc)
- dev_err(core->dev,
- "%s: send_param_connectport failed, err %d\n",
- __func__, rc);
+ case LSM_OPERATION_MODE:
+ rc = wcd_cpe_send_param_opmode(core, session, data, &ids);
break;
- }
case LSM_GAIN:
rc = wcd_cpe_send_param_gain(core, session, data, &ids);
break;
@@ -3081,13 +3065,13 @@ static int wcd_cpe_set_one_param(void *core_handle,
break;
default:
pr_err("%s: wrong param_type 0x%x\n",
- __func__, p_info->param_type);
+ __func__, param_type);
}
if (rc)
dev_err(core->dev,
"%s: send_param(%d) failed, err %d\n",
- __func__, p_info->param_type, rc);
+ __func__, param_type, rc);
return rc;
}
diff --git a/sound/soc/codecs/wsa881x-regmap.c b/sound/soc/codecs/wsa881x-regmap.c
index 63bbbfa6beab..faa44301286c 100644
--- a/sound/soc/codecs/wsa881x-regmap.c
+++ b/sound/soc/codecs/wsa881x-regmap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -216,6 +216,9 @@ static bool wsa881x_readable_register(struct device *dev, unsigned int reg)
static bool wsa881x_volatile_register(struct device *dev, unsigned int reg)
{
+ if (cache_always)
+ return false;
+
switch (reg) {
case WSA881X_CHIP_ID0:
case WSA881X_CHIP_ID1:
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index d7f4044b71ee..171735c8efd4 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -78,6 +78,7 @@ enum {
WSA881X_DEV_UP,
};
+bool cache_always;
/*
* Private data Structure for wsa881x. All parameters related to
* WSA881X codec needs to be defined here.
@@ -1227,6 +1228,9 @@ static int wsa881x_swr_probe(struct swr_device *pdev)
if (ret)
goto err;
}
+
+ cache_always = of_property_read_bool(pdev->dev.of_node,
+ "qcom,cache-always");
wsa881x_gpio_ctrl(wsa881x, true);
wsa881x->state = WSA881X_DEV_UP;
diff --git a/sound/soc/codecs/wsa881x.h b/sound/soc/codecs/wsa881x.h
index be234ac0cd07..178237555c54 100644
--- a/sound/soc/codecs/wsa881x.h
+++ b/sound/soc/codecs/wsa881x.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
#define WSA881X_MAX_SWR_PORTS 4
+extern bool cache_always;
extern int wsa881x_set_channel_map(struct snd_soc_codec *codec, u8 *port,
u8 num_port, unsigned int *ch_mask,
unsigned int *ch_rate);
diff --git a/sound/soc/msm/msm-cpe-lsm.c b/sound/soc/msm/msm-cpe-lsm.c
index ffc6119e543d..b2008d6da2a1 100644
--- a/sound/soc/msm/msm-cpe-lsm.c
+++ b/sound/soc/msm/msm-cpe-lsm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1049,7 +1049,6 @@ static int msm_cpe_lsm_ioctl_shared(struct snd_pcm_substream *substream,
struct cpe_lsm_lab *lab_d = &lsm_d->lab;
struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
struct msm_slim_dma_data *dma_data = NULL;
- struct snd_lsm_event_status *user;
struct snd_lsm_detection_params det_params;
int rc = 0;
@@ -1318,11 +1317,20 @@ static int msm_cpe_lsm_ioctl_shared(struct snd_pcm_substream *substream,
break;
case SNDRV_LSM_EVENT_STATUS:
+ case SNDRV_LSM_EVENT_STATUS_V3: {
+ struct snd_lsm_event_status *user;
+ struct snd_lsm_event_status_v3 *user_v3;
+
dev_dbg(rtd->dev,
"%s: %s\n",
- __func__, "SNDRV_LSM_EVENT_STATUS");
-
- user = arg;
+ __func__, "SNDRV_LSM_EVENT_STATUS(_V3)");
+ if (!arg) {
+ dev_err(rtd->dev,
+ "%s: Invalid argument to ioctl %s\n",
+ __func__,
+ "SNDRV_LSM_EVENT_STATUS(_V3)");
+ return -EINVAL;
+ }
/*
* Release the api lock before wait to allow
@@ -1343,31 +1351,62 @@ static int msm_cpe_lsm_ioctl_shared(struct snd_pcm_substream *substream,
if (atomic_read(&lsm_d->event_avail) == 1) {
rc = 0;
atomic_set(&lsm_d->event_avail, 0);
- if (lsm_d->ev_det_pld_size >
- user->payload_size) {
- dev_err(rtd->dev,
- "%s: avail pld_bytes = %u, needed = %u\n",
- __func__,
- user->payload_size,
- lsm_d->ev_det_pld_size);
- return -EINVAL;
- }
-
- user->status = lsm_d->ev_det_status;
- user->payload_size = lsm_d->ev_det_pld_size;
-
- memcpy(user->payload,
- lsm_d->ev_det_payload,
- lsm_d->ev_det_pld_size);
+ if (cmd == SNDRV_LSM_EVENT_STATUS) {
+ user = arg;
+ if (lsm_d->ev_det_pld_size >
+ user->payload_size) {
+ dev_err(rtd->dev,
+ "%s: avail pld_bytes = %u, needed = %u\n",
+ __func__,
+ user->payload_size,
+ lsm_d->ev_det_pld_size);
+ return -EINVAL;
+ }
+
+ user->status = lsm_d->ev_det_status;
+ user->payload_size =
+ lsm_d->ev_det_pld_size;
+ memcpy(user->payload,
+ lsm_d->ev_det_payload,
+ lsm_d->ev_det_pld_size);
+ } else {
+ user_v3 = arg;
+ if (lsm_d->ev_det_pld_size >
+ user_v3->payload_size) {
+ dev_err(rtd->dev,
+ "%s: avail pld_bytes = %u, needed = %u\n",
+ __func__,
+ user_v3->payload_size,
+ lsm_d->ev_det_pld_size);
+ return -EINVAL;
+ }
+ /* event status timestamp not supported
+ * on CPE mode. Set msw and lsw to 0.
+ */
+ user_v3->timestamp_lsw = 0;
+ user_v3->timestamp_msw = 0;
+ user_v3->status = lsm_d->ev_det_status;
+ user_v3->payload_size =
+ lsm_d->ev_det_pld_size;
+ memcpy(user_v3->payload,
+ lsm_d->ev_det_payload,
+ lsm_d->ev_det_pld_size);
+ }
} else if (atomic_read(&lsm_d->event_stop) == 1) {
dev_dbg(rtd->dev,
"%s: wait_aborted\n", __func__);
- user->payload_size = 0;
+ if (cmd == SNDRV_LSM_EVENT_STATUS) {
+ user = arg;
+ user->payload_size = 0;
+ } else {
+ user_v3 = arg;
+ user_v3->payload_size = 0;
+ }
rc = 0;
}
}
-
+ }
break;
case SNDRV_LSM_ABORT_EVENT:
@@ -1499,6 +1538,20 @@ static int msm_cpe_lsm_ioctl_shared(struct snd_pcm_substream *substream,
}
break;
+ case SNDRV_LSM_SET_PORT: {
+ u32 port_id = cpe->input_port_id;
+
+ dev_dbg(rtd->dev, "%s: %s\n", __func__, "SNDRV_LSM_SET_PORT");
+ rc = lsm_ops->lsm_set_port(cpe->core_handle, session, &port_id);
+ if (rc) {
+ dev_err(rtd->dev,
+ "%s: lsm_set_port failed, err = %d\n",
+ __func__, rc);
+ return rc;
+ }
+ }
+ break;
+
default:
dev_dbg(rtd->dev,
"%s: Default snd_lib_ioctl cmd 0x%x\n",
@@ -1510,7 +1563,7 @@ static int msm_cpe_lsm_ioctl_shared(struct snd_pcm_substream *substream,
}
static int msm_cpe_lsm_lab_start(struct snd_pcm_substream *substream,
- struct snd_lsm_event_status *event_status)
+ u16 event_det_status)
{
struct snd_soc_pcm_runtime *rtd;
struct cpe_lsm_data *lsm_d = NULL;
@@ -1563,7 +1616,7 @@ static int msm_cpe_lsm_lab_start(struct snd_pcm_substream *substream,
reinit_completion(&lab_d->thread_complete);
if (session->lab_enable &&
- event_status->status ==
+ event_det_status ==
LSM_VOICE_WAKEUP_STATUS_DETECTED) {
out_port = &session->afe_out_port_cfg;
out_port->port_id = session->afe_out_port_id;
@@ -2167,7 +2220,60 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
goto done;
}
- msm_cpe_lsm_lab_start(substream, event_status);
+ msm_cpe_lsm_lab_start(substream, event_status->status);
+ msm_cpe_process_event_status_done(lsm_d);
+ kfree(event_status);
+ }
+ break;
+ case SNDRV_LSM_EVENT_STATUS_V3: {
+ struct snd_lsm_event_status_v3 u_event_status;
+ struct snd_lsm_event_status_v3 *event_status = NULL;
+ int u_pld_size = 0;
+
+ if (copy_from_user(&u_event_status, (void *)arg,
+ sizeof(struct snd_lsm_event_status_v3))) {
+ dev_err(rtd->dev,
+ "%s: event status copy from user failed, size %zd\n",
+ __func__,
+ sizeof(struct snd_lsm_event_status_v3));
+ err = -EFAULT;
+ goto done;
+ }
+
+ if (u_event_status.payload_size >
+ LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+ dev_err(rtd->dev,
+ "%s: payload_size %d is invalid, max allowed = %d\n",
+ __func__, u_event_status.payload_size,
+ LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+ err = -EINVAL;
+ goto done;
+ }
+
+ u_pld_size = sizeof(struct snd_lsm_event_status_v3) +
+ u_event_status.payload_size;
+
+ event_status = kzalloc(u_pld_size, GFP_KERNEL);
+ if (!event_status) {
+ err = -ENOMEM;
+ goto done;
+ } else {
+ event_status->payload_size =
+ u_event_status.payload_size;
+ err = msm_cpe_lsm_ioctl_shared(substream,
+ cmd, event_status);
+ }
+
+ if (!err && copy_to_user(arg, event_status, u_pld_size)) {
+ dev_err(rtd->dev,
+ "%s: copy to user failed\n",
+ __func__);
+ kfree(event_status);
+ err = -EFAULT;
+ goto done;
+ }
+
+ msm_cpe_lsm_lab_start(substream, event_status->status);
msm_cpe_process_event_status_done(lsm_d);
kfree(event_status);
}
@@ -2297,7 +2403,7 @@ struct lsm_params_info_32 {
u32 param_id;
u32 param_size;
compat_uptr_t param_data;
- enum LSM_PARAM_TYPE param_type;
+ uint32_t param_type;
};
struct snd_lsm_module_params_32 {
@@ -2480,7 +2586,97 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
goto done;
}
- msm_cpe_lsm_lab_start(substream, event_status);
+ msm_cpe_lsm_lab_start(substream, event_status->status);
+ msm_cpe_process_event_status_done(lsm_d);
+ kfree(event_status);
+ kfree(udata_32);
+ }
+ break;
+ case SNDRV_LSM_EVENT_STATUS_V3: {
+ struct snd_lsm_event_status_v3 *event_status = NULL;
+ struct snd_lsm_event_status_v3 u_event_status32;
+ struct snd_lsm_event_status_v3 *udata_32 = NULL;
+ int u_pld_size = 0;
+
+ dev_dbg(rtd->dev,
+ "%s: ioctl %s\n", __func__,
+ "SNDRV_LSM_EVENT_STATUS_V3_32");
+
+ if (copy_from_user(&u_event_status32, (void *)arg,
+ sizeof(struct snd_lsm_event_status_v3))) {
+ dev_err(rtd->dev,
+ "%s: event status copy from user failed, size %zd\n",
+ __func__,
+ sizeof(struct snd_lsm_event_status_v3));
+ err = -EFAULT;
+ goto done;
+ }
+
+ if (u_event_status32.payload_size >
+ LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+ dev_err(rtd->dev,
+ "%s: payload_size %d is invalid, max allowed = %d\n",
+ __func__, u_event_status32.payload_size,
+ LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+ err = -EINVAL;
+ goto done;
+ }
+
+ u_pld_size = sizeof(struct snd_lsm_event_status_v3) +
+ u_event_status32.payload_size;
+ event_status = kzalloc(u_pld_size, GFP_KERNEL);
+ if (!event_status) {
+ dev_err(rtd->dev,
+ "%s: No memory for event status\n",
+ __func__);
+ err = -ENOMEM;
+ goto done;
+ } else {
+ event_status->payload_size =
+ u_event_status32.payload_size;
+ err = msm_cpe_lsm_ioctl_shared(substream,
+ cmd, event_status);
+ if (err)
+ dev_err(rtd->dev,
+ "%s: %s failed, error = %d\n",
+ __func__,
+ "SNDRV_LSM_EVENT_STATUS_V3_32",
+ err);
+ }
+
+ if (!err) {
+ udata_32 = kzalloc(u_pld_size, GFP_KERNEL);
+ if (!udata_32) {
+ dev_err(rtd->dev,
+ "%s: nomem for udata\n",
+ __func__);
+ err = -EFAULT;
+ } else {
+ udata_32->timestamp_lsw =
+ event_status->timestamp_lsw;
+ udata_32->timestamp_msw =
+ event_status->timestamp_msw;
+ udata_32->status = event_status->status;
+ udata_32->payload_size =
+ event_status->payload_size;
+ memcpy(udata_32->payload,
+ event_status->payload,
+ u_pld_size);
+ }
+ }
+
+ if (!err && copy_to_user(arg, udata_32,
+ u_pld_size)) {
+ dev_err(rtd->dev,
+ "%s: copy to user failed\n",
+ __func__);
+ kfree(event_status);
+ kfree(udata_32);
+ err = -EFAULT;
+ goto done;
+ }
+
+ msm_cpe_lsm_lab_start(substream, event_status->status);
msm_cpe_process_event_status_done(lsm_d);
kfree(event_status);
kfree(udata_32);
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 6c1897340e74..44a6a245c7a2 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2203,12 +2203,14 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.capture = {
.stream_name = "Listen 1 Audio Service Capture",
.aif_name = "LSM1_UL_HL",
- .rates = SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = (SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
- .channels_max = 1,
+ .channels_max = 4,
.rate_min = 16000,
- .rate_max = 16000,
+ .rate_max = 48000,
},
.ops = &msm_fe_dai_ops,
.name = "LSM1",
@@ -2218,12 +2220,14 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.capture = {
.stream_name = "Listen 2 Audio Service Capture",
.aif_name = "LSM2_UL_HL",
- .rates = SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = (SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
- .channels_max = 1,
+ .channels_max = 4,
.rate_min = 16000,
- .rate_max = 16000,
+ .rate_max = 48000,
},
.ops = &msm_fe_dai_ops,
.name = "LSM2",
@@ -2233,12 +2237,14 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.capture = {
.stream_name = "Listen 3 Audio Service Capture",
.aif_name = "LSM3_UL_HL",
- .rates = SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = (SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
- .channels_max = 1,
+ .channels_max = 4,
.rate_min = 16000,
- .rate_max = 16000,
+ .rate_max = 48000,
},
.ops = &msm_fe_dai_ops,
.name = "LSM3",
@@ -2248,12 +2254,14 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.capture = {
.stream_name = "Listen 4 Audio Service Capture",
.aif_name = "LSM4_UL_HL",
- .rates = SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = (SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
- .channels_max = 1,
+ .channels_max = 4,
.rate_min = 16000,
- .rate_max = 16000,
+ .rate_max = 48000,
},
.ops = &msm_fe_dai_ops,
.name = "LSM4",
@@ -2263,12 +2271,14 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.capture = {
.stream_name = "Listen 5 Audio Service Capture",
.aif_name = "LSM5_UL_HL",
- .rates = SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = (SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
- .channels_max = 1,
+ .channels_max = 4,
.rate_min = 16000,
- .rate_max = 16000,
+ .rate_max = 48000,
},
.ops = &msm_fe_dai_ops,
.name = "LSM5",
@@ -2278,12 +2288,14 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.capture = {
.stream_name = "Listen 6 Audio Service Capture",
.aif_name = "LSM6_UL_HL",
- .rates = SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = (SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
- .channels_max = 1,
+ .channels_max = 4,
.rate_min = 16000,
- .rate_max = 16000,
+ .rate_max = 48000,
},
.ops = &msm_fe_dai_ops,
.name = "LSM6",
@@ -2293,12 +2305,14 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.capture = {
.stream_name = "Listen 7 Audio Service Capture",
.aif_name = "LSM7_UL_HL",
- .rates = SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = (SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
- .channels_max = 1,
+ .channels_max = 4,
.rate_min = 16000,
- .rate_max = 16000,
+ .rate_max = 48000,
},
.ops = &msm_fe_dai_ops,
.name = "LSM7",
@@ -2308,12 +2322,14 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.capture = {
.stream_name = "Listen 8 Audio Service Capture",
.aif_name = "LSM8_UL_HL",
- .rates = SNDRV_PCM_RATE_16000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .rates = (SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000),
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
- .channels_max = 1,
+ .channels_max = 4,
.rate_min = 16000,
- .rate_max = 16000,
+ .rate_max = 48000,
},
.ops = &msm_fe_dai_ops,
.name = "LSM8",
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index 557c7946506a..391640d53d56 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -3383,12 +3383,12 @@ static int msm_audrx_init(struct snd_soc_pcm_runtime *rtd)
snd_soc_dapm_ignore_suspend(dapm, "HPHR");
snd_soc_dapm_ignore_suspend(dapm, "AIF4 VI");
snd_soc_dapm_ignore_suspend(dapm, "VIINPUT");
+ snd_soc_dapm_ignore_suspend(dapm, "ANC HPHL");
+ snd_soc_dapm_ignore_suspend(dapm, "ANC HPHR");
if (!strcmp(dev_name(codec_dai->dev), "tasha_codec")) {
snd_soc_dapm_ignore_suspend(dapm, "LINEOUT3");
snd_soc_dapm_ignore_suspend(dapm, "LINEOUT4");
- snd_soc_dapm_ignore_suspend(dapm, "ANC HPHL");
- snd_soc_dapm_ignore_suspend(dapm, "ANC HPHR");
snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT1");
snd_soc_dapm_ignore_suspend(dapm, "ANC LINEOUT2");
}
diff --git a/sound/soc/msm/qdsp6v2/audio_cal_utils.c b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
index 75af648baef3..b54cde4ef0c3 100644
--- a/sound/soc/msm/qdsp6v2/audio_cal_utils.c
+++ b/sound/soc/msm/qdsp6v2/audio_cal_utils.c
@@ -607,7 +607,6 @@ static struct cal_block_data *create_cal_block(struct cal_type_data *cal_type,
}
INIT_LIST_HEAD(&cal_block->list);
- list_add_tail(&cal_block->list, &cal_type->cal_blocks);
cal_block->map_data.ion_map_handle = basic_cal->cal_data.mem_handle;
if (basic_cal->cal_data.mem_handle > 0) {
@@ -639,6 +638,7 @@ static struct cal_block_data *create_cal_block(struct cal_type_data *cal_type,
goto err;
}
cal_block->buffer_number = basic_cal->cal_hdr.buffer_number;
+ list_add_tail(&cal_block->list, &cal_type->cal_blocks);
pr_debug("%s: created block for cal type %d, buf num %d, map handle %d, map size %zd paddr 0x%pK!\n",
__func__, cal_type->info.reg.cal_type,
cal_block->buffer_number,
@@ -648,6 +648,8 @@ static struct cal_block_data *create_cal_block(struct cal_type_data *cal_type,
done:
return cal_block;
err:
+ kfree(cal_block->cal_info);
+ kfree(cal_block->client_info);
kfree(cal_block);
cal_block = NULL;
return cal_block;
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
index 8da75d74776b..4664d39e87e0 100644
--- a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
@@ -816,6 +816,10 @@ int msm_dolby_dap_param_to_get_control_get(struct snd_kcontrol *kcontrol,
__func__, copp_idx);
return -EINVAL;
}
+ if (dolby_dap_params_get.length > 128 - DOLBY_PARAM_PAYLOAD_SIZE) {
+ pr_err("%s: Incorrect parameter length", __func__);
+ return -EINVAL;
+ }
params_value = kzalloc(params_length + param_payload_len, GFP_KERNEL);
if (!params_value) {
pr_err("%s, params memory alloc failed\n", __func__);
diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
index efb6644e551f..55ca659567f5 100644
--- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c
+++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,7 +35,7 @@
#define CAPTURE_MIN_NUM_PERIODS 2
#define CAPTURE_MAX_NUM_PERIODS 8
-#define CAPTURE_MAX_PERIOD_SIZE 4096
+#define CAPTURE_MAX_PERIOD_SIZE 61440
#define CAPTURE_MIN_PERIOD_SIZE 320
#define LISTEN_MAX_STATUS_PAYLOAD_SIZE 256
@@ -47,12 +47,14 @@ static struct snd_pcm_hardware msm_pcm_hardware_capture = {
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rates = SNDRV_PCM_RATE_16000,
+ .formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE),
+ .rates = (SNDRV_PCM_RATE_16000 |
+ SNDRV_PCM_RATE_48000),
.rate_min = 16000,
- .rate_max = 16000,
+ .rate_max = 48000,
.channels_min = 1,
- .channels_max = 1,
+ .channels_max = 4,
.buffer_bytes_max = CAPTURE_MAX_NUM_PERIODS *
CAPTURE_MAX_PERIOD_SIZE,
.period_bytes_min = CAPTURE_MIN_PERIOD_SIZE,
@@ -64,7 +66,7 @@ static struct snd_pcm_hardware msm_pcm_hardware_capture = {
/* Conventional and unconventional sample rate supported */
static unsigned int supported_sample_rates[] = {
- 16000,
+ 16000, 48000,
};
static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
@@ -76,7 +78,7 @@ static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
struct lsm_priv {
struct snd_pcm_substream *substream;
struct lsm_client *lsm_client;
- struct snd_lsm_event_status *event_status;
+ struct snd_lsm_event_status_v3 *event_status;
spinlock_t event_lock;
wait_queue_head_t event_wait;
unsigned long event_avail;
@@ -88,6 +90,11 @@ struct lsm_priv {
int dma_write;
};
+enum { /* lsm session states */
+ IDLE = 0,
+ RUNNING,
+};
+
static int msm_lsm_queue_lab_buffer(struct lsm_priv *prtd, int i)
{
int rc = 0;
@@ -196,6 +203,8 @@ static void lsm_event_handler(uint32_t opcode, uint32_t token,
uint16_t status = 0;
uint16_t payload_size = 0;
uint16_t index = 0;
+ uint32_t event_ts_lsw = 0;
+ uint32_t event_ts_msw = 0;
if (!substream || !substream->private_data) {
pr_err("%s: Invalid %s\n", __func__,
@@ -269,24 +278,44 @@ static void lsm_event_handler(uint32_t opcode, uint32_t token,
"%s: event detect status = %d payload size = %d\n",
__func__, status , payload_size);
break;
+
+ case LSM_SESSION_EVENT_DETECTION_STATUS_V3:
+ event_ts_lsw = ((uint32_t *)payload)[0];
+ event_ts_msw = ((uint32_t *)payload)[1];
+ status = (uint16_t)((uint8_t *)payload)[8];
+ payload_size = (uint16_t)((uint8_t *)payload)[9];
+ index = 10;
+ dev_dbg(rtd->dev,
+ "%s: ts_msw = %u, ts_lsw = %u, event detect status = %d payload size = %d\n",
+ __func__, event_ts_msw, event_ts_lsw, status,
+ payload_size);
+ break;
+
default:
break;
}
if (opcode == LSM_SESSION_EVENT_DETECTION_STATUS ||
- opcode == LSM_SESSION_EVENT_DETECTION_STATUS_V2) {
+ opcode == LSM_SESSION_EVENT_DETECTION_STATUS_V2 ||
+ opcode == LSM_SESSION_EVENT_DETECTION_STATUS_V3) {
spin_lock_irqsave(&prtd->event_lock, flags);
prtd->event_status = krealloc(prtd->event_status,
- sizeof(struct snd_lsm_event_status) +
+ sizeof(struct snd_lsm_event_status_v3) +
payload_size, GFP_ATOMIC);
if (!prtd->event_status) {
dev_err(rtd->dev, "%s: no memory for event status\n",
__func__);
return;
}
-
+ /*
+ * event status timestamp will be non-zero and valid if
+ * opcode is LSM_SESSION_EVENT_DETECTION_STATUS_V3
+ */
+ prtd->event_status->timestamp_lsw = event_ts_lsw;
+ prtd->event_status->timestamp_msw = event_ts_msw;
prtd->event_status->status = status;
prtd->event_status->payload_size = payload_size;
+
if (likely(prtd->event_status)) {
memcpy(prtd->event_status->payload,
&((uint8_t *)payload)[index],
@@ -641,6 +670,54 @@ err_ret:
return rc;
}
+static int msm_lsm_set_poll_enable(struct snd_pcm_substream *substream,
+ struct lsm_params_info *p_info)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct lsm_priv *prtd = runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_lsm_poll_enable poll_enable;
+ int rc = 0;
+
+ if (p_info->param_size != sizeof(poll_enable)) {
+ dev_err(rtd->dev,
+ "%s: Invalid param_size %d\n",
+ __func__, p_info->param_size);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ if (copy_from_user(&poll_enable, p_info->param_data,
+ sizeof(poll_enable))) {
+ dev_err(rtd->dev,
+ "%s: copy_from_user failed, size = %zd\n",
+ __func__, sizeof(poll_enable));
+ rc = -EFAULT;
+ goto done;
+ }
+
+ if (prtd->lsm_client->poll_enable == poll_enable.poll_en) {
+ dev_dbg(rtd->dev,
+ "%s: Polling for session %d already %s\n",
+ __func__, prtd->lsm_client->session,
+ (poll_enable.poll_en ? "enabled" : "disabled"));
+ rc = 0;
+ goto done;
+ }
+
+ rc = q6lsm_set_one_param(prtd->lsm_client, p_info,
+ &poll_enable, LSM_POLLING_ENABLE);
+ if (!rc) {
+ prtd->lsm_client->poll_enable = poll_enable.poll_en;
+ } else {
+ dev_err(rtd->dev,
+ "%s: Failed to set poll enable, err = %d\n",
+ __func__, rc);
+ }
+done:
+ return rc;
+}
+
static int msm_lsm_process_params(struct snd_pcm_substream *substream,
struct snd_lsm_module_params *p_data,
void *params)
@@ -681,6 +758,9 @@ static int msm_lsm_process_params(struct snd_pcm_substream *substream,
case LSM_CUSTOM_PARAMS:
rc = msm_lsm_set_custom(substream, p_info);
break;
+ case LSM_POLLING_ENABLE:
+ rc = msm_lsm_set_poll_enable(substream, p_info);
+ break;
default:
dev_err(rtd->dev,
"%s: Invalid param_type %d\n",
@@ -710,10 +790,8 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
struct snd_lsm_session_data session_data;
int rc = 0;
int xchg = 0;
- u32 size = 0;
struct snd_pcm_runtime *runtime;
struct lsm_priv *prtd;
- struct snd_lsm_event_status *user = arg;
struct snd_lsm_detection_params det_params;
uint8_t *confidence_level = NULL;
@@ -870,6 +948,10 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
break;
case SNDRV_LSM_EVENT_STATUS:
+ case SNDRV_LSM_EVENT_STATUS_V3: {
+ uint32_t ts_lsw, ts_msw;
+ uint16_t status = 0, payload_size = 0;
+
dev_dbg(rtd->dev, "%s: Get event status\n", __func__);
atomic_set(&prtd->event_wait_stop, 0);
rc = wait_event_freezable(prtd->event_wait,
@@ -882,9 +964,12 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
dev_dbg(rtd->dev, "%s: New event available %ld\n",
__func__, prtd->event_avail);
spin_lock_irqsave(&prtd->event_lock, flags);
+
if (prtd->event_status) {
- size = sizeof(*(prtd->event_status)) +
- prtd->event_status->payload_size;
+ payload_size = prtd->event_status->payload_size;
+ ts_lsw = prtd->event_status->timestamp_lsw;
+ ts_msw = prtd->event_status->timestamp_msw;
+ status = prtd->event_status->status;
spin_unlock_irqrestore(&prtd->event_lock,
flags);
} else {
@@ -896,15 +981,43 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
__func__);
break;
}
- if (user->payload_size <
- prtd->event_status->payload_size) {
- dev_dbg(rtd->dev,
- "%s: provided %d bytes isn't enough, needs %d bytes\n",
- __func__, user->payload_size,
- prtd->event_status->payload_size);
- rc = -ENOMEM;
+
+ if (cmd == SNDRV_LSM_EVENT_STATUS) {
+ struct snd_lsm_event_status *user = arg;
+
+ if (user->payload_size < payload_size) {
+ dev_dbg(rtd->dev,
+ "%s: provided %d bytes isn't enough, needs %d bytes\n",
+ __func__, user->payload_size,
+ payload_size);
+ rc = -ENOMEM;
+ } else {
+ user->status = status;
+ user->payload_size = payload_size;
+ memcpy(user->payload,
+ prtd->event_status->payload,
+ payload_size);
+ }
} else {
- memcpy(user, prtd->event_status, size);
+ struct snd_lsm_event_status_v3 *user_v3 = arg;
+
+ if (user_v3->payload_size < payload_size) {
+ dev_dbg(rtd->dev,
+ "%s: provided %d bytes isn't enough, needs %d bytes\n",
+ __func__, user_v3->payload_size,
+ payload_size);
+ rc = -ENOMEM;
+ } else {
+ user_v3->timestamp_lsw = ts_lsw;
+ user_v3->timestamp_msw = ts_msw;
+ user_v3->status = status;
+ user_v3->payload_size = payload_size;
+ memcpy(user_v3->payload,
+ prtd->event_status->payload,
+ payload_size);
+ }
+ }
+ if (!rc) {
if (prtd->lsm_client->lab_enable
&& !prtd->lsm_client->lab_started
&& prtd->event_status->status ==
@@ -929,6 +1042,7 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
rc = 0;
}
break;
+ }
case SNDRV_LSM_ABORT_EVENT:
dev_dbg(rtd->dev, "%s: Aborting event status wait\n",
@@ -1035,6 +1149,43 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
prtd->lsm_client->lab_started = false;
}
break;
+
+ case SNDRV_LSM_SET_PORT:
+ dev_dbg(rtd->dev, "%s: set LSM port\n", __func__);
+ rc = q6lsm_set_port_connected(prtd->lsm_client);
+ break;
+
+ case SNDRV_LSM_SET_FWK_MODE_CONFIG: {
+ u32 *mode = NULL;
+
+ if (!arg) {
+ dev_err(rtd->dev,
+ "%s: Invalid param arg for ioctl %s session %d\n",
+ __func__, "SNDRV_LSM_SET_FWK_MODE_CONFIG",
+ prtd->lsm_client->session);
+ rc = -EINVAL;
+ break;
+ }
+ mode = (u32 *)arg;
+ if (prtd->lsm_client->event_mode == *mode) {
+ dev_dbg(rtd->dev,
+ "%s: mode for %d already set to %d\n",
+ __func__, prtd->lsm_client->session, *mode);
+ rc = 0;
+ } else {
+ dev_dbg(rtd->dev, "%s: Event mode = %d\n",
+ __func__, *mode);
+ rc = q6lsm_set_fwk_mode_cfg(prtd->lsm_client, *mode);
+ if (!rc)
+ prtd->lsm_client->event_mode = *mode;
+ else
+ dev_err(rtd->dev,
+ "%s: set event mode failed %d\n",
+ __func__, rc);
+ }
+ break;
+ }
+
default:
dev_dbg(rtd->dev,
"%s: Falling into default snd_lib_ioctl cmd 0x%x\n",
@@ -1053,6 +1204,21 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
return rc;
}
#ifdef CONFIG_COMPAT
+
+struct snd_lsm_event_status32 {
+ u16 status;
+ u16 payload_size;
+ u8 payload[0];
+};
+
+struct snd_lsm_event_status_v3_32 {
+ u32 timestamp_lsw;
+ u32 timestamp_msw;
+ u16 status;
+ u16 payload_size;
+ u8 payload[0];
+};
+
struct snd_lsm_sound_model_v2_32 {
compat_uptr_t data;
compat_uptr_t confidence_level;
@@ -1074,7 +1240,7 @@ struct lsm_params_info_32 {
u32 param_id;
u32 param_size;
compat_uptr_t param_data;
- enum LSM_PARAM_TYPE param_type;
+ uint32_t param_type;
};
struct snd_lsm_module_params_32 {
@@ -1090,6 +1256,8 @@ enum {
_IOW('U', 0x0A, struct snd_lsm_detection_params_32),
SNDRV_LSM_SET_MODULE_PARAMS_32 =
_IOW('U', 0x0B, struct snd_lsm_module_params_32),
+ SNDRV_LSM_EVENT_STATUS_V3_32 =
+ _IOW('U', 0x0F, struct snd_lsm_event_status_v3_32),
};
static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
@@ -1178,6 +1346,73 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
break;
}
+ case SNDRV_LSM_EVENT_STATUS_V3_32: {
+ struct snd_lsm_event_status_v3_32 userarg32, *user32 = NULL;
+ struct snd_lsm_event_status_v3 *user = NULL;
+
+ if (copy_from_user(&userarg32, arg, sizeof(userarg32))) {
+ dev_err(rtd->dev, "%s: err copyuser ioctl %s\n",
+ __func__, "SNDRV_LSM_EVENT_STATUS_V3_32");
+ return -EFAULT;
+ }
+
+ if (userarg32.payload_size >
+ LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+ pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
+ __func__, userarg32.payload_size,
+ LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+ return -EINVAL;
+ }
+
+ size = sizeof(*user) + userarg32.payload_size;
+ user = kmalloc(size, GFP_KERNEL);
+ if (!user) {
+ dev_err(rtd->dev,
+ "%s: Allocation failed event status size %d\n",
+ __func__, size);
+ return -EFAULT;
+ }
+ cmd = SNDRV_LSM_EVENT_STATUS_V3;
+ user->payload_size = userarg32.payload_size;
+ err = msm_lsm_ioctl_shared(substream, cmd, user);
+
+ /* Update size with actual payload size */
+ size = sizeof(userarg32) + user->payload_size;
+ if (!err && !access_ok(VERIFY_WRITE, arg, size)) {
+ dev_err(rtd->dev,
+ "%s: write verify failed size %d\n",
+ __func__, size);
+ err = -EFAULT;
+ }
+ if (!err) {
+ user32 = kmalloc(size, GFP_KERNEL);
+ if (!user32) {
+ dev_err(rtd->dev,
+ "%s: Allocation event user status size %d\n",
+ __func__, size);
+ err = -EFAULT;
+ } else {
+ user32->timestamp_lsw = user->timestamp_lsw;
+ user32->timestamp_msw = user->timestamp_msw;
+ user32->status = user->status;
+ user32->payload_size = user->payload_size;
+ memcpy(user32->payload,
+ user->payload, user32->payload_size);
+ }
+ }
+ if (!err && (copy_to_user(arg, user32, size))) {
+ dev_err(rtd->dev, "%s: failed to copy payload %d",
+ __func__, size);
+ err = -EFAULT;
+ }
+ kfree(user);
+ kfree(user32);
+ if (err)
+ dev_err(rtd->dev, "%s: lsmevent failed %d",
+ __func__, err);
+ break;
+ }
+
case SNDRV_LSM_REG_SND_MODEL_V2_32: {
struct snd_lsm_sound_model_v2_32 snd_modelv232;
struct snd_lsm_sound_model_v2 snd_modelv2;
@@ -1573,6 +1808,67 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: lsmevent failed %d", __func__, err);
return err;
}
+
+ case SNDRV_LSM_EVENT_STATUS_V3: {
+ struct snd_lsm_event_status_v3 *user = NULL;
+ struct snd_lsm_event_status_v3 userarg;
+
+ dev_dbg(rtd->dev,
+ "%s: SNDRV_LSM_EVENT_STATUS_V3\n", __func__);
+ if (!arg) {
+ dev_err(rtd->dev,
+ "%s: Invalid params event_status_v3\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (copy_from_user(&userarg, arg, sizeof(userarg))) {
+ dev_err(rtd->dev,
+ "%s: err copyuser event_status_v3\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ if (userarg.payload_size >
+ LISTEN_MAX_STATUS_PAYLOAD_SIZE) {
+ pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
+ __func__, userarg.payload_size,
+ LISTEN_MAX_STATUS_PAYLOAD_SIZE);
+ return -EINVAL;
+ }
+
+ size = sizeof(struct snd_lsm_event_status_v3) +
+ userarg.payload_size;
+ user = kmalloc(size, GFP_KERNEL);
+ if (!user) {
+ dev_err(rtd->dev,
+ "%s: Allocation failed event status size %d\n",
+ __func__, size);
+ return -EFAULT;
+ }
+ user->payload_size = userarg.payload_size;
+ err = msm_lsm_ioctl_shared(substream, cmd, user);
+
+ /* Update size with actual payload size */
+ size = sizeof(*user) + user->payload_size;
+ if (!err && !access_ok(VERIFY_WRITE, arg, size)) {
+ dev_err(rtd->dev,
+ "%s: write verify failed size %d\n",
+ __func__, size);
+ err = -EFAULT;
+ }
+ if (!err && (copy_to_user(arg, user, size))) {
+ dev_err(rtd->dev,
+ "%s: failed to copy payload %d",
+ __func__, size);
+ err = -EFAULT;
+ }
+ kfree(user);
+ if (err)
+ dev_err(rtd->dev,
+ "%s: lsm_event_v3 failed %d", __func__, err);
+ break;
+ }
+
default:
err = msm_lsm_ioctl_shared(substream, cmd, arg);
break;
@@ -1640,6 +1936,11 @@ static int msm_lsm_open(struct snd_pcm_substream *substream)
return -ENOMEM;
}
prtd->lsm_client->opened = false;
+ prtd->lsm_client->session_state = IDLE;
+ prtd->lsm_client->poll_enable = true;
+ prtd->lsm_client->perf_mode = 0;
+ prtd->lsm_client->event_mode = LSM_EVENT_NON_TIME_STAMP_MODE;
+
return 0;
}
@@ -1648,6 +1949,7 @@ static int msm_lsm_prepare(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct lsm_priv *prtd = runtime->private_data;
struct snd_soc_pcm_runtime *rtd;
+ int ret = 0;
if (!substream->private_data) {
pr_err("%s: Invalid private_data", __func__);
@@ -1661,9 +1963,30 @@ static int msm_lsm_prepare(struct snd_pcm_substream *substream)
"%s: LSM client data ptr is NULL\n", __func__);
return -EINVAL;
}
+
+ if (q6lsm_set_media_fmt_params(prtd->lsm_client))
+ dev_dbg(rtd->dev,
+ "%s: failed to set lsm media fmt params\n", __func__);
+
+ if (prtd->lsm_client->session_state == IDLE) {
+ ret = msm_pcm_routing_reg_phy_compr_stream(
+ rtd->dai_link->be_id,
+ prtd->lsm_client->perf_mode,
+ prtd->lsm_client->session,
+ SNDRV_PCM_STREAM_CAPTURE,
+ LISTEN);
+ if (ret) {
+ dev_err(rtd->dev,
+ "%s: register phy compr stream failed %d\n",
+ __func__, ret);
+ return ret;
+ }
+ }
+
+ prtd->lsm_client->session_state = RUNNING;
prtd->lsm_client->started = false;
runtime->private_data = prtd;
- return 0;
+ return ret;
}
static int msm_lsm_close(struct snd_pcm_substream *substream)
@@ -1712,6 +2035,9 @@ static int msm_lsm_close(struct snd_pcm_substream *substream)
__func__);
}
+ msm_pcm_routing_dereg_phy_stream(rtd->dai_link->be_id,
+ SNDRV_PCM_STREAM_CAPTURE);
+
if (prtd->lsm_client->opened) {
q6lsm_close(prtd->lsm_client);
prtd->lsm_client->opened = false;
@@ -1733,7 +2059,7 @@ static int msm_lsm_hw_params(struct snd_pcm_substream *substream,
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct lsm_priv *prtd = runtime->private_data;
- struct lsm_lab_hw_params *hw_params = NULL;
+ struct lsm_hw_params *hw_params = NULL;
struct snd_soc_pcm_runtime *rtd;
if (!substream->private_data) {
@@ -1749,25 +2075,36 @@ static int msm_lsm_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
hw_params = &prtd->lsm_client->hw_params;
- hw_params->sample_rate = params_rate(params);
- hw_params->sample_size =
- (params_format(params) == SNDRV_PCM_FORMAT_S16_LE) ? 16 : 0;
+ hw_params->num_chs = params_channels(params);
hw_params->period_count = params_periods(params);
- if (hw_params->sample_rate != 16000 || hw_params->sample_size != 16 ||
- hw_params->period_count == 0) {
+ hw_params->sample_rate = params_rate(params);
+ if (((hw_params->sample_rate != 16000) &&
+ (hw_params->sample_rate != 48000)) ||
+ (hw_params->period_count == 0)) {
dev_err(rtd->dev,
- "%s: Invalid params sample rate %d sample size %d period count %d",
+ "%s: Invalid Params sample rate %d period count %d\n",
__func__, hw_params->sample_rate,
- hw_params->sample_size,
- hw_params->period_count);
+ hw_params->period_count);
return -EINVAL;
}
+
+ if (params_format(params) == SNDRV_PCM_FORMAT_S16_LE) {
+ hw_params->sample_size = 16;
+ } else if (params_format(params) == SNDRV_PCM_FORMAT_S24_LE) {
+ hw_params->sample_size = 24;
+ } else {
+ dev_err(rtd->dev, "%s: Invalid Format 0x%x\n",
+ __func__, params_format(params));
+ return -EINVAL;
+ }
+
hw_params->buf_sz = params_buffer_bytes(params) /
- hw_params->period_count;
+ hw_params->period_count;
dev_dbg(rtd->dev,
- "%s: sample rate %d sample size %d buffer size %d period count %d\n",
- __func__, hw_params->sample_rate, hw_params->sample_size,
- hw_params->buf_sz, hw_params->period_count);
+ "%s: channels %d sample rate %d sample size %d buffer size %d period count %d\n",
+ __func__, hw_params->num_chs, hw_params->sample_rate,
+ hw_params->sample_size, hw_params->buf_sz,
+ hw_params->period_count);
return 0;
}
@@ -1863,6 +2200,109 @@ static int msm_lsm_pcm_copy(struct snd_pcm_substream *substream, int ch,
return 0;
}
+static int msm_lsm_app_type_cfg_ctl_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u64 fe_id = kcontrol->private_value;
+ int app_type;
+ int acdb_dev_id;
+ int sample_rate;
+
+ pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
+ if ((fe_id < MSM_FRONTEND_DAI_LSM1) ||
+ (fe_id > MSM_FRONTEND_DAI_LSM8)) {
+ pr_err("%s: Received out of bounds fe_id %llu\n",
+ __func__, fe_id);
+ return -EINVAL;
+ }
+
+ app_type = ucontrol->value.integer.value[0];
+ acdb_dev_id = ucontrol->value.integer.value[1];
+ sample_rate = ucontrol->value.integer.value[2];
+
+ pr_debug("%s: app_type- %d acdb_dev_id- %d sample_rate- %d session_type- %d\n",
+ __func__, app_type, acdb_dev_id, sample_rate, SESSION_TYPE_TX);
+ msm_pcm_routing_reg_stream_app_type_cfg(fe_id, app_type,
+ acdb_dev_id, sample_rate, SESSION_TYPE_TX);
+
+ return 0;
+}
+
+static int msm_lsm_app_type_cfg_ctl_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u64 fe_id = kcontrol->private_value;
+ int ret = 0;
+ int app_type;
+ int acdb_dev_id;
+ int sample_rate;
+
+ pr_debug("%s: fe_id- %llu\n", __func__, fe_id);
+ if ((fe_id < MSM_FRONTEND_DAI_LSM1) ||
+ (fe_id > MSM_FRONTEND_DAI_LSM8)) {
+ pr_err("%s: Received out of bounds fe_id %llu\n",
+ __func__, fe_id);
+ return -EINVAL;
+ }
+
+ ret = msm_pcm_routing_get_stream_app_type_cfg(fe_id, SESSION_TYPE_TX,
+ &app_type, &acdb_dev_id, &sample_rate);
+ if (ret < 0) {
+ pr_err("%s: msm_pcm_routing_get_stream_app_type_cfg failed returned %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ ucontrol->value.integer.value[0] = app_type;
+ ucontrol->value.integer.value[1] = acdb_dev_id;
+ ucontrol->value.integer.value[2] = sample_rate;
+ pr_debug("%s: fedai_id %llu, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
+ __func__, fe_id, SESSION_TYPE_TX,
+ app_type, acdb_dev_id, sample_rate);
+done:
+ return ret;
+}
+
+static int msm_lsm_add_app_type_controls(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_pcm *pcm = rtd->pcm;
+ struct snd_pcm_usr *app_type_info;
+ struct snd_kcontrol *kctl;
+ const char *mixer_ctl_name = "Listen Stream";
+ const char *deviceNo = "NN";
+ const char *suffix = "App Type Cfg";
+ int ctl_len, ret = 0;
+
+ ctl_len = strlen(mixer_ctl_name) + 1 +
+ strlen(deviceNo) + 1 + strlen(suffix) + 1;
+ pr_debug("%s: Listen app type cntrl add\n", __func__);
+ ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_CAPTURE,
+ NULL, 1, ctl_len, rtd->dai_link->be_id,
+ &app_type_info);
+ if (ret < 0) {
+ pr_err("%s: Listen app type cntrl add failed: %d\n",
+ __func__, ret);
+ return ret;
+ }
+ kctl = app_type_info->kctl;
+ snprintf(kctl->id.name, ctl_len, "%s %d %s",
+ mixer_ctl_name, rtd->pcm->device, suffix);
+ kctl->put = msm_lsm_app_type_cfg_ctl_put;
+ kctl->get = msm_lsm_app_type_cfg_ctl_get;
+ return 0;
+}
+
+static int msm_lsm_add_controls(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret = 0;
+
+ ret = msm_lsm_add_app_type_controls(rtd);
+ if (ret)
+ pr_err("%s, add app type controls failed:%d\n", __func__, ret);
+
+ return ret;
+}
+
static struct snd_pcm_ops msm_lsm_ops = {
.open = msm_lsm_open,
.close = msm_lsm_close,
@@ -1877,11 +2317,16 @@ static struct snd_pcm_ops msm_lsm_ops = {
static int msm_asoc_lsm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_card *card = rtd->card->snd_card;
+ int ret = 0;
if (!card->dev->coherent_dma_mask)
card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
- return 0;
+ ret = msm_lsm_add_controls(rtd);
+ if (ret)
+ pr_err("%s, kctl add failed:%d\n", __func__, ret);
+
+ return ret;
}
static int msm_asoc_lsm_probe(struct snd_soc_platform *platform)
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index a62420a23789..30522ac023cd 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -64,15 +64,20 @@ static struct cal_type_data *cal_data;
static int fm_switch_enable;
static int hfp_switch_enable;
+static int int0_mi2s_switch_enable;
+static int int4_mi2s_switch_enable;
static int pri_mi2s_switch_enable;
static int sec_mi2s_switch_enable;
static int tert_mi2s_switch_enable;
static int quat_mi2s_switch_enable;
static int fm_pcmrx_switch_enable;
static int usb_switch_enable;
-static int lsm_mux_slim_port;
+static int lsm_port_index;
static int slim0_rx_aanc_fb_port;
static int msm_route_ec_ref_rx;
+static int msm_ec_ref_ch = 4;
+static int msm_ec_ref_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+static int msm_ec_ref_sampling_rate = 48000;
static uint32_t voc_session_id = ALL_SESSION_VSID;
static int msm_route_ext_ec_ref;
static bool is_custom_stereo_on;
@@ -86,6 +91,8 @@ enum {
MADSWAUDIO,
};
+#define ADM_LSM_PORT_INDEX 9
+
#define SLIMBUS_0_TX_TEXT "SLIMBUS_0_TX"
#define SLIMBUS_1_TX_TEXT "SLIMBUS_1_TX"
#define SLIMBUS_2_TX_TEXT "SLIMBUS_2_TX"
@@ -94,12 +101,14 @@ enum {
#define SLIMBUS_TX_VI_TEXT "SLIMBUS_TX_VI"
#define SLIMBUS_5_TX_TEXT "SLIMBUS_5_TX"
#define TERT_MI2S_TX_TEXT "TERT_MI2S_TX"
+#define QUAT_MI2S_TX_TEXT "QUAT_MI2S_TX"
+#define ADM_LSM_TX_TEXT "ADM_LSM_TX"
#define LSM_FUNCTION_TEXT "LSM Function"
-static const char * const mad_audio_mux_text[] = {
+static const char * const lsm_port_text[] = {
"None",
SLIMBUS_0_TX_TEXT, SLIMBUS_1_TX_TEXT, SLIMBUS_2_TX_TEXT,
- SLIMBUS_3_TX_TEXT, SLIMBUS_4_TX_TEXT, SLIMBUS_TX_VI_TEXT,
- SLIMBUS_5_TX_TEXT, TERT_MI2S_TX_TEXT
+ SLIMBUS_3_TX_TEXT, SLIMBUS_4_TX_TEXT, SLIMBUS_5_TX_TEXT,
+ TERT_MI2S_TX_TEXT, QUAT_MI2S_TX_TEXT, ADM_LSM_TX_TEXT
};
struct msm_pcm_route_bdai_pp_params {
@@ -268,254 +277,262 @@ static void msm_pcm_routng_cfg_matrix_map_pp(struct route_payload payload,
#define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID
struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
- { PRIMARY_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX},
- { PRIMARY_I2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX},
- { SLIMBUS_0_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX},
- { SLIMBUS_0_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX},
- { HDMI_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_HDMI},
- { INT_BT_SCO_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX},
- { INT_BT_SCO_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX},
- { INT_FM_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX},
- { INT_FM_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX},
- { RT_PROXY_PORT_001_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_RX},
- { RT_PROXY_PORT_001_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_TX},
- { AFE_PORT_ID_PRIMARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { PRIMARY_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX},
+ { PRIMARY_I2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX},
+ { SLIMBUS_0_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX},
+ { SLIMBUS_0_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX},
+ { HDMI_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_HDMI},
+ { INT_BT_SCO_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX},
+ { INT_BT_SCO_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX},
+ { INT_FM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX},
+ { INT_FM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX},
+ { RT_PROXY_PORT_001_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ LPASS_BE_AFE_PCM_RX},
+ { RT_PROXY_PORT_001_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ LPASS_BE_AFE_PCM_TX},
+ { AFE_PORT_ID_PRIMARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUXPCM_RX},
- { AFE_PORT_ID_PRIMARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUXPCM_TX},
- { VOICE_PLAYBACK_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { VOICE_PLAYBACK_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_VOICE_PLAYBACK_TX},
- { VOICE2_PLAYBACK_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { VOICE2_PLAYBACK_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_VOICE2_PLAYBACK_TX},
- { VOICE_RECORD_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_RX},
- { VOICE_RECORD_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_TX},
- { MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX},
- { MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX},
- { SECONDARY_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
- { SLIMBUS_1_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
- { SLIMBUS_1_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
- { SLIMBUS_2_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
- { SLIMBUS_2_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_TX},
- { SLIMBUS_3_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
- { SLIMBUS_3_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX},
- { SLIMBUS_4_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
- { SLIMBUS_4_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
- { SLIMBUS_5_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_RX},
- { SLIMBUS_5_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX},
- { SLIMBUS_6_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX},
- { SLIMBUS_6_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX},
- { SLIMBUS_7_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_RX},
- { SLIMBUS_7_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_TX},
- { SLIMBUS_8_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_RX},
- { SLIMBUS_8_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_TX},
- { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX},
- { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX},
- { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX},
- { AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { VOICE_RECORD_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ LPASS_BE_INCALL_RECORD_RX},
+ { VOICE_RECORD_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ LPASS_BE_INCALL_RECORD_TX},
+ { MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX},
+ { MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX},
+ { SECONDARY_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
+ { SLIMBUS_1_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
+ { SLIMBUS_1_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
+ { SLIMBUS_2_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
+ { SLIMBUS_2_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_TX},
+ { SLIMBUS_3_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
+ { SLIMBUS_3_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX},
+ { SLIMBUS_4_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
+ { SLIMBUS_4_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
+ { SLIMBUS_5_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_RX},
+ { SLIMBUS_5_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX},
+ { SLIMBUS_6_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX},
+ { SLIMBUS_6_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX},
+ { SLIMBUS_7_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_RX},
+ { SLIMBUS_7_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_TX},
+ { SLIMBUS_8_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_RX},
+ { SLIMBUS_8_TX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_TX},
+ { SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX},
+ { SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX},
+ { SLIMBUS_EXTPROC_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX},
+ { AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_MI2S_RX},
- { AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_MI2S_TX},
- { AFE_PORT_ID_SECONDARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_RX},
- { AFE_PORT_ID_SECONDARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_TX},
- { AFE_PORT_ID_PRIMARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_MI2S_RX},
- { AFE_PORT_ID_PRIMARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_MI2S_TX},
- { AFE_PORT_ID_TERTIARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_MI2S_RX},
- { AFE_PORT_ID_TERTIARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_MI2S_TX},
- { AUDIO_PORT_ID_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AUDIO_PORT_ID_I2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUDIO_I2S_RX},
- { AFE_PORT_ID_SECONDARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_AUXPCM_RX},
- { AFE_PORT_ID_SECONDARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_AUXPCM_TX},
- { AFE_PORT_ID_SPDIF_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX},
- { AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SPDIF_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX},
+ { AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_RX_SD1},
- { AFE_PORT_ID_QUINARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUINARY_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUIN_MI2S_RX},
- { AFE_PORT_ID_QUINARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUINARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUIN_MI2S_TX},
- { AFE_PORT_ID_SENARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SENARY_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SENARY_MI2S_TX},
- { AFE_PORT_ID_PRIMARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_0},
- { AFE_PORT_ID_PRIMARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_0},
- { AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_1},
- { AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_1},
- { AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_2},
- { AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_2},
- { AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_3},
- { AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_3},
- { AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_4},
- { AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_4},
- { AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_5},
- { AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_5},
- { AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_6},
- { AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_6},
- { AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_7},
- { AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_7},
- { AFE_PORT_ID_SECONDARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_0},
- { AFE_PORT_ID_SECONDARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_0},
- { AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_1},
- { AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_1},
- { AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_2},
- { AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_2},
- { AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_3},
- { AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_3},
- { AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_4},
- { AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_4},
- { AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_5},
- { AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_5},
- { AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_6},
- { AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_6},
- { AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_7},
- { AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_7},
- { AFE_PORT_ID_TERTIARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_0},
- { AFE_PORT_ID_TERTIARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_0},
- { AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_1},
- { AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_1},
- { AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_2},
- { AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_2},
- { AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_3},
- { AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_3},
- { AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_4},
- { AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_4},
- { AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_5},
- { AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_5},
- { AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_6},
- { AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_6},
- { AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_7},
- { AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_7},
- { AFE_PORT_ID_QUATERNARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_0},
- { AFE_PORT_ID_QUATERNARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_0},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_1},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_1},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_2},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_2},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_3},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_3},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_4},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_4},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_5},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_5},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_6},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_6},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_7},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_7},
- { INT_BT_A2DP_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_A2DP_RX},
- { AFE_PORT_ID_USB_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_RX},
- { AFE_PORT_ID_USB_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_TX},
- { DISPLAY_PORT_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_DISPLAY_PORT},
- { AFE_PORT_ID_TERTIARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { INT_BT_A2DP_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_A2DP_RX},
+ { AFE_PORT_ID_USB_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ LPASS_BE_USB_AUDIO_RX},
+ { AFE_PORT_ID_USB_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
+ LPASS_BE_USB_AUDIO_TX},
+ { DISPLAY_PORT_RX, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_DISPLAY_PORT},
+ { AFE_PORT_ID_TERTIARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_AUXPCM_RX},
- { AFE_PORT_ID_TERTIARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_AUXPCM_TX},
- { AFE_PORT_ID_QUATERNARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_PCM_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_AUXPCM_RX},
- { AFE_PORT_ID_QUATERNARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_PCM_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_AUXPCM_TX},
- { AFE_PORT_ID_INT0_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT0_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_INT0_MI2S_RX},
- { AFE_PORT_ID_INT0_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT0_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_INT0_MI2S_TX},
- { AFE_PORT_ID_INT1_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT1_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_INT1_MI2S_RX},
- { AFE_PORT_ID_INT1_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT1_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_INT1_MI2S_TX},
- { AFE_PORT_ID_INT2_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT2_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_INT2_MI2S_RX},
- { AFE_PORT_ID_INT2_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT2_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_INT2_MI2S_TX},
- { AFE_PORT_ID_INT3_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT3_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_INT3_MI2S_RX},
- { AFE_PORT_ID_INT3_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT3_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_INT3_MI2S_TX},
- { AFE_PORT_ID_INT4_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT4_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_INT4_MI2S_RX},
- { AFE_PORT_ID_INT4_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT4_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_INT4_MI2S_TX},
- { AFE_PORT_ID_INT5_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT5_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_INT5_MI2S_RX},
- { AFE_PORT_ID_INT5_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT5_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_INT5_MI2S_TX},
- { AFE_PORT_ID_INT6_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT6_MI2S_RX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_INT6_MI2S_RX},
- { AFE_PORT_ID_INT6_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_INT6_MI2S_TX, 0, {0}, {0}, 0, 0, 0, 0, 0,
LPASS_BE_INT6_MI2S_TX},
- { SLIMBUS_TX_VI, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_TX_VI},
+ { SLIMBUS_TX_VI, 0, {0}, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_TX_VI},
};
-/* Track ASM playback & capture sessions of DAI */
+/* Track ASM playback & capture sessions of DAI
+ * Track LSM listen sessions
+ */
static struct msm_pcm_routing_fdai_data
- fe_dai_map[MSM_FRONTEND_DAI_MM_SIZE][2] = {
+ fe_dai_map[MSM_FRONTEND_DAI_MAX][2] = {
/* MULTIMEDIA1 */
{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
@@ -573,13 +590,80 @@ static struct msm_pcm_routing_fdai_data
/* MULTIMEDIA19 */
{{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* CS_VOICE */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* VOIP */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* AFE_RX */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* AFE_TX */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* VOICE_STUB */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* VOLTE */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* DTMF_RX */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* VOICE2 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* QCHAT */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* VOLTE_STUB */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* LSM1 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* LSM2 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* LSM3 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* LSM4 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* LSM5 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* LSM6 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* LSM7 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* LSM8 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* VOICE2_STUB */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* VOWLAN */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* VOICEMMODE1 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
+ /* VOICEMMODE2 */
+ {{0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} },
+ {0, INVALID_SESSION, LEGACY_PCM_MODE, {NULL, NULL} } },
};
-static unsigned long session_copp_map[MSM_FRONTEND_DAI_MM_SIZE][2]
+static unsigned long session_copp_map[MSM_FRONTEND_DAI_MAX][2]
[MSM_BACKEND_DAI_MAX];
static struct msm_pcm_routing_app_type_data app_type_cfg[MAX_APP_TYPES];
+static struct msm_pcm_routing_app_type_data lsm_app_type_cfg[MAX_APP_TYPES];
static struct msm_pcm_stream_app_type_cfg
- fe_dai_app_type_cfg[MSM_FRONTEND_DAI_MM_SIZE][2];
+ fe_dai_app_type_cfg[MSM_FRONTEND_DAI_MAX][2];
/* The caller of this should aqcuire routing lock */
void msm_pcm_routing_get_bedai_info(int be_idx,
@@ -622,13 +706,39 @@ static int msm_pcm_routing_get_app_type_idx(int app_type)
return 0;
}
+static int msm_pcm_routing_get_lsm_app_type_idx(int app_type)
+{
+ int idx;
+
+ pr_debug("%s: app_type: %d\n", __func__, app_type);
+ for (idx = 0; idx < MAX_APP_TYPES; idx++) {
+ if (lsm_app_type_cfg[idx].app_type == app_type)
+ return idx;
+ }
+ pr_debug("%s: App type not available, fallback to default\n", __func__);
+ return 0;
+}
+
+static bool is_mm_lsm_fe_id(int fe_id)
+{
+ bool rc = true;
+
+ if (fe_id > MSM_FRONTEND_DAI_MM_MAX_ID &&
+ ((fe_id < MSM_FRONTEND_DAI_LSM1) ||
+ (fe_id > MSM_FRONTEND_DAI_LSM8))) {
+ rc = false;
+ }
+ return rc;
+}
+
+
void msm_pcm_routing_reg_stream_app_type_cfg(int fedai_id, int app_type,
int acdb_dev_id, int sample_rate, int session_type)
{
pr_debug("%s: fedai_id %d, session_type %d, app_type %d, acdb_dev_id %d, sample_rate %d\n",
__func__, fedai_id, session_type, app_type,
acdb_dev_id, sample_rate);
- if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+ if (!is_mm_lsm_fe_id(fedai_id)) {
pr_err("%s: Invalid machine driver ID %d\n",
__func__, fedai_id);
return;
@@ -675,7 +785,7 @@ int msm_pcm_routing_get_stream_app_type_cfg(int fedai_id, int session_type,
pr_err("%s: NULL pointer sent for sample rate\n", __func__);
ret = -EINVAL;
goto done;
- } else if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+ } else if (!is_mm_lsm_fe_id(fedai_id)) {
pr_err("%s: Invalid FE ID %d\n",
__func__, fedai_id);
ret = -EINVAL;
@@ -789,7 +899,8 @@ static uint8_t is_be_dai_extproc(int be_dai)
}
static void msm_pcm_routing_build_matrix(int fedai_id, int sess_type,
- int path_type, int perf_mode)
+ int path_type, int perf_mode,
+ uint32_t passthr_mode)
{
int i, port_type, j, num_copps = 0;
struct route_payload payload;
@@ -802,7 +913,7 @@ static void msm_pcm_routing_build_matrix(int fedai_id, int sess_type,
if (!is_be_dai_extproc(i) &&
(afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
(msm_bedais[i].active) &&
- (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+ (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))) {
for (j = 0; j < MAX_COPPS_PER_PORT; j++) {
unsigned long copp =
session_copp_map[fedai_id][sess_type][i];
@@ -825,7 +936,7 @@ static void msm_pcm_routing_build_matrix(int fedai_id, int sess_type,
fe_dai_app_type_cfg[fedai_id][sess_type].acdb_dev_id;
payload.sample_rate =
fe_dai_app_type_cfg[fedai_id][sess_type].sample_rate;
- adm_matrix_map(path_type, payload, perf_mode);
+ adm_matrix_map(path_type, payload, perf_mode, passthr_mode);
msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode);
}
}
@@ -859,7 +970,7 @@ void msm_pcm_routing_reg_psthr_stream(int fedai_id, int dspst_id,
if (!is_be_dai_extproc(i) &&
(afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
(msm_bedais[i].active) &&
- (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+ (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))) {
mode = afe_get_port_type(msm_bedais[i].port_id);
adm_connect_afe_port(mode, dspst_id,
msm_bedais[i].port_id);
@@ -869,28 +980,51 @@ void msm_pcm_routing_reg_psthr_stream(int fedai_id, int dspst_id,
mutex_unlock(&routing_lock);
}
+static bool route_check_fe_id_adm_support(int fe_id)
+{
+ bool rc = true;
+
+ if ((fe_id >= MSM_FRONTEND_DAI_LSM1) &&
+ (fe_id <= MSM_FRONTEND_DAI_LSM8)) {
+ /* FE ID is an LSM (listen) stream but LSM port is not ADM */
+ if (lsm_port_index != ADM_LSM_PORT_INDEX) {
+ pr_debug("%s: fe_id %d, lsm mux slim port %d\n",
+ __func__, fe_id, lsm_port_index);
+ rc = false;
+ }
+ }
+
+ return rc;
+}
+
int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
int dspst_id, int stream_type,
- uint32_t compr_passthr_mode)
+ uint32_t passthr_mode)
{
int i, j, session_type, path_type, port_type, topology, num_copps = 0;
struct route_payload payload;
u32 channels, sample_rate;
u16 bit_width = 16;
+ bool is_lsm;
pr_debug("%s:fe_id[%d] perf_mode[%d] id[%d] stream_type[%d] passt[%d]",
__func__, fe_id, perf_mode, dspst_id,
- stream_type, compr_passthr_mode);
-
- if (fe_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+ stream_type, passthr_mode);
+ if (!is_mm_lsm_fe_id(fe_id)) {
/* bad ID assigned in machine driver */
pr_err("%s: bad MM ID %d\n", __func__, fe_id);
return -EINVAL;
}
+ if (!route_check_fe_id_adm_support(fe_id)) {
+ /* ignore adm open if not supported for fe_id */
+ pr_debug("%s: No ADM support for fe id %d\n", __func__, fe_id);
+ return 0;
+ }
+
if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) {
session_type = SESSION_TYPE_RX;
- if (compr_passthr_mode != LEGACY_PCM)
+ if (passthr_mode != LEGACY_PCM)
path_type = ADM_PATH_COMPRESSED_RX;
else
path_type = ADM_PATH_PLAYBACK;
@@ -904,6 +1038,8 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
return -EINVAL;
}
+ is_lsm = (fe_id >= MSM_FRONTEND_DAI_LSM1) &&
+ (fe_id <= MSM_FRONTEND_DAI_LSM8);
mutex_lock(&routing_lock);
payload.num_copps = 0; /* only RX needs to use payload */
@@ -911,14 +1047,14 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
/* re-enable EQ if active */
msm_qti_pp_send_eq_values(fe_id);
for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
- if (test_bit(fe_id, &msm_bedais[i].fe_sessions))
- msm_bedais[i].compr_passthr_mode = compr_passthr_mode;
+ if (test_bit(fe_id, &msm_bedais[i].fe_sessions[0]))
+ msm_bedais[i].passthr_mode = passthr_mode;
if (!is_be_dai_extproc(i) &&
(afe_get_port_type(msm_bedais[i].port_id) ==
port_type) &&
(msm_bedais[i].active) &&
- (test_bit(fe_id, &msm_bedais[i].fe_sessions))) {
+ (test_bit(fe_id, &msm_bedais[i].fe_sessions[0]))) {
int app_type, app_type_idx, copp_idx, acdb_dev_id;
/*
@@ -934,7 +1070,15 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
msm_bedais[i].format);
app_type =
fe_dai_app_type_cfg[fe_id][session_type].app_type;
- if (app_type) {
+ if (app_type && is_lsm) {
+ app_type_idx =
+ msm_pcm_routing_get_lsm_app_type_idx(app_type);
+ sample_rate =
+ fe_dai_app_type_cfg[fe_id][session_type].
+ sample_rate;
+ bit_width =
+ lsm_app_type_cfg[app_type_idx].bit_width;
+ } else if (app_type) {
app_type_idx =
msm_pcm_routing_get_app_type_idx(
app_type);
@@ -949,9 +1093,10 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
fe_dai_app_type_cfg[fe_id][session_type].acdb_dev_id;
topology = msm_routing_get_adm_topology(path_type,
fe_id, session_type);
- if (compr_passthr_mode == COMPRESSED_PASSTHROUGH_DSD)
+
+ if (passthr_mode == COMPRESSED_PASSTHROUGH_DSD)
topology = COMPRESS_PASSTHROUGH_NONE_TOPOLOGY;
- pr_err("%s: Before adm open topology %d\n", __func__,
+ pr_debug("%s: Before adm open topology %d\n", __func__,
topology);
copp_idx =
@@ -988,7 +1133,7 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
num_copps++;
}
}
- if (compr_passthr_mode != COMPRESSED_PASSTHROUGH_DSD) {
+ if (passthr_mode != COMPRESSED_PASSTHROUGH_DSD) {
msm_routing_send_device_pp_params(
msm_bedais[i].port_id,
copp_idx);
@@ -1002,7 +1147,9 @@ int msm_pcm_routing_reg_phy_compr_stream(int fe_id, int perf_mode,
fe_dai_app_type_cfg[fe_id][session_type].app_type;
payload.acdb_dev_id =
fe_dai_app_type_cfg[fe_id][session_type].acdb_dev_id;
- adm_matrix_map(path_type, payload, perf_mode);
+ payload.sample_rate =
+ fe_dai_app_type_cfg[fe_id][session_type].sample_rate;
+ adm_matrix_map(path_type, payload, perf_mode, passthr_mode);
msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode);
}
mutex_unlock(&routing_lock);
@@ -1053,6 +1200,7 @@ int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode,
struct route_payload payload;
u32 channels, sample_rate;
uint16_t bits_per_sample = 16;
+ uint32_t passthr_mode = LEGACY_PCM;
if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
/* bad ID assigned in machine driver */
@@ -1082,7 +1230,7 @@ int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode,
if (!is_be_dai_extproc(i) &&
(afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
(msm_bedais[i].active) &&
- (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+ (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))) {
int app_type, app_type_idx, copp_idx, acdb_dev_id;
/*
* check if ADM needs to be configured with different
@@ -1092,7 +1240,7 @@ int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode,
channels = msm_bedais[i].channel;
else
channels = msm_bedais[i].adm_override_ch;
- msm_bedais[i].compr_passthr_mode =
+ msm_bedais[i].passthr_mode =
LEGACY_PCM;
bits_per_sample = msm_routing_get_bit_width(
@@ -1149,7 +1297,7 @@ int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode,
}
}
if ((perf_mode == LEGACY_PCM_MODE) &&
- (msm_bedais[i].compr_passthr_mode ==
+ (msm_bedais[i].passthr_mode ==
LEGACY_PCM))
msm_pcm_routing_cfg_pp(msm_bedais[i].port_id,
copp_idx, topology,
@@ -1165,7 +1313,7 @@ int msm_pcm_routing_reg_phy_stream(int fedai_id, int perf_mode,
fe_dai_app_type_cfg[fedai_id][session_type].acdb_dev_id;
payload.sample_rate =
fe_dai_app_type_cfg[fedai_id][session_type].sample_rate;
- adm_matrix_map(path_type, payload, perf_mode);
+ adm_matrix_map(path_type, payload, perf_mode, passthr_mode);
msm_pcm_routng_cfg_matrix_map_pp(payload, path_type, perf_mode);
}
mutex_unlock(&routing_lock);
@@ -1194,7 +1342,7 @@ void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type)
int i, port_type, session_type, path_type, topology;
struct msm_pcm_routing_fdai_data *fdai;
- if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+ if (!is_mm_lsm_fe_id(fedai_id)) {
/* bad ID assigned in machine driver */
pr_err("%s: bad MM ID\n", __func__);
return;
@@ -1215,7 +1363,7 @@ void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type)
if (!is_be_dai_extproc(i) &&
(afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
(msm_bedais[i].active) &&
- (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+ (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))) {
int idx;
unsigned long copp =
session_copp_map[fedai_id][session_type][i];
@@ -1240,7 +1388,7 @@ void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type)
if ((DOLBY_ADM_COPP_TOPOLOGY_ID == topology ||
DS2_ADM_COPP_TOPOLOGY_ID == topology) &&
(fdai->perf_mode == LEGACY_PCM_MODE) &&
- (msm_bedais[i].compr_passthr_mode ==
+ (msm_bedais[i].passthr_mode ==
LEGACY_PCM))
msm_pcm_routing_deinit_pp(msm_bedais[i].port_id,
topology);
@@ -1257,13 +1405,13 @@ static bool msm_pcm_routing_route_is_set(u16 be_id, u16 fe_id)
{
bool rc = false;
- if (fe_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+ if (!is_mm_lsm_fe_id(fe_id)) {
/* recheck FE ID in the mixer control defined in this file */
pr_err("%s: bad MM ID\n", __func__);
return rc;
}
- if (test_bit(fe_id, &msm_bedais[be_id].fe_sessions))
+ if (test_bit(fe_id, &msm_bedais[be_id].fe_sessions[0]))
rc = true;
return rc;
@@ -1275,19 +1423,27 @@ static void msm_pcm_routing_process_audio(u16 reg, u16 val, int set)
u32 channels, sample_rate;
uint16_t bits_per_sample = 16;
struct msm_pcm_routing_fdai_data *fdai;
+ uint32_t passthr_mode = msm_bedais[reg].passthr_mode;
+ bool is_lsm;
pr_debug("%s: reg %x val %x set %x\n", __func__, reg, val, set);
- if (val > MSM_FRONTEND_DAI_MM_MAX_ID) {
+ if (!is_mm_lsm_fe_id(val)) {
/* recheck FE ID in the mixer control defined in this file */
pr_err("%s: bad MM ID\n", __func__);
return;
}
+ if (!route_check_fe_id_adm_support(val)) {
+ /* ignore adm open if not supported for fe_id */
+ pr_debug("%s: No ADM support for fe id %d\n", __func__, val);
+ return;
+ }
+
if (afe_get_port_type(msm_bedais[reg].port_id) ==
MSM_AFE_PORT_TYPE_RX) {
session_type = SESSION_TYPE_RX;
- if (msm_bedais[reg].compr_passthr_mode != LEGACY_PCM)
+ if (passthr_mode != LEGACY_PCM)
path_type = ADM_PATH_COMPRESSED_RX;
else
path_type = ADM_PATH_PLAYBACK;
@@ -1295,15 +1451,17 @@ static void msm_pcm_routing_process_audio(u16 reg, u16 val, int set)
session_type = SESSION_TYPE_TX;
path_type = ADM_PATH_LIVE_REC;
}
+ is_lsm = (val >= MSM_FRONTEND_DAI_LSM1) &&
+ (val <= MSM_FRONTEND_DAI_LSM8);
mutex_lock(&routing_lock);
if (set) {
- if (!test_bit(val, &msm_bedais[reg].fe_sessions) &&
+ if (!test_bit(val, &msm_bedais[reg].fe_sessions[0]) &&
((msm_bedais[reg].port_id == VOICE_PLAYBACK_TX) ||
(msm_bedais[reg].port_id == VOICE2_PLAYBACK_TX)))
voc_start_playback(set, msm_bedais[reg].port_id);
- set_bit(val, &msm_bedais[reg].fe_sessions);
+ set_bit(val, &msm_bedais[reg].fe_sessions[0]);
fdai = &fe_dai_map[val][session_type];
if (msm_bedais[reg].active && fdai->strm_id !=
INVALID_SESSION) {
@@ -1334,7 +1492,15 @@ static void msm_pcm_routing_process_audio(u16 reg, u16 val, int set)
app_type =
fe_dai_app_type_cfg[val][session_type].app_type;
- if (app_type) {
+ if (app_type && is_lsm) {
+ app_type_idx =
+ msm_pcm_routing_get_lsm_app_type_idx(app_type);
+ sample_rate =
+ fe_dai_app_type_cfg[val][session_type].
+ sample_rate;
+ bits_per_sample =
+ lsm_app_type_cfg[app_type_idx].bit_width;
+ } else if (app_type) {
app_type_idx =
msm_pcm_routing_get_app_type_idx(app_type);
sample_rate =
@@ -1379,20 +1545,20 @@ static void msm_pcm_routing_process_audio(u16 reg, u16 val, int set)
msm_pcm_routing_build_matrix(val, session_type,
path_type,
- fdai->perf_mode);
+ fdai->perf_mode,
+ passthr_mode);
if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
- (msm_bedais[reg].compr_passthr_mode ==
- LEGACY_PCM))
+ (passthr_mode == LEGACY_PCM))
msm_pcm_routing_cfg_pp(msm_bedais[reg].port_id,
copp_idx, topology,
channels);
}
} else {
- if (test_bit(val, &msm_bedais[reg].fe_sessions) &&
+ if (test_bit(val, &msm_bedais[reg].fe_sessions[0]) &&
((msm_bedais[reg].port_id == VOICE_PLAYBACK_TX) ||
(msm_bedais[reg].port_id == VOICE2_PLAYBACK_TX)))
voc_start_playback(set, msm_bedais[reg].port_id);
- clear_bit(val, &msm_bedais[reg].fe_sessions);
+ clear_bit(val, &msm_bedais[reg].fe_sessions[0]);
fdai = &fe_dai_map[val][session_type];
if (msm_bedais[reg].active && fdai->strm_id !=
INVALID_SESSION) {
@@ -1417,14 +1583,14 @@ static void msm_pcm_routing_process_audio(u16 reg, u16 val, int set)
if ((DOLBY_ADM_COPP_TOPOLOGY_ID == topology ||
DS2_ADM_COPP_TOPOLOGY_ID == topology) &&
(fdai->perf_mode == LEGACY_PCM_MODE) &&
- (msm_bedais[reg].compr_passthr_mode ==
- LEGACY_PCM))
+ (passthr_mode == LEGACY_PCM))
msm_pcm_routing_deinit_pp(
msm_bedais[reg].port_id,
topology);
msm_pcm_routing_build_matrix(val, session_type,
path_type,
- fdai->perf_mode);
+ fdai->perf_mode,
+ passthr_mode);
}
}
if ((msm_bedais[reg].port_id == VOICE_RECORD_RX)
@@ -1440,7 +1606,7 @@ static int msm_routing_get_audio_mixer(struct snd_kcontrol *kcontrol,
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions))
+ if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]))
ucontrol->value.integer.value[0] = 1;
else
ucontrol->value.integer.value[0] = 0;
@@ -1474,6 +1640,51 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
return 1;
}
+static int msm_routing_get_listen_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+
+ if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]))
+ ucontrol->value.integer.value[0] = 1;
+ else
+ ucontrol->value.integer.value[0] = 0;
+
+ pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+ ucontrol->value.integer.value[0]);
+
+ return 0;
+}
+
+static int msm_routing_put_listen_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist =
+ dapm_kcontrol_get_wlist(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_dapm_update *update = NULL;
+
+ pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+ ucontrol->value.integer.value[0]);
+
+ if (ucontrol->value.integer.value[0]) {
+ if (msm_pcm_routing_route_is_set(mc->reg, mc->shift) == false)
+ msm_pcm_routing_process_audio(mc->reg, mc->shift, 1);
+ snd_soc_dapm_mixer_update_power(widget->dapm,
+ kcontrol, 1, update);
+ } else if (!ucontrol->value.integer.value[0]) {
+ if (msm_pcm_routing_route_is_set(mc->reg, mc->shift) == true)
+ msm_pcm_routing_process_audio(mc->reg, mc->shift, 0);
+ snd_soc_dapm_mixer_update_power(widget->dapm,
+ kcontrol, 0, update);
+ }
+
+ return 1;
+}
+
static void msm_pcm_routing_process_voice(u16 reg, u16 val, int set)
{
u32 session_id = 0;
@@ -1490,9 +1701,9 @@ static void msm_pcm_routing_process_voice(u16 reg, u16 val, int set)
mutex_lock(&routing_lock);
if (set)
- set_bit(val, &msm_bedais[reg].fe_sessions);
+ set_bit(val, &msm_bedais[reg].fe_sessions[0]);
else
- clear_bit(val, &msm_bedais[reg].fe_sessions);
+ clear_bit(val, &msm_bedais[reg].fe_sessions[0]);
if (val == MSM_FRONTEND_DAI_DTMF_RX &&
afe_get_port_type(msm_bedais[reg].port_id) ==
@@ -1551,7 +1762,7 @@ static int msm_routing_get_voice_mixer(struct snd_kcontrol *kcontrol,
mutex_lock(&routing_lock);
- if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions))
+ if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]))
ucontrol->value.integer.value[0] = 1;
else
ucontrol->value.integer.value[0] = 0;
@@ -1593,7 +1804,7 @@ static int msm_routing_get_voice_stub_mixer(struct snd_kcontrol *kcontrol,
mutex_lock(&routing_lock);
- if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions))
+ if (test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]))
ucontrol->value.integer.value[0] = 1;
else
ucontrol->value.integer.value[0] = 0;
@@ -1618,13 +1829,13 @@ static int msm_routing_put_voice_stub_mixer(struct snd_kcontrol *kcontrol,
if (ucontrol->value.integer.value[0]) {
mutex_lock(&routing_lock);
- set_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions);
+ set_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]);
mutex_unlock(&routing_lock);
snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1, update);
} else {
mutex_lock(&routing_lock);
- clear_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions);
+ clear_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions[0]);
mutex_unlock(&routing_lock);
snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0, update);
@@ -1692,6 +1903,64 @@ static int msm_routing_put_hfp_switch_mixer(struct snd_kcontrol *kcontrol,
return 1;
}
+static int msm_routing_get_int0_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = int0_mi2s_switch_enable;
+ pr_debug("%s: INT0 MI2S Switch enable %ld\n", __func__,
+ ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_routing_put_int0_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist =
+ dapm_kcontrol_get_wlist(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_update *update = NULL;
+
+ pr_debug("%s: INT0 MI2S Switch enable %ld\n", __func__,
+ ucontrol->value.integer.value[0]);
+ if (ucontrol->value.integer.value[0])
+ snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1,
+ update);
+ else
+ snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0,
+ update);
+ int0_mi2s_switch_enable = ucontrol->value.integer.value[0];
+ return 1;
+}
+
+static int msm_routing_get_int4_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = int4_mi2s_switch_enable;
+ pr_debug("%s: INT4 MI2S Switch enable %ld\n", __func__,
+ ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_routing_put_int4_mi2s_switch_mixer(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dapm_widget_list *wlist =
+ dapm_kcontrol_get_wlist(kcontrol);
+ struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_dapm_update *update = NULL;
+
+ pr_debug("%s: INT4 MI2S Switch enable %ld\n", __func__,
+ ucontrol->value.integer.value[0]);
+ if (ucontrol->value.integer.value[0])
+ snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 1,
+ update);
+ else
+ snd_soc_dapm_mixer_update_power(widget->dapm, kcontrol, 0,
+ update);
+ int4_mi2s_switch_enable = ucontrol->value.integer.value[0];
+ return 1;
+}
+
static int msm_routing_get_usb_switch_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1868,23 +2137,19 @@ static int msm_routing_put_fm_pcmrx_switch_mixer(struct snd_kcontrol *kcontrol,
return 1;
}
-static int msm_routing_lsm_mux_get(struct snd_kcontrol *kcontrol,
+static int msm_routing_lsm_port_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- ucontrol->value.integer.value[0] = lsm_mux_slim_port;
+ ucontrol->value.integer.value[0] = lsm_port_index;
return 0;
}
-static int msm_routing_lsm_mux_put(struct snd_kcontrol *kcontrol,
+static int msm_routing_lsm_port_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist =
- dapm_kcontrol_get_wlist(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
int mux = ucontrol->value.enumerated.item[0];
int lsm_port = AFE_PORT_ID_SLIMBUS_MULTI_CHAN_5_TX;
- struct snd_soc_dapm_update *update = NULL;
if (mux >= e->items) {
pr_err("%s: Invalid mux value %d\n", __func__, mux);
@@ -1915,19 +2180,18 @@ static int msm_routing_lsm_mux_put(struct snd_kcontrol *kcontrol,
case 7:
lsm_port = AFE_PORT_ID_TERTIARY_MI2S_TX;
break;
+ case 8:
+ lsm_port = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+ break;
+ case 9:
+ lsm_port = ADM_LSM_PORT_ID;
+ break;
default:
pr_err("Default lsm port");
break;
}
set_lsm_port(lsm_port);
-
- if (ucontrol->value.integer.value[0]) {
- lsm_mux_slim_port = ucontrol->value.integer.value[0];
- snd_soc_dapm_mux_update_power(widget->dapm, kcontrol, mux, e, update);
- } else {
- snd_soc_dapm_mux_update_power(widget->dapm, kcontrol, mux, e, update);
- lsm_mux_slim_port = ucontrol->value.integer.value[0];
- }
+ lsm_port_index = ucontrol->value.integer.value[0];
return 0;
}
@@ -1940,23 +2204,27 @@ static int msm_routing_lsm_func_get(struct snd_kcontrol *kcontrol,
enum afe_mad_type mad_type;
pr_debug("%s: enter\n", __func__);
- for (i = 0; i < ARRAY_SIZE(mad_audio_mux_text); i++)
- if (!strncmp(kcontrol->id.name, mad_audio_mux_text[i],
- strlen(mad_audio_mux_text[i])))
+ for (i = 0; i < ARRAY_SIZE(lsm_port_text); i++)
+ if (strnstr(kcontrol->id.name, lsm_port_text[i],
+ strlen(lsm_port_text[i])))
+ break;
- if (i-- == ARRAY_SIZE(mad_audio_mux_text)) {
+ if (i-- == ARRAY_SIZE(lsm_port_text)) {
WARN(1, "Invalid id name %s\n", kcontrol->id.name);
return -EINVAL;
}
/*Check for Tertiary TX port*/
- if (!strcmp(kcontrol->id.name, mad_audio_mux_text[7])) {
+ if (!strcmp(kcontrol->id.name, lsm_port_text[7])) {
ucontrol->value.integer.value[0] = MADSWAUDIO;
return 0;
}
port_id = i * 2 + 1 + SLIMBUS_0_RX;
+
+ if (!strcmp(kcontrol->id.name, lsm_port_text[8]))
+ port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+
mad_type = afe_port_get_mad_type(port_id);
pr_debug("%s: port_id 0x%x, mad_type %d\n", __func__, port_id,
mad_type);
@@ -1991,12 +2259,12 @@ static int msm_routing_lsm_func_put(struct snd_kcontrol *kcontrol,
enum afe_mad_type mad_type;
pr_debug("%s: enter\n", __func__);
- for (i = 0; i < ARRAY_SIZE(mad_audio_mux_text); i++)
- if (!strncmp(kcontrol->id.name, mad_audio_mux_text[i],
- strlen(mad_audio_mux_text[i])))
+ for (i = 0; i < ARRAY_SIZE(lsm_port_text); i++)
+ if (strnstr(kcontrol->id.name, lsm_port_text[i],
+ strlen(lsm_port_text[i])))
break;
- if (i-- == ARRAY_SIZE(mad_audio_mux_text)) {
+ if (i-- == ARRAY_SIZE(lsm_port_text)) {
WARN(1, "Invalid id name %s\n", kcontrol->id.name);
return -EINVAL;
}
@@ -2024,11 +2292,16 @@ static int msm_routing_lsm_func_put(struct snd_kcontrol *kcontrol,
}
/*Check for Tertiary TX port*/
- if (!strcmp(kcontrol->id.name, mad_audio_mux_text[7])) {
+ if (strnstr(kcontrol->id.name, lsm_port_text[7],
+ strlen(lsm_port_text[7]))) {
port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
mad_type = MAD_SW_AUDIO;
}
+ if (strnstr(kcontrol->id.name, lsm_port_text[8],
+ strlen(lsm_port_text[8])))
+ port_id = AFE_PORT_ID_QUATERNARY_MI2S_TX;
+
pr_debug("%s: port_id 0x%x, mad_type %d\n", __func__, port_id,
mad_type);
return afe_port_set_mad_type(port_id, mad_type);
@@ -2193,6 +2466,144 @@ static int msm_routing_put_port_mixer(struct snd_kcontrol *kcontrol,
return 1;
}
+static int msm_ec_ref_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = msm_ec_ref_ch;
+ pr_debug("%s: msm_ec_ref_ch = %ld\n", __func__,
+ ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_ec_ref_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ msm_ec_ref_ch = ucontrol->value.integer.value[0];
+ pr_debug("%s: msm_ec_ref_ch = %d\n", __func__, msm_ec_ref_ch);
+ adm_num_ec_ref_rx_chans(msm_ec_ref_ch);
+ return 0;
+}
+
+static const char *const ec_ref_ch_text[] = {"Zero", "One", "Two", "Three",
+ "Four", "Five", "Six", "Seven", "Eight"};
+
+static int msm_ec_ref_bit_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (msm_ec_ref_bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 2;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+ pr_debug("%s: msm_ec_ref_bit_format = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_ec_ref_bit_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u16 bit_width = 0;
+
+ switch (ucontrol->value.integer.value[0]) {
+ case 2:
+ msm_ec_ref_bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 1:
+ msm_ec_ref_bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ default:
+ msm_ec_ref_bit_format = 0;
+ break;
+ }
+
+ if (msm_ec_ref_bit_format == SNDRV_PCM_FORMAT_S16_LE)
+ bit_width = 16;
+ else if (msm_ec_ref_bit_format == SNDRV_PCM_FORMAT_S24_LE)
+ bit_width = 24;
+
+ pr_debug("%s: msm_ec_ref_bit_format = %d\n",
+ __func__, msm_ec_ref_bit_format);
+ adm_ec_ref_rx_bit_width(bit_width);
+ return 0;
+}
+
+static char const *ec_ref_bit_format_text[] = {"0", "S16_LE", "S24_LE"};
+
+static int msm_ec_ref_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = msm_ec_ref_sampling_rate;
+ pr_debug("%s: msm_ec_ref_sampling_rate = %ld\n",
+ __func__, ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int msm_ec_ref_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ msm_ec_ref_sampling_rate = 0;
+ break;
+ case 1:
+ msm_ec_ref_sampling_rate = 8000;
+ break;
+ case 2:
+ msm_ec_ref_sampling_rate = 16000;
+ break;
+ case 3:
+ msm_ec_ref_sampling_rate = 32000;
+ break;
+ case 4:
+ msm_ec_ref_sampling_rate = 44100;
+ break;
+ case 5:
+ msm_ec_ref_sampling_rate = 48000;
+ break;
+ case 6:
+ msm_ec_ref_sampling_rate = 96000;
+ break;
+ case 7:
+ msm_ec_ref_sampling_rate = 192000;
+ break;
+ case 8:
+ msm_ec_ref_sampling_rate = 384000;
+ break;
+ default:
+ msm_ec_ref_sampling_rate = 48000;
+ break;
+ }
+ pr_debug("%s: msm_ec_ref_sampling_rate = %d\n",
+ __func__, msm_ec_ref_sampling_rate);
+ adm_ec_ref_rx_sampling_rate(msm_ec_ref_sampling_rate);
+ return 0;
+}
+
+static const char *const ec_ref_rate_text[] = {"0", "8000", "16000",
+ "32000", "44100", "48000", "96000", "192000", "384000"};
+
+static const struct soc_enum msm_route_ec_ref_params_enum[] = {
+ SOC_ENUM_SINGLE_EXT(9, ec_ref_ch_text),
+ SOC_ENUM_SINGLE_EXT(3, ec_ref_bit_format_text),
+ SOC_ENUM_SINGLE_EXT(9, ec_ref_rate_text),
+};
+
+static const struct snd_kcontrol_new ec_ref_param_controls[] = {
+ SOC_ENUM_EXT("EC Reference Channels", msm_route_ec_ref_params_enum[0],
+ msm_ec_ref_ch_get, msm_ec_ref_ch_put),
+ SOC_ENUM_EXT("EC Reference Bit Format", msm_route_ec_ref_params_enum[1],
+ msm_ec_ref_bit_format_get, msm_ec_ref_bit_format_put),
+ SOC_ENUM_EXT("EC Reference SampleRate", msm_route_ec_ref_params_enum[2],
+ msm_ec_ref_rate_get, msm_ec_ref_rate_put),
+};
+
static int msm_routing_ec_ref_rx_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -7661,6 +8072,198 @@ static const struct snd_kcontrol_new sec_mi2s_rx_port_mixer_controls[] = {
msm_routing_put_port_mixer),
};
+static const struct snd_kcontrol_new lsm1_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+ MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+ MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+ MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM1, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm2_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+ MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+ MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+ MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM2, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm3_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+ MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+ MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+ MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM3, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm4_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+ MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+ MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+ MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM4, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm5_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+ MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+ MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+ MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM5, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm6_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+ MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+ MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+ MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM6, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm7_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+ MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+ MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+ MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM7, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+};
+
+static const struct snd_kcontrol_new lsm8_mixer_controls[] = {
+ SOC_SINGLE_EXT("SLIMBUS_0_TX", MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_1_TX", MSM_BACKEND_DAI_SLIMBUS_1_TX,
+ MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_3_TX", MSM_BACKEND_DAI_SLIMBUS_3_TX,
+ MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_4_TX", MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("SLIMBUS_5_TX", MSM_BACKEND_DAI_SLIMBUS_5_TX,
+ MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("TERT_MI2S_TX", MSM_BACKEND_DAI_TERTIARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+ SOC_SINGLE_EXT("QUAT_MI2S_TX", MSM_BACKEND_DAI_QUATERNARY_MI2S_TX,
+ MSM_FRONTEND_DAI_LSM8, 1, 0, msm_routing_get_listen_mixer,
+ msm_routing_put_listen_mixer),
+};
+
static const struct snd_kcontrol_new slim_fm_switch_mixer_controls =
SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
0, 1, 0, msm_routing_get_switch_mixer,
@@ -7691,6 +8294,16 @@ static const struct snd_kcontrol_new pcm_rx_switch_mixer_controls =
0, 1, 0, msm_routing_get_fm_pcmrx_switch_mixer,
msm_routing_put_fm_pcmrx_switch_mixer);
+static const struct snd_kcontrol_new int0_mi2s_rx_switch_mixer_controls =
+ SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+ 0, 1, 0, msm_routing_get_int0_mi2s_switch_mixer,
+ msm_routing_put_int0_mi2s_switch_mixer);
+
+static const struct snd_kcontrol_new int4_mi2s_rx_switch_mixer_controls =
+ SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
+ 0, 1, 0, msm_routing_get_int4_mi2s_switch_mixer,
+ msm_routing_put_int4_mi2s_switch_mixer);
+
static const struct snd_kcontrol_new pri_mi2s_rx_switch_mixer_controls =
SOC_SINGLE_EXT("Switch", SND_SOC_NOPM,
0, 1, 0, msm_routing_get_pri_mi2s_switch_mixer,
@@ -7736,53 +8349,17 @@ static const struct snd_kcontrol_new usb_switch_mixer_controls =
0, 1, 0, msm_routing_get_usb_switch_mixer,
msm_routing_put_usb_switch_mixer);
-static const struct soc_enum lsm_mux_enum =
- SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mad_audio_mux_text), mad_audio_mux_text);
-
-static const struct snd_kcontrol_new lsm1_mux =
- SOC_DAPM_ENUM_EXT("LSM1 MUX", lsm_mux_enum,
- msm_routing_lsm_mux_get,
- msm_routing_lsm_mux_put);
-
-static const struct snd_kcontrol_new lsm2_mux =
- SOC_DAPM_ENUM_EXT("LSM2 MUX", lsm_mux_enum,
- msm_routing_lsm_mux_get,
- msm_routing_lsm_mux_put);
-static const struct snd_kcontrol_new lsm3_mux =
- SOC_DAPM_ENUM_EXT("LSM3 MUX", lsm_mux_enum,
- msm_routing_lsm_mux_get,
- msm_routing_lsm_mux_put);
-
-static const struct snd_kcontrol_new lsm4_mux =
- SOC_DAPM_ENUM_EXT("LSM4 MUX", lsm_mux_enum,
- msm_routing_lsm_mux_get,
- msm_routing_lsm_mux_put);
-static const struct snd_kcontrol_new lsm5_mux =
- SOC_DAPM_ENUM_EXT("LSM5 MUX", lsm_mux_enum,
- msm_routing_lsm_mux_get,
- msm_routing_lsm_mux_put);
-
-static const struct snd_kcontrol_new lsm6_mux =
- SOC_DAPM_ENUM_EXT("LSM6 MUX", lsm_mux_enum,
- msm_routing_lsm_mux_get,
- msm_routing_lsm_mux_put);
-static const struct snd_kcontrol_new lsm7_mux =
- SOC_DAPM_ENUM_EXT("LSM7 MUX", lsm_mux_enum,
- msm_routing_lsm_mux_get,
- msm_routing_lsm_mux_put);
-
-static const struct snd_kcontrol_new lsm8_mux =
- SOC_DAPM_ENUM_EXT("LSM8 MUX", lsm_mux_enum,
- msm_routing_lsm_mux_get,
- msm_routing_lsm_mux_put);
-
+static const struct soc_enum lsm_port_enum =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lsm_port_text), lsm_port_text);
static const char * const lsm_func_text[] = {
"None", "AUDIO", "BEACON", "ULTRASOUND", "SWAUDIO",
};
static const struct soc_enum lsm_func_enum =
SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(lsm_func_text), lsm_func_text);
-static const struct snd_kcontrol_new lsm_function[] = {
+
+static const struct snd_kcontrol_new lsm_controls[] = {
+ /* kcontrol of lsm_function */
SOC_ENUM_EXT(SLIMBUS_0_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
msm_routing_lsm_func_get, msm_routing_lsm_func_put),
SOC_ENUM_EXT(SLIMBUS_1_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
@@ -7797,6 +8374,33 @@ static const struct snd_kcontrol_new lsm_function[] = {
msm_routing_lsm_func_get, msm_routing_lsm_func_put),
SOC_ENUM_EXT(TERT_MI2S_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+ SOC_ENUM_EXT(QUAT_MI2S_TX_TEXT" "LSM_FUNCTION_TEXT, lsm_func_enum,
+ msm_routing_lsm_func_get, msm_routing_lsm_func_put),
+ /* kcontrol of lsm_port */
+ SOC_ENUM_EXT("LSM1 Port", lsm_port_enum,
+ msm_routing_lsm_port_get,
+ msm_routing_lsm_port_put),
+ SOC_ENUM_EXT("LSM2 Port", lsm_port_enum,
+ msm_routing_lsm_port_get,
+ msm_routing_lsm_port_put),
+ SOC_ENUM_EXT("LSM3 Port", lsm_port_enum,
+ msm_routing_lsm_port_get,
+ msm_routing_lsm_port_put),
+ SOC_ENUM_EXT("LSM4 Port", lsm_port_enum,
+ msm_routing_lsm_port_get,
+ msm_routing_lsm_port_put),
+ SOC_ENUM_EXT("LSM5 Port", lsm_port_enum,
+ msm_routing_lsm_port_get,
+ msm_routing_lsm_port_put),
+ SOC_ENUM_EXT("LSM6 Port", lsm_port_enum,
+ msm_routing_lsm_port_get,
+ msm_routing_lsm_port_put),
+ SOC_ENUM_EXT("LSM7 Port", lsm_port_enum,
+ msm_routing_lsm_port_get,
+ msm_routing_lsm_port_put),
+ SOC_ENUM_EXT("LSM8 Port", lsm_port_enum,
+ msm_routing_lsm_port_get,
+ msm_routing_lsm_port_put),
};
static const char * const aanc_slim_0_rx_text[] = {
@@ -7853,7 +8457,7 @@ static int msm_routing_put_stereo_to_custom_stereo_control(
(port_id != AFE_PORT_ID_INT4_MI2S_RX))
continue;
- for_each_set_bit(i, &msm_bedais[be_index].fe_sessions,
+ for_each_set_bit(i, &msm_bedais[be_index].fe_sessions[0],
MSM_FRONTEND_DAI_MM_SIZE) {
if (fe_dai_map[i][SESSION_TYPE_RX].perf_mode !=
LEGACY_PCM_MODE)
@@ -7959,6 +8563,45 @@ static const struct snd_kcontrol_new app_type_cfg_controls[] = {
msm_routing_put_app_type_cfg_control),
};
+static int msm_routing_get_lsm_app_type_cfg_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return 0;
+}
+
+static int msm_routing_put_lsm_app_type_cfg_control(
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int i = 0, j;
+ int num_app_types = ucontrol->value.integer.value[i++];
+
+ memset(lsm_app_type_cfg, 0, MAX_APP_TYPES*
+ sizeof(struct msm_pcm_routing_app_type_data));
+ if (num_app_types > MAX_APP_TYPES) {
+ pr_err("%s: number of app types exceed the max supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ for (j = 0; j < num_app_types; j++) {
+ lsm_app_type_cfg[j].app_type =
+ ucontrol->value.integer.value[i++];
+ lsm_app_type_cfg[j].sample_rate =
+ ucontrol->value.integer.value[i++];
+ lsm_app_type_cfg[j].bit_width =
+ ucontrol->value.integer.value[i++];
+ }
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new lsm_app_type_cfg_controls[] = {
+ SOC_SINGLE_MULTI_EXT("Listen App Type Config", SND_SOC_NOPM, 0,
+ 0xFFFFFFFF, 0, 128, msm_routing_get_lsm_app_type_cfg_control,
+ msm_routing_put_lsm_app_type_cfg_control),
+};
+
static int msm_routing_get_use_ds1_or_ds2_control(
struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
@@ -8154,7 +8797,7 @@ static int msm_audio_get_copp_idx_from_port_id(int port_id, int session_type,
goto done;
}
- for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions,
+ for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions[0],
MSM_FRONTEND_DAI_MM_SIZE) {
for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
copp = session_copp_map[i]
@@ -9205,6 +9848,10 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
&slim6_fm_switch_mixer_controls),
SND_SOC_DAPM_SWITCH("PCM_RX_DL_HL", SND_SOC_NOPM, 0, 0,
&pcm_rx_switch_mixer_controls),
+ SND_SOC_DAPM_SWITCH("INT0_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+ &int0_mi2s_rx_switch_mixer_controls),
+ SND_SOC_DAPM_SWITCH("INT4_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
+ &int4_mi2s_rx_switch_mixer_controls),
SND_SOC_DAPM_SWITCH("PRI_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
&pri_mi2s_rx_switch_mixer_controls),
SND_SOC_DAPM_SWITCH("SEC_MI2S_RX_DL_HL", SND_SOC_NOPM, 0, 0,
@@ -9224,16 +9871,6 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
SND_SOC_DAPM_SWITCH("USB_DL_HL", SND_SOC_NOPM, 0, 0,
&usb_switch_mixer_controls),
- /* Mux Definitions */
- SND_SOC_DAPM_MUX("LSM1 MUX", SND_SOC_NOPM, 0, 0, &lsm1_mux),
- SND_SOC_DAPM_MUX("LSM2 MUX", SND_SOC_NOPM, 0, 0, &lsm2_mux),
- SND_SOC_DAPM_MUX("LSM3 MUX", SND_SOC_NOPM, 0, 0, &lsm3_mux),
- SND_SOC_DAPM_MUX("LSM4 MUX", SND_SOC_NOPM, 0, 0, &lsm4_mux),
- SND_SOC_DAPM_MUX("LSM5 MUX", SND_SOC_NOPM, 0, 0, &lsm5_mux),
- SND_SOC_DAPM_MUX("LSM6 MUX", SND_SOC_NOPM, 0, 0, &lsm6_mux),
- SND_SOC_DAPM_MUX("LSM7 MUX", SND_SOC_NOPM, 0, 0, &lsm7_mux),
- SND_SOC_DAPM_MUX("LSM8 MUX", SND_SOC_NOPM, 0, 0, &lsm8_mux),
-
/* Mixer definitions */
SND_SOC_DAPM_MIXER("PRI_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
pri_i2s_rx_mixer_controls, ARRAY_SIZE(pri_i2s_rx_mixer_controls)),
@@ -9591,6 +10228,23 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
SND_SOC_DAPM_MIXER("USB_AUDIO_RX Port Mixer",
SND_SOC_NOPM, 0, 0, usb_rx_port_mixer_controls,
ARRAY_SIZE(usb_rx_port_mixer_controls)),
+ /* lsm mixer definitions */
+ SND_SOC_DAPM_MIXER("LSM1 Mixer", SND_SOC_NOPM, 0, 0,
+ lsm1_mixer_controls, ARRAY_SIZE(lsm1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("LSM2 Mixer", SND_SOC_NOPM, 0, 0,
+ lsm2_mixer_controls, ARRAY_SIZE(lsm2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("LSM3 Mixer", SND_SOC_NOPM, 0, 0,
+ lsm3_mixer_controls, ARRAY_SIZE(lsm3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("LSM4 Mixer", SND_SOC_NOPM, 0, 0,
+ lsm4_mixer_controls, ARRAY_SIZE(lsm4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("LSM5 Mixer", SND_SOC_NOPM, 0, 0,
+ lsm5_mixer_controls, ARRAY_SIZE(lsm5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("LSM6 Mixer", SND_SOC_NOPM, 0, 0,
+ lsm6_mixer_controls, ARRAY_SIZE(lsm6_mixer_controls)),
+ SND_SOC_DAPM_MIXER("LSM7 Mixer", SND_SOC_NOPM, 0, 0,
+ lsm7_mixer_controls, ARRAY_SIZE(lsm7_mixer_controls)),
+ SND_SOC_DAPM_MIXER("LSM8 Mixer", SND_SOC_NOPM, 0, 0,
+ lsm8_mixer_controls, ARRAY_SIZE(lsm8_mixer_controls)),
/* Virtual Pins to force backends ON atm */
SND_SOC_DAPM_OUTPUT("BE_OUT"),
SND_SOC_DAPM_INPUT("BE_IN"),
@@ -11050,70 +11704,77 @@ static const struct snd_soc_dapm_route intercon[] = {
{"SLIM4_UL_HL", NULL, "SLIMBUS_4_TX"},
{"SLIM8_UL_HL", NULL, "SLIMBUS_8_TX"},
- {"LSM1 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
- {"LSM1 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
- {"LSM1 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
- {"LSM1 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
- {"LSM1 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
- {"LSM1 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
- {"LSM1_UL_HL", NULL, "LSM1 MUX"},
-
- {"LSM2 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
- {"LSM2 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
- {"LSM2 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
- {"LSM2 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
- {"LSM2 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
- {"LSM2 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
- {"LSM2_UL_HL", NULL, "LSM2 MUX"},
-
-
- {"LSM3 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
- {"LSM3 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
- {"LSM3 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
- {"LSM3 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
- {"LSM3 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
- {"LSM3 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
- {"LSM3_UL_HL", NULL, "LSM3 MUX"},
-
-
- {"LSM4 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
- {"LSM4 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
- {"LSM4 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
- {"LSM4 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
- {"LSM4 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
- {"LSM4 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
- {"LSM4_UL_HL", NULL, "LSM4 MUX"},
-
- {"LSM5 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
- {"LSM5 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
- {"LSM5 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
- {"LSM5 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
- {"LSM5 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
- {"LSM5 MUX", "TERT_MI2S_TX", "TERT_MI2S_TX"},
- {"LSM5_UL_HL", NULL, "LSM5 MUX"},
-
- {"LSM6 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
- {"LSM6 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
- {"LSM6 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
- {"LSM6 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
- {"LSM6 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
- {"LSM6_UL_HL", NULL, "LSM6 MUX"},
-
-
- {"LSM7 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
- {"LSM7 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
- {"LSM7 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
- {"LSM7 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
- {"LSM7 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
- {"LSM7_UL_HL", NULL, "LSM7 MUX"},
-
-
- {"LSM8 MUX", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
- {"LSM8 MUX", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
- {"LSM8 MUX", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
- {"LSM8 MUX", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
- {"LSM8 MUX", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
- {"LSM8_UL_HL", NULL, "LSM8 MUX"},
+
+ {"LSM1 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+ {"LSM1 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+ {"LSM1 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+ {"LSM1 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+ {"LSM1 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+ {"LSM1 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"LSM1 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"LSM1_UL_HL", NULL, "LSM1 Mixer"},
+
+ {"LSM2 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+ {"LSM2 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+ {"LSM2 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+ {"LSM2 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+ {"LSM2 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+ {"LSM2 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"LSM2 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"LSM2_UL_HL", NULL, "LSM2 Mixer"},
+
+
+ {"LSM3 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+ {"LSM3 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+ {"LSM3 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+ {"LSM3 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+ {"LSM3 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+ {"LSM3 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"LSM3 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"LSM3_UL_HL", NULL, "LSM3 Mixer"},
+
+
+ {"LSM4 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+ {"LSM4 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+ {"LSM4 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+ {"LSM4 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+ {"LSM4 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+ {"LSM4 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"LSM4 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"LSM4_UL_HL", NULL, "LSM4 Mixer"},
+
+ {"LSM5 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+ {"LSM5 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+ {"LSM5 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+ {"LSM5 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+ {"LSM5 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+ {"LSM5 Mixer", "TERT_MI2S_TX", "TERT_MI2S_TX"},
+ {"LSM5 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"LSM5_UL_HL", NULL, "LSM5 Mixer"},
+
+ {"LSM6 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+ {"LSM6 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+ {"LSM6 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+ {"LSM6 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+ {"LSM6 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+ {"LSM6 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"LSM6_UL_HL", NULL, "LSM6 Mixer"},
+
+ {"LSM7 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+ {"LSM7 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+ {"LSM7 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+ {"LSM7 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+ {"LSM7 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+ {"LSM7 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"LSM7_UL_HL", NULL, "LSM7 Mixer"},
+
+ {"LSM8 Mixer", "SLIMBUS_0_TX", "SLIMBUS_0_TX"},
+ {"LSM8 Mixer", "SLIMBUS_1_TX", "SLIMBUS_1_TX"},
+ {"LSM8 Mixer", "SLIMBUS_3_TX", "SLIMBUS_3_TX"},
+ {"LSM8 Mixer", "SLIMBUS_4_TX", "SLIMBUS_4_TX"},
+ {"LSM8 Mixer", "SLIMBUS_5_TX", "SLIMBUS_5_TX"},
+ {"LSM8 Mixer", "QUAT_MI2S_TX", "QUAT_MI2S_TX"},
+ {"LSM8_UL_HL", NULL, "LSM8 Mixer"},
{"CPE_LSM_UL_HL", NULL, "BE_IN"},
@@ -11730,7 +12391,9 @@ static int msm_pcm_routing_close(struct snd_pcm_substream *substream)
path_type = ADM_PATH_LIVE_REC;
mutex_lock(&routing_lock);
- for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) {
+ for_each_set_bit(i, &bedai->fe_sessions[0], MSM_FRONTEND_DAI_MAX) {
+ if (!is_mm_lsm_fe_id(i))
+ continue;
fdai = &fe_dai_map[i][session_type];
if (fdai->strm_id != INVALID_SESSION) {
int idx;
@@ -11751,13 +12414,12 @@ static int msm_pcm_routing_close(struct snd_pcm_substream *substream)
clear_bit(idx,
&session_copp_map[i][session_type][be_id]);
if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
- (bedai->compr_passthr_mode == LEGACY_PCM))
+ (bedai->passthr_mode == LEGACY_PCM))
msm_pcm_routing_deinit_pp(bedai->port_id,
topology);
}
}
- bedai->compr_passthr_mode = LEGACY_PCM;
bedai->active = 0;
bedai->sample_rate = 0;
bedai->channel = 0;
@@ -11777,6 +12439,7 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
struct msm_pcm_routing_fdai_data *fdai;
u32 session_id;
struct media_format_info voc_be_media_format;
+ bool is_lsm;
pr_debug("%s: substream->pcm->id:%s\n",
__func__, substream->pcm->id);
@@ -11789,7 +12452,7 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
bedai = &msm_bedais[be_id];
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- if (bedai->compr_passthr_mode != LEGACY_PCM)
+ if (bedai->passthr_mode != LEGACY_PCM)
path_type = ADM_PATH_COMPRESSED_RX;
else
path_type = ADM_PATH_PLAYBACK;
@@ -11810,7 +12473,13 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
*/
bedai->active = 1;
- for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) {
+ for_each_set_bit(i, &bedai->fe_sessions[0], MSM_FRONTEND_DAI_MAX) {
+ if (!(is_mm_lsm_fe_id(i) &&
+ route_check_fe_id_adm_support(i)))
+ continue;
+
+ is_lsm = (i >= MSM_FRONTEND_DAI_LSM1) &&
+ (i <= MSM_FRONTEND_DAI_LSM8);
fdai = &fe_dai_map[i][session_type];
if (fdai->strm_id != INVALID_SESSION) {
int app_type, app_type_idx, copp_idx, acdb_dev_id;
@@ -11832,7 +12501,15 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
app_type =
fe_dai_app_type_cfg[i][session_type].app_type;
- if (app_type) {
+ if (app_type && is_lsm) {
+ app_type_idx =
+ msm_pcm_routing_get_lsm_app_type_idx(app_type);
+ sample_rate =
+ fe_dai_app_type_cfg[i][session_type].
+ sample_rate;
+ bits_per_sample =
+ lsm_app_type_cfg[app_type_idx].bit_width;
+ } else if (app_type) {
app_type_idx =
msm_pcm_routing_get_app_type_idx(app_type);
sample_rate =
@@ -11877,16 +12554,16 @@ static int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
bedai->sample_rate);
msm_pcm_routing_build_matrix(i, session_type, path_type,
- fdai->perf_mode);
+ fdai->perf_mode,
+ bedai->passthr_mode);
if ((fdai->perf_mode == LEGACY_PCM_MODE) &&
- (bedai->compr_passthr_mode ==
- LEGACY_PCM))
+ (bedai->passthr_mode == LEGACY_PCM))
msm_pcm_routing_cfg_pp(bedai->port_id, copp_idx,
topology, channels);
}
}
- for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MAX) {
+ for_each_set_bit(i, &bedai->fe_sessions[0], MSM_FRONTEND_DAI_MAX) {
session_id = msm_pcm_routing_get_voc_sessionid(i);
if (session_id) {
pr_debug("%s voice session_id: 0x%x\n", __func__,
@@ -11951,6 +12628,7 @@ static int msm_routing_send_device_pp_params(int port_id, int copp_idx)
unsigned long pp_config = 0;
bool mute_on;
int latency;
+ bool compr_passthr_mode = true;
pr_debug("%s: port_id %d, copp_idx %d\n", __func__, port_id, copp_idx);
@@ -11987,14 +12665,16 @@ static int msm_routing_send_device_pp_params(int port_id, int copp_idx)
return -EINVAL;
}
+ if ((msm_bedais[be_idx].passthr_mode == LEGACY_PCM) ||
+ (msm_bedais[be_idx].passthr_mode == LISTEN))
+ compr_passthr_mode = false;
+
pp_config = msm_bedais_pp_params[index].pp_params_config;
if (test_bit(ADM_PP_PARAM_MUTE_BIT, &pp_config)) {
pr_debug("%s: ADM_PP_PARAM_MUTE\n", __func__);
clear_bit(ADM_PP_PARAM_MUTE_BIT, &pp_config);
mute_on = msm_bedais_pp_params[index].mute_on;
- if ((msm_bedais[be_idx].active) &&
- (msm_bedais[be_idx].compr_passthr_mode !=
- LEGACY_PCM))
+ if ((msm_bedais[be_idx].active) && compr_passthr_mode)
adm_send_compressed_device_mute(port_id,
copp_idx,
mute_on);
@@ -12004,9 +12684,7 @@ static int msm_routing_send_device_pp_params(int port_id, int copp_idx)
clear_bit(ADM_PP_PARAM_LATENCY_BIT,
&pp_config);
latency = msm_bedais_pp_params[index].latency;
- if ((msm_bedais[be_idx].active) &&
- (msm_bedais[be_idx].compr_passthr_mode !=
- LEGACY_PCM))
+ if ((msm_bedais[be_idx].active) && compr_passthr_mode)
adm_send_compressed_device_latency(port_id,
copp_idx,
latency);
@@ -12022,6 +12700,7 @@ static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol,
int index, be_idx, i, topo_id, idx;
bool mute;
int latency;
+ bool compr_passthr_mode = true;
pr_debug("%s: pp_id: 0x%x\n", __func__, pp_id);
@@ -12046,7 +12725,11 @@ static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol,
return -EINVAL;
}
- for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions,
+ if ((msm_bedais[be_idx].passthr_mode == LEGACY_PCM) ||
+ (msm_bedais[be_idx].passthr_mode == LISTEN))
+ compr_passthr_mode = false;
+
+ for_each_set_bit(i, &msm_bedais[be_idx].fe_sessions[0],
MSM_FRONTEND_DAI_MM_SIZE) {
for (idx = 0; idx < MAX_COPPS_PER_PORT; idx++) {
unsigned long copp =
@@ -12060,7 +12743,7 @@ static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol,
continue;
pr_debug("%s: port: 0x%x, copp %ld, be active: %d, passt: %d\n",
__func__, port_id, copp, msm_bedais[be_idx].active,
- msm_bedais[be_idx].compr_passthr_mode);
+ msm_bedais[be_idx].passthr_mode);
switch (pp_id) {
case ADM_PP_PARAM_MUTE_ID:
pr_debug("%s: ADM_PP_PARAM_MUTE\n", __func__);
@@ -12068,9 +12751,7 @@ static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol,
msm_bedais_pp_params[index].mute_on = mute;
set_bit(ADM_PP_PARAM_MUTE_BIT,
&msm_bedais_pp_params[index].pp_params_config);
- if ((msm_bedais[be_idx].active) &&
- (msm_bedais[be_idx].compr_passthr_mode !=
- LEGACY_PCM))
+ if ((msm_bedais[be_idx].active) && compr_passthr_mode)
adm_send_compressed_device_mute(port_id,
idx, mute);
break;
@@ -12082,9 +12763,7 @@ static int msm_routing_put_device_pp_params_mixer(struct snd_kcontrol *kcontrol,
&msm_bedais_pp_params[index].pp_params_config);
latency = msm_bedais_pp_params[index].latency =
ucontrol->value.integer.value[1];
- if ((msm_bedais[be_idx].active) &&
- (msm_bedais[be_idx].compr_passthr_mode !=
- LEGACY_PCM))
+ if ((msm_bedais[be_idx].active) && compr_passthr_mode)
adm_send_compressed_device_latency(port_id,
idx, latency);
break;
@@ -12154,8 +12833,8 @@ static int msm_routing_probe(struct snd_soc_platform *platform)
snd_soc_dapm_new_widgets(platform->component.dapm.card);
- snd_soc_add_platform_controls(platform, lsm_function,
- ARRAY_SIZE(lsm_function));
+ snd_soc_add_platform_controls(platform, lsm_controls,
+ ARRAY_SIZE(lsm_controls));
snd_soc_add_platform_controls(platform, aanc_slim_0_rx_mux,
ARRAY_SIZE(aanc_slim_0_rx_mux));
@@ -12166,10 +12845,16 @@ static int msm_routing_probe(struct snd_soc_platform *platform)
snd_soc_add_platform_controls(platform, app_type_cfg_controls,
ARRAY_SIZE(app_type_cfg_controls));
+ snd_soc_add_platform_controls(platform, lsm_app_type_cfg_controls,
+ ARRAY_SIZE(lsm_app_type_cfg_controls));
+
snd_soc_add_platform_controls(platform,
stereo_to_custom_stereo_controls,
ARRAY_SIZE(stereo_to_custom_stereo_controls));
+ snd_soc_add_platform_controls(platform, ec_ref_param_controls,
+ ARRAY_SIZE(ec_ref_param_controls));
+
msm_qti_pp_add_controls(platform);
msm_dts_srs_tm_add_controls(platform);
@@ -12252,7 +12937,7 @@ int msm_routing_check_backend_enabled(int fedai_id)
return 0;
}
for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
- if (test_bit(fedai_id, &msm_bedais[i].fe_sessions))
+ if (test_bit(fedai_id, &msm_bedais[i].fe_sessions[0]))
return msm_bedais[i].active;
}
return 0;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index 0bb069154512..a066e9afc9e5 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -392,6 +392,7 @@ enum {
#define ADM_PP_PARAM_LATENCY_ID 1
#define ADM_PP_PARAM_LATENCY_BIT 2
#define BE_DAI_PORT_SESSIONS_IDX_MAX 4
+#define BE_DAI_FE_SESSIONS_IDX_MAX 2
struct msm_pcm_routing_evt {
void (*event_func)(enum msm_pcm_routing_event, void *);
@@ -401,7 +402,9 @@ struct msm_pcm_routing_evt {
struct msm_pcm_routing_bdai_data {
u16 port_id; /* AFE port ID */
u8 active; /* track if this backend is enabled */
- unsigned long fe_sessions; /* Front-end sessions */
+
+ /* Front-end sessions */
+ unsigned long fe_sessions[BE_DAI_FE_SESSIONS_IDX_MAX];
/*
* Track Tx BE ports -> Rx BE ports.
* port_sessions[0] used to track BE 0 to BE 63.
@@ -415,7 +418,7 @@ struct msm_pcm_routing_bdai_data {
unsigned int channel;
unsigned int format;
unsigned int adm_override_ch;
- u32 compr_passthr_mode;
+ u32 passthr_mode;
char *name;
};
diff --git a/sound/soc/msm/qdsp6v2/q6adm.c b/sound/soc/msm/qdsp6v2/q6adm.c
index 32826d38f65a..16ae05034662 100644
--- a/sound/soc/msm/qdsp6v2/q6adm.c
+++ b/sound/soc/msm/qdsp6v2/q6adm.c
@@ -2574,7 +2574,6 @@ int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
return copp_idx;
}
-
void adm_copp_mfc_cfg(int port_id, int copp_idx, int dst_sample_rate)
{
struct audproc_mfc_output_media_fmt mfc_cfg;
@@ -2677,8 +2676,43 @@ fail_cmd:
return;
}
+static void route_set_opcode_matrix_id(
+ struct adm_cmd_matrix_map_routings_v5 **route_addr,
+ int path, uint32_t passthr_mode)
+{
+ struct adm_cmd_matrix_map_routings_v5 *route = *route_addr;
-int adm_matrix_map(int path, struct route_payload payload_map, int perf_mode)
+ switch (path) {
+ case ADM_PATH_PLAYBACK:
+ route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
+ route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
+ break;
+ case ADM_PATH_LIVE_REC:
+ if (passthr_mode == LISTEN) {
+ route->hdr.opcode =
+ ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5;
+ route->matrix_id = ADM_MATRIX_ID_LISTEN_TX;
+ break;
+ }
+ /* fall through to set matrix id for non-listen case */
+ case ADM_PATH_NONLIVE_REC:
+ route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
+ route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
+ break;
+ case ADM_PATH_COMPRESSED_RX:
+ route->hdr.opcode = ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5;
+ route->matrix_id = ADM_MATRIX_ID_COMPRESSED_AUDIO_RX;
+ break;
+ default:
+ pr_err("%s: Wrong path set[%d]\n", __func__, path);
+ break;
+ }
+ pr_debug("%s: opcode 0x%x, matrix id %d\n",
+ __func__, route->hdr.opcode, route->matrix_id);
+}
+
+int adm_matrix_map(int path, struct route_payload payload_map, int perf_mode,
+ uint32_t passthr_mode)
{
struct adm_cmd_matrix_map_routings_v5 *route;
struct adm_session_map_node_v5 *node;
@@ -2711,32 +2745,9 @@ int adm_matrix_map(int path, struct route_payload payload_map, int perf_mode)
route->hdr.dest_domain = APR_DOMAIN_ADSP;
route->hdr.dest_port = 0; /* Ignored */;
route->hdr.token = 0;
- if (path == ADM_PATH_COMPRESSED_RX) {
- pr_debug("%s: ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5 0x%x\n",
- __func__, ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5);
- route->hdr.opcode = ADM_CMD_STREAM_DEVICE_MAP_ROUTINGS_V5;
- } else {
- pr_debug("%s: DM_CMD_MATRIX_MAP_ROUTINGS_V5 0x%x\n",
- __func__, ADM_CMD_MATRIX_MAP_ROUTINGS_V5);
- route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
- }
route->num_sessions = 1;
+ route_set_opcode_matrix_id(&route, path, passthr_mode);
- switch (path) {
- case ADM_PATH_PLAYBACK:
- route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
- break;
- case ADM_PATH_LIVE_REC:
- case ADM_PATH_NONLIVE_REC:
- route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
- break;
- case ADM_PATH_COMPRESSED_RX:
- route->matrix_id = ADM_MATRIX_ID_COMPRESSED_AUDIO_RX;
- break;
- default:
- pr_err("%s: Wrong path set[%d]\n", __func__, path);
- break;
- }
payload = ((u8 *)matrix_map +
sizeof(struct adm_cmd_matrix_map_routings_v5));
node = (struct adm_session_map_node_v5 *)payload;
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 9353b2132e15..74fbe984e6e9 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -7361,7 +7361,7 @@ int q6asm_async_write(struct audio_client *ac,
}
q6asm_stream_add_hdr_async(
- ac, &write.hdr, sizeof(write), FALSE, ac->stream_id);
+ ac, &write.hdr, sizeof(write), TRUE, ac->stream_id);
port = &ac->port[IN];
ab = &port->buf[port->dsp_buf];
@@ -7522,7 +7522,7 @@ int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
0, /* Stream ID is NA */
port->dsp_buf,
0, /* Direction flag is NA */
- WAIT_CMD);
+ NO_WAIT_CMD);
write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
write.buf_addr_lsw = lower_32_bits(ab->phys);
write.buf_addr_msw = msm_audio_populate_upper_32_bits(ab->phys);
@@ -7601,7 +7601,7 @@ int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
0, /* Stream ID is NA */
port->dsp_buf,
0, /* Direction flag is NA */
- WAIT_CMD);
+ NO_WAIT_CMD);
write.hdr.opcode = ASM_DATA_CMD_WRITE_V2;
write.buf_addr_lsw = lower_32_bits(ab->phys);
@@ -8049,7 +8049,7 @@ static int __q6asm_cmd_nowait(struct audio_client *ac, int cmd,
stream_id,
0, /* Buffer index is NA */
0, /* Direction flag is NA */
- WAIT_CMD);
+ NO_WAIT_CMD);
pr_debug("%s: token = 0x%x, stream_id %d, session 0x%x\n",
__func__, hdr.token, stream_id, ac->session);
@@ -8113,7 +8113,7 @@ int __q6asm_send_meta_data(struct audio_client *ac, uint32_t stream_id,
return -EINVAL;
}
pr_debug("%s: session[%d]\n", __func__, ac->session);
- q6asm_stream_add_hdr_async(ac, &silence.hdr, sizeof(silence), FALSE,
+ q6asm_stream_add_hdr_async(ac, &silence.hdr, sizeof(silence), TRUE,
stream_id);
/*
@@ -8127,7 +8127,7 @@ int __q6asm_send_meta_data(struct audio_client *ac, uint32_t stream_id,
stream_id,
0, /* Buffer index is NA */
0, /* Direction flag is NA */
- WAIT_CMD);
+ NO_WAIT_CMD);
pr_debug("%s: token = 0x%x, stream_id %d, session 0x%x\n",
__func__, silence.hdr.token, stream_id, ac->session);
@@ -8343,14 +8343,17 @@ int q6asm_get_apr_service_id(int session_id)
int q6asm_get_asm_topology(int session_id)
{
- int topology;
+ int topology = -EINVAL;
if (session_id <= 0 || session_id > ASM_ACTIVE_STREAMS_ALLOWED) {
pr_err("%s: invalid session_id = %d\n", __func__, session_id);
- topology = -EINVAL;
goto done;
}
-
+ if (session[session_id] == NULL) {
+ pr_err("%s: session not created for session id = %d\n",
+ __func__, session_id);
+ goto done;
+ }
topology = session[session_id]->topology;
done:
return topology;
@@ -8358,14 +8361,17 @@ done:
int q6asm_get_asm_app_type(int session_id)
{
- int app_type;
+ int app_type = -EINVAL;
if (session_id <= 0 || session_id > ASM_ACTIVE_STREAMS_ALLOWED) {
pr_err("%s: invalid session_id = %d\n", __func__, session_id);
- app_type = -EINVAL;
goto done;
}
-
+ if (session[session_id] == NULL) {
+ pr_err("%s: session not created for session id = %d\n",
+ __func__, session_id);
+ goto done;
+ }
app_type = session[session_id]->app_type;
done:
return app_type;
diff --git a/sound/soc/msm/qdsp6v2/q6lsm.c b/sound/soc/msm/qdsp6v2/q6lsm.c
index 2bf0c490e834..525ec1c30f48 100644
--- a/sound/soc/msm/qdsp6v2/q6lsm.c
+++ b/sound/soc/msm/qdsp6v2/q6lsm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,8 @@
#define LSM_ALIGN_BOUNDARY 512
#define LSM_SAMPLE_RATE 16000
#define QLSM_PARAM_ID_MINOR_VERSION 1
+#define QLSM_PARAM_ID_MINOR_VERSION_2 2
+
static int lsm_afe_port;
enum {
@@ -707,29 +709,28 @@ static int q6lsm_send_confidence_levels(
return rc;
}
-static int q6lsm_send_params(struct lsm_client *client,
+static int q6lsm_send_param_opmode(struct lsm_client *client,
struct lsm_module_param_ids *opmode_ids,
- struct lsm_module_param_ids *connectport_ids,
u32 set_param_opcode)
{
int rc;
- struct lsm_cmd_set_opmode_connectport opmode_connectport;
+ struct lsm_cmd_set_params_opmode opmode_params;
struct apr_hdr *msg_hdr;
- struct lsm_param_connect_to_port *connect_to_port;
+
struct lsm_param_op_mode *op_mode;
u32 data_payload_size, param_size;
- msg_hdr = &opmode_connectport.msg_hdr;
+ msg_hdr = &opmode_params.msg_hdr;
q6lsm_add_hdr(client, msg_hdr,
- sizeof(opmode_connectport), true);
+ sizeof(opmode_params), true);
msg_hdr->opcode = set_param_opcode;
- data_payload_size = sizeof(opmode_connectport) -
+ data_payload_size = sizeof(opmode_params) -
sizeof(*msg_hdr) -
- sizeof(opmode_connectport.params_hdr);
- q6lsm_set_param_hdr_info(&opmode_connectport.params_hdr,
+ sizeof(opmode_params.params_hdr);
+ q6lsm_set_param_hdr_info(&opmode_params.params_hdr,
data_payload_size, 0, 0, 0);
- connect_to_port = &opmode_connectport.connect_to_port;
- op_mode = &opmode_connectport.op_mode;
+ op_mode = &opmode_params.op_mode;
+
param_size = sizeof(struct lsm_param_op_mode) -
sizeof(op_mode->common);
@@ -741,10 +742,61 @@ static int q6lsm_send_params(struct lsm_client *client,
op_mode->reserved = 0;
pr_debug("%s: mode = 0x%x", __func__, op_mode->mode);
+ rc = q6lsm_apr_send_pkt(client, client->apr,
+ &opmode_params, true, NULL);
+ if (rc)
+ pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
+ __func__, msg_hdr->opcode, rc);
+
+ pr_debug("%s: leave %d\n", __func__, rc);
+ return rc;
+}
+
+void set_lsm_port(int lsm_port)
+{
+ lsm_afe_port = lsm_port;
+}
+
+int get_lsm_port(void)
+{
+ return lsm_afe_port;
+}
+
+int q6lsm_set_port_connected(struct lsm_client *client)
+{
+ int rc;
+ struct lsm_cmd_set_connectport connectport;
+ struct lsm_module_param_ids connectport_ids;
+ struct apr_hdr *msg_hdr;
+ struct lsm_param_connect_to_port *connect_to_port;
+ u32 data_payload_size, param_size, set_param_opcode;
+
+ if (client->use_topology) {
+ set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
+ connectport_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
+ connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
+ } else {
+ set_param_opcode = LSM_SESSION_CMD_SET_PARAMS;
+ connectport_ids.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
+ connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
+ }
+ client->connect_to_port = get_lsm_port();
+
+ msg_hdr = &connectport.msg_hdr;
+ q6lsm_add_hdr(client, msg_hdr,
+ sizeof(connectport), true);
+ msg_hdr->opcode = set_param_opcode;
+ data_payload_size = sizeof(connectport) -
+ sizeof(*msg_hdr) -
+ sizeof(connectport.params_hdr);
+ q6lsm_set_param_hdr_info(&connectport.params_hdr,
+ data_payload_size, 0, 0, 0);
+ connect_to_port = &connectport.connect_to_port;
+
param_size = (sizeof(struct lsm_param_connect_to_port) -
sizeof(connect_to_port->common));
q6lsm_set_param_common(&connect_to_port->common,
- connectport_ids, param_size,
+ &connectport_ids, param_size,
set_param_opcode);
connect_to_port->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
connect_to_port->port_id = client->connect_to_port;
@@ -752,23 +804,191 @@ static int q6lsm_send_params(struct lsm_client *client,
pr_debug("%s: port= %d", __func__, connect_to_port->port_id);
rc = q6lsm_apr_send_pkt(client, client->apr,
- &opmode_connectport, true, NULL);
+ &connectport, true, NULL);
+ if (rc)
+ pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
+ __func__, msg_hdr->opcode, rc);
+
+ return rc;
+}
+static int q6lsm_send_param_polling_enable(struct lsm_client *client,
+ bool poll_en,
+ struct lsm_module_param_ids *poll_enable_ids,
+ u32 set_param_opcode)
+{
+ int rc = 0;
+ struct lsm_cmd_poll_enable cmd;
+ struct apr_hdr *msg_hdr;
+ struct lsm_param_poll_enable *poll_enable;
+ u32 data_payload_size, param_size;
+
+ msg_hdr = &cmd.msg_hdr;
+ q6lsm_add_hdr(client, msg_hdr,
+ sizeof(struct lsm_cmd_poll_enable), true);
+ msg_hdr->opcode = set_param_opcode;
+ data_payload_size = sizeof(struct lsm_cmd_poll_enable) -
+ sizeof(struct apr_hdr) -
+ sizeof(struct lsm_set_params_hdr);
+ q6lsm_set_param_hdr_info(&cmd.params_hdr,
+ data_payload_size, 0, 0, 0);
+ poll_enable = &cmd.poll_enable;
+
+ param_size = (sizeof(struct lsm_param_poll_enable) -
+ sizeof(poll_enable->common));
+ q6lsm_set_param_common(&poll_enable->common,
+ poll_enable_ids, param_size,
+ set_param_opcode);
+ poll_enable->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+ poll_enable->polling_enable = (poll_en) ? 1 : 0;
+ pr_debug("%s: poll enable= %d", __func__, poll_enable->polling_enable);
+
+ rc = q6lsm_apr_send_pkt(client, client->apr,
+ &cmd, true, NULL);
if (rc)
pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
__func__, msg_hdr->opcode, rc);
- pr_debug("%s: leave %d\n", __func__, rc);
return rc;
}
-void set_lsm_port(int lsm_port)
+int q6lsm_set_fwk_mode_cfg(struct lsm_client *client,
+ uint32_t event_mode)
{
- lsm_afe_port = lsm_port;
+ int rc = 0;
+ struct lsm_cmd_set_fwk_mode_cfg cmd;
+ struct lsm_module_param_ids fwk_mode_cfg_ids;
+ struct apr_hdr *msg_hdr;
+ struct lsm_param_fwk_mode_cfg *fwk_mode_cfg;
+ u32 data_payload_size, param_size, set_param_opcode;
+
+ if (client->use_topology) {
+ set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
+ fwk_mode_cfg_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
+ fwk_mode_cfg_ids.param_id = LSM_PARAM_ID_FWK_MODE_CONFIG;
+ } else {
+ pr_debug("%s: Ignore sending event mode\n", __func__);
+ return rc;
+ }
+
+ msg_hdr = &cmd.msg_hdr;
+ q6lsm_add_hdr(client, msg_hdr,
+ sizeof(struct lsm_cmd_set_fwk_mode_cfg), true);
+ msg_hdr->opcode = set_param_opcode;
+ data_payload_size = sizeof(struct lsm_cmd_set_fwk_mode_cfg) -
+ sizeof(struct apr_hdr) -
+ sizeof(struct lsm_set_params_hdr);
+ q6lsm_set_param_hdr_info(&cmd.params_hdr,
+ data_payload_size, 0, 0, 0);
+ fwk_mode_cfg = &cmd.fwk_mode_cfg;
+
+ param_size = (sizeof(struct lsm_param_fwk_mode_cfg) -
+ sizeof(fwk_mode_cfg->common));
+ q6lsm_set_param_common(&fwk_mode_cfg->common,
+ &fwk_mode_cfg_ids, param_size,
+ set_param_opcode);
+
+ fwk_mode_cfg->minor_version = QLSM_PARAM_ID_MINOR_VERSION;
+ fwk_mode_cfg->mode = event_mode;
+ pr_debug("%s: mode = %d\n", __func__, fwk_mode_cfg->mode);
+
+ rc = q6lsm_apr_send_pkt(client, client->apr,
+ &cmd, true, NULL);
+ if (rc)
+ pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
+ __func__, msg_hdr->opcode, rc);
+ return rc;
}
-int get_lsm_port()
+static int q6lsm_arrange_mch_map(struct lsm_param_media_fmt *media_fmt,
+ int channel_count)
{
- return lsm_afe_port;
+ int rc = 0;
+
+ memset(media_fmt->channel_mapping, 0, LSM_MAX_NUM_CHANNELS);
+
+ switch (channel_count) {
+ case 1:
+ media_fmt->channel_mapping[0] = PCM_CHANNEL_FC;
+ break;
+ case 2:
+ media_fmt->channel_mapping[0] = PCM_CHANNEL_FL;
+ media_fmt->channel_mapping[1] = PCM_CHANNEL_FR;
+ break;
+ case 3:
+ media_fmt->channel_mapping[0] = PCM_CHANNEL_FL;
+ media_fmt->channel_mapping[1] = PCM_CHANNEL_FR;
+ media_fmt->channel_mapping[2] = PCM_CHANNEL_FC;
+ break;
+ case 4:
+ media_fmt->channel_mapping[0] = PCM_CHANNEL_FL;
+ media_fmt->channel_mapping[1] = PCM_CHANNEL_FR;
+ media_fmt->channel_mapping[2] = PCM_CHANNEL_LS;
+ media_fmt->channel_mapping[3] = PCM_CHANNEL_RS;
+ break;
+ default:
+ pr_err("%s: invalid num_chan %d\n", __func__, channel_count);
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
+int q6lsm_set_media_fmt_params(struct lsm_client *client)
+{
+ int rc = 0;
+ struct lsm_cmd_set_media_fmt cmd;
+ struct lsm_module_param_ids media_fmt_ids;
+ struct apr_hdr *msg_hdr;
+ struct lsm_param_media_fmt *media_fmt;
+ u32 data_payload_size, param_size, set_param_opcode;
+ struct lsm_hw_params param = client->hw_params;
+
+ if (client->use_topology) {
+ set_param_opcode = LSM_SESSION_CMD_SET_PARAMS_V2;
+ media_fmt_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
+ media_fmt_ids.param_id = LSM_PARAM_ID_MEDIA_FMT;
+ } else {
+ pr_debug("%s: Ignore sending media format\n", __func__);
+ goto err_ret;
+ }
+
+ msg_hdr = &cmd.msg_hdr;
+ q6lsm_add_hdr(client, msg_hdr,
+ sizeof(struct lsm_cmd_set_media_fmt), true);
+ msg_hdr->opcode = set_param_opcode;
+ data_payload_size = sizeof(struct lsm_cmd_set_media_fmt) -
+ sizeof(struct apr_hdr) -
+ sizeof(struct lsm_set_params_hdr);
+ q6lsm_set_param_hdr_info(&cmd.params_hdr,
+ data_payload_size, 0, 0, 0);
+ media_fmt = &cmd.media_fmt;
+
+ param_size = (sizeof(struct lsm_param_media_fmt) -
+ sizeof(media_fmt->common));
+ q6lsm_set_param_common(&media_fmt->common,
+ &media_fmt_ids, param_size,
+ set_param_opcode);
+
+ media_fmt->minor_version = QLSM_PARAM_ID_MINOR_VERSION_2;
+ media_fmt->sample_rate = param.sample_rate;
+ media_fmt->num_channels = param.num_chs;
+ media_fmt->bit_width = param.sample_size;
+
+ rc = q6lsm_arrange_mch_map(media_fmt, media_fmt->num_channels);
+ if (rc)
+ goto err_ret;
+
+ pr_debug("%s: sample rate= %d, channels %d bit width %d\n",
+ __func__, media_fmt->sample_rate, media_fmt->num_channels,
+ media_fmt->bit_width);
+
+ rc = q6lsm_apr_send_pkt(client, client->apr,
+ &cmd, true, NULL);
+ if (rc)
+ pr_err("%s: Failed set_params opcode 0x%x, rc %d\n",
+ __func__, msg_hdr->opcode, rc);
+err_ret:
+ return rc;
}
int q6lsm_set_data(struct lsm_client *client,
@@ -776,7 +996,7 @@ int q6lsm_set_data(struct lsm_client *client,
bool detectfailure)
{
int rc = 0;
- struct lsm_module_param_ids opmode_ids, connectport_ids;
+ struct lsm_module_param_ids opmode_ids;
struct lsm_module_param_ids conf_levels_ids;
if (!client->confidence_levels) {
@@ -800,16 +1020,12 @@ int q6lsm_set_data(struct lsm_client *client,
goto err_ret;
}
client->mode |= detectfailure << 2;
- client->connect_to_port = get_lsm_port();
opmode_ids.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
opmode_ids.param_id = LSM_PARAM_ID_OPERATION_MODE;
- connectport_ids.module_id = LSM_MODULE_ID_VOICE_WAKEUP;
- connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
-
- rc = q6lsm_send_params(client, &opmode_ids, &connectport_ids,
- LSM_SESSION_CMD_SET_PARAMS);
+ rc = q6lsm_send_param_opmode(client, &opmode_ids,
+ LSM_SESSION_CMD_SET_PARAMS);
if (rc) {
pr_err("%s: Failed to set lsm config params %d\n",
__func__, rc);
@@ -1390,7 +1606,7 @@ static int q6lsm_send_param_gain(
int q6lsm_set_one_param(struct lsm_client *client,
struct lsm_params_info *p_info, void *data,
- enum LSM_PARAM_TYPE param_type)
+ uint32_t param_type)
{
int rc = 0, pkt_sz;
struct lsm_module_param_ids ids;
@@ -1409,7 +1625,6 @@ int q6lsm_set_one_param(struct lsm_client *client,
case LSM_OPERATION_MODE: {
struct snd_lsm_detect_mode *det_mode = data;
struct lsm_module_param_ids opmode_ids;
- struct lsm_module_param_ids connectport_ids;
if (det_mode->mode == LSM_MODE_KEYWORD_ONLY_DETECTION) {
client->mode = 0x01;
@@ -1422,16 +1637,12 @@ int q6lsm_set_one_param(struct lsm_client *client,
}
client->mode |= det_mode->detect_failure << 2;
- client->connect_to_port = get_lsm_port();
opmode_ids.module_id = p_info->module_id;
opmode_ids.param_id = p_info->param_id;
- connectport_ids.module_id = LSM_MODULE_ID_FRAMEWORK;
- connectport_ids.param_id = LSM_PARAM_ID_CONNECT_TO_PORT;
-
- rc = q6lsm_send_params(client, &opmode_ids, &connectport_ids,
- LSM_SESSION_CMD_SET_PARAMS_V2);
+ rc = q6lsm_send_param_opmode(client, &opmode_ids,
+ LSM_SESSION_CMD_SET_PARAMS_V2);
if (rc)
pr_err("%s: OPERATION_MODE failed, rc %d\n",
__func__, rc);
@@ -1458,6 +1669,20 @@ int q6lsm_set_one_param(struct lsm_client *client,
pr_err("%s: CONFIDENCE_LEVELS cmd failed, rc %d\n",
__func__, rc);
break;
+ case LSM_POLLING_ENABLE: {
+ struct snd_lsm_poll_enable *lsm_poll_enable =
+ (struct snd_lsm_poll_enable *) data;
+ ids.module_id = p_info->module_id;
+ ids.param_id = p_info->param_id;
+ rc = q6lsm_send_param_polling_enable(client,
+ lsm_poll_enable->poll_en, &ids,
+ LSM_SESSION_CMD_SET_PARAMS_V2);
+ if (rc)
+ pr_err("%s: POLLING ENABLE cmd failed, rc %d\n",
+ __func__, rc);
+ break;
+ }
+
case LSM_REG_SND_MODEL: {
struct lsm_cmd_set_params model_param;
u32 payload_size;
diff --git a/sound/soc/msm/qdsp6v2/q6voice.c b/sound/soc/msm/qdsp6v2/q6voice.c
index e7619c0ca0dd..d352133b7c32 100644
--- a/sound/soc/msm/qdsp6v2/q6voice.c
+++ b/sound/soc/msm/qdsp6v2/q6voice.c
@@ -3990,6 +3990,10 @@ static int voice_send_cvp_media_fmt_info_cmd(struct voice_data *v)
{
int ret;
+ ret = voice_send_cvp_device_channels_cmd(v);
+ if (ret < 0)
+ goto done;
+
if (voice_get_cvd_int_version(common.cvd_version) >=
CVD_INT_VERSION_2_3) {
ret = voice_send_cvp_media_format_cmd(v, RX_PATH);
@@ -4002,8 +4006,6 @@ static int voice_send_cvp_media_fmt_info_cmd(struct voice_data *v)
if (common.ec_ref_ext)
ret = voice_send_cvp_media_format_cmd(v, EC_REF_PATH);
- } else {
- ret = voice_send_cvp_device_channels_cmd(v);
}
done:
diff --git a/sound/soc/msm/sdm660-common.c b/sound/soc/msm/sdm660-common.c
index 5dc5bf9d1b3c..3a8fdf8f1256 100644
--- a/sound/soc/msm/sdm660-common.c
+++ b/sound/soc/msm/sdm660-common.c
@@ -160,11 +160,6 @@ enum {
PCM_I2S_SEL_MAX,
};
-struct mi2s_aux_pcm_common_conf {
- struct mutex lock;
- void *pcm_i2s_sel_vt_addr;
-};
-
struct mi2s_conf {
struct mutex lock;
u32 ref_cnt;
@@ -172,11 +167,6 @@ struct mi2s_conf {
u32 msm_is_ext_mclk;
};
-struct auxpcm_conf {
- struct mutex lock;
- u32 ref_cnt;
-};
-
static u32 mi2s_ebit_clk[MI2S_MAX] = {
Q6AFE_LPASS_CLK_ID_PRI_MI2S_EBIT,
Q6AFE_LPASS_CLK_ID_SEC_MI2S_EBIT,
@@ -383,11 +373,7 @@ static struct afe_clk_set mi2s_mclk[MI2S_MAX] = {
}
};
-
-
-static struct mi2s_aux_pcm_common_conf mi2s_auxpcm_conf[PCM_I2S_SEL_MAX];
static struct mi2s_conf mi2s_intf_conf[MI2S_MAX];
-static struct auxpcm_conf auxpcm_intf_conf[AUX_PCM_MAX];
static int proxy_rx_ch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
@@ -1945,46 +1931,14 @@ EXPORT_SYMBOL(msm_common_be_hw_params_fixup);
*/
int msm_aux_pcm_snd_startup(struct snd_pcm_substream *substream)
{
- int ret = 0;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int index = cpu_dai->id - 1;
- return ret = 0;
dev_dbg(rtd->card->dev,
"%s: substream = %s stream = %d, dai name %s, dai ID %d\n",
__func__, substream->name, substream->stream,
- cpu_dai->name, cpu_dai->id);
-
- if (index < PRIM_AUX_PCM || index > QUAT_AUX_PCM) {
- ret = -EINVAL;
- dev_err(rtd->card->dev,
- "%s: CPU DAI id (%d) out of range\n",
- __func__, cpu_dai->id);
- goto done;
- }
-
- mutex_lock(&auxpcm_intf_conf[index].lock);
- if (++auxpcm_intf_conf[index].ref_cnt == 1) {
- if (mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr != NULL) {
- mutex_lock(&mi2s_auxpcm_conf[index].lock);
- iowrite32(1,
- mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr);
- mutex_unlock(&mi2s_auxpcm_conf[index].lock);
- } else {
- dev_err(rtd->card->dev,
- "%s lpaif_tert_muxsel_virt_addr is NULL\n",
- __func__);
- ret = -EINVAL;
- }
- }
- if (IS_ERR_VALUE(ret))
- auxpcm_intf_conf[index].ref_cnt--;
-
- mutex_unlock(&auxpcm_intf_conf[index].lock);
+ rtd->cpu_dai->name, rtd->cpu_dai->id);
-done:
- return ret;
+ return 0;
}
EXPORT_SYMBOL(msm_aux_pcm_snd_startup);
@@ -1996,36 +1950,12 @@ EXPORT_SYMBOL(msm_aux_pcm_snd_startup);
void msm_aux_pcm_snd_shutdown(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- int index = rtd->cpu_dai->id - 1;
dev_dbg(rtd->card->dev,
"%s: substream = %s stream = %d, dai name %s, dai ID %d\n",
__func__,
substream->name, substream->stream,
rtd->cpu_dai->name, rtd->cpu_dai->id);
-
- if (index < PRIM_AUX_PCM || index > QUAT_AUX_PCM) {
- dev_err(rtd->card->dev,
- "%s: CPU DAI id (%d) out of range\n",
- __func__, rtd->cpu_dai->id);
- return;
- }
-
- mutex_lock(&auxpcm_intf_conf[index].lock);
- if (--auxpcm_intf_conf[index].ref_cnt == 0) {
- if (mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr != NULL) {
- mutex_lock(&mi2s_auxpcm_conf[index].lock);
- iowrite32(0,
- mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr);
- mutex_unlock(&mi2s_auxpcm_conf[index].lock);
- } else {
- dev_err(rtd->card->dev,
- "%s lpaif_tert_muxsel_virt_addr is NULL\n",
- __func__);
- auxpcm_intf_conf[index].ref_cnt++;
- }
- }
- mutex_unlock(&auxpcm_intf_conf[index].lock);
}
EXPORT_SYMBOL(msm_aux_pcm_snd_shutdown);
@@ -2186,18 +2116,6 @@ int msm_mi2s_snd_startup(struct snd_pcm_substream *substream)
__func__, ret);
goto clean_up;
}
- if (mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr != NULL) {
- mutex_lock(&mi2s_auxpcm_conf[index].lock);
- iowrite32(0,
- mi2s_auxpcm_conf[index].pcm_i2s_sel_vt_addr);
- mutex_unlock(&mi2s_auxpcm_conf[index].lock);
- } else {
- dev_err(rtd->card->dev,
- "%s lpaif_muxsel_virt_addr is NULL for dai %d\n",
- __func__, index);
- ret = -EINVAL;
- goto clk_off;
- }
ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
if (IS_ERR_VALUE(ret)) {
dev_err(rtd->card->dev,
@@ -2683,42 +2601,16 @@ static void msm_free_auxdev_mem(struct platform_device *pdev)
static void i2s_auxpcm_init(struct platform_device *pdev)
{
- struct resource *muxsel;
int count;
u32 mi2s_master_slave[MI2S_MAX];
u32 mi2s_ext_mclk[MI2S_MAX];
int ret;
- char *str[PCM_I2S_SEL_MAX] = {
- "lpaif_pri_mode_muxsel",
- "lpaif_sec_mode_muxsel",
- "lpaif_tert_mode_muxsel",
- "lpaif_quat_mode_muxsel"
- };
for (count = 0; count < MI2S_MAX; count++) {
mutex_init(&mi2s_intf_conf[count].lock);
mi2s_intf_conf[count].ref_cnt = 0;
}
- for (count = 0; count < AUX_PCM_MAX; count++) {
- mutex_init(&auxpcm_intf_conf[count].lock);
- auxpcm_intf_conf[count].ref_cnt = 0;
- }
-
- for (count = 0; count < PCM_I2S_SEL_MAX; count++) {
- mutex_init(&mi2s_auxpcm_conf[count].lock);
- mi2s_auxpcm_conf[count].pcm_i2s_sel_vt_addr = NULL;
- }
-
- for (count = 0; count < PCM_I2S_SEL_MAX; count++) {
- muxsel = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- str[count]);
- if (muxsel) {
- mi2s_auxpcm_conf[count].pcm_i2s_sel_vt_addr
- = ioremap(muxsel->start, resource_size(muxsel));
- }
- }
-
ret = of_property_read_u32_array(pdev->dev.of_node,
"qcom,msm-mi2s-master",
mi2s_master_slave, MI2S_MAX);
@@ -2745,17 +2637,6 @@ static void i2s_auxpcm_init(struct platform_device *pdev)
}
}
-static void i2s_auxpcm_deinit(void)
-{
- int count;
-
- for (count = 0; count < PCM_I2S_SEL_MAX; count++)
- if (mi2s_auxpcm_conf[count].pcm_i2s_sel_vt_addr !=
- NULL)
- iounmap(
- mi2s_auxpcm_conf[count].pcm_i2s_sel_vt_addr);
-}
-
static const struct of_device_id sdm660_asoc_machine_of_match[] = {
{ .compatible = "qcom,sdm660-asoc-snd",
.data = "internal_codec"},
@@ -2821,8 +2702,6 @@ static int msm_asoc_machine_probe(struct platform_device *pdev)
"qcom,cdc-pdm-gpios", 0);
pdata->comp_gpio_p = of_parse_phandle(pdev->dev.of_node,
"qcom,cdc-comp-gpios", 0);
- pdata->sdw_gpio_p = of_parse_phandle(pdev->dev.of_node,
- "qcom,cdc-sdw-gpios", 0);
pdata->dmic_gpio_p = of_parse_phandle(pdev->dev.of_node,
"qcom,cdc-dmic-gpios", 0);
pdata->ext_spk_gpio_p = of_parse_phandle(pdev->dev.of_node,
@@ -2909,9 +2788,9 @@ err:
gpio_free(pdata->hph_en0_gpio);
pdata->hph_en0_gpio = 0;
}
- devm_kfree(&pdev->dev, pdata);
if (pdata->snd_card_val != INT_SND_CARD)
msm_ext_cdc_deinit(pdata);
+ devm_kfree(&pdev->dev, pdata);
return ret;
}
@@ -2929,7 +2808,6 @@ static int msm_asoc_machine_remove(struct platform_device *pdev)
gpio_free(pdata->us_euro_gpio);
gpio_free(pdata->hph_en1_gpio);
gpio_free(pdata->hph_en0_gpio);
- i2s_auxpcm_deinit();
snd_soc_unregister_card(card);
return 0;
}
diff --git a/sound/soc/msm/sdm660-common.h b/sound/soc/msm/sdm660-common.h
index 5742c8545b86..bca8cd788a39 100644
--- a/sound/soc/msm/sdm660-common.h
+++ b/sound/soc/msm/sdm660-common.h
@@ -95,7 +95,6 @@ struct msm_asoc_mach_data {
struct device_node *hph_en0_gpio_p; /* used by pinctrl API */
struct device_node *pdm_gpio_p; /* used by pinctrl API */
struct device_node *comp_gpio_p; /* used by pinctrl API */
- struct device_node *sdw_gpio_p; /* used by pinctrl API */
struct device_node *dmic_gpio_p; /* used by pinctrl API */
struct device_node *ext_spk_gpio_p; /* used by pinctrl API */
struct snd_soc_codec *codec;
diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c
index 805ff2335b42..28728a186f92 100644
--- a/sound/soc/msm/sdm660-internal.c
+++ b/sound/soc/msm/sdm660-internal.c
@@ -539,35 +539,6 @@ static int enable_spk_ext_pa(struct snd_soc_codec *codec, int enable)
return 0;
}
-static int msm_config_sdw_gpio(bool enable, struct snd_soc_codec *codec)
-{
- struct snd_soc_card *card = codec->component.card;
- struct msm_asoc_mach_data *pdata = snd_soc_card_get_drvdata(card);
- int ret = 0;
-
- pr_debug("%s: %s SDW Clk/Data Gpios\n", __func__,
- enable ? "Enable" : "Disable");
-
- if (enable) {
- ret = msm_cdc_pinctrl_select_active_state(pdata->sdw_gpio_p);
- if (ret) {
- pr_err("%s: gpio set cannot be activated %s\n",
- __func__, "sdw_pin");
- goto done;
- }
- } else {
- ret = msm_cdc_pinctrl_select_sleep_state(pdata->sdw_gpio_p);
- if (ret) {
- pr_err("%s: gpio set cannot be de-activated %s\n",
- __func__, "sdw_pin");
- goto done;
- }
- }
-
-done:
- return ret;
-}
-
static int int_mi2s_get_idx_from_beid(int32_t be_id)
{
int idx = 0;
@@ -1386,7 +1357,6 @@ static int msm_sdw_audrx_init(struct snd_soc_pcm_runtime *rtd)
snd_soc_dapm_ignore_suspend(dapm, "VIINPUT_SDW");
snd_soc_dapm_sync(dapm);
- msm_sdw_gpio_cb(msm_config_sdw_gpio, codec);
card = rtd->card->snd_card;
if (!codec_root)
codec_root = snd_register_module_info(card->module, "codecs",