summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt3
-rw-r--r--Documentation/devicetree/bindings/fb/mdss-dsi.txt3
-rw-r--r--Documentation/devicetree/bindings/fb/mdss-mdp.txt21
-rw-r--r--Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt151
-rw-r--r--arch/arm/boot/dts/qcom/dsi-adv7533-1080p.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom/dsi-adv7533-720p.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi2
-rw-r--r--drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c18
-rw-r--r--drivers/clk/msm/mdss/mdss-dsi-pll-8996.c2
-rw-r--r--drivers/clk/msm/mdss/mdss-hdmi-pll-8996.c207
-rw-r--r--drivers/media/platform/msm/Kconfig1
-rw-r--r--drivers/media/platform/msm/Makefile1
-rw-r--r--drivers/media/platform/msm/sde/Kconfig8
-rw-r--r--drivers/media/platform/msm/sde/Makefile1
-rw-r--r--drivers/media/platform/msm/sde/rotator/Makefile23
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_base.c569
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_base.h162
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_core.c2587
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_core.h411
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c332
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h39
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c2700
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h199
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c511
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h146
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h74
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_io_util.c429
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_io_util.h105
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c596
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r1.h22
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c267
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r1_debug.c45
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r1_debug.h32
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r1_hwio.h149
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r1_internal.h171
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r1_pipe.c405
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c532
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c623
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.h48
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_sync.c277
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_sync.h115
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_trace.h302
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_util.c980
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_util.h196
-rw-r--r--drivers/misc/hdcp.c7
-rw-r--r--drivers/video/fbdev/msm/Makefile3
-rw-r--r--drivers/video/fbdev/msm/mdp3.c1288
-rw-r--r--drivers/video/fbdev/msm/mdp3.h81
-rw-r--r--drivers/video/fbdev/msm/mdp3_ctrl.c911
-rw-r--r--drivers/video/fbdev/msm/mdp3_ctrl.h7
-rw-r--r--drivers/video/fbdev/msm/mdp3_dma.c275
-rw-r--r--drivers/video/fbdev/msm/mdp3_dma.h14
-rw-r--r--drivers/video/fbdev/msm/mdp3_hwio.h16
-rw-r--r--drivers/video/fbdev/msm/mdp3_ppp.c527
-rw-r--r--drivers/video/fbdev/msm/mdp3_ppp.h10
-rw-r--r--drivers/video/fbdev/msm/mdp3_ppp_hwio.c212
-rw-r--r--drivers/video/fbdev/msm/mdss.h12
-rw-r--r--drivers/video/fbdev/msm/mdss_compat_utils.c6
-rw-r--r--drivers/video/fbdev/msm/mdss_dba_utils.c28
-rw-r--r--drivers/video/fbdev/msm/mdss_dba_utils.h4
-rw-r--r--drivers/video/fbdev/msm/mdss_debug.c5
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi.c118
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi.h29
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_clk.c4
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_host.c200
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi_panel.c34
-rw-r--r--drivers/video/fbdev/msm/mdss_fb.c7
-rw-r--r--drivers/video/fbdev/msm/mdss_fb.h1
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_audio.c525
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_audio.h72
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_edid.c187
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c17
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_tx.c747
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_tx.h9
-rw-r--r--drivers/video/fbdev/msm/mdss_hdmi_util.c28
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.c590
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.h138
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_ctl.c427
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_hwio.h41
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c379
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c3
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_layer.c113
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_overlay.c120
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pipe.c38
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp.c246
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp.h8
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c38
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp_common.c87
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp_common.h27
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c78
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp_v3.c737
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_util.c171
-rw-r--r--drivers/video/fbdev/msm/mdss_panel.h11
-rw-r--r--drivers/video/fbdev/msm/mdss_rotator.c57
-rw-r--r--drivers/video/fbdev/msm/mdss_rotator_internal.h4
-rw-r--r--drivers/video/fbdev/msm/mdss_smmu.c6
-rw-r--r--drivers/video/fbdev/msm/msm_dba/adv7533.c45
-rw-r--r--drivers/video/fbdev/msm/msm_mdss_io_8974.c145
-rw-r--r--include/uapi/linux/msm_mdp.h3
-rw-r--r--include/video/msm_dba.h6
100 files changed, 19954 insertions, 2419 deletions
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index c0039f1950b2..2cdbdf8bc499 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -455,6 +455,8 @@ Optional properites:
instead of standard dcs type 0x0A.
- qcom,dba-panel: Indicates whether the current panel is used as a display bridge
to a non-DSI interface.
+- qcom,bridge-name: A string to indicate the name of the bridge chip connected to DSI. qcom,bridge-name
+ is required if qcom,dba-panel is defined for the panel.
- qcom,adjust-timer-wakeup-ms: An integer value to indicate the timer delay(in ms) to accommodate
s/w delay while configuring the event timer wakeup logic.
@@ -671,6 +673,7 @@ Example:
qcom,config-select = <&dsi_sim_vid_config0>;
qcom,dba-panel;
+ qcom,bridge-name = "adv7533";
dsi_sim_vid_config0: config0 {
qcom,lm-split = <360 360>;
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi.txt b/Documentation/devicetree/bindings/fb/mdss-dsi.txt
index 67c18174dc21..e978646b54f4 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi.txt
@@ -132,6 +132,8 @@ Optional properties:
- "primary"
- "secondary"
- "tertiary"
+- qcom,bridge-index: Instance id of the bridge chip connected to DSI. qcom,bridge-index is
+ required if a bridge chip panel is used.
Example:
mdss_dsi: qcom,mdss_dsi@0 {
@@ -247,5 +249,6 @@ Example:
qcom,dsi-irq-line;
qcom,lane-map = "lane_map_3012";
qcom,display-id = "primary";
+ qcom,bridge-index = <00>;
};
};
diff --git a/Documentation/devicetree/bindings/fb/mdss-mdp.txt b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
index 4d1bdcd52135..e0a729d28e8e 100644
--- a/Documentation/devicetree/bindings/fb/mdss-mdp.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
@@ -9,6 +9,9 @@ Required properties
- reg : offset and length of the register set for the device.
- reg-names : names to refer to register sets related to this device
- interrupts : Interrupt associated with MDSS.
+- interrupt-controller: Mark the device node as an interrupt controller.
+ This is an empty, boolean property.
+- #interrupt-cells: Should be one. The first cell is interrupt number.
- vdd-supply : Phandle for vdd regulator device node.
- qcom,max-clk-rate: Specify maximum MDP core clock rate in hz that this
device supports.
@@ -509,6 +512,11 @@ Fudge Factors: Fudge factors are used to boost demand for
- qcom,max-pipe-width: This value specifies the maximum MDP SSPP width
the device supports. If not specified, a default value
of 2048 will be applied.
+- qcom,mdss-reg-bus: Property to provide Bus scaling for register access for
+ MDP and DSI Blocks.
+
+- qcom,mdss-rot-reg-bus: Property to provide Bus scaling for register access for
+ Rotator Block.
Optional subnodes:
- mdss_fb: Child nodes representing the frame buffer virtual devices.
@@ -621,6 +629,8 @@ Example:
<0xfd925000 0x1000>;
reg-names = "mdp_phys", "vbif_phys", "vbif_nrt_phys";
interrupts = <0 72 0>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
vdd-supply = <&gdsc_mdss>;
batfet-supply = <&pm8941_chg_batif>;
vdd-cx-supply = <&pm8841_s2_corner>;
@@ -812,5 +822,16 @@ Example:
<&clock_mmss clk_smmu_mdp_axi_clk>;
clock-names = "dummy_clk", "dummy_clk";
};
+
+ qcom,mdss-rot-reg-bus {
+ /* Reg Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_rot_reg";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>;
+ };
};
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
new file mode 100644
index 000000000000..bd35d80ccaff
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -0,0 +1,151 @@
+SDE Rotator
+
+SDE rotator is a v4l2 rotator driver, which manages the rotator hw
+block inside the Snapdragon Display Engine (or Mobile Display Subsystem)
+
+Required properties
+- compatible: Must be "qcom,sde-rotator".
+- reg: offset and length of the register set for the device.
+- reg-names: names to refer to register sets related to this device
+- interrupt-parent: phandle for the interrupt controller that
+ services interrupts for this device.
+- interrupts: Interrupt associated with rotator.
+- <name>-supply: Phandle for <name> regulator device node.
+- qcom,supply-names: names to refer to regulator device node.
+- clocks: List of Phandles for clock device nodes
+ needed by the device.
+- clock-names: List of clock names needed by the device.
+Bus Scaling Data:
+- qcom,msm-bus,name: String property describing rotator client.
+- qcom,msm-bus,num-cases: This is the the number of Bus Scaling use cases
+ defined in the vectors property. This must be
+ set to <3> for rotator driver where use-case 0 is
+ used to take off rotator BW votes from the system.
+ And use-case 1 & 2 are used in ping-pong fashion
+ to generate run-time BW requests.
+- qcom,msm-bus,num-paths: This represents the number of paths in each
+ Bus Scaling Usecase. This value depends on
+ how many number of AXI master ports are
+ dedicated to rotator for particular chipset.
+- qcom,msm-bus,vectors-KBps: * A series of 4 cell properties, with a format
+ of (src, dst, ab, ib) which is defined at
+ Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+ * Current values of src & dst are defined at
+ include/linux/msm-bus-board.h
+ src values allowed for rotator are:
+ 25 = MSM_BUS_MASTER_ROTATOR
+ dst values allowed for rotator are:
+ 512 = MSM_BUS_SLAVE_EBI_CH0
+ ab: Represents aggregated bandwidth.
+ ib: Represents instantaneous bandwidth.
+ * Total number of 4 cell properties will be
+ (number of use-cases * number of paths).
+ * These values will be overridden by the driver
+ based on the run-time requirements. So initial
+ ab and ib values defined here are random and
+ bare no logic except for the use-case 0 where ab
+ and ib values needs to be 0.
+ * Define realtime vector properties followed by
+ non-realtime vector properties.
+
+Optional properties
+- qcom,rot-vbif-settings: Array with key-value pairs of constant VBIF register
+ settings used to setup MDSS QoS for optimum performance.
+ The key used should be offset from "rot_vbif_phys" register
+ defined in reg property.
+- qcom,mdss-rot-block-size: This integer value indicates the size of a memory block
+ (in pixels) to be used by the rotator. If this property
+ is not specified, then a default value of 128 pixels
+ would be used.
+- qcom,mdss-highest-bank-bit: This integer value indicate tile format as opposed to usual
+ linear format. The value tells the GPU highest memory
+ bank bit used.
+- qcom,mdss-default-ot-wr-limit: This integer value indicates maximum number of pending
+ writes that can be allowed on each WR xin.
+ This value can be used to reduce the pending writes
+ limit and can be tuned to match performance
+ requirements depending upon system state.
+ Some platforms require a dynamic ot limiting in
+ some cases. Setting this default ot write limit
+ will enable this dynamic limiting for the write
+ operations in the platforms that require these
+ limits.
+- qcom,mdss-default-ot-rd-limit: This integer value indicates the default number of pending
+ reads that can be allowed on each RD xin.
+ Some platforms require a dynamic ot limiting in
+ some cases. Setting this default ot read limit
+ will enable this dynamic limiting for the read
+ operations in the platforms that require these
+ limits.
+- qcom,mdss-rot-vbif-qos-setting: This array is used to program vbif qos remapper register
+ priority for rotator clients.
+- qcom,mdss-rot-mode: This is integer value indicates operation mode
+ of the rotator device
+
+Subnode properties:
+- compatible: Compatible name used in smmu v2.
+ smmu_v2 names should be:
+ "qcom,smmu_sde_rot_unsec"- smmu context bank device for
+ unsecure rotation domain.
+ "qcom,smmu_sde_rot_sec" - smmu context bank device for
+ secure rotation domain.
+- iommus: specifies the SID's used by this context bank
+- gdsc-mdss-supply: Phandle for mdss supply regulator device node.
+- clocks: List of Phandles for clock device nodes
+ needed by the device.
+- clock-names: List of clock names needed by the device.
+
+
+Example:
+ mdss_rotator: qcom,mdss_rotator {
+ compatible = "qcom,sde_rotator";
+ reg = <0xfd900000 0x22100>,
+ <0xfd925000 0x1000>;
+ reg-names = "mdp_phys", "rot_vbif_phys";
+ interrupt-parent = <&mdss_mdp>;
+ interrupts = <2 0>;
+
+ qcom,mdss-mdp-reg-offset = <0x00001000>;
+
+ rot-vdd-supply = <&gdsc_mdss>;
+ qcom,supply-names = "rot-vdd";
+
+ clocks = <&clock_mmss clk_mmss_mdss_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_rot_clk>;
+ clock-names = "iface_clk", "rot_core_clk";
+
+ qcom,mdss-highest-bank-bit = <0x2>;
+
+ /* Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_rotator";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <25 512 0 0>,
+ <25 512 0 6400000>,
+ <25 512 0 6400000>;
+
+ /* VBIF QoS remapper settings*/
+ qcom,mdss-rot-vbif-qos-setting = <1 1 1 1>;
+
+ qcom,mdss-default-ot-rd-limit = <8>;
+ qcom,mdss-default-ot-wr-limit = <16>;
+
+ smmu_rot_unsec: qcom,smmu_rot_unsec_cb {
+ compatible = "qcom,smmu_sde_rot_unsec";
+ iommus = <&mdp_smmu 0xe00>;
+ gdsc-mdss-supply = <&gdsc_bimc_smmu>;
+ clocks = <&clock_mmss clk_bimc_smmu_ahb_clk>,
+ <&clock_mmss clk_bimc_smmu_axi_clk>;
+ clock-names = "rot_ahb_clk", "rot_axi_clk";
+ };
+
+ smmu_sde_rot_sec: qcom,smmu_sde_rot_sec_cb {
+ compatible = "qcom,smmu_sde_rot_sec";
+ iommus = <&mmss_smmu 0xe01>;
+ gdsc-mdss-supply = <&gdsc_bimc_smmu>;
+ clocks = <&clock_mmss clk_bimc_smmu_ahb_clk>,
+ <&clock_mmss clk_bimc_smmu_axi_clk>;
+ clock-names = "rot_ahb_clk", "rot_axi_clk";
+ };
+ };
diff --git a/arch/arm/boot/dts/qcom/dsi-adv7533-1080p.dtsi b/arch/arm/boot/dts/qcom/dsi-adv7533-1080p.dtsi
index fe2b8c9e38c5..d67922a865fb 100644
--- a/arch/arm/boot/dts/qcom/dsi-adv7533-1080p.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-adv7533-1080p.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -70,5 +70,6 @@
1d 1a 03 05 01 03 04 a0
1d 1a 03 05 01 03 04 a0];
qcom,dba-panel;
+ qcom,bridge-name = "adv7533";
};
};
diff --git a/arch/arm/boot/dts/qcom/dsi-adv7533-720p.dtsi b/arch/arm/boot/dts/qcom/dsi-adv7533-720p.dtsi
index 1c1c599099c8..f6a42b430b58 100644
--- a/arch/arm/boot/dts/qcom/dsi-adv7533-720p.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-adv7533-720p.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -69,5 +69,6 @@ dsi_adv7533_720p: qcom,mdss_dsi_adv7533_720p {
1c 19 02 03 01 03 04 a0
1c 08 02 03 01 03 04 a0];
qcom,dba-panel;
+ qcom,bridge-name = "adv7533";
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
index 07f2cae13eaa..40df8b7ff4de 100644
--- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
@@ -308,6 +308,7 @@
pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
qcom,display-id = "primary";
+ qcom,bridge-index = <0>;
qcom,panel-supply-entries {
#address-cells = <1>;
@@ -339,6 +340,7 @@
pinctrl-0 = <&mdss_dsi_active &mdss_te_active>;
pinctrl-1 = <&mdss_dsi_suspend &mdss_te_suspend>;
qcom,display-id = "tertiary";
+ qcom,bridge-index = <1>;
qcom,panel-supply-entries {
#address-cells = <1>;
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c b/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c
index e232ece8c9e4..f6c85cf8d9a4 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -99,6 +99,14 @@ int post_n1_div_set_div(struct div_clk *clk, int div)
struct mdss_pll_resources *pll = clk->priv;
struct dsi_pll_db *pdb;
struct dsi_pll_output *pout;
+ int rc;
+ u32 n1div = 0;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable mdss dsi pll resources\n");
+ return rc;
+ }
pdb = (struct dsi_pll_db *)pll->priv;
pout = &pdb->out;
@@ -116,10 +124,16 @@ int post_n1_div_set_div(struct div_clk *clk, int div)
pout->pll_postdiv = 1; /* fixed, divided by 1 */
pout->pll_n1div = div;
+ n1div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+ n1div &= ~0xf;
+ n1div |= (div & 0xf);
+ MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n1div);
+ /* ensure n1 divider is programed */
+ wmb();
pr_debug("ndx=%d div=%d postdiv=%x n1div=%x\n",
pll->index, div, pout->pll_postdiv, pout->pll_n1div);
- /* registers commited at pll_db_commit_8996() */
+ mdss_pll_resource_enable(pll, false);
return 0;
}
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-8996.c b/drivers/clk/msm/mdss/mdss-dsi-pll-8996.c
index b366fa9f2257..dc80edd04725 100644
--- a/drivers/clk/msm/mdss/mdss-dsi-pll-8996.c
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-8996.c
@@ -464,7 +464,7 @@ static struct clk_lookup mdss_dsi_pllcc_8996_1[] = {
int dsi_pll_clock_register_8996(struct platform_device *pdev,
struct mdss_pll_resources *pll_res)
{
- int rc, ndx;
+ int rc = 0, ndx;
int const ssc_freq_default = 31500; /* default h/w recommended value */
int const ssc_ppm_default = 5000; /* default h/w recommended value */
struct dsi_pll_db *pdb;
diff --git a/drivers/clk/msm/mdss/mdss-hdmi-pll-8996.c b/drivers/clk/msm/mdss/mdss-hdmi-pll-8996.c
index 5c6b5e3dead2..b6856f56db49 100644
--- a/drivers/clk/msm/mdss/mdss-hdmi-pll-8996.c
+++ b/drivers/clk/msm/mdss/mdss-hdmi-pll-8996.c
@@ -55,6 +55,7 @@
#define HDMI_300MHZ_BIT_CLK_HZ 300000000
#define HDMI_282MHZ_BIT_CLK_HZ 282000000
#define HDMI_250MHZ_BIT_CLK_HZ 250000000
+#define HDMI_KHZ_TO_HZ 1000
/* PLL REGISTERS */
#define QSERDES_COM_ATB_SEL1 (0x000)
@@ -1413,7 +1414,7 @@ static int hdmi_8996_v3_calculate(u32 pix_clk,
cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
- cfg->com_lock_cmp_en = 0x0;
+ cfg->com_lock_cmp_en = 0x04;
cfg->com_core_clk_en = 0x2C;
cfg->com_coreclk_div = HDMI_CORECLK_DIV;
cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
@@ -2207,14 +2208,82 @@ static int hdmi_8996_v3_1p8_vco_enable(struct clk *c)
return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V3_1_8);
}
+static int hdmi_8996_vco_get_lock_range(struct clk *c, unsigned long pixel_clk)
+{
+ u32 rng = 64, cmp_cnt = 1024;
+ u32 coreclk_div = 5, clks_pll_divsel = 2;
+ u32 vco_freq, vco_ratio, ppm_range;
+ u64 bclk;
+ struct hdmi_8996_v3_post_divider pd;
+
+ bclk = ((u64)pixel_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+ DEV_DBG("%s: rate=%ld\n", __func__, pixel_clk);
+
+ if (hdmi_8996_v3_get_post_div(&pd, bclk) ||
+ pd.vco_ratio <= 0 || pd.vco_freq <= 0) {
+ DEV_ERR("%s: couldn't get post div\n", __func__);
+ return -EINVAL;
+ }
+
+ do_div(pd.vco_freq, HDMI_KHZ_TO_HZ * HDMI_KHZ_TO_HZ);
+
+ vco_freq = (u32) pd.vco_freq;
+ vco_ratio = (u32) pd.vco_ratio;
+
+ DEV_DBG("%s: freq %d, ratio %d\n", __func__,
+ vco_freq, vco_ratio);
+
+ ppm_range = (rng * HDMI_REF_CLOCK) / cmp_cnt;
+ ppm_range /= vco_freq / vco_ratio;
+ ppm_range *= coreclk_div * clks_pll_divsel;
+
+ DEV_DBG("%s: ppm range: %d\n", __func__, ppm_range);
+
+ return ppm_range;
+}
+
+static int hdmi_8996_vco_rate_atomic_update(struct clk *c,
+ unsigned long rate, u32 ver)
+{
+ struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+ struct mdss_pll_resources *io = vco->priv;
+ void __iomem *pll;
+ struct hdmi_8996_phy_pll_reg_cfg cfg = {0};
+ int rc = 0;
+
+ rc = hdmi_8996_calculate(rate, &cfg, ver);
+ if (rc) {
+ DEV_ERR("%s: PLL calculation failed\n", __func__);
+ goto end;
+ }
+
+ pll = io->pll_base;
+
+ MDSS_PLL_REG_W(pll, QSERDES_COM_DEC_START_MODE0,
+ cfg.com_dec_start_mode0);
+ MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START1_MODE0,
+ cfg.com_div_frac_start1_mode0);
+ MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START2_MODE0,
+ cfg.com_div_frac_start2_mode0);
+ MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START3_MODE0,
+ cfg.com_div_frac_start3_mode0);
+
+ MDSS_PLL_REG_W(pll, QSERDES_COM_FREQ_UPDATE, 0x01);
+ MDSS_PLL_REG_W(pll, QSERDES_COM_FREQ_UPDATE, 0x00);
+
+ DEV_DBG("%s: updated to rate %ld\n", __func__, rate);
+end:
+ return rc;
+}
+
static int hdmi_8996_vco_set_rate(struct clk *c, unsigned long rate, u32 ver)
{
struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
struct mdss_pll_resources *io = vco->priv;
- void __iomem *pll_base;
- void __iomem *phy_base;
unsigned int set_power_dwn = 0;
- int rc;
+ bool atomic_update = false;
+ int rc, pll_lock_range;
rc = mdss_pll_resource_enable(io, true);
if (rc) {
@@ -2222,17 +2291,36 @@ static int hdmi_8996_vco_set_rate(struct clk *c, unsigned long rate, u32 ver)
return rc;
}
- if (io->pll_on)
- set_power_dwn = 1;
+ DEV_DBG("%s: rate %ld\n", __func__, rate);
+
+ if (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_C_READY_STATUS) & BIT(0) &&
+ MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS) & BIT(0)) {
+ pll_lock_range = hdmi_8996_vco_get_lock_range(c, vco->rate);
+
+ if (pll_lock_range > 0 && vco->rate) {
+ u32 range_limit;
- pll_base = io->pll_base;
- phy_base = io->phy_base;
+ range_limit = vco->rate *
+ (pll_lock_range / HDMI_KHZ_TO_HZ);
+ range_limit /= HDMI_KHZ_TO_HZ;
- DEV_DBG("HDMI PIXEL CLK rate=%ld\n", rate);
+ DEV_DBG("%s: range limit %d\n", __func__, range_limit);
- rc = hdmi_8996_phy_pll_set_clk_rate(c, rate, ver);
- if (rc)
- DEV_ERR("%s: Failed to set clk rate\n", __func__);
+ if (abs(rate - vco->rate) < range_limit)
+ atomic_update = true;
+ }
+ }
+
+ if (io->pll_on && !atomic_update)
+ set_power_dwn = 1;
+
+ if (atomic_update) {
+ hdmi_8996_vco_rate_atomic_update(c, rate, ver);
+ } else {
+ rc = hdmi_8996_phy_pll_set_clk_rate(c, rate, ver);
+ if (rc)
+ DEV_ERR("%s: Failed to set clk rate\n", __func__);
+ }
mdss_pll_resource_enable(io, false);
@@ -2265,9 +2353,102 @@ static int hdmi_8996_v3_1p8_vco_set_rate(struct clk *c, unsigned long rate)
return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V3_1_8);
}
+static unsigned long hdmi_get_hsclk_sel_divisor(unsigned long hsclk_sel)
+{
+ unsigned long divisor;
+
+ switch (hsclk_sel) {
+ case 0:
+ divisor = 2;
+ break;
+ case 1:
+ divisor = 6;
+ break;
+ case 2:
+ divisor = 10;
+ break;
+ case 3:
+ divisor = 14;
+ break;
+ case 4:
+ divisor = 3;
+ break;
+ case 5:
+ divisor = 9;
+ break;
+ case 6:
+ case 13:
+ divisor = 15;
+ break;
+ case 7:
+ divisor = 21;
+ break;
+ case 8:
+ divisor = 4;
+ break;
+ case 9:
+ divisor = 12;
+ break;
+ case 10:
+ divisor = 20;
+ break;
+ case 11:
+ divisor = 28;
+ break;
+ case 12:
+ divisor = 5;
+ break;
+ case 14:
+ divisor = 25;
+ break;
+ case 15:
+ divisor = 35;
+ break;
+ default:
+ divisor = 1;
+ DEV_ERR("%s: invalid hsclk_sel value = %lu",
+ __func__, hsclk_sel);
+ break;
+ }
+
+ return divisor;
+}
+
static unsigned long hdmi_8996_vco_get_rate(struct clk *c)
{
- unsigned long freq = 0;
+ unsigned long freq = 0, hsclk_sel = 0, tx_band = 0, dec_start = 0,
+ div_frac_start = 0, vco_clock_freq = 0;
+ struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+ struct mdss_pll_resources *io = vco->priv;
+
+ if (mdss_pll_resource_enable(io, true)) {
+ DEV_ERR("%s: pll resource can't be enabled\n", __func__);
+ return freq;
+ }
+
+ dec_start = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEC_START_MODE0);
+
+ div_frac_start =
+ MDSS_PLL_REG_R(io->pll_base,
+ QSERDES_COM_DIV_FRAC_START1_MODE0) |
+ MDSS_PLL_REG_R(io->pll_base,
+ QSERDES_COM_DIV_FRAC_START2_MODE0) << 8 |
+ MDSS_PLL_REG_R(io->pll_base,
+ QSERDES_COM_DIV_FRAC_START3_MODE0) << 16;
+
+ vco_clock_freq = (dec_start + (div_frac_start / (1 << 20)))
+ * 4 * (HDMI_REF_CLOCK);
+
+ hsclk_sel = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_HSCLK_SEL) & 0x15;
+ hsclk_sel = hdmi_get_hsclk_sel_divisor(hsclk_sel);
+ tx_band = MDSS_PLL_REG_R(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+ QSERDES_TX_L0_TX_BAND) & 0x3;
+
+ freq = vco_clock_freq / (10 * hsclk_sel * (1 << tx_band));
+
+ mdss_pll_resource_enable(io, false);
+
+ DEV_DBG("%s: freq = %lu\n", __func__, freq);
return freq;
}
diff --git a/drivers/media/platform/msm/Kconfig b/drivers/media/platform/msm/Kconfig
index 104675252ccd..16060773ac96 100644
--- a/drivers/media/platform/msm/Kconfig
+++ b/drivers/media/platform/msm/Kconfig
@@ -40,3 +40,4 @@ source "drivers/media/platform/msm/camera_v2/Kconfig"
endif # MSMB_CAMERA
source "drivers/media/platform/msm/vidc/Kconfig"
+source "drivers/media/platform/msm/sde/Kconfig"
diff --git a/drivers/media/platform/msm/Makefile b/drivers/media/platform/msm/Makefile
index 1b72716b0864..ff0369bdeca5 100644
--- a/drivers/media/platform/msm/Makefile
+++ b/drivers/media/platform/msm/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_MSMB_CAMERA) += camera_v2/
obj-$(CONFIG_MSM_VIDC_V4L2) += vidc/
+obj-y += sde/
diff --git a/drivers/media/platform/msm/sde/Kconfig b/drivers/media/platform/msm/sde/Kconfig
new file mode 100644
index 000000000000..3795972701bc
--- /dev/null
+++ b/drivers/media/platform/msm/sde/Kconfig
@@ -0,0 +1,8 @@
+config MSM_SDE_ROTATOR
+ bool "QTI V4L2 based SDE Rotator"
+ depends on ARCH_MSM && VIDEO_V4L2
+ select V4L2_MEM2MEM_DEV
+ select VIDEOBUF2_CORE
+ select SW_SYNC if SYNC
+ ---help---
+ Enable support of V4L2 rotator driver. \ No newline at end of file
diff --git a/drivers/media/platform/msm/sde/Makefile b/drivers/media/platform/msm/sde/Makefile
new file mode 100644
index 000000000000..fe55d5044c3b
--- /dev/null
+++ b/drivers/media/platform/msm/sde/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MSM_SDE_ROTATOR) += rotator/
diff --git a/drivers/media/platform/msm/sde/rotator/Makefile b/drivers/media/platform/msm/sde/rotator/Makefile
new file mode 100644
index 000000000000..8793b1e97f34
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/Makefile
@@ -0,0 +1,23 @@
+ccflags-y += -I$(src)
+
+obj-y := \
+ sde_rotator_dev.o \
+ sde_rotator_core.o \
+ sde_rotator_base.o \
+ sde_rotator_formats.o \
+ sde_rotator_util.o \
+ sde_rotator_io_util.o \
+ sde_rotator_smmu.o
+
+obj-y += \
+ sde_rotator_r1_wb.o \
+ sde_rotator_r1_pipe.o \
+ sde_rotator_r1_ctl.o \
+ sde_rotator_r1.o
+
+obj-$(CONFIG_SYNC) += \
+ sde_rotator_sync.o
+
+obj-$(CONFIG_DEBUG_FS) += \
+ sde_rotator_debug.o \
+ sde_rotator_r1_debug.o \ No newline at end of file
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
new file mode 100644
index 000000000000..2f692a4891bb
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.c
@@ -0,0 +1,569 @@
+/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/debugfs.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/regulator/consumer.h>
+
+#define CREATE_TRACE_POINTS
+#include "sde_rotator_base.h"
+#include "sde_rotator_util.h"
+#include "sde_rotator_trace.h"
+
+/*
+ * fudge_factor - scale @val by the ratio @numer/@denom in 64-bit math.
+ * do_div() keeps the 64-bit division safe on 32-bit platforms.
+ */
+static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
+{
+	u64 result = (val * (u64)numer);
+
+	do_div(result, denom);
+	return result;
+}
+
+/* Scale @val up by @factor (numer/denom). */
+static inline u64 apply_fudge_factor(u64 val,
+	struct sde_mult_factor *factor)
+{
+	return fudge_factor(val, factor->numer, factor->denom);
+}
+
+/* Scale @val by the inverse of @factor (denom/numer). */
+static inline u64 apply_inverse_fudge_factor(u64 val,
+	struct sde_mult_factor *factor)
+{
+	return fudge_factor(val, factor->denom, factor->numer);
+}
+
+/* A ratio is usable only when both terms are non-zero. */
+static inline bool validate_comp_ratio(struct sde_mult_factor *factor)
+{
+	return factor->numer && factor->denom;
+}
+
+/*
+ * sde_apply_comp_ratio_factor - apply a compression ratio to a bw quota.
+ * @quota: bandwidth quota to adjust
+ * @fmt: buffer format the quota was computed for
+ * @factor: compression ratio (numer/denom)
+ *
+ * The ratio is applied only for UBWC (compressed) formats and only when
+ * the target advertises SDE_QOS_OVERHEAD_FACTOR; otherwise @quota is
+ * returned unchanged.
+ */
+u32 sde_apply_comp_ratio_factor(u32 quota,
+	struct sde_mdp_format_params *fmt,
+	struct sde_mult_factor *factor)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+
+	if (!mdata || !test_bit(SDE_QOS_OVERHEAD_FACTOR,
+		mdata->sde_qos_map))
+		return quota;
+
+	/* apply compression ratio, only for compressed formats */
+	if (sde_mdp_is_ubwc_format(fmt) &&
+	    validate_comp_ratio(factor))
+		quota = apply_inverse_fudge_factor(quota, factor);
+
+	return quota;
+}
+
+#define RES_1080p (1088*1920)
+#define RES_UHD (3840*2160)
+#define XIN_HALT_TIMEOUT_US 0x4000
+
+/*
+ * sde_mdp_wait_for_xin_halt - poll the NRT VBIF until @xin_id halts.
+ * The client's bit in MMSS_VBIF_XIN_HALT_CTRL1 is polled every 1ms for
+ * up to XIN_HALT_TIMEOUT_US. Clocks must be on and the halt request
+ * must already have been issued by the caller.
+ *
+ * Return: 0 when halted, -ETIMEDOUT if the client never idles.
+ */
+static int sde_mdp_wait_for_xin_halt(u32 xin_id)
+{
+	void __iomem *vbif_base;
+	u32 status;
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	u32 idle_mask = BIT(xin_id);
+	int rc;
+
+	vbif_base = mdata->vbif_nrt_io.base;
+
+	rc = readl_poll_timeout(vbif_base + MMSS_VBIF_XIN_HALT_CTRL1,
+		status, (status & idle_mask),
+		1000, XIN_HALT_TIMEOUT_US);
+	if (rc == -ETIMEDOUT) {
+		SDEROT_ERR("VBIF client %d not halting. TIMEDOUT.\n",
+			xin_id);
+	} else {
+		SDEROT_DBG("VBIF client %d is halted\n", xin_id);
+	}
+
+	return rc;
+}
+
+/**
+ * force_on_xin_clk() - enable/disable the force-on for the pipe clock
+ * @bit_off: offset of the bit to enable/disable the force-on.
+ * @clk_ctl_reg_off: register offset for the clock control.
+ * @enable: boolean to indicate if the force-on of the clock needs to be
+ * enabled or disabled.
+ *
+ * This function returns:
+ * true - if the clock is forced-on by this function
+ * false - if the clock was already forced on
+ * It is the caller responsibility to check if this function is forcing
+ * the clock on; if so, it will need to remove the force of the clock,
+ * otherwise it should avoid to remove the force-on.
+ * Clocks must be on when calling this function.
+ */
+static bool force_on_xin_clk(u32 bit_off, u32 clk_ctl_reg_off, bool enable)
+{
+	u32 val;
+	u32 force_on_mask;
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	bool clk_forced_on = false;
+
+	force_on_mask = BIT(bit_off);
+	val = readl_relaxed(mdata->mdp_base + clk_ctl_reg_off);
+
+	/* report true only if the bit was clear before we set it */
+	clk_forced_on = !(force_on_mask & val);
+
+	if (enable)
+		val |= force_on_mask;
+	else
+		val &= ~force_on_mask;
+
+	writel_relaxed(val, mdata->mdp_base + clk_ctl_reg_off);
+
+	return clk_forced_on;
+}
+
+/*
+ * apply_dynamic_ot_limit - tighten the OT limit based on use case.
+ * Only active when the target supports SDE_QOS_OTLIM. Rotation of YUV
+ * sources and writeback get reduced limits, scaled by resolution.
+ */
+static void apply_dynamic_ot_limit(u32 *ot_lim,
+	struct sde_mdp_set_ot_params *params)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	u32 res;
+
+	if (!test_bit(SDE_QOS_OTLIM, mdata->sde_qos_map))
+		return;
+
+	res = params->width * params->height;
+
+	SDEROT_DBG("w:%d h:%d rot:%d yuv:%d wb:%d res:%d\n",
+		params->width, params->height, params->is_rot,
+		params->is_yuv, params->is_wb, res);
+
+	if ((params->is_rot && params->is_yuv) ||
+		params->is_wb) {
+		if (res <= RES_1080p) {
+			*ot_lim = 2;
+		} else if (res <= RES_UHD) {
+			if (params->is_rot && params->is_yuv)
+				*ot_lim = 8;
+			else
+				*ot_lim = 16;
+		}
+	}
+}
+
+/*
+ * get_ot_limit - compute the OT (outstanding transaction) limit to program.
+ * Starts from the DT-provided default rd/wr limit (0 means "do not
+ * program"), lets apply_dynamic_ot_limit() adjust it for the use case,
+ * and returns 0 if the register already holds the requested value.
+ */
+static u32 get_ot_limit(u32 reg_off, u32 bit_off,
+	struct sde_mdp_set_ot_params *params)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	u32 ot_lim = 0;
+	u32 val;
+
+	if (mdata->default_ot_wr_limit &&
+		(params->reg_off_vbif_lim_conf == MMSS_VBIF_WR_LIM_CONF))
+		ot_lim = mdata->default_ot_wr_limit;
+	else if (mdata->default_ot_rd_limit &&
+		(params->reg_off_vbif_lim_conf == MMSS_VBIF_RD_LIM_CONF))
+		ot_lim = mdata->default_ot_rd_limit;
+
+	/*
+	 * If default ot is not set from dt,
+	 * then do not configure it.
+	 */
+	if (ot_lim == 0)
+		goto exit;
+
+	/* Modify the limits if the target and the use case requires it */
+	apply_dynamic_ot_limit(&ot_lim, params);
+
+	/* extract the current 8-bit limit for this client */
+	val = SDE_VBIF_READ(mdata, reg_off);
+	val &= (0xFF << bit_off);
+	val = val >> bit_off;
+
+	/* already programmed; signal "nothing to do" */
+	if (val == ot_lim)
+		ot_lim = 0;
+
+exit:
+	SDEROT_DBG("ot_lim=%d\n", ot_lim);
+	return ot_lim;
+}
+
+/*
+ * sde_mdp_set_ot_limit - program the VBIF OT limit for one xin client.
+ * Sequence: force the client clock on, write the 8-bit limit into its
+ * slot of the lim_conf register, halt the xin client (polled) so the
+ * new limit takes effect, unhalt, then drop the clock force only if it
+ * was taken here. A computed limit of 0 means nothing to program.
+ */
+void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	u32 ot_lim;
+	/* four 8-bit client slots per 32-bit lim_conf register */
+	u32 reg_off_vbif_lim_conf = (params->xin_id / 4) * 4 +
+		params->reg_off_vbif_lim_conf;
+	u32 bit_off_vbif_lim_conf = (params->xin_id % 4) * 8;
+	u32 reg_val;
+	bool forced_on;
+
+	ot_lim = get_ot_limit(
+		reg_off_vbif_lim_conf,
+		bit_off_vbif_lim_conf,
+		params) & 0xFF;
+
+	if (ot_lim == 0)
+		goto exit;
+
+	trace_rot_perf_set_ot(params->num, params->xin_id, ot_lim);
+
+	forced_on = force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
+		params->reg_off_mdp_clk_ctrl, true);
+
+	reg_val = SDE_VBIF_READ(mdata, reg_off_vbif_lim_conf);
+	reg_val &= ~(0xFF << bit_off_vbif_lim_conf);
+	reg_val |= (ot_lim) << bit_off_vbif_lim_conf;
+	SDE_VBIF_WRITE(mdata, reg_off_vbif_lim_conf, reg_val);
+
+	reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
+	SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+		reg_val | BIT(params->xin_id));
+
+	/* this is a polling operation */
+	sde_mdp_wait_for_xin_halt(params->xin_id);
+
+	reg_val = SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL0);
+	SDE_VBIF_WRITE(mdata, MMSS_VBIF_XIN_HALT_CTRL0,
+		reg_val & ~BIT(params->xin_id));
+
+	if (forced_on)
+		force_on_xin_clk(params->bit_off_mdp_clk_ctrl,
+			params->reg_off_mdp_clk_ctrl, false);
+
+exit:
+	return;
+}
+
+/*
+ * sde_reg_bus_vote_client_create - allocate a reg bus vote client.
+ * The client gets a process-unique id and is added to the global client
+ * list with its vote initialized to VOTE_INDEX_DISABLE.
+ *
+ * Return: new client, or ERR_PTR(-EINVAL)/ERR_PTR(-ENOMEM) on failure.
+ * Callers release it with sde_reg_bus_vote_client_destroy().
+ */
+struct reg_bus_client *sde_reg_bus_vote_client_create(char *client_name)
+{
+	struct reg_bus_client *client;
+	struct sde_rot_data_type *sde_res = sde_rot_get_mdata();
+	static u32 id;
+
+	if (client_name == NULL) {
+		SDEROT_ERR("client name is null\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	client = kzalloc(sizeof(struct reg_bus_client), GFP_KERNEL);
+	if (!client)
+		return ERR_PTR(-ENOMEM);
+
+	/* id allocation is serialized by reg_bus_lock */
+	mutex_lock(&sde_res->reg_bus_lock);
+	strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+	client->usecase_ndx = VOTE_INDEX_DISABLE;
+	client->id = id;
+	SDEROT_DBG("bus vote client %s created:%p id :%d\n", client_name,
+		client, id);
+	id++;
+	list_add(&client->list, &sde_res->reg_bus_clist);
+	mutex_unlock(&sde_res->reg_bus_lock);
+
+	return client;
+}
+
+/*
+ * sde_reg_bus_vote_client_destroy - unlink a vote client from the global
+ * list and free it. A NULL handle is logged and ignored.
+ */
+void sde_reg_bus_vote_client_destroy(struct reg_bus_client *client)
+{
+	struct sde_rot_data_type *sde_res = sde_rot_get_mdata();
+
+	if (!client) {
+		SDEROT_ERR("reg bus vote: invalid client handle\n");
+		return;
+	}
+
+	SDEROT_DBG("bus vote client %s destroyed:%p id:%u\n",
+		client->name, client, client->id);
+	mutex_lock(&sde_res->reg_bus_lock);
+	list_del_init(&client->list);
+	mutex_unlock(&sde_res->reg_bus_lock);
+	kfree(client);
+}
+
+/*
+ * sde_update_reg_bus_vote - set @bus_client's vote and apply the highest
+ * vote held by any registered client to the AHB register bus.
+ * @usecase_ndx: requested vote level (enum sde_bus_vote_type)
+ *
+ * Return: 0 when there is nothing to do or on success, otherwise the
+ * error from msm_bus_scale_client_update_request().
+ */
+int sde_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
+{
+	int ret = 0;
+	bool changed = false;
+	u32 max_usecase_ndx = VOTE_INDEX_DISABLE;
+	struct reg_bus_client *client;
+	struct sde_rot_data_type *sde_res = sde_rot_get_mdata();
+
+	if (!sde_res || !sde_res->reg_bus_hdl || !bus_client)
+		return 0;
+
+	mutex_lock(&sde_res->reg_bus_lock);
+	bus_client->usecase_ndx = usecase_ndx;
+	/*
+	 * No entries are removed while the lock is held, so the plain
+	 * iterator is sufficient (the _safe variant was unnecessary).
+	 */
+	list_for_each_entry(client, &sde_res->reg_bus_clist, list) {
+		if (client->usecase_ndx < VOTE_INDEX_MAX &&
+		    client->usecase_ndx > max_usecase_ndx)
+			max_usecase_ndx = client->usecase_ndx;
+	}
+
+	if (sde_res->reg_bus_usecase_ndx != max_usecase_ndx) {
+		changed = true;
+		sde_res->reg_bus_usecase_ndx = max_usecase_ndx;
+	}
+
+	SDEROT_DBG(
+		"%pS: changed=%d current idx=%d request client %s id:%u idx:%d\n",
+		__builtin_return_address(0), changed, max_usecase_ndx,
+		bus_client->name, bus_client->id, usecase_ndx);
+	if (changed)
+		ret = msm_bus_scale_client_update_request(sde_res->reg_bus_hdl,
+			max_usecase_ndx);
+
+	mutex_unlock(&sde_res->reg_bus_lock);
+	return ret;
+}
+
+/*
+ * sde_mdp_parse_dt_handler - read a u32 array property into @offsets.
+ * @len: number of u32 elements expected.
+ *
+ * Return: 0 on success, -EINVAL if the property is absent or malformed.
+ */
+static int sde_mdp_parse_dt_handler(struct platform_device *pdev,
+		char *prop_name, u32 *offsets, int len)
+{
+	int rc;
+
+	rc = of_property_read_u32_array(pdev->dev.of_node, prop_name,
+			offsets, len);
+	if (rc) {
+		SDEROT_ERR("Error from prop %s : u32 array read\n", prop_name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * sde_mdp_parse_dt_prop_len - number of u32 elements in a DT property.
+ *
+ * Return: element count, or 0 if the property does not exist or is empty.
+ */
+static int sde_mdp_parse_dt_prop_len(struct platform_device *pdev,
+		char *prop_name)
+{
+	int len = 0;
+
+	of_find_property(pdev->dev.of_node, prop_name, &len);
+
+	if (len < 1) {
+		SDEROT_INFO("prop %s : doesn't exist in device tree\n",
+			prop_name);
+		return 0;
+	}
+
+	/* convert byte length to u32 element count */
+	len = len/sizeof(u32);
+
+	return len;
+}
+
+/*
+ * sde_mdp_parse_vbif_qos - read the rotator VBIF QoS priority table
+ * from the device tree. On any failure (property absent, allocation or
+ * parse error) the table is left NULL with npriority_lvl of 0, so later
+ * code can reliably skip QoS programming.
+ */
+static void sde_mdp_parse_vbif_qos(struct platform_device *pdev,
+		struct sde_rot_data_type *mdata)
+{
+	int rc;
+
+	mdata->vbif_rt_qos = NULL;
+	mdata->vbif_nrt_qos = NULL;
+
+	mdata->npriority_lvl = sde_mdp_parse_dt_prop_len(pdev,
+			"qcom,mdss-rot-vbif-qos-setting");
+	/* avoid a zero-size allocation when the property is absent */
+	if (!mdata->npriority_lvl)
+		return;
+
+	/* kcalloc checks the count * size multiplication for overflow */
+	mdata->vbif_nrt_qos = kcalloc(mdata->npriority_lvl,
+			sizeof(u32), GFP_KERNEL);
+	if (!mdata->vbif_nrt_qos) {
+		mdata->npriority_lvl = 0;
+		return;
+	}
+
+	rc = sde_mdp_parse_dt_handler(pdev,
+		"qcom,mdss-rot-vbif-qos-setting", mdata->vbif_nrt_qos,
+		mdata->npriority_lvl);
+	if (rc) {
+		SDEROT_DBG("vbif setting not found\n");
+		/* don't leak or leave a half-initialized table behind */
+		kfree(mdata->vbif_nrt_qos);
+		mdata->vbif_nrt_qos = NULL;
+		mdata->npriority_lvl = 0;
+	}
+}
+
+/*
+ * sde_mdp_parse_dt_misc - parse optional misc DT properties and derive
+ * the MDP register base. All properties fall back to defaults when
+ * absent (block size 128, OT limits 0 = unprogrammed).
+ *
+ * Return: always 0.
+ */
+static int sde_mdp_parse_dt_misc(struct platform_device *pdev,
+		struct sde_rot_data_type *mdata)
+{
+	int rc;
+	u32 data;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,mdss-rot-block-size",
+		&data);
+	mdata->rot_block_size = (!rc ? data : 128);
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-default-ot-rd-limit", &data);
+	mdata->default_ot_rd_limit = (!rc ? data : 0);
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"qcom,mdss-default-ot-wr-limit", &data);
+	mdata->default_ot_wr_limit = (!rc ? data : 0);
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		 "qcom,mdss-highest-bank-bit", &(mdata->highest_bank_bit));
+	if (rc)
+		SDEROT_DBG(
+			"Could not read optional property: highest bank bit\n");
+
+	sde_mdp_parse_vbif_qos(pdev, mdata);
+
+	/* MDP block sits at a fixed offset from the mapped SDE base */
+	mdata->mdp_base = mdata->sde_io.base + SDE_MDP_OFFSET;
+
+	return 0;
+}
+
+/* One AHB vote entry; ab is unused (0), ib selects the clock tier. */
+#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+	{ \
+		.src = MSM_BUS_MASTER_AMPSS_M0, \
+		.dst = MSM_BUS_SLAVE_DISPLAY_CFG, \
+		.ab = (ab_val), \
+		.ib = (ib_val), \
+	}
+
+#define BUS_VOTE_19_MHZ 153600000
+#define BUS_VOTE_40_MHZ 320000000
+#define BUS_VOTE_80_MHZ 640000000
+
+/* Vote levels indexed by enum sde_bus_vote_type (off/19/40/80 MHz) */
+static struct msm_bus_vectors mdp_reg_bus_vectors[] = {
+	MDP_REG_BUS_VECTOR_ENTRY(0, 0),
+	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
+	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_40_MHZ),
+	MDP_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_80_MHZ),
+};
+static struct msm_bus_paths mdp_reg_bus_usecases[ARRAY_SIZE(
+		mdp_reg_bus_vectors)];
+static struct msm_bus_scale_pdata mdp_reg_bus_scale_table = {
+	.usecase = mdp_reg_bus_usecases,
+	.num_usecases = ARRAY_SIZE(mdp_reg_bus_usecases),
+	.name = "sde_reg",
+	.active_only = true,
+};
+
+/*
+ * sde_mdp_bus_scale_register - register the AHB reg bus scaling client.
+ * Each usecase gets a single path pointing at its vote vector. A failed
+ * registration is non-fatal: the driver continues without reg bus
+ * scaling.
+ *
+ * Return: always 0.
+ */
+static int sde_mdp_bus_scale_register(struct sde_rot_data_type *mdata)
+{
+	struct msm_bus_scale_pdata *reg_bus_pdata;
+	int i;
+
+	if (!mdata->reg_bus_hdl) {
+		reg_bus_pdata = &mdp_reg_bus_scale_table;
+		for (i = 0; i < reg_bus_pdata->num_usecases; i++) {
+			mdp_reg_bus_usecases[i].num_paths = 1;
+			mdp_reg_bus_usecases[i].vectors =
+				&mdp_reg_bus_vectors[i];
+		}
+
+		mdata->reg_bus_hdl =
+			msm_bus_scale_register_client(reg_bus_pdata);
+		if (!mdata->reg_bus_hdl) {
+			/* Continue without reg_bus scaling */
+			SDEROT_WARN("reg_bus_client register failed\n");
+		} else {
+			SDEROT_DBG("register reg_bus_hdl=%x\n",
+					mdata->reg_bus_hdl);
+		}
+	}
+
+	return 0;
+}
+
+/* Release the AHB reg bus client, if one was registered. */
+static void sde_mdp_bus_scale_unregister(struct sde_rot_data_type *mdata)
+{
+	SDEROT_DBG("unregister reg_bus_hdl=%x\n", mdata->reg_bus_hdl);
+
+	if (!mdata->reg_bus_hdl)
+		return;
+
+	msm_bus_scale_unregister_client(mdata->reg_bus_hdl);
+	mdata->reg_bus_hdl = 0;
+}
+
+/* Global rotator context, set by sde_rotator_base_init(). */
+static struct sde_rot_data_type *sde_rot_res;
+
+/* Accessor for the global rotator context (NULL before init). */
+struct sde_rot_data_type *sde_rot_get_mdata(void)
+{
+	return sde_rot_res;
+}
+
+/*
+ * sde_rotator_base_init - initialize base rotator data/resource
+ * Maps the SDE and rotator NRT VBIF register spaces, parses misc DT
+ * properties, registers AHB bus scaling and initializes the SMMU.
+ * On success *pmdata holds the new context and the global pointer
+ * returned by sde_rot_get_mdata() is set.
+ *
+ * NOTE(review): error paths return without undoing earlier ioremaps or
+ * clearing sde_rot_res - presumably sde_rot_ioremap_byname() is
+ * devm-managed and probe failure releases them; confirm.
+ */
+int sde_rotator_base_init(struct sde_rot_data_type **pmdata,
+		struct platform_device *pdev,
+		const void *drvdata)
+{
+	int rc;
+	struct sde_rot_data_type *mdata;
+
+	mdata = devm_kzalloc(&pdev->dev, sizeof(*mdata), GFP_KERNEL);
+	if (mdata == NULL)
+		return -ENOMEM;
+
+	mdata->pdev = pdev;
+	sde_rot_res = mdata;
+	mutex_init(&mdata->reg_bus_lock);
+	INIT_LIST_HEAD(&mdata->reg_bus_clist);
+
+	rc = sde_rot_ioremap_byname(pdev, &mdata->sde_io, "mdp_phys");
+	if (rc) {
+		SDEROT_ERR("unable to map SDE base\n");
+		goto probe_done;
+	}
+	SDEROT_DBG("SDE ROT HW Base addr=0x%x len=0x%x\n",
+		(int) (unsigned long) mdata->sde_io.base,
+		mdata->sde_io.len);
+
+	rc = sde_rot_ioremap_byname(pdev, &mdata->vbif_nrt_io, "rot_vbif_phys");
+	if (rc) {
+		SDEROT_ERR("unable to map SDE ROT VBIF base\n");
+		goto probe_done;
+	}
+	SDEROT_DBG("SDE ROT VBIF HW Base addr=%p len=0x%x\n",
+			mdata->vbif_nrt_io.base, mdata->vbif_nrt_io.len);
+
+	rc = sde_mdp_parse_dt_misc(pdev, mdata);
+	if (rc) {
+		SDEROT_ERR("Error in device tree : misc\n");
+		goto probe_done;
+	}
+
+	rc = sde_mdp_bus_scale_register(mdata);
+	if (rc) {
+		SDEROT_ERR("unable to register bus scaling\n");
+		goto probe_done;
+	}
+
+	rc = sde_smmu_init(&pdev->dev);
+	if (rc) {
+		SDEROT_ERR("sde smmu init failed %d\n", rc);
+		goto probe_done;
+	}
+
+	*pmdata = mdata;
+
+	return 0;
+probe_done:
+	return rc;
+}
+
+/*
+ * sde_rotator_base_destroy - tear down everything sde_rotator_base_init()
+ * set up: bus scaling, both register mappings, and the context itself.
+ * Also clears the global context pointer.
+ */
+void sde_rotator_base_destroy(struct sde_rot_data_type *mdata)
+{
+	struct platform_device *pdev;
+
+	if (!mdata || !mdata->pdev)
+		return;
+
+	pdev = mdata->pdev;
+	sde_rot_res = NULL;
+
+	sde_mdp_bus_scale_unregister(mdata);
+	sde_rot_iounmap(&mdata->vbif_nrt_io);
+	sde_rot_iounmap(&mdata->sde_io);
+	devm_kfree(&pdev->dev, mdata);
+}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
new file mode 100644
index 000000000000..af45f8ef0d17
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -0,0 +1,162 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_ROTATOR_BASE_H__
+#define __SDE_ROTATOR_BASE_H__
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/kref.h>
+#include <linux/kernel.h>
+#include <linux/regulator/consumer.h>
+
+#include "sde_rotator_hwio.h"
+#include "sde_rotator_io_util.h"
+#include "sde_rotator_smmu.h"
+#include "sde_rotator_formats.h"
+
+struct sde_mult_factor {
+ uint32_t numer;
+ uint32_t denom;
+};
+
+struct sde_mdp_set_ot_params {
+ u32 xin_id;
+ u32 num;
+ u32 width;
+ u32 height;
+ bool is_rot;
+ bool is_wb;
+ bool is_yuv;
+ u32 reg_off_vbif_lim_conf;
+ u32 reg_off_mdp_clk_ctrl;
+ u32 bit_off_mdp_clk_ctrl;
+};
+
+enum sde_bus_vote_type {
+ VOTE_INDEX_DISABLE,
+ VOTE_INDEX_19_MHZ,
+ VOTE_INDEX_40_MHZ,
+ VOTE_INDEX_80_MHZ,
+ VOTE_INDEX_MAX,
+};
+
+#define MAX_CLIENT_NAME_LEN 64
+
+enum sde_qos_settings {
+ SDE_QOS_PER_PIPE_IB,
+ SDE_QOS_OVERHEAD_FACTOR,
+ SDE_QOS_CDP,
+ SDE_QOS_OTLIM,
+ SDE_QOS_PER_PIPE_LUT,
+ SDE_QOS_SIMPLIFIED_PREFILL,
+ SDE_QOS_VBLANK_PANIC_CTRL,
+ SDE_QOS_MAX,
+};
+
+enum sde_caps_settings {
+ SDE_CAPS_R1_WB,
+ SDE_CAPS_R3_WB,
+ SDE_CAPS_MAX,
+};
+
+enum sde_bus_clients {
+ SDE_ROT_RT,
+ SDE_ROT_NRT,
+ SDE_MAX_BUS_CLIENTS
+};
+
+struct reg_bus_client {
+ char name[MAX_CLIENT_NAME_LEN];
+ short usecase_ndx;
+ u32 id;
+ struct list_head list;
+};
+
+struct sde_smmu_client {
+ struct device *dev;
+ struct dma_iommu_mapping *mmu_mapping;
+ struct sde_module_power mp;
+ struct reg_bus_client *reg_bus_clt;
+ bool domain_attached;
+};
+
+struct sde_rot_data_type {
+ u32 mdss_version;
+
+ struct platform_device *pdev;
+ struct sde_io_data sde_io;
+ struct sde_io_data vbif_nrt_io;
+ char __iomem *mdp_base;
+
+ struct sde_smmu_client sde_smmu[SDE_IOMMU_MAX_DOMAIN];
+
+ /* bitmap to track qos applicable settings */
+ DECLARE_BITMAP(sde_qos_map, SDE_QOS_MAX);
+
+ /* bitmap to track capability settings */
+ DECLARE_BITMAP(sde_caps_map, SDE_CAPS_MAX);
+
+ u32 default_ot_rd_limit;
+ u32 default_ot_wr_limit;
+ u32 highest_bank_bit;
+ u32 rot_block_size;
+
+ /* register bus (AHB) */
+ u32 reg_bus_hdl;
+ u32 reg_bus_usecase_ndx;
+ struct list_head reg_bus_clist;
+ struct mutex reg_bus_lock;
+
+ u32 *vbif_rt_qos;
+ u32 *vbif_nrt_qos;
+ u32 npriority_lvl;
+
+ int iommu_attached;
+ int iommu_ref_cnt;
+};
+
+int sde_rotator_base_init(struct sde_rot_data_type **pmdata,
+ struct platform_device *pdev,
+ const void *drvdata);
+
+void sde_rotator_base_destroy(struct sde_rot_data_type *data);
+
+struct sde_rot_data_type *sde_rot_get_mdata(void);
+
+struct reg_bus_client *sde_reg_bus_vote_client_create(char *client_name);
+
+void sde_reg_bus_vote_client_destroy(struct reg_bus_client *client);
+
+int sde_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx);
+
+u32 sde_apply_comp_ratio_factor(u32 quota,
+ struct sde_mdp_format_params *fmt,
+ struct sde_mult_factor *factor);
+
+void sde_mdp_set_ot_limit(struct sde_mdp_set_ot_params *params);
+
+#define SDE_VBIF_WRITE(mdata, offset, value) \
+ (sde_reg_w(&mdata->vbif_nrt_io, offset, value, 0))
+#define SDE_VBIF_READ(mdata, offset) \
+ (sde_reg_r(&mdata->vbif_nrt_io, offset, 0))
+#define SDE_REG_WRITE(mdata, offset, value) \
+ sde_reg_w(&mdata->sde_io, offset, value, 0)
+#define SDE_REG_READ(mdata, offset) \
+ sde_reg_r(&mdata->sde_io, offset, 0)
+
+#define ATRACE_END(name) trace_rot_mark_write(current->tgid, name, 0)
+#define ATRACE_BEGIN(name) trace_rot_mark_write(current->tgid, name, 1)
+#define ATRACE_INT(name, value) \
+ trace_rot_trace_counter(current->tgid, name, value)
+
+#endif /* __SDE_ROTATOR_BASE__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
new file mode 100644
index 000000000000..d854a25cd047
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -0,0 +1,2587 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-direction.h>
+
+#include "sde_rotator_base.h"
+#include "sde_rotator_core.h"
+#include "sde_rotator_dev.h"
+#include "sde_rotator_util.h"
+#include "sde_rotator_io_util.h"
+#include "sde_rotator_smmu.h"
+#include "sde_rotator_r1.h"
+#include "sde_rotator_trace.h"
+
+/* waiting for hw time out, 3 vsync for 30fps*/
+#define ROT_HW_ACQUIRE_TIMEOUT_IN_MS 100
+
+/* default pixel per clock ratio */
+#define ROT_PIXEL_PER_CLK_NUMERATOR 4
+#define ROT_PIXEL_PER_CLK_DENOMINATOR 1
+
+/*
+ * Max rotator hw blocks possible. Used for upper array limits instead of
+ * alloc and freeing small array
+ */
+#define ROT_MAX_HW_BLOCKS 2
+
+#define SDE_REG_BUS_VECTOR_ENTRY(ab_val, ib_val) \
+ { \
+ .src = MSM_BUS_MASTER_AMPSS_M0, \
+ .dst = MSM_BUS_SLAVE_DISPLAY_CFG, \
+ .ab = (ab_val), \
+ .ib = (ib_val), \
+ }
+
+#define BUS_VOTE_19_MHZ 153600000
+
+/* Two-level rotator AHB vote: off, or a 19 MHz floor */
+static struct msm_bus_vectors rot_reg_bus_vectors[] = {
+	SDE_REG_BUS_VECTOR_ENTRY(0, 0),
+	SDE_REG_BUS_VECTOR_ENTRY(0, BUS_VOTE_19_MHZ),
+};
+static struct msm_bus_paths rot_reg_bus_usecases[ARRAY_SIZE(
+		rot_reg_bus_vectors)];
+static struct msm_bus_scale_pdata rot_reg_bus_scale_table = {
+	.usecase = rot_reg_bus_usecases,
+	.num_usecases = ARRAY_SIZE(rot_reg_bus_usecases),
+	.name = "mdss_rot_reg",
+	.active_only = 1,
+};
+
+/*
+ * sde_rotator_bus_scale_set_quota - vote @quota bytes of data bus bw.
+ * A quota of 0 selects usecase 0 (off); otherwise the quota is split
+ * evenly across the axi ports of the next rotating usecase slot and
+ * voted as ab (ib is left 0). A repeated vote with an unchanged quota
+ * is skipped.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int sde_rotator_bus_scale_set_quota(struct sde_rot_bus_data_type *bus,
+		u64 quota)
+{
+	int new_uc_idx;
+	int ret;
+
+	if (!bus) {
+		SDEROT_ERR("null parameter\n");
+		return -EINVAL;
+	}
+
+	if (bus->bus_hdl < 1) {
+		SDEROT_ERR("invalid bus handle %d\n", bus->bus_hdl);
+		return -EINVAL;
+	}
+
+	if (bus->curr_quota_val == quota) {
+		SDEROT_DBG("bw request already requested\n");
+		return 0;
+	}
+
+	if (!bus->bus_scale_pdata || !bus->bus_scale_pdata->num_usecases) {
+		SDEROT_ERR("invalid bus scale data\n");
+		return -EINVAL;
+	}
+
+	if (!quota) {
+		new_uc_idx = 0;
+	} else {
+		struct msm_bus_vectors *vect = NULL;
+		struct msm_bus_scale_pdata *bw_table =
+			bus->bus_scale_pdata;
+		u64 port_quota = quota;
+		u32 total_axi_port_cnt;
+		int i;
+
+		/* rotate among the non-zero slots to force a re-vote */
+		new_uc_idx = (bus->curr_bw_uc_idx %
+			(bw_table->num_usecases - 1)) + 1;
+
+		total_axi_port_cnt = bw_table->usecase[new_uc_idx].num_paths;
+		if (total_axi_port_cnt == 0) {
+			SDEROT_ERR("Number of bw paths is 0\n");
+			return -ENODEV;
+		}
+		do_div(port_quota, total_axi_port_cnt);
+
+		for (i = 0; i < total_axi_port_cnt; i++) {
+			vect = &bw_table->usecase[new_uc_idx].vectors[i];
+			vect->ab = port_quota;
+			vect->ib = 0;
+		}
+	}
+	bus->curr_bw_uc_idx = new_uc_idx;
+	bus->curr_quota_val = quota;
+
+	SDEROT_DBG("uc_idx=%d quota=%llu\n", new_uc_idx, quota);
+	ATRACE_BEGIN("msm_bus_scale_req_rot");
+	ret = msm_bus_scale_client_update_request(bus->bus_hdl,
+		new_uc_idx);
+	ATRACE_END("msm_bus_scale_req_rot");
+
+	return ret;
+}
+
+/*
+ * sde_rotator_enable_reg_bus - vote the rotator AHB bus on (any non-zero
+ * quota) or off, skipping the bus call when the vote is unchanged.
+ *
+ * Return: 0 on success or the msm_bus error code.
+ */
+static int sde_rotator_enable_reg_bus(struct sde_rot_mgr *mgr, u64 quota)
+{
+	int ret = 0, changed = 0;
+	u32 usecase_ndx = 0;
+
+	if (!mgr || !mgr->reg_bus.bus_hdl)
+		return 0;
+
+	if (quota)
+		usecase_ndx = 1;
+
+	if (usecase_ndx != mgr->reg_bus.curr_bw_uc_idx) {
+		mgr->reg_bus.curr_bw_uc_idx = usecase_ndx;
+		changed++;
+	}
+
+	/* pr_fmt() already prefixes the function name; don't print it twice */
+	SDEROT_DBG("changed=%d register bus %s\n", changed,
+		quota ? "Enable":"Disable");
+
+	if (changed) {
+		ATRACE_BEGIN("msm_bus_scale_req_rot_reg");
+		ret = msm_bus_scale_client_update_request(mgr->reg_bus.bus_hdl,
+			usecase_ndx);
+		ATRACE_END("msm_bus_scale_req_rot_reg");
+	}
+
+	return ret;
+}
+
+/*
+ * Clock rate of all open sessions working a particular hw block
+ * are added together to get the required rate for that hw block.
+ * The max of each hw block becomes the final clock rate voted for
+ */
+static unsigned long sde_rotator_clk_rate_calc(
+	struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *private)
+{
+	struct sde_rot_perf *perf;
+	unsigned long clk_rate[ROT_MAX_HW_BLOCKS] = {0};
+	unsigned long total_clk_rate = 0;
+	int i, wb_idx;
+
+	list_for_each_entry(perf, &private->perf_list, list) {
+		bool rate_accounted_for = false;
+		/*
+		 * If there is one session that has two work items across
+		 * different hw blocks rate is accounted for in both blocks.
+		 */
+		for (i = 0; i < mgr->queue_count; i++) {
+			if (perf->work_distribution[i]) {
+				clk_rate[i] += perf->clk_rate;
+				rate_accounted_for = true;
+			}
+		}
+
+		/*
+		 * Sessions that are open but not distributed on any hw block
+		 * Still need to be accounted for. Rate is added to last known
+		 * wb idx.
+		 */
+		wb_idx = perf->last_wb_idx;
+		if ((!rate_accounted_for) && (wb_idx >= 0) &&
+				(wb_idx < mgr->queue_count))
+			clk_rate[wb_idx] += perf->clk_rate;
+	}
+
+	/* vote at the busiest hw block's rate */
+	for (i = 0; i < mgr->queue_count; i++)
+		total_clk_rate = max(clk_rate[i], total_clk_rate);
+
+	SDEROT_DBG("Total clk rate calc=%lu\n", total_clk_rate);
+	return total_clk_rate;
+}
+
+/* Return the rotator clock at @clk_idx, or NULL when out of range. */
+static struct clk *sde_rotator_get_clk(struct sde_rot_mgr *mgr, u32 clk_idx)
+{
+	if (clk_idx < mgr->num_rot_clk)
+		return mgr->rot_clk[clk_idx].clk;
+
+	SDEROT_ERR("Invalid clk index:%u", clk_idx);
+	return NULL;
+}
+
+/*
+ * sde_rotator_set_clk_rate - round @rate against the clock's constraints
+ * and apply it only when it differs from the current rate.
+ */
+static void sde_rotator_set_clk_rate(struct sde_rot_mgr *mgr,
+		unsigned long rate, u32 clk_idx)
+{
+	unsigned long clk_rate;
+	struct clk *clk = sde_rotator_get_clk(mgr, clk_idx);
+	int ret;
+
+	if (clk) {
+		clk_rate = clk_round_rate(clk, rate);
+		if (IS_ERR_VALUE(clk_rate)) {
+			SDEROT_ERR("unable to round rate err=%ld\n", clk_rate);
+		} else if (clk_rate != clk_get_rate(clk)) {
+			ret = clk_set_rate(clk, clk_rate);
+			/*
+			 * clk_set_rate() returns a plain errno; testing with
+			 * IS_ERR_VALUE() on an int is misleading - check the
+			 * value directly.
+			 */
+			if (ret)
+				SDEROT_ERR("clk_set_rate failed, err:%d\n",
+						ret);
+			else
+				SDEROT_DBG("rotator clk rate=%lu\n", clk_rate);
+		}
+	} else {
+		SDEROT_ERR("rotator clk not setup properly\n");
+	}
+}
+
+/*
+ * Vote the rotator core clock at the sum of the rates required by every
+ * open rotator file.
+ */
+static int sde_rotator_update_clk(struct sde_rot_mgr *mgr)
+{
+	struct sde_rot_file_private *priv;
+	unsigned long total_clk_rate = 0;
+
+	list_for_each_entry(priv, &mgr->file_list, list)
+		total_clk_rate += sde_rotator_clk_rate_calc(mgr, priv);
+
+	SDEROT_DBG("core_clk %lu\n", total_clk_rate);
+	ATRACE_INT("core_clk", total_clk_rate);
+	sde_rotator_set_clk_rate(mgr, total_clk_rate, mgr->core_clk_idx);
+
+	return 0;
+}
+
+/*
+ * sde_rotator_footswitch_ctrl - switch the rotator regulators on/off.
+ * Warns and bails if already in the requested state. The cached state
+ * (mgr->regulator_enable) is only updated when the vreg call succeeds.
+ */
+static void sde_rotator_footswitch_ctrl(struct sde_rot_mgr *mgr, bool on)
+{
+	int ret;
+
+	if (WARN_ON(mgr->regulator_enable == on)) {
+		SDEROT_ERR("Regulators already in selected mode on=%d\n", on);
+		return;
+	}
+
+	SDEROT_DBG("%s: rotator regulators", on ? "Enable" : "Disable");
+	ret = sde_rot_enable_vreg(mgr->module_power.vreg_config,
+		mgr->module_power.num_vreg, on);
+	if (ret) {
+		SDEROT_WARN("Rotator regulator failed to %s\n",
+			on ? "enable" : "disable");
+		return;
+	}
+
+	mgr->regulator_enable = on;
+}
+
+/*
+ * sde_rotator_clk_ctrl - reference-counted rotator clock enable/disable.
+ * Clocks are only toggled on 0 -> 1 and 1 -> 0 transitions of the
+ * reference count. On enable, the data bus vote is moved to
+ * Active+Sleep; on disable, back to Active Only.
+ *
+ * Return: 0 on success or the clk_prepare_enable() error.
+ */
+static int sde_rotator_clk_ctrl(struct sde_rot_mgr *mgr, int enable)
+{
+	struct clk *clk;
+	int ret = 0;
+	int i, changed = 0;
+
+	if (enable) {
+		if (mgr->rot_enable_clk_cnt == 0)
+			changed++;
+		mgr->rot_enable_clk_cnt++;
+	} else {
+		if (mgr->rot_enable_clk_cnt) {
+			mgr->rot_enable_clk_cnt--;
+			if (mgr->rot_enable_clk_cnt == 0)
+				changed++;
+		} else {
+			SDEROT_ERR("Can not be turned off\n");
+		}
+	}
+
+	if (changed) {
+		SDEROT_DBG("Rotator clk %s\n", enable ? "enable" : "disable");
+		for (i = 0; i < mgr->num_rot_clk; i++) {
+			clk = mgr->rot_clk[i].clk;
+
+			if (!clk)
+				continue;
+
+			if (enable) {
+				ret = clk_prepare_enable(clk);
+				if (ret) {
+					SDEROT_ERR(
+						"enable failed clk_idx %d\n",
+						i);
+					goto error;
+				}
+			} else {
+				clk_disable_unprepare(clk);
+			}
+		}
+
+		if (enable) {
+			/* Active+Sleep */
+			msm_bus_scale_client_update_context(
+				mgr->data_bus.bus_hdl, false,
+				mgr->data_bus.curr_bw_uc_idx);
+			trace_rot_bw_ao_as_context(0);
+		} else {
+			/* Active Only */
+			msm_bus_scale_client_update_context(
+				mgr->data_bus.bus_hdl, true,
+				mgr->data_bus.curr_bw_uc_idx);
+			trace_rot_bw_ao_as_context(1);
+		}
+	}
+
+	return ret;
+error:
+	for (i--; i >= 0; i--)
+		clk_disable_unprepare(mgr->rot_clk[i].clk);
+	/* enable failed: drop the reference taken above so the count
+	 * stays consistent with the actual clock state
+	 */
+	mgr->rot_enable_clk_cnt--;
+	return ret;
+}
+
+/* sde_rotator_resource_ctrl - control state of power resource
+ * @mgr: Pointer to rotator manager
+ * @enable: 1 to enable; 0 to disable
+ *
+ * This function returns 1 if resource is already in the requested state,
+ * return 0 if the state is changed successfully, or negative error code
+ * if not successful.
+ *
+ * The return value comes straight from pm_runtime_get_sync()/
+ * pm_runtime_put_sync(); mgr->res_ref_cnt is a local bookkeeping count
+ * alongside the runtime-PM usage count.
+ */
+static int sde_rotator_resource_ctrl(struct sde_rot_mgr *mgr, int enable)
+{
+	int ret;
+
+	if (enable) {
+		mgr->res_ref_cnt++;
+		ret = pm_runtime_get_sync(&mgr->pdev->dev);
+	} else {
+		mgr->res_ref_cnt--;
+		ret = pm_runtime_put_sync(&mgr->pdev->dev);
+	}
+
+	SDEROT_DBG("%s: res_cnt=%d pm=%d enable=%d\n",
+		__func__, mgr->res_ref_cnt, ret, enable);
+	ATRACE_INT("res_cnt", mgr->res_ref_cnt);
+
+	return ret;
+}
+
+/*
+ * Return true if any hw queue still has work outstanding for this
+ * session. Caller is expected to hold perf->work_dis_lock.
+ */
+static bool sde_rotator_is_work_pending(struct sde_rot_mgr *mgr,
+	struct sde_rot_perf *perf)
+{
+	int idx = 0;
+
+	while (idx < mgr->queue_count) {
+		if (perf->work_distribution[idx]) {
+			SDEROT_DBG("Work is still scheduled to complete\n");
+			return true;
+		}
+		idx++;
+	}
+
+	return false;
+}
+
+/*
+ * sde_rotator_clear_fence - release the entry's fences.
+ * The input fence is simply dropped. If the output fence is still held
+ * here (it was never handed to userspace), the timeline is resynced
+ * before the fence is released so waiters are not left hanging.
+ */
+static void sde_rotator_clear_fence(struct sde_rot_entry *entry)
+{
+	if (entry->input_fence) {
+		SDEROT_DBG("sys_fence_put i:%p\n", entry->input_fence);
+		sde_rotator_put_sync_fence(entry->input_fence);
+		entry->input_fence = NULL;
+	}
+
+	/* fence failed to copy to user space */
+	if (entry->output_fence) {
+		if (entry->fenceq && entry->fenceq->timeline)
+			sde_rotator_resync_timeline(entry->fenceq->timeline);
+
+		SDEROT_DBG("sys_fence_put o:%p\n", entry->output_fence);
+		sde_rotator_put_sync_fence(entry->output_fence);
+		entry->output_fence = NULL;
+	}
+}
+
+/*
+ * sde_rotator_signal_output - signal completion of this entry's output
+ * buffer by advancing its fence timeline. Idempotent: a second call for
+ * the same entry is a no-op.
+ *
+ * Return: 0 on success, -EINVAL if the entry has no fence queue.
+ */
+static int sde_rotator_signal_output(struct sde_rot_entry *entry)
+{
+	struct sde_rot_timeline *rot_timeline;
+
+	if (!entry->fenceq)
+		return -EINVAL;
+
+	rot_timeline = entry->fenceq->timeline;
+
+	if (entry->output_signaled) {
+		SDEROT_DBG("output already signaled\n");
+		return 0;
+	}
+
+	SDEROT_DBG("signal fence s:%d.%d\n", entry->item.session_id,
+		entry->item.sequence_id);
+
+	sde_rotator_inc_timeline(rot_timeline, 1);
+
+	entry->output_signaled = true;
+
+	return 0;
+}
+
+/*
+ * sde_rotator_import_buffer - import a layer buffer's planes into @data
+ * and validate their sizes. @input selects the DMA direction: to-device
+ * for source buffers, from-device for destinations.
+ *
+ * Return: 0 on success or a negative error code from the import.
+ */
+static int sde_rotator_import_buffer(struct sde_layer_buffer *buffer,
+	struct sde_mdp_data *data, u32 flags, struct device *dev, bool input)
+{
+	struct sde_fb_data planes[SDE_ROT_MAX_PLANES];
+	int dir = input ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+	int i;
+
+	memset(planes, 0, sizeof(planes));
+
+	for (i = 0; i < buffer->plane_count; i++) {
+		planes[i].memory_id = buffer->planes[i].fd;
+		planes[i].offset = buffer->planes[i].offset;
+		planes[i].buffer = buffer->planes[i].buffer;
+	}
+
+	return sde_mdp_data_get_and_validate_size(data, planes,
+			buffer->plane_count, flags, dev, true, dir, buffer);
+}
+
+/*
+ * sde_rotator_map_and_check_data - map the entry's src/dst buffers into
+ * the SMMU and verify each against the plane sizes implied by its format
+ * and geometry (rotated plane sizes when SDE_ROTATION_90 is set).
+ * If mapping fails partway, the caller is responsible for releasing the
+ * data.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int sde_rotator_map_and_check_data(struct sde_rot_entry *entry)
+{
+	int ret;
+	struct sde_layer_buffer *input;
+	struct sde_layer_buffer *output;
+	struct sde_mdp_format_params *fmt;
+	struct sde_mdp_plane_sizes ps;
+	bool rotation;
+
+	input = &entry->item.input;
+	output = &entry->item.output;
+
+	rotation = (entry->item.flags & SDE_ROTATION_90) ? true : false;
+
+	/*
+	 * sde_smmu_ctrl() may legitimately return a positive value on
+	 * success, so only negative values are errors; IS_ERR_VALUE()
+	 * on an int was the wrong test here.
+	 */
+	ret = sde_smmu_ctrl(1);
+	if (ret < 0)
+		return ret;
+
+	/* if error during map, the caller will release the data */
+	ret = sde_mdp_data_map(&entry->src_buf, true, DMA_TO_DEVICE);
+	if (ret) {
+		SDEROT_ERR("source buffer mapping failed ret:%d\n", ret);
+		goto end;
+	}
+
+	ret = sde_mdp_data_map(&entry->dst_buf, true, DMA_FROM_DEVICE);
+	if (ret) {
+		SDEROT_ERR("destination buffer mapping failed ret:%d\n", ret);
+		goto end;
+	}
+
+	fmt = sde_get_format_params(input->format);
+	if (!fmt) {
+		SDEROT_ERR("invalid input format:%d\n", input->format);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ret = sde_mdp_get_plane_sizes(
+			fmt, input->width, input->height, &ps, 0, rotation);
+	if (ret) {
+		SDEROT_ERR("fail to get input plane size ret=%d\n", ret);
+		goto end;
+	}
+
+	ret = sde_mdp_data_check(&entry->src_buf, &ps, fmt);
+	if (ret) {
+		SDEROT_ERR("fail to check input data ret=%d\n", ret);
+		goto end;
+	}
+
+	fmt = sde_get_format_params(output->format);
+	if (!fmt) {
+		SDEROT_ERR("invalid output format:%d\n", output->format);
+		ret = -EINVAL;
+		goto end;
+	}
+
+	ret = sde_mdp_get_plane_sizes(
+			fmt, output->width, output->height, &ps, 0, rotation);
+	if (ret) {
+		SDEROT_ERR("fail to get output plane size ret=%d\n", ret);
+		goto end;
+	}
+
+	ret = sde_mdp_data_check(&entry->dst_buf, &ps, fmt);
+	if (ret) {
+		SDEROT_ERR("fail to check output data ret=%d\n", ret);
+		goto end;
+	}
+
+end:
+	sde_smmu_ctrl(0);
+
+	return ret;
+}
+
+/*
+ * __sde_rotator_find_session - look up a session's perf struct by id.
+ * @private: per-file private data owning the perf list.
+ * @session_id: session identifier to match.
+ *
+ * Return: matching perf struct, or NULL if no session has @session_id.
+ */
+static struct sde_rot_perf *__sde_rotator_find_session(
+ struct sde_rot_file_private *private,
+ u32 session_id)
+{
+ struct sde_rot_perf *perf, *tmp;
+
+ list_for_each_entry_safe(perf, tmp, &private->perf_list, list) {
+ if (perf->config.session_id == session_id)
+ return perf;
+ }
+
+ return NULL;
+}
+
+/*
+ * sde_rotator_find_session - public-ish wrapper for the session lookup.
+ * @private: per-file private data owning the perf list.
+ * @session_id: session identifier to match.
+ *
+ * Return: matching perf struct or NULL.
+ */
+static struct sde_rot_perf *sde_rotator_find_session(
+ struct sde_rot_file_private *private,
+ u32 session_id)
+{
+ return __sde_rotator_find_session(private, session_id);
+}
+
+/*
+ * sde_rotator_release_data - unmap and free an entry's src/dst buffers.
+ * @entry: rotation work entry whose imported buffers are released.
+ *
+ * Counterpart of sde_rotator_import_data()/sde_rotator_map_and_check_data().
+ */
+static void sde_rotator_release_data(struct sde_rot_entry *entry)
+{
+ sde_mdp_data_free(&entry->src_buf, true, DMA_TO_DEVICE);
+ sde_mdp_data_free(&entry->dst_buf, true, DMA_FROM_DEVICE);
+}
+
+/*
+ * sde_rotator_import_data - import the input and output buffers of an entry.
+ * @mgr: rotator manager (provides the device for dma import).
+ * @entry: rotation work entry carrying the layer buffers.
+ *
+ * Translates item flags into import flags (secure / external dma-buf) and
+ * imports both buffers. On failure the already-imported input buffer is
+ * NOT released here; the caller is expected to clean up through
+ * sde_rotator_release_entry()/sde_rotator_release_data().
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int sde_rotator_import_data(struct sde_rot_mgr *mgr,
+ struct sde_rot_entry *entry)
+{
+ int ret;
+ struct sde_layer_buffer *input;
+ struct sde_layer_buffer *output;
+ u32 flag = 0;
+
+ input = &entry->item.input;
+ output = &entry->item.output;
+
+ if (entry->item.flags & SDE_ROTATION_SECURE)
+ flag = SDE_SECURE_OVERLAY_SESSION;
+
+ if (entry->item.flags & SDE_ROTATION_EXT_DMA_BUF)
+ flag |= SDE_ROT_EXT_DMA_BUF;
+
+ ret = sde_rotator_import_buffer(input, &entry->src_buf, flag,
+ &mgr->pdev->dev, true);
+ if (ret) {
+ SDEROT_ERR("fail to import input buffer ret=%d\n", ret);
+ return ret;
+ }
+
+ /*
+ * driver assumes output buffer is ready to be written
+ * immediately
+ */
+ ret = sde_rotator_import_buffer(output, &entry->dst_buf, flag,
+ &mgr->pdev->dev, false);
+ if (ret) {
+ SDEROT_ERR("fail to import output buffer ret=%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * sde_rotator_get_hw_resource - acquire a slot on the queue's hw resource.
+ * @queue: commit queue whose hw resource is requested.
+ * @entry: work entry that will occupy the hw slot.
+ *
+ * Blocks (dropping the manager lock while waiting) until num_active falls
+ * below max_active or hwacquire_timeout expires. On success increments
+ * num_active and records @entry as the current workload.
+ *
+ * Called with the rot mgr lock held; the lock is released around the
+ * wait_event_timeout() so other contexts can retire work.
+ *
+ * Return: the hw resource on success, NULL on timeout or missing hw.
+ */
+static struct sde_rot_hw_resource *sde_rotator_get_hw_resource(
+ struct sde_rot_queue *queue, struct sde_rot_entry *entry)
+{
+ struct sde_rot_hw_resource *hw = queue->hw;
+ struct sde_rot_mgr *mgr;
+ int ret;
+
+ if (!hw) {
+ SDEROT_ERR("no hw in the queue\n");
+ return NULL;
+ }
+
+ mgr = entry->private->mgr;
+
+ BUG_ON(atomic_read(&hw->num_active) > hw->max_active);
+ while (atomic_read(&hw->num_active) >= hw->max_active) {
+ /* drop the mgr lock so the done path can decrement num_active */
+ sde_rot_mgr_unlock(entry->private->mgr);
+ ret = wait_event_timeout(hw->wait_queue,
+ (atomic_read(&hw->num_active) < hw->max_active),
+ msecs_to_jiffies(mgr->hwacquire_timeout));
+ sde_rot_mgr_lock(entry->private->mgr);
+ if (!ret) {
+ SDEROT_ERR(
+ "timeout waiting for hw resource, a:%d p:%d\n",
+ atomic_read(&hw->num_active),
+ hw->pending_count);
+ if (hw->workload)
+ SDEROT_ERR("possible faulty workload s:%d.%d\n",
+ hw->workload->item.session_id,
+ hw->workload->item.sequence_id);
+ return NULL;
+ }
+ }
+ atomic_inc(&hw->num_active);
+ SDEROT_DBG("active=%d pending=%d s:%d.%d\n",
+ atomic_read(&hw->num_active), hw->pending_count,
+ entry->item.session_id, entry->item.sequence_id);
+ hw->workload = entry;
+ return hw;
+}
+
+/*
+ * sde_rotator_put_hw_resource - release a hw slot acquired by get_hw_resource.
+ * @queue: commit queue owning the hw resource (unused directly here).
+ * @hw: hw resource to release.
+ *
+ * Clears the current workload, decrements num_active (guarded against
+ * underflow by atomic_add_unless) and wakes any waiter in
+ * sde_rotator_get_hw_resource().
+ *
+ * NOTE(review): @entry may be NULL if the workload was already cleared;
+ * the underflow error print would then dereference NULL — confirm callers
+ * always pair this with a successful get_hw_resource().
+ */
+static void sde_rotator_put_hw_resource(struct sde_rot_queue *queue,
+ struct sde_rot_hw_resource *hw)
+{
+ struct sde_rot_entry *entry = hw->workload;
+
+ BUG_ON(atomic_read(&hw->num_active) < 1);
+ hw->workload = NULL;
+ if (!atomic_add_unless(&hw->num_active, -1, 0))
+ SDEROT_ERR("underflow active=%d pending=%d s:%d.%d\n",
+ atomic_read(&hw->num_active), hw->pending_count,
+ entry->item.session_id, entry->item.sequence_id);
+ wake_up(&hw->wait_queue);
+ SDEROT_DBG("active=%d pending=%d s:%d.%d\n",
+ atomic_read(&hw->num_active), hw->pending_count,
+ entry->item.session_id, entry->item.sequence_id);
+}
+
+/*
+ * caller will need to call sde_rotator_deinit_queue when
+ * the function returns error
+ */
+/*
+ * sde_rotator_init_queue - allocate commit and done work queues.
+ * @mgr: rotator manager; queue_count determines how many of each.
+ *
+ * Creates one ordered (serialized, high-priority, reclaim-safe) workqueue
+ * per commit queue and per done queue. Timelines are unused here.
+ *
+ * NOTE(review): if a commitq workqueue allocation fails, the loop breaks
+ * but the function still proceeds to allocate the doneq array; the error
+ * is preserved in ret and the caller tears everything down via
+ * sde_rotator_deinit_queue().
+ *
+ * Return: 0 on success, -ENOMEM/-EPERM on allocation failure.
+ */
+static int sde_rotator_init_queue(struct sde_rot_mgr *mgr)
+{
+ int i, size, ret = 0;
+ char name[32];
+
+ size = sizeof(struct sde_rot_queue) * mgr->queue_count;
+ mgr->commitq = devm_kzalloc(mgr->device, size, GFP_KERNEL);
+ if (!mgr->commitq)
+ return -ENOMEM;
+
+ for (i = 0; i < mgr->queue_count; i++) {
+ snprintf(name, sizeof(name), "rot_commitq_%d_%d",
+ mgr->device->id, i);
+ SDEROT_DBG("work queue name=%s\n", name);
+ mgr->commitq[i].rot_work_queue =
+ alloc_ordered_workqueue("%s",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, name);
+ if (!mgr->commitq[i].rot_work_queue) {
+ ret = -EPERM;
+ break;
+ }
+
+ /* timeline not used */
+ mgr->commitq[i].timeline = NULL;
+ }
+
+ size = sizeof(struct sde_rot_queue) * mgr->queue_count;
+ mgr->doneq = devm_kzalloc(mgr->device, size, GFP_KERNEL);
+ if (!mgr->doneq)
+ return -ENOMEM;
+
+ for (i = 0; i < mgr->queue_count; i++) {
+ snprintf(name, sizeof(name), "rot_doneq_%d_%d",
+ mgr->device->id, i);
+ SDEROT_DBG("work queue name=%s\n", name);
+ mgr->doneq[i].rot_work_queue = alloc_ordered_workqueue("%s",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, name);
+ if (!mgr->doneq[i].rot_work_queue) {
+ ret = -EPERM;
+ break;
+ }
+
+ /* timeline not used */
+ mgr->doneq[i].timeline = NULL;
+ }
+ return ret;
+}
+
+/*
+ * sde_rotator_deinit_queue - tear down commit and done work queues.
+ * @mgr: rotator manager whose queues are destroyed.
+ *
+ * Destroys every allocated workqueue, frees both queue arrays and resets
+ * queue_count. Safe to call after a partially failed init.
+ */
+static void sde_rotator_deinit_queue(struct sde_rot_mgr *mgr)
+{
+ int q;
+
+ if (mgr->commitq) {
+ for (q = 0; q < mgr->queue_count; q++)
+ if (mgr->commitq[q].rot_work_queue)
+ destroy_workqueue(
+ mgr->commitq[q].rot_work_queue);
+ devm_kfree(mgr->device, mgr->commitq);
+ mgr->commitq = NULL;
+ }
+
+ if (mgr->doneq) {
+ for (q = 0; q < mgr->queue_count; q++)
+ if (mgr->doneq[q].rot_work_queue)
+ destroy_workqueue(
+ mgr->doneq[q].rot_work_queue);
+ devm_kfree(mgr->device, mgr->doneq);
+ mgr->doneq = NULL;
+ }
+
+ mgr->queue_count = 0;
+}
+
+/*
+ * sde_rotator_assign_queue() - Function assign rotation work onto hw
+ * @mgr: Rotator manager.
+ * @entry: Contains details on rotator work item being requested
+ * @private: Private struct used for access rot session performance struct
+ *
+ * This Function allocates hw required to complete rotation work item
+ * requested.
+ *
+ * Caller is responsible for calling cleanup function if error is returned
+ */
+static int sde_rotator_assign_queue(struct sde_rot_mgr *mgr,
+ struct sde_rot_entry *entry,
+ struct sde_rot_file_private *private)
+{
+ struct sde_rot_perf *perf;
+ struct sde_rot_queue *queue;
+ struct sde_rot_hw_resource *hw;
+ struct sde_rotation_item *item = &entry->item;
+ u32 wb_idx = item->wb_idx;
+ u32 pipe_idx = item->pipe_idx;
+ int ret = 0;
+
+ if (wb_idx >= mgr->queue_count) {
+ /* assign to the lowest priority queue */
+ wb_idx = mgr->queue_count - 1;
+ }
+
+ entry->doneq = &mgr->doneq[wb_idx];
+ entry->commitq = &mgr->commitq[wb_idx];
+ /*
+ * Fix: operate on the commit queue selected by wb_idx. The previous
+ * code used mgr->commitq (i.e. index 0) here, so for wb_idx > 0 the
+ * hw resource was allocated on, and entry->commitq reassigned to,
+ * the wrong queue.
+ */
+ queue = entry->commitq;
+
+ if (!queue->hw) {
+ hw = mgr->ops_hw_alloc(mgr, pipe_idx, wb_idx);
+ if (IS_ERR_OR_NULL(hw)) {
+ SDEROT_ERR("fail to allocate hw\n");
+ ret = PTR_ERR(hw);
+ } else {
+ queue->hw = hw;
+ }
+ }
+
+ if (queue->hw) {
+ /* entry is only accounted against a queue that has hw */
+ entry->commitq = queue;
+ queue->hw->pending_count++;
+ }
+
+ perf = sde_rotator_find_session(private, item->session_id);
+ if (!perf) {
+ SDEROT_ERR(
+ "Could not find session based on rotation work item\n");
+ return -EINVAL;
+ }
+
+ entry->perf = perf;
+ perf->last_wb_idx = wb_idx;
+
+ return ret;
+}
+
+/*
+ * sde_rotator_unassign_queue - undo sde_rotator_assign_queue for an entry.
+ * @mgr: rotator manager.
+ * @entry: work entry being detached from its queues.
+ *
+ * Clears the entry's queue pointers, drops the hw pending count and frees
+ * the hw resource once no work remains pending on it.
+ */
+static void sde_rotator_unassign_queue(struct sde_rot_mgr *mgr,
+ struct sde_rot_entry *entry)
+{
+ struct sde_rot_queue *queue = entry->commitq;
+
+ if (!queue)
+ return;
+
+ entry->fenceq = NULL;
+ entry->commitq = NULL;
+ entry->doneq = NULL;
+
+ if (!queue->hw) {
+ SDEROT_ERR("entry assigned a queue with no hw\n");
+ return;
+ }
+
+ queue->hw->pending_count--;
+ if (queue->hw->pending_count == 0) {
+ /* last pending entry gone: release the hw resource */
+ mgr->ops_hw_free(mgr, queue->hw);
+ queue->hw = NULL;
+ }
+}
+
+/*
+ * sde_rotator_queue_request - dispatch all entries of a request to hw.
+ * @mgr: rotator manager.
+ * @private: per-file private data (unused beyond validation here).
+ * @req: request container whose entries are queued for commit.
+ *
+ * First pass marks every entry as assigned and charges it to the session's
+ * per-writeback work distribution; second pass timestamps and queues each
+ * entry's commit work.
+ *
+ * NOTE(review): output_fence is cleared before queueing — presumably the
+ * caller retains ownership of the output fence from this point; confirm
+ * against the fence lifecycle in the caller.
+ */
+void sde_rotator_queue_request(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private,
+ struct sde_rot_entry_container *req)
+{
+ struct sde_rot_entry *entry;
+ struct sde_rot_queue *queue;
+ u32 wb_idx;
+ int i;
+
+ if (!mgr || !private || !req) {
+ SDEROT_ERR("null parameters\n");
+ return;
+ }
+
+ for (i = 0; i < req->count; i++) {
+ entry = req->entries + i;
+ queue = entry->commitq;
+ wb_idx = queue->hw->wb_id;
+ entry->perf->work_distribution[wb_idx]++;
+ entry->work_assigned = true;
+ }
+
+ for (i = 0; i < req->count; i++) {
+ entry = req->entries + i;
+ queue = entry->commitq;
+ entry->output_fence = NULL;
+
+ if (entry->item.ts)
+ entry->item.ts[SDE_ROTATOR_TS_QUEUE] = ktime_get();
+ queue_work(queue->rot_work_queue, &entry->commit_work);
+ }
+}
+
+/*
+ * sde_rotator_calc_buf_bw - estimate bandwidth for one buffer stream.
+ * @fmt: pixel format (chroma sampling and bpp are read).
+ * @width/@height: buffer dimensions in pixels.
+ * @frame_rate: frames per second.
+ *
+ * YUV 4:2:0 averages 1.5 bytes/pixel; otherwise scale by bytes-per-pixel.
+ * Return: estimated bandwidth in bytes per second.
+ */
+static u32 sde_rotator_calc_buf_bw(struct sde_mdp_format_params *fmt,
+ uint32_t width, uint32_t height, uint32_t frame_rate)
+{
+ u32 pixels = width * height * frame_rate;
+
+ return (fmt->chroma_sample == SDE_MDP_CHROMA_420) ?
+ (pixels * 3) / 2 : pixels * fmt->bpp;
+}
+
+/*
+ * sde_rotator_calc_perf - compute clock rate and bandwidth for a session.
+ * @mgr: rotator manager (supplies the pixels-per-clock fraction).
+ * @perf: session perf struct whose config is read and clk_rate/bw written.
+ *
+ * Clock is pixel throughput scaled by the hw pixels-per-clock ratio;
+ * bandwidth is the sum of read and write streams, each adjusted by its
+ * compression ratio.
+ *
+ * Return: 0 on success, -EINVAL on unrecognized input/output format.
+ */
+static int sde_rotator_calc_perf(struct sde_rot_mgr *mgr,
+ struct sde_rot_perf *perf)
+{
+ struct sde_rotation_config *config = &perf->config;
+ u32 read_bw, write_bw;
+ struct sde_mdp_format_params *in_fmt, *out_fmt;
+
+ in_fmt = sde_get_format_params(config->input.format);
+ if (!in_fmt) {
+ SDEROT_ERR("invalid input format %d\n", config->input.format);
+ return -EINVAL;
+ }
+ out_fmt = sde_get_format_params(config->output.format);
+ if (!out_fmt) {
+ SDEROT_ERR("invalid output format %d\n", config->output.format);
+ return -EINVAL;
+ }
+
+ perf->clk_rate = config->input.width * config->input.height;
+ perf->clk_rate *= config->frame_rate;
+ /* rotator processes 4 pixels per clock */
+ perf->clk_rate = (perf->clk_rate * mgr->pixel_per_clk.denom) /
+ mgr->pixel_per_clk.numer;
+
+ read_bw = sde_rotator_calc_buf_bw(in_fmt, config->input.width,
+ config->input.height, config->frame_rate);
+
+ write_bw = sde_rotator_calc_buf_bw(out_fmt, config->output.width,
+ config->output.height, config->frame_rate);
+
+ read_bw = sde_apply_comp_ratio_factor(read_bw, in_fmt,
+ &config->input.comp_ratio);
+ write_bw = sde_apply_comp_ratio_factor(write_bw, out_fmt,
+ &config->output.comp_ratio);
+
+ perf->bw = read_bw + write_bw;
+ return 0;
+}
+
+/*
+ * sde_rotator_update_perf - re-vote aggregate bandwidth with the bus driver.
+ * @mgr: rotator manager.
+ *
+ * Sums the bandwidth of every open session across all files (zero while
+ * suspended), adds any bandwidth still pending from closed sessions, and
+ * applies the total to the register and data buses.
+ *
+ * Return: always 0.
+ */
+static int sde_rotator_update_perf(struct sde_rot_mgr *mgr)
+{
+ struct sde_rot_file_private *priv;
+ struct sde_rot_perf *perf;
+ int not_in_suspend_mode;
+ u64 total_bw = 0;
+
+ not_in_suspend_mode = !atomic_read(&mgr->device_suspended);
+
+ if (not_in_suspend_mode) {
+ list_for_each_entry(priv, &mgr->file_list, list) {
+ list_for_each_entry(perf, &priv->perf_list, list) {
+ total_bw += perf->bw;
+ }
+ }
+ }
+
+ /* sessions closed with work still pending keep their bw vote alive */
+ total_bw += mgr->pending_close_bw_vote;
+ sde_rotator_enable_reg_bus(mgr, total_bw);
+ ATRACE_INT("bus_quota", total_bw);
+ sde_rotator_bus_scale_set_quota(&mgr->data_bus, total_bw);
+
+ return 0;
+}
+
+/*
+ * sde_rotator_release_from_work_distribution - retire an entry's work share.
+ * @mgr: rotator manager.
+ * @entry: work entry whose per-writeback accounting is released.
+ *
+ * Decrements the session's work distribution counter for the entry's
+ * writeback queue. If the session was already closed (its perf struct was
+ * unlinked by close_session, which offloaded the free to us) and this was
+ * its last outstanding work item, free the perf struct here, return its
+ * pending bandwidth vote, and drop clk/resource references.
+ */
+static void sde_rotator_release_from_work_distribution(
+ struct sde_rot_mgr *mgr,
+ struct sde_rot_entry *entry)
+{
+ if (entry->work_assigned) {
+ bool free_perf = false;
+ u32 wb_idx = entry->commitq->hw->wb_id;
+
+ if (entry->perf->work_distribution[wb_idx])
+ entry->perf->work_distribution[wb_idx]--;
+
+ if (!entry->perf->work_distribution[wb_idx]
+ && list_empty(&entry->perf->list)) {
+ /* close session has offloaded perf free to us */
+ free_perf = true;
+ }
+
+ entry->work_assigned = false;
+ if (free_perf) {
+ /* return the bw vote close_session left pending */
+ if (mgr->pending_close_bw_vote < entry->perf->bw) {
+ SDEROT_ERR(
+ "close bw vote underflow %llu / %llu\n",
+ mgr->pending_close_bw_vote,
+ entry->perf->bw);
+ mgr->pending_close_bw_vote = 0;
+ } else {
+ mgr->pending_close_bw_vote -= entry->perf->bw;
+ }
+ devm_kfree(&mgr->pdev->dev,
+ entry->perf->work_distribution);
+ devm_kfree(&mgr->pdev->dev, entry->perf);
+ sde_rotator_update_perf(mgr);
+ sde_rotator_clk_ctrl(mgr, false);
+ sde_rotator_resource_ctrl(mgr, false);
+ entry->perf = NULL;
+ }
+ }
+}
+
+/*
+ * sde_rotator_release_entry - fully tear down a rotation work entry.
+ * @mgr: rotator manager.
+ * @entry: entry to release.
+ *
+ * Order matters: work accounting is released first (it reads
+ * entry->commitq->hw), then fences, then dma buffers, and finally the
+ * queue assignment (which may free the hw resource).
+ */
+static void sde_rotator_release_entry(struct sde_rot_mgr *mgr,
+ struct sde_rot_entry *entry)
+{
+ sde_rotator_release_from_work_distribution(mgr, entry);
+ sde_rotator_clear_fence(entry);
+ sde_rotator_release_data(entry);
+ sde_rotator_unassign_queue(mgr, entry);
+}
+
+/*
+ * sde_rotator_commit_handler - Commit workqueue handler.
+ * @work: Pointer to work struct embedded in the rot entry.
+ *
+ * This handler is responsible for commit the job to h/w.
+ * Once the job is committed, the job entry is added to the done queue.
+ *
+ * Note this asynchronous handler is protected by hal lock.
+ *
+ * Error handling: failures after hw acquisition go through "error" (which
+ * releases the hw slot) and fall into "get_hw_res_err", which signals the
+ * output fence, releases the entry and accounts the failure on the request.
+ */
+static void sde_rotator_commit_handler(struct work_struct *work)
+{
+ struct sde_rot_entry *entry;
+ struct sde_rot_entry_container *request;
+ struct sde_rot_hw_resource *hw;
+ struct sde_rot_mgr *mgr;
+ int ret;
+
+ entry = container_of(work, struct sde_rot_entry, commit_work);
+ request = entry->request;
+
+ if (!request || !entry->private || !entry->private->mgr) {
+ SDEROT_ERR("fatal error, null request/context/device\n");
+ return;
+ }
+
+ mgr = entry->private->mgr;
+
+ SDEDEV_DBG(mgr->device,
+ "commit handler s:%d.%u src:(%d,%d,%d,%d) dst:(%d,%d,%d,%d) f:0x%x dnsc:%u/%u\n",
+ entry->item.session_id, entry->item.sequence_id,
+ entry->item.src_rect.x, entry->item.src_rect.y,
+ entry->item.src_rect.w, entry->item.src_rect.h,
+ entry->item.dst_rect.x, entry->item.dst_rect.y,
+ entry->item.dst_rect.w, entry->item.dst_rect.h,
+ entry->item.flags,
+ entry->dnsc_factor_w, entry->dnsc_factor_h);
+
+ sde_rot_mgr_lock(mgr);
+
+ /* may block (dropping the mgr lock) until a hw slot is free */
+ hw = sde_rotator_get_hw_resource(entry->commitq, entry);
+ if (!hw) {
+ SDEROT_ERR("no hw for the queue\n");
+ goto get_hw_res_err;
+ }
+
+ if (entry->item.ts)
+ entry->item.ts[SDE_ROTATOR_TS_COMMIT] = ktime_get();
+
+ trace_rot_entry_commit(
+ entry->item.session_id, entry->item.sequence_id,
+ entry->item.wb_idx, entry->item.flags,
+ entry->item.input.format,
+ entry->item.input.width, entry->item.input.height,
+ entry->item.src_rect.x, entry->item.src_rect.y,
+ entry->item.src_rect.w, entry->item.src_rect.h,
+ entry->item.output.format,
+ entry->item.output.width, entry->item.output.height,
+ entry->item.dst_rect.x, entry->item.dst_rect.y,
+ entry->item.dst_rect.w, entry->item.dst_rect.h);
+
+ ret = sde_rotator_map_and_check_data(entry);
+ if (ret) {
+ SDEROT_ERR("fail to prepare input/output data %d\n", ret);
+ goto error;
+ }
+
+ ret = mgr->ops_config_hw(hw, entry);
+ if (ret) {
+ SDEROT_ERR("fail to configure hw resource %d\n", ret);
+ goto error;
+ }
+
+ ret = mgr->ops_kickoff_entry(hw, entry);
+ if (ret) {
+ SDEROT_ERR("fail to do kickoff %d\n", ret);
+ goto error;
+ }
+
+ if (entry->item.ts)
+ entry->item.ts[SDE_ROTATOR_TS_FLUSH] = ktime_get();
+
+ /* hand off to the done handler, which waits for hw completion */
+ queue_work(entry->doneq->rot_work_queue, &entry->done_work);
+ sde_rot_mgr_unlock(mgr);
+ return;
+error:
+ sde_rotator_put_hw_resource(entry->commitq, hw);
+get_hw_res_err:
+ sde_rotator_signal_output(entry);
+ sde_rotator_release_entry(mgr, entry);
+ atomic_dec(&request->pending_count);
+ atomic_inc(&request->failed_count);
+ if (request->retireq && request->retire_work)
+ queue_work(request->retireq, request->retire_work);
+ sde_rot_mgr_unlock(mgr);
+}
+
+/*
+ * sde_rotator_done_handler - Done workqueue handler.
+ * @work: Pointer to work struct embedded in the rot entry.
+ *
+ * This handler is responsible for waiting for h/w done event.
+ * Once the job is done, the output fence will be signaled and the job entry
+ * will be retired.
+ *
+ * Note this asynchronous handler is protected by hal lock.
+ *
+ * NOTE(review): the RETIRE timestamp is written after the retire work is
+ * queued; this assumes the request container outlives the retire work's
+ * start — confirm the retire handler does not free it concurrently.
+ */
+static void sde_rotator_done_handler(struct work_struct *work)
+{
+ struct sde_rot_entry *entry;
+ struct sde_rot_entry_container *request;
+ struct sde_rot_hw_resource *hw;
+ struct sde_rot_mgr *mgr;
+ int ret;
+
+ entry = container_of(work, struct sde_rot_entry, done_work);
+ request = entry->request;
+
+ if (!request || !entry->private || !entry->private->mgr) {
+ SDEROT_ERR("fatal error, null request/context/device\n");
+ return;
+ }
+
+ mgr = entry->private->mgr;
+ hw = entry->commitq->hw;
+
+ SDEDEV_DBG(mgr->device,
+ "done handler s:%d.%u src:(%d,%d,%d,%d) dst:(%d,%d,%d,%d) f:0x%x dsnc:%u/%u\n",
+ entry->item.session_id, entry->item.sequence_id,
+ entry->item.src_rect.x, entry->item.src_rect.y,
+ entry->item.src_rect.w, entry->item.src_rect.h,
+ entry->item.dst_rect.x, entry->item.dst_rect.y,
+ entry->item.dst_rect.w, entry->item.dst_rect.h,
+ entry->item.flags,
+ entry->dnsc_factor_w, entry->dnsc_factor_h);
+
+ /* blocks until hw completion (or timeout handled by the op) */
+ ret = mgr->ops_wait_for_entry(hw, entry);
+ if (ret) {
+ SDEROT_ERR("fail to wait for completion %d\n", ret);
+ atomic_inc(&request->failed_count);
+ }
+
+ if (entry->item.ts)
+ entry->item.ts[SDE_ROTATOR_TS_DONE] = ktime_get();
+
+ trace_rot_entry_done(
+ entry->item.session_id, entry->item.sequence_id,
+ entry->item.wb_idx, entry->item.flags,
+ entry->item.input.format,
+ entry->item.input.width, entry->item.input.height,
+ entry->item.src_rect.x, entry->item.src_rect.y,
+ entry->item.src_rect.w, entry->item.src_rect.h,
+ entry->item.output.format,
+ entry->item.output.width, entry->item.output.height,
+ entry->item.dst_rect.x, entry->item.dst_rect.y,
+ entry->item.dst_rect.w, entry->item.dst_rect.h);
+
+ sde_rot_mgr_lock(mgr);
+ sde_rotator_put_hw_resource(entry->commitq, entry->commitq->hw);
+ sde_rotator_signal_output(entry);
+ sde_rotator_release_entry(mgr, entry);
+ atomic_dec(&request->pending_count);
+ if (request->retireq && request->retire_work)
+ queue_work(request->retireq, request->retire_work);
+ if (entry->item.ts)
+ entry->item.ts[SDE_ROTATOR_TS_RETIRE] = ktime_get();
+ sde_rot_mgr_unlock(mgr);
+}
+
+/*
+ * sde_rotator_verify_format - check an input/output format pairing.
+ * @mgr: rotator manager (unused in the checks below).
+ * @in_fmt: input pixel format.
+ * @out_fmt: output pixel format.
+ * @rotation: true when a 90-degree rotation is requested.
+ *
+ * The rotator cannot do color-space conversion or change pixel depth
+ * (except dropping alpha / adding X on RGB). Chroma subsampling must
+ * survive rotation: under 90 degrees the h/v subsample factors swap.
+ *
+ * Return: true if the pairing is supported, false otherwise.
+ */
+static bool sde_rotator_verify_format(struct sde_rot_mgr *mgr,
+ struct sde_mdp_format_params *in_fmt,
+ struct sde_mdp_format_params *out_fmt, bool rotation)
+{
+ u8 in_v_subsample, in_h_subsample;
+ u8 out_v_subsample, out_h_subsample;
+
+ if (!sde_mdp_is_wb_format(out_fmt)) {
+ SDEROT_DBG("Invalid output format\n");
+ return false;
+ }
+
+ if ((in_fmt->is_yuv != out_fmt->is_yuv) ||
+ (in_fmt->pixel_mode != out_fmt->pixel_mode)) {
+ SDEROT_DBG("Rotator does not support CSC\n");
+ return false;
+ }
+
+ /* Forcing same pixel depth */
+ if (memcmp(in_fmt->bits, out_fmt->bits, sizeof(in_fmt->bits))) {
+ /* Exception is that RGB can drop alpha or add X */
+ if (in_fmt->is_yuv || out_fmt->alpha_enable ||
+ (in_fmt->bits[C2_R_Cr] != out_fmt->bits[C2_R_Cr]) ||
+ (in_fmt->bits[C0_G_Y] != out_fmt->bits[C0_G_Y]) ||
+ (in_fmt->bits[C1_B_Cb] != out_fmt->bits[C1_B_Cb])) {
+ SDEROT_DBG("Bit format does not match\n");
+ return false;
+ }
+ }
+
+ /* Need to make sure that sub-sampling persists through rotation */
+ if (rotation) {
+ sde_mdp_get_v_h_subsample_rate(in_fmt->chroma_sample,
+ &in_v_subsample, &in_h_subsample);
+ sde_mdp_get_v_h_subsample_rate(out_fmt->chroma_sample,
+ &out_v_subsample, &out_h_subsample);
+
+ /* 90-degree rotation swaps h and v subsampling */
+ if ((in_v_subsample != out_h_subsample) ||
+ (in_h_subsample != out_v_subsample)) {
+ SDEROT_DBG("Rotation has invalid subsampling\n");
+ return false;
+ }
+ } else {
+ if (in_fmt->chroma_sample != out_fmt->chroma_sample) {
+ SDEROT_DBG("Format subsampling mismatch\n");
+ return false;
+ }
+ }
+
+ SDEROT_DBG("in_fmt=%0d, out_fmt=%d\n", in_fmt->format, out_fmt->format);
+ return true;
+}
+
+/*
+ * sde_rotator_verify_config - validate a full session configuration.
+ * @mgr: rotator manager.
+ * @config: session configuration (formats, sizes, flags) to verify.
+ *
+ * Checks that both formats are recognized, that each ROI is divisible by
+ * its format's chroma subsample factors, and that the format pairing is
+ * supported. With SDE_ROTATION_VERIFY_INPUT_ONLY set, output-side
+ * failures are tolerated (only input checks are fatal).
+ *
+ * Return: 0 when valid, -EINVAL otherwise.
+ */
+int sde_rotator_verify_config(struct sde_rot_mgr *mgr,
+ struct sde_rotation_config *config)
+{
+ struct sde_mdp_format_params *in_fmt, *out_fmt;
+ u8 in_v_subsample, in_h_subsample;
+ u8 out_v_subsample, out_h_subsample;
+ u32 input, output;
+ bool rotation;
+ int verify_input_only;
+
+ if (!mgr || !config) {
+ SDEROT_ERR("null parameters\n");
+ return -EINVAL;
+ }
+
+ input = config->input.format;
+ output = config->output.format;
+ rotation = (config->flags & SDE_ROTATION_90) ? true : false;
+ verify_input_only =
+ (config->flags & SDE_ROTATION_VERIFY_INPUT_ONLY) ? 1 : 0;
+
+ in_fmt = sde_get_format_params(input);
+ if (!in_fmt) {
+ SDEROT_DBG("Unrecognized input format:%u\n", input);
+ return -EINVAL;
+ }
+
+ out_fmt = sde_get_format_params(output);
+ if (!out_fmt) {
+ SDEROT_DBG("Unrecognized output format:%u\n", output);
+ return -EINVAL;
+ }
+
+ sde_mdp_get_v_h_subsample_rate(in_fmt->chroma_sample,
+ &in_v_subsample, &in_h_subsample);
+ sde_mdp_get_v_h_subsample_rate(out_fmt->chroma_sample,
+ &out_v_subsample, &out_h_subsample);
+
+ /* Dimension of image needs to be divisible by subsample rate */
+ if ((config->input.height % in_v_subsample) ||
+ (config->input.width % in_h_subsample)) {
+ SDEROT_DBG(
+ "In ROI, subsample mismatch, w=%d, h=%d, vss%d, hss%d\n",
+ config->input.width,
+ config->input.height,
+ in_v_subsample, in_h_subsample);
+ return -EINVAL;
+ }
+
+ if ((config->output.height % out_v_subsample) ||
+ (config->output.width % out_h_subsample)) {
+ SDEROT_DBG(
+ "Out ROI, subsample mismatch, w=%d, h=%d, vss%d, hss%d\n",
+ config->output.width,
+ config->output.height,
+ out_v_subsample, out_h_subsample);
+ /* output-side failure is non-fatal in input-only mode */
+ if (!verify_input_only)
+ return -EINVAL;
+ }
+
+ if (!sde_rotator_verify_format(mgr, in_fmt,
+ out_fmt, rotation)) {
+ SDEROT_DBG(
+ "Rot format pairing invalid, in_fmt:%d, out_fmt:%d\n",
+ input, output);
+ if (!verify_input_only)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * sde_rotator_validate_item_matches_session - check item/session agreement.
+ * @config: session configuration the item must match.
+ * @item: rotation work item to validate.
+ *
+ * Compares the source rect against the session's input, the destination
+ * rect against the session's output, and the rotation flags.
+ * Return: 0 on match, first non-zero comparison result otherwise.
+ */
+static int sde_rotator_validate_item_matches_session(
+ struct sde_rotation_config *config, struct sde_rotation_item *item)
+{
+ int rc;
+
+ rc = __compare_session_item_rect(&config->input,
+ &item->src_rect, item->input.format, true);
+ if (!rc)
+ rc = __compare_session_item_rect(&config->output,
+ &item->dst_rect, item->output.format, false);
+ if (!rc)
+ rc = __compare_session_rotations(config->flags, item->flags);
+
+ return rc;
+}
+
+/* Only need to validate x and y offset for ubwc dst fmt */
+/*
+ * sde_rotator_validate_img_roi - validate destination ROI offsets.
+ * @item: rotation work item whose dst_rect is checked.
+ *
+ * Non-UBWC destinations need no offset validation; for UBWC the x/y
+ * offsets must satisfy the tiled-format alignment rules.
+ * Return: 0 when valid, negative error code otherwise.
+ */
+static int sde_rotator_validate_img_roi(struct sde_rotation_item *item)
+{
+ struct sde_mdp_format_params *fmt;
+
+ fmt = sde_get_format_params(item->output.format);
+ if (!fmt) {
+ SDEROT_DBG("invalid output format:%d\n",
+ item->output.format);
+ return -EINVAL;
+ }
+
+ if (!sde_mdp_is_ubwc_format(fmt))
+ return 0;
+
+ return sde_validate_offset_for_ubwc_format(fmt,
+ item->dst_rect.x, item->dst_rect.y);
+}
+
+/*
+ * sde_rotator_validate_fmt_and_item_flags - check flag/format combinations.
+ * @config: session configuration (currently unused here).
+ * @item: rotation work item whose flags and input format are checked.
+ *
+ * Deinterlacing cannot be combined with tiled (UBWC) input formats.
+ * Return: 0 when valid, -EINVAL otherwise.
+ */
+static int sde_rotator_validate_fmt_and_item_flags(
+ struct sde_rotation_config *config, struct sde_rotation_item *item)
+{
+ struct sde_mdp_format_params *fmt;
+
+ fmt = sde_get_format_params(item->input.format);
+ /*
+ * Fix: sde_get_format_params() can return NULL for an unknown
+ * format; every other caller in this file checks it before use,
+ * but this path passed it straight to sde_mdp_is_ubwc_format().
+ */
+ if (!fmt) {
+ SDEROT_DBG("invalid input format:%d\n", item->input.format);
+ return -EINVAL;
+ }
+ if ((item->flags & SDE_ROTATION_DEINTERLACE) &&
+ sde_mdp_is_ubwc_format(fmt)) {
+ SDEROT_DBG("cannot perform deinterlace on tiled formats\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * sde_rotator_validate_entry - validate one work entry against its session.
+ * @mgr: rotator manager.
+ * @private: per-file private data used to find the session.
+ * @entry: work entry to validate (wb_idx may be clamped in place).
+ *
+ * Clamps the writeback index, checks the item against the session config,
+ * validates ROI offsets and flag/format combinations, then lets the hw
+ * layer validate (e.g. downscale factors).
+ *
+ * Return: 0 when valid, negative error code otherwise.
+ */
+static int sde_rotator_validate_entry(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private,
+ struct sde_rot_entry *entry)
+{
+ int ret;
+ struct sde_rotation_item *item;
+ struct sde_rot_perf *perf;
+
+ item = &entry->item;
+
+ /* clamp out-of-range wb index to the lowest priority queue */
+ if (item->wb_idx >= mgr->queue_count)
+ item->wb_idx = mgr->queue_count - 1;
+
+ perf = sde_rotator_find_session(private, item->session_id);
+ if (!perf) {
+ SDEROT_DBG("Could not find session:%u\n", item->session_id);
+ return -EINVAL;
+ }
+
+ ret = sde_rotator_validate_item_matches_session(&perf->config, item);
+ if (ret) {
+ SDEROT_DBG("Work item does not match session:%u\n",
+ item->session_id);
+ return ret;
+ }
+
+ ret = sde_rotator_validate_img_roi(item);
+ if (ret) {
+ SDEROT_DBG("Image roi is invalid\n");
+ return ret;
+ }
+
+ ret = sde_rotator_validate_fmt_and_item_flags(&perf->config, item);
+ if (ret)
+ return ret;
+
+ ret = mgr->ops_hw_validate_entry(mgr, entry);
+ if (ret) {
+ SDEROT_DBG("fail to configure downscale factor\n");
+ return ret;
+ }
+ return ret;
+}
+
+/*
+ * Upon failure from the function, caller needs to make sure
+ * to call sde_rotator_remove_request to clean up resources.
+ */
+/*
+ * sde_rotator_add_request - validate, import and queue-assign a request.
+ * @mgr: rotator manager.
+ * @private: per-file private data owning the request list.
+ * @req: request container whose entries are prepared.
+ *
+ * For each entry: validate against the session, import the dma buffers,
+ * adopt the item's fences, assign commit/done queues and initialize the
+ * work structs. On success the request is linked onto the file's list.
+ *
+ * Return: 0 on success; on error, partially prepared entries are left for
+ * sde_rotator_remove_request() to clean up (see comment above).
+ */
+static int sde_rotator_add_request(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private,
+ struct sde_rot_entry_container *req)
+{
+ struct sde_rot_entry *entry;
+ struct sde_rotation_item *item;
+ int i, ret;
+
+ for (i = 0; i < req->count; i++) {
+ entry = req->entries + i;
+ item = &entry->item;
+ entry->fenceq = private->fenceq;
+
+ ret = sde_rotator_validate_entry(mgr, private, entry);
+ if (ret) {
+ SDEROT_ERR("fail to validate the entry\n");
+ return ret;
+ }
+
+ ret = sde_rotator_import_data(mgr, entry);
+ if (ret) {
+ SDEROT_ERR("fail to import the data\n");
+ return ret;
+ }
+
+ entry->input_fence = item->input.fence;
+ entry->output_fence = item->output.fence;
+
+ ret = sde_rotator_assign_queue(mgr, entry, private);
+ if (ret) {
+ SDEROT_ERR("fail to assign queue to entry\n");
+ return ret;
+ }
+
+ entry->request = req;
+
+ INIT_WORK(&entry->commit_work, sde_rotator_commit_handler);
+ INIT_WORK(&entry->done_work, sde_rotator_done_handler);
+ SDEROT_DBG("Entry added. wbidx=%u, src{%u,%u,%u,%u}f=%u\n"
+ "dst{%u,%u,%u,%u}f=%u session_id=%u\n", item->wb_idx,
+ item->src_rect.x, item->src_rect.y,
+ item->src_rect.w, item->src_rect.h, item->input.format,
+ item->dst_rect.x, item->dst_rect.y,
+ item->dst_rect.w, item->dst_rect.h, item->output.format,
+ item->session_id);
+ }
+
+ list_add(&req->list, &private->req_list);
+
+ return 0;
+}
+
+/*
+ * sde_rotator_remove_request - release every entry of a request and unlink it.
+ * @mgr: rotator manager.
+ * @private: per-file private data (validated only).
+ * @req: request container to tear down.
+ *
+ * Counterpart of sde_rotator_add_request(); safe on partially prepared
+ * requests.
+ */
+void sde_rotator_remove_request(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private,
+ struct sde_rot_entry_container *req)
+{
+ int idx;
+
+ if (!mgr || !private || !req) {
+ SDEROT_ERR("null parameters\n");
+ return;
+ }
+
+ for (idx = 0; idx < req->count; idx++)
+ sde_rotator_release_entry(mgr, &req->entries[idx]);
+
+ list_del_init(&req->list);
+}
+
+/* This function should be called with req_lock */
+/*
+ * sde_rotator_cancel_request - cancel all outstanding work of a request.
+ * @mgr: rotator manager (lock held by caller; dropped around cancellation).
+ * @req: request container to cancel and free.
+ *
+ * cancel_work_sync() may sleep waiting for a running handler, which itself
+ * takes the mgr lock — hence the unlock/lock bracket around the cancel
+ * loop. Only after every entry is cancelled are the output fences
+ * signaled, preserving fence signal ordering.
+ */
+static void sde_rotator_cancel_request(struct sde_rot_mgr *mgr,
+ struct sde_rot_entry_container *req)
+{
+ struct sde_rot_entry *entry;
+ int i;
+
+ /*
+ * To avoid signal the rotation entry output fence in the wrong
+ * order, all the entries in the same request needs to be canceled
+ * first, before signaling the output fence.
+ */
+ SDEROT_DBG("cancel work start\n");
+ sde_rot_mgr_unlock(mgr);
+ for (i = req->count - 1; i >= 0; i--) {
+ entry = req->entries + i;
+ cancel_work_sync(&entry->commit_work);
+ cancel_work_sync(&entry->done_work);
+ }
+ sde_rot_mgr_lock(mgr);
+ SDEROT_DBG("cancel work done\n");
+ for (i = req->count - 1; i >= 0; i--) {
+ entry = req->entries + i;
+ sde_rotator_signal_output(entry);
+ sde_rotator_release_entry(mgr, entry);
+ }
+
+ list_del_init(&req->list);
+ devm_kfree(&mgr->pdev->dev, req);
+}
+
+/*
+ * sde_rotator_cancel_all_requests - cancel every request owned by a file.
+ * @mgr: rotator manager.
+ * @private: per-file private data whose req_list is drained.
+ *
+ * Safe iteration: sde_rotator_cancel_request() unlinks and frees each
+ * request as it goes.
+ */
+static void sde_rotator_cancel_all_requests(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private)
+{
+ struct sde_rot_entry_container *cur, *tmp;
+
+ SDEROT_DBG("Canceling all rotator requests\n");
+
+ list_for_each_entry_safe(cur, tmp, &private->req_list, list)
+ sde_rotator_cancel_request(mgr, cur);
+}
+
+/*
+ * sde_rotator_free_completed_request - reap finished, retire-less requests.
+ * @mgr: rotator manager.
+ * @private: per-file private data whose req_list is scanned.
+ *
+ * Requests with no pending work and no retire mechanism (neither a retire
+ * work struct nor a retire queue) are unlinked and freed here; requests
+ * with a retire path are freed by their retire handler instead.
+ */
+static void sde_rotator_free_completed_request(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private)
+{
+ struct sde_rot_entry_container *cur, *tmp;
+
+ list_for_each_entry_safe(cur, tmp, &private->req_list, list) {
+ if (atomic_read(&cur->pending_count) == 0 &&
+ !cur->retire_work && !cur->retireq) {
+ list_del_init(&cur->list);
+ devm_kfree(&mgr->pdev->dev, cur);
+ }
+ }
+}
+
+/*
+ * sde_rotator_release_rotator_perf_session - drop all sessions of a file.
+ * @mgr: rotator manager.
+ * @private: per-file private data being torn down.
+ *
+ * Cancels every outstanding request first, then unlinks and frees each
+ * session perf struct together with its work-distribution array.
+ */
+static void sde_rotator_release_rotator_perf_session(
+ struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private)
+{
+ struct sde_rot_perf *cur, *tmp;
+
+ SDEROT_DBG("Releasing all rotator request\n");
+ sde_rotator_cancel_all_requests(mgr, private);
+
+ list_for_each_entry_safe(cur, tmp, &private->perf_list, list) {
+ list_del_init(&cur->list);
+ devm_kfree(&mgr->pdev->dev, cur->work_distribution);
+ devm_kfree(&mgr->pdev->dev, cur);
+ }
+}
+
+/*
+ * sde_rotator_release_all - force-release every open file's rotator state.
+ * @mgr: rotator manager.
+ *
+ * For each file: release its sessions/requests, drop one resource
+ * reference, and unlink it. Finishes with a perf re-vote so the bus
+ * bandwidth reflects the now-empty state.
+ */
+static void sde_rotator_release_all(struct sde_rot_mgr *mgr)
+{
+ struct sde_rot_file_private *priv, *priv_next;
+
+ list_for_each_entry_safe(priv, priv_next, &mgr->file_list, list) {
+ sde_rotator_release_rotator_perf_session(mgr, priv);
+ sde_rotator_resource_ctrl(mgr, false);
+ list_del_init(&priv->list);
+ }
+
+ sde_rotator_update_perf(mgr);
+}
+
+/*
+ * sde_rotator_validate_request - validate every entry of a request.
+ * @mgr: rotator manager.
+ * @private: per-file private data used for session lookup.
+ * @req: request container to validate.
+ *
+ * Return: 0 when all entries are valid, -EINVAL on null parameters, or
+ * the first entry's validation error.
+ */
+int sde_rotator_validate_request(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private,
+ struct sde_rot_entry_container *req)
+{
+ int i;
+
+ if (!mgr || !private || !req) {
+ SDEROT_ERR("null parameters\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < req->count; i++) {
+ int rc = sde_rotator_validate_entry(mgr, private,
+ &req->entries[i]);
+
+ if (rc) {
+ SDEROT_DBG("invalid entry\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * sde_rotator_open_session - create a session with default configuration.
+ * @mgr: rotator manager.
+ * @private: per-file private data that will own the session.
+ * @session_id: identifier for the new session.
+ *
+ * Allocates the perf struct and its per-queue work-distribution array,
+ * seeds a default 640x480 NV12 @ 30fps config, computes perf, then
+ * acquires resources, updates/enables clocks and re-votes bandwidth.
+ * The goto ladder unwinds each step in reverse on failure.
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int sde_rotator_open_session(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private, u32 session_id)
+{
+ struct sde_rotation_config config;
+ struct sde_rot_perf *perf;
+ int ret;
+
+ if (!mgr || !private)
+ return -EINVAL;
+
+ memset(&config, 0, sizeof(struct sde_rotation_config));
+
+ /* initialize with default parameters */
+ config.frame_rate = 30;
+ config.input.comp_ratio.numer = 1;
+ config.input.comp_ratio.denom = 1;
+ config.input.format = SDE_PIX_FMT_Y_CBCR_H2V2;
+ config.input.width = 640;
+ config.input.height = 480;
+ config.output.comp_ratio.numer = 1;
+ config.output.comp_ratio.denom = 1;
+ config.output.format = SDE_PIX_FMT_Y_CBCR_H2V2;
+ config.output.width = 640;
+ config.output.height = 480;
+
+ perf = devm_kzalloc(&mgr->pdev->dev, sizeof(*perf), GFP_KERNEL);
+ if (!perf)
+ return -ENOMEM;
+
+ perf->work_distribution = devm_kzalloc(&mgr->pdev->dev,
+ sizeof(u32) * mgr->queue_count, GFP_KERNEL);
+ if (!perf->work_distribution) {
+ ret = -ENOMEM;
+ goto alloc_err;
+ }
+
+ config.session_id = session_id;
+ perf->config = config;
+ perf->last_wb_idx = -1;
+
+ INIT_LIST_HEAD(&perf->list);
+
+ ret = sde_rotator_calc_perf(mgr, perf);
+ if (ret) {
+ SDEROT_ERR("error setting the session %d\n", ret);
+ goto copy_user_err;
+ }
+
+ list_add(&perf->list, &private->perf_list);
+
+ ret = sde_rotator_resource_ctrl(mgr, true);
+ if (ret < 0) {
+ SDEROT_ERR("Failed to acquire rotator resources\n");
+ goto resource_err;
+ }
+
+ ret = sde_rotator_update_clk(mgr);
+ if (ret) {
+ SDEROT_ERR("failed to update clk %d\n", ret);
+ goto update_clk_err;
+ }
+
+ ret = sde_rotator_clk_ctrl(mgr, true);
+ if (ret) {
+ SDEROT_ERR("failed to enable clk %d\n", ret);
+ goto enable_clk_err;
+ }
+
+ ret = sde_rotator_update_perf(mgr);
+ if (ret) {
+ SDEROT_ERR("fail to open session, not enough clk/bw\n");
+ goto perf_err;
+ }
+ SDEROT_DBG("open session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n",
+ config.session_id, config.input.width, config.input.height,
+ config.input.format, config.output.width, config.output.height,
+ config.output.format);
+
+ goto done;
+perf_err:
+ sde_rotator_clk_ctrl(mgr, false);
+enable_clk_err:
+update_clk_err:
+ sde_rotator_resource_ctrl(mgr, false);
+resource_err:
+ list_del_init(&perf->list);
+copy_user_err:
+ devm_kfree(&mgr->pdev->dev, perf->work_distribution);
+alloc_err:
+ devm_kfree(&mgr->pdev->dev, perf);
+done:
+ return ret;
+}
+
+/*
+ * sde_rotator_close_session - close a session, deferring if work remains.
+ * @mgr: rotator manager.
+ * @private: per-file private data that owns the session.
+ * @session_id: identifier of the session to close.
+ *
+ * If work is still pending, the perf struct is only unlinked and its
+ * bandwidth vote moved to pending_close_bw_vote; the final free happens
+ * in sde_rotator_release_from_work_distribution() when the last entry
+ * retires. Otherwise the perf struct is freed and clocks/resources are
+ * dropped immediately.
+ *
+ * Return: 0 on success, -EINVAL if no such session exists.
+ */
+static int sde_rotator_close_session(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private, u32 session_id)
+{
+ struct sde_rot_perf *perf;
+ bool offload_release_work = false;
+ u32 id;
+
+ id = (u32)session_id;
+ perf = __sde_rotator_find_session(private, id);
+ if (!perf) {
+ SDEROT_ERR("Trying to close session that does not exist\n");
+ return -EINVAL;
+ }
+
+ if (sde_rotator_is_work_pending(mgr, perf)) {
+ SDEROT_DBG("Work is still pending, offload free to wq\n");
+ mgr->pending_close_bw_vote += perf->bw;
+ offload_release_work = true;
+ }
+ list_del_init(&perf->list);
+
+ if (offload_release_work)
+ goto done;
+
+ devm_kfree(&mgr->pdev->dev, perf->work_distribution);
+ devm_kfree(&mgr->pdev->dev, perf);
+ sde_rotator_update_perf(mgr);
+ sde_rotator_clk_ctrl(mgr, false);
+ sde_rotator_update_clk(mgr);
+ sde_rotator_resource_ctrl(mgr, false);
+done:
+ SDEROT_DBG("Closed session id:%u", id);
+ return 0;
+}
+
+/*
+ * sde_rotator_config_session - (re)configure an already-open session
+ * @mgr: Pointer to rotator manager
+ * @private: Pointer to per-file rotator context owning the session
+ * @config: New session configuration (formats, dimensions, frame rate)
+ *
+ * Validates the requested configuration, stores it on the matching perf
+ * node, recomputes that session's clock/bandwidth needs and re-applies
+ * the aggregate perf vote.
+ * Return: 0 on success, negative errno on validation/lookup failure.
+ */
+static int sde_rotator_config_session(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *private,
+	struct sde_rotation_config *config)
+{
+	int ret = 0;
+	struct sde_rot_perf *perf;
+
+	ret = sde_rotator_verify_config(mgr, config);
+	if (ret) {
+		SDEROT_ERR("Rotator verify format failed\n");
+		return ret;
+	}
+
+	perf = sde_rotator_find_session(private, config->session_id);
+	if (!perf) {
+		SDEROT_ERR("No session with id=%u could be found\n",
+			config->session_id);
+		return -EINVAL;
+	}
+
+	/* commit the new config, then recompute this session's perf needs */
+	perf->config = *config;
+	ret = sde_rotator_calc_perf(mgr, perf);
+
+	if (ret) {
+		SDEROT_ERR("error in configuring the session %d\n", ret);
+		goto done;
+	}
+
+	ret = sde_rotator_update_perf(mgr);
+
+	SDEROT_DBG("reconfig session id=%u in{%u,%u}f:%u out{%u,%u}f:%u\n",
+		config->session_id, config->input.width, config->input.height,
+		config->input.format, config->output.width,
+		config->output.height, config->output.format);
+done:
+	return ret;
+}
+
+/*
+ * sde_rotator_req_init - allocate and initialize a request container
+ * @mgr: Pointer to rotator manager
+ * @private: Pointer to per-file rotator context
+ * @items: Array of rotation items to wrap, one entry each
+ * @count: Number of items in @items
+ * @flags: Request flags copied into the container
+ *
+ * The container and its entry array are carved out of a single devm
+ * allocation; the entries immediately follow the container header.
+ * Return: request pointer on success, ERR_PTR on failure.
+ */
+struct sde_rot_entry_container *sde_rotator_req_init(
+	struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *private,
+	struct sde_rotation_item *items,
+	u32 count, u32 flags)
+{
+	struct sde_rot_entry_container *req;
+	size_t size;
+	u32 i;
+
+	if (!mgr || !private || !items) {
+		SDEROT_ERR("null parameters\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * compute the allocation size in size_t so a large caller-supplied
+	 * count cannot overflow int arithmetic and under-allocate
+	 */
+	size = sizeof(struct sde_rot_entry_container);
+	size += sizeof(struct sde_rot_entry) * (size_t)count;
+	req = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
+
+	if (!req)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&req->list);
+	req->count = count;
+	/* entry array lives right after the container header */
+	req->entries = (struct sde_rot_entry *)
+		((void *)req + sizeof(struct sde_rot_entry_container));
+	req->flags = flags;
+	atomic_set(&req->pending_count, count);
+	atomic_set(&req->failed_count, 0);
+
+	for (i = 0; i < count; i++) {
+		req->entries[i].item = items[i];
+		req->entries[i].private = private;
+	}
+
+	return req;
+}
+
+/*
+ * sde_rotator_handle_request_common - common path for submitting a request
+ * @mgr: Pointer to rotator manager
+ * @private: Pointer to per-file rotator context
+ * @req: Request container previously built by sde_rotator_req_init
+ * @items: Rotation items backing the request (validated, unused here beyond
+ *         the null check — TODO confirm callers rely on that check only)
+ *
+ * Reaps already-completed requests for this context first, then queues the
+ * new one; on queueing failure the request is unlinked again so the caller
+ * can free it.
+ * Return: 0 on success, negative errno on failure.
+ */
+int sde_rotator_handle_request_common(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *private,
+	struct sde_rot_entry_container *req,
+	struct sde_rotation_item *items)
+{
+	int ret;
+
+	if (!mgr || !private || !req || !items) {
+		SDEROT_ERR("null parameters\n");
+		return -EINVAL;
+	}
+
+	sde_rotator_free_completed_request(mgr, private);
+
+	ret = sde_rotator_add_request(mgr, private, req);
+	if (ret) {
+		SDEROT_ERR("fail to add rotation request\n");
+		sde_rotator_remove_request(mgr, private, req);
+		return ret;
+	}
+	return ret;
+}
+
+/*
+ * sde_rotator_open - allocate a per-file rotator context
+ * @mgr: Pointer to rotator manager
+ * @pprivate: Output pointer receiving the new context
+ *
+ * Refuses new opens while the device is suspended (-EPERM). The new
+ * context is linked into mgr->file_list for bookkeeping during perf
+ * recalculation and suspend cleanup.
+ * Return: 0 on success, negative errno on failure.
+ */
+static int sde_rotator_open(struct sde_rot_mgr *mgr,
+		struct sde_rot_file_private **pprivate)
+{
+	struct sde_rot_file_private *private;
+
+	if (!mgr || !pprivate)
+		return -ENODEV;
+
+	if (atomic_read(&mgr->device_suspended))
+		return -EPERM;
+
+	private = devm_kzalloc(&mgr->pdev->dev, sizeof(*private),
+		GFP_KERNEL);
+	if (!private)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&private->req_list);
+	INIT_LIST_HEAD(&private->perf_list);
+	INIT_LIST_HEAD(&private->list);
+
+	list_add(&private->list, &mgr->file_list);
+
+	*pprivate = private;
+
+	return 0;
+}
+
+/*
+ * sde_rotator_file_priv_allowed - check a file context is a known one
+ * @mgr: Pointer to rotator manager
+ * @priv: Context pointer to look up in mgr->file_list
+ *
+ * Guards against callers handing in stale or foreign context pointers.
+ * Return: true if @priv is currently registered with @mgr.
+ */
+static bool sde_rotator_file_priv_allowed(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *priv)
+{
+	struct sde_rot_file_private *curr;
+
+	/* plain walk is sufficient; nothing is removed during the scan */
+	list_for_each_entry(curr, &mgr->file_list, list) {
+		if (curr == priv)
+			return true;
+	}
+	return false;
+}
+
+/*
+ * sde_rotator_close - release a per-file rotator context
+ * @mgr: Pointer to rotator manager
+ * @private: Context previously returned by sde_rotator_open
+ *
+ * Rejects context pointers not present in mgr->file_list (stale or
+ * foreign handles). Releases all perf sessions held by the context,
+ * unlinks and frees it, then re-votes aggregate perf.
+ * Return: 0 on success, negative errno on bad arguments.
+ */
+static int sde_rotator_close(struct sde_rot_mgr *mgr,
+		struct sde_rot_file_private *private)
+{
+	if (!mgr || !private)
+		return -ENODEV;
+
+	if (!(sde_rotator_file_priv_allowed(mgr, private))) {
+		SDEROT_ERR(
+			"Calling close with unrecognized rot_file_private\n");
+		return -EINVAL;
+	}
+
+	sde_rotator_release_rotator_perf_session(mgr, private);
+
+	list_del_init(&private->list);
+	devm_kfree(&mgr->pdev->dev, private);
+
+	sde_rotator_update_perf(mgr);
+	return 0;
+}
+
+/*
+ * sde_rotator_show_caps - sysfs read handler for the "caps" node
+ * @dev: Device the attribute belongs to
+ * @attr: Attribute being read
+ * @buf: PAGE_SIZE output buffer
+ *
+ * Prints static rotator capabilities, then lets the h/w layer append
+ * its own. Return: number of bytes written to @buf.
+ */
+static ssize_t sde_rotator_show_caps(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	size_t len = PAGE_SIZE;
+	int cnt = 0;
+	struct sde_rot_mgr *mgr = sde_rot_mgr_from_device(dev);
+
+	if (!mgr)
+		return cnt;
+
+/* append a formatted line to buf, tracking the running byte count */
+#define SPRINT(fmt, ...) \
+		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
+
+	SPRINT("wb_count=%d\n", mgr->queue_count);
+	SPRINT("downscale=1\n");
+	SPRINT("ubwc=1\n");
+
+	if (mgr->ops_hw_show_caps)
+		cnt += mgr->ops_hw_show_caps(mgr, attr, buf + cnt, len - cnt);
+
+	return cnt;
+}
+
+/*
+ * sde_rotator_show_state - sysfs read handler for the "state" node
+ * @dev: Device the attribute belongs to
+ * @attr: Attribute being read
+ * @buf: PAGE_SIZE output buffer
+ *
+ * Dumps current bus votes, suspend/footswitch/clock reference counts and
+ * per-clock rates, then lets the h/w layer append its own state.
+ * Return: number of bytes written to @buf.
+ */
+static ssize_t sde_rotator_show_state(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	size_t len = PAGE_SIZE;
+	int cnt = 0;
+	struct sde_rot_mgr *mgr = sde_rot_mgr_from_device(dev);
+	int i;
+
+	if (!mgr)
+		return cnt;
+
+/* same helper as in show_caps; identical redefinition is legal C */
+#define SPRINT(fmt, ...) \
+		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
+
+	SPRINT("reg_bus_bw=%llu\n", mgr->reg_bus.curr_quota_val);
+	SPRINT("data_bus_bw=%llu\n", mgr->data_bus.curr_quota_val);
+	SPRINT("pending_close_bw_vote=%llu\n", mgr->pending_close_bw_vote);
+	SPRINT("device_suspended=%d\n", atomic_read(&mgr->device_suspended));
+	SPRINT("footswitch_cnt=%d\n", mgr->res_ref_cnt);
+	SPRINT("regulator_enable=%d\n", mgr->regulator_enable);
+	SPRINT("enable_clk_cnt=%d\n", mgr->rot_enable_clk_cnt);
+	SPRINT("core_clk_idx=%d\n", mgr->core_clk_idx);
+	for (i = 0; i < mgr->num_rot_clk; i++)
+		if (mgr->rot_clk[i].clk)
+			SPRINT("%s=%lu\n", mgr->rot_clk[i].clk_name,
+					clk_get_rate(mgr->rot_clk[i].clk));
+
+	if (mgr->ops_hw_show_state)
+		cnt += mgr->ops_hw_show_state(mgr, attr, buf + cnt, len - cnt);
+
+	return cnt;
+}
+
+/* read-only sysfs nodes exposing rotator capabilities and runtime state */
+static DEVICE_ATTR(caps, S_IRUGO, sde_rotator_show_caps, NULL);
+static DEVICE_ATTR(state, S_IRUGO, sde_rotator_show_state, NULL);
+
+static struct attribute *sde_rotator_fs_attrs[] = {
+	&dev_attr_caps.attr,
+	&dev_attr_state.attr,
+	NULL
+};
+
+static struct attribute_group sde_rotator_fs_attr_group = {
+	.attrs = sde_rotator_fs_attrs
+};
+
+/*
+ * sde_rotator_parse_dt_bus - parse data/register bus scaling tables
+ * @mgr: Pointer to rotator manager
+ * @dev: Platform device carrying the DT node
+ *
+ * The data bus table comes from DT; the register bus table is a static
+ * local table whose usecase vectors are wired up here.
+ * Return: 0 on success, negative errno if the data bus table is missing.
+ */
+static int sde_rotator_parse_dt_bus(struct sde_rot_mgr *mgr,
+	struct platform_device *dev)
+{
+	int ret = 0, i;
+	int usecases;
+
+	mgr->data_bus.bus_scale_pdata = msm_bus_cl_get_pdata(dev);
+	if (IS_ERR_OR_NULL(mgr->data_bus.bus_scale_pdata)) {
+		ret = PTR_ERR(mgr->data_bus.bus_scale_pdata);
+		if (!ret)
+			ret = -EINVAL;
+		SDEROT_ERR("msm_bus_cl_get_pdata failed. ret=%d\n", ret);
+		/*
+		 * never leave an ERR_PTR behind: later code only checks the
+		 * pointer for NULL before registering the bus client
+		 */
+		mgr->data_bus.bus_scale_pdata = NULL;
+	}
+
+	mgr->reg_bus.bus_scale_pdata = &rot_reg_bus_scale_table;
+	usecases = mgr->reg_bus.bus_scale_pdata->num_usecases;
+	for (i = 0; i < usecases; i++) {
+		rot_reg_bus_usecases[i].num_paths = 1;
+		rot_reg_bus_usecases[i].vectors =
+			&rot_reg_bus_vectors[i];
+	}
+
+	return ret;
+}
+
+/*
+ * sde_rotator_parse_dt - parse rotator device-tree properties
+ * @mgr: Pointer to rotator manager
+ * @dev: Platform device carrying the DT node
+ *
+ * "qcom,mdss-wb-count" is optional (queue_count keeps its default when
+ * absent) but must not exceed ROT_MAX_HW_BLOCKS. Bus tables are
+ * mandatory via sde_rotator_parse_dt_bus.
+ * Return: 0 on success, negative errno on failure.
+ */
+static int sde_rotator_parse_dt(struct sde_rot_mgr *mgr,
+	struct platform_device *dev)
+{
+	int ret = 0;
+	u32 data;
+
+	ret = of_property_read_u32(dev->dev.of_node,
+		"qcom,mdss-wb-count", &data);
+	if (!ret) {
+		if (data > ROT_MAX_HW_BLOCKS) {
+			SDEROT_ERR(
+				"Err, num of wb block (%d) larger than sw max %d\n",
+				data, ROT_MAX_HW_BLOCKS);
+			return -EINVAL;
+		}
+
+		mgr->queue_count = data;
+	}
+
+	ret = sde_rotator_parse_dt_bus(mgr, dev);
+	if (ret)
+		SDEROT_ERR("Failed to parse bus data\n");
+
+	return ret;
+}
+
+/*
+ * sde_rotator_put_dt_vreg_data - release regulator config parsed from DT
+ * @dev: Device owning the devm allocation
+ * @mp: Module power descriptor to tear down
+ *
+ * Drops the regulator votes first, then frees the config table.
+ */
+static void sde_rotator_put_dt_vreg_data(struct device *dev,
+	struct sde_module_power *mp)
+{
+	if (!mp) {
+		SDEROT_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	sde_rot_config_vreg(dev, mp->vreg_config, mp->num_vreg, 0);
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+	mp->num_vreg = 0;
+}
+
+/*
+ * sde_rotator_get_dt_vreg_data - parse regulator supplies from DT
+ * @dev: Device owning the DT node and devm allocations
+ * @mp: Module power descriptor to populate
+ *
+ * Supplies are optional: a missing/empty "qcom,supply-names" property is
+ * treated as success with no regulators configured.
+ * Return: 0 on success, negative errno on parse/allocation failure.
+ */
+static int sde_rotator_get_dt_vreg_data(struct device *dev,
+	struct sde_module_power *mp)
+{
+	const char *st = NULL;
+	struct device_node *of_node = NULL;
+	int dt_vreg_total = 0;
+	int i;
+	int rc = 0;
+
+	if (!dev || !mp) {
+		SDEROT_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	of_node = dev->of_node;
+
+	dt_vreg_total = of_property_count_strings(of_node, "qcom,supply-names");
+	if (dt_vreg_total <= 0) {
+		/*
+		 * also covers the zero-entry case, which previously fell
+		 * through and returned an uninitialized rc
+		 */
+		SDEROT_ERR("%s: vreg not found. rc=%d\n", __func__,
+			dt_vreg_total);
+		return 0;
+	}
+	mp->num_vreg = dt_vreg_total;
+	mp->vreg_config = devm_kzalloc(dev, sizeof(struct sde_vreg) *
+		dt_vreg_total, GFP_KERNEL);
+	if (!mp->vreg_config)
+		return -ENOMEM;
+
+	/* vreg-name */
+	for (i = 0; i < dt_vreg_total; i++) {
+		rc = of_property_read_string_index(of_node,
+			"qcom,supply-names", i, &st);
+		if (rc) {
+			SDEROT_ERR("%s: error reading name. i=%d, rc=%d\n",
+				__func__, i, rc);
+			goto error;
+		}
+		/* 32 assumed to match sde_vreg.vreg_name size — TODO confirm */
+		snprintf(mp->vreg_config[i].vreg_name, 32, "%s", st);
+	}
+	sde_rot_config_vreg(dev, mp->vreg_config, mp->num_vreg, 1);
+
+	for (i = 0; i < dt_vreg_total; i++) {
+		SDEROT_DBG("%s: %s min=%d, max=%d, enable=%d disable=%d\n",
+			__func__,
+			mp->vreg_config[i].vreg_name,
+			mp->vreg_config[i].min_voltage,
+			mp->vreg_config[i].max_voltage,
+			mp->vreg_config[i].enable_load,
+			mp->vreg_config[i].disable_load);
+	}
+	return rc;
+
+error:
+	if (mp->vreg_config) {
+		devm_kfree(dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+	}
+	mp->num_vreg = 0;
+	return rc;
+}
+
+/*
+ * sde_rotator_bus_scale_unregister - drop data/register bus clients
+ * @mgr: Pointer to rotator manager
+ *
+ * Only unregisters handles that were actually obtained (non-zero).
+ */
+static void sde_rotator_bus_scale_unregister(struct sde_rot_mgr *mgr)
+{
+	SDEROT_DBG("unregister bus_hdl=%x, reg_bus_hdl=%x\n",
+		mgr->data_bus.bus_hdl, mgr->reg_bus.bus_hdl);
+
+	if (mgr->data_bus.bus_hdl)
+		msm_bus_scale_unregister_client(mgr->data_bus.bus_hdl);
+
+	if (mgr->reg_bus.bus_hdl)
+		msm_bus_scale_unregister_client(mgr->reg_bus.bus_hdl);
+}
+
+/*
+ * sde_rotator_bus_scale_register - register data/register bus clients
+ * @mgr: Pointer to rotator manager
+ *
+ * The data bus client is mandatory; the register bus client is only
+ * registered when a scale table exists, and a failure there tears down
+ * the data bus client as well.
+ * Return: 0 on success, negative errno on failure.
+ */
+static int sde_rotator_bus_scale_register(struct sde_rot_mgr *mgr)
+{
+	if (!mgr->data_bus.bus_scale_pdata) {
+		SDEROT_ERR("Scale table is NULL\n");
+		return -EINVAL;
+	}
+
+	mgr->data_bus.bus_hdl =
+		msm_bus_scale_register_client(
+		mgr->data_bus.bus_scale_pdata);
+	if (!mgr->data_bus.bus_hdl) {
+		SDEROT_ERR("bus_client register failed\n");
+		return -EINVAL;
+	}
+	SDEROT_DBG("registered bus_hdl=%x\n", mgr->data_bus.bus_hdl);
+
+	if (mgr->reg_bus.bus_scale_pdata) {
+		mgr->reg_bus.bus_hdl =
+			msm_bus_scale_register_client(
+			mgr->reg_bus.bus_scale_pdata);
+		if (!mgr->reg_bus.bus_hdl) {
+			SDEROT_ERR("register bus_client register failed\n");
+			sde_rotator_bus_scale_unregister(mgr);
+			/*
+			 * both clients are gone at this point; returning
+			 * success here would leave stale handles behind
+			 */
+			return -EINVAL;
+		}
+		SDEROT_DBG("registered register bus_hdl=%x\n",
+				mgr->reg_bus.bus_hdl);
+	}
+
+	return 0;
+}
+
+/*
+ * sde_rotator_parse_dt_clk - parse clock names and rates from DT
+ * @pdev: Platform device carrying the DT node
+ * @mgr: Pointer to rotator manager
+ *
+ * Allocates mgr->rot_clk and fills in each clock's name and optional
+ * fixed rate ("clock-rate" entries default to 0 when absent).
+ * Return: 0 on success, negative errno on failure.
+ */
+static int sde_rotator_parse_dt_clk(struct platform_device *pdev,
+		struct sde_rot_mgr *mgr)
+{
+	u32 i = 0;
+	int rc = 0;
+	const char *clock_name;
+	int num_clk;
+
+	num_clk = of_property_count_strings(pdev->dev.of_node,
+			"clock-names");
+	if (num_clk <= 0) {
+		SDEROT_ERR("clocks are not defined\n");
+		/* report the failure instead of silently returning success */
+		rc = -EINVAL;
+		goto clk_err;
+	}
+
+	mgr->num_rot_clk = num_clk;
+	mgr->rot_clk = devm_kzalloc(&pdev->dev,
+			sizeof(struct sde_rot_clk) * mgr->num_rot_clk,
+			GFP_KERNEL);
+	if (!mgr->rot_clk) {
+		rc = -ENOMEM;
+		mgr->num_rot_clk = 0;
+		goto clk_err;
+	}
+
+	for (i = 0; i < mgr->num_rot_clk; i++) {
+		u32 clock_rate = 0;
+
+		of_property_read_string_index(pdev->dev.of_node, "clock-names",
+				i, &clock_name);
+		strlcpy(mgr->rot_clk[i].clk_name, clock_name,
+				sizeof(mgr->rot_clk[i].clk_name));
+
+		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
+				i, &clock_rate);
+		mgr->rot_clk[i].rate = clock_rate;
+	}
+
+clk_err:
+	return rc;
+}
+
+/*
+ * sde_rotator_register_clk - acquire all rotator clocks from the framework
+ * @pdev: Platform device owning the clocks
+ * @mgr: Pointer to rotator manager
+ *
+ * Parses the DT clock list, looks up each clock handle, and records the
+ * index of the mandatory "rot_core_clk" for later rate control.
+ * Return: 0 on success, negative errno on failure.
+ */
+static int sde_rotator_register_clk(struct platform_device *pdev,
+		struct sde_rot_mgr *mgr)
+{
+	int i, ret;
+	struct clk *clk;
+	struct sde_rot_clk *rot_clk;
+	int core_clk_idx = -1;
+
+	ret = sde_rotator_parse_dt_clk(pdev, mgr);
+	if (ret) {
+		SDEROT_ERR("unable to parse clocks\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < mgr->num_rot_clk; i++) {
+		rot_clk = &mgr->rot_clk[i];
+
+		clk = devm_clk_get(&pdev->dev, rot_clk->clk_name);
+		if (IS_ERR(clk)) {
+			SDEROT_ERR("unable to get clk: %s\n",
+					rot_clk->clk_name);
+			return PTR_ERR(clk);
+		}
+		rot_clk->clk = clk;
+
+		if (strcmp(rot_clk->clk_name, "rot_core_clk") == 0)
+			core_clk_idx = i;
+	}
+
+	/* the core clock is required for rate scaling; fail without it */
+	if (core_clk_idx < 0) {
+		SDEROT_ERR("undefined core clk\n");
+		return -ENXIO;
+	}
+
+	mgr->core_clk_idx = core_clk_idx;
+
+	return 0;
+}
+
+/*
+ * sde_rotator_unregister_clk - release the rotator clock table
+ * @mgr: Pointer to rotator manager
+ *
+ * rot_clk was allocated with devm_kzalloc(); freeing it with plain
+ * kfree() would corrupt the devres list and double-free at device
+ * teardown, so devm_kfree() must be used.
+ */
+static void sde_rotator_unregister_clk(struct sde_rot_mgr *mgr)
+{
+	devm_kfree(&mgr->pdev->dev, mgr->rot_clk);
+	mgr->rot_clk = NULL;
+	mgr->num_rot_clk = 0;
+}
+
+/*
+ * sde_rotator_res_init - acquire regulators, clocks and bus clients
+ * @pdev: Platform device
+ * @mgr: Pointer to rotator manager
+ *
+ * On any failure after the vreg parse, the vreg config is released;
+ * clock handles are devm-managed and bus clients were not yet
+ * registered on those paths.
+ * Return: 0 on success, negative errno on failure.
+ */
+static int sde_rotator_res_init(struct platform_device *pdev,
+	struct sde_rot_mgr *mgr)
+{
+	int ret;
+
+	ret = sde_rotator_get_dt_vreg_data(&pdev->dev, &mgr->module_power);
+	if (ret)
+		return ret;
+
+	ret = sde_rotator_register_clk(pdev, mgr);
+	if (ret)
+		goto error;
+
+	ret = sde_rotator_bus_scale_register(mgr);
+	if (ret)
+		goto error;
+
+	return 0;
+error:
+	sde_rotator_put_dt_vreg_data(&pdev->dev, &mgr->module_power);
+	return ret;
+}
+
+/*
+ * sde_rotator_res_destroy - release regulators, clocks and bus clients
+ * @mgr: Pointer to rotator manager
+ */
+static void sde_rotator_res_destroy(struct sde_rot_mgr *mgr)
+{
+	struct platform_device *pdev = mgr->pdev;
+
+	sde_rotator_put_dt_vreg_data(&pdev->dev, &mgr->module_power);
+	sde_rotator_unregister_clk(mgr);
+	sde_rotator_bus_scale_unregister(mgr);
+}
+
+/*
+ * sde_rotator_core_init - probe-time initialization of the rotator core
+ * @pmgr: Output pointer receiving the new manager on success (NULL on
+ *        failure)
+ * @pdev: Platform device
+ *
+ * Sets up sysfs, parses DT, acquires resources, powers up the block to
+ * read the h/w revision, binds the matching h/w ops and initializes the
+ * work queues. Power and clocks are dropped again before returning.
+ * Return: 0 on success, negative errno on failure.
+ */
+int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
+		struct platform_device *pdev)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	struct sde_rot_mgr *mgr;
+	int ret;
+
+	if (!pmgr || !pdev) {
+		SDEROT_ERR("null parameters\n");
+		return -EINVAL;
+	}
+
+	mgr = devm_kzalloc(&pdev->dev, sizeof(struct sde_rot_mgr),
+		GFP_KERNEL);
+	if (!mgr)
+		return -ENOMEM;
+
+	mgr->pdev = pdev;
+	mgr->device = &pdev->dev;
+	mgr->pending_close_bw_vote = 0;
+	mgr->hwacquire_timeout = ROT_HW_ACQUIRE_TIMEOUT_IN_MS;
+	mgr->queue_count = 1;
+	mgr->pixel_per_clk.numer = ROT_PIXEL_PER_CLK_NUMERATOR;
+	mgr->pixel_per_clk.denom = ROT_PIXEL_PER_CLK_DENOMINATOR;
+
+	mutex_init(&mgr->lock);
+	atomic_set(&mgr->device_suspended, 0);
+	INIT_LIST_HEAD(&mgr->file_list);
+
+	ret = sysfs_create_group(&mgr->device->kobj,
+			&sde_rotator_fs_attr_group);
+	if (ret) {
+		SDEROT_ERR("unable to register rotator sysfs nodes\n");
+		goto error_create_sysfs;
+	}
+
+	ret = sde_rotator_parse_dt(mgr, pdev);
+	if (ret) {
+		SDEROT_ERR("fail to parse the dt\n");
+		goto error_parse_dt;
+	}
+
+	ret = sde_rotator_res_init(pdev, mgr);
+	if (ret) {
+		SDEROT_ERR("res_init failed %d\n", ret);
+		goto error_res_init;
+	}
+
+	*pmgr = mgr;
+
+	pm_runtime_set_suspended(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	if (!pm_runtime_enabled(&pdev->dev)) {
+		SDEROT_ERR("fail to enable power, force on\n");
+		sde_rotator_footswitch_ctrl(mgr, true);
+	}
+
+	/* enable power and clock before h/w initialization/query */
+	sde_rotator_update_clk(mgr);
+	sde_rotator_resource_ctrl(mgr, true);
+	sde_rotator_clk_ctrl(mgr, true);
+
+	mdata->mdss_version = SDE_REG_READ(mdata, SDE_REG_HW_VERSION);
+	SDEROT_INFO("mdss revision %x\n", mdata->mdss_version);
+
+	if ((mdata->mdss_version & 0xFFFF0000) == 0x10070000) {
+		mgr->ops_hw_init = sde_rotator_r1_init;
+	} else {
+		SDEROT_ERR("unsupported sde version %x\n",
+				mdata->mdss_version);
+		/* ret was 0 here; without this the caller saw success */
+		ret = -ENODEV;
+		goto error_map_hw_ops;
+	}
+
+	ret = mgr->ops_hw_init(mgr);
+	if (ret) {
+		SDEROT_ERR("hw init failed %d\n", ret);
+		goto error_hw_init;
+	}
+
+	ret = sde_rotator_init_queue(mgr);
+	if (ret) {
+		SDEROT_ERR("fail to init queue\n");
+		goto error_init_queue;
+	}
+
+	/* disable power and clock after h/w initialization/query */
+	sde_rotator_clk_ctrl(mgr, false);
+	sde_rotator_resource_ctrl(mgr, false);
+
+	return 0;
+
+error_init_queue:
+	mgr->ops_hw_destroy(mgr);
+error_hw_init:
+error_map_hw_ops:
+	/* balance the clock/resource votes taken before the h/w query */
+	sde_rotator_clk_ctrl(mgr, false);
+	sde_rotator_resource_ctrl(mgr, false);
+	pm_runtime_disable(mgr->device);
+	sde_rotator_res_destroy(mgr);
+error_res_init:
+error_parse_dt:
+	sysfs_remove_group(&mgr->device->kobj, &sde_rotator_fs_attr_group);
+error_create_sysfs:
+	devm_kfree(&pdev->dev, mgr);
+	*pmgr = NULL;
+	return ret;
+}
+
+/*
+ * sde_rotator_core_destroy - remove-time teardown of the rotator core
+ * @mgr: Pointer to rotator manager (freed on return)
+ */
+void sde_rotator_core_destroy(struct sde_rot_mgr *mgr)
+{
+	struct device *dev;
+
+	if (!mgr) {
+		SDEROT_ERR("null parameters\n");
+		return;
+	}
+
+	/* cache the device pointer; mgr itself is freed below */
+	dev = mgr->device;
+	sde_rotator_deinit_queue(mgr);
+	mgr->ops_hw_destroy(mgr);
+	sde_rotator_release_all(mgr);
+	pm_runtime_disable(mgr->device);
+	sde_rotator_res_destroy(mgr);
+	sysfs_remove_group(&mgr->device->kobj, &sde_rotator_fs_attr_group);
+	devm_kfree(dev, mgr);
+}
+
+/*
+ * sde_rotator_suspend_cancel_rot_work - cancel outstanding work on suspend
+ * @mgr: Pointer to rotator manager
+ *
+ * Walks every open file context and cancels all of its pending requests.
+ */
+static void sde_rotator_suspend_cancel_rot_work(struct sde_rot_mgr *mgr)
+{
+	struct sde_rot_file_private *ctx, *tmp;
+
+	list_for_each_entry_safe(ctx, tmp, &mgr->file_list, list)
+		sde_rotator_cancel_all_requests(mgr, ctx);
+}
+
+#if defined(CONFIG_PM_RUNTIME)
+/*
+ * sde_rotator_runtime_suspend - Turn off power upon runtime suspend event
+ * @dev: Pointer to device structure
+ */
+int sde_rotator_runtime_suspend(struct device *dev)
+{
+	struct sde_rot_mgr *mgr;
+
+	mgr = sde_rot_mgr_from_device(dev);
+
+	if (!mgr) {
+		SDEROT_ERR("null parameters\n");
+		return -ENODEV;
+	}
+
+	/* clocks must already be off; refuse to cut power under them */
+	if (mgr->rot_enable_clk_cnt) {
+		SDEROT_ERR("invalid runtime suspend request %d\n",
+			mgr->rot_enable_clk_cnt);
+		return -EBUSY;
+	}
+
+	sde_rotator_footswitch_ctrl(mgr, false);
+	ATRACE_END("runtime_active");
+	SDEROT_DBG("exit runtime_active\n");
+	return 0;
+}
+
+/*
+ * sde_rotator_runtime_resume - Turn on power upon runtime resume event
+ * @dev: Pointer to device structure
+ */
+int sde_rotator_runtime_resume(struct device *dev)
+{
+	struct sde_rot_mgr *mgr;
+
+	mgr = sde_rot_mgr_from_device(dev);
+
+	if (!mgr) {
+		SDEROT_ERR("null parameters\n");
+		return -ENODEV;
+	}
+
+	SDEROT_DBG("begin runtime_active\n");
+	ATRACE_BEGIN("runtime_active");
+	/* restore the footswitch dropped in runtime_suspend */
+	sde_rotator_footswitch_ctrl(mgr, true);
+	return 0;
+}
+
+/*
+ * sde_rotator_runtime_idle - check if device is idling
+ * @dev: Pointer to device structure
+ */
+int sde_rotator_runtime_idle(struct device *dev)
+{
+	struct sde_rot_mgr *mgr;
+
+	mgr = sde_rot_mgr_from_device(dev);
+
+	if (!mgr) {
+		SDEROT_ERR("null parameters\n");
+		return -ENODEV;
+	}
+
+	/* add check for any busy status, if any */
+	SDEROT_DBG("idling ...\n");
+	/* returning 0 allows the PM core to proceed with runtime suspend */
+	return 0;
+}
+
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * sde_rotator_pm_suspend - put the device in pm suspend state by cancelling
+ * all active requests
+ * @dev: Pointer to device structure
+ */
+int sde_rotator_pm_suspend(struct device *dev)
+{
+	struct sde_rot_mgr *mgr;
+
+	mgr = sde_rot_mgr_from_device(dev);
+
+	if (!mgr) {
+		SDEROT_ERR("null parameters\n");
+		return -ENODEV;
+	}
+
+
+	sde_rot_mgr_lock(mgr);
+	/* block new opens/submissions, then drain everything in flight */
+	atomic_inc(&mgr->device_suspended);
+	sde_rotator_suspend_cancel_rot_work(mgr);
+	sde_rotator_update_perf(mgr);
+	ATRACE_END("pm_active");
+	SDEROT_DBG("end pm active %d\n", atomic_read(&mgr->device_suspended));
+	sde_rot_mgr_unlock(mgr);
+	return 0;
+}
+
+/*
+ * sde_rotator_pm_resume - put the device in pm active state
+ * @dev: Pointer to device structure
+ */
+int sde_rotator_pm_resume(struct device *dev)
+{
+	struct sde_rot_mgr *mgr;
+
+	mgr = sde_rot_mgr_from_device(dev);
+
+	if (!mgr) {
+		SDEROT_ERR("null parameters\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * It is possible that the runtime status of the device may
+	 * have been active when the system was suspended. Reset the runtime
+	 * status to suspended state after a complete system resume.
+	 */
+	pm_runtime_disable(dev);
+	pm_runtime_set_suspended(dev);
+	pm_runtime_enable(dev);
+
+	sde_rot_mgr_lock(mgr);
+	SDEROT_DBG("begin pm active %d\n", atomic_read(&mgr->device_suspended));
+	ATRACE_BEGIN("pm_active");
+	/* balances the atomic_inc in sde_rotator_pm_suspend */
+	atomic_dec(&mgr->device_suspended);
+	sde_rotator_update_perf(mgr);
+	sde_rot_mgr_unlock(mgr);
+	return 0;
+}
+#endif
+
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+/*
+ * sde_rotator_suspend - legacy suspend entry (no CONFIG_PM_SLEEP builds)
+ * @dev: Platform device
+ * @state: Target power state (unused)
+ *
+ * Mirrors sde_rotator_pm_suspend without the tracing hooks.
+ */
+int sde_rotator_suspend(struct platform_device *dev, pm_message_t state)
+{
+	struct sde_rot_mgr *mgr;
+
+	mgr = sde_rot_mgr_from_pdevice(dev);
+
+	if (!mgr) {
+		SDEROT_ERR("null_parameters\n");
+		return -ENODEV;
+	}
+
+	sde_rot_mgr_lock(mgr);
+	atomic_inc(&mgr->device_suspended);
+	sde_rotator_suspend_cancel_rot_work(mgr);
+	sde_rotator_update_perf(mgr);
+	sde_rot_mgr_unlock(mgr);
+	return 0;
+}
+
+/*
+ * sde_rotator_resume - legacy resume entry (no CONFIG_PM_SLEEP builds)
+ * @dev: Platform device
+ *
+ * Mirrors sde_rotator_pm_resume without the runtime-PM status reset.
+ */
+int sde_rotator_resume(struct platform_device *dev)
+{
+	struct sde_rot_mgr *mgr;
+
+	mgr = sde_rot_mgr_from_pdevice(dev);
+
+	if (!mgr) {
+		SDEROT_ERR("null parameters\n");
+		return -ENODEV;
+	}
+
+	sde_rot_mgr_lock(mgr);
+	atomic_dec(&mgr->device_suspended);
+	sde_rotator_update_perf(mgr);
+	sde_rot_mgr_unlock(mgr);
+	return 0;
+}
+#endif
+
+/*
+ * sde_rotator_session_open - external wrapper for open function
+ * @mgr: Pointer to rotator manager
+ * @pprivate: Output pointer receiving the new per-file context
+ * @session_id: Identifier for the session to open on the new context
+ * @queue: Fence queue to associate with the context
+ *
+ * Note each file open (sde_rot_file_private) is mapped to one session only.
+ * Return: 0 on success, negative errno on failure (context is cleaned up
+ * if session creation fails after a successful open).
+ */
+int sde_rotator_session_open(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private **pprivate, int session_id,
+	struct sde_rot_queue *queue)
+{
+	int ret;
+	struct sde_rot_file_private *private;
+
+	if (!mgr || !pprivate || !queue) {
+		SDEROT_ERR("null parameters\n");
+		return -EINVAL;
+	}
+
+	ret = sde_rotator_open(mgr, &private);
+	if (ret)
+		goto error_open;
+
+	private->mgr = mgr;
+	private->fenceq = queue;
+
+	ret = sde_rotator_open_session(mgr, private, session_id);
+	if (ret)
+		goto error_open_session;
+
+	*pprivate = private;
+
+	return 0;
+error_open_session:
+	sde_rotator_close(mgr, private);
+error_open:
+	return ret;
+}
+
+/*
+ * sde_rotator_session_close - external wrapper for close function
+ * @mgr: Pointer to rotator manager
+ * @private: Per-file context to release (freed on return)
+ * @session_id: Session to close before releasing the context
+ */
+void sde_rotator_session_close(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *private, int session_id)
+{
+	if (!mgr || !private) {
+		SDEROT_ERR("null parameters\n");
+		return;
+	}
+
+	sde_rotator_close_session(mgr, private, session_id);
+	sde_rotator_close(mgr, private);
+
+	SDEROT_DBG("session closed s:%d\n", session_id);
+}
+
+/*
+ * sde_rotator_session_config - external wrapper for config function
+ * @mgr: Pointer to rotator manager
+ * @private: Per-file context owning the session
+ * @config: New session configuration to apply
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int sde_rotator_session_config(struct sde_rot_mgr *mgr,
+	struct sde_rot_file_private *private,
+	struct sde_rotation_config *config)
+{
+	if (!mgr || !private || !config) {
+		SDEROT_ERR("null parameters\n");
+		return -EINVAL;
+	}
+
+	return sde_rotator_config_session(mgr, private, config);
+}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
new file mode 100644
index 000000000000..0693c09e189f
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -0,0 +1,411 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef SDE_ROTATOR_CORE_H
+#define SDE_ROTATOR_CORE_H
+
+#include <linux/list.h>
+#include <linux/file.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/pm_runtime.h>
+
+#include "sde_rotator_base.h"
+#include "sde_rotator_util.h"
+#include "sde_rotator_sync.h"
+
+/**********************************************************************
+Rotation request flag
+**********************************************************************/
+/* no rotation flag */
+#define SDE_ROTATION_NOP 0x01
+
+/* left/right flip */
+#define SDE_ROTATION_FLIP_LR 0x02
+
+/* up/down flip */
+#define SDE_ROTATION_FLIP_UD 0x04
+
+/* rotate 90 degree */
+#define SDE_ROTATION_90 0x08
+
+/* rotate 180 degree */
+#define SDE_ROTATION_180 (SDE_ROTATION_FLIP_LR | SDE_ROTATION_FLIP_UD)
+
+/* rotate 270 degree */
+#define SDE_ROTATION_270 (SDE_ROTATION_90 | SDE_ROTATION_180)
+
+/* format is interlaced */
+#define SDE_ROTATION_DEINTERLACE 0x10
+
+/* secure data */
+#define SDE_ROTATION_SECURE 0x80
+
+/* verify input configuration only */
+#define SDE_ROTATION_VERIFY_INPUT_ONLY 0x10000
+
+/* use client provided dma buf instead of ion fd */
+#define SDE_ROTATION_EXT_DMA_BUF 0x20000
+
+/**********************************************************************
+configuration structures
+**********************************************************************/
+
+/* describes one side (input or output) of a rotation session */
+struct sde_rotation_buf_info {
+	uint32_t width;
+	uint32_t height;
+	uint32_t format;
+	struct sde_mult_factor comp_ratio;	/* compression ratio */
+};
+
+/* per-session configuration exchanged with clients */
+struct sde_rotation_config {
+	uint32_t session_id;
+	struct sde_rotation_buf_info input;
+	struct sde_rotation_buf_info output;
+	uint32_t frame_rate;
+	uint32_t flags;		/* SDE_ROTATION_* request flags */
+};
+
+enum sde_rotator_ts {
+ SDE_ROTATOR_TS_SRCQB, /* enqueue source buffer */
+ SDE_ROTATOR_TS_DSTQB, /* enqueue destination buffer */
+ SDE_ROTATOR_TS_FENCE, /* wait for source buffer fence */
+ SDE_ROTATOR_TS_QUEUE, /* wait for h/w resource */
+ SDE_ROTATOR_TS_COMMIT, /* prepare h/w command */
+ SDE_ROTATOR_TS_FLUSH, /* initiate h/w processing */
+ SDE_ROTATOR_TS_DONE, /* receive h/w completion */
+ SDE_ROTATOR_TS_RETIRE, /* signal destination buffer fence */
+ SDE_ROTATOR_TS_SRCDQB, /* dequeue source buffer */
+ SDE_ROTATOR_TS_DSTDQB, /* dequeue destination buffer */
+ SDE_ROTATOR_TS_MAX
+};
+
+struct sde_rotation_item {
+ /* rotation request flag */
+ uint32_t flags;
+
+ /* Source crop rectangle */
+ struct sde_rect src_rect;
+
+ /* Destination rectangle */
+ struct sde_rect dst_rect;
+
+ /* Input buffer for the request */
+ struct sde_layer_buffer input;
+
+ /* The output buffer for the request */
+ struct sde_layer_buffer output;
+
+ /*
+ * DMA pipe selection for this request by client:
+ * 0: DMA pipe 0
+ * 1: DMA pipe 1
+ * or SDE_ROTATION_HW_ANY if client wants
+ * driver to allocate any that is available
+ *
+ * OR
+ *
+ * Reserved
+ */
+ uint32_t pipe_idx;
+
+ /*
+ * Write-back block selection for this request by client:
+ * 0: Write-back block 0
+ * 1: Write-back block 1
+ * or SDE_ROTATION_HW_ANY if client wants
+ * driver to allocate any that is available
+ *
+ * OR
+ *
+ * Priority selection for this request by client:
+ * 0: Highest
+ * 1..n: Limited by the lowest available priority
+ */
+ uint32_t wb_idx;
+
+ /*
+ * Sequence ID of this request within the session
+ */
+ uint32_t sequence_id;
+
+ /* Which session ID is this request scheduled on */
+ uint32_t session_id;
+
+ /* Time stamp for profiling purposes */
+ ktime_t *ts;
+};
+
+/*
+ * Defining characteristics about rotation work, that has corresponding
+ * fmt and roi checks in open session
+ */
+#define SDE_ROT_DEFINING_FLAG_BITS SDE_ROTATION_90
+
+struct sde_rot_entry;
+struct sde_rot_perf;
+
+/* one rotator clock: handle, DT name, and optional fixed rate */
+struct sde_rot_clk {
+	struct clk *clk;
+	char clk_name[32];
+	unsigned long rate;	/* 0 when no "clock-rate" entry in DT */
+};
+
+/* one rotator h/w write-back resource and its occupancy state */
+struct sde_rot_hw_resource {
+	u32 wb_id;
+	u32 pending_count;
+	atomic_t num_active;
+	int max_active;
+	wait_queue_head_t wait_queue;	/* waiters for a free slot */
+	struct sde_rot_entry *workload;
+};
+
+/* work queue plus fence timeline bound to one h/w resource */
+struct sde_rot_queue {
+	struct workqueue_struct *rot_work_queue;
+	struct sde_rot_timeline *timeline;
+	struct sde_rot_hw_resource *hw;
+};
+
+/* a batch of rotation entries submitted together; see sde_rotator_req_init */
+struct sde_rot_entry_container {
+	struct list_head list;
+	u32 flags;
+	u32 count;		/* number of elements in entries[] */
+	atomic_t pending_count;	/* entries not yet completed */
+	atomic_t failed_count;
+	struct workqueue_struct *retireq;
+	struct work_struct *retire_work;
+	struct sde_rot_entry *entries;	/* array allocated with container */
+};
+
+struct sde_rot_mgr;
+struct sde_rot_file_private;
+
+struct sde_rot_entry {
+ struct sde_rotation_item item;
+ struct work_struct commit_work;
+ struct work_struct done_work;
+ struct sde_rot_queue *commitq;
+ struct sde_rot_queue *fenceq;
+ struct sde_rot_queue *doneq;
+ struct sde_rot_entry_container *request;
+
+ struct sde_mdp_data src_buf;
+ struct sde_mdp_data dst_buf;
+
+ struct sde_rot_sync_fence *input_fence;
+
+ struct sde_rot_sync_fence *output_fence;
+ bool output_signaled;
+
+ u32 dnsc_factor_w;
+ u32 dnsc_factor_h;
+
+ struct sde_rot_perf *perf;
+ bool work_assigned; /* Used when cleaning up work_distribution */
+ struct sde_rot_file_private *private;
+};
+
+struct sde_rot_perf {
+ struct list_head list;
+ struct sde_rotation_config config;
+ unsigned long clk_rate;
+ u64 bw;
+ struct mutex work_dis_lock;
+ u32 *work_distribution;
+ int last_wb_idx; /* last known wb index, used when above count is 0 */
+};
+
+/* per-open-file rotator context; linked into sde_rot_mgr.file_list */
+struct sde_rot_file_private {
+	struct list_head list;		/* node in mgr->file_list */
+	struct list_head req_list;	/* outstanding requests */
+	struct list_head perf_list;	/* open sessions (perf nodes) */
+	struct sde_rot_mgr *mgr;
+	struct sde_rot_queue *fenceq;
+};
+
+/* one msm bus-scaling client (data or register bus) and its current vote */
+struct sde_rot_bus_data_type {
+	struct msm_bus_scale_pdata *bus_scale_pdata;
+	u32 bus_hdl;		/* 0 when not registered */
+	u32 curr_bw_uc_idx;
+	u64 curr_quota_val;
+};
+
+/*
+ * struct sde_rot_mgr - top-level rotator driver state
+ *
+ * All session/request manipulation is serialized by @lock
+ * (sde_rot_mgr_lock/unlock). The ops_hw_* callbacks are installed by
+ * the h/w-revision-specific init (e.g. sde_rotator_r1_init).
+ */
+struct sde_rot_mgr {
+	struct mutex lock;
+	atomic_t device_suspended;
+	struct platform_device *pdev;
+	struct device *device;
+
+	/*
+	 * Managing rotation queues, depends on
+	 * how many hw pipes available on the system
+	 */
+	int queue_count;
+	struct sde_rot_queue *commitq;
+	struct sde_rot_queue *doneq;
+
+	/*
+	 * managing all the open file sessions to bw calculations,
+	 * and resource clean up during suspend
+	 */
+	struct list_head file_list;
+
+	/* bw of sessions closed while their work was still pending */
+	u64 pending_close_bw_vote;
+	struct sde_rot_bus_data_type data_bus;
+	struct sde_rot_bus_data_type reg_bus;
+
+	/* Module power is only used for regulator management */
+	struct sde_module_power module_power;
+	bool regulator_enable;
+
+	int res_ref_cnt;	/* footswitch reference count */
+	int rot_enable_clk_cnt;
+	struct sde_rot_clk *rot_clk;
+	int num_rot_clk;
+	int core_clk_idx;	/* index of "rot_core_clk" in rot_clk[] */
+
+	u32 hwacquire_timeout;
+	struct sde_mult_factor pixel_per_clk;
+
+	int (*ops_config_hw)(struct sde_rot_hw_resource *hw,
+			struct sde_rot_entry *entry);
+	int (*ops_kickoff_entry)(struct sde_rot_hw_resource *hw,
+			struct sde_rot_entry *entry);
+	int (*ops_wait_for_entry)(struct sde_rot_hw_resource *hw,
+			struct sde_rot_entry *entry);
+	struct sde_rot_hw_resource *(*ops_hw_alloc)(struct sde_rot_mgr *mgr,
+			u32 pipe_id, u32 wb_id);
+	void (*ops_hw_free)(struct sde_rot_mgr *mgr,
+			struct sde_rot_hw_resource *hw);
+	int (*ops_hw_init)(struct sde_rot_mgr *mgr);
+	void (*ops_hw_destroy)(struct sde_rot_mgr *mgr);
+	ssize_t (*ops_hw_show_caps)(struct sde_rot_mgr *mgr,
+			struct device_attribute *attr, char *buf, ssize_t len);
+	ssize_t (*ops_hw_show_state)(struct sde_rot_mgr *mgr,
+			struct device_attribute *attr, char *buf, ssize_t len);
+	int (*ops_hw_create_debugfs)(struct sde_rot_mgr *mgr,
+			struct dentry *debugfs_root);
+	int (*ops_hw_validate_entry)(struct sde_rot_mgr *mgr,
+			struct sde_rot_entry *entry);
+
+	void *hw_data;		/* revision-specific private data */
+};
+
+/*
+ * __compare_session_item_rect - check an item rect/format against a session
+ * @s_rect: Session-side buffer info (width/height/format)
+ * @i_rect: Item-side rectangle
+ * @i_fmt: Item-side pixel format
+ * @src: true when comparing the source side (debug message only)
+ *
+ * Return: 0 when dimensions and format match, -EINVAL otherwise.
+ */
+static inline int __compare_session_item_rect(
+	struct sde_rotation_buf_info *s_rect,
+	struct sde_rect *i_rect, uint32_t i_fmt, bool src)
+{
+	if ((s_rect->width != i_rect->w) || (s_rect->height != i_rect->h) ||
+			(s_rect->format != i_fmt)) {
+		SDEROT_DBG(
+			"%s: session{%u,%u}f:%u mismatch from item{%u,%u}f:%u\n",
+			(src ? "src":"dst"), s_rect->width, s_rect->height,
+			s_rect->format, i_rect->w, i_rect->h, i_fmt);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Compare all important flag bits associated with rotation between session
+ * config and item request. Format and roi validation is done during open
+ * session and is based on certain defining bits. If these defining bits are
+ * different in item request, there is a possibility that rotation item
+ * is not a valid configuration.
+ */
+static inline int __compare_session_rotations(uint32_t cfg_flag,
+	uint32_t item_flag)
+{
+	const uint32_t mask = SDE_ROT_DEFINING_FLAG_BITS;
+
+	/* only the defining rotation bits must agree with the session */
+	if ((cfg_flag & mask) == (item_flag & mask))
+		return 0;
+
+	SDEROT_DBG(
+		"Rotation degree request different from open session\n");
+	return -EINVAL;
+}
+
+int sde_rotator_core_init(struct sde_rot_mgr **pmgr,
+ struct platform_device *pdev);
+
+void sde_rotator_core_destroy(struct sde_rot_mgr *mgr);
+
+int sde_rotator_session_open(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private **pprivate, int session_id,
+ struct sde_rot_queue *queue);
+
+void sde_rotator_session_close(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private, int session_id);
+
+int sde_rotator_session_config(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private,
+ struct sde_rotation_config *config);
+
+struct sde_rot_entry_container *sde_rotator_req_init(
+ struct sde_rot_mgr *rot_dev,
+ struct sde_rot_file_private *private,
+ struct sde_rotation_item *items,
+ u32 count, u32 flags);
+
+int sde_rotator_handle_request_common(struct sde_rot_mgr *rot_dev,
+ struct sde_rot_file_private *ctx,
+ struct sde_rot_entry_container *req,
+ struct sde_rotation_item *items);
+
+void sde_rotator_queue_request(struct sde_rot_mgr *rot_dev,
+ struct sde_rot_file_private *ctx,
+ struct sde_rot_entry_container *req);
+
+void sde_rotator_remove_request(struct sde_rot_mgr *mgr,
+ struct sde_rot_file_private *private,
+ struct sde_rot_entry_container *req);
+
+int sde_rotator_verify_config(struct sde_rot_mgr *rot_dev,
+ struct sde_rotation_config *config);
+
+int sde_rotator_validate_request(struct sde_rot_mgr *rot_dev,
+ struct sde_rot_file_private *ctx,
+ struct sde_rot_entry_container *req);
+
+/* serialize all session/request manipulation on the manager */
+static inline void sde_rot_mgr_lock(struct sde_rot_mgr *mgr)
+{
+	mutex_lock(&mgr->lock);
+}
+
+/* counterpart of sde_rot_mgr_lock */
+static inline void sde_rot_mgr_unlock(struct sde_rot_mgr *mgr)
+{
+	mutex_unlock(&mgr->lock);
+}
+
+#if defined(CONFIG_PM_RUNTIME)
+int sde_rotator_runtime_resume(struct device *dev);
+int sde_rotator_runtime_suspend(struct device *dev);
+int sde_rotator_runtime_idle(struct device *dev);
+#endif
+
+#if defined(CONFIG_PM_SLEEP)
+int sde_rotator_pm_suspend(struct device *dev);
+int sde_rotator_pm_resume(struct device *dev);
+#endif
+
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+int sde_rotator_suspend(struct platform_device *dev, pm_message_t state);
+int sde_rotator_resume(struct platform_device *dev);
+#else
+#define sde_rotator_suspend NULL
+#define sde_rotator_resume NULL
+#endif
+#endif /* SDE_ROTATOR_CORE_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
new file mode 100644
index 000000000000..9ce02d21704a
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -0,0 +1,332 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+#include "sde_rotator_debug.h"
+#include "sde_rotator_base.h"
+#include "sde_rotator_core.h"
+#include "sde_rotator_dev.h"
+
+/*
+ * sde_rotator_stat_show - Show statistics on read to this debugfs file
+ * @s: Pointer to sequence file structure
+ * @data: Pointer to private data structure
+ *
+ * Dumps one line per retained event with per-stage latencies in
+ * microseconds, followed by aggregate count/fail/max/min/avg totals.
+ */
+static int sde_rotator_stat_show(struct seq_file *s, void *data)
+{
+ int i, offset;
+ struct sde_rotator_device *rot_dev = s->private;
+ struct sde_rotator_statistics *stats = &rot_dev->stats;
+ u64 count = stats->count;
+ int num_events;
+ s64 proc_max, proc_min, proc_avg;
+
+ proc_max = 0;
+ proc_min = S64_MAX;
+ proc_avg = 0;
+
+ /* ts[] is a ring buffer: once full, the oldest entry lives at
+ * count % SDE_ROTATOR_NUM_EVENTS.
+ */
+ if (count > SDE_ROTATOR_NUM_EVENTS) {
+ num_events = SDE_ROTATOR_NUM_EVENTS;
+ offset = count % SDE_ROTATOR_NUM_EVENTS;
+ } else {
+ num_events = count;
+ offset = 0;
+ }
+
+ for (i = 0; i < num_events; i++) {
+ int k = (offset + i) % SDE_ROTATOR_NUM_EVENTS;
+ ktime_t *ts = stats->ts[k];
+ ktime_t start_time =
+ ktime_before(ts[SDE_ROTATOR_TS_SRCQB],
+ ts[SDE_ROTATOR_TS_DSTQB]) ?
+ ts[SDE_ROTATOR_TS_SRCQB] :
+ ts[SDE_ROTATOR_TS_DSTQB];
+ s64 proc_time =
+ ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_RETIRE],
+ start_time));
+
+ seq_printf(s,
+ "s:%d sq:%lld dq:%lld fe:%lld q:%lld c:%lld fl:%lld d:%lld sdq:%lld ddq:%lld t:%lld\n",
+ i,
+ ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FENCE],
+ ts[SDE_ROTATOR_TS_SRCQB])),
+ ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FENCE],
+ ts[SDE_ROTATOR_TS_DSTQB])),
+ ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_QUEUE],
+ ts[SDE_ROTATOR_TS_FENCE])),
+ ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_COMMIT],
+ ts[SDE_ROTATOR_TS_QUEUE])),
+ ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_FLUSH],
+ ts[SDE_ROTATOR_TS_COMMIT])),
+ ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DONE],
+ ts[SDE_ROTATOR_TS_FLUSH])),
+ ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_RETIRE],
+ ts[SDE_ROTATOR_TS_DONE])),
+ ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_SRCDQB],
+ ts[SDE_ROTATOR_TS_RETIRE])),
+ ktime_to_us(ktime_sub(ts[SDE_ROTATOR_TS_DSTDQB],
+ ts[SDE_ROTATOR_TS_RETIRE])),
+ proc_time);
+
+ proc_max = max(proc_max, proc_time);
+ proc_min = min(proc_min, proc_time);
+ proc_avg += proc_time;
+ }
+
+ proc_avg = (num_events) ?
+ DIV_ROUND_CLOSEST_ULL(proc_avg, num_events) : 0;
+
+ seq_printf(s, "count:%llu\n", count);
+ /* label fixed: previously misspelled "fai1" (digit one) */
+ seq_printf(s, "fail:%llu\n", stats->fail_count);
+ seq_printf(s, "t_max:%lld\n", proc_max);
+ /* NOTE(review): with zero events t_min prints S64_MAX — confirm
+ * whether consumers expect 0 here.
+ */
+ seq_printf(s, "t_min:%lld\n", proc_min);
+ seq_printf(s, "t_avg:%lld\n", proc_avg);
+
+ return 0;
+}
+
+/*
+ * sde_rotator_stat_write - Clear statistics on write to this debugfs file.
+ * @t_file: Pointer to the open debugfs file
+ * @t_char: User-space buffer (content ignored; any write clears stats)
+ * @t_size_t: Number of bytes written
+ * @t_loff_t: Pointer to the file offset
+ */
+static ssize_t sde_rotator_stat_write(struct file *t_file,
+ const char __user *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+ struct seq_file *s = t_file->private_data;
+ struct sde_rotator_device *rot_dev = s->private;
+ struct sde_rotator_statistics *stats = &rot_dev->stats;
+ char buf[128];
+
+ /* take both locks so a concurrent reader/committer never sees a
+ * half-cleared statistics block
+ */
+ mutex_lock(&rot_dev->lock);
+ sde_rot_mgr_lock(rot_dev->mgr);
+ memset(stats, 0, sizeof(struct sde_rotator_statistics));
+ sde_rot_mgr_unlock(rot_dev->mgr);
+ mutex_unlock(&rot_dev->lock);
+ /* consume (and discard) the user data so the write succeeds */
+ return simple_write_to_buffer(buf, sizeof(buf),
+ t_loff_t, t_char, t_size_t);
+}
+
+/*
+ * sde_rotator_raw_show - Show raw statistics on read from this debugfs file
+ * @s: Pointer to sequence file structure
+ * @data: Pointer to private data structure
+ *
+ * Prints every retained event as its raw timestamp list (microseconds),
+ * oldest entry first.
+ */
+static int sde_rotator_raw_show(struct seq_file *s, void *data)
+{
+ struct sde_rotator_device *rot_dev = s->private;
+ struct sde_rotator_statistics *stats = &rot_dev->stats;
+ u64 total = stats->count;
+ int first = 0;
+ int nevts;
+ int idx, col;
+
+ /* ring buffer: when it has wrapped, the oldest slot follows count */
+ if (total > SDE_ROTATOR_NUM_EVENTS) {
+ nevts = SDE_ROTATOR_NUM_EVENTS;
+ first = total % SDE_ROTATOR_NUM_EVENTS;
+ } else {
+ nevts = total;
+ }
+
+ for (idx = 0; idx < nevts; idx++) {
+ ktime_t *ts = stats->ts[(first + idx) % SDE_ROTATOR_NUM_EVENTS];
+
+ seq_printf(s, "%d ", idx);
+ for (col = 0; col < SDE_ROTATOR_NUM_TIMESTAMPS; col++)
+ seq_printf(s, "%lld ", ktime_to_us(ts[col]));
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+
+/*
+ * sde_rotator_stat_open - Processed statistics debugfs file open function
+ * @inode: Pointer to inode of the debugfs node
+ * @file: Pointer to file being opened
+ */
+static int sde_rotator_stat_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sde_rotator_stat_show, inode->i_private);
+}
+
+/*
+ * sde_rotator_raw_open - Raw statistics debugfs file open function
+ * @inode: Pointer to inode of the debugfs node
+ * @file: Pointer to file being opened
+ */
+static int sde_rotator_raw_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sde_rotator_raw_show, inode->i_private);
+}
+
+/*
+ * sde_rotator_base_create_debugfs - Add base driver nodes under debugfs root
+ * @mdata: Pointer to rotator global data
+ * @debugfs_root: Pointer to parent debugfs node
+ */
+static int sde_rotator_base_create_debugfs(
+ struct sde_rot_data_type *mdata,
+ struct dentry *debugfs_root)
+{
+ if (!debugfs_create_u32("iommu_ref_cnt", S_IRUGO,
+ debugfs_root, &mdata->iommu_ref_cnt)) {
+ SDEROT_WARN("failed to create debugfs iommu ref cnt\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * sde_rotator_core_create_debugfs - Add core manager nodes under debugfs root
+ * @mgr: Pointer to rotator manager structure
+ * @debugfs_root: Pointer to parent debugfs node
+ */
+static int sde_rotator_core_create_debugfs(
+ struct sde_rot_mgr *mgr,
+ struct dentry *debugfs_root)
+{
+ int ret;
+
+ if (!debugfs_create_u32("hwacquire_timeout", S_IRUGO | S_IWUSR,
+ debugfs_root, &mgr->hwacquire_timeout)) {
+ SDEROT_WARN("failed to create debugfs hw acquire timeout\n");
+ return -EINVAL;
+ }
+
+ if (!debugfs_create_u32("ppc_numer", S_IRUGO | S_IWUSR,
+ debugfs_root, &mgr->pixel_per_clk.numer)) {
+ SDEROT_WARN("failed to create debugfs ppc numerator\n");
+ return -EINVAL;
+ }
+
+ if (!debugfs_create_u32("ppc_denom", S_IRUGO | S_IWUSR,
+ debugfs_root, &mgr->pixel_per_clk.denom)) {
+ SDEROT_WARN("failed to create debugfs ppc denominator\n");
+ return -EINVAL;
+ }
+
+ /* let the hardware backend add its own nodes, if it provides any */
+ if (mgr->ops_hw_create_debugfs) {
+ ret = mgr->ops_hw_create_debugfs(mgr, debugfs_root);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * struct sde_rotator_stat_ops - processed statistics file operations
+ * (a write to this node clears the accumulated statistics)
+ */
+static const struct file_operations sde_rotator_stat_ops = {
+ .open = sde_rotator_stat_open,
+ .read = seq_read,
+ .write = sde_rotator_stat_write,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+/*
+ * struct sde_rotator_raw_ops - raw statistics file operations (read-only)
+ */
+static const struct file_operations sde_rotator_raw_ops = {
+ .open = sde_rotator_raw_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+/*
+ * sde_rotator_create_debugfs - Setup rotator debugfs directory structure.
+ * @rot_dev: Pointer to rotator device
+ *
+ * Returns the debugfs root dentry on success, NULL on any failure;
+ * on failure all partially created nodes are removed.
+ */
+struct dentry *sde_rotator_create_debugfs(
+ struct sde_rotator_device *rot_dev)
+{
+ struct dentry *debugfs_root;
+ char dirname[32] = {0};
+
+ /* directory name is "<drvname><dev id>", e.g. one per device */
+ snprintf(dirname, sizeof(dirname), "%s%d",
+ SDE_ROTATOR_DRV_NAME, rot_dev->dev->id);
+ debugfs_root = debugfs_create_dir(dirname, NULL);
+ if (!debugfs_root) {
+ SDEROT_ERR("fail create debugfs root\n");
+ return NULL;
+ }
+
+ if (!debugfs_create_file("stats", S_IRUGO | S_IWUSR,
+ debugfs_root, rot_dev, &sde_rotator_stat_ops)) {
+ SDEROT_ERR("fail create debugfs stats\n");
+ debugfs_remove_recursive(debugfs_root);
+ return NULL;
+ }
+
+ if (!debugfs_create_file("raw", S_IRUGO | S_IWUSR,
+ debugfs_root, rot_dev, &sde_rotator_raw_ops)) {
+ SDEROT_ERR("fail create debugfs raw\n");
+ debugfs_remove_recursive(debugfs_root);
+ return NULL;
+ }
+
+ if (!debugfs_create_u32("fence_timeout", S_IRUGO | S_IWUSR,
+ debugfs_root, &rot_dev->fence_timeout)) {
+ SDEROT_ERR("fail create fence_timeout\n");
+ debugfs_remove_recursive(debugfs_root);
+ return NULL;
+ }
+
+ if (!debugfs_create_u32("streamoff_timeout", S_IRUGO | S_IWUSR,
+ debugfs_root, &rot_dev->streamoff_timeout)) {
+ SDEROT_ERR("fail create streamoff_timeout\n");
+ debugfs_remove_recursive(debugfs_root);
+ return NULL;
+ }
+
+ if (!debugfs_create_u32("early_submit", S_IRUGO | S_IWUSR,
+ debugfs_root, &rot_dev->early_submit)) {
+ SDEROT_ERR("fail create early_submit\n");
+ debugfs_remove_recursive(debugfs_root);
+ return NULL;
+ }
+
+ if (sde_rotator_base_create_debugfs(rot_dev->mdata, debugfs_root)) {
+ SDEROT_ERR("fail create base debugfs\n");
+ debugfs_remove_recursive(debugfs_root);
+ return NULL;
+ }
+
+ if (sde_rotator_core_create_debugfs(rot_dev->mgr, debugfs_root)) {
+ SDEROT_ERR("fail create core debugfs\n");
+ debugfs_remove_recursive(debugfs_root);
+ return NULL;
+ }
+
+ return debugfs_root;
+}
+
+/*
+ * sde_rotator_destroy_debugfs - Destroy rotator debugfs directory structure.
+ * @debugfs: Root dentry returned by sde_rotator_create_debugfs
+ */
+void sde_rotator_destroy_debugfs(struct dentry *debugfs)
+{
+ debugfs_remove_recursive(debugfs);
+}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
new file mode 100644
index 000000000000..2ed1b759f3e9
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_ROTATOR_DEBUG_H__
+#define __SDE_ROTATOR_DEBUG_H__
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+
+struct sde_rotator_device;
+
+#if defined(CONFIG_DEBUG_FS)
+/* create the per-device debugfs tree; returns root dentry or NULL */
+struct dentry *sde_rotator_create_debugfs(
+ struct sde_rotator_device *rot_dev);
+
+/* tear down the tree created by sde_rotator_create_debugfs() */
+void sde_rotator_destroy_debugfs(struct dentry *debugfs);
+#else
+/* no-op stubs when debugfs support is compiled out */
+static inline
+struct dentry *sde_rotator_create_debugfs(
+ struct sde_rotator_device *rot_dev)
+{
+ return NULL;
+}
+
+static inline
+void sde_rotator_destroy_debugfs(struct dentry *debugfs)
+{
+}
+#endif
+#endif /* __SDE_ROTATOR_DEBUG_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
new file mode 100644
index 000000000000..f3a5d010c0fb
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -0,0 +1,2700 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/vmalloc.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/ion.h>
+#include <linux/msm_ion.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/of.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "sde_rotator_base.h"
+#include "sde_rotator_core.h"
+#include "sde_rotator_dev.h"
+#include "sde_rotator_debug.h"
+#include "sde_rotator_trace.h"
+
+/* Start v4l2 device number (default allocation) */
+#define SDE_ROTATOR_BASE_DEVICE_NUMBER -1
+
+/* Default value for early_submit flag */
+#define SDE_ROTATOR_EARLY_SUBMIT 1
+
+/* Timeout (msec) waiting for stream to turn off. */
+#define SDE_ROTATOR_STREAM_OFF_TIMEOUT 500
+
+/* acquire fence time out, following other driver fence time out practice */
+#define SDE_ROTATOR_FENCE_TIMEOUT MSEC_PER_SEC
+
+/* Rotator default fps */
+#define SDE_ROTATOR_DEFAULT_FPS 60
+
+/* Rotator rotation angles */
+#define SDE_ROTATOR_DEGREE_270 270
+#define SDE_ROTATOR_DEGREE_180 180
+#define SDE_ROTATOR_DEGREE_90 90
+/*
+ * Format description/mapping
+ * @pixelformat: external format defined in msm_sde_rotator header.
+ *
+ * Note RGBA/8888 naming convention follows internal convention and
+ * is reverse of standard V4L2 convention. Description containing
+ * prefix 'SDE/' refers to SDE specific conventions and/or features.
+ */
+static const struct v4l2_fmtdesc fmtdesc[] = {
+ {
+ .description = "SDE/XRGB_8888",
+ .pixelformat = SDE_PIX_FMT_XRGB_8888,
+ },
+ {
+ .description = "SDE/ARGB_8888",
+ .pixelformat = SDE_PIX_FMT_ARGB_8888,
+ },
+ {
+ .description = "SDE/ABGR_8888",
+ .pixelformat = SDE_PIX_FMT_ABGR_8888,
+ },
+ {
+ .description = "SDE/RGBA_8888",
+ .pixelformat = SDE_PIX_FMT_RGBA_8888,
+ },
+ {
+ .description = "SDE/BGRA_8888",
+ .pixelformat = SDE_PIX_FMT_BGRA_8888,
+ },
+ {
+ .description = "SDE/RGBX_8888",
+ .pixelformat = SDE_PIX_FMT_RGBX_8888,
+ },
+ {
+ .description = "SDE/BGRX_8888",
+ .pixelformat = SDE_PIX_FMT_BGRX_8888,
+ },
+ {
+ .description = "RGBA_5551",
+ .pixelformat = SDE_PIX_FMT_RGBA_5551,
+ },
+ {
+ .description = "ARGB_4444",
+ .pixelformat = SDE_PIX_FMT_ARGB_4444,
+ },
+ {
+ .description = "RGBA_4444",
+ .pixelformat = SDE_PIX_FMT_RGBA_4444,
+ },
+ {
+ .description = "RGB_888",
+ .pixelformat = SDE_PIX_FMT_RGB_888,
+ },
+ {
+ .description = "BGR_888",
+ .pixelformat = SDE_PIX_FMT_BGR_888,
+ },
+ {
+ .description = "RGB_565",
+ .pixelformat = SDE_PIX_FMT_RGB_565,
+ },
+ {
+ .description = "BGR_565",
+ .pixelformat = SDE_PIX_FMT_BGR_565,
+ },
+ {
+ .description = "Y_CB_CR_H2V2",
+ .pixelformat = SDE_PIX_FMT_Y_CB_CR_H2V2,
+ },
+ {
+ .description = "Y_CR_CB_H2V2",
+ .pixelformat = SDE_PIX_FMT_Y_CR_CB_H2V2,
+ },
+ {
+ .description = "SDE/Y_CR_CB_GH2V2",
+ .pixelformat = SDE_PIX_FMT_Y_CR_CB_GH2V2,
+ },
+ {
+ .description = "Y_CBCR_H2V2",
+ .pixelformat = SDE_PIX_FMT_Y_CBCR_H2V2,
+ },
+ {
+ .description = "Y_CRCB_H2V2",
+ .pixelformat = SDE_PIX_FMT_Y_CRCB_H2V2,
+ },
+ {
+ .description = "Y_CBCR_H1V2",
+ .pixelformat = SDE_PIX_FMT_Y_CBCR_H1V2,
+ },
+ {
+ .description = "Y_CRCB_H1V2",
+ .pixelformat = SDE_PIX_FMT_Y_CRCB_H1V2,
+ },
+ {
+ .description = "Y_CBCR_H2V1",
+ .pixelformat = SDE_PIX_FMT_Y_CBCR_H2V1,
+ },
+ {
+ .description = "Y_CRCB_H2V1",
+ .pixelformat = SDE_PIX_FMT_Y_CRCB_H2V1,
+ },
+ {
+ .description = "YCBYCR_H2V1",
+ .pixelformat = SDE_PIX_FMT_YCBYCR_H2V1,
+ },
+ {
+ .description = "SDE/Y_CBCR_H2V2_VENUS",
+ .pixelformat = SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
+ },
+ {
+ .description = "SDE/Y_CRCB_H2V2_VENUS",
+ .pixelformat = SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
+ },
+ {
+ .description = "SDE/RGBA_8888_UBWC",
+ .pixelformat = SDE_PIX_FMT_RGBA_8888_UBWC,
+ },
+ {
+ .description = "SDE/RGBX_8888_UBWC",
+ .pixelformat = SDE_PIX_FMT_RGBX_8888_UBWC,
+ },
+ {
+ .description = "SDE/RGB_565_UBWC",
+ .pixelformat = SDE_PIX_FMT_RGB_565_UBWC,
+ },
+ {
+ .description = "SDE/Y_CBCR_H2V2_UBWC",
+ .pixelformat = SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
+ },
+ {
+ .description = "SDE/RGBA_1010102",
+ .pixelformat = SDE_PIX_FMT_RGBA_1010102,
+ },
+ {
+ .description = "SDE/RGBX_1010102",
+ .pixelformat = SDE_PIX_FMT_RGBX_1010102,
+ },
+ {
+ .description = "SDE/ARGB_2101010",
+ .pixelformat = SDE_PIX_FMT_ARGB_2101010,
+ },
+ {
+ .description = "SDE/XRGB_2101010",
+ .pixelformat = SDE_PIX_FMT_XRGB_2101010,
+ },
+ {
+ .description = "SDE/BGRA_1010102",
+ .pixelformat = SDE_PIX_FMT_BGRA_1010102,
+ },
+ {
+ .description = "SDE/BGRX_1010102",
+ .pixelformat = SDE_PIX_FMT_BGRX_1010102,
+ },
+ {
+ .description = "SDE/ABGR_2101010",
+ .pixelformat = SDE_PIX_FMT_ABGR_2101010,
+ },
+ {
+ .description = "SDE/XBGR_2101010",
+ .pixelformat = SDE_PIX_FMT_XBGR_2101010,
+ },
+ {
+ .description = "SDE/RGBA_1010102_UBWC",
+ .pixelformat = SDE_PIX_FMT_RGBA_1010102_UBWC,
+ },
+ {
+ .description = "SDE/RGBX_1010102_UBWC",
+ .pixelformat = SDE_PIX_FMT_RGBX_1010102_UBWC,
+ },
+ {
+ .description = "SDE/Y_CBCR_H2V2_P010",
+ .pixelformat = SDE_PIX_FMT_Y_CBCR_H2V2_P010,
+ },
+ {
+ .description = "SDE/Y_CBCR_H2V2_TP10",
+ .pixelformat = SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
+ },
+ {
+ .description = "SDE/Y_CBCR_H2V2_TP10_UBWC",
+ .pixelformat = SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
+ },
+};
+
+static void sde_rotator_submit_handler(struct work_struct *work);
+static void sde_rotator_retire_handler(struct work_struct *work);
+#ifdef CONFIG_COMPAT
+static long sde_rotator_compat_ioctl32(struct file *file,
+ unsigned int cmd, unsigned long arg);
+#endif
+
+/*
+ * sde_rotator_ctx_from_fh - Get rotator context from v4l2 fh.
+ * @fh: Pointer to v4l2 fh.
+ *
+ * Works because fh is embedded in struct sde_rotator_ctx.
+ */
+static inline struct sde_rotator_ctx *sde_rotator_ctx_from_fh(
+ struct v4l2_fh *fh)
+{
+ return container_of(fh, struct sde_rotator_ctx, fh);
+}
+
+/*
+ * sde_rotator_get_format_idx - Get rotator format lookup index.
+ * @ctx: Pointer to rotator ctx.
+ * @f: v4l2 format.
+ *
+ * Return: index into fmtdesc[], or -EINVAL if the pixel format is unknown.
+ */
+static int sde_rotator_get_format_idx(struct sde_rotator_ctx *ctx,
+ struct v4l2_format *f)
+{
+ u32 pixfmt = f->fmt.pix.pixelformat;
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(fmtdesc); idx++) {
+ if (fmtdesc[idx].pixelformat == pixfmt)
+ return idx;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * sde_rotator_get_flags_from_ctx - Get low-level command flag
+ * @ctx: Pointer to rotator context.
+ *
+ * Translates the context's v4l2 controls (rotate/hflip/vflip/secure)
+ * into SDE_ROTATION_* flag bits.
+ */
+static uint32_t sde_rotator_get_flags_from_ctx(struct sde_rotator_ctx *ctx)
+{
+ uint32_t ret_flags = 0;
+
+ if (ctx->rotate == SDE_ROTATOR_DEGREE_270)
+ ret_flags |= SDE_ROTATION_270;
+ else if (ctx->rotate == SDE_ROTATOR_DEGREE_180)
+ ret_flags |= SDE_ROTATION_180;
+ else if (ctx->rotate == SDE_ROTATOR_DEGREE_90)
+ ret_flags |= SDE_ROTATION_90;
+ /* XOR, not OR: flips must toggle any flip bits already implied by
+ * the rotation constants above (e.g. 270 = 90 + both flips) —
+ * presumably; confirm against the SDE_ROTATION_* definitions.
+ */
+ if (ctx->hflip)
+ ret_flags ^= SDE_ROTATION_FLIP_LR;
+ if (ctx->vflip)
+ ret_flags ^= SDE_ROTATION_FLIP_UD;
+ if (ctx->secure)
+ ret_flags |= SDE_ROTATION_SECURE;
+ /* interlaced source producing progressive output needs deinterlace */
+ if (ctx->format_out.fmt.pix.field == V4L2_FIELD_INTERLACED &&
+ ctx->format_cap.fmt.pix.field == V4L2_FIELD_NONE)
+ ret_flags |= SDE_ROTATION_DEINTERLACE;
+
+ return ret_flags;
+}
+
+/*
+ * sde_rotator_get_config_from_ctx - Fill rotator configure structure.
+ * @ctx: Pointer to rotator ctx.
+ * @config: Pointer to config structure (fully overwritten).
+ *
+ * Input dimensions come from the OUTPUT queue crop; output dimensions
+ * from the CAPTURE queue crop.
+ */
+static void sde_rotator_get_config_from_ctx(struct sde_rotator_ctx *ctx,
+ struct sde_rotation_config *config)
+{
+ memset(config, 0, sizeof(struct sde_rotation_config));
+ config->flags = sde_rotator_get_flags_from_ctx(ctx);
+ /* fps from timeperframe; 0 when numerator unset (treated as default) */
+ config->frame_rate = (ctx->timeperframe.numerator) ?
+ ctx->timeperframe.denominator
+ / ctx->timeperframe.numerator : 0;
+ config->session_id = ctx->session_id;
+ config->input.width = ctx->crop_out.width;
+ config->input.height = ctx->crop_out.height;
+ config->input.format = ctx->format_out.fmt.pix.pixelformat;
+ config->input.comp_ratio.numer = 1;
+ config->input.comp_ratio.denom = 1;
+ config->output.width = ctx->crop_cap.width;
+ config->output.height = ctx->crop_cap.height;
+ config->output.format = ctx->format_cap.fmt.pix.pixelformat;
+ config->output.comp_ratio.numer = 1;
+ config->output.comp_ratio.denom = 1;
+}
+
+/*
+ * sde_rotator_get_item_from_ctx - Fill rotator item structure.
+ * @ctx: Pointer to rotator ctx.
+ * @item: Pointer to item structure (fully overwritten).
+ *
+ * Buffer fds/fences are left unset (-1/NULL); the caller fills them in
+ * per frame before submission.
+ */
+static void sde_rotator_get_item_from_ctx(struct sde_rotator_ctx *ctx,
+ struct sde_rotation_item *item)
+{
+ memset(item, 0, sizeof(struct sde_rotation_item));
+ item->flags = sde_rotator_get_flags_from_ctx(ctx);
+ item->session_id = ctx->session_id;
+ item->sequence_id = 0;
+ /* assign high/low priority */
+ item->wb_idx = (ctx->priority >= V4L2_PRIORITY_DEFAULT) ? 0 : 1;
+ item->src_rect.x = ctx->crop_out.left;
+ item->src_rect.y = ctx->crop_out.top;
+ item->src_rect.w = ctx->crop_out.width;
+ item->src_rect.h = ctx->crop_out.height;
+ item->input.width = ctx->format_out.fmt.pix.width;
+ item->input.height = ctx->format_out.fmt.pix.height;
+ item->input.format = ctx->format_out.fmt.pix.pixelformat;
+ item->input.planes[0].fd = -1;
+ item->input.planes[0].offset = 0;
+ item->input.planes[0].stride = ctx->format_out.fmt.pix.bytesperline;
+ item->input.plane_count = 1;
+ item->input.fence = NULL;
+ item->input.comp_ratio.numer = 1;
+ item->input.comp_ratio.denom = 1;
+
+ item->dst_rect.x = ctx->crop_cap.left;
+ item->dst_rect.y = ctx->crop_cap.top;
+ item->dst_rect.w = ctx->crop_cap.width;
+ item->dst_rect.h = ctx->crop_cap.height;
+ item->output.width = ctx->format_cap.fmt.pix.width;
+ item->output.height = ctx->format_cap.fmt.pix.height;
+ item->output.format = ctx->format_cap.fmt.pix.pixelformat;
+ item->output.planes[0].fd = -1;
+ item->output.planes[0].offset = 0;
+ item->output.planes[0].stride = ctx->format_cap.fmt.pix.bytesperline;
+ item->output.plane_count = 1;
+ item->output.fence = NULL;
+ item->output.comp_ratio.numer = 1;
+ item->output.comp_ratio.denom = 1;
+}
+
+/*
+ * sde_rotator_format_recalc - Recalculate format parameters.
+ * @f: v4l2 format; bytesperline/sizeimage are recomputed from the
+ * pixel format and dimensions, or zeroed if the format is invalid.
+ */
+static void sde_rotator_format_recalc(struct v4l2_format *f)
+{
+ int ret;
+ struct sde_mdp_format_params *fmt;
+ struct sde_mdp_plane_sizes ps;
+
+ fmt = sde_get_format_params(f->fmt.pix.pixelformat);
+ if (!fmt) {
+ SDEROT_ERR("invalid format\n");
+ goto error_fmt;
+ }
+
+ ret = sde_mdp_get_plane_sizes(fmt,
+ f->fmt.pix.width, f->fmt.pix.height, &ps, 0, 0);
+ if (ret) {
+ SDEROT_ERR("invalid plane size\n");
+ goto error_fmt;
+ }
+
+ /* plane 0 stride and the total size across all planes */
+ f->fmt.pix.bytesperline = ps.ystride[0];
+ f->fmt.pix.sizeimage = ps.total_size;
+
+ return;
+error_fmt:
+ /* zeroed sizes signal an unusable format to callers */
+ f->fmt.pix.bytesperline = 0;
+ f->fmt.pix.sizeimage = 0;
+}
+
+/*
+ * sde_rotator_validate_item - Check if rotator item is valid for processing.
+ * @ctx: Pointer to rotator ctx.
+ * @item: Pointer to item structure
+ *
+ * Return: 0 if the item can be processed, negative errno otherwise.
+ */
+static int sde_rotator_validate_item(struct sde_rotator_ctx *ctx,
+ struct sde_rotation_item *item)
+{
+ int ret;
+ struct sde_rot_entry_container *req;
+ struct sde_rotator_device *rot_dev = ctx->rot_dev;
+
+ sde_rot_mgr_lock(rot_dev->mgr);
+ req = sde_rotator_req_init(rot_dev->mgr, ctx->private, item, 1, 0);
+ if (IS_ERR_OR_NULL(req)) {
+ SDEDEV_ERR(rot_dev->dev, "fail allocate item\n");
+ /* must drop the manager lock on the error path too */
+ sde_rot_mgr_unlock(rot_dev->mgr);
+ return -ENOMEM;
+ }
+
+ ret = sde_rotator_validate_request(rot_dev->mgr, ctx->private, req);
+ sde_rot_mgr_unlock(rot_dev->mgr);
+ /* req was a throw-away allocation used only for validation */
+ devm_kfree(rot_dev->dev, req);
+ return ret;
+}
+
+/*
+ * sde_rotator_queue_setup - vb2_ops queue_setup callback.
+ * @q: Pointer to vb2 queue struct.
+ * @fmt: Pointer to v4l2 format struct (NULL is valid argument).
+ * @num_buffers: Pointer of number of buffers requested.
+ * @num_planes: Pointer to number of planes requested.
+ * @sizes: Array containing sizes of planes.
+ * @alloc_ctxs: Array of allocated contexts for each plane.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int sde_rotator_queue_setup(struct vb2_queue *q,
+ const struct v4l2_format *fmt,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct sde_rotator_ctx *ctx = vb2_get_drv_priv(q);
+ int i;
+
+ if (!num_buffers)
+ return -EINVAL;
+
+ if (!fmt) {
+ /* no format hint; use the currently configured format */
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ sizes[0] = ctx->format_out.fmt.pix.sizeimage;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ sizes[0] = ctx->format_cap.fmt.pix.sizeimage;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ sizes[0] = fmt->fmt.pix.sizeimage;
+ }
+
+ /* single-plane only; context doubles as the allocation context */
+ *num_planes = 1;
+ alloc_ctxs[0] = ctx;
+
+ /* (re)allocate per-buffer bookkeeping; kcalloc guards the
+ * count * size multiplication against overflow
+ */
+ switch (q->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ ctx->nbuf_out = *num_buffers;
+ kfree(ctx->vbinfo_out);
+ ctx->vbinfo_out = kcalloc(ctx->nbuf_out,
+ sizeof(struct sde_rotator_vbinfo), GFP_KERNEL);
+ if (!ctx->vbinfo_out)
+ return -ENOMEM;
+ for (i = 0; i < ctx->nbuf_out; i++)
+ ctx->vbinfo_out[i].fd = -1;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ ctx->nbuf_cap = *num_buffers;
+ kfree(ctx->vbinfo_cap);
+ ctx->vbinfo_cap = kcalloc(ctx->nbuf_cap,
+ sizeof(struct sde_rotator_vbinfo), GFP_KERNEL);
+ if (!ctx->vbinfo_cap)
+ return -ENOMEM;
+ for (i = 0; i < ctx->nbuf_cap; i++)
+ ctx->vbinfo_cap[i].fd = -1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * sde_rotator_buf_queue - vb2_ops buf_queue callback.
+ * @vb: Pointer to vb2 buffer struct.
+ *
+ * Simply hands the buffer to the m2m framework for scheduling.
+ */
+static void sde_rotator_buf_queue(struct vb2_buffer *vb)
+{
+ struct sde_rotator_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+}
+
+/*
+ * sde_rotator_return_all_buffers - Return all buffers with the given status.
+ * @q: Pointer to vb2 buffer queue struct.
+ * @state: State of the buffer
+ *
+ * Drains the m2m src or dst ready list (depending on queue type) and
+ * marks every buffer done with @state.
+ */
+static void sde_rotator_return_all_buffers(struct vb2_queue *q,
+ enum vb2_buffer_state state)
+{
+ struct sde_rotator_ctx *ctx = vb2_get_drv_priv(q);
+ struct sde_rotator_device *rot_dev = ctx->rot_dev;
+
+ SDEDEV_DBG(rot_dev->dev,
+ "return q t:%d c:%d dc:%d s:%d\n",
+ q->type, q->queued_count,
+ atomic_read(&q->owned_by_drv_count),
+ state);
+
+ /* return buffers according videobuffer2-core.h */
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ struct vb2_buffer *buf;
+
+ while ((buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
+ SDEDEV_DBG(rot_dev->dev,
+ "return vb t:%d i:%d\n",
+ buf->v4l2_buf.type,
+ buf->v4l2_buf.index);
+ v4l2_m2m_buf_done(buf, state);
+ }
+ } else if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ struct vb2_buffer *buf;
+
+ while ((buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) {
+ SDEDEV_DBG(rot_dev->dev,
+ "return vb t:%d i:%d\n",
+ buf->v4l2_buf.type,
+ buf->v4l2_buf.index);
+ v4l2_m2m_buf_done(buf, state);
+ }
+ } else {
+ SDEDEV_ERR(rot_dev->dev, "unsupported vb t:%d\n", q->type);
+ }
+}
+
+/*
+ * sde_rotator_start_streaming - vb2_ops start_streaming callback.
+ * @q: Pointer to vb2 queue struct.
+ * @count: Number of buffer queued before stream on call.
+ *
+ * Pushes the current context configuration to the rotator manager and
+ * resets per-stream request/abort bookkeeping.
+ */
+static int sde_rotator_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct sde_rotator_ctx *ctx = vb2_get_drv_priv(q);
+ struct sde_rotator_device *rot_dev = ctx->rot_dev;
+ struct sde_rotation_config config;
+ int ret;
+
+ SDEDEV_DBG(rot_dev->dev, "start streaming s:%d t:%d\n",
+ ctx->session_id, q->type);
+
+ sde_rot_mgr_lock(rot_dev->mgr);
+ sde_rotator_get_config_from_ctx(ctx, &config);
+ ret = sde_rotator_session_config(rot_dev->mgr, ctx->private, &config);
+ sde_rot_mgr_unlock(rot_dev->mgr);
+ if (ret < 0) {
+ SDEDEV_ERR(rot_dev->dev,
+ "fail config in stream on s:%d t:%d r:%d\n",
+ ctx->session_id, q->type, ret);
+ return -EINVAL;
+ }
+
+ /* a leftover request/pending command here indicates the previous
+ * stream did not wind down cleanly; log it, then reset state anyway
+ */
+ if (!IS_ERR_OR_NULL(ctx->request) ||
+ atomic_read(&ctx->command_pending))
+ SDEDEV_ERR(rot_dev->dev,
+ "command pending error s:%d t:%d p:%d\n",
+ ctx->session_id, q->type,
+ atomic_read(&ctx->command_pending));
+
+ ctx->request = NULL;
+ ctx->abort_pending = 0;
+ atomic_set(&ctx->command_pending, 0);
+
+ return 0;
+}
+
+/*
+ * sde_rotator_stop_streaming - vb2_ops stop_streaming callback.
+ * @q: Pointer to vb2 queue struct.
+ *
+ * This function will block waiting for stream to stop. Unlock queue
+ * lock to avoid deadlock.
+ */
+static void sde_rotator_stop_streaming(struct vb2_queue *q)
+{
+ struct sde_rotator_ctx *ctx = vb2_get_drv_priv(q);
+ struct sde_rotator_device *rot_dev = ctx->rot_dev;
+ int i;
+ int ret;
+
+ SDEDEV_DBG(rot_dev->dev, "stop streaming s:%d t:%d p:%d\n",
+ ctx->session_id, q->type,
+ atomic_read(&ctx->command_pending));
+ ctx->abort_pending = 1;
+ /* drop the queue lock so in-flight work can complete and wake us */
+ mutex_unlock(q->lock);
+ ret = wait_event_timeout(ctx->wait_queue,
+ (atomic_read(&ctx->command_pending) == 0),
+ msecs_to_jiffies(rot_dev->streamoff_timeout));
+ mutex_lock(q->lock);
+ if (!ret)
+ SDEDEV_ERR(rot_dev->dev,
+ "timeout to stream off s:%d t:%d p:%d\n",
+ ctx->session_id, q->type,
+ atomic_read(&ctx->command_pending));
+
+ sde_rotator_return_all_buffers(q, VB2_BUF_STATE_ERROR);
+
+ /* clear fence for buffer */
+ sde_rotator_resync_timeline(ctx->work_queue.timeline);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ for (i = 0; i < ctx->nbuf_cap; i++) {
+ struct sde_rotator_vbinfo *vbinfo =
+ &ctx->vbinfo_cap[i];
+
+ /* fd >= 0 means userspace owns the fence reference */
+ if (vbinfo->fence && vbinfo->fd < 0) {
+ /* fence is not used */
+ SDEDEV_DBG(rot_dev->dev,
+ "put fence s:%d t:%d i:%d\n",
+ ctx->session_id, q->type, i);
+ sde_rotator_put_sync_fence(vbinfo->fence);
+ }
+ vbinfo->fence = NULL;
+ vbinfo->fd = -1;
+ }
+ } else if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ for (i = 0; i < ctx->nbuf_out; i++) {
+ struct sde_rotator_vbinfo *vbinfo =
+ &ctx->vbinfo_out[i];
+
+ if (vbinfo->fence) {
+ SDEDEV_DBG(rot_dev->dev,
+ "put fence s:%d t:%d i:%d\n",
+ ctx->session_id, q->type, i);
+ sde_rotator_put_sync_fence(vbinfo->fence);
+ }
+ vbinfo->fence = NULL;
+ vbinfo->fd = -1;
+ }
+ }
+}
+
+/* Videobuf2 queue callbacks; wait_prepare/finish use the stock helpers
+ * that drop/retake the queue lock around blocking waits.
+ */
+static struct vb2_ops sde_rotator_vb2_q_ops = {
+ .queue_setup = sde_rotator_queue_setup,
+ .buf_queue = sde_rotator_buf_queue,
+ .start_streaming = sde_rotator_start_streaming,
+ .stop_streaming = sde_rotator_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/*
+ * sde_rotator_get_userptr - Map and get buffer handler for user pointer buffer.
+ * @alloc_ctx: Contexts allocated in buf_setup.
+ * @vaddr: Virtual addr passed from userpsace (in our case ion fd)
+ * @size: Size of the buffer
+ * @write: True if buffer will be used for writing the data.
+ *
+ * Return: buffer handle on success, ERR_PTR on failure.
+ */
+static void *sde_rotator_get_userptr(void *alloc_ctx,
+ unsigned long vaddr, unsigned long size, int write)
+{
+ struct sde_rotator_ctx *ctx = alloc_ctx;
+ struct sde_rotator_device *rot_dev = ctx->rot_dev;
+ struct sde_rotator_buf_handle *buf;
+ long rc;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ /* vaddr carries a dma_buf fd rather than a virtual address */
+ buf->fd = vaddr;
+ buf->secure = ctx->secure;
+ buf->ctx = ctx;
+ buf->rot_dev = rot_dev;
+ buf->buffer = dma_buf_get(buf->fd);
+
+ if (IS_ERR_OR_NULL(buf->buffer)) {
+ SDEDEV_ERR(rot_dev->dev, "fail get dmabuf fd:%d r:%ld\n",
+ buf->fd, PTR_ERR(buf->buffer));
+ goto error_dma_buf_get;
+ }
+
+ SDEDEV_DBG(rot_dev->dev, "get dmabuf s:%d fd:%d buf:%pad\n",
+ buf->ctx->session_id,
+ buf->fd, &buf->buffer);
+ return buf;
+error_dma_buf_get:
+ /* propagate the real dma_buf_get() error instead of blanket -ENOMEM */
+ rc = buf->buffer ? PTR_ERR(buf->buffer) : -ENOMEM;
+ kfree(buf);
+ return ERR_PTR(rc);
+}
+
+/*
+ * sde_rotator_put_userptr - Unmap and free buffer handler.
+ * @buf_priv: Buffer handler allocated get_userptr callback.
+ *
+ * Safe to call with NULL/ERR_PTR (no-op).
+ */
+static void sde_rotator_put_userptr(void *buf_priv)
+{
+ struct sde_rotator_buf_handle *buf = buf_priv;
+
+ if (IS_ERR_OR_NULL(buf))
+ return;
+
+ if (!buf->rot_dev || !buf->ctx) {
+ WARN_ON(!buf->rot_dev || !buf->ctx);
+ SDEROT_ERR("null rotator device/context\n");
+ return;
+ }
+
+ SDEDEV_DBG(buf->rot_dev->dev, "put dmabuf s:%d fd:%d buf:%pad\n",
+ buf->ctx->session_id,
+ buf->fd, &buf->buffer);
+
+ /* drop the dma_buf reference taken in get_userptr */
+ if (buf->buffer) {
+ dma_buf_put(buf->buffer);
+ buf->buffer = NULL;
+ }
+
+ kfree(buf_priv);
+}
+
+/* Videobuf2 memory callbacks (userptr mode only; "userptr" is a dma_buf fd). */
+static struct vb2_mem_ops sde_rotator_vb2_mem_ops = {
+ .get_userptr = sde_rotator_get_userptr,
+ .put_userptr = sde_rotator_put_userptr,
+};
+
+/*
+ * sde_rotator_s_ctx_ctrl - set context control variable to v4l2 control
+ * @ctx: Pointer to rotator context.
+ * @ctx_ctrl: Pointer to context control variable
+ * @ctrl: Pointer to v4l2 control variable
+ *
+ * Applies the new value, pushes the resulting config to the manager,
+ * and rolls the value back if the manager rejects it.
+ * Caller must hold the manager lock.
+ */
+static int sde_rotator_s_ctx_ctrl(struct sde_rotator_ctx *ctx,
+ s32 *ctx_ctrl, struct v4l2_ctrl *ctrl)
+{
+ struct sde_rotator_device *rot_dev = ctx->rot_dev;
+ struct sde_rotation_config config;
+ s32 prev_val;
+ int ret;
+
+ prev_val = *ctx_ctrl;
+ *ctx_ctrl = ctrl->val;
+ sde_rotator_get_config_from_ctx(ctx, &config);
+ ret = sde_rotator_session_config(rot_dev->mgr, ctx->private, &config);
+ if (ret) {
+ SDEDEV_WARN(rot_dev->dev, "fail %s:%d s:%d\n",
+ ctrl->name, ctrl->val, ctx->session_id);
+ /* restore previous value on rejection */
+ *ctx_ctrl = prev_val;
+ }
+
+ return ret;
+}
+
+/*
+ * sde_rotator_s_ctrl - Set control.
+ * @ctrl: Pointer to v4l2 control structure.
+ */
+static int sde_rotator_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct sde_rotator_ctx *ctx =
+ container_of(ctrl->handler,
+ struct sde_rotator_ctx, ctrl_handler);
+ struct sde_rotator_device *rot_dev = ctx->rot_dev;
+ int ret;
+
+ SDEDEV_DBG(rot_dev->dev, "set %s:%d s:%d\n", ctrl->name, ctrl->val,
+ ctx->session_id);
+
+ sde_rot_mgr_lock(rot_dev->mgr);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ ret = sde_rotator_s_ctx_ctrl(ctx, &ctx->hflip, ctrl);
+ break;
+
+ case V4L2_CID_VFLIP:
+ ret = sde_rotator_s_ctx_ctrl(ctx, &ctx->vflip, ctrl);
+ break;
+
+ case V4L2_CID_ROTATE:
+ ret = sde_rotator_s_ctx_ctrl(ctx, &ctx->rotate, ctrl);
+ break;
+
+ case V4L2_CID_SDE_ROTATOR_SECURE:
+ ret = sde_rotator_s_ctx_ctrl(ctx, &ctx->secure, ctrl);
+ break;
+
+ default:
+ v4l2_warn(&rot_dev->v4l2_dev, "invalid control %d\n", ctrl->id);
+ ret = -EINVAL;
+ }
+
+ sde_rot_mgr_unlock(rot_dev->mgr);
+ return ret;
+}
+
/*
 * sde_rotator_ctrl_ops - Control operations.
 *
 * Only the set hook is needed: all rotator controls are plain values
 * cached in the context and pushed to the session on change.
 */
static const struct v4l2_ctrl_ops sde_rotator_ctrl_ops = {
	.s_ctrl = sde_rotator_s_ctrl,
};
+
/*
 * sde_rotator_ctrl_secure - Non-secure/Secure domain custom control.
 *
 * Integer control: 0 = non-secure (default), 1 = secure. The value is
 * cached in ctx->secure via sde_rotator_s_ctrl.
 */
static const struct v4l2_ctrl_config sde_rotator_ctrl_secure = {
	.ops = &sde_rotator_ctrl_ops,
	.id = V4L2_CID_SDE_ROTATOR_SECURE,
	.name = "Non-secure/Secure Domain",
	.type = V4L2_CTRL_TYPE_INTEGER,
	.def = 0,
	.min = 0,
	.max = 1,
	.step = 1,
};
+
+/*
+ * sde_rotator_ctx_show - show context state.
+ */
+static ssize_t sde_rotator_ctx_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ size_t len = PAGE_SIZE;
+ int cnt = 0;
+ struct sde_rotator_ctx *ctx =
+ container_of(kobj, struct sde_rotator_ctx, kobj);
+
+ if (!ctx)
+ return cnt;
+
+#define SPRINT(fmt, ...) \
+ (cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
+
+ SPRINT("rotate=%d\n", ctx->rotate);
+ SPRINT("hflip=%d\n", ctx->hflip);
+ SPRINT("vflip=%d\n", ctx->vflip);
+ SPRINT("priority=%d\n", ctx->priority);
+ SPRINT("secure=%d\n", ctx->secure);
+ SPRINT("timeperframe=%u %u\n", ctx->timeperframe.numerator,
+ ctx->timeperframe.denominator);
+ SPRINT("nbuf_out=%d\n", ctx->nbuf_out);
+ SPRINT("nbuf_cap=%d\n", ctx->nbuf_cap);
+ SPRINT("crop_out=%u %u %u %u\n",
+ ctx->crop_out.left, ctx->crop_out.top,
+ ctx->crop_out.width, ctx->crop_out.height);
+ SPRINT("crop_cap=%u %u %u %u\n",
+ ctx->crop_cap.left, ctx->crop_cap.top,
+ ctx->crop_cap.width, ctx->crop_cap.height);
+ SPRINT("fmt_out=%c%c%c%c %u %u %u %u\n",
+ (ctx->format_out.fmt.pix.pixelformat>>0)&0xff,
+ (ctx->format_out.fmt.pix.pixelformat>>8)&0xff,
+ (ctx->format_out.fmt.pix.pixelformat>>16)&0xff,
+ (ctx->format_out.fmt.pix.pixelformat>>24)&0xff,
+ ctx->format_out.fmt.pix.width,
+ ctx->format_out.fmt.pix.height,
+ ctx->format_out.fmt.pix.bytesperline,
+ ctx->format_out.fmt.pix.sizeimage);
+ SPRINT("fmt_cap=%c%c%c%c %u %u %u %u\n",
+ (ctx->format_cap.fmt.pix.pixelformat>>0)&0xff,
+ (ctx->format_cap.fmt.pix.pixelformat>>8)&0xff,
+ (ctx->format_cap.fmt.pix.pixelformat>>16)&0xff,
+ (ctx->format_cap.fmt.pix.pixelformat>>24)&0xff,
+ ctx->format_cap.fmt.pix.width,
+ ctx->format_cap.fmt.pix.height,
+ ctx->format_cap.fmt.pix.bytesperline,
+ ctx->format_cap.fmt.pix.sizeimage);
+ SPRINT("abort_pending=%d\n", ctx->abort_pending);
+ SPRINT("command_pending=%d\n", atomic_read(&ctx->command_pending));
+ SPRINT("submit_work=%d\n", work_busy(&ctx->submit_work));
+ SPRINT("retire_work=%d\n", work_busy(&ctx->retire_work));
+ SPRINT("sequence=%u\n",
+ sde_rotator_get_timeline_commit_ts(ctx->work_queue.timeline));
+ SPRINT("timestamp=%u\n",
+ sde_rotator_get_timeline_retire_ts(ctx->work_queue.timeline));
+ return cnt;
+}
+
+static struct kobj_attribute sde_rotator_ctx_attr =
+ __ATTR(state, 0664, sde_rotator_ctx_show, NULL);
+
+static struct attribute *sde_rotator_fs_attrs[] = {
+ &sde_rotator_ctx_attr.attr,
+ NULL
+};
+
+static struct attribute_group sde_rotator_fs_attr_group = {
+ .attrs = sde_rotator_fs_attrs
+};
+
/*
 * sde_rotator_fs_show - sysfs show callback; dispatches to the
 * kobj_attribute's show handler when one is registered, otherwise
 * returns -EIO.
 */
static ssize_t sde_rotator_fs_show(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	ssize_t ret = -EIO;
	struct kobj_attribute *kattr =
		container_of(attr, struct kobj_attribute, attr);
	if (kattr->show)
		ret = kattr->show(kobj, kattr, buf);
	return ret;
}
+
+/*
+ * sde_rotator_fs_store - sysfs store callback.
+ */
+static ssize_t sde_rotator_fs_store(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ ssize_t ret = -EIO;
+ struct kobj_attribute *kattr =
+ container_of(attr, struct kobj_attribute, attr);
+ if (kattr->store)
+ ret = kattr->store(kobj, kattr, buf, count);
+ return ret;
+}
+
/* Generic sysfs ops dispatching to the kobj_attribute handlers above */
static const struct sysfs_ops sde_rotator_fs_ops = {
	.show = sde_rotator_fs_show,
	.store = sde_rotator_fs_store,
};

/*
 * kobj_type for per-session kobjects. No release callback: the context
 * memory is freed explicitly in sde_rotator_release, not by kobject
 * refcounting.
 */
static struct kobj_type sde_rotator_fs_ktype = {
	.sysfs_ops = &sde_rotator_fs_ops,
};
+
+/*
+ * sde_rotator_queue_init - m2m_ops queue_setup callback.
+ * @priv: Pointer to rotator ctx.
+ * @src_vq: vb2 source queue.
+ * @dst_vq: vb2 destination queue.
+ */
+static int sde_rotator_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct sde_rotator_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ src_vq->io_modes = VB2_USERPTR;
+ src_vq->drv_priv = ctx;
+ src_vq->mem_ops = &sde_rotator_vb2_mem_ops;
+ src_vq->ops = &sde_rotator_vb2_q_ops;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->rot_dev->lock;
+ src_vq->min_buffers_needed = 1;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret) {
+ SDEDEV_ERR(ctx->rot_dev->dev,
+ "fail init src queue r:%d\n", ret);
+ return ret;
+ }
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dst_vq->io_modes = VB2_USERPTR;
+ dst_vq->drv_priv = ctx;
+ dst_vq->mem_ops = &sde_rotator_vb2_mem_ops;
+ dst_vq->ops = &sde_rotator_vb2_q_ops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->rot_dev->lock;
+ dst_vq->min_buffers_needed = 1;
+
+ ret = vb2_queue_init(dst_vq);
+ if (ret) {
+ SDEDEV_ERR(ctx->rot_dev->dev,
+ "fail init dst queue r:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * sde_rotator_open - Rotator device open method.
+ * @file: Pointer to file struct.
+ */
+static int sde_rotator_open(struct file *file)
+{
+ struct sde_rotator_device *rot_dev = video_drvdata(file);
+ struct video_device *video = video_devdata(file);
+ struct sde_rotator_ctx *ctx;
+ struct v4l2_ctrl_handler *ctrl_handler;
+ char name[32];
+ int ret;
+
+ if (atomic_read(&rot_dev->mgr->device_suspended))
+ return -EPERM;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&rot_dev->lock)) {
+ ret = -ERESTARTSYS;
+ goto error_lock;
+ }
+
+ ctx->rot_dev = rot_dev;
+
+ /* Set context defaults */
+ ctx->session_id = rot_dev->session_id++;
+ SDEDEV_DBG(ctx->rot_dev->dev, "open %d\n", ctx->session_id);
+ ctx->timeperframe.numerator = 1;
+ ctx->timeperframe.denominator = SDE_ROTATOR_DEFAULT_FPS;
+ ctx->hflip = 0;
+ ctx->vflip = 0;
+ ctx->rotate = 0;
+ ctx->priority = V4L2_PRIORITY_DEFAULT;
+ ctx->secure = 0;
+ atomic_set(&ctx->command_pending, 0);
+ ctx->abort_pending = 0;
+ ctx->format_cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ ctx->format_cap.fmt.pix.pixelformat = SDE_PIX_FMT_Y_CBCR_H2V2;
+ ctx->format_cap.fmt.pix.width = 640;
+ ctx->format_cap.fmt.pix.height = 480;
+ ctx->crop_cap.width = 640;
+ ctx->crop_cap.height = 480;
+ ctx->format_out.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ctx->format_out.fmt.pix.pixelformat = SDE_PIX_FMT_Y_CBCR_H2V2;
+ ctx->format_out.fmt.pix.width = 640;
+ ctx->format_out.fmt.pix.height = 480;
+ ctx->crop_out.width = 640;
+ ctx->crop_out.height = 480;
+ init_waitqueue_head(&ctx->wait_queue);
+ INIT_WORK(&ctx->submit_work, sde_rotator_submit_handler);
+ INIT_WORK(&ctx->retire_work, sde_rotator_retire_handler);
+
+ v4l2_fh_init(&ctx->fh, video);
+ file->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(rot_dev->m2m_dev,
+ ctx, sde_rotator_queue_init);
+ if (IS_ERR_OR_NULL(ctx->m2m_ctx)) {
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto error_m2m_init;
+ }
+
+ ret = kobject_init_and_add(&ctx->kobj, &sde_rotator_fs_ktype,
+ &rot_dev->dev->kobj, "session_%d", ctx->session_id);
+ if (ret) {
+ SDEDEV_ERR(ctx->rot_dev->dev,
+ "fail initialize context kobject\n");
+ goto error_kobj_init;
+ }
+
+ ret = sysfs_create_group(&ctx->kobj, &sde_rotator_fs_attr_group);
+ if (ret) {
+ SDEDEV_ERR(ctx->rot_dev->dev,
+ "fail register rotator sysfs nodes\n");
+ goto error_create_sysfs;
+ }
+
+ snprintf(name, sizeof(name), "rot_fenceq_%d_%d", rot_dev->dev->id,
+ ctx->session_id);
+ ctx->work_queue.rot_work_queue = alloc_ordered_workqueue("%s",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, name);
+ if (!ctx->work_queue.rot_work_queue) {
+ SDEDEV_ERR(ctx->rot_dev->dev, "fail allocate workqueue\n");
+ ret = -EPERM;
+ goto error_alloc_workqueue;
+ }
+ SDEDEV_DBG(ctx->rot_dev->dev, "work queue name=%s\n", name);
+
+ snprintf(name, sizeof(name), "%d_%d", rot_dev->dev->id,
+ ctx->session_id);
+ ctx->work_queue.timeline = sde_rotator_create_timeline(name);
+ if (!ctx->work_queue.timeline)
+ SDEDEV_DBG(ctx->rot_dev->dev, "timeline is not available\n");
+
+ sde_rot_mgr_lock(rot_dev->mgr);
+ ret = sde_rotator_session_open(rot_dev->mgr, &ctx->private,
+ ctx->session_id, &ctx->work_queue);
+ if (ret < 0) {
+ SDEDEV_ERR(ctx->rot_dev->dev, "fail open session\n");
+ goto error_open_session;
+ }
+ sde_rot_mgr_unlock(rot_dev->mgr);
+
+ /* Create control */
+ ctrl_handler = &ctx->ctrl_handler;
+ v4l2_ctrl_handler_init(ctrl_handler, 4);
+ v4l2_ctrl_new_std(ctrl_handler,
+ &sde_rotator_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(ctrl_handler,
+ &sde_rotator_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(ctrl_handler,
+ &sde_rotator_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0);
+ v4l2_ctrl_new_custom(ctrl_handler,
+ &sde_rotator_ctrl_secure, NULL);
+ if (ctrl_handler->error) {
+ ret = ctrl_handler->error;
+ v4l2_ctrl_handler_free(ctrl_handler);
+ goto error_ctrl_handler;
+ }
+ ctx->fh.ctrl_handler = ctrl_handler;
+ v4l2_ctrl_handler_setup(ctrl_handler);
+
+ mutex_unlock(&rot_dev->lock);
+
+ SDEDEV_DBG(ctx->rot_dev->dev, "SDE v4l2 rotator open success\n");
+
+ ATRACE_BEGIN(ctx->kobj.name);
+
+ return 0;
+error_ctrl_handler:
+error_open_session:
+ sde_rot_mgr_unlock(rot_dev->mgr);
+ sde_rotator_destroy_timeline(ctx->work_queue.timeline);
+ destroy_workqueue(ctx->work_queue.rot_work_queue);
+error_alloc_workqueue:
+ sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group);
+error_create_sysfs:
+ kobject_put(&ctx->kobj);
+error_kobj_init:
+error_m2m_init:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ mutex_unlock(&rot_dev->lock);
+error_lock:
+ kfree(ctx);
+ return ret;
+}
+
/*
 * sde_rotator_release - Rotator device release method.
 * @file: Pointer to file struct.
 *
 * Tears down the session in roughly the reverse order of open:
 * free controls and stop streaming (under the device lock), cancel
 * pending work, close the manager session, then free the remaining
 * per-context resources under the device lock again.
 *
 * NOTE(review): the work cancellations are deliberately done outside
 * rot_dev->lock — presumably because the submit/retire handlers take
 * the same lock; confirm against the handler implementations.
 */
static int sde_rotator_release(struct file *file)
{
	struct sde_rotator_device *rot_dev = video_drvdata(file);
	struct sde_rotator_ctx *ctx =
		sde_rotator_ctx_from_fh(file->private_data);
	/* cache the id: ctx is freed before the final debug print */
	u32 session_id = ctx->session_id;

	ATRACE_END(ctx->kobj.name);

	SDEDEV_DBG(rot_dev->dev, "release s:%d\n", session_id);
	mutex_lock(&rot_dev->lock);
	v4l2_ctrl_handler_free(&ctx->ctrl_handler);
	SDEDEV_DBG(rot_dev->dev, "release streams s:%d\n", session_id);
	v4l2_m2m_streamoff(file, ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
	v4l2_m2m_streamoff(file, ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
	mutex_unlock(&rot_dev->lock);
	SDEDEV_DBG(rot_dev->dev, "release submit work s:%d w:%x\n",
			session_id, work_busy(&ctx->submit_work));
	/* no new submissions after streamoff; flush any in-flight one */
	cancel_work_sync(&ctx->submit_work);
	SDEDEV_DBG(rot_dev->dev, "release session s:%d\n", session_id);
	sde_rot_mgr_lock(rot_dev->mgr);
	sde_rotator_session_close(rot_dev->mgr, ctx->private, session_id);
	sde_rot_mgr_unlock(rot_dev->mgr);
	SDEDEV_DBG(rot_dev->dev, "release retire work s:%d w:%x\n",
			session_id, work_busy(&ctx->retire_work));
	cancel_work_sync(&ctx->retire_work);
	mutex_lock(&rot_dev->lock);
	SDEDEV_DBG(rot_dev->dev, "release context s:%d\n", session_id);
	sde_rotator_destroy_timeline(ctx->work_queue.timeline);
	destroy_workqueue(ctx->work_queue.rot_work_queue);
	sysfs_remove_group(&ctx->kobj, &sde_rotator_fs_attr_group);
	kobject_put(&ctx->kobj);
	v4l2_m2m_ctx_release(ctx->m2m_ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx->vbinfo_out);
	kfree(ctx->vbinfo_cap);
	kfree(ctx);
	mutex_unlock(&rot_dev->lock);
	SDEDEV_DBG(rot_dev->dev, "release complete s:%d\n", session_id);
	return 0;
}
+
+/*
+ * sde_rotator_poll - rotator device pool method.
+ * @file: Pointer to file struct.
+ * @wait: Pointer to poll table struct.
+ */
+static unsigned int sde_rotator_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ struct sde_rotator_device *rot_dev = video_drvdata(file);
+ struct sde_rotator_ctx *ctx =
+ sde_rotator_ctx_from_fh(file->private_data);
+ int ret;
+
+ mutex_lock(&rot_dev->lock);
+ ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
+ mutex_unlock(&rot_dev->lock);
+ return ret;
+}
+
/* rotator device file operations callbacks */
static const struct v4l2_file_operations sde_rotator_fops = {
	.owner = THIS_MODULE,
	.open = sde_rotator_open,
	.release = sde_rotator_release,
	.poll = sde_rotator_poll,
	.unlocked_ioctl = video_ioctl2,	/* standard v4l2 ioctl dispatcher */
#ifdef CONFIG_COMPAT
	/* 32-bit userspace on 64-bit kernel ioctl translation */
	.compat_ioctl32 = sde_rotator_compat_ioctl32,
#endif
};
+
+/*
+ * sde_rotator_querycap - V4l2 ioctl query capability handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @cap: Pointer to v4l2_capability struct need to be filled.
+ */
+static int sde_rotator_querycap(struct file *file,
+ void *fh, struct v4l2_capability *cap)
+{
+ cap->bus_info[0] = 0;
+ strlcpy(cap->driver, SDE_ROTATOR_DRV_NAME, sizeof(cap->driver));
+ strlcpy(cap->card, SDE_ROTATOR_DRV_NAME, sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M |
+ V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_CAPTURE;
+
+ return 0;
+}
+
/*
 * sde_rotator_enum_fmt_vid_cap - V4l2 ioctl enumerate capture format
 * handler (shares the single format table with the output side).
 * @file: Pointer to file struct.
 * @fh: V4l2 File handle.
 * @f: Pointer to v4l2_fmtdesc struct need to be filled.
 */
static int sde_rotator_enum_fmt_vid_cap(struct file *file,
	void *fh, struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(fmtdesc))
		return -EINVAL;

	f->pixelformat = fmtdesc[f->index].pixelformat;
	strlcpy(f->description, fmtdesc[f->index].description,
			sizeof(f->description));

	return 0;
}
+
/*
 * sde_rotator_enum_fmt_vid_out - V4l2 ioctl enumerate output format
 * handler (shares the single format table with the capture side).
 * @file: Pointer to file struct.
 * @fh: V4l2 File handle.
 * @f: Pointer to v4l2_fmtdesc struct need to be filled.
 */
static int sde_rotator_enum_fmt_vid_out(struct file *file,
	void *fh, struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(fmtdesc))
		return -EINVAL;

	f->pixelformat = fmtdesc[f->index].pixelformat;
	strlcpy(f->description, fmtdesc[f->index].description,
			sizeof(f->description));

	return 0;
}
+
+/*
+ * sde_rotator_g_fmt_cap - V4l2 ioctl get capture format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct need to be filled.
+ */
+static int sde_rotator_g_fmt_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
+
+ *f = ctx->format_cap;
+
+ return 0;
+}
+
+/*
+ * sde_rotator_g_fmt_out - V4l2 ioctl get output format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct need to be filled.
+ */
+static int sde_rotator_g_fmt_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
+
+ *f = ctx->format_out;
+
+ return 0;
+}
+
/*
 * sde_rotator_try_fmt_vid_cap - V4l2 ioctl try capture format handler.
 * @file: Pointer to file struct.
 * @fh: V4l2 File handle.
 * @f: Pointer to v4l2_format struct.
 *
 * Verifies the requested capture format against the rotator manager.
 * If verification fails but the manager adjusted width/height, the
 * adjusted values are written back into @f and the call succeeds;
 * only an unadjusted rejection returns -EINVAL.
 */
static int sde_rotator_try_fmt_vid_cap(struct file *file,
	void *fh, struct v4l2_format *f)
{
	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
	struct sde_rotator_device *rot_dev = ctx->rot_dev;
	struct sde_rotation_config config;
	int fmt_idx;
	int ret;

	/* reject pixel formats not found in the supported format table */
	fmt_idx = sde_rotator_get_format_idx(ctx, f);
	if (fmt_idx < 0)
		return -EINVAL;

	sde_rot_mgr_lock(rot_dev->mgr);
	sde_rotator_get_config_from_ctx(ctx, &config);
	config.output.format = f->fmt.pix.pixelformat;
	config.output.width = f->fmt.pix.width;
	config.output.height = f->fmt.pix.height;
	ret = sde_rotator_verify_config(rot_dev->mgr, &config);
	sde_rot_mgr_unlock(rot_dev->mgr);
	if (ret) {
		/* dimensions unchanged means a hard rejection */
		if ((config.output.width == f->fmt.pix.width) &&
			(config.output.height == f->fmt.pix.height)) {
			SDEDEV_WARN(ctx->rot_dev->dev,
				"invalid capture format 0x%8.8x %dx%d\n",
				f->fmt.pix.pixelformat,
				f->fmt.pix.width,
				f->fmt.pix.height);
			return -EINVAL;
		}
		f->fmt.pix.width = config.output.width;
		f->fmt.pix.height = config.output.height;
	}

	/* refresh derived fields (presumably bytesperline/sizeimage) */
	sde_rotator_format_recalc(f);
	return 0;
}
+
/*
 * sde_rotator_try_fmt_vid_out - V4l2 ioctl try output format handler.
 * @file: Pointer to file struct.
 * @fh: V4l2 File handle.
 * @f: Pointer to v4l2_format struct.
 *
 * Verifies the requested output (source) format against the rotator
 * manager, checking the input side only. If verification fails but the
 * manager adjusted width/height, the adjusted values are written back
 * into @f and the call succeeds; only an unadjusted rejection returns
 * -EINVAL.
 */
static int sde_rotator_try_fmt_vid_out(struct file *file,
	void *fh, struct v4l2_format *f)
{
	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
	struct sde_rotator_device *rot_dev = ctx->rot_dev;
	struct sde_rotation_config config;
	int fmt_idx;
	int ret;

	/* reject pixel formats not found in the supported format table */
	fmt_idx = sde_rotator_get_format_idx(ctx, f);
	if (fmt_idx < 0)
		return -EINVAL;

	sde_rot_mgr_lock(rot_dev->mgr);
	sde_rotator_get_config_from_ctx(ctx, &config);
	config.input.format = f->fmt.pix.pixelformat;
	config.input.width = f->fmt.pix.width;
	config.input.height = f->fmt.pix.height;
	/* the capture side may not be configured yet; check input only */
	config.flags |= SDE_ROTATION_VERIFY_INPUT_ONLY;
	ret = sde_rotator_verify_config(rot_dev->mgr, &config);
	sde_rot_mgr_unlock(rot_dev->mgr);
	if (ret) {
		/* dimensions unchanged means a hard rejection */
		if ((config.input.width == f->fmt.pix.width) &&
			(config.input.height == f->fmt.pix.height)) {
			SDEDEV_WARN(ctx->rot_dev->dev,
				"invalid output format 0x%8.8x %dx%d\n",
				f->fmt.pix.pixelformat,
				f->fmt.pix.width,
				f->fmt.pix.height);
			return -EINVAL;
		}
		f->fmt.pix.width = config.input.width;
		f->fmt.pix.height = config.input.height;
	}

	/* refresh derived fields (presumably bytesperline/sizeimage) */
	sde_rotator_format_recalc(f);
	return 0;
}
+
+/*
+ * sde_rotator_s_fmt_vid_cap - V4l2 ioctl set capture format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct.
+ */
+static int sde_rotator_s_fmt_vid_cap(struct file *file,
+ void *fh, struct v4l2_format *f)
+{
+ struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
+ struct sde_rotator_device *rot_dev = ctx->rot_dev;
+ struct sde_rotation_config config;
+ int ret;
+
+ ret = sde_rotator_try_fmt_vid_cap(file, fh, f);
+ if (ret)
+ return -EINVAL;
+
+ /* Initialize crop */
+ ctx->crop_cap.top = 0;
+ ctx->crop_cap.left = 0;
+ ctx->crop_cap.width = f->fmt.pix.width;
+ ctx->crop_cap.height = f->fmt.pix.height;
+
+ ctx->format_cap = *f;
+
+ SDEDEV_DBG(rot_dev->dev,
+ "s_fmt s:%d t:%d fmt:0x%8.8x field:%u (%u,%u)\n",
+ ctx->session_id, f->type,
+ f->fmt.pix.pixelformat,
+ f->fmt.pix.field,
+ f->fmt.pix.width, f->fmt.pix.height);
+
+ /* configure hal to current input/output setting */
+ sde_rot_mgr_lock(rot_dev->mgr);
+ sde_rotator_get_config_from_ctx(ctx, &config);
+ sde_rotator_session_config(rot_dev->mgr, ctx->private, &config);
+ sde_rot_mgr_unlock(rot_dev->mgr);
+
+ return 0;
+}
+
+/*
+ * sde_rotator_s_fmt_vid_out - V4l2 ioctl set output format handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @f: Pointer to v4l2_format struct.
+ */
+static int sde_rotator_s_fmt_vid_out(struct file *file,
+ void *fh, struct v4l2_format *f)
+{
+ struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
+ struct sde_rotator_device *rot_dev = ctx->rot_dev;
+ int ret;
+
+ ret = sde_rotator_try_fmt_vid_out(file, fh, f);
+ if (ret)
+ return -EINVAL;
+
+ /* Initialize crop */
+ ctx->crop_out.top = 0;
+ ctx->crop_out.left = 0;
+ ctx->crop_out.width = f->fmt.pix.width;
+ ctx->crop_out.height = f->fmt.pix.height;
+
+ ctx->format_out = *f;
+
+ SDEDEV_DBG(rot_dev->dev,
+ "s_fmt s:%d t:%d fmt:0x%8.8x field:%u (%u,%u)\n",
+ ctx->session_id, f->type,
+ f->fmt.pix.pixelformat,
+ f->fmt.pix.field,
+ f->fmt.pix.width, f->fmt.pix.height);
+
+ return 0;
+}
+
+/*
+ * sde_rotator_reqbufs - V4l2 ioctl request buffers handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @req: Pointer to v4l2_requestbuffer struct.
+ */
+static int sde_rotator_reqbufs(struct file *file,
+ void *fh, struct v4l2_requestbuffers *req)
+{
+ struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
+
+ return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, req);
+}
+
/*
 * sde_rotator_qbuf - V4l2 ioctl queue buffer handler.
 * @file: Pointer to file struct.
 * @fh: V4l2 File handle.
 * @buf: Pointer to v4l2_buffer struct.
 *
 * For capture buffers, creates a retire fence on the session timeline
 * and records queue timestamps before handing the buffer to the m2m
 * framework; output buffers only get their timestamps recorded.
 */
static int sde_rotator_qbuf(struct file *file, void *fh,
	struct v4l2_buffer *buf)
{
	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
	int ret;

	/* create fence for capture buffer */
	if ((buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			&& (buf->index < ctx->nbuf_cap)) {
		int idx = buf->index;

		/* fd of -1 marks the fence as not yet exported to user */
		ctx->vbinfo_cap[idx].fd = -1;
		ctx->vbinfo_cap[idx].fence = sde_rotator_get_sync_fence(
				ctx->work_queue.timeline, NULL,
				&ctx->vbinfo_cap[idx].fence_ts);
		ctx->vbinfo_cap[idx].qbuf_ts = ktime_get();
		ctx->vbinfo_cap[idx].dqbuf_ts = NULL;
		SDEDEV_DBG(ctx->rot_dev->dev,
				"create buffer fence s:%d.%u i:%d f:%p\n",
				ctx->session_id,
				ctx->vbinfo_cap[idx].fence_ts,
				idx,
				ctx->vbinfo_cap[idx].fence);
	} else if ((buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			&& (buf->index < ctx->nbuf_out)) {
		int idx = buf->index;

		ctx->vbinfo_out[idx].qbuf_ts = ktime_get();
		ctx->vbinfo_out[idx].dqbuf_ts = NULL;
	}

	ret = v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
	if (ret < 0)
		SDEDEV_ERR(ctx->rot_dev->dev, "fail qbuf s:%d t:%d r:%d\n",
				ctx->session_id, buf->type, ret);

	return ret;
}
+
/*
 * sde_rotator_dqbuf - V4l2 ioctl dequeue buffer handler.
 * @file: Pointer to file struct.
 * @fh: V4l2 File handle.
 * @buf: Pointer to v4l2_buffer struct.
 *
 * Dequeues via the m2m framework, then clears the per-buffer fence
 * bookkeeping and records the dequeue timestamp if one was requested.
 */
static int sde_rotator_dqbuf(struct file *file,
	void *fh, struct v4l2_buffer *buf)
{
	struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
	int ret;

	ret = v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);

	if (ret) {
		SDEDEV_ERR(ctx->rot_dev->dev,
				"fail dqbuf s:%d t:%d i:%d r:%d\n",
				ctx->session_id, buf->type, buf->index, ret);
		return ret;
	}

	/* clear fence for buffer */
	if ((buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			&& (buf->index < ctx->nbuf_cap)) {
		int idx = buf->index;

		/*
		 * fd < 0 means the fence was never exported to userspace
		 * (see VIDIOC_G_SDE_ROTATOR_FENCE), so the driver still
		 * owns the reference and must drop it here.
		 */
		if (ctx->vbinfo_cap[idx].fence && ctx->vbinfo_cap[idx].fd < 0) {
			/* fence is not used */
			SDEDEV_DBG(ctx->rot_dev->dev, "put fence s:%d i:%d\n",
					ctx->session_id, idx);
			sde_rotator_put_sync_fence(ctx->vbinfo_cap[idx].fence);
		}
		ctx->vbinfo_cap[idx].fence = NULL;
		ctx->vbinfo_cap[idx].fd = -1;
		if (ctx->vbinfo_cap[idx].dqbuf_ts)
			*(ctx->vbinfo_cap[idx].dqbuf_ts) = ktime_get();
	} else if ((buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			&& (buf->index < ctx->nbuf_out)) {
		int idx = buf->index;

		ctx->vbinfo_out[idx].fence = NULL;
		ctx->vbinfo_out[idx].fd = -1;
		if (ctx->vbinfo_out[idx].dqbuf_ts)
			*(ctx->vbinfo_out[idx].dqbuf_ts) = ktime_get();
	} else {
		SDEDEV_WARN(ctx->rot_dev->dev, "invalid dq s:%d t:%d i:%d\n",
				ctx->session_id, buf->type, buf->index);
	}

	return 0;
}
+
+/*
+ * sde_rotator_querybuf - V4l2 ioctl query buffer handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf: Pointer to v4l2_buffer struct.
+ */
+static int sde_rotator_querybuf(struct file *file,
+ void *fh, struct v4l2_buffer *buf)
+{
+ struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
+
+ return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
+}
+
+/*
+ * sde_rotator_streamon - V4l2 ioctl stream on handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf_type: V4l2 buffer type.
+ */
+static int sde_rotator_streamon(struct file *file,
+ void *fh, enum v4l2_buf_type buf_type)
+{
+ struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
+ int ret;
+
+ SDEDEV_DBG(ctx->rot_dev->dev, "stream on s:%d t:%d\n",
+ ctx->session_id, buf_type);
+
+ ret = v4l2_m2m_streamon(file, ctx->m2m_ctx, buf_type);
+ if (ret < 0)
+ SDEDEV_ERR(ctx->rot_dev->dev, "fail stream on s:%d t:%d\n",
+ ctx->session_id, buf_type);
+
+ return ret;
+}
+
+/*
+ * sde_rotator_streamoff - V4l2 ioctl stream off handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @buf_type: V4l2 buffer type.
+ */
+static int sde_rotator_streamoff(struct file *file,
+ void *fh, enum v4l2_buf_type buf_type)
+{
+ struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
+ int ret;
+
+ SDEDEV_DBG(ctx->rot_dev->dev, "stream off s:%d t:%d\n",
+ ctx->session_id, buf_type);
+
+ ret = v4l2_m2m_streamoff(file, ctx->m2m_ctx, buf_type);
+ if (ret < 0)
+ SDEDEV_ERR(ctx->rot_dev->dev, "fail stream off s:%d t:%d\n",
+ ctx->session_id, buf_type);
+
+ return ret;
+}
+
+/*
+ * sde_rotator_cropcap - V4l2 ioctl crop capabilities.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @a: Pointer to v4l2_cropcap struct need to be set.
+ */
+static int sde_rotator_cropcap(struct file *file, void *fh,
+ struct v4l2_cropcap *a)
+{
+ struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
+ struct v4l2_format *format;
+ struct v4l2_rect *crop;
+
+ switch (a->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ format = &ctx->format_out;
+ crop = &ctx->crop_out;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ format = &ctx->format_cap;
+ crop = &ctx->crop_cap;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ a->bounds.top = 0;
+ a->bounds.left = 0;
+ a->bounds.width = format->fmt.pix.width;
+ a->bounds.height = format->fmt.pix.height;
+
+ a->defrect = *crop;
+
+ a->pixelaspect.numerator = 1;
+ a->pixelaspect.denominator = 1;
+
+ return 0;
+}
+
+/*
+ * sde_rotator_g_crop - V4l2 ioctl get crop.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @crop: Pointer to v4l2_crop struct need to be set.
+ */
+static int sde_rotator_g_crop(struct file *file, void *fh,
+ struct v4l2_crop *crop)
+{
+ struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
+
+ switch (crop->type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ crop->c = ctx->crop_out;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ crop->c = ctx->crop_cap;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * sde_rotator_s_crop - V4l2 ioctl set crop.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @crop: Pointer to v4l2_crop struct need to be set.
+ */
+static int sde_rotator_s_crop(struct file *file, void *fh,
+ const struct v4l2_crop *crop)
+{
+ struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
+ struct sde_rotator_device *rot_dev = ctx->rot_dev;
+ struct sde_rotation_config config;
+ struct sde_rotation_item item;
+ struct v4l2_rect rect;
+
+ sde_rotator_get_item_from_ctx(ctx, &item);
+
+ rect.left = max_t(__u32, crop->c.left, 0);
+ rect.top = max_t(__u32, crop->c.top, 0);
+ rect.height = max_t(__u32, crop->c.height, 0);
+ rect.width = max_t(__u32, crop->c.width, 0);
+
+ if (crop->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ rect.left = min_t(__u32, rect.left,
+ ctx->format_out.fmt.pix.width - 1);
+ rect.top = min_t(__u32, rect.top,
+ ctx->format_out.fmt.pix.height - 1);
+ rect.width = min_t(__u32, rect.width,
+ (ctx->format_out.fmt.pix.width - rect.left));
+ rect.height = min_t(__u32, rect.height,
+ (ctx->format_out.fmt.pix.height - rect.top));
+
+ item.src_rect.x = rect.left;
+ item.src_rect.y = rect.top;
+ item.src_rect.w = rect.width;
+ item.src_rect.h = rect.height;
+
+ sde_rotator_validate_item(ctx, &item);
+
+ SDEDEV_DBG(rot_dev->dev,
+ "s_crop s:%d t:%d (%u,%u,%u,%u)->(%u,%u,%u,%u)\n",
+ ctx->session_id, crop->type,
+ crop->c.left, crop->c.top,
+ crop->c.width, crop->c.height,
+ item.src_rect.x, item.src_rect.y,
+ item.src_rect.w, item.src_rect.h);
+
+ ctx->crop_out.left = item.src_rect.x;
+ ctx->crop_out.top = item.src_rect.y;
+ ctx->crop_out.width = item.src_rect.w;
+ ctx->crop_out.height = item.src_rect.h;
+ } else if (crop->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ rect.left = min_t(__u32, rect.left,
+ ctx->format_cap.fmt.pix.width - 1);
+ rect.top = min_t(__u32, rect.top,
+ ctx->format_cap.fmt.pix.height - 1);
+ rect.width = min_t(__u32, rect.width,
+ (ctx->format_cap.fmt.pix.width - rect.left));
+ rect.height = min_t(__u32, rect.height,
+ (ctx->format_cap.fmt.pix.height - rect.top));
+
+ item.dst_rect.x = rect.left;
+ item.dst_rect.y = rect.top;
+ item.dst_rect.w = rect.width;
+ item.dst_rect.h = rect.height;
+
+ sde_rotator_validate_item(ctx, &item);
+
+ SDEDEV_DBG(rot_dev->dev,
+ "s_crop s:%d t:%d (%u,%u,%u,%u)->(%u,%u,%u,%u)\n",
+ ctx->session_id, crop->type,
+ crop->c.left, crop->c.top,
+ crop->c.width, crop->c.height,
+ item.dst_rect.x, item.dst_rect.y,
+ item.dst_rect.w, item.dst_rect.h);
+
+ ctx->crop_cap.left = item.dst_rect.x;
+ ctx->crop_cap.top = item.dst_rect.y;
+ ctx->crop_cap.width = item.dst_rect.w;
+ ctx->crop_cap.height = item.dst_rect.h;
+ } else {
+ return -EINVAL;
+ }
+
+ /* configure hal to current input/output setting */
+ sde_rot_mgr_lock(rot_dev->mgr);
+ sde_rotator_get_config_from_ctx(ctx, &config);
+ sde_rotator_session_config(rot_dev->mgr, ctx->private, &config);
+ sde_rot_mgr_unlock(rot_dev->mgr);
+
+ return 0;
+}
+
+/*
+ * sde_rotator_g_parm - V4l2 ioctl get parm.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @a: Pointer to v4l2_streamparm struct need to be filled.
+ */
+static int sde_rotator_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
+
+ /* Get param is supported only for input buffers */
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ a->parm.output.capability = 0;
+ a->parm.output.extendedmode = 0;
+ a->parm.output.outputmode = 0;
+ a->parm.output.writebuffers = 0;
+ a->parm.output.timeperframe = ctx->timeperframe;
+
+ return 0;
+}
+
+/*
+ * sde_rotator_s_parm - V4l2 ioctl set parm.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 File handle.
+ * @a: Pointer to v4l2_streamparm struct need to be set.
+ */
+static int sde_rotator_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
+
+ /* Set param is supported only for input buffers */
+ if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ if (!a->parm.output.timeperframe.numerator ||
+ !a->parm.output.timeperframe.denominator)
+ return -EINVAL;
+
+ ctx->timeperframe = a->parm.output.timeperframe;
+ return 0;
+}
+
+/*
+ * sde_rotator_g_priority - Get the priority
+ * @file: Pointer to file struct.
+ * @fh: V4l2 file handle.
+ * @p: Pointer to priority enumeration.
+ */
+static int sde_rotator_g_priority(struct file *file, void *fh,
+ enum v4l2_priority *p)
+{
+ struct sde_rotator_ctx *ctx = sde_rotator_ctx_from_fh(fh);
+
+ if (!p)
+ return -EINVAL;
+ *p = ctx->priority;
+ return 0;
+}
+
+/*
+ * sde_rotator_s_priority - Set the priority of this context.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 file handle.
+ * @p: New priority enumeration value.
+ *
+ * Always succeeds; the value is simply recorded on the context.
+ */
+static int sde_rotator_s_priority(struct file *file, void *fh,
+	enum v4l2_priority p)
+{
+	sde_rotator_ctx_from_fh(fh)->priority = p;
+	return 0;
+}
+
+/*
+ * sde_rotator_private_ioctl - V4l2 private ioctl handler.
+ * @file: Pointer to file struct.
+ * @fh: V4l2 file handle.
+ * @valid_prio: Priority ioctl valid flag.
+ * @cmd: Ioctl command (VIDIOC_S/G_SDE_ROTATOR_FENCE).
+ * @arg: Ioctl argument (struct msm_sde_rotator_fence).
+ *
+ * S_FENCE attaches a userspace acquire fence fd to an output buffer;
+ * G_FENCE creates/returns a release fence fd for a capture buffer.
+ * Returns 0 on success, -EINVAL on bad type/index/fd, -ENOTTY for an
+ * unknown command.
+ */
+static long sde_rotator_private_ioctl(struct file *file, void *fh,
+ bool valid_prio, unsigned int cmd, void *arg)
+{
+ struct sde_rotator_ctx *ctx =
+ sde_rotator_ctx_from_fh(file->private_data);
+ struct sde_rotator_device *rot_dev = ctx->rot_dev;
+ struct msm_sde_rotator_fence *fence = arg;
+ struct sde_rotator_vbinfo *vbinfo;
+
+ switch (cmd) {
+ case VIDIOC_S_SDE_ROTATOR_FENCE:
+ if (!fence)
+ return -EINVAL;
+
+ /* acquire fences apply to the output (source) queue only */
+ if (fence->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ if (fence->index >= ctx->nbuf_out)
+ return -EINVAL;
+
+ SDEDEV_DBG(rot_dev->dev,
+ "VIDIOC_S_SDE_ROTATOR_FENCE s:%d i:%d fd:%d\n",
+ ctx->session_id, fence->index,
+ fence->fd);
+
+ vbinfo = &ctx->vbinfo_out[fence->index];
+
+ /* drop any fence previously attached to this buffer slot */
+ if (vbinfo->fd >= 0) {
+ if (vbinfo->fence) {
+ SDEDEV_DBG(rot_dev->dev,
+ "put fence s:%d t:%d i:%d\n",
+ ctx->session_id,
+ fence->type, fence->index);
+ sde_rotator_put_sync_fence(vbinfo->fence);
+ }
+ vbinfo->fence = NULL;
+ vbinfo->fd = -1;
+ }
+
+ /* import the new fence; fd < 0 means "no fence" */
+ vbinfo->fd = fence->fd;
+ if (vbinfo->fd >= 0) {
+ vbinfo->fence =
+ sde_rotator_get_fd_sync_fence(vbinfo->fd);
+ if (!vbinfo->fence) {
+ SDEDEV_WARN(rot_dev->dev,
+ "invalid input fence fd s:%d fd:%d\n",
+ ctx->session_id, vbinfo->fd);
+ vbinfo->fd = -1;
+ return -EINVAL;
+ }
+ } else {
+ vbinfo->fence = NULL;
+ }
+ break;
+ case VIDIOC_G_SDE_ROTATOR_FENCE:
+ if (!fence)
+ return -EINVAL;
+
+ /* release fences apply to the capture (destination) queue */
+ if (fence->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (fence->index >= ctx->nbuf_cap)
+ return -EINVAL;
+
+ vbinfo = &ctx->vbinfo_cap[fence->index];
+
+ if (vbinfo->fence == NULL) {
+ vbinfo->fd = -1;
+ } else {
+ /* install the fence as a new fd for userspace to poll */
+ vbinfo->fd =
+ sde_rotator_get_sync_fence_fd(vbinfo->fence);
+ if (vbinfo->fd < 0) {
+ SDEDEV_ERR(rot_dev->dev,
+ "fail get fence fd s:%d\n",
+ ctx->session_id);
+ return vbinfo->fd;
+ }
+ }
+ fence->fd = vbinfo->fd;
+
+ SDEDEV_DBG(rot_dev->dev,
+ "VIDIOC_G_SDE_ROTATOR_FENCE s:%d i:%d fd:%d\n",
+ ctx->session_id, fence->index,
+ fence->fd);
+ break;
+ default:
+ SDEDEV_WARN(rot_dev->dev, "invalid ioctl type %x\n", cmd);
+ return -ENOTTY;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * sde_rotator_compat_ioctl32 - Compat (32-bit userspace) ioctl handler.
+ * @file: Pointer to file struct.
+ * @cmd: Ioctl command.
+ * @arg: Userspace pointer to the ioctl argument.
+ *
+ * Bridges the private fence ioctls for 32-bit callers by copying the
+ * argument struct in, dispatching to the native handler, and copying
+ * the (possibly updated) struct back out.
+ * NOTE(review): this assumes struct msm_sde_rotator_fence has the same
+ * layout for 32-bit and 64-bit userspace — confirm against the uapi
+ * header.
+ */
+static long sde_rotator_compat_ioctl32(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ switch (cmd) {
+ case VIDIOC_S_SDE_ROTATOR_FENCE:
+ case VIDIOC_G_SDE_ROTATOR_FENCE:
+ {
+ struct msm_sde_rotator_fence fence;
+
+ if (copy_from_user(&fence, (void __user *)arg,
+ sizeof(struct msm_sde_rotator_fence)))
+ return -EFAULT;
+
+ ret = sde_rotator_private_ioctl(file, file->private_data,
+ 0, cmd, (void *)&fence);
+
+ /* copy back unconditionally so G_FENCE results reach userspace */
+ if (copy_to_user((void __user *)arg, &fence,
+ sizeof(struct msm_sde_rotator_fence)))
+ return -EFAULT;
+
+ break;
+ }
+ default:
+ ret = -ENOIOCTLCMD;
+ break;
+
+ }
+
+ return ret;
+}
+#endif
+
+/* V4l2 ioctl handlers: format negotiation, buffer queue management,
+ * streaming control, crop, frame timing, priority, and the private
+ * fence ioctls (via vidioc_default).
+ */
+static const struct v4l2_ioctl_ops sde_rotator_ioctl_ops = {
+ .vidioc_querycap = sde_rotator_querycap,
+ .vidioc_enum_fmt_vid_out = sde_rotator_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_cap = sde_rotator_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_out = sde_rotator_g_fmt_out,
+ .vidioc_g_fmt_vid_cap = sde_rotator_g_fmt_cap,
+ .vidioc_try_fmt_vid_out = sde_rotator_try_fmt_vid_out,
+ .vidioc_try_fmt_vid_cap = sde_rotator_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_out = sde_rotator_s_fmt_vid_out,
+ .vidioc_s_fmt_vid_cap = sde_rotator_s_fmt_vid_cap,
+ .vidioc_reqbufs = sde_rotator_reqbufs,
+ .vidioc_qbuf = sde_rotator_qbuf,
+ .vidioc_dqbuf = sde_rotator_dqbuf,
+ .vidioc_querybuf = sde_rotator_querybuf,
+ .vidioc_streamon = sde_rotator_streamon,
+ .vidioc_streamoff = sde_rotator_streamoff,
+ .vidioc_cropcap = sde_rotator_cropcap,
+ .vidioc_g_crop = sde_rotator_g_crop,
+ .vidioc_s_crop = sde_rotator_s_crop,
+ .vidioc_g_parm = sde_rotator_g_parm,
+ .vidioc_s_parm = sde_rotator_s_parm,
+ .vidioc_g_priority = sde_rotator_g_priority,
+ .vidioc_s_priority = sde_rotator_s_priority,
+ .vidioc_default = sde_rotator_private_ioctl,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * sde_rotator_retire_handler - Invoked by hal when processing is done.
+ * @work: Pointer to work structure.
+ *
+ * This function is scheduled in work queue context. Under rot_dev->lock
+ * it either acknowledges an abort, reschedules the m2m context (early
+ * submit mode), or removes and completes the source/destination buffers
+ * and finishes the m2m job.
+ */
+static void sde_rotator_retire_handler(struct work_struct *work)
+{
+	struct vb2_buffer *src_buf;
+	struct vb2_buffer *dst_buf;
+	struct sde_rotator_ctx *ctx;
+	struct sde_rotator_device *rot_dev;
+
+	ctx = container_of(work, struct sde_rotator_ctx, retire_work);
+
+	if (!ctx || !ctx->rot_dev) {
+		SDEROT_ERR("null context/device\n");
+		return;
+	}
+
+	rot_dev = ctx->rot_dev;
+
+	SDEDEV_DBG(rot_dev->dev, "retire handler s:%d\n", ctx->session_id);
+
+	mutex_lock(&rot_dev->lock);
+	if (ctx->abort_pending) {
+		SDEDEV_DBG(rot_dev->dev, "abort command in retire s:%d\n",
+				ctx->session_id);
+		ctx->request = ERR_PTR(-EINTR);
+		atomic_dec(&ctx->command_pending);
+		wake_up(&ctx->wait_queue);
+		mutex_unlock(&rot_dev->lock);
+		return;
+	}
+
+	if (rot_dev->early_submit) {
+		if (IS_ERR_OR_NULL(ctx->request)) {
+			/* fail pending request or something wrong */
+			SDEDEV_ERR(rot_dev->dev,
+					"pending request fail in retire s:%d\n",
+					ctx->session_id);
+		}
+
+		/* pending request. reschedule this context. */
+		v4l2_m2m_try_schedule(ctx->m2m_ctx);
+	} else {
+		/* no pending request. acknowledge the usual way. */
+		src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+		dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+
+		if (!src_buf || !dst_buf) {
+			SDEDEV_ERR(rot_dev->dev,
+					"null buffer in retire s:%d sb:%p db:%p\n",
+					ctx->session_id,
+					src_buf, dst_buf);
+		}
+
+		ctx->request = NULL;
+		atomic_dec(&ctx->command_pending);
+		wake_up(&ctx->wait_queue);
+		/*
+		 * Only complete buffers actually removed from the queues;
+		 * passing NULL to v4l2_m2m_buf_done would dereference it.
+		 */
+		if (src_buf)
+			v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+		if (dst_buf)
+			v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+		v4l2_m2m_job_finish(rot_dev->m2m_dev, ctx->m2m_ctx);
+	}
+	mutex_unlock(&rot_dev->lock);
+}
+
+/*
+ * sde_rotator_process_buffers - Start rotator processing.
+ * @ctx: Pointer rotator context.
+ * @src_buf: Pointer to Vb2 source buffer.
+ * @dst_buf: Pointer to Vb2 destination buffer.
+ *
+ * Waits on any input acquire fence (dropping and re-taking the manager
+ * and device locks around the wait), builds the rotation item from the
+ * context state, and queues the request to the rotator core. On error
+ * ctx->request is set to an ERR_PTR so device_run can fail the job.
+ * Caller must hold rot_dev->lock and the rotator manager lock.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+static int sde_rotator_process_buffers(struct sde_rotator_ctx *ctx,
+	struct vb2_buffer *src_buf, struct vb2_buffer *dst_buf)
+{
+	struct sde_rotator_device *rot_dev = ctx->rot_dev;
+	struct sde_rotation_item item;
+	struct sde_rot_entry_container *req = NULL;
+	struct sde_rotator_buf_handle *src_handle;
+	struct sde_rotator_buf_handle *dst_handle;
+	struct sde_rotator_statistics *stats = &rot_dev->stats;
+	struct sde_rotator_vbinfo *vbinfo_out;
+	struct sde_rotator_vbinfo *vbinfo_cap;
+	ktime_t *ts;
+	int ret;
+
+	if (!src_buf || !dst_buf) {
+		SDEDEV_ERR(rot_dev->dev, "null vb2 buffers\n");
+		ret = -EINVAL;
+		goto error_null_buffer;
+	}
+
+	src_handle = src_buf->planes[0].mem_priv;
+	dst_handle = dst_buf->planes[0].mem_priv;
+
+	if (!src_handle || !dst_handle) {
+		SDEDEV_ERR(rot_dev->dev, "null buffer handle\n");
+		ret = -EINVAL;
+		goto error_null_buffer;
+	}
+
+	vbinfo_out = &ctx->vbinfo_out[src_buf->v4l2_buf.index];
+	vbinfo_cap = &ctx->vbinfo_cap[dst_buf->v4l2_buf.index];
+
+	SDEDEV_DBG(rot_dev->dev,
+		"process buffer s:%d.%u src:(%u,%u,%u,%u) dst:(%u,%u,%u,%u) rot:%d flip:%d/%d sec:%d\n",
+		ctx->session_id, vbinfo_cap->fence_ts,
+		ctx->crop_out.left, ctx->crop_out.top,
+		ctx->crop_out.width, ctx->crop_out.height,
+		ctx->crop_cap.left, ctx->crop_cap.top,
+		ctx->crop_cap.width, ctx->crop_cap.height,
+		ctx->rotate, ctx->hflip, ctx->vflip, ctx->secure);
+
+	/* allocate slot for timestamp in the circular event log */
+	ts = stats->ts[stats->count++ % SDE_ROTATOR_NUM_EVENTS];
+	ts[SDE_ROTATOR_TS_SRCQB] = vbinfo_out->qbuf_ts;
+	ts[SDE_ROTATOR_TS_DSTQB] = vbinfo_cap->qbuf_ts;
+	vbinfo_out->dqbuf_ts = &ts[SDE_ROTATOR_TS_SRCDQB];
+	vbinfo_cap->dqbuf_ts = &ts[SDE_ROTATOR_TS_DSTDQB];
+
+	ts[SDE_ROTATOR_TS_FENCE] = ktime_get();
+
+	/*
+	 * Flags word: rotate in bits 0-7, hflip bit 8, vflip bit 9,
+	 * secure bit 10. (Was "(ctx->hflip << 9)" — copy/paste bug that
+	 * dropped vflip from the trace.)
+	 */
+	trace_rot_entry_fence(
+		ctx->session_id, vbinfo_cap->fence_ts,
+		ctx->priority,
+		(ctx->rotate << 0) | (ctx->hflip << 8) |
+			(ctx->vflip << 9) | (ctx->secure << 10),
+		ctx->format_out.fmt.pix.pixelformat,
+		ctx->format_out.fmt.pix.width,
+		ctx->format_out.fmt.pix.height,
+		ctx->crop_out.left, ctx->crop_out.top,
+		ctx->crop_out.width, ctx->crop_out.height,
+		ctx->format_cap.fmt.pix.pixelformat,
+		ctx->format_cap.fmt.pix.width,
+		ctx->format_cap.fmt.pix.height,
+		ctx->crop_cap.left, ctx->crop_cap.top,
+		ctx->crop_cap.width, ctx->crop_cap.height);
+
+	if (vbinfo_out->fence) {
+		/* drop locks while blocking on the acquire fence */
+		sde_rot_mgr_unlock(rot_dev->mgr);
+		mutex_unlock(&rot_dev->lock);
+		SDEDEV_DBG(rot_dev->dev, "fence enter s:%d.%d fd:%d\n",
+			ctx->session_id, vbinfo_cap->fence_ts, vbinfo_out->fd);
+		ret = sde_rotator_wait_sync_fence(vbinfo_out->fence,
+				rot_dev->fence_timeout);
+		mutex_lock(&rot_dev->lock);
+		sde_rot_mgr_lock(rot_dev->mgr);
+		sde_rotator_put_sync_fence(vbinfo_out->fence);
+		vbinfo_out->fence = NULL;
+		if (ret) {
+			SDEDEV_ERR(rot_dev->dev,
+				"error waiting for fence s:%d.%d fd:%d r:%d\n",
+				ctx->session_id,
+				vbinfo_cap->fence_ts, vbinfo_out->fd, ret);
+			goto error_fence_wait;
+		} else {
+			SDEDEV_DBG(rot_dev->dev, "fence exit s:%d.%d fd:%d\n",
+				ctx->session_id,
+				vbinfo_cap->fence_ts, vbinfo_out->fd);
+		}
+	}
+
+	/* fill in item work structure */
+	sde_rotator_get_item_from_ctx(ctx, &item);
+	item.flags |= SDE_ROTATION_EXT_DMA_BUF;
+	item.input.planes[0].buffer = src_handle->buffer;
+	item.input.planes[0].offset = src_handle->addr;
+	item.input.planes[0].stride = ctx->format_out.fmt.pix.bytesperline;
+	item.input.plane_count = 1;
+	item.input.fence = NULL;
+	item.output.planes[0].buffer = dst_handle->buffer;
+	item.output.planes[0].offset = dst_handle->addr;
+	item.output.planes[0].stride = ctx->format_cap.fmt.pix.bytesperline;
+	item.output.plane_count = 1;
+	item.output.fence = NULL;
+	item.sequence_id = vbinfo_cap->fence_ts;
+	item.ts = ts;
+
+	req = sde_rotator_req_init(rot_dev->mgr, ctx->private, &item, 1, 0);
+	if (IS_ERR_OR_NULL(req)) {
+		SDEDEV_ERR(rot_dev->dev, "fail allocate rotation request\n");
+		ret = -ENOMEM;
+		goto error_init_request;
+	}
+
+	/* retire_work runs on our work queue once the core completes req */
+	req->retireq = ctx->work_queue.rot_work_queue;
+	req->retire_work = &ctx->retire_work;
+
+	ret = sde_rotator_handle_request_common(
+			rot_dev->mgr, ctx->private, req, &item);
+	if (ret) {
+		SDEDEV_ERR(rot_dev->dev, "fail handle request\n");
+		goto error_handle_request;
+	}
+
+	sde_rotator_queue_request(rot_dev->mgr, ctx->private, req);
+	ctx->request = req;
+
+	return 0;
+error_handle_request:
+	devm_kfree(rot_dev->dev, req);
+error_init_request:
+error_fence_wait:
+error_null_buffer:
+	ctx->request = ERR_PTR(ret);
+	return ret;
+}
+
+/*
+ * sde_rotator_submit_handler - Invoked by m2m to submit job.
+ * @work: Pointer to work structure.
+ *
+ * This function is scheduled in work queue context (queued from
+ * job_ready in early-submit mode). It honors a pending abort, otherwise
+ * peeks the next source/destination buffers and starts processing them.
+ */
+static void sde_rotator_submit_handler(struct work_struct *work)
+{
+ struct sde_rotator_ctx *ctx;
+ struct sde_rotator_device *rot_dev;
+ struct vb2_buffer *src_buf;
+ struct vb2_buffer *dst_buf;
+ int ret;
+
+ ctx = container_of(work, struct sde_rotator_ctx, submit_work);
+
+ if (!ctx->rot_dev) {
+ SDEROT_ERR("null device\n");
+ return;
+ }
+
+ rot_dev = ctx->rot_dev;
+ SDEDEV_DBG(rot_dev->dev, "submit handler s:%d\n", ctx->session_id);
+
+ mutex_lock(&rot_dev->lock);
+ if (ctx->abort_pending) {
+ SDEDEV_DBG(rot_dev->dev, "abort command in submit s:%d\n",
+ ctx->session_id);
+ ctx->request = ERR_PTR(-EINTR);
+ atomic_dec(&ctx->command_pending);
+ wake_up(&ctx->wait_queue);
+ mutex_unlock(&rot_dev->lock);
+ return;
+ }
+
+ /* submit new request: peek (not remove) the next buffer pair */
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ sde_rot_mgr_lock(rot_dev->mgr);
+ ret = sde_rotator_process_buffers(ctx, src_buf, dst_buf);
+ sde_rot_mgr_unlock(rot_dev->mgr);
+ if (ret) {
+ SDEDEV_ERR(rot_dev->dev,
+ "fail process buffer in submit s:%d\n",
+ ctx->session_id);
+ /* advance to device run to clean up buffers */
+ v4l2_m2m_try_schedule(ctx->m2m_ctx);
+ }
+
+ mutex_unlock(&rot_dev->lock);
+}
+
+/*
+ * sde_rotator_device_run - rotator m2m device run callback
+ * @priv: Pointer rotator context.
+ *
+ * In early-submit mode the request was already queued from job_ready;
+ * here we only check its completion state and signal the buffers done
+ * (or fail the job). In normal mode we submit the buffer pair now.
+ * Any failure falls through to the common error path which completes
+ * both buffers with VB2_BUF_STATE_ERROR and finishes the m2m job.
+ */
+static void sde_rotator_device_run(void *priv)
+{
+ struct sde_rotator_ctx *ctx = priv;
+ struct sde_rotator_device *rot_dev;
+ struct vb2_buffer *src_buf;
+ struct vb2_buffer *dst_buf;
+ int ret;
+
+ if (!ctx || !ctx->rot_dev) {
+ SDEROT_ERR("null context/device\n");
+ return;
+ }
+
+ rot_dev = ctx->rot_dev;
+ SDEDEV_DBG(rot_dev->dev, "device run s:%d\n", ctx->session_id);
+
+ if (rot_dev->early_submit) {
+ /* pending request mode, check for completion */
+ if (IS_ERR_OR_NULL(ctx->request)) {
+ /* pending request fails or something wrong. */
+ SDEDEV_ERR(rot_dev->dev,
+ "pending request fail in device run s:%d\n",
+ ctx->session_id);
+ rot_dev->stats.fail_count++;
+ ATRACE_INT("fail_count", rot_dev->stats.fail_count);
+ goto error_process_buffers;
+ } else if (!atomic_read(&ctx->request->pending_count)) {
+ /* pending request completed. signal done. */
+ int failed_count =
+ atomic_read(&ctx->request->failed_count);
+ SDEDEV_DBG(rot_dev->dev,
+ "pending request completed in device run s:%d\n",
+ ctx->session_id);
+
+ /* disconnect request (will be freed by core layer) */
+ sde_rot_mgr_lock(rot_dev->mgr);
+ ctx->request->retireq = NULL;
+ ctx->request->retire_work = NULL;
+ ctx->request = NULL;
+ sde_rot_mgr_unlock(rot_dev->mgr);
+
+ if (failed_count) {
+ SDEDEV_ERR(rot_dev->dev,
+ "pending request failed in device run s:%d f:%d\n",
+ ctx->session_id,
+ failed_count);
+ rot_dev->stats.fail_count++;
+ ATRACE_INT("fail_count",
+ rot_dev->stats.fail_count);
+ goto error_process_buffers;
+ }
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ if (!src_buf || !dst_buf) {
+ SDEDEV_ERR(rot_dev->dev,
+ "null buffer in device run s:%d sb:%p db:%p\n",
+ ctx->session_id,
+ src_buf, dst_buf);
+ goto error_process_buffers;
+ }
+
+ atomic_dec(&ctx->command_pending);
+ wake_up(&ctx->wait_queue);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(rot_dev->m2m_dev, ctx->m2m_ctx);
+ } else {
+ /* pending request not complete. something wrong. */
+ SDEDEV_ERR(rot_dev->dev,
+ "Incomplete pending request in device run s:%d\n",
+ ctx->session_id);
+
+ /* disconnect request (will be freed by core layer) */
+ sde_rot_mgr_lock(rot_dev->mgr);
+ ctx->request->retireq = NULL;
+ ctx->request->retire_work = NULL;
+ ctx->request = ERR_PTR(-EIO);
+ sde_rot_mgr_unlock(rot_dev->mgr);
+
+ goto error_process_buffers;
+ }
+ } else {
+ /* no pending request. submit buffer the usual way. */
+ atomic_inc(&ctx->command_pending);
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (!src_buf || !dst_buf) {
+ SDEDEV_ERR(rot_dev->dev,
+ "null buffer in device run s:%d sb:%p db:%p\n",
+ ctx->session_id,
+ src_buf, dst_buf);
+ goto error_empty_buffer;
+ }
+
+ sde_rot_mgr_lock(rot_dev->mgr);
+ ret = sde_rotator_process_buffers(ctx, src_buf, dst_buf);
+ sde_rot_mgr_unlock(rot_dev->mgr);
+ if (ret) {
+ SDEDEV_ERR(rot_dev->dev,
+ "fail process buffer in device run s:%d\n",
+ ctx->session_id);
+ ctx->request = ERR_PTR(ret);
+ rot_dev->stats.fail_count++;
+ ATRACE_INT("fail_count", rot_dev->stats.fail_count);
+ goto error_process_buffers;
+ }
+ }
+
+ return;
+error_process_buffers:
+error_empty_buffer:
+ /* fail the job: drop the buffer pair and resync the timeline */
+ atomic_dec(&ctx->command_pending);
+ wake_up(&ctx->wait_queue);
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ if (src_buf)
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ if (dst_buf)
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
+ sde_rotator_resync_timeline(ctx->work_queue.timeline);
+ v4l2_m2m_job_finish(rot_dev->m2m_dev, ctx->m2m_ctx);
+}
+
+/*
+ * sde_rotator_job_abort - rotator m2m job abort callback
+ * @priv: Pointer rotator context.
+ *
+ * Simply finishes the m2m job; outstanding hardware work is handled
+ * by the retire path.
+ */
+static void sde_rotator_job_abort(void *priv)
+{
+	struct sde_rotator_ctx *ctx = priv;
+
+	if (!ctx || !ctx->rot_dev) {
+		SDEROT_ERR("null context/device\n");
+		return;
+	}
+
+	SDEDEV_DBG(ctx->rot_dev->dev, "job abort s:%d\n", ctx->session_id);
+
+	v4l2_m2m_job_finish(ctx->rot_dev->m2m_dev, ctx->m2m_ctx);
+}
+
+/*
+ * sde_rotator_job_ready - rotator m2m job ready callback
+ * @priv: Pointer rotator context.
+ *
+ * Return: 1 when the m2m core may call device_run, 0 otherwise.
+ * In early-submit mode a new request is kicked off asynchronously via
+ * submit_work and the job only becomes ready once that request either
+ * fails or completes.
+ */
+static int sde_rotator_job_ready(void *priv)
+{
+	struct sde_rotator_ctx *ctx = priv;
+	struct sde_rotator_device *rot_dev;
+
+	if (!ctx || !ctx->rot_dev) {
+		SDEROT_ERR("null context/device\n");
+		return 0;
+	}
+
+	rot_dev = ctx->rot_dev;
+	SDEDEV_DBG(rot_dev->dev, "job ready s:%d\n", ctx->session_id);
+
+	/* always ready in normal mode. */
+	if (!rot_dev->early_submit)
+		return 1;
+
+	/* if pending request fails, forward to device run state. */
+	if (IS_ERR(ctx->request)) {
+		SDEDEV_DBG(rot_dev->dev,
+				"pending request fail in job ready s:%d\n",
+				ctx->session_id);
+		return 1;
+	}
+
+	/* if no pending request, submit a new request. */
+	if (!ctx->request) {
+		SDEDEV_DBG(rot_dev->dev,
+				"submit job s:%d sc:%d dc:%d p:%d\n",
+				ctx->session_id,
+				v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx),
+				v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx),
+				atomic_read(&ctx->command_pending));
+		atomic_inc(&ctx->command_pending);
+		queue_work(ctx->work_queue.rot_work_queue, &ctx->submit_work);
+		return 0;
+	}
+
+	/* if pending request completed, forward to device run state */
+	if (!atomic_read(&ctx->request->pending_count)) {
+		SDEDEV_DBG(rot_dev->dev,
+				"pending request completed in job ready s:%d\n",
+				ctx->session_id);
+		return 1;
+	}
+
+	return 0;
+}
+
+/* V4l2 mem2mem handlers: job scheduling callbacks for the m2m core */
+static struct v4l2_m2m_ops sde_rotator_m2m_ops = {
+ .device_run = sde_rotator_device_run,
+ .job_abort = sde_rotator_job_abort,
+ .job_ready = sde_rotator_job_ready,
+};
+
+/* Device tree match struct; no per-compatible driver data is attached */
+static const struct of_device_id sde_rotator_dt_match[] = {
+ {
+ .compatible = "qcom,sde_rotator",
+ .data = NULL,
+ },
+ {}
+};
+
+/*
+ * sde_rotator_get_drv_data - look up rotator device driver data.
+ * @dev: Pointer to device.
+ *
+ * Return: the .data of the matching of_device_id entry, or NULL when
+ * the device node does not match.
+ */
+static const void *sde_rotator_get_drv_data(struct device *dev)
+{
+	const struct of_device_id *id;
+
+	id = of_match_node(sde_rotator_dt_match, dev->of_node);
+
+	return id ? id->data : NULL;
+}
+
+/*
+ * sde_rotator_probe - rotator device probe method.
+ * @pdev: Pointer to rotator platform device.
+ *
+ * Initializes base resources and the rotator core, then registers the
+ * v4l2/m2m/video devices. On any failure everything already set up is
+ * torn down in reverse order and a negative errno is returned.
+ */
+static int sde_rotator_probe(struct platform_device *pdev)
+{
+	struct sde_rotator_device *rot_dev;
+	struct video_device *vdev;
+	int ret;
+
+	SDEDEV_DBG(&pdev->dev, "SDE v4l2 rotator probed\n");
+
+	/* sde rotator device struct */
+	rot_dev = kzalloc(sizeof(struct sde_rotator_device), GFP_KERNEL);
+	if (!rot_dev)
+		return -ENOMEM;
+
+	mutex_init(&rot_dev->lock);
+	rot_dev->early_submit = SDE_ROTATOR_EARLY_SUBMIT;
+	rot_dev->fence_timeout = SDE_ROTATOR_FENCE_TIMEOUT;
+	rot_dev->streamoff_timeout = SDE_ROTATOR_STREAM_OFF_TIMEOUT;
+	rot_dev->drvdata = sde_rotator_get_drv_data(&pdev->dev);
+
+	rot_dev->pdev = pdev;
+	rot_dev->dev = &pdev->dev;
+	platform_set_drvdata(pdev, rot_dev);
+
+	ret = sde_rotator_base_init(&rot_dev->mdata, pdev, rot_dev->drvdata);
+	if (ret < 0) {
+		SDEDEV_ERR(&pdev->dev, "fail init base data %d\n", ret);
+		goto error_rotator_base_init;
+	}
+
+	ret = sde_rotator_core_init(&rot_dev->mgr, pdev);
+	if (ret < 0) {
+		SDEDEV_ERR(&pdev->dev, "fail init core %d\n", ret);
+		goto error_rotator_core_init;
+	}
+
+	/* mem2mem device */
+	rot_dev->m2m_dev = v4l2_m2m_init(&sde_rotator_m2m_ops);
+	if (IS_ERR(rot_dev->m2m_dev)) {
+		ret = PTR_ERR(rot_dev->m2m_dev);
+		SDEDEV_ERR(&pdev->dev, "fail init mem2mem device %d\n", ret);
+		goto error_m2m_init;
+	}
+
+	/* v4l2 device */
+	ret = v4l2_device_register(&pdev->dev, &rot_dev->v4l2_dev);
+	if (ret < 0) {
+		SDEDEV_ERR(&pdev->dev, "fail register v4l2 device %d\n", ret);
+		goto error_v4l2_register;
+	}
+
+	vdev = video_device_alloc();
+	if (!vdev) {
+		SDEDEV_ERR(&pdev->dev, "fail allocate video device\n");
+		/*
+		 * Set an explicit error; without this, probe would return
+		 * the stale 0 from v4l2_device_register and report success
+		 * after all resources were freed.
+		 */
+		ret = -ENOMEM;
+		goto error_alloc_video_device;
+	}
+
+	vdev->fops = &sde_rotator_fops;
+	vdev->ioctl_ops = &sde_rotator_ioctl_ops;
+	vdev->lock = &rot_dev->lock;
+	vdev->minor = -1;
+	vdev->release = video_device_release;
+	vdev->v4l2_dev = &rot_dev->v4l2_dev;
+	vdev->vfl_dir = VFL_DIR_M2M;
+	vdev->vfl_type = VFL_TYPE_GRABBER;
+	strlcpy(vdev->name, SDE_ROTATOR_DRV_NAME, sizeof(vdev->name));
+
+	ret = video_register_device(vdev, VFL_TYPE_GRABBER,
+			SDE_ROTATOR_BASE_DEVICE_NUMBER);
+	if (ret < 0) {
+		SDEDEV_ERR(&pdev->dev, "fail register video device %d\n",
+				ret);
+		goto error_video_register;
+	}
+
+	rot_dev->vdev = vdev;
+	video_set_drvdata(rot_dev->vdev, rot_dev);
+
+	/* debugfs failure is non-fatal */
+	rot_dev->debugfs_root = sde_rotator_create_debugfs(rot_dev);
+
+	SDEDEV_INFO(&pdev->dev, "SDE v4l2 rotator probe success\n");
+
+	return 0;
+error_video_register:
+	video_device_release(vdev);
+error_alloc_video_device:
+	v4l2_device_unregister(&rot_dev->v4l2_dev);
+error_v4l2_register:
+	v4l2_m2m_release(rot_dev->m2m_dev);
+error_m2m_init:
+	sde_rotator_core_destroy(rot_dev->mgr);
+error_rotator_core_init:
+	sde_rotator_base_destroy(rot_dev->mdata);
+error_rotator_base_init:
+	kfree(rot_dev);
+	return ret;
+}
+
+/*
+ * sde_rotator_remove - rotator device remove method.
+ * @pdev: Pointer rotator platform device.
+ *
+ * Tears everything down in reverse order of probe. Always returns 0,
+ * even when driver data is missing.
+ */
+static int sde_rotator_remove(struct platform_device *pdev)
+{
+	struct sde_rotator_device *rot_dev = platform_get_drvdata(pdev);
+
+	if (!rot_dev) {
+		SDEDEV_ERR(&pdev->dev, "fail get rotator drvdata\n");
+		return 0;
+	}
+
+	sde_rotator_destroy_debugfs(rot_dev->debugfs_root);
+	video_unregister_device(rot_dev->vdev);
+	video_device_release(rot_dev->vdev);
+	v4l2_device_unregister(&rot_dev->v4l2_dev);
+	v4l2_m2m_release(rot_dev->m2m_dev);
+	sde_rotator_core_destroy(rot_dev->mgr);
+	sde_rotator_base_destroy(rot_dev->mdata);
+	kfree(rot_dev);
+
+	return 0;
+}
+
+/* System sleep and runtime PM callbacks (defined elsewhere in this file) */
+static const struct dev_pm_ops sde_rotator_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sde_rotator_pm_suspend, sde_rotator_pm_resume)
+ SET_RUNTIME_PM_OPS(sde_rotator_runtime_suspend,
+ sde_rotator_runtime_resume,
+ sde_rotator_runtime_idle)
+};
+
+/* SDE Rotator platform driver definition */
+static struct platform_driver rotator_driver = {
+ .probe = sde_rotator_probe,
+ .remove = sde_rotator_remove,
+ /* legacy suspend/resume kept alongside dev_pm_ops below */
+ .suspend = sde_rotator_suspend,
+ .resume = sde_rotator_resume,
+ .driver = {
+ .name = SDE_ROTATOR_DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sde_rotator_dt_match,
+ .pm = &sde_rotator_pm_ops,
+ },
+};
+
+/* Module entry: register the platform driver */
+static int __init sde_rotator_init_module(void)
+{
+ return platform_driver_register(&rotator_driver);
+}
+
+/* Module exit: unregister the platform driver */
+static void __exit sde_rotator_exit_module(void)
+{
+ platform_driver_unregister(&rotator_driver);
+}
+
+module_init(sde_rotator_init_module);
+module_exit(sde_rotator_exit_module);
+MODULE_DESCRIPTION("MSM SDE ROTATOR driver");
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
new file mode 100644
index 000000000000..4e9a7878459b
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.h
@@ -0,0 +1,199 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_ROTATOR_DEV_H__
+#define __SDE_ROTATOR_DEV_H__
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/slab.h>
+#include <linux/ion.h>
+#include <linux/ktime.h>
+#include <linux/iommu.h>
+#include <linux/dma-buf.h>
+#include <linux/msm-bus.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ctrls.h>
+#include <media/msm_sde_rotator.h>
+
+#include "sde_rotator_core.h"
+#include "sde_rotator_sync.h"
+
+/* Rotator device name */
+#define SDE_ROTATOR_DRV_NAME "sde_rotator"
+
+/* Event logging constants */
+#define SDE_ROTATOR_NUM_EVENTS 256
+#define SDE_ROTATOR_NUM_TIMESTAMPS SDE_ROTATOR_TS_MAX
+
+struct sde_rotator_device;
+struct sde_rotator_ctx;
+
+/*
+ * struct sde_rotator_buf_handle - Structure contain rotator buffer information.
+ * @fd: ion file descriptor from which this buffer is imported.
+ * @rot_dev: Pointer to rotator device.
+ * @ctx: Pointer to rotator context.
+ * @size: Size of the buffer.
+ * @addr: Address of rotator mmu mapped buffer.
+ * @secure: Non-secure (0) / secure buffer.
+ * @buffer: Pointer to dma buf associated with this fd.
+ */
+struct sde_rotator_buf_handle {
+ int fd;
+ struct sde_rotator_device *rot_dev;
+ struct sde_rotator_ctx *ctx;
+ unsigned long size;
+ ion_phys_addr_t addr;
+ int secure;
+ struct dma_buf *buffer;
+};
+
+/*
+ * struct sde_rotator_vbinfo - Structure define video buffer info.
+ * @fd: fence file descriptor (-1 when no fence is attached).
+ * @fence: fence associated with fd.
+ * @fence_ts: completion timestamp associated with fd
+ * @qbuf_ts: timestamp associated with buffer queue event
+ * @dqbuf_ts: Pointer to timestamp associated with buffer dequeue event
+ * (points into the statistics timestamp ring)
+ */
+struct sde_rotator_vbinfo {
+ int fd;
+ struct sde_rot_sync_fence *fence;
+ u32 fence_ts;
+ ktime_t qbuf_ts;
+ ktime_t *dqbuf_ts;
+};
+
+/*
+ * struct sde_rotator_ctx - Structure contains per open file handle context.
+ * @kobj: kernel object of this context
+ * @rot_dev: Pointer to rotator device.
+ * @fh: V4l2 file handle.
+ * @m2m_ctx: Memory to memory context.
+ * @ctrl_handler: control handler
+ * @format_cap: Current capture format.
+ * @format_out: Current output format.
+ * @crop_cap: Current capture crop.
+ * @crop_out: Current output crop.
+ * @timeperframe: Time per frame in seconds.
+ * @session_id: unique id for this context
+ * @hflip: horizontal flip (1-flip)
+ * @vflip: vertical flip (1-flip)
+ * @rotate: rotation angle (0,90,180,270)
+ * @priority: Priority of this context
+ * @secure: Non-secure (0) / Secure processing
+ * @command_pending: Number of pending transaction in h/w
+ * @abort_pending: True if abort is requested for async handling.
+ * @nbuf_cap: Number of requested buffer for capture queue
+ * @nbuf_out: Number of requested buffer for output queue
+ * @vbinfo_cap: Buffer (fence/timestamp) info for each capture buffer
+ * @vbinfo_out: Buffer (fence/timestamp) info for each output buffer
+ * @wait_queue: Wait queue for signaling end of job
+ * @submit_work: Work structure for submitting work
+ * @retire_work: Work structure for retiring work
+ * @work_queue: work queue for submit and retire processing
+ * @request: current service request (NULL, valid, or ERR_PTR on failure)
+ * @private: Pointer to session private information
+ */
+struct sde_rotator_ctx {
+ struct kobject kobj;
+ struct sde_rotator_device *rot_dev;
+ struct v4l2_fh fh;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_format format_cap;
+ struct v4l2_format format_out;
+ struct v4l2_rect crop_cap;
+ struct v4l2_rect crop_out;
+ struct v4l2_fract timeperframe;
+ u32 session_id;
+ s32 hflip;
+ s32 vflip;
+ s32 rotate;
+ enum v4l2_priority priority;
+ s32 secure;
+ atomic_t command_pending;
+ int abort_pending;
+ int nbuf_cap;
+ int nbuf_out;
+ struct sde_rotator_vbinfo *vbinfo_cap;
+ struct sde_rotator_vbinfo *vbinfo_out;
+ wait_queue_head_t wait_queue;
+ struct work_struct submit_work;
+ struct work_struct retire_work;
+ struct sde_rot_queue work_queue;
+ struct sde_rot_entry_container *request;
+ struct sde_rot_file_private *private;
+};
+
+/*
+ * struct sde_rotator_statistics - Storage for statistics
+ * @count: Number of processed request (also indexes the @ts ring buffer)
+ * @fail_count: Number of failed request
+ * @ts: Timestamps of most recent requests (circular, modulo
+ * SDE_ROTATOR_NUM_EVENTS)
+ */
+struct sde_rotator_statistics {
+ u64 count;
+ u64 fail_count;
+ ktime_t ts[SDE_ROTATOR_NUM_EVENTS][SDE_ROTATOR_NUM_TIMESTAMPS];
+};
+
+/*
+ * struct sde_rotator_device - FD device structure.
+ * @lock: Lock protecting this device structure and serializing IOCTL.
+ * @dev: Pointer to device struct.
+ * @v4l2_dev: V4l2 device.
+ * @vdev: Pointer to video device.
+ * @m2m_dev: Memory to memory device.
+ * @pdev: Pointer to platform device.
+ * @drvdata: Pointer to driver data (from the of_device_id match).
+ * @early_submit: flag enable job submission in ready state.
+ * @mgr: Pointer to core rotator manager.
+ * @mdata: Pointer to common rotator data/resource.
+ * @session_id: Next context session identifier
+ * @fence_timeout: Timeout value in msec for fence wait
+ * @streamoff_timeout: Timeout value in msec for stream off
+ * @stats: placeholder for rotator statistics
+ * @debugfs_root: Pointer to debugfs directory entry.
+ */
+struct sde_rotator_device {
+ struct mutex lock;
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+ struct video_device *vdev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct platform_device *pdev;
+ const void *drvdata;
+ u32 early_submit;
+ struct sde_rot_mgr *mgr;
+ struct sde_rot_data_type *mdata;
+ u32 session_id;
+ u32 fence_timeout;
+ u32 streamoff_timeout;
+ struct sde_rotator_statistics stats;
+ struct dentry *debugfs_root;
+};
+
+/* Fetch the rotator manager from a platform device's driver data */
+static inline
+struct sde_rot_mgr *sde_rot_mgr_from_pdevice(struct platform_device *pdev)
+{
+ return ((struct sde_rotator_device *) platform_get_drvdata(pdev))->mgr;
+}
+
+/* Fetch the rotator manager from a generic device's driver data */
+static inline
+struct sde_rot_mgr *sde_rot_mgr_from_device(struct device *dev)
+{
+ return ((struct sde_rotator_device *) dev_get_drvdata(dev))->mgr;
+}
+#endif /* __SDE_ROTATOR_DEV_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
new file mode 100644
index 000000000000..8e5793362875
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.c
@@ -0,0 +1,511 @@
+/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <media/msm_sde_rotator.h>
+
+#include "sde_rotator_formats.h"
+#include "sde_rotator_util.h"
+
+#define FMT_RGB_565(fmt, frame_fmt, flag_arg, e0, e1, e2, isubwc) \
+ { \
+ .format = (fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
+ .unpack_tight = 1, \
+ .unpack_align_msb = 0, \
+ .alpha_enable = 0, \
+ .unpack_count = 3, \
+ .bpp = 2, \
+ .frame_format = (frame_fmt), \
+ .pixel_mode = SDE_MDP_PIXEL_NORMAL, \
+ .element = { (e0), (e1), (e2) }, \
+ .bits = { \
+ [C2_R_Cr] = COLOR_5BIT, \
+ [C0_G_Y] = COLOR_6BIT, \
+ [C1_B_Cb] = COLOR_5BIT, \
+ }, \
+ .is_ubwc = isubwc, \
+ }
+
+#define FMT_RGB_888(fmt, frame_fmt, flag_arg, e0, e1, e2, isubwc) \
+ { \
+ .format = (fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
+ .unpack_tight = 1, \
+ .unpack_align_msb = 0, \
+ .alpha_enable = 0, \
+ .unpack_count = 3, \
+ .bpp = 3, \
+ .frame_format = (frame_fmt), \
+ .pixel_mode = SDE_MDP_PIXEL_NORMAL, \
+ .element = { (e0), (e1), (e2) }, \
+ .bits = { \
+ [C2_R_Cr] = COLOR_8BIT, \
+ [C0_G_Y] = COLOR_8BIT, \
+ [C1_B_Cb] = COLOR_8BIT, \
+ }, \
+ .is_ubwc = isubwc, \
+ }
+
+#define FMT_RGB_8888(fmt, frame_fmt, flag_arg, \
+ alpha_en, e0, e1, e2, e3, isubwc) \
+ { \
+ .format = (fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
+ .unpack_tight = 1, \
+ .unpack_align_msb = 0, \
+ .alpha_enable = (alpha_en), \
+ .unpack_count = 4, \
+ .bpp = 4, \
+ .frame_format = (frame_fmt), \
+ .pixel_mode = SDE_MDP_PIXEL_NORMAL, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { \
+ [C3_ALPHA] = COLOR_8BIT, \
+ [C2_R_Cr] = COLOR_8BIT, \
+ [C0_G_Y] = COLOR_8BIT, \
+ [C1_B_Cb] = COLOR_8BIT, \
+ }, \
+ .is_ubwc = isubwc, \
+ }
+
+#define FMT_YUV_COMMON(fmt) \
+ .format = (fmt), \
+ .is_yuv = 1, \
+ .bits = { \
+ [C2_R_Cr] = COLOR_8BIT, \
+ [C0_G_Y] = COLOR_8BIT, \
+ [C1_B_Cb] = COLOR_8BIT, \
+ }, \
+ .alpha_enable = 0, \
+ .unpack_tight = 1, \
+ .unpack_align_msb = 0
+
+#define FMT_YUV_PSEUDO(fmt, frame_fmt, samp, pixel_type, \
+ flag_arg, e0, e1, isubwc) \
+ { \
+ FMT_YUV_COMMON(fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR, \
+ .chroma_sample = samp, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .frame_format = (frame_fmt), \
+ .pixel_mode = (pixel_type), \
+ .element = { (e0), (e1) }, \
+ .is_ubwc = isubwc, \
+ }
+
+#define FMT_YUV_PLANR(fmt, frame_fmt, samp, \
+ flag_arg, e0, e1) \
+ { \
+ FMT_YUV_COMMON(fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = SDE_MDP_PLANE_PLANAR, \
+ .chroma_sample = samp, \
+ .bpp = 1, \
+ .unpack_count = 1, \
+ .frame_format = (frame_fmt), \
+ .pixel_mode = SDE_MDP_PIXEL_NORMAL, \
+ .element = { (e0), (e1) }, \
+ .is_ubwc = SDE_MDP_COMPRESS_NONE, \
+ }
+
+#define FMT_RGB_1555(fmt, alpha_en, flag_arg, e0, e1, e2, e3) \
+ { \
+ .format = (fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
+ .unpack_tight = 1, \
+ .unpack_align_msb = 0, \
+ .alpha_enable = (alpha_en), \
+ .unpack_count = 4, \
+ .bpp = 2, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .frame_format = SDE_MDP_FMT_LINEAR, \
+ .pixel_mode = SDE_MDP_PIXEL_NORMAL, \
+ .bits = { \
+ [C3_ALPHA] = COLOR_ALPHA_1BIT, \
+ [C2_R_Cr] = COLOR_5BIT, \
+ [C0_G_Y] = COLOR_5BIT, \
+ [C1_B_Cb] = COLOR_5BIT, \
+ }, \
+ .is_ubwc = SDE_MDP_COMPRESS_NONE, \
+ }
+
+#define FMT_RGB_4444(fmt, alpha_en, flag_arg, e0, e1, e2, e3) \
+ { \
+ .format = (fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
+ .unpack_tight = 1, \
+ .unpack_align_msb = 0, \
+ .alpha_enable = (alpha_en), \
+ .unpack_count = 4, \
+ .bpp = 2, \
+ .frame_format = SDE_MDP_FMT_LINEAR, \
+ .pixel_mode = SDE_MDP_PIXEL_NORMAL, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { \
+ [C3_ALPHA] = COLOR_ALPHA_4BIT, \
+ [C2_R_Cr] = COLOR_4BIT, \
+ [C0_G_Y] = COLOR_4BIT, \
+ [C1_B_Cb] = COLOR_4BIT, \
+ }, \
+ .is_ubwc = SDE_MDP_COMPRESS_NONE, \
+ }
+
+#define FMT_RGB_1010102(fmt, frame_fmt, flag_arg, \
+ alpha_en, e0, e1, e2, e3, isubwc) \
+ { \
+ .format = (fmt), \
+ .flag = flag_arg, \
+ .fetch_planes = SDE_MDP_PLANE_INTERLEAVED, \
+ .unpack_tight = 1, \
+ .unpack_align_msb = 0, \
+ .alpha_enable = (alpha_en), \
+ .unpack_count = 4, \
+ .bpp = 4, \
+ .frame_format = frame_fmt, \
+ .pixel_mode = SDE_MDP_PIXEL_10BIT, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { \
+ [C3_ALPHA] = COLOR_8BIT, \
+ [C2_R_Cr] = COLOR_8BIT, \
+ [C0_G_Y] = COLOR_8BIT, \
+ [C1_B_Cb] = COLOR_8BIT, \
+ }, \
+ .is_ubwc = isubwc, \
+ }
+
+#define VALID_ROT_WB_ALL (VALID_ROT_WB_FORMAT | VALID_ROT_R3_WB_FORMAT)
+/*
+ * UBWC formats table:
+ * This table holds the UBWC formats supported.
+ * If a compression ratio needs to be used for this or any other format,
+ * the data will be passed by user-space.
+ */
+static struct sde_mdp_format_params_ubwc sde_mdp_format_ubwc_map[] = {
+ {
+ .mdp_format = FMT_RGB_565(SDE_PIX_FMT_RGB_565_UBWC,
+ SDE_MDP_FMT_TILE_A5X, VALID_ROT_WB_ALL,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, SDE_MDP_COMPRESS_UBWC),
+ .micro = {
+ .tile_height = 4,
+ .tile_width = 16,
+ },
+ },
+ {
+ .mdp_format = FMT_RGB_8888(SDE_PIX_FMT_RGBA_8888_UBWC,
+ SDE_MDP_FMT_TILE_A5X, VALID_ROT_WB_ALL, 1,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
+ SDE_MDP_COMPRESS_UBWC),
+ .micro = {
+ .tile_height = 4,
+ .tile_width = 16,
+ },
+ },
+ {
+ .mdp_format = FMT_RGB_8888(SDE_PIX_FMT_RGBX_8888_UBWC,
+ SDE_MDP_FMT_TILE_A5X, VALID_ROT_WB_ALL, 0,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
+ SDE_MDP_COMPRESS_UBWC),
+ .micro = {
+ .tile_height = 4,
+ .tile_width = 16,
+ },
+ },
+ {
+ .mdp_format = FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
+ SDE_MDP_FMT_TILE_A5X, SDE_MDP_CHROMA_420,
+ SDE_MDP_PIXEL_NORMAL,
+ VALID_ROT_WB_ALL, C1_B_Cb, C2_R_Cr,
+ SDE_MDP_COMPRESS_UBWC),
+ .micro = {
+ .tile_height = 8,
+ .tile_width = 32,
+ },
+ },
+ {
+ .mdp_format = FMT_RGB_1010102(SDE_PIX_FMT_RGBA_1010102_UBWC,
+ SDE_MDP_FMT_TILE_A5X, VALID_ROT_R3_WB_FORMAT, 1,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
+ SDE_MDP_COMPRESS_UBWC),
+ .micro = {
+ .tile_height = 4,
+ .tile_width = 16,
+ },
+ },
+ {
+ .mdp_format = FMT_RGB_1010102(SDE_PIX_FMT_RGBX_1010102_UBWC,
+ SDE_MDP_FMT_TILE_A5X, VALID_ROT_R3_WB_FORMAT, 0,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
+ SDE_MDP_COMPRESS_UBWC),
+ .micro = {
+ .tile_height = 4,
+ .tile_width = 16,
+ },
+ },
+ {
+ .mdp_format = FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
+ SDE_MDP_FMT_TILE_A5X, SDE_MDP_CHROMA_420,
+ SDE_MDP_PIXEL_10BIT,
+ VALID_ROT_R3_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_UBWC),
+ .micro = {
+ .tile_height = 4,
+ .tile_width = 48,
+ },
+ },
+};
+
+static struct sde_mdp_format_params sde_mdp_format_map[] = {
+ FMT_RGB_565(
+ SDE_PIX_FMT_RGB_565, SDE_MDP_FMT_LINEAR, VALID_ROT_WB_ALL |
+ VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_565(
+ SDE_PIX_FMT_BGR_565, SDE_MDP_FMT_LINEAR, VALID_ROT_WB_ALL |
+ VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_888(
+ SDE_PIX_FMT_RGB_888, SDE_MDP_FMT_LINEAR, VALID_ROT_WB_ALL |
+ VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_888(
+ SDE_PIX_FMT_BGR_888, SDE_MDP_FMT_LINEAR, VALID_ROT_WB_ALL |
+ VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ SDE_MDP_COMPRESS_NONE),
+
+ FMT_RGB_8888(
+ SDE_PIX_FMT_ABGR_8888, SDE_MDP_FMT_LINEAR,
+ VALID_ROT_WB_ALL, 1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ SDE_MDP_COMPRESS_NONE),
+
+ FMT_RGB_8888(
+ SDE_PIX_FMT_XRGB_8888, SDE_MDP_FMT_LINEAR,
+ VALID_ROT_WB_ALL | VALID_MDP_WB_INTF_FORMAT,
+ 0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_8888(
+ SDE_PIX_FMT_ARGB_8888, SDE_MDP_FMT_LINEAR,
+ VALID_ROT_WB_ALL, 1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_8888(
+ SDE_PIX_FMT_RGBA_8888, SDE_MDP_FMT_LINEAR,
+ VALID_ROT_WB_ALL, 1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA,
+ SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_8888(
+ SDE_PIX_FMT_RGBX_8888, SDE_MDP_FMT_LINEAR,
+ VALID_ROT_WB_ALL | VALID_MDP_WB_INTF_FORMAT,
+ 0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_8888(
+ SDE_PIX_FMT_BGRA_8888, SDE_MDP_FMT_LINEAR,
+ VALID_ROT_WB_ALL, 1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA,
+ SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_8888(
+ SDE_PIX_FMT_BGRX_8888, SDE_MDP_FMT_LINEAR,
+ VALID_ROT_WB_ALL | VALID_MDP_WB_INTF_FORMAT,
+ 0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, SDE_MDP_COMPRESS_NONE),
+
+ FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H2V1, SDE_MDP_FMT_LINEAR,
+ SDE_MDP_CHROMA_H2V1, SDE_MDP_PIXEL_NORMAL,
+ VALID_ROT_WB_ALL, C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
+ FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V1, SDE_MDP_FMT_LINEAR,
+ SDE_MDP_CHROMA_H2V1, SDE_MDP_PIXEL_NORMAL,
+ VALID_ROT_WB_ALL, C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
+ FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H1V2, SDE_MDP_FMT_LINEAR,
+ SDE_MDP_CHROMA_H1V2, SDE_MDP_PIXEL_NORMAL,
+ VALID_ROT_WB_ALL, C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
+ FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H1V2, SDE_MDP_FMT_LINEAR,
+ SDE_MDP_CHROMA_H1V2, SDE_MDP_PIXEL_NORMAL,
+ VALID_ROT_WB_ALL, C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
+ FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H2V2, SDE_MDP_FMT_LINEAR,
+ SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
+ VALID_ROT_WB_ALL | VALID_MDP_WB_INTF_FORMAT,
+ C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
+ FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2, SDE_MDP_FMT_LINEAR,
+ SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
+ VALID_ROT_WB_ALL | VALID_MDP_WB_INTF_FORMAT,
+ C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
+ FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CBCR_H2V2_VENUS, SDE_MDP_FMT_LINEAR,
+ SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
+ VALID_ROT_WB_ALL | VALID_MDP_WB_INTF_FORMAT,
+ C1_B_Cb, C2_R_Cr, SDE_MDP_COMPRESS_NONE),
+ FMT_YUV_PSEUDO(SDE_PIX_FMT_Y_CRCB_H2V2_VENUS, SDE_MDP_FMT_LINEAR,
+ SDE_MDP_CHROMA_420, SDE_MDP_PIXEL_NORMAL,
+ VALID_ROT_WB_ALL | VALID_MDP_WB_INTF_FORMAT,
+ C2_R_Cr, C1_B_Cb, SDE_MDP_COMPRESS_NONE),
+
+ {
+ FMT_YUV_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_P010),
+ .flag = VALID_ROT_R3_WB_FORMAT,
+ .fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
+ .chroma_sample = SDE_MDP_CHROMA_420,
+ .unpack_count = 2,
+ .bpp = 2,
+ .frame_format = SDE_MDP_FMT_LINEAR,
+ .pixel_mode = SDE_MDP_PIXEL_10BIT,
+ .element = { C1_B_Cb, C2_R_Cr },
+ .unpack_tight = 0,
+ .unpack_align_msb = 1,
+ .is_ubwc = SDE_MDP_COMPRESS_NONE,
+ },
+ {
+ FMT_YUV_COMMON(SDE_PIX_FMT_Y_CBCR_H2V2_TP10),
+ .flag = VALID_ROT_R3_WB_FORMAT,
+ .fetch_planes = SDE_MDP_PLANE_PSEUDO_PLANAR,
+ .chroma_sample = SDE_MDP_CHROMA_420,
+ .unpack_count = 2,
+ .bpp = 2,
+ .frame_format = SDE_MDP_FMT_TILE_A5X,
+ .pixel_mode = SDE_MDP_PIXEL_10BIT,
+ .element = { C1_B_Cb, C2_R_Cr },
+ .unpack_tight = 1,
+ .unpack_align_msb = 0,
+ .is_ubwc = SDE_MDP_COMPRESS_NONE,
+ },
+
+ FMT_YUV_PLANR(SDE_PIX_FMT_Y_CB_CR_H2V2, SDE_MDP_FMT_LINEAR,
+ SDE_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C2_R_Cr, C1_B_Cb),
+ FMT_YUV_PLANR(SDE_PIX_FMT_Y_CR_CB_H2V2, SDE_MDP_FMT_LINEAR,
+ SDE_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C2_R_Cr),
+ FMT_YUV_PLANR(SDE_PIX_FMT_Y_CR_CB_GH2V2, SDE_MDP_FMT_LINEAR,
+ SDE_MDP_CHROMA_420, VALID_ROT_WB_FORMAT |
+ VALID_MDP_WB_INTF_FORMAT, C1_B_Cb, C2_R_Cr),
+
+ {
+ FMT_YUV_COMMON(SDE_PIX_FMT_YCBYCR_H2V1),
+ .flag = VALID_ROT_WB_FORMAT,
+ .fetch_planes = SDE_MDP_PLANE_INTERLEAVED,
+ .chroma_sample = SDE_MDP_CHROMA_H2V1,
+ .unpack_count = 4,
+ .bpp = 2,
+ .frame_format = SDE_MDP_FMT_LINEAR,
+ .pixel_mode = SDE_MDP_PIXEL_NORMAL,
+ .element = { C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y },
+ .is_ubwc = SDE_MDP_COMPRESS_NONE,
+ },
+ FMT_RGB_1555(SDE_PIX_FMT_RGBA_5551, 1, VALID_ROT_WB_ALL,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+ FMT_RGB_4444(SDE_PIX_FMT_RGBA_4444, 1, VALID_ROT_WB_ALL,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr),
+ FMT_RGB_4444(SDE_PIX_FMT_ARGB_4444, 1, VALID_ROT_WB_ALL,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA),
+ FMT_RGB_1010102(SDE_PIX_FMT_RGBA_1010102, SDE_MDP_FMT_LINEAR,
+ VALID_ROT_R3_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ 1, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_1010102(SDE_PIX_FMT_RGBX_1010102, SDE_MDP_FMT_LINEAR,
+ VALID_ROT_R3_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ 0, C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_1010102(SDE_PIX_FMT_BGRA_1010102, SDE_MDP_FMT_LINEAR,
+ VALID_ROT_R3_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ 1, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_1010102(SDE_PIX_FMT_BGRX_1010102, SDE_MDP_FMT_LINEAR,
+ VALID_ROT_R3_WB_FORMAT | VALID_MDP_WB_INTF_FORMAT,
+ 0, C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_1010102(SDE_PIX_FMT_ARGB_2101010, SDE_MDP_FMT_LINEAR,
+ INVALID_WB_FORMAT, 1, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_1010102(SDE_PIX_FMT_XRGB_2101010, SDE_MDP_FMT_LINEAR,
+ INVALID_WB_FORMAT, 0, C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_1010102(SDE_PIX_FMT_ABGR_2101010, SDE_MDP_FMT_LINEAR,
+ INVALID_WB_FORMAT, 1, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ SDE_MDP_COMPRESS_NONE),
+ FMT_RGB_1010102(SDE_PIX_FMT_XBGR_2101010, SDE_MDP_FMT_LINEAR,
+ INVALID_WB_FORMAT, 0, C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ SDE_MDP_COMPRESS_NONE),
+};
+
+/*
+ * sde_get_format_params - return format parameter of the given format
+ * @format: format to lookup
+ *
+ * Searches the linear/tile table first, then the UBWC table.
+ *
+ * Return: matching format descriptor, or NULL if @format is unsupported.
+ * (The previous code fell out of the second loop with @fmt still pointing
+ * at the last UBWC table entry, silently aliasing unknown formats.)
+ */
+struct sde_mdp_format_params *sde_get_format_params(u32 format)
+{
+	struct sde_mdp_format_params *fmt;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sde_mdp_format_map); i++) {
+		fmt = &sde_mdp_format_map[i];
+		if (format == fmt->format)
+			return fmt;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(sde_mdp_format_ubwc_map); i++) {
+		fmt = &sde_mdp_format_ubwc_map[i].mdp_format;
+		if (format == fmt->format)
+			return fmt;
+	}
+
+	/* unknown format: do not leak a pointer to an unrelated entry */
+	return NULL;
+}
+
+/*
+ * sde_rot_get_ubwc_micro_dim - return micro dimension of the given ubwc format
+ * @format: format to lookup
+ * @w: Pointer to returned width dimension
+ * @h: Pointer to returned height dimension
+ *
+ * Return: 0 on success, -EINVAL if @format is not a known UBWC format.
+ */
+int sde_rot_get_ubwc_micro_dim(u32 format, u16 *w, u16 *h)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sde_mdp_format_ubwc_map); i++) {
+		struct sde_mdp_format_params_ubwc *entry =
+				&sde_mdp_format_ubwc_map[i];
+
+		if (format != entry->mdp_format.format)
+			continue;
+
+		*w = entry->micro.tile_width;
+		*h = entry->micro.tile_height;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * sde_mdp_is_wb_format - determine if the given fmt is supported by writeback
+ * @fmt: Pointer to format parameter
+ *
+ * A format is writable if its flag matches the writeback capability
+ * (R1 or R3) advertised by the rotator hardware.
+ */
+bool sde_mdp_is_wb_format(struct sde_mdp_format_params *fmt)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+
+	if (!mdata || !fmt)
+		return false;
+
+	if ((fmt->flag & VALID_ROT_WB_FORMAT) &&
+			test_bit(SDE_CAPS_R1_WB, mdata->sde_caps_map))
+		return true;
+
+	return (fmt->flag & VALID_ROT_R3_WB_FORMAT) &&
+			test_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);
+}
+
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h
new file mode 100644
index 000000000000..198bee373a42
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_formats.h
@@ -0,0 +1,146 @@
+/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef SDE_ROTATOR_FORMATS_H
+#define SDE_ROTATOR_FORMATS_H
+
+#include <linux/types.h>
+#include <media/msm_sde_rotator.h>
+
+/* internal formats */
+#define SDE_PIX_FMT_Y_CBCR_H2V2_TP10 v4l2_fourcc('T', 'P', '1', '0')
+
+#define SDE_ROT_MAX_PLANES 4
+
+#define UBWC_META_MACRO_W_H 16
+#define UBWC_META_BLOCK_SIZE 256
+
+#define INVALID_WB_FORMAT 0
+#define VALID_ROT_WB_FORMAT BIT(0)
+#define VALID_MDP_WB_INTF_FORMAT BIT(1)
+#define VALID_ROT_R3_WB_FORMAT BIT(2)
+
+/*
+ * Value of enum chosen to fit the number of bits
+ * expected by the HW programming.
+ */
+enum {
+ COLOR_4BIT,
+ COLOR_5BIT,
+ COLOR_6BIT,
+ COLOR_8BIT,
+ COLOR_ALPHA_1BIT = 0,
+ COLOR_ALPHA_4BIT = 1,
+};
+
+#define C3_ALPHA 3 /* alpha */
+#define C2_R_Cr 2 /* R/Cr */
+#define C1_B_Cb 1 /* B/Cb */
+#define C0_G_Y 0 /* G/luma */
+
+enum sde_mdp_compress_type {
+ SDE_MDP_COMPRESS_NONE,
+ SDE_MDP_COMPRESS_UBWC,
+};
+
+enum sde_mdp_frame_format_type {
+ SDE_MDP_FMT_LINEAR,
+ SDE_MDP_FMT_TILE_A4X,
+ SDE_MDP_FMT_TILE_A5X,
+};
+
+enum sde_mdp_pixel_type {
+ SDE_MDP_PIXEL_NORMAL,
+ SDE_MDP_PIXEL_10BIT,
+};
+
+enum sde_mdp_sspp_fetch_type {
+ SDE_MDP_PLANE_INTERLEAVED,
+ SDE_MDP_PLANE_PLANAR,
+ SDE_MDP_PLANE_PSEUDO_PLANAR,
+};
+
+enum sde_mdp_sspp_chroma_samp_type {
+ SDE_MDP_CHROMA_RGB,
+ SDE_MDP_CHROMA_H2V1,
+ SDE_MDP_CHROMA_H1V2,
+ SDE_MDP_CHROMA_420
+};
+
+/* Description of one pixel format as programmed into the rotator HW. */
+struct sde_mdp_format_params {
+	u32 format;		/* SDE_PIX_FMT_* identifier */
+	u32 flag;		/* VALID_*_FORMAT capability bits */
+	u8 is_yuv;		/* non-zero for YUV formats */
+	u8 is_ubwc;		/* enum sde_mdp_compress_type */
+
+	u8 frame_format;	/* enum sde_mdp_frame_format_type */
+	u8 chroma_sample;	/* enum sde_mdp_sspp_chroma_samp_type */
+	u8 solid_fill;
+	u8 fetch_planes;	/* enum sde_mdp_sspp_fetch_type */
+	u8 unpack_align_msb;	/* 0 to LSB, 1 to MSB */
+	u8 unpack_tight;	/* 0 for loose, 1 for tight */
+	u8 unpack_count;	/* 0 = 1 component, 1 = 2 component ... */
+	u8 bpp;			/* bytes per pixel (2/3/4 in tables above) */
+	u8 alpha_enable;	/* source has alpha */
+	u8 pixel_mode;		/* 0: normal, 1:10bit */
+	u8 bits[SDE_ROT_MAX_PLANES];	/* per-component depth (COLOR_*) */
+	u8 element[SDE_ROT_MAX_PLANES];	/* component order (C*_*) */
+};
+
+struct sde_mdp_format_ubwc_tile_info {
+ u16 tile_height;
+ u16 tile_width;
+};
+
+struct sde_mdp_format_params_ubwc {
+ struct sde_mdp_format_params mdp_format;
+ struct sde_mdp_format_ubwc_tile_info micro;
+};
+
+struct sde_mdp_format_params *sde_get_format_params(u32 format);
+
+int sde_rot_get_ubwc_micro_dim(u32 format, u16 *w, u16 *h);
+
+bool sde_mdp_is_wb_format(struct sde_mdp_format_params *fmt);
+
+/* True if @fmt uses the A4X macro-tile layout. */
+static inline bool sde_mdp_is_tilea4x_format(struct sde_mdp_format_params *fmt)
+{
+	if (!fmt)
+		return false;
+
+	return fmt->frame_format == SDE_MDP_FMT_TILE_A4X;
+}
+
+/* True if @fmt uses the A5X macro-tile layout. */
+static inline bool sde_mdp_is_tilea5x_format(struct sde_mdp_format_params *fmt)
+{
+	if (!fmt)
+		return false;
+
+	return fmt->frame_format == SDE_MDP_FMT_TILE_A5X;
+}
+
+/* True if @fmt is UBWC compressed. */
+static inline bool sde_mdp_is_ubwc_format(struct sde_mdp_format_params *fmt)
+{
+	if (!fmt)
+		return false;
+
+	return fmt->is_ubwc == SDE_MDP_COMPRESS_UBWC;
+}
+
+/* True if @fmt uses the plain linear (non-tiled) layout. */
+static inline bool sde_mdp_is_linear_format(struct sde_mdp_format_params *fmt)
+{
+	if (!fmt)
+		return false;
+
+	return fmt->frame_format == SDE_MDP_FMT_LINEAR;
+}
+
+/* True if @fmt is one of the TP10 (packed 10-bit YUV) variants. */
+static inline bool sde_mdp_is_tp10_format(struct sde_mdp_format_params *fmt)
+{
+	if (!fmt)
+		return false;
+
+	return fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC ||
+		fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_TP10;
+}
+
+/* True if @fmt carries YUV (as opposed to RGB) data. */
+static inline bool sde_mdp_is_yuv_format(struct sde_mdp_format_params *fmt)
+{
+	if (!fmt)
+		return false;
+
+	return fmt->is_yuv != 0;
+}
+#endif
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
new file mode 100644
index 000000000000..051db7863c54
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_hwio.h
@@ -0,0 +1,74 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef SDE_ROTATOR_HWIO_H
+#define SDE_ROTATOR_HWIO_H
+
+#include <linux/bitops.h>
+
+#define SDE_REG_HW_VERSION 0x0
+#define SDE_REG_HW_INTR_STATUS 0x10
+
+#define SDE_INTR_MDP BIT(0)
+
+#define SDE_MDP_OFFSET 0x1000
+
+#define MMSS_MDP_PANIC_ROBUST_CTRL 0x00178
+#define MMSS_MDP_PANIC_LUT0 0x0017C
+#define MMSS_MDP_PANIC_LUT1 0x00180
+#define MMSS_MDP_ROBUST_LUT 0x00184
+#define MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL 0x00190
+
+/* following offsets are with respect to MDP VBIF base */
+#define MMSS_VBIF_CLKON 0x4
+#define MMSS_VBIF_RD_LIM_CONF 0x0B0
+#define MMSS_VBIF_WR_LIM_CONF 0x0C0
+
+#define MMSS_VBIF_XIN_HALT_CTRL0 0x200
+#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
+#define MMSS_VBIF_AXI_HALT_CTRL0 0x208
+#define MMSS_VBIF_AXI_HALT_CTRL1 0x20C
+#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
+#define MMSS_VBIF_TEST_BUS_OUT 0x230
+
+#define SDE_VBIF_QOS_REMAP_BASE 0x020
+#define SDE_VBIF_QOS_REMAP_ENTRIES 0x4
+
+#define SDE_VBIF_FIXED_SORT_EN 0x30
+#define SDE_VBIF_FIXED_SORT_SEL0 0x34
+
+/* MMSS_VBIF_NRT - offset relative to base offset */
+#define MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0 0x0008
+#define MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0 0
+#define MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1 1
+#define MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1 0x000C
+#define MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 0x0020
+#define MMSS_VBIF_NRT_VBIF_QOS_REMAP_01 0x0024
+#define MMSS_VBIF_NRT_VBIF_QOS_REMAP_10 0x0028
+#define MMSS_VBIF_NRT_VBIF_QOS_REMAP_11 0x002C
+#define MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN 0x00AC
+#define MMSS_VBIF_NRT_VBIF_IN_RD_LIM_CONF0 0x00B0
+#define MMSS_VBIF_NRT_VBIF_IN_RD_LIM_CONF1 0x00B4
+#define MMSS_VBIF_NRT_VBIF_IN_RD_LIM_CONF2 0x00B8
+#define MMSS_VBIF_NRT_VBIF_IN_WR_LIM_CONF0 0x00C0
+#define MMSS_VBIF_NRT_VBIF_IN_WR_LIM_CONF1 0x00C4
+#define MMSS_VBIF_NRT_VBIF_IN_WR_LIM_CONF2 0x00C8
+#define MMSS_VBIF_NRT_VBIF_OUT_RD_LIM_CONF0 0x00D0
+#define MMSS_VBIF_NRT_VBIF_OUT_WR_LIM_CONF0 0x00D4
+
+#define SDE_MDP_REG_TRAFFIC_SHAPER_EN BIT(31)
+#define SDE_MDP_REG_TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
+#define SDE_MDP_REG_TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
+#define SDE_MDP_REG_TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
+
+#endif
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_io_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_io_util.c
new file mode 100644
index 000000000000..f1ed5ea359f4
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_io_util.c
@@ -0,0 +1,429 @@
+/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include "sde_rotator_io_util.h"
+
+/*
+ * sde_reg_w - write a 32-bit value to a mapped rotator register
+ * @io: mapped register region (base + length in bytes)
+ * @offset: byte offset within the region
+ * @value: value to write
+ * @debug: when non-zero, read the register back and log both values
+ */
+void sde_reg_w(struct sde_io_data *io, u32 offset, u32 value, u32 debug)
+{
+	u32 in_val;
+
+	if (!io || !io->base) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	/* valid byte offsets are [0, len); offset == len is past the end */
+	if (offset >= io->len) {
+		DEV_ERR("%pS->%s: offset out of range\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	DEV_DBG("sdeio:%6.6x:%8.8x\n", offset, value);
+	writel_relaxed(value, io->base + offset);
+	if (debug) {
+		/* ensure register read is ordered after register write */
+		mb();
+		in_val = readl_relaxed(io->base + offset);
+		DEV_DBG("[%08x] => %08x [%08x]\n",
+			(u32)(unsigned long)(io->base + offset),
+			value, in_val);
+	}
+} /* sde_reg_w */
+
+/*
+ * sde_reg_r - read a 32-bit value from a mapped rotator register
+ * @io: mapped register region (base + length in bytes)
+ * @offset: byte offset within the region
+ * @debug: when non-zero, log the address/value pair
+ *
+ * NOTE: errors are reported as -EINVAL cast to u32, which is
+ * indistinguishable from a register legitimately reading that value;
+ * callers must validate @io/@offset beforehand if they care.
+ */
+u32 sde_reg_r(struct sde_io_data *io, u32 offset, u32 debug)
+{
+	u32 value;
+
+	if (!io || !io->base) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	/* valid byte offsets are [0, len); offset == len is past the end */
+	if (offset >= io->len) {
+		DEV_ERR("%pS->%s: offset out of range\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	value = readl_relaxed(io->base + offset);
+	if (debug)
+		DEV_DBG("[%08x] <= %08x\n",
+			(u32)(unsigned long)(io->base + offset), value);
+
+	DEV_DBG("sdeio:%6.6x:%8.8x\n", offset, value);
+	return value;
+} /* sde_reg_r */
+
+/* Hex-dump @length bytes of a register region when @debug is set. */
+void sde_reg_dump(void __iomem *base, u32 length, const char *prefix,
+		u32 debug)
+{
+	if (!debug)
+		return;
+
+	print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
+			(void *)base, length, false);
+} /* sde_reg_dump */
+
+/* Look up a named platform resource, logging when it is absent. */
+static struct resource *sde_rot_get_res_byname(struct platform_device *pdev,
+	unsigned int type, const char *name)
+{
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, type, name);
+	if (!res)
+		DEV_ERR("%s: '%s' resource not found\n", __func__, name);
+
+	return res;
+} /* sde_rot_get_res_byname */
+
+/*
+ * sde_rot_ioremap_byname - map the named platform memory resource
+ * @pdev: platform device that owns the resource
+ * @io_data: output; receives mapped base address and region length (bytes)
+ * @name: name of the IORESOURCE_MEM entry to map
+ *
+ * Return: 0 on success; -EINVAL on bad arguments, -ENODEV if the
+ * resource does not exist, -EIO if ioremap fails.
+ */
+int sde_rot_ioremap_byname(struct platform_device *pdev,
+	struct sde_io_data *io_data, const char *name)
+{
+	struct resource *res = NULL;
+
+	if (!pdev || !io_data) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	res = sde_rot_get_res_byname(pdev, IORESOURCE_MEM, name);
+	if (!res) {
+		DEV_ERR("%pS->%s: '%s' sde_rot_get_res_byname failed\n",
+			__builtin_return_address(0), __func__, name);
+		return -ENODEV;
+	}
+
+	io_data->len = (u32)resource_size(res);
+	io_data->base = ioremap(res->start, io_data->len);
+	if (!io_data->base) {
+		DEV_ERR("%pS->%s: '%s' ioremap failed\n",
+			__builtin_return_address(0), __func__, name);
+		return -EIO;
+	}
+
+	return 0;
+} /* sde_rot_ioremap_byname */
+
+/* Undo sde_rot_ioremap_byname: unmap and clear the region descriptor. */
+void sde_rot_iounmap(struct sde_io_data *io_data)
+{
+	if (!io_data) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	if (io_data->base)
+		iounmap(io_data->base);
+
+	io_data->base = NULL;
+	io_data->len = 0;
+} /* sde_rot_iounmap */
+
+/*
+ * sde_rot_config_vreg - acquire/configure or release a set of regulators
+ * @dev: device used for regulator_get()
+ * @in_vreg: array of regulator descriptors
+ * @num_vreg: number of entries in @in_vreg
+ * @config: non-zero to get/configure in order, zero to release in reverse
+ *
+ * LDO-type regulators (those reporting selectable voltages) also get a
+ * voltage range applied; switch (VS) regulators do not.
+ *
+ * Return: 0 on success, negative error code on failure (all regulators
+ * acquired so far are released before returning).
+ */
+int sde_rot_config_vreg(struct device *dev, struct sde_vreg *in_vreg,
+	int num_vreg, int config)
+{
+	int i = 0, rc = 0;
+	struct sde_vreg *curr_vreg = NULL;
+	enum sde_vreg_type type;
+
+	if (!dev || !in_vreg || !num_vreg) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	if (config) {
+		for (i = 0; i < num_vreg; i++) {
+			curr_vreg = &in_vreg[i];
+			curr_vreg->vreg = regulator_get(dev,
+				curr_vreg->vreg_name);
+			rc = PTR_RET(curr_vreg->vreg);
+			if (rc) {
+				DEV_ERR("%pS->%s: %s get failed. rc=%d\n",
+					__builtin_return_address(0), __func__,
+					curr_vreg->vreg_name, rc);
+				curr_vreg->vreg = NULL;
+				goto vreg_get_fail;
+			}
+			/* regulator_count_voltages() > 0 identifies an LDO */
+			type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+				? SDE_REG_LDO : SDE_REG_VS;
+			if (type == SDE_REG_LDO) {
+				rc = regulator_set_voltage(
+					curr_vreg->vreg,
+					curr_vreg->min_voltage,
+					curr_vreg->max_voltage);
+				if (rc < 0) {
+					DEV_ERR("%pS->%s: %s set vltg fail\n",
+						__builtin_return_address(0),
+						__func__,
+						curr_vreg->vreg_name);
+					goto vreg_set_voltage_fail;
+				}
+			}
+		}
+	} else {
+		/* release path: tear down in reverse acquisition order */
+		for (i = num_vreg-1; i >= 0; i--) {
+			curr_vreg = &in_vreg[i];
+			if (curr_vreg->vreg) {
+				type = (regulator_count_voltages(
+					curr_vreg->vreg) > 0)
+					? SDE_REG_LDO : SDE_REG_VS;
+				if (type == SDE_REG_LDO) {
+					regulator_set_voltage(curr_vreg->vreg,
+						0, curr_vreg->max_voltage);
+				}
+				regulator_put(curr_vreg->vreg);
+				curr_vreg->vreg = NULL;
+			}
+		}
+	}
+	return 0;
+
+/*
+ * Error unwind: these labels are deliberately chained by fall-through.
+ * vreg_get_fail iterates over the regulators acquired so far, jumping to
+ * vreg_unconfig for each; control then falls through vreg_set_voltage_fail
+ * (which does the regulator_put) back into the vreg_get_fail loop.
+ */
+vreg_unconfig:
+if (type == SDE_REG_LDO)
+	regulator_set_optimum_mode(curr_vreg->vreg, 0);
+
+vreg_set_voltage_fail:
+	regulator_put(curr_vreg->vreg);
+	curr_vreg->vreg = NULL;
+
+vreg_get_fail:
+	for (i--; i >= 0; i--) {
+		curr_vreg = &in_vreg[i];
+		type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+			? SDE_REG_LDO : SDE_REG_VS;
+		goto vreg_unconfig;
+	}
+	return rc;
+} /* sde_rot_config_vreg */
+
+/*
+ * sde_rot_enable_vreg - enable or disable a set of regulators
+ * @in_vreg: array of regulator descriptors (already acquired)
+ * @num_vreg: number of entries in @in_vreg
+ * @enable: non-zero to enable in order, zero to disable in reverse
+ *
+ * Honors the per-regulator pre/post sleep intervals and load settings.
+ * On an enable failure, every regulator enabled so far is rolled back.
+ *
+ * Return: 0 on success, negative error code on first failure.
+ */
+int sde_rot_enable_vreg(struct sde_vreg *in_vreg, int num_vreg, int enable)
+{
+	int i = 0, rc = 0;
+	bool need_sleep;
+
+	if (!in_vreg) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	if (enable) {
+		for (i = 0; i < num_vreg; i++) {
+			rc = PTR_RET(in_vreg[i].vreg);
+			if (rc) {
+				DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name, rc);
+				goto vreg_set_opt_mode_fail;
+			}
+			/* sleeps only apply when the rail actually toggles */
+			need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
+			if (in_vreg[i].pre_on_sleep && need_sleep)
+				usleep_range(in_vreg[i].pre_on_sleep * 1000,
+					in_vreg[i].pre_on_sleep * 1000);
+			rc = regulator_set_optimum_mode(in_vreg[i].vreg,
+				in_vreg[i].enable_load);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s set opt m fail\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name);
+				goto vreg_set_opt_mode_fail;
+			}
+			rc = regulator_enable(in_vreg[i].vreg);
+			/*
+			 * NOTE(review): post_on_sleep runs before rc is
+			 * checked — presumably to let the rail settle either
+			 * way; confirm this ordering is intentional.
+			 */
+			if (in_vreg[i].post_on_sleep && need_sleep)
+				usleep_range(in_vreg[i].post_on_sleep * 1000,
+					in_vreg[i].post_on_sleep * 1000);
+			if (rc < 0) {
+				DEV_ERR("%pS->%s: %s enable failed\n",
+					__builtin_return_address(0), __func__,
+					in_vreg[i].vreg_name);
+				goto disable_vreg;
+			}
+		}
+	} else {
+		for (i = num_vreg-1; i >= 0; i--) {
+			if (in_vreg[i].pre_off_sleep)
+				usleep_range(in_vreg[i].pre_off_sleep * 1000,
+					in_vreg[i].pre_off_sleep * 1000);
+			regulator_set_optimum_mode(in_vreg[i].vreg,
+				in_vreg[i].disable_load);
+			regulator_disable(in_vreg[i].vreg);
+			if (in_vreg[i].post_off_sleep)
+				usleep_range(in_vreg[i].post_off_sleep * 1000,
+					in_vreg[i].post_off_sleep * 1000);
+		}
+	}
+	return rc;
+
+/*
+ * Unwind: disable_vreg resets the load of the regulator that failed to
+ * enable (it was never enabled), then falls through to disable all the
+ * previously enabled regulators in reverse order.
+ */
+disable_vreg:
+	regulator_set_optimum_mode(in_vreg[i].vreg, in_vreg[i].disable_load);
+
+vreg_set_opt_mode_fail:
+	for (i--; i >= 0; i--) {
+		if (in_vreg[i].pre_off_sleep)
+			usleep_range(in_vreg[i].pre_off_sleep * 1000,
+				in_vreg[i].pre_off_sleep * 1000);
+		regulator_set_optimum_mode(in_vreg[i].vreg,
+			in_vreg[i].disable_load);
+		regulator_disable(in_vreg[i].vreg);
+		if (in_vreg[i].post_off_sleep)
+			usleep_range(in_vreg[i].post_off_sleep * 1000,
+				in_vreg[i].post_off_sleep * 1000);
+	}
+
+	return rc;
+} /* sde_rot_enable_vreg */
+
+/* Release every clock handle in @clk_arry, in reverse order. */
+void sde_rot_put_clk(struct sde_clk *clk_arry, int num_clk)
+{
+	int i;
+
+	if (!clk_arry) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return;
+	}
+
+	for (i = num_clk - 1; i >= 0; i--) {
+		struct sde_clk *entry = &clk_arry[i];
+
+		if (entry->clk)
+			clk_put(entry->clk);
+		entry->clk = NULL;
+	}
+} /* sde_rot_put_clk */
+
+/*
+ * sde_rot_get_clk - look up every named clock in @clk_arry
+ * @dev: device used for clk_get()
+ * @clk_arry: array of clock descriptors; .clk_name must be populated
+ * @num_clk: number of entries in @clk_arry
+ *
+ * Return: 0 on success; on failure, all clocks obtained so far are put
+ * and the error from clk_get() is returned.
+ */
+int sde_rot_get_clk(struct device *dev, struct sde_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	if (!dev || !clk_arry) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_clk; i++) {
+		clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+		rc = PTR_RET(clk_arry[i].clk);
+		if (rc) {
+			DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name, rc);
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	/* releases all entries; .clk of the failed entry is an ERR_PTR,
+	 * which sde_rot_put_clk treats as non-NULL but clk_put tolerates */
+	sde_rot_put_clk(clk_arry, num_clk);
+
+	return rc;
+} /* sde_rot_get_clk */
+
+/*
+ * Apply the stored rate to every non-AHB clock in @clk_arry.
+ * Stops at the first failure or missing clock handle.
+ */
+int sde_rot_clk_set_rate(struct sde_clk *clk_arry, int num_clk)
+{
+	int i, rc = 0;
+
+	if (!clk_arry) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_clk; i++) {
+		if (!clk_arry[i].clk) {
+			DEV_ERR("%pS->%s: '%s' is not available\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			rc = -EPERM;
+			break;
+		}
+
+		/* AHB (bus) clocks keep their default rate */
+		if (clk_arry[i].type == SDE_CLK_AHB)
+			continue;
+
+		DEV_DBG("%pS->%s: '%s' rate %ld\n",
+			__builtin_return_address(0), __func__,
+			clk_arry[i].clk_name, clk_arry[i].rate);
+		rc = clk_set_rate(clk_arry[i].clk, clk_arry[i].rate);
+		if (rc) {
+			DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name, rc);
+			break;
+		}
+	}
+
+	return rc;
+} /* sde_rot_clk_set_rate */
+
+/*
+ * sde_rot_enable_clk - enable or disable an array of clocks
+ * @clk_arry: array of clock descriptors
+ * @num_clk: number of entries in @clk_arry
+ * @enable: non-zero to enable in array order, zero to disable in reverse
+ *
+ * On an enable failure, the clocks already enabled (indices 0..i-1) are
+ * rolled back before returning the error.
+ *
+ * Return: 0 on success, negative error code on first failure.
+ */
+int sde_rot_enable_clk(struct sde_clk *clk_arry, int num_clk, int enable)
+{
+	int i, rc = 0;
+
+	if (!clk_arry) {
+		DEV_ERR("%pS->%s: invalid input\n",
+			__builtin_return_address(0), __func__);
+		return -EINVAL;
+	}
+
+	if (enable) {
+		for (i = 0; i < num_clk; i++) {
+			DEV_DBG("%pS->%s: enable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+			if (clk_arry[i].clk) {
+				rc = clk_prepare_enable(clk_arry[i].clk);
+				if (rc)
+					DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+						__builtin_return_address(0),
+						__func__,
+						clk_arry[i].clk_name, rc);
+			} else {
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+				rc = -EPERM;
+			}
+
+			if (rc) {
+				/*
+				 * Roll back the clocks enabled so far,
+				 * i.e. clk_arry[0..i-1]. (Previously this
+				 * passed &clk_arry[i], which disabled
+				 * never-enabled entries past the failure
+				 * point and could index out of bounds.)
+				 */
+				sde_rot_enable_clk(clk_arry, i, false);
+				break;
+			}
+		}
+	} else {
+		for (i = num_clk - 1; i >= 0; i--) {
+			DEV_DBG("%pS->%s: disable '%s'\n",
+				__builtin_return_address(0), __func__,
+				clk_arry[i].clk_name);
+
+			if (clk_arry[i].clk)
+				clk_disable_unprepare(clk_arry[i].clk);
+			else
+				DEV_ERR("%pS->%s: '%s' is not available\n",
+					__builtin_return_address(0), __func__,
+					clk_arry[i].clk_name);
+		}
+	}
+
+	return rc;
+} /* sde_rot_enable_clk */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_io_util.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_io_util.h
new file mode 100644
index 000000000000..2d1303da8457
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_io_util.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef __SDE_ROTATOR_IO_UTIL_H__
+#define __SDE_ROTATOR_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+/*
+ * Driver log helpers. With DEBUG defined, DEV_DBG is promoted to pr_err
+ * so traces appear at default log levels; the message tag stays
+ * <SDEROT_DBG> in both variants so log filtering is consistent
+ * (previously the DEBUG variant was mislabeled <SDEROT_ERR>).
+ */
+#ifdef DEBUG
+#define DEV_DBG(fmt, args...) pr_err("<SDEROT_DBG> " fmt, ##args)
+#else
+#define DEV_DBG(fmt, args...) pr_debug("<SDEROT_DBG> " fmt, ##args)
+#endif
+#define DEV_INFO(fmt, args...) pr_info("<SDEROT_INFO> " fmt, ##args)
+#define DEV_WARN(fmt, args...) pr_warn("<SDEROT_WARN> " fmt, ##args)
+#define DEV_ERR(fmt, args...) pr_err("<SDEROT_ERR> " fmt, ##args)
+
+struct sde_io_data {
+ u32 len;
+ void __iomem *base;
+};
+
+void sde_reg_w(struct sde_io_data *io, u32 offset, u32 value, u32 debug);
+u32 sde_reg_r(struct sde_io_data *io, u32 offset, u32 debug);
+void sde_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug);
+
+#define SDE_REG_W_ND(io, offset, val) sde_reg_w(io, offset, val, false)
+#define SDE_REG_W(io, offset, val) sde_reg_w(io, offset, val, true)
+#define SDE_REG_R_ND(io, offset) sde_reg_r(io, offset, false)
+#define SDE_REG_R(io, offset) sde_reg_r(io, offset, true)
+
+enum sde_vreg_type {
+ SDE_REG_LDO,
+ SDE_REG_VS,
+};
+
+struct sde_vreg {
+ struct regulator *vreg; /* vreg handle */
+ char vreg_name[32];
+ int min_voltage;
+ int max_voltage;
+ int enable_load;
+ int disable_load;
+ int pre_on_sleep;
+ int post_on_sleep;
+ int pre_off_sleep;
+ int post_off_sleep;
+};
+
+struct sde_gpio {
+ unsigned gpio;
+ unsigned value;
+ char gpio_name[32];
+};
+
+enum sde_clk_type {
+ SDE_CLK_AHB, /* no set rate. rate controlled through rpm */
+ SDE_CLK_PCLK,
+ SDE_CLK_OTHER,
+};
+
+struct sde_clk {
+ struct clk *clk; /* clk handle */
+ char clk_name[32];
+ enum sde_clk_type type;
+ unsigned long rate;
+};
+
+struct sde_module_power {
+ unsigned num_vreg;
+ struct sde_vreg *vreg_config;
+ unsigned num_gpio;
+ struct sde_gpio *gpio_config;
+ unsigned num_clk;
+ struct sde_clk *clk_config;
+};
+
+int sde_rot_ioremap_byname(struct platform_device *pdev,
+ struct sde_io_data *io_data, const char *name);
+void sde_rot_iounmap(struct sde_io_data *io_data);
+
+int sde_rot_config_vreg(struct device *dev, struct sde_vreg *in_vreg,
+ int num_vreg, int config);
+int sde_rot_enable_vreg(struct sde_vreg *in_vreg, int num_vreg, int enable);
+
+int sde_rot_get_clk(struct device *dev, struct sde_clk *clk_arry, int num_clk);
+void sde_rot_put_clk(struct sde_clk *clk_arry, int num_clk);
+int sde_rot_clk_set_rate(struct sde_clk *clk_arry, int num_clk);
+int sde_rot_enable_clk(struct sde_clk *clk_arry, int num_clk, int enable);
+
+#endif /* __SDE_ROTATOR_IO_UTIL_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c
new file mode 100644
index 000000000000..31cc4f3fb58a
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.c
@@ -0,0 +1,596 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/sync.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+
+#include "sde_rotator_r1_hwio.h"
+#include "sde_rotator_core.h"
+#include "sde_rotator_util.h"
+#include "sde_rotator_r1_internal.h"
+#include "sde_rotator_r1.h"
+#include "sde_rotator_r1_debug.h"
+
+/* bundle of mdp hw objects backing one rotator hw resource */
+struct sde_mdp_hw_resource {
+	struct sde_rot_hw_resource hw;	/* generic resource bookkeeping */
+	struct sde_mdp_ctl *ctl;	/* control path block */
+	struct sde_mdp_mixer *mixer;	/* writeback layer mixer */
+	struct sde_mdp_pipe *pipe;	/* source pipe */
+	struct sde_mdp_writeback *wb;	/* writeback block */
+};
+
+/* r1 backend private data, attached to sde_rot_mgr::hw_data */
+struct sde_rotator_r1_data {
+	struct sde_rot_mgr *mgr;
+	int wb_id;	/* writeback id from DT, -1 if not specified */
+	int ctl_id;	/* ctl id from DT, -1 if not specified */
+	int irq_num;	/* rotator irq, -1 if unavailable */
+	struct sde_mdp_hw_resource *mdp_hw;
+};
+
+/*
+ * sde_rotator_hw_alloc - assemble the mdp hw pipeline for the rotator
+ * @mgr: rotator manager
+ * @ctl_id: ctl block index (from DT; -1 maps to an invalid offset)
+ * @wb_id: writeback block index; also reused as the source pipe ndx
+ * @irq_num: rotator irq number, negative if unavailable
+ *
+ * Allocates ctl/wb/mixer/pipe, wires them together and starts the
+ * writeback block. Returns the bundle or an ERR_PTR() on failure.
+ */
+static struct sde_mdp_hw_resource *sde_rotator_hw_alloc(
+	struct sde_rot_mgr *mgr, u32 ctl_id, u32 wb_id, int irq_num)
+{
+	struct sde_mdp_hw_resource *mdp_hw;
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	int pipe_ndx, offset = ctl_id;
+	int ret = 0;	/* must start clean: checked after optional start_fnc */
+
+	mdp_hw = devm_kzalloc(&mgr->pdev->dev,
+			sizeof(struct sde_mdp_hw_resource), GFP_KERNEL);
+	if (!mdp_hw)
+		return ERR_PTR(-ENOMEM);
+
+	mdp_hw->ctl = sde_mdp_ctl_alloc(mdata, offset);
+	if (IS_ERR_OR_NULL(mdp_hw->ctl)) {
+		SDEROT_ERR("unable to allocate ctl\n");
+		ret = -ENODEV;
+		goto error;
+	}
+	mdp_hw->ctl->irq_num = irq_num;
+
+	mdp_hw->wb = sde_mdp_wb_assign(wb_id, mdp_hw->ctl->num);
+	if (IS_ERR_OR_NULL(mdp_hw->wb)) {
+		SDEROT_ERR("unable to allocate wb\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	mdp_hw->ctl->wb = mdp_hw->wb;
+	mdp_hw->mixer = sde_mdp_mixer_assign(mdp_hw->wb->num, true);
+	if (IS_ERR_OR_NULL(mdp_hw->mixer)) {
+		SDEROT_ERR("unable to allocate wb mixer\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	mdp_hw->ctl->mixer_left = mdp_hw->mixer;
+	mdp_hw->mixer->ctl = mdp_hw->ctl;
+
+	mdp_hw->mixer->rotator_mode = true;
+
+	/* select the ctl op mode matching the wb layer mixer in use */
+	switch (mdp_hw->mixer->num) {
+	case SDE_MDP_WB_LAYERMIXER0:
+		mdp_hw->ctl->opmode = SDE_MDP_CTL_OP_ROT0_MODE;
+		break;
+	case SDE_MDP_WB_LAYERMIXER1:
+		mdp_hw->ctl->opmode = SDE_MDP_CTL_OP_ROT1_MODE;
+		break;
+	default:
+		SDEROT_ERR("invalid layer mixer=%d\n", mdp_hw->mixer->num);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	mdp_hw->ctl->ops.start_fnc = sde_mdp_writeback_start;
+	mdp_hw->ctl->wb_type = SDE_MDP_WB_CTL_TYPE_BLOCK;
+
+	if (mdp_hw->ctl->ops.start_fnc)
+		ret = mdp_hw->ctl->ops.start_fnc(mdp_hw->ctl);
+
+	if (ret)
+		goto error;
+
+	/* override from dt */
+	pipe_ndx = wb_id;
+	mdp_hw->pipe = sde_mdp_pipe_assign(mdata, mdp_hw->mixer, pipe_ndx);
+	if (IS_ERR_OR_NULL(mdp_hw->pipe)) {
+		SDEROT_ERR("dma pipe allocation failed\n");
+		ret = -ENODEV;
+		goto error;
+	}
+
+	mdp_hw->pipe->mixer_left = mdp_hw->mixer;
+	mdp_hw->hw.wb_id = mdp_hw->wb->num;
+	mdp_hw->hw.pending_count = 0;
+	atomic_set(&mdp_hw->hw.num_active, 0);
+	mdp_hw->hw.max_active = 1;
+	init_waitqueue_head(&mdp_hw->hw.wait_queue);
+
+	return mdp_hw;
+error:
+	/* mdp_hw is zeroed, so unassigned members are NULL and skipped */
+	if (!IS_ERR_OR_NULL(mdp_hw->pipe))
+		sde_mdp_pipe_destroy(mdp_hw->pipe);
+	if (!IS_ERR_OR_NULL(mdp_hw->ctl)) {
+		if (mdp_hw->ctl->ops.stop_fnc)
+			mdp_hw->ctl->ops.stop_fnc(mdp_hw->ctl, 0);
+		sde_mdp_ctl_free(mdp_hw->ctl);
+	}
+	devm_kfree(&mgr->pdev->dev, mdp_hw);
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * sde_rotator_hw_free - tear down a resource built by sde_rotator_hw_alloc()
+ * @mgr: rotator manager owning the resource
+ * @mdp_hw: resource bundle; may be NULL
+ *
+ * Destroys the source pipe, stops and frees the ctl obtained via the
+ * mixer (switched to the block writeback type), then frees the bundle.
+ */
+static void sde_rotator_hw_free(struct sde_rot_mgr *mgr,
+		struct sde_mdp_hw_resource *mdp_hw)
+{
+	struct sde_mdp_mixer *mixer;
+	struct sde_mdp_ctl *ctl;
+
+	if (!mgr || !mdp_hw)
+		return;
+
+	/* grab the mixer before the pipe is destroyed */
+	mixer = mdp_hw->pipe->mixer_left;
+
+	sde_mdp_pipe_destroy(mdp_hw->pipe);
+
+	ctl = sde_mdp_ctl_mixer_switch(mixer->ctl,
+		SDE_MDP_WB_CTL_TYPE_BLOCK);
+	if (ctl) {
+		if (ctl->ops.stop_fnc)
+			ctl->ops.stop_fnc(ctl, 0);
+		sde_mdp_ctl_free(ctl);
+	}
+
+	devm_kfree(&mgr->pdev->dev, mdp_hw);
+}
+
+/*
+ * sde_rotator_hw_alloc_ext - hand out the shared r1 hw resource
+ *
+ * r1 uses a single pre-allocated resource, so @pipe_id and @wb_id are
+ * ignored and every caller receives the same handle.
+ */
+static struct sde_rot_hw_resource *sde_rotator_hw_alloc_ext(
+	struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
+{
+	struct sde_rotator_r1_data *hw_data;
+
+	if (!mgr || !mgr->hw_data)
+		return NULL;
+
+	hw_data = mgr->hw_data;
+
+	return &hw_data->mdp_hw->hw;
+}
+
+/*
+ * sde_rotator_hw_free_ext - release a handle from sde_rotator_hw_alloc_ext()
+ *
+ * The r1 resource is shared and long-lived, so there is nothing to
+ * release per allocation.
+ */
+static void sde_rotator_hw_free_ext(struct sde_rot_mgr *mgr,
+	struct sde_rot_hw_resource *hw)
+{
+	/* currently nothing specific for this device */
+}
+
+/* copy a rotation-item rect into the mdp rect representation */
+static void sde_rotator_translate_rect(struct sde_rect *dst,
+	struct sde_rect *src)
+{
+	dst->x = src->x;
+	dst->w = src->w;
+	dst->y = src->y;
+	dst->h = src->h;
+}
+
+/* map public SDE_ROTATION_* flags onto the internal SDE_* pipe flags */
+static u32 sde_rotator_translate_flags(u32 input)
+{
+	static const struct {
+		u32 rot;	/* public rotation flag */
+		u32 mdp;	/* corresponding internal flag */
+	} flag_map[] = {
+		{ SDE_ROTATION_NOP, SDE_ROT_NOP },
+		{ SDE_ROTATION_FLIP_LR, SDE_FLIP_LR },
+		{ SDE_ROTATION_FLIP_UD, SDE_FLIP_UD },
+		{ SDE_ROTATION_90, SDE_ROT_90 },
+		{ SDE_ROTATION_DEINTERLACE, SDE_DEINTERLACE },
+		{ SDE_ROTATION_SECURE, SDE_SECURE_OVERLAY_SESSION },
+	};
+	u32 output = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(flag_map); i++)
+		if (input & flag_map[i].rot)
+			output |= flag_map[i].mdp;
+
+	return output;
+}
+
+/*
+ * sde_rotator_config_hw - program the source pipe for a rotation entry
+ * @hw: hw resource (embedded in a sde_mdp_hw_resource)
+ * @entry: rotation work entry carrying the item and buffers
+ *
+ * Translates the item's flags/format/rects onto the pipe and queues the
+ * source buffer. Returns the result of sde_mdp_pipe_queue_data().
+ */
+static int sde_rotator_config_hw(struct sde_rot_hw_resource *hw,
+	struct sde_rot_entry *entry)
+{
+	struct sde_mdp_hw_resource *mdp_hw;
+	struct sde_mdp_pipe *pipe;
+	struct sde_rotation_item *item;
+	int ret;
+
+	if (!hw || !entry) {
+		SDEROT_ERR("null hw resource/entry");
+		return -EINVAL;
+	}
+
+	mdp_hw = container_of(hw, struct sde_mdp_hw_resource, hw);
+
+	pipe = mdp_hw->pipe;
+	item = &entry->item;
+
+	pipe->flags = sde_rotator_translate_flags(item->flags);
+	pipe->src_fmt = sde_get_format_params(item->input.format);
+	pipe->img_width = item->input.width;
+	pipe->img_height = item->input.height;
+	sde_rotator_translate_rect(&pipe->src, &item->src_rect);
+	/*
+	 * pipe dst mirrors the src rect; presumably rotation/scaling is
+	 * applied downstream by the writeback block rather than the pipe
+	 * — TODO confirm this is intentional (dst_rect is only logged).
+	 */
+	sde_rotator_translate_rect(&pipe->dst, &item->src_rect);
+
+	pipe->params_changed++;
+
+	ret = sde_mdp_pipe_queue_data(pipe, &entry->src_buf);
+	SDEROT_DBG("Config pipe. src{%u,%u,%u,%u}f=%u\n"
+		"dst{%u,%u,%u,%u}f=%u session_id=%u\n",
+		item->src_rect.x, item->src_rect.y,
+		item->src_rect.w, item->src_rect.h, item->input.format,
+		item->dst_rect.x, item->dst_rect.y,
+		item->dst_rect.w, item->dst_rect.h, item->output.format,
+		item->session_id);
+
+	return ret;
+}
+
+/*
+ * sde_rotator_kickoff_entry - commit a rotation entry to the writeback
+ * @hw: hw resource embedded in a sde_mdp_hw_resource
+ * @entry: rotation entry whose destination buffer is consumed
+ *
+ * Returns the result of the writeback display commit.
+ */
+static int sde_rotator_kickoff_entry(struct sde_rot_hw_resource *hw,
+	struct sde_rot_entry *entry)
+{
+	struct sde_mdp_hw_resource *mdp_hw;
+	struct sde_mdp_writeback_arg wb_args;
+
+	if (!hw || !entry) {
+		SDEROT_ERR("null hw resource/entry");
+		return -EINVAL;
+	}
+
+	mdp_hw = container_of(hw, struct sde_mdp_hw_resource, hw);
+
+	wb_args.data = &entry->dst_buf;
+	wb_args.priv_data = entry;
+
+	return sde_mdp_writeback_display_commit(mdp_hw->ctl, &wb_args);
+}
+
+/*
+ * sde_rotator_wait_for_entry - block until the committed entry completes
+ * @hw: hw resource embedded in a sde_mdp_hw_resource
+ * @entry: rotation entry being waited on (validated only)
+ */
+static int sde_rotator_wait_for_entry(struct sde_rot_hw_resource *hw,
+	struct sde_rot_entry *entry)
+{
+	struct sde_mdp_hw_resource *mdp_hw;
+
+	if (!hw || !entry) {
+		SDEROT_ERR("null hw resource/entry");
+		return -EINVAL;
+	}
+
+	mdp_hw = container_of(hw, struct sde_mdp_hw_resource, hw);
+
+	return sde_mdp_display_wait4comp(mdp_hw->ctl);
+}
+
+/*
+ * sde_rotator_hw_validate_entry - validate downscale limits of an entry
+ * @mgr: rotator manager (unused here)
+ * @entry: entry whose dnsc_factor_w/h are computed and validated
+ *
+ * The r1 downscaler only supports integral, power-of-two, symmetric
+ * ratios up to 16 (bit index > 5 is rejected), and no downscale at all
+ * for UBWC output. On failure both factors are reset to 0.
+ */
+static int sde_rotator_hw_validate_entry(struct sde_rot_mgr *mgr,
+	struct sde_rot_entry *entry)
+{
+	int ret = 0;
+	u16 src_w, src_h, dst_w, dst_h, bit;
+	struct sde_rotation_item *item = &entry->item;
+	struct sde_mdp_format_params *fmt;
+
+	src_w = item->src_rect.w;
+	src_h = item->src_rect.h;
+
+	/* with 90-degree rotation the dst dimensions are swapped */
+	if (item->flags & SDE_ROTATION_90) {
+		dst_w = item->dst_rect.h;
+		dst_h = item->dst_rect.w;
+	} else {
+		dst_w = item->dst_rect.w;
+		dst_h = item->dst_rect.h;
+	}
+
+	entry->dnsc_factor_w = 0;
+	entry->dnsc_factor_h = 0;
+
+	if ((src_w != dst_w) || (src_h != dst_h)) {
+		if ((src_w % dst_w) || (src_h % dst_h)) {
+			SDEROT_DBG("non integral scale not support\n");
+			ret = -EINVAL;
+			goto dnsc_err;
+		}
+		entry->dnsc_factor_w = src_w / dst_w;
+		bit = fls(entry->dnsc_factor_w);
+		/* exactly one bit set (power of two) and factor <= 16 */
+		if ((entry->dnsc_factor_w & ~BIT(bit - 1)) || (bit > 5)) {
+			SDEROT_DBG("non power-of-2 scale not support\n");
+			ret = -EINVAL;
+			goto dnsc_err;
+		}
+		entry->dnsc_factor_h = src_h / dst_h;
+		bit = fls(entry->dnsc_factor_h);
+		if ((entry->dnsc_factor_h & ~BIT(bit - 1)) || (bit > 5)) {
+			SDEROT_DBG("non power-of-2 dscale not support\n");
+			ret = -EINVAL;
+			goto dnsc_err;
+		}
+	}
+
+	fmt = sde_get_format_params(item->output.format);
+	if (sde_mdp_is_ubwc_format(fmt) &&
+		(entry->dnsc_factor_h || entry->dnsc_factor_w)) {
+		SDEROT_DBG("downscale with ubwc not support\n");
+		ret = -EINVAL;
+	}
+
+dnsc_err:
+
+	/* Downscaler does not support asymmetrical dnsc */
+	/* note: also runs on the error paths above; harmless since any
+	 * partially-computed factors only tighten the rejection
+	 */
+	if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
+		SDEROT_DBG("asymmetric downscale not support\n");
+		ret = -EINVAL;
+	}
+
+	if (ret) {
+		entry->dnsc_factor_w = 0;
+		entry->dnsc_factor_h = 0;
+	}
+	return ret;
+}
+
+/*
+ * sde_rotator_hw_show_caps - report r1 capability info for sysfs
+ * Returns the number of bytes written into @buf (capped at @len).
+ */
+static ssize_t sde_rotator_hw_show_caps(struct sde_rot_mgr *mgr,
+		struct device_attribute *attr, char *buf, ssize_t len)
+{
+	struct sde_rotator_r1_data *hw_data;
+	int cnt = 0;
+
+	if (!mgr || !buf)
+		return 0;
+
+	hw_data = mgr->hw_data;
+
+/* append a formatted line to buf, tracking the running byte count */
+#define SPRINT(fmt, ...) \
+	(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
+
+	SPRINT("wb_id=%d\n", hw_data->wb_id);
+	SPRINT("ctl_id=%d\n", hw_data->ctl_id);
+	return cnt;
+}
+
+/*
+ * sde_rotator_hw_show_state - report r1 runtime state for sysfs
+ * Returns the number of bytes written into @buf (capped at @len).
+ */
+static ssize_t sde_rotator_hw_show_state(struct sde_rot_mgr *mgr,
+		struct device_attribute *attr, char *buf, ssize_t len)
+{
+	struct sde_rotator_r1_data *hw_data;
+	int cnt = 0;
+
+	if (!mgr || !buf)
+		return 0;
+
+	hw_data = mgr->hw_data;
+
+/* identical redefinition of the SPRINT helper used in show_caps */
+#define SPRINT(fmt, ...) \
+	(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
+
+	if (hw_data && hw_data->mdp_hw) {
+		struct sde_rot_hw_resource *hw = &hw_data->mdp_hw->hw;
+
+		SPRINT("irq_num=%d\n", hw_data->irq_num);
+		SPRINT("max_active=%d\n", hw->max_active);
+		SPRINT("num_active=%d\n", atomic_read(&hw->num_active));
+		SPRINT("pending_cnt=%u\n", hw->pending_count);
+	}
+
+	return cnt;
+}
+
+/*
+ * sde_rotator_hw_parse_dt - read optional wb/ctl ids from device tree
+ * @hw_data: r1 private data receiving the ids
+ * @dev: platform device carrying the of_node
+ *
+ * Both properties are optional and default to -1 when absent, so a
+ * read failure is not propagated (previously a missing
+ * "qcom,mdss-ctl-id" aborted init while a missing "qcom,mdss-wb-id"
+ * was silently tolerated).
+ */
+static int sde_rotator_hw_parse_dt(struct sde_rotator_r1_data *hw_data,
+		struct platform_device *dev)
+{
+	int ret;
+	u32 data;
+
+	if (!hw_data || !dev)
+		return -EINVAL;
+
+	ret = of_property_read_u32(dev->dev.of_node,
+			"qcom,mdss-wb-id", &data);
+	if (ret)
+		hw_data->wb_id = -1;
+	else
+		hw_data->wb_id = (int) data;
+	ret = of_property_read_u32(dev->dev.of_node,
+			"qcom,mdss-ctl-id", &data);
+	if (ret)
+		hw_data->ctl_id = -1;
+	else
+		hw_data->ctl_id = (int) data;
+
+	return 0;
+}
+
+/* configure the qos/caps feature bitmaps for the r1 hardware revision */
+static int sde_rotator_hw_rev_init(struct sde_rot_data_type *mdata)
+{
+	if (!mdata) {
+		SDEROT_ERR("null rotator data\n");
+		return -EINVAL;
+	}
+
+	/* features supported on this revision */
+	set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
+	set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
+	set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
+	set_bit(SDE_CAPS_R1_WB, mdata->sde_caps_map);
+
+	/* features not available on r1 */
+	clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
+	clear_bit(SDE_QOS_CDP, mdata->sde_qos_map);
+	clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);
+
+	return 0;
+}
+
+/* rotator-owned writeback-done interrupt slots */
+enum {
+	SDE_ROTATOR_INTR_WB_0,
+	SDE_ROTATOR_INTR_WB_1,
+	SDE_ROTATOR_INTR_MAX,
+};
+
+/* completion callback registered via sde_mdp_set_intr_callback() */
+struct intr_callback {
+	void (*func)(void *);
+	void *arg;
+};
+
+/* NOTE(review): file-scope table; looks like it could be static —
+ * confirm no other translation unit references it
+ */
+struct intr_callback sde_intr_cb[SDE_ROTATOR_INTR_MAX];
+
+/*
+ * sde_mdp_set_intr_callback - register a wb-done completion callback
+ * @intr_type: interrupt type (logged only)
+ * @intf_num: slot index, must be below SDE_ROTATOR_INTR_MAX
+ * @fnc_ptr: callback to invoke from the irq handler (NULL to clear)
+ * @arg: opaque argument passed back to @fnc_ptr
+ */
+int sde_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
+		void (*fnc_ptr)(void *), void *arg)
+{
+	struct intr_callback *cb;
+
+	if (intf_num >= SDE_ROTATOR_INTR_MAX) {
+		SDEROT_WARN("invalid intr type=%u intf_num=%u\n",
+				intr_type, intf_num);
+		return -EINVAL;
+	}
+
+	cb = &sde_intr_cb[intf_num];
+	cb->func = fnc_ptr;
+	cb->arg = arg;
+
+	return 0;
+}
+
+/*
+ * sde_irq_handler - rotator irq: dispatch WB0/WB1 done callbacks
+ * @irq: irq number (unused)
+ * @ptr: sde_rot_data_type passed at request_irq time
+ *
+ * Reads INTR_STATUS once; for each pending wb-done bit with a
+ * registered callback, clears the bit and invokes the callback.
+ * Returns IRQ_HANDLED only if at least one callback ran.
+ */
+static irqreturn_t sde_irq_handler(int irq, void *ptr)
+{
+	struct sde_rot_data_type *mdata = ptr;
+	irqreturn_t ret = IRQ_NONE;
+	u32 isr;
+
+	isr = readl_relaxed(mdata->mdp_base + SDE_MDP_REG_INTR_STATUS);
+
+	SDEROT_DBG("intr_status = %8.8x\n", isr);
+
+	if (isr & SDE_MDP_INTR_WB_0_DONE) {
+		struct intr_callback *cb = &sde_intr_cb[SDE_ROTATOR_INTR_WB_0];
+
+		if (cb->func) {
+			/* ack before the callback so a re-arm is not lost */
+			writel_relaxed(SDE_MDP_INTR_WB_0_DONE,
+				mdata->mdp_base + SDE_MDP_REG_INTR_CLEAR);
+			cb->func(cb->arg);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	if (isr & SDE_MDP_INTR_WB_1_DONE) {
+		struct intr_callback *cb = &sde_intr_cb[SDE_ROTATOR_INTR_WB_1];
+
+		if (cb->func) {
+			writel_relaxed(SDE_MDP_INTR_WB_1_DONE,
+				mdata->mdp_base + SDE_MDP_REG_INTR_CLEAR);
+			cb->func(cb->arg);
+			ret = IRQ_HANDLED;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * sde_rotator_hw_destroy - undo sde_rotator_r1_init()
+ * @mgr: rotator manager whose hw_data is torn down
+ *
+ * Releases the irq (if requested), frees the mdp hw pipeline and the
+ * backend private data.
+ */
+static void sde_rotator_hw_destroy(struct sde_rot_mgr *mgr)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	struct sde_rotator_r1_data *hw_data;
+
+	if (!mgr || !mgr->pdev || !mgr->hw_data)
+		return;
+
+	hw_data = mgr->hw_data;
+	if (hw_data->irq_num >= 0)
+		devm_free_irq(&mgr->pdev->dev, hw_data->irq_num, mdata);
+	sde_rotator_hw_free(mgr, hw_data->mdp_hw);
+	devm_kfree(&mgr->pdev->dev, mgr->hw_data);
+	mgr->hw_data = NULL;
+}
+
+/*
+ * sde_rotator_r1_init - initialize the r1 rotator backend
+ * @mgr: rotator manager to attach the r1 ops and private data to
+ *
+ * Parses DT, requests the (optional) rotator irq, allocates the mdp hw
+ * pipeline and registers the r1 ops. Returns 0 or a negative errno.
+ */
+int sde_rotator_r1_init(struct sde_rot_mgr *mgr)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	struct sde_rotator_r1_data *hw_data;
+	int ret;
+
+	if (!mgr || !mgr->pdev) {
+		SDEROT_ERR("null rotator manager/platform device");
+		return -EINVAL;
+	}
+
+	hw_data = devm_kzalloc(&mgr->pdev->dev,
+			sizeof(struct sde_rotator_r1_data), GFP_KERNEL);
+	if (hw_data == NULL)
+		return -ENOMEM;
+
+	mgr->hw_data = hw_data;
+	mgr->ops_config_hw = sde_rotator_config_hw;
+	mgr->ops_kickoff_entry = sde_rotator_kickoff_entry;
+	mgr->ops_wait_for_entry = sde_rotator_wait_for_entry;
+	mgr->ops_hw_alloc = sde_rotator_hw_alloc_ext;
+	mgr->ops_hw_free = sde_rotator_hw_free_ext;
+	mgr->ops_hw_destroy = sde_rotator_hw_destroy;
+	mgr->ops_hw_validate_entry = sde_rotator_hw_validate_entry;
+	mgr->ops_hw_show_caps = sde_rotator_hw_show_caps;
+	mgr->ops_hw_show_state = sde_rotator_hw_show_state;
+	mgr->ops_hw_create_debugfs = sde_rotator_r1_create_debugfs;
+
+	ret = sde_rotator_hw_parse_dt(mgr->hw_data, mgr->pdev);
+	if (ret)
+		goto error_parse_dt;
+
+	/* the irq is optional; continue without it on failure */
+	hw_data->irq_num = platform_get_irq(mgr->pdev, 0);
+	if (hw_data->irq_num < 0) {
+		SDEROT_ERR("fail to get rotator irq\n");
+	} else {
+		ret = devm_request_threaded_irq(&mgr->pdev->dev,
+				hw_data->irq_num,
+				sde_irq_handler, NULL,
+				0, "sde_rotator_r1", mdata);
+		if (ret) {
+			SDEROT_ERR("fail to request irq r:%d\n", ret);
+			hw_data->irq_num = -1;
+		} else {
+			/* enabled on demand when work is committed */
+			disable_irq(hw_data->irq_num);
+		}
+	}
+
+	hw_data->mdp_hw = sde_rotator_hw_alloc(mgr, hw_data->ctl_id,
+			hw_data->wb_id, hw_data->irq_num);
+	if (IS_ERR_OR_NULL(hw_data->mdp_hw)) {
+		/* ret could otherwise still be 0 here, reporting success */
+		ret = IS_ERR(hw_data->mdp_hw) ?
+				PTR_ERR(hw_data->mdp_hw) : -ENODEV;
+		goto error_hw_alloc;
+	}
+
+	ret = sde_rotator_hw_rev_init(sde_rot_get_mdata());
+	if (ret)
+		goto error_hw_rev_init;
+
+	hw_data->mgr = mgr;
+
+	return 0;
+error_hw_rev_init:
+	sde_rotator_hw_free(mgr, hw_data->mdp_hw);
+error_hw_alloc:
+	/* release the irq on every failure path, not just rev_init */
+	if (hw_data->irq_num >= 0)
+		devm_free_irq(&mgr->pdev->dev, hw_data->irq_num, mdata);
+	devm_kfree(&mgr->pdev->dev, mgr->hw_data);
+	mgr->hw_data = NULL;
+error_parse_dt:
+	return ret;
+}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.h
new file mode 100644
index 000000000000..8faa337caa33
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_ROTATOR_R1_H__
+#define __SDE_ROTATOR_R1_H__
+
+#include <linux/types.h>
+
+#include "sde_rotator_core.h"
+
+int sde_rotator_r1_init(struct sde_rot_mgr *mgr);
+
+#endif /* __SDE_ROTATOR_R1_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c
new file mode 100644
index 000000000000..b5c0790ef063
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_ctl.c
@@ -0,0 +1,267 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "sde_rotator_r1_hwio.h"
+#include "sde_rotator_util.h"
+#include "sde_rotator_r1_internal.h"
+#include "sde_rotator_core.h"
+
+/*
+ * sde_mdp_ctl_alloc - hand out the ctl block descriptor at index @off
+ * @mdata: rotator data providing the mapped register base
+ * @off: ctl index, also selects the register offset
+ *
+ * NOTE(review): descriptors come from a static table with no in-use
+ * tracking; repeated calls with the same @off return the same object —
+ * presumably callers are serialized, confirm.
+ */
+struct sde_mdp_ctl *sde_mdp_ctl_alloc(struct sde_rot_data_type *mdata,
+		u32 off)
+{
+	struct sde_mdp_ctl *ctl = NULL;
+	static struct sde_mdp_ctl sde_ctl[5];
+	static const u32 offset[] = {0x00002000, 0x00002200, 0x00002400,
+			    0x00002600, 0x00002800};
+
+	if (off >= ARRAY_SIZE(offset)) {
+		SDEROT_ERR("invalid parameters\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ctl = &sde_ctl[off];
+	ctl->mdata = mdata;
+	ctl->num = off;
+	ctl->offset = offset[ctl->num];
+	ctl->base = mdata->sde_io.base + ctl->offset;
+	return ctl;
+}
+
+/*
+ * sde_mdp_ctl_free - release a ctl descriptor back to an idle state
+ * @ctl: ctl to reset; its writeback block is freed first
+ *
+ * Returns 0, or -ENODEV when @ctl is NULL.
+ */
+int sde_mdp_ctl_free(struct sde_mdp_ctl *ctl)
+{
+	if (!ctl)
+		return -ENODEV;
+
+	/* release the writeback block, then clear the bookkeeping */
+	if (ctl->wb)
+		sde_mdp_wb_free(ctl->wb);
+
+	ctl->wb = NULL;
+	ctl->mixer_left = NULL;
+	ctl->mixer_right = NULL;
+	ctl->is_secure = false;
+	memset(&ctl->ops, 0, sizeof(ctl->ops));
+
+	return 0;
+}
+
+/*
+ * sde_mdp_mixer_assign - hand out the wb layer mixer descriptor @id
+ * @id: mixer index; only 2 wb mixer register offsets exist even though
+ *      the backing table has 16 slots
+ * @wb: unused here (kept for the caller's interface)
+ *
+ * NOTE(review): like sde_mdp_ctl_alloc(), no in-use tracking — confirm
+ * callers serialize access.
+ */
+struct sde_mdp_mixer *sde_mdp_mixer_assign(u32 id, bool wb)
+{
+	struct sde_mdp_mixer *mixer = NULL;
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	static struct sde_mdp_mixer sde_mixer[16];
+	static const u32 offset[] = {0x00048000, 0x00049000};
+
+	if (id >= ARRAY_SIZE(offset)) {
+		SDEROT_ERR("invalid parameters\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	mixer = &sde_mixer[id];
+	mixer->num = id;
+	mixer->offset = offset[mixer->num];
+	mixer->base = mdata->sde_io.base + mixer->offset;
+	return mixer;
+}
+
+/*
+ * sde_mdp_mixer_setup - program the layer mixer for a commit
+ * @master_ctl: ctl whose mixer (selected by @mixer_mux) is programmed
+ * @mixer_mux: SDE_MDP_MIXER_MUX_* selector
+ *
+ * In rotator mode the blend stages are unused, so all CTL_LAYER
+ * registers are cleared; otherwise this is a no-op.
+ */
+static void sde_mdp_mixer_setup(struct sde_mdp_ctl *master_ctl,
+	int mixer_mux)
+{
+	int i;
+	struct sde_mdp_ctl *ctl = NULL;
+	struct sde_mdp_mixer *mixer = sde_mdp_mixer_get(master_ctl,
+			mixer_mux);
+
+	if (!mixer)
+		return;
+
+	ctl = mixer->ctl;
+	if (!ctl)
+		return;
+
+	/* check if mixer setup for rotator is needed */
+	if (mixer->rotator_mode) {
+		int nmixers = 5;
+
+		for (i = 0; i < nmixers; i++)
+			sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_LAYER(i), 0);
+		return;
+	}
+}
+
+/*
+ * sde_mdp_mixer_get - resolve a SDE_MDP_MIXER_MUX_* selector to a mixer
+ * @ctl: ctl owning the mixers
+ * @mux: DEFAULT/LEFT map to mixer_left, RIGHT to mixer_right
+ *
+ * Returns NULL for a NULL ctl or an unknown selector.
+ */
+struct sde_mdp_mixer *sde_mdp_mixer_get(struct sde_mdp_ctl *ctl, int mux)
+{
+	if (!ctl) {
+		SDEROT_ERR("ctl not initialized\n");
+		return NULL;
+	}
+
+	if (mux == SDE_MDP_MIXER_MUX_RIGHT)
+		return ctl->mixer_right;
+
+	if (mux == SDE_MDP_MIXER_MUX_DEFAULT ||
+			mux == SDE_MDP_MIXER_MUX_LEFT)
+		return ctl->mixer_left;
+
+	return NULL;
+}
+
+/*
+ * sde_mdp_get_pipe_flush_bits - compute CTL flush bits for a source pipe
+ * @pipe: source pipe whose type and hw index select the flush bit
+ *
+ * Returns the SDE_MDP_REG_CTL_FLUSH bitmask for @pipe.
+ */
+int sde_mdp_get_pipe_flush_bits(struct sde_mdp_pipe *pipe)
+{
+	/* start from a clean mask; |= on an uninitialized value was UB */
+	u32 flush_bits = 0;
+
+	if (pipe->type == SDE_MDP_PIPE_TYPE_DMA)
+		flush_bits |= BIT(pipe->num) << 5;
+	else if (pipe->num == SDE_MDP_SSPP_VIG3 ||
+			pipe->num == SDE_MDP_SSPP_RGB3)
+		flush_bits |= BIT(pipe->num) << 10;
+	else if (pipe->type == SDE_MDP_PIPE_TYPE_CURSOR)
+		flush_bits |= BIT(22 + pipe->num - SDE_MDP_SSPP_CURSOR0);
+	else /* RGB/VIG 0-2 pipes */
+		flush_bits |= BIT(pipe->num);
+
+	return flush_bits;
+}
+
+/*
+ * sde_mdp_mixer_pipe_update - accumulate a pipe's flush bits on its ctl
+ * @pipe: source pipe
+ * @mixer: mixer whose ctl collects the flush bits
+ * @params_changed: unused here (kept for the caller's interface)
+ */
+int sde_mdp_mixer_pipe_update(struct sde_mdp_pipe *pipe,
+		struct sde_mdp_mixer *mixer, int params_changed)
+{
+	if (!pipe || !mixer || !mixer->ctl)
+		return -EINVAL;
+
+	mixer->ctl->flush_bits |= sde_mdp_get_pipe_flush_bits(pipe);
+
+	return 0;
+}
+
+/*
+ * sde_mdp_display_wait4comp - wait for the ctl's commit to complete
+ * @ctl: ctl to wait on
+ *
+ * Returns the wait handler's result, 0 when no handler is installed,
+ * or -ENODEV for a NULL ctl.
+ */
+int sde_mdp_display_wait4comp(struct sde_mdp_ctl *ctl)
+{
+	int ret = 0;	/* was uninitialized when wait_fnc is absent */
+
+	if (!ctl) {
+		SDEROT_ERR("invalid ctl\n");
+		return -ENODEV;
+	}
+
+	if (ctl->ops.wait_fnc)
+		ret = ctl->ops.wait_fnc(ctl, NULL);
+
+	return ret;
+}
+
+/*
+ * sde_mdp_display_commit - prepare, flush and kick off a wb commit
+ * @ctl: ctl to commit
+ * @arg: opaque argument forwarded to the prepare/display handlers
+ * @commit_cb: unused here (kept for the caller's interface)
+ *
+ * Programs the mixers and op mode, writes the accumulated flush bits
+ * and invokes the display handler. Returns 0 or a negative errno.
+ */
+int sde_mdp_display_commit(struct sde_mdp_ctl *ctl, void *arg,
+	struct sde_mdp_commit_cb *commit_cb)
+{
+	int ret = 0;
+	u32 ctl_flush_bits = 0;
+
+	if (!ctl) {
+		SDEROT_ERR("display function not set\n");
+		return -ENODEV;
+	}
+
+	if (ctl->ops.prepare_fnc)
+		ret = ctl->ops.prepare_fnc(ctl, arg);
+
+	if (ret) {
+		SDEROT_ERR("error preparing display\n");
+		goto done;
+	}
+
+	sde_mdp_mixer_setup(ctl, SDE_MDP_MIXER_MUX_LEFT);
+	sde_mdp_mixer_setup(ctl, SDE_MDP_MIXER_MUX_RIGHT);
+
+	sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_TOP, ctl->opmode);
+	ctl->flush_bits |= BIT(17); /* CTL */
+
+	ctl_flush_bits = ctl->flush_bits;
+
+	sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_FLUSH, ctl_flush_bits);
+	/* ensure the flush command is issued after the barrier */
+	wmb();
+	/* remember what was flushed and reset for the next commit */
+	ctl->flush_reg_data = ctl_flush_bits;
+	ctl->flush_bits = 0;
+	if (ctl->ops.display_fnc)
+		ret = ctl->ops.display_fnc(ctl, arg); /* DSI0 kickoff */
+	if (ret)
+		SDEROT_WARN("ctl %d error displaying frame\n", ctl->num);
+
+done:
+	return ret;
+}
+
+/**
+ * @sde_mdp_ctl_mixer_switch() - return ctl mixer of @return_type
+ * @ctl: Pointer to ctl structure to be switched.
+ * @return_type: wb_type of the ctl to be switched to.
+ *
+ * Virtual mixer switch should be performed only when there is no
+ * dedicated wfd block and writeback block is shared.
+ */
+struct sde_mdp_ctl *sde_mdp_ctl_mixer_switch(struct sde_mdp_ctl *ctl,
+		u32 return_type)
+{
+	/* r1 has a dedicated wb block, so only a same-type "switch" works */
+	if (ctl->wb_type != return_type) {
+		SDEROT_ERR("unable to switch mixer to type=%d\n",
+				return_type);
+		return NULL;
+	}
+
+	return ctl;
+}
+
+/*
+ * sde_mdp_wb_assign - hand out the writeback block descriptor @num
+ * @num: writeback index; must be below the known register offsets
+ * @reg_index: unused here (kept for the caller's interface)
+ *
+ * Returns the descriptor or ERR_PTR(-EINVAL) for an invalid index.
+ * The old NULL check after dereferencing the table entry was dead
+ * code (a static-array element address is never NULL) and has been
+ * removed.
+ */
+struct sde_mdp_writeback *sde_mdp_wb_assign(u32 num, u32 reg_index)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	struct sde_mdp_writeback *wb;
+	static struct sde_mdp_writeback sde_wb[16];
+	static const u32 offset[] = {0x00065000, 0x00065800, 0x00066000};
+
+	if (num >= ARRAY_SIZE(offset)) {
+		SDEROT_ERR("invalid parameters\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	wb = &sde_wb[num];
+	wb->num = num;
+	wb->offset = offset[wb->num];
+	wb->base = mdata->sde_io.base + wb->offset;
+
+	return wb;
+}
+
+/*
+ * sde_mdp_wb_free - release a writeback descriptor
+ *
+ * Intentionally empty: descriptors live in a static table in
+ * sde_mdp_wb_assign() and are never individually freed.
+ */
+void sde_mdp_wb_free(struct sde_mdp_writeback *wb)
+{
+}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_debug.c
new file mode 100644
index 000000000000..8df762b6a065
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_debug.c
@@ -0,0 +1,45 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+#include "sde_rotator_r1_debug.h"
+#include "sde_rotator_core.h"
+#include "sde_rotator_r1.h"
+#include "sde_rotator_r1_internal.h"
+
+/*
+ * sde_rotator_r1_create_debugfs - Setup rotator r1 debugfs directory structure.
+ * @rot_dev: Pointer to rotator device
+ */
+int sde_rotator_r1_create_debugfs(struct sde_rot_mgr *mgr,
+		struct dentry *debugfs_root)
+{
+	/* NOTE(review): fetched but unused until entries are added below */
+	struct sde_rotator_r1_data *hw_data;
+
+	if (!mgr || !debugfs_root || !mgr->hw_data)
+		return -EINVAL;
+
+	hw_data = mgr->hw_data;
+
+	/* add debugfs */
+	/* placeholder: no r1-specific debugfs entries exist yet */
+
+	return 0;
+}
+
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_debug.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_debug.h
new file mode 100644
index 000000000000..ccd4d5338844
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_debug.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Guard renamed from __SDE_ROTATOR_R3_DEBUG_H__: this is the r1 debug
+ * header; the r3 guard was a copy-paste leftover and would collide
+ * with an actual r3 debug header included in the same unit.
+ */
+#ifndef __SDE_ROTATOR_R1_DEBUG_H__
+#define __SDE_ROTATOR_R1_DEBUG_H__
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+
+struct sde_rot_mgr;
+
+#if defined(CONFIG_DEBUG_FS)
+int sde_rotator_r1_create_debugfs(struct sde_rot_mgr *mgr,
+		struct dentry *debugfs_root);
+#else
+static inline
+int sde_rotator_r1_create_debugfs(struct sde_rot_mgr *mgr,
+		struct dentry *debugfs_root)
+{
+	return 0;
+}
+#endif
+#endif /* __SDE_ROTATOR_R1_DEBUG_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_hwio.h
new file mode 100644
index 000000000000..bdedcfa533ae
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_hwio.h
@@ -0,0 +1,149 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef SDE_ROTATOR_R1_HWIO_H
+#define SDE_ROTATOR_R1_HWIO_H
+
+#include <linux/bitops.h>
+
+#define SDE_MDP_FETCH_CONFIG_RESET_VALUE 0x00000087
+
+#define SDE_MDP_REG_HW_VERSION 0x0
+#define SDE_MDP_REG_INTR_EN 0x00010
+#define SDE_MDP_REG_INTR_STATUS 0x00014
+#define SDE_MDP_REG_INTR_CLEAR 0x00018
+
+#define SDE_MDP_INTR_WB_0_DONE BIT(0)
+#define SDE_MDP_INTR_WB_1_DONE BIT(1)
+
+enum mdss_mdp_intr_type {
+ SDE_MDP_IRQ_WB_ROT_COMP = 0,
+ SDE_MDP_IRQ_WB_WFD = 4,
+ SDE_MDP_IRQ_PING_PONG_COMP = 8,
+ SDE_MDP_IRQ_PING_PONG_RD_PTR = 12,
+ SDE_MDP_IRQ_PING_PONG_WR_PTR = 16,
+ SDE_MDP_IRQ_PING_PONG_AUTO_REF = 20,
+ SDE_MDP_IRQ_INTF_UNDER_RUN = 24,
+ SDE_MDP_IRQ_INTF_VSYNC = 25,
+};
+
+enum mdss_mdp_ctl_index {
+ SDE_MDP_CTL0,
+ SDE_MDP_CTL1,
+ SDE_MDP_CTL2,
+ SDE_MDP_CTL3,
+ SDE_MDP_CTL4,
+ SDE_MDP_CTL5,
+ SDE_MDP_MAX_CTL
+};
+
+#define SDE_MDP_REG_CTL_LAYER(lm) \
+ ((lm == 5) ? (0x024) : ((lm) * 0x004))
+#define SDE_MDP_REG_CTL_TOP 0x014
+#define SDE_MDP_REG_CTL_FLUSH 0x018
+#define SDE_MDP_REG_CTL_START 0x01C
+
+#define SDE_MDP_CTL_OP_ROT0_MODE 0x1
+#define SDE_MDP_CTL_OP_ROT1_MODE 0x2
+
+enum sde_mdp_sspp_index {
+ SDE_MDP_SSPP_VIG0,
+ SDE_MDP_SSPP_VIG1,
+ SDE_MDP_SSPP_VIG2,
+ SDE_MDP_SSPP_RGB0,
+ SDE_MDP_SSPP_RGB1,
+ SDE_MDP_SSPP_RGB2,
+ SDE_MDP_SSPP_DMA0,
+ SDE_MDP_SSPP_DMA1,
+ SDE_MDP_SSPP_VIG3,
+ SDE_MDP_SSPP_RGB3,
+ SDE_MDP_SSPP_CURSOR0,
+ SDE_MDP_SSPP_CURSOR1,
+ SDE_MDP_MAX_SSPP
+};
+
+#define SDE_MDP_REG_SSPP_SRC_SIZE 0x000
+#define SDE_MDP_REG_SSPP_SRC_IMG_SIZE 0x004
+#define SDE_MDP_REG_SSPP_SRC_XY 0x008
+#define SDE_MDP_REG_SSPP_OUT_SIZE 0x00C
+#define SDE_MDP_REG_SSPP_OUT_XY 0x010
+#define SDE_MDP_REG_SSPP_SRC0_ADDR 0x014
+#define SDE_MDP_REG_SSPP_SRC1_ADDR 0x018
+#define SDE_MDP_REG_SSPP_SRC2_ADDR 0x01C
+#define SDE_MDP_REG_SSPP_SRC3_ADDR 0x020
+#define SDE_MDP_REG_SSPP_SRC_YSTRIDE0 0x024
+#define SDE_MDP_REG_SSPP_SRC_YSTRIDE1 0x028
+#define SDE_MDP_REG_SSPP_STILE_FRAME_SIZE 0x02C
+#define SDE_MDP_REG_SSPP_SRC_FORMAT 0x030
+#define SDE_MDP_REG_SSPP_SRC_UNPACK_PATTERN 0x034
+#define SDE_MDP_REG_SSPP_SRC_CONSTANT_COLOR 0x03C
+#define SDE_MDP_REG_SSPP_REQPRIO_FIFO_WM_0 0x050
+#define SDE_MDP_REG_SSPP_REQPRIO_FIFO_WM_1 0x054
+#define SDE_MDP_REG_SSPP_REQPRIO_FIFO_WM_2 0x058
+#define SDE_MDP_REG_SSPP_DANGER_LUT 0x060
+#define SDE_MDP_REG_SSPP_SAFE_LUT 0x064
+#define SDE_MDP_REG_SSPP_CREQ_LUT 0x068
+#define SDE_MDP_REG_SSPP_QOS_CTRL 0x06C
+#define SDE_MDP_REG_SSPP_CDP_CTRL 0x134
+#define SDE_MDP_REG_SSPP_UBWC_ERROR_STATUS 0x138
+
+#define SDE_MDP_REG_SSPP_SRC_OP_MODE 0x038
+#define SDE_MDP_OP_FLIP_UD BIT(14)
+#define SDE_MDP_OP_FLIP_LR BIT(13)
+#define SDE_MDP_OP_BWC_EN BIT(0)
+#define SDE_MDP_OP_BWC_LOSSLESS (0 << 1)
+#define SDE_MDP_OP_BWC_Q_HIGH (1 << 1)
+#define SDE_MDP_OP_BWC_Q_MED (2 << 1)
+
+#define SDE_MDP_REG_SSPP_SRC_CONSTANT_COLOR 0x03C
+#define SDE_MDP_REG_SSPP_FETCH_CONFIG 0x048
+#define SDE_MDP_REG_SSPP_VC1_RANGE 0x04C
+#define SDE_MDP_REG_SSPP_SRC_ADDR_SW_STATUS 0x070
+#define SDE_MDP_REG_SSPP_CURRENT_SRC0_ADDR 0x0A4
+#define SDE_MDP_REG_SSPP_CURRENT_SRC1_ADDR 0x0A8
+#define SDE_MDP_REG_SSPP_CURRENT_SRC2_ADDR 0x0AC
+#define SDE_MDP_REG_SSPP_CURRENT_SRC3_ADDR 0x0B0
+#define SDE_MDP_REG_SSPP_DECIMATION_CONFIG 0x0B4
+
+enum sde_mdp_mixer_wb_index {
+ SDE_MDP_WB_LAYERMIXER0,
+ SDE_MDP_WB_LAYERMIXER1,
+ SDE_MDP_WB_MAX_LAYERMIXER,
+};
+
+enum mdss_mdp_writeback_index {
+ SDE_MDP_WRITEBACK0,
+ SDE_MDP_WRITEBACK1,
+ SDE_MDP_WRITEBACK2,
+ SDE_MDP_WRITEBACK3,
+ SDE_MDP_WRITEBACK4,
+ SDE_MDP_MAX_WRITEBACK
+};
+
+#define SDE_MDP_REG_WB_DST_FORMAT 0x000
+#define SDE_MDP_REG_WB_DST_OP_MODE 0x004
+#define SDE_MDP_REG_WB_DST_PACK_PATTERN 0x008
+#define SDE_MDP_REG_WB_DST0_ADDR 0x00C
+#define SDE_MDP_REG_WB_DST1_ADDR 0x010
+#define SDE_MDP_REG_WB_DST2_ADDR 0x014
+#define SDE_MDP_REG_WB_DST3_ADDR 0x018
+#define SDE_MDP_REG_WB_DST_YSTRIDE0 0x01C
+#define SDE_MDP_REG_WB_DST_YSTRIDE1 0x020
+#define SDE_MDP_REG_WB_DST_WRITE_CONFIG 0x048
+#define SDE_MDP_REG_WB_ROTATION_DNSCALER 0x050
+#define SDE_MDP_REG_WB_ROTATOR_PIPE_DOWNSCALER 0x054
+#define SDE_MDP_REG_WB_OUT_SIZE 0x074
+#define SDE_MDP_REG_WB_ALPHA_X_VALUE 0x078
+#define SDE_MDP_REG_WB_DST_ADDR_SW_STATUS 0x2B0
+
+#endif
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_internal.h
new file mode 100644
index 000000000000..b4517755b65d
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_internal.h
@@ -0,0 +1,171 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_ROTATOR_R1_INTERNAL_H__
+#define __SDE_ROTATOR_R1_INTERNAL_H__
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/kref.h>
+#include <linux/kernel.h>
+
+#include "sde_rotator_util.h"
+
+/**
+ * enum sde_commit_stage_type - Indicate different commit stages
+ */
+enum sde_commit_stage_type {
+ SDE_COMMIT_STAGE_SETUP_DONE,
+ SDE_COMMIT_STAGE_READY_FOR_KICKOFF,
+};
+
+enum sde_mdp_wb_ctl_type {
+ SDE_MDP_WB_CTL_TYPE_BLOCK = 1,
+ SDE_MDP_WB_CTL_TYPE_LINE
+};
+
+enum sde_mdp_mixer_mux {
+ SDE_MDP_MIXER_MUX_DEFAULT,
+ SDE_MDP_MIXER_MUX_LEFT,
+ SDE_MDP_MIXER_MUX_RIGHT,
+};
+
+enum sde_mdp_pipe_type {
+ SDE_MDP_PIPE_TYPE_UNUSED,
+ SDE_MDP_PIPE_TYPE_VIG,
+ SDE_MDP_PIPE_TYPE_RGB,
+ SDE_MDP_PIPE_TYPE_DMA,
+ SDE_MDP_PIPE_TYPE_CURSOR,
+};
+
+struct sde_mdp_data;
+struct sde_mdp_ctl;
+struct sde_mdp_pipe;
+struct sde_mdp_mixer;
+struct sde_mdp_wb;
+
+struct sde_mdp_writeback {
+ u32 num;
+ char __iomem *base;
+ u32 offset;
+};
+
+struct sde_mdp_ctl_intfs_ops {
+ int (*start_fnc)(struct sde_mdp_ctl *ctl);
+ int (*stop_fnc)(struct sde_mdp_ctl *ctl, int panel_power_state);
+ int (*prepare_fnc)(struct sde_mdp_ctl *ctl, void *arg);
+ int (*display_fnc)(struct sde_mdp_ctl *ctl, void *arg);
+ int (*wait_fnc)(struct sde_mdp_ctl *ctl, void *arg);
+};
+
+struct sde_mdp_ctl {
+ u32 num;
+ char __iomem *base;
+ u32 opmode;
+ u32 flush_bits;
+ u32 flush_reg_data;
+ bool is_secure;
+ struct sde_rot_data_type *mdata;
+ struct sde_mdp_mixer *mixer_left;
+ struct sde_mdp_mixer *mixer_right;
+ void *priv_data;
+ u32 wb_type;
+ struct sde_mdp_writeback *wb;
+ struct sde_mdp_ctl_intfs_ops ops;
+ u32 offset;
+ int irq_num;
+};
+
+struct sde_mdp_mixer {
+ u32 num;
+ char __iomem *base;
+ u8 rotator_mode;
+ struct sde_mdp_ctl *ctl;
+ u32 offset;
+};
+
+struct sde_mdp_shared_reg_ctrl {
+ u32 reg_off;
+ u32 bit_off;
+};
+
+struct sde_mdp_pipe {
+ u32 num;
+ u32 type;
+ u32 ndx;
+ char __iomem *base;
+ u32 xin_id;
+ u32 flags;
+ u32 bwc_mode;
+ u16 img_width;
+ u16 img_height;
+ u8 horz_deci;
+ u8 vert_deci;
+ struct sde_rect src;
+ struct sde_rect dst;
+ struct sde_mdp_format_params *src_fmt;
+ struct sde_mdp_plane_sizes src_planes;
+ struct sde_mdp_mixer *mixer_left;
+ struct sde_mdp_mixer *mixer_right;
+ u32 params_changed;
+ u32 offset;
+};
+
+struct sde_mdp_writeback_arg {
+ struct sde_mdp_data *data;
+ void *priv_data;
+};
+
+struct sde_mdp_commit_cb {
+ void *data;
+ int (*commit_cb_fnc)(enum sde_commit_stage_type commit_state,
+ void *data);
+};
+
+/* Write @val to the ctl register at @reg (offset relative to ctl->base). */
+static inline void sde_mdp_ctl_write(struct sde_mdp_ctl *ctl,
+		u32 reg, u32 val)
+{
+	SDEROT_DBG("ctl%d:%6.6x:%8.8x\n", ctl->num, ctl->offset + reg, val);
+	writel_relaxed(val, ctl->base + reg);
+}
+
+/*
+ * A pipe is a non-real-time VBIF client when a dedicated NRT VBIF is
+ * mapped (vbif_nrt_io.base set) and the pipe's mixer runs in rotator mode.
+ */
+static inline bool sde_mdp_is_nrt_vbif_client(struct sde_rot_data_type *mdata,
+		struct sde_mdp_pipe *pipe)
+{
+	return mdata->vbif_nrt_io.base && pipe->mixer_left &&
+			pipe->mixer_left->rotator_mode;
+}
+int sde_mdp_set_intr_callback(u32 intr_type, u32 intf_num,
+ void (*fnc_ptr)(void *), void *arg);
+int sde_mdp_display_wait4comp(struct sde_mdp_ctl *ctl);
+int sde_mdp_writeback_display_commit(struct sde_mdp_ctl *ctl, void *arg);
+int sde_mdp_pipe_queue_data(struct sde_mdp_pipe *pipe,
+ struct sde_mdp_data *src_data);
+struct sde_mdp_ctl *sde_mdp_ctl_alloc(struct sde_rot_data_type *mdata,
+ u32 off);
+struct sde_mdp_writeback *sde_mdp_wb_assign(u32 num, u32 reg_index);
+void sde_mdp_wb_free(struct sde_mdp_writeback *wb);
+struct sde_mdp_mixer *sde_mdp_mixer_assign(u32 id, bool wb);
+int sde_mdp_writeback_start(struct sde_mdp_ctl *ctl);
+struct sde_mdp_pipe *sde_mdp_pipe_assign(struct sde_rot_data_type *mdata,
+ struct sde_mdp_mixer *mixer, u32 ndx);
+int sde_mdp_pipe_destroy(struct sde_mdp_pipe *pipe);
+int sde_mdp_ctl_free(struct sde_mdp_ctl *ctl);
+int sde_mdp_display_commit(struct sde_mdp_ctl *ctl, void *arg,
+ struct sde_mdp_commit_cb *commit_cb);
+int sde_mdp_mixer_pipe_update(struct sde_mdp_pipe *pipe,
+ struct sde_mdp_mixer *mixer, int params_changed);
+int sde_mdp_get_pipe_flush_bits(struct sde_mdp_pipe *pipe);
+struct sde_mdp_ctl *sde_mdp_ctl_mixer_switch(struct sde_mdp_ctl *ctl,
+ u32 return_type);
+struct sde_mdp_mixer *sde_mdp_mixer_get(struct sde_mdp_ctl *ctl, int mux);
+#endif /* __SDE_ROTATOR_R1_INTERNAL_H__ */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_pipe.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_pipe.c
new file mode 100644
index 000000000000..1181998c34b2
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_pipe.c
@@ -0,0 +1,405 @@
+/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitmap.h>
+#include <linux/errno.h>
+#include <linux/iopoll.h>
+#include <linux/mutex.h>
+
+#include "sde_rotator_r1_hwio.h"
+#include "sde_rotator_base.h"
+#include "sde_rotator_util.h"
+#include "sde_rotator_r1_internal.h"
+#include "sde_rotator_core.h"
+#include "sde_rotator_trace.h"
+
+#define SMP_MB_SIZE (mdss_res->smp_mb_size)
+#define SMP_MB_CNT (mdss_res->smp_mb_cnt)
+#define SMP_MB_ENTRY_SIZE 16
+#define MAX_BPP 4
+
+#define PIPE_CLEANUP_TIMEOUT_US 100000
+
+/* following offsets are relative to ctrl register bit offset */
+#define CLK_FORCE_ON_OFFSET 0x0
+#define CLK_FORCE_OFF_OFFSET 0x1
+/* following offsets are relative to status register bit offset */
+#define CLK_STATUS_OFFSET 0x0
+
+#define QOS_LUT_NRT_READ 0x0
+#define PANIC_LUT_NRT_READ 0x0
+#define ROBUST_LUT_NRT_READ 0xFFFF
+
+/* Priority 2, no panic */
+#define VBLANK_PANIC_DEFAULT_CONFIG 0x200000
+
+/* Write @val to the SSPP register at @reg (offset relative to pipe->base). */
+static inline void sde_mdp_pipe_write(struct sde_mdp_pipe *pipe,
+		u32 reg, u32 val)
+{
+	SDEROT_DBG("pipe%d:%6.6x:%8.8x\n", pipe->num, pipe->offset + reg, val);
+	writel_relaxed(val, pipe->base + reg);
+}
+
+/*
+ * Program the per-pipe CREQ QoS LUT. Rotator fetch is always treated as
+ * non-real-time, so the low-priority NRT read LUT value is used
+ * unconditionally.
+ */
+static int sde_mdp_pipe_qos_lut(struct sde_mdp_pipe *pipe)
+{
+	u32 qos_lut;
+
+	qos_lut = QOS_LUT_NRT_READ; /* low priority for nrt */
+
+	trace_rot_perf_set_qos_luts(pipe->num, pipe->src_fmt->format,
+		qos_lut, sde_mdp_is_linear_format(pipe->src_fmt));
+
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_CREQ_LUT,
+			qos_lut);
+
+	return 0;
+}
+
+/**
+ * @sde_mdp_pipe_nrt_vbif_setup -
+ * @mdata: pointer to global driver data.
+ * @pipe: pointer to a pipe
+ *
+ * This function assumes that clocks are enabled, so it is callers
+ * responsibility to enable clocks before calling this function.
+ */
+static void sde_mdp_pipe_nrt_vbif_setup(struct sde_rot_data_type *mdata,
+					struct sde_mdp_pipe *pipe)
+{
+	uint32_t nrt_vbif_client_sel;
+
+	/* only DMA pipes are routed through the RT/NRT VBIF client mux */
+	if (pipe->type != SDE_MDP_PIPE_TYPE_DMA)
+		return;
+
+	/* read-modify-write the per-DMA-pipe NRT client select bit */
+	nrt_vbif_client_sel = readl_relaxed(mdata->mdp_base +
+				MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL);
+	if (sde_mdp_is_nrt_vbif_client(mdata, pipe))
+		nrt_vbif_client_sel |= BIT(pipe->num - SDE_MDP_SSPP_DMA0);
+	else
+		nrt_vbif_client_sel &= ~BIT(pipe->num - SDE_MDP_SSPP_DMA0);
+	SDEROT_DBG("mdp:%6.6x:%8.8x\n", MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL,
+			nrt_vbif_client_sel);
+	writel_relaxed(nrt_vbif_client_sel,
+			mdata->mdp_base + MMSS_MDP_RT_NRT_VBIF_CLIENT_SEL);
+}
+
+/**
+ * sde_mdp_qos_vbif_remapper_setup - Program the VBIF QoS remapper
+ * registers based on real or non real time clients
+ * @mdata: Pointer to the global mdss data structure.
+ * @pipe: Pointer to source pipe struct to get xin id's.
+ * @is_realtime: To determine if pipe's client is real or
+ * non real time.
+ * This function assumes that clocks are on, so it is caller responsibility to
+ * call this function with clocks enabled.
+ */
+static void sde_mdp_qos_vbif_remapper_setup(struct sde_rot_data_type *mdata,
+			struct sde_mdp_pipe *pipe, bool is_realtime)
+{
+	u32 mask, reg_val, i, vbif_qos;
+
+	/* no QoS remap levels configured for this target */
+	if (mdata->npriority_lvl == 0)
+		return;
+
+	/* each remap register holds a 2-bit QoS value per xin id */
+	for (i = 0; i < mdata->npriority_lvl; i++) {
+		reg_val = SDE_VBIF_READ(mdata, SDE_VBIF_QOS_REMAP_BASE + i*4);
+		mask = 0x3 << (pipe->xin_id * 2);
+		reg_val &= ~(mask);
+		vbif_qos = is_realtime ?
+			mdata->vbif_rt_qos[i] : mdata->vbif_nrt_qos[i];
+		reg_val |= vbif_qos << (pipe->xin_id * 2);
+		SDE_VBIF_WRITE(mdata, SDE_VBIF_QOS_REMAP_BASE + i*4, reg_val);
+	}
+}
+
+/*
+ * sde_mdp_pipe_assign() - hand out a DMA pipe from a static pool
+ * @mdata: global rotator driver data (provides the mapped register base)
+ * @mixer: mixer to attach as the pipe's left mixer
+ * @ndx: pipe index; only 0 and 1 are valid (DMA0/DMA1)
+ *
+ * Pipe offsets and xin ids are hard-coded per target here.
+ * NOTE(review): entries are served from a static array with no reference
+ * counting or locking -- presumably callers serialize assignment; the
+ * matching sde_mdp_pipe_destroy() is a no-op.
+ *
+ * Return: pipe pointer, or ERR_PTR(-EINVAL) for an out-of-range @ndx.
+ */
+struct sde_mdp_pipe *sde_mdp_pipe_assign(struct sde_rot_data_type *mdata,
+	struct sde_mdp_mixer *mixer, u32 ndx)
+{
+	struct sde_mdp_pipe *pipe = NULL;
+	static struct sde_mdp_pipe sde_pipe[16];
+	static const u32 offset[] = {0x00025000, 0x00027000};
+	static const u32 xin_id[] = {2, 10};
+
+	if (ndx >= ARRAY_SIZE(offset)) {
+		SDEROT_ERR("invalid parameters\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	pipe = &sde_pipe[ndx];
+	pipe->num = ndx + SDE_MDP_SSPP_DMA0;
+	pipe->offset = offset[pipe->num - SDE_MDP_SSPP_DMA0];
+	pipe->xin_id = xin_id[pipe->num - SDE_MDP_SSPP_DMA0];
+	pipe->base = mdata->sde_io.base + pipe->offset;
+	pipe->type = SDE_MDP_PIPE_TYPE_DMA;
+	pipe->mixer_left = mixer;
+
+	return pipe;
+}
+
+/*
+ * Nothing to tear down: pipes are served from the static pool in
+ * sde_mdp_pipe_assign(), so destroy is a no-op that always succeeds.
+ */
+int sde_mdp_pipe_destroy(struct sde_mdp_pipe *pipe)
+{
+	return 0;
+}
+
+/*
+ * sde_mdp_pipe_position_update() - program src/dst rectangles for a pipe
+ * @pipe: pipe to program
+ * @src: source rectangle (crop within the source image)
+ * @dst: destination rectangle
+ *
+ * Each register packs height/y in bits [31:16] and width/x in bits [15:0].
+ */
+void sde_mdp_pipe_position_update(struct sde_mdp_pipe *pipe,
+		struct sde_rect *src, struct sde_rect *dst)
+{
+	u32 src_size, src_xy, dst_size, dst_xy;
+
+	src_size = (src->h << 16) | src->w;
+	src_xy = (src->y << 16) | src->x;
+	dst_size = (dst->h << 16) | dst->w;
+	dst_xy = (dst->y << 16) | dst->x;
+
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_SIZE, src_size);
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_XY, src_xy);
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_OUT_SIZE, dst_size);
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_OUT_XY, dst_xy);
+}
+
+/*
+ * sde_mdp_image_setup() - program source image geometry for a pipe
+ * @pipe: pipe to program
+ * @data: optional source buffer; when non-NULL it is validated against
+ *        the computed plane sizes before any register is written
+ *
+ * Computes plane sizes/strides for the current source format, applies
+ * deinterlace and decimation adjustments, then writes the image size,
+ * stride and decimation registers and updates src/dst positions.
+ *
+ * Return: 0 on success or a negative errno from the buffer check.
+ */
+static int sde_mdp_image_setup(struct sde_mdp_pipe *pipe,
+		struct sde_mdp_data *data)
+{
+	u32 img_size, ystride0, ystride1;
+	u32 width, height, decimation;
+	int ret = 0;
+	struct sde_rect dst, src;
+	bool rotation = false;
+
+	SDEROT_DBG(
+		"ctl: %d pnum=%d wh=%dx%d src={%d,%d,%d,%d} dst={%d,%d,%d,%d}\n",
+		pipe->mixer_left->ctl->num, pipe->num,
+		pipe->img_width, pipe->img_height,
+		pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
+		pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
+
+	width = pipe->img_width;
+	height = pipe->img_height;
+
+	if (pipe->flags & SDE_SOURCE_ROTATED_90)
+		rotation = true;
+
+	sde_mdp_get_plane_sizes(pipe->src_fmt, width, height,
+		&pipe->src_planes, pipe->bwc_mode, rotation);
+
+	if (data != NULL) {
+		ret = sde_mdp_data_check(data, &pipe->src_planes,
+			pipe->src_fmt);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * Deinterlacing without 90-degree rotation: fetch every other
+	 * line by doubling the strides and halving the height.
+	 */
+	if ((pipe->flags & SDE_DEINTERLACE) &&
+			!(pipe->flags & SDE_SOURCE_ROTATED_90)) {
+		int i;
+
+		for (i = 0; i < pipe->src_planes.num_planes; i++)
+			pipe->src_planes.ystride[i] *= 2;
+		width *= 2;
+		height /= 2;
+	}
+
+	/* decimation: horizontal factor in bits [8+], vertical in bits [0+] */
+	decimation = ((1 << pipe->horz_deci) - 1) << 8;
+	decimation |= ((1 << pipe->vert_deci) - 1);
+	if (decimation)
+		SDEROT_DBG("Image decimation h=%d v=%d\n",
+				pipe->horz_deci, pipe->vert_deci);
+
+	dst = pipe->dst;
+	src = pipe->src;
+
+	/* pack plane strides two per register */
+	ystride0 = (pipe->src_planes.ystride[0]) |
+			(pipe->src_planes.ystride[1] << 16);
+	ystride1 = (pipe->src_planes.ystride[2]) |
+			(pipe->src_planes.ystride[3] << 16);
+
+	img_size = (height << 16) | width;
+
+	sde_mdp_pipe_position_update(pipe, &src, &dst);
+
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_IMG_SIZE, img_size);
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_YSTRIDE0, ystride0);
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_YSTRIDE1, ystride1);
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_DECIMATION_CONFIG,
+			decimation);
+
+	return 0;
+}
+
+/*
+ * sde_mdp_format_setup() - program source format registers for a pipe
+ * @pipe: pipe to program
+ *
+ * Builds the SRC_FORMAT, UNPACK_PATTERN and OP_MODE register values from
+ * the pipe's format parameters and flags (flip, rot90, BWC/UBWC, secure)
+ * and writes them out. Also clears any latched UBWC error status.
+ *
+ * Return: always 0.
+ */
+static int sde_mdp_format_setup(struct sde_mdp_pipe *pipe)
+{
+	struct sde_mdp_format_params *fmt;
+	u32 chroma_samp, unpack, src_format;
+	u32 secure = 0;
+	u32 opmode;
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+
+	fmt = pipe->src_fmt;
+
+	if (pipe->flags & SDE_SECURE_OVERLAY_SESSION)
+		secure = 0xF;
+
+	opmode = pipe->bwc_mode;
+	if (pipe->flags & SDE_FLIP_LR)
+		opmode |= SDE_MDP_OP_FLIP_LR;
+	if (pipe->flags & SDE_FLIP_UD)
+		opmode |= SDE_MDP_OP_FLIP_UD;
+
+	SDEROT_DBG("pnum=%d format=%d opmode=%x\n", pipe->num, fmt->format,
+			opmode);
+
+	/* 90-degree rotation swaps the chroma subsampling orientation */
+	chroma_samp = fmt->chroma_sample;
+	if (pipe->flags & SDE_SOURCE_ROTATED_90) {
+		if (chroma_samp == SDE_MDP_CHROMA_H2V1)
+			chroma_samp = SDE_MDP_CHROMA_H1V2;
+		else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
+			chroma_samp = SDE_MDP_CHROMA_H2V1;
+	}
+
+	src_format = (chroma_samp << 23) |
+		(fmt->fetch_planes << 19) |
+		(fmt->bits[C3_ALPHA] << 6) |
+		(fmt->bits[C2_R_Cr] << 4) |
+		(fmt->bits[C1_B_Cb] << 2) |
+		(fmt->bits[C0_G_Y] << 0);
+
+	if (sde_mdp_is_tilea4x_format(fmt))
+		src_format |= BIT(30);
+
+	if (sde_mdp_is_tilea5x_format(fmt))
+		src_format |= BIT(31);
+
+	if (pipe->flags & SDE_ROT_90)
+		src_format |= BIT(11); /* ROT90 */
+
+	if (fmt->alpha_enable &&
+			fmt->fetch_planes != SDE_MDP_PLANE_INTERLEAVED)
+		src_format |= BIT(8); /* SRCC3_EN */
+
+	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+			(fmt->element[1] << 8) | (fmt->element[0] << 0);
+	src_format |= ((fmt->unpack_count - 1) << 12) |
+			(fmt->unpack_tight << 17) |
+			(fmt->unpack_align_msb << 18) |
+			((fmt->bpp - 1) << 9);
+
+	if (sde_mdp_is_ubwc_format(fmt))
+		opmode |= BIT(0);
+
+	if (fmt->is_yuv)
+		src_format |= BIT(15);
+
+	/* non-linear (tiled/UBWC) fetch needs the DDR highest bank bit */
+	if (fmt->frame_format != SDE_MDP_FMT_LINEAR
+		&& mdata->highest_bank_bit) {
+		sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_FETCH_CONFIG,
+			SDE_MDP_FETCH_CONFIG_RESET_VALUE |
+				mdata->highest_bank_bit << 18);
+	}
+
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_FORMAT, src_format);
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack);
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_OP_MODE, opmode);
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure);
+
+	/* clear UBWC error */
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_UBWC_ERROR_STATUS, BIT(31));
+
+	return 0;
+}
+
+/*
+ * sde_mdp_src_addr_setup() - program the four source plane addresses
+ * @pipe: pipe to program
+ * @src_data: source buffer descriptor
+ *
+ * Works on a local copy of @src_data so the offset calculation does not
+ * modify the caller's descriptor; x/y are zero, so the computed offsets
+ * are effectively the plane base addresses.
+ *
+ * Return: 0 on success or a negative errno from the buffer check.
+ */
+static int sde_mdp_src_addr_setup(struct sde_mdp_pipe *pipe,
+				   struct sde_mdp_data *src_data)
+{
+	struct sde_mdp_data data = *src_data;
+	u32 x = 0, y = 0;
+	int ret = 0;
+
+	SDEROT_DBG("pnum=%d\n", pipe->num);
+
+	ret = sde_mdp_data_check(&data, &pipe->src_planes, pipe->src_fmt);
+	if (ret)
+		return ret;
+
+	sde_rot_data_calc_offset(&data, x, y,
+		&pipe->src_planes, pipe->src_fmt);
+
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC0_ADDR, data.p[0].addr);
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC1_ADDR, data.p[1].addr);
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC2_ADDR, data.p[2].addr);
+	sde_mdp_pipe_write(pipe, SDE_MDP_REG_SSPP_SRC3_ADDR, data.p[3].addr);
+
+	return 0;
+}
+
+/**
+ * sde_mdp_pipe_queue_data() - program pipe registers and queue a buffer
+ * @pipe: pipe to program
+ * @src_data: source buffer to fetch from
+ *
+ * QoS/VBIF, image geometry and format registers are reprogrammed only when
+ * pipe parameters changed since the last commit; source addresses and the
+ * mixer update happen on every call. The two consecutive
+ * "if (params_changed)" blocks of the original were merged -- same order
+ * of operations, one condition check.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+int sde_mdp_pipe_queue_data(struct sde_mdp_pipe *pipe,
+			struct sde_mdp_data *src_data)
+{
+	int ret = 0;
+	u32 params_changed;
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+
+	if (!pipe) {
+		SDEROT_ERR("pipe not setup properly for queue\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Reprogram the pipe when there is no dedicated wfd blk and
+	 * virtual mixer is allocated for the DMA pipe during concurrent
+	 * line and block mode operations
+	 */
+	params_changed = pipe->params_changed;
+	if (params_changed) {
+		bool is_realtime = !(pipe->mixer_left->rotator_mode);
+
+		sde_mdp_qos_vbif_remapper_setup(mdata, pipe, is_realtime);
+
+		if (mdata->vbif_nrt_io.base)
+			sde_mdp_pipe_nrt_vbif_setup(mdata, pipe);
+
+		pipe->params_changed = 0;
+
+		ret = sde_mdp_image_setup(pipe, src_data);
+		if (ret) {
+			SDEROT_ERR("image setup error for pnum=%d\n",
+					pipe->num);
+			goto done;
+		}
+
+		ret = sde_mdp_format_setup(pipe);
+		if (ret) {
+			SDEROT_ERR("format %d setup error pnum=%d\n",
+					pipe->src_fmt->format, pipe->num);
+			goto done;
+		}
+
+		if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map))
+			sde_mdp_pipe_qos_lut(pipe);
+	}
+
+	ret = sde_mdp_src_addr_setup(pipe, src_data);
+	if (ret) {
+		SDEROT_ERR("addr setup error for pnum=%d\n", pipe->num);
+		goto done;
+	}
+
+	sde_mdp_mixer_pipe_update(pipe, pipe->mixer_left,
+			params_changed);
+done:
+	return ret;
+}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c
new file mode 100644
index 000000000000..65cb3963e4a1
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r1_wb.c
@@ -0,0 +1,532 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include "sde_rotator_r1_hwio.h"
+#include "sde_rotator_util.h"
+#include "sde_rotator_r1_internal.h"
+#include "sde_rotator_core.h"
+
+/* wait for at most 2 vsync for lowest refresh rate (24hz) */
+#define KOFF_TIMEOUT msecs_to_jiffies(84)
+
+/*
+ * if BWC enabled and format is H1V2 or 420, do not use site C or I.
+ * Hence, set the bits 29:26 in format register, as zero.
+ */
+#define BWC_FMT_MASK 0xC3FFFFFF
+#define MDSS_DEFAULT_OT_SETTING 0x10
+
+enum sde_mdp_writeback_type {
+ SDE_MDP_WRITEBACK_TYPE_ROTATOR,
+ SDE_MDP_WRITEBACK_TYPE_LINE,
+ SDE_MDP_WRITEBACK_TYPE_WFD,
+};
+
+struct sde_mdp_writeback_ctx {
+ u32 wb_num;
+ char __iomem *base;
+ u8 ref_cnt;
+ u8 type;
+ struct completion wb_comp;
+ int comp_cnt;
+
+ u32 intr_type;
+ u32 intf_num;
+
+ u32 xin_id;
+ u32 wr_lim;
+ struct sde_mdp_shared_reg_ctrl clk_ctrl;
+
+ u32 opmode;
+ struct sde_mdp_format_params *dst_fmt;
+ u16 img_width;
+ u16 img_height;
+ u16 width;
+ u16 height;
+ struct sde_rect dst_rect;
+
+ u32 dnsc_factor_w;
+ u32 dnsc_factor_h;
+
+ u8 rot90;
+ u32 bwc_mode;
+
+ struct sde_mdp_plane_sizes dst_planes;
+
+ ktime_t start_time;
+ ktime_t end_time;
+ u32 offset;
+};
+
+static struct sde_mdp_writeback_ctx wb_ctx_list[SDE_MDP_MAX_WRITEBACK] = {
+ {
+ .type = SDE_MDP_WRITEBACK_TYPE_ROTATOR,
+ .intr_type = SDE_MDP_IRQ_WB_ROT_COMP,
+ .intf_num = 0,
+ .xin_id = 3,
+ .clk_ctrl.reg_off = 0x2BC,
+ .clk_ctrl.bit_off = 0x8,
+ },
+ {
+ .type = SDE_MDP_WRITEBACK_TYPE_ROTATOR,
+ .intr_type = SDE_MDP_IRQ_WB_ROT_COMP,
+ .intf_num = 1,
+ .xin_id = 11,
+ .clk_ctrl.reg_off = 0x2BC,
+ .clk_ctrl.bit_off = 0xC,
+ },
+};
+
+/* Write @val to the writeback register at @reg (relative to ctx->base). */
+static inline void sde_wb_write(struct sde_mdp_writeback_ctx *ctx,
+				u32 reg, u32 val)
+{
+	SDEROT_DBG("wb%d:%6.6x:%8.8x\n", ctx->wb_num, ctx->offset + reg, val);
+	writel_relaxed(val, ctx->base + reg);
+}
+
+/*
+ * sde_mdp_writeback_addr_setup() - program destination plane addresses
+ * @ctx: writeback context
+ * @in_data: destination buffer descriptor (not modified)
+ *
+ * Works on a local copy so the offset calculation and the chroma swap do
+ * not touch the caller's descriptor.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int sde_mdp_writeback_addr_setup(struct sde_mdp_writeback_ctx *ctx,
+					 const struct sde_mdp_data *in_data)
+{
+	int ret;
+	struct sde_mdp_data data;
+
+	if (!in_data)
+		return -EINVAL;
+	data = *in_data;
+
+	SDEROT_DBG("wb_num=%d addr=0x%pa\n", ctx->wb_num, &data.p[0].addr);
+
+	ret = sde_mdp_data_check(&data, &ctx->dst_planes, ctx->dst_fmt);
+	if (ret)
+		return ret;
+
+	sde_rot_data_calc_offset(&data, ctx->dst_rect.x, ctx->dst_rect.y,
+			&ctx->dst_planes, ctx->dst_fmt);
+
+	/* planar formats that carry Cb first need the chroma planes swapped */
+	if ((ctx->dst_fmt->fetch_planes == SDE_MDP_PLANE_PLANAR) &&
+			(ctx->dst_fmt->element[0] == C1_B_Cb))
+		swap(data.p[1].addr, data.p[2].addr);
+
+	sde_wb_write(ctx, SDE_MDP_REG_WB_DST0_ADDR, data.p[0].addr);
+	sde_wb_write(ctx, SDE_MDP_REG_WB_DST1_ADDR, data.p[1].addr);
+	sde_wb_write(ctx, SDE_MDP_REG_WB_DST2_ADDR, data.p[2].addr);
+	sde_wb_write(ctx, SDE_MDP_REG_WB_DST3_ADDR, data.p[3].addr);
+
+	return 0;
+}
+
+/*
+ * sde_mdp_writeback_format_setup() - program destination format registers
+ * @ctx: writeback context
+ * @format: destination pixel format id
+ * @ctl: owning control path (currently unused here beyond the signature)
+ *
+ * Resolves format parameters, computes destination plane sizes, builds
+ * the DST_FORMAT/PACK_PATTERN/OP_MODE/stride/size register values
+ * (including UBWC and rotator downscale handling) and writes them out.
+ *
+ * Return: 0 on success, -EINVAL for an unsupported format.
+ */
+static int sde_mdp_writeback_format_setup(struct sde_mdp_writeback_ctx *ctx,
+		u32 format, struct sde_mdp_ctl *ctl)
+{
+	struct sde_mdp_format_params *fmt;
+	u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
+	u32 dnsc_factor, write_config = 0;
+	u32 opmode = ctx->opmode;
+	bool rotation = false;
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+
+	SDEROT_DBG("wb_num=%d format=%d\n", ctx->wb_num, format);
+
+	if (ctx->rot90)
+		rotation = true;
+
+	fmt = sde_get_format_params(format);
+	if (!fmt) {
+		SDEROT_ERR("wb format=%d not supported\n", format);
+		return -EINVAL;
+	}
+
+	sde_mdp_get_plane_sizes(fmt, ctx->img_width, ctx->img_height,
+				&ctx->dst_planes,
+				ctx->opmode & SDE_MDP_OP_BWC_EN, rotation);
+
+	ctx->dst_fmt = fmt;
+
+	chroma_samp = fmt->chroma_sample;
+
+	dst_format = (chroma_samp << 23) |
+		(fmt->fetch_planes << 19) |
+		(fmt->bits[C3_ALPHA] << 6) |
+		(fmt->bits[C2_R_Cr] << 4) |
+		(fmt->bits[C1_B_Cb] << 2) |
+		(fmt->bits[C0_G_Y] << 0);
+
+	/* strip bits 29:26 when BWC is in play (see BWC_FMT_MASK note) */
+	dst_format &= BWC_FMT_MASK;
+
+	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
+		dst_format |= BIT(8); /* DSTC3_EN */
+		if (!fmt->alpha_enable)
+			dst_format |= BIT(14); /* DST_ALPHA_X */
+	}
+
+	if (fmt->is_yuv)
+		dst_format |= BIT(15);
+
+	pattern = (fmt->element[3] << 24) |
+		  (fmt->element[2] << 16) |
+		  (fmt->element[1] << 8) |
+		  (fmt->element[0] << 0);
+
+	dst_format |= (fmt->unpack_align_msb << 18) |
+		      (fmt->unpack_tight << 17) |
+		      ((fmt->unpack_count - 1) << 12) |
+		      ((fmt->bpp - 1) << 9);
+
+	/* plane strides packed two per register */
+	ystride0 = (ctx->dst_planes.ystride[0]) |
+		   (ctx->dst_planes.ystride[1] << 16);
+	ystride1 = (ctx->dst_planes.ystride[2]) |
+		   (ctx->dst_planes.ystride[3] << 16);
+	outsize = (ctx->dst_rect.h << 16) | ctx->dst_rect.w;
+
+	if (sde_mdp_is_ubwc_format(fmt)) {
+		opmode |= BIT(0);
+
+		dst_format |= BIT(31);
+		if (mdata->highest_bank_bit)
+			write_config |= (mdata->highest_bank_bit << 8);
+
+		if (fmt->format == SDE_PIX_FMT_RGB_565_UBWC)
+			write_config |= 0x8;
+	}
+
+	/* rotator writebacks also carry the downscale factors */
+	if (ctx->type == SDE_MDP_WRITEBACK_TYPE_ROTATOR) {
+		dnsc_factor = (ctx->dnsc_factor_h) | (ctx->dnsc_factor_w << 16);
+		sde_wb_write(ctx, SDE_MDP_REG_WB_ROTATOR_PIPE_DOWNSCALER,
+								dnsc_factor);
+	}
+	sde_wb_write(ctx, SDE_MDP_REG_WB_ALPHA_X_VALUE, 0xFF);
+	sde_wb_write(ctx, SDE_MDP_REG_WB_DST_FORMAT, dst_format);
+	sde_wb_write(ctx, SDE_MDP_REG_WB_DST_OP_MODE, opmode);
+	sde_wb_write(ctx, SDE_MDP_REG_WB_DST_PACK_PATTERN, pattern);
+	sde_wb_write(ctx, SDE_MDP_REG_WB_DST_YSTRIDE0, ystride0);
+	sde_wb_write(ctx, SDE_MDP_REG_WB_DST_YSTRIDE1, ystride1);
+	sde_wb_write(ctx, SDE_MDP_REG_WB_OUT_SIZE, outsize);
+	sde_wb_write(ctx, SDE_MDP_REG_WB_DST_WRITE_CONFIG, write_config);
+	return 0;
+}
+
+/*
+ * sde_mdp_writeback_prepare_rot() - prepare a rotator writeback kickoff
+ * @ctl: control path carrying the writeback context in priv_data
+ * @arg: struct sde_mdp_writeback_arg whose priv_data is the rot entry
+ *
+ * Extracts the rotation item, fills the writeback context (output
+ * geometry, downscale factors, rot90) and programs the format registers.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int sde_mdp_writeback_prepare_rot(struct sde_mdp_ctl *ctl, void *arg)
+{
+	struct sde_mdp_writeback_ctx *ctx;
+	struct sde_mdp_writeback_arg *wb_args;
+	struct sde_rot_entry *entry;
+	struct sde_rotation_item *item;
+	struct sde_rot_data_type *mdata;
+	u32 format;
+
+	ctx = (struct sde_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx)
+		return -ENODEV;
+	wb_args = (struct sde_mdp_writeback_arg *) arg;
+	if (!wb_args)
+		return -ENOENT;
+
+	entry = (struct sde_rot_entry *) wb_args->priv_data;
+	if (!entry) {
+		SDEROT_ERR("unable to retrieve rot session ctl=%d\n", ctl->num);
+		return -ENODEV;
+	}
+	item = &entry->item;
+	mdata = ctl->mdata;
+	if (!mdata) {
+		SDEROT_ERR("no mdata attached to ctl=%d", ctl->num);
+		return -ENODEV;
+	}
+	SDEROT_DBG("rot setup wb_num=%d\n", ctx->wb_num);
+
+	ctx->opmode = BIT(6); /* ROT EN */
+	if (ctl->mdata->rot_block_size == 128)
+		ctx->opmode |= BIT(4); /* block size 128 */
+
+	/* BWC is explicitly disabled for the rotator path (OR is a no-op) */
+	ctx->bwc_mode = 0;
+	ctx->opmode |= ctx->bwc_mode;
+
+	ctx->img_width = item->output.width;
+	ctx->img_height = item->output.height;
+	ctx->width = ctx->dst_rect.w = item->dst_rect.w;
+	ctx->height = ctx->dst_rect.h = item->dst_rect.h;
+	ctx->dst_rect.x = item->dst_rect.x;
+	ctx->dst_rect.y = item->dst_rect.y;
+	ctx->dnsc_factor_w = entry->dnsc_factor_w;
+	ctx->dnsc_factor_h = entry->dnsc_factor_h;
+
+	ctx->rot90 = !!(item->flags & SDE_ROTATION_90);
+
+	format = item->output.format;
+
+	if (ctx->rot90)
+		ctx->opmode |= BIT(5); /* ROT 90 */
+
+	return sde_mdp_writeback_format_setup(ctx, format, ctl);
+}
+
+/*
+ * sde_mdp_writeback_stop() - release a writeback context from a ctl
+ * @ctl: control path to stop
+ * @panel_power_state: unused in this path
+ *
+ * Unregisters the interrupt callback, wakes any waiter, detaches the
+ * context from the ctl and drops the context reference.
+ *
+ * Return: always 0.
+ */
+static int sde_mdp_writeback_stop(struct sde_mdp_ctl *ctl,
+	int panel_power_state)
+{
+	struct sde_mdp_writeback_ctx *ctx;
+
+	SDEROT_DBG("stop ctl=%d\n", ctl->num);
+
+	ctx = (struct sde_mdp_writeback_ctx *) ctl->priv_data;
+	if (ctx) {
+		sde_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
+				NULL, NULL);
+
+		complete_all(&ctx->wb_comp);
+
+		ctl->priv_data = NULL;
+		ctx->ref_cnt--;
+	}
+
+	return 0;
+}
+
+/*
+ * sde_mdp_writeback_intr_done() - writeback-done interrupt callback
+ * @arg: the sde_mdp_ctl registered with sde_mdp_set_intr_callback()
+ *
+ * Masks the (one-shot per kickoff) irq and completes wb_comp so
+ * sde_mdp_wb_wait4comp() can proceed.
+ */
+static void sde_mdp_writeback_intr_done(void *arg)
+{
+	struct sde_mdp_ctl *ctl = arg;
+	struct sde_mdp_writeback_ctx *ctx = ctl->priv_data;
+
+	if (!ctx) {
+		SDEROT_ERR("invalid ctx\n");
+		return;
+	}
+
+	SDEROT_DBG("intr wb_num=%d\n", ctx->wb_num);
+	if (ctl->irq_num >= 0)
+		disable_irq_nosync(ctl->irq_num);
+	complete_all(&ctx->wb_comp);
+}
+
+/**
+ * sde_mdp_wb_wait4comp() - wait for a queued writeback kickoff to finish
+ * @ctl: control path whose writeback is pending
+ * @arg: unused
+ *
+ * Waits on the writeback-done interrupt when one is available, otherwise
+ * polls the MDP interrupt status register. On timeout with the done bit
+ * already latched, the missed interrupt is recovered manually.
+ *
+ * Fix: @isr and @rot_time were read uninitialized by the final debug
+ * print on some paths (isr is never written when the interrupt wait
+ * succeeds; rot_time only when rc == 0) -- both are now zero-initialized.
+ *
+ * Return: 0 on completion, -ENODEV on timeout.
+ */
+static int sde_mdp_wb_wait4comp(struct sde_mdp_ctl *ctl, void *arg)
+{
+	struct sde_mdp_writeback_ctx *ctx;
+	int rc = 0;
+	u64 rot_time = 0;
+	u32 status, mask, isr = 0;
+
+	ctx = (struct sde_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx) {
+		SDEROT_ERR("invalid ctx\n");
+		return -ENODEV;
+	}
+
+	/* nothing was kicked off, so there is nothing to wait for */
+	if (ctx->comp_cnt == 0)
+		return rc;
+
+	if (ctl->irq_num >= 0) {
+		rc = wait_for_completion_timeout(&ctx->wb_comp,
+				KOFF_TIMEOUT);
+		sde_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
+			NULL, NULL);
+
+		if (rc == 0) {
+			/*
+			 * Timed out: check whether the done bit is latched
+			 * but the interrupt was missed, and recover.
+			 */
+			mask = BIT(ctx->intr_type + ctx->intf_num);
+
+			isr = readl_relaxed(ctl->mdata->mdp_base +
+					SDE_MDP_REG_INTR_STATUS);
+			status = mask & isr;
+
+			SDEROT_INFO_ONCE(
+					"mask: 0x%x, isr: 0x%x, status: 0x%x\n",
+					mask, isr, status);
+
+			if (status) {
+				SDEROT_WARN("wb done but irq not triggered\n");
+				writel_relaxed(BIT(ctl->wb->num),
+						ctl->mdata->mdp_base +
+						SDE_MDP_REG_INTR_CLEAR);
+				sde_mdp_writeback_intr_done(ctl);
+				rc = 0;
+			} else {
+				rc = -ENODEV;
+				WARN(1, "wb timeout (%d) ctl=%d\n",
+						rc, ctl->num);
+				if (ctl->irq_num >= 0)
+					disable_irq_nosync(ctl->irq_num);
+			}
+		} else {
+			rc = 0;
+		}
+	} else {
+		/* use polling if interrupt is not available */
+		int cnt = 200;
+
+		mask = BIT(ctl->wb->num);
+		do {
+			udelay(500);
+			isr = readl_relaxed(ctl->mdata->mdp_base +
+				SDE_MDP_REG_INTR_STATUS);
+			status = mask & isr;
+			cnt--;
+		} while (cnt > 0 && !status);
+		writel_relaxed(mask, ctl->mdata->mdp_base +
+				SDE_MDP_REG_INTR_CLEAR);
+
+		rc = (status) ? 0 : -ENODEV;
+	}
+
+	if (rc == 0)
+		ctx->end_time = ktime_get();
+
+	/* release the smmu vote taken at kickoff */
+	sde_smmu_ctrl(0);
+	ctx->comp_cnt--;
+
+	if (!rc) {
+		rot_time = (u64)ktime_to_us(ctx->end_time) -
+				(u64)ktime_to_us(ctx->start_time);
+		SDEROT_DBG(
+			"ctx%d type:%d xin_id:%d intf_num:%d took %llu microsecs\n",
+			ctx->wb_num, ctx->type, ctx->xin_id,
+			ctx->intf_num, rot_time);
+	}
+
+	SDEROT_DBG("s:%8.8x %s t:%llu c:%d\n", isr,
+			(rc)?"Timeout":"Done", rot_time, ctx->comp_cnt);
+	return rc;
+}
+
+/*
+ * sde_mdp_set_ot_limit_wb() - apply the VBIF outstanding-transaction limit
+ * @ctx: writeback context supplying xin id, geometry and clock control
+ *
+ * Fix: ot_params is now zero-initialized so any fields of
+ * struct sde_mdp_set_ot_params not explicitly assigned below are passed
+ * to sde_mdp_set_ot_limit() as zero rather than as stack garbage.
+ */
+static void sde_mdp_set_ot_limit_wb(struct sde_mdp_writeback_ctx *ctx)
+{
+	struct sde_mdp_set_ot_params ot_params = {0};
+
+	ot_params.xin_id = ctx->xin_id;
+	ot_params.num = ctx->wb_num;
+	ot_params.width = ctx->width;
+	ot_params.height = ctx->height;
+	ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
+	ot_params.reg_off_mdp_clk_ctrl = ctx->clk_ctrl.reg_off;
+	ot_params.bit_off_mdp_clk_ctrl = ctx->clk_ctrl.bit_off;
+	ot_params.is_rot = (ctx->type == SDE_MDP_WRITEBACK_TYPE_ROTATOR);
+	ot_params.is_wb = true;
+	ot_params.is_yuv = ctx->dst_fmt->is_yuv;
+
+	sde_mdp_set_ot_limit(&ot_params);
+}
+
+/*
+ * sde_mdp_writeback_display() - kick off one writeback frame
+ * @ctl: control path to kick off
+ * @arg: struct sde_mdp_writeback_arg with the destination buffer
+ *
+ * Programs destination addresses, registers the done-interrupt callback,
+ * flushes the writeback path, votes for smmu, and issues CTL_START.
+ * comp_cnt tracks the outstanding kickoff for sde_mdp_wb_wait4comp().
+ *
+ * NOTE(review): on the smmu-attach failure path the irq has already been
+ * enabled and the intr callback registered without being undone -- verify
+ * whether callers recover via sde_mdp_writeback_stop().
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int sde_mdp_writeback_display(struct sde_mdp_ctl *ctl, void *arg)
+{
+	struct sde_mdp_writeback_ctx *ctx;
+	struct sde_mdp_writeback_arg *wb_args;
+	u32 flush_bits = 0;
+	int ret;
+
+	if (!ctl || !ctl->mdata)
+		return -ENODEV;
+
+	ctx = (struct sde_mdp_writeback_ctx *) ctl->priv_data;
+	if (!ctx)
+		return -ENODEV;
+
+	/* a previous kickoff must be reaped before starting another */
+	if (ctx->comp_cnt) {
+		SDEROT_ERR("previous kickoff not completed yet, ctl=%d\n",
+				ctl->num);
+		return -EPERM;
+	}
+
+	if (ctl->mdata->default_ot_wr_limit ||
+			ctl->mdata->default_ot_rd_limit)
+		sde_mdp_set_ot_limit_wb(ctx);
+
+	wb_args = (struct sde_mdp_writeback_arg *) arg;
+	if (!wb_args)
+		return -ENOENT;
+
+	ret = sde_mdp_writeback_addr_setup(ctx, wb_args->data);
+	if (ret) {
+		SDEROT_ERR("writeback data setup error ctl=%d\n", ctl->num);
+		return ret;
+	}
+
+	sde_mdp_set_intr_callback(ctx->intr_type, ctx->intf_num,
+		sde_mdp_writeback_intr_done, ctl);
+
+	flush_bits |= ctl->flush_reg_data;
+	flush_bits |= BIT(16); /* WB */
+	sde_wb_write(ctx, SDE_MDP_REG_WB_DST_ADDR_SW_STATUS, ctl->is_secure);
+	sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_FLUSH, flush_bits);
+
+	reinit_completion(&ctx->wb_comp);
+	if (ctl->irq_num >= 0)
+		enable_irq(ctl->irq_num);
+	ret = sde_smmu_ctrl(1);
+	if (IS_ERR_VALUE(ret)) {
+		SDEROT_ERR("IOMMU attach failed\n");
+		return ret;
+	}
+
+	ctx->start_time = ktime_get();
+	sde_mdp_ctl_write(ctl, SDE_MDP_REG_CTL_START, 1);
+	/* ensure that start command is issued after the barrier */
+	wmb();
+
+	SDEROT_DBG("ctx%d type:%d xin_id:%d intf_num:%d start\n",
+		ctx->wb_num, ctx->type, ctx->xin_id, ctx->intf_num);
+
+	ctx->comp_cnt++;
+
+	return 0;
+}
+
+/*
+ * sde_mdp_writeback_start() - bind a writeback context to a ctl
+ * @ctl: control path being started
+ *
+ * Selects a context from wb_ctx_list based on the ctl opmode intf field,
+ * claims it via ref_cnt, and installs the writeback ops on the ctl.
+ * Note mem_sel is u32, so an opmode intf field of 0 wraps to a large
+ * value and is rejected by the range check below.
+ *
+ * Return: 0 on success, -EBUSY if the context is in use, -EINVAL for an
+ * invalid writeback selection.
+ */
+int sde_mdp_writeback_start(struct sde_mdp_ctl *ctl)
+{
+	struct sde_mdp_writeback_ctx *ctx;
+	struct sde_mdp_writeback *wb;
+	u32 mem_sel;
+
+	SDEROT_DBG("start ctl=%d\n", ctl->num);
+
+	if (!ctl->wb) {
+		SDEROT_DBG("wb not setup in the ctl\n");
+		return 0;
+	}
+
+	wb = ctl->wb;
+	mem_sel = (ctl->opmode & 0xF) - 1;
+	if (mem_sel < SDE_MDP_MAX_WRITEBACK) {
+		ctx = &wb_ctx_list[mem_sel];
+		if (ctx->ref_cnt) {
+			SDEROT_ERR("writeback in use %d\n", mem_sel);
+			return -EBUSY;
+		}
+		ctx->ref_cnt++;
+	} else {
+		SDEROT_ERR("invalid writeback mode %d\n", mem_sel);
+		return -EINVAL;
+	}
+
+	ctl->priv_data = ctx;
+	ctx->wb_num = wb->num;
+	ctx->base = wb->base;
+	ctx->offset = wb->offset;
+
+	init_completion(&ctx->wb_comp);
+
+	if (ctx->type == SDE_MDP_WRITEBACK_TYPE_ROTATOR)
+		ctl->ops.prepare_fnc = sde_mdp_writeback_prepare_rot;
+
+	ctl->ops.stop_fnc = sde_mdp_writeback_stop;
+	ctl->ops.display_fnc = sde_mdp_writeback_display;
+	ctl->ops.wait_fnc = sde_mdp_wb_wait4comp;
+
+	return 0;
+}
+
+/* Thin wrapper: commit a writeback display with no commit callback. */
+int sde_mdp_writeback_display_commit(struct sde_mdp_ctl *ctl, void *arg)
+{
+	return sde_mdp_display_commit(ctl, arg, NULL);
+}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
new file mode 100644
index 000000000000..6cc975e22cd4
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -0,0 +1,623 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/iommu.h>
+#include <linux/qcom_iommu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/of_platform.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <linux/qcom_iommu.h>
+#include <asm/dma-iommu.h>
+
+#include "soc/qcom/secure_buffer.h"
+#include "sde_rotator_base.h"
+#include "sde_rotator_util.h"
+#include "sde_rotator_io_util.h"
+#include "sde_rotator_smmu.h"
+
+#define SMMU_SDE_ROT_SEC "qcom,smmu_sde_rot_sec"
+#define SMMU_SDE_ROT_UNSEC "qcom,smmu_sde_rot_unsec"
+
+/*
+ * Static description of one SMMU context bank: its name, domain index
+ * (enum sde_iommu_domain_type) and the VA range to map for it.
+ */
+struct sde_smmu_domain {
+ char *ctx_name;
+ int domain;
+ unsigned long start;
+ unsigned long size;
+};
+
+/* Every domain type is currently valid for the rotator; placeholder hook. */
+static inline bool sde_smmu_is_valid_domain_type(
+ struct sde_rot_data_type *mdata, int domain_type)
+{
+ return true;
+}
+
+/*
+ * sde_smmu_get_cb - return the smmu client for a domain index
+ * @domain: index into mdata->sde_smmu (enum sde_iommu_domain_type)
+ *
+ * Returns NULL for an out-of-range or invalid domain.
+ */
+struct sde_smmu_client *sde_smmu_get_cb(u32 domain)
+{
+ struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+
+ if (!sde_smmu_is_valid_domain_type(mdata, domain))
+ return NULL;
+
+ return (domain >= SDE_IOMMU_MAX_DOMAIN) ? NULL :
+ &mdata->sde_smmu[domain];
+}
+
+/*
+ * sde_smmu_util_parse_dt_clock - build the clock table from device tree
+ * @pdev: smmu context bank platform device
+ * @mp: module power handle whose clk_config table is allocated here
+ *
+ * Reads the "clock-names" strings and matching "clock-rate" entries; a
+ * zero rate marks an AHB clock, anything else a PCLK. Returns 0 on
+ * success or a negative error code (mp->num_clk is reset to 0 on
+ * allocation failure).
+ */
+static int sde_smmu_util_parse_dt_clock(struct platform_device *pdev,
+ struct sde_module_power *mp)
+{
+ u32 i = 0;
+ int rc = 0;
+ const char *clock_name;
+ u32 clock_rate;
+ int num_clk;
+
+ num_clk = of_property_count_strings(pdev->dev.of_node,
+ "clock-names");
+ if (num_clk <= 0) {
+ SDEROT_ERR("clocks are not defined\n");
+ /* fix: this path logged an error but returned 0 (success)
+  * because rc was never set; report the failure explicitly.
+  */
+ rc = -EINVAL;
+ goto clk_err;
+ }
+
+ mp->num_clk = num_clk;
+ mp->clk_config = devm_kzalloc(&pdev->dev,
+ sizeof(struct sde_clk) * mp->num_clk, GFP_KERNEL);
+ if (!mp->clk_config) {
+ rc = -ENOMEM;
+ mp->num_clk = 0;
+ goto clk_err;
+ }
+
+ for (i = 0; i < mp->num_clk; i++) {
+ of_property_read_string_index(pdev->dev.of_node, "clock-names",
+ i, &clock_name);
+ strlcpy(mp->clk_config[i].clk_name, clock_name,
+ sizeof(mp->clk_config[i].clk_name));
+
+ of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
+ i, &clock_rate);
+ mp->clk_config[i].rate = clock_rate;
+
+ /* zero rate denotes a bus (AHB) clock, nonzero a core clock */
+ if (!clock_rate)
+ mp->clk_config[i].type = SDE_CLK_AHB;
+ else
+ mp->clk_config[i].type = SDE_CLK_PCLK;
+ }
+
+clk_err:
+ return rc;
+}
+
+/*
+ * sde_smmu_clk_register - parse the DT clock table and acquire each clock
+ * @pdev: smmu context bank platform device
+ * @mp: module power handle receiving the clk handles
+ *
+ * Returns 0 on success or a negative error code; devm-managed handles
+ * need no explicit release here.
+ */
+static int sde_smmu_clk_register(struct platform_device *pdev,
+ struct sde_module_power *mp)
+{
+ int i, ret;
+ struct clk *clk;
+
+ ret = sde_smmu_util_parse_dt_clock(pdev, mp);
+ if (ret) {
+ SDEROT_ERR("unable to parse clocks\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < mp->num_clk; i++) {
+ clk = devm_clk_get(&pdev->dev,
+ mp->clk_config[i].clk_name);
+ if (IS_ERR(clk)) {
+ SDEROT_ERR("unable to get clk: %s\n",
+ mp->clk_config[i].clk_name);
+ return PTR_ERR(clk);
+ }
+ mp->clk_config[i].clk = clk;
+ }
+ return 0;
+}
+
+/*
+ * sde_smmu_enable_power - vote/unvote regulators, reg bus and clocks
+ * @sde_smmu: smmu client whose module power is toggled
+ * @enable: true to power up, false to power down
+ *
+ * Enable order is vreg -> reg bus vote -> clocks, with the reverse on
+ * disable and full rollback if the clock enable fails. No-op when the
+ * client has neither regulators nor clocks configured.
+ */
+static int sde_smmu_enable_power(struct sde_smmu_client *sde_smmu,
+ bool enable)
+{
+ int rc = 0;
+ struct sde_module_power *mp;
+
+ if (!sde_smmu)
+ return -EINVAL;
+
+ mp = &sde_smmu->mp;
+
+ if (!mp->num_vreg && !mp->num_clk)
+ return 0;
+
+ if (enable) {
+ rc = sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, true);
+ if (rc) {
+ SDEROT_ERR("vreg enable failed - rc:%d\n", rc);
+ goto end;
+ }
+ sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
+ VOTE_INDEX_19_MHZ);
+ rc = sde_rot_enable_clk(mp->clk_config, mp->num_clk, true);
+ if (rc) {
+ SDEROT_ERR("clock enable failed - rc:%d\n", rc);
+ /* roll back the bus vote and regulators on failure */
+ sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
+ VOTE_INDEX_DISABLE);
+ sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg,
+ false);
+ goto end;
+ }
+ } else {
+ sde_rot_enable_clk(mp->clk_config, mp->num_clk, false);
+ sde_update_reg_bus_vote(sde_smmu->reg_bus_clt,
+ VOTE_INDEX_DISABLE);
+ sde_rot_enable_vreg(mp->vreg_config, mp->num_vreg, false);
+ }
+end:
+ return rc;
+}
+
+/*
+ * sde_smmu_attach()
+ *
+ * Associates each configured VA range with the corresponding smmu context
+ * bank device. Enables the clks as smmu requires voting it before the usage.
+ * And iommu attach is done only once during the initial attach and it is never
+ * detached as smmu v2 uses a feature called 'retention'.
+ */
+static int sde_smmu_attach(struct sde_rot_data_type *mdata)
+{
+ struct sde_smmu_client *sde_smmu;
+ int i, rc = 0;
+
+ for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
+ if (!sde_smmu_is_valid_domain_type(mdata, i))
+ continue;
+
+ sde_smmu = sde_smmu_get_cb(i);
+ if (sde_smmu && sde_smmu->dev) {
+ rc = sde_smmu_enable_power(sde_smmu, true);
+ if (rc) {
+ SDEROT_ERR(
+ "power enable failed - domain:[%d] rc:%d\n",
+ i, rc);
+ goto err;
+ }
+
+ /* attach only once; 'retention' keeps later cycles */
+ if (!sde_smmu->domain_attached) {
+ rc = arm_iommu_attach_device(sde_smmu->dev,
+ sde_smmu->mmu_mapping);
+ if (rc) {
+ SDEROT_ERR(
+ "iommu attach device failed for domain[%d] with err:%d\n",
+ i, rc);
+ sde_smmu_enable_power(sde_smmu,
+ false);
+ goto err;
+ }
+ sde_smmu->domain_attached = true;
+ SDEROT_DBG("iommu v2 domain[%i] attached\n", i);
+ }
+ } else {
+ SDEROT_ERR(
+ "iommu device not attached for domain[%d]\n",
+ i);
+ return -ENODEV;
+ }
+ }
+ return 0;
+
+err:
+ /* unwind every domain handled before the failing one */
+ for (i--; i >= 0; i--) {
+ sde_smmu = sde_smmu_get_cb(i);
+ if (sde_smmu && sde_smmu->dev) {
+ arm_iommu_detach_device(sde_smmu->dev);
+ sde_smmu_enable_power(sde_smmu, false);
+ sde_smmu->domain_attached = false;
+ }
+ }
+ return rc;
+}
+
+/*
+ * sde_smmu_detach()
+ *
+ * Only disables the clks as it is not required to detach the iommu mapped
+ * VA range from the device in smmu as explained in the sde_smmu_attach
+ */
+static int sde_smmu_detach(struct sde_rot_data_type *mdata)
+{
+ struct sde_smmu_client *sde_smmu;
+ int i;
+
+ for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
+ if (!sde_smmu_is_valid_domain_type(mdata, i))
+ continue;
+
+ sde_smmu = sde_smmu_get_cb(i);
+ if (sde_smmu && sde_smmu->dev)
+ sde_smmu_enable_power(sde_smmu, false);
+ }
+ /* always succeeds; power-down has no failure path here */
+ return 0;
+}
+
+/* Domain type maps one-to-one onto the iommu domain index. */
+int sde_smmu_get_domain_id(u32 type)
+{
+ return type;
+}
+
+/*
+ * sde_smmu_dma_buf_attach()
+ *
+ * Same as dma_buf_attach except that the device is taken from
+ * the configured smmu v2 context bank for the given domain; the @dev
+ * argument is unused.
+ */
+struct dma_buf_attachment *sde_smmu_dma_buf_attach(
+ struct dma_buf *dma_buf, struct device *dev, int domain)
+{
+ struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
+
+ if (!sde_smmu) {
+ SDEROT_ERR("not able to get smmu context\n");
+ return NULL;
+ }
+
+ return dma_buf_attach(dma_buf, sde_smmu->dev);
+}
+
+/*
+ * sde_smmu_map_dma_buf()
+ *
+ * Maps existing buffer (by struct scatterlist) into SMMU context bank device.
+ * From which we can take the virtual address and size allocated.
+ * msm_map_dma_buf is deprecated with smmu v2; dma_map_sg is used instead.
+ * Returns the mapped iova/size of the first sg entry through @iova/@size.
+ */
+int sde_smmu_map_dma_buf(struct dma_buf *dma_buf,
+ struct sg_table *table, int domain, dma_addr_t *iova,
+ unsigned long *size, int dir)
+{
+ int rc;
+ struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
+
+ if (!sde_smmu) {
+ SDEROT_ERR("not able to get smmu context\n");
+ return -EINVAL;
+ }
+
+ rc = msm_dma_map_sg_lazy(sde_smmu->dev, table->sgl, table->nents, dir,
+ dma_buf);
+ /* dma_map_sg returns the number of entries mapped; anything short
+  * of the full table is treated as failure.
+  */
+ if (rc != table->nents) {
+ SDEROT_ERR("dma map sg failed\n");
+ return -ENOMEM;
+ }
+
+ *iova = table->sgl->dma_address;
+ *size = table->sgl->dma_length;
+ return 0;
+}
+
+/*
+ * sde_smmu_unmap_dma_buf - undo sde_smmu_map_dma_buf for the same domain
+ */
+void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
+ int dir, struct dma_buf *dma_buf)
+{
+ struct sde_smmu_client *sde_smmu = sde_smmu_get_cb(domain);
+
+ if (!sde_smmu) {
+ SDEROT_ERR("not able to get smmu context\n");
+ return;
+ }
+
+ msm_dma_unmap_sg(sde_smmu->dev, table->sgl, table->nents, dir,
+ dma_buf);
+}
+
+/* serializes iommu_ref_cnt / iommu_attached updates below */
+static DEFINE_MUTEX(sde_smmu_ref_cnt_lock);
+
+/*
+ * sde_smmu_ctrl - reference-counted SMMU attach/detach
+ * @enable: nonzero takes a reference (attaching on 0 -> 1), zero drops
+ * one (detaching on 1 -> 0)
+ *
+ * Returns a negative error code on attach/detach failure, otherwise the
+ * current reference count.
+ * NOTE(review): the final ref-count read happens after the mutex is
+ * released, so the returned count may be stale under concurrency —
+ * confirm callers only use it as a boolean/debug value.
+ */
+int sde_smmu_ctrl(int enable)
+{
+ struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+ int rc = 0;
+
+ mutex_lock(&sde_smmu_ref_cnt_lock);
+ SDEROT_DBG("%pS: enable:%d ref_cnt:%d attach:%d\n",
+ __builtin_return_address(0), enable, mdata->iommu_ref_cnt,
+ mdata->iommu_attached);
+
+ if (enable) {
+ if (!mdata->iommu_attached) {
+ rc = sde_smmu_attach(mdata);
+ if (!rc)
+ mdata->iommu_attached = true;
+ }
+ /* count the reference even if attach failed; caller is
+  * expected to balance with a disable call
+  */
+ mdata->iommu_ref_cnt++;
+ } else {
+ if (mdata->iommu_ref_cnt) {
+ mdata->iommu_ref_cnt--;
+ if (mdata->iommu_ref_cnt == 0)
+ if (mdata->iommu_attached) {
+ rc = sde_smmu_detach(mdata);
+ if (!rc)
+ mdata->iommu_attached = false;
+ }
+ } else {
+ SDEROT_ERR("unbalanced iommu ref\n");
+ }
+ }
+ mutex_unlock(&sde_smmu_ref_cnt_lock);
+
+ if (IS_ERR_VALUE(rc))
+ return rc;
+ else
+ return mdata->iommu_ref_cnt;
+}
+
+/*
+ * sde_smmu_device_create()
+ * @dev: sde_mdp device
+ *
+ * For smmu, each context bank is a separate child device of sde rot.
+ * Platform devices are created for those smmu related child devices of
+ * sde rot here. This would facilitate probes to happen for these devices in
+ * which the smmu mapping and initialization is handled.
+ */
+void sde_smmu_device_create(struct device *dev)
+{
+ struct device_node *parent, *child;
+
+ parent = dev->of_node;
+ /* only the secure/unsecure rotator smmu compatibles are spawned */
+ for_each_child_of_node(parent, child) {
+ if (of_device_is_compatible(child, SMMU_SDE_ROT_SEC))
+ of_platform_device_create(child, NULL, dev);
+ else if (of_device_is_compatible(child, SMMU_SDE_ROT_UNSEC))
+ of_platform_device_create(child, NULL, dev);
+ }
+}
+
+/* Entry point: spawn the smmu child platform devices; always succeeds. */
+int sde_smmu_init(struct device *dev)
+{
+ sde_smmu_device_create(dev);
+
+ return 0;
+}
+
+/*
+ * sde_smmu_fault_handler - iommu fault callback for rotator domains
+ *
+ * @token carries the sde_smmu_client registered at probe time. Currently
+ * only logs the faulting iova; returns -ENOSYS so default fault
+ * reporting still runs.
+ */
+static int sde_smmu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova,
+ int flags, void *token)
+{
+ struct sde_smmu_client *sde_smmu;
+ int rc = -ENOSYS;
+
+ if (!token) {
+ SDEROT_ERR("Error: token is NULL\n");
+ return -ENOSYS;
+ }
+
+ sde_smmu = (struct sde_smmu_client *)token;
+
+ /* TODO: trigger rotator panic and dump */
+ SDEROT_ERR("TODO: trigger rotator panic and dump, iova=0x%08lx\n",
+ iova);
+
+ return rc;
+}
+
+/* VA layout: unsecure maps 128K..1G, secure maps 1G..3G. */
+static struct sde_smmu_domain sde_rot_unsec = {
+ "rot_0", SDE_IOMMU_DOMAIN_ROT_UNSECURE, SZ_128K, (SZ_1G - SZ_128K)};
+static struct sde_smmu_domain sde_rot_sec = {
+ "rot_1", SDE_IOMMU_DOMAIN_ROT_SECURE, SZ_1G, SZ_2G};
+
+/* match data selects the domain descriptor for each compatible */
+static const struct of_device_id sde_smmu_dt_match[] = {
+ { .compatible = SMMU_SDE_ROT_UNSEC, .data = &sde_rot_unsec},
+ { .compatible = SMMU_SDE_ROT_SEC, .data = &sde_rot_sec},
+ {}
+};
+MODULE_DEVICE_TABLE(of, sde_smmu_dt_match);
+
+/*
+ * sde_smmu_probe()
+ * @pdev: platform device
+ *
+ * Each smmu context acts as a separate device and the context banks are
+ * configured with a VA range.
+ * Registers the clks as each context bank has its own clks, for which voting
+ * has to be done every time before using that context bank.
+ */
+int sde_smmu_probe(struct platform_device *pdev)
+{
+ struct device *dev;
+ struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+ struct sde_smmu_client *sde_smmu;
+ int rc = 0;
+ struct sde_smmu_domain smmu_domain;
+ const struct of_device_id *match;
+ struct sde_module_power *mp;
+ int disable_htw = 1;
+ char name[MAX_CLIENT_NAME_LEN];
+
+ if (!mdata) {
+ /* defer until the rotator core has initialized mdata */
+ SDEROT_ERR("probe failed as mdata is not initialized\n");
+ return -EPROBE_DEFER;
+ }
+
+ match = of_match_device(sde_smmu_dt_match, &pdev->dev);
+ if (!match || !match->data) {
+ SDEROT_ERR("probe failed as match data is invalid\n");
+ return -EINVAL;
+ }
+
+ smmu_domain = *(struct sde_smmu_domain *) (match->data);
+ if (smmu_domain.domain >= SDE_IOMMU_MAX_DOMAIN) {
+ SDEROT_ERR("no matching device found\n");
+ return -EINVAL;
+ }
+
+ /* the context bank must carry an "iommus" phandle */
+ if (of_find_property(pdev->dev.of_node, "iommus", NULL)) {
+ dev = &pdev->dev;
+ } else {
+ SDEROT_ERR("Invalid SMMU ctx for domain:%d\n",
+ smmu_domain.domain);
+ return -EINVAL;
+ }
+
+ sde_smmu = &mdata->sde_smmu[smmu_domain.domain];
+ mp = &sde_smmu->mp;
+ memset(mp, 0, sizeof(struct sde_module_power));
+
+ if (of_find_property(pdev->dev.of_node,
+ "gdsc-mdss-supply", NULL)) {
+
+ mp->vreg_config = devm_kzalloc(&pdev->dev,
+ sizeof(struct sde_vreg), GFP_KERNEL);
+ if (!mp->vreg_config)
+ return -ENOMEM;
+
+ strlcpy(mp->vreg_config->vreg_name, "gdsc-mdss",
+ sizeof(mp->vreg_config->vreg_name));
+ mp->num_vreg = 1;
+ }
+
+ rc = sde_rot_config_vreg(&pdev->dev, mp->vreg_config,
+ mp->num_vreg, true);
+ if (rc) {
+ SDEROT_ERR("vreg config failed rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = sde_smmu_clk_register(pdev, mp);
+ if (rc) {
+ SDEROT_ERR(
+ "smmu clk register failed for domain[%d] with err:%d\n",
+ smmu_domain.domain, rc);
+ sde_rot_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
+ false);
+ return rc;
+ }
+
+ snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
+ sde_smmu->reg_bus_clt = sde_reg_bus_vote_client_create(name);
+ if (IS_ERR_OR_NULL(sde_smmu->reg_bus_clt)) {
+ SDEROT_ERR("mdss bus client register failed\n");
+ sde_rot_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
+ false);
+ return PTR_ERR(sde_smmu->reg_bus_clt);
+ }
+
+ /* power stays on only while the mapping and attributes are set up */
+ rc = sde_smmu_enable_power(sde_smmu, true);
+ if (rc) {
+ SDEROT_ERR("power enable failed - domain:[%d] rc:%d\n",
+ smmu_domain.domain, rc);
+ goto bus_client_destroy;
+ }
+
+ sde_smmu->mmu_mapping = arm_iommu_create_mapping(
+ msm_iommu_get_bus(dev), smmu_domain.start, smmu_domain.size);
+ if (IS_ERR(sde_smmu->mmu_mapping)) {
+ SDEROT_ERR("iommu create mapping failed for domain[%d]\n",
+ smmu_domain.domain);
+ rc = PTR_ERR(sde_smmu->mmu_mapping);
+ goto disable_power;
+ }
+
+ rc = iommu_domain_set_attr(sde_smmu->mmu_mapping->domain,
+ DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
+ if (rc) {
+ SDEROT_ERR("couldn't disable coherent HTW\n");
+ goto release_mapping;
+ }
+
+ if (smmu_domain.domain == SDE_IOMMU_DOMAIN_ROT_SECURE) {
+ int secure_vmid = VMID_CP_PIXEL;
+
+ rc = iommu_domain_set_attr(sde_smmu->mmu_mapping->domain,
+ DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
+ if (rc) {
+ SDEROT_ERR("couldn't set secure pixel vmid\n");
+ goto release_mapping;
+ }
+ }
+
+ iommu_set_fault_handler(sde_smmu->mmu_mapping->domain,
+ sde_smmu_fault_handler, (void *)sde_smmu);
+
+ sde_smmu_enable_power(sde_smmu, false);
+
+ /* nonzero ->dev marks the context bank ready for sde_smmu_attach */
+ sde_smmu->dev = dev;
+ SDEROT_INFO(
+ "iommu v2 domain[%d] mapping and clk register successful!\n",
+ smmu_domain.domain);
+ return 0;
+
+release_mapping:
+ arm_iommu_release_mapping(sde_smmu->mmu_mapping);
+disable_power:
+ sde_smmu_enable_power(sde_smmu, false);
+bus_client_destroy:
+ sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
+ sde_smmu->reg_bus_clt = NULL;
+ sde_rot_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
+ false);
+ return rc;
+}
+
+/*
+ * sde_smmu_remove - release the iommu mapping owned by this device
+ *
+ * Scans all domains and releases only the mapping whose context bank
+ * device matches the removed platform device.
+ */
+int sde_smmu_remove(struct platform_device *pdev)
+{
+ int i;
+ struct sde_smmu_client *sde_smmu;
+
+ for (i = 0; i < SDE_IOMMU_MAX_DOMAIN; i++) {
+ sde_smmu = sde_smmu_get_cb(i);
+ if (sde_smmu && sde_smmu->dev &&
+ (sde_smmu->dev == &pdev->dev))
+ arm_iommu_release_mapping(sde_smmu->mmu_mapping);
+ }
+ return 0;
+}
+
+/* Platform driver for the rotator smmu context bank child devices. */
+static struct platform_driver sde_smmu_driver = {
+ .probe = sde_smmu_probe,
+ .remove = sde_smmu_remove,
+ .shutdown = NULL,
+ .driver = {
+ .name = "sde_smmu",
+ .of_match_table = sde_smmu_dt_match,
+ },
+};
+
+/* Thin wrapper so module init can log registration failures. */
+static int sde_smmu_register_driver(void)
+{
+ return platform_driver_register(&sde_smmu_driver);
+}
+
+/* Module entry: register the smmu platform driver. */
+static int __init sde_smmu_driver_init(void)
+{
+ int ret;
+
+ ret = sde_smmu_register_driver();
+ if (ret)
+ SDEROT_ERR("sde_smmu_register_driver() failed!\n");
+
+ return ret;
+}
+module_init(sde_smmu_driver_init);
+
+/* Module exit: unregister the smmu platform driver. */
+static void __exit sde_smmu_driver_cleanup(void)
+{
+ platform_driver_unregister(&sde_smmu_driver);
+}
+module_exit(sde_smmu_driver_cleanup);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SDE SMMU driver");
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.h
new file mode 100644
index 000000000000..1adcfb7310f2
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.h
@@ -0,0 +1,48 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef SDE_ROTATOR_SMMU_H
+#define SDE_ROTATOR_SMMU_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+
+#include "sde_rotator_io_util.h"
+
+enum sde_iommu_domain_type {
+ SDE_IOMMU_DOMAIN_ROT_UNSECURE,
+ SDE_IOMMU_DOMAIN_ROT_SECURE,
+ SDE_IOMMU_MAX_DOMAIN
+};
+
+int sde_smmu_init(struct device *dev);
+
+static inline int sde_smmu_dma_data_direction(int dir)
+{
+ return dir;
+}
+
+int sde_smmu_ctrl(int enable);
+
+struct dma_buf_attachment *sde_smmu_dma_buf_attach(
+ struct dma_buf *dma_buf, struct device *dev, int domain);
+
+int sde_smmu_map_dma_buf(struct dma_buf *dma_buf,
+ struct sg_table *table, int domain, dma_addr_t *iova,
+ unsigned long *size, int dir);
+
+void sde_smmu_unmap_dma_buf(struct sg_table *table, int domain,
+ int dir, struct dma_buf *dma_buf);
+
+#endif /* SDE_ROTATOR_SMMU_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_sync.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_sync.c
new file mode 100644
index 000000000000..2990522735ab
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_sync.c
@@ -0,0 +1,277 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/slab.h>
+#include <linux/sync.h>
+#include <linux/sw_sync.h>
+
+#include "sde_rotator_util.h"
+#include "sde_rotator_sync.h"
+
+/*
+ * A rotator sync timeline: wraps a sw_sync timeline with a mutex and the
+ * next fence value to be handed out; fence_name labels created fences.
+ */
+struct sde_rot_timeline {
+ struct mutex lock;
+ struct sw_sync_timeline *timeline;
+ u32 next_value;
+ char fence_name[32];
+};
+/*
+ * sde_rotator_create_timeline - Create timeline object with the given name
+ * @name: Pointer to name character string.
+ *
+ * Returns the new timeline, or NULL on invalid name or allocation/
+ * sw_sync creation failure.
+ */
+struct sde_rot_timeline *sde_rotator_create_timeline(const char *name)
+{
+ char tl_name[32];
+ struct sde_rot_timeline *tl;
+
+ if (!name) {
+ SDEROT_ERR("invalid parameters\n");
+ return NULL;
+ }
+
+ tl = kzalloc(sizeof(struct sde_rot_timeline), GFP_KERNEL);
+ if (!tl)
+ return NULL;
+
+ snprintf(tl_name, sizeof(tl_name), "rot_timeline_%s", name);
+ SDEROT_DBG("timeline name=%s\n", tl_name);
+ tl->timeline = sw_sync_timeline_create(tl_name);
+ if (!tl->timeline) {
+ SDEROT_ERR("fail to allocate timeline\n");
+ kfree(tl);
+ return NULL;
+ }
+
+ snprintf(tl->fence_name, sizeof(tl->fence_name), "rot_fence_%s", name);
+ mutex_init(&tl->lock);
+ tl->next_value = 0;
+
+ return tl;
+}
+
+/*
+ * sde_rotator_destroy_timeline - Destroy the given timeline object
+ * @tl: Pointer to timeline object.
+ */
+void sde_rotator_destroy_timeline(struct sde_rot_timeline *tl)
+{
+ if (!tl) {
+ SDEROT_ERR("invalid parameters\n");
+ return;
+ }
+
+ if (tl->timeline)
+ sync_timeline_destroy((struct sync_timeline *) tl->timeline);
+
+ kfree(tl);
+}
+
+/*
+ * sde_rotator_resync_timeline - Resync timeline to last committed value
+ * @tl: Pointer to timeline object.
+ *
+ * Signals (flushes) any fences still outstanding between the timeline's
+ * current value and the last committed next_value.
+ */
+void sde_rotator_resync_timeline(struct sde_rot_timeline *tl)
+{
+ int val;
+
+ if (!tl || !tl->timeline) {
+ SDEROT_ERR("invalid parameters\n");
+ return;
+ }
+ mutex_lock(&tl->lock);
+ val = tl->next_value - tl->timeline->value;
+ if (val > 0) {
+ SDEROT_WARN("flush %s:%d\n", tl->fence_name, val);
+ sw_sync_timeline_inc(tl->timeline, val);
+ }
+ mutex_unlock(&tl->lock);
+}
+
+/*
+ * sde_rotator_get_sync_fence - Create fence object from the given timeline
+ * @tl: Pointer to timeline object
+ * @fence_fd: Pointer to file descriptor associated with the returned fence.
+ * Null if not required.
+ * @timestamp: Pointer to timestamp of the returned fence. Null if not required.
+ *
+ * The fence is created at next_value + 1; next_value is only advanced
+ * once every step (sync point, fence, optional fd) has succeeded.
+ * Returns NULL on any failure, with all partial objects released.
+ */
+struct sde_rot_sync_fence *sde_rotator_get_sync_fence(
+ struct sde_rot_timeline *tl, int *fence_fd,
+ u32 *timestamp)
+{
+ u32 val;
+ struct sync_pt *sync_pt;
+ struct sync_fence *fence;
+
+ if (!tl || !tl->timeline) {
+ SDEROT_ERR("invalid parameters\n");
+ return NULL;
+ }
+
+ mutex_lock(&tl->lock);
+ val = tl->next_value + 1;
+
+ sync_pt = sw_sync_pt_create(tl->timeline, val);
+ if (sync_pt == NULL) {
+ SDEROT_ERR("cannot create sync point\n");
+ goto sync_pt_create_err;
+ }
+
+ /* create fence */
+ fence = sync_fence_create(tl->fence_name, sync_pt);
+ if (fence == NULL) {
+ SDEROT_ERR("%s: cannot create fence\n",
+ tl->fence_name);
+ goto sync_fence_create_err;
+ }
+
+ if (fence_fd) {
+ int fd = get_unused_fd_flags(0);
+
+ if (fd < 0) {
+ SDEROT_ERR("get_unused_fd_flags failed error:0x%x\n",
+ fd);
+ goto get_fd_err;
+ }
+
+ sync_fence_install(fence, fd);
+ *fence_fd = fd;
+ }
+
+ if (timestamp)
+ *timestamp = val;
+
+ tl->next_value++;
+ mutex_unlock(&tl->lock);
+ SDEROT_DBG("output sync point created at val=%u\n", val);
+
+ return (struct sde_rot_sync_fence *) fence;
+get_fd_err:
+ /* fence owns the sync point from here; put releases both */
+ SDEROT_DBG("sys_fence_put c:%p\n", fence);
+ sync_fence_put(fence);
+sync_fence_create_err:
+ sync_pt_free(sync_pt);
+sync_pt_create_err:
+ mutex_unlock(&tl->lock);
+ return NULL;
+}
+
+/*
+ * sde_rotator_inc_timeline - Increment timeline by given amount
+ * @tl: Pointer to timeline object.
+ * @increment: the amount to increase the timeline by.
+ *
+ * Signals fences up to the new timeline value. Returns 0 or -EINVAL.
+ */
+int sde_rotator_inc_timeline(struct sde_rot_timeline *tl, int increment)
+{
+ if (!tl || !tl->timeline) {
+ SDEROT_ERR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&tl->lock);
+ sw_sync_timeline_inc(tl->timeline, increment);
+ mutex_unlock(&tl->lock);
+
+ return 0;
+}
+
+/*
+ * sde_rotator_get_timeline_commit_ts - Return commit tick of given timeline
+ * @tl: Pointer to timeline object; 0 is returned for a NULL timeline.
+ */
+u32 sde_rotator_get_timeline_commit_ts(struct sde_rot_timeline *tl)
+{
+ return tl ? tl->next_value : 0;
+}
+
+/*
+ * sde_rotator_get_timeline_retire_ts - Return retire tick of given timeline
+ * @tl: Pointer to timeline object.
+ *
+ * Returns 0 (and logs an error) when the timeline is missing.
+ */
+u32 sde_rotator_get_timeline_retire_ts(struct sde_rot_timeline *tl)
+{
+ if (!tl || !tl->timeline) {
+ SDEROT_ERR("invalid parameters\n");
+ return 0;
+ }
+
+ return tl->timeline->value;
+}
+
+/*
+ * sde_rotator_put_sync_fence - Destroy given fence object
+ * @fence: Pointer to fence object.
+ *
+ * Drops one reference on the underlying sync_fence.
+ */
+void sde_rotator_put_sync_fence(struct sde_rot_sync_fence *fence)
+{
+ if (!fence) {
+ SDEROT_ERR("invalid parameters\n");
+ return;
+ }
+
+ sync_fence_put((struct sync_fence *) fence);
+}
+
+/*
+ * sde_rotator_wait_sync_fence - Wait until fence signal or timeout
+ * @fence: Pointer to fence object.
+ * @timeout: maximum wait time, in msec, for fence to signal.
+ *
+ * Returns -EINVAL for a NULL fence, otherwise the sync_fence_wait result.
+ */
+int sde_rotator_wait_sync_fence(struct sde_rot_sync_fence *fence,
+ long timeout)
+{
+ struct sync_fence *sf;
+
+ if (fence == NULL)
+ return -EINVAL;
+
+ sf = (struct sync_fence *) fence;
+ return sync_fence_wait(sf, timeout);
+}
+
+/*
+ * sde_rotator_get_fd_sync_fence - Get fence object of given file descriptor
+ * @fd: File descriptor of fence object.
+ *
+ * Takes a reference on the fence; pair with sde_rotator_put_sync_fence.
+ */
+struct sde_rot_sync_fence *sde_rotator_get_fd_sync_fence(int fd)
+{
+ return (struct sde_rot_sync_fence *) sync_fence_fdget(fd);
+}
+
+/*
+ * sde_rotator_get_sync_fence_fd - Get file descriptor of given fence object
+ * @fence: Pointer to fence object.
+ *
+ * Installs the fence into a new fd; returns the fd or a negative errno.
+ */
+int sde_rotator_get_sync_fence_fd(struct sde_rot_sync_fence *fence)
+{
+ int fd;
+
+ if (!fence) {
+ SDEROT_ERR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ fd = get_unused_fd_flags(0);
+
+ if (fd < 0) {
+ SDEROT_ERR("fail to get unused fd\n");
+ return fd;
+ }
+
+ sync_fence_install((struct sync_fence *) fence, fd);
+
+ return fd;
+}
+
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_sync.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_sync.h
new file mode 100644
index 000000000000..651f7f857d54
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_sync.h
@@ -0,0 +1,115 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef SDE_ROTATOR_SYNC_H
+#define SDE_ROTATOR_SYNC_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+/* opaque handles; definitions live in sde_rotator_sync.c */
+struct sde_rot_sync_fence;
+struct sde_rot_timeline;
+
+#if defined(CONFIG_SYNC) && defined(CONFIG_SW_SYNC)
+struct sde_rot_timeline *sde_rotator_create_timeline(const char *name);
+
+void sde_rotator_destroy_timeline(struct sde_rot_timeline *tl);
+
+struct sde_rot_sync_fence *sde_rotator_get_sync_fence(
+ struct sde_rot_timeline *tl, int *fence_fd, u32 *timestamp);
+
+void sde_rotator_resync_timeline(struct sde_rot_timeline *tl);
+
+u32 sde_rotator_get_timeline_commit_ts(struct sde_rot_timeline *tl);
+
+u32 sde_rotator_get_timeline_retire_ts(struct sde_rot_timeline *tl);
+
+int sde_rotator_inc_timeline(struct sde_rot_timeline *tl, int increment);
+
+void sde_rotator_put_sync_fence(struct sde_rot_sync_fence *fence);
+
+int sde_rotator_wait_sync_fence(struct sde_rot_sync_fence *fence,
+ long timeout);
+
+struct sde_rot_sync_fence *sde_rotator_get_fd_sync_fence(int fd);
+
+int sde_rotator_get_sync_fence_fd(struct sde_rot_sync_fence *fence);
+
+#else
+/*
+ * No-op stubs when the sync framework is not configured: timelines and
+ * fences are NULL, waits succeed immediately, and fd lookup fails with
+ * -EBADF.
+ */
+static inline
+struct sde_rot_timeline *sde_rotator_create_timeline(const char *name)
+{
+ return NULL;
+}
+
+static inline
+void sde_rotator_destroy_timeline(struct sde_rot_timeline *tl)
+{
+}
+
+static inline
+struct sde_rot_sync_fence *sde_rotator_get_sync_fence(
+ struct sde_rot_timeline *tl, int *fence_fd, u32 *timestamp)
+{
+ return NULL;
+}
+
+static inline
+void sde_rotator_resync_timeline(struct sde_rot_timeline *tl)
+{
+}
+
+static inline
+int sde_rotator_inc_timeline(struct sde_rot_timeline *tl, int increment)
+{
+ return 0;
+}
+
+static inline
+u32 sde_rotator_get_timeline_commit_ts(struct sde_rot_timeline *tl)
+{
+ return 0;
+}
+
+static inline
+u32 sde_rotator_get_timeline_retire_ts(struct sde_rot_timeline *tl)
+{
+ return 0;
+}
+
+static inline
+void sde_rotator_put_sync_fence(struct sde_rot_sync_fence *fence)
+{
+}
+
+static inline
+int sde_rotator_wait_sync_fence(struct sde_rot_sync_fence *fence,
+ long timeout)
+{
+ return 0;
+}
+
+static inline
+struct sde_rot_sync_fence *sde_rotator_get_fd_sync_fence(int fd)
+{
+ return NULL;
+}
+
+static inline
+int sde_rotator_get_sync_fence_fd(struct sde_rot_sync_fence *fence)
+{
+ return -EBADF;
+}
+#endif
+
+#endif /* SDE_ROTATOR_SYNC_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_trace.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_trace.h
new file mode 100644
index 000000000000..5a20c2166d91
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_trace.h
@@ -0,0 +1,302 @@
+/* Copyright (c) 2014, 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#if !defined(TRACE_SDE_ROTATOR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define TRACE_SDE_ROTATOR_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sde_rotator
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE sde_rotator_trace
+
+#include <linux/tracepoint.h>
+
+/*
+ * rot_entry_template - common payload for per-request rotator trace
+ * entries: session/sequence/prequeue ids, request flags, and the
+ * source/destination format, buffer dimensions and crop rectangles.
+ * Coordinates are narrowed to u16 in the ring buffer to keep the
+ * record small (values are bounded by SDE_ROT_MAX_IMG_WIDTH/HEIGHT).
+ */
+DECLARE_EVENT_CLASS(rot_entry_template,
+	TP_PROTO(u32 ss_id, u32 sq_id, u32 pr_id, u32 flags,
+		u32 src_fmt, u32 src_bw, u32 src_bh,
+		u32 src_x, u32 src_y, u32 src_w, u32 src_h,
+		u32 dst_fmt, u32 dst_bw, u32 dst_bh,
+		u32 dst_x, u32 dst_y, u32 dst_w, u32 dst_h),
+	TP_ARGS(ss_id, sq_id, pr_id, flags,
+		src_fmt, src_bw, src_bh, src_x, src_y, src_w, src_h,
+		dst_fmt, dst_bw, dst_bh, dst_x, dst_y, dst_w, dst_h),
+	TP_STRUCT__entry(
+		__field(u32, ss_id)
+		__field(u32, sq_id)
+		__field(u32, pr_id)
+		__field(u32, flags)
+		__field(u32, src_fmt)
+		__field(u16, src_bw)
+		__field(u16, src_bh)
+		__field(u16, src_x)
+		__field(u16, src_y)
+		__field(u16, src_w)
+		__field(u16, src_h)
+		__field(u32, dst_fmt)
+		__field(u16, dst_bw)
+		__field(u16, dst_bh)
+		__field(u16, dst_x)
+		__field(u16, dst_y)
+		__field(u16, dst_w)
+		__field(u16, dst_h)
+	),
+	TP_fast_assign(
+		__entry->ss_id = ss_id;
+		__entry->sq_id = sq_id;
+		__entry->pr_id = pr_id;
+		__entry->flags = flags;
+		__entry->src_fmt = src_fmt;
+		__entry->src_bw = src_bw;
+		__entry->src_bh = src_bh;
+		__entry->src_x = src_x;
+		__entry->src_y = src_y;
+		__entry->src_w = src_w;
+		__entry->src_h = src_h;
+		__entry->dst_fmt = dst_fmt;
+		__entry->dst_bw = dst_bw;
+		__entry->dst_bh = dst_bh;
+		__entry->dst_x = dst_x;
+		__entry->dst_y = dst_y;
+		__entry->dst_w = dst_w;
+		__entry->dst_h = dst_h;
+	),
+
+	TP_printk("%d.%d|%d|%x|%x|%u,%u|%u,%u,%u,%u|%x|%u,%u|%u,%u,%u,%u|",
+		__entry->ss_id, __entry->sq_id, __entry->pr_id,
+		__entry->flags,
+		__entry->src_fmt, __entry->src_bw, __entry->src_bh,
+		__entry->src_x, __entry->src_y,
+		__entry->src_w, __entry->src_h,
+		__entry->dst_fmt, __entry->dst_bw, __entry->dst_bh,
+		__entry->dst_x, __entry->dst_y,
+		__entry->dst_w, __entry->dst_h)
+);
+
+/* Request entry traced at the fence-wait stage */
+DEFINE_EVENT(rot_entry_template, rot_entry_fence,
+	TP_PROTO(u32 ss_id, u32 sq_id, u32 pr_id, u32 flags,
+		u32 src_fmt, u32 src_bw, u32 src_bh,
+		u32 src_x, u32 src_y, u32 src_w, u32 src_h,
+		u32 dst_fmt, u32 dst_bw, u32 dst_bh,
+		u32 dst_x, u32 dst_y, u32 dst_w, u32 dst_h),
+	TP_ARGS(ss_id, sq_id, pr_id, flags,
+		src_fmt, src_bw, src_bh, src_x, src_y, src_w, src_h,
+		dst_fmt, dst_bw, dst_bh, dst_x, dst_y, dst_w, dst_h)
+);
+
+/* Request entry traced at hardware commit */
+DEFINE_EVENT(rot_entry_template, rot_entry_commit,
+	TP_PROTO(u32 ss_id, u32 sq_id, u32 pr_id, u32 flags,
+		u32 src_fmt, u32 src_bw, u32 src_bh,
+		u32 src_x, u32 src_y, u32 src_w, u32 src_h,
+		u32 dst_fmt, u32 dst_bw, u32 dst_bh,
+		u32 dst_x, u32 dst_y, u32 dst_w, u32 dst_h),
+	TP_ARGS(ss_id, sq_id, pr_id, flags,
+		src_fmt, src_bw, src_bh, src_x, src_y, src_w, src_h,
+		dst_fmt, dst_bw, dst_bh, dst_x, dst_y, dst_w, dst_h)
+);
+
+/* Request entry traced on rotation completion */
+DEFINE_EVENT(rot_entry_template, rot_entry_done,
+	TP_PROTO(u32 ss_id, u32 sq_id, u32 pr_id, u32 flags,
+		u32 src_fmt, u32 src_bw, u32 src_bh,
+		u32 src_x, u32 src_y, u32 src_w, u32 src_h,
+		u32 dst_fmt, u32 dst_bw, u32 dst_bh,
+		u32 dst_x, u32 dst_y, u32 dst_w, u32 dst_h),
+	TP_ARGS(ss_id, sq_id, pr_id, flags,
+		src_fmt, src_bw, src_bh, src_x, src_y, src_w, src_h,
+		dst_fmt, dst_bw, dst_bh, dst_x, dst_y, dst_w, dst_h)
+);
+
+/* QoS LUT programmed for a pipe (pnum); lin distinguishes linear fmts */
+TRACE_EVENT(rot_perf_set_qos_luts,
+	TP_PROTO(u32 pnum, u32 fmt, u32 lut, bool linear),
+	TP_ARGS(pnum, fmt, lut, linear),
+	TP_STRUCT__entry(
+		__field(u32, pnum)
+		__field(u32, fmt)
+		__field(u32, lut)
+		__field(bool, linear)
+	),
+	TP_fast_assign(
+		__entry->pnum = pnum;
+		__entry->fmt = fmt;
+		__entry->lut = lut;
+		__entry->linear = linear;
+	),
+	TP_printk("pnum=%d fmt=%d lut=0x%x lin:%d",
+		__entry->pnum, __entry->fmt,
+		__entry->lut, __entry->linear)
+);
+
+/* Panic/robust LUT pair programmed for a pipe */
+TRACE_EVENT(rot_perf_set_panic_luts,
+	TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 panic_lut,
+		u32 robust_lut),
+	TP_ARGS(pnum, fmt, mode, panic_lut, robust_lut),
+	TP_STRUCT__entry(
+		__field(u32, pnum)
+		__field(u32, fmt)
+		__field(u32, mode)
+		__field(u32, panic_lut)
+		__field(u32, robust_lut)
+	),
+	TP_fast_assign(
+		__entry->pnum = pnum;
+		__entry->fmt = fmt;
+		__entry->mode = mode;
+		__entry->panic_lut = panic_lut;
+		__entry->robust_lut = robust_lut;
+	),
+	TP_printk("pnum=%d fmt=%d mode=%d luts[0x%x, 0x%x]",
+		__entry->pnum, __entry->fmt,
+		__entry->mode, __entry->panic_lut,
+		__entry->robust_lut)
+);
+
+/* Watermark levels computed for a pipe's macroblock budget */
+TRACE_EVENT(rot_perf_set_wm_levels,
+	TP_PROTO(u32 pnum, u32 use_space, u32 priority_bytes, u32 wm0, u32 wm1,
+		u32 wm2, u32 mb_cnt, u32 mb_size),
+	TP_ARGS(pnum, use_space, priority_bytes, wm0, wm1, wm2, mb_cnt,
+		mb_size),
+	TP_STRUCT__entry(
+		__field(u32, pnum)
+		__field(u32, use_space)
+		__field(u32, priority_bytes)
+		__field(u32, wm0)
+		__field(u32, wm1)
+		__field(u32, wm2)
+		__field(u32, mb_cnt)
+		__field(u32, mb_size)
+	),
+	TP_fast_assign(
+		__entry->pnum = pnum;
+		__entry->use_space = use_space;
+		__entry->priority_bytes = priority_bytes;
+		__entry->wm0 = wm0;
+		__entry->wm1 = wm1;
+		__entry->wm2 = wm2;
+		__entry->mb_cnt = mb_cnt;
+		__entry->mb_size = mb_size;
+	),
+	TP_printk(
+		"pnum:%d useable_space:%d priority_bytes:%d watermark:[%d | %d | %d] nmb=%d mb_size=%d",
+		__entry->pnum, __entry->use_space,
+		__entry->priority_bytes, __entry->wm0, __entry->wm1,
+		__entry->wm2, __entry->mb_cnt, __entry->mb_size)
+);
+
+/* Outstanding-transaction (OT) read limit set on a bus client (xin) */
+TRACE_EVENT(rot_perf_set_ot,
+	TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim),
+	TP_ARGS(pnum, xin_id, rd_lim),
+	TP_STRUCT__entry(
+		__field(u32, pnum)
+		__field(u32, xin_id)
+		__field(u32, rd_lim)
+	),
+	TP_fast_assign(
+		__entry->pnum = pnum;
+		__entry->xin_id = xin_id;
+		__entry->rd_lim = rd_lim;
+	),
+	TP_printk("pnum:%d xin_id:%d ot:%d",
+		__entry->pnum, __entry->xin_id, __entry->rd_lim)
+);
+
+/* Intermediate terms of the prefill bandwidth calculation */
+TRACE_EVENT(rot_perf_prefill_calc,
+	TP_PROTO(u32 pnum, u32 latency_buf, u32 ot, u32 y_buf, u32 y_scaler,
+		u32 pp_lines, u32 pp_bytes, u32 post_sc, u32 fbc_bytes,
+		u32 prefill_bytes),
+	TP_ARGS(pnum, latency_buf, ot, y_buf, y_scaler, pp_lines, pp_bytes,
+		post_sc, fbc_bytes, prefill_bytes),
+	TP_STRUCT__entry(
+		__field(u32, pnum)
+		__field(u32, latency_buf)
+		__field(u32, ot)
+		__field(u32, y_buf)
+		__field(u32, y_scaler)
+		__field(u32, pp_lines)
+		__field(u32, pp_bytes)
+		__field(u32, post_sc)
+		__field(u32, fbc_bytes)
+		__field(u32, prefill_bytes)
+	),
+	TP_fast_assign(
+		__entry->pnum = pnum;
+		__entry->latency_buf = latency_buf;
+		__entry->ot = ot;
+		__entry->y_buf = y_buf;
+		__entry->y_scaler = y_scaler;
+		__entry->pp_lines = pp_lines;
+		__entry->pp_bytes = pp_bytes;
+		__entry->post_sc = post_sc;
+		__entry->fbc_bytes = fbc_bytes;
+		__entry->prefill_bytes = prefill_bytes;
+	),
+	TP_printk(
+		"pnum:%d latency_buf:%d ot:%d y_buf:%d y_scaler:%d pp_lines:%d, pp_bytes=%d post_sc:%d fbc_bytes:%d prefill:%d",
+		__entry->pnum, __entry->latency_buf, __entry->ot,
+		__entry->y_buf, __entry->y_scaler, __entry->pp_lines,
+		__entry->pp_bytes, __entry->post_sc,
+		__entry->fbc_bytes, __entry->prefill_bytes)
+);
+
+/* systrace-style begin/end marker: prints "B|pid|name" or "E|pid|name" */
+TRACE_EVENT(rot_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(trace_name, name)
+			__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(trace_name, name);
+			__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+		__entry->pid, __get_str(trace_name))
+);
+
+/* systrace-style counter: prints "pid|name|value".
+ * NOTE(review): the pid argument is ignored; fast_assign records
+ * current->tgid instead -- confirm whether that is intentional.
+ */
+TRACE_EVENT(rot_trace_counter,
+	TP_PROTO(int pid, char *name, s64 value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(counter_name, name)
+			__field(s64, value)
+	),
+	TP_fast_assign(
+			__entry->pid = current->tgid;
+			__assign_str(counter_name, name);
+			__entry->value = value;
+	),
+	TP_printk("%d|%s|%lld", __entry->pid,
+			__get_str(counter_name), __entry->value)
+);
+
+/* Bandwidth vote context switch: nonzero state means Active Only */
+TRACE_EVENT(rot_bw_ao_as_context,
+	TP_PROTO(u32 state),
+	TP_ARGS(state),
+	TP_STRUCT__entry(
+			__field(u32, state)
+	),
+	TP_fast_assign(
+			__entry->state = state;
+	),
+	TP_printk("Rotator bw context %s",
+			__entry->state ? "Active Only" : "Active+Sleep")
+
+);
+
+#endif /* if !defined(TRACE_SDE_ROTATOR_H) ||
+ defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
new file mode 100644
index 000000000000..c8814815ab66
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.c
@@ -0,0 +1,980 @@
+/* Copyright (c) 2012, 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/msm_ion.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/major.h>
+#include <linux/dma-buf.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/regulator/consumer.h>
+#include <media/msm_media_info.h>
+#include <linux/videodev2.h>
+
+#include "sde_rotator_util.h"
+#include "sde_rotator_smmu.h"
+
+#define Y_TILEWIDTH 48
+#define Y_TILEHEIGHT 4
+#define UV_TILEWIDTH 48
+#define UV_TILEHEIGHT 8
+#define TILEWIDTH_SIZE 64
+#define TILEHEIGHT_SIZE 4
+
+/*
+ * sde_mdp_get_v_h_subsample_rate() - map a chroma sampling scheme to its
+ * vertical and horizontal subsample factors (1 or 2 in each direction).
+ */
+void sde_mdp_get_v_h_subsample_rate(u8 chroma_sample,
+	u8 *v_sample, u8 *h_sample)
+{
+	switch (chroma_sample) {
+	case SDE_MDP_CHROMA_H2V1:
+		*v_sample = 1;
+		*h_sample = 2;
+		break;
+	case SDE_MDP_CHROMA_H1V2:
+		*v_sample = 2;
+		*h_sample = 1;
+		break;
+	case SDE_MDP_CHROMA_420:
+		*v_sample = 2;
+		*h_sample = 2;
+		break;
+	default:
+		/* any other sampling scheme is treated as not subsampled */
+		*v_sample = 1;
+		*h_sample = 1;
+		break;
+	}
+}
+
+/*
+ * sde_rot_intersect_rect() - write the intersection of @dst_rect and
+ * @sci_rect into @res_rect; an empty intersection yields an all-zero rect.
+ */
+void sde_rot_intersect_rect(struct sde_rect *res_rect,
+	const struct sde_rect *dst_rect,
+	const struct sde_rect *sci_rect)
+{
+	int l = max(dst_rect->x, sci_rect->x);
+	int t = max(dst_rect->y, sci_rect->y);
+	int r = min((dst_rect->x + dst_rect->w), (sci_rect->x + sci_rect->w));
+	int b = min((dst_rect->y + dst_rect->h), (sci_rect->y + sci_rect->h));
+
+	if (r < l || b < t)
+		*res_rect = (struct sde_rect){0, 0, 0, 0};
+	else
+		*res_rect = (struct sde_rect){l, t, (r-l), (b-t)};
+}
+
+/*
+ * sde_rot_crop_rect() - clip @dst_rect against scissor rect @sci_rect and
+ * shift @src_rect by the same delta so source and destination stay in
+ * correspondence. @dst_rect is rebased to @sci_rect's origin. Both rects
+ * are left untouched when the intersection is empty.
+ */
+void sde_rot_crop_rect(struct sde_rect *src_rect,
+	struct sde_rect *dst_rect,
+	const struct sde_rect *sci_rect)
+{
+	struct sde_rect res;
+
+	sde_rot_intersect_rect(&res, dst_rect, sci_rect);
+
+	if (res.w && res.h) {
+		if ((res.w != dst_rect->w) || (res.h != dst_rect->h)) {
+			src_rect->x = src_rect->x + (res.x - dst_rect->x);
+			src_rect->y = src_rect->y + (res.y - dst_rect->y);
+			src_rect->w = res.w;
+			src_rect->h = res.h;
+		}
+		*dst_rect = (struct sde_rect)
+			{(res.x - sci_rect->x), (res.y - sci_rect->y),
+			res.w, res.h};
+	}
+}
+
+/*
+ * sde_rect_cmp() - compares two rects
+ * @rect1 - rect value to compare
+ * @rect2 - rect value to compare
+ *
+ * Returns 1 if the rects are same, 0 otherwise.
+ */
+int sde_rect_cmp(struct sde_rect *rect1, struct sde_rect *rect2)
+{
+	return rect1->x == rect2->x && rect1->y == rect2->y &&
+	       rect1->w == rect2->w && rect1->h == rect2->h;
+}
+
+/*
+ * sde_rect_overlap_check() - compare two rects and check if they overlap
+ * @rect1 - rect value to compare
+ * @rect2 - rect value to compare
+ *
+ * Returns true if rects overlap, false otherwise.
+ * Note: edges that merely touch do not count as overlap, since the
+ * comparisons below treat the right/bottom bounds as exclusive.
+ */
+bool sde_rect_overlap_check(struct sde_rect *rect1, struct sde_rect *rect2)
+{
+	u32 rect1_left = rect1->x, rect1_right = rect1->x + rect1->w;
+	u32 rect1_top = rect1->y, rect1_bottom = rect1->y + rect1->h;
+	u32 rect2_left = rect2->x, rect2_right = rect2->x + rect2->w;
+	u32 rect2_top = rect2->y, rect2_bottom = rect2->y + rect2->h;
+
+	if ((rect1_right <= rect2_left) ||
+		(rect1_left >= rect2_right) ||
+		(rect1_bottom <= rect2_top) ||
+		(rect1_top >= rect2_bottom))
+		return false;
+
+	return true;
+}
+
+/*
+ * sde_mdp_get_rau_strides() - compute RAU (Rotator Access Unit) count,
+ * per-plane strides, and RAU heights for BWC (bandwidth compression).
+ * Only YUV and interleaved RGB fetch types are supported; other plane
+ * layouts return -EINVAL.
+ */
+int sde_mdp_get_rau_strides(u32 w, u32 h,
+	struct sde_mdp_format_params *fmt,
+	struct sde_mdp_plane_sizes *ps)
+{
+	if (fmt->is_yuv) {
+		/* YUV RAUs are 64 pixels wide; luma RAU is 4 lines tall */
+		ps->rau_cnt = DIV_ROUND_UP(w, 64);
+		ps->ystride[0] = 64 * 4;
+		ps->rau_h[0] = 4;
+		ps->rau_h[1] = 2;
+		if (fmt->chroma_sample == SDE_MDP_CHROMA_H1V2)
+			ps->ystride[1] = 64 * 2;
+		else if (fmt->chroma_sample == SDE_MDP_CHROMA_H2V1) {
+			ps->ystride[1] = 32 * 4;
+			ps->rau_h[1] = 4;
+		} else
+			ps->ystride[1] = 32 * 2;
+
+		/* account for both chroma components */
+		ps->ystride[1] <<= 1;
+	} else if (fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED) {
+		/* RGB RAUs are 32 pixels wide, 4 lines tall */
+		ps->rau_cnt = DIV_ROUND_UP(w, 32);
+		ps->ystride[0] = 32 * 4 * fmt->bpp;
+		ps->ystride[1] = 0;
+		ps->rau_h[0] = 4;
+		ps->rau_h[1] = 0;
+	} else {
+		SDEROT_ERR("Invalid format=%d\n", fmt->format);
+		return -EINVAL;
+	}
+
+	/* scale per-RAU strides by the number of RAUs per row */
+	ps->ystride[0] *= ps->rau_cnt;
+	ps->ystride[1] *= ps->rau_cnt;
+	ps->num_planes = 2;
+
+	SDEROT_DBG("BWC rau_cnt=%d strides={%d,%d} heights={%d,%d}\n",
+		ps->rau_cnt, ps->ystride[0], ps->ystride[1],
+		ps->rau_h[0], ps->rau_h[1]);
+
+	return 0;
+}
+
+/*
+ * sde_mdp_get_ubwc_plane_size() - fill in per-plane strides and sizes for
+ * UBWC (compressed) formats. YUV UBWC uses 4 planes (Y/CbCr bitstream +
+ * Y/CbCr metadata); RGB UBWC uses 2 (bitstream in plane 0, metadata in
+ * plane 2). All plane sizes are 4K aligned.
+ */
+static int sde_mdp_get_ubwc_plane_size(struct sde_mdp_format_params *fmt,
+	u32 width, u32 height, struct sde_mdp_plane_sizes *ps)
+{
+	int rc = 0;
+
+	if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_UBWC) {
+		ps->num_planes = 4;
+		/* Y bitstream stride and plane size */
+		ps->ystride[0] = ALIGN(width, 128);
+		ps->plane_size[0] = ALIGN(ps->ystride[0] * ALIGN(height, 32),
+					4096);
+
+		/* CbCr bitstream stride and plane size */
+		ps->ystride[1] = ALIGN(width, 64);
+		ps->plane_size[1] = ALIGN(ps->ystride[1] *
+			ALIGN(height / 2, 32), 4096);
+
+		/* Y meta data stride and plane size */
+		ps->ystride[2] = ALIGN(DIV_ROUND_UP(width, 32), 64);
+		ps->plane_size[2] = ALIGN(ps->ystride[2] *
+			ALIGN(DIV_ROUND_UP(height, 8), 16), 4096);
+
+		/* CbCr meta data stride and plane size */
+		ps->ystride[3] = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
+		ps->plane_size[3] = ALIGN(ps->ystride[3] *
+			ALIGN(DIV_ROUND_UP(height / 2, 8), 16), 4096);
+	} else if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC) {
+		/* TP10 packs 3 10-bit pixels per 4 bytes in 48x4/48x8 tiles */
+		u32 yWidth = sde_mdp_general_align(width, 192);
+		u32 yHeight = ALIGN(height, 16);
+		u32 uvWidth = sde_mdp_general_align(width, 192);
+		u32 uvHeight = ALIGN(height, 32);
+
+		ps->num_planes = 4;
+
+		/* Y bitstream stride and plane size */
+		ps->ystride[0] = yWidth * TILEWIDTH_SIZE / Y_TILEWIDTH;
+		ps->plane_size[0] = ALIGN(ps->ystride[0] *
+				(yHeight * TILEHEIGHT_SIZE / Y_TILEHEIGHT),
+				4096);
+
+		/* CbCr bitstream stride and plane size */
+		ps->ystride[1] = uvWidth * TILEWIDTH_SIZE / UV_TILEWIDTH;
+		ps->plane_size[1] = ALIGN(ps->ystride[1] *
+				(uvHeight * TILEHEIGHT_SIZE / UV_TILEHEIGHT),
+				4096);
+
+		/* Y meta data stride and plane size */
+		ps->ystride[2] = ALIGN(yWidth / Y_TILEWIDTH, 64);
+		ps->plane_size[2] = ALIGN(ps->ystride[2] *
+				ALIGN((yHeight / Y_TILEHEIGHT), 16), 4096);
+
+		/* CbCr meta data stride and plane size */
+		ps->ystride[3] = ALIGN(uvWidth / UV_TILEWIDTH, 64);
+		ps->plane_size[3] = ALIGN(ps->ystride[3] *
+				ALIGN((uvHeight / UV_TILEHEIGHT), 16), 4096);
+	} else if (fmt->format == SDE_PIX_FMT_RGBA_8888_UBWC ||
+		fmt->format == SDE_PIX_FMT_RGBX_8888_UBWC ||
+		fmt->format == SDE_PIX_FMT_RGBA_1010102_UBWC ||
+		fmt->format == SDE_PIX_FMT_RGBX_1010102_UBWC ||
+		fmt->format == SDE_PIX_FMT_RGB_565_UBWC) {
+		uint32_t stride_alignment, bpp, aligned_bitstream_width;
+
+		if (fmt->format == SDE_PIX_FMT_RGB_565_UBWC) {
+			stride_alignment = 128;
+			bpp = 2;
+		} else {
+			stride_alignment = 64;
+			bpp = 4;
+		}
+		ps->num_planes = 2;
+
+		/* RGB bitstream stride and plane size */
+		aligned_bitstream_width = ALIGN(width, stride_alignment);
+		ps->ystride[0] = aligned_bitstream_width * bpp;
+		ps->plane_size[0] = ALIGN(bpp * aligned_bitstream_width *
+			ALIGN(height, 16), 4096);
+
+		/* RGB meta data stride and plane size */
+		ps->ystride[2] = ALIGN(DIV_ROUND_UP(aligned_bitstream_width,
+			16), 64);
+		ps->plane_size[2] = ALIGN(ps->ystride[2] *
+			ALIGN(DIV_ROUND_UP(height, 4), 16), 4096);
+	} else {
+		SDEROT_ERR("%s: UBWC format not supported for fmt:%d\n",
+			__func__, fmt->format);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * sde_mdp_get_plane_sizes() - compute per-plane strides, sizes and total
+ * buffer size for a format at the given dimensions.
+ * @fmt: format description
+ * @w/@h: image width/height (bounded by SDE_ROT_MAX_IMG_WIDTH/HEIGHT)
+ * @ps: output plane sizes (zeroed here first)
+ * @bwc_mode: nonzero to lay out for bandwidth compression (RAU based)
+ * @rotation: true if buffer will be read rotated 90 degrees; swaps the
+ *            H2V1/H1V2 chroma subsampling orientation accordingly
+ *
+ * Return: 0 on success, -EINVAL/-ERANGE on bad args, or the error from
+ * the UBWC/RAU helpers.
+ */
+int sde_mdp_get_plane_sizes(struct sde_mdp_format_params *fmt, u32 w, u32 h,
+	struct sde_mdp_plane_sizes *ps, u32 bwc_mode, bool rotation)
+{
+	int i, rc = 0;
+	u32 bpp;
+
+	if (ps == NULL)
+		return -EINVAL;
+
+	if ((w > SDE_ROT_MAX_IMG_WIDTH) || (h > SDE_ROT_MAX_IMG_HEIGHT))
+		return -ERANGE;
+
+	bpp = fmt->bpp;
+	memset(ps, 0, sizeof(struct sde_mdp_plane_sizes));
+
+	if (sde_mdp_is_ubwc_format(fmt)) {
+		rc = sde_mdp_get_ubwc_plane_size(fmt, w, h, ps);
+	} else if (bwc_mode) {
+		u32 height, meta_size;
+
+		rc = sde_mdp_get_rau_strides(w, h, fmt, ps);
+		if (rc)
+			return rc;
+
+		/* fold metadata bytes into the data stride; plane 1 is
+		 * repurposed as the per-RAU metadata plane
+		 */
+		height = DIV_ROUND_UP(h, ps->rau_h[0]);
+		meta_size = DIV_ROUND_UP(ps->rau_cnt, 8);
+		ps->ystride[1] += meta_size;
+		ps->ystride[0] += ps->ystride[1] + meta_size;
+		ps->plane_size[0] = ps->ystride[0] * height;
+
+		ps->ystride[1] = 2;
+		ps->plane_size[1] = 2 * ps->rau_cnt * height;
+
+		SDEROT_DBG("BWC data stride=%d size=%d meta size=%d\n",
+			ps->ystride[0], ps->plane_size[0], ps->plane_size[1]);
+	} else {
+		if (fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED) {
+			ps->num_planes = 1;
+			ps->plane_size[0] = w * h * bpp;
+			ps->ystride[0] = w * bpp;
+		} else if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_VENUS ||
+			fmt->format == SDE_PIX_FMT_Y_CRCB_H2V2_VENUS) {
+
+			/* video (VENUS) layout: strides/scanlines come from
+			 * the msm_media_info macros
+			 */
+			int cf = (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_VENUS)
+					? COLOR_FMT_NV12 : COLOR_FMT_NV21;
+			ps->num_planes = 2;
+			ps->ystride[0] = VENUS_Y_STRIDE(cf, w);
+			ps->ystride[1] = VENUS_UV_STRIDE(cf, w);
+			ps->plane_size[0] = VENUS_Y_SCANLINES(cf, h) *
+				ps->ystride[0];
+			ps->plane_size[1] = VENUS_UV_SCANLINES(cf, h) *
+				ps->ystride[1];
+		} else if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_P010) {
+			/*
+			 * |<---Y1--->000000<---Y0--->000000|  Plane0
+			 * |rrrrrrrrrr000000bbbbbbbbbb000000|  Plane1
+			 * |--------------------------------|
+			 *  33222222222211111111110000000000  Bit
+			 *  10987654321098765432109876543210  Location
+			 */
+			ps->num_planes = 2;
+			ps->ystride[0] = w * 2;
+			ps->ystride[1] = w * 2;
+			ps->plane_size[0] = ps->ystride[0] * h;
+			ps->plane_size[1] = ps->ystride[1] * h / 2;
+		} else if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_TP10) {
+			/* TP10: 3 pixels packed per 4 bytes, 192-px groups */
+			u32 yWidth   = sde_mdp_general_align(w, 192);
+			u32 yHeight  = ALIGN(h, 16);
+			u32 uvWidth  = sde_mdp_general_align(w, 192);
+			u32 uvHeight = (ALIGN(h, 32)) / 2;
+
+			ps->num_planes = 2;
+
+			ps->ystride[0] = (yWidth / 3) * 4;
+			ps->ystride[1] = (uvWidth / 3) * 4;
+			ps->plane_size[0] = ALIGN(ps->ystride[0] * yHeight,
+					4096);
+			ps->plane_size[1] = ALIGN(ps->ystride[1] * uvHeight,
+					4096);
+		} else {
+			u8 v_subsample, h_subsample, stride_align, height_align;
+			u32 chroma_samp;
+
+			chroma_samp = fmt->chroma_sample;
+
+			if (rotation) {
+				if (chroma_samp == SDE_MDP_CHROMA_H2V1)
+					chroma_samp = SDE_MDP_CHROMA_H1V2;
+				else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
+					chroma_samp = SDE_MDP_CHROMA_H2V1;
+			}
+
+			sde_mdp_get_v_h_subsample_rate(chroma_samp,
+				&v_subsample, &h_subsample);
+
+			switch (fmt->format) {
+			case SDE_PIX_FMT_Y_CR_CB_GH2V2:
+				stride_align = 16;
+				height_align = 1;
+				break;
+			default:
+				stride_align = 1;
+				height_align = 1;
+				break;
+			}
+
+			ps->ystride[0] = ALIGN(w, stride_align);
+			ps->ystride[1] = ALIGN(w / h_subsample, stride_align);
+			ps->plane_size[0] = ps->ystride[0] *
+				ALIGN(h, height_align);
+			ps->plane_size[1] = ps->ystride[1] * (h / v_subsample);
+
+			if (fmt->fetch_planes == SDE_MDP_PLANE_PSEUDO_PLANAR) {
+				/* interleaved CbCr: chroma plane doubles */
+				ps->num_planes = 2;
+				ps->plane_size[1] *= 2;
+				ps->ystride[1] *= 2;
+			} else { /* planar */
+				ps->num_planes = 3;
+				ps->plane_size[2] = ps->plane_size[1];
+				ps->ystride[2] = ps->ystride[1];
+			}
+		}
+	}
+
+	/* Safe to use MAX_PLANES as ps is memset at start of function */
+	for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
+		ps->total_size += ps->plane_size[i];
+
+	return rc;
+}
+
+/*
+ * sde_mdp_ubwc_data_check() - validate a UBWC buffer and split the single
+ * client-provided allocation (plane 0) into the MDP plane layout
+ * (bitstream + metadata planes) expected by the hardware. If plane 0
+ * already matches the bitstream plane size, only the plane counts and
+ * lengths are cross-checked.
+ *
+ * Return: 0 on success, -ENOMEM if the buffer is too small, -EINVAL on
+ * plane count/length mismatch.
+ */
+static int sde_mdp_ubwc_data_check(struct sde_mdp_data *data,
+			struct sde_mdp_plane_sizes *ps,
+			struct sde_mdp_format_params *fmt)
+{
+	int i, inc;
+	unsigned long data_size = 0;
+	dma_addr_t base_addr;
+
+	if (data->p[0].len == ps->plane_size[0])
+		goto end;
+
+	/* From this point, assumption is plane 0 is to be divided */
+	data_size = data->p[0].len;
+	if (data_size < ps->total_size) {
+		SDEROT_ERR(
+			"insufficient current mem len=%lu required mem len=%u\n",
+			data_size, ps->total_size);
+		return -ENOMEM;
+	}
+
+	base_addr = data->p[0].addr;
+
+	if ((fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_UBWC) ||
+		(fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC)) {
+		/************************************************/
+		/*      UBWC            **                      */
+		/*      buffer          **      MDP PLANE       */
+		/*      format          **                      */
+		/************************************************/
+		/* -------------------  ** -------------------- */
+		/* |      Y meta     |  ** |    Y bitstream   | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/* |    Y bitstream  |  ** |  CbCr bitstream  | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/* |   Cbcr metadata |  ** |       Y meta     | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/* |  CbCr bitstream |  ** |     CbCr meta    | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/************************************************/
+
+		/* configure Y bitstream plane */
+		data->p[0].addr = base_addr + ps->plane_size[2];
+		data->p[0].len = ps->plane_size[0];
+
+		/* configure CbCr bitstream plane */
+		data->p[1].addr = base_addr + ps->plane_size[0]
+			+ ps->plane_size[2] + ps->plane_size[3];
+		data->p[1].len = ps->plane_size[1];
+
+		/* configure Y metadata plane */
+		data->p[2].addr = base_addr;
+		data->p[2].len = ps->plane_size[2];
+
+		/* configure CbCr metadata plane */
+		data->p[3].addr = base_addr + ps->plane_size[0]
+			+ ps->plane_size[2];
+		data->p[3].len = ps->plane_size[3];
+	} else {
+		/************************************************/
+		/*      UBWC            **                      */
+		/*      buffer          **      MDP PLANE       */
+		/*      format          **                      */
+		/************************************************/
+		/* -------------------  ** -------------------- */
+		/* |      RGB meta   |  ** |   RGB bitstream  | */
+		/* |       data      |  ** |       plane      | */
+		/* -------------------  ** -------------------- */
+		/* |  RGB bitstream  |  ** |       NONE       | */
+		/* |       data      |  ** |                  | */
+		/* -------------------  ** -------------------- */
+		/*                      ** |     RGB meta     | */
+		/*                      ** |       plane      | */
+		/*                      ** -------------------- */
+		/************************************************/
+
+		/* configure RGB bitstream plane */
+		data->p[0].addr = base_addr + ps->plane_size[2];
+		data->p[0].len = ps->plane_size[0];
+
+		/* configure RGB metadata plane */
+		data->p[2].addr = base_addr;
+		data->p[2].len = ps->plane_size[2];
+	}
+	data->num_planes = ps->num_planes;
+
+end:
+	if (data->num_planes != ps->num_planes) {
+		SDEROT_ERR("num_planes don't match: fmt:%d, data:%d, ps:%d\n",
+			fmt->format, data->num_planes, ps->num_planes);
+		return -EINVAL;
+	}
+
+	/* RGB UBWC uses only planes 0 and 2, so step by 2 in that case */
+	inc = ((fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_UBWC) ? 1 : 2);
+	for (i = 0; i < SDE_ROT_MAX_PLANES; i += inc) {
+		if (data->p[i].len != ps->plane_size[i]) {
+			SDEROT_ERR(
+				"plane:%d fmt:%d, len does not match: data:%lu, ps:%d\n",
+					i, fmt->format, data->p[i].len,
+					ps->plane_size[i]);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * sde_mdp_data_check() - verify that the mapped buffer planes are large
+ * enough for the computed plane sizes. When the client supplied fewer
+ * planes than the format requires, the missing planes are carved out of
+ * the tail of the preceding plane. UBWC formats are delegated to
+ * sde_mdp_ubwc_data_check().
+ *
+ * Return: 0 on success, -ENOMEM if a plane is too small or data missing.
+ */
+int sde_mdp_data_check(struct sde_mdp_data *data,
+			struct sde_mdp_plane_sizes *ps,
+			struct sde_mdp_format_params *fmt)
+{
+	struct sde_mdp_img_data *prev, *curr;
+	int i;
+
+	if (!ps)
+		return 0;
+
+	if (!data || data->num_planes == 0)
+		return -ENOMEM;
+
+	if (sde_mdp_is_ubwc_format(fmt))
+		return sde_mdp_ubwc_data_check(data, ps, fmt);
+
+	SDEROT_DBG("srcp0=%pa len=%lu frame_size=%u\n", &data->p[0].addr,
+		data->p[0].len, ps->total_size);
+
+	for (i = 0; i < ps->num_planes; i++) {
+		curr = &data->p[i];
+		if (i >= data->num_planes) {
+			/* plane not provided: derive it from the previous one */
+			u32 psize = ps->plane_size[i-1];
+
+			prev = &data->p[i-1];
+			if (prev->len > psize) {
+				curr->len = prev->len - psize;
+				prev->len = psize;
+			}
+			curr->addr = prev->addr + psize;
+		}
+		if (curr->len < ps->plane_size[i]) {
+			SDEROT_ERR("insufficient mem=%lu p=%d len=%u\n",
+			       curr->len, i, ps->plane_size[i]);
+			return -ENOMEM;
+		}
+		SDEROT_DBG("plane[%d] addr=%pa len=%lu\n", i,
+				&curr->addr, curr->len);
+	}
+	data->num_planes = ps->num_planes;
+
+	return 0;
+}
+
+/*
+ * sde_validate_offset_for_ubwc_format() - check that a crop offset is
+ * legal for a UBWC format.
+ * @fmt: format description (must be a UBWC format)
+ * @x: horizontal offset in pixels
+ * @y: vertical offset in pixels
+ *
+ * UBWC offsets must line up with the meta tile grid, i.e. the micro tile
+ * dimension times UBWC_META_MACRO_W_H in each direction.
+ *
+ * Return: 0 when aligned, -EINVAL on misalignment or unknown micro tile.
+ */
+int sde_validate_offset_for_ubwc_format(
+	struct sde_mdp_format_params *fmt, u16 x, u16 y)
+{
+	int ret;
+	u16 micro_w = 0, micro_h = 0;
+
+	ret = sde_rot_get_ubwc_micro_dim(fmt->format, &micro_w, &micro_h);
+	if (ret || !micro_w || !micro_h) {
+		SDEROT_ERR("Could not get valid micro tile dimensions\n");
+		return -EINVAL;
+	}
+
+	if (x % (micro_w * UBWC_META_MACRO_W_H)) {
+		SDEROT_ERR("x=%d does not align with meta width=%d\n", x,
+			micro_w * UBWC_META_MACRO_W_H);
+		return -EINVAL;
+	}
+
+	if (y % (micro_h * UBWC_META_MACRO_W_H)) {
+		/* report the real requirement: previously this printed only
+		 * UBWC_META_MACRO_W_H, omitting the micro_h factor, which
+		 * did not match the modulus actually checked above
+		 */
+		SDEROT_ERR("y=%d does not align with meta height=%d\n", y,
+			micro_h * UBWC_META_MACRO_W_H);
+		return -EINVAL;
+	}
+	return ret;
+}
+
+/* x and y are assumed to be valid, expected to line up with start of tiles */
+void sde_rot_ubwc_data_calc_offset(struct sde_mdp_data *data, u16 x, u16 y,
+	struct sde_mdp_plane_sizes *ps, struct sde_mdp_format_params *fmt)
+{
+	u16 macro_w, micro_w, micro_h;
+	/* initialized so the WARN below never reads an indeterminate value
+	 * on paths (e.g. TP10) that bail out before computing an offset
+	 */
+	u32 offset = 0;
+	/* on failure, ret holds (failing plane index + 1) for the WARN */
+	int ret;
+
+	ret = sde_rot_get_ubwc_micro_dim(fmt->format, &micro_w, &micro_h);
+	if (ret || !micro_w || !micro_h) {
+		SDEROT_ERR("Could not get valid micro tile dimensions\n");
+		return;
+	}
+	macro_w = 4 * micro_w;
+
+	if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_UBWC) {
+		u16 chroma_macro_w = macro_w / 2;
+		u16 chroma_micro_w = micro_w / 2;
+
+		/* plane 1 and 3 are chroma, with sub sample of 2 */
+		offset = y * ps->ystride[0] +
+			(x / macro_w) * 4096;
+		if (offset < data->p[0].len) {
+			data->p[0].addr += offset;
+		} else {
+			ret = 1;
+			goto done;
+		}
+
+		offset = y / 2 * ps->ystride[1] +
+			((x / 2) / chroma_macro_w) * 4096;
+		if (offset < data->p[1].len) {
+			data->p[1].addr += offset;
+		} else {
+			ret = 2;
+			goto done;
+		}
+
+		offset = (y / micro_h) * ps->ystride[2] +
+			((x / micro_w) / UBWC_META_MACRO_W_H) *
+			UBWC_META_BLOCK_SIZE;
+		if (offset < data->p[2].len) {
+			data->p[2].addr += offset;
+		} else {
+			ret = 3;
+			goto done;
+		}
+
+		offset = ((y / 2) / micro_h) * ps->ystride[3] +
+			(((x / 2) / chroma_micro_w) / UBWC_META_MACRO_W_H) *
+			UBWC_META_BLOCK_SIZE;
+		if (offset < data->p[3].len) {
+			data->p[3].addr += offset;
+		} else {
+			ret = 4;
+			goto done;
+		}
+	} else if (fmt->format == SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC) {
+		/* TODO: */
+		SDEROT_ERR("UBWC TP10 format not implemented yet\n");
+		ret = 1;
+		goto done;
+	} else {
+		offset = y * ps->ystride[0] +
+			(x / macro_w) * 4096;
+		if (offset < data->p[0].len) {
+			data->p[0].addr += offset;
+		} else {
+			ret = 1;
+			goto done;
+		}
+
+		offset = DIV_ROUND_UP(y, micro_h) * ps->ystride[2] +
+			((x / micro_w) / UBWC_META_MACRO_W_H) *
+			UBWC_META_BLOCK_SIZE;
+		if (offset < data->p[2].len) {
+			data->p[2].addr += offset;
+		} else {
+			ret = 3;
+			goto done;
+		}
+	}
+
+done:
+	if (ret) {
+		WARN(1, "idx %d, offsets:%u too large for buflen %lu\n",
+			(ret - 1), offset, data->p[(ret - 1)].len);
+	}
+}
+
+/*
+ * sde_rot_data_calc_offset() - advance each plane's base address to the
+ * (x, y) crop origin. UBWC formats are delegated to the tile-aware
+ * helper; for linear formats plane addresses are advanced by stride and
+ * (subsampled) pixel offsets.
+ */
+void sde_rot_data_calc_offset(struct sde_mdp_data *data, u16 x, u16 y,
+	struct sde_mdp_plane_sizes *ps, struct sde_mdp_format_params *fmt)
+{
+	if ((x == 0) && (y == 0))
+		return;
+
+	if (sde_mdp_is_ubwc_format(fmt)) {
+		sde_rot_ubwc_data_calc_offset(data, x, y, ps, fmt);
+		return;
+	}
+
+	data->p[0].addr += y * ps->ystride[0];
+
+	if (data->num_planes == 1) {
+		data->p[0].addr += x * fmt->bpp;
+	} else {
+		u16 xoff, yoff;
+		u8 v_subsample, h_subsample;
+
+		sde_mdp_get_v_h_subsample_rate(fmt->chroma_sample,
+			&v_subsample, &h_subsample);
+
+		xoff = x / h_subsample;
+		yoff = y / v_subsample;
+
+		data->p[0].addr += x;
+		data->p[1].addr += xoff + (yoff * ps->ystride[1]);
+		if (data->num_planes == 2) /* pseudo planar */
+			data->p[1].addr += xoff;
+		else /* planar */
+			data->p[2].addr += xoff + (yoff * ps->ystride[2]);
+	}
+}
+
+/*
+ * sde_smmu_get_domain_type() - select the rotator IOMMU domain based on
+ * whether the session is secure.
+ * NOTE(review): the @rotator parameter is currently unused here --
+ * presumably kept for interface symmetry with non-rotator paths; confirm.
+ */
+static int sde_smmu_get_domain_type(u32 flags, bool rotator)
+{
+	int type;
+
+	if (flags & SDE_SECURE_OVERLAY_SESSION)
+		type = SDE_IOMMU_DOMAIN_ROT_SECURE;
+	else
+		type = SDE_IOMMU_DOMAIN_ROT_UNSECURE;
+
+	return type;
+}
+
+/*
+ * sde_mdp_put_img() - release one image plane: unmap the SMMU mapping if
+ * it was mapped, then detach/unmap the dma-buf attachment and drop the
+ * dma-buf reference (unless the buffer was supplied externally via
+ * SDE_ROT_EXT_DMA_BUF, in which case the caller keeps the reference).
+ *
+ * Return: 0 on success, -ENOMEM if there is no valid dma-buf to release.
+ */
+static int sde_mdp_put_img(struct sde_mdp_img_data *data, bool rotator,
+		int dir)
+{
+	u32 domain;
+
+	if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
+		SDEROT_DBG("ion hdl=%p buf=0x%pa\n", data->srcp_dma_buf,
+							&data->addr);
+		if (data->mapped) {
+			domain = sde_smmu_get_domain_type(data->flags,
+				rotator);
+			sde_smmu_unmap_dma_buf(data->srcp_table,
+					domain, dir,
+					data->srcp_dma_buf);
+			data->mapped = false;
+			SDEROT_DBG("unmap %pad/%lx d:%u f:%x\n", &data->addr,
+					data->len, domain, data->flags);
+		}
+		if (!data->skip_detach) {
+			dma_buf_unmap_attachment(data->srcp_attachment,
+				data->srcp_table, dir);
+			dma_buf_detach(data->srcp_dma_buf,
+					data->srcp_attachment);
+			if (!(data->flags & SDE_ROT_EXT_DMA_BUF)) {
+				dma_buf_put(data->srcp_dma_buf);
+				data->srcp_dma_buf = NULL;
+			}
+		}
+	} else {
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/*
+ * sde_mdp_get_img() - attach a dma-buf for one image plane and map its
+ * scatter table. The actual SMMU address mapping is deferred to
+ * sde_mdp_map_buffer(), so addr/len are reset here.
+ *
+ * NOTE(review): when SDE_ROT_EXT_DMA_BUF is not set, this function never
+ * imports a dma-buf itself -- it only error-checks data->srcp_dma_buf,
+ * which therefore must already be populated by the caller; verify against
+ * the call sites.
+ *
+ * Return: 0 on success, negative errno on attach/map failure.
+ */
+static int sde_mdp_get_img(struct sde_fb_data *img,
+		struct sde_mdp_img_data *data, struct device *dev,
+		bool rotator, int dir)
+{
+	int ret = -EINVAL;
+	unsigned long *len;
+	u32 domain;
+	dma_addr_t *start;
+
+	start = &data->addr;
+	len = &data->len;
+	data->flags |= img->flags;
+	data->offset = img->offset;
+	if (data->flags & SDE_ROT_EXT_DMA_BUF)
+		data->srcp_dma_buf = img->buffer;
+	else if (IS_ERR(data->srcp_dma_buf)) {
+		SDEROT_ERR("error on ion_import_fd\n");
+		ret = PTR_ERR(data->srcp_dma_buf);
+		data->srcp_dma_buf = NULL;
+		return ret;
+	}
+	domain = sde_smmu_get_domain_type(data->flags, rotator);
+
+	SDEROT_DBG("%d domain=%d ihndl=%p\n",
+			__LINE__, domain, data->srcp_dma_buf);
+	data->srcp_attachment =
+		sde_smmu_dma_buf_attach(data->srcp_dma_buf, dev,
+				domain);
+	if (IS_ERR(data->srcp_attachment)) {
+		SDEROT_ERR("%d Failed to attach dma buf\n", __LINE__);
+		ret = PTR_ERR(data->srcp_attachment);
+		goto err_put;
+	}
+
+	SDEROT_DBG("%d attach=%p\n", __LINE__, data->srcp_attachment);
+	data->srcp_table =
+		dma_buf_map_attachment(data->srcp_attachment, dir);
+	if (IS_ERR(data->srcp_table)) {
+		SDEROT_ERR("%d Failed to map attachment\n", __LINE__);
+		ret = PTR_ERR(data->srcp_table);
+		goto err_detach;
+	}
+
+	SDEROT_DBG("%d table=%p\n", __LINE__, data->srcp_table);
+	data->addr = 0;
+	data->len = 0;
+	data->mapped = false;
+	data->skip_detach = false;
+	/* return early, mapping will be done later */
+
+	return 0;
+err_detach:
+	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
+err_put:
+	if (!(data->flags & SDE_ROT_EXT_DMA_BUF)) {
+		dma_buf_put(data->srcp_dma_buf);
+		data->srcp_dma_buf = NULL;
+	}
+	return ret;
+}
+
+/*
+ * sde_mdp_map_buffer() - map a previously-attached plane into the SMMU
+ * and apply the client's byte offset to addr/len. A plane that already
+ * has addr/len is considered mapped and returns 0. On any failure the
+ * plane's resources are released before returning.
+ *
+ * Return: 0 on success, -ENOMEM/-EOVERFLOW/negative errno on failure.
+ */
+static int sde_mdp_map_buffer(struct sde_mdp_img_data *data, bool rotator,
+		int dir)
+{
+	int ret = -EINVAL;
+	int domain;
+
+	if (data->addr && data->len)
+		return 0;
+
+	if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
+		domain = sde_smmu_get_domain_type(data->flags,
+				rotator);
+		ret = sde_smmu_map_dma_buf(data->srcp_dma_buf,
+				data->srcp_table, domain,
+				&data->addr, &data->len, dir);
+		if (IS_ERR_VALUE(ret)) {
+			SDEROT_ERR("smmu map dma buf failed: (%d)\n", ret);
+			goto err_unmap;
+		}
+		SDEROT_DBG("map %pad/%lx d:%u f:%x\n", &data->addr, data->len,
+				domain, data->flags);
+		data->mapped = true;
+	}
+
+	if (!data->addr) {
+		SDEROT_ERR("start address is zero!\n");
+		sde_mdp_put_img(data, rotator, dir);
+		return -ENOMEM;
+	}
+
+	if (!ret && (data->offset < data->len)) {
+		data->addr += data->offset;
+		data->len -= data->offset;
+
+		SDEROT_DBG("ihdl=%p buf=0x%pa len=0x%lx\n",
+			 data->srcp_dma_buf, &data->addr, data->len);
+	} else {
+		sde_mdp_put_img(data, rotator, dir);
+		return ret ? : -EOVERFLOW;
+	}
+
+	return ret;
+
+err_unmap:
+	dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table, dir);
+	dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
+	if (!(data->flags & SDE_ROT_EXT_DMA_BUF)) {
+		dma_buf_put(data->srcp_dma_buf);
+		data->srcp_dma_buf = NULL;
+	}
+	return ret;
+}
+
+/*
+ * sde_mdp_data_get() - acquire (attach) all planes of a buffer. On
+ * failure for plane i, all previously acquired planes are released.
+ * data->num_planes reflects how many planes were acquired.
+ *
+ * Return: 0 on success, negative errno on bad plane count or acquire
+ * failure.
+ */
+static int sde_mdp_data_get(struct sde_mdp_data *data,
+		struct sde_fb_data *planes, int num_planes, u32 flags,
+		struct device *dev, bool rotator, int dir)
+{
+	int i, rc = 0;
+
+	if ((num_planes <= 0) || (num_planes > SDE_ROT_MAX_PLANES))
+		return -EINVAL;
+
+	for (i = 0; i < num_planes; i++) {
+		data->p[i].flags = flags;
+		rc = sde_mdp_get_img(&planes[i], &data->p[i], dev, rotator,
+				dir);
+		if (rc) {
+			SDEROT_ERR("failed to get buf p=%d flags=%x\n",
+				i, flags);
+			/* roll back planes acquired so far */
+			while (i > 0) {
+				i--;
+				sde_mdp_put_img(&data->p[i], rotator, dir);
+			}
+			break;
+		}
+	}
+
+	data->num_planes = i;
+
+	return rc;
+}
+
+/*
+ * sde_mdp_data_map() - SMMU-map every acquired plane of a buffer. On
+ * failure for plane i, all previously mapped planes are released.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int sde_mdp_data_map(struct sde_mdp_data *data, bool rotator, int dir)
+{
+	int i, rc = 0;
+
+	if (!data || !data->num_planes || data->num_planes > SDE_ROT_MAX_PLANES)
+		return -EINVAL;
+
+	for (i = 0; i < data->num_planes; i++) {
+		rc = sde_mdp_map_buffer(&data->p[i], rotator, dir);
+		if (rc) {
+			SDEROT_ERR("failed to map buf p=%d\n", i);
+			/* roll back planes mapped so far */
+			while (i > 0) {
+				i--;
+				sde_mdp_put_img(&data->p[i], rotator, dir);
+			}
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * sde_mdp_data_free() - release every plane of a buffer and reset the
+ * plane count. The release is bracketed by sde_smmu_ctrl(1)/(0), which
+ * presumably keeps the SMMU attached while unmapping -- confirm against
+ * sde_rotator_smmu.c.
+ */
+void sde_mdp_data_free(struct sde_mdp_data *data, bool rotator, int dir)
+{
+	int i;
+
+	sde_smmu_ctrl(1);
+	for (i = 0; i < data->num_planes && data->p[i].len; i++)
+		sde_mdp_put_img(&data->p[i], rotator, dir);
+	sde_smmu_ctrl(0);
+
+	data->num_planes = 0;
+}
+
+/*
+ * sde_mdp_data_get_and_validate_size() - acquire all planes of a layer
+ * buffer and verify the total usable bytes (dma-buf size, or mapped len,
+ * minus each plane's offset) cover the frame size computed from the
+ * layer's format and dimensions. All planes are released again on
+ * validation failure.
+ *
+ * Return: 0 on success, -EINVAL on unsupported format, bad offset, or
+ * undersized buffer; other negative errno from the acquire step.
+ */
+int sde_mdp_data_get_and_validate_size(struct sde_mdp_data *data,
+	struct sde_fb_data *planes, int num_planes, u32 flags,
+	struct device *dev, bool rotator, int dir,
+	struct sde_layer_buffer *buffer)
+{
+	struct sde_mdp_format_params *fmt;
+	struct sde_mdp_plane_sizes ps;
+	int ret, i;
+	unsigned long total_buf_len = 0;
+
+	fmt = sde_get_format_params(buffer->format);
+	if (!fmt) {
+		SDEROT_ERR("Format %d not supported\n", buffer->format);
+		return -EINVAL;
+	}
+
+	ret = sde_mdp_data_get(data, planes, num_planes,
+		flags, dev, rotator, dir);
+	if (ret)
+		return ret;
+
+	sde_mdp_get_plane_sizes(fmt, buffer->width, buffer->height, &ps, 0, 0);
+
+	for (i = 0; i < num_planes ; i++) {
+		unsigned long plane_len = (data->p[i].srcp_dma_buf) ?
+				data->p[i].srcp_dma_buf->size : data->p[i].len;
+
+		if (plane_len < planes[i].offset) {
+			SDEROT_ERR("Offset=%d larger than buffer size=%lu\n",
+				planes[i].offset, plane_len);
+			ret = -EINVAL;
+			goto buf_too_small;
+		}
+		total_buf_len += plane_len - planes[i].offset;
+	}
+
+	if (total_buf_len < ps.total_size) {
+		SDEROT_ERR("Buffer size=%lu, expected size=%d\n",
+			total_buf_len,
+			ps.total_size);
+		ret = -EINVAL;
+		goto buf_too_small;
+	}
+	return 0;
+
+buf_too_small:
+	sde_mdp_data_free(data, rotator, dir);
+	return ret;
+}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h
new file mode 100644
index 000000000000..93074d64bf93
--- /dev/null
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_util.h
@@ -0,0 +1,196 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_ROTATOR_UTIL_H__
+#define __SDE_ROTATOR_UTIL_H__
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/kref.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+
+#include "sde_rotator_hwio.h"
+#include "sde_rotator_base.h"
+#include "sde_rotator_sync.h"
+#include "sde_rotator_io_util.h"
+#include "sde_rotator_formats.h"
+
+#define SDE_ROT_MAX_IMG_WIDTH 0x3FFF
+#define SDE_ROT_MAX_IMG_HEIGHT 0x3FFF
+
+#define SDEROT_DBG(fmt, ...) pr_debug("<SDEROT_DBG> " fmt, ##__VA_ARGS__)
+#define SDEROT_INFO(fmt, ...) pr_info("<SDEROT_INFO> " fmt, ##__VA_ARGS__)
+#define SDEROT_INFO_ONCE(fmt, ...) \
+ pr_info_once("<SDEROT_INFO> " fmt, ##__VA_ARGS__)
+#define SDEROT_WARN(fmt, ...) pr_warn("<SDEROT_WARN> " fmt, ##__VA_ARGS__)
+#define SDEROT_ERR(fmt, ...) pr_err("<SDEROT_ERR> " fmt, ##__VA_ARGS__)
+#define SDEDEV_DBG(dev, fmt, ...) \
+ dev_dbg(dev, "<SDEROT_DBG> " fmt, ##__VA_ARGS__)
+#define SDEDEV_INFO(dev, fmt, ...) \
+ dev_info(dev, "<SDEROT_INFO> " fmt, ##__VA_ARGS__)
+#define SDEDEV_WARN(dev, fmt, ...) \
+ dev_warn(dev, "<SDEROT_WARN> " fmt, ##__VA_ARGS__)
+#define SDEDEV_ERR(dev, fmt, ...) \
+ dev_err(dev, "<SDEROT_ERR> " fmt, ##__VA_ARGS__)
+
+struct sde_rect {
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+};
+
+/* sde flag values */
+#define SDE_ROT_NOP 0
+#define SDE_FLIP_LR 0x1
+#define SDE_FLIP_UD 0x2
+#define SDE_ROT_90 0x4
+#define SDE_ROT_180 (SDE_FLIP_UD|SDE_FLIP_LR)
+#define SDE_ROT_270 (SDE_ROT_90|SDE_FLIP_UD|SDE_FLIP_LR)
+#define SDE_DEINTERLACE 0x80000000
+#define SDE_SOURCE_ROTATED_90 0x00100000
+#define SDE_SECURE_OVERLAY_SESSION 0x00008000
+#define SDE_ROT_EXT_DMA_BUF 0x00010000
+
+struct sde_rot_data_type;
+
+struct sde_fb_data {
+ uint32_t offset;
+ struct dma_buf *buffer;
+ int memory_id;
+ int id;
+ uint32_t flags;
+ uint32_t priv;
+ uint32_t iova;
+};
+
+struct sde_layer_plane {
+ /* DMA buffer file descriptor information. */
+ int fd;
+ struct dma_buf *buffer;
+
+ /* Pixel offset in the dma buffer. */
+ uint32_t offset;
+
+ /* Number of bytes in one scan line including padding bytes. */
+ uint32_t stride;
+};
+
+struct sde_layer_buffer {
+ /* layer width in pixels. */
+ uint32_t width;
+
+ /* layer height in pixels. */
+ uint32_t height;
+
+ /*
+ * layer format in DRM-style fourcc, refer drm_fourcc.h for
+ * standard formats
+ */
+ uint32_t format;
+
+ /* plane to hold the fd, offset, etc for all color components */
+ struct sde_layer_plane planes[SDE_ROT_MAX_PLANES];
+
+ /* valid planes count in layer planes list */
+ uint32_t plane_count;
+
+ /* compression ratio factor, value depends on the pixel format */
+ struct sde_mult_factor comp_ratio;
+
+ /*
+ * SyncFence associated with this buffer. It is used in two ways.
+ *
+ * 1. Driver waits to consume the buffer till producer signals in case
+ * of primary and external display.
+ *
+ * 2. Writeback device uses buffer structure for output buffer where
+ * driver is producer. However, client sends the fence with buffer to
+ * indicate that consumer is still using the buffer and it is not ready
+ * for new content.
+ */
+ struct sde_rot_sync_fence *fence;
+};
+
+struct sde_mdp_plane_sizes {
+ u32 num_planes;
+ u32 plane_size[SDE_ROT_MAX_PLANES];
+ u32 total_size;
+ u32 ystride[SDE_ROT_MAX_PLANES];
+ u32 rau_cnt;
+ u32 rau_h[2];
+};
+
+struct sde_mdp_img_data {
+ dma_addr_t addr;
+ unsigned long len;
+ u32 offset;
+ u32 flags;
+ bool mapped;
+ bool skip_detach;
+ struct fd srcp_f;
+ struct dma_buf *srcp_dma_buf;
+ struct dma_buf_attachment *srcp_attachment;
+ struct sg_table *srcp_table;
+};
+
+enum sde_data_state {
+ SDE_BUF_STATE_UNUSED,
+ SDE_BUF_STATE_READY,
+ SDE_BUF_STATE_ACTIVE,
+ SDE_BUF_STATE_CLEANUP,
+};
+
+struct sde_mdp_data {
+ enum sde_data_state state;
+ u8 num_planes;
+ struct sde_mdp_img_data p[SDE_ROT_MAX_PLANES];
+ struct list_head buf_list;
+ struct list_head pipe_list;
+ struct list_head chunk_list;
+ u64 last_alloc;
+ u64 last_freed;
+};
+
+void sde_mdp_get_v_h_subsample_rate(u8 chroma_sample,
+ u8 *v_sample, u8 *h_sample);
+
+static inline u32 sde_mdp_general_align(u32 data, u32 alignment)
+{
+ return ((data + alignment - 1)/alignment) * alignment;
+}
+
+void sde_rot_data_calc_offset(struct sde_mdp_data *data, u16 x, u16 y,
+ struct sde_mdp_plane_sizes *ps, struct sde_mdp_format_params *fmt);
+
+int sde_validate_offset_for_ubwc_format(
+ struct sde_mdp_format_params *fmt, u16 x, u16 y);
+
+int sde_mdp_data_get_and_validate_size(struct sde_mdp_data *data,
+ struct sde_fb_data *planes, int num_planes, u32 flags,
+ struct device *dev, bool rotator, int dir,
+ struct sde_layer_buffer *buffer);
+
+int sde_mdp_get_plane_sizes(struct sde_mdp_format_params *fmt, u32 w, u32 h,
+ struct sde_mdp_plane_sizes *ps, u32 bwc_mode,
+ bool rotation);
+
+int sde_mdp_data_map(struct sde_mdp_data *data, bool rotator, int dir);
+
+int sde_mdp_data_check(struct sde_mdp_data *data,
+ struct sde_mdp_plane_sizes *ps,
+ struct sde_mdp_format_params *fmt);
+
+void sde_mdp_data_free(struct sde_mdp_data *data, bool rotator, int dir);
+#endif /* __SDE_ROTATOR_UTIL_H__ */
diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c
index 5018a06c1e16..4a941b9f7600 100644
--- a/drivers/misc/hdcp.c
+++ b/drivers/misc/hdcp.c
@@ -493,7 +493,7 @@ static int hdcp_lib_enable_encryption(struct hdcp_lib_handle *handle)
pr_debug("success\n");
return 0;
error:
- if (!atomic_read(&handle->hdcp_off))
+ if (handle && !atomic_read(&handle->hdcp_off))
HDCP_LIB_EXECUTE(clean);
return rc;
@@ -727,6 +727,11 @@ static void hdcp_lib_stream(struct hdcp_lib_handle *handle)
return;
}
+ if (!handle->repeater_flag) {
+ pr_debug("invalid state, not a repeater\n");
+ return;
+ }
+
/* send command to TZ */
req_buf = (struct hdcp_query_stream_type_req *)handle->
qseecom_handle->sbuf;
diff --git a/drivers/video/fbdev/msm/Makefile b/drivers/video/fbdev/msm/Makefile
index fdeea34122d5..c32772babc71 100644
--- a/drivers/video/fbdev/msm/Makefile
+++ b/drivers/video/fbdev/msm/Makefile
@@ -24,6 +24,8 @@ obj-$(CONFIG_FB_MSM_MDSS) += mdss-mdp.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss_mdp_debug.o
mdss-mdp-objs += mdss_mdp_pp_v1_7.o
+mdss-mdp-objs += mdss_mdp_pp_v3.o
+mdss-mdp-objs += mdss_mdp_pp_common.o
ifeq ($(CONFIG_FB_MSM_MDSS),y)
obj-$(CONFIG_DEBUG_FS) += mdss_debug.o mdss_debug_xlog.o
@@ -48,6 +50,7 @@ obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_tx.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp2p2.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_cec.o
+obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_audio.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_MHL_SII8334) += mhl_sii8334.o mhl_msc.o
obj-$(CONFIG_FB_MSM_MDSS_WRITEBACK) += mdss_wb.o
diff --git a/drivers/video/fbdev/msm/mdp3.c b/drivers/video/fbdev/msm/mdp3.c
index db50d5cd2054..fd22928353b4 100644
--- a/drivers/video/fbdev/msm/mdp3.c
+++ b/drivers/video/fbdev/msm/mdp3.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -22,6 +23,7 @@
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/module.h>
@@ -45,22 +47,27 @@
#include <linux/qcom_iommu.h>
#include <linux/msm_iommu_domains.h>
+#include <linux/msm_dma_iommu_mapping.h>
+
#include "mdp3.h"
#include "mdss_fb.h"
#include "mdp3_hwio.h"
#include "mdp3_ctrl.h"
#include "mdp3_ppp.h"
#include "mdss_debug.h"
+#include "mdss_smmu.h"
+#include "mdss.h"
#ifndef EXPORT_COMPAT
#define EXPORT_COMPAT(x)
#endif
+#define AUTOSUSPEND_TIMEOUT_MS 100
#define MISR_POLL_SLEEP 2000
#define MISR_POLL_TIMEOUT 32000
#define MDP3_REG_CAPTURED_DSI_PCLK_MASK 1
-#define MDP_CORE_HW_VERSION 0x03050305
+#define MDP_CORE_HW_VERSION 0x03050306
struct mdp3_hw_resource *mdp3_res;
#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
@@ -178,8 +185,7 @@ void mdp3_irq_enable(int type)
pr_debug("mdp3_irq_enable type=%d\n", type);
spin_lock_irqsave(&mdp3_res->irq_lock, flag);
- mdp3_res->irq_ref_count[type] += 1;
- if (mdp3_res->irq_ref_count[type] > 1) {
+ if (mdp3_res->irq_ref_count[type] > 0) {
pr_debug("interrupt %d already enabled\n", type);
spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
return;
@@ -188,6 +194,7 @@ void mdp3_irq_enable(int type)
mdp3_res->irq_mask |= BIT(type);
MDP3_REG_WRITE(MDP3_REG_INTR_ENABLE, mdp3_res->irq_mask);
+ mdp3_res->irq_ref_count[type] += 1;
spin_unlock_irqrestore(&mdp3_res->irq_lock, flag);
}
@@ -371,7 +378,7 @@ int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
for (i = 0; i < MDP3_CLIENT_MAX; i++) {
total_ab += bus_handle->ab[i];
- total_ib += bus_handle->ab[i];
+ total_ib += bus_handle->ib[i];
}
if ((total_ab | total_ib) == 0) {
@@ -487,7 +494,8 @@ int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate,
if (ret)
pr_err("clk_set_rate failed ret=%d\n", ret);
else
- pr_debug("mdp clk rate=%lu\n", rounded_rate);
+ pr_debug("mdp clk rate=%lu, client = %d\n",
+ rounded_rate, client);
}
mutex_unlock(&mdp3_res->res_mutex);
} else {
@@ -585,18 +593,63 @@ static void mdp3_clk_remove(void)
}
+u64 mdp3_clk_round_off(u64 clk_rate)
+{
+ u64 clk_round_off = 0;
+
+ if (clk_rate <= MDP_CORE_CLK_RATE_SVS)
+ clk_round_off = MDP_CORE_CLK_RATE_SVS;
+ else if (clk_rate <= MDP_CORE_CLK_RATE_SUPER_SVS)
+ clk_round_off = MDP_CORE_CLK_RATE_SUPER_SVS;
+ else
+ clk_round_off = MDP_CORE_CLK_RATE_MAX;
+
+ pr_debug("clk = %llu rounded to = %llu\n",
+ clk_rate, clk_round_off);
+ return clk_round_off;
+}
+
int mdp3_clk_enable(int enable, int dsi_clk)
{
- int rc;
+ int rc = 0;
+ int changed = 0;
pr_debug("MDP CLKS %s\n", (enable ? "Enable" : "Disable"));
mutex_lock(&mdp3_res->res_mutex);
+
+ if (enable) {
+ if (mdp3_res->clk_ena == 0)
+ changed++;
+ mdp3_res->clk_ena++;
+ } else {
+ if (mdp3_res->clk_ena) {
+ mdp3_res->clk_ena--;
+ if (mdp3_res->clk_ena == 0)
+ changed++;
+ } else {
+ pr_err("Can not be turned off\n");
+ }
+ }
+ pr_debug("%s: clk_ena=%d changed=%d enable=%d\n",
+ __func__, mdp3_res->clk_ena, changed, enable);
+
+ if (changed) {
+ if (enable)
+ pm_runtime_get_sync(&mdp3_res->pdev->dev);
+
rc = mdp3_clk_update(MDP3_CLK_AHB, enable);
rc |= mdp3_clk_update(MDP3_CLK_AXI, enable);
rc |= mdp3_clk_update(MDP3_CLK_MDP_SRC, enable);
rc |= mdp3_clk_update(MDP3_CLK_MDP_CORE, enable);
rc |= mdp3_clk_update(MDP3_CLK_VSYNC, enable);
+
+ if (!enable) {
+ pm_runtime_mark_last_busy(&mdp3_res->pdev->dev);
+ pm_runtime_put_autosuspend(&mdp3_res->pdev->dev);
+ }
+ }
+
mutex_unlock(&mdp3_res->res_mutex);
return rc;
}
@@ -606,7 +659,7 @@ void mdp3_bus_bw_iommu_enable(int enable, int client)
struct mdp3_bus_handle_map *bus_handle;
int client_idx;
u64 ab = 0, ib = 0;
- int ref_cnt, i;
+ int ref_cnt;
client_idx = MDP3_BUS_HANDLE;
@@ -619,27 +672,53 @@ void mdp3_bus_bw_iommu_enable(int enable, int client)
if (enable)
bus_handle->ref_cnt++;
else
- bus_handle->ref_cnt--;
+ if (bus_handle->ref_cnt)
+ bus_handle->ref_cnt--;
ref_cnt = bus_handle->ref_cnt;
mutex_unlock(&mdp3_res->res_mutex);
- if (enable && ref_cnt == 1) {
+ if (enable) {
if (mdp3_res->allow_iommu_update)
- mdp3_iommu_enable();
- for (i = 0; i < MDP3_CLIENT_MAX; i++) {
- ab += bus_handle->restore_ab[i];
- ib += bus_handle->restore_ib[i];
- }
+ mdp3_iommu_enable(client);
+ if (ref_cnt == 1) {
+ pm_runtime_get_sync(&mdp3_res->pdev->dev);
+ ab = bus_handle->restore_ab[client];
+ ib = bus_handle->restore_ib[client];
mdp3_bus_scale_set_quota(client, ab, ib);
- } else if (!enable && ref_cnt == 0) {
- mdp3_bus_scale_set_quota(client, 0, 0);
- mdp3_iommu_disable();
- } else if (ref_cnt < 0) {
+ }
+ } else {
+ if (ref_cnt == 0) {
+ mdp3_bus_scale_set_quota(client, 0, 0);
+ pm_runtime_mark_last_busy(&mdp3_res->pdev->dev);
+ pm_runtime_put_autosuspend(&mdp3_res->pdev->dev);
+ }
+ mdp3_iommu_disable(client);
+ }
+
+ if (ref_cnt < 0) {
pr_err("Ref count < 0, bus client=%d, ref_cnt=%d",
client_idx, ref_cnt);
}
}
+void mdp3_calc_dma_res(struct mdss_panel_info *panel_info, u64 *clk_rate,
+ u64 *ab, u64 *ib, uint32_t bpp)
+{
+ u32 vtotal = mdss_panel_get_vtotal(panel_info);
+ u32 htotal = mdss_panel_get_htotal(panel_info, 0);
+ u64 clk = htotal * vtotal * panel_info->mipi.frame_rate;
+
+ pr_debug("clk_rate for dma = %llu, bpp = %d\n", clk, bpp);
+ if (clk_rate)
+ *clk_rate = mdp3_clk_round_off(clk);
+
+ /* ab and ib vote should be same for honest voting */
+ if (ab || ib) {
+ *ab = clk * bpp;
+ *ib = *ab;
+ }
+}
+
int mdp3_res_update(int enable, int dsi_clk, int client)
{
int rc = 0;
@@ -681,6 +760,7 @@ int mdp3_get_mdp_dsi_clk(void)
int mdp3_put_mdp_dsi_clk(void)
{
int rc;
+
mutex_lock(&mdp3_res->res_mutex);
rc = mdp3_clk_update(MDP3_CLK_DSI, 0);
mutex_unlock(&mdp3_res->res_mutex);
@@ -701,7 +781,7 @@ static int mdp3_irq_setup(void)
pr_err("mdp request_irq() failed!\n");
return ret;
}
- mdp3_res->mdss_util->disable_irq_nosync(&mdp3_res->mdp3_hw);
+ disable_irq_nosync(mdp3_hw->irq_info->irq);
mdp3_res->irq_registered = true;
return 0;
}
@@ -782,7 +862,7 @@ int mdp3_iommu_domain_init(void)
layout.client_name = mdp3_iommu_domains[i].client_name;
layout.partitions = mdp3_iommu_domains[i].partitions;
layout.npartitions = mdp3_iommu_domains[i].npartitions;
- layout.is_secure = false;
+ layout.is_secure = (i == MDP3_IOMMU_DOMAIN_SECURE);
domain_idx = msm_register_domain(&layout);
if (IS_ERR_VALUE(domain_idx))
@@ -870,24 +950,23 @@ static int mdp3_check_version(void)
{
int rc;
- rc = mdp3_clk_update(MDP3_CLK_AHB, 1);
- rc |= mdp3_clk_update(MDP3_CLK_AXI, 1);
- rc |= mdp3_clk_update(MDP3_CLK_MDP_CORE, 1);
- if (rc)
+ rc = mdp3_clk_enable(1, 0);
+ if (rc) {
+ pr_err("fail to turn on MDP core clks\n");
return rc;
+ }
mdp3_res->mdp_rev = MDP3_REG_READ(MDP3_REG_HW_VERSION);
- rc = mdp3_clk_update(MDP3_CLK_AHB, 0);
- rc |= mdp3_clk_update(MDP3_CLK_AXI, 0);
- rc |= mdp3_clk_update(MDP3_CLK_MDP_CORE, 0);
- if (rc)
- pr_err("fail to turn off the MDP3_CLK_AHB clk\n");
-
if (mdp3_res->mdp_rev != MDP_CORE_HW_VERSION) {
pr_err("mdp_hw_revision=%x mismatch\n", mdp3_res->mdp_rev);
rc = -ENODEV;
}
+
+ rc = mdp3_clk_enable(0, 0);
+ if (rc)
+ pr_err("fail to turn off MDP core clks\n");
+
return rc;
}
@@ -900,9 +979,11 @@ static int mdp3_hw_init(void)
mdp3_res->dma[i].capability = MDP3_DMA_CAP_ALL;
mdp3_res->dma[i].in_use = 0;
mdp3_res->dma[i].available = 1;
+ mdp3_res->dma[i].cc_vect_sel = 0;
mdp3_res->dma[i].lut_sts = 0;
mdp3_res->dma[i].hist_cmap = NULL;
mdp3_res->dma[i].gc_cmap = NULL;
+ mutex_init(&mdp3_res->dma[i].pp_lock);
}
mdp3_res->dma[MDP3_DMA_S].capability = MDP3_DMA_CAP_DITHER;
mdp3_res->dma[MDP3_DMA_E].available = 0;
@@ -915,10 +996,121 @@ static int mdp3_hw_init(void)
}
mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_AHB].available = 0;
mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_LCDC].available = 0;
-
+ mdp3_res->smart_blit_en = SMART_BLIT_RGB_EN | SMART_BLIT_YUV_EN;
+ mdp3_res->solid_fill_vote_en = false;
return 0;
}
+int mdp3_dynamic_clock_gating_ctrl(int enable)
+{
+ int rc = 0;
+ int cgc_cfg = 0;
+ /*Disable dynamic auto clock gating*/
+ pr_debug("%s Status %s\n", __func__, (enable ? "ON":"OFF"));
+ rc = mdp3_clk_enable(1, 0);
+ if (rc) {
+ pr_err("fail to turn on MDP core clks\n");
+ return rc;
+ }
+ cgc_cfg = MDP3_REG_READ(MDP3_REG_CGC_EN);
+ if (enable) {
+ cgc_cfg |= (BIT(10));
+ cgc_cfg |= (BIT(18));
+ MDP3_REG_WRITE(MDP3_REG_CGC_EN, cgc_cfg);
+ VBIF_REG_WRITE(MDP3_VBIF_REG_FORCE_EN, 0x0);
+ } else {
+ cgc_cfg &= ~(BIT(10));
+ cgc_cfg &= ~(BIT(18));
+ MDP3_REG_WRITE(MDP3_REG_CGC_EN, cgc_cfg);
+ VBIF_REG_WRITE(MDP3_VBIF_REG_FORCE_EN, 0x3);
+ }
+
+ rc = mdp3_clk_enable(0, 0);
+ if (rc)
+ pr_err("fail to turn off MDP core clks\n");
+
+ return rc;
+}
+
+/**
+ * mdp3_get_panic_lut_cfg() - calculate panic and robust lut mask
+ * @panel_width: Panel width
+ *
+ * DMA buffer has 16 fill levels. Which needs to configured as safe
+ * and panic levels based on panel resolutions.
+ * No. of fill levels used = ((panel active width * 8) / 512).
+ * Roundoff the fill levels if needed.
+ * half of the total fill levels used will be treated as panic levels.
+ * Roundoff panic levels if total used fill levels are odd.
+ *
+ * Sample calculation for 720p display:
+ * Fill levels used = (720 * 8) / 512 = 12.5 after round off 13.
+ * panic levels = 13 / 2 = 6.5 after roundoff 7.
+ * Panic mask = 0x3FFF (2 bits per level)
+ * Robust mask = 0xFF80 (1 bit per level)
+ */
+u64 mdp3_get_panic_lut_cfg(u32 panel_width)
+{
+ u32 fill_levels = (((panel_width * 8) / 512) + 1);
+ u32 panic_mask = 0;
+ u32 robust_mask = 0;
+ u32 i = 0;
+ u64 panic_config = 0;
+ u32 panic_levels = 0;
+
+ panic_levels = fill_levels / 2;
+ if (fill_levels % 2)
+ panic_levels++;
+
+ for (i = 0; i < panic_levels; i++) {
+ panic_mask |= (BIT((i * 2) + 1) | BIT(i * 2));
+ robust_mask |= BIT(i);
+ }
+ panic_config = ~robust_mask;
+ panic_config = panic_config << 32;
+ panic_config |= panic_mask;
+ return panic_config;
+}
+
+int mdp3_qos_remapper_setup(struct mdss_panel_data *panel)
+{
+ int rc = 0;
+ u64 panic_config = mdp3_get_panic_lut_cfg(panel->panel_info.xres);
+
+ rc = mdp3_clk_update(MDP3_CLK_AHB, 1);
+ rc |= mdp3_clk_update(MDP3_CLK_AXI, 1);
+ rc |= mdp3_clk_update(MDP3_CLK_MDP_CORE, 1);
+ if (rc) {
+ pr_err("fail to turn on MDP core clks\n");
+ return rc;
+ }
+
+ if (!panel)
+ return -EINVAL;
+ /* Program MDP QOS Remapper */
+ MDP3_REG_WRITE(MDP3_DMA_P_QOS_REMAPPER, 0x1A9);
+ MDP3_REG_WRITE(MDP3_DMA_P_WATERMARK_0, 0x0);
+ MDP3_REG_WRITE(MDP3_DMA_P_WATERMARK_1, 0x0);
+ MDP3_REG_WRITE(MDP3_DMA_P_WATERMARK_2, 0x0);
+ /* PANIC setting depends on panel width*/
+ MDP3_REG_WRITE(MDP3_PANIC_LUT0, (panic_config & 0xFFFF));
+ MDP3_REG_WRITE(MDP3_PANIC_LUT1, ((panic_config >> 16) & 0xFFFF));
+ MDP3_REG_WRITE(MDP3_ROBUST_LUT, ((panic_config >> 32) & 0xFFFF));
+ MDP3_REG_WRITE(MDP3_PANIC_ROBUST_CTRL, 0x1);
+ pr_debug("Panel width %d Panic Lut0 %x Lut1 %x Robust %x\n",
+ panel->panel_info.xres,
+ MDP3_REG_READ(MDP3_PANIC_LUT0),
+ MDP3_REG_READ(MDP3_PANIC_LUT1),
+ MDP3_REG_READ(MDP3_ROBUST_LUT));
+
+ rc = mdp3_clk_update(MDP3_CLK_AHB, 0);
+ rc |= mdp3_clk_update(MDP3_CLK_AXI, 0);
+ rc |= mdp3_clk_update(MDP3_CLK_MDP_CORE, 0);
+ if (rc)
+ pr_err("fail to turn off MDP core clks\n");
+ return rc;
+}
+
static int mdp3_res_init(void)
{
int rc = 0;
@@ -958,12 +1150,14 @@ static int mdp3_res_init(void)
static void mdp3_res_deinit(void)
{
struct mdss_hw *mdp3_hw;
+ int i;
mdp3_hw = &mdp3_res->mdp3_hw;
mdp3_bus_scale_unregister();
mutex_lock(&mdp3_res->iommu_lock);
- mdp3_iommu_dettach(MDP3_IOMMU_CTX_MDP_0);
+ for (i = 0; i < MDP3_IOMMU_CTX_MAX; i++)
+ mdp3_iommu_dettach(i);
mutex_unlock(&mdp3_res->iommu_lock);
mdp3_iommu_deinit();
@@ -1154,6 +1348,24 @@ static int mdp3_parse_dt(struct platform_device *pdev)
(int) res->start,
(int) mdp3_res->mdp_base);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vbif_phys");
+ if (!res) {
+ pr_err("unable to get VBIF base address\n");
+ return -EINVAL;
+ }
+
+ mdp3_res->vbif_reg_size = resource_size(res);
+ mdp3_res->vbif_base = devm_ioremap(&pdev->dev, res->start,
+ mdp3_res->vbif_reg_size);
+ if (unlikely(!mdp3_res->vbif_base)) {
+ pr_err("unable to map VBIF base\n");
+ return -ENOMEM;
+ }
+
+ pr_debug("VBIF HW Base phy_Address=0x%x virt=0x%x\n",
+ (int) res->start,
+ (int) mdp3_res->vbif_base);
+
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
pr_err("unable to get MDSS irq\n");
@@ -1181,6 +1393,9 @@ static int mdp3_parse_dt(struct platform_device *pdev)
pdev->dev.of_node, "qcom,mdss-has-panic-ctrl");
mdp3_res->dma[MDP3_DMA_P].has_panic_ctrl = panic_ctrl;
+ mdp3_res->idle_pc_enabled = of_property_read_bool(
+ pdev->dev.of_node, "qcom,mdss-idle-power-collapse-enabled");
+
return 0;
}
@@ -1270,39 +1485,364 @@ void mdp3_enable_regulator(int enable)
mdp3_batfet_ctrl(enable);
}
-int mdp3_put_img(struct mdp3_img_data *data)
+static void mdp3_iommu_heap_unmap_iommu(struct mdp3_iommu_meta *meta)
+{
+ unsigned int domain_num;
+ unsigned int partition_num = 0;
+ struct iommu_domain *domain;
+
+ domain_num = (mdp3_res->domains +
+ MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx;
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ pr_err("Could not get domain %d. Corruption?\n", domain_num);
+ return;
+ }
+
+ iommu_unmap_range(domain, meta->iova_addr, meta->mapped_size);
+ msm_free_iova_address(meta->iova_addr, domain_num, partition_num,
+ meta->mapped_size);
+}
+
+static void mdp3_iommu_meta_destroy(struct kref *kref)
+{
+ struct mdp3_iommu_meta *meta =
+ container_of(kref, struct mdp3_iommu_meta, ref);
+
+ rb_erase(&meta->node, &mdp3_res->iommu_root);
+ mdp3_iommu_heap_unmap_iommu(meta);
+ dma_buf_put(meta->dbuf);
+ kfree(meta);
+}
+
+
+static void mdp3_iommu_meta_put(struct mdp3_iommu_meta *meta)
+{
+ /* Need to lock here to prevent race against map/unmap */
+ mutex_lock(&mdp3_res->iommu_lock);
+ kref_put(&meta->ref, mdp3_iommu_meta_destroy);
+ mutex_unlock(&mdp3_res->iommu_lock);
+}
+
+static struct mdp3_iommu_meta *mdp3_iommu_meta_lookup(struct sg_table *table)
+{
+ struct rb_root *root = &mdp3_res->iommu_root;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct mdp3_iommu_meta *entry = NULL;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct mdp3_iommu_meta, node);
+
+ if (table < entry->table)
+ p = &(*p)->rb_left;
+ else if (table > entry->table)
+ p = &(*p)->rb_right;
+ else
+ return entry;
+ }
+ return NULL;
+}
+
+void mdp3_unmap_iommu(struct ion_client *client, struct ion_handle *handle)
+{
+ struct mdp3_iommu_meta *meta;
+ struct sg_table *table;
+
+ table = ion_sg_table(client, handle);
+
+ mutex_lock(&mdp3_res->iommu_lock);
+ meta = mdp3_iommu_meta_lookup(table);
+ if (!meta) {
+ WARN(1, "%s: buffer was never mapped for %p\n", __func__,
+ handle);
+ mutex_unlock(&mdp3_res->iommu_lock);
+ return;
+ }
+ mutex_unlock(&mdp3_res->iommu_lock);
+
+ mdp3_iommu_meta_put(meta);
+}
+
+static void mdp3_iommu_meta_add(struct mdp3_iommu_meta *meta)
+{
+ struct rb_root *root = &mdp3_res->iommu_root;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct mdp3_iommu_meta *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct mdp3_iommu_meta, node);
+
+ if (meta->table < entry->table) {
+ p = &(*p)->rb_left;
+ } else if (meta->table > entry->table) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_err("%s: handle %p already exists\n", __func__,
+ entry->handle);
+ BUG();
+ }
+ }
+
+ rb_link_node(&meta->node, parent, p);
+ rb_insert_color(&meta->node, root);
+}
+
+static int mdp3_iommu_map_iommu(struct mdp3_iommu_meta *meta,
+ unsigned long align, unsigned long iova_length,
+ unsigned int padding, unsigned long flags)
+{
+ struct iommu_domain *domain;
+ int ret = 0;
+ unsigned long size;
+ unsigned long unmap_size;
+ struct sg_table *table;
+ int prot = IOMMU_WRITE | IOMMU_READ;
+ unsigned int domain_num = (mdp3_res->domains +
+ MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx;
+ unsigned int partition_num = 0;
+
+ size = meta->size;
+ table = meta->table;
+
+ /* Use the biggest alignment to allow bigger IOMMU mappings.
+ * Use the first entry since the first entry will always be the
+ * biggest entry. To take advantage of bigger mapping sizes both the
+ * VA and PA addresses have to be aligned to the biggest size.
+ */
+ if (table->sgl->length > align)
+ align = table->sgl->length;
+
+ ret = msm_allocate_iova_address(domain_num, partition_num,
+ meta->mapped_size, align,
+ (unsigned long *)&meta->iova_addr);
+
+ if (ret)
+ goto out;
+
+ domain = msm_get_iommu_domain(domain_num);
+
+ if (!domain) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ /* Adding padding to before buffer */
+ if (padding) {
+ unsigned long phys_addr = sg_phys(table->sgl);
+
+ ret = msm_iommu_map_extra(domain, meta->iova_addr, phys_addr,
+ padding, SZ_4K, prot);
+ if (ret)
+ goto out1;
+ }
+
+ /* Mapping actual buffer */
+ ret = iommu_map_range(domain, meta->iova_addr + padding,
+ table->sgl, size, prot);
+ if (ret) {
+ pr_err("%s: could not map %pa in domain %p\n",
+ __func__, &meta->iova_addr, domain);
+ unmap_size = padding;
+ goto out2;
+ }
+
+ /* Adding padding to end of buffer */
+ if (padding) {
+ unsigned long phys_addr = sg_phys(table->sgl);
+ unsigned long extra_iova_addr = meta->iova_addr +
+ padding + size;
+ ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
+ padding, SZ_4K, prot);
+ if (ret) {
+ unmap_size = padding + size;
+ goto out2;
+ }
+ }
+ return ret;
+
+out2:
+ iommu_unmap_range(domain, meta->iova_addr, unmap_size);
+out1:
+ msm_free_iova_address(meta->iova_addr, domain_num, partition_num,
+ iova_length);
+
+out:
+ return ret;
+}
+
+static struct mdp3_iommu_meta *mdp3_iommu_meta_create(struct ion_client *client,
+ struct ion_handle *handle, struct sg_table *table, unsigned long size,
+ unsigned long align, unsigned long iova_length, unsigned int padding,
+ unsigned long flags, dma_addr_t *iova)
+{
+ struct mdp3_iommu_meta *meta;
+ int ret;
+
+ meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+
+ if (!meta)
+ return ERR_PTR(-ENOMEM);
+
+ meta->handle = handle;
+ meta->table = table;
+ meta->size = size;
+ meta->mapped_size = iova_length;
+ meta->dbuf = ion_share_dma_buf(client, handle);
+ kref_init(&meta->ref);
+
+ ret = mdp3_iommu_map_iommu(meta,
+ align, iova_length, padding, flags);
+ if (ret < 0) {
+ pr_err("%s: Unable to map buffer\n", __func__);
+ goto out;
+ }
+
+ *iova = meta->iova_addr;
+ mdp3_iommu_meta_add(meta);
+
+ return meta;
+out:
+ kfree(meta);
+ return ERR_PTR(ret);
+}
+
+/*
+ * PPP hw reads in tiles of 16 which might be outside mapped region
+ * need to map buffers ourseleve to add extra padding
+ */
+int mdp3_self_map_iommu(struct ion_client *client, struct ion_handle *handle,
+ unsigned long align, unsigned long padding, dma_addr_t *iova,
+ unsigned long *buffer_size, unsigned long flags,
+ unsigned long iommu_flags)
+{
+ struct mdp3_iommu_meta *iommu_meta = NULL;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ unsigned long size = 0, iova_length = 0;
+ int ret = 0;
+ int i;
+
+ table = ion_sg_table(client, handle);
+ if (IS_ERR_OR_NULL(table))
+ return PTR_ERR(table);
+
+ for_each_sg(table->sgl, sg, table->nents, i)
+ size += sg->length;
+
+ padding = PAGE_ALIGN(padding);
+
+ /* Adding 16 lines padding before and after buffer */
+ iova_length = size + 2 * padding;
+
+ if (size & ~PAGE_MASK) {
+ pr_debug("%s: buffer size %lx is not aligned to %lx",
+ __func__, size, PAGE_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (iova_length & ~PAGE_MASK) {
+ pr_debug("%s: iova_length %lx is not aligned to %lx",
+ __func__, iova_length, PAGE_SIZE);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&mdp3_res->iommu_lock);
+ iommu_meta = mdp3_iommu_meta_lookup(table);
+
+ if (!iommu_meta) {
+ iommu_meta = mdp3_iommu_meta_create(client, handle, table, size,
+ align, iova_length, padding, flags, iova);
+ if (!IS_ERR_OR_NULL(iommu_meta)) {
+ iommu_meta->flags = iommu_flags;
+ ret = 0;
+ } else {
+ ret = PTR_ERR(iommu_meta);
+ goto out_unlock;
+ }
+ } else {
+ if (iommu_meta->flags != iommu_flags) {
+ pr_err("%s: hndl %p already mapped with diff flag\n",
+ __func__, handle);
+ ret = -EINVAL;
+ goto out_unlock;
+ } else if (iommu_meta->mapped_size != iova_length) {
+ pr_err("%s: hndl %p already mapped with diff len\n",
+ __func__, handle);
+ ret = -EINVAL;
+ goto out_unlock;
+ } else {
+ kref_get(&iommu_meta->ref);
+ *iova = iommu_meta->iova_addr;
+ }
+ }
+ BUG_ON(iommu_meta->size != size);
+ mutex_unlock(&mdp3_res->iommu_lock);
+
+ *iova = *iova + padding;
+ *buffer_size = size;
+ return ret;
+
+out_unlock:
+ mutex_unlock(&mdp3_res->iommu_lock);
+out:
+ mdp3_iommu_meta_put(iommu_meta);
+ return ret;
+}
+
+int mdp3_put_img(struct mdp3_img_data *data, int client)
{
struct ion_client *iclient = mdp3_res->ion_client;
int dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx;
+ int dir = DMA_BIDIRECTIONAL;
- if (data->flags & MDP_MEMORY_ID_TYPE_FB) {
+ if (data->flags & MDP_MEMORY_ID_TYPE_FB) {
pr_info("mdp3_put_img fb mem buf=0x%pa\n", &data->addr);
fdput(data->srcp_f);
memset(&data->srcp_f, 0, sizeof(struct fd));
- } else if (!IS_ERR_OR_NULL(data->srcp_ihdl)) {
- ion_unmap_iommu(iclient, data->srcp_ihdl, dom, 0);
- ion_free(iclient, data->srcp_ihdl);
- data->srcp_ihdl = NULL;
+ } else if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
+ pr_debug("ion hdl = %p buf=0x%pa\n", data->srcp_dma_buf,
+ &data->addr);
+ if (!iclient) {
+ pr_err("invalid ion client\n");
+ return -ENOMEM;
+ }
+ if (data->mapped) {
+ mdss_smmu_unmap_dma_buf(data->srcp_table,
+ dom, dir,
+ data->srcp_dma_buf);
+ data->mapped = false;
+ }
+ if (!data->skip_detach) {
+ dma_buf_unmap_attachment(data->srcp_attachment,
+ data->srcp_table,
+ mdss_smmu_dma_data_direction(dir));
+ dma_buf_detach(data->srcp_dma_buf,
+ data->srcp_attachment);
+ dma_buf_put(data->srcp_dma_buf);
+ data->srcp_dma_buf = NULL;
+ }
} else {
return -EINVAL;
}
return 0;
}
-int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data)
+int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data, int client)
{
struct fd f;
int ret = -EINVAL;
int fb_num;
- unsigned long *len;
- dma_addr_t *start;
struct ion_client *iclient = mdp3_res->ion_client;
int dom = (mdp3_res->domains + MDP3_IOMMU_DOMAIN_UNSECURE)->domain_idx;
- start = &data->addr;
- len = (unsigned long *) &data->len;
data->flags = img->flags;
- data->p_need = 0;
if (img->flags & MDP_MEMORY_ID_TYPE_FB) {
f = fdget(img->memory_id);
@@ -1313,7 +1853,8 @@ int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data)
}
if (MAJOR(f.file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
fb_num = MINOR(f.file->f_dentry->d_inode->i_rdev);
- ret = mdss_fb_get_phys_info(start, len, fb_num);
+ ret = mdss_fb_get_phys_info(&data->addr,
+ &data->len, fb_num);
if (ret) {
pr_err("mdss_fb_get_phys_info() failed\n");
fdput(f);
@@ -1328,125 +1869,126 @@ int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data)
if (!ret)
goto done;
} else if (iclient) {
- data->srcp_ihdl = ion_import_dma_buf(iclient, img->memory_id);
- if (IS_ERR_OR_NULL(data->srcp_ihdl)) {
- pr_err("error on ion_import_fd\n");
- if (!data->srcp_ihdl)
- ret = -EINVAL;
- else
- ret = PTR_ERR(data->srcp_ihdl);
- data->srcp_ihdl = NULL;
- return ret;
- }
- ret = ion_map_iommu(iclient, data->srcp_ihdl, dom,
- 0, SZ_4K, 0, start, len, 0, 0);
- if (IS_ERR_VALUE(ret)) {
- ion_free(iclient, data->srcp_ihdl);
- pr_err("failed to map ion handle (%d)\n", ret);
- return ret;
- }
+ data->srcp_dma_buf = dma_buf_get(img->memory_id);
+ if (IS_ERR(data->srcp_dma_buf)) {
+ pr_err("DMA : error on ion_import_fd\n");
+ ret = PTR_ERR(data->srcp_dma_buf);
+ data->srcp_dma_buf = NULL;
+ return ret;
+ }
+
+ data->srcp_attachment =
+ mdss_smmu_dma_buf_attach(data->srcp_dma_buf,
+ &mdp3_res->pdev->dev, dom);
+ if (IS_ERR(data->srcp_attachment)) {
+ ret = PTR_ERR(data->srcp_attachment);
+ goto err_put;
+ }
+
+ data->srcp_table =
+ dma_buf_map_attachment(data->srcp_attachment,
+ mdss_smmu_dma_data_direction(DMA_BIDIRECTIONAL));
+ if (IS_ERR(data->srcp_table)) {
+ ret = PTR_ERR(data->srcp_table);
+ goto err_detach;
+ }
+
+ ret = mdss_smmu_map_dma_buf(data->srcp_dma_buf,
+ data->srcp_table, dom,
+ &data->addr, &data->len, DMA_BIDIRECTIONAL);
+
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("smmu map dma buf failed: (%d)\n", ret);
+ goto err_unmap;
+ }
+
+ data->mapped = true;
+ data->skip_detach = false;
}
done:
if (!ret && (img->offset < data->len)) {
data->addr += img->offset;
data->len -= img->offset;
- pr_debug("mem=%d ihdl=%p buf=0x%pa len=0x%x\n", img->memory_id,
- data->srcp_ihdl, &data->addr, data->len);
+ pr_debug("mem=%d ihdl=%p buf=0x%pa len=0x%lx\n",
+ img->memory_id, data->srcp_dma_buf,
+ &data->addr, data->len);
+
} else {
- mdp3_put_img(data);
+ mdp3_put_img(data, client);
return -EINVAL;
}
+ return ret;
+err_detach:
+ dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
+err_put:
+ dma_buf_put(data->srcp_dma_buf);
+ return ret;
+err_unmap:
+ dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
+ mdss_smmu_dma_data_direction(DMA_BIDIRECTIONAL));
+ dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
+ dma_buf_put(data->srcp_dma_buf);
return ret;
+
}
-int mdp3_iommu_enable()
+int mdp3_iommu_enable(int client)
{
- int i, rc = 0;
+ int rc = 0;
mutex_lock(&mdp3_res->iommu_lock);
+
if (mdp3_res->iommu_ref_cnt == 0) {
- for (i = 0; i < MDP3_IOMMU_CTX_MAX; i++) {
- rc = mdp3_iommu_attach(i);
- if (rc) {
- WARN(1, "IOMMU attach failed for ctx: %d\n", i);
- for (i--; i >= 0; i--)
- mdp3_iommu_dettach(i);
- }
- }
+ rc = mdss_smmu_attach(mdss_res);
+ if (rc)
+ rc = mdss_smmu_detach(mdss_res);
}
if (!rc)
mdp3_res->iommu_ref_cnt++;
mutex_unlock(&mdp3_res->iommu_lock);
+ pr_debug("client :%d total_ref_cnt: %d\n",
+ client, mdp3_res->iommu_ref_cnt);
return rc;
}
-int mdp3_iommu_disable()
+int mdp3_iommu_disable(int client)
{
- int i, rc = 0;
+ int rc = 0;
mutex_lock(&mdp3_res->iommu_lock);
if (mdp3_res->iommu_ref_cnt) {
mdp3_res->iommu_ref_cnt--;
- if (mdp3_res->iommu_ref_cnt == 0) {
- for (i = 0; i < MDP3_IOMMU_CTX_MAX; i++)
- rc = mdp3_iommu_dettach(i);
- }
+
+ pr_debug("client :%d total_ref_cnt: %d\n",
+ client, mdp3_res->iommu_ref_cnt);
+ if (mdp3_res->iommu_ref_cnt == 0)
+ rc = mdss_smmu_detach(mdss_res);
} else {
- pr_err("iommu ref count unbalanced\n");
+ pr_err("iommu ref count unbalanced for client %d\n", client);
}
mutex_unlock(&mdp3_res->iommu_lock);
return rc;
}
-int mdp3_panel_get_intf_status(u32 disp_num, u32 intf_type)
-{
- int rc = 0, status = 0;
-
- if (intf_type != MDSS_PANEL_INTF_DSI)
- return 0;
-
- mdp3_clk_update(MDP3_CLK_AHB, 1);
- mdp3_clk_update(MDP3_CLK_AXI, 1);
- mdp3_clk_update(MDP3_CLK_MDP_CORE, 1);
-
- status = (MDP3_REG_READ(MDP3_REG_DMA_P_CONFIG) & 0x180000);
- /* DSI video mode or command mode */
- rc = (status == 0x180000) || (status == 0x080000);
-
- mdp3_clk_update(MDP3_CLK_AHB, 0);
- mdp3_clk_update(MDP3_CLK_AXI, 0);
- mdp3_clk_update(MDP3_CLK_MDP_CORE, 0);
-
- return rc;
-}
-
int mdp3_iommu_ctrl(int enable)
{
int rc;
+ if (mdp3_res->allow_iommu_update == false)
+ return 0;
+
if (enable)
- rc = mdp3_iommu_enable();
+ rc = mdp3_iommu_enable(MDP3_CLIENT_DSI);
else
- rc = mdp3_iommu_disable();
+ rc = mdp3_iommu_disable(MDP3_CLIENT_DSI);
return rc;
}
-int mdp3_iommu_is_attached()
-{
- struct mdp3_iommu_ctx_map *context_map;
-
- if (!mdp3_res->iommu_contexts)
- return 0;
-
- context_map = mdp3_res->iommu_contexts + MDP3_IOMMU_CTX_MDP_0;
- return context_map->attached;
-}
-
static int mdp3_init(struct msm_fb_data_type *mfd)
{
int rc;
@@ -1479,57 +2021,124 @@ u32 mdp3_fb_stride(u32 fb_index, u32 xres, int bpp)
return xres * bpp;
}
+__ref int mdp3_parse_dt_splash(struct msm_fb_data_type *mfd)
+{
+ struct platform_device *pdev = mfd->pdev;
+ int len = 0, rc = 0;
+ u32 offsets[2];
+ struct device_node *pnode, *child_node;
+ struct property *prop = NULL;
+
+ mfd->splash_info.splash_logo_enabled =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,mdss-fb-splash-logo-enabled");
+
+ prop = of_find_property(pdev->dev.of_node, "qcom,memblock-reserve",
+ &len);
+ if (!prop) {
+ pr_debug("Read memblock reserve settings for fb failed\n");
+ pr_debug("Read cont-splash-memory settings\n");
+ }
+
+ if (len) {
+ len = len / sizeof(u32);
+
+ rc = of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,memblock-reserve", offsets, len);
+ if (rc) {
+ pr_err("error reading mem reserve settings for fb\n");
+ rc = -EINVAL;
+ goto error;
+ }
+ } else {
+ child_node = of_get_child_by_name(pdev->dev.of_node,
+ "qcom,cont-splash-memory");
+ if (!child_node) {
+ pr_err("splash mem child node is not present\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ pnode = of_parse_phandle(child_node, "linux,contiguous-region",
+ 0);
+ if (pnode != NULL) {
+ const u32 *addr;
+ u64 size;
+
+ addr = of_get_address(pnode, 0, &size, NULL);
+ if (!addr) {
+ pr_err("failed to parse the splash memory address\n");
+ of_node_put(pnode);
+ rc = -EINVAL;
+ goto error;
+ }
+ offsets[0] = (u32) of_read_ulong(addr, 2);
+ offsets[1] = (u32) size;
+ of_node_put(pnode);
+ } else {
+ pr_err("mem reservation for splash screen fb not present\n");
+ rc = -EINVAL;
+ goto error;
+ }
+ }
+
+ if (!memblock_is_reserved(offsets[0])) {
+ pr_debug("failed to reserve memory for fb splash\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ mfd->fbi->fix.smem_start = offsets[0];
+ mfd->fbi->fix.smem_len = offsets[1];
+ mdp3_res->splash_mem_addr = mfd->fbi->fix.smem_start;
+ mdp3_res->splash_mem_size = mfd->fbi->fix.smem_len;
+
+error:
+ if (rc && mfd->panel_info->cont_splash_enabled)
+ pr_err("no rsvd mem found in DT for splash screen\n");
+ else
+ rc = 0;
+
+ return rc;
+}
+
static int mdp3_alloc(struct msm_fb_data_type *mfd)
{
int ret;
int dom;
void *virt;
- unsigned long phys;
- u32 offsets[2];
+ phys_addr_t phys;
size_t size;
- struct platform_device *pdev = mfd->pdev;
mfd->fbi->screen_base = NULL;
mfd->fbi->fix.smem_start = 0;
mfd->fbi->fix.smem_len = 0;
- ret = of_property_read_u32_array(pdev->dev.of_node,
- "qcom,memblock-reserve", offsets, 2);
-
- if (ret) {
- pr_err("fail to parse splash memory address\n");
- return ret;
- }
+ mdp3_parse_dt_splash(mfd);
- phys = offsets[0];
- size = PAGE_ALIGN(mfd->fbi->fix.line_length *
- mfd->fbi->var.yres_virtual);
+ size = mfd->fbi->fix.smem_len;
- if (size > offsets[1]) {
- pr_err("reserved splash memory size too small\n");
- return -EINVAL;
- }
+ dom = mdp3_res->domains[MDP3_IOMMU_DOMAIN_UNSECURE].domain_idx;
- virt = phys_to_virt(phys);
- if (unlikely(!virt)) {
- pr_err("unable to map in splash memory\n");
+ ret = mdss_smmu_dma_alloc_coherent(&mdp3_res->pdev->dev, size,
+ &phys, &mfd->iova, &virt, GFP_KERNEL, dom);
+ if (ret) {
+ pr_err("unable to alloc fbmem size=%zx\n", size);
return -ENOMEM;
}
- dom = mdp3_res->domains[MDP3_IOMMU_DOMAIN_UNSECURE].domain_idx;
- ret = msm_iommu_map_contig_buffer(phys, dom, 0, size, SZ_4K, 0,
- &mfd->iova);
-
- if (ret) {
- pr_err("fail to map to IOMMU %d\n", ret);
- return ret;
+ if (MDSS_LPAE_CHECK(phys)) {
+ pr_warn("fb mem phys %pa > 4GB is not supported.\n", &phys);
+ mdss_smmu_dma_free_coherent(&mdp3_res->pdev->dev, size, &virt,
+ phys, mfd->iova, dom);
+ return -ERANGE;
}
- pr_info("allocating %u bytes at %p (%lx phys) for fb %d\n",
- size, virt, phys, mfd->index);
- mfd->fbi->screen_base = virt;
+ pr_debug("alloc 0x%zxB @ (%pa phys) (0x%p virt) (%pa iova) for fb%d\n",
+ size, &phys, virt, &mfd->iova, mfd->index);
+
mfd->fbi->fix.smem_start = phys;
- mfd->fbi->fix.smem_len = size;
+ mfd->fbi->screen_base = virt;
return 0;
}
@@ -1538,6 +2147,7 @@ void mdp3_free(struct msm_fb_data_type *mfd)
{
size_t size = 0;
int dom;
+ unsigned long phys;
if (!mfd->iova || !mfd->fbi->screen_base) {
pr_info("no fbmem allocated\n");
@@ -1545,41 +2155,17 @@ void mdp3_free(struct msm_fb_data_type *mfd)
}
size = mfd->fbi->fix.smem_len;
+ phys = mfd->fbi->fix.smem_start;
dom = mdp3_res->domains[MDP3_IOMMU_DOMAIN_UNSECURE].domain_idx;
+ iommu_unmap(mdp3_res->domains[MDP3_IOMMU_DOMAIN_UNSECURE].domain,
+ phys, size);
msm_iommu_unmap_contig_buffer(mfd->iova, dom, 0, size);
mfd->fbi->screen_base = NULL;
mfd->fbi->fix.smem_start = 0;
- mfd->fbi->fix.smem_len = 0;
mfd->iova = 0;
}
-int mdp3_parse_dt_splash(struct msm_fb_data_type *mfd)
-{
- struct platform_device *pdev = mfd->pdev;
- int rc;
- u32 offsets[2];
-
- rc = of_property_read_u32_array(pdev->dev.of_node,
- "qcom,memblock-reserve", offsets, 2);
-
- if (rc) {
- pr_err("fail to get memblock-reserve property\n");
- return rc;
- }
-
- if (mdp3_res->splash_mem_addr != offsets[0])
- rc = -EINVAL;
-
- mdp3_res->splash_mem_addr = offsets[0];
- mdp3_res->splash_mem_size = offsets[1];
-
- pr_debug("memaddr=%lx size=%x\n", mdp3_res->splash_mem_addr,
- mdp3_res->splash_mem_size);
-
- return rc;
-}
-
void mdp3_release_splash_memory(struct msm_fb_data_type *mfd)
{
/* Give back the reserved memory to the system */
@@ -1639,10 +2225,11 @@ static int mdp3_is_display_on(struct mdss_panel_data *pdata)
int rc = 0;
u32 status;
- mdp3_clk_update(MDP3_CLK_AHB, 1);
- mdp3_clk_update(MDP3_CLK_AXI, 1);
- mdp3_clk_update(MDP3_CLK_MDP_CORE, 1);
-
+ rc = mdp3_clk_enable(1, 0);
+ if (rc) {
+ pr_err("fail to turn on MDP core clks\n");
+ return rc;
+ }
if (pdata->panel_info.type == MIPI_VIDEO_PANEL) {
status = MDP3_REG_READ(MDP3_REG_DSI_VIDEO_EN);
rc = status & 0x1;
@@ -1654,9 +2241,9 @@ static int mdp3_is_display_on(struct mdss_panel_data *pdata)
mdp3_res->splash_mem_addr = MDP3_REG_READ(MDP3_REG_DMA_P_IBUF_ADDR);
- mdp3_clk_update(MDP3_CLK_AHB, 0);
- mdp3_clk_update(MDP3_CLK_AXI, 0);
- mdp3_clk_update(MDP3_CLK_MDP_CORE, 0);
+ rc = mdp3_clk_enable(0, 0);
+ if (rc)
+ pr_err("fail to turn off MDP core clks\n");
return rc;
}
@@ -1664,26 +2251,25 @@ static int mdp3_continuous_splash_on(struct mdss_panel_data *pdata)
{
struct mdss_panel_info *panel_info = &pdata->panel_info;
struct mdp3_bus_handle_map *bus_handle;
- u64 ab, ib;
- int rc;
+ u64 ab = 0;
+ u64 ib = 0;
+ u64 mdp_clk_rate = 0;
+ int rc = 0;
pr_debug("mdp3__continuous_splash_on\n");
- mdp3_clk_set_rate(MDP3_CLK_VSYNC, MDP_VSYNC_CLK_RATE,
- MDP3_CLIENT_DMA_P);
-
- mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, MDP_CORE_CLK_RATE_SVS,
- MDP3_CLIENT_DMA_P);
-
bus_handle = &mdp3_res->bus_handle[MDP3_BUS_HANDLE];
if (bus_handle->handle < 1) {
pr_err("invalid bus handle %d\n", bus_handle->handle);
return -EINVAL;
}
+ mdp3_calc_dma_res(panel_info, &mdp_clk_rate, &ab, &ib, panel_info->bpp);
+
+ mdp3_clk_set_rate(MDP3_CLK_VSYNC, MDP_VSYNC_CLK_RATE,
+ MDP3_CLIENT_DMA_P);
+ mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, mdp_clk_rate,
+ MDP3_CLIENT_DMA_P);
- ab = panel_info->xres * panel_info->yres * 4;
- ab *= panel_info->mipi.frame_rate;
- ib = (ab * 3) / 2;
rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib);
bus_handle->restore_ab[MDP3_CLIENT_DMA_P] = ab;
bus_handle->restore_ib[MDP3_CLIENT_DMA_P] = ib;
@@ -1700,15 +2286,6 @@ static int mdp3_continuous_splash_on(struct mdss_panel_data *pdata)
goto splash_on_err;
}
- if (pdata->event_handler) {
- rc = pdata->event_handler(pdata, MDSS_EVENT_CONT_SPLASH_BEGIN,
- NULL);
- if (rc) {
- pr_err("MDSS_EVENT_CONT_SPLASH_BEGIN event fail\n");
- goto splash_on_err;
- }
- }
-
if (panel_info->type == MIPI_VIDEO_PANEL)
mdp3_res->intf[MDP3_DMA_OUTPUT_SEL_DSI_VIDEO].active = 1;
else
@@ -1729,6 +2306,15 @@ static int mdp3_panel_register_done(struct mdss_panel_data *pdata)
{
int rc = 0;
+ /*
+ * If idle pc feature is not enabled, then get a reference to the
+ * runtime device which will be released when device is turned off
+ */
+ if (!mdp3_res->idle_pc_enabled ||
+ pdata->panel_info.type != MIPI_CMD_PANEL) {
+ pm_runtime_get_sync(&mdp3_res->pdev->dev);
+ }
+
if (pdata->panel_info.cont_splash_enabled) {
if (!mdp3_is_display_on(pdata)) {
pr_err("continuous splash, but bootloader is not\n");
@@ -1746,10 +2332,35 @@ static int mdp3_panel_register_done(struct mdss_panel_data *pdata)
* continue splash screen. This would have happened in
* res_update in continuous_splash_on without this flag.
*/
- mdp3_res->allow_iommu_update = true;
+ if (pdata->panel_info.cont_splash_enabled == false)
+ mdp3_res->allow_iommu_update = true;
+
return rc;
}
+/* mdp3_autorefresh_disable() - Disable Auto refresh
+ * @panel_info: pointer to panel configuration structure
+ *
+ * This function disables the Auto refresh block for command mode panels.
+ */
+int mdp3_autorefresh_disable(struct mdss_panel_info *panel_info)
+{
+ if ((panel_info->type == MIPI_CMD_PANEL) &&
+ (MDP3_REG_READ(MDP3_REG_AUTOREFRESH_CONFIG_P)))
+ MDP3_REG_WRITE(MDP3_REG_AUTOREFRESH_CONFIG_P, 0);
+ return 0;
+}
+
+int mdp3_splash_done(struct mdss_panel_info *panel_info)
+{
+ if (panel_info->cont_splash_enabled) {
+ pr_err("continuous splash is on and splash done called\n");
+ return -EINVAL;
+ }
+ mdp3_res->allow_iommu_update = true;
+ return 0;
+}
+
static int mdp3_debug_dump_stats_show(struct seq_file *s, void *v)
{
struct mdp3_hw_resource *res = (struct mdp3_hw_resource *)s->private;
@@ -1780,6 +2391,13 @@ static int mdp3_debug_init(struct platform_device *pdev)
return -ENOMEM;
mdss_res = mdata;
+ mutex_init(&mdata->reg_lock);
+ mutex_init(&mdata->reg_bus_lock);
+ mutex_init(&mdata->bus_lock);
+ INIT_LIST_HEAD(&mdata->reg_bus_clist);
+ atomic_set(&mdata->sd_client_count, 0);
+ atomic_set(&mdata->active_intf_cnt, 0);
+ mdss_res->mdss_util = mdp3_res->mdss_util;
mdata->debug_inf.debug_enable_clock = mdp3_debug_enable_clock;
@@ -1811,9 +2429,17 @@ static void mdp3_debug_deinit(struct platform_device *pdev)
static void mdp3_dma_underrun_intr_handler(int type, void *arg)
{
+ struct mdp3_dma *dma = &mdp3_res->dma[MDP3_DMA_P];
+
mdp3_res->underrun_cnt++;
- pr_err("display underrun detected count=%d\n",
+ pr_err_ratelimited("display underrun detected count=%d\n",
mdp3_res->underrun_cnt);
+ ATRACE_INT("mdp3_dma_underrun_intr_handler", mdp3_res->underrun_cnt);
+
+ if (dma->ccs_config.ccs_enable && !dma->ccs_config.ccs_dirty) {
+ dma->ccs_config.ccs_dirty = true;
+ schedule_work(&dma->underrun_work);
+ }
}
static ssize_t mdp3_show_capabilities(struct device *dev,
@@ -1826,7 +2452,7 @@ static ssize_t mdp3_show_capabilities(struct device *dev,
(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))
SPRINT("mdp_version=3\n");
- SPRINT("hw_rev=%d\n", 304);
+ SPRINT("hw_rev=%d\n", 305);
SPRINT("dma_pipes=%d\n", 1);
SPRINT("\n");
@@ -1835,8 +2461,46 @@ static ssize_t mdp3_show_capabilities(struct device *dev,
static DEVICE_ATTR(caps, S_IRUGO, mdp3_show_capabilities, NULL);
+static ssize_t mdp3_store_smart_blit(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ u32 data = -1;
+ ssize_t rc = 0;
+
+ rc = kstrtoint(buf, 10, &data);
+ if (rc) {
+ pr_err("kstrtoint failed. rc=%d\n", rc);
+ return rc;
+ }
+ mdp3_res->smart_blit_en = data;
+ pr_debug("mdp3 smart blit RGB %s YUV %s\n",
+ (mdp3_res->smart_blit_en & SMART_BLIT_RGB_EN) ?
+ "ENABLED" : "DISABLED",
+ (mdp3_res->smart_blit_en & SMART_BLIT_YUV_EN) ?
+ "ENABLED" : "DISABLED");
+ return len;
+}
+
+static ssize_t mdp3_show_smart_blit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret = 0;
+
+ pr_debug("mdp3 smart blit RGB %s YUV %s\n",
+ (mdp3_res->smart_blit_en & SMART_BLIT_RGB_EN) ?
+ "ENABLED" : "DISABLED",
+ (mdp3_res->smart_blit_en & SMART_BLIT_YUV_EN) ?
+ "ENABLED" : "DISABLED");
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", mdp3_res->smart_blit_en);
+ return ret;
+}
+
+static DEVICE_ATTR(smart_blit, S_IRUGO | S_IWUSR | S_IWGRP,
+ mdp3_show_smart_blit, mdp3_store_smart_blit);
+
static struct attribute *mdp3_fs_attrs[] = {
&dev_attr_caps.attr,
+ &dev_attr_smart_blit.attr,
NULL
};
@@ -1962,24 +2626,49 @@ int mdp3_misr_set(struct mdp_misr *misr_req)
return ret;
}
+struct mdss_panel_cfg *mdp3_panel_intf_type(int intf_val)
+{
+ if (!mdp3_res || !mdp3_res->pan_cfg.init_done)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ if (mdp3_res->pan_cfg.pan_intf == intf_val)
+ return &mdp3_res->pan_cfg;
+ else
+ return NULL;
+}
+EXPORT_SYMBOL(mdp3_panel_intf_type);
+
int mdp3_footswitch_ctrl(int enable)
{
int rc = 0;
+ int active_cnt = 0;
if (!mdp3_res->fs_ena && enable) {
rc = regulator_enable(mdp3_res->fs);
if (rc) {
pr_err("mdp footswitch ctrl enable failed\n");
return -EINVAL;
- } else {
+ }
pr_debug("mdp footswitch ctrl enable success\n");
+ mdp3_enable_regulator(true);
mdp3_res->fs_ena = true;
+ } else if (!enable && mdp3_res->fs_ena) {
+ active_cnt = atomic_read(&mdp3_res->active_intf_cnt);
+ if (active_cnt != 0) {
+ /*
+ * Turning off GDSC while overlays are still
+ * active.
+ */
+ mdp3_res->idle_pc = true;
+ pr_debug("idle pc. active overlays=%d\n",
+ active_cnt);
}
- } else if (mdp3_res->fs_ena && !enable) {
+ mdp3_enable_regulator(false);
rc = regulator_disable(mdp3_res->fs);
- if (rc)
- pr_warn("mdp footswitch ctrl disable failed\n");
- else
+ if (rc) {
+ pr_err("mdp footswitch ctrl disable failed\n");
+ return -EINVAL;
+ }
mdp3_res->fs_ena = false;
} else {
pr_debug("mdp3 footswitch ctrl already configured\n");
@@ -1988,17 +2677,28 @@ int mdp3_footswitch_ctrl(int enable)
return rc;
}
-struct mdss_panel_cfg *mdp3_panel_intf_type(int intf_val)
+int mdp3_panel_get_intf_status(u32 disp_num, u32 intf_type)
{
- if (!mdp3_res || !mdp3_res->pan_cfg.init_done)
- return ERR_PTR(-EPROBE_DEFER);
+ int rc = 0, status = 0;
- if (mdp3_res->pan_cfg.pan_intf == intf_val)
- return &mdp3_res->pan_cfg;
- else
- return NULL;
+ if (intf_type != MDSS_PANEL_INTF_DSI)
+ return 0;
+
+ rc = mdp3_clk_enable(1, 0);
+ if (rc) {
+ pr_err("fail to turn on MDP core clks\n");
+ return rc;
+ }
+
+ status = (MDP3_REG_READ(MDP3_REG_DMA_P_CONFIG) & 0x180000);
+ /* DSI video mode or command mode */
+ rc = (status == 0x180000) || (status == 0x080000);
+
+ rc = mdp3_clk_enable(0, 0);
+ if (rc)
+ pr_err("fail to turn off MDP core clks\n");
+ return rc;
}
-EXPORT_SYMBOL(mdp3_panel_intf_type);
static int mdp3_probe(struct platform_device *pdev)
{
@@ -2017,6 +2717,7 @@ static int mdp3_probe(struct platform_device *pdev)
.data = NULL,
};
+ pr_debug("%s: START\n", __func__);
if (!pdev->dev.of_node) {
pr_err("MDP driver only supports device tree probe\n");
return -ENOTSUPP;
@@ -2037,6 +2738,9 @@ static int mdp3_probe(struct platform_device *pdev)
mutex_init(&mdp3_res->res_mutex);
spin_lock_init(&mdp3_res->irq_lock);
platform_set_drvdata(pdev, mdp3_res);
+ atomic_set(&mdp3_res->active_intf_cnt, 0);
+ mutex_init(&mdp3_res->reg_bus_lock);
+ INIT_LIST_HEAD(&mdp3_res->reg_bus_clist);
mdp3_res->mdss_util = mdss_get_util_intf();
if (mdp3_res->mdss_util == NULL) {
@@ -2045,12 +2749,14 @@ static int mdp3_probe(struct platform_device *pdev)
goto get_util_fail;
}
mdp3_res->mdss_util->get_iommu_domain = mdp3_get_iommu_domain;
- mdp3_res->mdss_util->iommu_attached = mdp3_iommu_is_attached;
+ mdp3_res->mdss_util->iommu_attached = is_mdss_iommu_attached;
mdp3_res->mdss_util->iommu_ctrl = mdp3_iommu_ctrl;
mdp3_res->mdss_util->bus_scale_set_quota = mdp3_bus_scale_set_quota;
mdp3_res->mdss_util->panel_intf_type = mdp3_panel_intf_type;
+ mdp3_res->mdss_util->dyn_clk_gating_ctrl =
+ mdp3_dynamic_clock_gating_ctrl;
+ mdp3_res->mdss_util->panel_intf_type = mdp3_panel_intf_type;
mdp3_res->mdss_util->panel_intf_status = mdp3_panel_get_intf_status;
-
rc = mdp3_parse_dt(pdev);
if (rc)
goto probe_done;
@@ -2067,24 +2773,35 @@ static int mdp3_probe(struct platform_device *pdev)
pr_err("unable to get mdss gdsc regulator\n");
return -EINVAL;
}
- rc = mdp3_footswitch_ctrl(1);
+
+ rc = mdp3_debug_init(pdev);
if (rc) {
- pr_err("unable to turn on FS\n");
+ pr_err("unable to initialize mdp debugging\n");
goto probe_done;
}
- rc = mdp3_check_version();
- if (rc) {
- pr_err("mdp3 check version failed\n");
- goto probe_done;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT_MS);
+ if (mdp3_res->idle_pc_enabled) {
+ pr_debug("%s: Enabling autosuspend\n", __func__);
+ pm_runtime_use_autosuspend(&pdev->dev);
}
+ /* Enable PM runtime */
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
- rc = mdp3_debug_init(pdev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ rc = mdp3_footswitch_ctrl(1);
+ if (rc) {
+ pr_err("unable to turn on FS\n");
+ goto probe_done;
+ }
+ }
+
+ rc = mdp3_check_version();
if (rc) {
- pr_err("unable to initialize mdp debugging\n");
+ pr_err("mdp3 check version failed\n");
goto probe_done;
}
-
rc = mdp3_register_sysfs(pdev);
if (rc)
pr_err("unable to register mdp sysfs nodes\n");
@@ -2097,7 +2814,13 @@ static int mdp3_probe(struct platform_device *pdev)
&underrun_cb);
if (rc)
pr_err("unable to configure interrupt callback\n");
+
+ rc = mdss_smmu_init(mdss_res, &pdev->dev);
+ if (rc)
+ pr_err("mdss smmu init failed\n");
+
mdp3_res->mdss_util->mdp_probe_done = true;
+ pr_debug("%s: END\n", __func__);
probe_done:
if (IS_ERR_VALUE(rc))
@@ -2134,42 +2857,118 @@ int mdp3_panel_get_boot_cfg(void)
return rc;
}
-static int mdp3_suspend_sub(struct mdp3_hw_resource *mdata)
+static int mdp3_suspend_sub(void)
{
- mdp3_enable_regulator(false);
+ mdp3_footswitch_ctrl(0);
return 0;
}
-static int mdp3_resume_sub(struct mdp3_hw_resource *mdata)
+static int mdp3_resume_sub(void)
{
- mdp3_enable_regulator(true);
+ mdp3_footswitch_ctrl(1);
return 0;
}
-static int mdp3_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int mdp3_pm_suspend(struct device *dev)
{
- struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
+ dev_dbg(dev, "Display pm suspend\n");
- if (!mdata)
- return -ENODEV;
+ return mdp3_suspend_sub();
+}
+
+static int mdp3_pm_resume(struct device *dev)
+{
+ dev_dbg(dev, "Display pm resume\n");
- pr_debug("display suspend\n");
+ /*
+ * It is possible that the runtime status of the mdp device may
+ * have been active when the system was suspended. Reset the runtime
+ * status to suspended state after a complete system resume.
+ */
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_enable(dev);
+
+ return mdp3_resume_sub();
+}
+#endif
+
+#if defined(CONFIG_PM) && !defined(CONFIG_PM_SLEEP)
+static int mdp3_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ pr_debug("Display suspend\n");
- return mdp3_suspend_sub(mdata);
+ return mdp3_suspend_sub();
}
static int mdp3_resume(struct platform_device *pdev)
{
- struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
+ pr_debug("Display resume\n");
- if (!mdata)
- return -ENODEV;
+ return mdp3_resume_sub();
+}
+#else
+#define mdp3_suspend NULL
+#define mdp3_resume NULL
+#endif
+
+
+#ifdef CONFIG_PM_RUNTIME
+static int mdp3_runtime_resume(struct device *dev)
+{
+ bool device_on = true;
+
+ dev_dbg(dev, "Display pm runtime resume, active overlay cnt=%d\n",
+ atomic_read(&mdp3_res->active_intf_cnt));
- pr_debug("display resume\n");
+ /* do not resume panels when coming out of idle power collapse */
+ if (!mdp3_res->idle_pc)
+ device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);
- return mdp3_resume_sub(mdata);
+ mdp3_footswitch_ctrl(1);
+
+ return 0;
}
+static int mdp3_runtime_idle(struct device *dev)
+{
+ dev_dbg(dev, "Display pm runtime idle\n");
+
+ return 0;
+}
+
+static int mdp3_runtime_suspend(struct device *dev)
+{
+ bool device_on = false;
+
+ dev_dbg(dev, "Display pm runtime suspend, active overlay cnt=%d\n",
+ atomic_read(&mdp3_res->active_intf_cnt));
+
+ if (mdp3_res->clk_ena) {
+ pr_debug("Clk turned on...MDP suspend failed\n");
+ return -EBUSY;
+ }
+
+ mdp3_footswitch_ctrl(0);
+
+ /* do not suspend panels when going in to idle power collapse */
+ if (!mdp3_res->idle_pc)
+ device_for_each_child(dev, &device_on, mdss_fb_suspres_panel);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops mdp3_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mdp3_pm_suspend,
+ mdp3_pm_resume)
+ SET_RUNTIME_PM_OPS(mdp3_runtime_suspend,
+ mdp3_runtime_resume,
+ mdp3_runtime_idle)
+};
+
+
static int mdp3_remove(struct platform_device *pdev)
{
struct mdp3_hw_resource *mdata = platform_get_drvdata(pdev);
@@ -2199,6 +2998,7 @@ static struct platform_driver mdp3_driver = {
.driver = {
.name = "mdp3",
.of_match_table = mdp3_dt_match,
+ .pm = &mdp3_pm_ops,
},
};
diff --git a/drivers/video/fbdev/msm/mdp3.h b/drivers/video/fbdev/msm/mdp3.h
index 99f557321758..5a27971398b5 100644
--- a/drivers/video/fbdev/msm/mdp3.h
+++ b/drivers/video/fbdev/msm/mdp3.h
@@ -21,18 +21,28 @@
#include <linux/msm_iommu_domains.h>
+#include "mdss_dsi_clk.h"
#include "mdp3_dma.h"
#include "mdss_fb.h"
#include "mdss.h"
#define MDP_VSYNC_CLK_RATE 19200000
-#define MDP_CORE_CLK_RATE_SVS 150000000
+#define MDP_CORE_CLK_RATE_SVS 160000000
+#define MDP_CORE_CLK_RATE_SUPER_SVS 200000000
#define MDP_CORE_CLK_RATE_MAX 307200000
/* PPP cant work at SVS for panel res above qHD */
#define SVS_MAX_PIXEL (540 * 960)
#define KOFF_TIMEOUT msecs_to_jiffies(84)
+#define WAIT_DMA_TIMEOUT msecs_to_jiffies(84)
+
+/*
+ * The MDP_DEINTERLACE and MDP_SHARPENING flags are not valid for MDP3,
+ * so their combination is reused here to denote MDP_SMART_BLIT.
+ */
+#define MDP_SMART_BLIT 0xC0000000
+
enum {
MDP3_CLK_AHB,
@@ -50,8 +60,8 @@ enum {
};
enum {
- MDP3_IOMMU_DOMAIN_SECURE,
MDP3_IOMMU_DOMAIN_UNSECURE,
+ MDP3_IOMMU_DOMAIN_SECURE,
MDP3_IOMMU_DOMAIN_MAX,
};
@@ -67,9 +77,16 @@ enum {
MDP3_CLIENT_DMA_P,
MDP3_CLIENT_DSI = 1,
MDP3_CLIENT_PPP,
+ MDP3_CLIENT_IOMMU,
MDP3_CLIENT_MAX,
};
+enum {
+ DI_PARTITION_NUM = 0,
+ DI_DOMAIN_NUM = 1,
+ DI_MAX,
+};
+
struct mdp3_bus_handle_map {
struct msm_bus_vectors *bus_vector;
struct msm_bus_paths *usecases;
@@ -100,6 +117,19 @@ struct mdp3_iommu_ctx_map {
int attached;
};
+struct mdp3_iommu_meta {
+ struct rb_node node;
+ struct ion_handle *handle;
+ struct rb_root iommu_maps;
+ struct kref ref;
+ struct sg_table *table;
+ struct dma_buf *dbuf;
+ int mapped_size;
+ unsigned long size;
+ dma_addr_t iova_addr;
+ unsigned long flags;
+};
+
#define MDP3_MAX_INTR 28
struct mdp3_intr_cb {
@@ -107,6 +137,9 @@ struct mdp3_intr_cb {
void *data;
};
+#define SMART_BLIT_RGB_EN 1
+#define SMART_BLIT_YUV_EN 2
+
struct mdp3_hw_resource {
struct platform_device *pdev;
u32 mdp_rev;
@@ -123,6 +156,9 @@ struct mdp3_hw_resource {
char __iomem *mdp_base;
size_t mdp_reg_size;
+ char __iomem *vbif_base;
+ size_t vbif_reg_size;
+
struct mdp3_bus_handle_map *bus_handle;
struct ion_client *ion_client;
@@ -136,6 +172,7 @@ struct mdp3_hw_resource {
struct mdp3_dma dma[MDP3_DMA_MAX];
struct mdp3_intf intf[MDP3_DMA_OUTPUT_SEL_MAX];
+ struct rb_root iommu_root;
spinlock_t irq_lock;
u32 irq_ref_count[MDP3_MAX_INTR];
u32 irq_mask;
@@ -157,16 +194,32 @@ struct mdp3_hw_resource {
struct regulator *vdd_cx;
struct regulator *fs;
bool fs_ena;
+ int clk_ena;
+ bool idle_pc_enabled;
+ bool idle_pc;
+ atomic_t active_intf_cnt;
+ u8 smart_blit_en;
+ bool solid_fill_vote_en;
+ struct list_head reg_bus_clist;
+ struct mutex reg_bus_lock;
};
struct mdp3_img_data {
dma_addr_t addr;
- u32 len;
+ unsigned long len;
+ u32 offset;
u32 flags;
+ u32 padding;
int p_need;
- struct file *srcp_file;
struct ion_handle *srcp_ihdl;
+ u32 dir;
+ u32 domain;
+ bool mapped;
+ bool skip_detach;
struct fd srcp_f;
+ struct dma_buf *srcp_dma_buf;
+ struct dma_buf_attachment *srcp_attachment;
+ struct sg_table *srcp_table;
};
extern struct mdp3_hw_resource *mdp3_res;
@@ -183,10 +236,11 @@ int mdp3_clk_set_rate(int clk_type, unsigned long clk_rate, int client);
int mdp3_clk_enable(int enable, int dsi_clk);
int mdp3_res_update(int enable, int dsi_clk, int client);
int mdp3_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota);
-int mdp3_put_img(struct mdp3_img_data *data);
-int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data);
-int mdp3_iommu_enable(void);
-int mdp3_iommu_disable(void);
+int mdp3_put_img(struct mdp3_img_data *data, int client);
+int mdp3_get_img(struct msmfb_data *img, struct mdp3_img_data *data,
+ int client);
+int mdp3_iommu_enable(int client);
+int mdp3_iommu_disable(int client);
int mdp3_iommu_is_attached(void);
void mdp3_free(struct msm_fb_data_type *mfd);
int mdp3_parse_dt_splash(struct msm_fb_data_type *mfd);
@@ -201,9 +255,20 @@ int mdp3_misr_get(struct mdp_misr *misr_resp);
void mdp3_enable_regulator(int enable);
void mdp3_check_dsi_ctrl_status(struct work_struct *work,
uint32_t interval);
+int mdp3_dynamic_clock_gating_ctrl(int enable);
int mdp3_footswitch_ctrl(int enable);
+int mdp3_qos_remapper_setup(struct mdss_panel_data *panel);
+int mdp3_splash_done(struct mdss_panel_info *panel_info);
+int mdp3_autorefresh_disable(struct mdss_panel_info *panel_info);
+u64 mdp3_clk_round_off(u64 clk_rate);
+
+void mdp3_calc_dma_res(struct mdss_panel_info *panel_info, u64 *clk_rate,
+ u64 *ab, u64 *ib, uint32_t bpp);
+
#define MDP3_REG_WRITE(addr, val) writel_relaxed(val, mdp3_res->mdp_base + addr)
#define MDP3_REG_READ(addr) readl_relaxed(mdp3_res->mdp_base + addr)
+#define VBIF_REG_WRITE(off, val) writel_relaxed(val, mdp3_res->vbif_base + off)
+#define VBIF_REG_READ(off) readl_relaxed(mdp3_res->vbif_base + off)
#endif /* MDP3_H */
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c
index a7f5bb132f94..31c0cd86df4b 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.c
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.c
@@ -21,8 +21,8 @@
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
+#include <linux/pm_runtime.h>
-#include "mdss_dsi_clk.h"
#include "mdp3_ctrl.h"
#include "mdp3.h"
#include "mdp3_ppp.h"
@@ -39,8 +39,11 @@ static int mdp3_ctrl_vsync_enable(struct msm_fb_data_type *mfd, int enable);
static int mdp3_ctrl_get_intf_type(struct msm_fb_data_type *mfd);
static int mdp3_ctrl_lut_read(struct msm_fb_data_type *mfd,
struct mdp_rgb_lut_data *cfg);
-static int mdp3_ctrl_lut_update(struct msm_fb_data_type *mfd,
+static int mdp3_ctrl_lut_config(struct msm_fb_data_type *mfd,
struct mdp_rgb_lut_data *cfg);
+static void mdp3_ctrl_pp_resume(struct msm_fb_data_type *mfd);
+static int mdp3_ctrl_reset(struct msm_fb_data_type *mfd);
+static int mdp3_ctrl_get_pack_pattern(u32 imgType);
u32 mdp_lut_inverse16[MDP_LUT_SIZE] = {
0, 65536, 32768, 21845, 16384, 13107, 10923, 9362, 8192, 7282, 6554, 5958,
@@ -79,7 +82,7 @@ static void mdp3_bufq_deinit(struct mdp3_buffer_queue *bufq)
while (count-- && (bufq->pop_idx >= 0)) {
struct mdp3_img_data *data = &bufq->img_data[bufq->pop_idx];
bufq->pop_idx = (bufq->pop_idx + 1) % MDP3_MAX_BUF_QUEUE;
- mdp3_put_img(data);
+ mdp3_put_img(data, MDP3_CLIENT_DMA_P);
}
bufq->count = 0;
bufq->push_idx = 0;
@@ -157,6 +160,9 @@ static void mdp3_dispatch_dma_done(struct work_struct *work)
static void mdp3_dispatch_clk_off(struct work_struct *work)
{
struct mdp3_session_data *session;
+ int rc;
+ bool dmap_busy;
+ int retry_count = 2;
pr_debug("%s\n", __func__);
session = container_of(work, struct mdp3_session_data,
@@ -166,12 +172,36 @@ static void mdp3_dispatch_clk_off(struct work_struct *work)
mutex_lock(&session->lock);
if (session->vsync_enabled ||
- atomic_read(&session->vsync_countdown) != 0) {
+ atomic_read(&session->vsync_countdown) != 0) {
mutex_unlock(&session->lock);
pr_debug("Ignoring clk shut down\n");
return;
}
+ if (session->intf->active) {
+retry_dma_done:
+ rc = wait_for_completion_timeout(&session->dma_completion,
+ WAIT_DMA_TIMEOUT);
+ if (rc <= 0) {
+ struct mdss_panel_data *panel;
+
+ panel = session->panel;
+ pr_debug("cmd kickoff timed out (%d)\n", rc);
+ dmap_busy = session->dma->busy();
+ if (dmap_busy) {
+ if (--retry_count) {
+ pr_err("dmap is busy, retry %d\n",
+ retry_count);
+ goto retry_dma_done;
+ }
+ pr_err("dmap is still busy, bug_on\n");
+ BUG_ON(1);
+ } else {
+ pr_debug("dmap is not busy, continue\n");
+ }
+ }
+ }
+
mdp3_ctrl_vsync_enable(session->mfd, 0);
mdp3_ctrl_clk_enable(session->mfd, 0);
mutex_unlock(&session->lock);
@@ -189,7 +219,7 @@ void dma_done_notify_handler(void *arg)
struct mdp3_session_data *session = (struct mdp3_session_data *)arg;
atomic_inc(&session->dma_done_cnt);
schedule_work(&session->dma_done_work);
- complete(&session->dma_completion);
+ complete_all(&session->dma_completion);
}
void vsync_count_down(void *arg)
@@ -301,7 +331,7 @@ static int mdp3_ctrl_blit_req(struct msm_fb_data_type *mfd, void __user *p)
void __user *p_req;
if (copy_from_user(&(req_list_header.count), p,
- sizeof(struct mdp_blit_req_list)))
+ sizeof(struct mdp_blit_req_list)))
return -EFAULT;
p_req = p + sizeof(struct mdp_blit_req_list);
count = req_list_header.count;
@@ -328,12 +358,87 @@ static ssize_t mdp3_vsync_show_event(struct device *dev,
vsync_ticks = ktime_to_ns(mdp3_session->vsync_time);
- pr_debug("fb%d vsync=%llu", mfd->index, vsync_ticks);
+ pr_debug("fb%d vsync=%llu\n", mfd->index, vsync_ticks);
rc = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n", vsync_ticks);
return rc;
}
+static ssize_t mdp3_packpattern_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+ struct mdp3_session_data *mdp3_session = NULL;
+ int rc;
+ u32 pattern = 0;
+
+ if (!mfd || !mfd->mdp.private1)
+ return -EAGAIN;
+
+ mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+
+ pattern = mdp3_session->dma->output_config.pack_pattern;
+
+ /* If pattern was found to be 0 then get pattern for fb imagetype */
+ if (!pattern)
+ pattern = mdp3_ctrl_get_pack_pattern(mfd->fb_imgType);
+
+ pr_debug("fb%d pack_pattern c= %d.", mfd->index, pattern);
+ rc = scnprintf(buf, PAGE_SIZE, "packpattern=%d\n", pattern);
+ return rc;
+}
+
+static ssize_t mdp3_dyn_pu_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct msm_fb_data_type *mfd = fbi->par;
+ struct mdp3_session_data *mdp3_session = NULL;
+ int ret, state;
+
+ if (!mfd || !mfd->mdp.private1)
+ return -EAGAIN;
+
+ mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+ state = (mdp3_session->dyn_pu_state >= 0) ?
+ mdp3_session->dyn_pu_state : -1;
+ ret = scnprintf(buf, PAGE_SIZE, "%d", state);
+ return ret;
+}
+
+static ssize_t mdp3_dyn_pu_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct msm_fb_data_type *mfd = fbi->par;
+ struct mdp3_session_data *mdp3_session = NULL;
+ int ret, dyn_pu;
+
+ if (!mfd || !mfd->mdp.private1)
+ return -EAGAIN;
+
+ mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
+ ret = kstrtoint(buf, 10, &dyn_pu);
+ if (ret) {
+ pr_err("Invalid input for partial update: ret = %d\n", ret);
+ return ret;
+ }
+
+ mdp3_session->dyn_pu_state = dyn_pu;
+ sysfs_notify(&dev->kobj, NULL, "dyn_pu");
+ return count;
+}
+
static DEVICE_ATTR(vsync_event, S_IRUGO, mdp3_vsync_show_event, NULL);
+static DEVICE_ATTR(packpattern, S_IRUGO, mdp3_packpattern_show, NULL);
+static DEVICE_ATTR(dyn_pu, S_IRUGO | S_IWUSR | S_IWGRP, mdp3_dyn_pu_show,
+ mdp3_dyn_pu_store);
+
+static struct attribute *generic_attrs[] = {
+ &dev_attr_packpattern.attr,
+ &dev_attr_dyn_pu.attr,
+ NULL,
+};
static struct attribute *vsync_fs_attrs[] = {
&dev_attr_vsync_event.attr,
@@ -344,6 +449,10 @@ static struct attribute_group vsync_fs_attr_group = {
.attrs = vsync_fs_attrs,
};
+static struct attribute_group generic_attr_group = {
+ .attrs = generic_attrs,
+};
+
static int mdp3_ctrl_clk_enable(struct msm_fb_data_type *mfd, int enable)
{
struct mdp3_session_data *session;
@@ -377,13 +486,12 @@ static int mdp3_ctrl_clk_enable(struct msm_fb_data_type *mfd, int enable)
static int mdp3_ctrl_res_req_bus(struct msm_fb_data_type *mfd, int status)
{
int rc = 0;
+
if (status) {
- struct mdss_panel_info *panel_info = mfd->panel_info;
u64 ab = 0;
u64 ib = 0;
- ab = panel_info->xres * panel_info->yres * 4 * 2;
- ab *= panel_info->mipi.frame_rate;
- ib = (ab * 3) / 2;
+ mdp3_calc_dma_res(mfd->panel_info, NULL, &ab, &ib,
+ ppp_bpp(mfd->fb_imgType));
rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, ab, ib);
} else {
rc = mdp3_bus_scale_set_quota(MDP3_CLIENT_DMA_P, 0, 0);
@@ -395,8 +503,12 @@ static int mdp3_ctrl_res_req_clk(struct msm_fb_data_type *mfd, int status)
{
int rc = 0;
if (status) {
+ u64 mdp_clk_rate = 0;
- mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, MDP_CORE_CLK_RATE_SVS,
+ mdp3_calc_dma_res(mfd->panel_info, &mdp_clk_rate,
+ NULL, NULL, 0);
+
+ mdp3_clk_set_rate(MDP3_CLK_MDP_SRC, mdp_clk_rate,
MDP3_CLIENT_DMA_P);
mdp3_clk_set_rate(MDP3_CLK_VSYNC, MDP_VSYNC_CLK_RATE,
MDP3_CLIENT_DMA_P);
@@ -456,7 +568,7 @@ static int mdp3_ctrl_get_source_format(u32 imgType)
static int mdp3_ctrl_get_pack_pattern(u32 imgType)
{
int packPattern = MDP3_DMA_OUTPUT_PACK_PATTERN_RGB;
- if (imgType == MDP_RGBA_8888)
+ if (imgType == MDP_RGBA_8888 || imgType == MDP_RGB_888)
packPattern = MDP3_DMA_OUTPUT_PACK_PATTERN_BGR;
return packPattern;
}
@@ -464,7 +576,7 @@ static int mdp3_ctrl_get_pack_pattern(u32 imgType)
static int mdp3_ctrl_intf_init(struct msm_fb_data_type *mfd,
struct mdp3_intf *intf)
{
- int rc;
+ int rc = 0;
struct mdp3_intf_cfg cfg;
struct mdp3_video_intf_cfg *video = &cfg.video;
struct mdss_panel_info *p = mfd->panel_info;
@@ -479,6 +591,9 @@ static int mdp3_ctrl_intf_init(struct msm_fb_data_type *mfd,
int v_pulse_width = p->lcdc.v_pulse_width;
int hsync_period = h_front_porch + h_back_porch + w + h_pulse_width;
int vsync_period = v_front_porch + v_back_porch + h + v_pulse_width;
+ struct mdp3_session_data *mdp3_session;
+
+ mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
vsync_period *= hsync_period;
cfg.type = mdp3_ctrl_get_intf_type(mfd);
@@ -512,10 +627,12 @@ static int mdp3_ctrl_intf_init(struct msm_fb_data_type *mfd,
} else
return -EINVAL;
- if (intf->config)
- rc = intf->config(intf, &cfg);
- else
- rc = -EINVAL;
+ if (!(mdp3_session->in_splash_screen)) {
+ if (intf->config)
+ rc = intf->config(intf, &cfg);
+ else
+ rc = -EINVAL;
+ }
return rc;
}
@@ -534,6 +651,9 @@ static int mdp3_ctrl_dma_init(struct msm_fb_data_type *mfd,
int vtotal, vporch;
struct mdp3_notification dma_done_callback;
struct mdp3_tear_check te;
+ struct mdp3_session_data *mdp3_session;
+
+ mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
vbp = panel_info->lcdc.v_back_porch;
vfp = panel_info->lcdc.v_front_porch;
@@ -544,12 +664,10 @@ static int mdp3_ctrl_dma_init(struct msm_fb_data_type *mfd,
fix = &fbi->fix;
var = &fbi->var;
- sourceConfig.format = mdp3_ctrl_get_source_format(mfd->fb_imgType);
sourceConfig.width = panel_info->xres;
sourceConfig.height = panel_info->yres;
sourceConfig.x = 0;
sourceConfig.y = 0;
- sourceConfig.stride = fix->line_length;
sourceConfig.buf = mfd->iova;
sourceConfig.vporch = vporch;
sourceConfig.vsync_count =
@@ -559,12 +677,24 @@ static int mdp3_ctrl_dma_init(struct msm_fb_data_type *mfd,
outputConfig.out_sel = mdp3_ctrl_get_intf_type(mfd);
outputConfig.bit_mask_polarity = 0;
outputConfig.color_components_flip = 0;
- outputConfig.pack_pattern = mdp3_ctrl_get_pack_pattern(mfd->fb_imgType);
outputConfig.pack_align = MDP3_DMA_OUTPUT_PACK_ALIGN_LSB;
outputConfig.color_comp_out_bits = (MDP3_DMA_OUTPUT_COMP_BITS_8 << 4) |
(MDP3_DMA_OUTPUT_COMP_BITS_8 << 2)|
MDP3_DMA_OUTPUT_COMP_BITS_8;
+ if (dma->update_src_cfg) {
+ /* configuration has been updated through PREPARE call */
+ sourceConfig.format = dma->source_config.format;
+ sourceConfig.stride = dma->source_config.stride;
+ outputConfig.pack_pattern = dma->output_config.pack_pattern;
+ } else {
+ sourceConfig.format =
+ mdp3_ctrl_get_source_format(mfd->fb_imgType);
+ outputConfig.pack_pattern =
+ mdp3_ctrl_get_pack_pattern(mfd->fb_imgType);
+ sourceConfig.stride = fix->line_length;
+ }
+
te.frame_rate = panel_info->mipi.frame_rate;
te.hw_vsync_mode = panel_info->mipi.hw_vsync_mode;
te.tear_check_en = panel_info->te.tear_check_en;
@@ -576,10 +706,19 @@ static int mdp3_ctrl_dma_init(struct msm_fb_data_type *mfd,
te.rd_ptr_irq = panel_info->te.rd_ptr_irq;
te.refx100 = panel_info->te.refx100;
- if (dma->dma_config)
- rc = dma->dma_config(dma, &sourceConfig, &outputConfig);
- else
+ if (dma->dma_config) {
+ if (!panel_info->partial_update_enabled) {
+ dma->roi.w = sourceConfig.width;
+ dma->roi.h = sourceConfig.height;
+ dma->roi.x = sourceConfig.x;
+ dma->roi.y = sourceConfig.y;
+ }
+ rc = dma->dma_config(dma, &sourceConfig, &outputConfig,
+ mdp3_session->in_splash_screen);
+ } else {
+ pr_err("%s: dma config failed\n", __func__);
rc = -EINVAL;
+ }
if (outputConfig.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
if (dma->dma_sync_config)
@@ -609,22 +748,50 @@ static int mdp3_ctrl_on(struct msm_fb_data_type *mfd)
return -ENODEV;
}
mutex_lock(&mdp3_session->lock);
+
+ panel = mdp3_session->panel;
+ pr_err("%s %d in_splash_screen %d\n", __func__, __LINE__,
+ mdp3_session->in_splash_screen);
+ /* make sure DSI host is initialized properly */
+ if (panel) {
+ pr_debug("%s : dsi host init, power state = %d Splash %d\n",
+ __func__, mfd->panel_power_state,
+ mdp3_session->in_splash_screen);
+ if (mdss_fb_is_power_on_lp(mfd) ||
+ mdp3_session->in_splash_screen) {
+ /* Turn on panel so that it can exit low power mode */
+ pr_err("%s %d\n", __func__, __LINE__);
+ mdp3_clk_enable(1, 0);
+ rc = panel->event_handler(panel,
+ MDSS_EVENT_LINK_READY, NULL);
+ rc |= panel->event_handler(panel,
+ MDSS_EVENT_UNBLANK, NULL);
+ rc |= panel->event_handler(panel,
+ MDSS_EVENT_PANEL_ON, NULL);
+ mdp3_clk_enable(0, 0);
+ }
+ }
+
if (mdp3_session->status) {
- pr_debug("fb%d is on already", mfd->index);
- goto on_error;
+ pr_err("fb%d is on already\n", mfd->index);
+ goto end;
}
if (mdp3_session->intf->active) {
pr_debug("continuous splash screen, initialized already\n");
- goto on_error;
+ mdp3_session->status = 1;
+ goto end;
}
- mdp3_enable_regulator(true);
- rc = mdp3_footswitch_ctrl(1);
- if (rc) {
- pr_err("fail to enable mdp footswitch ctrl\n");
- goto on_error;
- }
+ /*
+ * Get a reference to the runtime pm device.
+ * If idle pc feature is enabled, it will be released
+ * at end of this routine else, when device is turned off.
+ */
+ pm_runtime_get_sync(&mdp3_res->pdev->dev);
+
+ /* Increment the overlay active count */
+ atomic_inc(&mdp3_res->active_intf_cnt);
mdp3_ctrl_notifier_register(mdp3_session,
&mdp3_session->mfd->mdp_sync_pt_data.notifier);
@@ -635,19 +802,35 @@ static int mdp3_ctrl_on(struct msm_fb_data_type *mfd)
goto on_error;
}
- panel = mdp3_session->panel;
+ rc = mdp3_dynamic_clock_gating_ctrl(0);
+ if (rc) {
+ pr_err("fail to disable dynamic clock gating\n");
+ goto on_error;
+ }
+ mdp3_qos_remapper_setup(panel);
+
+ rc = mdp3_ctrl_res_req_clk(mfd, 1);
+ if (rc) {
+ pr_err("fail to request mdp clk resource\n");
+ goto on_error;
+ }
+
if (panel->event_handler) {
rc = panel->event_handler(panel, MDSS_EVENT_LINK_READY, NULL);
rc |= panel->event_handler(panel, MDSS_EVENT_UNBLANK, NULL);
rc |= panel->event_handler(panel, MDSS_EVENT_PANEL_ON, NULL);
+ if (panel->panel_info.type == MIPI_CMD_PANEL) {
+ struct dsi_panel_clk_ctrl clk_ctrl;
+
+ clk_ctrl.state = MDSS_DSI_CLK_ON;
+ clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+ rc |= panel->event_handler(panel,
+ MDSS_EVENT_PANEL_CLK_CTRL,
+ (void *)&clk_ctrl);
}
- if (rc) {
- pr_err("fail to turn on the panel\n");
- goto on_error;
}
- rc = mdp3_ctrl_res_req_clk(mfd, 1);
if (rc) {
- pr_err("fail to request mdp clk resource\n");
+ pr_err("fail to turn on the panel\n");
goto on_error;
}
@@ -671,10 +854,22 @@ static int mdp3_ctrl_on(struct msm_fb_data_type *mfd)
mdp3_session->clk_on = 1;
mdp3_session->first_commit = true;
+ if (mfd->panel_info->panel_dead)
+ mdp3_session->esd_recovery = true;
-on_error:
- if (!rc)
mdp3_session->status = 1;
+
+ mdp3_ctrl_pp_resume(mfd);
+on_error:
+ if (rc || (mdp3_res->idle_pc_enabled &&
+ (mfd->panel_info->type == MIPI_CMD_PANEL))) {
+ if (rc) {
+ pr_err("Failed to turn on fb%d\n", mfd->index);
+ atomic_dec(&mdp3_res->active_intf_cnt);
+ }
+ pm_runtime_put(&mdp3_res->pdev->dev);
+ }
+end:
mutex_unlock(&mdp3_session->lock);
return rc;
}
@@ -682,6 +877,7 @@ on_error:
static int mdp3_ctrl_off(struct msm_fb_data_type *mfd)
{
int rc = 0;
+ bool intf_stopped = true;
struct mdp3_session_data *mdp3_session;
struct mdss_panel_data *panel;
@@ -693,116 +889,135 @@ static int mdp3_ctrl_off(struct msm_fb_data_type *mfd)
return -ENODEV;
}
+ /*
+ * Keep a reference to the runtime pm until the overlay is turned
+ * off, and then release this last reference at the end. This will
+ * help in distinguishing between idle power collapse versus suspend
+ * power collapse
+ */
+ pm_runtime_get_sync(&mdp3_res->pdev->dev);
+
panel = mdp3_session->panel;
mutex_lock(&mdp3_session->lock);
- if (panel && panel->set_backlight)
- panel->set_backlight(panel, 0);
-
- if (!mdp3_session->status) {
- pr_debug("fb%d is off already", mfd->index);
- goto off_error;
+ pr_debug("Requested power state = %d\n", mfd->panel_power_state);
+ if (mdss_fb_is_power_on_lp(mfd)) {
+ /*
+ * Transition to low power
+ * As display updates are expected in low power mode,
+ * keep the interface and clocks on.
+ */
+ intf_stopped = false;
+ } else {
+ /* Transition to display off */
+ if (!mdp3_session->status) {
+ pr_debug("fb%d is off already", mfd->index);
+ goto off_error;
+ }
+ if (panel && panel->set_backlight)
+ panel->set_backlight(panel, 0);
}
- mdp3_ctrl_clk_enable(mfd, 1);
-
- mdp3_histogram_stop(mdp3_session, MDP_BLOCK_DMA_P);
-
+ /*
+ * While transitioning from interactive to low power,
+ * events need to be sent to the interface so that the
+ * panel can be configured in low power mode
+ */
if (panel->event_handler)
- rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF, NULL);
- if (rc)
- pr_err("fail to turn off the panel\n");
-
- rc = mdp3_session->dma->stop(mdp3_session->dma, mdp3_session->intf);
+ rc = panel->event_handler(panel, MDSS_EVENT_BLANK,
+ (void *) (long int)mfd->panel_power_state);
if (rc)
- pr_debug("fail to stop the MDP3 dma\n");
- /* Wait for TG to turn off */
- msleep(20);
-
- mdp3_irq_deregister();
-
- pr_debug("mdp3_ctrl_off stop clock\n");
- if (mdp3_session->clk_on) {
- rc = mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P);
+ pr_err("EVENT_BLANK error (%d)\n", rc);
+
+ if (intf_stopped) {
+ if (!mdp3_session->clk_on)
+ mdp3_ctrl_clk_enable(mfd, 1);
+ /* PP related programming for ctrl off */
+ mdp3_histogram_stop(mdp3_session, MDP_BLOCK_DMA_P);
+ mutex_lock(&mdp3_session->dma->pp_lock);
+ mdp3_session->dma->ccs_config.ccs_dirty = false;
+ mdp3_session->dma->lut_config.lut_dirty = false;
+ mutex_unlock(&mdp3_session->dma->pp_lock);
+
+ rc = mdp3_session->dma->stop(mdp3_session->dma,
+ mdp3_session->intf);
if (rc)
- pr_err("mdp clock resource release failed\n");
-
- pr_debug("mdp3_ctrl_off stop dsi controller\n");
- if (panel->event_handler)
- rc = panel->event_handler(panel,
- MDSS_EVENT_BLANK, NULL);
- if (rc)
- pr_err("fail to turn off the panel\n");
- }
-
- mdp3_ctrl_notifier_unregister(mdp3_session,
- &mdp3_session->mfd->mdp_sync_pt_data.notifier);
- mdp3_enable_regulator(false);
- mdp3_footswitch_ctrl(0);
- mdp3_session->vsync_enabled = 0;
- atomic_set(&mdp3_session->vsync_countdown, 0);
- atomic_set(&mdp3_session->dma_done_cnt, 0);
- mdp3_session->clk_on = 0;
- mdp3_session->in_splash_screen = 0;
-off_error:
- mdp3_session->status = 0;
- mdp3_bufq_deinit(&mdp3_session->bufq_out);
- if (mdp3_session->overlay.id != MSMFB_NEW_REQUEST) {
- mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
- mdp3_bufq_deinit(&mdp3_session->bufq_in);
- }
- mutex_unlock(&mdp3_session->lock);
- return 0;
-}
+ pr_debug("fail to stop the MDP3 dma\n");
+ /* Wait to ensure TG to turn off */
+ msleep(20);
+ mfd->panel_info->cont_splash_enabled = 0;
-static int mdp3_ctrl_reset_cmd(struct msm_fb_data_type *mfd)
-{
- int rc = 0;
- struct mdp3_session_data *mdp3_session;
- struct mdp3_dma *mdp3_dma;
- struct mdss_panel_data *panel;
- struct mdp3_notification vsync_client;
+ /* Disable Auto refresh once continuous splash disabled */
+ mdp3_autorefresh_disable(mfd->panel_info);
+ mdp3_splash_done(mfd->panel_info);
- pr_debug("mdp3_ctrl_reset_cmd\n");
- mdp3_session = (struct mdp3_session_data *)mfd->mdp.private1;
- if (!mdp3_session || !mdp3_session->panel || !mdp3_session->dma ||
- !mdp3_session->intf) {
- pr_err("mdp3_ctrl_reset no device");
- return -ENODEV;
+ mdp3_irq_deregister();
}
- panel = mdp3_session->panel;
- mdp3_dma = mdp3_session->dma;
- mutex_lock(&mdp3_session->lock);
+ if (panel->event_handler)
+ rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF,
+ (void *) (long int)mfd->panel_power_state);
+ if (rc)
+ pr_err("EVENT_PANEL_OFF error (%d)\n", rc);
+
+ if (intf_stopped) {
+ if (mdp3_session->clk_on) {
+ pr_debug("mdp3_ctrl_off stop clock\n");
+ if (panel->event_handler &&
+ (panel->panel_info.type == MIPI_CMD_PANEL)) {
+ struct dsi_panel_clk_ctrl clk_ctrl;
+
+ clk_ctrl.state = MDSS_DSI_CLK_OFF;
+ clk_ctrl.client = DSI_CLK_REQ_MDP_CLIENT;
+ rc |= panel->event_handler(panel,
+ MDSS_EVENT_PANEL_CLK_CTRL,
+ (void *)&clk_ctrl);
+ }
- vsync_client = mdp3_dma->vsync_client;
+ rc = mdp3_dynamic_clock_gating_ctrl(1);
+ rc = mdp3_res_update(0, 1, MDP3_CLIENT_DMA_P);
+ if (rc)
+ pr_err("mdp clock resource release failed\n");
+ }
- rc = mdp3_dma->stop(mdp3_dma, mdp3_session->intf);
- if (rc) {
- pr_err("fail to stop the MDP3 dma\n");
- goto reset_error;
- }
+ mdp3_ctrl_notifier_unregister(mdp3_session,
+ &mdp3_session->mfd->mdp_sync_pt_data.notifier);
- rc = mdp3_iommu_enable();
- if (rc) {
- pr_err("fail to attach dma iommu\n");
- goto reset_error;
+ mdp3_session->vsync_enabled = 0;
+ atomic_set(&mdp3_session->vsync_countdown, 0);
+ atomic_set(&mdp3_session->dma_done_cnt, 0);
+ mdp3_session->clk_on = 0;
+ mdp3_session->in_splash_screen = 0;
+ mdp3_res->solid_fill_vote_en = false;
+ mdp3_session->status = 0;
+ if (atomic_dec_return(&mdp3_res->active_intf_cnt) != 0) {
+ pr_warn("active_intf_cnt unbalanced\n");
+ atomic_set(&mdp3_res->active_intf_cnt, 0);
+ }
+ /*
+ * Release the pm runtime reference held when
+ * idle pc feature is not enabled
+ */
+ if (!mdp3_res->idle_pc_enabled ||
+ (mfd->panel_info->type != MIPI_CMD_PANEL)) {
+ rc = pm_runtime_put(&mdp3_res->pdev->dev);
+ if (rc)
+ pr_err("%s: pm_runtime_put failed (rc %d)\n",
+ __func__, rc);
+ }
+ mdp3_bufq_deinit(&mdp3_session->bufq_out);
+ if (mdp3_session->overlay.id != MSMFB_NEW_REQUEST) {
+ mdp3_session->overlay.id = MSMFB_NEW_REQUEST;
+ mdp3_bufq_deinit(&mdp3_session->bufq_in);
+ }
}
-
- mdp3_ctrl_intf_init(mfd, mdp3_session->intf);
- mdp3_ctrl_dma_init(mfd, mdp3_dma);
-
- if (vsync_client.handler)
- mdp3_dma->vsync_enable(mdp3_dma, &vsync_client);
-
- mdp3_session->first_commit = true;
- mdp3_session->in_splash_screen = 0;
-
-reset_error:
+off_error:
mutex_unlock(&mdp3_session->lock);
- return rc;
-}
+ /* Release the last reference to the runtime device */
+ pm_runtime_put(&mdp3_res->pdev->dev);
+ return 0;
+}
static int mdp3_ctrl_reset(struct msm_fb_data_type *mfd)
{
@@ -820,73 +1035,44 @@ static int mdp3_ctrl_reset(struct msm_fb_data_type *mfd)
return -ENODEV;
}
- if (mfd->panel.type == MIPI_CMD_PANEL) {
- rc = mdp3_ctrl_reset_cmd(mfd);
- return rc;
- }
-
panel = mdp3_session->panel;
mdp3_dma = mdp3_session->dma;
mutex_lock(&mdp3_session->lock);
-
- vsync_client = mdp3_dma->vsync_client;
- if (panel && panel->set_backlight)
- panel->set_backlight(panel, 0);
-
- rc = panel->event_handler(panel, MDSS_EVENT_PANEL_OFF, NULL);
- if (rc)
- pr_err("fail to turn off panel\n");
-
- rc = mdp3_dma->stop(mdp3_dma, mdp3_session->intf);
- if (rc) {
- pr_err("fail to stop the MDP3 dma %d\n", rc);
- goto reset_error;
- }
-
- rc = mdp3_put_mdp_dsi_clk();
- if (rc) {
- pr_err("fail to release mdp clocks\n");
- goto reset_error;
- }
-
- rc = panel->event_handler(panel, MDSS_EVENT_BLANK, NULL);
- if (rc) {
- pr_err("fail to blank the panel\n");
- goto reset_error;
+ if (mdp3_res->idle_pc) {
+ mdp3_clk_enable(1, 0);
+ mdp3_dynamic_clock_gating_ctrl(0);
+ mdp3_qos_remapper_setup(panel);
}
- rc = mdp3_iommu_enable();
+ rc = mdp3_iommu_enable(MDP3_CLIENT_DMA_P);
if (rc) {
pr_err("fail to attach dma iommu\n");
+ if (mdp3_res->idle_pc)
+ mdp3_clk_enable(0, 0);
goto reset_error;
}
- rc = panel->event_handler(panel, MDSS_EVENT_UNBLANK, NULL);
- if (rc) {
- pr_err("fail to unblank the panel\n");
- goto reset_error;
- }
-
- rc = panel->event_handler(panel, MDSS_EVENT_PANEL_ON, NULL);
- if (rc) {
- pr_err("fail to turn on the panel\n");
- goto reset_error;
- }
-
- rc = mdp3_get_mdp_dsi_clk();
- if (rc) {
- pr_err("fail to turn on mdp clks\n");
- goto reset_error;
- }
+ vsync_client = mdp3_dma->vsync_client;
mdp3_ctrl_intf_init(mfd, mdp3_session->intf);
mdp3_ctrl_dma_init(mfd, mdp3_dma);
-
+ mdp3_ppp_init();
+ mdp3_ctrl_pp_resume(mfd);
if (vsync_client.handler)
mdp3_dma->vsync_enable(mdp3_dma, &vsync_client);
- mdp3_session->first_commit = true;
+ if (!mdp3_res->idle_pc) {
+ mdp3_session->first_commit = true;
+ mfd->panel_info->cont_splash_enabled = 0;
mdp3_session->in_splash_screen = 0;
+ mdp3_splash_done(mfd->panel_info);
+ /* Disable Auto refresh */
+ mdp3_autorefresh_disable(mfd->panel_info);
+ } else {
+ mdp3_res->idle_pc = false;
+ mdp3_clk_enable(0, 0);
+ mdp3_iommu_disable(MDP3_CLIENT_DMA_P);
+ }
reset_error:
mutex_unlock(&mdp3_session->lock);
@@ -926,13 +1112,15 @@ static int mdp3_overlay_set(struct msm_fb_data_type *mfd,
stride = req->src.width * ppp_bpp(req->src.format);
format = mdp3_ctrl_get_source_format(req->src.format);
- mutex_lock(&mdp3_session->lock);
if (mdp3_session->overlay.id != req->id)
pr_err("overlay was not released, continue to recover\n");
-
- mdp3_session->overlay = *req;
+ /*
+ * A change in overlay structure will always come with
+ * MSMFB_NEW_REQUEST for MDP3
+ */
if (req->id == MSMFB_NEW_REQUEST) {
+ mutex_lock(&mdp3_session->lock);
if (dma->source_config.stride != stride ||
dma->source_config.format != format) {
dma->source_config.format = format;
@@ -941,11 +1129,11 @@ static int mdp3_overlay_set(struct msm_fb_data_type *mfd,
mdp3_ctrl_get_pack_pattern(req->src.format);
dma->update_src_cfg = true;
}
+ mdp3_session->overlay = *req;
mdp3_session->overlay.id = 1;
req->id = 1;
- }
-
mutex_unlock(&mdp3_session->lock);
+ }
return rc;
}
@@ -983,22 +1171,25 @@ static int mdp3_overlay_queue_buffer(struct msm_fb_data_type *mfd,
struct mdp3_img_data data;
struct mdp3_dma *dma = mdp3_session->dma;
- rc = mdp3_get_img(img, &data);
+ memset(&data, 0, sizeof(struct mdp3_img_data));
+ rc = mdp3_get_img(img, &data, MDP3_CLIENT_DMA_P);
if (rc) {
pr_err("fail to get overlay buffer\n");
return rc;
}
if (data.len < dma->source_config.stride * dma->source_config.height) {
- pr_err("buf length is smaller than required by dma configuration\n");
- mdp3_put_img(&data);
+ pr_err("buf size(0x%lx) is smaller than dma config(0x%x)\n",
+ data.len, (dma->source_config.stride *
+ dma->source_config.height));
+ mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
return -EINVAL;
}
rc = mdp3_bufq_push(&mdp3_session->bufq_in, &data);
if (rc) {
pr_err("fail to queue the overlay buffer, buffer drop\n");
- mdp3_put_img(&data);
+ mdp3_put_img(&data, MDP3_CLIENT_DMA_P);
return rc;
}
return 0;
@@ -1039,10 +1230,11 @@ bool update_roi(struct mdp3_rect oldROI, struct mdp_rect newROI)
bool is_roi_valid(struct mdp3_dma_source source_config, struct mdp_rect roi)
{
- return ((roi.x >= source_config.x) &&
+ return (roi.w > 0) && (roi.h > 0) &&
+ (roi.x >= source_config.x) &&
((roi.x + roi.w) <= source_config.width) &&
(roi.y >= source_config.y) &&
- ((roi.y + roi.h) <= source_config.height));
+ ((roi.y + roi.h) <= source_config.height);
}
static int mdp3_ctrl_display_commit_kickoff(struct msm_fb_data_type *mfd,
@@ -1052,7 +1244,7 @@ static int mdp3_ctrl_display_commit_kickoff(struct msm_fb_data_type *mfd,
struct mdp3_img_data *data;
struct mdss_panel_info *panel_info;
int rc = 0;
- bool reset_done = false;
+ static bool splash_done;
struct mdss_panel_data *panel;
if (!mfd || !mfd->mdp.private1)
@@ -1067,6 +1259,7 @@ static int mdp3_ctrl_display_commit_kickoff(struct msm_fb_data_type *mfd,
pr_debug("no buffer in queue yet\n");
return -EPERM;
}
+
if (panel_info->partial_update_enabled &&
is_roi_valid(mdp3_session->dma->source_config, cmt_data->l_roi)
&& update_roi(mdp3_session->dma->roi, cmt_data->l_roi)) {
@@ -1075,17 +1268,23 @@ static int mdp3_ctrl_display_commit_kickoff(struct msm_fb_data_type *mfd,
mdp3_session->dma->roi.w = cmt_data->l_roi.w;
mdp3_session->dma->roi.h = cmt_data->l_roi.h;
mdp3_session->dma->update_src_cfg = true;
+ pr_debug("%s: ROI: x=%d y=%d w=%d h=%d\n", __func__,
+ mdp3_session->dma->roi.x,
+ mdp3_session->dma->roi.y,
+ mdp3_session->dma->roi.w,
+ mdp3_session->dma->roi.h);
}
panel = mdp3_session->panel;
- if (mdp3_session->in_splash_screen) {
- pr_debug("continuous splash screen, IOMMU not attached\n");
+ if (mdp3_session->in_splash_screen ||
+ mdp3_res->idle_pc) {
+ pr_debug("%s: reset- in_splash = %d, idle_pc = %d", __func__,
+ mdp3_session->in_splash_screen, mdp3_res->idle_pc);
rc = mdp3_ctrl_reset(mfd);
if (rc) {
pr_err("fail to reset display\n");
return -EINVAL;
}
- reset_done = true;
}
mutex_lock(&mdp3_session->lock);
@@ -1136,18 +1335,33 @@ static int mdp3_ctrl_display_commit_kickoff(struct msm_fb_data_type *mfd,
mdp3_release_splash_memory(mfd);
data = mdp3_bufq_pop(&mdp3_session->bufq_out);
if (data)
- mdp3_put_img(data);
+ mdp3_put_img(data, MDP3_CLIENT_DMA_P);
}
if (mdp3_session->first_commit) {
- /*wait for one frame time to ensure frame is sent to panel*/
- msleep(1000 / panel_info->mipi.frame_rate);
+ /*wait to ensure frame is sent to panel*/
+ if (panel_info->mipi.init_delay)
+ msleep(((1000 / panel_info->mipi.frame_rate) + 1) *
+ panel_info->mipi.init_delay);
+ else
+ msleep(1000 / panel_info->mipi.frame_rate);
mdp3_session->first_commit = false;
+ if (panel)
+ rc |= panel->event_handler(panel,
+ MDSS_EVENT_POST_PANEL_ON, NULL);
}
mdp3_session->vsync_before_commit = 0;
- if (reset_done && (panel && panel->set_backlight))
- panel->set_backlight(panel, panel->panel_info.bl_max);
+ if (!splash_done || mdp3_session->esd_recovery == true) {
+ if (panel && panel->set_backlight)
+ panel->set_backlight(panel, panel->panel_info.bl_max);
+ splash_done = true;
+ mdp3_session->esd_recovery = false;
+ }
+
+ /* start vsync tick countdown for cmd mode if vsync isn't enabled */
+ if (mfd->panel.type == MIPI_CMD_PANEL && !mdp3_session->vsync_enabled)
+ mdp3_ctrl_vsync_enable(mdp3_session->mfd, 0);
mutex_unlock(&mdp3_session->lock);
@@ -1204,8 +1418,10 @@ static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd)
if (!mdp3_session || !mdp3_session->dma)
return;
- if (mdp3_session->in_splash_screen) {
- pr_debug("continuous splash screen, IOMMU not attached\n");
+ if (mdp3_session->in_splash_screen ||
+ mdp3_res->idle_pc) {
+ pr_debug("%s: reset- in_splash = %d, idle_pc = %d", __func__,
+ mdp3_session->in_splash_screen, mdp3_res->idle_pc);
rc = mdp3_ctrl_reset(mfd);
if (rc) {
pr_err("fail to reset display\n");
@@ -1250,7 +1466,7 @@ static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd)
MDP_NOTIFY_FRAME_TIMEOUT);
} else {
if (mdp3_ctrl_get_intf_type(mfd) ==
- MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) {
+ MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) {
mdp3_ctrl_notify(mdp3_session,
MDP_NOTIFY_FRAME_DONE);
}
@@ -1265,17 +1481,26 @@ static void mdp3_ctrl_pan_display(struct msm_fb_data_type *mfd)
mdp3_clk_enable(0, 0);
}
+ panel = mdp3_session->panel;
if (mdp3_session->first_commit) {
- /*wait for one frame time to ensure frame is sent to panel*/
- msleep(1000 / panel_info->mipi.frame_rate);
+ /*wait to ensure frame is sent to panel*/
+ if (panel_info->mipi.init_delay)
+ msleep(((1000 / panel_info->mipi.frame_rate) + 1) *
+ panel_info->mipi.init_delay);
+ else
+ msleep(1000 / panel_info->mipi.frame_rate);
mdp3_session->first_commit = false;
+ if (panel)
+ panel->event_handler(panel, MDSS_EVENT_POST_PANEL_ON,
+ NULL);
}
mdp3_session->vsync_before_commit = 0;
- panel = mdp3_session->panel;
- if (!splash_done && (panel && panel->set_backlight)) {
- panel->set_backlight(panel, panel->panel_info.bl_max);
+ if (!splash_done || mdp3_session->esd_recovery == true) {
+ if (panel && panel->set_backlight)
+ panel->set_backlight(panel, panel->panel_info.bl_max);
splash_done = true;
+ mdp3_session->esd_recovery = false;
}
@@ -1319,7 +1544,7 @@ static int mdp3_get_metadata(struct msm_fb_data_type *mfd,
mfd->panel_info->mipi.frame_rate;
break;
case metadata_op_get_caps:
- metadata->data.caps.mdp_rev = 304;
+ metadata->data.caps.mdp_rev = 305;
metadata->data.caps.rgb_pipes = 0;
metadata->data.caps.vig_pipes = 0;
metadata->data.caps.dma_pipes = 1;
@@ -1388,10 +1613,17 @@ int mdp3_validate_scale_config(struct mdp_bl_scale_data *data)
int mdp3_validate_csc_data(struct mdp_csc_cfg_data *data)
{
int i;
+ bool mv_valid = false;
for (i = 0; i < 9; i++) {
if (data->csc_data.csc_mv[i] >=
MDP_HISTOGRAM_CSC_MATRIX_MAX)
return -EINVAL;
+ if ((!mv_valid) && (data->csc_data.csc_mv[i] != 0))
+ mv_valid = true;
+ }
+ if (!mv_valid) {
+ pr_err("%s: black screen data! csc_mv is all 0s\n", __func__);
+ return -EINVAL;
}
for (i = 0; i < 3; i++) {
if (data->csc_data.csc_pre_bv[i] >=
@@ -1418,6 +1650,12 @@ static int mdp3_histogram_start(struct mdp3_session_data *session,
int ret;
struct mdp3_dma_histogram_config histo_config;
+ mutex_lock(&session->lock);
+ if (!session->status) {
+ mutex_unlock(&session->lock);
+ return -EPERM;
+ }
+
pr_debug("mdp3_histogram_start\n");
ret = mdp3_validate_start_req(req);
@@ -1433,9 +1671,9 @@ static int mdp3_histogram_start(struct mdp3_session_data *session,
mutex_lock(&session->histo_lock);
if (session->histo_status) {
- pr_err("mdp3_histogram_start already started\n");
+ pr_info("mdp3_histogram_start already started\n");
mutex_unlock(&session->histo_lock);
- return -EBUSY;
+ return 0;
}
mdp3_res_update(1, 0, MDP3_CLIENT_DMA_P);
@@ -1466,6 +1704,7 @@ static int mdp3_histogram_start(struct mdp3_session_data *session,
histogram_start_err:
mdp3_res_update(0, 0, MDP3_CLIENT_DMA_P);
mutex_unlock(&session->histo_lock);
+ mutex_unlock(&session->lock);
return ret;
}
@@ -1483,6 +1722,7 @@ static int mdp3_histogram_stop(struct mdp3_session_data *session,
mutex_lock(&session->histo_lock);
if (!session->histo_status) {
+ pr_debug("mdp3_histogram_stop already stopped!");
ret = 0;
goto histogram_stop_err;
}
@@ -1512,21 +1752,21 @@ static int mdp3_histogram_collect(struct mdp3_session_data *session,
return -EINVAL;
}
- if (!session->clk_on) {
- pr_debug("mdp/dsi clock off currently\n");
- return -EPERM;
- }
-
mutex_lock(&session->histo_lock);
if (!session->histo_status) {
- pr_err("mdp3_histogram_collect not started\n");
+ pr_debug("mdp3_histogram_collect not started\n");
mutex_unlock(&session->histo_lock);
- return -EPERM;
+ return -EPROTO;
}
mutex_unlock(&session->histo_lock);
+ if (!session->clk_on) {
+ pr_debug("mdp/dsi clock off currently\n");
+ return -EPERM;
+ }
+
mdp3_clk_enable(1, 0);
ret = session->dma->get_histo(session->dma);
mdp3_clk_enable(0, 0);
@@ -1594,14 +1834,16 @@ static int mdp3_csc_config(struct mdp3_session_data *session,
return -EINVAL;
}
- session->cc_vect_sel = (session->cc_vect_sel + 1) % 2;
+ mutex_lock(&session->lock);
+ mutex_lock(&session->dma->pp_lock);
+ session->dma->cc_vect_sel = (session->dma->cc_vect_sel + 1) % 2;
config.ccs_enable = 1;
- config.ccs_sel = session->cc_vect_sel;
- config.pre_limit_sel = session->cc_vect_sel;
- config.post_limit_sel = session->cc_vect_sel;
- config.pre_bias_sel = session->cc_vect_sel;
- config.post_bias_sel = session->cc_vect_sel;
+ config.ccs_sel = session->dma->cc_vect_sel;
+ config.pre_limit_sel = session->dma->cc_vect_sel;
+ config.post_limit_sel = session->dma->cc_vect_sel;
+ config.pre_bias_sel = session->dma->cc_vect_sel;
+ config.post_bias_sel = session->dma->cc_vect_sel;
config.ccs_dirty = true;
ccs.mv = data->csc_data.csc_mv;
@@ -1610,10 +1852,13 @@ static int mdp3_csc_config(struct mdp3_session_data *session,
ccs.pre_lv = data->csc_data.csc_pre_lv;
ccs.post_lv = data->csc_data.csc_post_lv;
- mutex_lock(&session->lock);
+ /* cache one copy of setting for suspend/resume reconfiguring */
+ session->dma->ccs_cache = *data;
+
mdp3_clk_enable(1, 0);
ret = session->dma->config_ccs(session->dma, &config, &ccs);
mdp3_clk_enable(0, 0);
+ mutex_unlock(&session->dma->pp_lock);
mutex_unlock(&session->lock);
return ret;
}
@@ -1646,6 +1891,11 @@ static int mdp3_pp_ioctl(struct msm_fb_data_type *mfd,
&mdp_pp.data.bl_scale_data);
break;
case mdp_op_csc_cfg:
+ /* Checking state of dyn_pu before programming CSC block */
+ if (mdp3_session->dyn_pu_state) {
+ pr_debug("Partial update feature is enabled.\n");
+ return -EPERM;
+ }
ret = mdp3_validate_csc_data(&(mdp_pp.data.csc_cfg_data));
if (ret) {
pr_err("%s: invalid csc data\n", __func__);
@@ -1664,10 +1914,12 @@ static int mdp3_pp_ioctl(struct msm_fb_data_type *mfd,
ret = mdp3_ctrl_lut_read(mfd,
&(lut->data.rgb_lut_data));
else
- ret = mdp3_ctrl_lut_update(mfd,
+ ret = mdp3_ctrl_lut_config(mfd,
&(lut->data.rgb_lut_data));
if (ret)
- pr_err("%s: invalid rgb lut data\n", __func__);
+ pr_err("RGB LUT ioctl failed\n");
+ else
+ ret = copy_to_user(argp, &mdp_pp, sizeof(mdp_pp));
break;
default:
@@ -1746,6 +1998,23 @@ static int mdp3_validate_lut_data(struct fb_cmap *cmap)
return 0;
}
+static inline int mdp3_copy_lut_buffer(struct fb_cmap *dst, struct fb_cmap *src)
+{
+ if (!dst || !src || !dst->red || !dst->blue || !dst->green ||
+ !src->red || !src->green || !src->blue) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ dst->start = src->start;
+ dst->len = src->len;
+
+ memcpy(dst->red, src->red, MDP_LUT_SIZE * sizeof(u16));
+ memcpy(dst->green, src->green, MDP_LUT_SIZE * sizeof(u16));
+ memcpy(dst->blue, src->blue, MDP_LUT_SIZE * sizeof(u16));
+ return 0;
+}
+
static int mdp3_alloc_lut_buffer(struct platform_device *pdev, void **cmap)
{
struct fb_cmap *map;
@@ -1809,7 +2078,7 @@ static void mdp3_free_lut_buffer(struct platform_device *pdev, void **cmap)
map = NULL;
}
-static void mdp3_lut_combine_gain(struct fb_cmap *cmap, struct mdp3_dma *dma)
+static int mdp3_lut_combine_gain(struct fb_cmap *cmap, struct mdp3_dma *dma)
{
int i = 0;
u32 r = 0, g = 0, b = 0;
@@ -1819,7 +2088,7 @@ static void mdp3_lut_combine_gain(struct fb_cmap *cmap, struct mdp3_dma *dma)
!dma->gc_cmap->blue || !dma->hist_cmap->red ||
!dma->hist_cmap->green || !dma->hist_cmap->blue) {
pr_err("Invalid params\n");
- return;
+ return -EINVAL;
}
for (i = 1; i < MDP_LUT_SIZE; i++) {
@@ -1834,17 +2103,17 @@ static void mdp3_lut_combine_gain(struct fb_cmap *cmap, struct mdp3_dma *dma)
cmap->green[i] = (g >> 16) & 0xFF;
cmap->blue[i] = (b >> 16) & 0xFF;
}
+ return 0;
}
+/* Called from within pp_lock and session lock locked context */
static int mdp3_ctrl_lut_update(struct msm_fb_data_type *mfd,
- struct mdp_rgb_lut_data *cfg)
+ struct fb_cmap *cmap)
{
int rc = 0;
- bool data_validated = false;
struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
struct mdp3_dma *dma;
struct mdp3_dma_lut_config lut_config;
- struct fb_cmap *cmap;
dma = mdp3_session->dma;
@@ -1853,6 +2122,37 @@ static int mdp3_ctrl_lut_update(struct msm_fb_data_type *mfd,
return -EINVAL;
}
+ lut_config.lut_enable = 7;
+ lut_config.lut_sel = mdp3_session->lut_sel;
+ lut_config.lut_position = 1;
+ lut_config.lut_dirty = true;
+
+ if (!mdp3_session->status) {
+ pr_err("display off!\n");
+ return -EPERM;
+ }
+
+ mdp3_clk_enable(1, 0);
+ rc = dma->config_lut(dma, &lut_config, cmap);
+ mdp3_clk_enable(0, 0);
+ if (rc)
+ pr_err("mdp3_ctrl_lut_update failed\n");
+
+ mdp3_session->lut_sel = (mdp3_session->lut_sel + 1) % 2;
+ return rc;
+}
+
+static int mdp3_ctrl_lut_config(struct msm_fb_data_type *mfd,
+ struct mdp_rgb_lut_data *cfg)
+{
+ int rc = 0;
+ bool data_validated = false;
+ struct mdp3_session_data *mdp3_session = mfd->mdp.private1;
+ struct mdp3_dma *dma;
+ struct fb_cmap *cmap;
+
+ dma = mdp3_session->dma;
+
if (cfg->cmap.start + cfg->cmap.len > MDP_LUT_SIZE) {
pr_err("Invalid arguments\n");
return -EINVAL;
@@ -1864,7 +2164,8 @@ static int mdp3_ctrl_lut_update(struct msm_fb_data_type *mfd,
return -ENOMEM;
}
- mutex_lock(&mdp3_session->pp_lock);
+ mutex_lock(&mdp3_session->lock);
+ mutex_lock(&dma->pp_lock);
rc = copy_from_user(cmap->red + cfg->cmap.start,
cfg->cmap.red, sizeof(u16) * cfg->cmap.len);
rc |= copy_from_user(cmap->green + cfg->cmap.start,
@@ -1918,8 +2219,11 @@ static int mdp3_ctrl_lut_update(struct msm_fb_data_type *mfd,
goto exit_err;
}
}
- memcpy(dma->gc_cmap, cmap,
- sizeof(struct fb_cmap));
+ rc = mdp3_copy_lut_buffer(dma->gc_cmap, cmap);
+ if (rc) {
+ pr_err("Could not store GC to cache\n");
+ goto exit_err;
+ }
}
break;
case mdp_rgb_lut_hist:
@@ -1963,8 +2267,11 @@ static int mdp3_ctrl_lut_update(struct msm_fb_data_type *mfd,
goto exit_err;
}
}
- memcpy(dma->hist_cmap, cmap,
- sizeof(struct fb_cmap));
+ rc = mdp3_copy_lut_buffer(dma->hist_cmap, cmap);
+ if (rc) {
+ pr_err("Could not cache Hist LUT\n");
+ goto exit_err;
+ }
}
break;
default:
@@ -1978,34 +2285,21 @@ static int mdp3_ctrl_lut_update(struct msm_fb_data_type *mfd,
* of each the individual LUTs need to be applied onto a single LUT
* and applied in HW
*/
- if (dma->lut_sts & MDP3_LUT_HIST_GC_EN)
- mdp3_lut_combine_gain(cmap, dma);
-
- lut_config.lut_enable = 7;
- lut_config.lut_sel = mdp3_session->lut_sel;
- lut_config.lut_position = 0;
- lut_config.lut_dirty = true;
-
- mutex_lock(&mdp3_session->lock);
-
- if (!mdp3_session->status) {
- pr_err("display off!\n");
- mutex_unlock(&mdp3_session->lock);
- rc = -EPERM;
+ if ((dma->lut_sts & MDP3_LUT_HIST_EN) &&
+ (dma->lut_sts & MDP3_LUT_GC_EN)) {
+ rc = mdp3_lut_combine_gain(cmap, dma);
+ if (rc) {
+ pr_err("Combining gains failed rc = %d\n", rc);
goto exit_err;
}
+ }
- mdp3_clk_enable(1, 0);
- rc = dma->config_lut(dma, &lut_config, cmap);
- mdp3_clk_enable(0, 0);
+ rc = mdp3_ctrl_lut_update(mfd, cmap);
if (rc)
- pr_err("mdp3_ctrl_lut_update failed\n");
-
- mdp3_session->lut_sel = (mdp3_session->lut_sel + 1) % 2;
-
- mutex_unlock(&mdp3_session->lock);
+ pr_err("Updating LUT failed! rc = %d\n", rc);
exit_err:
- mutex_unlock(&mdp3_session->pp_lock);
+ mutex_unlock(&dma->pp_lock);
+ mutex_unlock(&mdp3_session->lock);
mdp3_free_lut_buffer(mfd->pdev, (void **) &cmap);
return rc;
}
@@ -2041,17 +2335,77 @@ static int mdp3_ctrl_lut_read(struct msm_fb_data_type *mfd,
cfg->cmap.start = cmap->start;
cfg->cmap.len = cmap->len;
- mutex_lock(&mdp3_session->pp_lock);
+ mutex_lock(&dma->pp_lock);
rc = copy_to_user(cfg->cmap.red, cmap->red, sizeof(u16) *
MDP_LUT_SIZE);
rc |= copy_to_user(cfg->cmap.green, cmap->green, sizeof(u16) *
MDP_LUT_SIZE);
rc |= copy_to_user(cfg->cmap.blue, cmap->blue, sizeof(u16) *
MDP_LUT_SIZE);
- mutex_unlock(&mdp3_session->pp_lock);
+ mutex_unlock(&dma->pp_lock);
return rc;
}
+/* Invoked from ctrl_on with session lock locked context */
+static void mdp3_ctrl_pp_resume(struct msm_fb_data_type *mfd)
+{
+ struct mdp3_session_data *mdp3_session;
+ struct mdp3_dma *dma;
+ struct fb_cmap *cmap;
+ int rc = 0;
+
+ mdp3_session = mfd->mdp.private1;
+ dma = mdp3_session->dma;
+
+ mutex_lock(&dma->pp_lock);
+ /*
+ * if dma->ccs_config.ccs_enable is set then DMA PP block was enabled
+ * via user space IOCTL.
+ * Then set dma->ccs_config.ccs_dirty flag
+ * Then PP block will be reconfigured when next kickoff comes.
+ */
+ if (dma->ccs_config.ccs_enable)
+ dma->ccs_config.ccs_dirty = true;
+
+ /*
+ * If gamma correction was enabled then we program the LUT registers
+ * with the last configuration data before suspend. If gamma correction
+ * is not enabled then we do not program anything. The LUT from
+ * histogram processing algorithms will program hardware based on new
+ * frame data if they are enabled.
+ */
+ if (dma->lut_sts & MDP3_LUT_GC_EN) {
+
+ rc = mdp3_alloc_lut_buffer(mfd->pdev, (void **)&cmap);
+ if (rc) {
+ pr_err("No memory for GC LUT, rc = %d\n", rc);
+ goto exit_err;
+ }
+
+ if (dma->lut_sts & MDP3_LUT_HIST_EN) {
+ rc = mdp3_lut_combine_gain(cmap, dma);
+ if (rc) {
+ pr_err("Combining the gain failed rc=%d\n", rc);
+ goto exit_err;
+ }
+ } else {
+ rc = mdp3_copy_lut_buffer(cmap, dma->gc_cmap);
+ if (rc) {
+ pr_err("Updating GC failed rc = %d\n", rc);
+ goto exit_err;
+ }
+ }
+
+ rc = mdp3_ctrl_lut_update(mfd, cmap);
+ if (rc)
+ pr_err("GC Lut update failed rc=%d\n", rc);
+exit_err:
+ mdp3_free_lut_buffer(mfd->pdev, (void **)&cmap);
+ }
+
+ mutex_unlock(&dma->pp_lock);
+}
+
static int mdp3_overlay_prepare(struct msm_fb_data_type *mfd,
struct mdp_overlay_list __user *user_ovlist)
{
@@ -2111,7 +2465,7 @@ static int mdp3_ctrl_ioctl_handler(struct msm_fb_data_type *mfd,
req = &mdp3_session->req_overlay;
if (!mdp3_session->status && cmd != MSMFB_METADATA_GET &&
- cmd != MSMFB_HISTOGRAM_STOP) {
+ cmd != MSMFB_HISTOGRAM_STOP && cmd != MSMFB_HISTOGRAM) {
pr_err("mdp3_ctrl_ioctl_handler, display off!\n");
return -EPERM;
}
@@ -2139,9 +2493,17 @@ static int mdp3_ctrl_ioctl_handler(struct msm_fb_data_type *mfd,
}
break;
case MSMFB_ASYNC_BLIT:
+ if (mdp3_session->in_splash_screen || mdp3_res->idle_pc) {
+ pr_err("%s: reset- in_splash = %d, idle_pc = %d",
+ __func__, mdp3_session->in_splash_screen,
+ mdp3_res->idle_pc);
+ mdp3_ctrl_reset(mfd);
+ }
rc = mdp3_ctrl_async_blit_req(mfd, argp);
break;
case MSMFB_BLIT:
+ if (mdp3_session->in_splash_screen)
+ mdp3_ctrl_reset(mfd);
rc = mdp3_ctrl_blit_req(mfd, argp);
break;
case MSMFB_METADATA_GET:
@@ -2296,7 +2658,6 @@ int mdp3_ctrl_init(struct msm_fb_data_type *mfd)
INIT_WORK(&mdp3_session->dma_done_work, mdp3_dispatch_dma_done);
atomic_set(&mdp3_session->vsync_countdown, 0);
mutex_init(&mdp3_session->histo_lock);
- mutex_init(&mdp3_session->pp_lock);
mdp3_session->dma = mdp3_get_dma_pipe(MDP3_DMA_CAP_ALL);
if (!mdp3_session->dma) {
rc = -ENODEV;
@@ -2346,6 +2707,11 @@ int mdp3_ctrl_init(struct msm_fb_data_type *mfd)
pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
goto init_done;
}
+ rc = sysfs_create_group(&dev->kobj, &generic_attr_group);
+ if (rc) {
+ pr_err("generic sysfs group creation failed, ret=%d\n", rc);
+ goto init_done;
+ }
mdp3_session->vsync_event_sd = sysfs_get_dirent(dev->kobj.sd,
"vsync_event");
@@ -2359,6 +2725,10 @@ int mdp3_ctrl_init(struct msm_fb_data_type *mfd)
if (rc)
pr_warn("problem creating link to mdp sysfs\n");
+ /* Enable PM runtime */
+ pm_runtime_set_suspended(&mdp3_res->pdev->dev);
+ pm_runtime_enable(&mdp3_res->pdev->dev);
+
kobject_uevent(&dev->kobj, KOBJ_ADD);
pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");
@@ -2369,12 +2739,19 @@ int mdp3_ctrl_init(struct msm_fb_data_type *mfd)
&mdp3_session->mfd->mdp_sync_pt_data.notifier);
}
+ /*
+ * Increment the overlay active count.
+ * This is needed to ensure that if idle power collapse kicks in
+ * right away, it would be handled correctly.
+ */
+ atomic_inc(&mdp3_res->active_intf_cnt);
if (splash_mismatch) {
pr_err("splash memory mismatch, stop splash\n");
mdp3_ctrl_off(mfd);
}
mdp3_session->vsync_before_commit = true;
+ mdp3_session->dyn_pu_state = mfd->panel_info->partial_update_enabled;
init_done:
if (IS_ERR_VALUE(rc))
kfree(mdp3_session);
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.h b/drivers/video/fbdev/msm/mdp3_ctrl.h
index 420907eb1525..0853ed5e7a43 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.h
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,7 +27,6 @@
#define MDP3_MAX_BUF_QUEUE 8
#define MDP3_LUT_HIST_EN 0x001
#define MDP3_LUT_GC_EN 0x002
-#define MDP3_LUT_HIST_GC_EN (MDP3_LUT_HIST_EN | MDP3_LUT_GC_EN)
struct mdp3_buffer_queue {
struct mdp3_img_data img_data[MDP3_MAX_BUF_QUEUE];
@@ -56,9 +55,7 @@ struct mdp3_session_data {
atomic_t dma_done_cnt;
int histo_status;
struct mutex histo_lock;
- struct mutex pp_lock;
int lut_sel;
- int cc_vect_sel;
bool vsync_before_commit;
bool first_commit;
int clk_on;
@@ -67,6 +64,8 @@ struct mdp3_session_data {
int vsync_enabled;
atomic_t vsync_countdown; /* Used to count down */
bool in_splash_screen;
+ bool esd_recovery;
+ int dyn_pu_state; /* dynamic partial update status */
bool dma_active;
struct completion dma_completion;
diff --git a/drivers/video/fbdev/msm/mdp3_dma.c b/drivers/video/fbdev/msm/mdp3_dma.c
index 8bac9d84edde..d4c83d6e33f0 100644
--- a/drivers/video/fbdev/msm/mdp3_dma.c
+++ b/drivers/video/fbdev/msm/mdp3_dma.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,7 @@
#include "mdp3.h"
#include "mdp3_dma.h"
#include "mdp3_hwio.h"
+#include "mdss_debug.h"
#define DMA_STOP_POLL_SLEEP_US 1000
#define DMA_STOP_POLL_TIMEOUT_US 200000
@@ -252,26 +253,6 @@ static void mdp3_dma_done_notifier(struct mdp3_dma *dma,
spin_unlock_irqrestore(&dma->dma_lock, flag);
}
-static void mdp3_dma_clk_auto_gating(struct mdp3_dma *dma, int enable)
-{
- u32 cgc;
- int clock_bit = 10;
-
- clock_bit += dma->dma_sel;
-
- if (enable) {
- cgc = MDP3_REG_READ(MDP3_REG_CGC_EN);
- cgc |= BIT(clock_bit);
- MDP3_REG_WRITE(MDP3_REG_CGC_EN, cgc);
-
- } else {
- cgc = MDP3_REG_READ(MDP3_REG_CGC_EN);
- cgc &= ~BIT(clock_bit);
- MDP3_REG_WRITE(MDP3_REG_CGC_EN, cgc);
- }
-}
-
-
int mdp3_dma_sync_config(struct mdp3_dma *dma,
struct mdp3_dma_source *source_config, struct mdp3_tear_check *te)
{
@@ -318,7 +299,8 @@ int mdp3_dma_sync_config(struct mdp3_dma *dma,
static int mdp3_dmap_config(struct mdp3_dma *dma,
struct mdp3_dma_source *source_config,
- struct mdp3_dma_output_config *output_config)
+ struct mdp3_dma_output_config *output_config,
+ bool splash_screen_active)
{
u32 dma_p_cfg_reg, dma_p_size, dma_p_out_xy;
@@ -334,22 +316,23 @@ static int mdp3_dmap_config(struct mdp3_dma *dma,
dma_p_size = source_config->width | (source_config->height << 16);
dma_p_out_xy = source_config->x | (source_config->y << 16);
-
- MDP3_REG_WRITE(MDP3_REG_DMA_P_CONFIG, dma_p_cfg_reg);
- MDP3_REG_WRITE(MDP3_REG_DMA_P_SIZE, dma_p_size);
- MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR, (u32)source_config->buf);
- MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_Y_STRIDE, source_config->stride);
- MDP3_REG_WRITE(MDP3_REG_DMA_P_OUT_XY, dma_p_out_xy);
-
- MDP3_REG_WRITE(MDP3_REG_DMA_P_FETCH_CFG, 0x40);
+ if (!splash_screen_active) {
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_CONFIG, dma_p_cfg_reg);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_SIZE, dma_p_size);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR,
+ (u32)source_config->buf);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_Y_STRIDE,
+ source_config->stride);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_OUT_XY, dma_p_out_xy);
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_FETCH_CFG, 0x40);
+ }
dma->source_config = *source_config;
dma->output_config = *output_config;
- dma->roi.w = dma->source_config.width;
- dma->roi.h = dma->source_config.height;
- dma->roi.x = dma->source_config.x;
- dma->roi.y = dma->source_config.y;
- mdp3_irq_enable(MDP3_INTR_LCDC_UNDERFLOW);
+
+ if (dma->output_config.out_sel != MDP3_DMA_OUTPUT_SEL_DSI_CMD)
+ mdp3_irq_enable(MDP3_INTR_LCDC_UNDERFLOW);
+
mdp3_dma_callback_setup(dma);
return 0;
}
@@ -374,7 +357,8 @@ static void mdp3_dmap_config_source(struct mdp3_dma *dma)
static int mdp3_dmas_config(struct mdp3_dma *dma,
struct mdp3_dma_source *source_config,
- struct mdp3_dma_output_config *output_config)
+ struct mdp3_dma_output_config *output_config,
+ bool splash_screen_active)
{
u32 dma_s_cfg_reg, dma_s_size, dma_s_out_xy;
@@ -391,14 +375,16 @@ static int mdp3_dmas_config(struct mdp3_dma *dma,
dma_s_size = source_config->width | (source_config->height << 16);
dma_s_out_xy = source_config->x | (source_config->y << 16);
- MDP3_REG_WRITE(MDP3_REG_DMA_S_CONFIG, dma_s_cfg_reg);
- MDP3_REG_WRITE(MDP3_REG_DMA_S_SIZE, dma_s_size);
- MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_ADDR, (u32)source_config->buf);
- MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_Y_STRIDE, source_config->stride);
- MDP3_REG_WRITE(MDP3_REG_DMA_S_OUT_XY, dma_s_out_xy);
-
- MDP3_REG_WRITE(MDP3_REG_SECONDARY_RD_PTR_IRQ, 0x10);
-
+ if (!splash_screen_active) {
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_CONFIG, dma_s_cfg_reg);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_SIZE, dma_s_size);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_ADDR,
+ (u32)source_config->buf);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_IBUF_Y_STRIDE,
+ source_config->stride);
+ MDP3_REG_WRITE(MDP3_REG_DMA_S_OUT_XY, dma_s_out_xy);
+ MDP3_REG_WRITE(MDP3_REG_SECONDARY_RD_PTR_IRQ, 0x10);
+ }
dma->source_config = *source_config;
dma->output_config = *output_config;
@@ -452,46 +438,7 @@ static int mdp3_dmap_cursor_config(struct mdp3_dma *dma,
return 0;
}
-static void mdp3_ccs_update(struct mdp3_dma *dma)
-{
- u32 cc_config;
- int updated = 0;
-
- cc_config = MDP3_REG_READ(MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG);
-
- if (dma->ccs_config.ccs_dirty) {
- cc_config &= DMA_CCS_CONFIG_MASK;
- if (dma->ccs_config.ccs_enable)
- cc_config |= BIT(3);
- else
- cc_config &= ~BIT(3);
- cc_config |= dma->ccs_config.ccs_sel << 5;
- cc_config |= dma->ccs_config.pre_bias_sel << 6;
- cc_config |= dma->ccs_config.post_bias_sel << 7;
- cc_config |= dma->ccs_config.pre_limit_sel << 8;
- cc_config |= dma->ccs_config.post_limit_sel << 9;
- dma->ccs_config.ccs_dirty = false;
- updated = 1;
- }
-
- if (dma->lut_config.lut_dirty) {
- cc_config &= DMA_LUT_CONFIG_MASK;
- cc_config |= dma->lut_config.lut_enable;
- cc_config |= dma->lut_config.lut_position << 4;
- cc_config |= dma->lut_config.lut_sel << 10;
- dma->lut_config.lut_dirty = false;
- updated = 1;
- }
- if (updated) {
- MDP3_REG_WRITE(MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG, cc_config);
-
- /* Make sure ccs configuration update is done before continuing
- with the DMA transfer */
- wmb();
- }
-}
-
-static int mdp3_dmap_ccs_config(struct mdp3_dma *dma,
+static int mdp3_dmap_ccs_config_internal(struct mdp3_dma *dma,
struct mdp3_dma_color_correct_config *config,
struct mdp3_dma_ccs *ccs)
{
@@ -542,10 +489,77 @@ static int mdp3_dmap_ccs_config(struct mdp3_dma *dma,
addr += 4;
}
}
+ return 0;
+}
+
+static void mdp3_ccs_update(struct mdp3_dma *dma, bool from_kickoff)
+{
+ u32 cc_config;
+ bool ccs_updated = false, lut_updated = false;
+ struct mdp3_dma_ccs ccs;
+
+ cc_config = MDP3_REG_READ(MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG);
+
+ if (dma->ccs_config.ccs_dirty) {
+ cc_config &= DMA_CCS_CONFIG_MASK;
+ if (dma->ccs_config.ccs_enable)
+ cc_config |= BIT(3);
+ else
+ cc_config &= ~BIT(3);
+ cc_config |= dma->ccs_config.ccs_sel << 5;
+ cc_config |= dma->ccs_config.pre_bias_sel << 6;
+ cc_config |= dma->ccs_config.post_bias_sel << 7;
+ cc_config |= dma->ccs_config.pre_limit_sel << 8;
+ cc_config |= dma->ccs_config.post_limit_sel << 9;
+ /*
+ * CCS dirty flag should be reset when call is made from frame
+ * kickoff, or else upon resume the flag would be dirty and LUT
+ * config could call this function thereby causing no register
+ * programming for CCS, which will cause screen to go dark
+ */
+ if (from_kickoff)
+ dma->ccs_config.ccs_dirty = false;
+ ccs_updated = true;
+ }
+
+ if (dma->lut_config.lut_dirty) {
+ cc_config &= DMA_LUT_CONFIG_MASK;
+ cc_config |= dma->lut_config.lut_enable;
+ cc_config |= dma->lut_config.lut_position << 4;
+ cc_config |= dma->lut_config.lut_sel << 10;
+ dma->lut_config.lut_dirty = false;
+ lut_updated = true;
+ }
+
+ if (ccs_updated && from_kickoff) {
+ ccs.mv = dma->ccs_cache.csc_data.csc_mv;
+ ccs.pre_bv = dma->ccs_cache.csc_data.csc_pre_bv;
+ ccs.post_bv = dma->ccs_cache.csc_data.csc_post_bv;
+ ccs.pre_lv = dma->ccs_cache.csc_data.csc_pre_lv;
+ ccs.post_lv = dma->ccs_cache.csc_data.csc_post_lv;
+ mdp3_dmap_ccs_config_internal(dma, &dma->ccs_config, &ccs);
+ }
+
+ if (lut_updated || ccs_updated) {
+ MDP3_REG_WRITE(MDP3_REG_DMA_P_COLOR_CORRECT_CONFIG, cc_config);
+ /*
+ * Make sure ccs configuration update is done before continuing
+ * with the DMA transfer
+ */
+ wmb();
+ }
+}
+
+static int mdp3_dmap_ccs_config(struct mdp3_dma *dma,
+ struct mdp3_dma_color_correct_config *config,
+ struct mdp3_dma_ccs *ccs)
+{
+ mdp3_dmap_ccs_config_internal(dma, config, ccs);
+
dma->ccs_config = *config;
if (dma->output_config.out_sel != MDP3_DMA_OUTPUT_SEL_DSI_CMD)
- mdp3_ccs_update(dma);
+ mdp3_ccs_update(dma, false);
return 0;
}
@@ -574,7 +588,7 @@ static int mdp3_dmap_lut_config(struct mdp3_dma *dma,
dma->lut_config = *config;
if (dma->output_config.out_sel != MDP3_DMA_OUTPUT_SEL_DSI_CMD)
- mdp3_ccs_update(dma);
+ mdp3_ccs_update(dma, false);
return 0;
}
@@ -633,45 +647,64 @@ static int mdp3_dmap_update(struct mdp3_dma *dma, void *buf,
int cb_type = MDP3_DMA_CALLBACK_TYPE_VSYNC;
struct mdss_panel_data *panel;
int rc = 0;
+ int retry_count = 2;
+ ATRACE_BEGIN(__func__);
pr_debug("mdp3_dmap_update\n");
if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
cb_type = MDP3_DMA_CALLBACK_TYPE_DMA_DONE;
if (intf->active) {
+ ATRACE_BEGIN("mdp3_wait_for_dma_comp");
+retry_dma_done:
rc = wait_for_completion_timeout(&dma->dma_comp,
KOFF_TIMEOUT);
- if (rc <= 0) {
- WARN(1, "cmd kickoff timed out (%d)\n", rc);
+ if (rc <= 0 && --retry_count) {
+ int vsync_status;
+
+ vsync_status = (1 << MDP3_INTR_DMA_P_DONE) &
+ MDP3_REG_READ(MDP3_REG_INTR_STATUS);
+ if (!vsync_status) {
+ pr_err("%s: cmd timeout retry cnt %d\n",
+ __func__, retry_count);
+ goto retry_dma_done;
+ }
rc = -1;
}
+ ATRACE_END("mdp3_wait_for_dma_comp");
}
}
if (dma->update_src_cfg) {
if (dma->output_config.out_sel ==
- MDP3_DMA_OUTPUT_SEL_DSI_VIDEO && intf->active)
- pr_err("configuring dma source while dma is active\n");
+ MDP3_DMA_OUTPUT_SEL_DSI_VIDEO && intf->active)
+ pr_err("configuring dma source while it is active\n");
dma->dma_config_source(dma);
if (data) {
panel = (struct mdss_panel_data *)data;
- if (panel->event_handler)
+ if (panel->event_handler) {
panel->event_handler(panel,
MDSS_EVENT_ENABLE_PARTIAL_ROI, NULL);
+ panel->event_handler(panel,
+ MDSS_EVENT_DSI_STREAM_SIZE, NULL);
+ }
}
dma->update_src_cfg = false;
}
+ mutex_lock(&dma->pp_lock);
+ if (dma->ccs_config.ccs_dirty)
+ mdp3_ccs_update(dma, true);
+ mutex_unlock(&dma->pp_lock);
spin_lock_irqsave(&dma->dma_lock, flag);
MDP3_REG_WRITE(MDP3_REG_DMA_P_IBUF_ADDR, (u32)(buf +
dma->roi.y * dma->source_config.stride +
dma->roi.x * dma_bpp(dma->source_config.format)));
dma->source_config.buf = (int)buf;
if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_CMD) {
- mdp3_ccs_update(dma);
MDP3_REG_WRITE(MDP3_REG_DMA_P_START, 1);
}
if (!intf->active) {
- pr_debug("mdp3_dmap_update start interface\n");
+ pr_debug("%s start interface\n", __func__);
intf->start(intf);
}
@@ -682,14 +715,28 @@ static int mdp3_dmap_update(struct mdp3_dma *dma, void *buf,
spin_unlock_irqrestore(&dma->dma_lock, flag);
mdp3_dma_callback_enable(dma, cb_type);
- pr_debug("mdp3_dmap_update wait for vsync_comp in\n");
+ pr_debug("%s wait for vsync_comp\n", __func__);
if (dma->output_config.out_sel == MDP3_DMA_OUTPUT_SEL_DSI_VIDEO) {
+ ATRACE_BEGIN("mdp3_wait_for_vsync_comp");
+retry_vsync:
rc = wait_for_completion_timeout(&dma->vsync_comp,
KOFF_TIMEOUT);
- if (rc <= 0)
+ pr_err("%s VID DMA Buff Addr %p\n", __func__, buf);
+ if (rc <= 0 && --retry_count) {
+ int vsync = MDP3_REG_READ(MDP3_REG_INTR_STATUS) &
+ (1 << MDP3_INTR_LCDC_START_OF_FRAME);
+
+ if (!vsync) {
+ pr_err("%s trying again count = %d\n",
+ __func__, retry_count);
+ goto retry_vsync;
+ }
rc = -1;
+ }
+ ATRACE_END("mdp3_wait_for_vsync_comp");
}
- pr_debug("mdp3_dmap_update wait for vsync_comp out\n");
+ pr_debug("$%s wait for vsync_comp out\n", __func__);
+ ATRACE_END(__func__);
return rc;
}
@@ -834,7 +881,6 @@ static int mdp3_dmap_histo_reset(struct mdp3_dma *dma)
init_completion(&dma->histo_comp);
- mdp3_dma_clk_auto_gating(dma, 0);
MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_INTR_ENABLE, BIT(0)|BIT(1));
MDP3_REG_WRITE(MDP3_REG_DMA_P_HIST_RESET_SEQ_START, 1);
@@ -856,7 +902,6 @@ static int mdp3_dmap_histo_reset(struct mdp3_dma *dma)
ret = 0;
}
mdp3_dma_callback_disable(dma, MDP3_DMA_CALLBACK_TYPE_HIST_RESET_DONE);
- mdp3_dma_clk_auto_gating(dma, 1);
return ret;
}
@@ -902,6 +947,38 @@ static int mdp3_dmap_histo_op(struct mdp3_dma *dma, u32 op)
return ret;
}
+bool mdp3_dmap_busy(void)
+{
+ u32 val;
+
+ val = MDP3_REG_READ(MDP3_REG_DISPLAY_STATUS);
+ pr_err("%s DMAP Status %s\n", __func__,
+ (val & MDP3_DMA_P_BUSY_BIT) ? "BUSY":"IDLE");
+ return val & MDP3_DMA_P_BUSY_BIT;
+}
+
+/*
+ * During underrun DMA_P registers are reset. Reprogramming CSC to prevent
+ * black screen
+ */
+static void mdp3_dmap_underrun_worker(struct work_struct *work)
+{
+ struct mdp3_dma *dma;
+
+ dma = container_of(work, struct mdp3_dma, underrun_work);
+ mutex_lock(&dma->pp_lock);
+ if (dma->ccs_config.ccs_enable && dma->ccs_config.ccs_dirty) {
+ dma->cc_vect_sel = (dma->cc_vect_sel + 1) % 2;
+ dma->ccs_config.ccs_sel = dma->cc_vect_sel;
+ dma->ccs_config.pre_limit_sel = dma->cc_vect_sel;
+ dma->ccs_config.post_limit_sel = dma->cc_vect_sel;
+ dma->ccs_config.pre_bias_sel = dma->cc_vect_sel;
+ dma->ccs_config.post_bias_sel = dma->cc_vect_sel;
+ mdp3_ccs_update(dma, true);
+ }
+ mutex_unlock(&dma->pp_lock);
+}
+
static int mdp3_dma_start(struct mdp3_dma *dma, struct mdp3_intf *intf)
{
unsigned long flag;
@@ -995,6 +1072,8 @@ int mdp3_dma_init(struct mdp3_dma *dma)
dma->dma_done_notifier = mdp3_dma_done_notifier;
dma->start = mdp3_dma_start;
dma->stop = mdp3_dma_stop;
+ dma->busy = mdp3_dmap_busy;
+ INIT_WORK(&dma->underrun_work, mdp3_dmap_underrun_worker);
break;
case MDP3_DMA_S:
dma->dma_config = mdp3_dmas_config;
diff --git a/drivers/video/fbdev/msm/mdp3_dma.h b/drivers/video/fbdev/msm/mdp3_dma.h
index 60c9e1dfb67f..fb719f6ba62e 100644
--- a/drivers/video/fbdev/msm/mdp3_dma.h
+++ b/drivers/video/fbdev/msm/mdp3_dma.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,7 @@
#include <linux/notifier.h>
#include <linux/sched.h>
+#include <linux/msm_mdp.h>
#define MDP_HISTOGRAM_BL_SCALE_MAX 1024
#define MDP_HISTOGRAM_BL_LEVEL_MAX 255
@@ -268,6 +269,12 @@ struct mdp3_dma {
struct mdp3_dma_cursor cursor;
struct mdp3_dma_color_correct_config ccs_config;
+ struct mdp_csc_cfg_data ccs_cache;
+ int cc_vect_sel;
+
+ struct work_struct underrun_work;
+ struct mutex pp_lock;
+
struct mdp3_dma_lut_config lut_config;
struct mdp3_dma_histogram_config histogram_config;
int histo_state;
@@ -281,9 +288,12 @@ struct mdp3_dma {
struct fb_cmap *gc_cmap;
struct fb_cmap *hist_cmap;
+ bool (*busy)(void);
+
int (*dma_config)(struct mdp3_dma *dma,
struct mdp3_dma_source *source_config,
- struct mdp3_dma_output_config *output_config);
+ struct mdp3_dma_output_config *output_config,
+ bool splash_screen_active);
int (*dma_sync_config)(struct mdp3_dma *dma, struct mdp3_dma_source
*source_config, struct mdp3_tear_check *te);
diff --git a/drivers/video/fbdev/msm/mdp3_hwio.h b/drivers/video/fbdev/msm/mdp3_hwio.h
index 83615a786e81..056355c17823 100644
--- a/drivers/video/fbdev/msm/mdp3_hwio.h
+++ b/drivers/video/fbdev/msm/mdp3_hwio.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -30,6 +30,7 @@
#define MDP3_REG_PRIMARY_VSYNC_INIT_VAL 0x0328
#define MDP3_REG_SECONDARY_VSYNC_INIT_VAL 0x032c
#define MDP3_REG_EXTERNAL_VSYNC_INIT_VAL 0x0330
+#define MDP3_REG_AUTOREFRESH_CONFIG_P 0x034C
#define MDP3_REG_SYNC_THRESH_0 0x0200
#define MDP3_REG_SYNC_THRESH_1 0x0204
#define MDP3_REG_SYNC_THRESH_2 0x0208
@@ -63,6 +64,17 @@
/*clock control*/
#define MDP3_REG_CGC_EN 0x0100
+#define MDP3_VBIF_REG_FORCE_EN 0x0004
+
+/* QOS Remapper */
+#define MDP3_DMA_P_QOS_REMAPPER 0x90090
+#define MDP3_DMA_P_WATERMARK_0 0x90094
+#define MDP3_DMA_P_WATERMARK_1 0x90098
+#define MDP3_DMA_P_WATERMARK_2 0x9009C
+#define MDP3_PANIC_ROBUST_CTRL 0x900A0
+#define MDP3_PANIC_LUT0 0x900A4
+#define MDP3_PANIC_LUT1 0x900A8
+#define MDP3_ROBUST_LUT 0x900AC
/*danger safe*/
#define MDP3_PANIC_ROBUST_CTRL 0x900A0
@@ -344,4 +356,6 @@ enum {
#define MDP3_DMA_P_HIST_INTR_HIST_DONE_BIT BIT(1)
#define MDP3_PPP_DONE MDP3_INTR_DP0_ROI_DONE
+#define MDP3_DMA_P_BUSY_BIT BIT(6)
+
#endif /* MDP3_HWIO_H */
diff --git a/drivers/video/fbdev/msm/mdp3_ppp.c b/drivers/video/fbdev/msm/mdp3_ppp.c
index 4bc71cccd289..eaacdd875747 100644
--- a/drivers/video/fbdev/msm/mdp3_ppp.c
+++ b/drivers/video/fbdev/msm/mdp3_ppp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2007, 2013-2014 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2007, 2013-2014, 2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2007 Google Incorporated
*
* This software is licensed under the terms of the GNU General Public
@@ -31,6 +31,7 @@
#include "mdp3_ppp.h"
#include "mdp3_hwio.h"
#include "mdp3.h"
+#include "mdss_debug.h"
#define MDP_IS_IMGTYPE_BAD(x) ((x) >= MDP_IMGTYPE_LIMIT)
#define MDP_RELEASE_BW_TIMEOUT 50
@@ -40,6 +41,14 @@
#define MDP_PPP_MAX_READ_WRITE 3
#define ENABLE_SOLID_FILL 0x2
#define DISABLE_SOLID_FILL 0x0
+#define BLEND_LATENCY 3
+#define CSC_LATENCY 1
+
+#define CLK_FUDGE_NUM 12
+#define CLK_FUDGE_DEN 10
+
+#define YUV_BW_FUDGE_NUM 10
+#define YUV_BW_FUDGE_DEN 10
struct ppp_resource ppp_res;
@@ -108,11 +117,20 @@ struct ppp_status {
};
static struct ppp_status *ppp_stat;
+static bool is_blit_optimization_possible(struct blit_req_list *req, int indx);
+
+static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
+{
+ u64 result = (val * (u64)numer);
+ do_div(result, denom);
+ return result;
+}
int ppp_get_bpp(uint32_t format, uint32_t fb_format)
{
int bpp = -EINVAL;
+
if (format == MDP_FB_FORMAT)
format = fb_format;
@@ -126,12 +144,22 @@ int mdp3_ppp_get_img(struct mdp_img *img, struct mdp_blit_req *req,
struct mdp3_img_data *data)
{
struct msmfb_data fb_data;
+ uint32_t stride;
+ int bpp = ppp_bpp(img->format);
+
+ if (bpp <= 0) {
+ pr_err("%s incorrect format %d\n", __func__, img->format);
+ return -EINVAL;
+ }
fb_data.flags = img->priv;
fb_data.memory_id = img->memory_id;
fb_data.offset = 0;
- return mdp3_get_img(&fb_data, data);
+ stride = img->width * bpp;
+ data->padding = 16 * stride;
+
+ return mdp3_get_img(&fb_data, data, MDP3_CLIENT_PPP);
}
/* Check format */
@@ -233,6 +261,12 @@ int mdp3_ppp_verify_scale(struct mdp_blit_req *req)
/* operation check */
int mdp3_ppp_verify_op(struct mdp_blit_req *req)
{
+ /*
+ * MDP_DEINTERLACE & MDP_SHARPENING Flags are not valid for MDP3
+ * so using them together for MDP_SMART_BLIT.
+ */
+ if ((req->flags & MDP_SMART_BLIT) == MDP_SMART_BLIT)
+ return 0;
if (req->flags & MDP_DEINTERLACE) {
pr_err("\n%s(): deinterlace not supported", __func__);
return -EINVAL;
@@ -327,34 +361,12 @@ void mdp3_ppp_kickoff(void)
init_completion(&ppp_stat->ppp_comp);
mdp3_irq_enable(MDP3_PPP_DONE);
ppp_enable();
+ ATRACE_BEGIN("mdp3_wait_for_ppp_comp");
mdp3_ppp_pipe_wait();
+ ATRACE_END("mdp3_wait_for_ppp_comp");
mdp3_irq_disable(MDP3_PPP_DONE);
}
-u32 mdp3_clk_calc(struct msm_fb_data_type *mfd, struct blit_req_list *lreq)
-{
- struct mdss_panel_info *panel_info = mfd->panel_info;
- int i, lcount = 0;
- struct mdp_blit_req *req;
- u32 total_pixel;
- u32 mdp_clk_rate = MDP_CORE_CLK_RATE_SVS;
-
- total_pixel = panel_info->xres * panel_info->yres;
- if (total_pixel > SVS_MAX_PIXEL)
- return MDP_CORE_CLK_RATE_MAX;
-
- for (i = 0; i < lcount; i++) {
- req = &(lreq->req_list[i]);
-
- if (req->src_rect.h != req->dst_rect.h ||
- req->src_rect.w != req->dst_rect.w) {
- mdp_clk_rate = MDP_CORE_CLK_RATE_MAX;
- break;
- }
- }
- return mdp_clk_rate;
-}
-
struct bpp_info {
int bpp_num;
int bpp_den;
@@ -413,6 +425,84 @@ int mdp3_get_bpp_info(int format, struct bpp_info *bpp)
return rc;
}
+bool mdp3_is_blend(struct mdp_blit_req *req)
+{
+ if ((req->transp_mask != MDP_TRANSP_NOP) ||
+ (req->alpha < MDP_ALPHA_NOP) ||
+ (req->src.format == MDP_ARGB_8888) ||
+ (req->src.format == MDP_BGRA_8888) ||
+ (req->src.format == MDP_RGBA_8888))
+ return true;
+ return false;
+}
+
+bool mdp3_is_scale(struct mdp_blit_req *req)
+{
+ if (req->flags & MDP_ROT_90) {
+ if (req->src_rect.w != req->dst_rect.h ||
+ req->src_rect.h != req->dst_rect.w)
+ return true;
+ } else {
+ if (req->src_rect.h != req->dst_rect.h ||
+ req->src_rect.w != req->dst_rect.w)
+ return true;
+ }
+ return false;
+}
+
+u32 mdp3_clk_calc(struct msm_fb_data_type *mfd,
+ struct blit_req_list *lreq, u32 fps)
+{
+ int i, lcount = 0;
+ struct mdp_blit_req *req;
+ u64 mdp_clk_rate = 0;
+ u32 scale_x = 0, scale_y = 0, scale = 0;
+ u32 blend_l, csc_l;
+
+ lcount = lreq->count;
+
+ blend_l = 100 * BLEND_LATENCY;
+ csc_l = 100 * CSC_LATENCY;
+
+ for (i = 0; i < lcount; i++) {
+ req = &(lreq->req_list[i]);
+
+ if (req->flags & MDP_SMART_BLIT)
+ continue;
+
+ if (mdp3_is_scale(req)) {
+ if (req->flags & MDP_ROT_90) {
+ scale_x = 100 * req->src_rect.h /
+ req->dst_rect.w;
+ scale_y = 100 * req->src_rect.w /
+ req->dst_rect.h;
+ } else {
+ scale_x = 100 * req->src_rect.w /
+ req->dst_rect.w;
+ scale_y = 100 * req->src_rect.h /
+ req->dst_rect.h;
+ }
+ scale = max(scale_x, scale_y);
+ }
+ scale = scale >= 100 ? scale : 100;
+ if (mdp3_is_blend(req))
+ scale = max(scale, blend_l);
+
+ if (!check_if_rgb(req->src.format))
+ scale = max(scale, csc_l);
+
+ mdp_clk_rate += (req->src_rect.w * req->src_rect.h *
+ scale / 100) * fps;
+ }
+ mdp_clk_rate += (ppp_res.solid_fill_pixel * fps);
+ mdp_clk_rate = fudge_factor(mdp_clk_rate,
+ CLK_FUDGE_NUM, CLK_FUDGE_DEN);
+ pr_debug("mdp_clk_rate for ppp = %llu\n", mdp_clk_rate);
+ mdp_clk_rate = mdp3_clk_round_off(mdp_clk_rate);
+
+ return mdp_clk_rate;
+}
+
u64 mdp3_adjust_scale_factor(struct mdp_blit_req *req, u32 bw_req, int bpp)
{
int src_h, src_w;
@@ -424,67 +514,145 @@ u64 mdp3_adjust_scale_factor(struct mdp_blit_req *req, u32 bw_req, int bpp)
dst_h = req->dst_rect.h;
dst_w = req->dst_rect.w;
- if ((!(req->flags & MDP_ROT_90) && src_h == dst_h && src_w == dst_w) ||
- ((req->flags & MDP_ROT_90) && src_h == dst_w && src_w == dst_h))
+ if ((!(req->flags & MDP_ROT_90) && src_h == dst_h &&
+ src_w == dst_w) || ((req->flags & MDP_ROT_90) &&
+ src_h == dst_w && src_w == dst_h))
return bw_req;
bw_req = (bw_req + (bw_req * dst_h) / (4 * src_h));
bw_req = (bw_req + (bw_req * dst_w) / (4 * src_w) +
(bw_req * dst_w) / (bpp * src_w));
-
return bw_req;
}
-int mdp3_calc_ppp_res(struct msm_fb_data_type *mfd, struct blit_req_list *lreq)
+int mdp3_calc_ppp_res(struct msm_fb_data_type *mfd,
+ struct blit_req_list *lreq)
{
struct mdss_panel_info *panel_info = mfd->panel_info;
int i, lcount = 0;
struct mdp_blit_req *req;
struct bpp_info bpp;
- u32 src_read_bw = 0;
- u32 dst_read_bw = 0;
+ u64 src_read_bw = 0;
+ u32 bg_read_bw = 0;
u32 dst_write_bw = 0;
u64 honest_ppp_ab = 0;
- u32 fps;
+ u32 fps = 0;
+ int smart_blit_fg_indx = -1;
+ u32 smart_blit_bg_read_bw = 0;
+ ATRACE_BEGIN(__func__);
lcount = lreq->count;
if (lcount == 0) {
pr_err("Blit with request count 0, continue to recover!!!\n");
+ ATRACE_END(__func__);
return 0;
}
-
- /* Set FPS to mipi rate as currently there is no way to get this */
- fps = panel_info->mipi.frame_rate;
+ if (lreq->req_list[0].flags & MDP_SOLID_FILL) {
+ req = &(lreq->req_list[0]);
+ mdp3_get_bpp_info(req->dst.format, &bpp);
+ ppp_res.solid_fill_pixel += req->dst_rect.w * req->dst_rect.h;
+ ppp_res.solid_fill_byte += req->dst_rect.w * req->dst_rect.h *
+ bpp.bpp_num / bpp.bpp_den;
+ if ((panel_info->yres/2 > req->dst_rect.h) ||
+ (mdp3_res->solid_fill_vote_en)) {
+ pr_debug("Solid fill less than H/2 or fill vote %d\n",
+ mdp3_res->solid_fill_vote_en);
+ ATRACE_END(__func__);
+ return 0;
+ }
+ }
for (i = 0; i < lcount; i++) {
+ /* Set Smart blit flag before BW calculation */
+ is_blit_optimization_possible(lreq, i);
req = &(lreq->req_list[i]);
+ if (req->fps > 0 && req->fps <= panel_info->mipi.frame_rate) {
+ if (fps == 0)
+ fps = req->fps;
+ else
+ fps = panel_info->mipi.frame_rate;
+ }
+
mdp3_get_bpp_info(req->src.format, &bpp);
- src_read_bw = req->src_rect.w * req->src_rect.h *
+ if (lreq->req_list[i].flags & MDP_SMART_BLIT) {
+ /*
+ * Flag for smart blit FG layer index
+ * If blit request at index "n" has
+ * MDP_SMART_BLIT flag set then it will be used as BG
+ * layer in smart blit and request at index "n+1"
+ * will be used as FG layer
+ */
+ smart_blit_fg_indx = i + 1;
+ bg_read_bw = req->src_rect.w * req->src_rect.h *
bpp.bpp_num / bpp.bpp_den;
- src_read_bw = mdp3_adjust_scale_factor(req,
- src_read_bw, bpp.bpp_pln);
-
- mdp3_get_bpp_info(req->dst.format, &bpp);
- dst_read_bw = req->dst_rect.w * req->dst_rect.h *
+ bg_read_bw = mdp3_adjust_scale_factor(req,
+ bg_read_bw, bpp.bpp_pln);
+ /* Cache read BW of smart blit BG layer */
+ smart_blit_bg_read_bw = bg_read_bw;
+ } else {
+ src_read_bw = req->src_rect.w * req->src_rect.h *
+ bpp.bpp_num / bpp.bpp_den;
+ src_read_bw = mdp3_adjust_scale_factor(req,
+ src_read_bw, bpp.bpp_pln);
+ if (!(check_if_rgb(req->src.format))) {
+ src_read_bw = fudge_factor(src_read_bw,
+ YUV_BW_FUDGE_NUM,
+ YUV_BW_FUDGE_DEN);
+ }
+ mdp3_get_bpp_info(req->dst.format, &bpp);
+
+ if (smart_blit_fg_indx == i) {
+ bg_read_bw = smart_blit_bg_read_bw;
+ smart_blit_fg_indx = -1;
+ } else {
+ if ((req->transp_mask != MDP_TRANSP_NOP) ||
+ (req->alpha < MDP_ALPHA_NOP) ||
+ (req->src.format == MDP_ARGB_8888) ||
+ (req->src.format == MDP_BGRA_8888) ||
+ (req->src.format == MDP_RGBA_8888)) {
+ bg_read_bw = req->dst_rect.w *
+ req->dst_rect.h *
bpp.bpp_num / bpp.bpp_den;
- dst_read_bw = mdp3_adjust_scale_factor(req,
- dst_read_bw, bpp.bpp_pln);
-
- dst_write_bw = req->dst_rect.w * req->dst_rect.h *
+ bg_read_bw = mdp3_adjust_scale_factor(
+ req, bg_read_bw,
+ bpp.bpp_pln);
+ } else {
+ bg_read_bw = 0;
+ }
+ }
+ dst_write_bw = req->dst_rect.w * req->dst_rect.h *
bpp.bpp_num / bpp.bpp_den;
- honest_ppp_ab += (src_read_bw + dst_read_bw + dst_write_bw);
+ honest_ppp_ab += (src_read_bw + bg_read_bw +
+ dst_write_bw);
+ }
}
- honest_ppp_ab = honest_ppp_ab * fps;
+ if (fps == 0)
+ fps = panel_info->mipi.frame_rate;
+
+ if (lreq->req_list[0].flags & MDP_SOLID_FILL) {
+ honest_ppp_ab = ppp_res.solid_fill_byte * 4;
+ pr_debug("solid fill honest_ppp_ab %llu\n", honest_ppp_ab);
+ } else {
+ honest_ppp_ab += ppp_res.solid_fill_byte;
+ mdp3_res->solid_fill_vote_en = true;
+ }
+
+ honest_ppp_ab = honest_ppp_ab * fps;
if (honest_ppp_ab != ppp_res.next_ab) {
- pr_debug("bandwidth vote update for ppp: ab = %llx\n",
- honest_ppp_ab);
ppp_res.next_ab = honest_ppp_ab;
ppp_res.next_ib = honest_ppp_ab;
ppp_stat->bw_update = true;
+ pr_debug("solid fill ab = %llx, total ab = %llx ",
+ (ppp_res.solid_fill_byte * fps), honest_ppp_ab);
+ pr_debug("(%d fps) Solid_fill_vote %d\n",
+ fps, mdp3_res->solid_fill_vote_en);
+ ATRACE_INT("mdp3_ppp_bus_quota", honest_ppp_ab);
}
- ppp_res.clk_rate = mdp3_clk_calc(mfd, lreq);
+ ppp_res.clk_rate = mdp3_clk_calc(mfd, lreq, fps);
+ ATRACE_INT("mdp3_ppp_clk_rate", ppp_res.clk_rate);
+ ATRACE_END(__func__);
return 0;
}
@@ -543,7 +711,16 @@ void mdp3_start_ppp(struct ppp_blit_op *blit_op)
MDP3_REG_WRITE(MDP3_TFETCH_SOLID_FILL,
DISABLE_SOLID_FILL);
}
+ /* Skip PPP kickoff for SMART_BLIT BG layer */
+ if (blit_op->mdp_op & MDPOP_SMART_BLIT)
+ pr_debug("Skip mdp3_ppp_kickoff\n");
+ else
mdp3_ppp_kickoff();
+
+ if (!(blit_op->solid_fill)) {
+ ppp_res.solid_fill_pixel = 0;
+ ppp_res.solid_fill_byte = 0;
+ }
}
static int solid_fill_workaround(struct mdp_blit_req *req,
@@ -572,9 +749,10 @@ static int solid_fill_workaround(struct mdp_blit_req *req,
blit_op->dst.roi.width = (blit_op->dst.roi.width / 2) * 2;
blit_op->src.roi.width = (blit_op->src.roi.width / 2) * 2;
+ /* Set src format to RGBX, to avoid ppp hang issues */
+ blit_op->src.color_fmt = MDP_RGBX_8888;
+
/* Avoid RGBA format, as it could hang ppp during solid fill */
- if (blit_op->src.color_fmt == MDP_RGBA_8888)
- blit_op->src.color_fmt = MDP_RGBX_8888;
if (blit_op->dst.color_fmt == MDP_RGBA_8888)
blit_op->dst.color_fmt = MDP_RGBX_8888;
return 0;
@@ -611,6 +789,7 @@ static int mdp3_ppp_process_req(struct ppp_blit_op *blit_op,
blit_op->src.roi.height = req->src_rect.h;
blit_op->src.prop.width = req->src.width;
+ blit_op->src.prop.height = req->src.height;
blit_op->src.color_fmt = req->src.format;
@@ -691,6 +870,10 @@ static int mdp3_ppp_process_req(struct ppp_blit_op *blit_op,
} else {
blit_op->solid_fill = false;
}
+
+ if (req->flags & MDP_SMART_BLIT)
+ blit_op->mdp_op |= MDPOP_SMART_BLIT;
+
return ret;
}
@@ -714,14 +897,16 @@ static void mdp3_ppp_tile_workaround(struct ppp_blit_op *blit_op,
/* if it's out of scale range... */
if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
- blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR)
+ blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR)
blit_op->src.roi.width =
- (MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
+ (MDP_SCALE_Q_FACTOR *
+ blit_op->dst.roi.height) /
MDP_MAX_X_SCALE_FACTOR;
else if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
- blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR)
+ blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR)
blit_op->src.roi.width =
- (MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
+ (MDP_SCALE_Q_FACTOR *
+ blit_op->dst.roi.height) /
MDP_MIN_X_SCALE_FACTOR;
mdp3_start_ppp(blit_op);
@@ -741,9 +926,8 @@ static void mdp3_ppp_tile_workaround(struct ppp_blit_op *blit_op,
}
if ((dst_h < 0) || (src_w < 0))
- pr_err
- ("msm_fb: mdp_blt_ex() unexpected result! line:%d\n",
- __LINE__);
+ pr_err("msm_fb: mdp_blt_ex() unexpected result! line:%d\n",
+ __LINE__);
/* remainder update */
if ((dst_h > 0) && (src_w > 0)) {
@@ -753,26 +937,25 @@ static void mdp3_ppp_tile_workaround(struct ppp_blit_op *blit_op,
blit_op->src.roi.width = src_w;
if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
- blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR) {
- tmp_v =
- (MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
- MDP_MAX_X_SCALE_FACTOR +
- ((MDP_SCALE_Q_FACTOR *
- blit_op->dst.roi.height) %
- MDP_MAX_X_SCALE_FACTOR ? 1 : 0);
+ blit_op->src.roi.width) > MDP_MAX_X_SCALE_FACTOR) {
+ tmp_v = (MDP_SCALE_Q_FACTOR *
+ blit_op->dst.roi.height) /
+ MDP_MAX_X_SCALE_FACTOR +
+ ((MDP_SCALE_Q_FACTOR *
+ blit_op->dst.roi.height) %
+ MDP_MAX_X_SCALE_FACTOR ? 1 : 0);
/* move x location as roi width gets bigger */
blit_op->src.roi.x -= tmp_v - blit_op->src.roi.width;
blit_op->src.roi.width = tmp_v;
} else if (((MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
- blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR) {
- tmp_v =
- (MDP_SCALE_Q_FACTOR * blit_op->dst.roi.height) /
- MDP_MIN_X_SCALE_FACTOR +
- ((MDP_SCALE_Q_FACTOR *
- blit_op->dst.roi.height) %
- MDP_MIN_X_SCALE_FACTOR ? 1 : 0);
-
+ blit_op->src.roi.width) < MDP_MIN_X_SCALE_FACTOR) {
+ tmp_v = (MDP_SCALE_Q_FACTOR *
+ blit_op->dst.roi.height) /
+ MDP_MIN_X_SCALE_FACTOR +
+ ((MDP_SCALE_Q_FACTOR *
+ blit_op->dst.roi.height) %
+ MDP_MIN_X_SCALE_FACTOR ? 1 : 0);
/*
* we don't move x location for continuity of
* source image
@@ -993,6 +1176,7 @@ int mdp3_ppp_start_blit(struct msm_fb_data_type *mfd,
void mdp3_ppp_wait_for_fence(struct blit_req_list *req)
{
int i, ret = 0;
+ ATRACE_BEGIN(__func__);
/* buf sync */
for (i = 0; i < req->acq_fen_cnt; i++) {
ret = sync_fence_wait(req->acq_fen[i],
@@ -1004,7 +1188,7 @@ void mdp3_ppp_wait_for_fence(struct blit_req_list *req)
}
sync_fence_put(req->acq_fen[i]);
}
-
+ ATRACE_END(__func__);
if (ret < 0) {
while (i < req->acq_fen_cnt) {
sync_fence_put(req->acq_fen[i]);
@@ -1124,6 +1308,7 @@ void mdp3_ppp_req_pop(struct blit_req_queue *req_q)
void mdp3_free_fw_timer_func(unsigned long arg)
{
+ mdp3_res->solid_fill_vote_en = false;
schedule_work(&ppp_stat->free_bw_work);
}
@@ -1138,11 +1323,170 @@ static void mdp3_free_bw_wq_handler(struct work_struct *work)
mutex_unlock(&ppp_stat->config_ppp_mutex);
}
+static bool is_hw_workaround_needed(struct mdp_blit_req req)
+{
+ bool result = false;
+ bool is_bpp_4 = false;
+ uint32_t remainder = 0;
+ uint32_t bpp = ppp_get_bpp(req.dst.format, ppp_stat->mfd->fb_imgType);
+
+ /* MDP width split workaround */
+ remainder = (req.dst_rect.w) % 16;
+ is_bpp_4 = (bpp == 4) ? 1 : 0;
+ if ((is_bpp_4 && (remainder == 6 || remainder == 14)) &&
+ !(req.flags & MDP_SOLID_FILL))
+ result = true;
+
+ /* bg tile fetching HW workaround */
+ if (((req.alpha < MDP_ALPHA_NOP) ||
+ (req.transp_mask != MDP_TRANSP_NOP) ||
+ (req.src.format == MDP_ARGB_8888) ||
+ (req.src.format == MDP_BGRA_8888) ||
+ (req.src.format == MDP_RGBA_8888)) &&
+ (req.flags & MDP_ROT_90) && (req.dst_rect.w <= 16))
+ result = true;
+
+ return result;
+}
+
+static bool is_roi_equal(struct mdp_blit_req req0,
+ struct mdp_blit_req req1)
+{
+ bool result = false;
+ struct mdss_panel_info *panel_info = ppp_stat->mfd->panel_info;
+
+ /*
+ * Check req0 and req1 layer destination ROI and return true if
+ * they are equal.
+ */
+ if ((req0.dst_rect.x == req1.dst_rect.x) &&
+ (req0.dst_rect.y == req1.dst_rect.y) &&
+ (req0.dst_rect.w == req1.dst_rect.w) &&
+ (req0.dst_rect.h == req1.dst_rect.h))
+ result = true;
+ /*
+	 * Layers are source cropped and the cropped layer width and height
+	 * equal the panel width and height
+ */
+ else if ((req0.dst_rect.w == req1.dst_rect.w) &&
+ (req0.dst_rect.h == req1.dst_rect.h) &&
+ (req0.dst_rect.w == panel_info->xres) &&
+ (req0.dst_rect.h == panel_info->yres))
+ result = true;
+
+ return result;
+}
+
+static bool is_scaling_needed(struct mdp_blit_req req)
+{
+ bool result = true;
+
+ /* Return true if layer need scaling else return false */
+ if ((req.src_rect.w == req.dst_rect.w) &&
+ (req.src_rect.h == req.dst_rect.h))
+ result = false;
+ return result;
+}
+
+static bool is_blit_optimization_possible(struct blit_req_list *req, int indx)
+{
+ int next = indx + 1;
+ bool status = false;
+ struct mdp3_img_data tmp_data;
+ bool dst_roi_equal = false;
+ bool hw_woraround_active = false;
+ struct mdp_blit_req bg_req;
+ struct mdp_blit_req fg_req;
+
+ if (!(mdp3_res->smart_blit_en)) {
+ pr_debug("Smart BLIT disabled from sysfs\n");
+ return status;
+ }
+ if (next < req->count) {
+ bg_req = req->req_list[indx];
+ fg_req = req->req_list[next];
+ hw_woraround_active = is_hw_workaround_needed(bg_req);
+ dst_roi_equal = is_roi_equal(bg_req, fg_req);
+ /*
+ * Check userspace Smart BLIT Flag for current and next
+ * request Flag for smart blit FG layer index If blit
+ * request at index "n" has MDP_SMART_BLIT flag set then
+ * it will be used as BG layer in smart blit
+ * and request at index "n+1" will be used as FG layer
+ */
+ if ((bg_req.flags & MDP_SMART_BLIT) &&
+ (!(fg_req.flags & MDP_SMART_BLIT)) &&
+ (!(hw_woraround_active)))
+ status = true;
+ /*
+ * Enable SMART blit between request 0(BG) & request 1(FG) when
+ * destination ROI of BG and FG layer are same,
+ * No scaling on BG layer
+ * No rotation on BG Layer.
+ * BG Layer color format is RGB and marked as MDP_IS_FG.
+ */
+ else if ((mdp3_res->smart_blit_en & SMART_BLIT_RGB_EN) &&
+ (indx == 0) && (dst_roi_equal) &&
+ (bg_req.flags & MDP_IS_FG) &&
+ (!(is_scaling_needed(bg_req))) &&
+ (!(bg_req.flags & (MDP_ROT_90))) &&
+ (check_if_rgb(bg_req.src.format)) &&
+ (!(hw_woraround_active))) {
+ status = true;
+ req->req_list[indx].flags |= MDP_SMART_BLIT;
+ pr_debug("Optimize RGB Blit for Req Indx %d\n", indx);
+ }
+ /*
+ * Swap BG and FG layer to enable SMART blit between request
+ * 0(BG) & request 1(FG) when destination ROI of BG and FG
+ * layer are same, No scaling on FG and BG layer
+ * No rotation on FG Layer. BG Layer color format is YUV
+ */
+ else if ((indx == 0) &&
+ (mdp3_res->smart_blit_en & SMART_BLIT_YUV_EN) &&
+ (!(fg_req.flags & (MDP_ROT_90))) && (dst_roi_equal) &&
+ (!(check_if_rgb(bg_req.src.format))) &&
+ (!(hw_woraround_active))) {
+ /*
+ * swap blit requests at index 0 and 1. YUV layer at
+ * index 0 is replaced with UI layer request present
+ * at index 1. Since UI layer will be in background
+ * set IS_FG flag and clear it from YUV layer flags
+ */
+ if (!(is_scaling_needed(req->req_list[next]))) {
+ if (bg_req.flags & MDP_IS_FG) {
+ req->req_list[indx].flags &=
+ ~MDP_IS_FG;
+ req->req_list[next].flags |= MDP_IS_FG;
+ }
+ bg_req = req->req_list[next];
+ req->req_list[next] = req->req_list[indx];
+ req->req_list[indx] = bg_req;
+
+ tmp_data = req->src_data[next];
+ req->src_data[next] = req->src_data[indx];
+ req->src_data[indx] = tmp_data;
+
+ tmp_data = req->dst_data[next];
+ req->dst_data[next] = req->dst_data[indx];
+ req->dst_data[indx] = tmp_data;
+ status = true;
+ req->req_list[indx].flags |= MDP_SMART_BLIT;
+ pr_debug("Optimize YUV Blit for Req Indx %d\n",
+ indx);
+ }
+ }
+ }
+ return status;
+}
+
static void mdp3_ppp_blit_wq_handler(struct work_struct *work)
{
struct msm_fb_data_type *mfd = ppp_stat->mfd;
struct blit_req_list *req;
int i, rc = 0;
+ bool smart_blit = false;
+ int smart_blit_fg_index = -1;
mutex_lock(&ppp_stat->config_ppp_mutex);
req = mdp3_ppp_next_req(&ppp_stat->req_q);
@@ -1176,7 +1520,15 @@ static void mdp3_ppp_blit_wq_handler(struct work_struct *work)
}
ppp_stat->bw_update = false;
}
+ ATRACE_BEGIN("mpd3_ppp_start");
for (i = 0; i < req->count; i++) {
+ smart_blit = is_blit_optimization_possible(req, i);
+ if (smart_blit)
+ /*
+ * Blit request index of FG layer in
+ * smart blit
+ */
+ smart_blit_fg_index = i + 1;
if (!(req->req_list[i].flags & MDP_NO_BLIT)) {
/* Do the actual blit. */
if (!rc) {
@@ -1185,10 +1537,23 @@ static void mdp3_ppp_blit_wq_handler(struct work_struct *work)
&req->src_data[i],
&req->dst_data[i]);
}
- mdp3_put_img(&req->src_data[i]);
- mdp3_put_img(&req->dst_data[i]);
+ /* Unmap blit source buffer */
+ if (smart_blit == false) {
+ mdp3_put_img(&req->src_data[i],
+ MDP3_CLIENT_PPP);
+ }
+ if (smart_blit_fg_index == i) {
+ /* Unmap smart blit BG buffer */
+ mdp3_put_img(&req->src_data[i - 1],
+ MDP3_CLIENT_PPP);
+ smart_blit_fg_index = -1;
+ }
+ mdp3_put_img(&req->dst_data[i],
+ MDP3_CLIENT_PPP);
+ smart_blit = false;
}
}
+ ATRACE_END("mdp3_ppp_start");
/* Signal to release fence */
mutex_lock(&ppp_stat->req_mutex);
mdp3_ppp_signal_timeline(req);
@@ -1257,7 +1622,7 @@ int mdp3_ppp_parse_req(void __user *p,
rc = mdp3_ppp_get_img(&req->req_list[i].dst,
&req->req_list[i], &req->dst_data[i]);
if (rc < 0 || req->dst_data[i].len == 0) {
- mdp3_put_img(&req->src_data[i]);
+ mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP);
pr_err("mdp_ppp: couldn't retrieve dest img from mem\n");
goto parse_err_1;
}
@@ -1300,8 +1665,8 @@ parse_err_2:
put_unused_fd(req->cur_rel_fen_fd);
parse_err_1:
for (i--; i >= 0; i--) {
- mdp3_put_img(&req->src_data[i]);
- mdp3_put_img(&req->dst_data[i]);
+ mdp3_put_img(&req->src_data[i], MDP3_CLIENT_PPP);
+ mdp3_put_img(&req->dst_data[i], MDP3_CLIENT_PPP);
}
mdp3_ppp_deinit_buf_sync(req);
mutex_unlock(&ppp_stat->req_mutex);
diff --git a/drivers/video/fbdev/msm/mdp3_ppp.h b/drivers/video/fbdev/msm/mdp3_ppp.h
index b43807e62033..428906370a8c 100644
--- a/drivers/video/fbdev/msm/mdp3_ppp.h
+++ b/drivers/video/fbdev/msm/mdp3_ppp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2007, 2013 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2007, 2013, 2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2007 Google Incorporated
*
* This software is licensed under the terms of the GNU General Public
@@ -23,6 +23,8 @@
#define PPP_BLUR_SCALE_MAX 128
#define PPP_LUT_MAX 256
+#define MDPOP_SMART_BLIT BIT(31) /* blit optimization flag */
+
/* MDP PPP Operations */
#define MDPOP_NOP 0
#define MDPOP_LR BIT(0) /* left to right flip */
@@ -51,6 +53,8 @@
#define PPP_OP_FLIP_UD BIT(11)
#define PPP_OP_BLEND_ON BIT(12)
#define PPP_OP_BLEND_CONSTANT_ALPHA BIT(14)
+#define PPP_OP_BLEND_BG_ALPHA BIT(13)
+#define PPP_OP_BLEND_EQ_REVERSE BIT(15)
#define PPP_OP_DITHER_EN BIT(16)
#define PPP_BLEND_CALPHA_TRNASP BIT(24)
@@ -298,6 +302,8 @@ struct ppp_resource {
u64 next_ab;
u64 next_ib;
u64 clk_rate;
+ u64 solid_fill_pixel;
+ u64 solid_fill_byte;
};
struct ppp_csc_table {
@@ -393,6 +399,8 @@ struct ppp_edge_rep {
int32_t luma_repeat_bottom;
};
+bool check_if_rgb(int color);
+
/* func for ppp register values */
uint32_t ppp_bpp(uint32_t type);
uint32_t ppp_src_config(uint32_t type);
diff --git a/drivers/video/fbdev/msm/mdp3_ppp_hwio.c b/drivers/video/fbdev/msm/mdp3_ppp_hwio.c
index 8c5d77121ea5..5ba3fbdb6238 100644
--- a/drivers/video/fbdev/msm/mdp3_ppp_hwio.c
+++ b/drivers/video/fbdev/msm/mdp3_ppp_hwio.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2007, 2012-2013 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2007, 2012-2013, 2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2007 Google Incorporated
*
* This software is licensed under the terms of the GNU General Public
@@ -33,6 +33,13 @@
#define PQF_PLUS_5_PLUS_2 (PQF_PLUS_5 + 2)
#define PQF_PLUS_5_MINUS_2 (PQF_PLUS_5 - 2)
+enum {
+ LAYER_FG = 0,
+ LAYER_BG,
+ LAYER_FB,
+ LAYER_MAX,
+};
+
static long long mdp_do_div(long long num, long long den)
{
do_div(num, den);
@@ -155,7 +162,7 @@ static int mdp_calc_scale_params(uint32_t org, uint32_t dim_in,
delta = ((int64_t) (org) << PQF_PLUS_4) - Oreq;
init_phase_temp -= delta;
- /* limit to valid range before the left shift */
+ /* limit to valid range before left shift */
delta = (init_phase_temp & (1LL << 63)) ?
4 : -4;
delta <<= PQF_PLUS_4;
@@ -174,8 +181,9 @@ static int mdp_calc_scale_params(uint32_t org, uint32_t dim_in,
/*
* RPA IMPLEMENTATION
*
- * init_phase needs to be calculated in all RPA_on cases
- * because it's a numerator, not a fixed point value.
+ * init_phase needs to be calculated in all RPA_on
+ * cases because it's a numerator, not a fixed
+ * point value.
*/
/* map (org - .5) into destination space */
@@ -195,8 +203,10 @@ static int mdp_calc_scale_params(uint32_t org, uint32_t dim_in,
dim_out);
Osprime -= point5;
- /* then floor & decrement to calculate the required
- starting coordinate */
+ /*
+ * then floor & decrement to calculate the required
+ * starting coordinate
+ */
Oreq = (Osprime & int_mask) - one;
/* calculate initial phase */
@@ -210,7 +220,9 @@ static int mdp_calc_scale_params(uint32_t org, uint32_t dim_in,
while (abs((int)(init_phase_temp >> PQF_PLUS_4)) > 4)
init_phase_temp += delta;
- /* right shift to account for extra bits of precision */
+ /*
+ * right shift to account for extra bits of precision
+ */
init_phase = (int)(init_phase_temp >> 4);
}
}
@@ -299,9 +311,12 @@ static uint32_t conv_rgb2yuv(uint32_t input_pixel,
comp_C0 = temp;
/* matrix multiplication */
- temp1 = comp_C0 * matrix[0] + comp_C1 * matrix[1] + comp_C2 * matrix[2];
- temp2 = comp_C0 * matrix[3] + comp_C1 * matrix[4] + comp_C2 * matrix[5];
- temp3 = comp_C0 * matrix[6] + comp_C1 * matrix[7] + comp_C2 * matrix[8];
+ temp1 = comp_C0 * matrix[0] + comp_C1 * matrix[1] +
+ comp_C2 * matrix[2];
+ temp2 = comp_C0 * matrix[3] + comp_C1 * matrix[4] +
+ comp_C2 * matrix[5];
+ temp3 = comp_C0 * matrix[6] + comp_C1 * matrix[7] +
+ comp_C2 * matrix[8];
comp_C0 = temp1 + 0x100;
comp_C1 = temp2 + 0x100;
@@ -362,27 +377,37 @@ bool check_if_rgb(int color)
return rgb;
}
-uint8_t *mdp_dst_adjust_rot_addr(struct ppp_blit_op *iBuf,
- uint8_t *addr, uint32_t bpp, uint32_t uv)
+uint8_t *mdp_adjust_rot_addr(struct ppp_blit_op *iBuf,
+ uint8_t *addr, uint32_t bpp, uint32_t uv, uint32_t layer)
{
- uint32_t dest_ystride = iBuf->dst.prop.width * bpp;
+ uint32_t ystride = 0;
uint32_t h_slice = 1;
-
- if (uv && ((iBuf->dst.color_fmt == MDP_Y_CBCR_H2V2) ||
- (iBuf->dst.color_fmt == MDP_Y_CRCB_H2V2)))
+ uint32_t roi_width = 0;
+ uint32_t roi_height = 0;
+ uint32_t color_fmt = 0;
+
+ if (layer == LAYER_BG) {
+ ystride = iBuf->bg.prop.width * bpp;
+ roi_width = iBuf->bg.roi.width;
+ roi_height = iBuf->bg.roi.height;
+ color_fmt = iBuf->bg.color_fmt;
+ } else {
+ ystride = iBuf->dst.prop.width * bpp;
+ roi_width = iBuf->dst.roi.width;
+ roi_height = iBuf->dst.roi.height;
+ color_fmt = iBuf->dst.color_fmt;
+ }
+ if (uv && ((color_fmt == MDP_Y_CBCR_H2V2) ||
+ (color_fmt == MDP_Y_CRCB_H2V2)))
h_slice = 2;
if (((iBuf->mdp_op & MDPOP_ROT90) == MDPOP_ROT90) ^
((iBuf->mdp_op & MDPOP_LR) == MDPOP_LR)) {
- addr +=
- (iBuf->dst.roi.width -
- MIN(16, iBuf->dst.roi.width)) * bpp;
+ addr += (roi_width - MIN(16, roi_width)) * bpp;
}
if ((iBuf->mdp_op & MDPOP_UD) == MDPOP_UD) {
- addr +=
- ((iBuf->dst.roi.height -
- MIN(16, iBuf->dst.roi.height))/h_slice) *
- dest_ystride;
+ addr += ((roi_height - MIN(16, roi_height))/h_slice) *
+ ystride;
}
return addr;
@@ -390,7 +415,7 @@ uint8_t *mdp_dst_adjust_rot_addr(struct ppp_blit_op *iBuf,
void mdp_adjust_start_addr(struct ppp_blit_op *blit_op,
struct ppp_img_desc *img, int v_slice,
- int h_slice, int layer)
+ int h_slice, uint32_t layer)
{
uint32_t bpp = ppp_bpp(img->color_fmt);
int x = img->roi.x;
@@ -403,8 +428,8 @@ void mdp_adjust_start_addr(struct ppp_blit_op *blit_op,
img->p0 += (x + y * ALIGN(width, 128)) * bpp;
else
img->p0 += (x + y * width) * bpp;
- if (layer != 0)
- img->p0 = mdp_dst_adjust_rot_addr(blit_op, img->p0, bpp, 0);
+ if (layer != LAYER_FG)
+ img->p0 = mdp_adjust_rot_addr(blit_op, img->p0, bpp, 0, layer);
if (img->p1) {
/*
@@ -421,9 +446,9 @@ void mdp_adjust_start_addr(struct ppp_blit_op *blit_op,
img->p1 += ((x / h_slice) * h_slice +
((y == 0) ? 0 : ((y + 1) / v_slice - 1) * width)) * bpp;
- if (layer != 0)
- img->p0 = mdp_dst_adjust_rot_addr(blit_op,
- img->p0, bpp, 0);
+ if (layer != LAYER_FG)
+ img->p0 = mdp_adjust_rot_addr(blit_op,
+ img->p0, bpp, 0, layer);
}
}
@@ -555,7 +580,7 @@ int config_ppp_out(struct ppp_img_desc *dst, uint32_t yuv2rgb)
return 0;
}
-int config_ppp_background(struct ppp_img_desc *bg)
+int config_ppp_background(struct ppp_img_desc *bg, uint32_t yuv2rgb)
{
uint32_t val;
@@ -573,7 +598,7 @@ int config_ppp_background(struct ppp_img_desc *bg)
PPP_WRITEL(ppp_src_config(bg->color_fmt),
MDP3_PPP_BG_FORMAT);
- PPP_WRITEL(ppp_pack_pattern(bg->color_fmt, 0),
+ PPP_WRITEL(ppp_pack_pattern(bg->color_fmt, yuv2rgb),
MDP3_PPP_BG_UNPACK_PATTERN1);
return 0;
}
@@ -938,11 +963,16 @@ int config_ppp_scale(struct ppp_blit_op *blit_op, uint32_t *pppop_reg_ptr)
if ((dstW != src->roi.width) ||
(dstH != src->roi.height) || mdp_blur) {
- mdp_calc_scale_params(blit_op->src.roi.x,
+ /*
+ * Use source origin as 0 for computing initial
+ * phase and step size. Incorrect initial phase and
+ * step size value results in green line issue.
+ */
+ mdp_calc_scale_params(0,
blit_op->src.roi.width,
dstW, 1, &phase_init_x,
&phase_step_x);
- mdp_calc_scale_params(blit_op->src.roi.y,
+ mdp_calc_scale_params(0,
blit_op->src.roi.height,
dstH, 0, &phase_init_y,
&phase_step_y);
@@ -996,7 +1026,8 @@ int config_ppp_csc(int src_color, int dst_color, uint32_t *pppop_reg_ptr)
}
int config_ppp_blend(struct ppp_blit_op *blit_op,
- uint32_t *pppop_reg_ptr)
+ uint32_t *pppop_reg_ptr,
+ bool is_yuv_smart_blit, int smart_blit_bg_alpha)
{
struct ppp_csc_table *csc;
uint32_t alpha, trans_color;
@@ -1070,11 +1101,32 @@ int config_ppp_blend(struct ppp_blit_op *blit_op,
if (blit_op->mdp_op & MDPOP_TRANSP)
*pppop_reg_ptr |=
PPP_BLEND_CALPHA_TRNASP;
+ if (is_yuv_smart_blit) {
+ *pppop_reg_ptr |= PPP_OP_ROT_ON |
+ PPP_OP_BLEND_ON |
+ PPP_OP_BLEND_BG_ALPHA |
+ PPP_OP_BLEND_EQ_REVERSE;
+
+ if (smart_blit_bg_alpha < 0xFF)
+ bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL |
+ PPP_BLEND_BG_DSTPIXEL_ALPHA;
+ else
+ bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL |
+ PPP_BLEND_BG_DSTPIXEL_ALPHA |
+ PPP_BLEND_BG_CONSTANT_ALPHA;
+
+ bg_alpha |= smart_blit_bg_alpha << 24;
+ PPP_WRITEL(bg_alpha, MDP3_PPP_BLEND_BG_ALPHA_SEL);
+ } else {
PPP_WRITEL(0, MDP3_PPP_BLEND_BG_ALPHA_SEL);
}
+ }
if (*pppop_reg_ptr & PPP_OP_BLEND_ON) {
- config_ppp_background(&blit_op->bg);
+ if (is_yuv_smart_blit)
+ config_ppp_background(&blit_op->bg, 1);
+ else
+ config_ppp_background(&blit_op->bg, 0);
if (blit_op->dst.color_fmt == MDP_YCRYCB_H2V1) {
*pppop_reg_ptr |= PPP_OP_BG_CHROMA_H2V1;
@@ -1086,9 +1138,13 @@ int config_ppp_blend(struct ppp_blit_op *blit_op,
}
}
}
+ if (is_yuv_smart_blit) {
+ PPP_WRITEL(0, MDP3_PPP_BLEND_PARAM);
+ } else {
val = (alpha << MDP_BLEND_CONST_ALPHA);
val |= (trans_color & MDP_BLEND_TRASP_COL_MASK);
PPP_WRITEL(val, MDP3_PPP_BLEND_PARAM);
+ }
return 0;
}
@@ -1112,6 +1168,20 @@ int config_ppp_op_mode(struct ppp_blit_op *blit_op)
uint32_t ppp_operation_reg = 0;
int sv_slice, sh_slice;
int dv_slice, dh_slice;
+ static struct ppp_img_desc bg_img_param;
+ static int bg_alpha;
+ static int bg_mdp_ops;
+ bool is_yuv_smart_blit = false;
+
+ /*
+ * Detect YUV smart blit,
+ * Check cached BG image plane 0 address is not NILL and
+ * source color format is YUV than it is YUV smart blit
+ * mark is_yuv_smart_blit true.
+ */
+ if ((bg_img_param.p0) &&
+ (!(check_if_rgb(blit_op->src.color_fmt))))
+ is_yuv_smart_blit = true;
sv_slice = sh_slice = dv_slice = dh_slice = 1;
@@ -1188,19 +1258,83 @@ int config_ppp_op_mode(struct ppp_blit_op *blit_op)
blit_op->dst.p1 = NULL;
}
+ if ((bg_img_param.p0) && (!(blit_op->mdp_op & MDPOP_SMART_BLIT))) {
+ /*
+ * Use cached smart blit BG layer info in
+ * smart Blit FG request
+ */
+ blit_op->bg = bg_img_param;
+ if (check_if_rgb(blit_op->bg.color_fmt)) {
+ blit_op->bg.p1 = 0;
+ blit_op->bg.stride1 = 0;
+ }
+ memset(&bg_img_param, 0, sizeof(bg_img_param));
+ } else {
blit_op->bg = blit_op->dst;
+ }
+ /* Cache smart blit BG layer info */
+ if (blit_op->mdp_op & MDPOP_SMART_BLIT)
+ bg_img_param = blit_op->src;
+
/* Jumping from Y-Plane to Chroma Plane */
/* first pixel addr calculation */
- mdp_adjust_start_addr(blit_op, &blit_op->src, sv_slice, sh_slice, 0);
- mdp_adjust_start_addr(blit_op, &blit_op->bg, dv_slice, dh_slice, 1);
- mdp_adjust_start_addr(blit_op, &blit_op->dst, dv_slice, dh_slice, 2);
+ mdp_adjust_start_addr(blit_op, &blit_op->src, sv_slice,
+ sh_slice, LAYER_FG);
+ mdp_adjust_start_addr(blit_op, &blit_op->bg, dv_slice,
+ dh_slice, LAYER_BG);
+ mdp_adjust_start_addr(blit_op, &blit_op->dst, dv_slice,
+ dh_slice, LAYER_FB);
config_ppp_scale(blit_op, &ppp_operation_reg);
- config_ppp_blend(blit_op, &ppp_operation_reg);
+ config_ppp_blend(blit_op, &ppp_operation_reg, is_yuv_smart_blit,
+ bg_alpha);
config_ppp_src(&blit_op->src, yuv2rgb);
config_ppp_out(&blit_op->dst, yuv2rgb);
+
+	/* Cache Smart blit BG alpha and MDP OP values */
+ if (blit_op->mdp_op & MDPOP_SMART_BLIT) {
+ bg_alpha = blit_op->blend.const_alpha;
+ bg_mdp_ops = blit_op->mdp_op;
+ } else {
+ bg_alpha = 0;
+ bg_mdp_ops = 0;
+ }
+ pr_debug("BLIT FG Param Fmt %d (x %d,y %d,w %d,h %d), ",
+ blit_op->src.color_fmt, blit_op->src.prop.x,
+ blit_op->src.prop.y, blit_op->src.prop.width,
+ blit_op->src.prop.height);
+ pr_debug("ROI(x %d,y %d,w %d, h %d) ",
+ blit_op->src.roi.x, blit_op->src.roi.y,
+ blit_op->src.roi.width, blit_op->src.roi.height);
+ pr_debug("Addr_P0 %p, Stride S0 %d Addr_P1 %p, Stride S1 %d\n",
+ blit_op->src.p0, blit_op->src.stride0,
+ blit_op->src.p1, blit_op->src.stride1);
+
+ if (blit_op->bg.p0 != blit_op->dst.p0) {
+ pr_debug("BLIT BG Param Fmt %d (x %d,y %d,w %d,h %d), ",
+ blit_op->bg.color_fmt, blit_op->bg.prop.x,
+ blit_op->bg.prop.y, blit_op->bg.prop.width,
+ blit_op->bg.prop.height);
+ pr_debug("ROI(x %d,y %d, w %d, h %d) ",
+ blit_op->bg.roi.x, blit_op->bg.roi.y,
+ blit_op->bg.roi.width, blit_op->bg.roi.height);
+ pr_debug("Addr %p, Stride S0 %d Addr_P1 %p, Stride S1 %d\n",
+ blit_op->bg.p0, blit_op->bg.stride0,
+ blit_op->bg.p1, blit_op->bg.stride1);
+ }
+ pr_debug("BLIT FB Param Fmt %d (x %d,y %d,w %d,h %d), ",
+ blit_op->dst.color_fmt, blit_op->dst.prop.x,
+ blit_op->dst.prop.y, blit_op->dst.prop.width,
+ blit_op->dst.prop.height);
+ pr_debug("ROI(x %d,y %d, w %d, h %d) ",
+ blit_op->dst.roi.x, blit_op->dst.roi.y,
+ blit_op->dst.roi.width, blit_op->dst.roi.height);
+ pr_debug("Addr %p, Stride S0 %d Addr_P1 %p, Stride S1 %d\n",
+ blit_op->dst.p0, blit_op->src.stride0,
+ blit_op->dst.p1, blit_op->dst.stride1);
+
PPP_WRITEL(ppp_operation_reg, MDP3_PPP_OP_MODE);
mb();
return 0;
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index 83fb479fe1c5..f92f3ae40d74 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/irqreturn.h>
+#include <linux/irqdomain.h>
#include <linux/mdss_io_util.h>
#include <linux/msm-bus.h>
@@ -54,9 +55,9 @@ enum mdss_iommu_domain_type {
enum mdss_bus_vote_type {
VOTE_INDEX_DISABLE,
- VOTE_INDEX_19_MHZ,
- VOTE_INDEX_40_MHZ,
- VOTE_INDEX_80_MHZ,
+ VOTE_INDEX_LOW,
+ VOTE_INDEX_MID,
+ VOTE_INDEX_HIGH,
VOTE_INDEX_MAX,
};
@@ -128,6 +129,7 @@ enum mdss_hw_index {
MDSS_HW_DSI1,
MDSS_HW_HDMI,
MDSS_HW_EDP,
+ MDSS_HW_MISC,
MDSS_MAX_HW_BLK
};
@@ -159,6 +161,7 @@ enum mdss_hw_quirk {
MDSS_QUIRK_DMA_BI_DIR,
MDSS_QUIRK_MIN_BUS_VOTE,
MDSS_QUIRK_FMT_PACK_PATTERN,
+ MDSS_QUIRK_NEED_SECURE_MAP,
MDSS_QUIRK_MAX,
};
@@ -316,6 +319,7 @@ struct mdss_data_type {
u32 default_ot_rd_limit;
u32 default_ot_wr_limit;
+ struct irq_domain *irq_domain;
u32 mdp_irq_mask;
u32 mdp_hist_irq_mask;
@@ -355,6 +359,7 @@ struct mdss_data_type {
u32 curr_bw_uc_idx;
u32 ao_bw_uc_idx; /* active only idx */
struct msm_bus_scale_pdata *bus_scale_table;
+ struct msm_bus_scale_pdata *reg_bus_scale_table;
u32 max_bw_low;
u32 max_bw_high;
u32 max_bw_per_pipe;
@@ -525,6 +530,7 @@ struct mdss_util_intf {
int (*bus_scale_set_quota)(int client, u64 ab_quota, u64 ib_quota);
int (*panel_intf_status)(u32 disp_num, u32 intf_type);
struct mdss_panel_cfg* (*panel_intf_type)(int intf_val);
+ int (*dyn_clk_gating_ctrl)(int enable);
};
struct mdss_util_intf *mdss_get_util_intf(void);
diff --git a/drivers/video/fbdev/msm/mdss_compat_utils.c b/drivers/video/fbdev/msm/mdss_compat_utils.c
index 3bc5de64941f..e391a5aaa45d 100644
--- a/drivers/video/fbdev/msm/mdss_compat_utils.c
+++ b/drivers/video/fbdev/msm/mdss_compat_utils.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 1994 Martin Schaller
*
* 2001 - Documented with DocBook
@@ -445,8 +445,8 @@ static int __compat_async_position_update(struct fb_info *info,
update_pos.input_layer_cnt = update_pos32.input_layer_cnt;
layer_cnt = update_pos32.input_layer_cnt;
- if (!layer_cnt) {
- pr_err("no async layer to update\n");
+ if ((!layer_cnt) || (layer_cnt > MAX_LAYER_COUNT)) {
+ pr_err("invalid async layers :%d to update\n", layer_cnt);
return -EINVAL;
}
diff --git a/drivers/video/fbdev/msm/mdss_dba_utils.c b/drivers/video/fbdev/msm/mdss_dba_utils.c
index 65874e5ab3b8..b14a83e863ab 100644
--- a/drivers/video/fbdev/msm/mdss_dba_utils.c
+++ b/drivers/video/fbdev/msm/mdss_dba_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -300,6 +300,7 @@ static void mdss_dba_utils_dba_cb(void *data, enum msm_dba_callback_event event)
bool operands_present = false;
u32 no_of_operands, size, i;
u32 operands_offset = MAX_CEC_FRAME_SIZE - MAX_OPERAND_SIZE;
+ struct msm_hdmi_audio_edid_blk blk;
if (!udata) {
pr_err("Invalid data\n");
@@ -319,10 +320,14 @@ static void mdss_dba_utils_dba_cb(void *data, enum msm_dba_callback_event event)
ret = udata->ops.get_raw_edid(udata->dba_data,
udata->edid_buf_size, udata->edid_buf, 0);
- if (!ret)
+ if (!ret) {
hdmi_edid_parser(udata->edid_data);
- else
+ hdmi_edid_get_audio_blk(udata->edid_data, &blk);
+ udata->ops.set_audio_block(udata->dba_data,
+ sizeof(blk), &blk);
+ } else {
pr_err("failed to get edid%d\n", ret);
+ }
}
if (pluggable) {
@@ -676,13 +681,6 @@ void *mdss_dba_utils_init(struct mdss_dba_utils_init_data *uid)
udata->kobj = uid->kobj;
udata->pinfo = uid->pinfo;
- /* register display and audio switch devices */
- ret = mdss_dba_utils_init_switch_dev(udata, uid->fb_node);
- if (ret) {
- pr_err("switch dev registration failed\n");
- goto error;
- }
-
/* Initialize EDID feature */
edid_init_data.kobj = uid->kobj;
edid_init_data.ds_data.ds_registered = true;
@@ -738,10 +736,18 @@ void *mdss_dba_utils_init(struct mdss_dba_utils_init_data *uid)
* this explicit calls to bridge chip driver.
*/
if (!uid->pinfo->is_pluggable) {
- if (udata->ops.power_on)
+ if (udata->ops.power_on && !(uid->cont_splash_enabled))
udata->ops.power_on(udata->dba_data, true, 0);
if (udata->ops.check_hpd)
udata->ops.check_hpd(udata->dba_data, 0);
+ } else {
+ /* register display and audio switch devices */
+ ret = mdss_dba_utils_init_switch_dev(udata,
+ uid->fb_node);
+ if (ret) {
+ pr_err("switch dev registration failed\n");
+ goto error;
+ }
}
}
diff --git a/drivers/video/fbdev/msm/mdss_dba_utils.h b/drivers/video/fbdev/msm/mdss_dba_utils.h
index cf43d2def7c3..70763af953f2 100644
--- a/drivers/video/fbdev/msm/mdss_dba_utils.h
+++ b/drivers/video/fbdev/msm/mdss_dba_utils.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
* @chip_name: Name of the device registered with DBA
* @client_name: Name of the client registering with DBA
* @pinfo: Detailed panel information
+ * @cont_splash_enabled: Flag to check if cont splash was enabled on bridge
*
* This structure's instance is needed to be passed as parameter
* to register API to let the DBA utils module configure and
@@ -36,6 +37,7 @@ struct mdss_dba_utils_init_data {
char *chip_name;
char *client_name;
struct mdss_panel_info *pinfo;
+ bool cont_splash_enabled;
};
int mdss_dba_utils_video_on(void *data, struct mdss_panel_info *pinfo);
diff --git a/drivers/video/fbdev/msm/mdss_debug.c b/drivers/video/fbdev/msm/mdss_debug.c
index 9ba51bcc900a..82144ec653f9 100644
--- a/drivers/video/fbdev/msm/mdss_debug.c
+++ b/drivers/video/fbdev/msm/mdss_debug.c
@@ -181,7 +181,8 @@ static ssize_t panel_debug_base_reg_write(struct file *file,
if (mdata->debug_inf.debug_enable_clock)
mdata->debug_inf.debug_enable_clock(1);
- mdss_dsi_cmdlist_put(ctrl_pdata, &cmdreq);
+ if (ctrl_pdata->ctrl_state & CTRL_STATE_PANEL_INIT)
+ mdss_dsi_cmdlist_put(ctrl_pdata, &cmdreq);
if (mdata->debug_inf.debug_enable_clock)
mdata->debug_inf.debug_enable_clock(0);
@@ -994,7 +995,7 @@ static ssize_t mdss_debug_perf_bw_limit_write(struct file *file,
{
struct mdss_data_type *mdata = file->private_data;
char buf[32];
- u32 mode, val;
+ u32 mode = 0, val = 0;
u32 cnt;
struct mdss_max_bw_settings *temp_settings;
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 8cec9988f018..646f75653583 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -934,6 +934,9 @@ static int mdss_dsi_debugfs_init(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
}
pdata = &ctrl_pdata->panel_data;
+ if (!pdata)
+ return -EINVAL;
+
panel_info = pdata->panel_info;
rc = mdss_dsi_debugfs_setup(pdata, panel_info.debugfs_info->root);
if (rc) {
@@ -1129,6 +1132,8 @@ static int mdss_dsi_off(struct mdss_panel_data *pdata, int power_state)
/* disable DSI phy */
mdss_dsi_phy_disable(ctrl_pdata);
}
+ ctrl_pdata->ctrl_state &= ~CTRL_STATE_DSI_ACTIVE;
+
mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_OFF);
@@ -1289,8 +1294,6 @@ int mdss_dsi_on(struct mdss_panel_data *pdata)
* to be restored to allow dcs command be
* sent to panel
*/
- mdss_dsi_read_hw_revision(ctrl_pdata);
- mdss_dsi_read_phy_revision(ctrl_pdata);
mdss_dsi_restore_intr_mask(ctrl_pdata);
pr_debug("%s: panel already on\n", __func__);
goto end;
@@ -1321,10 +1324,6 @@ int mdss_dsi_on(struct mdss_panel_data *pdata)
mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_ON);
- /* Populate DSI Controller and PHY revision */
- mdss_dsi_read_hw_revision(ctrl_pdata);
- mdss_dsi_read_phy_revision(ctrl_pdata);
-
/*
* If ULPS during suspend feature is enabled, then DSI PHY was
* left on during suspend. In this case, we do not need to reset/init
@@ -1337,6 +1336,7 @@ int mdss_dsi_on(struct mdss_panel_data *pdata)
mdss_dsi_phy_init(ctrl_pdata);
mdss_dsi_ctrl_setup(ctrl_pdata);
}
+ ctrl_pdata->ctrl_state |= CTRL_STATE_DSI_ACTIVE;
/* DSI link clocks need to be on prior to ctrl sw reset */
mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
@@ -1453,9 +1453,9 @@ static int mdss_dsi_unblank(struct mdss_panel_data *pdata)
panel_data);
mipi = &pdata->panel_info.mipi;
- pr_debug("%s+: ctrl=%p ndx=%d cur_blank_state=%d ctrl_state=%x\n",
+ pr_debug("%s+: ctrl=%p ndx=%d cur_power_state=%d ctrl_state=%x\n",
__func__, ctrl_pdata, ctrl_pdata->ndx,
- pdata->panel_info.blank_state, ctrl_pdata->ctrl_state);
+ pdata->panel_info.panel_power_state, ctrl_pdata->ctrl_state);
mdss_dsi_pm_qos_update_request(DSI_DISABLE_PC_LATENCY);
@@ -1468,7 +1468,7 @@ static int mdss_dsi_unblank(struct mdss_panel_data *pdata)
mdss_dsi_clk_ctrl(sctrl, sctrl->dsi_clk_handle,
MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
- if (pdata->panel_info.blank_state == MDSS_PANEL_BLANK_LOW_POWER) {
+ if (mdss_dsi_is_panel_on_lp(pdata)) {
pr_debug("%s: dsi_unblank with panel always on\n", __func__);
if (ctrl_pdata->low_power_config)
ret = ctrl_pdata->low_power_config(pdata, false);
@@ -1486,7 +1486,6 @@ static int mdss_dsi_unblank(struct mdss_panel_data *pdata)
}
ATRACE_END("dsi_panel_on");
}
- ctrl_pdata->ctrl_state |= CTRL_STATE_PANEL_INIT;
}
if ((pdata->panel_info.type == MIPI_CMD_PANEL) &&
@@ -1496,6 +1495,8 @@ static int mdss_dsi_unblank(struct mdss_panel_data *pdata)
enable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
}
+ ctrl_pdata->ctrl_state |= CTRL_STATE_PANEL_INIT;
+
error:
mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_OFF);
@@ -1702,7 +1703,7 @@ static void __mdss_dsi_dyn_refresh_config(
struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
int reg_data = 0;
- u32 phy_rev = mdss_dsi_get_phy_revision(ctrl_pdata);
+ u32 phy_rev = ctrl_pdata->shared_data->phy_rev;
/* configure only for master control in split display */
if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data) &&
@@ -1815,7 +1816,7 @@ static int __mdss_dsi_dfps_calc_clks(struct mdss_panel_data *pdata,
}
pinfo = &pdata->panel_info;
- phy_rev = mdss_dsi_get_phy_revision(ctrl_pdata);
+ phy_rev = ctrl_pdata->shared_data->phy_rev;
rc = mdss_dsi_clk_div_config
(&ctrl_pdata->panel_data.panel_info, new_fps);
@@ -2086,7 +2087,7 @@ static int mdss_dsi_dfps_config(struct mdss_panel_data *pdata, int new_fps)
return -EINVAL;
}
- phy_rev = mdss_dsi_get_phy_revision(ctrl_pdata);
+ phy_rev = ctrl_pdata->shared_data->phy_rev;
pinfo = &pdata->panel_info;
/* get the fps configured in HW */
@@ -2260,12 +2261,19 @@ static void mdss_dsi_dba_work(struct work_struct *work)
memset(&utils_init_data, 0, sizeof(utils_init_data));
- utils_init_data.chip_name = "adv7533";
+ utils_init_data.chip_name = ctrl_pdata->bridge_name;
utils_init_data.client_name = "dsi";
- utils_init_data.instance_id = 0;
+ utils_init_data.instance_id = ctrl_pdata->bridge_index;
utils_init_data.fb_node = ctrl_pdata->fb_node;
utils_init_data.kobj = ctrl_pdata->kobj;
utils_init_data.pinfo = pinfo;
+ if (ctrl_pdata->mdss_util)
+ utils_init_data.cont_splash_enabled =
+ ctrl_pdata->mdss_util->panel_intf_status(
+ ctrl_pdata->panel_data.panel_info.pdest,
+ MDSS_PANEL_INTF_DSI) ? true : false;
+ else
+ utils_init_data.cont_splash_enabled = false;
pinfo->dba_data = mdss_dba_utils_init(&utils_init_data);
@@ -2339,6 +2347,15 @@ int mdss_dsi_register_recovery_handler(struct mdss_dsi_ctrl_pdata *ctrl,
return 0;
}
+static int mdss_dsi_register_mdp_callback(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct mdss_intf_recovery *mdp_callback)
+{
+ mutex_lock(&ctrl->mutex);
+ ctrl->mdp_callback = mdp_callback;
+ mutex_unlock(&ctrl->mutex);
+ return 0;
+}
+
static struct device_node *mdss_dsi_get_fb_node_cb(struct platform_device *pdev)
{
struct device_node *fb_node;
@@ -2408,8 +2425,6 @@ static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
rc = mdss_dsi_clk_refresh(pdata,
ctrl_pdata->update_phy_timing);
- mdss_dsi_get_hw_revision(ctrl_pdata);
- mdss_dsi_get_phy_revision(ctrl_pdata);
rc = mdss_dsi_on(pdata);
mdss_dsi_op_mode_config(pdata->panel_info.mipi.mode,
pdata);
@@ -2491,6 +2506,10 @@ static int mdss_dsi_event_handler(struct mdss_panel_data *pdata,
rc = mdss_dsi_register_recovery_handler(ctrl_pdata,
(struct mdss_intf_recovery *)arg);
break;
+ case MDSS_EVENT_REGISTER_MDP_CALLBACK:
+ rc = mdss_dsi_register_mdp_callback(ctrl_pdata,
+ (struct mdss_intf_recovery *)arg);
+ break;
case MDSS_EVENT_DSI_DYNAMIC_SWITCH:
mode = (u32)(unsigned long) arg;
mdss_dsi_switch_mode(pdata, mode);
@@ -2862,7 +2881,7 @@ static int mdss_dsi_cont_splash_config(struct mdss_panel_info *pinfo,
struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
void *clk_handle;
- int rc = 0, data;
+ int rc = 0;
if (pinfo->cont_splash_enabled) {
rc = mdss_dsi_panel_power_ctrl(&(ctrl_pdata->panel_data),
@@ -2873,7 +2892,8 @@ static int mdss_dsi_cont_splash_config(struct mdss_panel_info *pinfo,
}
if (ctrl_pdata->bklt_ctrl == BL_PWM)
mdss_dsi_panel_pwm_enable(ctrl_pdata);
- pinfo->blank_state = MDSS_PANEL_BLANK_UNBLANK;
+ ctrl_pdata->ctrl_state |= (CTRL_STATE_PANEL_INIT |
+ CTRL_STATE_MDP_ACTIVE | CTRL_STATE_DSI_ACTIVE);
if (ctrl_pdata->panel_data.panel_info.type == MIPI_CMD_PANEL)
clk_handle = ctrl_pdata->mdp_clk_handle;
else
@@ -2881,23 +2901,55 @@ static int mdss_dsi_cont_splash_config(struct mdss_panel_info *pinfo,
mdss_dsi_clk_ctrl(ctrl_pdata, clk_handle,
MDSS_DSI_ALL_CLKS, MDSS_DSI_CLK_ON);
+ mdss_dsi_read_hw_revision(ctrl_pdata);
+ mdss_dsi_read_phy_revision(ctrl_pdata);
ctrl_pdata->is_phyreg_enabled = 1;
- mdss_dsi_get_hw_revision(ctrl_pdata);
- if ((ctrl_pdata->shared_data->hw_rev >= MDSS_DSI_HW_REV_103)
- && (pinfo->type == MIPI_CMD_PANEL)) {
- data = MIPI_INP(ctrl_pdata->ctrl_base + 0x1b8);
- if (data & BIT(16))
- ctrl_pdata->burst_mode_enabled = true;
- }
- ctrl_pdata->ctrl_state |=
- (CTRL_STATE_PANEL_INIT | CTRL_STATE_MDP_ACTIVE);
+ if (pinfo->type == MIPI_CMD_PANEL)
+ mdss_dsi_set_burst_mode(ctrl_pdata);
} else {
+ /* Turn on the clocks to read the DSI and PHY revision */
+ mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+ MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_ON);
+ mdss_dsi_read_hw_revision(ctrl_pdata);
+ mdss_dsi_read_phy_revision(ctrl_pdata);
+ mdss_dsi_clk_ctrl(ctrl_pdata, ctrl_pdata->dsi_clk_handle,
+ MDSS_DSI_CORE_CLK, MDSS_DSI_CLK_OFF);
pinfo->panel_power_state = MDSS_PANEL_POWER_OFF;
}
return rc;
}
+static int mdss_dsi_get_bridge_chip_params(struct mdss_panel_info *pinfo,
+ struct mdss_dsi_ctrl_pdata *ctrl_pdata,
+ struct platform_device *pdev)
+{
+ int rc = 0;
+ u32 temp_val = 0;
+
+ if (!ctrl_pdata || !pdev || !pinfo) {
+ pr_err("%s: Invalid Params ctrl_pdata=%p, pdev=%p\n", __func__,
+ ctrl_pdata, pdev);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ if (pinfo->is_dba_panel) {
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,bridge-index", &temp_val);
+ if (rc) {
+ pr_err("%s:%d Unable to read qcom,bridge-index, ret=%d\n",
+ __func__, __LINE__, rc);
+ goto end;
+ }
+ pr_debug("%s: DT property %s is %X\n", __func__,
+ "qcom,bridge-index", temp_val);
+ ctrl_pdata->bridge_index = temp_val;
+ }
+end:
+ return rc;
+}
+
static int mdss_dsi_ctrl_probe(struct platform_device *pdev)
{
int rc = 0;
@@ -3032,6 +3084,12 @@ static int mdss_dsi_ctrl_probe(struct platform_device *pdev)
disable_irq(gpio_to_irq(ctrl_pdata->disp_te_gpio));
}
+ rc = mdss_dsi_get_bridge_chip_params(pinfo, ctrl_pdata, pdev);
+ if (rc) {
+ pr_err("%s: Failed to get bridge params\n", __func__);
+ goto error_shadow_clk_deinit;
+ }
+
ctrl_pdata->workq = create_workqueue("mdss_dsi_dba");
if (!ctrl_pdata->workq) {
pr_err("%s: Error creating workqueue\n", __func__);
@@ -3041,7 +3099,9 @@ static int mdss_dsi_ctrl_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&ctrl_pdata->dba_work, mdss_dsi_dba_work);
- pr_debug("%s: Dsi Ctrl->%d initialized\n", __func__, index);
+ pr_info("%s: Dsi Ctrl->%d initialized, DSI rev:0x%x, PHY rev:0x%x\n",
+ __func__, index, ctrl_pdata->shared_data->hw_rev,
+ ctrl_pdata->shared_data->phy_rev);
if (index == 0)
ctrl_pdata->shared_data->dsi0_active = true;
diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h
index f3f708448ebd..1606f0c72a13 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.h
+++ b/drivers/video/fbdev/msm/mdss_dsi.h
@@ -135,9 +135,19 @@ enum dsi_pm_type {
DSI_MAX_PM
};
+/*
+ * DSI controller states.
+ * CTRL_STATE_UNKNOWN - Unknown state of DSI controller.
+ * CTRL_STATE_PANEL_INIT - State specifies that the panel is initialized.
+ * CTRL_STATE_MDP_ACTIVE - State specifies that MDP is ready to send
+ * data to DSI.
+ * CTRL_STATE_DSI_ACTIVE - State specifies that DSI controller/PHY is
+ * initialized.
+ */
#define CTRL_STATE_UNKNOWN 0x00
#define CTRL_STATE_PANEL_INIT BIT(0)
#define CTRL_STATE_MDP_ACTIVE BIT(1)
+#define CTRL_STATE_DSI_ACTIVE BIT(2)
#define DSI_NON_BURST_SYNCH_PULSE 0
#define DSI_NON_BURST_SYNCH_EVENT 1
@@ -376,6 +386,7 @@ struct dsi_err_container {
#define MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL 0x02a8
#define MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL2 0x02ac
#define MDSS_DSI_COMMAND_COMPRESSION_MODE_CTRL3 0x02b0
+#define MSM_DBA_CHIP_NAME_MAX_LEN 20
struct mdss_dsi_ctrl_pdata {
int ndx; /* panel_num */
@@ -445,6 +456,7 @@ struct mdss_dsi_ctrl_pdata {
u32 dsi_irq_mask;
struct mdss_hw *dsi_hw;
struct mdss_intf_recovery *recovery;
+ struct mdss_intf_recovery *mdp_callback;
struct dsi_panel_cmds on_cmds;
struct dsi_panel_cmds post_dms_on_cmds;
@@ -502,6 +514,7 @@ struct mdss_dsi_ctrl_pdata {
bool cmd_cfg_restore;
bool do_unicast;
+ bool idle_enabled;
int horizontal_idle_cnt;
struct panel_horizontal_idle *line_idle;
struct mdss_util_intf *mdss_util;
@@ -516,16 +529,20 @@ struct mdss_dsi_ctrl_pdata {
struct dsi_err_container err_cont;
-
- bool ds_registered;
-
struct kobject *kobj;
int fb_node;
+ /* DBA data */
struct workqueue_struct *workq;
struct delayed_work dba_work;
+ char bridge_name[MSM_DBA_CHIP_NAME_MAX_LEN];
+ uint32_t bridge_index;
+ bool ds_registered;
+
bool timing_db_mode;
bool update_phy_timing; /* flag to recalculate PHY timings */
+
+ bool phy_power_off;
};
struct dsi_status_data {
@@ -552,7 +569,7 @@ void mdss_dsi_cmd_mode_ctrl(int enable);
void mdp4_dsi_cmd_trigger(void);
void mdss_dsi_cmd_mdp_start(struct mdss_dsi_ctrl_pdata *ctrl);
void mdss_dsi_cmd_bta_sw_trigger(struct mdss_panel_data *pdata);
-void mdss_dsi_ack_err_status(struct mdss_dsi_ctrl_pdata *ctrl);
+bool mdss_dsi_ack_err_status(struct mdss_dsi_ctrl_pdata *ctrl);
int mdss_dsi_clk_ctrl(struct mdss_dsi_ctrl_pdata *ctrl, void *clk_handle,
enum mdss_dsi_clk_type clk_type, enum mdss_dsi_clk_state clk_state);
void mdss_dsi_clk_req(struct mdss_dsi_ctrl_pdata *ctrl,
@@ -614,9 +631,8 @@ int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl);
int mdss_dsi_reg_status_check(struct mdss_dsi_ctrl_pdata *ctrl);
bool __mdss_dsi_clk_enabled(struct mdss_dsi_ctrl_pdata *ctrl, u8 clk_type);
void mdss_dsi_ctrl_setup(struct mdss_dsi_ctrl_pdata *ctrl);
-void mdss_dsi_dln0_phy_err(struct mdss_dsi_ctrl_pdata *ctrl, bool print_en);
+bool mdss_dsi_dln0_phy_err(struct mdss_dsi_ctrl_pdata *ctrl, bool print_en);
void mdss_dsi_lp_cd_rx(struct mdss_dsi_ctrl_pdata *ctrl);
-void mdss_dsi_get_hw_revision(struct mdss_dsi_ctrl_pdata *ctrl);
void mdss_dsi_read_phy_revision(struct mdss_dsi_ctrl_pdata *ctrl);
int mdss_dsi_panel_cmd_read(struct mdss_dsi_ctrl_pdata *ctrl, char cmd0,
char cmd1, void (*fxn)(int), char *rbuf, int len);
@@ -639,6 +655,7 @@ void mdss_dsi_panel_dsc_pps_send(struct mdss_dsi_ctrl_pdata *ctrl,
void mdss_dsi_dsc_config(struct mdss_dsi_ctrl_pdata *ctrl,
struct dsc_desc *dsc);
void mdss_dsi_dfps_config_8996(struct mdss_dsi_ctrl_pdata *ctrl);
+void mdss_dsi_set_burst_mode(struct mdss_dsi_ctrl_pdata *ctrl);
static inline const char *__mdss_dsi_pm_name(enum dsi_pm_type module)
{
diff --git a/drivers/video/fbdev/msm/mdss_dsi_clk.c b/drivers/video/fbdev/msm/mdss_dsi_clk.c
index 0142ba8a5c5c..727d6707444c 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_clk.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_clk.c
@@ -99,7 +99,7 @@ static int dsi_core_clk_start(struct dsi_core_clks *c_clks)
}
}
- rc = mdss_update_reg_bus_vote(mngr->reg_bus_clt, VOTE_INDEX_19_MHZ);
+ rc = mdss_update_reg_bus_vote(mngr->reg_bus_clt, VOTE_INDEX_LOW);
if (rc) {
pr_err("failed to vote for reg bus\n");
goto disable_mmss_misc_clk;
@@ -876,7 +876,7 @@ void *mdss_dsi_clk_init(struct mdss_dsi_clk_info *info)
mngr->post_clkoff_cb = info->post_clkoff_cb;
mngr->priv_data = info->priv_data;
mngr->reg_bus_clt = mdss_reg_bus_vote_client_create(info->name);
- if (IS_ERR_OR_NULL(mngr->reg_bus_clt)) {
+ if (IS_ERR(mngr->reg_bus_clt)) {
pr_err("Unable to get handle for reg bus vote\n");
kfree(mngr);
mngr = ERR_PTR(-EINVAL);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index 940b4b9db95e..665e3d03110f 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -277,23 +277,11 @@ void mdss_dsi_cmd_test_pattern(struct mdss_dsi_ctrl_pdata *ctrl)
void mdss_dsi_read_hw_revision(struct mdss_dsi_ctrl_pdata *ctrl)
{
- /* clock must be on */
- ctrl->shared_data->hw_rev = MIPI_INP(ctrl->ctrl_base);
-}
-
-void mdss_dsi_get_hw_revision(struct mdss_dsi_ctrl_pdata *ctrl)
-{
if (ctrl->shared_data->hw_rev)
return;
- mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_CORE_CLK,
- MDSS_DSI_CLK_ON);
+ /* clock must be on */
ctrl->shared_data->hw_rev = MIPI_INP(ctrl->ctrl_base);
- mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_CORE_CLK,
- MDSS_DSI_CLK_OFF);
-
- pr_debug("%s: ndx=%d hw_rev=%x\n", __func__,
- ctrl->ndx, ctrl->shared_data->hw_rev);
}
void mdss_dsi_read_phy_revision(struct mdss_dsi_ctrl_pdata *ctrl)
@@ -1243,6 +1231,32 @@ void mdss_dsi_dsc_config(struct mdss_dsi_ctrl_pdata *ctrl, struct dsc_desc *dsc)
MIPI_OUTP((ctrl->ctrl_base) + offset, data);
}
+void mdss_dsi_set_burst_mode(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ u32 data;
+
+ if (ctrl->shared_data->hw_rev < MDSS_DSI_HW_REV_103)
+ return;
+
+ data = MIPI_INP(ctrl->ctrl_base + 0x1b8);
+
+ /*
+ * idle and burst mode are mutually exclusive features,
+ * so disable burst mode if idle has been configured for
+ * the panel, otherwise enable the feature.
+ */
+ if (ctrl->idle_enabled)
+ data &= ~BIT(16); /* disable burst mode */
+ else
+ data |= BIT(16); /* enable burst mode */
+
+ ctrl->burst_mode_enabled = !ctrl->idle_enabled;
+
+ MIPI_OUTP((ctrl->ctrl_base + 0x1b8), data);
+ pr_debug("%s: burst=%d\n", __func__, ctrl->burst_mode_enabled);
+
+}
+
static void mdss_dsi_mode_setup(struct mdss_panel_data *pdata)
{
struct mdss_dsi_ctrl_pdata *ctrl_pdata = NULL;
@@ -1353,13 +1367,7 @@ static void mdss_dsi_mode_setup(struct mdss_panel_data *pdata)
MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x2b4, data);
}
- /* Enable frame transfer in burst mode */
- if (ctrl_pdata->shared_data->hw_rev >= MDSS_DSI_HW_REV_103) {
- data = MIPI_INP(ctrl_pdata->ctrl_base + 0x1b8);
- data = data | BIT(16);
- MIPI_OUTP((ctrl_pdata->ctrl_base + 0x1b8), data);
- ctrl_pdata->burst_mode_enabled = 1;
- }
+ mdss_dsi_set_burst_mode(ctrl_pdata);
/* DSI_COMMAND_MODE_MDP_STREAM_CTRL */
MIPI_OUTP((ctrl_pdata->ctrl_base) + 0x60, stream_ctrl);
@@ -1488,8 +1496,6 @@ static int mdss_dsi_cmd_dma_tpg_tx(struct mdss_dsi_ctrl_pdata *ctrl,
return -EINVAL;
}
- mdss_dsi_get_hw_revision(ctrl);
-
if (ctrl->shared_data->hw_rev < MDSS_DSI_HW_REV_103) {
pr_err("CMD DMA TPG not supported for this DSI version\n");
return -EINVAL;
@@ -2228,7 +2234,7 @@ int mdss_dsi_en_wait4dynamic_done(struct mdss_dsi_ctrl_pdata *ctrl)
unsigned long flag;
u32 data;
int rc = 0;
- struct mdss_dsi_ctrl_pdata *sctrl_pdata;
+ struct mdss_dsi_ctrl_pdata *sctrl_pdata = NULL;
/* DSI_INTL_CTRL */
data = MIPI_INP((ctrl->ctrl_base) + 0x0110);
@@ -2250,7 +2256,12 @@ int mdss_dsi_en_wait4dynamic_done(struct mdss_dsi_ctrl_pdata *ctrl)
MIPI_OUTP((ctrl->ctrl_base) + DSI_DYNAMIC_REFRESH_CTRL,
(BIT(13) | BIT(8) | BIT(0)));
- sctrl_pdata = mdss_dsi_get_ctrl_clk_slave();
+ /*
+ * Configure DYNAMIC_REFRESH_CTRL for second controller only
+ * for split DSI cases.
+ */
+ if (mdss_dsi_is_ctrl_clk_master(ctrl))
+ sctrl_pdata = mdss_dsi_get_ctrl_clk_slave();
if (sctrl_pdata)
MIPI_OUTP((sctrl_pdata->ctrl_base) + DSI_DYNAMIC_REFRESH_CTRL,
@@ -2468,6 +2479,49 @@ int mdss_dsi_cmdlist_rx(struct mdss_dsi_ctrl_pdata *ctrl,
return len;
}
+static inline bool mdss_dsi_delay_cmd(struct mdss_dsi_ctrl_pdata *ctrl,
+ bool from_mdp)
+{
+ unsigned long flags;
+ bool mdp_busy = false;
+ bool need_wait = false;
+
+ if (!ctrl->mdp_callback)
+ goto exit;
+
+ /* delay only for split dsi, cmd mode and burst mode enabled cases */
+ if (!mdss_dsi_is_hw_config_split(ctrl->shared_data) ||
+ !(ctrl->panel_mode == DSI_CMD_MODE) ||
+ !ctrl->burst_mode_enabled)
+ goto exit;
+
+ /* delay only if cmd is not from mdp and panel has been initialized */
+ if (from_mdp || !(ctrl->ctrl_state & CTRL_STATE_PANEL_INIT))
+ goto exit;
+
+ /* if broadcast enabled, apply delay only if this is the ctrl trigger */
+ if (mdss_dsi_sync_wait_enable(ctrl) &&
+ !mdss_dsi_sync_wait_trigger(ctrl))
+ goto exit;
+
+ spin_lock_irqsave(&ctrl->mdp_lock, flags);
+ if (ctrl->mdp_busy == true)
+ mdp_busy = true;
+ spin_unlock_irqrestore(&ctrl->mdp_lock, flags);
+
+ /*
+ * apply delay only if:
+ * mdp_busy bool is set - kickoff is being scheduled by sw
+ * MDP_BUSY bit is not set - transfer is not on-going in hw yet
+ */
+ if (mdp_busy && !(MIPI_INP(ctrl->ctrl_base + 0x008) & BIT(2)))
+ need_wait = true;
+
+exit:
+ MDSS_XLOG(need_wait, from_mdp, mdp_busy);
+ return need_wait;
+}
+
int mdss_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp)
{
struct dcs_cmd_req *req;
@@ -2479,9 +2533,6 @@ int mdss_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp)
bool hs_req = false;
bool cmd_mutex_acquired = false;
- if (mdss_get_sd_client_cnt())
- return -EPERM;
-
if (from_mdp) { /* from mdp kickoff */
if (!ctrl->burst_mode_enabled) {
mutex_lock(&ctrl->cmd_mutex);
@@ -2509,7 +2560,20 @@ int mdss_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp)
mdss_dsi_cmd_mdp_busy(ctrl);
}
- mdss_dsi_get_hw_revision(ctrl);
+ /*
+ * if secure display session is enabled
+ * and DSI controller version is above 1.3.0,
+ * then send DSI commands using TPG FIFO.
+ */
+ if (mdss_get_sd_client_cnt() && req) {
+ if (ctrl->shared_data->hw_rev >= MDSS_DSI_HW_REV_103) {
+ req->flags |= CMD_REQ_DMA_TPG;
+ } else {
+ if (cmd_mutex_acquired)
+ mutex_unlock(&ctrl->cmd_mutex);
+ return -EPERM;
+ }
+ }
/* For DSI versions less than 1.3.0, CMD DMA TPG is not supported */
if (req && (ctrl->shared_data->hw_rev < MDSS_DSI_HW_REV_103))
@@ -2572,6 +2636,17 @@ int mdss_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp)
mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
MDSS_DSI_CLK_ON);
+ /*
+ * In ping pong split cases, check if we need to apply a
+ * delay for any commands that are not coming from
+ * mdp path
+ */
+ mutex_lock(&ctrl->mutex);
+ if (mdss_dsi_delay_cmd(ctrl, from_mdp))
+ ctrl->mdp_callback->fxn(ctrl->mdp_callback->data,
+ MDP_INTF_CALLBACK_DSI_WAIT);
+ mutex_unlock(&ctrl->mutex);
+
if (req->flags & CMD_REQ_HS_MODE)
mdss_dsi_set_tx_power_mode(0, &ctrl->panel_data);
@@ -2588,10 +2663,10 @@ int mdss_dsi_cmdlist_commit(struct mdss_dsi_ctrl_pdata *ctrl, int from_mdp)
ctrl->mdss_util->iommu_ctrl(0);
(void)mdss_dsi_bus_bandwidth_vote(ctrl->shared_data, false);
- mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
- MDSS_DSI_CLK_OFF);
}
+ mdss_dsi_clk_ctrl(ctrl, ctrl->dsi_clk_handle, MDSS_DSI_ALL_CLKS,
+ MDSS_DSI_CLK_OFF);
need_lock:
MDSS_XLOG(ctrl->ndx, from_mdp, ctrl->mdp_busy, current->pid,
@@ -2795,10 +2870,11 @@ static int dsi_event_thread(void *data)
return 0;
}
-void mdss_dsi_ack_err_status(struct mdss_dsi_ctrl_pdata *ctrl)
+bool mdss_dsi_ack_err_status(struct mdss_dsi_ctrl_pdata *ctrl)
{
u32 status;
unsigned char *base;
+ bool ret = false;
base = ctrl->ctrl_base;
@@ -2816,16 +2892,20 @@ void mdss_dsi_ack_err_status(struct mdss_dsi_ctrl_pdata *ctrl)
*/
if (ctrl->panel_data.panel_info.esd_check_enabled &&
(ctrl->status_mode == ESD_BTA) && (status & 0x1008000))
- return;
+ return false;
pr_err("%s: status=%x\n", __func__, status);
+ ret = true;
}
+
+ return ret;
}
-void mdss_dsi_timeout_status(struct mdss_dsi_ctrl_pdata *ctrl)
+static bool mdss_dsi_timeout_status(struct mdss_dsi_ctrl_pdata *ctrl)
{
u32 status;
unsigned char *base;
+ bool ret = false;
base = ctrl->ctrl_base;
@@ -2836,13 +2916,17 @@ void mdss_dsi_timeout_status(struct mdss_dsi_ctrl_pdata *ctrl)
if (status & 0x0110)
dsi_send_events(ctrl, DSI_EV_LP_RX_TIMEOUT, 0);
pr_err("%s: status=%x\n", __func__, status);
+ ret = true;
}
+
+ return ret;
}
-void mdss_dsi_dln0_phy_err(struct mdss_dsi_ctrl_pdata *ctrl, bool print_en)
+bool mdss_dsi_dln0_phy_err(struct mdss_dsi_ctrl_pdata *ctrl, bool print_en)
{
u32 status;
unsigned char *base;
+ bool ret = false;
base = ctrl->ctrl_base;
@@ -2853,13 +2937,17 @@ void mdss_dsi_dln0_phy_err(struct mdss_dsi_ctrl_pdata *ctrl, bool print_en)
if (print_en)
pr_err("%s: status=%x\n", __func__, status);
ctrl->err_cont.phy_err_cnt++;
+ ret = true;
}
+
+ return ret;
}
-void mdss_dsi_fifo_status(struct mdss_dsi_ctrl_pdata *ctrl)
+static bool mdss_dsi_fifo_status(struct mdss_dsi_ctrl_pdata *ctrl)
{
u32 status, isr;
unsigned char *base;
+ bool ret = false;
base = ctrl->ctrl_base;
@@ -2891,13 +2979,17 @@ void mdss_dsi_fifo_status(struct mdss_dsi_ctrl_pdata *ctrl)
if (status & 0x11110000) /* DLN_FIFO_EMPTY */
dsi_send_events(ctrl, DSI_EV_DSI_FIFO_EMPTY, 0);
ctrl->err_cont.fifo_err_cnt++;
+ ret = true;
}
+
+ return ret;
}
-void mdss_dsi_status(struct mdss_dsi_ctrl_pdata *ctrl)
+static bool mdss_dsi_status(struct mdss_dsi_ctrl_pdata *ctrl)
{
u32 status;
unsigned char *base;
+ bool ret = false;
base = ctrl->ctrl_base;
@@ -2906,13 +2998,17 @@ void mdss_dsi_status(struct mdss_dsi_ctrl_pdata *ctrl)
if (status & 0x80000000) { /* INTERLEAVE_OP_CONTENTION */
MIPI_OUTP(base + 0x0008, status);
pr_err("%s: status=%x\n", __func__, status);
+ ret = true;
}
+
+ return ret;
}
-void mdss_dsi_clk_status(struct mdss_dsi_ctrl_pdata *ctrl)
+static bool mdss_dsi_clk_status(struct mdss_dsi_ctrl_pdata *ctrl)
{
u32 status;
unsigned char *base;
+ bool ret = false;
base = ctrl->ctrl_base;
status = MIPI_INP(base + 0x0120);/* DSI_CLK_STATUS */
@@ -2921,7 +3017,10 @@ void mdss_dsi_clk_status(struct mdss_dsi_ctrl_pdata *ctrl)
MIPI_OUTP(base + 0x0120, status);
dsi_send_events(ctrl, DSI_EV_PLL_UNLOCKED, 0);
pr_err("%s: status=%x\n", __func__, status);
+ ret = true;
}
+
+ return ret;
}
static void __dsi_error_counter(struct dsi_err_container *err_container)
@@ -2950,18 +3049,27 @@ static void __dsi_error_counter(struct dsi_err_container *err_container)
void mdss_dsi_error(struct mdss_dsi_ctrl_pdata *ctrl)
{
- u32 intr;
+ u32 intr, mask;
+ bool err_handled = false;
+
+ /* Ignore the interrupt if the error intr mask is not set */
+ mask = MIPI_INP(ctrl->ctrl_base + 0x0110);
+ if (!(mask & DSI_INTR_ERROR_MASK)) {
+ pr_debug("%s: Ignore interrupt as error mask not set, 0x%x\n",
+ __func__, mask);
+ return;
+ }
/* disable dsi error interrupt */
mdss_dsi_err_intr_ctrl(ctrl, DSI_INTR_ERROR_MASK, 0);
/* DSI_ERR_INT_MASK0 */
- mdss_dsi_clk_status(ctrl); /* Mask0, 0x10000000 */
- mdss_dsi_fifo_status(ctrl); /* mask0, 0x133d00 */
- mdss_dsi_ack_err_status(ctrl); /* mask0, 0x01f */
- mdss_dsi_timeout_status(ctrl); /* mask0, 0x0e0 */
- mdss_dsi_status(ctrl); /* mask0, 0xc0100 */
- mdss_dsi_dln0_phy_err(ctrl, true); /* mask0, 0x3e00000 */
+ err_handled |= mdss_dsi_clk_status(ctrl); /* Mask0, 0x10000000 */
+ err_handled |= mdss_dsi_fifo_status(ctrl); /* mask0, 0x133d00 */
+ err_handled |= mdss_dsi_ack_err_status(ctrl); /* mask0, 0x01f */
+ err_handled |= mdss_dsi_timeout_status(ctrl); /* mask0, 0x0e0 */
+ err_handled |= mdss_dsi_status(ctrl); /* mask0, 0xc0100 */
+ err_handled |= mdss_dsi_dln0_phy_err(ctrl, true);/* mask0, 0x3e00000 */
/* clear dsi error interrupt */
intr = MIPI_INP(ctrl->ctrl_base + 0x0110);
@@ -2969,7 +3077,9 @@ void mdss_dsi_error(struct mdss_dsi_ctrl_pdata *ctrl)
intr |= DSI_INTR_ERROR;
MIPI_OUTP(ctrl->ctrl_base + 0x0110, intr);
- __dsi_error_counter(&ctrl->err_cont);
+ if (err_handled)
+ __dsi_error_counter(&ctrl->err_cont);
+
dsi_send_events(ctrl, DSI_EV_MDP_BUSY_RELEASE, 0);
}
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index 9108bee02445..b6db0c2543af 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -731,10 +731,9 @@ static int mdss_dsi_panel_on(struct mdss_panel_data *pdata)
if (pinfo->compression_mode == COMPRESSION_DSC)
mdss_dsi_panel_dsc_pps_send(ctrl, pinfo);
- if (ctrl->ds_registered && pinfo->is_pluggable)
+ if (ctrl->ds_registered)
mdss_dba_utils_video_on(pinfo->dba_data, pinfo);
end:
- pinfo->blank_state = MDSS_PANEL_BLANK_UNBLANK;
pr_debug("%s:-\n", __func__);
return ret;
}
@@ -808,7 +807,6 @@ static int mdss_dsi_panel_off(struct mdss_panel_data *pdata)
}
end:
- pinfo->blank_state = MDSS_PANEL_BLANK_BLANK;
pr_debug("%s:-\n", __func__);
return 0;
}
@@ -832,10 +830,6 @@ static int mdss_dsi_panel_low_power_config(struct mdss_panel_data *pdata,
enable);
/* Any panel specific low power commands/config */
- if (enable)
- pinfo->blank_state = MDSS_PANEL_BLANK_LOW_POWER;
- else
- pinfo->blank_state = MDSS_PANEL_BLANK_UNBLANK;
pr_debug("%s:-\n", __func__);
return 0;
@@ -1331,6 +1325,7 @@ static void mdss_panel_parse_te_params(struct device_node *np,
rc = of_property_read_u32
(np, "qcom,mdss-tear-check-rd-ptr-trigger-intr", &tmp);
te->rd_ptr_irq = (!rc ? tmp : timing->yres + 1);
+ te->wr_ptr_irq = 0;
}
@@ -1789,6 +1784,13 @@ static void mdss_dsi_parse_panel_horizintal_line_idle(struct device_node *np,
ctrl->horizontal_idle_cnt++;
}
+ /*
+ * idle is enabled for this controller, this will be used to
+ * enable/disable burst mode since both features are mutually
+ * exclusive.
+ */
+ ctrl->idle_enabled = true;
+
pr_debug("%s: horizontal_idle_cnt=%d\n", __func__,
ctrl->horizontal_idle_cnt);
}
@@ -1997,7 +1999,7 @@ static int mdss_dsi_panel_timing_from_dt(struct device_node *np,
const char *data;
struct mdss_dsi_ctrl_pdata *ctrl_pdata;
struct mdss_panel_info *pinfo;
- bool phy_timings_present;
+ bool phy_timings_present = false;
pinfo = &panel_data->panel_info;
@@ -2211,9 +2213,10 @@ static int mdss_panel_parse_dt(struct device_node *np,
struct mdss_dsi_ctrl_pdata *ctrl_pdata)
{
u32 tmp;
- int rc;
+ int rc, len = 0;
const char *data;
static const char *pdest;
+ const char *bridge_chip_name;
struct mdss_panel_info *pinfo = &(ctrl_pdata->panel_data.panel_info);
if (mdss_dsi_is_hw_config_split(ctrl_pdata->shared_data))
@@ -2420,6 +2423,19 @@ static int mdss_panel_parse_dt(struct device_node *np,
pinfo->is_dba_panel = of_property_read_bool(np,
"qcom,dba-panel");
+ if (pinfo->is_dba_panel) {
+ bridge_chip_name = of_get_property(np,
+ "qcom,bridge-name", &len);
+ if (!bridge_chip_name || len <= 0) {
+ pr_err("%s:%d Unable to read qcom,bridge_name, data=%p,len=%d\n",
+ __func__, __LINE__, bridge_chip_name, len);
+ rc = -EINVAL;
+ goto error;
+ }
+ strlcpy(ctrl_pdata->bridge_name, bridge_chip_name,
+ MSM_DBA_CHIP_NAME_MAX_LEN);
+ }
+
return 0;
error:
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 221b286b46d9..858465e6df78 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -1091,6 +1091,7 @@ static int mdss_fb_probe(struct platform_device *pdev)
mfd->pdev = pdev;
+ mfd->split_fb_left = mfd->split_fb_right = 0;
mfd->split_mode = MDP_SPLIT_MODE_NONE;
if (pdata->panel_info.is_split_display) {
struct mdss_panel_data *pnext = pdata->next;
@@ -1961,7 +1962,7 @@ void mdss_fb_free_fb_ion_memory(struct msm_fb_data_type *mfd)
int mdss_fb_alloc_fb_ion_memory(struct msm_fb_data_type *mfd, size_t fb_size)
{
- int rc;
+ int rc = 0;
void *vaddr;
int domain;
@@ -3907,8 +3908,8 @@ static int mdss_fb_async_position_update_ioctl(struct fb_info *info,
input_layer_list = update_pos.input_layers;
layer_cnt = update_pos.input_layer_cnt;
- if (!layer_cnt) {
- pr_err("no async layers to update\n");
+ if ((!layer_cnt) || (layer_cnt > MAX_LAYER_COUNT)) {
+ pr_err("invalid async layers :%d to update\n", layer_cnt);
return -EINVAL;
}
diff --git a/drivers/video/fbdev/msm/mdss_fb.h b/drivers/video/fbdev/msm/mdss_fb.h
index 954f11f729bd..67d42628597d 100644
--- a/drivers/video/fbdev/msm/mdss_fb.h
+++ b/drivers/video/fbdev/msm/mdss_fb.h
@@ -56,7 +56,6 @@
#define MDP_PP_AD_BL_LINEAR 0x0
#define MDP_PP_AD_BL_LINEAR_INV 0x1
-#define MAX_LAYER_COUNT 0xC
/**
* enum mdp_notify_event - Different frame events to indicate frame update state
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_audio.c b/drivers/video/fbdev/msm/mdss_hdmi_audio.c
new file mode 100644
index 000000000000..d949a86a8f5d
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_audio.c
@@ -0,0 +1,525 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include <linux/switch.h>
+#include <linux/gcd.h>
+
+#include "mdss_hdmi_audio.h"
+#include "mdss_hdmi_util.h"
+
+#define HDMI_AUDIO_INFO_FRAME_PACKET_HEADER 0x84
+#define HDMI_AUDIO_INFO_FRAME_PACKET_VERSION 0x1
+#define HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH 0x0A
+
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_MHZ_TO_HZ 1000000
+#define HDMI_ACR_N_MULTIPLIER 128
+#define DEFAULT_AUDIO_SAMPLE_RATE_HZ 48000
+
+/* Supported HDMI Audio channels */
+enum hdmi_audio_channels {
+ AUDIO_CHANNEL_2 = 2,
+ AUDIO_CHANNEL_3,
+ AUDIO_CHANNEL_4,
+ AUDIO_CHANNEL_5,
+ AUDIO_CHANNEL_6,
+ AUDIO_CHANNEL_7,
+ AUDIO_CHANNEL_8,
+};
+
+/* parameters for clock regeneration */
+struct hdmi_audio_acr {
+ u32 n;
+ u32 cts;
+};
+
+enum hdmi_audio_sample_rates {
+ AUDIO_SAMPLE_RATE_32KHZ,
+ AUDIO_SAMPLE_RATE_44_1KHZ,
+ AUDIO_SAMPLE_RATE_48KHZ,
+ AUDIO_SAMPLE_RATE_88_2KHZ,
+ AUDIO_SAMPLE_RATE_96KHZ,
+ AUDIO_SAMPLE_RATE_176_4KHZ,
+ AUDIO_SAMPLE_RATE_192KHZ,
+ AUDIO_SAMPLE_RATE_MAX
+};
+
+/*
+ * struct hdmi_audio - HDMI audio module context
+ * @io: register access data used for all HDMI core register writes
+ * @params: audio setup parameters (channels, sample rate, allocation)
+ *          cached from the last hdmi_audio_on() call
+ * @sdev: switch device used to notify userspace of audio routing changes
+ * @pclk: current pixel clock in kHz (converted to Hz for ACR math)
+ * @ack_enabled: client acknowledgment feature enabled via hdmi_audio_ack()
+ * @audio_ack_enabled: NOTE(review): never referenced in this file — looks
+ *                     redundant with @ack_enabled; confirm before removing
+ * @ack_pending: set after a switch-state notification until acknowledged
+ */
+struct hdmi_audio {
+	struct dss_io_data *io;
+	struct msm_hdmi_audio_setup_params params;
+	struct switch_dev sdev;
+	u32 pclk;
+	bool ack_enabled;
+	bool audio_ack_enabled;
+	atomic_t ack_pending;
+};
+
+/*
+ * hdmi_audio_get_audio_sample_rate() - map a rate in Hz to its enum value
+ * @sample_rate_hz: in: sample rate in Hz; out: the corresponding
+ *                  enum hdmi_audio_sample_rates value
+ *
+ * Converts in place. An unrecognized rate is left unchanged (only a debug
+ * print), so callers must pass one of the supported rates to get an enum
+ * value back.
+ */
+static void hdmi_audio_get_audio_sample_rate(u32 *sample_rate_hz)
+{
+	u32 rate = *sample_rate_hz;
+
+	switch (rate) {
+	case 32000:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_32KHZ;
+		break;
+	case 44100:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_44_1KHZ;
+		break;
+	case 48000:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_48KHZ;
+		break;
+	case 88200:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_88_2KHZ;
+		break;
+	case 96000:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_96KHZ;
+		break;
+	case 176400:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_176_4KHZ;
+		break;
+	case 192000:
+		*sample_rate_hz = AUDIO_SAMPLE_RATE_192KHZ;
+		break;
+	default:
+		/* unknown rate: leave the caller's value untouched */
+		pr_debug("%d unchanged\n", rate);
+		break;
+	}
+}
+
+/*
+ * hdmi_audio_get_acr_param() - compute the N/CTS audio clock regeneration pair
+ * @pclk: pixel clock in Hz
+ * @fs: audio sample rate in Hz
+ * @acr: out: computed N and CTS values
+ *
+ * Per the HDMI specification, N/CTS = (128*fs)/pclk. The pair is reduced by
+ * its GCD, then scaled up so that 128*fs/N lands near the spec's target of
+ * 1000 (allowed range 300..1500) without losing fractional precision.
+ */
+static void hdmi_audio_get_acr_param(u32 pclk, u32 fs,
+	struct hdmi_audio_acr *acr)
+{
+	u32 div, mul;
+
+	if (!acr) {
+		pr_err("invalid data\n");
+		return;
+	}
+
+	/*
+	 * as per HDMI specification, N/CTS = (128*fs)/pclk.
+	 * get the ratio using this formula.
+	 */
+	acr->n = HDMI_ACR_N_MULTIPLIER * fs;
+	acr->cts = pclk;
+
+	/* get the greatest common divisor for the ratio */
+	div = gcd(acr->n, acr->cts);
+
+	/* get the n and cts values wrt N/CTS formula */
+	acr->n /= div;
+	acr->cts /= div;
+
+	/*
+	 * as per HDMI specification, 300 <= 128*fs/N <= 1500
+	 * with a target of 128*fs/N = 1000. To get closest
+	 * value without truncating fractional values, find
+	 * the corresponding multiplier
+	 */
+	mul = ((HDMI_ACR_N_MULTIPLIER * fs / HDMI_KHZ_TO_HZ)
+		+ (acr->n - 1)) / acr->n;
+
+	acr->n *= mul;
+	acr->cts *= mul;
+}
+
+/*
+ * hdmi_audio_acr_enable() - program audio clock regeneration (ACR) packets
+ * @audio: audio module context
+ *
+ * Computes the N/CTS pair for the current pixel clock and sample rate and
+ * programs the ACR and audio packet control registers so the sink can
+ * regenerate the audio clock.
+ *
+ * Fix: @multiplier was previously OR'd into the N_MULTIPLE field of
+ * HDMI_ACR_PKT_CTRL *before* the switch below assigned it (an uninitialized
+ * read), and the 44.1/48 kHz cases never assigned it at all. It is now set
+ * in every case and programmed after the switch.
+ */
+static void hdmi_audio_acr_enable(struct hdmi_audio *audio)
+{
+	struct dss_io_data *io;
+	struct hdmi_audio_acr acr;
+	struct msm_hdmi_audio_setup_params *params;
+	u32 pclk, layout, multiplier, sample_rate;
+	u32 acr_pkt_ctl, aud_pkt_ctl2, acr_reg_cts, acr_reg_n;
+
+	if (!audio) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	io = audio->io;
+	params = &audio->params;
+	pclk = audio->pclk;
+	sample_rate = params->sample_rate_hz;
+
+	hdmi_audio_get_acr_param(pclk * HDMI_KHZ_TO_HZ, sample_rate, &acr);
+	hdmi_audio_get_audio_sample_rate(&sample_rate);
+
+	/* layout 0: 2 channels, layout 1: up to 8 channels */
+	layout = AUDIO_CHANNEL_2 == params->num_of_channels ? 0 : 1;
+
+	pr_debug("n=%u, cts=%u, layout=%u\n", acr.n, acr.cts, layout);
+
+	/* AUDIO_PRIORITY | SOURCE */
+	acr_pkt_ctl = BIT(31) | BIT(8);
+
+	switch (sample_rate) {
+	case AUDIO_SAMPLE_RATE_44_1KHZ:
+		multiplier = 1;
+
+		acr_pkt_ctl |= 0x2 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_44_0;
+		acr_reg_n = HDMI_ACR_44_1;
+		break;
+	case AUDIO_SAMPLE_RATE_48KHZ:
+		multiplier = 1;
+
+		acr_pkt_ctl |= 0x3 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_48_0;
+		acr_reg_n = HDMI_ACR_48_1;
+		break;
+	case AUDIO_SAMPLE_RATE_192KHZ:
+		/* 4x 48 kHz: scale N down, advertise via N_MULTIPLE */
+		multiplier = 4;
+		acr.n >>= 2;
+
+		acr_pkt_ctl |= 0x3 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_48_0;
+		acr_reg_n = HDMI_ACR_48_1;
+		break;
+	case AUDIO_SAMPLE_RATE_176_4KHZ:
+		/* 4x 44.1 kHz */
+		multiplier = 4;
+		acr.n >>= 2;
+
+		acr_pkt_ctl |= 0x2 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_44_0;
+		acr_reg_n = HDMI_ACR_44_1;
+		break;
+	case AUDIO_SAMPLE_RATE_96KHZ:
+		/* 2x 48 kHz */
+		multiplier = 2;
+		acr.n >>= 1;
+
+		acr_pkt_ctl |= 0x3 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_48_0;
+		acr_reg_n = HDMI_ACR_48_1;
+		break;
+	case AUDIO_SAMPLE_RATE_88_2KHZ:
+		/* 2x 44.1 kHz */
+		multiplier = 2;
+		acr.n >>= 1;
+
+		acr_pkt_ctl |= 0x2 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_44_0;
+		acr_reg_n = HDMI_ACR_44_1;
+		break;
+	default:
+		/* 32 kHz and any unrecognized rate */
+		multiplier = 1;
+
+		acr_pkt_ctl |= 0x1 << 4;
+		acr.cts <<= 12;
+
+		acr_reg_cts = HDMI_ACR_32_0;
+		acr_reg_n = HDMI_ACR_32_1;
+		break;
+	}
+
+	/* N_MULTIPLE(multiplier): OR in only after the switch assigned it */
+	acr_pkt_ctl |= (multiplier & 0x7) << 16;
+
+	aud_pkt_ctl2 = BIT(0) | (layout << 1);
+
+	/* SEND | CONT */
+	acr_pkt_ctl |= BIT(0) | BIT(1);
+
+	DSS_REG_W(io, acr_reg_cts, acr.cts);
+	DSS_REG_W(io, acr_reg_n, acr.n);
+	DSS_REG_W(io, HDMI_ACR_PKT_CTRL, acr_pkt_ctl);
+	DSS_REG_W(io, HDMI_AUDIO_PKT_CTRL2, aud_pkt_ctl2);
+}
+
+/*
+ * hdmi_audio_acr_setup() - enable or disable ACR packet transmission
+ * @audio: audio module context
+ * @on: true to program and enable ACR packets, false to disable them
+ *
+ * Fix: the disable path dereferenced audio->io with no NULL check, while
+ * the enable path was only guarded inside hdmi_audio_acr_enable(). Guard
+ * both paths here.
+ */
+static void hdmi_audio_acr_setup(struct hdmi_audio *audio, bool on)
+{
+	if (!audio) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (on)
+		hdmi_audio_acr_enable(audio);
+	else
+		DSS_REG_W(audio->io, HDMI_ACR_PKT_CTRL, 0);
+}
+
+/*
+ * hdmi_audio_infoframe_setup() - enable/disable the Audio InfoFrame
+ * @audio: audio module context
+ * @enabled: true to build and transmit the InfoFrame, false to stop it
+ *
+ * When enabling, builds the Audio InfoFrame contents (channel count,
+ * channel allocation, level shift, down-mix inhibit) with the checksum
+ * required by the InfoFrame packet format, and programs the audio packet
+ * layout. When disabling, only the transmission-enable bits in
+ * HDMI_INFOFRAME_CTRL0 are cleared and the related registers are zeroed.
+ */
+static void hdmi_audio_infoframe_setup(struct hdmi_audio *audio, bool enabled)
+{
+	struct dss_io_data *io = NULL;
+	u32 channels, channel_allocation, level_shift, down_mix, layout;
+	u32 hdmi_debug_reg = 0, audio_info_0_reg = 0, audio_info_1_reg = 0;
+	u32 audio_info_ctrl_reg, aud_pck_ctrl_2_reg;
+	u32 check_sum, sample_present;
+
+	if (!audio) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	io = audio->io;
+	if (!io->base) {
+		pr_err("core io not inititalized\n");
+		return;
+	}
+
+	/* clear the InfoFrame transmission-enable bits (set again below) */
+	audio_info_ctrl_reg = DSS_REG_R(io, HDMI_INFOFRAME_CTRL0);
+	audio_info_ctrl_reg &= ~0xF0;
+
+	if (!enabled)
+		goto end;
+
+	/* InfoFrame encodes channel count minus one */
+	channels = audio->params.num_of_channels - 1;
+	channel_allocation = audio->params.channel_allocation;
+	level_shift = audio->params.level_shift;
+	down_mix = audio->params.down_mix;
+	sample_present = audio->params.sample_present;
+
+	/* layout 0: 2 channels, layout 1: up to 8 channels */
+	layout = AUDIO_CHANNEL_2 == audio->params.num_of_channels ? 0 : 1;
+	aud_pck_ctrl_2_reg = BIT(0) | (layout << 1);
+	DSS_REG_W(io, HDMI_AUDIO_PKT_CTRL2, aud_pck_ctrl_2_reg);
+
+	audio_info_1_reg |= channel_allocation & 0xFF;
+	audio_info_1_reg |= ((level_shift & 0xF) << 11);
+	audio_info_1_reg |= ((down_mix & 0x1) << 15);
+
+	/* InfoFrame checksum: 256 minus the byte sum of header and payload */
+	check_sum = 0;
+	check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_HEADER;
+	check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_VERSION;
+	check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH;
+	check_sum += channels;
+	check_sum += channel_allocation;
+	check_sum += (level_shift & 0xF) << 3 | (down_mix & 0x1) << 7;
+	check_sum &= 0xFF;
+	check_sum = (u8) (256 - check_sum);
+
+	audio_info_0_reg |= check_sum & 0xFF;
+	audio_info_0_reg |= ((channels & 0x7) << 8);
+
+	/* Enable Audio InfoFrame Transmission */
+	audio_info_ctrl_reg |= 0xF0;
+
+	if (layout) {
+		/* Set the Layout bit */
+		hdmi_debug_reg |= BIT(4);
+
+		/* Set the Sample Present bits */
+		hdmi_debug_reg |= sample_present & 0xF;
+	}
+end:
+	DSS_REG_W(io, HDMI_DEBUG, hdmi_debug_reg);
+	DSS_REG_W(io, HDMI_AUDIO_INFO0, audio_info_0_reg);
+	DSS_REG_W(io, HDMI_AUDIO_INFO1, audio_info_1_reg);
+	DSS_REG_W(io, HDMI_INFOFRAME_CTRL0, audio_info_ctrl_reg);
+}
+
+/*
+ * hdmi_audio_on() - enable HDMI audio
+ * @ctx: audio module context returned by hdmi_audio_register()
+ * @pclk: current pixel clock in kHz
+ * @params: audio setup parameters; copied into the context. If no channel
+ *          count is given, defaults to 2 channels at 48 kHz.
+ *
+ * Programs ACR packets and the Audio InfoFrame for the given configuration.
+ *
+ * Fix: @params was dereferenced without a NULL check although @ctx was
+ * checked; both are now validated.
+ *
+ * Return: 0 on success, -EINVAL on invalid input.
+ */
+static int hdmi_audio_on(void *ctx, u32 pclk,
+	struct msm_hdmi_audio_setup_params *params)
+{
+	struct hdmi_audio *audio = ctx;
+	int rc = 0;
+
+	if (!audio || !params) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	audio->pclk = pclk;
+	audio->params = *params;
+
+	if (!audio->params.num_of_channels) {
+		audio->params.sample_rate_hz = DEFAULT_AUDIO_SAMPLE_RATE_HZ;
+		audio->params.num_of_channels = AUDIO_CHANNEL_2;
+	}
+
+	hdmi_audio_acr_setup(audio, true);
+	hdmi_audio_infoframe_setup(audio, true);
+
+	pr_debug("HDMI Audio: Enabled\n");
+end:
+	return rc;
+}
+
+/*
+ * hdmi_audio_off() - disable HDMI audio
+ * @ctx: audio module context returned by hdmi_audio_register()
+ *
+ * Stops Audio InfoFrame transmission and disables ACR packets, in that
+ * order (the reverse of hdmi_audio_on()).
+ */
+static void hdmi_audio_off(void *ctx)
+{
+	struct hdmi_audio *audio = ctx;
+
+	if (!audio) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	hdmi_audio_infoframe_setup(audio, false);
+	hdmi_audio_acr_setup(audio, false);
+
+	pr_debug("HDMI Audio: Disabled\n");
+}
+
+/*
+ * hdmi_audio_notify() - notify userspace of an audio routing change
+ * @ctx: audio module context returned by hdmi_audio_register()
+ * @val: new switch state (nonzero: route to HDMI, zero: route to speaker)
+ *
+ * Updates the "hdmi_audio" switch device. If the acknowledgment feature is
+ * enabled and a previous notification has not yet been acknowledged via
+ * hdmi_audio_ack(), the new notification is dropped with an error log.
+ * A successful state change arms @ack_pending.
+ */
+static void hdmi_audio_notify(void *ctx, int val)
+{
+	struct hdmi_audio *audio = ctx;
+	int state = 0;
+	bool switched;
+
+	if (!audio) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	/* nothing to do if the switch is already in the requested state */
+	state = audio->sdev.state;
+	if (state == val)
+		return;
+
+	if (audio->ack_enabled &&
+		atomic_read(&audio->ack_pending)) {
+		pr_err("%s ack pending, not notifying %s\n",
+			state ? "connect" : "disconnect",
+			val ? "connect" : "disconnect");
+		return;
+	}
+
+	switch_set_state(&audio->sdev, val);
+	switched = audio->sdev.state != state;
+
+	/* require an ack before the next notification is accepted */
+	if (audio->ack_enabled && switched)
+		atomic_set(&audio->ack_pending, 1);
+
+	pr_debug("audio %s %s\n", switched ? "switched to" : "same as",
+		audio->sdev.state ? "HDMI" : "SPKR");
+}
+
+/*
+ * hdmi_audio_ack() - acknowledge an audio routing notification
+ * @ctx: audio module context returned by hdmi_audio_register()
+ * @ack: acknowledgment flags; AUDIO_ACK_SET_ENABLE (re)configures the ack
+ *       feature from AUDIO_ACK_ENABLE instead of acknowledging, otherwise
+ *       AUDIO_ACK_CONNECT carries the state being acknowledged
+ * @hpd: current hot-plug state the ack is compared against
+ *
+ * Clears @ack_pending. If the acknowledged state does not match the current
+ * hpd state (the cable changed while the ack was in flight), re-notifies
+ * with the current state to re-balance.
+ */
+static void hdmi_audio_ack(void *ctx, u32 ack, u32 hpd)
+{
+	struct hdmi_audio *audio = ctx;
+	u32 ack_hpd;
+
+	if (!audio) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	/* SET_ENABLE toggles the feature itself rather than acking */
+	if (ack & AUDIO_ACK_SET_ENABLE) {
+		audio->ack_enabled = ack & AUDIO_ACK_ENABLE ?
+			true : false;
+
+		pr_debug("audio ack feature %s\n",
+			audio->ack_enabled ? "enabled" : "disabled");
+		return;
+	}
+
+	if (!audio->ack_enabled)
+		return;
+
+	atomic_set(&audio->ack_pending, 0);
+
+	ack_hpd = ack & AUDIO_ACK_CONNECT;
+
+	pr_debug("acknowledging %s\n",
+		ack_hpd ? "connect" : "disconnect");
+
+	if (ack_hpd != hpd) {
+		pr_debug("unbalanced audio state, ack %d, hpd %d\n",
+			ack_hpd, hpd);
+
+		hdmi_audio_notify(ctx, hpd);
+	}
+}
+
+/*
+ * hdmi_audio_reset() - reset audio notification state to default
+ * @ctx: audio module context returned by hdmi_audio_register()
+ *
+ * Clears any pending acknowledgment so future notifications are not
+ * blocked by a stale @ack_pending flag.
+ */
+static void hdmi_audio_reset(void *ctx)
+{
+	struct hdmi_audio *audio = ctx;
+
+	if (!audio) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	atomic_set(&audio->ack_pending, 0);
+}
+
+/*
+ * hdmi_audio_status() - report the current audio module status
+ * @ctx: audio module context returned by hdmi_audio_register()
+ * @status: out: filled with ack-feature state, pending-ack state, and the
+ *          current switch state (see struct hdmi_audio_status)
+ */
+static void hdmi_audio_status(void *ctx, struct hdmi_audio_status *status)
+{
+	struct hdmi_audio *audio = ctx;
+
+	if (!audio || !status) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	status->ack_enabled = audio->ack_enabled;
+	status->ack_pending = atomic_read(&audio->ack_pending);
+	status->switched = audio->sdev.state;
+}
+
+/**
+ * hdmi_audio_register() - audio registeration function
+ * @data: registeration initialization data; @data->io and @data->ops must
+ *        both be valid — ops is populated with this module's callbacks
+ *
+ * This API configures audio module for client to use HDMI audio.
+ * Provides audio functionalities which client can call.
+ * Initializes internal data structures and registers the "hdmi_audio"
+ * switch device used for userspace routing notifications.
+ *
+ * Fix: @data->io and @data->ops were used without validation; a NULL ops
+ * pointer would have crashed while populating the callbacks below.
+ *
+ * Return: pointer to audio data that client needs to pass on
+ * calling audio functions, or NULL on failure.
+ */
+void *hdmi_audio_register(struct hdmi_audio_init_data *data)
+{
+	struct hdmi_audio *audio = NULL;
+	int rc = 0;
+
+	if (!data || !data->io || !data->ops)
+		goto end;
+
+	audio = kzalloc(sizeof(*audio), GFP_KERNEL);
+	if (!audio)
+		goto end;
+
+	audio->sdev.name = "hdmi_audio";
+	rc = switch_dev_register(&audio->sdev);
+	if (rc) {
+		pr_err("audio switch registration failed\n");
+		kzfree(audio);
+		audio = NULL;
+		goto end;
+	}
+
+	audio->io = data->io;
+
+	data->ops->on = hdmi_audio_on;
+	data->ops->off = hdmi_audio_off;
+	data->ops->notify = hdmi_audio_notify;
+	data->ops->ack = hdmi_audio_ack;
+	data->ops->reset = hdmi_audio_reset;
+	data->ops->status = hdmi_audio_status;
+end:
+	return audio;
+}
+
+/**
+ * hdmi_audio_unregister() - unregister audio module
+ * @ctx: audio module's data, as returned by hdmi_audio_register()
+ *
+ * Delete audio module's instance and allocated resources. Safe to call
+ * with NULL. NOTE(review): registration failure path uses kzfree() while
+ * this path uses kfree() — harmless but inconsistent; confirm intent.
+ */
+void hdmi_audio_unregister(void *ctx)
+{
+	struct hdmi_audio *audio = ctx;
+
+	if (audio) {
+		switch_dev_unregister(&audio->sdev);
+		kfree(ctx);
+	}
+}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_audio.h b/drivers/video/fbdev/msm/mdss_hdmi_audio.h
new file mode 100644
index 000000000000..c53bfd9b1ff2
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_hdmi_audio.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_AUDIO_H__
+#define __MDSS_HDMI_AUDIO_H__
+
+#include <linux/mdss_io_util.h>
+#include <linux/msm_hdmi.h>
+
+#define AUDIO_ACK_SET_ENABLE BIT(5)
+#define AUDIO_ACK_ENABLE BIT(4)
+#define AUDIO_ACK_CONNECT BIT(0)
+
+/**
+ * struct hdmi_audio_status - hdmi audio current status info
+ * @ack_pending: notification acknowledgment status
+ * @ack_enabled: acknowledgment feature is enabled or disabled
+ * @switched: audio notification status for routing
+ *
+ * Data for client to query about the current status of audio
+ */
+struct hdmi_audio_status {
+ bool ack_pending;
+ bool ack_enabled;
+ bool switched;
+};
+
+/**
+ * struct hdmi_audio_ops - audio operations for clients to call
+ * @on: function pointer to enable audio
+ * @reset: function pointer to reset the audio current status to default
+ * @status: function pointer to get the current status of audio
+ * @notify: function pointer to notify other modules for audio routing
+ * @ack: function pointer to acknowledge audio routing change
+ *
+ * Provides client operations for audio functionalities
+ */
+struct hdmi_audio_ops {
+ int (*on)(void *ctx, u32 pclk,
+ struct msm_hdmi_audio_setup_params *params);
+ void (*off)(void *ctx);
+ void (*reset)(void *ctx);
+ void (*status)(void *ctx, struct hdmi_audio_status *status);
+ void (*notify)(void *ctx, int val);
+ void (*ack)(void *ctx, u32 ack, u32 hpd);
+};
+
+/**
+ * struct hdmi_audio_init_data - data needed for initializing audio module
+ * @io: pointer to register access related data
+ * @ops: pointer to populate operation functions.
+ *
+ * Defines the data needed to be provided while initializing audio module
+ */
+struct hdmi_audio_init_data {
+ struct dss_io_data *io;
+ struct hdmi_audio_ops *ops;
+};
+
+void *hdmi_audio_register(struct hdmi_audio_init_data *data);
+void hdmi_audio_unregister(void *data);
+
+#endif /* __MDSS_HDMI_AUDIO_H__ */
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
index 26c63ec417a6..f5c45571f0d2 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -114,6 +114,13 @@ struct hdmi_edid_sink_caps {
bool ind_view_support;
};
+struct hdmi_edid_override_data {
+ int scramble;
+ int sink_mode;
+ int format;
+ int vic;
+};
+
struct hdmi_edid_ctrl {
u8 pt_scan_info;
u8 it_scan_info;
@@ -134,11 +141,12 @@ struct hdmi_edid_ctrl {
u8 edid_buf[MAX_EDID_SIZE];
char vendor_id[EDID_VENDOR_ID_SIZE];
bool keep_resv_timings;
- u32 edid_override;
+ bool edid_override;
struct hdmi_edid_sink_data sink_data;
struct hdmi_edid_init_data init_data;
struct hdmi_edid_sink_caps sink_caps;
+ struct hdmi_edid_override_data override_data;
};
static bool hdmi_edid_is_mode_supported(struct hdmi_edid_ctrl *edid_ctrl,
@@ -315,12 +323,8 @@ static DEVICE_ATTR(spkr_alloc_data_block, S_IRUGO,
static ssize_t hdmi_edid_sysfs_wta_modes(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- int scrambling, vic, format, sink;
ssize_t ret = strnlen(buf, PAGE_SIZE);
- int rc;
struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
- struct hdmi_edid_sink_data *sd;
- struct msm_hdmi_mode_timing_info info = {0};
if (!edid_ctrl) {
DEV_ERR("%s: invalid ctrl\n", __func__);
@@ -328,52 +332,16 @@ static ssize_t hdmi_edid_sysfs_wta_modes(struct device *dev,
goto error;
}
- sd = &edid_ctrl->sink_data;
-
if (sscanf(buf, "%d %d %d %d",
- &scrambling, &sink, &format, &vic) != 4) {
+ &edid_ctrl->override_data.scramble,
+ &edid_ctrl->override_data.sink_mode,
+ &edid_ctrl->override_data.format,
+ &edid_ctrl->override_data.vic) != 4) {
DEV_ERR("could not read input\n");
ret = -EINVAL;
goto bail;
}
- if ((sink != SINK_MODE_DVI && sink != SINK_MODE_HDMI) ||
- !(format & (MSM_HDMI_RGB_888_24BPP_FORMAT |
- MSM_HDMI_YUV_420_12BPP_FORMAT)) ||
- vic <= HDMI_VFRMT_UNKNOWN || vic >= HDMI_VFRMT_MAX) {
- DEV_ERR("%s: invalid input: sink %d, format %d, vic %d\n",
- __func__, sink, format, vic);
- ret = -EINVAL;
- goto bail;
- }
-
- rc = hdmi_get_supported_mode(&info,
- &edid_ctrl->init_data.ds_data, vic);
- if (rc) {
- DEV_ERR("%s: error getting res details\n", __func__);
- ret = -EINVAL;
- goto bail;
- }
-
- if (!hdmi_edid_is_mode_supported(edid_ctrl, &info)) {
- DEV_ERR("%s: %d vic not supported\n", __func__, vic);
- ret = -EINVAL;
- goto bail;
- }
-
- sd->num_of_elements = 1;
- sd->disp_mode_list[0].video_format = vic;
-
- if (format & MSM_HDMI_RGB_888_24BPP_FORMAT)
- sd->disp_mode_list[0].rgb_support = true;
-
- if (format & MSM_HDMI_YUV_420_12BPP_FORMAT)
- sd->disp_mode_list[0].y420_support = true;
-
- edid_ctrl->sink_mode = sink;
- edid_ctrl->sink_caps.scramble_support = !!scrambling;
- edid_ctrl->sink_caps.scdc_present = !!scrambling;
-
edid_ctrl->edid_override = true;
return ret;
bail:
@@ -389,17 +357,26 @@ static ssize_t hdmi_edid_sysfs_rda_modes(struct device *dev,
ssize_t ret = 0;
int i;
struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+ u32 num_of_elements = 0;
+ struct disp_mode_info *video_mode;
if (!edid_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
return -EINVAL;
}
+ num_of_elements = edid_ctrl->sink_data.num_of_elements;
+ video_mode = edid_ctrl->sink_data.disp_mode_list;
+
+ if (edid_ctrl->edid_override && (edid_ctrl->override_data.vic > 0)) {
+ num_of_elements = 1;
+ edid_ctrl->sink_data.disp_mode_list[0].video_format =
+ edid_ctrl->override_data.vic;
+ }
+
buf[0] = 0;
- if (edid_ctrl->sink_data.num_of_elements) {
- struct disp_mode_info *video_mode =
- edid_ctrl->sink_data.disp_mode_list;
- for (i = 0; i < edid_ctrl->sink_data.num_of_elements; i++) {
+ if (num_of_elements) {
+ for (i = 0; i < num_of_elements; i++) {
if (ret > 0)
ret += scnprintf(buf + ret, PAGE_SIZE - ret,
",%d", video_mode[i].video_format);
@@ -420,6 +397,65 @@ static ssize_t hdmi_edid_sysfs_rda_modes(struct device *dev,
static DEVICE_ATTR(edid_modes, S_IRUGO | S_IWUSR, hdmi_edid_sysfs_rda_modes,
hdmi_edid_sysfs_wta_modes);
+/*
+ * hdmi_edid_sysfs_rda_res_info_data() - sysfs read for res_info_data
+ * @dev: device owning the edid sysfs node
+ * @attr: sysfs attribute (unused)
+ * @buf: PAGE_SIZE output buffer
+ *
+ * Emits one comma-separated timing line per supported display mode. When
+ * an edid override with a valid vic is active, only that single mode is
+ * reported; an override format likewise replaces the sink's pixel formats.
+ * Modes that fail lookup or are unsupported are silently skipped.
+ *
+ * Return: number of bytes written to @buf.
+ */
+static ssize_t hdmi_edid_sysfs_rda_res_info_data(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	ssize_t ret;
+	u32 i, no_of_elem, offset = 0;
+	struct msm_hdmi_mode_timing_info info = {0};
+	struct hdmi_edid_ctrl *edid_ctrl = hdmi_edid_get_ctrl(dev);
+	struct disp_mode_info *minfo = NULL;
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	no_of_elem = edid_ctrl->sink_data.num_of_elements;
+	minfo = edid_ctrl->sink_data.disp_mode_list;
+
+	/* override: report only the forced vic */
+	if (edid_ctrl->edid_override && (edid_ctrl->override_data.vic > 0)) {
+		no_of_elem = 1;
+		minfo[0].video_format = edid_ctrl->override_data.vic;
+	}
+
+	for (i = 0; i < no_of_elem; i++) {
+		ret = hdmi_get_supported_mode(&info,
+			&edid_ctrl->init_data.ds_data,
+			minfo->video_format);
+
+		if (edid_ctrl->edid_override &&
+			(edid_ctrl->override_data.format > 0))
+			info.pixel_formats = edid_ctrl->override_data.format;
+		else
+			info.pixel_formats =
+				(minfo->rgb_support ?
+					MSM_HDMI_RGB_888_24BPP_FORMAT : 0) |
+				(minfo->y420_support ?
+					MSM_HDMI_YUV_420_12BPP_FORMAT : 0);
+
+		minfo++;
+		/* skip modes that failed lookup or are not supported */
+		if (ret || !info.supported)
+			continue;
+
+		offset += scnprintf(buf + offset, PAGE_SIZE - offset,
+			"%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
+			info.video_format, info.active_h,
+			info.front_porch_h, info.pulse_width_h,
+			info.back_porch_h, info.active_low_h,
+			info.active_v, info.front_porch_v,
+			info.pulse_width_v, info.back_porch_v,
+			info.active_low_v, info.pixel_freq,
+			info.refresh_rate, info.interlaced,
+			info.supported, info.ar,
+			info.pixel_formats);
+	}
+
+	return offset;
+}
+static DEVICE_ATTR(res_info_data, S_IRUGO, hdmi_edid_sysfs_rda_res_info_data,
+ NULL);
+
static ssize_t hdmi_edid_sysfs_wta_res_info(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -477,14 +513,25 @@ static ssize_t hdmi_edid_sysfs_rda_res_info(struct device *dev,
}
}
+ if (edid_ctrl->edid_override && (edid_ctrl->override_data.vic > 0)) {
+ no_of_elem = 1;
+ minfo[0].video_format = edid_ctrl->override_data.vic;
+ }
+
for (; i < no_of_elem && size_to_write < PAGE_SIZE; i++) {
ret = hdmi_get_supported_mode(&info,
&edid_ctrl->init_data.ds_data,
minfo->video_format);
- info.pixel_formats =
- (minfo->rgb_support ? MSM_HDMI_RGB_888_24BPP_FORMAT : 0) |
- (minfo->y420_support ? MSM_HDMI_YUV_420_12BPP_FORMAT : 0);
+ if (edid_ctrl->edid_override &&
+ (edid_ctrl->override_data.format > 0))
+ info.pixel_formats = edid_ctrl->override_data.format;
+ else
+ info.pixel_formats =
+ (minfo->rgb_support ?
+ MSM_HDMI_RGB_888_24BPP_FORMAT : 0) |
+ (minfo->y420_support ?
+ MSM_HDMI_YUV_420_12BPP_FORMAT : 0);
minfo++;
if (ret || !info.supported)
@@ -725,6 +772,7 @@ static struct attribute *hdmi_edid_fs_attrs[] = {
&dev_attr_edid_audio_latency.attr,
&dev_attr_edid_video_latency.attr,
&dev_attr_res_info.attr,
+ &dev_attr_res_info_data.attr,
&dev_attr_add_res.attr,
NULL,
};
@@ -2102,11 +2150,6 @@ int hdmi_edid_parser(void *input)
goto err_invalid_data;
}
- if (edid_ctrl->edid_override) {
- DEV_DBG("edid override enabled\n");
- goto err_invalid_data;
- }
-
/* reset edid data for new hdmi connection */
hdmi_edid_reset_parser(edid_ctrl);
@@ -2244,13 +2287,20 @@ end:
u32 hdmi_edid_get_sink_mode(void *input)
{
struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+ bool sink_mode;
if (!edid_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
return 0;
}
- return edid_ctrl->sink_mode;
+ if (edid_ctrl->edid_override &&
+ (edid_ctrl->override_data.sink_mode != -1))
+ sink_mode = edid_ctrl->override_data.sink_mode;
+ else
+ sink_mode = edid_ctrl->sink_mode;
+
+ return sink_mode;
} /* hdmi_edid_get_sink_mode */
bool hdmi_edid_is_s3d_mode_supported(void *input, u32 video_mode, u32 s3d_mode)
@@ -2280,24 +2330,39 @@ bool hdmi_edid_is_s3d_mode_supported(void *input, u32 video_mode, u32 s3d_mode)
bool hdmi_edid_get_scdc_support(void *input)
{
struct hdmi_edid_ctrl *edid_ctrl = input;
+ bool scdc_present;
if (!edid_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
return false;
}
- return edid_ctrl->sink_caps.scdc_present;
+ if (edid_ctrl->edid_override &&
+ (edid_ctrl->override_data.scramble != -1))
+ scdc_present = edid_ctrl->override_data.scramble;
+ else
+ scdc_present = edid_ctrl->sink_caps.scdc_present;
+
+ return scdc_present;
}
bool hdmi_edid_get_sink_scrambler_support(void *input)
{
struct hdmi_edid_ctrl *edid_ctrl = (struct hdmi_edid_ctrl *)input;
+ bool scramble_support;
if (!edid_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
return 0;
}
- return edid_ctrl->sink_caps.scramble_support;
+
+ if (edid_ctrl->edid_override &&
+ (edid_ctrl->override_data.scramble != -1))
+ scramble_support = edid_ctrl->override_data.scramble;
+ else
+ scramble_support = edid_ctrl->sink_caps.scramble_support;
+
+ return scramble_support;
}
int hdmi_edid_get_audio_blk(void *input, struct msm_hdmi_audio_edid_blk *blk)
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
index 85175d69d6f1..fb59d0b03afe 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_hdcp2p2.c
@@ -47,6 +47,11 @@ enum hdmi_hdcp2p2_sink_status {
SINK_CONNECTED
};
+enum hdmi_auth_status {
+ HDMI_HDCP_AUTH_STATUS_FAILURE,
+ HDMI_HDCP_AUTH_STATUS_SUCCESS
+};
+
struct hdmi_hdcp2p2_ctrl {
atomic_t auth_state;
bool tethered;
@@ -60,6 +65,7 @@ struct hdmi_hdcp2p2_ctrl {
struct hdcp_txmtr_ops *lib; /* Ops for driver to call into TZ */
enum hdmi_hdcp_wakeup_cmd wakeup_cmd;
+ enum hdmi_auth_status auth_status;
char *send_msg_buf;
uint32_t send_msg_len;
uint32_t timeout;
@@ -156,6 +162,11 @@ static int hdmi_hdcp2p2_wakeup(struct hdmi_hdcp_wakeup_data *data)
if (hdmi_hdcp2p2_copy_buf(ctrl, data))
goto exit;
+ if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS)
+ ctrl->auth_status = HDMI_HDCP_AUTH_STATUS_SUCCESS;
+ else if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_STATUS_FAILED)
+ ctrl->auth_status = HDMI_HDCP_AUTH_STATUS_FAILURE;
+
if (ctrl->tethered)
goto exit;
@@ -820,9 +831,7 @@ static void hdmi_hdcp2p2_auth_status(struct hdmi_hdcp2p2_ctrl *ctrl)
return;
}
- if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_STATUS_FAILED) {
- hdmi_hdcp2p2_auth_failed(ctrl);
- } else if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS) {
+ if (ctrl->auth_status == HDMI_HDCP_AUTH_STATUS_SUCCESS) {
ctrl->init_data.notify_status(ctrl->init_data.cb_data,
HDCP_STATE_AUTHENTICATED);
@@ -830,6 +839,8 @@ static void hdmi_hdcp2p2_auth_status(struct hdmi_hdcp2p2_ctrl *ctrl)
if (ctrl->tethered)
hdmi_hdcp2p2_link_check(ctrl);
+ } else {
+ hdmi_hdcp2p2_auth_failed(ctrl);
}
}
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index fc7ed49f8536..a902fd7b82c7 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -20,7 +20,6 @@
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/types.h>
-#include <linux/msm_hdmi.h>
#include <linux/hdcp_qseecom.h>
#include <linux/clk.h>
@@ -32,6 +31,7 @@
#include "mdss_hdmi_edid.h"
#include "mdss_hdmi_hdcp.h"
#include "mdss_hdmi_tx.h"
+#include "mdss_hdmi_audio.h"
#include "mdss.h"
#include "mdss_panel.h"
#include "mdss_hdmi_mhl.h"
@@ -49,10 +49,6 @@
#define HPD_DISCONNECT_POLARITY 0
#define HPD_CONNECT_POLARITY 1
-#define AUDIO_ACK_SET_ENABLE BIT(5)
-#define AUDIO_ACK_ENABLE BIT(4)
-#define AUDIO_ACK_CONNECT BIT(0)
-
/*
* Audio engine may take 1 to 3 sec to shutdown
* in normal cases. To handle worst cases, making
@@ -76,9 +72,6 @@
#define HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ 340000
#define HDMI_TX_SCRAMBLER_TIMEOUT_MSEC 200
-#define HDMI_TX_KHZ_TO_HZ 1000
-#define HDMI_TX_MHZ_TO_HZ 1000000
-
/* Maximum pixel clock rates for hdmi tx */
#define HDMI_DEFAULT_MAX_PCLK_RATE 148500
#define HDMI_TX_3_MAX_PCLK_RATE 297000
@@ -87,15 +80,6 @@
/* Enable HDCP by default */
static bool hdcp_feature_on = true;
-/* Supported HDMI Audio channels */
-#define MSM_HDMI_AUDIO_CHANNEL_2 2
-#define MSM_HDMI_AUDIO_CHANNEL_3 3
-#define MSM_HDMI_AUDIO_CHANNEL_4 4
-#define MSM_HDMI_AUDIO_CHANNEL_5 5
-#define MSM_HDMI_AUDIO_CHANNEL_6 6
-#define MSM_HDMI_AUDIO_CHANNEL_7 7
-#define MSM_HDMI_AUDIO_CHANNEL_8 8
-
/* AVI INFOFRAME DATA */
#define NUM_MODES_AVI 20
#define AVI_MAX_DATA_BYTES 13
@@ -155,17 +139,6 @@ enum {
(byte = (byte & ~(BIT(4) | BIT(5))) |\
((bits & (BIT(0) | BIT(1))) << 4))
-enum msm_hdmi_supported_audio_sample_rates {
- AUDIO_SAMPLE_RATE_32KHZ,
- AUDIO_SAMPLE_RATE_44_1KHZ,
- AUDIO_SAMPLE_RATE_48KHZ,
- AUDIO_SAMPLE_RATE_88_2KHZ,
- AUDIO_SAMPLE_RATE_96KHZ,
- AUDIO_SAMPLE_RATE_176_4KHZ,
- AUDIO_SAMPLE_RATE_192KHZ,
- AUDIO_SAMPLE_RATE_MAX
-};
-
enum hdmi_tx_hpd_states {
HPD_OFF,
HPD_ON,
@@ -179,24 +152,12 @@ enum hdmi_tx_res_states {
RESOLUTION_CHANGED
};
-/* parameters for clock regeneration */
-struct hdmi_tx_audio_acr {
- u32 n;
- u32 cts;
-};
-
-struct hdmi_tx_audio_acr_arry {
- u32 pclk;
- struct hdmi_tx_audio_acr lut[AUDIO_SAMPLE_RATE_MAX];
-};
-
static int hdmi_tx_set_mhl_hpd(struct platform_device *pdev, uint8_t on);
static int hdmi_tx_sysfs_enable_hpd(struct hdmi_tx_ctrl *hdmi_ctrl, int on);
static irqreturn_t hdmi_tx_isr(int irq, void *data);
static void hdmi_tx_hpd_off(struct hdmi_tx_ctrl *hdmi_ctrl);
static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl,
enum hdmi_tx_power_module_type module, int enable);
-static int hdmi_tx_audio_setup(struct hdmi_tx_ctrl *hdmi_ctrl);
static int hdmi_tx_setup_tmds_clk_rate(struct hdmi_tx_ctrl *hdmi_ctrl);
static void hdmi_tx_set_vendor_specific_infoframe(
struct hdmi_tx_ctrl *hdmi_ctrl);
@@ -238,35 +199,6 @@ const char *hdmi_pm_name(enum hdmi_tx_power_module_type module)
}
} /* hdmi_pm_name */
-/* Audio constants lookup table for hdmi_tx_audio_acr_setup */
-/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */
-static const struct hdmi_tx_audio_acr_arry hdmi_tx_audio_acr_lut[] = {
- /* 25.200MHz */
- {25200, {{4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000},
- {12288, 25200}, {25088, 28000}, {24576, 25200} } },
- /* 27.000MHz */
- {27000, {{4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000},
- {12288, 27000}, {25088, 30000}, {24576, 27000} } },
- /* 27.027MHz */
- {27027, {{4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030},
- {12288, 27027}, {25088, 30030}, {24576, 27027} } },
- /* 74.250MHz */
- {74250, {{4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500},
- {12288, 74250}, {25088, 82500}, {24576, 74250} } },
- /* 148.500MHz */
- {148500, {{4096, 148500}, {6272, 165000}, {6144, 148500},
- {12544, 165000}, {12288, 148500}, {25088, 165000},
- {24576, 148500} } },
- /* 297.000MHz */
- {297000, {{3072, 222750}, {4704, 247500}, {5120, 247500},
- {9408, 247500}, {10240, 247500}, {18816, 247500},
- {20480, 247500} } },
- /* 594.000MHz */
- {594000, {{3072, 445500}, {9408, 990000}, {6144, 594000},
- {18816, 990000}, {12288, 594000}, {37632, 990000},
- {24576, 594000} } },
-};
-
static int hdmi_tx_get_version(struct hdmi_tx_ctrl *hdmi_ctrl)
{
int rc;
@@ -425,6 +357,16 @@ static const char *hdmi_tx_io_name(u32 type)
}
} /* hdmi_tx_io_name */
+static void hdmi_tx_audio_setup(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ if (hdmi_ctrl && hdmi_ctrl->audio_ops.on) {
+ u32 pclk = hdmi_tx_setup_tmds_clk_rate(hdmi_ctrl);
+
+ hdmi_ctrl->audio_ops.on(hdmi_ctrl->audio_data,
+ pclk, &hdmi_ctrl->audio_params);
+ }
+}
+
static int hdmi_tx_get_vic_from_panel_info(struct hdmi_tx_ctrl *hdmi_ctrl,
struct mdss_panel_info *pinfo)
{
@@ -546,41 +488,9 @@ static inline void hdmi_tx_send_cable_notification(
static inline void hdmi_tx_set_audio_switch_node(
struct hdmi_tx_ctrl *hdmi_ctrl, int val)
{
- int state = 0;
-
- if (!hdmi_ctrl) {
- DEV_ERR("%s: invalid input\n", __func__);
- return;
- }
-
- state = hdmi_ctrl->audio_sdev.state;
-
- if (state == val)
- return;
-
- if (hdmi_ctrl->audio_ack_enabled &&
- atomic_read(&hdmi_ctrl->audio_ack_pending)) {
- DEV_ERR("%s: %s ack pending, not notifying %s\n", __func__,
- state ? "connect" : "disconnect",
- val ? "connect" : "disconnect");
- return;
- }
-
- if (!hdmi_tx_is_dvi_mode(hdmi_ctrl) &&
- hdmi_tx_is_cea_format(hdmi_ctrl->vid_cfg.vic)) {
- bool switched;
-
- switch_set_state(&hdmi_ctrl->audio_sdev, val);
- switched = hdmi_ctrl->audio_sdev.state != state;
-
- if (hdmi_ctrl->audio_ack_enabled && switched)
- atomic_set(&hdmi_ctrl->audio_ack_pending, 1);
-
- DEV_INFO("%s: audio state %s %d\n", __func__,
- switched ? "switched to" : "is same",
- hdmi_ctrl->audio_sdev.state);
- }
-} /* hdmi_tx_set_audio_switch_node */
+ if (hdmi_ctrl && hdmi_ctrl->audio_ops.notify)
+ hdmi_ctrl->audio_ops.notify(hdmi_ctrl->audio_data, val);
+}
static void hdmi_tx_wait_for_audio_engine(struct hdmi_tx_ctrl *hdmi_ctrl)
{
@@ -737,6 +647,14 @@ static ssize_t hdmi_tx_sysfs_wta_edid(struct device *dev,
}
mutex_lock(&hdmi_ctrl->tx_lock);
+ if (edid_size < EDID_BLOCK_SIZE) {
+ DEV_DBG("%s: disabling custom edid\n", __func__);
+
+ ret = -EINVAL;
+ hdmi_ctrl->custom_edid = false;
+ goto end;
+ }
+
memset(hdmi_ctrl->edid_buf, 0, hdmi_ctrl->edid_buf_size);
while (edid_size--) {
@@ -799,7 +717,6 @@ static ssize_t hdmi_tx_sysfs_wta_audio_cb(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int ack, rc = 0;
- int ack_hpd;
ssize_t ret = strnlen(buf, PAGE_SIZE);
struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
@@ -817,34 +734,113 @@ static ssize_t hdmi_tx_sysfs_wta_audio_cb(struct device *dev,
goto end;
}
- if (ack & AUDIO_ACK_SET_ENABLE) {
- hdmi_ctrl->audio_ack_enabled = ack & AUDIO_ACK_ENABLE ?
- true : false;
+ if (hdmi_ctrl->audio_ops.ack)
+ hdmi_ctrl->audio_ops.ack(hdmi_ctrl->audio_data,
+ ack, hdmi_ctrl->hpd_state);
+end:
+ return ret;
+}
- DEV_INFO("%s: audio ack feature %s\n", __func__,
- hdmi_ctrl->audio_ack_enabled ? "enabled" : "disabled");
+static ssize_t hdmi_tx_sysfs_wta_hot_plug(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int hot_plug, rc;
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+
+ hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ rc = kstrtoint(buf, 10, &hot_plug);
+ if (rc) {
+ DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, rc);
goto end;
}
- if (!hdmi_ctrl->audio_ack_enabled)
+ hdmi_ctrl->hpd_state = !!hot_plug;
+
+ queue_work(hdmi_ctrl->workq, &hdmi_ctrl->hpd_int_work);
+
+ rc = strnlen(buf, PAGE_SIZE);
+end:
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+ return rc;
+}
+
+static ssize_t hdmi_tx_sysfs_rda_sim_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t ret;
+ struct hdmi_tx_ctrl *hdmi_ctrl =
+ hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", hdmi_ctrl->sim_mode);
+ DEV_DBG("%s: '%d'\n", __func__, hdmi_ctrl->sim_mode);
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+
+ return ret;
+}
+
+static ssize_t hdmi_tx_sysfs_wta_sim_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int sim_mode, rc;
+ struct hdmi_tx_ctrl *hdmi_ctrl = NULL;
+ struct dss_io_data *io = NULL;
+
+ hdmi_ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hdmi_ctrl->tx_lock);
+ io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+ DEV_ERR("%s: core io is not initialized\n", __func__);
+ rc = -EINVAL;
goto end;
+ }
- atomic_set(&hdmi_ctrl->audio_ack_pending, 0);
+ if (!hdmi_ctrl->hpd_initialized) {
+ DEV_ERR("%s: hpd not enabled\n", __func__);
+ rc = -EINVAL;
+ goto end;
+ }
- ack_hpd = ack & AUDIO_ACK_CONNECT;
+ rc = kstrtoint(buf, 10, &sim_mode);
+ if (rc) {
+ DEV_ERR("%s: kstrtoint failed. rc=%d\n", __func__, rc);
+ goto end;
+ }
- DEV_DBG("%s: acknowledging %s\n", __func__,
- ack_hpd ? "connect" : "disconnect");
+ hdmi_ctrl->sim_mode = !!sim_mode;
- if (ack_hpd != hdmi_ctrl->hpd_state) {
- DEV_INFO("%s: unbalanced audio state, ack %d, hpd %d\n",
- __func__, ack_hpd, hdmi_ctrl->hpd_state);
+ if (hdmi_ctrl->sim_mode) {
+ DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(0));
+ } else {
+ int cable_sense = DSS_REG_R(io, HDMI_HPD_INT_STATUS) & BIT(1);
- hdmi_tx_set_audio_switch_node(hdmi_ctrl, hdmi_ctrl->hpd_state);
+ DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(0) | BIT(2) |
+ (cable_sense ? 0 : BIT(1)));
}
+
+ rc = strnlen(buf, PAGE_SIZE);
end:
- return ret;
+ mutex_unlock(&hdmi_ctrl->tx_lock);
+ return rc;
}
static ssize_t hdmi_tx_sysfs_rda_video_mode(struct device *dev,
@@ -928,7 +924,10 @@ static ssize_t hdmi_tx_sysfs_wta_hpd(struct device *dev,
goto end;
}
- hdmi_ctrl->audio_ack_enabled = false;
+ /* disable audio ack feature */
+ if (hdmi_ctrl->audio_ops.ack)
+ hdmi_ctrl->audio_ops.ack(hdmi_ctrl->audio_data,
+ AUDIO_ACK_SET_ENABLE, hdmi_ctrl->hpd_state);
if (hdmi_ctrl->panel_power_on) {
hdmi_ctrl->hpd_off_pending = true;
@@ -1312,6 +1311,9 @@ end:
static DEVICE_ATTR(connected, S_IRUGO, hdmi_tx_sysfs_rda_connected, NULL);
static DEVICE_ATTR(hdmi_audio_cb, S_IWUSR, NULL, hdmi_tx_sysfs_wta_audio_cb);
+static DEVICE_ATTR(hot_plug, S_IWUSR, NULL, hdmi_tx_sysfs_wta_hot_plug);
+static DEVICE_ATTR(sim_mode, S_IRUGO | S_IWUSR, hdmi_tx_sysfs_rda_sim_mode,
+ hdmi_tx_sysfs_wta_sim_mode);
static DEVICE_ATTR(edid, S_IRUGO | S_IWUSR, hdmi_tx_sysfs_rda_edid,
hdmi_tx_sysfs_wta_edid);
static DEVICE_ATTR(video_mode, S_IRUGO, hdmi_tx_sysfs_rda_video_mode, NULL);
@@ -1331,6 +1333,8 @@ static DEVICE_ATTR(5v, S_IWUSR, NULL, hdmi_tx_sysfs_wta_5v);
static struct attribute *hdmi_tx_fs_attrs[] = {
&dev_attr_connected.attr,
&dev_attr_hdmi_audio_cb.attr,
+ &dev_attr_hot_plug.attr,
+ &dev_attr_sim_mode.attr,
&dev_attr_edid.attr,
&dev_attr_video_mode.attr,
&dev_attr_hpd.attr,
@@ -1725,6 +1729,7 @@ static int hdmi_tx_init_features(struct hdmi_tx_ctrl *hdmi_ctrl,
struct hdmi_hdcp_init_data hdcp_init_data = {0};
struct hdmi_cec_init_data cec_init_data = {0};
struct cec_abstract_init_data cec_abst_init_data = {0};
+ struct hdmi_audio_init_data audio_init_data = {0};
struct resource *res = NULL;
void *fd = NULL;
int ret = 0;
@@ -1837,6 +1842,10 @@ static int hdmi_tx_init_features(struct hdmi_tx_ctrl *hdmi_ctrl,
hdmi_ctrl->feature_data[HDMI_TX_FEAT_CEC_ABST] = cec_abst_data;
hdmi_ctrl->panel_data.panel_info.cec_data = cec_abst_data;
+ audio_init_data.io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
+ audio_init_data.ops = &hdmi_ctrl->audio_ops;
+ hdmi_ctrl->audio_data = hdmi_audio_register(&audio_init_data);
+
return 0;
err_cec_abst:
@@ -1914,20 +1923,23 @@ static int hdmi_tx_init_panel_info(struct hdmi_tx_ctrl *hdmi_ctrl)
static int hdmi_tx_read_sink_info(struct hdmi_tx_ctrl *hdmi_ctrl)
{
- int status;
+ int status = 0;
+ void *data;
if (!hdmi_ctrl) {
DEV_ERR("%s: invalid input\n", __func__);
return -EINVAL;
}
+ data = hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID];
+
if (!hdmi_tx_is_controller_on(hdmi_ctrl)) {
DEV_ERR("%s: failed: HDMI controller is off", __func__);
status = -ENXIO;
goto error;
}
- if (!hdmi_ctrl->custom_edid) {
+ if (!hdmi_ctrl->custom_edid && !hdmi_ctrl->sim_mode) {
hdmi_ddc_config(&hdmi_ctrl->ddc_ctrl);
status = hdmi_tx_read_edid(hdmi_ctrl);
@@ -1935,13 +1947,14 @@ static int hdmi_tx_read_sink_info(struct hdmi_tx_ctrl *hdmi_ctrl)
DEV_ERR("%s: error reading edid\n", __func__);
goto error;
}
- } else {
- hdmi_ctrl->custom_edid = false;
}
- status = hdmi_edid_parser(hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID]);
- if (status)
- DEV_ERR("%s: edid parse failed\n", __func__);
+ /* parse edid if a valid edid buffer is present */
+ if (hdmi_ctrl->custom_edid || !hdmi_ctrl->sim_mode) {
+ status = hdmi_edid_parser(data);
+ if (status)
+ DEV_ERR("%s: edid parse failed\n", __func__);
+ }
error:
return status;
@@ -2784,7 +2797,7 @@ static int hdmi_tx_config_power(struct hdmi_tx_ctrl *hdmi_ctrl,
snprintf(name, MAX_CLIENT_NAME_LEN, "hdmi:%u", module);
hdmi_ctrl->pdata.reg_bus_clt[module] =
mdss_reg_bus_vote_client_create(name);
- if (IS_ERR_OR_NULL(hdmi_ctrl->pdata.reg_bus_clt[module])) {
+ if (IS_ERR(hdmi_ctrl->pdata.reg_bus_clt[module])) {
pr_err("reg bus client create failed\n");
msm_dss_config_vreg(&hdmi_ctrl->pdev->dev,
power_data->vreg_config, power_data->num_vreg, 0);
@@ -2917,7 +2930,7 @@ static int hdmi_tx_enable_power(struct hdmi_tx_ctrl *hdmi_ctrl,
goto disable_vreg;
}
mdss_update_reg_bus_vote(hdmi_ctrl->pdata.reg_bus_clt[module],
- VOTE_INDEX_19_MHZ);
+ VOTE_INDEX_LOW);
rc = msm_dss_clk_set_rate(power_data->clk_config,
power_data->num_clk);
@@ -3045,341 +3058,12 @@ static void hdmi_tx_phy_reset(struct hdmi_tx_ctrl *hdmi_ctrl)
DSS_REG_W_ND(io, HDMI_PHY_CTRL, val | SW_RESET_PLL);
} /* hdmi_tx_phy_reset */
-static int hdmi_tx_audio_acr_setup(struct hdmi_tx_ctrl *hdmi_ctrl,
- bool enabled)
-{
- /* Read first before writing */
- u32 acr_pck_ctrl_reg;
- u32 sample_rate_hz;
- u32 pixel_freq;
- struct dss_io_data *io = NULL;
-
- if (!hdmi_ctrl) {
- DEV_ERR("%s: Invalid input\n", __func__);
- return -EINVAL;
- }
-
- sample_rate_hz = hdmi_ctrl->audio_data.sample_rate_hz;
-
- io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
- if (!io->base) {
- DEV_ERR("%s: core io not inititalized\n", __func__);
- return -EINVAL;
- }
-
- acr_pck_ctrl_reg = DSS_REG_R(io, HDMI_ACR_PKT_CTRL);
-
- if (enabled) {
- struct msm_hdmi_mode_timing_info *timing =
- &hdmi_ctrl->vid_cfg.timing;
- const struct hdmi_tx_audio_acr_arry *audio_acr =
- &hdmi_tx_audio_acr_lut[0];
- const int lut_size = sizeof(hdmi_tx_audio_acr_lut)
- / sizeof(*hdmi_tx_audio_acr_lut);
- u32 i, n, cts, layout, multiplier, aud_pck_ctrl_2_reg;
-
- if (timing == NULL) {
- DEV_WARN("%s: video format %d not supported\n",
- __func__, hdmi_ctrl->vid_cfg.vic);
- return -EPERM;
- }
- pixel_freq = hdmi_tx_setup_tmds_clk_rate(hdmi_ctrl);
-
- for (i = 0; i < lut_size;
- audio_acr = &hdmi_tx_audio_acr_lut[++i]) {
- if (audio_acr->pclk == pixel_freq)
- break;
- }
- if (i >= lut_size) {
- DEV_WARN("%s: pixel clk %d not supported\n", __func__,
- pixel_freq);
- return -EPERM;
- }
-
- n = audio_acr->lut[sample_rate_hz].n;
- cts = audio_acr->lut[sample_rate_hz].cts;
- layout = (MSM_HDMI_AUDIO_CHANNEL_2 ==
- hdmi_ctrl->audio_data.num_of_channels) ? 0 : 1;
-
- if (
- (AUDIO_SAMPLE_RATE_192KHZ == sample_rate_hz) ||
- (AUDIO_SAMPLE_RATE_176_4KHZ == sample_rate_hz)) {
- multiplier = 4;
- n >>= 2; /* divide N by 4 and use multiplier */
- } else if (
- (AUDIO_SAMPLE_RATE_96KHZ == sample_rate_hz) ||
- (AUDIO_SAMPLE_RATE_88_2KHZ == sample_rate_hz)) {
- multiplier = 2;
- n >>= 1; /* divide N by 2 and use multiplier */
- } else {
- multiplier = 1;
- }
- DEV_DBG("%s: n=%u, cts=%u, layout=%u\n", __func__, n, cts,
- layout);
-
- /* AUDIO_PRIORITY | SOURCE */
- acr_pck_ctrl_reg |= 0x80000100;
-
- /* Reset multiplier bits */
- acr_pck_ctrl_reg &= ~(7 << 16);
-
- /* N_MULTIPLE(multiplier) */
- acr_pck_ctrl_reg |= (multiplier & 7) << 16;
-
- if ((AUDIO_SAMPLE_RATE_48KHZ == sample_rate_hz) ||
- (AUDIO_SAMPLE_RATE_96KHZ == sample_rate_hz) ||
- (AUDIO_SAMPLE_RATE_192KHZ == sample_rate_hz)) {
- /* SELECT(3) */
- acr_pck_ctrl_reg |= 3 << 4;
- /* CTS_48 */
- cts <<= 12;
-
- /* CTS: need to determine how many fractional bits */
- DSS_REG_W(io, HDMI_ACR_48_0, cts);
- /* N */
- DSS_REG_W(io, HDMI_ACR_48_1, n);
- } else if (
- (AUDIO_SAMPLE_RATE_44_1KHZ == sample_rate_hz) ||
- (AUDIO_SAMPLE_RATE_88_2KHZ == sample_rate_hz) ||
- (AUDIO_SAMPLE_RATE_176_4KHZ == sample_rate_hz)) {
- /* SELECT(2) */
- acr_pck_ctrl_reg |= 2 << 4;
- /* CTS_44 */
- cts <<= 12;
-
- /* CTS: need to determine how many fractional bits */
- DSS_REG_W(io, HDMI_ACR_44_0, cts);
- /* N */
- DSS_REG_W(io, HDMI_ACR_44_1, n);
- } else { /* default to 32k */
- /* SELECT(1) */
- acr_pck_ctrl_reg |= 1 << 4;
- /* CTS_32 */
- cts <<= 12;
-
- /* CTS: need to determine how many fractional bits */
- DSS_REG_W(io, HDMI_ACR_32_0, cts);
- /* N */
- DSS_REG_W(io, HDMI_ACR_32_1, n);
- }
- /* Payload layout depends on number of audio channels */
- /* LAYOUT_SEL(layout) */
- aud_pck_ctrl_2_reg = 1 | (layout << 1);
- /* override | layout */
- DSS_REG_W(io, HDMI_AUDIO_PKT_CTRL2, aud_pck_ctrl_2_reg);
-
- /* SEND | CONT */
- acr_pck_ctrl_reg |= 0x00000003;
- } else {
- /* ~(SEND | CONT) */
- acr_pck_ctrl_reg &= ~0x00000003;
- }
- DSS_REG_W(io, HDMI_ACR_PKT_CTRL, acr_pck_ctrl_reg);
-
- return 0;
-} /* hdmi_tx_audio_acr_setup */
-
-static int hdmi_tx_audio_iframe_setup(struct hdmi_tx_ctrl *hdmi_ctrl,
- bool enabled)
-{
- struct dss_io_data *io = NULL;
-
- u32 hdmi_debug_reg = 0;
- u32 channel_count = 1; /* Def to 2 channels -> Table 17 in CEA-D */
- u32 num_of_channels;
- u32 channel_allocation;
- u32 level_shift;
- u32 down_mix;
- u32 check_sum, audio_info_0_reg, audio_info_1_reg;
- u32 audio_info_ctrl_reg;
- u32 aud_pck_ctrl_2_reg;
- u32 layout;
- u32 sample_present;
-
- if (!hdmi_ctrl) {
- DEV_ERR("%s: invalid input\n", __func__);
- return -EINVAL;
- }
-
- num_of_channels = hdmi_ctrl->audio_data.num_of_channels;
- channel_allocation = hdmi_ctrl->audio_data.channel_allocation;
- level_shift = hdmi_ctrl->audio_data.level_shift;
- down_mix = hdmi_ctrl->audio_data.down_mix;
- sample_present = hdmi_ctrl->audio_data.sample_present;
-
- io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
- if (!io->base) {
- DEV_ERR("%s: core io not inititalized\n", __func__);
- return -EINVAL;
- }
-
- layout = (MSM_HDMI_AUDIO_CHANNEL_2 == num_of_channels) ? 0 : 1;
- aud_pck_ctrl_2_reg = 1 | (layout << 1);
- DSS_REG_W(io, HDMI_AUDIO_PKT_CTRL2, aud_pck_ctrl_2_reg);
-
- /*
- * Please see table 20 Audio InfoFrame in HDMI spec
- * FL = front left
- * FC = front Center
- * FR = front right
- * FLC = front left center
- * FRC = front right center
- * RL = rear left
- * RC = rear center
- * RR = rear right
- * RLC = rear left center
- * RRC = rear right center
- * LFE = low frequency effect
- */
-
- /* Read first then write because it is bundled with other controls */
- audio_info_ctrl_reg = DSS_REG_R(io, HDMI_INFOFRAME_CTRL0);
-
- if (enabled) {
- switch (num_of_channels) {
- case MSM_HDMI_AUDIO_CHANNEL_2:
- break;
- case MSM_HDMI_AUDIO_CHANNEL_3:
- channel_count = 2;
- break;
- case MSM_HDMI_AUDIO_CHANNEL_4:
- channel_count = 3;
- break;
- case MSM_HDMI_AUDIO_CHANNEL_5:
- channel_count = 4;
- break;
- case MSM_HDMI_AUDIO_CHANNEL_6:
- channel_count = 5;
- break;
- case MSM_HDMI_AUDIO_CHANNEL_7:
- channel_count = 6;
- break;
- case MSM_HDMI_AUDIO_CHANNEL_8:
- channel_count = 7;
- break;
- default:
- DEV_ERR("%s: Unsupported num_of_channels = %u\n",
- __func__, num_of_channels);
- return -EINVAL;
- }
-
- /* Program the Channel-Speaker allocation */
- audio_info_1_reg = 0;
- /* CA(channel_allocation) */
- audio_info_1_reg |= channel_allocation & 0xff;
- /* Program the Level shifter */
- audio_info_1_reg |= (level_shift << 11) & 0x00007800;
- /* Program the Down-mix Inhibit Flag */
- audio_info_1_reg |= (down_mix << 15) & 0x00008000;
-
- DSS_REG_W(io, HDMI_AUDIO_INFO1, audio_info_1_reg);
-
- /*
- * Calculate CheckSum: Sum of all the bytes in the
- * Audio Info Packet (See table 8.4 in HDMI spec)
- */
- check_sum = 0;
- /* HDMI_AUDIO_INFO_FRAME_PACKET_HEADER_TYPE[0x84] */
- check_sum += 0x84;
- /* HDMI_AUDIO_INFO_FRAME_PACKET_HEADER_VERSION[0x01] */
- check_sum += 1;
- /* HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH[0x0A] */
- check_sum += 0x0A;
- check_sum += channel_count;
- check_sum += channel_allocation;
- /* See Table 8.5 in HDMI spec */
- check_sum += (level_shift & 0xF) << 3 | (down_mix & 0x1) << 7;
- check_sum &= 0xFF;
- check_sum = (u8) (256 - check_sum);
-
- audio_info_0_reg = 0;
- /* CHECKSUM(check_sum) */
- audio_info_0_reg |= check_sum & 0xff;
- /* CC(channel_count) */
- audio_info_0_reg |= (channel_count << 8) & 0x00000700;
-
- DSS_REG_W(io, HDMI_AUDIO_INFO0, audio_info_0_reg);
-
- /*
- * Set these flags
- * AUDIO_INFO_UPDATE |
- * AUDIO_INFO_SOURCE |
- * AUDIO_INFO_CONT |
- * AUDIO_INFO_SEND
- */
- audio_info_ctrl_reg |= 0x000000F0;
-
- /*
- * Program the Sample Present into the debug register so that
- * the HDMI transmitter core can add the sample present to
- * Audio Sample Packet once tranmission starts.
- */
- if (layout) {
- /* Set the Layout bit */
- hdmi_debug_reg |= BIT(4);
- /* Set the Sample Present bits */
- hdmi_debug_reg |= sample_present & 0xF;
- DSS_REG_W(io, HDMI_DEBUG, hdmi_debug_reg);
- }
- } else {
- /*Clear these flags
- * ~(AUDIO_INFO_UPDATE |
- * AUDIO_INFO_SOURCE |
- * AUDIO_INFO_CONT |
- * AUDIO_INFO_SEND)
- */
- audio_info_ctrl_reg &= ~0x000000F0;
- }
- DSS_REG_W(io, HDMI_INFOFRAME_CTRL0, audio_info_ctrl_reg);
-
- dss_reg_dump(io->base, io->len,
- enabled ? "HDMI-AUDIO-ON: " : "HDMI-AUDIO-OFF: ", REG_DUMP);
-
- return 0;
-} /* hdmi_tx_audio_iframe_setup */
-
-static int hdmi_tx_get_audio_sample_rate(u32 *sample_rate_hz)
-{
- int ret = 0;
- u32 rate = *sample_rate_hz;
-
- switch (rate) {
- case 32000:
- *sample_rate_hz = AUDIO_SAMPLE_RATE_32KHZ;
- break;
- case 44100:
- *sample_rate_hz = AUDIO_SAMPLE_RATE_44_1KHZ;
- break;
- case 48000:
- *sample_rate_hz = AUDIO_SAMPLE_RATE_48KHZ;
- break;
- case 88200:
- *sample_rate_hz = AUDIO_SAMPLE_RATE_88_2KHZ;
- break;
- case 96000:
- *sample_rate_hz = AUDIO_SAMPLE_RATE_96KHZ;
- break;
- case 176400:
- *sample_rate_hz = AUDIO_SAMPLE_RATE_176_4KHZ;
- break;
- case 192000:
- *sample_rate_hz = AUDIO_SAMPLE_RATE_192KHZ;
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-} /* hdmi_tx_get_audio_sample_rate */
-
static int hdmi_tx_audio_info_setup(struct platform_device *pdev,
struct msm_hdmi_audio_setup_params *params)
{
int rc = 0;
struct hdmi_tx_ctrl *hdmi_ctrl = platform_get_drvdata(pdev);
u32 is_mode_dvi;
- u32 *sample_rate_hz;
if (!hdmi_ctrl || !params) {
DEV_ERR("%s: invalid input\n", __func__);
@@ -3391,38 +3075,31 @@ static int hdmi_tx_audio_info_setup(struct platform_device *pdev,
is_mode_dvi = hdmi_tx_is_dvi_mode(hdmi_ctrl);
if (!is_mode_dvi && hdmi_tx_is_panel_on(hdmi_ctrl)) {
- memcpy(&hdmi_ctrl->audio_data, params,
+ memcpy(&hdmi_ctrl->audio_params, params,
sizeof(struct msm_hdmi_audio_setup_params));
- sample_rate_hz = &hdmi_ctrl->audio_data.sample_rate_hz;
- rc = hdmi_tx_get_audio_sample_rate(sample_rate_hz);
- if (rc) {
- DEV_ERR("%s: invalid sample rate = %d\n",
- __func__, hdmi_ctrl->audio_data.sample_rate_hz);
- goto exit;
- }
-
- rc = hdmi_tx_audio_setup(hdmi_ctrl);
- if (rc)
- DEV_ERR("%s: hdmi_tx_audio_iframe_setup failed.rc=%d\n",
- __func__, rc);
+ hdmi_tx_audio_setup(hdmi_ctrl);
} else {
rc = -EPERM;
}
- if (rc)
+ if (rc) {
+ struct hdmi_audio_status status = {0};
+
+ if (hdmi_ctrl->audio_ops.status)
+ hdmi_ctrl->audio_ops.status(hdmi_ctrl->audio_data,
+ &status);
+
dev_err_ratelimited(&hdmi_ctrl->pdev->dev,
"%s: hpd %d, ack %d, switch %d, mode %s, power %d\n",
__func__, hdmi_ctrl->hpd_state,
- atomic_read(&hdmi_ctrl->audio_ack_pending),
- hdmi_ctrl->audio_sdev.state,
+ status.ack_pending, status.switched,
is_mode_dvi ? "dvi" : "hdmi",
hdmi_ctrl->panel_power_on);
-
-exit:
+ }
mutex_unlock(&hdmi_ctrl->tx_lock);
return rc;
-} /* hdmi_tx_audio_info_setup */
+}
static int hdmi_tx_get_audio_edid_blk(struct platform_device *pdev,
struct msm_hdmi_audio_edid_blk *blk)
@@ -3434,9 +3111,6 @@ static int hdmi_tx_get_audio_edid_blk(struct platform_device *pdev,
return -ENODEV;
}
- if (!hdmi_ctrl->audio_sdev.state)
- return -EPERM;
-
return hdmi_edid_get_audio_blk(
hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID], blk);
} /* hdmi_tx_get_audio_edid_blk */
@@ -3524,13 +3198,19 @@ static int hdmi_tx_get_cable_status(struct platform_device *pdev, u32 vote)
* consider this as an error as it will result in whole
* audio path to fail.
*/
- if (!hpd)
+ if (!hpd) {
+ struct hdmi_audio_status status = {0};
+
+ if (hdmi_ctrl->audio_ops.status)
+ hdmi_ctrl->audio_ops.status(hdmi_ctrl->audio_data,
+ &status);
+
dev_err_ratelimited(&hdmi_ctrl->pdev->dev,
"%s: hpd %d, ack %d, switch %d, power %d\n",
__func__, hdmi_ctrl->hpd_state,
- atomic_read(&hdmi_ctrl->audio_ack_pending),
- hdmi_ctrl->audio_sdev.state,
+ status.ack_pending, status.switched,
hdmi_ctrl->panel_power_on);
+ }
return hpd;
}
@@ -3553,68 +3233,6 @@ int msm_hdmi_register_audio_codec(struct platform_device *pdev,
} /* hdmi_tx_audio_register */
EXPORT_SYMBOL(msm_hdmi_register_audio_codec);
-static int hdmi_tx_audio_setup(struct hdmi_tx_ctrl *hdmi_ctrl)
-{
- int rc = 0;
- struct dss_io_data *io = NULL;
-
- if (!hdmi_ctrl) {
- DEV_ERR("%s: invalid input\n", __func__);
- return -EINVAL;
- }
-
- io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
- if (!io->base) {
- DEV_ERR("%s: core io not inititalized\n", __func__);
- return -EINVAL;
- }
-
- rc = hdmi_tx_audio_acr_setup(hdmi_ctrl, true);
- if (rc) {
- DEV_ERR("%s: hdmi_tx_audio_acr_setup failed. rc=%d\n",
- __func__, rc);
- return rc;
- }
-
- rc = hdmi_tx_audio_iframe_setup(hdmi_ctrl, true);
- if (rc) {
- DEV_ERR("%s: hdmi_tx_audio_iframe_setup failed. rc=%d\n",
- __func__, rc);
- return rc;
- }
-
- DEV_INFO("HDMI Audio: Enabled\n");
-
- return 0;
-} /* hdmi_tx_audio_setup */
-
-static void hdmi_tx_audio_off(struct hdmi_tx_ctrl *hdmi_ctrl)
-{
- struct dss_io_data *io = NULL;
-
- if (!hdmi_ctrl) {
- DEV_ERR("%s: invalid input\n", __func__);
- return;
- }
-
- io = &hdmi_ctrl->pdata.io[HDMI_TX_CORE_IO];
- if (!io->base) {
- DEV_ERR("%s: core io not inititalized\n", __func__);
- return;
- }
-
- if (hdmi_tx_audio_iframe_setup(hdmi_ctrl, false))
- DEV_ERR("%s: hdmi_tx_audio_iframe_setup failed.\n", __func__);
-
- if (hdmi_tx_audio_acr_setup(hdmi_ctrl, false))
- DEV_ERR("%s: hdmi_tx_audio_acr_setup failed.\n", __func__);
-
- hdmi_ctrl->audio_data.sample_rate_hz = AUDIO_SAMPLE_RATE_48KHZ;
- hdmi_ctrl->audio_data.num_of_channels = MSM_HDMI_AUDIO_CHANNEL_2;
-
- DEV_INFO("HDMI Audio: Disabled\n");
-} /* hdmi_tx_audio_off */
-
static int hdmi_tx_setup_tmds_clk_rate(struct hdmi_tx_ctrl *hdmi_ctrl)
{
u32 rate = 0;
@@ -3824,6 +3442,11 @@ static void hdmi_tx_hpd_polarity_setup(struct hdmi_tx_ctrl *hdmi_ctrl,
return;
}
+ if (hdmi_ctrl->sim_mode) {
+ DEV_DBG("%s: sim mode enabled\n", __func__);
+ return;
+ }
+
if (polarity)
DSS_REG_W(io, HDMI_HPD_INT_CTRL, BIT(2) | BIT(1));
else
@@ -3843,6 +3466,15 @@ static void hdmi_tx_hpd_polarity_setup(struct hdmi_tx_ctrl *hdmi_ctrl,
}
} /* hdmi_tx_hpd_polarity_setup */
+static inline void hdmi_tx_audio_off(struct hdmi_tx_ctrl *hdmi_ctrl)
+{
+ if (hdmi_ctrl && hdmi_ctrl->audio_ops.off)
+ hdmi_ctrl->audio_ops.off(hdmi_ctrl->audio_data);
+
+ memset(&hdmi_ctrl->audio_params, 0,
+ sizeof(struct msm_hdmi_audio_setup_params));
+}
+
static int hdmi_tx_power_off(struct mdss_panel_data *panel_data)
{
struct dss_io_data *io = NULL;
@@ -4077,8 +3709,6 @@ static int hdmi_tx_hpd_on(struct hdmi_tx_ctrl *hdmi_ctrl)
/* Turn on HPD HW circuit */
DSS_REG_W(io, HDMI_HPD_CTRL, reg_val | BIT(28));
- atomic_set(&hdmi_ctrl->audio_ack_pending, 0);
-
hdmi_tx_hpd_polarity_setup(hdmi_ctrl, HPD_CONNECT_POLARITY);
DEV_DBG("%s: HPD is now ON\n", __func__);
}
@@ -4268,7 +3898,6 @@ static void hdmi_tx_dev_deinit(struct hdmi_tx_ctrl *hdmi_ctrl)
hdmi_ctrl->feature_data[HDMI_TX_FEAT_EDID] = NULL;
}
- switch_dev_unregister(&hdmi_ctrl->audio_sdev);
switch_dev_unregister(&hdmi_ctrl->sdev);
if (hdmi_ctrl->workq)
destroy_workqueue(hdmi_ctrl->workq);
@@ -4328,9 +3957,6 @@ static int hdmi_tx_dev_init(struct hdmi_tx_ctrl *hdmi_ctrl)
spin_lock_init(&hdmi_ctrl->hpd_state_lock);
- hdmi_ctrl->audio_data.sample_rate_hz = AUDIO_SAMPLE_RATE_48KHZ;
- hdmi_ctrl->audio_data.num_of_channels = MSM_HDMI_AUDIO_CHANNEL_2;
-
return 0;
fail_create_workq:
@@ -4379,12 +4005,6 @@ static int hdmi_tx_init_switch_dev(struct hdmi_tx_ctrl *hdmi_ctrl)
DEV_ERR("%s: display switch registration failed\n", __func__);
goto end;
}
-
- hdmi_ctrl->audio_sdev.name = "hdmi_audio";
- rc = switch_dev_register(&hdmi_ctrl->audio_sdev);
- if (rc)
- DEV_ERR("%s: audio switch registration failed\n", __func__);
-
end:
return rc;
}
@@ -4545,11 +4165,14 @@ static int hdmi_tx_panel_event_handler(struct mdss_panel_data *panel_data,
break;
case MDSS_EVENT_PANEL_ON:
- hdmi_tx_update_hdcp_info(hdmi_ctrl);
+ if (!hdmi_ctrl->sim_mode) {
+ hdmi_tx_update_hdcp_info(hdmi_ctrl);
- rc = hdmi_tx_start_hdcp(hdmi_ctrl);
- if (rc)
- DEV_ERR("%s: hdcp start failed rc=%d\n", __func__, rc);
+ rc = hdmi_tx_start_hdcp(hdmi_ctrl);
+ if (rc)
+ DEV_ERR("%s: hdcp start failed rc=%d\n",
+ __func__, rc);
+ }
hdmi_ctrl->timing_gen_on = true;
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.h b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
index 34474ecf0ff0..170837dcc9e6 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
@@ -16,6 +16,7 @@
#include <linux/switch.h>
#include "mdss_hdmi_util.h"
#include "mdss_cec_core.h"
+#include "mdss_hdmi_audio.h"
#define MAX_SWITCH_NAME_SIZE 5
@@ -130,14 +131,12 @@ struct hdmi_tx_ctrl {
struct hdmi_tx_pinctrl pin_res;
- struct msm_hdmi_audio_setup_params audio_data;
struct mutex mutex;
struct mutex tx_lock;
struct list_head cable_notify_handlers;
struct kobject *kobj;
struct switch_dev sdev;
- struct switch_dev audio_sdev;
struct workqueue_struct *workq;
spinlock_t hpd_state_lock;
@@ -168,11 +167,10 @@ struct hdmi_tx_ctrl {
bool scrambler_enabled;
u32 hdcp14_present;
bool hdcp1_use_sw_keys;
- bool audio_ack_enabled;
- atomic_t audio_ack_pending;
bool hdcp14_sw_keys;
bool auth_state;
bool custom_edid;
+ bool sim_mode;
u32 enc_lvl;
u8 spd_vendor_name[9];
@@ -182,6 +180,7 @@ struct hdmi_tx_ctrl {
void (*hdmi_tx_hpd_done) (void *data);
void *downstream_data;
+ void *audio_data;
void *feature_data[HDMI_TX_FEAT_MAX];
struct hdmi_hdcp_ops *hdcp_ops;
@@ -194,6 +193,8 @@ struct hdmi_tx_ctrl {
struct cec_ops hdmi_cec_ops;
struct cec_cbs hdmi_cec_cbs;
+ struct hdmi_audio_ops audio_ops;
+ struct msm_hdmi_audio_setup_params audio_params;
char disp_switch_name[MAX_SWITCH_NAME_SIZE];
bool power_data_enable[HDMI_TX_MAX_PM];
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.c b/drivers/video/fbdev/msm/mdss_hdmi_util.c
index 33f086dcf62f..4640c3f505b6 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.c
@@ -725,10 +725,9 @@ static void hdmi_ddc_trigger(struct hdmi_tx_ddc_ctrl *ddc_ctrl,
DSS_REG_W_ND(io, HDMI_DDC_CTRL, ddc_ctrl_reg_val);
}
-static int hdmi_ddc_check_status(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
+static void hdmi_ddc_clear_status(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
{
u32 reg_val;
- int rc = 0;
/* Read DDC status */
reg_val = DSS_REG_R(ddc_ctrl->io, HDMI_DDC_SW_STATUS);
@@ -743,18 +742,14 @@ static int hdmi_ddc_check_status(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
reg_val = BIT(3) | BIT(1);
DSS_REG_W_ND(ddc_ctrl->io, HDMI_DDC_CTRL, reg_val);
-
- rc = -ECOMM;
}
-
- return rc;
}
static int hdmi_ddc_read_retry(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
{
u32 reg_val, ndx, time_out_count, wait_time;
struct hdmi_tx_ddc_data *ddc_data;
- int status, rc;
+ int status;
int busy_wait_us;
if (!ddc_ctrl || !ddc_ctrl->io) {
@@ -823,10 +818,7 @@ static int hdmi_ddc_read_retry(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
status = -ETIMEDOUT;
}
- rc = hdmi_ddc_check_status(ddc_ctrl);
-
- if (!status)
- status = rc;
+ hdmi_ddc_clear_status(ddc_ctrl);
} while (status && ddc_data->retry--);
if (status)
@@ -1165,7 +1157,7 @@ int hdmi_ddc_read(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
int hdmi_ddc_read_seg(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
{
- int status, rc;
+ int status;
u32 reg_val, ndx, time_out_count;
struct hdmi_tx_ddc_data *ddc_data;
@@ -1206,10 +1198,7 @@ int hdmi_ddc_read_seg(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
status = -ETIMEDOUT;
}
- rc = hdmi_ddc_check_status(ddc_ctrl);
-
- if (!status)
- status = rc;
+ hdmi_ddc_clear_status(ddc_ctrl);
} while (status && ddc_data->retry--);
if (status)
@@ -1235,7 +1224,7 @@ error:
int hdmi_ddc_write(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
{
- int status, rc;
+ int status;
u32 time_out_count;
struct hdmi_tx_ddc_data *ddc_data;
u32 wait_time;
@@ -1307,10 +1296,7 @@ int hdmi_ddc_write(struct hdmi_tx_ddc_ctrl *ddc_ctrl)
status = -ETIMEDOUT;
}
- rc = hdmi_ddc_check_status(ddc_ctrl);
-
- if (!status)
- status = rc;
+ hdmi_ddc_clear_status(ddc_ctrl);
} while (status && ddc_data->retry--);
if (status)
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 55c98e75755f..b50e29c0e610 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -40,6 +40,8 @@
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <linux/clk/msm-clk.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
@@ -64,6 +66,7 @@
#define RES_UHD (3840*2160)
struct mdss_data_type *mdss_res;
+static u32 mem_protect_sd_ctrl_id;
static int mdss_fb_mem_get_iommu_domain(void)
{
@@ -78,10 +81,6 @@ struct msm_mdp_interface mdp5 = {
.get_format_params = mdss_mdp_get_format_params,
};
-#define DEFAULT_TOTAL_RGB_PIPES 3
-#define DEFAULT_TOTAL_VIG_PIPES 3
-#define DEFAULT_TOTAL_DMA_PIPES 2
-
#define IB_QUOTA 2000000000
#define AB_QUOTA 2000000000
@@ -108,6 +107,14 @@ struct mdss_hw mdss_mdp_hw = {
.irq_handler = mdss_mdp_isr,
};
+/* define for h/w block with external driver */
+struct mdss_hw mdss_misc_hw = {
+ .hw_ndx = MDSS_HW_MISC,
+ .ptr = NULL,
+ .irq_handler = NULL,
+};
+
+#ifdef CONFIG_MSM_BUS_SCALING
#define MDP_REG_BUS_VECTOR_ENTRY(ab_val, ib_val) \
{ \
.src = MSM_BUS_MASTER_AMPSS_M0, \
@@ -134,6 +141,7 @@ static struct msm_bus_scale_pdata mdp_reg_bus_scale_table = {
.name = "mdss_reg",
.active_only = true,
};
+#endif
u32 invalid_mdp107_wb_output_fmts[] = {
MDP_XRGB_8888,
@@ -174,6 +182,61 @@ u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp)
return xres * bpp;
}
+static void mdss_irq_mask(struct irq_data *data)
+{
+ struct mdss_data_type *mdata = irq_data_get_irq_chip_data(data);
+ unsigned long irq_flags;
+
+ if (!mdata)
+ return;
+
+ pr_debug("irq_domain_mask %lu\n", data->hwirq);
+
+ if (data->hwirq < 32) {
+ spin_lock_irqsave(&mdp_lock, irq_flags);
+ mdata->mdss_util->disable_irq(&mdss_misc_hw);
+ spin_unlock_irqrestore(&mdp_lock, irq_flags);
+ }
+}
+
+static void mdss_irq_unmask(struct irq_data *data)
+{
+ struct mdss_data_type *mdata = irq_data_get_irq_chip_data(data);
+ unsigned long irq_flags;
+
+ if (!mdata)
+ return;
+
+ pr_debug("irq_domain_unmask %lu\n", data->hwirq);
+
+ if (data->hwirq < 32) {
+ spin_lock_irqsave(&mdp_lock, irq_flags);
+ mdata->mdss_util->enable_irq(&mdss_misc_hw);
+ spin_unlock_irqrestore(&mdp_lock, irq_flags);
+ }
+}
+
+static struct irq_chip mdss_irq_chip = {
+ .name = "mdss",
+ .irq_mask = mdss_irq_mask,
+ .irq_unmask = mdss_irq_unmask,
+};
+
+static int mdss_irq_domain_map(struct irq_domain *d,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ struct mdss_data_type *mdata = d->host_data;
+ /* check here if virq is a valid interrupt line */
+ irq_set_chip_and_handler(virq, &mdss_irq_chip, handle_level_irq);
+ irq_set_chip_data(virq, mdata);
+ return 0;
+}
+
+static struct irq_domain_ops mdss_irq_domain_ops = {
+ .map = mdss_irq_domain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
static irqreturn_t mdss_irq_handler(int irq, void *ptr)
{
struct mdss_data_type *mdata = ptr;
@@ -192,25 +255,44 @@ static irqreturn_t mdss_irq_handler(int irq, void *ptr)
spin_lock(&mdp_lock);
mdata->mdss_util->irq_dispatch(MDSS_HW_MDP, irq, ptr);
spin_unlock(&mdp_lock);
+ intr &= ~MDSS_INTR_MDP;
}
- if (intr & MDSS_INTR_DSI0)
+ if (intr & MDSS_INTR_DSI0) {
mdata->mdss_util->irq_dispatch(MDSS_HW_DSI0, irq, ptr);
+ intr &= ~MDSS_INTR_DSI0;
+ }
- if (intr & MDSS_INTR_DSI1)
+ if (intr & MDSS_INTR_DSI1) {
mdata->mdss_util->irq_dispatch(MDSS_HW_DSI1, irq, ptr);
+ intr &= ~MDSS_INTR_DSI1;
+ }
- if (intr & MDSS_INTR_EDP)
+ if (intr & MDSS_INTR_EDP) {
mdata->mdss_util->irq_dispatch(MDSS_HW_EDP, irq, ptr);
+ intr &= ~MDSS_INTR_EDP;
+ }
- if (intr & MDSS_INTR_HDMI)
+ if (intr & MDSS_INTR_HDMI) {
mdata->mdss_util->irq_dispatch(MDSS_HW_HDMI, irq, ptr);
+ intr &= ~MDSS_INTR_HDMI;
+ }
+
+ /* route misc. interrupts to external drivers */
+ while (intr) {
+ irq_hw_number_t hwirq = fls(intr) - 1;
+
+ generic_handle_irq(irq_find_mapping(
+ mdata->irq_domain, hwirq));
+ intr &= ~(1 << hwirq);
+ }
mdss_mdp_hw.irq_info->irq_buzy = false;
return IRQ_HANDLED;
}
+#ifdef CONFIG_MSM_BUS_SCALING
static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
{
struct msm_bus_scale_pdata *reg_bus_pdata;
@@ -227,20 +309,24 @@ static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
pr_debug("register bus_hdl=%x\n", mdata->bus_hdl);
}
- if (!mdata->reg_bus_hdl) {
+ if (!mdata->reg_bus_scale_table) {
reg_bus_pdata = &mdp_reg_bus_scale_table;
for (i = 0; i < reg_bus_pdata->num_usecases; i++) {
mdp_reg_bus_usecases[i].num_paths = 1;
mdp_reg_bus_usecases[i].vectors =
&mdp_reg_bus_vectors[i];
}
+ mdata->reg_bus_scale_table = reg_bus_pdata;
+ }
+ if (!mdata->reg_bus_hdl) {
mdata->reg_bus_hdl =
- msm_bus_scale_register_client(reg_bus_pdata);
- if (!mdata->reg_bus_hdl) {
+ msm_bus_scale_register_client(
+ mdata->reg_bus_scale_table);
+ if (!mdata->reg_bus_hdl)
/* Continue without reg_bus scaling */
pr_warn("reg_bus_client register failed\n");
- } else
+ else
pr_debug("register reg_bus_hdl=%x\n",
mdata->reg_bus_hdl);
}
@@ -484,6 +570,42 @@ int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
return rc;
}
+#else
+static int mdss_mdp_bus_scale_register(struct mdss_data_type *mdata)
+{
+ return 0;
+}
+
+static void mdss_mdp_bus_scale_unregister(struct mdss_data_type *mdata)
+{
+}
+
+int mdss_bus_scale_set_quota(int client, u64 ab_quota, u64 ib_quota)
+{
+ pr_debug("No bus scaling! client=%d ab=%llu ib=%llu\n",
+ client, ab_quota, ib_quota);
+
+ return 0;
+}
+
+struct reg_bus_client *mdss_reg_bus_vote_client_create(char *client_name)
+{
+ return NULL;
+}
+
+void mdss_reg_bus_vote_client_destroy(struct reg_bus_client *client)
+{
+}
+
+int mdss_update_reg_bus_vote(struct reg_bus_client *bus_client, u32 usecase_ndx)
+{
+ pr_debug("%pS: No reg scaling! usecase=%u\n",
+ __builtin_return_address(0), usecase_ndx);
+
+ return 0;
+}
+#endif
+
static inline u32 mdss_mdp_irq_mask(u32 intr_type, u32 intf_num)
{
@@ -746,7 +868,7 @@ static inline void __mdss_mdp_reg_access_clk_enable(
{
if (enable) {
mdss_update_reg_bus_vote(mdata->reg_bus_clt,
- VOTE_INDEX_19_MHZ);
+ VOTE_INDEX_LOW);
if (mdss_has_quirk(mdata, MDSS_QUIRK_MIN_BUS_VOTE))
mdss_bus_scale_set_quota(MDSS_HW_RT,
SZ_1M, SZ_1M);
@@ -1029,7 +1151,7 @@ void mdss_mdp_clk_ctrl(int enable)
pm_runtime_get_sync(&mdata->pdev->dev);
mdss_update_reg_bus_vote(mdata->reg_bus_clt,
- VOTE_INDEX_19_MHZ);
+ VOTE_INDEX_LOW);
rc = mdss_iommu_ctrl(1);
if (IS_ERR_VALUE(rc))
@@ -1184,7 +1306,7 @@ static int mdss_mdp_irq_clk_setup(struct mdss_data_type *mdata)
}
mdata->reg_bus_clt = mdss_reg_bus_vote_client_create("mdp\0");
- if (IS_ERR_OR_NULL(mdata->reg_bus_clt)) {
+ if (IS_ERR(mdata->reg_bus_clt)) {
pr_err("bus client register failed\n");
return PTR_ERR(mdata->reg_bus_clt);
}
@@ -1274,9 +1396,10 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
mdata->hflip_buffer_reused = true;
/* prevent disable of prefill calculations */
mdata->min_prefill_lines = 0xffff;
- /* clock gating feature is disabled by default */
+ /* clock gating feature is enabled by default */
mdata->enable_gate = true;
mdata->pixel_ram_size = 0;
+ mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL_FLAT;
mdss_mdp_hw_rev_debug_caps_init(mdata);
@@ -1336,6 +1459,8 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
break;
case MDSS_MDP_HW_REV_114:
+ /* disable ECG for 28nm PHY platform */
+ mdata->enable_gate = false;
case MDSS_MDP_HW_REV_116:
mdata->max_target_zorder = 4; /* excluding base layer */
mdata->max_cursor_size = 128;
@@ -1344,6 +1469,7 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
mdata->pixel_ram_size = 40 * 1024;
mdata->apply_post_scale_bytes = false;
mdata->hflip_buffer_reused = false;
+ mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL;
set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
@@ -1353,6 +1479,7 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
mdss_set_quirk(mdata, MDSS_QUIRK_MIN_BUS_VOTE);
+ mdss_set_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
break;
case MDSS_MDP_HW_REV_115:
mdata->max_target_zorder = 4; /* excluding base layer */
@@ -1362,6 +1489,9 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
mdata->pixel_ram_size = 16 * 1024;
mdata->apply_post_scale_bytes = false;
mdata->hflip_buffer_reused = false;
+ /* disable ECG for 28nm PHY platform */
+ mdata->enable_gate = false;
+ mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL;
set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map);
set_bit(MDSS_QOS_PER_PIPE_LUT, mdata->mdss_qos_map);
set_bit(MDSS_QOS_SIMPLIFIED_PREFILL, mdata->mdss_qos_map);
@@ -1371,6 +1501,7 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
set_bit(MDSS_QOS_OTLIM, mdata->mdss_qos_map);
mdss_set_quirk(mdata, MDSS_QUIRK_DMA_BI_DIR);
mdss_set_quirk(mdata, MDSS_QUIRK_MIN_BUS_VOTE);
+ mdss_set_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP);
break;
case MDSS_MDP_HW_REV_300:
case MDSS_MDP_HW_REV_301:
@@ -1436,8 +1567,6 @@ static void mdss_hw_rev_init(struct mdss_data_type *mdata)
*/
void mdss_hw_init(struct mdss_data_type *mdata)
{
- int i, j;
- char *offset;
struct mdss_mdp_pipe *vig;
mdss_hw_rev_init(mdata);
@@ -1456,28 +1585,7 @@ void mdss_hw_init(struct mdss_data_type *mdata)
}
}
- for (i = 0; i < mdata->ndspp; i++) {
- offset = mdata->mixer_intf[i].dspp_base +
- MDSS_MDP_REG_DSPP_HIST_LUT_BASE;
- for (j = 0; j < ENHIST_LUT_ENTRIES; j++)
- writel_relaxed(j, offset);
-
- /* swap */
- writel_relaxed(1, offset + 4);
- }
vig = mdata->vig_pipes;
- for (i = 0; i < mdata->nvig_pipes; i++) {
- offset = vig[i].base +
- MDSS_MDP_REG_VIG_HIST_LUT_BASE;
- for (j = 0; j < ENHIST_LUT_ENTRIES; j++)
- writel_relaxed(j, offset);
- /* swap */
- writel_relaxed(1, offset + 16);
- }
-
- /* initialize csc matrix default value */
- for (i = 0; i < mdata->nvig_pipes; i++)
- vig[i].csc_coeff_set = MDP_CSC_ITU_R_709;
mdata->nmax_concurrent_ad_hw =
(mdata->mdp_rev < MDSS_MDP_HW_REV_103) ? 1 : 2;
@@ -1767,11 +1875,11 @@ static int mdss_mdp_get_cmdline_config(struct platform_device *pdev)
rc = mdss_mdp_parse_dt_pan_intf(pdev);
/* if pref pan intf is not present */
if (rc)
- pr_err("unable to parse device tree for pan intf\n");
- else
- pan_cfg->init_done = true;
+ pr_warn("unable to parse device tree for pan intf\n");
- return rc;
+ pan_cfg->init_done = true;
+
+ return 0;
}
static void __update_sspp_info(struct mdss_mdp_pipe *pipe,
@@ -2121,6 +2229,20 @@ static int mdss_mdp_probe(struct platform_device *pdev)
mdss_mdp_hw.irq_info->irq = res->start;
mdss_mdp_hw.ptr = mdata;
+ /* export misc. interrupts to external driver */
+ mdata->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 32,
+ &mdss_irq_domain_ops, mdata);
+ if (!mdata->irq_domain) {
+ pr_err("unable to add linear domain\n");
+ rc = -ENOMEM;
+ goto probe_done;
+ }
+
+ mdss_misc_hw.irq_info = mdss_intr_line();
+ rc = mdss_res->mdss_util->register_irq(&mdss_misc_hw);
+ if (rc)
+ pr_err("mdss_register_irq failed.\n");
+
/*populate hw iomem base info from device tree*/
rc = mdss_mdp_parse_dt(pdev);
if (rc) {
@@ -2139,11 +2261,6 @@ static int mdss_mdp_probe(struct platform_device *pdev)
pr_err("unable to initialize mdss mdp resources\n");
goto probe_done;
}
- rc = mdss_mdp_pp_init(&pdev->dev);
- if (rc) {
- pr_err("unable to initialize mdss pp resources\n");
- goto probe_done;
- }
rc = mdss_mdp_bus_scale_register(mdata);
if (rc) {
pr_err("unable to register bus scaling\n");
@@ -2200,6 +2317,10 @@ static int mdss_mdp_probe(struct platform_device *pdev)
mdss_mdp_footswitch_ctrl_splash(true);
mdss_hw_init(mdata);
+ rc = mdss_mdp_pp_init(&pdev->dev);
+ if (rc)
+ pr_err("unable to initialize mdss pp resources\n");
+
/* Restoring Secure configuration during boot-up */
if (mdss_mdp_req_init_restore_cfg(mdata))
__mdss_restore_sec_cfg(mdata);
@@ -2268,7 +2389,7 @@ int mdss_mdp_parse_dt_hw_settings(struct platform_device *pdev)
vbif_arr = of_get_property(pdev->dev.of_node, "qcom,vbif-settings",
&vbif_len);
if (!vbif_arr || (vbif_len & 1)) {
- pr_warn("MDSS VBIF settings not found\n");
+ pr_debug("MDSS VBIF settings not found\n");
vbif_len = 0;
}
vbif_len /= 2 * sizeof(u32);
@@ -2284,7 +2405,7 @@ int mdss_mdp_parse_dt_hw_settings(struct platform_device *pdev)
mdp_arr = of_get_property(pdev->dev.of_node, "qcom,mdp-settings",
&mdp_len);
if (!mdp_arr || (mdp_len & 1)) {
- pr_warn("MDSS MDP settings not found\n");
+ pr_debug("MDSS MDP settings not found\n");
mdp_len = 0;
}
mdp_len /= 2 * sizeof(u32);
@@ -2512,267 +2633,86 @@ static void mdss_mdp_parse_dt_pipe_panic_ctrl(struct platform_device *pdev,
}
static int mdss_mdp_parse_dt_pipe_helper(struct platform_device *pdev,
- u32 npipes,
- u32 nfids)
+ u32 ptype, char *ptypestr,
+ struct mdss_mdp_pipe **out_plist,
+ size_t len,
+ u8 priority_base)
{
- u32 dma_off;
- u32 *offsets = NULL, *ftch_id = NULL, *xin_id = NULL;
- u32 len;
- uint32_t setup_cnt = 0;
- int rc = 0, i;
struct mdss_data_type *mdata = platform_get_drvdata(pdev);
+ u32 offsets[MDSS_MDP_MAX_SSPP];
+ u32 ftch_id[MDSS_MDP_MAX_SSPP];
+ u32 xin_id[MDSS_MDP_MAX_SSPP];
+ u32 pnums[MDSS_MDP_MAX_SSPP];
+ struct mdss_mdp_pipe *pipe_list;
+ char prop_name[64];
+ int i, cnt, rc;
+
+ if (!out_plist)
+ return -EINVAL;
- offsets = kcalloc(npipes, sizeof(u32), GFP_KERNEL);
- if (!offsets)
- return -ENOMEM;
-
- ftch_id = kcalloc(npipes, sizeof(u32), GFP_KERNEL);
- if (!ftch_id) {
- rc = -ENOMEM;
- goto ftch_alloc_fail;
- }
-
- xin_id = kcalloc(npipes, sizeof(u32), GFP_KERNEL);
- if (!xin_id) {
- rc = -ENOMEM;
- goto xin_alloc_fail;
- }
-
- if (mdata->nvig_pipes) {
- mdata->vig_pipes = devm_kzalloc(&mdata->pdev->dev,
- (sizeof(struct mdss_mdp_pipe)
- * mdata->nvig_pipes), GFP_KERNEL);
- if (!mdata->vig_pipes) {
- rc = -ENOMEM;
- goto vig_alloc_fail;
- }
- if (nfids) {
- rc = mdss_mdp_parse_dt_handler(pdev,
- "qcom,mdss-pipe-vig-fetch-id", ftch_id,
- mdata->nvig_pipes);
- if (rc)
- goto parse_fail;
- }
-
- rc = mdss_mdp_parse_dt_handler(pdev,
- "qcom,mdss-pipe-vig-xin-id", xin_id,
- mdata->nvig_pipes);
- if (rc)
- goto parse_fail;
-
- rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-vig-off",
- offsets, mdata->nvig_pipes);
- if (rc)
- goto parse_fail;
-
- len = min_t(int, DEFAULT_TOTAL_VIG_PIPES,
- (int)mdata->nvig_pipes);
- rc = mdss_mdp_pipe_addr_setup(mdata, mdata->vig_pipes, offsets,
- ftch_id, xin_id, MDSS_MDP_PIPE_TYPE_VIG,
- MDSS_MDP_SSPP_VIG0, len, 0);
- if (rc)
- goto parse_fail;
-
- setup_cnt += len;
- }
-
- if (mdata->nrgb_pipes) {
- mdata->rgb_pipes = devm_kzalloc(&mdata->pdev->dev,
- (sizeof(struct mdss_mdp_pipe) *
- mdata->nrgb_pipes), GFP_KERNEL);
- if (!mdata->rgb_pipes) {
- rc = -ENOMEM;
- goto rgb_alloc_fail;
- }
-
- if (nfids) {
- rc = mdss_mdp_parse_dt_handler(pdev,
- "qcom,mdss-pipe-rgb-fetch-id",
- ftch_id + mdata->nvig_pipes,
- mdata->nrgb_pipes);
- if (rc)
- goto parse_fail;
+ for (i = 0, cnt = 0; i < MDSS_MDP_MAX_SSPP && cnt < len; i++) {
+ if (ptype == get_pipe_type_from_num(i)) {
+ pnums[cnt] = i;
+ cnt++;
}
-
- rc = mdss_mdp_parse_dt_handler(pdev,
- "qcom,mdss-pipe-rgb-xin-id",
- xin_id + mdata->nvig_pipes, mdata->nrgb_pipes);
- if (rc)
- goto parse_fail;
-
- rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-rgb-off",
- offsets + mdata->nvig_pipes, mdata->nrgb_pipes);
- if (rc)
- goto parse_fail;
-
- len = min_t(int, DEFAULT_TOTAL_RGB_PIPES,
- (int)mdata->nrgb_pipes);
- rc = mdss_mdp_pipe_addr_setup(mdata, mdata->rgb_pipes,
- offsets + mdata->nvig_pipes,
- ftch_id + mdata->nvig_pipes,
- xin_id + mdata->nvig_pipes,
- MDSS_MDP_PIPE_TYPE_RGB,
- MDSS_MDP_SSPP_RGB0, len, mdata->nvig_pipes);
- if (rc)
- goto parse_fail;
-
- setup_cnt += len;
}
- if (mdata->ndma_pipes) {
- mdata->dma_pipes = devm_kzalloc(&mdata->pdev->dev,
- sizeof(struct mdss_mdp_pipe) * mdata->ndma_pipes,
- GFP_KERNEL);
- if (!mdata->dma_pipes) {
- pr_err("no mem for dma_pipes: kzalloc fail\n");
- rc = -ENOMEM;
- goto dma_alloc_fail;
- }
+ if (cnt < len)
+ pr_warn("Invalid %s pipe count: %zu, max supported: %d\n",
+ ptypestr, len, cnt);
+ if (cnt == 0) {
+ *out_plist = NULL;
- dma_off = mdata->nvig_pipes + mdata->nrgb_pipes;
-
- if (nfids) {
- rc = mdss_mdp_parse_dt_handler(pdev,
- "qcom,mdss-pipe-dma-fetch-id",
- ftch_id + dma_off, mdata->ndma_pipes);
- if (rc)
- goto parse_fail;
- }
-
- rc = mdss_mdp_parse_dt_handler(pdev,
- "qcom,mdss-pipe-dma-xin-id",
- xin_id + dma_off, mdata->ndma_pipes);
- if (rc)
- goto parse_fail;
-
- rc = mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-dma-off",
- offsets + dma_off, mdata->ndma_pipes);
- if (rc)
- goto parse_fail;
-
- len = mdata->ndma_pipes;
- rc = mdss_mdp_pipe_addr_setup(mdata, mdata->dma_pipes,
- offsets + dma_off, ftch_id + dma_off, xin_id + dma_off,
- MDSS_MDP_PIPE_TYPE_DMA, MDSS_MDP_SSPP_DMA0, len,
- mdata->nvig_pipes + mdata->nrgb_pipes);
- if (rc)
- goto parse_fail;
-
- setup_cnt += len;
- }
-
- if (mdata->nvig_pipes > DEFAULT_TOTAL_VIG_PIPES) {
- rc = mdss_mdp_pipe_addr_setup(mdata,
- mdata->vig_pipes + DEFAULT_TOTAL_VIG_PIPES,
- offsets + DEFAULT_TOTAL_VIG_PIPES,
- ftch_id + DEFAULT_TOTAL_VIG_PIPES,
- xin_id + DEFAULT_TOTAL_VIG_PIPES,
- MDSS_MDP_PIPE_TYPE_VIG, setup_cnt,
- mdata->nvig_pipes - DEFAULT_TOTAL_VIG_PIPES,
- DEFAULT_TOTAL_VIG_PIPES);
- if (rc)
- goto parse_fail;
-
- setup_cnt += mdata->nvig_pipes - DEFAULT_TOTAL_VIG_PIPES;
- }
-
- if (mdata->nrgb_pipes > DEFAULT_TOTAL_RGB_PIPES) {
- rc = mdss_mdp_pipe_addr_setup(mdata,
- mdata->rgb_pipes + DEFAULT_TOTAL_RGB_PIPES,
- offsets + mdata->nvig_pipes + DEFAULT_TOTAL_RGB_PIPES,
- ftch_id + mdata->nvig_pipes + DEFAULT_TOTAL_RGB_PIPES,
- xin_id + mdata->nvig_pipes + DEFAULT_TOTAL_RGB_PIPES,
- MDSS_MDP_PIPE_TYPE_RGB, setup_cnt,
- mdata->nrgb_pipes - DEFAULT_TOTAL_RGB_PIPES,
- mdata->nvig_pipes + DEFAULT_TOTAL_RGB_PIPES);
- if (rc)
- goto parse_fail;
-
- setup_cnt += mdata->nrgb_pipes - DEFAULT_TOTAL_RGB_PIPES;
- }
-
- if (mdata->nvig_pipes) {
- rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev,
- "qcom,mdss-pipe-vig-clk-ctrl-offsets",
- mdata->vig_pipes,
- mdata->nvig_pipes);
- if (rc)
- goto parse_fail;
+ return 0;
}
- if (mdata->nrgb_pipes) {
- rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev,
- "qcom,mdss-pipe-rgb-clk-ctrl-offsets",
- mdata->rgb_pipes,
- mdata->nrgb_pipes);
- if (rc)
- goto parse_fail;
- }
+ pipe_list = devm_kzalloc(&pdev->dev,
+ (sizeof(struct mdss_mdp_pipe) * cnt), GFP_KERNEL);
+ if (!pipe_list)
+ return -ENOMEM;
- if (mdata->ndma_pipes) {
- rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev,
- "qcom,mdss-pipe-dma-clk-ctrl-offsets", mdata->dma_pipes,
- mdata->ndma_pipes);
+ if (mdata->has_pixel_ram || (ptype == MDSS_MDP_PIPE_TYPE_CURSOR)) {
+ for (i = 0; i < cnt; i++)
+ ftch_id[i] = -1;
+ } else {
+ snprintf(prop_name, sizeof(prop_name),
+ "qcom,mdss-pipe-%s-fetch-id", ptypestr);
+ rc = mdss_mdp_parse_dt_handler(pdev, prop_name, ftch_id,
+ cnt);
if (rc)
goto parse_fail;
}
- if (mdata->ncursor_pipes) {
- mdata->cursor_pipes = devm_kzalloc(&mdata->pdev->dev,
- sizeof(struct mdss_mdp_pipe) * mdata->ncursor_pipes,
- GFP_KERNEL);
+ snprintf(prop_name, sizeof(prop_name),
+ "qcom,mdss-pipe-%s-xin-id", ptypestr);
+ rc = mdss_mdp_parse_dt_handler(pdev, prop_name, xin_id, cnt);
+ if (rc)
+ goto parse_fail;
- if (!mdata->cursor_pipes) {
- pr_err("no mem for cursor_pipes: kzalloc fail\n");
- rc = -ENOMEM;
- goto cursor_alloc_fail;
- }
- rc = mdss_mdp_parse_dt_handler(pdev,
- "qcom,mdss-pipe-cursor-off", offsets,
- mdata->ncursor_pipes);
- if (rc)
- goto parse_fail;
+ snprintf(prop_name, sizeof(prop_name),
+ "qcom,mdss-pipe-%s-off", ptypestr);
+ rc = mdss_mdp_parse_dt_handler(pdev, prop_name, offsets, cnt);
+ if (rc)
+ goto parse_fail;
- rc = mdss_mdp_parse_dt_handler(pdev,
- "qcom,mdss-pipe-cursor-xin-id", xin_id,
- mdata->ncursor_pipes);
- if (rc)
- goto parse_fail;
+ rc = mdss_mdp_pipe_addr_setup(mdata, pipe_list, offsets, ftch_id,
+ xin_id, ptype, pnums, cnt, priority_base);
+ if (rc)
+ goto parse_fail;
- rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev,
- "qcom,mdss-pipe-cursor-clk-ctrl-offsets",
- mdata->cursor_pipes, mdata->ncursor_pipes);
- if (rc)
- goto parse_fail;
+ snprintf(prop_name, sizeof(prop_name),
+ "qcom,mdss-pipe-%s-clk-ctrl-offsets", ptypestr);
+ rc = mdss_mdp_parse_dt_pipe_clk_ctrl(pdev, prop_name,
+ pipe_list, cnt);
+ if (rc)
+ goto parse_fail;
- /* set the fetch id to an invalid value */
- for (i = 0; i < mdata->ncursor_pipes; i++)
- ftch_id[i] = -1;
- rc = mdss_mdp_pipe_addr_setup(mdata, mdata->cursor_pipes,
- offsets, ftch_id, xin_id, MDSS_MDP_PIPE_TYPE_CURSOR,
- MDSS_MDP_SSPP_CURSOR0, mdata->ncursor_pipes, 0);
- if (rc)
- goto parse_fail;
- pr_info("dedicated vp cursors detected, num=%d\n",
- mdata->ncursor_pipes);
- }
- goto parse_done;
+ *out_plist = pipe_list;
+ return cnt;
parse_fail:
- kfree(mdata->cursor_pipes);
-cursor_alloc_fail:
- kfree(mdata->cursor_pipes);
-dma_alloc_fail:
- kfree(mdata->rgb_pipes);
-rgb_alloc_fail:
- kfree(mdata->vig_pipes);
-vig_alloc_fail:
- kfree(xin_id);
-xin_alloc_fail:
- kfree(ftch_id);
-ftch_alloc_fail:
- kfree(offsets);
-parse_done:
+ devm_kfree(&pdev->dev, pipe_list);
+
return rc;
}
@@ -2826,9 +2766,34 @@ static int mdss_mdp_parse_dt_pipe(struct platform_device *pdev)
return -EINVAL;
}
- rc = mdss_mdp_parse_dt_pipe_helper(pdev, npipes, nfids);
- if (rc)
+ rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_VIG, "vig",
+ &mdata->vig_pipes, mdata->nvig_pipes, 0);
+ if (IS_ERR_VALUE(rc))
+ goto parse_fail;
+ mdata->nvig_pipes = rc;
+
+ rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_RGB, "rgb",
+ &mdata->rgb_pipes, mdata->nrgb_pipes,
+ mdata->nvig_pipes);
+ if (IS_ERR_VALUE(rc))
+ goto parse_fail;
+ mdata->nrgb_pipes = rc;
+
+ rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_DMA, "dma",
+ &mdata->dma_pipes, mdata->ndma_pipes,
+ mdata->nvig_pipes + mdata->nrgb_pipes);
+ if (IS_ERR_VALUE(rc))
goto parse_fail;
+ mdata->ndma_pipes = rc;
+
+ rc = mdss_mdp_parse_dt_pipe_helper(pdev, MDSS_MDP_PIPE_TYPE_CURSOR,
+ "cursor", &mdata->cursor_pipes, mdata->ncursor_pipes,
+ 0);
+ if (IS_ERR_VALUE(rc))
+ goto parse_fail;
+ mdata->ncursor_pipes = rc;
+
+ rc = 0;
mdss_mdp_parse_dt_handler(pdev, "qcom,mdss-pipe-sw-reset-off",
&sw_reset_offset, 1);
@@ -3820,9 +3785,11 @@ static int mdss_mdp_parse_dt_ppb_off(struct platform_device *pdev)
return -EINVAL;
}
+#ifdef CONFIG_MSM_BUS_SCALING
static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev)
{
int rc, paths;
+ struct device_node *node;
struct mdss_data_type *mdata = platform_get_drvdata(pdev);
rc = of_property_read_u32(pdev->dev.of_node,
@@ -3851,10 +3818,39 @@ static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev)
rc = -EINVAL;
pr_err("msm_bus_cl_get_pdata failed. rc=%d\n", rc);
mdata->bus_scale_table = NULL;
+ return rc;
+ }
+
+ /*
+ * if mdss-reg-bus is not found then default table is picked
+ * hence below code wont return error.
+ */
+ node = of_get_child_by_name(pdev->dev.of_node, "qcom,mdss-reg-bus");
+ if (node) {
+ mdata->reg_bus_scale_table =
+ msm_bus_pdata_from_node(pdev, node);
+ if (IS_ERR_OR_NULL(mdata->reg_bus_scale_table)) {
+ rc = PTR_ERR(mdata->reg_bus_scale_table);
+ if (!rc)
+ pr_err("bus_pdata reg_bus failed rc=%d\n", rc);
+ rc = 0;
+ mdata->reg_bus_scale_table = NULL;
+ }
+ } else {
+ rc = 0;
+ mdata->reg_bus_scale_table = NULL;
+ pr_debug("mdss-reg-bus not found\n");
}
return rc;
}
+#else
+static int mdss_mdp_parse_dt_bus_scale(struct platform_device *pdev)
+{
+ return 0;
+}
+
+#endif
static int mdss_mdp_parse_dt_handler(struct platform_device *pdev,
char *prop_name, u32 *offsets, int len)
@@ -3878,7 +3874,7 @@ static int mdss_mdp_parse_dt_prop_len(struct platform_device *pdev,
of_find_property(pdev->dev.of_node, prop_name, &len);
if (len < 1) {
- pr_info("prop %s : doesn't exist in device tree\n",
+ pr_debug("prop %s : doesn't exist in device tree\n",
prop_name);
return 0;
}
@@ -4046,9 +4042,9 @@ static void apply_dynamic_ot_limit(u32 *ot_lim,
res = params->width * params->height;
- pr_debug("w:%d h:%d rot:%d yuv:%d wb:%d res:%d\n",
+ pr_debug("w:%d h:%d rot:%d yuv:%d wb:%d res:%d fps:%d\n",
params->width, params->height, params->is_rot,
- params->is_yuv, params->is_wb, res);
+ params->is_yuv, params->is_wb, res, params->frame_rate);
switch (mdata->mdp_rev) {
case MDSS_MDP_HW_REV_114:
@@ -4350,7 +4346,7 @@ int mdss_mdp_secure_display_ctrl(unsigned int enable)
&request, sizeof(request), &resp, sizeof(resp));
} else {
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
- MEM_PROTECT_SD_CTRL_FLAT), &desc);
+ mem_protect_sd_ctrl_id), &desc);
resp = desc.ret[0];
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 9505b18c5c45..6b36e2a9bc58 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -84,6 +84,8 @@
#define XIN_HALT_TIMEOUT_US 0x4000
+#define MAX_LAYER_COUNT 0xC
+
/* hw cursor can only be setup in highest mixer stage */
#define HW_CURSOR_STAGE(mdata) \
(((mdata)->max_target_zorder + MDSS_MDP_STAGE_0) - 1)
@@ -113,13 +115,73 @@ enum mdss_mdp_mixer_mux {
};
enum mdss_mdp_pipe_type {
- MDSS_MDP_PIPE_TYPE_UNUSED,
+ MDSS_MDP_PIPE_TYPE_INVALID,
MDSS_MDP_PIPE_TYPE_VIG,
MDSS_MDP_PIPE_TYPE_RGB,
MDSS_MDP_PIPE_TYPE_DMA,
MDSS_MDP_PIPE_TYPE_CURSOR,
};
+static inline enum mdss_mdp_sspp_index get_pipe_num_from_ndx(u32 ndx)
+{
+ u32 id;
+
+ if (unlikely(!ndx))
+ return MDSS_MDP_MAX_SSPP;
+
+ id = fls(ndx) - 1;
+
+ if (unlikely(ndx ^ BIT(id)))
+ return MDSS_MDP_MAX_SSPP;
+
+ return id;
+}
+
+static inline enum mdss_mdp_pipe_type
+get_pipe_type_from_num(enum mdss_mdp_sspp_index pnum)
+{
+ enum mdss_mdp_pipe_type ptype;
+
+ switch (pnum) {
+ case MDSS_MDP_SSPP_VIG0:
+ case MDSS_MDP_SSPP_VIG1:
+ case MDSS_MDP_SSPP_VIG2:
+ case MDSS_MDP_SSPP_VIG3:
+ ptype = MDSS_MDP_PIPE_TYPE_VIG;
+ break;
+ case MDSS_MDP_SSPP_RGB0:
+ case MDSS_MDP_SSPP_RGB1:
+ case MDSS_MDP_SSPP_RGB2:
+ case MDSS_MDP_SSPP_RGB3:
+ ptype = MDSS_MDP_PIPE_TYPE_RGB;
+ break;
+ case MDSS_MDP_SSPP_DMA0:
+ case MDSS_MDP_SSPP_DMA1:
+ case MDSS_MDP_SSPP_DMA2:
+ case MDSS_MDP_SSPP_DMA3:
+ ptype = MDSS_MDP_PIPE_TYPE_DMA;
+ break;
+ case MDSS_MDP_SSPP_CURSOR0:
+ case MDSS_MDP_SSPP_CURSOR1:
+ ptype = MDSS_MDP_PIPE_TYPE_CURSOR;
+ break;
+ default:
+ ptype = MDSS_MDP_PIPE_TYPE_INVALID;
+ break;
+ }
+
+ return ptype;
+}
+
+static inline enum mdss_mdp_pipe_type get_pipe_type_from_ndx(u32 ndx)
+{
+ enum mdss_mdp_sspp_index pnum;
+
+ pnum = get_pipe_num_from_ndx(ndx);
+
+ return get_pipe_type_from_num(pnum);
+}
+
enum mdss_mdp_block_type {
MDSS_MDP_BLOCK_UNUSED,
MDSS_MDP_BLOCK_SSPP,
@@ -190,6 +252,12 @@ struct mdss_mdp_vsync_handler {
struct list_head list;
};
+struct mdss_mdp_lineptr_handler {
+ bool enabled;
+ mdp_vsync_handler_t lineptr_handler;
+ struct list_head list;
+};
+
enum mdss_mdp_wb_ctl_type {
MDSS_MDP_WB_CTL_TYPE_BLOCK = 1,
MDSS_MDP_WB_CTL_TYPE_LINE
@@ -342,6 +410,8 @@ struct mdss_mdp_ctl {
struct work_struct recover_work;
struct work_struct remove_underrun_handler;
+ struct mdss_mdp_lineptr_handler lineptr_handler;
+
/*
* This ROI is aligned to as per following guidelines and
* sent to the panel driver.
@@ -550,6 +620,7 @@ struct pp_sts_type {
u32 gamut_sts;
u32 pgc_sts;
u32 sharp_sts;
+ u32 hist_sts;
u32 side_sts;
};
@@ -659,7 +730,9 @@ struct mdss_mdp_wfd;
struct mdss_overlay_private {
ktime_t vsync_time;
+ ktime_t lineptr_time;
struct kernfs_node *vsync_event_sd;
+ struct kernfs_node *lineptr_event_sd;
struct kernfs_node *hist_event_sd;
struct kernfs_node *bl_event_sd;
struct kernfs_node *ad_event_sd;
@@ -1186,6 +1259,60 @@ static inline int mdss_mdp_get_display_id(struct mdss_mdp_pipe *pipe)
return (pipe && pipe->mfd) ? pipe->mfd->index : -1;
}
+static inline bool mdss_mdp_is_full_frame_update(struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_mdp_mixer *mixer;
+ struct mdss_rect *roi;
+
+ if (mdss_mdp_get_pu_type(ctl) != MDSS_MDP_DEFAULT_UPDATE)
+ return false;
+
+ if (ctl->mixer_left->valid_roi) {
+ mixer = ctl->mixer_left;
+ roi = &mixer->roi;
+ if ((roi->x != 0) || (roi->y != 0) || (roi->w != mixer->width)
+ || (roi->h != mixer->height))
+ return false;
+ }
+
+ if (ctl->mixer_right && ctl->mixer_right->valid_roi) {
+ mixer = ctl->mixer_right;
+ roi = &mixer->roi;
+ if ((roi->x != 0) || (roi->y != 0) || (roi->w != mixer->width)
+ || (roi->h != mixer->height))
+ return false;
+ }
+
+ return true;
+}
+
+static inline bool mdss_mdp_is_lineptr_supported(struct mdss_mdp_ctl *ctl)
+{
+ struct mdss_panel_info *pinfo;
+
+ if (!ctl || !ctl->mixer_left || !ctl->is_master)
+ return false;
+
+ pinfo = &ctl->panel_data->panel_info;
+
+ return (((pinfo->type == MIPI_CMD_PANEL)
+ && (pinfo->te.tear_check_en)) ? true : false);
+}
+
+static inline bool mdss_mdp_is_map_needed(struct mdss_data_type *mdata,
+ struct mdss_mdp_img_data *data)
+{
+ u32 is_secure_ui = data->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION;
+
+ /*
+ * For ULT Targets we need SMMU Map, to issue map call for secure Display.
+ */
+ if (is_secure_ui && !mdss_has_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP))
+ return false;
+
+ return true;
+}
+
irqreturn_t mdss_mdp_isr(int irq, void *ptr);
void mdss_mdp_irq_clear(struct mdss_data_type *mdata,
u32 intr_type, u32 intf_num);
@@ -1412,8 +1539,8 @@ void mdss_mdp_smp_unreserve(struct mdss_mdp_pipe *pipe);
void mdss_mdp_smp_release(struct mdss_mdp_pipe *pipe);
int mdss_mdp_pipe_addr_setup(struct mdss_data_type *mdata,
- struct mdss_mdp_pipe *head, u32 *offsets, u32 *ftch_y_id, u32 *xin_id,
- u32 type, u32 num_base, u32 len, u8 priority_base);
+ struct mdss_mdp_pipe *head, u32 *offsets, u32 *ftch_id, u32 *xin_id,
+ u32 type, const int *pnums, u32 len, u8 priority_base);
int mdss_mdp_mixer_addr_setup(struct mdss_data_type *mdata, u32 *mixer_offsets,
u32 *dspp_offsets, u32 *pingpong_offsets, u32 type, u32 len);
int mdss_mdp_ctl_addr_setup(struct mdss_data_type *mdata, u32 *ctl_offsets,
@@ -1472,9 +1599,8 @@ int mdss_mdp_wb_kickoff(struct msm_fb_data_type *mfd,
int mdss_mdp_wb_ioctl_handler(struct msm_fb_data_type *mfd, u32 cmd, void *arg);
int mdss_mdp_get_ctl_mixers(u32 fb_num, u32 *mixer_id);
-u32 mdss_mdp_get_mixer_mask(u32 pipe_num, u32 stage);
-u32 mdss_mdp_get_mixer_extn_mask(u32 pipe_num, u32 stage);
-u32 mdss_mdp_get_mixercfg(struct mdss_mdp_mixer *mixer, bool extn);
+bool mdss_mdp_mixer_reg_has_pipe(struct mdss_mdp_mixer *mixer,
+ struct mdss_mdp_pipe *pipe);
u32 mdss_mdp_fb_stride(u32 fb_index, u32 xres, int bpp);
void mdss_check_dsi_ctrl_status(struct work_struct *work, uint32_t interval);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 294d2248ec83..4fd5b3a1f7f7 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -27,7 +27,39 @@
#include "mdss_mdp_trace.h"
#include "mdss_debug.h"
+#define NUM_MIXERCFG_REGS 3
#define MDSS_MDP_WB_OUTPUT_BPP 3
+struct mdss_mdp_mixer_cfg {
+ u32 config_masks[NUM_MIXERCFG_REGS];
+ bool border_enabled;
+ bool cursor_enabled;
+};
+
+static struct {
+ u32 flush_bit;
+ struct mdss_mdp_hwio_cfg base;
+ struct mdss_mdp_hwio_cfg ext;
+ struct mdss_mdp_hwio_cfg ext2;
+} mdp_pipe_hwio[MDSS_MDP_MAX_SSPP] = {
+ [MDSS_MDP_SSPP_VIG0] = { 0, { 0, 3, 0 }, { 0, 1, 3 } },
+ [MDSS_MDP_SSPP_VIG1] = { 1, { 3, 3, 0 }, { 2, 1, 3 } },
+ [MDSS_MDP_SSPP_VIG2] = { 2, { 6, 3, 0 }, { 4, 1, 3 } },
+ [MDSS_MDP_SSPP_VIG3] = { 18, { 26, 3, 0 }, { 4, 1, 3 } },
+ [MDSS_MDP_SSPP_RGB0] = { 3, { 9, 3, 0 }, { 8, 1, 3 } },
+ [MDSS_MDP_SSPP_RGB1] = { 4, { 12, 3, 0 }, { 10, 1, 3 } },
+ [MDSS_MDP_SSPP_RGB2] = { 5, { 15, 3, 0 }, { 12, 1, 3 } },
+ [MDSS_MDP_SSPP_RGB3] = { 19, { 29, 3, 0 }, { 14, 1, 3 } },
+ [MDSS_MDP_SSPP_DMA0] = { 11, { 18, 3, 0 }, { 16, 1, 3 } },
+ [MDSS_MDP_SSPP_DMA1] = { 12, { 21, 3, 0 }, { 18, 1, 3 } },
+ [MDSS_MDP_SSPP_DMA2] = { 24, .ext2 = { 0, 4, 0 } },
+ [MDSS_MDP_SSPP_DMA3] = { 25, .ext2 = { 4, 4, 0 } },
+ [MDSS_MDP_SSPP_CURSOR0] = { 22, .ext = { 20, 4, 0 } },
+ [MDSS_MDP_SSPP_CURSOR1] = { 23, .ext = { 26, 4, 0 } },
+};
+
+static void __mdss_mdp_mixer_write_cfg(struct mdss_mdp_mixer *mixer,
+ struct mdss_mdp_mixer_cfg *cfg);
+static void __mdss_mdp_reset_mixercfg(struct mdss_mdp_ctl *ctl);
static inline u64 fudge_factor(u64 val, u32 numer, u32 denom)
{
@@ -52,82 +84,6 @@ static DEFINE_MUTEX(mdss_mdp_ctl_lock);
static u32 mdss_mdp_get_vbp_factor_max(struct mdss_mdp_ctl *ctl);
-static inline u32 __mdss_mdp_get_wb_mixer(struct mdss_mdp_mixer *mixer)
-{
- /* Return the dedicated WB mixer. */
- if (test_bit(MDSS_CAPS_MIXER_1_FOR_WB,
- mixer->ctl->mdata->mdss_caps_map))
- return MDSS_MDP_INTF_LAYERMIXER1;
- else
- return MDSS_MDP_INTF_LAYERMIXER3;
-}
-
-static void __mdss_mdp_reset_mixercfg(struct mdss_mdp_ctl *ctl)
-{
- u32 off;
- int i, nmixers;
- struct mdss_data_type *mdata = mdss_mdp_get_mdata();
-
- if (!ctl || !mdata)
- return;
-
- nmixers = mdata->nmixers_intf + mdata->nmixers_wb;
-
- for (i = 0; i < nmixers; i++) {
- off = MDSS_MDP_REG_CTL_LAYER(i);
- mdss_mdp_ctl_write(ctl, off, 0);
-
- off += MDSS_MDP_REG_CTL_LAYER_EXTN(i);
- mdss_mdp_ctl_write(ctl, off, 0);
- }
-}
-
-static inline int __mdss_mdp_ctl_get_mixer_off(struct mdss_mdp_mixer *mixer)
-{
- u32 wb_mixer_num = 0;
-
- if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
- if (mixer->num == MDSS_MDP_INTF_LAYERMIXER3)
- return MDSS_MDP_CTL_X_LAYER_5;
- else
- return MDSS_MDP_REG_CTL_LAYER(mixer->num);
- } else {
- wb_mixer_num = __mdss_mdp_get_wb_mixer(mixer);
- return MDSS_MDP_REG_CTL_LAYER(mixer->num + wb_mixer_num);
- }
-}
-
-static inline int __mdss_mdp_ctl_get_mixer_extn_off(
- struct mdss_mdp_mixer *mixer)
-{
- u32 wb_mixer_num = 0;
-
- if (mixer->type == MDSS_MDP_MIXER_TYPE_INTF) {
- if (mixer->num == MDSS_MDP_INTF_LAYERMIXER3)
- return MDSS_MDP_REG_CTL_LAYER_EXTN(5);
- else
- return MDSS_MDP_REG_CTL_LAYER_EXTN(mixer->num);
- } else {
- wb_mixer_num = __mdss_mdp_get_wb_mixer(mixer);
- return MDSS_MDP_REG_CTL_LAYER_EXTN(wb_mixer_num);
- }
-}
-
-u32 mdss_mdp_get_mixercfg(struct mdss_mdp_mixer *mixer, bool extn)
-{
- u32 mixer_off;
-
- if (!mixer || !mixer->ctl)
- return 0;
-
- if (extn)
- mixer_off = __mdss_mdp_ctl_get_mixer_extn_off(mixer);
- else
- mixer_off = __mdss_mdp_ctl_get_mixer_off(mixer);
-
- return mdss_mdp_ctl_read(mixer->ctl, mixer_off);
-}
-
static inline u32 mdss_mdp_get_pclk_rate(struct mdss_mdp_ctl *ctl)
{
struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
@@ -334,8 +290,8 @@ static u32 mdss_mdp_perf_calc_pipe_prefill_video(struct mdss_mdp_prefill_params
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
struct mdss_prefill_data *prefill = &mdata->prefill_data;
- u32 prefill_bytes;
- u32 latency_buf_bytes;
+ u32 prefill_bytes = 0;
+ u32 latency_buf_bytes = 0;
u32 y_buf_bytes = 0;
u32 y_scaler_bytes = 0;
u32 pp_bytes = 0, pp_lines = 0;
@@ -958,7 +914,7 @@ static void mdss_mdp_perf_calc_mixer(struct mdss_mdp_mixer *mixer,
u32 prefill_val = 0;
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
bool apply_fudge = true;
- struct mdss_mdp_format_params *fmt;
+ struct mdss_mdp_format_params *fmt = NULL;
BUG_ON(num_pipes > MAX_PIPES_PER_LM);
@@ -1537,7 +1493,7 @@ int mdss_mdp_perf_bw_check_pipe(struct mdss_mdp_perf_params *perf,
{
struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
struct mdss_mdp_ctl *ctl = pipe->mixer_left->ctl;
- u32 vbp_fac, threshold;
+ u32 vbp_fac = 0, threshold = 0;
u64 prefill_bw, pipe_bw, max_pipe_bw;
/* we only need bandwidth check on real-time clients (interfaces) */
@@ -4027,9 +3983,11 @@ void mdss_mdp_set_roi(struct mdss_mdp_ctl *ctl,
(!l_roi->w && !l_roi->h && !r_roi->w && !r_roi->h) ||
!ctl->panel_data->panel_info.partial_update_enabled) {
- *l_roi = (struct mdss_rect) {0, 0,
- ctl->mixer_left->width,
- ctl->mixer_left->height};
+ if (ctl->mixer_left) {
+ *l_roi = (struct mdss_rect) {0, 0,
+ ctl->mixer_left->width,
+ ctl->mixer_left->height};
+ }
if (ctl->mixer_right) {
*r_roi = (struct mdss_rect) {0, 0,
@@ -4040,14 +3998,16 @@ void mdss_mdp_set_roi(struct mdss_mdp_ctl *ctl,
previous_frame_pu_type = mdss_mdp_get_pu_type(ctl);
mdss_mdp_set_mixer_roi(ctl->mixer_left, l_roi);
- ctl->roi = ctl->mixer_left->roi;
+ if (ctl->mixer_left)
+ ctl->roi = ctl->mixer_left->roi;
if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY) {
struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
if (sctl) {
mdss_mdp_set_mixer_roi(sctl->mixer_left, r_roi);
- sctl->roi = sctl->mixer_left->roi;
+ if (sctl->mixer_left)
+ sctl->roi = sctl->mixer_left->roi;
}
} else if (is_dual_lm_single_display(ctl->mfd) && ctl->mixer_right) {
@@ -4057,7 +4017,7 @@ void mdss_mdp_set_roi(struct mdss_mdp_ctl *ctl,
ctl->roi.w += ctl->mixer_right->roi.w;
/* right_only, update roi.x as per CTL ROI guidelines */
- if (!ctl->mixer_left->valid_roi) {
+ if (ctl->mixer_left && !ctl->mixer_left->valid_roi) {
ctl->roi = ctl->mixer_right->roi;
ctl->roi.x = left_lm_w_from_mfd(ctl->mfd) +
ctl->mixer_right->roi.x;
@@ -4079,56 +4039,157 @@ void mdss_mdp_set_roi(struct mdss_mdp_ctl *ctl,
}
}
-u32 mdss_mdp_get_mixer_mask(u32 pipe_num, u32 stage)
+static void __mdss_mdp_mixer_update_cfg_masks(u32 pnum, u32 stage,
+ struct mdss_mdp_mixer_cfg *cfg)
{
- u32 mask = 0;
+ u32 masks[NUM_MIXERCFG_REGS] = { 0 };
+ int i;
- if ((pipe_num == MDSS_MDP_SSPP_VIG3 ||
- pipe_num == MDSS_MDP_SSPP_RGB3)) {
- /* Add 2 to account for Cursor & Border bits */
- mask = stage << ((3 * pipe_num) + 2);
- } else {
- mask = stage << (3 * pipe_num);
- }
- return mask;
+ if (pnum >= MDSS_MDP_MAX_SSPP)
+ return;
+
+ masks[0] = mdss_mdp_hwio_mask(&mdp_pipe_hwio[pnum].base, stage);
+ masks[1] = mdss_mdp_hwio_mask(&mdp_pipe_hwio[pnum].ext, stage);
+ masks[2] = mdss_mdp_hwio_mask(&mdp_pipe_hwio[pnum].ext2, stage);
+
+ for (i = 0; i < NUM_MIXERCFG_REGS; i++)
+ cfg->config_masks[i] |= masks[i];
+
+ pr_debug("pnum=%d stage=%d cfg=0x%08x ext=0x%08x\n",
+ pnum, stage, masks[0], masks[1]);
}
-u32 mdss_mdp_get_mixer_extn_mask(u32 pipe_num, u32 stage)
+static void __mdss_mdp_mixer_get_offsets(u32 mixer_num,
+ u32 *offsets, size_t count)
{
- u32 mask = 0;
+ BUG_ON(count < NUM_MIXERCFG_REGS);
+
+ offsets[0] = MDSS_MDP_REG_CTL_LAYER(mixer_num);
+ offsets[1] = MDSS_MDP_REG_CTL_LAYER_EXTN(mixer_num);
+ offsets[2] = MDSS_MDP_REG_CTL_LAYER_EXTN2(mixer_num);
+}
+static inline int __mdss_mdp_mixer_get_hw_num(struct mdss_mdp_mixer *mixer)
+{
/*
- * The ctl layer extension bits are ordered
- * VIG0-3, RGB0-3, DMA0-1
+ * mapping to hardware expectation of actual mixer programming to
+ * happen on following registers:
+ * INTF: 0, 1, 2, 5
+ * WB: 3, 4
+ * With some exceptions on certain revisions
*/
- if (pipe_num < MDSS_MDP_SSPP_RGB0) {
- mask = BIT(pipe_num << 1);
- } else if (pipe_num >= MDSS_MDP_SSPP_RGB0 &&
- pipe_num < MDSS_MDP_SSPP_DMA0) {
- mask = BIT((pipe_num + 1) << 1);
- } else if (pipe_num >= MDSS_MDP_SSPP_DMA0 &&
- pipe_num < MDSS_MDP_SSPP_VIG3) {
- mask = BIT((pipe_num + 2) << 1);
- } else if (pipe_num >= MDSS_MDP_SSPP_CURSOR0 &&
- pipe_num <= MDSS_MDP_SSPP_CURSOR1) {
- mask = stage << (20 + (6 * (pipe_num - MDSS_MDP_SSPP_CURSOR0)));
- } else if (pipe_num == MDSS_MDP_SSPP_VIG3) {
- mask = BIT(6);
- } else if (pipe_num == MDSS_MDP_SSPP_RGB3) {
- mask = BIT(14);
- }
-
- return mask;
+ if (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) {
+ u32 wb_offset;
+
+ if (test_bit(MDSS_CAPS_MIXER_1_FOR_WB,
+ mixer->ctl->mdata->mdss_caps_map))
+ wb_offset = MDSS_MDP_INTF_LAYERMIXER1;
+ else
+ wb_offset = MDSS_MDP_INTF_LAYERMIXER3;
+
+ return mixer->num + wb_offset;
+ } else if (mixer->num == MDSS_MDP_INTF_LAYERMIXER3) {
+ return 5;
+ } else {
+ return mixer->num;
+ }
+}
+
+static inline void __mdss_mdp_mixer_write_layer(struct mdss_mdp_ctl *ctl,
+ u32 mixer_num, u32 *values, size_t count)
+{
+ u32 off[NUM_MIXERCFG_REGS];
+ int i;
+
+ BUG_ON(!values || count < NUM_MIXERCFG_REGS);
+
+ __mdss_mdp_mixer_get_offsets(mixer_num, off, ARRAY_SIZE(off));
+
+ for (i = 0; i < count; i++)
+ mdss_mdp_ctl_write(ctl, off[i], values[i]);
+}
+
+static void __mdss_mdp_mixer_write_cfg(struct mdss_mdp_mixer *mixer,
+ struct mdss_mdp_mixer_cfg *cfg)
+{
+ u32 vals[NUM_MIXERCFG_REGS] = {0};
+ int i, mixer_num;
+
+ if (!mixer)
+ return;
+
+ mixer_num = __mdss_mdp_mixer_get_hw_num(mixer);
+
+ if (cfg) {
+ for (i = 0; i < NUM_MIXERCFG_REGS; i++)
+ vals[i] = cfg->config_masks[i];
+
+ if (cfg->border_enabled)
+ vals[0] |= MDSS_MDP_LM_BORDER_COLOR;
+ if (cfg->cursor_enabled)
+ vals[0] |= MDSS_MDP_LM_CURSOR_OUT;
+ }
+
+ __mdss_mdp_mixer_write_layer(mixer->ctl, mixer_num,
+ vals, ARRAY_SIZE(vals));
+
+ pr_debug("mixer=%d cfg=0%08x cfg_extn=0x%08x\n",
+ mixer->num, vals[0], vals[1]);
+ MDSS_XLOG(mixer->num, vals[0], vals[1]);
+}
+
+static void __mdss_mdp_reset_mixercfg(struct mdss_mdp_ctl *ctl)
+{
+ u32 vals[NUM_MIXERCFG_REGS] = {0};
+ int i, nmixers;
+
+ if (!ctl)
+ return;
+
+ nmixers = MDSS_MDP_INTF_MAX_LAYERMIXER + MDSS_MDP_WB_MAX_LAYERMIXER;
+
+ for (i = 0; i < nmixers; i++)
+ __mdss_mdp_mixer_write_layer(ctl, i, vals, ARRAY_SIZE(vals));
+}
+
+bool mdss_mdp_mixer_reg_has_pipe(struct mdss_mdp_mixer *mixer,
+ struct mdss_mdp_pipe *pipe)
+{
+ u32 offs[NUM_MIXERCFG_REGS];
+ u32 cfgs[NUM_MIXERCFG_REGS];
+ struct mdss_mdp_mixer_cfg mixercfg;
+ int i, mixer_num;
+
+ if (!mixer)
+ return false;
+
+ memset(&mixercfg, 0, sizeof(mixercfg));
+
+ mixer_num = __mdss_mdp_mixer_get_hw_num(mixer);
+ __mdss_mdp_mixer_get_offsets(mixer_num, offs, NUM_MIXERCFG_REGS);
+
+ for (i = 0; i < NUM_MIXERCFG_REGS; i++)
+ cfgs[i] = mdss_mdp_ctl_read(mixer->ctl, offs[i]);
+
+ __mdss_mdp_mixer_update_cfg_masks(pipe->num, -1, &mixercfg);
+ for (i = 0; i < NUM_MIXERCFG_REGS; i++) {
+ if (cfgs[i] & mixercfg.config_masks[i]) {
+ MDSS_XLOG(mixer->num, cfgs[0], cfgs[1]);
+ return true;
+ }
+ }
+
+ return false;
}
static void mdss_mdp_mixer_setup(struct mdss_mdp_ctl *master_ctl,
int mixer_mux, bool lm_swap)
{
- int i;
+ int i, mixer_num;
int stage, screen_state, outsize;
u32 off, blend_op, blend_stage;
- u32 mixercfg = 0, mixer_op_mode = 0, bg_alpha_enable = 0,
- mixercfg_extn = 0;
+ u32 mixer_op_mode = 0, bg_alpha_enable = 0;
+ struct mdss_mdp_mixer_cfg mixercfg;
u32 fg_alpha = 0, bg_alpha = 0;
struct mdss_mdp_pipe *pipe;
struct mdss_mdp_ctl *ctl, *ctl_hw;
@@ -4149,10 +4210,12 @@ static void mdss_mdp_mixer_setup(struct mdss_mdp_ctl *master_ctl,
/* check if mixer setup for rotator is needed */
if (mixer_hw->rotator_mode) {
- __mdss_mdp_reset_mixercfg(ctl_hw);
+ __mdss_mdp_mixer_write_cfg(mixer_hw, NULL);
return;
}
+ memset(&mixercfg, 0, sizeof(mixercfg));
+
if (lm_swap) {
if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
mixer = mdss_mdp_mixer_get(master_ctl,
@@ -4182,11 +4245,7 @@ static void mdss_mdp_mixer_setup(struct mdss_mdp_ctl *master_ctl,
* mode is MDP_DUAL_LM_SINGLE_DISPLAY but update is only on
* one side.
*/
- off = __mdss_mdp_ctl_get_mixer_off(mixer_hw);
- mdss_mdp_ctl_write(ctl_hw, off, 0);
- /* Program ctl layer extension bits */
- off = __mdss_mdp_ctl_get_mixer_extn_off(mixer_hw);
- mdss_mdp_ctl_write(ctl_hw, off, 0);
+ __mdss_mdp_mixer_write_cfg(mixer_hw, NULL);
MDSS_XLOG(mixer->num, mixer_hw->num, XLOG_FUNC_EXIT);
return;
@@ -4200,19 +4259,16 @@ static void mdss_mdp_mixer_setup(struct mdss_mdp_ctl *master_ctl,
mdp_mixer_write(mixer_hw, MDSS_MDP_REG_LM_OUT_SIZE, outsize);
if (screen_state == MDSS_SCREEN_FORCE_BLANK) {
- mixercfg = MDSS_MDP_LM_BORDER_COLOR;
+ mixercfg.border_enabled = true;
goto update_mixer;
}
pipe = mixer->stage_pipe[MDSS_MDP_STAGE_BASE * MAX_PIPES_PER_STAGE];
if (pipe == NULL) {
- mixercfg = MDSS_MDP_LM_BORDER_COLOR;
+ mixercfg.border_enabled = true;
} else {
- if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR)
- mixercfg_extn |= mdss_mdp_get_mixer_extn_mask(
- pipe->num, 1);
- else
- mixercfg |= mdss_mdp_get_mixer_mask(pipe->num, 1);
+ __mdss_mdp_mixer_update_cfg_masks(pipe->num,
+ MDSS_MDP_STAGE_BASE, &mixercfg);
if (pipe->src_fmt->alpha_enable)
bg_alpha_enable = 1;
@@ -4313,12 +4369,7 @@ static void mdss_mdp_mixer_setup(struct mdss_mdp_ctl *master_ctl,
if (!pipe->src_fmt->alpha_enable && bg_alpha_enable)
mixer_op_mode = 0;
- if ((stage < MDSS_MDP_STAGE_6) &&
- (pipe->type != MDSS_MDP_PIPE_TYPE_CURSOR))
- mixercfg |= mdss_mdp_get_mixer_mask(pipe->num, stage);
- else
- mixercfg_extn |= mdss_mdp_get_mixer_extn_mask(
- pipe->num, stage);
+ __mdss_mdp_mixer_update_cfg_masks(pipe->num, stage, &mixercfg);
trace_mdp_sspp_change(pipe);
@@ -4333,20 +4384,11 @@ static void mdss_mdp_mixer_setup(struct mdss_mdp_ctl *master_ctl,
}
if (mixer->cursor_enabled)
- mixercfg |= MDSS_MDP_LM_CURSOR_OUT;
+ mixercfg.cursor_enabled = true;
update_mixer:
- if (mixer_hw->num == MDSS_MDP_INTF_LAYERMIXER3) {
- ctl_hw->flush_bits |= BIT(20);
- } else if (mixer_hw->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) {
- if (test_bit(MDSS_CAPS_MIXER_1_FOR_WB,
- mdata->mdss_caps_map))
- ctl_hw->flush_bits |= BIT(7) << mixer_hw->num;
- else
- ctl_hw->flush_bits |= BIT(9) << mixer_hw->num;
- } else {
- ctl_hw->flush_bits |= BIT(6) << mixer_hw->num;
- }
+ mixer_num = __mdss_mdp_mixer_get_hw_num(mixer_hw);
+ ctl_hw->flush_bits |= BIT(mixer_num < 5 ? 6 + mixer_num : 20);
/* Read GC enable/disable status on LM */
mixer_op_mode |=
@@ -4362,18 +4404,14 @@ update_mixer:
mdp_mixer_write(mixer_hw, MDSS_MDP_REG_LM_BORDER_COLOR_1,
mdata->bcolor2 & 0xFFF);
- off = __mdss_mdp_ctl_get_mixer_off(mixer_hw);
- mdss_mdp_ctl_write(ctl_hw, off, mixercfg);
- /* Program ctl layer extension bits */
- off = __mdss_mdp_ctl_get_mixer_extn_off(mixer_hw);
- mdss_mdp_ctl_write(ctl_hw, off, mixercfg_extn);
+ __mdss_mdp_mixer_write_cfg(mixer_hw, &mixercfg);
- pr_debug("mixer=%d hw=%d cfg=0%08x cfg_extn=0x%08x op_mode=0x%08x w=%d h=%d bc0=0x%x bc1=0x%x\n",
- mixer->num, mixer_hw->num, mixercfg, mixercfg_extn,
+ pr_debug("mixer=%d hw=%d op_mode=0x%08x w=%d h=%d bc0=0x%x bc1=0x%x\n",
+ mixer->num, mixer_hw->num,
mixer_op_mode, mixer->roi.w, mixer->roi.h,
(mdata->bcolor0 & 0xFFF) | ((mdata->bcolor1 & 0xFFF) << 16),
mdata->bcolor2 & 0xFFF);
- MDSS_XLOG(mixer->num, mixer_hw->num, mixercfg, mixercfg_extn,
+ MDSS_XLOG(mixer->num, mixer_hw->num,
mixer_op_mode, mixer->roi.h, mixer->roi.w);
}
@@ -4567,19 +4605,10 @@ struct mdss_mdp_pipe *mdss_mdp_get_staged_pipe(struct mdss_mdp_ctl *ctl,
int mdss_mdp_get_pipe_flush_bits(struct mdss_mdp_pipe *pipe)
{
- u32 flush_bits;
-
- if (pipe->type == MDSS_MDP_PIPE_TYPE_DMA)
- flush_bits |= BIT(pipe->num) << 5;
- else if (pipe->num == MDSS_MDP_SSPP_VIG3 ||
- pipe->num == MDSS_MDP_SSPP_RGB3)
- flush_bits |= BIT(pipe->num) << 10;
- else if (pipe->type == MDSS_MDP_PIPE_TYPE_CURSOR)
- flush_bits |= BIT(22 + pipe->num - MDSS_MDP_SSPP_CURSOR0);
- else /* RGB/VIG 0-2 pipes */
- flush_bits |= BIT(pipe->num);
+ if (WARN_ON(!pipe || pipe->num >= MDSS_MDP_MAX_SSPP))
+ return 0;
- return flush_bits;
+ return BIT(mdp_pipe_hwio[pipe->num].flush_bit);
}
int mdss_mdp_async_ctl_flush(struct msm_fb_data_type *mfd,
@@ -4630,17 +4659,12 @@ int mdss_mdp_mixer_pipe_update(struct mdss_mdp_pipe *pipe,
j = i * MAX_PIPES_PER_STAGE;
/*
- * 1. If pipe is on the right side of the blending
- * stage, on either left LM or right LM but it is not
- * crossing LM boundry then right_blend ndx is used.
- * 2. If pipe is on the right side of the blending
- * stage on left LM and it is crossing LM boundry
- * then for left LM it is placed into right_blend
- * index but for right LM it still placed into
- * left_blend index.
+ * this could lead to cases where left blend index is
+ * not populated. For instance, where pipe is spanning
+ * across layer mixers. But this is handled properly
+ * within mixer programming code.
*/
- if (pipe->is_right_blend && (!pipe->src_split_req ||
- (pipe->src_split_req && !mixer->is_right_mixer)))
+ if (pipe->is_right_blend)
j++;
/* First clear all blend containers for current stage */
@@ -4694,27 +4718,44 @@ void mdss_mdp_mixer_unstage_all(struct mdss_mdp_mixer *mixer)
int mdss_mdp_mixer_pipe_unstage(struct mdss_mdp_pipe *pipe,
struct mdss_mdp_mixer *mixer)
{
- int index;
- u8 right_blend_index;
+ int i, right_blend;
if (!pipe)
return -EINVAL;
if (!mixer)
return -EINVAL;
- right_blend_index = pipe->is_right_blend &&
- !(pipe->src_split_req && mixer->is_right_mixer);
- index = (pipe->mixer_stage * MAX_PIPES_PER_STAGE) + right_blend_index;
-
- if (index < MAX_PIPES_PER_LM && pipe == mixer->stage_pipe[index]) {
+ right_blend = pipe->is_right_blend ? 1 : 0;
+ i = (pipe->mixer_stage * MAX_PIPES_PER_STAGE) + right_blend;
+ if ((i < MAX_PIPES_PER_LM) && (pipe == mixer->stage_pipe[i])) {
pr_debug("unstage p%d from %s side of stage=%d lm=%d ndx=%d\n",
- pipe->num, pipe->is_right_blend ? "right" : "left",
- pipe->mixer_stage, mixer->num, index);
+ pipe->num, right_blend ? "right" : "left",
+ pipe->mixer_stage, mixer->num, i);
+ } else {
+ int stage;
- mixer->params_changed++;
- mixer->stage_pipe[index] = NULL;
+ for (i = 0; i < MAX_PIPES_PER_LM; i++) {
+ if (pipe != mixer->stage_pipe[i])
+ continue;
+
+ stage = i / MAX_PIPES_PER_STAGE;
+ right_blend = i & 1;
+
+ pr_warn("lm=%d pipe #%d stage=%d with %s blend, unstaged from %s side of stage=%d!\n",
+ mixer->num, pipe->num, pipe->mixer_stage,
+ pipe->is_right_blend ? "right" : "left",
+ right_blend ? "right" : "left", stage);
+ break;
+ }
+
+ /* pipe not found, not a failure */
+ if (i == MAX_PIPES_PER_LM)
+ return 0;
}
+ mixer->params_changed++;
+ mixer->stage_pipe[i] = NULL;
+
return 0;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
index 87d73e659c9c..9a0c1c9afa53 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_hwio.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
@@ -16,6 +16,23 @@
#include <linux/bitops.h>
+/*
+ * struct mdss_mdp_hwio_cfg - used to define a register bitfield
+ * @start: bitfield offset start from lsb
+ * @len: number of lsb bits that can be taken from field value
+ * @shift: number of lsb bits to truncate from field value
+ */
+struct mdss_mdp_hwio_cfg {
+ u32 start, len, shift;
+};
+
+static inline u32 mdss_mdp_hwio_mask(struct mdss_mdp_hwio_cfg *cfg, u32 val)
+{
+ u32 mask = (1 << cfg->len) - 1;
+
+ return ((val >> cfg->shift) & mask) << cfg->start;
+}
+
#define IGC_LUT_ENTRIES 256
#define GC_LUT_SEGMENTS 16
#define ENHIST_LUT_ENTRIES 256
@@ -141,16 +158,26 @@ enum mdss_mdp_ctl_index {
MDSS_MDP_MAX_CTL
};
+
+#define MDSS_MDP_REG_CTL_LAYER_EXTN_OFFSET 0x40
+#define MDSS_MDP_REG_CTL_LAYER_EXTN2_OFFSET 0x70
+#define MDSS_MDP_CTL_X_LAYER_5 0x24
+
+/* mixer 5 has different offset than others */
#define MDSS_MDP_REG_CTL_LAYER(lm) \
- ((lm == 5) ? (0x024) : ((lm) * 0x004))
+ (((lm) == 5) ? MDSS_MDP_CTL_X_LAYER_5 : ((lm) * 0x004))
+
#define MDSS_MDP_REG_CTL_LAYER_EXTN(lm) \
- ((lm == 5) ? (0x54) : (MDSS_MDP_REG_CTL_LAYER(lm) + 0x40))
+ (MDSS_MDP_REG_CTL_LAYER_EXTN_OFFSET + ((lm) * 0x004))
+
+#define MDSS_MDP_REG_CTL_LAYER_EXTN2(lm) \
+ (MDSS_MDP_REG_CTL_LAYER_EXTN2_OFFSET + ((lm) * 0x004))
+
#define MDSS_MDP_REG_CTL_TOP 0x014
#define MDSS_MDP_REG_CTL_FLUSH 0x018
#define MDSS_MDP_REG_CTL_START 0x01C
#define MDSS_MDP_REG_CTL_PACK_3D 0x020
#define MDSS_MDP_REG_CTL_SW_RESET 0x030
-#define MDSS_MDP_REG_CTL_LAYER_EXTN_OFFSET 0x40
#define MDSS_MDP_CTL_OP_VIDEO_MODE (0 << 17)
#define MDSS_MDP_CTL_OP_CMD_MODE (1 << 17)
@@ -180,7 +207,9 @@ enum mdss_mdp_sspp_index {
MDSS_MDP_SSPP_RGB3,
MDSS_MDP_SSPP_CURSOR0,
MDSS_MDP_SSPP_CURSOR1,
- MDSS_MDP_MAX_SSPP
+ MDSS_MDP_SSPP_DMA2,
+ MDSS_MDP_SSPP_DMA3,
+ MDSS_MDP_MAX_SSPP,
};
enum mdss_mdp_sspp_fetch_type {
@@ -361,10 +390,6 @@ enum mdss_mdp_sspp_chroma_samp_type {
#define MDSS_MDP_SCALEX_EN BIT(0)
#define MDSS_MDP_FMT_SOLID_FILL 0x4037FF
-#define MDSS_MDP_NUM_REG_MIXERS 3
-#define MDSS_MDP_NUM_WB_MIXERS 2
-#define MDSS_MDP_CTL_X_LAYER_5 0x24
-
#define MDSS_MDP_INTF_EDP_SEL (BIT(3) | BIT(1))
#define MDSS_MDP_INTF_HDMI_SEL (BIT(25) | BIT(24))
#define MDSS_MDP_INTF_DSI0_SEL BIT(8)
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index 950228e405a4..ae7b6a6ed015 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -13,6 +13,8 @@
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
#include "mdss_mdp.h"
#include "mdss_panel.h"
@@ -57,13 +59,17 @@ struct mdss_mdp_cmd_ctx {
u8 ref_cnt;
struct completion stop_comp;
+ atomic_t rdptr_cnt;
+ wait_queue_head_t rdptr_waitq;
struct completion pp_done;
wait_queue_head_t pp_waitq;
struct list_head vsync_handlers;
+ struct list_head lineptr_handlers;
int panel_power_state;
atomic_t koff_cnt;
u32 intf_stopped;
struct mutex mdp_rdptr_lock;
+ struct mutex mdp_wrptr_lock;
struct mutex clk_mtx;
spinlock_t clk_lock;
spinlock_t koff_lock;
@@ -86,8 +92,12 @@ struct mdss_mdp_cmd_ctx {
struct completion autorefresh_done;
int vsync_irq_cnt;
+ int lineptr_irq_cnt;
+ bool lineptr_enabled;
+ u32 prev_wr_ptr_irq;
struct mdss_intf_recovery intf_recovery;
+ struct mdss_intf_recovery intf_mdp_callback;
struct mdss_mdp_cmd_ctx *sync_ctx; /* for partial update */
u32 pp_timeout_report_cnt;
bool pingpong_split_slave;
@@ -101,6 +111,7 @@ static inline void mdss_mdp_cmd_clk_off(struct mdss_mdp_cmd_ctx *ctx);
static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg);
static int mdss_mdp_disable_autorefresh(struct mdss_mdp_ctl *ctl,
struct mdss_mdp_ctl *sctl);
+static int mdss_mdp_setup_vsync(struct mdss_mdp_cmd_ctx *ctx, bool enable);
static bool __mdss_mdp_cmd_is_aux_pp_needed(struct mdss_data_type *mdata,
struct mdss_mdp_ctl *mctl)
@@ -280,9 +291,10 @@ static int mdss_mdp_cmd_tearcheck_cfg(struct mdss_mdp_mixer *mixer,
cfg |= vclks_line;
- pr_debug("%s: yres=%d vclks=%x height=%d init=%d rd=%d start=%d\n",
+ pr_debug("%s: yres=%d vclks=%x height=%d init=%d rd=%d start=%d wr=%d\n",
__func__, pinfo->yres, vclks_line, te->sync_cfg_height,
- te->vsync_init_val, te->rd_ptr_irq, te->start_pos);
+ te->vsync_init_val, te->rd_ptr_irq, te->start_pos,
+ te->wr_ptr_irq);
pr_debug("thrd_start =%d thrd_cont=%d pp_split=%d\n",
te->sync_threshold_start, te->sync_threshold_continue,
ctx->pingpong_split_slave);
@@ -304,6 +316,9 @@ static int mdss_mdp_cmd_tearcheck_cfg(struct mdss_mdp_mixer *mixer,
MDSS_MDP_REG_PP_RD_PTR_IRQ,
te ? te->rd_ptr_irq : 0);
mdss_mdp_pingpong_write(pingpong_base,
+ MDSS_MDP_REG_PP_WR_PTR_IRQ,
+ te ? te->wr_ptr_irq : 0);
+ mdss_mdp_pingpong_write(pingpong_base,
MDSS_MDP_REG_PP_START_POS,
te ? te->start_pos : 0);
mdss_mdp_pingpong_write(pingpong_base,
@@ -649,7 +664,7 @@ int mdss_mdp_resource_control(struct mdss_mdp_ctl *ctl, u32 sw_event)
MDP_RSRC_CTL_STATE_OFF) {
/* Add an extra vote for the ahb bus */
mdss_update_reg_bus_vote(mdata->reg_bus_clt,
- VOTE_INDEX_19_MHZ);
+ VOTE_INDEX_LOW);
/* Enable MDP resources */
mdss_mdp_cmd_clk_on(ctx);
@@ -959,6 +974,17 @@ static void mdss_mdp_cmd_readptr_done(void *arg)
MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
complete_all(&ctx->rdptr_done);
+ /* If caller is waiting for the read pointer, notify. */
+ if (atomic_read(&ctx->rdptr_cnt)) {
+ if (atomic_add_unless(&ctx->rdptr_cnt, -1, 0)) {
+ MDSS_XLOG(atomic_read(&ctx->rdptr_cnt));
+ if (atomic_read(&ctx->rdptr_cnt))
+ pr_warn("%s: too many rdptrs=%d!\n",
+ __func__, atomic_read(&ctx->rdptr_cnt));
+ }
+ wake_up_all(&ctx->rdptr_waitq);
+ }
+
spin_lock(&ctx->clk_lock);
list_for_each_entry(tmp, &ctx->vsync_handlers, list) {
if (tmp->enabled && !tmp->cmd_post_flush)
@@ -967,6 +993,104 @@ static void mdss_mdp_cmd_readptr_done(void *arg)
spin_unlock(&ctx->clk_lock);
}
+static int mdss_mdp_cmd_wait4readptr(struct mdss_mdp_cmd_ctx *ctx)
+{
+ int rc = 0;
+
+ rc = wait_event_timeout(ctx->rdptr_waitq,
+ atomic_read(&ctx->rdptr_cnt) == 0,
+ KOFF_TIMEOUT);
+ if (rc <= 0) {
+ if (atomic_read(&ctx->rdptr_cnt))
+ pr_err("timed out waiting for rdptr irq\n");
+ else
+ rc = 1;
+ }
+ return rc;
+}
+
+static void mdss_mdp_cmd_intf_callback(void *data, int event)
+{
+ struct mdss_mdp_cmd_ctx *ctx = data;
+ struct mdss_mdp_pp_tear_check *te = NULL;
+ u32 timeout_us = 3000, val = 0;
+ struct mdss_mdp_mixer *mixer;
+
+ if (!data) {
+ pr_err("%s: invalid ctx\n", __func__);
+ return;
+ }
+
+ if (!ctx->ctl)
+ return;
+
+ switch (event) {
+ case MDP_INTF_CALLBACK_DSI_WAIT:
+ pr_debug("%s: wait for frame cnt:%d event:%d\n",
+ __func__, atomic_read(&ctx->rdptr_cnt), event);
+
+ /*
+ * if we are going to suspended or pp split is not enabled,
+ * just return
+ */
+ if (ctx->intf_stopped || !is_pingpong_split(ctx->ctl->mfd))
+ return;
+ atomic_inc(&ctx->rdptr_cnt);
+
+ /* enable clks and rd_ptr interrupt */
+ mdss_mdp_setup_vsync(ctx, true);
+
+ mixer = mdss_mdp_mixer_get(ctx->ctl, MDSS_MDP_MIXER_MUX_LEFT);
+ if (!mixer) {
+ pr_err("%s: null mixer\n", __func__);
+ return;
+ }
+
+ /* wait for read pointer */
+ MDSS_XLOG(atomic_read(&ctx->rdptr_cnt));
+ pr_debug("%s: wait for frame cnt:%d\n",
+ __func__, atomic_read(&ctx->rdptr_cnt));
+ mdss_mdp_cmd_wait4readptr(ctx);
+
+ /* wait for 3ms to make sure we are within the frame */
+ te = &ctx->ctl->panel_data->panel_info.te;
+ readl_poll_timeout(mixer->pingpong_base +
+ MDSS_MDP_REG_PP_INT_COUNT_VAL, val,
+ (val & 0xffff) > (te->start_pos +
+ te->sync_threshold_start), 10, timeout_us);
+
+ /* disable rd_ptr interrupt */
+ mdss_mdp_setup_vsync(ctx, false);
+
+ break;
+ default:
+ pr_debug("%s: unhandled event=%d\n", __func__, event);
+ break;
+ }
+}
+
+static void mdss_mdp_cmd_writeptr_done(void *arg)
+{
+ struct mdss_mdp_ctl *ctl = arg;
+ struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
+ struct mdss_mdp_lineptr_handler *tmp;
+ ktime_t lineptr_time;
+
+ if (!ctx) {
+ pr_err("invalid ctx\n");
+ return;
+ }
+
+ lineptr_time = ktime_get();
+
+ spin_lock(&ctx->clk_lock);
+ list_for_each_entry(tmp, &ctx->lineptr_handlers, list) {
+ if (tmp->enabled)
+ tmp->lineptr_handler(ctl, lineptr_time);
+ }
+ spin_unlock(&ctx->clk_lock);
+}
+
static void mdss_mdp_cmd_intf_recovery(void *data, int event)
{
struct mdss_mdp_cmd_ctx *ctx = data;
@@ -1020,6 +1144,7 @@ static void mdss_mdp_cmd_pingpong_done(void *arg)
struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
struct mdss_mdp_vsync_handler *tmp;
ktime_t vsync_time;
+ bool sync_ppdone;
if (!ctx) {
pr_err("%s: invalid ctx\n", __func__);
@@ -1045,11 +1170,18 @@ static void mdss_mdp_cmd_pingpong_done(void *arg)
MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), ctx->current_pp_num);
+ /*
+ * check state of sync ctx before decrementing koff_cnt to avoid race
+ * condition. That is, once both koff_cnt have been served and new koff
+ * can be triggered (sctx->koff_cnt could change)
+ */
+ sync_ppdone = mdss_mdp_cmd_do_notifier(ctx);
+
if (atomic_add_unless(&ctx->koff_cnt, -1, 0)) {
if (atomic_read(&ctx->koff_cnt))
pr_err("%s: too many kickoffs=%d!\n", __func__,
atomic_read(&ctx->koff_cnt));
- if (mdss_mdp_cmd_do_notifier(ctx)) {
+ if (sync_ppdone) {
atomic_inc(&ctx->pp_done_cnt);
schedule_work(&ctx->pp_done_work);
@@ -1071,6 +1203,178 @@ static void mdss_mdp_cmd_pingpong_done(void *arg)
spin_unlock(&ctx->koff_lock);
}
+static int mdss_mdp_setup_lineptr(struct mdss_mdp_cmd_ctx *ctx,
+ bool enable)
+{
+ int changed = 0;
+
+ mutex_lock(&ctx->mdp_wrptr_lock);
+
+ if (enable) {
+ if (ctx->lineptr_irq_cnt == 0)
+ changed++;
+ ctx->lineptr_irq_cnt++;
+ } else {
+ if (ctx->lineptr_irq_cnt) {
+ ctx->lineptr_irq_cnt--;
+ if (ctx->lineptr_irq_cnt == 0)
+ changed++;
+ } else {
+ pr_warn("%pS->%s: wr_ptr can not be turned off\n",
+ __builtin_return_address(0), __func__);
+ }
+ }
+
+ if (changed)
+ MDSS_XLOG(ctx->lineptr_irq_cnt, enable, current->pid);
+
+ pr_debug("%pS->%s: lineptr_irq_cnt=%d changed=%d enable=%d ctl:%d pp:%d\n",
+ __builtin_return_address(0), __func__,
+ ctx->lineptr_irq_cnt, changed, enable,
+ ctx->ctl->num, ctx->default_pp_num);
+
+ if (changed) {
+ if (enable) {
+ /* enable clocks and irq */
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ mdss_mdp_irq_enable(MDSS_MDP_IRQ_PING_PONG_WR_PTR,
+ ctx->default_pp_num);
+ } else {
+ /* disable clocks and irq */
+ mdss_mdp_irq_disable(MDSS_MDP_IRQ_PING_PONG_WR_PTR,
+ ctx->default_pp_num);
+ /*
+ * check the intr status and clear the irq before
+ * disabling the clocks
+ */
+ mdss_mdp_intr_check_and_clear(
+ MDSS_MDP_IRQ_PING_PONG_WR_PTR,
+ ctx->default_pp_num);
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ }
+ }
+
+ mutex_unlock(&ctx->mdp_wrptr_lock);
+ return ctx->lineptr_irq_cnt;
+}
+
+static int mdss_mdp_cmd_add_lineptr_handler(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_lineptr_handler *handle)
+{
+ struct mdss_mdp_cmd_ctx *ctx;
+ unsigned long flags;
+ int ret = 0;
+
+ mutex_lock(&ctl->offlock);
+ ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+ if (!ctx || !ctl->is_master) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ pr_debug("%pS->%s: ctl=%d\n",
+ __builtin_return_address(0), __func__, ctl->num);
+
+ MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+
+ spin_lock_irqsave(&ctx->clk_lock, flags);
+ if (!handle->enabled) {
+ handle->enabled = true;
+ list_add(&handle->list, &ctx->lineptr_handlers);
+ }
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
+
+ if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY)
+ mutex_lock(&cmd_clk_mtx);
+
+ mdss_mdp_setup_lineptr(ctx, true);
+ ctx->lineptr_enabled = true;
+
+ if (ctl->mfd->split_mode == MDP_DUAL_LM_DUAL_DISPLAY)
+ mutex_unlock(&cmd_clk_mtx);
+done:
+ mutex_unlock(&ctl->offlock);
+
+ return ret;
+}
+
+static int mdss_mdp_cmd_remove_lineptr_handler(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_lineptr_handler *handle)
+{
+ struct mdss_mdp_cmd_ctx *ctx;
+ unsigned long flags;
+ bool disabled = true;
+
+ ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+ if (!ctx || !ctl->is_master || !ctx->lineptr_enabled)
+ return -EINVAL;
+
+ pr_debug("%pS->%s: ctl=%d\n",
+ __builtin_return_address(0), __func__, ctl->num);
+
+ MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt));
+
+ spin_lock_irqsave(&ctx->clk_lock, flags);
+ if (handle->enabled) {
+ handle->enabled = false;
+ list_del_init(&handle->list);
+ } else {
+ disabled = false;
+ }
+ spin_unlock_irqrestore(&ctx->clk_lock, flags);
+
+ if (disabled)
+ mdss_mdp_setup_lineptr(ctx, false);
+ ctx->lineptr_enabled = false;
+ ctx->prev_wr_ptr_irq = 0;
+
+ return 0;
+}
+
+static int mdss_mdp_cmd_lineptr_ctrl(struct mdss_mdp_ctl *ctl, bool enable)
+{
+ struct mdss_mdp_pp_tear_check *te;
+ struct mdss_mdp_cmd_ctx *ctx;
+ int rc = 0;
+
+ ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
+ if (!ctx || !ctl->is_master)
+ return -EINVAL;
+
+ te = &ctl->panel_data->panel_info.te;
+ pr_debug("%pS->%s: ctl=%d en=%d, prev_lineptr=%d, lineptr=%d\n",
+ __builtin_return_address(0), __func__, ctl->num,
+ enable, ctx->prev_wr_ptr_irq, te->wr_ptr_irq);
+
+ if (enable) {
+ /* update reg only if the value has changed */
+ if (ctx->prev_wr_ptr_irq != te->wr_ptr_irq) {
+ ctx->prev_wr_ptr_irq = te->wr_ptr_irq;
+ mdss_mdp_pingpong_write(ctl->mixer_left->pingpong_base,
+ MDSS_MDP_REG_PP_WR_PTR_IRQ, te->wr_ptr_irq);
+ }
+
+ /*
+ * add handler only when lineptr is not enabled
+ * and wr ptr is non zero
+ */
+ if (!ctx->lineptr_enabled && te->wr_ptr_irq)
+ rc = mdss_mdp_cmd_add_lineptr_handler(ctl,
+ &ctl->lineptr_handler);
+ /* Disable handler when the value is zero */
+ else if (ctx->lineptr_enabled && !te->wr_ptr_irq)
+ rc = mdss_mdp_cmd_remove_lineptr_handler(ctl,
+ &ctl->lineptr_handler);
+ } else {
+ if (ctx->lineptr_enabled)
+ rc = mdss_mdp_cmd_remove_lineptr_handler(ctl,
+ &ctl->lineptr_handler);
+ }
+
+ return rc;
+}
+
/**
* mdss_mdp_cmd_autorefresh_pp_done() - pp done irq callback for autorefresh
* @arg: void pointer to the controller context.
@@ -1106,14 +1410,20 @@ static void pingpong_done_work(struct work_struct *work)
u32 status;
struct mdss_mdp_cmd_ctx *ctx =
container_of(work, typeof(*ctx), pp_done_work);
+ struct mdss_mdp_ctl *ctl = ctx->ctl;
- if (ctx->ctl) {
+ if (ctl) {
while (atomic_add_unless(&ctx->pp_done_cnt, -1, 0))
mdss_mdp_ctl_notify(ctx->ctl, MDP_NOTIFY_FRAME_DONE);
status = mdss_mdp_ctl_perf_get_transaction_status(ctx->ctl);
if (status == 0)
mdss_mdp_ctl_perf_release_bw(ctx->ctl);
+
+ if (!ctl->is_master)
+ ctl = mdss_mdp_get_main_ctl(ctl);
+ if (mdss_mdp_is_lineptr_supported(ctl))
+ mdss_mdp_cmd_lineptr_ctrl(ctl, false);
}
}
@@ -1557,7 +1867,7 @@ static int mdss_mdp_cmd_wait4pingpong(struct mdss_mdp_ctl *ctl, void *arg)
mask = BIT(MDSS_MDP_IRQ_PING_PONG_COMP + ctx->current_pp_num);
status = mask & readl_relaxed(ctl->mdata->mdp_base +
MDSS_MDP_REG_INTR_STATUS);
- MDSS_XLOG(status, atomic_read(&ctx->koff_cnt), rc);
+ MDSS_XLOG(status, rc, atomic_read(&ctx->koff_cnt));
if (status) {
pr_warn("pp done but irq not triggered\n");
mdss_mdp_irq_clear(ctl->mdata,
@@ -1749,6 +2059,11 @@ static int mdss_mdp_cmd_panel_on(struct mdss_mdp_ctl *ctl,
(void *)&ctx->intf_recovery,
CTL_INTF_EVENT_FLAG_DEFAULT);
+ mdss_mdp_ctl_intf_event(ctl,
+ MDSS_EVENT_REGISTER_MDP_CALLBACK,
+ (void *)&ctx->intf_mdp_callback,
+ CTL_INTF_EVENT_FLAG_DEFAULT);
+
ctx->intf_stopped = 0;
if (sctx)
sctx->intf_stopped = 0;
@@ -2026,14 +2341,17 @@ static void mdss_mdp_cmd_autorefresh_done(void *arg)
static u32 get_autorefresh_timeout(struct mdss_mdp_ctl *ctl,
struct mdss_mdp_cmd_ctx *ctx, u32 frame_cnt)
{
- struct mdss_mdp_mixer *mixer =
- mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+ struct mdss_mdp_mixer *mixer;
struct mdss_panel_info *pinfo;
u32 line_count;
u32 fps, v_total;
unsigned long autorefresh_timeout;
pinfo = &ctl->panel_data->panel_info;
+ mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
+
+ if (!mixer || !pinfo)
+ return -EINVAL;
if (!ctx->ignore_external_te)
line_count = ctl->mixer_left->roi.h;
@@ -2362,6 +2680,13 @@ static int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
PERF_SW_COMMIT_STATE, PERF_STATUS_DONE);
}
+ if (mdss_mdp_is_lineptr_supported(ctl)) {
+ if (mdss_mdp_is_full_frame_update(ctl))
+ mdss_mdp_cmd_lineptr_ctrl(ctl, true);
+ else if (ctx->lineptr_enabled)
+ mdss_mdp_cmd_lineptr_ctrl(ctl, false);
+ }
+
/* Kickoff */
__mdss_mdp_kickoff(ctl, ctx);
@@ -2430,6 +2755,14 @@ int mdss_mdp_cmd_ctx_stop(struct mdss_mdp_ctl *ctl,
/* intf stopped, no more kickoff */
ctx->intf_stopped = 1;
+ /* Make sure any rd ptr for dsi callback is done before disable vsync */
+ if (is_pingpong_split(ctl->mfd)) {
+ pr_debug("%s will wait for rd ptr:%d\n", __func__,
+ atomic_read(&ctx->rdptr_cnt));
+ MDSS_XLOG(atomic_read(&ctx->rdptr_cnt));
+ mdss_mdp_cmd_wait4readptr(ctx);
+ }
+
/*
* if any vsyncs are still enabled, loop until the refcount
* goes to zero, so the rd ptr interrupt is disabled.
@@ -2442,11 +2775,20 @@ int mdss_mdp_cmd_ctx_stop(struct mdss_mdp_ctl *ctl,
while (mdss_mdp_setup_vsync(ctx, false))
;
}
+ if (ctx->lineptr_irq_cnt) {
+ WARN(1, "lineptr irq still enabled\n");
+ while (mdss_mdp_setup_lineptr(ctx, false))
+ ;
+ }
if (!ctl->pending_mode_switch) {
mdss_mdp_ctl_intf_event(ctl,
MDSS_EVENT_REGISTER_RECOVERY_HANDLER,
NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
+
+ mdss_mdp_ctl_intf_event(ctl,
+ MDSS_EVENT_REGISTER_MDP_CALLBACK,
+ NULL, CTL_INTF_EVENT_FLAG_DEFAULT);
}
/* shut down the MDP/DSI resources if still enabled */
@@ -2465,6 +2807,8 @@ int mdss_mdp_cmd_ctx_stop(struct mdss_mdp_ctl *ctl,
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_RD_PTR,
ctx->default_pp_num, NULL, NULL);
+ mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_WR_PTR,
+ ctx->default_pp_num, NULL, NULL);
mdss_mdp_set_intr_callback_nosync(MDSS_MDP_IRQ_PING_PONG_COMP,
ctx->default_pp_num, NULL, NULL);
@@ -2521,6 +2865,8 @@ static int mdss_mdp_cmd_stop_sub(struct mdss_mdp_ctl *ctl,
list_for_each_entry_safe(handle, tmp, &ctx->vsync_handlers, list)
mdss_mdp_cmd_remove_vsync_handler(ctl, handle);
+ if (mdss_mdp_is_lineptr_supported(ctl))
+ mdss_mdp_cmd_lineptr_ctrl(ctl, false);
MDSS_XLOG(ctl->num, atomic_read(&ctx->koff_cnt), XLOG_FUNC_ENTRY);
/* Command mode is supported only starting at INTF1 */
@@ -2531,7 +2877,7 @@ static int mdss_mdp_cmd_stop_sub(struct mdss_mdp_ctl *ctl,
int mdss_mdp_cmd_stop(struct mdss_mdp_ctl *ctl, int panel_power_state)
{
struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
- struct mdss_mdp_cmd_ctx *sctx;
+ struct mdss_mdp_cmd_ctx *sctx = NULL;
struct mdss_mdp_ctl *sctl = mdss_mdp_get_split_ctl(ctl);
bool panel_off = false;
bool turn_off_clocks = false;
@@ -2600,6 +2946,12 @@ int mdss_mdp_cmd_stop(struct mdss_mdp_ctl *ctl, int panel_power_state)
MDSS_EVENT_REGISTER_RECOVERY_HANDLER,
(void *)&ctx->intf_recovery,
CTL_INTF_EVENT_FLAG_DEFAULT);
+
+ mdss_mdp_ctl_intf_event(ctl,
+ MDSS_EVENT_REGISTER_MDP_CALLBACK,
+ (void *)&ctx->intf_mdp_callback,
+ CTL_INTF_EVENT_FLAG_DEFAULT);
+
ctx->intf_stopped = 0;
if (sctx)
sctx->intf_stopped = 0;
@@ -2754,6 +3106,7 @@ static int mdss_mdp_cmd_ctx_setup(struct mdss_mdp_ctl *ctl,
ctx->pingpong_split_slave = pingpong_split_slave;
ctx->pp_timeout_report_cnt = 0;
init_waitqueue_head(&ctx->pp_waitq);
+ init_waitqueue_head(&ctx->rdptr_waitq);
init_completion(&ctx->stop_comp);
init_completion(&ctx->autorefresh_ppdone);
init_completion(&ctx->rdptr_done);
@@ -2763,6 +3116,7 @@ static int mdss_mdp_cmd_ctx_setup(struct mdss_mdp_ctl *ctl,
spin_lock_init(&ctx->koff_lock);
mutex_init(&ctx->clk_mtx);
mutex_init(&ctx->mdp_rdptr_lock);
+ mutex_init(&ctx->mdp_wrptr_lock);
INIT_WORK(&ctx->gate_clk_work, clk_ctrl_gate_work);
INIT_DELAYED_WORK(&ctx->delayed_off_clk_work,
clk_ctrl_delayed_off_work);
@@ -2772,10 +3126,14 @@ static int mdss_mdp_cmd_ctx_setup(struct mdss_mdp_ctl *ctl,
ctx->autorefresh_state = MDP_AUTOREFRESH_OFF;
ctx->autorefresh_frame_cnt = 0;
INIT_LIST_HEAD(&ctx->vsync_handlers);
+ INIT_LIST_HEAD(&ctx->lineptr_handlers);
ctx->intf_recovery.fxn = mdss_mdp_cmd_intf_recovery;
ctx->intf_recovery.data = ctx;
+ ctx->intf_mdp_callback.fxn = mdss_mdp_cmd_intf_callback;
+ ctx->intf_mdp_callback.data = ctx;
+
ctx->intf_stopped = 0;
pr_debug("%s: ctx=%p num=%d aux=%d\n", __func__, ctx,
@@ -2785,6 +3143,9 @@ static int mdss_mdp_cmd_ctx_setup(struct mdss_mdp_ctl *ctl,
mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_RD_PTR,
ctx->default_pp_num, mdss_mdp_cmd_readptr_done, ctl);
+ mdss_mdp_set_intr_callback(MDSS_MDP_IRQ_PING_PONG_WR_PTR,
+ ctx->default_pp_num, mdss_mdp_cmd_writeptr_done, ctl);
+
ret = mdss_mdp_cmd_tearcheck_setup(ctx, false);
if (ret)
pr_err("tearcheck setup failed\n");
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
index 4b7a76035586..e019bcf6eeaf 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
@@ -397,6 +397,7 @@ static int mdss_mdp_writeback_prepare_rot(struct mdss_mdp_ctl *ctl, void *arg)
struct mdss_mdp_writeback_arg *wb_args;
struct mdss_rot_entry *entry;
struct mdp_rotation_item *item;
+ struct mdss_rot_perf *perf;
struct mdss_data_type *mdata;
u32 format;
@@ -413,6 +414,7 @@ static int mdss_mdp_writeback_prepare_rot(struct mdss_mdp_ctl *ctl, void *arg)
return -ENODEV;
}
item = &entry->item;
+ perf = entry->perf;
mdata = ctl->mdata;
if (!mdata) {
pr_err("no mdata attached to ctl=%d", ctl->num);
@@ -433,6 +435,7 @@ static int mdss_mdp_writeback_prepare_rot(struct mdss_mdp_ctl *ctl, void *arg)
ctx->height = ctx->dst_rect.h = item->dst_rect.h;
ctx->dst_rect.x = item->dst_rect.x;
ctx->dst_rect.y = item->dst_rect.y;
+ ctx->frame_rate = perf->config.frame_rate;
ctx->dnsc_factor_w = entry->dnsc_factor_w;
ctx->dnsc_factor_h = entry->dnsc_factor_h;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index 06e106191f01..bc439a92524e 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -36,16 +36,9 @@
#define CHECK_LAYER_BOUNDS(offset, size, max_size) \
(((size) > (max_size)) || ((offset) > ((max_size) - (size))))
-#define IS_PIPE_TYPE_CURSOR(pipe_ndx) \
- ((pipe_ndx >= (1 << MDSS_MDP_SSPP_CURSOR0)) &&\
- (pipe_ndx <= (1 << MDSS_MDP_SSPP_CURSOR1)))
-
-#define IS_PIPE_TYPE_DMA(pipe_ndx) \
- ((pipe_ndx >= (1 << MDSS_MDP_SSPP_DMA0)) &&\
- (pipe_ndx <= (1 << MDSS_MDP_SSPP_DMA1)))
-
#define SCALER_ENABLED \
(MDP_LAYER_ENABLE_PIXEL_EXT | MDP_LAYER_ENABLE_QSEED3_SCALE)
+
enum {
MDSS_MDP_RELEASE_FENCE = 0,
MDSS_MDP_RETIRE_FENCE,
@@ -57,25 +50,22 @@ enum layer_pipe_q {
LAYER_USES_DESTROY_PIPE_Q,
};
-static inline bool is_layer_right_blend(struct mdp_rect *left_blend,
- struct mdp_rect *right_blend, u32 left_lm_w)
-{
- return ((left_blend->x + left_blend->w) == right_blend->x) &&
- ((left_blend->x + left_blend->w) != left_lm_w) &&
- (left_blend->y == right_blend->y) &&
- (left_blend->h == right_blend->h);
-}
+enum layer_zorder_used {
+ LAYER_ZORDER_NONE = 0,
+ LAYER_ZORDER_LEFT = 1,
+ LAYER_ZORDER_RIGHT = 2,
+ LAYER_ZORDER_BOTH = 3,
+};
-static bool is_pipe_type_vig(struct mdss_data_type *mdata, u32 ndx)
+/*
+ * __layer_needs_src_split() - check needs source split configuration
+ * @layer: input layer
+ *
+ * return true if the layer should be used as source split
+ */
+static bool __layer_needs_src_split(struct mdp_input_layer *layer)
{
- u32 i;
-
- for (i = 0; i < mdata->nvig_pipes; i++) {
- if (mdata->vig_pipes[i].ndx == ndx)
- break;
- }
-
- return i < mdata->nvig_pipes;
+ return layer->flags & MDP_LAYER_ASYNC;
}
static int __async_update_position_check(struct msm_fb_data_type *mfd,
@@ -361,11 +351,16 @@ static int __validate_single_layer(struct msm_fb_data_type *mfd,
u32 bwc_enabled;
int ret;
bool is_vig_needed = false;
-
struct mdss_mdp_format_params *fmt;
struct mdss_mdp_mixer *mixer = NULL;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
+ int ptype = get_pipe_type_from_ndx(layer->pipe_ndx);
+
+ if (ptype == MDSS_MDP_PIPE_TYPE_INVALID) {
+ pr_err("Invalid pipe ndx=%d\n", layer->pipe_ndx);
+ return -EINVAL;
+ }
if ((layer->dst_rect.w > mdata->max_mixer_width) ||
(layer->dst_rect.h > MAX_DST_H)) {
@@ -407,7 +402,7 @@ static int __validate_single_layer(struct msm_fb_data_type *mfd,
}
}
- if (IS_PIPE_TYPE_CURSOR(layer->pipe_ndx)) {
+ if (ptype == MDSS_MDP_PIPE_TYPE_CURSOR) {
ret = __cursor_layer_check(mfd, layer);
if (ret)
goto exit_fail;
@@ -435,14 +430,14 @@ static int __validate_single_layer(struct msm_fb_data_type *mfd,
(layer->src_rect.h != layer->dst_rect.h))))
is_vig_needed = true;
- if (is_vig_needed && !is_pipe_type_vig(mdata, layer->pipe_ndx)) {
+ if (is_vig_needed && ptype != MDSS_MDP_PIPE_TYPE_VIG) {
pr_err("pipe is non-scalar ndx=%x\n", layer->pipe_ndx);
ret = -EINVAL;
goto exit_fail;
}
- if ((IS_PIPE_TYPE_DMA(layer->pipe_ndx) ||
- IS_PIPE_TYPE_CURSOR(layer->pipe_ndx)) &&
+ if (((ptype == MDSS_MDP_PIPE_TYPE_DMA) ||
+ (ptype == MDSS_MDP_PIPE_TYPE_CURSOR)) &&
(layer->dst_rect.h != layer->src_rect.h ||
layer->dst_rect.w != layer->src_rect.w)) {
pr_err("no scaling supported on dma/cursor pipe, pipe num:%d\n",
@@ -551,6 +546,13 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
}
/*
+ * unstage the pipe if its current z_order does not match the new
+ * z_order, since the client may have only called validate.
+ */
+ if (pipe->mixer_stage != layer->z_order)
+ mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
+
+ /*
* check if the overlay spans across two mixers and if source split is
* available. If yes, enable src_split_req flag so that during mixer
* staging, the same pipe will be staged on both layer mixers.
@@ -577,7 +579,7 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
pipe->is_right_blend = false;
}
- if (pipe->async_update && is_split_lm(mfd)) {
+ if (is_split_lm(mfd) && __layer_needs_src_split(layer)) {
pipe->src_split_req = true;
} else if ((mixer_mux == MDSS_MDP_MIXER_MUX_LEFT) &&
((layer->dst_rect.x + layer->dst_rect.w) > mixer->width)) {
@@ -1177,6 +1179,7 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
struct mdp_input_layer *layer, *prev_layer, *layer_list;
bool is_single_layer = false;
enum layer_pipe_q pipe_q_type;
+ enum layer_zorder_used zorder_used[MDSS_MDP_MAX_STAGE] = {0};
ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
if (ret)
@@ -1201,6 +1204,8 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
}
for (i = 0; i < layer_count; i++) {
+ enum layer_zorder_used z = LAYER_ZORDER_NONE;
+
layer = &layer_list[i];
dst_x = layer->dst_rect.x;
left_blend_pipe = NULL;
@@ -1217,9 +1222,12 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
*
* Following logic of selecting left_blend has an inherent
* assumption that layer list is sorted on dst_x within a
- * same z_order.
+ * same z_order. Otherwise it will fail based on z_order checks.
*/
if (prev_layer && (prev_layer->z_order == layer->z_order)) {
+ struct mdp_rect *left = &prev_layer->dst_rect;
+ struct mdp_rect *right = &layer->dst_rect;
+
if ((layer->flags & MDP_LAYER_ASYNC)
|| (prev_layer->flags & MDP_LAYER_ASYNC)) {
ret = -EINVAL;
@@ -1228,13 +1236,46 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
goto validate_exit;
}
- if (is_layer_right_blend(&prev_layer->dst_rect,
- &layer->dst_rect, left_lm_w))
+ /*
+ * check if layer is right blend by checking it's
+ * directly to the right.
+ */
+ if (((left->x + left->w) == right->x) &&
+ (left->y == right->y) && (left->h == right->h))
left_blend_pipe = pipe;
+
+ /*
+ * if the layer is right at the left lm boundary and
+ * src split is not required then right blend is not
+ * required as it will lie only on the left mixer
+ */
+ if (!__layer_needs_src_split(prev_layer) &&
+ ((left->x + left->w) == left_lm_w))
+ left_blend_pipe = NULL;
+ }
+
+ if (__layer_needs_src_split(layer))
+ z = LAYER_ZORDER_BOTH;
+ else if (dst_x >= left_lm_w)
+ z = LAYER_ZORDER_RIGHT;
+ else if ((dst_x + layer->dst_rect.w) <= left_lm_w)
+ z = LAYER_ZORDER_LEFT;
+ else
+ z = LAYER_ZORDER_BOTH;
+
+ if (!left_blend_pipe && (layer->z_order >= MDSS_MDP_MAX_STAGE ||
+ (z & zorder_used[layer->z_order]))) {
+ pr_err("invalid z_order=%d or already in use %x\n",
+ layer->z_order, z);
+ ret = -EINVAL;
+ layer->error_code = ret;
+ goto validate_exit;
+ } else {
+ zorder_used[layer->z_order] |= z;
}
if ((layer->dst_rect.x < left_lm_w) ||
- (layer->flags & MDP_LAYER_ASYNC)) {
+ __layer_needs_src_split(layer)) {
is_single_layer = (left_lm_layers == 1);
mixer_mux = MDSS_MDP_MIXER_MUX_LEFT;
} else {
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 00b12296b47e..57b6af00b8c1 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -1319,6 +1319,7 @@ int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd)
int rc;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
+ struct mdss_data_type *mdata = mfd_to_mdata(mfd);
if (mdss_mdp_ctl_is_power_on(ctl)) {
if (!mdp5_data->mdata->batfet)
@@ -1334,6 +1335,10 @@ int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd)
mfd->index);
return 0;
}
+ } else if (mdata->handoff_pending) {
+ pr_warn("fb%d: commit while splash handoff pending\n",
+ mfd->index);
+ return -EPERM;
}
pr_debug("starting fb%d overlay\n", mfd->index);
@@ -2619,6 +2624,30 @@ static void mdss_mdp_overlay_handle_vsync(struct mdss_mdp_ctl *ctl,
sysfs_notify_dirent(mdp5_data->vsync_event_sd);
}
+/* this function is called in IRQ context; keep processing to a minimum */
+static void mdss_mdp_overlay_handle_lineptr(struct mdss_mdp_ctl *ctl,
+ ktime_t t)
+{
+ struct mdss_overlay_private *mdp5_data = NULL;
+
+ if (!ctl || !ctl->mfd) {
+ pr_warn("Invalid handle for lineptr\n");
+ return;
+ }
+
+ mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+ if (!mdp5_data) {
+ pr_err("mdp5_data is NULL\n");
+ return;
+ }
+
+ pr_debug("lineptr irq on fb%d play_cnt=%d\n",
+ ctl->mfd->index, ctl->play_cnt);
+
+ mdp5_data->lineptr_time = t;
+ sysfs_notify_dirent(mdp5_data->lineptr_event_sd);
+}
+
int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
@@ -2891,6 +2920,78 @@ static ssize_t mdss_mdp_vsync_show_event(struct device *dev,
return ret;
}
+static ssize_t mdss_mdp_lineptr_show_event(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+ struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+ u64 lineptr_ticks;
+ int ret;
+
+ if (!mdp5_data->ctl ||
+ (!mdp5_data->ctl->panel_data->panel_info.cont_splash_enabled
+ && !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)))
+ return -EAGAIN;
+
+ lineptr_ticks = ktime_to_ns(mdp5_data->lineptr_time);
+
+ pr_debug("fb%d lineptr=%llu\n", mfd->index, lineptr_ticks);
+ ret = scnprintf(buf, PAGE_SIZE, "LINEPTR=%llu\n", lineptr_ticks);
+
+ return ret;
+}
+
+static ssize_t mdss_mdp_lineptr_show_value(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
+ struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+ int ret, lineptr_val;
+
+ if (!mdp5_data->ctl ||
+ (!mdp5_data->ctl->panel_data->panel_info.cont_splash_enabled
+ && !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)))
+ return -EAGAIN;
+
+ lineptr_val = mfd->panel_info->te.wr_ptr_irq;
+
+ ret = scnprintf(buf, PAGE_SIZE, "%d\n", lineptr_val);
+
+ return ret;
+}
+
+static ssize_t mdss_mdp_lineptr_set_value(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct fb_info *fbi = dev_get_drvdata(dev);
+ struct msm_fb_data_type *mfd = fbi->par;
+ struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+ int ret, lineptr_value;
+
+ ret = kstrtoint(buf, 10, &lineptr_value);
+ if (ret) {
+ pr_err("Invalid input for lineptr\n");
+ return -EINVAL;
+ }
+
+ if (!mdp5_data->ctl ||
+ (!mdp5_data->ctl->panel_data->panel_info.cont_splash_enabled
+ && !mdss_mdp_ctl_is_power_on(mdp5_data->ctl)))
+ return -EAGAIN;
+
+ if (!mdss_mdp_is_lineptr_supported(mdp5_data->ctl)) {
+ pr_err("lineptr not supported\n");
+ return -ENOTSUPP;
+ }
+
+ /* the new lineptr value will take effect in the next kickoff */
+ mfd->panel_info->te.wr_ptr_irq = lineptr_value;
+
+ return count;
+}
+
static ssize_t mdss_mdp_bl_show_event(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -3224,6 +3325,9 @@ static DEVICE_ATTR(msm_misr_en, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(msm_cmd_autorefresh_en, S_IRUGO | S_IWUSR,
mdss_mdp_cmd_autorefresh_show, mdss_mdp_cmd_autorefresh_store);
static DEVICE_ATTR(vsync_event, S_IRUGO, mdss_mdp_vsync_show_event, NULL);
+static DEVICE_ATTR(lineptr_event, S_IRUGO, mdss_mdp_lineptr_show_event, NULL);
+static DEVICE_ATTR(lineptr_value, S_IRUGO | S_IWUSR | S_IWGRP,
+ mdss_mdp_lineptr_show_value, mdss_mdp_lineptr_set_value);
static DEVICE_ATTR(ad, S_IRUGO | S_IWUSR | S_IWGRP, mdss_mdp_ad_show,
mdss_mdp_ad_store);
static DEVICE_ATTR(dyn_pu, S_IRUGO | S_IWUSR | S_IWGRP, mdss_mdp_dyn_pu_show,
@@ -3235,6 +3339,8 @@ static DEVICE_ATTR(ad_bl_event, S_IRUGO, mdss_mdp_ad_bl_show_event, NULL);
static struct attribute *mdp_overlay_sysfs_attrs[] = {
&dev_attr_vsync_event.attr,
+ &dev_attr_lineptr_event.attr,
+ &dev_attr_lineptr_value.attr,
&dev_attr_ad.attr,
&dev_attr_dyn_pu.attr,
&dev_attr_msm_misr_en.attr,
@@ -4597,6 +4703,9 @@ static struct mdss_mdp_ctl *__mdss_mdp_overlay_ctl_init(
mdss_mdp_recover_underrun_handler;
ctl->recover_underrun_handler.cmd_post_flush = false;
+ ctl->lineptr_handler.lineptr_handler =
+ mdss_mdp_overlay_handle_lineptr;
+
INIT_WORK(&ctl->remove_underrun_handler,
remove_underrun_vsync_handler);
@@ -4699,7 +4808,8 @@ static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
goto panel_on;
if (!mfd->panel_info->cont_splash_enabled &&
- (mfd->panel_info->type != DTV_PANEL)) {
+ (mfd->panel_info->type != DTV_PANEL) &&
+ !mfd->panel_info->is_pluggable) {
rc = mdss_mdp_overlay_start(mfd);
if (rc)
goto end;
@@ -5360,6 +5470,14 @@ int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
goto init_fail;
}
+ mdp5_data->lineptr_event_sd = sysfs_get_dirent(dev->kobj.sd,
+ "lineptr_event");
+ if (!mdp5_data->lineptr_event_sd) {
+ pr_err("lineptr_event sysfs lookup failed\n");
+ rc = -ENODEV;
+ goto init_fail;
+ }
+
mdp5_data->hist_event_sd = sysfs_get_dirent(dev->kobj.sd,
"hist_event");
if (!mdp5_data->hist_event_sd) {
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
index dcf815f74d5d..44eded98e785 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pipe.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
@@ -1394,46 +1394,33 @@ static void mdss_mdp_pipe_free(struct kref *kref)
static bool mdss_mdp_check_pipe_in_use(struct mdss_mdp_pipe *pipe)
{
int i;
- u32 mixercfg, mixercfg_extn, stage_off_mask, stage_off_extn_mask;
- u32 stage = BIT(0) | BIT(1) | BIT(2);
bool in_use = false;
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
struct mdss_mdp_ctl *ctl;
struct mdss_mdp_mixer *mixer;
- stage_off_mask = mdss_mdp_get_mixer_mask(pipe->num, stage);
- stage_off_extn_mask = mdss_mdp_get_mixer_extn_mask(pipe->num, stage);
-
for (i = 0; i < mdata->nctl; i++) {
ctl = mdata->ctl_off + i;
if (!ctl || !ctl->ref_cnt)
continue;
mixer = ctl->mixer_left;
- if (mixer && mixer->rotator_mode)
+ if (!mixer || mixer->rotator_mode)
continue;
- mixercfg = mdss_mdp_get_mixercfg(mixer, false);
- mixercfg_extn = mdss_mdp_get_mixercfg(mixer, true);
- if ((mixercfg & stage_off_mask) ||
- (mixercfg_extn & stage_off_extn_mask)) {
- pr_err("IN USE: mixer=%d pipe=%d mcfg:0x%x mask:0x%x mcfg_extn:0x%x mask_ext:0x%x\n",
- mixer->num, pipe->num,
- mixercfg, stage_off_mask,
- mixercfg_extn, stage_off_extn_mask);
+ if (mdss_mdp_mixer_reg_has_pipe(mixer, pipe)) {
+ in_use = true;
+ pr_err("IN USE: pipe=%d mixer=%d\n",
+ pipe->num, mixer->num);
MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
"dbg_bus", "vbif_dbg_bus", "panic");
}
mixer = ctl->mixer_right;
- mixercfg = mdss_mdp_get_mixercfg(mixer, false);
- mixercfg_extn = mdss_mdp_get_mixercfg(mixer, true);
- if ((mixercfg & stage_off_mask) ||
- (mixercfg_extn & stage_off_extn_mask)) {
- pr_err("IN USE: mixer=%d pipe=%d mcfg:0x%x mask:0x%x mcfg_extn:0x%x mask_ext:0x%x\n",
- mixer->num, pipe->num,
- mixercfg, stage_off_mask,
- mixercfg_extn, stage_off_extn_mask);
+ if (mixer && mdss_mdp_mixer_reg_has_pipe(mixer, pipe)) {
+ in_use = true;
+ pr_err("IN USE: pipe=%d mixer=%d\n",
+ pipe->num, mixer->num);
MDSS_XLOG_TOUT_HANDLER("mdp", "vbif", "vbif_nrt",
"dbg_bus", "vbif_dbg_bus", "panic");
}
@@ -2035,7 +2022,7 @@ static int mdss_mdp_format_setup(struct mdss_mdp_pipe *pipe)
int mdss_mdp_pipe_addr_setup(struct mdss_data_type *mdata,
struct mdss_mdp_pipe *head, u32 *offsets, u32 *ftch_id, u32 *xin_id,
- u32 type, u32 num_base, u32 len, u8 priority_base)
+ u32 type, const int *pnums, u32 len, u8 priority_base)
{
u32 i;
@@ -2048,8 +2035,8 @@ int mdss_mdp_pipe_addr_setup(struct mdss_data_type *mdata,
head[i].type = type;
head[i].ftch_id = ftch_id[i];
head[i].xin_id = xin_id[i];
- head[i].num = i + num_base;
- head[i].ndx = BIT(i + num_base);
+ head[i].num = pnums[i];
+ head[i].ndx = BIT(pnums[i]);
head[i].priority = i + priority_base;
head[i].base = mdata->mdss_io.base + offsets[i];
pr_info("type:%d ftchid:%d xinid:%d num:%d ndx:0x%x prio:%d\n",
@@ -2152,6 +2139,7 @@ static void mdss_mdp_set_ot_limit_pipe(struct mdss_mdp_pipe *pipe)
ot_params.is_rot = pipe->mixer_left->rotator_mode;
ot_params.is_wb = ctl->intf_num == MDSS_MDP_NO_INTF;
ot_params.is_yuv = pipe->src_fmt->is_yuv;
+ ot_params.frame_rate = pipe->frame_rate;
/* rotator read uses nrt vbif */
if (mdss_mdp_is_nrt_vbif_base_defined(ctl->mdata) &&
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index f74e46dccdb9..6a4e31038d98 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -407,7 +407,8 @@ static struct mdss_pp_res_type *mdss_pp_res;
static u32 pp_hist_read(char __iomem *v_addr,
struct pp_hist_col_info *hist_info);
-static int pp_hist_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix);
+static int pp_hist_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix,
+ struct pp_sts_type *pp_sts);
static int pp_hist_disable(struct pp_hist_col_info *hist_info);
static void pp_update_pcc_regs(char __iomem *addr,
struct mdp_pcc_cfg_data *cfg_ptr);
@@ -506,6 +507,8 @@ static inline int pp_validate_dspp_mfd_block(struct msm_fb_data_type *mfd,
static int pp_mfd_release_all(struct msm_fb_data_type *mfd);
static int pp_mfd_ad_release_all(struct msm_fb_data_type *mfd);
static int mdss_mdp_ad_ipc_reset(struct msm_fb_data_type *mfd);
+static int pp_get_driver_ops(struct mdp_pp_driver_ops *ops);
+
static u32 last_sts, last_state;
static inline void mdss_mdp_pp_get_dcm_state(struct mdss_mdp_pipe *pipe,
@@ -529,6 +532,26 @@ inline int linear_map(int in, int *out, int in_max, int out_max)
}
+/**
+ * __get_hist_pipe() - get a pipe only if histogram is supported on it
+ * @pnum: pipe number desired
+ *
+ * returns the pipe with id only if the pipe supports sspp histogram
+ */
+static inline struct mdss_mdp_pipe *__get_hist_pipe(int pnum)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ enum mdss_mdp_pipe_type ptype;
+
+ ptype = get_pipe_type_from_num(pnum);
+
+ /* only VIG pipes support histogram */
+ if (ptype != MDSS_MDP_PIPE_TYPE_VIG)
+ return NULL;
+
+ return mdss_mdp_pipe_get(mdata, BIT(pnum));
+}
+
int mdss_mdp_csc_setup_data(u32 block, u32 blk_idx, struct mdp_csc_cfg *data)
{
int i, ret = 0;
@@ -1026,7 +1049,8 @@ static int pp_vig_pipe_setup(struct mdss_mdp_pipe *pipe, u32 *op)
}
/* Histogram collection enabled checked inside pp_hist_setup */
- pp_hist_setup(op, MDSS_PP_SSPP_CFG | pipe->num, pipe->mixer_left);
+ pp_hist_setup(op, MDSS_PP_SSPP_CFG | pipe->num, pipe->mixer_left,
+ &pipe->pp_res.pp_sts);
if (!(pipe->flags & MDP_OVERLAY_PP_CFG_EN)) {
pr_debug("Overlay PP CFG enable not set\n");
@@ -1797,6 +1821,7 @@ int mdss_mdp_pipe_sspp_setup(struct mdss_mdp_pipe *pipe, u32 *op)
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
u32 current_opmode, location;
u32 dcm_state = DCM_UNINIT;
+ struct mdss_mdp_pipe *pipe_list;
if (pipe == NULL)
return -EINVAL;
@@ -1820,53 +1845,19 @@ int mdss_mdp_pipe_sspp_setup(struct mdss_mdp_pipe *pipe, u32 *op)
case MDSS_MDP_PIPE_TYPE_VIG:
pipe_base = mdata->mdp_base + MDSS_MDP_REG_IGC_VIG_BASE;
pipe_cnt = mdata->nvig_pipes;
+ pipe_list = mdata->vig_pipes;
location = SSPP_VIG;
- switch (pipe->num) {
- case MDSS_MDP_SSPP_VIG0:
- pipe_num = 0;
- break;
- case MDSS_MDP_SSPP_VIG1:
- pipe_num = 1;
- break;
- case MDSS_MDP_SSPP_VIG2:
- pipe_num = 2;
- break;
- case MDSS_MDP_SSPP_VIG3:
- pipe_num = 3;
- break;
- default:
- pr_err("Invalid pipe num %d pipe type %d\n",
- pipe->num, pipe->type);
- return -EINVAL;
- }
break;
case MDSS_MDP_PIPE_TYPE_RGB:
pipe_base = mdata->mdp_base + MDSS_MDP_REG_IGC_RGB_BASE;
pipe_cnt = mdata->nrgb_pipes;
+ pipe_list = mdata->rgb_pipes;
location = SSPP_RGB;
- switch (pipe->num) {
- case MDSS_MDP_SSPP_RGB0:
- pipe_num = 0;
- break;
- case MDSS_MDP_SSPP_RGB1:
- pipe_num = 1;
- break;
- case MDSS_MDP_SSPP_RGB2:
- pipe_num = 2;
- break;
- case MDSS_MDP_SSPP_RGB3:
- pipe_num = 3;
- break;
- default:
- pr_err("Invalid pipe num %d pipe type %d\n",
- pipe->num, pipe->type);
- return -EINVAL;
- }
break;
case MDSS_MDP_PIPE_TYPE_DMA:
pipe_base = mdata->mdp_base + MDSS_MDP_REG_IGC_DMA_BASE;
- pipe_num = pipe->num - MDSS_MDP_SSPP_DMA0;
pipe_cnt = mdata->ndma_pipes;
+ pipe_list = mdata->dma_pipes;
location = SSPP_DMA;
break;
case MDSS_MDP_PIPE_TYPE_CURSOR:
@@ -1877,6 +1868,17 @@ int mdss_mdp_pipe_sspp_setup(struct mdss_mdp_pipe *pipe, u32 *op)
return -EINVAL;
}
+ for (pipe_num = 0; pipe_num < pipe_cnt; pipe_num++) {
+ if (pipe == (pipe_list + pipe_num))
+ break;
+ }
+
+ if (pipe_num == pipe_cnt) {
+ pr_err("Invalid pipe num %d pipe type %d\n",
+ pipe->num, pipe->type);
+ return -EINVAL;
+ }
+
if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_IGC_CFG) {
flags |= PP_FLAGS_DIRTY_IGC;
if (!pp_ops[IGC].pp_set_config) {
@@ -2002,11 +2004,12 @@ static char __iomem *mdss_mdp_get_dspp_addr_off(u32 dspp_num)
}
/* Assumes that function will be called from within clock enabled space*/
-static int pp_hist_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix)
+static int pp_hist_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix,
+ struct pp_sts_type *pp_sts)
{
- int ret = -EINVAL;
+ int ret = 0;
char __iomem *base;
- u32 op_flags;
+ u32 op_flags = 0, block_type = 0;
struct mdss_mdp_pipe *pipe;
struct pp_hist_col_info *hist_info;
unsigned long flag;
@@ -2019,6 +2022,7 @@ static int pp_hist_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix)
intr_mask = 1;
if (mix && (PP_LOCAT(block) == MDSS_PP_DSPP_CFG)) {
/* HIST_EN */
+ block_type = DSPP;
op_flags = BIT(16);
hist_info = &mdss_pp_res->dspp_hist[mix->num];
base = mdss_mdp_get_dspp_addr_off(PP_BLOCK(block));
@@ -2027,11 +2031,13 @@ static int pp_hist_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix)
goto error;
}
} else if (PP_LOCAT(block) == MDSS_PP_SSPP_CFG &&
+ (pp_driver_ops.is_sspp_hist_supp) &&
(pp_driver_ops.is_sspp_hist_supp())) {
- pipe = mdss_mdp_pipe_get(mdata, BIT(PP_BLOCK(block)));
+ block_type = SSPP_VIG;
+ pipe = __get_hist_pipe(PP_BLOCK(block));
if (IS_ERR_OR_NULL(pipe)) {
pr_debug("pipe DNE (%d)\n",
- (u32) BIT(PP_BLOCK(block)));
+ (u32) PP_BLOCK(block));
ret = -ENODEV;
goto error;
}
@@ -2040,18 +2046,38 @@ static int pp_hist_setup(u32 *op, u32 block, struct mdss_mdp_mixer *mix)
base = pipe->base;
mdss_mdp_pipe_unmap(pipe);
} else {
+ ret = -EINVAL;
goto error;
}
mutex_lock(&hist_info->hist_mutex);
spin_lock_irqsave(&hist_info->hist_lock, flag);
- if (hist_info->col_en) {
+ /*
+ * Set histogram interrupt if histogram collection is enabled. The
+ * interrupt register offsets are the same across different mdss
+ * versions so far, hence mdss_mdp_hist_irq_set_mask is used for
+ * all the mdss versions.
+ */
+ if (hist_info->col_en)
mdss_mdp_hist_irq_set_mask(intr_mask << hist_info->intr_shift);
+ /*
+ * Starting from msmcobalt, the histogram enable bit has been moved
+ * from DSPP opmode register to PA_HIST opmode register, hence we need
+ * to update the histogram enable bit differently based on mdss version.
+ * If HIST pp_set_config is defined, we will enable or disable the
+ * hist_en bit in PA_HIST opmode register inside HIST pp_set_config
+ * function; else, we only need to add the hist_en bit to the *op when
+ * histogram collection is enable, and *op will be passed to
+ * pp_dspp_setup to update the DSPP opmode register.
+ */
+ if (pp_ops[HIST].pp_set_config)
+ ret = pp_ops[HIST].pp_set_config(base, pp_sts, hist_info,
+ block_type);
+ else if (hist_info->col_en)
*op |= op_flags;
- }
+
spin_unlock_irqrestore(&hist_info->hist_lock, flag);
mutex_unlock(&hist_info->hist_mutex);
- ret = 0;
error:
return ret;
}
@@ -2159,7 +2185,7 @@ static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_mixer *mixer)
u32 ad_flags, flags, dspp_num, opmode = 0, ad_bypass;
struct mdp_pgc_lut_data *pgc_config;
struct pp_sts_type *pp_sts;
- char __iomem *base, *addr;
+ char __iomem *base, *addr = NULL;
int ret = 0;
struct mdss_data_type *mdata;
struct mdss_ad_info *ad = NULL;
@@ -2194,14 +2220,20 @@ static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_mixer *mixer)
(pp_driver_ops.gamut_clk_gate_en))
pp_driver_ops.gamut_clk_gate_en(base +
mdata->pp_block_off.dspp_gamut_off);
- ret = pp_hist_setup(&opmode, MDSS_PP_DSPP_CFG | dspp_num, mixer);
- if (ret)
- goto dspp_exit;
- if (disp_num < MDSS_BLOCK_DISP_NUM)
+ if (disp_num < MDSS_BLOCK_DISP_NUM) {
+ pp_sts = &mdss_pp_res->pp_disp_sts[disp_num];
+ pp_sts->side_sts = side;
+
+ ret = pp_hist_setup(&opmode, MDSS_PP_DSPP_CFG | dspp_num, mixer,
+ pp_sts);
+ if (ret)
+ goto dspp_exit;
+
flags = mdss_pp_res->pp_disp_flags[disp_num];
- else
+ } else {
flags = 0;
+ }
mixer_cnt = mdss_mdp_get_ctl_mixers(disp_num, mixer_id);
if (dspp_num < mdata->nad_cfgs && disp_num < mdata->nad_cfgs &&
@@ -2217,9 +2249,6 @@ static int pp_dspp_setup(u32 disp_num, struct mdss_mdp_mixer *mixer)
if ((!flags) && (!(opmode)) && (!ad_flags))
goto dspp_exit;
- pp_sts = &mdss_pp_res->pp_disp_sts[disp_num];
- pp_sts->side_sts = side;
-
if (flags & PP_FLAGS_DIRTY_PA) {
if (!pp_ops[PA].pp_set_config) {
if (mdata->mdp_rev >= MDSS_MDP_HW_REV_103) {
@@ -2471,7 +2500,7 @@ int mdss_mdp_pp_setup_locked(struct mdss_mdp_ctl *ctl)
IS_SIX_ZONE_DIRTY(flags, pa_v2_flags)));
if (mdata->pp_reg_bus_clt && max_bw_needed) {
ret = mdss_update_reg_bus_vote(mdata->pp_reg_bus_clt,
- VOTE_INDEX_80_MHZ);
+ VOTE_INDEX_HIGH);
if (ret)
pr_err("Updated reg_bus_scale failed, ret = %d", ret);
}
@@ -2769,8 +2798,7 @@ int mdss_mdp_pp_init(struct device *dev)
int i, ret = 0;
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
struct mdss_mdp_pipe *vig;
- struct pp_hist_col_info *hist;
- void *ret_ptr = NULL;
+ struct pp_hist_col_info *hist = NULL;
u32 ctl_off = 0;
if (!mdata)
@@ -2778,7 +2806,7 @@ int mdss_mdp_pp_init(struct device *dev)
mdata->pp_reg_bus_clt = mdss_reg_bus_vote_client_create("pp\0");
- if (IS_ERR_OR_NULL(mdata->pp_reg_bus_clt))
+ if (IS_ERR(mdata->pp_reg_bus_clt))
pr_err("bus client register failed\n");
mutex_lock(&mdss_pp_mutex);
@@ -2792,17 +2820,13 @@ int mdss_mdp_pp_init(struct device *dev)
if (mdss_mdp_pp_dt_parse(dev))
pr_info("No PP info in device tree\n");
- ret_ptr = pp_get_driver_ops(&pp_driver_ops);
- if (IS_ERR(ret_ptr)) {
+ ret = pp_get_driver_ops(&pp_driver_ops);
+ if (ret) {
pr_err("pp_get_driver_ops failed, ret=%d\n",
- (int) PTR_ERR(ret_ptr));
- ret = PTR_ERR(ret_ptr);
+ ret);
goto pp_exit;
- } else {
- mdss_pp_res->pp_data_res = ret_ptr;
- pp_ops = pp_driver_ops.pp_ops;
}
-
+ pp_ops = pp_driver_ops.pp_ops;
hist = devm_kzalloc(dev,
sizeof(struct pp_hist_col_info) *
mdata->ndspp,
@@ -2851,7 +2875,7 @@ int mdss_mdp_pp_init(struct device *dev)
vig[i].pp_res.hist.intr_shift = 10;
if (pp_driver_ops.get_hist_offset) {
ret = pp_driver_ops.get_hist_offset(
- DSPP, &ctl_off);
+ SSPP_VIG, &ctl_off);
if (ret) {
pr_err("get_hist_offset ret %d\n",
ret);
@@ -4646,16 +4670,9 @@ int mdss_mdp_hist_start(struct mdp_histogram_start_req *req)
for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
if (!PP_ARG(i, req->block))
continue;
- pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ pipe = __get_hist_pipe(i);
if (IS_ERR_OR_NULL(pipe))
continue;
- if ((pipe->num > MDSS_MDP_SSPP_VIG2) &&
- (pipe->num != MDSS_MDP_SSPP_VIG3)) {
- ret = -EINVAL;
- pr_warn("Invalid Hist pipe (%d)\n", i);
- mdss_mdp_pipe_unmap(pipe);
- goto hist_stop_clk;
- }
hist_info = &pipe->pp_res.hist;
ret = pp_hist_enable(hist_info, req, NULL);
intr_mask = 1 << hist_info->intr_shift;
@@ -4780,16 +4797,10 @@ int mdss_mdp_hist_stop(u32 block)
for (i = 0; i < MDSS_PP_ARG_NUM; i++) {
if (!PP_ARG(i, block))
continue;
- pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ pipe = __get_hist_pipe(i);
if (IS_ERR_OR_NULL(pipe)) {
pr_warn("Invalid Hist pipe (%d)\n", i);
continue;
- } else if ((pipe->num > MDSS_MDP_SSPP_VIG2) &&
- (pipe->num != MDSS_MDP_SSPP_VIG3)) {
- mdss_mdp_pipe_unmap(pipe);
- pr_warn("Invalid Hist pipe (%d) pipe->num (%d)\n",
- i, pipe->num);
- continue;
}
hist_info = &pipe->pp_res.hist;
ret = pp_hist_disable(hist_info);
@@ -4820,7 +4831,7 @@ hist_stop_clk:
}
/**
- * mdss_mdp_hist_intr_req() - Request changes the histogram interupts
+ * mdss_mdp_hist_intr_req() - Request changes the histogram interrupts
* @intr: structure containting state of interrupt register
* @bits: the bits on interrupt register that should be changed
* @en: true if bits should be set, false if bits should be cleared
@@ -5142,7 +5153,7 @@ int mdss_mdp_hist_collect(struct mdp_histogram_data *hist)
}
}
- pipe = mdss_mdp_pipe_get(mdata, BIT(pipe_num));
+ pipe = __get_hist_pipe(pipe_num);
if (IS_ERR_OR_NULL(pipe)) {
pr_warn("Invalid starting hist pipe, %d\n", pipe_num);
ret = -ENODEV;
@@ -5154,15 +5165,10 @@ int mdss_mdp_hist_collect(struct mdp_histogram_data *hist)
if (!PP_ARG(i, hist->block))
continue;
pipe_cnt++;
- pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ pipe = __get_hist_pipe(i);
if (IS_ERR_OR_NULL(pipe)) {
pr_warn("Invalid Hist pipe (%d)\n", i);
continue;
- } else if ((pipe->num > MDSS_MDP_SSPP_VIG2) &&
- (pipe->num != MDSS_MDP_SSPP_VIG3)) {
- mdss_mdp_pipe_unmap(pipe);
- pr_warn("Invalid Hist pipe (%d)\n", i);
- continue;
}
hist_info = &pipe->pp_res.hist;
mdss_mdp_pipe_unmap(pipe);
@@ -5171,15 +5177,10 @@ int mdss_mdp_hist_collect(struct mdp_histogram_data *hist)
if (!PP_ARG(i, hist->block))
continue;
pipe_cnt++;
- pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ pipe = __get_hist_pipe(i);
if (IS_ERR_OR_NULL(pipe)) {
pr_warn("Invalid Hist pipe (%d)\n", i);
continue;
- } else if ((pipe->num > MDSS_MDP_SSPP_VIG2) &&
- (pipe->num != MDSS_MDP_SSPP_VIG3)) {
- mdss_mdp_pipe_unmap(pipe);
- pr_warn("Invalid Hist pipe (%d)\n", i);
- continue;
}
hist_info = &pipe->pp_res.hist;
ctl_base = pipe->base;
@@ -5197,15 +5198,10 @@ int mdss_mdp_hist_collect(struct mdp_histogram_data *hist)
if (!PP_ARG(i, hist->block))
continue;
pipe_cnt++;
- pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ pipe = __get_hist_pipe(i);
if (IS_ERR_OR_NULL(pipe)) {
pr_warn("Invalid Hist pipe (%d)\n", i);
continue;
- } else if ((pipe->num > MDSS_MDP_SSPP_VIG2) &&
- (pipe->num != MDSS_MDP_SSPP_VIG3)) {
- mdss_mdp_pipe_unmap(pipe);
- pr_warn("Invalid Hist pipe (%d)\n", i);
- continue;
}
hist_info = &pipe->pp_res.hist;
mdss_mdp_pipe_unmap(pipe);
@@ -5233,7 +5229,7 @@ int mdss_mdp_hist_collect(struct mdp_histogram_data *hist)
for (i = pipe_num; i < MDSS_PP_ARG_NUM; i++) {
if (!PP_ARG(i, hist->block))
continue;
- pipe = mdss_mdp_pipe_get(mdata, BIT(i));
+ pipe = __get_hist_pipe(i);
if (IS_ERR_OR_NULL(pipe)) {
pr_warn("Invalid Hist pipe (%d)\n", i);
continue;
@@ -7304,3 +7300,43 @@ static inline int pp_validate_dspp_mfd_block(struct msm_fb_data_type *mfd,
return 0;
}
+
+static int pp_get_driver_ops(struct mdp_pp_driver_ops *ops)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ int ret = 0;
+ void *pp_cfg = NULL;
+
+ switch (mdata->mdp_rev) {
+ case MDSS_MDP_HW_REV_107:
+ case MDSS_MDP_HW_REV_107_1:
+ case MDSS_MDP_HW_REV_107_2:
+ case MDSS_MDP_HW_REV_114:
+ case MDSS_MDP_HW_REV_115:
+ case MDSS_MDP_HW_REV_116:
+ pp_cfg = pp_get_driver_ops_v1_7(ops);
+ if (IS_ERR_OR_NULL(pp_cfg))
+ ret = -EINVAL;
+ else
+ mdss_pp_res->pp_data_v1_7 = pp_cfg;
+ break;
+ case MDSS_MDP_HW_REV_300:
+ case MDSS_MDP_HW_REV_301:
+ pp_cfg = pp_get_driver_ops_v3(ops);
+ if (IS_ERR_OR_NULL(pp_cfg)) {
+ ret = -EINVAL;
+ } else {
+ mdss_pp_res->pp_data_v1_7 = pp_cfg;
+ /* Currently all caching data is used from v17 for V3
+ * hence setting the pointer to NULL. Will be used if we
+ * have to add any caching specific to V3.
+ */
+ mdss_pp_res->pp_data_v3 = NULL;
+ }
+ break;
+ default:
+ memset(ops, 0, sizeof(struct mdp_pp_driver_ops));
+ break;
+ }
+ return ret;
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.h b/drivers/video/fbdev/msm/mdss_mdp_pp.h
index acdebb7d43cf..31ddc7d49705 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.h
@@ -158,14 +158,16 @@ struct mdss_pp_res_type {
/* physical info */
struct pp_hist_col_info *dspp_hist;
/*
- * The pp_data_res will be a pointer to newer MDP revisions of the
+ * The pp_data_v1_7 will be a pointer to newer MDP revisions of the
* pp_res, which will hold the cfg_payloads of each feature in a single
* struct.
*/
- void *pp_data_res;
+ void *pp_data_v1_7;
+ void *pp_data_v3;
};
-void *pp_get_driver_ops(struct mdp_pp_driver_ops *ops);
+void *pp_get_driver_ops_v1_7(struct mdp_pp_driver_ops *ops);
+void *pp_get_driver_ops_v3(struct mdp_pp_driver_ops *ops);
static inline void pp_sts_set_split_bits(u32 *sts, u32 bits)
{
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
index 7769a8fbf644..d1b3f1a89812 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
@@ -112,12 +112,12 @@ static int pp_hist_lut_cache_params_v1_7(struct mdp_hist_lut_data *config,
pr_err("invalid config block %d\n", config->block);
return -EINVAL;
}
- if (!mdss_pp_res->pp_data_res) {
- pr_err("invalid pp_data_res %p\n", mdss_pp_res->pp_data_res);
+ if (!mdss_pp_res->pp_data_v1_7) {
+ pr_err("invalid pp_data_v1_7 %p\n", mdss_pp_res->pp_data_v1_7);
return -EINVAL;
}
- res_cache = mdss_pp_res->pp_data_res;
+ res_cache = mdss_pp_res->pp_data_v1_7;
if (config->ops & MDP_PP_OPS_READ) {
pr_err("read op is not supported\n");
return -EINVAL;
@@ -295,12 +295,12 @@ int pp_dither_cache_params_v1_7(struct mdp_dither_cfg_data *config,
pr_err("invalid config block %d\n", config->block);
return -EINVAL;
}
- if (!mdss_pp_res->pp_data_res) {
- pr_err("invalid pp_data_res %p\n", mdss_pp_res->pp_data_res);
+ if (!mdss_pp_res->pp_data_v1_7) {
+ pr_err("invalid pp_data_v1_7 %p\n", mdss_pp_res->pp_data_v1_7);
return -EINVAL;
}
- res_cache = mdss_pp_res->pp_data_res;
+ res_cache = mdss_pp_res->pp_data_v1_7;
if ((config->flags & MDSS_PP_SPLIT_MASK) == MDSS_PP_SPLIT_MASK) {
pr_warn("Can't set both split bits\n");
@@ -397,11 +397,11 @@ static int pp_gamut_cache_params_v1_7(struct mdp_gamut_cfg_data *config,
pr_err("invalid config block %d\n", config->block);
return -EINVAL;
}
- if (!mdss_pp_res->pp_data_res) {
- pr_err("invalid pp_data_res %p\n", mdss_pp_res->pp_data_res);
+ if (!mdss_pp_res->pp_data_v1_7) {
+ pr_err("invalid pp_data_v1_7 %p\n", mdss_pp_res->pp_data_v1_7);
return -EINVAL;
}
- res_cache = mdss_pp_res->pp_data_res;
+ res_cache = mdss_pp_res->pp_data_v1_7;
if (config->flags & MDP_PP_OPS_READ) {
pr_err("read op is not supported\n");
return -EINVAL;
@@ -646,12 +646,12 @@ static int pp_pcc_cache_params_v1_7(struct mdp_pcc_cfg_data *config,
pr_err("invalid config block %d\n", config->block);
return -EINVAL;
}
- if (!mdss_pp_res->pp_data_res) {
- pr_err("invalid pp_data_res %p\n", mdss_pp_res->pp_data_res);
+ if (!mdss_pp_res->pp_data_v1_7) {
+ pr_err("invalid pp_data_v1_7 %p\n", mdss_pp_res->pp_data_v1_7);
return -EINVAL;
}
- res_cache = mdss_pp_res->pp_data_res;
+ res_cache = mdss_pp_res->pp_data_v1_7;
if (config->ops & MDP_PP_OPS_READ) {
pr_err("read op is not supported\n");
return -EINVAL;
@@ -744,11 +744,11 @@ static int pp_igc_lut_cache_params_v1_7(struct mdp_igc_lut_data *config,
pr_err("invalid config block %d\n", config->block);
return -EINVAL;
}
- if (!mdss_pp_res->pp_data_res) {
- pr_err("invalid pp_data_res %p\n", mdss_pp_res->pp_data_res);
+ if (!mdss_pp_res->pp_data_v1_7) {
+ pr_err("invalid pp_data_v1_7 %p\n", mdss_pp_res->pp_data_v1_7);
return -EINVAL;
}
- res_cache = mdss_pp_res->pp_data_res;
+ res_cache = mdss_pp_res->pp_data_v1_7;
if (config->ops & MDP_PP_OPS_READ) {
pr_err("read op is not supported\n");
return -EINVAL;
@@ -1019,7 +1019,7 @@ static int pp_pgc_lut_cache_params_v1_7(struct mdp_pgc_lut_data *config,
pr_err("invalid disp_num %d\n", disp_num);
return -EINVAL;
}
- res_cache = mdss_pp_res->pp_data_res;
+ res_cache = mdss_pp_res->pp_data_v1_7;
if (!res_cache) {
pr_err("invalid resource payload\n");
return -EINVAL;
@@ -1139,12 +1139,12 @@ static int pp_pa_cache_params_v1_7(struct mdp_pa_v2_cfg_data *config,
return -EINVAL;
}
- if (!mdss_pp_res->pp_data_res) {
- pr_err("Invalid pp_data_res %p\n", mdss_pp_res->pp_data_res);
+ if (!mdss_pp_res->pp_data_v1_7) {
+ pr_err("Invalid pp_data_v1_7 %p\n", mdss_pp_res->pp_data_v1_7);
return -EINVAL;
}
- res_cache = mdss_pp_res->pp_data_res;
+ res_cache = mdss_pp_res->pp_data_v1_7;
if (config->flags & MDP_PP_OPS_READ) {
pr_err("Read op is not supported\n");
return -EINVAL;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_common.c b/drivers/video/fbdev/msm/mdss_mdp_pp_common.c
new file mode 100644
index 000000000000..7742b5e4ad0c
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_common.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+#include "mdss_mdp_pp_common.h"
+
+void pp_pa_set_sts(struct pp_sts_type *pp_sts,
+ struct mdp_pa_data_v1_7 *pa_data,
+ int enable_flag, int block_type)
+{
+ if (!pp_sts) {
+ pr_err("invalid input pp_sts %p\n", pp_sts);
+ return;
+ }
+
+ pp_sts->pa_sts = 0;
+
+ if (enable_flag & MDP_PP_OPS_DISABLE) {
+ pp_sts->pa_sts &= ~PP_STS_ENABLE;
+ return;
+ } else if (enable_flag & MDP_PP_OPS_ENABLE) {
+ pp_sts->pa_sts |= PP_STS_ENABLE;
+ }
+
+ if (!pa_data) {
+ pr_err("invalid input pa_data %p\n", pa_data);
+ return;
+ }
+
+ /* Global HSV STS update */
+ if (pa_data->mode & MDP_PP_PA_HUE_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_HUE_MASK;
+ if (pa_data->mode & MDP_PP_PA_SAT_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_SAT_MASK;
+ if (pa_data->mode & MDP_PP_PA_VAL_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_VAL_MASK;
+ if (pa_data->mode & MDP_PP_PA_CONT_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_CONT_MASK;
+ if (pa_data->mode & MDP_PP_PA_SAT_ZERO_EXP_EN)
+ pp_sts->pa_sts |= PP_STS_PA_SAT_ZERO_EXP_EN;
+
+ /* Memory Protect STS update */
+ if (pa_data->mode & MDP_PP_PA_MEM_PROT_HUE_EN)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_HUE_EN;
+ if (pa_data->mode & MDP_PP_PA_MEM_PROT_SAT_EN)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_SAT_EN;
+ if (pa_data->mode & MDP_PP_PA_MEM_PROT_VAL_EN)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_VAL_EN;
+ if (pa_data->mode & MDP_PP_PA_MEM_PROT_CONT_EN)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_CONT_EN;
+ if (pa_data->mode & MDP_PP_PA_MEM_PROT_BLEND_EN)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_BLEND_EN;
+ if ((block_type == DSPP) &&
+ (pa_data->mode & MDP_PP_PA_MEM_PROT_SIX_EN))
+ pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_SIX_EN;
+
+ /* Memory Color STS update */
+ if (pa_data->mode & MDP_PP_PA_MEM_COL_SKIN_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_COL_SKIN_MASK;
+ if (pa_data->mode & MDP_PP_PA_MEM_COL_SKY_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_COL_SKY_MASK;
+ if (pa_data->mode & MDP_PP_PA_MEM_COL_FOL_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_MEM_COL_FOL_MASK;
+
+ /* Six Zone STS update */
+ if (block_type == DSPP) {
+ if (pa_data->mode & MDP_PP_PA_SIX_ZONE_HUE_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_HUE_MASK;
+ if (pa_data->mode & MDP_PP_PA_SIX_ZONE_SAT_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_SAT_MASK;
+ if (pa_data->mode & MDP_PP_PA_SIX_ZONE_VAL_MASK)
+ pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_VAL_MASK;
+
+ pp_sts_set_split_bits(&pp_sts->pa_sts, enable_flag);
+ }
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_common.h b/drivers/video/fbdev/msm/mdss_mdp_pp_common.h
new file mode 100644
index 000000000000..b835f8fc1621
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_common.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef MDSS_MDP_PP_COMMON_H
+#define MDSS_MDP_PP_COMMON_H
+
+#include "mdss_mdp.h"
+#include "mdss_mdp_pp.h"
+
+#define JUMP_REGISTERS_OFF(n) ((n) * (sizeof(uint32_t)))
+#define REG_MASK(n) ((BIT(n)) - 1)
+#define REG_MASK_SHIFT(n, shift) ((REG_MASK(n)) << (shift))
+
+void pp_pa_set_sts(struct pp_sts_type *pp_sts,
+ struct mdp_pa_data_v1_7 *pa_data,
+ int enable_flag, int block_type);
+#endif
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c b/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
index b6b2d2c3c0f0..86312366571d 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_v1_7.c
@@ -18,6 +18,7 @@
#include "mdss_fb.h"
#include "mdss_mdp.h"
#include "mdss_mdp_pp.h"
+#include "mdss_mdp_pp_common.h"
/* MDP v1.7 specific macros */
@@ -242,7 +243,7 @@ static int pp_dither_get_version(u32 *version);
static int pp_hist_lut_get_version(u32 *version);
static void pp_gamut_clock_gating_en(char __iomem *base_addr);
-void *pp_get_driver_ops(struct mdp_pp_driver_ops *ops)
+void *pp_get_driver_ops_v1_7(struct mdp_pp_driver_ops *ops)
{
if (!ops) {
pr_err("PP driver ops invalid %p\n", ops);
@@ -420,7 +421,7 @@ static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
hist_addr += 4;
}
if (copy_to_user(lut_data->data, data, sz)) {
- pr_err("faild to copy the hist_lut back to user\n");
+ pr_err("failed to copy the hist_lut back to user\n");
ret = -EFAULT;
}
kfree(data);
@@ -502,7 +503,8 @@ static int pp_hist_lut_set_config(char __iomem *base_addr,
}
if (lut_cfg_data->hist_lut_first)
pp_sts->enhist_sts |= PP_STS_PA_LUT_FIRST;
-
+ else
+ pp_sts->enhist_sts &= ~PP_STS_PA_LUT_FIRST;
writel_relaxed(1, swap_addr);
@@ -1269,74 +1271,6 @@ static void pp_pa_set_six_zone(char __iomem *base_addr,
*pa_hold_mask |= PA_HOLD_SIX_ZONE_MASK;
}
-static void pp_pa_set_sts(struct pp_sts_type *pp_sts,
- struct mdp_pa_data_v1_7 *pa_data,
- int enable_flag,
- int block_type)
-{
- pp_sts->pa_sts = 0;
-
- if (enable_flag & MDP_PP_OPS_ENABLE)
- pp_sts->pa_sts |= PP_STS_ENABLE;
- /* Disable takes priority over all flags */
- if (enable_flag & MDP_PP_OPS_DISABLE) {
- pp_sts->pa_sts &= ~PP_STS_ENABLE;
- return;
- }
-
- if (!pa_data) {
- pr_err("PA cfg payload is null, enable flag %d\n", enable_flag);
- return;
- }
-
- /* Global HSV STS update */
- if (pa_data->mode & MDP_PP_PA_HUE_MASK)
- pp_sts->pa_sts |= PP_STS_PA_HUE_MASK;
- if (pa_data->mode & MDP_PP_PA_SAT_MASK)
- pp_sts->pa_sts |= PP_STS_PA_SAT_MASK;
- if (pa_data->mode & MDP_PP_PA_VAL_MASK)
- pp_sts->pa_sts |= PP_STS_PA_VAL_MASK;
- if (pa_data->mode & MDP_PP_PA_CONT_MASK)
- pp_sts->pa_sts |= PP_STS_PA_CONT_MASK;
- if (pa_data->mode & MDP_PP_PA_SAT_ZERO_EXP_EN)
- pp_sts->pa_sts |= PP_STS_PA_SAT_ZERO_EXP_EN;
-
- /* Memory Protect STS update */
- if (pa_data->mode & MDP_PP_PA_MEM_PROT_HUE_EN)
- pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_HUE_EN;
- if (pa_data->mode & MDP_PP_PA_MEM_PROT_SAT_EN)
- pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_SAT_EN;
- if (pa_data->mode & MDP_PP_PA_MEM_PROT_VAL_EN)
- pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_VAL_EN;
- if (pa_data->mode & MDP_PP_PA_MEM_PROT_CONT_EN)
- pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_CONT_EN;
- if (pa_data->mode & MDP_PP_PA_MEM_PROT_BLEND_EN)
- pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_BLEND_EN;
- if ((block_type == DSPP) &&
- (pa_data->mode & MDP_PP_PA_MEM_PROT_SIX_EN))
- pp_sts->pa_sts |= PP_STS_PA_MEM_PROT_SIX_EN;
-
- /* Memory Color STS update */
- if (pa_data->mode & MDP_PP_PA_MEM_COL_SKIN_MASK)
- pp_sts->pa_sts |= PP_STS_PA_MEM_COL_SKIN_MASK;
- if (pa_data->mode & MDP_PP_PA_MEM_COL_SKY_MASK)
- pp_sts->pa_sts |= PP_STS_PA_MEM_COL_SKY_MASK;
- if (pa_data->mode & MDP_PP_PA_MEM_COL_FOL_MASK)
- pp_sts->pa_sts |= PP_STS_PA_MEM_COL_FOL_MASK;
-
- /* Six Zone STS update */
- if (block_type == DSPP) {
- if (pa_data->mode & MDP_PP_PA_SIX_ZONE_HUE_MASK)
- pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_HUE_MASK;
- if (pa_data->mode & MDP_PP_PA_SIX_ZONE_SAT_MASK)
- pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_SAT_MASK;
- if (pa_data->mode & MDP_PP_PA_SIX_ZONE_VAL_MASK)
- pp_sts->pa_sts |= PP_STS_PA_SIX_ZONE_VAL_MASK;
-
- pp_sts_set_split_bits(&pp_sts->pa_sts, enable_flag);
- }
-}
-
static int pp_pa_set_config(char __iomem *base_addr,
struct pp_sts_type *pp_sts, void *cfg_data,
u32 block_type)
@@ -1763,7 +1697,7 @@ static int pp_igc_set_config(char __iomem *base_addr,
lut_cfg_data = (struct mdp_igc_lut_data *) cfg_data;
if (lut_cfg_data->version != mdp_igc_v1_7 ||
!lut_cfg_data->cfg_payload) {
- pr_err("invalid igc version %d payload %p\n",
+ pr_err_once("invalid igc version %d payload %p\n",
lut_cfg_data->version, lut_cfg_data->cfg_payload);
return -EINVAL;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c b/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c
new file mode 100644
index 000000000000..1bc803452931
--- /dev/null
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_v3.c
@@ -0,0 +1,737 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/uaccess.h>
+#include "mdss_fb.h"
+#include "mdss_mdp.h"
+#include "mdss_mdp_pp.h"
+#include "mdss_mdp_pp_common.h"
+
+#define IGC_DSPP_OP_MODE_EN BIT(0)
+#define ENHIST_BIT_SHIFT 16
+/* PA related define */
+
+/* Offsets from DSPP/VIG base to PA block */
+#define PA_DSPP_BLOCK_REG_OFF 0x800
+#define PA_VIG_BLOCK_REG_OFF 0x1200
+
+/* Offsets to various subblocks from PA block
+ * in VIG/DSPP.
+ */
+#define PA_OP_MODE_REG_OFF 0x0
+#define PA_HIST_REG_OFF 0x4
+#define PA_LUTV_SWAP_REG_OFF 0x18
+#define PA_HSIC_REG_OFF 0x1C
+#define PA_DITHER_CTL_REG_OFF 0x2C
+#define PA_PWL_HOLD_REG_OFF 0x40
+
+/* Memory Color offsets */
+#define PA_MEM_COL_REG_OFF 0x80
+#define PA_MEM_SKIN_REG_OFF (PA_MEM_COL_REG_OFF)
+#define PA_MEM_SKY_REG_OFF (PA_MEM_SKIN_REG_OFF + \
+ JUMP_REGISTERS_OFF(5))
+#define PA_MEM_FOL_REG_OFF (PA_MEM_SKY_REG_OFF + \
+ JUMP_REGISTERS_OFF(5))
+#define PA_MEM_SKIN_ADJUST_P2_REG_OFF (PA_MEM_FOL_REG_OFF + \
+ JUMP_REGISTERS_OFF(5))
+#define PA_MEM_SKY_ADJUST_P2_REG_OFF (PA_MEM_SKIN_ADJUST_P2_REG_OFF + \
+ JUMP_REGISTERS_OFF(2))
+#define PA_MEM_FOL_ADJUST_P2_REG_OFF (PA_MEM_SKY_ADJUST_P2_REG_OFF + \
+ JUMP_REGISTERS_OFF(2))
+
+#define PA_SZONE_REG_OFF 0x100
+#define PA_LUTV_REG_OFF 0x200
+#define PA_HIST_RAM_REG_OFF 0x400
+
+/* histogram prototypes */
+static int pp_get_hist_offset(u32 block, u32 *ctl_off);
+static int pp_hist_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+static int pp_hist_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+
+/* PA LUT prototypes */
+static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+static int pp_hist_lut_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+static int pp_hist_lut_get_version(u32 *version);
+static void pp_hist_lut_opmode_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts);
+
+static int pp_pa_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+static int pp_pa_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+static int pp_pa_get_version(u32 *version);
+
+static int pp_dither_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num);
+static int pp_dither_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type);
+static int pp_dither_get_version(u32 *version);
+
+static void pp_opmode_config(int location, struct pp_sts_type *pp_sts,
+ u32 *opmode, int side);
+
+static void pp_pa_set_global_adj_regs(char __iomem *base_addr,
+ struct mdp_pa_data_v1_7 *pa_data, u32 flag);
+
+static void pp_pa_set_mem_col(char __iomem *base_addr,
+ struct mdp_pa_data_v1_7 *pa_data, u32 flags);
+
+static void pp_pa_set_six_zone(char __iomem *base_addr,
+ struct mdp_pa_data_v1_7 *pa_data,
+ u32 flags);
+
+static void pp_pa_opmode_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts);
+
+void *pp_get_driver_ops_v3(struct mdp_pp_driver_ops *ops)
+{
+ void *pp_cfg = NULL;
+
+ if (!ops) {
+ pr_err("PP driver ops invalid %p\n", ops);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pp_cfg = pp_get_driver_ops_v1_7(ops);
+ if (IS_ERR_OR_NULL(pp_cfg))
+ return NULL;
+ /* PA ops */
+ ops->pp_ops[PA].pp_set_config = pp_pa_set_config;
+ ops->pp_ops[PA].pp_get_config = pp_pa_get_config;
+ ops->pp_ops[PA].pp_get_version = pp_pa_get_version;
+
+ /* HIST_LUT ops */
+ ops->pp_ops[HIST_LUT].pp_set_config = pp_hist_lut_set_config;
+ ops->pp_ops[HIST_LUT].pp_get_config = pp_hist_lut_get_config;
+ ops->pp_ops[HIST_LUT].pp_get_version = pp_hist_lut_get_version;
+
+ /* HIST ops */
+ ops->pp_ops[HIST].pp_set_config = pp_hist_set_config;
+ ops->pp_ops[HIST].pp_get_config = pp_hist_get_config;
+ ops->pp_ops[HIST].pp_get_version = NULL;
+
+ /* Dither ops */
+ ops->pp_ops[DITHER].pp_set_config = pp_dither_set_config;
+ ops->pp_ops[DITHER].pp_get_config = pp_dither_get_config;
+ ops->pp_ops[DITHER].pp_get_version = pp_dither_get_version;
+
+ /* Set opmode pointers */
+ ops->pp_opmode_config = pp_opmode_config;
+
+ ops->get_hist_offset = pp_get_hist_offset;
+ ops->gamut_clk_gate_en = NULL;
+
+ return pp_cfg;
+}
+
+static int pp_get_hist_offset(u32 block, u32 *ctl_off)
+{
+ int ret = 0;
+
+ if (!ctl_off) {
+ pr_err("invalid params ctl_off %p\n", ctl_off);
+ return -EINVAL;
+ }
+
+ switch (block) {
+ case SSPP_VIG:
+ *ctl_off = PA_VIG_BLOCK_REG_OFF + PA_HIST_REG_OFF;
+ break;
+ case DSPP:
+ *ctl_off = PA_DSPP_BLOCK_REG_OFF + PA_HIST_REG_OFF;
+ break;
+ default:
+ pr_err("Invalid block type %d\n", block);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int pp_hist_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data, u32 block_type)
+{
+ u32 opmode = 0;
+ struct pp_hist_col_info *hist_info = NULL;
+
+ if (!base_addr || !cfg_data || !pp_sts) {
+ pr_err("invalid params base_addr %p cfg_data %p pp_sts_type %p\n",
+ base_addr, cfg_data, pp_sts);
+ return -EINVAL;
+ }
+
+ if (block_type != DSPP) {
+ pr_err("Invalid block type %d\n", block_type);
+ return -EINVAL;
+ }
+
+ hist_info = (struct pp_hist_col_info *)cfg_data;
+ opmode = readl_relaxed(base_addr + PA_DSPP_BLOCK_REG_OFF +
+ PA_OP_MODE_REG_OFF);
+ /* set the hist_en bit */
+ if (hist_info->col_en) {
+ pp_sts->hist_sts |= PP_STS_ENABLE;
+ opmode |= BIT(16);
+ } else {
+ pp_sts->hist_sts &= ~PP_STS_ENABLE;
+ opmode &= ~BIT(16);
+ }
+
+ writel_relaxed(opmode, base_addr + PA_DSPP_BLOCK_REG_OFF +
+ PA_OP_MODE_REG_OFF);
+ return 0;
+}
+
+static int pp_hist_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num)
+{
+ int i = 0;
+ u32 sum = 0;
+ struct pp_hist_col_info *hist_info = NULL;
+ char __iomem *hist_addr;
+
+ if (!base_addr || !cfg_data) {
+ pr_err("invalid params base_addr %p cfg_data %p\n",
+ base_addr, cfg_data);
+ return -EINVAL;
+ }
+
+ if (block_type != DSPP) {
+ pr_err("Invalid block type %d\n", block_type);
+ return -EINVAL;
+ }
+
+ hist_info = (struct pp_hist_col_info *) cfg_data;
+ hist_addr = base_addr + PA_DSPP_BLOCK_REG_OFF + PA_HIST_RAM_REG_OFF;
+
+ for (i = 0; i < HIST_V_SIZE; i++) {
+ hist_info->data[i] = readl_relaxed(hist_addr) & REG_MASK(24);
+ hist_addr += 0x4;
+ sum += hist_info->data[i];
+ }
+ hist_info->hist_cnt_read++;
+ return sum;
+}
+
+static int pp_hist_lut_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num)
+{
+
+ int ret = 0, i = 0;
+ char __iomem *hist_lut_addr;
+ u32 sz = 0, temp = 0, *data = NULL;
+ struct mdp_hist_lut_data_v1_7 *lut_data = NULL;
+ struct mdp_hist_lut_data *lut_cfg_data = NULL;
+
+ if (!base_addr || !cfg_data) {
+ pr_err("invalid params base_addr %p cfg_data %p\n",
+ base_addr, cfg_data);
+ return -EINVAL;
+ }
+
+ if (block_type != DSPP) {
+ pr_err("Invalid block type %d\n", block_type);
+ return -EINVAL;
+ }
+
+ lut_cfg_data = (struct mdp_hist_lut_data *) cfg_data;
+ if (!(lut_cfg_data->ops & MDP_PP_OPS_READ)) {
+ pr_err("read ops not set for hist_lut %d\n", lut_cfg_data->ops);
+ return 0;
+ }
+ if (lut_cfg_data->version != mdp_hist_lut_v1_7 ||
+ !lut_cfg_data->cfg_payload) {
+ pr_err("invalid hist_lut version %d payload %p\n",
+ lut_cfg_data->version, lut_cfg_data->cfg_payload);
+ return -EINVAL;
+ }
+ lut_data = lut_cfg_data->cfg_payload;
+ if (lut_data->len != ENHIST_LUT_ENTRIES) {
+ pr_err("invalid hist_lut len %d", lut_data->len);
+ return -EINVAL;
+ }
+ sz = ENHIST_LUT_ENTRIES * sizeof(u32);
+ if (!access_ok(VERIFY_WRITE, lut_data->data, sz)) {
+ pr_err("invalid lut address for hist_lut sz %d\n", sz);
+ return -EFAULT;
+ }
+
+ hist_lut_addr = base_addr + PA_DSPP_BLOCK_REG_OFF + PA_LUTV_REG_OFF;
+
+ data = kzalloc(sz, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ for (i = 0; i < ENHIST_LUT_ENTRIES; i += 2) {
+ temp = readl_relaxed(hist_lut_addr);
+ data[i] = temp & REG_MASK(10);
+ data[i + 1] =
+ (temp & REG_MASK_SHIFT(10, 16)) >> ENHIST_BIT_SHIFT;
+ hist_lut_addr += 4;
+ }
+ if (copy_to_user(lut_data->data, data, sz)) {
+ pr_err("failed to copy the hist_lut back to user\n");
+ ret = -EFAULT;
+ }
+ kfree(data);
+ return ret;
+}
+
+static int pp_hist_lut_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type)
+{
+ int ret = 0, i = 0;
+ u32 temp = 0;
+ struct mdp_hist_lut_data *lut_cfg_data = NULL;
+ struct mdp_hist_lut_data_v1_7 *lut_data = NULL;
+ char __iomem *hist_lut_addr = NULL, *swap_addr = NULL;
+
+ if (!base_addr || !cfg_data || !pp_sts) {
+ pr_err("invalid params base_addr %p cfg_data %p pp_sts_type %p\n",
+ base_addr, cfg_data, pp_sts);
+ return -EINVAL;
+ }
+
+ if (block_type != DSPP) {
+ pr_err("Invalid block type %d\n", block_type);
+ return -EINVAL;
+ }
+
+ lut_cfg_data = (struct mdp_hist_lut_data *) cfg_data;
+ if (lut_cfg_data->version != mdp_hist_lut_v1_7) {
+ pr_err("invalid hist_lut version %d\n", lut_cfg_data->version);
+ return -EINVAL;
+ }
+
+ if (!(lut_cfg_data->ops & ~(MDP_PP_OPS_READ))) {
+ pr_err("only read ops set for lut\n");
+ return ret;
+ }
+ if (lut_cfg_data->ops & MDP_PP_OPS_DISABLE ||
+ !(lut_cfg_data->ops & MDP_PP_OPS_WRITE)) {
+ pr_debug("non write ops set %d\n", lut_cfg_data->ops);
+ goto hist_lut_set_sts;
+ }
+ lut_data = lut_cfg_data->cfg_payload;
+ if (!lut_data) {
+ pr_err("invalid hist_lut cfg_payload %p\n", lut_data);
+ return -EINVAL;
+ }
+
+ if (lut_data->len != ENHIST_LUT_ENTRIES || !lut_data->data) {
+ pr_err("invalid hist_lut len %d data %p\n",
+ lut_data->len, lut_data->data);
+ return -EINVAL;
+ }
+
+ hist_lut_addr = base_addr + PA_DSPP_BLOCK_REG_OFF + PA_LUTV_REG_OFF;
+ swap_addr = base_addr + PA_DSPP_BLOCK_REG_OFF + PA_LUTV_SWAP_REG_OFF;
+
+ for (i = 0; i < ENHIST_LUT_ENTRIES; i += 2) {
+ temp = (lut_data->data[i] & REG_MASK(10)) |
+ ((lut_data->data[i + 1] & REG_MASK(10))
+ << ENHIST_BIT_SHIFT);
+
+ writel_relaxed(temp, hist_lut_addr);
+ hist_lut_addr += 4;
+ }
+
+ writel_relaxed(1, swap_addr);
+
+hist_lut_set_sts:
+ if (lut_cfg_data->ops & MDP_PP_OPS_DISABLE) {
+ pp_sts->enhist_sts &= ~(PP_STS_ENABLE | PP_STS_PA_LUT_FIRST);
+ } else if (lut_cfg_data->ops & MDP_PP_OPS_ENABLE) {
+ pp_sts->enhist_sts |= PP_STS_ENABLE;
+ if (lut_cfg_data->hist_lut_first)
+ pp_sts->enhist_sts |= PP_STS_PA_LUT_FIRST;
+ else
+ pp_sts->enhist_sts &= ~PP_STS_PA_LUT_FIRST;
+ }
+
+ pp_hist_lut_opmode_config(base_addr + PA_DSPP_BLOCK_REG_OFF, pp_sts);
+ return ret;
+}
+
+static int pp_hist_lut_get_version(u32 *version)
+{
+ if (!version) {
+ pr_err("invalid param version %p\n", version);
+ return -EINVAL;
+ }
+ *version = mdp_hist_lut_v1_7;
+ return 0;
+}
+
+static void pp_hist_lut_opmode_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts)
+{
+ u32 opmode = 0;
+
+ if (!base_addr || !pp_sts) {
+ pr_err("invalid params base_addr %p pp_sts_type %p\n",
+ base_addr, pp_sts);
+ return;
+ }
+ opmode = readl_relaxed(base_addr + PA_OP_MODE_REG_OFF);
+
+ /* set the hist_lutv_en and hist_lutv_first_en bits */
+ if (pp_sts->enhist_sts & PP_STS_ENABLE) {
+ opmode |= BIT(19) | BIT(20);
+ opmode |= (pp_sts->enhist_sts & PP_STS_PA_LUT_FIRST) ?
+ BIT(21) : 0;
+ } else {
+ opmode &= ~(BIT(19) | BIT(21));
+ if (!(pp_sts->pa_sts & PP_STS_ENABLE))
+ opmode &= ~BIT(20);
+ }
+
+ writel_relaxed(opmode, base_addr + PA_OP_MODE_REG_OFF);
+}
+
+static int pp_pa_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type)
+{
+ struct mdp_pa_v2_cfg_data *pa_cfg_data = NULL;
+ struct mdp_pa_data_v1_7 *pa_data = NULL;
+ char __iomem *block_addr = NULL;
+
+ if (!base_addr || !cfg_data || !pp_sts) {
+ pr_err("invalid params base_addr %p cfg_data %p pp_sts_type %p\n",
+ base_addr, cfg_data, pp_sts);
+ return -EINVAL;
+ }
+ if ((block_type != DSPP) && (block_type != SSPP_VIG)) {
+ pr_err("Invalid block type %d\n", block_type);
+ return -EINVAL;
+ }
+
+ pa_cfg_data = (struct mdp_pa_v2_cfg_data *) cfg_data;
+ if (pa_cfg_data->version != mdp_pa_v1_7) {
+ pr_err("invalid pa version %d\n", pa_cfg_data->version);
+ return -EINVAL;
+ }
+ if (!(pa_cfg_data->flags & ~(MDP_PP_OPS_READ))) {
+ pr_info("only read ops is set %d", pa_cfg_data->flags);
+ return 0;
+ }
+
+ block_addr = base_addr +
+ ((block_type == DSPP) ? PA_DSPP_BLOCK_REG_OFF :
+ PA_VIG_BLOCK_REG_OFF);
+
+ if (pa_cfg_data->flags & MDP_PP_OPS_DISABLE ||
+ !(pa_cfg_data->flags & MDP_PP_OPS_WRITE)) {
+ pr_debug("pa_cfg_data->flags = %d\n", pa_cfg_data->flags);
+ goto pa_set_sts;
+ }
+
+ pa_data = pa_cfg_data->cfg_payload;
+ if (!pa_data) {
+ pr_err("invalid payload for pa %p\n", pa_data);
+ return -EINVAL;
+ }
+
+ pp_pa_set_global_adj_regs(block_addr, pa_data, pa_cfg_data->flags);
+ pp_pa_set_mem_col(block_addr, pa_data, pa_cfg_data->flags);
+ if (block_type == DSPP)
+ pp_pa_set_six_zone(block_addr, pa_data, pa_cfg_data->flags);
+
+pa_set_sts:
+ pp_pa_set_sts(pp_sts, pa_data, pa_cfg_data->flags, block_type);
+ pp_pa_opmode_config(block_addr, pp_sts);
+
+ return 0;
+}
+
+static int pp_pa_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num)
+{
+ return -EINVAL;
+}
+
+static int pp_pa_get_version(u32 *version)
+{
+ if (!version) {
+ pr_err("invalid param version");
+ return -EINVAL;
+ }
+ *version = mdp_pa_v1_7;
+ return 0;
+}
+
+static int pp_dither_get_config(char __iomem *base_addr, void *cfg_data,
+ u32 block_type, u32 disp_num)
+{
+ return -EINVAL;
+}
+
+static int pp_dither_set_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts, void *cfg_data,
+ u32 block_type)
+{
+ return -EINVAL;
+}
+
+static int pp_dither_get_version(u32 *version)
+{
+ if (!version) {
+ pr_err("invalid param version");
+ return -EINVAL;
+ }
+ *version = mdp_dither_v1_7;
+ return 0;
+}
+
+static void pp_opmode_config(int location, struct pp_sts_type *pp_sts,
+ u32 *opmode, int side)
+{
+ if (!pp_sts || !opmode) {
+ pr_err("Invalid pp_sts %p or opmode %p\n", pp_sts, opmode);
+ return;
+ }
+ switch (location) {
+ case SSPP_DMA:
+ break;
+ case SSPP_VIG:
+ break;
+ case DSPP:
+ if (pp_sts_is_enabled(pp_sts->igc_sts, side))
+ *opmode |= IGC_DSPP_OP_MODE_EN;
+ break;
+ case LM:
+ if (pp_sts->argc_sts & PP_STS_ENABLE)
+ pr_debug("pgc in LM enabled\n");
+ break;
+ default:
+ pr_err("Invalid block type %d\n", location);
+ break;
+ }
+}
+
+static void pp_pa_set_global_adj_regs(char __iomem *base_addr,
+ struct mdp_pa_data_v1_7 *pa_data, u32 flags)
+{
+ char __iomem *addr = NULL;
+
+ addr = base_addr + PA_HSIC_REG_OFF;
+ if (flags & MDP_PP_PA_HUE_ENABLE)
+ writel_relaxed((pa_data->global_hue_adj &
+ REG_MASK(12)), addr);
+ addr += 4;
+ if (flags & MDP_PP_PA_SAT_ENABLE)
+ writel_relaxed((pa_data->global_sat_adj &
+ REG_MASK(16)), addr);
+ addr += 4;
+ if (flags & MDP_PP_PA_VAL_ENABLE)
+ writel_relaxed((pa_data->global_val_adj &
+ REG_MASK(8)), addr);
+ addr += 4;
+ if (flags & MDP_PP_PA_CONT_ENABLE)
+ writel_relaxed((pa_data->global_cont_adj &
+ REG_MASK(8)), addr);
+}
+
+static void pp_pa_set_mem_col(char __iomem *base_addr,
+ struct mdp_pa_data_v1_7 *pa_data, u32 flags)
+{
+ char __iomem *mem_col_base = NULL, *mem_col_p2 = NULL;
+ struct mdp_pa_mem_col_data_v1_7 *mem_col_data = NULL;
+ uint32_t mask = 0, hold = 0, hold_mask = 0;
+ uint32_t hold_curr = 0;
+
+ flags &= (MDP_PP_PA_SKIN_ENABLE | MDP_PP_PA_SKY_ENABLE |
+ MDP_PP_PA_FOL_ENABLE);
+ if (!flags)
+ return;
+ while (flags) {
+ if (flags & MDP_PP_PA_SKIN_ENABLE) {
+ flags &= ~MDP_PP_PA_SKIN_ENABLE;
+ mem_col_base = base_addr + PA_MEM_SKIN_REG_OFF;
+ mem_col_p2 = base_addr + PA_MEM_SKIN_ADJUST_P2_REG_OFF;
+ mem_col_data = &pa_data->skin_cfg;
+ hold |= pa_data->skin_cfg.sat_hold & REG_MASK(2);
+ hold |= (pa_data->skin_cfg.val_hold & REG_MASK(2))
+ << 2;
+ hold_mask |= REG_MASK(4);
+ } else if (flags & MDP_PP_PA_SKY_ENABLE) {
+ flags &= ~MDP_PP_PA_SKY_ENABLE;
+ mem_col_base = base_addr + PA_MEM_SKY_REG_OFF;
+ mem_col_p2 = base_addr + PA_MEM_SKY_ADJUST_P2_REG_OFF;
+ mem_col_data = &pa_data->sky_cfg;
+ hold |= (pa_data->sky_cfg.sat_hold & REG_MASK(2)) << 4;
+ hold |= (pa_data->sky_cfg.val_hold & REG_MASK(2)) << 6;
+ hold_mask |= REG_MASK_SHIFT(4, 4);
+ } else if (flags & MDP_PP_PA_FOL_ENABLE) {
+ flags &= ~MDP_PP_PA_FOL_ENABLE;
+ mem_col_base = base_addr + PA_MEM_FOL_REG_OFF;
+ mem_col_p2 = base_addr + PA_MEM_FOL_ADJUST_P2_REG_OFF;
+ mem_col_data = &pa_data->fol_cfg;
+ hold |= (pa_data->fol_cfg.sat_hold & REG_MASK(2)) << 8;
+ hold |= (pa_data->fol_cfg.val_hold & REG_MASK(2)) << 10;
+ hold_mask |= REG_MASK_SHIFT(4, 8);
+ } else {
+ break;
+ }
+ mask = REG_MASK_SHIFT(16, 16) | REG_MASK(11);
+ writel_relaxed((mem_col_data->color_adjust_p0 & mask),
+ mem_col_base);
+ mem_col_base += 4;
+ mask = U32_MAX;
+ writel_relaxed((mem_col_data->color_adjust_p1 & mask),
+ mem_col_base);
+ mem_col_base += 4;
+ mask = REG_MASK_SHIFT(11, 16) | REG_MASK(11);
+ writel_relaxed((mem_col_data->hue_region & mask),
+ mem_col_base);
+ mem_col_base += 4;
+ mask = REG_MASK(24);
+ writel_relaxed((mem_col_data->sat_region & mask),
+ mem_col_base);
+ mem_col_base += 4;
+ /* mask is same for val and sat */
+ writel_relaxed((mem_col_data->val_region & mask),
+ mem_col_base);
+ mask = U32_MAX;
+ writel_relaxed((mem_col_data->color_adjust_p2 & mask),
+ mem_col_p2);
+ mem_col_p2 += 4;
+ writel_relaxed((mem_col_data->blend_gain & mask),
+ mem_col_p2);
+ }
+ hold_curr = readl_relaxed(base_addr + PA_PWL_HOLD_REG_OFF) &
+ REG_MASK(16);
+ hold_curr &= ~hold_mask;
+ hold = hold_curr | (hold & hold_mask);
+ writel_relaxed(hold, (base_addr + PA_PWL_HOLD_REG_OFF));
+}
+
+static void pp_pa_set_six_zone(char __iomem *base_addr,
+ struct mdp_pa_data_v1_7 *pa_data,
+ u32 flags)
+{
+ char __iomem *addr = base_addr + PA_SZONE_REG_OFF;
+ uint32_t mask_p0 = 0, mask_p1 = 0, hold = 0, hold_mask = 0;
+ uint32_t hold_curr = 0;
+ int i = 0;
+
+ if (!(flags & MDP_PP_PA_SIX_ZONE_ENABLE))
+ return;
+
+ if (pa_data->six_zone_len != MDP_SIX_ZONE_LUT_SIZE ||
+ !pa_data->six_zone_curve_p0 ||
+ !pa_data->six_zone_curve_p1) {
+ pr_err("Invalid six zone data: len %d curve_p0 %p curve_p1 %p\n",
+ pa_data->six_zone_len,
+ pa_data->six_zone_curve_p0,
+ pa_data->six_zone_curve_p1);
+ return;
+ }
+ mask_p0 = REG_MASK(12);
+ mask_p1 = REG_MASK(12) | REG_MASK_SHIFT(12, 16);
+ writel_relaxed((pa_data->six_zone_curve_p1[0] & mask_p1), addr + 4);
+ /* Update the index to 0 and write value */
+ writel_relaxed((pa_data->six_zone_curve_p0[0] & mask_p0) | BIT(26),
+ addr);
+ for (i = 1; i < MDP_SIX_ZONE_LUT_SIZE; i++) {
+ writel_relaxed((pa_data->six_zone_curve_p1[i] & mask_p1),
+ addr + 4);
+ writel_relaxed((pa_data->six_zone_curve_p0[i] & mask_p0), addr);
+ }
+ addr += 8;
+ writel_relaxed(pa_data->six_zone_thresh, addr);
+ addr += 4;
+ writel_relaxed(pa_data->six_zone_adj_p0 & REG_MASK(16), addr);
+ addr += 4;
+ writel_relaxed(pa_data->six_zone_adj_p1, addr);
+
+ hold = (pa_data->six_zone_sat_hold & REG_MASK(2)) << 12;
+ hold |= (pa_data->six_zone_val_hold & REG_MASK(2)) << 14;
+ hold_mask = REG_MASK_SHIFT(4, 12);
+ hold_curr = readl_relaxed(base_addr + PA_PWL_HOLD_REG_OFF) &
+ REG_MASK(16);
+ hold_curr &= ~hold_mask;
+ hold = hold_curr | (hold & hold_mask);
+ writel_relaxed(hold, (base_addr + PA_PWL_HOLD_REG_OFF));
+}
+
+static void pp_pa_opmode_config(char __iomem *base_addr,
+ struct pp_sts_type *pp_sts)
+{
+ uint32_t opmode = 0;
+
+ /* set the PA bits */
+ if (pp_sts->pa_sts & PP_STS_ENABLE) {
+ opmode |= BIT(20);
+
+ if (pp_sts->pa_sts & PP_STS_PA_HUE_MASK)
+ opmode |= BIT(25);
+ if (pp_sts->pa_sts & PP_STS_PA_SAT_MASK)
+ opmode |= BIT(26);
+ if (pp_sts->pa_sts & PP_STS_PA_VAL_MASK)
+ opmode |= BIT(27);
+ if (pp_sts->pa_sts & PP_STS_PA_CONT_MASK)
+ opmode |= BIT(28);
+ if (pp_sts->pa_sts & PP_STS_PA_SAT_ZERO_EXP_EN)
+ opmode |= BIT(1);
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_SKIN_MASK)
+ opmode |= BIT(5);
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_FOL_MASK)
+ opmode |= BIT(6);
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_COL_SKY_MASK)
+ opmode |= BIT(7);
+ if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_HUE_MASK)
+ opmode |= BIT(29);
+ if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_SAT_MASK)
+ opmode |= BIT(30);
+ if (pp_sts->pa_sts & PP_STS_PA_SIX_ZONE_VAL_MASK)
+ opmode |= BIT(31);
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_HUE_EN)
+ opmode |= BIT(22);
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_SAT_EN)
+ opmode |= BIT(23);
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_VAL_EN)
+ opmode |= BIT(24);
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_CONT_EN)
+ opmode |= BIT(18);
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_BLEND_EN)
+ opmode |= BIT(3);
+ if (pp_sts->pa_sts & PP_STS_PA_MEM_PROT_SIX_EN)
+ opmode |= BIT(17);
+ }
+
+ /* reset hist_en, hist_lutv_en and hist_lutv_first_en
+  * bits based on the pp_sts
+  */
+ if (pp_sts->hist_sts & PP_STS_ENABLE)
+ opmode |= BIT(16);
+ if (pp_sts->enhist_sts & PP_STS_ENABLE)
+ opmode |= BIT(19) | BIT(20);
+ if (pp_sts->enhist_sts & PP_STS_PA_LUT_FIRST)
+ opmode |= BIT(21);
+
+ writel_relaxed(opmode, base_addr + PA_OP_MODE_REG_OFF);
+}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_util.c b/drivers/video/fbdev/msm/mdss_mdp_util.c
index c86f924d83db..d85d85204cb2 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_util.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_util.c
@@ -49,6 +49,10 @@ enum {
MDP_INTR_PING_PONG_1_RD_PTR,
MDP_INTR_PING_PONG_2_RD_PTR,
MDP_INTR_PING_PONG_3_RD_PTR,
+ MDP_INTR_PING_PONG_0_WR_PTR,
+ MDP_INTR_PING_PONG_1_WR_PTR,
+ MDP_INTR_PING_PONG_2_WR_PTR,
+ MDP_INTR_PING_PONG_3_WR_PTR,
MDP_INTR_WB_0,
MDP_INTR_WB_1,
MDP_INTR_WB_2,
@@ -83,6 +87,9 @@ static int mdss_mdp_intr2index(u32 intr_type, u32 intf_num)
case MDSS_MDP_IRQ_PING_PONG_RD_PTR:
index = MDP_INTR_PING_PONG_0_RD_PTR + intf_num;
break;
+ case MDSS_MDP_IRQ_PING_PONG_WR_PTR:
+ index = MDP_INTR_PING_PONG_0_WR_PTR + intf_num;
+ break;
case MDSS_MDP_IRQ_WB_ROT_COMP:
index = MDP_INTR_WB_0 + intf_num;
break;
@@ -216,6 +223,18 @@ irqreturn_t mdss_mdp_isr(int irq, void *ptr)
if (isr & MDSS_MDP_INTR_PING_PONG_3_RD_PTR)
mdss_mdp_intr_done(MDP_INTR_PING_PONG_3_RD_PTR);
+ if (isr & MDSS_MDP_INTR_PING_PONG_0_WR_PTR)
+ mdss_mdp_intr_done(MDP_INTR_PING_PONG_0_WR_PTR);
+
+ if (isr & MDSS_MDP_INTR_PING_PONG_1_WR_PTR)
+ mdss_mdp_intr_done(MDP_INTR_PING_PONG_1_WR_PTR);
+
+ if (isr & MDSS_MDP_INTR_PING_PONG_2_WR_PTR)
+ mdss_mdp_intr_done(MDP_INTR_PING_PONG_2_WR_PTR);
+
+ if (isr & MDSS_MDP_INTR_PING_PONG_3_WR_PTR)
+ mdss_mdp_intr_done(MDP_INTR_PING_PONG_3_WR_PTR);
+
if (isr & MDSS_MDP_INTR_INTF_0_VSYNC) {
mdss_mdp_intr_done(MDP_INTR_VSYNC_INTF_0);
mdss_misr_crc_collect(mdata, DISPLAY_MISR_EDP, true);
@@ -1218,6 +1237,10 @@ static int mdss_mdp_put_img(struct mdss_mdp_img_data *data, bool rotator,
/*
* skip memory unmapping - secure display uses physical
* address which does not require buffer unmapping
+ *
+ * For LT targets in the secure display use case, srcp_dma_buf
+ * will be populated by the map call and unmapped above.
+ *
*/
pr_debug("skip memory unmapping for secure display content\n");
} else {
@@ -1238,6 +1261,7 @@ static int mdss_mdp_get_img(struct msmfb_data *img,
u32 domain;
dma_addr_t *start;
struct ion_client *iclient = mdss_get_ionclient();
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
start = &data->addr;
len = &data->len;
@@ -1261,84 +1285,85 @@ static int mdss_mdp_get_img(struct msmfb_data *img,
pr_err("invalid FB_MAJOR\n");
ret = -1;
}
- } else if (iclient &&
- !(data->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)) {
- data->srcp_dma_buf = dma_buf_get(img->memory_id);
- if (IS_ERR(data->srcp_dma_buf)) {
- pr_err("error on ion_import_fd\n");
- ret = PTR_ERR(data->srcp_dma_buf);
- data->srcp_dma_buf = NULL;
- return ret;
- }
- domain = mdss_smmu_get_domain_type(data->flags, rotator);
-
- data->srcp_attachment =
- mdss_smmu_dma_buf_attach(data->srcp_dma_buf, dev,
- domain);
- if (IS_ERR(data->srcp_attachment)) {
- ret = PTR_ERR(data->srcp_attachment);
- goto err_put;
- }
-
- data->srcp_table =
- dma_buf_map_attachment(data->srcp_attachment,
- mdss_smmu_dma_data_direction(dir));
- if (IS_ERR(data->srcp_table)) {
- ret = PTR_ERR(data->srcp_table);
- goto err_detach;
- }
-
- data->addr = 0;
- data->len = 0;
- data->mapped = false;
- data->skip_detach = false;
- /* return early, mapping will be done later */
-
- return 0;
- } else if (iclient &&
- (data->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)) {
- struct ion_handle *ihandle = NULL;
- struct sg_table *sg_ptr = NULL;
-
- do {
- ihandle = ion_import_dma_buf(iclient, img->memory_id);
- if (IS_ERR_OR_NULL(ihandle)) {
- ret = -EINVAL;
- pr_err("ion import buffer failed\n");
- break;
- }
-
- sg_ptr = ion_sg_table(iclient, ihandle);
- if (sg_ptr == NULL) {
- pr_err("ion sg table get failed\n");
- ret = -EINVAL;
- break;
+ } else if (iclient) {
+ if (mdss_mdp_is_map_needed(mdata, data)) {
+ data->srcp_dma_buf = dma_buf_get(img->memory_id);
+ if (IS_ERR(data->srcp_dma_buf)) {
+ pr_err("error on ion_import_fd\n");
+ ret = PTR_ERR(data->srcp_dma_buf);
+ data->srcp_dma_buf = NULL;
+ return ret;
}
-
- if (sg_ptr->nents != 1) {
- pr_err("ion buffer mapping failed\n");
- ret = -EINVAL;
- break;
+ domain = mdss_smmu_get_domain_type(data->flags,
+ rotator);
+
+ data->srcp_attachment =
+ mdss_smmu_dma_buf_attach(data->srcp_dma_buf,
+ dev, domain);
+ if (IS_ERR(data->srcp_attachment)) {
+ ret = PTR_ERR(data->srcp_attachment);
+ goto err_put;
}
- if (((uint64_t)sg_dma_address(sg_ptr->sgl) >=
- PHY_ADDR_4G - sg_ptr->sgl->length)) {
- pr_err("ion buffer mapped size is invalid\n");
- ret = -EINVAL;
- break;
+ data->srcp_table =
+ dma_buf_map_attachment(data->srcp_attachment,
+ mdss_smmu_dma_data_direction(dir));
+ if (IS_ERR(data->srcp_table)) {
+ ret = PTR_ERR(data->srcp_table);
+ goto err_detach;
}
- data->addr = sg_dma_address(sg_ptr->sgl);
- data->len = sg_ptr->sgl->length;
- data->mapped = true;
+ data->addr = 0;
+ data->len = 0;
+ data->mapped = false;
+ data->skip_detach = false;
+ /* return early, mapping will be done later */
ret = 0;
- } while (0);
-
- if (!IS_ERR_OR_NULL(ihandle))
- ion_free(iclient, ihandle);
- return ret;
+ goto done;
+ } else {
+ struct ion_handle *ihandle = NULL;
+ struct sg_table *sg_ptr = NULL;
+
+ do {
+ ihandle = ion_import_dma_buf(iclient,
+ img->memory_id);
+ if (IS_ERR_OR_NULL(ihandle)) {
+ ret = -EINVAL;
+ pr_err("ion import buffer failed\n");
+ break;
+ }
+
+ sg_ptr = ion_sg_table(iclient, ihandle);
+ if (sg_ptr == NULL) {
+ pr_err("ion sg table get failed\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (sg_ptr->nents != 1) {
+ pr_err("ion buffer mapping failed\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (((uint64_t)sg_dma_address(sg_ptr->sgl) >=
+ PHY_ADDR_4G - sg_ptr->sgl->length)) {
+ pr_err("ion buffer mapped size is invalid\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ data->addr = sg_dma_address(sg_ptr->sgl);
+ data->len = sg_ptr->sgl->length;
+ data->mapped = true;
+ ret = 0;
+ } while (0);
+
+ if (!IS_ERR_OR_NULL(ihandle))
+ ion_free(iclient, ihandle);
+ return ret;
+ }
}
-
if (!*start) {
pr_err("start address is zero!\n");
mdss_mdp_put_img(data, rotator, dir);
@@ -1361,6 +1386,7 @@ err_detach:
dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
err_put:
dma_buf_put(data->srcp_dma_buf);
+done:
return ret;
}
@@ -1369,13 +1395,14 @@ static int mdss_mdp_map_buffer(struct mdss_mdp_img_data *data, bool rotator,
{
int ret = -EINVAL;
int domain;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
if (data->addr && data->len)
return 0;
if (!IS_ERR_OR_NULL(data->srcp_dma_buf)) {
if (mdss_res->mdss_util->iommu_attached() &&
- !(data->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)) {
+ (mdss_mdp_is_map_needed(mdata, data))) {
domain = mdss_smmu_get_domain_type(data->flags,
rotator);
data->dir = dir;
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index 83949d821fdb..33d8d7bd77bd 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -158,6 +158,11 @@ struct mdss_panel_cfg {
#define MDP_INTF_DSI_CMD_FIFO_UNDERFLOW 0x0001
#define MDP_INTF_DSI_VIDEO_FIFO_OVERFLOW 0x0002
+
+enum {
+ MDP_INTF_CALLBACK_DSI_WAIT,
+};
+
struct mdss_intf_recovery {
void (*fxn)(void *ctx, int event);
void *data;
@@ -210,6 +215,7 @@ struct mdss_intf_recovery {
* - 1: update to command mode
* @MDSS_EVENT_REGISTER_RECOVERY_HANDLER: Event to recover the interface in
* case there was any errors detected.
+ * @MDSS_EVENT_REGISTER_MDP_CALLBACK: Event to register callback to MDP driver.
* @MDSS_EVENT_DSI_PANEL_STATUS: Event to check the panel status
* <= 0: panel check fail
* > 0: panel check success
@@ -248,6 +254,7 @@ enum mdss_intf_events {
MDSS_EVENT_DSI_STREAM_SIZE,
MDSS_EVENT_DSI_UPDATE_PANEL_DATA,
MDSS_EVENT_REGISTER_RECOVERY_HANDLER,
+ MDSS_EVENT_REGISTER_MDP_CALLBACK,
MDSS_EVENT_DSI_PANEL_STATUS,
MDSS_EVENT_DSI_DYNAMIC_SWITCH,
MDSS_EVENT_DSI_RECONFIG_CMD,
@@ -531,6 +538,7 @@ struct mdss_mdp_pp_tear_check {
u32 sync_threshold_continue;
u32 start_pos;
u32 rd_ptr_irq;
+ u32 wr_ptr_irq;
u32 refx100;
};
@@ -599,7 +607,6 @@ struct mdss_panel_info {
u32 partial_update_roi_merge;
struct ion_handle *splash_ihdl;
int panel_power_state;
- int blank_state;
int compression_mode;
uint32_t panel_dead;
diff --git a/drivers/video/fbdev/msm/mdss_rotator.c b/drivers/video/fbdev/msm/mdss_rotator.c
index 6b9e8a76f406..6aba03ea2570 100644
--- a/drivers/video/fbdev/msm/mdss_rotator.c
+++ b/drivers/video/fbdev/msm/mdss_rotator.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -375,7 +375,7 @@ static bool mdss_rotator_is_work_pending(struct mdss_rot_mgr *mgr,
static int mdss_rotator_create_fence(struct mdss_rot_entry *entry)
{
- int ret, fd;
+ int ret = 0, fd;
u32 val;
struct sync_pt *sync_pt;
struct sync_fence *fence;
@@ -1123,6 +1123,7 @@ static void mdss_rotator_release_from_work_distribution(
entry->perf->work_distribution);
devm_kfree(&mgr->pdev->dev, entry->perf);
mdss_rotator_update_perf(mgr);
+ mdss_rotator_clk_ctrl(mgr, false);
entry->perf = NULL;
}
}
@@ -1700,11 +1701,13 @@ static int mdss_rotator_config_hw(struct mdss_rot_hw_resource *hw,
{
struct mdss_mdp_pipe *pipe;
struct mdp_rotation_item *item;
+ struct mdss_rot_perf *perf;
int ret;
ATRACE_BEGIN(__func__);
pipe = hw->pipe;
item = &entry->item;
+ perf = entry->perf;
pipe->flags = mdss_rotator_translate_flags(item->flags);
pipe->src_fmt = mdss_mdp_get_format_params(item->input.format);
@@ -1713,6 +1716,7 @@ static int mdss_rotator_config_hw(struct mdss_rot_hw_resource *hw,
mdss_rotator_translate_rect(&pipe->src, &item->src_rect);
mdss_rotator_translate_rect(&pipe->dst, &item->src_rect);
pipe->scaler.enable = 0;
+ pipe->frame_rate = perf->config.frame_rate;
pipe->params_changed++;
@@ -2142,6 +2146,7 @@ static int mdss_rotator_handle_request(struct mdss_rot_mgr *mgr,
struct mdp_rotation_item *items = NULL;
struct mdss_rot_entry_container *req = NULL;
int size, ret;
+ uint32_t req_count;
if (mdss_get_sd_client_cnt()) {
pr_err("rot request not permitted during secure display session\n");
@@ -2155,12 +2160,18 @@ static int mdss_rotator_handle_request(struct mdss_rot_mgr *mgr,
return ret;
}
+ req_count = user_req.count;
+ if ((!req_count) || (req_count > MAX_LAYER_COUNT)) {
+ pr_err("invalid rotator req count :%d\n", req_count);
+ return -EINVAL;
+ }
+
/*
* here, we make a copy of the items so that we can copy
* all the output fences to the client in one call. Otherwise,
* we will have to call multiple copy_to_user
*/
- size = sizeof(struct mdp_rotation_item) * user_req.count;
+ size = sizeof(struct mdp_rotation_item) * req_count;
items = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
if (!items) {
pr_err("fail to allocate rotation items\n");
@@ -2299,6 +2310,7 @@ static int mdss_rotator_handle_request32(struct mdss_rot_mgr *mgr,
struct mdp_rotation_item *items = NULL;
struct mdss_rot_entry_container *req = NULL;
int size, ret;
+ uint32_t req_count;
if (mdss_get_sd_client_cnt()) {
pr_err("rot request not permitted during secure display session\n");
@@ -2312,13 +2324,19 @@ static int mdss_rotator_handle_request32(struct mdss_rot_mgr *mgr,
return ret;
}
- size = sizeof(struct mdp_rotation_item) * user_req32.count;
+ req_count = user_req32.count;
+ if ((!req_count) || (req_count > MAX_LAYER_COUNT)) {
+ pr_err("invalid rotator req count :%d\n", req_count);
+ return -EINVAL;
+ }
+
+ size = sizeof(struct mdp_rotation_item) * req_count;
items = devm_kzalloc(&mgr->pdev->dev, size, GFP_KERNEL);
if (!items) {
pr_err("fail to allocate rotation items\n");
return -ENOMEM;
}
- ret = copy_from_user(items, user_req32.list, size);
+ ret = copy_from_user(items, compat_ptr(user_req32.list), size);
if (ret) {
pr_err("fail to copy rotation items\n");
goto handle_request32_err;
@@ -2345,7 +2363,7 @@ static int mdss_rotator_handle_request32(struct mdss_rot_mgr *mgr,
goto handle_request32_err1;
}
- ret = copy_to_user(user_req32.list, items, size);
+ ret = copy_to_user(compat_ptr(user_req32.list), items, size);
if (ret) {
pr_err("fail to copy output fence to user\n");
mdss_rotator_remove_request(mgr, private, req);
@@ -2503,6 +2521,7 @@ static const struct file_operations mdss_rotator_fops = {
static int mdss_rotator_parse_dt_bus(struct mdss_rot_mgr *mgr,
struct platform_device *dev)
{
+ struct device_node *node;
int ret = 0, i;
bool register_bus_needed;
int usecases;
@@ -2520,12 +2539,26 @@ static int mdss_rotator_parse_dt_bus(struct mdss_rot_mgr *mgr,
register_bus_needed = of_property_read_bool(dev->dev.of_node,
"qcom,mdss-has-reg-bus");
if (register_bus_needed) {
- mgr->reg_bus.bus_scale_pdata = &rot_reg_bus_scale_table;
- usecases = mgr->reg_bus.bus_scale_pdata->num_usecases;
- for (i = 0; i < usecases; i++) {
- rot_reg_bus_usecases[i].num_paths = 1;
- rot_reg_bus_usecases[i].vectors =
- &rot_reg_bus_vectors[i];
+ node = of_get_child_by_name(
+ dev->dev.of_node, "qcom,mdss-rot-reg-bus");
+ if (!node) {
+ mgr->reg_bus.bus_scale_pdata = &rot_reg_bus_scale_table;
+ usecases = mgr->reg_bus.bus_scale_pdata->num_usecases;
+ for (i = 0; i < usecases; i++) {
+ rot_reg_bus_usecases[i].num_paths = 1;
+ rot_reg_bus_usecases[i].vectors =
+ &rot_reg_bus_vectors[i];
+ }
+ } else {
+ mgr->reg_bus.bus_scale_pdata =
+ msm_bus_pdata_from_node(dev, node);
+ if (IS_ERR_OR_NULL(mgr->reg_bus.bus_scale_pdata)) {
+ ret = PTR_ERR(mgr->reg_bus.bus_scale_pdata);
+ if (!ret)
+ ret = -EINVAL;
+ pr_err("reg_rot_bus failed rc=%d\n", ret);
+ mgr->reg_bus.bus_scale_pdata = NULL;
+ }
}
}
return ret;
diff --git a/drivers/video/fbdev/msm/mdss_rotator_internal.h b/drivers/video/fbdev/msm/mdss_rotator_internal.h
index 87c5dcd98813..dae5f5cb117e 100644
--- a/drivers/video/fbdev/msm/mdss_rotator_internal.h
+++ b/drivers/video/fbdev/msm/mdss_rotator_internal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -191,7 +191,7 @@ struct mdp_rotation_request32 {
uint32_t version;
uint32_t flags;
uint32_t count;
- compat_caddr_t __user *list;
+ compat_caddr_t list;
uint32_t reserved[6];
};
#endif
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
index c60074e76bf2..9aa2c8386b17 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.c
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2007-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2007-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -138,7 +138,7 @@ static int mdss_smmu_enable_power(struct mdss_smmu_client *mdss_smmu,
goto end;
}
mdss_update_reg_bus_vote(mdss_smmu->reg_bus_clt,
- VOTE_INDEX_19_MHZ);
+ VOTE_INDEX_LOW);
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {
pr_err("clock enable failed - rc:%d\n", rc);
@@ -604,7 +604,7 @@ int mdss_smmu_probe(struct platform_device *pdev)
snprintf(name, MAX_CLIENT_NAME_LEN, "smmu:%u", smmu_domain.domain);
mdss_smmu->reg_bus_clt = mdss_reg_bus_vote_client_create(name);
- if (IS_ERR_OR_NULL(mdss_smmu->reg_bus_clt)) {
+ if (IS_ERR(mdss_smmu->reg_bus_clt)) {
pr_err("mdss bus client register failed\n");
msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg,
false);
diff --git a/drivers/video/fbdev/msm/msm_dba/adv7533.c b/drivers/video/fbdev/msm/msm_dba/adv7533.c
index e27f3b97471a..a3b4466d105d 100644
--- a/drivers/video/fbdev/msm/msm_dba/adv7533.c
+++ b/drivers/video/fbdev/msm/msm_dba/adv7533.c
@@ -38,6 +38,8 @@
#define MDSS_MAX_PANEL_LEN 256
#define EDID_SEG_SIZE 0x100
+/* size of audio and speaker info Block */
+#define AUDIO_DATA_SIZE 32
/* 0x94 interrupts */
#define HPD_INT_ENABLE BIT(7)
@@ -129,6 +131,7 @@ struct adv7533 {
bool is_power_on;
void *edid_data;
u8 edid_buf[EDID_SEG_SIZE];
+ u8 audio_spkr_data[AUDIO_DATA_SIZE];
struct workqueue_struct *workq;
struct delayed_work adv7533_intr_work_id;
struct msm_dba_device_info dev_info;
@@ -1274,6 +1277,44 @@ static int adv7533_cec_enable(void *client, bool cec_on, u32 flags)
end:
return ret;
}
+static void adv7533_set_audio_block(void *client, u32 size, void *buf)
+{
+ struct adv7533 *pdata =
+ adv7533_get_platform_data(client);
+
+ if (!pdata || !buf) {
+ pr_err("%s: invalid data\n", __func__);
+ return;
+ }
+
+ mutex_lock(&pdata->ops_mutex);
+
+ size = min_t(u32, size, AUDIO_DATA_SIZE);
+
+ memset(pdata->audio_spkr_data, 0, AUDIO_DATA_SIZE);
+ memcpy(pdata->audio_spkr_data, buf, size);
+
+ mutex_unlock(&pdata->ops_mutex);
+}
+
+static void adv7533_get_audio_block(void *client, u32 size, void *buf)
+{
+ struct adv7533 *pdata =
+ adv7533_get_platform_data(client);
+
+ if (!pdata || !buf) {
+ pr_err("%s: invalid data\n", __func__);
+ return;
+ }
+
+ mutex_lock(&pdata->ops_mutex);
+
+ size = min_t(u32, size, AUDIO_DATA_SIZE);
+
+ memcpy(buf, pdata->audio_spkr_data, size);
+
+ mutex_unlock(&pdata->ops_mutex);
+}
static int adv7533_check_hpd(void *client, u32 flags)
{
@@ -1880,6 +1921,8 @@ static int adv7533_register_dba(struct adv7533 *pdata)
client_ops->get_edid_size = adv7533_get_edid_size;
client_ops->get_raw_edid = adv7533_get_raw_edid;
client_ops->check_hpd = adv7533_check_hpd;
+ client_ops->get_audio_block = adv7533_get_audio_block;
+ client_ops->set_audio_block = adv7533_set_audio_block;
dev_ops->write_reg = adv7533_write_reg;
dev_ops->read_reg = adv7533_read_reg;
@@ -1888,8 +1931,6 @@ static int adv7533_register_dba(struct adv7533 *pdata)
strlcpy(pdata->dev_info.chip_name, "adv7533",
sizeof(pdata->dev_info.chip_name));
- pdata->dev_info.instance_id = 0;
-
mutex_init(&pdata->dev_info.dev_mutex);
INIT_LIST_HEAD(&pdata->dev_info.client_list);
diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
index 7cc2717a6df5..b23e24362af6 100644
--- a/drivers/video/fbdev/msm/msm_mdss_io_8974.c
+++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
@@ -53,6 +53,9 @@
#define DSIPHY_PLL_CLKBUFLR_EN 0x041c
#define DSIPHY_PLL_PLL_BANDGAP 0x0508
+#define DSIPHY_LANE_STRENGTH_CTRL_1 0x003c
+#define DSIPHY_LANE_VREG_CNTRL 0x0064
+
#define DSI_DYNAMIC_REFRESH_PLL_CTRL0 0x214
#define DSI_DYNAMIC_REFRESH_PLL_CTRL1 0x218
#define DSI_DYNAMIC_REFRESH_PLL_CTRL2 0x21C
@@ -875,6 +878,92 @@ static void mdss_dsi_8996_phy_regulator_enable(
}
+static void mdss_dsi_8996_phy_power_off(
+ struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ int ln;
+ void __iomem *base;
+
+ MIPI_OUTP(ctrl->phy_io.base + DSIPHY_CMN_CTRL_0, 0x7f);
+
+ /* 4 lanes + clk lane configuration */
+ for (ln = 0; ln < 5; ln++) {
+ base = ctrl->phy_io.base +
+ DATALANE_OFFSET_FROM_BASE_8996;
+ base += (ln * DATALANE_SIZE_8996); /* lane base */
+
+ /* turn off phy ldo */
+ MIPI_OUTP(base + DSIPHY_LANE_VREG_CNTRL, 0x1c);
+ }
+ MIPI_OUTP((ctrl->phy_io.base) + DSIPHY_CMN_LDO_CNTRL, 0x1c);
+
+ /* 4 lanes + clk lane configuration */
+ for (ln = 0; ln < 5; ln++) {
+ base = ctrl->phy_io.base +
+ DATALANE_OFFSET_FROM_BASE_8996;
+ base += (ln * DATALANE_SIZE_8996); /* lane base */
+
+ MIPI_OUTP(base + DSIPHY_LANE_STRENGTH_CTRL_1, 0x0);
+ }
+
+ wmb(); /* make sure registers committed */
+}
+
+static void mdss_dsi_phy_power_off(
+ struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ if (ctrl->phy_power_off)
+ return;
+
+ /* supported for phy rev 2.0 */
+ if (ctrl->shared_data->phy_rev != DSI_PHY_REV_20)
+ return;
+
+ mdss_dsi_8996_phy_power_off(ctrl);
+
+ ctrl->phy_power_off = true;
+}
+
+static void mdss_dsi_8996_phy_power_on(
+ struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ int j, off, ln, cnt, ln_off;
+ void __iomem *base;
+ struct mdss_dsi_phy_ctrl *pd;
+ char *ip;
+
+ pd = &(((ctrl->panel_data).panel_info.mipi).dsi_phy_db);
+
+ /* 4 lanes + clk lane configuration */
+ for (ln = 0; ln < 5; ln++) {
+ base = ctrl->phy_io.base +
+ DATALANE_OFFSET_FROM_BASE_8996;
+ base += (ln * DATALANE_SIZE_8996); /* lane base */
+
+ /* strength, 2 * 5 */
+ cnt = 2;
+ ln_off = cnt * ln;
+ ip = &pd->strength[ln_off];
+ off = 0x38;
+ for (j = 0; j < cnt; j++, off += 4)
+ MIPI_OUTP(base + off, *ip++);
+ }
+
+ mdss_dsi_8996_phy_regulator_enable(ctrl);
+}
+
+static void mdss_dsi_phy_power_on(
+ struct mdss_dsi_ctrl_pdata *ctrl, bool mmss_clamp)
+{
+ if (mmss_clamp && (ctrl->shared_data->phy_rev != DSI_PHY_REV_20))
+ mdss_dsi_phy_init(ctrl);
+ else if ((ctrl->shared_data->phy_rev == DSI_PHY_REV_20) &&
+ ctrl->phy_power_off)
+ mdss_dsi_8996_phy_power_on(ctrl);
+
+ ctrl->phy_power_off = false;
+}
+
static void mdss_dsi_8996_phy_config(struct mdss_dsi_ctrl_pdata *ctrl)
{
struct mdss_dsi_phy_ctrl *pd;
@@ -1171,6 +1260,7 @@ int mdss_dsi_clk_refresh(struct mdss_panel_data *pdata, bool update_phy)
pr_err("Error in calculating phy timings\n");
return rc;
}
+ ctrl_pdata->update_phy_timing = false;
}
return rc;
@@ -1970,8 +2060,7 @@ int mdss_dsi_pre_clkoff_cb(void *priv,
* However, when blanking the panel, we should enter ULPS
* only if ULPS during suspend feature is enabled.
*/
- if (pdata->panel_info.blank_state ==
- MDSS_PANEL_BLANK_BLANK) {
+ if (!(ctrl->ctrl_state & CTRL_STATE_PANEL_INIT)) {
if (pdata->panel_info.ulps_suspend_enabled)
mdss_dsi_ulps_config(ctrl, 1);
} else if (mdss_dsi_ulps_feature_enabled(pdata)) {
@@ -1988,8 +2077,9 @@ int mdss_dsi_pre_clkoff_cb(void *priv,
* Enable DSI clamps only if entering idle power collapse or
* when ULPS during suspend is enabled.
*/
- if ((pdata->panel_info.blank_state != MDSS_PANEL_BLANK_BLANK) ||
+ if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) ||
pdata->panel_info.ulps_suspend_enabled) {
+ mdss_dsi_phy_power_off(ctrl);
rc = mdss_dsi_clamp_ctrl(ctrl, 1);
if (rc)
pr_err("%s: Failed to enable dsi clamps. rc=%d\n",
@@ -2016,23 +2106,18 @@ int mdss_dsi_post_clkon_cb(void *priv,
int rc = 0;
struct mdss_panel_data *pdata = NULL;
struct mdss_dsi_ctrl_pdata *ctrl = priv;
+ bool mmss_clamp;
pdata = &ctrl->panel_data;
if (clk & MDSS_DSI_CORE_CLK) {
- if (!pdata->panel_info.cont_splash_enabled) {
- mdss_dsi_read_hw_revision(ctrl);
- mdss_dsi_read_phy_revision(ctrl);
- }
-
+ mmss_clamp = ctrl->mmss_clamp;
/*
- * Phy and controller setup is needed if coming out of idle
+ * controller setup is needed if coming out of idle
* power collapse with clamps enabled.
*/
- if (ctrl->mmss_clamp) {
- mdss_dsi_phy_init(ctrl);
+ if (mmss_clamp)
mdss_dsi_ctrl_setup(ctrl);
- }
if (ctrl->ulps) {
/*
@@ -2064,6 +2149,13 @@ int mdss_dsi_post_clkon_cb(void *priv,
__func__, rc);
goto error;
}
+
+		/*
+		 * PHY setup is needed when coming out of idle
+		 * power collapse with clamps enabled.
+		 */
+ if (ctrl->phy_power_off || mmss_clamp)
+ mdss_dsi_phy_power_on(ctrl, mmss_clamp);
}
if (clk & MDSS_DSI_LINK_CLK) {
if (ctrl->ulps) {
@@ -2100,9 +2192,18 @@ int mdss_dsi_post_clkoff_cb(void *priv,
pdata = &ctrl->panel_data;
for (i = DSI_MAX_PM - 1; i >= DSI_CORE_PM; i--) {
- if ((i != DSI_CORE_PM) &&
- (pdata->panel_info.blank_state !=
- MDSS_PANEL_BLANK_BLANK))
+		/*
+		 * If the DSI state is active:
+		 * 1. The core power module may be turned off.
+		 * 2. The PHY power module may be turned off only if
+		 *    the PHY has already been powered off.
+		 *
+		 * If DSI is not active, all power modules may be
+		 * turned off.
+		 */
+ if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) &&
+ (i != DSI_CORE_PM) &&
+ (ctrl->phy_power_off && (i != DSI_PHY_PM)))
continue;
rc = msm_dss_enable_vreg(
sdata->power_data[i].vreg_config,
@@ -2140,20 +2241,22 @@ int mdss_dsi_pre_clkon_cb(void *priv,
sdata = ctrl->shared_data;
pdata = &ctrl->panel_data;
/*
- * Enable DSI core power
+ * Enable DSI core power
* 1.> PANEL_PM are controlled as part of
* panel_power_ctrl. Needed not be handled here.
* 2.> CORE_PM are controlled by dsi clk manager.
- * 2.> PHY_PM and CTRL_PM need to be enabled/disabled
+	 * 3.> CTRL_PM needs to be enabled/disabled
* only during unblank/blank. Their state should
* not be changed during static screen.
+	 * 4.> PHY_PM can be enabled/disabled whenever the
+	 *     phy regulators are enabled/disabled.
*/
pr_debug("%s: Enable DSI core power\n", __func__);
for (i = DSI_CORE_PM; i < DSI_MAX_PM; i++) {
- if ((i != DSI_CORE_PM) &&
- (pdata->panel_info.blank_state !=
- MDSS_PANEL_BLANK_BLANK) &&
- !pdata->panel_info.cont_splash_enabled)
+ if ((ctrl->ctrl_state & CTRL_STATE_DSI_ACTIVE) &&
+ (!pdata->panel_info.cont_splash_enabled) &&
+ (i != DSI_CORE_PM) &&
+ (ctrl->phy_power_off && (i != DSI_PHY_PM)))
continue;
rc = msm_dss_enable_vreg(
sdata->power_data[i].vreg_config,
diff --git a/include/uapi/linux/msm_mdp.h b/include/uapi/linux/msm_mdp.h
index 346081003969..b4eba416cbc2 100644
--- a/include/uapi/linux/msm_mdp.h
+++ b/include/uapi/linux/msm_mdp.h
@@ -344,7 +344,7 @@ struct mdp_csc {
* to include
*/
-#define MDP_BLIT_REQ_VERSION 2
+#define MDP_BLIT_REQ_VERSION 3
struct color {
uint32_t r;
@@ -364,6 +364,7 @@ struct mdp_blit_req {
uint32_t flags;
int sharpening_strength; /* -127 <--> 127, default 64 */
uint8_t color_space;
+ uint32_t fps;
};
struct mdp_blit_req_list {
diff --git a/include/video/msm_dba.h b/include/video/msm_dba.h
index 16f9052fc2ce..3d20fd8d65eb 100644
--- a/include/video/msm_dba.h
+++ b/include/video/msm_dba.h
@@ -466,6 +466,10 @@ struct msm_dba_video_cfg {
* @dump_debug_info: dumps debug information to dmesg.
* @check_hpd: Check if cable is connected or not. if cable is connected we
* send notification to display framework.
+ * @set_audio_block: Populates the raw audio speaker block data, along with
+ *                   the size of each block, into the bridge chip buffer.
+ * @get_audio_block: Returns the raw audio speaker block data, along with
+ *                   the size of each block.
*
* The msm_dba_ops structure represents a set of operations that can be
* supported by each bridge chip. Depending on the functionality supported by a
@@ -564,6 +568,8 @@ struct msm_dba_ops {
int (*force_reset)(void *client, u32 flags);
int (*dump_debug_info)(void *client, u32 flags);
int (*check_hpd)(void *client, u32 flags);
+ void (*set_audio_block)(void *client, u32 size, void *buf);
+ void (*get_audio_block)(void *client, u32 size, void *buf);
};
/**