summaryrefslogtreecommitdiff
path: root/drivers/clk
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/clk')
-rw-r--r--drivers/clk/Kconfig3
-rw-r--r--drivers/clk/Makefile3
-rw-r--r--drivers/clk/clk.c18
-rw-r--r--drivers/clk/clk.h2
-rw-r--r--drivers/clk/clkdev.c12
-rw-r--r--drivers/clk/msm/Kconfig7
-rw-r--r--drivers/clk/msm/Makefile28
-rw-r--r--drivers/clk/msm/clock-alpha-pll.c1248
-rw-r--r--drivers/clk/msm/clock-cpu-8996.c1888
-rw-r--r--drivers/clk/msm/clock-debug.c676
-rw-r--r--drivers/clk/msm/clock-dummy.c113
-rw-r--r--drivers/clk/msm/clock-gcc-8996.c3825
-rw-r--r--drivers/clk/msm/clock-gcc-cobalt.c2915
-rw-r--r--drivers/clk/msm/clock-generic.c917
-rw-r--r--drivers/clk/msm/clock-gpu-cobalt.c790
-rw-r--r--drivers/clk/msm/clock-local2.c2973
-rw-r--r--drivers/clk/msm/clock-mmss-8996.c3994
-rw-r--r--drivers/clk/msm/clock-mmss-cobalt.c2810
-rw-r--r--drivers/clk/msm/clock-osm.c2546
-rw-r--r--drivers/clk/msm/clock-pll.c1206
-rw-r--r--drivers/clk/msm/clock-rpm.c472
-rw-r--r--drivers/clk/msm/clock-voter.c202
-rw-r--r--drivers/clk/msm/clock.c1389
-rw-r--r--drivers/clk/msm/clock.h52
-rw-r--r--drivers/clk/msm/gdsc.c684
-rw-r--r--drivers/clk/msm/mdss/Kconfig6
-rw-r--r--drivers/clk/msm/mdss/Makefile9
-rw-r--r--drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c730
-rw-r--r--drivers/clk/msm/mdss/mdss-dp-pll-cobalt.c249
-rw-r--r--drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h169
-rw-r--r--drivers/clk/msm/mdss/mdss-dp-pll.h36
-rw-r--r--drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c1138
-rw-r--r--drivers/clk/msm/mdss/mdss-dsi-pll-8996.c566
-rw-r--r--drivers/clk/msm/mdss/mdss-dsi-pll-8996.h221
-rw-r--r--drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c1305
-rw-r--r--drivers/clk/msm/mdss/mdss-dsi-pll.h110
-rw-r--r--drivers/clk/msm/mdss/mdss-hdmi-pll-8996.c2683
-rw-r--r--drivers/clk/msm/mdss/mdss-hdmi-pll-cobalt.c841
-rw-r--r--drivers/clk/msm/mdss/mdss-hdmi-pll.h61
-rw-r--r--drivers/clk/msm/mdss/mdss-pll-util.c441
-rw-r--r--drivers/clk/msm/mdss/mdss-pll.c437
-rw-r--r--drivers/clk/msm/mdss/mdss-pll.h233
-rw-r--r--drivers/clk/msm/msm-clock-controller.c755
-rw-r--r--drivers/clk/msm/vdd-level-8996.h93
-rw-r--r--drivers/clk/msm/vdd-level-cobalt.h98
-rw-r--r--drivers/clk/qcom/Kconfig82
-rw-r--r--drivers/clk/qcom/Makefile11
-rw-r--r--drivers/clk/qcom/clk-a53.c202
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c464
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h68
-rw-r--r--drivers/clk/qcom/clk-hfpll.c253
-rw-r--r--drivers/clk/qcom/clk-hfpll.h54
-rw-r--r--drivers/clk/qcom/clk-krait.c167
-rw-r--r--drivers/clk/qcom/clk-krait.h49
-rw-r--r--drivers/clk/qcom/clk-pll.h3
-rw-r--r--drivers/clk/qcom/clk-rcg.h1
-rw-r--r--drivers/clk/qcom/clk-rcg2.c87
-rw-r--r--drivers/clk/qcom/clk-rpm.c503
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c653
-rw-r--r--drivers/clk/qcom/common.c115
-rw-r--r--drivers/clk/qcom/common.h4
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c37
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c32
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c42
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c3516
-rw-r--r--drivers/clk/qcom/gdsc.c89
-rw-r--r--drivers/clk/qcom/gdsc.h34
-rw-r--r--drivers/clk/qcom/hfpll.c109
-rw-r--r--drivers/clk/qcom/kpss-xcc.c95
-rw-r--r--drivers/clk/qcom/krait-cc.c352
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c35
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c3383
73 files changed, 49199 insertions, 196 deletions
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index c3e3a02f7f1f..b00e391238b8 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -42,7 +42,7 @@ config COMMON_CLK_MAX77686
depends on MFD_MAX77686
select COMMON_CLK_MAX_GEN
---help---
- This driver supports Maxim 77686 crystal oscillator clock.
+ This driver supports Maxim 77686 crystal oscillator clock.
config COMMON_CLK_MAX77802
tristate "Clock driver for Maxim 77802 PMIC"
@@ -198,3 +198,4 @@ source "drivers/clk/mvebu/Kconfig"
source "drivers/clk/samsung/Kconfig"
source "drivers/clk/tegra/Kconfig"
+source "drivers/clk/msm/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 820714c72d36..37f67c77fe7c 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,7 +1,7 @@
# common clock types
obj-$(CONFIG_HAVE_CLK) += clk-devres.o
obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
-obj-$(CONFIG_COMMON_CLK) += clk.o
+obj-$(CONFIG_OF) += clk.o
obj-$(CONFIG_COMMON_CLK) += clk-divider.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-factor.o
obj-$(CONFIG_COMMON_CLK) += clk-fixed-rate.o
@@ -82,3 +82,4 @@ obj-$(CONFIG_X86) += x86/
obj-$(CONFIG_ARCH_ZX) += zte/
obj-$(CONFIG_ARCH_ZYNQ) += zynq/
obj-$(CONFIG_H8300) += h8300/
+obj-$(CONFIG_ARCH_QCOM) += msm/
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f13c3f4228d4..cd54a184e997 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -26,6 +26,8 @@
#include "clk.h"
+#if defined(CONFIG_COMMON_CLK)
+
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
@@ -2901,6 +2903,8 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
+#endif /* CONFIG_COMMON_CLK */
+
#ifdef CONFIG_OF
/**
* struct of_clk_provider - Clock provider registration structure
@@ -2931,6 +2935,8 @@ struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
+#if defined(CONFIG_COMMON_CLK)
+
struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
struct clk_onecell_data *clk_data = data;
@@ -2945,6 +2951,11 @@ struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
+#endif /* CONFIG_COMMON_CLK */
+
+/* forward declaration */
+void of_clk_del_provider(struct device_node *np);
+
/**
* of_clk_add_provider() - Register a clock provider for a node
* @np: Device node pointer associated with clock provider
@@ -3100,8 +3111,10 @@ const char *of_clk_get_parent_name(struct device_node *np, int index)
else
clk_name = NULL;
} else {
+#if defined(CONFIG_COMMON_CLK)
clk_name = __clk_get_name(clk);
clk_put(clk);
+#endif
}
}
@@ -3132,6 +3145,8 @@ int of_clk_parent_fill(struct device_node *np, const char **parents,
}
EXPORT_SYMBOL_GPL(of_clk_parent_fill);
+#if defined(CONFIG_COMMON_CLK)
+
struct clock_provider {
of_clk_init_cb_t clk_init_cb;
struct device_node *np;
@@ -3240,4 +3255,7 @@ void __init of_clk_init(const struct of_device_id *matches)
force = true;
}
}
+
+#endif /* CONFIG_COMMON_CLK */
+
#endif
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index 00b35a13cdf3..97941b0f8f32 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -11,7 +11,7 @@
struct clk_hw;
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+#if defined(CONFIG_OF)
struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
const char *dev_id, const char *con_id);
#endif
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index 779b6ff0c7ad..05c516a67457 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -27,7 +27,7 @@
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
-#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+#if defined(CONFIG_OF)
static struct clk *__of_clk_get(struct device_node *np, int index,
const char *dev_id, const char *con_id)
{
@@ -73,14 +73,10 @@ static struct clk *__of_clk_get_by_name(struct device_node *np,
if (name)
index = of_property_match_string(np, "clock-names", name);
clk = __of_clk_get(np, index, dev_id, name);
- if (!IS_ERR(clk)) {
+ if (!IS_ERR(clk))
break;
- } else if (name && index >= 0) {
- if (PTR_ERR(clk) != -EPROBE_DEFER)
- pr_err("ERROR: could not get clock %s:%s(%i)\n",
- np->full_name, name ? name : "", index);
+ else if (name && index >= 0)
return clk;
- }
/*
* No matching clock found on this node. If the parent node
@@ -190,7 +186,7 @@ struct clk *clk_get_sys(const char *dev_id, const char *con_id)
out:
mutex_unlock(&clocks_mutex);
- return cl ? clk : ERR_PTR(-ENOENT);
+ return cl ? cl->clk : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL(clk_get_sys);
diff --git a/drivers/clk/msm/Kconfig b/drivers/clk/msm/Kconfig
new file mode 100644
index 000000000000..17102dd1b3d1
--- /dev/null
+++ b/drivers/clk/msm/Kconfig
@@ -0,0 +1,7 @@
+config MSM_CLK_CONTROLLER_V2
+ bool "QTI clock driver"
+ ---help---
+ Generate clock data structures from definitions found in
+ device tree.
+
+source "drivers/clk/msm/mdss/Kconfig"
diff --git a/drivers/clk/msm/Makefile b/drivers/clk/msm/Makefile
new file mode 100644
index 000000000000..cd652c691ca3
--- /dev/null
+++ b/drivers/clk/msm/Makefile
@@ -0,0 +1,28 @@
+obj-y += clock.o
+obj-y += clock-dummy.o
+obj-y += clock-generic.o
+obj-y += clock-local2.o
+obj-y += clock-pll.o
+obj-y += clock-alpha-pll.o
+obj-y += clock-rpm.o
+obj-y += clock-voter.o
+
+obj-$(CONFIG_MSM_CLK_CONTROLLER_V2) += msm-clock-controller.o
+
+obj-y += clock-debug.o
+
+# MSM 8996
+obj-$(CONFIG_ARCH_QCOM) += clock-gcc-8996.o
+obj-$(CONFIG_ARCH_QCOM) += clock-mmss-8996.o
+obj-$(CONFIG_ARCH_QCOM) += clock-cpu-8996.o
+
+# MSM COBALT
+obj-$(CONFIG_ARCH_MSMCOBALT) += clock-gcc-cobalt.o
+obj-$(CONFIG_ARCH_MSMCOBALT) += clock-gpu-cobalt.o
+obj-$(CONFIG_ARCH_MSMCOBALT) += clock-mmss-cobalt.o
+obj-$(CONFIG_ARCH_MSMCOBALT) += clock-osm.o
+
+# CPU clocks
+
+obj-y += gdsc.o
+obj-y += mdss/
diff --git a/drivers/clk/msm/clock-alpha-pll.c b/drivers/clk/msm/clock-alpha-pll.c
new file mode 100644
index 000000000000..5c8c45fe4ff6
--- /dev/null
+++ b/drivers/clk/msm/clock-alpha-pll.c
@@ -0,0 +1,1248 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <soc/qcom/clock-alpha-pll.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+#include "clock.h"
+
+#define WAIT_MAX_LOOPS 100
+
+#define MODE_REG(pll) (*pll->base + pll->offset + 0x0)
+#define LOCK_REG(pll) (*pll->base + pll->offset + 0x0)
+#define ACTIVE_REG(pll) (*pll->base + pll->offset + 0x0)
+#define UPDATE_REG(pll) (*pll->base + pll->offset + 0x0)
+#define L_REG(pll) (*pll->base + pll->offset + 0x4)
+#define A_REG(pll) (*pll->base + pll->offset + 0x8)
+#define VCO_REG(pll) (*pll->base + pll->offset + 0x10)
+#define ALPHA_EN_REG(pll) (*pll->base + pll->offset + 0x10)
+#define OUTPUT_REG(pll) (*pll->base + pll->offset + 0x10)
+#define VOTE_REG(pll) (*pll->base + pll->fsm_reg_offset)
+#define USER_CTL_LO_REG(pll) (*pll->base + pll->offset + 0x10)
+#define USER_CTL_HI_REG(pll) (*pll->base + pll->offset + 0x14)
+#define CONFIG_CTL_REG(pll) (*pll->base + pll->offset + 0x18)
+#define TEST_CTL_LO_REG(pll) (*pll->base + pll->offset + 0x1c)
+#define TEST_CTL_HI_REG(pll) (*pll->base + pll->offset + 0x20)
+
+#define PLL_BYPASSNL 0x2
+#define PLL_RESET_N 0x4
+#define PLL_OUTCTRL 0x1
+#define PLL_LATCH_INTERFACE BIT(11)
+
+#define FABIA_CONFIG_CTL_REG(pll) (*pll->base + pll->offset + 0x14)
+#define FABIA_USER_CTL_LO_REG(pll) (*pll->base + pll->offset + 0xc)
+#define FABIA_USER_CTL_HI_REG(pll) (*pll->base + pll->offset + 0x10)
+#define FABIA_TEST_CTL_LO_REG(pll) (*pll->base + pll->offset + 0x1c)
+#define FABIA_TEST_CTL_HI_REG(pll) (*pll->base + pll->offset + 0x20)
+#define FABIA_L_REG(pll) (*pll->base + pll->offset + 0x4)
+#define FABIA_CAL_L_VAL(pll) (*pll->base + pll->offset + 0x8)
+#define FABIA_FRAC_REG(pll) (*pll->base + pll->offset + 0x38)
+#define FABIA_PLL_OPMODE(pll) (*pll->base + pll->offset + 0x2c)
+
+#define FABIA_PLL_STANDBY 0x0
+#define FABIA_PLL_RUN 0x1
+#define FABIA_PLL_OUT_MAIN 0x7
+#define FABIA_RATE_MARGIN 500
+#define ALPHA_PLL_ACK_LATCH BIT(29)
+#define ALPHA_PLL_HW_UPDATE_LOGIC_BYPASS BIT(23)
+
+/*
+ * Even though 40 bits are present, use only 32 for ease of calculation.
+ */
+#define ALPHA_REG_BITWIDTH 40
+#define ALPHA_BITWIDTH 32
+#define FABIA_ALPHA_BITWIDTH 16
+
+/*
+ * Enable/disable registers could be shared among PLLs when FSM voting
+ * is used. This lock protects against potential race when multiple
+ * PLLs are being enabled/disabled together.
+ */
+static DEFINE_SPINLOCK(alpha_pll_reg_lock);
+
+static unsigned long compute_rate(struct alpha_pll_clk *pll,
+ u32 l_val, u32 a_val)
+{
+ u64 rate, parent_rate;
+ int alpha_bw = ALPHA_BITWIDTH;
+
+ if (pll->is_fabia)
+ alpha_bw = FABIA_ALPHA_BITWIDTH;
+
+ parent_rate = clk_get_rate(pll->c.parent);
+ rate = parent_rate * l_val;
+ rate += (parent_rate * a_val) >> alpha_bw;
+ return rate;
+}
+
+static bool is_locked(struct alpha_pll_clk *pll)
+{
+ u32 reg = readl_relaxed(LOCK_REG(pll));
+ u32 mask = pll->masks->lock_mask;
+ return (reg & mask) == mask;
+}
+
+static bool is_active(struct alpha_pll_clk *pll)
+{
+ u32 reg = readl_relaxed(ACTIVE_REG(pll));
+ u32 mask = pll->masks->active_mask;
+ return (reg & mask) == mask;
+}
+
+/*
+ * Check active_flag if PLL is in FSM mode, otherwise check lock_det
+ * bit. This function assumes PLLs are already configured to the
+ * right mode.
+ */
+static bool update_finish(struct alpha_pll_clk *pll)
+{
+ if (pll->fsm_en_mask)
+ return is_active(pll);
+ else
+ return is_locked(pll);
+}
+
+static int wait_for_update(struct alpha_pll_clk *pll)
+{
+ int count;
+
+ for (count = WAIT_MAX_LOOPS; count > 0; count--) {
+ if (update_finish(pll))
+ break;
+ udelay(1);
+ }
+
+ if (!count) {
+ pr_err("%s didn't lock after enabling it!\n", pll->c.dbg_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __alpha_pll_vote_enable(struct alpha_pll_clk *pll)
+{
+ u32 ena;
+
+ ena = readl_relaxed(VOTE_REG(pll));
+ ena |= pll->fsm_en_mask;
+ writel_relaxed(ena, VOTE_REG(pll));
+ mb();
+
+ return wait_for_update(pll);
+}
+
+static int __alpha_pll_enable(struct alpha_pll_clk *pll, int enable_output)
+{
+ int rc;
+ u32 mode;
+
+ mode = readl_relaxed(MODE_REG(pll));
+ mode |= PLL_BYPASSNL;
+ writel_relaxed(mode, MODE_REG(pll));
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset.
+ */
+ mb();
+ udelay(5);
+
+ mode |= PLL_RESET_N;
+ writel_relaxed(mode, MODE_REG(pll));
+
+ rc = wait_for_update(pll);
+ if (rc < 0)
+ return rc;
+
+ /* Enable PLL output. */
+ if (enable_output) {
+ mode |= PLL_OUTCTRL;
+ writel_relaxed(mode, MODE_REG(pll));
+ }
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+ return 0;
+}
+
+static void setup_alpha_pll_values(u64 a_val, u32 l_val, u32 vco_val,
+ struct alpha_pll_clk *pll)
+{
+ struct alpha_pll_masks *masks = pll->masks;
+ u32 regval;
+
+ a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+ writel_relaxed(l_val, L_REG(pll));
+ __iowrite32_copy(A_REG(pll), &a_val, 2);
+
+ if (vco_val != UINT_MAX) {
+ regval = readl_relaxed(VCO_REG(pll));
+ regval &= ~(masks->vco_mask << masks->vco_shift);
+ regval |= vco_val << masks->vco_shift;
+ writel_relaxed(regval, VCO_REG(pll));
+ }
+
+ regval = readl_relaxed(ALPHA_EN_REG(pll));
+ regval |= masks->alpha_en_mask;
+ writel_relaxed(regval, ALPHA_EN_REG(pll));
+}
+
+static int alpha_pll_enable(struct clk *c)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ unsigned long flags;
+ int rc;
+
+ if (unlikely(!pll->inited))
+ __init_alpha_pll(c);
+
+ spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+ if (pll->fsm_en_mask)
+ rc = __alpha_pll_vote_enable(pll);
+ else
+ rc = __alpha_pll_enable(pll, true);
+ spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+
+ return rc;
+}
+
+static int __calibrate_alpha_pll(struct alpha_pll_clk *pll);
+static int dyna_alpha_pll_enable(struct clk *c)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ unsigned long flags;
+ int rc;
+
+ if (unlikely(!pll->inited))
+ __init_alpha_pll(c);
+
+ spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+
+ if (pll->slew)
+ __calibrate_alpha_pll(pll);
+
+ if (pll->fsm_en_mask)
+ rc = __alpha_pll_vote_enable(pll);
+ else
+ rc = __alpha_pll_enable(pll, true);
+ spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+
+ return rc;
+}
+
+#define PLL_OFFLINE_REQ_BIT BIT(7)
+#define PLL_FSM_ENA_BIT BIT(20)
+#define PLL_OFFLINE_ACK_BIT BIT(28)
+#define PLL_ACTIVE_FLAG BIT(30)
+
+static int alpha_pll_enable_hwfsm(struct clk *c)
+{
+ u32 mode;
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+
+ /* Re-enable HW FSM mode, clear OFFLINE request */
+ mode = readl_relaxed(MODE_REG(pll));
+ mode |= PLL_FSM_ENA_BIT;
+ mode &= ~PLL_OFFLINE_REQ_BIT;
+ writel_relaxed(mode, MODE_REG(pll));
+
+ /* Make sure enable request goes through before waiting for update */
+ mb();
+
+ if (wait_for_update(pll) < 0)
+ panic("PLL %s failed to lock", c->dbg_name);
+
+ return 0;
+}
+
+static void alpha_pll_disable_hwfsm(struct clk *c)
+{
+ u32 mode;
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+
+ /* Request PLL_OFFLINE and wait for ack */
+ mode = readl_relaxed(MODE_REG(pll));
+ writel_relaxed(mode | PLL_OFFLINE_REQ_BIT, MODE_REG(pll));
+ while (!(readl_relaxed(MODE_REG(pll)) & PLL_OFFLINE_ACK_BIT))
+ ;
+
+ /* Disable HW FSM */
+ mode = readl_relaxed(MODE_REG(pll));
+ mode &= ~PLL_FSM_ENA_BIT;
+ if (pll->offline_bit_workaround)
+ mode &= ~PLL_OFFLINE_REQ_BIT;
+ writel_relaxed(mode, MODE_REG(pll));
+
+ while (readl_relaxed(MODE_REG(pll)) & PLL_ACTIVE_FLAG)
+ ;
+}
+
+static void __alpha_pll_vote_disable(struct alpha_pll_clk *pll)
+{
+ u32 ena;
+
+ ena = readl_relaxed(VOTE_REG(pll));
+ ena &= ~pll->fsm_en_mask;
+ writel_relaxed(ena, VOTE_REG(pll));
+}
+
+static void __alpha_pll_disable(struct alpha_pll_clk *pll)
+{
+ u32 mode;
+
+ mode = readl_relaxed(MODE_REG(pll));
+ mode &= ~PLL_OUTCTRL;
+ writel_relaxed(mode, MODE_REG(pll));
+
+ /* Delay of 2 output clock ticks required until output is disabled */
+ mb();
+ udelay(1);
+
+ mode &= ~(PLL_BYPASSNL | PLL_RESET_N);
+ writel_relaxed(mode, MODE_REG(pll));
+}
+
+static void alpha_pll_disable(struct clk *c)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+ if (pll->fsm_en_mask)
+ __alpha_pll_vote_disable(pll);
+ else
+ __alpha_pll_disable(pll);
+ spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+}
+
+static void dyna_alpha_pll_disable(struct clk *c)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ unsigned long flags;
+
+ spin_lock_irqsave(&alpha_pll_reg_lock, flags);
+ if (pll->fsm_en_mask)
+ __alpha_pll_vote_disable(pll);
+ else
+ __alpha_pll_disable(pll);
+
+ spin_unlock_irqrestore(&alpha_pll_reg_lock, flags);
+}
+
+static u32 find_vco(struct alpha_pll_clk *pll, unsigned long rate)
+{
+ unsigned long i;
+ struct alpha_pll_vco_tbl *v = pll->vco_tbl;
+
+ for (i = 0; i < pll->num_vco; i++) {
+ if (rate >= v[i].min_freq && rate <= v[i].max_freq)
+ return v[i].vco_val;
+ }
+
+ return -EINVAL;
+}
+
+static unsigned long __calc_values(struct alpha_pll_clk *pll,
+ unsigned long rate, int *l_val, u64 *a_val, bool round_up)
+{
+ u32 parent_rate;
+ u64 remainder;
+ u64 quotient;
+ unsigned long freq_hz;
+ int alpha_bw = ALPHA_BITWIDTH;
+
+ parent_rate = clk_get_rate(pll->c.parent);
+ quotient = rate;
+ remainder = do_div(quotient, parent_rate);
+ *l_val = quotient;
+
+ if (!remainder) {
+ *a_val = 0;
+ return rate;
+ }
+
+ if (pll->is_fabia)
+ alpha_bw = FABIA_ALPHA_BITWIDTH;
+
+ /* Upper ALPHA_BITWIDTH bits of Alpha */
+ quotient = remainder << alpha_bw;
+ remainder = do_div(quotient, parent_rate);
+
+ if (remainder && round_up)
+ quotient++;
+
+ *a_val = quotient;
+ freq_hz = compute_rate(pll, *l_val, *a_val);
+ return freq_hz;
+}
+
+static unsigned long round_rate_down(struct alpha_pll_clk *pll,
+ unsigned long rate, int *l_val, u64 *a_val)
+{
+ return __calc_values(pll, rate, l_val, a_val, false);
+}
+
+static unsigned long round_rate_up(struct alpha_pll_clk *pll,
+ unsigned long rate, int *l_val, u64 *a_val)
+{
+ return __calc_values(pll, rate, l_val, a_val, true);
+}
+
+static bool dynamic_update_finish(struct alpha_pll_clk *pll)
+{
+ u32 reg = readl_relaxed(UPDATE_REG(pll));
+ u32 mask = pll->masks->update_mask;
+
+ return (reg & mask) == 0;
+}
+
+static int wait_for_dynamic_update(struct alpha_pll_clk *pll)
+{
+ int count;
+
+ for (count = WAIT_MAX_LOOPS; count > 0; count--) {
+ if (dynamic_update_finish(pll))
+ break;
+ udelay(1);
+ }
+
+ if (!count) {
+ pr_err("%s didn't latch after updating it!\n", pll->c.dbg_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dyna_alpha_pll_dynamic_update(struct alpha_pll_clk *pll)
+{
+ struct alpha_pll_masks *masks = pll->masks;
+ u32 regval;
+ int rc;
+
+ regval = readl_relaxed(UPDATE_REG(pll));
+ regval |= masks->update_mask;
+ writel_relaxed(regval, UPDATE_REG(pll));
+
+ rc = wait_for_dynamic_update(pll);
+ if (rc < 0)
+ return rc;
+
+ /*
+ * HPG mandates a wait of at least 570ns before polling the LOCK
+ * detect bit. Have a delay of 1us just to be safe.
+ */
+ mb();
+ udelay(1);
+
+ rc = wait_for_update(pll);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static int alpha_pll_set_rate(struct clk *c, unsigned long rate);
+static int dyna_alpha_pll_set_rate(struct clk *c, unsigned long rate)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ unsigned long freq_hz, flags;
+ u32 l_val, vco_val;
+ u64 a_val;
+ int ret;
+
+ freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+ if (freq_hz != rate) {
+ pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ vco_val = find_vco(pll, freq_hz);
+
+ /*
+ * Dynamic pll update will not support switching frequencies across
+ * vco ranges. In those cases fall back to normal alpha set rate.
+ */
+ if (pll->current_vco_val != vco_val) {
+ ret = alpha_pll_set_rate(c, rate);
+ if (!ret)
+ pll->current_vco_val = vco_val;
+ else
+ return ret;
+ return 0;
+ }
+
+ spin_lock_irqsave(&c->lock, flags);
+
+ a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+ writel_relaxed(l_val, L_REG(pll));
+ __iowrite32_copy(A_REG(pll), &a_val, 2);
+
+ /* Ensure that the write above goes through before proceeding. */
+ mb();
+
+ if (c->count)
+ dyna_alpha_pll_dynamic_update(pll);
+
+ spin_unlock_irqrestore(&c->lock, flags);
+ return 0;
+}
+
+/*
+ * Slewing plls should be bought up at frequency which is in the middle of the
+ * desired VCO range. So after bringing up the pll at calibration freq, set it
+ * back to desired frequency(that was set by previous clk_set_rate).
+ */
+static int __calibrate_alpha_pll(struct alpha_pll_clk *pll)
+{
+ unsigned long calibration_freq, freq_hz;
+ struct alpha_pll_vco_tbl *vco_tbl = pll->vco_tbl;
+ u64 a_val;
+ u32 l_val, vco_val;
+ int rc;
+
+ vco_val = find_vco(pll, pll->c.rate);
+ if (IS_ERR_VALUE(vco_val)) {
+ pr_err("alpha pll: not in a valid vco range\n");
+ return -EINVAL;
+ }
+ /*
+ * As during slewing plls vco_sel won't be allowed to change, vco table
+ * should have only one entry table, i.e. index = 0, find the
+ * calibration frequency.
+ */
+ calibration_freq = (vco_tbl[0].min_freq +
+ vco_tbl[0].max_freq)/2;
+
+ freq_hz = round_rate_up(pll, calibration_freq, &l_val, &a_val);
+ if (freq_hz != calibration_freq) {
+ pr_err("alpha_pll: call clk_set_rate with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ setup_alpha_pll_values(a_val, l_val, vco_tbl->vco_val, pll);
+
+ /* Bringup the pll at calibration frequency */
+ rc = __alpha_pll_enable(pll, false);
+ if (rc) {
+ pr_err("alpha pll calibration failed\n");
+ return rc;
+ }
+
+ /*
+ * PLL is already running at calibration frequency.
+ * So slew pll to the previously set frequency.
+ */
+ pr_debug("pll %s: setting back to required rate %lu\n", pll->c.dbg_name,
+ pll->c.rate);
+ freq_hz = round_rate_up(pll, pll->c.rate, &l_val, &a_val);
+ setup_alpha_pll_values(a_val, l_val, UINT_MAX, pll);
+ dyna_alpha_pll_dynamic_update(pll);
+
+ return 0;
+}
+
+static int alpha_pll_dynamic_update(struct alpha_pll_clk *pll)
+{
+ u32 regval;
+
+ /* Latch the input to the PLL */
+ regval = readl_relaxed(MODE_REG(pll));
+ regval |= pll->masks->update_mask;
+ writel_relaxed(regval, MODE_REG(pll));
+
+ /* Wait for 2 reference cycle before checking ACK bit */
+ udelay(1);
+ if (!(readl_relaxed(MODE_REG(pll)) & ALPHA_PLL_ACK_LATCH)) {
+ WARN(1, "%s: PLL latch failed. Output may be unstable!\n",
+ pll->c.dbg_name);
+ return -EINVAL;
+ }
+
+ /* Return latch input to 0 */
+ regval = readl_relaxed(MODE_REG(pll));
+ regval &= ~pll->masks->update_mask;
+ writel_relaxed(regval, MODE_REG(pll));
+
+ /* Wait for PLL output to stabilize */
+ udelay(100);
+
+ return 0;
+}
+
+static int alpha_pll_set_rate(struct clk *c, unsigned long rate)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ struct alpha_pll_masks *masks = pll->masks;
+ unsigned long flags, freq_hz;
+ u32 regval, l_val;
+ int vco_val;
+ u64 a_val;
+
+ freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+ if (freq_hz != rate) {
+ pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
+ return -EINVAL;
+ }
+
+ vco_val = find_vco(pll, freq_hz);
+ if (IS_ERR_VALUE(vco_val)) {
+ pr_err("alpha pll: not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ /*
+ * For PLLs that do not support dynamic programming (dynamic_update
+ * is not set), ensure PLL is off before changing rate. For
+ * optimization reasons, assume no downstream clock is actively
+ * using it.
+ */
+ spin_lock_irqsave(&c->lock, flags);
+ if (c->count && !pll->dynamic_update)
+ c->ops->disable(c);
+
+ a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+ writel_relaxed(l_val, L_REG(pll));
+ __iowrite32_copy(A_REG(pll), &a_val, 2);
+
+ if (masks->vco_mask) {
+ regval = readl_relaxed(VCO_REG(pll));
+ regval &= ~(masks->vco_mask << masks->vco_shift);
+ regval |= vco_val << masks->vco_shift;
+ writel_relaxed(regval, VCO_REG(pll));
+ }
+
+ regval = readl_relaxed(ALPHA_EN_REG(pll));
+ regval |= masks->alpha_en_mask;
+ writel_relaxed(regval, ALPHA_EN_REG(pll));
+
+ if (c->count && pll->dynamic_update)
+ alpha_pll_dynamic_update(pll);
+
+ if (c->count && !pll->dynamic_update)
+ c->ops->enable(c);
+
+ spin_unlock_irqrestore(&c->lock, flags);
+ return 0;
+}
+
+static long alpha_pll_round_rate(struct clk *c, unsigned long rate)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ struct alpha_pll_vco_tbl *v = pll->vco_tbl;
+ int ret;
+ u32 l_val;
+ unsigned long freq_hz;
+ u64 a_val;
+ int i;
+
+ if (pll->no_prepared_reconfig && c->prepare_count)
+ return -EINVAL;
+
+ freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+ if (rate < pll->min_supported_freq)
+ return pll->min_supported_freq;
+ if (pll->is_fabia)
+ return freq_hz;
+
+ ret = find_vco(pll, freq_hz);
+ if (!IS_ERR_VALUE(ret))
+ return freq_hz;
+
+ freq_hz = 0;
+ for (i = 0; i < pll->num_vco; i++) {
+ if (is_better_rate(rate, freq_hz, v[i].min_freq))
+ freq_hz = v[i].min_freq;
+ if (is_better_rate(rate, freq_hz, v[i].max_freq))
+ freq_hz = v[i].max_freq;
+ }
+ if (!freq_hz)
+ return -EINVAL;
+ return freq_hz;
+}
+
+static void update_vco_tbl(struct alpha_pll_clk *pll)
+{
+ int i, l_val;
+ u64 a_val;
+ unsigned long hz;
+
+ /* Round vco limits to valid rates */
+ for (i = 0; i < pll->num_vco; i++) {
+ hz = round_rate_up(pll, pll->vco_tbl[i].min_freq, &l_val,
+ &a_val);
+ pll->vco_tbl[i].min_freq = hz;
+
+ hz = round_rate_down(pll, pll->vco_tbl[i].max_freq, &l_val,
+ &a_val);
+ pll->vco_tbl[i].max_freq = hz;
+ }
+}
+
+/*
+ * Program bias count to be 0x6 (corresponds to 5us), and lock count
+ * bits to 0 (check lock_det for locking).
+ */
+static void __set_fsm_mode(void __iomem *mode_reg)
+{
+ u32 regval = readl_relaxed(mode_reg);
+
+ /* De-assert reset to FSM */
+ regval &= ~BIT(21);
+ writel_relaxed(regval, mode_reg);
+
+ /* Program bias count */
+ regval &= ~BM(19, 14);
+ regval |= BVAL(19, 14, 0x6);
+ writel_relaxed(regval, mode_reg);
+
+ /* Program lock count */
+ regval &= ~BM(13, 8);
+ regval |= BVAL(13, 8, 0x0);
+ writel_relaxed(regval, mode_reg);
+
+ /* Enable PLL FSM voting */
+ regval |= BIT(20);
+ writel_relaxed(regval, mode_reg);
+}
+
+static bool is_fsm_mode(void __iomem *mode_reg)
+{
+ return !!(readl_relaxed(mode_reg) & BIT(20));
+}
+
+void __init_alpha_pll(struct clk *c)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ struct alpha_pll_masks *masks = pll->masks;
+ u32 regval;
+
+ if (pll->config_ctl_val)
+ writel_relaxed(pll->config_ctl_val, CONFIG_CTL_REG(pll));
+
+ if (masks->output_mask && pll->enable_config) {
+ regval = readl_relaxed(OUTPUT_REG(pll));
+ regval &= ~masks->output_mask;
+ regval |= pll->enable_config;
+ writel_relaxed(regval, OUTPUT_REG(pll));
+ }
+
+ if (masks->post_div_mask) {
+ regval = readl_relaxed(USER_CTL_LO_REG(pll));
+ regval &= ~masks->post_div_mask;
+ regval |= pll->post_div_config;
+ writel_relaxed(regval, USER_CTL_LO_REG(pll));
+ }
+
+ if (pll->slew) {
+ regval = readl_relaxed(USER_CTL_HI_REG(pll));
+ regval &= ~PLL_LATCH_INTERFACE;
+ writel_relaxed(regval, USER_CTL_HI_REG(pll));
+ }
+
+ if (masks->test_ctl_lo_mask) {
+ regval = readl_relaxed(TEST_CTL_LO_REG(pll));
+ regval &= ~masks->test_ctl_lo_mask;
+ regval |= pll->test_ctl_lo_val;
+ writel_relaxed(regval, TEST_CTL_LO_REG(pll));
+ }
+
+ if (masks->test_ctl_hi_mask) {
+ regval = readl_relaxed(TEST_CTL_HI_REG(pll));
+ regval &= ~masks->test_ctl_hi_mask;
+ regval |= pll->test_ctl_hi_val;
+ writel_relaxed(regval, TEST_CTL_HI_REG(pll));
+ }
+
+ if (pll->fsm_en_mask)
+ __set_fsm_mode(MODE_REG(pll));
+
+ pll->inited = true;
+}
+
+static enum handoff alpha_pll_handoff(struct clk *c)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+ struct alpha_pll_masks *masks = pll->masks;
+ u64 a_val;
+ u32 alpha_en, l_val, regval;
+
+ /* Set the PLL_HW_UPDATE_LOGIC_BYPASS bit before continuing */
+ if (pll->dynamic_update) {
+ regval = readl_relaxed(MODE_REG(pll));
+ regval |= ALPHA_PLL_HW_UPDATE_LOGIC_BYPASS;
+ writel_relaxed(regval, MODE_REG(pll));
+ }
+
+ update_vco_tbl(pll);
+
+ if (!is_locked(pll)) {
+ if (c->rate && alpha_pll_set_rate(c, c->rate))
+ WARN(1, "%s: Failed to configure rate\n", c->dbg_name);
+ __init_alpha_pll(c);
+ return HANDOFF_DISABLED_CLK;
+ } else if (pll->fsm_en_mask && !is_fsm_mode(MODE_REG(pll))) {
+ WARN(1, "%s should be in FSM mode but is not\n", c->dbg_name);
+ }
+
+ l_val = readl_relaxed(L_REG(pll));
+ /* read u64 in two steps to satisfy alignment constraint */
+ a_val = readl_relaxed(A_REG(pll) + 0x4);
+ a_val = a_val << 32 | readl_relaxed(A_REG(pll));
+ /* get upper 32 bits */
+ a_val = a_val >> (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+ alpha_en = readl_relaxed(ALPHA_EN_REG(pll));
+ alpha_en &= masks->alpha_en_mask;
+ if (!alpha_en)
+ a_val = 0;
+
+ c->rate = compute_rate(pll, l_val, a_val);
+
+ /*
+ * Unconditionally vote for the PLL; it might be on because of
+ * another master's vote.
+ */
+ if (pll->fsm_en_mask)
+ __alpha_pll_vote_enable(pll);
+
+ return HANDOFF_ENABLED_CLK;
+}
+
+static void __iomem *alpha_pll_list_registers(struct clk *clk, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct alpha_pll_clk *pll = to_alpha_pll_clk(clk);
+ static struct clk_register_data data[] = {
+ {"PLL_MODE", 0x0},
+ {"PLL_L_VAL", 0x4},
+ {"PLL_ALPHA_VAL", 0x8},
+ {"PLL_ALPHA_VAL_U", 0xC},
+ {"PLL_USER_CTL", 0x10},
+ {"PLL_CONFIG_CTL", 0x18},
+ };
+
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data;
+ *size = ARRAY_SIZE(data);
+ return MODE_REG(pll);
+}
+
+/*
+ * Hardware enable sequence for a Fabia PLL: gate outputs, park in
+ * STANDBY, release reset, switch to RUN, wait for lock, then ungate the
+ * main output and the mode-level output control. The statement order is
+ * mandated by the Fabia programming sequence; the mb() calls keep the
+ * relaxed MMIO writes from being reordered across sequence boundaries.
+ * Caller must hold alpha_pll_reg_lock. Returns 0 on success or the
+ * negative error from wait_for_update() if the PLL fails to lock.
+ */
+static int __fabia_alpha_pll_enable(struct alpha_pll_clk *pll)
+{
+	int rc;
+	u32 mode;
+
+	/* Disable PLL output */
+	mode = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Set operation mode to STANDBY */
+	writel_relaxed(FABIA_PLL_STANDBY, FABIA_PLL_OPMODE(pll));
+
+	/* PLL should be in STANDBY mode before continuing */
+	mb();
+
+	/* Bring PLL out of reset */
+	mode = readl_relaxed(MODE_REG(pll));
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Set operation mode to RUN */
+	writel_relaxed(FABIA_PLL_RUN, FABIA_PLL_OPMODE(pll));
+
+	rc = wait_for_update(pll);
+	if (rc < 0)
+		return rc;
+
+	/* Enable the main PLL output */
+	mode = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+	mode |= FABIA_PLL_OUT_MAIN;
+	writel_relaxed(mode, FABIA_USER_CTL_LO_REG(pll));
+
+	/* Enable PLL outputs */
+	mode = readl_relaxed(MODE_REG(pll));
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+	return 0;
+}
+
+/*
+ * Enable a Fabia PLL under the shared alpha PLL register lock: PLLs
+ * managed by the voting FSM are enabled via a vote, everything else via
+ * the direct Fabia register sequence.
+ */
+static int fabia_alpha_pll_enable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long irq_flags;
+	int ret;
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, irq_flags);
+	if (!pll->fsm_en_mask)
+		ret = __fabia_alpha_pll_enable(pll);
+	else
+		ret = __alpha_pll_vote_enable(pll);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, irq_flags);
+
+	return ret;
+}
+
+/*
+ * Hardware disable sequence for a Fabia PLL: gate the mode-level output
+ * control, gate the main output, then park the PLL in STANDBY. Mirrors
+ * (in reverse) the enable sequence above. Caller must hold
+ * alpha_pll_reg_lock.
+ */
+static void __fabia_alpha_pll_disable(struct alpha_pll_clk *pll)
+{
+	u32 mode;
+
+	/* Disable PLL outputs */
+	mode = readl_relaxed(MODE_REG(pll));
+	mode &= ~PLL_OUTCTRL;
+	writel_relaxed(mode, MODE_REG(pll));
+
+	/* Disable the main PLL output */
+	mode = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+	mode &= ~FABIA_PLL_OUT_MAIN;
+	writel_relaxed(mode, FABIA_USER_CTL_LO_REG(pll));
+
+	/* Place the PLL mode in STANDBY */
+	writel_relaxed(FABIA_PLL_STANDBY, FABIA_PLL_OPMODE(pll));
+}
+
+/*
+ * Disable a Fabia PLL under the shared alpha PLL register lock, either
+ * by dropping this master's FSM vote or by the direct register sequence.
+ */
+static void fabia_alpha_pll_disable(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&alpha_pll_reg_lock, irq_flags);
+	if (!pll->fsm_en_mask)
+		__fabia_alpha_pll_disable(pll);
+	else
+		__alpha_pll_vote_disable(pll);
+	spin_unlock_irqrestore(&alpha_pll_reg_lock, irq_flags);
+}
+
+/*
+ * Program a Fabia PLL to @rate. The caller is expected to pass a rate
+ * previously rounded by alpha_pll_round_rate(); anything outside
+ * [rate, rate + FABIA_RATE_MARGIN] is rejected. The L/CAL_L/FRAC
+ * registers are written under c->lock and latched with a dynamic
+ * update so the change takes effect glitch-free while running.
+ */
+static int fabia_alpha_pll_set_rate(struct clk *c, unsigned long rate)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	unsigned long flags, freq_hz;
+	u32 l_val;
+	u64 a_val;
+
+	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
+	if (freq_hz > rate + FABIA_RATE_MARGIN || freq_hz < rate) {
+		pr_err("%s: Call clk_set_rate with rounded rates!\n",
+				c->dbg_name);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&c->lock, flags);
+	/* Set the new L value */
+	writel_relaxed(l_val, FABIA_L_REG(pll));
+	/*
+	 * pll_cal_l_val is set to pll_l_val on MOST targets. Set it
+	 * explicitly here for PLL out-of-reset calibration to work
+	 * without a glitch on all of them.
+	 */
+	writel_relaxed(l_val, FABIA_CAL_L_VAL(pll));
+	writel_relaxed(a_val, FABIA_FRAC_REG(pll));
+
+	alpha_pll_dynamic_update(pll);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+/*
+ * One-time static configuration of a Fabia PLL: CONFIG_CTL, output
+ * enables, post divider, slew/latch-interface, TEST_CTL trims and
+ * (optionally) FSM voting mode. Each write is gated on the
+ * corresponding mask/value being provided, so board data decides which
+ * fields are touched. Sets pll->inited when done.
+ */
+void __init_fabia_alpha_pll(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	struct alpha_pll_masks *masks = pll->masks;
+	u32 regval;
+
+	if (pll->config_ctl_val)
+		writel_relaxed(pll->config_ctl_val, FABIA_CONFIG_CTL_REG(pll));
+
+	/* Select which PLL outputs are enabled. */
+	if (masks->output_mask && pll->enable_config) {
+		regval = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+		regval &= ~masks->output_mask;
+		regval |= pll->enable_config;
+		writel_relaxed(regval, FABIA_USER_CTL_LO_REG(pll));
+	}
+
+	/* Program the static post divider, if one is configured. */
+	if (masks->post_div_mask) {
+		regval = readl_relaxed(FABIA_USER_CTL_LO_REG(pll));
+		regval &= ~masks->post_div_mask;
+		regval |= pll->post_div_config;
+		writel_relaxed(regval, FABIA_USER_CTL_LO_REG(pll));
+	}
+
+	/* Slewing PLLs must not use the latch interface. */
+	if (pll->slew) {
+		regval = readl_relaxed(FABIA_USER_CTL_HI_REG(pll));
+		regval &= ~PLL_LATCH_INTERFACE;
+		writel_relaxed(regval, FABIA_USER_CTL_HI_REG(pll));
+	}
+
+	if (masks->test_ctl_lo_mask) {
+		regval = readl_relaxed(FABIA_TEST_CTL_LO_REG(pll));
+		regval &= ~masks->test_ctl_lo_mask;
+		regval |= pll->test_ctl_lo_val;
+		writel_relaxed(regval, FABIA_TEST_CTL_LO_REG(pll));
+	}
+
+	if (masks->test_ctl_hi_mask) {
+		regval = readl_relaxed(FABIA_TEST_CTL_HI_REG(pll));
+		regval &= ~masks->test_ctl_hi_mask;
+		regval |= pll->test_ctl_hi_val;
+		writel_relaxed(regval, FABIA_TEST_CTL_HI_REG(pll));
+	}
+
+	/* Hand enable/disable control to the voting FSM, if used. */
+	if (pll->fsm_en_mask)
+		__set_fsm_mode(MODE_REG(pll));
+
+	pll->inited = true;
+}
+
+/*
+ * Boot handoff for a Fabia PLL: decide whether the bootloader left the
+ * PLL locked, and if so recover its current rate from the L/FRAC
+ * registers; otherwise program the boot rate and initialize the PLL.
+ */
+static enum handoff fabia_alpha_pll_handoff(struct clk *c)
+{
+	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
+	u64 a_val;
+	u32 l_val, regval;
+
+	/* Set the PLL_HW_UPDATE_LOGIC_BYPASS bit before continuing */
+	regval = readl_relaxed(MODE_REG(pll));
+	regval |= ALPHA_PLL_HW_UPDATE_LOGIC_BYPASS;
+	writel_relaxed(regval, MODE_REG(pll));
+
+	/* Set the PLL_RESET_N bit to place the PLL in STANDBY from OFF */
+	regval |= PLL_RESET_N;
+	writel_relaxed(regval, MODE_REG(pll));
+
+	if (!is_locked(pll)) {
+		if (c->rate && fabia_alpha_pll_set_rate(c, c->rate))
+			WARN(1, "%s: Failed to configure rate\n", c->dbg_name);
+		/*
+		 * NOTE(review): this calls the generic __init_alpha_pll()
+		 * even though __init_fabia_alpha_pll() exists for Fabia
+		 * PLLs - confirm the generic init is intended here.
+		 */
+		__init_alpha_pll(c);
+		return HANDOFF_DISABLED_CLK;
+	} else if (pll->fsm_en_mask && !is_fsm_mode(MODE_REG(pll))) {
+		WARN(1, "%s should be in FSM mode but is not\n", c->dbg_name);
+	}
+
+	/* PLL is locked: reconstruct the rate from L and fractional vals. */
+	l_val = readl_relaxed(FABIA_L_REG(pll));
+	a_val = readl_relaxed(FABIA_FRAC_REG(pll));
+
+	c->rate = compute_rate(pll, l_val, a_val);
+
+	/*
+	 * Unconditionally vote for the PLL; it might be on because of
+	 * another master's vote.
+	 */
+	if (pll->fsm_en_mask)
+		__alpha_pll_vote_enable(pll);
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+/* SW-controlled alpha PLL with runtime rate setting. */
+struct clk_ops clk_ops_alpha_pll = {
+	.enable = alpha_pll_enable,
+	.disable = alpha_pll_disable,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = alpha_pll_set_rate,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+/* Alpha PLL whose enable/disable is driven through the HW FSM. */
+struct clk_ops clk_ops_alpha_pll_hwfsm = {
+	.enable = alpha_pll_enable_hwfsm,
+	.disable = alpha_pll_disable_hwfsm,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = alpha_pll_set_rate,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+/* Fixed-rate alpha PLL: no round_rate/set_rate support. */
+struct clk_ops clk_ops_fixed_alpha_pll = {
+	.enable = alpha_pll_enable,
+	.disable = alpha_pll_disable,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+/* Fixed-rate Fabia variant. */
+struct clk_ops clk_ops_fixed_fabia_alpha_pll = {
+	.enable = fabia_alpha_pll_enable,
+	.disable = fabia_alpha_pll_disable,
+	.handoff = fabia_alpha_pll_handoff,
+};
+
+/* Rate-settable Fabia variant. */
+struct clk_ops clk_ops_fabia_alpha_pll = {
+	.enable = fabia_alpha_pll_enable,
+	.disable = fabia_alpha_pll_disable,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = fabia_alpha_pll_set_rate,
+	.handoff = fabia_alpha_pll_handoff,
+};
+
+/* Alpha PLL supporting dynamic (glitch-free) rate updates. */
+struct clk_ops clk_ops_dyna_alpha_pll = {
+	.enable = dyna_alpha_pll_enable,
+	.disable = dyna_alpha_pll_disable,
+	.round_rate = alpha_pll_round_rate,
+	.set_rate = dyna_alpha_pll_set_rate,
+	.handoff = alpha_pll_handoff,
+	.list_registers = alpha_pll_list_registers,
+};
+
+static struct alpha_pll_masks masks_20nm_p = {
+ .lock_mask = BIT(31),
+ .active_mask = BIT(30),
+ .vco_mask = BM(21, 20) >> 20,
+ .vco_shift = 20,
+ .alpha_en_mask = BIT(24),
+ .output_mask = 0xF,
+ .post_div_mask = 0xF00,
+};
+
+static struct alpha_pll_vco_tbl vco_20nm_p[] = {
+ VCO(3, 250000000, 500000000),
+ VCO(2, 500000000, 1000000000),
+ VCO(1, 1000000000, 1500000000),
+ VCO(0, 1500000000, 2000000000),
+};
+
+static struct alpha_pll_masks masks_20nm_t = {
+ .lock_mask = BIT(31),
+ .alpha_en_mask = BIT(24),
+ .output_mask = 0xf,
+};
+
+static struct alpha_pll_vco_tbl vco_20nm_t[] = {
+ VCO(0, 500000000, 1250000000),
+};
+
+/*
+ * Common DT parsing for all alpha PLL flavors: allocates the clock,
+ * reads the register base offset and optional post divider, and picks
+ * masks/VCO tables by compatible string (20nm "p" vs "t" variants).
+ * Memory is devm-managed. Returns the clock or an ERR_PTR.
+ */
+static struct alpha_pll_clk *alpha_pll_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct alpha_pll_clk *pll;
+	struct msmclk_data *drv;
+
+	pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+	if (!pll) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (of_property_read_u32(np, "qcom,base-offset", &pll->offset)) {
+		dt_err(np, "missing qcom,base-offset\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Optional property */
+	of_property_read_u32(np, "qcom,post-div-config",
+					&pll->post_div_config);
+
+	pll->masks = devm_kzalloc(dev, sizeof(*pll->masks), GFP_KERNEL);
+	if (!pll->masks) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Select the register layout and VCO ranges for this PLL type. */
+	if (of_device_is_compatible(np, "qcom,fixed-alpha-pll-20p") ||
+		of_device_is_compatible(np, "qcom,alpha-pll-20p")) {
+		*pll->masks = masks_20nm_p;
+		pll->vco_tbl = vco_20nm_p;
+		pll->num_vco = ARRAY_SIZE(vco_20nm_p);
+	} else if (of_device_is_compatible(np, "qcom,fixed-alpha-pll-20t") ||
+		of_device_is_compatible(np, "qcom,alpha-pll-20t")) {
+		*pll->masks = masks_20nm_t;
+		pll->vco_tbl = vco_20nm_t;
+		pll->num_vco = ARRAY_SIZE(vco_20nm_t);
+	} else {
+		dt_err(np, "unexpected compatible string\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* The parent node supplies the ioremapped register base. */
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	pll->base = &drv->base;
+	return pll;
+}
+
+/*
+ * DT parser for variable-rate alpha PLLs: builds on the common parser
+ * and registers the clock with the full (rate-settable) alpha PLL ops.
+ */
+static void *variable_rate_alpha_pll_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct alpha_pll_clk *pll;
+
+	pll = alpha_pll_dt_parser(dev, np);
+	if (IS_ERR(pll))
+		return pll;
+
+	/* Optional Property */
+	of_property_read_u32(np, "qcom,output-enable", &pll->enable_config);
+
+	pll->c.ops = &clk_ops_alpha_pll;
+	return msmclk_generic_clk_init(dev, np, &pll->c);
+}
+
+/*
+ * DT parser for fixed-rate alpha PLLs: requires the configured rate and
+ * output-enable mask, optionally enables FSM voting mode (fsm-en-bit
+ * and fsm-en-offset must be given together), and registers the clock
+ * with the fixed-rate ops.
+ */
+static void *fixed_rate_alpha_pll_dt_parser(struct device *dev,
+						struct device_node *np)
+{
+	struct alpha_pll_clk *pll;
+	int rc;
+	u32 val;
+
+	pll = alpha_pll_dt_parser(dev, np);
+	if (IS_ERR(pll))
+		return pll;
+
+	rc = of_property_read_u32(np, "qcom,pll-config-rate", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,pll-config-rate\n");
+		return ERR_PTR(-EINVAL);
+	}
+	pll->c.rate = val;
+
+	rc = of_property_read_u32(np, "qcom,output-enable",
+					&pll->enable_config);
+	if (rc) {
+		dt_err(np, "missing qcom,output-enable\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Optional Property */
+	rc = of_property_read_u32(np, "qcom,fsm-en-bit", &val);
+	if (!rc) {
+		/* fsm-en-offset is mandatory once fsm-en-bit is present. */
+		rc = of_property_read_u32(np, "qcom,fsm-en-offset",
+						&pll->fsm_reg_offset);
+		if (rc) {
+			dt_err(np, "missing qcom,fsm-en-offset\n");
+			return ERR_PTR(-EINVAL);
+		}
+		pll->fsm_en_mask = BIT(val);
+	}
+
+	pll->c.ops = &clk_ops_fixed_alpha_pll;
+	return msmclk_generic_clk_init(dev, np, &pll->c);
+}
+
+MSMCLK_PARSER(fixed_rate_alpha_pll_dt_parser, "qcom,fixed-alpha-pll-20p", 0);
+MSMCLK_PARSER(fixed_rate_alpha_pll_dt_parser, "qcom,fixed-alpha-pll-20t", 1);
+MSMCLK_PARSER(variable_rate_alpha_pll_dt_parser, "qcom,alpha-pll-20p", 0);
+MSMCLK_PARSER(variable_rate_alpha_pll_dt_parser, "qcom,alpha-pll-20t", 1);
diff --git a/drivers/clk/msm/clock-cpu-8996.c b/drivers/clk/msm/clock-cpu-8996.c
new file mode 100644
index 000000000000..7e03a599fccc
--- /dev/null
+++ b/drivers/clk/msm/clock-cpu-8996.c
@@ -0,0 +1,1888 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
+
+#include <asm/cputype.h>
+
+#include <soc/qcom/scm.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-alpha-pll.h>
+#include <soc/qcom/kryo-l2-accessors.h>
+
+#include <dt-bindings/clock/msm-clocks-8996.h>
+
+#include "clock.h"
+#include "vdd-level-8996.h"
+
+enum {
+ APC0_PLL_BASE,
+ APC1_PLL_BASE,
+ CBF_PLL_BASE,
+ APC0_BASE,
+ APC1_BASE,
+ CBF_BASE,
+ EFUSE_BASE,
+ DEBUG_BASE,
+ NUM_BASES
+};
+
+static char *base_names[] = {
+ "pwrcl_pll",
+ "perfcl_pll",
+ "cbf_pll",
+ "pwrcl_mux",
+ "perfcl_mux",
+ "cbf_mux",
+ "efuse",
+ "debug",
+};
+
+static void *vbases[NUM_BASES];
+static bool cpu_clocks_v3;
+static bool cpu_clocks_pro;
+
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
+
+/* Power cluster primary PLL */
+#define C0_PLL_MODE 0x0
+#define C0_PLL_L_VAL 0x4
+#define C0_PLL_ALPHA 0x8
+#define C0_PLL_USER_CTL 0x10
+#define C0_PLL_CONFIG_CTL 0x18
+#define C0_PLL_CONFIG_CTL_HI 0x1C
+#define C0_PLL_STATUS 0x28
+#define C0_PLL_TEST_CTL_LO 0x20
+#define C0_PLL_TEST_CTL_HI 0x24
+
+/* Power cluster alt PLL */
+#define C0_PLLA_MODE 0x100
+#define C0_PLLA_L_VAL 0x104
+#define C0_PLLA_ALPHA 0x108
+#define C0_PLLA_USER_CTL 0x110
+#define C0_PLLA_CONFIG_CTL 0x118
+#define C0_PLLA_STATUS 0x128
+#define C0_PLLA_TEST_CTL_LO 0x120
+
+/* Perf cluster primary PLL */
+#define C1_PLL_MODE 0x0
+#define C1_PLL_L_VAL 0x4
+#define C1_PLL_ALPHA 0x8
+#define C1_PLL_USER_CTL 0x10
+#define C1_PLL_CONFIG_CTL 0x18
+#define C1_PLL_CONFIG_CTL_HI 0x1C
+#define C1_PLL_STATUS 0x28
+#define C1_PLL_TEST_CTL_LO 0x20
+#define C1_PLL_TEST_CTL_HI 0x24
+
+/* Perf cluster alt PLL */
+#define C1_PLLA_MODE 0x100
+#define C1_PLLA_L_VAL 0x104
+#define C1_PLLA_ALPHA 0x108
+#define C1_PLLA_USER_CTL 0x110
+#define C1_PLLA_CONFIG_CTL 0x118
+#define C1_PLLA_STATUS 0x128
+#define C1_PLLA_TEST_CTL_LO 0x120
+
+#define CBF_PLL_MODE 0x0
+#define CBF_PLL_L_VAL 0x8
+#define CBF_PLL_ALPHA 0x10
+#define CBF_PLL_USER_CTL 0x18
+#define CBF_PLL_CONFIG_CTL 0x20
+#define CBF_PLL_CONFIG_CTL_HI 0x24
+#define CBF_PLL_STATUS 0x28
+#define CBF_PLL_TEST_CTL_LO 0x30
+#define CBF_PLL_TEST_CTL_HI 0x34
+
+#define APC_DIAG_OFFSET 0x48
+#define MUX_OFFSET 0x40
+
+#define MDD_DROOP_CODE 0x7C
+
+DEFINE_EXT_CLK(xo_ao, NULL);
+DEFINE_CLK_DUMMY(alpha_xo_ao, 19200000);
+DEFINE_EXT_CLK(sys_apcsaux_clk_gcc, NULL);
+DEFINE_FIXED_SLAVE_DIV_CLK(sys_apcsaux_clk, 2, &sys_apcsaux_clk_gcc.c);
+
+#define L2ACDCR_REG 0x580ULL
+#define L2ACDTD_REG 0x581ULL
+#define L2ACDDVMRC_REG 0x584ULL
+#define L2ACDSSCR_REG 0x589ULL
+
+#define EFUSE_SHIFT 29
+#define EFUSE_MASK 0x7
+
+/* ACD static settings */
+static int acdtd_val_pwrcl = 0x00006A11;
+static int acdtd_val_perfcl = 0x00006A11;
+static int dvmrc_val = 0x000E0F0F;
+static int acdsscr_val = 0x00000601;
+static int acdcr_val_pwrcl = 0x002C5FFD;
+module_param(acdcr_val_pwrcl, int, 0444);
+static int acdcr_val_perfcl = 0x002C5FFD;
+module_param(acdcr_val_perfcl, int, 0444);
+int enable_acd = 1;
+module_param(enable_acd, int, 0444);
+
+#define WRITE_L2ACDCR(val) \
+ set_l2_indirect_reg(L2ACDCR_REG, (val))
+#define WRITE_L2ACDTD(val) \
+ set_l2_indirect_reg(L2ACDTD_REG, (val))
+#define WRITE_L2ACDDVMRC(val) \
+ set_l2_indirect_reg(L2ACDDVMRC_REG, (val))
+#define WRITE_L2ACDSSCR(val) \
+ set_l2_indirect_reg(L2ACDSSCR_REG, (val))
+
+#define READ_L2ACDCR(reg) \
+ (reg = get_l2_indirect_reg(L2ACDCR_REG))
+#define READ_L2ACDTD(reg) \
+ (reg = get_l2_indirect_reg(L2ACDTD_REG))
+#define READ_L2ACDDVMRC(reg) \
+ (reg = get_l2_indirect_reg(L2ACDDVMRC_REG))
+#define READ_L2ACDSSCR(reg) \
+ (reg = get_l2_indirect_reg(L2ACDSSCR_REG))
+
+static struct pll_clk perfcl_pll = {
+ .mode_reg = (void __iomem *)C1_PLL_MODE,
+ .l_reg = (void __iomem *)C1_PLL_L_VAL,
+ .alpha_reg = (void __iomem *)C1_PLL_ALPHA,
+ .config_reg = (void __iomem *)C1_PLL_USER_CTL,
+ .config_ctl_reg = (void __iomem *)C1_PLL_CONFIG_CTL,
+ .config_ctl_hi_reg = (void __iomem *)C1_PLL_CONFIG_CTL_HI,
+ .status_reg = (void __iomem *)C1_PLL_MODE,
+ .test_ctl_lo_reg = (void __iomem *)C1_PLL_TEST_CTL_LO,
+ .test_ctl_hi_reg = (void __iomem *)C1_PLL_TEST_CTL_HI,
+ .pgm_test_ctl_enable = true,
+ .init_test_ctl = true,
+ .masks = {
+ .pre_div_mask = BIT(12),
+ .post_div_mask = BM(9, 8),
+ .mn_en_mask = BIT(24),
+ .main_output_mask = BIT(0),
+ .early_output_mask = BIT(3),
+ .apc_pdn_mask = BIT(24),
+ .lock_mask = BIT(31),
+ },
+ .vals = {
+ .post_div_masked = 0x100,
+ .pre_div_masked = 0x0,
+ .test_ctl_hi_val = 0x00004000,
+ .test_ctl_lo_val = 0x04000000,
+ .config_ctl_val = 0x200D4AA8,
+ .config_ctl_hi_val = 0x002,
+ },
+ .min_rate = 600000000,
+ .max_rate = 3000000000,
+ .src_rate = 19200000,
+ .base = &vbases[APC1_PLL_BASE],
+ .c = {
+ .always_on = true,
+ .parent = &xo_ao.c,
+ .dbg_name = "perfcl_pll",
+ .ops = &clk_ops_variable_rate_pll_hwfsm,
+ VDD_DIG_FMAX_MAP1(LOW, 3000000000),
+ CLK_INIT(perfcl_pll.c),
+ },
+};
+
+static struct alpha_pll_masks alt_pll_masks = {
+ .lock_mask = BIT(31),
+ .active_mask = BIT(30),
+ .vco_mask = BM(21, 20) >> 20,
+ .vco_shift = 20,
+ .alpha_en_mask = BIT(24),
+ .output_mask = 0xf,
+ .post_div_mask = 0xf00,
+};
+
+static struct alpha_pll_vco_tbl alt_pll_vco_modes[] = {
+ VCO(3, 250000000, 500000000),
+ VCO(2, 500000000, 750000000),
+ VCO(1, 750000000, 1000000000),
+ VCO(0, 1000000000, 2150400000),
+};
+
+static struct alpha_pll_clk perfcl_alt_pll = {
+ .masks = &alt_pll_masks,
+ .base = &vbases[APC1_PLL_BASE],
+ .offset = C1_PLLA_MODE,
+ .vco_tbl = alt_pll_vco_modes,
+ .num_vco = ARRAY_SIZE(alt_pll_vco_modes),
+ .enable_config = 0x9, /* Main and early outputs */
+ .post_div_config = 0x100, /* Div-2 */
+ .config_ctl_val = 0x4001051B,
+ .offline_bit_workaround = true,
+ .c = {
+ .always_on = true,
+ .parent = &alpha_xo_ao.c,
+ .dbg_name = "perfcl_alt_pll",
+ .ops = &clk_ops_alpha_pll_hwfsm,
+ CLK_INIT(perfcl_alt_pll.c),
+ },
+};
+
+static struct pll_clk pwrcl_pll = {
+ .mode_reg = (void __iomem *)C0_PLL_MODE,
+ .l_reg = (void __iomem *)C0_PLL_L_VAL,
+ .alpha_reg = (void __iomem *)C0_PLL_ALPHA,
+ .config_reg = (void __iomem *)C0_PLL_USER_CTL,
+ .config_ctl_reg = (void __iomem *)C0_PLL_CONFIG_CTL,
+ .config_ctl_hi_reg = (void __iomem *)C0_PLL_CONFIG_CTL_HI,
+ .status_reg = (void __iomem *)C0_PLL_MODE,
+ .test_ctl_lo_reg = (void __iomem *)C0_PLL_TEST_CTL_LO,
+ .test_ctl_hi_reg = (void __iomem *)C0_PLL_TEST_CTL_HI,
+ .pgm_test_ctl_enable = true,
+ .init_test_ctl = true,
+ .masks = {
+ .pre_div_mask = BIT(12),
+ .post_div_mask = BM(9, 8),
+ .mn_en_mask = BIT(24),
+ .main_output_mask = BIT(0),
+ .early_output_mask = BIT(3),
+ .apc_pdn_mask = BIT(24),
+ .lock_mask = BIT(31),
+ },
+ .vals = {
+ .post_div_masked = 0x100,
+ .pre_div_masked = 0x0,
+ .test_ctl_hi_val = 0x00004000,
+ .test_ctl_lo_val = 0x04000000,
+ .config_ctl_val = 0x200D4AA8,
+ .config_ctl_hi_val = 0x002,
+ },
+ .min_rate = 600000000,
+ .max_rate = 3000000000,
+ .src_rate = 19200000,
+ .base = &vbases[APC0_PLL_BASE],
+ .c = {
+ .always_on = true,
+ .parent = &xo_ao.c,
+ .dbg_name = "pwrcl_pll",
+ .ops = &clk_ops_variable_rate_pll_hwfsm,
+ VDD_DIG_FMAX_MAP1(LOW, 3000000000),
+ CLK_INIT(pwrcl_pll.c),
+ },
+};
+
+static struct alpha_pll_clk pwrcl_alt_pll = {
+ .masks = &alt_pll_masks,
+ .base = &vbases[APC0_PLL_BASE],
+ .offset = C0_PLLA_MODE,
+ .vco_tbl = alt_pll_vco_modes,
+ .num_vco = ARRAY_SIZE(alt_pll_vco_modes),
+ .enable_config = 0x9, /* Main and early outputs */
+ .post_div_config = 0x100, /* Div-2 */
+ .config_ctl_val = 0x4001051B,
+ .offline_bit_workaround = true,
+ .c = {
+ .always_on = true,
+ .dbg_name = "pwrcl_alt_pll",
+ .parent = &alpha_xo_ao.c,
+ .ops = &clk_ops_alpha_pll_hwfsm,
+ CLK_INIT(pwrcl_alt_pll.c),
+ },
+};
+
+static DEFINE_SPINLOCK(mux_reg_lock);
+
+DEFINE_FIXED_DIV_CLK(pwrcl_pll_main, 2, &pwrcl_pll.c);
+DEFINE_FIXED_DIV_CLK(perfcl_pll_main, 2, &perfcl_pll.c);
+
+/*
+ * Read-modify-write the mux select field under mux_reg_lock. Muxes with
+ * a non-NULL priv live in secure-only register space and must be
+ * accessed through SCM I/O calls instead of direct MMIO. A barrier and
+ * a hardware-mandated 5us delay let the switch settle before returning.
+ */
+static void __cpu_mux_set_sel(struct mux_clk *mux, int sel)
+{
+	u32 regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mux_reg_lock, flags);
+
+	if (mux->priv)
+		regval = scm_io_read(*(u32 *)mux->priv + mux->offset);
+	else
+		regval = readl_relaxed(*mux->base + mux->offset);
+
+	regval &= ~(mux->mask << mux->shift);
+	regval |= (sel & mux->mask) << mux->shift;
+
+	if (mux->priv)
+		scm_io_write(*(u32 *)mux->priv + mux->offset, regval);
+	else
+		writel_relaxed(regval, *mux->base + mux->offset);
+
+	spin_unlock_irqrestore(&mux_reg_lock, flags);
+
+	/* Ensure switch request goes through before returning */
+	mb();
+	/* Hardware mandated delay */
+	udelay(5);
+}
+
+/*
+ * clk_mux_ops.set_mux_sel hook; thin wrapper over __cpu_mux_set_sel().
+ * It is assumed that the mux enable state is locked in this function.
+ */
+static int cpu_mux_set_sel(struct mux_clk *mux, int sel)
+{
+	__cpu_mux_set_sel(mux, sel);
+
+	return 0;
+}
+
+/*
+ * Read back the currently selected source of a CPU mux, going through
+ * SCM I/O when the mux register is secure-only (priv set).
+ */
+static int cpu_mux_get_sel(struct mux_clk *mux)
+{
+	u32 val = mux->priv ?
+			scm_io_read(*(u32 *)mux->priv + mux->offset) :
+			readl_relaxed(*mux->base + mux->offset);
+
+	return (val >> mux->shift) & mux->mask;
+}
+
+/* CPU muxes are always on; enable/disable are intentional no-ops. */
+static int cpu_mux_enable(struct mux_clk *mux)
+{
+	return 0;
+}
+
+static void cpu_mux_disable(struct mux_clk *mux)
+{
+}
+
+/*
+ * clk_mux_ops.set_mux_sel hook for the debug mux; same register
+ * sequence as the CPU muxes. It is assumed that the mux enable state is
+ * locked in this function.
+ */
+static int cpu_debug_mux_set_sel(struct mux_clk *mux, int sel)
+{
+	__cpu_mux_set_sel(mux, sel);
+
+	return 0;
+}
+
+/* Return the debug mux's current source selection. */
+static int cpu_debug_mux_get_sel(struct mux_clk *mux)
+{
+	u32 reg = readl_relaxed(*mux->base + mux->offset);
+
+	return (reg >> mux->shift) & mux->mask;
+}
+
+/*
+ * Turn on the diagnostic clock outputs (APC_DIAG bits 11:8) on both
+ * clusters so the debug mux has valid inputs to measure, then wait for
+ * the enables to take effect. Always succeeds.
+ */
+static int cpu_debug_mux_enable(struct mux_clk *mux)
+{
+	u32 val;
+
+	/* Enable debug clocks */
+	val = readl_relaxed(vbases[APC0_BASE] + APC_DIAG_OFFSET);
+	val |= BM(11, 8);
+	writel_relaxed(val, vbases[APC0_BASE] + APC_DIAG_OFFSET);
+
+	val = readl_relaxed(vbases[APC1_BASE] + APC_DIAG_OFFSET);
+	val |= BM(11, 8);
+	writel_relaxed(val, vbases[APC1_BASE] + APC_DIAG_OFFSET);
+
+	/* Ensure enable request goes through for correct measurement*/
+	mb();
+	udelay(5);
+	return 0;
+}
+
+/* Turn the per-cluster diagnostic clock outputs back off. */
+static void cpu_debug_mux_disable(struct mux_clk *mux)
+{
+	u32 val;
+
+	/* Disable debug clocks */
+	val = readl_relaxed(vbases[APC0_BASE] + APC_DIAG_OFFSET);
+	val &= ~BM(11, 8);
+	writel_relaxed(val, vbases[APC0_BASE] + APC_DIAG_OFFSET);
+
+	val = readl_relaxed(vbases[APC1_BASE] + APC_DIAG_OFFSET);
+	val &= ~BM(11, 8);
+	writel_relaxed(val, vbases[APC1_BASE] + APC_DIAG_OFFSET);
+}
+
+static struct clk_mux_ops cpu_mux_ops = {
+ .enable = cpu_mux_enable,
+ .disable = cpu_mux_disable,
+ .set_mux_sel = cpu_mux_set_sel,
+ .get_mux_sel = cpu_mux_get_sel,
+};
+
+static struct clk_mux_ops cpu_debug_mux_ops = {
+ .enable = cpu_debug_mux_enable,
+ .disable = cpu_debug_mux_disable,
+ .set_mux_sel = cpu_debug_mux_set_sel,
+ .get_mux_sel = cpu_debug_mux_get_sel,
+};
+
+static struct mux_clk pwrcl_lf_mux = {
+ .offset = MUX_OFFSET,
+ MUX_SRC_LIST(
+ { &pwrcl_pll_main.c, 1 },
+ { &sys_apcsaux_clk.c, 3 },
+ ),
+ .ops = &cpu_mux_ops,
+ .mask = 0x3,
+ .shift = 2,
+ .base = &vbases[APC0_BASE],
+ .c = {
+ .dbg_name = "pwrcl_lf_mux",
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ .ops = &clk_ops_gen_mux,
+ CLK_INIT(pwrcl_lf_mux.c),
+ },
+};
+
+static struct mux_clk pwrcl_hf_mux = {
+ .offset = MUX_OFFSET,
+ MUX_SRC_LIST(
+ /* This may be changed by acd_init to select the ACD leg */
+ { &pwrcl_pll.c, 1 },
+ { &pwrcl_lf_mux.c, 0 },
+ ),
+ .ops = &cpu_mux_ops,
+ .mask = 0x3,
+ .shift = 0,
+ .base = &vbases[APC0_BASE],
+ .c = {
+ .dbg_name = "pwrcl_hf_mux",
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ .ops = &clk_ops_gen_mux,
+ CLK_INIT(pwrcl_hf_mux.c),
+ },
+};
+
+static struct mux_clk perfcl_lf_mux = {
+ .offset = MUX_OFFSET,
+ MUX_SRC_LIST(
+ { &perfcl_pll_main.c, 1 },
+ { &sys_apcsaux_clk.c, 3 },
+ ),
+ .ops = &cpu_mux_ops,
+ .mask = 0x3,
+ .shift = 2,
+ .base = &vbases[APC1_BASE],
+ .c = {
+ .dbg_name = "perfcl_lf_mux",
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ .ops = &clk_ops_gen_mux,
+ CLK_INIT(perfcl_lf_mux.c),
+ },
+};
+
+/*
+ * Perf cluster high-frequency mux: selects between the cluster PLL
+ * (possibly via the ACD leg) and the low-frequency mux.
+ */
+static struct mux_clk perfcl_hf_mux = {
+	.offset = MUX_OFFSET,
+	MUX_SRC_LIST(
+		/* This may be changed by acd_init to select the ACD leg */
+		{ &perfcl_pll.c, 1 },
+		{ &perfcl_lf_mux.c, 0 },
+	),
+	.ops = &cpu_mux_ops,
+	.mask = 0x3,
+	.shift = 0,
+	.base = &vbases[APC1_BASE],
+	.c = {
+		.dbg_name = "perfcl_hf_mux",
+		/*
+		 * Match pwrcl_hf_mux and both lf muxes: the select field
+		 * can be rewritten behind the framework's back (ACD leg,
+		 * safe-frequency writes in cpu_clk_8996_disable), so a
+		 * cached rate must never be trusted.
+		 */
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		.ops = &clk_ops_gen_mux,
+		CLK_INIT(perfcl_hf_mux.c),
+	},
+};
+
+static struct clk_src clk_src_perfcl_hf_mux_alt[] = {
+ { &perfcl_alt_pll.c, 3 },
+ { &perfcl_lf_mux.c, 0 },
+ };
+
+static struct clk_src clk_src_pwrcl_hf_mux_alt[] = {
+ { &pwrcl_alt_pll.c, 3 },
+ { &pwrcl_lf_mux.c, 0 },
+ };
+
+static struct clk_src clk_src_perfcl_lf_mux_alt[] = {
+ { &sys_apcsaux_clk.c, 3 },
+ };
+
+static struct clk_src clk_src_pwrcl_lf_mux_alt[] = {
+ { &sys_apcsaux_clk.c, 3 },
+ };
+
+struct cpu_clk_8996 {
+ u32 cpu_reg_mask;
+ struct clk *alt_pll;
+ unsigned long *alt_pll_freqs;
+ unsigned long alt_pll_thresh;
+ int n_alt_pll_freqs;
+ struct clk c;
+ bool hw_low_power_ctrl;
+ int pm_qos_latency;
+ cpumask_t cpumask;
+ struct pm_qos_request req;
+ bool do_half_rate;
+ bool has_acd;
+ int postdiv;
+};
+
+static inline struct cpu_clk_8996 *to_cpu_clk_8996(struct clk *c)
+{
+ return container_of(c, struct cpu_clk_8996, c);
+}
+
+/*
+ * Boot handoff: adopt the parent mux's rate but report the CPU clock as
+ * disabled so the framework's enable path still runs.
+ */
+static enum handoff cpu_clk_8996_handoff(struct clk *c)
+{
+	c->rate = clk_get_rate(c->parent);
+
+	return HANDOFF_DISABLED_CLK;
+}
+
+/*
+ * Round @rate up to the smallest fmax entry that can accommodate it
+ * (capped at the largest entry), then let the parent round that value.
+ */
+static long cpu_clk_8996_round_rate(struct clk *c, unsigned long rate)
+{
+	unsigned long target = c->fmax[c->num_fmax - 1];
+	int idx;
+
+	for (idx = 0; idx < c->num_fmax; idx++) {
+		if (rate <= c->fmax[idx]) {
+			target = c->fmax[idx];
+			break;
+		}
+	}
+
+	return clk_round_rate(c->parent, target);
+}
+
+static unsigned long alt_pll_perfcl_freqs[] = {
+ 307200000,
+ 556800000,
+};
+
+static void do_nothing(void *unused) { }
+
+/* ACD programming */
+static struct cpu_clk_8996 perfcl_clk;
+static struct cpu_clk_8996 pwrcl_clk;
+
+static void cpu_clock_8996_acd_init(void);
+
+/*
+ * Before the cluster power-collapses (D5), park its mux on the aux/GPLL0
+ * path (raw select value 0x3C written directly) so the ACD leg is not in
+ * use across the collapse. Only needed when ACD is enabled.
+ */
+static void cpu_clk_8996_disable(struct clk *c)
+{
+	struct cpu_clk_8996 *cpuclk = to_cpu_clk_8996(c);
+
+	if (!enable_acd)
+		return;
+
+	/* Ensure that we switch to GPLL0 across D5 */
+	if (cpuclk == &pwrcl_clk)
+		writel_relaxed(0x3C, vbases[APC0_BASE] + MUX_OFFSET);
+	else if (cpuclk == &perfcl_clk)
+		writel_relaxed(0x3C, vbases[APC1_BASE] + MUX_OFFSET);
+}
+
+#define MAX_PLL_MAIN_FREQ 595200000
+
+/*
+ * Returns the max safe frequency that will guarantee we switch to main output
+ */
+unsigned long acd_safe_freq(unsigned long freq)
+{
+ /*
+ * If we're running at less than double the max PLL main rate,
+ * just return half the rate. This will ensure we switch to
+ * the main output, without violating voltage constraints
+ * that might happen if we choose to go with MAX_PLL_MAIN_FREQ.
+ */
+ if (freq > MAX_PLL_MAIN_FREQ && freq <= MAX_PLL_MAIN_FREQ*2)
+ return freq/2;
+
+ /*
+ * We're higher than the max main output, and higher than twice
+ * the max main output. Safe to go to the max main output.
+ */
+ if (freq > MAX_PLL_MAIN_FREQ)
+ return MAX_PLL_MAIN_FREQ;
+
+ /* Shouldn't get here, just return the safest rate possible */
+ return sys_apcsaux_clk.c.rate;
+}
+
+/*
+ * Runs before the parent rate switch. Two jobs:
+ *  1. If HW low-power control is active, pin the cluster awake with a
+ *     PM QoS request and wake one of its CPUs, so software retains
+ *     control during the switch (released in post_set_rate).
+ *  2. If the cluster is on the ACD leg and the frequency is increasing
+ *     (voltage is raised before clk_set_rate), move to a safe non-ACD
+ *     source first.
+ */
+static int cpu_clk_8996_pre_set_rate(struct clk *c, unsigned long rate)
+{
+	struct cpu_clk_8996 *cpuclk = to_cpu_clk_8996(c);
+	int ret;
+	bool hw_low_power_ctrl = cpuclk->hw_low_power_ctrl;
+	bool on_acd_leg = c->rate > MAX_PLL_MAIN_FREQ;
+	bool increase_freq = rate > c->rate;
+
+	/*
+	 * If hardware control of the clock tree is enabled during power
+	 * collapse, setup a PM QOS request to prevent power collapse and
+	 * wake up one of the CPUs in this clock domain, to ensure software
+	 * control while the clock rate is being switched.
+	 */
+	if (hw_low_power_ctrl) {
+		memset(&cpuclk->req, 0, sizeof(cpuclk->req));
+		cpuclk->req.cpus_affine = cpuclk->cpumask;
+		cpuclk->req.type = PM_QOS_REQ_AFFINE_CORES;
+		pm_qos_add_request(&cpuclk->req, PM_QOS_CPU_DMA_LATENCY,
+				cpuclk->pm_qos_latency);
+
+		/* NOTE(review): ret is assigned but never checked here. */
+		ret = smp_call_function_any(&cpuclk->cpumask, do_nothing,
+						NULL, 1);
+	}
+
+	/* Select a non-ACD'ed safe source across voltage and freq switches */
+	if (enable_acd && cpuclk->has_acd && on_acd_leg && increase_freq) {
+		/*
+		 * We're on the ACD leg, and the voltage will be
+		 * scaled before clk_set_rate. Switch to the main output.
+		 */
+		return clk_set_rate(c->parent, acd_safe_freq(c->rate));
+	}
+
+	return 0;
+}
+
+/*
+ * Runs after the rate switch (and after any voltage drop): restore the
+ * ACD-leg rate if pre_set_rate/set_rate parked us on a safe source, and
+ * release the PM QoS hold taken in pre_set_rate.
+ */
+static void cpu_clk_8996_post_set_rate(struct clk *c, unsigned long start_rate)
+{
+	struct cpu_clk_8996 *cpuclk = to_cpu_clk_8996(c);
+	int ret;
+	bool hw_low_power_ctrl = cpuclk->hw_low_power_ctrl;
+	bool on_acd_leg = c->rate > MAX_PLL_MAIN_FREQ;
+	bool decrease_freq = start_rate > c->rate;
+
+	if (cpuclk->has_acd && enable_acd && on_acd_leg && decrease_freq) {
+		ret = clk_set_rate(c->parent, c->rate);
+		if (ret)
+			pr_err("Unable to reset parent rate!\n");
+	}
+
+	if (hw_low_power_ctrl)
+		pm_qos_remove_request(&cpuclk->req);
+}
+
+/*
+ * Set the cluster clock rate. Ordering constraints handled here:
+ *  - the alt PLL (if present) is re-programmed first, choosing the
+ *    higher alt frequency above alt_pll_thresh;
+ *  - when crossing below 600MHz with the div-2 output in use, the mux
+ *    is first dropped to rate/postdiv so the PLL ramp cannot overvolt
+ *    (see the long comment below);
+ *  - on failure, each previously changed clock is rolled back in
+ *    reverse order (set_rate_fail/fail labels).
+ * scm_lmh_lock serializes clk_set_rate against the LMH SCM calls on
+ * pre-v3 silicon.
+ */
+static int cpu_clk_8996_set_rate(struct clk *c, unsigned long rate)
+{
+	struct cpu_clk_8996 *cpuclk = to_cpu_clk_8996(c);
+	int ret, err_ret;
+	unsigned long alt_pll_prev_rate = 0;
+	unsigned long alt_pll_rate;
+	unsigned long n_alt_freqs = cpuclk->n_alt_pll_freqs;
+	bool on_acd_leg = rate > MAX_PLL_MAIN_FREQ;
+	bool decrease_freq = rate < c->rate;
+
+	/* Step 1: move the alt PLL to a frequency suitable for @rate. */
+	if (cpuclk->alt_pll && (n_alt_freqs > 0)) {
+		alt_pll_prev_rate = cpuclk->alt_pll->rate;
+		alt_pll_rate = cpuclk->alt_pll_freqs[0];
+		if (rate > cpuclk->alt_pll_thresh)
+			alt_pll_rate = cpuclk->alt_pll_freqs[1];
+		if (!cpu_clocks_v3)
+			mutex_lock(&scm_lmh_lock);
+		ret = clk_set_rate(cpuclk->alt_pll, alt_pll_rate);
+		if (!cpu_clocks_v3)
+			mutex_unlock(&scm_lmh_lock);
+		if (ret) {
+			pr_err("failed to set rate %lu on alt_pll when setting %lu on %s (%d)\n",
+				alt_pll_rate, rate, c->dbg_name, ret);
+			goto out;
+		}
+	}
+
+	/*
+	 * Special handling needed for the case when using the div-2 output.
+	 * Since the PLL needs to be running at twice the requested rate,
+	 * we need to switch the mux first and then change the PLL rate.
+	 * Otherwise the current voltage may not suffice with the PLL running at
+	 * 2 * rate. Example: switching from 768MHz down to 550Mhz - if we raise
+	 * the PLL to 1.1GHz, that's undervolting the system. So, we switch to
+	 * the div-2 mux select first (by requesting rate/2), allowing the CPU
+	 * to run at 384MHz. Then, we ramp up the PLL to 1.1GHz, allowing the
+	 * CPU frequency to ramp up from 384MHz to 550MHz.
+	 */
+	if (cpuclk->do_half_rate
+		&& c->rate > 600000000 && rate < 600000000) {
+		if (!cpu_clocks_v3)
+			mutex_lock(&scm_lmh_lock);
+		ret = clk_set_rate(c->parent, c->rate/cpuclk->postdiv);
+		if (!cpu_clocks_v3)
+			mutex_unlock(&scm_lmh_lock);
+		if (ret) {
+			pr_err("failed to set rate %lu on %s (%d)\n",
+				c->rate/cpuclk->postdiv, c->dbg_name, ret);
+			goto fail;
+		}
+	}
+
+	/* Step 3: the actual rate switch on the parent mux/PLL. */
+	if (!cpu_clocks_v3)
+		mutex_lock(&scm_lmh_lock);
+	ret = clk_set_rate(c->parent, rate);
+	if (!cpu_clocks_v3)
+		mutex_unlock(&scm_lmh_lock);
+	if (ret) {
+		pr_err("failed to set rate %lu on %s (%d)\n",
+			rate, c->dbg_name, ret);
+		goto set_rate_fail;
+	}
+
+	/*
+	 * If we're on the ACD leg and decreasing freq, voltage will be changed
+	 * after this function. Switch to main output.
+	 */
+	if (enable_acd && cpuclk->has_acd && decrease_freq && on_acd_leg)
+		return clk_set_rate(c->parent, acd_safe_freq(c->rate));
+
+	return 0;
+
+set_rate_fail:
+	/* Restore parent rate if we halved it */
+	if (cpuclk->do_half_rate && c->rate > 600000000 && rate < 600000000) {
+		if (!cpu_clocks_v3)
+			mutex_lock(&scm_lmh_lock);
+		err_ret = clk_set_rate(c->parent, c->rate);
+		if (!cpu_clocks_v3)
+			mutex_unlock(&scm_lmh_lock);
+		if (err_ret)
+			pr_err("failed to restore %s rate to %lu\n",
+				c->dbg_name, c->rate);
+	}
+
+fail:
+	/* Roll the alt PLL back to its original rate. */
+	if (cpuclk->alt_pll && (n_alt_freqs > 0)) {
+		if (!cpu_clocks_v3)
+			mutex_lock(&scm_lmh_lock);
+		err_ret = clk_set_rate(cpuclk->alt_pll, alt_pll_prev_rate);
+		if (!cpu_clocks_v3)
+			mutex_unlock(&scm_lmh_lock);
+		if (err_ret)
+			pr_err("failed to reset rate to %lu on alt pll after failing to set %lu on %s (%d)\n",
+				alt_pll_prev_rate, rate, c->dbg_name, err_ret);
+	}
+out:
+	return ret;
+}
+
+static struct clk_ops clk_ops_cpu_8996 = {
+ .disable = cpu_clk_8996_disable,
+ .set_rate = cpu_clk_8996_set_rate,
+ .pre_set_rate = cpu_clk_8996_pre_set_rate,
+ .post_set_rate = cpu_clk_8996_post_set_rate,
+ .round_rate = cpu_clk_8996_round_rate,
+ .handoff = cpu_clk_8996_handoff,
+};
+
+DEFINE_VDD_REGS_INIT(vdd_pwrcl, 1);
+
+#define PERFCL_LATENCY_NO_L2_PC_US (1)
+#define PWRCL_LATENCY_NO_L2_PC_US (1)
+
+static struct cpu_clk_8996 pwrcl_clk = {
+ .cpu_reg_mask = 0x3,
+ .pm_qos_latency = PWRCL_LATENCY_NO_L2_PC_US,
+ .do_half_rate = true,
+ .postdiv = 2,
+ .c = {
+ .parent = &pwrcl_hf_mux.c,
+ .dbg_name = "pwrcl_clk",
+ .ops = &clk_ops_cpu_8996,
+ .vdd_class = &vdd_pwrcl,
+ CLK_INIT(pwrcl_clk.c),
+ },
+};
+
+/* Voltage class for the performance-cluster rail; one regulator */
+DEFINE_VDD_REGS_INIT(vdd_perfcl, 1);
+
+/*
+ * Performance (big) cluster CPU clock. cpu_reg_mask 0x103 matches MPIDR
+ * hwids 0x100-0x103. Unlike pwrcl, this cluster also has an alternate PLL
+ * used below alt_pll_thresh (1.1904 GHz) by the set_rate sequence.
+ */
+static struct cpu_clk_8996 perfcl_clk = {
+ .cpu_reg_mask = 0x103,
+ .alt_pll = &perfcl_alt_pll.c,
+ .alt_pll_freqs = alt_pll_perfcl_freqs,
+ .alt_pll_thresh = 1190400000,
+ .n_alt_pll_freqs = ARRAY_SIZE(alt_pll_perfcl_freqs),
+ .pm_qos_latency = PERFCL_LATENCY_NO_L2_PC_US,
+ .do_half_rate = true,
+ .postdiv = 2,
+ .c = {
+ .parent = &perfcl_hf_mux.c,
+ .dbg_name = "perfcl_clk",
+ .ops = &clk_ops_cpu_8996,
+ .vdd_class = &vdd_perfcl,
+ CLK_INIT(perfcl_clk.c),
+ },
+};
+
+/* Serializes per-cluster ACD register programming (cpu_clock_8996_acd_init) */
+static DEFINE_SPINLOCK(acd_lock);
+
+/*
+ * Map the calling CPU (via MPIDR) to its cluster clock, or NULL if the
+ * hwid belongs to neither cluster. Must run on the CPU of interest.
+ */
+static struct clk *mpidr_to_clk(void)
+{
+	u64 hwid = read_cpuid_mpidr() & 0xFFF;
+
+	/* A hwid is in a cluster iff it sets no bits outside that mask */
+	if (!(hwid & ~(u64)pwrcl_clk.cpu_reg_mask))
+		return &pwrcl_clk.c;
+	if (!(hwid & ~(u64)perfcl_clk.cpu_reg_mask))
+		return &perfcl_clk.c;
+
+	return NULL;
+}
+
+#define SSSCTL_OFFSET 0x160
+
+/*
+ * Program ACD (adaptive clock distribution) for the calling CPU's cluster.
+ * This *has* to be called on the intended CPU: it reads MPIDR and writes
+ * per-cluster system registers via the READ_/WRITE_L2ACD* macros.
+ *
+ * Improvement: the original called mpidr_to_clk() up to five times inside
+ * the locked region; the result cannot change while interrupts are off on
+ * this CPU, so it is computed once.
+ */
+static void cpu_clock_8996_acd_init(void)
+{
+	u64 l2acdtd;
+	unsigned long flags;
+	struct clk *cluster;
+
+	spin_lock_irqsave(&acd_lock, flags);
+
+	if (!enable_acd) {
+		spin_unlock_irqrestore(&acd_lock, flags);
+		return;
+	}
+
+	/* Caller's cluster is fixed for the duration of the locked section */
+	cluster = mpidr_to_clk();
+
+	READ_L2ACDTD(l2acdtd);
+
+	/* If we have init'ed and the config is still present, return */
+	if (cluster == &pwrcl_clk.c && l2acdtd == acdtd_val_pwrcl) {
+		spin_unlock_irqrestore(&acd_lock, flags);
+		return;
+	} else if (cluster == &pwrcl_clk.c) {
+		WRITE_L2ACDTD(acdtd_val_pwrcl);
+	}
+
+	if (cluster == &perfcl_clk.c && l2acdtd == acdtd_val_perfcl) {
+		spin_unlock_irqrestore(&acd_lock, flags);
+		return;
+	} else if (cluster == &perfcl_clk.c) {
+		WRITE_L2ACDTD(acdtd_val_perfcl);
+	}
+
+	/* Initial ACD for *this* cluster */
+	WRITE_L2ACDDVMRC(dvmrc_val);
+	WRITE_L2ACDSSCR(acdsscr_val);
+
+	if (cluster == &pwrcl_clk.c) {
+		if (vbases[APC0_BASE])
+			writel_relaxed(0x0000000F, vbases[APC0_BASE] +
+				       SSSCTL_OFFSET);
+		/* Ensure SSSCTL config goes through before enabling ACD. */
+		mb();
+		WRITE_L2ACDCR(acdcr_val_pwrcl);
+	} else {
+		/* NOTE(review): an unrecognized hwid also lands here and gets
+		 * the perfcl configuration, as in the original code. */
+		WRITE_L2ACDCR(acdcr_val_perfcl);
+		if (vbases[APC1_BASE])
+			writel_relaxed(0x0000000F, vbases[APC1_BASE] +
+				       SSSCTL_OFFSET);
+		/* Ensure SSSCTL config goes through before enabling ACD. */
+		mb();
+	}
+
+	spin_unlock_irqrestore(&acd_lock, flags);
+}
+
+/*
+ * Resolve a logical CPU number to its cluster clock by reading the CPU's
+ * "reg" (hwid) property from the device tree. Successful lookups are
+ * memoized in cpu_clk_map[]; failures are not cached and return NULL.
+ */
+static struct clk *logical_cpu_to_clk(int cpu)
+{
+	static struct clk *cpu_clk_map[NR_CPUS];
+	struct device_node *cpu_node;
+	const u32 *cell;
+	struct clk *clk = NULL;
+	u64 hwid;
+
+	if (cpu_clk_map[cpu])
+		return cpu_clk_map[cpu];
+
+	cpu_node = of_get_cpu_node(cpu, NULL);
+	if (!cpu_node)
+		return NULL;
+
+	cell = of_get_property(cpu_node, "reg", NULL);
+	if (!cell) {
+		pr_err("%s: missing reg property\n", cpu_node->full_name);
+		return NULL;
+	}
+
+	hwid = of_read_number(cell, of_n_addr_cells(cpu_node));
+	if ((hwid | pwrcl_clk.cpu_reg_mask) == pwrcl_clk.cpu_reg_mask)
+		clk = &pwrcl_clk.c;
+	else if ((hwid | perfcl_clk.cpu_reg_mask) == perfcl_clk.cpu_reg_mask)
+		clk = &perfcl_clk.c;
+
+	/* Storing NULL is a no-op, so only real matches are cached */
+	cpu_clk_map[cpu] = clk;
+	return clk;
+}
+
+/*
+ * PLL feeding the CBF (CPU bus fabric). Register "addresses" here are byte
+ * offsets cast to iomem pointers; they are added to *base (vbases[]) at
+ * runtime. The pro fixup below changes post_div_masked for 8996pro parts.
+ */
+static struct pll_clk cbf_pll = {
+ .mode_reg = (void __iomem *)CBF_PLL_MODE,
+ .l_reg = (void __iomem *)CBF_PLL_L_VAL,
+ .alpha_reg = (void __iomem *)CBF_PLL_ALPHA,
+ .config_reg = (void __iomem *)CBF_PLL_USER_CTL,
+ .config_ctl_reg = (void __iomem *)CBF_PLL_CONFIG_CTL,
+ .config_ctl_hi_reg = (void __iomem *)CBF_PLL_CONFIG_CTL_HI,
+ .status_reg = (void __iomem *)CBF_PLL_MODE,
+ .test_ctl_lo_reg = (void __iomem *)CBF_PLL_TEST_CTL_LO,
+ .test_ctl_hi_reg = (void __iomem *)CBF_PLL_TEST_CTL_HI,
+ .pgm_test_ctl_enable = true,
+ .init_test_ctl = true,
+ .masks = {
+ .pre_div_mask = BIT(12),
+ .post_div_mask = BM(9, 8),
+ .mn_en_mask = BIT(24),
+ .main_output_mask = BIT(0),
+ .early_output_mask = BIT(3),
+ .apc_pdn_mask = BIT(24),
+ .lock_mask = BIT(31),
+ },
+ .vals = {
+ .post_div_masked = 0x100,
+ .pre_div_masked = 0x0,
+ .test_ctl_hi_val = 0x00004000,
+ .test_ctl_lo_val = 0x04000000,
+ .config_ctl_val = 0x200D4AA8,
+ .config_ctl_hi_val = 0x002,
+ },
+ .min_rate = 600000000,
+ .max_rate = 3000000000,
+ .src_rate = 19200000,
+ .base = &vbases[CBF_PLL_BASE],
+ .c = {
+ .parent = &xo_ao.c,
+ .dbg_name = "cbf_pll",
+ .ops = &clk_ops_variable_rate_pll_hwfsm,
+ /* 3 GHz mapped at LOW - presumably to avoid digital-rail scaling
+  * for this PLL; confirm against the voltage plan */
+ VDD_DIG_FMAX_MAP1(LOW, 3000000000),
+ CLK_INIT(cbf_pll.c),
+ },
+};
+
+/* Fixed /2 tap of the CBF PLL (div changed to /4 by the pro fixup) */
+DEFINE_FIXED_DIV_CLK(cbf_pll_main, 2, &cbf_pll.c);
+
+/* CBF glitch-free mux register offset within the CBF block */
+#define CBF_MUX_OFFSET 0x018
+
+/* Voltage class for the CBF rail; one regulator */
+DEFINE_VDD_REGS_INIT(vdd_cbf, 1);
+
+/*
+ * CBF source mux: sel 1 = CBF PLL (early output), 2 = PLL main/2 tap,
+ * 3 = GPLL0-derived aux clock (boot/safe source).
+ */
+static struct mux_clk cbf_hf_mux = {
+ .offset = CBF_MUX_OFFSET,
+ MUX_SRC_LIST(
+ { &cbf_pll.c, 1 },
+ { &cbf_pll_main.c, 2 },
+ { &sys_apcsaux_clk.c, 3 },
+ ),
+ .en_mask = 0,
+ .ops = &cpu_mux_ops,
+ .mask = 0x3,
+ .shift = 0,
+ .base = &vbases[CBF_BASE],
+ .c = {
+ .dbg_name = "cbf_hf_mux",
+ .ops = &clk_ops_gen_mux,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(cbf_hf_mux.c),
+ },
+};
+
+/*
+ * CBF (CPU bus fabric) clock. No cpu_reg_mask/alt PLL: the CBF is not a CPU
+ * cluster, it only reuses the generic cpu_clk_8996 rate machinery.
+ */
+static struct cpu_clk_8996 cbf_clk = {
+ .do_half_rate = true,
+ .postdiv = 2,
+ .c = {
+ .parent = &cbf_hf_mux.c,
+ .dbg_name = "cbf_clk",
+ .ops = &clk_ops_cpu_8996,
+ .vdd_class = &vdd_cbf,
+ CLK_INIT(cbf_clk.c),
+ },
+};
+
+/* APCS debug/measurement mux register offset */
+#define APCS_CLK_DIAG 0x78
+
+/* /16 taps of the cluster clocks for the hardware frequency counter */
+DEFINE_FIXED_SLAVE_DIV_CLK(pwrcl_div_clk, 16, &pwrcl_clk.c);
+DEFINE_FIXED_SLAVE_DIV_CLK(perfcl_div_clk, 16, &perfcl_clk.c);
+
+/*
+ * Debug mux used by the clock measurement code; selects which CPU-subsystem
+ * clock is routed to the frequency counter (cluster clocks via their /16
+ * taps, the CBF directly).
+ */
+static struct mux_clk cpu_debug_mux = {
+ .offset = APCS_CLK_DIAG,
+ MUX_SRC_LIST(
+ { &cbf_clk.c, 0x01 },
+ { &pwrcl_div_clk.c, 0x11 },
+ { &perfcl_div_clk.c, 0x21 },
+ ),
+ MUX_REC_SRC_LIST(
+ &cbf_clk.c,
+ &pwrcl_div_clk.c,
+ &perfcl_div_clk.c,
+ ),
+ .ops = &cpu_debug_mux_ops,
+ .mask = 0xFF,
+ .shift = 8,
+ .base = &vbases[DEBUG_BASE],
+ .c = {
+ .dbg_name = "cpu_debug_mux",
+ .ops = &clk_ops_gen_mux,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(cpu_debug_mux.c),
+ },
+};
+
+/* Lookup table registered with the MSM clock framework in probe */
+static struct clk_lookup cpu_clocks_8996[] = {
+ CLK_LIST(pwrcl_clk),
+ CLK_LIST(pwrcl_pll),
+ CLK_LIST(pwrcl_alt_pll),
+ CLK_LIST(pwrcl_pll_main),
+ CLK_LIST(pwrcl_hf_mux),
+ CLK_LIST(pwrcl_lf_mux),
+
+ CLK_LIST(perfcl_clk),
+ CLK_LIST(perfcl_pll),
+ CLK_LIST(perfcl_alt_pll),
+ CLK_LIST(perfcl_pll_main),
+ CLK_LIST(perfcl_hf_mux),
+ CLK_LIST(perfcl_lf_mux),
+
+ CLK_LIST(cbf_pll),
+ CLK_LIST(cbf_pll_main),
+ CLK_LIST(cbf_hf_mux),
+ CLK_LIST(cbf_clk),
+
+ CLK_LIST(xo_ao),
+ CLK_LIST(sys_apcsaux_clk),
+
+ CLK_LIST(cpu_debug_mux),
+};
+
+/*
+ * Parse a <frequency, voltage-corner> pair list from DT property @prop_name
+ * into @c's fmax[] table and its vdd_class's vdd_uv[] table.
+ *
+ * Returns 0 on success or a negative errno; all allocations are devm-managed.
+ *
+ * Fix: the return value of of_property_read_u32_array() was ignored, so a
+ * malformed property would silently install an all-zero frequency plan.
+ */
+static int of_get_fmax_vdd_class(struct platform_device *pdev, struct clk *c,
+				 char *prop_name)
+{
+	struct device_node *of = pdev->dev.of_node;
+	int prop_len, i, ret;
+	struct clk_vdd_class *vdd = c->vdd_class;
+	u32 *array;
+
+	if (!of_find_property(of, prop_name, &prop_len)) {
+		dev_err(&pdev->dev, "missing %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	prop_len /= sizeof(u32);
+	/* Must be a whole number of <freq, corner> pairs */
+	if (prop_len % 2) {
+		dev_err(&pdev->dev, "bad length %d\n", prop_len);
+		return -EINVAL;
+	}
+
+	prop_len /= 2;
+	vdd->level_votes = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
+					GFP_KERNEL);
+	if (!vdd->level_votes)
+		return -ENOMEM;
+
+	vdd->vdd_uv = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
+				   GFP_KERNEL);
+	if (!vdd->vdd_uv)
+		return -ENOMEM;
+
+	c->fmax = devm_kzalloc(&pdev->dev, prop_len * sizeof(unsigned long),
+			       GFP_KERNEL);
+	if (!c->fmax)
+		return -ENOMEM;
+
+	/* Scratch buffer for the raw u32 pairs; freed before returning */
+	array = devm_kzalloc(&pdev->dev,
+			     prop_len * sizeof(u32) * 2, GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(of, prop_name, array, prop_len * 2);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to read %s (%d)\n",
+			prop_name, ret);
+		devm_kfree(&pdev->dev, array);
+		return ret;
+	}
+
+	for (i = 0; i < prop_len; i++) {
+		c->fmax[i] = array[2 * i];
+		vdd->vdd_uv[i] = array[2 * i + 1];
+	}
+
+	devm_kfree(&pdev->dev, array);
+	vdd->num_levels = prop_len;
+	vdd->cur_level = prop_len;
+	c->num_fmax = prop_len;
+	return 0;
+}
+
+/*
+ * Map all register regions named in base_names[] and acquire the four rail
+ * regulators plus the XO and GPLL0 aux parents. All resources are
+ * devm-managed, so error returns need no explicit cleanup. Returns 0 or a
+ * negative errno (notably -EPROBE_DEFER from the regulator/clk getters).
+ */
+static int cpu_clock_8996_resources_init(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct clk *c;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(base_names); i++) {
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ base_names[i]);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "Unable to get platform resource for %s",
+ base_names[i]);
+ return -ENOMEM;
+ }
+
+ /* Re-maps bases that early init mapped by hard-coded address */
+ vbases[i] = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!vbases[i]) {
+ dev_err(&pdev->dev, "Unable to map in base %s\n",
+ base_names[i]);
+ return -ENOMEM;
+ }
+ }
+
+ vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd-dig");
+ if (IS_ERR(vdd_dig.regulator[0])) {
+ if (PTR_ERR(vdd_dig.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get the CX regulator");
+ return PTR_ERR(vdd_dig.regulator[0]);
+ }
+
+ vdd_pwrcl.regulator[0] = devm_regulator_get(&pdev->dev, "vdd-pwrcl");
+ if (IS_ERR(vdd_pwrcl.regulator[0])) {
+ if (PTR_ERR(vdd_pwrcl.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get the pwrcl vreg\n");
+ return PTR_ERR(vdd_pwrcl.regulator[0]);
+ }
+
+ vdd_perfcl.regulator[0] = devm_regulator_get(&pdev->dev, "vdd-perfcl");
+ if (IS_ERR(vdd_perfcl.regulator[0])) {
+ if (PTR_ERR(vdd_perfcl.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get the perfcl vreg\n");
+ return PTR_ERR(vdd_perfcl.regulator[0]);
+ }
+
+ /* Leakage constraints disallow a turbo vote during bootup */
+ vdd_perfcl.skip_handoff = true;
+
+ vdd_cbf.regulator[0] = devm_regulator_get(&pdev->dev, "vdd-cbf");
+ if (IS_ERR(vdd_cbf.regulator[0])) {
+ if (PTR_ERR(vdd_cbf.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get the cbf vreg\n");
+ return PTR_ERR(vdd_cbf.regulator[0]);
+ }
+
+ c = devm_clk_get(&pdev->dev, "xo_ao");
+ if (IS_ERR(c)) {
+ if (PTR_ERR(c) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get xo (rc = %ld)!\n",
+ PTR_ERR(c));
+ return PTR_ERR(c);
+ }
+ xo_ao.c.parent = c;
+
+ c = devm_clk_get(&pdev->dev, "aux_clk");
+ if (IS_ERR(c)) {
+ if (PTR_ERR(c) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get gpll0 (rc = %ld)!\n",
+ PTR_ERR(c));
+ return PTR_ERR(c);
+ }
+ sys_apcsaux_clk_gcc.c.parent = c;
+
+ /* Always vote the highest uV mapped to a corner on these rails */
+ vdd_perfcl.use_max_uV = true;
+ vdd_pwrcl.use_max_uV = true;
+ vdd_cbf.use_max_uV = true;
+
+ return 0;
+}
+
+/*
+ * Register every frequency in @c's fmax[] table (from index 1 up) as an OPP
+ * on @dev, pairing each with the regulator voltage for its corner. Prints
+ * the lowest and highest pair once at boot for thermal/scheduler debugging.
+ *
+ * Fix: the loop was "while (1)" and only terminated via the rate >= max_rate
+ * check with "first" already cleared; a short table (e.g. two entries, where
+ * the first in-loop entry already equals max_rate) would read past the end
+ * of fmax[]. Bound the index by c->num_fmax instead.
+ */
+static int add_opp(struct clk *c, struct device *dev, unsigned long max_rate)
+{
+	unsigned long rate = 0;
+	int level;
+	int uv;
+	int *vdd_uv = c->vdd_class->vdd_uv;
+	struct regulator *reg = c->vdd_class->regulator[0];
+	long ret;
+	bool first = true;
+	int j = 1;
+
+	while (j < c->num_fmax) {
+		rate = c->fmax[j++];
+		level = find_vdd_level(c, rate);
+		if (level <= 0) {
+			pr_warn("clock-cpu: no corner for %lu.\n", rate);
+			return -EINVAL;
+		}
+
+		uv = regulator_list_corner_voltage(reg, vdd_uv[level]);
+		if (uv < 0) {
+			pr_warn("clock-cpu: no uv for %lu.\n", rate);
+			return -EINVAL;
+		}
+
+		ret = dev_pm_opp_add(dev, rate, uv);
+		if (ret) {
+			pr_warn("clock-cpu: failed to add OPP for %lu\n", rate);
+			return ret;
+		}
+
+		/*
+		 * Print the OPP pair for the lowest and highest frequency for
+		 * each device that we're populating. This is important since
+		 * this information will be used by thermal mitigation and the
+		 * scheduler.
+		 */
+		if ((rate >= max_rate) || first) {
+			/* one time print at bootup */
+			pr_info("clock-cpu-8996: set OPP pair (%lu Hz, %d uv) on %s\n",
+				rate, uv, dev_name(dev));
+			if (first)
+				first = false;
+			else
+				break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Populate OPP tables: per-CPU for the two clusters, and on the separate CBF
+ * device (looked up through the "cbf-dev" phandle) for the fabric clock.
+ *
+ * Fix: the device_node reference taken by of_parse_phandle() was never
+ * released; drop it with of_node_put() once the platform device is found.
+ */
+static void populate_opp_table(struct platform_device *pdev)
+{
+	unsigned long pwrcl_fmax, perfcl_fmax, cbf_fmax;
+	struct device_node *cbf_node;
+	struct platform_device *cbf_dev;
+	int cpu;
+
+	pwrcl_fmax = pwrcl_clk.c.fmax[pwrcl_clk.c.num_fmax - 1];
+	perfcl_fmax = perfcl_clk.c.fmax[perfcl_clk.c.num_fmax - 1];
+	cbf_fmax = cbf_clk.c.fmax[cbf_clk.c.num_fmax - 1];
+
+	for_each_possible_cpu(cpu) {
+		if (logical_cpu_to_clk(cpu) == &pwrcl_clk.c) {
+			WARN(add_opp(&pwrcl_clk.c, get_cpu_device(cpu),
+				     pwrcl_fmax),
+			     "Failed to add OPP levels for power cluster\n");
+		}
+		if (logical_cpu_to_clk(cpu) == &perfcl_clk.c) {
+			WARN(add_opp(&perfcl_clk.c, get_cpu_device(cpu),
+				     perfcl_fmax),
+			     "Failed to add OPP levels for perf cluster\n");
+		}
+	}
+
+	cbf_node = of_parse_phandle(pdev->dev.of_node, "cbf-dev", 0);
+	if (!cbf_node) {
+		pr_err("can't find the CBF dt node\n");
+		return;
+	}
+
+	cbf_dev = of_find_device_by_node(cbf_node);
+	of_node_put(cbf_node);
+	if (!cbf_dev) {
+		pr_err("can't find the CBF dt device\n");
+		return;
+	}
+
+	WARN(add_opp(&cbf_clk.c, &cbf_dev->dev, cbf_fmax),
+	     "Failed to add OPP levels for CBF\n");
+}
+
+/* Adjust CBF PLL dividers for MSM8996pro parts: /4 main tap instead of /2 */
+static void cpu_clock_8996_pro_fixup(void)
+{
+ cbf_pll.vals.post_div_masked = 0x300;
+ cbf_pll_main.data.max_div = 4;
+ cbf_pll_main.data.min_div = 4;
+ cbf_pll_main.data.div = 4;
+ cbf_clk.postdiv = 4;
+}
+
+/* Speed bin read from the efuse in probe; shared by all three plans */
+static int perfclspeedbin;
+
+/* Rates programmed during early init, before the clock framework hands off */
+unsigned long pwrcl_early_boot_rate = 883200000;
+unsigned long perfcl_early_boot_rate = 883200000;
+unsigned long cbf_early_boot_rate = 614400000;
+unsigned long alt_pll_early_boot_rate = 307200000;
+
+/*
+ * Platform probe: map resources, read the speed bin from the efuse, load the
+ * per-bin frequency/voltage plans, register the clocks, hand off current
+ * rates (falling back to the GPLL0 aux rate if unknown), permanently enable
+ * the PLLs and publish the OPP tables.
+ *
+ * Fix: the error paths after get_online_cpus() (clock registration failure
+ * and the three safe-rate failures) returned without put_online_cpus(),
+ * permanently holding the CPU hotplug read lock. Use goto-based cleanup.
+ */
+static int cpu_clock_8996_driver_probe(struct platform_device *pdev)
+{
+	int ret, cpu;
+	unsigned long pwrclrate, perfclrate, cbfrate;
+	int pvs_ver = 0;
+	u32 pte_efuse;
+	char perfclspeedbinstr[] = "qcom,perfcl-speedbinXX-vXX";
+	char pwrclspeedbinstr[] = "qcom,pwrcl-speedbinXX-vXX";
+	char cbfspeedbinstr[] = "qcom,cbf-speedbinXX-vXX";
+
+	pwrcl_pll_main.c.flags = CLKFLAG_NO_RATE_CACHE;
+	perfcl_pll_main.c.flags = CLKFLAG_NO_RATE_CACHE;
+	cbf_pll_main.c.flags = CLKFLAG_NO_RATE_CACHE;
+	pwrcl_clk.hw_low_power_ctrl = true;
+	perfcl_clk.hw_low_power_ctrl = true;
+
+	ret = cpu_clock_8996_resources_init(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "resources init failed\n");
+		return ret;
+	}
+
+	pte_efuse = readl_relaxed(vbases[EFUSE_BASE]);
+	perfclspeedbin = ((pte_efuse >> EFUSE_SHIFT) & EFUSE_MASK);
+	dev_info(&pdev->dev, "using perf/pwr/cbf speed bin %u and pvs_ver %d\n",
+		 perfclspeedbin, pvs_ver);
+
+	snprintf(perfclspeedbinstr, ARRAY_SIZE(perfclspeedbinstr),
+		 "qcom,perfcl-speedbin%d-v%d", perfclspeedbin, pvs_ver);
+
+	ret = of_get_fmax_vdd_class(pdev, &perfcl_clk.c, perfclspeedbinstr);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get speed bin for perfcl. Falling back to zero.\n");
+		ret = of_get_fmax_vdd_class(pdev, &perfcl_clk.c,
+					    "qcom,perfcl-speedbin0-v0");
+		if (ret) {
+			dev_err(&pdev->dev, "Unable to retrieve plan for perf. Bailing...\n");
+			return ret;
+		}
+	}
+
+	snprintf(pwrclspeedbinstr, ARRAY_SIZE(pwrclspeedbinstr),
+		 "qcom,pwrcl-speedbin%d-v%d", perfclspeedbin, pvs_ver);
+
+	ret = of_get_fmax_vdd_class(pdev, &pwrcl_clk.c, pwrclspeedbinstr);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get speed bin for pwrcl. Falling back to zero.\n");
+		ret = of_get_fmax_vdd_class(pdev, &pwrcl_clk.c,
+					    "qcom,pwrcl-speedbin0-v0");
+		if (ret) {
+			dev_err(&pdev->dev, "Unable to retrieve plan for pwrcl\n");
+			return ret;
+		}
+	}
+
+	snprintf(cbfspeedbinstr, ARRAY_SIZE(cbfspeedbinstr),
+		 "qcom,cbf-speedbin%d-v%d", perfclspeedbin, pvs_ver);
+
+	ret = of_get_fmax_vdd_class(pdev, &cbf_clk.c, cbfspeedbinstr);
+	if (ret) {
+		dev_err(&pdev->dev, "Can't get speed bin for cbf. Falling back to zero.\n");
+		ret = of_get_fmax_vdd_class(pdev, &cbf_clk.c,
+					    "qcom,cbf-speedbin0-v0");
+		if (ret) {
+			dev_err(&pdev->dev, "Unable to retrieve plan for cbf\n");
+			return ret;
+		}
+	}
+
+	/* Keep CPUs from coming and going while we build the cpumasks */
+	get_online_cpus();
+
+	for_each_possible_cpu(cpu) {
+		if (logical_cpu_to_clk(cpu) == &pwrcl_clk.c)
+			cpumask_set_cpu(cpu, &pwrcl_clk.cpumask);
+		if (logical_cpu_to_clk(cpu) == &perfcl_clk.c)
+			cpumask_set_cpu(cpu, &perfcl_clk.cpumask);
+	}
+
+	ret = of_msm_clock_register(pdev->dev.of_node, cpu_clocks_8996,
+				    ARRAY_SIZE(cpu_clocks_8996));
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to register CPU clocks.\n");
+		goto out;
+	}
+
+	/* Refcount the CBF and cluster clocks once per online CPU */
+	for_each_online_cpu(cpu) {
+		WARN(clk_prepare_enable(&cbf_clk.c),
+		     "Failed to enable cbf clock.\n");
+		WARN(clk_prepare_enable(logical_cpu_to_clk(cpu)),
+		     "Failed to enable clock for cpu %d\n", cpu);
+	}
+
+	pwrclrate = clk_get_rate(&pwrcl_clk.c);
+	perfclrate = clk_get_rate(&perfcl_clk.c);
+	cbfrate = clk_get_rate(&cbf_clk.c);
+
+	if (!pwrclrate) {
+		dev_err(&pdev->dev, "Unknown pwrcl rate. Setting safe rate\n");
+		ret = clk_set_rate(&pwrcl_clk.c, sys_apcsaux_clk.c.rate);
+		if (ret) {
+			dev_err(&pdev->dev, "Can't set a safe rate on A53.\n");
+			ret = -EINVAL;
+			goto out;
+		}
+		pwrclrate = sys_apcsaux_clk.c.rate;
+	}
+
+	if (!perfclrate) {
+		dev_err(&pdev->dev, "Unknown perfcl rate. Setting safe rate\n");
+		ret = clk_set_rate(&perfcl_clk.c, sys_apcsaux_clk.c.rate);
+		if (ret) {
+			dev_err(&pdev->dev, "Can't set a safe rate on perf.\n");
+			ret = -EINVAL;
+			goto out;
+		}
+		perfclrate = sys_apcsaux_clk.c.rate;
+	}
+
+	if (!cbfrate) {
+		dev_err(&pdev->dev, "Unknown CBF rate. Setting safe rate\n");
+		cbfrate = sys_apcsaux_clk.c.rate;
+		ret = clk_set_rate(&cbf_clk.c, cbfrate);
+		if (ret) {
+			dev_err(&pdev->dev, "Can't set a safe rate on CBF.\n");
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	/* Permanently enable the cluster PLLs */
+	clk_prepare_enable(&perfcl_pll.c);
+	clk_prepare_enable(&pwrcl_pll.c);
+	clk_prepare_enable(&perfcl_alt_pll.c);
+	clk_prepare_enable(&pwrcl_alt_pll.c);
+	clk_prepare_enable(&cbf_pll.c);
+
+	/* Set the early boot rate. This may also switch us to the ACD leg */
+	clk_set_rate(&pwrcl_clk.c, pwrcl_early_boot_rate);
+	clk_set_rate(&perfcl_clk.c, perfcl_early_boot_rate);
+
+	populate_opp_table(pdev);
+
+	put_online_cpus();
+
+	return 0;
+
+out:
+	/* Drop the hotplug read lock on error paths too */
+	put_online_cpus();
+	return ret;
+}
+
+/* Match all three HW revisions; the variant is re-detected in early init.
+ * Improvement: of_device_id tables are read-only - declare const, matching
+ * the const pointer in struct device_driver::of_match_table. */
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,cpu-clock-8996" },
+	{ .compatible = "qcom,cpu-clock-8996-v3" },
+	{ .compatible = "qcom,cpu-clock-8996-pro" },
+	{}
+};
+
+static struct platform_driver cpu_clock_8996_driver = {
+	.probe = cpu_clock_8996_driver_probe,
+	.driver = {
+		.name = "cpu-clock-8996",
+		.of_match_table = match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Registered at arch_initcall so CPU clocks exist before cpufreq probes */
+static int __init cpu_clock_8996_init(void)
+{
+ return platform_driver_register(&cpu_clock_8996_driver);
+}
+arch_initcall(cpu_clock_8996_init);
+
+static void __exit cpu_clock_8996_exit(void)
+{
+ platform_driver_unregister(&cpu_clock_8996_driver);
+}
+module_exit(cpu_clock_8996_exit);
+
+/*
+ * Physical register bases used by early init, which runs long before the
+ * platform device (and DT resources) are available.
+ */
+#define APC0_BASE_PHY 0x06400000
+#define APC1_BASE_PHY 0x06480000
+#define CBF_BASE_PHY 0x09A11000
+#define CBF_PLL_BASE_PHY 0x09A20000
+#define AUX_BASE_PHY 0x09820050
+#define APCC_RECAL_DLY_BASE 0x099E00C8
+
+#define CLK_CTL_OFFSET 0x44
+#define PSCTL_OFFSET 0x164
+#define AUTO_CLK_SEL_BIT BIT(8)
+#define CBF_AUTO_CLK_SEL_BIT BIT(6)
+#define AUTO_CLK_SEL_ALWAYS_ON_MASK BM(5, 4)
+#define AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL (0x3 << 4)
+/* ACD recalibration delay register block (pro parts only) */
+#define APCC_RECAL_DLY_SIZE 0x10
+#define APCC_RECAL_VCTL_OFFSET 0x8
+#define APCC_RECAL_CPR_DLY_SETTING 0x00000000
+#define APCC_RECAL_VCTL_DLY_SETTING 0x800003ff
+
+#define HF_MUX_MASK 0x3
+#define LF_MUX_MASK 0x3
+#define LF_MUX_SHIFT 0x2
+#define HF_MUX_SEL_EARLY_PLL 0x1
+#define HF_MUX_SEL_LF_MUX 0x1
+#define LF_MUX_SEL_ALT_PLL 0x1
+
+/* Boot-time switch: run clusters from the alternate PLLs (disables ACD) */
+static int use_alt_pll;
+module_param(use_alt_pll, int, 0444);
+
+/*
+ * CPU hotplug callback: re-program ACD on a CPU as it is brought online
+ * (CPU_STARTING runs on the incoming CPU itself, which acd_init requires).
+ * All other hotplug events are ignored.
+ */
+static int clock_cpu_8996_cpu_callback(struct notifier_block *nfb,
+			unsigned long action, void *hcpu)
+{
+	if (!enable_acd)
+		return NOTIFY_OK;
+
+	/* This is needed for the first time that CPUs come up */
+	if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
+		cpu_clock_8996_acd_init();
+
+	return NOTIFY_OK;
+}
+
+/* Registered in early init only when ACD is enabled */
+static struct notifier_block __refdata clock_cpu_8996_cpu_notifier = {
+ .notifier_call = clock_cpu_8996_cpu_callback,
+};
+
+/*
+ * Early (pre-driver-model) CPU clock bring-up: map registers by hard-coded
+ * physical address, park all muxes on GPLL0, program and FSM-enable the
+ * PLLs, then switch to the primary (or ACD, or alternate) PLL legs.
+ *
+ * Fix: the NULL check after ioremap of the CBF PLL base tested
+ * vbases[CBF_BASE] (already mapped, always non-NULL) instead of
+ * vbases[CBF_PLL_BASE], so a failed mapping would be dereferenced later.
+ */
+int __init cpu_clock_8996_early_init(void)
+{
+	int ret = 0;
+	void __iomem *auxbase, *acd_recal_base;
+	u32 regval;
+
+	if (of_find_compatible_node(NULL, NULL,
+				    "qcom,cpu-clock-8996-pro")) {
+		cpu_clocks_v3 = true;
+		cpu_clocks_pro = true;
+	} else if (of_find_compatible_node(NULL, NULL,
+					   "qcom,cpu-clock-8996-v3")) {
+		cpu_clocks_v3 = true;
+	} else if (!of_find_compatible_node(NULL, NULL,
+					    "qcom,cpu-clock-8996")) {
+		return 0;
+	}
+
+	pr_info("clock-cpu-8996: configuring clocks for the perf cluster\n");
+
+	/* v3+ silicon uses different PLL calibration/test settings */
+	if (cpu_clocks_v3) {
+		pwrcl_alt_pll.offline_bit_workaround = false;
+		perfcl_alt_pll.offline_bit_workaround = false;
+		pwrcl_pll.pgm_test_ctl_enable = false;
+		perfcl_pll.pgm_test_ctl_enable = false;
+		pwrcl_pll.vals.config_ctl_val = 0x200d4828;
+		pwrcl_pll.vals.config_ctl_hi_val = 0x006;
+		perfcl_pll.vals.config_ctl_val = 0x200d4828;
+		perfcl_pll.vals.config_ctl_hi_val = 0x006;
+		cbf_pll.vals.config_ctl_val = 0x200d4828;
+		cbf_pll.vals.config_ctl_hi_val = 0x006;
+		pwrcl_pll.vals.test_ctl_lo_val = 0x1C000000;
+		perfcl_pll.vals.test_ctl_lo_val = 0x1C000000;
+		cbf_pll.vals.test_ctl_lo_val = 0x1C000000;
+	}
+
+	if (cpu_clocks_pro)
+		cpu_clock_8996_pro_fixup();
+
+	/*
+	 * We definitely don't want to parse DT here - this is too early and in
+	 * the critical path for boot timing. Just ioremap the bases.
+	 */
+	vbases[APC0_BASE] = ioremap(APC0_BASE_PHY, SZ_4K);
+	if (!vbases[APC0_BASE]) {
+		WARN(1, "Unable to ioremap power mux base. Can't configure CPU clocks\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	vbases[APC1_BASE] = ioremap(APC1_BASE_PHY, SZ_4K);
+	if (!vbases[APC1_BASE]) {
+		WARN(1, "Unable to ioremap perf mux base. Can't configure CPU clocks\n");
+		ret = -ENOMEM;
+		goto apc1_fail;
+	}
+
+	vbases[CBF_BASE] = ioremap(CBF_BASE_PHY, SZ_4K);
+	if (!vbases[CBF_BASE]) {
+		WARN(1, "Unable to ioremap cbf mux base. Can't configure CPU clocks\n");
+		ret = -ENOMEM;
+		goto cbf_map_fail;
+	}
+
+	vbases[CBF_PLL_BASE] = ioremap(CBF_PLL_BASE_PHY, SZ_4K);
+	/* Fix: was checking vbases[CBF_BASE] (copy-paste error) */
+	if (!vbases[CBF_PLL_BASE]) {
+		WARN(1, "Unable to ioremap cbf pll base. Can't configure CPU clocks\n");
+		ret = -ENOMEM;
+		goto cbf_pll_map_fail;
+	}
+
+	vbases[APC0_PLL_BASE] = vbases[APC0_BASE];
+	vbases[APC1_PLL_BASE] = vbases[APC1_BASE];
+
+	auxbase = ioremap(AUX_BASE_PHY, SZ_4K);
+	if (!auxbase) {
+		WARN(1, "Unable to ioremap aux base. Can't configure CPU clocks\n");
+		ret = -ENOMEM;
+		goto auxbase_fail;
+	}
+
+	acd_recal_base = ioremap(APCC_RECAL_DLY_BASE, APCC_RECAL_DLY_SIZE);
+	if (!acd_recal_base) {
+		WARN(1, "Unable to ioremap ACD recal base. Can't configure ACD\n");
+		ret = -ENOMEM;
+		goto acd_recal_base_fail;
+	}
+
+	/*
+	 * Set GPLL0 divider for div-2 to get 300Mhz. This divider
+	 * can be programmed dynamically.
+	 */
+	regval = readl_relaxed(auxbase);
+	regval &= ~BM(17, 16);
+	regval |= 0x1 << 16;
+	writel_relaxed(regval, auxbase);
+
+	/* Ensure write goes through before selecting the aux clock */
+	mb();
+	udelay(5);
+
+	/* Select GPLL0 for 300MHz for the perf cluster */
+	writel_relaxed(0xC, vbases[APC1_BASE] + MUX_OFFSET);
+
+	/* Select GPLL0 for 300MHz for the power cluster */
+	writel_relaxed(0xC, vbases[APC0_BASE] + MUX_OFFSET);
+
+	/* Select GPLL0 for 300MHz on the CBF */
+	writel_relaxed(0x3, vbases[CBF_BASE] + CBF_MUX_OFFSET);
+
+	/* Ensure write goes through before PLLs are reconfigured */
+	mb();
+	udelay(5);
+
+	/* Set the auto clock sel always-on source to GPLL0/2 (300MHz) */
+	regval = readl_relaxed(vbases[APC0_BASE] + MUX_OFFSET);
+	regval &= ~AUTO_CLK_SEL_ALWAYS_ON_MASK;
+	regval |= AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL;
+	writel_relaxed(regval, vbases[APC0_BASE] + MUX_OFFSET);
+
+	regval = readl_relaxed(vbases[APC1_BASE] + MUX_OFFSET);
+	regval &= ~AUTO_CLK_SEL_ALWAYS_ON_MASK;
+	regval |= AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL;
+	writel_relaxed(regval, vbases[APC1_BASE] + MUX_OFFSET);
+
+	/*
+	 * NOTE(review): this uses MUX_OFFSET while every other CBF access in
+	 * this function uses CBF_MUX_OFFSET - confirm this register offset is
+	 * intended for the CBF block.
+	 */
+	regval = readl_relaxed(vbases[CBF_BASE] + MUX_OFFSET);
+	regval &= ~AUTO_CLK_SEL_ALWAYS_ON_MASK;
+	regval |= AUTO_CLK_SEL_ALWAYS_ON_GPLL0_SEL;
+	writel_relaxed(regval, vbases[CBF_BASE] + MUX_OFFSET);
+
+	/* == Setup PLLs in FSM mode == */
+
+	/* Disable all PLLs (we're already on GPLL0 for both clusters) */
+	perfcl_alt_pll.c.ops->disable(&perfcl_alt_pll.c);
+	pwrcl_alt_pll.c.ops->disable(&pwrcl_alt_pll.c);
+	writel_relaxed(0x0, vbases[APC0_BASE] +
+		       (unsigned long)pwrcl_pll.mode_reg);
+	writel_relaxed(0x0, vbases[APC1_BASE] +
+		       (unsigned long)perfcl_pll.mode_reg);
+	writel_relaxed(0x0, vbases[CBF_PLL_BASE] +
+		       (unsigned long)cbf_pll.mode_reg);
+
+	/* Let PLLs disable before re-init'ing them */
+	mb();
+
+	/* Initialize all the PLLs */
+	__variable_rate_pll_init(&perfcl_pll.c);
+	__variable_rate_pll_init(&pwrcl_pll.c);
+	__variable_rate_pll_init(&cbf_pll.c);
+	__init_alpha_pll(&perfcl_alt_pll.c);
+	__init_alpha_pll(&pwrcl_alt_pll.c);
+
+	/* Set an appropriate rate on the perf clusters PLLs */
+	perfcl_pll.c.ops->set_rate(&perfcl_pll.c, perfcl_early_boot_rate);
+	perfcl_alt_pll.c.ops->set_rate(&perfcl_alt_pll.c,
+				       alt_pll_early_boot_rate);
+	pwrcl_pll.c.ops->set_rate(&pwrcl_pll.c, pwrcl_early_boot_rate);
+	pwrcl_alt_pll.c.ops->set_rate(&pwrcl_alt_pll.c,
+				      alt_pll_early_boot_rate);
+
+	/* Set an appropriate rate on the CBF PLL */
+	cbf_pll.c.ops->set_rate(&cbf_pll.c, cbf_early_boot_rate);
+
+	/*
+	 * Enable FSM mode on the primary PLLs.
+	 * This should turn on the PLLs as well.
+	 */
+	writel_relaxed(0x00118000, vbases[APC0_BASE] +
+		       (unsigned long)pwrcl_pll.mode_reg);
+	writel_relaxed(0x00118000, vbases[APC1_BASE] +
+		       (unsigned long)perfcl_pll.mode_reg);
+	/*
+	 * Enable FSM mode on the alternate PLLs.
+	 * This should turn on the PLLs as well.
+	 */
+	writel_relaxed(0x00118000, vbases[APC0_BASE] +
+		       (unsigned long)pwrcl_alt_pll.offset);
+	writel_relaxed(0x00118000, vbases[APC1_BASE] +
+		       (unsigned long)perfcl_alt_pll.offset);
+
+	/*
+	 * Enable FSM mode on the CBF PLL.
+	 * This should turn on the PLL as well.
+	 */
+	writel_relaxed(0x00118000, vbases[CBF_PLL_BASE] +
+		       (unsigned long)cbf_pll.mode_reg);
+
+	/*
+	 * If we're on MSM8996 V1, the CBF FSM bits are not present, and
+	 * the mode register will read zero at this point. In that case,
+	 * just enable the CBF PLL to "simulate" FSM mode.
+	 */
+	if (!readl_relaxed(vbases[CBF_PLL_BASE] +
+			   (unsigned long)cbf_pll.mode_reg))
+		clk_ops_variable_rate_pll.enable(&cbf_pll.c);
+
+	/* Ensure write goes through before auto clock selection is enabled */
+	mb();
+
+	/* Wait for PLL(s) to lock */
+	udelay(50);
+
+	/* Enable auto clock selection for both clusters and the CBF */
+	regval = readl_relaxed(vbases[APC0_BASE] + CLK_CTL_OFFSET);
+	regval |= AUTO_CLK_SEL_BIT;
+	writel_relaxed(regval, vbases[APC0_BASE] + CLK_CTL_OFFSET);
+
+	regval = readl_relaxed(vbases[APC1_BASE] + CLK_CTL_OFFSET);
+	regval |= AUTO_CLK_SEL_BIT;
+	writel_relaxed(regval, vbases[APC1_BASE] + CLK_CTL_OFFSET);
+
+	regval = readl_relaxed(vbases[CBF_BASE] + CBF_MUX_OFFSET);
+	regval |= CBF_AUTO_CLK_SEL_BIT;
+	writel_relaxed(regval, vbases[CBF_BASE] + CBF_MUX_OFFSET);
+
+	/* Ensure write goes through before muxes are switched */
+	mb();
+	udelay(5);
+
+	if (use_alt_pll) {
+		perfcl_hf_mux.safe_parent = &perfcl_lf_mux.c;
+		pwrcl_hf_mux.safe_parent = &pwrcl_lf_mux.c;
+		pwrcl_clk.alt_pll = NULL;
+		perfcl_clk.alt_pll = NULL;
+		pwrcl_clk.do_half_rate = false;
+		perfcl_clk.do_half_rate = false;
+
+		perfcl_hf_mux.parents = (struct clk_src *)
+			&clk_src_perfcl_hf_mux_alt;
+		perfcl_hf_mux.num_parents =
+			ARRAY_SIZE(clk_src_perfcl_hf_mux_alt);
+		pwrcl_hf_mux.parents = (struct clk_src *)
+			&clk_src_pwrcl_hf_mux_alt;
+		pwrcl_hf_mux.num_parents =
+			ARRAY_SIZE(clk_src_pwrcl_hf_mux_alt);
+
+		perfcl_lf_mux.parents = (struct clk_src *)
+			&clk_src_perfcl_lf_mux_alt;
+		perfcl_lf_mux.num_parents =
+			ARRAY_SIZE(clk_src_perfcl_lf_mux_alt);
+		pwrcl_lf_mux.parents = (struct clk_src *)
+			&clk_src_pwrcl_lf_mux_alt;
+		pwrcl_lf_mux.num_parents =
+			ARRAY_SIZE(clk_src_pwrcl_lf_mux_alt);
+
+		/* Switch the clusters to use the alternate PLLs */
+		writel_relaxed(0x33, vbases[APC0_BASE] + MUX_OFFSET);
+		writel_relaxed(0x33, vbases[APC1_BASE] + MUX_OFFSET);
+	}
+
+	/* Switch the CBF to use the primary PLL */
+	regval = readl_relaxed(vbases[CBF_BASE] + CBF_MUX_OFFSET);
+	regval &= ~BM(1, 0);
+	regval |= 0x1;
+	writel_relaxed(regval, vbases[CBF_BASE] + CBF_MUX_OFFSET);
+
+	if (!cpu_clocks_v3)
+		enable_acd = 0;
+
+	if (enable_acd) {
+		int i;
+
+		if (use_alt_pll)
+			panic("Can't enable ACD on the the alternate PLL\n");
+
+		perfcl_clk.has_acd = true;
+		pwrcl_clk.has_acd = true;
+
+		if (cpu_clocks_pro) {
+			/*
+			 * Configure ACS logic to switch to always-on clock
+			 * source during D2-D5 entry. In addition, gate the
+			 * limits management clock during certain sleep states.
+			 */
+			writel_relaxed(0x3, vbases[APC0_BASE] +
+				       MDD_DROOP_CODE);
+			writel_relaxed(0x3, vbases[APC1_BASE] +
+				       MDD_DROOP_CODE);
+			/*
+			 * Ensure that the writes go through before going
+			 * forward.
+			 */
+			wmb();
+
+			/*
+			 * Program the DLY registers to set a voltage settling
+			 * delay time for HW based ACD recalibration.
+			 */
+			writel_relaxed(APCC_RECAL_CPR_DLY_SETTING,
+				       acd_recal_base);
+			writel_relaxed(APCC_RECAL_VCTL_DLY_SETTING,
+				       acd_recal_base +
+				       APCC_RECAL_VCTL_OFFSET);
+			/*
+			 * Ensure that the writes go through before enabling
+			 * ACD.
+			 */
+			wmb();
+		}
+
+		/* Enable ACD on this cluster if necessary */
+		cpu_clock_8996_acd_init();
+
+		/* Ensure we never use the non-ACD leg of the GFMUX */
+		for (i = 0; i < pwrcl_hf_mux.num_parents; i++)
+			if (pwrcl_hf_mux.parents[i].src == &pwrcl_pll.c)
+				pwrcl_hf_mux.parents[i].sel = 2;
+
+		for (i = 0; i < perfcl_hf_mux.num_parents; i++)
+			if (perfcl_hf_mux.parents[i].src == &perfcl_pll.c)
+				perfcl_hf_mux.parents[i].sel = 2;
+
+		BUG_ON(register_hotcpu_notifier(&clock_cpu_8996_cpu_notifier));
+
+		/* Pulse swallower and soft-start settings */
+		writel_relaxed(0x00030005, vbases[APC0_BASE] + PSCTL_OFFSET);
+		writel_relaxed(0x00030005, vbases[APC1_BASE] + PSCTL_OFFSET);
+
+		/* Ensure all config above goes through before the ACD switch */
+		mb();
+
+		/* Switch the clusters to use the ACD leg */
+		writel_relaxed(0x32, vbases[APC0_BASE] + MUX_OFFSET);
+		writel_relaxed(0x32, vbases[APC1_BASE] + MUX_OFFSET);
+	} else {
+		/* Switch the clusters to use the primary PLLs */
+		writel_relaxed(0x31, vbases[APC0_BASE] + MUX_OFFSET);
+		writel_relaxed(0x31, vbases[APC1_BASE] + MUX_OFFSET);
+	}
+
+	/*
+	 * One time print during boot - this is the earliest time
+	 * that Linux configures the CPU clocks. It's critical for
+	 * debugging that we know that this configuration completed,
+	 * especially when debugging CPU hangs.
+	 */
+	pr_info("%s: finished CPU clock configuration\n", __func__);
+
+	/*
+	 * The temporary mappings (and the CBF bases, which probe re-maps from
+	 * DT via devm_ioremap) are torn down on success as well; only the APC
+	 * bases stay mapped for the ACD hotplug callback.
+	 */
+	iounmap(acd_recal_base);
+acd_recal_base_fail:
+	iounmap(auxbase);
+auxbase_fail:
+	iounmap(vbases[CBF_PLL_BASE]);
+cbf_pll_map_fail:
+	iounmap(vbases[CBF_BASE]);
+cbf_map_fail:
+	if (ret) {
+		iounmap(vbases[APC1_BASE]);
+		vbases[APC1_BASE] = NULL;
+	}
+apc1_fail:
+	if (ret) {
+		iounmap(vbases[APC0_BASE]);
+		vbases[APC0_BASE] = NULL;
+	}
+fail:
+	return ret;
+}
+early_initcall(cpu_clock_8996_early_init);
+
+MODULE_DESCRIPTION("CPU clock driver for msm8996");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/msm/clock-debug.c b/drivers/clk/msm/clock-debug.c
new file mode 100644
index 000000000000..d0ff821eb203
--- /dev/null
+++ b/drivers/clk/msm/clock-debug.c
@@ -0,0 +1,676 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/clkdev.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <trace/events/power.h>
+
+
+#include "clock.h"
+
+static LIST_HEAD(clk_list);
+static DEFINE_MUTEX(clk_list_lock);
+
+static struct dentry *debugfs_base;
+static u32 debug_suspend;
+
+/*
+ * debugfs "rate" write handler: request a new rate (Hz) for the clock.
+ * For clocks flagged CLKFLAG_MAX, raise the max-rate vote first so the
+ * subsequent clk_set_rate() is not rejected by the existing ceiling.
+ */
+static int clock_debug_rate_set(void *data, u64 val)
+{
+	struct clk *clock = data;
+	int ret;
+
+	/*
+	 * Only increases to max rate will succeed, but that's actually good
+	 * for debugging purposes so we don't check for error.
+	 */
+	if (clock->flags & CLKFLAG_MAX)
+		clk_set_max_rate(clock, val);
+	ret = clk_set_rate(clock, val);
+	if (ret)
+		pr_err("clk_set_rate(%s, %lu) failed (%d)\n", clock->dbg_name,
+				(unsigned long)val, ret);
+
+	return ret;
+}
+
+/* debugfs "rate" read handler: report the clock's current rate in Hz. */
+static int clock_debug_rate_get(void *data, u64 *val)
+{
+	struct clk *c = data;
+
+	*val = clk_get_rate(c);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
+ clock_debug_rate_set, "%llu\n");
+
+static struct clk *measure;
+
+/*
+ * debugfs "measure" read handler: route the clock onto the global debug
+ * measurement mux ("measure") and report the hardware-measured rate.
+ */
+static int clock_debug_measure_get(void *data, u64 *val)
+{
+	struct clk *clock = data, *par;
+	int ret, is_hw_gated;
+	unsigned long meas_rate, sw_rate;
+
+	/* Check to see if the clock is in hardware gating mode */
+	if (clock->ops->in_hwcg_mode)
+		is_hw_gated = clock->ops->in_hwcg_mode(clock);
+	else
+		is_hw_gated = 0;
+
+	ret = clk_set_parent(measure, clock);
+	if (!ret) {
+		/*
+		 * Disable hw gating to get accurate rate measurements. Only do
+		 * this if the clock is explicitly enabled by software. This
+		 * allows us to detect errors where clocks are on even though
+		 * software is not requesting them to be on due to broken
+		 * hardware gating signals.
+		 */
+		if (is_hw_gated && clock->count)
+			clock->ops->disable_hwcg(clock);
+		/*
+		 * Force-enable every clock between the mux and the clock
+		 * under test so the measurement path is live.
+		 * NOTE(review): these ops->enable() calls are never paired
+		 * with ops->disable() here — confirm leaving the path
+		 * enabled after measurement is intentional for debug use.
+		 */
+		par = measure;
+		while (par && par != clock) {
+			if (par->ops->enable)
+				par->ops->enable(par);
+			par = par->parent;
+		}
+		*val = clk_get_rate(measure);
+		/* Reenable hwgating if it was disabled */
+		if (is_hw_gated && clock->count)
+			clock->ops->enable_hwcg(clock);
+	}
+
+	/*
+	 * If there's a divider on the path from the clock output to the
+	 * measurement circuitry, account for it by dividing the original clock
+	 * rate with the rate set on the parent of the measure clock.
+	 */
+	meas_rate = clk_get_rate(clock);
+	sw_rate = clk_get_rate(measure->parent);
+	if (sw_rate && meas_rate >= (sw_rate * 2))
+		*val *= DIV_ROUND_CLOSEST(meas_rate, sw_rate);
+
+	return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_measure_fops, clock_debug_measure_get,
+ NULL, "%lld\n");
+
+/* debugfs "enable" write handler: non-zero prepares+enables, zero disables. */
+static int clock_debug_enable_set(void *data, u64 val)
+{
+	struct clk *c = data;
+
+	if (!val) {
+		clk_disable_unprepare(c);
+		return 0;
+	}
+
+	return clk_prepare_enable(c);
+}
+
+/* debugfs "enable" read handler: report whether the clock is enabled. */
+static int clock_debug_enable_get(void *data, u64 *val)
+{
+	struct clk *c = data;
+
+	/* Prefer the hardware's own view when the clock can report it. */
+	*val = c->ops->is_enabled ? c->ops->is_enabled(c) : !!(c->count);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
+ clock_debug_enable_set, "%lld\n");
+
+/* debugfs "is_local" read handler: clocks without an is_local op count as local. */
+static int clock_debug_local_get(void *data, u64 *val)
+{
+	struct clk *c = data;
+
+	*val = c->ops->is_local ? c->ops->is_local(c) : true;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_local_fops, clock_debug_local_get,
+ NULL, "%llu\n");
+
+/* debugfs "has_hw_gating" read handler: 1 if hardware clock gating is active. */
+static int clock_debug_hwcg_get(void *data, u64 *val)
+{
+	struct clk *c = data;
+
+	*val = c->ops->in_hwcg_mode ? !!c->ops->in_hwcg_mode(c) : 0;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_hwcg_fops, clock_debug_hwcg_get,
+ NULL, "%llu\n");
+
+/*
+ * Emit one row of the fmax_rates table: the fmax for @level plus the
+ * per-regulator uV (and optional uA) votes at that level. The row for
+ * the clock's current voltage level is bracketed with "[" ... "]".
+ */
+static void clock_print_fmax_by_level(struct seq_file *m, int level)
+{
+	struct clk *clock = m->private;
+	struct clk_vdd_class *vdd_class = clock->vdd_class;
+	int off, i, vdd_level, nregs = vdd_class->num_regulators;
+
+	vdd_level = find_vdd_level(clock, clock->rate);
+
+	seq_printf(m, "%2s%10lu", vdd_level == level ? "[" : "",
+		clock->fmax[level]);
+	for (i = 0; i < nregs; i++) {
+		/* vdd_uv/vdd_ua are flat arrays indexed by [level][regulator] */
+		off = nregs*level + i;
+		if (vdd_class->vdd_uv)
+			seq_printf(m, "%10u", vdd_class->vdd_uv[off]);
+		if (vdd_class->vdd_ua)
+			seq_printf(m, "%10u", vdd_class->vdd_ua[off]);
+	}
+
+	if (vdd_level == level)
+		seq_puts(m, "]");
+	seq_puts(m, "\n");
+}
+
+/*
+ * seq_file show for the per-clock "fmax_rates" node: prints a table of
+ * each voltage level's max frequency and regulator votes, with the
+ * currently-active level bracketed by clock_print_fmax_by_level().
+ */
+static int fmax_rates_show(struct seq_file *m, void *unused)
+{
+	struct clk *clock = m->private;
+	struct clk_vdd_class *vdd_class = clock->vdd_class;
+	int level = 0, i, nregs = vdd_class->num_regulators;
+	char reg_name[10];
+
+	int vdd_level = find_vdd_level(clock, clock->rate);
+	if (vdd_level < 0) {
+		seq_printf(m, "could not find_vdd_level for %s, %ld\n",
+			clock->dbg_name, clock->rate);
+		return 0;
+	}
+
+	/* Header row: one "reg N" column per regulator */
+	seq_printf(m, "%12s", "");
+	for (i = 0; i < nregs; i++) {
+		snprintf(reg_name, ARRAY_SIZE(reg_name), "reg %d", i);
+		seq_printf(m, "%10s", reg_name);
+		if (vdd_class->vdd_ua)
+			seq_printf(m, "%10s", "");
+	}
+
+	/* Units row: uV always, uA only when the class tracks current votes */
+	seq_printf(m, "\n%12s", "freq");
+	for (i = 0; i < nregs; i++) {
+		seq_printf(m, "%10s", "uV");
+		if (vdd_class->vdd_ua)
+			seq_printf(m, "%10s", "uA");
+	}
+	seq_printf(m, "\n");
+
+	for (level = 0; level < clock->num_fmax; level++)
+		clock_print_fmax_by_level(m, level);
+
+	return 0;
+}
+
+/* Bind the per-clock "fmax_rates" node to fmax_rates_show(). */
+static int fmax_rates_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, fmax_rates_show, inode->i_private);
+}
+
+static const struct file_operations fmax_rates_fops = {
+ .open = fmax_rates_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/* List the dbg_name of every clock whose parent was never resolved. */
+static int orphan_list_show(struct seq_file *m, void *unused)
+{
+	struct clk *c, *tmp;
+
+	list_for_each_entry_safe(c, tmp, &orphan_clk_list, list)
+		seq_printf(m, "%s\n", c->dbg_name);
+
+	return 0;
+}
+
+/* Bind the global "orphan_list" node to orphan_list_show(). */
+static int orphan_list_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, orphan_list_show, inode->i_private);
+}
+
+static const struct file_operations orphan_list_fops = {
+ .open = orphan_list_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * Route debug output to a seq_file when @m is given, otherwise to the
+ * kernel log: pr_cont when @c is set (continue the current log line),
+ * pr_info to start a new one.
+ */
+#define clock_debug_output(m, c, fmt, ...) \
+do { \
+	if (m) \
+		seq_printf(m, fmt, ##__VA_ARGS__); \
+	else if (c) \
+		pr_cont(fmt, ##__VA_ARGS__); \
+	else \
+		pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+/*
+ * Print one prepared clock and its ancestor chain in the form
+ * "name:prepare_count:count [rate(, vdd_level)] -> parent...".
+ * Returns 1 if the clock was printed (it had a prepare vote), 0
+ * otherwise, so the caller can tally enabled clocks.
+ */
+static int clock_debug_print_clock(struct clk *c, struct seq_file *m)
+{
+	char *start = "";
+
+	if (!c || !c->prepare_count)
+		return 0;
+
+	clock_debug_output(m, 0, "\t");
+	do {
+		if (c->vdd_class)
+			clock_debug_output(m, 1, "%s%s:%u:%u [%ld, %d]", start,
+				c->dbg_name, c->prepare_count, c->count,
+				c->rate, find_vdd_level(c, c->rate));
+		else
+			clock_debug_output(m, 1, "%s%s:%u:%u [%ld]", start,
+				c->dbg_name, c->prepare_count, c->count,
+				c->rate);
+		start = " -> ";
+	} while ((c = clk_get_parent(c)));
+
+	clock_debug_output(m, 1, "\n");
+
+	return 1;
+}
+
+/**
+ * clock_debug_print_enabled_clocks() - Print names of enabled clocks
+ * @m: seq_file to print to, or NULL to print to the kernel log
+ */
+static void clock_debug_print_enabled_clocks(struct seq_file *m)
+{
+	struct clk *c;
+	int cnt = 0;
+
+	/*
+	 * trylock: if a registration currently holds the list lock, skip
+	 * the dump with an error rather than blocking the caller.
+	 */
+	if (!mutex_trylock(&clk_list_lock)) {
+		pr_err("clock-debug: Clocks are being registered. Cannot print clock state now.\n");
+		return;
+	}
+	clock_debug_output(m, 0, "Enabled clocks:\n");
+	list_for_each_entry(c, &clk_list, list) {
+		cnt += clock_debug_print_clock(c, m);
+	}
+	mutex_unlock(&clk_list_lock);
+
+	if (cnt)
+		clock_debug_output(m, 0, "Enabled clock count: %d\n", cnt);
+	else
+		clock_debug_output(m, 0, "No clocks enabled.\n");
+}
+
+/* seq_file show: dump every enabled clock and its parent chain. */
+static int enabled_clocks_show(struct seq_file *m, void *unused)
+{
+	clock_debug_print_enabled_clocks(m);
+
+	return 0;
+}
+
+/* Bind the global "enabled_clocks" node to enabled_clocks_show(). */
+static int enabled_clocks_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, enabled_clocks_show, inode->i_private);
+}
+
+static const struct file_operations enabled_clocks_fops = {
+ .open = enabled_clocks_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * Emit a clock_state trace event for every registered clock, then print
+ * the total number traced.
+ * NOTE(review): returns 1 rather than a negative errno when the list
+ * lock is contended; seq_file treats a positive ->show() return as
+ * "skip this record" — confirm this is the intended behavior.
+ */
+static int trace_clocks_show(struct seq_file *m, void *unused)
+{
+	struct clk *c;
+	int total_cnt = 0;
+
+	if (!mutex_trylock(&clk_list_lock)) {
+		pr_err("trace_clocks: Clocks are being registered. Cannot trace clock state now.\n");
+		return 1;
+	}
+	list_for_each_entry(c, &clk_list, list) {
+		trace_clock_state(c->dbg_name, c->prepare_count, c->count,
+				c->rate);
+		total_cnt++;
+	}
+	mutex_unlock(&clk_list_lock);
+	clock_debug_output(m, 0, "Total clock count: %d\n", total_cnt);
+
+	return 0;
+}
+
+/* Bind the global "trace_clocks" node to trace_clocks_show(). */
+static int trace_clocks_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, trace_clocks_show, inode->i_private);
+}
+static const struct file_operations trace_clocks_fops = {
+ .open = trace_clocks_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * seq_file show for "list_rates": print every frequency the clock's
+ * list_rate op enumerates that is reachable within voltage constraints
+ * (at or below the highest non-zero fmax entry).
+ */
+static int list_rates_show(struct seq_file *m, void *unused)
+{
+	struct clk *clock = m->private;
+	int level, i = 0;
+	unsigned long rate, fmax = 0;
+
+	/* Find max frequency supported within voltage constraints. */
+	if (!clock->vdd_class) {
+		fmax = ULONG_MAX;
+	} else {
+		for (level = 0; level < clock->num_fmax; level++)
+			if (clock->fmax[level])
+				fmax = clock->fmax[level];
+	}
+
+	/*
+	 * List supported frequencies <= fmax. Higher frequencies may appear in
+	 * the frequency table, but are not valid and should not be listed.
+	 */
+	while (!IS_ERR_VALUE(rate = clock->ops->list_rate(clock, i++))) {
+		if (rate <= fmax)
+			seq_printf(m, "%lu\n", rate);
+	}
+
+	return 0;
+}
+
+/* Bind the per-clock "list_rates" node to list_rates_show(). */
+static int list_rates_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, list_rates_show, inode->i_private);
+}
+
+static const struct file_operations list_rates_fops = {
+ .open = list_rates_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * debugfs "parent" read handler: print the parent clock's dbg_name, or
+ * "None" when the clock has no parent.
+ */
+static ssize_t clock_parent_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	struct clk *clock = filp->private_data;
+	struct clk *p = clock->parent;
+	char name[256] = {0};
+
+	/*
+	 * The format string supplies the trailing newline; the old
+	 * "None\n" literal produced a spurious blank line here.
+	 */
+	snprintf(name, sizeof(name), "%s\n", p ? p->dbg_name : "None");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, name, strlen(name));
+}
+
+
+/*
+ * debugfs "parent" write handler: look up a registered clock by its
+ * dbg_name and reparent this clock to it. Returns @cnt on success,
+ * -EINVAL when no clock matches, -EFAULT on a bad user buffer, or the
+ * clk_set_parent() error.
+ */
+static ssize_t clock_parent_write(struct file *filp,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct clk *clock = filp->private_data;
+	char buf[256];
+	char *cmp;
+	int ret;
+	struct clk *parent = NULL;
+
+	/* NUL-terminate and strip whitespace from the user-supplied name */
+	cnt = min(cnt, sizeof(buf) - 1);
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+	buf[cnt] = '\0';
+	cmp = strstrip(buf);
+
+	mutex_lock(&clk_list_lock);
+	list_for_each_entry(parent, &clk_list, list) {
+		if (!strcmp(cmp, parent->dbg_name))
+			break;
+	}
+
+	/* Loop ran to completion: no registered clock has that name */
+	if (&parent->list == &clk_list) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	mutex_unlock(&clk_list_lock);
+	ret = clk_set_parent(clock, parent);
+	if (ret)
+		return ret;
+
+	return cnt;
+err:
+	mutex_unlock(&clk_list_lock);
+	return ret;
+}
+
+
+static const struct file_operations clock_parent_fops = {
+ .open = simple_open,
+ .read = clock_parent_read,
+ .write = clock_parent_write,
+};
+
+/*
+ * Recursively dump register contents for @clk and all of its ancestors
+ * (root-most first) via the clock's list_registers op. Output goes to
+ * @f, or to the kernel log when @f is NULL.
+ */
+void clk_debug_print_hw(struct clk *clk, struct seq_file *f)
+{
+	void __iomem *base;
+	struct clk_register_data *regs;
+	u32 i, j, size;
+
+	if (IS_ERR_OR_NULL(clk))
+		return;
+
+	/* Print ancestors first so the chain reads top-down */
+	clk_debug_print_hw(clk->parent, f);
+
+	clock_debug_output(f, false, "%s\n", clk->dbg_name);
+
+	if (!clk->ops->list_registers)
+		return;
+
+	/* Walk each register bank (index j) until the op returns IS_ERR */
+	j = 0;
+	base = clk->ops->list_registers(clk, j, &regs, &size);
+	while (!IS_ERR(base)) {
+		for (i = 0; i < size; i++) {
+			u32 val = readl_relaxed(base + regs[i].offset);
+			clock_debug_output(f, false, "%20s: 0x%.8x\n",
+						regs[i].name, val);
+		}
+		j++;
+		base = clk->ops->list_registers(clk, j, &regs, &size);
+	}
+}
+
+/* seq_file show: dump hardware register state for this clock's chain. */
+static int print_hw_show(struct seq_file *m, void *unused)
+{
+	clk_debug_print_hw(m->private, m);
+
+	return 0;
+}
+
+/* Bind the per-clock "print" node to print_hw_show(). */
+static int print_hw_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, print_hw_show, inode->i_private);
+}
+
+static const struct file_operations clock_print_hw_fops = {
+ .open = print_hw_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+
+/*
+ * Create the per-clock "measure" debugfs node, but only when the global
+ * measurement mux exists and can select this clock as an input.
+ */
+static void clock_measure_add(struct clk *clock)
+{
+	if (IS_ERR_OR_NULL(measure))
+		return;
+
+	/* Clocks that cannot be routed onto the debug mux get no node */
+	if (clk_set_parent(measure, clock))
+		return;
+
+	debugfs_create_file("measure", S_IRUGO, clock->clk_dir, clock,
+				&clock_measure_fops);
+}
+
+/*
+ * Create the per-clock debugfs directory (lower-cased dbg_name) and its
+ * standard nodes: rate, enable, is_local, has_hw_gating, parent, print,
+ * plus list_rates/fmax_rates/measure where the clock supports them.
+ * Returns 0 on success or -ENOMEM, removing any partial directory.
+ */
+static int clock_debug_add(struct clk *clock)
+{
+	char temp[50], *ptr;
+	struct dentry *clk_dir;
+
+	if (!debugfs_base)
+		return -ENOMEM;
+
+	/* debugfs directory names are lower-cased copies of dbg_name */
+	strlcpy(temp, clock->dbg_name, ARRAY_SIZE(temp));
+	for (ptr = temp; *ptr; ptr++)
+		*ptr = tolower(*ptr);
+
+	clk_dir = debugfs_create_dir(temp, debugfs_base);
+	if (!clk_dir)
+		return -ENOMEM;
+
+	clock->clk_dir = clk_dir;
+
+	if (!debugfs_create_file("rate", S_IRUGO | S_IWUSR, clk_dir,
+				clock, &clock_rate_fops))
+		goto error;
+
+	if (!debugfs_create_file("enable", S_IRUGO | S_IWUSR, clk_dir,
+				clock, &clock_enable_fops))
+		goto error;
+
+	if (!debugfs_create_file("is_local", S_IRUGO, clk_dir, clock,
+				&clock_local_fops))
+		goto error;
+
+	if (!debugfs_create_file("has_hw_gating", S_IRUGO, clk_dir, clock,
+				&clock_hwcg_fops))
+		goto error;
+
+	/* Optional nodes, created only when the clock supports them */
+	if (clock->ops->list_rate)
+		if (!debugfs_create_file("list_rates",
+				S_IRUGO, clk_dir, clock, &list_rates_fops))
+			goto error;
+
+	if (clock->vdd_class && !debugfs_create_file("fmax_rates",
+				S_IRUGO, clk_dir, clock, &fmax_rates_fops))
+		goto error;
+
+	if (!debugfs_create_file("parent", S_IRUGO, clk_dir, clock,
+				&clock_parent_fops))
+		goto error;
+
+	if (!debugfs_create_file("print", S_IRUGO, clk_dir, clock,
+				&clock_print_hw_fops))
+		goto error;
+
+	clock_measure_add(clock);
+
+	return 0;
+error:
+	debugfs_remove_recursive(clk_dir);
+	return -ENOMEM;
+}
+static DEFINE_MUTEX(clk_debug_lock);
+static int clk_debug_init_once;
+
+/**
+ * clock_debug_init() - Initialize clock debugfs
+ * Lock clk_debug_lock before invoking this function.
+ *
+ * One-time creation of the top-level "clk" directory and the global
+ * debug_suspend, enabled_clocks, orphan_list and trace_clocks nodes.
+ * NOTE(review): the last three failure paths return -ENOMEM without
+ * removing debugfs_base, unlike the debug_suspend path — confirm
+ * whether leaving the partial directory is intentional.
+ */
+static int clock_debug_init(void)
+{
+	if (clk_debug_init_once)
+		return 0;
+
+	clk_debug_init_once = 1;
+
+	debugfs_base = debugfs_create_dir("clk", NULL);
+	if (!debugfs_base)
+		return -ENOMEM;
+
+	if (!debugfs_create_u32("debug_suspend", S_IRUGO | S_IWUSR,
+				debugfs_base, &debug_suspend)) {
+		debugfs_remove_recursive(debugfs_base);
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_file("enabled_clocks", S_IRUGO, debugfs_base, NULL,
+				&enabled_clocks_fops))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("orphan_list", S_IRUGO, debugfs_base, NULL,
+				&orphan_list_fops))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("trace_clocks", S_IRUGO, debugfs_base, NULL,
+				&trace_clocks_fops))
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * clock_debug_register() - Add additional clocks to clock debugfs hierarchy
+ * @clk: Clock to create debugfs nodes for
+ *
+ * Also latches the first clock flagged CLKFLAG_MEASURE as the global
+ * measurement mux, and at that moment retrofits "measure" nodes onto
+ * clocks that were registered earlier.
+ */
+int clock_debug_register(struct clk *clk)
+{
+	int ret = 0;
+	struct clk *c;
+
+	mutex_lock(&clk_list_lock);
+	/* Already on the list: nothing to do */
+	if (!list_empty(&clk->list))
+		goto out;
+
+	ret = clock_debug_init();
+	if (ret)
+		goto out;
+
+	if (IS_ERR_OR_NULL(measure)) {
+		if (clk->flags & CLKFLAG_MEASURE)
+			measure = clk;
+		if (!IS_ERR_OR_NULL(measure)) {
+			list_for_each_entry(c, &clk_list, list)
+				clock_measure_add(c);
+		}
+	}
+
+	list_add_tail(&clk->list, &clk_list);
+	clock_debug_add(clk);
+out:
+	mutex_unlock(&clk_list_lock);
+	return ret;
+}
+
+/*
+ * Dump the enabled clocks and their parent chains to the kernel log,
+ * but only when the debugfs "debug_suspend" knob has been set.
+ */
+void clock_debug_print_enabled(void)
+{
+	if (unlikely(debug_suspend))
+		clock_debug_print_enabled_clocks(NULL);
+}
diff --git a/drivers/clk/msm/clock-dummy.c b/drivers/clk/msm/clock-dummy.c
new file mode 100644
index 000000000000..e5339b110cd6
--- /dev/null
+++ b/drivers/clk/msm/clock-dummy.c
@@ -0,0 +1,113 @@
+/* Copyright (c) 2011,2013-2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+/* Resets are a no-op for the dummy clock; always report success. */
+static int dummy_clk_reset(struct clk *clk, enum clk_reset_action action)
+{
+	return 0;
+}
+
+/* Record the requested rate so get_rate() can echo it back. */
+static int dummy_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	clk->rate = rate;
+	return 0;
+}
+
+/* Max-rate votes are accepted and ignored. */
+static int dummy_clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+	return 0;
+}
+
+/* Flag updates are accepted and ignored. */
+static int dummy_clk_set_flags(struct clk *clk, unsigned flags)
+{
+	return 0;
+}
+
+/* Report whatever rate was last stored by set_rate(). */
+static unsigned long dummy_clk_get_rate(struct clk *clk)
+{
+	return clk->rate;
+}
+
+/* Every rate is acceptable: round_rate is the identity. */
+static long dummy_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	return rate;
+}
+
+/* Ops table for dummy clocks: every operation succeeds without touching hw. */
+struct clk_ops clk_ops_dummy = {
+	.reset = dummy_clk_reset,
+	.set_rate = dummy_clk_set_rate,
+	.set_max_rate = dummy_clk_set_max_rate,
+	.set_flags = dummy_clk_set_flags,
+	.get_rate = dummy_clk_get_rate,
+	.round_rate = dummy_clk_round_rate,
+};
+
+/* Shared placeholder clock handed out by the "qcom,dummycc" provider. */
+struct clk dummy_clk = {
+	.dbg_name = "dummy_clk",
+	.ops = &clk_ops_dummy,
+	CLK_INIT(dummy_clk),
+};
+
+/*
+ * DT parser for "qcom,dummy-clk" nodes: allocate a bare struct clk
+ * backed by the dummy ops and hand it to the generic msmclk
+ * initializer. Returns the initialized clock or an ERR_PTR.
+ */
+static void *dummy_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct clk *c;
+	c = devm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
+	if (!c) {
+		dev_err(dev, "failed to map memory for %s\n", np->name);
+		return ERR_PTR(-ENOMEM);
+	}
+	c->ops = &clk_ops_dummy;
+	return msmclk_generic_clk_init(dev, np, c);
+}
+MSMCLK_PARSER(dummy_clk_dt_parser, "qcom,dummy-clk", 0);
+
+/* Clock-provider callback: every consumer request gets the shared dummy. */
+static struct clk *of_dummy_get(struct of_phandle_args *clkspec,
+					void *data)
+{
+	return &dummy_clk;
+}
+
+/* Match "qcom,dummycc" nodes to this driver. */
+static struct of_device_id msm_clock_dummy_match_table[] = {
+	{ .compatible = "qcom,dummycc" },
+	{}
+};
+
+/*
+ * Probe: register this DT node as a clock provider that hands out the
+ * shared dummy clock. Returns 0 on success or the error from
+ * of_clk_add_provider().
+ */
+static int msm_clock_dummy_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	ret = of_clk_add_provider(pdev->dev.of_node, of_dummy_get, NULL);
+	if (ret)
+		/* Propagate the real error instead of masking it as -ENOMEM */
+		return ret;
+
+	dev_info(&pdev->dev, "Registered DUMMY provider.\n");
+	return 0;
+}
+
+static struct platform_driver msm_clock_dummy_driver = {
+ .probe = msm_clock_dummy_probe,
+ .driver = {
+ .name = "clock-dummy",
+ .of_match_table = msm_clock_dummy_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+/* Register the dummy clock-controller platform driver at arch initcall time. */
+int __init msm_dummy_clk_init(void)
+{
+	return platform_driver_register(&msm_clock_dummy_driver);
+}
+arch_initcall(msm_dummy_clk_init);
diff --git a/drivers/clk/msm/clock-gcc-8996.c b/drivers/clk/msm/clock-gcc-8996.c
new file mode 100644
index 000000000000..edd78e29957d
--- /dev/null
+++ b/drivers/clk/msm/clock-gcc-8996.c
@@ -0,0 +1,3825 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-voter.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/clock-alpha-pll.h>
+#include <soc/qcom/rpm-smd.h>
+#include <soc/qcom/clock-rpm.h>
+
+#include <dt-bindings/clock/msm-clocks-8996.h>
+#include <dt-bindings/clock/msm-clocks-hwio-8996.h>
+
+#include "vdd-level-8996.h"
+
+static void __iomem *virt_base;
+static void __iomem *virt_dbgbase;
+
+#define cxo_clk_src_source_val 0
+#define cxo_clk_src_ao_source_val 0
+#define gpll0_out_main_source_val 1
+#define gpll4_out_main_source_val 5
+
+/* Encode an RCG pre-divider for the register field: (2 * div) - 1, 0 = bypass. */
+#define FIXDIV(div) (div ? (2 * (div) - 1) : (0))
+
+/*
+ * Build one frequency-table entry for an RCG. The M/N counter values
+ * are stored in the hardware's not(N-M)/not(N) form (n_val is forced to
+ * 0 when N is 0), and div_src_val packs the divider (bits 4:0) together
+ * with the parent's source-select value (bits 10:8).
+ */
+#define F(f, s, div, m, n) \
+	{ \
+		.freq_hz = (f), \
+		.src_clk = &s.c, \
+		.m_val = (m), \
+		.n_val = ~((n)-(m)) * !!(n), \
+		.d_val = ~(n),\
+		.div_src_val = BVAL(4, 0, (int)FIXDIV(div)) \
+			| BVAL(10, 8, s##_source_val), \
+	}
+
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
+
+DEFINE_EXT_CLK(mmss_gcc_dbg_clk, NULL);
+DEFINE_EXT_CLK(gpu_gcc_dbg_clk, NULL);
+DEFINE_EXT_CLK(cpu_dbg_clk, NULL);
+DEFINE_CLK_RPM_SMD_BRANCH(cxo_clk_src, cxo_clk_src_ao, RPM_MISC_CLK_TYPE,
+ CXO_CLK_SRC_ID, 19200000);
+DEFINE_CLK_RPM_SMD(pnoc_clk, pnoc_a_clk, RPM_BUS_CLK_TYPE, PNOC_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD(bimc_clk, bimc_a_clk, RPM_MEM_CLK_TYPE, BIMC_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD(cnoc_clk, cnoc_a_clk, RPM_BUS_CLK_TYPE, CNOC_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_CLK_ID, NULL);
+
+DEFINE_CLK_RPM_SMD(mmssnoc_axi_rpm_clk, mmssnoc_axi_rpm_a_clk,
+ RPM_MMAXI_CLK_TYPE, MMXI_CLK_ID, NULL);
+DEFINE_CLK_VOTER(mmssnoc_axi_clk, &mmssnoc_axi_rpm_clk.c, 0);
+DEFINE_CLK_VOTER(mmssnoc_axi_a_clk, &mmssnoc_axi_rpm_a_clk.c, 0);
+DEFINE_CLK_VOTER(mmssnoc_gds_clk, &mmssnoc_axi_rpm_clk.c, 40000000);
+
+DEFINE_CLK_RPM_SMD_BRANCH(aggre1_noc_clk, aggre1_noc_a_clk,
+ RPM_AGGR_CLK_TYPE, AGGR1_NOC_ID, 1000);
+DEFINE_CLK_RPM_SMD_BRANCH(aggre2_noc_clk, aggre2_noc_a_clk,
+ RPM_AGGR_CLK_TYPE, AGGR2_NOC_ID, 1000);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(bb_clk1, bb_clk1_ao, BB_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(bb_clk1_pin, bb_clk1_pin_ao,
+ BB_CLK1_PIN_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(bb_clk2, bb_clk2_ao, BB_CLK2_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(bb_clk2_pin, bb_clk2_pin_ao,
+ BB_CLK2_PIN_ID);
+static DEFINE_CLK_VOTER(bimc_msmbus_clk, &bimc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_msmbus_a_clk, &bimc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(cnoc_msmbus_clk, &cnoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(cnoc_msmbus_a_clk, &cnoc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_BRANCH_VOTER(cxo_dwc3_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_lpm_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_otg_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_pil_lpass_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_pil_ssc_clk, &cxo_clk_src.c);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(div_clk1, div_clk1_ao, DIV_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(div_clk2, div_clk2_ao, DIV_CLK2_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(div_clk3, div_clk3_ao, DIV_CLK3_ID);
+DEFINE_CLK_RPM_SMD(ipa_clk, ipa_a_clk, RPM_IPA_CLK_TYPE,
+ IPA_CLK_ID, NULL);
+
+DEFINE_CLK_RPM_SMD(ce1_clk, ce1_a_clk, RPM_CE_CLK_TYPE,
+ CE1_CLK_ID, NULL);
+DEFINE_CLK_DUMMY(gcc_ce1_ahb_m_clk, 0);
+DEFINE_CLK_DUMMY(gcc_ce1_axi_m_clk, 0);
+DEFINE_CLK_DUMMY(measure_only_bimc_hmss_axi_clk, 0);
+
+DEFINE_CLK_RPM_SMD_XO_BUFFER(ln_bb_clk, ln_bb_a_clk, LN_BB_CLK_ID);
+static DEFINE_CLK_VOTER(mcd_ce1_clk, &ce1_clk.c, 85710000);
+static DEFINE_CLK_VOTER(pnoc_keepalive_a_clk, &pnoc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(pnoc_msmbus_clk, &pnoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(pnoc_msmbus_a_clk, &pnoc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(pnoc_pm_clk, &pnoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(pnoc_sps_clk, &pnoc_clk.c, 0);
+static DEFINE_CLK_VOTER(qcedev_ce1_clk, &ce1_clk.c, 85710000);
+static DEFINE_CLK_VOTER(qcrypto_ce1_clk, &ce1_clk.c, 85710000);
+DEFINE_CLK_RPM_SMD_QDSS(qdss_clk, qdss_a_clk, RPM_MISC_CLK_TYPE,
+ QDSS_CLK_ID);
+static DEFINE_CLK_VOTER(qseecom_ce1_clk, &ce1_clk.c, 85710000);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(rf_clk1, rf_clk1_ao, RF_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(rf_clk1_pin, rf_clk1_pin_ao,
+ RF_CLK1_PIN_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(rf_clk2, rf_clk2_ao, RF_CLK2_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(rf_clk2_pin, rf_clk2_pin_ao,
+ RF_CLK2_PIN_ID);
+static DEFINE_CLK_VOTER(scm_ce1_clk, &ce1_clk.c, 85710000);
+static DEFINE_CLK_VOTER(snoc_msmbus_clk, &snoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(snoc_msmbus_a_clk, &snoc_a_clk.c, LONG_MAX);
+
+/* Soft-vote bitmask shared by the gpll0 (PRIMARY) and gpll0_ao (ACPU) voters. */
+static unsigned int soft_vote_gpll0;
+
+/* GPLL0: 600 MHz PLL sourced from CXO, enabled via the APCS vote register. */
+static struct pll_vote_clk gpll0 = {
+	.en_reg = (void __iomem *)GCC_APCS_GPLL_ENA_VOTE,
+	.en_mask = BIT(0),
+	.status_reg = (void __iomem *)GCC_GPLL0_MODE,
+	.status_mask = BIT(30),
+	.soft_vote = &soft_vote_gpll0,
+	.soft_vote_mask = PLL_SOFT_VOTE_PRIMARY,
+	.base = &virt_base,
+	.c = {
+		.rate = 600000000,
+		.parent = &cxo_clk_src.c,
+		.dbg_name = "gpll0",
+		.ops = &clk_ops_pll_acpu_vote,
+		CLK_INIT(gpll0.c),
+	},
+};
+
+static struct pll_vote_clk gpll0_ao = {
+ .en_reg = (void __iomem *)GCC_APCS_GPLL_ENA_VOTE,
+ .en_mask = BIT(0),
+ .status_reg = (void __iomem *)GCC_GPLL0_MODE,
+ .status_mask = BIT(30),
+ .soft_vote = &soft_vote_gpll0,
+ .soft_vote_mask = PLL_SOFT_VOTE_ACPU,
+ .base = &virt_base,
+ .c = {
+ .rate = 600000000,
+ .parent = &cxo_clk_src_ao.c,
+ .dbg_name = "gpll0_ao",
+ .ops = &clk_ops_pll_acpu_vote,
+ CLK_INIT(gpll0_ao.c),
+ },
+};
+
+DEFINE_EXT_CLK(gpll0_out_main, &gpll0.c);
+
+static struct local_vote_clk gcc_mmss_gpll0_div_clk = {
+ .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+ .cbcr_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+ .en_mask = BIT(0),
+ .base = &virt_base,
+ .halt_check = DELAY,
+ .c = {
+ .dbg_name = "gcc_mmss_gpll0_div_clk",
+ .parent = &gpll0.c,
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_mmss_gpll0_div_clk.c),
+ },
+};
+
+static struct pll_vote_clk gpll4 = {
+ .en_reg = (void __iomem *)GCC_APCS_GPLL_ENA_VOTE,
+ .en_mask = BIT(4),
+ .status_reg = (void __iomem *)GCC_GPLL4_MODE,
+ .status_mask = BIT(30),
+ .base = &virt_base,
+ .c = {
+ .rate = 384000000,
+ .parent = &cxo_clk_src.c,
+ .dbg_name = "gpll4",
+ .ops = &clk_ops_pll_vote,
+ VDD_DIG_FMAX_MAP1(NOMINAL, 384000000),
+ CLK_INIT(gpll4.c),
+ },
+};
+
+DEFINE_EXT_CLK(gpll4_out_main, &gpll4.c);
+
+static struct clk_freq_tbl ftbl_ufs_axi_clk_src[] = {
+ F( 100000000, gpll0_out_main, 6, 0, 0),
+ F( 200000000, gpll0_out_main, 3, 0, 0),
+ F( 240000000, gpll0_out_main, 2.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk ufs_axi_clk_src = {
+ .cmd_rcgr_reg = GCC_UFS_AXI_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_ufs_axi_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "ufs_axi_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP4(LOWER, 19200000, LOW, 100000000,
+ NOMINAL, 200000000, HIGH, 240000000),
+ CLK_INIT(ufs_axi_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_pcie_aux_clk_src[] = {
+ F( 1010526, cxo_clk_src, 1, 1, 19),
+ F_END
+};
+
+static struct rcg_clk pcie_aux_clk_src = {
+ .cmd_rcgr_reg = GCC_PCIE_AUX_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_pcie_aux_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "pcie_aux_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP1(LOWER, 1011000),
+ CLK_INIT(pcie_aux_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_usb30_master_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 120000000, gpll0_out_main, 5, 0, 0),
+ F( 150000000, gpll0_out_main, 4, 0, 0),
+ F_END
+};
+
+static struct rcg_clk usb30_master_clk_src = {
+ .cmd_rcgr_reg = GCC_USB30_MASTER_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_usb30_master_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "usb30_master_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 60000000, LOW, 120000000,
+ NOMINAL, 150000000),
+ CLK_INIT(usb30_master_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_usb20_master_clk_src[] = {
+ F( 120000000, gpll0_out_main, 5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk usb20_master_clk_src = {
+ .cmd_rcgr_reg = GCC_USB20_MASTER_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_usb20_master_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "usb20_master_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 60000000,
+ NOMINAL, 120000000),
+ CLK_INIT(usb20_master_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_ufs_ice_core_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 150000000, gpll0_out_main, 4, 0, 0),
+ F( 300000000, gpll0_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk ufs_ice_core_clk_src = {
+ .cmd_rcgr_reg = GCC_UFS_ICE_CORE_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_ufs_ice_core_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "ufs_ice_core_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 150000000,
+ NOMINAL, 300000000),
+ CLK_INIT(ufs_ice_core_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_qup1_i2c_apps_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP1_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup1_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp1_qup1_i2c_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
+ F( 960000, cxo_clk_src, 10, 1, 2),
+ F( 4800000, cxo_clk_src, 4, 0, 0),
+ F( 9600000, cxo_clk_src, 2, 0, 0),
+ F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP1_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup1_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup1_spi_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_qup2_i2c_apps_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP2_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp1_qup2_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup2_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp1_qup2_i2c_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_qup2_spi_apps_clk_src[] = {
+ F( 960000, cxo_clk_src, 10, 1, 2),
+ F( 4800000, cxo_clk_src, 4, 0, 0),
+ F( 9600000, cxo_clk_src, 2, 0, 0),
+ F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP2_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp1_qup2_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup2_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup2_spi_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_qup3_i2c_apps_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP3_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp1_qup3_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup3_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp1_qup3_i2c_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_qup3_spi_apps_clk_src[] = {
+ F( 960000, cxo_clk_src, 10, 1, 2),
+ F( 4800000, cxo_clk_src, 4, 0, 0),
+ F( 9600000, cxo_clk_src, 2, 0, 0),
+ F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP3_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp1_qup3_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup3_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup3_spi_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_qup4_i2c_apps_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP4_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp1_qup4_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup4_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp1_qup4_i2c_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_qup4_spi_apps_clk_src[] = {
+ F( 960000, cxo_clk_src, 10, 1, 2),
+ F( 4800000, cxo_clk_src, 4, 0, 0),
+ F( 9600000, cxo_clk_src, 2, 0, 0),
+ F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP4_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp1_qup4_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup4_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup4_spi_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_qup5_i2c_apps_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP5_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp1_qup5_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup5_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp1_qup5_i2c_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_qup5_spi_apps_clk_src[] = {
+ F( 960000, cxo_clk_src, 10, 1, 2),
+ F( 4800000, cxo_clk_src, 4, 0, 0),
+ F( 9600000, cxo_clk_src, 2, 0, 0),
+ F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP5_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp1_qup5_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup5_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup5_spi_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_qup6_i2c_apps_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP6_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp1_qup6_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup6_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp1_qup6_i2c_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_qup6_spi_apps_clk_src[] = {
+ F( 960000, cxo_clk_src, 10, 1, 2),
+ F( 4800000, cxo_clk_src, 4, 0, 0),
+ F( 9600000, cxo_clk_src, 2, 0, 0),
+ F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP6_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp1_qup6_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup6_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup6_spi_apps_clk_src.c),
+ },
+};
+
+/*
+ * Supported rates for the BLSP1 UART1 core clock. All rates derive from
+ * gpll0_out_main with MND fractions chosen to hit standard baud-clock
+ * multiples (e.g. 3.6864/7.3728/14.7456 MHz) plus several round rates.
+ * The 12.5 and 9.5 entries are half-integer pre-dividers accepted by
+ * the F() macro.
+ */
+static struct clk_freq_tbl ftbl_blsp1_uart1_apps_clk_src[] = {
+ F( 3686400, gpll0_out_main, 1, 96, 15625),
+ F( 7372800, gpll0_out_main, 1, 192, 15625),
+ F( 14745600, gpll0_out_main, 1, 384, 15625),
+ F( 16000000, gpll0_out_main, 5, 2, 15),
+ F( 24000000, gpll0_out_main, 5, 1, 5),
+ F( 32000000, gpll0_out_main, 1, 4, 75),
+ F( 40000000, gpll0_out_main, 15, 0, 0),
+ F( 46400000, gpll0_out_main, 1, 29, 375),
+ F( 48000000, gpll0_out_main, 12.5, 0, 0),
+ F( 51200000, gpll0_out_main, 1, 32, 375),
+ F( 56000000, gpll0_out_main, 1, 7, 75),
+ F( 58982400, gpll0_out_main, 1, 1536, 15625),
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F( 63157895, gpll0_out_main, 9.5, 0, 0),
+ F_END
+};
+
+/*
+ * Root clock generator for BLSP1 UART1. MND set_rate path is required
+ * for the fractional baud-derived rates above.
+ */
+static struct rcg_clk blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_UART1_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_uart1_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp1_uart1_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_uart2_apps_clk_src[] = {
+ F( 3686400, gpll0_out_main, 1, 96, 15625),
+ F( 7372800, gpll0_out_main, 1, 192, 15625),
+ F( 14745600, gpll0_out_main, 1, 384, 15625),
+ F( 16000000, gpll0_out_main, 5, 2, 15),
+ F( 24000000, gpll0_out_main, 5, 1, 5),
+ F( 32000000, gpll0_out_main, 1, 4, 75),
+ F( 40000000, gpll0_out_main, 15, 0, 0),
+ F( 46400000, gpll0_out_main, 1, 29, 375),
+ F( 48000000, gpll0_out_main, 12.5, 0, 0),
+ F( 51200000, gpll0_out_main, 1, 32, 375),
+ F( 56000000, gpll0_out_main, 1, 7, 75),
+ F( 58982400, gpll0_out_main, 1, 1536, 15625),
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F( 63157895, gpll0_out_main, 9.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_UART2_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp1_uart2_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_uart2_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp1_uart2_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_uart3_apps_clk_src[] = {
+ F( 3686400, gpll0_out_main, 1, 96, 15625),
+ F( 7372800, gpll0_out_main, 1, 192, 15625),
+ F( 14745600, gpll0_out_main, 1, 384, 15625),
+ F( 16000000, gpll0_out_main, 5, 2, 15),
+ F( 24000000, gpll0_out_main, 5, 1, 5),
+ F( 32000000, gpll0_out_main, 1, 4, 75),
+ F( 40000000, gpll0_out_main, 15, 0, 0),
+ F( 46400000, gpll0_out_main, 1, 29, 375),
+ F( 48000000, gpll0_out_main, 12.5, 0, 0),
+ F( 51200000, gpll0_out_main, 1, 32, 375),
+ F( 56000000, gpll0_out_main, 1, 7, 75),
+ F( 58982400, gpll0_out_main, 1, 1536, 15625),
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F( 63157895, gpll0_out_main, 9.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_UART3_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp1_uart3_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_uart3_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp1_uart3_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_uart4_apps_clk_src[] = {
+ F( 3686400, gpll0_out_main, 1, 96, 15625),
+ F( 7372800, gpll0_out_main, 1, 192, 15625),
+ F( 14745600, gpll0_out_main, 1, 384, 15625),
+ F( 16000000, gpll0_out_main, 5, 2, 15),
+ F( 24000000, gpll0_out_main, 5, 1, 5),
+ F( 32000000, gpll0_out_main, 1, 4, 75),
+ F( 40000000, gpll0_out_main, 15, 0, 0),
+ F( 46400000, gpll0_out_main, 1, 29, 375),
+ F( 48000000, gpll0_out_main, 12.5, 0, 0),
+ F( 51200000, gpll0_out_main, 1, 32, 375),
+ F( 56000000, gpll0_out_main, 1, 7, 75),
+ F( 58982400, gpll0_out_main, 1, 1536, 15625),
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F( 63157895, gpll0_out_main, 9.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_UART4_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp1_uart4_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_uart4_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp1_uart4_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_uart5_apps_clk_src[] = {
+ F( 3686400, gpll0_out_main, 1, 96, 15625),
+ F( 7372800, gpll0_out_main, 1, 192, 15625),
+ F( 14745600, gpll0_out_main, 1, 384, 15625),
+ F( 16000000, gpll0_out_main, 5, 2, 15),
+ F( 24000000, gpll0_out_main, 5, 1, 5),
+ F( 32000000, gpll0_out_main, 1, 4, 75),
+ F( 40000000, gpll0_out_main, 15, 0, 0),
+ F( 46400000, gpll0_out_main, 1, 29, 375),
+ F( 48000000, gpll0_out_main, 12.5, 0, 0),
+ F( 51200000, gpll0_out_main, 1, 32, 375),
+ F( 56000000, gpll0_out_main, 1, 7, 75),
+ F( 58982400, gpll0_out_main, 1, 1536, 15625),
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F( 63157895, gpll0_out_main, 9.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_uart5_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_UART5_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp1_uart5_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_uart5_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp1_uart5_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp1_uart6_apps_clk_src[] = {
+ F( 3686400, gpll0_out_main, 1, 96, 15625),
+ F( 7372800, gpll0_out_main, 1, 192, 15625),
+ F( 14745600, gpll0_out_main, 1, 384, 15625),
+ F( 16000000, gpll0_out_main, 5, 2, 15),
+ F( 24000000, gpll0_out_main, 5, 1, 5),
+ F( 32000000, gpll0_out_main, 1, 4, 75),
+ F( 40000000, gpll0_out_main, 15, 0, 0),
+ F( 46400000, gpll0_out_main, 1, 29, 375),
+ F( 48000000, gpll0_out_main, 12.5, 0, 0),
+ F( 51200000, gpll0_out_main, 1, 32, 375),
+ F( 56000000, gpll0_out_main, 1, 7, 75),
+ F( 58982400, gpll0_out_main, 1, 1536, 15625),
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F( 63157895, gpll0_out_main, 9.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_uart6_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_UART6_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp1_uart6_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_uart6_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp1_uart6_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_qup1_i2c_apps_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP1_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp2_qup1_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup1_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp2_qup1_i2c_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_qup1_spi_apps_clk_src[] = {
+ F( 960000, cxo_clk_src, 10, 1, 2),
+ F( 4800000, cxo_clk_src, 4, 0, 0),
+ F( 9600000, cxo_clk_src, 2, 0, 0),
+ F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP1_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp2_qup1_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup1_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp2_qup1_spi_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_qup2_i2c_apps_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP2_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp2_qup2_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup2_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp2_qup2_i2c_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_qup2_spi_apps_clk_src[] = {
+ F( 960000, cxo_clk_src, 10, 1, 2),
+ F( 4800000, cxo_clk_src, 4, 0, 0),
+ F( 9600000, cxo_clk_src, 2, 0, 0),
+ F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP2_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp2_qup2_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup2_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp2_qup2_spi_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_qup3_i2c_apps_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP3_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp2_qup3_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup3_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp2_qup3_i2c_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_qup3_spi_apps_clk_src[] = {
+ F( 960000, cxo_clk_src, 10, 1, 2),
+ F( 4800000, cxo_clk_src, 4, 0, 0),
+ F( 9600000, cxo_clk_src, 2, 0, 0),
+ F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP3_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp2_qup3_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup3_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp2_qup3_spi_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_qup4_i2c_apps_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP4_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp2_qup4_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup4_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp2_qup4_i2c_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_qup4_spi_apps_clk_src[] = {
+ F( 960000, cxo_clk_src, 10, 1, 2),
+ F( 4800000, cxo_clk_src, 4, 0, 0),
+ F( 9600000, cxo_clk_src, 2, 0, 0),
+ F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP4_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp2_qup4_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup4_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp2_qup4_spi_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_qup5_i2c_apps_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP5_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp2_qup5_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup5_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp2_qup5_i2c_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_qup5_spi_apps_clk_src[] = {
+ F( 960000, cxo_clk_src, 10, 1, 2),
+ F( 4800000, cxo_clk_src, 4, 0, 0),
+ F( 9600000, cxo_clk_src, 2, 0, 0),
+ F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_qup5_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP5_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp2_qup5_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup5_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp2_qup5_spi_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_qup6_i2c_apps_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP6_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp2_qup6_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup6_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp2_qup6_i2c_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_qup6_spi_apps_clk_src[] = {
+ F( 960000, cxo_clk_src, 10, 1, 2),
+ F( 4800000, cxo_clk_src, 4, 0, 0),
+ F( 9600000, cxo_clk_src, 2, 0, 0),
+ F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_qup6_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP6_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp2_qup6_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup6_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp2_qup6_spi_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_uart1_apps_clk_src[] = {
+ F( 3686400, gpll0_out_main, 1, 96, 15625),
+ F( 7372800, gpll0_out_main, 1, 192, 15625),
+ F( 14745600, gpll0_out_main, 1, 384, 15625),
+ F( 16000000, gpll0_out_main, 5, 2, 15),
+ F( 24000000, gpll0_out_main, 5, 1, 5),
+ F( 32000000, gpll0_out_main, 1, 4, 75),
+ F( 40000000, gpll0_out_main, 15, 0, 0),
+ F( 46400000, gpll0_out_main, 1, 29, 375),
+ F( 48000000, gpll0_out_main, 12.5, 0, 0),
+ F( 51200000, gpll0_out_main, 1, 32, 375),
+ F( 56000000, gpll0_out_main, 1, 7, 75),
+ F( 58982400, gpll0_out_main, 1, 1536, 15625),
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F( 63157895, gpll0_out_main, 9.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_UART1_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp2_uart1_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_uart1_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp2_uart1_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_uart2_apps_clk_src[] = {
+ F( 3686400, gpll0_out_main, 1, 96, 15625),
+ F( 7372800, gpll0_out_main, 1, 192, 15625),
+ F( 14745600, gpll0_out_main, 1, 384, 15625),
+ F( 16000000, gpll0_out_main, 5, 2, 15),
+ F( 24000000, gpll0_out_main, 5, 1, 5),
+ F( 32000000, gpll0_out_main, 1, 4, 75),
+ F( 40000000, gpll0_out_main, 15, 0, 0),
+ F( 46400000, gpll0_out_main, 1, 29, 375),
+ F( 48000000, gpll0_out_main, 12.5, 0, 0),
+ F( 51200000, gpll0_out_main, 1, 32, 375),
+ F( 56000000, gpll0_out_main, 1, 7, 75),
+ F( 58982400, gpll0_out_main, 1, 1536, 15625),
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F( 63157895, gpll0_out_main, 9.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_UART2_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp2_uart2_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_uart2_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp2_uart2_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_uart3_apps_clk_src[] = {
+ F( 3686400, gpll0_out_main, 1, 96, 15625),
+ F( 7372800, gpll0_out_main, 1, 192, 15625),
+ F( 14745600, gpll0_out_main, 1, 384, 15625),
+ F( 16000000, gpll0_out_main, 5, 2, 15),
+ F( 24000000, gpll0_out_main, 5, 1, 5),
+ F( 32000000, gpll0_out_main, 1, 4, 75),
+ F( 40000000, gpll0_out_main, 15, 0, 0),
+ F( 46400000, gpll0_out_main, 1, 29, 375),
+ F( 48000000, gpll0_out_main, 12.5, 0, 0),
+ F( 51200000, gpll0_out_main, 1, 32, 375),
+ F( 56000000, gpll0_out_main, 1, 7, 75),
+ F( 58982400, gpll0_out_main, 1, 1536, 15625),
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F( 63157895, gpll0_out_main, 9.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_uart3_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_UART3_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp2_uart3_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_uart3_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp2_uart3_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_uart4_apps_clk_src[] = {
+ F( 3686400, gpll0_out_main, 1, 96, 15625),
+ F( 7372800, gpll0_out_main, 1, 192, 15625),
+ F( 14745600, gpll0_out_main, 1, 384, 15625),
+ F( 16000000, gpll0_out_main, 5, 2, 15),
+ F( 24000000, gpll0_out_main, 5, 1, 5),
+ F( 32000000, gpll0_out_main, 1, 4, 75),
+ F( 40000000, gpll0_out_main, 15, 0, 0),
+ F( 46400000, gpll0_out_main, 1, 29, 375),
+ F( 48000000, gpll0_out_main, 12.5, 0, 0),
+ F( 51200000, gpll0_out_main, 1, 32, 375),
+ F( 56000000, gpll0_out_main, 1, 7, 75),
+ F( 58982400, gpll0_out_main, 1, 1536, 15625),
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F( 63157895, gpll0_out_main, 9.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_uart4_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_UART4_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp2_uart4_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_uart4_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp2_uart4_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_uart5_apps_clk_src[] = {
+ F( 3686400, gpll0_out_main, 1, 96, 15625),
+ F( 7372800, gpll0_out_main, 1, 192, 15625),
+ F( 14745600, gpll0_out_main, 1, 384, 15625),
+ F( 16000000, gpll0_out_main, 5, 2, 15),
+ F( 24000000, gpll0_out_main, 5, 1, 5),
+ F( 32000000, gpll0_out_main, 1, 4, 75),
+ F( 40000000, gpll0_out_main, 15, 0, 0),
+ F( 46400000, gpll0_out_main, 1, 29, 375),
+ F( 48000000, gpll0_out_main, 12.5, 0, 0),
+ F( 51200000, gpll0_out_main, 1, 32, 375),
+ F( 56000000, gpll0_out_main, 1, 7, 75),
+ F( 58982400, gpll0_out_main, 1, 1536, 15625),
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F( 63157895, gpll0_out_main, 9.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_uart5_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_UART5_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp2_uart5_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_uart5_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp2_uart5_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp2_uart6_apps_clk_src[] = {
+ F( 3686400, gpll0_out_main, 1, 96, 15625),
+ F( 7372800, gpll0_out_main, 1, 192, 15625),
+ F( 14745600, gpll0_out_main, 1, 384, 15625),
+ F( 16000000, gpll0_out_main, 5, 2, 15),
+ F( 24000000, gpll0_out_main, 5, 1, 5),
+ F( 32000000, gpll0_out_main, 1, 4, 75),
+ F( 40000000, gpll0_out_main, 15, 0, 0),
+ F( 46400000, gpll0_out_main, 1, 29, 375),
+ F( 48000000, gpll0_out_main, 12.5, 0, 0),
+ F( 51200000, gpll0_out_main, 1, 32, 375),
+ F( 56000000, gpll0_out_main, 1, 7, 75),
+ F( 58982400, gpll0_out_main, 1, 1536, 15625),
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F( 63157895, gpll0_out_main, 9.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp2_uart6_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_UART6_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp2_uart6_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_uart6_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp2_uart6_apps_clk_src.c),
+ },
+};
+
+/*
+ * Supported rates for general-purpose clock GP1: 19.2 MHz from CXO, or
+ * 100/200 MHz from gpll0_out_main with integer dividers.
+ */
+static struct clk_freq_tbl ftbl_gp1_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 100000000, gpll0_out_main, 6, 0, 0),
+ F( 200000000, gpll0_out_main, 3, 0, 0),
+ F_END
+};
+
+/*
+ * Root clock generator for GP1. MND ops are used (the GP RCGs have an
+ * MND counter in hardware) even though the listed rates are all integer
+ * divisions.
+ */
+static struct rcg_clk gp1_clk_src = {
+ .cmd_rcgr_reg = GCC_GP1_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gp1_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+ NOMINAL, 200000000),
+ CLK_INIT(gp1_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_gp2_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 100000000, gpll0_out_main, 6, 0, 0),
+ F( 200000000, gpll0_out_main, 3, 0, 0),
+ F_END
+};
+
+static struct rcg_clk gp2_clk_src = {
+ .cmd_rcgr_reg = GCC_GP2_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gp2_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gp2_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+ NOMINAL, 200000000),
+ CLK_INIT(gp2_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_gp3_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 100000000, gpll0_out_main, 6, 0, 0),
+ F( 200000000, gpll0_out_main, 3, 0, 0),
+ F_END
+};
+
+static struct rcg_clk gp3_clk_src = {
+ .cmd_rcgr_reg = GCC_GP3_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gp3_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gp3_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+ NOMINAL, 200000000),
+ CLK_INIT(gp3_clk_src.c),
+ },
+};
+
+/*
+ * HMSS RBCPR clock: fixed 19.2 MHz sourced from the always-on CXO
+ * (cxo_clk_src_ao).
+ */
+static struct clk_freq_tbl ftbl_hmss_rbcpr_clk_src[] = {
+ F( 19200000, cxo_clk_src_ao, 1, 0, 0),
+ F_END
+};
+
+/*
+ * NOTE(review): this is the only RCG in this group without a
+ * VDD_DIG_FMAX_MAP entry — presumably because it runs at CXO rate from
+ * an always-on source and places no requirement on the digital rail,
+ * but confirm that the omission is intentional.
+ */
+static struct rcg_clk hmss_rbcpr_clk_src = {
+ .cmd_rcgr_reg = GCC_HMSS_RBCPR_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_hmss_rbcpr_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "hmss_rbcpr_clk_src",
+ .ops = &clk_ops_rcg,
+ CLK_INIT(hmss_rbcpr_clk_src.c),
+ },
+};
+
+/* PDM2 clock: single 60 MHz rate from gpll0_out_main / 10. */
+static struct clk_freq_tbl ftbl_pdm2_clk_src[] = {
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F_END
+};
+
+/* Root clock generator for PDM2; integer divider only, plain RCG ops. */
+static struct rcg_clk pdm2_clk_src = {
+ .cmd_rcgr_reg = GCC_PDM2_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "pdm2_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 60000000),
+ CLK_INIT(pdm2_clk_src.c),
+ },
+};
+
+/* Frequency table might change later */
+static struct clk_freq_tbl ftbl_qspi_ser_clk_src[] = {
+ F( 192000000, gpll4_out_main, 2, 0, 0),
+ F_END
+};
+
+/*
+ * Root clock generator for the QSPI serial clock.
+ * NOTE(review): the FMAX corners (80.2 / 160.4 / 320 MHz) do not line up
+ * with the single 192 MHz table entry above — consistent with the
+ * "might change later" comment; revisit both together once the table is
+ * finalized.
+ */
+static struct rcg_clk qspi_ser_clk_src = {
+ .cmd_rcgr_reg = GCC_QSPI_SER_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_qspi_ser_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "qspi_ser_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 80200000, LOW, 160400000,
+ NOMINAL, 320000000),
+ CLK_INIT(qspi_ser_clk_src.c),
+ },
+};
+
+/*
+ * Supported rates for the SDCC1 (eMMC) controller clock. Card
+ * identification rates (144 kHz / 400 kHz) come from CXO via MND
+ * fractions; 20-50 MHz from gpll0_out_main; the high-speed 96/192/384 MHz
+ * rates come straight off gpll4_out_main.
+ */
+static struct clk_freq_tbl ftbl_sdcc1_apps_clk_src[] = {
+ F( 144000, cxo_clk_src, 16, 3, 25),
+ F( 400000, cxo_clk_src, 12, 1, 4),
+ F( 20000000, gpll0_out_main, 15, 1, 2),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F( 96000000, gpll4_out_main, 4, 0, 0),
+ F( 192000000, gpll4_out_main, 2, 0, 0),
+ F( 384000000, gpll4_out_main, 1, 0, 0),
+ F_END
+};
+
+/*
+ * Root clock generator for SDCC1. The NOMINAL corner is capped at
+ * 400 MHz, giving headroom over the 384 MHz top table entry —
+ * presumably matching the voltage-plan ceiling; confirm against the
+ * chip's clock plan.
+ */
+static struct rcg_clk sdcc1_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_SDCC1_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_sdcc1_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "sdcc1_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 200000000,
+ NOMINAL, 400000000),
+ CLK_INIT(sdcc1_apps_clk_src.c),
+ },
+};
+
+/*
+ * Supported rates for the SDCC1 ICE (Inline Crypto Engine) core clock:
+ * 19.2 MHz from CXO, or 150/300 MHz from gpll0_out_main.
+ */
+static struct clk_freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 150000000, gpll0_out_main, 4, 0, 0),
+ F( 300000000, gpll0_out_main, 2, 0, 0),
+ F_END
+};
+
+/* Root clock generator for the SDCC1 ICE core; integer dividers only. */
+static struct rcg_clk sdcc1_ice_core_clk_src = {
+ .cmd_rcgr_reg = GCC_SDCC1_ICE_CORE_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_sdcc1_ice_core_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "sdcc1_ice_core_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 150000000,
+ NOMINAL, 300000000),
+ CLK_INIT(sdcc1_ice_core_clk_src.c),
+ },
+};
+
+/*
+ * Supported rates for the SDCC2 (SD card) controller clock. Same layout
+ * as SDCC1 up to 50 MHz, but the high rates (100/200 MHz) come from
+ * gpll0_out_main rather than gpll4.
+ */
+static struct clk_freq_tbl ftbl_sdcc2_apps_clk_src[] = {
+ F( 144000, cxo_clk_src, 16, 3, 25),
+ F( 400000, cxo_clk_src, 12, 1, 4),
+ F( 20000000, gpll0_out_main, 15, 1, 2),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F( 100000000, gpll0_out_main, 6, 0, 0),
+ F( 200000000, gpll0_out_main, 3, 0, 0),
+ F_END
+};
+
+/* Root clock generator for SDCC2; MND path for the fractional ID rates. */
+static struct rcg_clk sdcc2_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_SDCC2_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_sdcc2_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "sdcc2_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 100000000,
+ NOMINAL, 200000000),
+ CLK_INIT(sdcc2_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_sdcc3_apps_clk_src[] = {
+ F( 144000, cxo_clk_src, 16, 3, 25),
+ F( 400000, cxo_clk_src, 12, 1, 4),
+ F( 20000000, gpll0_out_main, 15, 1, 2),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F( 100000000, gpll0_out_main, 6, 0, 0),
+ F( 200000000, gpll0_out_main, 3, 0, 0),
+ F_END
+};
+
+static struct rcg_clk sdcc3_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_SDCC3_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_sdcc3_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "sdcc3_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 100000000,
+ NOMINAL, 200000000),
+ CLK_INIT(sdcc3_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_sdcc4_apps_clk_src[] = {
+ F( 144000, cxo_clk_src, 16, 3, 25),
+ F( 400000, cxo_clk_src, 12, 1, 4),
+ F( 20000000, gpll0_out_main, 15, 1, 2),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F( 100000000, gpll0_out_main, 6, 0, 0),
+ F_END
+};
+
+static struct rcg_clk sdcc4_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_SDCC4_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_sdcc4_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "sdcc4_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 50000000,
+ NOMINAL, 100000000),
+ CLK_INIT(sdcc4_apps_clk_src.c),
+ },
+};
+
+/*
+ * TSIF reference clock: single MND rate, CXO * 1/182 = 105494.5 Hz
+ * (table lists the nominal 105495; fmax rounds up to 105500).
+ */
+static struct clk_freq_tbl ftbl_tsif_ref_clk_src[] = {
+ F( 105495, cxo_clk_src, 1, 1, 182),
+ F_END
+};
+
+static struct rcg_clk tsif_ref_clk_src = {
+ .cmd_rcgr_reg = GCC_TSIF_REF_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_tsif_ref_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "tsif_ref_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP1(LOWER, 105500),
+ CLK_INIT(tsif_ref_clk_src.c),
+ },
+};
+
+/* USB2.0 controller mock-UTMI clock: CXO pass-through only. */
+static struct clk_freq_tbl ftbl_usb20_mock_utmi_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk usb20_mock_utmi_clk_src = {
+ .cmd_rcgr_reg = GCC_USB20_MOCK_UTMI_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_usb20_mock_utmi_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "usb20_mock_utmi_clk_src",
+ .ops = &clk_ops_rcg,
+ /* fmax allows up to 60 MHz at LOW even though only 19.2 MHz
+ * is tabled — presumably headroom for other table variants;
+ * TODO confirm against other SoC revisions of this table.
+ */
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 60000000),
+ CLK_INIT(usb20_mock_utmi_clk_src.c),
+ },
+};
+
+/* USB3.0 controller mock-UTMI clock: CXO pass-through only. */
+static struct clk_freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk usb30_mock_utmi_clk_src = {
+ .cmd_rcgr_reg = GCC_USB30_MOCK_UTMI_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_usb30_mock_utmi_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "usb30_mock_utmi_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 40000000, LOW, 60000000),
+ CLK_INIT(usb30_mock_utmi_clk_src.c),
+ },
+};
+
+/* USB3 PHY auxiliary clock: CXO / 16 = 1.2 MHz. */
+static struct clk_freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
+ F( 1200000, cxo_clk_src, 16, 0, 0),
+ F_END
+};
+
+static struct rcg_clk usb3_phy_aux_clk_src = {
+ .cmd_rcgr_reg = GCC_USB3_PHY_AUX_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_usb3_phy_aux_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "usb3_phy_aux_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOWER, 1200000),
+ CLK_INIT(usb3_phy_aux_clk_src.c),
+ },
+};
+
+/*
+ * Block-control resets (BCR) exposed as reset-only "clocks", plus the
+ * aggre0 NoC bus branches and the PCIe PHY reset/pipe gate clocks.
+ * reset_clk entries only toggle the BCR bit; they gate nothing.
+ */
+static struct reset_clk gcc_qusb2phy_prim_reset = {
+ .reset_reg = GCC_QUSB2PHY_PRIM_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_qusb2phy_prim_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_qusb2phy_prim_reset.c),
+ },
+};
+
+static struct reset_clk gcc_qusb2phy_sec_reset = {
+ .reset_reg = GCC_QUSB2PHY_SEC_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_qusb2phy_sec_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_qusb2phy_sec_reset.c),
+ },
+};
+
+/* AHB path from the peripheral NoC to the USB2.0 controller. */
+static struct branch_clk gcc_periph_noc_usb20_ahb_clk = {
+ .cbcr_reg = GCC_PERIPH_NOC_USB20_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_periph_noc_usb20_ahb_clk",
+ .parent = &usb20_master_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_periph_noc_usb20_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_aggre0_cnoc_ahb_clk = {
+ .cbcr_reg = GCC_AGGRE0_CNOC_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_aggre0_cnoc_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_aggre0_cnoc_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_aggre0_snoc_axi_clk = {
+ .cbcr_reg = GCC_AGGRE0_SNOC_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_aggre0_snoc_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_aggre0_snoc_axi_clk.c),
+ },
+};
+
+/*
+ * SMMU aggre0 clocks: enable-bit is polled on enable, but halt status is
+ * deliberately not checked on disable (no_halt_check_on_disable) —
+ * presumably because the bus may already be collapsed; TODO confirm.
+ */
+static struct branch_clk gcc_smmu_aggre0_ahb_clk = {
+ .cbcr_reg = GCC_SMMU_AGGRE0_AHB_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "gcc_smmu_aggre0_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_smmu_aggre0_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_smmu_aggre0_axi_clk = {
+ .cbcr_reg = GCC_SMMU_AGGRE0_AXI_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "gcc_smmu_aggre0_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_smmu_aggre0_axi_clk.c),
+ },
+};
+
+/* Simple enable gate: set bit 0, then wait 500 us for it to settle. */
+static struct gate_clk gcc_aggre0_noc_qosgen_extref_clk = {
+ .en_reg = GCC_AGGRE0_NOC_QOSGEN_EXTREF_CTL,
+ .en_mask = BIT(0),
+ .delay_us = 500,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_aggre0_noc_qosgen_extref_clk",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gcc_aggre0_noc_qosgen_extref_clk.c),
+ },
+};
+
+static struct reset_clk gcc_pcie_0_phy_reset = {
+ .reset_reg = GCC_PCIE_0_PHY_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_0_phy_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_pcie_0_phy_reset.c),
+ },
+};
+
+/* PCIe pipe clocks are externally sourced from the PHY, so they are
+ * modeled as gates (no halt check possible) rather than branches.
+ */
+static struct gate_clk gcc_pcie_0_pipe_clk = {
+ .en_reg = GCC_PCIE_0_PIPE_CBCR,
+ .en_mask = BIT(0),
+ .delay_us = 500,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gcc_pcie_0_pipe_clk.c),
+ },
+};
+
+static struct reset_clk gcc_pcie_1_phy_reset = {
+ .reset_reg = GCC_PCIE_1_PHY_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_1_phy_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_pcie_1_phy_reset.c),
+ },
+};
+
+static struct gate_clk gcc_pcie_1_pipe_clk = {
+ .en_reg = GCC_PCIE_1_PIPE_CBCR,
+ .en_mask = BIT(0),
+ .delay_us = 500,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_1_pipe_clk",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gcc_pcie_1_pipe_clk.c),
+ },
+};
+
+/*
+ * BLSP1 (BAM low-speed peripheral) clocks: a voted AHB bus clock plus one
+ * branch per QUP (I2C/SPI) and UART core. Each branch's parent is the
+ * matching RCG source defined earlier in this file; has_sibling = 0 means
+ * the branch is the sole consumer of its RCG, so rate requests propagate.
+ */
+static struct local_vote_clk gcc_blsp1_ahb_clk = {
+ .cbcr_reg = GCC_BLSP1_AHB_CBCR,
+ .bcr_reg = GCC_BLSP1_BCR,
+ .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
+ /* Bit 17 of the shared APCS vote register belongs to BLSP1 AHB. */
+ .en_mask = BIT(17),
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_blsp1_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup1_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP1_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent = &blsp1_qup1_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup1_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup1_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP1_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent = &blsp1_qup1_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup1_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup2_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP2_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent = &blsp1_qup2_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup2_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup2_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP2_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent = &blsp1_qup2_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup2_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup3_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP3_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent = &blsp1_qup3_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup3_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup3_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP3_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent = &blsp1_qup3_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup3_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup4_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP4_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent = &blsp1_qup4_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup4_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup4_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP4_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent = &blsp1_qup4_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup4_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup5_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP5_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent = &blsp1_qup5_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup5_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup5_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP5_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent = &blsp1_qup5_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup5_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup6_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP6_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent = &blsp1_qup6_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup6_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup6_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP6_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent = &blsp1_qup6_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup6_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_uart1_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_UART1_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_uart1_apps_clk",
+ .parent = &blsp1_uart1_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_uart1_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_uart2_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_UART2_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_uart2_apps_clk",
+ .parent = &blsp1_uart2_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_uart2_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_uart3_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_UART3_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_uart3_apps_clk",
+ .parent = &blsp1_uart3_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_uart3_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_uart4_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_UART4_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_uart4_apps_clk",
+ .parent = &blsp1_uart4_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_uart4_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_uart5_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_UART5_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_uart5_apps_clk",
+ .parent = &blsp1_uart5_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_uart5_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_uart6_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_UART6_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_uart6_apps_clk",
+ .parent = &blsp1_uart6_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_uart6_apps_clk.c),
+ },
+};
+
+/*
+ * BLSP2 clocks: mirror image of the BLSP1 set above — one voted AHB clock
+ * (vote bit 15 in the shared APCS register) plus per-QUP and per-UART
+ * branches, each parented to its RCG source.
+ */
+static struct local_vote_clk gcc_blsp2_ahb_clk = {
+ .cbcr_reg = GCC_BLSP2_AHB_CBCR,
+ .bcr_reg = GCC_BLSP2_BCR,
+ .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
+ .en_mask = BIT(15),
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_ahb_clk",
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_blsp2_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup1_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP1_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent = &blsp2_qup1_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup1_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup1_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP1_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent = &blsp2_qup1_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup1_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup2_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP2_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent = &blsp2_qup2_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup2_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup2_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP2_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent = &blsp2_qup2_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup2_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup3_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP3_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent = &blsp2_qup3_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup3_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup3_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP3_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent = &blsp2_qup3_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup3_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup4_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP4_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent = &blsp2_qup4_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup4_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup4_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP4_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent = &blsp2_qup4_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup4_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup5_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP5_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup5_i2c_apps_clk",
+ .parent = &blsp2_qup5_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup5_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup5_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP5_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup5_spi_apps_clk",
+ .parent = &blsp2_qup5_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup5_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup6_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP6_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup6_i2c_apps_clk",
+ .parent = &blsp2_qup6_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup6_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup6_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP6_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup6_spi_apps_clk",
+ .parent = &blsp2_qup6_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup6_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_uart1_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_UART1_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_uart1_apps_clk",
+ .parent = &blsp2_uart1_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_uart1_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_uart2_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_UART2_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_uart2_apps_clk",
+ .parent = &blsp2_uart2_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_uart2_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_uart3_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_UART3_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_uart3_apps_clk",
+ .parent = &blsp2_uart3_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_uart3_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_uart4_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_UART4_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_uart4_apps_clk",
+ .parent = &blsp2_uart4_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_uart4_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_uart5_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_UART5_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_uart5_apps_clk",
+ .parent = &blsp2_uart5_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_uart5_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_uart6_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_UART6_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_uart6_apps_clk",
+ .parent = &blsp2_uart6_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_uart6_apps_clk.c),
+ },
+};
+
+/* Boot ROM AHB access clock, voted via bit 10 of the APCS vote register. */
+static struct local_vote_clk gcc_boot_rom_ahb_clk = {
+ .cbcr_reg = GCC_BOOT_ROM_AHB_CBCR,
+ .bcr_reg = GCC_BOOT_ROM_BCR,
+ .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
+ .en_mask = BIT(10),
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_boot_rom_ahb_clk.c),
+ },
+};
+
+/* General-purpose output clocks GP1–GP3, each fed by its own RCG. */
+static struct branch_clk gcc_gp1_clk = {
+ .cbcr_reg = GCC_GP1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_gp1_clk",
+ .parent = &gp1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_gp1_clk.c),
+ },
+};
+
+static struct branch_clk gcc_gp2_clk = {
+ .cbcr_reg = GCC_GP2_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_gp2_clk",
+ .parent = &gp2_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_gp2_clk.c),
+ },
+};
+
+static struct branch_clk gcc_gp3_clk = {
+ .cbcr_reg = GCC_GP3_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_gp3_clk",
+ .parent = &gp3_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_gp3_clk.c),
+ },
+};
+
+/* Config-bus clock into the multimedia subsystem NoC (no rate control). */
+static struct branch_clk gcc_mmss_noc_cfg_ahb_clk = {
+ .cbcr_reg = GCC_MMSS_NOC_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_mmss_noc_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_mmss_noc_cfg_ahb_clk.c),
+ },
+};
+
+/*
+ * PCIe controller clocks. Each root port (0/1/2) has an aux branch (fed by
+ * the shared pcie_aux_clk_src RCG), cfg/mstr/slv bus branches with no rate
+ * control (has_sibling = 1), plus the PHY-sourced pipe gates and the
+ * various PHY block resets.
+ */
+static struct branch_clk gcc_pcie_0_aux_clk = {
+ .cbcr_reg = GCC_PCIE_0_AUX_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_0_aux_clk",
+ .parent = &pcie_aux_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_0_aux_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_0_cfg_ahb_clk = {
+ .cbcr_reg = GCC_PCIE_0_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_0_cfg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_0_mstr_axi_clk = {
+ .cbcr_reg = GCC_PCIE_0_MSTR_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_0_mstr_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_0_slv_axi_clk = {
+ .cbcr_reg = GCC_PCIE_0_SLV_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_0_slv_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_1_aux_clk = {
+ .cbcr_reg = GCC_PCIE_1_AUX_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_1_aux_clk",
+ .parent = &pcie_aux_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_1_aux_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_1_cfg_ahb_clk = {
+ .cbcr_reg = GCC_PCIE_1_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_1_cfg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_1_mstr_axi_clk = {
+ .cbcr_reg = GCC_PCIE_1_MSTR_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_1_mstr_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_1_slv_axi_clk = {
+ .cbcr_reg = GCC_PCIE_1_SLV_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_1_slv_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_2_aux_clk = {
+ .cbcr_reg = GCC_PCIE_2_AUX_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_2_aux_clk",
+ .parent = &pcie_aux_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_2_aux_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_2_cfg_ahb_clk = {
+ .cbcr_reg = GCC_PCIE_2_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_2_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_2_cfg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_2_mstr_axi_clk = {
+ .cbcr_reg = GCC_PCIE_2_MSTR_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_2_mstr_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_2_mstr_axi_clk.c),
+ },
+};
+
+static struct reset_clk gcc_pcie_2_phy_reset = {
+ .reset_reg = GCC_PCIE_2_PHY_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_2_phy_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_pcie_2_phy_reset.c),
+ },
+};
+
+/* PHY-sourced pipe clock: gate with a 500 us post-enable settle delay. */
+static struct gate_clk gcc_pcie_2_pipe_clk = {
+ .en_reg = GCC_PCIE_2_PIPE_CBCR,
+ .en_mask = BIT(0),
+ .delay_us = 500,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_2_pipe_clk",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gcc_pcie_2_pipe_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_2_slv_axi_clk = {
+ .cbcr_reg = GCC_PCIE_2_SLV_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_2_slv_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_2_slv_axi_clk.c),
+ },
+};
+
+/* Common PCIe PHY resets: full PHY, common block, and no-CSR common PHY. */
+static struct reset_clk gcc_pcie_phy_reset = {
+ .reset_reg = GCC_PCIE_PHY_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_phy_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_pcie_phy_reset.c),
+ },
+};
+
+static struct reset_clk gcc_pcie_phy_com_reset = {
+ .reset_reg = GCC_PCIE_PHY_COM_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_phy_com_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_pcie_phy_com_reset.c),
+ },
+};
+
+static struct reset_clk gcc_pcie_phy_nocsr_com_phy_reset = {
+ .reset_reg = GCC_PCIE_PHY_NOCSR_COM_PHY_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_phy_nocsr_com_phy_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_pcie_phy_nocsr_com_phy_reset.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_phy_aux_clk = {
+ .cbcr_reg = GCC_PCIE_PHY_AUX_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_phy_aux_clk",
+ .parent = &pcie_aux_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_phy_aux_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_phy_cfg_ahb_clk = {
+ .cbcr_reg = GCC_PCIE_PHY_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_phy_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_phy_cfg_ahb_clk.c),
+ },
+};
+
+/* PDM (pulse-density modulation) core and AHB clocks. */
+static struct branch_clk gcc_pdm2_clk = {
+ .cbcr_reg = GCC_PDM2_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pdm2_clk",
+ .parent = &pdm2_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pdm2_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pdm_ahb_clk = {
+ .cbcr_reg = GCC_PDM_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pdm_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pdm_ahb_clk.c),
+ },
+};
+
+/* PRNG (hardware RNG) AHB clock, voted via bit 13 of the APCS register. */
+static struct local_vote_clk gcc_prng_ahb_clk = {
+ .cbcr_reg = GCC_PRNG_AHB_CBCR,
+ .bcr_reg = GCC_PRNG_BCR,
+ .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
+ .en_mask = BIT(13),
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_prng_ahb_clk",
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_prng_ahb_clk.c),
+ },
+};
+
+/* QSPI bus and serial clocks (serial branch fed by qspi_ser_clk_src). */
+static struct branch_clk gcc_qspi_ahb_clk = {
+ .cbcr_reg = GCC_QSPI_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_qspi_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_qspi_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_qspi_ser_clk = {
+ .cbcr_reg = GCC_QSPI_SER_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_qspi_ser_clk",
+ .parent = &qspi_ser_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_qspi_ser_clk.c),
+ },
+};
+
+/*
+ * SDCC1–4 and TSIF leaf branches: each controller gets a fixed-rate AHB
+ * bus clock (has_sibling = 1, no parent) and a rate-settable apps/core
+ * clock parented to the RCG defined earlier in this file.
+ */
+static struct branch_clk gcc_sdcc1_ahb_clk = {
+ .cbcr_reg = GCC_SDCC1_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sdcc1_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_sdcc1_apps_clk = {
+ .cbcr_reg = GCC_SDCC1_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sdcc1_apps_clk",
+ .parent = &sdcc1_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sdcc1_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_sdcc1_ice_core_clk = {
+ .cbcr_reg = GCC_SDCC1_ICE_CORE_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sdcc1_ice_core_clk",
+ .parent = &sdcc1_ice_core_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sdcc1_ice_core_clk.c),
+ },
+};
+
+static struct branch_clk gcc_sdcc2_ahb_clk = {
+ .cbcr_reg = GCC_SDCC2_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sdcc2_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_sdcc2_apps_clk = {
+ .cbcr_reg = GCC_SDCC2_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sdcc2_apps_clk",
+ .parent = &sdcc2_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sdcc2_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_sdcc3_ahb_clk = {
+ .cbcr_reg = GCC_SDCC3_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sdcc3_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sdcc3_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_sdcc3_apps_clk = {
+ .cbcr_reg = GCC_SDCC3_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sdcc3_apps_clk",
+ .parent = &sdcc3_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sdcc3_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_sdcc4_ahb_clk = {
+ .cbcr_reg = GCC_SDCC4_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sdcc4_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_sdcc4_apps_clk = {
+ .cbcr_reg = GCC_SDCC4_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sdcc4_apps_clk",
+ .parent = &sdcc4_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sdcc4_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_tsif_ahb_clk = {
+ .cbcr_reg = GCC_TSIF_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_tsif_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_tsif_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_tsif_ref_clk = {
+ .cbcr_reg = GCC_TSIF_REF_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_tsif_ref_clk",
+ .parent = &tsif_ref_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_tsif_ref_clk.c),
+ },
+};
+
+static struct branch_clk gcc_ufs_ahb_clk = {
+ .cbcr_reg = GCC_UFS_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ufs_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_ufs_axi_clk = {
+ .bcr_reg = GCC_UFS_BCR,
+ .cbcr_reg = GCC_UFS_AXI_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_axi_clk",
+ .parent = &ufs_axi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ufs_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_ufs_ice_core_clk = {
+ .cbcr_reg = GCC_UFS_ICE_CORE_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_ice_core_clk",
+ .parent = &ufs_ice_core_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ufs_ice_core_clk.c),
+ },
+};
+
+static struct branch_clk gcc_ufs_rx_cfg_branch_clk = {
+ .cbcr_reg = GCC_UFS_RX_CFG_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_rx_cfg_branch_clk",
+ .parent = &ufs_axi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ufs_rx_cfg_branch_clk.c),
+ },
+};
+
+DEFINE_FIXED_SLAVE_DIV_CLK(gcc_ufs_rx_cfg_clk, 16,
+ &gcc_ufs_rx_cfg_branch_clk.c);
+
+static struct gate_clk gcc_ufs_rx_symbol_0_clk = {
+ .en_reg = GCC_UFS_RX_SYMBOL_0_CBCR,
+ .en_mask = BIT(0),
+ .delay_us = 500,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_rx_symbol_0_clk",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gcc_ufs_rx_symbol_0_clk.c),
+ },
+};
+
+static struct branch_clk gcc_ufs_rx_symbol_1_clk = {
+ .cbcr_reg = GCC_UFS_RX_SYMBOL_1_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_rx_symbol_1_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ufs_rx_symbol_1_clk.c),
+ },
+};
+
+static struct branch_clk gcc_ufs_tx_cfg_branch_clk = {
+ .cbcr_reg = GCC_UFS_TX_CFG_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_tx_cfg_branch_clk",
+ .parent = &ufs_axi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ufs_tx_cfg_branch_clk.c),
+ },
+};
+
+DEFINE_FIXED_SLAVE_DIV_CLK(gcc_ufs_tx_cfg_clk, 16,
+ &gcc_ufs_tx_cfg_branch_clk.c);
+
+static struct gate_clk gcc_ufs_tx_symbol_0_clk = {
+ .en_reg = GCC_UFS_TX_SYMBOL_0_CBCR,
+ .en_mask = BIT(0),
+ .delay_us = 500,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_tx_symbol_0_clk",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gcc_ufs_tx_symbol_0_clk.c),
+ },
+};
+
+/* Fixed /2 divider off the ICE core RCG; feeds the UniPro core branch. */
+DEFINE_FIXED_DIV_CLK(gcc_ufs_ice_core_postdiv_clk_src, 2,
+ &ufs_ice_core_clk_src.c);
+
+static struct branch_clk gcc_ufs_unipro_core_clk = {
+ .cbcr_reg = GCC_UFS_UNIPRO_CORE_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_unipro_core_clk",
+ .parent = &gcc_ufs_ice_core_postdiv_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ufs_unipro_core_clk.c),
+ },
+};
+
+/* Externally-sourced UFS core gates: enable-bit only, 500us settle delay. */
+static struct gate_clk gcc_ufs_sys_clk_core_clk = {
+ .en_reg = GCC_UFS_SYS_CLK_CORE_CBCR,
+ .en_mask = BIT(0),
+ .delay_us = 500,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_sys_clk_core_clk",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gcc_ufs_sys_clk_core_clk.c),
+ },
+};
+
+static struct gate_clk gcc_ufs_tx_symbol_clk_core_clk = {
+ .en_reg = GCC_UFS_TX_SYMBOL_CLK_CORE_CBCR,
+ .en_mask = BIT(0),
+ .delay_us = 500,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_tx_symbol_clk_core_clk",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gcc_ufs_tx_symbol_clk_core_clk.c),
+ },
+};
+
+/* USB3 PHY pipe clock: PHY-sourced gate, shorter 50us delay than UFS gates. */
+static struct gate_clk gcc_usb3_phy_pipe_clk = {
+ .en_reg = GCC_USB3_PHY_PIPE_CBCR,
+ .en_mask = BIT(0),
+ .delay_us = 50,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb3_phy_pipe_clk",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gcc_usb3_phy_pipe_clk.c),
+ },
+};
+
+/* USB2.0 controller clocks; bcr_reg allows block reset via GCC_USB_20_BCR. */
+static struct branch_clk gcc_usb20_master_clk = {
+ .cbcr_reg = GCC_USB20_MASTER_CBCR,
+ .bcr_reg = GCC_USB_20_BCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb20_master_clk",
+ .parent = &usb20_master_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb20_master_clk.c),
+ },
+};
+
+static struct branch_clk gcc_usb20_mock_utmi_clk = {
+ .cbcr_reg = GCC_USB20_MOCK_UTMI_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb20_mock_utmi_clk",
+ .parent = &usb20_mock_utmi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb20_mock_utmi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_usb20_sleep_clk = {
+ .cbcr_reg = GCC_USB20_SLEEP_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb20_sleep_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb20_sleep_clk.c),
+ },
+};
+
+/* USB3.0 (dwc3) controller clocks; resettable via GCC_USB_30_BCR. */
+static struct branch_clk gcc_usb30_master_clk = {
+ .cbcr_reg = GCC_USB30_MASTER_CBCR,
+ .bcr_reg = GCC_USB_30_BCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb30_master_clk",
+ .parent = &usb30_master_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb30_master_clk.c),
+ },
+};
+
+static struct branch_clk gcc_usb30_mock_utmi_clk = {
+ .cbcr_reg = GCC_USB30_MOCK_UTMI_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb30_mock_utmi_clk",
+ .parent = &usb30_mock_utmi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb30_mock_utmi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_usb30_sleep_clk = {
+ .cbcr_reg = GCC_USB30_SLEEP_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb30_sleep_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb30_sleep_clk.c),
+ },
+};
+
+static struct branch_clk gcc_usb3_phy_aux_clk = {
+ .cbcr_reg = GCC_USB3_PHY_AUX_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb3_phy_aux_clk",
+ .parent = &usb3_phy_aux_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb3_phy_aux_clk.c),
+ },
+};
+
+/* AHB bridge for register access to the USB PHYs. */
+static struct branch_clk gcc_usb_phy_cfg_ahb2phy_clk = {
+ .cbcr_reg = GCC_USB_PHY_CFG_AHB2PHY_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb_phy_cfg_ahb2phy_clk.c),
+ },
+};
+
+/* RBCPR (core power reduction) clock for the HMSS CPU subsystem. */
+static struct branch_clk gcc_hmss_rbcpr_clk = {
+ .cbcr_reg = GCC_HMSS_RBCPR_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_hmss_rbcpr_clk",
+ .parent = &hmss_rbcpr_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_hmss_rbcpr_clk.c),
+ },
+};
+
+/*
+ * HLOS vote clocks for the LPASS SMMUs. Halt status is skipped on disable
+ * (no_halt_check_on_disable) since other voters may keep the clock running.
+ */
+static struct branch_clk hlos1_vote_lpass_core_smmu_clk = {
+ .cbcr_reg = GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "hlos1_vote_lpass_core_smmu_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(hlos1_vote_lpass_core_smmu_clk.c),
+ },
+};
+
+static struct branch_clk hlos1_vote_lpass_adsp_smmu_clk = {
+ .cbcr_reg = GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "hlos1_vote_lpass_adsp_smmu_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(hlos1_vote_lpass_adsp_smmu_clk.c),
+ },
+};
+
+/* BCR-backed reset controls for the USB3 PHY, exposed as reset "clocks". */
+static struct reset_clk gcc_usb3_phy_reset = {
+ .reset_reg = GCC_USB3_PHY_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb3_phy_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_usb3_phy_reset.c),
+ },
+};
+
+static struct reset_clk gcc_usb3phy_phy_reset = {
+ .reset_reg = GCC_USB3PHY_PHY_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb3phy_phy_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_usb3phy_phy_reset.c),
+ },
+};
+
+/* Reference-clock enables (CLKREF_EN registers) for the various PHYs. */
+static struct branch_clk gcc_usb3_clkref_clk = {
+ .cbcr_reg = GCC_USB3_CLKREF_EN,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb3_clkref_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb3_clkref_clk.c),
+ },
+};
+
+static struct branch_clk gcc_hdmi_clkref_clk = {
+ .cbcr_reg = GCC_HDMI_CLKREF_EN,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_hdmi_clkref_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_hdmi_clkref_clk.c),
+ },
+};
+
+static struct branch_clk gcc_edp_clkref_clk = {
+ .cbcr_reg = GCC_EDP_CLKREF_EN,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_edp_clkref_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_edp_clkref_clk.c),
+ },
+};
+
+static struct branch_clk gcc_ufs_clkref_clk = {
+ .cbcr_reg = GCC_UFS_CLKREF_EN,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_clkref_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ufs_clkref_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_clkref_clk = {
+ .cbcr_reg = GCC_PCIE_CLKREF_EN,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_clkref_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_clkref_clk.c),
+ },
+};
+
+/* USB2 RX clkrefs use vote semantics (vote_reg == cbcr_reg, bit 0). */
+static struct local_vote_clk gcc_rx2_usb2_clkref_clk = {
+ .cbcr_reg = GCC_RX2_USB2_CLKREF_EN,
+ .vote_reg = GCC_RX2_USB2_CLKREF_EN,
+ .en_mask = BIT(0),
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_rx2_usb2_clkref_clk",
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_rx2_usb2_clkref_clk.c),
+ },
+};
+
+static struct local_vote_clk gcc_rx1_usb2_clkref_clk = {
+ .cbcr_reg = GCC_RX1_USB2_CLKREF_EN,
+ .vote_reg = GCC_RX1_USB2_CLKREF_EN,
+ .en_mask = BIT(0),
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_rx1_usb2_clkref_clk",
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_rx1_usb2_clkref_clk.c),
+ },
+};
+
+/* Bus (NoC) path clocks for USB3 and UFS traffic. */
+static struct branch_clk gcc_sys_noc_usb3_axi_clk = {
+ .cbcr_reg = GCC_SYS_NOC_USB3_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sys_noc_usb3_axi_clk",
+ .parent = &usb30_master_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sys_noc_usb3_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_sys_noc_ufs_axi_clk = {
+ .cbcr_reg = GCC_SYS_NOC_UFS_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sys_noc_ufs_axi_clk",
+ .parent = &ufs_axi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sys_noc_ufs_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_aggre2_usb3_axi_clk = {
+ .cbcr_reg = GCC_AGGRE2_USB3_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_aggre2_usb3_axi_clk",
+ .parent = &usb30_master_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_aggre2_usb3_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_aggre2_ufs_axi_clk = {
+ .cbcr_reg = GCC_AGGRE2_UFS_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_aggre2_ufs_axi_clk",
+ .parent = &ufs_axi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_aggre2_ufs_axi_clk.c),
+ },
+};
+
+/*
+ * GPU/BIMC interconnect clocks. check_enable_bit: confirm the enable bit
+ * stuck; halt check skipped on disable since the MMSS side may be gated.
+ */
+static struct branch_clk gcc_mmss_bimc_gfx_clk = {
+ .cbcr_reg = GCC_MMSS_BIMC_GFX_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .no_halt_check_on_disable = true,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_mmss_bimc_gfx_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_mmss_bimc_gfx_clk.c),
+ },
+};
+
+static struct branch_clk gcc_bimc_gfx_clk = {
+ .cbcr_reg = GCC_BIMC_GFX_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_bimc_gfx_clk",
+ .always_on = true,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_bimc_gfx_clk.c),
+ },
+};
+
+/* Modem subsystem (MSS) bus clocks; registered only on v2+ (see probe). */
+static struct branch_clk gcc_mss_cfg_ahb_clk = {
+ .cbcr_reg = GCC_MSS_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_mss_cfg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_mss_q6_bimc_axi_clk = {
+ .cbcr_reg = GCC_MSS_Q6_BIMC_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_mss_q6_bimc_axi_clk",
+ .always_on = true,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_mss_q6_bimc_axi_clk.c),
+ },
+};
+
+/* APCS vote gate handing GPLL0 to the MSS clock controller (bit 2). */
+static struct gate_clk gpll0_out_msscc = {
+ .en_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+ .en_mask = BIT(2),
+ .delay_us = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gpll0_out_msscc",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gpll0_out_msscc.c),
+ },
+};
+
+static struct branch_clk gcc_mss_snoc_axi_clk = {
+ .cbcr_reg = GCC_MSS_SNOC_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_mss_snoc_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_mss_mnoc_bimc_axi_clk = {
+ .cbcr_reg = GCC_MSS_MNOC_BIMC_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_mss_mnoc_bimc_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_mss_mnoc_bimc_axi_clk.c),
+ },
+};
+
+/* Debug/config housekeeping clocks. */
+static struct branch_clk gcc_dcc_ahb_clk = {
+ .cbcr_reg = GCC_DCC_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_dcc_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_dcc_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_aggre0_noc_mpu_cfg_ahb_clk = {
+ .cbcr_reg = GCC_AGGRE0_NOC_MPU_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_aggre0_noc_mpu_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_aggre0_noc_mpu_cfg_ahb_clk.c),
+ },
+};
+
+/* Forward declarations: the two debug muxes reference each other's .c. */
+static struct mux_clk gcc_debug_mux;
+static struct mux_clk gcc_debug_mux_v2;
+/* Filled in at debug-probe time: clk_ops_gen_mux + measure_get_rate. */
+static struct clk_ops clk_ops_debug_mux;
+
+/* Registers used by the frequency-measurement logic behind the debug mux. */
+static struct measure_clk_data debug_mux_priv = {
+ .cxo = &cxo_clk_src.c,
+ .plltest_reg = PLLTEST_PAD_CFG,
+ .plltest_val = 0x51A00,
+ .xo_div4_cbcr = GCC_XO_DIV4_CBCR,
+ .ctl_reg = CLOCK_FRQ_MEASURE_CTL,
+ .status_reg = CLOCK_FRQ_MEASURE_STATUS,
+ .base = &virt_base,
+};
+
+/*
+ * GCC debug mux: selects one clock at a time onto the frequency-measurement
+ * circuit (debug_mux_priv). Recursive sources forward measurement into the
+ * MMSS/GPU/CPU/v2 sub-muxes; the remaining entries are direct GCC selects.
+ * Select values normalized to 4-digit hex for consistency (values unchanged).
+ */
+static struct mux_clk gcc_debug_mux = {
+ .priv = &debug_mux_priv,
+ .ops = &mux_reg_ops,
+ .en_mask = BIT(16),
+ .mask = 0x3FF,
+ .base = &virt_dbgbase,
+ MUX_REC_SRC_LIST(
+ &mmss_gcc_dbg_clk.c,
+ &gpu_gcc_dbg_clk.c,
+ &gcc_debug_mux_v2.c,
+ &cpu_dbg_clk.c,
+ ),
+ MUX_SRC_LIST(
+ { &mmss_gcc_dbg_clk.c, 0x001b },
+ { &gpu_gcc_dbg_clk.c, 0x001b },
+ { &gcc_debug_mux_v2.c, 0xffff },
+ { &cpu_dbg_clk.c, 0x00bb },
+ { &cnoc_clk.c, 0x000e },
+ { &pnoc_clk.c, 0x0011 },
+ { &snoc_clk.c, 0x0000 },
+ { &bimc_clk.c, 0x00ad },
+ { &ce1_clk.c, 0x0099 },
+ { &gcc_ce1_axi_m_clk.c, 0x009a },
+ { &gcc_ce1_ahb_m_clk.c, 0x009b },
+ { &measure_only_bimc_hmss_axi_clk.c, 0x00a5 },
+ { &gcc_periph_noc_usb20_ahb_clk.c, 0x0014 },
+ { &gcc_mmss_noc_cfg_ahb_clk.c, 0x0019 },
+ { &gcc_mmss_bimc_gfx_clk.c, 0x001c },
+ { &gcc_bimc_gfx_clk.c, 0x00af },
+ { &gcc_sys_noc_usb3_axi_clk.c, 0x0006 },
+ { &gcc_sys_noc_ufs_axi_clk.c, 0x0007 },
+ { &gcc_usb30_master_clk.c, 0x002d },
+ { &gcc_usb30_sleep_clk.c, 0x002e },
+ { &gcc_usb30_mock_utmi_clk.c, 0x002f },
+ { &gcc_usb3_phy_aux_clk.c, 0x0030 },
+ { &gcc_usb3_phy_pipe_clk.c, 0x0031 },
+ { &gcc_usb20_master_clk.c, 0x0035 },
+ { &gcc_usb20_sleep_clk.c, 0x0036 },
+ { &gcc_usb20_mock_utmi_clk.c, 0x0037 },
+ { &gcc_usb_phy_cfg_ahb2phy_clk.c, 0x0038 },
+ { &gcc_sdcc1_apps_clk.c, 0x0039 },
+ { &gcc_sdcc1_ahb_clk.c, 0x003a },
+ { &gcc_sdcc2_apps_clk.c, 0x003b },
+ { &gcc_sdcc2_ahb_clk.c, 0x003c },
+ { &gcc_sdcc3_apps_clk.c, 0x003d },
+ { &gcc_sdcc3_ahb_clk.c, 0x003e },
+ { &gcc_sdcc4_apps_clk.c, 0x003f },
+ { &gcc_sdcc4_ahb_clk.c, 0x0040 },
+ { &gcc_blsp1_ahb_clk.c, 0x0041 },
+ { &gcc_blsp1_qup1_spi_apps_clk.c, 0x0043 },
+ { &gcc_blsp1_qup1_i2c_apps_clk.c, 0x0044 },
+ { &gcc_blsp1_uart1_apps_clk.c, 0x0045 },
+ { &gcc_blsp1_qup2_spi_apps_clk.c, 0x0047 },
+ { &gcc_blsp1_qup2_i2c_apps_clk.c, 0x0048 },
+ { &gcc_blsp1_uart2_apps_clk.c, 0x0049 },
+ { &gcc_blsp1_qup3_spi_apps_clk.c, 0x004b },
+ { &gcc_blsp1_qup3_i2c_apps_clk.c, 0x004c },
+ { &gcc_blsp1_uart3_apps_clk.c, 0x004d },
+ { &gcc_blsp1_qup4_spi_apps_clk.c, 0x004f },
+ { &gcc_blsp1_qup4_i2c_apps_clk.c, 0x0050 },
+ { &gcc_blsp1_uart4_apps_clk.c, 0x0051 },
+ { &gcc_blsp1_qup5_spi_apps_clk.c, 0x0053 },
+ { &gcc_blsp1_qup5_i2c_apps_clk.c, 0x0054 },
+ { &gcc_blsp1_uart5_apps_clk.c, 0x0055 },
+ { &gcc_blsp1_qup6_spi_apps_clk.c, 0x0057 },
+ { &gcc_blsp1_qup6_i2c_apps_clk.c, 0x0058 },
+ { &gcc_blsp1_uart6_apps_clk.c, 0x0059 },
+ { &gcc_blsp2_ahb_clk.c, 0x005b },
+ { &gcc_blsp2_qup1_spi_apps_clk.c, 0x005d },
+ { &gcc_blsp2_qup1_i2c_apps_clk.c, 0x005e },
+ { &gcc_blsp2_uart1_apps_clk.c, 0x005f },
+ { &gcc_blsp2_qup2_spi_apps_clk.c, 0x0061 },
+ { &gcc_blsp2_qup2_i2c_apps_clk.c, 0x0062 },
+ { &gcc_blsp2_uart2_apps_clk.c, 0x0063 },
+ { &gcc_blsp2_qup3_spi_apps_clk.c, 0x0065 },
+ { &gcc_blsp2_qup3_i2c_apps_clk.c, 0x0066 },
+ { &gcc_blsp2_uart3_apps_clk.c, 0x0067 },
+ { &gcc_blsp2_qup4_spi_apps_clk.c, 0x0069 },
+ { &gcc_blsp2_qup4_i2c_apps_clk.c, 0x006a },
+ { &gcc_blsp2_uart4_apps_clk.c, 0x006b },
+ { &gcc_blsp2_qup5_spi_apps_clk.c, 0x006d },
+ { &gcc_blsp2_qup5_i2c_apps_clk.c, 0x006e },
+ { &gcc_blsp2_uart5_apps_clk.c, 0x006f },
+ { &gcc_blsp2_qup6_spi_apps_clk.c, 0x0071 },
+ { &gcc_blsp2_qup6_i2c_apps_clk.c, 0x0072 },
+ { &gcc_blsp2_uart6_apps_clk.c, 0x0073 },
+ { &gcc_pdm_ahb_clk.c, 0x0076 },
+ { &gcc_pdm2_clk.c, 0x0078 },
+ { &gcc_prng_ahb_clk.c, 0x0079 },
+ { &gcc_tsif_ahb_clk.c, 0x007a },
+ { &gcc_tsif_ref_clk.c, 0x007b },
+ { &gcc_boot_rom_ahb_clk.c, 0x007e },
+ { &gcc_gp1_clk.c, 0x00e3 },
+ { &gcc_gp2_clk.c, 0x00e4 },
+ { &gcc_gp3_clk.c, 0x00e5 },
+ { &gcc_hmss_rbcpr_clk.c, 0x00ba },
+ { &gcc_pcie_0_slv_axi_clk.c, 0x00e6 },
+ { &gcc_pcie_0_mstr_axi_clk.c, 0x00e7 },
+ { &gcc_pcie_0_cfg_ahb_clk.c, 0x00e8 },
+ { &gcc_pcie_0_aux_clk.c, 0x00e9 },
+ { &gcc_pcie_0_pipe_clk.c, 0x00ea },
+ { &gcc_pcie_1_slv_axi_clk.c, 0x00ec },
+ { &gcc_pcie_1_mstr_axi_clk.c, 0x00ed },
+ { &gcc_pcie_1_cfg_ahb_clk.c, 0x00ee },
+ { &gcc_pcie_1_aux_clk.c, 0x00ef },
+ { &gcc_pcie_1_pipe_clk.c, 0x00f0 },
+ { &gcc_pcie_2_slv_axi_clk.c, 0x00f2 },
+ { &gcc_pcie_2_mstr_axi_clk.c, 0x00f3 },
+ { &gcc_pcie_2_cfg_ahb_clk.c, 0x00f4 },
+ { &gcc_pcie_2_aux_clk.c, 0x00f5 },
+ { &gcc_pcie_2_pipe_clk.c, 0x00f6 },
+ { &gcc_pcie_phy_cfg_ahb_clk.c, 0x00f8 },
+ { &gcc_pcie_phy_aux_clk.c, 0x00f9 },
+ { &gcc_ufs_axi_clk.c, 0x00fc },
+ { &gcc_ufs_ahb_clk.c, 0x00fd },
+ { &gcc_ufs_tx_cfg_clk.c, 0x00fe },
+ { &gcc_ufs_rx_cfg_clk.c, 0x00ff },
+ { &gcc_ufs_tx_symbol_0_clk.c, 0x0100 },
+ { &gcc_ufs_rx_symbol_0_clk.c, 0x0101 },
+ { &gcc_ufs_rx_symbol_1_clk.c, 0x0102 },
+ { &gcc_ufs_unipro_core_clk.c, 0x0106 },
+ { &gcc_ufs_ice_core_clk.c, 0x0107 },
+ { &gcc_ufs_sys_clk_core_clk.c, 0x0108 },
+ { &gcc_ufs_tx_symbol_clk_core_clk.c, 0x0109 },
+ { &gcc_aggre0_snoc_axi_clk.c, 0x0116 },
+ { &gcc_aggre0_cnoc_ahb_clk.c, 0x0117 },
+ { &gcc_smmu_aggre0_axi_clk.c, 0x0119 },
+ { &gcc_smmu_aggre0_ahb_clk.c, 0x011a },
+ { &gcc_aggre0_noc_qosgen_extref_clk.c, 0x011b },
+ { &gcc_aggre2_ufs_axi_clk.c, 0x0126 },
+ { &gcc_aggre2_usb3_axi_clk.c, 0x0127 },
+ { &gcc_dcc_ahb_clk.c, 0x012b },
+ { &gcc_aggre0_noc_mpu_cfg_ahb_clk.c, 0x012c },
+ { &ipa_clk.c, 0x012f },
+ ),
+ .c = {
+ .dbg_name = "gcc_debug_mux",
+ .ops = &clk_ops_debug_mux,
+ .flags = CLKFLAG_NO_RATE_CACHE | CLKFLAG_MEASURE,
+ CLK_INIT(gcc_debug_mux.c),
+ },
+};
+
+/*
+ * v2-only overflow mux, reached recursively from gcc_debug_mux (select
+ * 0xffff). NOTE(review): it uses plain clk_ops_gen_mux rather than
+ * clk_ops_debug_mux — presumably measurement happens at the parent mux;
+ * confirm against the measure_get_rate recursion in clock-debug.c.
+ */
+static struct mux_clk gcc_debug_mux_v2 = {
+ .ops = &mux_reg_ops,
+ .en_mask = BIT(16),
+ .mask = 0x3FF,
+ .base = &virt_dbgbase,
+ MUX_SRC_LIST(
+ { &gcc_mss_cfg_ahb_clk.c, 0x0133 },
+ { &gcc_mss_mnoc_bimc_axi_clk.c, 0x0134 },
+ { &gcc_mss_snoc_axi_clk.c, 0x0135 },
+ { &gcc_mss_q6_bimc_axi_clk.c, 0x0136 },
+ ),
+ .c = {
+ .dbg_name = "gcc_debug_mux_v2",
+ .ops = &clk_ops_gen_mux,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(gcc_debug_mux_v2.c),
+ },
+};
+
+/* RPM-owned clocks (buses, XO, buffer/divider clocks) registered first in
+ * probe so that bus votes work before the local GCC clocks come up. */
+static struct clk_lookup msm_clocks_rpm_8996[] = {
+ CLK_LIST(cxo_clk_src),
+ CLK_LIST(pnoc_a_clk),
+ CLK_LIST(pnoc_clk),
+ CLK_LIST(bimc_a_clk),
+ CLK_LIST(bimc_clk),
+ CLK_LIST(cnoc_a_clk),
+ CLK_LIST(cnoc_clk),
+ CLK_LIST(snoc_a_clk),
+ CLK_LIST(snoc_clk),
+ CLK_LIST(aggre1_noc_clk),
+ CLK_LIST(aggre1_noc_a_clk),
+ CLK_LIST(aggre2_noc_clk),
+ CLK_LIST(aggre2_noc_a_clk),
+ CLK_LIST(mmssnoc_axi_rpm_clk),
+ CLK_LIST(mmssnoc_axi_rpm_a_clk),
+ CLK_LIST(mmssnoc_axi_clk),
+ CLK_LIST(mmssnoc_axi_a_clk),
+ CLK_LIST(mmssnoc_gds_clk),
+ CLK_LIST(bb_clk1),
+ CLK_LIST(bb_clk1_ao),
+ CLK_LIST(bb_clk1_pin),
+ CLK_LIST(bb_clk1_pin_ao),
+ CLK_LIST(bb_clk2),
+ CLK_LIST(bb_clk2_ao),
+ CLK_LIST(bb_clk2_pin),
+ CLK_LIST(bb_clk2_pin_ao),
+ CLK_LIST(bimc_msmbus_clk),
+ CLK_LIST(bimc_msmbus_a_clk),
+ CLK_LIST(ce1_a_clk),
+ CLK_LIST(cnoc_msmbus_clk),
+ CLK_LIST(cnoc_msmbus_a_clk),
+ CLK_LIST(cxo_clk_src_ao),
+ CLK_LIST(cxo_dwc3_clk),
+ CLK_LIST(cxo_lpm_clk),
+ CLK_LIST(cxo_otg_clk),
+ CLK_LIST(cxo_pil_lpass_clk),
+ CLK_LIST(cxo_pil_ssc_clk),
+ CLK_LIST(div_clk1),
+ CLK_LIST(div_clk1_ao),
+ CLK_LIST(div_clk2),
+ CLK_LIST(div_clk2_ao),
+ CLK_LIST(div_clk3),
+ CLK_LIST(div_clk3_ao),
+ CLK_LIST(ipa_a_clk),
+ CLK_LIST(ipa_clk),
+ CLK_LIST(ln_bb_clk),
+ CLK_LIST(ln_bb_a_clk),
+ CLK_LIST(mcd_ce1_clk),
+ CLK_LIST(pnoc_keepalive_a_clk),
+ CLK_LIST(pnoc_msmbus_clk),
+ CLK_LIST(pnoc_msmbus_a_clk),
+ CLK_LIST(pnoc_pm_clk),
+ CLK_LIST(pnoc_sps_clk),
+ CLK_LIST(qcedev_ce1_clk),
+ CLK_LIST(qcrypto_ce1_clk),
+ CLK_LIST(qdss_a_clk),
+ CLK_LIST(qdss_clk),
+ CLK_LIST(qseecom_ce1_clk),
+ CLK_LIST(rf_clk1),
+ CLK_LIST(rf_clk1_ao),
+ CLK_LIST(rf_clk1_pin),
+ CLK_LIST(rf_clk1_pin_ao),
+ CLK_LIST(rf_clk2),
+ CLK_LIST(rf_clk2_ao),
+ CLK_LIST(rf_clk2_pin),
+ CLK_LIST(rf_clk2_pin_ao),
+ CLK_LIST(scm_ce1_clk),
+ CLK_LIST(snoc_msmbus_clk),
+ CLK_LIST(snoc_msmbus_a_clk),
+ CLK_LIST(ce1_clk),
+ CLK_LIST(gcc_ce1_ahb_m_clk),
+ CLK_LIST(gcc_ce1_axi_m_clk),
+ CLK_LIST(measure_only_bimc_hmss_axi_clk),
+};
+
+/* Locally-controlled GCC clocks common to all 8996 revisions; v2-only
+ * additions live in msm_clocks_gcc_8996_v2 below. */
+static struct clk_lookup msm_clocks_gcc_8996[] = {
+ /* PLLs and root clock generators (RCGs) */
+ CLK_LIST(gpll0),
+ CLK_LIST(gpll0_ao),
+ CLK_LIST(gpll0_out_main),
+ CLK_LIST(gcc_mmss_gpll0_div_clk),
+ CLK_LIST(gpll4),
+ CLK_LIST(gpll4_out_main),
+ CLK_LIST(ufs_axi_clk_src),
+ CLK_LIST(pcie_aux_clk_src),
+ CLK_LIST(usb30_master_clk_src),
+ CLK_LIST(usb20_master_clk_src),
+ CLK_LIST(ufs_ice_core_clk_src),
+ CLK_LIST(blsp1_qup1_i2c_apps_clk_src),
+ CLK_LIST(blsp1_qup1_spi_apps_clk_src),
+ CLK_LIST(blsp1_qup2_i2c_apps_clk_src),
+ CLK_LIST(blsp1_qup2_spi_apps_clk_src),
+ CLK_LIST(blsp1_qup3_i2c_apps_clk_src),
+ CLK_LIST(blsp1_qup3_spi_apps_clk_src),
+ CLK_LIST(blsp1_qup4_i2c_apps_clk_src),
+ CLK_LIST(blsp1_qup4_spi_apps_clk_src),
+ CLK_LIST(blsp1_qup5_i2c_apps_clk_src),
+ CLK_LIST(blsp1_qup5_spi_apps_clk_src),
+ CLK_LIST(blsp1_qup6_i2c_apps_clk_src),
+ CLK_LIST(blsp1_qup6_spi_apps_clk_src),
+ CLK_LIST(blsp1_uart1_apps_clk_src),
+ CLK_LIST(blsp1_uart2_apps_clk_src),
+ CLK_LIST(blsp1_uart3_apps_clk_src),
+ CLK_LIST(blsp1_uart4_apps_clk_src),
+ CLK_LIST(blsp1_uart5_apps_clk_src),
+ CLK_LIST(blsp1_uart6_apps_clk_src),
+ CLK_LIST(blsp2_qup1_i2c_apps_clk_src),
+ CLK_LIST(blsp2_qup1_spi_apps_clk_src),
+ CLK_LIST(blsp2_qup2_i2c_apps_clk_src),
+ CLK_LIST(blsp2_qup2_spi_apps_clk_src),
+ CLK_LIST(blsp2_qup3_i2c_apps_clk_src),
+ CLK_LIST(blsp2_qup3_spi_apps_clk_src),
+ CLK_LIST(blsp2_qup4_i2c_apps_clk_src),
+ CLK_LIST(blsp2_qup4_spi_apps_clk_src),
+ CLK_LIST(blsp2_qup5_i2c_apps_clk_src),
+ CLK_LIST(blsp2_qup5_spi_apps_clk_src),
+ CLK_LIST(blsp2_qup6_i2c_apps_clk_src),
+ CLK_LIST(blsp2_qup6_spi_apps_clk_src),
+ CLK_LIST(blsp2_uart1_apps_clk_src),
+ CLK_LIST(blsp2_uart2_apps_clk_src),
+ CLK_LIST(blsp2_uart3_apps_clk_src),
+ CLK_LIST(blsp2_uart4_apps_clk_src),
+ CLK_LIST(blsp2_uart5_apps_clk_src),
+ CLK_LIST(blsp2_uart6_apps_clk_src),
+ CLK_LIST(gp1_clk_src),
+ CLK_LIST(gp2_clk_src),
+ CLK_LIST(gp3_clk_src),
+ CLK_LIST(hmss_rbcpr_clk_src),
+ CLK_LIST(pdm2_clk_src),
+ CLK_LIST(sdcc1_apps_clk_src),
+ CLK_LIST(sdcc2_apps_clk_src),
+ CLK_LIST(sdcc3_apps_clk_src),
+ CLK_LIST(sdcc4_apps_clk_src),
+ CLK_LIST(tsif_ref_clk_src),
+ CLK_LIST(usb20_mock_utmi_clk_src),
+ CLK_LIST(usb30_mock_utmi_clk_src),
+ CLK_LIST(usb3_phy_aux_clk_src),
+ /* Resets, branches, gates and votes */
+ CLK_LIST(gcc_qusb2phy_prim_reset),
+ CLK_LIST(gcc_qusb2phy_sec_reset),
+ CLK_LIST(gcc_periph_noc_usb20_ahb_clk),
+ CLK_LIST(gcc_aggre0_cnoc_ahb_clk),
+ CLK_LIST(gcc_aggre0_snoc_axi_clk),
+ CLK_LIST(gcc_smmu_aggre0_ahb_clk),
+ CLK_LIST(gcc_smmu_aggre0_axi_clk),
+ CLK_LIST(gcc_aggre0_noc_qosgen_extref_clk),
+ CLK_LIST(gcc_aggre2_usb3_axi_clk),
+ CLK_LIST(gcc_aggre2_ufs_axi_clk),
+ CLK_LIST(gcc_blsp1_ahb_clk),
+ CLK_LIST(gcc_blsp1_qup1_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_qup1_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup2_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_qup2_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup3_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_qup3_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup4_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_qup4_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup5_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_qup5_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup6_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_qup6_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_uart1_apps_clk),
+ CLK_LIST(gcc_blsp1_uart2_apps_clk),
+ CLK_LIST(gcc_blsp1_uart3_apps_clk),
+ CLK_LIST(gcc_blsp1_uart4_apps_clk),
+ CLK_LIST(gcc_blsp1_uart5_apps_clk),
+ CLK_LIST(gcc_blsp1_uart6_apps_clk),
+ CLK_LIST(gcc_blsp2_ahb_clk),
+ CLK_LIST(gcc_blsp2_qup1_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_qup1_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup2_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_qup2_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup3_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_qup3_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup4_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_qup4_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup5_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_qup5_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup6_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_qup6_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_uart1_apps_clk),
+ CLK_LIST(gcc_blsp2_uart2_apps_clk),
+ CLK_LIST(gcc_blsp2_uart3_apps_clk),
+ CLK_LIST(gcc_blsp2_uart4_apps_clk),
+ CLK_LIST(gcc_blsp2_uart5_apps_clk),
+ CLK_LIST(gcc_blsp2_uart6_apps_clk),
+ CLK_LIST(gcc_boot_rom_ahb_clk),
+ CLK_LIST(gcc_gp1_clk),
+ CLK_LIST(gcc_gp2_clk),
+ CLK_LIST(gcc_gp3_clk),
+ CLK_LIST(gcc_hmss_rbcpr_clk),
+ CLK_LIST(gcc_mmss_noc_cfg_ahb_clk),
+ CLK_LIST(gcc_sys_noc_usb3_axi_clk),
+ CLK_LIST(gcc_sys_noc_ufs_axi_clk),
+ CLK_LIST(gcc_pcie_0_phy_reset),
+ CLK_LIST(gcc_pcie_0_pipe_clk),
+ CLK_LIST(gcc_pcie_0_aux_clk),
+ CLK_LIST(gcc_pcie_0_cfg_ahb_clk),
+ CLK_LIST(gcc_pcie_0_mstr_axi_clk),
+ CLK_LIST(gcc_pcie_0_slv_axi_clk),
+ CLK_LIST(gcc_pcie_1_phy_reset),
+ CLK_LIST(gcc_pcie_1_pipe_clk),
+ CLK_LIST(gcc_pcie_1_aux_clk),
+ CLK_LIST(gcc_pcie_1_cfg_ahb_clk),
+ CLK_LIST(gcc_pcie_1_mstr_axi_clk),
+ CLK_LIST(gcc_pcie_1_slv_axi_clk),
+ CLK_LIST(gcc_pcie_2_phy_reset),
+ CLK_LIST(gcc_pcie_2_pipe_clk),
+ CLK_LIST(gcc_pcie_2_aux_clk),
+ CLK_LIST(gcc_pcie_2_cfg_ahb_clk),
+ CLK_LIST(gcc_pcie_2_mstr_axi_clk),
+ CLK_LIST(gcc_pcie_2_slv_axi_clk),
+ CLK_LIST(gcc_pcie_phy_reset),
+ CLK_LIST(gcc_pcie_phy_com_reset),
+ CLK_LIST(gcc_pcie_phy_nocsr_com_phy_reset),
+ CLK_LIST(gcc_pcie_phy_aux_clk),
+ CLK_LIST(gcc_pcie_phy_cfg_ahb_clk),
+ CLK_LIST(gcc_pdm2_clk),
+ CLK_LIST(gcc_pdm_ahb_clk),
+ CLK_LIST(gcc_prng_ahb_clk),
+ CLK_LIST(gcc_sdcc1_ahb_clk),
+ CLK_LIST(gcc_sdcc1_apps_clk),
+ CLK_LIST(gcc_sdcc2_ahb_clk),
+ CLK_LIST(gcc_sdcc2_apps_clk),
+ CLK_LIST(gcc_sdcc3_ahb_clk),
+ CLK_LIST(gcc_sdcc3_apps_clk),
+ CLK_LIST(gcc_sdcc4_ahb_clk),
+ CLK_LIST(gcc_sdcc4_apps_clk),
+ CLK_LIST(gcc_tsif_ahb_clk),
+ CLK_LIST(gcc_tsif_ref_clk),
+ CLK_LIST(gcc_ufs_ahb_clk),
+ CLK_LIST(gcc_ufs_axi_clk),
+ CLK_LIST(gcc_ufs_ice_core_clk),
+ CLK_LIST(gcc_ufs_rx_symbol_1_clk),
+ CLK_LIST(gcc_ufs_unipro_core_clk),
+ CLK_LIST(gcc_usb3_phy_pipe_clk),
+ CLK_LIST(gcc_usb20_master_clk),
+ CLK_LIST(gcc_usb20_mock_utmi_clk),
+ CLK_LIST(gcc_usb20_sleep_clk),
+ CLK_LIST(gcc_usb30_master_clk),
+ CLK_LIST(gcc_usb30_mock_utmi_clk),
+ CLK_LIST(gcc_usb30_sleep_clk),
+ CLK_LIST(gcc_usb3_phy_aux_clk),
+ CLK_LIST(gcc_usb_phy_cfg_ahb2phy_clk),
+ CLK_LIST(gcc_ufs_rx_cfg_clk),
+ CLK_LIST(gcc_ufs_rx_symbol_0_clk),
+ CLK_LIST(gcc_ufs_tx_cfg_clk),
+ CLK_LIST(gcc_ufs_tx_symbol_0_clk),
+ CLK_LIST(gcc_ufs_sys_clk_core_clk),
+ CLK_LIST(gcc_ufs_tx_symbol_clk_core_clk),
+ CLK_LIST(hlos1_vote_lpass_core_smmu_clk),
+ CLK_LIST(hlos1_vote_lpass_adsp_smmu_clk),
+ CLK_LIST(gcc_usb3_phy_reset),
+ CLK_LIST(gcc_usb3phy_phy_reset),
+ CLK_LIST(gcc_usb3_clkref_clk),
+ CLK_LIST(gcc_hdmi_clkref_clk),
+ CLK_LIST(gcc_edp_clkref_clk),
+ CLK_LIST(gcc_ufs_clkref_clk),
+ CLK_LIST(gcc_pcie_clkref_clk),
+ CLK_LIST(gcc_rx2_usb2_clkref_clk),
+ CLK_LIST(gcc_rx1_usb2_clkref_clk),
+ CLK_LIST(gcc_mmss_bimc_gfx_clk),
+ CLK_LIST(gcc_bimc_gfx_clk),
+ CLK_LIST(gcc_dcc_ahb_clk),
+ CLK_LIST(gcc_aggre0_noc_mpu_cfg_ahb_clk),
+};
+
+/* Clocks present only on 8996 v2/v3 silicon (QSPI, SDCC ICE, MSS buses). */
+static struct clk_lookup msm_clocks_gcc_8996_v2[] = {
+ CLK_LIST(qspi_ser_clk_src),
+ CLK_LIST(sdcc1_ice_core_clk_src),
+ CLK_LIST(gcc_qspi_ahb_clk),
+ CLK_LIST(gcc_qspi_ser_clk),
+ CLK_LIST(gcc_sdcc1_ice_core_clk),
+ CLK_LIST(gcc_mss_cfg_ahb_clk),
+ CLK_LIST(gcc_mss_snoc_axi_clk),
+ CLK_LIST(gcc_mss_q6_bimc_axi_clk),
+ CLK_LIST(gcc_mss_mnoc_bimc_axi_clk),
+ CLK_LIST(gpll0_out_msscc),
+};
+
+/*
+ * Adjust per-voltage-corner fmax tables for v2/v3 silicon, which has
+ * different frequency/voltage characterization than v1. Must run before
+ * the common clock tables are registered (see probe ordering).
+ */
+static void msm_clocks_gcc_8996_v2_fixup(void)
+{
+ pcie_aux_clk_src.c.fmax[VDD_DIG_LOWER] = 9600000;
+ pcie_aux_clk_src.c.fmax[VDD_DIG_LOW] = 19200000;
+ pcie_aux_clk_src.c.fmax[VDD_DIG_NOMINAL] = 19200000;
+ pcie_aux_clk_src.c.fmax[VDD_DIG_HIGH] = 19200000;
+
+ usb30_master_clk_src.c.fmax[VDD_DIG_LOW] = 133333000;
+ usb30_master_clk_src.c.fmax[VDD_DIG_NOMINAL] = 200000000;
+ usb30_master_clk_src.c.fmax[VDD_DIG_HIGH] = 240000000;
+
+ sdcc1_apps_clk_src.c.fmax[VDD_DIG_LOW] = 50000000;
+ sdcc2_apps_clk_src.c.fmax[VDD_DIG_LOW] = 50000000;
+ sdcc3_apps_clk_src.c.fmax[VDD_DIG_LOW] = 50000000;
+}
+
+/*
+ * Probe the 8996 GCC: vote BIMC on, map registers, hand the HMSS AHB clock
+ * to hardware voting, fetch the vdd_dig regulator, register RPM clocks, then
+ * the common (and, on v2/v3, the extra) local clock tables, and finally take
+ * the boot-time keepalive votes. Returns 0 on success, -errno on failure.
+ *
+ * NOTE(review): error paths after vote_bimc() succeed do not release the
+ * BIMC vote — confirm whether vote_bimc() needs an explicit unvote here.
+ */
+static int msm_gcc_8996_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ u32 regval;
+ const char *compat = NULL;
+ int compatlen = 0;
+ bool is_v2 = false;
+ int ret;
+
+ ret = vote_bimc(&bimc_clk, INT_MAX);
+ if (ret < 0)
+ return ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get CC base.\n");
+ return -EINVAL;
+ }
+
+ virt_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!virt_base) {
+ dev_err(&pdev->dev, "Failed to map in CC registers.\n");
+ return -ENOMEM;
+ }
+
+ /* Set the HMSS_AHB_CLK_ENA bit to enable the hmss_ahb_clk */
+ regval = readl_relaxed(virt_base + GCC_APCS_CLOCK_BRANCH_ENA_VOTE);
+ regval |= BIT(21);
+ writel_relaxed(regval, virt_base + GCC_APCS_CLOCK_BRANCH_ENA_VOTE);
+
+ /*
+ * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be
+ * turned off by hardware during certain apps low power modes.
+ */
+ regval = readl_relaxed(virt_base + GCC_APCS_CLOCK_SLEEP_ENA_VOTE);
+ regval |= BIT(21);
+ writel_relaxed(regval, virt_base + GCC_APCS_CLOCK_SLEEP_ENA_VOTE);
+
+ vdd_dig.vdd_uv[1] = RPM_REGULATOR_CORNER_SVS_KRAIT;
+ vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
+ if (IS_ERR(vdd_dig.regulator[0])) {
+ /* Deferral is expected; only log real failures. */
+ if (PTR_ERR(vdd_dig.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_dig regulator!\n");
+ return PTR_ERR(vdd_dig.regulator[0]);
+ }
+
+ bimc_clk.c.parent = &cxo_clk_src.c;
+ ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_rpm_8996,
+ ARRAY_SIZE(msm_clocks_rpm_8996));
+ if (ret)
+ return ret;
+
+ ret = enable_rpm_scaling();
+ if (ret < 0)
+ return ret;
+
+ /* Perform revision specific fixes */
+ compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen);
+ if (!compat || (compatlen <= 0))
+ return -EINVAL;
+ is_v2 = !strcmp(compat, "qcom,gcc-8996-v2") ||
+ !strcmp(compat, "qcom,gcc-8996-v3");
+ if (is_v2)
+ msm_clocks_gcc_8996_v2_fixup();
+
+ ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_gcc_8996,
+ ARRAY_SIZE(msm_clocks_gcc_8996));
+ if (ret)
+ return ret;
+
+ /* Register v2 specific clocks */
+ if (is_v2) {
+ ret = of_msm_clock_register(pdev->dev.of_node,
+ msm_clocks_gcc_8996_v2,
+ ARRAY_SIZE(msm_clocks_gcc_8996_v2));
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Hold an active set vote for the PNOC AHB source. Sleep set vote is 0.
+ */
+ clk_set_rate(&pnoc_keepalive_a_clk.c, 19200000);
+ clk_prepare_enable(&pnoc_keepalive_a_clk.c);
+
+ /* This clock is used for all MMSS register access */
+ clk_prepare_enable(&gcc_mmss_noc_cfg_ahb_clk.c);
+
+ /* Keep an active vote on CXO in case no other driver votes for it */
+ clk_prepare_enable(&cxo_clk_src_ao.c);
+
+ /*
+ * Keep the core memory settings enabled at all times for
+ * gcc_mmss_bimc_gfx_clk.
+ */
+ clk_set_flags(&gcc_mmss_bimc_gfx_clk.c, CLKFLAG_RETAIN_MEM);
+
+ dev_info(&pdev->dev, "Registered GCC clocks.\n");
+ return 0;
+}
+
+/* DT match table; const since .of_match_table takes a const pointer. */
+static const struct of_device_id msm_clock_gcc_match_table[] = {
+ { .compatible = "qcom,gcc-8996" },
+ { .compatible = "qcom,gcc-8996-v2" },
+ { .compatible = "qcom,gcc-8996-v3" },
+ {}
+};
+
+/* Platform driver glue for the GCC clock controller. */
+static struct platform_driver msm_clock_gcc_driver = {
+ .probe = msm_gcc_8996_probe,
+ .driver = {
+ .name = "qcom,gcc-8996",
+ .of_match_table = msm_clock_gcc_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+/* arch_initcall: GCC must register early so dependent drivers find clocks. */
+int __init msm_gcc_8996_init(void)
+{
+ return platform_driver_register(&msm_clock_gcc_driver);
+}
+arch_initcall(msm_gcc_8996_init);
+
+/* ======== Clock Debug Controller ======== */
+/* Lookup tables for the debug/measure mux; the v2 table additionally
+ * registers the gcc_debug_mux_v2 overflow mux. */
+static struct clk_lookup msm_clocks_measure_8996[] = {
+ CLK_LIST(mmss_gcc_dbg_clk),
+ CLK_LIST(gpu_gcc_dbg_clk),
+ CLK_LIST(cpu_dbg_clk),
+ CLK_LOOKUP_OF("measure", gcc_debug_mux, "debug"),
+};
+
+static struct clk_lookup msm_clocks_measure_8996_v2[] = {
+ CLK_LIST(mmss_gcc_dbg_clk),
+ CLK_LIST(gpu_gcc_dbg_clk),
+ CLK_LIST(gcc_debug_mux_v2),
+ CLK_LIST(cpu_dbg_clk),
+ CLK_LOOKUP_OF("measure", gcc_debug_mux, "debug"),
+};
+
+/* DT match table; const since .of_match_table takes a const pointer. */
+static const struct of_device_id msm_clock_debug_match_table[] = {
+ { .compatible = "qcom,cc-debug-8996" },
+ { .compatible = "qcom,cc-debug-8996-v2" },
+ { .compatible = "qcom,cc-debug-8996-v3" },
+ {}
+};
+
+/*
+ * Probe the clock debug controller: build clk_ops_debug_mux (gen_mux ops
+ * plus measure_get_rate), map the debug register space, wire the external
+ * (MMSS/CPU/GPU) debug clocks to their DT handles, then register the
+ * measurement lookup table matching the silicon revision.
+ */
+static int msm_clock_debug_8996_probe(struct platform_device *pdev)
+{
+ const char *compat = NULL;
+ int compatlen = 0;
+ struct resource *res;
+ int ret;
+
+ /* Measurement ops = generic mux ops with a rate-measuring get_rate. */
+ clk_ops_debug_mux = clk_ops_gen_mux;
+ clk_ops_debug_mux.get_rate = measure_get_rate;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get CC base.\n");
+ return -EINVAL;
+ }
+ virt_dbgbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!virt_dbgbase) {
+ dev_err(&pdev->dev, "Failed to map in CC registers.\n");
+ return -ENOMEM;
+ }
+
+ /* Resolve the sub-controller debug clocks via their DT clock names. */
+ mmss_gcc_dbg_clk.dev = &pdev->dev;
+ mmss_gcc_dbg_clk.clk_id = "debug_mmss_clk";
+ cpu_dbg_clk.dev = &pdev->dev;
+ cpu_dbg_clk.clk_id = "debug_cpu_clk";
+
+ gpu_gcc_dbg_clk.dev = &pdev->dev;
+ gpu_gcc_dbg_clk.clk_id = "debug_gpu_clk";
+
+ /* Perform revision specific fixes */
+ compat = of_get_property(pdev->dev.of_node, "compatible", &compatlen);
+ if (!compat || (compatlen <= 0))
+ return -EINVAL;
+
+ /* v1 lacks the v2 overflow mux; pick the matching lookup table. */
+ if (!strcmp(compat, "qcom,cc-debug-8996"))
+ ret = of_msm_clock_register(pdev->dev.of_node,
+ msm_clocks_measure_8996,
+ ARRAY_SIZE(msm_clocks_measure_8996));
+ else
+ ret = of_msm_clock_register(pdev->dev.of_node,
+ msm_clocks_measure_8996_v2,
+ ARRAY_SIZE(msm_clocks_measure_8996_v2));
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "Registered debug mux.\n");
+ return ret;
+}
+
+/* Platform driver glue for the debug controller. */
+static struct platform_driver msm_clock_debug_driver = {
+ .probe = msm_clock_debug_8996_probe,
+ .driver = {
+ .name = "qcom,cc-debug-8996",
+ .of_match_table = msm_clock_debug_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+/* late_initcall: all measurable clock providers must be registered first. */
+int __init msm_clock_debug_8996_init(void)
+{
+ return platform_driver_register(&msm_clock_debug_driver);
+}
+late_initcall(msm_clock_debug_8996_init);
diff --git a/drivers/clk/msm/clock-gcc-cobalt.c b/drivers/clk/msm/clock-gcc-cobalt.c
new file mode 100644
index 000000000000..1756157c996d
--- /dev/null
+++ b/drivers/clk/msm/clock-gcc-cobalt.c
@@ -0,0 +1,2915 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-voter.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/clock-alpha-pll.h>
+#include <soc/qcom/rpm-smd.h>
+#include <soc/qcom/clock-rpm.h>
+
+#include <dt-bindings/clock/msm-clocks-cobalt.h>
+#include <dt-bindings/clock/msm-clocks-hwio-cobalt.h>
+
+#include "vdd-level-cobalt.h"
+
+/* ioremapped GCC register base, filled in at probe time. */
+static void __iomem *virt_base;
+/* Debug/measurement register base (separate mapping). */
+static void __iomem *virt_dbgbase;
+
+/*
+ * RCG source-select field values (bits 10:8 of the CFG register) for
+ * each parent used by the frequency tables below.
+ */
+#define cxo_clk_src_source_val 0
+#define cxo_clk_src_ao_source_val 0
+#define gpll0_out_main_source_val 1
+#define gpll0_ao_source_val 1
+#define gpll4_out_main_source_val 5
+
+/*
+ * Encode a (possibly half-integer) pre-divider into the RCG register
+ * form 2*div - 1; a divider of 0 means bypass.  Argument fully
+ * parenthesized for macro hygiene (the original left the condition
+ * use of 'div' bare).
+ */
+#define FIXDIV(div) ((div) ? (2 * (div) - 1) : (0))
+
+/*
+ * Build one clk_freq_tbl entry: output frequency f from source s with
+ * pre-divider div and MND counter m/n.  n_val/d_val use the not-(N-M)
+ * register encoding; n == 0 disables the MND counter.
+ */
+#define F(f, s, div, m, n) \
+ { \
+ .freq_hz = (f), \
+ .src_clk = &s.c, \
+ .m_val = (m), \
+ .n_val = ~((n)-(m)) * !!(n), \
+ .d_val = ~(n),\
+ .div_src_val = BVAL(4, 0, (int)FIXDIV(div)) \
+ | BVAL(10, 8, s##_source_val), \
+ }
+
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
+
+/*
+ * RPM-owned clocks (managed over SMD) and software voter clocks.
+ * The second name in each RPM macro ("_a"/"_ao") is the active-only
+ * vote; voter clocks aggregate rate requests from multiple clients
+ * onto a single underlying clock.
+ */
+DEFINE_CLK_RPM_SMD_BRANCH(cxo_clk_src, cxo_clk_src_ao, RPM_MISC_CLK_TYPE,
+ CXO_CLK_SRC_ID, 19200000);
+DEFINE_CLK_RPM_SMD(bimc_clk, bimc_a_clk, RPM_MEM_CLK_TYPE, BIMC_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD(cnoc_clk, cnoc_a_clk, RPM_BUS_CLK_TYPE, CNOC_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD(snoc_clk, snoc_a_clk, RPM_BUS_CLK_TYPE, SNOC_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD(cnoc_periph_clk, cnoc_periph_a_clk, RPM_BUS_CLK_TYPE,
+ CNOC_PERIPH_CLK_ID, NULL);
+static DEFINE_CLK_VOTER(cnoc_periph_keepalive_a_clk, &cnoc_periph_a_clk.c,
+ LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_msmbus_clk, &bimc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(bimc_msmbus_a_clk, &bimc_a_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(cnoc_msmbus_clk, &cnoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(cnoc_msmbus_a_clk, &cnoc_a_clk.c, LONG_MAX);
+/* Branch voters: on/off votes on the shared CXO source per consumer. */
+static DEFINE_CLK_BRANCH_VOTER(cxo_dwc3_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_lpm_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_otg_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_pil_lpass_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_pil_ssc_clk, &cxo_clk_src.c);
+static DEFINE_CLK_BRANCH_VOTER(cxo_pil_spss_clk, &cxo_clk_src.c);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(div_clk1, div_clk1_ao, DIV_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(div_clk2, div_clk2_ao, DIV_CLK2_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(div_clk3, div_clk3_ao, DIV_CLK3_ID);
+DEFINE_CLK_RPM_SMD(ipa_clk, ipa_a_clk, RPM_IPA_CLK_TYPE,
+ IPA_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD(ce1_clk, ce1_a_clk, RPM_CE_CLK_TYPE,
+ CE1_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(ln_bb_clk1, ln_bb_clk1_ao, LN_BB_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(ln_bb_clk1_pin, ln_bb_clk1_pin_ao,
+ LN_BB_CLK1_PIN_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(ln_bb_clk2, ln_bb_clk2_ao, LN_BB_CLK2_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(ln_bb_clk2_pin, ln_bb_clk2_pin_ao,
+ LN_BB_CLK2_PIN_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(ln_bb_clk3, ln_bb_clk3_ao, LN_BB_CLK3_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(ln_bb_clk3_pin, ln_bb_clk3_pin_ao,
+ LN_BB_CLK3_PIN_ID);
+/* Crypto-engine clients vote a floor of ~85.71 MHz on CE1. */
+static DEFINE_CLK_VOTER(mcd_ce1_clk, &ce1_clk.c, 85710000);
+DEFINE_CLK_DUMMY(measure_only_bimc_hmss_axi_clk, 0);
+DEFINE_CLK_RPM_SMD(mmssnoc_axi_clk, mmssnoc_axi_a_clk,
+ RPM_MMAXI_CLK_TYPE, MMSSNOC_AXI_CLK_ID, NULL);
+DEFINE_CLK_RPM_SMD(aggre1_noc_clk, aggre1_noc_a_clk, RPM_AGGR_CLK_TYPE,
+ AGGR1_NOC_ID, NULL);
+DEFINE_CLK_RPM_SMD(aggre2_noc_clk, aggre2_noc_a_clk, RPM_AGGR_CLK_TYPE,
+ AGGR2_NOC_ID, NULL);
+static DEFINE_CLK_VOTER(qcedev_ce1_clk, &ce1_clk.c, 85710000);
+static DEFINE_CLK_VOTER(qcrypto_ce1_clk, &ce1_clk.c, 85710000);
+DEFINE_CLK_RPM_SMD_QDSS(qdss_clk, qdss_a_clk, RPM_MISC_CLK_TYPE,
+ QDSS_CLK_ID);
+static DEFINE_CLK_VOTER(qseecom_ce1_clk, &ce1_clk.c, 85710000);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(rf_clk1, rf_clk1_ao, RF_CLK1_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(rf_clk1_pin, rf_clk1_pin_ao,
+ RF_CLK1_PIN_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(rf_clk2, rf_clk2_ao, RF_CLK2_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(rf_clk2_pin, rf_clk2_pin_ao,
+ RF_CLK2_PIN_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER(rf_clk3, rf_clk3_ao, RF_CLK3_ID);
+DEFINE_CLK_RPM_SMD_XO_BUFFER_PINCTRL(rf_clk3_pin, rf_clk3_pin_ao,
+ RF_CLK3_PIN_ID);
+static DEFINE_CLK_VOTER(scm_ce1_clk, &ce1_clk.c, 85710000);
+static DEFINE_CLK_VOTER(snoc_msmbus_clk, &snoc_clk.c, LONG_MAX);
+static DEFINE_CLK_VOTER(snoc_msmbus_a_clk, &snoc_a_clk.c, LONG_MAX);
+DEFINE_CLK_DUMMY(gcc_ce1_ahb_m_clk, 0);
+DEFINE_CLK_DUMMY(gcc_ce1_axi_m_clk, 0);
+
+/* Debug clocks resolved from other clock controllers at probe time. */
+DEFINE_EXT_CLK(debug_mmss_clk, NULL);
+DEFINE_EXT_CLK(gpu_gcc_debug_clk, NULL);
+DEFINE_EXT_CLK(gfx_gcc_debug_clk, NULL);
+DEFINE_EXT_CLK(debug_cpu_clk, NULL);
+
+/*
+ * gpll0 and gpll0_ao are two views of the same physical 600 MHz PLL.
+ * They share soft_vote_gpll0 so the PLL stays on while either the
+ * normal (PRIMARY) or active-only CPU (ACPU) vote is held.
+ */
+static unsigned int soft_vote_gpll0;
+
+static struct pll_vote_clk gpll0 = {
+ .en_reg = (void __iomem *)GCC_APCS_GPLL_ENA_VOTE,
+ .en_mask = BIT(0),
+ .status_reg = (void __iomem *)GCC_GPLL0_MODE,
+ .status_mask = BIT(31),
+ .soft_vote = &soft_vote_gpll0,
+ .soft_vote_mask = PLL_SOFT_VOTE_PRIMARY,
+ .base = &virt_base,
+ .c = {
+ .rate = 600000000,
+ .parent = &cxo_clk_src.c,
+ .dbg_name = "gpll0",
+ .ops = &clk_ops_pll_acpu_vote,
+ CLK_INIT(gpll0.c),
+ },
+};
+
+/* Active-only alias of gpll0; parented on the active-only CXO. */
+static struct pll_vote_clk gpll0_ao = {
+ .en_reg = (void __iomem *)GCC_APCS_GPLL_ENA_VOTE,
+ .en_mask = BIT(0),
+ .status_reg = (void __iomem *)GCC_GPLL0_MODE,
+ .status_mask = BIT(31),
+ .soft_vote = &soft_vote_gpll0,
+ .soft_vote_mask = PLL_SOFT_VOTE_ACPU,
+ .base = &virt_base,
+ .c = {
+ .rate = 600000000,
+ .parent = &cxo_clk_src_ao.c,
+ .dbg_name = "gpll0_ao",
+ .ops = &clk_ops_pll_acpu_vote,
+ CLK_INIT(gpll0_ao.c),
+ },
+};
+
+/* Fixed-rate output tap of gpll0 used as an RCG parent. */
+DEFINE_EXT_CLK(gpll0_out_main, &gpll0.c);
+
+/* Vote that hands a divided gpll0 to the multimedia subsystem. */
+static struct local_vote_clk gcc_mmss_gpll0_div_clk = {
+ .cbcr_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+ .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+ .en_mask = BIT(0),
+ .base = &virt_base,
+ .halt_check = DELAY,
+ .c = {
+ .dbg_name = "gcc_mmss_gpll0_div_clk",
+ .parent = &gpll0.c,
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_mmss_gpll0_div_clk.c),
+ },
+};
+
+/* 384 MHz general-purpose PLL (source_val 5 in the RCG tables). */
+static struct pll_vote_clk gpll4 = {
+ .en_reg = (void __iomem *)GCC_APCS_GPLL_ENA_VOTE,
+ .en_mask = BIT(4),
+ .status_reg = (void __iomem *)GCC_GPLL4_MODE,
+ .status_mask = BIT(31),
+ .base = &virt_base,
+ .c = {
+ .rate = 384000000,
+ .parent = &cxo_clk_src.c,
+ .dbg_name = "gpll4",
+ .ops = &clk_ops_pll_vote,
+ VDD_DIG_FMAX_MAP3(LOWER, 400000000, LOW, 800000000,
+ NOMINAL, 1600000000),
+ CLK_INIT(gpll4.c),
+ },
+};
+DEFINE_EXT_CLK(gpll4_out_main, &gpll4.c);
+
+/*
+ * Root clock generators (RCGs).  Each pairs a frequency table (parent,
+ * pre-divider, M/N) with a CMD_RCGR register; set_rate_hid programs
+ * divider-only RCGs, set_rate_mnd those with an MND counter.  The
+ * VDD_DIG_FMAX maps give the max frequency per digital voltage corner.
+ */
+static struct clk_freq_tbl ftbl_hmss_ahb_clk_src[] = {
+ F( 19200000, cxo_clk_src_ao, 1, 0, 0),
+ F( 37500000, gpll0_out_main, 16, 0, 0),
+ F( 75000000, gpll0_out_main, 8, 0, 0),
+ F_END
+};
+
+static struct rcg_clk hmss_ahb_clk_src = {
+ .cmd_rcgr_reg = GCC_HMSS_AHB_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_hmss_ahb_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "hmss_ahb_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 50000000,
+ NOMINAL, 100000000),
+ CLK_INIT(hmss_ahb_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_usb30_master_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 120000000, gpll0_out_main, 5, 0, 0),
+ F( 150000000, gpll0_out_main, 4, 0, 0),
+ F_END
+};
+
+static struct rcg_clk usb30_master_clk_src = {
+ .cmd_rcgr_reg = GCC_USB30_MASTER_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_usb30_master_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "usb30_master_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP4(LOWER, 66670000, LOW, 133330000,
+ NOMINAL, 200000000, HIGH, 240000000),
+ CLK_INIT(usb30_master_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_pcie_aux_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk pcie_aux_clk_src = {
+ .cmd_rcgr_reg = GCC_PCIE_AUX_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_pcie_aux_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "pcie_aux_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP2(LOWER, 9600000, LOW, 19200000),
+ CLK_INIT(pcie_aux_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_ufs_axi_clk_src[] = {
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F( 100000000, gpll0_out_main, 6, 0, 0),
+ F( 200000000, gpll0_out_main, 3, 0, 0),
+ F( 240000000, gpll0_out_main, 2.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk ufs_axi_clk_src = {
+ .cmd_rcgr_reg = GCC_UFS_AXI_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_ufs_axi_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "ufs_axi_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP4(LOWER, 50000000, LOW, 100000000,
+ NOMINAL, 200000000, HIGH, 240000000),
+ CLK_INIT(ufs_axi_clk_src.c),
+ },
+};
+
+/* Shared frequency tables for all BLSP QUP I2C/SPI RCGs below. */
+static struct clk_freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP1_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup1_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp1_qup1_i2c_apps_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_blsp_qup_spi_apps_clk_src[] = {
+ F( 960000, cxo_clk_src, 10, 1, 2),
+ F( 4800000, cxo_clk_src, 4, 0, 0),
+ F( 9600000, cxo_clk_src, 2, 0, 0),
+ F( 15000000, gpll0_out_main, 10, 1, 4),
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP1_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup1_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup1_spi_apps_clk_src.c),
+ },
+};
+
+/*
+ * BLSP1 QUP2-6 I2C/SPI RCGs: identical to QUP1 above except for the
+ * per-instance CMD_RCGR register and name.
+ */
+static struct rcg_clk blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP2_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup2_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp1_qup2_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP2_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup2_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup2_spi_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP3_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup3_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp1_qup3_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP3_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup3_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup3_spi_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP4_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup4_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp1_qup4_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP4_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup4_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup4_spi_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP5_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup5_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp1_qup5_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP5_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup5_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup5_spi_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP6_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup6_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp1_qup6_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_QUP6_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_qup6_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp1_qup6_spi_apps_clk_src.c),
+ },
+};
+
+/*
+ * UART baud-rate table shared by all BLSP UART RCGs: M/N fractions of
+ * gpll0 produce the standard baud multiples (3.6864 MHz family etc.).
+ */
+static struct clk_freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
+ F( 3686400, gpll0_out_main, 1, 96, 15625),
+ F( 7372800, gpll0_out_main, 1, 192, 15625),
+ F( 14745600, gpll0_out_main, 1, 384, 15625),
+ F( 16000000, gpll0_out_main, 5, 2, 15),
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 24000000, gpll0_out_main, 5, 1, 5),
+ F( 32000000, gpll0_out_main, 1, 4, 75),
+ F( 40000000, gpll0_out_main, 15, 0, 0),
+ F( 46400000, gpll0_out_main, 1, 29, 375),
+ F( 48000000, gpll0_out_main, 12.5, 0, 0),
+ F( 51200000, gpll0_out_main, 1, 32, 375),
+ F( 56000000, gpll0_out_main, 1, 7, 75),
+ F( 58982400, gpll0_out_main, 1, 1536, 15625),
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F( 63157895, gpll0_out_main, 9.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_UART1_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_uart1_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp1_uart1_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_UART2_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_uart2_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp1_uart2_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP1_UART3_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp1_uart3_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp1_uart3_apps_clk_src.c),
+ },
+};
+
+/*
+ * BLSP2 QUP1-6 and UART1-3 RCGs: mirror the BLSP1 instances above,
+ * differing only in CMD_RCGR register and name.
+ */
+static struct rcg_clk blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP1_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup1_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp2_qup1_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP1_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup1_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp2_qup1_spi_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP2_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup2_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp2_qup2_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP2_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup2_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp2_qup2_spi_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP3_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup3_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp2_qup3_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP3_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup3_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp2_qup3_spi_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP4_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup4_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp2_qup4_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP4_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup4_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp2_qup4_spi_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp2_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP5_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup5_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp2_qup5_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp2_qup5_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP5_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup5_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp2_qup5_spi_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp2_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP6_I2C_APPS_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup6_i2c_apps_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 50000000),
+ CLK_INIT(blsp2_qup6_i2c_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp2_qup6_spi_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_QUP6_SPI_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_qup_spi_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_qup6_spi_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 25000000,
+ NOMINAL, 50000000),
+ CLK_INIT(blsp2_qup6_spi_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_UART1_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_uart1_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp2_uart1_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_UART2_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_uart2_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp2_uart2_apps_clk_src.c),
+ },
+};
+
+static struct rcg_clk blsp2_uart3_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_BLSP2_UART3_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "blsp2_uart3_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 31580000,
+ NOMINAL, 63160000),
+ CLK_INIT(blsp2_uart3_apps_clk_src.c),
+ },
+};
+
+/* General-purpose (GP1-3) output clock RCGs, shared table. */
+static struct clk_freq_tbl ftbl_gp_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F( 100000000, gpll0_out_main, 6, 0, 0),
+ F( 200000000, gpll0_out_main, 3, 0, 0),
+ F_END
+};
+
+static struct rcg_clk gp1_clk_src = {
+ .cmd_rcgr_reg = GCC_GP1_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gp_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gp1_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+ NOMINAL, 200000000),
+ CLK_INIT(gp1_clk_src.c),
+ },
+};
+
+static struct rcg_clk gp2_clk_src = {
+ .cmd_rcgr_reg = GCC_GP2_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gp_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gp2_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+ NOMINAL, 200000000),
+ CLK_INIT(gp2_clk_src.c),
+ },
+};
+
+static struct rcg_clk gp3_clk_src = {
+ .cmd_rcgr_reg = GCC_GP3_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_gp_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gp3_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+ NOMINAL, 200000000),
+ CLK_INIT(gp3_clk_src.c),
+ },
+};
+
+/* HMSS RBCPR (CPU voltage-recalibration) clock: CXO only, no fmax map
+ * since it runs at the XO rate at all corners. */
+static struct clk_freq_tbl ftbl_hmss_rbcpr_clk_src[] = {
+ F( 19200000, cxo_clk_src_ao, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk hmss_rbcpr_clk_src = {
+ .cmd_rcgr_reg = GCC_HMSS_RBCPR_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_hmss_rbcpr_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "hmss_rbcpr_clk_src",
+ .ops = &clk_ops_rcg,
+ CLK_INIT(hmss_rbcpr_clk_src.c),
+ },
+};
+
+/* PDM2 (pulse-density modulation) clock source. */
+static struct clk_freq_tbl ftbl_pdm2_clk_src[] = {
+ F( 60000000, gpll0_out_main, 10, 0, 0),
+ F_END
+};
+
+static struct rcg_clk pdm2_clk_src = {
+ .cmd_rcgr_reg = GCC_PDM2_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "pdm2_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, LOW, 60000000),
+ CLK_INIT(pdm2_clk_src.c),
+ },
+};
+
+/* SDCC2 card clock: 144 kHz identification rate up to 200 MHz. */
+static struct clk_freq_tbl ftbl_sdcc2_apps_clk_src[] = {
+ F( 144000, cxo_clk_src, 16, 3, 25),
+ F( 400000, cxo_clk_src, 12, 1, 4),
+ F( 20000000, gpll0_out_main, 15, 1, 2),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F( 100000000, gpll0_out_main, 6, 0, 0),
+ F( 200000000, gpll0_out_main, 3, 0, 0),
+ F_END
+};
+
+static struct rcg_clk sdcc2_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_SDCC2_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_sdcc2_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "sdcc2_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 100000000,
+ NOMINAL, 200000000),
+ CLK_INIT(sdcc2_apps_clk_src.c),
+ },
+};
+
+/* SDCC4: same ladder as SDCC2 but capped at 100 MHz. */
+static struct clk_freq_tbl ftbl_sdcc4_apps_clk_src[] = {
+ F( 144000, cxo_clk_src, 16, 3, 25),
+ F( 400000, cxo_clk_src, 12, 1, 4),
+ F( 20000000, gpll0_out_main, 15, 1, 2),
+ F( 25000000, gpll0_out_main, 12, 1, 2),
+ F( 50000000, gpll0_out_main, 12, 0, 0),
+ F( 100000000, gpll0_out_main, 6, 0, 0),
+ F_END
+};
+
+static struct rcg_clk sdcc4_apps_clk_src = {
+ .cmd_rcgr_reg = GCC_SDCC4_APPS_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_sdcc4_apps_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "sdcc4_apps_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 50000000,
+ NOMINAL, 100000000),
+ CLK_INIT(sdcc4_apps_clk_src.c),
+ },
+};
+
+/* TSIF reference: ~105.495 kHz = 19.2 MHz * 1/182 via the MND counter. */
+static struct clk_freq_tbl ftbl_tsif_ref_clk_src[] = {
+ F( 105495, cxo_clk_src, 1, 1, 182),
+ F_END
+};
+
+static struct rcg_clk tsif_ref_clk_src = {
+ .cmd_rcgr_reg = GCC_TSIF_REF_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_tsif_ref_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "tsif_ref_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP1(LOWER, 105500),
+ CLK_INIT(tsif_ref_clk_src.c),
+ },
+};
+
+/* UFS inline-crypto-engine core clock. */
+static struct clk_freq_tbl ftbl_ufs_ice_core_clk_src[] = {
+ F( 75000000, gpll0_out_main, 8, 0, 0),
+ F( 150000000, gpll0_out_main, 4, 0, 0),
+ F( 300000000, gpll0_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk ufs_ice_core_clk_src = {
+ .cmd_rcgr_reg = GCC_UFS_ICE_CORE_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_ufs_ice_core_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "ufs_ice_core_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 75000000, LOW, 150000000,
+ NOMINAL, 300000000),
+ CLK_INIT(ufs_ice_core_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_ufs_phy_aux_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk ufs_phy_aux_clk_src = {
+ .cmd_rcgr_reg = GCC_UFS_PHY_AUX_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_ufs_phy_aux_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "ufs_phy_aux_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOWER, 19200000),
+ CLK_INIT(ufs_phy_aux_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_ufs_unipro_core_clk_src[] = {
+ F( 37500000, gpll0_out_main, 16, 0, 0),
+ F( 75000000, gpll0_out_main, 8, 0, 0),
+ F( 150000000, gpll0_out_main, 4, 0, 0),
+ F_END
+};
+
+static struct rcg_clk ufs_unipro_core_clk_src = {
+ .cmd_rcgr_reg = GCC_UFS_UNIPRO_CORE_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_ufs_unipro_core_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "ufs_unipro_core_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 37500000, LOW, 75000000,
+ NOMINAL, 150000000),
+ CLK_INIT(ufs_unipro_core_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
+ F( 19200000, cxo_clk_src, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk usb30_mock_utmi_clk_src = {
+ .cmd_rcgr_reg = GCC_USB30_MOCK_UTMI_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_usb30_mock_utmi_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "usb30_mock_utmi_clk_src",
+ .ops = &clk_ops_rcg,
+ /* NOTE(review): fmax entries (40/60 MHz) exceed the only
+  * table rate (19.2 MHz) — looks copied from another target;
+  * harmless but confirm against the clock plan. */
+ VDD_DIG_FMAX_MAP2(LOWER, 40000000, LOW, 60000000),
+ CLK_INIT(usb30_mock_utmi_clk_src.c),
+ },
+};
+
+/* USB3 PHY aux: 1.2 MHz from CXO/16. */
+static struct clk_freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
+ F( 1200000, cxo_clk_src, 16, 0, 0),
+ F_END
+};
+
+static struct rcg_clk usb3_phy_aux_clk_src = {
+ .cmd_rcgr_reg = GCC_USB3_PHY_AUX_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_usb3_phy_aux_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "usb3_phy_aux_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOWER, 19200000),
+ CLK_INIT(usb3_phy_aux_clk_src.c),
+ },
+};
+
+/* 300 MHz feed from the active-only gpll0 into the HMSS (CPU subsystem). */
+static struct clk_freq_tbl ftbl_hmss_gpll0_clk_src[] = {
+ F( 300000000, gpll0_ao, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk hmss_gpll0_clk_src = {
+ .cmd_rcgr_reg = GCC_HMSS_GPLL0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_hmss_gpll0_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "hmss_gpll0_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOWER, 600000000),
+ CLK_INIT(hmss_gpll0_clk_src.c),
+ },
+};
+
+/* QSPI reference: 256 MHz comes from gpll4 (384 MHz / 1.5). */
+static struct clk_freq_tbl ftbl_qspi_ref_clk_src[] = {
+ F( 75000000, gpll0_out_main, 8, 0, 0),
+ F( 150000000, gpll0_out_main, 4, 0, 0),
+ F( 256000000, gpll4_out_main, 1.5, 0, 0),
+ F( 300000000, gpll0_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk qspi_ref_clk_src = {
+ .cmd_rcgr_reg = GCC_QSPI_REF_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_qspi_ref_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "qspi_ref_clk_src",
+ .ops = &clk_ops_rcg,
+ /* NOTE(review): fmax corners (40/160.4/320.8 MHz) do not
+  * line up with the table rates above — verify against the
+  * clock plan for this target. */
+ VDD_DIG_FMAX_MAP3(LOWER, 40000000, LOW, 160400000,
+ NOMINAL, 320800000),
+ CLK_INIT(qspi_ref_clk_src.c),
+ },
+};
+
+/*
+ * Reference-clock enables for HDMI/PCIe/USB2/UFS/USB3 PHYs.
+ * NOTE(review): .cbcr_reg points at *_EN registers rather than CBCRs;
+ * presumably these expose a branch-compatible enable bit — confirm
+ * against the GCC register map. has_sibling=1 means no halt check on
+ * the rate path since other branches share the upstream source.
+ */
+static struct branch_clk gcc_hdmi_clkref_clk = {
+ .cbcr_reg = GCC_HDMI_CLKREF_EN,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_hdmi_clkref_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_hdmi_clkref_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_clkref_clk = {
+ .cbcr_reg = GCC_PCIE_CLKREF_EN,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_clkref_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_clkref_clk.c),
+ },
+};
+
+static struct branch_clk gcc_rx1_usb2_clkref_clk = {
+ .cbcr_reg = GCC_RX1_USB2_CLKREF_EN,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_rx1_usb2_clkref_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_rx1_usb2_clkref_clk.c),
+ },
+};
+
+static struct branch_clk gcc_ufs_clkref_clk = {
+ .cbcr_reg = GCC_UFS_CLKREF_EN,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_clkref_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ufs_clkref_clk.c),
+ },
+};
+
+static struct branch_clk gcc_usb3_clkref_clk = {
+ .cbcr_reg = GCC_USB3_CLKREF_EN,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb3_clkref_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb3_clkref_clk.c),
+ },
+};
+
+/*
+ * Block Control Register (BCR) reset handles for the USB3 PHYs, exposed
+ * to consumers as reset-only "clocks" via clk_ops_rst.
+ */
+static struct reset_clk gcc_usb3_phy_reset = {
+ .reset_reg = GCC_USB3_PHY_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb3_phy_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_usb3_phy_reset.c),
+ },
+};
+
+static struct reset_clk gcc_usb3phy_phy_reset = {
+ .reset_reg = GCC_USB3PHY_PHY_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb3phy_phy_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_usb3phy_phy_reset.c),
+ },
+};
+
+/*
+ * GPLL0 feed into the modem subsystem clock controller (MSSCC), voted
+ * through the APCS branch-enable vote register. Modeled as a gate_clk
+ * with a 1 us post-enable delay — presumably there is no halt bit to
+ * poll for this path; confirm against the GCC register map.
+ */
+static struct gate_clk gpll0_out_msscc = {
+ .en_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE_1,
+ .en_mask = BIT(2),
+ .delay_us = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gpll0_out_msscc",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gpll0_out_msscc.c),
+ },
+};
+
+/*
+ * Aggregated NoC (aggre1) AXI branches for UFS and USB3, plus the
+ * BIMC<->modem Q6 AXI bridge. The aggre1 branches track their
+ * respective RCG parents so rate propagation works through them.
+ */
+static struct branch_clk gcc_aggre1_ufs_axi_clk = {
+ .cbcr_reg = GCC_AGGRE1_UFS_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_aggre1_ufs_axi_clk",
+ .parent = &ufs_axi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_aggre1_ufs_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_aggre1_usb3_axi_clk = {
+ .cbcr_reg = GCC_AGGRE1_USB3_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_aggre1_usb3_axi_clk",
+ .parent = &usb30_master_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_aggre1_usb3_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_bimc_mss_q6_axi_clk = {
+ .cbcr_reg = GCC_BIMC_MSS_Q6_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_bimc_mss_q6_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_bimc_mss_q6_axi_clk.c),
+ },
+};
+
+/*
+ * BLSP1 serial engine clocks. The bus (AHB) clock is a shared, voted
+ * branch (APCS vote bit 17); each QUP I2C/SPI and UART core clock is a
+ * plain branch with has_sibling=0, so setting its rate reaches through
+ * to the matching *_clk_src RCG.
+ */
+static struct local_vote_clk gcc_blsp1_ahb_clk = {
+ .cbcr_reg = GCC_BLSP1_AHB_CBCR,
+ .bcr_reg = GCC_BLSP1_BCR,
+ .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
+ .en_mask = BIT(17),
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_blsp1_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup1_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP1_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent = &blsp1_qup1_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup1_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup1_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP1_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent = &blsp1_qup1_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup1_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup2_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP2_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent = &blsp1_qup2_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup2_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup2_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP2_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent = &blsp1_qup2_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup2_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup3_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP3_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent = &blsp1_qup3_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup3_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup3_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP3_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent = &blsp1_qup3_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup3_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup4_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP4_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent = &blsp1_qup4_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup4_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup4_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP4_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent = &blsp1_qup4_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup4_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup5_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP5_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent = &blsp1_qup5_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup5_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup5_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP5_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent = &blsp1_qup5_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup5_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup6_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP6_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent = &blsp1_qup6_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup6_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_qup6_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_QUP6_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent = &blsp1_qup6_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_qup6_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_uart1_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_UART1_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_uart1_apps_clk",
+ .parent = &blsp1_uart1_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_uart1_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_uart2_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_UART2_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_uart2_apps_clk",
+ .parent = &blsp1_uart2_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_uart2_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp1_uart3_apps_clk = {
+ .cbcr_reg = GCC_BLSP1_UART3_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp1_uart3_apps_clk",
+ .parent = &blsp1_uart3_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp1_uart3_apps_clk.c),
+ },
+};
+
+/*
+ * BLSP2 serial engine clocks — mirror of the BLSP1 set above, with the
+ * shared AHB bus clock voted via APCS bit 15.
+ */
+static struct local_vote_clk gcc_blsp2_ahb_clk = {
+ .cbcr_reg = GCC_BLSP2_AHB_CBCR,
+ .bcr_reg = GCC_BLSP2_BCR,
+ .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
+ .en_mask = BIT(15),
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_ahb_clk",
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_blsp2_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup1_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP1_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent = &blsp2_qup1_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup1_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup1_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP1_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent = &blsp2_qup1_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup1_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup2_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP2_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent = &blsp2_qup2_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup2_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup2_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP2_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent = &blsp2_qup2_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup2_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup3_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP3_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent = &blsp2_qup3_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup3_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup3_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP3_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent = &blsp2_qup3_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup3_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup4_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP4_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent = &blsp2_qup4_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup4_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup4_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP4_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent = &blsp2_qup4_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup4_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup5_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP5_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup5_i2c_apps_clk",
+ .parent = &blsp2_qup5_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup5_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup5_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP5_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup5_spi_apps_clk",
+ .parent = &blsp2_qup5_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup5_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup6_i2c_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP6_I2C_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup6_i2c_apps_clk",
+ .parent = &blsp2_qup6_i2c_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup6_i2c_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_qup6_spi_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_QUP6_SPI_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_qup6_spi_apps_clk",
+ .parent = &blsp2_qup6_spi_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_qup6_spi_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_uart1_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_UART1_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_uart1_apps_clk",
+ .parent = &blsp2_uart1_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_uart1_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_uart2_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_UART2_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_uart2_apps_clk",
+ .parent = &blsp2_uart2_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_uart2_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_blsp2_uart3_apps_clk = {
+ .cbcr_reg = GCC_BLSP2_UART3_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_blsp2_uart3_apps_clk",
+ .parent = &blsp2_uart3_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_blsp2_uart3_apps_clk.c),
+ },
+};
+
+/* Boot-ROM AHB access, voted via APCS bit 10. */
+static struct local_vote_clk gcc_boot_rom_ahb_clk = {
+ .cbcr_reg = GCC_BOOT_ROM_AHB_CBCR,
+ .bcr_reg = GCC_BOOT_ROM_BCR,
+ .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
+ .en_mask = BIT(10),
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_boot_rom_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_cfg_noc_usb3_axi_clk = {
+ .cbcr_reg = GCC_CFG_NOC_USB3_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_cfg_noc_usb3_axi_clk",
+ .parent = &usb30_master_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_cfg_noc_usb3_axi_clk.c),
+ },
+};
+
+/*
+ * BIMC<->GFX bridge: the GPU side may be power-collapsed, so only the
+ * enable bit is verified and no halt poll is done on disable.
+ */
+static struct branch_clk gcc_bimc_gfx_clk = {
+ .cbcr_reg = GCC_BIMC_GFX_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .no_halt_check_on_disable = true,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_bimc_gfx_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_bimc_gfx_clk.c),
+ },
+};
+
+/* General-purpose output clocks GP1-GP3, each fed by its own RCG. */
+static struct branch_clk gcc_gp1_clk = {
+ .cbcr_reg = GCC_GP1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_gp1_clk",
+ .parent = &gp1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_gp1_clk.c),
+ },
+};
+
+static struct branch_clk gcc_gp2_clk = {
+ .cbcr_reg = GCC_GP2_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_gp2_clk",
+ .parent = &gp2_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_gp2_clk.c),
+ },
+};
+
+static struct branch_clk gcc_gp3_clk = {
+ .cbcr_reg = GCC_GP3_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_gp3_clk",
+ .parent = &gp3_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_gp3_clk.c),
+ },
+};
+
+/*
+ * GCC-side GPU subsystem clocks. Bridges into the (power-collapsible)
+ * GPU domain skip the halt poll on disable and only check the enable
+ * bit, matching gcc_bimc_gfx_clk above.
+ */
+static struct branch_clk gcc_gpu_bimc_gfx_clk = {
+ .cbcr_reg = GCC_GPU_BIMC_GFX_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .no_halt_check_on_disable = true,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_gpu_bimc_gfx_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_gpu_bimc_gfx_clk.c),
+ },
+};
+
+static struct branch_clk gcc_gpu_bimc_gfx_src_clk = {
+ .cbcr_reg = GCC_GPU_BIMC_GFX_SRC_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_gpu_bimc_gfx_src_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_gpu_bimc_gfx_src_clk.c),
+ },
+};
+
+static struct branch_clk gcc_gpu_cfg_ahb_clk = {
+ .cbcr_reg = GCC_GPU_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .no_halt_check_on_disable = true,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_gpu_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_gpu_cfg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_gpu_snoc_dvm_gfx_clk = {
+ .cbcr_reg = GCC_GPU_SNOC_DVM_GFX_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_gpu_snoc_dvm_gfx_clk.c),
+ },
+};
+
+/* NOTE(review): register is GCC_GPU_IREF_EN (an enable reg, not a CBCR)
+ * yet modeled as branch_clk — same pattern as the *_clkref_clk set. */
+static struct branch_clk gcc_gpu_iref_clk = {
+ .cbcr_reg = GCC_GPU_IREF_EN,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_gpu_iref_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_gpu_iref_clk.c),
+ },
+};
+
+/*
+ * HMSS (CPU subsystem) clocks. The AXI/AHB/DVM paths are marked
+ * always_on because the application CPUs depend on them.
+ */
+static struct local_vote_clk gcc_bimc_hmss_axi_clk = {
+ .cbcr_reg = GCC_BIMC_HMSS_AXI_CBCR,
+ .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
+ .en_mask = BIT(22),
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_bimc_hmss_axi_clk",
+ .always_on = true,
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_bimc_hmss_axi_clk.c),
+ },
+};
+
+static struct local_vote_clk gcc_hmss_ahb_clk = {
+ .cbcr_reg = GCC_HMSS_AHB_CBCR,
+ .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
+ .en_mask = BIT(21),
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_hmss_ahb_clk",
+ .always_on = true,
+ .parent = &hmss_ahb_clk_src.c,
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_hmss_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_hmss_dvm_bus_clk = {
+ .cbcr_reg = GCC_HMSS_DVM_BUS_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_hmss_dvm_bus_clk",
+ .always_on = true,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_hmss_dvm_bus_clk.c),
+ },
+};
+
+static struct branch_clk gcc_hmss_rbcpr_clk = {
+ .cbcr_reg = GCC_HMSS_RBCPR_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_hmss_rbcpr_clk",
+ .parent = &hmss_rbcpr_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_hmss_rbcpr_clk.c),
+ },
+};
+
+/* GCC-owned bus paths into the multimedia subsystem (MMSS). */
+static struct branch_clk gcc_mmss_noc_cfg_ahb_clk = {
+ .cbcr_reg = GCC_MMSS_NOC_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_mmss_noc_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_mmss_noc_cfg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_mmss_sys_noc_axi_clk = {
+ .cbcr_reg = GCC_MMSS_SYS_NOC_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_mmss_sys_noc_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_mmss_sys_noc_axi_clk.c),
+ },
+};
+
+/*
+ * PCIe port 0 clocks and PHY resets. The pipe clock comes from the
+ * PHY, so its readiness is checked with a fixed DELAY rather than a
+ * halt-bit poll, and it carries the PHY BCR for reset on failure.
+ */
+static struct branch_clk gcc_pcie_0_aux_clk = {
+ .cbcr_reg = GCC_PCIE_0_AUX_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_0_aux_clk",
+ .parent = &pcie_aux_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_0_aux_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_0_cfg_ahb_clk = {
+ .cbcr_reg = GCC_PCIE_0_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_0_cfg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_0_mstr_axi_clk = {
+ .cbcr_reg = GCC_PCIE_0_MSTR_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_0_mstr_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_0_pipe_clk = {
+ .cbcr_reg = GCC_PCIE_0_PIPE_CBCR,
+ .bcr_reg = GCC_PCIE_0_PHY_BCR,
+ .has_sibling = 1,
+ .halt_check = DELAY,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_0_pipe_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_0_pipe_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_0_slv_axi_clk = {
+ .cbcr_reg = GCC_PCIE_0_SLV_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_0_slv_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pcie_phy_aux_clk = {
+ .cbcr_reg = GCC_PCIE_PHY_AUX_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_phy_aux_clk",
+ .parent = &pcie_aux_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pcie_phy_aux_clk.c),
+ },
+};
+
+/* PCIe PHY reset handles (BCRs) exposed through the clock API. */
+static struct reset_clk gcc_pcie_phy_reset = {
+ .reset_reg = GCC_PCIE_PHY_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_phy_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_pcie_phy_reset.c),
+ },
+};
+
+static struct reset_clk gcc_pcie_phy_com_reset = {
+ .reset_reg = GCC_PCIE_PHY_COM_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_phy_com_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_pcie_phy_com_reset.c),
+ },
+};
+
+static struct reset_clk gcc_pcie_phy_nocsr_com_phy_reset = {
+ .reset_reg = GCC_PCIE_PHY_NOCSR_COM_PHY_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pcie_phy_nocsr_com_phy_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_pcie_phy_nocsr_com_phy_reset.c),
+ },
+};
+
+/* PDM (pulse-density modulation) core + bus clocks, and PRNG AHB. */
+static struct branch_clk gcc_pdm2_clk = {
+ .cbcr_reg = GCC_PDM2_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pdm2_clk",
+ .parent = &pdm2_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pdm2_clk.c),
+ },
+};
+
+static struct branch_clk gcc_pdm_ahb_clk = {
+ .cbcr_reg = GCC_PDM_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_pdm_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_pdm_ahb_clk.c),
+ },
+};
+
+static struct local_vote_clk gcc_prng_ahb_clk = {
+ .cbcr_reg = GCC_PRNG_AHB_CBCR,
+ .bcr_reg = GCC_PRNG_BCR,
+ .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
+ .en_mask = BIT(13),
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_prng_ahb_clk",
+ .ops = &clk_ops_vote,
+ CLK_INIT(gcc_prng_ahb_clk.c),
+ },
+};
+
+/* SDCC2/SDCC4 card-controller clocks and TSIF (transport stream). */
+static struct branch_clk gcc_sdcc2_ahb_clk = {
+ .cbcr_reg = GCC_SDCC2_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sdcc2_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_sdcc2_apps_clk = {
+ .cbcr_reg = GCC_SDCC2_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sdcc2_apps_clk",
+ .parent = &sdcc2_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sdcc2_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_sdcc4_ahb_clk = {
+ .cbcr_reg = GCC_SDCC4_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sdcc4_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_sdcc4_apps_clk = {
+ .cbcr_reg = GCC_SDCC4_APPS_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_sdcc4_apps_clk",
+ .parent = &sdcc4_apps_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_sdcc4_apps_clk.c),
+ },
+};
+
+static struct branch_clk gcc_tsif_ahb_clk = {
+ .cbcr_reg = GCC_TSIF_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_tsif_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_tsif_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_tsif_ref_clk = {
+ .cbcr_reg = GCC_TSIF_REF_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_tsif_ref_clk",
+ .parent = &tsif_ref_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_tsif_ref_clk.c),
+ },
+};
+
+/*
+ * UFS controller clocks. Each *_hw_ctl_clk shares its CBCR with the
+ * corresponding branch and toggles hardware-control mode on that same
+ * register (clk_ops_branch_hw_ctl). The RX/TX symbol clocks come from
+ * the UFS PHY, so they are simple gates with a fixed 500 us settle
+ * delay instead of a pollable halt bit.
+ */
+static struct branch_clk gcc_ufs_ahb_clk = {
+ .cbcr_reg = GCC_UFS_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ufs_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_ufs_axi_clk = {
+ .cbcr_reg = GCC_UFS_AXI_CBCR,
+ .bcr_reg = GCC_UFS_BCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_axi_clk",
+ .parent = &ufs_axi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ufs_axi_clk.c),
+ },
+};
+
+static struct hw_ctl_clk gcc_ufs_axi_hw_ctl_clk = {
+ .cbcr_reg = GCC_UFS_AXI_CBCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_axi_hw_ctl_clk",
+ .parent = &gcc_ufs_axi_clk.c,
+ .ops = &clk_ops_branch_hw_ctl,
+ CLK_INIT(gcc_ufs_axi_hw_ctl_clk.c),
+ },
+};
+
+static struct branch_clk gcc_ufs_ice_core_clk = {
+ .cbcr_reg = GCC_UFS_ICE_CORE_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_ice_core_clk",
+ .parent = &ufs_ice_core_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ufs_ice_core_clk.c),
+ },
+};
+
+static struct hw_ctl_clk gcc_ufs_ice_core_hw_ctl_clk = {
+ .cbcr_reg = GCC_UFS_ICE_CORE_CBCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_ice_core_hw_ctl_clk",
+ .parent = &gcc_ufs_ice_core_clk.c,
+ .ops = &clk_ops_branch_hw_ctl,
+ CLK_INIT(gcc_ufs_ice_core_hw_ctl_clk.c),
+ },
+};
+
+static struct branch_clk gcc_ufs_phy_aux_clk = {
+ .cbcr_reg = GCC_UFS_PHY_AUX_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_phy_aux_clk",
+ .parent = &ufs_phy_aux_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ufs_phy_aux_clk.c),
+ },
+};
+
+static struct hw_ctl_clk gcc_ufs_phy_aux_hw_ctl_clk = {
+ .cbcr_reg = GCC_UFS_PHY_AUX_CBCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_phy_aux_hw_ctl_clk",
+ .parent = &gcc_ufs_phy_aux_clk.c,
+ .ops = &clk_ops_branch_hw_ctl,
+ CLK_INIT(gcc_ufs_phy_aux_hw_ctl_clk.c),
+ },
+};
+
+static struct gate_clk gcc_ufs_rx_symbol_0_clk = {
+ .en_reg = GCC_UFS_RX_SYMBOL_0_CBCR,
+ .en_mask = BIT(0),
+ .delay_us = 500,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_rx_symbol_0_clk",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gcc_ufs_rx_symbol_0_clk.c),
+ },
+};
+
+static struct gate_clk gcc_ufs_rx_symbol_1_clk = {
+ .en_reg = GCC_UFS_RX_SYMBOL_1_CBCR,
+ .en_mask = BIT(0),
+ .delay_us = 500,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_rx_symbol_1_clk",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gcc_ufs_rx_symbol_1_clk.c),
+ },
+};
+
+static struct gate_clk gcc_ufs_tx_symbol_0_clk = {
+ .en_reg = GCC_UFS_TX_SYMBOL_0_CBCR,
+ .en_mask = BIT(0),
+ .delay_us = 500,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_tx_symbol_0_clk",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gcc_ufs_tx_symbol_0_clk.c),
+ },
+};
+
+static struct branch_clk gcc_ufs_unipro_core_clk = {
+ .cbcr_reg = GCC_UFS_UNIPRO_CORE_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_unipro_core_clk",
+ .parent = &ufs_unipro_core_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_ufs_unipro_core_clk.c),
+ },
+};
+
+static struct hw_ctl_clk gcc_ufs_unipro_core_hw_ctl_clk = {
+ .cbcr_reg = GCC_UFS_UNIPRO_CORE_CBCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_ufs_unipro_core_hw_ctl_clk",
+ .parent = &gcc_ufs_unipro_core_clk.c,
+ .ops = &clk_ops_branch_hw_ctl,
+ CLK_INIT(gcc_ufs_unipro_core_hw_ctl_clk.c),
+ },
+};
+
+/*
+ * USB3.0 controller + PHY clocks. The master clock declares an
+ * explicit dependency on the CFG NoC USB3 AXI branch so the bus path
+ * is enabled first. The PHY pipe clock is a gate with a 50 us settle
+ * delay (PHY-sourced, no halt bit); QUSB2 PHY resets are plain BCRs.
+ */
+static struct branch_clk gcc_usb30_master_clk = {
+ .cbcr_reg = GCC_USB30_MASTER_CBCR,
+ .bcr_reg = GCC_USB_30_BCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb30_master_clk",
+ .parent = &usb30_master_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb30_master_clk.c),
+ .depends = &gcc_cfg_noc_usb3_axi_clk.c,
+ },
+};
+
+static struct branch_clk gcc_usb30_mock_utmi_clk = {
+ .cbcr_reg = GCC_USB30_MOCK_UTMI_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb30_mock_utmi_clk",
+ .parent = &usb30_mock_utmi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb30_mock_utmi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_usb30_sleep_clk = {
+ .cbcr_reg = GCC_USB30_SLEEP_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb30_sleep_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb30_sleep_clk.c),
+ },
+};
+
+static struct branch_clk gcc_usb3_phy_aux_clk = {
+ .cbcr_reg = GCC_USB3_PHY_AUX_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb3_phy_aux_clk",
+ .parent = &usb3_phy_aux_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb3_phy_aux_clk.c),
+ },
+};
+
+static struct gate_clk gcc_usb3_phy_pipe_clk = {
+ .en_reg = GCC_USB3_PHY_PIPE_CBCR,
+ .en_mask = BIT(0),
+ .delay_us = 50,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb3_phy_pipe_clk",
+ .ops = &clk_ops_gate,
+ CLK_INIT(gcc_usb3_phy_pipe_clk.c),
+ },
+};
+
+static struct reset_clk gcc_qusb2phy_prim_reset = {
+ .reset_reg = GCC_QUSB2PHY_PRIM_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_qusb2phy_prim_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_qusb2phy_prim_reset.c),
+ },
+};
+
+static struct reset_clk gcc_qusb2phy_sec_reset = {
+ .reset_reg = GCC_QUSB2PHY_SEC_BCR,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_qusb2phy_sec_reset",
+ .ops = &clk_ops_rst,
+ CLK_INIT(gcc_qusb2phy_sec_reset.c),
+ },
+};
+
+static struct branch_clk gcc_usb_phy_cfg_ahb2phy_clk = {
+ .cbcr_reg = GCC_USB_PHY_CFG_AHB2PHY_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_usb_phy_cfg_ahb2phy_clk.c),
+ },
+};
+
+/* Wireless subsystem (WCSS) bus clocks. */
+static struct branch_clk gcc_wcss_ahb_s0_clk = {
+ .cbcr_reg = GCC_WCSS_AHB_S0_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_wcss_ahb_s0_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_wcss_ahb_s0_clk.c),
+ },
+};
+
+static struct branch_clk gcc_wcss_axi_m_clk = {
+ .cbcr_reg = GCC_WCSS_AXI_M_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_wcss_axi_m_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_wcss_axi_m_clk.c),
+ },
+};
+
+static struct branch_clk gcc_wcss_ecahb_clk = {
+ .cbcr_reg = GCC_WCSS_ECAHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_wcss_ecahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_wcss_ecahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_wcss_shdreg_ahb_clk = {
+ .cbcr_reg = GCC_WCSS_SHDREG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_wcss_shdreg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_wcss_shdreg_ahb_clk.c),
+ },
+};
+
+/*
+ * Modem subsystem (MSS) bus clocks; the Q6<->BIMC AXI path is kept
+ * always_on. Trailing entry is the DCC (data capture/compare) AHB.
+ */
+static struct branch_clk gcc_mss_cfg_ahb_clk = {
+ .cbcr_reg = GCC_MSS_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_mss_cfg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_mss_q6_bimc_axi_clk = {
+ .cbcr_reg = GCC_MSS_Q6_BIMC_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_mss_q6_bimc_axi_clk",
+ .always_on = true,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_mss_q6_bimc_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_mss_mnoc_bimc_axi_clk = {
+ .cbcr_reg = GCC_MSS_MNOC_BIMC_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_mss_mnoc_bimc_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_mss_mnoc_bimc_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_mss_snoc_axi_clk = {
+ .cbcr_reg = GCC_MSS_SNOC_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_mss_snoc_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_mss_snoc_axi_clk.c),
+ },
+};
+
+static struct branch_clk gcc_dcc_ahb_clk = {
+ .cbcr_reg = GCC_DCC_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_dcc_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_dcc_ahb_clk.c),
+ },
+};
+
+/*
+ * HLOS1 vote registers for the LPASS SMMU clocks: other masters may
+ * also vote, so no halt check is done when dropping our vote.
+ */
+static struct branch_clk hlos1_vote_lpass_core_smmu_clk = {
+ .cbcr_reg = GCC_HLOS1_VOTE_LPASS_CORE_SMMU_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "hlos1_vote_lpass_core_smmu_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(hlos1_vote_lpass_core_smmu_clk.c),
+ },
+};
+
+static struct branch_clk hlos1_vote_lpass_adsp_smmu_clk = {
+ .cbcr_reg = GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "hlos1_vote_lpass_adsp_smmu_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(hlos1_vote_lpass_adsp_smmu_clk.c),
+ },
+};
+
+/* QSPI bus + reference-clock branches (ref fed by qspi_ref_clk_src). */
+static struct branch_clk gcc_qspi_ahb_clk = {
+ .cbcr_reg = GCC_QSPI_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_qspi_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_qspi_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gcc_qspi_ref_clk = {
+ .cbcr_reg = GCC_QSPI_REF_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gcc_qspi_ref_clk",
+ .parent = &qspi_ref_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gcc_qspi_ref_clk.c),
+ },
+};
+
+/* Forward declarations: the debug mux references its ops and vice versa. */
+static struct mux_clk gcc_debug_mux;
+static struct clk_ops clk_ops_debug_mux;
+
+/*
+ * Registers used by the frequency-measurement logic behind the debug
+ * mux: the ring-oscillator counter control/status pair, the XO/4
+ * reference branch, and the PLL test pad configuration.
+ */
+static struct measure_clk_data debug_mux_priv = {
+ .cxo = &cxo_clk_src.c,
+ .plltest_reg = PLLTEST_PAD_CFG,
+ .plltest_val = 0x51A00,
+ .xo_div4_cbcr = GCC_XO_DIV4_CBCR,
+ .ctl_reg = CLOCK_FRQ_MEASURE_CTL,
+ .status_reg = CLOCK_FRQ_MEASURE_STATUS,
+ .base = &virt_base,
+};
+
+/*
+ * Top-level GCC debug mux used for clock-rate measurement. Each hex value is
+ * the GCC debug-clock select code for that source. The gpu/gfx/mmss entries
+ * are themselves debug muxes (MUX_REC_SRC_LIST), so measuring one of their
+ * sub-clocks recurses into the downstream mux. Note gpu and gfx share the
+ * same top-level select (0x013d); they are distinguished downstream.
+ */
+static struct mux_clk gcc_debug_mux = {
+ .priv = &debug_mux_priv,
+ .ops = &mux_reg_ops,
+ .en_mask = BIT(16),
+ .mask = 0x3FF,
+ .base = &virt_dbgbase,
+ MUX_REC_SRC_LIST(
+ &gpu_gcc_debug_clk.c,
+ &gfx_gcc_debug_clk.c,
+ &debug_mmss_clk.c,
+ ),
+ MUX_SRC_LIST(
+ { &gpu_gcc_debug_clk.c, 0x013d },
+ { &gfx_gcc_debug_clk.c, 0x013d },
+ { &debug_mmss_clk.c, 0x0022 },
+ { &snoc_clk.c, 0x0000 },
+ { &cnoc_clk.c, 0x000e },
+ { &bimc_clk.c, 0x00a9 },
+ { &gcc_mmss_sys_noc_axi_clk.c, 0x001f },
+ { &gcc_mmss_noc_cfg_ahb_clk.c, 0x0020 },
+ { &gcc_usb30_master_clk.c, 0x003e },
+ { &gcc_usb30_sleep_clk.c, 0x003f },
+ { &gcc_usb30_mock_utmi_clk.c, 0x0040 },
+ { &gcc_usb3_phy_aux_clk.c, 0x0041 },
+ { &gcc_usb3_phy_pipe_clk.c, 0x0042 },
+ { &gcc_usb_phy_cfg_ahb2phy_clk.c, 0x0045 },
+ { &gcc_sdcc2_apps_clk.c, 0x0046 },
+ { &gcc_sdcc2_ahb_clk.c, 0x0047 },
+ { &gcc_sdcc4_apps_clk.c, 0x0048 },
+ { &gcc_sdcc4_ahb_clk.c, 0x0049 },
+ { &gcc_blsp1_ahb_clk.c, 0x004a },
+ { &gcc_blsp1_qup1_spi_apps_clk.c, 0x004c },
+ { &gcc_blsp1_qup1_i2c_apps_clk.c, 0x004d },
+ { &gcc_blsp1_uart1_apps_clk.c, 0x004e },
+ { &gcc_blsp1_qup2_spi_apps_clk.c, 0x0050 },
+ { &gcc_blsp1_qup2_i2c_apps_clk.c, 0x0051 },
+ { &gcc_blsp1_uart2_apps_clk.c, 0x0052 },
+ { &gcc_blsp1_qup3_spi_apps_clk.c, 0x0054 },
+ { &gcc_blsp1_qup3_i2c_apps_clk.c, 0x0055 },
+ { &gcc_blsp1_uart3_apps_clk.c, 0x0056 },
+ { &gcc_blsp1_qup4_spi_apps_clk.c, 0x0058 },
+ { &gcc_blsp1_qup4_i2c_apps_clk.c, 0x0059 },
+ { &gcc_blsp1_qup5_spi_apps_clk.c, 0x005a },
+ { &gcc_blsp1_qup5_i2c_apps_clk.c, 0x005b },
+ { &gcc_blsp1_qup6_spi_apps_clk.c, 0x005c },
+ { &gcc_blsp1_qup6_i2c_apps_clk.c, 0x005d },
+ { &gcc_blsp2_ahb_clk.c, 0x005e },
+ { &gcc_blsp2_qup1_spi_apps_clk.c, 0x0060 },
+ { &gcc_blsp2_qup1_i2c_apps_clk.c, 0x0061 },
+ { &gcc_blsp2_uart1_apps_clk.c, 0x0062 },
+ { &gcc_blsp2_qup2_spi_apps_clk.c, 0x0064 },
+ { &gcc_blsp2_qup2_i2c_apps_clk.c, 0x0065 },
+ { &gcc_blsp2_uart2_apps_clk.c, 0x0066 },
+ { &gcc_blsp2_qup3_spi_apps_clk.c, 0x0068 },
+ { &gcc_blsp2_qup3_i2c_apps_clk.c, 0x0069 },
+ { &gcc_blsp2_uart3_apps_clk.c, 0x006a },
+ { &gcc_blsp2_qup4_spi_apps_clk.c, 0x006c },
+ { &gcc_blsp2_qup4_i2c_apps_clk.c, 0x006d },
+ { &gcc_blsp2_qup5_spi_apps_clk.c, 0x006e },
+ { &gcc_blsp2_qup5_i2c_apps_clk.c, 0x006f },
+ { &gcc_blsp2_qup6_spi_apps_clk.c, 0x0070 },
+ { &gcc_blsp2_qup6_i2c_apps_clk.c, 0x0071 },
+ { &gcc_pdm_ahb_clk.c, 0x0072 },
+ { &gcc_pdm2_clk.c, 0x0074},
+ { &gcc_prng_ahb_clk.c, 0x0075 },
+ { &gcc_tsif_ahb_clk.c, 0x0076 },
+ { &gcc_tsif_ref_clk.c, 0x0077 },
+ { &gcc_boot_rom_ahb_clk.c, 0x007a },
+ { &ce1_clk.c, 0x0097 },
+ { &gcc_ce1_axi_m_clk.c, 0x0098 },
+ { &gcc_ce1_ahb_m_clk.c, 0x0099 },
+ { &gcc_bimc_hmss_axi_clk.c, 0x00bb },
+ { &gcc_bimc_gfx_clk.c, 0x00ac },
+ { &gcc_hmss_ahb_clk.c, 0x00ba },
+ { &gcc_hmss_rbcpr_clk.c, 0x00bc },
+ { &gcc_gp1_clk.c, 0x00df },
+ { &gcc_gp2_clk.c, 0x00e0 },
+ { &gcc_gp3_clk.c, 0x00e1 },
+ { &gcc_pcie_0_slv_axi_clk.c, 0x00e2 },
+ { &gcc_pcie_0_mstr_axi_clk.c, 0x00e3 },
+ { &gcc_pcie_0_cfg_ahb_clk.c, 0x00e4 },
+ { &gcc_pcie_0_aux_clk.c, 0x00e5 },
+ { &gcc_pcie_0_pipe_clk.c, 0x00e6 },
+ { &gcc_pcie_phy_aux_clk.c, 0x00e8 },
+ { &gcc_ufs_axi_clk.c, 0x00ea },
+ { &gcc_ufs_ahb_clk.c, 0x00eb },
+ { &gcc_ufs_tx_symbol_0_clk.c, 0x00ec },
+ { &gcc_ufs_rx_symbol_0_clk.c, 0x00ed },
+ { &gcc_ufs_rx_symbol_1_clk.c, 0x0162 },
+ { &gcc_ufs_unipro_core_clk.c, 0x00f0 },
+ { &gcc_ufs_ice_core_clk.c, 0x00f1 },
+ { &gcc_dcc_ahb_clk.c, 0x0119 },
+ { &ipa_clk.c, 0x011b },
+ { &gcc_mss_cfg_ahb_clk.c, 0x011f },
+ { &gcc_mss_q6_bimc_axi_clk.c, 0x0124 },
+ { &gcc_mss_mnoc_bimc_axi_clk.c, 0x0120 },
+ { &gcc_mss_snoc_axi_clk.c, 0x0123 },
+ { &gcc_gpu_cfg_ahb_clk.c, 0x013b },
+ { &gcc_gpu_bimc_gfx_src_clk.c, 0x013e },
+ { &gcc_gpu_bimc_gfx_clk.c, 0x013f },
+ { &gcc_qspi_ahb_clk.c, 0x0156 },
+ { &gcc_qspi_ref_clk.c, 0x0157 },
+ ),
+ .c = {
+ .dbg_name = "gcc_debug_mux",
+ .ops = &clk_ops_debug_mux,
+ .flags = CLKFLAG_NO_RATE_CACHE | CLKFLAG_MEASURE,
+ CLK_INIT(gcc_debug_mux.c),
+ },
+};
+
+/*
+ * Lookup table for the RPM-owned clocks (XO, bus/NOC aggregators, CE votes,
+ * board clocks and their pin-controlled/active-only variants). Registered
+ * early in probe, before the GCC-owned clocks, so that bus scaling and the
+ * CXO parent are available.
+ */
+static struct clk_lookup msm_clocks_rpm_cobalt[] = {
+ CLK_LIST(cxo_clk_src),
+ CLK_LIST(bimc_clk),
+ CLK_LIST(bimc_a_clk),
+ CLK_LIST(cnoc_clk),
+ CLK_LIST(cnoc_a_clk),
+ CLK_LIST(snoc_clk),
+ CLK_LIST(snoc_a_clk),
+ CLK_LIST(cnoc_periph_clk),
+ CLK_LIST(cnoc_periph_a_clk),
+ CLK_LIST(cnoc_periph_keepalive_a_clk),
+ CLK_LIST(bimc_msmbus_clk),
+ CLK_LIST(bimc_msmbus_a_clk),
+ CLK_LIST(ce1_clk),
+ CLK_LIST(ce1_a_clk),
+ CLK_LIST(cnoc_msmbus_clk),
+ CLK_LIST(cnoc_msmbus_a_clk),
+ CLK_LIST(cxo_clk_src_ao),
+ CLK_LIST(cxo_dwc3_clk),
+ CLK_LIST(cxo_lpm_clk),
+ CLK_LIST(cxo_otg_clk),
+ CLK_LIST(cxo_pil_lpass_clk),
+ CLK_LIST(cxo_pil_ssc_clk),
+ CLK_LIST(cxo_pil_spss_clk),
+ CLK_LIST(div_clk1),
+ CLK_LIST(div_clk1_ao),
+ CLK_LIST(div_clk2),
+ CLK_LIST(div_clk2_ao),
+ CLK_LIST(div_clk3),
+ CLK_LIST(div_clk3_ao),
+ CLK_LIST(ipa_clk),
+ CLK_LIST(ipa_a_clk),
+ CLK_LIST(ln_bb_clk1),
+ CLK_LIST(ln_bb_clk1_ao),
+ CLK_LIST(ln_bb_clk1_pin),
+ CLK_LIST(ln_bb_clk1_pin_ao),
+ CLK_LIST(ln_bb_clk2),
+ CLK_LIST(ln_bb_clk2_ao),
+ CLK_LIST(ln_bb_clk2_pin),
+ CLK_LIST(ln_bb_clk2_pin_ao),
+ CLK_LIST(ln_bb_clk3),
+ CLK_LIST(ln_bb_clk3_ao),
+ CLK_LIST(ln_bb_clk3_pin),
+ CLK_LIST(ln_bb_clk3_pin_ao),
+ CLK_LIST(mcd_ce1_clk),
+ CLK_LIST(measure_only_bimc_hmss_axi_clk),
+ CLK_LIST(mmssnoc_axi_clk),
+ CLK_LIST(mmssnoc_axi_a_clk),
+ CLK_LIST(aggre1_noc_clk),
+ CLK_LIST(aggre1_noc_a_clk),
+ CLK_LIST(aggre2_noc_clk),
+ CLK_LIST(aggre2_noc_a_clk),
+ CLK_LIST(qcedev_ce1_clk),
+ CLK_LIST(qcrypto_ce1_clk),
+ CLK_LIST(qdss_clk),
+ CLK_LIST(qdss_a_clk),
+ CLK_LIST(qseecom_ce1_clk),
+ CLK_LIST(rf_clk1),
+ CLK_LIST(rf_clk1_ao),
+ CLK_LIST(rf_clk1_pin),
+ CLK_LIST(rf_clk1_pin_ao),
+ CLK_LIST(rf_clk2),
+ CLK_LIST(rf_clk2_ao),
+ CLK_LIST(rf_clk2_pin),
+ CLK_LIST(rf_clk2_pin_ao),
+ CLK_LIST(rf_clk3),
+ CLK_LIST(rf_clk3_ao),
+ CLK_LIST(rf_clk3_pin),
+ CLK_LIST(rf_clk3_pin_ao),
+ CLK_LIST(scm_ce1_clk),
+ CLK_LIST(snoc_msmbus_clk),
+ CLK_LIST(snoc_msmbus_a_clk),
+ CLK_LIST(gcc_ce1_ahb_m_clk),
+ CLK_LIST(gcc_ce1_axi_m_clk),
+};
+
+/*
+ * Lookup table for the GCC-owned clocks and resets: PLLs, RCG sources
+ * (*_clk_src), branch clocks, voteable clocks, PHY resets and clkref
+ * clocks. Registered after the RPM table and after the v1/v2 fixups have
+ * been applied, so entries stubbed by a fixup register with dummy ops.
+ */
+static struct clk_lookup msm_clocks_gcc_cobalt[] = {
+ CLK_LIST(gpll0),
+ CLK_LIST(gpll0_ao),
+ CLK_LIST(gpll0_out_main),
+ CLK_LIST(gcc_mmss_gpll0_div_clk),
+ CLK_LIST(gpll4),
+ CLK_LIST(gpll4_out_main),
+ CLK_LIST(hmss_ahb_clk_src),
+ CLK_LIST(usb30_master_clk_src),
+ CLK_LIST(pcie_aux_clk_src),
+ CLK_LIST(ufs_axi_clk_src),
+ CLK_LIST(blsp1_qup1_i2c_apps_clk_src),
+ CLK_LIST(blsp1_qup1_spi_apps_clk_src),
+ CLK_LIST(blsp1_qup2_i2c_apps_clk_src),
+ CLK_LIST(blsp1_qup2_spi_apps_clk_src),
+ CLK_LIST(blsp1_qup3_i2c_apps_clk_src),
+ CLK_LIST(blsp1_qup3_spi_apps_clk_src),
+ CLK_LIST(blsp1_qup4_i2c_apps_clk_src),
+ CLK_LIST(blsp1_qup4_spi_apps_clk_src),
+ CLK_LIST(blsp1_qup5_i2c_apps_clk_src),
+ CLK_LIST(blsp1_qup5_spi_apps_clk_src),
+ CLK_LIST(blsp1_qup6_i2c_apps_clk_src),
+ CLK_LIST(blsp1_qup6_spi_apps_clk_src),
+ CLK_LIST(blsp1_uart1_apps_clk_src),
+ CLK_LIST(blsp1_uart2_apps_clk_src),
+ CLK_LIST(blsp1_uart3_apps_clk_src),
+ CLK_LIST(blsp2_qup1_i2c_apps_clk_src),
+ CLK_LIST(blsp2_qup1_spi_apps_clk_src),
+ CLK_LIST(blsp2_qup2_i2c_apps_clk_src),
+ CLK_LIST(blsp2_qup2_spi_apps_clk_src),
+ CLK_LIST(blsp2_qup3_i2c_apps_clk_src),
+ CLK_LIST(blsp2_qup3_spi_apps_clk_src),
+ CLK_LIST(blsp2_qup4_i2c_apps_clk_src),
+ CLK_LIST(blsp2_qup4_spi_apps_clk_src),
+ CLK_LIST(blsp2_qup5_i2c_apps_clk_src),
+ CLK_LIST(blsp2_qup5_spi_apps_clk_src),
+ CLK_LIST(blsp2_qup6_i2c_apps_clk_src),
+ CLK_LIST(blsp2_qup6_spi_apps_clk_src),
+ CLK_LIST(blsp2_uart1_apps_clk_src),
+ CLK_LIST(blsp2_uart2_apps_clk_src),
+ CLK_LIST(blsp2_uart3_apps_clk_src),
+ CLK_LIST(gp1_clk_src),
+ CLK_LIST(gp2_clk_src),
+ CLK_LIST(gp3_clk_src),
+ CLK_LIST(hmss_rbcpr_clk_src),
+ CLK_LIST(pdm2_clk_src),
+ CLK_LIST(sdcc2_apps_clk_src),
+ CLK_LIST(sdcc4_apps_clk_src),
+ CLK_LIST(tsif_ref_clk_src),
+ CLK_LIST(ufs_ice_core_clk_src),
+ CLK_LIST(ufs_phy_aux_clk_src),
+ CLK_LIST(ufs_unipro_core_clk_src),
+ CLK_LIST(usb30_mock_utmi_clk_src),
+ CLK_LIST(usb3_phy_aux_clk_src),
+ CLK_LIST(hmss_gpll0_clk_src),
+ CLK_LIST(qspi_ref_clk_src),
+ CLK_LIST(gcc_usb3_phy_reset),
+ CLK_LIST(gcc_usb3phy_phy_reset),
+ CLK_LIST(gcc_qusb2phy_prim_reset),
+ CLK_LIST(gcc_qusb2phy_sec_reset),
+ CLK_LIST(gpll0_out_msscc),
+ CLK_LIST(gcc_aggre1_ufs_axi_clk),
+ CLK_LIST(gcc_aggre1_usb3_axi_clk),
+ CLK_LIST(gcc_bimc_mss_q6_axi_clk),
+ CLK_LIST(gcc_blsp1_ahb_clk),
+ CLK_LIST(gcc_blsp1_qup1_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_qup1_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup2_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_qup2_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup3_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_qup3_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup4_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_qup4_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup5_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_qup5_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_qup6_i2c_apps_clk),
+ CLK_LIST(gcc_blsp1_qup6_spi_apps_clk),
+ CLK_LIST(gcc_blsp1_uart1_apps_clk),
+ CLK_LIST(gcc_blsp1_uart2_apps_clk),
+ CLK_LIST(gcc_blsp1_uart3_apps_clk),
+ CLK_LIST(gcc_blsp2_ahb_clk),
+ CLK_LIST(gcc_blsp2_qup1_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_qup1_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup2_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_qup2_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup3_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_qup3_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup4_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_qup4_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup5_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_qup5_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_qup6_i2c_apps_clk),
+ CLK_LIST(gcc_blsp2_qup6_spi_apps_clk),
+ CLK_LIST(gcc_blsp2_uart1_apps_clk),
+ CLK_LIST(gcc_blsp2_uart2_apps_clk),
+ CLK_LIST(gcc_blsp2_uart3_apps_clk),
+ CLK_LIST(gcc_cfg_noc_usb3_axi_clk),
+ CLK_LIST(gcc_bimc_gfx_clk),
+ CLK_LIST(gcc_gp1_clk),
+ CLK_LIST(gcc_gp2_clk),
+ CLK_LIST(gcc_gp3_clk),
+ CLK_LIST(gcc_gpu_bimc_gfx_clk),
+ CLK_LIST(gcc_gpu_bimc_gfx_src_clk),
+ CLK_LIST(gcc_gpu_cfg_ahb_clk),
+ CLK_LIST(gcc_gpu_snoc_dvm_gfx_clk),
+ CLK_LIST(gcc_gpu_iref_clk),
+ CLK_LIST(gcc_bimc_hmss_axi_clk),
+ CLK_LIST(gcc_hmss_ahb_clk),
+ CLK_LIST(gcc_hmss_dvm_bus_clk),
+ CLK_LIST(gcc_hmss_rbcpr_clk),
+ CLK_LIST(gcc_mmss_noc_cfg_ahb_clk),
+ CLK_LIST(gcc_mmss_sys_noc_axi_clk),
+ CLK_LIST(gcc_pcie_0_aux_clk),
+ CLK_LIST(gcc_pcie_0_cfg_ahb_clk),
+ CLK_LIST(gcc_pcie_0_mstr_axi_clk),
+ CLK_LIST(gcc_pcie_0_pipe_clk),
+ CLK_LIST(gcc_pcie_0_slv_axi_clk),
+ CLK_LIST(gcc_pcie_phy_aux_clk),
+ CLK_LIST(gcc_pcie_phy_reset),
+ CLK_LIST(gcc_pcie_phy_com_reset),
+ CLK_LIST(gcc_pcie_phy_nocsr_com_phy_reset),
+ CLK_LIST(gcc_pdm2_clk),
+ CLK_LIST(gcc_pdm_ahb_clk),
+ CLK_LIST(gcc_sdcc2_ahb_clk),
+ CLK_LIST(gcc_sdcc2_apps_clk),
+ CLK_LIST(gcc_sdcc4_ahb_clk),
+ CLK_LIST(gcc_sdcc4_apps_clk),
+ CLK_LIST(gcc_tsif_ahb_clk),
+ CLK_LIST(gcc_tsif_ref_clk),
+ CLK_LIST(gcc_ufs_ahb_clk),
+ CLK_LIST(gcc_ufs_axi_clk),
+ CLK_LIST(gcc_ufs_axi_hw_ctl_clk),
+ CLK_LIST(gcc_ufs_ice_core_clk),
+ CLK_LIST(gcc_ufs_ice_core_hw_ctl_clk),
+ CLK_LIST(gcc_ufs_phy_aux_clk),
+ CLK_LIST(gcc_ufs_phy_aux_hw_ctl_clk),
+ CLK_LIST(gcc_ufs_rx_symbol_0_clk),
+ CLK_LIST(gcc_ufs_rx_symbol_1_clk),
+ CLK_LIST(gcc_ufs_tx_symbol_0_clk),
+ CLK_LIST(gcc_ufs_unipro_core_clk),
+ CLK_LIST(gcc_ufs_unipro_core_hw_ctl_clk),
+ CLK_LIST(gcc_usb30_master_clk),
+ CLK_LIST(gcc_usb30_mock_utmi_clk),
+ CLK_LIST(gcc_usb30_sleep_clk),
+ CLK_LIST(gcc_usb3_phy_aux_clk),
+ CLK_LIST(gcc_usb3_phy_pipe_clk),
+ CLK_LIST(gcc_usb_phy_cfg_ahb2phy_clk),
+ CLK_LIST(gcc_prng_ahb_clk),
+ CLK_LIST(gcc_boot_rom_ahb_clk),
+ CLK_LIST(gcc_wcss_ahb_s0_clk),
+ CLK_LIST(gcc_wcss_axi_m_clk),
+ CLK_LIST(gcc_wcss_ecahb_clk),
+ CLK_LIST(gcc_wcss_shdreg_ahb_clk),
+ CLK_LIST(gcc_mss_cfg_ahb_clk),
+ CLK_LIST(gcc_mss_q6_bimc_axi_clk),
+ CLK_LIST(gcc_mss_mnoc_bimc_axi_clk),
+ CLK_LIST(gcc_mss_snoc_axi_clk),
+ CLK_LIST(gcc_hdmi_clkref_clk),
+ CLK_LIST(gcc_pcie_clkref_clk),
+ CLK_LIST(gcc_rx1_usb2_clkref_clk),
+ CLK_LIST(gcc_ufs_clkref_clk),
+ CLK_LIST(gcc_usb3_clkref_clk),
+ CLK_LIST(gcc_dcc_ahb_clk),
+ CLK_LIST(hlos1_vote_lpass_core_smmu_clk),
+ CLK_LIST(hlos1_vote_lpass_adsp_smmu_clk),
+ CLK_LIST(gcc_qspi_ahb_clk),
+ CLK_LIST(gcc_qspi_ref_clk),
+};
+
+/*
+ * Chip-revision fixups, applied before the GCC table is registered.
+ * v1 additionally stubs out gcc_ufs_rx_symbol_1_clk; both revisions stub
+ * the QSPI clocks with dummy ops so clients see no-op clock operations.
+ */
+static void msm_gcc_cobalt_v1_fixup(void)
+{
+ gcc_ufs_rx_symbol_1_clk.c.ops = &clk_ops_dummy;
+ qspi_ref_clk_src.c.ops = &clk_ops_dummy;
+ gcc_qspi_ref_clk.c.ops = &clk_ops_dummy;
+ gcc_qspi_ahb_clk.c.ops = &clk_ops_dummy;
+}
+
+static void msm_gcc_cobalt_v2_fixup(void)
+{
+ qspi_ref_clk_src.c.ops = &clk_ops_dummy;
+ gcc_qspi_ref_clk.c.ops = &clk_ops_dummy;
+ gcc_qspi_ahb_clk.c.ops = &clk_ops_dummy;
+}
+
+/*
+ * Probe: map the GCC register space, enable HMSS AHB voting, register the
+ * RPM and GCC clock tables (with revision fixups), and take keep-alive
+ * votes on clocks that must never drop while Linux runs.
+ *
+ * NOTE(review): the INT_MAX bimc vote taken at entry is never released on
+ * the error paths below -- verify this is intentional (probe failure here
+ * is likely fatal to boot anyway).
+ * NOTE(review): clk_set_rate()/clk_prepare_enable() return values for the
+ * keep-alive clocks are ignored; confirm best-effort is acceptable.
+ */
+static int msm_gcc_cobalt_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ u32 regval;
+ int ret;
+ bool is_v1 = 0, is_v2 = 0;
+
+ /* Max bandwidth vote so BIMC stays up during clock registration. */
+ ret = vote_bimc(&bimc_clk, INT_MAX);
+ if (ret < 0)
+ return ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get CC base\n");
+ return -EINVAL;
+ }
+
+ virt_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!virt_base) {
+ dev_err(&pdev->dev, "Failed to map in CC registers\n");
+ return -ENOMEM;
+ }
+
+ /* Set the HMSS_AHB_CLK_ENA bit to enable the hmss_ahb_clk */
+ regval = readl_relaxed(virt_base + GCC_APCS_CLOCK_BRANCH_ENA_VOTE);
+ regval |= BIT(21);
+ writel_relaxed(regval, virt_base + GCC_APCS_CLOCK_BRANCH_ENA_VOTE);
+
+ /*
+ * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be
+ * turned off by hardware during certain apps low power modes.
+ */
+ regval = readl_relaxed(virt_base + GCC_APCS_CLOCK_SLEEP_ENA_VOTE);
+ regval |= BIT(21);
+ writel_relaxed(regval, virt_base + GCC_APCS_CLOCK_SLEEP_ENA_VOTE);
+
+ vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
+ if (IS_ERR(vdd_dig.regulator[0])) {
+ /* -EPROBE_DEFER is expected; only log real failures. */
+ if (!(PTR_ERR(vdd_dig.regulator[0]) == -EPROBE_DEFER))
+ dev_err(&pdev->dev,
+ "Unable to get vdd_dig regulator\n");
+ return PTR_ERR(vdd_dig.regulator[0]);
+ }
+
+ bimc_clk.c.parent = &cxo_clk_src.c;
+ ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_rpm_cobalt,
+ ARRAY_SIZE(msm_clocks_rpm_cobalt));
+ if (ret)
+ return ret;
+
+ ret = enable_rpm_scaling();
+ if (ret < 0)
+ return ret;
+
+ /* Apply revision fixups before registering the GCC table. */
+ is_v1 = of_device_is_compatible(pdev->dev.of_node, "qcom,gcc-cobalt");
+ if (is_v1)
+ msm_gcc_cobalt_v1_fixup();
+
+ is_v2 = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,gcc-cobalt-v2");
+ if (is_v2)
+ msm_gcc_cobalt_v2_fixup();
+
+ ret = of_msm_clock_register(pdev->dev.of_node, msm_clocks_gcc_cobalt,
+ ARRAY_SIZE(msm_clocks_gcc_cobalt));
+ if (ret)
+ return ret;
+
+ /* Hold an active set vote for the cnoc_periph resource */
+ clk_set_rate(&cnoc_periph_keepalive_a_clk.c, 19200000);
+ clk_prepare_enable(&cnoc_periph_keepalive_a_clk.c);
+
+ /* This clock is used for all MMSSCC register access */
+ clk_prepare_enable(&gcc_mmss_noc_cfg_ahb_clk.c);
+
+ /* This clock is used for all GPUCC register access */
+ clk_prepare_enable(&gcc_gpu_cfg_ahb_clk.c);
+
+ /* Keep an active vote on CXO in case no other driver votes for it */
+ clk_prepare_enable(&cxo_clk_src_ao.c);
+
+ clk_set_flags(&gcc_gpu_bimc_gfx_clk.c, CLKFLAG_RETAIN_MEM);
+
+ dev_info(&pdev->dev, "Registered GCC clocks\n");
+ return 0;
+}
+
+/* Matches cobalt v1/v2 and hamster GCC nodes; one probe handles all. */
+static struct of_device_id msm_clock_gcc_match_table[] = {
+ { .compatible = "qcom,gcc-cobalt" },
+ { .compatible = "qcom,gcc-cobalt-v2" },
+ { .compatible = "qcom,gcc-hamster" },
+ {}
+};
+
+static struct platform_driver msm_clock_gcc_driver = {
+ .probe = msm_gcc_cobalt_probe,
+ .driver = {
+ .name = "qcom,gcc-cobalt",
+ .of_match_table = msm_clock_gcc_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+/* arch_initcall: GCC clocks must exist before most device probes. */
+int __init msm_gcc_cobalt_init(void)
+{
+ return platform_driver_register(&msm_clock_gcc_driver);
+}
+arch_initcall(msm_gcc_cobalt_init);
+
+/* ======== Clock Debug Controller ======== */
+/*
+ * Debug-controller lookup table: the external debug clocks plus the top
+ * level measurement mux, exported under the "measure" con_id.
+ */
+static struct clk_lookup msm_clocks_measure_cobalt[] = {
+ CLK_LIST(gpu_gcc_debug_clk),
+ CLK_LIST(gfx_gcc_debug_clk),
+ CLK_LIST(debug_mmss_clk),
+ CLK_LOOKUP_OF("measure", gcc_debug_mux, "debug"),
+};
+
+static struct of_device_id msm_clock_debug_match_table[] = {
+ { .compatible = "qcom,cc-debug-cobalt" },
+ {}
+};
+
+/*
+ * Debug-controller probe: build clk_ops_debug_mux from the generic mux ops
+ * with measure_get_rate as get_rate, map the debug register space, wire up
+ * the external GPU/GFX/MMSS debug clocks by clk_id, and register the table.
+ */
+static int msm_clock_debug_cobalt_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret;
+
+ /* Generic mux ops, but rates are read back via HW measurement. */
+ clk_ops_debug_mux = clk_ops_gen_mux;
+ clk_ops_debug_mux.get_rate = measure_get_rate;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get CC base\n");
+ return -EINVAL;
+ }
+ virt_dbgbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!virt_dbgbase) {
+ dev_err(&pdev->dev, "Failed to map in CC registers\n");
+ return -ENOMEM;
+ }
+
+ /* External debug clocks are resolved lazily by dev + clk_id. */
+ gpu_gcc_debug_clk.dev = &pdev->dev;
+ gpu_gcc_debug_clk.clk_id = "debug_gpu_clk";
+
+ gfx_gcc_debug_clk.dev = &pdev->dev;
+ gfx_gcc_debug_clk.clk_id = "debug_gfx_clk";
+
+ debug_mmss_clk.dev = &pdev->dev;
+ debug_mmss_clk.clk_id = "debug_mmss_clk";
+
+ ret = of_msm_clock_register(pdev->dev.of_node,
+ msm_clocks_measure_cobalt,
+ ARRAY_SIZE(msm_clocks_measure_cobalt));
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "Registered debug mux\n");
+ return ret;
+}
+
+static struct platform_driver msm_clock_debug_driver = {
+ .probe = msm_clock_debug_cobalt_probe,
+ .driver = {
+ .name = "qcom,cc-debug-cobalt",
+ .of_match_table = msm_clock_debug_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+/* late_initcall: the measured clocks must all be registered first. */
+int __init msm_clock_debug_cobalt_init(void)
+{
+ return platform_driver_register(&msm_clock_debug_driver);
+}
+late_initcall(msm_clock_debug_cobalt_init);
diff --git a/drivers/clk/msm/clock-generic.c b/drivers/clk/msm/clock-generic.c
new file mode 100644
index 000000000000..eeb940e434cf
--- /dev/null
+++ b/drivers/clk/msm/clock-generic.c
@@ -0,0 +1,917 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+/* ==================== Mux clock ==================== */
+
+/* Map a parent clk to this mux's HW select value; negative if unknown. */
+static int mux_parent_to_src_sel(struct mux_clk *mux, struct clk *p)
+{
+ return parent_to_src_sel(mux->parents, mux->num_parents, p);
+}
+
+/*
+ * Switch the mux to parent p. If p is not a direct source, try to route it
+ * through one of the rec_parents (sources that are themselves muxes) by
+ * asking each to adopt p. On success the clk framework's pre/post reparent
+ * hooks migrate prepare/enable counts and the cached rate is refreshed.
+ */
+static int mux_set_parent(struct clk *c, struct clk *p)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+ int sel = mux_parent_to_src_sel(mux, p);
+ struct clk *old_parent;
+ int rc = 0, i;
+ unsigned long flags;
+
+ if (sel < 0 && mux->rec_parents) {
+ for (i = 0; i < mux->num_rec_parents; i++) {
+ rc = clk_set_parent(mux->rec_parents[i], p);
+ if (!rc) {
+ /*
+ * This is necessary to ensure prepare/enable
+ * counts get propagated correctly.
+ */
+ p = mux->rec_parents[i];
+ sel = mux_parent_to_src_sel(mux, p);
+ break;
+ }
+ }
+ }
+
+ if (sel < 0)
+ return sel;
+
+ rc = __clk_pre_reparent(c, p, &flags);
+ if (rc)
+ goto out;
+
+ rc = mux->ops->set_mux_sel(mux, sel);
+ if (rc)
+ goto set_fail;
+
+ old_parent = c->parent;
+ c->parent = p;
+ c->rate = clk_get_rate(p);
+ __clk_post_reparent(c, old_parent, &flags);
+
+ return 0;
+
+set_fail:
+ /* Undo the pre-reparent bookkeeping against the new parent p. */
+ __clk_post_reparent(c, p, &flags);
+out:
+ return rc;
+}
+
+/*
+ * Round rate across all parents: ask each parent to round, keep the best
+ * achievable rate (per is_better_rate). -EINVAL if no parent can deliver.
+ */
+static long mux_round_rate(struct clk *c, unsigned long rate)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+ int i;
+ unsigned long prate, rrate = 0;
+
+ for (i = 0; i < mux->num_parents; i++) {
+ prate = clk_round_rate(mux->parents[i].src, rate);
+ if (is_better_rate(rate, rrate, prate))
+ rrate = prate;
+ }
+ if (!rrate)
+ return -EINVAL;
+
+ return rrate;
+}
+
+/*
+ * Set the mux rate by picking a parent that can supply it, optionally
+ * detouring through a safe parent while the target parent retunes.
+ * Failure paths restore the previous parent rate and mux selection.
+ */
+static int mux_set_rate(struct clk *c, unsigned long rate)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+ struct clk *new_parent = NULL;
+ int rc = 0, i;
+ unsigned long new_par_curr_rate;
+ unsigned long flags;
+
+ /*
+ * Check if one of the possible parents is already at the requested
+ * rate.
+ */
+ for (i = 0; i < mux->num_parents && mux->try_get_rate; i++) {
+ struct clk *p = mux->parents[i].src;
+ if (p->rate == rate && clk_round_rate(p, rate) == rate) {
+ new_parent = mux->parents[i].src;
+ break;
+ }
+ }
+
+ /* Otherwise pick the first parent that can be tuned to the rate
+ * (with try_new_parent, prefer one that differs from the current). */
+ for (i = 0; i < mux->num_parents && !(!i && new_parent); i++) {
+ if (clk_round_rate(mux->parents[i].src, rate) == rate) {
+ new_parent = mux->parents[i].src;
+ if (!mux->try_new_parent)
+ break;
+ if (mux->try_new_parent && new_parent != c->parent)
+ break;
+ }
+ }
+
+ if (new_parent == NULL)
+ return -EINVAL;
+
+ /*
+ * Switch to safe parent since the old and new parent might be the
+ * same and the parent might temporarily turn off while switching
+ * rates. If the mux can switch between distinct sources safely
+ * (indicated by try_new_parent), and the new source is not the current
+ * parent, do not switch to the safe parent.
+ */
+ if (mux->safe_sel >= 0 &&
+ !(mux->try_new_parent && (new_parent != c->parent))) {
+ /*
+ * The safe parent might be a clock with multiple sources;
+ * to select the "safe" source, set a safe frequency.
+ */
+ if (mux->safe_freq) {
+ rc = clk_set_rate(mux->safe_parent, mux->safe_freq);
+ if (rc) {
+ pr_err("Failed to set safe rate on %s\n",
+ clk_name(mux->safe_parent));
+ return rc;
+ }
+ }
+
+ /*
+ * Some mux implementations might switch to/from a low power
+ * parent as part of their disable/enable ops. Grab the
+ * enable lock to avoid racing with these implementations.
+ */
+ spin_lock_irqsave(&c->lock, flags);
+ rc = mux->ops->set_mux_sel(mux, mux->safe_sel);
+ spin_unlock_irqrestore(&c->lock, flags);
+ if (rc)
+ return rc;
+
+ }
+
+ /* Remember the parent's old rate so it can be restored on failure. */
+ new_par_curr_rate = clk_get_rate(new_parent);
+ rc = clk_set_rate(new_parent, rate);
+ if (rc)
+ goto set_rate_fail;
+
+ rc = mux_set_parent(c, new_parent);
+ if (rc)
+ goto set_par_fail;
+
+ return 0;
+
+set_par_fail:
+ clk_set_rate(new_parent, new_par_curr_rate);
+set_rate_fail:
+ WARN(mux->ops->set_mux_sel(mux,
+ mux_parent_to_src_sel(mux, c->parent)),
+ "Set rate failed for %s. Also in bad state!\n", c->dbg_name);
+ return rc;
+}
+
+/* Enable/disable simply delegate to the mux implementation if provided. */
+static int mux_enable(struct clk *c)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+ if (mux->ops->enable)
+ return mux->ops->enable(mux);
+ return 0;
+}
+
+static void mux_disable(struct clk *c)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+ if (mux->ops->disable)
+ return mux->ops->disable(mux);
+}
+
+/* Read the HW select and translate it back to a parent clk, or NULL. */
+static struct clk *mux_get_parent(struct clk *c)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+ int sel = mux->ops->get_mux_sel(mux);
+ int i;
+
+ for (i = 0; i < mux->num_parents; i++) {
+ if (mux->parents[i].sel == sel)
+ return mux->parents[i].src;
+ }
+
+ /* Unfamiliar parent. */
+ return NULL;
+}
+
+/*
+ * Boot-time handoff: cache the current rate and safe-parent select.
+ * Only muxes with a real HW gate (en_mask + is_enabled) can report
+ * 'enabled'; gateless muxes always report disabled (see comment below).
+ */
+static enum handoff mux_handoff(struct clk *c)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+
+ c->rate = clk_get_rate(c->parent);
+ mux->safe_sel = mux_parent_to_src_sel(mux, mux->safe_parent);
+
+ if (mux->en_mask && mux->ops && mux->ops->is_enabled)
+ return mux->ops->is_enabled(mux)
+ ? HANDOFF_ENABLED_CLK
+ : HANDOFF_DISABLED_CLK;
+
+ /*
+ * If this function returns 'enabled' even when the clock downstream
+ * of this clock is disabled, then handoff code will unnecessarily
+ * enable the current parent of this clock. If this function always
+ * returns 'disabled' and a clock downstream is on, the clock handoff
+ * code will bump up the ref count for this clock and its current
+ * parent as necessary. So, clocks without an actual HW gate can
+ * always return disabled.
+ */
+ return HANDOFF_DISABLED_CLK;
+}
+
+/* Expose the mux's registers for debugfs dumps, if the impl supports it. */
+static void __iomem *mux_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct mux_clk *mux = to_mux_clk(c);
+
+ if (mux->ops && mux->ops->list_registers)
+ return mux->ops->list_registers(mux, n, regs, size);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/* Generic mux clk_ops; reused (with get_rate overridden) by debug muxes. */
+struct clk_ops clk_ops_gen_mux = {
+ .enable = mux_enable,
+ .disable = mux_disable,
+ .set_parent = mux_set_parent,
+ .round_rate = mux_round_rate,
+ .set_rate = mux_set_rate,
+ .handoff = mux_handoff,
+ .get_parent = mux_get_parent,
+ .list_registers = mux_clk_list_registers,
+};
+
+/* ==================== Divider clock ==================== */
+
+/*
+ * Find the best (div, parent rate) pair achieving 'rate'. For each
+ * candidate divider (honoring odd/even skip rules) the parent is asked to
+ * round div*rate; the best achievable output wins. Optionally returns the
+ * chosen divider and parent rate. -EINVAL when nothing is achievable.
+ */
+static long __div_round_rate(struct div_data *data, unsigned long rate,
+ struct clk *parent, unsigned int *best_div, unsigned long *best_prate)
+{
+ unsigned int div, min_div, max_div, _best_div = 1;
+ unsigned long prate, _best_prate = 0, rrate = 0, req_prate, actual_rate;
+ unsigned int numer;
+
+ rate = max(rate, 1UL);
+
+ min_div = max(data->min_div, 1U);
+ max_div = min(data->max_div, (unsigned int) (ULONG_MAX));
+
+ /*
+ * div values are doubled for half dividers.
+ * Adjust for that by picking a numer of 2.
+ */
+ numer = data->is_half_divider ? 2 : 1;
+
+ for (div = min_div; div <= max_div; div++) {
+ if (data->skip_odd_div && (div & 1))
+ if (!(data->allow_div_one && (div == 1)))
+ continue;
+ if (data->skip_even_div && !(div & 1))
+ continue;
+ req_prate = mult_frac(rate, div, numer);
+ prate = clk_round_rate(parent, req_prate);
+ if (IS_ERR_VALUE(prate))
+ break;
+
+ actual_rate = mult_frac(prate, numer, div);
+ if (is_better_rate(rate, rrate, actual_rate)) {
+ rrate = actual_rate;
+ _best_div = div;
+ _best_prate = prate;
+ }
+
+ /*
+ * Trying higher dividers is only going to ask the parent for
+ * a higher rate. If it can't even output a rate higher than
+ * the one we request for this divider, the parent is not
+ * going to be able to output an even higher rate required
+ * for a higher divider. So, stop trying higher dividers.
+ */
+ if (actual_rate < rate)
+ break;
+
+ /* Close enough (within rate_margin): stop searching. */
+ if (rrate <= rate + data->rate_margin)
+ break;
+ }
+
+ if (!rrate)
+ return -EINVAL;
+ if (best_div)
+ *best_div = _best_div;
+ if (best_prate)
+ *best_prate = _best_prate;
+
+ return rrate;
+}
+
+static long div_round_rate(struct clk *c, unsigned long rate)
+{
+ struct div_clk *d = to_div_clk(c);
+
+ return __div_round_rate(&d->data, rate, c->parent, NULL, NULL);
+}
+
+/*
+ * Pick an intermediate divider so the output never exceeds safe_freq while
+ * the parent transitions between c->rate and the requested rate. Returns 0
+ * when no safe_freq is configured, -EINVAL when no divider qualifies.
+ * Note: the loop runs high-to-low and keeps overwriting, so the smallest
+ * qualifying divider (i.e. the highest safe output <= fast) wins.
+ */
+static int _find_safe_div(struct clk *c, unsigned long rate)
+{
+ struct div_clk *d = to_div_clk(c);
+ struct div_data *data = &d->data;
+ unsigned long fast = max(rate, c->rate);
+ unsigned int numer = data->is_half_divider ? 2 : 1;
+ int i, safe_div = 0;
+
+ if (!d->safe_freq)
+ return 0;
+
+ /* Find the max safe freq that is lesser than fast */
+ for (i = data->max_div; i >= data->min_div; i--)
+ if (mult_frac(d->safe_freq, numer, i) <= fast)
+ safe_div = i;
+
+ return safe_div ?: -EINVAL;
+}
+
+/*
+ * Set the output rate: pre-raise the divider to a safe value, retune the
+ * parent, then drop the divider to its final value. Failure paths restore
+ * the old parent rate and divider. Rejects rates outside [rate,
+ * rate + rate_margin] of what __div_round_rate() can achieve.
+ */
+static int div_set_rate(struct clk *c, unsigned long rate)
+{
+ struct div_clk *d = to_div_clk(c);
+ int safe_div, div, rc = 0;
+ long rrate, old_prate, new_prate;
+ struct div_data *data = &d->data;
+
+ rrate = __div_round_rate(data, rate, c->parent, &div, &new_prate);
+ if (rrate < rate || rrate > rate + data->rate_margin)
+ return -EINVAL;
+
+ /*
+ * For fixed divider clock we don't want to return an error if the
+ * requested rate matches the achievable rate. So, don't check for
+ * !d->ops and return an error. __div_round_rate() ensures div ==
+ * d->div if !d->ops.
+ */
+
+ safe_div = _find_safe_div(c, rate);
+ if (d->safe_freq && safe_div < 0) {
+ pr_err("No safe div on %s for transitioning from %lu to %lu\n",
+ c->dbg_name, c->rate, rate);
+ return -EINVAL;
+ }
+
+ /* Never transition through a divider lower than the target. */
+ safe_div = max(safe_div, div);
+
+ if (safe_div > data->div) {
+ rc = d->ops->set_div(d, safe_div);
+ if (rc) {
+ pr_err("Failed to set div %d on %s\n", safe_div,
+ c->dbg_name);
+ return rc;
+ }
+ }
+
+ old_prate = clk_get_rate(c->parent);
+ rc = clk_set_rate(c->parent, new_prate);
+ if (rc)
+ goto set_rate_fail;
+
+ /* Lower the divider to its final value after the parent settles. */
+ if (div < data->div)
+ rc = d->ops->set_div(d, div);
+ else if (div < safe_div)
+ rc = d->ops->set_div(d, div);
+ if (rc)
+ goto div_dec_fail;
+
+ data->div = div;
+
+ return 0;
+
+div_dec_fail:
+ WARN(clk_set_rate(c->parent, old_prate),
+ "Set rate failed for %s. Also in bad state!\n", c->dbg_name);
+set_rate_fail:
+ if (safe_div > data->div)
+ WARN(d->ops->set_div(d, data->div),
+ "Set rate failed for %s. Also in bad state!\n",
+ c->dbg_name);
+ return rc;
+}
+
+/* Enable/disable delegate to the divider implementation when present. */
+static int div_enable(struct clk *c)
+{
+ struct div_clk *d = to_div_clk(c);
+ if (d->ops && d->ops->enable)
+ return d->ops->enable(d);
+ return 0;
+}
+
+static void div_disable(struct clk *c)
+{
+ struct div_clk *d = to_div_clk(c);
+ if (d->ops && d->ops->disable)
+ return d->ops->disable(d);
+}
+
+/*
+ * Boot-time handoff: read back the HW divider (clamped to >= 1), derive the
+ * cached rate from the parent, and pin min/max for fixed dividers that
+ * cannot be reprogrammed (no set_div op).
+ */
+static enum handoff div_handoff(struct clk *c)
+{
+ struct div_clk *d = to_div_clk(c);
+ unsigned int div = d->data.div;
+
+ if (d->ops && d->ops->get_div)
+ div = max(d->ops->get_div(d), 1);
+ div = max(div, 1U);
+ c->rate = clk_get_rate(c->parent) / div;
+
+ if (!d->ops || !d->ops->set_div)
+ d->data.min_div = d->data.max_div = div;
+ d->data.div = div;
+
+ if (d->en_mask && d->ops && d->ops->is_enabled)
+ return d->ops->is_enabled(d)
+ ? HANDOFF_ENABLED_CLK
+ : HANDOFF_DISABLED_CLK;
+
+ /*
+ * If this function returns 'enabled' even when the clock downstream
+ * of this clock is disabled, then handoff code will unnecessarily
+ * enable the current parent of this clock. If this function always
+ * returns 'disabled' and a clock downstream is on, the clock handoff
+ * code will bump up the ref count for this clock and its current
+ * parent as necessary. So, clocks without an actual HW gate can
+ * always return disabled.
+ */
+ return HANDOFF_DISABLED_CLK;
+}
+
+/* Expose divider registers for debugfs dumps, if the impl supports it. */
+static void __iomem *div_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct div_clk *d = to_div_clk(c);
+
+ if (d->ops && d->ops->list_registers)
+ return d->ops->list_registers(d, n, regs, size);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/* Divider that owns its parent's rate (may retune the parent). */
+struct clk_ops clk_ops_div = {
+ .enable = div_enable,
+ .disable = div_disable,
+ .round_rate = div_round_rate,
+ .set_rate = div_set_rate,
+ .handoff = div_handoff,
+ .list_registers = div_clk_list_registers,
+};
+
+/*
+ * Slave divider: the parent rate is fixed from this clock's point of view,
+ * so just pick the in-range divider closest to the request and report the
+ * resulting output rate (and optionally the divider itself).
+ */
+static long __slave_div_round_rate(struct clk *c, unsigned long rate,
+ int *best_div)
+{
+ struct div_clk *d = to_div_clk(c);
+ unsigned int div, min_div, max_div;
+ long p_rate;
+
+ rate = max(rate, 1UL);
+
+ min_div = d->data.min_div;
+ max_div = d->data.max_div;
+
+ p_rate = clk_get_rate(c->parent);
+ div = DIV_ROUND_CLOSEST(p_rate, rate);
+ div = max(div, min_div);
+ div = min(div, max_div);
+ if (best_div)
+ *best_div = div;
+
+ return p_rate / div;
+}
+
+static long slave_div_round_rate(struct clk *c, unsigned long rate)
+{
+ return __slave_div_round_rate(c, rate, NULL);
+}
+
+/*
+ * Set the slave divider so the output exactly matches 'rate' at the
+ * current parent rate; the parent is never retuned. No-op if the divider
+ * is already correct.
+ */
+static int slave_div_set_rate(struct clk *c, unsigned long rate)
+{
+ struct div_clk *d = to_div_clk(c);
+ int div, rc = 0;
+ long rrate;
+
+ rrate = __slave_div_round_rate(c, rate, &div);
+ if (rrate != rate)
+ return -EINVAL;
+
+ if (div == d->data.div)
+ return 0;
+
+ /*
+ * For fixed divider clock we don't want to return an error if the
+ * requested rate matches the achievable rate. So, don't check for
+ * !d->ops and return an error. __slave_div_round_rate() ensures
+ * div == d->data.div if !d->ops.
+ */
+ rc = d->ops->set_div(d, div);
+ if (rc)
+ return rc;
+
+ d->data.div = div;
+
+ return 0;
+}
+
+/* Current output rate: parent rate over the cached divider (0 if unset). */
+static unsigned long slave_div_get_rate(struct clk *c)
+{
+ struct div_clk *d = to_div_clk(c);
+ if (!d->data.div)
+ return 0;
+ return clk_get_rate(c->parent) / d->data.div;
+}
+
+/* Divider that follows its parent's rate without ever changing it. */
+struct clk_ops clk_ops_slave_div = {
+ .enable = div_enable,
+ .disable = div_disable,
+ .round_rate = slave_div_round_rate,
+ .set_rate = slave_div_set_rate,
+ .get_rate = slave_div_get_rate,
+ .handoff = div_handoff,
+ .list_registers = div_clk_list_registers,
+};
+
+
+/**
+ * External clock
+ * Some clock controllers have an input clock signal that comes from outside
+ * the clock controller. That input clock signal might then be used as a
+ * source for several clocks inside the clock controller. This external clock
+ * implementation models this input clock signal by just passing on requests
+ * to the clock's parent, the original external clock source. The driver for
+ * the clock controller should clk_get() the original external clock in the
+ * probe function and set it as a parent to this external clock.
+ */
+
+/* Pass-through helpers: delegate rate operations straight to the parent. */
+long parent_round_rate(struct clk *c, unsigned long rate)
+{
+ return clk_round_rate(c->parent, rate);
+}
+
+int parent_set_rate(struct clk *c, unsigned long rate)
+{
+ return clk_set_rate(c->parent, rate);
+}
+
+unsigned long parent_get_rate(struct clk *c)
+{
+ return clk_get_rate(c->parent);
+}
+
+/* Reparent requests are forwarded to the underlying external clock. */
+static int ext_set_parent(struct clk *c, struct clk *p)
+{
+ return clk_set_parent(c->parent, p);
+}
+
+/*
+ * Resolve the external source lazily: use the cached parent if one was
+ * already set, otherwise clk_get() it by the dev/clk_id recorded at init.
+ */
+static struct clk *ext_get_parent(struct clk *c)
+{
+ struct ext_clk *ext = to_ext_clk(c);
+
+ if (!IS_ERR_OR_NULL(c->parent))
+ return c->parent;
+ return clk_get(ext->dev, ext->clk_id);
+}
+
+static enum handoff ext_handoff(struct clk *c)
+{
+ c->rate = clk_get_rate(c->parent);
+ /* Similar reasoning applied in div_handoff, see comment there. */
+ return HANDOFF_DISABLED_CLK;
+}
+
+/* External clock ops: everything forwards to the real source clock. */
+struct clk_ops clk_ops_ext = {
+ .handoff = ext_handoff,
+ .round_rate = parent_round_rate,
+ .set_rate = parent_set_rate,
+ .get_rate = parent_get_rate,
+ .set_parent = ext_set_parent,
+ .get_parent = ext_get_parent,
+};
+
+/*
+ * Devicetree parser for "qcom,ext-clk" nodes. Allocates an ext_clk,
+ * optionally records the clock-name used for the deferred clk_get() in
+ * ext_get_parent(), and registers it via the generic msmclk machinery.
+ */
+static void *ext_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+ struct ext_clk *ext;
+ const char *str;
+ int rc;
+
+ ext = devm_kzalloc(dev, sizeof(*ext), GFP_KERNEL);
+ if (!ext) {
+ dev_err(dev, "memory allocation failure\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ext->dev = dev;
+ /* Property is optional: without it, clk_id stays NULL. */
+ rc = of_property_read_string(np, "qcom,clock-names", &str);
+ if (!rc)
+ ext->clk_id = (void *)str;
+
+ ext->c.ops = &clk_ops_ext;
+ return msmclk_generic_clk_init(dev, np, &ext->c);
+}
+MSMCLK_PARSER(ext_clk_dt_parser, "qcom,ext-clk", 0);
+
+/* ==================== Mux_div clock ==================== */
+
+/* Enable hook: delegate to the hardware-specific op when provided. */
+static int mux_div_clk_enable(struct clk *c)
+{
+ struct mux_div_clk *md = to_mux_div_clk(c);
+
+ if (md->ops->enable)
+ return md->ops->enable(md);
+ return 0;
+}
+
+/* Disable hook: delegate to the hardware-specific op when provided. */
+static void mux_div_clk_disable(struct clk *c)
+{
+ struct mux_div_clk *md = to_mux_div_clk(c);
+
+ if (md->ops->disable)
+ return md->ops->disable(md);
+}
+
+/*
+ * Find the best (parent, divider) pair for @rate across all mux inputs.
+ * Outputs (div, parent rate, parent) are returned through the optional
+ * pointer arguments. Returns the achievable rate, or -EINVAL if no
+ * parent/divider combination can produce anything usable.
+ */
+static long __mux_div_round_rate(struct clk *c, unsigned long rate,
+ struct clk **best_parent, int *best_div, unsigned long *best_prate)
+{
+ struct mux_div_clk *md = to_mux_div_clk(c);
+ unsigned int i;
+ unsigned long rrate, best = 0, _best_div = 0, _best_prate = 0;
+ struct clk *_best_parent = 0; /* NOTE(review): NULL would be more idiomatic */
+
+ if (md->try_get_rate) {
+ /*
+ * Fast path: prefer a parent that is already running at the
+ * needed rate, avoiding a parent rate change altogether.
+ */
+ for (i = 0; i < md->num_parents; i++) {
+ int divider;
+ unsigned long p_rate;
+
+ rrate = __div_round_rate(&md->data, rate,
+ md->parents[i].src,
+ &divider, &p_rate);
+ /*
+ * Check if one of the possible parents is already at
+ * the requested rate.
+ */
+ if (p_rate == clk_get_rate(md->parents[i].src)
+ && rrate == rate) {
+ best = rrate;
+ _best_div = divider;
+ _best_prate = p_rate;
+ _best_parent = md->parents[i].src;
+ goto end;
+ }
+ }
+ }
+
+ for (i = 0; i < md->num_parents; i++) {
+ int div;
+ unsigned long prate;
+
+ rrate = __div_round_rate(&md->data, rate, md->parents[i].src,
+ &div, &prate);
+
+ if (is_better_rate(rate, best, rrate)) {
+ best = rrate;
+ _best_div = div;
+ _best_prate = prate;
+ _best_parent = md->parents[i].src;
+ }
+
+ /* Good enough: within the allowed margin above the request. */
+ if (rate <= rrate && rrate <= rate + md->data.rate_margin)
+ break;
+ }
+end:
+ if (best_div)
+ *best_div = _best_div;
+ if (best_prate)
+ *best_prate = _best_prate;
+ if (best_parent)
+ *best_parent = _best_parent;
+
+ if (best)
+ return best;
+ return -EINVAL;
+}
+
+/* clk_ops wrapper: round only, discard the chosen parent/divider. */
+static long mux_div_clk_round_rate(struct clk *c, unsigned long rate)
+{
+ return __mux_div_round_rate(c, rate, NULL, NULL, NULL);
+}
+
+/* requires enable lock to be held */
+static int __set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
+{
+ /*
+  * Fix: rc previously was u32; it can hold a negative errno from
+  * set_src_div(), so keep it as a plain int (kernel convention).
+  */
+ int rc = 0;
+ u32 src_sel;
+
+ src_sel = parent_to_src_sel(md->parents, md->num_parents, parent);
+ /*
+ * If the clock is disabled, don't change to the new settings until
+ * the clock is reenabled
+ */
+ if (md->c.count)
+ rc = md->ops->set_src_div(md, src_sel, div);
+ /* Only cache the new mux/divider state if the hardware write stuck. */
+ if (!rc) {
+ md->data.div = div;
+ md->src_sel = src_sel;
+ }
+
+ return rc;
+}
+
+/* Locked wrapper around __set_src_div(). */
+static int set_src_div(struct mux_div_clk *md, struct clk *parent, u32 div)
+{
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&md->c.lock, flags);
+ rc = __set_src_div(md, parent, div);
+ spin_unlock_irqrestore(&md->c.lock, flags);
+
+ return rc;
+}
+
+/* Must be called after handoff to ensure parent clock rates are initialized */
+static int safe_parent_init_once(struct clk *c)
+{
+ unsigned long rrate;
+ u32 best_div;
+ struct clk *best_parent;
+ struct mux_div_clk *md = to_mux_div_clk(c);
+
+ /* A previous attempt failed permanently (ERR_PTR sentinel). */
+ if (IS_ERR(md->safe_parent))
+ return -EINVAL;
+ /* Nothing to do: no safe_freq configured, or already resolved. */
+ if (!md->safe_freq || md->safe_parent)
+ return 0;
+
+ rrate = __mux_div_round_rate(c, md->safe_freq, &best_parent,
+ &best_div, NULL);
+
+ if (rrate == md->safe_freq) {
+ md->safe_div = best_div;
+ md->safe_parent = best_parent;
+ } else {
+ /* Cache the failure so we don't retry on every set_rate. */
+ md->safe_parent = ERR_PTR(-EINVAL);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Change rate by (possibly) switching both mux input and divider.
+ * Sequence: pick the best parent/divider, park on a safe setting (or a
+ * larger divider on the same parent) to avoid an out-of-spec
+ * intermediate frequency, set the new parent's rate, then atomically
+ * switch mux+divider. On failure every step is unwound.
+ */
+static int mux_div_clk_set_rate(struct clk *c, unsigned long rate)
+{
+ struct mux_div_clk *md = to_mux_div_clk(c);
+ unsigned long flags, rrate;
+ unsigned long new_prate, new_parent_orig_rate;
+ struct clk *old_parent, *new_parent;
+ u32 new_div, old_div;
+ int rc;
+
+ rc = safe_parent_init_once(c);
+ if (rc)
+ return rc;
+
+ rrate = __mux_div_round_rate(c, rate, &new_parent, &new_div,
+ &new_prate);
+ /* A -EINVAL return becomes a huge unsigned rrate and fails here. */
+ if (rrate < rate || rrate > rate + md->data.rate_margin)
+ return -EINVAL;
+
+ old_parent = c->parent;
+ old_div = md->data.div;
+
+ /* Refer to the description of safe_freq in clock-generic.h */
+ if (md->safe_freq)
+ rc = set_src_div(md, md->safe_parent, md->safe_div);
+
+ else if (new_parent == old_parent && new_div >= old_div) {
+ /*
+ * If both the parent_rate and divider changes, there may be an
+ * intermediate frequency generated. Ensure this intermediate
+ * frequency is less than both the new rate and previous rate.
+ */
+ rc = set_src_div(md, old_parent, new_div);
+ }
+ if (rc)
+ return rc;
+
+ /* Remember the old parent rate so we can restore it on failure. */
+ new_parent_orig_rate = clk_get_rate(new_parent);
+ rc = clk_set_rate(new_parent, new_prate);
+ if (rc) {
+ pr_err("failed to set %s to %ld\n",
+ clk_name(new_parent), new_prate);
+ goto err_set_rate;
+ }
+
+ rc = __clk_pre_reparent(c, new_parent, &flags);
+ if (rc)
+ goto err_pre_reparent;
+
+ /* Set divider and mux src atomically */
+ rc = __set_src_div(md, new_parent, new_div);
+ if (rc)
+ goto err_set_src_div;
+
+ c->parent = new_parent;
+
+ __clk_post_reparent(c, old_parent, &flags);
+ return 0;
+
+err_set_src_div:
+ /* Not switching to new_parent, so disable it */
+ __clk_post_reparent(c, new_parent, &flags);
+err_pre_reparent:
+ rc = clk_set_rate(new_parent, new_parent_orig_rate);
+ WARN(rc, "%s: error changing new_parent (%s) rate back to %ld\n",
+ clk_name(c), clk_name(new_parent), new_parent_orig_rate);
+err_set_rate:
+ rc = set_src_div(md, old_parent, old_div);
+ WARN(rc, "%s: error changing back to original div (%d) and parent (%s)\n",
+ clk_name(c), old_div, clk_name(old_parent));
+
+ return rc;
+}
+
+/*
+ * Read the mux selection from hardware, cache div/src_sel, and map the
+ * selection back to a parent clk. Returns NULL for an unknown selector.
+ */
+static struct clk *mux_div_clk_get_parent(struct clk *c)
+{
+ struct mux_div_clk *md = to_mux_div_clk(c);
+ u32 i, div, src_sel;
+
+ md->ops->get_src_div(md, &src_sel, &div);
+
+ md->data.div = div;
+ md->src_sel = src_sel;
+
+ for (i = 0; i < md->num_parents; i++) {
+ if (md->parents[i].sel == src_sel)
+ return md->parents[i].src;
+ }
+
+ return NULL;
+}
+
+/* Compute the boot-time rate from the parent and the cached divider. */
+static enum handoff mux_div_clk_handoff(struct clk *c)
+{
+ struct mux_div_clk *md = to_mux_div_clk(c);
+ unsigned long parent_rate;
+ unsigned int numer;
+
+ parent_rate = clk_get_rate(c->parent);
+ /*
+ * div values are doubled for half dividers.
+ * Adjust for that by picking a numer of 2.
+ */
+ numer = md->data.is_half_divider ? 2 : 1;
+
+ if (md->data.div) {
+ c->rate = mult_frac(parent_rate, numer, md->data.div);
+ } else {
+ c->rate = 0;
+ return HANDOFF_DISABLED_CLK;
+ }
+
+ /* With a real HW gate, let the hardware report the enable state. */
+ if (md->en_mask && md->ops && md->ops->is_enabled)
+ return md->ops->is_enabled(md)
+ ? HANDOFF_ENABLED_CLK
+ : HANDOFF_DISABLED_CLK;
+
+ /*
+ * If this function returns 'enabled' even when the clock downstream
+ * of this clock is disabled, then handoff code will unnecessarily
+ * enable the current parent of this clock. If this function always
+ * returns 'disabled' and a clock downstream is on, the clock handoff
+ * code will bump up the ref count for this clock and its current
+ * parent as necessary. So, clocks without an actual HW gate can
+ * always return disabled.
+ */
+ return HANDOFF_DISABLED_CLK;
+}
+
+/* Debugfs helper: expose hardware registers via the hw-specific op. */
+static void __iomem *mux_div_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct mux_div_clk *md = to_mux_div_clk(c);
+
+ if (md->ops && md->ops->list_registers)
+ return md->ops->list_registers(md, n , regs, size);
+
+ return ERR_PTR(-EINVAL);
+}
+
+/* Generic ops for a combined mux + divider clock node. */
+struct clk_ops clk_ops_mux_div_clk = {
+ .enable = mux_div_clk_enable,
+ .disable = mux_div_clk_disable,
+ .set_rate = mux_div_clk_set_rate,
+ .round_rate = mux_div_clk_round_rate,
+ .get_parent = mux_div_clk_get_parent,
+ .handoff = mux_div_clk_handoff,
+ .list_registers = mux_div_clk_list_registers,
+};
diff --git a/drivers/clk/msm/clock-gpu-cobalt.c b/drivers/clk/msm/clock-gpu-cobalt.c
new file mode 100644
index 000000000000..63870aaa487b
--- /dev/null
+++ b/drivers/clk/msm/clock-gpu-cobalt.c
@@ -0,0 +1,790 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-voter.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/clock-alpha-pll.h>
+
+#include <dt-bindings/clock/msm-clocks-cobalt.h>
+#include <dt-bindings/clock/msm-clocks-hwio-cobalt.h>
+
+#include "vdd-level-cobalt.h"
+
+/* MMIO bases: GPU CC block and the separate GFX (3D core) CC block. */
+static void __iomem *virt_base;
+static void __iomem *virt_base_gfx;
+
+/* RCG source-select field encodings for each mux input. */
+#define gpucc_cxo_clk_source_val 0
+#define gpucc_gpll0_source_val 5
+#define gpu_pll0_pll_out_even_source_val 1
+#define gpu_pll0_pll_out_odd_source_val 2
+#define gpu_pll1_pll_out_even_source_val 3
+#define gpu_pll1_pll_out_odd_source_val 4
+
+/* GDSC / CRC register offsets relative to the GFX CC base. */
+#define SW_COLLAPSE_MASK BIT(0)
+#define GPU_CX_GDSCR_OFFSET 0x1004
+#define GPU_GX_GDSCR_OFFSET 0x1094
+#define CRC_SID_FSM_OFFSET 0x10A0
+#define CRC_MND_CFG_OFFSET 0x10A4
+
+/*
+ * Frequency-table entry: encodes M/N/D counters plus the RCG CFG value
+ * (divider in bits 4:0 as 2*div-1, source select in bits 10:8).
+ */
+#define F(f, s, div, m, n) \
+ { \
+ .freq_hz = (f), \
+ .src_clk = &s.c, \
+ .m_val = (m), \
+ .n_val = ~((n)-(m)) * !!(n), \
+ .d_val = ~(n),\
+ .div_src_val = BVAL(4, 0, (int)(2*(div) - 1)) \
+ | BVAL(10, 8, s##_source_val), \
+ }
+
+/* As F(), but also records the source frequency for slewing PLLs. */
+#define F_SLEW(f, s_f, s, div, m, n) \
+ { \
+ .freq_hz = (f), \
+ .src_freq = (s_f), \
+ .src_clk = &s.c, \
+ .m_val = (m), \
+ .n_val = ~((n)-(m)) * !!(n), \
+ .d_val = ~(n),\
+ .div_src_val = BVAL(4, 0, (int)(2*(div) - 1)) \
+ | BVAL(10, 8, s##_source_val), \
+ }
+
+/* Bit layout shared by both Fabia GPU PLLs. */
+static struct alpha_pll_masks pll_masks_p = {
+ .lock_mask = BIT(31),
+ .active_mask = BIT(30),
+ .update_mask = BIT(22),
+ .output_mask = 0xf,
+ .post_div_mask = BM(15, 8),
+};
+
+/* Voltage domains: digital rail, GPU rail (2 regulators), and GPU MX. */
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
+DEFINE_VDD_REGS_INIT(vdd_gpucc, 2);
+DEFINE_VDD_REGS_INIT(vdd_gpucc_mx, 1);
+
+/* Board clocks resolved at probe time (xo_ao and gpll0 from GCC). */
+DEFINE_EXT_CLK(gpucc_xo, NULL);
+DEFINE_EXT_CLK(gpucc_gpll0, NULL);
+
+/* XO-fed root branch; kept always-on by the probe (feeds RCGs/GDSCs). */
+static struct branch_clk gpucc_cxo_clk = {
+ .cbcr_reg = GPUCC_CXO_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gpucc_cxo_clk",
+ .parent = &gpucc_xo.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gpucc_cxo_clk.c),
+ },
+};
+
+/* Fabia-type GPU PLL0, sourced from XO. */
+static struct alpha_pll_clk gpu_pll0_pll = {
+ .masks = &pll_masks_p,
+ .base = &virt_base_gfx,
+ .offset = GPUCC_GPU_PLL0_PLL_MODE,
+ .enable_config = 0x1,
+ .is_fabia = true,
+ .c = {
+ .rate = 0,
+ .parent = &gpucc_xo.c,
+ .dbg_name = "gpu_pll0_pll",
+ .ops = &clk_ops_fabia_alpha_pll,
+ VDD_GPU_PLL_FMAX_MAP1(NOMINAL, 1300000500),
+ CLK_INIT(gpu_pll0_pll.c),
+ },
+};
+
+/* PLL0 even post-divider (1..8, even values only), bits 11:8. */
+static struct div_clk gpu_pll0_pll_out_even = {
+ .base = &virt_base_gfx,
+ .offset = GPUCC_GPU_PLL0_USER_CTL_MODE,
+ .mask = 0xf,
+ .shift = 8,
+ .data = {
+ .max_div = 8,
+ .min_div = 1,
+ .skip_odd_div = true,
+ .allow_div_one = true,
+ .rate_margin = 500,
+ },
+ .ops = &postdiv_reg_ops,
+ .c = {
+ .parent = &gpu_pll0_pll.c,
+ .dbg_name = "gpu_pll0_pll_out_even",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(gpu_pll0_pll_out_even.c),
+ },
+};
+
+/* PLL0 odd post-divider (3..7, odd values only), bits 15:12. */
+static struct div_clk gpu_pll0_pll_out_odd = {
+ .base = &virt_base_gfx,
+ .offset = GPUCC_GPU_PLL0_USER_CTL_MODE,
+ .mask = 0xf,
+ .shift = 12,
+ .data = {
+ .max_div = 7,
+ .min_div = 3,
+ .skip_even_div = true,
+ .rate_margin = 500,
+ },
+ .ops = &postdiv_reg_ops,
+ .c = {
+ .parent = &gpu_pll0_pll.c,
+ .dbg_name = "gpu_pll0_pll_out_odd",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(gpu_pll0_pll_out_odd.c),
+ },
+};
+
+/* Fabia-type GPU PLL1, sourced from XO (second source for gfx3d). */
+static struct alpha_pll_clk gpu_pll1_pll = {
+ .masks = &pll_masks_p,
+ .base = &virt_base_gfx,
+ .offset = GPUCC_GPU_PLL1_PLL_MODE,
+ .enable_config = 0x1,
+ .is_fabia = true,
+ .c = {
+ .rate = 0,
+ .parent = &gpucc_xo.c,
+ .dbg_name = "gpu_pll1_pll",
+ .ops = &clk_ops_fabia_alpha_pll,
+ VDD_GPU_PLL_FMAX_MAP1(NOMINAL, 1300000500),
+ CLK_INIT(gpu_pll1_pll.c),
+ },
+};
+
+/* PLL1 even post-divider (1..8, even values only), bits 11:8. */
+static struct div_clk gpu_pll1_pll_out_even = {
+ .base = &virt_base_gfx,
+ .offset = GPUCC_GPU_PLL1_USER_CTL_MODE,
+ .mask = 0xf,
+ .shift = 8,
+ .data = {
+ .max_div = 8,
+ .min_div = 1,
+ .skip_odd_div = true,
+ .allow_div_one = true,
+ .rate_margin = 500,
+ },
+ .ops = &postdiv_reg_ops,
+ .c = {
+ .parent = &gpu_pll1_pll.c,
+ .dbg_name = "gpu_pll1_pll_out_even",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(gpu_pll1_pll_out_even.c),
+ },
+};
+
+/*
+ * PLL1 odd post-divider (3..7, odd values only), bits 15:12.
+ * Fix: program PLL1's USER_CTL register. The original used
+ * GPUCC_GPU_PLL0_USER_CTL_MODE (copy-paste from gpu_pll0_pll_out_odd),
+ * which would silently change PLL0's odd divider instead of PLL1's —
+ * compare gpu_pll1_pll_out_even, which correctly targets the PLL1
+ * register.
+ */
+static struct div_clk gpu_pll1_pll_out_odd = {
+ .base = &virt_base_gfx,
+ .offset = GPUCC_GPU_PLL1_USER_CTL_MODE,
+ .mask = 0xf,
+ .shift = 12,
+ .data = {
+ .max_div = 7,
+ .min_div = 3,
+ .skip_even_div = true,
+ .rate_margin = 500,
+ },
+ .ops = &postdiv_reg_ops,
+ .c = {
+ .parent = &gpu_pll1_pll.c,
+ .dbg_name = "gpu_pll1_pll_out_odd",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(gpu_pll1_pll_out_odd.c),
+ },
+};
+
+/* gfx3d operating points, v1 silicon: PLL0-even at 2x, divided by 2. */
+static struct clk_freq_tbl ftbl_gfx3d_clk_src[] = {
+ F_SLEW( 171000000, 342000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 251000000, 502000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 332000000, 664000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 403000000, 806000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 504000000, 1008000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 650000000, 1300000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_END
+};
+
+/* gfx3d operating points, v2 silicon (installed by the v2 fixup). */
+static struct clk_freq_tbl ftbl_gfx3d_clk_src_v2[] = {
+ F_SLEW( 189000000, 378000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 264000000, 528000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 342000000, 684000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 414000000, 828000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 520000000, 1040000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 596000000, 1192000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 670000000, 1340000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 710000000, 1420000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_END
+};
+
+/* gfx3d operating points, "hamster" (vq): top bins move to PLL1. */
+static struct clk_freq_tbl ftbl_gfx3d_clk_src_vq[] = {
+ F_SLEW( 185000000, 370000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 285000000, 570000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 358000000, 716000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 434000000, 868000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 542000000, 1084000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 630000000, 1260000000, gpu_pll0_pll_out_even, 1, 0, 0),
+ F_SLEW( 670000000, 1340000000, gpu_pll1_pll_out_even, 1, 0, 0),
+ F_SLEW( 710000000, 1420000000, gpu_pll1_pll_out_even, 1, 0, 0),
+ F_END
+};
+
+/*
+ * GFX 3D core RCG. Fmax levels come from DT (qcom,gfxfreq-speedbin0)
+ * via of_get_fmax_vdd_class() at probe, not from a static map.
+ */
+static struct rcg_clk gfx3d_clk_src = {
+ .cmd_rcgr_reg = GPUCC_GFX3D_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gfx3d_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .force_enable_rcgr = true,
+ .base = &virt_base_gfx,
+ .c = {
+ .dbg_name = "gfx3d_clk_src",
+ .ops = &clk_ops_rcg,
+ .vdd_class = &vdd_gpucc,
+ CLK_INIT(gfx3d_clk_src.c),
+ },
+};
+
+/* RBBM timer: fixed 19.2 MHz from the CXO branch. */
+static struct clk_freq_tbl ftbl_rbbmtimer_clk_src[] = {
+ F( 19200000, gpucc_cxo_clk, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk rbbmtimer_clk_src = {
+ .cmd_rcgr_reg = GPUCC_RBBMTIMER_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_rbbmtimer_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "rbbmtimer_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(MIN, 19200000),
+ CLK_INIT(rbbmtimer_clk_src.c),
+ },
+};
+
+/* GFX current-sense block: GPLL0-derived rates. */
+static struct clk_freq_tbl ftbl_gfx3d_isense_clk_src[] = {
+ F( 40000000, gpucc_gpll0, 15, 0, 0),
+ F( 200000000, gpucc_gpll0, 3, 0, 0),
+ F( 300000000, gpucc_gpll0, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk gfx3d_isense_clk_src = {
+ .cmd_rcgr_reg = GPUCC_GFX3D_ISENSE_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gfx3d_isense_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gfx3d_isense_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(MIN, 19200000, LOWER, 40000000,
+ LOW, 200000000, HIGH, 300000000),
+ CLK_INIT(gfx3d_isense_clk_src.c),
+ },
+};
+
+/* RBCPR (core power reduction) clock: XO or GPLL0/12. */
+static struct clk_freq_tbl ftbl_rbcpr_clk_src[] = {
+ F( 19200000, gpucc_cxo_clk, 1, 0, 0),
+ F( 50000000, gpucc_gpll0, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk rbcpr_clk_src = {
+ .cmd_rcgr_reg = GPUCC_RBCPR_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_rbcpr_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "rbcpr_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(MIN, 19200000, NOMINAL, 50000000),
+ CLK_INIT(rbcpr_clk_src.c),
+ },
+};
+
+/* Branch gates, one per RCG above. */
+static struct branch_clk gpucc_gfx3d_clk = {
+ .cbcr_reg = GPUCC_GFX3D_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base_gfx,
+ .c = {
+ .dbg_name = "gpucc_gfx3d_clk",
+ .parent = &gfx3d_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gpucc_gfx3d_clk.c),
+ },
+};
+
+static struct branch_clk gpucc_rbbmtimer_clk = {
+ .cbcr_reg = GPUCC_RBBMTIMER_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gpucc_rbbmtimer_clk",
+ .parent = &rbbmtimer_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gpucc_rbbmtimer_clk.c),
+ },
+};
+
+static struct branch_clk gpucc_gfx3d_isense_clk = {
+ .cbcr_reg = GPUCC_GFX3D_ISENSE_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gpucc_gfx3d_isense_clk",
+ .parent = &gfx3d_isense_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gpucc_gfx3d_isense_clk.c),
+ },
+};
+
+static struct branch_clk gpucc_rbcpr_clk = {
+ .cbcr_reg = GPUCC_RBCPR_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "gpucc_rbcpr_clk",
+ .parent = &rbcpr_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gpucc_rbcpr_clk.c),
+ },
+};
+
+/*
+ * Pseudo-clock used only to cast MX rail votes; fmax levels filled
+ * from DT (qcom,gfxfreq-mx-speedbin0) at probe.
+ */
+static struct fixed_clk gpucc_mx_clk = {
+ .c = {
+ .dbg_name = "gpucc_mx_clk",
+ .vdd_class = &vdd_gpucc_mx,
+ .ops = &clk_ops_dummy,
+ CLK_INIT(gpucc_mx_clk.c),
+ },
+};
+
+static int of_get_fmax_vdd_class(struct platform_device *pdev, struct clk *c,
+ char *prop_name)
+{
+ struct device_node *of = pdev->dev.of_node;
+ int prop_len, i, j;
+ struct clk_vdd_class *vdd = c->vdd_class;
+ int num = vdd->num_regulators + 1;
+ u32 *array;
+
+ if (!of_find_property(of, prop_name, &prop_len)) {
+ dev_err(&pdev->dev, "missing %s\n", prop_name);
+ return -EINVAL;
+ }
+
+ prop_len /= sizeof(u32);
+ if (prop_len % num) {
+ dev_err(&pdev->dev, "bad length %d\n", prop_len);
+ return -EINVAL;
+ }
+
+ prop_len /= num;
+ vdd->level_votes = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
+ GFP_KERNEL);
+ if (!vdd->level_votes)
+ return -ENOMEM;
+
+ vdd->vdd_uv = devm_kzalloc(&pdev->dev,
+ prop_len * sizeof(int) * (num - 1), GFP_KERNEL);
+ if (!vdd->vdd_uv)
+ return -ENOMEM;
+
+ c->fmax = devm_kzalloc(&pdev->dev, prop_len * sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!c->fmax)
+ return -ENOMEM;
+
+ array = devm_kzalloc(&pdev->dev,
+ prop_len * sizeof(u32) * num, GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ of_property_read_u32_array(of, prop_name, array, prop_len * num);
+ for (i = 0; i < prop_len; i++) {
+ c->fmax[i] = array[num * i];
+ for (j = 1; j < num; j++) {
+ vdd->vdd_uv[(num - 1) * i + (j - 1)] =
+ array[num * i + j];
+ }
+ }
+
+ devm_kfree(&pdev->dev, array);
+ vdd->num_levels = prop_len;
+ vdd->cur_level = prop_len;
+ c->num_fmax = prop_len;
+ return 0;
+}
+
+/* Debug mux for the GPUCC-domain clocks (measured via GCC debug path). */
+static struct mux_clk gpucc_gcc_dbg_clk = {
+ .ops = &mux_reg_ops,
+ .en_mask = BIT(16),
+ .mask = 0x3FF,
+ .offset = GPUCC_DEBUG_CLK_CTL,
+ .en_offset = GPUCC_DEBUG_CLK_CTL,
+ .base = &virt_base,
+ MUX_SRC_LIST(
+ { &gpucc_rbcpr_clk.c, 0x0003 },
+ { &gpucc_rbbmtimer_clk.c, 0x0005 },
+ { &gpucc_gfx3d_isense_clk.c, 0x000a },
+ ),
+ .c = {
+ .dbg_name = "gpucc_gcc_dbg_clk",
+ .ops = &clk_ops_gen_mux,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(gpucc_gcc_dbg_clk.c),
+ },
+};
+
+/*
+ * One-time probe-time sequence that enables the CRC block for the GFX
+ * rail: bring up both GDSCs and the graphics rail, run the gfx3d clock,
+ * program CRC_MND_CFG/CRC_SID_FSM, then tear everything back down.
+ * Order and delays follow hardware requirements (see inline comments).
+ * NOTE(review): fmax[2] assumes at least 3 DT-provided fmax levels —
+ * confirm against qcom,gfxfreq-speedbin0.
+ */
+static void enable_gfx_crc(void)
+{
+ u32 regval;
+
+ /* Set graphics clock at a safe frequency */
+ clk_set_rate(&gpucc_gfx3d_clk.c, gfx3d_clk_src.c.fmax[2]);
+ /* Turn on the GPU_CX GDSC */
+ regval = readl_relaxed(virt_base_gfx + GPU_CX_GDSCR_OFFSET);
+ regval &= ~SW_COLLAPSE_MASK;
+ writel_relaxed(regval, virt_base_gfx + GPU_CX_GDSCR_OFFSET);
+ /* Wait for 10usecs to let the GDSC turn ON */
+ mb();
+ udelay(10);
+ /* Turn on the Graphics rail */
+ if (regulator_enable(vdd_gpucc.regulator[0]))
+ pr_warn("Enabling the graphics rail during CRC sequence failed!\n");
+ /* Turn on the GPU_GX GDSC */
+ writel_relaxed(0x1, virt_base_gfx + GPU_GX_BCR);
+ /*
+ * BLK_ARES should be kept asserted for 1us before being de-asserted.
+ */
+ wmb();
+ udelay(1);
+ writel_relaxed(0x0, virt_base_gfx + GPU_GX_BCR);
+ regval = readl_relaxed(virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+ regval |= BIT(4);
+ writel_relaxed(regval, virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+ /* Keep reset asserted for at-least 1us before continuing. */
+ wmb();
+ udelay(1);
+ regval &= ~BIT(4);
+ writel_relaxed(regval, virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+ /* Make sure GMEM_RESET is de-asserted before continuing. */
+ wmb();
+ regval &= ~BIT(0);
+ writel_relaxed(regval, virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+ /* All previous writes should be done at this point */
+ wmb();
+ regval = readl_relaxed(virt_base_gfx + GPU_GX_GDSCR_OFFSET);
+ regval &= ~SW_COLLAPSE_MASK;
+ writel_relaxed(regval, virt_base_gfx + GPU_GX_GDSCR_OFFSET);
+ /* Wait for 10usecs to let the GDSC turn ON */
+ mb();
+ udelay(10);
+ /* Enable the graphics clock */
+ clk_prepare_enable(&gpucc_gfx3d_clk.c);
+ /* Enabling MND RC in Bypass mode */
+ writel_relaxed(0x00015010, virt_base_gfx + CRC_MND_CFG_OFFSET);
+ writel_relaxed(0x00800000, virt_base_gfx + CRC_SID_FSM_OFFSET);
+ /* Wait for 16 cycles before continuing */
+ udelay(1);
+ clk_set_rate(&gpucc_gfx3d_clk.c,
+ gfx3d_clk_src.c.fmax[gfx3d_clk_src.c.num_fmax - 1]);
+ /* Disable the graphics clock */
+ clk_disable_unprepare(&gpucc_gfx3d_clk.c);
+ /* Turn off the gpu_cx and gpu_gx GDSCs */
+ regval = readl_relaxed(virt_base_gfx + GPU_GX_GDSCR_OFFSET);
+ regval |= SW_COLLAPSE_MASK;
+ writel_relaxed(regval, virt_base_gfx + GPU_GX_GDSCR_OFFSET);
+ /* Write to disable GX GDSC should go through before continuing */
+ wmb();
+ regval = readl_relaxed(virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+ regval |= BIT(0);
+ writel_relaxed(regval, virt_base_gfx + GPUCC_GX_DOMAIN_MISC);
+ /* Make sure GMEM_CLAMP_IO is asserted before continuing. */
+ wmb();
+ regulator_disable(vdd_gpucc.regulator[0]);
+ regval = readl_relaxed(virt_base_gfx + GPU_CX_GDSCR_OFFSET);
+ regval |= SW_COLLAPSE_MASK;
+ writel_relaxed(regval, virt_base_gfx + GPU_CX_GDSCR_OFFSET);
+}
+
+/* Debug mux for the GFX-domain gfx3d clock (separate MMIO base). */
+static struct mux_clk gfxcc_dbg_clk = {
+ .ops = &mux_reg_ops,
+ .en_mask = BIT(16),
+ .mask = 0x3FF,
+ .offset = GPUCC_DEBUG_CLK_CTL,
+ .en_offset = GPUCC_DEBUG_CLK_CTL,
+ .base = &virt_base_gfx,
+ MUX_SRC_LIST(
+ { &gpucc_gfx3d_clk.c, 0x0008 },
+ ),
+ .c = {
+ .dbg_name = "gfxcc_dbg_clk",
+ .ops = &clk_ops_gen_mux,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(gfxcc_dbg_clk.c),
+ },
+};
+
+/* Lookup table registered by the gpucc probe (gfx3d clocks excluded). */
+static struct clk_lookup msm_clocks_gpucc_cobalt[] = {
+ CLK_LIST(gpucc_xo),
+ CLK_LIST(gpucc_gpll0),
+ CLK_LIST(gpucc_cxo_clk),
+ CLK_LIST(rbbmtimer_clk_src),
+ CLK_LIST(gfx3d_isense_clk_src),
+ CLK_LIST(rbcpr_clk_src),
+ CLK_LIST(gpucc_rbbmtimer_clk),
+ CLK_LIST(gpucc_gfx3d_isense_clk),
+ CLK_LIST(gpucc_rbcpr_clk),
+ CLK_LIST(gpucc_gcc_dbg_clk),
+};
+
+/* Hamster (vq) has no isense block: stub those clocks out. */
+static void msm_gpucc_hamster_fixup(void)
+{
+ gfx3d_isense_clk_src.c.ops = &clk_ops_dummy;
+ gpucc_gfx3d_isense_clk.c.ops = &clk_ops_dummy;
+}
+
+static struct platform_driver msm_clock_gfxcc_driver;
+/*
+ * Probe the GPU CC block: map registers, resolve external clocks and
+ * the digital rail, register all non-gfx3d clocks, then register the
+ * companion gfxcc driver (which handles the gfx3d/PLL clocks).
+ */
+int msm_gpucc_cobalt_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device_node *of_node = pdev->dev.of_node;
+ int rc;
+ struct regulator *reg;
+ u32 regval;
+ struct clk *tmp;
+ bool is_vq = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to retrieve register base\n");
+ return -ENOMEM;
+ }
+
+ virt_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!virt_base) {
+ dev_err(&pdev->dev, "Failed to map CC registers\n");
+ return -ENOMEM;
+ }
+
+ reg = vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
+ if (IS_ERR(reg)) {
+ if (PTR_ERR(reg) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get vdd_dig regulator\n");
+ return PTR_ERR(reg);
+ }
+
+ tmp = gpucc_xo.c.parent = devm_clk_get(&pdev->dev, "xo_ao");
+ if (IS_ERR(tmp)) {
+ if (PTR_ERR(tmp) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get xo_ao clock\n");
+ return PTR_ERR(tmp);
+ }
+
+ tmp = gpucc_gpll0.c.parent = devm_clk_get(&pdev->dev, "gpll0");
+ if (IS_ERR(tmp)) {
+ if (PTR_ERR(tmp) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get gpll0 clock\n");
+ return PTR_ERR(tmp);
+ }
+
+ /* Clear the DBG_CLK_DIV bits of the GPU debug register */
+ regval = readl_relaxed(virt_base + gpucc_gcc_dbg_clk.offset);
+ regval &= ~BM(18, 17);
+ writel_relaxed(regval, virt_base + gpucc_gcc_dbg_clk.offset);
+
+ is_vq = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,gpucc-hamster");
+ if (is_vq)
+ msm_gpucc_hamster_fixup();
+
+ rc = of_msm_clock_register(of_node, msm_clocks_gpucc_cobalt,
+ ARRAY_SIZE(msm_clocks_gpucc_cobalt));
+ if (rc)
+ return rc;
+
+ /*
+ * gpucc_cxo_clk works as the root clock for all GPUCC RCGs and GDSCs.
+ * Keep it enabled always.
+ */
+ clk_prepare_enable(&gpucc_cxo_clk.c);
+
+ dev_info(&pdev->dev, "Registered GPU clocks (barring gfx3d clocks)\n");
+ return platform_driver_register(&msm_clock_gfxcc_driver);
+}
+
+static const struct of_device_id msm_clock_gpucc_match_table[] = {
+ { .compatible = "qcom,gpucc-cobalt" },
+ { .compatible = "qcom,gpucc-cobalt-v2" },
+ { .compatible = "qcom,gpucc-hamster" },
+ {},
+};
+
+static struct platform_driver msm_clock_gpucc_driver = {
+ .probe = msm_gpucc_cobalt_probe,
+ .driver = {
+ .name = "qcom,gpucc-cobalt",
+ .of_match_table = msm_clock_gpucc_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+/* Lookup table registered by the gfxcc probe: PLLs and gfx3d clocks. */
+static struct clk_lookup msm_clocks_gfxcc_cobalt[] = {
+ CLK_LIST(gpu_pll0_pll),
+ CLK_LIST(gpu_pll0_pll_out_even),
+ CLK_LIST(gpu_pll0_pll_out_odd),
+ CLK_LIST(gpu_pll1_pll),
+ CLK_LIST(gpu_pll1_pll_out_even),
+ CLK_LIST(gpu_pll1_pll_out_odd),
+ CLK_LIST(gfx3d_clk_src),
+ CLK_LIST(gpucc_gfx3d_clk),
+ CLK_LIST(gpucc_mx_clk),
+ CLK_LIST(gfxcc_dbg_clk),
+};
+
+/* Hamster (vq): raise PLL fmax and swap in the vq frequency table. */
+static void msm_gfxcc_hamster_fixup(void)
+{
+ gpu_pll0_pll.c.fmax[VDD_DIG_NOMINAL] = 1420000500;
+ gpu_pll1_pll.c.fmax[VDD_DIG_NOMINAL] = 1420000500;
+ gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_vq;
+}
+
+/* v2 silicon: raise PLL fmax and swap in the v2 frequency table. */
+static void msm_gfxcc_cobalt_v2_fixup(void)
+{
+ gpu_pll0_pll.c.fmax[VDD_DIG_NOMINAL] = 1420000500;
+ gpu_pll1_pll.c.fmax[VDD_DIG_NOMINAL] = 1420000500;
+ gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_v2;
+}
+
+/*
+ * Probe the GFX CC block: map its registers, resolve the three GPU
+ * rails, load speed-bin fmax tables from DT, apply silicon-specific
+ * fixups, register the gfx3d/PLL clocks, and run the CRC enable
+ * sequence.
+ */
+int msm_gfxcc_cobalt_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device_node *of_node = pdev->dev.of_node;
+ int rc;
+ struct regulator *reg;
+ bool is_v2 = 0, is_vq = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to retrieve register base\n");
+ return -ENOMEM;
+ }
+
+ virt_base_gfx = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!virt_base_gfx) {
+ dev_err(&pdev->dev, "Failed to map CC registers\n");
+ return -ENOMEM;
+ }
+
+ reg = vdd_gpucc.regulator[0] = devm_regulator_get(&pdev->dev,
+ "vdd_gpucc");
+ if (IS_ERR(reg)) {
+ if (PTR_ERR(reg) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get vdd_gpucc regulator\n");
+ return PTR_ERR(reg);
+ }
+
+ reg = vdd_gpucc.regulator[1] = devm_regulator_get(&pdev->dev, "vdd_mx");
+ if (IS_ERR(reg)) {
+ if (PTR_ERR(reg) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get vdd_mx regulator\n");
+ return PTR_ERR(reg);
+ }
+
+ reg = vdd_gpucc_mx.regulator[0] = devm_regulator_get(&pdev->dev,
+ "vdd_gpu_mx");
+ if (IS_ERR(reg)) {
+ if (PTR_ERR(reg) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get vdd_gpu_mx regulator\n");
+ return PTR_ERR(reg);
+ }
+
+ rc = of_get_fmax_vdd_class(pdev, &gfx3d_clk_src.c,
+ "qcom,gfxfreq-speedbin0");
+ if (rc) {
+ dev_err(&pdev->dev, "Can't get freq-corner mapping for gfx3d_clk_src\n");
+ return rc;
+ }
+
+ rc = of_get_fmax_vdd_class(pdev, &gpucc_mx_clk.c,
+ "qcom,gfxfreq-mx-speedbin0");
+ if (rc) {
+ dev_err(&pdev->dev, "Can't get freq-corner mapping for gpucc_mx_clk\n");
+ return rc;
+ }
+
+ is_v2 = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,gfxcc-cobalt-v2");
+ if (is_v2)
+ msm_gfxcc_cobalt_v2_fixup();
+
+ is_vq = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,gfxcc-hamster");
+ if (is_vq)
+ msm_gfxcc_hamster_fixup();
+
+ rc = of_msm_clock_register(of_node, msm_clocks_gfxcc_cobalt,
+ ARRAY_SIZE(msm_clocks_gfxcc_cobalt));
+ if (rc)
+ return rc;
+
+ enable_gfx_crc();
+
+ /*
+ * Force periph logic to be ON since after NAP, the value of the perf
+ * counter might be corrupted frequently.
+ */
+ clk_set_flags(&gpucc_gfx3d_clk.c, CLKFLAG_RETAIN_PERIPH);
+
+ dev_info(&pdev->dev, "Completed registering all GPU clocks\n");
+
+ return 0;
+}
+
+static const struct of_device_id msm_clock_gfxcc_match_table[] = {
+ { .compatible = "qcom,gfxcc-cobalt" },
+ { .compatible = "qcom,gfxcc-cobalt-v2" },
+ { .compatible = "qcom,gfxcc-hamster" },
+ {},
+};
+
+static struct platform_driver msm_clock_gfxcc_driver = {
+ .probe = msm_gfxcc_cobalt_probe,
+ .driver = {
+ .name = "qcom,gfxcc-cobalt",
+ .of_match_table = msm_clock_gfxcc_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+/*
+ * Only the gpucc driver is registered here; it registers the gfxcc
+ * driver at the end of its own probe so gfx clocks come up second.
+ */
+int __init msm_gpucc_cobalt_init(void)
+{
+ return platform_driver_register(&msm_clock_gpucc_driver);
+}
+arch_initcall(msm_gpucc_cobalt_init);
diff --git a/drivers/clk/msm/clock-local2.c b/drivers/clk/msm/clock-local2.c
new file mode 100644
index 000000000000..55fa76046def
--- /dev/null
+++ b/drivers/clk/msm/clock-local2.c
@@ -0,0 +1,2973 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+/*
+ * When enabling/disabling a clock, check the halt bit up to this number
+ * of times (with a 1 us delay in between) before continuing.
+ */
+#define HALT_CHECK_MAX_LOOPS 500
+/* For clocks without halt checking, wait this long after enables/disables. */
+#define HALT_CHECK_DELAY_US 500
+
+#define RCG_FORCE_DISABLE_DELAY_US 100
+
+/*
+ * When updating an RCG configuration, check the update bit up to this number
+ * of times (with a 1 us delay in between) before continuing.
+ */
+#define UPDATE_CHECK_MAX_LOOPS 500
+
+/* Serializes every read-modify-write of clock-controller registers below. */
+DEFINE_SPINLOCK(local_clock_reg_lock);
+/* Sentinel for rcg->current_freq meaning "no rate has been set yet". */
+struct clk_freq_tbl rcg_dummy_freq = F_END;
+
+#define CMD_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg)
+#define CFG_RCGR_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x4)
+#define M_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x8)
+#define N_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0xC)
+#define D_REG(x) (*(x)->base + (x)->cmd_rcgr_reg + 0x10)
+#define CBCR_REG(x) (*(x)->base + (x)->cbcr_reg)
+#define BCR_REG(x) (*(x)->base + (x)->bcr_reg)
+#define RST_REG(x) (*(x)->base + (x)->reset_reg)
+#define VOTE_REG(x) (*(x)->base + (x)->vote_reg)
+#define GATE_EN_REG(x) (*(x)->base + (x)->en_reg)
+#define DIV_REG(x) (*(x)->base + (x)->offset)
+#define MUX_REG(x) (*(x)->base + (x)->offset)
+
+/*
+ * Important clock bit positions and masks
+ */
+#define CMD_RCGR_ROOT_ENABLE_BIT BIT(1)
+#define CBCR_BRANCH_ENABLE_BIT BIT(0)
+#define CBCR_BRANCH_OFF_BIT BIT(31)
+#define CMD_RCGR_CONFIG_UPDATE_BIT BIT(0)
+#define CMD_RCGR_ROOT_STATUS_BIT BIT(31)
+#define BCR_BLK_ARES_BIT BIT(0)
+#define CBCR_HW_CTL_BIT BIT(1)
+#define CFG_RCGR_DIV_MASK BM(4, 0)
+#define CFG_RCGR_SRC_SEL_MASK BM(10, 8)
+#define MND_MODE_MASK BM(13, 12)
+#define MND_DUAL_EDGE_MODE_BVAL BVAL(13, 12, 0x2)
+#define CMD_RCGR_CONFIG_DIRTY_MASK BM(7, 4)
+#define CBCR_CDIV_LSB 16
+#define CBCR_CDIV_MSB 19
+
+/* Branch state polled for by branch_clk_halt_check(). */
+enum branch_state {
+	BRANCH_ON,
+	BRANCH_OFF,
+};
+
+/*
+ * Safe XO (19.2 MHz crystal) configuration used to park an RCG whose
+ * downstream clocks are controlled by another master.
+ */
+static struct clk_freq_tbl cxo_f = {
+	.freq_hz = 19200000,
+	.m_val = 0,
+	.n_val = 0,
+	.d_val = 0,
+	.div_src_val = 0,
+};
+
+/* Maps a raw divider register field value to the divider it selects. */
+struct div_map {
+	u32 mask;
+	int div;
+};
+
+/*
+ * RCG functions
+ */
+
+/*
+ * Update an RCG with a new configuration. This may include a new M, N, or D
+ * value, source selection or pre-divider value.
+ *
+ */
+static void rcg_update_config(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+	int count = UPDATE_CHECK_MAX_LOOPS;
+
+	/* RCGs shared with another master may need a longer poll budget. */
+	if (rcg->non_local_control_timeout)
+		count = rcg->non_local_control_timeout;
+
+	/* Latch the staged CFG/M/N/D values by setting the update bit. */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	cmd_rcgr_regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
+	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
+
+	/* Wait for update to take effect */
+	for (; count > 0; count--) {
+		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
+				CMD_RCGR_CONFIG_UPDATE_BIT))
+			return;
+		udelay(1);
+	}
+
+	CLK_WARN(&rcg->c, count == 0, "rcg didn't update its configuration.");
+}
+
+/* Poll the root status bit until the RCG reports it is running. */
+static void rcg_on_check(struct rcg_clk *rcg)
+{
+	int count = UPDATE_CHECK_MAX_LOOPS;
+
+	/* RCGs shared with another master may need a longer poll budget. */
+	if (rcg->non_local_control_timeout)
+		count = rcg->non_local_control_timeout;
+
+	/* Wait for RCG to turn on */
+	for (; count > 0; count--) {
+		if (!(readl_relaxed(CMD_RCGR_REG(rcg)) &
+				CMD_RCGR_ROOT_STATUS_BIT))
+			return;
+		udelay(1);
+	}
+	CLK_WARN(&rcg->c, count == 0, "rcg didn't turn on.");
+}
+
+/* RCG set rate function for clocks with Half Integer Dividers. */
+static void __set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	u32 cfg_regval;
+
+	/* Stage the new source select and divider, then latch them. */
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
+	cfg_regval |= nf->div_src_val;
+	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));
+
+	rcg_update_config(rcg);
+}
+
+/* Locked wrapper around __set_rate_hid(). */
+void set_rate_hid(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
+	__set_rate_hid(rcg, nf);
+	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);
+}
+
+/* RCG set rate function for clocks with MND & Half Integer Dividers. */
+static void __set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	u32 cfg_regval;
+
+	/*
+	 * Stage the M/N/D counter values first; the original code also
+	 * performed a CFG read here whose result was discarded and
+	 * re-read below -- that dead read has been dropped.
+	 */
+	writel_relaxed(nf->m_val, M_REG(rcg));
+	writel_relaxed(nf->n_val, N_REG(rcg));
+	writel_relaxed(nf->d_val, D_REG(rcg));
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	cfg_regval &= ~(CFG_RCGR_DIV_MASK | CFG_RCGR_SRC_SEL_MASK);
+	cfg_regval |= nf->div_src_val;
+
+	/* Activate or disable the M/N:D divider as necessary */
+	cfg_regval &= ~MND_MODE_MASK;
+	if (nf->n_val != 0)
+		cfg_regval |= MND_DUAL_EDGE_MODE_BVAL;
+	writel_relaxed(cfg_regval, CFG_RCGR_REG(rcg));
+
+	rcg_update_config(rcg);
+}
+
+/* Locked wrapper around __set_rate_mnd(). */
+void set_rate_mnd(struct rcg_clk *rcg, struct clk_freq_tbl *nf)
+{
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
+	__set_rate_mnd(rcg, nf);
+	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);
+}
+
+/* Force the RCG root on (independent of branch feedback) and wait for it. */
+static void rcg_set_force_enable(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	cmd_rcgr_regval |= CMD_RCGR_ROOT_ENABLE_BIT;
+	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
+	/* Poll the root status inside the lock before returning. */
+	rcg_on_check(rcg);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+/* Drop the force-enable vote on the RCG root and give it time to settle. */
+static void rcg_clear_force_enable(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	cmd_rcgr_regval &= ~CMD_RCGR_ROOT_ENABLE_BIT;
+	writel_relaxed(cmd_rcgr_regval, CMD_RCGR_REG(rcg));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+	/* Add a delay of 100usecs to let the RCG disable */
+	udelay(RCG_FORCE_DISABLE_DELAY_US);
+}
+
+/*
+ * Enable callback for RCG clocks.  For RCGs whose children are controlled
+ * by other masters, restore the cached source/divider configuration
+ * (the disable path parks such RCGs on CXO).  Always returns 0.
+ */
+static int rcg_clk_enable(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	WARN(rcg->current_freq == &rcg_dummy_freq,
+		"Attempting to prepare %s before setting its rate. "
+		"Set the rate first!\n", rcg->c.dbg_name);
+
+	/* Some RCGs are simply kept force-enabled while in use. */
+	if (rcg->force_enable_rcgr) {
+		rcg_set_force_enable(rcg);
+		return 0;
+	}
+
+	if (!rcg->non_local_children || rcg->current_freq == &rcg_dummy_freq)
+		return 0;
+	/*
+	 * Switch from CXO to saved mux value. Force enable/disable while
+	 * switching. The current parent is already prepared and enabled
+	 * at this point, and the CXO source is always-on. Therefore the
+	 * RCG can safely execute a dynamic switch.
+	 */
+	rcg_set_force_enable(rcg);
+	rcg->set_rate(rcg, rcg->current_freq);
+	rcg_clear_force_enable(rcg);
+
+	return 0;
+}
+
+/*
+ * Disable callback for RCG clocks.  Mirrors rcg_clk_enable(): either
+ * drops the force-enable vote, or parks RCGs with non-local children on
+ * the always-on CXO source (the configured rate stays cached in
+ * rcg->current_freq for the next enable).
+ */
+static void rcg_clk_disable(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	if (rcg->force_enable_rcgr) {
+		rcg_clear_force_enable(rcg);
+		return;
+	}
+
+	if (!rcg->non_local_children)
+		return;
+
+	/*
+	 * Save mux select and switch to CXO. Force enable/disable while
+	 * switching. The current parent is still prepared and enabled at this
+	 * point, and the CXO source is always-on. Therefore the RCG can safely
+	 * execute a dynamic switch.
+	 */
+	rcg_set_force_enable(rcg);
+	rcg->set_rate(rcg, &cxo_f);
+	rcg_clear_force_enable(rcg);
+}
+
+/*
+ * Take prepare/enable references on both the current (@curr) and new
+ * (@new) parent sources of @c ahead of a rate switch.  @new receives one
+ * temporary reference plus one extra reference for each reference @c
+ * itself holds (prepare_count / count); after the switch reparents @c,
+ * disable_unprepare_rcg_srcs() drops the mirrored references from @curr,
+ * so the net effect is that @c's own references migrate from the old
+ * parent to the new one.  On success, returns 0 with c->lock held and
+ * the IRQ state saved in *@flags.
+ */
+static int prepare_enable_rcg_srcs(struct clk *c, struct clk *curr,
+				struct clk *new, unsigned long *flags)
+{
+	int rc;
+
+	/* Temporary reference on the current parent. */
+	rc = clk_prepare(curr);
+	if (rc)
+		return rc;
+
+	/* Reference on @new that mirrors @c's own prepare reference. */
+	if (c->prepare_count) {
+		rc = clk_prepare(new);
+		if (rc)
+			goto err_new_src_prepare;
+	}
+
+	/* Temporary reference on the new parent. */
+	rc = clk_prepare(new);
+	if (rc)
+		goto err_new_src_prepare2;
+
+	spin_lock_irqsave(&c->lock, *flags);
+	rc = clk_enable(curr);
+	if (rc) {
+		spin_unlock_irqrestore(&c->lock, *flags);
+		goto err_curr_src_enable;
+	}
+
+	/* Enable reference on @new mirroring @c's own enable count. */
+	if (c->count) {
+		rc = clk_enable(new);
+		if (rc) {
+			spin_unlock_irqrestore(&c->lock, *flags);
+			goto err_new_src_enable;
+		}
+	}
+
+	rc = clk_enable(new);
+	if (rc) {
+		spin_unlock_irqrestore(&c->lock, *flags);
+		goto err_new_src_enable2;
+	}
+	return 0;
+
+err_new_src_enable2:
+	if (c->count)
+		clk_disable(new);
+err_new_src_enable:
+	clk_disable(curr);
+err_curr_src_enable:
+	clk_unprepare(new);
+err_new_src_prepare2:
+	if (c->prepare_count)
+		clk_unprepare(new);
+err_new_src_prepare:
+	clk_unprepare(curr);
+	return rc;
+}
+
+/*
+ * Drop the references taken by prepare_enable_rcg_srcs() after the
+ * reparent: the temporary references on @new and @curr are released, and
+ * the references mirroring @c's own counts are dropped from @curr (the
+ * old parent), completing the reference handover to @new.  Called with
+ * c->lock held; releases it.
+ */
+static void disable_unprepare_rcg_srcs(struct clk *c, struct clk *curr,
+				struct clk *new, unsigned long *flags)
+{
+	clk_disable(new);
+	clk_disable(curr);
+	if (c->count)
+		clk_disable(curr);
+	spin_unlock_irqrestore(&c->lock, *flags);
+
+	clk_unprepare(new);
+	clk_unprepare(curr);
+	if (c->prepare_count)
+		clk_unprepare(curr);
+}
+
+/*
+ * Set @c to @rate by looking the rate up in the RCG's frequency table,
+ * pre-setting the source clock's rate if needed, switching the RCG onto
+ * the new source/divider, and reparenting @c.  Both the old and new
+ * sources are kept enabled around the switch so the glitch-free mux
+ * never loses its input.  Returns -EINVAL for rates not in the table.
+ */
+static int rcg_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct clk_freq_tbl *cf, *nf;
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	int rc;
+	unsigned long flags;
+
+	/* Find the table entry for the requested rate (exact match). */
+	for (nf = rcg->freq_tbl; nf->freq_hz != FREQ_END
+			&& nf->freq_hz != rate; nf++)
+		;
+
+	if (nf->freq_hz == FREQ_END)
+		return -EINVAL;
+
+	cf = rcg->current_freq;
+	if (nf->src_freq != FIXED_CLK_SRC) {
+		rc = clk_set_rate(nf->src_clk, nf->src_freq);
+		if (rc)
+			return rc;
+	}
+
+	if (rcg->non_local_control_timeout) {
+		/*
+		 * __clk_pre_reparent only enables the RCG source if the SW
+		 * count for the RCG is non-zero. We need to make sure that
+		 * both PLL sources are ON before force turning on the RCG.
+		 */
+		rc = prepare_enable_rcg_srcs(c, cf->src_clk, nf->src_clk,
+								&flags);
+	} else
+		rc = __clk_pre_reparent(c, nf->src_clk, &flags);
+
+	if (rc)
+		return rc;
+
+	BUG_ON(!rcg->set_rate);
+
+	/* Perform clock-specific frequency switch operations. */
+	if ((rcg->non_local_children && c->count) ||
+			rcg->non_local_control_timeout) {
+		/*
+		 * Force enable the RCG before updating the RCG configuration
+		 * since the downstream clock/s can be disabled at around the
+		 * same time causing the feedback from the CBCR to turn off
+		 * the RCG.
+		 */
+		rcg_set_force_enable(rcg);
+		rcg->set_rate(rcg, nf);
+		rcg_clear_force_enable(rcg);
+	} else if (!rcg->non_local_children) {
+		rcg->set_rate(rcg, nf);
+	}
+
+	/*
+	 * If non_local_children is set and the RCG is not enabled,
+	 * the following operations switch parent in software and cache
+	 * the frequency. The mux switch will occur when the RCG is enabled.
+	 */
+	rcg->current_freq = nf;
+	c->parent = nf->src_clk;
+
+	if (rcg->non_local_control_timeout)
+		disable_unprepare_rcg_srcs(c, cf->src_clk, nf->src_clk,
+								&flags);
+	else
+		__clk_post_reparent(c, cf->src_clk, &flags);
+
+	return 0;
+}
+
+/*
+ * Return the smallest supported rate that is >= @rate, or the highest
+ * supported rate when @rate exceeds every frequency-table entry.
+ */
+static long rcg_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	struct clk_freq_tbl *entry = rcg->freq_tbl;
+
+	while (entry->freq_hz != FREQ_END) {
+		if (entry->freq_hz >= rate)
+			return entry->freq_hz;
+		entry++;
+	}
+
+	/* Ran off the end: the previous entry is the table maximum. */
+	return entry[-1].freq_hz;
+}
+
+/*
+ * Return the nth supported frequency for a given clock, or -ENXIO when
+ * the table is absent/empty or @n is past the end.  The original code
+ * indexed the table with an unchecked @n, reading out of bounds for
+ * large values; walk the table up to F_END instead.
+ */
+static long rcg_clk_list_rate(struct clk *c, unsigned n)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	struct clk_freq_tbl *f;
+	unsigned i = 0;
+
+	if (!rcg->freq_tbl || rcg->freq_tbl->freq_hz == FREQ_END)
+		return -ENXIO;
+
+	for (f = rcg->freq_tbl; f->freq_hz != FREQ_END; f++, i++)
+		if (i == n)
+			return f->freq_hz;
+
+	return -ENXIO;
+}
+
+/*
+ * Reverse-map the RCG's current hardware state (source select, divider
+ * and, when @has_mnd, the M/N/D counters) onto an entry of its frequency
+ * table.  When @match_rate is false only the source select must match.
+ * On success caches the entry in rcg->current_freq and returns its
+ * source clock; returns NULL when the state cannot be recognized.
+ */
+static struct clk *_rcg_clk_get_parent(struct rcg_clk *rcg, bool has_mnd,
+								bool match_rate)
+{
+	u32 n_regval = 0, m_regval = 0, d_regval = 0;
+	u32 cfg_regval, div, div_regval;
+	struct clk_freq_tbl *freq;
+	u32 cmd_rcgr_regval;
+
+	if (!rcg->freq_tbl) {
+		WARN(1, "No frequency table present for rcg %s\n",
+							rcg->c.dbg_name);
+		return NULL;
+	}
+
+	/* Is there a pending configuration? */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK) {
+		WARN(1, "Pending transaction for rcg %s\n", rcg->c.dbg_name);
+		return NULL;
+	}
+
+	/* Get values of m, n, d, div and src_sel registers. */
+	if (has_mnd) {
+		m_regval = readl_relaxed(M_REG(rcg));
+		n_regval = readl_relaxed(N_REG(rcg));
+		d_regval = readl_relaxed(D_REG(rcg));
+
+		/*
+		 * The n and d values stored in the frequency tables are sign
+		 * extended to 32 bits. The n and d values in the registers are
+		 * sign extended to 8 or 16 bits. Sign extend the values read
+		 * from the registers so that they can be compared to the
+		 * values in the frequency tables.
+		 */
+		n_regval |= (n_regval >> 8) ? BM(31, 16) : BM(31, 8);
+		d_regval |= (d_regval >> 8) ? BM(31, 16) : BM(31, 8);
+	}
+
+	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));
+	cfg_regval &= CFG_RCGR_SRC_SEL_MASK | CFG_RCGR_DIV_MASK
+				| MND_MODE_MASK;
+
+	/* If mnd counter is present, check if it's in use. */
+	has_mnd = (has_mnd) &&
+		((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL);
+
+	/*
+	 * Clear out the mn counter mode bits since we now want to compare only
+	 * the source mux selection and pre-divider values in the registers.
+	 */
+	cfg_regval &= ~MND_MODE_MASK;
+
+	/* Figure out what rate the rcg is running at */
+	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
+		/* source select does not match */
+		if ((freq->div_src_val & CFG_RCGR_SRC_SEL_MASK)
+				!= (cfg_regval & CFG_RCGR_SRC_SEL_MASK))
+			continue;
+		/*
+		 * Stop if we found the required parent in the frequency table
+		 * and only care if the source matches but dont care if the
+		 * frequency matches
+		 */
+		if (!match_rate)
+			break;
+		/* divider does not match */
+		div = freq->div_src_val & CFG_RCGR_DIV_MASK;
+		div_regval = cfg_regval & CFG_RCGR_DIV_MASK;
+		if (div != div_regval && (div > 1 || div_regval > 1))
+			continue;
+
+		if (has_mnd) {
+			if (freq->m_val != m_regval)
+				continue;
+			if (freq->n_val != n_regval)
+				continue;
+			if (freq->d_val != d_regval)
+				continue;
+		} else if (freq->n_val) {
+			continue;
+		}
+		break;
+	}
+
+	/* No known frequency found */
+	if (freq->freq_hz == FREQ_END) {
+		/*
+		 * If we can't recognize the frequency and non_local_children is
+		 * set, switch to safe frequency. It is assumed the current
+		 * parent has been turned on by the bootchain if the RCG is on.
+		 */
+		if (rcg->non_local_children) {
+			rcg->set_rate(rcg, &cxo_f);
+			WARN(1, "don't recognize rcg frequency for %s\n",
+				rcg->c.dbg_name);
+		}
+		return NULL;
+	}
+
+	rcg->current_freq = freq;
+	return freq->src_clk;
+}
+
+/*
+ * Common handoff helper: adopt the rate discovered by the get_parent
+ * callback (which runs first and fills rcg->current_freq) and report
+ * whether the bootloader left the RCG root running.
+ */
+static enum handoff _rcg_clk_handoff(struct rcg_clk *rcg)
+{
+	u32 cmd_rcgr_regval;
+
+	if (rcg->current_freq && rcg->current_freq->freq_hz != FREQ_END)
+		rcg->c.rate = rcg->current_freq->freq_hz;
+
+	/* Is the root enabled? */
+	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+	if ((cmd_rcgr_regval & CMD_RCGR_ROOT_STATUS_BIT))
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+/* Display RCGs: match on source select only, ignoring rate and M/N/D. */
+static struct clk *display_clk_get_parent(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	return _rcg_clk_get_parent(rcg, false, false);
+}
+
+/* MND-equipped RCGs: match source, divider and M/N/D counters. */
+static struct clk *rcg_mnd_clk_get_parent(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	return _rcg_clk_get_parent(rcg, true, true);
+}
+
+/* Plain HID RCGs: match source and divider, no M/N/D counters. */
+static struct clk *rcg_clk_get_parent(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	return _rcg_clk_get_parent(rcg, false, true);
+}
+
+static enum handoff rcg_mnd_clk_handoff(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	return _rcg_clk_handoff(rcg);
+}
+
+static enum handoff rcg_clk_handoff(struct clk *c)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+
+	return _rcg_clk_handoff(rcg);
+}
+
+/*
+ * Debugfs register-dump hook for HID RCGs: one bank (n == 0) exposing
+ * the CMD and CFG registers relative to the returned base.
+ */
+static void __iomem *rcg_hid_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	static struct clk_register_data data[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return CMD_RCGR_REG(rcg);
+}
+
+/*
+ * Debugfs register-dump hook for MND RCGs: one bank (n == 0) exposing
+ * CMD/CFG plus the M, N and D counter registers.
+ */
+static void __iomem *rcg_mnd_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct rcg_clk *rcg = to_rcg_clk(c);
+	static struct clk_register_data data[] = {
+		{"CMD_RCGR", 0x0},
+		{"CFG_RCGR", 0x4},
+		{"M_VAL", 0x8},
+		{"N_VAL", 0xC},
+		{"D_VAL", 0x10},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return CMD_RCGR_REG(rcg);
+}
+
+#define BRANCH_CHECK_MASK BM(31, 28)
+#define BRANCH_ON_VAL BVAL(31, 28, 0x0)
+#define BRANCH_OFF_VAL BVAL(31, 28, 0x8)
+#define BRANCH_NOC_FSM_ON_VAL BVAL(31, 28, 0x2)
+
+/*
+ * Branch clock functions
+ */
+/*
+ * Wait for a branch to reach @br_status after an enable/disable write.
+ * DELAY/HALT_VOTED branches get a fixed delay; HALT branches poll the
+ * status field in the CBCR and warn if it never settles.
+ */
+static void branch_clk_halt_check(struct clk *c, u32 halt_check,
+			void __iomem *cbcr_reg, enum branch_state br_status)
+{
+	/* The "stuck" state is the opposite of the one we are waiting for. */
+	char *status_str = (br_status == BRANCH_ON) ? "off" : "on";
+
+	/*
+	 * Use a memory barrier since some halt status registers are
+	 * not within the same 1K segment as the branch/root enable
+	 * registers. It's also needed in the udelay() case to ensure
+	 * the delay starts after the branch disable.
+	 */
+	mb();
+
+	if (halt_check == DELAY || halt_check == HALT_VOTED) {
+		udelay(HALT_CHECK_DELAY_US);
+	} else if (halt_check == HALT) {
+		int count;
+		u32 val;
+		for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
+			val = readl_relaxed(cbcr_reg);
+			val &= BRANCH_CHECK_MASK;
+			switch (br_status) {
+			case BRANCH_ON:
+				if (val == BRANCH_ON_VAL
+					|| val == BRANCH_NOC_FSM_ON_VAL)
+					return;
+				break;
+
+			case BRANCH_OFF:
+				if (val == BRANCH_OFF_VAL)
+					return;
+				break;
+			};
+			udelay(1);
+		}
+		CLK_WARN(c, count == 0, "status stuck %s", status_str);
+	}
+}
+
+/*
+ * Return the highest rate requested by any prepared branch hanging off
+ * @parent (callers hold branch_clk_lock so is_prepared is stable).
+ */
+static unsigned long branch_clk_aggregate_rate(const struct clk *parent)
+{
+	unsigned long max_rate = 0;
+	struct clk *child;
+
+	list_for_each_entry(child, &parent->children, siblings) {
+		struct branch_clk *br = to_branch_clk(child);
+
+		if (br->is_prepared)
+			max_rate = max(child->rate, max_rate);
+	}
+	return max_rate;
+}
+
+/*
+ * Apply a CLKFLAG_* memory/peripheral retention setting to the CBCR at
+ * @regaddr.  Bits 12-14 control PERIPH_OFF / RETAIN_PERIPH / RETAIN_MEM;
+ * setting a retention bit is followed by a 1 us settle delay.  Returns
+ * -EINVAL for unknown flags (the register is still written back
+ * unchanged in that case).
+ */
+static int cbcr_set_flags(void * __iomem regaddr, unsigned flags)
+{
+	u32 cbcr_val;
+	unsigned long irq_flags;
+	int delay_us = 0, ret = 0;
+
+	spin_lock_irqsave(&local_clock_reg_lock, irq_flags);
+	cbcr_val = readl_relaxed(regaddr);
+	switch (flags) {
+	case CLKFLAG_PERIPH_OFF_SET:
+		cbcr_val |= BIT(12);
+		delay_us = 1;
+		break;
+	case CLKFLAG_PERIPH_OFF_CLEAR:
+		cbcr_val &= ~BIT(12);
+		break;
+	case CLKFLAG_RETAIN_PERIPH:
+		cbcr_val |= BIT(13);
+		delay_us = 1;
+		break;
+	case CLKFLAG_NORETAIN_PERIPH:
+		cbcr_val &= ~BIT(13);
+		break;
+	case CLKFLAG_RETAIN_MEM:
+		cbcr_val |= BIT(14);
+		delay_us = 1;
+		break;
+	case CLKFLAG_NORETAIN_MEM:
+		cbcr_val &= ~BIT(14);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	writel_relaxed(cbcr_val, regaddr);
+	/* Make sure power is enabled before returning. */
+	mb();
+	udelay(delay_us);
+
+	spin_unlock_irqrestore(&local_clock_reg_lock, irq_flags);
+
+	return ret;
+}
+
+/* Forward a CLKFLAG_* retention request to this branch's CBCR. */
+static int branch_clk_set_flags(struct clk *c, unsigned flags)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+
+	return cbcr_set_flags(CBCR_REG(branch), flags);
+}
+
+static DEFINE_MUTEX(branch_clk_lock);
+
+/*
+ * Prepare callback for branches that aggregate sibling rate votes:
+ * raise the parent's rate before this branch starts drawing the clock
+ * if our cached vote (c->rate) exceeds the current aggregate of the
+ * other prepared siblings.  is_prepared is cleared while we adjust so
+ * branch_clk_aggregate_rate() excludes our own vote.
+ */
+static int branch_clk_prepare(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	unsigned long curr_rate;
+	int ret = 0;
+
+	if (!branch->aggr_sibling_rates)
+		return ret;
+
+	mutex_lock(&branch_clk_lock);
+	branch->is_prepared = false;
+	curr_rate = branch_clk_aggregate_rate(c->parent);
+	if (c->rate > curr_rate) {
+		ret = clk_set_rate(c->parent, c->rate);
+		if (ret)
+			goto exit;
+	}
+	branch->is_prepared = true;
+exit:
+	mutex_unlock(&branch_clk_lock);
+	return ret;
+}
+
+/*
+ * Enable a branch: optionally restore memory/periph retention, set the
+ * CBCR enable bit, and wait for the halt status to confirm the branch
+ * is running.  Always returns 0.
+ */
+static int branch_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	u32 cbcr_val;
+	struct branch_clk *branch = to_branch_clk(c);
+
+	/* Re-assert retention that branch_clk_disable() dropped. */
+	if (branch->toggle_memory) {
+		branch_clk_set_flags(c, CLKFLAG_RETAIN_MEM);
+		branch_clk_set_flags(c, CLKFLAG_RETAIN_PERIPH);
+	}
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cbcr_val = readl_relaxed(CBCR_REG(branch));
+	cbcr_val |= CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(cbcr_val, CBCR_REG(branch));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	/*
+	 * For clocks controlled by other masters via voting registers,
+	 * delay polling for the status bit to allow previous clk_disable
+	 * by the GDS controller to go through.
+	 */
+	if (branch->no_halt_check_on_disable)
+		udelay(5);
+
+	/* Wait for clock to enable before continuing. */
+	branch_clk_halt_check(c, branch->halt_check, CBCR_REG(branch),
+				BRANCH_ON);
+
+	return 0;
+}
+
+/*
+ * Unprepare counterpart of branch_clk_prepare(): drop this branch's
+ * vote and lower the parent's rate to the remaining siblings' aggregate
+ * if that aggregate is lower than what we were holding it at.
+ */
+static void branch_clk_unprepare(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	unsigned long curr_rate, new_rate;
+
+	if (!branch->aggr_sibling_rates)
+		return;
+
+	mutex_lock(&branch_clk_lock);
+	branch->is_prepared = false;
+	new_rate = branch_clk_aggregate_rate(c->parent);
+	curr_rate = max(new_rate, c->rate);
+	if (new_rate < curr_rate)
+		clk_set_rate(c->parent, new_rate);
+	mutex_unlock(&branch_clk_lock);
+}
+
+/*
+ * Disable a branch: clear the CBCR enable bit, optionally wait for the
+ * halt status to confirm the branch stopped, then drop memory/periph
+ * retention for branches that toggle it per enable/disable cycle.
+ */
+static void branch_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	struct branch_clk *branch = to_branch_clk(c);
+	u32 reg_val;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	reg_val = readl_relaxed(CBCR_REG(branch));
+	reg_val &= ~CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(reg_val, CBCR_REG(branch));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	/* Wait for clock to disable before continuing. */
+	if (!branch->no_halt_check_on_disable)
+		branch_clk_halt_check(c, branch->halt_check, CBCR_REG(branch),
+					BRANCH_OFF);
+
+	if (branch->toggle_memory) {
+		branch_clk_set_flags(c, CLKFLAG_NORETAIN_MEM);
+		branch_clk_set_flags(c, CLKFLAG_NORETAIN_PERIPH);
+	}
+}
+
+/*
+ * "Set rate" for divider-equipped branches.  Note @rate here is the
+ * divider value itself (bounded by max_div), programmed into the CDIV
+ * field of the CBCR, not a frequency in Hz.
+ */
+static int branch_cdiv_set_rate(struct branch_clk *branch, unsigned long rate)
+{
+	unsigned long flags;
+	u32 regval;
+
+	if (rate > branch->max_div)
+		return -EINVAL;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	regval = readl_relaxed(CBCR_REG(branch));
+	regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+	regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, rate);
+	writel_relaxed(regval, CBCR_REG(branch));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Set a branch's rate.  Refuses while HW gating is active; divider
+ * branches program the CDIV field; a lone child forwards to its parent;
+ * rate-aggregating branches recompute the sibling aggregate (excluding
+ * this branch's old vote) and only touch the parent when it changes.
+ */
+static int branch_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct branch_clk *clkh, *branch = to_branch_clk(c);
+	struct clk *clkp, *parent = c->parent;
+	unsigned long flags, curr_rate, new_rate, other_rate = 0;
+	int ret = 0;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	if (readl_relaxed(CBCR_REG(branch)) & CBCR_HW_CTL_BIT) {
+		pr_err("Cannot scale %s clock while HW gating is enabled. Use corresponding hw_ctl_clk to scale it\n",
+			c->dbg_name);
+		spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	if (branch->max_div)
+		return branch_cdiv_set_rate(branch, rate);
+
+	if (branch->has_sibling)
+		return -EPERM;
+
+	if (!branch->aggr_sibling_rates)
+		return clk_set_rate(c->parent, rate);
+
+	mutex_lock(&branch_clk_lock);
+	/* Unprepared branches only cache the vote for the next prepare. */
+	if (!branch->is_prepared) {
+		c->rate = rate;
+		goto exit;
+	}
+	/*
+	 * Get the aggregate rate without this clock's vote and update
+	 * if the new rate is different than the current rate.
+	 */
+	list_for_each_entry(clkp, &parent->children, siblings) {
+		clkh = to_branch_clk(clkp);
+		if (clkh->is_prepared && clkh != branch)
+			other_rate = max(clkp->rate, other_rate);
+	}
+	curr_rate = max(other_rate, c->rate);
+	new_rate = max(other_rate, rate);
+	if (new_rate != curr_rate) {
+		ret = clk_set_rate(parent, new_rate);
+		if (!ret)
+			c->rate = rate;
+	}
+exit:
+	mutex_unlock(&branch_clk_lock);
+	return ret;
+}
+
+static long branch_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+
+	/* Divider branches accept any divider value up to max_div. */
+	if (branch->max_div)
+		return rate <= branch->max_div ? rate : -EPERM;
+
+	/* Shared branches cannot round; a lone child asks its parent. */
+	if (branch->has_sibling)
+		return -EPERM;
+
+	return clk_round_rate(c->parent, rate);
+}
+
+static unsigned long branch_clk_get_rate(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+
+	/* Divider branches cache their divider setting in c->rate. */
+	return branch->max_div ? branch->c.rate : clk_get_rate(c->parent);
+}
+
+/*
+ * List the nth rate of a lone-child branch by delegating to the parent,
+ * filtered to rates achievable within the parent's highest voltage
+ * corner (fmax).
+ */
+static long branch_clk_list_rate(struct clk *c, unsigned n)
+{
+	int level;
+	unsigned long fmax = 0, rate;
+	struct branch_clk *branch = to_branch_clk(c);
+	struct clk *parent = c->parent;
+
+	if (branch->has_sibling == 1)
+		return -ENXIO;
+
+	if (!parent || !parent->ops->list_rate)
+		return -ENXIO;
+
+	/* Find max frequency supported within voltage constraints. */
+	if (!parent->vdd_class) {
+		fmax = ULONG_MAX;
+	} else {
+		for (level = 0; level < parent->num_fmax; level++)
+			if (parent->fmax[level])
+				fmax = parent->fmax[level];
+	}
+
+	/*
+	 * NOTE(review): list_rate() returns long; a negative end-of-list
+	 * value stored in the unsigned 'rate' compares as a huge number
+	 * and is returned as-is, which callers see as negative again --
+	 * works, but relies on the round-trip conversion.
+	 */
+	rate = parent->ops->list_rate(parent, n);
+	if (rate <= fmax)
+		return rate;
+	else
+		return -ENXIO;
+}
+
+/*
+ * Handoff for branches: for divider branches, sync the CDIV field with
+ * the pre-set c->rate (or read the divider back into c->rate); report
+ * whether the bootloader left the branch running.
+ */
+static enum handoff branch_clk_handoff(struct clk *c)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	u32 cbcr_regval;
+
+	cbcr_regval = readl_relaxed(CBCR_REG(branch));
+
+	/* Set the cdiv to c->rate for fixed divider branch clock */
+	if (c->rate && (c->rate < branch->max_div)) {
+		cbcr_regval &= ~BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+		cbcr_regval |= BVAL(CBCR_CDIV_MSB, CBCR_CDIV_LSB, c->rate);
+		writel_relaxed(cbcr_regval, CBCR_REG(branch));
+	}
+
+	if ((cbcr_regval & CBCR_BRANCH_OFF_BIT))
+		return HANDOFF_DISABLED_CLK;
+
+	if (!(cbcr_regval & CBCR_BRANCH_ENABLE_BIT)) {
+		if (!branch->check_enable_bit)
+			pr_warn("%s clock is enabled in HW even though ENABLE_BIT is not set\n",
+				c->dbg_name);
+		return HANDOFF_DISABLED_CLK;
+	}
+
+	if (branch->max_div) {
+		cbcr_regval &= BM(CBCR_CDIV_MSB, CBCR_CDIV_LSB);
+		cbcr_regval >>= CBCR_CDIV_LSB;
+		c->rate = cbcr_regval;
+	} else if (!branch->has_sibling) {
+		c->rate = clk_get_rate(c->parent);
+	}
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+/*
+ * Assert or deassert the block reset (ARES) bit in the BCR at @bcr_reg.
+ * Returns -EINVAL for an unknown action (register written back
+ * unchanged in that case).
+ */
+static int __branch_clk_reset(void __iomem *bcr_reg,
+				enum clk_reset_action action)
+{
+	int ret = 0;
+	unsigned long flags;
+	u32 reg_val;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	reg_val = readl_relaxed(bcr_reg);
+	switch (action) {
+	case CLK_RESET_ASSERT:
+		reg_val |= BCR_BLK_ARES_BIT;
+		break;
+	case CLK_RESET_DEASSERT:
+		reg_val &= ~BCR_BLK_ARES_BIT;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+	writel_relaxed(reg_val, bcr_reg);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	/* Make sure write is issued before returning. */
+	mb();
+
+	return ret;
+}
+
+static int branch_clk_reset(struct clk *c, enum clk_reset_action action)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+
+	/* Only branches with a block reset register support reset. */
+	if (!branch->bcr_reg)
+		return -EPERM;
+
+	return __branch_clk_reset(BCR_REG(branch), action);
+}
+
+/* Debugfs register-dump hook for branches: one bank exposing the CBCR. */
+static void __iomem *branch_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct branch_clk *branch = to_branch_clk(c);
+	static struct clk_register_data data[] = {
+		{"CBCR", 0x0},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return CBCR_REG(branch);
+}
+
+/* Hand gating control of the underlying branch over to hardware. */
+static void _hw_ctl_clk_enable(struct hw_ctl_clk *hwctl_clk)
+{
+	unsigned long flags;
+	u32 cbcr_val;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cbcr_val = readl_relaxed(CBCR_REG(hwctl_clk));
+	cbcr_val |= CBCR_HW_CTL_BIT;
+	writel_relaxed(cbcr_val, CBCR_REG(hwctl_clk));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+/*
+ * Enable hardware gating for the associated branch.  The branch must
+ * already be prepared; otherwise handing control to HW is meaningless.
+ * (The original condition redundantly re-tested 'parent' inside the
+ * second operand: !parent || (parent && !...).)
+ */
+static int hw_ctl_clk_enable(struct clk *c)
+{
+	struct hw_ctl_clk *hwctl_clk = to_hw_ctl_clk(c);
+	struct clk *parent = c->parent;
+
+	/* The parent branch clock should have been prepared prior to this. */
+	if (!parent || !parent->prepare_count)
+		return -EINVAL;
+
+	_hw_ctl_clk_enable(hwctl_clk);
+	return 0;
+}
+
+/* Return gating control of the underlying branch to software. */
+static void _hw_ctl_clk_disable(struct hw_ctl_clk *hwctl_clk)
+{
+	unsigned long flags;
+	u32 cbcr_val;
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	cbcr_val = readl_relaxed(CBCR_REG(hwctl_clk));
+	cbcr_val &= ~CBCR_HW_CTL_BIT;
+	writel_relaxed(cbcr_val, CBCR_REG(hwctl_clk));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static void hw_ctl_clk_disable(struct clk *c)
+{
+	/* Nothing to do when this hw_ctl clock has no associated branch. */
+	if (!c->parent)
+		return;
+
+	_hw_ctl_clk_disable(to_hw_ctl_clk(c));
+}
+
+/*
+ * Scale the underlying branch while hardware gating is engaged: drop to
+ * SW control for the duration of the rate switch, then re-engage.
+ *
+ * NOTE(review): on clk_set_rate() failure the clock is left under SW
+ * control (no re-enable on the error path) -- confirm this is the
+ * intended fail-safe behavior.
+ */
+static int hw_ctl_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct hw_ctl_clk *hwctl_clk = to_hw_ctl_clk(c);
+	struct clk *parent = c->parent;
+	int ret = 0;
+
+	if (!parent)
+		return -EINVAL;
+	/*
+	 * Switch back to SW control while doing a frequency change to avoid
+	 * having the downstream clock being gated at the same time that the
+	 * RCG rate switch happens.
+	 */
+	_hw_ctl_clk_disable(hwctl_clk);
+	ret = clk_set_rate(parent, rate);
+	if (ret)
+		return ret;
+	_hw_ctl_clk_enable(hwctl_clk);
+
+	return 0;
+}
+
+/* Rounding is determined entirely by the associated branch/RCG chain. */
+static long hw_ctl_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	return clk_round_rate(c->parent, rate);
+}
+
+/* The hw_ctl clock runs at whatever rate its associated branch runs at. */
+static unsigned long hw_ctl_clk_get_rate(struct clk *c)
+{
+	return clk_get_rate(c->parent);
+}
+
+/*
+ * Voteable clock functions
+ */
+/*
+ * Block-reset a voteable branch via its BCR, if it has one.
+ *
+ * Fix: WARN()'s first argument is the condition -- the original passed
+ * the message there, which made c->dbg_name act as the (non-literal)
+ * printk format string.
+ */
+static int local_vote_clk_reset(struct clk *c, enum clk_reset_action action)
+{
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+	if (!vclk->bcr_reg) {
+		WARN(1, "clk_reset called on an unsupported clock (%s)\n",
+			c->dbg_name);
+		return -EPERM;
+	}
+	return __branch_clk_reset(BCR_REG(vclk), action);
+}
+
+/*
+ * Enable a voteable clock by setting apps' bit in the shared vote
+ * register, then wait for the branch to actually turn on.  Always
+ * returns 0.
+ */
+static int local_vote_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	u32 ena;
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	ena = readl_relaxed(VOTE_REG(vclk));
+	ena |= vclk->en_mask;
+	writel_relaxed(ena, VOTE_REG(vclk));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	branch_clk_halt_check(c, vclk->halt_check, CBCR_REG(vclk), BRANCH_ON);
+
+	return 0;
+}
+
+/*
+ * Drop apps' vote for the clock.  No halt check is done on disable:
+ * other masters' votes may legitimately keep the branch running.
+ */
+static void local_vote_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	u32 ena;
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+	ena = readl_relaxed(VOTE_REG(vclk));
+	ena &= ~vclk->en_mask;
+	writel_relaxed(ena, VOTE_REG(vclk));
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
+static enum handoff local_vote_clk_handoff(struct clk *c)
+{
+	struct local_vote_clk *vclk = to_local_vote_clk(c);
+	bool voted;
+
+	/* The clock counts as handed off only if apps' vote bit is set. */
+	voted = readl_relaxed(VOTE_REG(vclk)) & vclk->en_mask;
+
+	return voted ? HANDOFF_ENABLED_CLK : HANDOFF_DISABLED_CLK;
+}
+
+/* Sample clock for 'ticks' reference clock ticks. */
+static u32 run_measurement(unsigned ticks, void __iomem *ctl_reg,
+				void __iomem *status_reg)
+{
+	/* Stop counters and set the XO4 counter start value. */
+	writel_relaxed(ticks, ctl_reg);
+
+	/* Wait for timer to become ready. */
+	while ((readl_relaxed(status_reg) & BIT(25)) != 0)
+		cpu_relax();
+
+	/* Run measurement and wait for completion. */
+	writel_relaxed(BIT(20)|ticks, ctl_reg);
+	while ((readl_relaxed(status_reg) & BIT(25)) == 0)
+		cpu_relax();
+
+	/* Return measured ticks (bits 24:0 of the status register). */
+	return readl_relaxed(status_reg) & BM(24, 0);
+}
+
+/*
+ * Perform a hardware rate measurement for a given clock.
+ * FOR DEBUG USE ONLY: Measurements take ~15 ms!
+ *
+ * Runs the ring-oscillator counter against the CXO/4 reference twice
+ * (a short and a full sample) to detect a stopped clock, then scales
+ * the full count to Hz and undoes the post divider applied to the
+ * debug mux.  Returns 0 when the clock appears off or CXO can't be
+ * enabled.
+ */
+unsigned long measure_get_rate(struct clk *c)
+{
+	unsigned long flags;
+	u32 gcc_xo4_reg, regval;
+	u64 raw_count_short, raw_count_full;
+	unsigned ret;
+	u32 sample_ticks = 0x10000;
+	u32 multiplier = to_mux_clk(c)->post_div + 1;
+	struct measure_clk_data *data = to_mux_clk(c)->priv;
+
+	regval = readl_relaxed(MUX_REG(to_mux_clk(c)));
+	/* clear and set post divider bits */
+	regval &= ~BM(15, 12);
+	regval |= BVAL(15, 12, to_mux_clk(c)->post_div);
+	writel_relaxed(regval, MUX_REG(to_mux_clk(c)));
+
+	ret = clk_prepare_enable(data->cxo);
+	if (ret) {
+		pr_warn("CXO clock failed to enable. Can't measure\n");
+		ret = 0;
+		goto fail;
+	}
+
+	spin_lock_irqsave(&local_clock_reg_lock, flags);
+
+	/* Enable CXO/4 and RINGOSC branch. */
+	gcc_xo4_reg = readl_relaxed(*data->base + data->xo_div4_cbcr);
+	gcc_xo4_reg |= CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(gcc_xo4_reg, *data->base + data->xo_div4_cbcr);
+
+	/*
+	 * The ring oscillator counter will not reset if the measured clock
+	 * is not running. To detect this, run a short measurement before
+	 * the full measurement. If the raw results of the two are the same
+	 * then the clock must be off.
+	 */
+
+	/* Run a short measurement. (~1 ms) */
+	raw_count_short = run_measurement(0x1000, *data->base + data->ctl_reg,
+					*data->base + data->status_reg);
+	/* Run a full measurement. (~14 ms) */
+	raw_count_full = run_measurement(sample_ticks,
+					*data->base + data->ctl_reg,
+					*data->base + data->status_reg);
+
+	gcc_xo4_reg &= ~CBCR_BRANCH_ENABLE_BIT;
+	writel_relaxed(gcc_xo4_reg, *data->base + data->xo_div4_cbcr);
+
+	/* Return 0 if the clock is off. */
+	if (raw_count_full == raw_count_short) {
+		ret = 0;
+	} else {
+		/* Compute rate in Hz: scale ticks by the 4.8 MHz reference. */
+		raw_count_full = ((raw_count_full * 10) + 15) * 4800000;
+		do_div(raw_count_full, ((sample_ticks * 10) + 35));
+		ret = (raw_count_full * multiplier);
+	}
+	writel_relaxed(data->plltest_val, *data->base + data->plltest_reg);
+	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+	clk_disable_unprepare(data->cxo);
+
+fail:
+	regval = readl_relaxed(MUX_REG(to_mux_clk(c)));
+	/* clear post divider bits */
+	regval &= ~BM(15, 12);
+	writel_relaxed(regval, MUX_REG(to_mux_clk(c)));
+
+	return ret;
+}
+
/* An M/N fraction (num/den) used to derive a pixel rate from a link rate. */
struct frac_entry {
	int num;
	int den;
};
+
+static void __iomem *local_vote_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct local_vote_clk *vclk = to_local_vote_clk(c);
+ static struct clk_register_data data1[] = {
+ {"CBCR", 0x0},
+ };
+ static struct clk_register_data data2[] = {
+ {"APPS_VOTE", 0x0},
+ {"APPS_SLEEP_VOTE", 0x4},
+ };
+ switch (n) {
+ case 0:
+ *regs = data1;
+ *size = ARRAY_SIZE(data1);
+ return CBCR_REG(vclk);
+ case 1:
+ *regs = data2;
+ *size = ARRAY_SIZE(data2);
+ return VOTE_REG(vclk);
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
/*
 * M/N fractions of a 675 MHz source (link rate of 270M).  Each entry's
 * comment gives the resulting pixel rate; the {0, 0} entry terminates
 * the table.
 */
static struct frac_entry frac_table_675m[] = {
	{52, 295},	/* 119 M */
	{11, 57},	/* 130.25 M */
	{63, 307},	/* 138.50 M */
	{11, 50},	/* 148.50 M */
	{47, 206},	/* 154 M */
	{31, 100},	/* 205.25 M */
	{107, 269},	/* 268.50 M */
	{0, 0},
};
+
/*
 * M/N fractions of an 810 MHz source (link rate of 162M).  Terminated
 * by the {0, 0} entry.
 */
static struct frac_entry frac_table_810m[] = {
	{31, 211},	/* 119 M */
	{32, 199},	/* 130.25 M */
	{63, 307},	/* 138.50 M */
	{11, 60},	/* 148.50 M */
	{50, 263},	/* 154 M */
	{31, 120},	/* 205.25 M */
	{119, 359},	/* 268.50 M */
	{0, 0},
};
+
/*
 * Return true when the RCG hardware already matches the configuration in
 * @freq, so the caller can skip a redundant reprogram.
 *
 * NOTE: normalizes @freq->div_src_val in place — divider encodings 0 and
 * 1 both mean "divide by 1" in HW, so 1 is rewritten to 0.
 */
static bool is_same_rcg_config(struct rcg_clk *rcg, struct clk_freq_tbl *freq,
			       bool has_mnd)
{
	u32 cfg;

	/* RCG update pending */
	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_CONFIG_DIRTY_MASK)
		return false;
	/* With an MND counter, M/N/D must all match exactly. */
	if (has_mnd)
		if (readl_relaxed(M_REG(rcg)) != freq->m_val ||
		    readl_relaxed(N_REG(rcg)) != freq->n_val ||
		    readl_relaxed(D_REG(rcg)) != freq->d_val)
			return false;
	/*
	 * Both 0 and 1 represent same divider value in HW.
	 * Always use 0 to simplify comparison.
	 */
	if ((freq->div_src_val & CFG_RCGR_DIV_MASK) == 1)
		freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
	cfg = readl_relaxed(CFG_RCGR_REG(rcg));
	if ((cfg & CFG_RCGR_DIV_MASK) == 1)
		cfg &= ~CFG_RCGR_DIV_MASK;
	if (cfg != freq->div_src_val)
		return false;

	return true;
}
+
/*
 * Set an eDP pixel clock rate.  The source PLL runs at a fixed link rate
 * (810 or 675 MHz); pick the M/N fraction whose derived rate is within
 * +/- 100 kHz of the request and program the RCG's MND counter.
 * Returns -EINVAL when no table entry matches.
 */
static int set_rate_edp_pixel(struct clk *clk, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk_freq_tbl *pixel_freq = rcg->current_freq;
	struct frac_entry *frac;
	int delta = 100000;	/* acceptable error, in Hz */
	s64 request;
	s64 src_rate;
	unsigned long flags;

	src_rate = clk_get_rate(clk->parent);

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	while (frac->num) {
		/* Source rate needed so that rate = src * num / den. */
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
			(src_rate > (request + delta))) {
			frac++;
			continue;
		}

		pixel_freq->div_src_val &= ~BM(4, 0);
		if (frac->den == frac->num) {
			/* 1:1 — bypass the MND counter entirely. */
			pixel_freq->m_val = 0;
			pixel_freq->n_val = 0;
		} else {
			/* N and D are programmed in not-(N-M) / not-D form. */
			pixel_freq->m_val = frac->num;
			pixel_freq->n_val = ~(frac->den - frac->num);
			pixel_freq->d_val = ~frac->den;
		}
		spin_lock_irqsave(&local_clock_reg_lock, flags);
		/* Skip the write if HW already matches this config. */
		if (!is_same_rcg_config(rcg, pixel_freq, true))
			__set_rate_mnd(rcg, pixel_freq);
		spin_unlock_irqrestore(&local_clock_reg_lock, flags);
		return 0;
	}
	return -EINVAL;
}
+
+enum handoff byte_rcg_handoff(struct clk *clk)
+{
+ struct rcg_clk *rcg = to_rcg_clk(clk);
+ u32 div_val;
+ unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);
+
+ /* If the pre-divider is used, find the rate after the division */
+ div_val = readl_relaxed(CFG_RCGR_REG(rcg)) & CFG_RCGR_DIV_MASK;
+ if (div_val > 1)
+ pre_div_rate = parent_rate / ((div_val + 1) >> 1);
+ else
+ pre_div_rate = parent_rate;
+
+ clk->rate = pre_div_rate;
+
+ if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
+ return HANDOFF_DISABLED_CLK;
+
+ return HANDOFF_ENABLED_CLK;
+}
+
/*
 * Set a DSI byte clock rate: program the source PLL to @rate, then
 * configure the RCG's half-integer divider so that
 * byte_clk = 2 * pll_rate / (div + 1).  Fails if the division is not
 * exact or the divider exceeds the field width.
 */
static int set_rate_byte(struct clk *clk, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk *pll = clk->parent;
	unsigned long source_rate, div, flags;
	struct clk_freq_tbl *byte_freq = rcg->current_freq;
	int rc;

	if (rate == 0)
		return -EINVAL;

	rc = clk_set_rate(pll, rate);
	if (rc)
		return rc;

	source_rate = clk_round_rate(pll, rate);
	/* The half-integer divider must divide 2*src exactly. */
	if ((2 * source_rate) % rate)
		return -EINVAL;

	/* HW encodes a divider of D as (2*D - 1). */
	div = ((2 * source_rate)/rate) - 1;
	if (div > CFG_RCGR_DIV_MASK)
		return -EINVAL;

	/*
	 * Both 0 and 1 represent same divider value in HW.
	 * Always use 0 to simplify comparison.
	 */
	div = (div == 1) ? 0 : div;

	byte_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
	byte_freq->div_src_val |= BVAL(4, 0, div);

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	/* Skip the register write when HW already matches. */
	if (!is_same_rcg_config(rcg, byte_freq, false))
		__set_rate_hid(rcg, byte_freq);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);

	return 0;
}
+
/*
 * Handoff for pixel RCGs: derive the boot-time rate from the pre-divider
 * and, when dual-edge MND mode is active, the M/N counter.  The recovered
 * divider and M/N/D values are cached back into the RCG's single
 * frequency-table entry so later set_rate calls start from HW state.
 */
enum handoff pixel_rcg_handoff(struct clk *clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	u32 div_val = 0, mval = 0, nval = 0, cfg_regval;
	unsigned long pre_div_rate, parent_rate = clk_get_rate(clk->parent);

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));

	/* If the pre-divider is used, find the rate after the division */
	div_val = cfg_regval & CFG_RCGR_DIV_MASK;
	if (div_val > 1)
		pre_div_rate = parent_rate / ((div_val + 1) >> 1);
	else
		pre_div_rate = parent_rate;

	clk->rate = pre_div_rate;

	/*
	 * Pixel clocks have one frequency entry in their frequency table.
	 * Update that entry.
	 */
	if (rcg->current_freq) {
		rcg->current_freq->div_src_val &= ~CFG_RCGR_DIV_MASK;
		rcg->current_freq->div_src_val |= div_val;
	}

	/* If MND is used, find the rate after the MND division */
	if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
		mval = readl_relaxed(M_REG(rcg));
		nval = readl_relaxed(N_REG(rcg));
		/* N == 0 with MND enabled means the clock cannot be running. */
		if (!nval)
			return HANDOFF_DISABLED_CLK;
		/* Registers hold ~(N-M); recover the true N. */
		nval = (~nval) + mval;
		if (rcg->current_freq) {
			rcg->current_freq->n_val = ~(nval - mval);
			rcg->current_freq->m_val = mval;
			rcg->current_freq->d_val = ~nval;
		}
		clk->rate = (pre_div_rate * mval) / nval;
	}

	if (readl_relaxed(CMD_RCGR_REG(rcg)) & CMD_RCGR_ROOT_STATUS_BIT)
		return HANDOFF_DISABLED_CLK;

	return HANDOFF_ENABLED_CLK;
}
+
+static long round_rate_pixel(struct clk *clk, unsigned long rate)
+{
+ int frac_num[] = {3, 2, 4, 1};
+ int frac_den[] = {8, 9, 9, 1};
+ int delta = 100000;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(frac_num); i++) {
+ unsigned long request = (rate * frac_den[i]) / frac_num[i];
+ unsigned long src_rate;
+
+ src_rate = clk_round_rate(clk->parent, request);
+ if ((src_rate < (request - delta)) ||
+ (src_rate > (request + delta)))
+ continue;
+
+ return (src_rate * frac_num[i]) / frac_den[i];
+ }
+
+ return -EINVAL;
+}
+
+
/*
 * Set a pixel clock rate: find the M/N fraction whose required source
 * rate the parent can deliver within +/- 100 kHz, set the parent to that
 * rate, and program the RCG's MND counter.
 *
 * NOTE: the fraction search duplicates round_rate_pixel() above; keep
 * the two tables in sync.
 */
static int set_rate_pixel(struct clk *clk, unsigned long rate)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk_freq_tbl *pixel_freq = rcg->current_freq;
	int frac_num[] = {3, 2, 4, 1};
	int frac_den[] = {8, 9, 9, 1};
	int delta = 100000;
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(frac_num); i++) {
		unsigned long request = (rate * frac_den[i]) / frac_num[i];
		unsigned long src_rate;

		src_rate = clk_round_rate(clk->parent, request);
		if ((src_rate < (request - delta)) ||
			(src_rate > (request + delta)))
			continue;

		rc = clk_set_rate(clk->parent, src_rate);
		if (rc)
			return rc;

		pixel_freq->div_src_val &= ~BM(4, 0);
		if (frac_den[i] == frac_num[i]) {
			/* 1:1 — bypass the MND counter. */
			pixel_freq->m_val = 0;
			pixel_freq->n_val = 0;
		} else {
			/* N and D are programmed in not-(N-M) / not-D form. */
			pixel_freq->m_val = frac_num[i];
			pixel_freq->n_val = ~(frac_den[i] - frac_num[i]);
			pixel_freq->d_val = ~frac_den[i];
		}
		set_rate_mnd(rcg, pixel_freq);
		return 0;
	}
	return -EINVAL;
}
+
/*
 * Switch an RCG to a different parent clock while keeping the current
 * divider (and M/N/D, if in use) configuration.  The new parent must
 * appear as a source in the RCG's frequency table.  The resulting rate
 * is recomputed from the live register values and cached in clk->rate.
 *
 * The RCG should be unprepared while switching; a prepared switch only
 * triggers a WARN, it is not refused.
 */
static int rcg_clk_set_parent(struct clk *clk, struct clk *parent_clk)
{
	struct rcg_clk *rcg = to_rcg_clk(clk);
	struct clk *old_parent = clk->parent;
	struct clk_freq_tbl *nf;
	unsigned long flags;
	int rc = 0;
	unsigned int parent_rate, rate;
	u32 m_val, n_val, d_val, div_val;
	u32 cfg_regval;

	/* Find the source clock freq tbl for the requested parent */
	if (!rcg->freq_tbl)
		return -ENXIO;

	for (nf = rcg->freq_tbl; parent_clk != nf->src_clk; nf++) {
		if (nf->freq_hz == FREQ_END)
			return -ENXIO;
	}

	/* This implementation recommends that the RCG be unprepared
	 * when switching RCG source since the divider configuration
	 * remains unchanged.
	 */
	WARN(clk->prepare_count,
		"Trying to switch RCG source while it is prepared!\n");

	parent_rate = clk_get_rate(parent_clk);

	/* Half-integer divider: encoding D gives a divide by (D+1)/2. */
	div_val = (rcg->current_freq->div_src_val & CFG_RCGR_DIV_MASK);
	if (div_val)
		parent_rate /= ((div_val + 1) >> 1);

	/* Update divisor. Source select bits should already be as expected */
	nf->div_src_val &= ~CFG_RCGR_DIV_MASK;
	nf->div_src_val |= div_val;

	cfg_regval = readl_relaxed(CFG_RCGR_REG(rcg));

	if ((cfg_regval & MND_MODE_MASK) == MND_DUAL_EDGE_MODE_BVAL) {
		nf->m_val = m_val = readl_relaxed(M_REG(rcg));
		n_val = readl_relaxed(N_REG(rcg));
		d_val = readl_relaxed(D_REG(rcg));

		/* Sign extend the n and d values as those in registers are not
		 * sign extended.
		 */
		n_val |= (n_val >> 8) ? BM(31, 16) : BM(31, 8);
		d_val |= (d_val >> 8) ? BM(31, 16) : BM(31, 8);

		nf->n_val = n_val;
		nf->d_val = d_val;

		/* Registers hold ~(N-M); recover the true N for the math. */
		n_val = ~(n_val) + m_val;
		rate = parent_rate * m_val;
		if (n_val)
			rate /= n_val;
		else
			WARN(1, "n_val was 0!!");
	} else
		rate = parent_rate;

	/* Warn if switching to the new parent with the current m, n ,d values
	 * violates the voltage constraints for the RCG.
	 */
	WARN(!is_rate_valid(clk, rate) && clk->prepare_count,
		"Switch to new RCG parent violates voltage requirement!\n");

	rc = __clk_pre_reparent(clk, nf->src_clk, &flags);
	if (rc)
		return rc;

	/* Switch RCG source */
	rcg->set_rate(rcg, nf);

	rcg->current_freq = nf;
	clk->parent = parent_clk;
	clk->rate = rate;

	__clk_post_reparent(clk, old_parent, &flags);

	return 0;
}
+
+/*
+ * Unlike other clocks, the HDMI rate is adjusted through PLL
+ * re-programming. It is also routed through an HID divider.
+ */
+static int rcg_clk_set_rate_hdmi(struct clk *c, unsigned long rate)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ struct clk_freq_tbl *nf = rcg->freq_tbl;
+ int rc;
+
+ rc = clk_set_rate(nf->src_clk, rate);
+ if (rc < 0)
+ goto out;
+ set_rate_hid(rcg, nf);
+
+ rcg->current_freq = nf;
+out:
+ return rc;
+}
+
+static struct clk *rcg_hdmi_clk_get_parent(struct clk *c)
+{
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ struct clk_freq_tbl *freq = rcg->freq_tbl;
+ u32 cmd_rcgr_regval;
+
+ /* Is there a pending configuration? */
+ cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
+ if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
+ return NULL;
+
+ rcg->current_freq->freq_hz = clk_get_rate(c->parent);
+
+ return freq->src_clk;
+}
+
+static int rcg_clk_set_rate_edp(struct clk *c, unsigned long rate)
+{
+ struct clk_freq_tbl *nf;
+ struct rcg_clk *rcg = to_rcg_clk(c);
+ int rc;
+
+ for (nf = rcg->freq_tbl; nf->freq_hz != rate; nf++)
+ if (nf->freq_hz == FREQ_END) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = clk_set_rate(nf->src_clk, rate);
+ if (rc < 0)
+ goto out;
+ set_rate_hid(rcg, nf);
+
+ rcg->current_freq = nf;
+ c->parent = nf->src_clk;
+out:
+ return rc;
+}
+
/*
 * Determine the eDP RCG's parent by matching each frequency-table
 * source's current rate against the table entry.  Returns NULL if a
 * configuration update is pending or no source rate matches.
 */
static struct clk *edp_clk_get_parent(struct clk *c)
{
	struct rcg_clk *rcg = to_rcg_clk(c);
	struct clk *clk;
	struct clk_freq_tbl *freq;
	unsigned long rate;
	u32 cmd_rcgr_regval;

	/* Is there a pending configuration? */
	cmd_rcgr_regval = readl_relaxed(CMD_RCGR_REG(rcg));
	if (cmd_rcgr_regval & CMD_RCGR_CONFIG_DIRTY_MASK)
		return NULL;

	/* Figure out what rate the rcg is running at */
	for (freq = rcg->freq_tbl; freq->freq_hz != FREQ_END; freq++) {
		clk = freq->src_clk;
		/* Sources without a get_rate op cannot be matched. */
		if (clk && clk->ops->get_rate) {
			rate = clk->ops->get_rate(clk);
			if (rate == freq->freq_hz)
				break;
		}
	}

	/* No known frequency found */
	if (freq->freq_hz == FREQ_END)
		return NULL;

	rcg->current_freq = freq;
	return freq->src_clk;
}
+
+static int gate_clk_enable(struct clk *c)
+{
+ unsigned long flags;
+ u32 regval;
+ struct gate_clk *g = to_gate_clk(c);
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ regval = readl_relaxed(GATE_EN_REG(g));
+ regval |= g->en_mask;
+ writel_relaxed(regval, GATE_EN_REG(g));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+ if (g->delay_us)
+ udelay(g->delay_us);
+
+ return 0;
+}
+
+static void gate_clk_disable(struct clk *c)
+{
+ unsigned long flags;
+ u32 regval;
+ struct gate_clk *g = to_gate_clk(c);
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ regval = readl_relaxed(GATE_EN_REG(g));
+ regval &= ~(g->en_mask);
+ writel_relaxed(regval, GATE_EN_REG(g));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+ if (g->delay_us)
+ udelay(g->delay_us);
+}
+
+static void __iomem *gate_clk_list_registers(struct clk *c, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct gate_clk *g = to_gate_clk(c);
+ static struct clk_register_data data[] = {
+ {"EN_REG", 0x0},
+ };
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data;
+ *size = ARRAY_SIZE(data);
+ return GATE_EN_REG(g);
+}
+
+static enum handoff gate_clk_handoff(struct clk *c)
+{
+ struct gate_clk *g = to_gate_clk(c);
+ u32 regval;
+
+ regval = readl_relaxed(GATE_EN_REG(g));
+ if (regval & g->en_mask)
+ return HANDOFF_ENABLED_CLK;
+
+ return HANDOFF_DISABLED_CLK;
+}
+
/* Flag handling is shared with branch clocks via cbcr_set_flags(). */
static int gate_clk_set_flags(struct clk *c, unsigned flags)
{
	struct gate_clk *g = to_gate_clk(c);

	return cbcr_set_flags(GATE_EN_REG(g), flags);
}
+
+
+static int reset_clk_rst(struct clk *c, enum clk_reset_action action)
+{
+ struct reset_clk *rst = to_reset_clk(c);
+
+ if (!rst->reset_reg)
+ return -EPERM;
+
+ return __branch_clk_reset(RST_REG(rst), action);
+}
+
+static void __iomem *reset_clk_list_registers(struct clk *clk, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ struct reset_clk *rst = to_reset_clk(clk);
+ static struct clk_register_data data[] = {
+ {"BCR", 0x0},
+ };
+
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data;
+ *size = ARRAY_SIZE(data);
+ return RST_REG(rst);
+}
+
+static DEFINE_SPINLOCK(mux_reg_lock);
+
+static int mux_reg_enable(struct mux_clk *clk)
+{
+ u32 regval;
+ unsigned long flags;
+
+ if (!clk->en_mask)
+ return 0;
+
+ spin_lock_irqsave(&mux_reg_lock, flags);
+ regval = readl_relaxed(*clk->base + clk->en_offset);
+ regval |= clk->en_mask;
+ writel_relaxed(regval, *clk->base + clk->en_offset);
+ /* Ensure enable request goes through before returning */
+ mb();
+ spin_unlock_irqrestore(&mux_reg_lock, flags);
+
+ return 0;
+}
+
+static void mux_reg_disable(struct mux_clk *clk)
+{
+ u32 regval;
+ unsigned long flags;
+
+ if (!clk->en_mask)
+ return;
+
+ spin_lock_irqsave(&mux_reg_lock, flags);
+ regval = readl_relaxed(*clk->base + clk->en_offset);
+ regval &= ~clk->en_mask;
+ writel_relaxed(regval, *clk->base + clk->en_offset);
+ spin_unlock_irqrestore(&mux_reg_lock, flags);
+}
+
+static int mux_reg_set_mux_sel(struct mux_clk *clk, int sel)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mux_reg_lock, flags);
+ regval = readl_relaxed(MUX_REG(clk));
+ regval &= ~(clk->mask << clk->shift);
+ regval |= (sel & clk->mask) << clk->shift;
+ writel_relaxed(regval, MUX_REG(clk));
+ /* Ensure switch request goes through before returning */
+ mb();
+ spin_unlock_irqrestore(&mux_reg_lock, flags);
+
+ return 0;
+}
+
+static int mux_reg_get_mux_sel(struct mux_clk *clk)
+{
+ u32 regval = readl_relaxed(MUX_REG(clk));
+ return (regval >> clk->shift) & clk->mask;
+}
+
+static bool mux_reg_is_enabled(struct mux_clk *clk)
+{
+ u32 regval = readl_relaxed(MUX_REG(clk));
+ return !!(regval & clk->en_mask);
+}
+
+static void __iomem *mux_clk_list_registers(struct mux_clk *clk, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ static struct clk_register_data data[] = {
+ {"DEBUG_CLK_CTL", 0x0},
+ };
+
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data;
+ *size = ARRAY_SIZE(data);
+ return *clk->base + clk->offset;
+}
+
/*
 * PLL post-divider setting for each divider value.
 * Some register encodings are shared between dividers (0x3 covers /3 and
 * /4, 0x7 covers /7 and /8); lookups by mask therefore return the first
 * (smallest) divider that matches.
 */
static struct div_map postdiv_map[] = {
	{ 0x0, 1  },
	{ 0x1, 2  },
	{ 0x3, 3  },
	{ 0x3, 4  },
	{ 0x5, 5  },
	{ 0x7, 7  },
	{ 0x7, 8  },
	{ 0xF, 16 },
};
+
+static int postdiv_reg_set_div(struct div_clk *clk, int div)
+{
+ struct clk *parent = NULL;
+ u32 regval;
+ unsigned long flags;
+ unsigned int mask = -1;
+ int i, ret = 0;
+
+ /* Divider is not configurable */
+ if (!clk->mask)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(postdiv_map); i++) {
+ if (postdiv_map[i].div == div) {
+ mask = postdiv_map[i].mask;
+ break;
+ }
+ }
+
+ if (mask < 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&clk->c.lock, flags);
+ parent = clk->c.parent;
+ if (parent->count && parent->ops->disable)
+ parent->ops->disable(parent);
+
+ regval = readl_relaxed(DIV_REG(clk));
+ regval &= ~(clk->mask << clk->shift);
+ regval |= (mask & clk->mask) << clk->shift;
+ writel_relaxed(regval, DIV_REG(clk));
+ /* Ensure switch request goes through before returning */
+ mb();
+
+ if (parent->count && parent->ops->enable) {
+ ret = parent->ops->enable(parent);
+ if (ret)
+ pr_err("Failed to force enable div parent!\n");
+ }
+
+ spin_unlock_irqrestore(&clk->c.lock, flags);
+ return ret;
+}
+
+static int postdiv_reg_get_div(struct div_clk *clk)
+{
+ u32 regval;
+ int i, div = 0;
+
+ /* Divider is not configurable */
+ if (!clk->mask)
+ return clk->data.div;
+
+ regval = readl_relaxed(DIV_REG(clk));
+ regval = (regval >> clk->shift) & clk->mask;
+ for (i = 0; i < ARRAY_SIZE(postdiv_map); i++) {
+ if (postdiv_map[i].mask == regval) {
+ div = postdiv_map[i].div;
+ break;
+ }
+ }
+ if (!div)
+ return -EINVAL;
+
+ return div;
+}
+
+static int div_reg_set_div(struct div_clk *clk, int div)
+{
+ u32 regval;
+ unsigned long flags;
+
+ /* Divider is not configurable */
+ if (!clk->mask)
+ return 0;
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ regval = readl_relaxed(*clk->base + clk->offset);
+ regval &= ~(clk->mask << clk->shift);
+ regval |= (div & clk->mask) << clk->shift;
+ /* Ensure switch request goes through before returning */
+ mb();
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+
+ return 0;
+}
+
+static int div_reg_get_div(struct div_clk *clk)
+{
+ u32 regval;
+ /* Divider is not configurable */
+ if (!clk->mask)
+ return clk->data.div;
+
+ regval = readl_relaxed(*clk->base + clk->offset);
+ return (regval >> clk->shift) & clk->mask;
+}
+
+/* =================Half-integer RCG without MN counter================= */
+#define RCGR_CMD_REG(x) ((x)->base + (x)->div_offset)
+#define RCGR_DIV_REG(x) ((x)->base + (x)->div_offset + 4)
+#define RCGR_SRC_REG(x) ((x)->base + (x)->div_offset + 4)
+
/*
 * Latch a pending mux-div RCG configuration by setting the UPDATE bit
 * and polling until HW clears it.  Returns -EBUSY (with a warning) if
 * the update does not complete within UPDATE_CHECK_MAX_LOOPS us.
 * Caller must hold local_clock_reg_lock.
 */
static int rcg_mux_div_update_config(struct mux_div_clk *md)
{
	u32 regval, count;

	regval = readl_relaxed(RCGR_CMD_REG(md));
	regval |= CMD_RCGR_CONFIG_UPDATE_BIT;
	writel_relaxed(regval, RCGR_CMD_REG(md));

	/* Wait for update to take effect */
	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
		if (!(readl_relaxed(RCGR_CMD_REG(md)) &
				CMD_RCGR_CONFIG_UPDATE_BIT))
			return 0;
		udelay(1);
	}

	CLK_WARN(&md->c, true, "didn't update its configuration.");

	return -EBUSY;
}
+
/*
 * Read back the current source select and (doubled) divider of a
 * mux-div RCG.  If a configuration update is still pending, a warning
 * is issued and *src_sel / *div are left unmodified — callers must
 * pre-initialize them.
 */
static void rcg_get_src_div(struct mux_div_clk *md, u32 *src_sel, u32 *div)
{
	u32 regval;
	unsigned long flags;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	/* Is there a pending configuration? */
	regval = readl_relaxed(RCGR_CMD_REG(md));
	if (regval & CMD_RCGR_CONFIG_DIRTY_MASK) {
		CLK_WARN(&md->c, true, "it's a pending configuration.");
		spin_unlock_irqrestore(&local_clock_reg_lock, flags);
		return;
	}

	regval = readl_relaxed(RCGR_DIV_REG(md));
	regval &= (md->div_mask << md->div_shift);
	*div = regval >> md->div_shift;

	/* bypass */
	if (*div == 0)
		*div = 1;
	/* the div is doubled here*/
	*div += 1;

	regval = readl_relaxed(RCGR_SRC_REG(md));
	regval &= (md->src_mask << md->src_shift);
	*src_sel = regval >> md->src_shift;
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
+
/*
 * Force a mux-div RCG's root on and wait for it to come up.
 *
 * NOTE(review): the poll checks CMD_RCGR_CONFIG_UPDATE_BIT, not the root
 * status bit — presumably clearing of the update bit doubles as the
 * "root on" indication on this hardware; confirm against the HW docs.
 */
static void mux_div_set_force_enable(struct mux_div_clk *md)
{
	u32 regval;
	unsigned long flags;
	int count;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(RCGR_CMD_REG(md));
	regval |= CMD_RCGR_ROOT_ENABLE_BIT;
	writel_relaxed(regval, RCGR_CMD_REG(md));

	/* Wait for RCG to turn ON */
	for (count = UPDATE_CHECK_MAX_LOOPS; count > 0; count--) {
		if (!(readl_relaxed(RCGR_CMD_REG(md)) &
				CMD_RCGR_CONFIG_UPDATE_BIT))
			goto exit;
		udelay(1);
	}
	CLK_WARN(&md->c, count == 0, "rcg didn't turn on.");
exit:
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
}
+
+static void mux_div_clear_force_enable(struct mux_div_clk *md)
+{
+ u32 regval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local_clock_reg_lock, flags);
+ regval = readl_relaxed(RCGR_CMD_REG(md));
+ regval &= ~CMD_RCGR_ROOT_ENABLE_BIT;
+ writel_relaxed(regval, RCGR_CMD_REG(md));
+ spin_unlock_irqrestore(&local_clock_reg_lock, flags);
+}
+
/*
 * Program a mux-div RCG's source select and divider, then latch the new
 * configuration.  @div is the doubled (half-integer) divider; the HW is
 * programmed with (div - 1).  Returns the result of the update poll.
 */
static int rcg_set_src_div(struct mux_div_clk *md, u32 src_sel, u32 div)
{
	u32 regval;
	unsigned long flags;
	int ret;

	/* for half-integer divider, div here is doubled */
	if (div)
		div -= 1;

	spin_lock_irqsave(&local_clock_reg_lock, flags);
	regval = readl_relaxed(RCGR_DIV_REG(md));
	regval &= ~(md->div_mask << md->div_shift);
	regval |= div << md->div_shift;
	writel_relaxed(regval, RCGR_DIV_REG(md));

	regval = readl_relaxed(RCGR_SRC_REG(md));
	regval &= ~(md->src_mask << md->src_shift);
	regval |= src_sel << md->src_shift;
	writel_relaxed(regval, RCGR_SRC_REG(md));

	/* Latch the new src/div into the running configuration. */
	ret = rcg_mux_div_update_config(md);
	spin_unlock_irqrestore(&local_clock_reg_lock, flags);
	return ret;
}
+
+static int rcg_enable(struct mux_div_clk *md)
+{
+ if (md->force_enable_md)
+ mux_div_set_force_enable(md);
+
+ return rcg_set_src_div(md, md->src_sel, md->data.div);
+}
+
+static void rcg_disable(struct mux_div_clk *md)
+{
+ u32 src_sel;
+
+ if (md->force_enable_md)
+ mux_div_clear_force_enable(md);
+
+ if (!md->safe_freq)
+ return;
+
+ src_sel = parent_to_src_sel(md->parents, md->num_parents,
+ md->safe_parent);
+
+ rcg_set_src_div(md, src_sel, md->safe_div);
+}
+
+static bool rcg_is_enabled(struct mux_div_clk *md)
+{
+ u32 regval;
+
+ regval = readl_relaxed(RCGR_CMD_REG(md));
+ if (regval & CMD_RCGR_ROOT_STATUS_BIT)
+ return false;
+ else
+ return true;
+}
+
+static void __iomem *rcg_list_registers(struct mux_div_clk *md, int n,
+ struct clk_register_data **regs, u32 *size)
+{
+ static struct clk_register_data data[] = {
+ {"CMD_RCGR", 0x0},
+ {"CFG_RCGR", 0x4},
+ };
+
+ if (n)
+ return ERR_PTR(-EINVAL);
+
+ *regs = data;
+ *size = ARRAY_SIZE(data);
+ return RCGR_CMD_REG(md);
+}
+
/* Ops for clocks with no controllable hardware (placeholders). */
struct clk_ops clk_ops_empty;

/* Ops for block-reset-only (BCR) clocks. */
struct clk_ops clk_ops_rst = {
	.reset = reset_clk_rst,
	.list_registers = reset_clk_list_registers,
};

/* Ops for half-integer-divider RCGs without an MND counter. */
struct clk_ops clk_ops_rcg = {
	.enable = rcg_clk_enable,
	.disable = rcg_clk_disable,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_clk_get_parent,
	.set_parent = rcg_clk_set_parent,
	.list_registers = rcg_hid_clk_list_registers,
};

/* Ops for RCGs with an MND counter. */
struct clk_ops clk_ops_rcg_mnd = {
	.enable = rcg_clk_enable,
	.disable = rcg_clk_disable,
	.set_rate = rcg_clk_set_rate,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_mnd_clk_handoff,
	.get_parent = rcg_mnd_clk_get_parent,
	.set_parent = rcg_clk_set_parent,
	.list_registers = rcg_mnd_clk_list_registers,
};

/* Ops for display pixel RCGs (rate derived from a fixed M/N table). */
struct clk_ops clk_ops_pixel = {
	.enable = rcg_clk_enable,
	.disable = rcg_clk_disable,
	.set_rate = set_rate_pixel,
	.list_rate = rcg_clk_list_rate,
	.round_rate = round_rate_pixel,
	.handoff = pixel_rcg_handoff,
	.list_registers = rcg_mnd_clk_list_registers,
};

/* Pixel RCG ops with parent switching for multi-source displays. */
struct clk_ops clk_ops_pixel_multiparent = {
	.enable = rcg_clk_enable,
	.disable = rcg_clk_disable,
	.set_rate = set_rate_pixel,
	.list_rate = rcg_clk_list_rate,
	.round_rate = round_rate_pixel,
	.handoff = pixel_rcg_handoff,
	.list_registers = rcg_mnd_clk_list_registers,
	.get_parent = display_clk_get_parent,
	.set_parent = rcg_clk_set_parent,
};

/* Ops for eDP pixel RCGs (fixed link-rate M/N tables). */
struct clk_ops clk_ops_edppixel = {
	.enable = rcg_clk_enable,
	.disable = rcg_clk_disable,
	.set_rate = set_rate_edp_pixel,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = pixel_rcg_handoff,
	.list_registers = rcg_mnd_clk_list_registers,
};

/* Ops for DSI byte RCGs (rate set via PLL + half-integer divider). */
struct clk_ops clk_ops_byte = {
	.enable = rcg_clk_enable,
	.disable = rcg_clk_disable,
	.set_rate = set_rate_byte,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = byte_rcg_handoff,
	.list_registers = rcg_hid_clk_list_registers,
};

/* Byte RCG ops with parent switching for multi-source displays. */
struct clk_ops clk_ops_byte_multiparent = {
	.enable = rcg_clk_enable,
	.disable = rcg_clk_disable,
	.set_rate = set_rate_byte,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = byte_rcg_handoff,
	.list_registers = rcg_hid_clk_list_registers,
	.get_parent = display_clk_get_parent,
	.set_parent = rcg_clk_set_parent,
};

/* Ops for the HDMI RCG (rate changed by reprogramming the PLL). */
struct clk_ops clk_ops_rcg_hdmi = {
	.enable = rcg_clk_enable,
	.disable = rcg_clk_disable,
	.set_rate = rcg_clk_set_rate_hdmi,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = rcg_hdmi_clk_get_parent,
	.list_registers = rcg_hid_clk_list_registers,
};

/* Ops for eDP link RCGs. */
struct clk_ops clk_ops_rcg_edp = {
	.enable = rcg_clk_enable,
	.disable = rcg_clk_disable,
	.set_rate = rcg_clk_set_rate_edp,
	.list_rate = rcg_clk_list_rate,
	.round_rate = rcg_clk_round_rate,
	.handoff = rcg_clk_handoff,
	.get_parent = edp_clk_get_parent,
	.list_registers = rcg_hid_clk_list_registers,
};

/* Ops for branch (CBCR) clocks. */
struct clk_ops clk_ops_branch = {
	.enable = branch_clk_enable,
	.prepare = branch_clk_prepare,
	.disable = branch_clk_disable,
	.unprepare = branch_clk_unprepare,
	.set_rate = branch_clk_set_rate,
	.get_rate = branch_clk_get_rate,
	.list_rate = branch_clk_list_rate,
	.round_rate = branch_clk_round_rate,
	.reset = branch_clk_reset,
	.set_flags = branch_clk_set_flags,
	.handoff = branch_clk_handoff,
	.list_registers = branch_clk_list_registers,
};

/* Ops for branch clocks with hardware-control (HW gating) mode. */
struct clk_ops clk_ops_branch_hw_ctl = {
	.enable = hw_ctl_clk_enable,
	.disable = hw_ctl_clk_disable,
	.set_rate = hw_ctl_clk_set_rate,
	.get_rate = hw_ctl_clk_get_rate,
	.round_rate = hw_ctl_clk_round_rate,
};

/* Ops for voteable branch clocks (shared with other masters). */
struct clk_ops clk_ops_vote = {
	.enable = local_vote_clk_enable,
	.disable = local_vote_clk_disable,
	.reset = local_vote_clk_reset,
	.handoff = local_vote_clk_handoff,
	.list_registers = local_vote_clk_list_registers,
};

/* Ops for simple gate clocks; rate operations pass through to the parent. */
struct clk_ops clk_ops_gate = {
	.enable = gate_clk_enable,
	.disable = gate_clk_disable,
	.set_rate = parent_set_rate,
	.get_rate = parent_get_rate,
	.round_rate = parent_round_rate,
	.set_flags = gate_clk_set_flags,
	.handoff = gate_clk_handoff,
	.list_registers = gate_clk_list_registers,
};

/* Mux callbacks for register-backed muxes. */
struct clk_mux_ops mux_reg_ops = {
	.enable = mux_reg_enable,
	.disable = mux_reg_disable,
	.set_mux_sel = mux_reg_set_mux_sel,
	.get_mux_sel = mux_reg_get_mux_sel,
	.is_enabled = mux_reg_is_enabled,
	.list_registers = mux_clk_list_registers,
};

/* Divider callbacks for plain register fields. */
struct clk_div_ops div_reg_ops = {
	.set_div = div_reg_set_div,
	.get_div = div_reg_get_div,
};

/* Divider callbacks for PLL post dividers (encoded via postdiv_map[]). */
struct clk_div_ops postdiv_reg_ops = {
	.set_div = postdiv_reg_set_div,
	.get_div = postdiv_reg_get_div,
};

/* Mux-div callbacks for RCG-style combined mux/divider blocks. */
struct mux_div_ops rcg_mux_div_ops = {
	.enable = rcg_enable,
	.disable = rcg_disable,
	.set_src_div = rcg_set_src_div,
	.get_src_div = rcg_get_src_div,
	.is_enabled = rcg_is_enabled,
	.list_registers = rcg_list_registers,
};
+
+static void *cbc_dt_parser(struct device *dev, struct device_node *np)
+{
+ struct msmclk_data *drv;
+ struct branch_clk *branch_clk;
+ u32 rc;
+
+ branch_clk = devm_kzalloc(dev, sizeof(*branch_clk), GFP_KERNEL);
+ if (!branch_clk) {
+ dt_err(np, "memory alloc failure\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ drv = msmclk_parse_phandle(dev, np->parent->phandle);
+ if (IS_ERR_OR_NULL(drv))
+ return ERR_CAST(drv);
+ branch_clk->base = &drv->base;
+
+ rc = of_property_read_u32(np, "qcom,base-offset",
+ &branch_clk->cbcr_reg);
+ if (rc) {
+ dt_err(np, "missing/incorrect qcom,base-offset dt property\n");
+ return ERR_PTR(rc);
+ }
+
+ /* Optional property */
+ of_property_read_u32(np, "qcom,bcr-offset", &branch_clk->bcr_reg);
+
+ branch_clk->has_sibling = of_property_read_bool(np,
+ "qcom,has-sibling");
+
+ branch_clk->c.ops = &clk_ops_branch;
+
+ return msmclk_generic_clk_init(dev, np, &branch_clk->c);
+}
+MSMCLK_PARSER(cbc_dt_parser, "qcom,cbc", 0);
+
+static void *local_vote_clk_dt_parser(struct device *dev,
+ struct device_node *np)
+{
+ struct local_vote_clk *vote_clk;
+ struct msmclk_data *drv;
+ int rc, val;
+
+ vote_clk = devm_kzalloc(dev, sizeof(*vote_clk), GFP_KERNEL);
+ if (!vote_clk) {
+ dt_err(np, "failed to alloc memory\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ drv = msmclk_parse_phandle(dev, np->parent->phandle);
+ if (IS_ERR_OR_NULL(drv))
+ return ERR_CAST(drv);
+ vote_clk->base = &drv->base;
+
+ rc = of_property_read_u32(np, "qcom,base-offset",
+ &vote_clk->cbcr_reg);
+ if (rc) {
+ dt_err(np, "missing/incorrect qcom,base-offset dt property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ rc = of_property_read_u32(np, "qcom,en-offset", &vote_clk->vote_reg);
+ if (rc) {
+ dt_err(np, "missing/incorrect qcom,en-offset dt property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ rc = of_property_read_u32(np, "qcom,en-bit", &val);
+ if (rc) {
+ dt_err(np, "missing/incorrect qcom,en-bit dt property\n");
+ return ERR_PTR(-EINVAL);
+ }
+ vote_clk->en_mask = BIT(val);
+
+ vote_clk->c.ops = &clk_ops_vote;
+
+ /* Optional property */
+ of_property_read_u32(np, "qcom,bcr-offset", &vote_clk->bcr_reg);
+
+ return msmclk_generic_clk_init(dev, np, &vote_clk->c);
+}
+MSMCLK_PARSER(local_vote_clk_dt_parser, "qcom,local-vote-clk", 0);
+
+/*
+ * Device-tree parser for a simple gate clock ("qcom,gate-clk").
+ * Required properties: qcom,en-offset (enable register) and
+ * qcom,en-bit.  Optional: qcom,delay (post-toggle delay in us,
+ * defaults to 0).  Returns the initialized struct clk or ERR_PTR.
+ */
+static void *gate_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct gate_clk *gate_clk;
+	struct msmclk_data *drv;
+	u32 en_bit;
+	/* of_property_read_u32() returns negative errno codes; keep rc
+	 * signed (the original "u32 rc" only worked by accident). */
+	int rc;
+
+	gate_clk = devm_kzalloc(dev, sizeof(*gate_clk), GFP_KERNEL);
+	if (!gate_clk) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	gate_clk->base = &drv->base;
+
+	rc = of_property_read_u32(np, "qcom,en-offset", &gate_clk->en_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,en-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,en-bit", &en_bit);
+	if (rc) {
+		dt_err(np, "missing qcom,en-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	gate_clk->en_mask = BIT(en_bit);
+
+	/* Optional Property */
+	rc = of_property_read_u32(np, "qcom,delay", &gate_clk->delay_us);
+	if (rc)
+		gate_clk->delay_us = 0;
+
+	gate_clk->c.ops = &clk_ops_gate;
+	return msmclk_generic_clk_init(dev, np, &gate_clk->c);
+}
+MSMCLK_PARSER(gate_clk_dt_parser, "qcom,gate-clk", 0);
+
+
+/*
+ * M value for the RCG M/N:D counter: programmed as-is.  The n argument
+ * is unused here but keeps the rcg_calc_* helpers' signatures uniform.
+ */
+static inline u32 rcg_calc_m(u32 m, u32 n)
+{
+	return m;
+}
+
+/*
+ * N register encoding: the hardware expects ~(n - m), or 0 (counter
+ * bypassed) when n <= 1.  Mirrors the F_MM table macro's .n_val.
+ */
+static inline u32 rcg_calc_n(u32 m, u32 n)
+{
+	/* n == 1 means "no N counter" */
+	n = n > 1 ? n : 0;
+	return ~((n)-(m)) * !!(n);
+}
+
+/* D (duty-cycle) register value: one's complement of n. */
+static inline u32 rcg_calc_duty_cycle(u32 m, u32 n)
+{
+	return ~n;
+}
+
+/*
+ * Pack the RCG CFG register fields: bits 4:0 carry the divider encoded
+ * in half-steps (2*div - 1; div_frac adds an extra half step) and bits
+ * 10:8 carry the mux source select.
+ */
+static inline u32 rcg_calc_div_src(u32 div_int, u32 div_frac, u32 src_sel)
+{
+	int div = 2 * div_int + (div_frac ? 1 : 0) - 1;
+	/* set bypass mode instead of a divider of 1 */
+	div = (div != 1) ? div : 0;
+	/* max() guards div == -1 when div_int and div_frac are both 0 */
+	return BVAL(4, 0, max(div, 0))
+		| BVAL(10, 8, src_sel);
+}
+
+/*
+ * Parse the "qcom,parents" property into a devm-allocated array of
+ * struct clk_src.  The property is a list of <mux-sel, clock-phandle>
+ * pairs; each phandle is resolved through the msmclk hashtable.  On
+ * success *array_size is set to the number of parents and the array is
+ * returned; on failure an ERR_PTR is returned.
+ */
+struct clk_src *msmclk_parse_clk_src(struct device *dev,
+			struct device_node *np, int *array_size)
+{
+	struct clk_src *clks;
+	const void *prop;
+	int num_parents, len, i, prop_len, rc;
+	char *name = "qcom,parents";
+
+	if (!array_size) {
+		dt_err(np, "array_size must be a valid pointer\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	prop = of_get_property(np, name, &prop_len);
+	if (!prop) {
+		dt_prop_err(np, name, "missing dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* each entry is one phandle plus one u32 mux-select index */
+	len = sizeof(phandle) + sizeof(u32);
+	if (prop_len % len) {
+		dt_prop_err(np, name, "invalid property length\n");
+		return ERR_PTR(-EINVAL);
+	}
+	num_parents = prop_len / len;
+
+	/* devm_kcalloc checks the n * size multiplication for overflow */
+	clks = devm_kcalloc(dev, num_parents, sizeof(*clks), GFP_KERNEL);
+	if (!clks) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Assume that u32 and phandle have the same size */
+	for (i = 0; i < num_parents; i++) {
+		phandle p;
+		struct clk_src *a = &clks[i];
+
+		rc = of_property_read_u32_index(np, name, 2 * i, &a->sel);
+		rc |= of_property_read_phandle_index(np, name, 2 * i + 1, &p);
+
+		if (rc) {
+			dt_prop_err(np, name,
+				"unable to read parent clock or mux index\n");
+			return ERR_PTR(-EINVAL);
+		}
+
+		a->src = msmclk_parse_phandle(dev, p);
+		if (IS_ERR(a->src)) {
+			dt_prop_err(np, name, "hashtable lookup failed\n");
+			return ERR_CAST(a->src);
+		}
+	}
+
+	*array_size = num_parents;
+
+	return clks;
+}
+
+/*
+ * Parse the "qcom,freq-tbl" property into rcg->freq_tbl.  Each row is
+ * <freq_hz div_int div_frac m n src-phandle>; the raw values are
+ * converted into the packed M/N:D and CFG register encodings via the
+ * rcg_calc_* helpers.  The table is terminated with FREQ_END.
+ * Returns 0 on success or a negative errno.
+ */
+static int rcg_parse_freq_tbl(struct device *dev,
+		struct device_node *np, struct rcg_clk *rcg)
+{
+	const void *prop;
+	u32 prop_len, num_rows, i, j = 0;
+	struct clk_freq_tbl *tbl;
+	int rc;
+	char *name = "qcom,freq-tbl";
+
+	prop = of_get_property(np, name, &prop_len);
+	if (!prop) {
+		dt_prop_err(np, name, "missing dt property\n");
+		return -EINVAL;
+	}
+
+	prop_len /= sizeof(u32);
+	if (prop_len % 6) {
+		dt_prop_err(np, name, "bad length\n");
+		return -EINVAL;
+	}
+
+	num_rows = prop_len / 6;
+	/* Array is null terminated; devm_kcalloc zeroes it and checks
+	 * the allocation size multiplication for overflow. */
+	rcg->freq_tbl = devm_kcalloc(dev, num_rows + 1,
+					sizeof(*rcg->freq_tbl), GFP_KERNEL);
+	if (!rcg->freq_tbl) {
+		dt_err(np, "memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	tbl = rcg->freq_tbl;
+	for (i = 0; i < num_rows; i++, tbl++) {
+		phandle p;
+		u32 div_int, div_frac, m, n, src_sel, freq_hz;
+
+		rc = of_property_read_u32_index(np, name, j++, &freq_hz);
+		rc |= of_property_read_u32_index(np, name, j++, &div_int);
+		rc |= of_property_read_u32_index(np, name, j++, &div_frac);
+		rc |= of_property_read_u32_index(np, name, j++, &m);
+		rc |= of_property_read_u32_index(np, name, j++, &n);
+		rc |= of_property_read_u32_index(np, name, j++, &p);
+
+		if (rc) {
+			dt_prop_err(np, name, "unable to read u32\n");
+			return -EINVAL;
+		}
+
+		tbl->freq_hz = (unsigned long)freq_hz;
+		tbl->src_clk = msmclk_parse_phandle(dev, p);
+		if (IS_ERR_OR_NULL(tbl->src_clk)) {
+			dt_prop_err(np, name, "hashtable lookup failure\n");
+			/* PTR_ERR(NULL) is 0, i.e. success; map a NULL
+			 * lookup result to an explicit error instead. */
+			return tbl->src_clk ? PTR_ERR(tbl->src_clk) : -EINVAL;
+		}
+
+		tbl->m_val = rcg_calc_m(m, n);
+		tbl->n_val = rcg_calc_n(m, n);
+		tbl->d_val = rcg_calc_duty_cycle(m, n);
+
+		src_sel = parent_to_src_sel(rcg->c.parents,
+					rcg->c.num_parents, tbl->src_clk);
+		tbl->div_src_val = rcg_calc_div_src(div_int, div_frac,
+					src_sel);
+	}
+	/* End table with special value */
+	tbl->freq_hz = FREQ_END;
+	return 0;
+}
+
+/*
+ * Device-tree parser for root clock generators ("qcom,rcg-hid" and
+ * "qcom,rcg-mn").  Resolves the parent list, reads the CMD_RCGR base
+ * offset and the frequency table, then selects HID- or MND-style
+ * set_rate ops from the compatible string.
+ */
+static void *rcg_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct rcg_clk *rcg;
+	struct msmclk_data *drv;
+	int rc;
+
+	rcg = devm_kzalloc(dev, sizeof(*rcg), GFP_KERNEL);
+	if (!rcg) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		/* ERR_CAST for consistency with the sibling parsers */
+		return ERR_CAST(drv);
+	rcg->base = &drv->base;
+
+	rcg->c.parents = msmclk_parse_clk_src(dev, np, &rcg->c.num_parents);
+	if (IS_ERR(rcg->c.parents)) {
+		dt_err(np, "unable to read parents\n");
+		return ERR_CAST(rcg->c.parents);
+	}
+
+	rc = of_property_read_u32(np, "qcom,base-offset", &rcg->cmd_rcgr_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,base-offset dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	rc = rcg_parse_freq_tbl(dev, np, rcg);
+	if (rc) {
+		dt_err(np, "unable to read freq_tbl\n");
+		return ERR_PTR(rc);
+	}
+	rcg->current_freq = &rcg_dummy_freq;
+
+	if (of_device_is_compatible(np, "qcom,rcg-hid")) {
+		rcg->c.ops = &clk_ops_rcg;
+		rcg->set_rate = set_rate_hid;
+	} else if (of_device_is_compatible(np, "qcom,rcg-mn")) {
+		rcg->c.ops = &clk_ops_rcg_mnd;
+		rcg->set_rate = set_rate_mnd;
+	} else {
+		dt_err(np, "unexpected compatible string\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return msmclk_generic_clk_init(dev, np, &rcg->c);
+}
+MSMCLK_PARSER(rcg_clk_dt_parser, "qcom,rcg-hid", 0);
+MSMCLK_PARSER(rcg_clk_dt_parser, "qcom,rcg-mn", 1);
+
+/*
+ * Parse the optional "qcom,recursive-parents" phandle list into
+ * mux->rec_parents.  Absence of the property is not an error.
+ * Returns 0 on success or a negative errno.
+ */
+static int parse_rec_parents(struct device *dev,
+			struct device_node *np, struct mux_clk *mux)
+{
+	int i, rc;
+	char *name = "qcom,recursive-parents";
+	phandle p;
+
+	mux->num_rec_parents = of_property_count_phandles(np, name);
+	if (mux->num_rec_parents <= 0)
+		return 0;
+
+	/* devm_kcalloc checks the n * size multiplication for overflow */
+	mux->rec_parents = devm_kcalloc(dev, mux->num_rec_parents,
+			sizeof(*mux->rec_parents), GFP_KERNEL);
+	if (!mux->rec_parents) {
+		dt_err(np, "memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < mux->num_rec_parents; i++) {
+		rc = of_property_read_phandle_index(np, name, i, &p);
+		if (rc) {
+			dt_prop_err(np, name, "unable to read u32\n");
+			return rc;
+		}
+
+		mux->rec_parents[i] = msmclk_parse_phandle(dev, p);
+		if (IS_ERR(mux->rec_parents[i])) {
+			dt_prop_err(np, name, "hashtable lookup failure\n");
+			return PTR_ERR(mux->rec_parents[i]);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Device-tree parser for a register-controlled mux ("qcom,mux-reg").
+ * Required: qcom,parents, qcom,offset, qcom,mask, qcom,shift.
+ * Optional: qcom,recursive-parents, qcom,en-offset, qcom,en-mask.
+ */
+static void *mux_reg_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct mux_clk *mux;
+	struct msmclk_data *drv;
+	int rc;
+
+	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+	if (!mux) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mux->parents = msmclk_parse_clk_src(dev, np, &mux->num_parents);
+	if (IS_ERR(mux->parents))
+		return mux->parents;
+
+	mux->c.parents = mux->parents;
+	mux->c.num_parents = mux->num_parents;
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		/* ERR_CAST for consistency with the sibling parsers */
+		return ERR_CAST(drv);
+	mux->base = &drv->base;
+
+	rc = parse_rec_parents(dev, np, mux);
+	if (rc) {
+		dt_err(np, "Incorrect qcom,recursive-parents dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	rc = of_property_read_u32(np, "qcom,offset", &mux->offset);
+	if (rc) {
+		dt_err(np, "missing qcom,offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,mask", &mux->mask);
+	if (rc) {
+		dt_err(np, "missing qcom,mask dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,shift", &mux->shift);
+	if (rc) {
+		dt_err(np, "missing qcom,shift dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	mux->c.ops = &clk_ops_gen_mux;
+	mux->ops = &mux_reg_ops;
+
+	/* Optional Properties */
+	of_property_read_u32(np, "qcom,en-offset", &mux->en_offset);
+	of_property_read_u32(np, "qcom,en-mask", &mux->en_mask);
+
+	return msmclk_generic_clk_init(dev, np, &mux->c);
+}
+MSMCLK_PARSER(mux_reg_clk_dt_parser, "qcom,mux-reg", 0);
+
+/*
+ * Device-tree parser for the debug measurement mux
+ * ("qcom,measure-mux").  Builds on mux_reg_clk_dt_parser(), then
+ * attaches the CXO reference, the XO-div4 CBCR and the test-pad
+ * configuration, and overrides get_rate with measure_get_rate.
+ */
+static void *measure_clk_dt_parser(struct device *dev,
+				struct device_node *np)
+{
+	struct mux_clk *mux;
+	struct clk *c;
+	struct measure_clk_data *p;
+	struct clk_ops *clk_ops_measure_mux;
+	phandle cxo;
+	int rc;
+
+	c = mux_reg_clk_dt_parser(dev, np);
+	if (IS_ERR(c))
+		return c;
+
+	mux = to_mux_clk(c);
+
+	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
+	if (!p) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = of_property_read_phandle_index(np, "qcom,cxo", 0, &cxo);
+	if (rc) {
+		dt_err(np, "missing qcom,cxo\n");
+		return ERR_PTR(-EINVAL);
+	}
+	p->cxo = msmclk_parse_phandle(dev, cxo);
+	if (IS_ERR_OR_NULL(p->cxo)) {
+		dt_prop_err(np, "qcom,cxo", "hashtable lookup failure\n");
+		/* never return NULL here - callers would read it as
+		 * success; map a NULL lookup to an explicit error */
+		return p->cxo ? ERR_CAST(p->cxo) : ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,xo-div4-cbcr", &p->xo_div4_cbcr);
+	if (rc) {
+		dt_err(np, "missing qcom,xo-div4-cbcr dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,test-pad-config", &p->plltest_val);
+	if (rc) {
+		dt_err(np, "missing qcom,test-pad-config dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* control/status/plltest registers sit just after the mux reg */
+	p->base = mux->base;
+	p->ctl_reg = mux->offset + 0x4;
+	p->status_reg = mux->offset + 0x8;
+	p->plltest_reg = mux->offset + 0xC;
+	mux->priv = p;
+
+	clk_ops_measure_mux = devm_kzalloc(dev, sizeof(*clk_ops_measure_mux),
+			GFP_KERNEL);
+	if (!clk_ops_measure_mux) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	*clk_ops_measure_mux = clk_ops_gen_mux;
+	clk_ops_measure_mux->get_rate = measure_get_rate;
+
+	mux->c.ops = clk_ops_measure_mux;
+
+	/* Already did generic clk init */
+	return &mux->c;
+}
+MSMCLK_PARSER(measure_clk_dt_parser, "qcom,measure-mux", 0);
+
+/*
+ * Device-tree parser for a programmable divider clock ("qcom,div-clk").
+ * Required: qcom,max-div, qcom,min-div, qcom,base-offset, qcom,mask,
+ * qcom,shift.  qcom,slave-div selects the slave-divider clk_ops.
+ * (Also drops the stray ';' that followed the original function body.)
+ */
+static void *div_clk_dt_parser(struct device *dev,
+				struct device_node *np)
+{
+	struct div_clk *div_clk;
+	struct msmclk_data *drv;
+	int rc;
+
+	div_clk = devm_kzalloc(dev, sizeof(*div_clk), GFP_KERNEL);
+	if (!div_clk) {
+		dt_err(np, "memory alloc failed\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = of_property_read_u32(np, "qcom,max-div", &div_clk->data.max_div);
+	if (rc) {
+		dt_err(np, "missing qcom,max-div\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,min-div", &div_clk->data.min_div);
+	if (rc) {
+		dt_err(np, "missing qcom,min-div\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,base-offset", &div_clk->offset);
+	if (rc) {
+		dt_err(np, "missing qcom,base-offset\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,mask", &div_clk->mask);
+	if (rc) {
+		dt_err(np, "missing qcom,mask\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,shift", &div_clk->shift);
+	if (rc) {
+		dt_err(np, "missing qcom,shift\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (of_property_read_bool(np, "qcom,slave-div"))
+		div_clk->c.ops = &clk_ops_slave_div;
+	else
+		div_clk->c.ops = &clk_ops_div;
+	div_clk->ops = &div_reg_ops;
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	div_clk->base = &drv->base;
+
+	return msmclk_generic_clk_init(dev, np, &div_clk->c);
+}
+MSMCLK_PARSER(div_clk_dt_parser, "qcom,div-clk", 0);
+
+/*
+ * Device-tree parser for a fixed-ratio divider clock
+ * ("qcom,fixed-div-clk").  Reads qcom,div and pins min_div/max_div to
+ * that value so the ratio can never be reprogrammed.
+ *
+ * NOTE(review): unlike div_clk_dt_parser(), no ->base register mapping
+ * is assigned here even though div_reg_ops is installed - confirm the
+ * fixed-div path never dereferences base, or whether an assignment is
+ * missing.
+ */
+static void *fixed_div_clk_dt_parser(struct device *dev,
+				struct device_node *np)
+{
+	struct div_clk *div_clk;
+	int rc;
+
+	div_clk = devm_kzalloc(dev, sizeof(*div_clk), GFP_KERNEL);
+	if (!div_clk) {
+		dt_err(np, "memory alloc failed\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = of_property_read_u32(np, "qcom,div", &div_clk->data.div);
+	if (rc) {
+		dt_err(np, "missing qcom,div\n");
+		return ERR_PTR(-EINVAL);
+	}
+	/* lock the divider to the fixed ratio */
+	div_clk->data.min_div = div_clk->data.div;
+	div_clk->data.max_div = div_clk->data.div;
+
+	if (of_property_read_bool(np, "qcom,slave-div"))
+		div_clk->c.ops = &clk_ops_slave_div;
+	else
+		div_clk->c.ops = &clk_ops_div;
+	div_clk->ops = &div_reg_ops;
+
+	return msmclk_generic_clk_init(dev, np, &div_clk->c);
+}
+MSMCLK_PARSER(fixed_div_clk_dt_parser, "qcom,fixed-div-clk", 0);
+
+/*
+ * Device-tree parser for a block-reset control ("qcom,reset-clk").
+ * Required: qcom,base-offset (the reset register).  Uses clk_ops_rst.
+ * (Also drops the stray ';' that followed the original function body.)
+ */
+static void *reset_clk_dt_parser(struct device *dev,
+				struct device_node *np)
+{
+	struct reset_clk *reset_clk;
+	struct msmclk_data *drv;
+	int rc;
+
+	reset_clk = devm_kzalloc(dev, sizeof(*reset_clk), GFP_KERNEL);
+	if (!reset_clk) {
+		dt_err(np, "memory alloc failed\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = of_property_read_u32(np, "qcom,base-offset",
+						&reset_clk->reset_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,base-offset\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	reset_clk->base = &drv->base;
+
+	reset_clk->c.ops = &clk_ops_rst;
+	return msmclk_generic_clk_init(dev, np, &reset_clk->c);
+}
+MSMCLK_PARSER(reset_clk_dt_parser, "qcom,reset-clk", 0);
diff --git a/drivers/clk/msm/clock-mmss-8996.c b/drivers/clk/msm/clock-mmss-8996.c
new file mode 100644
index 000000000000..ba81731ce1cb
--- /dev/null
+++ b/drivers/clk/msm/clock-mmss-8996.c
@@ -0,0 +1,3994 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/of_platform.h>
+
+#include <soc/qcom/clock-alpha-pll.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-voter.h>
+#include <soc/qcom/rpm-smd.h>
+#include <soc/qcom/clock-rpm.h>
+
+#include <dt-bindings/clock/msm-clocks-8996.h>
+#include <dt-bindings/clock/msm-clocks-hwio-8996.h>
+
+#include "vdd-level-8996.h"
+#include "clock.h"
+
+/* ioremapped register bases, filled in at probe time */
+static void __iomem *virt_base;
+static void __iomem *virt_base_gpu;
+
+/* RCG mux source-select field values for each MMSS clock source */
+#define mmpll0_out_main_mm_source_val 1
+#define mmpll1_out_main_mm_source_val 2
+#define mmpll2_out_main_mm_source_val 3
+#define mmpll3_out_main_mm_source_val 3
+#define mmpll4_out_main_mm_source_val 3
+#define mmpll5_out_main_mm_source_val 2
+#define mmpll8_out_main_mm_source_val 4
+#define mmpll9_out_main_mm_source_val 2
+
+#define mmsscc_xo_mm_source_val 0
+#define mmsscc_gpll0_mm_source_val 5
+#define mmsscc_gpll0_div_mm_source_val 6
+#define dsi0phypll_mm_source_val 1
+#define dsi1phypll_mm_source_val 2
+#define ext_extpclk_clk_src_mm_source_val 1
+
+/*
+ * Encode a divider in hardware half-steps: register value 2*div - 1,
+ * 0 means bypass.  The tables below pass fractional literals (e.g.
+ * 3.5) which are evaluated in floating point, then truncated to int
+ * inside F_MM.
+ */
+#define FIXDIV(div) (div ? (2 * (div) - 1) : (0))
+
+/* Build one clk_freq_tbl row: frequency, source, divider, M/N:D */
+#define F_MM(f, s, div, m, n) \
+	{ \
+		.freq_hz = (f), \
+		.src_clk = &s.c, \
+		.m_val = (m), \
+		.n_val = ~((n)-(m)) * !!(n), \
+		.d_val = ~(n),\
+		.div_src_val = BVAL(4, 0, (int)FIXDIV(div)) \
+			| BVAL(10, 8, s##_mm_source_val), \
+	}
+
+#define GFX_MIN_SVS_LEVEL 2
+#define GPU_REQ_ID 0x3
+
+/* speed-bin efuse field positions for v3 and Pro parts */
+#define EFUSE_SHIFT_v3 29
+#define EFUSE_MASK_v3 0x7
+#define EFUSE_SHIFT_PRO 28
+#define EFUSE_MASK_PRO 0x3
+
+/* populated at probe with the GPU-specific RCG ops */
+static struct clk_ops clk_ops_gpu;
+
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
+
+static int vdd_mmpll4_levels[] = {
+ RPM_REGULATOR_CORNER_NONE, 0,
+ RPM_REGULATOR_CORNER_SVS_KRAIT, 1800000,
+ RPM_REGULATOR_CORNER_SVS_SOC, 1800000,
+ RPM_REGULATOR_CORNER_NORMAL, 1800000,
+ RPM_REGULATOR_CORNER_SUPER_TURBO, 1800000,
+};
+
+static DEFINE_VDD_REGULATORS(vdd_mmpll4, VDD_DIG_NUM, 2, vdd_mmpll4_levels,
+ NULL);
+DEFINE_VDD_REGS_INIT(vdd_gfx, 2);
+DEFINE_VDD_REGS_INIT(vdd_gpu_mx, 1);
+
+static struct alpha_pll_masks pll_masks_p = {
+ .lock_mask = BIT(31),
+ .active_mask = BIT(30),
+ .vco_mask = BM(21, 20) >> 20,
+ .vco_shift = 20,
+ .alpha_en_mask = BIT(24),
+ .output_mask = 0xf,
+};
+
+static struct alpha_pll_vco_tbl mmpll_p_vco[] = {
+ VCO(3, 250000000, 500000000),
+ VCO(2, 500000000, 1000000000),
+ VCO(1, 1000000000, 1500000000),
+ VCO(0, 1500000000, 2000000000),
+};
+
+static struct alpha_pll_vco_tbl mmpll_gfx_vco[] = {
+ VCO(2, 400000000, 1000000000),
+ VCO(1, 1000000000, 1500000000),
+ VCO(0, 1500000000, 2000000000),
+};
+
+static struct alpha_pll_masks pll_masks_t = {
+ .lock_mask = BIT(31),
+ .alpha_en_mask = BIT(24),
+ .output_mask = 0xf,
+};
+
+static struct alpha_pll_masks pll_masks_b = {
+ .lock_mask = BIT(31),
+ .alpha_en_mask = BIT(24),
+ .output_mask = 0xf,
+ .post_div_mask = BM(11, 8),
+ .update_mask = BIT(22),
+};
+
+static struct alpha_pll_vco_tbl mmpll_t_vco[] = {
+ VCO(0, 500000000, 1500000000),
+};
+
+DEFINE_EXT_CLK(mmsscc_xo, NULL);
+DEFINE_EXT_CLK(mmsscc_gpll0, NULL);
+DEFINE_EXT_CLK(mmsscc_gpll0_div, NULL);
+
+static struct alpha_pll_clk mmpll0 = {
+ .masks = &pll_masks_p,
+ .base = &virt_base,
+ .offset = MMSS_MMPLL0_MODE,
+ .vco_tbl = mmpll_p_vco,
+ .num_vco = ARRAY_SIZE(mmpll_p_vco),
+ .fsm_reg_offset = MMSS_MMSS_PLL_VOTE_APCS,
+ .fsm_en_mask = BIT(0),
+ .enable_config = 0x1,
+ .c = {
+ .rate = 800000000,
+ .parent = &mmsscc_xo.c,
+ .dbg_name = "mmpll0",
+ .ops = &clk_ops_fixed_alpha_pll,
+ VDD_DIG_FMAX_MAP2(LOWER, 400000000, NOMINAL, 800000000),
+ CLK_INIT(mmpll0.c),
+ },
+};
+DEFINE_EXT_CLK(mmpll0_out_main, &mmpll0.c);
+
+static struct alpha_pll_clk mmpll1 = {
+ .masks = &pll_masks_p,
+ .base = &virt_base,
+ .offset = MMSS_MMPLL1_MODE,
+ .vco_tbl = mmpll_p_vco,
+ .num_vco = ARRAY_SIZE(mmpll_p_vco),
+ .fsm_reg_offset = MMSS_MMSS_PLL_VOTE_APCS,
+ .fsm_en_mask = BIT(1),
+ .enable_config = 0x1,
+ .c = {
+ .rate = 740000000,
+ .parent = &mmsscc_xo.c,
+ .dbg_name = "mmpll1",
+ .ops = &clk_ops_fixed_alpha_pll,
+ VDD_DIG_FMAX_MAP2(LOWER, 370000000, NOMINAL, 740000000),
+ CLK_INIT(mmpll1.c),
+ },
+};
+DEFINE_EXT_CLK(mmpll1_out_main, &mmpll1.c);
+
+static struct alpha_pll_clk mmpll4 = {
+ .masks = &pll_masks_t,
+ .base = &virt_base,
+ .offset = MMSS_MMPLL4_MODE,
+ .vco_tbl = mmpll_t_vco,
+ .num_vco = ARRAY_SIZE(mmpll_t_vco),
+ .enable_config = 0x1,
+ .c = {
+ .parent = &mmsscc_xo.c,
+ .rate = 960000000,
+ .dbg_name = "mmpll4",
+ .ops = &clk_ops_fixed_alpha_pll,
+ VDD_MMPLL4_FMAX_MAP2(LOWER, 480000000, NOMINAL, 960000000),
+ CLK_INIT(mmpll4.c),
+ },
+};
+DEFINE_EXT_CLK(mmpll4_out_main, &mmpll4.c);
+
+static struct alpha_pll_clk mmpll3 = {
+ .masks = &pll_masks_p,
+ .base = &virt_base,
+ .offset = MMSS_MMPLL3_MODE,
+ .vco_tbl = mmpll_p_vco,
+ .num_vco = ARRAY_SIZE(mmpll_p_vco),
+ .enable_config = 0x1,
+ .c = {
+ .parent = &mmsscc_xo.c,
+ .rate = 900000000,
+ .dbg_name = "mmpll3",
+ .ops = &clk_ops_fixed_alpha_pll,
+ VDD_DIG_FMAX_MAP2(LOWER, 450000000, NOMINAL, 900000000),
+ CLK_INIT(mmpll3.c),
+ },
+};
+DEFINE_EXT_CLK(mmpll3_out_main, &mmpll3.c);
+
+static struct clk_freq_tbl ftbl_ahb_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_MM( 40000000, mmsscc_gpll0_div, 7.5, 0, 0),
+ F_MM( 80000000, mmpll0_out_main, 10, 0, 0),
+ F_END
+};
+
+static struct rcg_clk ahb_clk_src = {
+ .cmd_rcgr_reg = MMSS_AHB_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_ahb_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .non_local_children = true,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "ahb_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 40000000, NOMINAL, 80000000),
+ CLK_INIT(ahb_clk_src.c),
+ },
+};
+
+static struct alpha_pll_clk mmpll2 = {
+ .masks = &pll_masks_p,
+ .base = &virt_base,
+ .offset = MMSS_MMPLL2_MODE,
+ .vco_tbl = mmpll_p_vco,
+ .num_vco = ARRAY_SIZE(mmpll_p_vco),
+ .enable_config = 0x1,
+ .c = {
+ .parent = &mmsscc_xo.c,
+ .rate = 410000000,
+ .dbg_name = "mmpll2",
+ .ops = &clk_ops_fixed_alpha_pll,
+ VDD_DIG_FMAX_MAP1(LOWER, 410000000),
+ CLK_INIT(mmpll2.c),
+ },
+};
+DEFINE_EXT_CLK(mmpll2_out_main, &mmpll2.c);
+
+static struct div_clk mmpll2_postdiv_clk = {
+ .base = &virt_base,
+ .offset = MMSS_MMPLL2_USER_CTL_MODE,
+ .mask = 0xf,
+ .shift = 8,
+ .data = {
+ .max_div = 4,
+ .min_div = 1,
+ .skip_odd_div = true,
+ .allow_div_one = true,
+ },
+ .ops = &postdiv_reg_ops,
+ .c = {
+ .parent = &mmpll2_out_main.c,
+ .dbg_name = "mmpll2_postdiv_clk",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(mmpll2_postdiv_clk.c),
+ },
+};
+
+static struct alpha_pll_clk mmpll8 = {
+ .masks = &pll_masks_p,
+ .base = &virt_base,
+ .offset = MMSS_MMPLL8_MODE,
+ .vco_tbl = mmpll_p_vco,
+ .num_vco = ARRAY_SIZE(mmpll_p_vco),
+ .enable_config = 0x1,
+ .c = {
+ .parent = &mmsscc_xo.c,
+ .rate = 360000000,
+ .dbg_name = "mmpll8",
+ .ops = &clk_ops_fixed_alpha_pll,
+ VDD_DIG_FMAX_MAP1(LOWER, 360000000),
+ CLK_INIT(mmpll8.c),
+ },
+};
+DEFINE_EXT_CLK(mmpll8_out_main, &mmpll8.c);
+
+static struct div_clk mmpll8_postdiv_clk = {
+ .base = &virt_base,
+ .offset = MMSS_MMPLL8_USER_CTL_MODE,
+ .mask = 0xf,
+ .shift = 8,
+ .data = {
+ .max_div = 4,
+ .min_div = 1,
+ .skip_odd_div = true,
+ .allow_div_one = true,
+ },
+ .ops = &postdiv_reg_ops,
+ .c = {
+ .parent = &mmpll8_out_main.c,
+ .dbg_name = "mmpll8_postdiv_clk",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(mmpll8_postdiv_clk.c),
+ },
+};
+
+static struct alpha_pll_clk mmpll9 = {
+ .masks = &pll_masks_b,
+ .base = &virt_base,
+ .offset = MMSS_MMPLL9_MODE,
+ .vco_tbl = mmpll_t_vco,
+ .num_vco = ARRAY_SIZE(mmpll_t_vco),
+ .post_div_config = 0x100,
+ .enable_config = 0x1,
+ .dynamic_update = true,
+ .c = {
+ .parent = &mmsscc_xo.c,
+ .rate = 960000000,
+ .dbg_name = "mmpll9",
+ .ops = &clk_ops_fixed_alpha_pll,
+ VDD_MMPLL4_FMAX_MAP2(LOWER, 480000000, NOMINAL, 960000000),
+ CLK_INIT(mmpll9.c),
+ },
+};
+DEFINE_EXT_CLK(mmpll9_out_main, &mmpll9.c);
+
+static struct div_clk mmpll9_postdiv_clk = {
+ .base = &virt_base,
+ .offset = MMSS_MMPLL9_USER_CTL_MODE,
+ .mask = 0xf,
+ .shift = 8,
+ .data = {
+ .div = 2,
+ .min_div = 2,
+ .max_div = 2,
+ },
+ .ops = &postdiv_reg_ops,
+ .c = {
+ .parent = &mmpll9_out_main.c,
+ .dbg_name = "mmpll9_postdiv_clk",
+ .ops = &clk_ops_slave_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(mmpll9_postdiv_clk.c),
+ },
+};
+
+static struct alpha_pll_clk mmpll5 = {
+ .masks = &pll_masks_p,
+ .base = &virt_base,
+ .offset = MMSS_MMPLL5_MODE,
+ .vco_tbl = mmpll_p_vco,
+ .num_vco = ARRAY_SIZE(mmpll_p_vco),
+ .enable_config = 0x1,
+ .c = {
+ .parent = &mmsscc_xo.c,
+ .rate = 720000000,
+ .dbg_name = "mmpll5",
+ .ops = &clk_ops_fixed_alpha_pll,
+ VDD_DIG_FMAX_MAP2(LOWER, 360000000, LOW, 720000000),
+ CLK_INIT(mmpll5.c),
+ },
+};
+DEFINE_EXT_CLK(mmpll5_out_main, &mmpll5.c);
+
+static struct clk_freq_tbl ftbl_gfx3d_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_MM( 60000000, mmpll8_out_main, 6, 0, 0),
+ F_MM( 120000000, mmpll8_out_main, 3, 0, 0),
+ F_MM( 205000000, mmpll2_out_main, 2, 0, 0),
+ F_MM( 360000000, mmpll8_out_main, 1, 0, 0),
+ F_MM( 480000000, mmpll9_out_main, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk gfx3d_clk_src = {
+ .cmd_rcgr_reg = MMSS_GFX3D_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_gfx3d_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .force_enable_rcgr = true,
+ .base = &virt_base_gpu,
+ .c = {
+ .dbg_name = "gfx3d_clk_src",
+ .ops = &clk_ops_gpu,
+ .vdd_class = &vdd_gfx,
+ CLK_INIT(gfx3d_clk_src.c),
+ },
+};
+
+static struct mux_div_clk gfx3d_clk_src_v2 = {
+ .div_offset = MMSS_GFX3D_CMD_RCGR,
+ .div_mask = BM(4, 0),
+ .src_mask = BM(10, 8) >> 8,
+ .src_shift = 8,
+ .ops = &rcg_mux_div_ops,
+ .try_get_rate = true,
+ .force_enable_md = true,
+ .data = {
+ .min_div = 1,
+ .max_div = 1,
+ },
+ .parents = (struct clk_src[]) {
+ {&mmpll9_postdiv_clk.c, 2},
+ {&mmpll2_postdiv_clk.c, 3},
+ {&mmpll8_postdiv_clk.c, 4},
+ },
+ .num_parents = 3,
+ .c = {
+ .dbg_name = "gfx3d_clk_src_v2",
+ .ops = &clk_ops_gpu,
+ .vdd_class = &vdd_gfx,
+ CLK_INIT(gfx3d_clk_src_v2.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_csi0_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 274290000, mmpll4_out_main, 3.5, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csi0_clk_src_v2[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csi0_clk_src_v3[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 266666667, mmpll0_out_main, 3, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk csi0_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSI0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csi0_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 480000000, HIGH, 600000000),
+ CLK_INIT(csi0_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_vfe0_clk_src[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 400000000, mmpll0_out_main, 2, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_vfe0_clk_src_v2[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_vfe0_clk_src_v3[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 300000000, mmsscc_gpll0, 2, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk vfe0_clk_src = {
+ .cmd_rcgr_reg = MMSS_VFE0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_vfe0_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "vfe0_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 75000000, LOW, 200000000,
+ NOMINAL, 480000000, HIGH, 600000000),
+ CLK_INIT(vfe0_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_vfe1_clk_src[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 400000000, mmpll0_out_main, 2, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_vfe1_clk_src_v2[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_vfe1_clk_src_v3[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 300000000, mmsscc_gpll0, 2, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk vfe1_clk_src = {
+ .cmd_rcgr_reg = MMSS_VFE1_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_vfe1_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "vfe1_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 480000000, HIGH, 600000000),
+ CLK_INIT(vfe1_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_csi1_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 274290000, mmpll4_out_main, 3.5, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csi1_clk_src_v2[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csi1_clk_src_v3[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 266666667, mmpll0_out_main, 3, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk csi1_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSI1_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csi1_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csi1_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 480000000, HIGH, 600000000),
+ CLK_INIT(csi1_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_csi2_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 274290000, mmpll4_out_main, 3.5, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csi2_clk_src_v2[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csi2_clk_src_v3[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 266666667, mmpll0_out_main, 3, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk csi2_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSI2_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csi2_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csi2_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 480000000, HIGH, 600000000),
+ CLK_INIT(csi2_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_csi3_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 274290000, mmpll4_out_main, 3.5, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csi3_clk_src_v2[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csi3_clk_src_v3[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 266666667, mmpll0_out_main, 3, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk csi3_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSI3_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csi3_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csi3_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 480000000, HIGH, 600000000),
+ CLK_INIT(csi3_clk_src.c),
+ },
+};
+
+/*
+ * MAXI (multimedia AXI) root clock. Note the fmax map lists
+ * LOW=171.43 MHz and HIGH=370 MHz, which only both exist across the
+ * v1/v2 tables -- presumably the fmax array covers all HW versions
+ * while the table is per-version; TODO confirm.
+ */
+static struct clk_freq_tbl ftbl_maxi_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 370000000, mmpll1_out_main, 2, 0, 0),
+ F_END
+};
+
+/* v2 adds 171.43 MHz and raises the top mmpll1 step to 405 MHz. */
+static struct clk_freq_tbl ftbl_maxi_clk_src_v2[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_MM( 171430000, mmsscc_gpll0, 3.5, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 405000000, mmpll1_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk maxi_clk_src = {
+ .cmd_rcgr_reg = MMSS_MAXI_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_maxi_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "maxi_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 75000000, LOW, 171430000,
+ NOMINAL, 320000000, HIGH, 370000000),
+ CLK_INIT(maxi_clk_src.c),
+ },
+};
+
+/*
+ * Camera post-processor (CPP) and JPEG engine root clocks.
+ * All are half-integer-divider (HID) RCGs with per-corner fmax
+ * limits supplied via VDD_DIG_FMAX_MAP*.
+ */
+static struct clk_freq_tbl ftbl_cpp_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_MM( 640000000, mmpll4_out_main, 1.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk cpp_clk_src = {
+ .cmd_rcgr_reg = MMSS_CPP_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_cpp_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "cpp_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 480000000, HIGH, 640000000),
+ CLK_INIT(cpp_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_jpeg0_clk_src[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 228571429, mmpll0_out_main, 3.5, 0, 0),
+ F_MM( 266666667, mmpll0_out_main, 3, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk jpeg0_clk_src = {
+ .cmd_rcgr_reg = MMSS_JPEG0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_jpeg0_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "jpeg0_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 75000000, LOW, 150000000,
+ NOMINAL, 320000000, HIGH, 480000000),
+ CLK_INIT(jpeg0_clk_src.c),
+ },
+};
+
+/* jpeg2 tops out at 320 MHz; no mmpll4 step unlike jpeg0/jpeg_dma. */
+static struct clk_freq_tbl ftbl_jpeg2_clk_src[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 228571429, mmpll0_out_main, 3.5, 0, 0),
+ F_MM( 266666667, mmpll0_out_main, 3, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk jpeg2_clk_src = {
+ .cmd_rcgr_reg = MMSS_JPEG2_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_jpeg2_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "jpeg2_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 75000000, LOW, 150000000,
+ NOMINAL, 266670000, HIGH, 320000000),
+ CLK_INIT(jpeg2_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_jpeg_dma_clk_src[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 228571429, mmpll0_out_main, 3.5, 0, 0),
+ F_MM( 266666667, mmpll0_out_main, 3, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 480000000, mmpll4_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk jpeg_dma_clk_src = {
+ .cmd_rcgr_reg = MMSS_JPEG_DMA_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_jpeg_dma_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "jpeg_dma_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 75000000, LOW, 150000000,
+ NOMINAL, 320000000, HIGH, 480000000),
+ CLK_INIT(jpeg_dma_clk_src.c),
+ },
+};
+
+/*
+ * Display MDP root clock. The fmax map matches the v1 table
+ * (240/320/360 MHz); the v2 table's higher steps (up to 412.5 MHz)
+ * presumably come with a matching fmax update elsewhere at probe
+ * time -- TODO confirm.
+ */
+static struct clk_freq_tbl ftbl_mdp_clk_src[] = {
+ F_MM( 85714286, mmsscc_gpll0, 7, 0, 0),
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 171428571, mmsscc_gpll0, 3.5, 0, 0),
+ F_MM( 240000000, mmpll5_out_main, 3, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 360000000, mmpll5_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_mdp_clk_src_v2[] = {
+ F_MM( 85714286, mmsscc_gpll0, 7, 0, 0),
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 171428571, mmsscc_gpll0, 3.5, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 275000000, mmpll5_out_main, 3, 0, 0),
+ F_MM( 300000000, mmsscc_gpll0, 2, 0, 0),
+ F_MM( 330000000, mmpll5_out_main, 2.5, 0, 0),
+ F_MM( 412500000, mmpll5_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk mdp_clk_src = {
+ .cmd_rcgr_reg = MMSS_MDP_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_mdp_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdp_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 171430000, LOW, 240000000,
+ NOMINAL, 320000000, HIGH, 360000000),
+ CLK_INIT(mdp_clk_src.c),
+ },
+};
+
+/*
+ * DSI pixel clock RCGs. Rates are driven by the external DSI PHY
+ * PLLs (ext_pclk0/ext_pclk1), so freq_hz is 0 in every entry and
+ * CLKFLAG_NO_RATE_CACHE is set. Both pclk tables list both PHY
+ * PLLs, so either pixel clock can be muxed to either DSI PLL.
+ */
+DEFINE_EXT_CLK(ext_pclk0_clk_src, NULL);
+DEFINE_EXT_CLK(ext_pclk1_clk_src, NULL);
+static struct clk_freq_tbl ftbl_pclk0_clk_src[] = {
+ {
+ .div_src_val = BVAL(10, 8, mmsscc_xo_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &mmsscc_xo.c,
+ .freq_hz = 0,
+ },
+ {
+ .div_src_val = BVAL(10, 8, dsi0phypll_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &ext_pclk0_clk_src.c,
+ .freq_hz = 0,
+ },
+ {
+ .div_src_val = BVAL(10, 8, dsi1phypll_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &ext_pclk1_clk_src.c,
+ .freq_hz = 0,
+ },
+ F_END
+};
+
+static struct rcg_clk pclk0_clk_src = {
+ .cmd_rcgr_reg = MMSS_PCLK0_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .current_freq = ftbl_pclk0_clk_src,
+ .freq_tbl = ftbl_pclk0_clk_src,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "pclk0_clk_src",
+ .parent = &ext_pclk0_clk_src.c,
+ .ops = &clk_ops_pixel_multiparent,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ VDD_DIG_FMAX_MAP3(LOWER, 175000000, LOW, 280000000,
+ NOMINAL, 350000000),
+ CLK_INIT(pclk0_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_pclk1_clk_src[] = {
+ {
+ .div_src_val = BVAL(10, 8, mmsscc_xo_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &mmsscc_xo.c,
+ .freq_hz = 0,
+ },
+ {
+ .div_src_val = BVAL(10, 8, dsi0phypll_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &ext_pclk0_clk_src.c,
+ .freq_hz = 0,
+ },
+ {
+ .div_src_val = BVAL(10, 8, dsi1phypll_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &ext_pclk1_clk_src.c,
+ .freq_hz = 0,
+ },
+ F_END
+};
+
+static struct rcg_clk pclk1_clk_src = {
+ .cmd_rcgr_reg = MMSS_PCLK1_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .current_freq = ftbl_pclk1_clk_src,
+ .freq_tbl = ftbl_pclk1_clk_src,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "pclk1_clk_src",
+ .parent = &ext_pclk1_clk_src.c,
+ .ops = &clk_ops_pixel_multiparent,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ VDD_DIG_FMAX_MAP3(LOWER, 175000000, LOW, 280000000,
+ NOMINAL, 350000000),
+ CLK_INIT(pclk1_clk_src.c),
+ },
+};
+
+/*
+ * Venus video core root clock; v1/v2/v3 tables differ only in the
+ * top mmpll3-sourced step (450/490/520 MHz) per chip revision.
+ */
+static struct clk_freq_tbl ftbl_video_core_clk_src[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 450000000, mmpll3_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_video_core_clk_src_v2[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 490000000, mmpll3_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_video_core_clk_src_v3[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 346666667, mmpll3_out_main, 3, 0, 0),
+ F_MM( 520000000, mmpll3_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk video_core_clk_src = {
+ .cmd_rcgr_reg = MMSS_VIDEO_CORE_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_video_core_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "video_core_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP4(LOWER, 75000000, LOW, 150000000,
+ NOMINAL, 320000000, HIGH, 450000000),
+ CLK_INIT(video_core_clk_src.c),
+ },
+};
+
+/* Face-detect core root clock (HID RCG, max 400 MHz). */
+static struct clk_freq_tbl ftbl_fd_core_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 400000000, mmpll0_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk fd_core_clk_src = {
+ .cmd_rcgr_reg = MMSS_FD_CORE_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_fd_core_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "fd_core_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 400000000),
+ CLK_INIT(fd_core_clk_src.c),
+ },
+};
+
+/* Camera control interface (CCI) root clock (MND RCG). */
+static struct clk_freq_tbl ftbl_cci_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_MM( 37500000, mmsscc_gpll0, 16, 0, 0),
+ F_MM( 50000000, mmsscc_gpll0, 12, 0, 0),
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_END
+};
+
+static struct rcg_clk cci_clk_src = {
+ .cmd_rcgr_reg = MMSS_CCI_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_cci_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "cci_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 50000000,
+ NOMINAL, 100000000),
+ CLK_INIT(cci_clk_src.c),
+ },
+};
+
+/*
+ * CSIPHY 3-phase clocks (one RCG per PHY 0..2). The three pairs of
+ * tables are identical; the _v2 variants add a 384 MHz step.
+ */
+static struct clk_freq_tbl ftbl_csiphy0_3p_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csiphy0_3p_clk_src_v2[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 384000000, mmpll4_out_main, 2.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk csiphy0_3p_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSIPHY0_3P_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csiphy0_3p_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csiphy0_3p_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 320000000),
+ CLK_INIT(csiphy0_3p_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_csiphy1_3p_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csiphy1_3p_clk_src_v2[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 384000000, mmpll4_out_main, 2.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk csiphy1_3p_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSIPHY1_3P_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csiphy1_3p_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csiphy1_3p_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 320000000),
+ CLK_INIT(csiphy1_3p_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_csiphy2_3p_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csiphy2_3p_clk_src_v2[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 320000000, mmpll4_out_main, 3, 0, 0),
+ F_MM( 384000000, mmpll4_out_main, 2.5, 0, 0),
+ F_END
+};
+
+static struct rcg_clk csiphy2_3p_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSIPHY2_3P_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csiphy2_3p_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csiphy2_3p_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 320000000),
+ CLK_INIT(csiphy2_3p_clk_src.c),
+ },
+};
+
+/*
+ * Camera general-purpose clocks GP0/GP1 (MND RCGs). These use
+ * non-zero M/N values (last two F_MM args) to synthesize very low
+ * rates (down to 10 kHz) from the XO and gpll0_div sources.
+ */
+static struct clk_freq_tbl ftbl_camss_gp0_clk_src[] = {
+ F_MM( 10000, mmsscc_xo, 16, 1, 120),
+ F_MM( 24000, mmsscc_xo, 16, 1, 50),
+ F_MM( 6000000, mmsscc_gpll0_div, 10, 1, 5),
+ F_MM( 12000000, mmsscc_gpll0_div, 1, 1, 25),
+ F_MM( 13000000, mmsscc_gpll0_div, 2, 13, 150),
+ F_MM( 24000000, mmsscc_gpll0_div, 1, 2, 25),
+ F_END
+};
+
+static struct rcg_clk camss_gp0_clk_src = {
+ .cmd_rcgr_reg = MMSS_CAMSS_GP0_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_camss_gp0_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_gp0_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+ NOMINAL, 200000000),
+ CLK_INIT(camss_gp0_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_camss_gp1_clk_src[] = {
+ F_MM( 10000, mmsscc_xo, 16, 1, 120),
+ F_MM( 24000, mmsscc_xo, 16, 1, 50),
+ F_MM( 6000000, mmsscc_gpll0_div, 10, 1, 5),
+ F_MM( 12000000, mmsscc_gpll0_div, 1, 1, 25),
+ F_MM( 13000000, mmsscc_gpll0_div, 2, 13, 150),
+ F_MM( 24000000, mmsscc_gpll0_div, 1, 2, 25),
+ F_END
+};
+
+static struct rcg_clk camss_gp1_clk_src = {
+ .cmd_rcgr_reg = MMSS_CAMSS_GP1_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_camss_gp1_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_gp1_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+ NOMINAL, 200000000),
+ CLK_INIT(camss_gp1_clk_src.c),
+ },
+};
+
+/*
+ * Camera sensor master clocks MCLK0..MCLK3. All four frequency
+ * tables and fmax maps are identical; only the CMD_RCGR register
+ * and names differ.
+ */
+static struct clk_freq_tbl ftbl_mclk0_clk_src[] = {
+ F_MM( 4800000, mmsscc_xo, 4, 0, 0),
+ F_MM( 6000000, mmsscc_gpll0_div, 10, 1, 5),
+ F_MM( 8000000, mmsscc_gpll0_div, 1, 2, 75),
+ F_MM( 9600000, mmsscc_xo, 2, 0, 0),
+ F_MM( 16666667, mmsscc_gpll0_div, 2, 1, 9),
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_MM( 24000000, mmsscc_gpll0_div, 1, 2, 25),
+ F_MM( 33333333, mmsscc_gpll0_div, 1, 1, 9),
+ F_MM( 48000000, mmsscc_gpll0, 1, 2, 25),
+ F_MM( 66666667, mmsscc_gpll0, 1, 1, 9),
+ F_END
+};
+
+static struct rcg_clk mclk0_clk_src = {
+ .cmd_rcgr_reg = MMSS_MCLK0_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_mclk0_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mclk0_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 33330000, LOW, 66670000,
+ NOMINAL, 68570000),
+ CLK_INIT(mclk0_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_mclk1_clk_src[] = {
+ F_MM( 4800000, mmsscc_xo, 4, 0, 0),
+ F_MM( 6000000, mmsscc_gpll0_div, 10, 1, 5),
+ F_MM( 8000000, mmsscc_gpll0_div, 1, 2, 75),
+ F_MM( 9600000, mmsscc_xo, 2, 0, 0),
+ F_MM( 16666667, mmsscc_gpll0_div, 2, 1, 9),
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_MM( 24000000, mmsscc_gpll0_div, 1, 2, 25),
+ F_MM( 33333333, mmsscc_gpll0_div, 1, 1, 9),
+ F_MM( 48000000, mmsscc_gpll0, 1, 2, 25),
+ F_MM( 66666667, mmsscc_gpll0, 1, 1, 9),
+ F_END
+};
+
+static struct rcg_clk mclk1_clk_src = {
+ .cmd_rcgr_reg = MMSS_MCLK1_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_mclk1_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mclk1_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 33330000, LOW, 66670000,
+ NOMINAL, 68570000),
+ CLK_INIT(mclk1_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_mclk2_clk_src[] = {
+ F_MM( 4800000, mmsscc_xo, 4, 0, 0),
+ F_MM( 6000000, mmsscc_gpll0_div, 10, 1, 5),
+ F_MM( 8000000, mmsscc_gpll0_div, 1, 2, 75),
+ F_MM( 9600000, mmsscc_xo, 2, 0, 0),
+ F_MM( 16666667, mmsscc_gpll0_div, 2, 1, 9),
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_MM( 24000000, mmsscc_gpll0_div, 1, 2, 25),
+ F_MM( 33333333, mmsscc_gpll0_div, 1, 1, 9),
+ F_MM( 48000000, mmsscc_gpll0, 1, 2, 25),
+ F_MM( 66666667, mmsscc_gpll0, 1, 1, 9),
+ F_END
+};
+
+static struct rcg_clk mclk2_clk_src = {
+ .cmd_rcgr_reg = MMSS_MCLK2_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_mclk2_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mclk2_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 33330000, LOW, 66670000,
+ NOMINAL, 68570000),
+ CLK_INIT(mclk2_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_mclk3_clk_src[] = {
+ F_MM( 4800000, mmsscc_xo, 4, 0, 0),
+ F_MM( 6000000, mmsscc_gpll0_div, 10, 1, 5),
+ F_MM( 8000000, mmsscc_gpll0_div, 1, 2, 75),
+ F_MM( 9600000, mmsscc_xo, 2, 0, 0),
+ F_MM( 16666667, mmsscc_gpll0_div, 2, 1, 9),
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_MM( 24000000, mmsscc_gpll0_div, 1, 2, 25),
+ F_MM( 33333333, mmsscc_gpll0_div, 1, 1, 9),
+ F_MM( 48000000, mmsscc_gpll0, 1, 2, 25),
+ F_MM( 66666667, mmsscc_gpll0, 1, 1, 9),
+ F_END
+};
+
+static struct rcg_clk mclk3_clk_src = {
+ .cmd_rcgr_reg = MMSS_MCLK3_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_mclk3_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mclk3_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 33330000, LOW, 66670000,
+ NOMINAL, 68570000),
+ CLK_INIT(mclk3_clk_src.c),
+ },
+};
+
+/*
+ * CSI PHY timer clocks 0..2. The v3 tables re-source the 200 MHz
+ * step from gpll0 (div 3) instead of mmpll0 (div 4).
+ */
+static struct clk_freq_tbl ftbl_csi0phytimer_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmpll0_out_main, 4, 0, 0),
+ F_MM( 266666667, mmpll0_out_main, 3, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csi0phytimer_clk_src_v3[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 266666667, mmpll0_out_main, 3, 0, 0),
+ F_END
+};
+
+static struct rcg_clk csi0phytimer_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSI0PHYTIMER_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csi0phytimer_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csi0phytimer_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 100000000, NOMINAL, 266670000),
+ CLK_INIT(csi0phytimer_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_csi1phytimer_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmpll0_out_main, 4, 0, 0),
+ F_MM( 266666667, mmpll0_out_main, 3, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csi1phytimer_clk_src_v3[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 266666667, mmpll0_out_main, 3, 0, 0),
+ F_END
+};
+
+static struct rcg_clk csi1phytimer_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSI1PHYTIMER_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csi1phytimer_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csi1phytimer_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 100000000, NOMINAL, 266670000),
+ CLK_INIT(csi1phytimer_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_csi2phytimer_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmpll0_out_main, 4, 0, 0),
+ F_MM( 266666667, mmpll0_out_main, 3, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csi2phytimer_clk_src_v3[] = {
+ F_MM( 100000000, mmsscc_gpll0_div, 3, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 266666667, mmpll0_out_main, 3, 0, 0),
+ F_END
+};
+
+static struct rcg_clk csi2phytimer_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSI2PHYTIMER_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csi2phytimer_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csi2phytimer_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 100000000, NOMINAL, 266670000),
+ CLK_INIT(csi2phytimer_clk_src.c),
+ },
+};
+
+/*
+ * GPU RBBM timer clock: fixed at XO rate. Note this is the only RCG
+ * here using .base = &virt_base_gpu (GPU clock controller region)
+ * rather than the MMSS base.
+ */
+static struct clk_freq_tbl ftbl_rbbmtimer_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk rbbmtimer_clk_src = {
+ .cmd_rcgr_reg = MMSS_RBBMTIMER_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_rbbmtimer_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base_gpu,
+ .c = {
+ .dbg_name = "rbbmtimer_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOWER, 19200000),
+ CLK_INIT(rbbmtimer_clk_src.c),
+ },
+};
+
+DEFINE_EXT_CLK(ext_byte0_clk_src, NULL);
+DEFINE_EXT_CLK(ext_byte1_clk_src, NULL);
+/*
+ * DSI byte clock RCGs. Rates are driven by the external DSI PHY
+ * PLLs (ext_byte0/ext_byte1), so freq_hz is 0 in every entry and
+ * CLKFLAG_NO_RATE_CACHE is set; both tables list both PHY PLLs so
+ * either byte clock can be muxed to either DSI PLL.
+ *
+ * Consistency fix: the dsi0phypll entries previously omitted the
+ * explicit "| BVAL(4, 0, 0)" divider field that every sibling entry
+ * (and the pclk0/pclk1 tables) carries. BVAL(msb, lsb, 0) is 0, so
+ * the programmed register value is unchanged; the tables are merely
+ * uniform now.
+ */
+static struct clk_freq_tbl ftbl_byte0_clk_src[] = {
+ {
+ .div_src_val = BVAL(10, 8, mmsscc_xo_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &mmsscc_xo.c,
+ .freq_hz = 0,
+ },
+ {
+ .div_src_val = BVAL(10, 8, dsi0phypll_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &ext_byte0_clk_src.c,
+ .freq_hz = 0,
+ },
+ {
+ .div_src_val = BVAL(10, 8, dsi1phypll_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &ext_byte1_clk_src.c,
+ .freq_hz = 0,
+ },
+ F_END
+};
+
+static struct rcg_clk byte0_clk_src = {
+ .cmd_rcgr_reg = MMSS_BYTE0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .current_freq = ftbl_byte0_clk_src,
+ .freq_tbl = ftbl_byte0_clk_src,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "byte0_clk_src",
+ .parent = &ext_byte0_clk_src.c,
+ .ops = &clk_ops_byte_multiparent,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ VDD_DIG_FMAX_MAP3(LOWER, 131250000, LOW, 210000000,
+ NOMINAL, 262500000),
+ CLK_INIT(byte0_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_byte1_clk_src[] = {
+ {
+ .div_src_val = BVAL(10, 8, mmsscc_xo_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &mmsscc_xo.c,
+ .freq_hz = 0,
+ },
+ {
+ .div_src_val = BVAL(10, 8, dsi0phypll_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &ext_byte0_clk_src.c,
+ .freq_hz = 0,
+ },
+ {
+ .div_src_val = BVAL(10, 8, dsi1phypll_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &ext_byte1_clk_src.c,
+ .freq_hz = 0,
+ },
+ F_END
+};
+
+static struct rcg_clk byte1_clk_src = {
+ .cmd_rcgr_reg = MMSS_BYTE1_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .current_freq = ftbl_byte1_clk_src,
+ .freq_tbl = ftbl_byte1_clk_src,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "byte1_clk_src",
+ .parent = &ext_byte1_clk_src.c,
+ .ops = &clk_ops_byte_multiparent,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ VDD_DIG_FMAX_MAP3(LOWER, 131250000, LOW, 210000000,
+ NOMINAL, 262500000),
+ CLK_INIT(byte1_clk_src.c),
+ },
+};
+
+/* DSI escape clocks: fixed at XO (19.2 MHz). */
+static struct clk_freq_tbl ftbl_esc0_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk esc0_clk_src = {
+ .cmd_rcgr_reg = MMSS_ESC0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_esc0_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "esc0_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOWER, 19200000),
+ CLK_INIT(esc0_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_esc1_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk esc1_clk_src = {
+ .cmd_rcgr_reg = MMSS_ESC1_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_esc1_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "esc1_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOWER, 19200000),
+ CLK_INIT(esc1_clk_src.c),
+ },
+};
+
+/*
+ * HDMI external pixel clock: single entry sourced from the external
+ * HDMI PHY PLL; no .set_rate/.freq_tbl since the rate is set by the
+ * PHY driver (clk_ops_byte tracks the parent).
+ */
+DEFINE_EXT_CLK(ext_extpclk_clk_src, NULL);
+static struct clk_freq_tbl ftbl_extpclk_clk_src[] = {
+ {
+ .div_src_val = BVAL(10, 8, ext_extpclk_clk_src_mm_source_val),
+ .src_clk = &ext_extpclk_clk_src.c,
+ },
+ F_END
+};
+
+static struct rcg_clk extpclk_clk_src = {
+ .cmd_rcgr_reg = MMSS_EXTPCLK_CMD_RCGR,
+ .current_freq = ftbl_extpclk_clk_src,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "extpclk_clk_src",
+ .parent = &ext_extpclk_clk_src.c,
+ .ops = &clk_ops_byte,
+ VDD_DIG_FMAX_MAP3(LOWER, 150000000, LOW, 300000000,
+ NOMINAL, 600000000),
+ CLK_INIT(extpclk_clk_src.c),
+ },
+};
+
+/* HDMI core and MDP vsync clocks: fixed at XO (19.2 MHz). */
+static struct clk_freq_tbl ftbl_hdmi_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk hdmi_clk_src = {
+ .cmd_rcgr_reg = MMSS_HDMI_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_hdmi_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "hdmi_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOWER, 19200000),
+ CLK_INIT(hdmi_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_vsync_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk vsync_clk_src = {
+ .cmd_rcgr_reg = MMSS_VSYNC_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_vsync_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "vsync_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOWER, 19200000),
+ CLK_INIT(vsync_clk_src.c),
+ },
+};
+
+/*
+ * RBCPR (CPR voltage-control block) clock. The v2 table adds a
+ * 50 MHz gpll0 step; the fmax map here only covers the XO rate, so
+ * presumably it is extended at probe time when v2 is selected --
+ * TODO confirm.
+ */
+static struct clk_freq_tbl ftbl_rbcpr_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_rbcpr_clk_src_v2[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_MM( 50000000, mmsscc_gpll0, 12, 0, 0),
+ F_END
+};
+
+static struct rcg_clk rbcpr_clk_src = {
+ .cmd_rcgr_reg = MMSS_RBCPR_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_rbcpr_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "rbcpr_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP1(LOWER, 19200000),
+ CLK_INIT(rbcpr_clk_src.c),
+ },
+};
+
+/*
+ * Venus video subcore 0/1 clocks. Tables mirror the video_core
+ * tables (v1/v2/v3 differ in the top mmpll3 step). These RCGs set
+ * .non_local_control_timeout because the registers are presumably
+ * controlled by a remote entity -- TODO confirm semantics in the
+ * rcg_clk definition.
+ */
+static struct clk_freq_tbl ftbl_video_subcore0_clk_src[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 450000000, mmpll3_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_video_subcore0_clk_src_v2[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 490000000, mmpll3_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_video_subcore0_clk_src_v3[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 346666667, mmpll3_out_main, 3, 0, 0),
+ F_MM( 520000000, mmpll3_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk video_subcore0_clk_src = {
+ .cmd_rcgr_reg = MMSS_VIDEO_SUBCORE0_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_video_subcore0_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .non_local_control_timeout = 1000,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "video_subcore0_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP4(LOWER, 75000000, LOW, 150000000,
+ NOMINAL, 320000000, HIGH, 450000000),
+ CLK_INIT(video_subcore0_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_video_subcore1_clk_src[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 450000000, mmpll3_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_video_subcore1_clk_src_v2[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 320000000, mmpll0_out_main, 2.5, 0, 0),
+ F_MM( 490000000, mmpll3_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_video_subcore1_clk_src_v3[] = {
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 346666667, mmpll3_out_main, 3, 0, 0),
+ F_MM( 520000000, mmpll3_out_main, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk video_subcore1_clk_src = {
+ .cmd_rcgr_reg = MMSS_VIDEO_SUBCORE1_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_video_subcore1_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .non_local_control_timeout = 1000,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "video_subcore1_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP4(LOWER, 75000000, LOW, 150000000,
+ NOMINAL, 320000000, HIGH, 450000000),
+ CLK_INIT(video_subcore1_clk_src.c),
+ },
+};
+
+/*
+ * Branch (gate) clocks. has_sibling=1 means the branch shares its
+ * RCG with others (no rate set through it); has_sibling=0 branches
+ * propagate rate to the parent RCG.
+ *
+ * mmagic AHB: skips the halt check on disable and verifies the
+ * enable bit instead.
+ */
+static struct branch_clk mmss_mmagic_ahb_clk = {
+ .cbcr_reg = MMSS_MMSS_MMAGIC_AHB_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "mmss_mmagic_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mmagic_ahb_clk.c),
+ },
+};
+
+/* camss AHB depends on the mmagic AHB clock being enabled. */
+static struct branch_clk camss_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mmagic_ahb_clk.c,
+ CLK_INIT(camss_ahb_clk.c),
+ },
+};
+
+static struct branch_clk camss_cci_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_CCI_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_cci_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_cci_ahb_clk.c),
+ },
+};
+
+static struct branch_clk camss_cci_clk = {
+ .cbcr_reg = MMSS_CAMSS_CCI_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_cci_clk",
+ .parent = &cci_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_cci_clk.c),
+ },
+};
+
+static struct branch_clk camss_cpp_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_CPP_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_cpp_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_cpp_ahb_clk.c),
+ },
+};
+
+/* CPP core branch; also carries the block reset register (BCR). */
+static struct branch_clk camss_cpp_clk = {
+ .cbcr_reg = MMSS_CAMSS_CPP_CBCR,
+ .bcr_reg = MMSS_CAMSS_CPP_BCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_cpp_clk",
+ .parent = &cpp_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_cpp_clk.c),
+ },
+};
+
+static struct branch_clk camss_cpp_axi_clk = {
+ .cbcr_reg = MMSS_CAMSS_CPP_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_cpp_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_cpp_axi_clk.c),
+ },
+};
+
+static struct branch_clk camss_cpp_vbif_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_CPP_VBIF_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_cpp_vbif_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_cpp_vbif_ahb_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi0_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI0_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi0_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi0_ahb_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi0_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi0_clk",
+ .parent = &csi0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi0_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi0phy_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI0PHY_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi0phy_clk",
+ .parent = &csi0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi0phy_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi0pix_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI0PIX_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi0pix_clk",
+ .parent = &csi0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi0pix_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi0rdi_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI0RDI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi0rdi_clk",
+ .parent = &csi0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi0rdi_clk.c),
+ },
+};
+
+/*
+ * CAMSS CSI1 branch clocks.  The AHB gate has no parent set (bus clock);
+ * the core/phy/pix/rdi gates are all fed from csi1_clk_src.  Only the core
+ * clock (has_sibling = 0) is the rate-setting branch of that RCG.
+ */
+static struct branch_clk camss_csi1_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI1_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi1_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi1_ahb_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi1_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi1_clk",
+ .parent = &csi1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi1_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi1phy_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI1PHY_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi1phy_clk",
+ .parent = &csi1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi1phy_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi1pix_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI1PIX_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi1pix_clk",
+ .parent = &csi1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi1pix_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi1rdi_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI1RDI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi1rdi_clk",
+ .parent = &csi1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi1rdi_clk.c),
+ },
+};
+
+/* CAMSS CSI2 branch clocks; same layout as the CSI1 set, off csi2_clk_src. */
+static struct branch_clk camss_csi2_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI2_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi2_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi2_ahb_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi2_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI2_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi2_clk",
+ .parent = &csi2_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi2_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi2phy_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI2PHY_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi2phy_clk",
+ .parent = &csi2_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi2phy_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi2pix_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI2PIX_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi2pix_clk",
+ .parent = &csi2_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi2pix_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi2rdi_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI2RDI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi2rdi_clk",
+ .parent = &csi2_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi2rdi_clk.c),
+ },
+};
+
+/* CAMSS CSI3 branch clocks; same layout as the CSI1 set, off csi3_clk_src. */
+static struct branch_clk camss_csi3_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI3_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi3_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi3_ahb_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi3_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI3_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi3_clk",
+ .parent = &csi3_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi3_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi3phy_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI3PHY_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi3phy_clk",
+ .parent = &csi3_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi3phy_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi3pix_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI3PIX_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi3pix_clk",
+ .parent = &csi3_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi3pix_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi3rdi_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI3RDI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi3rdi_clk",
+ .parent = &csi3_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi3rdi_clk.c),
+ },
+};
+
+/* CSI-to-VFE interface gates, sharing vfe0/vfe1_clk_src with the VFE cores. */
+static struct branch_clk camss_csi_vfe0_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI_VFE0_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi_vfe0_clk",
+ .parent = &vfe0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi_vfe0_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi_vfe1_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI_VFE1_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi_vfe1_clk",
+ .parent = &vfe1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi_vfe1_clk.c),
+ },
+};
+
+/*
+ * 3-phase CSI PHY clocks.  Each is the sole (has_sibling = 0) branch of its
+ * own per-PHY RCG, so rate requests propagate to csiphyN_3p_clk_src.
+ */
+static struct branch_clk camss_csiphy0_3p_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSIPHY0_3P_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csiphy0_3p_clk",
+ .parent = &csiphy0_3p_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csiphy0_3p_clk.c),
+ },
+};
+
+static struct branch_clk camss_csiphy1_3p_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSIPHY1_3P_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csiphy1_3p_clk",
+ .parent = &csiphy1_3p_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csiphy1_3p_clk.c),
+ },
+};
+
+static struct branch_clk camss_csiphy2_3p_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSIPHY2_3P_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csiphy2_3p_clk",
+ .parent = &csiphy2_3p_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csiphy2_3p_clk.c),
+ },
+};
+
+/* Camera general-purpose output clocks, one RCG each. */
+static struct branch_clk camss_gp0_clk = {
+ .cbcr_reg = MMSS_CAMSS_GP0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_gp0_clk",
+ .parent = &camss_gp0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_gp0_clk.c),
+ },
+};
+
+static struct branch_clk camss_gp1_clk = {
+ .cbcr_reg = MMSS_CAMSS_GP1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_gp1_clk",
+ .parent = &camss_gp1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_gp1_clk.c),
+ },
+};
+
+/*
+ * ISPIF bus gate and the JPEG engine clocks.  camss_jpeg0/jpeg2 carry a
+ * bcr_reg (MMSS_CAMSS_JPEG_BCR) so the framework can assert the block reset
+ * for the JPEG subsystem -- NOTE(review): reset usage is framework-driven;
+ * confirm in clock-local2.c.
+ */
+static struct branch_clk camss_ispif_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_ISPIF_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_ispif_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_ispif_ahb_clk.c),
+ },
+};
+
+static struct branch_clk camss_jpeg0_clk = {
+ .cbcr_reg = MMSS_CAMSS_JPEG0_CBCR,
+ .bcr_reg = MMSS_CAMSS_JPEG_BCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_jpeg0_clk",
+ .parent = &jpeg0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_jpeg0_clk.c),
+ },
+};
+
+static struct branch_clk camss_jpeg2_clk = {
+ .cbcr_reg = MMSS_CAMSS_JPEG2_CBCR,
+ .bcr_reg = MMSS_CAMSS_JPEG_BCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_jpeg2_clk",
+ .parent = &jpeg2_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_jpeg2_clk.c),
+ },
+};
+
+static struct branch_clk camss_jpeg_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_JPEG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_jpeg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_jpeg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk camss_jpeg_axi_clk = {
+ .cbcr_reg = MMSS_CAMSS_JPEG_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_jpeg_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_jpeg_axi_clk.c),
+ },
+};
+
+static struct branch_clk camss_jpeg_dma_clk = {
+ .cbcr_reg = MMSS_CAMSS_JPEG_DMA_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_jpeg_dma_clk",
+ .parent = &jpeg_dma_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_jpeg_dma_clk.c),
+ },
+};
+
+/*
+ * Camera sensor master clocks (MCLK0-3), one dedicated RCG each, plus the
+ * camera micro-controller AHB gate (which owns the MICRO block reset).
+ */
+static struct branch_clk camss_mclk0_clk = {
+ .cbcr_reg = MMSS_CAMSS_MCLK0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_mclk0_clk",
+ .parent = &mclk0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_mclk0_clk.c),
+ },
+};
+
+static struct branch_clk camss_mclk1_clk = {
+ .cbcr_reg = MMSS_CAMSS_MCLK1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_mclk1_clk",
+ .parent = &mclk1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_mclk1_clk.c),
+ },
+};
+
+static struct branch_clk camss_mclk2_clk = {
+ .cbcr_reg = MMSS_CAMSS_MCLK2_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_mclk2_clk",
+ .parent = &mclk2_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_mclk2_clk.c),
+ },
+};
+
+static struct branch_clk camss_mclk3_clk = {
+ .cbcr_reg = MMSS_CAMSS_MCLK3_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_mclk3_clk",
+ .parent = &mclk3_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_mclk3_clk.c),
+ },
+};
+
+static struct branch_clk camss_micro_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_MICRO_AHB_CBCR,
+ .bcr_reg = MMSS_CAMSS_MICRO_BCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_micro_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_micro_ahb_clk.c),
+ },
+};
+
+/* CSI PHY timer clocks (one RCG each) and the CAMSS top-level AHB gate. */
+static struct branch_clk camss_csi0phytimer_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI0PHYTIMER_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi0phytimer_clk",
+ .parent = &csi0phytimer_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi0phytimer_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi1phytimer_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI1PHYTIMER_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi1phytimer_clk",
+ .parent = &csi1phytimer_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi1phytimer_clk.c),
+ },
+};
+
+static struct branch_clk camss_csi2phytimer_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI2PHYTIMER_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_csi2phytimer_clk",
+ .parent = &csi2phytimer_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_csi2phytimer_clk.c),
+ },
+};
+
+static struct branch_clk camss_top_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_TOP_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_top_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_top_ahb_clk.c),
+ },
+};
+
+/*
+ * VFE (imaging front end) and FD (face detection) clocks.  The VFE core
+ * clocks carry their per-core block resets (bcr_reg); the stream clocks
+ * share the corresponding vfeN_clk_src as non-rate-setting siblings.
+ * fd_core_clk is the rate-setting branch of fd_core_clk_src and
+ * fd_core_uar_clk is a sibling gate off the same source.
+ */
+static struct branch_clk camss_vfe_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_vfe_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_vfe_ahb_clk.c),
+ },
+};
+
+static struct branch_clk camss_vfe_axi_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_vfe_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_vfe_axi_clk.c),
+ },
+};
+
+static struct branch_clk camss_vfe0_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE0_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_vfe0_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_vfe0_ahb_clk.c),
+ },
+};
+
+static struct branch_clk camss_vfe0_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE0_CBCR,
+ .bcr_reg = MMSS_CAMSS_VFE0_BCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_vfe0_clk",
+ .parent = &vfe0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_vfe0_clk.c),
+ },
+};
+
+static struct branch_clk camss_vfe0_stream_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE0_STREAM_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_vfe0_stream_clk",
+ .parent = &vfe0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_vfe0_stream_clk.c),
+ },
+};
+
+static struct branch_clk camss_vfe1_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE1_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_vfe1_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_vfe1_ahb_clk.c),
+ },
+};
+
+static struct branch_clk camss_vfe1_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE1_CBCR,
+ .bcr_reg = MMSS_CAMSS_VFE1_BCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_vfe1_clk",
+ .parent = &vfe1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_vfe1_clk.c),
+ },
+};
+
+static struct branch_clk camss_vfe1_stream_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE1_STREAM_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_vfe1_stream_clk",
+ .parent = &vfe1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(camss_vfe1_stream_clk.c),
+ },
+};
+
+static struct branch_clk fd_ahb_clk = {
+ .cbcr_reg = MMSS_FD_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "fd_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(fd_ahb_clk.c),
+ },
+};
+
+static struct branch_clk fd_core_clk = {
+ .cbcr_reg = MMSS_FD_CORE_CBCR,
+ .bcr_reg = MMSS_FD_BCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "fd_core_clk",
+ .parent = &fd_core_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(fd_core_clk.c),
+ },
+};
+
+static struct branch_clk fd_core_uar_clk = {
+ .cbcr_reg = MMSS_FD_CORE_UAR_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "fd_core_uar_clk",
+ .parent = &fd_core_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(fd_core_uar_clk.c),
+ },
+};
+
+/*
+ * GPU clocks -- note these use virt_base_gpu, a separate register mapping
+ * from the rest of this file.  gpu_ahb_clk sets check_enable_bit and
+ * no_halt_check_on_disable and declares a .depends on mmss_mmagic_ahb_clk
+ * (the bus clock must be on for register access) -- NOTE(review): flag
+ * semantics come from the branch-clock framework; confirm in clock-local2.c.
+ * gpu_mx_clk is a dummy fixed clock used purely to vote the GPU MX rail
+ * via its vdd_class -- presumably no real hardware gate; verify.
+ */
+static struct branch_clk gpu_ahb_clk = {
+ .cbcr_reg = MMSS_GPU_AHB_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base_gpu,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "gpu_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mmagic_ahb_clk.c,
+ CLK_INIT(gpu_ahb_clk.c),
+ },
+};
+
+static struct branch_clk gpu_aon_isense_clk = {
+ .cbcr_reg = MMSS_GPU_AON_ISENSE_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base_gpu,
+ .c = {
+ .dbg_name = "gpu_aon_isense_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(gpu_aon_isense_clk.c),
+ },
+};
+
+static struct branch_clk gpu_gx_gfx3d_clk = {
+ .cbcr_reg = MMSS_GPU_GX_GFX3D_CBCR,
+ .bcr_reg = MMSS_GPU_GX_BCR,
+ .has_sibling = 0,
+ .base = &virt_base_gpu,
+ .c = {
+ .dbg_name = "gpu_gx_gfx3d_clk",
+ .parent = &gfx3d_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gpu_gx_gfx3d_clk.c),
+ },
+};
+
+static struct fixed_clk gpu_mx_clk = {
+ .c = {
+ .dbg_name = "gpu_mx_clk",
+ .vdd_class = &vdd_gpu_mx,
+ .ops = &clk_ops_dummy,
+ CLK_INIT(gpu_mx_clk.c),
+ },
+};
+
+static struct branch_clk gpu_gx_rbbmtimer_clk = {
+ .cbcr_reg = MMSS_GPU_GX_RBBMTIMER_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base_gpu,
+ .c = {
+ .dbg_name = "gpu_gx_rbbmtimer_clk",
+ .parent = &rbbmtimer_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(gpu_gx_rbbmtimer_clk.c),
+ },
+};
+
+/*
+ * MDSS (display subsystem) clocks.  DSI byte/pixel clocks are marked
+ * CLKFLAG_NO_RATE_CACHE because their PLL-derived rates can change
+ * underneath the framework -- NOTE(review): flag meaning per msm clock
+ * framework; confirm.  mdss_mdp_clk owns the MDSS block reset, and the two
+ * DEFINE_CLK_VOTER entries let MDP and rotator clients vote on it
+ * independently.
+ */
+static struct branch_clk mdss_ahb_clk = {
+ .cbcr_reg = MMSS_MDSS_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdss_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mmagic_ahb_clk.c,
+ CLK_INIT(mdss_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mdss_axi_clk = {
+ .cbcr_reg = MMSS_MDSS_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdss_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mdss_axi_clk.c),
+ },
+};
+
+static struct branch_clk mdss_byte0_clk = {
+ .cbcr_reg = MMSS_MDSS_BYTE0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdss_byte0_clk",
+ .parent = &byte0_clk_src.c,
+ .ops = &clk_ops_branch,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(mdss_byte0_clk.c),
+ },
+};
+
+static struct branch_clk mdss_byte1_clk = {
+ .cbcr_reg = MMSS_MDSS_BYTE1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdss_byte1_clk",
+ .parent = &byte1_clk_src.c,
+ .ops = &clk_ops_branch,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(mdss_byte1_clk.c),
+ },
+};
+
+static struct branch_clk mdss_esc0_clk = {
+ .cbcr_reg = MMSS_MDSS_ESC0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdss_esc0_clk",
+ .parent = &esc0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mdss_esc0_clk.c),
+ },
+};
+
+static struct branch_clk mdss_esc1_clk = {
+ .cbcr_reg = MMSS_MDSS_ESC1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdss_esc1_clk",
+ .parent = &esc1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mdss_esc1_clk.c),
+ },
+};
+
+static struct branch_clk mdss_extpclk_clk = {
+ .cbcr_reg = MMSS_MDSS_EXTPCLK_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdss_extpclk_clk",
+ .parent = &extpclk_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mdss_extpclk_clk.c),
+ },
+};
+
+static struct branch_clk mdss_hdmi_ahb_clk = {
+ .cbcr_reg = MMSS_MDSS_HDMI_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdss_hdmi_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mdss_hdmi_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mdss_hdmi_clk = {
+ .cbcr_reg = MMSS_MDSS_HDMI_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdss_hdmi_clk",
+ .parent = &hdmi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mdss_hdmi_clk.c),
+ },
+};
+
+static struct branch_clk mdss_mdp_clk = {
+ .cbcr_reg = MMSS_MDSS_MDP_CBCR,
+ .bcr_reg = MMSS_MDSS_BCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdss_mdp_clk",
+ .parent = &mdp_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mdss_mdp_clk.c),
+ },
+};
+
+/* Independent client votes (MDP core vs. rotator) on mdss_mdp_clk. */
+static DEFINE_CLK_VOTER(mdss_mdp_vote_clk, &mdss_mdp_clk.c, 0);
+static DEFINE_CLK_VOTER(mdss_rotator_vote_clk, &mdss_mdp_clk.c, 0);
+
+/*
+ * DSI pixel clocks (rate not cached -- PLL-driven, like the byte clocks),
+ * the vsync clock, and the MMSS misc bus/XO gates.
+ */
+static struct branch_clk mdss_pclk0_clk = {
+ .cbcr_reg = MMSS_MDSS_PCLK0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdss_pclk0_clk",
+ .parent = &pclk0_clk_src.c,
+ .ops = &clk_ops_branch,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(mdss_pclk0_clk.c),
+ },
+};
+
+static struct branch_clk mdss_pclk1_clk = {
+ .cbcr_reg = MMSS_MDSS_PCLK1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdss_pclk1_clk",
+ .parent = &pclk1_clk_src.c,
+ .ops = &clk_ops_branch,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(mdss_pclk1_clk.c),
+ },
+};
+
+static struct branch_clk mdss_vsync_clk = {
+ .cbcr_reg = MMSS_MDSS_VSYNC_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdss_vsync_clk",
+ .parent = &vsync_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mdss_vsync_clk.c),
+ },
+};
+
+static struct branch_clk mmss_misc_ahb_clk = {
+ .cbcr_reg = MMSS_MMSS_MISC_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_misc_ahb_clk",
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mmagic_ahb_clk.c,
+ CLK_INIT(mmss_misc_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_misc_cxo_clk = {
+ .cbcr_reg = MMSS_MMSS_MISC_CXO_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_misc_cxo_clk",
+ .parent = &mmsscc_xo.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_misc_cxo_clk.c),
+ },
+};
+
+/*
+ * MMAGIC NoC configuration / AXI gates and the MMSS RBCPR clocks.
+ * mmss_mmagic_cfg_ahb_clk skips the halt check on disable and verifies the
+ * enable bit instead, and depends on mmss_mmagic_ahb_clk for register
+ * access -- NOTE(review): flag semantics per the branch-clock framework.
+ */
+static struct branch_clk mmagic_bimc_noc_cfg_ahb_clk = {
+ .cbcr_reg = MMSS_MMAGIC_BIMC_NOC_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmagic_bimc_noc_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmagic_bimc_noc_cfg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmagic_camss_axi_clk = {
+ .cbcr_reg = MMSS_MMAGIC_CAMSS_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmagic_camss_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmagic_camss_axi_clk.c),
+ },
+};
+
+static struct branch_clk mmagic_camss_noc_cfg_ahb_clk = {
+ .cbcr_reg = MMSS_MMAGIC_CAMSS_NOC_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmagic_camss_noc_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmagic_camss_noc_cfg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mmagic_cfg_ahb_clk = {
+ .cbcr_reg = MMSS_MMSS_MMAGIC_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "mmss_mmagic_cfg_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mmagic_ahb_clk.c,
+ CLK_INIT(mmss_mmagic_cfg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmagic_mdss_axi_clk = {
+ .cbcr_reg = MMSS_MMAGIC_MDSS_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmagic_mdss_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmagic_mdss_axi_clk.c),
+ },
+};
+
+static struct branch_clk mmagic_mdss_noc_cfg_ahb_clk = {
+ .cbcr_reg = MMSS_MMAGIC_MDSS_NOC_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmagic_mdss_noc_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmagic_mdss_noc_cfg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmagic_video_axi_clk = {
+ .cbcr_reg = MMSS_MMAGIC_VIDEO_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmagic_video_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmagic_video_axi_clk.c),
+ },
+};
+
+static struct branch_clk mmagic_video_noc_cfg_ahb_clk = {
+ .cbcr_reg = MMSS_MMAGIC_VIDEO_NOC_CFG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmagic_video_noc_cfg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmagic_video_noc_cfg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mmagic_maxi_clk = {
+ .cbcr_reg = MMSS_MMSS_MMAGIC_MAXI_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mmagic_maxi_clk",
+ .parent = &maxi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mmagic_maxi_clk.c),
+ },
+};
+
+static struct branch_clk mmss_rbcpr_ahb_clk = {
+ .cbcr_reg = MMSS_MMSS_RBCPR_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_rbcpr_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mmagic_ahb_clk.c,
+ CLK_INIT(mmss_rbcpr_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_rbcpr_clk = {
+ .cbcr_reg = MMSS_MMSS_RBCPR_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_rbcpr_clk",
+ .parent = &rbcpr_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_rbcpr_clk.c),
+ },
+};
+
+/*
+ * SMMU interface clocks (AHB + AXI pair per client: CPP, JPEG, MDP, ROT,
+ * VFE, VIDEO).  Every entry in this group sets check_enable_bit and
+ * no_halt_check_on_disable; the AHB gates additionally source from
+ * ahb_clk_src and depend on mmss_mmagic_ahb_clk so the config bus is live
+ * while these registers are touched.
+ */
+static struct branch_clk smmu_cpp_ahb_clk = {
+ .cbcr_reg = MMSS_SMMU_CPP_AHB_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "smmu_cpp_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mmagic_ahb_clk.c,
+ CLK_INIT(smmu_cpp_ahb_clk.c),
+ },
+};
+
+static struct branch_clk smmu_cpp_axi_clk = {
+ .cbcr_reg = MMSS_SMMU_CPP_AXI_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "smmu_cpp_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(smmu_cpp_axi_clk.c),
+ },
+};
+
+static struct branch_clk smmu_jpeg_ahb_clk = {
+ .cbcr_reg = MMSS_SMMU_JPEG_AHB_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "smmu_jpeg_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mmagic_ahb_clk.c,
+ CLK_INIT(smmu_jpeg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk smmu_jpeg_axi_clk = {
+ .cbcr_reg = MMSS_SMMU_JPEG_AXI_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "smmu_jpeg_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(smmu_jpeg_axi_clk.c),
+ },
+};
+
+static struct branch_clk smmu_mdp_ahb_clk = {
+ .cbcr_reg = MMSS_SMMU_MDP_AHB_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "smmu_mdp_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mmagic_ahb_clk.c,
+ CLK_INIT(smmu_mdp_ahb_clk.c),
+ },
+};
+
+static struct branch_clk smmu_mdp_axi_clk = {
+ .cbcr_reg = MMSS_SMMU_MDP_AXI_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "smmu_mdp_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(smmu_mdp_axi_clk.c),
+ },
+};
+
+static struct branch_clk smmu_rot_ahb_clk = {
+ .cbcr_reg = MMSS_SMMU_ROT_AHB_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "smmu_rot_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mmagic_ahb_clk.c,
+ CLK_INIT(smmu_rot_ahb_clk.c),
+ },
+};
+
+static struct branch_clk smmu_rot_axi_clk = {
+ .cbcr_reg = MMSS_SMMU_ROT_AXI_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "smmu_rot_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(smmu_rot_axi_clk.c),
+ },
+};
+
+static struct branch_clk smmu_vfe_ahb_clk = {
+ .cbcr_reg = MMSS_SMMU_VFE_AHB_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "smmu_vfe_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mmagic_ahb_clk.c,
+ CLK_INIT(smmu_vfe_ahb_clk.c),
+ },
+};
+
+static struct branch_clk smmu_vfe_axi_clk = {
+ .cbcr_reg = MMSS_SMMU_VFE_AXI_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "smmu_vfe_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(smmu_vfe_axi_clk.c),
+ },
+};
+
+static struct branch_clk smmu_video_ahb_clk = {
+ .cbcr_reg = MMSS_SMMU_VIDEO_AHB_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "smmu_video_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mmagic_ahb_clk.c,
+ CLK_INIT(smmu_video_ahb_clk.c),
+ },
+};
+
+static struct branch_clk smmu_video_axi_clk = {
+ .cbcr_reg = MMSS_SMMU_VIDEO_AXI_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .base = &virt_base,
+ .no_halt_check_on_disable = true,
+ .c = {
+ .dbg_name = "smmu_video_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(smmu_video_axi_clk.c),
+ },
+};
+
+/*
+ * Video codec (Venus) and VMEM clocks.  video_core_clk owns the VIDEO
+ * block reset; the two subcore clocks are the rate-setting branches of
+ * their own RCGs; the MAXI gates share maxi_clk_src with
+ * mmss_mmagic_maxi_clk.
+ */
+static struct branch_clk video_ahb_clk = {
+ .cbcr_reg = MMSS_VIDEO_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "video_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mmagic_ahb_clk.c,
+ CLK_INIT(video_ahb_clk.c),
+ },
+};
+
+static struct branch_clk video_axi_clk = {
+ .cbcr_reg = MMSS_VIDEO_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "video_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(video_axi_clk.c),
+ },
+};
+
+static struct branch_clk video_core_clk = {
+ .cbcr_reg = MMSS_VIDEO_CORE_CBCR,
+ .bcr_reg = MMSS_VIDEO_BCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "video_core_clk",
+ .parent = &video_core_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(video_core_clk.c),
+ },
+};
+
+static struct branch_clk video_maxi_clk = {
+ .cbcr_reg = MMSS_VIDEO_MAXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "video_maxi_clk",
+ .parent = &maxi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(video_maxi_clk.c),
+ },
+};
+
+static struct branch_clk video_subcore0_clk = {
+ .cbcr_reg = MMSS_VIDEO_SUBCORE0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "video_subcore0_clk",
+ .parent = &video_subcore0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(video_subcore0_clk.c),
+ },
+};
+
+static struct branch_clk video_subcore1_clk = {
+ .cbcr_reg = MMSS_VIDEO_SUBCORE1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "video_subcore1_clk",
+ .parent = &video_subcore1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(video_subcore1_clk.c),
+ },
+};
+
+static struct branch_clk vmem_ahb_clk = {
+ .cbcr_reg = MMSS_VMEM_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "vmem_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mmagic_ahb_clk.c,
+ CLK_INIT(vmem_ahb_clk.c),
+ },
+};
+
+static struct branch_clk vmem_maxi_clk = {
+ .cbcr_reg = MMSS_VMEM_MAXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "vmem_maxi_clk",
+ .parent = &maxi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(vmem_maxi_clk.c),
+ },
+};
+
+/*
+ * Debug clock mux: routes any one of the listed MMSS clocks onto the
+ * debug output for measurement.  The 10-bit select field (mask 0x3FF) and
+ * the enable bit (BIT(16)) both live in MMSS_MMSS_DEBUG_CLK_CTL.  The
+ * hex value beside each entry is that clock's hardware mux select code.
+ * CLKFLAG_NO_RATE_CACHE keeps measured rates from being stale.
+ */
+static struct mux_clk mmss_gcc_dbg_clk = {
+ .ops = &mux_reg_ops,
+ .en_mask = BIT(16),
+ .mask = 0x3FF,
+ .offset = MMSS_MMSS_DEBUG_CLK_CTL,
+ .en_offset = MMSS_MMSS_DEBUG_CLK_CTL,
+ .base = &virt_base,
+ MUX_SRC_LIST(
+ { &mmss_mmagic_ahb_clk.c, 0x0001 },
+ { &mmss_misc_ahb_clk.c, 0x0003 },
+ { &vmem_maxi_clk.c, 0x0009 },
+ { &vmem_ahb_clk.c, 0x000a },
+ { &video_core_clk.c, 0x000e },
+ { &video_axi_clk.c, 0x000f },
+ { &video_maxi_clk.c, 0x0010 },
+ { &video_ahb_clk.c, 0x0011 },
+ { &mmss_rbcpr_clk.c, 0x0012 },
+ { &mmss_rbcpr_ahb_clk.c, 0x0013 },
+ { &mdss_mdp_clk.c, 0x0014 },
+ { &mdss_pclk0_clk.c, 0x0016 },
+ { &mdss_pclk1_clk.c, 0x0017 },
+ { &mdss_extpclk_clk.c, 0x0018 },
+ { &video_subcore0_clk.c, 0x001a },
+ { &video_subcore1_clk.c, 0x001b },
+ { &mdss_vsync_clk.c, 0x001c },
+ { &mdss_hdmi_clk.c, 0x001d },
+ { &mdss_byte0_clk.c, 0x001e },
+ { &mdss_byte1_clk.c, 0x001f },
+ { &mdss_esc0_clk.c, 0x0020 },
+ { &mdss_esc1_clk.c, 0x0021 },
+ { &mdss_ahb_clk.c, 0x0022 },
+ { &mdss_hdmi_ahb_clk.c, 0x0023 },
+ { &mdss_axi_clk.c, 0x0024 },
+ { &camss_top_ahb_clk.c, 0x0025 },
+ { &camss_micro_ahb_clk.c, 0x0026 },
+ { &camss_gp0_clk.c, 0x0027 },
+ { &camss_gp1_clk.c, 0x0028 },
+ { &camss_mclk0_clk.c, 0x0029 },
+ { &camss_mclk1_clk.c, 0x002a },
+ { &camss_mclk2_clk.c, 0x002b },
+ { &camss_mclk3_clk.c, 0x002c },
+ { &camss_cci_clk.c, 0x002d },
+ { &camss_cci_ahb_clk.c, 0x002e },
+ { &camss_csi0phytimer_clk.c, 0x002f },
+ { &camss_csi1phytimer_clk.c, 0x0030 },
+ { &camss_csi2phytimer_clk.c, 0x0031 },
+ { &camss_jpeg0_clk.c, 0x0032 },
+ { &camss_ispif_ahb_clk.c, 0x0033 },
+ { &camss_jpeg2_clk.c, 0x0034 },
+ { &camss_jpeg_ahb_clk.c, 0x0035 },
+ { &camss_jpeg_axi_clk.c, 0x0036 },
+ { &camss_ahb_clk.c, 0x0037 },
+ { &camss_vfe0_clk.c, 0x0038 },
+ { &camss_vfe1_clk.c, 0x0039 },
+ { &camss_cpp_clk.c, 0x003a },
+ { &camss_cpp_ahb_clk.c, 0x003b },
+ { &camss_vfe_ahb_clk.c, 0x003c },
+ { &camss_vfe_axi_clk.c, 0x003d },
+ { &camss_csi_vfe0_clk.c, 0x003f },
+ { &camss_csi_vfe1_clk.c, 0x0040 },
+ { &camss_csi0_clk.c, 0x0041 },
+ { &camss_csi0_ahb_clk.c, 0x0042 },
+ { &camss_csi0phy_clk.c, 0x0043 },
+ { &camss_csi0rdi_clk.c, 0x0044 },
+ { &camss_csi0pix_clk.c, 0x0045 },
+ { &camss_csi1_clk.c, 0x0046 },
+ { &camss_csi1_ahb_clk.c, 0x0047 },
+ { &camss_csi1phy_clk.c, 0x0048 },
+ { &camss_csi1rdi_clk.c, 0x0049 },
+ { &camss_csi1pix_clk.c, 0x004a },
+ { &camss_csi2_clk.c, 0x004b },
+ { &camss_csi2_ahb_clk.c, 0x004c },
+ { &camss_csi2phy_clk.c, 0x004d },
+ { &camss_csi2rdi_clk.c, 0x004e },
+ { &camss_csi2pix_clk.c, 0x004f },
+ { &camss_csi3_clk.c, 0x0050 },
+ { &camss_csi3_ahb_clk.c, 0x0051 },
+ { &camss_csi3phy_clk.c, 0x0052 },
+ { &camss_csi3rdi_clk.c, 0x0053 },
+ { &camss_csi3pix_clk.c, 0x0054 },
+ { &mmss_mmagic_maxi_clk.c, 0x0070 },
+ { &camss_vfe0_stream_clk.c, 0x0071 },
+ { &camss_vfe1_stream_clk.c, 0x0072 },
+ { &camss_cpp_vbif_ahb_clk.c, 0x0073 },
+ { &mmss_mmagic_cfg_ahb_clk.c, 0x0074 },
+ { &mmss_misc_cxo_clk.c, 0x0077 },
+ { &camss_cpp_axi_clk.c, 0x007a },
+ { &camss_jpeg_dma_clk.c, 0x007b },
+ { &camss_vfe0_ahb_clk.c, 0x0086 },
+ { &camss_vfe1_ahb_clk.c, 0x0087 },
+ { &fd_core_clk.c, 0x0089 },
+ { &fd_core_uar_clk.c, 0x008a },
+ { &fd_ahb_clk.c, 0x008c },
+ { &camss_csiphy0_3p_clk.c, 0x0091 },
+ { &camss_csiphy1_3p_clk.c, 0x0092 },
+ { &camss_csiphy2_3p_clk.c, 0x0093 },
+ { &smmu_vfe_ahb_clk.c, 0x0094 },
+ { &smmu_vfe_axi_clk.c, 0x0095 },
+ { &smmu_cpp_ahb_clk.c, 0x0096 },
+ { &smmu_cpp_axi_clk.c, 0x0097 },
+ { &smmu_jpeg_ahb_clk.c, 0x0098 },
+ { &smmu_jpeg_axi_clk.c, 0x0099 },
+ { &mmagic_camss_axi_clk.c, 0x009a },
+ { &smmu_rot_ahb_clk.c, 0x009b },
+ { &smmu_rot_axi_clk.c, 0x009c },
+ { &smmu_mdp_ahb_clk.c, 0x009d },
+ { &smmu_mdp_axi_clk.c, 0x009e },
+ { &mmagic_mdss_axi_clk.c, 0x009f },
+ { &smmu_video_ahb_clk.c, 0x00a0 },
+ { &smmu_video_axi_clk.c, 0x00a1 },
+ { &mmagic_video_axi_clk.c, 0x00a2 },
+ { &mmagic_camss_noc_cfg_ahb_clk.c, 0x00ad },
+ { &mmagic_mdss_noc_cfg_ahb_clk.c, 0x00ae },
+ { &mmagic_video_noc_cfg_ahb_clk.c, 0x00af },
+ { &mmagic_bimc_noc_cfg_ahb_clk.c, 0x00b0 },
+ ),
+ .c = {
+ .dbg_name = "mmss_gcc_dbg_clk",
+ .ops = &clk_ops_gen_mux,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(mmss_gcc_dbg_clk.c),
+ },
+};
+
+/*
+ * Base (v1) MMSS clock table registered for every 8996 revision; the
+ * v2/v3/pro-only clocks live in msm_clocks_mmsscc_8996_v2 below.
+ */
+static struct clk_lookup msm_clocks_mmss_8996[] = {
+	CLK_LIST(mmsscc_xo),
+	CLK_LIST(mmsscc_gpll0),
+	CLK_LIST(mmsscc_gpll0_div),
+	CLK_LIST(mmpll0),
+	CLK_LIST(mmpll0_out_main),
+	CLK_LIST(mmpll1),
+	CLK_LIST(mmpll1_out_main),
+	CLK_LIST(mmpll4),
+	CLK_LIST(mmpll4_out_main),
+	CLK_LIST(mmpll3),
+	CLK_LIST(mmpll3_out_main),
+	CLK_LIST(ahb_clk_src),
+	CLK_LIST(mmpll2),
+	CLK_LIST(mmpll2_out_main),
+	CLK_LIST(mmpll8),
+	CLK_LIST(mmpll8_out_main),
+	CLK_LIST(mmpll9),
+	CLK_LIST(mmpll9_out_main),
+	CLK_LIST(mmpll5),
+	CLK_LIST(mmpll5_out_main),
+	CLK_LIST(csi0_clk_src),
+	CLK_LIST(vfe0_clk_src),
+	CLK_LIST(vfe1_clk_src),
+	CLK_LIST(csi1_clk_src),
+	CLK_LIST(csi2_clk_src),
+	CLK_LIST(csi3_clk_src),
+	CLK_LIST(maxi_clk_src),
+	CLK_LIST(cpp_clk_src),
+	CLK_LIST(jpeg0_clk_src),
+	CLK_LIST(jpeg2_clk_src),
+	CLK_LIST(jpeg_dma_clk_src),
+	CLK_LIST(mdp_clk_src),
+	CLK_LIST(video_core_clk_src),
+	CLK_LIST(fd_core_clk_src),
+	CLK_LIST(cci_clk_src),
+	CLK_LIST(csiphy0_3p_clk_src),
+	CLK_LIST(csiphy1_3p_clk_src),
+	CLK_LIST(csiphy2_3p_clk_src),
+	CLK_LIST(camss_gp0_clk_src),
+	CLK_LIST(camss_gp1_clk_src),
+	CLK_LIST(mclk0_clk_src),
+	CLK_LIST(mclk1_clk_src),
+	CLK_LIST(mclk2_clk_src),
+	CLK_LIST(mclk3_clk_src),
+	CLK_LIST(csi0phytimer_clk_src),
+	CLK_LIST(csi1phytimer_clk_src),
+	CLK_LIST(csi2phytimer_clk_src),
+	CLK_LIST(esc0_clk_src),
+	CLK_LIST(esc1_clk_src),
+	CLK_LIST(hdmi_clk_src),
+	CLK_LIST(vsync_clk_src),
+	CLK_LIST(rbcpr_clk_src),
+	CLK_LIST(video_subcore0_clk_src),
+	CLK_LIST(video_subcore1_clk_src),
+	CLK_LIST(camss_ahb_clk),
+	CLK_LIST(camss_cci_ahb_clk),
+	CLK_LIST(camss_cci_clk),
+	CLK_LIST(camss_cpp_ahb_clk),
+	CLK_LIST(camss_cpp_clk),
+	CLK_LIST(camss_cpp_axi_clk),
+	CLK_LIST(camss_cpp_vbif_ahb_clk),
+	CLK_LIST(camss_csi0_ahb_clk),
+	CLK_LIST(camss_csi0_clk),
+	CLK_LIST(camss_csi0phy_clk),
+	CLK_LIST(camss_csi0pix_clk),
+	CLK_LIST(camss_csi0rdi_clk),
+	CLK_LIST(camss_csi1_ahb_clk),
+	CLK_LIST(camss_csi1_clk),
+	CLK_LIST(camss_csi1phy_clk),
+	CLK_LIST(camss_csi1pix_clk),
+	CLK_LIST(camss_csi1rdi_clk),
+	CLK_LIST(camss_csi2_ahb_clk),
+	CLK_LIST(camss_csi2_clk),
+	CLK_LIST(camss_csi2phy_clk),
+	CLK_LIST(camss_csi2pix_clk),
+	CLK_LIST(camss_csi2rdi_clk),
+	CLK_LIST(camss_csi3_ahb_clk),
+	CLK_LIST(camss_csi3_clk),
+	CLK_LIST(camss_csi3phy_clk),
+	CLK_LIST(camss_csi3pix_clk),
+	CLK_LIST(camss_csi3rdi_clk),
+	CLK_LIST(camss_csi_vfe0_clk),
+	CLK_LIST(camss_csi_vfe1_clk),
+	CLK_LIST(camss_csiphy0_3p_clk),
+	CLK_LIST(camss_csiphy1_3p_clk),
+	CLK_LIST(camss_csiphy2_3p_clk),
+	CLK_LIST(camss_gp0_clk),
+	CLK_LIST(camss_gp1_clk),
+	CLK_LIST(camss_ispif_ahb_clk),
+	CLK_LIST(camss_jpeg0_clk),
+	CLK_LIST(camss_jpeg2_clk),
+	CLK_LIST(camss_jpeg_ahb_clk),
+	CLK_LIST(camss_jpeg_axi_clk),
+	CLK_LIST(camss_jpeg_dma_clk),
+	CLK_LIST(camss_mclk0_clk),
+	CLK_LIST(camss_mclk1_clk),
+	CLK_LIST(camss_mclk2_clk),
+	CLK_LIST(camss_mclk3_clk),
+	CLK_LIST(camss_micro_ahb_clk),
+	CLK_LIST(camss_csi0phytimer_clk),
+	CLK_LIST(camss_csi1phytimer_clk),
+	CLK_LIST(camss_csi2phytimer_clk),
+	CLK_LIST(camss_top_ahb_clk),
+	CLK_LIST(camss_vfe_ahb_clk),
+	CLK_LIST(camss_vfe_axi_clk),
+	CLK_LIST(camss_vfe0_ahb_clk),
+	CLK_LIST(camss_vfe0_clk),
+	CLK_LIST(camss_vfe0_stream_clk),
+	CLK_LIST(camss_vfe1_ahb_clk),
+	CLK_LIST(camss_vfe1_clk),
+	CLK_LIST(camss_vfe1_stream_clk),
+	CLK_LIST(fd_ahb_clk),
+	CLK_LIST(fd_core_clk),
+	CLK_LIST(fd_core_uar_clk),
+	CLK_LIST(mdss_ahb_clk),
+	CLK_LIST(mdss_axi_clk),
+	CLK_LIST(mdss_byte0_clk),
+	CLK_LIST(mdss_byte1_clk),
+	CLK_LIST(byte0_clk_src),
+	CLK_LIST(byte1_clk_src),
+	CLK_LIST(ext_byte0_clk_src),
+	CLK_LIST(ext_byte1_clk_src),
+	CLK_LIST(mdss_esc0_clk),
+	CLK_LIST(mdss_esc1_clk),
+	CLK_LIST(mdss_extpclk_clk),
+	CLK_LIST(mdss_hdmi_ahb_clk),
+	CLK_LIST(mdss_hdmi_clk),
+	CLK_LIST(mdss_mdp_clk),
+	CLK_LIST(mdss_pclk0_clk),
+	CLK_LIST(mdss_pclk1_clk),
+	CLK_LIST(pclk0_clk_src),
+	CLK_LIST(pclk1_clk_src),
+	CLK_LIST(ext_pclk0_clk_src),
+	CLK_LIST(ext_pclk1_clk_src),
+	CLK_LIST(mdss_vsync_clk),
+	CLK_LIST(mmss_misc_ahb_clk),
+	CLK_LIST(mmss_misc_cxo_clk),
+	CLK_LIST(mmagic_bimc_noc_cfg_ahb_clk),
+	CLK_LIST(mmagic_camss_axi_clk),
+	CLK_LIST(mmagic_camss_noc_cfg_ahb_clk),
+	CLK_LIST(mmss_mmagic_cfg_ahb_clk),
+	CLK_LIST(mmagic_mdss_axi_clk),
+	CLK_LIST(mmagic_mdss_noc_cfg_ahb_clk),
+	CLK_LIST(mmagic_video_axi_clk),
+	CLK_LIST(mmagic_video_noc_cfg_ahb_clk),
+	CLK_LIST(mmss_mmagic_ahb_clk),
+	CLK_LIST(mmss_mmagic_maxi_clk),
+	CLK_LIST(mmss_rbcpr_ahb_clk),
+	CLK_LIST(mmss_rbcpr_clk),
+	CLK_LIST(smmu_cpp_ahb_clk),
+	CLK_LIST(smmu_cpp_axi_clk),
+	CLK_LIST(smmu_jpeg_ahb_clk),
+	CLK_LIST(smmu_jpeg_axi_clk),
+	CLK_LIST(smmu_mdp_ahb_clk),
+	CLK_LIST(smmu_mdp_axi_clk),
+	CLK_LIST(smmu_rot_ahb_clk),
+	CLK_LIST(smmu_rot_axi_clk),
+	CLK_LIST(smmu_vfe_ahb_clk),
+	CLK_LIST(smmu_vfe_axi_clk),
+	CLK_LIST(smmu_video_ahb_clk),
+	CLK_LIST(smmu_video_axi_clk),
+	CLK_LIST(video_ahb_clk),
+	CLK_LIST(video_axi_clk),
+	CLK_LIST(video_core_clk),
+	CLK_LIST(video_maxi_clk),
+	CLK_LIST(video_subcore0_clk),
+	CLK_LIST(video_subcore1_clk),
+	CLK_LIST(vmem_ahb_clk),
+	CLK_LIST(vmem_maxi_clk),
+	CLK_LIST(mmss_gcc_dbg_clk),
+	CLK_LIST(mdss_mdp_vote_clk),
+	CLK_LIST(mdss_rotator_vote_clk),
+};
+
+/* PLL post-dividers that exist only on v2/v3/pro silicon (see probe). */
+static struct clk_lookup msm_clocks_mmsscc_8996_v2[] = {
+	CLK_LIST(mmpll2_postdiv_clk),
+	CLK_LIST(mmpll8_postdiv_clk),
+	CLK_LIST(mmpll9_postdiv_clk),
+};
+
+/*
+ * msm_mmsscc_8996_v2_fixup() - apply MSM8996 v2 silicon overrides.
+ *
+ * Rewrites the PLL rates, voltage-corner fmax tables and per-RCG
+ * frequency tables that differ from the v1 defaults baked into the
+ * static clock data.  The GFX PLLs (mmpll2/mmpll8) also switch to the
+ * gfx VCO table and plain alpha-PLL ops with rate 0 (rate set at
+ * runtime).  Must run before of_msm_clock_register().
+ */
+static void msm_mmsscc_8996_v2_fixup(void)
+{
+	mmpll1.c.rate = 810000000;
+	mmpll1.c.fmax[VDD_DIG_LOWER] = 405000000;
+	mmpll1.c.fmax[VDD_DIG_LOW] = 405000000;
+	mmpll1.c.fmax[VDD_DIG_NOMINAL] = 1300000000;
+	mmpll1.c.fmax[VDD_DIG_HIGH] = 1300000000;
+
+	mmpll2.vco_tbl = mmpll_gfx_vco;
+	/* Was terminated with a comma operator; use a real statement end. */
+	mmpll2.num_vco = ARRAY_SIZE(mmpll_gfx_vco);
+	mmpll2.c.rate = 0;
+	mmpll2.c.fmax[VDD_DIG_LOWER] = 1000000000;
+	mmpll2.c.fmax[VDD_DIG_LOW] = 1000000000;
+	mmpll2.c.fmax[VDD_DIG_NOMINAL] = 1000000000;
+	mmpll2.c.fmax[VDD_DIG_HIGH] = 1000000000;
+	mmpll2.no_prepared_reconfig = true;
+	mmpll2.c.ops = &clk_ops_alpha_pll;
+
+	mmpll3.c.rate = 980000000;
+	mmpll3.c.fmax[VDD_DIG_LOWER] = 650000000;
+	mmpll3.c.fmax[VDD_DIG_LOW] = 650000000;
+	mmpll3.c.fmax[VDD_DIG_NOMINAL] = 1300000000;
+	mmpll3.c.fmax[VDD_DIG_HIGH] = 1300000000;
+
+	mmpll4.c.fmax[VDD_DIG_LOWER] = 650000000;
+	mmpll4.c.fmax[VDD_DIG_LOW] = 650000000;
+	mmpll4.c.fmax[VDD_DIG_NOMINAL] = 1300000000;
+	mmpll4.c.fmax[VDD_DIG_HIGH] = 1300000000;
+
+	mmpll5.c.rate = 825000000;
+	mmpll5.c.fmax[VDD_DIG_LOWER] = 650000000;
+	mmpll5.c.fmax[VDD_DIG_LOW] = 650000000;
+	mmpll5.c.fmax[VDD_DIG_NOMINAL] = 1300000000;
+	mmpll5.c.fmax[VDD_DIG_HIGH] = 1300000000;
+
+	mmpll8.vco_tbl = mmpll_gfx_vco;
+	mmpll8.num_vco = ARRAY_SIZE(mmpll_gfx_vco);
+	mmpll8.c.rate = 0;
+	mmpll8.c.fmax[VDD_DIG_LOWER] = 1000000000;
+	mmpll8.c.fmax[VDD_DIG_LOW] = 1000000000;
+	mmpll8.c.fmax[VDD_DIG_NOMINAL] = 1000000000;
+	mmpll8.c.fmax[VDD_DIG_HIGH] = 1000000000;
+	mmpll8.no_prepared_reconfig = true;
+	mmpll8.c.ops = &clk_ops_alpha_pll;
+
+	mmpll9.c.rate = 1209600000;
+	mmpll9.c.fmax[VDD_DIG_LOWER] = 650000000;
+	mmpll9.c.fmax[VDD_DIG_LOW] = 650000000;
+	mmpll9.c.fmax[VDD_DIG_NOMINAL] = 1300000000;
+	mmpll9.c.fmax[VDD_DIG_HIGH] = 1300000000;
+
+	csi0_clk_src.freq_tbl = ftbl_csi0_clk_src_v2;
+	csi1_clk_src.freq_tbl = ftbl_csi1_clk_src_v2;
+	csi2_clk_src.freq_tbl = ftbl_csi2_clk_src_v2;
+	csi3_clk_src.freq_tbl = ftbl_csi3_clk_src_v2;
+
+	csiphy0_3p_clk_src.freq_tbl = ftbl_csiphy0_3p_clk_src_v2;
+	csiphy0_3p_clk_src.c.fmax[VDD_DIG_HIGH] = 384000000;
+	csiphy1_3p_clk_src.freq_tbl = ftbl_csiphy1_3p_clk_src_v2;
+	csiphy1_3p_clk_src.c.fmax[VDD_DIG_HIGH] = 384000000;
+	csiphy2_3p_clk_src.freq_tbl = ftbl_csiphy2_3p_clk_src_v2;
+	csiphy2_3p_clk_src.c.fmax[VDD_DIG_HIGH] = 384000000;
+
+	vfe0_clk_src.freq_tbl = ftbl_vfe0_clk_src_v2;
+	vfe0_clk_src.c.fmax[VDD_DIG_LOWER] = 100000000;
+	vfe1_clk_src.freq_tbl = ftbl_vfe1_clk_src_v2;
+
+	mdp_clk_src.freq_tbl = ftbl_mdp_clk_src_v2;
+	mdp_clk_src.c.fmax[VDD_DIG_LOW] = 275000000;
+	mdp_clk_src.c.fmax[VDD_DIG_NOMINAL] = 330000000;
+	mdp_clk_src.c.fmax[VDD_DIG_HIGH] = 412500000;
+
+	maxi_clk_src.freq_tbl = ftbl_maxi_clk_src_v2;
+	maxi_clk_src.c.fmax[VDD_DIG_HIGH] = 405000000;
+
+	rbcpr_clk_src.freq_tbl = ftbl_rbcpr_clk_src_v2;
+	rbcpr_clk_src.c.fmax[VDD_DIG_NOMINAL] = 50000000;
+	rbcpr_clk_src.c.fmax[VDD_DIG_HIGH] = 50000000;
+
+	video_core_clk_src.freq_tbl = ftbl_video_core_clk_src_v2;
+	video_core_clk_src.c.fmax[VDD_DIG_HIGH] = 516000000;
+	video_subcore0_clk_src.freq_tbl = ftbl_video_subcore0_clk_src_v2;
+	video_subcore0_clk_src.c.fmax[VDD_DIG_HIGH] = 490000000;
+	video_subcore1_clk_src.freq_tbl = ftbl_video_subcore1_clk_src_v2;
+	video_subcore1_clk_src.c.fmax[VDD_DIG_HIGH] = 516000000;
+}
+
+/*
+ * msm_mmsscc_8996_v3_fixup() - apply MSM8996 v3 silicon overrides.
+ *
+ * Same shape as the v2 fixup but with v3 rates, fmax corners and
+ * frequency tables; also reused by the Pro part (see probe), which
+ * layers msm_mmsscc_8996_pro_fixup() on top for speed-bin 0.
+ */
+static void msm_mmsscc_8996_v3_fixup(void)
+{
+	mmpll1.c.rate = 810000000;
+	mmpll1.c.fmax[VDD_DIG_LOWER] = 405000000;
+	mmpll1.c.fmax[VDD_DIG_LOW] = 405000000;
+	mmpll1.c.fmax[VDD_DIG_NOMINAL] = 810000000;
+	mmpll1.c.fmax[VDD_DIG_HIGH] = 810000000;
+
+	mmpll2.vco_tbl = mmpll_gfx_vco;
+	/* Was terminated with a comma operator; use a real statement end. */
+	mmpll2.num_vco = ARRAY_SIZE(mmpll_gfx_vco);
+	mmpll2.c.rate = 0;
+	mmpll2.c.fmax[VDD_DIG_LOWER] = 1000000000;
+	mmpll2.c.fmax[VDD_DIG_LOW] = 1000000000;
+	mmpll2.c.fmax[VDD_DIG_NOMINAL] = 1000000000;
+	mmpll2.c.fmax[VDD_DIG_HIGH] = 1000000000;
+	mmpll2.no_prepared_reconfig = true;
+	mmpll2.c.ops = &clk_ops_alpha_pll;
+
+	mmpll3.c.rate = 1040000000;
+	mmpll3.c.fmax[VDD_DIG_LOWER] = 520000000;
+	mmpll3.c.fmax[VDD_DIG_LOW] = 520000000;
+	mmpll3.c.fmax[VDD_DIG_NOMINAL] = 1040000000;
+	mmpll3.c.fmax[VDD_DIG_HIGH] = 1040000000;
+
+	mmpll5.c.rate = 825000000;
+	mmpll5.c.fmax[VDD_DIG_LOWER] = 412500000;
+	mmpll5.c.fmax[VDD_DIG_LOW] = 825000000;
+	mmpll5.c.fmax[VDD_DIG_NOMINAL] = 825000000;
+	mmpll5.c.fmax[VDD_DIG_HIGH] = 825000000;
+
+	mmpll8.vco_tbl = mmpll_gfx_vco;
+	mmpll8.num_vco = ARRAY_SIZE(mmpll_gfx_vco);
+	mmpll8.c.rate = 0;
+	mmpll8.c.fmax[VDD_DIG_LOWER] = 1000000000;
+	mmpll8.c.fmax[VDD_DIG_LOW] = 1000000000;
+	mmpll8.c.fmax[VDD_DIG_NOMINAL] = 1000000000;
+	mmpll8.c.fmax[VDD_DIG_HIGH] = 1000000000;
+	mmpll8.no_prepared_reconfig = true;
+	mmpll8.c.ops = &clk_ops_alpha_pll;
+
+	mmpll9.c.rate = 1248000000;
+	mmpll9.c.fmax[VDD_DIG_LOWER] = 624000000;
+	mmpll9.c.fmax[VDD_DIG_LOW] = 624000000;
+	mmpll9.c.fmax[VDD_DIG_NOMINAL] = 1248000000;
+	mmpll9.c.fmax[VDD_DIG_HIGH] = 1248000000;
+
+	csi0_clk_src.freq_tbl = ftbl_csi0_clk_src_v3;
+	csi1_clk_src.freq_tbl = ftbl_csi1_clk_src_v3;
+	csi2_clk_src.freq_tbl = ftbl_csi2_clk_src_v3;
+	csi3_clk_src.freq_tbl = ftbl_csi3_clk_src_v3;
+
+	csiphy0_3p_clk_src.freq_tbl = ftbl_csiphy0_3p_clk_src_v2;
+	csiphy0_3p_clk_src.c.fmax[VDD_DIG_HIGH] = 384000000;
+	csiphy1_3p_clk_src.freq_tbl = ftbl_csiphy1_3p_clk_src_v2;
+	csiphy1_3p_clk_src.c.fmax[VDD_DIG_HIGH] = 384000000;
+	csiphy2_3p_clk_src.freq_tbl = ftbl_csiphy2_3p_clk_src_v2;
+	csiphy2_3p_clk_src.c.fmax[VDD_DIG_HIGH] = 384000000;
+
+	csi0phytimer_clk_src.freq_tbl = ftbl_csi0phytimer_clk_src_v3;
+	csi0phytimer_clk_src.c.fmax[VDD_DIG_LOW] = 200000000;
+	csi1phytimer_clk_src.freq_tbl = ftbl_csi1phytimer_clk_src_v3;
+	csi1phytimer_clk_src.c.fmax[VDD_DIG_LOW] = 200000000;
+	csi2phytimer_clk_src.freq_tbl = ftbl_csi2phytimer_clk_src_v3;
+	csi2phytimer_clk_src.c.fmax[VDD_DIG_LOW] = 200000000;
+
+	vfe0_clk_src.freq_tbl = ftbl_vfe0_clk_src_v3;
+	vfe0_clk_src.c.fmax[VDD_DIG_LOWER] = 100000000;
+	vfe0_clk_src.c.fmax[VDD_DIG_LOW] = 300000000;
+	vfe1_clk_src.freq_tbl = ftbl_vfe1_clk_src_v3;
+	vfe1_clk_src.c.fmax[VDD_DIG_LOW] = 300000000;
+
+	mdp_clk_src.freq_tbl = ftbl_mdp_clk_src_v2;
+	mdp_clk_src.c.fmax[VDD_DIG_LOW] = 275000000;
+	mdp_clk_src.c.fmax[VDD_DIG_NOMINAL] = 330000000;
+	mdp_clk_src.c.fmax[VDD_DIG_HIGH] = 412500000;
+
+	maxi_clk_src.freq_tbl = ftbl_maxi_clk_src_v2;
+	maxi_clk_src.c.fmax[VDD_DIG_HIGH] = 405000000;
+
+	rbcpr_clk_src.freq_tbl = ftbl_rbcpr_clk_src_v2;
+	rbcpr_clk_src.c.fmax[VDD_DIG_NOMINAL] = 50000000;
+	rbcpr_clk_src.c.fmax[VDD_DIG_HIGH] = 50000000;
+
+	video_core_clk_src.freq_tbl = ftbl_video_core_clk_src_v3;
+	video_core_clk_src.c.fmax[VDD_DIG_NOMINAL] = 346666667;
+	video_core_clk_src.c.fmax[VDD_DIG_HIGH] = 520000000;
+	video_subcore0_clk_src.freq_tbl = ftbl_video_subcore0_clk_src_v3;
+	video_subcore0_clk_src.c.fmax[VDD_DIG_NOMINAL] = 346666667;
+	video_subcore0_clk_src.c.fmax[VDD_DIG_HIGH] = 520000000;
+	video_subcore1_clk_src.freq_tbl = ftbl_video_subcore1_clk_src_v3;
+	video_subcore1_clk_src.c.fmax[VDD_DIG_NOMINAL] = 346666667;
+	video_subcore1_clk_src.c.fmax[VDD_DIG_HIGH] = 520000000;
+}
+
+/*
+ * msm_mmsscc_8996_pro_fixup() - 8996 Pro overrides, applied on top of
+ * the v3 fixup for speed-bin 0 parts only (see probe).  mmpll9 becomes
+ * runtime-configurable (rate 0, full alpha-PLL ops) with raised fmax
+ * corners, and its post-divider gains real divider ops.
+ */
+static void msm_mmsscc_8996_pro_fixup(void)
+{
+	mmpll9.c.rate = 0;
+	mmpll9.c.fmax[VDD_DIG_LOWER] = 652800000;
+	mmpll9.c.fmax[VDD_DIG_LOW] = 652800000;
+	mmpll9.c.fmax[VDD_DIG_NOMINAL] = 1305600000;
+	mmpll9.c.fmax[VDD_DIG_HIGH] = 1305600000;
+	mmpll9.c.ops = &clk_ops_alpha_pll;
+	mmpll9.min_supported_freq = 1248000000;
+
+	mmpll9_postdiv_clk.c.ops = &clk_ops_div;
+}
+
+static int is_v3_gpu;
+/*
+ * gpu_pre_set_rate() - hook run before a GFX clock rate change.
+ *
+ * No-op unless a v3 GPU was detected at probe.  Looks up the voltage
+ * corner of the current and requested rates (first regulator of each
+ * vdd table row) and, when the corner changes, sends the RPM an
+ * RPM_SMD_KEY_STATE message telling it whether the new corner equals
+ * GFX_MIN_SVS_LEVEL.  NOTE(review): new_uv holds a vdd_uv table entry
+ * while GFX_MIN_SVS_LEVEL is named like a corner level — confirm the
+ * units actually match.
+ *
+ * Returns a negative errno only when no vdd level exists for the old or
+ * new rate; an RPM send failure is reported via WARN_ONCE but does not
+ * fail the rate change (the function deliberately returns 0).
+ */
+static int gpu_pre_set_rate(struct clk *clk, unsigned long new_rate)
+{
+	struct msm_rpm_kvp kvp;
+	struct clk_vdd_class *vdd = clk->vdd_class;
+	int old_level, new_level, old_uv, new_uv;
+	int n_regs = vdd->num_regulators;
+	uint32_t value;
+	int ret = 0;
+
+	if (!is_v3_gpu)
+		return ret;
+
+	/* Rows are num_regulators wide; index 0 is the first regulator. */
+	old_level = find_vdd_level(clk, clk->rate);
+	if (old_level < 0)
+		return old_level;
+	old_uv = vdd->vdd_uv[old_level * n_regs];
+
+	new_level = find_vdd_level(clk, new_rate);
+	if (new_level < 0)
+		return new_level;
+	new_uv = vdd->vdd_uv[new_level * n_regs];
+
+	/* No corner crossing, nothing to tell the RPM. */
+	if (new_uv == old_uv)
+		return ret;
+
+	value = (new_uv == GFX_MIN_SVS_LEVEL);
+
+	kvp.key = RPM_SMD_KEY_STATE;
+	kvp.data = (void *)&value;
+	kvp.length = sizeof(value);
+
+	ret = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET, RPM_MISC_CLK_TYPE,
+				   GPU_REQ_ID, &kvp, 1);
+	if (ret)
+		WARN_ONCE(1, "%s: Sending the RPM message failed (value - %u)\n",
+			  __func__, value);
+	return 0;
+}
+
+/*
+ * of_get_fmax_vdd_class() - populate a clock's fmax/vdd tables from DT.
+ * @pdev:      owning platform device (devm allocations, error logging)
+ * @c:         clock whose fmax table and vdd_class are to be filled
+ * @prop_name: DT property containing rows of <freq uV-per-regulator...>
+ *
+ * Each row holds one frequency followed by one voltage value per
+ * regulator of the clock's vdd_class.  All allocations are devm-managed.
+ * cur_level is initialised to num_levels, i.e. "no vote cast yet".
+ *
+ * Return: 0 on success, negative errno on missing/malformed property or
+ * allocation failure.
+ */
+static int of_get_fmax_vdd_class(struct platform_device *pdev, struct clk *c,
+								char *prop_name)
+{
+	struct device_node *of = pdev->dev.of_node;
+	int prop_len, i, j, rc;
+	struct clk_vdd_class *vdd = c->vdd_class;
+	int num = vdd->num_regulators + 1;
+	u32 *array;
+
+	if (!of_find_property(of, prop_name, &prop_len)) {
+		dev_err(&pdev->dev, "missing %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	prop_len /= sizeof(u32);
+	if (prop_len % num) {
+		dev_err(&pdev->dev, "bad length %d\n", prop_len);
+		return -EINVAL;
+	}
+
+	/* prop_len now counts rows rather than u32 cells. */
+	prop_len /= num;
+	vdd->level_votes = devm_kzalloc(&pdev->dev, prop_len * sizeof(int),
+					GFP_KERNEL);
+	if (!vdd->level_votes)
+		return -ENOMEM;
+
+	vdd->vdd_uv = devm_kzalloc(&pdev->dev,
+		prop_len * sizeof(int) * (num - 1), GFP_KERNEL);
+	if (!vdd->vdd_uv)
+		return -ENOMEM;
+
+	c->fmax = devm_kzalloc(&pdev->dev, prop_len * sizeof(unsigned long),
+					GFP_KERNEL);
+	if (!c->fmax)
+		return -ENOMEM;
+
+	array = devm_kzalloc(&pdev->dev,
+			prop_len * sizeof(u32) * num, GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	/* Previously unchecked; a short/invalid property now fails cleanly. */
+	rc = of_property_read_u32_array(of, prop_name, array, prop_len * num);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to read %s (%d)\n", prop_name, rc);
+		devm_kfree(&pdev->dev, array);
+		return rc;
+	}
+
+	for (i = 0; i < prop_len; i++) {
+		c->fmax[i] = array[num * i];
+		for (j = 1; j < num; j++) {
+			vdd->vdd_uv[(num - 1) * i + (j - 1)] =
+						array[num * i + j];
+		}
+	}
+
+	devm_kfree(&pdev->dev, array);
+	vdd->num_levels = prop_len;
+	vdd->cur_level = prop_len;
+	c->num_fmax = prop_len;
+	return 0;
+}
+
+static struct platform_driver msm_clock_gpu_driver;
+struct resource *efuse_res;
+void __iomem *gpu_base;
+u64 efuse;
+int gpu_speed_bin;
+
+/*
+ * msm_mmsscc_8996_probe() - probe the MMSS clock controller.
+ *
+ * Maps the CC and efuse register regions, quiesces a few hardware
+ * features (debug-clock divider, AHB DCD, mmagic cfg NoC FSM), acquires
+ * the digital/PLL regulators and parent clocks, applies the silicon-
+ * revision fixups selected by the DT compatible, registers the clock
+ * tables and finally registers the companion GPU clock driver (which
+ * consumes the gpu_speed_bin computed here).
+ */
+int msm_mmsscc_8996_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int rc;
+	struct clk *tmp;
+	struct regulator *reg;
+	u32 regval;
+	/* is_pro/is_v2 are assigned before use below. */
+	int is_pro, is_v2, is_v3 = 0;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to retrieve register base.\n");
+		return -ENOMEM;
+	}
+
+	efuse_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse");
+	if (!efuse_res) {
+		dev_err(&pdev->dev, "Unable to retrieve efuse register base.\n");
+		return -ENOMEM;
+	}
+
+	virt_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!virt_base) {
+		dev_err(&pdev->dev, "Failed to map CC registers\n");
+		return -ENOMEM;
+	}
+
+	gpu_base = devm_ioremap(&pdev->dev, efuse_res->start,
+						resource_size(efuse_res));
+	if (!gpu_base) {
+		dev_err(&pdev->dev, "Unable to map in efuse base\n");
+		return -ENOMEM;
+	}
+
+	/* Clear the DBG_CLK_DIV bits of the MMSS debug register */
+	regval = readl_relaxed(virt_base + mmss_gcc_dbg_clk.offset);
+	regval &= ~BM(18, 17);
+	writel_relaxed(regval, virt_base + mmss_gcc_dbg_clk.offset);
+
+	/* Disable the AHB DCD */
+	regval = readl_relaxed(virt_base + MMSS_MNOC_DCD_CONFIG_AHB);
+	regval &= ~BIT(31);
+	writel_relaxed(regval, virt_base + MMSS_MNOC_DCD_CONFIG_AHB);
+
+	/* Disable the NoC FSM for mmss_mmagic_cfg_ahb_clk */
+	regval = readl_relaxed(virt_base + mmss_mmagic_cfg_ahb_clk.cbcr_reg);
+	regval &= ~BIT(15);
+	writel_relaxed(regval, virt_base + mmss_mmagic_cfg_ahb_clk.cbcr_reg);
+
+	vdd_dig.vdd_uv[1] = RPM_REGULATOR_CORNER_SVS_KRAIT;
+	reg = vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
+	if (IS_ERR(reg)) {
+		if (PTR_ERR(reg) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_dig regulator!");
+		return PTR_ERR(reg);
+	}
+
+	reg = vdd_mmpll4.regulator[0] = devm_regulator_get(&pdev->dev,
+							   "mmpll4_dig");
+	if (IS_ERR(reg)) {
+		if (PTR_ERR(reg) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get mmpll4_dig regulator!");
+		return PTR_ERR(reg);
+	}
+
+	reg = vdd_mmpll4.regulator[1] = devm_regulator_get(&pdev->dev,
+							   "mmpll4_analog");
+	if (IS_ERR(reg)) {
+		if (PTR_ERR(reg) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get mmpll4_analog regulator!");
+		return PTR_ERR(reg);
+	}
+
+	tmp = mmsscc_xo.c.parent = devm_clk_get(&pdev->dev, "xo");
+	if (IS_ERR(tmp)) {
+		if (PTR_ERR(tmp) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get xo clock!\n");
+		return PTR_ERR(tmp);
+	}
+
+	tmp = mmsscc_gpll0.c.parent = devm_clk_get(&pdev->dev, "gpll0");
+	if (IS_ERR(tmp)) {
+		if (PTR_ERR(tmp) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get gpll0 clock!\n");
+		return PTR_ERR(tmp);
+	}
+
+	tmp = mmsscc_gpll0_div.c.parent = devm_clk_get(&pdev->dev, "gpll0_div");
+	if (IS_ERR(tmp)) {
+		if (PTR_ERR(tmp) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get gpll0_div clock!\n");
+		return PTR_ERR(tmp);
+	}
+
+	/* Wire up the display PHY source clocks resolved lazily by name. */
+	ext_pclk0_clk_src.dev = &pdev->dev;
+	ext_pclk0_clk_src.clk_id = "pclk0_src";
+	ext_pclk0_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_pclk1_clk_src.dev = &pdev->dev;
+	ext_pclk1_clk_src.clk_id = "pclk1_src";
+	ext_pclk1_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_byte0_clk_src.dev = &pdev->dev;
+	ext_byte0_clk_src.clk_id = "byte0_src";
+	ext_byte0_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_byte1_clk_src.dev = &pdev->dev;
+	ext_byte1_clk_src.clk_id = "byte1_src";
+	ext_byte1_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_extpclk_clk_src.dev = &pdev->dev;
+	ext_extpclk_clk_src.clk_id = "extpclk_src";
+
+	/* Default speed bin from the v3 efuse field; pro re-reads below. */
+	efuse = readl_relaxed(gpu_base);
+	gpu_speed_bin = ((efuse >> EFUSE_SHIFT_v3) & EFUSE_MASK_v3);
+
+	is_v2 = of_device_is_compatible(pdev->dev.of_node,
+					"qcom,mmsscc-8996-v2");
+	if (is_v2)
+		msm_mmsscc_8996_v2_fixup();
+
+	is_v3 = of_device_is_compatible(pdev->dev.of_node,
+					"qcom,mmsscc-8996-v3");
+	if (is_v3)
+		msm_mmsscc_8996_v3_fixup();
+
+	is_pro = of_device_is_compatible(pdev->dev.of_node,
+					"qcom,mmsscc-8996-pro");
+	if (is_pro) {
+		gpu_speed_bin = ((efuse >> EFUSE_SHIFT_PRO) & EFUSE_MASK_PRO);
+		msm_mmsscc_8996_v3_fixup();
+		if (!gpu_speed_bin)
+			msm_mmsscc_8996_pro_fixup();
+	}
+
+	rc = of_msm_clock_register(pdev->dev.of_node, msm_clocks_mmss_8996,
+				   ARRAY_SIZE(msm_clocks_mmss_8996));
+	if (rc)
+		return rc;
+
+	/* Register v2/v3/pro specific clocks */
+	if (is_v2 || is_v3 || is_pro) {
+		rc = of_msm_clock_register(pdev->dev.of_node,
+				msm_clocks_mmsscc_8996_v2,
+				ARRAY_SIZE(msm_clocks_mmsscc_8996_v2));
+		if (rc)
+			return rc;
+	}
+	dev_info(&pdev->dev, "Registered MMSS clocks.\n");
+
+	return platform_driver_register(&msm_clock_gpu_driver);
+}
+
+/*
+ * DT match table; the compatible string also selects which silicon
+ * fixup runs in probe.  Made const per kernel convention for
+ * of_device_id tables (of_match_table takes a const pointer).
+ */
+static const struct of_device_id msm_clock_mmss_match_table[] = {
+	{ .compatible = "qcom,mmsscc-8996" },
+	{ .compatible = "qcom,mmsscc-8996-v2" },
+	{ .compatible = "qcom,mmsscc-8996-v3" },
+	{ .compatible = "qcom,mmsscc-8996-pro" },
+	{},
+};
+
+/* Platform driver for the MMSS clock controller (registered at init). */
+static struct platform_driver msm_clock_mmss_driver = {
+	.probe = msm_mmsscc_8996_probe,
+	.driver = {
+		.name = "qcom,mmsscc-8996",
+		.of_match_table = msm_clock_mmss_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+/* ======== Graphics Clock Controller ======== */
+
+/*
+ * Debug mux for observing GPU CC clocks on the debug output.
+ * NOTE(review): reuses the MMSS_MMSS_DEBUG_CLK_CTL offset macros with
+ * the GPU CC base (virt_base_gpu) — confirm the GPU CC mirrors the
+ * MMSS debug-register layout.
+ */
+static struct mux_clk gpu_gcc_dbg_clk = {
+	.ops = &mux_reg_ops,
+	.en_mask = BIT(16),
+	.mask = 0x3FF,
+	.offset = MMSS_MMSS_DEBUG_CLK_CTL,
+	.en_offset = MMSS_MMSS_DEBUG_CLK_CTL,
+	.base = &virt_base_gpu,
+	MUX_SRC_LIST(
+		{ &gpu_ahb_clk.c, 0x000c },
+		{ &gpu_gx_gfx3d_clk.c, 0x000d },
+		{ &gpu_gx_rbbmtimer_clk.c, 0x003e },
+		{ &gpu_aon_isense_clk.c, 0x0088 },
+	),
+	.c = {
+		.dbg_name = "gpu_gcc_dbg_clk",
+		.ops = &clk_ops_gen_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(gpu_gcc_dbg_clk.c),
+	},
+};
+
+/* GPU clock table for v1 silicon (plain RCG gfx3d source). */
+static struct clk_lookup msm_clocks_gpu_8996[] = {
+	CLK_LIST(gfx3d_clk_src),
+	CLK_LIST(rbbmtimer_clk_src),
+	CLK_LIST(gpu_ahb_clk),
+	CLK_LIST(gpu_aon_isense_clk),
+	CLK_LIST(gpu_gx_gfx3d_clk),
+	CLK_LIST(gpu_mx_clk),
+	CLK_LIST(gpu_gx_rbbmtimer_clk),
+	CLK_LIST(gpu_gcc_dbg_clk),
+};
+
+/* GPU clock table for v2+ silicon (mux-div gfx3d source). */
+static struct clk_lookup msm_clocks_gpu_8996_v2[] = {
+	CLK_LIST(gfx3d_clk_src_v2),
+	CLK_LIST(rbbmtimer_clk_src),
+	CLK_LIST(gpu_ahb_clk),
+	CLK_LIST(gpu_aon_isense_clk),
+	CLK_LIST(gpu_gx_gfx3d_clk),
+	CLK_LIST(gpu_mx_clk),
+	CLK_LIST(gpu_gx_rbbmtimer_clk),
+	CLK_LIST(gpu_gcc_dbg_clk),
+};
+
+/* v2+ GPUs: reparent the GFX3D branch onto the v2 gfx3d source clock. */
+static void msm_gpucc_8996_v2_fixup(void)
+{
+	gpu_gx_gfx3d_clk.c.parent = &gfx3d_clk_src_v2.c;
+}
+
+/*
+ * msm_gpucc_8996_probe() - probe the graphics clock controller.
+ *
+ * Maps the GPU CC registers, acquires the gfx/mx regulators, builds the
+ * speed-bin-specific DT property names (using gpu_speed_bin computed by
+ * the MMSS probe), loads the freq/corner tables and registers either
+ * the v1 or the v2+ GPU clock list with the matching pre_set_rate hook.
+ */
+int msm_gpucc_8996_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc;
+	struct regulator *reg;
+	int is_v2_gpu, is_v3_0_gpu, is_pro_gpu;
+	/*
+	 * Sized generously: the old exact-fit initializers made
+	 * snprintf() silently truncate for speed bins >= 10.
+	 */
+	char speedbin_str[40];
+	char mx_speedbin_str[40];
+
+	if (!of_node)
+		return -EINVAL;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to retrieve register base.\n");
+		return -ENOMEM;
+	}
+
+	gfx3d_clk_src_v2.base = virt_base_gpu = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+	if (!virt_base_gpu) {
+		dev_err(&pdev->dev, "Failed to map CC registers\n");
+		return -ENOMEM;
+	}
+
+	reg = vdd_gfx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_gfx");
+	if (IS_ERR(reg)) {
+		if (PTR_ERR(reg) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_gfx regulator!");
+		return PTR_ERR(reg);
+	}
+
+	reg = vdd_gfx.regulator[1] = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(reg)) {
+		if (PTR_ERR(reg) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_mx regulator!");
+		return PTR_ERR(reg);
+	}
+	vdd_gfx.use_max_uV = true;
+
+	reg = vdd_gpu_mx.regulator[0] = devm_regulator_get(&pdev->dev,
+							   "vdd_gpu_mx");
+	if (IS_ERR(reg)) {
+		if (PTR_ERR(reg) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_gpu_mx regulator!");
+		return PTR_ERR(reg);
+	}
+	vdd_gpu_mx.use_max_uV = true;
+
+	is_v2_gpu = of_device_is_compatible(of_node, "qcom,gpucc-8996-v2");
+	is_v3_gpu = of_device_is_compatible(of_node, "qcom,gpucc-8996-v3");
+	is_v3_0_gpu = of_device_is_compatible(of_node, "qcom,gpucc-8996-v3.0");
+	is_pro_gpu = of_device_is_compatible(of_node, "qcom,gpucc-8996-pro");
+
+	dev_info(&pdev->dev, "using speed bin %d\n", gpu_speed_bin);
+	snprintf(speedbin_str, ARRAY_SIZE(speedbin_str),
+				"qcom,gfxfreq-speedbin%d", gpu_speed_bin);
+	snprintf(mx_speedbin_str, ARRAY_SIZE(mx_speedbin_str),
+				"qcom,gfxfreq-mx-speedbin%d", gpu_speed_bin);
+
+	/* Missing per-bin tables fall back to the bin-0 tables. */
+	rc = of_get_fmax_vdd_class(pdev, &gpu_mx_clk.c, mx_speedbin_str);
+	if (rc) {
+		dev_err(&pdev->dev, "Can't get speed bin for gpu_mx_clk. Falling back to zero.\n");
+		rc = of_get_fmax_vdd_class(pdev, &gpu_mx_clk.c,
+					   "qcom,gfxfreq-mx-speedbin0");
+		if (rc) {
+			dev_err(&pdev->dev, "Unable to get gpu mx freq-corner mapping info\n");
+			return rc;
+		}
+	}
+
+	if (!is_v2_gpu && !is_v3_gpu && !is_v3_0_gpu && !is_pro_gpu) {
+		rc = of_get_fmax_vdd_class(pdev, &gfx3d_clk_src.c,
+					   speedbin_str);
+		if (rc) {
+			dev_err(&pdev->dev, "Can't get speed bin for gfx3d_clk_src. Falling back to zero.\n");
+			rc = of_get_fmax_vdd_class(pdev, &gfx3d_clk_src.c,
+						   "qcom,gfxfreq-speedbin0");
+			if (rc) {
+				dev_err(&pdev->dev, "Unable to get gfx freq-corner info for gfx3d_clk!\n");
+				return rc;
+			}
+		}
+
+		clk_ops_gpu = clk_ops_rcg;
+		clk_ops_gpu.pre_set_rate = gpu_pre_set_rate;
+
+		rc = of_msm_clock_register(of_node, msm_clocks_gpu_8996,
+					   ARRAY_SIZE(msm_clocks_gpu_8996));
+		if (rc)
+			return rc;
+	} else {
+		msm_gpucc_8996_v2_fixup();
+		rc = of_get_fmax_vdd_class(pdev, &gfx3d_clk_src_v2.c,
+					   speedbin_str);
+		if (rc) {
+			dev_err(&pdev->dev, "Can't get speed bin for gfx3d_clk_src. Falling back to zero.\n");
+			rc = of_get_fmax_vdd_class(pdev, &gfx3d_clk_src_v2.c,
+						   "qcom,gfxfreq-speedbin0");
+			if (rc) {
+				dev_err(&pdev->dev, "Unable to get gfx freq-corner info for gfx3d_clk!\n");
+				return rc;
+			}
+		}
+
+		clk_ops_gpu = clk_ops_mux_div_clk;
+		clk_ops_gpu.pre_set_rate = gpu_pre_set_rate;
+
+		rc = of_msm_clock_register(of_node, msm_clocks_gpu_8996_v2,
+					   ARRAY_SIZE(msm_clocks_gpu_8996_v2));
+		if (rc)
+			return rc;
+	}
+
+	dev_info(&pdev->dev, "Registered GPU clocks.\n");
+	return 0;
+}
+
+/* DT match table for the GPU CC; const per kernel convention. */
+static const struct of_device_id msm_clock_gpu_match_table[] = {
+	{ .compatible = "qcom,gpucc-8996" },
+	{ .compatible = "qcom,gpucc-8996-v2" },
+	{ .compatible = "qcom,gpucc-8996-v3" },
+	{ .compatible = "qcom,gpucc-8996-v3.0" },
+	{ .compatible = "qcom,gpucc-8996-pro" },
+	{},
+};
+
+/* GPU CC driver; registered from the MMSS probe, not an initcall. */
+static struct platform_driver msm_clock_gpu_driver = {
+	.probe = msm_gpucc_8996_probe,
+	.driver = {
+		.name = "qcom,gpucc-8996",
+		.of_match_table = msm_clock_gpu_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Early registration; the MMSS probe chains the GPU driver itself. */
+int __init msm_mmsscc_8996_init(void)
+{
+	return platform_driver_register(&msm_clock_mmss_driver);
+}
+arch_initcall(msm_mmsscc_8996_init);
diff --git a/drivers/clk/msm/clock-mmss-cobalt.c b/drivers/clk/msm/clock-mmss-cobalt.c
new file mode 100644
index 000000000000..66807366d19d
--- /dev/null
+++ b/drivers/clk/msm/clock-mmss-cobalt.c
@@ -0,0 +1,2810 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-voter.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/clock-alpha-pll.h>
+
+#include <dt-bindings/clock/msm-clocks-cobalt.h>
+#include <dt-bindings/clock/msm-clocks-hwio-cobalt.h>
+
+#include "vdd-level-cobalt.h"
+
+static void __iomem *virt_base;
+
+#define mmsscc_xo_mm_source_val 0
+#define mmsscc_gpll0_mm_source_val 5
+#define mmsscc_gpll0_div_mm_source_val 6
+#define mmpll0_pll_out_mm_source_val 1
+#define mmpll1_pll_out_mm_source_val 2
+#define mmpll3_pll_out_mm_source_val 3
+#define mmpll4_pll_out_mm_source_val 2
+#define mmpll5_pll_out_mm_source_val 2
+#define mmpll6_pll_out_mm_source_val 4
+#define mmpll7_pll_out_mm_source_val 3
+#define mmpll10_pll_out_mm_source_val 4
+#define dsi0phypll_mm_source_val 1
+#define dsi1phypll_mm_source_val 2
+#define hdmiphypll_mm_source_val 1
+#define ext_dp_phy_pll_link_mm_source_val 1
+#define ext_dp_phy_pll_vco_mm_source_val 2
+
+/*
+ * FIXDIV() - encode an RCG divider into its register field value:
+ * hardware value is 2*div - 1 (supports half-integer dividers), with 0
+ * presumably meaning bypass/divide-by-1 — confirm against the RCG spec.
+ */
+#define FIXDIV(div) (div ? (2 * (div) - 1) : (0))
+
+/*
+ * F_MM() - build a clk_freq_tbl entry for an MMSS RCG.  N and D are
+ * stored in inverted (one's-complement) form, with n_val forced to 0
+ * when n == 0 (no MND).  Source select sits in bits 10:8, divider in
+ * bits 4:0.
+ */
+#define F_MM(f, s, div, m, n) \
+	{ \
+		.freq_hz = (f), \
+		.src_clk = &s.c, \
+		.m_val = (m), \
+		.n_val = ~((n)-(m)) * !!(n), \
+		.d_val = ~(n),\
+		.div_src_val = BVAL(4, 0, (int)FIXDIV(div)) \
+			| BVAL(10, 8, s##_mm_source_val), \
+	}
+
+/*
+ * F_SLEW() - like F_MM() but also records the required source PLL
+ * frequency for slewing.  Note it encodes 2*div - 1 directly, without
+ * FIXDIV()'s div == 0 guard, so div must be non-zero here.
+ */
+#define F_SLEW(f, s_f, s, div, m, n) \
+	{ \
+		.freq_hz = (f), \
+		.src_freq = (s_f), \
+		.src_clk = &s.c, \
+		.m_val = (m), \
+		.n_val = ~((n)-(m)) * !!(n), \
+		.d_val = ~(n),\
+		.div_src_val = BVAL(4, 0, (int)(2*(div) - 1)) \
+			| BVAL(10, 8, s##_mm_source_val), \
+	}
+
+/* Externally-owned parents (XO, GPLL0, DP PHY); resolved in probe. */
+DEFINE_EXT_CLK(mmsscc_xo, NULL);
+DEFINE_EXT_CLK(mmsscc_gpll0, NULL);
+DEFINE_EXT_CLK(mmsscc_gpll0_div, NULL);
+DEFINE_EXT_CLK(ext_dp_phy_pll_vco, NULL);
+DEFINE_EXT_CLK(ext_dp_phy_pll_link, NULL);
+
+/* Single digital-rail vdd class used by the FMAX maps below. */
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
+
+/* Shared register bit masks for all fabia alpha PLLs in this block. */
+static struct alpha_pll_masks pll_masks_p = {
+	.lock_mask = BIT(31),
+	.active_mask = BIT(30),
+	.update_mask = BIT(22),
+	.output_mask = 0xf,
+};
+
+/* mmpll0: 808 MHz fixed-rate PLL enabled via APCS voting. */
+static struct pll_vote_clk mmpll0_pll = {
+	.en_reg = (void __iomem *)MMSS_PLL_VOTE_APCS,
+	.en_mask = BIT(0),
+	.status_reg = (void __iomem *)MMSS_MMPLL0_PLL_MODE,
+	.status_mask = BIT(31),
+	.base = &virt_base,
+	.c = {
+		.rate = 808000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll0",
+		.ops = &clk_ops_pll_vote,
+		VDD_DIG_FMAX_MAP2(LOWER, 404000000, NOMINAL, 808000000),
+		CLK_INIT(mmpll0_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll0_pll_out, &mmpll0_pll.c);
+
+/* mmpll1: 812 MHz fixed-rate PLL, also APCS-voted. */
+static struct pll_vote_clk mmpll1_pll = {
+	.en_reg = (void __iomem *)MMSS_PLL_VOTE_APCS,
+	.en_mask = BIT(1),
+	.status_reg = (void __iomem *)MMSS_MMPLL1_PLL_MODE,
+	.status_mask = BIT(31),
+	.base = &virt_base,
+	.c = {
+		.rate = 812000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll1_pll",
+		.ops = &clk_ops_pll_vote,
+		VDD_DIG_FMAX_MAP2(LOWER, 406000000, NOMINAL, 812000000),
+		CLK_INIT(mmpll1_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll1_pll_out, &mmpll1_pll.c);
+
+/*
+ * Fixed-rate fabia alpha PLLs.  Each exposes a *_pll_out external clock
+ * used as an RCG source (selector values in the *_mm_source_val defines
+ * near the top of this file).
+ */
+static struct alpha_pll_clk mmpll3_pll = {
+	.offset = MMSS_MMPLL3_PLL_MODE,
+	.masks = &pll_masks_p,
+	.enable_config = 0x1,
+	.base = &virt_base,
+	.is_fabia = true,
+	.c = {
+		.rate = 930000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll3_pll",
+		.ops = &clk_ops_fixed_fabia_alpha_pll,
+		VDD_DIG_FMAX_MAP2(LOWER, 465000000, LOW, 930000000),
+		CLK_INIT(mmpll3_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll3_pll_out, &mmpll3_pll.c);
+
+static struct alpha_pll_clk mmpll4_pll = {
+	.offset = MMSS_MMPLL4_PLL_MODE,
+	.masks = &pll_masks_p,
+	.enable_config = 0x1,
+	.base = &virt_base,
+	.is_fabia = true,
+	.c = {
+		.rate = 768000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll4_pll",
+		.ops = &clk_ops_fixed_fabia_alpha_pll,
+		VDD_DIG_FMAX_MAP2(LOWER, 384000000, LOW, 768000000),
+		CLK_INIT(mmpll4_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll4_pll_out, &mmpll4_pll.c);
+
+static struct alpha_pll_clk mmpll5_pll = {
+	.offset = MMSS_MMPLL5_PLL_MODE,
+	.masks = &pll_masks_p,
+	.enable_config = 0x1,
+	.base = &virt_base,
+	.is_fabia = true,
+	.c = {
+		.rate = 825000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll5_pll",
+		.ops = &clk_ops_fixed_fabia_alpha_pll,
+		VDD_DIG_FMAX_MAP2(LOWER, 412500000, LOW, 825000000),
+		CLK_INIT(mmpll5_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll5_pll_out, &mmpll5_pll.c);
+
+static struct alpha_pll_clk mmpll6_pll = {
+	.offset = MMSS_MMPLL6_PLL_MODE,
+	.masks = &pll_masks_p,
+	.enable_config = 0x1,
+	.base = &virt_base,
+	.is_fabia = true,
+	.c = {
+		.rate = 720000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll6_pll",
+		.ops = &clk_ops_fixed_fabia_alpha_pll,
+		VDD_DIG_FMAX_MAP2(LOWER, 360000000, NOMINAL, 720000000),
+		CLK_INIT(mmpll6_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll6_pll_out, &mmpll6_pll.c);
+
+static struct alpha_pll_clk mmpll7_pll = {
+	.offset = MMSS_MMPLL7_PLL_MODE,
+	.masks = &pll_masks_p,
+	.enable_config = 0x1,
+	.base = &virt_base,
+	.is_fabia = true,
+	.c = {
+		.rate = 960000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll7_pll",
+		.ops = &clk_ops_fixed_fabia_alpha_pll,
+		VDD_DIG_FMAX_MAP2(LOWER, 480000000, NOMINAL, 960000000),
+		CLK_INIT(mmpll7_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll7_pll_out, &mmpll7_pll.c);
+
+static struct alpha_pll_clk mmpll10_pll = {
+	.offset = MMSS_MMPLL10_PLL_MODE,
+	.masks = &pll_masks_p,
+	.enable_config = 0x1,
+	.base = &virt_base,
+	.is_fabia = true,
+	.c = {
+		.rate = 576000000,
+		.parent = &mmsscc_xo.c,
+		.dbg_name = "mmpll10_pll",
+		.ops = &clk_ops_fixed_fabia_alpha_pll,
+		VDD_DIG_FMAX_MAP2(LOWER, 288000000, NOMINAL, 576000000),
+		CLK_INIT(mmpll10_pll.c),
+	},
+};
+DEFINE_EXT_CLK(mmpll10_pll_out, &mmpll10_pll.c);
+
+/* MMSS AHB bus clock: one frequency per supported voltage corner. */
+static struct clk_freq_tbl ftbl_ahb_clk_src[] = {
+	F_MM(  19200000,          mmsscc_xo,    1,    0,     0),
+	F_MM(  40000000,       mmsscc_gpll0,   15,    0,     0),
+	F_MM(  80800000,     mmpll0_pll_out,   10,    0,     0),
+	F_END
+};
+
+static struct rcg_clk ahb_clk_src = {
+	.cmd_rcgr_reg = MMSS_AHB_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_ahb_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "ahb_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP3(LOWER, 19200000, LOW, 40000000,
+					NOMINAL, 80800000),
+		CLK_INIT(ahb_clk_src.c),
+	},
+};
+
+/* CSI core frequencies; the _vq variant adds a 300 MHz gpll0 step and
+ * is presumably wired up by SoC-variant fixup code not visible here. */
+static struct clk_freq_tbl ftbl_csi_clk_src[] = {
+	F_MM( 164571429,    mmpll10_pll_out,  3.5,    0,     0),
+	F_MM( 256000000,     mmpll4_pll_out,    3,    0,     0),
+	F_MM( 384000000,     mmpll4_pll_out,    2,    0,     0),
+	F_MM( 576000000,    mmpll10_pll_out,    1,    0,     0),
+	F_END
+};
+
+static struct clk_freq_tbl ftbl_csi_clk_src_vq[] = {
+	F_MM( 164571429,    mmpll10_pll_out,  3.5,    0,     0),
+	F_MM( 256000000,     mmpll4_pll_out,    3,    0,     0),
+	F_MM( 300000000,       mmsscc_gpll0,    2,    0,     0),
+	F_MM( 384000000,     mmpll4_pll_out,    2,    0,     0),
+	F_MM( 576000000,    mmpll10_pll_out,    1,    0,     0),
+	F_END
+};
+
+static struct rcg_clk csi0_clk_src = {
+	.cmd_rcgr_reg = MMSS_CSI0_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_csi_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "csi0_clk_src",
+		.ops = &clk_ops_rcg,
+		VDD_DIG_FMAX_MAP4(LOWER, 164570000, LOW, 256000000,
+					NOMINAL, 384000000, HIGH, 576000000),
+		CLK_INIT(csi0_clk_src.c),
+	},
+};
+
+static struct clk_freq_tbl ftbl_vfe_clk_src[] = {
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 300000000, mmsscc_gpll0, 2, 0, 0),
+ F_MM( 320000000, mmpll7_pll_out, 3, 0, 0),
+ F_MM( 384000000, mmpll4_pll_out, 2, 0, 0),
+ F_MM( 576000000, mmpll10_pll_out, 1, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_vfe_clk_src_vq[] = {
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 404000000, mmpll0_pll_out, 2, 0, 0),
+ F_MM( 480000000, mmpll7_pll_out, 2, 0, 0),
+ F_MM( 576000000, mmpll10_pll_out, 1, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+
+static struct rcg_clk vfe0_clk_src = {
+ .cmd_rcgr_reg = MMSS_VFE0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_vfe_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "vfe0_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 200000000, LOW, 384000000,
+ NOMINAL, 576000000, HIGH, 600000000),
+ CLK_INIT(vfe0_clk_src.c),
+ },
+};
+
+static struct rcg_clk vfe1_clk_src = {
+ .cmd_rcgr_reg = MMSS_VFE1_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_vfe_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "vfe1_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 200000000, LOW, 384000000,
+ NOMINAL, 576000000, HIGH, 600000000),
+ CLK_INIT(vfe1_clk_src.c),
+ },
+};
+
+/* Display (MDP), MAXI bus, camera post-processor (CPP) and JPEG encoder
+ * root clocks. All use the HID (half-integer divider) set_rate path. */
+static struct clk_freq_tbl ftbl_mdp_clk_src[] = {
+ F_MM( 85714286, mmsscc_gpll0, 7, 0, 0),
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 171428571, mmsscc_gpll0, 3.5, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 275000000, mmpll5_pll_out, 3, 0, 0),
+ F_MM( 300000000, mmsscc_gpll0, 2, 0, 0),
+ F_MM( 330000000, mmpll5_pll_out, 2.5, 0, 0),
+ F_MM( 412500000, mmpll5_pll_out, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk mdp_clk_src = {
+ .cmd_rcgr_reg = MMSS_MDP_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_mdp_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mdp_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 171430000, LOW, 275000000,
+ NOMINAL, 330000000, HIGH, 412500000),
+ CLK_INIT(mdp_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_maxi_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 171428571, mmsscc_gpll0, 3.5, 0, 0),
+ F_MM( 323200000, mmpll0_pll_out, 2.5, 0, 0),
+ F_MM( 406000000, mmpll1_pll_out, 2, 0, 0),
+ F_END
+};
+
+/* _vq variant adds a 240 MHz gpll0 step; presumably for a later chip
+ * version — selection happens outside this view (TODO confirm). */
+static struct clk_freq_tbl ftbl_maxi_clk_src_vq[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_MM( 75000000, mmsscc_gpll0_div, 4, 0, 0),
+ F_MM( 171428571, mmsscc_gpll0, 3.5, 0, 0),
+ F_MM( 240000000, mmsscc_gpll0, 2.5, 0, 0),
+ F_MM( 323200000, mmpll0_pll_out, 2.5, 0, 0),
+ F_MM( 406000000, mmpll1_pll_out, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk maxi_clk_src = {
+ .cmd_rcgr_reg = MMSS_MAXI_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_maxi_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "maxi_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 75000000, LOW, 171428571,
+ NOMINAL, 323200000, HIGH, 406000000),
+ CLK_INIT(maxi_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_cpp_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 576000000, mmpll10_pll_out, 1, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_cpp_clk_src_vq[] = {
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 480000000, mmpll7_pll_out, 2, 0, 0),
+ F_MM( 576000000, mmpll10_pll_out, 1, 0, 0),
+ F_MM( 600000000, mmsscc_gpll0, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk cpp_clk_src = {
+ .cmd_rcgr_reg = MMSS_CPP_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_cpp_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "cpp_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 576000000, HIGH, 600000000),
+ CLK_INIT(cpp_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_jpeg0_clk_src[] = {
+ F_MM( 75000000, mmsscc_gpll0, 8, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 480000000, mmpll7_pll_out, 2, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_jpeg0_clk_src_vq[] = {
+ F_MM( 75000000, mmsscc_gpll0, 8, 0, 0),
+ F_MM( 150000000, mmsscc_gpll0, 4, 0, 0),
+ F_MM( 320000000, mmpll7_pll_out, 3, 0, 0),
+ F_MM( 480000000, mmpll7_pll_out, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk jpeg0_clk_src = {
+ .cmd_rcgr_reg = MMSS_JPEG0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_jpeg0_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "jpeg0_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 75000000, LOW, 150000000,
+ NOMINAL, 480000000),
+ CLK_INIT(jpeg0_clk_src.c),
+ },
+};
+
+/* Display rotator, video decoder/encoder core, and CSI PHY root clocks,
+ * plus the CSI1-3 RCGs which reuse ftbl_csi_clk_src from above. */
+static struct clk_freq_tbl ftbl_rot_clk_src[] = {
+ F_MM( 171428571, mmsscc_gpll0, 3.5, 0, 0),
+ F_MM( 275000000, mmpll5_pll_out, 3, 0, 0),
+ F_MM( 330000000, mmpll5_pll_out, 2.5, 0, 0),
+ F_MM( 412500000, mmpll5_pll_out, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk rot_clk_src = {
+ .cmd_rcgr_reg = MMSS_ROT_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_rot_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "rot_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 171430000, LOW, 275000000,
+ NOMINAL, 330000000, HIGH, 412500000),
+ CLK_INIT(rot_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_video_core_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_MM( 186000000, mmpll3_pll_out, 5, 0, 0),
+ F_MM( 360000000, mmpll6_pll_out, 2, 0, 0),
+ F_MM( 465000000, mmpll3_pll_out, 2, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_video_core_clk_src_vq[] = {
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 269330000, mmpll0_pll_out, 3, 0, 0),
+ F_MM( 404000000, mmpll0_pll_out, 2, 0, 0),
+ F_MM( 444000000, mmpll6_pll_out, 2, 0, 0),
+ F_MM( 533000000, mmpll3_pll_out, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk video_core_clk_src = {
+ .cmd_rcgr_reg = MMSS_VIDEO_CORE_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_video_core_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "video_core_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 186000000,
+ NOMINAL, 360000000, HIGH, 465000000),
+ CLK_INIT(video_core_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_csiphy_clk_src[] = {
+ F_MM( 164570000, mmpll10_pll_out, 3.5, 0, 0),
+ F_MM( 256000000, mmpll4_pll_out, 3, 0, 0),
+ F_MM( 384000000, mmpll4_pll_out, 2, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_csiphy_clk_src_vq[] = {
+ F_MM( 164570000, mmpll10_pll_out, 3.5, 0, 0),
+ F_MM( 256000000, mmpll4_pll_out, 3, 0, 0),
+ F_MM( 300000000, mmsscc_gpll0, 2, 0, 0),
+ F_MM( 384000000, mmpll4_pll_out, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk csiphy_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSIPHY_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csiphy_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csiphy_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 164570000, LOW, 256000000,
+ NOMINAL, 384000000),
+ CLK_INIT(csiphy_clk_src.c),
+ },
+};
+
+static struct rcg_clk csi1_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSI1_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csi_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csi1_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 164570000, LOW, 256000000,
+ NOMINAL, 384000000, HIGH, 576000000),
+ CLK_INIT(csi1_clk_src.c),
+ },
+};
+
+static struct rcg_clk csi2_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSI2_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csi_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csi2_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 164570000, LOW, 256000000,
+ NOMINAL, 384000000, HIGH, 576000000),
+ CLK_INIT(csi2_clk_src.c),
+ },
+};
+
+static struct rcg_clk csi3_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSI3_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csi_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csi3_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 164570000, LOW, 256000000,
+ NOMINAL, 384000000, HIGH, 576000000),
+ CLK_INIT(csi3_clk_src.c),
+ },
+};
+
+/* Face-detect core clock. Three table variants exist (base, _v2, _vq);
+ * only the base table is wired up here — the others are presumably
+ * substituted per chip revision elsewhere (TODO confirm). */
+static struct clk_freq_tbl ftbl_fd_core_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 576000000, mmpll10_pll_out, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_fd_core_clk_src_v2[] = {
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_MM( 404000000, mmpll0_pll_out, 2, 0, 0),
+ F_MM( 480000000, mmpll7_pll_out, 2, 0, 0),
+ F_MM( 576000000, mmpll10_pll_out, 1, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_fd_core_clk_src_vq[] = {
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 400000000, mmsscc_gpll0, 1.5, 0, 0),
+ F_MM( 576000000, mmpll10_pll_out, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk fd_core_clk_src = {
+ .cmd_rcgr_reg = MMSS_FD_CORE_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_fd_core_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "fd_core_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 576000000),
+ CLK_INIT(fd_core_clk_src.c),
+ },
+};
+
+DEFINE_EXT_CLK(ext_byte0_clk_src, NULL);
+DEFINE_EXT_CLK(ext_byte1_clk_src, NULL);
+/*
+ * DSI byte clock mux table: XO plus the two external DSI PHY PLL byte
+ * clocks. Bits [10:8] of div_src_val select the source; bits [4:0] hold
+ * the divider, written explicitly as 0 (divide-by-1) for every row.
+ * The dsi0phypll row previously omitted the "| BVAL(4, 0, 0)" term that
+ * the XO and dsi1phypll rows carry; BVAL(4, 0, 0) is 0, so the register
+ * value is unchanged — this only makes the table uniform and the
+ * divider-field intent explicit.
+ */
+static struct clk_freq_tbl ftbl_byte_clk_src[] = {
+ {
+ .div_src_val = BVAL(10, 8, mmsscc_xo_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &mmsscc_xo.c,
+ .freq_hz = 0,
+ },
+ {
+ .div_src_val = BVAL(10, 8, dsi0phypll_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &ext_byte0_clk_src.c,
+ .freq_hz = 0,
+ },
+ {
+ .div_src_val = BVAL(10, 8, dsi1phypll_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &ext_byte1_clk_src.c,
+ .freq_hz = 0,
+ },
+ F_END
+};
+
+/* DSI0 byte clock RCG. Rate tracking is disabled (CLKFLAG_NO_RATE_CACHE)
+ * because the external PHY PLL can change rate behind the framework. */
+static struct rcg_clk byte0_clk_src = {
+ .cmd_rcgr_reg = MMSS_BYTE0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .current_freq = ftbl_byte_clk_src,
+ .freq_tbl = ftbl_byte_clk_src,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "byte0_clk_src",
+ .parent = &ext_byte0_clk_src.c,
+ .ops = &clk_ops_byte_multiparent,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ VDD_DIG_FMAX_MAP3(LOWER, 150000000, LOW, 240000000,
+ NOMINAL, 357140000),
+ CLK_INIT(byte0_clk_src.c),
+ },
+};
+
+/* DSI1 byte clock RCG; same mux table as byte0 above. */
+static struct rcg_clk byte1_clk_src = {
+ .cmd_rcgr_reg = MMSS_BYTE1_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .current_freq = ftbl_byte_clk_src,
+ .freq_tbl = ftbl_byte_clk_src,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "byte1_clk_src",
+ .parent = &ext_byte1_clk_src.c,
+ .ops = &clk_ops_byte_multiparent,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ VDD_DIG_FMAX_MAP3(LOWER, 150000000, LOW, 240000000,
+ NOMINAL, 357140000),
+ CLK_INIT(byte1_clk_src.c),
+ },
+};
+
+/* DSI pixel clock mux: XO plus the two external DSI PHY PLL pixel
+ * outputs. Source select in bits [10:8], divider field [4:0] = 0. */
+DEFINE_EXT_CLK(ext_pclk0_clk_src, NULL);
+DEFINE_EXT_CLK(ext_pclk1_clk_src, NULL);
+static struct clk_freq_tbl ftbl_pclk_clk_src[] = {
+ {
+ .div_src_val = BVAL(10, 8, mmsscc_xo_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &mmsscc_xo.c,
+ .freq_hz = 0,
+ },
+ {
+ .div_src_val = BVAL(10, 8, dsi0phypll_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &ext_pclk0_clk_src.c,
+ .freq_hz = 0,
+ },
+ {
+ .div_src_val = BVAL(10, 8, dsi1phypll_mm_source_val)
+ | BVAL(4, 0, 0),
+ .src_clk = &ext_pclk1_clk_src.c,
+ .freq_hz = 0,
+ },
+ F_END
+};
+
+/* DSI pixel clock RCGs use the MND path (set_rate_mnd) for fractional
+ * pixel rates; rate cache disabled for the same reason as the byte RCGs. */
+static struct rcg_clk pclk0_clk_src = {
+ .cmd_rcgr_reg = MMSS_PCLK0_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .current_freq = ftbl_pclk_clk_src,
+ .freq_tbl = ftbl_pclk_clk_src,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "pclk0_clk_src",
+ .parent = &ext_pclk0_clk_src.c,
+ .ops = &clk_ops_pixel_multiparent,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ VDD_DIG_FMAX_MAP3(LOWER, 184000000, LOW, 295000000,
+ NOMINAL, 610000000),
+ CLK_INIT(pclk0_clk_src.c),
+ },
+};
+
+static struct rcg_clk pclk1_clk_src = {
+ .cmd_rcgr_reg = MMSS_PCLK1_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .current_freq = ftbl_pclk_clk_src,
+ .freq_tbl = ftbl_pclk_clk_src,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "pclk1_clk_src",
+ .parent = &ext_pclk1_clk_src.c,
+ .ops = &clk_ops_pixel_multiparent,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ VDD_DIG_FMAX_MAP3(LOWER, 184000000, LOW, 295000000,
+ NOMINAL, 610000000),
+ CLK_INIT(pclk1_clk_src.c),
+ },
+};
+
+/* Video subcore clocks. These RCGs are not locally controlled, hence the
+ * non_local_control_timeout used when waiting for the remote owner. */
+static struct clk_freq_tbl ftbl_video_subcore_clk_src[] = {
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_MM( 186000000, mmpll3_pll_out, 5, 0, 0),
+ F_MM( 360000000, mmpll6_pll_out, 2, 0, 0),
+ F_MM( 465000000, mmpll3_pll_out, 2, 0, 0),
+ F_END
+};
+
+static struct clk_freq_tbl ftbl_video_subcore_clk_src_vq[] = {
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 269330000, mmpll0_pll_out, 3, 0, 0),
+ F_MM( 404000000, mmpll0_pll_out, 2, 0, 0),
+ F_MM( 444000000, mmpll6_pll_out, 2, 0, 0),
+ F_MM( 533000000, mmpll3_pll_out, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk video_subcore0_clk_src = {
+ .cmd_rcgr_reg = MMSS_VIDEO_SUBCORE0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_video_subcore_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .non_local_control_timeout = 1000,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "video_subcore0_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 186000000,
+ NOMINAL, 360000000, HIGH, 465000000),
+ CLK_INIT(video_subcore0_clk_src.c),
+ },
+};
+
+static struct rcg_clk video_subcore1_clk_src = {
+ .cmd_rcgr_reg = MMSS_VIDEO_SUBCORE1_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_video_subcore_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .non_local_control_timeout = 1000,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "video_subcore1_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP4(LOWER, 100000000, LOW, 186000000,
+ NOMINAL, 360000000, HIGH, 465000000),
+ CLK_INIT(video_subcore1_clk_src.c),
+ },
+};
+
+/* Camera control interface (I2C-like) clock, MND path. */
+static struct clk_freq_tbl ftbl_cci_clk_src[] = {
+ F_MM( 37500000, mmsscc_gpll0, 16, 0, 0),
+ F_MM( 50000000, mmsscc_gpll0, 12, 0, 0),
+ F_MM( 100000000, mmsscc_gpll0, 6, 0, 0),
+ F_END
+};
+
+static struct rcg_clk cci_clk_src = {
+ .cmd_rcgr_reg = MMSS_CCI_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_cci_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "cci_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 37500000, LOW, 50000000,
+ NOMINAL, 100000000),
+ CLK_INIT(cci_clk_src.c),
+ },
+};
+
+/* Camera general-purpose timer clocks; M/N counters give the very low
+ * rates (10 kHz and 24 kHz) from the 19.2 MHz XO. */
+static struct clk_freq_tbl ftbl_camss_gp_clk_src[] = {
+ F_MM( 10000, mmsscc_xo, 16, 1, 120),
+ F_MM( 24000, mmsscc_xo, 16, 1, 50),
+ F_MM( 6000000, mmsscc_gpll0, 10, 1, 10),
+ F_MM( 12000000, mmsscc_gpll0, 10, 1, 5),
+ F_MM( 13000000, mmsscc_gpll0, 4, 13, 150),
+ F_MM( 24000000, mmsscc_gpll0, 5, 1, 5),
+ F_END
+};
+
+static struct rcg_clk camss_gp0_clk_src = {
+ .cmd_rcgr_reg = MMSS_CAMSS_GP0_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_camss_gp_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_gp0_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+ NOMINAL, 200000000),
+ CLK_INIT(camss_gp0_clk_src.c),
+ },
+};
+
+static struct rcg_clk camss_gp1_clk_src = {
+ .cmd_rcgr_reg = MMSS_CAMSS_GP1_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_camss_gp_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "camss_gp1_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 50000000, LOW, 100000000,
+ NOMINAL, 200000000),
+ CLK_INIT(camss_gp1_clk_src.c),
+ },
+};
+
+/* Camera sensor master clocks (MCLK0-3), MND path; all four share one
+ * frequency table. */
+static struct clk_freq_tbl ftbl_mclk_clk_src[] = {
+ F_MM( 4800000, mmsscc_xo, 4, 0, 0),
+ F_MM( 6000000, mmsscc_gpll0_div, 10, 1, 5),
+ F_MM( 8000000, mmsscc_gpll0_div, 1, 2, 75),
+ F_MM( 9600000, mmsscc_xo, 2, 0, 0),
+ F_MM( 16666667, mmsscc_gpll0_div, 2, 1, 9),
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_MM( 24000000, mmsscc_gpll0_div, 1, 2, 25),
+ F_MM( 33333333, mmsscc_gpll0_div, 1, 1, 9),
+ F_MM( 48000000, mmsscc_gpll0, 1, 2, 25),
+ F_MM( 66666667, mmsscc_gpll0, 1, 1, 9),
+ F_END
+};
+
+static struct rcg_clk mclk0_clk_src = {
+ .cmd_rcgr_reg = MMSS_MCLK0_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mclk0_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 33333333, LOW, 66666667,
+ NOMINAL, 68571429),
+ CLK_INIT(mclk0_clk_src.c),
+ },
+};
+
+static struct rcg_clk mclk1_clk_src = {
+ .cmd_rcgr_reg = MMSS_MCLK1_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mclk1_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 33333333, LOW, 66666667,
+ NOMINAL, 68571429),
+ CLK_INIT(mclk1_clk_src.c),
+ },
+};
+
+static struct rcg_clk mclk2_clk_src = {
+ .cmd_rcgr_reg = MMSS_MCLK2_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mclk2_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 33333333, LOW, 66666667,
+ NOMINAL, 68571429),
+ CLK_INIT(mclk2_clk_src.c),
+ },
+};
+
+static struct rcg_clk mclk3_clk_src = {
+ .cmd_rcgr_reg = MMSS_MCLK3_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mclk3_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 33333333, LOW, 66666667,
+ NOMINAL, 68571429),
+ CLK_INIT(mclk3_clk_src.c),
+ },
+};
+
+/* CSI PHY timer clocks (CSI0/1/2), HID path, shared table. */
+static struct clk_freq_tbl ftbl_csiphytimer_clk_src[] = {
+ F_MM( 200000000, mmsscc_gpll0, 3, 0, 0),
+ F_MM( 269333333, mmpll0_pll_out, 3, 0, 0),
+ F_END
+};
+
+static struct rcg_clk csi0phytimer_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSI0PHYTIMER_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csiphytimer_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csi0phytimer_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 269333333),
+ CLK_INIT(csi0phytimer_clk_src.c),
+ },
+};
+
+static struct rcg_clk csi1phytimer_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSI1PHYTIMER_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csiphytimer_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csi1phytimer_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 269333333),
+ CLK_INIT(csi1phytimer_clk_src.c),
+ },
+};
+
+static struct rcg_clk csi2phytimer_clk_src = {
+ .cmd_rcgr_reg = MMSS_CSI2PHYTIMER_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_csiphytimer_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "csi2phytimer_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP3(LOWER, 100000000, LOW, 200000000,
+ NOMINAL, 269333333),
+ CLK_INIT(csi2phytimer_clk_src.c),
+ },
+};
+
+/* DisplayPort global time counter clock. */
+static struct clk_freq_tbl ftbl_dp_gtc_clk_src[] = {
+ F_MM( 300000000, mmsscc_gpll0, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk dp_gtc_clk_src = {
+ .cmd_rcgr_reg = MMSS_DP_GTC_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_dp_gtc_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "dp_gtc_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 40000000, LOW, 300000000),
+ CLK_INIT(dp_gtc_clk_src.c),
+ },
+};
+
+/* DSI escape-mode clocks: XO-only. */
+static struct clk_freq_tbl ftbl_esc_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk esc0_clk_src = {
+ .cmd_rcgr_reg = MMSS_ESC0_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_esc_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "esc0_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
+ CLK_INIT(esc0_clk_src.c),
+ },
+};
+
+static struct rcg_clk esc1_clk_src = {
+ .cmd_rcgr_reg = MMSS_ESC1_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_esc_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "esc1_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
+ CLK_INIT(esc1_clk_src.c),
+ },
+};
+
+/* HDMI external pixel clock: sourced solely from the external HDMI PHY
+ * PLL; only the source select (bits [10:8]) is programmed. */
+DEFINE_EXT_CLK(ext_extpclk_clk_src, NULL);
+static struct clk_freq_tbl ftbl_extpclk_clk_src[] = {
+ {
+ .div_src_val = BVAL(10, 8, hdmiphypll_mm_source_val),
+ .src_clk = &ext_extpclk_clk_src.c,
+ },
+ F_END
+};
+
+static struct rcg_clk extpclk_clk_src = {
+ .cmd_rcgr_reg = MMSS_EXTPCLK_CMD_RCGR,
+ .current_freq = ftbl_extpclk_clk_src,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "extpclk_clk_src",
+ .parent = &ext_extpclk_clk_src.c,
+ .ops = &clk_ops_byte,
+ VDD_DIG_FMAX_MAP3(LOWER, 150000000, LOW, 300000000,
+ NOMINAL, 600000000),
+ CLK_INIT(extpclk_clk_src.c),
+ },
+};
+
+/* XO-only utility clocks: HDMI core, MDP vsync, DP AUX channel. */
+static struct clk_freq_tbl ftbl_hdmi_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk hdmi_clk_src = {
+ .cmd_rcgr_reg = MMSS_HDMI_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_hdmi_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "hdmi_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
+ CLK_INIT(hdmi_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_vsync_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk vsync_clk_src = {
+ .cmd_rcgr_reg = MMSS_VSYNC_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_vsync_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "vsync_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
+ CLK_INIT(vsync_clk_src.c),
+ },
+};
+
+static struct clk_freq_tbl ftbl_dp_aux_clk_src[] = {
+ F_MM( 19200000, mmsscc_xo, 1, 0, 0),
+ F_END
+};
+
+static struct rcg_clk dp_aux_clk_src = {
+ .cmd_rcgr_reg = MMSS_DP_AUX_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_dp_aux_clk_src,
+ .current_freq = &rcg_dummy_freq,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "dp_aux_clk_src",
+ .ops = &clk_ops_rcg,
+ VDD_DIG_FMAX_MAP2(LOWER, 19200000, NOMINAL, 19200000),
+ CLK_INIT(dp_aux_clk_src.c),
+ },
+};
+
+/* DP link clock: F_SLEW rows carry both the link rate and the PHY PLL
+ * rate for the three standard DP link speeds (RBR/HBR/HBR2). */
+static struct clk_freq_tbl ftbl_dp_link_clk_src[] = {
+ F_SLEW( 162000000, 324000000, ext_dp_phy_pll_link, 2, 0, 0),
+ F_SLEW( 270000000, 540000000, ext_dp_phy_pll_link, 2, 0, 0),
+ F_SLEW( 540000000, 1080000000, ext_dp_phy_pll_link, 2, 0, 0),
+ F_END
+};
+
+static struct rcg_clk dp_link_clk_src = {
+ .cmd_rcgr_reg = MMSS_DP_LINK_CMD_RCGR,
+ .set_rate = set_rate_hid,
+ .freq_tbl = ftbl_dp_link_clk_src,
+ .current_freq = ftbl_dp_link_clk_src,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "dp_link_clk_src",
+ .ops = &clk_ops_rcg,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ VDD_DIG_FMAX_MAP3(LOWER, 162000000, LOW, 270000000,
+ NOMINAL, 540000000),
+ CLK_INIT(dp_link_clk_src.c),
+ },
+};
+
+/*
+ * Current understanding is that the DP PLL is going to be configured by using
+ * the set_rate ops for the dp_link_clk_src and dp_pixel_clk_src. When set_rate
+ * is called on this RCG, the rate call never makes it to the external DP
+ * clocks.
+ */
+static struct clk_freq_tbl ftbl_dp_crypto_clk_src[] = {
+ F_MM( 101250000, ext_dp_phy_pll_link, 1, 5, 16),
+ F_MM( 168750000, ext_dp_phy_pll_link, 1, 5, 16),
+ F_MM( 337500000, ext_dp_phy_pll_link, 1, 5, 16),
+ F_END
+};
+
+static struct rcg_clk dp_crypto_clk_src = {
+ .cmd_rcgr_reg = MMSS_DP_CRYPTO_CMD_RCGR,
+ .set_rate = set_rate_mnd,
+ .freq_tbl = ftbl_dp_crypto_clk_src,
+ .current_freq = ftbl_dp_crypto_clk_src,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "dp_crypto_clk_src",
+ .ops = &clk_ops_rcg_mnd,
+ VDD_DIG_FMAX_MAP3(LOWER, 101250000, LOW, 168750000,
+ NOMINAL, 337500000),
+ CLK_INIT(dp_crypto_clk_src.c),
+ },
+};
+
+/*
+ * Branch (gate) clocks. has_sibling = 1 presumably marks branches that
+ * share their source with other branches so rate requests are not
+ * propagated (framework semantics — defined in the clock-local2 core).
+ * The SMMU branches skip the halt check on disable and verify only the
+ * enable bit, since the SMMU side may be powered down independently.
+ */
+static struct branch_clk mmss_bimc_smmu_ahb_clk = {
+ .cbcr_reg = MMSS_BIMC_SMMU_AHB_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .no_halt_check_on_disable = true,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_bimc_smmu_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_bimc_smmu_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_bimc_smmu_axi_clk = {
+ .cbcr_reg = MMSS_BIMC_SMMU_AXI_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .no_halt_check_on_disable = true,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_bimc_smmu_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_bimc_smmu_axi_clk.c),
+ },
+};
+
+static struct branch_clk mmss_snoc_dvm_axi_clk = {
+ .cbcr_reg = MMSS_SNOC_DVM_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_snoc_dvm_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_snoc_dvm_axi_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_cci_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_CCI_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_cci_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_cci_ahb_clk.c),
+ },
+};
+
+/* Sole consumer of cci_clk_src (has_sibling = 0): rate requests flow to
+ * the parent RCG. */
+static struct branch_clk mmss_camss_cci_clk = {
+ .cbcr_reg = MMSS_CAMSS_CCI_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_cci_clk",
+ .parent = &cci_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_cci_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_cpp_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_CPP_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_cpp_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_cpp_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_cpp_clk = {
+ .cbcr_reg = MMSS_CAMSS_CPP_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_cpp_clk",
+ .parent = &cpp_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_cpp_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_cpp_axi_clk = {
+ .cbcr_reg = MMSS_CAMSS_CPP_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_cpp_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_cpp_axi_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_cpp_vbif_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_CPP_VBIF_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_cpp_vbif_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_cpp_vbif_ahb_clk.c),
+ },
+};
+
+/*
+ * Per-CSID branch clocks, one group per CSI receiver. Each group has the
+ * same shape: a CPHY CSID branch off csiphy_clk_src, an AHB branch, the
+ * main CSI branch (sole consumer of its csiN_clk_src RCG), and the
+ * PIX/RDI path branches which share that same RCG.
+ */
+static struct branch_clk mmss_camss_cphy_csid0_clk = {
+ .cbcr_reg = MMSS_CAMSS_CPHY_CSID0_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_cphy_csid0_clk",
+ .parent = &csiphy_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_cphy_csid0_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi0_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI0_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi0_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi0_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi0_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi0_clk",
+ .parent = &csi0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi0_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi0pix_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI0PIX_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi0pix_clk",
+ .parent = &csi0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi0pix_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi0rdi_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI0RDI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi0rdi_clk",
+ .parent = &csi0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi0rdi_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_cphy_csid1_clk = {
+ .cbcr_reg = MMSS_CAMSS_CPHY_CSID1_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_cphy_csid1_clk",
+ .parent = &csiphy_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_cphy_csid1_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi1_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI1_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi1_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi1_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi1_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi1_clk",
+ .parent = &csi1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi1_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi1pix_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI1PIX_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi1pix_clk",
+ .parent = &csi1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi1pix_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi1rdi_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI1RDI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi1rdi_clk",
+ .parent = &csi1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi1rdi_clk.c),
+ },
+};
+
+/* CSI2 and CSI3 branch groups — same pattern as the CSI0/CSI1 groups. */
+static struct branch_clk mmss_camss_cphy_csid2_clk = {
+ .cbcr_reg = MMSS_CAMSS_CPHY_CSID2_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_cphy_csid2_clk",
+ .parent = &csiphy_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_cphy_csid2_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi2_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI2_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi2_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi2_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi2_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI2_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi2_clk",
+ .parent = &csi2_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi2_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi2pix_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI2PIX_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi2pix_clk",
+ .parent = &csi2_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi2pix_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi2rdi_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI2RDI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi2rdi_clk",
+ .parent = &csi2_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi2rdi_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_cphy_csid3_clk = {
+ .cbcr_reg = MMSS_CAMSS_CPHY_CSID3_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_cphy_csid3_clk",
+ .parent = &csiphy_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_cphy_csid3_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi3_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI3_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi3_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi3_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi3_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI3_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi3_clk",
+ .parent = &csi3_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi3_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi3pix_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI3PIX_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi3pix_clk",
+ .parent = &csi3_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi3pix_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi3rdi_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI3RDI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi3rdi_clk",
+ .parent = &csi3_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi3rdi_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi_vfe0_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI_VFE0_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi_vfe0_clk",
+ .parent = &vfe0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi_vfe0_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi_vfe1_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI_VFE1_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi_vfe1_clk",
+ .parent = &vfe1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi_vfe1_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csiphy0_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSIPHY0_CBCR,
+ .has_sibling = 0,
+ .aggr_sibling_rates = true,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csiphy0_clk",
+ .parent = &csiphy_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csiphy0_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csiphy1_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSIPHY1_CBCR,
+ .has_sibling = 0,
+ .aggr_sibling_rates = true,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csiphy1_clk",
+ .parent = &csiphy_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csiphy1_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csiphy2_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSIPHY2_CBCR,
+ .has_sibling = 0,
+ .aggr_sibling_rates = true,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csiphy2_clk",
+ .parent = &csiphy_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csiphy2_clk.c),
+ },
+};
+
+static struct branch_clk mmss_fd_ahb_clk = {
+ .cbcr_reg = MMSS_FD_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_fd_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_fd_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_fd_core_clk = {
+ .cbcr_reg = MMSS_FD_CORE_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_fd_core_clk",
+ .parent = &fd_core_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_fd_core_clk.c),
+ },
+};
+
+static struct branch_clk mmss_fd_core_uar_clk = {
+ .cbcr_reg = MMSS_FD_CORE_UAR_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_fd_core_uar_clk",
+ .parent = &fd_core_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_fd_core_uar_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_gp0_clk = {
+ .cbcr_reg = MMSS_CAMSS_GP0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_gp0_clk",
+ .parent = &camss_gp0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_gp0_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_gp1_clk = {
+ .cbcr_reg = MMSS_CAMSS_GP1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_gp1_clk",
+ .parent = &camss_gp1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_gp1_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_ispif_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_ISPIF_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_ispif_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_ispif_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_jpeg0_clk = {
+ .cbcr_reg = MMSS_CAMSS_JPEG0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_jpeg0_clk",
+ .parent = &jpeg0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_jpeg0_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_jpeg_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_JPEG_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_jpeg_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_jpeg_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_jpeg_axi_clk = {
+ .cbcr_reg = MMSS_CAMSS_JPEG_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_jpeg_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_jpeg_axi_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_mclk0_clk = {
+ .cbcr_reg = MMSS_CAMSS_MCLK0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_mclk0_clk",
+ .parent = &mclk0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_mclk0_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_mclk1_clk = {
+ .cbcr_reg = MMSS_CAMSS_MCLK1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_mclk1_clk",
+ .parent = &mclk1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_mclk1_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_mclk2_clk = {
+ .cbcr_reg = MMSS_CAMSS_MCLK2_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_mclk2_clk",
+ .parent = &mclk2_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_mclk2_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_mclk3_clk = {
+ .cbcr_reg = MMSS_CAMSS_MCLK3_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_mclk3_clk",
+ .parent = &mclk3_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_mclk3_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_micro_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_MICRO_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_micro_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_micro_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi0phytimer_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI0PHYTIMER_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi0phytimer_clk",
+ .parent = &csi0phytimer_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi0phytimer_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi1phytimer_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI1PHYTIMER_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi1phytimer_clk",
+ .parent = &csi1phytimer_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi1phytimer_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_csi2phytimer_clk = {
+ .cbcr_reg = MMSS_CAMSS_CSI2PHYTIMER_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_csi2phytimer_clk",
+ .parent = &csi2phytimer_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_csi2phytimer_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_top_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_TOP_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_top_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_top_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_vfe0_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE0_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_vfe0_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_vfe0_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_vfe0_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_vfe0_clk",
+ .parent = &vfe0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_vfe0_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_vfe0_stream_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE0_STREAM_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_vfe0_stream_clk",
+ .parent = &vfe0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_vfe0_stream_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_vfe1_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE1_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_vfe1_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_vfe1_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_vfe1_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_vfe1_clk",
+ .parent = &vfe1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_vfe1_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_vfe1_stream_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE1_STREAM_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_vfe1_stream_clk",
+ .parent = &vfe1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_vfe1_stream_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_vfe_vbif_ahb_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE_VBIF_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_vfe_vbif_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_vfe_vbif_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_camss_vfe_vbif_axi_clk = {
+ .cbcr_reg = MMSS_CAMSS_VFE_VBIF_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_camss_vfe_vbif_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_camss_vfe_vbif_axi_clk.c),
+ },
+};
+
+/* MDSS (display subsystem) branch clocks and DSI byte-interface dividers. */
+static struct branch_clk mmss_mdss_ahb_clk = {
+ .cbcr_reg = MMSS_MDSS_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_axi_clk = {
+ .cbcr_reg = MMSS_MDSS_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_axi_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_byte0_clk = {
+ .cbcr_reg = MMSS_MDSS_BYTE0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_byte0_clk",
+ .parent = &byte0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_byte0_clk.c),
+ },
+};
+
+/*
+ * 2-bit post-divider (div-1..div-4) between byte0_clk_src and the
+ * byte0 interface branch; runs as a slave divider of its parent.
+ */
+static struct div_clk mmss_mdss_byte0_intf_div_clk = {
+ .offset = MMSS_MDSS_BYTE0_INTF_DIV,
+ .mask = 0x3,
+ .shift = 0,
+ .data = {
+ .min_div = 1,
+ .max_div = 4,
+ },
+ .base = &virt_base,
+ /*
+ * NOTE: Op does not work for div-3. Current assumption is that div-3
+ * is not a recommended setting for this divider.
+ */
+ .ops = &postdiv_reg_ops,
+ .c = {
+ .dbg_name = "mmss_mdss_byte0_intf_div_clk",
+ .parent = &byte0_clk_src.c,
+ .ops = &clk_ops_slave_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(mmss_mdss_byte0_intf_div_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_byte0_intf_clk = {
+ .cbcr_reg = MMSS_MDSS_BYTE0_INTF_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_byte0_intf_clk",
+ .parent = &mmss_mdss_byte0_intf_div_clk.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_byte0_intf_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_byte1_clk = {
+ .cbcr_reg = MMSS_MDSS_BYTE1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_byte1_clk",
+ .parent = &byte1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_byte1_clk.c),
+ },
+};
+
+/* Same divider arrangement as byte0, for the second DSI interface. */
+static struct div_clk mmss_mdss_byte1_intf_div_clk = {
+ .offset = MMSS_MDSS_BYTE1_INTF_DIV,
+ .mask = 0x3,
+ .shift = 0,
+ .data = {
+ .min_div = 1,
+ .max_div = 4,
+ },
+ .base = &virt_base,
+ /*
+ * NOTE: Op does not work for div-3. Current assumption is that div-3
+ * is not a recommended setting for this divider.
+ */
+ .ops = &postdiv_reg_ops,
+ .c = {
+ .dbg_name = "mmss_mdss_byte1_intf_div_clk",
+ .parent = &byte1_clk_src.c,
+ .ops = &clk_ops_slave_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(mmss_mdss_byte1_intf_div_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_byte1_intf_clk = {
+ .cbcr_reg = MMSS_MDSS_BYTE1_INTF_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_byte1_intf_clk",
+ .parent = &mmss_mdss_byte1_intf_div_clk.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_byte1_intf_clk.c),
+ },
+};
+
+/* DisplayPort controller clocks. */
+static struct branch_clk mmss_mdss_dp_aux_clk = {
+ .cbcr_reg = MMSS_MDSS_DP_AUX_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_dp_aux_clk",
+ .parent = &dp_aux_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_dp_aux_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_dp_link_clk = {
+ .cbcr_reg = MMSS_MDSS_DP_LINK_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_dp_link_clk",
+ .parent = &dp_link_clk_src.c,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_dp_link_clk.c),
+ },
+};
+
+/* Reset state of MMSS_MDSS_DP_LINK_INTF_DIV is 0x3 (div-4) */
+static struct branch_clk mmss_mdss_dp_link_intf_clk = {
+ .cbcr_reg = MMSS_MDSS_DP_LINK_INTF_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_dp_link_intf_clk",
+ .parent = &dp_link_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_dp_link_intf_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_dp_crypto_clk = {
+ .cbcr_reg = MMSS_MDSS_DP_CRYPTO_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_dp_crypto_clk",
+ .parent = &dp_crypto_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_dp_crypto_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_dp_gtc_clk = {
+ .cbcr_reg = MMSS_MDSS_DP_GTC_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_dp_gtc_clk",
+ .parent = &dp_gtc_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_dp_gtc_clk.c),
+ },
+};
+
+/* Remaining MDSS, NoC, misc, video and VMEM branch clocks. */
+static struct branch_clk mmss_mdss_esc0_clk = {
+ .cbcr_reg = MMSS_MDSS_ESC0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_esc0_clk",
+ .parent = &esc0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_esc0_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_esc1_clk = {
+ .cbcr_reg = MMSS_MDSS_ESC1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_esc1_clk",
+ .parent = &esc1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_esc1_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_extpclk_clk = {
+ .cbcr_reg = MMSS_MDSS_EXTPCLK_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_extpclk_clk",
+ .parent = &extpclk_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_extpclk_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_hdmi_clk = {
+ .cbcr_reg = MMSS_MDSS_HDMI_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_hdmi_clk",
+ .parent = &hdmi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_hdmi_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_hdmi_dp_ahb_clk = {
+ .cbcr_reg = MMSS_MDSS_HDMI_DP_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_hdmi_dp_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_hdmi_dp_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_mdp_clk = {
+ .cbcr_reg = MMSS_MDSS_MDP_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_mdp_clk",
+ .parent = &mdp_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_mdp_clk.c),
+ },
+};
+
+/*
+ * LUT clock has no working halt bit, so only the enable bit is checked
+ * and a fixed delay is used instead of polling for halt status.
+ */
+static struct branch_clk mmss_mdss_mdp_lut_clk = {
+ .cbcr_reg = MMSS_MDSS_MDP_LUT_CBCR,
+ .has_sibling = 1,
+ .check_enable_bit = true,
+ .halt_check = DELAY,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_mdp_lut_clk",
+ .parent = &mdp_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_mdp_lut_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_pclk0_clk = {
+ .cbcr_reg = MMSS_MDSS_PCLK0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_pclk0_clk",
+ .parent = &pclk0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_pclk0_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_pclk1_clk = {
+ .cbcr_reg = MMSS_MDSS_PCLK1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_pclk1_clk",
+ .parent = &pclk1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_pclk1_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_rot_clk = {
+ .cbcr_reg = MMSS_MDSS_ROT_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_rot_clk",
+ .parent = &rot_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_rot_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mdss_vsync_clk = {
+ .cbcr_reg = MMSS_MDSS_VSYNC_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mdss_vsync_clk",
+ .parent = &vsync_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mdss_vsync_clk.c),
+ },
+};
+
+/*
+ * MNoC AHB: halt status is not polled on disable since the bus may be
+ * legitimately stopped; only the enable bit is verified.
+ */
+static struct branch_clk mmss_mnoc_ahb_clk = {
+ .cbcr_reg = MMSS_MNOC_AHB_CBCR,
+ .has_sibling = 0,
+ .check_enable_bit = true,
+ .no_halt_check_on_disable = true,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mnoc_ahb_clk",
+ .parent = &ahb_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mnoc_ahb_clk.c),
+ },
+};
+
+/* misc_ahb requires mnoc_ahb to be running; expressed via .depends. */
+static struct branch_clk mmss_misc_ahb_clk = {
+ .cbcr_reg = MMSS_MISC_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_misc_ahb_clk",
+ .ops = &clk_ops_branch,
+ .depends = &mmss_mnoc_ahb_clk.c,
+ CLK_INIT(mmss_misc_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_misc_cxo_clk = {
+ .cbcr_reg = MMSS_MISC_CXO_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_misc_cxo_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_misc_cxo_clk.c),
+ },
+};
+
+static struct branch_clk mmss_mnoc_maxi_clk = {
+ .cbcr_reg = MMSS_MNOC_MAXI_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_mnoc_maxi_clk",
+ .parent = &maxi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_mnoc_maxi_clk.c),
+ },
+};
+
+/* Video codec (Venus) clocks. */
+static struct branch_clk mmss_video_subcore0_clk = {
+ .cbcr_reg = MMSS_VIDEO_SUBCORE0_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_video_subcore0_clk",
+ .parent = &video_subcore0_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_video_subcore0_clk.c),
+ },
+};
+
+static struct branch_clk mmss_video_subcore1_clk = {
+ .cbcr_reg = MMSS_VIDEO_SUBCORE1_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_video_subcore1_clk",
+ .parent = &video_subcore1_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_video_subcore1_clk.c),
+ },
+};
+
+static struct branch_clk mmss_video_ahb_clk = {
+ .cbcr_reg = MMSS_VIDEO_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_video_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_video_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_video_axi_clk = {
+ .cbcr_reg = MMSS_VIDEO_AXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_video_axi_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_video_axi_clk.c),
+ },
+};
+
+static struct branch_clk mmss_video_core_clk = {
+ .cbcr_reg = MMSS_VIDEO_CORE_CBCR,
+ .has_sibling = 0,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_video_core_clk",
+ .parent = &video_core_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_video_core_clk.c),
+ },
+};
+
+static struct branch_clk mmss_video_maxi_clk = {
+ .cbcr_reg = MMSS_VIDEO_MAXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_video_maxi_clk",
+ .parent = &maxi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_video_maxi_clk.c),
+ },
+};
+
+static struct branch_clk mmss_vmem_ahb_clk = {
+ .cbcr_reg = MMSS_VMEM_AHB_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_vmem_ahb_clk",
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_vmem_ahb_clk.c),
+ },
+};
+
+static struct branch_clk mmss_vmem_maxi_clk = {
+ .cbcr_reg = MMSS_VMEM_MAXI_CBCR,
+ .has_sibling = 1,
+ .base = &virt_base,
+ .c = {
+ .dbg_name = "mmss_vmem_maxi_clk",
+ .parent = &maxi_clk_src.c,
+ .ops = &clk_ops_branch,
+ CLK_INIT(mmss_vmem_maxi_clk.c),
+ },
+};
+
+/*
+ * Debug mux: routes one MMSS clock onto the debug output. The hardware
+ * select value (written to MMSS_DEBUG_CLK_CTL through the 0x3FF mask,
+ * gated by BIT(16)) is paired with each clock in MUX_SRC_LIST below.
+ */
+static struct mux_clk mmss_debug_mux = {
+ .ops = &mux_reg_ops,
+ .en_mask = BIT(16),
+ .mask = 0x3FF,
+ .offset = MMSS_DEBUG_CLK_CTL,
+ .en_offset = MMSS_DEBUG_CLK_CTL,
+ .base = &virt_base,
+ MUX_SRC_LIST(
+ { &mmss_mnoc_ahb_clk.c, 0x0001 },
+ { &mmss_misc_ahb_clk.c, 0x0003 },
+ { &mmss_vmem_maxi_clk.c, 0x0009 },
+ { &mmss_vmem_ahb_clk.c, 0x000a },
+ { &mmss_bimc_smmu_ahb_clk.c, 0x000c },
+ { &mmss_bimc_smmu_axi_clk.c, 0x000d },
+ { &mmss_video_core_clk.c, 0x000e },
+ { &mmss_video_axi_clk.c, 0x000f },
+ { &mmss_video_maxi_clk.c, 0x0010 },
+ { &mmss_video_ahb_clk.c, 0x0011 },
+ { &mmss_mdss_rot_clk.c, 0x0012 },
+ { &mmss_snoc_dvm_axi_clk.c, 0x0013 },
+ { &mmss_mdss_mdp_clk.c, 0x0014 },
+ { &mmss_mdss_mdp_lut_clk.c, 0x0015 },
+ { &mmss_mdss_pclk0_clk.c, 0x0016 },
+ { &mmss_mdss_pclk1_clk.c, 0x0017 },
+ { &mmss_mdss_extpclk_clk.c, 0x0018 },
+ { &mmss_video_subcore0_clk.c, 0x001a },
+ { &mmss_video_subcore1_clk.c, 0x001b },
+ { &mmss_mdss_vsync_clk.c, 0x001c },
+ { &mmss_mdss_hdmi_clk.c, 0x001d },
+ { &mmss_mdss_byte0_clk.c, 0x001e },
+ { &mmss_mdss_byte1_clk.c, 0x001f },
+ { &mmss_mdss_esc0_clk.c, 0x0020 },
+ { &mmss_mdss_esc1_clk.c, 0x0021 },
+ { &mmss_mdss_ahb_clk.c, 0x0022 },
+ { &mmss_mdss_hdmi_dp_ahb_clk.c, 0x0023 },
+ { &mmss_mdss_axi_clk.c, 0x0024 },
+ { &mmss_camss_top_ahb_clk.c, 0x0025 },
+ { &mmss_camss_micro_ahb_clk.c, 0x0026 },
+ { &mmss_camss_gp0_clk.c, 0x0027 },
+ { &mmss_camss_gp1_clk.c, 0x0028 },
+ { &mmss_camss_mclk0_clk.c, 0x0029 },
+ { &mmss_camss_mclk1_clk.c, 0x002a },
+ { &mmss_camss_mclk2_clk.c, 0x002b },
+ { &mmss_camss_mclk3_clk.c, 0x002c },
+ { &mmss_camss_cci_clk.c, 0x002d },
+ { &mmss_camss_cci_ahb_clk.c, 0x002e },
+ { &mmss_camss_csi0phytimer_clk.c, 0x002f },
+ { &mmss_camss_csi1phytimer_clk.c, 0x0030 },
+ { &mmss_camss_csi2phytimer_clk.c, 0x0031 },
+ { &mmss_camss_jpeg0_clk.c, 0x0032 },
+ { &mmss_camss_ispif_ahb_clk.c, 0x0033 },
+ { &mmss_camss_jpeg_ahb_clk.c, 0x0035 },
+ { &mmss_camss_jpeg_axi_clk.c, 0x0036 },
+ { &mmss_camss_ahb_clk.c, 0x0037 },
+ { &mmss_camss_vfe0_clk.c, 0x0038 },
+ { &mmss_camss_vfe1_clk.c, 0x0039 },
+ { &mmss_camss_cpp_clk.c, 0x003a },
+ { &mmss_camss_cpp_ahb_clk.c, 0x003b },
+ { &mmss_camss_csi_vfe0_clk.c, 0x003f },
+ { &mmss_camss_csi_vfe1_clk.c, 0x0040 },
+ { &mmss_camss_csi0_clk.c, 0x0041 },
+ { &mmss_camss_csi0_ahb_clk.c, 0x0042 },
+ { &mmss_camss_csiphy0_clk.c, 0x0043 },
+ { &mmss_camss_csi0rdi_clk.c, 0x0044 },
+ { &mmss_camss_csi0pix_clk.c, 0x0045 },
+ { &mmss_camss_csi1_clk.c, 0x0046 },
+ { &mmss_camss_csi1_ahb_clk.c, 0x0047 },
+ { &mmss_camss_csi1rdi_clk.c, 0x0049 },
+ { &mmss_camss_csi1pix_clk.c, 0x004a },
+ { &mmss_camss_csi2_clk.c, 0x004b },
+ { &mmss_camss_csi2_ahb_clk.c, 0x004c },
+ { &mmss_camss_csi2rdi_clk.c, 0x004e },
+ { &mmss_camss_csi2pix_clk.c, 0x004f },
+ { &mmss_camss_csi3_clk.c, 0x0050 },
+ { &mmss_camss_csi3_ahb_clk.c, 0x0051 },
+ { &mmss_camss_csi3rdi_clk.c, 0x0053 },
+ { &mmss_camss_csi3pix_clk.c, 0x0054 },
+ { &mmss_mnoc_maxi_clk.c, 0x0070 },
+ { &mmss_camss_vfe0_stream_clk.c, 0x0071 },
+ { &mmss_camss_vfe1_stream_clk.c, 0x0072 },
+ { &mmss_camss_cpp_vbif_ahb_clk.c, 0x0073 },
+ { &mmss_misc_cxo_clk.c, 0x0077 },
+ { &mmss_camss_cpp_axi_clk.c, 0x007a },
+ { &mmss_camss_csiphy1_clk.c, 0x0085 },
+ { &mmss_camss_vfe0_ahb_clk.c, 0x0086 },
+ { &mmss_camss_vfe1_ahb_clk.c, 0x0087 },
+ { &mmss_camss_csiphy2_clk.c, 0x0088 },
+ { &mmss_fd_core_clk.c, 0x0089 },
+ { &mmss_fd_core_uar_clk.c, 0x008a },
+ { &mmss_fd_ahb_clk.c, 0x008c },
+ { &mmss_camss_cphy_csid0_clk.c, 0x008d },
+ { &mmss_camss_cphy_csid1_clk.c, 0x008e },
+ { &mmss_camss_cphy_csid2_clk.c, 0x008f },
+ { &mmss_camss_cphy_csid3_clk.c, 0x0090 },
+ { &mmss_mdss_dp_link_clk.c, 0x0098 },
+ { &mmss_mdss_dp_link_intf_clk.c, 0x0099 },
+ { &mmss_mdss_dp_crypto_clk.c, 0x009a },
+ { &mmss_mdss_dp_aux_clk.c, 0x009c },
+ { &mmss_mdss_dp_gtc_clk.c, 0x009d },
+ { &mmss_mdss_byte0_intf_clk.c, 0x00ad },
+ { &mmss_mdss_byte1_intf_clk.c, 0x00ae },
+ ),
+ .c = {
+ .dbg_name = "mmss_debug_mux",
+ .ops = &clk_ops_gen_mux,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(mmss_debug_mux.c),
+ },
+};
+
+/*
+ * Lookup table registered with the clock framework at probe time.
+ * Every clock declared in this file must appear here exactly once.
+ * Fix: the byte0 interface divider was listed twice (copy-paste slip)
+ * while mmss_mdss_byte1_intf_div_clk was never registered, leaving the
+ * byte1 DSI interface divider unusable by consumers; the duplicate now
+ * registers the byte1 divider instead.
+ */
+static struct clk_lookup msm_clocks_mmss_cobalt[] = {
+ CLK_LIST(mmsscc_xo),
+ CLK_LIST(mmsscc_gpll0),
+ CLK_LIST(mmsscc_gpll0_div),
+ CLK_LIST(mmpll0_pll),
+ CLK_LIST(mmpll0_pll_out),
+ CLK_LIST(mmpll1_pll),
+ CLK_LIST(mmpll1_pll_out),
+ CLK_LIST(mmpll3_pll),
+ CLK_LIST(mmpll3_pll_out),
+ CLK_LIST(mmpll4_pll),
+ CLK_LIST(mmpll4_pll_out),
+ CLK_LIST(mmpll5_pll),
+ CLK_LIST(mmpll5_pll_out),
+ CLK_LIST(mmpll6_pll),
+ CLK_LIST(mmpll6_pll_out),
+ CLK_LIST(mmpll7_pll),
+ CLK_LIST(mmpll7_pll_out),
+ CLK_LIST(mmpll10_pll),
+ CLK_LIST(mmpll10_pll_out),
+ CLK_LIST(ahb_clk_src),
+ CLK_LIST(csi0_clk_src),
+ CLK_LIST(vfe0_clk_src),
+ CLK_LIST(vfe1_clk_src),
+ CLK_LIST(mdp_clk_src),
+ CLK_LIST(maxi_clk_src),
+ CLK_LIST(cpp_clk_src),
+ CLK_LIST(jpeg0_clk_src),
+ CLK_LIST(rot_clk_src),
+ CLK_LIST(video_core_clk_src),
+ CLK_LIST(csi1_clk_src),
+ CLK_LIST(csi2_clk_src),
+ CLK_LIST(csi3_clk_src),
+ CLK_LIST(fd_core_clk_src),
+ CLK_LIST(video_subcore0_clk_src),
+ CLK_LIST(video_subcore1_clk_src),
+ CLK_LIST(cci_clk_src),
+ CLK_LIST(csiphy_clk_src),
+ CLK_LIST(camss_gp0_clk_src),
+ CLK_LIST(camss_gp1_clk_src),
+ CLK_LIST(mclk0_clk_src),
+ CLK_LIST(mclk1_clk_src),
+ CLK_LIST(mclk2_clk_src),
+ CLK_LIST(mclk3_clk_src),
+ CLK_LIST(ext_byte0_clk_src),
+ CLK_LIST(ext_byte1_clk_src),
+ CLK_LIST(byte0_clk_src),
+ CLK_LIST(byte1_clk_src),
+ CLK_LIST(ext_pclk0_clk_src),
+ CLK_LIST(ext_pclk1_clk_src),
+ CLK_LIST(pclk0_clk_src),
+ CLK_LIST(pclk1_clk_src),
+ CLK_LIST(ext_extpclk_clk_src),
+ CLK_LIST(extpclk_clk_src),
+ CLK_LIST(ext_dp_phy_pll_vco),
+ CLK_LIST(ext_dp_phy_pll_link),
+ CLK_LIST(dp_link_clk_src),
+ CLK_LIST(dp_crypto_clk_src),
+ CLK_LIST(csi0phytimer_clk_src),
+ CLK_LIST(csi1phytimer_clk_src),
+ CLK_LIST(csi2phytimer_clk_src),
+ CLK_LIST(dp_aux_clk_src),
+ CLK_LIST(dp_gtc_clk_src),
+ CLK_LIST(esc0_clk_src),
+ CLK_LIST(esc1_clk_src),
+ CLK_LIST(hdmi_clk_src),
+ CLK_LIST(vsync_clk_src),
+ CLK_LIST(mmss_bimc_smmu_ahb_clk),
+ CLK_LIST(mmss_bimc_smmu_axi_clk),
+ CLK_LIST(mmss_snoc_dvm_axi_clk),
+ CLK_LIST(mmss_camss_ahb_clk),
+ CLK_LIST(mmss_camss_cci_ahb_clk),
+ CLK_LIST(mmss_camss_cci_clk),
+ CLK_LIST(mmss_camss_cpp_ahb_clk),
+ CLK_LIST(mmss_camss_cpp_clk),
+ CLK_LIST(mmss_camss_cpp_axi_clk),
+ CLK_LIST(mmss_camss_cpp_vbif_ahb_clk),
+ CLK_LIST(mmss_camss_cphy_csid0_clk),
+ CLK_LIST(mmss_camss_csi0_ahb_clk),
+ CLK_LIST(mmss_camss_csi0_clk),
+ CLK_LIST(mmss_camss_csi0pix_clk),
+ CLK_LIST(mmss_camss_csi0rdi_clk),
+ CLK_LIST(mmss_camss_cphy_csid1_clk),
+ CLK_LIST(mmss_camss_csi1_ahb_clk),
+ CLK_LIST(mmss_camss_csi1_clk),
+ CLK_LIST(mmss_camss_csi1pix_clk),
+ CLK_LIST(mmss_camss_csi1rdi_clk),
+ CLK_LIST(mmss_camss_cphy_csid2_clk),
+ CLK_LIST(mmss_camss_csi2_ahb_clk),
+ CLK_LIST(mmss_camss_csi2_clk),
+ CLK_LIST(mmss_camss_csi2pix_clk),
+ CLK_LIST(mmss_camss_csi2rdi_clk),
+ CLK_LIST(mmss_camss_cphy_csid3_clk),
+ CLK_LIST(mmss_camss_csi3_ahb_clk),
+ CLK_LIST(mmss_camss_csi3_clk),
+ CLK_LIST(mmss_camss_csi3pix_clk),
+ CLK_LIST(mmss_camss_csi3rdi_clk),
+ CLK_LIST(mmss_camss_csi_vfe0_clk),
+ CLK_LIST(mmss_camss_csi_vfe1_clk),
+ CLK_LIST(mmss_camss_csiphy0_clk),
+ CLK_LIST(mmss_camss_csiphy1_clk),
+ CLK_LIST(mmss_camss_csiphy2_clk),
+ CLK_LIST(mmss_fd_ahb_clk),
+ CLK_LIST(mmss_fd_core_clk),
+ CLK_LIST(mmss_fd_core_uar_clk),
+ CLK_LIST(mmss_camss_gp0_clk),
+ CLK_LIST(mmss_camss_gp1_clk),
+ CLK_LIST(mmss_camss_ispif_ahb_clk),
+ CLK_LIST(mmss_camss_jpeg0_clk),
+ CLK_LIST(mmss_camss_jpeg_ahb_clk),
+ CLK_LIST(mmss_camss_jpeg_axi_clk),
+ CLK_LIST(mmss_camss_mclk0_clk),
+ CLK_LIST(mmss_camss_mclk1_clk),
+ CLK_LIST(mmss_camss_mclk2_clk),
+ CLK_LIST(mmss_camss_mclk3_clk),
+ CLK_LIST(mmss_camss_micro_ahb_clk),
+ CLK_LIST(mmss_camss_csi0phytimer_clk),
+ CLK_LIST(mmss_camss_csi1phytimer_clk),
+ CLK_LIST(mmss_camss_csi2phytimer_clk),
+ CLK_LIST(mmss_camss_top_ahb_clk),
+ CLK_LIST(mmss_camss_vfe0_ahb_clk),
+ CLK_LIST(mmss_camss_vfe0_clk),
+ CLK_LIST(mmss_camss_vfe0_stream_clk),
+ CLK_LIST(mmss_camss_vfe1_ahb_clk),
+ CLK_LIST(mmss_camss_vfe1_clk),
+ CLK_LIST(mmss_camss_vfe1_stream_clk),
+ CLK_LIST(mmss_camss_vfe_vbif_ahb_clk),
+ CLK_LIST(mmss_camss_vfe_vbif_axi_clk),
+ CLK_LIST(mmss_mdss_ahb_clk),
+ CLK_LIST(mmss_mdss_axi_clk),
+ CLK_LIST(mmss_mdss_byte0_clk),
+ CLK_LIST(mmss_mdss_byte0_intf_div_clk),
+ CLK_LIST(mmss_mdss_byte0_intf_clk),
+ CLK_LIST(mmss_mdss_byte1_clk),
+ CLK_LIST(mmss_mdss_byte1_intf_div_clk),
+ CLK_LIST(mmss_mdss_byte1_intf_clk),
+ CLK_LIST(mmss_mdss_dp_aux_clk),
+ CLK_LIST(mmss_mdss_dp_crypto_clk),
+ CLK_LIST(mmss_mdss_dp_link_clk),
+ CLK_LIST(mmss_mdss_dp_link_intf_clk),
+ CLK_LIST(mmss_mdss_dp_gtc_clk),
+ CLK_LIST(mmss_mdss_esc0_clk),
+ CLK_LIST(mmss_mdss_esc1_clk),
+ CLK_LIST(mmss_mdss_extpclk_clk),
+ CLK_LIST(mmss_mdss_hdmi_clk),
+ CLK_LIST(mmss_mdss_hdmi_dp_ahb_clk),
+ CLK_LIST(mmss_mdss_mdp_clk),
+ CLK_LIST(mmss_mdss_mdp_lut_clk),
+ CLK_LIST(mmss_mdss_pclk0_clk),
+ CLK_LIST(mmss_mdss_pclk1_clk),
+ CLK_LIST(mmss_mdss_rot_clk),
+ CLK_LIST(mmss_mdss_vsync_clk),
+ CLK_LIST(mmss_misc_ahb_clk),
+ CLK_LIST(mmss_misc_cxo_clk),
+ CLK_LIST(mmss_mnoc_ahb_clk),
+ CLK_LIST(mmss_mnoc_maxi_clk),
+ CLK_LIST(mmss_video_subcore0_clk),
+ CLK_LIST(mmss_video_subcore1_clk),
+ CLK_LIST(mmss_video_ahb_clk),
+ CLK_LIST(mmss_video_axi_clk),
+ CLK_LIST(mmss_video_core_clk),
+ CLK_LIST(mmss_video_maxi_clk),
+ CLK_LIST(mmss_vmem_ahb_clk),
+ CLK_LIST(mmss_vmem_maxi_clk),
+ CLK_LIST(mmss_debug_mux),
+};
+
+/*
+ * msm_mmsscc_hamster_fixup() - override PLL rates, frequency tables and
+ * per-voltage-corner fmax limits for the "hamster"/VQ variant before the
+ * clock tables are registered at probe time.
+ * Fix: dropped the stray semicolon after the closing brace (an empty
+ * file-scope declaration, rejected under -pedantic and inconsistent with
+ * msm_mmsscc_v2_fixup below).
+ */
+static void msm_mmsscc_hamster_fixup(void)
+{
+ mmpll3_pll.c.rate = 1066000000;
+ mmpll3_pll.c.fmax[VDD_DIG_LOWER] = 533000000;
+ mmpll3_pll.c.fmax[VDD_DIG_LOW] = 533000000;
+ mmpll3_pll.c.fmax[VDD_DIG_LOW_L1] = 533000000;
+ mmpll3_pll.c.fmax[VDD_DIG_NOMINAL] = 1066000000;
+ mmpll3_pll.c.fmax[VDD_DIG_HIGH] = 1066000000;
+
+ mmpll4_pll.c.fmax[VDD_DIG_LOW] = 384000000;
+ mmpll4_pll.c.fmax[VDD_DIG_LOW_L1] = 384000000;
+ mmpll4_pll.c.fmax[VDD_DIG_NOMINAL] = 768000000;
+
+ mmpll5_pll.c.fmax[VDD_DIG_LOW] = 412500000;
+ mmpll5_pll.c.fmax[VDD_DIG_LOW_L1] = 412500000;
+ mmpll5_pll.c.fmax[VDD_DIG_NOMINAL] = 825000000;
+
+ mmpll6_pll.c.rate = 888000000;
+ mmpll6_pll.c.fmax[VDD_DIG_LOWER] = 444000000;
+ mmpll6_pll.c.fmax[VDD_DIG_LOW] = 444000000;
+ mmpll6_pll.c.fmax[VDD_DIG_LOW_L1] = 444000000;
+ mmpll6_pll.c.fmax[VDD_DIG_NOMINAL] = 888000000;
+ mmpll6_pll.c.fmax[VDD_DIG_HIGH] = 888000000;
+
+ /* Camera RCGs pick up the VQ frequency tables. */
+ vfe0_clk_src.freq_tbl = ftbl_vfe_clk_src_vq;
+ vfe0_clk_src.c.fmax[VDD_DIG_LOW] = 404000000;
+ vfe0_clk_src.c.fmax[VDD_DIG_LOW_L1] = 480000000;
+ vfe1_clk_src.freq_tbl = ftbl_vfe_clk_src_vq;
+ vfe1_clk_src.c.fmax[VDD_DIG_LOW] = 404000000;
+ vfe1_clk_src.c.fmax[VDD_DIG_LOW_L1] = 480000000;
+
+ csi0_clk_src.freq_tbl = ftbl_csi_clk_src_vq;
+ csi0_clk_src.c.fmax[VDD_DIG_LOW_L1] = 300000000;
+ csi1_clk_src.freq_tbl = ftbl_csi_clk_src_vq;
+ csi1_clk_src.c.fmax[VDD_DIG_LOW_L1] = 300000000;
+ csi2_clk_src.freq_tbl = ftbl_csi_clk_src_vq;
+ csi2_clk_src.c.fmax[VDD_DIG_LOW_L1] = 300000000;
+ csi3_clk_src.freq_tbl = ftbl_csi_clk_src_vq;
+ csi3_clk_src.c.fmax[VDD_DIG_LOW_L1] = 300000000;
+
+ cpp_clk_src.freq_tbl = ftbl_cpp_clk_src_vq;
+ cpp_clk_src.c.fmax[VDD_DIG_LOW_L1] = 480000000;
+ jpeg0_clk_src.freq_tbl = ftbl_jpeg0_clk_src_vq;
+ jpeg0_clk_src.c.fmax[VDD_DIG_LOW_L1] = 320000000;
+ csiphy_clk_src.freq_tbl = ftbl_csiphy_clk_src_vq;
+ csiphy_clk_src.c.fmax[VDD_DIG_LOW_L1] = 300000000;
+ fd_core_clk_src.freq_tbl = ftbl_fd_core_clk_src_vq;
+ fd_core_clk_src.c.fmax[VDD_DIG_LOW_L1] = 400000000;
+
+ csi0phytimer_clk_src.c.fmax[VDD_DIG_LOW_L1] = 269333333;
+ csi1phytimer_clk_src.c.fmax[VDD_DIG_LOW_L1] = 269333333;
+ csi2phytimer_clk_src.c.fmax[VDD_DIG_LOW_L1] = 269333333;
+
+ /* Display limits. */
+ mdp_clk_src.c.fmax[VDD_DIG_LOW_L1] = 330000000;
+ extpclk_clk_src.c.fmax[VDD_DIG_LOW] = 312500000;
+ extpclk_clk_src.c.fmax[VDD_DIG_LOW_L1] = 375000000;
+ rot_clk_src.c.fmax[VDD_DIG_LOW_L1] = 330000000;
+
+ /* Video core and both subcores share the same corner limits. */
+ maxi_clk_src.freq_tbl = ftbl_maxi_clk_src_vq;
+ video_core_clk_src.freq_tbl = ftbl_video_core_clk_src_vq;
+ video_core_clk_src.c.fmax[VDD_DIG_LOWER] = 200000000;
+ video_core_clk_src.c.fmax[VDD_DIG_LOW] = 269330000;
+ video_core_clk_src.c.fmax[VDD_DIG_LOW_L1] = 404000000;
+ video_core_clk_src.c.fmax[VDD_DIG_NOMINAL] = 444000000;
+ video_core_clk_src.c.fmax[VDD_DIG_HIGH] = 533000000;
+
+ video_subcore0_clk_src.freq_tbl = ftbl_video_subcore_clk_src_vq;
+ video_subcore0_clk_src.c.fmax[VDD_DIG_LOWER] = 200000000;
+ video_subcore0_clk_src.c.fmax[VDD_DIG_LOW] = 269330000;
+ video_subcore0_clk_src.c.fmax[VDD_DIG_LOW_L1] = 404000000;
+ video_subcore0_clk_src.c.fmax[VDD_DIG_NOMINAL] = 444000000;
+ video_subcore0_clk_src.c.fmax[VDD_DIG_HIGH] = 533000000;
+
+ video_subcore1_clk_src.freq_tbl = ftbl_video_subcore_clk_src_vq;
+ video_subcore1_clk_src.c.fmax[VDD_DIG_LOWER] = 200000000;
+ video_subcore1_clk_src.c.fmax[VDD_DIG_LOW] = 269330000;
+ video_subcore1_clk_src.c.fmax[VDD_DIG_LOW_L1] = 404000000;
+ video_subcore1_clk_src.c.fmax[VDD_DIG_NOMINAL] = 444000000;
+ video_subcore1_clk_src.c.fmax[VDD_DIG_HIGH] = 533000000;
+}
+
+/* v2 parts: raise the FD core fmax ceilings and switch its frequency table. */
+static void msm_mmsscc_v2_fixup(void)
+{
+	fd_core_clk_src.c.fmax[VDD_DIG_LOW] = 404000000;
+	fd_core_clk_src.c.fmax[VDD_DIG_LOW_L1] = 480000000;
+	fd_core_clk_src.freq_tbl = ftbl_fd_core_clk_src_v2;
+}
+
+/*
+ * Probe the MMSS clock controller: map the register block, acquire the
+ * digital rail and parent clocks, wire up the external DSI/DP clock
+ * sources, apply per-revision frequency-plan fixups and register the
+ * clock table with the framework.
+ */
+int msm_mmsscc_cobalt_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int rc;
+	struct clk *tmp;
+	struct regulator *reg;
+	u32 regval;
+	bool is_v2 = false, is_vq = false;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc_base");
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to retrieve register base.\n");
+		return -ENOMEM;
+	}
+	virt_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!virt_base) {
+		dev_err(&pdev->dev, "Failed to map CC registers\n");
+		return -ENOMEM;
+	}
+
+	/* Clear the DBG_CLK_DIV bits of the MMSS debug register */
+	regval = readl_relaxed(virt_base + mmss_debug_mux.offset);
+	regval &= ~BM(18, 17);
+	writel_relaxed(regval, virt_base + mmss_debug_mux.offset);
+
+	reg = vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
+	if (IS_ERR(reg)) {
+		if (PTR_ERR(reg) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get vdd_dig regulator!\n");
+		return PTR_ERR(reg);
+	}
+
+	tmp = mmsscc_xo.c.parent = devm_clk_get(&pdev->dev, "xo");
+	if (IS_ERR(tmp)) {
+		if (PTR_ERR(tmp) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get xo clock!\n");
+		return PTR_ERR(tmp);
+	}
+
+	tmp = mmsscc_gpll0.c.parent = devm_clk_get(&pdev->dev, "gpll0");
+	if (IS_ERR(tmp)) {
+		if (PTR_ERR(tmp) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get gpll0 clock!\n");
+		return PTR_ERR(tmp);
+	}
+
+	tmp = mmsscc_gpll0_div.c.parent = devm_clk_get(&pdev->dev, "gpll0_div");
+	if (IS_ERR(tmp)) {
+		if (PTR_ERR(tmp) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get gpll0_div clock!\n");
+		return PTR_ERR(tmp);
+	}
+
+	/* External byte/pixel sources come from the DSI PLL driver. */
+	ext_pclk0_clk_src.dev = &pdev->dev;
+	ext_pclk0_clk_src.clk_id = "pclk0_src";
+	ext_pclk0_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_pclk1_clk_src.dev = &pdev->dev;
+	ext_pclk1_clk_src.clk_id = "pclk1_src";
+	ext_pclk1_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_byte0_clk_src.dev = &pdev->dev;
+	ext_byte0_clk_src.clk_id = "byte0_src";
+	ext_byte0_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_byte1_clk_src.dev = &pdev->dev;
+	ext_byte1_clk_src.clk_id = "byte1_src";
+	ext_byte1_clk_src.c.flags = CLKFLAG_NO_RATE_CACHE;
+	ext_extpclk_clk_src.dev = &pdev->dev;
+	ext_extpclk_clk_src.clk_id = "extpclk_src";
+
+	ext_dp_phy_pll_link.dev = &pdev->dev;
+	ext_dp_phy_pll_link.clk_id = "dp_link_src";
+	ext_dp_phy_pll_vco.dev = &pdev->dev;
+	ext_dp_phy_pll_vco.clk_id = "dp_vco_div";
+
+	is_vq = of_device_is_compatible(pdev->dev.of_node,
+					"qcom,mmsscc-hamster");
+	if (is_vq)
+		msm_mmsscc_hamster_fixup();
+
+	/* v2 builds on the hamster plan, then adds its own overrides. */
+	is_v2 = of_device_is_compatible(pdev->dev.of_node,
+					"qcom,mmsscc-cobalt-v2");
+	if (is_v2) {
+		msm_mmsscc_hamster_fixup();
+		msm_mmsscc_v2_fixup();
+	}
+
+	rc = of_msm_clock_register(pdev->dev.of_node, msm_clocks_mmss_cobalt,
+				   ARRAY_SIZE(msm_clocks_mmss_cobalt));
+	if (rc)
+		return rc;
+
+	dev_info(&pdev->dev, "Registered MMSS clocks.\n");
+	return 0;
+}
+
+/* All supported MMSS clock controller revisions. */
+static struct of_device_id msm_clock_mmss_match_table[] = {
+	{ .compatible = "qcom,mmsscc-cobalt" },
+	{ .compatible = "qcom,mmsscc-cobalt-v2" },
+	{ .compatible = "qcom,mmsscc-hamster" },
+	{},
+};
+
+static struct platform_driver msm_clock_mmss_driver = {
+	.probe = msm_mmsscc_cobalt_probe,
+	.driver = {
+		.name = "qcom,mmsscc-cobalt",
+		.of_match_table = msm_clock_mmss_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Register the MMSS clock controller driver early (arch_initcall). */
+int __init msm_mmsscc_cobalt_init(void)
+{
+	return platform_driver_register(&msm_clock_mmss_driver);
+}
+arch_initcall(msm_mmsscc_cobalt_init);
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
new file mode 100644
index 000000000000..598e52b54c99
--- /dev/null
+++ b/drivers/clk/msm/clock-osm.c
@@ -0,0 +1,2546 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/driver.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+
+#include <soc/qcom/scm.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/clock-local2.h>
+#include <soc/qcom/clock-alpha-pll.h>
+
+#include <dt-bindings/clock/msm-clocks-hwio-cobalt.h>
+#include <dt-bindings/clock/msm-clocks-cobalt.h>
+
+#include "clock.h"
+
+/* Index into clk_osm::vbases[]/pbases[] register regions. */
+enum clk_osm_bases {
+	OSM_BASE,
+	PLL_BASE,
+	NUM_BASES,
+};
+
+/* Column layout of one row in the DT frequency lookup table. */
+enum clk_osm_lut_data {
+	FREQ,
+	FREQ_DATA,
+	PLL_OVERRIDES,
+	SPARE_DATA,
+	NUM_FIELDS,
+};
+
+/* How OSM trace packets are emitted. */
+enum clk_osm_trace_method {
+	XOR_PACKET,
+	PERIODIC_PACKET,
+};
+
+/* Selectable OSM trace packet streams. */
+enum clk_osm_trace_packet_id {
+	TRACE_PACKET0,
+	TRACE_PACKET1,
+	TRACE_PACKET2,
+	TRACE_PACKET3,
+};
+
+#define SEQ_REG(n) (0x300 + (n) * 4)
+#define MEM_ACC_SEQ_REG_CFG_START(n) (SEQ_REG(12 + (n)))
+#define MEM_ACC_SEQ_CONST(n) (n)
+#define MEM_ACC_INSTR_COMP(n) (0x67 + ((n) * 0x40))
+#define MEM_ACC_SEQ_REG_VAL_START(n) \
+ ((n) < 8 ? SEQ_REG(4 + (n)) : SEQ_REG(60 + (n) - 8))
+
+#define OSM_TABLE_SIZE 40
+#define MAX_CLUSTER_CNT 2
+#define MAX_CONFIG 4
+#define LLM_SW_OVERRIDE_CNT 3
+
+#define ENABLE_REG 0x1004
+#define INDEX_REG 0x1150
+#define FREQ_REG 0x1154
+#define VOLT_REG 0x1158
+#define OVERRIDE_REG 0x115C
+#define SPARE_REG 0x1164
+
+#define OSM_CYCLE_COUNTER_CTRL_REG 0x1F00
+#define OSM_CYCLE_COUNTER_STATUS_REG 0x1F04
+#define DCVS_PERF_STATE_DESIRED_REG 0x1F10
+#define DCVS_PERF_STATE_DEVIATION_INTR_STAT 0x1F14
+#define DCVS_PERF_STATE_DEVIATION_INTR_EN 0x1F18
+#define DCVS_PERF_STATE_DEVIATION_INTR_CLEAR 0x1F1C
+#define DCVS_PERF_STATE_DEVIATION_CORRECTED_INTR_STAT 0x1F20
+#define DCVS_PERF_STATE_DEVIATION_CORRECTED_INTR_EN 0x1F24
+#define DCVS_PERF_STATE_DEVIATION_CORRECTED_INTR_CLEAR 0x1F28
+#define DCVS_PERF_STATE_MET_INTR_STAT 0x1F2C
+#define DCVS_PERF_STATE_MET_INTR_EN 0x1F30
+#define DCVS_PERF_STATE_MET_INTR_CLR 0x1F34
+#define OSM_CORE_TABLE_SIZE 8192
+#define OSM_REG_SIZE 32
+
+#define WDOG_DOMAIN_PSTATE_STATUS 0x1c00
+#define WDOG_PROGRAM_COUNTER 0x1c74
+
+#define OSM_CYCLE_COUNTER_USE_XO_EDGE_EN BIT(8)
+#define PLL_MODE 0x0
+#define PLL_L_VAL 0x4
+#define PLL_USER_CTRL 0xC
+#define PLL_CONFIG_CTL_LO 0x10
+#define PLL_TEST_CTL_HI 0x1C
+#define PLL_STATUS 0x2C
+#define PLL_LOCK_DET_MASK BIT(16)
+#define PLL_WAIT_LOCK_TIME_US 5
+#define PLL_WAIT_LOCK_TIME_NS (PLL_WAIT_LOCK_TIME_US * 1000)
+#define PLL_MIN_LVAL 43
+
+#define CC_ZERO_BEHAV_CTRL 0x100C
+#define SPM_CC_DCVS_DISABLE 0x1020
+#define SPM_CC_CTRL 0x1028
+#define SPM_CC_HYSTERESIS 0x101C
+#define SPM_CORE_RET_MAPPING 0x1024
+
+#define LLM_FREQ_VOTE_HYSTERESIS 0x102C
+#define LLM_VOLT_VOTE_HYSTERESIS 0x1030
+#define LLM_INTF_DCVS_DISABLE 0x1034
+
+#define ENABLE_OVERRIDE BIT(0)
+
+#define ITM_CL0_DISABLE_CL1_ENABLED 0x2
+#define ITM_CL0_ENABLED_CL1_DISABLE 0x1
+
+#define APM_MX_MODE 0
+#define APM_APC_MODE BIT(1)
+#define APM_MODE_SWITCH_MASK (BVAL(4, 2, 7) | BVAL(1, 0, 3))
+#define APM_MX_MODE_VAL 0
+#define APM_APC_MODE_VAL 0x3
+
+#define GPLL_SEL 0x400
+#define PLL_EARLY_SEL 0x500
+#define PLL_MAIN_SEL 0x300
+#define RCG_UPDATE 0x3
+#define RCG_UPDATE_SUCCESS 0x2
+#define PLL_POST_DIV1 0x1F
+#define PLL_POST_DIV2 0x11F
+
+#define LLM_SW_OVERRIDE_REG 0x1038
+#define VMIN_REDUC_ENABLE_REG 0x103C
+#define VMIN_REDUC_TIMER_REG 0x1040
+#define PDN_FSM_CTRL_REG 0x1070
+#define CC_BOOST_TIMER_REG0 0x1074
+#define CC_BOOST_TIMER_REG1 0x1078
+#define CC_BOOST_TIMER_REG2 0x107C
+#define CC_BOOST_EN_MASK BIT(0)
+#define PS_BOOST_EN_MASK BIT(1)
+#define DCVS_BOOST_EN_MASK BIT(2)
+#define PC_RET_EXIT_DROOP_EN_MASK BIT(3)
+#define WFX_DROOP_EN_MASK BIT(4)
+#define DCVS_DROOP_EN_MASK BIT(5)
+#define LMH_PS_EN_MASK BIT(6)
+#define IGNORE_PLL_LOCK_MASK BIT(15)
+#define SAFE_FREQ_WAIT_NS 1000
+#define DCVS_BOOST_TIMER_REG0 0x1084
+#define DCVS_BOOST_TIMER_REG1 0x1088
+#define DCVS_BOOST_TIMER_REG2 0x108C
+#define PS_BOOST_TIMER_REG0 0x1094
+#define PS_BOOST_TIMER_REG1 0x1098
+#define PS_BOOST_TIMER_REG2 0x109C
+#define BOOST_PROG_SYNC_DELAY_REG 0x10A0
+#define DROOP_CTRL_REG 0x10A4
+#define DROOP_PROG_SYNC_DELAY_REG 0x10B8
+#define DROOP_UNSTALL_TIMER_CTRL_REG 0x10AC
+#define DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG 0x10B0
+#define DROOP_WAIT_TO_RELEASE_TIMER_CTRL1_REG 0x10B4
+
+#define DCVS_DROOP_TIMER_CTRL 0x10B8
+#define SEQ_MEM_ADDR 0x500
+#define SEQ_CFG_BR_ADDR 0x170
+#define MAX_INSTRUCTIONS 256
+#define MAX_BR_INSTRUCTIONS 49
+
+#define MAX_MEM_ACC_LEVELS 4
+#define MAX_MEM_ACC_VAL_PER_LEVEL 3
+#define MAX_MEM_ACC_VALUES (MAX_MEM_ACC_LEVELS * \
+ MAX_MEM_ACC_VAL_PER_LEVEL)
+#define MEM_ACC_READ_MASK 0x7
+
+#define TRACE_CTRL 0x1F38
+#define TRACE_CTRL_EN_MASK BIT(0)
+#define TRACE_CTRL_ENABLE 1
+#define TRACE_CTRL_DISABLE 0
+#define TRACE_CTRL_PACKET_TYPE_MASK BVAL(2, 1, 3)
+#define TRACE_CTRL_PACKET_TYPE_SHIFT 1
+#define TRACE_CTRL_PERIODIC_TRACE_EN_MASK BIT(3)
+#define TRACE_CTRL_PERIODIC_TRACE_ENABLE BIT(3)
+#define PERIODIC_TRACE_TIMER_CTRL 0x1F3C
+#define PERIODIC_TRACE_MIN_NS 1000
+#define PERIODIC_TRACE_MAX_NS 21474836475
+#define PERIODIC_TRACE_DEFAULT_NS 1000000
+
+static void __iomem *virt_base;
+
+#define lmh_lite_clk_src_source_val 1
+
+#define FIXDIV(div) (div ? (2 * (div) - 1) : (0))
+
+#define F(f, s, div, m, n) \
+ { \
+ .freq_hz = (f), \
+ .src_clk = &s.c, \
+ .m_val = (m), \
+ .n_val = ~((n)-(m)) * !!(n), \
+ .d_val = ~(n),\
+ .div_src_val = BVAL(4, 0, (int)FIXDIV(div)) \
+ | BVAL(10, 8, s##_source_val), \
+ }
+
+/*
+ * OSM sequencer microcode, programmed into SEQ_MEM at boot.  Opaque
+ * hardware-specific instruction words supplied by the silicon vendor;
+ * not decodable from this file.
+ */
+static u32 seq_instr[] = {
+	0xc2005000, 0x2c9e3b21, 0xc0ab2cdc, 0xc2882525, 0x359dc491,
+	0x700a500b, 0x70005001, 0x390938c8, 0xcb44c833, 0xce56cd54,
+	0x341336e0, 0xadba0000, 0x10004000, 0x70005001, 0x1000500c,
+	0xc792c5a1, 0x501625e1, 0x3da335a2, 0x50170006, 0x50150006,
+	0xafb9c633, 0xacb31000, 0xacb41000, 0x1000c422, 0x500baefc,
+	0x5001700a, 0xaefd7000, 0x700b5010, 0x700c5012, 0xadb9ad41,
+	0x181b0000, 0x500f500c, 0x34135011, 0x84b9181b, 0xbd808539,
+	0x2ba40003, 0x0006a001, 0x10007105, 0x1000500e, 0x1c0a500c,
+	0x3b181c01, 0x3b431c06, 0x10001c07, 0x39831c06, 0x500c1c07,
+	0x1c0a1c02, 0x10000000, 0x70015002, 0x10000000, 0x50038103,
+	0x50047002, 0x10007003, 0x39853b44, 0x50038104, 0x40037002,
+	0x70095005, 0xb1c0a146, 0x238b0003, 0x10004005, 0x848b8308,
+	0x1000850c, 0x848e830d, 0x1000850c, 0x3a4c5006, 0x3a8f39cd,
+	0x40063ad0, 0x50071000, 0x2c127006, 0x4007a00f, 0x71050006,
+	0x1000700d, 0x1c1aa964, 0x700d4007, 0x50071000, 0x1c167006,
+	0x50125010, 0x40072411, 0x4007700d, 0xa00f1000, 0x0006a821,
+	0x40077105, 0x500c700d, 0x1c1591ad, 0x5011500f, 0x10000000,
+	0x500c2bd4, 0x0006a00f, 0x10007105, 0xa821a00f, 0x70050006,
+	0x91ad500c, 0x500f1c15, 0x10005011, 0x1c162bce, 0x50125010,
+	0xa82aa022, 0x71050006, 0x1c1591a6, 0x5011500f, 0x5014500c,
+	0x0006a00f, 0x00007105, 0x91a41000, 0x22175013, 0x1c1aa963,
+	0x22171000, 0x1c1aa963, 0x50081000, 0x40087007, 0x1c1aa963,
+	0x70085009, 0x10004009, 0x850c848e, 0x0003b1c0, 0x400d2b99,
+	0x500d1000, 0xabaf1000, 0x853184b0, 0x0003bb80, 0xa0371000,
+	0x71050006, 0x85481000, 0xbf8084c3, 0x2ba80003, 0xbf8084c2,
+	0x2ba70003, 0xbf8084c1, 0x2ba60003, 0x8ec71000, 0xc6dd8dc3,
+	0x8c1625ec, 0x8d498c97, 0x8ec61c00, 0xc6dd8dc2, 0x8c1325ec,
+	0x8d158c94, 0x8ec51c00, 0xc6dd8dc1, 0x8c1025ec, 0x8d128c91,
+	0x8dc01c00, 0x182cc633, 0x84c08548, 0x0003bf80, 0x84c12ba9,
+	0x0003bf80, 0x84c22baa, 0x0003bf80, 0x10002bab, 0x8dc08ec4,
+	0x25ecc6dd, 0x8c948c13, 0x1c008d15, 0x8dc18ec5, 0x25ecc6dd,
+	0x8c978c16, 0x1c008d49, 0x8dc28ec6, 0x25ecc6dd, 0x8ccb8c4a,
+	0x1c008d4c, 0xc6338dc3, 0x1000af9b, 0xa759a79a, 0x1000a718,
+};
+
+/*
+ * Branch-target offsets for the sequencer program above, written to
+ * SEQ_CFG_BR_ADDR registers.  Vendor-supplied; offsets are into the
+ * sequencer instruction memory.
+ */
+static u32 seq_br_instr[] = {
+	0x28c, 0x1e6, 0x238, 0xd0, 0xec,
+	0xf4, 0xbc, 0xc4, 0x9c, 0xac,
+	0xfc, 0xe2, 0x154, 0x174, 0x17c,
+	0x10a, 0x126, 0x13a, 0x11c, 0x98,
+	0x160, 0x1a6, 0x19a, 0x1ae, 0x1c0,
+	0x1ce, 0x1d2, 0x30, 0x60, 0x86,
+	0x7c, 0x1d8, 0x34, 0x3c, 0x56,
+	0x5a, 0x1de, 0x2e, 0x222, 0x212,
+	0x202, 0x254, 0x264, 0x274, 0x288,
+};
+
+/* Parents resolved at probe via devm_clk_get(); NULL until then. */
+DEFINE_EXT_CLK(xo_ao, NULL);
+DEFINE_EXT_CLK(sys_apcsaux_clk_gcc, NULL);
+DEFINE_EXT_CLK(lmh_lite_clk_src, NULL);
+
+/* One row of the OSM frequency lookup table, parsed from DT. */
+struct osm_entry {
+	u16 virtual_corner;	/* CPR virtual corner for this frequency */
+	u16 open_loop_volt;	/* open-loop voltage, programmed into VOLT_REG */
+	u32 freq_data;		/* packed PLL src/div/L-val word */
+	u32 override_data;	/* PLL override word */
+	u32 spare_data;		/* spare word (carries the ACC level) */
+	long frequency;		/* CPU frequency in Hz */
+};
+
+static struct dentry *osm_debugfs_base;
+
+/*
+ * Per-cluster OSM (Operating State Manager) clock.  Wraps the generic
+ * struct clk and carries the DT-parsed LUT, register bases and the
+ * feature/FSM configuration for one CPU cluster.
+ */
+struct clk_osm {
+	struct clk c;
+	struct osm_entry osm_table[OSM_TABLE_SIZE];
+	struct dentry *debugfs;
+	struct regulator *vdd_reg;
+	struct platform_device *vdd_dev;
+	void *vbases[NUM_BASES];	/* ioremapped OSM/PLL bases */
+	unsigned long pbases[NUM_BASES];	/* physical OSM/PLL bases */
+	spinlock_t lock;
+
+	u32 cpu_reg_mask;
+	u32 num_entries;	/* valid rows in osm_table */
+	u32 cluster_num;	/* 0 = power cluster, 1 = perf cluster */
+	u32 irq;
+	u32 apm_crossover_vc;	/* virtual corner at the APM switch point */
+	/* cycle-counter based frequency estimation parameters */
+	u32 cycle_counter_reads;
+	u32 cycle_counter_delay;
+	u32 cycle_counter_factor;
+	u64 total_cycle_counter;
+	u32 prev_cycle_counter;
+	/* register offsets/addresses parsed from DT */
+	u32 l_val_base;
+	u32 apcs_itm_present;
+	u32 apcs_cfg_rcgr;
+	u32 apcs_cmd_rcgr;
+	u32 apcs_pll_user_ctl;
+	u32 apcs_mem_acc_cfg[MAX_MEM_ACC_VAL_PER_LEVEL];
+	u32 apcs_mem_acc_val[MAX_MEM_ACC_VALUES];
+	u32 llm_sw_overr[LLM_SW_OVERRIDE_CNT];
+	u32 apm_mode_ctl;
+	u32 apm_ctrl_status;
+	u32 osm_clk_rate;
+	u32 xo_clk_rate;
+	bool secure_init;	/* true when TZ does not program the OSM */
+	/* hardware FSM enables, from DT booleans */
+	bool red_fsm_en;
+	bool boost_fsm_en;
+	bool safe_fsm_en;
+	bool ps_fsm_en;
+	bool droop_fsm_en;
+	bool wfx_fsm_en;
+	bool pc_fsm_en;
+
+	enum clk_osm_trace_method trace_method;
+	enum clk_osm_trace_packet_id trace_id;
+	struct notifier_block panic_notifier;
+	u32 trace_periodic_timer;
+	bool trace_en;
+};
+
+/*
+ * Read-modify-write of an OSM register: update only the bits in @mask,
+ * and skip the bus write entirely when the value is unchanged.
+ */
+static inline void clk_osm_masked_write_reg(struct clk_osm *c, u32 val,
+					    u32 offset, u32 mask)
+{
+	u32 cur, next;
+
+	cur = readl_relaxed((char *)c->vbases[OSM_BASE] + offset);
+	next = (cur & ~mask) | (val & mask);
+
+	if (next != cur)
+		writel_relaxed(next, (char *)c->vbases[OSM_BASE] + offset);
+}
+
+/* Write @val to the OSM register block at @offset (relaxed, no barrier). */
+static inline void clk_osm_write_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+	writel_relaxed(val, (char *)c->vbases[OSM_BASE] + offset);
+}
+
+/* Read the OSM register at @offset (relaxed, no barrier). */
+static inline int clk_osm_read_reg(struct clk_osm *c, u32 offset)
+{
+	return readl_relaxed((char *)c->vbases[OSM_BASE] + offset);
+}
+
+/* As clk_osm_read_reg() but without the MSM register-access log. */
+static inline int clk_osm_read_reg_no_log(struct clk_osm *c, u32 offset)
+{
+	return readl_relaxed_no_log((char *)c->vbases[OSM_BASE] + offset);
+}
+
+/* Convert a duration in nanoseconds to OSM clock cycles. */
+static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec)
+{
+	u64 cycles = (u64)c->osm_clk_rate * nsec;
+
+	do_div(cycles, 1000000000);
+	return cycles;
+}
+
+/* Recover the wrapping clk_osm from the embedded generic clk. */
+static inline struct clk_osm *to_clk_osm(struct clk *c)
+{
+	return container_of(c, struct clk_osm, c);
+}
+
+/* OSM clocks are never handed off as enabled at boot. */
+static enum handoff clk_osm_handoff(struct clk *c)
+{
+	return HANDOFF_DISABLED_CLK;
+}
+
+/* Enumerate supported rates: the n-th fmax entry, or -ENXIO past the end. */
+static long clk_osm_list_rate(struct clk *c, unsigned n)
+{
+	return (n < c->num_fmax) ? c->fmax[n] : -ENXIO;
+}
+
+/*
+ * Round @rate up to the nearest supported fmax frequency; requests above
+ * the table saturate to the highest entry.  Guard against an empty fmax
+ * table, which previously read fmax[-1] out of bounds.
+ */
+static long clk_osm_round_rate(struct clk *c, unsigned long rate)
+{
+	int i;
+
+	if (!c->num_fmax)
+		return -EINVAL;
+
+	for (i = 0; i < c->num_fmax; i++)
+		if (rate <= c->fmax[i])
+			return c->fmax[i];
+
+	return c->fmax[i-1];
+}
+
+/* Linear search for an exact @rate match; returns the row index or -EINVAL. */
+static int clk_osm_search_table(struct osm_entry *table, int entries, long rate)
+{
+	int idx = 0;
+
+	while (idx < entries) {
+		if (table[idx].frequency == rate)
+			return idx;
+		idx++;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Request a new performance state from the OSM hardware.  @rate must be
+ * an exact LUT frequency; the matching table index is written to
+ * DCVS_PERF_STATE_DESIRED_REG, bracketed by the LLM software-override
+ * sequence when one was provided in DT.
+ */
+static int clk_osm_set_rate(struct clk *c, unsigned long rate)
+{
+	struct clk_osm *cpuclk = to_clk_osm(c);
+	int index = 0;
+	unsigned long r_rate;
+
+	r_rate = clk_osm_round_rate(c, rate);
+
+	if (rate != r_rate) {
+		pr_err("invalid rate requested rate=%ld\n", rate);
+		return -EINVAL;
+	}
+
+	/* Convert rate to table index */
+	index = clk_osm_search_table(cpuclk->osm_table,
+				     cpuclk->num_entries, r_rate);
+	if (index < 0) {
+		pr_err("cannot set cluster %u to %lu\n",
+		       cpuclk->cluster_num, rate);
+		return -EINVAL;
+	}
+	pr_debug("rate: %lu --> index %d\n", rate, index);
+
+	/*
+	 * llm_sw_overr is an array member, so the old test of its address
+	 * was always true.  Only apply the override sequence when a
+	 * non-zero override was actually read from DT.
+	 */
+	if (cpuclk->llm_sw_overr[0]) {
+		clk_osm_write_reg(cpuclk, cpuclk->llm_sw_overr[0],
+				  LLM_SW_OVERRIDE_REG);
+		clk_osm_write_reg(cpuclk, cpuclk->llm_sw_overr[1],
+				  LLM_SW_OVERRIDE_REG);
+		udelay(1);
+	}
+
+	/* Choose index and send request to OSM hardware */
+	clk_osm_write_reg(cpuclk, index, DCVS_PERF_STATE_DESIRED_REG);
+
+	if (cpuclk->llm_sw_overr[0]) {
+		udelay(1);
+		clk_osm_write_reg(cpuclk, cpuclk->llm_sw_overr[2],
+				  LLM_SW_OVERRIDE_REG);
+	}
+
+	/* Make sure the write goes through before proceeding */
+	mb();
+
+	return 0;
+}
+
+/* Enable the OSM hardware for this cluster and give it 5us to come up. */
+static int clk_osm_enable(struct clk *c)
+{
+	struct clk_osm *cpuclk = to_clk_osm(c);
+
+	clk_osm_write_reg(cpuclk, 1, ENABLE_REG);
+
+	/* Make sure the write goes through before proceeding */
+	mb();
+
+	/* Wait for 5us for OSM hardware to enable */
+	udelay(5);
+
+	pr_debug("OSM clk enabled for cluster=%d\n", cpuclk->cluster_num);
+
+	return 0;
+}
+
+/* clk framework ops for the per-cluster OSM CPU clocks. */
+static struct clk_ops clk_ops_cpu_osm = {
+	.enable = clk_osm_enable,
+	.set_rate = clk_osm_set_rate,
+	.round_rate = clk_osm_round_rate,
+	.list_rate = clk_osm_list_rate,
+	.handoff = clk_osm_handoff,
+};
+
+/* Per-cluster voltage rails, resolved in clk_osm_resources_init(). */
+static struct regulator *vdd_pwrcl;
+static struct regulator *vdd_perfcl;
+
+/* OSM controller clock: 200 MHz from lmh_lite with a 1.5 divider. */
+static struct clk_freq_tbl ftbl_osm_clk_src[] = {
+	F( 200000000, lmh_lite_clk_src, 1.5, 0, 0),
+	F_END
+};
+
+static struct rcg_clk osm_clk_src = {
+	.cmd_rcgr_reg = APCS_COMMON_LMH_CMD_RCGR,
+	.set_rate = set_rate_hid,
+	.freq_tbl = ftbl_osm_clk_src,
+	.current_freq = &rcg_dummy_freq,
+	.base = &virt_base,
+	.c = {
+		.dbg_name = "osm_clk_src",
+		.ops = &clk_ops_rcg,
+		CLK_INIT(osm_clk_src.c),
+	},
+};
+
+/* Power cluster (cluster 0, CPU mask 0x3). */
+static struct clk_osm pwrcl_clk = {
+	.cluster_num = 0,
+	.cpu_reg_mask = 0x3,
+	.c = {
+		.dbg_name = "pwrcl_clk",
+		.ops = &clk_ops_cpu_osm,
+		.parent = &xo_ao.c,
+		CLK_INIT(pwrcl_clk.c),
+	},
+};
+
+/* Performance cluster (cluster 1, CPU mask 0x103). */
+static struct clk_osm perfcl_clk = {
+	.cluster_num = 1,
+	.cpu_reg_mask = 0x103,
+	.c = {
+		.dbg_name = "perfcl_clk",
+		.ops = &clk_ops_cpu_osm,
+		.parent = &xo_ao.c,
+		CLK_INIT(perfcl_clk.c),
+	},
+};
+
+/* Clocks exported to DT consumers by this driver. */
+static struct clk_lookup cpu_clocks_osm[] = {
+	CLK_LIST(pwrcl_clk),
+	CLK_LIST(perfcl_clk),
+	CLK_LIST(sys_apcsaux_clk_gcc),
+	CLK_LIST(xo_ao),
+	CLK_LIST(osm_clk_src),
+};
+
+/* Dump the parsed LUT (one line per row) at debug log level. */
+static void clk_osm_print_osm_table(struct clk_osm *c)
+{
+	struct osm_entry *entry = c->osm_table;
+	u32 src, div, l_val;
+	int idx;
+
+	pr_debug("Index, Frequency, VC, OLV (mv), PLL Src, PLL Div, L-Val, ACC Level\n");
+	for (idx = 0; idx < c->num_entries; idx++) {
+		/* freq_data packs PLL source [27:26], divider [25:24], L [7:0] */
+		src = (entry[idx].freq_data & GENMASK(27, 26)) >> 26;
+		div = (entry[idx].freq_data & GENMASK(25, 24)) >> 24;
+		l_val = entry[idx].freq_data & GENMASK(7, 0);
+
+		pr_debug("%3d, %11lu, %2u, %5u, %6u, %8u, %7u, %5u\n",
+			 idx,
+			 entry[idx].frequency,
+			 entry[idx].virtual_corner,
+			 entry[idx].open_loop_volt,
+			 src,
+			 div,
+			 l_val,
+			 entry[idx].spare_data);
+	}
+	pr_debug("APM crossover corner: %d\n",
+		 c->apm_crossover_vc);
+}
+
+/*
+ * Parse the @prop_name LUT from DT into c->osm_table and build the clk
+ * fmax[] table from rows whose config field equals MAX_CONFIG.  Rows
+ * beyond the DT table repeat the last entry so all OSM_TABLE_SIZE
+ * hardware slots are populated.  Returns 0 or a negative errno.
+ */
+static int clk_osm_get_lut(struct platform_device *pdev,
+			   struct clk_osm *c, char *prop_name)
+{
+	struct clk *clk = &c->c;
+	struct device_node *of = pdev->dev.of_node;
+	int prop_len, total_elems, num_rows, i, j, k;
+	int rc = 0;
+	u32 *array;
+	u32 data;
+	bool last_entry = false;
+
+	if (!of_find_property(of, prop_name, &prop_len)) {
+		dev_err(&pdev->dev, "missing %s\n", prop_name);
+		return -EINVAL;
+	}
+
+	total_elems = prop_len / sizeof(u32);
+	if (total_elems % NUM_FIELDS) {
+		dev_err(&pdev->dev, "bad length %d\n", prop_len);
+		return -EINVAL;
+	}
+
+	num_rows = total_elems / NUM_FIELDS;
+
+	clk->fmax = devm_kzalloc(&pdev->dev, num_rows * sizeof(unsigned long),
+				 GFP_KERNEL);
+	if (!clk->fmax)
+		return -ENOMEM;
+
+	array = devm_kzalloc(&pdev->dev, prop_len, GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of, prop_name, array, total_elems);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to parse OSM table, rc=%d\n", rc);
+		goto exit;
+	}
+
+	pr_debug("%s: Entries in Table: %d\n", __func__, num_rows);
+	c->num_entries = num_rows;
+	if (c->num_entries > OSM_TABLE_SIZE) {
+		pr_err("LUT entries %d exceed maximum size %d\n",
+		       c->num_entries, OSM_TABLE_SIZE);
+		/* Use the common exit path so the scratch array is freed. */
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	/*
+	 * i tracks the source row and stops advancing at the last DT row,
+	 * so rows beyond num_rows repeat the final entry.
+	 */
+	for (i = 0, j = 0, k = 0; j < OSM_TABLE_SIZE; j++) {
+		c->osm_table[j].frequency = array[i + FREQ];
+		c->osm_table[j].freq_data = array[i + FREQ_DATA];
+		c->osm_table[j].override_data = array[i + PLL_OVERRIDES];
+		c->osm_table[j].spare_data = array[i + SPARE_DATA];
+		pr_debug("index=%d freq=%ld freq_data=0x%x override_data=0x%x spare_data=0x%x\n",
+			 j, c->osm_table[j].frequency,
+			 c->osm_table[j].freq_data,
+			 c->osm_table[j].override_data,
+			 c->osm_table[j].spare_data);
+
+		/* Config field (bits 18:16) == MAX_CONFIG marks an fmax row. */
+		data = (array[i + FREQ_DATA] & GENMASK(18, 16)) >> 16;
+		if (!last_entry && data == MAX_CONFIG) {
+			clk->fmax[k] = array[i];
+			k++;
+		}
+
+		if (i < total_elems - NUM_FIELDS)
+			i += NUM_FIELDS;
+		else
+			last_entry = true;
+	}
+	clk->num_fmax = k;
+exit:
+	devm_kfree(&pdev->dev, array);
+	return rc;
+}
+
+/*
+ * Parse per-cluster OSM configuration from DT into pwrcl_clk/perfcl_clk:
+ * register addresses, cycle-counter parameters, FSM enables and (when
+ * qcom,osm-no-tz is set) the mem-acc programming values.  Returns 0 or
+ * a negative errno.
+ */
+static int clk_osm_parse_dt_configs(struct platform_device *pdev)
+{
+	struct device_node *of = pdev->dev.of_node;
+	u32 *array;
+	int i, rc = 0;
+
+	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+			     GFP_KERNEL);
+	if (!array)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(of, "qcom,l-val-base",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,l-val-base property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	pwrcl_clk.l_val_base = array[pwrcl_clk.cluster_num];
+	perfcl_clk.l_val_base = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-itm-present",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-itm-present property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	pwrcl_clk.apcs_itm_present = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_itm_present = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-cfg-rcgr",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-cfg-rcgr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	pwrcl_clk.apcs_cfg_rcgr = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_cfg_rcgr = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-cmd-rcgr",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-cmd-rcgr property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	pwrcl_clk.apcs_cmd_rcgr = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_cmd_rcgr = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apcs-pll-user-ctl",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apcs-pll-user-ctl property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	pwrcl_clk.apcs_pll_user_ctl = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apcs_pll_user_ctl = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apm-mode-ctl",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apm-mode-ctl property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	pwrcl_clk.apm_mode_ctl = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apm_mode_ctl = array[perfcl_clk.cluster_num];
+
+	rc = of_property_read_u32_array(of, "qcom,apm-ctrl-status",
+					array, MAX_CLUSTER_CNT);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,apm-ctrl-status property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	pwrcl_clk.apm_ctrl_status = array[pwrcl_clk.cluster_num];
+	perfcl_clk.apm_ctrl_status = array[perfcl_clk.cluster_num];
+
+	/* LLM software overrides are optional; missing entries stay zero. */
+	for (i = 0; i < LLM_SW_OVERRIDE_CNT; i++)
+		of_property_read_u32_index(of, "qcom,llm-sw-overr",
+					   pwrcl_clk.cluster_num *
+					   LLM_SW_OVERRIDE_CNT + i,
+					   &pwrcl_clk.llm_sw_overr[i]);
+
+	for (i = 0; i < LLM_SW_OVERRIDE_CNT; i++)
+		of_property_read_u32_index(of, "qcom,llm-sw-overr",
+					   perfcl_clk.cluster_num *
+					   LLM_SW_OVERRIDE_CNT + i,
+					   &perfcl_clk.llm_sw_overr[i]);
+
+	rc = of_property_read_u32(of, "qcom,xo-clk-rate",
+				  &pwrcl_clk.xo_clk_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,xo-clk-rate property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	perfcl_clk.xo_clk_rate = pwrcl_clk.xo_clk_rate;
+
+	rc = of_property_read_u32(of, "qcom,osm-clk-rate",
+				  &pwrcl_clk.osm_clk_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,osm-clk-rate property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+	perfcl_clk.osm_clk_rate = pwrcl_clk.osm_clk_rate;
+
+	rc = of_property_read_u32(of, "qcom,cc-reads",
+				  &pwrcl_clk.cycle_counter_reads);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,cc-reads property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+	perfcl_clk.cycle_counter_reads = pwrcl_clk.cycle_counter_reads;
+
+	rc = of_property_read_u32(of, "qcom,cc-delay",
+				  &pwrcl_clk.cycle_counter_delay);
+	if (rc)
+		dev_dbg(&pdev->dev, "no delays between cycle counter reads\n");
+	else
+		perfcl_clk.cycle_counter_delay = pwrcl_clk.cycle_counter_delay;
+
+	rc = of_property_read_u32(of, "qcom,cc-factor",
+				  &pwrcl_clk.cycle_counter_factor);
+	if (rc)
+		dev_dbg(&pdev->dev, "no factor specified for cycle counter estimation\n");
+	else
+		perfcl_clk.cycle_counter_factor =
+			pwrcl_clk.cycle_counter_factor;
+
+	perfcl_clk.red_fsm_en = pwrcl_clk.red_fsm_en =
+		of_property_read_bool(of, "qcom,red-fsm-en");
+
+	perfcl_clk.boost_fsm_en = pwrcl_clk.boost_fsm_en =
+		of_property_read_bool(of, "qcom,boost-fsm-en");
+
+	perfcl_clk.safe_fsm_en = pwrcl_clk.safe_fsm_en =
+		of_property_read_bool(of, "qcom,safe-fsm-en");
+
+	perfcl_clk.ps_fsm_en = pwrcl_clk.ps_fsm_en =
+		of_property_read_bool(of, "qcom,ps-fsm-en");
+
+	perfcl_clk.droop_fsm_en = pwrcl_clk.droop_fsm_en =
+		of_property_read_bool(of, "qcom,droop-fsm-en");
+
+	perfcl_clk.wfx_fsm_en = pwrcl_clk.wfx_fsm_en =
+		of_property_read_bool(of, "qcom,wfx-fsm-en");
+
+	perfcl_clk.pc_fsm_en = pwrcl_clk.pc_fsm_en =
+		of_property_read_bool(of, "qcom,pc-fsm-en");
+
+	devm_kfree(&pdev->dev, array);
+
+	perfcl_clk.secure_init = pwrcl_clk.secure_init =
+		of_property_read_bool(pdev->dev.of_node, "qcom,osm-no-tz");
+
+	/* mem-acc values are only needed when this driver programs the OSM. */
+	if (!pwrcl_clk.secure_init)
+		return rc;
+
+	rc = of_property_read_u32_array(of, "qcom,pwrcl-apcs-mem-acc-cfg",
+					pwrcl_clk.apcs_mem_acc_cfg,
+					MAX_MEM_ACC_VAL_PER_LEVEL);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,pwrcl-apcs-mem-acc-cfg property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	/*
+	 * Fix: the return value was previously dropped, so a missing
+	 * perfcl mem-acc-cfg property went undetected (stale rc == 0).
+	 */
+	rc = of_property_read_u32_array(of, "qcom,perfcl-apcs-mem-acc-cfg",
+					perfcl_clk.apcs_mem_acc_cfg,
+					MAX_MEM_ACC_VAL_PER_LEVEL);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,perfcl-apcs-mem-acc-cfg property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of, "qcom,pwrcl-apcs-mem-acc-val",
+					pwrcl_clk.apcs_mem_acc_val,
+					MAX_MEM_ACC_VALUES);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,pwrcl-apcs-mem-acc-val property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of, "qcom,perfcl-apcs-mem-acc-val",
+					perfcl_clk.apcs_mem_acc_val,
+					MAX_MEM_ACC_VALUES);
+	if (rc) {
+		dev_err(&pdev->dev, "unable to find qcom,perfcl-apcs-mem-acc-val property, rc=%d\n",
+			rc);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/*
+ * Map the OSM and per-cluster PLL register regions, the APCS common
+ * block, and resolve the regulators, regulator-owning devices and
+ * parent clocks needed by both clusters.  All mappings/gets are devm,
+ * so nothing needs explicit release on failure.
+ */
+static int clk_osm_resources_init(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct resource *res;
+	struct clk *c;
+	unsigned long pbase;
+	int i, rc = 0;
+	void *vbase;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "osm");
+	if (!res) {
+		dev_err(&pdev->dev,
+			"Unable to get platform resource for osm");
+		return -ENOMEM;
+	}
+
+	pwrcl_clk.pbases[OSM_BASE] = (unsigned long)res->start;
+	pwrcl_clk.vbases[OSM_BASE] = devm_ioremap(&pdev->dev, res->start,
+						  resource_size(res));
+	if (!pwrcl_clk.vbases[OSM_BASE]) {
+		dev_err(&pdev->dev, "Unable to map in osm base\n");
+		return -ENOMEM;
+	}
+
+	/* The perf cluster's OSM block sits one table stride above pwrcl's. */
+	perfcl_clk.pbases[OSM_BASE] = pwrcl_clk.pbases[OSM_BASE] +
+		perfcl_clk.cluster_num * OSM_CORE_TABLE_SIZE;
+	perfcl_clk.vbases[OSM_BASE] = pwrcl_clk.vbases[OSM_BASE] +
+		perfcl_clk.cluster_num * OSM_CORE_TABLE_SIZE;
+
+	for (i = 0; i < MAX_CLUSTER_CNT; i++) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   i == pwrcl_clk.cluster_num ?
+						   "pwrcl_pll" : "perfcl_pll");
+		if (!res) {
+			dev_err(&pdev->dev,
+				"Unable to get platform resource\n");
+			return -ENOMEM;
+		}
+		pbase = (unsigned long)res->start;
+		vbase = devm_ioremap(&pdev->dev, res->start,
+				     resource_size(res));
+
+		if (!vbase) {
+			dev_err(&pdev->dev, "Unable to map in base\n");
+			return -ENOMEM;
+		}
+
+		if (i == pwrcl_clk.cluster_num) {
+			pwrcl_clk.pbases[PLL_BASE] = pbase;
+			pwrcl_clk.vbases[PLL_BASE] = vbase;
+		} else {
+			perfcl_clk.pbases[PLL_BASE] = pbase;
+			perfcl_clk.vbases[PLL_BASE] = vbase;
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs_common");
+	if (!res) {
+		dev_err(&pdev->dev, "Failed to get apcs common base\n");
+		return -EINVAL;
+	}
+
+	virt_base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!virt_base) {
+		dev_err(&pdev->dev, "Failed to map apcs common registers\n");
+		return -ENOMEM;
+	}
+
+	vdd_pwrcl = devm_regulator_get(&pdev->dev, "vdd-pwrcl");
+	if (IS_ERR(vdd_pwrcl)) {
+		rc = PTR_ERR(vdd_pwrcl);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the pwrcl vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	vdd_perfcl = devm_regulator_get(&pdev->dev, "vdd-perfcl");
+	if (IS_ERR(vdd_perfcl)) {
+		rc = PTR_ERR(vdd_perfcl);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get the perfcl vreg, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	pwrcl_clk.vdd_reg = vdd_pwrcl;
+	perfcl_clk.vdd_reg = vdd_perfcl;
+
+	/*
+	 * NOTE(review): node->parent->parent assumes the supply phandle is
+	 * two levels below the regulator platform device node — confirm
+	 * against the DT layout; a NULL parent would oops here.
+	 */
+	node = of_parse_phandle(pdev->dev.of_node, "vdd-pwrcl-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-pwrcl-supply\n");
+		return -EINVAL;
+	}
+
+	pwrcl_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!pwrcl_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-pwrcl-supply node\n");
+		return -EINVAL;
+	}
+
+	node = of_parse_phandle(pdev->dev.of_node,
+				"vdd-perfcl-supply", 0);
+	if (!node) {
+		pr_err("Unable to find vdd-perfcl-supply\n");
+		return -EINVAL;
+	}
+
+	perfcl_clk.vdd_dev = of_find_device_by_node(node->parent->parent);
+	if (!perfcl_clk.vdd_dev) {
+		pr_err("Unable to find device for vdd-perfcl-supply\n");
+		return -EINVAL;
+	}
+
+	c = devm_clk_get(&pdev->dev, "aux_clk");
+	if (IS_ERR(c)) {
+		rc = PTR_ERR(c);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get aux_clk, rc=%d\n",
+				rc);
+		return rc;
+	}
+	sys_apcsaux_clk_gcc.c.parent = c;
+
+	c = devm_clk_get(&pdev->dev, "xo_ao");
+	if (IS_ERR(c)) {
+		rc = PTR_ERR(c);
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "Unable to get xo_ao clk, rc=%d\n",
+				rc);
+		return rc;
+	}
+	xo_ao.c.parent = c;
+
+	return 0;
+}
+
/*
 * One-time bring-up of a cluster PLL.
 *
 * Programs fixed hardware-specific values into the PLL mode, L-value,
 * user-control and config-control registers, then walks the PLL through
 * its enable sequence by toggling bits in PLL_MODE (reset -> standby ->
 * run), with the required lock/settle delays in between.  The constants
 * and the write ordering come from the PLL programming guide; do not
 * reorder.
 */
static void clk_osm_setup_cluster_pll(struct clk_osm *c)
{
	/* Place the PLL in reset and program L-val / user / config words */
	writel_relaxed(0x0, c->vbases[PLL_BASE] + PLL_MODE);
	writel_relaxed(0x20, c->vbases[PLL_BASE] + PLL_L_VAL);
	writel_relaxed(0x01000008, c->vbases[PLL_BASE] +
		       PLL_USER_CTRL);
	writel_relaxed(0x20004AA8, c->vbases[PLL_BASE] +
		       PLL_CONFIG_CTL_LO);
	writel_relaxed(0x2, c->vbases[PLL_BASE] +
		       PLL_MODE);

	/* Ensure writes complete before delaying */
	mb();

	/* Wait for the PLL to lock before deasserting bypass */
	udelay(PLL_WAIT_LOCK_TIME_US);

	writel_relaxed(0x6, c->vbases[PLL_BASE] + PLL_MODE);

	/* Ensure write completes before delaying */
	mb();

	usleep_range(50, 75);

	/* Enable the PLL outputs */
	writel_relaxed(0x7, c->vbases[PLL_BASE] + PLL_MODE);
}
+
+static int clk_osm_setup_hw_table(struct clk_osm *c)
+{
+ struct osm_entry *entry = c->osm_table;
+ int i;
+ u32 freq_val, volt_val, override_val, spare_val;
+ u32 table_entry_offset, last_spare, last_virtual_corner = 0;
+
+ for (i = 0; i < OSM_TABLE_SIZE; i++) {
+ if (i < c->num_entries) {
+ freq_val = entry[i].freq_data;
+ volt_val = BVAL(21, 16, entry[i].virtual_corner)
+ | BVAL(11, 0, entry[i].open_loop_volt);
+ override_val = entry[i].override_data;
+ spare_val = entry[i].spare_data;
+
+ if (last_virtual_corner && last_virtual_corner ==
+ entry[i].virtual_corner && last_spare !=
+ entry[i].spare_data) {
+ pr_err("invalid LUT entry at row=%d virtual_corner=%d, spare_data=%d\n",
+ i, entry[i].virtual_corner,
+ entry[i].spare_data);
+ return -EINVAL;
+ }
+ last_virtual_corner = entry[i].virtual_corner;
+ last_spare = entry[i].spare_data;
+ }
+
+ table_entry_offset = i * OSM_REG_SIZE;
+ clk_osm_write_reg(c, i, INDEX_REG + table_entry_offset);
+ clk_osm_write_reg(c, freq_val, FREQ_REG + table_entry_offset);
+ clk_osm_write_reg(c, volt_val, VOLT_REG + table_entry_offset);
+ clk_osm_write_reg(c, override_val, OVERRIDE_REG +
+ table_entry_offset);
+ clk_osm_write_reg(c, spare_val, SPARE_REG +
+ table_entry_offset);
+ }
+
+ /* Make sure all writes go through */
+ mb();
+
+ return 0;
+}
+
/*
 * Fill in the open-loop voltage and CPR virtual corner for every row of
 * the cluster's OSM table, using the OPP table registered against the
 * cluster's regulator device.
 *
 * Rows whose config field is not MAX_CONFIG are not present in the OPP
 * table; they inherit the voltage/corner of the previous row (row 0 must
 * therefore always be a MAX_CONFIG row).
 *
 * Returns 0 on success or a negative errno on lookup failure.
 */
static int clk_osm_resolve_open_loop_voltages(struct clk_osm *c)
{
	struct regulator *regulator = c->vdd_reg;
	struct dev_pm_opp *opp;
	unsigned long freq;
	u32 vc, mv, data;
	int i, rc = 0;

	/*
	 * Determine frequency -> virtual corner -> open-loop voltage
	 * mapping from the OPP table.
	 */
	for (i = 0; i < OSM_TABLE_SIZE; i++) {
		freq = c->osm_table[i].frequency;
		/*
		 * Only frequencies that are supported across all configurations
		 * are present in the OPP table associated with the regulator
		 * device.
		 */
		/* Bits [18:16] of freq_data hold the config/core-count field */
		data = (c->osm_table[i].freq_data & GENMASK(18, 16)) >> 16;
		if (data != MAX_CONFIG) {
			if (i < 1) {
				pr_err("Invalid LUT entry at index 0\n");
				return -EINVAL;
			}
			/* Inherit voltage/corner from the previous row */
			c->osm_table[i].open_loop_volt =
				c->osm_table[i-1].open_loop_volt;
			c->osm_table[i].virtual_corner =
				c->osm_table[i-1].virtual_corner;
			continue;
		}

		/* OPP lookups must run under RCU read-side protection */
		rcu_read_lock();
		opp = dev_pm_opp_find_freq_exact(&c->vdd_dev->dev, freq, true);
		if (IS_ERR(opp)) {
			rc = PTR_ERR(opp);
			if (rc == -ERANGE)
				pr_err("Frequency %lu not found\n", freq);
			goto exit;
		}

		/*
		 * The OPP "voltage" here encodes the 1-based CPR virtual
		 * corner, not a real voltage.
		 */
		vc = dev_pm_opp_get_voltage(opp);
		if (!vc) {
			pr_err("No virtual corner found for frequency %lu\n",
			       freq);
			rc = -ERANGE;
			goto exit;
		}

		rcu_read_unlock();

		/* Voltage is in uv. Convert to mv */
		mv = regulator_list_corner_voltage(regulator, vc) / 1000;

		/* CPR virtual corners are zero-based numbered */
		vc--;
		c->osm_table[i].open_loop_volt = mv;
		c->osm_table[i].virtual_corner = vc;
	}

	return 0;
exit:
	rcu_read_unlock();
	return rc;
}
+
/*
 * Look up the APM crossover virtual corner for cluster @c.
 *
 * The crossover corner is published as a placeholder OPP at frequency 0
 * on the cluster's regulator device; its "voltage" encodes the 1-based
 * virtual corner at which the APM switches supplies.  The result is
 * stored zero-based in c->apm_crossover_vc.
 *
 * @pdev is currently unused.  Returns 0 on success or a negative errno.
 */
static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
					     struct platform_device *pdev)
{
	struct dev_pm_opp *opp;
	unsigned long freq = 0;
	int vc, rc = 0;

	/* OPP lookups must run under RCU read-side protection */
	rcu_read_lock();
	opp = dev_pm_opp_find_freq_exact(&c->vdd_dev->dev, freq, true);
	if (IS_ERR(opp)) {
		rc = PTR_ERR(opp);
		if (rc == -ERANGE)
			pr_debug("APM placeholder frequency entry not found\n");
		goto exit;
	}
	vc = dev_pm_opp_get_voltage(opp);
	if (!vc) {
		pr_debug("APM crossover corner not found\n");
		rc = -ERANGE;
		goto exit;
	}
	rcu_read_unlock();
	/* CPR virtual corners are zero-based numbered */
	vc--;
	c->apm_crossover_vc = vc;

	return 0;
exit:
	rcu_read_unlock();
	return rc;
}
+
/*
 * Configure the OSM core-count (CC) DCVS policy for both clusters from
 * device-tree properties: up/down hysteresis timers, the power-collapse
 * index override, how cores in retention are counted, and whether CC
 * based DCVS is disabled altogether.  Missing properties leave the
 * corresponding hardware defaults in place (logged at debug level).
 *
 * Returns 0 on success or -ENOMEM if the scratch array cannot be
 * allocated.
 */
static int clk_osm_set_cc_policy(struct platform_device *pdev)
{
	int rc = 0, val;
	u32 *array;
	struct device_node *of = pdev->dev.of_node;

	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
			     GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/* Up-hysteresis timer lives in bits [31:16] of SPM_CC_HYSTERESIS */
	rc = of_property_read_u32_array(of, "qcom,up-timer", array,
					MAX_CLUSTER_CNT);
	if (rc) {
		dev_dbg(&pdev->dev, "No up timer value, rc=%d\n",
			rc);
	} else {
		val = clk_osm_read_reg(&pwrcl_clk, SPM_CC_HYSTERESIS)
			| BVAL(31, 16, clk_osm_count_ns(&pwrcl_clk,
					array[pwrcl_clk.cluster_num]));
		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_HYSTERESIS);
		val = clk_osm_read_reg(&perfcl_clk, SPM_CC_HYSTERESIS)
			| BVAL(31, 16, clk_osm_count_ns(&perfcl_clk,
					array[perfcl_clk.cluster_num]));
		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_HYSTERESIS);
	}

	/* Down-hysteresis timer lives in bits [15:0] of SPM_CC_HYSTERESIS */
	rc = of_property_read_u32_array(of, "qcom,down-timer",
					array, MAX_CLUSTER_CNT);
	if (rc) {
		dev_dbg(&pdev->dev, "No down timer value, rc=%d\n", rc);
	} else {
		val = clk_osm_read_reg(&pwrcl_clk, SPM_CC_HYSTERESIS)
			| BVAL(15, 0, clk_osm_count_ns(&pwrcl_clk,
				       array[pwrcl_clk.cluster_num]));
		clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_HYSTERESIS);
		val = clk_osm_read_reg(&perfcl_clk, SPM_CC_HYSTERESIS)
			| BVAL(15, 0, clk_osm_count_ns(&perfcl_clk,
				       array[perfcl_clk.cluster_num]));
		clk_osm_write_reg(&perfcl_clk, val, SPM_CC_HYSTERESIS);
	}

	/* OSM index override for cluster PC */
	rc = of_property_read_u32_array(of, "qcom,pc-override-index",
					array, MAX_CLUSTER_CNT);
	if (rc) {
		dev_dbg(&pdev->dev, "No PC override index value, rc=%d\n",
			rc);
		clk_osm_write_reg(&pwrcl_clk, 0, CC_ZERO_BEHAV_CTRL);
		clk_osm_write_reg(&perfcl_clk, 0, CC_ZERO_BEHAV_CTRL);
	} else {
		val = BVAL(6, 1, array[pwrcl_clk.cluster_num])
			| ENABLE_OVERRIDE;
		clk_osm_write_reg(&pwrcl_clk, val, CC_ZERO_BEHAV_CTRL);
		val = BVAL(6, 1, array[perfcl_clk.cluster_num])
			| ENABLE_OVERRIDE;
		clk_osm_write_reg(&perfcl_clk, val, CC_ZERO_BEHAV_CTRL);
	}

	/* Wait for the writes to complete */
	mb();

	/* 0 = retention counts as active, 1 = counts as inactive */
	rc = of_property_read_bool(pdev->dev.of_node, "qcom,set-ret-inactive");
	if (rc) {
		dev_dbg(&pdev->dev, "Treat cores in retention as active\n");
		val = 0;
	} else {
		dev_dbg(&pdev->dev, "Treat cores in retention as inactive\n");
		val = 1;
	}

	clk_osm_write_reg(&pwrcl_clk, val, SPM_CORE_RET_MAPPING);
	clk_osm_write_reg(&perfcl_clk, val, SPM_CORE_RET_MAPPING);

	rc = of_property_read_bool(pdev->dev.of_node, "qcom,disable-cc-dvcs");
	if (rc) {
		dev_dbg(&pdev->dev, "Disabling CC based DCVS\n");
		val = 1;
	} else
		val = 0;

	clk_osm_write_reg(&pwrcl_clk, val, SPM_CC_DCVS_DISABLE);
	clk_osm_write_reg(&perfcl_clk, val, SPM_CC_DCVS_DISABLE);

	/* Wait for the writes to complete */
	mb();

	devm_kfree(&pdev->dev, array);
	return 0;
}
+
/*
 * Program the sequencer scratch registers used during the ITM -> OSM
 * handoff: the physical address of the CPUSS ITM_PRESENT register
 * (SEQ_REG 37/38 as lo/hi address pair) and the value each cluster's
 * sequencer writes there to disable its own ITM while keeping the other
 * cluster's ITM enabled (SEQ_REG 39).
 */
static void clk_osm_setup_itm_to_osm_handoff(void)
{
	/* Program address of ITM_PRESENT of CPUSS */
	clk_osm_write_reg(&pwrcl_clk, pwrcl_clk.apcs_itm_present,
			  SEQ_REG(37));
	clk_osm_write_reg(&pwrcl_clk, 0, SEQ_REG(38));
	clk_osm_write_reg(&perfcl_clk, perfcl_clk.apcs_itm_present,
			  SEQ_REG(37));
	clk_osm_write_reg(&perfcl_clk, 0, SEQ_REG(38));

	/*
	 * Program data to write to ITM_PRESENT assuming ITM for other domain
	 * is enabled and the ITM for this domain is to be disabled.
	 */
	clk_osm_write_reg(&pwrcl_clk, ITM_CL0_DISABLE_CL1_ENABLED,
			  SEQ_REG(39));
	clk_osm_write_reg(&perfcl_clk, ITM_CL0_ENABLED_CL1_DISABLE,
			  SEQ_REG(39));
}
+
/*
 * Configure the limits-management (LLM) frequency-vote policy for both
 * clusters from device tree: up/down hysteresis timers (us, converted
 * to OSM cycles) and whether LLM frequency votes are honored at all.
 * Missing timer properties leave hardware defaults in place.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int clk_osm_set_llm_freq_policy(struct platform_device *pdev)
{
	struct device_node *of = pdev->dev.of_node;
	u32 *array;
	int rc = 0, val, regval;

	array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
			     GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	/*
	 * Setup Timer to control how long OSM should wait before performing
	 * DCVS when a LLM up frequency request is received.
	 * Time is specified in us.
	 */
	rc = of_property_read_u32_array(of, "qcom,llm-freq-up-timer", array,
					MAX_CLUSTER_CNT);
	if (rc) {
		dev_dbg(&pdev->dev, "Unable to get CC up timer value: %d\n",
			rc);
	} else {
		val = clk_osm_read_reg(&pwrcl_clk, LLM_FREQ_VOTE_HYSTERESIS)
			| BVAL(31, 16, clk_osm_count_ns(&pwrcl_clk,
					array[pwrcl_clk.cluster_num]));
		clk_osm_write_reg(&pwrcl_clk, val, LLM_FREQ_VOTE_HYSTERESIS);
		val = clk_osm_read_reg(&perfcl_clk, LLM_FREQ_VOTE_HYSTERESIS)
			| BVAL(31, 16, clk_osm_count_ns(&perfcl_clk,
					array[perfcl_clk.cluster_num]));
		clk_osm_write_reg(&perfcl_clk, val, LLM_FREQ_VOTE_HYSTERESIS);
	}

	/*
	 * Setup Timer to control how long OSM should wait before performing
	 * DCVS when a LLM down frequency request is received.
	 * Time is specified in us.
	 */
	rc = of_property_read_u32_array(of, "qcom,llm-freq-down-timer",
					array, MAX_CLUSTER_CNT);
	if (rc) {
		dev_dbg(&pdev->dev, "No LLM Frequency down timer value: %d\n",
			rc);
	} else {
		val = clk_osm_read_reg(&pwrcl_clk, LLM_FREQ_VOTE_HYSTERESIS)
			| BVAL(15, 0, clk_osm_count_ns(&pwrcl_clk,
				       array[pwrcl_clk.cluster_num]));
		clk_osm_write_reg(&pwrcl_clk, val, LLM_FREQ_VOTE_HYSTERESIS);
		val = clk_osm_read_reg(&perfcl_clk, LLM_FREQ_VOTE_HYSTERESIS)
			| BVAL(15, 0, clk_osm_count_ns(&perfcl_clk,
				       array[perfcl_clk.cluster_num]));
		clk_osm_write_reg(&perfcl_clk, val, LLM_FREQ_VOTE_HYSTERESIS);
	}

	/* Enable or disable honoring of LLM frequency requests */
	rc = of_property_read_bool(pdev->dev.of_node,
				   "qcom,enable-llm-freq-vote");
	if (rc) {
		dev_dbg(&pdev->dev, "Honoring LLM Frequency requests\n");
		val = 0;
	} else
		val = 1;

	/* Enable or disable LLM FREQ DVCS */
	regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
	clk_osm_write_reg(&pwrcl_clk, regval, LLM_INTF_DCVS_DISABLE);
	regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
	clk_osm_write_reg(&perfcl_clk, regval, LLM_INTF_DCVS_DISABLE);

	/* Wait for the write to complete */
	mb();

	devm_kfree(&pdev->dev, array);
	return 0;
}
+
+static int clk_osm_set_llm_volt_policy(struct platform_device *pdev)
+{
+ struct device_node *of = pdev->dev.of_node;
+ u32 *array;
+ int rc = 0, val, regval;
+
+ array = devm_kzalloc(&pdev->dev, MAX_CLUSTER_CNT * sizeof(u32),
+ GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ /*
+ * Setup Timer to control how long OSM should wait before performing
+ * DCVS when a LLM up voltage request is received.
+ * Time is specified in us.
+ */
+ rc = of_property_read_u32_array(of, "qcom,llm-volt-up-timer",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_dbg(&pdev->dev, "No LLM voltage up timer value, rc=%d\n",
+ rc);
+ } else {
+ val = clk_osm_read_reg(&pwrcl_clk, LLM_VOLT_VOTE_HYSTERESIS)
+ | BVAL(31, 16, clk_osm_count_ns(&pwrcl_clk,
+ array[pwrcl_clk.cluster_num]));
+ clk_osm_write_reg(&pwrcl_clk, val, LLM_VOLT_VOTE_HYSTERESIS);
+ val = clk_osm_read_reg(&perfcl_clk, LLM_VOLT_VOTE_HYSTERESIS)
+ | BVAL(31, 16, clk_osm_count_ns(&perfcl_clk,
+ array[perfcl_clk.cluster_num]));
+ clk_osm_write_reg(&perfcl_clk, val, LLM_VOLT_VOTE_HYSTERESIS);
+ }
+
+ /*
+ * Setup Timer to control how long OSM should wait before performing
+ * DCVS when a LLM down voltage request is received.
+ * Time is specified in us.
+ */
+ rc = of_property_read_u32_array(of, "qcom,llm-volt-down-timer",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_dbg(&pdev->dev, "No LLM Voltage down timer value: %d\n",
+ rc);
+ } else {
+ val = clk_osm_read_reg(&pwrcl_clk, LLM_VOLT_VOTE_HYSTERESIS)
+ | BVAL(15, 0, clk_osm_count_ns(&pwrcl_clk,
+ array[pwrcl_clk.cluster_num]));
+ clk_osm_write_reg(&pwrcl_clk, val, LLM_VOLT_VOTE_HYSTERESIS);
+ val = clk_osm_read_reg(&perfcl_clk, LLM_VOLT_VOTE_HYSTERESIS)
+ | BVAL(15, 0, clk_osm_count_ns(&perfcl_clk,
+ array[perfcl_clk.cluster_num]));
+ clk_osm_write_reg(&perfcl_clk, val, LLM_VOLT_VOTE_HYSTERESIS);
+ }
+
+ /* Enable or disable honoring of LLM Voltage requests */
+ rc = of_property_read_bool(pdev->dev.of_node,
+ "qcom,enable-llm-volt-vote");
+ if (rc) {
+ dev_dbg(&pdev->dev, "Honoring LLM Voltage requests\n");
+ val = 0;
+ } else
+ val = BIT(1);
+
+ /* Enable or disable LLM VOLT DVCS */
+ regval = val | clk_osm_read_reg(&pwrcl_clk, LLM_INTF_DCVS_DISABLE);
+ clk_osm_write_reg(&pwrcl_clk, val, LLM_INTF_DCVS_DISABLE);
+ regval = val | clk_osm_read_reg(&perfcl_clk, LLM_INTF_DCVS_DISABLE);
+ clk_osm_write_reg(&perfcl_clk, val, LLM_INTF_DCVS_DISABLE);
+
+ /* Wait for the writes to complete */
+ mb();
+
+ devm_kfree(&pdev->dev, array);
+ return 0;
+}
+
/*
 * Program the sequencer scratch registers that drive the Array Power
 * Mux (APM): the APM control/status register addresses, the mode values
 * used to switch between the VDD_APCC and VDD_MX supplies, and the
 * mask/values used to poll for switch completion.
 */
static void clk_osm_program_apm_regs(struct clk_osm *c)
{
	/*
	 * Program address of the control register used to configure
	 * the Array Power Mux controller
	 */
	clk_osm_write_reg(c, c->apm_mode_ctl, SEQ_REG(2));

	/* Program mode value to switch APM from VDD_APCC to VDD_MX */
	clk_osm_write_reg(c, APM_MX_MODE, SEQ_REG(22));

	/* Program mode value to switch APM from VDD_MX to VDD_APCC */
	clk_osm_write_reg(c, APM_APC_MODE, SEQ_REG(25));

	/* Program address of controller status register */
	clk_osm_write_reg(c, c->apm_ctrl_status, SEQ_REG(3));

	/* Program mask used to determine status of APM power supply switch */
	clk_osm_write_reg(c, APM_MODE_SWITCH_MASK, SEQ_REG(24));

	/* Program value used to determine current APM power supply is VDD_MX */
	clk_osm_write_reg(c, APM_MX_MODE_VAL, SEQ_REG(23));

	/*
	 * Program value used to determine current APM power supply
	 * is VDD_APCC
	 */
	clk_osm_write_reg(c, APM_APC_MODE_VAL, SEQ_REG(26));
}
+
/*
 * Program the MEM-ACC related sequencer registers: constants, compare
 * instructions, the read mask, and the per-level MEM-ACC values and
 * addresses resolved from device tree.  Only needed when the kernel
 * performs secure-world initialization itself (c->secure_init); with TZ
 * present this is done there instead.
 */
static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
{
	int i;

	if (!c->secure_init)
		return;

	/* SEQ_REG(49) holds the physical address of SEQ_REG(50) */
	clk_osm_write_reg(c, c->pbases[OSM_BASE] + SEQ_REG(50),
			  SEQ_REG(49));
	clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(1), SEQ_REG(50));
	clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(1), SEQ_REG(51));
	clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(2), SEQ_REG(52));
	clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(3), SEQ_REG(53));
	clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(4), SEQ_REG(54));
	clk_osm_write_reg(c, MEM_ACC_INSTR_COMP(0), SEQ_REG(55));
	clk_osm_write_reg(c, MEM_ACC_INSTR_COMP(1), SEQ_REG(56));
	clk_osm_write_reg(c, MEM_ACC_INSTR_COMP(2), SEQ_REG(57));
	clk_osm_write_reg(c, MEM_ACC_INSTR_COMP(3), SEQ_REG(58));
	clk_osm_write_reg(c, MEM_ACC_READ_MASK, SEQ_REG(59));

	/* Per-corner MEM-ACC values parsed from DT */
	for (i = 0; i < MAX_MEM_ACC_VALUES; i++)
		clk_osm_write_reg(c, c->apcs_mem_acc_val[i],
				  MEM_ACC_SEQ_REG_VAL_START(i));

	/* MEM-ACC register addresses parsed from DT */
	for (i = 0; i < MAX_MEM_ACC_VAL_PER_LEVEL; i++)
		clk_osm_write_reg(c, c->apcs_mem_acc_cfg[i],
				  MEM_ACC_SEQ_REG_CFG_START(i));
}
+
+void clk_osm_setup_sequencer(struct clk_osm *c)
+{
+ u32 i;
+
+ pr_debug("Setting up sequencer for cluster=%d\n", c->cluster_num);
+ for (i = 0; i < ARRAY_SIZE(seq_instr); i++) {
+ clk_osm_write_reg(c, seq_instr[i],
+ (long)(SEQ_MEM_ADDR + i * 4));
+ }
+
+ pr_debug("Setting up sequencer branch instructions for cluster=%d\n",
+ c->cluster_num);
+ for (i = 0; i < ARRAY_SIZE(seq_br_instr); i++) {
+ clk_osm_write_reg(c, seq_br_instr[i],
+ (long)(SEQ_CFG_BR_ADDR + i * 4));
+ }
+}
+
+static void clk_osm_setup_cycle_counters(struct clk_osm *c)
+{
+ u32 ratio = c->osm_clk_rate;
+ u32 val = 0;
+
+ /* Enable cycle counter */
+ val |= BIT(0);
+ /* Setup OSM clock to XO ratio */
+ do_div(ratio, c->xo_clk_rate);
+ val |= BVAL(5, 1, ratio - 1) | OSM_CYCLE_COUNTER_USE_XO_EDGE_EN;
+ clk_osm_write_reg(c, val, OSM_CYCLE_COUNTER_CTRL_REG);
+ c->total_cycle_counter = 0;
+ c->prev_cycle_counter = 0;
+ pr_debug("OSM to XO clock ratio: %d\n", ratio);
+}
+
/*
 * Apply OSM hardware workarounds (WAs): stash PDN_FSM_CTRL values with
 * and without IGNORE_PLL_LOCK into sequencer scratch registers, either
 * directly (secure_init) or through secure-world register writes
 * (scm_io_write), and set a PLL test-control bit on cluster 0.
 *
 * NOTE(review): the exact purpose of SEQ_REG(40..48) values is taken
 * from the sequencer microcode contract, which is not visible here.
 */
static void clk_osm_setup_osm_was(struct clk_osm *c)
{
	u32 val;

	val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
	val |= IGNORE_PLL_LOCK_MASK;

	if (c->secure_init) {
		/* Value with IGNORE_PLL_LOCK set ... */
		clk_osm_write_reg(c, val, SEQ_REG(47));
		/* ... and with it cleared */
		val &= ~IGNORE_PLL_LOCK_MASK;
		clk_osm_write_reg(c, val, SEQ_REG(48));

		/* Physical addresses of scratch registers for the sequencer */
		clk_osm_write_reg(c, c->pbases[OSM_BASE] + SEQ_REG(42),
				  SEQ_REG(40));
		clk_osm_write_reg(c, c->pbases[OSM_BASE] + SEQ_REG(43),
				  SEQ_REG(41));
		clk_osm_write_reg(c, 0x1, SEQ_REG(44));
		clk_osm_write_reg(c, 0x0, SEQ_REG(45));
		clk_osm_write_reg(c, c->pbases[OSM_BASE] + PDN_FSM_CTRL_REG,
				  SEQ_REG(46));
	} else {
		/* Registers are secure-world protected; go through SCM */
		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(47), val);
		val &= ~IGNORE_PLL_LOCK_MASK;
		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(48), val);
	}

	if (c->cluster_num == 0) {
		val = readl_relaxed(c->vbases[PLL_BASE] + PLL_TEST_CTL_HI)
			| BIT(13);
		writel_relaxed(val, c->vbases[PLL_BASE] +
			       PLL_TEST_CTL_HI);
	}

	/* Ensure writes complete before returning */
	mb();
}
+
/*
 * Enable and program the optional OSM hardware FSMs selected by the
 * per-cluster DT flags: Vmin reduction, core-count boost, safe-frequency
 * (DCVS) boost, power-save boost, WFx droop, PC/RET exit droop and DCVS
 * droop.  Each FSM gets its enable bit in PDN_FSM_CTRL_REG plus its
 * timer values, expressed in OSM clock cycles via clk_osm_count_ns().
 * Timer writes are read-modify-write so unrelated fields are preserved.
 */
static void clk_osm_setup_fsms(struct clk_osm *c)
{
	u32 val;

	/* Reduction FSM */
	if (c->red_fsm_en) {
		val = clk_osm_read_reg(c, VMIN_REDUC_ENABLE_REG) | BIT(0);
		clk_osm_write_reg(c, val, VMIN_REDUC_ENABLE_REG);
		/* 10 us hysteresis before Vmin reduction kicks in */
		clk_osm_write_reg(c, BVAL(15, 0, clk_osm_count_ns(c, 10000)),
				  VMIN_REDUC_TIMER_REG);
	}

	/* Boost FSM */
	if (c->boost_fsm_en) {
		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
		clk_osm_write_reg(c, val | CC_BOOST_EN_MASK, PDN_FSM_CTRL_REG);
		/* Timer0: [15:0] PLL lock wait, [31:16] safe-freq wait */
		val = clk_osm_read_reg(c, CC_BOOST_TIMER_REG0);
		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
		val |= BVAL(31, 16, clk_osm_count_ns(c,
						     SAFE_FREQ_WAIT_NS));
		clk_osm_write_reg(c, val, CC_BOOST_TIMER_REG0);

		val = clk_osm_read_reg(c, CC_BOOST_TIMER_REG1);
		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
		val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
		clk_osm_write_reg(c, val, CC_BOOST_TIMER_REG1);

		val = clk_osm_read_reg(c, CC_BOOST_TIMER_REG2);
		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
		clk_osm_write_reg(c, val, CC_BOOST_TIMER_REG2);
	}

	/* Safe Freq FSM */
	if (c->safe_fsm_en) {
		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
		clk_osm_write_reg(c, val | DCVS_BOOST_EN_MASK,
				  PDN_FSM_CTRL_REG);

		val = clk_osm_read_reg(c, DCVS_BOOST_TIMER_REG0);
		val |= BVAL(31, 16, clk_osm_count_ns(c, SAFE_FREQ_WAIT_NS));
		clk_osm_write_reg(c, val, DCVS_BOOST_TIMER_REG0);

		val = clk_osm_read_reg(c, DCVS_BOOST_TIMER_REG1);
		val |= BVAL(15, 0, clk_osm_count_ns(c, PLL_WAIT_LOCK_TIME_NS));
		clk_osm_write_reg(c, val, DCVS_BOOST_TIMER_REG1);
	}

	/* PS FSM */
	if (c->ps_fsm_en) {
		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
		clk_osm_write_reg(c, val | PS_BOOST_EN_MASK, PDN_FSM_CTRL_REG);

		val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG0) |
			BVAL(31, 16, clk_osm_count_ns(c, 1000));
		clk_osm_write_reg(c, val, PS_BOOST_TIMER_REG0);

		val = clk_osm_read_reg(c, PS_BOOST_TIMER_REG1) |
			clk_osm_count_ns(c, 1000);
		clk_osm_write_reg(c, val, PS_BOOST_TIMER_REG1);
	}

	/* PLL signal timing control */
	if (c->boost_fsm_en || c->safe_fsm_en || c->ps_fsm_en)
		clk_osm_write_reg(c, 0x5, BOOST_PROG_SYNC_DELAY_REG);

	/* Droop FSM */
	if (c->wfx_fsm_en) {
		/* WFx FSM */
		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
		clk_osm_write_reg(c, val | WFX_DROOP_EN_MASK, PDN_FSM_CTRL_REG);

		val = clk_osm_read_reg(c, DROOP_UNSTALL_TIMER_CTRL_REG) |
			BVAL(31, 16, clk_osm_count_ns(c, 1000));
		clk_osm_write_reg(c, val, DROOP_UNSTALL_TIMER_CTRL_REG);

		val = clk_osm_read_reg(c,
				       DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG) |
			BVAL(31, 16, clk_osm_count_ns(c, 1000));
		clk_osm_write_reg(c, val,
				  DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
	}

	/* PC/RET FSM */
	if (c->pc_fsm_en) {
		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
		clk_osm_write_reg(c, val | PC_RET_EXIT_DROOP_EN_MASK,
				  PDN_FSM_CTRL_REG);

		val = clk_osm_read_reg(c, DROOP_UNSTALL_TIMER_CTRL_REG) |
			BVAL(15, 0, clk_osm_count_ns(c, 5000));
		clk_osm_write_reg(c, val, DROOP_UNSTALL_TIMER_CTRL_REG);
	}

	/* DCVS droop FSM - only if RCGwRC is not used for di/dt control */
	if (c->droop_fsm_en) {
		val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
		clk_osm_write_reg(c, val | DCVS_DROOP_EN_MASK,
				  PDN_FSM_CTRL_REG);
	}

	/* Shared droop timing for any droop-capable FSM */
	if (c->wfx_fsm_en || c->ps_fsm_en || c->droop_fsm_en) {
		val = clk_osm_read_reg(c,
				       DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG) |
			BVAL(15, 0, clk_osm_count_ns(c, 1000));
		clk_osm_write_reg(c, val,
				  DROOP_WAIT_TO_RELEASE_TIMER_CTRL0_REG);
		clk_osm_write_reg(c, 0x1, DROOP_PROG_SYNC_DELAY_REG);
		val = clk_osm_read_reg(c, DROOP_CTRL_REG) |
			BVAL(22, 16, 0x2);
		clk_osm_write_reg(c, val, DROOP_CTRL_REG);
	}
}
+
+static void clk_osm_do_additional_setup(struct clk_osm *c,
+ struct platform_device *pdev)
+{
+ if (!c->secure_init)
+ return;
+
+ dev_info(&pdev->dev, "Performing additional OSM setup due to lack of TZ for cluster=%d\n",
+ c->cluster_num);
+
+ clk_osm_write_reg(c, BVAL(23, 16, 0xF), SPM_CC_CTRL);
+
+ /* PLL LVAL programming */
+ clk_osm_write_reg(c, c->l_val_base, SEQ_REG(0));
+ clk_osm_write_reg(c, PLL_MIN_LVAL, SEQ_REG(21));
+
+ /* PLL post-div programming */
+ clk_osm_write_reg(c, c->apcs_pll_user_ctl, SEQ_REG(18));
+ clk_osm_write_reg(c, PLL_POST_DIV2, SEQ_REG(19));
+ clk_osm_write_reg(c, PLL_POST_DIV1, SEQ_REG(29));
+
+ /* APM Programming */
+ clk_osm_program_apm_regs(c);
+
+ /* MEM-ACC Programming */
+ clk_osm_program_mem_acc_regs(c);
+
+ /* GFMUX Programming */
+ clk_osm_write_reg(c, c->apcs_cfg_rcgr, SEQ_REG(16));
+ clk_osm_write_reg(c, GPLL_SEL, SEQ_REG(17));
+ clk_osm_write_reg(c, PLL_EARLY_SEL, SEQ_REG(20));
+ clk_osm_write_reg(c, PLL_MAIN_SEL, SEQ_REG(32));
+ clk_osm_write_reg(c, c->apcs_cmd_rcgr, SEQ_REG(33));
+ clk_osm_write_reg(c, RCG_UPDATE, SEQ_REG(34));
+ clk_osm_write_reg(c, RCG_UPDATE_SUCCESS, SEQ_REG(35));
+ clk_osm_write_reg(c, RCG_UPDATE, SEQ_REG(36));
+
+ pr_debug("seq_size: %lu, seqbr_size: %lu\n", ARRAY_SIZE(seq_instr),
+ ARRAY_SIZE(seq_br_instr));
+ clk_osm_setup_sequencer(&pwrcl_clk);
+ clk_osm_setup_sequencer(&perfcl_clk);
+}
+
/*
 * Program the APM crossover virtual corner (the corner at which the APM
 * switches between the APC and MX supplies) into sequencer SEQ_REG(1),
 * either directly when the kernel owns secure init, or via an SCM
 * register write when the register is secure-world protected.
 */
static void clk_osm_apm_vc_setup(struct clk_osm *c)
{
	/*
	 * APM crossover virtual corner at which the switch
	 * from APC to MX and vice-versa should take place.
	 */
	if (c->secure_init) {
		clk_osm_write_reg(c, c->apm_crossover_vc, SEQ_REG(1));

		/* Ensure writes complete before returning */
		mb();
	} else {
		scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(1),
			     c->apm_crossover_vc);
	}
}
+
+static irqreturn_t clk_osm_debug_irq_cb(int irq, void *data)
+{
+ struct clk_osm *c = data;
+ unsigned long first, second, total_delta = 0;
+ u32 val, factor;
+ int i;
+
+ val = clk_osm_read_reg(c, DCVS_PERF_STATE_DEVIATION_INTR_STAT);
+ if (val & BIT(0)) {
+ pr_info("OS DCVS performance state deviated\n");
+ clk_osm_write_reg(c, BIT(0),
+ DCVS_PERF_STATE_DEVIATION_INTR_CLEAR);
+ }
+
+ val = clk_osm_read_reg(c,
+ DCVS_PERF_STATE_DEVIATION_CORRECTED_INTR_STAT);
+ if (val & BIT(0)) {
+ pr_info("OS DCVS performance state corrected\n");
+ clk_osm_write_reg(c, BIT(0),
+ DCVS_PERF_STATE_DEVIATION_CORRECTED_INTR_CLEAR);
+ }
+
+ val = clk_osm_read_reg(c, DCVS_PERF_STATE_MET_INTR_STAT);
+ if (val & BIT(0)) {
+ pr_info("OS DCVS performance state desired reached\n");
+ clk_osm_write_reg(c, BIT(0), DCVS_PERF_STATE_MET_INTR_CLR);
+ }
+
+ factor = c->cycle_counter_factor ? c->cycle_counter_factor : 1;
+
+ for (i = 0; i < c->cycle_counter_reads; i++) {
+ first = clk_osm_read_reg(c, OSM_CYCLE_COUNTER_STATUS_REG);
+
+ if (c->cycle_counter_delay)
+ udelay(c->cycle_counter_delay);
+
+ second = clk_osm_read_reg(c, OSM_CYCLE_COUNTER_STATUS_REG);
+ total_delta = total_delta + ((second - first) / factor);
+ }
+
+ pr_info("cluster=%d, L_VAL (estimated)=%lu\n",
+ c->cluster_num, total_delta / c->cycle_counter_factor);
+
+ return IRQ_HANDLED;
+}
+
/*
 * Resolve the named platform IRQ for cluster @c, store it in c->irq,
 * and install clk_osm_debug_irq_cb as a shared, rising-edge handler.
 *
 * Returns 0 on success or a negative errno from IRQ lookup/request.
 */
static int clk_osm_setup_irq(struct platform_device *pdev, struct clk_osm *c,
			     char *irq_name)
{
	int rc = 0;

	/* Cache the IRQ number on the cluster while also error-checking it */
	rc = c->irq = platform_get_irq_byname(pdev, irq_name);
	if (rc < 0) {
		dev_err(&pdev->dev, "%s irq not specified\n", irq_name);
		return rc;
	}

	rc = devm_request_irq(&pdev->dev, c->irq,
			      clk_osm_debug_irq_cb,
			      IRQF_TRIGGER_RISING | IRQF_SHARED,
			      "OSM IRQ", c);
	if (rc)
		dev_err(&pdev->dev, "Request IRQ failed for OSM IRQ\n");

	return rc;
}
+
+static u32 find_voltage(struct clk_osm *c, unsigned long rate)
+{
+ struct osm_entry *table = c->osm_table;
+ int entries = c->num_entries, i;
+
+ for (i = 0; i < entries; i++) {
+ if (rate == table[i].frequency) {
+ /* OPP table voltages have units of mV */
+ return table[i].open_loop_volt * 1000;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int add_opp(struct clk_osm *c, struct device *dev)
+{
+ unsigned long rate = 0;
+ u32 uv;
+ long rc;
+ int j = 0;
+ unsigned long min_rate = c->c.fmax[0];
+ unsigned long max_rate = c->c.fmax[c->c.num_fmax - 1];
+
+ while (1) {
+ rate = c->c.fmax[j++];
+ uv = find_voltage(c, rate);
+ if (uv <= 0) {
+ pr_warn("No voltage for %lu.\n", rate);
+ return -EINVAL;
+ }
+
+ rc = dev_pm_opp_add(dev, rate, uv);
+ if (rc) {
+ pr_warn("failed to add OPP for %lu\n", rate);
+ return rc;
+ }
+
+ /*
+ * Print the OPP pair for the lowest and highest frequency for
+ * each device that we're populating. This is important since
+ * this information will be used by thermal mitigation and the
+ * scheduler.
+ */
+ if (rate == min_rate)
+ pr_info("Set OPP pair (%lu Hz, %d uv) on %s\n",
+ rate, uv, dev_name(dev));
+
+ if (rate == max_rate && max_rate != min_rate) {
+ pr_info("Set OPP pair (%lu Hz, %d uv) on %s\n",
+ rate, uv, dev_name(dev));
+ break;
+ }
+
+ if (min_rate == max_rate)
+ break;
+ }
+
+ return 0;
+}
+
/*
 * Map a logical CPU number to its cluster clock.
 *
 * Reads the CPU's hardware id (the DT "reg" property / MPIDR) and
 * matches it against each cluster's cpu_reg_mask.  Results are cached
 * in a static per-CPU array so the device-tree walk happens at most
 * once per CPU.
 *
 * Returns &pwrcl_clk.c or &perfcl_clk.c, or NULL if the CPU node or
 * "reg" property is missing or the hwid matches neither cluster.
 */
static struct clk *logical_cpu_to_clk(int cpu)
{
	struct device_node *cpu_node;
	const u32 *cell;
	u64 hwid;
	static struct clk *cpu_clk_map[NR_CPUS];

	if (cpu_clk_map[cpu])
		return cpu_clk_map[cpu];

	cpu_node = of_get_cpu_node(cpu, NULL);
	if (!cpu_node)
		goto fail;

	cell = of_get_property(cpu_node, "reg", NULL);
	if (!cell) {
		pr_err("%s: missing reg property\n", cpu_node->full_name);
		goto fail;
	}

	/* hwid matches a cluster iff it sets no bits outside its mask */
	hwid = of_read_number(cell, of_n_addr_cells(cpu_node));
	if ((hwid | pwrcl_clk.cpu_reg_mask) == pwrcl_clk.cpu_reg_mask) {
		cpu_clk_map[cpu] = &pwrcl_clk.c;
		return &pwrcl_clk.c;
	}
	if ((hwid | perfcl_clk.cpu_reg_mask) == perfcl_clk.cpu_reg_mask) {
		cpu_clk_map[cpu] = &perfcl_clk.c;
		return &perfcl_clk.c;
	}

fail:
	return NULL;
}
+
/*
 * Return a monotonically increasing 64-bit cycle count for @cpu's
 * cluster, built by accumulating deltas of the 32-bit hardware
 * OSM_CYCLE_COUNTER_STATUS_REG under the cluster's spinlock.
 *
 * Returns 0 (and logs an error) if the CPU maps to neither cluster.
 *
 * NOTE(review): the overflow branch adds UINT_MAX - prev + val; the
 * exact 32-bit wrap delta is that plus one, so each wrap undercounts
 * by one cycle — presumably tolerated, but worth confirming.
 */
static u64 clk_osm_get_cpu_cycle_counter(int cpu)
{
	struct clk_osm *c;
	u32 val;
	unsigned long flags;

	if (logical_cpu_to_clk(cpu) == &pwrcl_clk.c)
		c = &pwrcl_clk;
	else if (logical_cpu_to_clk(cpu) == &perfcl_clk.c)
		c = &perfcl_clk;
	else {
		pr_err("no clock device for CPU=%d\n", cpu);
		return 0;
	}

	spin_lock_irqsave(&c->lock, flags);
	val = clk_osm_read_reg_no_log(c, OSM_CYCLE_COUNTER_STATUS_REG);

	if (val < c->prev_cycle_counter) {
		/* Handle counter overflow */
		c->total_cycle_counter += UINT_MAX -
			c->prev_cycle_counter + val;
		c->prev_cycle_counter = val;
	} else {
		c->total_cycle_counter += val - c->prev_cycle_counter;
		c->prev_cycle_counter = val;
	}
	spin_unlock_irqrestore(&c->lock, flags);

	return c->total_cycle_counter;
}
+
/*
 * Register the OPP levels for every possible CPU's device, routing each
 * CPU to its cluster clock via logical_cpu_to_clk().  Failures are
 * reported with WARN but do not abort probing.
 */
static void populate_opp_table(struct platform_device *pdev)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (logical_cpu_to_clk(cpu) == &pwrcl_clk.c) {
			WARN(add_opp(&pwrcl_clk, get_cpu_device(cpu)),
			     "Failed to add OPP levels for power cluster\n");
		}
		if (logical_cpu_to_clk(cpu) == &perfcl_clk.c) {
			WARN(add_opp(&perfcl_clk, get_cpu_device(cpu)),
			     "Failed to add OPP levels for perf cluster\n");
		}
	}
}
+
+static int debugfs_get_trace_enable(void *data, u64 *val)
+{
+ struct clk_osm *c = data;
+
+ *val = c->trace_en;
+ return 0;
+}
+
+static int debugfs_set_trace_enable(void *data, u64 val)
+{
+ struct clk_osm *c = data;
+
+ clk_osm_masked_write_reg(c, val ? TRACE_CTRL_ENABLE :
+ TRACE_CTRL_DISABLE,
+ TRACE_CTRL, TRACE_CTRL_EN_MASK);
+ c->trace_en = val ? true : false;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_trace_enable_fops,
+ debugfs_get_trace_enable,
+ debugfs_set_trace_enable,
+ "%llu\n");
+
/* Longest accepted trace-method string ("periodic\n") plus terminator. */
#define MAX_DEBUG_BUF_LEN 15

/* Serializes debug_buf between the trace-method read and write handlers */
static DEFINE_MUTEX(debug_buf_mutex);
static char debug_buf[MAX_DEBUG_BUF_LEN];
+
/*
 * debugfs write handler for the trace-method file.  Accepts the strings
 * "periodic" (enables periodic trace packets with the default period)
 * or "xor" (disables periodic tracing); anything else is rejected with
 * -EINVAL.  Updates c->trace_method / c->trace_periodic_timer to match.
 */
static ssize_t debugfs_trace_method_set(struct file *file,
					const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct clk_osm *c = file->private_data;
	u32 val;

	if (IS_ERR(file) || file == NULL) {
		pr_err("input error %ld\n", PTR_ERR(file));
		return -EINVAL;
	}

	if (!c) {
		pr_err("invalid clk_osm handle\n");
		return -EINVAL;
	}

	/* count < MAX_DEBUG_BUF_LEN leaves room for the terminator below */
	if (count < MAX_DEBUG_BUF_LEN) {
		mutex_lock(&debug_buf_mutex);

		if (copy_from_user(debug_buf, (void __user *) buf, count)) {
			mutex_unlock(&debug_buf_mutex);
			return -EFAULT;
		}
		debug_buf[count] = '\0';
		mutex_unlock(&debug_buf_mutex);

		/* check that user entered a supported packet type */
		if (strcmp(debug_buf, "periodic\n") == 0) {
			clk_osm_write_reg(c, clk_osm_count_ns(c,
					  PERIODIC_TRACE_DEFAULT_NS),
					  PERIODIC_TRACE_TIMER_CTRL);
			clk_osm_masked_write_reg(c,
				 TRACE_CTRL_PERIODIC_TRACE_ENABLE,
				 TRACE_CTRL, TRACE_CTRL_PERIODIC_TRACE_EN_MASK);
			c->trace_method = PERIODIC_PACKET;
			c->trace_periodic_timer = PERIODIC_TRACE_DEFAULT_NS;
			return count;
		} else if (strcmp(debug_buf, "xor\n") == 0) {
			val = clk_osm_read_reg(c, TRACE_CTRL);
			val &= ~TRACE_CTRL_PERIODIC_TRACE_ENABLE;
			clk_osm_write_reg(c, val, TRACE_CTRL);
			c->trace_method = XOR_PACKET;
			return count;
		}
	}

	pr_err("error, supported trace mode types: 'periodic' or 'xor'\n");
	return -EINVAL;
}
+
+static ssize_t debugfs_trace_method_get(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct clk_osm *c = file->private_data;
+ int len, rc;
+
+ if (IS_ERR(file) || file == NULL) {
+ pr_err("input error %ld\n", PTR_ERR(file));
+ return -EINVAL;
+ }
+
+ if (!c) {
+ pr_err("invalid clk_osm handle\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&debug_buf_mutex);
+
+ if (c->trace_method == PERIODIC_PACKET)
+ len = snprintf(debug_buf, sizeof(debug_buf), "periodic\n");
+ else if (c->trace_method == XOR_PACKET)
+ len = snprintf(debug_buf, sizeof(debug_buf), "xor\n");
+
+ rc = simple_read_from_buffer((void __user *) buf, len, ppos,
+ (void *) debug_buf, len);
+
+ mutex_unlock(&debug_buf_mutex);
+
+ return rc;
+}
+
+static int debugfs_trace_method_open(struct inode *inode, struct file *file)
+{
+ if (IS_ERR(file) || file == NULL) {
+ pr_err("input error %ld\n", PTR_ERR(file));
+ return -EINVAL;
+ }
+
+ file->private_data = inode->i_private;
+ return 0;
+}
+
/* File operations for the per-cluster debugfs trace-method control file */
static const struct file_operations debugfs_trace_method_fops = {
	.write = debugfs_trace_method_set,
	.open = debugfs_trace_method_open,
	.read = debugfs_trace_method_get,
};
+
/* debugfs getter: report the currently selected trace packet ID. */
static int debugfs_get_trace_packet_id(void *data, u64 *val)
{
	struct clk_osm *c = data;

	*val = c->trace_id;
	return 0;
}
+
/*
 * debugfs setter: select the trace packet type.  Out-of-range values
 * are logged and ignored (returns 0 so the write still "succeeds").
 */
static int debugfs_set_trace_packet_id(void *data, u64 val)
{
	struct clk_osm *c = data;

	if (val < TRACE_PACKET0 || val > TRACE_PACKET3) {
		pr_err("supported trace IDs=%d-%d\n",
		       TRACE_PACKET0, TRACE_PACKET3);
		return 0;
	}

	clk_osm_masked_write_reg(c, val << TRACE_CTRL_PACKET_TYPE_SHIFT,
				 TRACE_CTRL, TRACE_CTRL_PACKET_TYPE_MASK);
	c->trace_id = val;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debugfs_trace_packet_id_fops,
			debugfs_get_trace_packet_id,
			debugfs_set_trace_packet_id,
			"%llu\n");
+
/* debugfs getter: report the periodic trace period in nanoseconds. */
static int debugfs_get_trace_periodic_timer(void *data, u64 *val)
{
	struct clk_osm *c = data;

	*val = c->trace_periodic_timer;
	return 0;
}
+
/*
 * debugfs setter: program the periodic trace period (ns, converted to
 * OSM cycles).  Out-of-range values are logged and ignored (returns 0
 * so the write still "succeeds").
 */
static int debugfs_set_trace_periodic_timer(void *data, u64 val)
{
	struct clk_osm *c = data;

	if (val < PERIODIC_TRACE_MIN_NS || val > PERIODIC_TRACE_MAX_NS) {
		pr_err("supported periodic trace periods=%d-%ld ns\n",
		       PERIODIC_TRACE_MIN_NS, PERIODIC_TRACE_MAX_NS);
		return 0;
	}

	clk_osm_write_reg(c, clk_osm_count_ns(c, val),
			  PERIODIC_TRACE_TIMER_CTRL);
	c->trace_periodic_timer = val;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debugfs_trace_periodic_timer_fops,
			debugfs_get_trace_periodic_timer,
			debugfs_set_trace_periodic_timer,
			"%llu\n");
+
+/* debugfs: read back the DCVS "perf state met" interrupt-enable register. */
+static int debugfs_get_perf_state_met_irq(void *data, u64 *val)
+{
+	struct clk_osm *clk = data;
+
+	*val = clk_osm_read_reg(clk, DCVS_PERF_STATE_MET_INTR_EN);
+	return 0;
+}
+
+/* debugfs: any non-zero write enables the interrupt, zero disables it. */
+static int debugfs_set_perf_state_met_irq(void *data, u64 val)
+{
+	struct clk_osm *clk = data;
+
+	clk_osm_write_reg(clk, val != 0 ? 1 : 0,
+			  DCVS_PERF_STATE_MET_INTR_EN);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_perf_state_met_irq_fops,
+			debugfs_get_perf_state_met_irq,
+			debugfs_set_perf_state_met_irq,
+			"%llu\n");
+
+/* debugfs: read back the DCVS "perf state deviation" interrupt enable. */
+static int debugfs_get_perf_state_deviation_irq(void *data, u64 *val)
+{
+	struct clk_osm *clk = data;
+
+	*val = clk_osm_read_reg(clk, DCVS_PERF_STATE_DEVIATION_INTR_EN);
+	return 0;
+}
+
+/* debugfs: any non-zero write enables the interrupt, zero disables it. */
+static int debugfs_set_perf_state_deviation_irq(void *data, u64 val)
+{
+	struct clk_osm *clk = data;
+
+	clk_osm_write_reg(clk, val != 0 ? 1 : 0,
+			  DCVS_PERF_STATE_DEVIATION_INTR_EN);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_perf_state_deviation_irq_fops,
+			debugfs_get_perf_state_deviation_irq,
+			debugfs_set_perf_state_deviation_irq,
+			"%llu\n");
+
+/* debugfs: read back the "deviation corrected" interrupt-enable register. */
+static int debugfs_get_perf_state_deviation_corrected_irq(void *data, u64 *val)
+{
+	struct clk_osm *clk = data;
+
+	*val = clk_osm_read_reg(clk,
+				DCVS_PERF_STATE_DEVIATION_CORRECTED_INTR_EN);
+	return 0;
+}
+
+/* debugfs: any non-zero write enables the interrupt, zero disables it. */
+static int debugfs_set_perf_state_deviation_corrected_irq(void *data, u64 val)
+{
+	struct clk_osm *clk = data;
+
+	clk_osm_write_reg(clk, val != 0 ? 1 : 0,
+			  DCVS_PERF_STATE_DEVIATION_CORRECTED_INTR_EN);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_perf_state_deviation_corrected_irq_fops,
+			debugfs_get_perf_state_deviation_corrected_irq,
+			debugfs_set_perf_state_deviation_corrected_irq,
+			"%llu\n");
+
+/*
+ * Create the per-cluster OSM debugfs directory and its control files.
+ * Best-effort: errors are logged, never propagated. If any file fails
+ * to create, the whole per-cluster directory is removed again (see the
+ * "exit" label, which keys off the last creation result in @temp).
+ */
+static void populate_debugfs_dir(struct clk_osm *c)
+{
+	struct dentry *temp;
+
+	/* The top-level "osm" directory is shared by both clusters. */
+	if (osm_debugfs_base == NULL) {
+		osm_debugfs_base = debugfs_create_dir("osm", NULL);
+		if (IS_ERR_OR_NULL(osm_debugfs_base)) {
+			pr_err("osm debugfs base directory creation failed\n");
+			osm_debugfs_base = NULL;
+			return;
+		}
+	}
+
+	c->debugfs = debugfs_create_dir(c->c.dbg_name, osm_debugfs_base);
+	if (IS_ERR_OR_NULL(c->debugfs)) {
+		pr_err("osm debugfs directory creation failed\n");
+		return;
+	}
+
+	temp = debugfs_create_file("perf_state_met_irq_enable",
+			S_IRUGO | S_IWUSR,
+			c->debugfs, c,
+			&debugfs_perf_state_met_irq_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("perf_state_met_irq_enable debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("perf_state_deviation_irq_enable",
+			S_IRUGO | S_IWUSR,
+			c->debugfs, c,
+			&debugfs_perf_state_deviation_irq_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("perf_state_deviation_irq_enable debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("perf_state_deviation_corrected_irq_enable",
+			S_IRUGO | S_IWUSR,
+			c->debugfs, c,
+			&debugfs_perf_state_deviation_corrected_irq_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_perf_state_deviation_corrected_irq_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("trace_enable",
+			S_IRUGO | S_IWUSR,
+			c->debugfs, c,
+			&debugfs_trace_enable_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_trace_enable_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("trace_method",
+			S_IRUGO | S_IWUSR,
+			c->debugfs, c,
+			&debugfs_trace_method_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_trace_method_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("trace_packet_id",
+			S_IRUGO | S_IWUSR,
+			c->debugfs, c,
+			&debugfs_trace_packet_id_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_trace_packet_id_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+	temp = debugfs_create_file("trace_periodic_timer",
+			S_IRUGO | S_IWUSR,
+			c->debugfs, c,
+			&debugfs_trace_periodic_timer_fops);
+	if (IS_ERR_OR_NULL(temp)) {
+		pr_err("debugfs_trace_periodic_timer_fops debugfs file creation failed\n");
+		goto exit;
+	}
+
+exit:
+	/* On the success path temp holds the last (valid) dentry: no-op. */
+	if (IS_ERR_OR_NULL(temp))
+		debugfs_remove_recursive(c->debugfs);
+}
+
+/*
+ * Panic notifier: dump this cluster's OSM domain power-state status,
+ * program counter and APM controller status registers into the panic
+ * log. Each register is mapped on demand and unmapped again; a failed
+ * ioremap() silently skips that register.
+ *
+ * NOTE(review): this runs from atomic panic-notifier context, and
+ * ioremap() may sleep on some configurations -- confirm the on-demand
+ * mapping here is safe, or consider pre-mapping at probe time.
+ */
+static int clk_osm_panic_callback(struct notifier_block *nfb,
+				  unsigned long event,
+				  void *data)
+{
+	void __iomem *virt_addr;
+	u32 value, reg;
+	struct clk_osm *c = container_of(nfb,
+					 struct clk_osm,
+					 panic_notifier);
+
+	reg = c->pbases[OSM_BASE] + WDOG_DOMAIN_PSTATE_STATUS;
+	virt_addr = ioremap(reg, 0x4);
+	if (virt_addr != NULL) {
+		value = readl_relaxed(virt_addr);
+		pr_err("DOM%d_PSTATE_STATUS[0x%08x]=0x%08x\n", c->cluster_num,
+		       reg, value);
+		iounmap(virt_addr);
+	}
+
+	reg = c->pbases[OSM_BASE] + WDOG_PROGRAM_COUNTER;
+	virt_addr = ioremap(reg, 0x4);
+	if (virt_addr != NULL) {
+		value = readl_relaxed(virt_addr);
+		pr_err("DOM%d_PROGRAM_COUNTER[0x%08x]=0x%08x\n", c->cluster_num,
+		       reg, value);
+		iounmap(virt_addr);
+	}
+
+	virt_addr = ioremap(c->apm_ctrl_status, 0x4);
+	if (virt_addr != NULL) {
+		value = readl_relaxed(virt_addr);
+		pr_err("APM_CTLER_STATUS_%d[0x%08x]=0x%08x\n", c->cluster_num,
+		       c->apm_ctrl_status, value);
+		iounmap(virt_addr);
+	}
+
+	return NOTIFY_OK;
+}
+
+/* 300 MHz: initial cluster rate; LUT index 0 is sourced from hmss_gpll0
+ * at this rate (see cpu_clock_osm_driver_probe()).
+ */
+static unsigned long init_rate = 300000000;
+/* 200 MHz: initial rate for the OSM controller clock (osm_clk_src). */
+static unsigned long osm_clk_init_rate = 200000000;
+
+/*
+ * Probe the OSM (Operating State Manager) CPU clock controller for both
+ * the power and performance clusters. In order: map resources and parse
+ * DT, load per-speed-bin LUTs, resolve open-loop voltages and APM
+ * crossover corners, program the hardware tables and DCVS/LLM policies,
+ * register the clocks, bring everything to known initial rates, and
+ * finally enable OSM on every online CPU.
+ *
+ * Error handling is two-tier: failures before any hardware table is
+ * written return an error (allowing -EPROBE_DEFER), while failures after
+ * that point jump to "exit" and deliberately panic -- the CPUs cannot
+ * safely run with OSM half-programmed.
+ */
+static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
+{
+	char perfclspeedbinstr[] = "qcom,perfcl-speedbin0-v0";
+	char pwrclspeedbinstr[] = "qcom,pwrcl-speedbin0-v0";
+	int rc, cpu;
+	struct cpu_cycle_counter_cb cb = {
+		.get_cpu_cycle_counter = clk_osm_get_cpu_cycle_counter,
+	};
+
+	rc = clk_osm_resources_init(pdev);
+	if (rc) {
+		if (rc != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "resources init failed, rc=%d\n",
+				rc);
+		return rc;
+	}
+
+	rc = clk_osm_parse_dt_configs(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to parse device tree configurations\n");
+		return rc;
+	}
+
+	rc = clk_osm_get_lut(pdev, &pwrcl_clk,
+			     pwrclspeedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for power cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_get_lut(pdev, &perfcl_clk, perfclspeedbinstr);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to get OSM LUT for perf cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&pwrcl_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for power cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	rc = clk_osm_resolve_open_loop_voltages(&perfcl_clk);
+	if (rc) {
+		if (rc == -EPROBE_DEFER)
+			return rc;
+		dev_err(&pdev->dev, "Unable to determine open-loop voltages for perf cluster, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/* APM crossover corners are optional; log and continue without. */
+	rc = clk_osm_resolve_crossover_corners(&pwrcl_clk, pdev);
+	if (rc)
+		dev_info(&pdev->dev, "No APM crossover corner programmed\n");
+
+	rc = clk_osm_resolve_crossover_corners(&perfcl_clk, pdev);
+	if (rc)
+		dev_info(&pdev->dev, "No APM crossover corner programmed\n");
+
+	clk_osm_setup_cycle_counters(&pwrcl_clk);
+	clk_osm_setup_cycle_counters(&perfcl_clk);
+
+	clk_osm_print_osm_table(&pwrcl_clk);
+	clk_osm_print_osm_table(&perfcl_clk);
+
+	/* Program the minimum PLL frequency */
+	clk_osm_write_reg(&pwrcl_clk, PLL_MIN_LVAL, SEQ_REG(27));
+	clk_osm_write_reg(&perfcl_clk, PLL_MIN_LVAL, SEQ_REG(27));
+
+	/* From here on, failures panic via the exit label below. */
+	rc = clk_osm_setup_hw_table(&pwrcl_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup power cluster hardware table\n");
+		goto exit;
+	}
+	rc = clk_osm_setup_hw_table(&perfcl_clk);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to setup perf cluster hardware table\n");
+		goto exit;
+	}
+
+	/* Policy tuning */
+	rc = clk_osm_set_cc_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "cc policy setup failed");
+		goto exit;
+	}
+
+	clk_osm_setup_itm_to_osm_handoff();
+
+	/* LLM Freq Policy Tuning */
+	rc = clk_osm_set_llm_freq_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "LLM Frequency Policy setup failed");
+		goto exit;
+	}
+
+	/* LLM Voltage Policy Tuning */
+	rc = clk_osm_set_llm_volt_policy(pdev);
+	if (rc < 0) {
+		dev_err(&pdev->dev, "Failed to set LLM voltage Policy");
+		goto exit;
+	}
+
+	clk_osm_setup_fsms(&pwrcl_clk);
+	clk_osm_setup_fsms(&perfcl_clk);
+
+	/*
+	 * Perform typical secure-world HW initialization
+	 * as necessary.
+	 */
+	clk_osm_do_additional_setup(&pwrcl_clk, pdev);
+	clk_osm_do_additional_setup(&perfcl_clk, pdev);
+
+	/* Program APM crossover corners */
+	clk_osm_apm_vc_setup(&pwrcl_clk);
+	clk_osm_apm_vc_setup(&perfcl_clk);
+
+	/* Debug IRQs are optional; continue without them. */
+	rc = clk_osm_setup_irq(pdev, &pwrcl_clk, "pwrcl-irq");
+	if (rc)
+		pr_err("Debug IRQ not set for pwrcl\n");
+
+	rc = clk_osm_setup_irq(pdev, &perfcl_clk, "perfcl-irq");
+	if (rc)
+		pr_err("Debug IRQ not set for perfcl\n");
+
+	clk_osm_setup_osm_was(&pwrcl_clk);
+	clk_osm_setup_osm_was(&perfcl_clk);
+
+	if (of_property_read_bool(pdev->dev.of_node,
+				  "qcom,osm-pll-setup")) {
+		clk_osm_setup_cluster_pll(&pwrcl_clk);
+		clk_osm_setup_cluster_pll(&perfcl_clk);
+	}
+
+	spin_lock_init(&pwrcl_clk.lock);
+	spin_lock_init(&perfcl_clk.lock);
+
+	/* Dump OSM state for both clusters if the kernel panics. */
+	pwrcl_clk.panic_notifier.notifier_call = clk_osm_panic_callback;
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &pwrcl_clk.panic_notifier);
+	perfcl_clk.panic_notifier.notifier_call = clk_osm_panic_callback;
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &perfcl_clk.panic_notifier);
+
+	rc = of_msm_clock_register(pdev->dev.of_node, cpu_clocks_osm,
+				   ARRAY_SIZE(cpu_clocks_osm));
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to register CPU clocks, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	/*
+	 * The hmss_gpll0 clock runs at 300 MHz. Ensure it is at the correct
+	 * frequency before enabling OSM. LUT index 0 is always sourced from
+	 * this clock.
+	 */
+	rc = clk_set_rate(&sys_apcsaux_clk_gcc.c, init_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set init rate on hmss_gpll0, rc=%d\n",
+			rc);
+		return rc;
+	}
+	clk_prepare_enable(&sys_apcsaux_clk_gcc.c);
+
+	/* Set 300MHz index */
+	rc = clk_set_rate(&pwrcl_clk.c, init_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set init rate on pwr cluster, rc=%d\n",
+			rc);
+		clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
+		return rc;
+	}
+
+	rc = clk_set_rate(&perfcl_clk.c, init_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set init rate on perf cluster, rc=%d\n",
+			rc);
+		clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
+		return rc;
+	}
+
+	rc = clk_set_rate(&osm_clk_src.c, osm_clk_init_rate);
+	if (rc) {
+		dev_err(&pdev->dev, "Unable to set init rate on osm_clk, rc=%d\n",
+			rc);
+		clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
+		return rc;
+	}
+
+	/* Hold off hotplug while enabling OSM on each online CPU. */
+	get_online_cpus();
+
+	/* Enable OSM */
+	for_each_online_cpu(cpu) {
+		WARN(clk_prepare_enable(logical_cpu_to_clk(cpu)),
+		     "Failed to enable clock for cpu %d\n", cpu);
+	}
+
+	populate_opp_table(pdev);
+	populate_debugfs_dir(&pwrcl_clk);
+	populate_debugfs_dir(&perfcl_clk);
+
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+	register_cpu_cycle_counter_cb(&cb);
+
+	pr_info("OSM driver inited\n");
+	put_online_cpus();
+
+	return 0;
+
+exit:
+	dev_err(&pdev->dev, "OSM driver failed to initialize, rc=%d\n",
+		rc);
+	/* Intentional: OSM is half-programmed, the CPUs cannot run safely. */
+	panic("Unable to Setup OSM");
+}
+
+/* Matched against the CPU clock controller's DT "compatible" string.
+ * Declared const: of_match_table takes a const pointer and the table is
+ * never modified at runtime.
+ */
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,cpu-clock-osm" },
+	{}
+};
+
+/* Platform driver glue; probed once against the qcom,cpu-clock-osm node. */
+static struct platform_driver cpu_clock_osm_driver = {
+	.probe = cpu_clock_osm_driver_probe,
+	.driver = {
+		.name = "cpu-clock-osm",
+		.of_match_table = match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+/* Register at arch_initcall so CPU clocks exist before their consumers. */
+static int __init cpu_clock_osm_init(void)
+{
+	return platform_driver_register(&cpu_clock_osm_driver);
+}
+arch_initcall(cpu_clock_osm_init);
+
+/* Module teardown: unregister the platform driver. */
+static void __exit cpu_clock_osm_exit(void)
+{
+	platform_driver_unregister(&cpu_clock_osm_driver);
+}
+module_exit(cpu_clock_osm_exit);
+
+MODULE_DESCRIPTION("CPU clock driver for OSM");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/msm/clock-pll.c b/drivers/clk/msm/clock-pll.c
new file mode 100644
index 000000000000..e4b6df769307
--- /dev/null
+++ b/drivers/clk/msm/clock-pll.c
@@ -0,0 +1,1206 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <soc/qcom/clock-pll.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+#include "clock.h"
+
+#define PLL_OUTCTRL BIT(0)
+#define PLL_BYPASSNL BIT(1)
+#define PLL_RESET_N BIT(2)
+#define PLL_MODE_MASK BM(3, 0)
+
+#define PLL_EN_REG(x) (*(x)->base + (unsigned long) (x)->en_reg)
+#define PLL_STATUS_REG(x) (*(x)->base + (unsigned long) (x)->status_reg)
+#define PLL_ALT_STATUS_REG(x) (*(x)->base + (unsigned long) \
+ (x)->alt_status_reg)
+#define PLL_MODE_REG(x) (*(x)->base + (unsigned long) (x)->mode_reg)
+#define PLL_L_REG(x) (*(x)->base + (unsigned long) (x)->l_reg)
+#define PLL_M_REG(x) (*(x)->base + (unsigned long) (x)->m_reg)
+#define PLL_N_REG(x) (*(x)->base + (unsigned long) (x)->n_reg)
+#define PLL_CONFIG_REG(x) (*(x)->base + (unsigned long) (x)->config_reg)
+#define PLL_ALPHA_REG(x) (*(x)->base + (unsigned long) (x)->alpha_reg)
+#define PLL_CFG_ALT_REG(x) (*(x)->base + (unsigned long) \
+ (x)->config_alt_reg)
+#define PLL_CFG_CTL_REG(x) (*(x)->base + (unsigned long) \
+ (x)->config_ctl_reg)
+#define PLL_CFG_CTL_HI_REG(x) (*(x)->base + (unsigned long) \
+ (x)->config_ctl_hi_reg)
+#define PLL_TEST_CTL_LO_REG(x) (*(x)->base + (unsigned long) \
+ (x)->test_ctl_lo_reg)
+#define PLL_TEST_CTL_HI_REG(x) (*(x)->base + (unsigned long) \
+ (x)->test_ctl_hi_reg)
+static DEFINE_SPINLOCK(pll_reg_lock);
+
+#define ENABLE_WAIT_MAX_LOOPS 200
+#define PLL_LOCKED_BIT BIT(16)
+
+#define SPM_FORCE_EVENT 0x4
+
+/*
+ * Cast this processor's vote to enable a shared PLL, then poll the
+ * status register (up to ENABLE_WAIT_MAX_LOOPS us) until the PLL
+ * reports active. Returns 0 on success, -ETIMEDOUT (with a WARN) if
+ * the PLL never comes up.
+ */
+static int pll_vote_clk_enable(struct clk *c)
+{
+	u32 ena, count;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena |= pllv->en_mask;
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	/*
+	 * Use a memory barrier since some PLL status registers are
+	 * not within the same 1K segment as the voting registers.
+	 */
+	mb();
+
+	/* Wait for pll to enable. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pllv)) & pllv->status_mask)
+			return 0;
+		udelay(1);
+	}
+
+	WARN("PLL %s didn't enable after voting for it!\n", c->dbg_name);
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * Withdraw this processor's vote for the PLL. The hardware turns the
+ * PLL off only once every voter has cleared its enable bit.
+ */
+static void pll_vote_clk_disable(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	val = readl_relaxed(PLL_EN_REG(pllv));
+	val &= ~pllv->en_mask;
+	writel_relaxed(val, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+/* Report whether the PLL's status register says it is currently active. */
+static int pll_vote_clk_is_enabled(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	u32 status = readl_relaxed(PLL_STATUS_REG(pllv));
+
+	return (status & pllv->status_mask) ? 1 : 0;
+}
+
+/* Boot handoff: a PLL we have already voted for counts as enabled. */
+static enum handoff pll_vote_clk_handoff(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	u32 ena = readl_relaxed(PLL_EN_REG(pllv));
+
+	return (ena & pllv->en_mask) ? HANDOFF_ENABLED_CLK :
+				       HANDOFF_DISABLED_CLK;
+}
+
+/*
+ * Debug helper: expose the single register bank (the vote register)
+ * for register dumping. Only bank index 0 exists.
+ */
+static void __iomem *pll_vote_clk_list_registers(struct clk *c, int n,
+		struct clk_register_data **regs, u32 *size)
+{
+	static struct clk_register_data data1[] = {
+		{"APPS_VOTE", 0x0},
+	};
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	if (n != 0)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data1;
+	*size = ARRAY_SIZE(data1);
+	return PLL_EN_REG(pllv);
+}
+
+/* clk_ops for PLLs controlled through a shared vote register. */
+struct clk_ops clk_ops_pll_vote = {
+	.enable = pll_vote_clk_enable,
+	.disable = pll_vote_clk_disable,
+	.is_enabled = pll_vote_clk_is_enabled,
+	.handoff = pll_vote_clk_handoff,
+	.list_registers = pll_vote_clk_list_registers,
+};
+
+/*
+ * spm_event() -- Set/Clear SPM events
+ * PLL off sequence -- enable (1)
+ * Set L2_SPM_FORCE_EVENT_EN[bit] register to 1
+ * Set L2_SPM_FORCE_EVENT[bit] register to 1
+ * PLL on sequence -- enable (0)
+ * Clear L2_SPM_FORCE_EVENT[bit] register to 0
+ * Clear L2_SPM_FORCE_EVENT_EN[bit] register to 0
+ *
+ * Silently does nothing when @base is NULL (SPM control not configured
+ * for this PLL). Each write is followed by a barrier so the two-step
+ * sequence reaches the hardware in order.
+ */
+static void spm_event(void __iomem *base, u32 offset, u32 bit,
+				bool enable)
+{
+	uint32_t val;
+
+	if (!base)
+		return;
+
+	if (enable) {
+		/* L2_SPM_FORCE_EVENT_EN */
+		val = readl_relaxed(base + offset);
+		val |= BIT(bit);
+		writel_relaxed(val, (base + offset));
+		/* Ensure that the write above goes through. */
+		mb();
+
+		/* L2_SPM_FORCE_EVENT */
+		val = readl_relaxed(base + offset + SPM_FORCE_EVENT);
+		val |= BIT(bit);
+		writel_relaxed(val, (base + offset + SPM_FORCE_EVENT));
+		/* Ensure that the write above goes through. */
+		mb();
+	} else {
+		/* L2_SPM_FORCE_EVENT */
+		val = readl_relaxed(base + offset + SPM_FORCE_EVENT);
+		val &= ~BIT(bit);
+		writel_relaxed(val, (base + offset + SPM_FORCE_EVENT));
+		/* Ensure that the write above goes through. */
+		mb();
+
+		/* L2_SPM_FORCE_EVENT_EN */
+		val = readl_relaxed(base + offset);
+		val &= ~BIT(bit);
+		writel_relaxed(val, (base + offset));
+		/* Ensure that the write above goes through. */
+		mb();
+	}
+}
+
+/*
+ * Program a PLL's CONFIG/user register from a frequency-table entry:
+ * MN-counter enable (only when m_val is non-zero), pre/post dividers,
+ * VCO select, and keep the main output enabled.
+ */
+static void __pll_config_reg(void __iomem *pll_config, struct pll_freq_tbl *f,
+			struct pll_config_masks *masks)
+{
+	u32 regval;
+
+	regval = readl_relaxed(pll_config);
+
+	/* Enable the MN counter if used */
+	if (f->m_val)
+		regval |= masks->mn_en_mask;
+
+	/* Set pre-divider and post-divider values */
+	regval &= ~masks->pre_div_mask;
+	regval |= f->pre_div_val;
+	regval &= ~masks->post_div_mask;
+	regval |= f->post_div_val;
+
+	/* Select VCO setting */
+	regval &= ~masks->vco_mask;
+	regval |= f->vco_val;
+
+	/* Enable main output if it has not been enabled */
+	if (masks->main_output_mask && !(regval & masks->main_output_mask))
+		regval |= masks->main_output_mask;
+
+	writel_relaxed(regval, pll_config);
+}
+
+/*
+ * SR2 PLL enable: clear the SPM force-event, take the PLL out of
+ * bypass, release reset, poll for lock (logged but non-fatal if it
+ * never locks), then gate the output on. Always returns 0.
+ */
+static int sr2_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	int ret = 0, count;
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	spm_event(pll->spm_ctrl.spm_base, pll->spm_ctrl.offset,
+				pll->spm_ctrl.event_bit, false);
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
+		pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return ret;
+}
+
+/*
+ * One-time static configuration of a variable-rate PLL: dividers,
+ * output enables, MN-counter mode, APC power-down override, and the
+ * alpha/config-ctl/test-ctl registers. Sets pll->inited so the enable
+ * path skips this on subsequent calls.
+ */
+void __variable_rate_pll_init(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 regval;
+
+	regval = readl_relaxed(PLL_CONFIG_REG(pll));
+
+	if (pll->masks.post_div_mask) {
+		regval &= ~pll->masks.post_div_mask;
+		regval |= pll->vals.post_div_masked;
+	}
+
+	if (pll->masks.pre_div_mask) {
+		regval &= ~pll->masks.pre_div_mask;
+		regval |= pll->vals.pre_div_masked;
+	}
+
+	if (pll->masks.main_output_mask)
+		regval |= pll->masks.main_output_mask;
+
+	if (pll->masks.early_output_mask)
+		regval |= pll->masks.early_output_mask;
+
+	if (pll->vals.enable_mn)
+		regval |= pll->masks.mn_en_mask;
+	else
+		regval &= ~pll->masks.mn_en_mask;
+
+	writel_relaxed(regval, PLL_CONFIG_REG(pll));
+
+	/* Keep the PLL out of APC power-down control if the mask is set. */
+	regval = readl_relaxed(PLL_MODE_REG(pll));
+	if (pll->masks.apc_pdn_mask)
+		regval &= ~pll->masks.apc_pdn_mask;
+	writel_relaxed(regval, PLL_MODE_REG(pll));
+
+	writel_relaxed(pll->vals.alpha_val, PLL_ALPHA_REG(pll));
+	writel_relaxed(pll->vals.config_ctl_val, PLL_CFG_CTL_REG(pll));
+	if (pll->vals.config_ctl_hi_val)
+		writel_relaxed(pll->vals.config_ctl_hi_val,
+				PLL_CFG_CTL_HI_REG(pll));
+	if (pll->init_test_ctl) {
+		writel_relaxed(pll->vals.test_ctl_lo_val,
+				PLL_TEST_CTL_LO_REG(pll));
+		writel_relaxed(pll->vals.test_ctl_hi_val,
+				PLL_TEST_CTL_HI_REG(pll));
+	}
+
+	pll->inited = true;
+}
+
+/*
+ * Enable a variable-rate PLL following the HPG sequence (bypass off ->
+ * reset released -> lock poll -> output on), with two workarounds:
+ * test-control debug bits are programmed before the sequence, and a
+ * long 200us settle delay plus a double-read of the lock bit guard
+ * against "transient" early lock indications. If the PLL still fails
+ * to lock, the function dumps diagnostic state under all four test_ctl
+ * debug selects and panics -- the CPU cannot run without this PLL.
+ */
+static int variable_rate_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	int ret = 0, count;
+	u32 mode, testlo;
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+	u32 mode_lock;
+	u64 time;
+	bool early_lock = false;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	/* Lazily program the static configuration on first enable. */
+	if (unlikely(!to_pll_clk(c)->inited))
+		__variable_rate_pll_init(c);
+
+	mode = readl_relaxed(PLL_MODE_REG(pll));
+
+	/* Set test control bits as required by HW doc */
+	if (pll->test_ctl_lo_reg && pll->vals.test_ctl_lo_val &&
+		pll->pgm_test_ctl_enable)
+		writel_relaxed(pll->vals.test_ctl_lo_val,
+				PLL_TEST_CTL_LO_REG(pll));
+
+	if (!pll->test_ctl_dbg) {
+		/* Enable test_ctl debug */
+		mode |= BIT(3);
+		writel_relaxed(mode, PLL_MODE_REG(pll));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0xC0;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+	}
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Use 10us to be sure.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * 5us delay mandated by HPG. However, put in a 200us delay here.
+	 * This is to address possible locking issues with the PLL exhibit
+	 * early "transient" locks about 16us from this point. With this
+	 * higher delay, we avoid running into those transients.
+	 */
+	mb();
+	udelay(200);
+
+	/* Clear test control bits */
+	if (pll->test_ctl_lo_reg && pll->vals.test_ctl_lo_val &&
+		pll->pgm_test_ctl_enable)
+		writel_relaxed(0x0, PLL_TEST_CTL_LO_REG(pll));
+
+
+	time = sched_clock();
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask) {
+			udelay(1);
+			/*
+			 * Check again to be sure. This is to avoid
+			 * breaking too early if there is a "transient"
+			 * lock.
+			 */
+			if ((readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
+				break;
+			else
+				early_lock = true;
+		}
+		udelay(1);
+	}
+	time = sched_clock() - time;
+
+	mode_lock = readl_relaxed(PLL_STATUS_REG(pll));
+
+	if (!(mode_lock & lockmask)) {
+		/*
+		 * Lock failure: dump everything useful, then sweep the
+		 * test_ctl_lo debug select (bits 7:6 = 00/01/10/11) and
+		 * log the alternate status under each setting before
+		 * panicking.
+		 */
+		pr_err("PLL lock bit detection total wait time: %lld ns", time);
+		pr_err("PLL %s didn't lock after enabling for L value 0x%x!\n",
+			c->dbg_name, readl_relaxed(PLL_L_REG(pll)));
+		pr_err("mode register is 0x%x\n",
+			readl_relaxed(PLL_STATUS_REG(pll)));
+		pr_err("user control register is 0x%x\n",
+			readl_relaxed(PLL_CONFIG_REG(pll)));
+		pr_err("config control register is 0x%x\n",
+			readl_relaxed(PLL_CFG_CTL_REG(pll)));
+		pr_err("test control high register is 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_HI_REG(pll)));
+		pr_err("test control low register is 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)));
+		pr_err("early lock? %s\n", early_lock ? "yes" : "no");
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0x40;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0x80;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+
+		testlo = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		testlo &= ~BM(7, 6);
+		testlo |= 0xC0;
+		writel_relaxed(testlo, PLL_TEST_CTL_LO_REG(pll));
+		/* Wait for the write to complete */
+		mb();
+
+		pr_err("test_ctl_lo = 0x%x, pll status is: 0x%x\n",
+			readl_relaxed(PLL_TEST_CTL_LO_REG(pll)),
+			readl_relaxed(PLL_ALT_STATUS_REG(pll)));
+		panic("failed to lock %s PLL\n", c->dbg_name);
+	}
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return ret;
+}
+
+/*
+ * HW-FSM controlled "disable": the hardware FSM owns the actual
+ * power-down, so software only sets the stay-in-CFA test-control bit
+ * (when programmable) and waits the delay the HPG mandates.
+ */
+static void variable_rate_pll_clk_disable_hwfsm(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 regval;
+
+	/* Set test control bit to stay-in-CFA if necessary */
+	if (pll->test_ctl_lo_reg && pll->pgm_test_ctl_enable) {
+		regval = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		writel_relaxed(regval | BIT(16),
+				PLL_TEST_CTL_LO_REG(pll));
+	}
+
+	/* 8 reference clock cycle delay mandated by the HPG */
+	udelay(1);
+}
+
+/*
+ * HW-FSM controlled enable: clear the stay-in-CFA test-control bit,
+ * wait 50us to ride out transient locks, then poll for a stable lock.
+ * Lock failure is logged but not fatal; always returns 0.
+ */
+static int variable_rate_pll_clk_enable_hwfsm(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	int count;
+	u32 lockmask = pll->masks.lock_mask ?: PLL_LOCKED_BIT;
+	unsigned long flags;
+	u32 regval;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	/* Clear test control bit if necessary */
+	if (pll->test_ctl_lo_reg && pll->pgm_test_ctl_enable) {
+		regval = readl_relaxed(PLL_TEST_CTL_LO_REG(pll));
+		regval &= ~BIT(16);
+		writel_relaxed(regval, PLL_TEST_CTL_LO_REG(pll));
+	}
+
+	/* Wait for 50us explicitly to avoid transient locks */
+	udelay(50);
+
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & lockmask)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & lockmask))
+		pr_err("PLL %s didn't lock after enabling it!\n", c->dbg_name);
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Standard mode-register enable sequence shared by local PLLs:
+ * bypass off -> 10us -> reset released -> fixed 50us lock wait (no
+ * lock-bit polling) -> output enabled. Called with pll_reg_lock held
+ * by local_pll_clk_enable().
+ */
+static void __pll_clk_enable_reg(void __iomem *mode_reg)
+{
+	u32 mode = readl_relaxed(mode_reg);
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, mode_reg);
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 */
+	mb();
+	udelay(10);
+
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, mode_reg);
+
+	/* Wait until PLL is locked. */
+	mb();
+	udelay(50);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, mode_reg);
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+}
+
+/* Enable a locally-controlled PLL via the shared mode-register sequence. */
+static int local_pll_clk_enable(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	__pll_clk_enable_reg(PLL_MODE_REG(pll));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Clear OUTCTRL/BYPASSNL/RESET_N in a single write: output gated,
+ * bypass re-entered, reset asserted.
+ */
+static void __pll_clk_disable_reg(void __iomem *mode_reg)
+{
+	u32 mode;
+
+	mode = readl_relaxed(mode_reg);
+	writel_relaxed(mode & ~PLL_MODE_MASK, mode_reg);
+}
+
+/*
+ * Fully power down a local PLL: arm the SPM force-event (so low-power
+ * modes know the PLL is off), then clear the mode register, which
+ * gates the output, re-enters bypass and asserts reset in one write.
+ */
+static void local_pll_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	/*
+	 * Disable the PLL output, disable test mode, enable
+	 * the bypass mode, and assert the reset.
+	 */
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	spm_event(pll->spm_ctrl.spm_base, pll->spm_ctrl.offset,
+				pll->spm_ctrl.event_bit, true);
+	__pll_clk_disable_reg(PLL_MODE_REG(pll));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+/*
+ * Boot handoff for a local PLL: if the bootloader left it running
+ * (bypass off, reset released, output on), reconstruct the rate from
+ * the L/M/N registers -- unless a rate was already declared, in which
+ * case it is trusted as-is.
+ */
+static enum handoff local_pll_clk_handoff(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
+	unsigned long parent_rate;
+	u32 lval, mval, nval, userval;
+
+	if ((mode & mask) != mask)
+		return HANDOFF_DISABLED_CLK;
+
+	/* Assume bootloaders configure PLL to c->rate */
+	if (c->rate)
+		return HANDOFF_ENABLED_CLK;
+
+	parent_rate = clk_get_rate(c->parent);
+	lval = readl_relaxed(PLL_L_REG(pll));
+	mval = readl_relaxed(PLL_M_REG(pll));
+	nval = readl_relaxed(PLL_N_REG(pll));
+	userval = readl_relaxed(PLL_CONFIG_REG(pll));
+
+	c->rate = parent_rate * lval;
+
+	/*
+	 * NOTE(review): this tests the whole user/config register for
+	 * non-zero rather than the MN enable bit -- presumably meant
+	 * (userval & pll->masks.mn_en_mask); confirm against HW docs.
+	 */
+	if (pll->masks.mn_en_mask && userval) {
+		if (!nval)
+			nval = 1;
+		c->rate += (parent_rate * mval) / nval;
+	}
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+/*
+ * Round @rate up to the nearest frequency-table entry; if the request
+ * exceeds every entry, return the fastest one. Returns -EINVAL when
+ * no table is attached or the table is empty (the original code
+ * decremented past the start of an empty table).
+ */
+static long local_pll_clk_round_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_freq_tbl *nf;
+	struct pll_clk *pll = to_pll_clk(c);
+	long best = -EINVAL;
+
+	if (!pll->freq_tbl)
+		return -EINVAL;
+
+	for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END; nf++) {
+		best = nf->freq_hz;
+		if (nf->freq_hz >= rate)
+			return nf->freq_hz;
+	}
+
+	return best;
+}
+
+/*
+ * Reprogram a table-based PLL to @rate, which must exactly match a
+ * frequency-table entry (-EINVAL otherwise). If the PLL is currently
+ * enabled it is briefly disabled around the L/M/N/config update.
+ */
+static int local_pll_clk_set_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_freq_tbl *nf;
+	struct pll_clk *pll = to_pll_clk(c);
+	unsigned long flags;
+
+	/* Linear scan for an exact rate match. */
+	for (nf = pll->freq_tbl; nf->freq_hz != PLL_FREQ_END
+			&& nf->freq_hz != rate; nf++)
+		;
+
+	if (nf->freq_hz == PLL_FREQ_END)
+		return -EINVAL;
+
+	/*
+	 * Ensure PLL is off before changing rate. For optimization reasons,
+	 * assume no downstream clock is using actively using it.
+	 */
+	spin_lock_irqsave(&c->lock, flags);
+	if (c->count)
+		c->ops->disable(c);
+
+	writel_relaxed(nf->l_val, PLL_L_REG(pll));
+	writel_relaxed(nf->m_val, PLL_M_REG(pll));
+	writel_relaxed(nf->n_val, PLL_N_REG(pll));
+
+	__pll_config_reg(PLL_CONFIG_REG(pll), nf, &pll->masks);
+
+	if (c->count)
+		c->ops->enable(c);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+	return 0;
+}
+
+/*
+ * Boot handoff for a variable-rate PLL: derive the rate from the L
+ * value times the reference rate, sanity-check it against the PLL's
+ * declared [min_rate, max_rate] range (WARN and report disabled if
+ * out of spec), then report enabled/disabled from the mode bits.
+ */
+static enum handoff variable_rate_pll_handoff(struct clk *c)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 mode = readl_relaxed(PLL_MODE_REG(pll));
+	u32 mask = PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL;
+	u32 lval;
+
+	pll->src_rate = clk_get_rate(c->parent);
+
+	lval = readl_relaxed(PLL_L_REG(pll));
+	if (!lval)
+		return HANDOFF_DISABLED_CLK;
+
+	c->rate = pll->src_rate * lval;
+
+	if (c->rate > pll->max_rate || c->rate < pll->min_rate) {
+		WARN(1, "%s: Out of spec PLL", c->dbg_name);
+		return HANDOFF_DISABLED_CLK;
+	}
+
+	if ((mode & mask) != mask)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+/*
+ * Round @rate to something the PLL can produce: clamp to the PLL's
+ * operating range, then quantize up to a whole multiple of the
+ * reference (L-value granularity), capped at max_rate. Returns 0 when
+ * the reference rate is not yet known.
+ */
+static long variable_rate_pll_round_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	unsigned long quantized;
+
+	if (!pll->src_rate)
+		return 0;
+
+	/* Some targets forbid reprogramming a prepared PLL. */
+	if (pll->no_prepared_reconfig && c->prepare_count && c->rate != rate)
+		return -EINVAL;
+
+	rate = clamp(rate, pll->min_rate, pll->max_rate);
+
+	quantized = DIV_ROUND_UP(rate, pll->src_rate) * pll->src_rate;
+	return min(pll->max_rate, quantized);
+}
+
+/*
+ * For optimization reasons, assumes no downstream clocks are actively using
+ * it.
+ *
+ * Set a variable-rate PLL to @rate by programming only the L register
+ * (rate must already be rounded -- i.e. a whole multiple of src_rate
+ * within range). If the PLL is enabled it is cycled off and on around
+ * the write.
+ */
+static int variable_rate_pll_set_rate(struct clk *c, unsigned long rate)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	unsigned long flags;
+	u32 l_val;
+
+	if (rate != variable_rate_pll_round_rate(c, rate))
+		return -EINVAL;
+
+	l_val = rate / pll->src_rate;
+
+	spin_lock_irqsave(&c->lock, flags);
+
+	if (c->count && c->ops->disable)
+		c->ops->disable(c);
+
+	writel_relaxed(l_val, PLL_L_REG(pll));
+
+	if (c->count && c->ops->enable)
+		c->ops->enable(c);
+
+	spin_unlock_irqrestore(&c->lock, flags);
+
+	return 0;
+}
+
+/*
+ * Enable an SR PLL with the open-loop software sequence: de-assert reset,
+ * leave bypass, wait for lock, then gate the output on. Uses fixed delays
+ * rather than polling a lock bit.
+ */
+int sr_pll_clk_enable(struct clk *c)
+{
+	u32 mode;
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	mode = readl_relaxed(PLL_MODE_REG(pll));
+	/* De-assert active-low PLL reset. */
+	mode |= PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/*
+	 * H/W requires a 5us delay between disabling the bypass and
+	 * de-asserting the reset. Delay 10us just to be safe.
+	 * NOTE(review): the code de-asserts reset *before* clearing bypass,
+	 * the reverse of the order this comment describes - confirm against
+	 * the PLL programming guide.
+	 */
+	mb();
+	udelay(10);
+
+	/* Disable PLL bypass mode. */
+	mode |= PLL_BYPASSNL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait until PLL is locked. */
+	mb();
+	udelay(60);
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+
+	return 0;
+}
+
+/*
+ * Enable an SR HPM/LP PLL: release bypass and reset in one write, poll the
+ * status register for lock (up to ENABLE_WAIT_MAX_LOOPS us), then turn the
+ * output on. Returns 0 on success or -ETIMEDOUT if the PLL never locks.
+ */
+int sr_hpm_lp_pll_clk_enable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_clk *pll = to_pll_clk(c);
+	u32 count, mode;
+	int ret = 0;
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+
+	/* Disable PLL bypass mode and de-assert reset. */
+	mode = PLL_BYPASSNL | PLL_RESET_N;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Wait for pll to lock. */
+	for (count = ENABLE_WAIT_MAX_LOOPS; count > 0; count--) {
+		if (readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)
+			break;
+		udelay(1);
+	}
+
+	if (!(readl_relaxed(PLL_STATUS_REG(pll)) & PLL_LOCKED_BIT)) {
+		/*
+		 * Fix: WARN()'s first argument is the condition; previously
+		 * the format string was passed as the condition and the clock
+		 * name as the format, so the message was never printed
+		 * correctly (and an arbitrary string was used as a format).
+		 */
+		WARN(1, "PLL %s didn't lock after enabling it!\n", c->dbg_name);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* Enable PLL output. */
+	mode |= PLL_OUTCTRL;
+	writel_relaxed(mode, PLL_MODE_REG(pll));
+
+	/* Ensure the write above goes through before returning. */
+	mb();
+
+out:
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return ret;
+}
+
+
+/*
+ * Debugfs helper: expose the variable-rate PLL register names/offsets.
+ * Only bank n == 0 exists; other values return -EINVAL.
+ */
+static void __iomem *variable_rate_pll_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	struct pll_clk *pll = to_pll_clk(c);
+	static struct clk_register_data data[] = {
+		{"MODE", 0x0},
+		{"L", 0x4},
+		{"ALPHA", 0x8},
+		{"USER_CTL", 0x10},
+		{"CONFIG_CTL", 0x14},
+		{"STATUS", 0x1C},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return PLL_MODE_REG(pll);
+}
+
+/*
+ * Debugfs helper: expose the local (L/M/N) PLL register names/offsets.
+ * Only bank n == 0 exists; other values return -EINVAL.
+ */
+static void __iomem *local_pll_clk_list_registers(struct clk *c, int n,
+				struct clk_register_data **regs, u32 *size)
+{
+	/* Not compatible with 8960 & friends */
+	struct pll_clk *pll = to_pll_clk(c);
+	static struct clk_register_data data[] = {
+		{"MODE", 0x0},
+		{"L", 0x4},
+		{"M", 0x8},
+		{"N", 0xC},
+		{"USER", 0x10},
+		{"CONFIG", 0x14},
+		{"STATUS", 0x1C},
+	};
+	if (n)
+		return ERR_PTR(-EINVAL);
+
+	*regs = data;
+	*size = ARRAY_SIZE(data);
+	return PLL_MODE_REG(pll);
+}
+
+
+/* Clock ops for a basic locally-controlled L/M/N PLL. */
+struct clk_ops clk_ops_local_pll = {
+	.enable = local_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = local_pll_clk_set_rate,
+	.handoff = local_pll_clk_handoff,
+	.list_registers = local_pll_clk_list_registers,
+};
+
+/* SR2 PLLs share the local PLL ops but need their own enable sequence. */
+struct clk_ops clk_ops_sr2_pll = {
+	.enable = sr2_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = local_pll_clk_set_rate,
+	.round_rate = local_pll_clk_round_rate,
+	.handoff = local_pll_clk_handoff,
+	.list_registers = local_pll_clk_list_registers,
+};
+
+/*
+ * Variable-rate PLL controlled through the hardware FSM.
+ * NOTE(review): unlike clk_ops_variable_rate_pll below, this variant does
+ * not set .list_registers - confirm whether that is intentional.
+ */
+struct clk_ops clk_ops_variable_rate_pll_hwfsm = {
+	.enable = variable_rate_pll_clk_enable_hwfsm,
+	.disable = variable_rate_pll_clk_disable_hwfsm,
+	.set_rate = variable_rate_pll_set_rate,
+	.round_rate = variable_rate_pll_round_rate,
+	.handoff = variable_rate_pll_handoff,
+};
+
+/* Variable-rate PLL under direct software control. */
+struct clk_ops clk_ops_variable_rate_pll = {
+	.enable = variable_rate_pll_clk_enable,
+	.disable = local_pll_clk_disable,
+	.set_rate = variable_rate_pll_set_rate,
+	.round_rate = variable_rate_pll_round_rate,
+	.handoff = variable_rate_pll_handoff,
+	.list_registers = variable_rate_pll_list_registers,
+};
+
+/* Serializes updates to the shared software vote word below. */
+static DEFINE_SPINLOCK(soft_vote_lock);
+
+/*
+ * Add this client's software vote for the PLL. The hardware vote is only
+ * asserted by the first voter; later voters just set their bit.
+ */
+static int pll_acpu_vote_clk_enable(struct clk *c)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&soft_vote_lock, flags);
+
+	if (!*pllv->soft_vote)
+		ret = pll_vote_clk_enable(c);
+	if (ret == 0)
+		*pllv->soft_vote |= (pllv->soft_vote_mask);
+
+	spin_unlock_irqrestore(&soft_vote_lock, flags);
+	return ret;
+}
+
+/*
+ * Drop this client's software vote; the hardware vote is released only when
+ * the last voter's bit is cleared.
+ */
+static void pll_acpu_vote_clk_disable(struct clk *c)
+{
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&soft_vote_lock, flags);
+
+	*pllv->soft_vote &= ~(pllv->soft_vote_mask);
+	if (!*pllv->soft_vote)
+		pll_vote_clk_disable(c);
+
+	spin_unlock_irqrestore(&soft_vote_lock, flags);
+}
+
+/*
+ * Hand off an ACPU-voted PLL: it is reported enabled only when the vote
+ * register already shows it on and re-applying the software vote succeeds.
+ */
+static enum handoff pll_acpu_vote_clk_handoff(struct clk *c)
+{
+	bool on = pll_vote_clk_handoff(c) != HANDOFF_DISABLED_CLK &&
+		  !pll_acpu_vote_clk_enable(c);
+
+	return on ? HANDOFF_ENABLED_CLK : HANDOFF_DISABLED_CLK;
+}
+
+/* Clock ops for PLLs shared via the software-vote scheme above. */
+struct clk_ops clk_ops_pll_acpu_vote = {
+	.enable = pll_acpu_vote_clk_enable,
+	.disable = pll_acpu_vote_clk_disable,
+	.is_enabled = pll_vote_clk_is_enabled,
+	.handoff = pll_acpu_vote_clk_handoff,
+	.list_registers = pll_vote_clk_list_registers,
+};
+
+
+/*
+ * "Enable" the sleep-vote clock by *clearing* the h/w sleep-enable bit,
+ * i.e. the PLL must stay on while a peripheral client needs it (see the
+ * clk_ops_pll_sleep_vote comment below for the inverted semantics).
+ */
+static int pll_sleep_clk_enable(struct clk *c)
+{
+	u32 ena;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena &= ~(pllv->en_mask);
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+	return 0;
+}
+
+/*
+ * "Disable" the sleep-vote clock by *setting* the h/w sleep-enable bit,
+ * allowing the PLL to be turned off when APPS power-collapses.
+ */
+static void pll_sleep_clk_disable(struct clk *c)
+{
+	u32 ena;
+	unsigned long flags;
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+
+	spin_lock_irqsave(&pll_reg_lock, flags);
+	ena = readl_relaxed(PLL_EN_REG(pllv));
+	ena |= pllv->en_mask;
+	writel_relaxed(ena, PLL_EN_REG(pllv));
+	spin_unlock_irqrestore(&pll_reg_lock, flags);
+}
+
+/*
+ * Sleep-vote handoff: a cleared sleep-enable bit means the PLL is being
+ * kept on across sleep, which this clock models as "enabled".
+ */
+static enum handoff pll_sleep_clk_handoff(struct clk *c)
+{
+	struct pll_vote_clk *pllv = to_pll_vote_clk(c);
+	u32 ena = readl_relaxed(PLL_EN_REG(pllv));
+
+	return (ena & pllv->en_mask) ? HANDOFF_DISABLED_CLK :
+				       HANDOFF_ENABLED_CLK;
+}
+
+/*
+ * This .ops is meant to be used by gpll0_sleep_clk_src. The aim is to utilise
+ * the h/w feature of sleep enable bit to denote if the PLL can be turned OFF
+ * once APPS goes to PC. gpll0_sleep_clk_src will be enabled only if there is a
+ * peripheral client using it and disabled if there is none. The current
+ * implementation of enable .ops clears the h/w bit of sleep enable while the
+ * disable .ops asserts it.
+ */
+
+/* Clock ops implementing the inverted sleep-enable semantics above. */
+struct clk_ops clk_ops_pll_sleep_vote = {
+	.enable = pll_sleep_clk_enable,
+	.disable = pll_sleep_clk_disable,
+	.handoff = pll_sleep_clk_handoff,
+	.list_registers = pll_vote_clk_list_registers,
+};
+
+/*
+ * Put a PLL's MODE register into FSM (hardware-voting) mode: release the FSM
+ * reset, program the bias and lock wait counts, then enable FSM voting.
+ */
+static void __set_fsm_mode(void __iomem *mode_reg,
+					u32 bias_count, u32 lock_count)
+{
+	u32 regval = readl_relaxed(mode_reg);
+
+	/* De-assert reset to FSM */
+	regval &= ~BIT(21);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program bias count */
+	regval &= ~BM(19, 14);
+	regval |= BVAL(19, 14, bias_count);
+	writel_relaxed(regval, mode_reg);
+
+	/* Program lock count */
+	regval &= ~BM(13, 8);
+	regval |= BVAL(13, 8, lock_count);
+	writel_relaxed(regval, mode_reg);
+
+	/* Enable PLL FSM voting */
+	regval |= BIT(20);
+	writel_relaxed(regval, mode_reg);
+}
+
+/*
+ * Apply the optional alternate-config field: read-modify-write the ALT
+ * config register using config.mask/config.val (no-op if the mask is zero,
+ * apart from writing back the value just read).
+ */
+static void __configure_alt_config(struct pll_alt_config config,
+		struct pll_config_regs *regs)
+{
+	u32 regval;
+
+	regval = readl_relaxed(PLL_CFG_ALT_REG(regs));
+
+	if (config.mask) {
+		regval &= ~config.mask;
+		regval |= config.val;
+	}
+
+	writel_relaxed(regval, PLL_CFG_ALT_REG(regs));
+}
+
+/*
+ * Program a PLL's L/M/N dividers and its CONFIG register fields (MN
+ * accumulator, outputs, pre/post dividers, VCO, add-factor), plus the
+ * optional ALT and CONFIG_CTL registers when present.
+ *
+ * Note: @ena_fsm_mode is unused here; FSM setup is done by the
+ * configure_sr_pll()/configure_sr_hpm_lp_pll() wrappers.
+ */
+void __configure_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	u32 regval;
+
+	writel_relaxed(config->l, PLL_L_REG(regs));
+	writel_relaxed(config->m, PLL_M_REG(regs));
+	writel_relaxed(config->n, PLL_N_REG(regs));
+
+	regval = readl_relaxed(PLL_CONFIG_REG(regs));
+
+	/* Enable the MN accumulator */
+	if (config->mn_ena_mask) {
+		regval &= ~config->mn_ena_mask;
+		regval |= config->mn_ena_val;
+	}
+
+	/* Enable the main output */
+	if (config->main_output_mask) {
+		regval &= ~config->main_output_mask;
+		regval |= config->main_output_val;
+	}
+
+	/* Enable the aux output */
+	if (config->aux_output_mask) {
+		regval &= ~config->aux_output_mask;
+		regval |= config->aux_output_val;
+	}
+
+	/* Set pre-divider and post-divider values */
+	regval &= ~config->pre_div_mask;
+	regval |= config->pre_div_val;
+	regval &= ~config->post_div_mask;
+	regval |= config->post_div_val;
+
+	/* Select VCO setting */
+	regval &= ~config->vco_mask;
+	regval |= config->vco_val;
+
+	if (config->add_factor_mask) {
+		regval &= ~config->add_factor_mask;
+		regval |= config->add_factor_val;
+	}
+
+	writel_relaxed(regval, PLL_CONFIG_REG(regs));
+
+	if (regs->config_alt_reg)
+		__configure_alt_config(config->alt_cfg, regs);
+
+	if (regs->config_ctl_reg)
+		writel_relaxed(config->cfg_ctl_val, PLL_CFG_CTL_REG(regs));
+}
+
+/*
+ * Configure an SR PLL; when @ena_fsm_mode is set, also enable FSM voting
+ * with bias count 1 and lock count 8.
+ */
+void configure_sr_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	__configure_pll(config, regs, ena_fsm_mode);
+	if (ena_fsm_mode)
+		__set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x8);
+}
+
+/*
+ * Configure an SR HPM/LP PLL; when @ena_fsm_mode is set, also enable FSM
+ * voting with bias count 1 and lock count 0.
+ */
+void configure_sr_hpm_lp_pll(struct pll_config *config,
+		struct pll_config_regs *regs, u32 ena_fsm_mode)
+{
+	__configure_pll(config, regs, ena_fsm_mode);
+	if (ena_fsm_mode)
+		__set_fsm_mode(PLL_MODE_REG(regs), 0x1, 0x0);
+}
+
+/*
+ * Device-tree parser shared by the "qcom,active-only-pll",
+ * "qcom,sleep-active-pll" and "qcom,votable-pll" compatibles. Reads the
+ * enable/status register offsets and bits, the configured rate, and (for the
+ * soft-voted variants) links this clock with its peer through a shared
+ * soft-vote word allocated by whichever of the pair is parsed last.
+ *
+ * NOTE(review): @rc is declared u32 but holds int return codes from
+ * of_property_read_u32(); the truthiness checks still work - confirm this
+ * is intentional.
+ */
+static void *votable_pll_clk_dt_parser(struct device *dev,
+				struct device_node *np)
+{
+	struct pll_vote_clk *v, *peer;
+	struct clk *c;
+	u32 val, rc;
+	phandle p;
+	struct msmclk_data *drv;
+
+	v = devm_kzalloc(dev, sizeof(*v), GFP_KERNEL);
+	if (!v) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	drv = msmclk_parse_phandle(dev, np->parent->phandle);
+	if (IS_ERR_OR_NULL(drv))
+		return ERR_CAST(drv);
+	v->base = &drv->base;
+
+	rc = of_property_read_u32(np, "qcom,en-offset", (u32 *)&v->en_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,en-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,en-bit", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,en-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	v->en_mask = BIT(val);
+
+	rc = of_property_read_u32(np, "qcom,status-offset",
+						(u32 *)&v->status_reg);
+	if (rc) {
+		dt_err(np, "missing qcom,status-offset dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	rc = of_property_read_u32(np, "qcom,status-bit", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,status-bit dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	v->status_mask = BIT(val);
+
+	rc = of_property_read_u32(np, "qcom,pll-config-rate", &val);
+	if (rc) {
+		dt_err(np, "missing qcom,pll-config-rate dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+	v->c.rate = val;
+
+	if (of_device_is_compatible(np, "qcom,active-only-pll"))
+		v->soft_vote_mask = PLL_SOFT_VOTE_ACPU;
+	else if (of_device_is_compatible(np, "qcom,sleep-active-pll"))
+		v->soft_vote_mask = PLL_SOFT_VOTE_PRIMARY;
+
+	/* Plain votable PLLs need no soft-vote linkage; finish early. */
+	if (of_device_is_compatible(np, "qcom,votable-pll")) {
+		v->c.ops = &clk_ops_pll_vote;
+		return msmclk_generic_clk_init(dev, np, &v->c);
+	}
+
+	rc = of_property_read_phandle_index(np, "qcom,peer", 0, &p);
+	if (rc) {
+		dt_err(np, "missing qcom,peer dt property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * If the peer was parsed first it is already registered; allocate the
+	 * shared vote word now and hand it to both clocks.
+	 */
+	c = msmclk_lookup_phandle(dev, p);
+	if (!IS_ERR_OR_NULL(c)) {
+		v->soft_vote = devm_kzalloc(dev, sizeof(*v->soft_vote),
+						GFP_KERNEL);
+		if (!v->soft_vote) {
+			dt_err(np, "memory alloc failure\n");
+			return ERR_PTR(-ENOMEM);
+		}
+
+		peer = to_pll_vote_clk(c);
+		peer->soft_vote = v->soft_vote;
+	}
+
+	v->c.ops = &clk_ops_pll_acpu_vote;
+	return msmclk_generic_clk_init(dev, np, &v->c);
+}
+MSMCLK_PARSER(votable_pll_clk_dt_parser, "qcom,active-only-pll", 0);
+MSMCLK_PARSER(votable_pll_clk_dt_parser, "qcom,sleep-active-pll", 1);
+MSMCLK_PARSER(votable_pll_clk_dt_parser, "qcom,votable-pll", 2);
diff --git a/drivers/clk/msm/clock-rpm.c b/drivers/clk/msm/clock-rpm.c
new file mode 100644
index 000000000000..ac093463ff23
--- /dev/null
+++ b/drivers/clk/msm/clock-rpm.c
@@ -0,0 +1,472 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/rtmutex.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/clock-rpm.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+/*
+ * Dispatch a rate request through the resource-specific set_rate_fn for
+ * either the RPM active-set or sleep-set context.
+ */
+#define __clk_rpmrs_set_rate(r, value, ctx) \
+	((r)->rpmrs_data->set_rate_fn((r), (value), (ctx)))
+
+#define clk_rpmrs_set_rate_sleep(r, value) \
+	__clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_sleep_id)
+
+#define clk_rpmrs_set_rate_active(r, value) \
+	__clk_rpmrs_set_rate((r), (value), (r)->rpmrs_data->ctx_active_id)
+
+/*
+ * Send a rate/enable request for @r to the RPM over SMD for the given
+ * context (active or sleep set). The last value sent per context is cached
+ * so identical requests are not re-sent. Returns 0 on success, -EINVAL for
+ * an unknown context, or a negative errno from msm_rpm_send_message().
+ */
+static int clk_rpmrs_set_rate_smd(struct rpm_clk *r, uint32_t value,
+				uint32_t context)
+{
+	int ret;
+
+	struct msm_rpm_kvp kvp = {
+		.key = r->rpm_key,
+		.data = (void *)&value,
+		.length = sizeof(value),
+	};
+
+	/* Skip the RPM round-trip if this vote is already in place. */
+	switch (context) {
+	case MSM_RPM_CTX_ACTIVE_SET:
+		if (*r->last_active_set_vote == value)
+			return 0;
+		break;
+	case MSM_RPM_CTX_SLEEP_SET:
+		if (*r->last_sleep_set_vote == value)
+			return 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = msm_rpm_send_message(context, r->rpm_res_type, r->rpm_clk_id,
+			&kvp, 1);
+	if (ret)
+		return ret;
+
+	/* Cache what the RPM last accepted for this context. */
+	switch (context) {
+	case MSM_RPM_CTX_ACTIVE_SET:
+		*r->last_active_set_vote = value;
+		break;
+	case MSM_RPM_CTX_SLEEP_SET:
+		*r->last_sleep_set_vote = value;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Handoff for SMD RPM clocks: non-branch clocks report INT_MAX as their
+ * boot rate (the true rate cannot be queried), branch clocks keep theirs.
+ */
+static int clk_rpmrs_handoff_smd(struct rpm_clk *r)
+{
+	if (!r->branch)
+		r->c.rate = INT_MAX;
+
+	return 0;
+}
+
+/* An RPM clock is considered enabled whenever it holds a prepare vote. */
+static int clk_rpmrs_is_enabled_smd(struct rpm_clk *r)
+{
+	return r->c.prepare_count != 0;
+}
+
+/* Per-transport callbacks and context ids for talking to the RPM. */
+struct clk_rpmrs_data {
+	int (*set_rate_fn)(struct rpm_clk *r, uint32_t value, uint32_t context);
+	int (*get_rate_fn)(struct rpm_clk *r);
+	int (*handoff_fn)(struct rpm_clk *r);
+	int (*is_enabled)(struct rpm_clk *r);
+	int ctx_active_id;
+	int ctx_sleep_id;
+};
+
+/* SMD transport instance; get_rate_fn is intentionally absent (no query). */
+struct clk_rpmrs_data clk_rpmrs_data_smd = {
+	.set_rate_fn = clk_rpmrs_set_rate_smd,
+	.handoff_fn = clk_rpmrs_handoff_smd,
+	.is_enabled = clk_rpmrs_is_enabled_smd,
+	.ctx_active_id = MSM_RPM_CTX_ACTIVE_SET,
+	.ctx_sleep_id = MSM_RPM_CTX_SLEEP_SET,
+};
+
+/* Serializes all peer-aggregated RPM vote updates below. */
+static DEFINE_RT_MUTEX(rpm_clock_lock);
+
+/*
+ * Split @rate (Hz) into the khz votes sent to the RPM: the active-set vote
+ * (rounded up) and the sleep-set vote, which is zero for active-only clocks.
+ */
+static void to_active_sleep_khz(struct rpm_clk *r, unsigned long rate,
+			unsigned long *active_khz, unsigned long *sleep_khz)
+{
+	/* Convert the rate (hz) to khz */
+	*active_khz = DIV_ROUND_UP(rate, 1000);
+
+	/*
+	 * Active-only clocks don't care what the rate is during sleep. So,
+	 * they vote for zero.
+	 */
+	if (r->active_only)
+		*sleep_khz = 0;
+	else
+		*sleep_khz = *active_khz;
+}
+
+/*
+ * Prepare an RPM clock: aggregate this clock's rate with its peer's (the
+ * other member of the active/sleep pair) and send the active-set vote first,
+ * then the sleep-set vote. Branch clocks vote only on/off (0/1). On sleep
+ * vote failure the active vote is rolled back to the peer's value.
+ */
+static int rpm_clk_prepare(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	uint32_t value;
+	int rc = 0;
+	unsigned long this_khz, this_sleep_khz;
+	unsigned long peer_khz = 0, peer_sleep_khz = 0;
+	struct rpm_clk *peer = r->peer;
+
+	rt_mutex_lock(&rpm_clock_lock);
+
+	to_active_sleep_khz(r, r->c.rate, &this_khz, &this_sleep_khz);
+
+	/* Don't send requests to the RPM if the rate has not been set. */
+	if (this_khz == 0)
+		goto out;
+
+	/* Take peer clock's rate into account only if it's enabled. */
+	if (peer->enabled)
+		to_active_sleep_khz(peer, peer->c.rate,
+				&peer_khz, &peer_sleep_khz);
+
+	value = max(this_khz, peer_khz);
+	if (r->branch)
+		value = !!value;
+
+	rc = clk_rpmrs_set_rate_active(r, value);
+	if (rc)
+		goto out;
+
+	value = max(this_sleep_khz, peer_sleep_khz);
+	if (r->branch)
+		value = !!value;
+
+	rc = clk_rpmrs_set_rate_sleep(r, value);
+	if (rc) {
+		/* Undo the active set vote and restore it to peer_khz */
+		value = peer_khz;
+		rc = clk_rpmrs_set_rate_active(r, value);
+	}
+
+out:
+	if (!rc)
+		r->enabled = true;
+
+	rt_mutex_unlock(&rpm_clock_lock);
+
+	return rc;
+}
+
+/*
+ * Unprepare an RPM clock: re-send the votes with only the peer's
+ * contribution (or skip the RPM entirely if no rate was ever set), then
+ * mark this clock disabled.
+ *
+ * NOTE(review): if the active-set vote fails, the goto skips
+ * "r->enabled = false", leaving the software state enabled - confirm this
+ * is the intended failure behavior.
+ */
+static void rpm_clk_unprepare(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+
+	rt_mutex_lock(&rpm_clock_lock);
+
+	if (r->c.rate) {
+		uint32_t value;
+		struct rpm_clk *peer = r->peer;
+		unsigned long peer_khz = 0, peer_sleep_khz = 0;
+		int rc;
+
+		/* Take peer clock's rate into account only if it's enabled. */
+		if (peer->enabled)
+			to_active_sleep_khz(peer, peer->c.rate,
+					&peer_khz, &peer_sleep_khz);
+
+		value = r->branch ? !!peer_khz : peer_khz;
+		rc = clk_rpmrs_set_rate_active(r, value);
+		if (rc)
+			goto out;
+
+		value = r->branch ? !!peer_sleep_khz : peer_sleep_khz;
+		rc = clk_rpmrs_set_rate_sleep(r, value);
+	}
+	r->enabled = false;
+out:
+	rt_mutex_unlock(&rpm_clock_lock);
+
+	return;
+}
+
+/*
+ * Change an RPM clock's rate. Votes are only re-sent while the clock is
+ * enabled; otherwise the new rate is simply recorded by the framework and
+ * sent on the next prepare.
+ */
+static int rpm_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	unsigned long this_khz, this_sleep_khz;
+	int rc = 0;
+
+	rt_mutex_lock(&rpm_clock_lock);
+
+	if (r->enabled) {
+		uint32_t value;
+		struct rpm_clk *peer = r->peer;
+		unsigned long peer_khz = 0, peer_sleep_khz = 0;
+
+		to_active_sleep_khz(r, rate, &this_khz, &this_sleep_khz);
+
+		/* Take peer clock's rate into account only if it's enabled. */
+		if (peer->enabled)
+			to_active_sleep_khz(peer, peer->c.rate,
+					&peer_khz, &peer_sleep_khz);
+
+		value = max(this_khz, peer_khz);
+		rc = clk_rpmrs_set_rate_active(r, value);
+		if (rc)
+			goto out;
+
+		value = max(this_sleep_khz, peer_sleep_khz);
+		rc = clk_rpmrs_set_rate_sleep(r, value);
+	}
+
+out:
+	rt_mutex_unlock(&rpm_clock_lock);
+
+	return rc;
+}
+
+/*
+ * Report the clock rate, preferring a live query of the RPM when the
+ * transport implements one; otherwise fall back to the cached rate.
+ */
+static unsigned long rpm_clk_get_rate(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+
+	return r->rpmrs_data->get_rate_fn ? r->rpmrs_data->get_rate_fn(r)
+					  : clk->rate;
+}
+
+/* Delegate the enabled check to the RPM transport implementation. */
+static int rpm_clk_is_enabled(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	return r->rpmrs_data->is_enabled(r);
+}
+
+/* Rate rounding is not meaningful for RPM clocks; echo the request back. */
+static long rpm_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	/* Not supported. */
+	return rate;
+}
+
+/* RPM clocks are owned by the RPM processor, never by the local CPU. */
+static bool rpm_clk_is_local(struct clk *clk)
+{
+	return false;
+}
+
+/*
+ * Boot handoff for an RPM clock: let the transport fix up the software rate,
+ * then re-send our vote via rpm_clk_prepare() so software and RPM state
+ * agree. Reports enabled unless either step fails.
+ */
+static enum handoff rpm_clk_handoff(struct clk *clk)
+{
+	struct rpm_clk *r = to_rpm_clk(clk);
+	int rc;
+
+	/*
+	 * Querying an RPM clock's status will return 0 unless the clock's
+	 * rate has previously been set through the RPM. When handing off,
+	 * assume these clocks are enabled (unless the RPM call fails) so
+	 * child clocks of these RPM clocks can still be handed off.
+	 */
+	rc  = r->rpmrs_data->handoff_fn(r);
+	if (rc < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	/*
+	 * Since RPM handoff code may update the software rate of the clock by
+	 * querying the RPM, we need to make sure our request to RPM now
+	 * matches the software rate of the clock. When we send the request
+	 * to RPM, we also need to update any other state info we would
+	 * normally update. So, call the appropriate clock function instead
+	 * of directly using the RPM driver APIs.
+	 */
+	rc = rpm_clk_prepare(clk);
+	if (rc < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+/* RPM misc-clock resource type ("clk0" fourcc) and the scaling-enable id. */
+#define RPM_MISC_CLK_TYPE	0x306b6c63
+#define RPM_SCALING_ENABLE_ID	0x2
+
+/*
+ * One-time enable of RPM clock scaling for both the sleep and active sets.
+ * Idempotent: later calls return 0 without re-sending. Returns a negative
+ * errno (possibly -EPROBE_DEFER) if either message fails.
+ */
+int enable_rpm_scaling(void)
+{
+	int rc, value = 0x1;
+	static int is_inited;
+
+	struct msm_rpm_kvp kvp = {
+		.key = RPM_SMD_KEY_ENABLE,
+		.data = (void *)&value,
+		.length = sizeof(value),
+	};
+
+	if (is_inited)
+		return 0;
+
+	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_SLEEP_SET,
+			RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			WARN(1, "RPM clock scaling (sleep set) did not enable!\n");
+		return rc;
+	}
+
+	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_ACTIVE_SET,
+			RPM_MISC_CLK_TYPE, RPM_SCALING_ENABLE_ID, &kvp, 1);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			WARN(1, "RPM clock scaling (active set) did not enable!\n");
+		return rc;
+	}
+
+	is_inited++;
+	return 0;
+}
+
+/*
+ * Send an immediate (noirq) active-set BIMC vote to the RPM.
+ *
+ * NOTE(review): the resource-id argument passed here is
+ * r->rpmrs_data->ctx_active_id rather than r->rpm_clk_id - verify this is
+ * intentional for the BIMC resource.
+ */
+int vote_bimc(struct rpm_clk *r, uint32_t value)
+{
+	int rc;
+
+	struct msm_rpm_kvp kvp = {
+		.key = r->rpm_key,
+		.data = (void *)&value,
+		.length = sizeof(value),
+	};
+
+	rc = msm_rpm_send_message_noirq(MSM_RPM_CTX_ACTIVE_SET,
+			r->rpm_res_type, r->rpmrs_data->ctx_active_id,
+			&kvp, 1);
+	if (rc < 0) {
+		if (rc != -EPROBE_DEFER)
+			WARN(1, "BIMC vote not sent!\n");
+		return rc;
+	}
+
+	return rc;
+}
+
+/* Ops for rate-settable RPM clocks. */
+struct clk_ops clk_ops_rpm = {
+	.prepare = rpm_clk_prepare,
+	.unprepare = rpm_clk_unprepare,
+	.set_rate = rpm_clk_set_rate,
+	.get_rate = rpm_clk_get_rate,
+	.is_enabled = rpm_clk_is_enabled,
+	.round_rate = rpm_clk_round_rate,
+	.is_local = rpm_clk_is_local,
+	.handoff = rpm_clk_handoff,
+};
+
+/* Ops for on/off-only RPM branch clocks (no rate control). */
+struct clk_ops clk_ops_rpm_branch = {
+	.prepare = rpm_clk_prepare,
+	.unprepare = rpm_clk_unprepare,
+	.is_local = rpm_clk_is_local,
+	.handoff = rpm_clk_handoff,
+};
+
+/*
+ * Shared DT parsing for all RPM clock flavors: allocates the rpm_clk,
+ * cross-links it with its active/sleep peer (sharing the last-vote caches),
+ * and reads the RPM resource type, id and KVP key. The 4-character
+ * "qcom,res-type" and "qcom,key" strings are packed into u32 fourccs via
+ * sscanf("%4c").
+ */
+static struct rpm_clk *rpm_clk_dt_parser_common(struct device *dev,
+						struct device_node *np)
+{
+	struct rpm_clk *rpm, *peer;
+	struct clk *c;
+	int rc = 0;
+	phandle p;
+	const char *str;
+
+	rpm = devm_kzalloc(dev, sizeof(*rpm), GFP_KERNEL);
+	if (!rpm) {
+		dt_err(np, "memory alloc failure\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = of_property_read_phandle_index(np, "qcom,rpm-peer", 0, &p);
+	if (rc) {
+		dt_err(np, "missing qcom,rpm-peer dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	/* Rely on whoever's called last to setup the circular ref */
+	c = msmclk_lookup_phandle(dev, p);
+	if (!IS_ERR(c)) {
+		/* Both peers share one active and one sleep vote cache. */
+		uint32_t *sleep = devm_kzalloc(dev, sizeof(uint32_t),
+						GFP_KERNEL);
+		uint32_t *active =
+				devm_kzalloc(dev, sizeof(uint32_t),
+						GFP_KERNEL);
+
+		if (!sleep || !active)
+			return ERR_PTR(-ENOMEM);
+		peer = to_rpm_clk(c);
+		peer->peer = rpm;
+		rpm->peer = peer;
+		rpm->last_active_set_vote = active;
+		peer->last_active_set_vote = active;
+		rpm->last_sleep_set_vote = sleep;
+		peer->last_sleep_set_vote = sleep;
+	}
+
+	rpm->rpmrs_data = &clk_rpmrs_data_smd;
+	rpm->active_only = of_device_is_compatible(np, "qcom,rpm-a-clk") ||
+			of_device_is_compatible(np, "qcom,rpm-branch-a-clk");
+
+	rc = of_property_read_string(np, "qcom,res-type", &str);
+	if (rc) {
+		dt_err(np, "missing qcom,res-type dt property\n");
+		return ERR_PTR(rc);
+	}
+	if (sscanf(str, "%4c", (char *) &rpm->rpm_res_type) <= 0)
+		return ERR_PTR(-EINVAL);
+
+	rc = of_property_read_u32(np, "qcom,res-id", &rpm->rpm_clk_id);
+	if (rc) {
+		dt_err(np, "missing qcom,res-id dt property\n");
+		return ERR_PTR(rc);
+	}
+
+	rc = of_property_read_string(np, "qcom,key", &str);
+	if (rc) {
+		dt_err(np, "missing qcom,key dt property\n");
+		return ERR_PTR(rc);
+	}
+	if (sscanf(str, "%4c", (char *) &rpm->rpm_key) <= 0)
+		return ERR_PTR(-EINVAL);
+	return rpm;
+}
+
+/* DT parser for plain (rate-settable, non-branch) RPM clocks. */
+static void *rpm_clk_dt_parser(struct device *dev, struct device_node *np)
+{
+	struct rpm_clk *rpm = rpm_clk_dt_parser_common(dev, np);
+
+	if (IS_ERR(rpm))
+		return rpm;
+
+	rpm->c.ops = &clk_ops_rpm;
+	return msmclk_generic_clk_init(dev, np, &rpm->c);
+}
+
+/*
+ * DT parser for on/off-only RPM branch clocks; optionally seeds the rate
+ * from "qcom,rcg-init-rate" (the property is allowed to be absent).
+ */
+static void *rpm_branch_clk_dt_parser(struct device *dev,
+					struct device_node *np)
+{
+	struct rpm_clk *rpm;
+	u32 rate;
+	int rc;
+	rpm = rpm_clk_dt_parser_common(dev, np);
+	if (IS_ERR(rpm))
+		return rpm;
+
+	rpm->c.ops = &clk_ops_rpm_branch;
+	rpm->branch = true;
+
+	rc = of_property_read_u32(np, "qcom,rcg-init-rate", &rate);
+	if (!rc)
+		rpm->c.rate = rate;
+
+	return msmclk_generic_clk_init(dev, np, &rpm->c);
+}
+MSMCLK_PARSER(rpm_clk_dt_parser, "qcom,rpm-clk", 0);
+MSMCLK_PARSER(rpm_clk_dt_parser, "qcom,rpm-a-clk", 1);
+MSMCLK_PARSER(rpm_branch_clk_dt_parser, "qcom,rpm-branch-clk", 0);
+MSMCLK_PARSER(rpm_branch_clk_dt_parser, "qcom,rpm-branch-a-clk", 1);
diff --git a/drivers/clk/msm/clock-voter.c b/drivers/clk/msm/clock-voter.c
new file mode 100644
index 000000000000..0d609743ffc7
--- /dev/null
+++ b/drivers/clk/msm/clock-voter.c
@@ -0,0 +1,202 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/rtmutex.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/clock-voter.h>
+#include <soc/qcom/msm-clock-controller.h>
+
+/* Serializes all voter rate aggregation and parent rate updates. */
+static DEFINE_RT_MUTEX(voter_clk_lock);
+
+/* Aggregate the rate of clocks that are currently on. */
+static unsigned long voter_clk_aggregate_rate(const struct clk *parent)
+{
+	struct clk *clk;
+	unsigned long rate = 0;
+
+	/* Max over every enabled voter child; 0 if none are enabled. */
+	list_for_each_entry(clk, &parent->children, siblings) {
+		struct clk_voter *v = to_clk_voter(clk);
+		if (v->enabled)
+			rate = max(clk->rate, rate);
+	}
+	return rate;
+}
+
+/*
+ * Record this voter's rate request; if the voter is enabled and the new
+ * aggregate (max over all enabled siblings) differs from the current one,
+ * propagate it to the parent. Branch voters ignore rate requests.
+ */
+static int voter_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	int ret = 0;
+	struct clk *clkp;
+	struct clk_voter *clkh, *v = to_clk_voter(clk);
+	unsigned long cur_rate, new_rate, other_rate = 0;
+
+	if (v->is_branch)
+		return 0;
+
+	rt_mutex_lock(&voter_clk_lock);
+
+	if (v->enabled) {
+		struct clk *parent = clk->parent;
+
+		/*
+		 * Get the aggregate rate without this clock's vote and update
+		 * if the new rate is different than the current rate
+		 */
+		list_for_each_entry(clkp, &parent->children, siblings) {
+			clkh = to_clk_voter(clkp);
+			if (clkh->enabled && clkh != v)
+				other_rate = max(clkp->rate, other_rate);
+		}
+
+		cur_rate = max(other_rate, clk->rate);
+		new_rate = max(other_rate, rate);
+
+		if (new_rate != cur_rate) {
+			ret = clk_set_rate(parent, new_rate);
+			if (ret)
+				goto unlock;
+		}
+	}
+	/* Remember the vote even while disabled; applied on next prepare. */
+	clk->rate = rate;
+unlock:
+	rt_mutex_unlock(&voter_clk_lock);
+
+	return ret;
+}
+
+/*
+ * Activate this voter's vote: raise the parent rate first if this vote
+ * exceeds the current aggregate, then mark the voter enabled. Branch voters
+ * only flip the enabled flag.
+ */
+static int voter_clk_prepare(struct clk *clk)
+{
+	int ret = 0;
+	unsigned long cur_rate;
+	struct clk *parent;
+	struct clk_voter *v = to_clk_voter(clk);
+
+	rt_mutex_lock(&voter_clk_lock);
+	parent = clk->parent;
+
+	if (v->is_branch) {
+		v->enabled = true;
+		goto out;
+	}
+
+	/*
+	 * Increase the rate if this clock is voting for a higher rate
+	 * than the current rate.
+	 */
+	cur_rate = voter_clk_aggregate_rate(parent);
+	if (clk->rate > cur_rate) {
+		ret = clk_set_rate(parent, clk->rate);
+		if (ret)
+			goto out;
+	}
+	v->enabled = true;
+out:
+	rt_mutex_unlock(&voter_clk_lock);
+
+	return ret;
+}
+
+/*
+ * Withdraw this voter's vote: clear the enabled flag first (so the
+ * aggregate below excludes it), then lower the parent rate if this voter
+ * was the one holding it up.
+ */
+static void voter_clk_unprepare(struct clk *clk)
+{
+	unsigned long cur_rate, new_rate;
+	struct clk *parent;
+	struct clk_voter *v = to_clk_voter(clk);
+
+
+	rt_mutex_lock(&voter_clk_lock);
+	parent = clk->parent;
+
+	/*
+	 * Decrease the rate if this clock was the only one voting for
+	 * the highest rate.
+	 */
+	v->enabled = false;
+	if (v->is_branch)
+		goto out;
+
+	new_rate = voter_clk_aggregate_rate(parent);
+	cur_rate = max(new_rate, clk->rate);
+
+	if (new_rate < cur_rate)
+		clk_set_rate(parent, new_rate);
+
+out:
+	rt_mutex_unlock(&voter_clk_lock);
+}
+
+/* Report whether this voter currently holds an enable vote. */
+static int voter_clk_is_enabled(struct clk *clk)
+{
+	return to_clk_voter(clk)->enabled;
+}
+
+/* Rounding is delegated to the parent, which owns the real hardware. */
+static long voter_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	return clk_round_rate(clk->parent, rate);
+}
+
+/* Voter clocks are pure software constructs on the local CPU. */
+static bool voter_clk_is_local(struct clk *clk)
+{
+	return true;
+}
+
+/*
+ * Boot handoff for a voter: a voter with no recorded rate carries no vote
+ * and is treated as disabled; otherwise re-apply the vote via prepare.
+ */
+static enum handoff voter_clk_handoff(struct clk *clk)
+{
+	if (!clk->rate)
+		return HANDOFF_DISABLED_CLK;
+
+	/*
+	 * Send the default rate to the parent if necessary and update the
+	 * software state of the voter clock.
+	 */
+	if (voter_clk_prepare(clk) < 0)
+		return HANDOFF_DISABLED_CLK;
+
+	return HANDOFF_ENABLED_CLK;
+}
+
+/* Ops for software voter clocks that aggregate onto a shared parent. */
+struct clk_ops clk_ops_voter = {
+	.prepare = voter_clk_prepare,
+	.unprepare = voter_clk_unprepare,
+	.set_rate = voter_clk_set_rate,
+	.is_enabled = voter_clk_is_enabled,
+	.round_rate = voter_clk_round_rate,
+	.is_local = voter_clk_is_local,
+	.handoff = voter_clk_handoff,
+};
+
+/*
+ * DT parser for software vote clocks. Reads the mandatory
+ * "qcom,config-rate" property and uses it as the voter's initial rate.
+ */
+static void *sw_vote_clk_dt_parser(struct device *dev,
+				struct device_node *np)
+{
+	struct clk_voter *v;
+	int rc;
+	u32 temp;
+
+	v = devm_kzalloc(dev, sizeof(*v), GFP_KERNEL);
+	if (!v) {
+		dt_err(np, "failed to alloc memory\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = of_property_read_u32(np, "qcom,config-rate", &temp);
+	if (rc) {
+		dt_prop_err(np, "qcom,config-rate", "is missing");
+		return ERR_PTR(rc);
+	}
+	/*
+	 * Fix: the parsed value was previously read and then dropped,
+	 * leaving the voter's rate at zero; store it as the initial rate.
+	 */
+	v->c.rate = temp;
+
+	v->c.ops = &clk_ops_voter;
+	return msmclk_generic_clk_init(dev, np, &v->c);
+}
+MSMCLK_PARSER(sw_vote_clk_dt_parser, "qcom,sw-vote-clk", 0);
diff --git a/drivers/clk/msm/clock.c b/drivers/clk/msm/clock.c
new file mode 100644
index 000000000000..8b30a1bc7eb5
--- /dev/null
+++ b/drivers/clk/msm/clock.c
@@ -0,0 +1,1389 @@
+/* arch/arm/mach-msm/clock.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2015, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+
+#include <trace/events/power.h>
+#include "clock.h"
+
+struct handoff_clk {
+ struct list_head list;
+ struct clk *clk;
+};
+static LIST_HEAD(handoff_list);
+
+struct handoff_vdd {
+ struct list_head list;
+ struct clk_vdd_class *vdd_class;
+};
+static LIST_HEAD(handoff_vdd_list);
+
+static DEFINE_MUTEX(msm_clock_init_lock);
+LIST_HEAD(orphan_clk_list);
+static LIST_HEAD(clk_notifier_list);
+
+/*
+ * Find the lowest voltage level whose fmax can support @rate.
+ * Returns the level index, or -EINVAL if @rate exceeds every fmax.
+ */
+int find_vdd_level(struct clk *clk, unsigned long rate)
+{
+	int lvl = 0;
+
+	while (lvl < clk->num_fmax && rate > clk->fmax[lvl])
+		lvl++;
+
+	if (lvl >= clk->num_fmax) {
+		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
+			clk->dbg_name);
+		return -EINVAL;
+	}
+
+	return lvl;
+}
+
+/*
+ * Re-aggregate all outstanding votes on @vdd_class and, if the winning
+ * level changed, program every regulator to the new level. On failure
+ * all regulators already touched are rolled back to the previous level.
+ * Caller must hold vdd_class->lock.
+ */
+static int update_vdd(struct clk_vdd_class *vdd_class)
+{
+	int level, rc = 0, i, ignore;
+	struct regulator **r = vdd_class->regulator;
+	int *uv = vdd_class->vdd_uv;
+	int *ua = vdd_class->vdd_ua;
+	int n_reg = vdd_class->num_regulators;
+	int cur_lvl = vdd_class->cur_level;
+	int max_lvl = vdd_class->num_levels - 1;
+	int cur_base = cur_lvl * n_reg;
+	int new_base;
+
+	/* aggregate votes: highest voted level wins; 0 means "off" */
+	for (level = max_lvl; level > 0; level--)
+		if (vdd_class->level_votes[level])
+			break;
+
+	if (level == cur_lvl)
+		return 0;
+
+	/* uv/ua are flat [level][regulator] arrays; convert to offsets */
+	max_lvl = max_lvl * n_reg;
+	new_base = level * n_reg;
+	for (i = 0; i < vdd_class->num_regulators; i++) {
+		rc = regulator_set_voltage(r[i], uv[new_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+		if (rc)
+			goto set_voltage_fail;
+
+		if (ua) {
+			rc = regulator_set_load(r[i], ua[new_base + i]);
+			/* positive return is a mode, not an error */
+			rc = rc > 0 ? 0 : rc;
+			if (rc)
+				goto set_mode_fail;
+		}
+		/*
+		 * cur_lvl == num_levels is the "never programmed" sentinel;
+		 * TODO confirm — presumably set at vdd_class init time.
+		 */
+		if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
+			rc = regulator_enable(r[i]);
+		else if (level == 0)
+			rc = regulator_disable(r[i]);
+		if (rc)
+			goto enable_disable_fail;
+	}
+	/* classes without regulators may use a custom set_vdd callback */
+	if (vdd_class->set_vdd && !vdd_class->num_regulators)
+		rc = vdd_class->set_vdd(vdd_class, level);
+
+	if (!rc)
+		vdd_class->cur_level = level;
+
+	return rc;
+
+enable_disable_fail:
+	/*
+	 * set_optimum_mode could use voltage to derive mode.  Restore
+	 * previous voltage setting for r[i] first.
+	 */
+	if (ua) {
+		regulator_set_voltage(r[i], uv[cur_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+		regulator_set_load(r[i], ua[cur_base + i]);
+	}
+
+set_mode_fail:
+	regulator_set_voltage(r[i], uv[cur_base + i],
+		vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+
+set_voltage_fail:
+	/* roll back every regulator that was successfully programmed */
+	for (i--; i >= 0; i--) {
+		regulator_set_voltage(r[i], uv[cur_base + i],
+			vdd_class->use_max_uV ? INT_MAX : uv[max_lvl + i]);
+		if (ua)
+			regulator_set_load(r[i], ua[cur_base + i]);
+		if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
+			regulator_disable(r[i]);
+		else if (level == 0)
+			/* 'ignore' swallows the must-check return value */
+			ignore = regulator_enable(r[i]);
+	}
+	return rc;
+}
+
+/* Add one vote for @level on @vdd_class and re-evaluate the aggregate. */
+int vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
+{
+	int ret;
+
+	if (level >= vdd_class->num_levels)
+		return -EINVAL;
+
+	mutex_lock(&vdd_class->lock);
+	vdd_class->level_votes[level]++;
+	ret = update_vdd(vdd_class);
+	if (ret)
+		vdd_class->level_votes[level]--;	/* undo on failure */
+	mutex_unlock(&vdd_class->lock);
+
+	return ret;
+}
+
+/* Remove vote for a voltage level. */
+int unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
+{
+	int rc = 0;
+
+	if (level >= vdd_class->num_levels)
+		return -EINVAL;
+
+	mutex_lock(&vdd_class->lock);
+	/* unbalanced unvote: warn and bail without touching the count */
+	if (WARN(!vdd_class->level_votes[level],
+			"Reference counts are incorrect for %s level %d\n",
+			vdd_class->class_name, level))
+		goto out;
+	vdd_class->level_votes[level]--;
+	rc = update_vdd(vdd_class);
+	if (rc)
+		vdd_class->level_votes[level]++;	/* restore on failure */
+out:
+	mutex_unlock(&vdd_class->lock);
+	return rc;
+}
+
+/* Cast a voltage vote for the level that supports @rate on @clk. */
+static int vote_rate_vdd(struct clk *clk, unsigned long rate)
+{
+	int lvl;
+
+	if (!clk->vdd_class)
+		return 0;
+
+	lvl = find_vdd_level(clk, rate);
+	return lvl < 0 ? lvl : vote_vdd_level(clk->vdd_class, lvl);
+}
+
+/* Drop the voltage vote previously cast for @rate on @clk. */
+static void unvote_rate_vdd(struct clk *clk, unsigned long rate)
+{
+	int lvl;
+
+	if (!clk->vdd_class)
+		return;
+
+	lvl = find_vdd_level(clk, rate);
+	if (lvl >= 0)
+		unvote_vdd_level(clk->vdd_class, lvl);
+}
+
+/* A rate is valid when some voltage level of the clock can support it. */
+bool is_rate_valid(struct clk *clk, unsigned long rate)
+{
+	if (!clk->vdd_class)
+		return true;
+
+	return find_vdd_level(clk, rate) >= 0;
+}
+
+/**
+ * __clk_pre_reparent() - Set up the new parent before switching to it and
+ * prevent the enable state of the child clock from changing.
+ * @c: The child clock that's going to switch parents
+ * @new: The new parent that the child clock is going to switch to
+ * @flags: Pointer to scratch space to save spinlock flags
+ *
+ * Cannot be called from atomic context.
+ *
+ * Use this API to set up the @new parent clock to be able to support the
+ * current prepare and enable state of the child clock @c. Once the parent is
+ * set up, the child clock can safely switch to it.
+ *
+ * The caller shall grab the prepare_lock of clock @c before calling this API
+ * and only release it after calling __clk_post_reparent() for clock @c (or
+ * if this API fails). This is necessary to prevent the prepare state of the
+ * child clock @c from changing while the reparenting is in progress. Since
+ * this API takes care of grabbing the enable lock of @c, only atomic
+ * operation are allowed between calls to __clk_pre_reparent and
+ * __clk_post_reparent()
+ *
+ * The scratch space pointed to by @flags should not be altered before
+ * calling __clk_post_reparent() for clock @c.
+ *
+ * See also: __clk_post_reparent()
+ */
+int __clk_pre_reparent(struct clk *c, struct clk *new, unsigned long *flags)
+{
+	int rc;
+
+	/* Mirror the child's prepare state onto the new parent. */
+	if (c->prepare_count) {
+		rc = clk_prepare(new);
+		if (rc)
+			return rc;
+	}
+
+	/* Hold c's enable lock so c's enable state cannot change under us. */
+	spin_lock_irqsave(&c->lock, *flags);
+	if (c->count) {
+		rc = clk_enable(new);
+		if (rc) {
+			spin_unlock_irqrestore(&c->lock, *flags);
+			clk_unprepare(new);
+			return rc;
+		}
+	}
+	/* NB: on success the spinlock stays held until __clk_post_reparent(). */
+	return 0;
+}
+
+/**
+ * __clk_post_reparent() - Release requirements on old parent after switching
+ * away from it and allow changes to the child clock's enable state.
+ * @c: The child clock that switched parents
+ * @old: The old parent that the child clock switched away from or the new
+ * parent of a failed reparent attempt.
+ * @flags: Pointer to scratch space where spinlock flags were saved
+ *
+ * Cannot be called from atomic context.
+ *
+ * This API works in tandem with __clk_pre_reparent. Use this API to
+ * - Remove prepare and enable requirements from the @old parent after
+ * switching away from it
+ * - Or, undo the effects of __clk_pre_reparent() after a failed attempt to
+ * change parents
+ *
+ * The caller shall release the prepare_lock of @c that was grabbed before
+ * calling __clk_pre_reparent() only after this API is called (or if
+ * __clk_pre_reparent() fails). This is necessary to prevent the prepare
+ * state of the child clock @c from changing while the reparenting is in
+ * progress. Since this API releases the enable lock of @c, the limit to
+ * atomic operations set by __clk_pre_reparent() is no longer present.
+ *
+ * The scratch space pointed to by @flags shall not be altered since the call
+ * to __clk_pre_reparent() for clock @c.
+ *
+ * See also: __clk_pre_reparent()
+ */
+void __clk_post_reparent(struct clk *c, struct clk *old, unsigned long *flags)
+{
+	/* Drop the enable requirement mirrored onto @old. */
+	if (c->count)
+		clk_disable(old);
+	/* Releases the lock taken in __clk_pre_reparent(). */
+	spin_unlock_irqrestore(&c->lock, *flags);
+
+	/* Drop the prepare requirement mirrored onto @old. */
+	if (c->prepare_count)
+		clk_unprepare(old);
+}
+
+/*
+ * Prepare a clock: on the 0->1 transition, recursively prepare the parent
+ * and any 'depends' clock, vote for the voltage level matching the cached
+ * rate, then invoke the hardware prepare op. Errors unwind in reverse.
+ */
+int clk_prepare(struct clk *clk)
+{
+	int ret = 0;
+	struct clk *parent;
+
+	if (!clk)
+		return 0;
+	if (IS_ERR(clk))
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+	if (clk->prepare_count == 0) {
+		parent = clk->parent;
+
+		ret = clk_prepare(parent);
+		if (ret)
+			goto out;
+		ret = clk_prepare(clk->depends);
+		if (ret)
+			goto err_prepare_depends;
+
+		ret = vote_rate_vdd(clk, clk->rate);
+		if (ret)
+			goto err_vote_vdd;
+		if (clk->ops->prepare)
+			ret = clk->ops->prepare(clk);
+		if (ret)
+			goto err_prepare_clock;
+	}
+	/* count is bumped even on the fast path (already prepared) */
+	clk->prepare_count++;
+out:
+	mutex_unlock(&clk->prepare_lock);
+	return ret;
+err_prepare_clock:
+	unvote_rate_vdd(clk, clk->rate);
+err_vote_vdd:
+	clk_unprepare(clk->depends);
+err_prepare_depends:
+	clk_unprepare(parent);
+	goto out;
+}
+EXPORT_SYMBOL(clk_prepare);
+
+/*
+ * Standard clock functions defined in include/linux/clk.h
+ */
+/*
+ * Enable a clock (atomic context allowed): on the 0->1 transition,
+ * recursively enable the parent and the 'depends' clock before invoking
+ * the hardware enable op. Must be preceded by clk_prepare().
+ */
+int clk_enable(struct clk *clk)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct clk *parent;
+	const char *name;
+
+	if (!clk)
+		return 0;
+	if (IS_ERR(clk))
+		return -EINVAL;
+	name = clk->dbg_name;
+
+	spin_lock_irqsave(&clk->lock, flags);
+	WARN(!clk->prepare_count,
+			"%s: Don't call enable on unprepared clocks\n", name);
+	if (clk->count == 0) {
+		parent = clk->parent;
+
+		/* recursion is safe: each clk has its own spinlock */
+		ret = clk_enable(parent);
+		if (ret)
+			goto err_enable_parent;
+		ret = clk_enable(clk->depends);
+		if (ret)
+			goto err_enable_depends;
+
+		trace_clock_enable(name, 1, smp_processor_id());
+		if (clk->ops->enable)
+			ret = clk->ops->enable(clk);
+		if (ret)
+			goto err_enable_clock;
+	}
+	clk->count++;
+	spin_unlock_irqrestore(&clk->lock, flags);
+
+	return 0;
+
+err_enable_clock:
+	clk_disable(clk->depends);
+err_enable_depends:
+	clk_disable(parent);
+err_enable_parent:
+	spin_unlock_irqrestore(&clk->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(clk_enable);
+
+/*
+ * Disable a clock: on the 1->0 transition, invoke the hardware disable op
+ * and then release the 'depends' clock and the parent.
+ */
+void clk_disable(struct clk *clk)
+{
+	const char *name;
+	unsigned long flags;
+
+	if (IS_ERR_OR_NULL(clk))
+		return;
+	name = clk->dbg_name;
+
+	spin_lock_irqsave(&clk->lock, flags);
+	WARN(!clk->prepare_count,
+			"%s: Never called prepare or calling disable after unprepare\n",
+			name);
+	/* refuse to underflow the enable count */
+	if (WARN(clk->count == 0, "%s is unbalanced", name))
+		goto out;
+	if (clk->count == 1) {
+		struct clk *parent = clk->parent;
+
+		trace_clock_disable(name, 0, smp_processor_id());
+		if (clk->ops->disable)
+			clk->ops->disable(clk);
+		clk_disable(clk->depends);
+		clk_disable(parent);
+	}
+	clk->count--;
+out:
+	spin_unlock_irqrestore(&clk->lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
+/*
+ * Unprepare a clock: on the 1->0 transition, invoke the hardware
+ * unprepare op, drop the voltage vote for the cached rate, and release
+ * the 'depends' clock and parent prepare references.
+ */
+void clk_unprepare(struct clk *clk)
+{
+	const char *name;
+
+	if (IS_ERR_OR_NULL(clk))
+		return;
+	name = clk->dbg_name;
+
+	mutex_lock(&clk->prepare_lock);
+	/* refuse to underflow the prepare count */
+	if (WARN(!clk->prepare_count, "%s is unbalanced (prepare)", name))
+		goto out;
+	if (clk->prepare_count == 1) {
+		struct clk *parent = clk->parent;
+
+		WARN(clk->count,
+			"%s: Don't call unprepare when the clock is enabled\n",
+			name);
+
+		if (clk->ops->unprepare)
+			clk->ops->unprepare(clk);
+		unvote_rate_vdd(clk, clk->rate);
+		clk_unprepare(clk->depends);
+		clk_unprepare(parent);
+	}
+	clk->prepare_count--;
+out:
+	mutex_unlock(&clk->prepare_lock);
+}
+EXPORT_SYMBOL(clk_unprepare);
+
+/* Assert/deassert a clock's reset line via its ops, if supported. */
+int clk_reset(struct clk *clk, enum clk_reset_action action)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	return clk->ops->reset ? clk->ops->reset(clk, action) : -ENOSYS;
+}
+EXPORT_SYMBOL(clk_reset);
+
+/**
+ * __clk_notify - call clk notifier chain
+ * @clk: struct clk * that is changing rate
+ * @msg: clk notifier type (see include/linux/clk.h)
+ * @old_rate: old clk rate
+ * @new_rate: new clk rate
+ *
+ * Triggers a notifier call chain on the clk rate-change notification
+ * for 'clk'. Passes a pointer to the struct clk and the previous
+ * and current rates to the notifier callback. Intended to be called by
+ * internal clock code only. Returns NOTIFY_DONE from the last driver
+ * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
+ * a driver returns that.
+ */
+static int __clk_notify(struct clk *clk, unsigned long msg,
+				unsigned long old_rate, unsigned long new_rate)
+{
+	struct msm_clk_notifier *cn;
+	struct msm_clk_notifier_data data = {
+		.clk = clk,
+		.old_rate = old_rate,
+		.new_rate = new_rate,
+	};
+	int ret = NOTIFY_DONE;
+
+	/* At most one notifier entry exists per clock. */
+	list_for_each_entry(cn, &clk_notifier_list, node) {
+		if (cn->clk != clk)
+			continue;
+		ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
+						&data);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * clk rate change notifiers
+ *
+ * Note - The following notifier functionality is a verbatim copy
+ * of the implementation in the common clock framework, copied here
+ * until MSM switches to the common clock framework.
+ */
+
+/**
+ * msm_clk_notif_register - add a clk rate change notifier
+ * @clk: struct clk * to watch
+ * @nb: struct notifier_block * with callback info
+ *
+ * Request notification when clk's rate changes. This uses an SRCU
+ * notifier because we want it to block and notifier unregistrations are
+ * uncommon. The callbacks associated with the notifier must not
+ * re-enter into the clk framework by calling any top-level clk APIs;
+ * this will cause a nested prepare_lock mutex.
+ *
+ * Pre-change notifier callbacks will be passed the current, pre-change
+ * rate of the clk via struct msm_clk_notifier_data.old_rate. The new,
+ * post-change rate of the clk is passed via struct
+ * msm_clk_notifier_data.new_rate.
+ *
+ * Post-change notifiers will pass the now-current, post-change rate of
+ * the clk in both struct msm_clk_notifier_data.old_rate and struct
+ * msm_clk_notifier_data.new_rate.
+ *
+ * Abort-change notifiers are effectively the opposite of pre-change
+ * notifiers: the original pre-change clk rate is passed in via struct
+ * msm_clk_notifier_data.new_rate and the failed post-change rate is passed
+ * in via struct msm_clk_notifier_data.old_rate.
+ *
+ * msm_clk_notif_register() must be called from non-atomic context.
+ * Returns -EINVAL if called with null arguments, -ENOMEM upon
+ * allocation failure; otherwise, passes along the return value of
+ * srcu_notifier_chain_register().
+ */
+int msm_clk_notif_register(struct clk *clk, struct notifier_block *nb)
+{
+	struct msm_clk_notifier *cn, *found = NULL;
+	int ret = -ENOMEM;
+
+	if (!clk || !nb)
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+
+	/* search the list of notifiers for this clk */
+	list_for_each_entry(cn, &clk_notifier_list, node) {
+		if (cn->clk == clk) {
+			found = cn;
+			break;
+		}
+	}
+
+	/*
+	 * If clk wasn't in the notifier list, allocate a new entry.
+	 * Using an explicit 'found' pointer avoids dereferencing the loop
+	 * cursor after a complete traversal, where it aliases the list
+	 * head rather than a real msm_clk_notifier.
+	 */
+	if (!found) {
+		found = kzalloc(sizeof(struct msm_clk_notifier), GFP_KERNEL);
+		if (!found)
+			goto out;
+
+		found->clk = clk;
+		srcu_init_notifier_head(&found->notifier_head);
+
+		list_add(&found->node, &clk_notifier_list);
+	}
+
+	ret = srcu_notifier_chain_register(&found->notifier_head, nb);
+
+	clk->notifier_count++;
+
+out:
+	mutex_unlock(&clk->prepare_lock);
+
+	return ret;
+}
+
+/**
+ * msm_clk_notif_unregister - remove a clk rate change notifier
+ * @clk: struct clk *
+ * @nb: struct notifier_block * with callback info
+ *
+ * Request no further notification for changes to 'clk' and frees memory
+ * allocated in msm_clk_notifier_register.
+ *
+ * Returns -EINVAL if called with null arguments; otherwise, passes
+ * along the return value of srcu_notifier_chain_unregister().
+ */
+int msm_clk_notif_unregister(struct clk *clk, struct notifier_block *nb)
+{
+	struct msm_clk_notifier *cn, *found = NULL;
+	int ret = -EINVAL;
+
+	if (!clk || !nb)
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+
+	/*
+	 * Track the match in 'found' instead of testing the loop cursor
+	 * afterwards: when no entry matches, the cursor points at the list
+	 * head reinterpreted as an entry and must not be dereferenced.
+	 */
+	list_for_each_entry(cn, &clk_notifier_list, node) {
+		if (cn->clk == clk) {
+			found = cn;
+			break;
+		}
+	}
+
+	if (found) {
+		ret = srcu_notifier_chain_unregister(&found->notifier_head,
+							nb);
+
+		clk->notifier_count--;
+
+		/* XXX the notifier code should handle this better */
+		if (!found->notifier_head.head) {
+			srcu_cleanup_notifier_head(&found->notifier_head);
+			list_del(&found->node);
+			kfree(found);
+		}
+
+	} else {
+		ret = -ENOENT;
+	}
+
+	mutex_unlock(&clk->prepare_lock);
+
+	return ret;
+}
+
+/* Query the clock's rate from hardware if possible, else the cache. */
+unsigned long clk_get_rate(struct clk *clk)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return 0;
+
+	return clk->ops->get_rate ? clk->ops->get_rate(clk) : clk->rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+/*
+ * Change a clock's rate. Sequence: notify PRE_RATE_CHANGE, run the
+ * pre_set_rate op, vote the target voltage (if prepared), program the
+ * rate, drop the old voltage vote, run post_set_rate, notify
+ * POST_RATE_CHANGE. Errors notify ABORT_RATE_CHANGE / unwind the vote.
+ */
+int clk_set_rate(struct clk *clk, unsigned long rate)
+{
+	unsigned long start_rate;
+	int rc = 0;
+	const char *name;
+
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+	name = clk->dbg_name;
+
+	if (!is_rate_valid(clk, rate))
+		return -EINVAL;
+
+	mutex_lock(&clk->prepare_lock);
+
+	/* Return early if the rate isn't going to change */
+	if (clk->rate == rate && !(clk->flags & CLKFLAG_NO_RATE_CACHE))
+		goto out;
+
+	if (!clk->ops->set_rate) {
+		rc = -ENOSYS;
+		goto out;
+	}
+
+	trace_clock_set_rate(name, rate, raw_smp_processor_id());
+
+	start_rate = clk->rate;
+
+	if (clk->notifier_count)
+		__clk_notify(clk, PRE_RATE_CHANGE, clk->rate, rate);
+
+	if (clk->ops->pre_set_rate) {
+		rc = clk->ops->pre_set_rate(clk, rate);
+		if (rc)
+			goto abort_set_rate;
+	}
+
+	/* Enforce vdd requirements for target frequency. */
+	if (clk->prepare_count) {
+		rc = vote_rate_vdd(clk, rate);
+		if (rc)
+			goto err_vote_vdd;
+	}
+
+	rc = clk->ops->set_rate(clk, rate);
+	if (rc)
+		goto err_set_rate;
+	clk->rate = rate;
+
+	/* Release vdd requirements for starting frequency. */
+	if (clk->prepare_count)
+		unvote_rate_vdd(clk, start_rate);
+
+	if (clk->ops->post_set_rate)
+		clk->ops->post_set_rate(clk, start_rate);
+
+	if (clk->notifier_count)
+		__clk_notify(clk, POST_RATE_CHANGE, start_rate, clk->rate);
+
+	trace_clock_set_rate_complete(name, clk->rate, raw_smp_processor_id());
+out:
+	mutex_unlock(&clk->prepare_lock);
+	return rc;
+
+abort_set_rate:
+	__clk_notify(clk, ABORT_RATE_CHANGE, clk->rate, rate);
+err_set_rate:
+	if (clk->prepare_count)
+		unvote_rate_vdd(clk, rate);
+err_vote_vdd:
+	/* clk->rate is still the old rate. So, pass the new rate instead. */
+	if (clk->ops->post_set_rate)
+		clk->ops->post_set_rate(clk, rate);
+	goto out;
+}
+EXPORT_SYMBOL(clk_set_rate);
+
+/*
+ * Round @rate to the nearest rate the clock can actually run at, capped
+ * at the highest fmax of the clock's voltage levels.
+ */
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+	long rrate;
+	unsigned long fmax = 0, i;
+
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	/* Cap the request at the highest supported fmax (if any). */
+	for (i = 0; i < clk->num_fmax; i++)
+		fmax = max(fmax, clk->fmax[i]);
+	if (!fmax)
+		fmax = ULONG_MAX;
+	rate = min(rate, fmax);
+
+	if (clk->ops->round_rate)
+		rrate = clk->ops->round_rate(clk, rate);
+	else if (clk->rate)
+		rrate = clk->rate;
+	else
+		return -ENOSYS;
+
+	/*
+	 * Propagate errors from the round_rate op. Without this check a
+	 * negative rrate is promoted to a huge unsigned value by the
+	 * comparison below and misreported as -EINVAL.
+	 */
+	if (rrate < 0)
+		return rrate;
+
+	if (rrate > fmax)
+		return -EINVAL;
+	return rrate;
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+/* Set the clock's maximum allowed rate via its op, if supported. */
+int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	return clk->ops->set_max_rate ? clk->ops->set_max_rate(clk, rate)
+				      : -ENOSYS;
+}
+EXPORT_SYMBOL(clk_set_max_rate);
+
+/*
+ * Map a parent clock pointer to its mux select value.
+ * Returns the matching sel, or -EINVAL if @p is not in @parents.
+ */
+int parent_to_src_sel(struct clk_src *parents, int num_parents, struct clk *p)
+{
+	int idx;
+
+	for (idx = 0; idx < num_parents; idx++)
+		if (parents[idx].src == p)
+			return parents[idx].sel;
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL(parent_to_src_sel);
+
+/* Look up the mux select value for @parent in @c's parent table. */
+int clk_get_parent_sel(struct clk *c, struct clk *parent)
+{
+	return parent_to_src_sel(c->parents, c->num_parents, parent);
+}
+EXPORT_SYMBOL(clk_get_parent_sel);
+
+/* Switch @clk's parent via its set_parent op. */
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+	int rc = 0;
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	/* already the parent and nothing to program: trivially done */
+	if (!clk->ops->set_parent && clk->parent == parent)
+		return 0;
+
+	if (!clk->ops->set_parent)
+		return -ENOSYS;
+
+	mutex_lock(&clk->prepare_lock);
+	/*
+	 * NOTE(review): CLKFLAG_NO_RATE_CACHE is reused here to force a
+	 * redundant-looking set_parent call — presumably for clocks whose
+	 * hardware state can drift from the cache. TODO confirm.
+	 */
+	if (clk->parent == parent && !(clk->flags & CLKFLAG_NO_RATE_CACHE))
+		goto out;
+	rc = clk->ops->set_parent(clk, parent);
+out:
+	mutex_unlock(&clk->prepare_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
+/* Return the cached parent of @clk, or NULL for an invalid clock. */
+struct clk *clk_get_parent(struct clk *clk)
+{
+	return IS_ERR_OR_NULL(clk) ? NULL : clk->parent;
+}
+EXPORT_SYMBOL(clk_get_parent);
+
+/* Pass driver-specific flags through to the clock's set_flags op. */
+int clk_set_flags(struct clk *clk, unsigned long flags)
+{
+	if (IS_ERR_OR_NULL(clk))
+		return -EINVAL;
+
+	return clk->ops->set_flags ? clk->ops->set_flags(clk, flags)
+				   : -ENOSYS;
+}
+EXPORT_SYMBOL(clk_set_flags);
+
+static LIST_HEAD(initdata_list);
+
+/* Link every clock in @clock_tbl onto its parent's children list. */
+static void init_sibling_lists(struct clk_lookup *clock_tbl, size_t num_clocks)
+{
+	size_t i;
+
+	for (i = 0; i < num_clocks; i++) {
+		struct clk *c = clock_tbl[i].clk;
+
+		/* list_empty() guards against double insertion */
+		if (c->parent && list_empty(&c->siblings))
+			list_add(&c->siblings, &c->parent->children);
+	}
+}
+
+/*
+ * Cast a temporary max-level vote on @vdd for the handoff window and
+ * remember it on handoff_vdd_list so clock_late_init() can drop it.
+ * Idempotent: a class already on the list is skipped.
+ */
+static void vdd_class_init(struct clk_vdd_class *vdd)
+{
+	struct handoff_vdd *v;
+
+	if (!vdd)
+		return;
+
+	if (vdd->skip_handoff)
+		return;
+
+	/* already voted for this class during a previous registration */
+	list_for_each_entry(v, &handoff_vdd_list, list) {
+		if (v->vdd_class == vdd)
+			return;
+	}
+
+	pr_debug("voting for vdd_class %s\n", vdd->class_name);
+	if (vote_vdd_level(vdd, vdd->num_levels - 1))
+		pr_err("failed to vote for %s\n", vdd->class_name);
+
+	/* without this record the temporary vote can never be removed */
+	v = kmalloc(sizeof(*v), GFP_KERNEL);
+	if (!v) {
+		pr_err("Unable to kmalloc. %s will be stuck at max.\n",
+			vdd->class_name);
+		return;
+	}
+
+	v->vdd_class = vdd;
+	list_add_tail(&v->list, &handoff_vdd_list);
+}
+
+/*
+ * Preserve a clock's boot-time state: recursively hand off its depends,
+ * parent and candidate parents first, then query the driver's handoff op.
+ * Clocks found enabled get an extra prepare/enable reference (dropped in
+ * clock_late_init()). -EPROBE_DEFER parks the clock on orphan_clk_list
+ * for a later retry; any other error is fatal for this clock.
+ */
+static int __handoff_clk(struct clk *clk)
+{
+	enum handoff state = HANDOFF_DISABLED_CLK;
+	struct handoff_clk *h = NULL;
+	int rc, i;
+
+	if (clk == NULL || clk->flags & CLKFLAG_INIT_DONE ||
+	    clk->flags & CLKFLAG_SKIP_HANDOFF)
+		return 0;
+
+	if (clk->flags & CLKFLAG_INIT_ERR)
+		return -ENXIO;
+
+	if (clk->flags & CLKFLAG_EPROBE_DEFER)
+		return -EPROBE_DEFER;
+
+	/* Handoff any 'depends' clock first. */
+	rc = __handoff_clk(clk->depends);
+	if (rc)
+		goto err;
+
+	/*
+	 * Handoff functions for the parent must be called before the
+	 * children can be handed off. Without handing off the parents and
+	 * knowing their rate and state (on/off), it's impossible to figure
+	 * out the rate and state of the children.
+	 */
+	if (clk->ops->get_parent)
+		clk->parent = clk->ops->get_parent(clk);
+
+	if (IS_ERR(clk->parent)) {
+		rc = PTR_ERR(clk->parent);
+		goto err;
+	}
+
+	rc = __handoff_clk(clk->parent);
+	if (rc)
+		goto err;
+
+	/* all potential mux inputs must be handed off too */
+	for (i = 0; i < clk->num_parents; i++) {
+		rc = __handoff_clk(clk->parents[i].src);
+		if (rc)
+			goto err;
+	}
+
+	if (clk->ops->handoff)
+		state = clk->ops->handoff(clk);
+
+	if (state == HANDOFF_ENABLED_CLK) {
+
+		h = kmalloc(sizeof(*h), GFP_KERNEL);
+		if (!h) {
+			rc = -ENOMEM;
+			goto err;
+		}
+
+		rc = clk_prepare_enable(clk->parent);
+		if (rc)
+			goto err;
+
+		rc = clk_prepare_enable(clk->depends);
+		if (rc)
+			goto err_depends;
+
+		rc = vote_rate_vdd(clk, clk->rate);
+		WARN(rc, "%s unable to vote for voltage!\n", clk->dbg_name);
+
+		/* seed the refcounts directly instead of calling the APIs */
+		clk->count = 1;
+		clk->prepare_count = 1;
+		h->clk = clk;
+		list_add_tail(&h->list, &handoff_list);
+
+		pr_debug("Handed off %s rate=%lu\n", clk->dbg_name, clk->rate);
+	}
+
+	if (clk->init_rate && clk_set_rate(clk, clk->init_rate))
+		pr_err("failed to set an init rate of %lu on %s\n",
+			clk->init_rate, clk->dbg_name);
+	if (clk->always_on && clk_prepare_enable(clk))
+		pr_err("failed to enable always-on clock %s\n",
+			clk->dbg_name);
+
+	clk->flags |= CLKFLAG_INIT_DONE;
+	/* if the clk is on orphan list, remove it */
+	list_del_init(&clk->list);
+	clock_debug_register(clk);
+
+	return 0;
+
+err_depends:
+	clk_disable_unprepare(clk->parent);
+err:
+	/* kfree(NULL) is a no-op, so reaching here before kmalloc is fine */
+	kfree(h);
+	if (rc == -EPROBE_DEFER) {
+		clk->flags |= CLKFLAG_EPROBE_DEFER;
+		if (list_empty(&clk->list))
+			list_add_tail(&clk->list, &orphan_clk_list);
+	} else {
+		pr_err("%s handoff failed (%d)\n", clk->dbg_name, rc);
+		clk->flags |= CLKFLAG_INIT_ERR;
+	}
+	return rc;
+}
+
+/**
+ * msm_clock_register() - Register additional clock tables
+ * @table: Table of clocks
+ * @size: Size of @table
+ *
+ * Upon return, clock APIs may be used to control clocks registered using this
+ * function.
+ */
+int msm_clock_register(struct clk_lookup *table, size_t size)
+{
+	int n = 0, rc;
+	struct clk *c, *safe;
+	bool found_more_clks;
+
+	mutex_lock(&msm_clock_init_lock);
+
+	init_sibling_lists(table, size);
+
+	/*
+	 * Enable regulators and temporarily set them up at maximum voltage.
+	 * Once all the clocks have made their respective vote, remove this
+	 * temporary vote. The removing of the temporary vote is done at
+	 * late_init, by which time we assume all the clocks would have been
+	 * handed off.
+	 */
+	for (n = 0; n < size; n++)
+		vdd_class_init(table[n].clk->vdd_class);
+
+	/*
+	 * Detect and preserve initial clock state until clock_late_init() or
+	 * a driver explicitly changes it, whichever is first.
+	 */
+
+	for (n = 0; n < size; n++)
+		__handoff_clk(table[n].clk);
+
+	/* maintain backwards compatibility */
+	if (table[0].con_id || table[0].dev_id)
+		clkdev_add_table(table, size);
+
+	/*
+	 * Retry orphans until a full pass makes no progress: each newly
+	 * handed-off clock may unblock others that deferred on it.
+	 */
+	do {
+		found_more_clks = false;
+		/* clear cached __handoff_clk return values */
+		list_for_each_entry_safe(c, safe, &orphan_clk_list, list)
+			c->flags &= ~CLKFLAG_EPROBE_DEFER;
+
+		list_for_each_entry_safe(c, safe, &orphan_clk_list, list) {
+			rc = __handoff_clk(c);
+			if (!rc)
+				found_more_clks = true;
+		}
+	} while (found_more_clks);
+
+	mutex_unlock(&msm_clock_init_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_clock_register);
+
+/* Lookup table handed to the DT clock-provider callback. */
+struct of_msm_provider_data {
+	struct clk_lookup *table;
+	size_t size;
+};
+
+/* DT provider callback: map a clock-specifier index to a struct clk. */
+static struct clk *of_clk_src_get(struct of_phandle_args *clkspec,
+					void *data)
+{
+	struct of_msm_provider_data *ofdata = data;
+	int i;
+
+	for (i = 0; i < ofdata->size; i++)
+		if (ofdata->table[i].of_idx == clkspec->args[0])
+			return ofdata->table[i].clk;
+
+	return ERR_PTR(-ENOENT);
+}
+
+#define MAX_LEN_OPP_HANDLE 50
+#define LEN_OPP_HANDLE 16
+#define LEN_OPP_VCORNER_HANDLE 22
+
+/*
+ * Resolve the phandle list named @clk_handle_name in @np into an array of
+ * struct device pointers (CPU devices or platform devices). @len is the
+ * property's byte length; one phandle cell (u32) is assumed per entry.
+ * Returns a kmalloc'd array (caller frees) or an ERR_PTR.
+ */
+static struct device **derive_device_list(struct clk *clk,
+					struct device_node *np,
+					char *clk_handle_name, int len)
+{
+	int j, count, cpu;
+	struct platform_device *pdev;
+	struct device_node *dev_node;
+	struct device **device_list;
+
+	count = len/sizeof(u32);
+	device_list = kmalloc_array(count, sizeof(struct device *),
+				GFP_KERNEL);
+	if (!device_list)
+		return ERR_PTR(-ENOMEM);
+
+	for (j = 0; j < count; j++) {
+		device_list[j] = NULL;
+		dev_node = of_parse_phandle(np, clk_handle_name, j);
+		if (!dev_node) {
+			pr_err("Unable to get device_node pointer for %s opp-handle (%s)\n",
+					clk->dbg_name, clk_handle_name);
+			goto err_parse_phandle;
+		}
+
+		/* prefer a CPU device when the phandle names a CPU node */
+		for_each_possible_cpu(cpu) {
+			if (of_get_cpu_node(cpu, NULL) == dev_node) {
+				device_list[j] = get_cpu_device(cpu);
+			}
+		}
+
+		if (device_list[j])
+			continue;
+
+		/* otherwise fall back to a platform device lookup */
+		pdev = of_find_device_by_node(dev_node);
+		if (!pdev) {
+			pr_err("Unable to find platform_device node for %s opp-handle\n",
+						clk->dbg_name);
+			goto err_parse_phandle;
+		}
+		device_list[j] = &pdev->dev;
+	}
+	return device_list;
+err_parse_phandle:
+	kfree(device_list);
+	return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Derive the OPP voltage for @rate: map the rate to a vdd level, read the
+ * corner value, and (unless @store_vcorner) translate the corner to a
+ * microvolt value via the first regulator. Returns uV/corner or -errno.
+ */
+static int get_voltage(struct clk *clk, unsigned long rate,
+				int store_vcorner, int n)
+{
+	struct clk_vdd_class *vdd;
+	int uv, level, corner;
+
+	/*
+	 * Use the first regulator in the vdd class
+	 * for the OPP table.
+	 */
+	vdd = clk->vdd_class;
+	if (vdd->num_regulators > 1) {
+		/*
+		 * NOTE(review): indexes the flat [level][regulator] array by
+		 * the caller's fmax index 'n' rather than find_vdd_level();
+		 * presumably levels and fmax entries correspond 1:1 here —
+		 * TODO confirm.
+		 */
+		corner = vdd->vdd_uv[vdd->num_regulators * n];
+	} else {
+		level = find_vdd_level(clk, rate);
+		if (level < 0) {
+			pr_err("Could not find vdd level\n");
+			return -EINVAL;
+		}
+		corner = vdd->vdd_uv[level];
+	}
+
+	if (!corner) {
+		pr_err("%s: Unable to find vdd level for rate %lu\n",
+					clk->dbg_name, rate);
+		return -EINVAL;
+	}
+
+	/* caller asked for the raw corner value, not microvolts */
+	if (store_vcorner) {
+		uv = corner;
+		return uv;
+	}
+
+	uv = regulator_list_corner_voltage(vdd->regulator[0], corner);
+	if (uv < 0) {
+		pr_err("%s: no uv for corner %d - err: %d\n",
+			clk->dbg_name, corner, uv);
+		return uv;
+	}
+	return uv;
+}
+
+/*
+ * Register one (rate, uv) OPP pair on every device in @device_list and
+ * log the first, last, and maximum-rate entries. Returns 0 or -errno.
+ */
+static int add_and_print_opp(struct clk *clk, struct device **device_list,
+				int count, unsigned long rate, int uv, int n)
+{
+	int j, ret = 0;
+
+	for (j = 0; j < count; j++) {
+		ret = dev_pm_opp_add(device_list[j], rate, uv);
+		if (ret) {
+			pr_err("%s: couldn't add OPP for %lu - err: %d\n",
+					clk->dbg_name, rate, ret);
+			return ret;
+		}
+		/* only log the boundary entries to avoid log spam */
+		if (n == 1 || n == clk->num_fmax - 1 ||
+		    rate == clk_round_rate(clk, INT_MAX))
+			pr_info("%s: set OPP pair(%lu Hz: %u uV) on %s\n",
+					clk->dbg_name, rate, uv,
+					dev_name(device_list[j]));
+	}
+	return ret;
+}
+
+/*
+ * Build dev_pm_opp tables for every clock in @table that advertises a
+ * "qcom,<clk>-opp-handle" (or "qcom,<clk>-opp-store-vcorner") phandle
+ * list in the clock controller node @np. For each available frequency
+ * (via clk_round_rate() when list_rate exists, else the fmax table) the
+ * matching voltage is added as an OPP on each referenced device.
+ */
+static void populate_clock_opp_table(struct device_node *np,
+				struct clk_lookup *table, size_t size)
+{
+	struct device **device_list;
+	struct clk *clk;
+	char clk_handle_name[MAX_LEN_OPP_HANDLE];
+	char clk_store_volt_corner[MAX_LEN_OPP_HANDLE];
+	size_t i;
+	int n, len, count, uv = 0;
+	unsigned long rate;
+	/*
+	 * 'ret' must be signed: it holds clk_round_rate() results, and the
+	 * original 'unsigned long ret' made the 'ret < 0' error checks
+	 * below unconditionally false.
+	 */
+	long ret = 0;
+	bool store_vcorner;
+
+	/* Iterate across all clocks in the clock controller */
+	for (i = 0; i < size; i++) {
+		n = 1;
+		rate = 0;
+
+		store_vcorner = false;
+		clk = table[i].clk;
+		if (!clk || !clk->num_fmax || clk->opp_table_populated)
+			continue;
+
+		/* compose "qcom,<name>-opp-handle", checking for overflow */
+		if (strlen(clk->dbg_name) + LEN_OPP_HANDLE
+		    < MAX_LEN_OPP_HANDLE) {
+			ret = snprintf(clk_handle_name,
+					ARRAY_SIZE(clk_handle_name),
+					"qcom,%s-opp-handle", clk->dbg_name);
+			if (ret < 0 ||
+			    ret < strlen(clk->dbg_name) + LEN_OPP_HANDLE) {
+				pr_err("Failed to hold clk_handle_name\n");
+				continue;
+			}
+		} else {
+			pr_err("clk name (%s) too large to fit in clk_handle_name\n",
+				clk->dbg_name);
+			continue;
+		}
+
+		/* compose "qcom,<name>-opp-store-vcorner" likewise */
+		if (strlen(clk->dbg_name) + LEN_OPP_VCORNER_HANDLE
+		    < MAX_LEN_OPP_HANDLE) {
+			ret = snprintf(clk_store_volt_corner,
+					ARRAY_SIZE(clk_store_volt_corner),
+					"qcom,%s-opp-store-vcorner", clk->dbg_name);
+			if (ret < 0 || ret < strlen(clk->dbg_name) +
+			    LEN_OPP_VCORNER_HANDLE) {
+				pr_err("Failed to hold clk_store_volt_corner\n");
+				continue;
+			}
+		} else {
+			pr_err("clk name (%s) too large to fit in clk_store_volt_corner\n",
+				clk->dbg_name);
+			continue;
+		}
+
+		/* prefer the opp-handle list; fall back to store-vcorner */
+		if (!of_find_property(np, clk_handle_name, &len)) {
+			pr_debug("Unable to find %s\n", clk_handle_name);
+			if (!of_find_property(np, clk_store_volt_corner,
+					      &len)) {
+				pr_debug("Unable to find %s\n",
+					 clk_store_volt_corner);
+				continue;
+			} else {
+				store_vcorner = true;
+				device_list = derive_device_list(clk, np,
+						clk_store_volt_corner, len);
+			}
+		} else
+			device_list = derive_device_list(clk, np,
+						clk_handle_name, len);
+		if (IS_ERR_OR_NULL(device_list)) {
+			pr_err("Failed to fill device_list\n");
+			continue;
+		}
+
+		count = len/sizeof(u32);
+		while (1) {
+			/*
+			 * Calling clk_round_rate will not work for all clocks
+			 * (eg. mux_div). Use their fmax values instead to get
+			 * list of all available frequencies.
+			 */
+			if (clk->ops->list_rate) {
+				ret = clk_round_rate(clk, rate + 1);
+				if (ret < 0) {
+					pr_err("clk_round_rate failed for %s\n",
+						clk->dbg_name);
+					goto err_round_rate;
+				}
+				/*
+				 * If clk_round_rate give the same value on
+				 * consecutive iterations, exit loop since
+				 * we're at the maximum clock frequency.
+				 */
+				if (rate == ret)
+					break;
+				rate = ret;
+			} else {
+				if (n < clk->num_fmax)
+					rate = clk->fmax[n];
+				else
+					break;
+			}
+
+			uv = get_voltage(clk, rate, store_vcorner, n);
+			if (uv < 0)
+				goto err_round_rate;
+
+			ret = add_and_print_opp(clk, device_list, count,
+						rate, uv, n);
+			if (ret)
+				goto err_round_rate;
+
+			n++;
+		}
+err_round_rate:
+		/* If OPP table population was successful, set the flag */
+		if (uv >= 0 && ret >= 0)
+			clk->opp_table_populated = true;
+		kfree(device_list);
+	}
+}
+
+/**
+ * of_msm_clock_register() - Register clock tables with clkdev and with the
+ * clock DT framework
+ * @table: Table of clocks
+ * @size: Size of @table
+ * @np: Device pointer corresponding to the clock-provider device
+ *
+ * Upon return, clock APIs may be used to control clocks registered using this
+ * function.
+ */
+int of_msm_clock_register(struct device_node *np, struct clk_lookup *table,
+				size_t size)
+{
+	int ret;
+	struct of_msm_provider_data *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->table = table;
+	data->size = size;
+
+	ret = of_clk_add_provider(np, of_clk_src_get, data);
+	if (ret) {
+		kfree(data);
+		/* propagate the real error, not an unconditional -ENOMEM */
+		return ret;
+	}
+
+	populate_clock_opp_table(np, table, size);
+	return msm_clock_register(table, size);
+}
+EXPORT_SYMBOL(of_msm_clock_register);
+
+/**
+ * msm_clock_init() - Register and initialize a clock driver
+ * @data: Driver-specific clock initialization data
+ *
+ * Upon return from this call, clock APIs may be used to control
+ * clocks registered with this API.
+ */
+int __init msm_clock_init(struct clock_init_data *data)
+{
+	if (!data)
+		return -EINVAL;
+
+	if (data->pre_init)
+		data->pre_init();
+
+	/* queue late_init callbacks for clock_late_init() to invoke */
+	mutex_lock(&msm_clock_init_lock);
+	if (data->late_init)
+		list_add(&data->list, &initdata_list);
+	mutex_unlock(&msm_clock_init_lock);
+
+	msm_clock_register(data->table, data->size);
+
+	if (data->post_init)
+		data->post_init();
+
+	return 0;
+}
+
+/*
+ * Late-init teardown: run per-driver late_init hooks, drop the extra
+ * enable references taken for handed-off clocks, and remove the
+ * temporary max-level voltage votes cast in vdd_class_init().
+ */
+static int __init clock_late_init(void)
+{
+	struct handoff_clk *h, *h_temp;
+	struct handoff_vdd *v, *v_temp;
+	struct clock_init_data *initdata, *initdata_temp;
+	int ret = 0;
+
+	pr_info("%s: Removing enables held for handed-off clocks\n", __func__);
+
+	mutex_lock(&msm_clock_init_lock);
+
+	list_for_each_entry_safe(initdata, initdata_temp,
+					&initdata_list, list) {
+		ret = initdata->late_init();
+		if (ret)
+			pr_err("%s: %pS failed late_init.\n", __func__,
+				initdata);
+	}
+
+	/* release the handoff prepare/enable references */
+	list_for_each_entry_safe(h, h_temp, &handoff_list, list) {
+		clk_disable_unprepare(h->clk);
+		list_del(&h->list);
+		kfree(h);
+	}
+
+	/* remove the temporary max-voltage votes */
+	list_for_each_entry_safe(v, v_temp, &handoff_vdd_list, list) {
+		unvote_vdd_level(v->vdd_class, v->vdd_class->num_levels - 1);
+		list_del(&v->list);
+		kfree(v);
+	}
+
+	mutex_unlock(&msm_clock_init_lock);
+
+	return ret;
+}
+/* clock_late_init should run only after all deferred probing
+ * (excluding DLKM probes) has completed.
+ */
+late_initcall_sync(clock_late_init);
diff --git a/drivers/clk/msm/clock.h b/drivers/clk/msm/clock.h
new file mode 100644
index 000000000000..bee769921ff7
--- /dev/null
+++ b/drivers/clk/msm/clock.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_CLK_MSM_CLOCK_H
+#define __DRIVERS_CLK_MSM_CLOCK_H
+
+#include <linux/clkdev.h>
+
+/**
+ * struct clock_init_data - SoC specific clock initialization data
+ * @list: list node used to queue this driver on the late-init list
+ * @table: table of lookups to add
+ * @size: size of @table
+ * @pre_init: called before initializing the clock driver.
+ * @post_init: called after registering @table. clock APIs can be called inside.
+ * @late_init: called during late init
+ */
+struct clock_init_data {
+ struct list_head list;
+ struct clk_lookup *table;
+ size_t size;
+ void (*pre_init)(void);
+ void (*post_init)(void);
+ int (*late_init)(void);
+};
+
+/* Register and initialize a SoC clock table; see clock.c. */
+int msm_clock_init(struct clock_init_data *data);
+/* Find the vdd level needed by @clk at @rate (defined in clock.c). */
+int find_vdd_level(struct clk *clk, unsigned long rate);
+/* Presumably clocks still waiting for their parents; see clock.c. */
+extern struct list_head orphan_clk_list;
+
+#ifdef CONFIG_DEBUG_FS
+int clock_debug_register(struct clk *clk);
+void clock_debug_print_enabled(void);
+#else
+/* No-op stubs so callers need not ifdef on CONFIG_DEBUG_FS. */
+static inline int clock_debug_register(struct clk *unused)
+{
+ return 0;
+}
+static inline void clock_debug_print_enabled(void) { return; }
+#endif
+
+#endif
diff --git a/drivers/clk/msm/gdsc.c b/drivers/clk/msm/gdsc.c
new file mode 100644
index 000000000000..5ea2a9ccd1bb
--- /dev/null
+++ b/drivers/clk/msm/gdsc.c
@@ -0,0 +1,684 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk/msm-clk.h>
+
+#define PWR_ON_MASK BIT(31)
+#define EN_REST_WAIT_MASK (0xF << 20)
+#define EN_FEW_WAIT_MASK (0xF << 16)
+#define CLK_DIS_WAIT_MASK (0xF << 12)
+#define SW_OVERRIDE_MASK BIT(2)
+#define HW_CONTROL_MASK BIT(1)
+#define SW_COLLAPSE_MASK BIT(0)
+#define GMEM_CLAMP_IO_MASK BIT(0)
+#define GMEM_RESET_MASK BIT(4)
+#define BCR_BLK_ARES_BIT BIT(0)
+
+/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
+#define EN_REST_WAIT_VAL (0x2 << 20)
+#define EN_FEW_WAIT_VAL (0x8 << 16)
+#define CLK_DIS_WAIT_VAL (0x2 << 12)
+
+#define TIMEOUT_US 100
+
+/*
+ * struct gdsc - state for one globally distributed switch (power domain).
+ * The domain is exposed to consumers as a voltage-type regulator.
+ */
+struct gdsc {
+ struct regulator_dev *rdev; /* regulator framework handle */
+ struct regulator_desc rdesc; /* descriptor (name from DT) */
+ void __iomem *gdscr; /* main GDSC control/status register */
+ struct clk **clocks; /* clocks belonging to this domain */
+ int clock_count;
+ bool toggle_mem; /* clear RETAIN_MEM on disable */
+ bool toggle_periph; /* clear RETAIN_PERIPH on disable */
+ bool toggle_logic; /* false: never collapse, use clk resets */
+ bool resets_asserted; /* tracks state when !toggle_logic */
+ bool root_en; /* enable root clk across enable */
+ bool force_root_en; /* force root clk on for the sequence */
+ int root_clk_idx; /* index of "core_root_clk", or -1 */
+ bool no_status_check_on_disable;
+ bool is_gdsc_enabled; /* set by gdsc_enable() */
+ bool allow_clear; /* retention may be dropped on disable */
+ bool reset_aon; /* pulse GMEM reset before un-clamping */
+ void __iomem *domain_addr; /* GMEM clamp/reset register, optional */
+ void __iomem *hw_ctrl_addr; /* HW-control status register, optional */
+ void __iomem *sw_reset_addr; /* block reset (BCR) register, optional */
+ u32 gds_timeout; /* status poll timeout in microseconds */
+};
+
+/* Target states polled for in poll_gdsc_status(). */
+enum gdscr_status {
+ ENABLED,
+ DISABLED,
+};
+
+static DEFINE_MUTEX(gdsc_seq_lock);
+
+/* Permit this GDSC to drop memory/peripheral retention on disable. */
+void gdsc_allow_clear_retention(struct regulator *regulator)
+{
+ struct gdsc *sc = regulator_get_drvdata(regulator);
+
+ if (!sc)
+ return;
+
+ sc->allow_clear = true;
+}
+
+/*
+ * Poll the PWR_ON bit until it reflects @status or the per-GDSC
+ * timeout (microseconds) expires.  Reads the HW-control status
+ * register when one is provided, otherwise the main GDSCR.
+ *
+ * Return: 0 when the requested state is observed, -ETIMEDOUT otherwise.
+ */
+static int poll_gdsc_status(struct gdsc *sc, enum gdscr_status status)
+{
+ void __iomem *reg = sc->hw_ctrl_addr ? sc->hw_ctrl_addr : sc->gdscr;
+ int tries;
+
+ for (tries = sc->gds_timeout; tries > 0; tries--) {
+ bool on = readl_relaxed(reg) & PWR_ON_MASK;
+
+ if (on && status == ENABLED)
+ return 0;
+ if (!on && status == DISABLED)
+ return 0;
+ /*
+ * There is no guarantee about the delay needed for the
+ * status bit to settle after a state change, so retry
+ * with the smallest practical delay between reads.
+ */
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Report whether HLOS considers this GDSC enabled.  For domains whose
+ * logic is never collapsed, state is tracked via the reset bookkeeping
+ * rather than the hardware status bit.
+ */
+static int gdsc_is_enabled(struct regulator_dev *rdev)
+{
+ struct gdsc *sc = rdev_get_drvdata(rdev);
+ uint32_t regval;
+
+ if (!sc->toggle_logic)
+ return !sc->resets_asserted;
+
+ regval = readl_relaxed(sc->gdscr);
+
+ /*
+ * PWR_ON may be set because of a TZ/HYP vote on the votable GDS
+ * registers; report enabled only when HLOS itself has not voted
+ * for collapse (SW_COLLAPSE clear).
+ */
+ return (regval & PWR_ON_MASK) && !(regval & SW_COLLAPSE_MASK);
+}
+
+/*
+ * gdsc_enable() - power up a GDSC domain.
+ *
+ * Optionally pulses the block reset and GMEM reset, de-asserts the
+ * I/O clamp, clears SW_COLLAPSE and polls for PWR_ON, then restores
+ * memory/peripheral retention flags on the domain clocks.  For GDSCs
+ * marked qcom,skip-logic-collapse only the clock resets are
+ * de-asserted.  The root clock is kept on across the sequence when
+ * requested via DT.
+ */
+static int gdsc_enable(struct regulator_dev *rdev)
+{
+ struct gdsc *sc = rdev_get_drvdata(rdev);
+ uint32_t regval, hw_ctrl_regval = 0x0;
+ int i, ret = 0;
+
+ mutex_lock(&gdsc_seq_lock);
+
+ if (sc->root_en || sc->force_root_en)
+ clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
+
+ if (sc->toggle_logic) {
+ if (sc->sw_reset_addr) {
+ regval = readl_relaxed(sc->sw_reset_addr);
+ regval |= BCR_BLK_ARES_BIT;
+ writel_relaxed(regval, sc->sw_reset_addr);
+ /*
+ * BLK_ARES should be kept asserted for 1us before
+ * being de-asserted.
+ */
+ wmb();
+ udelay(1);
+
+ regval &= ~BCR_BLK_ARES_BIT;
+ writel_relaxed(regval, sc->sw_reset_addr);
+
+ /* Make sure de-assert goes through before continuing */
+ wmb();
+ }
+
+ if (sc->domain_addr) {
+ if (sc->reset_aon) {
+ regval = readl_relaxed(sc->domain_addr);
+ regval |= GMEM_RESET_MASK;
+ writel_relaxed(regval, sc->domain_addr);
+ /*
+ * Keep reset asserted for at-least 1us before
+ * continuing.
+ */
+ wmb();
+ udelay(1);
+
+ regval &= ~GMEM_RESET_MASK;
+ writel_relaxed(regval, sc->domain_addr);
+ /*
+ * Make sure GMEM_RESET is de-asserted before
+ * continuing.
+ */
+ wmb();
+ }
+
+ regval = readl_relaxed(sc->domain_addr);
+ regval &= ~GMEM_CLAMP_IO_MASK;
+ writel_relaxed(regval, sc->domain_addr);
+ /*
+ * Make sure CLAMP_IO is de-asserted before continuing.
+ */
+ wmb();
+ }
+
+ regval = readl_relaxed(sc->gdscr);
+ if (regval & HW_CONTROL_MASK) {
+ dev_warn(&rdev->dev, "Invalid enable while %s is under HW control\n",
+ sc->rdesc.name);
+ mutex_unlock(&gdsc_seq_lock);
+ return -EBUSY;
+ }
+
+ regval &= ~SW_COLLAPSE_MASK;
+ writel_relaxed(regval, sc->gdscr);
+
+ /* Wait for 8 XO cycles before polling the status bit. */
+ mb();
+ udelay(1);
+
+ ret = poll_gdsc_status(sc, ENABLED);
+ if (ret) {
+ regval = readl_relaxed(sc->gdscr);
+ if (sc->hw_ctrl_addr) {
+ hw_ctrl_regval =
+ readl_relaxed(sc->hw_ctrl_addr);
+ dev_warn(&rdev->dev, "%s state (after %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x. Re-polling.\n",
+ sc->rdesc.name, sc->gds_timeout,
+ regval, hw_ctrl_regval);
+
+ /* One more full poll window before giving up. */
+ ret = poll_gdsc_status(sc, ENABLED);
+ if (ret) {
+ dev_err(&rdev->dev, "%s final state (after additional %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x\n",
+ sc->rdesc.name, sc->gds_timeout,
+ readl_relaxed(sc->gdscr),
+ readl_relaxed(sc->hw_ctrl_addr));
+
+ mutex_unlock(&gdsc_seq_lock);
+ return ret;
+ }
+ } else {
+ dev_err(&rdev->dev, "%s enable timed out: 0x%x\n",
+ sc->rdesc.name,
+ regval);
+ udelay(sc->gds_timeout);
+ regval = readl_relaxed(sc->gdscr);
+ dev_err(&rdev->dev, "%s final state: 0x%x (%d us after timeout)\n",
+ sc->rdesc.name, regval,
+ sc->gds_timeout);
+ mutex_unlock(&gdsc_seq_lock);
+ return ret;
+ }
+ }
+ } else {
+ /* Logic never collapses: just release the clock resets. */
+ for (i = 0; i < sc->clock_count; i++)
+ if (likely(i != sc->root_clk_idx))
+ clk_reset(sc->clocks[i], CLK_RESET_DEASSERT);
+ sc->resets_asserted = false;
+ }
+
+ /* Restore retention flags that gdsc_disable() may have cleared. */
+ for (i = 0; i < sc->clock_count; i++) {
+ if (unlikely(i == sc->root_clk_idx))
+ continue;
+ if (sc->toggle_mem)
+ clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
+ if (sc->toggle_periph)
+ clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
+ }
+
+ /*
+ * If clocks to this power domain were already on, they will take an
+ * additional 4 clock cycles to re-enable after the rail is enabled.
+ * Delay to account for this. A delay is also needed to ensure clocks
+ * are not enabled within 400ns of enabling power to the memories.
+ */
+ udelay(1);
+
+ /* Delay to account for staggered memory powerup. */
+ udelay(1);
+
+ /* force_root_en only holds the root clock for this sequence. */
+ if (sc->force_root_en)
+ clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
+ sc->is_gdsc_enabled = true;
+
+ mutex_unlock(&gdsc_seq_lock);
+
+ return ret;
+}
+
+/*
+ * gdsc_disable() - power down a GDSC domain.
+ *
+ * Optionally drops memory/peripheral retention on the domain clocks,
+ * asserts SW_COLLAPSE (or the clock resets when the logic is never
+ * collapsed) and re-asserts the I/O clamp if one exists.  The root
+ * clock vote taken by gdsc_enable() is released here.
+ */
+static int gdsc_disable(struct regulator_dev *rdev)
+{
+ struct gdsc *sc = rdev_get_drvdata(rdev);
+ uint32_t regval;
+ int i, ret = 0;
+
+ mutex_lock(&gdsc_seq_lock);
+
+ if (sc->force_root_en)
+ clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
+
+ /* Clear retention only when the DT allows it (allow_clear). */
+ for (i = sc->clock_count-1; i >= 0; i--) {
+ if (unlikely(i == sc->root_clk_idx))
+ continue;
+ if (sc->toggle_mem && sc->allow_clear)
+ clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
+ if (sc->toggle_periph && sc->allow_clear)
+ clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
+ }
+
+ /* Delay to account for staggered memory powerdown. */
+ udelay(1);
+
+ if (sc->toggle_logic) {
+ regval = readl_relaxed(sc->gdscr);
+ if (regval & HW_CONTROL_MASK) {
+ dev_warn(&rdev->dev, "Invalid disable while %s is under HW control\n",
+ sc->rdesc.name);
+ mutex_unlock(&gdsc_seq_lock);
+ return -EBUSY;
+ }
+
+ regval |= SW_COLLAPSE_MASK;
+ writel_relaxed(regval, sc->gdscr);
+ /* Wait for 8 XO cycles before polling the status bit. */
+ mb();
+ udelay(1);
+
+ if (sc->no_status_check_on_disable) {
+ /*
+ * Add a short delay here to ensure that gdsc_enable
+ * right after it was disabled does not put it in a
+ * wierd state.
+ */
+ udelay(TIMEOUT_US);
+ } else {
+ ret = poll_gdsc_status(sc, DISABLED);
+ if (ret)
+ dev_err(&rdev->dev, "%s disable timed out: 0x%x\n",
+ sc->rdesc.name, regval);
+ }
+
+ if (sc->domain_addr) {
+ regval = readl_relaxed(sc->domain_addr);
+ regval |= GMEM_CLAMP_IO_MASK;
+ writel_relaxed(regval, sc->domain_addr);
+ /* Make sure CLAMP_IO is asserted before continuing. */
+ wmb();
+ }
+ } else {
+ /* Logic never collapses: hold the clocks in reset instead. */
+ for (i = sc->clock_count-1; i >= 0; i--)
+ if (likely(i != sc->root_clk_idx))
+ clk_reset(sc->clocks[i], CLK_RESET_ASSERT);
+ sc->resets_asserted = true;
+ }
+
+ /*
+ * Check if gdsc_enable was called for this GDSC. If not, the root
+ * clock will not have been enabled prior to this.
+ */
+ if ((sc->is_gdsc_enabled && sc->root_en) || sc->force_root_en)
+ clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
+ sc->is_gdsc_enabled = false;
+
+ mutex_unlock(&gdsc_seq_lock);
+
+ return ret;
+}
+
+/* REGULATOR_MODE_FAST <=> HW trigger control enabled; else NORMAL. */
+static unsigned int gdsc_get_mode(struct regulator_dev *rdev)
+{
+ struct gdsc *sc = rdev_get_drvdata(rdev);
+ uint32_t ctrl;
+
+ mutex_lock(&gdsc_seq_lock);
+ ctrl = readl_relaxed(sc->gdscr);
+ mutex_unlock(&gdsc_seq_lock);
+
+ return (ctrl & HW_CONTROL_MASK) ? REGULATOR_MODE_FAST
+ : REGULATOR_MODE_NORMAL;
+}
+
+/*
+ * gdsc_set_mode() - switch between SW control (REGULATOR_MODE_NORMAL)
+ * and HW trigger control (REGULATOR_MODE_FAST) of the GDSC.  Only
+ * legal while the GDSC is on (SW_COLLAPSE clear).
+ */
+static int gdsc_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct gdsc *sc = rdev_get_drvdata(rdev);
+ uint32_t regval;
+ int ret = 0;
+
+ mutex_lock(&gdsc_seq_lock);
+
+ regval = readl_relaxed(sc->gdscr);
+
+ /*
+ * HW control can only be enable/disabled when SW_COLLAPSE
+ * indicates on.
+ */
+ if (regval & SW_COLLAPSE_MASK) {
+ dev_err(&rdev->dev, "can't enable hw collapse now\n");
+ mutex_unlock(&gdsc_seq_lock);
+ return -EBUSY;
+ }
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ /* Turn on HW trigger mode */
+ regval |= HW_CONTROL_MASK;
+ writel_relaxed(regval, sc->gdscr);
+ /*
+ * There may be a race with internal HW trigger signal,
+ * that will result in GDSC going through a power down and
+ * up cycle. In case HW trigger signal is controlled by
+ * firmware that also poll same status bits as we do, FW
+ * might read an 'on' status before the GDSC can finish
+ * power cycle. We wait 1us before returning to ensure
+ * FW can't immediately poll the status bit.
+ */
+ mb();
+ udelay(1);
+ break;
+
+ case REGULATOR_MODE_NORMAL:
+ /* Turn off HW trigger mode */
+ regval &= ~HW_CONTROL_MASK;
+ writel_relaxed(regval, sc->gdscr);
+ /*
+ * There may be a race with internal HW trigger signal,
+ * that will result in GDSC going through a power down and
+ * up cycle. If we poll too early, status bit will
+ * indicate 'on' before the GDSC can finish the power cycle.
+ * Account for this case by waiting 1us before polling.
+ */
+ mb();
+ udelay(1);
+
+ /* Confirm the GDSC settled back on under SW control. */
+ ret = poll_gdsc_status(sc, ENABLED);
+ if (ret)
+ dev_err(&rdev->dev, "%s set_mode timed out: 0x%x\n",
+ sc->rdesc.name, regval);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&gdsc_seq_lock);
+
+ return ret;
+}
+
+/*
+ * Regulator interface: the GDSC is modeled as an on/off regulator
+ * whose operating mode selects SW vs HW collapse control.
+ */
+static struct regulator_ops gdsc_ops = {
+ .is_enabled = gdsc_is_enabled,
+ .enable = gdsc_enable,
+ .disable = gdsc_disable,
+ .set_mode = gdsc_set_mode,
+ .get_mode = gdsc_get_mode,
+};
+
+/*
+ * gdsc_probe() - bind one qcom,gdsc device-tree node.
+ *
+ * Parses the regulator init data, maps the GDSCR and any optional
+ * domain/sw-reset/hw-ctrl registers, resolves the domain clocks,
+ * programs the GDSCR wait counters, applies the initial retention
+ * flags and registers the domain as a voltage-type regulator.
+ */
+static int gdsc_probe(struct platform_device *pdev)
+{
+ static atomic_t gdsc_count = ATOMIC_INIT(-1);
+ struct regulator_config reg_config = {};
+ struct regulator_init_data *init_data;
+ struct resource *res;
+ struct gdsc *sc;
+ uint32_t regval, clk_dis_wait_val = CLK_DIS_WAIT_VAL;
+ bool retain_mem, retain_periph, support_hw_trigger;
+ int i, ret;
+ u32 timeout;
+
+ sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
+ if (sc == NULL)
+ return -ENOMEM;
+
+ init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
+ &sc->rdesc);
+ if (init_data == NULL)
+ return -ENOMEM;
+
+ if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
+ init_data->supply_regulator = "parent";
+
+ ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
+ &sc->rdesc.name);
+ if (ret)
+ return ret;
+
+ /* First MEM resource is the mandatory GDSC control register. */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -EINVAL;
+ sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (sc->gdscr == NULL)
+ return -ENOMEM;
+
+ /* Optional register blocks; absence simply skips those steps. */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "domain_addr");
+ if (res) {
+ sc->domain_addr = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (sc->domain_addr == NULL)
+ return -ENOMEM;
+ }
+
+ sc->reset_aon = of_property_read_bool(pdev->dev.of_node,
+ "qcom,reset-aon-logic");
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "sw_reset");
+ if (res) {
+ sc->sw_reset_addr = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (sc->sw_reset_addr == NULL)
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "hw_ctrl_addr");
+ if (res) {
+ sc->hw_ctrl_addr = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (sc->hw_ctrl_addr == NULL)
+ return -ENOMEM;
+ }
+
+ /* Poll timeout defaults to TIMEOUT_US unless DT overrides it. */
+ sc->gds_timeout = TIMEOUT_US;
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,gds-timeout",
+ &timeout);
+ if (!ret)
+ sc->gds_timeout = timeout;
+
+ /* -EINVAL means no clock-names property: a clockless GDSC. */
+ sc->clock_count = of_property_count_strings(pdev->dev.of_node,
+ "clock-names");
+ if (sc->clock_count == -EINVAL) {
+ sc->clock_count = 0;
+ } else if (IS_ERR_VALUE(sc->clock_count)) {
+ dev_err(&pdev->dev, "Failed to get clock names\n");
+ return -EINVAL;
+ }
+
+ sc->clocks = devm_kzalloc(&pdev->dev,
+ sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
+ if (!sc->clocks)
+ return -ENOMEM;
+
+ sc->root_clk_idx = -1;
+
+ sc->root_en = of_property_read_bool(pdev->dev.of_node,
+ "qcom,enable-root-clk");
+ sc->force_root_en = of_property_read_bool(pdev->dev.of_node,
+ "qcom,force-enable-root-clk");
+ for (i = 0; i < sc->clock_count; i++) {
+ const char *clock_name;
+ of_property_read_string_index(pdev->dev.of_node, "clock-names",
+ i, &clock_name);
+ sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
+ if (IS_ERR(sc->clocks[i])) {
+ int rc = PTR_ERR(sc->clocks[i]);
+ if (rc != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get %s\n",
+ clock_name);
+ return rc;
+ }
+
+ /* Remember which entry is the root clock, if any. */
+ if (!strcmp(clock_name, "core_root_clk"))
+ sc->root_clk_idx = i;
+ }
+
+ if ((sc->root_en || sc->force_root_en) && (sc->root_clk_idx == -1)) {
+ dev_err(&pdev->dev, "Failed to get root clock name\n");
+ return -EINVAL;
+ }
+
+ sc->rdesc.id = atomic_inc_return(&gdsc_count);
+ sc->rdesc.ops = &gdsc_ops;
+ sc->rdesc.type = REGULATOR_VOLTAGE;
+ sc->rdesc.owner = THIS_MODULE;
+ platform_set_drvdata(pdev, sc);
+
+ /*
+ * Disable HW trigger: collapse/restore occur based on registers writes.
+ * Disable SW override: Use hardware state-machine for sequencing.
+ */
+ regval = readl_relaxed(sc->gdscr);
+ regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);
+
+ /* DT supplies the raw field value, shifted into bits 12-15 here. */
+ if (!of_property_read_u32(pdev->dev.of_node, "qcom,clk-dis-wait-val",
+ &clk_dis_wait_val))
+ clk_dis_wait_val = clk_dis_wait_val << 12;
+
+ /* Configure wait time between states. */
+ regval &= ~(EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK);
+ regval |= EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | clk_dis_wait_val;
+ writel_relaxed(regval, sc->gdscr);
+
+ sc->no_status_check_on_disable =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,no-status-check-on-disable");
+ retain_mem = of_property_read_bool(pdev->dev.of_node,
+ "qcom,retain-mem");
+ sc->toggle_mem = !retain_mem;
+ retain_periph = of_property_read_bool(pdev->dev.of_node,
+ "qcom,retain-periph");
+ sc->toggle_periph = !retain_periph;
+ sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
+ "qcom,skip-logic-collapse");
+ support_hw_trigger = of_property_read_bool(pdev->dev.of_node,
+ "qcom,support-hw-trigger");
+ if (support_hw_trigger) {
+ init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_MODE;
+ init_data->constraints.valid_modes_mask |=
+ REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
+ }
+
+ /* GDSCs that never collapse are switched on once, here. */
+ if (!sc->toggle_logic) {
+ regval &= ~SW_COLLAPSE_MASK;
+ writel_relaxed(regval, sc->gdscr);
+
+ ret = poll_gdsc_status(sc, ENABLED);
+ if (ret) {
+ dev_err(&pdev->dev, "%s enable timed out: 0x%x\n",
+ sc->rdesc.name, regval);
+ return ret;
+ }
+ }
+
+ sc->allow_clear = of_property_read_bool(pdev->dev.of_node,
+ "qcom,disallow-clear");
+ sc->allow_clear = !sc->allow_clear;
+
+ /* Initial retention flags, conservative while the rail is on. */
+ for (i = 0; i < sc->clock_count; i++) {
+ if (retain_mem || (regval & PWR_ON_MASK) || !sc->allow_clear)
+ clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
+ else
+ clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
+
+ if (retain_periph || (regval & PWR_ON_MASK) || !sc->allow_clear)
+ clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
+ else
+ clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
+ }
+
+ reg_config.dev = &pdev->dev;
+ reg_config.init_data = init_data;
+ reg_config.driver_data = sc;
+ reg_config.of_node = pdev->dev.of_node;
+ sc->rdev = regulator_register(&sc->rdesc, &reg_config);
+ if (IS_ERR(sc->rdev)) {
+ dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
+ sc->rdesc.name);
+ return PTR_ERR(sc->rdev);
+ }
+
+ return 0;
+}
+
+/* Tear down the regulator registered by gdsc_probe(). */
+static int gdsc_remove(struct platform_device *pdev)
+{
+ struct gdsc *sc = platform_get_drvdata(pdev);
+
+ regulator_unregister(sc->rdev);
+
+ return 0;
+}
+
+/* Device-tree match table: one "qcom,gdsc" node per power domain. */
+static struct of_device_id gdsc_match_table[] = {
+ { .compatible = "qcom,gdsc" },
+ {}
+};
+
+static struct platform_driver gdsc_driver = {
+ .probe = gdsc_probe,
+ .remove = gdsc_remove,
+ .driver = {
+ .name = "gdsc",
+ .of_match_table = gdsc_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+/* Registered at subsys_initcall time so GDSCs exist before consumers. */
+static int __init gdsc_init(void)
+{
+ return platform_driver_register(&gdsc_driver);
+}
+subsys_initcall(gdsc_init);
+
+static void __exit gdsc_exit(void)
+{
+ platform_driver_unregister(&gdsc_driver);
+}
+module_exit(gdsc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM8974 GDSC power rail regulator driver");
diff --git a/drivers/clk/msm/mdss/Kconfig b/drivers/clk/msm/mdss/Kconfig
new file mode 100644
index 000000000000..229780e45bb8
--- /dev/null
+++ b/drivers/clk/msm/mdss/Kconfig
@@ -0,0 +1,6 @@
+config MSM_MDSS_PLL
+ bool "MDSS pll programming"
+ ---help---
+ It provides support for DSI, eDP and HDMI interface pll programming on MDSS
+ hardware. It also handles the pll specific resources and turn them on/off when
+ mdss pll client tries to enable/disable pll clocks.
diff --git a/drivers/clk/msm/mdss/Makefile b/drivers/clk/msm/mdss/Makefile
new file mode 100644
index 000000000000..89651e6ebc96
--- /dev/null
+++ b/drivers/clk/msm/mdss/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll-util.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996-util.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-cobalt.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dp-pll-cobalt.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dp-pll-cobalt-util.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-8996.o
+obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-cobalt.o
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
new file mode 100644
index 000000000000..74032aba22bc
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
@@ -0,0 +1,730 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-cobalt.h"
+
+/*
+ * Program the TX band (link2x divider) on both TX lanes and the DP
+ * PHY mode register.  div == 10 additionally sets the low bit of the
+ * band field and of the PHY mode; other values keep the mux default.
+ */
+int link2xclk_divsel_set_div(struct div_clk *clk, int div)
+{
+ int rc;
+ u32 link2xclk_div_tx0, link2xclk_div_tx1;
+ u32 phy_mode;
+ struct mdss_pll_resources *dp_res = clk->priv;
+
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("Failed to enable mdss DP PLL resources\n");
+ return rc;
+ }
+
+ link2xclk_div_tx0 = MDSS_PLL_REG_R(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_TX_BAND);
+ link2xclk_div_tx1 = MDSS_PLL_REG_R(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_TX_BAND);
+
+ link2xclk_div_tx0 &= ~0x07; /* bits 0 to 2 */
+ link2xclk_div_tx1 &= ~0x07; /* bits 0 to 2 */
+
+ /* Configure TX band Mux */
+ link2xclk_div_tx0 |= 0x4;
+ link2xclk_div_tx1 |= 0x4;
+
+ /*configure DP PHY MODE */
+ phy_mode = 0x48;
+
+ if (div == 10) {
+ link2xclk_div_tx0 |= 1;
+ link2xclk_div_tx1 |= 1;
+ phy_mode |= 1;
+ }
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_TX_BAND,
+ link2xclk_div_tx0);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_TX_BAND,
+ link2xclk_div_tx1);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_MODE, phy_mode);
+
+
+ pr_debug("%s: div=%d link2xclk_div_tx0=%x, link2xclk_div_tx1=%x\n",
+ __func__, div, link2xclk_div_tx0, link2xclk_div_tx1);
+
+ mdss_pll_resource_enable(dp_res, false);
+
+ return rc;
+}
+
+/*
+ * Read back the link2x divider from the DP PHY mode register.
+ *
+ * Return: 10 or 5 on success, 0 when the register holds an unexpected
+ * value, or a negative errno when PLL resources cannot be enabled.
+ */
+int link2xclk_divsel_get_div(struct div_clk *clk)
+{
+ int rc;
+ u32 div = 0, phy_mode;
+ struct mdss_pll_resources *dp_res = clk->priv;
+
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("Failed to enable dp_res resources\n");
+ return rc;
+ }
+
+ phy_mode = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_MODE);
+
+ /* NOTE(review): set_div programs 0x48 into this register, so this
+ * warning fires on the normal configuration too — the condition
+ * looks inverted; confirm against the PHY programming guide.
+ */
+ if (phy_mode & 0x48)
+ pr_err("%s: DP PAR Rate not correct\n", __func__);
+
+ if ((phy_mode & 0x3) == 1)
+ div = 10;
+ else if ((phy_mode & 0x3) == 0)
+ div = 5;
+ else
+ /* Fix: log the raw PHY mode value, not a bogus divider. */
+ pr_err("%s: unsupported phy_mode: %d\n", __func__, phy_mode);
+
+ mdss_pll_resource_enable(dp_res, false);
+ pr_debug("%s: phy_mode=%d, div=%d\n", __func__,
+ phy_mode, div);
+
+ return div;
+}
+
+/*
+ * Program the QSERDES HSCLK divider select.  Only div-by-3 has a
+ * distinct encoding (0x4); all other requests leave the field zero.
+ *
+ * Return: 0 on success or a negative errno from resource enable.
+ */
+int hsclk_divsel_set_div(struct div_clk *clk, int div)
+{
+ int rc;
+ u32 hsclk_div;
+ struct mdss_pll_resources *dp_res = clk->priv;
+
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("Failed to enable mdss DP PLL resources\n");
+ return rc;
+ }
+
+ hsclk_div = MDSS_PLL_REG_R(dp_res->pll_base, QSERDES_COM_HSCLK_SEL);
+ hsclk_div &= ~0x0f; /* bits 0 to 3 */
+
+ /* Only div == 3 needs a non-zero select; "|= 0" was a no-op. */
+ if (div == 3)
+ hsclk_div |= 4;
+
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_HSCLK_SEL, hsclk_div);
+
+ pr_debug("%s: div=%d hsclk_div=%x\n", __func__, div, hsclk_div);
+
+ mdss_pll_resource_enable(dp_res, false);
+
+ return rc;
+}
+
+/* Read back the HSCLK divider: select 4 means /3, anything else /2. */
+int hsclk_divsel_get_div(struct div_clk *clk)
+{
+ int rc;
+ u32 sel, div;
+ struct mdss_pll_resources *dp_res = clk->priv;
+
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("Failed to enable dp_res resources\n");
+ return rc;
+ }
+
+ sel = MDSS_PLL_REG_R(dp_res->pll_base, QSERDES_COM_HSCLK_SEL) & 0x0f;
+ div = (sel == 4) ? 3 : 2;
+
+ mdss_pll_resource_enable(dp_res, false);
+
+ pr_debug("%s: hsclk_div:%d, div=%d\n", __func__, sel, div);
+
+ return div;
+}
+
+/*
+ * Program the VCO aux output divider.  Encodings: /2 -> 1, /4 -> 2;
+ * any other request falls back to the /4 (default) encoding.
+ */
+int vco_divided_clk_set_div(struct div_clk *clk, int div)
+{
+ int rc;
+ u32 auxclk_div;
+ struct mdss_pll_resources *dp_res = clk->priv;
+
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("Failed to enable mdss DP PLL resources\n");
+ return rc;
+ }
+
+ auxclk_div = MDSS_PLL_REG_R(dp_res->pll_base, DP_PHY_VCO_DIV);
+ auxclk_div &= ~0x03; /* bits 0 to 1 */
+
+ /* /2 has a dedicated encoding; everything else uses the default. */
+ auxclk_div |= (div == 2) ? 1 : 2;
+
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ DP_PHY_VCO_DIV, auxclk_div);
+
+ pr_debug("%s: div=%d auxclk_div=%x\n", __func__, div, auxclk_div);
+
+ mdss_pll_resource_enable(dp_res, false);
+
+ return rc;
+}
+
+
+/* Cont-splash is not enabled here, so this clock is never handed off. */
+enum handoff vco_divided_clk_handoff(struct clk *c)
+{
+ return HANDOFF_DISABLED_CLK;
+}
+
+/* Read back the VCO aux divider: 2 -> /4, 1 -> /2, else 0 (unknown). */
+int vco_divided_clk_get_div(struct div_clk *clk)
+{
+ int rc;
+ u32 div, auxclk_div;
+ struct mdss_pll_resources *dp_res = clk->priv;
+
+ rc = mdss_pll_resource_enable(dp_res, true);
+ if (rc) {
+ pr_err("Failed to enable dp_res resources\n");
+ return rc;
+ }
+
+ auxclk_div = MDSS_PLL_REG_R(dp_res->pll_base, DP_PHY_VCO_DIV);
+ auxclk_div &= 0x03;
+
+ switch (auxclk_div) {
+ case 2:
+ div = 4;
+ break;
+ case 1:
+ div = 2;
+ break;
+ default:
+ div = 0;
+ break;
+ }
+
+ mdss_pll_resource_enable(dp_res, false);
+
+ pr_debug("%s: auxclk_div=%d, div=%d\n", __func__, auxclk_div, div);
+
+ return div;
+}
+
+/*
+ * dp_config_vco_rate() - program the DP PLL and both TX lanes for one
+ * of the three supported VCO rates (8100/9720/10800 MHz).
+ *
+ * Only the DEC_START/DIV_FRAC_START and LOCK_CMP values differ per
+ * rate; all other register writes are rate-independent.
+ *
+ * Return: 0 on success, -EINVAL for an unsupported @rate.
+ */
+int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
+{
+ u32 res = 0;
+ struct mdss_pll_resources *dp_res = vco->priv;
+
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_PD_CTL, 0x3d);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_PLL_IVCO, 0x0f);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_SYS_CLK_CTRL, 0x06);
+
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_CLK_ENABLE1, 0x0e);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_CLK_SEL, 0x30);
+
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP_EN, 0x00);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_PLL_CCTRL_MODE0, 0x34);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_CP_CTRL_MODE0, 0x08);
+ /* Decimal/fractional dividers differ for each clock rate. */
+ if (rate == DP_VCO_RATE_8100MHz) {
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DEC_START_MODE0, 0x69);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START2_MODE0, 0x80);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START3_MODE0, 0x07);
+ } else if (rate == DP_VCO_RATE_9720MHz) {
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DEC_START_MODE0, 0x7e);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START3_MODE0, 0x09);
+ } else if (rate == DP_VCO_RATE_10800MHz) {
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DEC_START_MODE0, 0x8c);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_DIV_FRAC_START3_MODE0, 0x0a);
+ } else {
+ pr_err("%s: unsupported rate: %ld\n", __func__, rate);
+ return -EINVAL;
+ }
+
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_VCO_TUNE_MAP, 0x00);
+
+ /* Lock comparator thresholds also depend on the rate. */
+ if (rate == DP_VCO_RATE_8100MHz) {
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP1_MODE0, 0x3f);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP2_MODE0, 0x38);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
+ } else if (rate == DP_VCO_RATE_9720MHz) {
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP1_MODE0, 0x7f);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP2_MODE0, 0x43);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
+ } else {
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP1_MODE0, 0x7f);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP2_MODE0, 0x70);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_LOCK_CMP3_MODE0, 0x00);
+ }
+
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_BG_TIMER, 0x00);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_BG_TIMER, 0x0a);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_CORECLK_DIV_MODE0, 0x05);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_VCO_TUNE_CTRL, 0x00);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x37);
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_CORE_CLK_EN, 0x0f);
+
+ /*
+ * The original code branched on rate here, but every branch wrote
+ * the same value (0x02) to CMN_CONFIG — collapsed to one write.
+ */
+ MDSS_PLL_REG_W(dp_res->pll_base,
+ QSERDES_COM_CMN_CONFIG, 0x02);
+
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+ 0x1a);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+ 0x1a);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
+ 0x00);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
+ 0x00);
+
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_TX_DRV_LVL,
+ 0x38);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_TX_DRV_LVL,
+ 0x38);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_TX_EMP_POST1_LVL,
+ 0x2c);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_TX_EMP_POST1_LVL,
+ 0x2c);
+
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_TX0_TX1_LANE_CTL, 0x05);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_TX2_TX3_LANE_CTL, 0x05);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_VMODE_CTRL1,
+ 0x40);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_VMODE_CTRL1,
+ 0x40);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN,
+ 0x30);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN,
+ 0x30);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_INTERFACE_SELECT,
+ 0x3d);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_INTERFACE_SELECT,
+ 0x3d);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE,
+ 0x0f);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE,
+ 0x0f);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_RESET_TSYNC_EN,
+ 0x03);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_RESET_TSYNC_EN,
+ 0x03);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_TRAN_DRVR_EMP_EN,
+ 0x03);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_TRAN_DRVR_EMP_EN,
+ 0x03);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN,
+ 0x00);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN,
+ 0x00);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_TX_INTERFACE_MODE,
+ 0x00);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_TX_INTERFACE_MODE,
+ 0x00);
+ return res;
+}
+
+/*
+ * dp_pll_lock_status() - check whether the DP PLL and PHY report ready.
+ *
+ * Polls QSERDES_COM_C_READY_STATUS (PLL block) for C_READY and
+ * DP_PHY_STATUS (PHY block) for PHY_READY.  Returns true only when both
+ * bits assert within the poll timeout.
+ */
+static bool dp_pll_lock_status(struct mdss_pll_resources *dp_res)
+{
+	u32 status;
+	bool pll_locked;
+
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_atomic((dp_res->pll_base +
+			QSERDES_COM_C_READY_STATUS),
+			status,
+			((status & BIT(0)) > 0),
+			DP_PLL_POLL_SLEEP_US,
+			DP_PLL_POLL_TIMEOUT_US)) {
+		pr_err("%s: C_READY status is not high. Status=%x\n",
+				__func__, status);
+		pll_locked = false;
+	} else if (readl_poll_timeout_atomic((dp_res->phy_base +
+			DP_PHY_STATUS),
+			status,
+			((status & BIT(1)) > 0),
+			DP_PLL_POLL_SLEEP_US,
+			DP_PLL_POLL_TIMEOUT_US)) {
+		/*
+		 * DP_PHY_STATUS (0x00BC) is in the PHY register block, as
+		 * used by dp_pll_enable(); the previous code polled it off
+		 * pll_base, which reads the wrong register.
+		 */
+		pr_err("%s: Phy_ready is not high. Status=%x\n",
+				__func__, status);
+		pll_locked = false;
+	} else {
+		pll_locked = true;
+	}
+
+	return pll_locked;
+}
+
+
+/*
+ * dp_pll_enable() - bring up the DP PHY/PLL and wait for lock.
+ *
+ * Sequences DP_PHY_CFG through its reset/start values, kicks the
+ * QSERDES reset state machine, polls C_READY (PLL block) and PHY_READY
+ * (PHY block), then enables the TX lane drivers.  The register values
+ * and their order come from the hardware programming sequence -- do not
+ * reorder.
+ *
+ * Return: 0 on success, -EINVAL if either ready poll times out.
+ */
+static int dp_pll_enable(struct clk *c)
+{
+	int rc = 0;
+	u32 status;
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	/* PHY configuration/reset toggle sequence */
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		DP_PHY_CFG, 0x01);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		DP_PHY_CFG, 0x05);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		DP_PHY_CFG, 0x01);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		DP_PHY_CFG, 0x09);
+	/* start the PLL reset state machine */
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_RESETSM_CNTRL, 0x20);
+
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_atomic((dp_res->pll_base +
+			QSERDES_COM_C_READY_STATUS),
+			status,
+			((status & BIT(0)) > 0),
+			DP_PLL_POLL_SLEEP_US,
+			DP_PLL_POLL_TIMEOUT_US)) {
+		pr_err("%s: C_READY status is not high. Status=%x\n",
+				__func__, status);
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		DP_PHY_CFG, 0x19);
+
+	/* poll for PHY ready status */
+	if (readl_poll_timeout_atomic((dp_res->phy_base +
+			DP_PHY_STATUS),
+			status,
+			((status & BIT(1)) > 0),
+			DP_PLL_POLL_SLEEP_US,
+			DP_PLL_POLL_TIMEOUT_US)) {
+		pr_err("%s: Phy_ready is not high. Status=%x\n",
+				__func__, status);
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	pr_debug("%s: PLL is locked\n", __func__);
+
+	/* with the PLL locked, enable bias and un-tristate both TX lanes */
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+		0x3f);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+		0x3f);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
+		0x10);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
+		0x10);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_TX_POL_INV,
+		0x0a);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_TX_POL_INV,
+		0x0a);
+	/* final PHY config toggle to latch the lane settings */
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		DP_PHY_CFG, 0x18);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		DP_PHY_CFG, 0x19);
+
+	/*
+	 * Make sure all the register writes are completed before
+	 * doing any other operation
+	 */
+	wmb();
+
+lock_err:
+	return rc;
+}
+
+/*
+ * dp_pll_disable() - power down the DP PHY.
+ *
+ * Return: always 0.
+ */
+static int dp_pll_disable(struct clk *c)
+{
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	/* Assert DP PHY power down */
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x3c);
+
+	/*
+	 * Make sure the power-down write has reached the hardware
+	 * before any other register operation is attempted.
+	 */
+	wmb();
+
+	return 0;
+}
+
+
+/*
+ * dp_vco_prepare() - clk prepare handler for the DP VCO.
+ *
+ * Takes a PLL resource vote, then runs the PLL enable sequence.  On
+ * enable failure the resource vote is dropped again.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int dp_vco_prepare(struct clk *c)
+{
+	int rc = 0;
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	struct mdss_pll_resources *dp_pll_res = vco->priv;
+
+	DEV_DBG("rate=%ld\n", vco->rate);
+	rc = mdss_pll_resource_enable(dp_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP pll resources\n");
+		goto error;
+	}
+
+	rc = dp_pll_enable(c);
+	if (rc) {
+		mdss_pll_resource_enable(dp_pll_res, false);
+		/* message said "dsi pll" -- copy-paste from the DSI driver */
+		pr_err("ndx=%d failed to enable dp pll\n",
+					dp_pll_res->index);
+	}
+
+error:
+	return rc;
+}
+
+/*
+ * dp_vco_unprepare() - clk unprepare handler for the DP VCO.
+ *
+ * If the PLL is not marked on (e.g. after handoff), a temporary
+ * resource vote is taken so the PHY power-down write can land; the
+ * vote (and any handoff vote) is then dropped.
+ */
+void dp_vco_unprepare(struct clk *c)
+{
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (!io) {
+		DEV_ERR("Invalid input parameter\n");
+		return;
+	}
+
+	/* resources must be up before touching DP_PHY_PD_CTL */
+	if (!io->pll_on &&
+		mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return;
+	}
+	dp_pll_disable(c);
+
+	/* drop the handoff vote (if any) and this function's vote */
+	io->handoff_resources = false;
+	mdss_pll_resource_enable(io, false);
+	io->pll_on = false;
+}
+
+/*
+ * dp_vco_set_rate() - clk set_rate handler for the DP VCO.
+ *
+ * Programs the VCO registers for @rate under a temporary resource vote.
+ * The cached rate is only updated when programming succeeds, and the
+ * error (previously swallowed -- the function always returned 0) is now
+ * propagated to the clock framework.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int dp_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	int rc;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	DEV_DBG("DP lane CLK rate=%ld\n", rate);
+
+	rc = dp_config_vco_rate(vco, rate);
+	if (rc)
+		DEV_ERR("%s: Failed to set clk rate\n", __func__);
+	else
+		vco->rate = rate;
+
+	mdss_pll_resource_enable(io, false);
+
+	return rc;
+}
+
+/*
+ * dp_vco_get_rate() - derive the current VCO rate from hardware.
+ *
+ * Reads QSERDES_COM_HSCLK_SEL and DP_PHY_MODE and maps the divider
+ * combination back to one of the three supported VCO rates.
+ *
+ * Return: the VCO rate in Hz, or a negative errno (cast to unsigned
+ * long, matching the existing convention) if resources can't be voted.
+ */
+unsigned long dp_vco_get_rate(struct clk *c)
+{
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	int rc;
+	/*
+	 * link2xclk_div must be pre-initialized: the unsupported-divider
+	 * branch below only logs an error, and the original code then
+	 * read link2xclk_div uninitialized (undefined behavior).  0 makes
+	 * that case fall through to the hsclk_div-based rates.
+	 */
+	u32 div, hsclk_div, link2xclk_div = 0;
+	u64 vco_rate;
+	struct mdss_pll_resources *pll = vco->priv;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP pll=%d\n", pll->index);
+		return rc;
+	}
+
+	div = MDSS_PLL_REG_R(pll->pll_base, QSERDES_COM_HSCLK_SEL);
+	div &= 0x0f;
+
+	if (div == 4)
+		hsclk_div = 3;
+	else
+		hsclk_div = 2;
+
+	div = MDSS_PLL_REG_R(pll->phy_base, DP_PHY_MODE);
+
+	if (div & 0x48)
+		pr_err("%s: DP PAR Rate not correct\n", __func__);
+
+	if ((div & 0x3) == 1)
+		link2xclk_div = 10;
+	else if ((div & 0x3) == 0)
+		link2xclk_div = 5;
+	else
+		pr_err("%s: unsupported div. Phy_mode: %d\n", __func__, div);
+
+	if (link2xclk_div == 10) {
+		vco_rate = DP_VCO_RATE_9720MHz;
+	} else {
+		if (hsclk_div == 3)
+			vco_rate = DP_VCO_RATE_8100MHz;
+		else
+			vco_rate = DP_VCO_RATE_10800MHz;
+	}
+
+	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return (unsigned long)vco_rate;
+}
+
+/*
+ * dp_vco_round_rate() - snap a requested rate to a supported VCO rate.
+ *
+ * The VCO only supports three discrete rates; requests are rounded up
+ * to the smallest supported rate that satisfies them (clamped at max).
+ */
+long dp_vco_round_rate(struct clk *c, unsigned long rate)
+{
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	unsigned long rrate;
+
+	if (rate <= vco->min_rate)
+		rrate = vco->min_rate;
+	else if (rate <= DP_VCO_RATE_9720MHz)
+		rrate = DP_VCO_RATE_9720MHz;
+	else
+		rrate = vco->max_rate;
+
+	pr_debug("%s: rrate=%ld\n", __func__, rrate);
+
+	return rrate;
+}
+
+/*
+ * dp_vco_handoff() - detect a PLL left running by the bootloader.
+ *
+ * If the PLL reports locked, the resource vote taken here is kept
+ * (handoff_resources = true) and the current hardware rate is cached so
+ * the clock framework inherits the running clock; otherwise the vote is
+ * dropped again.
+ */
+enum handoff dp_vco_handoff(struct clk *c)
+{
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return ret;
+	}
+
+	if (dp_pll_lock_status(io)) {
+		/* keep the vote; it is released in dp_vco_unprepare() */
+		io->pll_on = true;
+		c->rate = dp_vco_get_rate(c);
+		io->handoff_resources = true;
+		ret = HANDOFF_ENABLED_CLK;
+	} else {
+		io->handoff_resources = false;
+		mdss_pll_resource_enable(io, false);
+		DEV_DBG("%s: PLL not locked\n", __func__);
+	}
+
+	DEV_DBG("done, ret=%d\n", ret);
+	return ret;
+}
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.c b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.c
new file mode 100644
index 000000000000..8b06f07b00ee
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.c
@@ -0,0 +1,249 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+***************************************************************************
+******** Display Port PLL driver block diagram for branch clocks **********
+***************************************************************************
+
+ +-------------------+
+ | dp_vco_clk |
+ | (DP PLL/VCO) |
+ +---------+---------+
+ |
+ |
+ v
+ +----------+-----------+
+ | hsclk_divsel_clk_src |
+ +----------+-----------+
+ |
+ |
+ v
+ +------------<------------|------------>-------------+
+ | | |
+ | | |
++----------v----------+ +----------v----------+ +----------v----------+
+|vco_divided_clk_src | | dp_link_2x_clk | | dp_link_2x_clk |
+| (aux_clk_ops) | | | | |
+v----------+----------v | divsel_five | | divsel_ten |
+ | +----------+----------+ +----------+----------+
+ | | |
+ v v v
+ | +--------------------+ |
+ Input to MMSSCC block | | | |
+ for DP pixel clock +--> dp_link_2x_clk_mux <--+
+ | |
+ +----------+---------+
+ |
+ v
+ Input to MMSSCC block
+ for link clk, crypto clk
+ and interface clock
+
+
+******************************************************************************
+*/
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-cobalt.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-cobalt.h"
+
+/*
+ * Runtime-populated clk_ops copies: filled in from the generic div ops
+ * in dp_pll_clock_register_cobalt(), where .prepare/.handoff callbacks
+ * are patched in.
+ */
+static struct clk_ops clk_ops_hsclk_divsel_clk_src_c;
+static struct clk_ops clk_ops_vco_divided_clk_src_c;
+static struct clk_ops clk_ops_link_2x_clk_div_c;
+
+/* HSCLK divider (div 2/3) register accessors */
+static struct clk_div_ops hsclk_divsel_ops = {
+	.set_div = hsclk_divsel_set_div,
+	.get_div = hsclk_divsel_get_div,
+};
+
+/* link 2x clock divider (div 5/10) register accessors */
+static struct clk_div_ops link2xclk_divsel_ops = {
+	.set_div = link2xclk_divsel_set_div,
+	.get_div = link2xclk_divsel_get_div,
+};
+
+/* divided-VCO (pixel path) divider register accessors */
+static struct clk_div_ops vco_divided_clk_ops = {
+	.set_div = vco_divided_clk_set_div,
+	.get_div = vco_divided_clk_get_div,
+};
+
+/* top-level DP VCO clock operations */
+static struct clk_ops dp_cobalt_vco_clk_ops = {
+	.set_rate = dp_vco_set_rate,
+	.round_rate = dp_vco_round_rate,
+	.prepare = dp_vco_prepare,
+	.unprepare = dp_vco_unprepare,
+	.handoff = dp_vco_handoff,
+};
+
+/* generic mux selection ops shared by the mdss pll drivers */
+static struct clk_mux_ops mdss_mux_ops = {
+	.set_mux_sel = mdss_set_mux_sel,
+	.get_mux_sel = mdss_get_mux_sel,
+};
+
+/* DP VCO: 8.1 - 10.8 GHz (see the block diagram at the top of file) */
+static struct dp_pll_vco_clk dp_vco_clk = {
+	.min_rate = DP_VCO_RATE_8100MHz,
+	.max_rate = DP_VCO_RATE_10800MHz,
+	.c = {
+		.dbg_name = "dp_vco_clk",
+		.ops = &dp_cobalt_vco_clk_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dp_vco_clk.c),
+	},
+};
+
+/* HSCLK divider off the VCO (div 2 or 3) */
+static struct div_clk hsclk_divsel_clk_src = {
+	.data = {
+		.min_div = 2,
+		.max_div = 3,
+	},
+	.ops = &hsclk_divsel_ops,
+	.c = {
+		.parent = &dp_vco_clk.c,
+		.dbg_name = "hsclk_divsel_clk_src",
+		.ops = &clk_ops_hsclk_divsel_clk_src_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(hsclk_divsel_clk_src.c),
+	},
+};
+
+/* fixed /5 branch feeding the link 2x mux */
+static struct div_clk dp_link_2x_clk_divsel_five = {
+	.data = {
+		.div = 5,
+		.min_div = 5,
+		.max_div = 5,
+	},
+	.ops = &link2xclk_divsel_ops,
+	.c = {
+		.parent = &hsclk_divsel_clk_src.c,
+		.dbg_name = "dp_link_2x_clk_divsel_five",
+		.ops = &clk_ops_link_2x_clk_div_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dp_link_2x_clk_divsel_five.c),
+	},
+};
+
+/* fixed /10 branch feeding the link 2x mux */
+static struct div_clk dp_link_2x_clk_divsel_ten = {
+	.data = {
+		.div = 10,
+		.min_div = 10,
+		.max_div = 10,
+	},
+	.ops = &link2xclk_divsel_ops,
+	.c = {
+		.parent = &hsclk_divsel_clk_src.c,
+		.dbg_name = "dp_link_2x_clk_divsel_ten",
+		.ops = &clk_ops_link_2x_clk_div_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dp_link_2x_clk_divsel_ten.c),
+	},
+};
+
+/* selects between the /5 and /10 link 2x branches */
+static struct mux_clk dp_link_2x_clk_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dp_link_2x_clk_divsel_five.c, 0},
+		{&dp_link_2x_clk_divsel_ten.c, 1},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dp_link_2x_clk_divsel_five.c,
+		.dbg_name = "dp_link_2x_clk_mux",
+		.ops = &clk_ops_gen_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dp_link_2x_clk_mux.c),
+	}
+};
+
+/* fixed /4 divided VCO output: source for the DP pixel clock */
+static struct div_clk vco_divided_clk_src = {
+	.data = {
+		.div = 4,
+		.min_div = 4,
+		.max_div = 4,
+	},
+	.ops = &vco_divided_clk_ops,
+	.c = {
+		.parent = &hsclk_divsel_clk_src.c,
+		.dbg_name = "vco_divided_clk",
+		.ops = &clk_ops_vco_divided_clk_src_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(vco_divided_clk_src.c),
+	},
+};
+
+/* clocks exported to the MSM clock framework */
+static struct clk_lookup dp_pllcc_cobalt[] = {
+	CLK_LIST(dp_vco_clk),
+	CLK_LIST(hsclk_divsel_clk_src),
+	CLK_LIST(dp_link_2x_clk_divsel_five),
+	CLK_LIST(dp_link_2x_clk_divsel_ten),
+	CLK_LIST(dp_link_2x_clk_mux),
+	CLK_LIST(vco_divided_clk_src),
+};
+
+/*
+ * dp_pll_clock_register_cobalt() - register the DP PLL clock tree.
+ * @pdev:    platform device supplying the of_node to register against
+ * @pll_res: resolved PLL/PHY register resources; stored as clock priv
+ *
+ * Populates the runtime clk_ops copies, attaches @pll_res to every
+ * clock in the tree and registers the tree with the MSM clock
+ * framework.
+ *
+ * Return: 0 on success, -EINVAL for bad input, -EPROBE_DEFER when
+ * registration fails so the caller retries later.
+ */
+int dp_pll_clock_register_cobalt(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	/* rc was pre-set to -ENOTSUPP, but every path below assigns it */
+	int rc;
+
+	if (!pll_res || !pll_res->pll_base || !pll_res->phy_base) {
+		DEV_ERR("%s: Invalid input parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Set client data for vco, mux and div clocks */
+	dp_vco_clk.priv = pll_res;
+	hsclk_divsel_clk_src.priv = pll_res;
+	dp_link_2x_clk_mux.priv = pll_res;
+	vco_divided_clk_src.priv = pll_res;
+	dp_link_2x_clk_divsel_five.priv = pll_res;
+	dp_link_2x_clk_divsel_ten.priv = pll_res;
+
+	clk_ops_hsclk_divsel_clk_src_c = clk_ops_div;
+	clk_ops_hsclk_divsel_clk_src_c.prepare = mdss_pll_div_prepare;
+
+	clk_ops_link_2x_clk_div_c = clk_ops_div;
+	clk_ops_link_2x_clk_div_c.prepare = mdss_pll_div_prepare;
+
+	/*
+	 * Set the ops for the divider in the pixel clock tree to the
+	 * slave_div to ensure that a set rate on this divider clock will not
+	 * be propagated to it's parent. This is needed ensure that when we set
+	 * the rate for pixel clock, the vco is not reconfigured
+	 */
+	clk_ops_vco_divided_clk_src_c = clk_ops_slave_div;
+	clk_ops_vco_divided_clk_src_c.prepare = mdss_pll_div_prepare;
+	clk_ops_vco_divided_clk_src_c.handoff = vco_divided_clk_handoff;
+
+	/* We can select different clock ops for future versions */
+	dp_vco_clk.c.ops = &dp_cobalt_vco_clk_ops;
+
+	rc = of_msm_clock_register(pdev->dev.of_node, dp_pllcc_cobalt,
+		 ARRAY_SIZE(dp_pllcc_cobalt));
+	if (rc) {
+		DEV_ERR("%s: Clock register failed rc=%d\n", __func__, rc);
+		/*
+		 * NOTE(review): the original error code is replaced with
+		 * -EPROBE_DEFER so the probe is retried -- confirm callers
+		 * rely on this before propagating rc directly.
+		 */
+		rc = -EPROBE_DEFER;
+	} else {
+		DEV_DBG("%s SUCCESS\n", __func__);
+	}
+
+	return rc;
+}
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
new file mode 100644
index 000000000000..290933c0cbb4
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
@@ -0,0 +1,169 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_DP_PLL_COBALT_H
+#define __MDSS_DP_PLL_COBALT_H
+
+#define DP_PHY_REVISION_ID0 0x0000
+#define DP_PHY_REVISION_ID1 0x0004
+#define DP_PHY_REVISION_ID2 0x0008
+#define DP_PHY_REVISION_ID3 0x000C
+
+#define DP_PHY_CFG 0x0010
+#define DP_PHY_PD_CTL 0x0014
+#define DP_PHY_MODE 0x0018
+
+#define DP_PHY_AUX_CFG0 0x001C
+#define DP_PHY_AUX_CFG1 0x0020
+#define DP_PHY_AUX_CFG2 0x0024
+#define DP_PHY_AUX_CFG3 0x0028
+#define DP_PHY_AUX_CFG4 0x002C
+#define DP_PHY_AUX_CFG5 0x0030
+#define DP_PHY_AUX_CFG6 0x0034
+#define DP_PHY_AUX_CFG7 0x0038
+#define DP_PHY_AUX_CFG8 0x003C
+#define DP_PHY_AUX_CFG9 0x0040
+#define DP_PHY_AUX_INTERRUPT_MASK 0x0044
+#define DP_PHY_AUX_INTERRUPT_CLEAR 0x0048
+#define DP_PHY_AUX_BIST_CFG 0x004C
+
+#define DP_PHY_VCO_DIV 0x0064
+#define DP_PHY_TX0_TX1_LANE_CTL 0x0068
+
+#define DP_PHY_TX2_TX3_LANE_CTL 0x0084
+#define DP_PHY_STATUS 0x00BC
+
+/* Tx registers */
+#define QSERDES_TX0_OFFSET 0x0400
+#define QSERDES_TX1_OFFSET 0x0800
+
+#define TXn_BIST_MODE_LANENO 0x0000
+#define TXn_CLKBUF_ENABLE 0x0008
+#define TXn_TX_EMP_POST1_LVL 0x000C
+
+#define TXn_TX_DRV_LVL 0x001C
+
+#define TXn_RESET_TSYNC_EN 0x0024
+#define TXn_PRE_STALL_LDO_BOOST_EN 0x0028
+#define TXn_TX_BAND 0x002C
+#define TXn_SLEW_CNTL 0x0030
+#define TXn_INTERFACE_SELECT 0x0034
+
+#define TXn_DEBUG_BUS_SEL 0x0058
+#define TXn_TRANSCEIVER_BIAS_EN 0x005C
+#define TXn_HIGHZ_DRVR_EN 0x0060
+#define TXn_TX_POL_INV 0x0064
+#define TXn_PARRATE_REC_DETECT_IDLE_EN 0x0068
+
+#define TXn_TRAN_DRVR_EMP_EN 0x00C0
+#define TXn_TX_INTERFACE_MODE 0x00C4
+
+#define TXn_VMODE_CTRL1 0x00F0
+
+
+/* PLL register offset */
+#define QSERDES_COM_ATB_SEL1 0x0000
+#define QSERDES_COM_ATB_SEL2 0x0004
+#define QSERDES_COM_FREQ_UPDATE 0x0008
+#define QSERDES_COM_BG_TIMER 0x000C
+#define QSERDES_COM_SSC_EN_CENTER 0x0010
+#define QSERDES_COM_SSC_ADJ_PER1 0x0014
+#define QSERDES_COM_SSC_ADJ_PER2 0x0018
+#define QSERDES_COM_SSC_PER1 0x001C
+#define QSERDES_COM_SSC_PER2 0x0020
+#define QSERDES_COM_SSC_STEP_SIZE1 0x0024
+#define QSERDES_COM_SSC_STEP_SIZE2 0x0028
+#define QSERDES_COM_POST_DIV 0x002C
+#define QSERDES_COM_POST_DIV_MUX 0x0030
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x0034
+#define QSERDES_COM_CLK_ENABLE1 0x0038
+#define QSERDES_COM_SYS_CLK_CTRL 0x003C
+#define QSERDES_COM_SYSCLK_BUF_ENABLE 0x0040
+#define QSERDES_COM_PLL_EN 0x0044
+#define QSERDES_COM_PLL_IVCO 0x0048
+#define QSERDES_COM_CMN_IETRIM 0x004C
+#define QSERDES_COM_CMN_IPTRIM 0x0050
+
+#define QSERDES_COM_CP_CTRL_MODE0 0x0060
+#define QSERDES_COM_CP_CTRL_MODE1 0x0064
+#define QSERDES_COM_PLL_RCTRL_MODE0 0x0068
+#define QSERDES_COM_PLL_RCTRL_MODE1 0x006C
+#define QSERDES_COM_PLL_CCTRL_MODE0 0x0070
+#define QSERDES_COM_PLL_CCTRL_MODE1 0x0074
+#define QSERDES_COM_PLL_CNTRL 0x0078
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x007C
+#define QSERDES_COM_SYSCLK_EN_SEL 0x0080
+#define QSERDES_COM_CML_SYSCLK_SEL 0x0084
+#define QSERDES_COM_RESETSM_CNTRL 0x0088
+#define QSERDES_COM_RESETSM_CNTRL2 0x008C
+#define QSERDES_COM_LOCK_CMP_EN 0x0090
+#define QSERDES_COM_LOCK_CMP_CFG 0x0094
+#define QSERDES_COM_LOCK_CMP1_MODE0 0x0098
+#define QSERDES_COM_LOCK_CMP2_MODE0 0x009C
+#define QSERDES_COM_LOCK_CMP3_MODE0 0x00A0
+
+#define QSERDES_COM_DEC_START_MODE0 0x00B0
+#define QSERDES_COM_DEC_START_MODE1 0x00B4
+#define QSERDES_COM_DIV_FRAC_START1_MODE0 0x00B8
+#define QSERDES_COM_DIV_FRAC_START2_MODE0 0x00BC
+#define QSERDES_COM_DIV_FRAC_START3_MODE0 0x00C0
+#define QSERDES_COM_DIV_FRAC_START1_MODE1 0x00C4
+#define QSERDES_COM_DIV_FRAC_START2_MODE1 0x00C8
+#define QSERDES_COM_DIV_FRAC_START3_MODE1 0x00CC
+#define QSERDES_COM_INTEGLOOP_INITVAL 0x00D0
+#define QSERDES_COM_INTEGLOOP_EN 0x00D4
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x00D8
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x00DC
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x00E0
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x00E4
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x00E8
+#define QSERDES_COM_VCO_TUNE_CTRL 0x00EC
+#define QSERDES_COM_VCO_TUNE_MAP 0x00F0
+
+#define QSERDES_COM_CMN_STATUS 0x0124
+#define QSERDES_COM_RESET_SM_STATUS 0x0128
+
+#define QSERDES_COM_CLK_SEL 0x0138
+#define QSERDES_COM_HSCLK_SEL 0x013C
+
+#define QSERDES_COM_CORECLK_DIV_MODE0 0x0148
+
+#define QSERDES_COM_SW_RESET 0x0150
+#define QSERDES_COM_CORE_CLK_EN 0x0154
+#define QSERDES_COM_C_READY_STATUS 0x0158
+#define QSERDES_COM_CMN_CONFIG 0x015C
+
+#define QSERDES_COM_SVS_MODE_CLK_SEL 0x0164
+
+#define DP_PLL_POLL_SLEEP_US 500
+#define DP_PLL_POLL_TIMEOUT_US 10000
+
+#define DP_VCO_RATE_8100MHz 8100000000ULL
+#define DP_VCO_RATE_9720MHz 9720000000ULL
+#define DP_VCO_RATE_10800MHz 10800000000ULL
+
+int dp_vco_set_rate(struct clk *c, unsigned long rate);
+unsigned long dp_vco_get_rate(struct clk *c);
+long dp_vco_round_rate(struct clk *c, unsigned long rate);
+enum handoff dp_vco_handoff(struct clk *c);
+enum handoff vco_divided_clk_handoff(struct clk *c);
+int dp_vco_prepare(struct clk *c);
+void dp_vco_unprepare(struct clk *c);
+int hsclk_divsel_set_div(struct div_clk *clk, int div);
+int hsclk_divsel_get_div(struct div_clk *clk);
+int link2xclk_divsel_set_div(struct div_clk *clk, int div);
+int link2xclk_divsel_get_div(struct div_clk *clk);
+int vco_divided_clk_set_div(struct div_clk *clk, int div);
+int vco_divided_clk_get_div(struct div_clk *clk);
+
+#endif /* __MDSS_DP_PLL_COBALT_H */
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll.h b/drivers/clk/msm/mdss/mdss-dp-pll.h
new file mode 100644
index 000000000000..3abc4c29c17e
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dp-pll.h
@@ -0,0 +1,36 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MDSS_DP_PLL_H
+#define __MDSS_DP_PLL_H
+
+/*
+ * struct dp_pll_vco_clk - DP VCO clock wrapper
+ * @rate:     current vco rate
+ * @min_rate: minimum supported vco rate
+ * @max_rate: maximum supported vco rate
+ * @priv:     driver data (struct mdss_pll_resources for this PLL)
+ * @c:        embedded generic msm clock
+ */
+struct dp_pll_vco_clk {
+	unsigned long	rate;		/* current vco rate */
+	u64		min_rate;	/* min vco rate */
+	u64		max_rate;	/* max vco rate */
+	void		*priv;
+
+	struct clk	c;
+};
+
+/* map an embedded generic clock back to its containing DP VCO clock */
+static inline struct dp_pll_vco_clk *mdss_dp_to_vco_clk(struct clk *clk)
+{
+	return container_of(clk, struct dp_pll_vco_clk, c);
+}
+
+int dp_pll_clock_register_cobalt(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+
+#endif /* __MDSS_DP_PLL_H */
+
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c b/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c
new file mode 100644
index 000000000000..f6c85cf8d9a4
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-8996-util.c
@@ -0,0 +1,1138 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-dsi-pll-8996.h"
+
+#define DSI_PLL_POLL_MAX_READS 15
+#define DSI_PLL_POLL_TIMEOUT_US 1000
+#define MSM8996_DSI_PLL_REVISION_2 2
+
+#define CEIL(x, y) (((x) + ((y)-1)) / (y))
+
+/*
+ * The 8996 byte/pixel source muxes each have a single effective parent,
+ * so the mux select callbacks are no-ops that always report position 0.
+ */
+int set_mdss_byte_mux_sel_8996(struct mux_clk *clk, int sel)
+{
+	return 0;
+}
+
+int get_mdss_byte_mux_sel_8996(struct mux_clk *clk)
+{
+	return 0;
+}
+
+int set_mdss_pixel_mux_sel_8996(struct mux_clk *clk, int sel)
+{
+	return 0;
+}
+
+int get_mdss_pixel_mux_sel_8996(struct mux_clk *clk)
+{
+	return 0;
+}
+
+/*
+ * mdss_pll_read_stored_trim_codes() - look up cached PLL trim codes.
+ * @dsi_pll_res:  PLL resources holding the dynamic-fps codes table
+ * @vco_clk_rate: VCO rate to match against the stored entries
+ *
+ * Scans the dfps code table for a *valid* entry whose clk_rate matches
+ * @vco_clk_rate and caches its kvco/vco-tune codes.
+ *
+ * Return: 0 when a matching valid entry was found, -EINVAL otherwise.
+ */
+static int mdss_pll_read_stored_trim_codes(
+		struct mdss_pll_resources *dsi_pll_res, s64 vco_clk_rate)
+{
+	int i;
+	int rc = 0;
+	bool found = false;
+
+	if (!dsi_pll_res->dfps) {
+		rc = -EINVAL;
+		goto end_read;
+	}
+
+	for (i = 0; i < dsi_pll_res->dfps->panel_dfps.frame_rate_cnt; i++) {
+		struct dfps_codes_info *codes_info =
+			&dsi_pll_res->dfps->codes_dfps[i];
+
+		pr_debug("valid=%d frame_rate=%d, vco_rate=%d, code %d %d\n",
+			codes_info->is_valid, codes_info->frame_rate,
+			codes_info->clk_rate, codes_info->pll_codes.pll_codes_1,
+			codes_info->pll_codes.pll_codes_2);
+
+		/*
+		 * Skip entries that are invalid OR whose rate does not
+		 * match.  The previous condition (mismatch && is_valid)
+		 * let invalid entries fall through and be used.
+		 */
+		if (!codes_info->is_valid ||
+			vco_clk_rate != codes_info->clk_rate)
+			continue;
+
+		dsi_pll_res->cache_pll_trim_codes[0] =
+			codes_info->pll_codes.pll_codes_1;
+		dsi_pll_res->cache_pll_trim_codes[1] =
+			codes_info->pll_codes.pll_codes_2;
+		found = true;
+		break;
+	}
+
+	if (!found) {
+		rc = -EINVAL;
+		goto end_read;
+	}
+
+	pr_debug("core_kvco_code=0x%x core_vco_tune=0x%x\n",
+			dsi_pll_res->cache_pll_trim_codes[0],
+			dsi_pll_res->cache_pll_trim_codes[1]);
+
+end_read:
+	return rc;
+}
+
+/*
+ * post_n1_div_set_div() - program the N1 post divider (vco/bit path).
+ * @clk: divider clock; priv is the mdss_pll_resources instance
+ * @div: N1 divider value, written to DSIPHY_CMN_CLK_CFG0[3:0]
+ *
+ * Return: 0 on success, error from resource enable otherwise.
+ */
+int post_n1_div_set_div(struct div_clk *clk, int div)
+{
+	struct mdss_pll_resources *pll = clk->priv;
+	struct dsi_pll_db *pdb;
+	struct dsi_pll_output *pout;
+	int rc;
+	u32 n1div = 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	pdb = (struct dsi_pll_db *)pll->priv;
+	pout = &pdb->out;
+
+	/*
+	 * vco rate = bit_clk * postdiv * n1div
+	 * vco range from 1300 to 2600 Mhz
+	 * postdiv = 1
+	 * n1div = 1 to 15
+	 * n1div = roundup(1300Mhz / bit_clk)
+	 * support bit_clk above 86.67Mhz
+	 */
+
+	/* this is for vco/bit clock */
+	pout->pll_postdiv = 1;	/* fixed, divided by 1 */
+	pout->pll_n1div = div;
+
+	/* read-modify-write: only the low nibble holds n1div */
+	n1div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	n1div &= ~0xf;
+	n1div |= (div & 0xf);
+	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n1div);
+	/* ensure n1 divider is programmed */
+	wmb();
+	pr_debug("ndx=%d div=%d postdiv=%x n1div=%x\n",
+			pll->index, div, pout->pll_postdiv, pout->pll_n1div);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return 0;
+}
+
+/*
+ * post_n1_div_get_div() - read back the N1 post divider.
+ *
+ * Returns 0 while the GDSC is off (registers unreadable), a negative
+ * errno if resources can't be voted, otherwise DSIPHY_CMN_CLK_CFG0[3:0].
+ */
+int post_n1_div_get_div(struct div_clk *clk)
+{
+	u32 div;
+	int rc;
+	struct mdss_pll_resources *pll = clk->priv;
+
+	if (is_gdsc_disabled(pll))
+		return 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	/*
+	 * postdiv = 1/2/4/8
+	 * n1div = 1 - 15
+	 * for the time being, assume postdiv = 1
+	 */
+
+	div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	div &= 0xF;
+	pr_debug("n1 div = %d\n", div);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return div;
+}
+
+/*
+ * n2_div_set_div() - program the N2 (pixel path) divider.
+ * @clk: divider clock; priv is the mdss_pll_resources instance
+ * @div: divider value written to DSIPHY_CMN_CLK_CFG0[7:4]
+ *
+ * The same value is mirrored to the slave PLL when split display is
+ * configured, and dsiclk_sel is set in DSIPHY_CMN_CLK_CFG1.
+ */
+int n2_div_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	u32 n2div;
+	struct mdss_pll_resources *pll = clk->priv;
+	struct dsi_pll_db *pdb;
+	struct dsi_pll_output *pout;
+	struct mdss_pll_resources *slave;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	pdb = (struct dsi_pll_db *)pll->priv;
+	pout = &pdb->out;
+
+	/* this is for pixel clock */
+	n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	n2div &= ~0xf0;	/* bits 4 to 7 */
+	n2div |= (div << 4);
+	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n2div);
+
+	/* commit slave if split display is enabled */
+	slave = pll->slave;
+	if (slave)
+		MDSS_PLL_REG_W(slave->pll_base, DSIPHY_CMN_CLK_CFG0, n2div);
+
+	pout->pll_n2div = div;
+
+	/* set dsiclk_sel=1 so that n2div *= 2 */
+	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG1, 1);
+	pr_debug("ndx=%d div=%d n2div=%x\n", pll->index, div, n2div);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+/*
+ * shadow_n2_div_set_div() - queue the N1/N2 divider values into the
+ * dynamic-refresh shadow registers instead of writing them live; the
+ * hardware applies them at the next dynamic refresh.
+ */
+int shadow_n2_div_set_div(struct div_clk *clk, int div)
+{
+	struct mdss_pll_resources *pll = clk->priv;
+	struct dsi_pll_db *pdb;
+	struct dsi_pll_output *pout;
+	u32 data;
+
+	pdb = pll->priv;
+	pout = &pdb->out;
+
+	pout->pll_n2div = div;
+
+	/* pack n1div (low nibble) and n2div (high nibble) into CLK_CFG0 */
+	data = (pout->pll_n1div | (pout->pll_n2div << 4));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL19,
+			DSIPHY_CMN_CLK_CFG0, DSIPHY_CMN_CLK_CFG1,
+			data, 1);
+	return 0;
+}
+
+/*
+ * n2_div_get_div() - read back the N2 (pixel path) divider.
+ *
+ * Returns 0 while the GDSC is off, a negative errno if resources can't
+ * be voted, otherwise DSIPHY_CMN_CLK_CFG0[7:4].
+ */
+int n2_div_get_div(struct div_clk *clk)
+{
+	int rc;
+	u32 n2div;
+	struct mdss_pll_resources *pll = clk->priv;
+
+	if (is_gdsc_disabled(pll))
+		return 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll=%d resources\n",
+						pll->index);
+		return rc;
+	}
+
+	n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	n2div >>= 4;
+	n2div &= 0x0f;
+
+	mdss_pll_resource_enable(pll, false);
+
+	pr_debug("ndx=%d div=%d\n", pll->index, n2div);
+
+	return n2div;
+}
+
+/*
+ * pll_is_pll_locked_8996() - poll the DSI PLL lock/ready status.
+ *
+ * Waits for BIT(5) (lock) and then BIT(0) (ready) of
+ * DSIPHY_PLL_RESET_SM_READY_STATUS.
+ *
+ * NOTE(review): DSI_PLL_POLL_MAX_READS (15) is passed in the delay_us
+ * slot of readl_poll_timeout_atomic() -- the name suggests it was meant
+ * as a read count; confirm the intended polling cadence.
+ */
+static bool pll_is_pll_locked_8996(struct mdss_pll_resources *pll)
+{
+	u32 status;
+	bool pll_locked;
+
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_atomic((pll->pll_base +
+			DSIPHY_PLL_RESET_SM_READY_STATUS),
+			status,
+			((status & BIT(5)) > 0),
+			DSI_PLL_POLL_MAX_READS,
+			DSI_PLL_POLL_TIMEOUT_US)) {
+		pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
+			pll->index, status);
+		pll_locked = false;
+	} else if (readl_poll_timeout_atomic((pll->pll_base +
+			DSIPHY_PLL_RESET_SM_READY_STATUS),
+			status,
+			((status & BIT(0)) > 0),
+			DSI_PLL_POLL_MAX_READS,
+			DSI_PLL_POLL_TIMEOUT_US)) {
+		pr_err("DSI PLL ndx=%d status=%x PLl not ready\n",
+			pll->index, status);
+		pll_locked = false;
+	} else {
+		pll_locked = true;
+	}
+
+	return pll_locked;
+}
+
+/* start the PLL: program VREF config, then assert CMN_PLL_CNTRL */
+static void dsi_pll_start_8996(void __iomem *pll_base)
+{
+	pr_debug("start PLL at base=%p\n", pll_base);
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VREF_CFG1, 0x10);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 1);
+}
+
+/* stop the PLL by deasserting CMN_PLL_CNTRL */
+static void dsi_pll_stop_8996(void __iomem *pll_base)
+{
+	pr_debug("stop PLL at base=%p\n", pll_base);
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
+}
+
+/*
+ * dsi_pll_enable_seq_8996() - single-shot PLL enable sequence.
+ *
+ * Starts the PLL and verifies lock.  Clock buffer enables
+ * (DSIPHY_PLL_CLKBUFLR_EN, DSIPHY_CMN_GLBL_TEST_CTRL) are already set
+ * up by mdss_dsi_8996_phy_config().
+ *
+ * Return: 0 when the PLL locks, -EINVAL otherwise.
+ */
+int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll)
+{
+	if (!pll) {
+		pr_err("Invalid PLL resources\n");
+		return -EINVAL;
+	}
+
+	dsi_pll_start_8996(pll->pll_base);
+
+	if (!pll_is_pll_locked_8996(pll)) {
+		pr_err("DSI PLL ndx=%d lock failed\n", pll->index);
+		return -EINVAL;
+	}
+
+	pr_debug("DSI PLL ndx=%d Lock success\n", pll->index);
+
+	return 0;
+}
+
+/*
+ * dsi_pll_enable() - clk enable handler for the DSI PLL.
+ *
+ * Runs each registered enable sequence in order until one reports lock.
+ *
+ * Return: 0 (and marks pll_on) on lock, the last sequence's error code
+ * otherwise.
+ */
+static int dsi_pll_enable(struct clk *c)
+{
+	int i, rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	/* Try all enable sequences until one succeeds */
+	for (i = 0; i < vco->pll_en_seq_cnt; i++) {
+		rc = vco->pll_enable_seqs[i](pll);
+		pr_debug("DSI PLL %s after sequence #%d\n",
+			rc ? "unlocked" : "locked", i + 1);
+		if (!rc)
+			break;
+	}
+
+	if (rc)
+		pr_err("ndx=%d DSI PLL failed to lock\n", pll->index);
+	else
+		pll->pll_on = true;
+
+	return rc;
+}
+
+/*
+ * dsi_pll_disable() - clk disable handler; stops the DSI PLL.
+ *
+ * If the PLL is not marked on (e.g. after handoff), a temporary
+ * resource vote is taken so the stop sequence can be programmed, then
+ * the vote is dropped and pll_on is cleared.
+ */
+static void dsi_pll_disable(struct clk *c)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll->pll_on &&
+		mdss_pll_resource_enable(pll, true)) {
+		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+		return;
+	}
+
+	/* the local "slave" variable was assigned here but never used */
+	pll->handoff_resources = false;
+
+	dsi_pll_stop_8996(pll->pll_base);
+
+	mdss_pll_resource_enable(pll, false);
+
+	pll->pll_on = false;
+
+	pr_debug("DSI PLL ndx=%d Disabled\n", pll->index);
+}
+
+/*
+ * mdss_dsi_pll_8996_input_init() - seed the PLL input parameter block.
+ *
+ * Copies board/panel derived settings (SSC config) out of @pll and
+ * fills in the fixed tuning values used by the 8996 DSI PLL; register
+ * offsets for each field are noted inline.
+ */
+static void mdss_dsi_pll_8996_input_init(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	pdb->in.fref = 19200000;	/* 19.2 Mhz*/
+	pdb->in.fdata = 0;		/* bit clock rate */
+	pdb->in.dsiclk_sel = 1;		/* 1, reg: 0x0014 */
+	pdb->in.ssc_en = pll->ssc_en;	/* 1, reg: 0x0494, bit 0 */
+	pdb->in.ldo_en = 0;		/* 0,  reg: 0x004c, bit 0 */
+
+	/* fixed input */
+	pdb->in.refclk_dbler_en = 0;	/* 0, reg: 0x04c0, bit 1 */
+	pdb->in.vco_measure_time = 5;	/* 5, unknown */
+	pdb->in.kvco_measure_time = 5;	/* 5, unknown */
+	pdb->in.bandgap_timer = 4;	/* 4, reg: 0x0430, bit 3 - 5 */
+	pdb->in.pll_wakeup_timer = 5;	/* 5, reg: 0x043c, bit 0 - 2 */
+	pdb->in.plllock_cnt = 1;	/* 1, reg: 0x0488, bit 1 - 2 */
+	pdb->in.plllock_rng = 0;	/* 0, reg: 0x0488, bit 3 - 4 */
+	pdb->in.ssc_center = pll->ssc_center;/* 0, reg: 0x0494, bit 1 */
+	pdb->in.ssc_adj_period = 37;	/* 37, reg: 0x498, bit 0 - 9 */
+	pdb->in.ssc_spread = pll->ssc_ppm / 1000;
+	pdb->in.ssc_freq = pll->ssc_freq;
+
+	pdb->in.pll_ie_trim = 4;	/* 4, reg: 0x0400 */
+	pdb->in.pll_ip_trim = 4;	/* 4, reg: 0x0404 */
+	pdb->in.pll_cpcset_cur = 1;	/* 1, reg: 0x04f0, bit 0 - 2 */
+	pdb->in.pll_cpmset_cur = 1;	/* 1, reg: 0x04f0, bit 3 - 5 */
+	pdb->in.pll_icpmset = 4;	/* 4, reg: 0x04fc, bit 3 - 5 */
+	pdb->in.pll_icpcset = 4;	/* 4, reg: 0x04fc, bit 0 - 2 */
+	pdb->in.pll_icpmset_p = 0;	/* 0, reg: 0x04f4, bit 0 - 2 */
+	pdb->in.pll_icpmset_m = 0;	/* 0, reg: 0x04f4, bit 3 - 5 */
+	pdb->in.pll_icpcset_p = 0;	/* 0, reg: 0x04f8, bit 0 - 2 */
+	pdb->in.pll_icpcset_m = 0;	/* 0, reg: 0x04f8, bit 3 - 5 */
+	pdb->in.pll_lpf_res1 = 3;	/* 3, reg: 0x0504, bit 0 - 3 */
+	pdb->in.pll_lpf_cap1 = 11;	/* 11, reg: 0x0500, bit 0 - 3 */
+	pdb->in.pll_lpf_cap2 = 1;	/* 1, reg: 0x0500, bit 4 - 7 */
+	pdb->in.pll_iptat_trim = 7;
+	pdb->in.pll_c3ctrl = 2;		/* 2 */
+	pdb->in.pll_r3ctrl = 1;		/* 1 */
+}
+
+/*
+ * pll_8996_ssc_calc() - derive spread-spectrum (SSC) register values.
+ *
+ * Computes the SSC period counter from the requested modulation
+ * frequency and the reference clock, then the 16-bit frequency step
+ * size from the VCO rate, the spread (in 0.1% units) and the adjust
+ * period.  Results are stored in pdb->out and later programmed by
+ * pll_db_commit_ssc().
+ */
+static void pll_8996_ssc_calc(struct mdss_pll_resources *pll,
+				struct dsi_pll_db *pdb)
+{
+	u32 period, ssc_period;
+	u32 ref, rem;
+	s64 step_size;
+
+	pr_debug("%s: vco=%lld ref=%lld\n", __func__,
+		pll->vco_current_rate, pll->vco_ref_clk_rate);
+
+	ssc_period = pdb->in.ssc_freq / 500;
+	period = (unsigned long)pll->vco_ref_clk_rate / 1000;
+	ssc_period = CEIL(period, ssc_period);
+	ssc_period -= 1;	/* hardware counter is programmed minus one */
+	pdb->out.ssc_period = ssc_period;
+
+	pr_debug("%s: ssc, freq=%d spread=%d period=%d\n", __func__,
+		pdb->in.ssc_freq, pdb->in.ssc_spread, pdb->out.ssc_period);
+
+	/* step = vco / ref * 2^20 * spread/1000 * (adj_period + 1) */
+	step_size = (u32)pll->vco_current_rate;
+	ref = pll->vco_ref_clk_rate;
+	ref /= 1000;
+	step_size = div_s64(step_size, ref);
+	step_size <<= 20;	/* fixed-point scale, 20 fractional bits */
+	step_size = div_s64(step_size, 1000);
+	step_size *= pdb->in.ssc_spread;
+	step_size = div_s64(step_size, 1000);
+	step_size *= (pdb->in.ssc_adj_period + 1);
+
+	/* divide over the SSC period, rounding up */
+	rem = 0;
+	step_size = div_s64_rem(step_size, ssc_period + 1, &rem);
+	if (rem)
+		step_size++;
+
+	pr_debug("%s: step_size=%lld\n", __func__, step_size);
+
+	step_size &= 0x0ffff;	/* take lower 16 bits */
+
+	pdb->out.ssc_step_size = step_size;
+}
+
+/*
+ * pll_8996_dec_frac_calc() - split the VCO/ref ratio into integer and
+ * 20-bit fractional feedback-divider values.
+ *
+ * Also derives the PLL lock comparison count from the configured lock
+ * counter setting (plllock_cnt) and selects the LDO control value based
+ * on the PLL hardware revision.  Results go to pdb->out.
+ */
+static void pll_8996_dec_frac_calc(struct mdss_pll_resources *pll,
+				struct dsi_pll_db *pdb)
+{
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	s64 multiplier = BIT(20);	/* 20 fractional bits */
+	s64 dec_start_multiple, dec_start, pll_comp_val;
+	s32 duration, div_frac_start;
+	s64 vco_clk_rate = pll->vco_current_rate;
+	s64 fref = pll->vco_ref_clk_rate;
+
+	pr_debug("vco_clk_rate=%lld ref_clk_rate=%lld\n",
+				vco_clk_rate, fref);
+
+	/* (vco/fref) in 20.20 fixed point; split into int + frac parts */
+	dec_start_multiple = div_s64(vco_clk_rate * multiplier, fref);
+	div_s64_rem(dec_start_multiple, multiplier, &div_frac_start);
+
+	dec_start = div_s64(dec_start_multiple, multiplier);
+
+	pout->dec_start = (u32)dec_start;
+	pout->div_frac_start = div_frac_start;
+
+	/* lock-compare measurement window, in ref-clock cycles */
+	if (pin->plllock_cnt == 0)
+		duration = 1024;
+	else if (pin->plllock_cnt == 1)
+		duration = 256;
+	else if (pin->plllock_cnt == 2)
+		duration = 128;
+	else
+		duration = 32;
+
+	pll_comp_val =  duration * dec_start_multiple;
+	pll_comp_val =  div_s64(pll_comp_val, multiplier);
+	do_div(pll_comp_val, 10);
+
+	pout->plllock_cmp = (u32)pll_comp_val;
+
+	pout->pll_txclk_en = 1;
+	/* LDO control differs between PLL revisions */
+	if (pll->revision == MSM8996_DSI_PLL_REVISION_2)
+		pout->cmn_ldo_cntrl = 0x3c;
+	else
+		pout->cmn_ldo_cntrl = 0x1c;
+}
+
+/*
+ * pll_8996_kvco_slop() - KVCO slope for a given VCO rate.
+ *
+ * Returns the characterized KVCO slope used to compute pll_kvco_count.
+ * Band boundaries are closed so that rates sitting exactly on a band
+ * edge (e.g. 2.3 GHz, or the 2.6 GHz maximum VCO rate) do not fall
+ * through the table and yield a zero slope (and hence a zero KVCO
+ * count).  Rates at or below 1.3 GHz are outside the supported VCO
+ * range and return 0, as before.
+ */
+static u32 pll_8996_kvco_slop(u32 vrate)
+{
+	u32 slop = 0;
+
+	if (vrate <= 1300000000UL)
+		slop = 0;		/* below supported VCO range */
+	else if (vrate <= 1800000000UL)
+		slop =  600;
+	else if (vrate <= 2300000000UL)	/* was: 2.3 GHz exactly -> 0 */
+		slop = 400;
+	else if (vrate <= 2600000000UL)	/* was: 2.6 GHz exactly -> 0 */
+		slop = 280;
+
+	return slop;
+}
+
+/*
+ * pll_8996_calc_vco_count() - compute VCO/KVCO measurement counters.
+ *
+ * Derives the 10-bit VCO and KVCO divider reference values and the
+ * expected measurement counts from the reference clock, the VCO rate
+ * and the measurement times configured in pdb->in, plus the fixed
+ * reset state machine settings.  Results go to pdb->out.
+ *
+ * Note: the KVCO count division uses a plain '/' instead of do_div();
+ * do_div() requires a 64-bit lvalue and 'cnt' is a u32, so the previous
+ * do_div(cnt, 100) invoked the macro with an invalid operand type.
+ */
+static void pll_8996_calc_vco_count(struct dsi_pll_db *pdb,
+	s64 vco_clk_rate, s64 fref)
+{
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	s64 data;
+	u32 cnt;
+
+	data = fref * pin->vco_measure_time;
+	do_div(data, 1000000);
+	data &= 0x03ff;	/* 10 bits */
+	data -= 2;
+	pout->pll_vco_div_ref = data;
+
+	data = (unsigned long)vco_clk_rate / 1000000;	/* unit is Mhz */
+	data *= pin->vco_measure_time;
+	do_div(data, 10);
+	pout->pll_vco_count = data; /* reg: 0x0474, 0x0478 */
+
+	data = fref * pin->kvco_measure_time;
+	do_div(data, 1000000);
+	data &= 0x03ff;	/* 10 bits */
+	data -= 1;
+	pout->pll_kvco_div_ref = data;
+
+	cnt = pll_8996_kvco_slop(vco_clk_rate);
+	cnt *= 2;
+	cnt /= 100;	/* u32 divide; do_div() needs a 64-bit lvalue */
+	cnt *= pin->kvco_measure_time;
+	pout->pll_kvco_count = cnt;
+
+	pout->pll_misc1 = 16;
+	pout->pll_resetsm_cntrl = 48;
+	pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;
+	pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;
+	pout->pll_kvco_code = 0;
+}
+
+/*
+ * pll_db_commit_ssc() - program the SSC registers from pdb values.
+ *
+ * Writes the adjust period, SSC period and step size (each split over
+ * low/high byte registers) computed by pll_8996_ssc_calc(), then
+ * enables SSC with the configured center/downspread mode.
+ */
+static void pll_db_commit_ssc(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	void __iomem *pll_base = pll->pll_base;
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	char data;
+
+	data = pin->ssc_adj_period;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_ADJ_PER1, data);
+	data = (pin->ssc_adj_period >> 8);
+	data &= 0x03;	/* 10-bit field: only 2 bits in the high register */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_ADJ_PER2, data);
+
+	data = pout->ssc_period;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_PER1, data);
+	data = (pout->ssc_period >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_PER2, data);
+
+	data = pout->ssc_step_size;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_STEP_SIZE1, data);
+	data = (pout->ssc_step_size >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_STEP_SIZE2, data);
+
+	/* bit 1: center spread select, bit 0: SSC enable */
+	data = (pin->ssc_center & 0x01);
+	data <<= 1;
+	data |= 0x01; /* enable */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_EN_CENTER, data);
+
+	wmb();	/* make sure register committed */
+}
+
+/*
+ * pll_db_commit_common() - program the frequency-independent PLL
+ * registers from the parameter table.
+ *
+ * Writes the trim, charge-pump, loop-filter and reset state machine
+ * settings that do not depend on the requested VCO rate.  The
+ * frequency-dependent registers are handled by pll_db_commit_8996().
+ *
+ * Fix: the ICPMSET register was written with pll_icpcset_m instead of
+ * pll_icpmset_m (copy-paste from the ICPCSET line above); both inputs
+ * default to 0 today, but the wrong field would be latched if the
+ * m-side trim were ever configured.
+ */
+static void pll_db_commit_common(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	void __iomem *pll_base = pll->pll_base;
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	char data;
+
+	/* configure the non frequency dependent pll registers */
+	data = 0;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SYSCLK_EN_RESET, data);
+
+	/* DSIPHY_PLL_CLKBUFLR_EN updated at dsi phy */
+
+	data = pout->pll_txclk_en;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_TXCLK_EN, data);
+
+	data = pout->pll_resetsm_cntrl;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL, data);
+	data = pout->pll_resetsm_cntrl2;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL2, data);
+	data = pout->pll_resetsm_cntrl5;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL5, data);
+
+	data = pout->pll_vco_div_ref;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_DIV_REF1, data);
+	data = (pout->pll_vco_div_ref >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_DIV_REF2, data);
+
+	data = pout->pll_kvco_div_ref;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_DIV_REF1, data);
+	data = (pout->pll_kvco_div_ref >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_DIV_REF2, data);
+
+	data = pout->pll_misc1;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_MISC1, data);
+
+	data = pin->pll_ie_trim;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IE_TRIM, data);
+
+	data = pin->pll_ip_trim;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IP_TRIM, data);
+
+	data = ((pin->pll_cpmset_cur << 3) | pin->pll_cpcset_cur);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CP_SET_CUR, data);
+
+	data = ((pin->pll_icpcset_p << 3) | pin->pll_icpcset_m);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICPCSET, data);
+
+	/* was pll_icpcset_m: ICPMSET takes the m-side trim pair */
+	data = ((pin->pll_icpmset_p << 3) | pin->pll_icpmset_m);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICPMSET, data);
+
+	data = ((pin->pll_icpmset << 3) | pin->pll_icpcset);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICP_SET, data);
+
+	data = ((pdb->in.pll_lpf_cap2 << 4) | pdb->in.pll_lpf_cap1);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_LPF1, data);
+
+	data = pin->pll_iptat_trim;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IPTAT_TRIM, data);
+
+	data = (pdb->in.pll_c3ctrl | (pdb->in.pll_r3ctrl << 4));
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_CRCTRL, data);
+}
+
+/*
+ * pll_db_commit_8996() - program all PLL registers for the new rate.
+ *
+ * Sequence: LDO control, frequency-independent registers, PLL stop and
+ * software reset, clock-select and lane control, then the frequency-
+ * dependent dividers, lock-compare values and measurement counts, and
+ * finally SSC if enabled.  The ordering (stop + reset before the new
+ * dividers are latched) is deliberate; do not reorder.
+ */
+static void pll_db_commit_8996(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	void __iomem *pll_base = pll->pll_base;
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	char data;
+
+	data = pout->cmn_ldo_cntrl;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_LDO_CNTRL, data);
+
+	pll_db_commit_common(pll, pdb);
+
+	/* de assert pll start and apply pll sw reset */
+	/* stop pll */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
+
+	/* pll sw reset */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0x20);
+	wmb();	/* make sure register committed */
+	udelay(10);
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0);
+	wmb();	/* make sure register committed */
+
+	data = pdb->in.dsiclk_sel; /* set dsiclk_sel = 1  */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG1, data);
+
+	data = 0xff; /* data, clk, pll normal operation */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_0, data);
+
+	/* configure the frequency dependent pll registers */
+	data = pout->dec_start;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DEC_START, data);
+
+	/* 20-bit fractional divider, split over three registers */
+	data = pout->div_frac_start;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START1, data);
+	data = (pout->div_frac_start >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START2, data);
+	data = (pout->div_frac_start >> 16);
+	data &= 0x0f;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START3, data);
+
+	/* 18-bit lock-compare value, split over three registers */
+	data = pout->plllock_cmp;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP1, data);
+	data = (pout->plllock_cmp >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP2, data);
+	data = (pout->plllock_cmp >> 16);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP3, data);
+
+	data = ((pin->plllock_cnt << 1) | (pin->plllock_rng << 3));
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP_EN, data);
+
+	data = pout->pll_vco_count;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_COUNT1, data);
+	data = (pout->pll_vco_count >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_COUNT2, data);
+
+	data = pout->pll_kvco_count;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_COUNT1, data);
+	data = (pout->pll_kvco_count >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_COUNT2, data);
+
+	/*
+	 * tx_band = pll_postdiv
+	 * 0: divided by 1 <== for now
+	 * 1: divided by 2
+	 * 2: divided by 4
+	 * 3: divided by 8
+	 */
+	data = (((pout->pll_postdiv - 1) << 4) | pdb->in.pll_lpf_res1);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_LPF2_POSTDIV, data);
+
+	data = (pout->pll_n1div | (pout->pll_n2div << 4));
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG0, data);
+
+	if (pll->ssc_en)
+		pll_db_commit_ssc(pll, pdb);
+
+	wmb();	/* make sure register committed */
+}
+
+/*
+ * pll_source_finding:
+ * Both GLBL_TEST_CTRL and CLKBUFLR_EN are configured
+ * at mdss_dsi_8996_phy_config()
+ */
+static int pll_source_finding(struct mdss_pll_resources *pll)
+{
+	u32 clk_buf_en;
+	u32 glbl_test_ctrl;
+
+	glbl_test_ctrl = MDSS_PLL_REG_R(pll->pll_base,
+				DSIPHY_CMN_GLBL_TEST_CTRL);
+	clk_buf_en = MDSS_PLL_REG_R(pll->pll_base,
+				DSIPHY_PLL_CLKBUFLR_EN);
+
+	/* bit 2 selects the bit-clock source (left/right PHY) */
+	glbl_test_ctrl &= BIT(2);
+	glbl_test_ctrl >>= 2;
+
+	pr_debug("%s: pll=%d clk_buf_en=%x glbl_test_ctrl=%x\n",
+		__func__, pll->index, clk_buf_en, glbl_test_ctrl);
+
+	clk_buf_en &= (PLL_OUTPUT_RIGHT | PLL_OUTPUT_LEFT);
+
+	/* sources from own (left) PLL and drives both PHYs: master */
+	if ((glbl_test_ctrl == PLL_SOURCE_FROM_LEFT) &&
+			(clk_buf_en == PLL_OUTPUT_BOTH))
+		return PLL_MASTER;
+
+	/* sources from the other PLL and drives nothing: slave */
+	if ((glbl_test_ctrl == PLL_SOURCE_FROM_RIGHT) &&
+			(clk_buf_en == PLL_OUTPUT_NONE))
+		return PLL_SLAVE;
+
+	/*
+	 * NOTE(review): standalone is detected as left-sourced with only
+	 * the RIGHT output buffer enabled; confirm against the PHY
+	 * configuration in mdss_dsi_8996_phy_config() that RIGHT (not
+	 * LEFT) is the expected buffer for a single-PLL setup.
+	 */
+	if ((glbl_test_ctrl == PLL_SOURCE_FROM_LEFT) &&
+			(clk_buf_en == PLL_OUTPUT_RIGHT))
+		return PLL_STANDALONE;
+
+	pr_debug("%s: Error pll setup, clk_buf_en=%x glbl_test_ctrl=%x\n",
+			__func__, clk_buf_en, glbl_test_ctrl);
+
+	return PLL_UNKNOWN;
+}
+
+/*
+ * pll_source_setup() - establish the master/slave link between the two
+ * DSI PLLs for split-display, based on the PHY mux configuration.
+ *
+ * Runs only once per PLL instance (source_setup_done guard).  For
+ * standalone or unrecognized configurations no link is created.
+ */
+static void pll_source_setup(struct mdss_pll_resources *pll)
+{
+	int status;
+	struct dsi_pll_db *pdb = (struct dsi_pll_db *)pll->priv;
+	struct mdss_pll_resources *other;
+
+	if (pdb->source_setup_done)
+		return;
+
+	pdb->source_setup_done++;
+
+	status = pll_source_finding(pll);
+
+	if (status == PLL_STANDALONE || status == PLL_UNKNOWN)
+		return;
+
+	/* the peer PLL's resources, via the circular pll_db list */
+	other = pdb->next->pll;
+	if (!other)
+		return;
+
+	pr_debug("%s: status=%d pll=%d other=%d\n", __func__,
+			status, pll->index, other->index);
+
+	if (status == PLL_MASTER)
+		pll->slave = other;
+	else
+		other->slave = pll;
+}
+
+/*
+ * pll_vco_set_rate_8996() - compute and program the VCO for a new rate.
+ *
+ * Resolves master/slave topology (split display), recomputes the
+ * divider/SSC/count parameter tables for @rate, and commits them to the
+ * slave PLL (if any) and then to this PLL.  The resource vote taken for
+ * the register writes is dropped before returning.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
+{
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+	struct mdss_pll_resources *slave;
+	struct dsi_pll_db *pdb;
+
+	pdb = (struct dsi_pll_db *)pll->priv;
+	if (!pdb) {
+		/* was "No prov found": typo, and now matches the
+		 * wording used by shadow_pll_vco_set_rate_8996() */
+		pr_err("No priv data found\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
+		return rc;
+	}
+
+	pll_source_setup(pll);
+
+	pr_debug("%s: ndx=%d base=%p rate=%lu slave=%p\n", __func__,
+				pll->index, pll->pll_base, rate, pll->slave);
+
+	pll->vco_current_rate = rate;
+	pll->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	mdss_dsi_pll_8996_input_init(pll, pdb);
+
+	pll_8996_dec_frac_calc(pll, pdb);
+
+	if (pll->ssc_en)
+		pll_8996_ssc_calc(pll, pdb);
+
+	pll_8996_calc_vco_count(pdb, pll->vco_current_rate,
+					pll->vco_ref_clk_rate);
+
+	/* commit slave if split display is enabled */
+	slave = pll->slave;
+	if (slave)
+		pll_db_commit_8996(slave, pdb);
+
+	/* commit master itself */
+	pll_db_commit_8996(pll, pdb);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+/*
+ * shadow_pll_dynamic_refresh_8996() - queue PLL register writes for the
+ * hardware dynamic-refresh engine (seamless fps change).
+ *
+ * Each MDSS_DYN_PLL_REG_W entry stores two register/value pairs that
+ * the engine replays when dynamic refresh is triggered.  The cached
+ * KVCO/VCO trim codes are written with BIT(5)/BIT(7) set (presumably
+ * the trim-override enables -- confirm against the PHY HPG) so the
+ * stored calibration is reused instead of re-locking from scratch.
+ */
+static void shadow_pll_dynamic_refresh_8996(struct mdss_pll_resources *pll,
+				struct dsi_pll_db *pdb)
+{
+	struct dsi_pll_output *pout = &pdb->out;
+
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL20,
+		DSIPHY_CMN_CTRL_0, DSIPHY_PLL_SYSCLK_EN_RESET,
+		0xFF, 0x0);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL21,
+		DSIPHY_PLL_DEC_START, DSIPHY_PLL_DIV_FRAC_START1,
+		pout->dec_start, (pout->div_frac_start & 0x0FF));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL22,
+		DSIPHY_PLL_DIV_FRAC_START2, DSIPHY_PLL_DIV_FRAC_START3,
+		((pout->div_frac_start >> 8) & 0x0FF),
+		((pout->div_frac_start >> 16) & 0x0F));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL23,
+		DSIPHY_PLL_PLLLOCK_CMP1, DSIPHY_PLL_PLLLOCK_CMP2,
+		(pout->plllock_cmp & 0x0FF),
+		((pout->plllock_cmp >> 8) & 0x0FF));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL24,
+		DSIPHY_PLL_PLLLOCK_CMP3, DSIPHY_PLL_PLL_VCO_TUNE,
+		((pout->plllock_cmp >> 16) & 0x03),
+		(pll->cache_pll_trim_codes[1] | BIT(7))); /* VCO tune*/
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL25,
+		DSIPHY_PLL_KVCO_CODE, DSIPHY_PLL_RESETSM_CNTRL,
+		(pll->cache_pll_trim_codes[0] | BIT(5)), 0x38);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL26,
+		DSIPHY_PLL_PLL_LPF2_POSTDIV, DSIPHY_CMN_PLL_CNTRL,
+		(((pout->pll_postdiv - 1) << 4) | pdb->in.pll_lpf_res1), 0x01);
+	/* trailing CTRL27-29 slots re-assert PLL start (padding writes) */
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+		0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL28,
+		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+		0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL29,
+		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+		0x01, 0x01);
+	MDSS_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, 0x0000001E);
+	MDSS_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, 0x001FFE00);
+
+	/*
+	 * Ensure all the dynamic refresh registers are written before
+	 * dynamic refresh to change the fps is triggered
+	 */
+	wmb();
+}
+
+/*
+ * shadow_pll_vco_set_rate_8996() - program the dynamic-refresh (shadow)
+ * register queue for a new VCO rate.
+ *
+ * Looks up the PLL trim codes previously stored for @rate, recomputes
+ * the divider and count tables, and queues them via
+ * shadow_pll_dynamic_refresh_8996() instead of writing the live PLL.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
+{
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+	struct dsi_pll_db *pdb;
+	s64 vco_clk_rate = (s64)rate;
+
+	if (!pll) {
+		pr_err("PLL data not found\n");
+		return -EINVAL;
+	}
+
+	pdb = pll->priv;
+	if (!pdb) {
+		pr_err("No priv data found\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_read_stored_trim_codes(pll, vco_clk_rate);
+	if (rc) {
+		pr_err("cannot find pll codes rate=%lld\n", vco_clk_rate);
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		/* was "plla": typo */
+		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+		return rc;
+	}
+
+	pr_debug("%s: ndx=%d base=%p rate=%lu\n", __func__,
+			pll->index, pll->pll_base, rate);
+
+	pll->vco_current_rate = rate;
+	pll->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	mdss_dsi_pll_8996_input_init(pll, pdb);
+
+	pll_8996_dec_frac_calc(pll, pdb);
+
+	pll_8996_calc_vco_count(pdb, pll->vco_current_rate,
+			pll->vco_ref_clk_rate);
+
+	shadow_pll_dynamic_refresh_8996(pll, pdb);
+
+	rc = mdss_pll_resource_enable(pll, false);
+	if (rc) {
+		/* was "Failed to enable": this is the disable path */
+		pr_err("Failed to disable mdss dsi pll=%d\n", pll->index);
+		return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * pll_vco_get_rate_8996() - read back the VCO rate from hardware.
+ *
+ * Reconstructs rate = ref * (dec_start + div_frac_start / 2^20) from
+ * the programmed decimal and 20-bit fractional divider registers.
+ * Returns 0 when the GDSC is off (registers unreadable).
+ */
+unsigned long pll_vco_get_rate_8996(struct clk *c)
+{
+	u64 vco_rate, multiplier = BIT(20);
+	s32 div_frac_start;
+	u32 dec_start;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	u64 ref_clk = vco->ref_clk_rate;
+	int rc;
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (is_gdsc_disabled(pll))
+		return 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+		/* NOTE(review): a negative errno is returned here as an
+		 * unsigned rate; confirm whether 0 (as in the GDSC-off
+		 * case above) was intended instead. */
+		return rc;
+	}
+
+	dec_start = MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DEC_START);
+	dec_start &= 0x0ff;
+	pr_debug("dec_start = 0x%x\n", dec_start);
+
+	/* assemble 20-bit fraction from the three byte registers */
+	div_frac_start = (MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DIV_FRAC_START3) & 0x0f) << 16;
+	div_frac_start |= (MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DIV_FRAC_START2) & 0x0ff) << 8;
+	div_frac_start |= MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DIV_FRAC_START1) & 0x0ff;
+	pr_debug("div_frac_start = 0x%x\n", div_frac_start);
+
+	vco_rate = ref_clk * dec_start;
+	vco_rate += ((ref_clk * div_frac_start) / multiplier);
+
+	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return (unsigned long)vco_rate;
+}
+
+/*
+ * pll_vco_round_rate_8996() - clamp a requested rate to the VCO range.
+ *
+ * Rejects rates that would need a post divider > 15 (i.e. below
+ * ~86.67 MHz, min_rate / 15) and clamps everything else into
+ * [min_rate, max_rate].
+ *
+ * Return: the rounded rate, or -EINVAL for unsupported rates.
+ */
+long pll_vco_round_rate_8996(struct clk *c, unsigned long rate)
+{
+	unsigned long rrate = rate;
+	u32 div;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+
+	/* guard the division below against rate == 0 */
+	if (!rate) {
+		pr_err("rate=%lu NOT supported\n", rate);
+		return -EINVAL;
+	}
+
+	div = vco->min_rate / rate;
+	if (div > 15) {
+		/* rate < 86.67 Mhz */
+		pr_err("rate=%lu NOT supported\n", rate);
+		return -EINVAL;
+	}
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	return rrate;
+}
+
+/*
+ * pll_vco_handoff_8996() - take over a PLL left running by the
+ * bootloader (e.g. continuous splash).
+ *
+ * If the PLL is locked, the resource vote taken here is intentionally
+ * kept (handoff_resources) and the current hardware rate is adopted;
+ * otherwise the vote is dropped and the clock is reported disabled.
+ */
+enum handoff pll_vco_handoff_8996(struct clk *c)
+{
+	int rc;
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (is_gdsc_disabled(pll))
+		return HANDOFF_DISABLED_CLK;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+		return ret;
+	}
+
+	if (pll_is_pll_locked_8996(pll)) {
+		pll->handoff_resources = true;
+		pll->pll_on = true;
+		c->rate = pll_vco_get_rate_8996(c);
+		ret = HANDOFF_ENABLED_CLK;
+	} else {
+		mdss_pll_resource_enable(pll, false);
+	}
+
+	return ret;
+}
+
+/* Shadow (dynamic-refresh) clocks are never enabled at boot. */
+enum handoff shadow_pll_vco_handoff_8996(struct clk *c)
+{
+	return HANDOFF_DISABLED_CLK;
+}
+
+/*
+ * pll_vco_prepare_8996() - power up resources and lock the DSI PLL.
+ *
+ * Re-applies the cached VCO rate (the framework skips set_rate when the
+ * rate is unchanged across unprepare/prepare) before running the PLL
+ * enable sequence.  On any failure, the resource vote taken here is
+ * dropped again.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int pll_vco_prepare_8996(struct clk *c)
+{
+	int rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll) {
+		pr_err("Dsi pll resources are not available\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("ndx=%d Failed to enable mdss dsi pll resources\n",
+					pll->index);
+		return rc;
+	}
+
+	if ((pll->vco_cached_rate != 0)
+	    && (pll->vco_cached_rate == c->rate)) {
+		rc = c->ops->set_rate(c, pll->vco_cached_rate);
+		if (rc) {
+			/* arguments were swapped: rc printed as index */
+			pr_err("index=%d vco_set_rate failed. rc=%d\n",
+				pll->index, rc);
+			mdss_pll_resource_enable(pll, false);
+			goto error;
+		}
+	}
+
+	rc = dsi_pll_enable(c);
+
+	if (rc) {
+		mdss_pll_resource_enable(pll, false);
+		pr_err("ndx=%d failed to enable dsi pll\n", pll->index);
+	}
+
+error:
+	return rc;
+}
+
+/*
+ * pll_vco_unprepare_8996() - cache the current rate and gate the PLL.
+ *
+ * The cached rate lets pll_vco_prepare_8996() re-program the hardware
+ * on the next prepare without a new set_rate call from the framework.
+ */
+void pll_vco_unprepare_8996(struct clk *c)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll) {
+		pr_err("Dsi pll resources are not available\n");
+		return;
+	}
+
+	pll->vco_cached_rate = c->rate;
+	dsi_pll_disable(c);
+}
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-8996.c b/drivers/clk/msm/mdss/mdss-dsi-pll-8996.c
new file mode 100644
index 000000000000..1de1b997a041
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-8996.c
@@ -0,0 +1,566 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/workqueue.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8996.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-dsi-pll-8996.h"
+
+/* Delay (us) after VCO configuration before the PLL output is stable */
+#define VCO_DELAY_USEC		1
+
+/* Per-instance parameter tables, one per DSI PLL (see dsi_pll_db) */
+static struct dsi_pll_db pll_db[DSI_PLL_NUM];
+
+/*
+ * Op tables filled in at registration time (dsi_pll_clock_register_8996)
+ * by copying the generic div/mux ops and overriding prepare/rate hooks.
+ */
+static struct clk_ops n2_clk_src_ops;
+static struct clk_ops shadow_n2_clk_src_ops;
+static struct clk_ops byte_clk_src_ops;
+static struct clk_ops post_n1_div_clk_src_ops;
+static struct clk_ops shadow_post_n1_div_clk_src_ops;
+
+static struct clk_ops clk_ops_gen_mux_dsi;
+
+/* Op structures */
+static struct clk_ops clk_ops_dsi_vco = {
+	.set_rate = pll_vco_set_rate_8996,
+	.round_rate = pll_vco_round_rate_8996,
+	.handoff = pll_vco_handoff_8996,
+	.prepare = pll_vco_prepare_8996,
+	.unprepare = pll_vco_unprepare_8996,
+};
+
+static struct clk_div_ops post_n1_div_ops = {
+	.set_div = post_n1_div_set_div,
+	.get_div = post_n1_div_get_div,
+};
+
+static struct clk_div_ops n2_div_ops = {	/* hr_oclk3 */
+	.set_div = n2_div_set_div,
+	.get_div = n2_div_get_div,
+};
+
+static struct clk_mux_ops mdss_byte_mux_ops = {
+	.set_mux_sel = set_mdss_byte_mux_sel_8996,
+	.get_mux_sel = get_mdss_byte_mux_sel_8996,
+};
+
+static struct clk_mux_ops mdss_pixel_mux_ops = {
+	.set_mux_sel = set_mdss_pixel_mux_sel_8996,
+	.get_mux_sel = get_mdss_pixel_mux_sel_8996,
+};
+
+/*
+ * Shadow ops for dynamic refresh: no prepare/unprepare, and handoff
+ * always reports disabled (shadow clocks only stage register writes).
+ */
+static struct clk_ops clk_ops_shadow_dsi_vco = {
+	.set_rate = shadow_pll_vco_set_rate_8996,
+	.round_rate = pll_vco_round_rate_8996,
+	.handoff = shadow_pll_vco_handoff_8996,
+};
+
+static struct clk_div_ops shadow_post_n1_div_ops = {
+	.set_div = post_n1_div_set_div,
+};
+
+static struct clk_div_ops shadow_n2_div_ops = {
+	.set_div = shadow_n2_div_set_div,
+};
+
+/* VCO clocks: 1.3-2.6 GHz range, 19.2 MHz reference, one live + one
+ * shadow (dynamic refresh) instance per DSI PLL. */
+static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1300000000UL,
+	.max_rate = 2600000000UL,
+	.pll_en_seq_cnt = 1,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
+	.c = {
+		.dbg_name = "dsi0pll_vco_clk_8996",
+		.ops = &clk_ops_dsi_vco,
+		CLK_INIT(dsi0pll_vco_clk.c),
+	},
+};
+
+/* NOTE(review): unlike dsi1pll_shadow_vco_clk below, this instance sets
+ * no pll_en_seq_cnt/pll_enable_seqs.  Harmless as long as the shadow
+ * ops never run an enable sequence, but confirm the asymmetry between
+ * the two shadow VCOs is intentional. */
+static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000u,
+	.min_rate = 1300000000u,
+	.max_rate = 2600000000u,
+	.c = {
+		.dbg_name = "dsi0pll_shadow_vco_clk",
+		.ops = &clk_ops_shadow_dsi_vco,
+		CLK_INIT(dsi0pll_shadow_vco_clk.c),
+	},
+};
+
+static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1300000000UL,
+	.max_rate = 2600000000UL,
+	.pll_en_seq_cnt = 1,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
+	.c = {
+		.dbg_name = "dsi1pll_vco_clk_8996",
+		.ops = &clk_ops_dsi_vco,
+		CLK_INIT(dsi1pll_vco_clk.c),
+	},
+};
+
+static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000u,
+	.min_rate = 1300000000u,
+	.max_rate = 2600000000u,
+	.pll_en_seq_cnt = 1,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
+	.c = {
+		.dbg_name = "dsi1pll_shadow_vco_clk",
+		.ops = &clk_ops_shadow_dsi_vco,
+		CLK_INIT(dsi1pll_shadow_vco_clk.c),
+	},
+};
+
+static struct div_clk dsi0pll_post_n1_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &post_n1_div_ops,
+ .c = {
+ .parent = &dsi0pll_vco_clk.c,
+ .dbg_name = "dsi0pll_post_n1_div_clk",
+ .ops = &post_n1_div_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_post_n1_div_clk.c),
+ },
+};
+
+static struct div_clk dsi0pll_shadow_post_n1_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &shadow_post_n1_div_ops,
+ .c = {
+ .parent = &dsi0pll_shadow_vco_clk.c,
+ .dbg_name = "dsi0pll_shadow_post_n1_div_clk",
+ .ops = &shadow_post_n1_div_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_shadow_post_n1_div_clk.c),
+ },
+};
+
+static struct div_clk dsi1pll_post_n1_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &post_n1_div_ops,
+ .c = {
+ .parent = &dsi1pll_vco_clk.c,
+ .dbg_name = "dsi1pll_post_n1_div_clk",
+ .ops = &post_n1_div_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_post_n1_div_clk.c),
+ },
+};
+
+static struct div_clk dsi1pll_shadow_post_n1_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &shadow_post_n1_div_ops,
+ .c = {
+ .parent = &dsi1pll_shadow_vco_clk.c,
+ .dbg_name = "dsi1pll_shadow_post_n1_div_clk",
+ .ops = &shadow_post_n1_div_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_shadow_post_n1_div_clk.c),
+ },
+};
+
+static struct div_clk dsi0pll_n2_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &n2_div_ops,
+ .c = {
+ .parent = &dsi0pll_post_n1_div_clk.c,
+ .dbg_name = "dsi0pll_n2_div_clk",
+ .ops = &n2_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_n2_div_clk.c),
+ },
+};
+
+static struct div_clk dsi0pll_shadow_n2_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &shadow_n2_div_ops,
+ .c = {
+ .parent = &dsi0pll_shadow_post_n1_div_clk.c,
+ .dbg_name = "dsi0pll_shadow_n2_div_clk",
+ .ops = &shadow_n2_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_shadow_n2_div_clk.c),
+ },
+};
+
+static struct div_clk dsi1pll_n2_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &n2_div_ops,
+ .c = {
+ .parent = &dsi1pll_post_n1_div_clk.c,
+ .dbg_name = "dsi1pll_n2_div_clk",
+ .ops = &n2_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_n2_div_clk.c),
+ },
+};
+
+static struct div_clk dsi1pll_shadow_n2_div_clk = {
+ .data = {
+ .max_div = 15,
+ .min_div = 1,
+ },
+ .ops = &shadow_n2_div_ops,
+ .c = {
+ .parent = &dsi1pll_shadow_post_n1_div_clk.c,
+ .dbg_name = "dsi1pll_shadow_n2_div_clk",
+ .ops = &shadow_n2_clk_src_ops,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_shadow_n2_div_clk.c),
+ },
+};
+
+static struct div_clk dsi0pll_pixel_clk_src = {
+ .data = {
+ .div = 2,
+ .min_div = 2,
+ .max_div = 2,
+ },
+ .c = {
+ .parent = &dsi0pll_n2_div_clk.c,
+ .dbg_name = "dsi0pll_pixel_clk_src",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_pixel_clk_src.c),
+ },
+};
+
+static struct div_clk dsi0pll_shadow_pixel_clk_src = {
+ .data = {
+ .div = 2,
+ .min_div = 2,
+ .max_div = 2,
+ },
+ .c = {
+ .parent = &dsi0pll_shadow_n2_div_clk.c,
+ .dbg_name = "dsi0pll_shadow_pixel_clk_src",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_shadow_pixel_clk_src.c),
+ },
+};
+
+static struct div_clk dsi1pll_pixel_clk_src = {
+ .data = {
+ .div = 2,
+ .min_div = 2,
+ .max_div = 2,
+ },
+ .c = {
+ .parent = &dsi1pll_n2_div_clk.c,
+ .dbg_name = "dsi1pll_pixel_clk_src",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_pixel_clk_src.c),
+ },
+};
+
+static struct div_clk dsi1pll_shadow_pixel_clk_src = {
+ .data = {
+ .div = 2,
+ .min_div = 2,
+ .max_div = 2,
+ },
+ .c = {
+ .parent = &dsi1pll_shadow_n2_div_clk.c,
+ .dbg_name = "dsi1pll_shadow_pixel_clk_src",
+ .ops = &clk_ops_div,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_shadow_pixel_clk_src.c),
+ },
+};
+
+static struct mux_clk dsi0pll_pixel_clk_mux = {
+ .num_parents = 2,
+ .parents = (struct clk_src[]) {
+ {&dsi0pll_pixel_clk_src.c, 0},
+ {&dsi0pll_shadow_pixel_clk_src.c, 1},
+ },
+ .ops = &mdss_pixel_mux_ops,
+ .c = {
+ .parent = &dsi0pll_pixel_clk_src.c,
+ .dbg_name = "dsi0pll_pixel_clk_mux",
+ .ops = &clk_ops_gen_mux_dsi,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_pixel_clk_mux.c),
+ }
+};
+
+static struct mux_clk dsi1pll_pixel_clk_mux = {
+ .num_parents = 2,
+ .parents = (struct clk_src[]) {
+ {&dsi1pll_pixel_clk_src.c, 0},
+ {&dsi1pll_shadow_pixel_clk_src.c, 1},
+ },
+ .ops = &mdss_pixel_mux_ops,
+ .c = {
+ .parent = &dsi1pll_pixel_clk_src.c,
+ .dbg_name = "dsi1pll_pixel_clk_mux",
+ .ops = &clk_ops_gen_mux_dsi,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_pixel_clk_mux.c),
+ }
+};
+
+static struct div_clk dsi0pll_byte_clk_src = {
+ .data = {
+ .div = 8,
+ .min_div = 8,
+ .max_div = 8,
+ },
+ .c = {
+ .parent = &dsi0pll_post_n1_div_clk.c,
+ .dbg_name = "dsi0pll_byte_clk_src",
+ .ops = &clk_ops_div,
+ CLK_INIT(dsi0pll_byte_clk_src.c),
+ },
+};
+
+static struct div_clk dsi0pll_shadow_byte_clk_src = {
+ .data = {
+ .div = 8,
+ .min_div = 8,
+ .max_div = 8,
+ },
+ .c = {
+ .parent = &dsi0pll_shadow_post_n1_div_clk.c,
+ .dbg_name = "dsi0pll_shadow_byte_clk_src",
+ .ops = &clk_ops_div,
+ CLK_INIT(dsi0pll_shadow_byte_clk_src.c),
+ },
+};
+
+static struct div_clk dsi1pll_byte_clk_src = {
+ .data = {
+ .div = 8,
+ .min_div = 8,
+ .max_div = 8,
+ },
+ .c = {
+ .parent = &dsi1pll_post_n1_div_clk.c,
+ .dbg_name = "dsi1pll_byte_clk_src",
+ .ops = &clk_ops_div,
+ CLK_INIT(dsi1pll_byte_clk_src.c),
+ },
+};
+
+static struct div_clk dsi1pll_shadow_byte_clk_src = {
+ .data = {
+ .div = 8,
+ .min_div = 8,
+ .max_div = 8,
+ },
+ .c = {
+ .parent = &dsi1pll_shadow_post_n1_div_clk.c,
+ .dbg_name = "dsi1pll_shadow_byte_clk_src",
+ .ops = &clk_ops_div,
+ CLK_INIT(dsi1pll_shadow_byte_clk_src.c),
+ },
+};
+
+static struct mux_clk dsi0pll_byte_clk_mux = {
+ .num_parents = 2,
+ .parents = (struct clk_src[]) {
+ {&dsi0pll_byte_clk_src.c, 0},
+ {&dsi0pll_shadow_byte_clk_src.c, 1},
+ },
+ .ops = &mdss_byte_mux_ops,
+ .c = {
+ .parent = &dsi0pll_byte_clk_src.c,
+ .dbg_name = "dsi0pll_byte_clk_mux",
+ .ops = &clk_ops_gen_mux_dsi,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi0pll_byte_clk_mux.c),
+ }
+};
+static struct mux_clk dsi1pll_byte_clk_mux = {
+ .num_parents = 2,
+ .parents = (struct clk_src[]) {
+ {&dsi1pll_byte_clk_src.c, 0},
+ {&dsi1pll_shadow_byte_clk_src.c, 1},
+ },
+ .ops = &mdss_byte_mux_ops,
+ .c = {
+ .parent = &dsi1pll_byte_clk_src.c,
+ .dbg_name = "dsi1pll_byte_clk_mux",
+ .ops = &clk_ops_gen_mux_dsi,
+ .flags = CLKFLAG_NO_RATE_CACHE,
+ CLK_INIT(dsi1pll_byte_clk_mux.c),
+ }
+};
+
+/* Clock lookup tables registered by dsi_pll_clock_register_8996(),
+ * one per DSI PLL instance. */
+static struct clk_lookup mdss_dsi_pllcc_8996[] = {
+	CLK_LIST(dsi0pll_byte_clk_mux),
+	CLK_LIST(dsi0pll_byte_clk_src),
+	CLK_LIST(dsi0pll_pixel_clk_mux),
+	CLK_LIST(dsi0pll_pixel_clk_src),
+	CLK_LIST(dsi0pll_n2_div_clk),
+	CLK_LIST(dsi0pll_post_n1_div_clk),
+	CLK_LIST(dsi0pll_vco_clk),
+	CLK_LIST(dsi0pll_shadow_byte_clk_src),
+	CLK_LIST(dsi0pll_shadow_pixel_clk_src),
+	CLK_LIST(dsi0pll_shadow_n2_div_clk),
+	CLK_LIST(dsi0pll_shadow_post_n1_div_clk),
+	CLK_LIST(dsi0pll_shadow_vco_clk),
+};
+
+static struct clk_lookup mdss_dsi_pllcc_8996_1[] = {
+	CLK_LIST(dsi1pll_byte_clk_mux),
+	CLK_LIST(dsi1pll_byte_clk_src),
+	CLK_LIST(dsi1pll_pixel_clk_mux),
+	CLK_LIST(dsi1pll_pixel_clk_src),
+	CLK_LIST(dsi1pll_n2_div_clk),
+	CLK_LIST(dsi1pll_post_n1_div_clk),
+	CLK_LIST(dsi1pll_vco_clk),
+	CLK_LIST(dsi1pll_shadow_byte_clk_src),
+	CLK_LIST(dsi1pll_shadow_pixel_clk_src),
+	CLK_LIST(dsi1pll_shadow_n2_div_clk),
+	CLK_LIST(dsi1pll_shadow_post_n1_div_clk),
+	CLK_LIST(dsi1pll_shadow_vco_clk),
+};
+
+/*
+ * dsi_pll_clock_register_8996() - register the DSI PLL clock tree.
+ *
+ * Wires up the per-instance parameter table (pll_db), finalizes the op
+ * tables copied from the generic div/mux ops, applies SSC defaults,
+ * attaches the platform resources to each clock node and registers the
+ * clock list for the PLL instance selected by pll_res->index.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, -EPROBE_DEFER when
+ * the PLL resources are not ready yet, or the registration error.
+ */
+int dsi_pll_clock_register_8996(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = 0, ndx;
+	int const ssc_freq_default = 31500; /* default h/w recommended value */
+	int const ssc_ppm_default = 5000; /* default h/w recommended value */
+	struct dsi_pll_db *pdb;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("Invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	if (!pll_res || !pll_res->pll_base) {
+		pr_err("Invalid PLL resources\n");
+		return -EPROBE_DEFER;
+	}
+
+	if (pll_res->index >= DSI_PLL_NUM) {
+		pr_err("pll ndx=%d is NOT supported\n", pll_res->index);
+		return -EINVAL;
+	}
+
+	/* link this instance's db to the peer's (circular, 2 entries) */
+	ndx = pll_res->index;
+	pdb = &pll_db[ndx];
+	pll_res->priv = pdb;
+	pdb->pll = pll_res;
+	ndx++;
+	ndx %= DSI_PLL_NUM;
+	pdb->next = &pll_db[ndx];
+
+	/* Set clock source operations */
+
+	/* hr_oclk3, pixel */
+	n2_clk_src_ops = clk_ops_slave_div;
+	n2_clk_src_ops.prepare = mdss_pll_div_prepare;
+
+	shadow_n2_clk_src_ops = clk_ops_slave_div;
+
+	/* hr_ockl2, byte, vco pll */
+	post_n1_div_clk_src_ops = clk_ops_div;
+	post_n1_div_clk_src_ops.prepare = mdss_pll_div_prepare;
+
+	shadow_post_n1_div_clk_src_ops = clk_ops_div;
+
+	byte_clk_src_ops = clk_ops_div;
+	byte_clk_src_ops.prepare = mdss_pll_div_prepare;
+
+	clk_ops_gen_mux_dsi = clk_ops_gen_mux;
+	clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
+	clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
+
+	if (pll_res->ssc_en) {
+		if (!pll_res->ssc_freq)
+			pll_res->ssc_freq = ssc_freq_default;
+		if (!pll_res->ssc_ppm)
+			pll_res->ssc_ppm = ssc_ppm_default;
+	}
+
+	/* identical for both instances; hoisted out of the branches */
+	pll_res->vco_delay = VCO_DELAY_USEC;
+
+	/* Set client data to mux, div and vco clocks.  */
+	if (pll_res->index == DSI_PLL_1) {
+		dsi1pll_byte_clk_src.priv = pll_res;
+		dsi1pll_pixel_clk_src.priv = pll_res;
+		dsi1pll_post_n1_div_clk.priv = pll_res;
+		dsi1pll_n2_div_clk.priv = pll_res;
+		dsi1pll_vco_clk.priv = pll_res;
+
+		dsi1pll_shadow_byte_clk_src.priv = pll_res;
+		dsi1pll_shadow_pixel_clk_src.priv = pll_res;
+		dsi1pll_shadow_post_n1_div_clk.priv = pll_res;
+		dsi1pll_shadow_n2_div_clk.priv = pll_res;
+		dsi1pll_shadow_vco_clk.priv = pll_res;
+
+		rc = of_msm_clock_register(pdev->dev.of_node,
+				mdss_dsi_pllcc_8996_1,
+				ARRAY_SIZE(mdss_dsi_pllcc_8996_1));
+	} else {
+		dsi0pll_byte_clk_src.priv = pll_res;
+		dsi0pll_pixel_clk_src.priv = pll_res;
+		dsi0pll_post_n1_div_clk.priv = pll_res;
+		dsi0pll_n2_div_clk.priv = pll_res;
+		dsi0pll_vco_clk.priv = pll_res;
+
+		dsi0pll_shadow_byte_clk_src.priv = pll_res;
+		dsi0pll_shadow_pixel_clk_src.priv = pll_res;
+		dsi0pll_shadow_post_n1_div_clk.priv = pll_res;
+		dsi0pll_shadow_n2_div_clk.priv = pll_res;
+		dsi0pll_shadow_vco_clk.priv = pll_res;
+
+		rc = of_msm_clock_register(pdev->dev.of_node,
+				mdss_dsi_pllcc_8996,
+				ARRAY_SIZE(mdss_dsi_pllcc_8996));
+	}
+
+	if (!rc) {
+		pr_info("Registered DSI PLL ndx=%d clocks successfully\n",
+			pll_res->index);
+	}
+
+	return rc;
+}
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-8996.h b/drivers/clk/msm/mdss/mdss-dsi-pll-8996.h
new file mode 100644
index 000000000000..611e79101d4f
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-8996.h
@@ -0,0 +1,221 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MDSS_DSI_PLL_8996_H
+#define MDSS_DSI_PLL_8996_H
+
+#define DSIPHY_CMN_CLK_CFG0 0x0010
+#define DSIPHY_CMN_CLK_CFG1 0x0014
+#define DSIPHY_CMN_GLBL_TEST_CTRL 0x0018
+
+#define DSIPHY_CMN_PLL_CNTRL 0x0048
+#define DSIPHY_CMN_CTRL_0 0x001c
+#define DSIPHY_CMN_CTRL_1 0x0020
+
+#define DSIPHY_CMN_LDO_CNTRL 0x004c
+
+#define DSIPHY_PLL_IE_TRIM 0x0400
+#define DSIPHY_PLL_IP_TRIM 0x0404
+
+#define DSIPHY_PLL_IPTAT_TRIM 0x0410
+
+#define DSIPHY_PLL_CLKBUFLR_EN 0x041c
+
+#define DSIPHY_PLL_SYSCLK_EN_RESET 0x0428
+#define DSIPHY_PLL_RESETSM_CNTRL 0x042c
+#define DSIPHY_PLL_RESETSM_CNTRL2 0x0430
+#define DSIPHY_PLL_RESETSM_CNTRL3 0x0434
+#define DSIPHY_PLL_RESETSM_CNTRL4 0x0438
+#define DSIPHY_PLL_RESETSM_CNTRL5 0x043c
+#define DSIPHY_PLL_KVCO_DIV_REF1 0x0440
+#define DSIPHY_PLL_KVCO_DIV_REF2 0x0444
+#define DSIPHY_PLL_KVCO_COUNT1 0x0448
+#define DSIPHY_PLL_KVCO_COUNT2 0x044c
+#define DSIPHY_PLL_VREF_CFG1 0x045c
+
+#define DSIPHY_PLL_KVCO_CODE 0x0458
+
+#define DSIPHY_PLL_VCO_DIV_REF1 0x046c
+#define DSIPHY_PLL_VCO_DIV_REF2 0x0470
+#define DSIPHY_PLL_VCO_COUNT1 0x0474
+#define DSIPHY_PLL_VCO_COUNT2 0x0478
+#define DSIPHY_PLL_PLLLOCK_CMP1 0x047c
+#define DSIPHY_PLL_PLLLOCK_CMP2 0x0480
+#define DSIPHY_PLL_PLLLOCK_CMP3 0x0484
+#define DSIPHY_PLL_PLLLOCK_CMP_EN 0x0488
+#define DSIPHY_PLL_PLL_VCO_TUNE 0x048C
+#define DSIPHY_PLL_DEC_START 0x0490
+#define DSIPHY_PLL_SSC_EN_CENTER 0x0494
+#define DSIPHY_PLL_SSC_ADJ_PER1 0x0498
+#define DSIPHY_PLL_SSC_ADJ_PER2 0x049c
+#define DSIPHY_PLL_SSC_PER1 0x04a0
+#define DSIPHY_PLL_SSC_PER2 0x04a4
+#define DSIPHY_PLL_SSC_STEP_SIZE1 0x04a8
+#define DSIPHY_PLL_SSC_STEP_SIZE2 0x04ac
+#define DSIPHY_PLL_DIV_FRAC_START1 0x04b4
+#define DSIPHY_PLL_DIV_FRAC_START2 0x04b8
+#define DSIPHY_PLL_DIV_FRAC_START3 0x04bc
+#define DSIPHY_PLL_TXCLK_EN 0x04c0
+#define DSIPHY_PLL_PLL_CRCTRL 0x04c4
+
+#define DSIPHY_PLL_RESET_SM_READY_STATUS 0x04cc
+
+#define DSIPHY_PLL_PLL_MISC1 0x04e8
+
+#define DSIPHY_PLL_CP_SET_CUR 0x04f0
+#define DSIPHY_PLL_PLL_ICPMSET 0x04f4
+#define DSIPHY_PLL_PLL_ICPCSET 0x04f8
+#define DSIPHY_PLL_PLL_ICP_SET 0x04fc
+#define DSIPHY_PLL_PLL_LPF1 0x0500
+#define DSIPHY_PLL_PLL_LPF2_POSTDIV 0x0504
+#define DSIPHY_PLL_PLL_BANDGAP 0x0508
+
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15 0x050
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19 0x060
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20 0x064
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21 0x068
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22 0x06C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23 0x070
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24 0x074
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25 0x078
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26 0x07C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27 0x080
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28 0x084
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29 0x088
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR 0x094
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2 0x098
+
+struct dsi_pll_input {
+ u32 fref; /* 19.2 Mhz, reference clk */
+ u32 fdata; /* bit clock rate */
+ u32 dsiclk_sel; /* 1, reg: 0x0014 */
+ u32 n2div; /* 1, reg: 0x0010, bit 4-7 */
+ u32 ssc_en; /* 1, reg: 0x0494, bit 0 */
+ u32 ldo_en; /* 0, reg: 0x004c, bit 0 */
+
+ /* fixed */
+ u32 refclk_dbler_en; /* 0, reg: 0x04c0, bit 1 */
+ u32 vco_measure_time; /* 5, unknown */
+ u32 kvco_measure_time; /* 5, unknown */
+ u32 bandgap_timer; /* 4, reg: 0x0430, bit 3 - 5 */
+ u32 pll_wakeup_timer; /* 5, reg: 0x043c, bit 0 - 2 */
+ u32 plllock_cnt; /* 1, reg: 0x0488, bit 1 - 2 */
+ u32 plllock_rng; /* 1, reg: 0x0488, bit 3 - 4 */
+ u32 ssc_center; /* 0, reg: 0x0494, bit 1 */
+ u32 ssc_adj_period; /* 37, reg: 0x498, bit 0 - 9 */
+ u32 ssc_spread; /* 0.005 */
+ u32 ssc_freq; /* unknown */
+ u32 pll_ie_trim; /* 4, reg: 0x0400 */
+ u32 pll_ip_trim; /* 4, reg: 0x0404 */
+ u32 pll_iptat_trim; /* reg: 0x0410 */
+ u32 pll_cpcset_cur; /* 1, reg: 0x04f0, bit 0 - 2 */
+ u32 pll_cpmset_cur; /* 1, reg: 0x04f0, bit 3 - 5 */
+
+ u32 pll_icpmset; /* 4, reg: 0x04fc, bit 3 - 5 */
+ u32 pll_icpcset; /* 4, reg: 0x04fc, bit 0 - 2 */
+
+ u32 pll_icpmset_p; /* 0, reg: 0x04f4, bit 0 - 2 */
+ u32 pll_icpmset_m; /* 0, reg: 0x04f4, bit 3 - 5 */
+
+ u32 pll_icpcset_p; /* 0, reg: 0x04f8, bit 0 - 2 */
+ u32 pll_icpcset_m; /* 0, reg: 0x04f8, bit 3 - 5 */
+
+ u32 pll_lpf_res1; /* 3, reg: 0x0504, bit 0 - 3 */
+ u32 pll_lpf_cap1; /* 11, reg: 0x0500, bit 0 - 3 */
+ u32 pll_lpf_cap2; /* 1, reg: 0x0500, bit 4 - 7 */
+ u32 pll_c3ctrl; /* 2, reg: 0x04c4 */
+ u32 pll_r3ctrl; /* 1, reg: 0x04c4 */
+};
+
+struct dsi_pll_output {
+ u32 pll_txclk_en; /* reg: 0x04c0 */
+ u32 dec_start; /* reg: 0x0490 */
+ u32 div_frac_start; /* reg: 0x04b4, 0x4b8, 0x04bc */
+ u32 ssc_period; /* reg: 0x04a0, 0x04a4 */
+ u32 ssc_step_size; /* reg: 0x04a8, 0x04ac */
+ u32 plllock_cmp; /* reg: 0x047c, 0x0480, 0x0484 */
+ u32 pll_vco_div_ref; /* reg: 0x046c, 0x0470 */
+ u32 pll_vco_count; /* reg: 0x0474, 0x0478 */
+ u32 pll_kvco_div_ref; /* reg: 0x0440, 0x0444 */
+ u32 pll_kvco_count; /* reg: 0x0448, 0x044c */
+ u32 pll_misc1; /* reg: 0x04e8 */
+ u32 pll_lpf2_postdiv; /* reg: 0x0504 */
+ u32 pll_resetsm_cntrl; /* reg: 0x042c */
+ u32 pll_resetsm_cntrl2; /* reg: 0x0430 */
+ u32 pll_resetsm_cntrl5; /* reg: 0x043c */
+ u32 pll_kvco_code; /* reg: 0x0458 */
+
+ u32 cmn_clk_cfg0; /* reg: 0x0010 */
+ u32 cmn_clk_cfg1; /* reg: 0x0014 */
+ u32 cmn_ldo_cntrl; /* reg: 0x004c */
+
+ u32 pll_postdiv; /* vco */
+ u32 pll_n1div; /* vco */
+ u32 pll_n2div; /* hr_oclk3, pixel */
+ u32 fcvo;
+};
+
+enum {
+ DSI_PLL_0,
+ DSI_PLL_1,
+ DSI_PLL_NUM
+};
+
+/*
+ * Per-PLL bookkeeping: ties a PLL resource to its input parameter set
+ * and the register values computed from it.
+ */
+struct dsi_pll_db {
+ struct dsi_pll_db *next; /* link to the other PLL's db */
+ struct mdss_pll_resources *pll;
+ struct dsi_pll_input in; /* requested configuration */
+ struct dsi_pll_output out; /* computed register values */
+ int source_setup_done; /* nonzero once source config applied */
+};
+
+enum {
+ PLL_OUTPUT_NONE,
+ PLL_OUTPUT_RIGHT,
+ PLL_OUTPUT_LEFT,
+ PLL_OUTPUT_BOTH
+};
+
+enum {
+ PLL_SOURCE_FROM_LEFT,
+ PLL_SOURCE_FROM_RIGHT
+};
+
+enum {
+ PLL_UNKNOWN,
+ PLL_STANDALONE,
+ PLL_SLAVE,
+ PLL_MASTER
+};
+
+int pll_vco_set_rate_8996(struct clk *c, unsigned long rate);
+long pll_vco_round_rate_8996(struct clk *c, unsigned long rate);
+enum handoff pll_vco_handoff_8996(struct clk *c);
+enum handoff shadow_pll_vco_handoff_8996(struct clk *c);
+int shadow_post_n1_div_set_div(struct div_clk *clk, int div);
+int shadow_post_n1_div_get_div(struct div_clk *clk);
+int shadow_n2_div_set_div(struct div_clk *clk, int div);
+int shadow_n2_div_get_div(struct div_clk *clk);
+int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate);
+int pll_vco_prepare_8996(struct clk *c);
+void pll_vco_unprepare_8996(struct clk *c);
+int set_mdss_byte_mux_sel_8996(struct mux_clk *clk, int sel);
+int get_mdss_byte_mux_sel_8996(struct mux_clk *clk);
+int set_mdss_pixel_mux_sel_8996(struct mux_clk *clk, int sel);
+int get_mdss_pixel_mux_sel_8996(struct mux_clk *clk);
+int post_n1_div_set_div(struct div_clk *clk, int div);
+int post_n1_div_get_div(struct div_clk *clk);
+int n2_div_set_div(struct div_clk *clk, int div);
+int n2_div_get_div(struct div_clk *clk);
+int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll);
+
+#endif /* MDSS_DSI_PLL_8996_H */
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c b/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c
new file mode 100644
index 000000000000..e6153553e48a
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll-cobalt.c
@@ -0,0 +1,1305 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-cobalt.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-pll.h"
+
+#define VCO_DELAY_USEC 1
+
+#define MHZ_375 375000000UL
+#define MHZ_750 750000000UL
+#define MHZ_1500 1500000000UL
+#define MHZ_1900 1900000000UL
+#define MHZ_3000 3000000000UL
+
+/* Register Offsets from PLL base address */
+#define PLL_ANALOG_CONTROLS_ONE 0x000
+#define PLL_ANALOG_CONTROLS_TWO 0x004
+#define PLL_ANALOG_CONTROLS_THREE 0x010
+#define PLL_DSM_DIVIDER 0x01c
+#define PLL_FEEDBACK_DIVIDER 0x020
+#define PLL_SYSTEM_MUXES 0x024
+#define PLL_CMODE 0x02c
+#define PLL_CALIBRATION_SETTINGS 0x030
+#define PLL_BAND_SEL_CAL_SETTINGS_THREE 0x054
+#define PLL_FREQ_DETECT_SETTINGS_ONE 0x064
+#define PLL_OUTDIV 0x094
+#define PLL_CORE_OVERRIDE 0x0a4
+#define PLL_CORE_INPUT_OVERRIDE 0x0a8
+#define PLL_PLL_DIGITAL_TIMERS_TWO 0x0b4
+#define PLL_DECIMAL_DIV_START_1 0x0cc
+#define PLL_FRAC_DIV_START_LOW_1 0x0d0
+#define PLL_FRAC_DIV_START_MID_1 0x0d4
+#define PLL_FRAC_DIV_START_HIGH_1 0x0d8
+#define PLL_PLL_OUTDIV_RATE 0x140
+#define PLL_PLL_LOCKDET_RATE_1 0x144
+#define PLL_PLL_PROP_GAIN_RATE_1 0x14c
+#define PLL_PLL_BAND_SET_RATE_1 0x154
+#define PLL_PLL_INT_GAIN_IFILT_BAND_1 0x15c
+#define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x164
+#define PLL_PLL_LOCK_OVERRIDE 0x180
+#define PLL_PLL_LOCK_DELAY 0x184
+#define PLL_COMMON_STATUS_ONE 0x1a0
+
+/* Register Offsets from PHY base address */
+#define PHY_CMN_CLK_CFG0 0x010
+#define PHY_CMN_CLK_CFG1 0x014
+#define PHY_CMN_RBUF_CTRL 0x01c
+#define PHY_CMN_PLL_CNTRL 0x038
+#define PHY_CMN_CTRL_0 0x024
+
+enum {
+ DSI_PLL_0,
+ DSI_PLL_1,
+ DSI_PLL_MAX
+};
+
+struct dsi_pll_regs {
+ u32 pll_prop_gain_rate;
+ u32 pll_outdiv_rate;
+ u32 pll_lockdet_rate;
+ u32 decimal_div_start;
+ u32 frac_div_start_low;
+ u32 frac_div_start_mid;
+ u32 frac_div_start_high;
+};
+
+struct dsi_pll_config {
+ u32 ref_freq;
+ bool div_override;
+ u32 output_div;
+ bool ignore_frac;
+ bool disable_prescaler;
+ u32 dec_bits;
+ u32 frac_bits;
+ u32 lock_timer;
+ u32 ssc_freq;
+ u32 ssc_offset;
+ u32 ssc_adj_per;
+ u32 thresh_cycles;
+ u32 refclk_cycles;
+};
+
+struct dsi_pll_cobalt {
+ struct mdss_pll_resources *rsc;
+ struct dsi_pll_config pll_configuration;
+ struct dsi_pll_regs reg_setup;
+};
+
+static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
+static struct dsi_pll_cobalt plls[DSI_PLL_MAX];
+
+/*
+ * dsi_pll_config_slave() - detect whether DSI PLL1 sources its bit clock
+ * externally (i.e. from PLL0) and, if so, record PLL1 as PLL0's slave.
+ *
+ * Only DSI PLL0 may act as master; for any other index this is a no-op.
+ * rsc->slave stays NULL for standalone / internal-source configurations.
+ */
+static void dsi_pll_config_slave(struct mdss_pll_resources *rsc)
+{
+ u32 reg;
+ struct mdss_pll_resources *orsc = pll_rsc_db[DSI_PLL_1];
+
+ if (!rsc)
+ return;
+
+ /* Only DSI PLL0 can act as a master */
+ if (rsc->index != DSI_PLL_0)
+ return;
+
+ /* default configuration: source is either internal or ref clock */
+ rsc->slave = NULL;
+
+ if (!orsc) {
+ /* fixed typo: "unavilable" -> "unavailable" */
+ pr_warn("slave PLL unavailable, assuming standalone config\n");
+ return;
+ }
+
+ /* check to see if the source of DSI1 PLL bitclk is set to external */
+ reg = MDSS_PLL_REG_R(orsc->phy_base, PHY_CMN_CLK_CFG1);
+ reg &= (BIT(2) | BIT(3));
+ if (reg == 0x04)
+ rsc->slave = orsc; /* external source */
+
+ pr_debug("Slave PLL %s\n", rsc->slave ? "configured" : "absent");
+}
+
+/*
+ * dsi_pll_setup_config() - load the fixed PLL configuration parameters
+ * (19.2 MHz reference, divider bit-widths, SSC defaults) and detect any
+ * master/slave relationship between the two DSI PLLs.
+ */
+static void dsi_pll_setup_config(struct dsi_pll_cobalt *pll,
+ struct mdss_pll_resources *rsc)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+
+ config->ref_freq = 19200000;
+ config->output_div = 1;
+ config->dec_bits = 8;
+ config->frac_bits = 18;
+ config->lock_timer = 64;
+ config->ssc_freq = 31500;
+ config->ssc_offset = 4800;
+ config->ssc_adj_per = 2;
+ config->thresh_cycles = 32;
+ config->refclk_cycles = 256;
+
+ /* overrides are all off by default; dividers are auto-computed */
+ config->div_override = false;
+ config->ignore_frac = false;
+ config->disable_prescaler = false;
+
+ dsi_pll_config_slave(rsc);
+}
+
+/*
+ * dsi_pll_calc_dec_frac() - derive the decimal/fractional feedback
+ * divider and the output divider encoding for the requested VCO rate.
+ *
+ * Reads the target from rsc->vco_current_rate and the reference from
+ * rsc->vco_ref_clk_rate; results land in pll->reg_setup for a later
+ * dsi_pll_commit().
+ */
+static void dsi_pll_calc_dec_frac(struct dsi_pll_cobalt *pll,
+ struct mdss_pll_resources *rsc)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+ u64 target_freq;
+ u64 fref = rsc->vco_ref_clk_rate;
+ u32 computed_output_div, div_log = 0;
+ u64 pll_freq;
+ u64 divider;
+ u64 dec, dec_multiple;
+ u32 frac;
+ u64 multiplier;
+
+ target_freq = rsc->vco_current_rate;
+ pr_debug("target_freq = %llu\n", target_freq);
+
+ if (config->div_override) {
+ computed_output_div = config->output_div;
+
+ /*
+ * Fix: derive the log2 encoding for the override divider
+ * too. Previously div_log was left uninitialized on this
+ * path and an indeterminate value was programmed into
+ * PLL_PLL_OUTDIV_RATE below.
+ */
+ while ((1u << div_log) < computed_output_div)
+ div_log++;
+ } else {
+ if (target_freq < MHZ_375) {
+ computed_output_div = 8;
+ div_log = 3;
+ } else if (target_freq < MHZ_750) {
+ computed_output_div = 4;
+ div_log = 2;
+ } else if (target_freq < MHZ_1500) {
+ computed_output_div = 2;
+ div_log = 1;
+ } else {
+ computed_output_div = 1;
+ div_log = 0;
+ }
+ }
+ pr_debug("computed_output_div = %d\n", computed_output_div);
+
+ pll_freq = target_freq * computed_output_div;
+
+ /* the prescaler (when enabled) doubles the comparison frequency */
+ if (config->disable_prescaler)
+ divider = fref;
+ else
+ divider = fref * 2;
+
+ /* split pll_freq/divider into integer (dec) and frac_bits fraction */
+ multiplier = 1 << config->frac_bits;
+ dec_multiple = div_u64(pll_freq * multiplier, divider);
+ div_u64_rem(dec_multiple, multiplier, &frac);
+
+ dec = div_u64(dec_multiple, multiplier);
+
+ /* proportional loop gain scales with the PLL output frequency */
+ if (pll_freq <= MHZ_1900)
+ regs->pll_prop_gain_rate = 8;
+ else if (pll_freq <= MHZ_3000)
+ regs->pll_prop_gain_rate = 10;
+ else
+ regs->pll_prop_gain_rate = 12;
+
+ regs->pll_outdiv_rate = div_log;
+ regs->pll_lockdet_rate = config->lock_timer;
+ regs->decimal_div_start = dec;
+ regs->frac_div_start_low = (frac & 0xff);
+ regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+ regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
+
+/*
+ * dsi_pll_config_hzindep_reg() - program the frequency-independent PLL
+ * register defaults. All values are fixed hardware tuning constants
+ * that do not depend on the requested rate.
+ */
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_cobalt *pll,
+ struct mdss_pll_resources *rsc)
+{
+ void __iomem *pll_base = rsc->pll_base;
+
+ MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_ONE, 0x80);
+ MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_TWO, 0x03);
+ MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_THREE, 0x00);
+ MDSS_PLL_REG_W(pll_base, PLL_DSM_DIVIDER, 0x00);
+ MDSS_PLL_REG_W(pll_base, PLL_FEEDBACK_DIVIDER, 0x4e);
+ MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x00);
+ MDSS_PLL_REG_W(pll_base, PLL_CALIBRATION_SETTINGS, 0x40);
+ MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
+ MDSS_PLL_REG_W(pll_base, PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+ MDSS_PLL_REG_W(pll_base, PLL_OUTDIV, 0x00);
+ MDSS_PLL_REG_W(pll_base, PLL_CORE_OVERRIDE, 0x00);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_1, 0x08);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_BAND_SET_RATE_1, 0xc0);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x80);
+}
+
+/*
+ * dsi_pll_commit() - write the rate-dependent divider values computed
+ * by dsi_pll_calc_dec_frac() into the PLL registers.
+ */
+static void dsi_pll_commit(struct dsi_pll_cobalt *pll,
+ struct mdss_pll_resources *rsc)
+{
+ void __iomem *pll_base = rsc->pll_base;
+ struct dsi_pll_regs *reg = &pll->reg_setup;
+
+ MDSS_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x12);
+ MDSS_PLL_REG_W(pll_base, PLL_DECIMAL_DIV_START_1,
+ reg->decimal_div_start);
+ MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_LOW_1,
+ reg->frac_div_start_low);
+ MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_MID_1,
+ reg->frac_div_start_mid);
+ MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1,
+ reg->frac_div_start_high);
+ /*
+ * NOTE(review): a fixed 0xc8 is written here rather than
+ * reg->pll_lockdet_rate (computed but unused) - confirm intent.
+ */
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0xc8);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_OUTDIV_RATE, reg->pll_outdiv_rate);
+ MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x0a);
+
+ /* flush, ensure all register writes are done*/
+ wmb();
+}
+
+/*
+ * vco_cobalt_set_rate() - compute and commit PLL settings for @rate.
+ *
+ * Skipped silently while the PLL is already running (rsc->pll_on);
+ * registers are written with the PLL resources temporarily enabled.
+ * SSC programming is not implemented yet (see todo below).
+ * Returns 0 on success or a negative errno.
+ */
+static int vco_cobalt_set_rate(struct clk *c, unsigned long rate)
+{
+ int rc;
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct mdss_pll_resources *rsc = vco->priv;
+ struct dsi_pll_cobalt *pll;
+
+ if (!rsc) {
+ pr_err("pll resource not found\n");
+ return -EINVAL;
+ }
+
+ if (rsc->pll_on)
+ return 0;
+
+ pll = rsc->priv;
+ if (!pll) {
+ pr_err("pll configuration not found\n");
+ return -EINVAL;
+ }
+
+ pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
+
+ /* stash rate/ref so the calc/commit helpers can read them */
+ rsc->vco_current_rate = rate;
+ rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
+ rsc->index, rc);
+ return rc;
+ }
+
+ dsi_pll_setup_config(pll, rsc);
+
+ dsi_pll_calc_dec_frac(pll, rsc);
+
+ dsi_pll_config_hzindep_reg(pll, rsc);
+
+ /* todo: ssc configuration */
+
+ dsi_pll_commit(pll, rsc);
+
+ mdss_pll_resource_enable(rsc, false);
+
+ return 0;
+}
+
+/*
+ * dsi_pll_cobalt_lock_status() - poll PLL_COMMON_STATUS_ONE bit 0 for
+ * lock, sampling every 100us for up to 5ms.
+ * Returns 0 once locked, or the readl_poll_timeout_atomic() error
+ * (-ETIMEDOUT) if lock is never observed.
+ */
+static int dsi_pll_cobalt_lock_status(struct mdss_pll_resources *pll)
+{
+ int rc;
+ u32 status;
+ u32 const delay_us = 100;
+ u32 const timeout_us = 5000;
+
+ rc = readl_poll_timeout_atomic(pll->pll_base + PLL_COMMON_STATUS_ONE,
+ status,
+ ((status & BIT(0)) > 0),
+ delay_us,
+ timeout_us);
+ if (rc)
+ pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+ pll->index, status);
+
+ return rc;
+}
+
+/*
+ * Drop the PLL bias: clear CTRL_0 bit 5 and zero the system muxes,
+ * then allow 250ns for the analog to settle.
+ */
+static void dsi_pll_disable_pll_bias(struct mdss_pll_resources *rsc)
+{
+ u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
+
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data & ~BIT(5));
+ MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0);
+ ndelay(250);
+}
+
+/*
+ * Enable the PLL bias: set CTRL_0 bit 5 and program the system muxes,
+ * then allow 250ns for the analog to settle.
+ */
+static void dsi_pll_enable_pll_bias(struct mdss_pll_resources *rsc)
+{
+ u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
+
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data | BIT(5));
+ MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0xc0);
+ ndelay(250);
+}
+
+/*
+ * dsi_pll_enable() - bias up and start the PLL (and its slave, if any),
+ * wait for lock, then enable the PHY clock buffers.
+ * Returns 0 on success, or the lock-poll error code on failure.
+ */
+static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
+{
+ int rc;
+ struct mdss_pll_resources *rsc = vco->priv;
+
+ dsi_pll_enable_pll_bias(rsc);
+ if (rsc->slave)
+ dsi_pll_enable_pll_bias(rsc->slave);
+
+ /* Start PLL */
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0x01);
+
+ /*
+ * ensure all PLL configurations are written prior to checking
+ * for PLL lock.
+ */
+ wmb();
+
+ /* Check for PLL lock */
+ rc = dsi_pll_cobalt_lock_status(rsc);
+ if (rc) {
+ pr_err("PLL(%d) lock failed\n", rsc->index);
+ goto error;
+ }
+
+ rsc->pll_on = true;
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
+ if (rsc->slave)
+ MDSS_PLL_REG_W(rsc->slave->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
+
+error:
+ /* NOTE(review): bias stays enabled on lock failure - confirm intent */
+ return rc;
+}
+
+/* Disable one PLL instance: drop its bias and its PHY clock buffer. */
+static void dsi_pll_disable_sub(struct mdss_pll_resources *rsc)
+{
+ dsi_pll_disable_pll_bias(rsc);
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0);
+}
+
+/*
+ * dsi_pll_disable() - stop the PLL (master and slave) and mark it off.
+ * If the PLL was handed off enabled, its resources are enabled first
+ * so the register writes can land; handoff state is then cleared.
+ */
+static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
+{
+ struct mdss_pll_resources *rsc = vco->priv;
+
+ if (!rsc->pll_on &&
+ mdss_pll_resource_enable(rsc, true)) {
+ pr_err("failed to enable pll (%d) resources\n", rsc->index);
+ return;
+ }
+
+ rsc->handoff_resources = false;
+
+ pr_debug("stop PLL (%d)\n", rsc->index);
+
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0);
+ dsi_pll_disable_sub(rsc);
+ if (rsc->slave)
+ dsi_pll_disable_sub(rsc->slave);
+
+ /* flush, ensure all register writes are done*/
+ wmb();
+ rsc->pll_on = false;
+}
+
+/*
+ * vco_cobalt_unprepare() - clk_ops unprepare hook: cache the current
+ * rate for restore on the next prepare, stop the PLL, and release the
+ * PLL resources.
+ */
+static void vco_cobalt_unprepare(struct clk *c)
+{
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct mdss_pll_resources *pll = vco->priv;
+
+ if (!pll) {
+ pr_err("dsi pll resources not available\n");
+ return;
+ }
+
+ pll->vco_cached_rate = c->rate;
+ dsi_pll_disable(vco);
+ mdss_pll_resource_enable(pll, false);
+}
+
+/*
+ * vco_cobalt_prepare() - clk_ops prepare hook: enable PLL resources,
+ * re-program the cached rate if it matches the current clk rate (a
+ * restore after unprepare), then start the PLL.
+ * Returns 0 on success or a negative errno; resources are released
+ * again on any failure.
+ */
+static int vco_cobalt_prepare(struct clk *c)
+{
+ int rc = 0;
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct mdss_pll_resources *pll = vco->priv;
+
+ if (!pll) {
+ pr_err("dsi pll resources are not available\n");
+ return -EINVAL;
+ }
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("failed to enable pll (%d) resource, rc=%d\n",
+ pll->index, rc);
+ return rc;
+ }
+
+ if ((pll->vco_cached_rate != 0) &&
+ (pll->vco_cached_rate == c->rate)) {
+ rc = c->ops->set_rate(c, pll->vco_cached_rate);
+ if (rc) {
+ pr_err("pll(%d) set_rate failed, rc=%d\n",
+ pll->index, rc);
+ mdss_pll_resource_enable(pll, false);
+ return rc;
+ }
+ }
+
+ rc = dsi_pll_enable(vco);
+ if (rc) {
+ mdss_pll_resource_enable(pll, false);
+ pr_err("pll(%d) enable failed, rc=%d\n", pll->index, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+/*
+ * dsi_pll_get_vco_rate() - reconstruct the current VCO rate from the
+ * decimal/fractional feedback divider and output divider registers.
+ * Used on handoff to learn the rate the bootloader programmed.
+ * Returns the rate in Hz, or 0 if PLL resources cannot be enabled.
+ */
+static unsigned long dsi_pll_get_vco_rate(struct clk *c)
+{
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct mdss_pll_resources *pll = vco->priv;
+ int rc;
+ u64 ref_clk = vco->ref_clk_rate;
+ u64 vco_rate;
+ u64 multiplier;
+ u32 frac;
+ u32 dec;
+ u32 outdiv;
+ u64 pll_freq, tmp64;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("failed to enable pll(%d) resource, rc=%d\n",
+ pll->index, rc);
+ return 0;
+ }
+
+ dec = MDSS_PLL_REG_R(pll->pll_base, PLL_DECIMAL_DIV_START_1);
+ dec &= 0xFF;
+
+ frac = MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_LOW_1);
+ frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_MID_1) &
+ 0xFF) <<
+ 8);
+ frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_HIGH_1) &
+ 0x3) <<
+ 16);
+
+ /* OUTDIV_1:0 field is (log(outdiv, 2)) */
+ outdiv = MDSS_PLL_REG_R(pll->pll_base, PLL_OUTDIV);
+ outdiv &= 0x3;
+ outdiv = 1 << outdiv;
+
+ /*
+ * TODO:
+ * 1. Assumes prescaler is disabled
+ * 2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
+ **/
+ multiplier = 1 << 18;
+ pll_freq = dec * (ref_clk * 2);
+ tmp64 = (ref_clk * 2 * frac);
+ /*
+ * Fix: do_div() divides its first argument in place and returns
+ * the *remainder*. The original code accumulated the remainder
+ * instead of the quotient here, and likewise returned the
+ * remainder of the outdiv division, yielding a bogus VCO rate.
+ */
+ do_div(tmp64, multiplier);
+ pll_freq += tmp64;
+
+ do_div(pll_freq, outdiv);
+ vco_rate = pll_freq;
+
+ /* also fixed a stray mid-string newline in the debug format */
+ pr_debug("dec=0x%x, frac=0x%x, outdiv=%d, vco=%lu\n",
+ dec, frac, outdiv, (unsigned long)vco_rate);
+
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return (unsigned long)vco_rate;
+}
+
+/*
+ * vco_cobalt_handoff() - clk_ops handoff hook: if the bootloader left
+ * the PLL locked, keep the resource reference, mark the PLL on, and
+ * report the rate read back from hardware; otherwise release resources
+ * and report the clock disabled.
+ */
+enum handoff vco_cobalt_handoff(struct clk *c)
+{
+ enum handoff ret = HANDOFF_DISABLED_CLK;
+ int rc;
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+ struct mdss_pll_resources *pll = vco->priv;
+ u32 status;
+
+ if (!pll) {
+ pr_err("Unable to find pll resource\n");
+ return HANDOFF_DISABLED_CLK;
+ }
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("failed to enable pll(%d) resources, rc=%d\n",
+ pll->index, rc);
+ return ret;
+ }
+
+ /* bit 0 of COMMON_STATUS_ONE indicates PLL lock */
+ status = MDSS_PLL_REG_R(pll->pll_base, PLL_COMMON_STATUS_ONE);
+ if (status & BIT(0)) {
+ pll->handoff_resources = true;
+ pll->pll_on = true;
+ c->rate = dsi_pll_get_vco_rate(c);
+ ret = HANDOFF_ENABLED_CLK;
+ } else {
+ (void)mdss_pll_resource_enable(pll, false);
+ ret = HANDOFF_DISABLED_CLK;
+ }
+
+ return ret;
+}
+
+/*
+ * pixel_clk_get_div() - read the pixel-clock divider from
+ * PHY_CMN_CLK_CFG0 bits [7:4].
+ * Returns the divider, or a negative errno if resources fail.
+ */
+static int pixel_clk_get_div(struct div_clk *clk)
+{
+ int rc;
+ struct mdss_pll_resources *pll = clk->priv;
+ u32 reg_val;
+ int div;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+ div = (reg_val & 0xF0) >> 4;
+
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return div;
+}
+
+/* Program the pixel divider field, CLK_CFG0[7:4], on one PLL instance. */
+static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
+{
+ u32 reg_val;
+
+ reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+ reg_val &= ~0xF0;
+ reg_val |= (div << 4);
+ MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+}
+
+/*
+ * pixel_clk_set_div() - set the pixel divider, mirroring the same value
+ * to the slave PLL when one is configured. Returns 0 or a negative
+ * errno if resources fail.
+ */
+static int pixel_clk_set_div(struct div_clk *clk, int div)
+{
+ int rc;
+ struct mdss_pll_resources *pll = clk->priv;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ pixel_clk_set_div_sub(pll, div);
+ if (pll->slave)
+ pixel_clk_set_div_sub(pll->slave, div);
+
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return 0;
+}
+
+/*
+ * bit_clk_get_div() - read the bit-clock divider from
+ * PHY_CMN_CLK_CFG0 bits [3:0].
+ * Returns the divider, or a negative errno if resources fail.
+ */
+static int bit_clk_get_div(struct div_clk *clk)
+{
+ int rc;
+ struct mdss_pll_resources *pll = clk->priv;
+ u32 reg_val;
+ int div;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+ div = (reg_val & 0x0F);
+
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return div;
+}
+
+/* Program the bit-clock divider field, CLK_CFG0[3:0], on one instance. */
+static void bit_clk_set_div_sub(struct mdss_pll_resources *rsc, int div)
+{
+ u32 reg_val;
+
+ reg_val = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
+ reg_val &= ~0x0F;
+ reg_val |= div;
+ MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+}
+
+/*
+ * bit_clk_set_div() - set the bit-clock divider on the master; a slave
+ * PLL always gets divider 1 (it passes the master's bit clock through).
+ * Returns 0 or a negative errno.
+ */
+static int bit_clk_set_div(struct div_clk *clk, int div)
+{
+ int rc;
+ struct mdss_pll_resources *rsc = clk->priv;
+ struct dsi_pll_cobalt *pll;
+
+ if (!rsc) {
+ pr_err("pll resource not found\n");
+ return -EINVAL;
+ }
+
+ pll = rsc->priv;
+ if (!pll) {
+ pr_err("pll configuration not found\n");
+ return -EINVAL;
+ }
+
+ rc = mdss_pll_resource_enable(rsc, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ bit_clk_set_div_sub(rsc, div);
+ /* For slave PLL, this divider always should be set to 1 */
+ if (rsc->slave)
+ bit_clk_set_div_sub(rsc->slave, 1);
+
+ (void)mdss_pll_resource_enable(rsc, false);
+
+ return rc;
+}
+
+/*
+ * post_vco_clk_get_div() - decode the post-VCO divider from
+ * PHY_CMN_CLK_CFG1 bits [1:0]: 2 -> div 1, 3 -> div 4, anything else
+ * defaults to 1.
+ * NOTE(review): this field appears shared with the post-bit divider
+ * encodings (values 0/1) - confirm the mux semantics.
+ */
+static int post_vco_clk_get_div(struct div_clk *clk)
+{
+ int rc;
+ struct mdss_pll_resources *pll = clk->priv;
+ u32 reg_val;
+ int div;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+ reg_val &= 0x3;
+
+ if (reg_val == 2)
+ div = 1;
+ else if (reg_val == 3)
+ div = 4;
+ else
+ div = 1;
+
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return div;
+}
+
+/*
+ * Encode and program the post-VCO divider into CLK_CFG1[1:0]
+ * (div 1 -> 2, div 4 -> 3). Returns -EINVAL for any other divider.
+ */
+static int post_vco_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
+{
+ u32 reg_val;
+ int rc = 0;
+
+ reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+ reg_val &= ~0x03;
+ if (div == 1) {
+ reg_val |= 0x2;
+ } else if (div == 4) {
+ reg_val |= 0x3;
+ } else {
+ rc = -EINVAL;
+ pr_err("unsupported divider %d\n", div);
+ goto error;
+ }
+
+ MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
+
+error:
+ return rc;
+}
+
+/*
+ * post_vco_clk_set_div() - set the post-VCO divider, mirrored to the
+ * slave PLL when present. Returns 0 or a negative errno.
+ */
+static int post_vco_clk_set_div(struct div_clk *clk, int div)
+{
+ int rc = 0;
+ struct mdss_pll_resources *pll = clk->priv;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = post_vco_clk_set_div_sub(pll, div);
+ if (!rc && pll->slave)
+ rc = post_vco_clk_set_div_sub(pll->slave, div);
+
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return rc;
+}
+
+/*
+ * post_bit_clk_get_div() - decode the post-bit divider from
+ * PHY_CMN_CLK_CFG1 bits [1:0]: 0 -> div 1, 1 -> div 2, anything else
+ * defaults to 1 (see the matching NOTE on post_vco_clk_get_div about
+ * this shared field).
+ */
+static int post_bit_clk_get_div(struct div_clk *clk)
+{
+ int rc;
+ struct mdss_pll_resources *pll = clk->priv;
+ u32 reg_val;
+ int div;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+ reg_val &= 0x3;
+
+ if (reg_val == 0)
+ div = 1;
+ else if (reg_val == 1)
+ div = 2;
+ else
+ div = 1;
+
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return div;
+}
+
+/*
+ * Encode and program the post-bit divider into CLK_CFG1[1:0]
+ * (div 1 -> 0, div 2 -> 1). Returns -EINVAL for any other divider.
+ */
+static int post_bit_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
+{
+ int rc = 0;
+ u32 reg_val;
+
+ reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG1);
+ reg_val &= ~0x03;
+ if (div == 1) {
+ reg_val |= 0x0;
+ } else if (div == 2) {
+ reg_val |= 0x1;
+ } else {
+ rc = -EINVAL;
+ pr_err("unsupported divider %d\n", div);
+ goto error;
+ }
+
+ MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG1, reg_val);
+
+error:
+ return rc;
+}
+
+/*
+ * post_bit_clk_set_div() - set the post-bit divider, mirrored to the
+ * slave PLL when present. Returns 0 or a negative errno.
+ */
+static int post_bit_clk_set_div(struct div_clk *clk, int div)
+{
+ int rc = 0;
+ struct mdss_pll_resources *pll = clk->priv;
+
+ rc = mdss_pll_resource_enable(pll, true);
+ if (rc) {
+ pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = post_bit_clk_set_div_sub(pll, div);
+ if (!rc && pll->slave)
+ rc = post_bit_clk_set_div_sub(pll->slave, div);
+
+ (void)mdss_pll_resource_enable(pll, false);
+
+ return rc;
+}
+
+/*
+ * vco_cobalt_round_rate() - clamp a requested rate into the VCO's
+ * supported [min_rate, max_rate] window.
+ */
+long vco_cobalt_round_rate(struct clk *c, unsigned long rate)
+{
+ struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+
+ if (rate < vco->min_rate)
+ return vco->min_rate;
+
+ if (rate > vco->max_rate)
+ return vco->max_rate;
+
+ return rate;
+}
+
+/*
+ * clk ops that require runtime fixup: these start as empty shells and
+ * are populated from template ops (e.g. clk_ops_div, clk_ops_gen_mux)
+ * during probe/registration.
+ */
+static struct clk_ops clk_ops_gen_mux_dsi;
+static struct clk_ops clk_ops_bitclk_src_c;
+static struct clk_ops clk_ops_post_vco_div_c;
+static struct clk_ops clk_ops_post_bit_div_c;
+static struct clk_ops clk_ops_pclk_src_c;
+
+/* divider ops for the post-VCO divider (div 1 or 4) */
+static struct clk_div_ops clk_post_vco_div_ops = {
+ .set_div = post_vco_clk_set_div,
+ .get_div = post_vco_clk_get_div,
+};
+
+/* divider ops for the post-bit divider (div 1 or 2) */
+static struct clk_div_ops clk_post_bit_div_ops = {
+ .set_div = post_bit_clk_set_div,
+ .get_div = post_bit_clk_get_div,
+};
+
+/* divider ops for the pixel clock divider (CLK_CFG0[7:4]) */
+static struct clk_div_ops pixel_clk_div_ops = {
+ .set_div = pixel_clk_set_div,
+ .get_div = pixel_clk_get_div,
+};
+
+/* divider ops for the bit clock source divider (CLK_CFG0[3:0]) */
+static struct clk_div_ops clk_bitclk_src_ops = {
+ .set_div = bit_clk_set_div,
+ .get_div = bit_clk_get_div,
+};
+
+/* top-level VCO clock ops */
+static struct clk_ops clk_ops_vco_cobalt = {
+ .set_rate = vco_cobalt_set_rate,
+ .round_rate = vco_cobalt_round_rate,
+ .handoff = vco_cobalt_handoff,
+ .prepare = vco_cobalt_prepare,
+ .unprepare = vco_cobalt_unprepare,
+};
+
+/* generic mux ops shared by the byteclk/pclk muxes */
+static struct clk_mux_ops mdss_mux_ops = {
+ .set_mux_sel = mdss_set_mux_sel,
+ .get_mux_sel = mdss_get_mux_sel,
+};
+
+/*
+ * Clock tree for generating DSI byte and pixel clocks.
+ *
+ *
+ * +---------------+
+ * | vco_clk |
+ * +-------+-------+
+ * |
+ * +--------------------------------------+
+ * | |
+ * +-------v-------+ |
+ * | bitclk_src | |
+ * | DIV(1..15) | |
+ * +-------+-------+ |
+ * | |
+ * +--------------------+ |
+ * Shadow Path | | |
+ * + +-------v-------+ +------v------+ +------v-------+
+ * | | byteclk_src | |post_bit_div | |post_vco_div |
+ * | | DIV(8) | |DIV(1,2) | |DIV(1,4) |
+ * | +-------+-------+ +------+------+ +------+-------+
+ * | | | |
+ * | | +------+ +----+
+ * | +--------+ | |
+ * | | +----v-----v------+
+ * +-v---------v----+ \ pclk_src_mux /
+ * \ byteclk_mux / \ /
+ * \ / +-----+-----+
+ * +----+-----+ | Shadow Path
+ * | | +
+ * v +-----v------+ |
+ * dsi_byte_clk | pclk_src | |
+ * | DIV(1..15) | |
+ * +-----+------+ |
+ * | |
+ * | |
+ * +--------+ |
+ * | |
+ * +---v----v----+
+ * \ pclk_mux /
+ * \ /
+ * +---+---+
+ * |
+ * |
+ * v
+ * dsi_pclk
+ *
+ */
+
+/*
+ * DSI0 PLL clock tree (see the diagram above): VCO -> bit/byte branch and
+ * the pixel-clock branch through post dividers and muxes. All clocks use
+ * CLKFLAG_NO_RATE_CACHE so rates are always re-read from hardware.
+ */
+
+/* VCO: 19.2 MHz reference, output constrained to 1.5-3.5 GHz. */
+static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1500000000UL,
+	.max_rate = 3500000000UL,
+	.c = {
+		.dbg_name = "dsi0pll_vco_clk",
+		.ops = &clk_ops_vco_cobalt,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_vco_clk.c),
+	},
+};
+
+/* Bit clock source: VCO divided by 1..15. */
+static struct div_clk dsi0pll_bitclk_src = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 15,
+	},
+	.ops = &clk_bitclk_src_ops,
+	.c = {
+		.parent = &dsi0pll_vco_clk.c,
+		.dbg_name = "dsi0pll_bitclk_src",
+		.ops = &clk_ops_bitclk_src_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_bitclk_src.c),
+	}
+};
+
+/* Pixel-path divider directly off the VCO (DIV 1..4 in the diagram). */
+static struct div_clk dsi0pll_post_vco_div = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 4,
+	},
+	.ops = &clk_post_vco_div_ops,
+	.c = {
+		.parent = &dsi0pll_vco_clk.c,
+		.dbg_name = "dsi0pll_post_vco_div",
+		.ops = &clk_ops_post_vco_div_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_post_vco_div.c),
+	}
+};
+
+/* Pixel-path divider off the bit clock (DIV 1..2). */
+static struct div_clk dsi0pll_post_bit_div = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 2,
+	},
+	.ops = &clk_post_bit_div_ops,
+	.c = {
+		.parent = &dsi0pll_bitclk_src.c,
+		.dbg_name = "dsi0pll_post_bit_div",
+		.ops = &clk_ops_post_bit_div_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_post_bit_div.c),
+	}
+};
+
+/* Selects the pixel clock source: post_bit_div (0) or post_vco_div (1). */
+static struct mux_clk dsi0pll_pclk_src_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dsi0pll_post_bit_div.c, 0},
+		{&dsi0pll_post_vco_div.c, 1},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dsi0pll_post_bit_div.c,
+		.dbg_name = "dsi0pll_pclk_src_mux",
+		.ops = &clk_ops_gen_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pclk_src_mux.c),
+	}
+};
+
+/* Final pixel clock divider (DIV 1..15). */
+static struct div_clk dsi0pll_pclk_src = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 15,
+	},
+	.ops = &pixel_clk_div_ops,
+	.c = {
+		.parent = &dsi0pll_pclk_src_mux.c,
+		.dbg_name = "dsi0pll_pclk_src",
+		.ops = &clk_ops_pclk_src_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pclk_src.c),
+	},
+};
+
+/* Single-parent output mux producing dsi_pclk. */
+static struct mux_clk dsi0pll_pclk_mux = {
+	.num_parents = 1,
+	.parents = (struct clk_src[]) {
+		{&dsi0pll_pclk_src.c, 0},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dsi0pll_pclk_src.c,
+		.dbg_name = "dsi0pll_pclk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pclk_mux.c),
+	}
+};
+
+/* Byte clock: fixed divide-by-8 of the bit clock. */
+static struct div_clk dsi0pll_byteclk_src = {
+	.data = {
+		.div = 8,
+		.min_div = 8,
+		.max_div = 8,
+	},
+	.c = {
+		.parent = &dsi0pll_bitclk_src.c,
+		.dbg_name = "dsi0pll_byteclk_src",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_byteclk_src.c),
+	},
+};
+
+/* Single-parent output mux producing dsi_byte_clk. */
+static struct mux_clk dsi0pll_byteclk_mux = {
+	.num_parents = 1,
+	.parents = (struct clk_src[]) {
+		{&dsi0pll_byteclk_src.c, 0},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dsi0pll_byteclk_src.c,
+		.dbg_name = "dsi0pll_byteclk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_byteclk_mux.c),
+	}
+};
+
+/*
+ * DSI1 PLL clock tree: structurally identical second PLL instance with the
+ * same VCO range, divider limits and mux topology as the dsi0pll tree.
+ */
+static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1500000000UL,
+	.max_rate = 3500000000UL,
+	.c = {
+		.dbg_name = "dsi1pll_vco_clk",
+		.ops = &clk_ops_vco_cobalt,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_vco_clk.c),
+	},
+};
+
+/* Bit clock source: VCO divided by 1..15. */
+static struct div_clk dsi1pll_bitclk_src = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 15,
+	},
+	.ops = &clk_bitclk_src_ops,
+	.c = {
+		.parent = &dsi1pll_vco_clk.c,
+		.dbg_name = "dsi1pll_bitclk_src",
+		.ops = &clk_ops_bitclk_src_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_bitclk_src.c),
+	}
+};
+
+/* Pixel-path divider directly off the VCO (DIV 1..4). */
+static struct div_clk dsi1pll_post_vco_div = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 4,
+	},
+	.ops = &clk_post_vco_div_ops,
+	.c = {
+		.parent = &dsi1pll_vco_clk.c,
+		.dbg_name = "dsi1pll_post_vco_div",
+		.ops = &clk_ops_post_vco_div_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_post_vco_div.c),
+	}
+};
+
+/* Pixel-path divider off the bit clock (DIV 1..2). */
+static struct div_clk dsi1pll_post_bit_div = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 2,
+	},
+	.ops = &clk_post_bit_div_ops,
+	.c = {
+		.parent = &dsi1pll_bitclk_src.c,
+		.dbg_name = "dsi1pll_post_bit_div",
+		.ops = &clk_ops_post_bit_div_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_post_bit_div.c),
+	}
+};
+
+/* Selects the pixel clock source: post_bit_div (0) or post_vco_div (1). */
+static struct mux_clk dsi1pll_pclk_src_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dsi1pll_post_bit_div.c, 0},
+		{&dsi1pll_post_vco_div.c, 1},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dsi1pll_post_bit_div.c,
+		.dbg_name = "dsi1pll_pclk_src_mux",
+		.ops = &clk_ops_gen_mux,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pclk_src_mux.c),
+	}
+};
+
+/* Final pixel clock divider (DIV 1..15). */
+static struct div_clk dsi1pll_pclk_src = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 15,
+	},
+	.ops = &pixel_clk_div_ops,
+	.c = {
+		.parent = &dsi1pll_pclk_src_mux.c,
+		.dbg_name = "dsi1pll_pclk_src",
+		.ops = &clk_ops_pclk_src_c,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pclk_src.c),
+	},
+};
+
+/* Single-parent output mux producing dsi_pclk. */
+static struct mux_clk dsi1pll_pclk_mux = {
+	.num_parents = 1,
+	.parents = (struct clk_src[]) {
+		{&dsi1pll_pclk_src.c, 0},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dsi1pll_pclk_src.c,
+		.dbg_name = "dsi1pll_pclk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pclk_mux.c),
+	}
+};
+
+/* Byte clock: fixed divide-by-8 of the bit clock. */
+static struct div_clk dsi1pll_byteclk_src = {
+	.data = {
+		.div = 8,
+		.min_div = 8,
+		.max_div = 8,
+	},
+	.c = {
+		.parent = &dsi1pll_bitclk_src.c,
+		.dbg_name = "dsi1pll_byteclk_src",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_byteclk_src.c),
+	},
+};
+
+/* Single-parent output mux producing dsi_byte_clk. */
+static struct mux_clk dsi1pll_byteclk_mux = {
+	.num_parents = 1,
+	.parents = (struct clk_src[]) {
+		{&dsi1pll_byteclk_src.c, 0},
+	},
+	.ops = &mdss_mux_ops,
+	.c = {
+		.parent = &dsi1pll_byteclk_src.c,
+		.dbg_name = "dsi1pll_byteclk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_byteclk_mux.c),
+	}
+};
+
+/* Clock lookup tables registered with the MSM clock framework, one per PLL. */
+static struct clk_lookup mdss_dsi_pll0cc_cobalt[] = {
+	CLK_LIST(dsi0pll_byteclk_mux),
+	CLK_LIST(dsi0pll_byteclk_src),
+	CLK_LIST(dsi0pll_pclk_mux),
+	CLK_LIST(dsi0pll_pclk_src),
+	CLK_LIST(dsi0pll_pclk_src_mux),
+	CLK_LIST(dsi0pll_post_bit_div),
+	CLK_LIST(dsi0pll_post_vco_div),
+	CLK_LIST(dsi0pll_bitclk_src),
+	CLK_LIST(dsi0pll_vco_clk),
+};
+static struct clk_lookup mdss_dsi_pll1cc_cobalt[] = {
+	CLK_LIST(dsi1pll_byteclk_mux),
+	CLK_LIST(dsi1pll_byteclk_src),
+	CLK_LIST(dsi1pll_pclk_mux),
+	CLK_LIST(dsi1pll_pclk_src),
+	CLK_LIST(dsi1pll_pclk_src_mux),
+	CLK_LIST(dsi1pll_post_bit_div),
+	CLK_LIST(dsi1pll_post_vco_div),
+	CLK_LIST(dsi1pll_bitclk_src),
+	CLK_LIST(dsi1pll_vco_clk),
+};
+
+/*
+ * dsi_pll_clock_register_cobalt() - register the DSI PLL clock tree
+ * @pdev: platform device whose of_node carries the clock bindings
+ * @pll_res: PLL resources (register bases, instance index) from the caller
+ *
+ * Fixes up the generic div/mux clock ops at runtime, attaches @pll_res to
+ * every clock of the selected PLL instance (0 or 1) and registers the
+ * matching lookup table with the MSM clock framework.
+ *
+ * Return: 0 on success, -EINVAL on bad parameters, or the error from
+ * of_msm_clock_register().
+ */
+int dsi_pll_clock_register_cobalt(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = 0, ndx;
+
+	if (!pdev || !pdev->dev.of_node ||
+		!pll_res || !pll_res->pll_base || !pll_res->phy_base) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	ndx = pll_res->index;
+
+	/*
+	 * Reject negative indices too: ndx is a signed copy of
+	 * pll_res->index and is used below to index pll_rsc_db[]/plls[],
+	 * so an upper-bound-only check would let a negative value through.
+	 */
+	if (ndx < 0 || ndx >= DSI_PLL_MAX) {
+		pr_err("pll index(%d) NOT supported\n", ndx);
+		return -EINVAL;
+	}
+
+	pll_rsc_db[ndx] = pll_res;
+	pll_res->priv = &plls[ndx];
+	plls[ndx].rsc = pll_res;
+
+	/* runtime fixup of all div and mux clock ops */
+	clk_ops_gen_mux_dsi = clk_ops_gen_mux;
+	clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
+	clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
+
+	clk_ops_bitclk_src_c = clk_ops_div;
+	clk_ops_bitclk_src_c.prepare = mdss_pll_div_prepare;
+
+	/*
+	 * Set the ops for the two dividers in the pixel clock tree to the
+	 * slave_div to ensure that a set rate on this divider clock will not
+	 * be propagated to it's parent. This is needed ensure that when we set
+	 * the rate for pixel clock, the vco is not reconfigured
+	 */
+	clk_ops_post_vco_div_c = clk_ops_slave_div;
+	clk_ops_post_vco_div_c.prepare = mdss_pll_div_prepare;
+
+	clk_ops_post_bit_div_c = clk_ops_slave_div;
+	clk_ops_post_bit_div_c.prepare = mdss_pll_div_prepare;
+
+	clk_ops_pclk_src_c = clk_ops_div;
+	clk_ops_pclk_src_c.prepare = mdss_pll_div_prepare;
+
+	pll_res->vco_delay = VCO_DELAY_USEC;
+	if (ndx == 0) {
+		dsi0pll_byteclk_mux.priv = pll_res;
+		dsi0pll_byteclk_src.priv = pll_res;
+		dsi0pll_pclk_mux.priv = pll_res;
+		dsi0pll_pclk_src.priv = pll_res;
+		dsi0pll_pclk_src_mux.priv = pll_res;
+		dsi0pll_post_bit_div.priv = pll_res;
+		dsi0pll_post_vco_div.priv = pll_res;
+		dsi0pll_bitclk_src.priv = pll_res;
+		dsi0pll_vco_clk.priv = pll_res;
+
+		rc = of_msm_clock_register(pdev->dev.of_node,
+			mdss_dsi_pll0cc_cobalt,
+			ARRAY_SIZE(mdss_dsi_pll0cc_cobalt));
+	} else {
+		dsi1pll_byteclk_mux.priv = pll_res;
+		dsi1pll_byteclk_src.priv = pll_res;
+		dsi1pll_pclk_mux.priv = pll_res;
+		dsi1pll_pclk_src.priv = pll_res;
+		dsi1pll_pclk_src_mux.priv = pll_res;
+		dsi1pll_post_bit_div.priv = pll_res;
+		dsi1pll_post_vco_div.priv = pll_res;
+		dsi1pll_bitclk_src.priv = pll_res;
+		dsi1pll_vco_clk.priv = pll_res;
+
+		rc = of_msm_clock_register(pdev->dev.of_node,
+			mdss_dsi_pll1cc_cobalt,
+			ARRAY_SIZE(mdss_dsi_pll1cc_cobalt));
+	}
+	if (rc)
+		pr_err("dsi%dpll clock register failed, rc=%d\n", ndx, rc);
+
+	return rc;
+}
diff --git a/drivers/clk/msm/mdss/mdss-dsi-pll.h b/drivers/clk/msm/mdss/mdss-dsi-pll.h
new file mode 100644
index 000000000000..f88ae4d0eea1
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-dsi-pll.h
@@ -0,0 +1,110 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_DSI_PLL_H
+#define __MDSS_DSI_PLL_H
+
+#define MAX_DSI_PLL_EN_SEQS 10
+
+#define DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG (0x0020)
+#define DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2 (0x0064)
+#define DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG (0x0068)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1 (0x0070)
+
+/* Register offsets for 20nm PHY PLL */
+#define MMSS_DSI_PHY_PLL_PLL_CNTRL (0x0014)
+#define MMSS_DSI_PHY_PLL_PLL_BKG_KVCO_CAL_EN (0x002C)
+#define MMSS_DSI_PHY_PLL_PLLLOCK_CMP_EN (0x009C)
+
+/* Loop filter resistance lookup entry: code @r used at @vco_rate. */
+struct lpfr_cfg {
+	unsigned long vco_rate;
+	u32 r;
+};
+
+/*
+ * DSI PLL VCO clock wrapper around the generic struct clk.
+ * @ref_clk_rate: reference clock rate in Hz
+ * @min_rate/@max_rate: supported VCO range in Hz
+ * @pll_en_seq_cnt: number of valid entries in @pll_enable_seqs
+ * @lpfr_lut/@lpfr_lut_size: optional loop filter resistance table
+ * @priv: PLL-driver private data (typically struct mdss_pll_resources)
+ * @pll_enable_seqs: alternative PLL enable sequences tried in order
+ */
+struct dsi_pll_vco_clk {
+	unsigned long	ref_clk_rate;
+	unsigned long	min_rate;
+	unsigned long	max_rate;
+	u32		pll_en_seq_cnt;
+	struct lpfr_cfg *lpfr_lut;
+	u32		lpfr_lut_size;
+	void		*priv;
+
+	struct clk	c;
+
+	int (*pll_enable_seqs[MAX_DSI_PLL_EN_SEQS])
+			(struct mdss_pll_resources *dsi_pll_Res);
+};
+
+/* Convert the embedded generic clk back to its enclosing VCO clock. */
+static inline struct dsi_pll_vco_clk *to_vco_clk(struct clk *clk)
+{
+	return container_of(clk, struct dsi_pll_vco_clk, c);
+}
+
+int dsi_pll_clock_register_hpm(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+int dsi_pll_clock_register_20nm(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+int dsi_pll_clock_register_lpm(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+int dsi_pll_clock_register_8996(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+int dsi_pll_clock_register_cobalt(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+int set_byte_mux_sel(struct mux_clk *clk, int sel);
+int get_byte_mux_sel(struct mux_clk *clk);
+int dsi_pll_mux_prepare(struct clk *c);
+int fixed_4div_set_div(struct div_clk *clk, int div);
+int fixed_4div_get_div(struct div_clk *clk);
+int digital_set_div(struct div_clk *clk, int div);
+int digital_get_div(struct div_clk *clk);
+int analog_set_div(struct div_clk *clk, int div);
+int analog_get_div(struct div_clk *clk);
+int dsi_pll_lock_status(struct mdss_pll_resources *dsi_pll_res);
+int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
+unsigned long vco_get_rate(struct clk *c);
+long vco_round_rate(struct clk *c, unsigned long rate);
+enum handoff vco_handoff(struct clk *c);
+int vco_prepare(struct clk *c);
+void vco_unprepare(struct clk *c);
+
+/* APIs for 20nm PHY PLL */
+int pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
+int shadow_pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco,
+ unsigned long rate);
+long pll_20nm_vco_round_rate(struct clk *c, unsigned long rate);
+enum handoff pll_20nm_vco_handoff(struct clk *c);
+int pll_20nm_vco_prepare(struct clk *c);
+void pll_20nm_vco_unprepare(struct clk *c);
+int pll_20nm_vco_enable_seq(struct mdss_pll_resources *dsi_pll_res);
+
+int set_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
+int set_shadow_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
+int get_bypass_lp_div_mux_sel(struct mux_clk *clk);
+int fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
+int shadow_fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
+int fixed_hr_oclk2_get_div(struct div_clk *clk);
+int hr_oclk3_set_div(struct div_clk *clk, int div);
+int shadow_hr_oclk3_set_div(struct div_clk *clk, int div);
+int hr_oclk3_get_div(struct div_clk *clk);
+int ndiv_set_div(struct div_clk *clk, int div);
+int shadow_ndiv_set_div(struct div_clk *clk, int div);
+int ndiv_get_div(struct div_clk *clk);
+void __dsi_pll_disable(void __iomem *pll_base);
+
+int set_mdss_pixel_mux_sel(struct mux_clk *clk, int sel);
+int get_mdss_pixel_mux_sel(struct mux_clk *clk);
+int set_mdss_byte_mux_sel(struct mux_clk *clk, int sel);
+int get_mdss_byte_mux_sel(struct mux_clk *clk);
+
+#endif
diff --git a/drivers/clk/msm/mdss/mdss-hdmi-pll-8996.c b/drivers/clk/msm/mdss/mdss-hdmi-pll-8996.c
new file mode 100644
index 000000000000..b6856f56db49
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-hdmi-pll-8996.c
@@ -0,0 +1,2683 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8996.h>
+
+#include "mdss-pll.h"
+#include "mdss-hdmi-pll.h"
+
+/* CONSTANTS */
+#define HDMI_BIT_CLK_TO_PIX_CLK_RATIO 10
+#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD 3400000000UL
+#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD 1500000000UL
+#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD 750000000
+#define HDMI_CLKS_PLL_DIVSEL 0
+#define HDMI_CORECLK_DIV 5
+#define HDMI_REF_CLOCK 19200000
+#define HDMI_64B_ERR_VAL 0xFFFFFFFFFFFFFFFF
+#define HDMI_VERSION_8996_V1 1
+#define HDMI_VERSION_8996_V2 2
+#define HDMI_VERSION_8996_V3 3
+#define HDMI_VERSION_8996_V3_1_8 4
+
+#define HDMI_VCO_MAX_FREQ 12000000000
+#define HDMI_VCO_MIN_FREQ 8000000000
+#define HDMI_2400MHZ_BIT_CLK_HZ 2400000000UL
+#define HDMI_2250MHZ_BIT_CLK_HZ 2250000000UL
+#define HDMI_2000MHZ_BIT_CLK_HZ 2000000000UL
+#define HDMI_1700MHZ_BIT_CLK_HZ 1700000000UL
+#define HDMI_1200MHZ_BIT_CLK_HZ 1200000000UL
+#define HDMI_1334MHZ_BIT_CLK_HZ 1334000000UL
+#define HDMI_1000MHZ_BIT_CLK_HZ 1000000000UL
+#define HDMI_850MHZ_BIT_CLK_HZ 850000000
+#define HDMI_667MHZ_BIT_CLK_HZ 667000000
+#define HDMI_600MHZ_BIT_CLK_HZ 600000000
+#define HDMI_500MHZ_BIT_CLK_HZ 500000000
+#define HDMI_450MHZ_BIT_CLK_HZ 450000000
+#define HDMI_334MHZ_BIT_CLK_HZ 334000000
+#define HDMI_300MHZ_BIT_CLK_HZ 300000000
+#define HDMI_282MHZ_BIT_CLK_HZ 282000000
+#define HDMI_250MHZ_BIT_CLK_HZ 250000000
+#define HDMI_KHZ_TO_HZ 1000
+
+/* PLL REGISTERS */
+#define QSERDES_COM_ATB_SEL1 (0x000)
+#define QSERDES_COM_ATB_SEL2 (0x004)
+#define QSERDES_COM_FREQ_UPDATE (0x008)
+#define QSERDES_COM_BG_TIMER (0x00C)
+#define QSERDES_COM_SSC_EN_CENTER (0x010)
+#define QSERDES_COM_SSC_ADJ_PER1 (0x014)
+#define QSERDES_COM_SSC_ADJ_PER2 (0x018)
+#define QSERDES_COM_SSC_PER1 (0x01C)
+#define QSERDES_COM_SSC_PER2 (0x020)
+#define QSERDES_COM_SSC_STEP_SIZE1 (0x024)
+#define QSERDES_COM_SSC_STEP_SIZE2 (0x028)
+#define QSERDES_COM_POST_DIV (0x02C)
+#define QSERDES_COM_POST_DIV_MUX (0x030)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN (0x034)
+#define QSERDES_COM_CLK_ENABLE1 (0x038)
+#define QSERDES_COM_SYS_CLK_CTRL (0x03C)
+#define QSERDES_COM_SYSCLK_BUF_ENABLE (0x040)
+#define QSERDES_COM_PLL_EN (0x044)
+#define QSERDES_COM_PLL_IVCO (0x048)
+#define QSERDES_COM_LOCK_CMP1_MODE0 (0x04C)
+#define QSERDES_COM_LOCK_CMP2_MODE0 (0x050)
+#define QSERDES_COM_LOCK_CMP3_MODE0 (0x054)
+#define QSERDES_COM_LOCK_CMP1_MODE1 (0x058)
+#define QSERDES_COM_LOCK_CMP2_MODE1 (0x05C)
+#define QSERDES_COM_LOCK_CMP3_MODE1 (0x060)
+#define QSERDES_COM_LOCK_CMP1_MODE2 (0x064)
+#define QSERDES_COM_CMN_RSVD0 (0x064)
+#define QSERDES_COM_LOCK_CMP2_MODE2 (0x068)
+#define QSERDES_COM_EP_CLOCK_DETECT_CTRL (0x068)
+#define QSERDES_COM_LOCK_CMP3_MODE2 (0x06C)
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS (0x06C)
+#define QSERDES_COM_BG_TRIM (0x070)
+#define QSERDES_COM_CLK_EP_DIV (0x074)
+#define QSERDES_COM_CP_CTRL_MODE0 (0x078)
+#define QSERDES_COM_CP_CTRL_MODE1 (0x07C)
+#define QSERDES_COM_CP_CTRL_MODE2 (0x080)
+#define QSERDES_COM_CMN_RSVD1 (0x080)
+#define QSERDES_COM_PLL_RCTRL_MODE0 (0x084)
+#define QSERDES_COM_PLL_RCTRL_MODE1 (0x088)
+#define QSERDES_COM_PLL_RCTRL_MODE2 (0x08C)
+#define QSERDES_COM_CMN_RSVD2 (0x08C)
+#define QSERDES_COM_PLL_CCTRL_MODE0 (0x090)
+#define QSERDES_COM_PLL_CCTRL_MODE1 (0x094)
+#define QSERDES_COM_PLL_CCTRL_MODE2 (0x098)
+#define QSERDES_COM_CMN_RSVD3 (0x098)
+#define QSERDES_COM_PLL_CNTRL (0x09C)
+#define QSERDES_COM_PHASE_SEL_CTRL (0x0A0)
+#define QSERDES_COM_PHASE_SEL_DC (0x0A4)
+#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL (0x0A8)
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM (0x0A8)
+#define QSERDES_COM_SYSCLK_EN_SEL (0x0AC)
+#define QSERDES_COM_CML_SYSCLK_SEL (0x0B0)
+#define QSERDES_COM_RESETSM_CNTRL (0x0B4)
+#define QSERDES_COM_RESETSM_CNTRL2 (0x0B8)
+#define QSERDES_COM_RESTRIM_CTRL (0x0BC)
+#define QSERDES_COM_RESTRIM_CTRL2 (0x0C0)
+#define QSERDES_COM_RESCODE_DIV_NUM (0x0C4)
+#define QSERDES_COM_LOCK_CMP_EN (0x0C8)
+#define QSERDES_COM_LOCK_CMP_CFG (0x0CC)
+#define QSERDES_COM_DEC_START_MODE0 (0x0D0)
+#define QSERDES_COM_DEC_START_MODE1 (0x0D4)
+#define QSERDES_COM_DEC_START_MODE2 (0x0D8)
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL (0x0D8)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0 (0x0DC)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0 (0x0E0)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0 (0x0E4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE1 (0x0E8)
+#define QSERDES_COM_DIV_FRAC_START2_MODE1 (0x0EC)
+#define QSERDES_COM_DIV_FRAC_START3_MODE1 (0x0F0)
+#define QSERDES_COM_DIV_FRAC_START1_MODE2 (0x0F4)
+#define QSERDES_COM_VCO_TUNE_MINVAL1 (0x0F4)
+#define QSERDES_COM_DIV_FRAC_START2_MODE2 (0x0F8)
+#define QSERDES_COM_VCO_TUNE_MINVAL2 (0x0F8)
+#define QSERDES_COM_DIV_FRAC_START3_MODE2 (0x0FC)
+#define QSERDES_COM_CMN_RSVD4 (0x0FC)
+#define QSERDES_COM_INTEGLOOP_INITVAL (0x100)
+#define QSERDES_COM_INTEGLOOP_EN (0x104)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0 (0x108)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0 (0x10C)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1 (0x110)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1 (0x114)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE2 (0x118)
+#define QSERDES_COM_VCO_TUNE_MAXVAL1 (0x118)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE2 (0x11C)
+#define QSERDES_COM_VCO_TUNE_MAXVAL2 (0x11C)
+#define QSERDES_COM_RES_TRIM_CONTROL2 (0x120)
+#define QSERDES_COM_VCO_TUNE_CTRL (0x124)
+#define QSERDES_COM_VCO_TUNE_MAP (0x128)
+#define QSERDES_COM_VCO_TUNE1_MODE0 (0x12C)
+#define QSERDES_COM_VCO_TUNE2_MODE0 (0x130)
+#define QSERDES_COM_VCO_TUNE1_MODE1 (0x134)
+#define QSERDES_COM_VCO_TUNE2_MODE1 (0x138)
+#define QSERDES_COM_VCO_TUNE1_MODE2 (0x13C)
+#define QSERDES_COM_VCO_TUNE_INITVAL1 (0x13C)
+#define QSERDES_COM_VCO_TUNE2_MODE2 (0x140)
+#define QSERDES_COM_VCO_TUNE_INITVAL2 (0x140)
+#define QSERDES_COM_VCO_TUNE_TIMER1 (0x144)
+#define QSERDES_COM_VCO_TUNE_TIMER2 (0x148)
+#define QSERDES_COM_SAR (0x14C)
+#define QSERDES_COM_SAR_CLK (0x150)
+#define QSERDES_COM_SAR_CODE_OUT_STATUS (0x154)
+#define QSERDES_COM_SAR_CODE_READY_STATUS (0x158)
+#define QSERDES_COM_CMN_STATUS (0x15C)
+#define QSERDES_COM_RESET_SM_STATUS (0x160)
+#define QSERDES_COM_RESTRIM_CODE_STATUS (0x164)
+#define QSERDES_COM_PLLCAL_CODE1_STATUS (0x168)
+#define QSERDES_COM_PLLCAL_CODE2_STATUS (0x16C)
+#define QSERDES_COM_BG_CTRL (0x170)
+#define QSERDES_COM_CLK_SELECT (0x174)
+#define QSERDES_COM_HSCLK_SEL (0x178)
+#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS (0x17C)
+#define QSERDES_COM_PLL_ANALOG (0x180)
+#define QSERDES_COM_CORECLK_DIV (0x184)
+#define QSERDES_COM_SW_RESET (0x188)
+#define QSERDES_COM_CORE_CLK_EN (0x18C)
+#define QSERDES_COM_C_READY_STATUS (0x190)
+#define QSERDES_COM_CMN_CONFIG (0x194)
+#define QSERDES_COM_CMN_RATE_OVERRIDE (0x198)
+#define QSERDES_COM_SVS_MODE_CLK_SEL (0x19C)
+#define QSERDES_COM_DEBUG_BUS0 (0x1A0)
+#define QSERDES_COM_DEBUG_BUS1 (0x1A4)
+#define QSERDES_COM_DEBUG_BUS2 (0x1A8)
+#define QSERDES_COM_DEBUG_BUS3 (0x1AC)
+#define QSERDES_COM_DEBUG_BUS_SEL (0x1B0)
+#define QSERDES_COM_CMN_MISC1 (0x1B4)
+#define QSERDES_COM_CMN_MISC2 (0x1B8)
+#define QSERDES_COM_CORECLK_DIV_MODE1 (0x1BC)
+#define QSERDES_COM_CORECLK_DIV_MODE2 (0x1C0)
+#define QSERDES_COM_CMN_RSVD5 (0x1C0)
+
+/* Tx Channel base addresses */
+#define HDMI_TX_L0_BASE_OFFSET (0x400)
+#define HDMI_TX_L1_BASE_OFFSET (0x600)
+#define HDMI_TX_L2_BASE_OFFSET (0x800)
+#define HDMI_TX_L3_BASE_OFFSET (0xA00)
+
+/* Tx Channel PHY registers */
+#define QSERDES_TX_L0_BIST_MODE_LANENO (0x000)
+#define QSERDES_TX_L0_BIST_INVERT (0x004)
+#define QSERDES_TX_L0_CLKBUF_ENABLE (0x008)
+#define QSERDES_TX_L0_CMN_CONTROL_ONE (0x00C)
+#define QSERDES_TX_L0_CMN_CONTROL_TWO (0x010)
+#define QSERDES_TX_L0_CMN_CONTROL_THREE (0x014)
+#define QSERDES_TX_L0_TX_EMP_POST1_LVL (0x018)
+#define QSERDES_TX_L0_TX_POST2_EMPH (0x01C)
+#define QSERDES_TX_L0_TX_BOOST_LVL_UP_DN (0x020)
+#define QSERDES_TX_L0_HP_PD_ENABLES (0x024)
+#define QSERDES_TX_L0_TX_IDLE_LVL_LARGE_AMP (0x028)
+#define QSERDES_TX_L0_TX_DRV_LVL (0x02C)
+#define QSERDES_TX_L0_TX_DRV_LVL_OFFSET (0x030)
+#define QSERDES_TX_L0_RESET_TSYNC_EN (0x034)
+#define QSERDES_TX_L0_PRE_STALL_LDO_BOOST_EN (0x038)
+#define QSERDES_TX_L0_TX_BAND (0x03C)
+#define QSERDES_TX_L0_SLEW_CNTL (0x040)
+#define QSERDES_TX_L0_INTERFACE_SELECT (0x044)
+#define QSERDES_TX_L0_LPB_EN (0x048)
+#define QSERDES_TX_L0_RES_CODE_LANE_TX (0x04C)
+#define QSERDES_TX_L0_RES_CODE_LANE_RX (0x050)
+#define QSERDES_TX_L0_RES_CODE_LANE_OFFSET (0x054)
+#define QSERDES_TX_L0_PERL_LENGTH1 (0x058)
+#define QSERDES_TX_L0_PERL_LENGTH2 (0x05C)
+#define QSERDES_TX_L0_SERDES_BYP_EN_OUT (0x060)
+#define QSERDES_TX_L0_DEBUG_BUS_SEL (0x064)
+#define QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN (0x068)
+#define QSERDES_TX_L0_TX_POL_INV (0x06C)
+#define QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN (0x070)
+#define QSERDES_TX_L0_BIST_PATTERN1 (0x074)
+#define QSERDES_TX_L0_BIST_PATTERN2 (0x078)
+#define QSERDES_TX_L0_BIST_PATTERN3 (0x07C)
+#define QSERDES_TX_L0_BIST_PATTERN4 (0x080)
+#define QSERDES_TX_L0_BIST_PATTERN5 (0x084)
+#define QSERDES_TX_L0_BIST_PATTERN6 (0x088)
+#define QSERDES_TX_L0_BIST_PATTERN7 (0x08C)
+#define QSERDES_TX_L0_BIST_PATTERN8 (0x090)
+#define QSERDES_TX_L0_LANE_MODE (0x094)
+#define QSERDES_TX_L0_IDAC_CAL_LANE_MODE (0x098)
+#define QSERDES_TX_L0_IDAC_CAL_LANE_MODE_CONFIGURATION (0x09C)
+#define QSERDES_TX_L0_ATB_SEL1 (0x0A0)
+#define QSERDES_TX_L0_ATB_SEL2 (0x0A4)
+#define QSERDES_TX_L0_RCV_DETECT_LVL (0x0A8)
+#define QSERDES_TX_L0_RCV_DETECT_LVL_2 (0x0AC)
+#define QSERDES_TX_L0_PRBS_SEED1 (0x0B0)
+#define QSERDES_TX_L0_PRBS_SEED2 (0x0B4)
+#define QSERDES_TX_L0_PRBS_SEED3 (0x0B8)
+#define QSERDES_TX_L0_PRBS_SEED4 (0x0BC)
+#define QSERDES_TX_L0_RESET_GEN (0x0C0)
+#define QSERDES_TX_L0_RESET_GEN_MUXES (0x0C4)
+#define QSERDES_TX_L0_TRAN_DRVR_EMP_EN (0x0C8)
+#define QSERDES_TX_L0_TX_INTERFACE_MODE (0x0CC)
+#define QSERDES_TX_L0_PWM_CTRL (0x0D0)
+#define QSERDES_TX_L0_PWM_ENCODED_OR_DATA (0x0D4)
+#define QSERDES_TX_L0_PWM_GEAR_1_DIVIDER_BAND2 (0x0D8)
+#define QSERDES_TX_L0_PWM_GEAR_2_DIVIDER_BAND2 (0x0DC)
+#define QSERDES_TX_L0_PWM_GEAR_3_DIVIDER_BAND2 (0x0E0)
+#define QSERDES_TX_L0_PWM_GEAR_4_DIVIDER_BAND2 (0x0E4)
+#define QSERDES_TX_L0_PWM_GEAR_1_DIVIDER_BAND0_1 (0x0E8)
+#define QSERDES_TX_L0_PWM_GEAR_2_DIVIDER_BAND0_1 (0x0EC)
+#define QSERDES_TX_L0_PWM_GEAR_3_DIVIDER_BAND0_1 (0x0F0)
+#define QSERDES_TX_L0_PWM_GEAR_4_DIVIDER_BAND0_1 (0x0F4)
+#define QSERDES_TX_L0_VMODE_CTRL1 (0x0F8)
+#define QSERDES_TX_L0_VMODE_CTRL2 (0x0FC)
+#define QSERDES_TX_L0_TX_ALOG_INTF_OBSV_CNTL (0x100)
+#define QSERDES_TX_L0_BIST_STATUS (0x104)
+#define QSERDES_TX_L0_BIST_ERROR_COUNT1 (0x108)
+#define QSERDES_TX_L0_BIST_ERROR_COUNT2 (0x10C)
+#define QSERDES_TX_L0_TX_ALOG_INTF_OBSV (0x110)
+
+/* HDMI PHY REGISTERS */
+#define HDMI_PHY_BASE_OFFSET (0xC00)
+
+#define HDMI_PHY_CFG (0x00)
+#define HDMI_PHY_PD_CTL (0x04)
+#define HDMI_PHY_MODE (0x08)
+#define HDMI_PHY_MISR_CLEAR (0x0C)
+#define HDMI_PHY_TX0_TX1_BIST_CFG0 (0x10)
+#define HDMI_PHY_TX0_TX1_BIST_CFG1 (0x14)
+#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE0 (0x18)
+#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE1 (0x1C)
+#define HDMI_PHY_TX0_TX1_BIST_PATTERN0 (0x20)
+#define HDMI_PHY_TX0_TX1_BIST_PATTERN1 (0x24)
+#define HDMI_PHY_TX2_TX3_BIST_CFG0 (0x28)
+#define HDMI_PHY_TX2_TX3_BIST_CFG1 (0x2C)
+#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE0 (0x30)
+#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE1 (0x34)
+#define HDMI_PHY_TX2_TX3_BIST_PATTERN0 (0x38)
+#define HDMI_PHY_TX2_TX3_BIST_PATTERN1 (0x3C)
+#define HDMI_PHY_DEBUG_BUS_SEL (0x40)
+#define HDMI_PHY_TXCAL_CFG0 (0x44)
+#define HDMI_PHY_TXCAL_CFG1 (0x48)
+#define HDMI_PHY_TX0_TX1_LANE_CTL (0x4C)
+#define HDMI_PHY_TX2_TX3_LANE_CTL (0x50)
+#define HDMI_PHY_LANE_BIST_CONFIG (0x54)
+#define HDMI_PHY_CLOCK (0x58)
+#define HDMI_PHY_MISC1 (0x5C)
+#define HDMI_PHY_MISC2 (0x60)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS0 (0x64)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS1 (0x68)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS2 (0x6C)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS0 (0x70)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS1 (0x74)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS2 (0x78)
+#define HDMI_PHY_PRE_MISR_STATUS0 (0x7C)
+#define HDMI_PHY_PRE_MISR_STATUS1 (0x80)
+#define HDMI_PHY_PRE_MISR_STATUS2 (0x84)
+#define HDMI_PHY_PRE_MISR_STATUS3 (0x88)
+#define HDMI_PHY_POST_MISR_STATUS0 (0x8C)
+#define HDMI_PHY_POST_MISR_STATUS1 (0x90)
+#define HDMI_PHY_POST_MISR_STATUS2 (0x94)
+#define HDMI_PHY_POST_MISR_STATUS3 (0x98)
+#define HDMI_PHY_STATUS (0x9C)
+#define HDMI_PHY_MISC3_STATUS (0xA0)
+#define HDMI_PHY_MISC4_STATUS (0xA4)
+#define HDMI_PHY_DEBUG_BUS0 (0xA8)
+#define HDMI_PHY_DEBUG_BUS1 (0xAC)
+#define HDMI_PHY_DEBUG_BUS2 (0xB0)
+#define HDMI_PHY_DEBUG_BUS3 (0xB4)
+#define HDMI_PHY_PHY_REVISION_ID0 (0xB8)
+#define HDMI_PHY_PHY_REVISION_ID1 (0xBC)
+#define HDMI_PHY_PHY_REVISION_ID2 (0xC0)
+#define HDMI_PHY_PHY_REVISION_ID3 (0xC4)
+
+#define HDMI_PLL_POLL_MAX_READS 100
+#define HDMI_PLL_POLL_TIMEOUT_US 1500
+
+/* Commonly used HDMI pixel clock rates, in kHz (per the _KHZ suffixes). */
+enum hdmi_pll_freqs {
+	HDMI_PCLK_25200_KHZ,
+	HDMI_PCLK_27027_KHZ,
+	HDMI_PCLK_27000_KHZ,
+	HDMI_PCLK_74250_KHZ,
+	HDMI_PCLK_148500_KHZ,
+	HDMI_PCLK_154000_KHZ,
+	HDMI_PCLK_268500_KHZ,
+	HDMI_PCLK_297000_KHZ,
+	HDMI_PCLK_594000_KHZ,
+	HDMI_PCLK_MAX
+};
+
+/*
+ * Computed register values for one HDMI rate. Field names mirror the
+ * QSERDES_COM_*/QSERDES_TX_L*_*/HDMI_PHY_* register offsets defined above.
+ */
+struct hdmi_8996_phy_pll_reg_cfg {
+	u32 tx_l0_lane_mode;
+	u32 tx_l2_lane_mode;
+	u32 tx_l0_tx_band;
+	u32 tx_l1_tx_band;
+	u32 tx_l2_tx_band;
+	u32 tx_l3_tx_band;
+	u32 com_svs_mode_clk_sel;
+	u32 com_hsclk_sel;
+	u32 com_pll_cctrl_mode0;
+	u32 com_pll_rctrl_mode0;
+	u32 com_cp_ctrl_mode0;
+	u32 com_dec_start_mode0;
+	u32 com_div_frac_start1_mode0;
+	u32 com_div_frac_start2_mode0;
+	u32 com_div_frac_start3_mode0;
+	u32 com_integloop_gain0_mode0;
+	u32 com_integloop_gain1_mode0;
+	u32 com_lock_cmp_en;
+	u32 com_lock_cmp1_mode0;
+	u32 com_lock_cmp2_mode0;
+	u32 com_lock_cmp3_mode0;
+	u32 com_core_clk_en;
+	u32 com_coreclk_div;
+	u32 com_restrim_ctrl;
+	u32 com_vco_tune_ctrl;
+
+	u32 tx_l0_tx_drv_lvl;
+	u32 tx_l0_tx_emp_post1_lvl;
+	u32 tx_l1_tx_drv_lvl;
+	u32 tx_l1_tx_emp_post1_lvl;
+	u32 tx_l2_tx_drv_lvl;
+	u32 tx_l2_tx_emp_post1_lvl;
+	u32 tx_l3_tx_drv_lvl;
+	u32 tx_l3_tx_emp_post1_lvl;
+	u32 tx_l0_vmode_ctrl1;
+	u32 tx_l0_vmode_ctrl2;
+	u32 tx_l1_vmode_ctrl1;
+	u32 tx_l1_vmode_ctrl2;
+	u32 tx_l2_vmode_ctrl1;
+	u32 tx_l2_vmode_ctrl2;
+	u32 tx_l3_vmode_ctrl1;
+	u32 tx_l3_vmode_ctrl2;
+	u32 tx_l0_res_code_lane_tx;
+	u32 tx_l1_res_code_lane_tx;
+	u32 tx_l2_res_code_lane_tx;
+	u32 tx_l3_res_code_lane_tx;
+
+	u32 phy_mode;
+};
+
+/* Post-divider selection result for 8996 v3 (filled by the v3 search below). */
+struct hdmi_8996_v3_post_divider {
+	u64 vco_freq;
+	u64 hsclk_divsel;
+	u64 vco_ratio;
+	u64 tx_band_sel;
+	u64 half_rate_mode;
+};
+
+/* Convert the embedded generic clk back to its enclosing HDMI VCO clock. */
+static inline struct hdmi_pll_vco_clk *to_hdmi_8996_vco_clk(struct clk *clk)
+{
+	return container_of(clk, struct hdmi_pll_vco_clk, c);
+}
+
+/*
+ * Map a bit clock (< ~2.4 GHz) to the PLL post divider for 8996 v1.
+ * The ladder is intentionally non-monotonic (2,3,4,3,4,3,4): presumably
+ * each band also changes the TX band divider (see
+ * hdmi_8996_v1_get_tx_band()), keeping the VCO in range — verify against
+ * the HW programming guide. Returns HDMI_64B_ERR_VAL below 300 MHz.
+ */
+static inline u64 hdmi_8996_v1_get_post_div_lt_2g(u64 bclk)
+{
+	if (bclk >= HDMI_2400MHZ_BIT_CLK_HZ)
+		return 2;
+	else if (bclk >= HDMI_1700MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_1200MHZ_BIT_CLK_HZ)
+		return 4;
+	else if (bclk >= HDMI_850MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_600MHZ_BIT_CLK_HZ)
+		return 4;
+	else if (bclk >= HDMI_450MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_300MHZ_BIT_CLK_HZ)
+		return 4;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * v2 variant of the low-frequency post divider selection: thresholds are
+ * derived from @vco_range (4*range/6, range/2, 4*range/12, range/4,
+ * 4*range/24, range/8) instead of hardcoded frequencies. 64-bit division
+ * goes through do_div() so the code also works on 32-bit kernels.
+ * Returns HDMI_64B_ERR_VAL when bclk is below every threshold.
+ */
+static inline u64 hdmi_8996_v2_get_post_div_lt_2g(u64 bclk, u64 vco_range)
+{
+	u64 hdmi_8ghz = vco_range;
+	u64 tmp_calc;
+
+	hdmi_8ghz <<= 2;	/* 4 * vco_range */
+	tmp_calc = hdmi_8ghz;
+	do_div(tmp_calc, 6U);
+
+	if (bclk >= vco_range)
+		return 2;
+	else if (bclk >= tmp_calc)
+		return 3;
+	else if (bclk >= vco_range >> 1)
+		return 4;
+
+	tmp_calc = hdmi_8ghz;
+	do_div(tmp_calc, 12U);
+	if (bclk >= tmp_calc)
+		return 3;
+	else if (bclk >= vco_range >> 2)
+		return 4;
+
+	tmp_calc = hdmi_8ghz;
+	do_div(tmp_calc, 24U);
+	if (bclk >= tmp_calc)
+		return 3;
+	else if (bclk >= vco_range >> 3)
+		return 4;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * Above the VCO range the post divider is simply HSCLK_SEL + 1 for the
+ * valid codes 0..3. hsclk is unsigned, so the original "hsclk >= 0"
+ * comparison was always true (-Wtype-limits); only the upper bound is
+ * meaningful. Returns HDMI_64B_ERR_VAL for out-of-range codes.
+ */
+static inline u64 hdmi_8996_v2_get_post_div_gt_2g(u64 hsclk)
+{
+	if (hsclk <= 3)
+		return hsclk + 1;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * Core clock divider for bit clocks below 2 GHz. Adjacent frequency
+ * bands in the original six-way ladder share a divider (1334/1000 -> 1,
+ * 667/500 -> 2, 334/250 -> 3), so three thresholds suffice. Returns
+ * HDMI_64B_ERR_VAL below 250 MHz.
+ */
+static inline u64 hdmi_8996_get_coreclk_div_lt_2g(u64 bclk)
+{
+	if (bclk >= HDMI_1000MHZ_BIT_CLK_HZ)
+		return 1;
+	if (bclk >= HDMI_500MHZ_BIT_CLK_HZ)
+		return 2;
+	if (bclk >= HDMI_250MHZ_BIT_CLK_HZ)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * Total core clock division: CLKS_PLL_DIVSEL 0 doubles and 1 quadruples
+ * the core clock divider. Any other divsel value yields HDMI_64B_ERR_VAL.
+ */
+static inline u64 hdmi_8996_get_coreclk_div_ratio(u64 clks_pll_divsel,
+						  u64 coreclk_div)
+{
+	switch (clks_pll_divsel) {
+	case 0:
+		return coreclk_div * 2;
+	case 1:
+		return coreclk_div * 4;
+	default:
+		return HDMI_64B_ERR_VAL;
+	}
+}
+
+/*
+ * TX band selection for 8996 v1: band n covers bit clocks down to
+ * 2.4 GHz / 2^n. Returns HDMI_64B_ERR_VAL below 300 MHz.
+ */
+static inline u64 hdmi_8996_v1_get_tx_band(u64 bclk)
+{
+	if (bclk >= 2400000000UL)
+		return 0;
+	if (bclk >= 1200000000UL)
+		return 1;
+	if (bclk >= 600000000UL)
+		return 2;
+	if (bclk >= 300000000UL)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * TX band selection for 8996 v2: band n covers bit clocks down to
+ * vco_range / 2^n, for n in 0..3. Returns HDMI_64B_ERR_VAL when bclk
+ * is below vco_range / 8.
+ */
+static inline u64 hdmi_8996_v2_get_tx_band(u64 bclk, u64 vco_range)
+{
+	u64 band;
+
+	for (band = 0; band < 4; band++) {
+		if (bclk >= (vco_range >> band))
+			return band;
+	}
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * HSCLK_SEL code for 8996 v1 from the PLL output frequency: 0 for
+ * >= 9.6 GHz, 1 for >= 4.8 GHz, 2 for >= 3.2 GHz, 3 for >= 2.4 GHz.
+ * Returns HDMI_64B_ERR_VAL below 2.4 GHz.
+ */
+static inline u64 hdmi_8996_v1_get_hsclk(u64 fdata)
+{
+	if (fdata >= 9600000000UL)
+		return 0;
+	else if (fdata >= 4800000000UL)
+		return 1;
+	else if (fdata >= 3200000000UL)
+		return 2;
+	else if (fdata >= 2400000000UL)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * HSCLK_SEL code for 8996 v2: thresholds are 4x, 2x, 4/3x and 1x of
+ * @vco_range. The 4/3 threshold is computed with do_div() so the
+ * division also works on 32-bit kernels. Returns HDMI_64B_ERR_VAL
+ * below vco_range.
+ */
+static inline u64 hdmi_8996_v2_get_hsclk(u64 fdata, u64 vco_range)
+{
+	u64 four_thirds = vco_range;
+
+	four_thirds <<= 2;
+	do_div(four_thirds, 3U);
+
+	if (fdata >= (vco_range << 2))
+		return 0;
+	if (fdata >= (vco_range << 1))
+		return 1;
+	if (fdata >= four_thirds)
+		return 2;
+	if (fdata >= vco_range)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+/*
+ * Compute the VCO frequency for 8996 v2 as
+ * bclk * post_div_ratio * 2^tx_band. Both helper lookups can fail with
+ * HDMI_64B_ERR_VAL; the original code shifted 1U by that error value
+ * (shift >= type width is undefined behavior) and multiplied garbage.
+ * Propagate HDMI_64B_ERR_VAL instead so callers can detect the failure.
+ */
+static inline u64 hdmi_8996_v2_get_vco_freq(u64 bclk, u64 vco_range)
+{
+	u64 tx_band_sel = hdmi_8996_v2_get_tx_band(bclk, vco_range);
+	u64 tx_band_div_ratio;
+	u64 pll_post_div_ratio;
+
+	if (tx_band_sel == HDMI_64B_ERR_VAL)
+		return HDMI_64B_ERR_VAL;
+	tx_band_div_ratio = 1U << tx_band_sel;
+
+	if (bclk >= vco_range) {
+		u64 hsclk = hdmi_8996_v2_get_hsclk(bclk, vco_range);
+
+		pll_post_div_ratio = hdmi_8996_v2_get_post_div_gt_2g(hsclk);
+	} else {
+		pll_post_div_ratio = hdmi_8996_v2_get_post_div_lt_2g(bclk,
+								vco_range);
+	}
+	if (pll_post_div_ratio == HDMI_64B_ERR_VAL)
+		return HDMI_64B_ERR_VAL;
+
+	return bclk * (pll_post_div_ratio * tx_band_div_ratio);
+}
+
+/*
+ * PLL output frequency (fdata) for 8996 v2. At or above the VCO range
+ * the bit clock feeds the PLL directly; below it, fdata is the computed
+ * VCO frequency divided by the low-frequency post divider (do_div for
+ * 32-bit safety). Propagates HDMI_64B_ERR_VAL from the divider lookup.
+ */
+static inline u64 hdmi_8996_v2_get_fdata(u64 bclk, u64 vco_range)
+{
+	if (bclk >= vco_range) {
+		return bclk;
+	} else {
+		u64 tmp_calc = hdmi_8996_v2_get_vco_freq(bclk, vco_range);
+		u64 pll_post_div_ratio_lt_2g = hdmi_8996_v2_get_post_div_lt_2g(
+					bclk, vco_range);
+		if (pll_post_div_ratio_lt_2g == HDMI_64B_ERR_VAL)
+			return HDMI_64B_ERR_VAL;
+
+		do_div(tmp_calc, pll_post_div_ratio_lt_2g);
+		return tmp_calc;
+	}
+}
+
+/*
+ * Charge pump control value: fractional mode or SSC uses 0xB
+ * (ROUND(11/(19.2/20)) — the reference clock never changes, so the
+ * result is hardcoded); pure integer mode uses 0x23.
+ */
+static inline u64 hdmi_8996_get_cpctrl(u64 frac_start, bool gen_ssc)
+{
+	if (frac_start != 0 || gen_ssc)
+		return 0xB;
+
+	return 0x23;
+}
+
+/* Loop filter resistor control: 0x16 for fractional/SSC, 0x10 otherwise. */
+static inline u64 hdmi_8996_get_rctrl(u64 frac_start, bool gen_ssc)
+{
+	return (frac_start != 0 || gen_ssc) ? 0x16 : 0x10;
+}
+
+/* Loop filter capacitor control: 0x28 for fractional/SSC, 0x1 otherwise. */
+static inline u64 hdmi_8996_get_cctrl(u64 frac_start, bool gen_ssc)
+{
+	return (frac_start != 0 || gen_ssc) ? 0x28 : 0x1;
+}
+
+/* Integral loop gain: 0x80 for fractional/SSC, 0xC4 otherwise. */
+static inline u64 hdmi_8996_get_integloop_gain(u64 frac_start, bool gen_ssc)
+{
+	return (frac_start != 0 || gen_ssc) ? 0x80 : 0xC4;
+}
+
+/*
+ * Integral-loop gain for 8996 v3: the fractional/integer base value is
+ * scaled by the digital clock divider select (1 above the digital frequency
+ * threshold, else 2) and clamped to the register maximum, 0x7FE.
+ */
+static inline u64 hdmi_8996_v3_get_integloop_gain(u64 frac_start, u64 bclk,
+		bool gen_ssc)
+{
+	u64 gain = (frac_start || gen_ssc) ? 0x40 : 0xC4;
+
+	gain <<= (bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD) ? 1 : 2;
+
+	return (gain > 0x7FE) ? 0x7FE : gain;
+}
+
+/*
+ * Compute the VCO tune code: (13000 - fdata*div/1MHz - 256) / 5.
+ *
+ * NOTE(review): the subtraction is performed in u64; if fdata * div
+ * exceeds ~12.744 GHz the intermediate wraps around.  Presumably the
+ * supported frequency plans keep it below that - TODO confirm.
+ */
+static inline u64 hdmi_8996_get_vco_tune(u64 fdata, u64 div)
+{
+	u64 vco_tune;
+
+	vco_tune = fdata * div;
+	do_div(vco_tune, 1000000);
+	vco_tune = 13000 - vco_tune - 256;
+	do_div(vco_tune, 5);
+
+	return vco_tune;
+}
+
+/*
+ * Lock-comparator value (v1/v2): pll_cmp_cnt * core_clk / HDMI_REF_CLOCK,
+ * rounded to the nearest integer, minus one (the register expects N-1).
+ */
+static inline u64 hdmi_8996_get_pll_cmp(u64 pll_cmp_cnt, u64 core_clk)
+{
+	u64 pll_cmp;
+	u64 rem;
+	pll_cmp = pll_cmp_cnt * core_clk;
+	rem = do_div(pll_cmp, HDMI_REF_CLOCK);
+	/* Round up when the remainder is more than half the divisor */
+	if (rem > (HDMI_REF_CLOCK >> 1))
+		pll_cmp++;
+	pll_cmp -= 1;
+
+	return pll_cmp;
+}
+
+/*
+ * Lock-comparator value for v3: pll_cmp_cnt * fdata / (HDMI_REF_CLOCK * 10),
+ * rounded to nearest, minus one (register expects N-1).  The extra factor
+ * of 10 accounts for fdata being the bit clock (10x the pixel clock).
+ */
+static inline u64 hdmi_8996_v3_get_pll_cmp(u64 pll_cmp_cnt, u64 fdata)
+{
+	u64 dividend = pll_cmp_cnt * fdata;
+	u64 divisor = HDMI_REF_CLOCK * 10;
+	u64 rem;
+
+	rem = do_div(dividend, divisor);
+	/* Round to nearest */
+	if (rem > (divisor >> 1))
+		dividend++;
+
+	return dividend - 1;
+}
+
+/*
+ * Select the v3 post-divider configuration for the given bit clock.
+ *
+ * For each candidate (vco_ratio, tx_band) pair the resulting VCO frequency
+ * is computed; the smallest in-range VCO at or above HDMI_VCO_MIN_FREQ is
+ * chosen (vco_optimal starts at HDMI_VCO_MAX_FREQ, which also enforces the
+ * upper bound).  If no combination fits, the search is retried once with
+ * half-rate mode (bclk / 2).  On success @pd is filled in and 0 is
+ * returned; -EINVAL if no valid divider exists.
+ */
+static int hdmi_8996_v3_get_post_div(struct hdmi_8996_v3_post_divider *pd,
+						u64 bclk)
+{
+	u32 ratio[] = {2, 3, 4, 5, 6, 9, 10, 12, 14, 15, 20, 21, 25, 28, 35};
+	u32 tx_band_sel[] = {0, 1, 2, 3};
+	u64 vco_freq[60];	/* 15 ratios x 4 tx bands */
+	u64 vco, vco_optimal, half_rate_mode = 0;
+	int vco_optimal_index, vco_freq_index;
+	int i, j, k, x;
+
+	/* Pass 0: full rate; pass 1 (if needed): half rate */
+	for (i = 0; i <= 1; i++) {
+		vco_optimal = HDMI_VCO_MAX_FREQ;
+		vco_optimal_index = -1;
+		vco_freq_index = 0;
+		for (j = 0; j < 15; j++) {
+			for (k = 0; k < 4; k++) {
+				u64 ratio_mult = ratio[j] << tx_band_sel[k];
+
+				vco = bclk >> half_rate_mode;
+				vco *= ratio_mult;
+				vco_freq[vco_freq_index++] = vco;
+			}
+		}
+
+		/* Pick the lowest VCO frequency inside the legal window */
+		for (x = 0; x < 60; x++) {
+			u64 vco_tmp = vco_freq[x];
+
+			if ((vco_tmp >= HDMI_VCO_MIN_FREQ) &&
+					(vco_tmp <= vco_optimal)) {
+				vco_optimal = vco_tmp;
+				vco_optimal_index = x;
+			}
+		}
+
+		if (vco_optimal_index == -1) {
+			if (!half_rate_mode)
+				half_rate_mode++;
+			else
+				return -EINVAL;
+		} else {
+			pd->vco_freq = vco_optimal;
+			/* Index layout follows the fill order above:
+			 * tx band varies fastest, ratio slowest. */
+			pd->tx_band_sel = tx_band_sel[vco_optimal_index % 4];
+			pd->vco_ratio = ratio[vco_optimal_index / 4];
+			break;
+		}
+	}
+
+	/* Translate the chosen ratio to the HSCLK divider-select encoding;
+	 * every entry of ratio[] is covered below. */
+	switch (pd->vco_ratio) {
+	case 2:
+		pd->hsclk_divsel = 0;
+		break;
+	case 3:
+		pd->hsclk_divsel = 4;
+		break;
+	case 4:
+		pd->hsclk_divsel = 8;
+		break;
+	case 5:
+		pd->hsclk_divsel = 12;
+		break;
+	case 6:
+		pd->hsclk_divsel = 1;
+		break;
+	case 9:
+		pd->hsclk_divsel = 5;
+		break;
+	case 10:
+		pd->hsclk_divsel = 2;
+		break;
+	case 12:
+		pd->hsclk_divsel = 9;
+		break;
+	case 14:
+		pd->hsclk_divsel = 3;
+		break;
+	case 15:
+		pd->hsclk_divsel = 13;
+		break;
+	case 20:
+		pd->hsclk_divsel = 10;
+		break;
+	case 21:
+		pd->hsclk_divsel = 7;
+		break;
+	case 25:
+		pd->hsclk_divsel = 14;
+		break;
+	case 28:
+		pd->hsclk_divsel = 11;
+		break;
+	case 35:
+		pd->hsclk_divsel = 15;
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Compute the 8996 v1 PHY/PLL register configuration for @pix_clk (Hz).
+ * Derives the bit clock (10x pixel clock), VCO frequency, post/TX-band
+ * dividers, fractional-N settings and per-lane drive levels, then fills
+ * @cfg with register-ready values.  Returns 0 on success, -EINVAL when a
+ * derived divider falls outside the supported range.
+ */
+static int hdmi_8996_v1_calculate(u32 pix_clk,
+			       struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+	int rc = -EINVAL;
+	u64 fdata, clk_divtx, tmds_clk;
+	u64 bclk;
+	u64 post_div_gt_2g;
+	u64 post_div_lt_2g;
+	u64 coreclk_div1_lt_2g;
+	u64 core_clk_div_ratio;
+	u64 core_clk;
+	u64 pll_cmp;
+	u64 tx_band;
+	u64 tx_band_div_ratio;
+	u64 hsclk;
+	u64 dec_start;
+	u64 frac_start;
+	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
+	u64 cpctrl;
+	u64 rctrl;
+	u64 cctrl;
+	u64 integloop_gain;
+	u64 vco_tune;
+	u64 vco_freq;
+	u64 rem;
+
+	/* FDATA, CLK_DIVTX, PIXEL_CLK, TMDS_CLK */
+	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	/* Above the HDMI 2.0 threshold the TMDS clock is 1/4 the bit clock */
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+		tmds_clk = bclk/4;
+	else
+		tmds_clk = bclk;
+
+	post_div_lt_2g = hdmi_8996_v1_get_post_div_lt_2g(bclk);
+	if (post_div_lt_2g == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	coreclk_div1_lt_2g = hdmi_8996_get_coreclk_div_lt_2g(bclk);
+
+	core_clk_div_ratio = hdmi_8996_get_coreclk_div_ratio(
+				HDMI_CLKS_PLL_DIVSEL, HDMI_CORECLK_DIV);
+
+	tx_band = hdmi_8996_v1_get_tx_band(bclk);
+	if (tx_band == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	tx_band_div_ratio = 1 << tx_band;
+
+	if (bclk >= HDMI_2400MHZ_BIT_CLK_HZ) {
+		/* High rates: run the PLL at the bit clock directly */
+		fdata = bclk;
+		hsclk = hdmi_8996_v1_get_hsclk(fdata);
+		if (hsclk == HDMI_64B_ERR_VAL)
+			goto fail;
+
+		post_div_gt_2g = (hsclk <= 3) ? (hsclk + 1) : HDMI_64B_ERR_VAL;
+		if (post_div_gt_2g == HDMI_64B_ERR_VAL)
+			goto fail;
+
+		vco_freq = bclk * (post_div_gt_2g * tx_band_div_ratio);
+		clk_divtx = vco_freq;
+		do_div(clk_divtx, post_div_gt_2g);
+	} else {
+		/* Low rates: multiply up into the VCO range first */
+		vco_freq = bclk * (post_div_lt_2g * tx_band_div_ratio);
+		fdata = vco_freq;
+		do_div(fdata, post_div_lt_2g);
+		hsclk = hdmi_8996_v1_get_hsclk(fdata);
+		if (hsclk == HDMI_64B_ERR_VAL)
+			goto fail;
+
+		clk_divtx = vco_freq;
+		do_div(clk_divtx, post_div_lt_2g);
+		post_div_gt_2g = (hsclk <= 3) ? (hsclk + 1) : HDMI_64B_ERR_VAL;
+		if (post_div_gt_2g == HDMI_64B_ERR_VAL)
+			goto fail;
+	}
+
+	/* Decimal and fraction values */
+	dec_start = fdata * post_div_gt_2g;
+	do_div(dec_start, pll_divisor);
+	/* frac_start is the 20-bit fractional part of
+	 * fdata * post_div / pll_divisor, rounded to nearest */
+	frac_start = ((pll_divisor - (((dec_start + 1) * pll_divisor) -
+			(fdata * post_div_gt_2g))) * (1 << 20));
+	rem = do_div(frac_start, pll_divisor);
+	/* Round off frac_start to closest integer */
+	if (rem >= (pll_divisor >> 1))
+		frac_start++;
+
+	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
+	rctrl = hdmi_8996_get_rctrl(frac_start, false);
+	cctrl = hdmi_8996_get_cctrl(frac_start, false);
+	integloop_gain = hdmi_8996_get_integloop_gain(frac_start, false);
+	vco_tune = hdmi_8996_get_vco_tune(fdata, post_div_gt_2g);
+
+	core_clk = clk_divtx;
+	do_div(core_clk, core_clk_div_ratio);
+	pll_cmp = hdmi_8996_get_pll_cmp(1024, core_clk);
+
+	/* Debug dump */
+	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
+	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
+	DEV_DBG("%s: CLK_DIVTX: %llu\n", __func__, clk_divtx);
+	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
+	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
+	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
+	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
+	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
+	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
+	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
+	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
+	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
+	DEV_DBG("%s: VCO_TUNE: %llu\n", __func__, vco_tune);
+	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
+	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
+
+	/* Convert these values to register specific values */
+	cfg->tx_l0_lane_mode = 0x3;
+	cfg->tx_l2_lane_mode = 0x3;
+	cfg->tx_l0_tx_band = tx_band + 4;
+	cfg->tx_l1_tx_band = tx_band + 4;
+	cfg->tx_l2_tx_band = tx_band + 4;
+	cfg->tx_l3_tx_band = tx_band + 4;
+	cfg->tx_l0_res_code_lane_tx = 0x33;
+	cfg->tx_l1_res_code_lane_tx = 0x33;
+	cfg->tx_l2_res_code_lane_tx = 0x33;
+	cfg->tx_l3_res_code_lane_tx = 0x33;
+	cfg->com_restrim_ctrl = 0x0;
+	cfg->com_vco_tune_ctrl = 0x1C;
+
+	cfg->com_svs_mode_clk_sel =
+			(bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2);
+	cfg->com_hsclk_sel = (0x28 | hsclk);
+	cfg->com_pll_cctrl_mode0 = cctrl;
+	cfg->com_pll_rctrl_mode0 = rctrl;
+	cfg->com_cp_ctrl_mode0 = cpctrl;
+	cfg->com_dec_start_mode0 = dec_start;
+	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
+	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
+	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
+	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
+	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
+	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
+	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->com_core_clk_en = (0x6C | (HDMI_CLKS_PLL_DIVSEL << 4));
+	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+
+	/* Per-band lane drive strength / pre-emphasis / vmode settings */
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x22;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->com_restrim_ctrl = 0x0;
+	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x25;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->com_restrim_ctrl = 0x0;
+	} else {
+		cfg->tx_l0_tx_drv_lvl = 0x20;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l1_tx_drv_lvl = 0x20;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l2_tx_drv_lvl = 0x20;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l3_tx_drv_lvl = 0x20;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0E;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0E;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0E;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x0E;
+		cfg->com_restrim_ctrl = 0xD8;
+	}
+
+	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
+	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
+	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
+	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
+	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
+	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
+					cfg->com_svs_mode_clk_sel);
+	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
+	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
+					cfg->com_pll_cctrl_mode0);
+	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
+					cfg->com_pll_rctrl_mode0);
+	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
+					cfg->com_cp_ctrl_mode0);
+	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
+					cfg->com_dec_start_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
+					cfg->com_div_frac_start1_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
+					cfg->com_div_frac_start2_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
+					cfg->com_div_frac_start3_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
+					cfg->com_integloop_gain0_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
+					cfg->com_integloop_gain1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
+					cfg->com_lock_cmp1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
+					cfg->com_lock_cmp2_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
+					cfg->com_lock_cmp3_mode0);
+	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
+	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
+	DEV_DBG("PLL PARAM: com_restrim_ctrl = 0x%x\n", cfg->com_restrim_ctrl);
+
+	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
+					cfg->tx_l0_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
+					cfg->tx_l1_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
+					cfg->tx_l2_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
+					cfg->tx_l3_tx_emp_post1_lvl);
+
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: tx_l0_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l0_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l1_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l1_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l2_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l2_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l3_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l3_res_code_lane_tx);
+
+	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
+	rc = 0;
+fail:
+	return rc;
+}
+
+/*
+ * Compute the 8996 v2 PHY/PLL register configuration for @pix_clk (Hz).
+ * Like the v1 path but the VCO range is selected from the bit clock
+ * (2.0 GHz below HDMI_282MHZ_BIT_CLK_HZ, else 2.25 GHz) and the v2 helper
+ * set derives fdata/hsclk/post-div.  Returns 0 on success, -EINVAL when a
+ * derived value is out of range.
+ */
+static int hdmi_8996_v2_calculate(u32 pix_clk,
+			       struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+	int rc = -EINVAL;
+	u64 fdata, clk_divtx, tmds_clk;
+	u64 bclk;
+	u64 post_div;
+	u64 core_clk_div;
+	u64 core_clk_div_ratio;
+	u64 core_clk;
+	u64 pll_cmp;
+	u64 tx_band;
+	u64 tx_band_div_ratio;
+	u64 hsclk;
+	u64 dec_start;
+	u64 frac_start;
+	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
+	u64 cpctrl;
+	u64 rctrl;
+	u64 cctrl;
+	u64 integloop_gain;
+	u64 vco_tune;
+	u64 vco_freq;
+	u64 vco_range;
+	u64 rem;
+
+	/* FDATA, CLK_DIVTX, PIXEL_CLK, TMDS_CLK */
+	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	/* Above the HDMI 2.0 threshold the TMDS clock is pix_clk / 4 */
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+		tmds_clk = pix_clk >> 2;
+	else
+		tmds_clk = pix_clk;
+
+	vco_range = bclk < HDMI_282MHZ_BIT_CLK_HZ ? HDMI_2000MHZ_BIT_CLK_HZ :
+				HDMI_2250MHZ_BIT_CLK_HZ;
+
+	fdata = hdmi_8996_v2_get_fdata(bclk, vco_range);
+	if (fdata == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	hsclk = hdmi_8996_v2_get_hsclk(fdata, vco_range);
+	if (hsclk == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	if (bclk >= vco_range)
+		post_div = hdmi_8996_v2_get_post_div_gt_2g(hsclk)
+	else
+		post_div = hdmi_8996_v2_get_post_div_lt_2g(bclk, vco_range);
+
+	if (post_div == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	core_clk_div = 5;
+	core_clk_div_ratio = core_clk_div * 2;
+
+	tx_band = hdmi_8996_v2_get_tx_band(bclk, vco_range);
+	if (tx_band == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	tx_band_div_ratio = 1 << tx_band;
+
+	vco_freq = hdmi_8996_v2_get_vco_freq(bclk, vco_range);
+	clk_divtx = vco_freq;
+	do_div(clk_divtx, post_div);
+
+	/* Decimal and fraction values */
+	dec_start = fdata * post_div;
+	do_div(dec_start, pll_divisor);
+	/* 20-bit fractional part of fdata * post_div / pll_divisor */
+	frac_start = ((pll_divisor - (((dec_start + 1) * pll_divisor) -
+			(fdata * post_div))) * (1 << 20));
+	rem = do_div(frac_start, pll_divisor);
+	/* Round off frac_start to closest integer */
+	if (rem >= (pll_divisor >> 1))
+		frac_start++;
+
+	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
+	rctrl = hdmi_8996_get_rctrl(frac_start, false);
+	cctrl = hdmi_8996_get_cctrl(frac_start, false);
+	integloop_gain = hdmi_8996_get_integloop_gain(frac_start, false);
+	vco_tune = hdmi_8996_get_vco_tune(fdata, post_div);
+
+	core_clk = clk_divtx;
+	do_div(core_clk, core_clk_div_ratio);
+	pll_cmp = hdmi_8996_get_pll_cmp(1024, core_clk);
+
+	/* Debug dump */
+	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
+	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
+	DEV_DBG("%s: CLK_DIVTX: %llu\n", __func__, clk_divtx);
+	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
+	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
+	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
+	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
+	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
+	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
+	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
+	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
+	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
+	DEV_DBG("%s: VCO_TUNE: %llu\n", __func__, vco_tune);
+	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
+	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
+
+	/* Convert these values to register specific values */
+	cfg->tx_l0_lane_mode = 0x3;
+	cfg->tx_l2_lane_mode = 0x3;
+	cfg->tx_l0_tx_band = tx_band + 4;
+	cfg->tx_l1_tx_band = tx_band + 4;
+	cfg->tx_l2_tx_band = tx_band + 4;
+	cfg->tx_l3_tx_band = tx_band + 4;
+
+	if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
+		cfg->com_svs_mode_clk_sel = 1;
+	else
+		cfg->com_svs_mode_clk_sel = 2;
+
+	cfg->com_hsclk_sel = (0x28 | hsclk);
+	cfg->com_pll_cctrl_mode0 = cctrl;
+	cfg->com_pll_rctrl_mode0 = rctrl;
+	cfg->com_cp_ctrl_mode0 = cpctrl;
+	cfg->com_dec_start_mode0 = dec_start;
+	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
+	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
+	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
+	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
+	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
+	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
+	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->com_core_clk_en = (0x6C | (HDMI_CLKS_PLL_DIVSEL << 4));
+	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+	cfg->com_vco_tune_ctrl = 0x0;
+
+	/* Per-band lane drive strength / pre-emphasis / vmode settings */
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x22;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->tx_l0_res_code_lane_tx = 0x3F;
+		cfg->tx_l1_res_code_lane_tx = 0x3F;
+		cfg->tx_l2_res_code_lane_tx = 0x3F;
+		cfg->tx_l3_res_code_lane_tx = 0x3F;
+		cfg->com_restrim_ctrl = 0x0;
+	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x25;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->tx_l0_res_code_lane_tx = 0x39;
+		cfg->tx_l1_res_code_lane_tx = 0x39;
+		cfg->tx_l2_res_code_lane_tx = 0x39;
+		cfg->tx_l3_res_code_lane_tx = 0x39;
+		cfg->com_restrim_ctrl = 0x0;
+	} else {
+		cfg->tx_l0_tx_drv_lvl = 0x20;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l1_tx_drv_lvl = 0x20;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l2_tx_drv_lvl = 0x20;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l3_tx_drv_lvl = 0x20;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0E;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0E;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0E;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x0E;
+		cfg->tx_l0_res_code_lane_tx = 0x3F;
+		cfg->tx_l1_res_code_lane_tx = 0x3F;
+		cfg->tx_l2_res_code_lane_tx = 0x3F;
+		cfg->tx_l3_res_code_lane_tx = 0x3F;
+		cfg->com_restrim_ctrl = 0xD8;
+	}
+
+	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
+	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
+	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
+	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
+	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
+	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
+					cfg->com_svs_mode_clk_sel);
+	DEV_DBG("PLL PARAM: com_vco_tune_ctrl = 0x%x\n",
+					cfg->com_vco_tune_ctrl);
+	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
+	/* NOTE(review): com_lock_cmp_en is never written in this function;
+	 * this prints whatever value the caller left in cfg (zero when the
+	 * caller zero-initializes, as hdmi_8996_phy_pll_set_clk_rate does) */
+	DEV_DBG("PLL PARAM: com_lock_cmp_en = 0x%x\n", cfg->com_lock_cmp_en);
+	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
+					cfg->com_pll_cctrl_mode0);
+	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
+					cfg->com_pll_rctrl_mode0);
+	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
+					cfg->com_cp_ctrl_mode0);
+	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
+					cfg->com_dec_start_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
+					cfg->com_div_frac_start1_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
+					cfg->com_div_frac_start2_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
+					cfg->com_div_frac_start3_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
+					cfg->com_integloop_gain0_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
+					cfg->com_integloop_gain1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
+					cfg->com_lock_cmp1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
+					cfg->com_lock_cmp2_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
+					cfg->com_lock_cmp3_mode0);
+	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
+	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
+
+	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
+					cfg->tx_l0_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
+					cfg->tx_l1_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
+					cfg->tx_l2_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
+					cfg->tx_l3_tx_emp_post1_lvl);
+
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: tx_l0_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l0_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l1_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l1_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l2_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l2_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l3_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l3_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: com_restrim_ctrl = 0x%x\n", cfg->com_restrim_ctrl);
+
+	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
+	rc = 0;
+fail:
+	return rc;
+}
+
+/*
+ * Compute the 8996 v3 PHY/PLL register configuration for @pix_clk (Hz).
+ * The post-divider search (hdmi_8996_v3_get_post_div) replaces the fixed
+ * threshold tables used by v1/v2; the chosen VCO frequency then yields the
+ * fractional-N settings and lock-comparator value.  Returns 0 on success,
+ * -EINVAL when no valid divider configuration exists.
+ */
+static int hdmi_8996_v3_calculate(u32 pix_clk,
+			       struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+	int rc = -EINVAL;
+	struct hdmi_8996_v3_post_divider pd;
+	u64 fdata, tmds_clk;
+	u64 bclk;
+	u64 pll_cmp;
+	u64 tx_band;
+	u64 hsclk;
+	u64 dec_start;
+	u64 frac_start;
+	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
+	u64 cpctrl;
+	u64 rctrl;
+	u64 cctrl;
+	u64 integloop_gain;
+	u64 vco_freq;
+	u64 rem;
+
+	/* FDATA, HSCLK, PIXEL_CLK, TMDS_CLK */
+	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	/* Above the HDMI 2.0 threshold the TMDS clock is pix_clk / 4 */
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+		tmds_clk = pix_clk >> 2;
+	else
+		tmds_clk = pix_clk;
+
+	if (hdmi_8996_v3_get_post_div(&pd, bclk) || pd.vco_ratio <= 0 ||
+			pd.vco_freq <= 0)
+		goto fail;
+
+	vco_freq = pd.vco_freq;
+	fdata = pd.vco_freq;
+	do_div(fdata, pd.vco_ratio);
+
+	hsclk = pd.hsclk_divsel;
+	dec_start = vco_freq;
+	do_div(dec_start, pll_divisor);
+
+	/* 20-bit fractional part of vco_freq / pll_divisor, rounded */
+	frac_start = vco_freq * (1 << 20);
+	rem = do_div(frac_start, pll_divisor);
+	frac_start -= dec_start * (1 << 20);
+	if (rem > (pll_divisor >> 1))
+		frac_start++;
+
+	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
+	rctrl = hdmi_8996_get_rctrl(frac_start, false);
+	cctrl = hdmi_8996_get_cctrl(frac_start, false);
+	integloop_gain = hdmi_8996_v3_get_integloop_gain(frac_start, bclk,
+								false);
+	pll_cmp = hdmi_8996_v3_get_pll_cmp(1024, fdata);
+	tx_band = pd.tx_band_sel;
+
+	/* Debug dump */
+	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
+	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
+	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
+	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
+	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
+	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
+	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
+	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
+	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
+	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
+	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
+	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
+	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
+
+	/* Convert these values to register specific values */
+	cfg->tx_l0_tx_band = tx_band + 4;
+	cfg->tx_l1_tx_band = tx_band + 4;
+	cfg->tx_l2_tx_band = tx_band + 4;
+	cfg->tx_l3_tx_band = tx_band + 4;
+
+	if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
+		cfg->com_svs_mode_clk_sel = 1;
+	else
+		cfg->com_svs_mode_clk_sel = 2;
+
+	cfg->com_hsclk_sel = (0x20 | hsclk);
+	cfg->com_pll_cctrl_mode0 = cctrl;
+	cfg->com_pll_rctrl_mode0 = rctrl;
+	cfg->com_cp_ctrl_mode0 = cpctrl;
+	cfg->com_dec_start_mode0 = dec_start;
+	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
+	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
+	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
+	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
+	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
+	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
+	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->com_lock_cmp_en = 0x04;
+	cfg->com_core_clk_en = 0x2C;
+	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+	cfg->com_vco_tune_ctrl = 0x0;
+
+	cfg->tx_l0_lane_mode = 0x43;
+	cfg->tx_l2_lane_mode = 0x43;
+
+	/* Per-band lane drive strength / pre-emphasis / vmode settings */
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x22;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x25;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+	} else {
+		cfg->tx_l0_tx_drv_lvl = 0x20;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l1_tx_drv_lvl = 0x20;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l2_tx_drv_lvl = 0x20;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l3_tx_drv_lvl = 0x20;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0E;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0E;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0E;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x0E;
+	}
+
+	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
+	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
+	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
+	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
+	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
+	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
+					cfg->com_svs_mode_clk_sel);
+	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
+	DEV_DBG("PLL PARAM: com_lock_cmp_en = 0x%x\n", cfg->com_lock_cmp_en);
+	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
+					cfg->com_pll_cctrl_mode0);
+	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
+					cfg->com_pll_rctrl_mode0);
+	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
+					cfg->com_cp_ctrl_mode0);
+	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
+					cfg->com_dec_start_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
+					cfg->com_div_frac_start1_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
+					cfg->com_div_frac_start2_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
+					cfg->com_div_frac_start3_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
+					cfg->com_integloop_gain0_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
+					cfg->com_integloop_gain1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
+					cfg->com_lock_cmp1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
+					cfg->com_lock_cmp2_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
+					cfg->com_lock_cmp3_mode0);
+	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
+	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
+	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
+
+	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
+	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
+					cfg->tx_l0_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
+					cfg->tx_l1_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
+					cfg->tx_l2_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
+					cfg->tx_l3_tx_emp_post1_lvl);
+
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
+	rc = 0;
+fail:
+	return rc;
+}
+
+/*
+ * Dispatch PLL parameter calculation to the silicon-revision-specific
+ * routine; unknown versions fall back to the v1 path.
+ */
+static int hdmi_8996_calculate(u32 pix_clk,
+			       struct hdmi_8996_phy_pll_reg_cfg *cfg, u32 ver)
+{
+	if (ver == HDMI_VERSION_8996_V3 || ver == HDMI_VERSION_8996_V3_1_8)
+		return hdmi_8996_v3_calculate(pix_clk, cfg);
+
+	if (ver == HDMI_VERSION_8996_V2)
+		return hdmi_8996_v2_calculate(pix_clk, cfg);
+
+	return hdmi_8996_v1_calculate(pix_clk, cfg);
+}
+
+/*
+ * hdmi_8996_phy_pll_set_clk_rate() - program the HDMI PHY and QSERDES PLL
+ * for the requested TMDS clock.
+ *
+ * Computes the per-revision register configuration, powers the PHY down,
+ * then replays the full power-up/config sequence. The register write order
+ * below follows the hardware programming guide and must not be reordered.
+ *
+ * Returns 0 on success, or a negative error if the PLL parameters could
+ * not be calculated for @tmds_clk.
+ */
+static int hdmi_8996_phy_pll_set_clk_rate(struct clk *c, u32 tmds_clk, u32 ver)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	struct hdmi_8996_phy_pll_reg_cfg cfg = {0};
+
+	rc = hdmi_8996_calculate(tmds_clk, &cfg, ver);
+	if (rc) {
+		DEV_ERR("%s: PLL calculation failed\n", __func__);
+		return rc;
+	}
+
+	/* Initially shut down PHY */
+	DEV_DBG("%s: Disabling PHY\n", __func__);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x0);
+	udelay(500);
+
+	/* Power up sequence */
+	switch (ver) {
+	case HDMI_VERSION_8996_V2:
+	case HDMI_VERSION_8996_V3:
+	case HDMI_VERSION_8996_V3_1_8:
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x04);
+		break;
+	}
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x1);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESETSM_CNTRL, 0x20);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TX0_TX1_LANE_CTL, 0x0F);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TX2_TX3_LANE_CTL, 0x0F);
+	/* Enable the clock buffers on all four TX lanes. */
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+
+	/* Lane mode is only calculated for lanes 0 and 2. */
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_LANE_MODE, cfg.tx_l0_lane_mode);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_LANE_MODE, cfg.tx_l2_lane_mode);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_BAND, cfg.tx_l0_tx_band);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_BAND, cfg.tx_l1_tx_band);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_BAND, cfg.tx_l2_tx_band);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_BAND, cfg.tx_l3_tx_band);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+
+	/* Common-block clock and bias setup. */
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1E);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x07);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x02);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CLK_ENABLE1, 0x0E);
+	if (ver == HDMI_VERSION_8996_V1)
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x06);
+
+	/* Bypass VCO calibration */
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL,
+		       cfg.com_svs_mode_clk_sel);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_TRIM, 0x0F);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_IVCO, 0x0F);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE_CTRL,
+		       cfg.com_vco_tune_ctrl);
+
+	switch (ver) {
+	case HDMI_VERSION_8996_V1:
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL,
+			       cfg.com_svs_mode_clk_sel);
+		break;
+	default:
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x06);
+	}
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CLK_SELECT, 0x30);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_HSCLK_SEL,
+		       cfg.com_hsclk_sel);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_EN,
+		       cfg.com_lock_cmp_en);
+
+	/* Loop filter and frequency (decimal + fractional) configuration. */
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CCTRL_MODE0,
+		       cfg.com_pll_cctrl_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_RCTRL_MODE0,
+		       cfg.com_pll_rctrl_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CP_CTRL_MODE0,
+		       cfg.com_cp_ctrl_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEC_START_MODE0,
+		       cfg.com_dec_start_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START1_MODE0,
+		       cfg.com_div_frac_start1_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START2_MODE0,
+		       cfg.com_div_frac_start2_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START3_MODE0,
+		       cfg.com_div_frac_start3_mode0);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
+		       cfg.com_integloop_gain0_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
+		       cfg.com_integloop_gain1_mode0);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP1_MODE0,
+		       cfg.com_lock_cmp1_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP2_MODE0,
+		       cfg.com_lock_cmp2_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP3_MODE0,
+		       cfg.com_lock_cmp3_mode0);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE_MAP, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CORE_CLK_EN,
+		       cfg.com_core_clk_en);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CORECLK_DIV,
+		       cfg.com_coreclk_div);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_CONFIG, 0x02);
+
+	if (ver == HDMI_VERSION_8996_V3 || ver == HDMI_VERSION_8996_V3_1_8)
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESCODE_DIV_NUM, 0x15);
+
+	/*
+	 * TX lanes setup (TX 0/1/2/3). The 1.8V (V3_1_8) package uses fixed
+	 * drive levels instead of the calculated ones.
+	 */
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       0x00000023);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       cfg.tx_l0_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l0_tx_emp_post1_lvl);
+
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       0x00000023);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       cfg.tx_l1_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l1_tx_emp_post1_lvl);
+
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       0x00000023);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       cfg.tx_l2_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l2_tx_emp_post1_lvl);
+
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       0x00000020);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			       QSERDES_TX_L0_TX_DRV_LVL,
+			       cfg.tx_l3_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l3_tx_emp_post1_lvl);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l0_vmode_ctrl1);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL2,
+		       cfg.tx_l0_vmode_ctrl2);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l1_vmode_ctrl1);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL2,
+		       cfg.tx_l1_vmode_ctrl2);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l2_vmode_ctrl1);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL2,
+		       cfg.tx_l2_vmode_ctrl2);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l3_vmode_ctrl1);
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			       QSERDES_TX_L0_VMODE_CTRL2,
+			       0x0000000D);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			       QSERDES_TX_L0_VMODE_CTRL2,
+			       cfg.tx_l3_vmode_ctrl2);
+	}
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+
+	/* Pre-V3 silicon needs explicit lane resistor codes and TX cal. */
+	if (ver < HDMI_VERSION_8996_V3) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l0_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l1_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l2_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l3_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESTRIM_CTRL,
+			       cfg.com_restrim_ctrl);
+
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TXCAL_CFG1, 0x05);
+	}
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_MODE, cfg.phy_mode);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x1F);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+
+	/* Lane 3 carries the clock pair, hence the different PD enables. */
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x03);
+
+	if (ver == HDMI_VERSION_8996_V2) {
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL1, 0x01);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL2, 0x01);
+	}
+	/*
+	 * Ensure that vco configuration gets flushed to hardware before
+	 * enabling the PLL
+	 */
+	wmb();
+	return 0;
+}
+
+/*
+ * Poll the PHY status register until the ready bit (BIT 0) is set or
+ * HDMI_PLL_POLL_MAX_READS polls elapse.
+ *
+ * Returns 1 when the PHY reports ready, 0 on timeout, or a negative errno
+ * if the PLL resources could not be enabled.
+ *
+ * NOTE(review): a negative errno is also "truthy" to callers that test
+ * the result with !rc — confirm callers distinguish error from ready.
+ */
+static int hdmi_8996_phy_ready_status(struct mdss_pll_resources *io)
+{
+	u32 status = 0;
+	int phy_ready = 0;
+	int rc;
+	u32 read_count = 0;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
+		return rc;
+	}
+
+	DEV_DBG("%s: Waiting for PHY Ready\n", __func__);
+
+	/* Poll for PHY read status */
+	while (read_count < HDMI_PLL_POLL_MAX_READS) {
+		status = MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS);
+		if ((status & BIT(0)) == 1) {
+			phy_ready = 1;
+			DEV_DBG("%s: PHY READY\n", __func__);
+			break;
+		}
+		udelay(HDMI_PLL_POLL_TIMEOUT_US);
+		read_count++;
+	}
+
+	if (read_count == HDMI_PLL_POLL_MAX_READS) {
+		phy_ready = 0;
+		DEV_DBG("%s: PHY READY TIMEOUT\n", __func__);
+	}
+
+	/* Balance the resource vote taken above before returning. */
+	mdss_pll_resource_enable(io, false);
+
+	return phy_ready;
+}
+
+/*
+ * Poll the QSERDES C_READY status until the PLL reports lock (BIT 0) or
+ * HDMI_PLL_POLL_MAX_READS polls elapse.
+ *
+ * Returns 1 when locked, 0 on timeout, or a negative errno if the PLL
+ * resources could not be enabled.
+ */
+static int hdmi_8996_pll_lock_status(struct mdss_pll_resources *io)
+{
+	u32 status;
+	int pll_locked = 0;
+	int rc;
+	u32 read_count = 0;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
+		return rc;
+	}
+
+	DEV_DBG("%s: Waiting for PLL lock\n", __func__);
+
+	while (read_count < HDMI_PLL_POLL_MAX_READS) {
+		status = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_C_READY_STATUS);
+		if ((status & BIT(0)) == 1) {
+			pll_locked = 1;
+			DEV_DBG("%s: C READY\n", __func__);
+			break;
+		}
+		udelay(HDMI_PLL_POLL_TIMEOUT_US);
+		read_count++;
+	}
+
+	if (read_count == HDMI_PLL_POLL_MAX_READS) {
+		pll_locked = 0;
+		DEV_DBG("%s: C READY TIMEOUT\n", __func__);
+	}
+
+	/* Balance the resource vote taken above before returning. */
+	mdss_pll_resource_enable(io, false);
+
+	return pll_locked;
+}
+
+/*
+ * V1 silicon software VCO calibration.
+ *
+ * Performs a binary search over the 10-bit VCO tune code (0..0x3FF):
+ * for each candidate code the lock-comparator counter is reset, run,
+ * and read back over the debug bus, then compared against the expected
+ * counter value programmed in LOCK_CMP[1..3]_MODE0. The search narrows
+ * [min_code, max_code] until adjacent codes remain, then writes the
+ * winning code and restores the debug/comparator configuration.
+ *
+ * Returns 0 on success, -ENOTSUPP if the search fails to converge.
+ */
+static int hdmi_8996_v1_perform_sw_calibration(struct clk *c)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	u32 max_code = 0x190;
+	u32 min_code = 0x0;
+	u32 max_cnt = 0;
+	u32 min_cnt = 0;
+	u32 expected_counter_value = 0;
+	u32 step = 0;
+	u32 dbus_all = 0;
+	u32 dbus_sel = 0;
+	u32 vco_code = 0;
+	u32 val = 0;
+
+	/* Initial search seed for the binary search. */
+	vco_code = 0xC8;
+
+	DEV_DBG("%s: Starting SW calibration with vco_code = %d\n", __func__,
+		 vco_code);
+
+	/* Reassemble the 24-bit expected counter from the three CMP regs. */
+	expected_counter_value =
+	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP3_MODE0) << 16) |
+	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP2_MODE0) << 8) |
+	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP1_MODE0));
+
+	DEV_DBG("%s: expected_counter_value = %d\n", __func__,
+		 expected_counter_value);
+
+	/* Take manual control of the VCO tune code (MISC1 bits 4 and 3). */
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val |= BIT(4);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val |= BIT(3);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	/* Route the lock-comparator counter onto the debug bus. */
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEBUG_BUS_SEL, 0x4);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+	val |= BIT(1);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+	udelay(60);
+
+	while (1) {
+		/* Apply the candidate 10-bit tune code (8 + 2 bits). */
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE1_MODE0,
+			       vco_code & 0xFF);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE2_MODE0,
+			       (vco_code >> 8) & 0x3);
+
+		udelay(20);
+
+		/* Pulse LOCK_CMP_CFG BIT(1) low/high to restart the count. */
+		val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+		val &= ~BIT(1);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+		udelay(60);
+
+		val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+		val |= BIT(1);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+		udelay(60);
+
+		/* Read the 32-bit debug-bus snapshot, byte by byte. */
+		dbus_all =
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS3) << 24) |
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS2) << 16) |
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS1) << 8) |
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS0));
+
+		/* The counter of interest lives in bits [26:9]. */
+		dbus_sel = (dbus_all >> 9) & 0x3FFFF;
+		DEV_DBG("%s: loop[%d], dbus_all = 0x%x, dbus_sel = 0x%x\n",
+			__func__, step, dbus_all, dbus_sel);
+		if (dbus_sel == 0)
+			DEV_ERR("%s: CHECK HDMI REF CLK\n", __func__);
+
+		/* Narrow the search window around the expected count. */
+		if (dbus_sel == expected_counter_value) {
+			max_code = vco_code;
+			max_cnt = dbus_sel;
+			min_code = vco_code;
+			min_cnt = dbus_sel;
+		} else if (dbus_sel == 0) {
+			max_code = vco_code;
+			max_cnt = dbus_sel;
+			vco_code = (max_code + min_code)/2;
+		} else if (dbus_sel > expected_counter_value) {
+			min_code = vco_code;
+			min_cnt = dbus_sel;
+			vco_code = (max_code + min_code)/2;
+		} else if (dbus_sel < expected_counter_value) {
+			max_code = vco_code;
+			max_cnt = dbus_sel;
+			vco_code = (max_code + min_code)/2;
+		}
+
+		step++;
+
+		/* Give up if the code hits a rail or too many iterations. */
+		if ((vco_code == 0) || (vco_code == 0x3FF) || (step > 0x3FF)) {
+			DEV_ERR("%s: VCO tune code search failed\n", __func__);
+			rc = -ENOTSUPP;
+			break;
+		}
+		/* Converged: pick whichever bound is closer to the target. */
+		if ((max_code - min_code) <= 1) {
+			if ((max_code - min_code) == 1) {
+				if (abs((int)(max_cnt - expected_counter_value))
+				    < abs((int)(min_cnt - expected_counter_value
+					))) {
+					vco_code = max_code;
+				} else {
+					vco_code = min_code;
+				}
+			}
+			break;
+		}
+		DEV_DBG("%s: loop[%d], new vco_code = %d\n", __func__, step,
+			 vco_code);
+	}
+
+	/* Commit the final code and restore normal comparator operation. */
+	DEV_DBG("%s: CALIB done. vco_code = %d\n", __func__, vco_code);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE1_MODE0,
+		       vco_code & 0xFF);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE2_MODE0,
+		       (vco_code >> 8) & 0x3);
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+	val &= ~BIT(1);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val |= BIT(4);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val &= ~BIT(3);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	return rc;
+}
+
+/*
+ * V2 silicon software calibration fallback.
+ *
+ * Waits (with an extended poll budget) for the hardware calibration to
+ * report C_READY. If it never does — or if it locked at the edge of the
+ * tune range (code1 == 0xFF, code2 low bits == 0x1) with a high integral
+ * loop value — the PHY is cycled through a CFG reset sequence to retry.
+ *
+ * Always returns 0; failures surface later via the lock-status poll.
+ */
+static int hdmi_8996_v2_perform_sw_calibration(struct clk *c)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	u32 vco_code1, vco_code2, integral_loop, ready_poll;
+	u32 read_count = 0;
+
+	/* Double the normal poll budget before declaring a cal failure. */
+	while (read_count < (HDMI_PLL_POLL_MAX_READS << 1)) {
+		ready_poll = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_C_READY_STATUS);
+		if ((ready_poll & BIT(0)) == 1) {
+			ready_poll = 1;
+			DEV_DBG("%s: C READY\n", __func__);
+			break;
+		}
+		udelay(HDMI_PLL_POLL_TIMEOUT_US);
+		read_count++;
+	}
+
+	if (read_count == (HDMI_PLL_POLL_MAX_READS << 1)) {
+		ready_poll = 0;
+		DEV_DBG("%s: C READY TIMEOUT, TRYING SW CALIBRATION\n",
+			__func__);
+	}
+
+	/* Inspect where the hardware calibration landed. */
+	vco_code1 = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_PLLCAL_CODE1_STATUS);
+	vco_code2 = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_PLLCAL_CODE2_STATUS);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEBUG_BUS_SEL, 0x5);
+	integral_loop = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_DEBUG_BUS0);
+
+	/* Retry the PHY config sequence on timeout or edge-of-range lock. */
+	if (((ready_poll & 0x1) == 0) || (((ready_poll & 1) == 1) &&
+			(vco_code1 == 0xFF) && ((vco_code2 & 0x3) == 0x1) &&
+			(integral_loop > 0xC0))) {
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL1, 0x04);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL2, 0x00);
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x17);
+		udelay(100);
+
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x11);
+		udelay(100);
+
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
+	}
+	return rc;
+}
+
+/*
+ * Run the software-assisted calibration appropriate for the silicon
+ * revision. V3 and later need none, so they succeed immediately.
+ */
+static int hdmi_8996_perform_sw_calibration(struct clk *c, u32 ver)
+{
+	if (ver == HDMI_VERSION_8996_V1)
+		return hdmi_8996_v1_perform_sw_calibration(c);
+
+	if (ver == HDMI_VERSION_8996_V2)
+		return hdmi_8996_v2_perform_sw_calibration(c);
+
+	return 0;
+}
+
+/*
+ * hdmi_8996_vco_enable() - start the PLL/PHY and wait for lock and ready.
+ *
+ * Kicks the PHY, runs any software calibration the revision needs, then
+ * waits for PLL lock and PHY ready before enabling the lane drivers,
+ * disabling SSC and restarting the retiming buffer.
+ *
+ * Returns 0 on success or a negative errno. Previously the "not locked"
+ * and "not ready" paths returned the helper's 0 result, silently
+ * reporting success on failure; they now return a proper error code.
+ */
+static int hdmi_8996_vco_enable(struct clk *c, u32 ver)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x1);
+	udelay(100);
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
+	udelay(100);
+
+	rc = hdmi_8996_perform_sw_calibration(c, ver);
+	if (rc) {
+		DEV_ERR("%s: software calibration failed\n", __func__);
+		return rc;
+	}
+
+	/* Helper returns 1 locked, 0 timeout, negative errno on error. */
+	rc = hdmi_8996_pll_lock_status(io);
+	if (rc <= 0) {
+		DEV_ERR("%s: PLL not locked\n", __func__);
+		return rc ? rc : -EBUSY;
+	}
+
+	/* Un-highz the bias/driver enables on all four TX lanes. */
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+
+	/* Disable SSC */
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_PER1, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_PER2, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_STEP_SIZE1, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_STEP_SIZE2, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_EN_CENTER, 0x2);
+
+	rc = hdmi_8996_phy_ready_status(io);
+	if (rc <= 0) {
+		DEV_ERR("%s: PHY not READY\n", __func__);
+		return rc ? rc : -EBUSY;
+	}
+
+	/* Restart the retiming buffer */
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x18);
+	udelay(1);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
+
+	io->pll_on = true;
+	return 0;
+}
+
+/* Per-revision clk_ops .enable wrappers around hdmi_8996_vco_enable(). */
+static int hdmi_8996_v1_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V1);
+}
+
+static int hdmi_8996_v2_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V2);
+}
+
+static int hdmi_8996_v3_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V3);
+}
+
+static int hdmi_8996_v3_1p8_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V3_1_8);
+}
+
+/*
+ * hdmi_8996_vco_get_lock_range() - ppm window within which the running
+ * PLL can be retuned atomically (without a full power cycle).
+ *
+ * Derives the window from the V3 post-divider configuration for the
+ * current pixel clock. Returns the ppm range, or -EINVAL if no valid
+ * post-divider setting exists for the rate.
+ *
+ * Fix: the debug print used %ld for the unsigned long @pixel_clk,
+ * a printk format-specifier mismatch; it now uses %lu.
+ */
+static int hdmi_8996_vco_get_lock_range(struct clk *c, unsigned long pixel_clk)
+{
+	u32 rng = 64, cmp_cnt = 1024;
+	u32 coreclk_div = 5, clks_pll_divsel = 2;
+	u32 vco_freq, vco_ratio, ppm_range;
+	u64 bclk;
+	struct hdmi_8996_v3_post_divider pd;
+
+	bclk = ((u64)pixel_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	DEV_DBG("%s: rate=%lu\n", __func__, pixel_clk);
+
+	if (hdmi_8996_v3_get_post_div(&pd, bclk) ||
+		pd.vco_ratio <= 0 || pd.vco_freq <= 0) {
+		DEV_ERR("%s: couldn't get post div\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Scale the VCO frequency from Hz down to MHz for the math below. */
+	do_div(pd.vco_freq, HDMI_KHZ_TO_HZ * HDMI_KHZ_TO_HZ);
+
+	vco_freq  = (u32) pd.vco_freq;
+	vco_ratio = (u32) pd.vco_ratio;
+
+	DEV_DBG("%s: freq %d, ratio %d\n", __func__,
+		vco_freq, vco_ratio);
+
+	ppm_range = (rng * HDMI_REF_CLOCK) / cmp_cnt;
+	ppm_range /= vco_freq / vco_ratio;
+	ppm_range *= coreclk_div * clks_pll_divsel;
+
+	DEV_DBG("%s: ppm range: %d\n", __func__, ppm_range);
+
+	return ppm_range;
+}
+
+/*
+ * Retune a running PLL to a nearby rate without a power cycle: rewrite
+ * only the decimal and fractional dividers, then pulse FREQ_UPDATE so
+ * the hardware latches the new values.
+ *
+ * Returns 0 on success or a negative error from the parameter solver.
+ */
+static int hdmi_8996_vco_rate_atomic_update(struct clk *c,
+					   unsigned long rate, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	void __iomem *pll;
+	struct hdmi_8996_phy_pll_reg_cfg cfg = {0};
+	int rc = 0;
+
+	rc = hdmi_8996_calculate(rate, &cfg, ver);
+	if (rc) {
+		DEV_ERR("%s: PLL calculation failed\n", __func__);
+		goto end;
+	}
+
+	pll = io->pll_base;
+
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DEC_START_MODE0,
+		       cfg.com_dec_start_mode0);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START1_MODE0,
+		       cfg.com_div_frac_start1_mode0);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START2_MODE0,
+		       cfg.com_div_frac_start2_mode0);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START3_MODE0,
+		       cfg.com_div_frac_start3_mode0);
+
+	/* Toggle FREQ_UPDATE to latch the new divider values. */
+	MDSS_PLL_REG_W(pll, QSERDES_COM_FREQ_UPDATE, 0x01);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_FREQ_UPDATE, 0x00);
+
+	DEV_DBG("%s: updated to rate %ld\n", __func__, rate);
+end:
+	return rc;
+}
+
+/*
+ * hdmi_8996_vco_set_rate() - change the VCO rate, atomically if possible.
+ *
+ * If the PLL is already locked and the new rate falls inside the ppm
+ * lock window around the current rate, only the dividers are updated;
+ * otherwise the full PHY/PLL programming sequence is replayed (and the
+ * PLL re-enabled if it was running).
+ *
+ * Fixes over the original:
+ *  - abs(rate - vco->rate) wrapped for unsigned operands whenever the
+ *    new rate was lower than the current one, producing a huge bogus
+ *    delta; the magnitude is now computed branch-wise.
+ *  - a failed full reprogram now returns the error instead of 0 and no
+ *    longer records the unset rate as current.
+ */
+static int hdmi_8996_vco_set_rate(struct clk *c, unsigned long rate, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	unsigned int set_power_dwn = 0;
+	bool atomic_update = false;
+	int rc, pll_lock_range;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	DEV_DBG("%s: rate %ld\n", __func__, rate);
+
+	if (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_C_READY_STATUS) & BIT(0) &&
+		MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS) & BIT(0)) {
+		pll_lock_range = hdmi_8996_vco_get_lock_range(c, vco->rate);
+
+		if (pll_lock_range > 0 && vco->rate) {
+			u32 range_limit;
+			unsigned long rate_delta;
+
+			range_limit  = vco->rate *
+					(pll_lock_range / HDMI_KHZ_TO_HZ);
+			range_limit /= HDMI_KHZ_TO_HZ;
+
+			/*
+			 * |rate - vco->rate| computed without the unsigned
+			 * wrap-around that abs() of an unsigned difference
+			 * suffers when rate < vco->rate.
+			 */
+			rate_delta = (rate > vco->rate) ?
+					rate - vco->rate : vco->rate - rate;
+
+			DEV_DBG("%s: range limit %d\n", __func__, range_limit);
+
+			if (rate_delta < range_limit)
+				atomic_update = true;
+		}
+	}
+
+	/* A running PLL needs a re-enable after a full reprogram. */
+	if (io->pll_on && !atomic_update)
+		set_power_dwn = 1;
+
+	if (atomic_update) {
+		hdmi_8996_vco_rate_atomic_update(c, rate, ver);
+	} else {
+		rc = hdmi_8996_phy_pll_set_clk_rate(c, rate, ver);
+		if (rc) {
+			DEV_ERR("%s: Failed to set clk rate\n", __func__);
+			mdss_pll_resource_enable(io, false);
+			return rc;
+		}
+	}
+
+	mdss_pll_resource_enable(io, false);
+
+	if (set_power_dwn)
+		hdmi_8996_vco_enable(c, ver);
+
+	vco->rate = rate;
+	vco->rate_set = true;
+
+	return 0;
+}
+
+/* Per-revision clk_ops .set_rate wrappers around hdmi_8996_vco_set_rate(). */
+static int hdmi_8996_v1_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V1);
+}
+
+static int hdmi_8996_v2_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V2);
+}
+
+static int hdmi_8996_v3_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V3);
+}
+
+static int hdmi_8996_v3_1p8_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V3_1_8);
+}
+
+/*
+ * Map an HSCLK_SEL field value to its clock divisor. Out-of-range
+ * selections are reported and fall back to a divisor of 1, matching
+ * the previous switch-statement behavior (including 6 and 13 both
+ * mapping to 15).
+ */
+static unsigned long hdmi_get_hsclk_sel_divisor(unsigned long hsclk_sel)
+{
+	static const unsigned long divisor_map[] = {
+		2, 6, 10, 14, 3, 9, 15, 21,
+		4, 12, 20, 28, 5, 15, 25, 35,
+	};
+
+	if (hsclk_sel >= sizeof(divisor_map) / sizeof(divisor_map[0])) {
+		DEV_ERR("%s: invalid hsclk_sel value = %lu",
+			__func__, hsclk_sel);
+		return 1;
+	}
+
+	return divisor_map[hsclk_sel];
+}
+
+/*
+ * Reconstruct the current VCO output frequency from hardware: read back
+ * the decimal/fractional dividers, the HSCLK selection and the TX band,
+ * and invert the rate equation used when programming the PLL.
+ *
+ * Returns the rate in Hz, or 0 if the PLL resources cannot be enabled.
+ */
+static unsigned long hdmi_8996_vco_get_rate(struct clk *c)
+{
+	unsigned long freq = 0, hsclk_sel = 0, tx_band = 0, dec_start = 0,
+		      div_frac_start = 0, vco_clock_freq = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
+		return freq;
+	}
+
+	dec_start = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEC_START_MODE0);
+
+	/* Reassemble the 20-bit fractional divider from its three bytes. */
+	div_frac_start =
+		MDSS_PLL_REG_R(io->pll_base,
+			       QSERDES_COM_DIV_FRAC_START1_MODE0) |
+		MDSS_PLL_REG_R(io->pll_base,
+			       QSERDES_COM_DIV_FRAC_START2_MODE0) << 8 |
+		MDSS_PLL_REG_R(io->pll_base,
+			       QSERDES_COM_DIV_FRAC_START3_MODE0) << 16;
+
+	vco_clock_freq = (dec_start + (div_frac_start / (1 << 20)))
+		* 4 * (HDMI_REF_CLOCK);
+
+	/*
+	 * NOTE(review): 0x15 (0b10101) is an odd mask for a field whose
+	 * valid values run 0..15 — 0xF looks intended. Confirm against the
+	 * HSCLK_SEL register definition before changing.
+	 */
+	hsclk_sel = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_HSCLK_SEL) & 0x15;
+	hsclk_sel = hdmi_get_hsclk_sel_divisor(hsclk_sel);
+	tx_band = MDSS_PLL_REG_R(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			QSERDES_TX_L0_TX_BAND) & 0x3;
+
+	freq = vco_clock_freq / (10 * hsclk_sel * (1 << tx_band));
+
+	mdss_pll_resource_enable(io, false);
+
+	DEV_DBG("%s: freq = %lu\n", __func__, freq);
+
+	return freq;
+}
+
+/* The VCO accepts any requested rate unmodified. */
+static long hdmi_8996_vco_round_rate(struct clk *c, unsigned long rate)
+{
+	DEV_DBG("rrate=%ld\n", rate);
+
+	return rate;
+}
+
+/*
+ * clk_ops .prepare backend: program the cached rate if a rate was
+ * requested but never committed, then take the PLL resource vote.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int hdmi_8996_vco_prepare(struct clk *c, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	int ret = 0;
+
+	DEV_DBG("rate=%ld\n", vco->rate);
+
+	/* Commit a pending rate before voting the resources on. */
+	if (!vco->rate_set && vco->rate)
+		ret = hdmi_8996_vco_set_rate(c, vco->rate, ver);
+
+	if (!ret) {
+		ret = mdss_pll_resource_enable(io, true);
+		if (ret)
+			DEV_ERR("pll resource can't be enabled\n");
+	}
+
+	return ret;
+}
+
+/* Per-revision clk_ops .prepare wrappers around hdmi_8996_vco_prepare(). */
+static int hdmi_8996_v1_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V1);
+}
+
+static int hdmi_8996_v2_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V2);
+}
+
+static int hdmi_8996_v3_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V3);
+}
+
+static int hdmi_8996_v3_1p8_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V3_1_8);
+}
+
+/*
+ * clk_ops .unprepare: invalidate the committed rate and drop the PLL
+ * resource vote (taking a temporary vote first if the PLL is already
+ * off, so the release below is balanced).
+ */
+static void hdmi_8996_vco_unprepare(struct clk *c)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	vco->rate_set = false;
+
+	if (!io) {
+		DEV_ERR("Invalid input parameter\n");
+		return;
+	}
+
+	if (!io->pll_on &&
+		mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return;
+	}
+
+	/* Give back any handoff vote along with our own. */
+	io->handoff_resources = false;
+	mdss_pll_resource_enable(io, false);
+	io->pll_on = false;
+}
+
+/*
+ * Boot-time handoff: decide whether the bootloader left the PLL running.
+ * Claims the clock (and keeps a resource vote via handoff_resources) only
+ * when both the PLL reports lock and the PHY reports ready, caching the
+ * rate read back from hardware; otherwise the vote is released again.
+ */
+static enum handoff hdmi_8996_vco_handoff(struct clk *c)
+{
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (is_gdsc_disabled(io))
+		return HANDOFF_DISABLED_CLK;
+
+	if (mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return ret;
+	}
+
+	io->handoff_resources = true;
+
+	if (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_C_READY_STATUS) & BIT(0)) {
+		if (MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS) & BIT(0)) {
+			io->pll_on = true;
+			c->rate = hdmi_8996_vco_get_rate(c);
+			vco->rate = c->rate;
+			ret = HANDOFF_ENABLED_CLK;
+		} else {
+			io->handoff_resources = false;
+			mdss_pll_resource_enable(io, false);
+			DEV_DBG("%s: PHY not ready\n", __func__);
+		}
+	} else {
+		io->handoff_resources = false;
+		mdss_pll_resource_enable(io, false);
+		DEV_DBG("%s: PLL not locked\n", __func__);
+	}
+
+	DEV_DBG("done, ret=%d\n", ret);
+	return ret;
+}
+
+/* clk_ops for V1 silicon; selected in hdmi_8996_pll_clock_register(). */
+static struct clk_ops hdmi_8996_v1_vco_clk_ops = {
+	.enable = hdmi_8996_v1_vco_enable,
+	.set_rate = hdmi_8996_v1_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v1_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+/* clk_ops for V2 silicon. */
+static struct clk_ops hdmi_8996_v2_vco_clk_ops = {
+	.enable = hdmi_8996_v2_vco_enable,
+	.set_rate = hdmi_8996_v2_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v2_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+/* clk_ops for V3 silicon. */
+static struct clk_ops hdmi_8996_v3_vco_clk_ops = {
+	.enable = hdmi_8996_v3_vco_enable,
+	.set_rate = hdmi_8996_v3_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v3_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+/* clk_ops for the 1.8V V3 package. */
+static struct clk_ops hdmi_8996_v3_1p8_vco_clk_ops = {
+	.enable = hdmi_8996_v3_1p8_vco_enable,
+	.set_rate = hdmi_8996_v3_1p8_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v3_1p8_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+
+/* The single VCO clock exposed by this driver; ops are chosen per
+ * silicon revision at registration time.
+ */
+static struct hdmi_pll_vco_clk hdmi_vco_clk = {
+	.c = {
+		.dbg_name = "hdmi_8996_vco_clk",
+		.ops = &hdmi_8996_v1_vco_clk_ops,
+		CLK_INIT(hdmi_vco_clk.c),
+	},
+};
+
+/* Lookup table handed to of_msm_clock_register(). */
+static struct clk_lookup hdmipllcc_8996[] = {
+	CLK_LIST(hdmi_vco_clk),
+};
+
+/*
+ * hdmi_8996_pll_clock_register() - register the HDMI VCO clock for the
+ * given silicon revision.
+ *
+ * Validates @pll_res, binds it to the VCO clock, selects the revision's
+ * clk_ops and registers the clock table with the clock framework.
+ *
+ * Returns 0 on success or -EPROBE_DEFER on invalid resources or a
+ * registration failure. (Also drops a dead -ENOTSUPP initializer and a
+ * stray null statement after the switch.)
+ */
+int hdmi_8996_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res, u32 ver)
+{
+	int rc;
+
+	if (!pll_res || !pll_res->phy_base || !pll_res->pll_base) {
+		DEV_ERR("%s: Invalid input parameters\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	/* Set client data for vco, mux and div clocks */
+	hdmi_vco_clk.priv = pll_res;
+
+	switch (ver) {
+	case HDMI_VERSION_8996_V2:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v2_vco_clk_ops;
+		break;
+	case HDMI_VERSION_8996_V3:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v3_vco_clk_ops;
+		break;
+	case HDMI_VERSION_8996_V3_1_8:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v3_1p8_vco_clk_ops;
+		break;
+	default:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v1_vco_clk_ops;
+		break;
+	}
+
+	rc = of_msm_clock_register(pdev->dev.of_node, hdmipllcc_8996,
+				   ARRAY_SIZE(hdmipllcc_8996));
+	if (rc) {
+		DEV_ERR("%s: Clock register failed rc=%d\n", __func__, rc);
+		rc = -EPROBE_DEFER;
+	} else {
+		DEV_DBG("%s SUCCESS\n", __func__);
+	}
+
+	return rc;
+}
+
+/* Per-revision entry points wrapping hdmi_8996_pll_clock_register(). */
+int hdmi_8996_v1_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V1);
+}
+
+int hdmi_8996_v2_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V2);
+}
+
+int hdmi_8996_v3_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V3);
+}
+
+int hdmi_8996_v3_1p8_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V3_1_8);
+}
diff --git a/drivers/clk/msm/mdss/mdss-hdmi-pll-cobalt.c b/drivers/clk/msm/mdss/mdss-hdmi-pll-cobalt.c
new file mode 100644
index 000000000000..eac501e28d7b
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-hdmi-pll-cobalt.c
@@ -0,0 +1,841 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-cobalt.h>
+
+#include "mdss-pll.h"
+#include "mdss-hdmi-pll.h"
+
+#define _W(x, y, z) MDSS_PLL_REG_W(x, y, z)
+#define _R(x, y) MDSS_PLL_REG_R(x, y)
+
+/* PLL REGISTERS */
+#define BIAS_EN_CLKBUFLR_EN (0x034)
+#define CLK_ENABLE1 (0x038)
+#define SYS_CLK_CTRL (0x03C)
+#define SYSCLK_BUF_ENABLE (0x040)
+#define PLL_IVCO (0x048)
+#define CP_CTRL_MODE0 (0x060)
+#define PLL_RCTRL_MODE0 (0x068)
+#define PLL_CCTRL_MODE0 (0x070)
+#define SYSCLK_EN_SEL (0x080)
+#define RESETSM_CNTRL (0x088)
+#define LOCK_CMP_EN (0x090)
+#define LOCK_CMP1_MODE0 (0x098)
+#define LOCK_CMP2_MODE0 (0x09C)
+#define LOCK_CMP3_MODE0 (0x0A0)
+#define DEC_START_MODE0 (0x0B0)
+#define DIV_FRAC_START1_MODE0 (0x0B8)
+#define DIV_FRAC_START2_MODE0 (0x0BC)
+#define DIV_FRAC_START3_MODE0 (0x0C0)
+#define INTEGLOOP_GAIN0_MODE0 (0x0D8)
+#define INTEGLOOP_GAIN1_MODE0 (0x0DC)
+#define VCO_TUNE_CTRL (0x0EC)
+#define VCO_TUNE_MAP (0x0F0)
+#define CLK_SELECT (0x138)
+#define HSCLK_SEL (0x13C)
+#define CORECLK_DIV_MODE0 (0x148)
+#define CORE_CLK_EN (0x154)
+#define C_READY_STATUS (0x158)
+#define SVS_MODE_CLK_SEL (0x164)
+
+/* Tx Channel PHY registers */
+#define PHY_TX_EMP_POST1_LVL(n) ((((n) * 0x200) + 0x400) + 0x000)
+#define PHY_TX_INTERFACE_SELECT_TX_BAND(n) ((((n) * 0x200) + 0x400) + 0x008)
+#define PHY_TX_CLKBUF_TERM_ENABLE(n) ((((n) * 0x200) + 0x400) + 0x00C)
+#define PHY_TX_DRV_LVL_RES_CODE_OFFSET(n) ((((n) * 0x200) + 0x400) + 0x014)
+#define PHY_TX_DRV_LVL(n) ((((n) * 0x200) + 0x400) + 0x018)
+#define PHY_TX_LANE_CONFIG(n) ((((n) * 0x200) + 0x400) + 0x01C)
+#define PHY_TX_PRE_DRIVER_1(n) ((((n) * 0x200) + 0x400) + 0x024)
+#define PHY_TX_PRE_DRIVER_2(n) ((((n) * 0x200) + 0x400) + 0x028)
+#define PHY_TX_LANE_MODE(n) ((((n) * 0x200) + 0x400) + 0x02C)
+
+/* HDMI PHY registers */
+#define PHY_CFG (0x00)
+#define PHY_PD_CTL (0x04)
+#define PHY_MODE (0x10)
+#define PHY_CLOCK (0x5C)
+#define PHY_CMN_CTRL (0x68)
+#define PHY_STATUS (0xB4)
+
+#define HDMI_BIT_CLK_TO_PIX_CLK_RATIO 10
+#define HDMI_MHZ_TO_HZ 1000000
+#define HDMI_HZ_TO_MHZ 1000000
+#define HDMI_REF_CLOCK_MHZ 19.2
+#define HDMI_REF_CLOCK_HZ (HDMI_REF_CLOCK_MHZ * 1000000)
+#define HDMI_VCO_MIN_RATE_HZ 30000000
+#define HDMI_VCO_MAX_RATE_HZ 600000000
+
+struct cobalt_reg_cfg {
+ u32 tx_band;
+ u32 svs_mode_clk_sel;
+ u32 hsclk_sel;
+ u32 lock_cmp_en;
+ u32 cctrl_mode0;
+ u32 rctrl_mode0;
+ u32 cpctrl_mode0;
+ u32 dec_start_mode0;
+ u32 div_frac_start1_mode0;
+ u32 div_frac_start2_mode0;
+ u32 div_frac_start3_mode0;
+ u32 integloop_gain0_mode0;
+ u32 integloop_gain1_mode0;
+ u32 lock_cmp1_mode0;
+ u32 lock_cmp2_mode0;
+ u32 lock_cmp3_mode0;
+ u32 ssc_per1;
+ u32 ssc_per2;
+ u32 ssc_step_size1;
+ u32 ssc_step_size2;
+ u32 core_clk_en;
+ u32 coreclk_div_mode0;
+ u32 phy_mode;
+ u32 vco_freq;
+ u32 hsclk_divsel;
+ u32 vco_ratio;
+ u32 ssc_en_center;
+
+ u32 l0_tx_drv_lvl;
+ u32 l0_tx_emp_post1_lvl;
+ u32 l1_tx_drv_lvl;
+ u32 l1_tx_emp_post1_lvl;
+ u32 l2_tx_drv_lvl;
+ u32 l2_tx_emp_post1_lvl;
+ u32 l3_tx_drv_lvl;
+ u32 l3_tx_emp_post1_lvl;
+
+ u32 l0_pre_driver_1;
+ u32 l0_pre_driver_2;
+ u32 l1_pre_driver_1;
+ u32 l1_pre_driver_2;
+ u32 l2_pre_driver_1;
+ u32 l2_pre_driver_2;
+ u32 l3_pre_driver_1;
+ u32 l3_pre_driver_2;
+
+ bool debug;
+};
+
+/*
+ * hdmi_cobalt_get_div() - choose divider configuration for a pixel clock
+ * @cfg: output; vco_freq, hsclk_divsel, vco_ratio and tx_band are set
+ * @pclk: pixel clock rate in Hz
+ *
+ * Evaluates every supported (vco ratio, tx band) pair and keeps the
+ * lowest in-range VCO frequency whose derived core-clock lock-compare
+ * thresholds fall inside the allowed window.  If nothing fits, the
+ * search is repeated in half rate mode; failing that, safe defaults
+ * are used and an error is logged.
+ */
+static void hdmi_cobalt_get_div(struct cobalt_reg_cfg *cfg, unsigned long pclk)
+{
+ u32 const ratio_list[] = {1, 2, 3, 4, 5, 6,
+ 9, 10, 12, 15, 25};
+ u32 const band_list[] = {0, 1, 2, 3};
+ u32 const sz_ratio = ARRAY_SIZE(ratio_list);
+ u32 const sz_band = ARRAY_SIZE(band_list);
+ u32 const min_freq = 8000, max_freq = 12000;
+ u32 const cmp_cnt = 1024;
+ u32 const th_min = 500, th_max = 1000;
+ /* Widen before multiplying: pclk * 10 can overflow a 32-bit long. */
+ u64 bit_clk = (u64)pclk * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+ u32 half_rate_mode = 0;
+ u32 freq_optimal, list_elements;
+ int optimal_index;
+ u32 i, j, k;
+ u32 freq_list[sz_ratio * sz_band];
+ u32 found_hsclk_divsel = 0, found_vco_ratio;
+ u32 found_tx_band_sel, found_vco_freq;
+
+find_optimal_index:
+ freq_optimal = max_freq;
+ optimal_index = -1;
+ list_elements = 0;
+
+ /* Candidate VCO frequency for every (ratio, band) pair. */
+ for (i = 0; i < sz_ratio; i++) {
+ for (j = 0; j < sz_band; j++) {
+ u64 freq = (bit_clk / (1 << half_rate_mode));
+
+ freq *= (ratio_list[i] * (1 << band_list[j]));
+ do_div(freq, (u64) HDMI_MHZ_TO_HZ);
+ freq_list[list_elements++] = freq;
+ }
+ }
+
+ /* Keep the lowest in-range candidate with acceptable thresholds. */
+ for (k = 0; k < ARRAY_SIZE(freq_list); k++) {
+ u32 const clks_pll_div = 2, core_clk_div = 5;
+ u32 const rng1 = 16, rng2 = 8;
+ u32 core_clk, rvar1;
+ u32 th1, th2;
+
+ core_clk = (((freq_list[k] /
+ ratio_list[k / sz_band]) /
+ clks_pll_div) / core_clk_div);
+
+ rvar1 = HDMI_REF_CLOCK_HZ / cmp_cnt;
+ rvar1 *= rng1;
+ rvar1 /= core_clk;
+
+ th1 = rvar1;
+
+ rvar1 = HDMI_REF_CLOCK_HZ / cmp_cnt;
+ rvar1 *= rng2;
+ rvar1 /= core_clk;
+
+ th2 = rvar1;
+
+ if (freq_list[k] >= min_freq &&
+ freq_list[k] <= max_freq) {
+ if ((th1 >= th_min && th1 <= th_max) ||
+ (th2 >= th_min && th2 <= th_max)) {
+ if (freq_list[k] <= freq_optimal) {
+ freq_optimal = freq_list[k];
+ optimal_index = k;
+ }
+ }
+ }
+ }
+
+ if (optimal_index == -1) {
+ if (!half_rate_mode) {
+ half_rate_mode = 1;
+ goto find_optimal_index;
+ } else {
+ /* set to default values */
+ found_vco_freq = max_freq;
+ found_hsclk_divsel = 0;
+ found_vco_ratio = 2;
+ found_tx_band_sel = 0;
+ pr_err("Config error for pclk %lu\n", pclk);
+ }
+ } else {
+ found_vco_ratio = ratio_list[optimal_index / sz_band];
+ found_tx_band_sel = band_list[optimal_index % sz_band];
+ found_vco_freq = freq_optimal;
+ }
+
+ /* Map the chosen VCO ratio to its HSCLK_SEL register encoding. */
+ switch (found_vco_ratio) {
+ case 1:
+ found_hsclk_divsel = 15;
+ break;
+ case 2:
+ found_hsclk_divsel = 0;
+ break;
+ case 3:
+ found_hsclk_divsel = 4;
+ break;
+ case 4:
+ found_hsclk_divsel = 8;
+ break;
+ case 5:
+ found_hsclk_divsel = 12;
+ break;
+ case 6:
+ found_hsclk_divsel = 1;
+ break;
+ case 9:
+ found_hsclk_divsel = 5;
+ break;
+ case 10:
+ found_hsclk_divsel = 2;
+ break;
+ case 12:
+ found_hsclk_divsel = 9;
+ break;
+ case 15:
+ found_hsclk_divsel = 13;
+ break;
+ case 25:
+ found_hsclk_divsel = 14;
+ break;
+ }
+
+ pr_debug("found_vco_freq=%d\n", found_vco_freq);
+ pr_debug("found_hsclk_divsel=%d\n", found_hsclk_divsel);
+ pr_debug("found_vco_ratio=%d\n", found_vco_ratio);
+ pr_debug("found_tx_band_sel=%d\n", found_tx_band_sel);
+ pr_debug("half_rate_mode=%d\n", half_rate_mode);
+ pr_debug("optimal_index=%d\n", optimal_index);
+
+ cfg->vco_freq = found_vco_freq;
+ cfg->hsclk_divsel = found_hsclk_divsel;
+ cfg->vco_ratio = found_vco_ratio;
+ cfg->tx_band = found_tx_band_sel;
+}
+
+/*
+ * hdmi_cobalt_config_phy() - compute PLL/PHY register settings for @rate
+ * @rate: pixel clock rate in Hz
+ * @cfg: output register configuration, filled in completely
+ *
+ * Derives the fractional-N divider values, charge-pump/loop constants,
+ * lock comparator thresholds and per-lane drive strengths for the
+ * requested pixel clock.  Currently always returns 0; the return code
+ * is kept so callers can propagate future failures.
+ */
+static int hdmi_cobalt_config_phy(unsigned long rate,
+ struct cobalt_reg_cfg *cfg)
+{
+ u64 const high_freq_bit_clk_threshold = 3400000000UL;
+ u64 const dig_freq_bit_clk_threshold = 1500000000UL;
+ u64 const mid_freq_bit_clk_threshold = 750000000;
+ int rc = 0;
+ u64 fdata, tmds_clk;
+ u64 pll_div = 4 * HDMI_REF_CLOCK_HZ;
+ u64 bclk;
+ u64 vco_freq_mhz;
+ u64 hsclk_sel, dec_start, div_frac_start;
+ u64 rem;
+ u64 cpctrl, rctrl, cctrl;
+ u64 integloop_gain;
+ u32 digclk_divsel;
+ u32 tmds_bclk_ratio;
+ u64 cmp_rng, cmp_cnt = 1024, pll_cmp;
+ bool gen_ssc = false;
+
+ /* Widen before multiplying: rate * 10 can overflow a 32-bit long. */
+ bclk = (u64)rate * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+ /* Above the high-frequency threshold the TMDS clock runs at 1/4. */
+ if (bclk > high_freq_bit_clk_threshold) {
+ tmds_clk = rate / 4;
+ tmds_bclk_ratio = 1;
+ } else {
+ tmds_clk = rate;
+ tmds_bclk_ratio = 0;
+ }
+
+ hdmi_cobalt_get_div(cfg, rate);
+
+ /*
+ * NOTE(review): despite its name this holds cfg->vco_freq scaled by
+ * 1e6 (HDMI_HZ_TO_MHZ == HDMI_MHZ_TO_HZ == 1000000) - confirm the
+ * intended unit against hdmi_cobalt_get_div().
+ */
+ vco_freq_mhz = cfg->vco_freq * (u64) HDMI_HZ_TO_MHZ;
+ fdata = cfg->vco_freq;
+ do_div(fdata, cfg->vco_ratio);
+
+ /* Split the divider into integer and 20-bit fractional parts,
+ * rounding the fraction to nearest.
+ */
+ hsclk_sel = cfg->hsclk_divsel;
+ dec_start = vco_freq_mhz;
+ do_div(dec_start, pll_div);
+
+ div_frac_start = vco_freq_mhz * (1 << 20);
+ rem = do_div(div_frac_start, pll_div);
+ div_frac_start -= (dec_start * (1 << 20));
+ if (rem > (pll_div >> 1))
+ div_frac_start++;
+
+ /* Loop constants differ for fractional vs integer-only operation. */
+ if ((div_frac_start != 0) || (gen_ssc == true)) {
+ cpctrl = 0x8;
+ rctrl = 0x16;
+ cctrl = 0x34;
+ } else {
+ cpctrl = 0x30;
+ rctrl = 0x18;
+ cctrl = 0x2;
+ }
+
+ digclk_divsel = (bclk > dig_freq_bit_clk_threshold) ? 0x1 : 0x2;
+
+ integloop_gain = ((div_frac_start != 0) ||
+ (gen_ssc == true)) ? 0x3F : 0xC4;
+ integloop_gain <<= digclk_divsel;
+ integloop_gain = (integloop_gain <= 2046 ? integloop_gain : 0x7FE);
+
+ cmp_rng = gen_ssc ? 0x40 : 0x10;
+
+ /* Lock comparator count, rounded to nearest, minus one. */
+ pll_cmp = cmp_cnt * fdata;
+ rem = do_div(pll_cmp, (u64)(HDMI_REF_CLOCK_MHZ * 10));
+ if (rem > ((u64)(HDMI_REF_CLOCK_MHZ * 10) >> 1))
+ pll_cmp++;
+
+ pll_cmp = pll_cmp - 1;
+
+ pr_debug("VCO_FREQ = %u\n", cfg->vco_freq);
+ pr_debug("FDATA = %llu\n", fdata);
+ pr_debug("DEC_START = %llu\n", dec_start);
+ pr_debug("DIV_FRAC_START = %llu\n", div_frac_start);
+ pr_debug("CPCTRL = %llu\n", cpctrl);
+ pr_debug("RCTRL = %llu\n", rctrl);
+ pr_debug("CCTRL = %llu\n", cctrl);
+ pr_debug("DIGCLK_DIVSEL = %u\n", digclk_divsel);
+ pr_debug("INTEGLOOP_GAIN = %llu\n", integloop_gain);
+ pr_debug("CMP_RNG = %llu\n", cmp_rng);
+ pr_debug("PLL_CMP = %llu\n", pll_cmp);
+
+ /* Pack the computed values into their register-sized fields. */
+ cfg->svs_mode_clk_sel = (digclk_divsel & 0xFF);
+ cfg->hsclk_sel = (0x20 | hsclk_sel);
+ cfg->lock_cmp_en = (gen_ssc ? 0x4 : 0x0);
+ cfg->cctrl_mode0 = (cctrl & 0xFF);
+ cfg->rctrl_mode0 = (rctrl & 0xFF);
+ cfg->cpctrl_mode0 = (cpctrl & 0xFF);
+ cfg->dec_start_mode0 = (dec_start & 0xFF);
+ cfg->div_frac_start1_mode0 = (div_frac_start & 0xFF);
+ cfg->div_frac_start2_mode0 = ((div_frac_start & 0xFF00) >> 8);
+ cfg->div_frac_start3_mode0 = ((div_frac_start & 0xF0000) >> 16);
+ cfg->integloop_gain0_mode0 = (integloop_gain & 0xFF);
+ cfg->integloop_gain1_mode0 = (integloop_gain & 0xF00) >> 8;
+ cfg->lock_cmp1_mode0 = (pll_cmp & 0xFF);
+ cfg->lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+ cfg->lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+ cfg->ssc_per1 = 0;
+ cfg->ssc_per2 = 0;
+ cfg->ssc_step_size1 = 0;
+ cfg->ssc_step_size2 = 0;
+ cfg->core_clk_en = 0x2C;
+ cfg->coreclk_div_mode0 = 0x5;
+ cfg->phy_mode = (tmds_bclk_ratio ? 0x5 : 0x4);
+ cfg->ssc_en_center = 0x0;
+
+ /* Per-lane drive level / pre-driver settings by bit-clock band. */
+ if (bclk > high_freq_bit_clk_threshold) {
+ cfg->l0_tx_drv_lvl = 0xA;
+ cfg->l0_tx_emp_post1_lvl = 0x3;
+ cfg->l1_tx_drv_lvl = 0xA;
+ cfg->l1_tx_emp_post1_lvl = 0x3;
+ cfg->l2_tx_drv_lvl = 0xA;
+ cfg->l2_tx_emp_post1_lvl = 0x3;
+ cfg->l3_tx_drv_lvl = 0x8;
+ cfg->l3_tx_emp_post1_lvl = 0x3;
+ cfg->l0_pre_driver_1 = 0x0;
+ cfg->l0_pre_driver_2 = 0x1C;
+ cfg->l1_pre_driver_1 = 0x0;
+ cfg->l1_pre_driver_2 = 0x1C;
+ cfg->l2_pre_driver_1 = 0x0;
+ cfg->l2_pre_driver_2 = 0x1C;
+ cfg->l3_pre_driver_1 = 0x0;
+ cfg->l3_pre_driver_2 = 0x0;
+ } else if (bclk > dig_freq_bit_clk_threshold) {
+ cfg->l0_tx_drv_lvl = 0x9;
+ cfg->l0_tx_emp_post1_lvl = 0x3;
+ cfg->l1_tx_drv_lvl = 0x9;
+ cfg->l1_tx_emp_post1_lvl = 0x3;
+ cfg->l2_tx_drv_lvl = 0x9;
+ cfg->l2_tx_emp_post1_lvl = 0x3;
+ cfg->l3_tx_drv_lvl = 0x8;
+ cfg->l3_tx_emp_post1_lvl = 0x3;
+ cfg->l0_pre_driver_1 = 0x0;
+ cfg->l0_pre_driver_2 = 0x16;
+ cfg->l1_pre_driver_1 = 0x0;
+ cfg->l1_pre_driver_2 = 0x16;
+ cfg->l2_pre_driver_1 = 0x0;
+ cfg->l2_pre_driver_2 = 0x16;
+ cfg->l3_pre_driver_1 = 0x0;
+ cfg->l3_pre_driver_2 = 0x0;
+ } else if (bclk > mid_freq_bit_clk_threshold) {
+ cfg->l0_tx_drv_lvl = 0x9;
+ cfg->l0_tx_emp_post1_lvl = 0x3;
+ cfg->l1_tx_drv_lvl = 0x9;
+ cfg->l1_tx_emp_post1_lvl = 0x3;
+ cfg->l2_tx_drv_lvl = 0x9;
+ cfg->l2_tx_emp_post1_lvl = 0x3;
+ cfg->l3_tx_drv_lvl = 0x8;
+ cfg->l3_tx_emp_post1_lvl = 0x3;
+ cfg->l0_pre_driver_1 = 0x0;
+ cfg->l0_pre_driver_2 = 0x0E;
+ cfg->l1_pre_driver_1 = 0x0;
+ cfg->l1_pre_driver_2 = 0x0E;
+ cfg->l2_pre_driver_1 = 0x0;
+ cfg->l2_pre_driver_2 = 0x0E;
+ cfg->l3_pre_driver_1 = 0x0;
+ cfg->l3_pre_driver_2 = 0x0;
+ } else {
+ cfg->l0_tx_drv_lvl = 0x0;
+ cfg->l0_tx_emp_post1_lvl = 0x0;
+ cfg->l1_tx_drv_lvl = 0x0;
+ cfg->l1_tx_emp_post1_lvl = 0x0;
+ cfg->l2_tx_drv_lvl = 0x0;
+ cfg->l2_tx_emp_post1_lvl = 0x0;
+ cfg->l3_tx_drv_lvl = 0x0;
+ cfg->l3_tx_emp_post1_lvl = 0x0;
+ cfg->l0_pre_driver_1 = 0x0;
+ cfg->l0_pre_driver_2 = 0x01;
+ cfg->l1_pre_driver_1 = 0x0;
+ cfg->l1_pre_driver_2 = 0x01;
+ cfg->l2_pre_driver_1 = 0x0;
+ cfg->l2_pre_driver_2 = 0x01;
+ cfg->l3_pre_driver_1 = 0x0;
+ cfg->l3_pre_driver_2 = 0x0;
+ }
+
+ return rc;
+}
+
+/*
+ * hdmi_cobalt_pll_set_clk_rate() - program PHY and PLL registers for @rate
+ * @c: VCO clock
+ * @rate: pixel clock rate in Hz
+ *
+ * Computes the configuration via hdmi_cobalt_config_phy() and writes
+ * the full register sequence (power-down/up, PLL dividers, per-lane
+ * drive settings).  Caller must hold the PLL resource vote.
+ */
+static int hdmi_cobalt_pll_set_clk_rate(struct clk *c, unsigned long rate)
+{
+ int rc = 0;
+ struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+ struct mdss_pll_resources *io = vco->priv;
+ struct cobalt_reg_cfg cfg = {0};
+ void __iomem *phy = io->phy_base, *pll = io->pll_base;
+
+ rc = hdmi_cobalt_config_phy(rate, &cfg);
+ if (rc) {
+ pr_err("rate calculation failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Power-cycle the PHY before reprogramming. */
+ _W(phy, PHY_PD_CTL, 0x0);
+ udelay(500);
+
+ _W(phy, PHY_PD_CTL, 0x1);
+ _W(pll, RESETSM_CNTRL, 0x20);
+ _W(phy, PHY_CMN_CTRL, 0x6);
+ _W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(0), cfg.tx_band);
+ _W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(1), cfg.tx_band);
+ _W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(2), cfg.tx_band);
+ _W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(3), cfg.tx_band);
+ _W(pll, PHY_TX_CLKBUF_TERM_ENABLE(0), 0x1);
+ _W(pll, PHY_TX_LANE_MODE(0), 0x20);
+ _W(pll, PHY_TX_LANE_MODE(1), 0x20);
+ _W(pll, PHY_TX_LANE_MODE(2), 0x20);
+ _W(pll, PHY_TX_LANE_MODE(3), 0x20);
+ _W(pll, PHY_TX_CLKBUF_TERM_ENABLE(1), 0x1);
+ _W(pll, PHY_TX_CLKBUF_TERM_ENABLE(2), 0x1);
+ _W(pll, PHY_TX_CLKBUF_TERM_ENABLE(3), 0x1);
+ _W(pll, SYSCLK_BUF_ENABLE, 0x2);
+ _W(pll, BIAS_EN_CLKBUFLR_EN, 0xB);
+ _W(pll, SYSCLK_EN_SEL, 0x37);
+ _W(pll, SYS_CLK_CTRL, 0x2);
+ _W(pll, CLK_ENABLE1, 0xE);
+ _W(pll, PLL_IVCO, 0xF);
+ _W(pll, VCO_TUNE_CTRL, 0x0);
+ _W(pll, SVS_MODE_CLK_SEL, cfg.svs_mode_clk_sel);
+ _W(pll, CLK_SELECT, 0x30);
+ _W(pll, HSCLK_SEL, cfg.hsclk_sel);
+ _W(pll, LOCK_CMP_EN, cfg.lock_cmp_en);
+ _W(pll, PLL_CCTRL_MODE0, cfg.cctrl_mode0);
+ _W(pll, PLL_RCTRL_MODE0, cfg.rctrl_mode0);
+ _W(pll, CP_CTRL_MODE0, cfg.cpctrl_mode0);
+ _W(pll, DEC_START_MODE0, cfg.dec_start_mode0);
+ _W(pll, DIV_FRAC_START1_MODE0, cfg.div_frac_start1_mode0);
+ _W(pll, DIV_FRAC_START2_MODE0, cfg.div_frac_start2_mode0);
+ _W(pll, DIV_FRAC_START3_MODE0, cfg.div_frac_start3_mode0);
+ _W(pll, INTEGLOOP_GAIN0_MODE0, cfg.integloop_gain0_mode0);
+ _W(pll, INTEGLOOP_GAIN1_MODE0, cfg.integloop_gain1_mode0);
+ _W(pll, LOCK_CMP1_MODE0, cfg.lock_cmp1_mode0);
+ _W(pll, LOCK_CMP2_MODE0, cfg.lock_cmp2_mode0);
+ _W(pll, LOCK_CMP3_MODE0, cfg.lock_cmp3_mode0);
+ _W(pll, VCO_TUNE_MAP, 0x0);
+ _W(pll, CORE_CLK_EN, cfg.core_clk_en);
+ _W(pll, CORECLK_DIV_MODE0, cfg.coreclk_div_mode0);
+
+ /* Per-lane drive levels, emphasis and pre-driver settings. */
+ _W(pll, PHY_TX_DRV_LVL(0), cfg.l0_tx_drv_lvl);
+ _W(pll, PHY_TX_DRV_LVL(1), cfg.l1_tx_drv_lvl);
+ _W(pll, PHY_TX_DRV_LVL(2), cfg.l2_tx_drv_lvl);
+ _W(pll, PHY_TX_DRV_LVL(3), cfg.l3_tx_drv_lvl);
+
+ _W(pll, PHY_TX_EMP_POST1_LVL(0), cfg.l0_tx_emp_post1_lvl);
+ _W(pll, PHY_TX_EMP_POST1_LVL(1), cfg.l1_tx_emp_post1_lvl);
+ _W(pll, PHY_TX_EMP_POST1_LVL(2), cfg.l2_tx_emp_post1_lvl);
+ _W(pll, PHY_TX_EMP_POST1_LVL(3), cfg.l3_tx_emp_post1_lvl);
+
+ _W(pll, PHY_TX_PRE_DRIVER_1(0), cfg.l0_pre_driver_1);
+ _W(pll, PHY_TX_PRE_DRIVER_1(1), cfg.l1_pre_driver_1);
+ _W(pll, PHY_TX_PRE_DRIVER_1(2), cfg.l2_pre_driver_1);
+ _W(pll, PHY_TX_PRE_DRIVER_1(3), cfg.l3_pre_driver_1);
+
+ _W(pll, PHY_TX_PRE_DRIVER_2(0), cfg.l0_pre_driver_2);
+ _W(pll, PHY_TX_PRE_DRIVER_2(1), cfg.l1_pre_driver_2);
+ _W(pll, PHY_TX_PRE_DRIVER_2(2), cfg.l2_pre_driver_2);
+ _W(pll, PHY_TX_PRE_DRIVER_2(3), cfg.l3_pre_driver_2);
+
+ _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(0), 0x0);
+ _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(1), 0x0);
+ _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(2), 0x0);
+ _W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(3), 0x0);
+
+ _W(phy, PHY_MODE, cfg.phy_mode);
+
+ _W(pll, PHY_TX_LANE_CONFIG(0), 0x10);
+ _W(pll, PHY_TX_LANE_CONFIG(1), 0x10);
+ _W(pll, PHY_TX_LANE_CONFIG(2), 0x10);
+ _W(pll, PHY_TX_LANE_CONFIG(3), 0x10);
+
+ /* Ensure all registers are flushed to hardware */
+ wmb();
+
+ return 0;
+}
+
+/*
+ * Poll C_READY_STATUS bit 0 until the PLL reports lock, holding the
+ * PLL resource vote for the duration of the poll.
+ */
+static int hdmi_cobalt_pll_lock_status(struct mdss_pll_resources *io)
+{
+ u32 const poll_interval_us = 100;
+ u32 const poll_timeout_us = 5000;
+ void __iomem *pll = io->pll_base;
+ u32 ready;
+ int rc;
+
+ rc = mdss_pll_resource_enable(io, true);
+ if (rc) {
+ pr_err("pll resource can't be enabled\n");
+ return rc;
+ }
+
+ rc = readl_poll_timeout_atomic(pll + C_READY_STATUS,
+ ready,
+ ((ready & BIT(0)) > 0),
+ poll_interval_us,
+ poll_timeout_us);
+ if (!rc)
+ pr_debug("HDMI PLL(%d) lock passed, status=0x%08x\n",
+ io->index, ready);
+ else
+ pr_err("HDMI PLL(%d) lock failed, status=0x%08x\n",
+ io->index, ready);
+
+ mdss_pll_resource_enable(io, false);
+
+ return rc;
+}
+
+/*
+ * Poll PHY_STATUS bit 0 until the PHY reports ready, holding the PLL
+ * resource vote for the duration of the poll.
+ */
+static int hdmi_cobalt_phy_ready_status(struct mdss_pll_resources *io)
+{
+ u32 const poll_interval_us = 100;
+ u32 const poll_timeout_us = 5000;
+ void __iomem *phy = io->phy_base;
+ u32 ready;
+ int rc;
+
+ rc = mdss_pll_resource_enable(io, true);
+ if (rc) {
+ pr_err("pll resource can't be enabled\n");
+ return rc;
+ }
+
+ rc = readl_poll_timeout_atomic(phy + PHY_STATUS,
+ ready,
+ ((ready & BIT(0)) > 0),
+ poll_interval_us,
+ poll_timeout_us);
+ if (!rc)
+ pr_debug("HDMI PHY(%d) ready, status=0x%08x\n",
+ io->index, ready);
+ else
+ pr_err("HDMI PHY(%d) not ready, status=0x%08x\n",
+ io->index, ready);
+
+ mdss_pll_resource_enable(io, false);
+
+ return rc;
+}
+
+/*
+ * hdmi_cobalt_vco_set_rate() - clk_ops set_rate for the cobalt HDMI VCO
+ *
+ * Programs the hardware unless the PLL is already running, in which
+ * case the request is silently skipped (rate/rate_set stay unchanged).
+ * Holds the PLL resource vote only for the duration of the call.
+ */
+static int hdmi_cobalt_vco_set_rate(struct clk *c, unsigned long rate)
+{
+ int rc = 0;
+ struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+ struct mdss_pll_resources *io = vco->priv;
+
+ rc = mdss_pll_resource_enable(io, true);
+ if (rc) {
+ pr_err("pll resource enable failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* PLL already running: leave the hardware untouched. */
+ if (io->pll_on)
+ goto error;
+
+ rc = hdmi_cobalt_pll_set_clk_rate(c, rate);
+ if (rc) {
+ pr_err("failed to set clk rate, rc=%d\n", rc);
+ goto error;
+ }
+
+ /* Cache the programmed rate so prepare() can skip reprogramming. */
+ vco->rate = rate;
+ vco->rate_set = true;
+
+error:
+ (void)mdss_pll_resource_enable(io, false);
+
+ return rc;
+}
+
+/* Clamp the requested rate to the VCO's supported operating range. */
+static long hdmi_cobalt_vco_round_rate(struct clk *c, unsigned long rate)
+{
+ struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+
+ if (rate < vco->min_rate)
+ return vco->min_rate;
+ if (rate > vco->max_rate)
+ return vco->max_rate;
+
+ return rate;
+}
+
+/*
+ * hdmi_cobalt_pll_enable() - start the PHY/PLL and wait for lock
+ *
+ * Kicks the PHY state machine, polls for PLL lock, enables the four TX
+ * lanes, polls for PHY ready, then retoggles PHY_CFG bit 0 to latch the
+ * final configuration.  Sets io->pll_on on success so later set_rate
+ * calls skip reprogramming.  Rate must already have been programmed.
+ */
+static int hdmi_cobalt_pll_enable(struct clk *c)
+{
+ int rc = 0;
+ struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+ struct mdss_pll_resources *io = vco->priv;
+ void __iomem *phy = io->phy_base, *pll = io->pll_base;
+
+ _W(phy, PHY_CFG, 0x1);
+ udelay(100);
+ _W(phy, PHY_CFG, 0x59);
+ udelay(100);
+
+ _W(phy, PHY_CLOCK, 0x6);
+
+ /* Ensure all registers are flushed to hardware */
+ wmb();
+
+ rc = hdmi_cobalt_pll_lock_status(io);
+ if (rc) {
+ pr_err("PLL not locked, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* PLL locked: bring up all four TX lanes. */
+ _W(pll, PHY_TX_LANE_CONFIG(0), 0x1F);
+ _W(pll, PHY_TX_LANE_CONFIG(1), 0x1F);
+ _W(pll, PHY_TX_LANE_CONFIG(2), 0x1F);
+ _W(pll, PHY_TX_LANE_CONFIG(3), 0x1F);
+
+ /* Ensure all registers are flushed to hardware */
+ wmb();
+
+ rc = hdmi_cobalt_phy_ready_status(io);
+ if (rc) {
+ pr_err("PHY NOT READY, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* Pulse PHY_CFG bit 0 low then high to latch the configuration. */
+ _W(phy, PHY_CFG, 0x58);
+ udelay(1);
+ _W(phy, PHY_CFG, 0x59);
+
+ /* Ensure all registers are flushed to hardware */
+ wmb();
+
+ io->pll_on = true;
+ return rc;
+}
+
+/*
+ * hdmi_cobalt_vco_prepare() - clk_ops prepare: power up and start the PLL
+ *
+ * Takes the PLL resource vote, (re)programs the cached rate if it has
+ * not yet reached the hardware, then enables the PLL.  The vote is kept
+ * on success (dropped by unprepare) and released only on failure.
+ */
+static int hdmi_cobalt_vco_prepare(struct clk *c)
+{
+ struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+ struct mdss_pll_resources *io = vco->priv;
+ int rc = 0;
+
+ if (!io) {
+ pr_err("hdmi pll resources are not available\n");
+ return -EINVAL;
+ }
+
+ rc = mdss_pll_resource_enable(io, true);
+ if (rc) {
+ pr_err("pll resource enable failed, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* A rate was cached but never written (e.g. set while pll_on). */
+ if (!vco->rate_set && vco->rate) {
+ rc = hdmi_cobalt_pll_set_clk_rate(c, vco->rate);
+ if (rc) {
+ pr_err("set rate failed, rc=%d\n", rc);
+ goto error;
+ }
+ }
+
+ rc = hdmi_cobalt_pll_enable(c);
+ if (rc)
+ pr_err("pll enabled failed, rc=%d\n", rc);
+
+error:
+ /* Drop the vote only on failure; unprepare releases it otherwise. */
+ if (rc)
+ mdss_pll_resource_enable(io, false);
+
+ return rc;
+}
+
+/*
+ * hdmi_cobalt_pll_disable() - power down the PHY/PLL
+ *
+ * No-op when the PLL is not running.  Clears the cached-rate and
+ * handoff flags so the next prepare reprograms the hardware.
+ */
+static void hdmi_cobalt_pll_disable(struct hdmi_pll_vco_clk *vco)
+{
+ struct mdss_pll_resources *io = vco->priv;
+ void __iomem *phy = io->phy_base;
+
+ if (!io->pll_on)
+ return;
+
+ _W(phy, PHY_PD_CTL, 0x0);
+
+ /* Ensure all registers are flushed to hardware */
+ wmb();
+
+ vco->rate_set = false;
+ io->handoff_resources = false;
+ io->pll_on = false;
+}
+
+/* clk_ops unprepare: power down the PLL and drop the prepare vote. */
+static void hdmi_cobalt_vco_unprepare(struct clk *c)
+{
+ struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+
+ if (!vco->priv) {
+ pr_err("HDMI pll resources not available\n");
+ return;
+ }
+
+ hdmi_cobalt_pll_disable(vco);
+ mdss_pll_resource_enable(vco->priv, false);
+}
+
+/*
+ * hdmi_cobalt_vco_handoff() - detect a PLL left running by the bootloader
+ *
+ * If both the PLL C_READY and PHY status bits are set, report the clock
+ * as already enabled and keep the resource vote (handoff_resources) so
+ * the framework balances it later; otherwise drop the vote again.
+ */
+static enum handoff hdmi_cobalt_vco_handoff(struct clk *c)
+{
+ enum handoff ret = HANDOFF_DISABLED_CLK;
+ struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+ struct mdss_pll_resources *io = vco->priv;
+
+ if (mdss_pll_resource_enable(io, true)) {
+ pr_err("pll resource can't be enabled\n");
+ return ret;
+ }
+
+ io->handoff_resources = true;
+
+ if (_R(io->pll_base, C_READY_STATUS) & BIT(0) &&
+ _R(io->phy_base, PHY_STATUS) & BIT(0)) {
+ io->pll_on = true;
+ /* TODO: calculate rate based on the phy/pll register values. */
+ ret = HANDOFF_ENABLED_CLK;
+ } else {
+ io->handoff_resources = false;
+ mdss_pll_resource_enable(io, false);
+ pr_debug("%s: PHY/PLL not ready\n", __func__);
+ }
+
+ pr_debug("done, ret=%d\n", ret);
+ return ret;
+}
+
+/* Cobalt HDMI VCO clock operations; the PLL is started from prepare. */
+static struct clk_ops hdmi_cobalt_vco_clk_ops = {
+ .set_rate = hdmi_cobalt_vco_set_rate,
+ .round_rate = hdmi_cobalt_vco_round_rate,
+ .prepare = hdmi_cobalt_vco_prepare,
+ .unprepare = hdmi_cobalt_vco_unprepare,
+ .handoff = hdmi_cobalt_vco_handoff,
+};
+
+/* The single cobalt HDMI VCO instance; .priv is set at registration. */
+static struct hdmi_pll_vco_clk hdmi_vco_clk = {
+ .min_rate = HDMI_VCO_MIN_RATE_HZ,
+ .max_rate = HDMI_VCO_MAX_RATE_HZ,
+ .c = {
+ .dbg_name = "hdmi_cobalt_vco_clk",
+ .ops = &hdmi_cobalt_vco_clk_ops,
+ CLK_INIT(hdmi_vco_clk.c),
+ },
+};
+
+/* Clocks exported to the MSM clock framework by this driver. */
+static struct clk_lookup hdmipllcc_cobalt[] = {
+ CLK_LIST(hdmi_vco_clk),
+};
+
+/*
+ * hdmi_cobalt_pll_clock_register() - register the cobalt HDMI VCO clock
+ * with the MSM clock framework.  Returns 0 on success or a negative
+ * errno (-EINVAL on bad arguments, registration error otherwise).
+ */
+int hdmi_cobalt_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ int ret;
+
+ if (!pdev || !pll_res) {
+ pr_err("invalid input parameters\n");
+ return -EINVAL;
+ }
+
+ /* Hand the PLL resources to the VCO clock before registration. */
+ hdmi_vco_clk.priv = pll_res;
+
+ ret = of_msm_clock_register(pdev->dev.of_node, hdmipllcc_cobalt,
+ ARRAY_SIZE(hdmipllcc_cobalt));
+ if (ret)
+ pr_err("clock register failed, rc=%d\n", ret);
+
+ return ret;
+}
diff --git a/drivers/clk/msm/mdss/mdss-hdmi-pll.h b/drivers/clk/msm/mdss/mdss-hdmi-pll.h
new file mode 100644
index 000000000000..d4226bf43e13
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-hdmi-pll.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_HDMI_PLL_H
+#define __MDSS_HDMI_PLL_H
+
+/* Pairs a VCO rate with the register value to use at that rate. */
+struct hdmi_pll_cfg {
+ unsigned long vco_rate;
+ u32 reg;
+};
+
+/*
+ * hdmi_pll_vco_clk - HDMI VCO clock wrapped around an embedded struct clk.
+ * The *_seti/*_setp/crctrl tables are rate-indexed configuration lists
+ * whose use is PLL-generation specific.
+ */
+struct hdmi_pll_vco_clk {
+ unsigned long rate; /* current vco rate */
+ unsigned long min_rate; /* min vco rate */
+ unsigned long max_rate; /* max vco rate */
+ bool rate_set; /* true once 'rate' has been programmed to hardware */
+ struct hdmi_pll_cfg *ip_seti;
+ struct hdmi_pll_cfg *cp_seti;
+ struct hdmi_pll_cfg *ip_setp;
+ struct hdmi_pll_cfg *cp_setp;
+ struct hdmi_pll_cfg *crctrl;
+ void *priv; /* struct mdss_pll_resources of the owning PLL */
+
+ struct clk c; /* embedded clock; see to_hdmi_vco_clk() */
+};
+
+/* Recover the enclosing hdmi_pll_vco_clk from its embedded struct clk. */
+static inline struct hdmi_pll_vco_clk *to_hdmi_vco_clk(struct clk *clk)
+{
+ return container_of(clk, struct hdmi_pll_vco_clk, c);
+}
+
+/* Per-generation HDMI PLL clock registration entry points. */
+int hdmi_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+int hdmi_20nm_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+/* 8996 HDMI PLL: one entry point per hardware revision. */
+int hdmi_8996_v1_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v2_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v3_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v3_1p8_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+
+int hdmi_cobalt_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+#endif
diff --git a/drivers/clk/msm/mdss/mdss-pll-util.c b/drivers/clk/msm/mdss/mdss-pll-util.c
new file mode 100644
index 000000000000..3f1ccb38abc8
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-pll-util.c
@@ -0,0 +1,441 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/of_address.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/memblock.h>
+
+#include "mdss-pll.h"
+
+/*
+ * mdss_pll_util_resource_init() - configure regulators and acquire clock
+ * handles for a PLL.  On clock failure the regulator configuration is
+ * rolled back.  Returns 0 on success or a negative errno.
+ */
+int mdss_pll_util_resource_init(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ struct dss_module_power *mp = &pll_res->mp;
+ int rc;
+
+ rc = msm_dss_config_vreg(&pdev->dev,
+ mp->vreg_config, mp->num_vreg, 1);
+ if (rc) {
+ pr_err("Vreg config failed rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, mp->num_clk);
+ if (rc) {
+ pr_err("Clock get failed rc=%d\n", rc);
+ /* Roll back the regulator configuration. */
+ msm_dss_config_vreg(&pdev->dev, mp->vreg_config,
+ mp->num_vreg, 0);
+ }
+
+ return rc;
+}
+
+/**
+ * mdss_pll_get_mp_by_reg_name() -- Find power module by regulator name
+ *@pll_res: Pointer to the PLL resource
+ *@name: Regulator name as specified in the pll dtsi
+ *
+ * This is a helper function to retrieve the regulator information
+ * for each pll resource.
+ *
+ * Return: the matching dss_vreg entry, or NULL when @pll_res is invalid
+ * or no regulator of that name exists.  (Previously a failed lookup
+ * returned a pointer one past the end of the vreg_config table, which
+ * callers could dereference out of bounds.)
+ */
+struct dss_vreg *mdss_pll_get_mp_by_reg_name(struct mdss_pll_resources *pll_res
+ , char *name)
+{
+ struct dss_vreg *regulator;
+ int i;
+
+ if ((pll_res == NULL) || (pll_res->mp.vreg_config == NULL)) {
+ pr_err("%s Invalid PLL resource\n", __func__);
+ return NULL;
+ }
+
+ regulator = pll_res->mp.vreg_config;
+
+ for (i = 0; i < pll_res->mp.num_vreg; i++) {
+ if (!strcmp(name, regulator->vreg_name)) {
+ pr_debug("Found regulator match for %s\n", name);
+ return regulator;
+ }
+ regulator++;
+ }
+
+ /* Fix: never return a pointer past the end of vreg_config. */
+ return NULL;
+}
+
+/* Undo resource_init(): release clock handles, then deconfigure vregs. */
+void mdss_pll_util_resource_deinit(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ struct dss_module_power *mp = &pll_res->mp;
+
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
+}
+
+/* Free the clock/regulator configuration tables and reset the counts. */
+void mdss_pll_util_resource_release(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ struct dss_module_power *mp = &pll_res->mp;
+
+ mp->num_vreg = 0;
+ mp->num_clk = 0;
+ devm_kfree(&pdev->dev, mp->clk_config);
+ devm_kfree(&pdev->dev, mp->vreg_config);
+}
+
+/*
+ * mdss_pll_util_resource_enable() - power the PLL resources up or down
+ * @pll_res: PLL resource bundle
+ * @enable: true to power up, false to power down
+ *
+ * Power-up order is regulators, clock rates, clock enables; power-down
+ * disables clocks first, then regulators (return codes ignored there).
+ * Returns 0 on success or the first failing sub-call's error code after
+ * rolling the regulators back.
+ */
+int mdss_pll_util_resource_enable(struct mdss_pll_resources *pll_res,
+ bool enable)
+{
+ int rc = 0;
+ struct dss_module_power *mp = &pll_res->mp;
+
+ if (enable) {
+ rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
+ if (rc) {
+ pr_err("Failed to enable vregs rc=%d\n", rc);
+ goto vreg_err;
+ }
+
+ rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+ if (rc) {
+ pr_err("Failed to set clock rate rc=%d\n", rc);
+ goto clk_err;
+ }
+
+ rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+ if (rc) {
+ pr_err("clock enable failed rc:%d\n", rc);
+ goto clk_err;
+ }
+ } else {
+ msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+
+ msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
+ }
+
+ return rc;
+
+clk_err:
+ msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
+vreg_err:
+ return rc;
+}
+
+/*
+ * mdss_pll_util_parse_dt_supply() - build mp->vreg_config from the
+ * "qcom,platform-supply-entries" DT node.
+ *
+ * The name/voltage/load properties are mandatory per supply; the
+ * *-sleep properties are optional and default to 0.  A missing supply
+ * node is not an error (returns 0 with num_vreg == 0).  On any parse
+ * failure the partially built table is freed and num_vreg reset.
+ */
+static int mdss_pll_util_parse_dt_supply(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ int i = 0, rc = 0;
+ u32 tmp = 0;
+ struct device_node *of_node = NULL, *supply_root_node = NULL;
+ struct device_node *supply_node = NULL;
+ struct dss_module_power *mp = &pll_res->mp;
+
+ of_node = pdev->dev.of_node;
+
+ mp->num_vreg = 0;
+ supply_root_node = of_get_child_by_name(of_node,
+ "qcom,platform-supply-entries");
+ if (!supply_root_node) {
+ pr_err("no supply entry present\n");
+ return rc;
+ }
+
+ for_each_child_of_node(supply_root_node, supply_node) {
+ mp->num_vreg++;
+ }
+
+ if (mp->num_vreg == 0) {
+ pr_debug("no vreg\n");
+ goto put_node;
+ }
+ pr_debug("vreg found. count=%d\n", mp->num_vreg);
+
+ mp->vreg_config = devm_kzalloc(&pdev->dev, sizeof(struct dss_vreg) *
+ mp->num_vreg, GFP_KERNEL);
+ if (!mp->vreg_config) {
+ pr_err("can't alloc vreg mem\n");
+ rc = -ENOMEM;
+ /* Keep count consistent with the missing table. */
+ mp->num_vreg = 0;
+ goto put_node;
+ }
+
+ for_each_child_of_node(supply_root_node, supply_node) {
+
+ const char *st = NULL;
+
+ rc = of_property_read_string(supply_node,
+ "qcom,supply-name", &st);
+ if (rc) {
+ pr_err(":error reading name. rc=%d\n", rc);
+ goto error;
+ }
+
+ strlcpy(mp->vreg_config[i].vreg_name, st,
+ sizeof(mp->vreg_config[i].vreg_name));
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-min-voltage", &tmp);
+ if (rc) {
+ pr_err(": error reading min volt. rc=%d\n", rc);
+ goto error;
+ }
+ mp->vreg_config[i].min_voltage = tmp;
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-max-voltage", &tmp);
+ if (rc) {
+ pr_err(": error reading max volt. rc=%d\n", rc);
+ goto error;
+ }
+ mp->vreg_config[i].max_voltage = tmp;
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-enable-load", &tmp);
+ if (rc) {
+ pr_err(": error reading enable load. rc=%d\n", rc);
+ goto error;
+ }
+ mp->vreg_config[i].enable_load = tmp;
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-disable-load", &tmp);
+ if (rc) {
+ pr_err(": error reading disable load. rc=%d\n", rc);
+ goto error;
+ }
+ mp->vreg_config[i].disable_load = tmp;
+
+ /* The sleep values below are optional; default to 0. */
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-pre-on-sleep", &tmp);
+ if (rc)
+ pr_debug("error reading supply pre sleep value. rc=%d\n",
+ rc);
+
+ mp->vreg_config[i].pre_on_sleep = (!rc ? tmp : 0);
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-pre-off-sleep", &tmp);
+ if (rc)
+ pr_debug("error reading supply pre sleep value. rc=%d\n",
+ rc);
+
+ mp->vreg_config[i].pre_off_sleep = (!rc ? tmp : 0);
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-post-on-sleep", &tmp);
+ if (rc)
+ pr_debug("error reading supply post sleep value. rc=%d\n",
+ rc);
+
+ mp->vreg_config[i].post_on_sleep = (!rc ? tmp : 0);
+
+ rc = of_property_read_u32(supply_node,
+ "qcom,supply-post-off-sleep", &tmp);
+ if (rc)
+ pr_debug("error reading supply post sleep value. rc=%d\n",
+ rc);
+
+ mp->vreg_config[i].post_off_sleep = (!rc ? tmp : 0);
+
+ pr_debug("%s min=%d, max=%d, enable=%d, disable=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
+ mp->vreg_config[i].vreg_name,
+ mp->vreg_config[i].min_voltage,
+ mp->vreg_config[i].max_voltage,
+ mp->vreg_config[i].enable_load,
+ mp->vreg_config[i].disable_load,
+ mp->vreg_config[i].pre_on_sleep,
+ mp->vreg_config[i].post_on_sleep,
+ mp->vreg_config[i].pre_off_sleep,
+ mp->vreg_config[i].post_off_sleep);
+ ++i;
+
+ rc = 0;
+ }
+
+ goto put_node;
+
+error:
+ devm_kfree(&pdev->dev, mp->vreg_config);
+ mp->vreg_config = NULL;
+ mp->num_vreg = 0;
+put_node:
+ /* Fix: drop the reference taken by of_get_child_by_name(). */
+ of_node_put(supply_root_node);
+ return rc;
+}
+
+/*
+ * Parse the "clock-names"/"clock-rate" DT properties into mp->clk_config.
+ * Entries with a zero rate are classified as AHB (bus) clocks, all others
+ * as pixel clocks.
+ *
+ * NOTE(review): when "clock-names" is absent this logs an error but still
+ * returns 0 (rc is never set on that path) -- confirm whether clocks are
+ * genuinely optional for some PLL nodes before changing it.
+ */
+static int mdss_pll_util_parse_dt_clock(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ u32 i = 0, rc = 0;
+ struct dss_module_power *mp = &pll_res->mp;
+ const char *clock_name;
+ u32 clock_rate;
+
+ mp->num_clk = of_property_count_strings(pdev->dev.of_node,
+ "clock-names");
+ if (mp->num_clk <= 0) {
+ pr_err("clocks are not defined\n");
+ goto clk_err;
+ }
+
+ mp->clk_config = devm_kzalloc(&pdev->dev,
+ sizeof(struct dss_clk) * mp->num_clk, GFP_KERNEL);
+ if (!mp->clk_config) {
+ pr_err("clock configuration allocation failed\n");
+ rc = -ENOMEM;
+ mp->num_clk = 0;
+ goto clk_err;
+ }
+
+ for (i = 0; i < mp->num_clk; i++) {
+ of_property_read_string_index(pdev->dev.of_node, "clock-names",
+ i, &clock_name);
+ strlcpy(mp->clk_config[i].clk_name, clock_name,
+ sizeof(mp->clk_config[i].clk_name));
+
+ /* clock_rate keeps its previous value if the index read fails */
+ of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
+ i, &clock_rate);
+ mp->clk_config[i].rate = clock_rate;
+
+ /* rate 0 marks a bus clock; a real rate marks a pixel clock */
+ if (!clock_rate)
+ mp->clk_config[i].type = DSS_CLK_AHB;
+ else
+ mp->clk_config[i].type = DSS_CLK_PCLK;
+ }
+
+clk_err:
+ return rc;
+}
+
+/* Return every reserved bootmem page in [mem_addr, mem_addr + size) to
+ * the page allocator.
+ */
+static void mdss_pll_free_bootmem(u32 mem_addr, u32 size)
+{
+ unsigned long first = mem_addr >> PAGE_SHIFT;
+ unsigned long last = (mem_addr + size) >> PAGE_SHIFT;
+ unsigned long pfn;
+
+ for (pfn = first; pfn < last; pfn++)
+ free_reserved_page(pfn_to_page(pfn));
+}
+
+/*
+ * Read the dynamic-fps (dfps) data the bootloader left in the reserved
+ * "memory-region" into pll_res->dfps, then hand the reserved memory back
+ * to the kernel (memblock_free + free_reserved_page). Failure is not
+ * fatal to the caller -- dfps simply stays disabled.
+ *
+ * Fixes: a missing "memory-region" node and a failed kzalloc() both used
+ * PTR_ERR(NULL), which is 0, so the function reported success on those
+ * error paths.
+ */
+static int mdss_pll_util_parse_dt_dfps(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ int rc = 0;
+ struct device_node *pnode;
+ const u32 *addr;
+ struct vm_struct *area;
+ u64 size;
+ u32 offsets[2];
+ unsigned long virt_add;
+
+ pnode = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+ if (IS_ERR_OR_NULL(pnode)) {
+ /* PTR_ERR(NULL) is 0; report a real error for a missing node */
+ rc = pnode ? PTR_ERR(pnode) : -ENODEV;
+ goto pnode_err;
+ }
+
+ addr = of_get_address(pnode, 0, &size, NULL);
+ if (!addr) {
+ pr_err("failed to parse the dfps memory address\n");
+ rc = -EINVAL;
+ goto pnode_err;
+ }
+ /* maintain compatibility for 32/64 bit */
+ offsets[0] = (u32) of_read_ulong(addr, 2);
+ offsets[1] = (u32) size;
+
+ area = get_vm_area(offsets[1], VM_IOREMAP);
+ if (!area) {
+ rc = -ENOMEM;
+ goto dfps_mem_err;
+ }
+
+ virt_add = (unsigned long)area->addr;
+ rc = ioremap_page_range(virt_add, (virt_add + offsets[1]),
+ offsets[0], PAGE_KERNEL);
+ if (rc) {
+ rc = -ENOMEM;
+ goto ioremap_err;
+ }
+
+ pll_res->dfps = kzalloc(sizeof(struct dfps_info), GFP_KERNEL);
+ if (!pll_res->dfps) {
+ /* kzalloc() returns NULL on failure, never an ERR_PTR */
+ rc = -ENOMEM;
+ pr_err("couldn't allocate dfps kernel memory\n");
+ goto addr_err;
+ }
+
+ /* memcopy complete dfps structure from kernel virtual memory */
+ memcpy_fromio(pll_res->dfps, area->addr, sizeof(struct dfps_info));
+
+addr_err:
+ if (virt_add)
+ unmap_kernel_range(virt_add, (unsigned long) size);
+ioremap_err:
+ if (area)
+ vfree(area->addr);
+dfps_mem_err:
+ /* free the dfps memory here */
+ memblock_free(offsets[0], offsets[1]);
+ mdss_pll_free_bootmem(offsets[0], offsets[1]);
+pnode_err:
+ if (pnode)
+ of_node_put(pnode);
+
+ dma_release_declared_memory(&pdev->dev);
+ return rc;
+}
+
+/*
+ * Top-level DT parser for a PLL node: regulators first, then clocks,
+ * then the optional dfps region.
+ *
+ * On clock-parse failure the already-allocated vreg_config is released;
+ * a dfps failure is deliberately non-fatal (logged only).
+ */
+int mdss_pll_util_resource_parse(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ int rc = 0;
+ struct dss_module_power *mp = &pll_res->mp;
+
+ rc = mdss_pll_util_parse_dt_supply(pdev, pll_res);
+ if (rc) {
+ pr_err("vreg parsing failed rc=%d\n", rc);
+ goto end;
+ }
+
+ rc = mdss_pll_util_parse_dt_clock(pdev, pll_res);
+ if (rc) {
+ pr_err("clock name parsing failed rc=%d", rc);
+ goto clk_err;
+ }
+
+ /* best-effort: dfps data may simply not be provisioned */
+ if (mdss_pll_util_parse_dt_dfps(pdev, pll_res))
+ pr_err("dfps not enabled!\n");
+
+ return rc;
+
+clk_err:
+ devm_kfree(&pdev->dev, mp->vreg_config);
+ mp->num_vreg = 0;
+end:
+ return rc;
+}
diff --git a/drivers/clk/msm/mdss/mdss-pll.c b/drivers/clk/msm/mdss/mdss-pll.c
new file mode 100644
index 000000000000..4633c7d0e245
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-pll.c
@@ -0,0 +1,437 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-hdmi-pll.h"
+#include "mdss-dp-pll.h"
+
+/*
+ * Reference-counted enable/disable of a PLL's supporting resources
+ * (regulators/clocks via mdss_pll_util_resource_enable).
+ *
+ * The hardware is only touched on a 0->1 enable or 1->0 disable
+ * transition; intermediate calls just adjust the refcount.
+ */
+int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
+{
+ int rc = 0;
+ int changed = 0;
+ if (!pll_res) {
+ pr_err("Invalid input parameters\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Don't turn off resources during handoff or add more than
+ * 1 refcount.
+ */
+ if (pll_res->handoff_resources &&
+ (!enable || (enable & pll_res->resource_enable))) {
+ pr_debug("Do not turn on/off pll resources during handoff case\n");
+ return rc;
+ }
+
+ if (enable) {
+ if (pll_res->resource_ref_cnt == 0)
+ changed++;
+ pll_res->resource_ref_cnt++;
+ } else {
+ if (pll_res->resource_ref_cnt) {
+ pll_res->resource_ref_cnt--;
+ if (pll_res->resource_ref_cnt == 0)
+ changed++;
+ } else {
+ /* unbalanced disable: refcount never goes negative */
+ pr_err("PLL Resources already OFF\n");
+ }
+ }
+
+ if (changed) {
+ rc = mdss_pll_util_resource_enable(pll_res, enable);
+ if (rc)
+ pr_err("Resource update failed rc=%d\n", rc);
+ else
+ pll_res->resource_enable = enable;
+ }
+
+ return rc;
+}
+
+/* Thin argument-checking wrapper around mdss_pll_util_resource_init(). */
+static int mdss_pll_resource_init(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ if (pdev && pll_res)
+ return mdss_pll_util_resource_init(pdev, pll_res);
+
+ pr_err("Invalid input parameters\n");
+ return -EINVAL;
+}
+
+/* Thin argument-checking wrapper around mdss_pll_util_resource_deinit(). */
+static void mdss_pll_resource_deinit(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ if (pdev && pll_res)
+ mdss_pll_util_resource_deinit(pdev, pll_res);
+ else
+ pr_err("Invalid input parameters\n");
+}
+
+/* Thin argument-checking wrapper around mdss_pll_util_resource_release(). */
+static void mdss_pll_resource_release(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ if (pdev && pll_res)
+ mdss_pll_util_resource_release(pdev, pll_res);
+ else
+ pr_err("Invalid input parameters\n");
+}
+
+/*
+ * Parse DT resources and derive pll_interface_type / target_id / revision
+ * from the node's "compatible" string.
+ *
+ * Fixes: the "missing compatible" and "unknown compatible" paths jumped
+ * to err with rc still 0, so the probe would continue as if parsing had
+ * succeeded. Both now return -EINVAL.
+ */
+static int mdss_pll_resource_parse(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ int rc = 0;
+ const char *compatible_stream;
+
+ if (!pdev || !pll_res) {
+ pr_err("Invalid input parameters\n");
+ return -EINVAL;
+ }
+
+ rc = mdss_pll_util_resource_parse(pdev, pll_res);
+ if (rc) {
+ pr_err("Failed to parse the resources rc=%d\n", rc);
+ goto end;
+ }
+
+ compatible_stream = of_get_property(pdev->dev.of_node,
+ "compatible", NULL);
+ if (!compatible_stream) {
+ pr_err("Failed to parse the compatible stream\n");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8996")) {
+ pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
+ pll_res->target_id = MDSS_PLL_TARGET_8996;
+ pll_res->revision = 1;
+ } else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8996_v2")) {
+ pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
+ pll_res->target_id = MDSS_PLL_TARGET_8996;
+ pll_res->revision = 2;
+ } else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_cobalt")) {
+ pll_res->pll_interface_type = MDSS_DSI_PLL_COBALT;
+ } else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_cobalt")) {
+ pll_res->pll_interface_type = MDSS_DP_PLL_COBALT;
+ } else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996")) {
+ pll_res->pll_interface_type = MDSS_HDMI_PLL_8996;
+ } else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996_v2")) {
+ pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V2;
+ } else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996_v3")) {
+ pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V3;
+ } else if (!strcmp(compatible_stream,
+ "qcom,mdss_hdmi_pll_8996_v3_1p8")) {
+ pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V3_1_8;
+ } else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_cobalt")) {
+ pll_res->pll_interface_type = MDSS_HDMI_PLL_COBALT;
+ } else {
+ /* unrecognized compatible: report failure, not silent success */
+ rc = -EINVAL;
+ goto err;
+ }
+
+ return rc;
+
+err:
+ mdss_pll_resource_release(pdev, pll_res);
+end:
+ return rc;
+}
+
+/*
+ * Dispatch clock registration to the handler matching the parsed
+ * pll_interface_type.
+ *
+ * Fixes: the MDSS_DSI_PLL_COBALT case was missing a break, so after a
+ * DSI cobalt registration it fell through and also ran (and returned
+ * the status of) the DP cobalt registration.
+ */
+static int mdss_pll_clock_register(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res)
+{
+ int rc;
+
+ if (!pdev || !pll_res) {
+ pr_err("Invalid input parameters\n");
+ return -EINVAL;
+ }
+
+ switch (pll_res->pll_interface_type) {
+ case MDSS_DSI_PLL_8996:
+ rc = dsi_pll_clock_register_8996(pdev, pll_res);
+ break;
+ case MDSS_DSI_PLL_COBALT:
+ rc = dsi_pll_clock_register_cobalt(pdev, pll_res);
+ break;
+ case MDSS_DP_PLL_COBALT:
+ rc = dp_pll_clock_register_cobalt(pdev, pll_res);
+ break;
+ case MDSS_HDMI_PLL_8996:
+ rc = hdmi_8996_v1_pll_clock_register(pdev, pll_res);
+ break;
+ case MDSS_HDMI_PLL_8996_V2:
+ rc = hdmi_8996_v2_pll_clock_register(pdev, pll_res);
+ break;
+ case MDSS_HDMI_PLL_8996_V3:
+ rc = hdmi_8996_v3_pll_clock_register(pdev, pll_res);
+ break;
+ case MDSS_HDMI_PLL_8996_V3_1_8:
+ rc = hdmi_8996_v3_1p8_pll_clock_register(pdev, pll_res);
+ break;
+ case MDSS_HDMI_PLL_COBALT:
+ rc = hdmi_cobalt_pll_clock_register(pdev, pll_res);
+ break;
+ case MDSS_UNKNOWN_PLL:
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc) {
+ pr_err("Pll ndx=%d clock register failed rc=%d\n",
+ pll_res->index, rc);
+ }
+
+ return rc;
+}
+
+/*
+ * Probe a PLL platform device: allocate the resource struct, parse DT
+ * (index, SSC config, compatible), map the register regions (pll_base
+ * mandatory; phy/dynamic_pll optional; gdsc mandatory), then init
+ * resources and register the clocks. Errors unwind in strict reverse
+ * order via the goto chain at the bottom.
+ */
+static int mdss_pll_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ const char *label;
+ struct resource *pll_base_reg;
+ struct resource *phy_base_reg;
+ struct resource *dynamic_pll_base_reg;
+ struct resource *gdsc_base_reg;
+ struct mdss_pll_resources *pll_res;
+
+ if (!pdev->dev.of_node) {
+ pr_err("MDSS pll driver only supports device tree probe\n");
+ rc = -ENOTSUPP;
+ goto error;
+ }
+
+ label = of_get_property(pdev->dev.of_node, "label", NULL);
+ if (!label)
+ pr_info("%d: MDSS pll label not specified\n", __LINE__);
+ else
+ pr_info("MDSS pll label = %s\n", label);
+
+ pll_res = devm_kzalloc(&pdev->dev, sizeof(struct mdss_pll_resources),
+ GFP_KERNEL);
+ if (!pll_res) {
+ pr_err("Failed to allocate the clock pll\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+ platform_set_drvdata(pdev, pll_res);
+
+ /* cell-index is optional; default to PLL 0 when absent */
+ rc = of_property_read_u32(pdev->dev.of_node, "cell-index",
+ &pll_res->index);
+ if (rc) {
+ pr_err("Unable to get the cell-index rc=%d\n", rc);
+ pll_res->index = 0;
+ }
+
+ pll_res->ssc_en = of_property_read_bool(pdev->dev.of_node,
+ "qcom,dsi-pll-ssc-en");
+
+ if (pll_res->ssc_en) {
+ pr_info("%s: label=%s PLL SSC enabled\n", __func__, label);
+
+ /*
+ * NOTE(review): the rc from these two reads is ignored
+ * (overwritten below); ssc_freq/ssc_ppm stay 0 when the
+ * properties are absent -- confirm that is intended.
+ */
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,ssc-frequency-hz", &pll_res->ssc_freq);
+
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,ssc-ppm", &pll_res->ssc_ppm);
+
+ /* down-spread unless DT asks for center-spread */
+ pll_res->ssc_center = false;
+
+ label = of_get_property(pdev->dev.of_node,
+ "qcom,dsi-pll-ssc-mode", NULL);
+
+ if (label && !strcmp(label, "center-spread"))
+ pll_res->ssc_center = true;
+ }
+
+ /* pll_base is mandatory */
+ pll_base_reg = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "pll_base");
+ if (!pll_base_reg) {
+ pr_err("Unable to get the pll base resources\n");
+ rc = -ENOMEM;
+ goto io_error;
+ }
+
+ pll_res->pll_base = ioremap(pll_base_reg->start,
+ resource_size(pll_base_reg));
+ if (!pll_res->pll_base) {
+ pr_err("Unable to remap pll base resources\n");
+ rc = -ENOMEM;
+ goto io_error;
+ }
+
+ pr_debug("%s: ndx=%d base=%p\n", __func__,
+ pll_res->index, pll_res->pll_base);
+
+ rc = mdss_pll_resource_parse(pdev, pll_res);
+ if (rc) {
+ pr_err("Pll resource parsing from dt failed rc=%d\n", rc);
+ goto res_parse_error;
+ }
+
+ /* phy_base is optional */
+ phy_base_reg = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "phy_base");
+ if (phy_base_reg) {
+ pll_res->phy_base = ioremap(phy_base_reg->start,
+ resource_size(phy_base_reg));
+ if (!pll_res->phy_base) {
+ pr_err("Unable to remap pll phy base resources\n");
+ rc = -ENOMEM;
+ goto phy_io_error;
+ }
+ }
+
+ /* dynamic_pll_base is optional (dynamic refresh support) */
+ dynamic_pll_base_reg = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "dynamic_pll_base");
+ if (dynamic_pll_base_reg) {
+ pll_res->dyn_pll_base = ioremap(dynamic_pll_base_reg->start,
+ resource_size(dynamic_pll_base_reg));
+ if (!pll_res->dyn_pll_base) {
+ pr_err("Unable to remap dynamic pll base resources\n");
+ rc = -ENOMEM;
+ goto dyn_pll_io_error;
+ }
+ }
+
+ /* gdsc_base is mandatory */
+ gdsc_base_reg = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "gdsc_base");
+ if (!gdsc_base_reg) {
+ pr_err("Unable to get the gdsc base resource\n");
+ rc = -ENOMEM;
+ goto gdsc_io_error;
+ }
+ pll_res->gdsc_base = ioremap(gdsc_base_reg->start,
+ resource_size(gdsc_base_reg));
+ if (!pll_res->gdsc_base) {
+ pr_err("Unable to remap gdsc base resources\n");
+ rc = -ENOMEM;
+ goto gdsc_io_error;
+ }
+
+ rc = mdss_pll_resource_init(pdev, pll_res);
+ if (rc) {
+ pr_err("Pll ndx=%d resource init failed rc=%d\n",
+ pll_res->index, rc);
+ goto res_init_error;
+ }
+
+ rc = mdss_pll_clock_register(pdev, pll_res);
+ if (rc) {
+ pr_err("Pll ndx=%d clock register failed rc=%d\n",
+ pll_res->index, rc);
+ goto clock_register_error;
+ }
+
+ return rc;
+
+/* error unwinding: strictly the reverse of the acquisition order above */
+clock_register_error:
+ mdss_pll_resource_deinit(pdev, pll_res);
+res_init_error:
+ if (pll_res->gdsc_base)
+ iounmap(pll_res->gdsc_base);
+gdsc_io_error:
+ if (pll_res->dyn_pll_base)
+ iounmap(pll_res->dyn_pll_base);
+dyn_pll_io_error:
+ if (pll_res->phy_base)
+ iounmap(pll_res->phy_base);
+phy_io_error:
+ mdss_pll_resource_release(pdev, pll_res);
+res_parse_error:
+ iounmap(pll_res->pll_base);
+io_error:
+ devm_kfree(&pdev->dev, pll_res);
+error:
+ return rc;
+}
+
+/*
+ * Undo mdss_pll_probe(): deinit resources, unmap every region probe
+ * mapped, and free the resource struct.
+ *
+ * Fixes: dyn_pll_base is mapped in probe but was never iounmap'd here,
+ * leaking the mapping on driver unbind.
+ */
+static int mdss_pll_remove(struct platform_device *pdev)
+{
+ struct mdss_pll_resources *pll_res;
+
+ pll_res = platform_get_drvdata(pdev);
+ if (!pll_res) {
+ pr_err("Invalid PLL resource data");
+ return 0;
+ }
+
+ mdss_pll_resource_deinit(pdev, pll_res);
+ if (pll_res->phy_base)
+ iounmap(pll_res->phy_base);
+ if (pll_res->dyn_pll_base)
+ iounmap(pll_res->dyn_pll_base);
+ if (pll_res->gdsc_base)
+ iounmap(pll_res->gdsc_base);
+ mdss_pll_resource_release(pdev, pll_res);
+ iounmap(pll_res->pll_base);
+ devm_kfree(&pdev->dev, pll_res);
+ return 0;
+}
+
+static const struct of_device_id mdss_pll_dt_match[] = {
+ {.compatible = "qcom,mdss_dsi_pll_8996"},
+ {.compatible = "qcom,mdss_dsi_pll_8996_v2"},
+ {.compatible = "qcom,mdss_dsi_pll_cobalt"},
+ {.compatible = "qcom,mdss_hdmi_pll_8996"},
+ {.compatible = "qcom,mdss_hdmi_pll_8996_v2"},
+ {.compatible = "qcom,mdss_hdmi_pll_8996_v3"},
+ {.compatible = "qcom,mdss_hdmi_pll_8996_v3_1p8"},
+ {.compatible = "qcom,mdss_dp_pll_cobalt"},
+ {.compatible = "qcom,mdss_hdmi_pll_cobalt"},
+ {}
+};
+
+/* Fixes: the table was exported under the nonexistent name
+ * "mdss_clock_dt_match", which fails to build when this driver is
+ * compiled as a module.
+ */
+MODULE_DEVICE_TABLE(of, mdss_pll_dt_match);
+
+/* Platform driver binding the compatibles above to probe/remove. */
+static struct platform_driver mdss_pll_driver = {
+ .probe = mdss_pll_probe,
+ .remove = mdss_pll_remove,
+ .driver = {
+ .name = "mdss_pll",
+ .of_match_table = mdss_pll_dt_match,
+ },
+};
+
+/* Register the driver early (subsys_initcall): display clocks must be
+ * available before their consumers probe.
+ */
+static int __init mdss_pll_driver_init(void)
+{
+ int ret = platform_driver_register(&mdss_pll_driver);
+
+ if (ret)
+ pr_err("mdss_register_pll_driver() failed!\n");
+
+ return ret;
+}
+subsys_initcall(mdss_pll_driver_init);
+
+/* Module teardown: unregister the platform driver. */
+static void __exit mdss_pll_driver_deinit(void)
+{
+ platform_driver_unregister(&mdss_pll_driver);
+}
+module_exit(mdss_pll_driver_deinit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("mdss pll driver");
diff --git a/drivers/clk/msm/mdss/mdss-pll.h b/drivers/clk/msm/mdss/mdss-pll.h
new file mode 100644
index 000000000000..a2eb03e09146
--- /dev/null
+++ b/drivers/clk/msm/mdss/mdss-pll.h
@@ -0,0 +1,233 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_PLL_H
+#define __MDSS_PLL_H
+
+#include <linux/mdss_io_util.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/io.h>
+
+#define MDSS_PLL_REG_W(base, offset, data) \
+ writel_relaxed((data), (base) + (offset))
+#define MDSS_PLL_REG_R(base, offset) readl_relaxed((base) + (offset))
+
+#define PLL_CALC_DATA(addr0, addr1, data0, data1) \
+ (((data1) << 24) | ((((addr1) / 4) & 0xFF) << 16) | \
+ ((data0) << 8) | (((addr0) / 4) & 0xFF))
+
+#define MDSS_DYN_PLL_REG_W(base, offset, addr0, addr1, data0, data1) \
+ writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
+ (base) + (offset))
+
+enum {
+ MDSS_DSI_PLL_8996,
+ MDSS_DSI_PLL_COBALT,
+ MDSS_DP_PLL_COBALT,
+ MDSS_HDMI_PLL_8996,
+ MDSS_HDMI_PLL_8996_V2,
+ MDSS_HDMI_PLL_8996_V3,
+ MDSS_HDMI_PLL_8996_V3_1_8,
+ MDSS_HDMI_PLL_COBALT,
+ MDSS_UNKNOWN_PLL,
+};
+
+enum {
+ MDSS_PLL_TARGET_8996,
+};
+
+#define DFPS_MAX_NUM_OF_FRAME_RATES 20
+
+struct dfps_panel_info {
+ uint32_t enabled;
+ uint32_t frame_rate_cnt;
+ uint32_t frame_rate[DFPS_MAX_NUM_OF_FRAME_RATES]; /* hz */
+};
+
+struct dfps_pll_codes {
+ uint32_t pll_codes_1;
+ uint32_t pll_codes_2;
+};
+
+struct dfps_codes_info {
+ uint32_t is_valid;
+ uint32_t frame_rate; /* hz */
+ uint32_t clk_rate; /* hz */
+ struct dfps_pll_codes pll_codes;
+};
+
+struct dfps_info {
+ struct dfps_panel_info panel_dfps;
+ struct dfps_codes_info codes_dfps[DFPS_MAX_NUM_OF_FRAME_RATES];
+ void *dfps_fb_base;
+};
+
+struct mdss_pll_resources {
+
+ /* Pll specific resources like GPIO, power supply, clocks, etc*/
+ struct dss_module_power mp;
+
+ /*
+ * dsi/edp/hmdi plls' base register, phy, gdsc and dynamic refresh
+ * register mapping
+ */
+ void __iomem *pll_base;
+ void __iomem *phy_base;
+ void __iomem *gdsc_base;
+ void __iomem *dyn_pll_base;
+
+ bool is_init_locked;
+ s64 vco_current_rate;
+ s64 vco_locking_rate;
+ s64 vco_ref_clk_rate;
+
+ /*
+ * Certain pll's needs to update the same vco rate after resume in
+ * suspend/resume scenario. Cached the vco rate for such plls.
+ */
+ unsigned long vco_cached_rate;
+
+ /* dsi/edp/hmdi pll interface type */
+ u32 pll_interface_type;
+
+ /*
+ * Target ID. Used in pll_register API for valid target check before
+ * registering the PLL clocks.
+ */
+ u32 target_id;
+
+ /* HW recommended delay during configuration of vco clock rate */
+ u32 vco_delay;
+
+ /* Ref-count of the PLL resources */
+ u32 resource_ref_cnt;
+
+ /*
+ * Keep track to resource status to avoid updating same status for the
+ * pll from different paths
+ */
+ bool resource_enable;
+
+ /*
+ * Certain plls' do not allow vco rate update if it is on. Keep track of
+ * status for them to turn on/off after set rate success.
+ */
+ bool pll_on;
+
+ /*
+ * handoff_status is true of pll is already enabled by bootloader with
+ * continuous splash enable case. Clock API will call the handoff API
+ * to enable the status. It is disabled if continuous splash
+ * feature is disabled.
+ */
+ bool handoff_resources;
+
+ /*
+ * caching the pll trim codes in the case of dynamic refresh
+ */
+ int cache_pll_trim_codes[2];
+
+ /*
+ * for maintaining the status of saving trim codes
+ */
+ bool reg_upd;
+
+ /*
+ * Notifier callback for MDSS gdsc regulator events
+ */
+ struct notifier_block gdsc_cb;
+
+ /*
+ * Worker function to call PLL off event
+ */
+ struct work_struct pll_off;
+
+ /*
+ * PLL index if multiple index are available. Eg. in case of
+ * DSI we have 2 plls.
+ */
+ uint32_t index;
+
+ bool ssc_en; /* share pll with master */
+ bool ssc_center; /* default is down spread */
+ u32 ssc_freq;
+ u32 ssc_ppm;
+
+ struct mdss_pll_resources *slave;
+
+ /*
+ * target pll revision information
+ */
+ int revision;
+
+ void *priv;
+
+ /*
+ * dynamic refresh pll codes stored in this structure
+ */
+ struct dfps_info *dfps;
+
+};
+
+struct mdss_pll_vco_calc {
+ s32 div_frac_start1;
+ s32 div_frac_start2;
+ s32 div_frac_start3;
+ s64 dec_start1;
+ s64 dec_start2;
+ s64 pll_plllock_cmp1;
+ s64 pll_plllock_cmp2;
+ s64 pll_plllock_cmp3;
+};
+
+/*
+ * Report whether the MDSS GDSC is off. The GDSC counts as enabled only
+ * when the status word (gdsc_base + 0x4) has bit 31 set AND the control
+ * word (gdsc_base) has the collapse bit (bit 0) clear. Register reads
+ * keep the original short-circuit order: status first, control only if
+ * needed.
+ */
+static inline bool is_gdsc_disabled(struct mdss_pll_resources *pll_res)
+{
+ if (!pll_res->gdsc_base) {
+ WARN(1, "gdsc_base register is not defined\n");
+ return true;
+ }
+
+ if (!(readl_relaxed(pll_res->gdsc_base + 0x4) & BIT(31)))
+ return true;
+
+ if (readl_relaxed(pll_res->gdsc_base) & BIT(0))
+ return true;
+
+ return false;
+}
+
+/*
+ * prepare() hook for PLL post-dividers: re-program the cached divider
+ * value (data.div) into hardware via the divider's own set_div op, since
+ * the register contents may have been lost while unprepared.
+ */
+static inline int mdss_pll_div_prepare(struct clk *c)
+{
+ struct div_clk *div = to_div_clk(c);
+ /* Restore the divider's value */
+ return div->ops->set_div(div, div->data.div);
+}
+
+/* No-op mux select setter for muxes with a single effective input. */
+static inline int mdss_set_mux_sel(struct mux_clk *clk, int sel)
+{
+ return 0;
+}
+
+/* Matching no-op getter: always reports input 0. */
+static inline int mdss_get_mux_sel(struct mux_clk *clk)
+{
+ return 0;
+}
+
+int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable);
+int mdss_pll_util_resource_init(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+void mdss_pll_util_resource_deinit(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+void mdss_pll_util_resource_release(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+int mdss_pll_util_resource_enable(struct mdss_pll_resources *pll_res,
+ bool enable);
+int mdss_pll_util_resource_parse(struct platform_device *pdev,
+ struct mdss_pll_resources *pll_res);
+struct dss_vreg *mdss_pll_get_mp_by_reg_name(struct mdss_pll_resources *pll_res
+ , char *name);
+#endif
diff --git a/drivers/clk/msm/msm-clock-controller.c b/drivers/clk/msm/msm-clock-controller.c
new file mode 100644
index 000000000000..cbe52555264d
--- /dev/null
+++ b/drivers/clk/msm/msm-clock-controller.c
@@ -0,0 +1,755 @@
+/*
+ * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "msmclock: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <linux/hashtable.h>
+
+#include <linux/clk/msm-clk-provider.h>
+#include <soc/qcom/msm-clock-controller.h>
+#include <soc/qcom/clock-rpm.h>
+
+/* Protects list operations */
+static DEFINE_MUTEX(msmclk_lock);
+static LIST_HEAD(msmclk_parser_list);
+static u32 msmclk_debug;
+
+struct hitem {
+ struct hlist_node list;
+ phandle key;
+ void *ptr;
+};
+
+/* Count the phandles stored in @propname of @np, or -EINVAL if the
+ * property is absent.
+ */
+int of_property_count_phandles(struct device_node *np, char *propname)
+{
+ int len;
+ const __be32 *prop = of_get_property(np, propname, &len);
+
+ if (!prop)
+ return -EINVAL;
+
+ return len / sizeof(*prop);
+}
+EXPORT_SYMBOL(of_property_count_phandles);
+
+/* Read the @index-th phandle of @propname into @p; -EINVAL when the
+ * property is absent or too short. (Local renamed so it no longer
+ * shadows the 'phandle' typedef.)
+ */
+int of_property_read_phandle_index(struct device_node *np, char *propname,
+ int index, phandle *p)
+{
+ int len;
+ const __be32 *list = of_get_property(np, propname, &len);
+
+ if (!list || len < sizeof(*list) * (index + 1))
+ return -EINVAL;
+
+ *p = be32_to_cpup(list + index);
+ return 0;
+}
+EXPORT_SYMBOL(of_property_read_phandle_index);
+
+/*
+ * Resolve every phandle in "qcom,regulators" through the msmclk
+ * hashtable and store the resulting regulator handles in the vdd class.
+ * Sets vdd->num_regulators only after all lookups succeed.
+ */
+static int generic_vdd_parse_regulators(struct device *dev,
+ struct clk_vdd_class *vdd, struct device_node *np)
+{
+ int num_regulators, i, rc;
+ char *name = "qcom,regulators";
+
+ num_regulators = of_property_count_phandles(np, name);
+ if (num_regulators <= 0) {
+ dt_prop_err(np, name, "missing dt property\n");
+ return -EINVAL;
+ }
+
+ vdd->regulator = devm_kzalloc(dev,
+ sizeof(*vdd->regulator) * num_regulators,
+ GFP_KERNEL);
+ if (!vdd->regulator) {
+ dt_err(np, "memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_regulators; i++) {
+ phandle p;
+ rc = of_property_read_phandle_index(np, name, i, &p);
+ if (rc) {
+ dt_prop_err(np, name, "unable to read phandle\n");
+ return rc;
+ }
+
+ /* may return ERR_PTR(-EPROBE_DEFER) if not yet parsed */
+ vdd->regulator[i] = msmclk_parse_phandle(dev, p);
+ if (IS_ERR(vdd->regulator[i])) {
+ dt_prop_err(np, name, "hashtable lookup failed\n");
+ return PTR_ERR(vdd->regulator[i]);
+ }
+ }
+
+ vdd->num_regulators = num_regulators;
+ return 0;
+}
+
+/*
+ * Parse "qcom,uV-levels" (mandatory) and "qcom,uA-levels" (optional)
+ * into the vdd class. Array lengths must be a multiple of the number of
+ * regulators parsed earlier by generic_vdd_parse_regulators().
+ *
+ * Fixes: guard against vdd->num_regulators == 0 (previously a division
+ * by zero when regulator parsing had failed or not run), and correct
+ * the "beween" typo in the mismatch message.
+ */
+static int generic_vdd_parse_levels(struct device *dev,
+ struct clk_vdd_class *vdd, struct device_node *np)
+{
+ int len, rc;
+ char *name = "qcom,uV-levels";
+
+ if (!of_find_property(np, name, &len)) {
+ dt_prop_err(np, name, "missing dt property\n");
+ return -EINVAL;
+ }
+
+ if (!vdd->num_regulators) {
+ dt_err(np, "qcom,regulators must be parsed before qcom,uV-levels\n");
+ return -EINVAL;
+ }
+
+ len /= sizeof(u32);
+ if (len % vdd->num_regulators) {
+ dt_err(np, "mismatch between qcom,uV-levels and qcom,regulators dt properties\n");
+ return -EINVAL;
+ }
+
+ vdd->num_levels = len / vdd->num_regulators;
+ vdd->vdd_uv = devm_kzalloc(dev, len * sizeof(*vdd->vdd_uv),
+ GFP_KERNEL);
+ vdd->level_votes = devm_kzalloc(dev,
+ vdd->num_levels * sizeof(*vdd->level_votes),
+ GFP_KERNEL);
+
+ if (!vdd->vdd_uv || !vdd->level_votes) {
+ dt_err(np, "memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(np, name, vdd->vdd_uv,
+ vdd->num_levels * vdd->num_regulators);
+ if (rc) {
+ dt_prop_err(np, name, "unable to read u32 array\n");
+ return -EINVAL;
+ }
+
+ /* Optional Property */
+ name = "qcom,uA-levels";
+ if (!of_find_property(np, name, &len))
+ return 0;
+
+ len /= sizeof(u32);
+ if (len / vdd->num_regulators != vdd->num_levels) {
+ dt_err(np, "size of qcom,uA-levels and qcom,uV-levels must match\n");
+ return -EINVAL;
+ }
+
+ vdd->vdd_ua = devm_kzalloc(dev, len * sizeof(*vdd->vdd_ua),
+ GFP_KERNEL);
+ if (!vdd->vdd_ua) {
+ dt_err(np, "memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ rc = of_property_read_u32_array(np, name, vdd->vdd_ua,
+ vdd->num_levels * vdd->num_regulators);
+ if (rc) {
+ dt_prop_err(np, name, "unable to read u32 array\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * DT parser for "qcom,simple-vdd-class" nodes: allocate a clk_vdd_class
+ * and fill in its regulators and voltage/current levels.
+ *
+ * Fixes: the two parse steps were combined with `rc |=`, so
+ * generic_vdd_parse_levels() ran even when regulator parsing failed --
+ * with vdd->num_regulators still 0 it divides by zero -- and the OR of
+ * two different errnos produced a meaningless error code.
+ */
+static void *simple_vdd_class_dt_parser(struct device *dev,
+ struct device_node *np)
+{
+ struct clk_vdd_class *vdd;
+ int rc = 0;
+
+ vdd = devm_kzalloc(dev, sizeof(*vdd), GFP_KERNEL);
+ if (!vdd) {
+ dev_err(dev, "memory alloc failure\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mutex_init(&vdd->lock);
+ vdd->class_name = np->name;
+
+ rc = generic_vdd_parse_regulators(dev, vdd, np);
+ if (!rc)
+ rc = generic_vdd_parse_levels(dev, vdd, np);
+ if (rc) {
+ dt_err(np, "unable to read vdd_class\n");
+ return ERR_PTR(rc);
+ }
+
+ return vdd;
+}
+MSMCLK_PARSER(simple_vdd_class_dt_parser, "qcom,simple-vdd-class", 0);
+
+/*
+ * Resolve the optional "qcom,parent" phandle into c->parent. Returns 0
+ * when the property is absent; propagates lookup errors (including
+ * -EPROBE_DEFER from msmclk_parse_phandle) otherwise.
+ */
+static int generic_clk_parse_parents(struct device *dev, struct clk *c,
+ struct device_node *np)
+{
+ int rc;
+ phandle p;
+ char *name = "qcom,parent";
+
+ /* This property is optional */
+ if (!of_find_property(np, name, NULL))
+ return 0;
+
+ rc = of_property_read_phandle_index(np, name, 0, &p);
+ if (rc) {
+ dt_prop_err(np, name, "unable to read phandle\n");
+ return rc;
+ }
+
+ c->parent = msmclk_parse_phandle(dev, p);
+ if (IS_ERR(c->parent)) {
+ dt_prop_err(np, name, "hashtable lookup failed\n");
+ return PTR_ERR(c->parent);
+ }
+
+ return 0;
+}
+
+/*
+ * Resolve the optional "qcom,supply-group" phandle into c->vdd_class.
+ * Absence of the property is not an error.
+ */
+static int generic_clk_parse_vdd(struct device *dev, struct clk *c,
+ struct device_node *np)
+{
+ char *name = "qcom,supply-group";
+ phandle p;
+ int rc;
+
+ /* optional property */
+ if (!of_find_property(np, name, NULL))
+ return 0;
+
+ rc = of_property_read_phandle_index(np, name, 0, &p);
+ if (rc) {
+ dt_prop_err(np, name, "unable to read phandle\n");
+ return rc;
+ }
+
+ c->vdd_class = msmclk_parse_phandle(dev, p);
+ if (!IS_ERR(c->vdd_class))
+ return 0;
+
+ dt_prop_err(np, name, "hashtable lookup failed\n");
+ return PTR_ERR(c->vdd_class);
+}
+
+/*
+ * Read the optional "qcom,clk-flags" u32 into c->flags. Absence of the
+ * property is not an error.
+ */
+static int generic_clk_parse_flags(struct device *dev, struct clk *c,
+ struct device_node *np)
+{
+ char *name = "qcom,clk-flags";
+ int rc = 0;
+
+ if (of_find_property(np, name, NULL)) {
+ rc = of_property_read_u32(np, name, &c->flags);
+ if (rc)
+ dt_prop_err(np, name, "unable to read u32\n");
+ }
+
+ return rc;
+}
+
+/*
+ * Parse the optional "qcom,clk-fmax" property: pairs of (voltage level,
+ * max frequency), sorted by level. Requires qcom,supply-group so the
+ * levels map onto a vdd class.
+ *
+ * Fixes: rc from reading the last level index was only checked after
+ * `c->num_fmax += 1` had already consumed the (possibly unset) value;
+ * the check now precedes the use.
+ */
+static int generic_clk_parse_fmax(struct device *dev, struct clk *c,
+ struct device_node *np)
+{
+ u32 prop_len, i;
+ int rc;
+ char *name = "qcom,clk-fmax";
+
+ /* This property is optional */
+ if (!of_find_property(np, name, &prop_len))
+ return 0;
+
+ if (!c->vdd_class) {
+ dt_err(np, "both qcom,clk-fmax and qcom,supply-group must be defined\n");
+ return -EINVAL;
+ }
+
+ prop_len /= sizeof(u32);
+ if (prop_len % 2) {
+ dt_prop_err(np, name, "bad length\n");
+ return -EINVAL;
+ }
+
+ /* Value at proplen - 2 is the index of the last entry in fmax array */
+ rc = of_property_read_u32_index(np, name, prop_len - 2, &c->num_fmax);
+ if (rc) {
+ dt_prop_err(np, name, "unable to read u32\n");
+ return rc;
+ }
+ c->num_fmax += 1;
+
+ c->fmax = devm_kzalloc(dev, sizeof(*c->fmax) * c->num_fmax, GFP_KERNEL);
+ if (!c->fmax) {
+ dev_err(dev, "memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < prop_len; i += 2) {
+ u32 level, value;
+ rc = of_property_read_u32_index(np, name, i, &level);
+ if (rc) {
+ dt_prop_err(np, name, "unable to read u32\n");
+ return rc;
+ }
+
+ rc = of_property_read_u32_index(np, name, i + 1, &value);
+ if (rc) {
+ dt_prop_err(np, name, "unable to read u32\n");
+ return rc;
+ }
+
+ if (level >= c->num_fmax) {
+ dt_prop_err(np, name, "must be sorted\n");
+ return -EINVAL;
+ }
+ c->fmax[level] = value;
+ }
+
+ return 0;
+}
+
+/* Append @c to the driver's clk_lookup table; -EINVAL when the table
+ * sized at probe time is already full.
+ */
+static int generic_clk_add_lookup_tbl_entry(struct device *dev, struct clk *c)
+{
+ struct msmclk_data *drv = dev_get_drvdata(dev);
+
+ if (drv->clk_tbl_size >= drv->max_clk_tbl_size) {
+ dev_err(dev, "child node count should be > clock_count?\n");
+ return -EINVAL;
+ }
+
+ drv->clk_tbl[drv->clk_tbl_size++].clk = c;
+ return 0;
+}
+
+/*
+ * Resolve the optional "qcom,depends" phandle into c->depends (a clock
+ * that must be enabled alongside this one). Mirrors
+ * generic_clk_parse_parents().
+ */
+static int generic_clk_parse_depends(struct device *dev, struct clk *c,
+ struct device_node *np)
+{
+ phandle p;
+ int rc;
+ char *name = "qcom,depends";
+
+ /* This property is optional */
+ if (!of_find_property(np, name, NULL))
+ return 0;
+
+ rc = of_property_read_phandle_index(np, name, 0, &p);
+ if (rc) {
+ dt_prop_err(np, name, "unable to read phandle\n");
+ return rc;
+ }
+
+ c->depends = msmclk_parse_phandle(dev, p);
+ if (IS_ERR(c->depends)) {
+ dt_prop_err(np, name, "hashtable lookup failed\n");
+ return PTR_ERR(c->depends);
+ }
+
+ return 0;
+}
+
+/*
+ * Parse the initial clock configuration: the "qcom,always-on" flag and
+ * the optional "qcom,config-rate" initial rate.
+ */
+static int generic_clk_parse_init_config(struct device *dev, struct clk *c,
+ struct device_node *np)
+{
+ u32 rate;
+ int rc = 0;
+
+ c->always_on = of_property_read_bool(np, "qcom,always-on");
+
+ /* "qcom,config-rate" is optional; skip silently when absent */
+ if (of_find_property(np, "qcom,config-rate", NULL)) {
+ rc = of_property_read_u32(np, "qcom,config-rate", &rate);
+ if (rc)
+ dt_prop_err(np, "qcom,config-rate",
+ "unable to read u32\n");
+ else
+ c->init_rate = rate;
+ }
+
+ return rc;
+}
+
+/*
+ * Common initialization for every DT-parsed clock: runtime CLK_INIT
+ * setup plus parsing of the generic properties (flags, parent, vdd,
+ * fmax, depends, init config) and registration in the lookup table.
+ *
+ * NOTE(review): the `rc |=` accumulation mixes different negative error
+ * codes and the function always returns ERR_PTR(-EINVAL) on failure, so
+ * an -EPROBE_DEFER from msmclk_parse_phandle would be masked -- confirm
+ * whether deferral needs to propagate here.
+ */
+void *msmclk_generic_clk_init(struct device *dev, struct device_node *np,
+ struct clk *c)
+{
+ int rc;
+
+ /* CLK_INIT macro */
+ spin_lock_init(&c->lock);
+ mutex_init(&c->prepare_lock);
+ INIT_LIST_HEAD(&c->children);
+ INIT_LIST_HEAD(&c->siblings);
+ INIT_LIST_HEAD(&c->list);
+ c->dbg_name = np->name;
+
+ rc = generic_clk_add_lookup_tbl_entry(dev, c);
+ rc |= generic_clk_parse_flags(dev, c, np);
+ rc |= generic_clk_parse_parents(dev, c, np);
+ rc |= generic_clk_parse_vdd(dev, c, np);
+ rc |= generic_clk_parse_fmax(dev, c, np);
+ rc |= generic_clk_parse_depends(dev, c, np);
+ rc |= generic_clk_parse_init_config(dev, c, np);
+
+ if (rc) {
+ dt_err(np, "unable to read clk\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return c;
+}
+
+/*
+ * Find the registered parser whose compatible string matches @np.
+ * Returns NULL when no parser matches.  The list is walked without
+ * msmclk_lock; registration happens at init time before probing starts.
+ */
+static struct msmclk_parser *msmclk_parser_lookup(struct device_node *np)
+{
+ struct msmclk_parser *item;
+ list_for_each_entry(item, &msmclk_parser_list, list) {
+ if (of_device_is_compatible(np, item->compatible))
+ return item;
+ }
+ return NULL;
+}
+/* Add a compatible-string -> parsedt callback mapping (see MSMCLK_PARSER). */
+void msmclk_parser_register(struct msmclk_parser *item)
+{
+ mutex_lock(&msmclk_lock);
+ list_add(&item->list, &msmclk_parser_list);
+ mutex_unlock(&msmclk_lock);
+}
+
+static int msmclk_htable_add(struct device *dev, void *result, phandle key);
+
+/*
+ * Parse one clock DT node via its registered compatible parser and cache
+ * the result in the phandle hashtable.  May return ERR_PTR(-EPROBE_DEFER)
+ * propagated from the parser callback.
+ */
+void *msmclk_parse_dt_node(struct device *dev, struct device_node *np)
+{
+ struct msmclk_parser *parser;
+ phandle key;
+ void *result;
+ int rc;
+
+ key = np->phandle;
+ /*
+ * msmclk_lookup_phandle() returns ERR_PTR(-EINVAL) on a miss, never
+ * NULL, so the previous "if (!result)" test could never fire.  A hit
+ * means this node was already parsed (e.g. as a dependency), so hand
+ * back the cached object instead of re-parsing it and then tripping
+ * the duplicate check in msmclk_htable_add().
+ */
+ result = msmclk_lookup_phandle(dev, key);
+ if (!IS_ERR(result))
+ return result;
+
+ if (!of_device_is_available(np)) {
+ dt_err(np, "node is disabled\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ parser = msmclk_parser_lookup(np);
+ if (IS_ERR_OR_NULL(parser)) {
+ dt_err(np, "no parser found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* This may return -EPROBE_DEFER */
+ result = parser->parsedt(dev, np);
+ if (IS_ERR(result)) {
+ dt_err(np, "parsedt failed");
+ return result;
+ }
+
+ rc = msmclk_htable_add(dev, result, key);
+ if (rc)
+ return ERR_PTR(rc);
+
+ return result;
+}
+
+/*
+ * Resolve a clock phandle to its parsed object: check the hashtable
+ * first, and on a miss locate the DT node and parse it on demand.
+ * Returns ERR_PTR on failure (including -EPROBE_DEFER from the parser).
+ */
+void *msmclk_parse_phandle(struct device *dev, phandle key)
+{
+ struct hitem *item;
+ struct device_node *np;
+ struct msmclk_data *drv = dev_get_drvdata(dev);
+
+ /*
+ * the default phandle value is 0. Since hashtable keys must
+ * be unique, reject the default value.
+ */
+ if (!key)
+ return ERR_PTR(-EINVAL);
+
+ hash_for_each_possible(drv->htable, item, list, key) {
+ if (item->key == key)
+ return item->ptr;
+ }
+
+ /* Not cached yet: find the node and parse it now */
+ np = of_find_node_by_phandle(key);
+ if (!np)
+ return ERR_PTR(-EINVAL);
+
+ return msmclk_parse_dt_node(dev, np);
+}
+EXPORT_SYMBOL(msmclk_parse_phandle);
+
+/*
+ * Hashtable-only lookup of a parsed object by phandle.  Unlike
+ * msmclk_parse_phandle() this never parses on demand.  Never returns
+ * NULL: a miss yields ERR_PTR(-EINVAL).
+ */
+void *msmclk_lookup_phandle(struct device *dev, phandle key)
+{
+ struct hitem *item;
+ struct msmclk_data *drv = dev_get_drvdata(dev);
+
+ hash_for_each_possible(drv->htable, item, list, key) {
+ if (item->key == key)
+ return item->ptr;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL(msmclk_lookup_phandle);
+
+/*
+ * Insert @data into the phandle hashtable under @key.  key == 0 (node
+ * never referenced by phandle) is silently skipped; duplicate keys are
+ * rejected with -EINVAL.  Entries are devm-allocated, so they live for
+ * the lifetime of the device.
+ */
+static int msmclk_htable_add(struct device *dev, void *data, phandle key)
+{
+ struct hitem *item;
+ struct msmclk_data *drv = dev_get_drvdata(dev);
+
+ /*
+ * If there are no phandle references to a node, key == 0. However, if
+ * there is a second node like this, both will have key == 0. This
+ * violates the requirement that hashtable keys be unique. Skip it.
+ */
+ if (!key)
+ return 0;
+
+ if (!IS_ERR(msmclk_lookup_phandle(dev, key))) {
+ struct device_node *np = of_find_node_by_phandle(key);
+ dev_err(dev, "attempt to add duplicate entry for %s\n",
+ np ? np->name : "NULL");
+ return -EINVAL;
+ }
+
+ item = devm_kzalloc(dev, sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ dev_err(dev, "memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ INIT_HLIST_NODE(&item->list);
+ item->key = key;
+ item->ptr = data;
+
+ hash_add(drv->htable, &item->list, key);
+ return 0;
+}
+
+/*
+ * Currently, regulators are the only elements capable of probe deferral.
+ * Check them first to handle probe deferral efficiently.
+ */
+/*
+ * Grab every regulator named in the optional "qcom,regulator-names"
+ * property and cache each one in the phandle hashtable under the phandle
+ * of its matching "<name>-supply" property.  Returns 0 when the property
+ * is absent; may return -EPROBE_DEFER via devm_regulator_get().
+ */
+static int get_ext_regulators(struct device *dev)
+{
+ int num_strings, i, rc;
+ struct device_node *np;
+ void *item;
+ char *name = "qcom,regulator-names";
+
+ np = dev->of_node;
+ /* This property is optional */
+ num_strings = of_property_count_strings(np, name);
+ if (num_strings <= 0)
+ return 0;
+
+ for (i = 0; i < num_strings; i++) {
+ const char *str;
+ char buf[50];
+ phandle key;
+
+ rc = of_property_read_string_index(np, name, i, &str);
+ if (rc) {
+ dt_prop_err(np, name, "unable to read string\n");
+ return rc;
+ }
+
+ item = devm_regulator_get(dev, str);
+ if (IS_ERR(item)) {
+ dev_err(dev, "Failed to get regulator: %s\n", str);
+ return PTR_ERR(item);
+ }
+
+ /* The "<name>-supply" phandle is the hashtable key */
+ snprintf(buf, ARRAY_SIZE(buf), "%s-supply", str);
+ rc = of_property_read_phandle_index(np, buf, 0, &key);
+ if (rc) {
+ dt_prop_err(np, buf, "unable to read phandle\n");
+ return rc;
+ }
+
+ rc = msmclk_htable_add(dev, item, key);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+/*
+ * of_clk provider callback: the consumer's clock specifier cell is the
+ * phandle of the clock node.  Defer consumers until the clock has been
+ * fully registered (CLKFLAG_INIT_DONE).  @data is the controller device.
+ * Note the ERR_PTR(-ENOENT) initializer is immediately overwritten.
+ */
+static struct clk *msmclk_clk_get(struct of_phandle_args *clkspec, void *data)
+{
+ phandle key;
+ struct clk *c = ERR_PTR(-ENOENT);
+
+ key = clkspec->args[0];
+ c = msmclk_lookup_phandle(data, key);
+
+ if (!IS_ERR(c) && !(c->flags & CLKFLAG_INIT_DONE))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return c;
+}
+
+static void *regulator_dt_parser(struct device *dev, struct device_node *np)
+{
+ dt_err(np, "regulators should be handled in probe()");
+ return ERR_PTR(-EINVAL);
+}
+MSMCLK_PARSER(regulator_dt_parser, "qcom,rpm-smd-regulator", 0);
+
+static void *msmclk_dt_parser(struct device *dev, struct device_node *np)
+{
+ dt_err(np, "calling into other clock controllers isn't allowed");
+ return ERR_PTR(-EINVAL);
+}
+MSMCLK_PARSER(msmclk_dt_parser, "qcom,msm-clock-controller", 0);
+
+/*
+ * Allocate and initialize the per-controller msmclk_data: drvdata,
+ * the flat clock table (sized from the DT child count, which is an
+ * overestimate since not every child is a clock), and the phandle
+ * hashtable.  All allocations are devm-managed.
+ */
+static struct msmclk_data *msmclk_drv_init(struct device *dev)
+{
+ struct msmclk_data *drv;
+ size_t size;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv) {
+ dev_err(dev, "memory alloc failure\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ dev_set_drvdata(dev, drv);
+
+ drv->dev = dev;
+ INIT_LIST_HEAD(&drv->list);
+
+ /* This overestimates size */
+ drv->max_clk_tbl_size = of_get_child_count(dev->of_node);
+ size = sizeof(*drv->clk_tbl) * drv->max_clk_tbl_size;
+ drv->clk_tbl = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!drv->clk_tbl) {
+ dev_err(dev, "memory alloc failure clock table size %zu\n",
+ size);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ hash_init(drv->htable);
+ return drv;
+}
+
+/*
+ * Controller probe: map "cc-base", register the controller itself in the
+ * hashtable, enable RPM scaling, pre-fetch regulators (the only probe-
+ * deferral source), parse every child clock node, then expose the clocks
+ * via of_clk and register them with the framework.
+ */
+static int msmclk_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev;
+ struct msmclk_data *drv;
+ struct device_node *child;
+ void *result;
+ int rc = 0;
+
+ dev = &pdev->dev;
+ drv = msmclk_drv_init(dev);
+ if (IS_ERR(drv))
+ return PTR_ERR(drv);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cc-base");
+ if (!res) {
+ dt_err(dev->of_node, "missing cc-base\n");
+ return -EINVAL;
+ }
+ drv->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!drv->base) {
+ dev_err(dev, "ioremap failed for drv->base\n");
+ return -ENOMEM;
+ }
+ rc = msmclk_htable_add(dev, drv, dev->of_node->phandle);
+ if (rc)
+ return rc;
+
+ rc = enable_rpm_scaling();
+ if (rc)
+ return rc;
+
+ /* Regulators can defer; resolve them before parsing any clocks */
+ rc = get_ext_regulators(dev);
+ if (rc)
+ return rc;
+
+ /*
+ * Returning -EPROBE_DEFER here is inefficient due to
+ * destroying work 'unnecessarily'
+ */
+ /*
+ * NOTE(review): the early return below leaves the loop without
+ * dropping the reference for_each_available_child_of_node() holds on
+ * 'child' (missing of_node_put) — confirm.
+ */
+ for_each_available_child_of_node(dev->of_node, child) {
+ result = msmclk_parse_dt_node(dev, child);
+ if (!IS_ERR(result))
+ continue;
+ if (!msmclk_debug)
+ return PTR_ERR(result);
+ /*
+ * Parse and report all errors instead of immediately
+ * exiting. Return the first error code.
+ */
+ if (!rc)
+ rc = PTR_ERR(result);
+ }
+ if (rc)
+ return rc;
+
+ rc = of_clk_add_provider(dev->of_node, msmclk_clk_get, dev);
+ if (rc) {
+ dev_err(dev, "of_clk_add_provider failed\n");
+ return rc;
+ }
+
+ /*
+ * can't fail after registering clocks, because users may have
+ * gotten clock references. Failing would delete the memory.
+ */
+ WARN_ON(msm_clock_register(drv->clk_tbl, drv->clk_tbl_size));
+ dev_info(dev, "registered clocks\n");
+
+ return 0;
+}
+
+static struct of_device_id msmclk_match_table[] = {
+ {.compatible = "qcom,msm-clock-controller"},
+ {}
+};
+
+static struct platform_driver msmclk_driver = {
+ .probe = msmclk_probe,
+ .driver = {
+ .name = "msm-clock-controller",
+ .of_match_table = msmclk_match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+/* Guard against double registration; arch_initcall runs single-threaded */
+static bool initialized;
+int __init msmclk_init(void)
+{
+ int rc;
+ if (initialized)
+ return 0;
+
+ rc = platform_driver_register(&msmclk_driver);
+ if (rc)
+ return rc;
+ initialized = true;
+ return rc;
+}
+arch_initcall(msmclk_init);
diff --git a/drivers/clk/msm/vdd-level-8996.h b/drivers/clk/msm/vdd-level-8996.h
new file mode 100644
index 000000000000..24b2810aba16
--- /dev/null
+++ b/drivers/clk/msm/vdd-level-8996.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRIVERS_CLK_QCOM_VDD_LEVEL_8996_H
+#define __DRIVERS_CLK_QCOM_VDD_LEVEL_8996_H
+
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/regulator/consumer.h>
+
+#define VDD_DIG_FMAX_MAP1(l1, f1) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+#define VDD_DIG_FMAX_MAP2(l1, f1, l2, f2) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+#define VDD_DIG_FMAX_MAP3(l1, f1, l2, f2, l3, f3) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ [VDD_DIG_##l3] = (f3), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+#define VDD_DIG_FMAX_MAP4(l1, f1, l2, f2, l3, f3, l4, f4) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ [VDD_DIG_##l3] = (f3), \
+ [VDD_DIG_##l4] = (f4), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_MMPLL4_FMAX_MAP1(l1, f1) \
+ .vdd_class = &vdd_mmpll4, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_MMPLL4_FMAX_MAP2(l1, f1, l2, f2) \
+ .vdd_class = &vdd_mmpll4, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_MMPLL4_FMAX_MAP3(l1, f1, l2, f2, l3, f3) \
+ .vdd_class = &vdd_mmpll4, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ [VDD_DIG_##l3] = (f3), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+enum vdd_dig_levels {
+ VDD_DIG_NONE,
+ VDD_DIG_LOWER, /* SVS2 */
+ VDD_DIG_LOW, /* SVS */
+ VDD_DIG_NOMINAL, /* NOMINAL */
+ VDD_DIG_HIGH, /* Turbo */
+ VDD_DIG_NUM
+};
+
+/* Maps enum vdd_dig_levels indices to RPM regulator corner requests */
+static int vdd_corner[] = {
+ RPM_REGULATOR_CORNER_NONE, /* VDD_DIG_NONE */
+ RPM_REGULATOR_CORNER_SVS_SOC, /* SVS2 is remapped to SVS */
+ RPM_REGULATOR_CORNER_SVS_SOC, /* VDD_DIG_SVS */
+ RPM_REGULATOR_CORNER_NORMAL, /* VDD_DIG_NOMINAL */
+ RPM_REGULATOR_CORNER_SUPER_TURBO, /* VDD_DIG_TURBO */
+};
+
+#endif
diff --git a/drivers/clk/msm/vdd-level-cobalt.h b/drivers/clk/msm/vdd-level-cobalt.h
new file mode 100644
index 000000000000..2cb40afafe3f
--- /dev/null
+++ b/drivers/clk/msm/vdd-level-cobalt.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DRIVERS_CLK_QCOM_VDD_LEVEL_COBALT_H
+#define __DRIVERS_CLK_QCOM_VDD_LEVEL_COBALT_H
+
+#include <linux/clk/msm-clock-generic.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/regulator/consumer.h>
+
+#define VDD_DIG_FMAX_MAP1(l1, f1) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+#define VDD_DIG_FMAX_MAP2(l1, f1, l2, f2) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP3(l1, f1, l2, f2, l3, f3) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ [VDD_DIG_##l3] = (f3), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+#define VDD_DIG_FMAX_MAP4(l1, f1, l2, f2, l3, f3, l4, f4) \
+ .vdd_class = &vdd_dig, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ [VDD_DIG_##l3] = (f3), \
+ [VDD_DIG_##l4] = (f4), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_DIG_FMAX_MAP2_AO(l1, f1, l2, f2) \
+ .vdd_class = &vdd_dig_ao, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_GPU_PLL_FMAX_MAP1(l1, f1) \
+ .vdd_class = &vdd_gpucc_mx, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+#define VDD_GPU_PLL_FMAX_MAP3(l1, f1, l2, f2, l3, f3) \
+ .vdd_class = &vdd_gpucc_mx, \
+ .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ [VDD_DIG_##l1] = (f1), \
+ [VDD_DIG_##l2] = (f2), \
+ [VDD_DIG_##l3] = (f3), \
+ }, \
+ .num_fmax = VDD_DIG_NUM
+
+enum vdd_dig_levels {
+ VDD_DIG_NONE,
+ VDD_DIG_MIN, /* MIN SVS */
+ VDD_DIG_LOWER, /* SVS2 */
+ VDD_DIG_LOW, /* SVS */
+ VDD_DIG_LOW_L1, /* SVSL1 */
+ VDD_DIG_NOMINAL, /* NOM */
+ VDD_DIG_HIGH, /* TURBO */
+ VDD_DIG_NUM
+};
+
+/* Maps enum vdd_dig_levels indices to RPM regulator level requests */
+static int vdd_corner[] = {
+ RPM_REGULATOR_LEVEL_NONE, /* VDD_DIG_NONE */
+ RPM_REGULATOR_LEVEL_MIN_SVS, /* VDD_DIG_MIN */
+ RPM_REGULATOR_LEVEL_LOW_SVS, /* VDD_DIG_LOWER */
+ RPM_REGULATOR_LEVEL_SVS, /* VDD_DIG_LOW */
+ RPM_REGULATOR_LEVEL_SVS_PLUS, /* VDD_DIG_LOW_L1 */
+ RPM_REGULATOR_LEVEL_NOM, /* VDD_DIG_NOMINAL */
+ RPM_REGULATOR_LEVEL_TURBO, /* VDD_DIG_HIGH */
+};
+
+#endif
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index ee4c83aab4f4..ea6e4a1423af 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -2,6 +2,9 @@ config QCOM_GDSC
bool
select PM_GENERIC_DOMAINS if PM
+config QCOM_RPMCC
+ bool
+
config COMMON_CLK_QCOM
tristate "Support for Qualcomm's clock controllers"
depends on OF
@@ -9,6 +12,32 @@ config COMMON_CLK_QCOM
select REGMAP_MMIO
select RESET_CONTROLLER
+config QCOM_CLK_RPM
+ tristate "RPM based Clock Controller"
+ depends on COMMON_CLK_QCOM && MFD_QCOM_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8064, msm8660, msm8960 etc.
+
+config QCOM_CLK_SMD_RPM
+ tristate "RPM over SMD based Clock Controller"
+ depends on COMMON_CLK_QCOM && QCOM_SMD_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8016, apq8084, msm8974 etc.
+
config APQ_GCC_8084
tristate "APQ8084 Global Clock Controller"
select QCOM_GDSC
@@ -106,3 +135,56 @@ config MSM_MMCC_8974
Support for the multimedia clock controller on msm8974 devices.
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+
+config MSM_GCC_8996
+ tristate "MSM8996 Global Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the global clock controller on msm8996 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, UFS, SD/eMMC, PCIe, etc.
+
+config MSM_MMCC_8996
+ tristate "MSM8996 Multimedia Clock Controller"
+ select MSM_GCC_8996
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the multimedia clock controller on msm8996 devices.
+ Say Y if you want to support multimedia devices such as display,
+ graphics, video encode/decode, camera, etc.
+
+config QCOM_HFPLL
+ tristate "High-Frequency PLL (HFPLL) Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the high-frequency PLLs present on MSM devices.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as MSM8974, APQ8084, etc.
+
+config KPSS_XCC
+ tristate "KPSS Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the Krait ACC and GCC clock controllers. Say Y
+ if you want to support CPU frequency scaling on devices such
+ as MSM8960, APQ8064, etc.
+
+config KRAITCC
+ tristate "Krait Clock Controller"
+ depends on COMMON_CLK_QCOM && ARM
+ select KRAIT_CLOCKS
+ help
+ Support for the Krait CPU clocks on MSM devices.
+ Say Y if you want to support CPU frequency scaling.
+
+config KRAIT_CLOCKS
+ bool
+ select KRAIT_L2_ACCESSORS
+
+config QCOM_A53
+ tristate "A53 Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the A53 clock controller on MSM devices.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as MSM8916.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index fe6252349e55..94419695cd2e 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -2,12 +2,15 @@ obj-$(CONFIG_COMMON_CLK_QCOM) += clk-qcom.o
clk-qcom-y += common.o
clk-qcom-y += clk-regmap.o
+clk-qcom-y += clk-alpha-pll.o
clk-qcom-y += clk-pll.o
clk-qcom-y += clk-rcg.o
clk-qcom-y += clk-rcg2.o
clk-qcom-y += clk-branch.o
clk-qcom-y += clk-regmap-divider.o
clk-qcom-y += clk-regmap-mux.o
+clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o
+clk-qcom-y += clk-hfpll.o
clk-qcom-y += reset.o
clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
@@ -20,5 +23,13 @@ obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
+obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
+obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
+obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
+obj-$(CONFIG_KRAITCC) += krait-cc.o
+obj-$(CONFIG_QCOM_A53) += clk-a53.o
+obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
+obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
diff --git a/drivers/clk/qcom/clk-a53.c b/drivers/clk/qcom/clk-a53.c
new file mode 100644
index 000000000000..d895af53a90e
--- /dev/null
+++ b/drivers/clk/qcom/clk-a53.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "clk-pll.h"
+#include "clk-regmap.h"
+#include "clk-regmap-mux-div.h"
+
+#define F_APCS_PLL(f, l, m, n) { (f), (l), (m), (n), 0 }
+
+static struct pll_freq_tbl apcs_pll_freq[] = {
+ F_APCS_PLL(998400000, 52, 0x0, 0x1),
+ F_APCS_PLL(1094400000, 57, 0x0, 0x1),
+ F_APCS_PLL(1152000000, 62, 0x0, 0x1),
+ F_APCS_PLL(1209600000, 65, 0x0, 0x1),
+ F_APCS_PLL(1401600000, 73, 0x0, 0x1),
+};
+
+static struct clk_pll a53sspll = {
+ .l_reg = 0x04,
+ .m_reg = 0x08,
+ .n_reg = 0x0c,
+ .config_reg = 0x14,
+ .mode_reg = 0x00,
+ .status_reg = 0x1c,
+ .status_bit = 16,
+ .freq_tbl = apcs_pll_freq,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "a53sspll",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_sr2_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const struct regmap_config a53sspll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x40,
+ .fast_io = true,
+};
+
+/*
+ * Map the PLL MMIO region, wrap it in a regmap and register the static
+ * a53sspll clock.  The previous devm_kzalloc'd clk_pll was immediately
+ * overwritten with &a53sspll, leaving the allocation unused — dropped.
+ */
+static struct clk *a53ss_add_pll(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+ struct regmap *regmap;
+ struct clk_pll *pll = &a53sspll;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &a53sspll_regmap_config);
+ if (IS_ERR(regmap))
+ return ERR_CAST(regmap);
+
+ return devm_clk_register_regmap(dev, &pll->clkr);
+}
+
+enum {
+ P_GPLL0,
+ P_A53SSPLL,
+};
+
+static const struct parent_map gpll0_a53sspll_map[] = {
+ { P_GPLL0, 4 },
+ { P_A53SSPLL, 5 },
+};
+
+static const char * const gpll0_a53sspll[] = {
+ "gpll0_vote",
+ "a53sspll",
+};
+
+static struct clk_regmap_mux_div a53ssmux = {
+ .reg_offset = 0x50,
+ .hid_width = 5,
+ .hid_shift = 0,
+ .src_width = 3,
+ .src_shift = 8,
+ .safe_src = 4,
+ .safe_freq = 400000000,
+ .parent_map = gpll0_a53sspll_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "a53ssmux",
+ .parent_names = gpll0_a53sspll,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_div_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+/*
+ * Look up the APCS syscon regmap and register the static a53ssmux clock.
+ * The previous devm_kzalloc'd mux was immediately overwritten with
+ * &a53ssmux, leaving the allocation unused — dropped.
+ */
+static struct clk *a53ss_add_mux(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct regmap *regmap;
+ struct clk_regmap_mux_div *mux = &a53ssmux;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "qcom,apcs");
+ if (IS_ERR(regmap))
+ return ERR_CAST(regmap);
+
+ mux->clkr.regmap = regmap;
+ return devm_clk_register(dev, &mux->clkr.hw);
+}
+
+static const struct of_device_id qcom_a53_match_table[] = {
+ { .compatible = "qcom,clock-a53-msm8916" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_a53_match_table);
+
+/*
+ * Register the A53 PLL and mux and expose both through a onecell
+ * of_clk provider (index 0 = PLL, index 1 = mux).
+ * NOTE(review): the clk_prepare_enable() return value is ignored —
+ * presumably the PLL must stay on for CPU clocking; confirm.
+ */
+static int qcom_a53_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *clk_pll, *clk_mux;
+ struct clk_onecell_data *data;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->clks = devm_kcalloc(dev, 2, sizeof(struct clk *), GFP_KERNEL);
+ if (!data->clks)
+ return -ENOMEM;
+
+ clk_pll = a53ss_add_pll(pdev);
+ if (IS_ERR(clk_pll))
+ return PTR_ERR(clk_pll);
+
+ clk_mux = a53ss_add_mux(pdev);
+ if (IS_ERR(clk_mux))
+ return PTR_ERR(clk_mux);
+
+ data->clks[0] = clk_pll;
+ data->clks[1] = clk_mux;
+ data->clk_num = 2;
+
+ clk_prepare_enable(clk_pll);
+
+ return of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+}
+
+static struct platform_driver qcom_a53_driver = {
+ .probe = qcom_a53_probe,
+ .driver = {
+ .name = "qcom-a53",
+ .of_match_table = qcom_a53_match_table,
+ },
+};
+
+static int __init qcom_a53_init(void)
+{
+ return platform_driver_register(&qcom_a53_driver);
+}
+arch_initcall(qcom_a53_init);
+
+static void __exit qcom_a53_exit(void)
+{
+ platform_driver_unregister(&qcom_a53_driver);
+}
+module_exit(qcom_a53_exit);
+
+MODULE_DESCRIPTION("Qualcomm A53 Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-a53");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
new file mode 100644
index 000000000000..4d46d65898d1
--- /dev/null
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+
+#include "clk-alpha-pll.h"
+
+#define PLL_MODE 0x00
+# define PLL_OUTCTRL BIT(0)
+# define PLL_BYPASSNL BIT(1)
+# define PLL_RESET_N BIT(2)
+# define PLL_LOCK_COUNT_SHIFT 8
+# define PLL_LOCK_COUNT_MASK 0x3f
+# define PLL_BIAS_COUNT_SHIFT 14
+# define PLL_BIAS_COUNT_MASK 0x3f
+# define PLL_VOTE_FSM_ENA BIT(20)
+# define PLL_VOTE_FSM_RESET BIT(21)
+# define PLL_ACTIVE_FLAG BIT(30)
+# define PLL_LOCK_DET BIT(31)
+
+#define PLL_L_VAL 0x04
+#define PLL_ALPHA_VAL 0x08
+#define PLL_ALPHA_VAL_U 0x0c
+
+#define PLL_USER_CTL 0x10
+# define PLL_POST_DIV_SHIFT 8
+# define PLL_POST_DIV_MASK 0xf
+# define PLL_ALPHA_EN BIT(24)
+# define PLL_VCO_SHIFT 20
+# define PLL_VCO_MASK 0x3
+
+#define PLL_USER_CTL_U 0x14
+
+#define PLL_CONFIG_CTL 0x18
+#define PLL_TEST_CTL 0x1c
+#define PLL_TEST_CTL_U 0x20
+#define PLL_STATUS 0x24
+
+/*
+ * Even though 40 bits are present, use only 32 for ease of calculation.
+ */
+#define ALPHA_REG_BITWIDTH 40
+#define ALPHA_BITWIDTH 32
+
+#define to_clk_alpha_pll(_hw) container_of(to_clk_regmap(_hw), \
+ struct clk_alpha_pll, clkr)
+
+#define to_clk_alpha_pll_postdiv(_hw) container_of(to_clk_regmap(_hw), \
+ struct clk_alpha_pll_postdiv, clkr)
+
+/*
+ * Poll PLL_MODE for up to ~100us until @mask is fully set (@inverse ==
+ * false) or fully clear (@inverse == true).  Returns 0 on success,
+ * -ETIMEDOUT (with a WARN naming @action) on timeout, or a regmap error.
+ *
+ * Fix: the inverse branch previously tested (val & mask), i.e. it
+ * declared the disable successful while the bit was still SET and timed
+ * out once it cleared; it must test !(val & mask) as upstream
+ * clk-alpha-pll.c does.
+ */
+static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
+ const char *action)
+{
+ u32 val, off;
+ int count;
+ int ret;
+ const char *name = clk_hw_get_name(&pll->clkr.hw);
+
+ off = pll->offset;
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return ret;
+
+ for (count = 100; count > 0; count--) {
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return ret;
+ if (inverse && !(val & mask))
+ return 0;
+ else if ((val & mask) == mask)
+ return 0;
+
+ udelay(1);
+ }
+
+ WARN(1, "%s failed to %s!\n", name, action);
+ return -ETIMEDOUT;
+}
+
+/* Wait for @mask bits to assert after an enable request */
+static int wait_for_pll_enable(struct clk_alpha_pll *pll, u32 mask)
+{
+ return wait_for_pll(pll, mask, 0, "enable");
+}
+
+/* Wait for @mask bits to deassert after a disable request */
+static int wait_for_pll_disable(struct clk_alpha_pll *pll, u32 mask)
+{
+ return wait_for_pll(pll, mask, 1, "disable");
+}
+
+/* Wait for the offline-ack bit to assert after an offline request */
+static int wait_for_pll_offline(struct clk_alpha_pll *pll, u32 mask)
+{
+ return wait_for_pll(pll, mask, 0, "offline");
+}
+
+
+/* alpha pll with hwfsm support */
+
+#define PLL_OFFLINE_REQ BIT(7)
+#define PLL_FSM_ENA BIT(20)
+#define PLL_OFFLINE_ACK BIT(28)
+#define PLL_ACTIVE_FLAG BIT(30)
+
+/*
+ * One-time static configuration: program CONFIG_CTL and the output-
+ * enable / post-divider fields of USER_CTL from @config.  Fields whose
+ * mask is zero in @config are left untouched.
+ */
+void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct pll_config *config)
+{
+ u32 val, mask;
+
+ regmap_write(regmap, pll->offset + PLL_CONFIG_CTL,
+ config->config_ctl_val);
+
+ val = config->main_output_mask;
+ val |= config->aux_output_mask;
+ val |= config->aux2_output_mask;
+ val |= config->early_output_mask;
+ val |= config->post_div_val;
+
+ mask = config->main_output_mask;
+ mask |= config->aux_output_mask;
+ mask |= config->aux2_output_mask;
+ mask |= config->early_output_mask;
+ mask |= config->post_div_mask;
+
+ regmap_update_bits(regmap, pll->offset + PLL_USER_CTL, mask, val);
+
+ return;
+}
+
+/*
+ * Enable via the hardware FSM: set FSM_ENA, clear any pending OFFLINE
+ * request, then wait for ACTIVE_FLAG to assert.
+ */
+static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, off;
+
+ off = pll->offset;
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return ret;
+ /* Enable HW FSM mode, clear OFFLINE request */
+ val |= PLL_FSM_ENA;
+ val &= ~PLL_OFFLINE_REQ;
+ ret = regmap_write(pll->clkr.regmap, off + PLL_MODE, val);
+ if (ret)
+ return ret;
+
+ /* Make sure enable request goes through before waiting for update */
+ mb();
+
+ ret = wait_for_pll_enable(pll, PLL_ACTIVE_FLAG);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Disable via the hardware FSM: request OFFLINE, wait for the ack, then
+ * drop FSM_ENA and wait for ACTIVE_FLAG to clear.  Errors are silently
+ * dropped since the clk framework's disable path returns void.
+ */
+static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, off;
+
+ off = pll->offset;
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return;
+ /* Request PLL_OFFLINE and wait for ack */
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_OFFLINE_REQ, PLL_OFFLINE_REQ);
+ if (ret)
+ return;
+ ret = wait_for_pll_offline(pll, PLL_OFFLINE_ACK);
+ if (ret)
+ return;
+
+ /* Disable hwfsm */
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_FSM_ENA, 0);
+ if (ret)
+ return;
+ wait_for_pll_disable(pll, PLL_ACTIVE_FLAG);
+ return;
+}
+
+/*
+ * Software enable sequence: in FSM (voting) mode just cast a vote and
+ * wait for ACTIVE_FLAG; otherwise deassert bypass, release reset after
+ * the mandated 5us, wait for lock, then gate the output on.
+ */
+static int clk_alpha_pll_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, mask, off;
+
+ off = pll->offset;
+
+ mask = PLL_OUTCTRL | PLL_RESET_N | PLL_BYPASSNL;
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable(pll, PLL_ACTIVE_FLAG);
+ }
+
+ /* Skip if already enabled */
+ if ((val & mask) == mask)
+ return 0;
+
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_BYPASSNL, PLL_BYPASSNL);
+ if (ret)
+ return ret;
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset.
+ */
+ mb();
+ udelay(5);
+
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ ret = wait_for_pll_enable(pll, PLL_LOCK_DET);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_OUTCTRL, PLL_OUTCTRL);
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+ return ret;
+}
+
+/*
+ * Software disable: in FSM mode just withdraw the vote; otherwise gate
+ * the output off, wait the required two output clock ticks, then assert
+ * reset and bypass.
+ */
+static void clk_alpha_pll_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, mask, off;
+
+ off = pll->offset;
+
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & PLL_VOTE_FSM_ENA) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+ mask = PLL_OUTCTRL;
+ regmap_update_bits(pll->clkr.regmap, off + PLL_MODE, mask, 0);
+
+ /* Delay of 2 output clock ticks required until output is disabled */
+ mb();
+ udelay(1);
+
+ mask = PLL_RESET_N | PLL_BYPASSNL;
+ regmap_update_bits(pll->clkr.regmap, off + PLL_MODE, mask, 0);
+}
+
+/* rate = prate * (l + a / 2^ALPHA_BITWIDTH): integer multiplier plus
+ * the 32-bit fractional (alpha) part.
+ */
+static unsigned long alpha_pll_calc_rate(u64 prate, u32 l, u32 a)
+{
+ return (prate * l) + ((prate * a) >> ALPHA_BITWIDTH);
+}
+
+/*
+ * Split @rate into integer multiplier *l and 32-bit fractional part *a
+ * relative to @prate (rounding the fraction up), and return the rate the
+ * hardware will actually produce for that (l, a) pair.
+ */
+static unsigned long
+alpha_pll_round_rate(unsigned long rate, unsigned long prate, u32 *l, u64 *a)
+{
+ u64 remainder;
+ u64 quotient;
+
+ quotient = rate;
+ remainder = do_div(quotient, prate);
+ *l = quotient;
+
+ if (!remainder) {
+ *a = 0;
+ return rate;
+ }
+
+ /* Upper ALPHA_BITWIDTH bits of Alpha */
+ quotient = remainder << ALPHA_BITWIDTH;
+ remainder = do_div(quotient, prate);
+
+ if (remainder)
+ quotient++;
+
+ *a = quotient;
+ return alpha_pll_calc_rate(prate, *l, *a);
+}
+
+/* Return the VCO table entry whose [min_freq, max_freq] range contains
+ * @rate, or NULL when the rate is outside every range.
+ */
+static const struct pll_vco *
+alpha_pll_find_vco(const struct clk_alpha_pll *pll, unsigned long rate)
+{
+ const struct pll_vco *v = pll->vco_table;
+ const struct pll_vco *end = v + pll->num_vco;
+
+ for (; v < end; v++)
+ if (rate >= v->min_freq && rate <= v->max_freq)
+ return v;
+
+ return NULL;
+}
+
+/*
+ * Read L and (when PLL_ALPHA_EN is set) the 40-bit alpha registers back
+ * from hardware and reconstruct the output rate.  The alpha value is
+ * shifted down to the 32 bits the driver calculates with.
+ */
+static unsigned long
+clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ u32 l, low, high, ctl;
+ u64 a = 0, prate = parent_rate;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 off = pll->offset;
+
+ regmap_read(pll->clkr.regmap, off + PLL_L_VAL, &l);
+
+ regmap_read(pll->clkr.regmap, off + PLL_USER_CTL, &ctl);
+ if (ctl & PLL_ALPHA_EN) {
+ regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL, &low);
+ regmap_read(pll->clkr.regmap, off + PLL_ALPHA_VAL_U, &high);
+ a = (u64)high << 32 | low;
+ a >>= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH;
+ }
+
+ return alpha_pll_calc_rate(prate, l, a);
+}
+
+/*
+ * Program the PLL for @rate: write L and alpha values, select the matching
+ * VCO band and enable alpha (fractional) mode.  Returns -EINVAL when the
+ * rounded rate does not fall into any supported VCO band.
+ *
+ * NOTE(review): the registers are rewritten without disabling the PLL
+ * first — presumably the hardware latches the new settings safely, or
+ * callers only change rate while the PLL is off; confirm against the
+ * hardware programming guide.
+ */
+static int clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ const struct pll_vco *vco;
+ u32 l, off = pll->offset;
+ u64 a;
+
+ rate = alpha_pll_round_rate(rate, prate, &l, &a);
+ vco = alpha_pll_find_vco(pll, rate);
+ if (!vco) {
+ pr_err("alpha pll not in a valid vco range\n");
+ return -EINVAL;
+ }
+
+ /* Scale alpha up to the full register width before writing it. */
+ a <<= (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);
+
+ regmap_write(pll->clkr.regmap, off + PLL_L_VAL, l);
+ regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL, a);
+ regmap_write(pll->clkr.regmap, off + PLL_ALPHA_VAL_U, a >> 32);
+
+ regmap_update_bits(pll->clkr.regmap, off + PLL_USER_CTL,
+ PLL_VCO_MASK << PLL_VCO_SHIFT,
+ vco->val << PLL_VCO_SHIFT);
+
+ regmap_update_bits(pll->clkr.regmap, off + PLL_USER_CTL, PLL_ALPHA_EN,
+ PLL_ALPHA_EN);
+
+ return 0;
+}
+
+/*
+ * Round @rate to what the PLL can produce.  If the rounded rate lies in a
+ * VCO band it is returned as-is, otherwise it is clamped to the overall
+ * supported range.
+ *
+ * NOTE(review): assumes vco_table is non-empty and sorted by ascending
+ * frequency (first entry = lowest band, last = highest) — confirm for all
+ * users of these ops.
+ */
+static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l;
+ u64 a;
+ unsigned long min_freq, max_freq;
+
+ rate = alpha_pll_round_rate(rate, *prate, &l, &a);
+ if (alpha_pll_find_vco(pll, rate))
+ return rate;
+
+ min_freq = pll->vco_table[0].min_freq;
+ max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
+
+ return clamp(rate, min_freq, max_freq);
+}
+
+/* Ops for alpha PLLs controlled directly through the MODE register. */
+const struct clk_ops clk_alpha_pll_ops = {
+ .enable = clk_alpha_pll_enable,
+ .disable = clk_alpha_pll_disable,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
+
+/* Ops for alpha PLLs whose enable/disable is sequenced by the HW FSM. */
+const struct clk_ops clk_alpha_pll_hwfsm_ops = {
+ .enable = clk_alpha_pll_hwfsm_enable,
+ .disable = clk_alpha_pll_hwfsm_disable,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
+
+/*
+ * The POST_DIV field encodes the divider as (divisor - 1) for power-of-two
+ * divisors (see clk_alpha_div_table below), so fls() of the field value
+ * yields the shift: 0x0 -> /1, 0x1 -> /2, 0x3 -> /4, 0x7 -> /8, 0xf -> /16.
+ */
+static unsigned long
+clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ u32 ctl;
+
+ regmap_read(pll->clkr.regmap, pll->offset + PLL_USER_CTL, &ctl);
+
+ ctl >>= PLL_POST_DIV_SHIFT;
+ ctl &= PLL_POST_DIV_MASK;
+
+ return parent_rate >> fls(ctl);
+}
+
+/* Register field value -> divisor pairs for the post-divider. */
+static const struct clk_div_table clk_alpha_div_table[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { 0xf, 16 },
+ { }
+};
+
+/* Round to the closest rate achievable with the power-of-two dividers. */
+static long
+clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+
+ return divider_round_rate(hw, rate, prate, clk_alpha_div_table,
+ pll->width, CLK_DIVIDER_POWER_OF_TWO);
+}
+
+/*
+ * Program the post-divider.  round_rate() restricts @rate to power-of-two
+ * divisions of @parent_rate, for which (divisor - 1) is exactly the
+ * register encoding in clk_alpha_div_table.
+ */
+static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+ int div;
+
+ /* 16 -> 0xf, 8 -> 0x7, 4 -> 0x3, 2 -> 0x1, 1 -> 0x0 */
+ div = DIV_ROUND_UP_ULL((u64)parent_rate, rate) - 1;
+
+ return regmap_update_bits(pll->clkr.regmap, pll->offset + PLL_USER_CTL,
+ PLL_POST_DIV_MASK << PLL_POST_DIV_SHIFT,
+ div << PLL_POST_DIV_SHIFT);
+}
+
+/* Ops for the alpha PLL's power-of-two post-divider output. */
+const struct clk_ops clk_alpha_pll_postdiv_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_round_rate,
+ .set_rate = clk_alpha_pll_postdiv_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
new file mode 100644
index 000000000000..7718bd5beefc
--- /dev/null
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_ALPHA_PLL_H__
+#define __QCOM_CLK_ALPHA_PLL_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+#include "clk-pll.h"
+
+/**
+ * struct pll_vco - one VCO operating band
+ * @min_freq: lowest rate supported by this band
+ * @max_freq: highest rate supported by this band
+ * @val: register value that selects this band
+ */
+struct pll_vco {
+ unsigned long min_freq;
+ unsigned long max_freq;
+ u32 val;
+};
+
+/**
+ * struct clk_alpha_pll - phase locked loop (PLL)
+ * @offset: base address of registers
+ * @vco_table: array of VCO settings
+ * @num_vco: number of entries in @vco_table
+ * @clkr: regmap clock handle
+ * @config_ctl_val: CONFIG_CTL register value (applied at configure time —
+ * presumably by clk_alpha_pll_configure(); confirm)
+ * @pllout_flags: mask of PLLOUT_* bits selecting which outputs to enable
+ */
+struct clk_alpha_pll {
+ u32 offset;
+
+ const struct pll_vco *vco_table;
+ size_t num_vco;
+
+ struct clk_regmap clkr;
+ u32 config_ctl_val;
+/* Selectable PLL output taps, for @pllout_flags. */
+#define PLLOUT_MAIN BIT(0)
+#define PLLOUT_AUX BIT(1)
+#define PLLOUT_AUX2 BIT(2)
+#define PLLOUT_EARLY BIT(3)
+ u32 pllout_flags;
+};
+
+/**
+ * struct clk_alpha_pll_postdiv - phase locked loop (PLL) post-divider
+ * @offset: base address of registers
+ * @width: width of post-divider
+ * @clkr: regmap clock handle
+ */
+struct clk_alpha_pll_postdiv {
+ u32 offset;
+ u8 width;
+
+ struct clk_regmap clkr;
+};
+
+extern const struct clk_ops clk_alpha_pll_ops;
+extern const struct clk_ops clk_alpha_pll_hwfsm_ops;
+extern const struct clk_ops clk_alpha_pll_postdiv_ops;
+
+void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct pll_config *config);
+
+#endif
diff --git a/drivers/clk/qcom/clk-hfpll.c b/drivers/clk/qcom/clk-hfpll.c
new file mode 100644
index 000000000000..eacf853c132e
--- /dev/null
+++ b/drivers/clk/qcom/clk-hfpll.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include "clk-regmap.h"
+#include "clk-hfpll.h"
+
+#define PLL_OUTCTRL BIT(0)
+#define PLL_BYPASSNL BIT(1)
+#define PLL_RESET_N BIT(2)
+
+/* One-time configuration of a HFPLL; subsequent calls are no-ops. */
+static void __clk_hfpll_init_once(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+
+ if (likely(h->init_done))
+ return;
+
+ /* Configure PLL parameters for integer mode. */
+ if (hd->config_val)
+ regmap_write(regmap, hd->config_reg, hd->config_val);
+ /* M/N = 0/1 disables fractional multiplication. */
+ regmap_write(regmap, hd->m_reg, 0);
+ regmap_write(regmap, hd->n_reg, 1);
+
+ if (hd->user_reg) {
+ u32 regval = hd->user_val;
+ unsigned long rate;
+
+ rate = clk_hw_get_rate(hw);
+
+ /* Pick the right VCO. */
+ if (hd->user_vco_mask && rate > hd->low_vco_max_rate)
+ regval |= hd->user_vco_mask;
+ regmap_write(regmap, hd->user_reg, regval);
+ }
+
+ if (hd->droop_reg)
+ regmap_write(regmap, hd->droop_reg, hd->droop_val);
+
+ h->init_done = true;
+}
+
+/*
+ * Bring the HFPLL out of bypass/reset and wait for lock.  Caller holds
+ * h->lock.
+ *
+ * NOTE(review): the lock-detect poll below has no timeout — a PLL that
+ * never locks hangs the CPU here; consider a bounded poll.
+ */
+static void __clk_hfpll_enable(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 val;
+
+ __clk_hfpll_init_once(hw);
+
+ /* Disable PLL bypass mode. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_BYPASSNL, PLL_BYPASSNL);
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset. Delay 10us just to be safe.
+ */
+ udelay(10);
+
+ /* De-assert active-low PLL reset. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_RESET_N, PLL_RESET_N);
+
+ /* Wait for PLL to lock. */
+ if (hd->status_reg) {
+ do {
+ regmap_read(regmap, hd->status_reg, &val);
+ } while (!(val & BIT(hd->lock_bit)));
+ } else {
+ /* No lock-detect register: fixed worst-case settle delay. */
+ udelay(60);
+ }
+
+ /* Enable PLL output. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_OUTCTRL, PLL_OUTCTRL);
+}
+
+/* Enable an already-configured HFPLL. */
+static int clk_hfpll_enable(struct clk_hw *hw)
+{
+ unsigned long flags;
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode;
+
+ spin_lock_irqsave(&h->lock, flags);
+ regmap_read(regmap, hd->mode_reg, &mode);
+ /* Only run the enable sequence if the PLL is fully off. */
+ if (!(mode & (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)))
+ __clk_hfpll_enable(hw);
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ return 0;
+}
+
+/* Turn the PLL off: drop output, enter bypass and assert reset. Caller
+ * holds h->lock.
+ */
+static void __clk_hfpll_disable(struct clk_hfpll *h)
+{
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+
+ /*
+ * Disable the PLL output, disable test mode, enable the bypass mode,
+ * and assert the reset.
+ */
+ regmap_update_bits(regmap, hd->mode_reg,
+ PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL, 0);
+}
+
+/* clk_ops disable callback: locked wrapper around __clk_hfpll_disable(). */
+static void clk_hfpll_disable(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->lock, flags);
+ __clk_hfpll_disable(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+/*
+ * Round @rate up to a multiple of the parent rate, within the PLL's
+ * [min_rate, max_rate] window; step one multiple back down if rounding up
+ * overshot the maximum.
+ */
+static long clk_hfpll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ unsigned long rrate;
+
+ rate = clamp(rate, hd->min_rate, hd->max_rate);
+
+ rrate = DIV_ROUND_UP(rate, *parent_rate) * *parent_rate;
+ if (rrate > hd->max_rate)
+ rrate -= *parent_rate;
+
+ return rrate;
+}
+
+/*
+ * For optimization reasons, assumes no downstream clocks are actively using
+ * it.
+ */
+static int clk_hfpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ unsigned long flags;
+ u32 l_val, val;
+ bool enabled;
+
+ /* Integer division: round_rate() guarantees an exact multiple. */
+ l_val = rate / parent_rate;
+
+ spin_lock_irqsave(&h->lock, flags);
+
+ /* Reprogramming is only safe with the PLL off; restore state after. */
+ enabled = __clk_is_enabled(hw->clk);
+ if (enabled)
+ __clk_hfpll_disable(h);
+
+ /* Pick the right VCO. */
+ if (hd->user_reg && hd->user_vco_mask) {
+ regmap_read(regmap, hd->user_reg, &val);
+ if (rate <= hd->low_vco_max_rate)
+ val &= ~hd->user_vco_mask;
+ else
+ val |= hd->user_vco_mask;
+ regmap_write(regmap, hd->user_reg, val);
+ }
+
+ regmap_write(regmap, hd->l_reg, l_val);
+
+ if (enabled)
+ __clk_hfpll_enable(hw);
+
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ return 0;
+}
+
+/* Rate is simply L (the integer multiplier register) times the parent. */
+static unsigned long clk_hfpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 l_val;
+
+ regmap_read(regmap, hd->l_reg, &l_val);
+
+ return l_val * parent_rate;
+}
+
+/*
+ * clk_ops init callback: reconcile driver state with whatever the
+ * bootloader left behind.  An off or partially-on PLL just gets its
+ * one-time configuration; a PLL that claims to be on but is not locked is
+ * disabled and reconfigured.
+ */
+static void clk_hfpll_init(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode, status;
+
+ regmap_read(regmap, hd->mode_reg, &mode);
+ if (mode != (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)) {
+ __clk_hfpll_init_once(hw);
+ return;
+ }
+
+ if (hd->status_reg) {
+ regmap_read(regmap, hd->status_reg, &status);
+ if (!(status & BIT(hd->lock_bit))) {
+ WARN(1, "HFPLL %s is ON, but not locked!\n",
+ __clk_get_name(hw->clk));
+ clk_hfpll_disable(hw);
+ __clk_hfpll_init_once(hw);
+ }
+ }
+}
+
+/* Enabled means bypass disabled, reset de-asserted and output on. */
+static int hfpll_is_enabled(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode;
+
+ regmap_read(regmap, hd->mode_reg, &mode);
+ /* Only the low three mode bits are meaningful here. */
+ mode &= 0x7;
+ return mode == (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL);
+}
+
+/* Common clock framework ops for Krait high-frequency PLLs. */
+const struct clk_ops clk_ops_hfpll = {
+ .enable = clk_hfpll_enable,
+ .disable = clk_hfpll_disable,
+ .is_enabled = hfpll_is_enabled,
+ .round_rate = clk_hfpll_round_rate,
+ .set_rate = clk_hfpll_set_rate,
+ .recalc_rate = clk_hfpll_recalc_rate,
+ .init = clk_hfpll_init,
+};
+EXPORT_SYMBOL_GPL(clk_ops_hfpll);
diff --git a/drivers/clk/qcom/clk-hfpll.h b/drivers/clk/qcom/clk-hfpll.h
new file mode 100644
index 000000000000..48c18d664f4e
--- /dev/null
+++ b/drivers/clk/qcom/clk-hfpll.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_CLK_HFPLL_H__
+#define __QCOM_CLK_HFPLL_H__
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include "clk-regmap.h"
+
+/* Static per-SoC register layout and configuration for one HFPLL. */
+struct hfpll_data {
+ /* Register offsets; optional registers are 0 when absent. */
+ u32 mode_reg;
+ u32 l_reg;
+ u32 m_reg;
+ u32 n_reg;
+ u32 user_reg;
+ u32 droop_reg;
+ u32 config_reg;
+ u32 status_reg;
+ u8 lock_bit;
+
+ /* Values written during one-time initialization. */
+ u32 droop_val;
+ u32 config_val;
+ u32 user_val;
+ u32 user_vco_mask;
+ unsigned long low_vco_max_rate;
+
+ /* Supported output rate window. */
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
+/* Runtime state for one HFPLL instance. */
+struct clk_hfpll {
+ struct hfpll_data const *d;
+ /* Boolean flag: one-time init completed (NOTE: declared int). */
+ int init_done;
+
+ struct clk_regmap clkr;
+ /* Serializes enable/disable/set_rate register sequences. */
+ spinlock_t lock;
+};
+
+#define to_clk_hfpll(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_hfpll, clkr)
+
+extern const struct clk_ops clk_ops_hfpll;
+
+#endif
diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c
new file mode 100644
index 000000000000..84277741a9c8
--- /dev/null
+++ b/drivers/clk/qcom/clk-krait.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include <asm/krait-l2-accessors.h>
+
+#include "clk-krait.h"
+
+/* Secondary and primary muxes share the same cp15 register */
+static DEFINE_SPINLOCK(krait_clock_reg_lock);
+
+#define LPL_SHIFT 8
+static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel)
+{
+ unsigned long flags;
+ u32 regval;
+
+ spin_lock_irqsave(&krait_clock_reg_lock, flags);
+ regval = krait_get_l2_indirect_reg(mux->offset);
+ regval &= ~(mux->mask << mux->shift);
+ regval |= (sel & mux->mask) << mux->shift;
+ if (mux->lpl) {
+ regval &= ~(mux->mask << (mux->shift + LPL_SHIFT));
+ regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT);
+ }
+ krait_set_l2_indirect_reg(mux->offset, regval);
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
+
+ /* Wait for switch to complete. */
+ mb();
+ udelay(1);
+}
+
+/*
+ * Record the selector for parent @index; only touch hardware if the CPU
+ * this mux feeds is currently running (the register is inaccessible
+ * otherwise).  The cached en_mask is applied on the next enable.
+ */
+static int krait_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+ u32 sel;
+
+ sel = clk_mux_reindex(index, mux->parent_map, 0);
+ mux->en_mask = sel;
+ /* Don't touch mux if CPU is off as it won't work */
+ if (__clk_is_enabled(hw->clk))
+ __krait_mux_set_sel(mux, sel);
+ return 0;
+}
+
+/* Read the selector from hardware, cache it, and map it to a parent index. */
+static u8 krait_mux_get_parent(struct clk_hw *hw)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+ u32 sel;
+
+ sel = krait_get_l2_indirect_reg(mux->offset);
+ sel >>= mux->shift;
+ sel &= mux->mask;
+ mux->en_mask = sel;
+
+ return clk_mux_get_parent(hw, sel, mux->parent_map, 0);
+}
+
+/*
+ * Return the parent clk_hw corresponding to the mux's safe selector.
+ * Maps safe_sel back through parent_map; if no entry matches, 'i' ends
+ * at num_parents and clk_hw_get_parent_by_index() returns NULL.
+ * @safe_freq is currently unused.
+ */
+static struct clk_hw *krait_mux_get_safe_parent(struct clk_hw *hw,
+ unsigned long *safe_freq)
+{
+ int i;
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+
+ /*
+ * Fix: dropped the dead store "i = mux->safe_sel;" that was
+ * immediately overwritten by the loop initializer below.
+ */
+ for (i = 0; i < num_parents; i++)
+ if (mux->safe_sel == mux->parent_map[i])
+ break;
+
+ return clk_hw_get_parent_by_index(hw, i);
+}
+
+/* On enable, apply the last requested (cached) selector to hardware. */
+static int krait_mux_enable(struct clk_hw *hw)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+
+ __krait_mux_set_sel(mux, mux->en_mask);
+
+ return 0;
+}
+
+/* On disable, park the mux on its safe selector. */
+static void krait_mux_disable(struct clk_hw *hw)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+
+ __krait_mux_set_sel(mux, mux->safe_sel);
+}
+
+/* Ops for the cp15-controlled Krait CPU clock muxes. */
+const struct clk_ops krait_mux_clk_ops = {
+ .enable = krait_mux_enable,
+ .disable = krait_mux_disable,
+ .set_parent = krait_mux_set_parent,
+ .get_parent = krait_mux_get_parent,
+ .determine_rate = __clk_mux_determine_rate_closest,
+ .get_safe_parent = krait_mux_get_safe_parent,
+};
+EXPORT_SYMBOL_GPL(krait_mux_clk_ops);
+
+/* The divider can divide by 2, 4, 6 and 8. But we only really need div-2. */
+static long krait_div2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /* Ask the parent for 2x the target, then report half of that. */
+ *parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), rate * 2);
+ return DIV_ROUND_UP(*parent_rate, 2);
+}
+
+/*
+ * Always program div-2: the field encodes divisor as (val + 1) * 2 (see
+ * krait_div2_recalc_rate), so clearing it to 0 selects /2.  The @rate
+ * argument is intentionally unused — only div-2 is supported.
+ */
+static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct krait_div2_clk *d = to_krait_div2_clk(hw);
+ unsigned long flags;
+ u32 val;
+ u32 mask = BIT(d->width) - 1;
+
+ /* Clear the low-power mirror field too, when present. */
+ if (d->lpl)
+ mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift;
+
+ spin_lock_irqsave(&krait_clock_reg_lock, flags);
+ val = krait_get_l2_indirect_reg(d->offset);
+ val &= ~mask;
+ krait_set_l2_indirect_reg(d->offset, val);
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
+
+ return 0;
+}
+
+/* Decode the divider field: divisor = (field + 1) * 2. */
+static unsigned long
+krait_div2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct krait_div2_clk *d = to_krait_div2_clk(hw);
+ u32 mask = BIT(d->width) - 1;
+ u32 div;
+
+ div = krait_get_l2_indirect_reg(d->offset);
+ div >>= d->shift;
+ div &= mask;
+ div = (div + 1) * 2;
+
+ return DIV_ROUND_UP(parent_rate, div);
+}
+
+/* Ops for the Krait fixed div-2 divider. */
+const struct clk_ops krait_div2_clk_ops = {
+ .round_rate = krait_div2_round_rate,
+ .set_rate = krait_div2_set_rate,
+ .recalc_rate = krait_div2_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(krait_div2_clk_ops);
diff --git a/drivers/clk/qcom/clk-krait.h b/drivers/clk/qcom/clk-krait.h
new file mode 100644
index 000000000000..5d0063538e5d
--- /dev/null
+++ b/drivers/clk/qcom/clk-krait.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_KRAIT_H
+#define __QCOM_CLK_KRAIT_H
+
+#include <linux/clk-provider.h>
+
+/* A CPU clock mux controlled via the L2 indirect (cp15) register. */
+struct krait_mux_clk {
+ /* Maps clk framework parent index -> hardware selector value. */
+ unsigned int *parent_map;
+ bool has_safe_parent;
+ /* Selector to park on while reprogramming/disabling. */
+ u8 safe_sel;
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ /* Cached selector applied on enable. */
+ u32 en_mask;
+ /* Mirror the selector into the low-power-island field. */
+ bool lpl;
+
+ struct clk_hw hw;
+};
+
+#define to_krait_mux_clk(_hw) container_of(_hw, struct krait_mux_clk, hw)
+
+extern const struct clk_ops krait_mux_clk_ops;
+
+/* The Krait div-2 post divider (only /2 is used; see clk-krait.c). */
+struct krait_div2_clk {
+ u32 offset;
+ u8 width;
+ u32 shift;
+ bool lpl;
+
+ struct clk_hw hw;
+};
+
+#define to_krait_div2_clk(_hw) container_of(_hw, struct krait_div2_clk, hw)
+
+extern const struct clk_ops krait_div2_clk_ops;
+
+#endif
diff --git a/drivers/clk/qcom/clk-pll.h b/drivers/clk/qcom/clk-pll.h
index ffd0c63bddbc..1e64fe9326d6 100644
--- a/drivers/clk/qcom/clk-pll.h
+++ b/drivers/clk/qcom/clk-pll.h
@@ -79,6 +79,9 @@ struct pll_config {
u32 mn_ena_mask;
u32 main_output_mask;
u32 aux_output_mask;
+ u32 aux2_output_mask;
+ u32 early_output_mask;
+ u32 config_ctl_val;
};
void clk_pll_configure_sr(struct clk_pll *pll, struct regmap *regmap,
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 4b1e94bdf29e..b904c335cda4 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -178,5 +178,6 @@ extern const struct clk_ops clk_edp_pixel_ops;
extern const struct clk_ops clk_byte_ops;
extern const struct clk_ops clk_byte2_ops;
extern const struct clk_ops clk_pixel_ops;
+extern const struct clk_ops clk_gfx3d_ops;
#endif
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index b544bb302f79..a071bba8018c 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -723,3 +723,90 @@ const struct clk_ops clk_pixel_ops = {
.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
+
+/*
+ * Rate selection for the gfx3d RCG, which muxes between XO, fixed-rate
+ * PLL9 and two reprogrammable PLLs (PLL2, PLL8).  Rates matching XO or
+ * PLL9 are served directly.  Otherwise the request ping-pongs between
+ * PLL8 and PLL2 so the PLL currently feeding the clock is never
+ * reprogrammed while in use (the subsequent parent switch is glitch-free
+ * via set_rate_and_parent).
+ */
+static int clk_gfx3d_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rate_request parent_req = { };
+ struct clk_hw *p2, *p8, *p9, *xo;
+ unsigned long p9_rate;
+ int ret;
+
+ xo = clk_hw_get_parent_by_index(hw, 0);
+ if (req->rate == clk_hw_get_rate(xo)) {
+ req->best_parent_hw = xo;
+ return 0;
+ }
+
+ p9 = clk_hw_get_parent_by_index(hw, 2);
+ p2 = clk_hw_get_parent_by_index(hw, 3);
+ p8 = clk_hw_get_parent_by_index(hw, 4);
+
+ /* PLL9 is a fixed rate PLL */
+ p9_rate = clk_hw_get_rate(p9);
+
+ /* Requests above PLL9's rate are capped to it. */
+ parent_req.rate = req->rate = min(req->rate, p9_rate);
+ if (req->rate == p9_rate) {
+ req->rate = req->best_parent_rate = p9_rate;
+ req->best_parent_hw = p9;
+ return 0;
+ }
+
+ if (req->best_parent_hw == p9) {
+ /* Are we going back to a previously used rate? */
+ if (clk_hw_get_rate(p8) == req->rate)
+ req->best_parent_hw = p8;
+ else
+ req->best_parent_hw = p2;
+ } else if (req->best_parent_hw == p8) {
+ req->best_parent_hw = p2;
+ } else {
+ req->best_parent_hw = p8;
+ }
+
+ ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
+ if (ret)
+ return ret;
+
+ req->rate = req->best_parent_rate = parent_req.rate;
+
+ return 0;
+}
+
+/*
+ * Switch the RCG to parent @index.  The rate change itself already
+ * happened in the parent PLL; here only the source select is written.
+ */
+static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate, u8 index)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ u32 cfg;
+ int ret;
+
+ /* Just mux it, we don't use the division or m/n hardware */
+ cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
+ ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
+ if (ret)
+ return ret;
+
+ /* Latch the new configuration into the RCG. */
+ return update_config(rcg);
+}
+
+/* Intentionally a no-op; see the comment below. */
+static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ /*
+ * We should never get here; clk_gfx3d_determine_rate() should always
+ * make us use a different parent than what we're currently using, so
+ * clk_gfx3d_set_rate_and_parent() should always be called.
+ */
+ return 0;
+}
+
+/* Ops for the gfx3d graphics RCG (mux-only, PLL ping-pong rate changes). */
+const struct clk_ops clk_gfx3d_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_gfx3d_set_rate,
+ .set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
+ .determine_rate = clk_gfx3d_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
new file mode 100644
index 000000000000..05cf83c2ca83
--- /dev/null
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mfd/qcom_rpm.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/mfd/qcom-rpm.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+
+#define QCOM_RPM_MISC_CLK_TYPE 0x306b6c63
+#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+
+/*
+ * Declare a pair of RPM-owned clocks: _name votes in both active and
+ * sleep RPM state, _active (the peer) votes in the active state only.
+ * Both start with an INT_MAX rate request.
+ */
+#define DEFINE_CLK_RPM(_platform, _name, _active, r_id) \
+ static struct clk_rpm _platform##_##_active; \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_active, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpm _platform##_##_active = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_name, \
+ .active_only = true, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+/*
+ * Fixed-rate on/off branch pair parented to PXO.
+ *
+ * NOTE(review): unlike the CXO variant below (and DEFINE_CLK_RPM above),
+ * .active_only is set on the shared _name clock here rather than on the
+ * _active peer — confirm this asymmetry is intentional.
+ */
+#define DEFINE_CLK_RPM_PXO_BRANCH(_platform, _name, _active, r_id, r) \
+ static struct clk_rpm _platform##_##_active; \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = (r_id), \
+ .active_only = true, \
+ .peer = &_platform##_##_active, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpm _platform##_##_active = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_name, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+/* Fixed-rate on/off branch pair parented to CXO. */
+#define DEFINE_CLK_RPM_CXO_BRANCH(_platform, _name, _active, r_id, r) \
+ static struct clk_rpm _platform##_##_active; \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_active, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "cxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpm _platform##_##_active = { \
+ .rpm_clk_id = (r_id), \
+ .active_only = true, \
+ .peer = &_platform##_##_name, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "cxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)
+
+/* One RPM-managed clock; paired with a peer sharing the same RPM id. */
+struct clk_rpm {
+ const int rpm_clk_id;
+ /* When set, this clock votes 0 for the RPM sleep state. */
+ const bool active_only;
+ unsigned long rate;
+ bool enabled;
+ /* Branch clocks are on/off only: rate requests collapse to 0/1. */
+ bool branch;
+ /* Companion clock with the same rpm_clk_id; votes are aggregated. */
+ struct clk_rpm *peer;
+ struct clk_hw hw;
+ struct qcom_rpm *rpm;
+};
+
+/* Per-device clock controller state handed to the clk framework. */
+struct rpm_cc {
+ struct qcom_rpm *rpm;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+/* Per-SoC table of RPM clocks. */
+struct rpm_clk_desc {
+ struct clk_rpm **clks;
+ size_t num_clks;
+};
+
+static DEFINE_MUTEX(rpm_clk_lock);
+
+/*
+ * Vote INT_MAX for this clock in both RPM states — presumably so clocks
+ * left running by the bootloader keep running until a real consumer
+ * takes over (TODO: confirm against the RPM handoff protocol).
+ */
+static int clk_rpm_handoff(struct clk_rpm *r)
+{
+ int ret;
+ u32 value = INT_MAX;
+
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (ret)
+ return ret;
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Send the active-state rate vote to the RPM (rate rounded up to kHz). */
+static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate)
+{
+ u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */
+
+ return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+}
+
+/* Send the sleep-state rate vote to the RPM (rate rounded up to kHz). */
+static int clk_rpm_set_rate_sleep(struct clk_rpm *r, unsigned long rate)
+{
+ u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */
+
+ return qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
+ r->rpm_clk_id, &value, 1);
+}
+
+/* Split @rate into the active- and sleep-state votes for clock @r. */
+static void to_active_sleep(struct clk_rpm *r, unsigned long rate,
+ unsigned long *active, unsigned long *sleep)
+{
+ *active = rate;
+
+ /*
+ * Active-only clocks don't care what the rate is during sleep. So,
+ * they vote for zero.
+ */
+ if (r->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+/*
+ * Prepare = send our rate vote to the RPM, aggregated (max) with the
+ * enabled peer's vote, for both active and sleep state.  On a sleep-vote
+ * failure the active vote is rolled back to the peer's value.
+ */
+static int clk_rpm_prepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct clk_rpm *peer = r->peer;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret = 0;
+
+ mutex_lock(&rpm_clk_lock);
+
+ /* Don't send requests to the RPM if the rate has not been set. */
+ if (!r->rate)
+ goto out;
+
+ to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+
+ /* Branch clocks only carry an on/off vote. */
+ if (r->branch)
+ active_rate = !!active_rate;
+
+ ret = clk_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ if (r->branch)
+ sleep_rate = !!sleep_rate;
+
+ ret = clk_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ /* Undo the active set vote and restore it */
+ ret = clk_rpm_set_rate_active(r, peer_rate);
+
+out:
+ if (!ret)
+ r->enabled = true;
+
+ mutex_unlock(&rpm_clk_lock);
+
+ return ret;
+}
+
+/*
+ * Unprepare = drop our vote, leaving only the enabled peer's vote (or
+ * zero) in both RPM states.  Errors from the RPM leave 'enabled' set so
+ * the vote is not considered withdrawn.
+ */
+static void clk_rpm_unprepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct clk_rpm *peer = r->peer;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret;
+
+ mutex_lock(&rpm_clk_lock);
+
+ if (!r->rate)
+ goto out;
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate, &peer_rate,
+ &peer_sleep_rate);
+
+ active_rate = r->branch ? !!peer_rate : peer_rate;
+ ret = clk_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
+ ret = clk_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->enabled = false;
+
+out:
+ mutex_unlock(&rpm_clk_lock);
+}
+
+/*
+ * Update our rate vote (aggregated with the enabled peer) in both RPM
+ * states, then record the new rate.
+ *
+ * NOTE(review): when the clock is not enabled the request is silently
+ * dropped and r->rate keeps its old value — a later prepare() will use
+ * the stale rate; confirm this matches the intended framework contract.
+ */
+static int clk_rpm_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct clk_rpm *peer = r->peer;
+ unsigned long active_rate, sleep_rate;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ int ret = 0;
+
+ mutex_lock(&rpm_clk_lock);
+
+ if (!r->enabled)
+ goto out;
+
+ to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+ ret = clk_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ ret = clk_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->rate = rate;
+
+out:
+ mutex_unlock(&rpm_clk_lock);
+
+ return ret;
+}
+
+static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate is requested.
+ */
+ return rate;
+}
+
+static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate was set.
+ */
+ return r->rate;
+}
+
+/* Ops for rate-capable RPM clocks. */
+static const struct clk_ops clk_rpm_ops = {
+ .prepare = clk_rpm_prepare,
+ .unprepare = clk_rpm_unprepare,
+ .set_rate = clk_rpm_set_rate,
+ .round_rate = clk_rpm_round_rate,
+ .recalc_rate = clk_rpm_recalc_rate,
+};
+
+/* Ops for on/off-only (branch) RPM clocks — no set_rate. */
+static const struct clk_ops clk_rpm_branch_ops = {
+ .prepare = clk_rpm_prepare,
+ .unprepare = clk_rpm_unprepare,
+ .round_rate = clk_rpm_round_rate,
+ .recalc_rate = clk_rpm_recalc_rate,
+};
+
+/* apq8064 */
+DEFINE_CLK_RPM_PXO_BRANCH(apq8064, pxo, pxo_a_clk, QCOM_RPM_PXO_CLK, 27000000);
+DEFINE_CLK_RPM_CXO_BRANCH(apq8064, cxo, cxo_a_clk, QCOM_RPM_CXO_CLK, 19200000);
+DEFINE_CLK_RPM(apq8064, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
+DEFINE_CLK_RPM(apq8064, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
+DEFINE_CLK_RPM(apq8064, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
+DEFINE_CLK_RPM(apq8064, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
+DEFINE_CLK_RPM(apq8064, qdss_clk, qdss_a_clk, QCOM_RPM_QDSS_CLK);
+
+static struct clk_rpm *apq8064_clks[] = {
+ [RPM_PXO_CLK] = &apq8064_pxo,
+ [RPM_PXO_A_CLK] = &apq8064_pxo_a_clk,
+ [RPM_CXO_CLK] = &apq8064_cxo,
+ [RPM_CXO_A_CLK] = &apq8064_cxo_a_clk,
+ [RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk,
+ [RPM_APPS_FABRIC_A_CLK] = &apq8064_afab_a_clk,
+ [RPM_CFPB_CLK] = &apq8064_cfpb_clk,
+ [RPM_CFPB_A_CLK] = &apq8064_cfpb_a_clk,
+ [RPM_DAYTONA_FABRIC_CLK] = &apq8064_daytona_clk,
+ [RPM_DAYTONA_FABRIC_A_CLK] = &apq8064_daytona_a_clk,
+ [RPM_EBI1_CLK] = &apq8064_ebi1_clk,
+ [RPM_EBI1_A_CLK] = &apq8064_ebi1_a_clk,
+ [RPM_MM_FABRIC_CLK] = &apq8064_mmfab_clk,
+ [RPM_MM_FABRIC_A_CLK] = &apq8064_mmfab_a_clk,
+ [RPM_MMFPB_CLK] = &apq8064_mmfpb_clk,
+ [RPM_MMFPB_A_CLK] = &apq8064_mmfpb_a_clk,
+ [RPM_SYS_FABRIC_CLK] = &apq8064_sfab_clk,
+ [RPM_SYS_FABRIC_A_CLK] = &apq8064_sfab_a_clk,
+ [RPM_SFPB_CLK] = &apq8064_sfpb_clk,
+ [RPM_SFPB_A_CLK] = &apq8064_sfpb_a_clk,
+ [RPM_QDSS_CLK] = &apq8064_qdss_clk,
+ [RPM_QDSS_A_CLK] = &apq8064_qdss_a_clk,
+};
+
+static const struct rpm_clk_desc rpm_clk_apq8064 = {
+ .clks = apq8064_clks,
+ .num_clks = ARRAY_SIZE(apq8064_clks),
+};
+
+static const struct of_device_id rpm_clk_match_table[] = {
+ { .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064},
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpm_clk_match_table);
+
+static int rpm_clk_probe(struct platform_device *pdev)
+{
+ struct clk **clks;
+ struct clk *clk;
+ struct rpm_cc *rcc;
+ struct clk_onecell_data *data;
+ int ret;
+ size_t num_clks, i;
+ struct qcom_rpm *rpm;
+ struct clk_rpm **rpm_clks;
+ const struct rpm_clk_desc *desc;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpm_clks = desc->clks;
+ num_clks = desc->num_clks;
+
+ rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!rcc)
+ return -ENOMEM;
+
+ clks = rcc->clks;
+ data = &rcc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_clks[i]) {
+ clks[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
+ rpm_clks[i]->rpm = rpm;
+
+ ret = clk_rpm_handoff(rpm_clks[i]);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_clks[i]) {
+ clks[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
+ clk = devm_clk_register(&pdev->dev, &rpm_clks[i]->hw);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err;
+ }
+
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+ data);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret);
+ return ret;
+}
+
+static int rpm_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static struct platform_driver rpm_clk_driver = {
+ .driver = {
+ .name = "qcom-clk-rpm",
+ .of_match_table = rpm_clk_match_table,
+ },
+ .probe = rpm_clk_probe,
+ .remove = rpm_clk_remove,
+};
+
+static int __init rpm_clk_init(void)
+{
+ return platform_driver_register(&rpm_clk_driver);
+}
+core_initcall(rpm_clk_init);
+
+static void __exit rpm_clk_exit(void)
+{
+ platform_driver_unregister(&rpm_clk_driver);
+}
+module_exit(rpm_clk_exit);
+
+MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-clk-rpm");
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
new file mode 100644
index 000000000000..06eb06009268
--- /dev/null
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -0,0 +1,653 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/mfd/qcom-rpm.h>
+
+#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773
+#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
+#define QCOM_RPM_SMD_KEY_RATE 0x007a484b
+#define QCOM_RPM_SMD_KEY_ENABLE 0x62616e45
+#define QCOM_RPM_SMD_KEY_STATE 0x54415453
+#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+
+#define __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, stat_id, \
+ key) \
+ static struct clk_smd_rpm _platform##_##_active; \
+ static struct clk_smd_rpm _platform##_##_name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &_platform##_##_active, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_smd_rpm _platform##_##_active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .active_only = true, \
+ .rpm_key = (key), \
+ .peer = &_platform##_##_name, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, \
+ stat_id, r, key) \
+ static struct clk_smd_rpm _platform##_##_active; \
+ static struct clk_smd_rpm _platform##_##_name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .branch = true, \
+ .peer = &_platform##_##_active, \
+ .rate = (r), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_branch_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_smd_rpm _platform##_##_active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .active_only = true, \
+ .rpm_key = (key), \
+ .branch = true, \
+ .peer = &_platform##_##_name, \
+ .rate = (r), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_branch_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id) \
+ __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \
+ 0, QCOM_RPM_SMD_KEY_RATE)
+
+#define DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, r) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, \
+ r_id, 0, r, QCOM_RPM_SMD_KEY_ENABLE)
+
+#define DEFINE_CLK_SMD_RPM_QDSS(_platform, _name, _active, type, r_id) \
+ __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \
+ 0, QCOM_RPM_SMD_KEY_STATE)
+
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_platform, _name, _active, r_id) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_RPM_KEY_SOFTWARE_ENABLE)
+
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_platform, _name, _active, r_id) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY)
+
+#define to_clk_smd_rpm(_hw) container_of(_hw, struct clk_smd_rpm, hw)
+
+struct clk_smd_rpm {
+ const int rpm_res_type;
+ const int rpm_key;
+ const int rpm_clk_id;
+ const int rpm_status_id;
+ const bool active_only;
+ bool enabled;
+ bool branch;
+ struct clk_smd_rpm *peer;
+ struct clk_hw hw;
+ unsigned long rate;
+ struct qcom_smd_rpm *rpm;
+};
+
+struct clk_smd_rpm_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
+struct rpm_cc {
+ struct qcom_rpm *rpm;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+struct rpm_smd_clk_desc {
+ struct clk_smd_rpm **clks;
+ size_t num_clks;
+};
+
+static DEFINE_MUTEX(rpm_smd_clk_lock);
+
+static int clk_smd_rpm_handoff(struct clk_smd_rpm *r)
+{
+ int ret;
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(r->rpm_key),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(INT_MAX),
+ };
+
+ ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+ if (ret)
+ return ret;
+ ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r,
+ unsigned long rate)
+{
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(r->rpm_key),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
+ };
+
+ return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+}
+
+static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r,
+ unsigned long rate)
+{
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(r->rpm_key),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
+ };
+
+ return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+}
+
+static void to_active_sleep(struct clk_smd_rpm *r, unsigned long rate,
+ unsigned long *active, unsigned long *sleep)
+{
+ *active = rate;
+
+ /*
+ * Active-only clocks don't care what the rate is during sleep. So,
+ * they vote for zero.
+ */
+ if (r->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+static int clk_smd_rpm_prepare(struct clk_hw *hw)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret = 0;
+
+ mutex_lock(&rpm_smd_clk_lock);
+
+ /* Don't send requests to the RPM if the rate has not been set. */
+ if (!r->rate)
+ goto out;
+
+ to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+
+ if (r->branch)
+ active_rate = !!active_rate;
+
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ if (r->branch)
+ sleep_rate = !!sleep_rate;
+
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ /* Undo the active set vote and restore it */
+ ret = clk_smd_rpm_set_rate_active(r, peer_rate);
+
+out:
+ if (!ret)
+ r->enabled = true;
+
+ mutex_unlock(&rpm_smd_clk_lock);
+
+ return ret;
+}
+
+static void clk_smd_rpm_unprepare(struct clk_hw *hw)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret;
+
+ mutex_lock(&rpm_smd_clk_lock);
+
+ if (!r->rate)
+ goto out;
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate, &peer_rate,
+ &peer_sleep_rate);
+
+ active_rate = r->branch ? !!peer_rate : peer_rate;
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->enabled = false;
+
+out:
+ mutex_unlock(&rpm_smd_clk_lock);
+}
+
+static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long active_rate, sleep_rate;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ int ret = 0;
+
+ mutex_lock(&rpm_smd_clk_lock);
+
+ if (!r->enabled)
+ goto out;
+
+ to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->rate = rate;
+
+out:
+ mutex_unlock(&rpm_smd_clk_lock);
+
+ return ret;
+}
+
+static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate is requested.
+ */
+ return rate;
+}
+
+static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate was set.
+ */
+ return r->rate;
+}
+
+static int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm)
+{
+ int ret;
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(QCOM_RPM_SMD_KEY_ENABLE),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(1),
+ };
+
+ ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ QCOM_SMD_RPM_MISC_CLK,
+ QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+ if (ret) {
+ pr_err("RPM clock scaling (sleep set) not enabled!\n");
+ return ret;
+ }
+
+ ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ QCOM_SMD_RPM_MISC_CLK,
+ QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+ if (ret) {
+ pr_err("RPM clock scaling (active set) not enabled!\n");
+ return ret;
+ }
+
+ pr_debug("%s: RPM clock scaling is enabled\n", __func__);
+ return 0;
+}
+
+static const struct clk_ops clk_smd_rpm_ops = {
+ .prepare = clk_smd_rpm_prepare,
+ .unprepare = clk_smd_rpm_unprepare,
+ .set_rate = clk_smd_rpm_set_rate,
+ .round_rate = clk_smd_rpm_round_rate,
+ .recalc_rate = clk_smd_rpm_recalc_rate,
+};
+
+static const struct clk_ops clk_smd_rpm_branch_ops = {
+ .prepare = clk_smd_rpm_prepare,
+ .unprepare = clk_smd_rpm_unprepare,
+ .round_rate = clk_smd_rpm_round_rate,
+ .recalc_rate = clk_smd_rpm_recalc_rate,
+};
+
+/* msm8916 */
+DEFINE_CLK_SMD_RPM(msm8916, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8916, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM_QDSS(msm8916, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk1, bb_clk1_a, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk2, bb_clk2_a, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk1, rf_clk1_a, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk2, rf_clk2_a, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk1_pin, bb_clk1_a_pin, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5);
+
+static struct clk_smd_rpm *msm8916_clks[] = {
+ [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
+ [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
+ [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8916 = {
+ .clks = msm8916_clks,
+ .num_clks = ARRAY_SIZE(msm8916_clks),
+};
+
+/* msm8996 */
+DEFINE_CLK_SMD_RPM(msm8996, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8996, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
+DEFINE_CLK_SMD_RPM(msm8996, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, mmssnoc_axi_rpm_clk, mmssnoc_axi_rpm_a_clk,
+ QCOM_SMD_RPM_MMAXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
+DEFINE_CLK_SMD_RPM_BRANCH(msm8996, cxo, cxo_a, QCOM_SMD_RPM_MISC_CLK, 0, 19200000);
+DEFINE_CLK_SMD_RPM_BRANCH(msm8996, aggre1_noc_clk, aggre1_noc_a_clk,
+ QCOM_SMD_RPM_AGGR_CLK, 0, 1000);
+DEFINE_CLK_SMD_RPM_BRANCH(msm8996, aggre2_noc_clk, aggre2_noc_a_clk,
+ QCOM_SMD_RPM_AGGR_CLK, 1, 1000);
+DEFINE_CLK_SMD_RPM_QDSS(msm8996, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, bb_clk1, bb_clk1_a, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, bb_clk2, bb_clk2_a, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, rf_clk1, rf_clk1_a, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, rf_clk2, rf_clk2_a, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, ln_bb_clk, ln_bb_a_clk, 8);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk1, div_clk1_ao, 0xb);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk2, div_clk2_ao, 0xc);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk3, div_clk3_ao, 0xc);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, bb_clk1_pin, bb_clk1_a_pin, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, bb_clk2_pin, bb_clk2_a_pin, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk1_pin, rf_clk1_a_pin, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk2_pin, rf_clk2_a_pin, 5);
+
+static struct clk_smd_rpm *msm8996_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &msm8996_cxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &msm8996_cxo_a,
+ [RPM_AGGR1_NOC_CLK] = &msm8996_aggre1_noc_clk,
+ [RPM_AGGR1_NOC_A_CLK] = &msm8996_aggre1_noc_a_clk,
+ [RPM_AGGR2_NOC_CLK] = &msm8996_aggre2_noc_clk,
+ [RPM_AGGR2_NOC_A_CLK] = &msm8996_aggre2_noc_a_clk,
+ [RPM_SMD_PCNOC_CLK] = &msm8996_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8996_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &msm8996_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8996_snoc_a_clk,
+ [RPM_CNOC_CLK] = &msm8996_cnoc_clk,
+ [RPM_CNOC_A_CLK] = &msm8996_cnoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8996_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8996_bimc_a_clk,
+ [RPM_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk,
+ [RPM_MMAXI_A_CLK] = &msm8996_mmssnoc_axi_rpm_a_clk,
+ [RPM_IPA_CLK] = &msm8996_ipa_clk,
+ [RPM_IPA_A_CLK] = &msm8996_ipa_a_clk,
+ [RPM_CE1_CLK] = &msm8996_ce1_clk,
+ [RPM_CE1_A_CLK] = &msm8996_ce1_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8996_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8996_qdss_a_clk,
+ [RPM_LN_BB_CLK] = &msm8996_ln_bb_clk,
+ [RPM_LN_BB_A_CLK] = &msm8996_ln_bb_a_clk,
+ [RPM_DIV_CLK1] = &msm8996_div_clk1,
+ [RPM_DIV_CLK1_AO] = &msm8996_div_clk1_ao,
+ [RPM_DIV_CLK2] = &msm8996_div_clk2,
+ [RPM_DIV_CLK2_AO] = &msm8996_div_clk2_ao,
+ [RPM_DIV_CLK3] = &msm8996_div_clk3,
+ [RPM_DIV_CLK3_AO] = &msm8996_div_clk3_ao,
+ [RPM_BB_CLK1_PIN] = &msm8996_bb_clk1_pin,
+ [RPM_BB_CLK1_A_PIN] = &msm8996_bb_clk1_a_pin,
+ [RPM_BB_CLK2_PIN] = &msm8996_bb_clk2_pin,
+ [RPM_BB_CLK2_A_PIN] = &msm8996_bb_clk2_a_pin,
+ [RPM_RF_CLK1_PIN] = &msm8996_rf_clk1_pin,
+ [RPM_RF_CLK1_A_PIN] = &msm8996_rf_clk1_a_pin,
+ [RPM_RF_CLK2_PIN] = &msm8996_rf_clk2_pin,
+ [RPM_RF_CLK2_A_PIN] = &msm8996_rf_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
+ .clks = msm8996_clks,
+ .num_clks = ARRAY_SIZE(msm8996_clks),
+};
+
+static const struct of_device_id rpm_smd_clk_match_table[] = {
+ { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916},
+ { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996},
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
+
+static int rpm_smd_clk_probe(struct platform_device *pdev)
+{
+ struct clk **clks;
+ struct clk *clk;
+ struct rpm_cc *rcc;
+ struct clk_onecell_data *data;
+ int ret;
+ size_t num_clks, i;
+ struct qcom_smd_rpm *rpm;
+ struct clk_smd_rpm **rpm_smd_clks;
+ const struct rpm_smd_clk_desc *desc;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpm_smd_clks = desc->clks;
+ num_clks = desc->num_clks;
+
+ rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!rcc)
+ return -ENOMEM;
+
+ clks = rcc->clks;
+ data = &rcc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_smd_clks[i]) {
+ clks[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
+ rpm_smd_clks[i]->rpm = rpm;
+
+ ret = clk_smd_rpm_handoff(rpm_smd_clks[i]);
+ if (ret)
+ goto err;
+ }
+
+ ret = clk_smd_rpm_enable_scaling(rpm);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_smd_clks[i]) {
+ clks[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
+ clk = devm_clk_register(&pdev->dev, &rpm_smd_clks[i]->hw);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err;
+ }
+
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+ data);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&pdev->dev, "Error registering SMD clock driver (%d)\n", ret);
+ return ret;
+}
+
+static int rpm_smd_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static struct platform_driver rpm_smd_clk_driver = {
+ .driver = {
+ .name = "qcom-clk-smd-rpm",
+ .of_match_table = rpm_smd_clk_match_table,
+ },
+ .probe = rpm_smd_clk_probe,
+ .remove = rpm_smd_clk_remove,
+};
+
+static int __init rpm_smd_clk_init(void)
+{
+ return platform_driver_register(&rpm_smd_clk_driver);
+}
+core_initcall(rpm_smd_clk_init);
+
+static void __exit rpm_smd_clk_exit(void)
+{
+ platform_driver_unregister(&rpm_smd_clk_driver);
+}
+module_exit(rpm_smd_clk_exit);
+
+MODULE_DESCRIPTION("Qualcomm RPM over SMD Clock Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-clk-smd-rpm");
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 8fa477293ae0..f7c226ab4307 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/clk-provider.h>
#include <linux/reset-controller.h>
+#include <linux/of.h>
#include "common.h"
#include "clk-rcg.h"
@@ -88,6 +89,91 @@ static void qcom_cc_gdsc_unregister(void *data)
gdsc_unregister(data);
}
+/*
+ * Backwards compatibility with old DTs. Register a pass-through factor 1/1
+ * clock to translate 'path' clk into 'name' clk and regsiter the 'path'
+ * clk as a fixed rate clock if it isn't present.
+ */
+static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
+ const char *name, unsigned long rate,
+ bool add_factor)
+{
+ struct device_node *node = NULL;
+ struct device_node *clocks_node;
+ struct clk_fixed_factor *factor;
+ struct clk_fixed_rate *fixed;
+ struct clk *clk;
+ struct clk_init_data init_data = { };
+
+ clocks_node = of_find_node_by_path("/clocks");
+ if (clocks_node)
+ node = of_find_node_by_name(clocks_node, path);
+ of_node_put(clocks_node);
+
+ if (!node) {
+ fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL);
+ if (!fixed)
+ return -EINVAL;
+
+ fixed->fixed_rate = rate;
+ fixed->hw.init = &init_data;
+
+ init_data.name = path;
+ init_data.ops = &clk_fixed_rate_ops;
+
+ clk = devm_clk_register(dev, &fixed->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ }
+ of_node_put(node);
+
+ if (add_factor) {
+ factor = devm_kzalloc(dev, sizeof(*factor), GFP_KERNEL);
+ if (!factor)
+ return -EINVAL;
+
+ factor->mult = factor->div = 1;
+ factor->hw.init = &init_data;
+
+ init_data.name = name;
+ init_data.parent_names = &path;
+ init_data.num_parents = 1;
+ init_data.flags = 0;
+ init_data.ops = &clk_fixed_factor_ops;
+
+ clk = devm_clk_register(dev, &factor->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ }
+
+ return 0;
+}
+
+int qcom_cc_register_board_clk(struct device *dev, const char *path,
+ const char *name, unsigned long rate)
+{
+ bool add_factor = true;
+ struct device_node *node;
+
+ /* The RPM clock driver will add the factor clock if present */
+ if (IS_ENABLED(CONFIG_QCOM_RPMCC)) {
+ node = of_find_compatible_node(NULL, NULL, "qcom,rpmcc");
+ if (of_device_is_available(node))
+ add_factor = false;
+ of_node_put(node);
+ }
+
+ return _qcom_cc_register_board_clk(dev, path, name, rate, add_factor);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_register_board_clk);
+
+int qcom_cc_register_sleep_clk(struct device *dev)
+{
+ return _qcom_cc_register_board_clk(dev, "sleep_clk", "sleep_clk_src",
+ 32768, true);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_register_sleep_clk);
+
int qcom_cc_really_probe(struct platform_device *pdev,
const struct qcom_cc_desc *desc, struct regmap *regmap)
{
@@ -98,6 +184,7 @@ int qcom_cc_really_probe(struct platform_device *pdev,
struct clk **clks;
struct qcom_reset_controller *reset;
struct qcom_cc *cc;
+ struct gdsc_desc *scd;
size_t num_clks = desc->num_clks;
struct clk_regmap **rclks = desc->clks;
@@ -126,7 +213,11 @@ int qcom_cc_really_probe(struct platform_device *pdev,
if (ret)
return ret;
- devm_add_action(dev, qcom_cc_del_clk_provider, pdev->dev.of_node);
+ ret = devm_add_action_or_reset(dev, qcom_cc_del_clk_provider,
+ pdev->dev.of_node);
+
+ if (ret)
+ return ret;
reset = &cc->reset;
reset->rcdev.of_node = dev->of_node;
@@ -140,18 +231,28 @@ int qcom_cc_really_probe(struct platform_device *pdev,
if (ret)
return ret;
- devm_add_action(dev, qcom_cc_reset_unregister, &reset->rcdev);
+ ret = devm_add_action_or_reset(dev, qcom_cc_reset_unregister,
+ &reset->rcdev);
+
+ if (ret)
+ return ret;
if (desc->gdscs && desc->num_gdscs) {
- ret = gdsc_register(dev, desc->gdscs, desc->num_gdscs,
- &reset->rcdev, regmap);
+ scd = devm_kzalloc(dev, sizeof(*scd), GFP_KERNEL);
+ if (!scd)
+ return -ENOMEM;
+ scd->dev = dev;
+ scd->scs = desc->gdscs;
+ scd->num = desc->num_gdscs;
+ ret = gdsc_register(scd, &reset->rcdev, regmap);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, qcom_cc_gdsc_unregister,
+ scd);
if (ret)
return ret;
}
- devm_add_action(dev, qcom_cc_gdsc_unregister, dev);
-
-
return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 7c1fba3ebc03..ae9bdeb21f29 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -37,6 +37,10 @@ extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f,
extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
u8 src);
+extern int qcom_cc_register_board_clk(struct device *dev, const char *path,
+ const char *name, unsigned long rate);
+extern int qcom_cc_register_sleep_clk(struct device *dev);
+
extern struct regmap *qcom_cc_map(struct platform_device *pdev,
const struct qcom_cc_desc *desc);
extern int qcom_cc_really_probe(struct platform_device *pdev,
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 16fc64c082a5..53f8c67bdbce 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -890,7 +890,6 @@ static struct clk_branch gsbi1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -906,7 +905,6 @@ static struct clk_branch gsbi2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -922,7 +920,6 @@ static struct clk_branch gsbi4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -938,7 +935,6 @@ static struct clk_branch gsbi5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -954,7 +950,6 @@ static struct clk_branch gsbi6_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi6_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -970,7 +965,6 @@ static struct clk_branch gsbi7_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi7_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1144,7 +1138,6 @@ static struct clk_branch pmem_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmem_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1308,7 +1301,6 @@ static struct clk_branch sdc1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1324,7 +1316,6 @@ static struct clk_branch sdc3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1394,7 +1385,6 @@ static struct clk_branch tsif_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "tsif_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1410,7 +1400,6 @@ static struct clk_branch dma_bam_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "dma_bam_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1425,7 +1414,6 @@ static struct clk_branch adm0_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1442,7 +1430,6 @@ static struct clk_branch adm0_pbus_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_pbus_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1457,7 +1444,6 @@ static struct clk_branch pmic_arb0_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb0_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1472,7 +1458,6 @@ static struct clk_branch pmic_arb1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1487,7 +1472,6 @@ static struct clk_branch pmic_ssbi2_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_ssbi2_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1504,7 +1488,6 @@ static struct clk_branch rpm_msg_ram_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "rpm_msg_ram_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1563,7 +1546,6 @@ static struct clk_branch pcie_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1577,7 +1559,6 @@ static struct clk_branch pcie_aux_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_aux_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1591,7 +1572,6 @@ static struct clk_branch pcie_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1605,7 +1585,6 @@ static struct clk_branch pcie_phy_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_phy_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1659,7 +1638,6 @@ static struct clk_branch pcie1_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie1_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1673,7 +1651,6 @@ static struct clk_branch pcie1_aux_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie1_aux_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1687,7 +1664,6 @@ static struct clk_branch pcie1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1701,7 +1677,6 @@ static struct clk_branch pcie1_phy_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie1_phy_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1755,7 +1730,6 @@ static struct clk_branch pcie2_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie2_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1769,7 +1743,6 @@ static struct clk_branch pcie2_aux_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie2_aux_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1783,7 +1756,6 @@ static struct clk_branch pcie2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1797,7 +1769,6 @@ static struct clk_branch pcie2_phy_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie2_phy_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1887,7 +1858,6 @@ static struct clk_branch sata_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1901,7 +1871,6 @@ static struct clk_branch sata_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1915,7 +1884,6 @@ static struct clk_branch sfab_sata_s_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sfab_sata_s_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1929,7 +1897,6 @@ static struct clk_branch sata_phy_cfg_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_phy_cfg_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2139,7 +2106,6 @@ static struct clk_branch usb_hs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2218,7 +2184,6 @@ static struct clk_branch usb_fs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2234,7 +2199,6 @@ static struct clk_branch ebi2_clk = {
.hw.init = &(struct clk_init_data){
.name = "ebi2_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2248,7 +2212,6 @@ static struct clk_branch ebi2_aon_clk = {
.hw.init = &(struct clk_init_data){
.name = "ebi2_always_on_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index f110bb5a1df3..7d327274a4ae 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -1479,7 +1479,6 @@ static struct clk_branch pmem_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmem_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2027,7 +2026,6 @@ static struct clk_branch gsbi1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2041,7 +2039,6 @@ static struct clk_branch gsbi2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2055,7 +2052,6 @@ static struct clk_branch gsbi3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2069,7 +2065,6 @@ static struct clk_branch gsbi4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2083,7 +2078,6 @@ static struct clk_branch gsbi5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2097,7 +2091,6 @@ static struct clk_branch gsbi6_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi6_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2111,7 +2104,6 @@ static struct clk_branch gsbi7_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi7_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2125,7 +2117,6 @@ static struct clk_branch gsbi8_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi8_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2139,7 +2130,6 @@ static struct clk_branch gsbi9_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi9_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2153,7 +2143,6 @@ static struct clk_branch gsbi10_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi10_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2167,7 +2156,6 @@ static struct clk_branch gsbi11_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi11_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2181,7 +2169,6 @@ static struct clk_branch gsbi12_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi12_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2195,7 +2182,6 @@ static struct clk_branch tsif_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "tsif_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2209,7 +2195,6 @@ static struct clk_branch usb_fs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2223,7 +2208,6 @@ static struct clk_branch usb_fs2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2237,7 +2221,6 @@ static struct clk_branch usb_hs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2251,7 +2234,6 @@ static struct clk_branch sdc1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2265,7 +2247,6 @@ static struct clk_branch sdc2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2279,7 +2260,6 @@ static struct clk_branch sdc3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2293,7 +2273,6 @@ static struct clk_branch sdc4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2307,7 +2286,6 @@ static struct clk_branch sdc5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2322,7 +2300,6 @@ static struct clk_branch adm0_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2337,7 +2314,6 @@ static struct clk_branch adm0_pbus_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_pbus_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2352,7 +2328,6 @@ static struct clk_branch adm1_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm1_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2367,7 +2342,6 @@ static struct clk_branch adm1_pbus_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm1_pbus_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2382,7 +2356,6 @@ static struct clk_branch modem_ahb1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "modem_ahb1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2397,7 +2370,6 @@ static struct clk_branch modem_ahb2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "modem_ahb2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2412,7 +2384,6 @@ static struct clk_branch pmic_arb0_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb0_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2427,7 +2398,6 @@ static struct clk_branch pmic_arb1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2442,7 +2412,6 @@ static struct clk_branch pmic_ssbi2_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_ssbi2_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2459,7 +2428,6 @@ static struct clk_branch rpm_msg_ram_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "rpm_msg_ram_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index 66c18bc97857..6c899fb5b856 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -1546,7 +1546,6 @@ static struct clk_branch pmem_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmem_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2143,7 +2142,6 @@ static struct clk_branch usb_hsic_hsio_cal_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hsic_hsio_cal_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2293,7 +2291,6 @@ static struct clk_branch ce1_core_clk = {
.hw.init = &(struct clk_init_data){
.name = "ce1_core_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2307,7 +2304,6 @@ static struct clk_branch ce1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "ce1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2323,7 +2319,6 @@ static struct clk_branch dma_bam_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "dma_bam_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2339,7 +2334,6 @@ static struct clk_branch gsbi1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2355,7 +2349,6 @@ static struct clk_branch gsbi2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2371,7 +2364,6 @@ static struct clk_branch gsbi3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2387,7 +2379,6 @@ static struct clk_branch gsbi4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2403,7 +2394,6 @@ static struct clk_branch gsbi5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2419,7 +2409,6 @@ static struct clk_branch gsbi6_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi6_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2435,7 +2424,6 @@ static struct clk_branch gsbi7_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi7_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2451,7 +2439,6 @@ static struct clk_branch gsbi8_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi8_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2467,7 +2454,6 @@ static struct clk_branch gsbi9_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi9_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2483,7 +2469,6 @@ static struct clk_branch gsbi10_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi10_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2499,7 +2484,6 @@ static struct clk_branch gsbi11_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi11_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2515,7 +2499,6 @@ static struct clk_branch gsbi12_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "gsbi12_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2531,7 +2514,6 @@ static struct clk_branch tsif_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "tsif_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2545,7 +2527,6 @@ static struct clk_branch usb_fs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2559,7 +2540,6 @@ static struct clk_branch usb_fs2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_fs2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2575,7 +2555,6 @@ static struct clk_branch usb_hs1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2589,7 +2568,6 @@ static struct clk_branch usb_hs3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2603,7 +2581,6 @@ static struct clk_branch usb_hs4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hs4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2617,7 +2594,6 @@ static struct clk_branch usb_hsic_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "usb_hsic_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2633,7 +2609,6 @@ static struct clk_branch sdc1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2649,7 +2624,6 @@ static struct clk_branch sdc2_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc2_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2665,7 +2639,6 @@ static struct clk_branch sdc3_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc3_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2681,7 +2654,6 @@ static struct clk_branch sdc4_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc4_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2697,7 +2669,6 @@ static struct clk_branch sdc5_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sdc5_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2712,7 +2683,6 @@ static struct clk_branch adm0_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2729,7 +2699,6 @@ static struct clk_branch adm0_pbus_clk = {
.hw.init = &(struct clk_init_data){
.name = "adm0_pbus_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2883,7 +2852,6 @@ static struct clk_branch sata_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2897,7 +2865,6 @@ static struct clk_branch sata_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2911,7 +2878,6 @@ static struct clk_branch sfab_sata_s_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "sfab_sata_s_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2925,7 +2891,6 @@ static struct clk_branch sata_phy_cfg_clk = {
.hw.init = &(struct clk_init_data){
.name = "sata_phy_cfg_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2939,7 +2904,6 @@ static struct clk_branch pcie_phy_ref_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_phy_ref_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2953,7 +2917,6 @@ static struct clk_branch pcie_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2967,7 +2930,6 @@ static struct clk_branch pcie_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "pcie_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2982,7 +2944,6 @@ static struct clk_branch pmic_arb0_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb0_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2997,7 +2958,6 @@ static struct clk_branch pmic_arb1_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_arb1_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -3012,7 +2972,6 @@ static struct clk_branch pmic_ssbi2_clk = {
.hw.init = &(struct clk_init_data){
.name = "pmic_ssbi2_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -3029,7 +2988,6 @@ static struct clk_branch rpm_msg_ram_h_clk = {
.hw.init = &(struct clk_init_data){
.name = "rpm_msg_ram_h_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index 28abb8f8f293..cb399b94a971 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -1965,7 +1965,6 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_mss_q6_bimc_axi_clk",
- .flags = CLK_IS_ROOT,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
new file mode 100644
index 000000000000..a63680290219
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -0,0 +1,3516 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8996.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-alpha-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_GPLL2,
+ P_GPLL3,
+ P_GPLL1,
+ P_GPLL2_EARLY,
+ P_GPLL0_EARLY_DIV,
+ P_SLEEP_CLK,
+ P_GPLL4,
+ P_AUD_REF_CLK,
+ P_GPLL1_EARLY_DIV
+};
+
+static const struct parent_map gcc_sleep_clk_map[] = {
+ { P_SLEEP_CLK, 5 }
+};
+
+static const char * const gcc_sleep_clk[] = {
+ "sleep_clk"
+};
+
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 }
+};
+
+static const char * const gcc_xo_gpll0[] = {
+ "xo",
+ "gpll0"
+};
+
+static const struct parent_map gcc_xo_sleep_clk_map[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 5 }
+};
+
+static const char * const gcc_xo_sleep_clk[] = {
+ "xo",
+ "sleep_clk"
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll0_early_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL0_EARLY_DIV, 6 }
+};
+
+static const char * const gcc_xo_gpll0_gpll0_early_div[] = {
+ "xo",
+ "gpll0",
+ "gpll0_early_div"
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 5 }
+};
+
+static const char * const gcc_xo_gpll0_gpll4[] = {
+ "xo",
+ "gpll0",
+ "gpll4"
+};
+
+static const struct parent_map gcc_xo_gpll0_aud_ref_clk_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_AUD_REF_CLK, 2 }
+};
+
+static const char * const gcc_xo_gpll0_aud_ref_clk[] = {
+ "xo",
+ "gpll0",
+ "aud_ref_clk"
+};
+
+static const struct parent_map gcc_xo_gpll0_sleep_clk_gpll0_early_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_EARLY_DIV, 6 }
+};
+
+static const char * const gcc_xo_gpll0_sleep_clk_gpll0_early_div[] = {
+ "xo",
+ "gpll0",
+ "sleep_clk",
+ "gpll0_early_div"
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_gpll0_early_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 5 },
+ { P_GPLL0_EARLY_DIV, 6 }
+};
+
+static const char * const gcc_xo_gpll0_gpll4_gpll0_early_div[] = {
+ "xo",
+ "gpll0",
+ "gpll4",
+ "gpll0_early_div"
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_gpll3_gpll0_early_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 2 },
+ { P_GPLL3, 3 },
+ { P_GPLL0_EARLY_DIV, 6 }
+};
+
+static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll0_early_div[] = {
+ "xo",
+ "gpll0",
+ "gpll2",
+ "gpll3",
+ "gpll0_early_div"
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1_EARLY_DIV, 3 },
+ { P_GPLL1, 4 },
+ { P_GPLL4, 5 },
+ { P_GPLL0_EARLY_DIV, 6 }
+};
+
+static const char * const gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div[] = {
+ "xo",
+ "gpll0",
+ "gpll1_early_div",
+ "gpll1",
+ "gpll4",
+ "gpll0_early_div"
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll2_early_gpll0_early_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 2 },
+ { P_GPLL3, 3 },
+ { P_GPLL1, 4 },
+ { P_GPLL2_EARLY, 5 },
+ { P_GPLL0_EARLY_DIV, 6 }
+};
+
+static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll2_early_gpll0_early_div[] = {
+ "xo",
+ "gpll0",
+ "gpll2",
+ "gpll3",
+ "gpll1",
+ "gpll2_early",
+ "gpll0_early_div"
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll4_gpll0_early_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL2, 2 },
+ { P_GPLL3, 3 },
+ { P_GPLL1, 4 },
+ { P_GPLL4, 5 },
+ { P_GPLL0_EARLY_DIV, 6 }
+};
+
+static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll4_gpll0_early_div[] = {
+ "xo",
+ "gpll0",
+ "gpll2",
+ "gpll3",
+ "gpll1",
+ "gpll4",
+ "gpll0_early_div"
+};
+
+static struct clk_fixed_factor xo = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "xo",
+ .parent_names = (const char *[]){ "xo_board" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll0_early = {
+ .offset = 0x00000,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_early",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor gpll0_early_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_early_div",
+ .parent_names = (const char *[]){ "gpll0_early" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll0 = {
+ .offset = 0x00000,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "gpll0_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4_early = {
+ .offset = 0x77000,
+ .clkr = {
+ .enable_reg = 0x52000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4_early",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4 = {
+ .offset = 0x77000,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "gpll4_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_system_noc_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_EARLY_DIV, 6, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 system_noc_clk_src = {
+ .cmd_rcgr = 0x0401c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll2_early_gpll0_early_div_map,
+ .freq_tbl = ftbl_system_noc_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "system_noc_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll2_early_gpll0_early_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_config_noc_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0, 16, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 config_noc_clk_src = {
+ .cmd_rcgr = 0x0500c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_config_noc_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "config_noc_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_periph_noc_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0, 16, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(75000000, P_GPLL0, 8, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 periph_noc_clk_src = {
+ .cmd_rcgr = 0x06014,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_periph_noc_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "periph_noc_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb30_master_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(120000000, P_GPLL0, 5, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_master_clk_src = {
+ .cmd_rcgr = 0x0f014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_early_div_map,
+ .freq_tbl = ftbl_usb30_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_master_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb30_mock_utmi_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x0f028,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_early_div_map,
+ .freq_tbl = ftbl_usb30_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb30_mock_utmi_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb3_phy_aux_clk_src[] = {
+ F(1200000, P_XO, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb3_phy_aux_clk_src = {
+ .cmd_rcgr = 0x5000c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_sleep_clk_map,
+ .freq_tbl = ftbl_usb3_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb3_phy_aux_clk_src",
+ .parent_names = gcc_xo_sleep_clk,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb20_master_clk_src[] = {
+ F(120000000, P_GPLL0, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb20_master_clk_src = {
+ .cmd_rcgr = 0x12010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_early_div_map,
+ .freq_tbl = ftbl_usb20_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb20_master_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 usb20_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x12024,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll0_early_div_map,
+ .freq_tbl = ftbl_usb30_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb20_mock_utmi_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll0_early_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 15, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(96000000, P_GPLL4, 4, 0, 0),
+ F(192000000, P_GPLL4, 2, 0, 0),
+ F(384000000, P_GPLL4, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x13010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_gpll0_early_div_map,
+ .freq_tbl = ftbl_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll4_gpll0_early_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x13024,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_gpll0_early_div_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_ice_core_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll4_gpll0_early_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc2_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 15, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x14010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_map,
+ .freq_tbl = ftbl_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll4,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc3_apps_clk_src = {
+ .cmd_rcgr = 0x15010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll4_map,
+ .freq_tbl = ftbl_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll4,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* SDCC4 frequency table — like SDCC2's but tops out at 100 MHz. */
+static const struct freq_tbl ftbl_sdcc4_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 15, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+/* SDCC4 card clock RCG; XO/GPLL0 parents only (no GPLL4 here). */
+static struct clk_rcg2 sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x16010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * Shared SPI frequency table for every BLSP QUP SPI core clock.
+ * Sub-19.2 MHz rates divide the XO; higher rates come from GPLL0.
+ */
+static const struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+/* BLSP1 QUP1 SPI core clock RCG (8-bit M/N for fractional rates). */
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1900c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* Shared I2C frequency table for every BLSP QUP I2C core clock. */
+static const struct freq_tbl ftbl_blsp1_qup1_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+/* BLSP1 QUP1 I2C core clock RCG — integer divider only (no .mnd_width). */
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x19020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * Shared UART baud-reference table for every BLSP UART.
+ * Rates are 16x standard baud rates (e.g. 3.6864 MHz = 16 * 230400).
+ * Fractional pre-dividers (12.5, 9.5) rely on F() encoding the divider
+ * doubled as an integer, so they are exact, not float truncations.
+ */
+static const struct freq_tbl ftbl_blsp1_uart1_apps_clk_src[] = {
+ F(3686400, P_GPLL0, 1, 96, 15625),
+ F(7372800, P_GPLL0, 1, 192, 15625),
+ F(14745600, P_GPLL0, 1, 384, 15625),
+ F(16000000, P_GPLL0, 5, 2, 15),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 5, 1, 5),
+ F(32000000, P_GPLL0, 1, 4, 75),
+ F(40000000, P_GPLL0, 15, 0, 0),
+ F(46400000, P_GPLL0, 1, 29, 375),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ F(51200000, P_GPLL0, 1, 32, 375),
+ F(56000000, P_GPLL0, 1, 7, 75),
+ F(58982400, P_GPLL0, 1, 1536, 15625),
+ F(60000000, P_GPLL0, 10, 0, 0),
+ F(63157895, P_GPLL0, 9.5, 0, 0),
+ { }
+};
+
+/* BLSP1 UART1 core clock RCG (16-bit M/N for fine baud resolution). */
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x1a00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * Remaining BLSP1 QUP2..6 / UART2..6 root clocks. All follow the QUP1/UART1
+ * pattern exactly — SPI: 8-bit M/N, I2C: integer divider only, UART: 16-bit
+ * M/N — and reuse the three shared frequency tables above. Only the command
+ * RCGR offset and the clock name differ per instance.
+ */
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1b00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1b020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x1c00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1d00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1d020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x1e00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1f00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1f020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x2000c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2100c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x21020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
+ .cmd_rcgr = 0x2200c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart5_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2300c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x23020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
+ .cmd_rcgr = 0x2400c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart6_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * BLSP2 QUP1..6 / UART1..6 root clocks — a second BLSP instance that mirrors
+ * the BLSP1 definitions above, sharing the same SPI/I2C/UART frequency
+ * tables; only the register offsets and names differ.
+ */
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2600c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x26020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x2700c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart1_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2800c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x28020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x2900c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart2_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2a00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x2a020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x2b00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart3_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2c00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x2c020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x2d00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart4_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2e00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup5_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x2e020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup5_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart5_apps_clk_src = {
+ .cmd_rcgr = 0x2f00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart5_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
+ .cmd_rcgr = 0x3000c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup6_spi_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x30020,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup6_i2c_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart6_apps_clk_src = {
+ .cmd_rcgr = 0x3100c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart6_apps_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* PDM2 (pulse-density modulation) clock — single fixed rate from GPLL0. */
+static const struct freq_tbl ftbl_pdm2_clk_src[] = {
+ F(60000000, P_GPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pdm2_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* TSIF reference clock: ~105.495 kHz derived as XO * 1/182 via M/N. */
+static const struct freq_tbl ftbl_tsif_ref_clk_src[] = {
+ F(105495, P_XO, 1, 1, 182),
+ { }
+};
+
+static struct clk_rcg2 tsif_ref_clk_src = {
+ .cmd_rcgr = 0x36010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_aud_ref_clk_map,
+ .freq_tbl = ftbl_tsif_ref_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "tsif_ref_clk_src",
+ .parent_names = gcc_xo_gpll0_aud_ref_clk,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * GCC sleep clock RCG — single parent (the 32 kHz sleep clock source);
+ * no freq_tbl since it simply follows its only parent.
+ */
+static struct clk_rcg2 gcc_sleep_clk_src = {
+ .cmd_rcgr = 0x43014,
+ .hid_width = 5,
+ .parent_map = gcc_sleep_clk_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sleep_clk_src",
+ .parent_names = gcc_sleep_clk,
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * HMSS RBCPR (core power reduction) clock.
+ * NOTE(review): this borrows ftbl_usb30_mock_utmi_clk_src (defined earlier
+ * in the file) rather than having its own table — verify the borrowed
+ * rates are the ones intended for RBCPR.
+ */
+static struct clk_rcg2 hmss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x48040,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_usb30_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hmss_rbcpr_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* HMSS GPLL0 feed — no freq_tbl, so no rate programming via this RCG. */
+static struct clk_rcg2 hmss_gpll0_clk_src = {
+ .cmd_rcgr = 0x48058,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hmss_gpll0_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* Shared frequency table for the three general-purpose (GP) clocks. */
+static const struct freq_tbl ftbl_gp1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ { }
+};
+
+/* GP1..GP3 RCGs: identical except for the command RCGR offset and name. */
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_sleep_clk_gpll0_early_div_map,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_names = gcc_xo_gpll0_sleep_clk_gpll0_early_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x65004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_sleep_clk_gpll0_early_div_map,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_names = gcc_xo_gpll0_sleep_clk_gpll0_early_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0x66004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_sleep_clk_gpll0_early_div_map,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_names = gcc_xo_gpll0_sleep_clk_gpll0_early_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* PCIe auxiliary clock: ~1.0105 MHz = XO * 1/19 via the M/N counter. */
+static const struct freq_tbl ftbl_pcie_aux_clk_src[] = {
+ F(1010526, P_XO, 1, 1, 19),
+ { }
+};
+
+static struct clk_rcg2 pcie_aux_clk_src = {
+ .cmd_rcgr = 0x6c000,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_xo_sleep_clk_map,
+ .freq_tbl = ftbl_pcie_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pcie_aux_clk_src",
+ .parent_names = gcc_xo_sleep_clk,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* UFS AXI bus clock rates (240 MHz uses fractional pre-divider 2.5). */
+static const struct freq_tbl ftbl_ufs_axi_clk_src[] = {
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(240000000, P_GPLL0, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ufs_axi_clk_src = {
+ .cmd_rcgr = 0x75024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .freq_tbl = ftbl_ufs_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ufs_axi_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * UFS ICE core clock.
+ * NOTE(review): no .freq_tbl — like sdcc1_ice_core_clk_src above; confirm
+ * a frequency table was not accidentally dropped.
+ */
+static struct clk_rcg2 ufs_ice_core_clk_src = {
+ .cmd_rcgr = 0x76014,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ufs_ice_core_clk_src",
+ .parent_names = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * QSPI serial clock — six possible parents.
+ * NOTE(review): no .freq_tbl here either; confirm rate setting is handled
+ * elsewhere or a table was intentionally omitted.
+ */
+static struct clk_rcg2 qspi_ser_clk_src = {
+ .cmd_rcgr = 0x8b00c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "qspi_ser_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * Branch (gate) clocks for NoC and MMSS interfaces. In each, halt_reg ==
+ * enable_reg with BIT(0) as the enable, and CLK_SET_RATE_PARENT forwards
+ * rate requests to the named RCG parent.
+ */
+static struct clk_branch gcc_sys_noc_usb3_axi_clk = {
+ .halt_reg = 0x0f03c,
+ .clkr = {
+ .enable_reg = 0x0f03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_usb3_axi_clk",
+ .parent_names = (const char *[]){ "usb30_master_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_ufs_axi_clk = {
+ .halt_reg = 0x75038,
+ .clkr = {
+ .enable_reg = 0x75038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_ufs_axi_clk",
+ .parent_names = (const char *[]){ "ufs_axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_periph_noc_usb20_ahb_clk = {
+ .halt_reg = 0x6010,
+ .clkr = {
+ .enable_reg = 0x6010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_periph_noc_usb20_ahb_clk",
+ .parent_names = (const char *[]){ "usb20_master_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* CLK_IGNORE_UNUSED: must not be gated at boot even if unclaimed. */
+static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
+ .halt_reg = 0x9008,
+ .clkr = {
+ .enable_reg = 0x9008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_noc_cfg_ahb_clk",
+ .parent_names = (const char *[]){ "config_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * NOTE(review): no parent_names, yet CLK_SET_RATE_PARENT is set — the flag
+ * is inert without a parent; confirm whether a parent was meant here.
+ */
+static struct clk_branch gcc_mmss_bimc_gfx_clk = {
+ .halt_reg = 0x9010,
+ .clkr = {
+ .enable_reg = 0x9010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mmss_bimc_gfx_clk",
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * USB 3.0 / USB 2.0 controller and PHY branch clocks. Each gates one leaf
+ * clock fed by the correspondingly named RCG defined earlier in the file.
+ */
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0x0f008,
+ .clkr = {
+ .enable_reg = 0x0f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk",
+ .parent_names = (const char *[]){ "usb30_master_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0x0f00c,
+ .clkr = {
+ .enable_reg = 0x0f00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sleep_clk",
+ .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0x0f010,
+ .clkr = {
+ .enable_reg = 0x0f010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_names = (const char *[]){ "usb30_mock_utmi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+ .halt_reg = 0x50000,
+ .clkr = {
+ .enable_reg = 0x50000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk",
+ .parent_names = (const char *[]){ "usb3_phy_aux_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_reg = 0x50004,
+ .clkr = {
+ .enable_reg = 0x50004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk",
+ .parent_names = (const char *[]){ "usb3_phy_pipe_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_master_clk = {
+ .halt_reg = 0x12004,
+ .clkr = {
+ .enable_reg = 0x12004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb20_master_clk",
+ .parent_names = (const char *[]){ "usb20_master_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sleep_clk = {
+ .halt_reg = 0x12008,
+ .clkr = {
+ .enable_reg = 0x12008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb20_sleep_clk",
+ .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_mock_utmi_clk = {
+ .halt_reg = 0x1200c,
+ .clkr = {
+ .enable_reg = 0x1200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb20_mock_utmi_clk",
+ .parent_names = (const char *[]){ "usb20_mock_utmi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x6a004,
+ .clkr = {
+ .enable_reg = 0x6a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .parent_names = (const char *[]){ "periph_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * SDCC1..4 branch clocks. Each controller pairs a card/apps clock (fed by
+ * its RCG) with an AHB bus clock off periph_noc; SDCC1 also gates its ICE
+ * core clock.
+ */
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x13004,
+ .clkr = {
+ .enable_reg = 0x13004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_names = (const char *[]){ "sdcc1_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x13008,
+ .clkr = {
+ .enable_reg = 0x13008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .parent_names = (const char *[]){ "periph_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x13038,
+ .clkr = {
+ .enable_reg = 0x13038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_names = (const char *[]){ "sdcc1_ice_core_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x14004,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]){ "sdcc2_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x14008,
+ .clkr = {
+ .enable_reg = 0x14008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .parent_names = (const char *[]){ "periph_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc3_apps_clk = {
+ .halt_reg = 0x15004,
+ .clkr = {
+ .enable_reg = 0x15004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc3_apps_clk",
+ .parent_names = (const char *[]){ "sdcc3_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc3_ahb_clk = {
+ .halt_reg = 0x15008,
+ .clkr = {
+ .enable_reg = 0x15008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc3_ahb_clk",
+ .parent_names = (const char *[]){ "periph_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x16004,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_names = (const char *[]){ "sdcc4_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x16008,
+ .clkr = {
+ .enable_reg = 0x16008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .parent_names = (const char *[]){ "periph_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * BLSP1 bus clocks use the shared vote register (0x52004): enable is a
+ * per-client vote bit rather than a local gate, and BRANCH_HALT_VOTED
+ * tells the halt check the clock may legitimately stay on via other
+ * voters. halt_reg still points at the local status register.
+ */
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .parent_names = (const char *[]){ "periph_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_sleep_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_sleep_clk",
+ .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * BLSP1 QUP/UART leaf gates. One branch per SPI/I2C/UART core clock,
+ * each parented to its RCG above; halt_reg == enable_reg, BIT(0) enable,
+ * CLK_SET_RATE_PARENT so consumers' rate requests reach the RCG.
+ */
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x19004,
+ .clkr = {
+ .enable_reg = 0x19004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_qup1_spi_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x19008,
+ .clkr = {
+ .enable_reg = 0x19008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_qup1_i2c_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x1a004,
+ .clkr = {
+ .enable_reg = 0x1a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_uart1_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x1b004,
+ .clkr = {
+ .enable_reg = 0x1b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_qup2_spi_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x1b008,
+ .clkr = {
+ .enable_reg = 0x1b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_qup2_i2c_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x1c004,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_uart2_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x1d004,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_qup3_spi_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x1d008,
+ .clkr = {
+ .enable_reg = 0x1d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_qup3_i2c_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x1e004,
+ .clkr = {
+ .enable_reg = 0x1e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_uart3_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x1f004,
+ .clkr = {
+ .enable_reg = 0x1f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_qup4_spi_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x1f008,
+ .clkr = {
+ .enable_reg = 0x1f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_qup4_i2c_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+ .halt_reg = 0x20004,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_uart4_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+ .halt_reg = 0x21004,
+ .clkr = {
+ .enable_reg = 0x21004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_spi_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_qup5_spi_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+ .halt_reg = 0x21008,
+ .clkr = {
+ .enable_reg = 0x21008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_qup5_i2c_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart5_apps_clk = {
+ .halt_reg = 0x22004,
+ .clkr = {
+ .enable_reg = 0x22004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart5_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_uart5_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+ .halt_reg = 0x23004,
+ .clkr = {
+ .enable_reg = 0x23004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_spi_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_qup6_spi_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+ .halt_reg = 0x23008,
+ .clkr = {
+ .enable_reg = 0x23008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_qup6_i2c_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart6_apps_clk = {
+ .halt_reg = 0x24004,
+ .clkr = {
+ .enable_reg = 0x24004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart6_apps_clk",
+ .parent_names = (const char *[]){ "blsp1_uart6_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * BLSP2 bus and peripheral branch clocks.
+ *
+ * The AHB and sleep clocks are voted: they are enabled via a bit in the
+ * shared enable register 0x52004 (not the CBCR itself), while halt status
+ * is still polled at the local halt_reg; BRANCH_HALT_VOTED reflects that
+ * the clock may legitimately stay running because of other voters.
+ * The QUP/UART branches below are locally gated like their BLSP1 peers.
+ */
+static struct clk_branch gcc_blsp2_ahb_clk = {
+ .halt_reg = 0x25004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_ahb_clk",
+ .parent_names = (const char *[]){ "periph_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_sleep_clk = {
+ .halt_reg = 0x25008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_sleep_clk",
+ .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+ .halt_reg = 0x26004,
+ .clkr = {
+ .enable_reg = 0x26004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_qup1_spi_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+ .halt_reg = 0x26008,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_qup1_i2c_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+ .halt_reg = 0x27004,
+ .clkr = {
+ .enable_reg = 0x27004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart1_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_uart1_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+ .halt_reg = 0x28004,
+ .clkr = {
+ .enable_reg = 0x28004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_qup2_spi_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+ .halt_reg = 0x28008,
+ .clkr = {
+ .enable_reg = 0x28008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_qup2_i2c_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+ .halt_reg = 0x29004,
+ .clkr = {
+ .enable_reg = 0x29004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart2_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_uart2_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+ .halt_reg = 0x2a004,
+ .clkr = {
+ .enable_reg = 0x2a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_qup3_spi_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+ .halt_reg = 0x2a008,
+ .clkr = {
+ .enable_reg = 0x2a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_qup3_i2c_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart3_apps_clk = {
+ .halt_reg = 0x2b004,
+ .clkr = {
+ .enable_reg = 0x2b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart3_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_uart3_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x2c004,
+ .clkr = {
+ .enable_reg = 0x2c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_qup4_spi_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x2c008,
+ .clkr = {
+ .enable_reg = 0x2c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_qup4_i2c_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart4_apps_clk = {
+ .halt_reg = 0x2d004,
+ .clkr = {
+ .enable_reg = 0x2d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart4_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_uart4_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
+ .halt_reg = 0x2e004,
+ .clkr = {
+ .enable_reg = 0x2e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup5_spi_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_qup5_spi_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
+ .halt_reg = 0x2e008,
+ .clkr = {
+ .enable_reg = 0x2e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup5_i2c_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_qup5_i2c_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart5_apps_clk = {
+ .halt_reg = 0x2f004,
+ .clkr = {
+ .enable_reg = 0x2f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart5_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_uart5_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
+ .halt_reg = 0x30004,
+ .clkr = {
+ .enable_reg = 0x30004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup6_spi_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_qup6_spi_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
+ .halt_reg = 0x30008,
+ .clkr = {
+ .enable_reg = 0x30008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup6_i2c_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_qup6_i2c_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart6_apps_clk = {
+ .halt_reg = 0x31004,
+ .clkr = {
+ .enable_reg = 0x31004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_uart6_apps_clk",
+ .parent_names = (const char *[]){ "blsp2_uart6_apps_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * PDM, PRNG, TSIF and boot-ROM branch clocks.  gcc_prng_ahb_clk and
+ * gcc_boot_rom_ahb_clk are voted through shared register 0x52004 (bits 13
+ * and 10) with halt status polled at their local CBCRs; the rest are
+ * locally gated via BIT(0) of their own CBCR.
+ */
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .parent_names = (const char *[]){ "periph_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){ "pdm2_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x34004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .parent_names = (const char *[]){ "config_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+ .halt_reg = 0x36004,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ahb_clk",
+ .parent_names = (const char *[]){ "periph_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+ .halt_reg = 0x36008,
+ .clkr = {
+ .enable_reg = 0x36008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_ref_clk",
+ .parent_names = (const char *[]){ "tsif_ref_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_tsif_inactivity_timers_clk = {
+ .halt_reg = 0x3600c,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_tsif_inactivity_timers_clk",
+ .parent_names = (const char *[]){ "gcc_sleep_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x52004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .parent_names = (const char *[]){ "config_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* BIMC GFX, HMSS RBCPR and general-purpose (GP1..GP3) branch clocks. */
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x46018,
+ .clkr = {
+ .enable_reg = 0x46018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gfx_clk",
+ /*
+ * NOTE(review): no parent_names here, so CLK_SET_RATE_PARENT
+ * has nothing to forward to — either the flag is dead or a
+ * parent entry is missing; confirm against the clock plan.
+ */
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_hmss_rbcpr_clk = {
+ .halt_reg = 0x4800c,
+ .clkr = {
+ .enable_reg = 0x4800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hmss_rbcpr_clk",
+ .parent_names = (const char *[]){ "hmss_rbcpr_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){ "gp1_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x65000,
+ .clkr = {
+ .enable_reg = 0x65000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){ "gp2_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x66000,
+ .clkr = {
+ .enable_reg = 0x66000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){ "gp3_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * PCIe 0/1/2 and PCIe PHY branch clocks.  Each controller gets five
+ * branches at a fixed layout within its register block: slave AXI
+ * (+0x008), master AXI (+0x00c), config AHB (+0x010), aux (+0x014) and
+ * PIPE (+0x018 for controllers 0 and 1).
+ */
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x6b008,
+ .clkr = {
+ .enable_reg = 0x6b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .parent_names = (const char *[]){ "system_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x6b00c,
+ .clkr = {
+ .enable_reg = 0x6b00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .parent_names = (const char *[]){ "system_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x6b010,
+ .clkr = {
+ .enable_reg = 0x6b010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .parent_names = (const char *[]){ "config_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x6b014,
+ .clkr = {
+ .enable_reg = 0x6b014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_names = (const char *[]){ "pcie_aux_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b018,
+ .clkr = {
+ .enable_reg = 0x6b018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_names = (const char *[]){ "pcie_0_pipe_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x6d008,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .parent_names = (const char *[]){ "system_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x6d00c,
+ .clkr = {
+ .enable_reg = 0x6d00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .parent_names = (const char *[]){ "system_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x6d010,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .parent_names = (const char *[]){ "config_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x6d014,
+ .clkr = {
+ .enable_reg = 0x6d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_names = (const char *[]){ "pcie_aux_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x6d018,
+ .clkr = {
+ .enable_reg = 0x6d018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_names = (const char *[]){ "pcie_1_pipe_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ .halt_reg = 0x6e008,
+ .clkr = {
+ .enable_reg = 0x6e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_slv_axi_clk",
+ .parent_names = (const char *[]){ "system_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ .halt_reg = 0x6e00c,
+ .clkr = {
+ .enable_reg = 0x6e00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_mstr_axi_clk",
+ .parent_names = (const char *[]){ "system_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ .halt_reg = 0x6e010,
+ .clkr = {
+ .enable_reg = 0x6e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_cfg_ahb_clk",
+ .parent_names = (const char *[]){ "config_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_aux_clk = {
+ .halt_reg = 0x6e014,
+ .clkr = {
+ .enable_reg = 0x6e014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_aux_clk",
+ .parent_names = (const char *[]){ "pcie_aux_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * NOTE(review): 0x6e108 breaks the +0x018 PIPE pattern used by PCIe 0
+ * (0x6b018) and PCIe 1 (0x6d018), which would predict 0x6e018 here.
+ * It may well be correct (PCIe2's register block can differ), but it is
+ * worth confirming against the MSM8996 GCC register map.
+ */
+static struct clk_branch gcc_pcie_2_pipe_clk = {
+ .halt_reg = 0x6e108,
+ .clkr = {
+ .enable_reg = 0x6e108,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_2_pipe_clk",
+ .parent_names = (const char *[]){ "pcie_2_pipe_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_cfg_ahb_clk = {
+ .halt_reg = 0x6f004,
+ .clkr = {
+ .enable_reg = 0x6f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_cfg_ahb_clk",
+ .parent_names = (const char *[]){ "config_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_aux_clk = {
+ .halt_reg = 0x6f008,
+ .clkr = {
+ .enable_reg = 0x6f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_phy_aux_clk",
+ .parent_names = (const char *[]){ "pcie_aux_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * UFS branch clocks and their internal fixed-factor dividers.  The
+ * ufs_tx_cfg / ufs_rx_cfg sources are 1/16 taps off ufs_axi_clk_src and
+ * the unipro core runs off a 1/2 postdiv of ufs_ice_core_clk_src; the
+ * fixed factors are clk_hw-only and are registered via gcc_msm8996_hws.
+ */
+static struct clk_branch gcc_ufs_axi_clk = {
+ .halt_reg = 0x75008,
+ .clkr = {
+ .enable_reg = 0x75008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_axi_clk",
+ .parent_names = (const char *[]){ "ufs_axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_ahb_clk = {
+ .halt_reg = 0x7500c,
+ .clkr = {
+ .enable_reg = 0x7500c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ahb_clk",
+ .parent_names = (const char *[]){ "config_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Fixed 1/16 divider from ufs_axi_clk_src feeding the TX config branch. */
+static struct clk_fixed_factor ufs_tx_cfg_clk_src = {
+ .mult = 1,
+ .div = 16,
+ .hw.init = &(struct clk_init_data){
+ .name = "ufs_tx_cfg_clk_src",
+ .parent_names = (const char *[]){ "ufs_axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_branch gcc_ufs_tx_cfg_clk = {
+ .halt_reg = 0x75010,
+ .clkr = {
+ .enable_reg = 0x75010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_tx_cfg_clk",
+ .parent_names = (const char *[]){ "ufs_tx_cfg_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Fixed 1/16 divider from ufs_axi_clk_src feeding the RX config branch. */
+static struct clk_fixed_factor ufs_rx_cfg_clk_src = {
+ .mult = 1,
+ .div = 16,
+ .hw.init = &(struct clk_init_data){
+ .name = "ufs_rx_cfg_clk_src",
+ .parent_names = (const char *[]){ "ufs_axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_branch gcc_ufs_rx_cfg_clk = {
+ .halt_reg = 0x75014,
+ .clkr = {
+ .enable_reg = 0x75014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_rx_cfg_clk",
+ .parent_names = (const char *[]){ "ufs_rx_cfg_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
+ .halt_reg = 0x75018,
+ .clkr = {
+ .enable_reg = 0x75018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_tx_symbol_0_clk",
+ .parent_names = (const char *[]){ "ufs_tx_symbol_0_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
+ .halt_reg = 0x7501c,
+ .clkr = {
+ .enable_reg = 0x7501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_rx_symbol_0_clk",
+ .parent_names = (const char *[]){ "ufs_rx_symbol_0_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
+ .halt_reg = 0x75020,
+ .clkr = {
+ .enable_reg = 0x75020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_rx_symbol_1_clk",
+ .parent_names = (const char *[]){ "ufs_rx_symbol_1_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Fixed 1/2 postdivider of ufs_ice_core_clk_src; feeds the unipro core. */
+static struct clk_fixed_factor ufs_ice_core_postdiv_clk_src = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "ufs_ice_core_postdiv_clk_src",
+ .parent_names = (const char *[]){ "ufs_ice_core_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static struct clk_branch gcc_ufs_unipro_core_clk = {
+ .halt_reg = 0x7600c,
+ .clkr = {
+ .enable_reg = 0x7600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_unipro_core_clk",
+ .parent_names = (const char *[]){ "ufs_ice_core_postdiv_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_ice_core_clk = {
+ .halt_reg = 0x76010,
+ .clkr = {
+ .enable_reg = 0x76010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_ice_core_clk",
+ .parent_names = (const char *[]){ "ufs_ice_core_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * The two *_core_clk branches below have no halt_reg; BRANCH_HALT_DELAY
+ * means the driver just waits rather than polling a halt bit.
+ */
+static struct clk_branch gcc_ufs_sys_clk_core_clk = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x76030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_sys_clk_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_tx_symbol_clk_core_clk = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x76034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_tx_symbol_clk_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * Aggregate NoC (aggre0/1/2), SMMU and QSPI branch clocks.  All are
+ * locally gated (enable_reg == halt_reg, BIT(0)) and track their NoC or
+ * peripheral source via CLK_SET_RATE_PARENT.
+ */
+static struct clk_branch gcc_aggre0_snoc_axi_clk = {
+ .halt_reg = 0x81008,
+ .clkr = {
+ .enable_reg = 0x81008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre0_snoc_axi_clk",
+ .parent_names = (const char *[]){ "system_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre0_cnoc_ahb_clk = {
+ .halt_reg = 0x8100c,
+ .clkr = {
+ .enable_reg = 0x8100c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre0_cnoc_ahb_clk",
+ .parent_names = (const char *[]){ "config_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_smmu_aggre0_axi_clk = {
+ .halt_reg = 0x81014,
+ .clkr = {
+ .enable_reg = 0x81014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_smmu_aggre0_axi_clk",
+ .parent_names = (const char *[]){ "system_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_smmu_aggre0_ahb_clk = {
+ .halt_reg = 0x81018,
+ .clkr = {
+ .enable_reg = 0x81018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_smmu_aggre0_ahb_clk",
+ .parent_names = (const char *[]){ "config_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre1_pnoc_ahb_clk = {
+ .halt_reg = 0x82014,
+ .clkr = {
+ .enable_reg = 0x82014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre1_pnoc_ahb_clk",
+ .parent_names = (const char *[]){ "periph_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre2_ufs_axi_clk = {
+ .halt_reg = 0x83014,
+ .clkr = {
+ .enable_reg = 0x83014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre2_ufs_axi_clk",
+ .parent_names = (const char *[]){ "ufs_axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre2_usb3_axi_clk = {
+ .halt_reg = 0x83018,
+ .clkr = {
+ .enable_reg = 0x83018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre2_usb3_axi_clk",
+ .parent_names = (const char *[]){ "usb30_master_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_ahb_clk = {
+ .halt_reg = 0x8b004,
+ .clkr = {
+ .enable_reg = 0x8b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_ahb_clk",
+ .parent_names = (const char *[]){ "periph_noc_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qspi_ser_clk = {
+ .halt_reg = 0x8b008,
+ .clkr = {
+ .enable_reg = 0x8b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qspi_ser_clk",
+ .parent_names = (const char *[]){ "qspi_ser_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * PHY reference-clock gates.  All are fed directly by the crystal ("xo"),
+ * so — unlike the branches above — they deliberately omit
+ * CLK_SET_RATE_PARENT: enabling them must never retarget the XO rate.
+ */
+static struct clk_branch gcc_usb3_clkref_clk = {
+ .halt_reg = 0x8800C,
+ .clkr = {
+ .enable_reg = 0x8800C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_clkref_clk",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_hdmi_clkref_clk = {
+ .halt_reg = 0x88000,
+ .clkr = {
+ .enable_reg = 0x88000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hdmi_clkref_clk",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_clkref_clk = {
+ .halt_reg = 0x88008,
+ .clkr = {
+ .enable_reg = 0x88008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_clkref_clk",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_clkref_clk = {
+ .halt_reg = 0x88010,
+ .clkr = {
+ .enable_reg = 0x88010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_clkref_clk",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx2_usb2_clkref_clk = {
+ .halt_reg = 0x88014,
+ .clkr = {
+ .enable_reg = 0x88014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_rx2_usb2_clkref_clk",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx1_usb2_clkref_clk = {
+ .halt_reg = 0x88018,
+ .clkr = {
+ .enable_reg = 0x88018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_rx1_usb2_clkref_clk",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/*
+ * clk_hw-only clocks (no regmap backing): the XO root, the gpll0 early
+ * divider and the UFS fixed-factor dividers declared above.  These are
+ * registered separately from the gcc_msm8996_clocks regmap array.
+ */
+static struct clk_hw *gcc_msm8996_hws[] = {
+ &xo.hw,
+ &gpll0_early_div.hw,
+ &ufs_tx_cfg_clk_src.hw,
+ &ufs_rx_cfg_clk_src.hw,
+ &ufs_ice_core_postdiv_clk_src.hw,
+};
+
+/*
+ * GDSC power domains controlled by this clock controller.  Domains
+ * marked VOTABLE are shared with other masters, so "off" only removes
+ * this HLOS vote (the hlos1_vote_* entries use dedicated vote
+ * registers); the rest are directly collapsible via their gdscr.
+ */
+static struct gdsc aggre0_noc_gdsc = {
+ .gdscr = 0x81004,
+ .gds_hw_ctrl = 0x81028,
+ .pd = {
+ .name = "aggre0_noc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_aggre0_noc_gdsc = {
+ .gdscr = 0x7d024,
+ .pd = {
+ .name = "hlos1_vote_aggre0_noc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_lpass_adsp_gdsc = {
+ .gdscr = 0x7d034,
+ .pd = {
+ .name = "hlos1_vote_lpass_adsp",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_lpass_core_gdsc = {
+ .gdscr = 0x7d038,
+ .pd = {
+ .name = "hlos1_vote_lpass_core",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc usb30_gdsc = {
+ .gdscr = 0xf004,
+ .pd = {
+ .name = "usb30",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie0_gdsc = {
+ .gdscr = 0x6b004,
+ .pd = {
+ .name = "pcie0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie1_gdsc = {
+ .gdscr = 0x6d004,
+ .pd = {
+ .name = "pcie1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie2_gdsc = {
+ .gdscr = 0x6e004,
+ .pd = {
+ .name = "pcie2",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_gdsc = {
+ .gdscr = 0x75004,
+ .pd = {
+ .name = "ufs",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct clk_regmap *gcc_msm8996_clocks[] = {
+ [GPLL0_EARLY] = &gpll0_early.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
+ [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
+ [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
+ [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+ [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+ [USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+ [USB20_MASTER_CLK_SRC] = &usb20_master_clk_src.clkr,
+ [USB20_MOCK_UTMI_CLK_SRC] = &usb20_mock_utmi_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [SDCC3_APPS_CLK_SRC] = &sdcc3_apps_clk_src.clkr,
+ [SDCC4_APPS_CLK_SRC] = &sdcc4_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_UART4_APPS_CLK_SRC] = &blsp1_uart4_apps_clk_src.clkr,
+ [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+ [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+ [BLSP1_UART5_APPS_CLK_SRC] = &blsp1_uart5_apps_clk_src.clkr,
+ [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+ [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+ [BLSP1_UART6_APPS_CLK_SRC] = &blsp1_uart6_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_UART3_APPS_CLK_SRC] = &blsp2_uart3_apps_clk_src.clkr,
+ [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [BLSP2_UART4_APPS_CLK_SRC] = &blsp2_uart4_apps_clk_src.clkr,
+ [BLSP2_QUP5_SPI_APPS_CLK_SRC] = &blsp2_qup5_spi_apps_clk_src.clkr,
+ [BLSP2_QUP5_I2C_APPS_CLK_SRC] = &blsp2_qup5_i2c_apps_clk_src.clkr,
+ [BLSP2_UART5_APPS_CLK_SRC] = &blsp2_uart5_apps_clk_src.clkr,
+ [BLSP2_QUP6_SPI_APPS_CLK_SRC] = &blsp2_qup6_spi_apps_clk_src.clkr,
+ [BLSP2_QUP6_I2C_APPS_CLK_SRC] = &blsp2_qup6_i2c_apps_clk_src.clkr,
+ [BLSP2_UART6_APPS_CLK_SRC] = &blsp2_uart6_apps_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [TSIF_REF_CLK_SRC] = &tsif_ref_clk_src.clkr,
+ [GCC_SLEEP_CLK_SRC] = &gcc_sleep_clk_src.clkr,
+ [HMSS_RBCPR_CLK_SRC] = &hmss_rbcpr_clk_src.clkr,
+ [HMSS_GPLL0_CLK_SRC] = &hmss_gpll0_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [PCIE_AUX_CLK_SRC] = &pcie_aux_clk_src.clkr,
+ [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
+ [UFS_ICE_CORE_CLK_SRC] = &ufs_ice_core_clk_src.clkr,
+ [QSPI_SER_CLK_SRC] = &qspi_ser_clk_src.clkr,
+ [GCC_SYS_NOC_USB3_AXI_CLK] = &gcc_sys_noc_usb3_axi_clk.clkr,
+ [GCC_SYS_NOC_UFS_AXI_CLK] = &gcc_sys_noc_ufs_axi_clk.clkr,
+ [GCC_PERIPH_NOC_USB20_AHB_CLK] = &gcc_periph_noc_usb20_ahb_clk.clkr,
+ [GCC_MMSS_NOC_CFG_AHB_CLK] = &gcc_mmss_noc_cfg_ahb_clk.clkr,
+ [GCC_MMSS_BIMC_GFX_CLK] = &gcc_mmss_bimc_gfx_clk.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
+ [GCC_USB20_MASTER_CLK] = &gcc_usb20_master_clk.clkr,
+ [GCC_USB20_SLEEP_CLK] = &gcc_usb20_sleep_clk.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC3_APPS_CLK] = &gcc_sdcc3_apps_clk.clkr,
+ [GCC_SDCC3_AHB_CLK] = &gcc_sdcc3_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP1_UART5_APPS_CLK] = &gcc_blsp1_uart5_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP1_UART6_APPS_CLK] = &gcc_blsp1_uart6_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_SLEEP_CLK] = &gcc_blsp2_sleep_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_UART3_APPS_CLK] = &gcc_blsp2_uart3_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP2_UART4_APPS_CLK] = &gcc_blsp2_uart4_apps_clk.clkr,
+ [GCC_BLSP2_QUP5_SPI_APPS_CLK] = &gcc_blsp2_qup5_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP5_I2C_APPS_CLK] = &gcc_blsp2_qup5_i2c_apps_clk.clkr,
+ [GCC_BLSP2_UART5_APPS_CLK] = &gcc_blsp2_uart5_apps_clk.clkr,
+ [GCC_BLSP2_QUP6_SPI_APPS_CLK] = &gcc_blsp2_qup6_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP6_I2C_APPS_CLK] = &gcc_blsp2_qup6_i2c_apps_clk.clkr,
+ [GCC_BLSP2_UART6_APPS_CLK] = &gcc_blsp2_uart6_apps_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+ [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+ [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_HMSS_RBCPR_CLK] = &gcc_hmss_rbcpr_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_2_SLV_AXI_CLK] = &gcc_pcie_2_slv_axi_clk.clkr,
+ [GCC_PCIE_2_MSTR_AXI_CLK] = &gcc_pcie_2_mstr_axi_clk.clkr,
+ [GCC_PCIE_2_CFG_AHB_CLK] = &gcc_pcie_2_cfg_ahb_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK] = &gcc_pcie_2_aux_clk.clkr,
+ [GCC_PCIE_2_PIPE_CLK] = &gcc_pcie_2_pipe_clk.clkr,
+ [GCC_PCIE_PHY_CFG_AHB_CLK] = &gcc_pcie_phy_cfg_ahb_clk.clkr,
+ [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr,
+ [GCC_UFS_AXI_CLK] = &gcc_ufs_axi_clk.clkr,
+ [GCC_UFS_AHB_CLK] = &gcc_ufs_ahb_clk.clkr,
+ [GCC_UFS_TX_CFG_CLK] = &gcc_ufs_tx_cfg_clk.clkr,
+ [GCC_UFS_RX_CFG_CLK] = &gcc_ufs_rx_cfg_clk.clkr,
+ [GCC_UFS_TX_SYMBOL_0_CLK] = &gcc_ufs_tx_symbol_0_clk.clkr,
+ [GCC_UFS_RX_SYMBOL_0_CLK] = &gcc_ufs_rx_symbol_0_clk.clkr,
+ [GCC_UFS_RX_SYMBOL_1_CLK] = &gcc_ufs_rx_symbol_1_clk.clkr,
+ [GCC_UFS_UNIPRO_CORE_CLK] = &gcc_ufs_unipro_core_clk.clkr,
+ [GCC_UFS_ICE_CORE_CLK] = &gcc_ufs_ice_core_clk.clkr,
+ [GCC_UFS_SYS_CLK_CORE_CLK] = &gcc_ufs_sys_clk_core_clk.clkr,
+ [GCC_UFS_TX_SYMBOL_CLK_CORE_CLK] = &gcc_ufs_tx_symbol_clk_core_clk.clkr,
+ [GCC_AGGRE0_SNOC_AXI_CLK] = &gcc_aggre0_snoc_axi_clk.clkr,
+ [GCC_AGGRE0_CNOC_AHB_CLK] = &gcc_aggre0_cnoc_ahb_clk.clkr,
+ [GCC_SMMU_AGGRE0_AXI_CLK] = &gcc_smmu_aggre0_axi_clk.clkr,
+ [GCC_SMMU_AGGRE0_AHB_CLK] = &gcc_smmu_aggre0_ahb_clk.clkr,
+ [GCC_AGGRE1_PNOC_AHB_CLK] = &gcc_aggre1_pnoc_ahb_clk.clkr,
+ [GCC_AGGRE2_UFS_AXI_CLK] = &gcc_aggre2_ufs_axi_clk.clkr,
+ [GCC_AGGRE2_USB3_AXI_CLK] = &gcc_aggre2_usb3_axi_clk.clkr,
+ [GCC_QSPI_AHB_CLK] = &gcc_qspi_ahb_clk.clkr,
+ [GCC_QSPI_SER_CLK] = &gcc_qspi_ser_clk.clkr,
+ [GCC_USB3_CLKREF_CLK] = &gcc_usb3_clkref_clk.clkr,
+ [GCC_HDMI_CLKREF_CLK] = &gcc_hdmi_clkref_clk.clkr,
+ [GCC_UFS_CLKREF_CLK] = &gcc_ufs_clkref_clk.clkr,
+ [GCC_PCIE_CLKREF_CLK] = &gcc_pcie_clkref_clk.clkr,
+ [GCC_RX2_USB2_CLKREF_CLK] = &gcc_rx2_usb2_clkref_clk.clkr,
+ [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
+};
+
+static struct gdsc *gcc_msm8996_gdscs[] = {
+ [AGGRE0_NOC_GDSC] = &aggre0_noc_gdsc,
+ [HLOS1_VOTE_AGGRE0_NOC_GDSC] = &hlos1_vote_aggre0_noc_gdsc,
+ [HLOS1_VOTE_LPASS_ADSP_GDSC] = &hlos1_vote_lpass_adsp_gdsc,
+ [HLOS1_VOTE_LPASS_CORE_GDSC] = &hlos1_vote_lpass_core_gdsc,
+ [USB30_GDSC] = &usb30_gdsc,
+ [PCIE0_GDSC] = &pcie0_gdsc,
+ [PCIE1_GDSC] = &pcie1_gdsc,
+ [PCIE2_GDSC] = &pcie2_gdsc,
+ [UFS_GDSC] = &ufs_gdsc,
+};
+
+static const struct qcom_reset_map gcc_msm8996_resets[] = {
+ [GCC_SYSTEM_NOC_BCR] = { 0x4000 },
+ [GCC_CONFIG_NOC_BCR] = { 0x5000 },
+ [GCC_PERIPH_NOC_BCR] = { 0x6000 },
+ [GCC_IMEM_BCR] = { 0x8000 },
+ [GCC_MMSS_BCR] = { 0x9000 },
+ [GCC_PIMEM_BCR] = { 0x0a000 },
+ [GCC_QDSS_BCR] = { 0x0c000 },
+ [GCC_USB_30_BCR] = { 0x0f000 },
+ [GCC_USB_20_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12038 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x1203c },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_SDCC1_BCR] = { 0x13000 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC3_BCR] = { 0x15000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_BLSP1_BCR] = { 0x17000 },
+ [GCC_BLSP1_QUP1_BCR] = { 0x19000 },
+ [GCC_BLSP1_UART1_BCR] = { 0x1a000 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x1b000 },
+ [GCC_BLSP1_UART2_BCR] = { 0x1c000 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x1d000 },
+ [GCC_BLSP1_UART3_BCR] = { 0x1e000 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x1f000 },
+ [GCC_BLSP1_UART4_BCR] = { 0x20000 },
+ [GCC_BLSP1_QUP5_BCR] = { 0x21000 },
+ [GCC_BLSP1_UART5_BCR] = { 0x22000 },
+ [GCC_BLSP1_QUP6_BCR] = { 0x23000 },
+ [GCC_BLSP1_UART6_BCR] = { 0x24000 },
+ [GCC_BLSP2_BCR] = { 0x25000 },
+ [GCC_BLSP2_QUP1_BCR] = { 0x26000 },
+ [GCC_BLSP2_UART1_BCR] = { 0x27000 },
+ [GCC_BLSP2_QUP2_BCR] = { 0x28000 },
+ [GCC_BLSP2_UART2_BCR] = { 0x29000 },
+ [GCC_BLSP2_QUP3_BCR] = { 0x2a000 },
+ [GCC_BLSP2_UART3_BCR] = { 0x2b000 },
+ [GCC_BLSP2_QUP4_BCR] = { 0x2c000 },
+ [GCC_BLSP2_UART4_BCR] = { 0x2d000 },
+ [GCC_BLSP2_QUP5_BCR] = { 0x2e000 },
+ [GCC_BLSP2_UART5_BCR] = { 0x2f000 },
+ [GCC_BLSP2_QUP6_BCR] = { 0x30000 },
+ [GCC_BLSP2_UART6_BCR] = { 0x31000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_PRNG_BCR] = { 0x34000 },
+ [GCC_TSIF_BCR] = { 0x36000 },
+ [GCC_TCSR_BCR] = { 0x37000 },
+ [GCC_BOOT_ROM_BCR] = { 0x38000 },
+ [GCC_MSG_RAM_BCR] = { 0x39000 },
+ [GCC_TLMM_BCR] = { 0x3a000 },
+ [GCC_MPM_BCR] = { 0x3b000 },
+ [GCC_SEC_CTRL_BCR] = { 0x3d000 },
+ [GCC_SPMI_BCR] = { 0x3f000 },
+ [GCC_SPDM_BCR] = { 0x40000 },
+ [GCC_CE1_BCR] = { 0x41000 },
+ [GCC_BIMC_BCR] = { 0x44000 },
+ [GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x49000 },
+ [GCC_SNOC_BUS_TIMEOUT2_BCR] = { 0x49008 },
+ [GCC_SNOC_BUS_TIMEOUT1_BCR] = { 0x49010 },
+ [GCC_SNOC_BUS_TIMEOUT3_BCR] = { 0x49018 },
+ [GCC_SNOC_BUS_TIMEOUT_EXTREF_BCR] = { 0x49020 },
+ [GCC_PNOC_BUS_TIMEOUT0_BCR] = { 0x4a000 },
+ [GCC_PNOC_BUS_TIMEOUT1_BCR] = { 0x4a008 },
+ [GCC_PNOC_BUS_TIMEOUT2_BCR] = { 0x4a010 },
+ [GCC_PNOC_BUS_TIMEOUT3_BCR] = { 0x4a018 },
+ [GCC_PNOC_BUS_TIMEOUT4_BCR] = { 0x4a020 },
+ [GCC_CNOC_BUS_TIMEOUT0_BCR] = { 0x4b000 },
+ [GCC_CNOC_BUS_TIMEOUT1_BCR] = { 0x4b008 },
+ [GCC_CNOC_BUS_TIMEOUT2_BCR] = { 0x4b010 },
+ [GCC_CNOC_BUS_TIMEOUT3_BCR] = { 0x4b018 },
+ [GCC_CNOC_BUS_TIMEOUT4_BCR] = { 0x4b020 },
+ [GCC_CNOC_BUS_TIMEOUT5_BCR] = { 0x4b028 },
+ [GCC_CNOC_BUS_TIMEOUT6_BCR] = { 0x4b030 },
+ [GCC_CNOC_BUS_TIMEOUT7_BCR] = { 0x4b038 },
+ [GCC_CNOC_BUS_TIMEOUT8_BCR] = { 0x80000 },
+ [GCC_CNOC_BUS_TIMEOUT9_BCR] = { 0x80008 },
+ [GCC_CNOC_BUS_TIMEOUT_EXTREF_BCR] = { 0x80010 },
+ [GCC_APB2JTAG_BCR] = { 0x4c000 },
+ [GCC_RBCPR_CX_BCR] = { 0x4e000 },
+ [GCC_RBCPR_MX_BCR] = { 0x4f000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_1_BCR] = { 0x6d000 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x6d038 },
+ [GCC_PCIE_2_BCR] = { 0x6e000 },
+ [GCC_PCIE_2_PHY_BCR] = { 0x6e038 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_DCD_BCR] = { 0x70000 },
+ [GCC_OBT_ODT_BCR] = { 0x73000 },
+ [GCC_UFS_BCR] = { 0x75000 },
+ [GCC_SSC_BCR] = { 0x63000 },
+ [GCC_VS_BCR] = { 0x7a000 },
+ [GCC_AGGRE0_NOC_BCR] = { 0x81000 },
+ [GCC_AGGRE1_NOC_BCR] = { 0x82000 },
+ [GCC_AGGRE2_NOC_BCR] = { 0x83000 },
+ [GCC_DCC_BCR] = { 0x84000 },
+ [GCC_IPA_BCR] = { 0x89000 },
+ [GCC_QSPI_BCR] = { 0x8b000 },
+ [GCC_SKL_BCR] = { 0x8c000 },
+ [GCC_MSMPU_BCR] = { 0x8d000 },
+ [GCC_MSS_Q6_BCR] = { 0x8e000 },
+ [GCC_QREFS_VBG_CAL_BCR] = { 0x88020 },
+ [GCC_USB3_PHY_BCR] = { 0x50020 },
+ [GCC_USB3PHY_PHY_BCR] = { 0x50024 },
+ [GCC_PCIE_PHY_NOCSR_COM_PHY_BCR] = { 0x6F00C },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6F014 },
+};
+
+static const struct regmap_config gcc_msm8996_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8f010,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_msm8996_desc = {
+ .config = &gcc_msm8996_regmap_config,
+ .clks = gcc_msm8996_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8996_clocks),
+ .resets = gcc_msm8996_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8996_resets),
+ .gdscs = gcc_msm8996_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8996_gdscs),
+};
+
+static const struct of_device_id gcc_msm8996_match_table[] = {
+ { .compatible = "qcom,gcc-msm8996" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8996_match_table);
+
+static int gcc_msm8996_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct device *dev = &pdev->dev;
+ int i;
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_msm8996_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be
+ * turned off by hardware during certain apps low power modes.
+ */
+ regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
+
+ for (i = 0; i < ARRAY_SIZE(gcc_msm8996_hws); i++) {
+ clk = devm_clk_register(dev, gcc_msm8996_hws[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ }
+
+ return qcom_cc_really_probe(pdev, &gcc_msm8996_desc, regmap);
+}
+
+static struct platform_driver gcc_msm8996_driver = {
+ .probe = gcc_msm8996_probe,
+ .driver = {
+ .name = "gcc-msm8996",
+ .of_match_table = gcc_msm8996_match_table,
+ },
+};
+
+static int __init gcc_msm8996_init(void)
+{
+ return platform_driver_register(&gcc_msm8996_driver);
+}
+core_initcall(gcc_msm8996_init);
+
+static void __exit gcc_msm8996_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8996_driver);
+}
+module_exit(gcc_msm8996_exit);
+
+MODULE_DESCRIPTION("QCOM GCC MSM8996 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-msm8996");
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index da9fad8b642b..f12d7b2bddd7 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
@@ -42,12 +43,12 @@
#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
-static int gdsc_is_enabled(struct gdsc *sc)
+static int gdsc_is_enabled(struct gdsc *sc, unsigned int reg)
{
u32 val;
int ret;
- ret = regmap_read(sc->regmap, sc->gdscr, &val);
+ ret = regmap_read(sc->regmap, reg, &val);
if (ret)
return ret;
@@ -58,28 +59,46 @@ static int gdsc_toggle_logic(struct gdsc *sc, bool en)
{
int ret;
u32 val = en ? 0 : SW_COLLAPSE_MASK;
- u32 check = en ? PWR_ON_MASK : 0;
- unsigned long timeout;
+ ktime_t start;
+ unsigned int status_reg = sc->gdscr;
ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
if (ret)
return ret;
- timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
- do {
- ret = regmap_read(sc->regmap, sc->gdscr, &val);
- if (ret)
- return ret;
+ /* If disabling votable gdscs, don't poll on status */
+ if ((sc->flags & VOTABLE) && !en) {
+ /*
+ * Add a short delay here to ensure that an enable
+ * right after it was disabled does not put it in an
+ * unknown state
+ */
+ udelay(TIMEOUT_US);
+ return 0;
+ }
- if ((val & PWR_ON_MASK) == check)
- return 0;
- } while (time_before(jiffies, timeout));
+ if (sc->gds_hw_ctrl) {
+ status_reg = sc->gds_hw_ctrl;
+ /*
+ * The gds hw controller asserts/de-asserts the status bit soon
+ * after it receives a power on/off request from a master.
+ * The controller then takes around 8 xo cycles to start its
+ * internal state machine and update the status bit. During
+ * this time, the status bit does not reflect the true status
+ * of the core.
+ * Add a delay of 1 us between writing to the SW_COLLAPSE bit
+ * and polling the status bit.
+ */
+ udelay(1);
+ }
- ret = regmap_read(sc->regmap, sc->gdscr, &val);
- if (ret)
- return ret;
+ start = ktime_get();
+ do {
+ if (gdsc_is_enabled(sc, status_reg) == en)
+ return 0;
+ } while (ktime_us_delta(ktime_get(), start) < TIMEOUT_US);
- if ((val & PWR_ON_MASK) == check)
+ if (gdsc_is_enabled(sc, status_reg) == en)
return 0;
return -ETIMEDOUT;
@@ -165,6 +184,7 @@ static int gdsc_init(struct gdsc *sc)
{
u32 mask, val;
int on, ret;
+ unsigned int reg;
/*
* Disable HW trigger: collapse/restore occur based on registers writes.
@@ -185,10 +205,18 @@ static int gdsc_init(struct gdsc *sc)
return ret;
}
- on = gdsc_is_enabled(sc);
+ reg = sc->gds_hw_ctrl ? sc->gds_hw_ctrl : sc->gdscr;
+ on = gdsc_is_enabled(sc, reg);
if (on < 0)
return on;
+ /*
+ * Votable GDSCs can be ON due to Vote from other masters.
+ * If a Votable GDSC is ON, make sure we have a Vote.
+ */
+ if ((sc->flags & VOTABLE) && on)
+ gdsc_enable(&sc->pd);
+
if (on || (sc->pwrsts & PWRSTS_RET))
gdsc_force_mem_on(sc);
else
@@ -201,11 +229,14 @@ static int gdsc_init(struct gdsc *sc)
return 0;
}
-int gdsc_register(struct device *dev, struct gdsc **scs, size_t num,
+int gdsc_register(struct gdsc_desc *desc,
struct reset_controller_dev *rcdev, struct regmap *regmap)
{
int i, ret;
struct genpd_onecell_data *data;
+ struct device *dev = desc->dev;
+ struct gdsc **scs = desc->scs;
+ size_t num = desc->num;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -228,10 +259,30 @@ int gdsc_register(struct device *dev, struct gdsc **scs, size_t num,
data->domains[i] = &scs[i]->pd;
}
+ /* Add subdomains */
+ for (i = 0; i < num; i++) {
+ if (!scs[i])
+ continue;
+ if (scs[i]->parent)
+ pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
+ }
+
return of_genpd_add_provider_onecell(dev->of_node, data);
}
-void gdsc_unregister(struct device *dev)
+void gdsc_unregister(struct gdsc_desc *desc)
{
+ int i;
+ struct device *dev = desc->dev;
+ struct gdsc **scs = desc->scs;
+ size_t num = desc->num;
+
+ /* Remove subdomains */
+ for (i = 0; i < num; i++) {
+ if (!scs[i])
+ continue;
+ if (scs[i]->parent)
+ pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
+ }
of_genpd_del_provider(dev->of_node);
}
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 5ded26884f08..3bf497c36bdf 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -20,18 +20,12 @@
struct regmap;
struct reset_controller_dev;
-/* Powerdomain allowable state bitfields */
-#define PWRSTS_OFF BIT(0)
-#define PWRSTS_RET BIT(1)
-#define PWRSTS_ON BIT(2)
-#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
-#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
-
/**
* struct gdsc - Globally Distributed Switch Controller
* @pd: generic power domain
* @regmap: regmap for MMIO accesses
* @gdscr: gsdc control register
+ * @gds_hw_ctrl: gds_hw_ctrl register
* @cxcs: offsets of branch registers to toggle mem/periph bits in
* @cxc_count: number of @cxcs
* @pwrsts: Possible powerdomain power states
@@ -41,28 +35,44 @@ struct reset_controller_dev;
*/
struct gdsc {
struct generic_pm_domain pd;
+ struct generic_pm_domain *parent;
struct regmap *regmap;
unsigned int gdscr;
+ unsigned int gds_hw_ctrl;
unsigned int *cxcs;
unsigned int cxc_count;
const u8 pwrsts;
+/* Powerdomain allowable state bitfields */
+#define PWRSTS_OFF BIT(0)
+#define PWRSTS_RET BIT(1)
+#define PWRSTS_ON BIT(2)
+#define PWRSTS_OFF_ON (PWRSTS_OFF | PWRSTS_ON)
+#define PWRSTS_RET_ON (PWRSTS_RET | PWRSTS_ON)
+ const u8 flags;
+#define VOTABLE BIT(0)
struct reset_controller_dev *rcdev;
unsigned int *resets;
unsigned int reset_count;
};
+struct gdsc_desc {
+ struct device *dev;
+ struct gdsc **scs;
+ size_t num;
+};
+
#ifdef CONFIG_QCOM_GDSC
-int gdsc_register(struct device *, struct gdsc **, size_t n,
- struct reset_controller_dev *, struct regmap *);
-void gdsc_unregister(struct device *);
+int gdsc_register(struct gdsc_desc *desc, struct reset_controller_dev *,
+ struct regmap *);
+void gdsc_unregister(struct gdsc_desc *desc);
#else
-static inline int gdsc_register(struct device *d, struct gdsc **g, size_t n,
+static inline int gdsc_register(struct gdsc_desc *desc,
struct reset_controller_dev *rcdev,
struct regmap *r)
{
return -ENOSYS;
}
-static inline void gdsc_unregister(struct device *d) {};
+static inline void gdsc_unregister(struct gdsc_desc *desc) {};
#endif /* CONFIG_QCOM_GDSC */
#endif /* __QCOM_GDSC_H__ */
diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c
new file mode 100644
index 000000000000..1492f4c79c35
--- /dev/null
+++ b/drivers/clk/qcom/hfpll.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include "clk-regmap.h"
+#include "clk-hfpll.h"
+
+static const struct hfpll_data hdata = {
+ .mode_reg = 0x00,
+ .l_reg = 0x04,
+ .m_reg = 0x08,
+ .n_reg = 0x0c,
+ .user_reg = 0x10,
+ .config_reg = 0x14,
+ .config_val = 0x430405d,
+ .status_reg = 0x1c,
+ .lock_bit = 16,
+
+ .user_val = 0x8,
+ .user_vco_mask = 0x100000,
+ .low_vco_max_rate = 1248000000,
+ .min_rate = 537600000UL,
+ .max_rate = 2900000000UL,
+};
+
+static const struct of_device_id qcom_hfpll_match_table[] = {
+ { .compatible = "qcom,hfpll" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_hfpll_match_table);
+
+static const struct regmap_config hfpll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x30,
+ .fast_io = true,
+};
+
+static int qcom_hfpll_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ struct regmap *regmap;
+ struct clk_hfpll *h;
+ struct clk_init_data init = {
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_ops_hfpll,
+ };
+
+ h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, &hfpll_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ if (of_property_read_string_index(dev->of_node, "clock-output-names",
+ 0, &init.name))
+ return -ENODEV;
+
+ h->d = &hdata;
+ h->clkr.hw.init = &init;
+ spin_lock_init(&h->lock);
+
+ clk = devm_clk_register_regmap(&pdev->dev, &h->clkr);
+
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static struct platform_driver qcom_hfpll_driver = {
+ .probe = qcom_hfpll_probe,
+ .driver = {
+ .name = "qcom-hfpll",
+ .of_match_table = qcom_hfpll_match_table,
+ },
+};
+module_platform_driver(qcom_hfpll_driver);
+
+MODULE_DESCRIPTION("QCOM HFPLL Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-hfpll");
diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c
new file mode 100644
index 000000000000..abf6bfd053c1
--- /dev/null
+++ b/drivers/clk/qcom/kpss-xcc.c
@@ -0,0 +1,95 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+static const char *aux_parents[] = {
+ "pll8_vote",
+ "pxo",
+};
+
+static unsigned int aux_parent_map[] = {
+ 3,
+ 0,
+};
+
+static const struct of_device_id kpss_xcc_match_table[] = {
+ { .compatible = "qcom,kpss-acc-v1", .data = (void *)1UL },
+ { .compatible = "qcom,kpss-gcc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, kpss_xcc_match_table);
+
+static int kpss_xcc_driver_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+ struct clk *clk;
+ struct resource *res;
+ void __iomem *base;
+ const char *name;
+
+ id = of_match_device(kpss_xcc_match_table, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (id->data) {
+ if (of_property_read_string_index(pdev->dev.of_node,
+ "clock-output-names", 0, &name))
+ return -ENODEV;
+ base += 0x14;
+ } else {
+ name = "acpu_l2_aux";
+ base += 0x28;
+ }
+
+ clk = clk_register_mux_table(&pdev->dev, name, aux_parents,
+ ARRAY_SIZE(aux_parents), 0, base, 0, 0x3,
+ 0, aux_parent_map, NULL);
+
+ platform_set_drvdata(pdev, clk);
+
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static int kpss_xcc_driver_remove(struct platform_device *pdev)
+{
+ clk_unregister_mux(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver kpss_xcc_driver = {
+ .probe = kpss_xcc_driver_probe,
+ .remove = kpss_xcc_driver_remove,
+ .driver = {
+ .name = "kpss-xcc",
+ .of_match_table = kpss_xcc_match_table,
+ },
+};
+module_platform_driver(kpss_xcc_driver);
+
+MODULE_DESCRIPTION("Krait Processor Sub System (KPSS) Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kpss-xcc");
diff --git a/drivers/clk/qcom/krait-cc.c b/drivers/clk/qcom/krait-cc.c
new file mode 100644
index 000000000000..f55b5ecd0df8
--- /dev/null
+++ b/drivers/clk/qcom/krait-cc.c
@@ -0,0 +1,352 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+
+#include "clk-krait.h"
+
+/* Secondary mux select codes, index-matched to its parents: acpu_aux=2, qsb=0. */
+static unsigned int sec_mux_map[] = {
+ 2,
+ 0,
+};
+
+/* Primary mux select codes: hfpll=1, hfpll_div=2, secondary mux=0. */
+static unsigned int pri_mux_map[] = {
+ 1,
+ 2,
+ 0,
+};
+
+/*
+ * Register the "hfpll<s>_div" divide-by-2 clock for CPU @id (or the L2
+ * cluster when @id < 0), parented on "hfpll<s>".
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int
+krait_add_div(struct device *dev, int id, const char *s, unsigned offset)
+{
+ struct krait_div2_clk *div;
+ struct clk_init_data init = {
+ .num_parents = 1,
+ .ops = &krait_div2_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ const char *p_names[1];
+ struct clk *clk;
+
+ div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return -ENOMEM;
+
+ /* 2-bit divider field at bit 6; lpl is set for per-CPU clocks (id >= 0). */
+ div->width = 2;
+ div->shift = 6;
+ div->lpl = id >= 0;
+ div->offset = offset;
+ div->hw.init = &init;
+
+ init.name = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
+ if (!init.name)
+ return -ENOMEM;
+
+ init.parent_names = p_names;
+ p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s);
+ if (!p_names[0]) {
+ kfree(init.name);
+ return -ENOMEM;
+ }
+
+ clk = devm_clk_register(dev, &div->hw);
+ /* clk_register() duplicates the init strings, so ours can be freed now. */
+ kfree(p_names[0]);
+ kfree(init.name);
+
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+/*
+ * Register the "krait<s>_sec_mux" secondary mux for CPU @id (or L2 when
+ * @id < 0).  Parents are "acpu_aux" (replaced by the per-CPU "acpu<s>_aux"
+ * when @unique_aux) and "qsb".
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int
+krait_add_sec_mux(struct device *dev, int id, const char *s, unsigned offset,
+    bool unique_aux)
+{
+ struct krait_mux_clk *mux;
+ /*
+ * Deliberately not static: entry 0 is rewritten per call when
+ * @unique_aux and then kfree()d below, so a static array would be
+ * left holding a dangling pointer between calls.
+ */
+ const char *sec_mux_list[] = {
+ "acpu_aux",
+ "qsb",
+ };
+ struct clk_init_data init = {
+ .parent_names = sec_mux_list,
+ .num_parents = ARRAY_SIZE(sec_mux_list),
+ .ops = &krait_mux_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ struct clk *clk;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ /* 2-bit mux field at bit 2; safe_sel 2 selects acpu_aux (sec_mux_map). */
+ mux->offset = offset;
+ mux->lpl = id >= 0;
+ mux->has_safe_parent = true;
+ mux->safe_sel = 2;
+ mux->mask = 0x3;
+ mux->shift = 2;
+ mux->parent_map = sec_mux_map;
+ mux->hw.init = &init;
+
+ init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
+ if (!init.name)
+ return -ENOMEM;
+
+ if (unique_aux) {
+ sec_mux_list[0] = kasprintf(GFP_KERNEL, "acpu%s_aux", s);
+ if (!sec_mux_list[0]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_aux;
+ }
+ }
+
+ clk = devm_clk_register(dev, &mux->hw);
+
+ /* clk_register() duplicated the strings; free our copies. */
+ if (unique_aux)
+ kfree(sec_mux_list[0]);
+err_aux:
+ kfree(init.name);
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+/*
+ * Register the "krait<s>_pri_mux" primary mux for CPU @id (or L2 when
+ * @id < 0).  Parents: "hfpll<s>", "hfpll<s>_div" and "krait<s>_sec_mux".
+ *
+ * Returns the registered clk, or an ERR_PTR on failure.
+ */
+static struct clk *
+krait_add_pri_mux(struct device *dev, int id, const char *s, unsigned offset)
+{
+ struct krait_mux_clk *mux;
+ const char *p_names[3];
+ struct clk_init_data init = {
+ .parent_names = p_names,
+ .num_parents = ARRAY_SIZE(p_names),
+ .ops = &krait_mux_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ struct clk *clk;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ /* 2-bit mux field at bit 0; safe_sel 0 is the secondary mux (pri_mux_map). */
+ mux->has_safe_parent = true;
+ mux->safe_sel = 0;
+ mux->mask = 0x3;
+ mux->shift = 0;
+ mux->offset = offset;
+ mux->lpl = id >= 0;
+ mux->parent_map = pri_mux_map;
+ mux->hw.init = &init;
+
+ init.name = kasprintf(GFP_KERNEL, "krait%s_pri_mux", s);
+ if (!init.name)
+ return ERR_PTR(-ENOMEM);
+
+ p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s);
+ if (!p_names[0]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p0;
+ }
+
+ p_names[1] = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
+ if (!p_names[1]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p1;
+ }
+
+ p_names[2] = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
+ if (!p_names[2]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p2;
+ }
+
+ clk = devm_clk_register(dev, &mux->hw);
+
+ /* Cascading labels free every name allocated so far — also on success. */
+ kfree(p_names[2]);
+err_p2:
+ kfree(p_names[1]);
+err_p1:
+ kfree(p_names[0]);
+err_p0:
+ kfree(init.name);
+ return clk;
+}
+
+/* id < 0 for L2, otherwise id == physical CPU number */
+static struct clk *krait_add_clks(struct device *dev, int id, bool unique_aux)
+{
+ int ret;
+ unsigned offset;
+ void *p = NULL;
+ const char *s;
+ struct clk *clk;
+
+ if (id >= 0) {
+ /* Per-CPU register banks are 0x1000 apart. */
+ offset = 0x4501 + (0x1000 * id);
+ s = p = kasprintf(GFP_KERNEL, "%d", id);
+ if (!s)
+ return ERR_PTR(-ENOMEM);
+ } else {
+ offset = 0x500;
+ s = "_l2";
+ }
+
+ /* Build the tree bottom-up: divider, secondary mux, then primary mux. */
+ ret = krait_add_div(dev, id, s, offset);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto err;
+ }
+
+ ret = krait_add_sec_mux(dev, id, s, offset, unique_aux);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto err;
+ }
+
+ clk = krait_add_pri_mux(dev, id, s, offset);
+err:
+ kfree(p);
+ return clk;
+}
+
+/* of_clk provider callback: indices 0-3 are CPU muxes, 4 is the L2 mux. */
+static struct clk *krait_of_get(struct of_phandle_args *clkspec, void *data)
+{
+ unsigned int idx = clkspec->args[0];
+ struct clk **clks = data;
+
+ if (idx >= 5) {
+ pr_err("%s: invalid clock index %d\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* A slot may be empty (e.g. fewer than 4 CPUs populated). */
+ return clks[idx] ? : ERR_PTR(-ENODEV);
+}
+
+/*
+ * v1 carries non-NULL match data, meaning each CPU has its own AUX source
+ * (see the unique_aux argument to krait_add_clks()).  The .data designator
+ * is explicit for consistency with kpss_xcc_match_table; the original
+ * positional initializer landed in .data only by member order.
+ */
+static const struct of_device_id krait_cc_match_table[] = {
+ { .compatible = "qcom,krait-cc-v1", .data = (void *)1UL },
+ { .compatible = "qcom,krait-cc-v2" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, krait_cc_match_table);
+
+/*
+ * Probe: register the per-CPU and L2 clock trees, keep them enabled so
+ * late-init clock gating cannot stop the cores, and force each mux to a
+ * known rate to overwrite whatever the bootloader configured.
+ */
+static int krait_cc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *id;
+ unsigned long cur_rate, aux_rate;
+ int cpu;
+ struct clk *clk;
+ struct clk **clks;
+ struct clk *l2_pri_mux_clk;
+
+ id = of_match_device(krait_cc_match_table, dev);
+ if (!id)
+ return -ENODEV;
+
+ /* Rate is 1 because 0 causes problems for __clk_mux_determine_rate */
+ /* NOTE(review): not devm-managed; leaked if probe fails later. */
+ clk = clk_register_fixed_rate(dev, "qsb", NULL, CLK_IS_ROOT, 1);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (!id->data) {
+ /* v2: a single shared aux source, gpll0_vote divided by 2. */
+ clk = clk_register_fixed_factor(dev, "acpu_aux",
+ "gpll0_vote", 0, 1, 2);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ }
+
+ /* Krait configurations have at most 4 CPUs and one L2 */
+ clks = devm_kcalloc(dev, 5, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ /* NOTE(review): assumes possible CPU ids are < 4; clks[] has 5 slots. */
+ for_each_possible_cpu(cpu) {
+ clk = krait_add_clks(dev, cpu, id->data);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[cpu] = clk;
+ }
+
+ l2_pri_mux_clk = krait_add_clks(dev, -1, id->data);
+ if (IS_ERR(l2_pri_mux_clk))
+ return PTR_ERR(l2_pri_mux_clk);
+ clks[4] = l2_pri_mux_clk;
+
+ /*
+ * We don't want the CPU or L2 clocks to be turned off at late init
+ * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
+ * refcount of these clocks. Any cpufreq/hotplug manager can assume
+ * that the clocks have already been prepared and enabled by the time
+ * they take over.
+ */
+ for_each_online_cpu(cpu) {
+ clk_prepare_enable(l2_pri_mux_clk);
+ WARN(clk_prepare_enable(clks[cpu]),
+ "Unable to turn on CPU%d clock", cpu);
+ }
+
+ /*
+ * Force reinit of HFPLLs and muxes to overwrite any potential
+ * incorrect configuration of HFPLLs and muxes by the bootloader.
+ * While at it, also make sure the cores are running at known rates
+ * and print the current rate.
+ *
+ * The clocks are set to aux clock rate first to make sure the
+ * secondary mux is not sourcing off of QSB. The rate is then set to
+ * two different rates to force a HFPLL reinit under all
+ * circumstances.
+ */
+ cur_rate = clk_get_rate(l2_pri_mux_clk);
+ aux_rate = 384000000;
+ if (cur_rate == 1) {
+ pr_info("L2 @ QSB rate. Forcing new rate.\n");
+ cur_rate = aux_rate;
+ }
+ clk_set_rate(l2_pri_mux_clk, aux_rate);
+ clk_set_rate(l2_pri_mux_clk, 2);
+ clk_set_rate(l2_pri_mux_clk, cur_rate);
+ pr_info("L2 @ %lu KHz\n", clk_get_rate(l2_pri_mux_clk) / 1000);
+ for_each_possible_cpu(cpu) {
+ clk = clks[cpu];
+ cur_rate = clk_get_rate(clk);
+ if (cur_rate == 1) {
+ pr_info("CPU%d @ QSB rate. Forcing new rate.\n", cpu);
+ cur_rate = aux_rate;
+ }
+ clk_set_rate(clk, aux_rate);
+ clk_set_rate(clk, 2);
+ clk_set_rate(clk, cur_rate);
+ pr_info("CPU%d @ %lu KHz\n", cpu, clk_get_rate(clk) / 1000);
+ }
+
+ /* NOTE(review): return value of of_clk_add_provider() is ignored. */
+ of_clk_add_provider(dev->of_node, krait_of_get, clks);
+
+ return 0;
+}
+
+/* Platform driver glue; no .remove callback is provided. */
+static struct platform_driver krait_cc_driver = {
+ .probe = krait_cc_probe,
+ .driver = {
+ .name = "krait-cc",
+ .of_match_table = krait_cc_match_table,
+ },
+};
+module_platform_driver(krait_cc_driver);
+
+MODULE_DESCRIPTION("Krait CPU Clock Driver")
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:krait-cc");
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 00e36192a1de..7f21421c87d6 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -1789,7 +1789,6 @@ static struct clk_branch gmem_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gmem_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1805,7 +1804,6 @@ static struct clk_branch ijpeg_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "ijpeg_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1821,7 +1819,6 @@ static struct clk_branch mmss_imem_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "mmss_imem_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1835,7 +1832,6 @@ static struct clk_branch jpegd_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "jpegd_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1851,7 +1847,6 @@ static struct clk_branch vcodec_axi_b_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcodec_axi_b_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1867,7 +1862,6 @@ static struct clk_branch vcodec_axi_a_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcodec_axi_a_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1883,7 +1877,6 @@ static struct clk_branch vcodec_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcodec_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1897,7 +1890,6 @@ static struct clk_branch vfe_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "vfe_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1913,7 +1905,6 @@ static struct clk_branch mdp_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "mdp_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1929,7 +1920,6 @@ static struct clk_branch rot_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "rot_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1945,7 +1935,6 @@ static struct clk_branch vcap_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcap_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1961,7 +1950,6 @@ static struct clk_branch vpe_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "vpe_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1977,7 +1965,6 @@ static struct clk_branch gfx3d_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gfx3d_axi_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -1991,7 +1978,6 @@ static struct clk_branch amp_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "amp_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2005,7 +1991,6 @@ static struct clk_branch csi_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "csi_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2019,7 +2004,6 @@ static struct clk_branch dsi_m_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "dsi_m_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2035,7 +2019,6 @@ static struct clk_branch dsi_s_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "dsi_s_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2049,7 +2032,6 @@ static struct clk_branch dsi2_m_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "dsi2_m_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2065,7 +2047,6 @@ static struct clk_branch dsi2_s_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "dsi2_s_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2425,7 +2406,6 @@ static struct clk_branch gfx2d0_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gfx2d0_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2441,7 +2421,6 @@ static struct clk_branch gfx2d1_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gfx2d1_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2457,7 +2436,6 @@ static struct clk_branch gfx3d_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gfx3d_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2473,7 +2451,6 @@ static struct clk_branch hdmi_m_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_m_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2489,7 +2466,6 @@ static struct clk_branch hdmi_s_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_s_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2503,7 +2479,6 @@ static struct clk_branch ijpeg_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "ijpeg_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2519,7 +2494,6 @@ static struct clk_branch mmss_imem_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "mmss_imem_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2533,7 +2507,6 @@ static struct clk_branch jpegd_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "jpegd_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2547,7 +2520,6 @@ static struct clk_branch mdp_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "mdp_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2561,7 +2533,6 @@ static struct clk_branch rot_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "rot_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT
},
},
};
@@ -2577,7 +2548,6 @@ static struct clk_branch smmu_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "smmu_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2591,7 +2561,6 @@ static struct clk_branch tv_enc_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "tv_enc_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2605,7 +2574,6 @@ static struct clk_branch vcap_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcap_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2621,7 +2589,6 @@ static struct clk_branch vcodec_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "vcodec_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2635,7 +2602,6 @@ static struct clk_branch vfe_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "vfe_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
@@ -2649,7 +2615,6 @@ static struct clk_branch vpe_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "vpe_ahb_clk",
.ops = &clk_branch_ops,
- .flags = CLK_IS_ROOT,
},
},
};
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
new file mode 100644
index 000000000000..2af5c1a1d7ca
--- /dev/null
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -0,0 +1,3383 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+#include <linux/clk.h>
+
+#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-alpha-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+#include "gdsc.h"
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
+
+/*
+ * Symbolic parent sources used as keys in the parent_map tables below;
+ * the hardware mux select code for each source is given per-map, not by
+ * this enum's ordinal values.
+ */
+enum {
+ P_XO,
+ P_MMPLL0,
+ P_GPLL0,
+ P_GPLL0_DIV,
+ P_MMPLL1,
+ P_MMPLL9,
+ P_MMPLL2,
+ P_MMPLL8,
+ P_MMPLL3,
+ P_DSI0PLL,
+ P_DSI1PLL,
+ P_MMPLL5,
+ P_HDMIPLL,
+ P_DSI0PLL_BYTE,
+ P_DSI1PLL_BYTE,
+ P_MMPLL4,
+};
+
+static const struct parent_map mmss_xo_hdmi_map[] = {
+ { P_XO, 0 },
+ { P_HDMIPLL, 1 }
+};
+
+static const char * const mmss_xo_hdmi[] = {
+ "xo",
+ "hdmipll"
+};
+
+static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 1 },
+ { P_DSI1PLL, 2 }
+};
+
+static const char * const mmss_xo_dsi0pll_dsi1pll[] = {
+ "xo",
+ "dsi0pll",
+ "dsi1pll"
+};
+
+static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const char * const mmss_xo_gpll0_gpll0_div[] = {
+ "xo",
+ "gpll0",
+ "gpll0_div"
+};
+
+static const struct parent_map mmss_xo_dsibyte_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 2 }
+};
+
+static const char * const mmss_xo_dsibyte[] = {
+ "xo",
+ "dsi0pllbyte",
+ "dsi1pllbyte"
+};
+
+static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const char * const mmss_xo_mmpll0_gpll0_gpll0_div[] = {
+ "xo",
+ "mmpll0",
+ "gpll0",
+ "gpll0_div"
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const char * const mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
+ "xo",
+ "mmpll0",
+ "mmpll1",
+ "gpll0",
+ "gpll0_div"
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL3, 3 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const char * const mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div[] = {
+ "xo",
+ "mmpll0",
+ "mmpll3",
+ "gpll0",
+ "gpll0_div"
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL5, 2 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const char * const mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
+ "xo",
+ "mmpll0",
+ "mmpll5",
+ "gpll0",
+ "gpll0_div"
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL4, 3 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const char * const mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div[] = {
+ "xo",
+ "mmpll0",
+ "mmpll4",
+ "gpll0",
+ "gpll0_div"
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL9, 2 },
+ { P_MMPLL2, 3 },
+ { P_MMPLL8, 4 },
+ { P_GPLL0, 5 }
+};
+
+static const char * const mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0[] = {
+ "xo",
+ "mmpll0",
+ "mmpll9",
+ "mmpll2",
+ "mmpll8",
+ "gpll0"
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL9, 2 },
+ { P_MMPLL2, 3 },
+ { P_MMPLL8, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const char * const mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div[] = {
+ "xo",
+ "mmpll0",
+ "mmpll9",
+ "mmpll2",
+ "mmpll8",
+ "gpll0",
+ "gpll0_div"
+};
+
+static const struct parent_map mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map[] = {
+ { P_XO, 0 },
+ { P_MMPLL0, 1 },
+ { P_MMPLL1, 2 },
+ { P_MMPLL4, 3 },
+ { P_MMPLL3, 4 },
+ { P_GPLL0, 5 },
+ { P_GPLL0_DIV, 6 }
+};
+
+static const char * const mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div[] = {
+ "xo",
+ "mmpll0",
+ "mmpll1",
+ "mmpll4",
+ "mmpll3",
+ "gpll0",
+ "gpll0_div"
+};
+
+/* gpll0 / 2 — the P_GPLL0_DIV source referenced by the RCG parent maps. */
+static struct clk_fixed_factor gpll0_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_div",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+/*
+ * Alpha-PLL VCO operating ranges.  NOTE(review): entries appear to be
+ * { min_rate, max_rate, vco select value } — confirm against clk-alpha-pll.h.
+ */
+static struct pll_vco mmpll_p_vco[] = {
+ { 250000000, 500000000, 3 },
+ { 500000000, 1000000000, 2 },
+ { 1000000000, 1500000000, 1 },
+ { 1500000000, 2000000000, 0 },
+};
+
+/* Ranges for the GFX PLLs (mmpll2/mmpll8). */
+static struct pll_vco mmpll_gfx_vco[] = {
+ { 400000000, 1000000000, 2 },
+ { 1000000000, 1500000000, 1 },
+ { 1500000000, 2000000000, 0 },
+};
+
+/* Single range for the turbo PLLs (mmpll4/mmpll9). */
+static struct pll_vco mmpll_t_vco[] = {
+ { 500000000, 1500000000, 0 },
+};
+
+/* MMPLL0: alpha PLL at offset 0x0, enable-voted via bit 0 of register 0x100. */
+static struct clk_alpha_pll mmpll0_early = {
+ .offset = 0x0,
+ .vco_table = mmpll_p_vco,
+ .num_vco = ARRAY_SIZE(mmpll_p_vco),
+ .clkr = {
+ .enable_reg = 0x100,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll0_early",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* Post-divider stage of MMPLL0 (4-bit divider field at the same offset). */
+static struct clk_alpha_pll_postdiv mmpll0 = {
+ .offset = 0x0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll0",
+ .parent_names = (const char *[]){ "mmpll0_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll mmpll1_early = {
+ .offset = 0x30,
+ .vco_table = mmpll_p_vco,
+ .num_vco = ARRAY_SIZE(mmpll_p_vco),
+ .clkr = {
+ .enable_reg = 0x100,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmpll1_early",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ }
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll1 = {
+ .offset = 0x30,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll1",
+ .parent_names = (const char *[]){ "mmpll1_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll mmpll2_early = {
+ .offset = 0x4100,
+ .vco_table = mmpll_gfx_vco,
+ .num_vco = ARRAY_SIZE(mmpll_gfx_vco),
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll2_early",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll2 = {
+ .offset = 0x4100,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll2",
+ .parent_names = (const char *[]){ "mmpll2_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll mmpll3_early = {
+ .offset = 0x60,
+ .vco_table = mmpll_p_vco,
+ .num_vco = ARRAY_SIZE(mmpll_p_vco),
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll3_early",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll3 = {
+ .offset = 0x60,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll3",
+ .parent_names = (const char *[]){ "mmpll3_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll mmpll4_early = {
+ .offset = 0x90,
+ .vco_table = mmpll_t_vco,
+ .num_vco = ARRAY_SIZE(mmpll_t_vco),
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll4_early",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll4 = {
+ .offset = 0x90,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll4",
+ .parent_names = (const char *[]){ "mmpll4_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll mmpll5_early = {
+ .offset = 0xc0,
+ .vco_table = mmpll_p_vco,
+ .num_vco = ARRAY_SIZE(mmpll_p_vco),
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll5_early",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll5 = {
+ .offset = 0xc0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll5",
+ .parent_names = (const char *[]){ "mmpll5_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll mmpll8_early = {
+ .offset = 0x4130,
+ .vco_table = mmpll_gfx_vco,
+ .num_vco = ARRAY_SIZE(mmpll_gfx_vco),
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll8_early",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll8 = {
+ .offset = 0x4130,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll8",
+ .parent_names = (const char *[]){ "mmpll8_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_alpha_pll mmpll9_early = {
+ .offset = 0x4200,
+ .vco_table = mmpll_t_vco,
+ .num_vco = ARRAY_SIZE(mmpll_t_vco),
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll9_early",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+};
+
+static struct clk_alpha_pll_postdiv mmpll9 = {
+ .offset = 0x4200,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mmpll9",
+ .parent_names = (const char *[]){ "mmpll9_early" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_ahb_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(40000000, P_GPLL0_DIV, 7.5, 0, 0),
+ F(80000000, P_MMPLL0, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ahb_clk_src = {
+ .cmd_rcgr = 0x5000,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ahb_clk_src",
+ .parent_names = mmss_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_axi_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(75000000, P_GPLL0_DIV, 4, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(171430000, P_GPLL0, 3.5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 axi_clk_src = {
+ .cmd_rcgr = 0x5040,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "axi_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 maxi_clk_src = {
+ .cmd_rcgr = 0x5090,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "maxi_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x4000,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gfx3d_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0,
+ .num_parents = 6,
+ .ops = &clk_gfx3d_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_rbbmtimer_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbbmtimer_clk_src = {
+ .cmd_rcgr = 0x4090,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_rbbmtimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbbmtimer_clk_src",
+ .parent_names = mmss_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 isense_clk_src = {
+ .cmd_rcgr = 0x4010,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "isense_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll9_mmpll2_mmpll8_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_rbcpr_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbcpr_clk_src = {
+ .cmd_rcgr = 0x4060,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbcpr_clk_src",
+ .parent_names = mmss_xo_mmpll0_gpll0_gpll0_div,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_core_clk_src[] = {
+ F(75000000, P_GPLL0_DIV, 4, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(346666667, P_MMPLL3, 3, 0, 0),
+ F(520000000, P_MMPLL3, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_core_clk_src = {
+ .cmd_rcgr = 0x1000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_video_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_core_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 video_subcore0_clk_src = {
+ .cmd_rcgr = 0x1060,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_video_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_subcore0_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 video_subcore1_clk_src = {
+ .cmd_rcgr = 0x1080,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_video_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "video_subcore1_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll3_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x2000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dsi0pll_dsi1pll_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk0_clk_src",
+ .parent_names = mmss_xo_dsi0pll_dsi1pll,
+ .num_parents = 3,
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x2020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dsi0pll_dsi1pll_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_names = mmss_xo_dsi0pll_dsi1pll,
+ .num_parents = 3,
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/*
+ * MDP core clock rates. Non-integer dividers (3.5, 2.5) rely on the F()
+ * macro's half-integer divider encoding — defined elsewhere in this file,
+ * not visible here.
+ */
+static const struct freq_tbl ftbl_mdp_clk_src[] = {
+ F(85714286, P_GPLL0, 7, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(171428571, P_GPLL0, 3.5, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(275000000, P_MMPLL5, 3, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(330000000, P_MMPLL5, 2.5, 0, 0),
+ F(412500000, P_MMPLL5, 2, 0, 0),
+ { }
+};
+
+/* MDP (display processor) core clock RCG; no M/N counter, HID divider only. */
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x2040,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mdp_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* Single-entry table: extpclk always sources from the HDMI PLL. */
+static struct freq_tbl extpclk_freq_tbl[] = {
+ { .src = P_HDMIPLL },
+ { }
+};
+
+/*
+ * External pixel clock for HDMI; rate is set on the HDMI PLL parent
+ * (CLK_SET_RATE_PARENT, clk_byte_ops).
+ */
+static struct clk_rcg2 extpclk_clk_src = {
+ .cmd_rcgr = 0x2060,
+ .hid_width = 5,
+ .parent_map = mmss_xo_hdmi_map,
+ .freq_tbl = extpclk_freq_tbl,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "extpclk_clk_src",
+ .parent_names = mmss_xo_hdmi,
+ .num_parents = 2,
+ .ops = &clk_byte_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/* vsync runs only at the 19.2 MHz XO rate. */
+static struct freq_tbl ftbl_mdss_vsync_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+/* MDSS vsync clock RCG (XO-only per the table above). */
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x2080,
+ .hid_width = 5,
+ .parent_map = mmss_xo_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mdss_vsync_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vsync_clk_src",
+ .parent_names = mmss_xo_gpll0_gpll0_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* HDMI reference clock also runs only at the XO rate. */
+static struct freq_tbl ftbl_mdss_hdmi_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+/* MDSS HDMI clock RCG (XO-only per the table above). */
+static struct clk_rcg2 hdmi_clk_src = {
+ .cmd_rcgr = 0x2100,
+ .hid_width = 5,
+ .parent_map = mmss_xo_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mdss_hdmi_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "hdmi_clk_src",
+ .parent_names = mmss_xo_gpll0_gpll0_div,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * DSI byte clocks: no frequency table; clk_byte2_ops with
+ * CLK_SET_RATE_PARENT tracks the DSI byte PLL outputs.
+ */
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x2120,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte0_clk_src",
+ .parent_names = mmss_xo_dsibyte,
+ .num_parents = 3,
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/* Second DSI byte clock; identical configuration at CMD_RCGR 0x2140. */
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x2140,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dsibyte_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_names = mmss_xo_dsibyte,
+ .num_parents = 3,
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/* DSI escape clocks run only at the XO rate. */
+static struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+/* DSI0 escape clock RCG. */
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x2160,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dsibyte_map,
+ .freq_tbl = ftbl_mdss_esc0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc0_clk_src",
+ .parent_names = mmss_xo_dsibyte,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* DSI1 escape clock RCG; shares the table above. */
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x2180,
+ .hid_width = 5,
+ .parent_map = mmss_xo_dsibyte_map,
+ .freq_tbl = ftbl_mdss_esc0_1_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_names = mmss_xo_dsibyte,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* Camera general-purpose clock rates (uses the 8-bit M/N counter). */
+static const struct freq_tbl ftbl_camss_gp0_clk_src[] = {
+ F(10000, P_XO, 16, 1, 120),
+ F(24000, P_XO, 16, 1, 50),
+ F(6000000, P_GPLL0_DIV, 10, 1, 5),
+ F(12000000, P_GPLL0_DIV, 1, 1, 25),
+ F(13000000, P_GPLL0_DIV, 2, 13, 150),
+ F(24000000, P_GPLL0_DIV, 1, 2, 25),
+ { }
+};
+
+/* Camera GP0 clock RCG. */
+static struct clk_rcg2 camss_gp0_clk_src = {
+ .cmd_rcgr = 0x3420,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_camss_gp0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* Camera GP1 clock RCG; shares the GP0 frequency table. */
+static struct clk_rcg2 camss_gp1_clk_src = {
+ .cmd_rcgr = 0x3450,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_camss_gp0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* Camera sensor master clock (MCLK) rates, shared by mclk0..mclk3. */
+static const struct freq_tbl ftbl_mclk0_clk_src[] = {
+ F(4800000, P_XO, 4, 0, 0),
+ F(6000000, P_GPLL0_DIV, 10, 1, 5),
+ F(8000000, P_GPLL0_DIV, 1, 2, 75),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16666667, P_GPLL0_DIV, 2, 1, 9),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0_DIV, 1, 2, 25),
+ F(33333333, P_GPLL0_DIV, 1, 1, 9),
+ F(48000000, P_GPLL0, 1, 2, 25),
+ F(66666667, P_GPLL0, 1, 1, 9),
+ { }
+};
+
+/* MCLK0..MCLK3 RCGs differ only in name and CMD_RCGR offset. */
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x3360,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk0_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x3390,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk1_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+ .cmd_rcgr = 0x33c0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk2_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk3_clk_src = {
+ .cmd_rcgr = 0x33f0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk3_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* CCI (camera control interface) clock rates. */
+static const struct freq_tbl ftbl_cci_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0, 16, 0, 0),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ { }
+};
+
+/* CCI clock RCG. */
+static struct clk_rcg2 cci_clk_src = {
+ .cmd_rcgr = 0x3300,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_cci_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cci_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* CSI PHY timer clock rates, shared by csi0/1/2phytimer. */
+static const struct freq_tbl ftbl_csi0phytimer_clk_src[] = {
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(266666667, P_MMPLL0, 3, 0, 0),
+ { }
+};
+
+/* The three phytimer RCGs differ only in name and CMD_RCGR offset. */
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x3000,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0phytimer_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x3030,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1phytimer_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x3060,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2phytimer_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* CSI PHY 3-phase clock rates, shared by csiphy0/1/2_3p. */
+static const struct freq_tbl ftbl_csiphy0_3p_clk_src[] = {
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL4, 3, 0, 0),
+ F(384000000, P_MMPLL4, 2.5, 0, 0),
+ { }
+};
+
+/* The three 3-phase PHY RCGs differ only in name and CMD_RCGR offset. */
+static struct clk_rcg2 csiphy0_3p_clk_src = {
+ .cmd_rcgr = 0x3240,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csiphy0_3p_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csiphy0_3p_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csiphy1_3p_clk_src = {
+ .cmd_rcgr = 0x3260,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csiphy0_3p_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csiphy1_3p_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csiphy2_3p_clk_src = {
+ .cmd_rcgr = 0x3280,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csiphy0_3p_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csiphy2_3p_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* JPEG engine clock rates (also used by jpeg_dma below). */
+static const struct freq_tbl ftbl_jpeg0_clk_src[] = {
+ F(75000000, P_GPLL0_DIV, 4, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(228571429, P_MMPLL0, 3.5, 0, 0),
+ F(266666667, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(480000000, P_MMPLL4, 2, 0, 0),
+ { }
+};
+
+/* JPEG0 core clock RCG. */
+static struct clk_rcg2 jpeg0_clk_src = {
+ .cmd_rcgr = 0x3500,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_jpeg0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg0_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* Same as the jpeg0 table but without the 480 MHz MMPLL4 entry. */
+static const struct freq_tbl ftbl_jpeg2_clk_src[] = {
+ F(75000000, P_GPLL0_DIV, 4, 0, 0),
+ F(150000000, P_GPLL0, 4, 0, 0),
+ F(228571429, P_MMPLL0, 3.5, 0, 0),
+ F(266666667, P_MMPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ { }
+};
+
+/* JPEG2 core clock RCG. */
+static struct clk_rcg2 jpeg2_clk_src = {
+ .cmd_rcgr = 0x3540,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_jpeg2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg2_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* JPEG DMA clock RCG; reuses the full jpeg0 frequency table. */
+static struct clk_rcg2 jpeg_dma_clk_src = {
+ .cmd_rcgr = 0x3560,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_jpeg0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg_dma_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* VFE (video front end / camera ISP) clock rates, shared by vfe0/vfe1. */
+static const struct freq_tbl ftbl_vfe0_clk_src[] = {
+ F(75000000, P_GPLL0_DIV, 4, 0, 0),
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(300000000, P_GPLL0, 2, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(480000000, P_MMPLL4, 2, 0, 0),
+ F(600000000, P_GPLL0, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x3600,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_vfe0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe0_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+ .cmd_rcgr = 0x3620,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_vfe0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe1_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* CPP (camera post processor) clock rates. */
+static const struct freq_tbl ftbl_cpp_clk_src[] = {
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(320000000, P_MMPLL0, 2.5, 0, 0),
+ F(480000000, P_MMPLL4, 2, 0, 0),
+ F(640000000, P_MMPLL4, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+ .cmd_rcgr = 0x3640,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_cpp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cpp_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* CSI interface clock rates, shared by csi0..csi3. */
+static const struct freq_tbl ftbl_csi0_clk_src[] = {
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(266666667, P_MMPLL0, 3, 0, 0),
+ F(480000000, P_MMPLL4, 2, 0, 0),
+ F(600000000, P_GPLL0, 1, 0, 0),
+ { }
+};
+
+/* csi0..csi3 RCGs differ only in name and CMD_RCGR offset. */
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x3090,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x3100,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+ .cmd_rcgr = 0x3160,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 csi3_clk_src = {
+ .cmd_rcgr = 0x31c0,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi3_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll1_mmpll4_mmpll3_gpll0_gpll0_div,
+ .num_parents = 7,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/* Face-detect core clock rates. */
+static const struct freq_tbl ftbl_fd_core_clk_src[] = {
+ F(100000000, P_GPLL0_DIV, 3, 0, 0),
+ F(200000000, P_GPLL0, 3, 0, 0),
+ F(400000000, P_MMPLL0, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 fd_core_clk_src = {
+ .cmd_rcgr = 0x3b00,
+ .hid_width = 5,
+ .parent_map = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div_map,
+ .freq_tbl = ftbl_fd_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "fd_core_clk_src",
+ .parent_names = mmss_xo_mmpll0_mmpll4_gpll0_gpll0_div,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+/*
+ * Branch gates below: each toggles BIT(0) of enable_reg and polls halt_reg
+ * (clk_branch2_ops); CLK_SET_RATE_PARENT forwards rate requests to the
+ * parent RCG named in parent_names.
+ */
+static struct clk_branch mmss_mmagic_ahb_clk = {
+ .halt_reg = 0x5024,
+ .clkr = {
+ .enable_reg = 0x5024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_mmagic_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ /*
+ * CLK_IGNORE_UNUSED: this AHB branch is deliberately not
+ * gated at late-init — presumably other MMSS register
+ * access depends on it; confirm against the downstream
+ * clock plan before changing.
+ */
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_mmagic_cfg_ahb_clk = {
+ .halt_reg = 0x5054,
+ .clkr = {
+ .enable_reg = 0x5054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_mmagic_cfg_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_misc_ahb_clk = {
+ .halt_reg = 0x5018,
+ .clkr = {
+ .enable_reg = 0x5018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_misc_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Sourced directly from the crystal oscillator ("xo"), so no rate flag. */
+static struct clk_branch mmss_misc_cxo_clk = {
+ .halt_reg = 0x5014,
+ .clkr = {
+ .enable_reg = 0x5014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_misc_cxo_clk",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_mmagic_axi_clk = {
+ .halt_reg = 0x506c,
+ .clkr = {
+ .enable_reg = 0x506c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_mmagic_axi_clk",
+ .parent_names = (const char *[]){ "axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_mmagic_maxi_clk = {
+ .halt_reg = 0x5074,
+ .clkr = {
+ .enable_reg = 0x5074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_mmagic_maxi_clk",
+ .parent_names = (const char *[]){ "maxi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Camera-subsystem mmagic bus gates and SMMU interface gates. */
+static struct clk_branch mmagic_camss_axi_clk = {
+ .halt_reg = 0x3c44,
+ .clkr = {
+ .enable_reg = 0x3c44,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmagic_camss_axi_clk",
+ .parent_names = (const char *[]){ "axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Parented on a GCC clock (registered by the global clock controller). */
+static struct clk_branch mmagic_camss_noc_cfg_ahb_clk = {
+ .halt_reg = 0x3c48,
+ .clkr = {
+ .enable_reg = 0x3c48,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmagic_camss_noc_cfg_ahb_clk",
+ .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Per-master SMMU gates come in AHB (config) / AXI (data) pairs. */
+static struct clk_branch smmu_vfe_ahb_clk = {
+ .halt_reg = 0x3c04,
+ .clkr = {
+ .enable_reg = 0x3c04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "smmu_vfe_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch smmu_vfe_axi_clk = {
+ .halt_reg = 0x3c08,
+ .clkr = {
+ .enable_reg = 0x3c08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "smmu_vfe_axi_clk",
+ .parent_names = (const char *[]){ "axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch smmu_cpp_ahb_clk = {
+ .halt_reg = 0x3c14,
+ .clkr = {
+ .enable_reg = 0x3c14,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "smmu_cpp_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch smmu_cpp_axi_clk = {
+ .halt_reg = 0x3c18,
+ .clkr = {
+ .enable_reg = 0x3c18,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "smmu_cpp_axi_clk",
+ .parent_names = (const char *[]){ "axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch smmu_jpeg_ahb_clk = {
+ .halt_reg = 0x3c24,
+ .clkr = {
+ .enable_reg = 0x3c24,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "smmu_jpeg_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch smmu_jpeg_axi_clk = {
+ .halt_reg = 0x3c28,
+ .clkr = {
+ .enable_reg = 0x3c28,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "smmu_jpeg_axi_clk",
+ .parent_names = (const char *[]){ "axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Display (MDSS), video and BIMC mmagic bus gates plus their SMMU gates. */
+static struct clk_branch mmagic_mdss_axi_clk = {
+ .halt_reg = 0x2474,
+ .clkr = {
+ .enable_reg = 0x2474,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmagic_mdss_axi_clk",
+ .parent_names = (const char *[]){ "axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* NoC config gates are parented on a GCC-registered clock. */
+static struct clk_branch mmagic_mdss_noc_cfg_ahb_clk = {
+ .halt_reg = 0x2478,
+ .clkr = {
+ .enable_reg = 0x2478,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmagic_mdss_noc_cfg_ahb_clk",
+ .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch smmu_rot_ahb_clk = {
+ .halt_reg = 0x2444,
+ .clkr = {
+ .enable_reg = 0x2444,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "smmu_rot_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch smmu_rot_axi_clk = {
+ .halt_reg = 0x2448,
+ .clkr = {
+ .enable_reg = 0x2448,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "smmu_rot_axi_clk",
+ .parent_names = (const char *[]){ "axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch smmu_mdp_ahb_clk = {
+ .halt_reg = 0x2454,
+ .clkr = {
+ .enable_reg = 0x2454,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "smmu_mdp_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch smmu_mdp_axi_clk = {
+ .halt_reg = 0x2458,
+ .clkr = {
+ .enable_reg = 0x2458,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "smmu_mdp_axi_clk",
+ .parent_names = (const char *[]){ "axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmagic_video_axi_clk = {
+ .halt_reg = 0x1194,
+ .clkr = {
+ .enable_reg = 0x1194,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmagic_video_axi_clk",
+ .parent_names = (const char *[]){ "axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmagic_video_noc_cfg_ahb_clk = {
+ .halt_reg = 0x1198,
+ .clkr = {
+ .enable_reg = 0x1198,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmagic_video_noc_cfg_ahb_clk",
+ .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch smmu_video_ahb_clk = {
+ .halt_reg = 0x1174,
+ .clkr = {
+ .enable_reg = 0x1174,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "smmu_video_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch smmu_video_axi_clk = {
+ .halt_reg = 0x1178,
+ .clkr = {
+ .enable_reg = 0x1178,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "smmu_video_axi_clk",
+ .parent_names = (const char *[]){ "axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmagic_bimc_axi_clk = {
+ .halt_reg = 0x5294,
+ .clkr = {
+ .enable_reg = 0x5294,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmagic_bimc_axi_clk",
+ .parent_names = (const char *[]){ "axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmagic_bimc_noc_cfg_ahb_clk = {
+ .halt_reg = 0x5298,
+ .clkr = {
+ .enable_reg = 0x5298,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmagic_bimc_noc_cfg_ahb_clk",
+ .parent_names = (const char *[]){ "gcc_mmss_noc_cfg_ahb_clk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* GPU, VMEM and RBCPR branch gates. */
+static struct clk_branch gpu_gx_gfx3d_clk = {
+ .halt_reg = 0x4028,
+ .clkr = {
+ .enable_reg = 0x4028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_gx_gfx3d_clk",
+ .parent_names = (const char *[]){ "gfx3d_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_gx_rbbmtimer_clk = {
+ .halt_reg = 0x40b0,
+ .clkr = {
+ .enable_reg = 0x40b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_gx_rbbmtimer_clk",
+ .parent_names = (const char *[]){ "rbbmtimer_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_ahb_clk = {
+ .halt_reg = 0x403c,
+ .clkr = {
+ .enable_reg = 0x403c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_aon_isense_clk = {
+ .halt_reg = 0x4044,
+ .clkr = {
+ .enable_reg = 0x4044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_aon_isense_clk",
+ .parent_names = (const char *[]){ "isense_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch vmem_maxi_clk = {
+ .halt_reg = 0x1204,
+ .clkr = {
+ .enable_reg = 0x1204,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "vmem_maxi_clk",
+ .parent_names = (const char *[]){ "maxi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch vmem_ahb_clk = {
+ .halt_reg = 0x1208,
+ .clkr = {
+ .enable_reg = 0x1208,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "vmem_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_rbcpr_clk = {
+ .halt_reg = 0x4084,
+ .clkr = {
+ .enable_reg = 0x4084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_rbcpr_clk",
+ .parent_names = (const char *[]){ "rbcpr_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mmss_rbcpr_ahb_clk = {
+ .halt_reg = 0x4088,
+ .clkr = {
+ .enable_reg = 0x4088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mmss_rbcpr_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Video encoder/decoder (venus) branch gates. */
+static struct clk_branch video_core_clk = {
+ .halt_reg = 0x1028,
+ .clkr = {
+ .enable_reg = 0x1028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_core_clk",
+ .parent_names = (const char *[]){ "video_core_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_axi_clk = {
+ .halt_reg = 0x1034,
+ .clkr = {
+ .enable_reg = 0x1034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_axi_clk",
+ .parent_names = (const char *[]){ "axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_maxi_clk = {
+ .halt_reg = 0x1038,
+ .clkr = {
+ .enable_reg = 0x1038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_maxi_clk",
+ .parent_names = (const char *[]){ "maxi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_ahb_clk = {
+ .halt_reg = 0x1030,
+ .clkr = {
+ .enable_reg = 0x1030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Per-subcore gates; each tracks its own RCG. */
+static struct clk_branch video_subcore0_clk = {
+ .halt_reg = 0x1048,
+ .clkr = {
+ .enable_reg = 0x1048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_subcore0_clk",
+ .parent_names = (const char *[]){ "video_subcore0_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_subcore1_clk = {
+ .halt_reg = 0x104c,
+ .clkr = {
+ .enable_reg = 0x104c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "video_subcore1_clk",
+ .parent_names = (const char *[]){ "video_subcore1_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* MDSS (display subsystem) branch gates; each tracks the same-named RCG. */
+static struct clk_branch mdss_ahb_clk = {
+ .halt_reg = 0x2308,
+ .clkr = {
+ .enable_reg = 0x2308,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_hdmi_ahb_clk = {
+ .halt_reg = 0x230c,
+ .clkr = {
+ .enable_reg = 0x230c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_hdmi_ahb_clk",
+ .parent_names = (const char *[]){ "ahb_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_axi_clk = {
+ .halt_reg = 0x2310,
+ .clkr = {
+ .enable_reg = 0x2310,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_axi_clk",
+ .parent_names = (const char *[]){ "axi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_pclk0_clk = {
+ .halt_reg = 0x2314,
+ .clkr = {
+ .enable_reg = 0x2314,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_pclk0_clk",
+ .parent_names = (const char *[]){ "pclk0_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_pclk1_clk = {
+ .halt_reg = 0x2318,
+ .clkr = {
+ .enable_reg = 0x2318,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_pclk1_clk",
+ .parent_names = (const char *[]){ "pclk1_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_mdp_clk = {
+ .halt_reg = 0x231c,
+ .clkr = {
+ .enable_reg = 0x231c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_mdp_clk",
+ .parent_names = (const char *[]){ "mdp_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_extpclk_clk = {
+ .halt_reg = 0x2324,
+ .clkr = {
+ .enable_reg = 0x2324,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_extpclk_clk",
+ .parent_names = (const char *[]){ "extpclk_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_vsync_clk = {
+ .halt_reg = 0x2328,
+ .clkr = {
+ .enable_reg = 0x2328,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_vsync_clk",
+ .parent_names = (const char *[]){ "vsync_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_hdmi_clk = {
+ .halt_reg = 0x2338,
+ .clkr = {
+ .enable_reg = 0x2338,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_hdmi_clk",
+ .parent_names = (const char *[]){ "hdmi_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte0_clk = {
+ .halt_reg = 0x233c,
+ .clkr = {
+ .enable_reg = 0x233c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte0_clk",
+ .parent_names = (const char *[]){ "byte0_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_byte1_clk = {
+ .halt_reg = 0x2340,
+ .clkr = {
+ .enable_reg = 0x2340,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_byte1_clk",
+ .parent_names = (const char *[]){ "byte1_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_esc0_clk = {
+ .halt_reg = 0x2344,
+ .clkr = {
+ .enable_reg = 0x2344,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_esc0_clk",
+ .parent_names = (const char *[]){ "esc0_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch mdss_esc1_clk = {
+ .halt_reg = 0x2348,
+ .clkr = {
+ .enable_reg = 0x2348,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "mdss_esc1_clk",
+ .parent_names = (const char *[]){ "esc1_clk_src" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
/*
 * CAMSS (camera subsystem) housekeeping branches: bus (AHB), general
 * purpose, sensor master clocks (MCLK), CCI and CSI PHY timer/3-phase
 * clocks. The many *_ahb branches all gate the shared ahb_clk_src RCG;
 * the rest gate their like-named RCG. Same layout as the MDSS branches:
 * halt register == enable register, enable bit 0.
 */
static struct clk_branch camss_top_ahb_clk = {
	.halt_reg = 0x3484,
	.clkr = {
		.enable_reg = 0x3484,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_top_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_ahb_clk = {
	.halt_reg = 0x348c,
	.clkr = {
		.enable_reg = 0x348c,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_micro_ahb_clk = {
	.halt_reg = 0x3494,
	.clkr = {
		.enable_reg = 0x3494,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_micro_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_gp0_clk = {
	.halt_reg = 0x3444,
	.clkr = {
		.enable_reg = 0x3444,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_gp0_clk",
			.parent_names = (const char *[]){ "camss_gp0_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_gp1_clk = {
	.halt_reg = 0x3474,
	.clkr = {
		.enable_reg = 0x3474,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_gp1_clk",
			.parent_names = (const char *[]){ "camss_gp1_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

/* Camera sensor master clocks, MCLK0..MCLK3. */
static struct clk_branch camss_mclk0_clk = {
	.halt_reg = 0x3384,
	.clkr = {
		.enable_reg = 0x3384,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_mclk0_clk",
			.parent_names = (const char *[]){ "mclk0_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_mclk1_clk = {
	.halt_reg = 0x33b4,
	.clkr = {
		.enable_reg = 0x33b4,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_mclk1_clk",
			.parent_names = (const char *[]){ "mclk1_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_mclk2_clk = {
	.halt_reg = 0x33e4,
	.clkr = {
		.enable_reg = 0x33e4,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_mclk2_clk",
			.parent_names = (const char *[]){ "mclk2_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_mclk3_clk = {
	.halt_reg = 0x3414,
	.clkr = {
		.enable_reg = 0x3414,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_mclk3_clk",
			.parent_names = (const char *[]){ "mclk3_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

/* CCI (camera control interface) core and bus clocks. */
static struct clk_branch camss_cci_clk = {
	.halt_reg = 0x3344,
	.clkr = {
		.enable_reg = 0x3344,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_cci_clk",
			.parent_names = (const char *[]){ "cci_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_cci_ahb_clk = {
	.halt_reg = 0x3348,
	.clkr = {
		.enable_reg = 0x3348,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_cci_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

/* CSI PHY timer clocks, one per PHY. */
static struct clk_branch camss_csi0phytimer_clk = {
	.halt_reg = 0x3024,
	.clkr = {
		.enable_reg = 0x3024,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi0phytimer_clk",
			.parent_names = (const char *[]){ "csi0phytimer_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi1phytimer_clk = {
	.halt_reg = 0x3054,
	.clkr = {
		.enable_reg = 0x3054,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi1phytimer_clk",
			.parent_names = (const char *[]){ "csi1phytimer_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi2phytimer_clk = {
	.halt_reg = 0x3084,
	.clkr = {
		.enable_reg = 0x3084,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi2phytimer_clk",
			.parent_names = (const char *[]){ "csi2phytimer_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

/* 3-phase CSI PHY clocks, one per PHY. */
static struct clk_branch camss_csiphy0_3p_clk = {
	.halt_reg = 0x3234,
	.clkr = {
		.enable_reg = 0x3234,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csiphy0_3p_clk",
			.parent_names = (const char *[]){ "csiphy0_3p_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csiphy1_3p_clk = {
	.halt_reg = 0x3254,
	.clkr = {
		.enable_reg = 0x3254,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csiphy1_3p_clk",
			.parent_names = (const char *[]){ "csiphy1_3p_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csiphy2_3p_clk = {
	.halt_reg = 0x3274,
	.clkr = {
		.enable_reg = 0x3274,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csiphy2_3p_clk",
			.parent_names = (const char *[]){ "csiphy2_3p_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
+
/*
 * JPEG encoder/decoder branch clocks: two cores, a DMA engine, and the
 * shared AHB/AXI bus gates (parented to the common ahb/axi RCGs).
 */
static struct clk_branch camss_jpeg0_clk = {
	.halt_reg = 0x35a8,
	.clkr = {
		.enable_reg = 0x35a8,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_jpeg0_clk",
			.parent_names = (const char *[]){ "jpeg0_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_jpeg2_clk = {
	.halt_reg = 0x35b0,
	.clkr = {
		.enable_reg = 0x35b0,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_jpeg2_clk",
			.parent_names = (const char *[]){ "jpeg2_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_jpeg_dma_clk = {
	.halt_reg = 0x35c0,
	.clkr = {
		.enable_reg = 0x35c0,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_jpeg_dma_clk",
			.parent_names = (const char *[]){ "jpeg_dma_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_jpeg_ahb_clk = {
	.halt_reg = 0x35b4,
	.clkr = {
		.enable_reg = 0x35b4,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_jpeg_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_jpeg_axi_clk = {
	.halt_reg = 0x35b8,
	.clkr = {
		.enable_reg = 0x35b8,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_jpeg_axi_clk",
			.parent_names = (const char *[]){ "axi_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
+
/*
 * VFE (video front end) and CPP (camera post processor) branches. The
 * core, stream and csi_vfe gates for VFE0/VFE1 all run off the matching
 * vfe0/vfe1 RCG; the bus gates run off the shared ahb/axi RCGs.
 */
static struct clk_branch camss_vfe_ahb_clk = {
	.halt_reg = 0x36b8,
	.clkr = {
		.enable_reg = 0x36b8,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_vfe_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_vfe_axi_clk = {
	.halt_reg = 0x36bc,
	.clkr = {
		.enable_reg = 0x36bc,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_vfe_axi_clk",
			.parent_names = (const char *[]){ "axi_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_vfe0_clk = {
	.halt_reg = 0x36a8,
	.clkr = {
		.enable_reg = 0x36a8,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_vfe0_clk",
			.parent_names = (const char *[]){ "vfe0_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_vfe0_stream_clk = {
	.halt_reg = 0x3720,
	.clkr = {
		.enable_reg = 0x3720,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_vfe0_stream_clk",
			.parent_names = (const char *[]){ "vfe0_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_vfe0_ahb_clk = {
	.halt_reg = 0x3668,
	.clkr = {
		.enable_reg = 0x3668,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_vfe0_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_vfe1_clk = {
	.halt_reg = 0x36ac,
	.clkr = {
		.enable_reg = 0x36ac,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_vfe1_clk",
			.parent_names = (const char *[]){ "vfe1_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_vfe1_stream_clk = {
	.halt_reg = 0x3724,
	.clkr = {
		.enable_reg = 0x3724,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_vfe1_stream_clk",
			.parent_names = (const char *[]){ "vfe1_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_vfe1_ahb_clk = {
	.halt_reg = 0x3678,
	.clkr = {
		.enable_reg = 0x3678,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_vfe1_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

/* CSI-to-VFE interface gates, clocked from the respective VFE RCG. */
static struct clk_branch camss_csi_vfe0_clk = {
	.halt_reg = 0x3704,
	.clkr = {
		.enable_reg = 0x3704,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi_vfe0_clk",
			.parent_names = (const char *[]){ "vfe0_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi_vfe1_clk = {
	.halt_reg = 0x3714,
	.clkr = {
		.enable_reg = 0x3714,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi_vfe1_clk",
			.parent_names = (const char *[]){ "vfe1_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_cpp_vbif_ahb_clk = {
	.halt_reg = 0x36c8,
	.clkr = {
		.enable_reg = 0x36c8,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_cpp_vbif_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_cpp_axi_clk = {
	.halt_reg = 0x36c4,
	.clkr = {
		.enable_reg = 0x36c4,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_cpp_axi_clk",
			.parent_names = (const char *[]){ "axi_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_cpp_clk = {
	.halt_reg = 0x36b0,
	.clkr = {
		.enable_reg = 0x36b0,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_cpp_clk",
			.parent_names = (const char *[]){ "cpp_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_cpp_ahb_clk = {
	.halt_reg = 0x36b4,
	.clkr = {
		.enable_reg = 0x36b4,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_cpp_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
+
/*
 * CSI receiver pipelines 0-3. Each pipeline has five gates — core, AHB,
 * PHY, RDI (raw dump interface) and PIX (pixel interface) — where the
 * PHY/RDI/PIX gates reuse that pipeline's core csiN_clk_src RCG and the
 * AHB gate uses the shared ahb_clk_src. camss_ispif_ahb_clk at the end
 * is the ISPIF bus gate.
 */
static struct clk_branch camss_csi0_clk = {
	.halt_reg = 0x30b4,
	.clkr = {
		.enable_reg = 0x30b4,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi0_clk",
			.parent_names = (const char *[]){ "csi0_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi0_ahb_clk = {
	.halt_reg = 0x30bc,
	.clkr = {
		.enable_reg = 0x30bc,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi0_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi0phy_clk = {
	.halt_reg = 0x30c4,
	.clkr = {
		.enable_reg = 0x30c4,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi0phy_clk",
			.parent_names = (const char *[]){ "csi0_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi0rdi_clk = {
	.halt_reg = 0x30d4,
	.clkr = {
		.enable_reg = 0x30d4,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi0rdi_clk",
			.parent_names = (const char *[]){ "csi0_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi0pix_clk = {
	.halt_reg = 0x30e4,
	.clkr = {
		.enable_reg = 0x30e4,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi0pix_clk",
			.parent_names = (const char *[]){ "csi0_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi1_clk = {
	.halt_reg = 0x3124,
	.clkr = {
		.enable_reg = 0x3124,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi1_clk",
			.parent_names = (const char *[]){ "csi1_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi1_ahb_clk = {
	.halt_reg = 0x3128,
	.clkr = {
		.enable_reg = 0x3128,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi1_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi1phy_clk = {
	.halt_reg = 0x3134,
	.clkr = {
		.enable_reg = 0x3134,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi1phy_clk",
			.parent_names = (const char *[]){ "csi1_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi1rdi_clk = {
	.halt_reg = 0x3144,
	.clkr = {
		.enable_reg = 0x3144,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi1rdi_clk",
			.parent_names = (const char *[]){ "csi1_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi1pix_clk = {
	.halt_reg = 0x3154,
	.clkr = {
		.enable_reg = 0x3154,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi1pix_clk",
			.parent_names = (const char *[]){ "csi1_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi2_clk = {
	.halt_reg = 0x3184,
	.clkr = {
		.enable_reg = 0x3184,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi2_clk",
			.parent_names = (const char *[]){ "csi2_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi2_ahb_clk = {
	.halt_reg = 0x3188,
	.clkr = {
		.enable_reg = 0x3188,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi2_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi2phy_clk = {
	.halt_reg = 0x3194,
	.clkr = {
		.enable_reg = 0x3194,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi2phy_clk",
			.parent_names = (const char *[]){ "csi2_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi2rdi_clk = {
	.halt_reg = 0x31a4,
	.clkr = {
		.enable_reg = 0x31a4,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi2rdi_clk",
			.parent_names = (const char *[]){ "csi2_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi2pix_clk = {
	.halt_reg = 0x31b4,
	.clkr = {
		.enable_reg = 0x31b4,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi2pix_clk",
			.parent_names = (const char *[]){ "csi2_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi3_clk = {
	.halt_reg = 0x31e4,
	.clkr = {
		.enable_reg = 0x31e4,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi3_clk",
			.parent_names = (const char *[]){ "csi3_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi3_ahb_clk = {
	.halt_reg = 0x31e8,
	.clkr = {
		.enable_reg = 0x31e8,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi3_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi3phy_clk = {
	.halt_reg = 0x31f4,
	.clkr = {
		.enable_reg = 0x31f4,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi3phy_clk",
			.parent_names = (const char *[]){ "csi3_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi3rdi_clk = {
	.halt_reg = 0x3204,
	.clkr = {
		.enable_reg = 0x3204,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi3rdi_clk",
			.parent_names = (const char *[]){ "csi3_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_csi3pix_clk = {
	.halt_reg = 0x3214,
	.clkr = {
		.enable_reg = 0x3214,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_csi3pix_clk",
			.parent_names = (const char *[]){ "csi3_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch camss_ispif_ahb_clk = {
	.halt_reg = 0x3224,
	.clkr = {
		.enable_reg = 0x3224,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "camss_ispif_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
+
/*
 * FD (face detection) core clocks; both gates run off the shared
 * fd_core_clk_src RCG.
 */
static struct clk_branch fd_core_clk = {
	.halt_reg = 0x3b68,
	.clkr = {
		.enable_reg = 0x3b68,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "fd_core_clk",
			.parent_names = (const char *[]){ "fd_core_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch fd_core_uar_clk = {
	.halt_reg = 0x3b6c,
	.clkr = {
		.enable_reg = 0x3b6c,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "fd_core_uar_clk",
			.parent_names = (const char *[]){ "fd_core_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
+
/*
 * FD AHB bus gate.
 *
 * NOTE(review): 0x3ba74 is a five-hex-digit offset while every sibling
 * branch in this block uses a four-digit offset (and the fd_gdsc CXCs
 * below are 0x3b68/0x3b6c). Possibly a typo for 0x3b74 — confirm against
 * the MMSS_CC register map before changing.
 */
static struct clk_branch fd_ahb_clk = {
	.halt_reg = 0x3ba74,
	.clkr = {
		.enable_reg = 0x3ba74,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "fd_ahb_clk",
			.parent_names = (const char *[]){ "ahb_clk_src" },
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
+
/* Non-regmap clk_hw clocks registered alongside the regmap clocks. */
static struct clk_hw *mmcc_msm8996_hws[] = {
	&gpll0_div.hw,
};
+
/*
 * GDSCs (globally distributed switch controllers, i.e. power domains).
 * The mmagic_* domains are bus/fabric domains that parent the leaf
 * domains; VOTABLE domains are shared with other masters and only voted
 * on, not directly controlled. .cxcs lists branch-enable registers
 * whose clocks must be force-enabled/retained around state changes.
 */
static struct gdsc mmagic_bimc_gdsc = {
	.gdscr = 0x529c,
	.pd = {
		.name = "mmagic_bimc",
	},
	.pwrsts = PWRSTS_OFF_ON,
};

static struct gdsc mmagic_video_gdsc = {
	.gdscr = 0x119c,
	.gds_hw_ctrl = 0x120c,
	.pd = {
		.name = "mmagic_video",
	},
	.pwrsts = PWRSTS_OFF_ON,
	.flags = VOTABLE,
};

static struct gdsc mmagic_mdss_gdsc = {
	.gdscr = 0x247c,
	.gds_hw_ctrl = 0x2480,
	.pd = {
		.name = "mmagic_mdss",
	},
	.pwrsts = PWRSTS_OFF_ON,
	.flags = VOTABLE,
};

static struct gdsc mmagic_camss_gdsc = {
	.gdscr = 0x3c4c,
	.gds_hw_ctrl = 0x3c50,
	.pd = {
		.name = "mmagic_camss",
	},
	.pwrsts = PWRSTS_OFF_ON,
	.flags = VOTABLE,
};

static struct gdsc venus_gdsc = {
	.gdscr = 0x1024,
	/* video_core/video_axi/video_maxi branch enables */
	.cxcs = (unsigned int []){ 0x1028, 0x1034, 0x1038 },
	.cxc_count = 3,
	.pd = {
		.name = "venus",
	},
	.parent = &mmagic_video_gdsc.pd,
	.pwrsts = PWRSTS_OFF_ON,
};

static struct gdsc venus_core0_gdsc = {
	.gdscr = 0x1040,
	.cxcs = (unsigned int []){ 0x1048 },
	.cxc_count = 1,
	.pd = {
		.name = "venus_core0",
	},
	.pwrsts = PWRSTS_OFF_ON,
};

static struct gdsc venus_core1_gdsc = {
	.gdscr = 0x1044,
	.cxcs = (unsigned int []){ 0x104c },
	.cxc_count = 1,
	.pd = {
		.name = "venus_core1",
	},
	.pwrsts = PWRSTS_OFF_ON,
};

static struct gdsc camss_gdsc = {
	.gdscr = 0x34a0,
	/* camss_vfe_axi/camss_cpp_axi branch enables */
	.cxcs = (unsigned int []){ 0x36bc, 0x36c4 },
	.cxc_count = 2,
	.pd = {
		.name = "camss",
	},
	.parent = &mmagic_camss_gdsc.pd,
	.pwrsts = PWRSTS_OFF_ON,
};

static struct gdsc vfe0_gdsc = {
	.gdscr = 0x3664,
	.cxcs = (unsigned int []){ 0x36a8 },
	.cxc_count = 1,
	.pd = {
		.name = "vfe0",
	},
	.parent = &camss_gdsc.pd,
	.pwrsts = PWRSTS_OFF_ON,
};
+
+static struct gdsc vfe1_gdsc = {
+ .gdscr = 0x3674,
+ .cxcs = (unsigned int []){ 0x36ac },
+ .cxc_count = 1,
+ .pd = {
+ .name = "vfe0",
+ },
+ .parent = &camss_gdsc.pd,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
/* JPEG power domain; CXCs cover jpeg0/jpeg2/jpeg_dma/jpeg_axi branches. */
static struct gdsc jpeg_gdsc = {
	.gdscr = 0x35a4,
	.cxcs = (unsigned int []){ 0x35a8, 0x35b0, 0x35c0, 0x35b8 },
	.cxc_count = 4,
	.pd = {
		.name = "jpeg",
	},
	.parent = &camss_gdsc.pd,
	.pwrsts = PWRSTS_OFF_ON,
};

static struct gdsc cpp_gdsc = {
	.gdscr = 0x36d4,
	.cxcs = (unsigned int []){ 0x36b0 },	/* camss_cpp_clk */
	.cxc_count = 1,
	.pd = {
		.name = "cpp",
	},
	.parent = &camss_gdsc.pd,
	.pwrsts = PWRSTS_OFF_ON,
};

static struct gdsc fd_gdsc = {
	.gdscr = 0x3b64,
	.cxcs = (unsigned int []){ 0x3b68, 0x3b6c },	/* fd_core, fd_core_uar */
	.cxc_count = 2,
	.pd = {
		.name = "fd",
	},
	.parent = &camss_gdsc.pd,
	.pwrsts = PWRSTS_OFF_ON,
};

static struct gdsc mdss_gdsc = {
	.gdscr = 0x2304,
	.cxcs = (unsigned int []){ 0x2310, 0x231c },	/* mdss_pclk0, mdss_mdp */
	.cxc_count = 2,
	.pd = {
		.name = "mdss",
	},
	.parent = &mmagic_mdss_gdsc.pd,
	.pwrsts = PWRSTS_OFF_ON,
};
+
/*
 * Lookup table mapping the DT binding indices (dt-bindings clock IDs)
 * to the regmap clocks defined above; consumed by qcom_cc_really_probe().
 */
static struct clk_regmap *mmcc_msm8996_clocks[] = {
	[MMPLL0_EARLY] = &mmpll0_early.clkr,
	[MMPLL0_PLL] = &mmpll0.clkr,
	[MMPLL1_EARLY] = &mmpll1_early.clkr,
	[MMPLL1_PLL] = &mmpll1.clkr,
	[MMPLL2_EARLY] = &mmpll2_early.clkr,
	[MMPLL2_PLL] = &mmpll2.clkr,
	[MMPLL3_EARLY] = &mmpll3_early.clkr,
	[MMPLL3_PLL] = &mmpll3.clkr,
	[MMPLL4_EARLY] = &mmpll4_early.clkr,
	[MMPLL4_PLL] = &mmpll4.clkr,
	[MMPLL5_EARLY] = &mmpll5_early.clkr,
	[MMPLL5_PLL] = &mmpll5.clkr,
	[MMPLL8_EARLY] = &mmpll8_early.clkr,
	[MMPLL8_PLL] = &mmpll8.clkr,
	[MMPLL9_EARLY] = &mmpll9_early.clkr,
	[MMPLL9_PLL] = &mmpll9.clkr,
	[AHB_CLK_SRC] = &ahb_clk_src.clkr,
	[AXI_CLK_SRC] = &axi_clk_src.clkr,
	[MAXI_CLK_SRC] = &maxi_clk_src.clkr,
	[GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
	[RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
	[ISENSE_CLK_SRC] = &isense_clk_src.clkr,
	[RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
	[VIDEO_CORE_CLK_SRC] = &video_core_clk_src.clkr,
	[VIDEO_SUBCORE0_CLK_SRC] = &video_subcore0_clk_src.clkr,
	[VIDEO_SUBCORE1_CLK_SRC] = &video_subcore1_clk_src.clkr,
	[PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
	[PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
	[MDP_CLK_SRC] = &mdp_clk_src.clkr,
	[EXTPCLK_CLK_SRC] = &extpclk_clk_src.clkr,
	[VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
	[HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
	[BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
	[BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
	[ESC0_CLK_SRC] = &esc0_clk_src.clkr,
	[ESC1_CLK_SRC] = &esc1_clk_src.clkr,
	[CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
	[CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
	[MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
	[MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
	[MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
	[MCLK3_CLK_SRC] = &mclk3_clk_src.clkr,
	[CCI_CLK_SRC] = &cci_clk_src.clkr,
	[CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
	[CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
	[CSI2PHYTIMER_CLK_SRC] = &csi2phytimer_clk_src.clkr,
	[CSIPHY0_3P_CLK_SRC] = &csiphy0_3p_clk_src.clkr,
	[CSIPHY1_3P_CLK_SRC] = &csiphy1_3p_clk_src.clkr,
	[CSIPHY2_3P_CLK_SRC] = &csiphy2_3p_clk_src.clkr,
	[JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
	[JPEG2_CLK_SRC] = &jpeg2_clk_src.clkr,
	[JPEG_DMA_CLK_SRC] = &jpeg_dma_clk_src.clkr,
	[VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
	[VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
	[CPP_CLK_SRC] = &cpp_clk_src.clkr,
	[CSI0_CLK_SRC] = &csi0_clk_src.clkr,
	[CSI1_CLK_SRC] = &csi1_clk_src.clkr,
	[CSI2_CLK_SRC] = &csi2_clk_src.clkr,
	[CSI3_CLK_SRC] = &csi3_clk_src.clkr,
	[FD_CORE_CLK_SRC] = &fd_core_clk_src.clkr,
	[MMSS_MMAGIC_AHB_CLK] = &mmss_mmagic_ahb_clk.clkr,
	[MMSS_MMAGIC_CFG_AHB_CLK] = &mmss_mmagic_cfg_ahb_clk.clkr,
	[MMSS_MISC_AHB_CLK] = &mmss_misc_ahb_clk.clkr,
	[MMSS_MISC_CXO_CLK] = &mmss_misc_cxo_clk.clkr,
	[MMSS_MMAGIC_AXI_CLK] = &mmss_mmagic_axi_clk.clkr,
	[MMSS_MMAGIC_MAXI_CLK] = &mmss_mmagic_maxi_clk.clkr,
	[MMAGIC_CAMSS_AXI_CLK] = &mmagic_camss_axi_clk.clkr,
	[MMAGIC_CAMSS_NOC_CFG_AHB_CLK] = &mmagic_camss_noc_cfg_ahb_clk.clkr,
	[SMMU_VFE_AHB_CLK] = &smmu_vfe_ahb_clk.clkr,
	[SMMU_VFE_AXI_CLK] = &smmu_vfe_axi_clk.clkr,
	[SMMU_CPP_AHB_CLK] = &smmu_cpp_ahb_clk.clkr,
	[SMMU_CPP_AXI_CLK] = &smmu_cpp_axi_clk.clkr,
	[SMMU_JPEG_AHB_CLK] = &smmu_jpeg_ahb_clk.clkr,
	[SMMU_JPEG_AXI_CLK] = &smmu_jpeg_axi_clk.clkr,
	[MMAGIC_MDSS_AXI_CLK] = &mmagic_mdss_axi_clk.clkr,
	[MMAGIC_MDSS_NOC_CFG_AHB_CLK] = &mmagic_mdss_noc_cfg_ahb_clk.clkr,
	[SMMU_ROT_AHB_CLK] = &smmu_rot_ahb_clk.clkr,
	[SMMU_ROT_AXI_CLK] = &smmu_rot_axi_clk.clkr,
	[SMMU_MDP_AHB_CLK] = &smmu_mdp_ahb_clk.clkr,
	[SMMU_MDP_AXI_CLK] = &smmu_mdp_axi_clk.clkr,
	[MMAGIC_VIDEO_AXI_CLK] = &mmagic_video_axi_clk.clkr,
	[MMAGIC_VIDEO_NOC_CFG_AHB_CLK] = &mmagic_video_noc_cfg_ahb_clk.clkr,
	[SMMU_VIDEO_AHB_CLK] = &smmu_video_ahb_clk.clkr,
	[SMMU_VIDEO_AXI_CLK] = &smmu_video_axi_clk.clkr,
	[MMAGIC_BIMC_AXI_CLK] = &mmagic_bimc_axi_clk.clkr,
	[MMAGIC_BIMC_NOC_CFG_AHB_CLK] = &mmagic_bimc_noc_cfg_ahb_clk.clkr,
	[GPU_GX_GFX3D_CLK] = &gpu_gx_gfx3d_clk.clkr,
	[GPU_GX_RBBMTIMER_CLK] = &gpu_gx_rbbmtimer_clk.clkr,
	[GPU_AHB_CLK] = &gpu_ahb_clk.clkr,
	[GPU_AON_ISENSE_CLK] = &gpu_aon_isense_clk.clkr,
	[VMEM_MAXI_CLK] = &vmem_maxi_clk.clkr,
	[VMEM_AHB_CLK] = &vmem_ahb_clk.clkr,
	[MMSS_RBCPR_CLK] = &mmss_rbcpr_clk.clkr,
	[MMSS_RBCPR_AHB_CLK] = &mmss_rbcpr_ahb_clk.clkr,
	[VIDEO_CORE_CLK] = &video_core_clk.clkr,
	[VIDEO_AXI_CLK] = &video_axi_clk.clkr,
	[VIDEO_MAXI_CLK] = &video_maxi_clk.clkr,
	[VIDEO_AHB_CLK] = &video_ahb_clk.clkr,
	[VIDEO_SUBCORE0_CLK] = &video_subcore0_clk.clkr,
	[VIDEO_SUBCORE1_CLK] = &video_subcore1_clk.clkr,
	[MDSS_AHB_CLK] = &mdss_ahb_clk.clkr,
	[MDSS_HDMI_AHB_CLK] = &mdss_hdmi_ahb_clk.clkr,
	[MDSS_AXI_CLK] = &mdss_axi_clk.clkr,
	[MDSS_PCLK0_CLK] = &mdss_pclk0_clk.clkr,
	[MDSS_PCLK1_CLK] = &mdss_pclk1_clk.clkr,
	[MDSS_MDP_CLK] = &mdss_mdp_clk.clkr,
	[MDSS_EXTPCLK_CLK] = &mdss_extpclk_clk.clkr,
	[MDSS_VSYNC_CLK] = &mdss_vsync_clk.clkr,
	[MDSS_HDMI_CLK] = &mdss_hdmi_clk.clkr,
	[MDSS_BYTE0_CLK] = &mdss_byte0_clk.clkr,
	[MDSS_BYTE1_CLK] = &mdss_byte1_clk.clkr,
	[MDSS_ESC0_CLK] = &mdss_esc0_clk.clkr,
	[MDSS_ESC1_CLK] = &mdss_esc1_clk.clkr,
	[CAMSS_TOP_AHB_CLK] = &camss_top_ahb_clk.clkr,
	[CAMSS_AHB_CLK] = &camss_ahb_clk.clkr,
	[CAMSS_MICRO_AHB_CLK] = &camss_micro_ahb_clk.clkr,
	[CAMSS_GP0_CLK] = &camss_gp0_clk.clkr,
	[CAMSS_GP1_CLK] = &camss_gp1_clk.clkr,
	[CAMSS_MCLK0_CLK] = &camss_mclk0_clk.clkr,
	[CAMSS_MCLK1_CLK] = &camss_mclk1_clk.clkr,
	[CAMSS_MCLK2_CLK] = &camss_mclk2_clk.clkr,
	[CAMSS_MCLK3_CLK] = &camss_mclk3_clk.clkr,
	[CAMSS_CCI_CLK] = &camss_cci_clk.clkr,
	[CAMSS_CCI_AHB_CLK] = &camss_cci_ahb_clk.clkr,
	[CAMSS_CSI0PHYTIMER_CLK] = &camss_csi0phytimer_clk.clkr,
	[CAMSS_CSI1PHYTIMER_CLK] = &camss_csi1phytimer_clk.clkr,
	[CAMSS_CSI2PHYTIMER_CLK] = &camss_csi2phytimer_clk.clkr,
	[CAMSS_CSIPHY0_3P_CLK] = &camss_csiphy0_3p_clk.clkr,
	[CAMSS_CSIPHY1_3P_CLK] = &camss_csiphy1_3p_clk.clkr,
	[CAMSS_CSIPHY2_3P_CLK] = &camss_csiphy2_3p_clk.clkr,
	[CAMSS_JPEG0_CLK] = &camss_jpeg0_clk.clkr,
	[CAMSS_JPEG2_CLK] = &camss_jpeg2_clk.clkr,
	[CAMSS_JPEG_DMA_CLK] = &camss_jpeg_dma_clk.clkr,
	[CAMSS_JPEG_AHB_CLK] = &camss_jpeg_ahb_clk.clkr,
	[CAMSS_JPEG_AXI_CLK] = &camss_jpeg_axi_clk.clkr,
	[CAMSS_VFE_AHB_CLK] = &camss_vfe_ahb_clk.clkr,
	[CAMSS_VFE_AXI_CLK] = &camss_vfe_axi_clk.clkr,
	[CAMSS_VFE0_CLK] = &camss_vfe0_clk.clkr,
	[CAMSS_VFE0_STREAM_CLK] = &camss_vfe0_stream_clk.clkr,
	[CAMSS_VFE0_AHB_CLK] = &camss_vfe0_ahb_clk.clkr,
	[CAMSS_VFE1_CLK] = &camss_vfe1_clk.clkr,
	[CAMSS_VFE1_STREAM_CLK] = &camss_vfe1_stream_clk.clkr,
	[CAMSS_VFE1_AHB_CLK] = &camss_vfe1_ahb_clk.clkr,
	[CAMSS_CSI_VFE0_CLK] = &camss_csi_vfe0_clk.clkr,
	[CAMSS_CSI_VFE1_CLK] = &camss_csi_vfe1_clk.clkr,
	[CAMSS_CPP_VBIF_AHB_CLK] = &camss_cpp_vbif_ahb_clk.clkr,
	[CAMSS_CPP_AXI_CLK] = &camss_cpp_axi_clk.clkr,
	[CAMSS_CPP_CLK] = &camss_cpp_clk.clkr,
	[CAMSS_CPP_AHB_CLK] = &camss_cpp_ahb_clk.clkr,
	[CAMSS_CSI0_CLK] = &camss_csi0_clk.clkr,
	[CAMSS_CSI0_AHB_CLK] = &camss_csi0_ahb_clk.clkr,
	[CAMSS_CSI0PHY_CLK] = &camss_csi0phy_clk.clkr,
	[CAMSS_CSI0RDI_CLK] = &camss_csi0rdi_clk.clkr,
	[CAMSS_CSI0PIX_CLK] = &camss_csi0pix_clk.clkr,
	[CAMSS_CSI1_CLK] = &camss_csi1_clk.clkr,
	[CAMSS_CSI1_AHB_CLK] = &camss_csi1_ahb_clk.clkr,
	[CAMSS_CSI1PHY_CLK] = &camss_csi1phy_clk.clkr,
	[CAMSS_CSI1RDI_CLK] = &camss_csi1rdi_clk.clkr,
	[CAMSS_CSI1PIX_CLK] = &camss_csi1pix_clk.clkr,
	[CAMSS_CSI2_CLK] = &camss_csi2_clk.clkr,
	[CAMSS_CSI2_AHB_CLK] = &camss_csi2_ahb_clk.clkr,
	[CAMSS_CSI2PHY_CLK] = &camss_csi2phy_clk.clkr,
	[CAMSS_CSI2RDI_CLK] = &camss_csi2rdi_clk.clkr,
	[CAMSS_CSI2PIX_CLK] = &camss_csi2pix_clk.clkr,
	[CAMSS_CSI3_CLK] = &camss_csi3_clk.clkr,
	[CAMSS_CSI3_AHB_CLK] = &camss_csi3_ahb_clk.clkr,
	[CAMSS_CSI3PHY_CLK] = &camss_csi3phy_clk.clkr,
	[CAMSS_CSI3RDI_CLK] = &camss_csi3rdi_clk.clkr,
	[CAMSS_CSI3PIX_CLK] = &camss_csi3pix_clk.clkr,
	[CAMSS_ISPIF_AHB_CLK] = &camss_ispif_ahb_clk.clkr,
	[FD_CORE_CLK] = &fd_core_clk.clkr,
	[FD_CORE_UAR_CLK] = &fd_core_uar_clk.clkr,
	[FD_AHB_CLK] = &fd_ahb_clk.clkr,
};
+
+/* GDSC power domains exposed by this clock controller, indexed by DT IDs. */
+static struct gdsc *mmcc_msm8996_gdscs[] = {
+ [MMAGIC_BIMC_GDSC] = &mmagic_bimc_gdsc,
+ [MMAGIC_VIDEO_GDSC] = &mmagic_video_gdsc,
+ [MMAGIC_MDSS_GDSC] = &mmagic_mdss_gdsc,
+ [MMAGIC_CAMSS_GDSC] = &mmagic_camss_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VENUS_CORE1_GDSC] = &venus_core1_gdsc,
+ [CAMSS_GDSC] = &camss_gdsc,
+ [VFE0_GDSC] = &vfe0_gdsc,
+ [VFE1_GDSC] = &vfe1_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [CPP_GDSC] = &cpp_gdsc,
+ [FD_GDSC] = &fd_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+};
+
+/*
+ * Block Control Register (BCR) offsets in the MMSS register space,
+ * indexed by the DT binding's reset identifiers.
+ */
+static const struct qcom_reset_map mmcc_msm8996_resets[] = {
+ [MMAGICAHB_BCR] = { 0x5020 },
+ [MMAGIC_CFG_BCR] = { 0x5050 },
+ [MISC_BCR] = { 0x5010 },
+ [BTO_BCR] = { 0x5030 },
+ [MMAGICAXI_BCR] = { 0x5060 },
+ [MMAGICMAXI_BCR] = { 0x5070 },
+ [DSA_BCR] = { 0x50a0 },
+ [MMAGIC_CAMSS_BCR] = { 0x3c40 },
+ [THROTTLE_CAMSS_BCR] = { 0x3c30 },
+ [SMMU_VFE_BCR] = { 0x3c00 },
+ [SMMU_CPP_BCR] = { 0x3c10 },
+ [SMMU_JPEG_BCR] = { 0x3c20 },
+ [MMAGIC_MDSS_BCR] = { 0x2470 },
+ [THROTTLE_MDSS_BCR] = { 0x2460 },
+ [SMMU_ROT_BCR] = { 0x2440 },
+ [SMMU_MDP_BCR] = { 0x2450 },
+ [MMAGIC_VIDEO_BCR] = { 0x1190 },
+ [THROTTLE_VIDEO_BCR] = { 0x1180 },
+ [SMMU_VIDEO_BCR] = { 0x1170 },
+ [MMAGIC_BIMC_BCR] = { 0x5290 },
+ [GPU_GX_BCR] = { 0x4020 },
+ [GPU_BCR] = { 0x4030 },
+ [GPU_AON_BCR] = { 0x4040 },
+ [VMEM_BCR] = { 0x1200 },
+ [MMSS_RBCPR_BCR] = { 0x4080 },
+ [VIDEO_BCR] = { 0x1020 },
+ [MDSS_BCR] = { 0x2300 },
+ [CAMSS_TOP_BCR] = { 0x3480 },
+ [CAMSS_AHB_BCR] = { 0x3488 },
+ [CAMSS_MICRO_BCR] = { 0x3490 },
+ [CAMSS_CCI_BCR] = { 0x3340 },
+ [CAMSS_PHY0_BCR] = { 0x3020 },
+ [CAMSS_PHY1_BCR] = { 0x3050 },
+ [CAMSS_PHY2_BCR] = { 0x3080 },
+ [CAMSS_CSIPHY0_3P_BCR] = { 0x3230 },
+ [CAMSS_CSIPHY1_3P_BCR] = { 0x3250 },
+ [CAMSS_CSIPHY2_3P_BCR] = { 0x3270 },
+ [CAMSS_JPEG_BCR] = { 0x35a0 },
+ [CAMSS_VFE_BCR] = { 0x36a0 },
+ [CAMSS_VFE0_BCR] = { 0x3660 },
+ [CAMSS_VFE1_BCR] = { 0x3670 },
+ [CAMSS_CSI_VFE0_BCR] = { 0x3700 },
+ [CAMSS_CSI_VFE1_BCR] = { 0x3710 },
+ [CAMSS_CPP_TOP_BCR] = { 0x36c0 },
+ [CAMSS_CPP_BCR] = { 0x36d0 },
+ [CAMSS_CSI0_BCR] = { 0x30b0 },
+ [CAMSS_CSI0RDI_BCR] = { 0x30d0 },
+ [CAMSS_CSI0PIX_BCR] = { 0x30e0 },
+ [CAMSS_CSI1_BCR] = { 0x3120 },
+ [CAMSS_CSI1RDI_BCR] = { 0x3140 },
+ [CAMSS_CSI1PIX_BCR] = { 0x3150 },
+ [CAMSS_CSI2_BCR] = { 0x3180 },
+ [CAMSS_CSI2RDI_BCR] = { 0x31a0 },
+ [CAMSS_CSI2PIX_BCR] = { 0x31b0 },
+ [CAMSS_CSI3_BCR] = { 0x31e0 },
+ [CAMSS_CSI3RDI_BCR] = { 0x3200 },
+ [CAMSS_CSI3PIX_BCR] = { 0x3210 },
+ [CAMSS_ISPIF_BCR] = { 0x3220 },
+ [FD_BCR] = { 0x3b60 },
+ [MMSS_SPDM_RM_BCR] = { 0x300 },
+};
+
+/* MMSS register space: 32-bit registers on a 4-byte stride, up to 0xb008. */
+static const struct regmap_config mmcc_msm8996_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xb008,
+ .fast_io = true, /* MMIO — spinlock-style fast regmap I/O */
+};
+
+/*
+ * Top-level controller descriptor: ties the clock, reset and GDSC tables
+ * to the regmap config for the common qcom_cc registration path.
+ */
+static const struct qcom_cc_desc mmcc_msm8996_desc = {
+ .config = &mmcc_msm8996_regmap_config,
+ .clks = mmcc_msm8996_clocks,
+ .num_clks = ARRAY_SIZE(mmcc_msm8996_clocks),
+ .resets = mmcc_msm8996_resets,
+ .num_resets = ARRAY_SIZE(mmcc_msm8996_resets),
+ .gdscs = mmcc_msm8996_gdscs,
+ .num_gdscs = ARRAY_SIZE(mmcc_msm8996_gdscs),
+};
+
+/* Devicetree match: bind against the MSM8996 multimedia clock controller. */
+static const struct of_device_id mmcc_msm8996_match_table[] = {
+ { .compatible = "qcom,mmcc-msm8996" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mmcc_msm8996_match_table);
+
+/*
+ * Probe: map the MMSS register space, apply the one-time hardware
+ * init tweaks, register the plain clk_hw clocks, then hand the
+ * descriptor off to the common qcom clock-controller registration.
+ *
+ * Returns 0 on success or a negative errno from any failing step.
+ */
+static int mmcc_msm8996_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct regmap *regmap;
+	struct clk *clk;
+	unsigned int idx;
+
+	regmap = qcom_cc_map(pdev, &mmcc_msm8996_desc);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	/* Disable the AHB DCD */
+	regmap_update_bits(regmap, 0x50d8, BIT(31), 0);
+	/* Disable the NoC FSM for mmss_mmagic_cfg_ahb_clk */
+	regmap_update_bits(regmap, 0x5054, BIT(15), 0);
+
+	/* Register the raw clk_hw entries (devm-managed, so no unwind). */
+	for (idx = 0; idx < ARRAY_SIZE(mmcc_msm8996_hws); idx++) {
+		clk = devm_clk_register(dev, mmcc_msm8996_hws[idx]);
+		if (IS_ERR(clk))
+			return PTR_ERR(clk);
+	}
+
+	return qcom_cc_really_probe(pdev, &mmcc_msm8996_desc, regmap);
+}
+
+/* Platform driver glue; module_platform_driver() generates init/exit. */
+static struct platform_driver mmcc_msm8996_driver = {
+ .probe = mmcc_msm8996_probe,
+ .driver = {
+ .name = "mmcc-msm8996",
+ .of_match_table = mmcc_msm8996_match_table,
+ },
+};
+module_platform_driver(mmcc_msm8996_driver);
+
+/* Module metadata. */
+MODULE_DESCRIPTION("QCOM MMCC MSM8996 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mmcc-msm8996");