summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGilad Broner <gbroner@codeaurora.org>2016-02-24 12:01:29 +0200
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-23 21:24:13 -0700
commitce93b221ca894563da0f0500bdb8162c944bd164 (patch)
treef4c94bba49e1c656b68bea2e79921e974809897b
parent03bad71331e87b52d377cf7fcdab547f6c744902 (diff)
crypto: msm: add inline crypto engine (ICE) driver snapshot
This snapshot is taken as of msm-3.18 commit e70ad0c ("Promotion of kernel.lnx.3.18-151201") Signed-off-by: Gilad Broner <gbroner@codeaurora.org>
-rw-r--r--Documentation/crypto/msm/msm_ice_driver.txt235
-rw-r--r--Documentation/devicetree/bindings/crypto/msm/ice.txt32
-rw-r--r--drivers/crypto/Kconfig5
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/msm/Kconfig9
-rw-r--r--drivers/crypto/msm/Makefile2
-rw-r--r--drivers/crypto/msm/ice.c1743
-rw-r--r--drivers/crypto/msm/iceregs.h147
-rw-r--r--include/linux/blk_types.h1
9 files changed, 2174 insertions, 1 deletions
diff --git a/Documentation/crypto/msm/msm_ice_driver.txt b/Documentation/crypto/msm/msm_ice_driver.txt
new file mode 100644
index 000000000000..ddb81766b134
--- /dev/null
+++ b/Documentation/crypto/msm/msm_ice_driver.txt
@@ -0,0 +1,235 @@
+Introduction:
+=============
+Storage encryption has been one of the most requested features from a security
+point of view. The QTI-based storage encryption solution uses a general purpose
+crypto engine. While this kind of solution provides a decent amount of
+performance, it falls short as storage speeds keep improving significantly.
+To overcome this performance degradation, newer chips are going to
+have Inline Crypto Engine (ICE) embedded into storage device. ICE is supposed
+to meet the line speed of storage devices.
+
+Hardware Description
+====================
+ICE is a HW block that is embedded into storage device such as UFS/eMMC. By
+default, ICE works in bypass mode i.e. ICE HW does not perform any crypto
+operation on data to be processed by storage device. If required, ICE can be
+configured to perform crypto operation in one direction (i.e. either encryption
+or decryption) or in both directions (both encryption & decryption).
+
+When a switch between the operation modes (plain to crypto or crypto to plain)
+is desired for a particular partition, SW must complete all transactions for
+that particular partition before switching the crypto mode i.e. no crypto, one
+direction crypto or both direction crypto operation. Requests for other
+partitions are not impacted due to crypto mode switch.
+
+ICE HW currently supports AES128/256 bit ECB & XTS mode encryption algorithms.
+
+Keys for crypto operations are loaded from SW. Keys are stored in a lookup
+table (LUT) located inside ICE HW. A maximum of 32 keys can be loaded in the
+ICE key LUT. A key inside the LUT can be referred to using a key index.
+
+SW Description
+==============
+ICE HW has categorized ICE registers in 2 groups: those which can be accessed by
+only the secure side i.e. TZ and those which can be accessed by the non-secure
+side such as HLOS as well. This requires the ICE driver to be split into two
+pieces: one running from TZ space and another from HLOS space.
+
+ICE driver from TZ would configure keys as requested by HLOS side.
+
+ICE driver on HLOS side is responsible for initialization of ICE HW.
+
+SW Architecture Diagram
+=======================
+Following are all the components involved in the ICE driver for control path:
+
++++++++++++++++++++++++++++++++++++++++++
++ App layer +
++++++++++++++++++++++++++++++++++++++++++
++ System layer +
++ ++++++++ +++++++ +
++ + VOLD + + PFM + +
++ ++++++++ +++++++ +
++ || || +
++ || || +
++ \/ \/ +
++ ++++++++++++++ +
++ + LibQSEECom + +
++ ++++++++++++++ +
++++++++++++++++++++++++++++++++++++++++++
++ Kernel + +++++++++++++++++
++ + + KMS +
++ +++++++ +++++++++++ +++++++++++ + +++++++++++++++++
++ + ICE + + Storage + + QSEECom + + + ICE Driver +
++++++++++++++++++++++++++++++++++++++++++ <===> +++++++++++++++++
+ || ||
+ || ||
+ \/ \/
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++ Storage Device +
++ ++++++++++++++ +
++ + ICE HW + +
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Use Cases:
+----------
+a) Device bootup
+ICE HW is detected during bootup time and corresponding probe function is
+called. ICE driver parses its data from device tree node. ICE HW and storage
+HW are tightly coupled. Storage device probing is dependent upon ICE device
+probing. ICE driver configures all the required registers to put the ICE HW
+in bypass mode.
+
+b) Configuring keys
+Currently, there are a couple of use cases to configure the keys.
+
+1) Full Disk Encryption(FDE)
+System layer(VOLD) at invocation of apps layer would call libqseecom to create
+the encryption key. Libqseecom calls qseecom driver to communicate with KMS
+module on the secure side i.e. TZ. KMS would call ICE driver on the TZ side to
+create and set the keys in ICE HW. At the end of transaction, VOLD would have
+key index of key LUT where encryption key is present.
+
+2) Per File Encryption (PFE)
+Per File Manager(PFM) calls QSEECom api to create the key. PFM has a peer comp-
+onent(PFT) at kernel layer which gets the corresponding key index from PFM.
+
+Following are all the components involved in the ICE driver for data path:
+
++++++++++++++++++++++++++++++++++++++++++
++ App layer +
++++++++++++++++++++++++++++++++++++++++++
++ VFS +
++---------------------------------------+
++ File System (EXT4) +
++---------------------------------------+
++ Block Layer +
++ --------------------------------------+
++ +++++++ +
++ dm-req-crypt => + PFT + +
++ +++++++ +
++ +
++---------------------------------------+
++ +++++++++++ +++++++ +
++ + Storage + + ICE + +
++++++++++++++++++++++++++++++++++++++++++
++ || +
++ || (Storage Req with +
++ \/ ICE parameters ) +
++++++++++++++++++++++++++++++++++++++++++
++ Storage Device +
++ ++++++++++++++ +
++ + ICE HW + +
++++++++++++++++++++++++++++++++++++++++++
+
+c) Data transaction
+Once the crypto key has been configured, VOLD/PFM creates device mapping for
+data partition. As part of device mapping VOLD passes key index, crypto
+algorithm, mode and key length to dm layer. In case of PFE, keys are provided
+by PFT as and when request is processed by dm-req-crypt. When any application
+needs to read/write data, it would go through DM layer which would add crypto
+information, provided by VOLD/PFT, to Request. For each Request, Storage driver
+would ask ICE driver to configure crypto part of request. ICE driver extracts
+crypto data from Request structure and provide it to storage driver which would
+finally dispatch request to storage device.
+
+d) Error Handling
+Due to issue # 1 mentioned in "Known Issues", ICE driver does not register for
+any interrupt. However, it enables sources of interrupt for ICE HW. After each
+data transaction, Storage driver receives transaction completion event. As part
+of event handling, storage driver calls ICE driver to check if any of ICE
+interrupt status is set. If yes, storage driver returns error to upper layer.
+
+Error handling would be changed in future chips.
+
+Interfaces
+==========
+ICE driver exposes interfaces for storage driver to :
+1. Get the global instance of ICE driver
+2. Get the implemented interfaces of the particular ice instance
+3. Initialize the ICE HW
+4. Reset the ICE HW
+5. Resume/Suspend the ICE HW
+6. Get the Crypto configuration for the data request for storage
+7. Check if current data transaction has generated any interrupt
+
+Driver Parameters
+=================
+This driver is built and statically linked into the kernel; therefore,
+there are no module parameters supported by this driver.
+
+There are no kernel command line parameters supported by this driver.
+
+Power Management
+================
+ICE driver does not do power management on its own as it is part of storage
+hardware. Whenever storage driver receives request for power collapse/suspend
+resume, it would call ICE driver which exposes APIs for Storage HW. ICE HW
+during power collapse or reset, wipes crypto configuration data. When ICE
+driver receives request to resume, it would ask ICE driver on TZ side to
+restore the configuration. ICE driver does not do anything as part of power
+collapse or suspend event.
+
+Interface:
+==========
+ICE driver exposes following APIs for storage driver to use:
+
+int (*init)(struct platform_device *, void *, ice_success_cb, ice_error_cb);
+ -- This function is invoked by storage controller during initialization of
+ storage controller. Storage controller would provide success and error call
+ backs which would be invoked asynchronously once ICE HW init is done.
+
+int (*reset)(struct platform_device *);
+ -- ICE HW reset as part of storage controller reset. When storage controller
+ received reset command, it would call reset on ICE HW. As of now, ICE HW
+ does not need to do anything as part of reset.
+
+int (*resume)(struct platform_device *);
+ -- ICE HW while going to reset, wipes all crypto keys and other data from ICE
+ HW. ICE driver would reconfigure those data as part of resume operation.
+
+int (*suspend)(struct platform_device *);
+ -- This API would be called by storage driver when storage device is going to
+ suspend mode. As of today, ICE driver does not do anything to handle suspend.
+
+int (*config)(struct platform_device *, struct request* , struct ice_data_setting*);
+ -- Storage driver would call this interface to get all crypto data required to
+ perform crypto operation.
+
+int (*status)(struct platform_device *);
+ -- Storage driver would call this interface to check if previous data transfer
+ generated any error.
+
+Config options
+==============
+This driver is enabled by the kernel config option CONFIG_CRYPTO_DEV_MSM_ICE.
+
+Dependencies
+============
+ICE driver depends upon corresponding ICE driver on TZ side to function
+appropriately.
+
+Known Issues
+============
+1. ICE HW emits 0s even if it has generated an interrupt
+This issue has significant impact on how ICE interrupts are handled. Currently,
+ICE driver does not register for any of the ICE interrupts but enables the
+sources of interrupt. Once storage driver asks to check the status of interrupt,
+it reads and clears the clear status and provides the read status to the
+storage driver. This mechanism, though not optimal, prevents filesystem
+corruption.
+This issue has been fixed in newer chips.
+
+2. ICE HW wipes all crypto data during power collapse
+This issue necessitates that the ICE driver on the TZ side store the crypto
+material,
+which is not required in the case of general purpose crypto engine.
+This issue has been fixed in newer chips.
+
+Further Improvements
+====================
+Currently, Due to PFE use case, ICE driver is dependent upon dm-req-crypt to
+provide the keys as part of request structure. This couples ICE driver with
+dm-req-crypt based solution. It is under discussion to expose IOCTL-based
+and registration-based interface APIs from the ICE driver. The ICE driver would
+use these two interfaces to find out if any key exists for the current request.
+If yes, choose the right key index received from the IOCTL or registration
+based APIs. If not, don't set any crypto parameter in the request.
diff --git a/Documentation/devicetree/bindings/crypto/msm/ice.txt b/Documentation/devicetree/bindings/crypto/msm/ice.txt
new file mode 100644
index 000000000000..2d0e58059da3
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/ice.txt
@@ -0,0 +1,32 @@
+* Inline Crypto Engine (ICE)
+
+Required properties:
+ - compatible : should be "qcom,ice"
+ - reg : <register mapping>
+
+Optional properties:
+ - interrupt-names : name describing the interrupts for ICE IRQ
+ - interrupts : <interrupt mapping for ICE IRQ>
+ - qcom,enable-ice-clk : should enable clocks for ICE HW
+ - clocks : List of phandle and clock specifier pairs
+ - clock-names : List of clock input name strings sorted in the same
+ order as the clocks property.
+ - qcom,op-freq-hz : max clock speed sorted in the same order as the clocks
+ property.
+ - qcom,instance-type : describe the storage type for which ICE node is defined
+ currently, only "ufs" and "sdcc" are supported storage type
+
+Example:
+ ufs_ice: ufsice@630000 {
+ compatible = "qcom,ice";
+ reg = <0x630000 0x8000>;
+ interrupt-names = "ufs_ice_nonsec_level_irq", "ufs_ice_sec_level_irq";
+ interrupts = <0 258 0>, <0 257 0>;
+ qcom,enable-ice-clk;
+ clock-names = "ice_core_clk_src", "ice_core_clk";
+ clocks = <&clock_gcc clk_ufs_ice_core_clk_src>,
+ <&clock_gcc clk_gcc_ufs_ice_core_clk>;
+ qcom,op-freq-hz = <300000000>, <0>;
+ qcom,instance-type = "ufs";
+ status = "disabled";
+ };
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index da13b6339d18..32b18ba328eb 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -556,4 +556,9 @@ config CRYPTO_DEV_SUN4I_SS
To compile this driver as a module, choose M here: the module
will be called sun4i-ss.
+if ARCH_QCOM
+source "drivers/crypto/msm/Kconfig"
+endif # ARCH_QCOM
+
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 1098d5e643ae..a4c74902655c 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += msm/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
+obj-$(CONFIG_ARCH_QCOM) += msm/
diff --git a/drivers/crypto/msm/Kconfig b/drivers/crypto/msm/Kconfig
new file mode 100644
index 000000000000..b5a85c2732a7
--- /dev/null
+++ b/drivers/crypto/msm/Kconfig
@@ -0,0 +1,9 @@
+
+config CRYPTO_DEV_QCOM_ICE
+ tristate "Inline Crypto Module"
+ default n
+ help
+ This driver supports Inline Crypto Engine for QTI chipsets, MSM8994
+ and later, to accelerate crypto operations for storage needs.
+ To compile this driver as a module, choose M here: the
+ module will be called ice.
diff --git a/drivers/crypto/msm/Makefile b/drivers/crypto/msm/Makefile
index c4bbc2c2622b..993840ca12eb 100644
--- a/drivers/crypto/msm/Makefile
+++ b/drivers/crypto/msm/Makefile
@@ -9,4 +9,4 @@ obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += compat_qcedev.o
endif
obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o
obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o
-
+obj-$(CONFIG_CRYPTO_DEV_QCOM_ICE) += ice.o
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
new file mode 100644
index 000000000000..abe736cb7120
--- /dev/null
+++ b/drivers/crypto/msm/ice.c
@@ -0,0 +1,1743 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/async.h>
+#include <linux/of.h>
+#include <linux/device-mapper.h>
+#include <linux/blk_types.h>
+#include <linux/blkdev.h>
+#include <linux/clk.h>
+#include <linux/cdev.h>
+#include <linux/regulator/consumer.h>
+#include <linux/msm-bus.h>
+#include <linux/pfk.h>
+#include <crypto/ice.h>
+#include <soc/qcom/scm.h>
+#include "iceregs.h"
+
+#define SCM_IO_READ 0x1
+#define SCM_IO_WRITE 0x2
+#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
+ ((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
+
+#define TZ_OWNER_QSEE_OS 50
+#define TZ_SVC_KEYSTORE 5 /* Keystore management */
+
+#define TZ_OS_KS_RESTORE_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x06)
+
+#define TZ_SYSCALL_CREATE_PARAM_ID_0 0
+
+#define TZ_OS_KS_RESTORE_KEY_ID_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define ICE_REV(x, y) (((x) & ICE_CORE_##y##_REV_MASK) >> ICE_CORE_##y##_REV)
+#define QCOM_UFS_ICE_DEV "iceufs"
+#define QCOM_SDCC_ICE_DEV "icesdcc"
+#define QCOM_ICE_TYPE_NAME_LEN 8
+#define QCOM_ICE_MAX_BIST_CHECK_COUNT 100
+
+const struct qcom_ice_variant_ops qcom_ice_ops;
+
+/*
+ * Per-clock bookkeeping for an ICE instance, parsed from the
+ * "clock-names" / "qcom,op-freq-hz" DT properties and linked into
+ * ice_device::clk_list_head.
+ */
+struct ice_clk_info {
+ struct list_head list;	/* entry in ice_device::clk_list_head */
+ struct clk *clk;	/* clock handle once obtained */
+ const char *name;	/* clock-names string (kstrdup'd copy) */
+ u32 max_freq;	/* from qcom,op-freq-hz; 0 = no explicit rate */
+ u32 min_freq;
+ u32 curr_freq;
+ bool enabled;
+};
+
+/*
+ * Bus-bandwidth voting state for an ICE instance, used with the
+ * msm_bus_scale_* client API.
+ */
+struct qcom_ice_bus_vote {
+ uint32_t client_handle;	/* handle from msm_bus_scale_register_client */
+ uint32_t curr_vote;	/* last vote index sent to the bus driver */
+ int min_bw_vote;	/* cached index of the "MIN" bandwidth vector */
+ int max_bw_vote;	/* cached index of the "MAX" bandwidth vector */
+ int saved_vote;
+ bool is_max_bw_needed;	/* forces MAX vote for any mode other than MIN */
+ struct device_attribute max_bus_bw;
+};
+
+static LIST_HEAD(ice_devices);
+/*
+ * ICE HW device structure.
+ */
+struct ice_device {
+ struct list_head list;
+ struct device *pdev;
+ struct cdev cdev;
+ dev_t device_no;
+ struct class *driver_class;
+ void __iomem *mmio;
+ struct resource *res;
+ int irq;
+ bool is_irq_enabled;
+ bool is_ice_enabled;
+ bool is_ice_disable_fuse_blown;
+ bool is_clear_irq_pending;
+ ice_error_cb error_cb;
+ void *host_controller_data; /* UFS/EMMC/other? */
+ spinlock_t lock;
+ struct list_head clk_list_head;
+ u32 ice_hw_version;
+ bool is_ice_clk_available;
+ char ice_instance_type[QCOM_ICE_TYPE_NAME_LEN];
+ struct regulator *reg;
+ bool is_regulator_available;
+ struct qcom_ice_bus_vote bus_vote;
+ ktime_t ice_reset_start_time;
+ ktime_t ice_reset_complete_time;
+};
+
+/*
+ * qti_ice_setting_config() - fill @setting with the crypto parameters for
+ * a storage request.
+ *
+ * @req:         request being processed; only its data direction is used.
+ * @pdev:        ICE platform device whose drvdata is the ice_device.
+ * @crypto_data: caller-supplied key index / algorithm information.
+ * @setting:     output consumed by the storage driver.
+ * @configured:  set true when @setting was populated (also true when no
+ *               ICE device exists, so the caller proceeds without crypto).
+ *
+ * Returns 0 on success, -ENODEV when the ICE-disable fuse is blown.
+ */
+static int qti_ice_setting_config(struct request *req,
+ struct platform_device *pdev,
+ struct ice_crypto_setting *crypto_data,
+ struct ice_data_setting *setting,
+ bool *configured)
+{
+ struct ice_device *ice_dev = NULL;
+
+ *configured = false;
+ ice_dev = platform_get_drvdata(pdev);
+
+ if (!ice_dev) {
+ pr_debug("%s no ICE device\n", __func__);
+
+ /* no ICE present: report "configured" so the caller continues */
+ *configured = true;
+ return 0;
+ }
+
+ if (ice_dev->is_ice_disable_fuse_blown) {
+ pr_err("%s ICE disabled fuse is blown\n", __func__);
+ return -ENODEV;
+ }
+
+ /*
+ * A negative key_index (checked via the signed short cast) appears to
+ * mean "no key configured" -- TODO confirm against ice_crypto_setting.
+ */
+ if ((short)(crypto_data->key_index) >= 0) {
+
+ *configured = true;
+
+ memcpy(&setting->crypto_data, crypto_data,
+ sizeof(setting->crypto_data));
+
+ /* disable bypass only for the direction of this request */
+ if (rq_data_dir(req) == WRITE)
+ setting->encr_bypass = false;
+ else if (rq_data_dir(req) == READ)
+ setting->decr_bypass = false;
+ else {
+ /* unexpected direction: leave both paths in bypass */
+ setting->encr_bypass = true;
+ setting->decr_bypass = true;
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_ice_enable_clocks(struct ice_device *, bool);
+
+/*
+ * qcom_ice_set_bus_vote() - apply a bus-bandwidth vote if it differs
+ * from the cached current vote. Caches the new vote on success.
+ *
+ * Returns 0 on success or the msm_bus_scale error code.
+ */
+static int qcom_ice_set_bus_vote(struct ice_device *ice_dev, int vote)
+{
+ int err = 0;
+
+ if (vote != ice_dev->bus_vote.curr_vote) {
+ err = msm_bus_scale_client_update_request(
+ ice_dev->bus_vote.client_handle, vote);
+ if (err) {
+ dev_err(ice_dev->pdev,
+ "%s:failed:client_handle=0x%x, vote=%d, err=%d\n",
+ __func__, ice_dev->bus_vote.client_handle,
+ vote, err);
+ goto out;
+ }
+ ice_dev->bus_vote.curr_vote = vote;
+ }
+out:
+ return err;
+}
+
+/*
+ * qcom_ice_get_bus_vote() - translate a speed-mode name into its index
+ * within the "qcom,bus-vector-names" DT property.
+ *
+ * When is_max_bw_needed is set, any mode other than "MIN" is forced to
+ * "MAX". Returns the index (>= 0) or a negative errno.
+ */
+static int qcom_ice_get_bus_vote(struct ice_device *ice_dev,
+ const char *speed_mode)
+{
+ struct device *dev = ice_dev->pdev;
+ struct device_node *np = dev->of_node;
+ int err;
+ const char *key = "qcom,bus-vector-names";
+
+ if (!speed_mode) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (ice_dev->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
+ err = of_property_match_string(np, key, "MAX");
+ else
+ err = of_property_match_string(np, key, speed_mode);
+out:
+ /* NOTE(review): on the -EINVAL path speed_mode is NULL when printed */
+ if (err < 0)
+ dev_err(dev, "%s: Invalid %s mode %d\n",
+ __func__, speed_mode, err);
+ return err;
+}
+
+/*
+ * qcom_ice_bus_register() - register this ICE instance as a bus-scaling
+ * client and cache the MIN/MAX vote indices.
+ *
+ * Validates that the number of "qcom,bus-vector-names" strings matches
+ * the number of use cases in the bus pdata. Returns 0 on success or a
+ * negative errno.
+ */
+static int qcom_ice_bus_register(struct ice_device *ice_dev)
+{
+ int err = 0;
+ struct msm_bus_scale_pdata *bus_pdata;
+ struct device *dev = ice_dev->pdev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *np = dev->of_node;
+
+ bus_pdata = msm_bus_cl_get_pdata(pdev);
+ if (!bus_pdata) {
+ dev_err(dev, "%s: failed to get bus vectors\n", __func__);
+ err = -ENODATA;
+ goto out;
+ }
+
+ err = of_property_count_strings(np, "qcom,bus-vector-names");
+ /*
+ * NOTE(review): when the count succeeds but differs from
+ * num_usecases, a positive count is returned as the "error".
+ */
+ if (err < 0 || err != bus_pdata->num_usecases) {
+ dev_err(dev, "%s: Error = %d with qcom,bus-vector-names\n",
+ __func__, err);
+ goto out;
+ }
+ err = 0;
+
+ ice_dev->bus_vote.client_handle =
+ msm_bus_scale_register_client(bus_pdata);
+ if (!ice_dev->bus_vote.client_handle) {
+ dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
+ __func__);
+ err = -EFAULT;
+ goto out;
+ }
+
+ /* cache the vote index for minimum and maximum bandwidth */
+ ice_dev->bus_vote.min_bw_vote = qcom_ice_get_bus_vote(ice_dev, "MIN");
+ ice_dev->bus_vote.max_bw_vote = qcom_ice_get_bus_vote(ice_dev, "MAX");
+out:
+ return err;
+}
+
+/*
+ * qcom_ice_get_vreg() - lazily acquire the "vdd-hba" regulator.
+ *
+ * No-op (returns 0) when the DT marks the regulator as unavailable or
+ * when it was already obtained. Returns a negative errno on failure.
+ */
+static int qcom_ice_get_vreg(struct ice_device *ice_dev)
+{
+ int ret = 0;
+
+ if (!ice_dev->is_regulator_available)
+ return 0;
+
+ if (ice_dev->reg)
+ return 0;
+
+ ice_dev->reg = devm_regulator_get(ice_dev->pdev, "vdd-hba");
+ if (IS_ERR(ice_dev->reg)) {
+ ret = PTR_ERR(ice_dev->reg);
+ dev_err(ice_dev->pdev, "%s: %s get failed, err=%d\n",
+ __func__, "vdd-hba-supply", ret);
+ }
+ return ret;
+}
+
+/*
+ * qcom_ice_config_proc_ignore() - HW rev 2.0.0 only: set bit 0x800 in
+ * the ADVANCED_CONTROL register (hardware workaround for that stepping).
+ */
+static void qcom_ice_config_proc_ignore(struct ice_device *ice_dev)
+{
+ u32 regval;
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2 &&
+ ICE_REV(ice_dev->ice_hw_version, MINOR) == 0 &&
+ ICE_REV(ice_dev->ice_hw_version, STEP) == 0) {
+ regval = qcom_ice_readl(ice_dev,
+ QCOM_ICE_REGS_ADVANCED_CONTROL);
+ regval |= 0x800;
+ qcom_ice_writel(ice_dev, regval,
+ QCOM_ICE_REGS_ADVANCED_CONTROL);
+ /* Ensure register is updated */
+ mb();
+ }
+}
+
+/*
+ * qcom_ice_low_power_mode_enable() - OR the low-power-mode bits (0x7000)
+ * into the ADVANCED_CONTROL register.
+ */
+static void qcom_ice_low_power_mode_enable(struct ice_device *ice_dev)
+{
+ u32 regval;
+ regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL);
+ /*
+ * Enable low power mode sequence
+ * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0
+ */
+ regval |= 0x7000;
+ qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ADVANCED_CONTROL);
+ /*
+ * Ensure previous instructions was completed before issuing next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+}
+
+/*
+ * qcom_ice_enable_test_bus_config() - route ICE interrupt lines onto the
+ * test bus; only needed (and only applied) on HW major revision 1.
+ */
+static void qcom_ice_enable_test_bus_config(struct ice_device *ice_dev)
+{
+ /*
+ * Configure & enable ICE_TEST_BUS_REG to reflect ICE intr lines
+ * MAIN_TEST_BUS_SELECTOR = 0 (ICE_CONFIG)
+ * TEST_BUS_REG_EN = 1 (ENABLE)
+ */
+ u32 regval;
+
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+ return;
+
+ regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+ regval &= 0x0FFFFFFF;
+ /* TBD: replace 0x2 with define in iceregs.h */
+ regval |= 0x2;
+ qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+
+ /*
+ * Ensure previous instructions was completed before issuing next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+}
+
+/*
+ * qcom_ice_optimization_enable() - program the per-revision optimization
+ * bits in ADVANCED_CONTROL, and on HW major rev 1 also enable endian
+ * swapping. The udelay() calls honor the ICE HPG-mandated settle time
+ * before each write.
+ */
+static void qcom_ice_optimization_enable(struct ice_device *ice_dev)
+{
+ u32 regval;
+
+ regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL);
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+ regval |= 0xD807100;
+ else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
+ regval |= 0x3F007100;
+
+ /* ICE Optimizations Enable Sequence */
+ udelay(5);
+ /* [0]-0, [1]-0, [2]-8, [3]-E, [4]-0, [5]-0, [6]-F, [7]-A */
+ qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ADVANCED_CONTROL);
+ /*
+ * Ensure previous instructions was completed before issuing next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+
+ /* ICE HPG requires sleep before writing */
+ udelay(5);
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1) {
+ regval = 0;
+ regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ENDIAN_SWAP);
+ regval |= 0xF;
+ qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ENDIAN_SWAP);
+ /*
+ * Ensure previous instructions were completed before issue
+ * next ICE commands
+ */
+ mb();
+ }
+}
+
+/*
+ * qcom_ice_enable() - take ICE out of bypass.
+ *
+ * On HW >= 2.1: first polls BIST_STATUS (up to
+ * QCOM_ICE_MAX_BIST_CHECK_COUNT iterations) and BUG()s if the self-test
+ * never completes; afterwards verifies BYPASS_STATUS and BUG()s if
+ * bypass could not be disabled.
+ *
+ * NOTE(review): the BIST poll loop does not break once the status
+ * clears, so it always burns the full iteration count in udelay(50).
+ */
+static void qcom_ice_enable(struct ice_device *ice_dev)
+{
+ unsigned int reg;
+ int count;
+
+ if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
+ ((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
+ (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
+ for (count = 0; count < QCOM_ICE_MAX_BIST_CHECK_COUNT;
+ count++) {
+ reg = qcom_ice_readl(ice_dev,
+ QCOM_ICE_REGS_BIST_STATUS);
+ if ((reg & 0xF0000000) != 0x0)
+ udelay(50);
+ }
+ if ((reg & 0xF0000000) != 0x0) {
+ pr_err("%s: BIST validation failed for ice = %p",
+ __func__, (void *)ice_dev);
+ BUG();
+ }
+ }
+
+ /*
+ * To enable ICE, perform following
+ * 1. Set IGNORE_CONTROLLER_RESET to USE in ICE_RESET register
+ * 2. Disable GLOBAL_BYPASS bit in ICE_CONTROL register
+ */
+ reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_RESET);
+
+ /* rev >= 2 clears the whole register; rev 1 clears only bit 8 */
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+ reg &= 0x0;
+ else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
+ reg &= ~0x100;
+
+ qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_RESET);
+
+ /*
+ * Ensure previous instructions was completed before issuing next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+
+ reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_CONTROL);
+
+ /* clear the bypass bit(s); the mask differs per HW revision */
+ if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+ reg &= 0xFFFE;
+ else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
+ reg &= ~0x7;
+ qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_CONTROL);
+
+ /*
+ * Ensure previous instructions was completed before issuing next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+
+
+ if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
+ ((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
+ (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
+ reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS);
+ if ((reg & 0x80000000) != 0x0) {
+ pr_err("%s: Bypass failed for ice = %p",
+ __func__, (void *)ice_dev);
+ BUG();
+ }
+ }
+}
+
+/*
+ * qcom_ice_verify_ice() - read and validate the ICE version register.
+ *
+ * Rejects unknown hardware (major revision newer than the driver
+ * supports) with -EIO; otherwise caches the raw revision in
+ * ice_dev->ice_hw_version and returns 0.
+ */
+static int qcom_ice_verify_ice(struct ice_device *ice_dev)
+{
+ unsigned int rev;
+ unsigned int maj_rev, min_rev, step_rev;
+
+ rev = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_VERSION);
+ maj_rev = (rev & ICE_CORE_MAJOR_REV_MASK) >> ICE_CORE_MAJOR_REV;
+ min_rev = (rev & ICE_CORE_MINOR_REV_MASK) >> ICE_CORE_MINOR_REV;
+ step_rev = (rev & ICE_CORE_STEP_REV_MASK) >> ICE_CORE_STEP_REV;
+
+ if (maj_rev > ICE_CORE_CURRENT_MAJOR_VERSION) {
+ pr_err("%s: Unknown QC ICE device at 0x%lu, rev %d.%d.%d\n",
+ __func__, (unsigned long)ice_dev->mmio,
+ maj_rev, min_rev, step_rev);
+ return -EIO;
+ }
+ ice_dev->ice_hw_version = rev;
+
+ dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%p\n",
+ maj_rev, min_rev, step_rev,
+ ice_dev->mmio);
+
+ return 0;
+}
+
+/*
+ * qcom_ice_enable_intr() - unmask all non-secure ICE interrupt sources
+ * by clearing QCOM_ICE_NON_SEC_IRQ_MASK bits in the mask register.
+ */
+static void qcom_ice_enable_intr(struct ice_device *ice_dev)
+{
+ unsigned reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+
+ reg &= ~QCOM_ICE_NON_SEC_IRQ_MASK;
+ qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+ /*
+ * Ensure previous instructions was completed before issuing next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+}
+
+/*
+ * qcom_ice_disable_intr() - mask all non-secure ICE interrupt sources
+ * by setting QCOM_ICE_NON_SEC_IRQ_MASK bits in the mask register.
+ */
+static void qcom_ice_disable_intr(struct ice_device *ice_dev)
+{
+ unsigned reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+
+ reg |= QCOM_ICE_NON_SEC_IRQ_MASK;
+ qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+ /*
+ * Ensure previous instructions was completed before issuing next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+}
+
+/*
+ * qcom_ice_clear_irq() - acknowledge all non-secure ICE interrupts by
+ * writing the full mask to the IRQ clear register, then drop the
+ * pending-clear flag. Always returns 0.
+ */
+static int qcom_ice_clear_irq(struct ice_device *ice_dev)
+{
+ qcom_ice_writel(ice_dev, QCOM_ICE_NON_SEC_IRQ_MASK,
+ QCOM_ICE_REGS_NON_SEC_IRQ_CLR);
+ /*
+ * Ensure previous instructions was completed before issuing next
+ * ICE initialization/optimization instruction
+ */
+ mb();
+ ice_dev->is_clear_irq_pending = false;
+
+ return 0;
+}
+
+/*
+ * qcom_ice_isr() - ICE interrupt handler.
+ *
+ * Reads the non-secure IRQ status, decodes the FIRST matching error
+ * source into an ice_error_code, reports it through the host
+ * controller's error_cb, and clears all pending IRQs.
+ *
+ * NOTE(review): if intr_status is non-zero but matches none of the
+ * handled bits, @err is passed to error_cb uninitialized.
+ * NOTE(review): @clear_reg is accumulated per-source but never written
+ * back; qcom_ice_clear_irq() clears the full mask instead.
+ */
+static irqreturn_t qcom_ice_isr(int isr, void *data)
+{
+ irqreturn_t retval = IRQ_NONE;
+ unsigned int intr_status, clear_reg;
+ struct ice_device *ice_dev = data;
+ enum ice_error_code err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ice_dev->lock, flags);
+ intr_status = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_STTS);
+ if (intr_status) {
+ clear_reg = qcom_ice_readl(ice_dev,
+ QCOM_ICE_REGS_NON_SEC_IRQ_CLR);
+
+ /* Check the source of interrupt */
+ if (intr_status & QCOM_ICE_STREAM1_PREMATURE_LBA_CHANGE) {
+ err = ICE_ERROR_STREAM1_PREMATURE_LBA_CHANGE;
+ clear_reg |= QCOM_ICE_STREAM1_PREMATURE_LBA_CHANGE;
+ } else if (intr_status &
+ QCOM_ICE_STREAM2_PREMATURE_LBA_CHANGE) {
+ err = ICE_ERROR_STREAM2_PREMATURE_LBA_CHANGE;
+ clear_reg |= QCOM_ICE_STREAM2_PREMATURE_LBA_CHANGE;
+ } else if (intr_status & QCOM_ICE_STREAM1_NOT_EXPECTED_LBO) {
+ err = ICE_ERROR_STREAM1_UNEXPECTED_LBA;
+ clear_reg |= QCOM_ICE_STREAM1_NOT_EXPECTED_LBO;
+ } else if (intr_status & QCOM_ICE_STREAM2_NOT_EXPECTED_LBO) {
+ err = ICE_ERROR_STREAM2_UNEXPECTED_LBA;
+ clear_reg |= QCOM_ICE_STREAM2_NOT_EXPECTED_LBO;
+ } else if (intr_status & QCOM_ICE_STREAM1_NOT_EXPECTED_DUN) {
+ err = ICE_ERROR_STREAM1_NOT_EXPECTED_DUN;
+ clear_reg |= QCOM_ICE_STREAM1_NOT_EXPECTED_DUN;
+ } else if (intr_status & QCOM_ICE_STREAM2_NOT_EXPECTED_DUN) {
+ err = ICE_ERROR_STREAM2_NOT_EXPECTED_DUN;
+ clear_reg |= QCOM_ICE_STREAM2_NOT_EXPECTED_DUN;
+ } else if (intr_status & QCOM_ICE_STREAM1_NOT_EXPECTED_DUS) {
+ err = ICE_ERROR_STREAM1_NOT_EXPECTED_DUS;
+ clear_reg |= QCOM_ICE_STREAM1_NOT_EXPECTED_DUS;
+ } else if (intr_status & QCOM_ICE_STREAM2_NOT_EXPECTED_DUS) {
+ err = ICE_ERROR_STREAM2_NOT_EXPECTED_DUS;
+ clear_reg |= QCOM_ICE_STREAM2_NOT_EXPECTED_DUS;
+ } else if (intr_status & QCOM_ICE_STREAM1_NOT_EXPECTED_DBO) {
+ err = ICE_ERROR_STREAM1_NOT_EXPECTED_DBO;
+ clear_reg |= QCOM_ICE_STREAM1_NOT_EXPECTED_DBO;
+ } else if (intr_status & QCOM_ICE_STREAM2_NOT_EXPECTED_DBO) {
+ err = ICE_ERROR_STREAM2_NOT_EXPECTED_DBO;
+ clear_reg |= QCOM_ICE_STREAM2_NOT_EXPECTED_DBO;
+ } else if (intr_status &
+ QCOM_ICE_STREAM1_NOT_EXPECTED_ENC_SEL) {
+ err = ICE_ERROR_STREAM1_NOT_EXPECTED_ENC_SEL;
+ clear_reg |= QCOM_ICE_STREAM1_NOT_EXPECTED_ENC_SEL;
+ } else if (intr_status &
+ QCOM_ICE_STREAM2_NOT_EXPECTED_ENC_SEL) {
+ err = ICE_ERROR_STREAM2_NOT_EXPECTED_ENC_SEL;
+ clear_reg |= QCOM_ICE_STREAM2_NOT_EXPECTED_ENC_SEL;
+ } else if (intr_status &
+ QCOM_ICE_STREAM1_NOT_EXPECTED_CONF_IDX) {
+ err = ICE_ERROR_STREAM1_NOT_EXPECTED_CONF_IDX;
+ clear_reg |= QCOM_ICE_STREAM1_NOT_EXPECTED_CONF_IDX;
+ } else if (intr_status &
+ QCOM_ICE_STREAM2_NOT_EXPECTED_CONF_IDX) {
+ err = ICE_ERROR_STREAM2_NOT_EXPECTED_CONF_IDX;
+ clear_reg |= QCOM_ICE_STREAM2_NOT_EXPECTED_CONF_IDX;
+ } else if (intr_status &
+ QCOM_ICE_STREAM1_NOT_EXPECTED_NEW_TRNS) {
+ err = ICE_ERROR_STREAM1_NOT_EXPECTED_NEW_TRNS;
+ clear_reg |= QCOM_ICE_STREAM1_NOT_EXPECTED_NEW_TRNS;
+ } else if (intr_status &
+ QCOM_ICE_STREAM2_NOT_EXPECTED_NEW_TRNS) {
+ err = ICE_ERROR_STREAM2_NOT_EXPECTED_NEW_TRNS;
+ clear_reg |= QCOM_ICE_STREAM2_NOT_EXPECTED_NEW_TRNS;
+ }
+
+ ice_dev->error_cb(ice_dev->host_controller_data, err);
+
+ /* Interrupt has been handled. Clear the IRQ */
+ qcom_ice_clear_irq(ice_dev);
+ retval = IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&ice_dev->lock, flags);
+ return retval;
+}
+
+/*
+ * qcom_ice_parse_ice_instance_type() - copy the "qcom,instance-type" DT
+ * string ("ufs" or "sdcc" per the binding) into
+ * ice_dev->ice_instance_type. Leaves the field untouched on error.
+ */
+static void qcom_ice_parse_ice_instance_type(struct platform_device *pdev,
+ struct ice_device *ice_dev)
+{
+ int ret = -1;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const char *type;
+
+ ret = of_property_read_string_index(np, "qcom,instance-type", 0, &type);
+ if (ret) {
+ pr_err("%s: Could not get ICE instance type\n", __func__);
+ goto out;
+ }
+ strlcpy(ice_dev->ice_instance_type, type, QCOM_ICE_TYPE_NAME_LEN);
+out:
+ return;
+}
+
+/*
+ * qcom_ice_parse_clock_info() - build ice_dev->clk_list_head from the
+ * "clock-names" and "qcom,op-freq-hz" DT properties.
+ *
+ * The two property arrays must have the same length; each entry becomes
+ * an ice_clk_info with max_freq taken from qcom,op-freq-hz. Returns 0
+ * on success, a negative errno (or -1) on failure.
+ *
+ * NOTE(review): the of_property_read_u32_array() result is not checked
+ * before use, and the kstrdup() result is not checked for NULL.
+ */
+static int qcom_ice_parse_clock_info(struct platform_device *pdev,
+ struct ice_device *ice_dev)
+{
+ int ret = -1, cnt, i, len;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ char *name;
+ struct ice_clk_info *clki;
+ u32 *clkfreq = NULL;
+
+ if (!np)
+ goto out;
+
+ cnt = of_property_count_strings(np, "clock-names");
+ if (cnt <= 0) {
+ dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
+ __func__);
+ ret = cnt;
+ goto out;
+ }
+
+ if (!of_get_property(np, "qcom,op-freq-hz", &len)) {
+ dev_info(dev, "qcom,op-freq-hz property not specified\n");
+ goto out;
+ }
+
+ /* property length in bytes -> number of u32 entries */
+ len = len/sizeof(*clkfreq);
+ if (len != cnt)
+ goto out;
+
+ clkfreq = devm_kzalloc(dev, len * sizeof(*clkfreq), GFP_KERNEL);
+ if (!clkfreq) {
+ dev_err(dev, "%s: no memory\n", "qcom,op-freq-hz");
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = of_property_read_u32_array(np, "qcom,op-freq-hz", clkfreq, len);
+
+ INIT_LIST_HEAD(&ice_dev->clk_list_head);
+
+ for (i = 0; i < cnt; i++) {
+ ret = of_property_read_string_index(np,
+ "clock-names", i, (const char **)&name);
+ if (ret)
+ goto out;
+
+ clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
+ if (!clki) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ clki->max_freq = clkfreq[i];
+ clki->name = kstrdup(name, GFP_KERNEL);
+ list_add_tail(&clki->list, &ice_dev->clk_list_head);
+ }
+out:
+ /* clkfreq is only a parse scratch buffer; release it eagerly */
+ if (clkfreq)
+ devm_kfree(dev, (void *)clkfreq);
+ return ret;
+}
+
+/*
+ * Parse DT resources for one ICE instance: the MMIO region, the
+ * optional vdd-hba regulator phandle, and — when "qcom,enable-ice-clk"
+ * is set — clock info, the ICE IRQ and the instance-type string.
+ * Returns 0 on success or a negative errno.
+ */
+static int qcom_ice_get_device_tree_data(struct platform_device *pdev,
+		struct ice_device *ice_dev)
+{
+	struct device *dev = &pdev->dev;
+	int irq, rc = -1;
+
+	ice_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!ice_dev->res) {
+		pr_err("%s: No memory available for IORESOURCE\n", __func__);
+		return -ENOMEM;
+	}
+
+	ice_dev->mmio = devm_ioremap_resource(dev, ice_dev->res);
+	if (IS_ERR(ice_dev->mmio)) {
+		rc = PTR_ERR(ice_dev->mmio);
+		pr_err("%s: Error = %d mapping ICE io memory\n", __func__, rc);
+		goto out;
+	}
+
+	/* The regulator is optional; its absence only skips voting later */
+	if (!of_parse_phandle(pdev->dev.of_node, "vdd-hba-supply", 0)) {
+		pr_err("%s: No vdd-hba-supply regulator, assuming not needed\n",
+			 __func__);
+		ice_dev->is_regulator_available = false;
+	} else {
+		ice_dev->is_regulator_available = true;
+	}
+	ice_dev->is_ice_clk_available = of_property_read_bool(
+						(&pdev->dev)->of_node,
+						"qcom,enable-ice-clk");
+
+
+	if (ice_dev->is_ice_clk_available) {
+		rc = qcom_ice_parse_clock_info(pdev, ice_dev);
+		if (rc)
+			goto err_dev;
+
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0) {
+			dev_err(dev, "IRQ resource not available\n");
+			rc = -ENODEV;
+			goto err_dev;
+		}
+		/*
+		 * NOTE(review): the ISR may fire as soon as this succeeds,
+		 * so ice_dev must be fully usable by the handler here.
+		 */
+		rc = devm_request_irq(dev, irq, qcom_ice_isr, 0,
+				dev_name(dev), ice_dev);
+		if (rc) {
+			goto err_dev;
+		} else {
+			ice_dev->irq = irq;
+			ice_dev->is_irq_enabled = true;
+		}
+		pr_info("ICE IRQ = %d\n", ice_dev->irq);
+		qcom_ice_parse_ice_instance_type(pdev, ice_dev);
+	}
+	return 0;
+err_dev:
+	/* devm would unmap on detach anyway; this just releases it early */
+	if (rc && ice_dev->mmio)
+		devm_iounmap(dev, ice_dev->mmio);
+out:
+	return rc;
+}
+
+/*
+ * ICE HW instance can exist in UFS or eMMC based storage HW
+ * Userspace does not know what kind of ICE it is dealing with.
+ * Though userspace can find which storage device it is booting
+ * from but all kind of storage types dont support ICE from
+ * beginning. So ICE device is created for user space to ping
+ * if ICE exist for that kind of storage
+ */
+/* No ops beyond open/close: the node's mere existence is the answer. */
+static const struct file_operations qcom_ice_fops = {
+	.owner = THIS_MODULE,
+};
+
+/*
+ * Create the userspace-visible char device ("qcom_sdcc_ice" or
+ * "qcom_ufs_ice") so userspace can probe for ICE presence.
+ * Returns 0 on success or a negative errno, undoing partial setup.
+ */
+static int register_ice_device(struct ice_device *ice_dev)
+{
+	int rc = 0;
+	unsigned baseminor = 0;
+	unsigned count = 1;
+	struct device *class_dev;
+	int is_sdcc_ice = !strcmp(ice_dev->ice_instance_type, "sdcc");
+
+	rc = alloc_chrdev_region(&ice_dev->device_no, baseminor, count,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+	if (rc < 0) {
+		pr_err("alloc_chrdev_region failed %d for %s\n", rc,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+		return rc;
+	}
+	ice_dev->driver_class = class_create(THIS_MODULE,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+	if (IS_ERR(ice_dev->driver_class)) {
+		rc = -ENOMEM;
+		pr_err("class_create failed %d for %s\n", rc,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+		goto exit_unreg_chrdev_region;
+	}
+	class_dev = device_create(ice_dev->driver_class, NULL,
+					ice_dev->device_no, NULL,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+
+	/* device_create() returns an ERR_PTR on failure, never NULL */
+	if (IS_ERR(class_dev)) {
+		rc = PTR_ERR(class_dev);
+		pr_err("class_device_create failed %d for %s\n", rc,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+		goto exit_destroy_class;
+	}
+
+	cdev_init(&ice_dev->cdev, &qcom_ice_fops);
+	ice_dev->cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&ice_dev->cdev, MKDEV(MAJOR(ice_dev->device_no), 0), 1);
+	if (rc < 0) {
+		pr_err("cdev_add failed %d for %s\n", rc,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+		goto exit_destroy_device;
+	}
+	return  0;
+
+exit_destroy_device:
+	device_destroy(ice_dev->driver_class, ice_dev->device_no);
+
+exit_destroy_class:
+	class_destroy(ice_dev->driver_class);
+
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(ice_dev->device_no, 1);
+	return rc;
+}
+
+/*
+ * Probe: allocate the per-instance ice_device, parse DT resources,
+ * create the userspace query node and enlist the device for lookup by
+ * qcom_ice_get_pdevice()/get_ice_device_from_storage_type().
+ */
+static int qcom_ice_probe(struct platform_device *pdev)
+{
+	struct ice_device *ice_dev;
+	int rc = 0;
+
+	if (!pdev) {
+		pr_err("%s: Invalid platform_device passed\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	ice_dev = kzalloc(sizeof(struct ice_device), GFP_KERNEL);
+
+	if (!ice_dev) {
+		rc = -ENOMEM;
+		pr_err("%s: Error %d allocating memory for ICE device:\n",
+			__func__, rc);
+		goto out;
+	}
+
+	ice_dev->pdev = &pdev->dev;
+	if (!ice_dev->pdev) {
+		rc = -EINVAL;
+		pr_err("%s: Invalid device passed in platform_device\n",
+			__func__);
+		goto err_ice_dev;
+	}
+
+	/*
+	 * The ISR registered while parsing DT takes ice_dev->lock, so the
+	 * lock must be initialized before devm_request_irq() can fire it.
+	 */
+	spin_lock_init(&ice_dev->lock);
+
+	if (pdev->dev.of_node)
+		rc = qcom_ice_get_device_tree_data(pdev, ice_dev);
+	else {
+		rc = -EINVAL;
+		pr_err("%s: ICE device node not found\n", __func__);
+	}
+
+	if (rc)
+		goto err_ice_dev;
+
+	pr_debug("%s: Registering ICE device\n", __func__);
+	rc = register_ice_device(ice_dev);
+	if (rc) {
+		pr_err("create character device failed.\n");
+		goto err_ice_dev;
+	}
+	/*
+	 * If ICE is enabled here, it would be waste of power.
+	 * We would enable ICE when first request for crypto
+	 * operation arrives.
+	 */
+	ice_dev->is_ice_enabled = false;
+
+	platform_set_drvdata(pdev, ice_dev);
+	list_add_tail(&ice_dev->list, &ice_devices);
+
+	goto out;
+
+err_ice_dev:
+	kfree(ice_dev);
+out:
+	return rc;
+}
+
+/*
+ * Remove: quiesce interrupts, drop the device from the global list and
+ * free it. Returns 0 (platform remove callbacks report success as 0).
+ */
+static int qcom_ice_remove(struct platform_device *pdev)
+{
+	struct ice_device *ice_dev;
+
+	ice_dev = (struct ice_device *)platform_get_drvdata(pdev);
+
+	if (!ice_dev)
+		return 0;
+
+	qcom_ice_disable_intr(ice_dev);
+
+	device_init_wakeup(&pdev->dev, false);
+	/*
+	 * ice_dev->mmio was mapped with devm_ioremap_resource() and is
+	 * unmapped automatically on driver detach; a manual iounmap()
+	 * here would result in a double unmap.
+	 */
+
+	list_del_init(&ice_dev->list);
+	kfree(ice_dev);
+
+	return 0;
+}
+
+/*
+ * Suspend hook of qcom_ice_ops: nothing to save — ICE state is
+ * re-established through the resume/reset paths below.
+ */
+static int qcom_ice_suspend(struct platform_device *pdev)
+{
+	return 0;
+}
+
+/*
+ * Ask TZ (via SCM) to restore the ICE key LUT after a reset wiped it.
+ * Returns the scm_call2() result: 0 on success.
+ */
+static int qcom_ice_restore_config(void)
+{
+	struct scm_desc desc = {0};
+	int ret;
+
+	/*
+	 * TZ would check KEYS_RAM_RESET_COMPLETED status bit before processing
+	 * restore config command. This would prevent two calls from HLOS to TZ
+	 * One to check KEYS_RAM_RESET_COMPLETED status bit second to restore
+	 * config
+	 */
+
+	desc.arginfo = TZ_OS_KS_RESTORE_KEY_ID_PARAM_ID;
+
+	ret = scm_call2(TZ_OS_KS_RESTORE_KEY_ID, &desc);
+
+	if (ret)
+		pr_err("%s: Error: 0x%x\n", __func__, ret);
+
+	return ret;
+}
+
+/*
+ * Acquire (devm_clk_get) each clock collected by
+ * qcom_ice_parse_clock_info() and program its max_freq where one was
+ * given. Returns 0 on success, a negative errno on the first failure.
+ */
+static int qcom_ice_init_clocks(struct ice_device *ice)
+{
+	int ret = -1;
+	struct ice_clk_info *clki;
+	struct device *dev = ice->pdev;
+	struct list_head *head = &ice->clk_list_head;
+
+	if (!head || list_empty(head)) {
+		dev_err(dev, "%s:ICE Clock list null/empty\n", __func__);
+		goto out;
+	}
+
+	list_for_each_entry(clki, head, list) {
+		if (!clki->name)
+			continue;
+
+		clki->clk = devm_clk_get(dev, clki->name);
+		if (IS_ERR(clki->clk)) {
+			ret = PTR_ERR(clki->clk);
+			dev_err(dev, "%s: %s clk get failed, %d\n",
+					__func__, clki->name, ret);
+			goto out;
+		}
+
+		/* Not all clocks would have a rate to be set */
+		ret = 0;
+		if (clki->max_freq) {
+			ret = clk_set_rate(clki->clk, clki->max_freq);
+			if (ret) {
+				dev_err(dev,
+				"%s: %s clk set rate(%dHz) failed, %d\n",
+						__func__, clki->name,
+					clki->max_freq, ret);
+				goto out;
+			}
+			clki->curr_freq = clki->max_freq;
+			dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
+				clki->name, clk_get_rate(clki->clk));
+		}
+	}
+out:
+	return ret;
+}
+
+/*
+ * Enable or disable every clock on ice->clk_list_head.
+ * Returns 0 on success, a negative errno on the first enable failure.
+ */
+static int qcom_ice_enable_clocks(struct ice_device *ice, bool enable)
+{
+	int ret = 0;
+	struct ice_clk_info *clki;
+	struct device *dev = ice->pdev;
+	struct list_head *head = &ice->clk_list_head;
+
+	if (!head || list_empty(head)) {
+		dev_err(dev, "%s:ICE Clock list null/empty\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!ice->is_ice_clk_available) {
+		dev_err(dev, "%s:ICE Clock not available\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	list_for_each_entry(clki, head, list) {
+		if (!clki->name)
+			continue;
+
+		if (enable)
+			ret = clk_prepare_enable(clki->clk);
+		else
+			clk_disable_unprepare(clki->clk);
+
+		/*
+		 * clk_disable_unprepare() returns void, so in the disable
+		 * path ret stays 0 and this branch only fires on a failed
+		 * clk_prepare_enable().
+		 */
+		if (ret) {
+			dev_err(dev, "Unable to %s ICE core clk\n",
+				enable?"enable":"disable");
+			goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+/*
+ * Unmask the ICE secure interrupt sources through secure IO (SCM).
+ * Returns 0 on success, non-zero from scm_io_write() on failure.
+ */
+static int qcom_ice_secure_ice_init(struct ice_device *ice_dev)
+{
+	/* We need to enable source for ICE secure interrupts */
+	int ret = 0;
+	u32 regval;
+
+	regval = scm_io_read((unsigned long)ice_dev->res +
+			QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK);
+
+	regval &= ~QCOM_ICE_SEC_IRQ_MASK;
+	ret = scm_io_write((unsigned long)ice_dev->res +
+			QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK, regval);
+
+	/*
+	 * Ensure previous instructions was completed before issuing next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+
+	/*
+	 * scm_io_write() returns 0 on success, so only a non-zero result
+	 * is an error. The previous check was inverted: it logged a
+	 * failure on every successful write and stayed silent on failure.
+	 */
+	if (ret)
+		pr_err("%s: failed(0x%x) to init secure ICE config\n",
+				__func__, ret);
+	return ret;
+}
+
+/*
+ * Ask TZ to restore the secure configuration for the ICE device
+ * (scm_restore_sec_cfg). Skipped when the ICE clock is under HLOS
+ * control (is_ice_clk_available). Returns 0 on success.
+ */
+static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev)
+{
+	int ret = 0, scm_ret = 0;
+
+	/* scm command buffer structrue */
+	struct qcom_scm_cmd_buf {
+		unsigned int device_id;
+		unsigned int spare;
+	} cbuf = {0};
+
+	/*
+	 * Ideally, we should check ICE version to decide whether to proceed or
+	 * or not. Since version wont be available when this function is called
+	 * we need to depend upon is_ice_clk_available to decide
+	 */
+	if (ice_dev->is_ice_clk_available)
+		goto out;
+
+	/*
+	 * Store dev_id in ice_device structure so that emmc/ufs cases can be
+	 * handled properly
+	 */
+	#define RESTORE_SEC_CFG_CMD	0x2
+	#define ICE_TZ_DEV_ID	20
+
+	cbuf.device_id = ICE_TZ_DEV_ID;
+	ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
+	if (ret || scm_ret) {
+		pr_err("%s: failed, ret %d scm_ret %d\n",
+			__func__, ret, scm_ret);
+		/* propagate the TZ-side failure when the call itself succeeded */
+		if (!ret)
+			ret = scm_ret;
+	}
+out:
+
+	return ret;
+}
+
+/*
+ * Bring the ICE HW to an operational state: clocks and bus voting,
+ * TZ secure config, HW identity check, disable-fuse check, then
+ * low-power/optimization setup and interrupt enable.
+ * Returns 0 or a negative ICE_ERROR_* code.
+ */
+static int qcom_ice_finish_init(struct ice_device *ice_dev)
+{
+	unsigned reg;
+	int err = 0;
+
+	if (!ice_dev) {
+		pr_err("%s: Null data received\n", __func__);
+		err = -ENODEV;
+		goto out;
+	}
+
+	if (ice_dev->is_ice_clk_available) {
+		if (qcom_ice_init_clocks(ice_dev)) {
+			err = -ICE_ERROR_IMPROPER_INITIALIZATION;
+			goto out;
+		}
+		if (qcom_ice_bus_register(ice_dev)) {
+			err = -ICE_ERROR_IMPROPER_INITIALIZATION;
+			goto out;
+		}
+	}
+
+	/*
+	 * It is possible that ICE device is not probed when host is probed
+	 * This would cause host probe to be deferred. When probe for host is
+	 * defered, it can cause power collapse for host and that can wipe
+	 * configurations of host & ice. It is prudent to restore the config
+	 */
+	if (qcom_ice_update_sec_cfg(ice_dev)) {
+		err = -ICE_ERROR_ICE_TZ_INIT_FAILED;
+		goto out;
+	}
+
+	if (qcom_ice_verify_ice(ice_dev)) {
+		err = -ICE_ERROR_UNEXPECTED_ICE_DEVICE;
+		goto out;
+	}
+
+	/* if ICE_DISABLE_FUSE is blown, return immediately
+	 * Currently, FORCE HW Keys are also disabled, since
+	 * there is no use case for their usage neither in FDE
+	 * nor in PFE
+	 */
+	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_FUSE_SETTING);
+	reg &= (ICE_FUSE_SETTING_MASK |
+		ICE_FORCE_HW_KEY0_SETTING_MASK |
+		ICE_FORCE_HW_KEY1_SETTING_MASK);
+
+	if (reg) {
+		ice_dev->is_ice_disable_fuse_blown = true;
+		pr_err("%s: Error: ICE_ERROR_HW_DISABLE_FUSE_BLOWN\n",
+				__func__);
+		err = -ICE_ERROR_HW_DISABLE_FUSE_BLOWN;
+		goto out;
+	}
+
+	/* TZ side of ICE driver would handle secure init of ICE HW from v2 */
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1 &&
+		!qcom_ice_secure_ice_init(ice_dev)) {
+		pr_err("%s: Error: ICE_ERROR_ICE_TZ_INIT_FAILED\n", __func__);
+		err = -ICE_ERROR_ICE_TZ_INIT_FAILED;
+		goto out;
+	}
+
+	qcom_ice_low_power_mode_enable(ice_dev);
+	qcom_ice_optimization_enable(ice_dev);
+	qcom_ice_config_proc_ignore(ice_dev);
+	qcom_ice_enable_test_bus_config(ice_dev);
+	qcom_ice_enable(ice_dev);
+	ice_dev->is_ice_enabled = true;
+	qcom_ice_enable_intr(ice_dev);
+
+out:
+	return err;
+}
+
+/*
+ * Entry point used by the storage host controller (via qcom_ice_ops):
+ * record the host's error callback and context, then run the full HW
+ * init sequence. ICE comes up in Global Bypass mode; it is enabled
+ * per-request later.
+ */
+static int qcom_ice_init(struct platform_device *pdev,
+			void *host_controller_data,
+			ice_error_cb error_cb)
+{
+	struct ice_device *ice_dev = platform_get_drvdata(pdev);
+
+	if (!ice_dev) {
+		pr_err("%s: invalid device\n", __func__);
+		return -EINVAL;
+	}
+
+	ice_dev->host_controller_data = host_controller_data;
+	ice_dev->error_cb = error_cb;
+
+	return qcom_ice_finish_init(ice_dev);
+}
+EXPORT_SYMBOL(qcom_ice_init);
+
+/*
+ * Re-initialize ICE after the storage controller power-collapsed:
+ * redo low-power/optimization setup, re-enable ICE and (on v1 HW)
+ * have TZ restore the wiped key LUT. Records the reset-complete time.
+ * Returns 0 or a negative ICE_ERROR_* code.
+ */
+static int qcom_ice_finish_power_collapse(struct ice_device *ice_dev)
+{
+	int err = 0;
+
+	if (ice_dev->is_ice_disable_fuse_blown) {
+		err = -ICE_ERROR_HW_DISABLE_FUSE_BLOWN;
+		goto out;
+	}
+
+	if (ice_dev->is_ice_enabled) {
+		/*
+		 * ICE resets into global bypass mode with optimization and
+		 * low power mode disabled. Hence we need to redo those seq's.
+		 */
+		qcom_ice_low_power_mode_enable(ice_dev);
+
+		qcom_ice_enable_test_bus_config(ice_dev);
+
+		qcom_ice_optimization_enable(ice_dev);
+		qcom_ice_enable(ice_dev);
+
+		if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1) {
+			/*
+			 * When ICE resets, it wipes all of keys from LUTs
+			 * ICE driver should call TZ to restore keys
+			 */
+			if (qcom_ice_restore_config()) {
+				err = -ICE_ERROR_ICE_KEY_RESTORE_FAILED;
+				goto out;
+			}
+		}
+		/*
+		 * INTR Status are not retained. So there is no need to
+		 * clear those.
+		 */
+		ice_dev->is_clear_irq_pending = false;
+	}
+
+	ice_dev->ice_reset_complete_time = ktime_get();
+out:
+	return err;
+}
+
+/* PM resume hook exposed to the storage driver through qcom_ice_ops. */
+static int qcom_ice_resume(struct platform_device *pdev)
+{
+	/*
+	 * ICE is power collapsed when storage controller is power collapsed
+	 * ICE resume function is responsible for:
+	 * ICE HW enabling sequence
+	 * Key restoration
+	 * A completion event should be triggered
+	 * upon resume completion
+	 * Storage driver will be fully operational only
+	 * after receiving this event
+	 */
+	struct ice_device *ice_dev;
+
+	ice_dev = platform_get_drvdata(pdev);
+
+	if (!ice_dev)
+		return -EINVAL;
+
+	if (ice_dev->is_ice_clk_available) {
+		/*
+		 * Storage is calling this function after power collapse which
+		 * would put ICE into GLOBAL_BYPASS mode. Make sure to enable
+		 * ICE
+		 */
+		qcom_ice_enable(ice_dev);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qcom_ice_resume);
+
+/*
+ * Walk every test-bus selector (and every stream-1 datapath selector)
+ * and print the corresponding TEST_BUS_REG value for debugging.
+ */
+static void qcom_ice_dump_test_bus(struct ice_device *ice_dev)
+{
+	u32 ctrl;
+	u32 val;
+	u8 sel;
+
+	pr_err("ICE TEST BUS DUMP:\n");
+
+	for (sel = 0; sel <= 0xF; sel++) {
+		if (sel == 0xD)	/* 0xD selects the stream bus, dumped below */
+			continue;
+		ctrl = 0x1 | ((u32)sel << 28);	/* bit 0 enables the test bus */
+		qcom_ice_writel(ice_dev, ctrl, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+		/*
+		 * make sure test bus selector is written before reading
+		 * the test bus register
+		 */
+		mb();
+		val = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_REG);
+		pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
+			ctrl, val);
+	}
+
+	pr_err("ICE TEST BUS DUMP (ICE_STREAM1_DATAPATH_TEST_BUS):\n");
+	for (sel = 0; sel <= 0xF; sel++) {
+		/* 0xD0000001 enables the stream test bus */
+		ctrl = 0xD0000001 | ((u32)sel << 16);
+		qcom_ice_writel(ice_dev, ctrl, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+		/*
+		 * make sure test bus selector is written before reading
+		 * the test bus register
+		 */
+		mb();
+		val = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_REG);
+		pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
+			ctrl, val);
+	}
+}
+
+/*
+ * Debug hook of qcom_ice_ops: dump every ICE register (control, fuse,
+ * parameters, IRQ state, error syndromes, stream counters), the test
+ * bus, and the last reset start/complete timestamps.
+ */
+static void qcom_ice_debug(struct platform_device *pdev)
+{
+	struct ice_device *ice_dev;
+
+	if (!pdev) {
+		pr_err("%s: Invalid params passed\n", __func__);
+		goto out;
+	}
+
+	ice_dev = platform_get_drvdata(pdev);
+
+	if (!ice_dev) {
+		pr_err("%s: No ICE device available\n", __func__);
+		goto out;
+	}
+
+	/* register reads below require ICE to be up and clocked */
+	if (!ice_dev->is_ice_enabled) {
+		pr_err("%s: ICE device is not enabled\n", __func__);
+		goto out;
+	}
+
+	pr_err("%s: =========== REGISTER DUMP (%p)===========\n",
+			ice_dev->ice_instance_type, ice_dev);
+
+	pr_err("%s: ICE Control: 0x%08x | ICE Reset: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_CONTROL),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_RESET));
+
+	pr_err("%s: ICE Version: 0x%08x | ICE FUSE:	0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_VERSION),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_FUSE_SETTING));
+
+	pr_err("%s: ICE Param1: 0x%08x | ICE Param2:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_2));
+
+	pr_err("%s: ICE Param3: 0x%08x | ICE Param4:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_3),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_4));
+
+	pr_err("%s: ICE Param5: 0x%08x | ICE IRQ STTS:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_5),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_STTS));
+
+	pr_err("%s: ICE IRQ MASK: 0x%08x | ICE IRQ CLR:	0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_CLR));
+
+	/* BIST/bypass status registers only exist on ICE >= v2.1 */
+	if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
+		((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
+		 (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
+		pr_err("%s: ICE BIST Sts: 0x%08x | ICE Bypass Sts:  0x%08x\n",
+			ice_dev->ice_instance_type,
+			qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BIST_STATUS),
+			qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS));
+	}
+
+	pr_err("%s: ICE ADV CTRL: 0x%08x | ICE ENDIAN SWAP:	0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ENDIAN_SWAP));
+
+	pr_err("%s: ICE_STM1_ERR_SYND1: 0x%08x | ICE_STM1_ERR_SYND2: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME2));
+
+	pr_err("%s: ICE_STM2_ERR_SYND1: 0x%08x | ICE_STM2_ERR_SYND2: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME2));
+
+	pr_err("%s: ICE_STM1_COUNTER1: 0x%08x | ICE_STM1_COUNTER2: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS2));
+
+	pr_err("%s: ICE_STM1_COUNTER3: 0x%08x | ICE_STM1_COUNTER4: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS3),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS4));
+
+	pr_err("%s: ICE_STM2_COUNTER1: 0x%08x | ICE_STM2_COUNTER2: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS2));
+
+	pr_err("%s: ICE_STM2_COUNTER3: 0x%08x | ICE_STM2_COUNTER4: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS3),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS4));
+
+	pr_err("%s: ICE_STM1_CTR5_MSB: 0x%08x | ICE_STM1_CTR5_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS5_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS5_LSB));
+
+	pr_err("%s: ICE_STM1_CTR6_MSB: 0x%08x | ICE_STM1_CTR6_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS6_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS6_LSB));
+
+	pr_err("%s: ICE_STM1_CTR7_MSB: 0x%08x | ICE_STM1_CTR7_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS7_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS7_LSB));
+
+	pr_err("%s: ICE_STM1_CTR8_MSB: 0x%08x | ICE_STM1_CTR8_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS8_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS8_LSB));
+
+	pr_err("%s: ICE_STM1_CTR9_MSB: 0x%08x | ICE_STM1_CTR9_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS9_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS9_LSB));
+
+	pr_err("%s: ICE_STM2_CTR5_MSB: 0x%08x | ICE_STM2_CTR5_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS5_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS5_LSB));
+
+	pr_err("%s: ICE_STM2_CTR6_MSB: 0x%08x | ICE_STM2_CTR6_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS6_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS6_LSB));
+
+	pr_err("%s: ICE_STM2_CTR7_MSB: 0x%08x | ICE_STM2_CTR7_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS7_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS7_LSB));
+
+	pr_err("%s: ICE_STM2_CTR8_MSB: 0x%08x | ICE_STM2_CTR8_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS8_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS8_LSB));
+
+	pr_err("%s: ICE_STM2_CTR9_MSB: 0x%08x | ICE_STM2_CTR9_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS9_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS9_LSB));
+
+	qcom_ice_dump_test_bus(ice_dev);
+	pr_err("%s: ICE reset start time: %llu ICE reset done time: %llu\n",
+			ice_dev->ice_instance_type,
+		(unsigned long long)ice_dev->ice_reset_start_time.tv64,
+		(unsigned long long)ice_dev->ice_reset_complete_time.tv64);
+
+	if (ktime_to_us(ktime_sub(ice_dev->ice_reset_complete_time,
+				  ice_dev->ice_reset_start_time)) > 0)
+		pr_err("%s: Time taken for reset: %lu\n",
+			ice_dev->ice_instance_type,
+			(unsigned long)ktime_to_us(ktime_sub(
+					ice_dev->ice_reset_complete_time,
+					ice_dev->ice_reset_start_time)));
+out:
+	return;
+}
+EXPORT_SYMBOL(qcom_ice_debug);
+
+
+/*
+ * Reset hook of qcom_ice_ops: stamp the reset start time and redo the
+ * post-power-collapse HW re-initialization.
+ */
+static int qcom_ice_reset(struct platform_device *pdev)
+{
+	struct ice_device *ice_dev = platform_get_drvdata(pdev);
+
+	if (!ice_dev) {
+		pr_err("%s: INVALID ice_dev\n", __func__);
+		return -EINVAL;
+	}
+
+	ice_dev->ice_reset_start_time = ktime_get();
+
+	return qcom_ice_finish_power_collapse(ice_dev);
+}
+EXPORT_SYMBOL(qcom_ice_reset);
+
+/*
+ * Per-request configuration hook of qcom_ice_ops. Defaults to bypass,
+ * then tries per-file-key (PFE) configuration first and falls back to
+ * dm-based FDE settings for BIO_INLINECRYPT-flagged bios.
+ * Returns 0 on success (including "nothing to configure").
+ */
+static int qcom_ice_config(struct platform_device *pdev, struct request *req,
+		struct ice_data_setting *setting)
+{
+	struct ice_crypto_setting *crypto_data;
+	struct ice_crypto_setting pfk_crypto_data = {0};
+	union map_info *info;
+	int ret = 0;
+	bool configured = false;
+
+	if (!pdev || !req || !setting) {
+		pr_err("%s: Invalid params passed\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Default to bypass: requests without crypto context must pass
+	 * through ICE untouched. (setting is known non-NULL here, so the
+	 * previous redundant re-check is gone.)
+	 */
+	setting->encr_bypass = true;
+	setting->decr_bypass = true;
+
+	if (!req->bio) {
+		/* It is not an error to have a request with no bio */
+		return 0;
+	}
+
+	ret = pfk_load_key(req->bio, &pfk_crypto_data);
+	if (0 == ret) {
+		ret = qti_ice_setting_config(req, pdev, &pfk_crypto_data,
+				setting, &configured);
+		if (ret)
+			/*
+			 * there was an error with configuring the setting,
+			 * exit with error
+			 */
+			return ret;
+		if (configured)
+			/*
+			 * configuration was complete, we are done, no need
+			 * to go further with FDE
+			 */
+			return 0;
+	}
+
+	/*
+	 * info field in req->end_io_data could be used by mulitple dm or
+	 * non-dm entities. To ensure that we are running operation on dm
+	 * based request, check the BIO_INLINECRYPT flag
+	 */
+	if (bio_flagged(req->bio, BIO_INLINECRYPT)) {
+		info = dm_get_rq_mapinfo(req);
+		if (!info) {
+			pr_debug("%s info not available in request\n",
+				__func__);
+			return 0;
+		}
+
+		crypto_data = (struct ice_crypto_setting *)info->ptr;
+		if (!crypto_data) {
+			pr_err("%s crypto_data not available in request\n",
+				__func__);
+			return -EINVAL;
+		}
+
+		return qti_ice_setting_config(req, pdev, crypto_data,
+				setting, &configured);
+	}
+
+	/*
+	 * It is not an error. If target is not req-crypt based, all request
+	 * from storage driver would come here to check if there is any ICE
+	 * setting required
+	 */
+	return 0;
+}
+EXPORT_SYMBOL(qcom_ice_config);
+
+/*
+ * Report whether a non-secure ICE interrupt is pending, as seen on the
+ * test bus register. Returns 1 if pending, 0 if not, negative errno on
+ * bad/missing device.
+ */
+static int qcom_ice_status(struct platform_device *pdev)
+{
+	struct ice_device *ice_dev;
+	unsigned int reg;
+
+	if (!pdev) {
+		pr_err("%s: Invalid params passed\n", __func__);
+		return -EINVAL;
+	}
+
+	ice_dev = platform_get_drvdata(pdev);
+	if (!ice_dev || !ice_dev->is_ice_enabled)
+		return -ENODEV;
+
+	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_REG);
+
+	return (reg & QCOM_ICE_TEST_BUS_REG_NON_SECURE_INTR) ? 1 : 0;
+}
+EXPORT_SYMBOL(qcom_ice_status);
+
+/* Dispatch table handed to storage drivers via qcom_ice_get_variant_ops() */
+const struct qcom_ice_variant_ops qcom_ice_ops = {
+	.name             = "qcom",
+	.init             = qcom_ice_init,
+	.reset            = qcom_ice_reset,
+	.resume           = qcom_ice_resume,
+	.suspend          = qcom_ice_suspend,
+	.config           = qcom_ice_config,
+	.status           = qcom_ice_status,
+	.debug            = qcom_ice_debug,
+};
+
+/* Following struct is required to match device with driver from dts file */
+/* const: the table is read-only; of_match_node() takes a const pointer */
+static const struct of_device_id qcom_ice_match[] = {
+	{ .compatible = "qcom,ice",
+	  .data = (void *)&qcom_ice_ops},
+	{},
+};
+MODULE_DEVICE_TABLE(of, qcom_ice_match);
+
+/*
+ * Map a DT node to the probed ICE platform device. Returns NULL when
+ * the node is invalid/unavailable or no probed device matches, and
+ * ERR_PTR(-EPROBE_DEFER) when no ICE device has been probed yet.
+ */
+struct platform_device *qcom_ice_get_pdevice(struct device_node *node)
+{
+	struct platform_device *ice_pdev = NULL;
+	struct ice_device *ice_dev;
+	struct ice_device *found = NULL;
+
+	if (!node) {
+		pr_err("%s: invalid node %p", __func__, node);
+		goto out;
+	}
+
+	if (!of_device_is_available(node)) {
+		pr_err("%s: device unavailable\n", __func__);
+		goto out;
+	}
+
+	if (list_empty(&ice_devices)) {
+		pr_err("%s: invalid device list\n", __func__);
+		ice_pdev = ERR_PTR(-EPROBE_DEFER);
+		goto out;
+	}
+
+	list_for_each_entry(ice_dev, &ice_devices, list) {
+		if (ice_dev->pdev->of_node == node) {
+			pr_info("%s: found ice device %p\n", __func__,
+				ice_dev);
+			found = ice_dev;
+			break;
+		}
+	}
+
+	/*
+	 * If the loop ran to completion the cursor points at the list
+	 * head container, not a real ice_device; dereferencing it would
+	 * be undefined behavior. Bail out instead.
+	 */
+	if (!found) {
+		pr_err("%s: no ICE device matches node %p\n", __func__, node);
+		goto out;
+	}
+
+	ice_pdev = to_platform_device(found->pdev);
+	pr_info("%s: matching platform device %p\n", __func__, ice_pdev);
+out:
+	return ice_pdev;
+}
+
+/*
+ * Find the probed ICE device whose instance type ("ufs"/"sdcc")
+ * matches storage_type. Returns the device, NULL when no instance
+ * matches, or ERR_PTR(-EPROBE_DEFER) when nothing is probed yet.
+ */
+static struct ice_device *get_ice_device_from_storage_type
+					(const char *storage_type)
+{
+	struct ice_device *ice_dev;
+
+	if (list_empty(&ice_devices)) {
+		pr_err("%s: invalid device list\n", __func__);
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	list_for_each_entry(ice_dev, &ice_devices, list) {
+		if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
+			pr_info("%s: found ice device %p\n", __func__,
+				ice_dev);
+			return ice_dev;
+		}
+	}
+
+	/*
+	 * Falling off the loop leaves the cursor pointing at the list
+	 * head container, not a valid ice_device. Return NULL so callers
+	 * can detect "no such instance" safely.
+	 */
+	return NULL;
+}
+
+
+/*
+ * Power up one ICE instance: regulator (if present), clocks, then the
+ * "MAX" bus bandwidth vote. On failure, undo only what was acquired.
+ * Returns 0 on success, negative on failure.
+ */
+static int enable_ice_setup(struct ice_device *ice_dev)
+{
+	int ret = -1, vote;
+
+	/* Setup Regulator */
+	if (ice_dev->is_regulator_available) {
+		if (qcom_ice_get_vreg(ice_dev)) {
+			pr_err("%s: Could not get regulator\n", __func__);
+			goto out;
+		}
+		ret = regulator_enable(ice_dev->reg);
+		if (ret) {
+			pr_err("%s:%p: Could not enable regulator\n",
+					__func__, ice_dev);
+			goto out;
+		}
+	}
+
+	/* Setup Clocks */
+	if (qcom_ice_enable_clocks(ice_dev, true)) {
+		pr_err("%s:%p:%s Could not enable clocks\n", __func__,
+				ice_dev, ice_dev->ice_instance_type);
+		goto out_reg;
+	}
+
+	/* Setup Bus Vote */
+	vote = qcom_ice_get_bus_vote(ice_dev, "MAX");
+	if (vote < 0)
+		goto out_clocks;
+
+	ret = qcom_ice_set_bus_vote(ice_dev, vote);
+	if (ret) {
+		pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret);
+		goto out_clocks;
+	}
+
+	return ret;
+
+out_clocks:
+	qcom_ice_enable_clocks(ice_dev, false);
+out_reg:
+	/*
+	 * Only undo the regulator when one was actually acquired and
+	 * enabled above; ice_dev->reg is not valid otherwise.
+	 */
+	if (ice_dev->is_regulator_available)
+		regulator_disable(ice_dev->reg);
+out:
+	return ret;
+}
+
+/*
+ * Power down one ICE instance: drop to the "MIN" bus vote, disable
+ * clocks, then disable the regulator (if present). Best-effort: each
+ * step is attempted even if an earlier one failed.
+ */
+static int disable_ice_setup(struct ice_device *ice_dev)
+{
+	int ret = -1, vote;
+
+	/* Setup Bus Vote */
+	vote = qcom_ice_get_bus_vote(ice_dev, "MIN");
+	if (vote < 0) {
+		pr_err("%s:%p: Unable to get bus vote\n", __func__, ice_dev);
+		goto out_disable_clocks;
+	}
+
+	ret = qcom_ice_set_bus_vote(ice_dev, vote);
+	if (ret)
+		pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret);
+
+out_disable_clocks:
+
+	/* Setup Clocks */
+	if (qcom_ice_enable_clocks(ice_dev, false))
+		pr_err("%s:%p:%s Could not disable clocks\n", __func__,
+				ice_dev, ice_dev->ice_instance_type);
+
+	/* Setup Regulator */
+	if (ice_dev->is_regulator_available) {
+		if (qcom_ice_get_vreg(ice_dev)) {
+			pr_err("%s: Could not get regulator\n", __func__);
+			goto out;
+		}
+		ret = regulator_disable(ice_dev->reg);
+		if (ret) {
+			pr_err("%s:%p: Could not disable regulator\n",
+					__func__, ice_dev);
+			goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+/*
+ * Enable or disable the ICE HW for the given storage type
+ * ("ufs"/"sdcc"). Returns 0 on success, a negative value on failure
+ * or when no matching ICE instance exists (yet).
+ */
+int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
+{
+	int ret = -1;
+	struct ice_device *ice_dev = NULL;
+
+	ice_dev = get_ice_device_from_storage_type(storage_type);
+	/* the lookup may return ERR_PTR(-EPROBE_DEFER), not just NULL */
+	if (IS_ERR(ice_dev))
+		return PTR_ERR(ice_dev);
+	if (!ice_dev)
+		return ret;
+
+	if (enable)
+		return enable_ice_setup(ice_dev);
+	else
+		return disable_ice_setup(ice_dev);
+}
+
+/*
+ * Resolve the variant ops table for a DT node by matching it against
+ * qcom_ice_match. Returns NULL (after logging) on a bad node or no
+ * match.
+ */
+struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node)
+{
+	const struct of_device_id *match;
+
+	if (!node) {
+		pr_err("%s: invalid node\n", __func__);
+		return NULL;
+	}
+
+	match = of_match_node(qcom_ice_match, node);
+	if (!match) {
+		pr_err("%s: error matching\n", __func__);
+		return NULL;
+	}
+
+	return (struct qcom_ice_variant_ops *)(match->data);
+}
+EXPORT_SYMBOL(qcom_ice_get_variant_ops);
+
+/* Platform driver glue: binds to "qcom,ice" nodes via qcom_ice_match */
+static struct platform_driver qcom_ice_driver = {
+	.probe          = qcom_ice_probe,
+	.remove         = qcom_ice_remove,
+	.driver         = {
+		.owner  = THIS_MODULE,
+		.name   = "qcom_ice",
+		.of_match_table = qcom_ice_match,
+	},
+};
+module_platform_driver(qcom_ice_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Inline Crypto Engine driver");
diff --git a/drivers/crypto/msm/iceregs.h b/drivers/crypto/msm/iceregs.h
new file mode 100644
index 000000000000..01f3e9783503
--- /dev/null
+++ b/drivers/crypto/msm/iceregs.h
@@ -0,0 +1,147 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QCOM_INLINE_CRYPTO_ENGINE_REGS_H_
+#define _QCOM_INLINE_CRYPTO_ENGINE_REGS_H_
+
+/* Register bits for ICE version */
+#define ICE_CORE_CURRENT_MAJOR_VERSION 0x02
+
+#define ICE_CORE_STEP_REV_MASK 0xFFFF
+#define ICE_CORE_STEP_REV 0 /* bit 15-0 */
+#define ICE_CORE_MAJOR_REV_MASK 0xFF000000
+#define ICE_CORE_MAJOR_REV 24 /* bit 31-24 */
+#define ICE_CORE_MINOR_REV_MASK 0xFF0000
+#define ICE_CORE_MINOR_REV 16 /* bit 23-16 */
+
+
+#define ICE_FUSE_SETTING_MASK 0x1
+#define ICE_FORCE_HW_KEY0_SETTING_MASK 0x2
+#define ICE_FORCE_HW_KEY1_SETTING_MASK 0x4
+
+/* QCOM ICE Registers from SWI */
+#define QCOM_ICE_REGS_CONTROL 0x0000
+#define QCOM_ICE_REGS_RESET 0x0004
+#define QCOM_ICE_REGS_VERSION 0x0008
+#define QCOM_ICE_REGS_FUSE_SETTING 0x0010
+#define QCOM_ICE_REGS_PARAMETERS_1 0x0014
+#define QCOM_ICE_REGS_PARAMETERS_2 0x0018
+#define QCOM_ICE_REGS_PARAMETERS_3 0x001C
+#define QCOM_ICE_REGS_PARAMETERS_4 0x0020
+#define QCOM_ICE_REGS_PARAMETERS_5 0x0024
+#define QCOM_ICE_REGS_NON_SEC_IRQ_STTS 0x0040
+#define QCOM_ICE_REGS_NON_SEC_IRQ_MASK 0x0044
+#define QCOM_ICE_REGS_NON_SEC_IRQ_CLR 0x0048
+#define QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME1 0x0050
+#define QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME2 0x0054
+#define QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME1 0x0058
+#define QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME2 0x005C
+#define QCOM_ICE_REGS_STREAM1_BIST_ERROR_VEC 0x0060
+#define QCOM_ICE_REGS_STREAM2_BIST_ERROR_VEC 0x0064
+#define QCOM_ICE_REGS_STREAM1_BIST_FINISH_VEC 0x0068
+#define QCOM_ICE_REGS_STREAM2_BIST_FINISH_VEC 0x006C
+#define QCOM_ICE_REGS_BIST_STATUS 0x0070
+#define QCOM_ICE_REGS_BYPASS_STATUS 0x0074
+#define QCOM_ICE_REGS_ADVANCED_CONTROL 0x1000
+#define QCOM_ICE_REGS_ENDIAN_SWAP 0x1004
+#define QCOM_ICE_REGS_TEST_BUS_CONTROL 0x1010
+#define QCOM_ICE_REGS_TEST_BUS_REG 0x1014
+#define QCOM_ICE_REGS_STREAM1_COUNTERS1 0x1100
+#define QCOM_ICE_REGS_STREAM1_COUNTERS2 0x1104
+#define QCOM_ICE_REGS_STREAM1_COUNTERS3 0x1108
+#define QCOM_ICE_REGS_STREAM1_COUNTERS4 0x110C
+#define QCOM_ICE_REGS_STREAM1_COUNTERS5_MSB 0x1110
+#define QCOM_ICE_REGS_STREAM1_COUNTERS5_LSB 0x1114
+#define QCOM_ICE_REGS_STREAM1_COUNTERS6_MSB 0x1118
+#define QCOM_ICE_REGS_STREAM1_COUNTERS6_LSB 0x111C
+#define QCOM_ICE_REGS_STREAM1_COUNTERS7_MSB 0x1120
+#define QCOM_ICE_REGS_STREAM1_COUNTERS7_LSB 0x1124
+#define QCOM_ICE_REGS_STREAM1_COUNTERS8_MSB 0x1128
+#define QCOM_ICE_REGS_STREAM1_COUNTERS8_LSB 0x112C
+#define QCOM_ICE_REGS_STREAM1_COUNTERS9_MSB 0x1130
+#define QCOM_ICE_REGS_STREAM1_COUNTERS9_LSB 0x1134
+#define QCOM_ICE_REGS_STREAM2_COUNTERS1 0x1200
+#define QCOM_ICE_REGS_STREAM2_COUNTERS2 0x1204
+#define QCOM_ICE_REGS_STREAM2_COUNTERS3 0x1208
+#define QCOM_ICE_REGS_STREAM2_COUNTERS4 0x120C
+#define QCOM_ICE_REGS_STREAM2_COUNTERS5_MSB 0x1210
+#define QCOM_ICE_REGS_STREAM2_COUNTERS5_LSB 0x1214
+#define QCOM_ICE_REGS_STREAM2_COUNTERS6_MSB 0x1218
+#define QCOM_ICE_REGS_STREAM2_COUNTERS6_LSB 0x121C
+#define QCOM_ICE_REGS_STREAM2_COUNTERS7_MSB 0x1220
+#define QCOM_ICE_REGS_STREAM2_COUNTERS7_LSB 0x1224
+#define QCOM_ICE_REGS_STREAM2_COUNTERS8_MSB 0x1228
+#define QCOM_ICE_REGS_STREAM2_COUNTERS8_LSB 0x122C
+#define QCOM_ICE_REGS_STREAM2_COUNTERS9_MSB 0x1230
+#define QCOM_ICE_REGS_STREAM2_COUNTERS9_LSB 0x1234
+
+#define QCOM_ICE_STREAM1_PREMATURE_LBA_CHANGE (1L << 0)
+#define QCOM_ICE_STREAM2_PREMATURE_LBA_CHANGE (1L << 1)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_LBO (1L << 2)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_LBO (1L << 3)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_DUN (1L << 4)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_DUN (1L << 5)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_DUS (1L << 6)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_DUS (1L << 7)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_DBO (1L << 8)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_DBO (1L << 9)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_ENC_SEL (1L << 10)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_ENC_SEL (1L << 11)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_CONF_IDX (1L << 12)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_CONF_IDX (1L << 13)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_NEW_TRNS (1L << 14)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_NEW_TRNS (1L << 15)
+
+#define QCOM_ICE_NON_SEC_IRQ_MASK \
+ (QCOM_ICE_STREAM1_PREMATURE_LBA_CHANGE |\
+ QCOM_ICE_STREAM2_PREMATURE_LBA_CHANGE |\
+ QCOM_ICE_STREAM1_NOT_EXPECTED_LBO |\
+ QCOM_ICE_STREAM2_NOT_EXPECTED_LBO |\
+ QCOM_ICE_STREAM1_NOT_EXPECTED_DUN |\
+ QCOM_ICE_STREAM2_NOT_EXPECTED_DUN |\
+ QCOM_ICE_STREAM2_NOT_EXPECTED_DUN |\
+ QCOM_ICE_STREAM2_NOT_EXPECTED_DUS |\
+ QCOM_ICE_STREAM1_NOT_EXPECTED_DBO |\
+ QCOM_ICE_STREAM2_NOT_EXPECTED_DBO |\
+ QCOM_ICE_STREAM1_NOT_EXPECTED_ENC_SEL |\
+ QCOM_ICE_STREAM2_NOT_EXPECTED_ENC_SEL |\
+ QCOM_ICE_STREAM1_NOT_EXPECTED_CONF_IDX |\
+ QCOM_ICE_STREAM1_NOT_EXPECTED_NEW_TRNS |\
+ QCOM_ICE_STREAM2_NOT_EXPECTED_NEW_TRNS)
+
+/* QCOM ICE registers from secure side */
+#define QCOM_ICE_TEST_BUS_REG_SECURE_INTR (1L << 28)
+#define QCOM_ICE_TEST_BUS_REG_NON_SECURE_INTR (1L << 2)
+
+#define QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_STTS 0x2050
+#define QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK 0x2054
+#define QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_CLR 0x2058
+
+#define QCOM_ICE_STREAM1_PARTIALLY_SET_KEY_USED (1L << 0)
+#define QCOM_ICE_STREAM2_PARTIALLY_SET_KEY_USED (1L << 1)
+#define QCOM_ICE_QCOMC_DBG_OPEN_EVENT (1L << 30)
+#define QCOM_ICE_KEYS_RAM_RESET_COMPLETED (1L << 31)
+
+#define QCOM_ICE_SEC_IRQ_MASK \
+ (QCOM_ICE_STREAM1_PARTIALLY_SET_KEY_USED |\
+ QCOM_ICE_STREAM2_PARTIALLY_SET_KEY_USED |\
+ QCOM_ICE_QCOMC_DBG_OPEN_EVENT | \
+ QCOM_ICE_KEYS_RAM_RESET_COMPLETED)
+
+
+#define qcom_ice_writel(ice, val, reg) \
+ writel_relaxed((val), (ice)->mmio + (reg))
+#define qcom_ice_readl(ice, reg) \
+ readl_relaxed((ice)->mmio + (reg))
+
+
+#endif /* _QCOM_INLINE_CRYPTO_ENGINE_REGS_H_ */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index e897f4d3810c..b054edb07e49 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -134,6 +134,7 @@ struct bio {
*/
#define BIO_RESET_BITS 13
#define BIO_OWNS_VEC 13 /* bio_free() should free bvec */
+#define BIO_INLINECRYPT 15
/*
* top 4 bits of bio flags indicate the pool this bio came from