-rw-r--r--  Documentation/devicetree/bindings/cnss/cnss-wlan.txt  |   14
-rw-r--r--  Documentation/vm/page_owner.txt  |    9
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-cdp.dtsi  |   10
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-mtp.dtsi  |   10
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi  |   10
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-qrd.dtsi  |   10
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c  |   15
-rw-r--r--  drivers/i2c/busses/i2c-msm-v2.c  |   19
-rw-r--r--  drivers/net/wireless/Kconfig  |    1
-rw-r--r--  drivers/net/wireless/Makefile  |    1
-rw-r--r--  drivers/net/wireless/cnss/Kconfig  |    5
-rw-r--r--  drivers/net/wireless/cnss2/Kconfig  |   17
-rw-r--r--  drivers/net/wireless/cnss2/Makefile  |    9
-rw-r--r--  drivers/net/wireless/cnss2/debug.c  |  174
-rw-r--r--  drivers/net/wireless/cnss2/debug.h  |   73
-rw-r--r--  drivers/net/wireless/cnss2/main.c  | 2309
-rw-r--r--  drivers/net/wireless/cnss2/main.h  |  221
-rw-r--r--  drivers/net/wireless/cnss2/pci.c  | 1582
-rw-r--r--  drivers/net/wireless/cnss2/pci.h  |  141
-rw-r--r--  drivers/net/wireless/cnss2/power.c  |  386
-rw-r--r--  drivers/net/wireless/cnss2/qmi.c  | 1006
-rw-r--r--  drivers/net/wireless/cnss2/qmi.h  |   41
-rw-r--r--  drivers/net/wireless/cnss2/utils.c  |  129
-rw-r--r--  drivers/net/wireless/cnss2/wlan_firmware_service_v01.c  | 2168
-rw-r--r--  drivers/net/wireless/cnss2/wlan_firmware_service_v01.h  |  645
-rw-r--r--  drivers/power/reset/msm-poweroff.c  |    1
-rw-r--r--  include/linux/migrate.h  |    6
-rw-r--r--  include/linux/mm.h  |    1
-rw-r--r--  include/linux/mmzone.h  |    3
-rw-r--r--  include/linux/page_ext.h  |    5
-rw-r--r--  include/linux/page_idle.h  |   43
-rw-r--r--  include/linux/page_owner.h  |   58
-rw-r--r--  include/linux/stackdepot.h  |   32
-rw-r--r--  include/net/cnss2.h  |  192
-rw-r--r--  lib/Kconfig  |    4
-rw-r--r--  lib/Kconfig.debug  |    1
-rw-r--r--  lib/Makefile  |    2
-rw-r--r--  lib/stackdepot.c  |  284
-rw-r--r--  mm/compaction.c  |   44
-rw-r--r--  mm/debug.c  |   13
-rw-r--r--  mm/internal.h  |    2
-rw-r--r--  mm/migrate.c  |   12
-rw-r--r--  mm/page_alloc.c  |   83
-rw-r--r--  mm/page_isolation.c  |    9
-rw-r--r--  mm/page_owner.c  |  263
-rw-r--r--  mm/vmstat.c  |   17
-rw-r--r--  net/core/dev.c  |    7
-rw-r--r--  tools/vm/page_owner_sort.c  |    9
48 files changed, 9926 insertions(+), 170 deletions(-)
diff --git a/Documentation/devicetree/bindings/cnss/cnss-wlan.txt b/Documentation/devicetree/bindings/cnss/cnss-wlan.txt
index 6d63d1123f4c..6aa3bfe4b1d8 100644
--- a/Documentation/devicetree/bindings/cnss/cnss-wlan.txt
+++ b/Documentation/devicetree/bindings/cnss/cnss-wlan.txt
@@ -11,8 +11,9 @@ the WLAN enable GPIO, 3.3V fixed voltage regulator resources. It also
provides the reserved RAM dump memory location and size.
Required properties:
- - compatible: "qcom,cnss"
- - wlan-en-gpio: WLAN_EN GPIO signal specified by QCA6174 specifications
+ - compatible: "qcom,cnss" for QCA6174 device
+ "qcom,cnss-qca6290" for QCA6290 device
+ - wlan-en-gpio: WLAN_EN GPIO signal specified by the chip specifications
- vdd-wlan-supply: phandle to the regulator device tree node
- pinctrl-names: Names corresponding to the numbered pinctrl states
- pinctrl-<n>: Pinctrl states as described in
@@ -44,6 +45,13 @@ Optional properties:
which should be drived depending on platforms
- qcom,is-dual-wifi-enabled: Boolean property to control wlan enable(wlan-en)
gpio on dual-wifi platforms.
+ - vdd-wlan-en-supply: WLAN_EN fixed regulator specified by QCA6174 specifications.
+ - qcom,wlan-en-vreg-support: Boolean property to decide whether the WLAN_EN pin
+ is a gpio or a fixed regulator.
+ - qcom,mhi: phandle to indicate the device which needs MHI support.
+ - qcom,cap-tsf-gpio: WLAN_TSF_CAPTURED GPIO signal specified by the chip
+ specifications, should be driven depending on
+ products
Example:
@@ -60,4 +68,6 @@ Example:
pinctrl-0 = <&cnss_default>;
qcom,wlan-rc-num = <0>;
qcom,wlan-smmu-iova-address = <0 0x10000000>;
+ qcom,mhi = <&mhi_wlan>;
+ qcom,cap-tsf-gpio = <&tlmm 126 1>;
};
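
For illustration only, here is a minimal sketch (not part of this patch) of how a platform driver could consume the newly documented optional properties. The function name and error handling are hypothetical; of_property_read_bool(), devm_regulator_get() and of_get_named_gpio() are the standard kernel helpers for these property types, and the regulator core appends the "-supply" suffix, so "vdd-wlan-en-supply" is requested as "vdd-wlan-en".

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/gpio.h>
	#include <linux/of.h>
	#include <linux/of_gpio.h>
	#include <linux/regulator/consumer.h>

	/* Hypothetical parser for the optional WLAN_EN/TSF properties above */
	static int cnss_parse_wlan_en(struct device *dev)
	{
		struct device_node *np = dev->of_node;
		struct regulator *vdd_wlan_en;
		int tsf_gpio;

		if (of_property_read_bool(np, "qcom,wlan-en-vreg-support")) {
			/* WLAN_EN is driven through a fixed regulator */
			vdd_wlan_en = devm_regulator_get(dev, "vdd-wlan-en");
			if (IS_ERR(vdd_wlan_en))
				return PTR_ERR(vdd_wlan_en);
		} else {
			/* WLAN_EN is driven through a GPIO */
			int en_gpio = of_get_named_gpio(np, "wlan-en-gpio", 0);

			if (!gpio_is_valid(en_gpio))
				return en_gpio;
		}

		/* Optional TSF capture GPIO */
		tsf_gpio = of_get_named_gpio(np, "qcom,cap-tsf-gpio", 0);
		if (gpio_is_valid(tsf_gpio))
			dev_dbg(dev, "cap-tsf-gpio: %d\n", tsf_gpio);

		return 0;
	}
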
diff --git a/Documentation/vm/page_owner.txt b/Documentation/vm/page_owner.txt
index 8f3ce9b3aa11..ffff1439076a 100644
--- a/Documentation/vm/page_owner.txt
+++ b/Documentation/vm/page_owner.txt
@@ -28,10 +28,11 @@ with page owner and page owner is disabled in runtime due to no enabling
boot option, runtime overhead is marginal. If disabled in runtime, it
doesn't require memory to store owner information, so there is no runtime
memory overhead. And, page owner inserts just two unlikely branches into
-the page allocator hotpath and if it returns false then allocation is
-done like as the kernel without page owner. These two unlikely branches
-would not affect to allocation performance. Following is the kernel's
-code size change due to this facility.
+the page allocator hotpath and, if not enabled, allocation is done as in
+a kernel without page owner. These two unlikely branches should not
+affect allocation performance, especially if the static keys jump label
+patching functionality is available. Following is the kernel's code size
+change due to this facility.
- Without page owner
text data bss dec hex filename
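
For reference, here is a minimal sketch (not taken from this patch) of the static-key pattern the added paragraph refers to, using the upstream jump-label API; the wrapper and key names mirror the usual page_owner helpers but are shown only as an assumption. While the key is disabled, static_branch_unlikely() compiles down to a patched no-op, which is why the two hotpath checks are essentially free.

	#include <linux/gfp.h>
	#include <linux/jump_label.h>
	#include <linux/mm_types.h>

	DEFINE_STATIC_KEY_FALSE(page_owner_inited);

	void __set_page_owner(struct page *page, unsigned int order,
			      gfp_t gfp_mask);

	/* Hotpath wrapper: a single patched branch while page owner is off */
	static inline void set_page_owner(struct page *page,
					  unsigned int order, gfp_t gfp_mask)
	{
		if (static_branch_unlikely(&page_owner_inited))
			__set_page_owner(page, order, gfp_mask);
	}

	/* Enabled once at boot when the page_owner=on parameter is parsed */
	static void init_page_owner_example(void)
	{
		static_branch_enable(&page_owner_inited);
	}
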
diff --git a/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi
index 6ff62544b03c..0859fd638a00 100644
--- a/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-cdp.dtsi
@@ -149,6 +149,16 @@
status = "okay";
};
+ gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */
+ qcom,mode = <0>; /* Input */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "okay";
+ };
+
/* GPIO 21 (NFC_CLK_REQ) */
gpio@d400 {
qcom,mode = <0>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi
index bafe29211ef0..3827b1bbf8ba 100644
--- a/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-mtp.dtsi
@@ -150,6 +150,16 @@
status = "okay";
};
+ gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */
+ qcom,mode = <0>; /* Input */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "okay";
+ };
+
/* GPIO 21 (NFC_CLK_REQ) */
gpio@d400 {
qcom,mode = <0>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi
index 97c4c5b1d455..c4a428635231 100644
--- a/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-qrd-skuk.dtsi
@@ -143,6 +143,16 @@
qcom,out-strength = <1>;
};
+ gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */
+ qcom,mode = <0>; /* Input */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "okay";
+ };
+
/* GPIO 21 (NFC_CLK_REQ) */
gpio@d400 {
qcom,mode = <0>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi
index a3eb3e5ab0d0..3c6b23d9581c 100644
--- a/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-qrd.dtsi
@@ -139,6 +139,16 @@
status = "okay";
};
+ gpio@d200 { /* GPIO 19 - wil6210 refclk3_en */
+ qcom,mode = <0>; /* Input */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "okay";
+ };
+
/* GPIO 21 (NFC_CLK_REQ) */
gpio@d400 {
qcom,mode = <0>;
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index 2ee75a2c1039..b30e894ebc4d 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -1893,11 +1893,6 @@ struct drm_msm_ext_panel_hdr_metadata *hdr_meta)
return;
}
- if (!connector->hdr_supported) {
- SDE_ERROR("Sink does not support HDR\n");
- return;
- }
-
/* Setup Packet header and payload */
packet_header = type_code | (version << 8) | (length << 16);
hdmi_write(hdmi, HDMI_GENERIC0_HDR, packet_header);
@@ -2308,8 +2303,14 @@ int sde_hdmi_pre_kickoff(struct drm_connector *connector,
struct msm_display_kickoff_params *params)
{
- sde_hdmi_panel_set_hdr_infoframe(display,
- params->hdr_metadata);
+ if (!connector || !display || !params) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ if (connector->hdr_supported)
+ sde_hdmi_panel_set_hdr_infoframe(display,
+ params->hdr_metadata);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-msm-v2.c b/drivers/i2c/busses/i2c-msm-v2.c
index 1f042fa56eea..f4ed71f9c1a7 100644
--- a/drivers/i2c/busses/i2c-msm-v2.c
+++ b/drivers/i2c/busses/i2c-msm-v2.c
@@ -2232,19 +2232,8 @@ static int i2c_msm_pm_clk_enable(struct i2c_msm_ctrl *ctrl)
static int i2c_msm_pm_xfer_start(struct i2c_msm_ctrl *ctrl)
{
int ret;
- struct i2c_msm_xfer *xfer = &ctrl->xfer;
mutex_lock(&ctrl->xfer.mtx);
- /* if system is suspended just bail out */
- if (ctrl->pwr_state == I2C_MSM_PM_SYS_SUSPENDED) {
- struct i2c_msg *msgs = xfer->msgs + xfer->cur_buf.msg_idx;
- dev_err(ctrl->dev,
- "slave:0x%x is calling xfer when system is suspended\n",
- msgs->addr);
- mutex_unlock(&ctrl->xfer.mtx);
- return -EIO;
- }
-
i2c_msm_pm_pinctrl_state(ctrl, true);
pm_runtime_get_sync(ctrl->dev);
/*
@@ -2330,6 +2319,14 @@ i2c_msm_frmwrk_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
return PTR_ERR(msgs);
}
+ /* if system is suspended just bail out */
+ if (ctrl->pwr_state == I2C_MSM_PM_SYS_SUSPENDED) {
+ dev_err(ctrl->dev,
+ "slave:0x%x is calling xfer when system is suspended\n",
+ msgs->addr);
+ return -EIO;
+ }
+
ret = i2c_msm_pm_xfer_start(ctrl);
if (ret)
return ret;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 2217e0cae4da..59db73f538d7 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -332,6 +332,7 @@ source "drivers/net/wireless/mwifiex/Kconfig"
source "drivers/net/wireless/cw1200/Kconfig"
source "drivers/net/wireless/rsi/Kconfig"
source "drivers/net/wireless/cnss/Kconfig"
+source "drivers/net/wireless/cnss2/Kconfig"
source "drivers/net/wireless/cnss_genl/Kconfig"
source "drivers/net/wireless/cnss_utils/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 704a976bdfb5..f23a2fbc3afa 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_RSI_91X) += rsi/
obj-$(CONFIG_WCNSS_CORE) += wcnss/
obj-$(CONFIG_CNSS) += cnss/
+obj-$(CONFIG_CNSS2) += cnss2/
obj-$(CONFIG_WCNSS_MEM_PRE_ALLOC) += cnss_prealloc/
obj-$(CONFIG_CNSS_CRYPTO) += cnss_crypto/
obj-$(CONFIG_CNSS_GENL) += cnss_genl/
diff --git a/drivers/net/wireless/cnss/Kconfig b/drivers/net/wireless/cnss/Kconfig
index 8946f65df716..6faf9f1ef5d0 100644
--- a/drivers/net/wireless/cnss/Kconfig
+++ b/drivers/net/wireless/cnss/Kconfig
@@ -56,6 +56,9 @@ config CLD_LL_CORE
select WEXT_PRIV
select WEXT_SPY
select WIRELESS_EXT
+ select CRYPTO
+ select CRYPTO_HASH
+ select CRYPTO_BLKCIPHER
---help---
This section contains the necessary modules needed to enable the
core WLAN driver for Qualcomm QCA6174 chipset.
@@ -73,7 +76,7 @@ config CNSS_SECURE_FW
config BUS_AUTO_SUSPEND
bool "Enable/Disable Runtime PM support for PCIe based WLAN Drivers"
- depends on CNSS
+ depends on CNSS || CNSS2
depends on PCI
---help---
Runtime Power Management is supported for PCIe based WLAN Drivers.
diff --git a/drivers/net/wireless/cnss2/Kconfig b/drivers/net/wireless/cnss2/Kconfig
new file mode 100644
index 000000000000..85d2a7b30a84
--- /dev/null
+++ b/drivers/net/wireless/cnss2/Kconfig
@@ -0,0 +1,17 @@
+config CNSS2
+ tristate "CNSS2 Platform Driver for Wi-Fi Module"
+ depends on !CNSS && PCI_MSM
+ ---help---
+ This module adds support for the Connectivity Subsystem (CNSS) used
+ by PCIe based Wi-Fi devices with QCA6174/QCA6290 chipsets.
+ This driver also adds support for integrating the WLAN module with
+ the subsystem restart framework.
+
+config CNSS2_DEBUG
+ bool "CNSS2 Platform Driver Debug Support"
+ depends on CNSS2
+ ---help---
+ This option enables CNSS2 platform driver debug support, which
+ primarily includes additional verbose logs for certain features,
+ kernel panics in certain cases to aid debugging, and any other
+ debug mechanisms.
diff --git a/drivers/net/wireless/cnss2/Makefile b/drivers/net/wireless/cnss2/Makefile
new file mode 100644
index 000000000000..9d383c8daa43
--- /dev/null
+++ b/drivers/net/wireless/cnss2/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_CNSS2) += cnss2.o
+
+cnss2-y := main.o
+cnss2-y += debug.o
+cnss2-y += pci.o
+cnss2-y += power.o
+cnss2-y += qmi.o
+cnss2-y += utils.o
+cnss2-y += wlan_firmware_service_v01.o
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
new file mode 100644
index 000000000000..360ab31c61dd
--- /dev/null
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -0,0 +1,174 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include "main.h"
+#include "debug.h"
+
+#define CNSS_IPC_LOG_PAGES 32
+
+void *cnss_ipc_log_context;
+
+static int cnss_pin_connect_show(struct seq_file *s, void *data)
+{
+ struct cnss_plat_data *cnss_priv = s->private;
+
+ seq_puts(s, "Pin connect results\n");
+ seq_printf(s, "FW power pin result: %04x\n",
+ cnss_priv->pin_result.fw_pwr_pin_result);
+ seq_printf(s, "FW PHY IO pin result: %04x\n",
+ cnss_priv->pin_result.fw_phy_io_pin_result);
+ seq_printf(s, "FW RF pin result: %04x\n",
+ cnss_priv->pin_result.fw_rf_pin_result);
+ seq_printf(s, "Host pin result: %04x\n",
+ cnss_priv->pin_result.host_pin_result);
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+static int cnss_pin_connect_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_pin_connect_show, inode->i_private);
+}
+
+static const struct file_operations cnss_pin_connect_fops = {
+ .read = seq_read,
+ .release = single_release,
+ .open = cnss_pin_connect_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+static int cnss_stats_show_state(struct seq_file *s,
+ struct cnss_plat_data *plat_priv)
+{
+ enum cnss_driver_state i;
+ int skip = 0;
+ unsigned long state;
+
+ seq_printf(s, "\nState: 0x%lx(", plat_priv->driver_state);
+ for (i = 0, state = plat_priv->driver_state; state != 0;
+ state >>= 1, i++) {
+ if (!(state & 0x1))
+ continue;
+
+ if (skip++)
+ seq_puts(s, " | ");
+
+ switch (i) {
+ case CNSS_QMI_WLFW_CONNECTED:
+ seq_puts(s, "QMI_WLFW_CONNECTED");
+ continue;
+ case CNSS_FW_MEM_READY:
+ seq_puts(s, "FW_MEM_READY");
+ continue;
+ case CNSS_FW_READY:
+ seq_puts(s, "FW_READY");
+ continue;
+ case CNSS_COLD_BOOT_CAL:
+ seq_puts(s, "COLD_BOOT_CAL");
+ continue;
+ case CNSS_DRIVER_LOADING:
+ seq_puts(s, "DRIVER_LOADING");
+ continue;
+ case CNSS_DRIVER_UNLOADING:
+ seq_puts(s, "DRIVER_UNLOADING");
+ continue;
+ case CNSS_DRIVER_PROBED:
+ seq_puts(s, "DRIVER_PROBED");
+ continue;
+ case CNSS_DRIVER_RECOVERY:
+ seq_puts(s, "DRIVER_RECOVERY");
+ continue;
+ case CNSS_FW_BOOT_RECOVERY:
+ seq_puts(s, "FW_BOOT_RECOVERY");
+ continue;
+ case CNSS_DEV_ERR_NOTIFY:
+ seq_puts(s, "DEV_ERR");
+ }
+
+ seq_printf(s, "UNKNOWN-%d", i);
+ }
+ seq_puts(s, ")\n");
+
+ return 0;
+}
+
+static int cnss_stats_show(struct seq_file *s, void *data)
+{
+ struct cnss_plat_data *plat_priv = s->private;
+
+ cnss_stats_show_state(s, plat_priv);
+
+ return 0;
+}
+
+static int cnss_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, cnss_stats_show, inode->i_private);
+}
+
+static const struct file_operations cnss_stats_fops = {
+ .read = seq_read,
+ .release = single_release,
+ .open = cnss_stats_open,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+};
+
+int cnss_debugfs_create(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct dentry *root_dentry;
+
+ root_dentry = debugfs_create_dir("cnss", 0);
+ if (IS_ERR(root_dentry)) {
+ ret = PTR_ERR(root_dentry);
+ cnss_pr_err("Unable to create debugfs %d\n", ret);
+ goto out;
+ }
+ plat_priv->root_dentry = root_dentry;
+ debugfs_create_file("pin_connect_result", 0644, root_dentry, plat_priv,
+ &cnss_pin_connect_fops);
+ debugfs_create_file("stats", 0644, root_dentry, plat_priv,
+ &cnss_stats_fops);
+out:
+ return ret;
+}
+
+void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv)
+{
+ debugfs_remove_recursive(plat_priv->root_dentry);
+}
+
+int cnss_debug_init(void)
+{
+ cnss_ipc_log_context = ipc_log_context_create(CNSS_IPC_LOG_PAGES,
+ "cnss", 0);
+ if (!cnss_ipc_log_context) {
+ cnss_pr_err("Unable to create IPC log context!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void cnss_debug_deinit(void)
+{
+ if (cnss_ipc_log_context) {
+ ipc_log_context_destroy(cnss_ipc_log_context);
+ cnss_ipc_log_context = NULL;
+ }
+}
diff --git a/drivers/net/wireless/cnss2/debug.h b/drivers/net/wireless/cnss2/debug.h
new file mode 100644
index 000000000000..1621514eb5b2
--- /dev/null
+++ b/drivers/net/wireless/cnss2/debug.h
@@ -0,0 +1,73 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_DEBUG_H
+#define _CNSS_DEBUG_H
+
+#include <linux/ipc_logging.h>
+#include <linux/printk.h>
+
+extern void *cnss_ipc_log_context;
+
+#define cnss_ipc_log_string(_x...) do { \
+ if (cnss_ipc_log_context) \
+ ipc_log_string(cnss_ipc_log_context, _x); \
+ } while (0)
+
+#define cnss_pr_err(_fmt, ...) do { \
+ pr_err("cnss: " _fmt, ##__VA_ARGS__); \
+ cnss_ipc_log_string("ERR: " pr_fmt(_fmt), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define cnss_pr_warn(_fmt, ...) do { \
+ pr_warn("cnss: " _fmt, ##__VA_ARGS__); \
+ cnss_ipc_log_string("WRN: " pr_fmt(_fmt), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define cnss_pr_info(_fmt, ...) do { \
+ pr_info("cnss: " _fmt, ##__VA_ARGS__); \
+ cnss_ipc_log_string("INF: " pr_fmt(_fmt), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define cnss_pr_dbg(_fmt, ...) do { \
+ pr_debug("cnss: " _fmt, ##__VA_ARGS__); \
+ cnss_ipc_log_string("DBG: " pr_fmt(_fmt), \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#ifdef CONFIG_CNSS2_DEBUG
+#define CNSS_ASSERT(_condition) do { \
+ if (!(_condition)) { \
+ cnss_pr_err("ASSERT at line %d\n", \
+ __LINE__); \
+ BUG_ON(1); \
+ } \
+ } while (0)
+#else
+#define CNSS_ASSERT(_condition) do { \
+ if (!(_condition)) { \
+ cnss_pr_err("ASSERT at line %d\n", \
+ __LINE__); \
+ WARN_ON(1); \
+ } \
+ } while (0)
+#endif
+
+int cnss_debug_init(void);
+void cnss_debug_deinit(void);
+int cnss_debugfs_create(struct cnss_plat_data *plat_priv);
+void cnss_debugfs_destroy(struct cnss_plat_data *plat_priv);
+
+#endif /* _CNSS_DEBUG_H */
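
A short usage sketch follows (not part of the patch; the function body is hypothetical and assumes struct cnss_plat_data and cnss_power_on_device() from main.h/power.c in this series). It shows how the helpers above are meant to be used: the cnss_pr_*() macros log to both dmesg and the "cnss" IPC log buffer, and CNSS_ASSERT() warns, or panics when CONFIG_CNSS2_DEBUG is set.

	/* Hypothetical caller illustrating the logging/assert helpers */
	static int cnss_example_power_up(struct cnss_plat_data *plat_priv)
	{
		int ret;

		cnss_pr_dbg("Powering up, state: 0x%lx\n",
			    plat_priv->driver_state);

		ret = cnss_power_on_device(plat_priv);
		if (ret) {
			cnss_pr_err("Failed to power on device, err = %d\n",
				    ret);
			CNSS_ASSERT(0);
			return ret;
		}

		return 0;
	}
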
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
new file mode 100644
index 000000000000..29bfe1f4d6ed
--- /dev/null
+++ b/drivers/net/wireless/cnss2/main.c
@@ -0,0 +1,2309 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/rwsem.h>
+#include <linux/suspend.h>
+#include <linux/timer.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/subsystem_notif.h>
+
+#include "main.h"
+#include "debug.h"
+#include "pci.h"
+
+#define CNSS_DUMP_FORMAT_VER 0x11
+#define CNSS_DUMP_FORMAT_VER_V2 0x22
+#define CNSS_DUMP_MAGIC_VER_V2 0x42445953
+#define CNSS_DUMP_NAME "CNSS_WLAN"
+#define CNSS_DUMP_DESC_SIZE 0x1000
+#define CNSS_DUMP_SEG_VER 0x1
+#define WLAN_RECOVERY_DELAY 1000
+#define FILE_SYSTEM_READY 1
+#define FW_READY_TIMEOUT 20000
+#define FW_ASSERT_TIMEOUT 5000
+#define CNSS_EVENT_PENDING 2989
+
+static struct cnss_plat_data *plat_env;
+
+static DECLARE_RWSEM(cnss_pm_sem);
+
+static bool qmi_bypass;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(qmi_bypass, bool, 0600);
+MODULE_PARM_DESC(qmi_bypass, "Bypass QMI from platform driver");
+#endif
+
+static bool enable_waltest;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(enable_waltest, bool, 0600);
+MODULE_PARM_DESC(enable_waltest, "Enable to handle firmware waltest");
+#endif
+
+enum cnss_debug_quirks {
+ LINK_DOWN_SELF_RECOVERY,
+};
+
+unsigned long quirks;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(quirks, ulong, 0600);
+MODULE_PARM_DESC(quirks, "Debug quirks for the driver");
+#endif
+
+static struct cnss_fw_files FW_FILES_QCA6174_FW_3_0 = {
+ "qwlan30.bin", "bdwlan30.bin", "otp30.bin", "utf30.bin",
+ "utfbd30.bin", "epping30.bin", "evicted30.bin"
+};
+
+static struct cnss_fw_files FW_FILES_DEFAULT = {
+ "qwlan.bin", "bdwlan.bin", "otp.bin", "utf.bin",
+ "utfbd.bin", "epping.bin", "evicted.bin"
+};
+
+struct cnss_driver_event {
+ struct list_head list;
+ enum cnss_driver_event_type type;
+ bool sync;
+ struct completion complete;
+ int ret;
+ void *data;
+};
+
+static enum cnss_dev_bus_type cnss_get_dev_bus_type(struct device *dev)
+{
+ if (!dev)
+ return CNSS_BUS_NONE;
+
+ if (!dev->bus)
+ return CNSS_BUS_NONE;
+
+ if (memcmp(dev->bus->name, "pci", 3) == 0)
+ return CNSS_BUS_PCI;
+ else
+ return CNSS_BUS_NONE;
+}
+
+static void cnss_set_plat_priv(struct platform_device *plat_dev,
+ struct cnss_plat_data *plat_priv)
+{
+ plat_env = plat_priv;
+}
+
+static struct cnss_plat_data *cnss_get_plat_priv(struct platform_device
+ *plat_dev)
+{
+ return plat_env;
+}
+
+void *cnss_bus_dev_to_bus_priv(struct device *dev)
+{
+ if (!dev)
+ return NULL;
+
+ switch (cnss_get_dev_bus_type(dev)) {
+ case CNSS_BUS_PCI:
+ return cnss_get_pci_priv(to_pci_dev(dev));
+ default:
+ return NULL;
+ }
+}
+
+struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev)
+{
+ void *bus_priv;
+
+ if (!dev)
+ return cnss_get_plat_priv(NULL);
+
+ bus_priv = cnss_bus_dev_to_bus_priv(dev);
+ if (!bus_priv)
+ return NULL;
+
+ switch (cnss_get_dev_bus_type(dev)) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_priv_to_plat_priv(bus_priv);
+ default:
+ return NULL;
+ }
+}
+
+static int cnss_pm_notify(struct notifier_block *b,
+ unsigned long event, void *p)
+{
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ down_write(&cnss_pm_sem);
+ break;
+ case PM_POST_SUSPEND:
+ up_write(&cnss_pm_sem);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cnss_pm_notifier = {
+ .notifier_call = cnss_pm_notify,
+};
+
+static void cnss_pm_stay_awake(struct cnss_plat_data *plat_priv)
+{
+ if (atomic_inc_return(&plat_priv->pm_count) != 1)
+ return;
+
+ cnss_pr_dbg("PM stay awake, state: 0x%lx, count: %d\n",
+ plat_priv->driver_state,
+ atomic_read(&plat_priv->pm_count));
+ pm_stay_awake(&plat_priv->plat_dev->dev);
+}
+
+static void cnss_pm_relax(struct cnss_plat_data *plat_priv)
+{
+ int r = atomic_dec_return(&plat_priv->pm_count);
+
+ WARN_ON(r < 0);
+
+ if (r != 0)
+ return;
+
+ cnss_pr_dbg("PM relax, state: 0x%lx, count: %d\n",
+ plat_priv->driver_state,
+ atomic_read(&plat_priv->pm_count));
+ pm_relax(&plat_priv->plat_dev->dev);
+}
+
+void cnss_lock_pm_sem(void)
+{
+ down_read(&cnss_pm_sem);
+}
+EXPORT_SYMBOL(cnss_lock_pm_sem);
+
+void cnss_release_pm_sem(void)
+{
+ up_read(&cnss_pm_sem);
+}
+EXPORT_SYMBOL(cnss_release_pm_sem);
+
+int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files,
+ u32 target_type, u32 target_version)
+{
+ if (!pfw_files)
+ return -ENODEV;
+
+ switch (target_version) {
+ case QCA6174_REV3_VERSION:
+ case QCA6174_REV3_2_VERSION:
+ memcpy(pfw_files, &FW_FILES_QCA6174_FW_3_0, sizeof(*pfw_files));
+ break;
+ default:
+ memcpy(pfw_files, &FW_FILES_DEFAULT, sizeof(*pfw_files));
+ cnss_pr_err("Unknown target version, type: 0x%X, version: 0x%X",
+ target_type, target_version);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_get_fw_files_for_target);
+
+int cnss_request_bus_bandwidth(int bandwidth)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_bus_bw_info *bus_bw_info;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ bus_bw_info = &plat_priv->bus_bw_info;
+ if (!bus_bw_info->bus_client)
+ return -EINVAL;
+
+ switch (bandwidth) {
+ case CNSS_BUS_WIDTH_NONE:
+ case CNSS_BUS_WIDTH_LOW:
+ case CNSS_BUS_WIDTH_MEDIUM:
+ case CNSS_BUS_WIDTH_HIGH:
+ ret = msm_bus_scale_client_update_request(
+ bus_bw_info->bus_client, bandwidth);
+ if (!ret)
+ bus_bw_info->current_bw_vote = bandwidth;
+ else
+ cnss_pr_err("Could not set bus bandwidth: %d, err = %d\n",
+ bandwidth, ret);
+ break;
+ default:
+ cnss_pr_err("Invalid bus bandwidth: %d", bandwidth);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(cnss_request_bus_bandwidth);
+
+int cnss_get_platform_cap(struct cnss_platform_cap *cap)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (cap)
+ *cap = plat_priv->cap;
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_get_platform_cap);
+
+int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ void *bus_priv = cnss_bus_dev_to_bus_priv(dev);
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ ret = cnss_pci_get_bar_info(bus_priv, &info->va, &info->pa);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_get_soc_info);
+
+void cnss_set_driver_status(enum cnss_driver_status driver_status)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+
+ if (!plat_priv)
+ return;
+
+ plat_priv->driver_status = driver_status;
+}
+EXPORT_SYMBOL(cnss_set_driver_status);
+
+void cnss_request_pm_qos(u32 qos_val)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+
+ if (!plat_priv)
+ return;
+
+ pm_qos_add_request(&plat_priv->qos_request, PM_QOS_CPU_DMA_LATENCY,
+ qos_val);
+}
+EXPORT_SYMBOL(cnss_request_pm_qos);
+
+void cnss_remove_pm_qos(void)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+
+ if (!plat_priv)
+ return;
+
+ pm_qos_remove_request(&plat_priv->qos_request);
+}
+EXPORT_SYMBOL(cnss_remove_pm_qos);
+
+u8 *cnss_common_get_wlan_mac_address(struct device *dev, u32 *num)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ struct cnss_wlan_mac_info *wlan_mac_info;
+ struct cnss_wlan_mac_addr *addr;
+
+ if (!plat_priv)
+ goto out;
+
+ wlan_mac_info = &plat_priv->wlan_mac_info;
+ if (!wlan_mac_info->is_wlan_mac_set) {
+ cnss_pr_info("Platform driver doesn't have any MAC address!\n");
+ goto out;
+ }
+
+ addr = &wlan_mac_info->wlan_mac_addr;
+ *num = addr->no_of_mac_addr_set;
+
+ return &addr->mac_addr[0][0];
+out:
+ *num = 0;
+ return NULL;
+}
+EXPORT_SYMBOL(cnss_common_get_wlan_mac_address);
+
+int cnss_wlan_enable(struct device *dev,
+ struct cnss_wlan_enable_cfg *config,
+ enum cnss_driver_mode mode,
+ const char *host_version)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ struct wlfw_wlan_cfg_req_msg_v01 req;
+ u32 i;
+ int ret = 0;
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (qmi_bypass)
+ return 0;
+
+ if (!config || !host_version) {
+ cnss_pr_err("Invalid config or host_version pointer\n");
+ return -EINVAL;
+ }
+
+ cnss_pr_dbg("Mode: %d, config: %pK, host_version: %s\n",
+ mode, config, host_version);
+
+ if (mode == CNSS_WALTEST || mode == CNSS_CCPM)
+ goto skip_cfg;
+
+ memset(&req, 0, sizeof(req));
+
+ req.host_version_valid = 1;
+ strlcpy(req.host_version, host_version,
+ QMI_WLFW_MAX_STR_LEN_V01 + 1);
+
+ req.tgt_cfg_valid = 1;
+ if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
+ req.tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
+ else
+ req.tgt_cfg_len = config->num_ce_tgt_cfg;
+ for (i = 0; i < req.tgt_cfg_len; i++) {
+ req.tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
+ req.tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
+ req.tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
+ req.tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
+ req.tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
+ }
+
+ req.svc_cfg_valid = 1;
+ if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
+ req.svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
+ else
+ req.svc_cfg_len = config->num_ce_svc_pipe_cfg;
+ for (i = 0; i < req.svc_cfg_len; i++) {
+ req.svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
+ req.svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
+ req.svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
+ }
+
+ req.shadow_reg_v2_valid = 1;
+ if (config->num_shadow_reg_v2_cfg >
+ QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01)
+ req.shadow_reg_v2_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01;
+ else
+ req.shadow_reg_v2_len = config->num_shadow_reg_v2_cfg;
+
+ memcpy(req.shadow_reg_v2, config->shadow_reg_v2_cfg,
+ sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01)
+ * req.shadow_reg_v2_len);
+
+ ret = cnss_wlfw_wlan_cfg_send_sync(plat_priv, &req);
+ if (ret)
+ goto out;
+
+skip_cfg:
+ ret = cnss_wlfw_wlan_mode_send_sync(plat_priv, mode);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_wlan_enable);
+
+int cnss_wlan_disable(struct device *dev, enum cnss_driver_mode mode)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (qmi_bypass)
+ return 0;
+
+ return cnss_wlfw_wlan_mode_send_sync(plat_priv, QMI_WLFW_OFF_V01);
+}
+EXPORT_SYMBOL(cnss_wlan_disable);
+
+#ifdef CONFIG_CNSS2_DEBUG
+int cnss_athdiag_read(struct device *dev, u32 offset, u32 mem_type,
+ u32 data_len, u8 *output)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ int ret = 0;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -EINVAL;
+ }
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (!output || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ cnss_pr_err("Invalid parameters for athdiag read: output %p, data_len %u\n",
+ output, data_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ cnss_pr_err("Invalid state for athdiag read: 0x%lx\n",
+ plat_priv->driver_state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = cnss_wlfw_athdiag_read_send_sync(plat_priv, offset, mem_type,
+ data_len, output);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_athdiag_read);
+
+int cnss_athdiag_write(struct device *dev, u32 offset, u32 mem_type,
+ u32 data_len, u8 *input)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ int ret = 0;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -EINVAL;
+ }
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (!input || data_len == 0 || data_len > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ cnss_pr_err("Invalid parameters for athdiag write: input %p, data_len %u\n",
+ input, data_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ cnss_pr_err("Invalid state for athdiag write: 0x%lx\n",
+ plat_priv->driver_state);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = cnss_wlfw_athdiag_write_send_sync(plat_priv, offset, mem_type,
+ data_len, input);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_athdiag_write);
+#else
+int cnss_athdiag_read(struct device *dev, u32 offset, u32 mem_type,
+ u32 data_len, u8 *output)
+{
+ return -EPERM;
+}
+EXPORT_SYMBOL(cnss_athdiag_read);
+
+int cnss_athdiag_write(struct device *dev, u32 offset, u32 mem_type,
+ u32 data_len, u8 *input)
+{
+ return -EPERM;
+}
+EXPORT_SYMBOL(cnss_athdiag_write);
+#endif
+
+int cnss_set_fw_log_mode(struct device *dev, u8 fw_log_mode)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ return cnss_wlfw_ini_send_sync(plat_priv, fw_log_mode);
+}
+EXPORT_SYMBOL(cnss_set_fw_log_mode);
+
+static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ set_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
+
+ ret = cnss_wlfw_tgt_cap_send_sync(plat_priv);
+ if (ret)
+ goto out;
+
+ ret = cnss_wlfw_bdf_dnld_send_sync(plat_priv);
+ if (ret)
+ goto out;
+
+ ret = cnss_pci_load_m3(plat_priv->bus_priv);
+ if (ret)
+ goto out;
+
+ ret = cnss_wlfw_m3_dnld_send_sync(plat_priv);
+ if (ret)
+ goto out;
+
+ return 0;
+out:
+ return ret;
+}
+
+static int cnss_driver_call_probe(struct cnss_plat_data *plat_priv)
+{
+ int ret;
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+ if (!plat_priv->driver_ops) {
+ cnss_pr_err("driver_ops is NULL!");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ ret = plat_priv->driver_ops->reinit(pci_priv->pci_dev,
+ pci_priv->pci_device_id);
+ if (ret) {
+ cnss_pr_err("Failed to reinit host driver, err = %d\n",
+ ret);
+ goto out;
+ }
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ } else {
+ ret = plat_priv->driver_ops->probe(pci_priv->pci_dev,
+ pci_priv->pci_device_id);
+ if (ret) {
+ cnss_pr_err("Failed to probe host driver, err = %d\n",
+ ret);
+ goto out;
+ }
+ clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+ set_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+ }
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ del_timer(&plat_priv->fw_boot_timer);
+ set_bit(CNSS_FW_READY, &plat_priv->driver_state);
+
+ if (test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state)) {
+ clear_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state);
+ clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ }
+
+ if (enable_waltest) {
+ ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
+ QMI_WLFW_WALTEST_V01);
+ } else if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state)) {
+ ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
+ QMI_WLFW_CALIBRATION_V01);
+ } else {
+ ret = cnss_driver_call_probe(plat_priv);
+ }
+
+ if (ret)
+ goto shutdown;
+
+ return 0;
+
+shutdown:
+ cnss_pci_stop_mhi(plat_priv->bus_priv);
+ cnss_suspend_pci_link(plat_priv->bus_priv);
+ cnss_power_off_device(plat_priv);
+
+ return ret;
+}
+
+static char *cnss_driver_event_to_str(enum cnss_driver_event_type type)
+{
+ switch (type) {
+ case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
+ return "SERVER_ARRIVE";
+ case CNSS_DRIVER_EVENT_SERVER_EXIT:
+ return "SERVER_EXIT";
+ case CNSS_DRIVER_EVENT_REQUEST_MEM:
+ return "REQUEST_MEM";
+ case CNSS_DRIVER_EVENT_FW_MEM_READY:
+ return "FW_MEM_READY";
+ case CNSS_DRIVER_EVENT_FW_READY:
+ return "FW_READY";
+ case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
+ return "COLD_BOOT_CAL_START";
+ case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
+ return "COLD_BOOT_CAL_DONE";
+ case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
+ return "REGISTER_DRIVER";
+ case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
+ return "UNREGISTER_DRIVER";
+ case CNSS_DRIVER_EVENT_RECOVERY:
+ return "RECOVERY";
+ case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
+ return "FORCE_FW_ASSERT";
+ case CNSS_DRIVER_EVENT_MAX:
+ return "EVENT_MAX";
+ }
+
+ return "UNKNOWN";
+}
+
+int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
+ enum cnss_driver_event_type type,
+ bool sync, void *data)
+{
+ struct cnss_driver_event *event;
+ unsigned long flags;
+ gfp_t gfp = GFP_KERNEL;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Posting event: %s(%d)%s, state: 0x%lx\n",
+ cnss_driver_event_to_str(type), type,
+ sync ? "-sync" : "", plat_priv->driver_state);
+
+ if (type >= CNSS_DRIVER_EVENT_MAX) {
+ cnss_pr_err("Invalid Event type: %d, can't post", type);
+ return -EINVAL;
+ }
+
+ if (in_interrupt() || irqs_disabled())
+ gfp = GFP_ATOMIC;
+
+ event = kzalloc(sizeof(*event), gfp);
+ if (!event)
+ return -ENOMEM;
+
+ cnss_pm_stay_awake(plat_priv);
+
+ event->type = type;
+ event->data = data;
+ init_completion(&event->complete);
+ event->ret = CNSS_EVENT_PENDING;
+ event->sync = sync;
+
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+ list_add_tail(&event->list, &plat_priv->event_list);
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+ queue_work(plat_priv->event_wq, &plat_priv->event_work);
+
+ if (!sync)
+ goto out;
+
+ ret = wait_for_completion_interruptible(&event->complete);
+
+ cnss_pr_dbg("Completed event: %s(%d), state: 0x%lx, ret: %d/%d\n",
+ cnss_driver_event_to_str(type), type,
+ plat_priv->driver_state, ret, event->ret);
+
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+ if (ret == -ERESTARTSYS && event->ret == CNSS_EVENT_PENDING) {
+ event->sync = false;
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+ ret = -EINTR;
+ goto out;
+ }
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+ ret = event->ret;
+ kfree(event);
+
+out:
+ cnss_pm_relax(plat_priv);
+ return ret;
+}
+
+int cnss_power_up(struct device *dev)
+{
+ int ret = 0;
+ void *bus_priv = cnss_bus_dev_to_bus_priv(dev);
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!bus_priv || !plat_priv)
+ return -ENODEV;
+
+ if (plat_priv->device_id != QCA6174_DEVICE_ID) {
+ cnss_pr_dbg("Power up is not supported for device ID 0x%lx\n",
+ plat_priv->device_id);
+ return 0;
+ }
+
+ ret = cnss_power_on_device(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to power on device, err = %d\n", ret);
+ goto err_power_on;
+ }
+
+ ret = cnss_resume_pci_link(bus_priv);
+ if (ret) {
+ cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+ goto err_resume_link;
+ }
+
+ return 0;
+err_resume_link:
+ cnss_power_off_device(plat_priv);
+err_power_on:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_power_up);
+
+int cnss_power_down(struct device *dev)
+{
+ int ret = 0;
+ void *bus_priv = cnss_bus_dev_to_bus_priv(dev);
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!bus_priv || !plat_priv)
+ return -ENODEV;
+
+ if (plat_priv->device_id != QCA6174_DEVICE_ID) {
+ cnss_pr_dbg("Power down is not supported for device ID 0x%lx\n",
+ plat_priv->device_id);
+ return 0;
+ }
+
+ cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
+ cnss_pci_set_monitor_wake_intr(bus_priv, false);
+ cnss_pci_set_auto_suspended(bus_priv, 0);
+
+ ret = cnss_suspend_pci_link(bus_priv);
+ if (ret)
+ cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+
+ cnss_power_off_device(plat_priv);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_power_down);
+
+int cnss_wlan_register_driver(struct cnss_wlan_driver *driver_ops)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ if (plat_priv->driver_ops) {
+ cnss_pr_err("Driver has already registered!\n");
+ return -EEXIST;
+ }
+
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_REGISTER_DRIVER,
+ true, driver_ops);
+ return ret;
+}
+EXPORT_SYMBOL(cnss_wlan_register_driver);
+
+void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver_ops)
+{
+ struct cnss_plat_data *plat_priv = cnss_get_plat_priv(NULL);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return;
+ }
+
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+ true, NULL);
+}
+EXPORT_SYMBOL(cnss_wlan_unregister_driver);
+
+static int cnss_get_resources(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ ret = cnss_get_vreg(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to get vreg, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = cnss_get_pinctrl(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to get pinctrl, err = %d\n", ret);
+ goto out;
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
+static void cnss_put_resources(struct cnss_plat_data *plat_priv)
+{
+}
+
+static int cnss_modem_notifier_nb(struct notifier_block *nb,
+ unsigned long code,
+ void *ss_handle)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(nb, struct cnss_plat_data, modem_nb);
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ struct cnss_esoc_info *esoc_info;
+ struct cnss_wlan_driver *driver_ops;
+
+ cnss_pr_dbg("Modem notifier: event %lu\n", code);
+
+ if (!pci_priv)
+ return NOTIFY_DONE;
+
+ esoc_info = &plat_priv->esoc_info;
+
+ if (code == SUBSYS_AFTER_POWERUP)
+ esoc_info->modem_current_status = 1;
+ else if (code == SUBSYS_BEFORE_SHUTDOWN)
+ esoc_info->modem_current_status = 0;
+ else
+ return NOTIFY_DONE;
+
+ driver_ops = plat_priv->driver_ops;
+ if (!driver_ops || !driver_ops->modem_status)
+ return NOTIFY_DONE;
+
+ driver_ops->modem_status(pci_priv->pci_dev,
+ esoc_info->modem_current_status);
+
+ return NOTIFY_OK;
+}
+
+static int cnss_register_esoc(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct device *dev;
+ struct cnss_esoc_info *esoc_info;
+ struct esoc_desc *esoc_desc;
+ const char *client_desc;
+
+ dev = &plat_priv->plat_dev->dev;
+ esoc_info = &plat_priv->esoc_info;
+
+ esoc_info->notify_modem_status =
+ of_property_read_bool(dev->of_node,
+ "qcom,notify-modem-status");
+
+ if (esoc_info->notify_modem_status)
+ goto out;
+
+ ret = of_property_read_string_index(dev->of_node, "esoc-names", 0,
+ &client_desc);
+ if (ret) {
+ cnss_pr_dbg("esoc-names is not defined in DT, skip!\n");
+ } else {
+ esoc_desc = devm_register_esoc_client(dev, client_desc);
+ if (IS_ERR_OR_NULL(esoc_desc)) {
+ ret = PTR_RET(esoc_desc);
+ cnss_pr_err("Failed to register esoc_desc, err = %d\n",
+ ret);
+ goto out;
+ }
+ esoc_info->esoc_desc = esoc_desc;
+ }
+
+ plat_priv->modem_nb.notifier_call = cnss_modem_notifier_nb;
+ esoc_info->modem_current_status = 0;
+ esoc_info->modem_notify_handler =
+ subsys_notif_register_notifier(esoc_info->esoc_desc ?
+ esoc_info->esoc_desc->name :
+ "modem", &plat_priv->modem_nb);
+ if (IS_ERR(esoc_info->modem_notify_handler)) {
+ ret = PTR_ERR(esoc_info->modem_notify_handler);
+ cnss_pr_err("Failed to register esoc notifier, err = %d\n",
+ ret);
+ goto unreg_esoc;
+ }
+
+ return 0;
+unreg_esoc:
+ if (esoc_info->esoc_desc)
+ devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
+out:
+ return ret;
+}
+
+static void cnss_unregister_esoc(struct cnss_plat_data *plat_priv)
+{
+ struct device *dev;
+ struct cnss_esoc_info *esoc_info;
+
+ dev = &plat_priv->plat_dev->dev;
+ esoc_info = &plat_priv->esoc_info;
+
+ if (esoc_info->notify_modem_status)
+ subsys_notif_unregister_notifier(esoc_info->
+ modem_notify_handler,
+ &plat_priv->modem_nb);
+ if (esoc_info->esoc_desc)
+ devm_unregister_esoc_client(dev, esoc_info->esoc_desc);
+}
+
+static int cnss_qca6174_powerup(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ if (!plat_priv->driver_ops) {
+ cnss_pr_err("driver_ops is NULL!\n");
+ return -EINVAL;
+ }
+
+ ret = cnss_power_on_device(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to power on device, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = cnss_resume_pci_link(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = cnss_driver_call_probe(plat_priv);
+ if (ret)
+ goto suspend_link;
+
+ return 0;
+suspend_link:
+ cnss_suspend_pci_link(pci_priv);
+power_off:
+ cnss_power_off_device(plat_priv);
+out:
+ return ret;
+}
+
+static int cnss_qca6174_shutdown(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (!plat_priv->driver_ops)
+ return -EINVAL;
+
+ if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+ cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
+ plat_priv->driver_ops->remove(pci_priv->pci_dev);
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ cnss_pci_set_auto_suspended(pci_priv, 0);
+ } else {
+ plat_priv->driver_ops->shutdown(pci_priv->pci_dev);
+ }
+
+ ret = cnss_suspend_pci_link(pci_priv);
+ if (ret)
+ cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+
+ cnss_power_off_device(plat_priv);
+
+ if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+ clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+ clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+ }
+
+ return ret;
+}
+
+static void cnss_qca6174_crash_shutdown(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+ if (!plat_priv->driver_ops)
+ return;
+
+ plat_priv->driver_ops->crash_shutdown(pci_priv->pci_dev);
+}
+
+static int cnss_qca6290_powerup(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ unsigned int timeout;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ if (plat_priv->ramdump_info_v2.dump_data_valid) {
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
+ cnss_pci_clear_dump_info(pci_priv);
+ }
+
+ ret = cnss_power_on_device(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to power on device, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = cnss_resume_pci_link(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
+ goto power_off;
+ }
+
+ timeout = cnss_get_qmi_timeout();
+
+ ret = cnss_pci_start_mhi(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to start MHI, err = %d\n", ret);
+ if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
+ !pci_priv->pci_link_down_ind && timeout)
+ mod_timer(&plat_priv->fw_boot_timer,
+ jiffies + msecs_to_jiffies(timeout >> 1));
+ return 0;
+ }
+
+ cnss_set_pin_connect_status(plat_priv);
+
+ if (qmi_bypass) {
+ ret = cnss_driver_call_probe(plat_priv);
+ if (ret)
+ goto stop_mhi;
+ } else if (timeout) {
+ mod_timer(&plat_priv->fw_boot_timer,
+ jiffies + msecs_to_jiffies(timeout << 1));
+ }
+
+ return 0;
+
+stop_mhi:
+ cnss_pci_stop_mhi(pci_priv);
+ cnss_suspend_pci_link(pci_priv);
+power_off:
+ cnss_power_off_device(plat_priv);
+out:
+ return ret;
+}
+
+static int cnss_qca6290_shutdown(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state) ||
+ test_bit(CNSS_FW_BOOT_RECOVERY, &plat_priv->driver_state))
+ goto skip_driver_remove;
+
+ if (!plat_priv->driver_ops)
+ return -EINVAL;
+
+ if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+ cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
+ plat_priv->driver_ops->remove(pci_priv->pci_dev);
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ cnss_pci_set_auto_suspended(pci_priv, 0);
+ } else {
+ plat_priv->driver_ops->shutdown(pci_priv->pci_dev);
+ }
+
+skip_driver_remove:
+ cnss_pci_stop_mhi(pci_priv);
+
+ ret = cnss_suspend_pci_link(pci_priv);
+ if (ret)
+ cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
+
+ cnss_power_off_device(plat_priv);
+
+ clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
+ clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
+
+ if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+ clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+ clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+ }
+
+ return ret;
+}
+
+static void cnss_qca6290_crash_shutdown(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ int ret = 0;
+
+ cnss_pr_dbg("Crash shutdown with driver_state 0x%lx\n",
+ plat_priv->driver_state);
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state))
+ return;
+
+ ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_KERNEL_PANIC);
+ if (ret) {
+ cnss_pr_err("Fail to complete RDDM, err = %d\n", ret);
+ return;
+ }
+
+ cnss_pci_collect_dump_info(pci_priv);
+}
+
+static int cnss_powerup(const struct subsys_desc *subsys_desc)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv;
+
+ if (!subsys_desc->dev) {
+ cnss_pr_err("dev from subsys_desc is NULL\n");
+ return -ENODEV;
+ }
+
+ plat_priv = dev_get_drvdata(subsys_desc->dev);
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ if (!plat_priv->driver_state) {
+ cnss_pr_dbg("Powerup is ignored.\n");
+ return 0;
+ }
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ ret = cnss_qca6174_powerup(plat_priv);
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ ret = cnss_qca6290_powerup(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device_id found: 0x%lx\n",
+ plat_priv->device_id);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static int cnss_shutdown(const struct subsys_desc *subsys_desc, bool force_stop)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv;
+
+ if (!subsys_desc->dev) {
+ cnss_pr_err("dev from subsys_desc is NULL\n");
+ return -ENODEV;
+ }
+
+ plat_priv = dev_get_drvdata(subsys_desc->dev);
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ ret = cnss_qca6174_shutdown(plat_priv);
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ ret = cnss_qca6290_shutdown(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device_id found: 0x%lx\n",
+ plat_priv->device_id);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static int cnss_qca6290_ramdump(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_ramdump_info_v2 *info_v2 = &plat_priv->ramdump_info_v2;
+ struct cnss_dump_data *dump_data = &info_v2->dump_data;
+ struct cnss_dump_seg *dump_seg = info_v2->dump_data_vaddr;
+ struct ramdump_segment *ramdump_segs, *s;
+ int i, ret = 0;
+
+ if (!info_v2->dump_data_valid ||
+ dump_data->nentries == 0)
+ return 0;
+
+ ramdump_segs = kcalloc(dump_data->nentries,
+ sizeof(*ramdump_segs),
+ GFP_KERNEL);
+ if (!ramdump_segs)
+ return -ENOMEM;
+
+ s = ramdump_segs;
+ for (i = 0; i < dump_data->nentries; i++) {
+ s->address = dump_seg->address;
+ s->v_address = dump_seg->v_address;
+ s->size = dump_seg->size;
+ s++;
+ dump_seg++;
+ }
+
+ ret = do_elf_ramdump(info_v2->ramdump_dev, ramdump_segs,
+ dump_data->nentries);
+ kfree(ramdump_segs);
+
+ cnss_pci_set_mhi_state(plat_priv->bus_priv, CNSS_MHI_DEINIT);
+ cnss_pci_clear_dump_info(plat_priv->bus_priv);
+
+ return ret;
+}
+
+static int cnss_qca6174_ramdump(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_ramdump_info *ramdump_info;
+ struct ramdump_segment segment;
+
+ ramdump_info = &plat_priv->ramdump_info;
+ if (!ramdump_info->ramdump_size)
+ return -EINVAL;
+
+ memset(&segment, 0, sizeof(segment));
+ segment.v_address = ramdump_info->ramdump_va;
+ segment.size = ramdump_info->ramdump_size;
+ ret = do_ramdump(ramdump_info->ramdump_dev, &segment, 1);
+
+ return ret;
+}
+
+static int cnss_ramdump(int enable, const struct subsys_desc *subsys_desc)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ if (!enable)
+ return 0;
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ ret = cnss_qca6174_ramdump(plat_priv);
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ ret = cnss_qca6290_ramdump(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device_id found: 0x%lx\n",
+ plat_priv->device_id);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+void *cnss_get_virt_ramdump_mem(unsigned long *size)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_ramdump_info *ramdump_info;
+
+ if (!plat_priv)
+ return NULL;
+
+ ramdump_info = &plat_priv->ramdump_info;
+ *size = ramdump_info->ramdump_size;
+
+ return ramdump_info->ramdump_va;
+}
+EXPORT_SYMBOL(cnss_get_virt_ramdump_mem);
+
+void cnss_device_crashed(void)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_subsys_info *subsys_info;
+
+ if (!plat_priv)
+ return;
+
+ subsys_info = &plat_priv->subsys_info;
+ if (subsys_info->subsys_device) {
+ set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ subsys_set_crash_status(subsys_info->subsys_device, true);
+ subsystem_restart_dev(subsys_info->subsys_device);
+ }
+}
+EXPORT_SYMBOL(cnss_device_crashed);
+
+static void cnss_crash_shutdown(const struct subsys_desc *subsys_desc)
+{
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(subsys_desc->dev);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return;
+ }
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ cnss_qca6174_crash_shutdown(plat_priv);
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ cnss_qca6290_crash_shutdown(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device_id found: 0x%lx\n",
+ plat_priv->device_id);
+ }
+}
+
+static const char *cnss_recovery_reason_to_str(enum cnss_recovery_reason reason)
+{
+ switch (reason) {
+ case CNSS_REASON_DEFAULT:
+ return "DEFAULT";
+ case CNSS_REASON_LINK_DOWN:
+ return "LINK_DOWN";
+ case CNSS_REASON_RDDM:
+ return "RDDM";
+ case CNSS_REASON_TIMEOUT:
+ return "TIMEOUT";
+ }
+
+ return "UNKNOWN";
+}
+
+static int cnss_do_recovery(struct cnss_plat_data *plat_priv,
+ enum cnss_recovery_reason reason)
+{
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ struct cnss_subsys_info *subsys_info =
+ &plat_priv->subsys_info;
+ int ret = 0;
+
+ plat_priv->recovery_count++;
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ goto self_recovery;
+
+ if (plat_priv->driver_ops &&
+ test_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state))
+ plat_priv->driver_ops->update_status(pci_priv->pci_dev,
+ CNSS_RECOVERY);
+
+ switch (reason) {
+ case CNSS_REASON_LINK_DOWN:
+ if (test_bit(LINK_DOWN_SELF_RECOVERY, &quirks))
+ goto self_recovery;
+ break;
+ case CNSS_REASON_RDDM:
+ clear_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
+ ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM);
+ if (ret) {
+ cnss_pr_err("Failed to complete RDDM, err = %d\n", ret);
+ break;
+ }
+ cnss_pci_collect_dump_info(pci_priv);
+ break;
+ case CNSS_REASON_DEFAULT:
+ case CNSS_REASON_TIMEOUT:
+ break;
+ default:
+ cnss_pr_err("Unsupported recovery reason: %s(%d)\n",
+ cnss_recovery_reason_to_str(reason), reason);
+ break;
+ }
+
+ if (!subsys_info->subsys_device)
+ return 0;
+
+ subsys_set_crash_status(subsys_info->subsys_device, true);
+ subsystem_restart_dev(subsys_info->subsys_device);
+
+ return 0;
+
+self_recovery:
+ cnss_shutdown(&subsys_info->subsys_desc, false);
+ cnss_powerup(&subsys_info->subsys_desc);
+
+ return 0;
+}
+
+static int cnss_driver_recovery_hdlr(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ struct cnss_recovery_data *recovery_data = data;
+ int ret = 0;
+
+ cnss_pr_dbg("Driver recovery is triggered with reason: %s(%d)\n",
+ cnss_recovery_reason_to_str(recovery_data->reason),
+ recovery_data->reason);
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ cnss_pr_err("Recovery is already in progress!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+ cnss_pr_err("Driver unload is in progress, ignore recovery\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
+ cnss_pr_err("Driver load is in progress, ignore recovery\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ default:
+ if (!test_bit(CNSS_FW_READY, &plat_priv->driver_state)) {
+ set_bit(CNSS_FW_BOOT_RECOVERY,
+ &plat_priv->driver_state);
+ } else if (test_bit(CNSS_DRIVER_LOADING,
+ &plat_priv->driver_state)) {
+ cnss_pr_err("Driver probe is in progress, ignore recovery\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ break;
+ }
+
+ set_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
+ ret = cnss_do_recovery(plat_priv, recovery_data->reason);
+
+out:
+ kfree(data);
+ return ret;
+}
+
+int cnss_self_recovery(struct device *dev,
+ enum cnss_recovery_reason reason)
+{
+ cnss_schedule_recovery(dev, reason);
+ return 0;
+}
+EXPORT_SYMBOL(cnss_self_recovery);
+
+void cnss_schedule_recovery(struct device *dev,
+ enum cnss_recovery_reason reason)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ struct cnss_recovery_data *data;
+	gfp_t gfp = GFP_KERNEL;
+
+ if (in_interrupt() || irqs_disabled())
+ gfp = GFP_ATOMIC;
+
+ data = kzalloc(sizeof(*data), gfp);
+ if (!data)
+ return;
+
+ data->reason = reason;
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_RECOVERY,
+ false, data);
+}
+EXPORT_SYMBOL(cnss_schedule_recovery);
+
+static int cnss_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ int ret;
+
+ ret = cnss_pci_set_mhi_state(plat_priv->bus_priv,
+ CNSS_MHI_TRIGGER_RDDM);
+ if (ret) {
+ cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
+ cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+ CNSS_REASON_DEFAULT);
+ return 0;
+ }
+
+ if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state)) {
+ mod_timer(&plat_priv->fw_boot_timer,
+ jiffies + msecs_to_jiffies(FW_ASSERT_TIMEOUT));
+ }
+
+ return 0;
+}
+
+int cnss_force_fw_assert(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID) {
+ cnss_pr_info("Forced FW assert is not supported\n");
+ return -EINVAL;
+ }
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ cnss_pr_info("Recovery is already in progress, ignore forced FW assert\n");
+ return 0;
+ }
+
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
+ false, NULL);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_force_fw_assert);
+
+void fw_boot_timeout(unsigned long data)
+{
+ struct cnss_plat_data *plat_priv = (struct cnss_plat_data *)data;
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+ cnss_pr_err("Timeout waiting for FW ready indication!\n");
+
+ cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+ CNSS_REASON_TIMEOUT);
+}
+
+static int cnss_register_driver_hdlr(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ int ret = 0;
+ struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
+
+ set_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+ plat_priv->driver_ops = data;
+
+ ret = cnss_powerup(&subsys_info->subsys_desc);
+ if (ret) {
+ clear_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state);
+ plat_priv->driver_ops = NULL;
+ }
+
+ return ret;
+}
+
+static int cnss_unregister_driver_hdlr(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
+
+ set_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
+ cnss_shutdown(&subsys_info->subsys_desc, false);
+ plat_priv->driver_ops = NULL;
+
+ return 0;
+}
+
+static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
+
+ set_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
+ ret = cnss_powerup(&subsys_info->subsys_desc);
+ if (ret)
+ clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
+
+ return ret;
+}
+
+static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
+
+ cnss_wlfw_wlan_mode_send_sync(plat_priv, QMI_WLFW_OFF_V01);
+ cnss_shutdown(&subsys_info->subsys_desc, false);
+ clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
+
+ return 0;
+}
+
+static void cnss_driver_event_work(struct work_struct *work)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(work, struct cnss_plat_data, event_work);
+ struct cnss_driver_event *event;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return;
+ }
+
+ cnss_pm_stay_awake(plat_priv);
+
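+	/* Drain the event list; the lock is dropped while each handler runs. */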
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+
+ while (!list_empty(&plat_priv->event_list)) {
+ event = list_first_entry(&plat_priv->event_list,
+ struct cnss_driver_event, list);
+ list_del(&event->list);
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+ cnss_pr_dbg("Processing driver event: %s%s(%d), state: 0x%lx\n",
+ cnss_driver_event_to_str(event->type),
+ event->sync ? "-sync" : "", event->type,
+ plat_priv->driver_state);
+
+ switch (event->type) {
+ case CNSS_DRIVER_EVENT_SERVER_ARRIVE:
+ ret = cnss_wlfw_server_arrive(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_SERVER_EXIT:
+ ret = cnss_wlfw_server_exit(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_REQUEST_MEM:
+ ret = cnss_pci_alloc_fw_mem(plat_priv->bus_priv);
+ if (ret)
+ break;
+ ret = cnss_wlfw_respond_mem_send_sync(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_FW_MEM_READY:
+ ret = cnss_fw_mem_ready_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_FW_READY:
+ ret = cnss_fw_ready_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START:
+ ret = cnss_cold_boot_cal_start_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE:
+ ret = cnss_cold_boot_cal_done_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_REGISTER_DRIVER:
+ ret = cnss_register_driver_hdlr(plat_priv,
+ event->data);
+ break;
+ case CNSS_DRIVER_EVENT_UNREGISTER_DRIVER:
+ ret = cnss_unregister_driver_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_RECOVERY:
+ ret = cnss_driver_recovery_hdlr(plat_priv,
+ event->data);
+ break;
+ case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
+ ret = cnss_force_fw_assert_hdlr(plat_priv);
+ break;
+ default:
+			cnss_pr_err("Invalid driver event type: %d\n",
+				    event->type);
+ kfree(event);
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+ continue;
+ }
+
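+		/* Record result and wake waiter for sync events; free async events here. */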
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+ if (event->sync) {
+ event->ret = ret;
+ complete(&event->complete);
+ continue;
+ }
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+ kfree(event);
+
+ spin_lock_irqsave(&plat_priv->event_lock, flags);
+ }
+ spin_unlock_irqrestore(&plat_priv->event_lock, flags);
+
+ cnss_pm_relax(plat_priv);
+}
+
+int cnss_register_subsys(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_subsys_info *subsys_info;
+
+ subsys_info = &plat_priv->subsys_info;
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ subsys_info->subsys_desc.name = "AR6320";
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ subsys_info->subsys_desc.name = "QCA6290";
+ break;
+ default:
+ cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ subsys_info->subsys_desc.owner = THIS_MODULE;
+ subsys_info->subsys_desc.powerup = cnss_powerup;
+ subsys_info->subsys_desc.shutdown = cnss_shutdown;
+ subsys_info->subsys_desc.ramdump = cnss_ramdump;
+ subsys_info->subsys_desc.crash_shutdown = cnss_crash_shutdown;
+ subsys_info->subsys_desc.dev = &plat_priv->plat_dev->dev;
+
+ subsys_info->subsys_device = subsys_register(&subsys_info->subsys_desc);
+ if (IS_ERR(subsys_info->subsys_device)) {
+ ret = PTR_ERR(subsys_info->subsys_device);
+ cnss_pr_err("Failed to register subsys, err = %d\n", ret);
+ goto out;
+ }
+
+ subsys_info->subsys_handle =
+ subsystem_get(subsys_info->subsys_desc.name);
+ if (!subsys_info->subsys_handle) {
+ cnss_pr_err("Failed to get subsys_handle!\n");
+ ret = -EINVAL;
+ goto unregister_subsys;
+ } else if (IS_ERR(subsys_info->subsys_handle)) {
+ ret = PTR_ERR(subsys_info->subsys_handle);
+ cnss_pr_err("Failed to do subsystem_get, err = %d\n", ret);
+ goto unregister_subsys;
+ }
+
+ return 0;
+
+unregister_subsys:
+ subsys_unregister(subsys_info->subsys_device);
+out:
+ return ret;
+}
+
+void cnss_unregister_subsys(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_subsys_info *subsys_info;
+
+ subsys_info = &plat_priv->subsys_info;
+ subsystem_put(subsys_info->subsys_handle);
+ subsys_unregister(subsys_info->subsys_device);
+}
+
+static int cnss_init_dump_entry(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_ramdump_info *ramdump_info;
+ struct msm_dump_entry dump_entry;
+
+ ramdump_info = &plat_priv->ramdump_info;
+ ramdump_info->dump_data.addr = ramdump_info->ramdump_pa;
+ ramdump_info->dump_data.len = ramdump_info->ramdump_size;
+ ramdump_info->dump_data.version = CNSS_DUMP_FORMAT_VER;
+ ramdump_info->dump_data.magic = CNSS_DUMP_MAGIC_VER_V2;
+ strlcpy(ramdump_info->dump_data.name, CNSS_DUMP_NAME,
+ sizeof(ramdump_info->dump_data.name));
+ dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
+ dump_entry.addr = virt_to_phys(&ramdump_info->dump_data);
+
+ return msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+}
+
+static int cnss_qca6174_register_ramdump(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct device *dev;
+ struct cnss_subsys_info *subsys_info;
+ struct cnss_ramdump_info *ramdump_info;
+ u32 ramdump_size = 0;
+
+ dev = &plat_priv->plat_dev->dev;
+ subsys_info = &plat_priv->subsys_info;
+ ramdump_info = &plat_priv->ramdump_info;
+
+ if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic",
+ &ramdump_size) == 0) {
+ ramdump_info->ramdump_va = dma_alloc_coherent(dev, ramdump_size,
+ &ramdump_info->ramdump_pa, GFP_KERNEL);
+
+ if (ramdump_info->ramdump_va)
+ ramdump_info->ramdump_size = ramdump_size;
+ }
+
+ cnss_pr_dbg("ramdump va: %pK, pa: %pa\n",
+ ramdump_info->ramdump_va, &ramdump_info->ramdump_pa);
+
+ if (ramdump_info->ramdump_size == 0) {
+		cnss_pr_info("Ramdump will not be collected\n");
+ goto out;
+ }
+
+ ret = cnss_init_dump_entry(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to setup dump table, err = %d\n", ret);
+ goto free_ramdump;
+ }
+
+ ramdump_info->ramdump_dev = create_ramdump_device(
+ subsys_info->subsys_desc.name, subsys_info->subsys_desc.dev);
+ if (!ramdump_info->ramdump_dev) {
+		cnss_pr_err("Failed to create ramdump device!\n");
+ ret = -ENOMEM;
+ goto free_ramdump;
+ }
+
+ return 0;
+free_ramdump:
+ dma_free_coherent(dev, ramdump_info->ramdump_size,
+ ramdump_info->ramdump_va, ramdump_info->ramdump_pa);
+out:
+ return ret;
+}
+
+static void cnss_qca6174_unregister_ramdump(struct cnss_plat_data *plat_priv)
+{
+ struct device *dev;
+ struct cnss_ramdump_info *ramdump_info;
+
+ dev = &plat_priv->plat_dev->dev;
+ ramdump_info = &plat_priv->ramdump_info;
+
+ if (ramdump_info->ramdump_dev)
+ destroy_ramdump_device(ramdump_info->ramdump_dev);
+
+ if (ramdump_info->ramdump_va)
+ dma_free_coherent(dev, ramdump_info->ramdump_size,
+ ramdump_info->ramdump_va,
+ ramdump_info->ramdump_pa);
+}
+
+static int cnss_qca6290_register_ramdump(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_subsys_info *subsys_info;
+ struct cnss_ramdump_info_v2 *info_v2;
+ struct cnss_dump_data *dump_data;
+ struct msm_dump_entry dump_entry;
+ struct device *dev = &plat_priv->plat_dev->dev;
+ u32 ramdump_size = 0;
+
+ subsys_info = &plat_priv->subsys_info;
+ info_v2 = &plat_priv->ramdump_info_v2;
+ dump_data = &info_v2->dump_data;
+
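+	/* v2 scheme: register the dump descriptor now; segments are appended when dump info is collected. */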
+ if (of_property_read_u32(dev->of_node, "qcom,wlan-ramdump-dynamic",
+ &ramdump_size) == 0)
+ info_v2->ramdump_size = ramdump_size;
+
+ cnss_pr_dbg("Ramdump size 0x%lx\n", info_v2->ramdump_size);
+
+ info_v2->dump_data_vaddr = kzalloc(CNSS_DUMP_DESC_SIZE, GFP_KERNEL);
+ if (!info_v2->dump_data_vaddr)
+ return -ENOMEM;
+
+ dump_data->paddr = virt_to_phys(info_v2->dump_data_vaddr);
+ dump_data->version = CNSS_DUMP_FORMAT_VER_V2;
+ dump_data->magic = CNSS_DUMP_MAGIC_VER_V2;
+ dump_data->seg_version = CNSS_DUMP_SEG_VER;
+ strlcpy(dump_data->name, CNSS_DUMP_NAME,
+ sizeof(dump_data->name));
+ dump_entry.id = MSM_DUMP_DATA_CNSS_WLAN;
+ dump_entry.addr = virt_to_phys(dump_data);
+
+ ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+ if (ret) {
+ cnss_pr_err("Failed to setup dump table, err = %d\n", ret);
+ goto free_ramdump;
+ }
+
+ info_v2->ramdump_dev =
+ create_ramdump_device(subsys_info->subsys_desc.name,
+ subsys_info->subsys_desc.dev);
+ if (!info_v2->ramdump_dev) {
+ cnss_pr_err("Failed to create ramdump device!\n");
+ ret = -ENOMEM;
+ goto free_ramdump;
+ }
+
+ return 0;
+
+free_ramdump:
+ kfree(info_v2->dump_data_vaddr);
+ info_v2->dump_data_vaddr = NULL;
+ return ret;
+}
+
+static void cnss_qca6290_unregister_ramdump(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_ramdump_info_v2 *info_v2;
+
+ info_v2 = &plat_priv->ramdump_info_v2;
+
+ if (info_v2->ramdump_dev)
+ destroy_ramdump_device(info_v2->ramdump_dev);
+
+ kfree(info_v2->dump_data_vaddr);
+ info_v2->dump_data_vaddr = NULL;
+ info_v2->dump_data_valid = false;
+}
+
+int cnss_register_ramdump(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ ret = cnss_qca6174_register_ramdump(plat_priv);
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ ret = cnss_qca6290_register_ramdump(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
+ ret = -ENODEV;
+ break;
+ }
+ return ret;
+}
+
+void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv)
+{
+ switch (plat_priv->device_id) {
+ case QCA6174_DEVICE_ID:
+ cnss_qca6174_unregister_ramdump(plat_priv);
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ cnss_qca6290_unregister_ramdump(plat_priv);
+ break;
+ default:
+ cnss_pr_err("Unknown device ID: 0x%lx\n", plat_priv->device_id);
+ break;
+ }
+}
+
+static int cnss_register_bus_scale(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_bus_bw_info *bus_bw_info;
+
+ bus_bw_info = &plat_priv->bus_bw_info;
+
+ bus_bw_info->bus_scale_table =
+ msm_bus_cl_get_pdata(plat_priv->plat_dev);
+ if (bus_bw_info->bus_scale_table) {
+ bus_bw_info->bus_client =
+ msm_bus_scale_register_client(
+ bus_bw_info->bus_scale_table);
+ if (!bus_bw_info->bus_client) {
+ cnss_pr_err("Failed to register bus scale client!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
+static void cnss_unregister_bus_scale(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_bus_bw_info *bus_bw_info;
+
+ bus_bw_info = &plat_priv->bus_bw_info;
+
+ if (bus_bw_info->bus_client)
+ msm_bus_scale_unregister_client(bus_bw_info->bus_client);
+}
+
+static ssize_t cnss_fs_ready_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int fs_ready = 0;
+ struct cnss_plat_data *plat_priv = dev_get_drvdata(dev);
+
+	if (sscanf(buf, "%d", &fs_ready) != 1)
+ return -EINVAL;
+
+ cnss_pr_dbg("File system is ready, fs_ready is %d, count is %zu\n",
+ fs_ready, count);
+
+ if (qmi_bypass) {
+ cnss_pr_dbg("QMI is bypassed.\n");
+ return count;
+ }
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return count;
+ }
+
+ switch (plat_priv->device_id) {
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ break;
+ default:
+ cnss_pr_err("Not supported for device ID 0x%lx\n",
+ plat_priv->device_id);
+ return count;
+ }
+
+ if (fs_ready == FILE_SYSTEM_READY) {
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START,
+ true, NULL);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(fs_ready, 0220, NULL, cnss_fs_ready_store);
+
+static int cnss_create_sysfs(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ ret = device_create_file(&plat_priv->plat_dev->dev, &dev_attr_fs_ready);
+ if (ret) {
+ cnss_pr_err("Failed to create device file, err = %d\n", ret);
+ goto out;
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
+static void cnss_remove_sysfs(struct cnss_plat_data *plat_priv)
+{
+ device_remove_file(&plat_priv->plat_dev->dev, &dev_attr_fs_ready);
+}
+
+static int cnss_event_work_init(struct cnss_plat_data *plat_priv)
+{
+ spin_lock_init(&plat_priv->event_lock);
+ plat_priv->event_wq = alloc_workqueue("cnss_driver_event",
+ WQ_UNBOUND, 1);
+ if (!plat_priv->event_wq) {
+ cnss_pr_err("Failed to create event workqueue!\n");
+		return -ENOMEM;
+ }
+
+ INIT_WORK(&plat_priv->event_work, cnss_driver_event_work);
+ INIT_LIST_HEAD(&plat_priv->event_list);
+
+ return 0;
+}
+
+static void cnss_event_work_deinit(struct cnss_plat_data *plat_priv)
+{
+ destroy_workqueue(plat_priv->event_wq);
+}
+
+static const struct platform_device_id cnss_platform_id_table[] = {
+ { .name = "qca6174", .driver_data = QCA6174_DEVICE_ID, },
+ { .name = "qca6290", .driver_data = QCA6290_DEVICE_ID, },
+};
+
+static const struct of_device_id cnss_of_match_table[] = {
+ {
+ .compatible = "qcom,cnss",
+ .data = (void *)&cnss_platform_id_table[0]},
+ {
+ .compatible = "qcom,cnss-qca6290",
+ .data = (void *)&cnss_platform_id_table[1]},
+ { },
+};
+MODULE_DEVICE_TABLE(of, cnss_of_match_table);
+
+static int cnss_probe(struct platform_device *plat_dev)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv;
+ const struct of_device_id *of_id;
+ const struct platform_device_id *device_id;
+
+ if (cnss_get_plat_priv(plat_dev)) {
+ cnss_pr_err("Driver is already initialized!\n");
+ ret = -EEXIST;
+ goto out;
+ }
+
+ of_id = of_match_device(cnss_of_match_table, &plat_dev->dev);
+ if (!of_id || !of_id->data) {
+ cnss_pr_err("Failed to find of match device!\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ device_id = of_id->data;
+
+ plat_priv = devm_kzalloc(&plat_dev->dev, sizeof(*plat_priv),
+ GFP_KERNEL);
+ if (!plat_priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ plat_priv->plat_dev = plat_dev;
+ plat_priv->device_id = device_id->driver_data;
+ cnss_set_plat_priv(plat_dev, plat_priv);
+ platform_set_drvdata(plat_dev, plat_priv);
+
+ ret = cnss_get_resources(plat_priv);
+ if (ret)
+ goto reset_ctx;
+
+ ret = cnss_power_on_device(plat_priv);
+ if (ret)
+ goto free_res;
+
+ ret = cnss_pci_init(plat_priv);
+ if (ret)
+ goto power_off;
+
+ ret = cnss_register_esoc(plat_priv);
+ if (ret)
+ goto deinit_pci;
+
+ ret = cnss_register_bus_scale(plat_priv);
+ if (ret)
+ goto unreg_esoc;
+
+ ret = cnss_create_sysfs(plat_priv);
+ if (ret)
+ goto unreg_bus_scale;
+
+ ret = cnss_event_work_init(plat_priv);
+ if (ret)
+ goto remove_sysfs;
+
+ ret = cnss_qmi_init(plat_priv);
+ if (ret)
+ goto deinit_event_work;
+
+ ret = cnss_debugfs_create(plat_priv);
+ if (ret)
+ goto deinit_qmi;
+
+ setup_timer(&plat_priv->fw_boot_timer,
+ fw_boot_timeout, (unsigned long)plat_priv);
+
+ register_pm_notifier(&cnss_pm_notifier);
+
+ ret = device_init_wakeup(&plat_dev->dev, true);
+ if (ret)
+ cnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
+ ret);
+
+ cnss_pr_info("Platform driver probed successfully.\n");
+
+ return 0;
+
+deinit_qmi:
+ cnss_qmi_deinit(plat_priv);
+deinit_event_work:
+ cnss_event_work_deinit(plat_priv);
+remove_sysfs:
+ cnss_remove_sysfs(plat_priv);
+unreg_bus_scale:
+ cnss_unregister_bus_scale(plat_priv);
+unreg_esoc:
+ cnss_unregister_esoc(plat_priv);
+deinit_pci:
+ cnss_pci_deinit(plat_priv);
+power_off:
+ cnss_power_off_device(plat_priv);
+free_res:
+ cnss_put_resources(plat_priv);
+reset_ctx:
+ platform_set_drvdata(plat_dev, NULL);
+ cnss_set_plat_priv(plat_dev, NULL);
+out:
+ return ret;
+}
+
+static int cnss_remove(struct platform_device *plat_dev)
+{
+ struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);
+
+ device_init_wakeup(&plat_dev->dev, false);
+ unregister_pm_notifier(&cnss_pm_notifier);
+ del_timer(&plat_priv->fw_boot_timer);
+ cnss_debugfs_destroy(plat_priv);
+ cnss_qmi_deinit(plat_priv);
+ cnss_event_work_deinit(plat_priv);
+ cnss_remove_sysfs(plat_priv);
+ cnss_unregister_bus_scale(plat_priv);
+ cnss_unregister_esoc(plat_priv);
+ cnss_pci_deinit(plat_priv);
+ cnss_put_resources(plat_priv);
+ platform_set_drvdata(plat_dev, NULL);
+ plat_env = NULL;
+
+ return 0;
+}
+
+static struct platform_driver cnss_platform_driver = {
+ .probe = cnss_probe,
+ .remove = cnss_remove,
+ .driver = {
+ .name = "cnss2",
+ .owner = THIS_MODULE,
+ .of_match_table = cnss_of_match_table,
+ },
+};
+
+static int __init cnss_initialize(void)
+{
+ int ret = 0;
+
+ cnss_debug_init();
+ ret = platform_driver_register(&cnss_platform_driver);
+ if (ret)
+ cnss_debug_deinit();
+
+ return ret;
+}
+
+static void __exit cnss_exit(void)
+{
+ platform_driver_unregister(&cnss_platform_driver);
+ cnss_debug_deinit();
+}
+
+module_init(cnss_initialize);
+module_exit(cnss_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CNSS2 Platform Driver");
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
new file mode 100644
index 000000000000..e3a8d0cccd52
--- /dev/null
+++ b/drivers/net/wireless/cnss2/main.h
@@ -0,0 +1,221 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_MAIN_H
+#define _CNSS_MAIN_H
+
+#include <linux/esoc_client.h>
+#include <linux/etherdevice.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_qos.h>
+#include <net/cnss2.h>
+#include <soc/qcom/memory_dump.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include "qmi.h"
+
+#define MAX_NO_OF_MAC_ADDR 4
+
+enum cnss_dev_bus_type {
+ CNSS_BUS_NONE = -1,
+ CNSS_BUS_PCI,
+};
+
+struct cnss_vreg_info {
+ struct regulator *reg;
+ const char *name;
+ u32 min_uv;
+ u32 max_uv;
+ u32 load_ua;
+ u32 delay_us;
+};
+
+struct cnss_pinctrl_info {
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *bootstrap_active;
+ struct pinctrl_state *wlan_en_active;
+ struct pinctrl_state *wlan_en_sleep;
+};
+
+struct cnss_subsys_info {
+ struct subsys_device *subsys_device;
+ struct subsys_desc subsys_desc;
+ void *subsys_handle;
+};
+
+struct cnss_ramdump_info {
+ struct ramdump_device *ramdump_dev;
+ unsigned long ramdump_size;
+ void *ramdump_va;
+ phys_addr_t ramdump_pa;
+ struct msm_dump_data dump_data;
+};
+
+struct cnss_dump_seg {
+ unsigned long address;
+ void *v_address;
+ unsigned long size;
+ u32 type;
+};
+
+struct cnss_dump_data {
+ u32 version;
+ u32 magic;
+ char name[32];
+ phys_addr_t paddr;
+ int nentries;
+ u32 seg_version;
+};
+
+struct cnss_ramdump_info_v2 {
+ struct ramdump_device *ramdump_dev;
+ unsigned long ramdump_size;
+ void *dump_data_vaddr;
+ bool dump_data_valid;
+ struct cnss_dump_data dump_data;
+};
+
+struct cnss_esoc_info {
+ struct esoc_desc *esoc_desc;
+ bool notify_modem_status;
+ void *modem_notify_handler;
+ int modem_current_status;
+};
+
+struct cnss_bus_bw_info {
+ struct msm_bus_scale_pdata *bus_scale_table;
+ u32 bus_client;
+ int current_bw_vote;
+};
+
+struct cnss_wlan_mac_addr {
+ u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN];
+ u32 no_of_mac_addr_set;
+};
+
+struct cnss_wlan_mac_info {
+ struct cnss_wlan_mac_addr wlan_mac_addr;
+ bool is_wlan_mac_set;
+};
+
+struct cnss_fw_mem {
+ size_t size;
+ void *va;
+ phys_addr_t pa;
+ bool valid;
+};
+
+enum cnss_driver_event_type {
+ CNSS_DRIVER_EVENT_SERVER_ARRIVE,
+ CNSS_DRIVER_EVENT_SERVER_EXIT,
+ CNSS_DRIVER_EVENT_REQUEST_MEM,
+ CNSS_DRIVER_EVENT_FW_MEM_READY,
+ CNSS_DRIVER_EVENT_FW_READY,
+ CNSS_DRIVER_EVENT_COLD_BOOT_CAL_START,
+ CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
+ CNSS_DRIVER_EVENT_REGISTER_DRIVER,
+ CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
+ CNSS_DRIVER_EVENT_RECOVERY,
+ CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
+ CNSS_DRIVER_EVENT_MAX,
+};
+
+enum cnss_driver_state {
+ CNSS_QMI_WLFW_CONNECTED,
+ CNSS_FW_MEM_READY,
+ CNSS_FW_READY,
+ CNSS_COLD_BOOT_CAL,
+ CNSS_DRIVER_LOADING,
+ CNSS_DRIVER_UNLOADING,
+ CNSS_DRIVER_PROBED,
+ CNSS_DRIVER_RECOVERY,
+ CNSS_FW_BOOT_RECOVERY,
+ CNSS_DEV_ERR_NOTIFY,
+};
+
+struct cnss_recovery_data {
+ enum cnss_recovery_reason reason;
+};
+
+enum cnss_pins {
+ CNSS_WLAN_EN,
+ CNSS_PCIE_TXP,
+ CNSS_PCIE_TXN,
+ CNSS_PCIE_RXP,
+ CNSS_PCIE_RXN,
+ CNSS_PCIE_REFCLKP,
+ CNSS_PCIE_REFCLKN,
+ CNSS_PCIE_RST,
+ CNSS_PCIE_WAKE,
+};
+
+struct cnss_pin_connect_result {
+ u32 fw_pwr_pin_result;
+ u32 fw_phy_io_pin_result;
+ u32 fw_rf_pin_result;
+ u32 host_pin_result;
+};
+
+struct cnss_plat_data {
+ struct platform_device *plat_dev;
+ void *bus_priv;
+ struct cnss_vreg_info *vreg_info;
+ struct cnss_pinctrl_info pinctrl_info;
+ struct cnss_subsys_info subsys_info;
+ struct cnss_ramdump_info ramdump_info;
+ struct cnss_ramdump_info_v2 ramdump_info_v2;
+ struct cnss_esoc_info esoc_info;
+ struct cnss_bus_bw_info bus_bw_info;
+ struct notifier_block modem_nb;
+ struct cnss_platform_cap cap;
+ struct pm_qos_request qos_request;
+ unsigned long device_id;
+ struct cnss_wlan_driver *driver_ops;
+ enum cnss_driver_status driver_status;
+ u32 recovery_count;
+ struct cnss_wlan_mac_info wlan_mac_info;
+ unsigned long driver_state;
+ struct list_head event_list;
+ spinlock_t event_lock; /* spinlock for driver work event handling */
+ struct work_struct event_work;
+ struct workqueue_struct *event_wq;
+ struct qmi_handle *qmi_wlfw_clnt;
+ struct work_struct qmi_recv_msg_work;
+ struct notifier_block qmi_wlfw_clnt_nb;
+ struct wlfw_rf_chip_info_s_v01 chip_info;
+ struct wlfw_rf_board_info_s_v01 board_info;
+ struct wlfw_soc_info_s_v01 soc_info;
+ struct wlfw_fw_version_info_s_v01 fw_version_info;
+ struct cnss_fw_mem fw_mem;
+ struct cnss_fw_mem m3_mem;
+ struct cnss_pin_connect_result pin_result;
+ struct dentry *root_dentry;
+ atomic_t pm_count;
+ struct timer_list fw_boot_timer;
+};
+
+void *cnss_bus_dev_to_bus_priv(struct device *dev);
+struct cnss_plat_data *cnss_bus_dev_to_plat_priv(struct device *dev);
+int cnss_driver_event_post(struct cnss_plat_data *plat_priv,
+ enum cnss_driver_event_type type,
+ bool sync, void *data);
+int cnss_get_vreg(struct cnss_plat_data *plat_priv);
+int cnss_get_pinctrl(struct cnss_plat_data *plat_priv);
+int cnss_power_on_device(struct cnss_plat_data *plat_priv);
+void cnss_power_off_device(struct cnss_plat_data *plat_priv);
+int cnss_register_subsys(struct cnss_plat_data *plat_priv);
+void cnss_unregister_subsys(struct cnss_plat_data *plat_priv);
+int cnss_register_ramdump(struct cnss_plat_data *plat_priv);
+void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv);
+void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv);
+
+#endif /* _CNSS_MAIN_H */
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
new file mode 100644
index 000000000000..f914f4352392
--- /dev/null
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -0,0 +1,1582 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/firmware.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+
+#include "main.h"
+#include "debug.h"
+#include "pci.h"
+
+#define PCI_LINK_UP 1
+#define PCI_LINK_DOWN 0
+
+#define SAVE_PCI_CONFIG_SPACE 1
+#define RESTORE_PCI_CONFIG_SPACE 0
+
+#define PM_OPTIONS_DEFAULT 0
+#define PM_OPTIONS_LINK_DOWN \
+ (MSM_PCIE_CONFIG_NO_CFG_RESTORE | MSM_PCIE_CONFIG_LINKDOWN)
+
+#define PCI_BAR_NUM 0
+
+#ifdef CONFIG_ARM_LPAE
+#define PCI_DMA_MASK 64
+#else
+#define PCI_DMA_MASK 32
+#endif
+
+#define MHI_NODE_NAME "qcom,mhi"
+
+#define MAX_M3_FILE_NAME_LENGTH 13
+#define DEFAULT_M3_FILE_NAME "m3.bin"
+
+static DEFINE_SPINLOCK(pci_link_down_lock);
+
+static unsigned int pci_link_down_panic;
+module_param(pci_link_down_panic, uint, 0600);
+MODULE_PARM_DESC(pci_link_down_panic,
+ "Trigger kernel panic when PCI link down is detected");
+
+static bool fbc_bypass;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(fbc_bypass, bool, 0600);
+MODULE_PARM_DESC(fbc_bypass,
+ "Bypass firmware download when loading WLAN driver");
+#endif
+
+static int cnss_set_pci_config_space(struct cnss_pci_data *pci_priv, bool save)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ bool link_down_or_recovery;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ link_down_or_recovery = pci_priv->pci_link_down_ind ||
+ (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));
+
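+	/* During link down/recovery, skip the save; restore recovers config space instead. */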
+ if (save) {
+ if (link_down_or_recovery) {
+ pci_priv->saved_state = NULL;
+ } else {
+ pci_save_state(pci_dev);
+ pci_priv->saved_state = pci_store_saved_state(pci_dev);
+ }
+ } else {
+ if (link_down_or_recovery) {
+ ret = msm_pcie_recover_config(pci_dev);
+ if (ret) {
+ cnss_pr_err("Failed to recover PCI config space, err = %d\n",
+ ret);
+ return ret;
+ }
+ } else if (pci_priv->saved_state) {
+ pci_load_and_free_saved_state(pci_dev,
+ &pci_priv->saved_state);
+ pci_restore_state(pci_dev);
+ }
+ }
+
+ return 0;
+}
+
+static int cnss_set_pci_link(struct cnss_pci_data *pci_priv, bool link_up)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ bool link_down_or_recovery;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ link_down_or_recovery = pci_priv->pci_link_down_ind ||
+ (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state));
+
+ ret = msm_pcie_pm_control(link_up ? MSM_PCIE_RESUME :
+ MSM_PCIE_SUSPEND,
+ pci_dev->bus->number,
+ pci_dev, NULL,
+ link_down_or_recovery ?
+ PM_OPTIONS_LINK_DOWN :
+ PM_OPTIONS_DEFAULT);
+ if (ret) {
+ cnss_pr_err("Failed to %s PCI link with %s option, err = %d\n",
+ link_up ? "resume" : "suspend",
+ link_down_or_recovery ? "link down" : "default",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (!pci_priv->pci_link_state) {
+ cnss_pr_info("PCI link is already suspended!\n");
+ goto out;
+ }
+
+ ret = cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
+ if (ret)
+ goto out;
+
+ ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
+ if (ret)
+ goto out;
+
+ pci_priv->pci_link_state = PCI_LINK_DOWN;
+
+ return 0;
+out:
+ return ret;
+}
+
+int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ if (pci_priv->pci_link_state) {
+ cnss_pr_info("PCI link is already resumed!\n");
+ goto out;
+ }
+
+ ret = cnss_set_pci_link(pci_priv, PCI_LINK_UP);
+ if (ret)
+ goto out;
+
+ pci_priv->pci_link_state = PCI_LINK_UP;
+
+ ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
+ if (ret)
+ goto out;
+
+ if (pci_priv->pci_link_down_ind)
+ pci_priv->pci_link_down_ind = false;
+
+ return 0;
+out:
+ return ret;
+}
+
+int cnss_pci_link_down(struct device *dev)
+{
+ unsigned long flags;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL!\n");
+ return -EINVAL;
+ }
+
+ if (pci_link_down_panic)
+ panic("cnss: PCI link is down!\n");
+
+ spin_lock_irqsave(&pci_link_down_lock, flags);
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_dbg("PCI link down recovery is in progress, ignore!\n");
+ spin_unlock_irqrestore(&pci_link_down_lock, flags);
+ return -EINVAL;
+ }
+ pci_priv->pci_link_down_ind = true;
+ spin_unlock_irqrestore(&pci_link_down_lock, flags);
+
+ cnss_pr_err("PCI link down is detected by host driver, schedule recovery!\n");
+
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_NOTIFY_LINK_ERROR);
+ cnss_schedule_recovery(dev, CNSS_REASON_LINK_DOWN);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_pci_link_down);
+
+static int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct device *dev;
+ struct dma_iommu_mapping *mapping;
+ int atomic_ctx = 1;
+ int s1_bypass = 1;
+
+ dev = &pci_priv->pci_dev->dev;
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ pci_priv->smmu_iova_start,
+ pci_priv->smmu_iova_len);
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ cnss_pr_err("Failed to create SMMU mapping, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_ATOMIC,
+ &atomic_ctx);
+ if (ret) {
+		cnss_pr_err("Failed to set SMMU atomic_ctx attribute, err = %d\n",
+			    ret);
+ goto release_mapping;
+ }
+
+ ret = iommu_domain_set_attr(mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS,
+ &s1_bypass);
+ if (ret) {
+		cnss_pr_err("Failed to set SMMU s1_bypass attribute, err = %d\n",
+			    ret);
+ goto release_mapping;
+ }
+
+ ret = arm_iommu_attach_device(dev, mapping);
+ if (ret) {
+		cnss_pr_err("Failed to attach SMMU device, err = %d\n", ret);
+ goto release_mapping;
+ }
+
+ pci_priv->smmu_mapping = mapping;
+
+ return ret;
+release_mapping:
+ arm_iommu_release_mapping(mapping);
+out:
+ return ret;
+}
+
+static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
+{
+ arm_iommu_detach_device(&pci_priv->pci_dev->dev);
+ arm_iommu_release_mapping(pci_priv->smmu_mapping);
+
+ pci_priv->smmu_mapping = NULL;
+}
+
+static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
+{
+ unsigned long flags;
+ struct pci_dev *pci_dev;
+ struct cnss_pci_data *pci_priv;
+
+ if (!notify)
+ return;
+
+ pci_dev = notify->user;
+ if (!pci_dev)
+ return;
+
+ pci_priv = cnss_get_pci_priv(pci_dev);
+ if (!pci_priv)
+ return;
+
+ switch (notify->event) {
+ case MSM_PCIE_EVENT_LINKDOWN:
+ if (pci_link_down_panic)
+ panic("cnss: PCI link is down!\n");
+
+ spin_lock_irqsave(&pci_link_down_lock, flags);
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_dbg("PCI link down recovery is in progress, ignore!\n");
+ spin_unlock_irqrestore(&pci_link_down_lock, flags);
+ return;
+ }
+ pci_priv->pci_link_down_ind = true;
+ spin_unlock_irqrestore(&pci_link_down_lock, flags);
+
+ cnss_pr_err("PCI link down, schedule recovery!\n");
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_NOTIFY_LINK_ERROR);
+ if (pci_dev->device == QCA6174_DEVICE_ID)
+ disable_irq(pci_dev->irq);
+ cnss_schedule_recovery(&pci_dev->dev, CNSS_REASON_LINK_DOWN);
+ break;
+ case MSM_PCIE_EVENT_WAKEUP:
+ if (cnss_pci_get_monitor_wake_intr(pci_priv) &&
+ cnss_pci_get_auto_suspended(pci_priv)) {
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ pm_request_resume(&pci_dev->dev);
+ }
+ break;
+ default:
+ cnss_pr_err("Received invalid PCI event: %d\n", notify->event);
+ }
+}
+
+static int cnss_reg_pci_event(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct msm_pcie_register_event *pci_event;
+
+ pci_event = &pci_priv->msm_pci_event;
+ pci_event->events = MSM_PCIE_EVENT_LINKDOWN |
+ MSM_PCIE_EVENT_WAKEUP;
+ pci_event->user = pci_priv->pci_dev;
+ pci_event->mode = MSM_PCIE_TRIGGER_CALLBACK;
+ pci_event->callback = cnss_pci_event_cb;
+ pci_event->options = MSM_PCIE_CONFIG_NO_RECOVERY;
+
+ ret = msm_pcie_register_event(pci_event);
+ if (ret)
+ cnss_pr_err("Failed to register MSM PCI event, err = %d\n",
+ ret);
+
+ return ret;
+}
+
+static void cnss_dereg_pci_event(struct cnss_pci_data *pci_priv)
+{
+ msm_pcie_deregister_event(&pci_priv->msm_pci_event);
+}
+
+static int cnss_pci_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct cnss_wlan_driver *driver_ops;
+
+ pm_message_t state = { .event = PM_EVENT_SUSPEND };
+
+ if (!pci_priv)
+ goto out;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ goto out;
+
+ driver_ops = plat_priv->driver_ops;
+ if (driver_ops && driver_ops->suspend) {
+ ret = driver_ops->suspend(pci_dev, state);
+ if (pci_priv->pci_link_state) {
+ if (cnss_pci_set_mhi_state(pci_priv,
+ CNSS_MHI_SUSPEND)) {
+ driver_ops->resume(pci_dev);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ cnss_set_pci_config_space(pci_priv,
+ SAVE_PCI_CONFIG_SPACE);
+ }
+ }
+
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+
+out:
+ return ret;
+}
+
+static int cnss_pci_resume(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ goto out;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ goto out;
+
+ driver_ops = plat_priv->driver_ops;
+ if (driver_ops && driver_ops->resume && !pci_priv->pci_link_down_ind) {
+ if (pci_priv->saved_state)
+ cnss_set_pci_config_space(pci_priv,
+ RESTORE_PCI_CONFIG_SPACE);
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+ ret = driver_ops->resume(pci_dev);
+ }
+
+out:
+ return ret;
+}
+
+static int cnss_pci_suspend_noirq(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ goto out;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ goto out;
+
+ driver_ops = plat_priv->driver_ops;
+ if (driver_ops && driver_ops->suspend_noirq)
+ ret = driver_ops->suspend_noirq(pci_dev);
+
+out:
+ return ret;
+}
+
+static int cnss_pci_resume_noirq(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ goto out;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ goto out;
+
+ driver_ops = plat_priv->driver_ops;
+ if (driver_ops && driver_ops->resume_noirq &&
+ !pci_priv->pci_link_down_ind)
+ ret = driver_ops->resume_noirq(pci_dev);
+
+out:
+ return ret;
+}
+
+static int cnss_pci_runtime_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ return -EAGAIN;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ return -EAGAIN;
+
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_dbg("PCI link down recovery is in progress!\n");
+ return -EAGAIN;
+ }
+
+ cnss_pr_dbg("Runtime suspend start\n");
+
+ driver_ops = plat_priv->driver_ops;
+ if (driver_ops && driver_ops->runtime_ops &&
+ driver_ops->runtime_ops->runtime_suspend)
+ ret = driver_ops->runtime_ops->runtime_suspend(pci_dev);
+
+ cnss_pr_info("Runtime suspend status: %d\n", ret);
+
+ return ret;
+}
+
+static int cnss_pci_runtime_resume(struct device *dev)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv;
+ struct cnss_wlan_driver *driver_ops;
+
+ if (!pci_priv)
+ return -EAGAIN;
+
+ plat_priv = pci_priv->plat_priv;
+ if (!plat_priv)
+ return -EAGAIN;
+
+ if (pci_priv->pci_link_down_ind) {
+ cnss_pr_dbg("PCI link down recovery is in progress!\n");
+ return -EAGAIN;
+ }
+
+ cnss_pr_dbg("Runtime resume start\n");
+
+ driver_ops = plat_priv->driver_ops;
+ if (driver_ops && driver_ops->runtime_ops &&
+ driver_ops->runtime_ops->runtime_resume)
+ ret = driver_ops->runtime_ops->runtime_resume(pci_dev);
+
+ cnss_pr_info("Runtime resume status: %d\n", ret);
+
+ return ret;
+}
+
+static int cnss_pci_runtime_idle(struct device *dev)
+{
+ cnss_pr_dbg("Runtime idle\n");
+
+ pm_request_autosuspend(dev);
+
+ return -EBUSY;
+}
+
+int cnss_wlan_pm_control(bool vote)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct cnss_pci_data *pci_priv;
+ struct pci_dev *pci_dev;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ pci_priv = plat_priv->bus_priv;
+ if (!pci_priv)
+ return -ENODEV;
+
+ pci_dev = pci_priv->pci_dev;
+
+ return msm_pcie_pm_control(vote ? MSM_PCIE_DISABLE_PC :
+ MSM_PCIE_ENABLE_PC,
+ pci_dev->bus->number, pci_dev,
+ NULL, PM_OPTIONS_DEFAULT);
+}
+EXPORT_SYMBOL(cnss_wlan_pm_control);
+
+int cnss_auto_suspend(void)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct pci_dev *pci_dev;
+ struct cnss_pci_data *pci_priv;
+ struct cnss_bus_bw_info *bus_bw_info;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ pci_priv = plat_priv->bus_priv;
+ if (!pci_priv)
+ return -ENODEV;
+
+ pci_dev = pci_priv->pci_dev;
+
+ if (pci_priv->pci_link_state) {
+ if (cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_SUSPEND)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ cnss_set_pci_config_space(pci_priv, SAVE_PCI_CONFIG_SPACE);
+ pci_disable_device(pci_dev);
+
+ ret = pci_set_power_state(pci_dev, PCI_D3hot);
+ if (ret)
+ cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
+ if (cnss_set_pci_link(pci_priv, PCI_LINK_DOWN)) {
+ cnss_pr_err("Failed to shutdown PCI link!\n");
+ ret = -EAGAIN;
+ goto resume_mhi;
+ }
+ }
+
+ pci_priv->pci_link_state = PCI_LINK_DOWN;
+ cnss_pci_set_auto_suspended(pci_priv, 1);
+ cnss_pci_set_monitor_wake_intr(pci_priv, true);
+
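+	/* Drop the bus bandwidth vote while the device is auto-suspended. */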
+ bus_bw_info = &plat_priv->bus_bw_info;
+ msm_bus_scale_client_update_request(bus_bw_info->bus_client,
+ CNSS_BUS_WIDTH_NONE);
+
+ return 0;
+
+resume_mhi:
+ if (pci_enable_device(pci_dev))
+ cnss_pr_err("Failed to enable PCI device!\n");
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_auto_suspend);
+
+int cnss_auto_resume(void)
+{
+ int ret = 0;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct pci_dev *pci_dev;
+ struct cnss_pci_data *pci_priv;
+ struct cnss_bus_bw_info *bus_bw_info;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ pci_priv = plat_priv->bus_priv;
+ if (!pci_priv)
+ return -ENODEV;
+
+ pci_dev = pci_priv->pci_dev;
+ if (!pci_priv->pci_link_state) {
+ if (cnss_set_pci_link(pci_priv, PCI_LINK_UP)) {
+ cnss_pr_err("Failed to resume PCI link!\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+ pci_priv->pci_link_state = PCI_LINK_UP;
+ ret = pci_enable_device(pci_dev);
+ if (ret)
+ cnss_pr_err("Failed to enable PCI device, err = %d\n",
+ ret);
+ }
+
+ cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
+ pci_set_master(pci_dev);
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+ cnss_pci_set_auto_suspended(pci_priv, 0);
+
+ bus_bw_info = &plat_priv->bus_bw_info;
+ msm_bus_scale_client_update_request(bus_bw_info->bus_client,
+ bus_bw_info->current_bw_vote);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(cnss_auto_resume);
+
+int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
+
+ if (!fw_mem->va && fw_mem->size) {
+ fw_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
+ fw_mem->size, &fw_mem->pa,
+ GFP_KERNEL);
+ if (!fw_mem->va) {
+ cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx\n",
+ fw_mem->size);
+ fw_mem->size = 0;
+
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
+
+ if (fw_mem->va && fw_mem->size) {
+ cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n",
+ fw_mem->va, &fw_mem->pa, fw_mem->size);
+ dma_free_coherent(&pci_priv->pci_dev->dev, fw_mem->size,
+ fw_mem->va, fw_mem->pa);
+ fw_mem->va = NULL;
+ fw_mem->pa = 0;
+ fw_mem->size = 0;
+ }
+}
+
+int cnss_pci_load_m3(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
+ char filename[MAX_M3_FILE_NAME_LENGTH];
+ const struct firmware *fw_entry;
+ int ret = 0;
+
+ if (!m3_mem->va && !m3_mem->size) {
+ snprintf(filename, sizeof(filename), DEFAULT_M3_FILE_NAME);
+
+ ret = request_firmware(&fw_entry, filename,
+ &pci_priv->pci_dev->dev);
+ if (ret) {
+ cnss_pr_err("Failed to load M3 image: %s\n", filename);
+ return ret;
+ }
+
+ m3_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev,
+ fw_entry->size, &m3_mem->pa,
+ GFP_KERNEL);
+ if (!m3_mem->va) {
+ cnss_pr_err("Failed to allocate memory for M3, size: 0x%zx\n",
+ fw_entry->size);
+ release_firmware(fw_entry);
+ return -ENOMEM;
+ }
+
+ memcpy(m3_mem->va, fw_entry->data, fw_entry->size);
+ m3_mem->size = fw_entry->size;
+ release_firmware(fw_entry);
+ }
+
+ return 0;
+}
+
+static void cnss_pci_free_m3_mem(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
+
+ if (m3_mem->va && m3_mem->size) {
+ cnss_pr_dbg("Freeing memory for M3, va: 0x%pK, pa: %pa, size: 0x%zx\n",
+ m3_mem->va, &m3_mem->pa, m3_mem->size);
+ dma_free_coherent(&pci_priv->pci_dev->dev, m3_mem->size,
+ m3_mem->va, m3_mem->pa);
+ }
+
+ m3_mem->va = NULL;
+ m3_mem->pa = 0;
+ m3_mem->size = 0;
+}
+
+int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va,
+ phys_addr_t *pa)
+{
+ if (!pci_priv)
+ return -ENODEV;
+
+ *va = pci_priv->bar;
+ *pa = pci_resource_start(pci_priv->pci_dev, PCI_BAR_NUM);
+
+ return 0;
+}
+
+#ifdef CONFIG_CNSS_QCA6290
+#define PCI_MAX_BAR_SIZE 0xD00000
+
+static void __iomem *cnss_pci_iomap(struct pci_dev *dev, int bar,
+ unsigned long maxlen)
+{
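+	/* Map a fixed PCI_MAX_BAR_SIZE window instead of the full BAR length. */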
+ resource_size_t start = pci_resource_start(dev, bar);
+ resource_size_t len = PCI_MAX_BAR_SIZE;
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (!len || !start)
+ return NULL;
+
+ if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM)) {
+		if ((flags & IORESOURCE_CACHEABLE) && !(flags & IORESOURCE_IO))
+ return ioremap(start, len);
+ else
+ return ioremap_nocache(start, len);
+ }
+
+ return NULL;
+}
+#else
+static void __iomem *cnss_pci_iomap(struct pci_dev *dev, int bar,
+ unsigned long maxlen)
+{
+ return pci_iomap(dev, bar, maxlen);
+}
+#endif
+
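+/* Static MSI vector map: MHI 0-1, CE 2-12, WAKE 13, DP 14-31 (32 total). */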
+static struct cnss_msi_config msi_config = {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct cnss_msi_user[]) {
+ { .name = "MHI", .num_vectors = 2, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 11, .base_vector = 2 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+};
+
+static int cnss_pci_get_msi_assignment(struct cnss_pci_data *pci_priv)
+{
+ pci_priv->msi_config = &msi_config;
+
+ return 0;
+}
+
+static int cnss_pci_enable_msi(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ int num_vectors;
+ struct cnss_msi_config *msi_config;
+ struct msi_desc *msi_desc;
+
+ ret = cnss_pci_get_msi_assignment(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to get MSI assignment, err = %d\n", ret);
+ goto out;
+ }
+
+ msi_config = pci_priv->msi_config;
+ if (!msi_config) {
+ cnss_pr_err("msi_config is NULL!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ num_vectors = pci_enable_msi_range(pci_dev,
+ msi_config->total_vectors,
+ msi_config->total_vectors);
+ if (num_vectors != msi_config->total_vectors) {
+		cnss_pr_err("Failed to get enough MSI vectors (%d), available vectors = %d\n",
+ msi_config->total_vectors, num_vectors);
+ ret = -EINVAL;
+ goto reset_msi_config;
+ }
+
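+	/* Cache the MSI base data; user vector data is offset from it later. */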
+ msi_desc = irq_get_msi_desc(pci_dev->irq);
+ if (!msi_desc) {
+ cnss_pr_err("msi_desc is NULL!\n");
+ ret = -EINVAL;
+ goto disable_msi;
+ }
+
+ pci_priv->msi_ep_base_data = msi_desc->msg.data;
+ if (!pci_priv->msi_ep_base_data) {
+ cnss_pr_err("Got 0 MSI base data!\n");
+ CNSS_ASSERT(0);
+ }
+
+ cnss_pr_dbg("MSI base data is %d\n", pci_priv->msi_ep_base_data);
+
+ return 0;
+
+disable_msi:
+ pci_disable_msi(pci_priv->pci_dev);
+reset_msi_config:
+ pci_priv->msi_config = NULL;
+out:
+ return ret;
+}
+
+static void cnss_pci_disable_msi(struct cnss_pci_data *pci_priv)
+{
+ pci_disable_msi(pci_priv->pci_dev);
+}
+
+int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ struct cnss_pci_data *pci_priv = dev_get_drvdata(dev);
+ struct cnss_msi_config *msi_config;
+ int idx;
+
+ if (!pci_priv)
+ return -ENODEV;
+
+ msi_config = pci_priv->msi_config;
+ if (!msi_config) {
+ cnss_pr_err("MSI is not supported.\n");
+ return -EINVAL;
+ }
+
+ for (idx = 0; idx < msi_config->total_users; idx++) {
+ if (strcmp(user_name, msi_config->users[idx].name) == 0) {
+ *num_vectors = msi_config->users[idx].num_vectors;
+ *user_base_data = msi_config->users[idx].base_vector
+ + pci_priv->msi_ep_base_data;
+ *base_vector = msi_config->users[idx].base_vector;
+
+ cnss_pr_dbg("Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
+ user_name, *num_vectors, *user_base_data,
+ *base_vector);
+
+ return 0;
+ }
+ }
+
+ cnss_pr_err("Failed to find MSI assignment for %s!\n", user_name);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(cnss_get_user_msi_assignment);
+
+int cnss_get_msi_irq(struct device *dev, unsigned int vector)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
+ return pci_dev->irq + vector;
+}
+EXPORT_SYMBOL(cnss_get_msi_irq);
+
+void cnss_get_msi_address(struct device *dev, u32 *msi_addr_low,
+ u32 *msi_addr_high)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
+ msi_addr_low);
+
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
+ msi_addr_high);
+}
+EXPORT_SYMBOL(cnss_get_msi_address);
+
+static int cnss_pci_enable_bus(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ u16 device_id;
+
+ pci_read_config_word(pci_dev, PCI_DEVICE_ID, &device_id);
+ if (device_id != pci_priv->pci_device_id->device) {
+ cnss_pr_err("PCI device ID mismatch, config ID: 0x%x, probe ID: 0x%x\n",
+ device_id, pci_priv->pci_device_id->device);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = pci_assign_resource(pci_dev, PCI_BAR_NUM);
+ if (ret) {
+		cnss_pr_err("Failed to assign PCI resource, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_enable_device(pci_dev);
+ if (ret) {
+ cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_request_region(pci_dev, PCI_BAR_NUM, "cnss");
+ if (ret) {
+ cnss_pr_err("Failed to request PCI region, err = %d\n", ret);
+ goto disable_device;
+ }
+
+ ret = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(PCI_DMA_MASK));
+ if (ret) {
+ cnss_pr_err("Failed to set PCI DMA mask (%d), err = %d\n",
+ ret, PCI_DMA_MASK);
+ goto release_region;
+ }
+
+ ret = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(PCI_DMA_MASK));
+ if (ret) {
+ cnss_pr_err("Failed to set PCI consistent DMA mask (%d), err = %d\n",
+ ret, PCI_DMA_MASK);
+ goto release_region;
+ }
+
+ pci_set_master(pci_dev);
+
+ pci_priv->bar = cnss_pci_iomap(pci_dev, PCI_BAR_NUM, 0);
+ if (!pci_priv->bar) {
+ cnss_pr_err("Failed to do PCI IO map!\n");
+ ret = -EIO;
+ goto clear_master;
+ }
+ return 0;
+
+clear_master:
+ pci_clear_master(pci_dev);
+release_region:
+ pci_release_region(pci_dev, PCI_BAR_NUM);
+disable_device:
+ pci_disable_device(pci_dev);
+out:
+ return ret;
+}
+
+static void cnss_pci_disable_bus(struct cnss_pci_data *pci_priv)
+{
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+
+ if (pci_priv->bar) {
+ pci_iounmap(pci_dev, pci_priv->bar);
+ pci_priv->bar = NULL;
+ }
+
+ pci_clear_master(pci_dev);
+ pci_release_region(pci_dev, PCI_BAR_NUM);
+ pci_disable_device(pci_dev);
+}
+
+static int cnss_mhi_pm_runtime_get(struct pci_dev *pci_dev)
+{
+ return pm_runtime_get(&pci_dev->dev);
+}
+
+static void cnss_mhi_pm_runtime_put_noidle(struct pci_dev *pci_dev)
+{
+ pm_runtime_put_noidle(&pci_dev->dev);
+}
+
+static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
+{
+ switch (mhi_state) {
+ case CNSS_MHI_INIT:
+ return "INIT";
+ case CNSS_MHI_DEINIT:
+ return "DEINIT";
+ case CNSS_MHI_POWER_ON:
+ return "POWER_ON";
+ case CNSS_MHI_POWER_OFF:
+ return "POWER_OFF";
+ case CNSS_MHI_SUSPEND:
+ return "SUSPEND";
+ case CNSS_MHI_RESUME:
+ return "RESUME";
+ case CNSS_MHI_TRIGGER_RDDM:
+ return "TRIGGER_RDDM";
+ case CNSS_MHI_RDDM:
+ return "RDDM";
+ case CNSS_MHI_RDDM_KERNEL_PANIC:
+ return "RDDM_KERNEL_PANIC";
+ case CNSS_MHI_NOTIFY_LINK_ERROR:
+ return "NOTIFY_LINK_ERROR";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void *cnss_pci_collect_dump_seg(struct cnss_pci_data *pci_priv,
+ enum mhi_rddm_segment type,
+ void *start_addr)
+{
+ int count;
+ struct scatterlist *sg_list, *s;
+ unsigned int i;
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_dump_data *dump_data =
+ &plat_priv->ramdump_info_v2.dump_data;
+ struct cnss_dump_seg *dump_seg = start_addr;
+
+ count = mhi_xfer_rddm(&pci_priv->mhi_dev, type, &sg_list);
+ if (count <= 0 || !sg_list) {
+		cnss_pr_err("Invalid dump_seg for type %u, count %d, sg_list %pK\n",
+ type, count, sg_list);
+ return start_addr;
+ }
+
+ cnss_pr_dbg("Collect dump seg: type %u, nentries %d\n", type, count);
+
+ for_each_sg(sg_list, s, count, i) {
+ dump_seg->address = sg_dma_address(s);
+ dump_seg->v_address = sg_virt(s);
+ dump_seg->size = s->length;
+ dump_seg->type = type;
+ cnss_pr_dbg("seg-%d: address 0x%lx, v_address %pK, size 0x%lx\n",
+ i, dump_seg->address,
+ dump_seg->v_address, dump_seg->size);
+ dump_seg++;
+ }
+
+ dump_data->nentries += count;
+
+ return dump_seg;
+}
+
+void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_dump_data *dump_data =
+ &plat_priv->ramdump_info_v2.dump_data;
+ void *start_addr, *end_addr;
+
+ dump_data->nentries = 0;
+
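+	/* Append FW segments first, then RD segments, into the dump descriptor area. */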
+ start_addr = plat_priv->ramdump_info_v2.dump_data_vaddr;
+ end_addr = cnss_pci_collect_dump_seg(pci_priv,
+ MHI_RDDM_FW_SEGMENT, start_addr);
+
+ start_addr = end_addr;
+ end_addr = cnss_pci_collect_dump_seg(pci_priv,
+ MHI_RDDM_RD_SEGMENT, start_addr);
+
+ if (dump_data->nentries > 0)
+ plat_priv->ramdump_info_v2.dump_data_valid = true;
+}
+
+void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+
+ plat_priv->ramdump_info_v2.dump_data.nentries = 0;
+ plat_priv->ramdump_info_v2.dump_data_valid = false;
+}
+
+static void cnss_mhi_notify_status(enum MHI_CB_REASON reason, void *priv)
+{
+	struct cnss_pci_data *pci_priv = priv;
+	struct cnss_plat_data *plat_priv;
+	enum cnss_recovery_reason cnss_reason = CNSS_REASON_RDDM;
+
+	if (!pci_priv)
+		return;
+
+	plat_priv = pci_priv->plat_priv;
+
+ cnss_pr_dbg("MHI status cb is called with reason %d\n", reason);
+
+ set_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state);
+ del_timer(&plat_priv->fw_boot_timer);
+
+ if (reason == MHI_CB_SYS_ERROR)
+ cnss_reason = CNSS_REASON_TIMEOUT;
+
+ cnss_schedule_recovery(&pci_priv->pci_dev->dev,
+ cnss_reason);
+}
+
+static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+ struct pci_dev *pci_dev = pci_priv->pci_dev;
+ struct mhi_device *mhi_dev = &pci_priv->mhi_dev;
+
+ mhi_dev->dev = &pci_priv->plat_priv->plat_dev->dev;
+ mhi_dev->pci_dev = pci_dev;
+
+ mhi_dev->resources[0].start = (resource_size_t)pci_priv->bar;
+ mhi_dev->resources[0].end = (resource_size_t)pci_priv->bar +
+ pci_resource_len(pci_dev, PCI_BAR_NUM);
+ mhi_dev->resources[0].flags =
+ pci_resource_flags(pci_dev, PCI_BAR_NUM);
+ mhi_dev->resources[0].name = "BAR";
+ cnss_pr_dbg("BAR start is %pa, BAR end is %pa\n",
+ &mhi_dev->resources[0].start, &mhi_dev->resources[0].end);
+
+ if (!mhi_dev->resources[1].start) {
+ mhi_dev->resources[1].start = pci_dev->irq;
+ mhi_dev->resources[1].end = pci_dev->irq + 1;
+ mhi_dev->resources[1].flags = IORESOURCE_IRQ;
+ mhi_dev->resources[1].name = "IRQ";
+ }
+ cnss_pr_dbg("IRQ start is %pa, IRQ end is %pa\n",
+ &mhi_dev->resources[1].start, &mhi_dev->resources[1].end);
+
+ mhi_dev->pm_runtime_get = cnss_mhi_pm_runtime_get;
+ mhi_dev->pm_runtime_put_noidle = cnss_mhi_pm_runtime_put_noidle;
+
+ mhi_dev->support_rddm = true;
+ mhi_dev->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
+ mhi_dev->status_cb = cnss_mhi_notify_status;
+
+ ret = mhi_register_device(mhi_dev, MHI_NODE_NAME, pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to register as MHI device, err = %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
+{
+}
+
+static enum mhi_dev_ctrl cnss_to_mhi_dev_state(enum cnss_mhi_state state)
+{
+ switch (state) {
+ case CNSS_MHI_INIT:
+ return MHI_DEV_CTRL_INIT;
+ case CNSS_MHI_DEINIT:
+ return MHI_DEV_CTRL_DE_INIT;
+ case CNSS_MHI_POWER_ON:
+ return MHI_DEV_CTRL_POWER_ON;
+ case CNSS_MHI_POWER_OFF:
+ return MHI_DEV_CTRL_POWER_OFF;
+ case CNSS_MHI_SUSPEND:
+ return MHI_DEV_CTRL_SUSPEND;
+ case CNSS_MHI_RESUME:
+ return MHI_DEV_CTRL_RESUME;
+ case CNSS_MHI_TRIGGER_RDDM:
+ return MHI_DEV_CTRL_TRIGGER_RDDM;
+ case CNSS_MHI_RDDM:
+ return MHI_DEV_CTRL_RDDM;
+ case CNSS_MHI_RDDM_KERNEL_PANIC:
+ return MHI_DEV_CTRL_RDDM_KERNEL_PANIC;
+ case CNSS_MHI_NOTIFY_LINK_ERROR:
+ return MHI_DEV_CTRL_NOTIFY_LINK_ERROR;
+ default:
+ cnss_pr_err("Unknown CNSS MHI state (%d)\n", state);
+ return -EINVAL;
+ }
+}
+
+static int cnss_pci_check_mhi_state_bit(struct cnss_pci_data *pci_priv,
+ enum cnss_mhi_state mhi_state)
+{
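+	/* Reject transitions that are invalid for the currently set MHI state bits. */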
+ switch (mhi_state) {
+ case CNSS_MHI_INIT:
+ if (!test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state))
+ return 0;
+ break;
+ case CNSS_MHI_DEINIT:
+ case CNSS_MHI_POWER_ON:
+ if (test_bit(CNSS_MHI_INIT, &pci_priv->mhi_state) &&
+ !test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state))
+ return 0;
+ break;
+ case CNSS_MHI_POWER_OFF:
+ case CNSS_MHI_SUSPEND:
+ if (test_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state) &&
+ !test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
+ return 0;
+ break;
+ case CNSS_MHI_RESUME:
+ if (test_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state))
+ return 0;
+ break;
+ case CNSS_MHI_TRIGGER_RDDM:
+ case CNSS_MHI_RDDM:
+ case CNSS_MHI_RDDM_KERNEL_PANIC:
+ case CNSS_MHI_NOTIFY_LINK_ERROR:
+ return 0;
+ default:
+ cnss_pr_err("Unhandled MHI state: %s(%d)\n",
+ cnss_mhi_state_to_str(mhi_state), mhi_state);
+ }
+
+ cnss_pr_err("Cannot set MHI state %s(%d) in current MHI state (0x%lx)\n",
+ cnss_mhi_state_to_str(mhi_state), mhi_state,
+ pci_priv->mhi_state);
+
+ return -EINVAL;
+}
+
+static void cnss_pci_set_mhi_state_bit(struct cnss_pci_data *pci_priv,
+ enum cnss_mhi_state mhi_state)
+{
+ switch (mhi_state) {
+ case CNSS_MHI_INIT:
+ set_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_DEINIT:
+ clear_bit(CNSS_MHI_INIT, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_POWER_ON:
+ set_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_POWER_OFF:
+ clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_SUSPEND:
+ set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_RESUME:
+ clear_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
+ break;
+ case CNSS_MHI_TRIGGER_RDDM:
+ case CNSS_MHI_RDDM:
+ case CNSS_MHI_RDDM_KERNEL_PANIC:
+ case CNSS_MHI_NOTIFY_LINK_ERROR:
+ break;
+ default:
+ cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
+ }
+}
+
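+/*
+ * Validate and translate an MHI state request, forward it to the MHI
+ * core via mhi_pm_control_device() and record the new state locally.
+ * QCA6174 does not go through MHI, so this is a no-op for that chip.
+ */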
+int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
+ enum cnss_mhi_state mhi_state)
+{
+ int ret = 0;
+ enum mhi_dev_ctrl mhi_dev_state = cnss_to_mhi_dev_state(mhi_state);
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ if (pci_priv->device_id == QCA6174_DEVICE_ID)
+ return 0;
+
+ if (mhi_dev_state < 0) {
+ cnss_pr_err("Invalid MHI DEV state (%d)\n", mhi_dev_state);
+ return -EINVAL;
+ }
+
+ ret = cnss_pci_check_mhi_state_bit(pci_priv, mhi_state);
+ if (ret)
+ goto out;
+
+ cnss_pr_dbg("Setting MHI state: %s(%d)\n",
+ cnss_mhi_state_to_str(mhi_state), mhi_state);
+ ret = mhi_pm_control_device(&pci_priv->mhi_dev, mhi_dev_state);
+ if (ret) {
+ cnss_pr_err("Failed to set MHI state: %s(%d)\n",
+ cnss_mhi_state_to_str(mhi_state), mhi_state);
+ goto out;
+ }
+
+ cnss_pci_set_mhi_state_bit(pci_priv, mhi_state);
+
+out:
+ return ret;
+}
+
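+/*
+ * Bring MHI up for firmware boot: INIT followed by POWER_ON, skipped
+ * entirely when fbc_bypass is set. A POWER_ON failure rolls back with
+ * DEINIT.
+ */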
+int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv)
+{
+ int ret = 0;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ if (fbc_bypass)
+ return 0;
+
+ ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_INIT);
+ if (ret)
+ goto out;
+
+ ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_ON);
+ if (ret)
+ goto deinit_mhi;
+
+ return 0;
+
+deinit_mhi:
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
+out:
+ return ret;
+}
+
+void cnss_pci_stop_mhi(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv;
+
+ if (!pci_priv) {
+ cnss_pr_err("pci_priv is NULL!\n");
+ return;
+ }
+
+ if (fbc_bypass)
+ return;
+
+ plat_priv = pci_priv->plat_priv;
+
+ cnss_pci_set_mhi_state_bit(pci_priv, CNSS_MHI_RESUME);
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_POWER_OFF);
+ if (!plat_priv->ramdump_info_v2.dump_data_valid)
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_DEINIT);
+}
+
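+/*
+ * PCI probe. For QCA6290 the probe is deferred until the MHI driver is
+ * ready. It then registers the subsystem and ramdump handlers, sets up
+ * SMMU if an IOVA range is provided, registers for PCI link events and
+ * enables the bus. QCA6174 is powered off again after its revision ID
+ * is read, while QCA6290 gets MSI enabled and is registered as an MHI
+ * device.
+ */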
+static int cnss_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ int ret = 0;
+ struct cnss_pci_data *pci_priv;
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
+ struct resource *res;
+
+ cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x\n",
+ id->vendor, pci_dev->device);
+
+ switch (pci_dev->device) {
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ if (!mhi_is_device_ready(&plat_priv->plat_dev->dev,
+ MHI_NODE_NAME)) {
+ cnss_pr_err("MHI driver is not ready, defer PCI probe!\n");
+ ret = -EPROBE_DEFER;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+ pci_priv = devm_kzalloc(&pci_dev->dev, sizeof(*pci_priv),
+ GFP_KERNEL);
+ if (!pci_priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pci_priv->pci_link_state = PCI_LINK_UP;
+ pci_priv->plat_priv = plat_priv;
+ pci_priv->pci_dev = pci_dev;
+ pci_priv->pci_device_id = id;
+ pci_priv->device_id = pci_dev->device;
+ cnss_set_pci_priv(pci_dev, pci_priv);
+ plat_priv->device_id = pci_dev->device;
+ plat_priv->bus_priv = pci_priv;
+
+ ret = cnss_register_subsys(plat_priv);
+ if (ret)
+ goto reset_ctx;
+
+ ret = cnss_register_ramdump(plat_priv);
+ if (ret)
+ goto unregister_subsys;
+
+ res = platform_get_resource_byname(plat_priv->plat_dev, IORESOURCE_MEM,
+ "smmu_iova_base");
+ if (res) {
+ pci_priv->smmu_iova_start = res->start;
+ pci_priv->smmu_iova_len = resource_size(res);
+ cnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: %zu\n",
+ &pci_priv->smmu_iova_start,
+ pci_priv->smmu_iova_len);
+
+ ret = cnss_pci_init_smmu(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to init SMMU, err = %d\n", ret);
+ goto unregister_ramdump;
+ }
+ }
+
+ ret = cnss_reg_pci_event(pci_priv);
+ if (ret) {
+ cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
+ goto deinit_smmu;
+ }
+
+ ret = cnss_pci_enable_bus(pci_priv);
+ if (ret)
+ goto dereg_pci_event;
+
+ switch (pci_dev->device) {
+ case QCA6174_DEVICE_ID:
+ pci_read_config_word(pci_dev, QCA6174_REV_ID_OFFSET,
+ &pci_priv->revision_id);
+ ret = cnss_suspend_pci_link(pci_priv);
+ if (ret)
+ cnss_pr_err("Failed to suspend PCI link, err = %d\n",
+ ret);
+ cnss_power_off_device(plat_priv);
+ break;
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ ret = cnss_pci_enable_msi(pci_priv);
+ if (ret)
+ goto disable_bus;
+ ret = cnss_pci_register_mhi(pci_priv);
+ if (ret) {
+ cnss_pci_disable_msi(pci_priv);
+ goto disable_bus;
+ }
+ break;
+ default:
+ cnss_pr_err("Unknown PCI device found: 0x%x\n",
+ pci_dev->device);
+ ret = -ENODEV;
+ goto disable_bus;
+ }
+
+ return 0;
+
+disable_bus:
+ cnss_pci_disable_bus(pci_priv);
+dereg_pci_event:
+ cnss_dereg_pci_event(pci_priv);
+deinit_smmu:
+ if (pci_priv->smmu_mapping)
+ cnss_pci_deinit_smmu(pci_priv);
+unregister_ramdump:
+ cnss_unregister_ramdump(plat_priv);
+unregister_subsys:
+ cnss_unregister_subsys(plat_priv);
+reset_ctx:
+ plat_priv->bus_priv = NULL;
+out:
+ return ret;
+}
+
+static void cnss_pci_remove(struct pci_dev *pci_dev)
+{
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(pci_dev);
+ struct cnss_plat_data *plat_priv =
+ cnss_bus_dev_to_plat_priv(&pci_dev->dev);
+
+ cnss_pci_free_m3_mem(pci_priv);
+ cnss_pci_free_fw_mem(pci_priv);
+
+ switch (pci_dev->device) {
+ case QCA6290_EMULATION_DEVICE_ID:
+ case QCA6290_DEVICE_ID:
+ cnss_pci_unregister_mhi(pci_priv);
+ cnss_pci_disable_msi(pci_priv);
+ break;
+ default:
+ break;
+ }
+
+ cnss_pci_disable_bus(pci_priv);
+ cnss_dereg_pci_event(pci_priv);
+ if (pci_priv->smmu_mapping)
+ cnss_pci_deinit_smmu(pci_priv);
+ cnss_unregister_ramdump(plat_priv);
+ cnss_unregister_subsys(plat_priv);
+ plat_priv->bus_priv = NULL;
+}
+
+static const struct pci_device_id cnss_pci_id_table[] = {
+ { QCA6174_VENDOR_ID, QCA6174_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
+ { QCA6290_EMULATION_VENDOR_ID, QCA6290_EMULATION_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { QCA6290_VENDOR_ID, QCA6290_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, cnss_pci_id_table);
+
+static const struct dev_pm_ops cnss_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend, cnss_pci_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cnss_pci_suspend_noirq,
+ cnss_pci_resume_noirq)
+ SET_RUNTIME_PM_OPS(cnss_pci_runtime_suspend, cnss_pci_runtime_resume,
+ cnss_pci_runtime_idle)
+};
+
+struct pci_driver cnss_pci_driver = {
+ .name = "cnss_pci",
+ .id_table = cnss_pci_id_table,
+ .probe = cnss_pci_probe,
+ .remove = cnss_pci_remove,
+ .driver = {
+ .pm = &cnss_pm_ops,
+ },
+};
+
+int cnss_pci_init(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct device *dev = &plat_priv->plat_dev->dev;
+ u32 rc_num;
+
+ ret = of_property_read_u32(dev->of_node, "qcom,wlan-rc-num", &rc_num);
+ if (ret) {
+ cnss_pr_err("Failed to find PCIe RC number, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = msm_pcie_enumerate(rc_num);
+ if (ret) {
+		cnss_pr_err("Failed to enumerate PCIe RC%x, err = %d\n",
+ rc_num, ret);
+ goto out;
+ }
+
+ ret = pci_register_driver(&cnss_pci_driver);
+ if (ret) {
+ cnss_pr_err("Failed to register to PCI framework, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
+void cnss_pci_deinit(struct cnss_plat_data *plat_priv)
+{
+ pci_unregister_driver(&cnss_pci_driver);
+}
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
new file mode 100644
index 000000000000..4dc29c3c1f10
--- /dev/null
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -0,0 +1,141 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_PCI_H
+#define _CNSS_PCI_H
+
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
+#include <linux/msm_mhi.h>
+#include <linux/msm_pcie.h>
+#include <linux/pci.h>
+
+#include "main.h"
+
+#define QCA6174_VENDOR_ID 0x168C
+#define QCA6174_DEVICE_ID 0x003E
+#define QCA6174_REV_ID_OFFSET 0x08
+#define QCA6174_REV3_VERSION 0x5020000
+#define QCA6174_REV3_2_VERSION 0x5030000
+#define QCA6290_VENDOR_ID 0x17CB
+#define QCA6290_DEVICE_ID 0x1100
+#define QCA6290_EMULATION_VENDOR_ID 0x168C
+#define QCA6290_EMULATION_DEVICE_ID 0xABCD
+
+enum cnss_mhi_state {
+ CNSS_MHI_INIT,
+ CNSS_MHI_DEINIT,
+ CNSS_MHI_SUSPEND,
+ CNSS_MHI_RESUME,
+ CNSS_MHI_POWER_OFF,
+ CNSS_MHI_POWER_ON,
+ CNSS_MHI_TRIGGER_RDDM,
+ CNSS_MHI_RDDM,
+ CNSS_MHI_RDDM_KERNEL_PANIC,
+ CNSS_MHI_NOTIFY_LINK_ERROR,
+};
+
+struct cnss_msi_user {
+ char *name;
+ int num_vectors;
+ u32 base_vector;
+};
+
+struct cnss_msi_config {
+ int total_vectors;
+ int total_users;
+ struct cnss_msi_user *users;
+};
+
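+/*
+ * Per-device PCI bus state: PCI link and MSI bookkeeping, the optional
+ * SMMU mapping, the MHI device handle and the bitmap of MHI states
+ * reached so far.
+ */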
+struct cnss_pci_data {
+ struct pci_dev *pci_dev;
+ struct cnss_plat_data *plat_priv;
+ const struct pci_device_id *pci_device_id;
+ u32 device_id;
+ u16 revision_id;
+ bool pci_link_state;
+ bool pci_link_down_ind;
+ struct pci_saved_state *saved_state;
+ struct msm_pcie_register_event msm_pci_event;
+ atomic_t auto_suspended;
+ bool monitor_wake_intr;
+ struct dma_iommu_mapping *smmu_mapping;
+ dma_addr_t smmu_iova_start;
+ size_t smmu_iova_len;
+ void __iomem *bar;
+ struct cnss_msi_config *msi_config;
+ u32 msi_ep_base_data;
+ struct mhi_device mhi_dev;
+ unsigned long mhi_state;
+};
+
+static inline void cnss_set_pci_priv(struct pci_dev *pci_dev, void *data)
+{
+ pci_set_drvdata(pci_dev, data);
+}
+
+static inline struct cnss_pci_data *cnss_get_pci_priv(struct pci_dev *pci_dev)
+{
+ return pci_get_drvdata(pci_dev);
+}
+
+static inline struct cnss_plat_data *cnss_pci_priv_to_plat_priv(void *bus_priv)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ return pci_priv->plat_priv;
+}
+
+static inline void cnss_pci_set_monitor_wake_intr(void *bus_priv, bool val)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ pci_priv->monitor_wake_intr = val;
+}
+
+static inline bool cnss_pci_get_monitor_wake_intr(void *bus_priv)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ return pci_priv->monitor_wake_intr;
+}
+
+static inline void cnss_pci_set_auto_suspended(void *bus_priv, int val)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ atomic_set(&pci_priv->auto_suspended, val);
+}
+
+static inline int cnss_pci_get_auto_suspended(void *bus_priv)
+{
+ struct cnss_pci_data *pci_priv = bus_priv;
+
+ return atomic_read(&pci_priv->auto_suspended);
+}
+
+int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv);
+int cnss_resume_pci_link(struct cnss_pci_data *pci_priv);
+int cnss_pci_init(struct cnss_plat_data *plat_priv);
+void cnss_pci_deinit(struct cnss_plat_data *plat_priv);
+int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv);
+int cnss_pci_load_m3(struct cnss_pci_data *pci_priv);
+int cnss_pci_get_bar_info(struct cnss_pci_data *pci_priv, void __iomem **va,
+ phys_addr_t *pa);
+int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
+ enum cnss_mhi_state state);
+int cnss_pci_start_mhi(struct cnss_pci_data *pci_priv);
+void cnss_pci_stop_mhi(struct cnss_pci_data *pci_priv);
+void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv);
+void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv);
+
+#endif /* _CNSS_PCI_H */
diff --git a/drivers/net/wireless/cnss2/power.c b/drivers/net/wireless/cnss2/power.c
new file mode 100644
index 000000000000..8ed1507bde11
--- /dev/null
+++ b/drivers/net/wireless/cnss2/power.c
@@ -0,0 +1,386 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include "main.h"
+#include "debug.h"
+
+static struct cnss_vreg_info cnss_vreg_info[] = {
+ {NULL, "vdd-wlan-core", 1300000, 1300000, 0, 0},
+ {NULL, "vdd-wlan-io", 1800000, 1800000, 0, 0},
+ {NULL, "vdd-wlan-xtal-aon", 0, 0, 0, 0},
+ {NULL, "vdd-wlan-xtal", 1800000, 1800000, 0, 2},
+ {NULL, "vdd-wlan", 0, 0, 0, 0},
+ {NULL, "vdd-wlan-sp2t", 2700000, 2700000, 0, 0},
+ {NULL, "wlan-ant-switch", 2700000, 2700000, 20000, 0},
+ {NULL, "wlan-soc-swreg", 1200000, 1200000, 0, 0},
+ {NULL, "vdd-wlan-en", 0, 0, 0, 10},
+};
+
+#define CNSS_VREG_INFO_SIZE ARRAY_SIZE(cnss_vreg_info)
+#define MAX_PROP_SIZE 32
+
+#define BOOTSTRAP_GPIO "qcom,enable-bootstrap-gpio"
+#define BOOTSTRAP_ACTIVE "bootstrap_active"
+#define WLAN_EN_GPIO "wlan-en-gpio"
+#define WLAN_EN_ACTIVE "wlan_en_active"
+#define WLAN_EN_SLEEP "wlan_en_sleep"
+
+#define BOOTSTRAP_DELAY 1000
+#define WLAN_ENABLE_DELAY 1000
+
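+/*
+ * Look up the WLAN supplies listed in cnss_vreg_info. Supplies that are
+ * absent (-ENODEV) are skipped; other errors abort. The table defaults
+ * can be overridden per supply with a four-cell "qcom,<name>-info" DT
+ * property read as <min_uV max_uV load_uA delay_us>, e.g. (hypothetical
+ * values) qcom,vdd-wlan-io-info = <1800000 1800000 0 0>;
+ */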
+int cnss_get_vreg(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ int i;
+ struct cnss_vreg_info *vreg_info;
+ struct device *dev;
+ struct regulator *reg;
+ const __be32 *prop;
+ char prop_name[MAX_PROP_SIZE];
+ int len;
+
+ dev = &plat_priv->plat_dev->dev;
+
+ plat_priv->vreg_info = devm_kzalloc(dev, sizeof(cnss_vreg_info),
+ GFP_KERNEL);
+ if (!plat_priv->vreg_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(plat_priv->vreg_info, cnss_vreg_info, sizeof(cnss_vreg_info));
+
+ for (i = 0; i < CNSS_VREG_INFO_SIZE; i++) {
+ vreg_info = &plat_priv->vreg_info[i];
+ reg = devm_regulator_get_optional(dev, vreg_info->name);
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+ if (ret == -ENODEV)
+ continue;
+ else if (ret == -EPROBE_DEFER)
+ cnss_pr_info("EPROBE_DEFER for regulator: %s\n",
+ vreg_info->name);
+ else
+ cnss_pr_err("Failed to get regulator %s, err = %d\n",
+ vreg_info->name, ret);
+ goto out;
+ }
+
+ vreg_info->reg = reg;
+
+ snprintf(prop_name, MAX_PROP_SIZE, "qcom,%s-info",
+ vreg_info->name);
+
+ prop = of_get_property(dev->of_node, prop_name, &len);
+ cnss_pr_dbg("Got regulator info, name: %s, len: %d\n",
+ prop_name, len);
+
+ if (!prop || len != (4 * sizeof(__be32))) {
+ cnss_pr_dbg("Property %s %s, use default\n", prop_name,
+ prop ? "invalid format" : "doesn't exist");
+ } else {
+ vreg_info->min_uv = be32_to_cpup(&prop[0]);
+ vreg_info->max_uv = be32_to_cpup(&prop[1]);
+ vreg_info->load_ua = be32_to_cpup(&prop[2]);
+ vreg_info->delay_us = be32_to_cpup(&prop[3]);
+ }
+
+ cnss_pr_dbg("Got regulator: %s, min_uv: %u, max_uv: %u, load_ua: %u, delay_us: %u\n",
+ vreg_info->name, vreg_info->min_uv,
+ vreg_info->max_uv, vreg_info->load_ua,
+ vreg_info->delay_us);
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
+static int cnss_vreg_on(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_vreg_info *vreg_info;
+ int i;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < CNSS_VREG_INFO_SIZE; i++) {
+ vreg_info = &plat_priv->vreg_info[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ cnss_pr_dbg("Regulator %s is being enabled\n", vreg_info->name);
+
+ if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0) {
+ ret = regulator_set_voltage(vreg_info->reg,
+ vreg_info->min_uv,
+ vreg_info->max_uv);
+
+ if (ret) {
+ cnss_pr_err("Failed to set voltage for regulator %s, min_uv: %u, max_uv: %u, err = %d\n",
+ vreg_info->name, vreg_info->min_uv,
+ vreg_info->max_uv, ret);
+ break;
+ }
+ }
+
+ if (vreg_info->load_ua) {
+ ret = regulator_set_load(vreg_info->reg,
+ vreg_info->load_ua);
+
+ if (ret < 0) {
+ cnss_pr_err("Failed to set load for regulator %s, load: %u, err = %d\n",
+ vreg_info->name, vreg_info->load_ua,
+ ret);
+ break;
+ }
+ }
+
+ if (vreg_info->delay_us)
+ udelay(vreg_info->delay_us);
+
+ ret = regulator_enable(vreg_info->reg);
+ if (ret) {
+ cnss_pr_err("Failed to enable regulator %s, err = %d\n",
+ vreg_info->name, ret);
+ break;
+ }
+ }
+
+	if (ret) {
+		/* index i was never enabled; roll back from i - 1 */
+		for (i--; i >= 0; i--) {
+ vreg_info = &plat_priv->vreg_info[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ regulator_disable(vreg_info->reg);
+ if (vreg_info->load_ua)
+ regulator_set_load(vreg_info->reg, 0);
+ if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0)
+ regulator_set_voltage(vreg_info->reg, 0,
+ vreg_info->max_uv);
+ }
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cnss_vreg_off(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct cnss_vreg_info *vreg_info;
+ int i;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return -ENODEV;
+ }
+
+ for (i = CNSS_VREG_INFO_SIZE - 1; i >= 0; i--) {
+ vreg_info = &plat_priv->vreg_info[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ cnss_pr_dbg("Regulator %s is being disabled\n",
+ vreg_info->name);
+
+ ret = regulator_disable(vreg_info->reg);
+ if (ret)
+ cnss_pr_err("Failed to disable regulator %s, err = %d\n",
+ vreg_info->name, ret);
+
+ if (vreg_info->load_ua) {
+ ret = regulator_set_load(vreg_info->reg, 0);
+ if (ret < 0)
+ cnss_pr_err("Failed to set load for regulator %s, err = %d\n",
+ vreg_info->name, ret);
+ }
+
+ if (vreg_info->min_uv != 0 && vreg_info->max_uv != 0) {
+ ret = regulator_set_voltage(vreg_info->reg, 0,
+ vreg_info->max_uv);
+ if (ret)
+ cnss_pr_err("Failed to set voltage for regulator %s, err = %d\n",
+ vreg_info->name, ret);
+ }
+ }
+
+ return ret;
+}
+
+int cnss_get_pinctrl(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+ struct device *dev;
+ struct cnss_pinctrl_info *pinctrl_info;
+
+ dev = &plat_priv->plat_dev->dev;
+ pinctrl_info = &plat_priv->pinctrl_info;
+
+ pinctrl_info->pinctrl = devm_pinctrl_get(dev);
+ if (IS_ERR_OR_NULL(pinctrl_info->pinctrl)) {
+ ret = PTR_ERR(pinctrl_info->pinctrl);
+ cnss_pr_err("Failed to get pinctrl, err = %d\n", ret);
+ goto out;
+ }
+
+ if (of_find_property(dev->of_node, BOOTSTRAP_GPIO, NULL)) {
+ pinctrl_info->bootstrap_active =
+ pinctrl_lookup_state(pinctrl_info->pinctrl,
+ BOOTSTRAP_ACTIVE);
+ if (IS_ERR_OR_NULL(pinctrl_info->bootstrap_active)) {
+ ret = PTR_ERR(pinctrl_info->bootstrap_active);
+ cnss_pr_err("Failed to get bootstrap active state, err = %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ if (of_find_property(dev->of_node, WLAN_EN_GPIO, NULL)) {
+ pinctrl_info->wlan_en_active =
+ pinctrl_lookup_state(pinctrl_info->pinctrl,
+ WLAN_EN_ACTIVE);
+ if (IS_ERR_OR_NULL(pinctrl_info->wlan_en_active)) {
+ ret = PTR_ERR(pinctrl_info->wlan_en_active);
+ cnss_pr_err("Failed to get wlan_en active state, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ pinctrl_info->wlan_en_sleep =
+ pinctrl_lookup_state(pinctrl_info->pinctrl,
+ WLAN_EN_SLEEP);
+ if (IS_ERR_OR_NULL(pinctrl_info->wlan_en_sleep)) {
+ ret = PTR_ERR(pinctrl_info->wlan_en_sleep);
+ cnss_pr_err("Failed to get wlan_en sleep state, err = %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
+static int cnss_select_pinctrl_state(struct cnss_plat_data *plat_priv,
+ bool state)
+{
+ int ret = 0;
+ struct cnss_pinctrl_info *pinctrl_info;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ pinctrl_info = &plat_priv->pinctrl_info;
+
+ if (state) {
+ if (!IS_ERR_OR_NULL(pinctrl_info->bootstrap_active)) {
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->
+ bootstrap_active);
+ if (ret) {
+ cnss_pr_err("Failed to select bootstrap active state, err = %d\n",
+ ret);
+ goto out;
+ }
+ udelay(BOOTSTRAP_DELAY);
+ }
+
+ if (!IS_ERR_OR_NULL(pinctrl_info->wlan_en_active)) {
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->
+ wlan_en_active);
+ if (ret) {
+ cnss_pr_err("Failed to select wlan_en active state, err = %d\n",
+ ret);
+ goto out;
+ }
+ udelay(WLAN_ENABLE_DELAY);
+ }
+ } else {
+ if (!IS_ERR_OR_NULL(pinctrl_info->wlan_en_sleep)) {
+ ret = pinctrl_select_state(pinctrl_info->pinctrl,
+ pinctrl_info->wlan_en_sleep);
+ if (ret) {
+ cnss_pr_err("Failed to select wlan_en sleep state, err = %d\n",
+ ret);
+ goto out;
+ }
+ }
+ }
+
+ return 0;
+out:
+ return ret;
+}
+
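+/*
+ * Power-on sequence: enable the regulators in table order, then drive
+ * the bootstrap and WLAN_EN pinctrl states active with their settle
+ * delays. cnss_power_off_device() reverses this: WLAN_EN to sleep
+ * first, then regulators off in reverse order.
+ */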
+int cnss_power_on_device(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ ret = cnss_vreg_on(plat_priv);
+ if (ret) {
+ cnss_pr_err("Failed to turn on vreg, err = %d\n", ret);
+ goto out;
+ }
+
+ ret = cnss_select_pinctrl_state(plat_priv, true);
+ if (ret) {
+ cnss_pr_err("Failed to select pinctrl state, err = %d\n", ret);
+ goto vreg_off;
+ }
+
+ return 0;
+vreg_off:
+ cnss_vreg_off(plat_priv);
+out:
+ return ret;
+}
+
+void cnss_power_off_device(struct cnss_plat_data *plat_priv)
+{
+ cnss_select_pinctrl_state(plat_priv, false);
+ cnss_vreg_off(plat_priv);
+}
+
+void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv)
+{
+ unsigned long pin_status = 0;
+
+ set_bit(CNSS_WLAN_EN, &pin_status);
+ set_bit(CNSS_PCIE_TXN, &pin_status);
+ set_bit(CNSS_PCIE_TXP, &pin_status);
+ set_bit(CNSS_PCIE_RXN, &pin_status);
+ set_bit(CNSS_PCIE_RXP, &pin_status);
+ set_bit(CNSS_PCIE_REFCLKN, &pin_status);
+ set_bit(CNSS_PCIE_REFCLKP, &pin_status);
+ set_bit(CNSS_PCIE_RST, &pin_status);
+
+ plat_priv->pin_result.host_pin_result = pin_status;
+}
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
new file mode 100644
index 000000000000..db55d3350eb5
--- /dev/null
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -0,0 +1,1006 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/qmi_encdec.h>
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "main.h"
+#include "debug.h"
+#include "qmi.h"
+
+#define WLFW_SERVICE_INS_ID_V01 1
+#define WLFW_CLIENT_ID 0x4b4e454c
+#define MAX_BDF_FILE_NAME 11
+#define DEFAULT_BDF_FILE_NAME "bdwlan.elf"
+#define BDF_FILE_NAME_PREFIX "bdwlan.e"
+
+#ifdef CONFIG_CNSS2_DEBUG
+static unsigned int qmi_timeout = 10000;
+module_param(qmi_timeout, uint, 0600);
+MODULE_PARM_DESC(qmi_timeout, "Timeout for QMI message in milliseconds");
+
+#define QMI_WLFW_TIMEOUT_MS qmi_timeout
+#else
+#define QMI_WLFW_TIMEOUT_MS 10000
+#endif
+
+static bool daemon_support;
+module_param(daemon_support, bool, 0600);
+MODULE_PARM_DESC(daemon_support, "User space has cnss-daemon support or not");
+
+static bool bdf_bypass = true;
+#ifdef CONFIG_CNSS2_DEBUG
+module_param(bdf_bypass, bool, 0600);
+MODULE_PARM_DESC(bdf_bypass, "If BDF is not found, send dummy BDF to FW");
+#endif
+
+enum cnss_bdf_type {
+ CNSS_BDF_BIN,
+ CNSS_BDF_ELF,
+};
+
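+/*
+ * Work item scheduled by the QMI notifier: drain all pending WLFW
+ * messages on the client handle until qmi_recv_msg() returns -ENOMSG.
+ */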
+static void cnss_wlfw_clnt_notifier_work(struct work_struct *work)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(work, struct cnss_plat_data, qmi_recv_msg_work);
+ int ret = 0;
+
+ cnss_pr_dbg("Receiving QMI WLFW event in work queue context\n");
+
+ do {
+ ret = qmi_recv_msg(plat_priv->qmi_wlfw_clnt);
+ } while (ret == 0);
+
+ if (ret != -ENOMSG)
+ cnss_pr_err("Error receiving message: %d\n", ret);
+
+ cnss_pr_dbg("Receiving QMI event completed\n");
+}
+
+static void cnss_wlfw_clnt_notifier(struct qmi_handle *handle,
+ enum qmi_event_type event,
+ void *notify_priv)
+{
+ struct cnss_plat_data *plat_priv = notify_priv;
+
+ cnss_pr_dbg("Received QMI WLFW event: %d\n", event);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return;
+ }
+
+ switch (event) {
+ case QMI_RECV_MSG:
+ schedule_work(&plat_priv->qmi_recv_msg_work);
+ break;
+ case QMI_SERVER_EXIT:
+ break;
+ default:
+ cnss_pr_dbg("Unhandled QMI event: %d\n", event);
+ break;
+ }
+}
+
+static int cnss_wlfw_clnt_svc_event_notifier(struct notifier_block *nb,
+ unsigned long code, void *_cmd)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(nb, struct cnss_plat_data, qmi_wlfw_clnt_nb);
+ int ret = 0;
+
+ cnss_pr_dbg("Received QMI WLFW service event: %ld\n", code);
+
+ switch (code) {
+ case QMI_SERVER_ARRIVE:
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_SERVER_ARRIVE,
+ false, NULL);
+ break;
+
+ case QMI_SERVER_EXIT:
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_SERVER_EXIT,
+ false, NULL);
+ break;
+ default:
+ cnss_pr_dbg("Invalid QMI service event: %ld\n", code);
+ break;
+ }
+
+ return ret;
+}
+
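+/*
+ * The *_send_sync() helpers below share a common pattern: build a WLFW
+ * request, send it with qmi_send_req_wait() bounded by
+ * QMI_WLFW_TIMEOUT_MS, and treat a send failure or a non-success QMI
+ * result as an error. The host capability request advertises whether
+ * the user-space cnss-daemon is available.
+ */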
+static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_host_cap_req_msg_v01 req;
+ struct wlfw_host_cap_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending host capability message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.daemon_support_valid = 1;
+ req.daemon_support = daemon_support;
+
+ cnss_pr_dbg("daemon_support is %d\n", req.daemon_support);
+
+ req_desc.max_msg_len = WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_HOST_CAP_REQ_V01;
+ req_desc.ei_array = wlfw_host_cap_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_HOST_CAP_RESP_V01;
+ resp_desc.ei_array = wlfw_host_cap_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send host capability request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Host capability request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ return 0;
+out:
+ CNSS_ASSERT(0);
+ return ret;
+}
+
+static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_ind_register_req_msg_v01 req;
+ struct wlfw_ind_register_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending indication register message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.client_id_valid = 1;
+ req.client_id = WLFW_CLIENT_ID;
+ req.fw_ready_enable_valid = 1;
+ req.fw_ready_enable = 1;
+ req.request_mem_enable_valid = 1;
+ req.request_mem_enable = 1;
+ req.fw_mem_ready_enable_valid = 1;
+ req.fw_mem_ready_enable = 1;
+ req.cold_boot_cal_done_enable_valid = 1;
+ req.cold_boot_cal_done_enable = 1;
+ req.pin_connect_result_enable_valid = 1;
+ req.pin_connect_result_enable = 1;
+
+ req_desc.max_msg_len = WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_IND_REGISTER_REQ_V01;
+ req_desc.ei_array = wlfw_ind_register_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_IND_REGISTER_RESP_V01;
+ resp_desc.ei_array = wlfw_ind_register_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send indication register request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Indication register request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ return 0;
+out:
+ CNSS_ASSERT(0);
+ return ret;
+}
+
+static int cnss_wlfw_request_mem_ind_hdlr(struct cnss_plat_data *plat_priv,
+ void *msg, unsigned int msg_len)
+{
+ struct msg_desc ind_desc;
+ struct wlfw_request_mem_ind_msg_v01 ind_msg;
+ struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
+ int ret = 0;
+
+ ind_desc.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01;
+ ind_desc.max_msg_len = WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN;
+ ind_desc.ei_array = wlfw_request_mem_ind_msg_v01_ei;
+
+ ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+ if (ret < 0) {
+ cnss_pr_err("Failed to decode request memory indication, msg_len: %u, err = %d\n",
+			    msg_len, ret);
+ return ret;
+ }
+
+ fw_mem->size = ind_msg.size;
+
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM,
+ false, NULL);
+
+ return 0;
+}
+
+static int cnss_qmi_pin_result_ind_hdlr(struct cnss_plat_data *plat_priv,
+ void *msg, unsigned int msg_len)
+{
+ struct msg_desc ind_desc;
+ struct wlfw_pin_connect_result_ind_msg_v01 ind_msg;
+ int ret = 0;
+
+ ind_desc.msg_id = QMI_WLFW_PIN_CONNECT_RESULT_IND_V01;
+ ind_desc.max_msg_len = WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN;
+ ind_desc.ei_array = wlfw_pin_connect_result_ind_msg_v01_ei;
+
+ ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len);
+ if (ret < 0) {
+ cnss_pr_err("Failed to decode pin connect result indication, msg_len: %u, err = %d\n",
+ msg_len, ret);
+ return ret;
+ }
+ if (ind_msg.pwr_pin_result_valid)
+ plat_priv->pin_result.fw_pwr_pin_result =
+ ind_msg.pwr_pin_result;
+ if (ind_msg.phy_io_pin_result_valid)
+ plat_priv->pin_result.fw_phy_io_pin_result =
+ ind_msg.phy_io_pin_result;
+ if (ind_msg.rf_pin_result_valid)
+ plat_priv->pin_result.fw_rf_pin_result = ind_msg.rf_pin_result;
+
+ cnss_pr_dbg("Pin connect Result: pwr_pin: 0x%x phy_io_pin: 0x%x rf_io_pin: 0x%x\n",
+ ind_msg.pwr_pin_result, ind_msg.phy_io_pin_result,
+ ind_msg.rf_pin_result);
+ return ret;
+}
+
+int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_respond_mem_req_msg_v01 req;
+ struct wlfw_respond_mem_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending respond memory message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ if (!fw_mem->pa || !fw_mem->size) {
+ cnss_pr_err("Memory for FW is not available!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n",
+ fw_mem->va, &fw_mem->pa, fw_mem->size);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.addr = fw_mem->pa;
+ req.size = fw_mem->size;
+
+ req_desc.max_msg_len = WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_RESPOND_MEM_REQ_V01;
+ req_desc.ei_array = wlfw_respond_mem_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_RESPOND_MEM_RESP_V01;
+ resp_desc.ei_array = wlfw_respond_mem_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send respond memory request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Respond memory request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ return 0;
+out:
+ CNSS_ASSERT(0);
+ return ret;
+}
+
+int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_cap_req_msg_v01 req;
+ struct wlfw_cap_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending target capability message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req_desc.max_msg_len = WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_CAP_REQ_V01;
+ req_desc.ei_array = wlfw_cap_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_CAP_RESP_V01;
+ resp_desc.ei_array = wlfw_cap_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send target capability request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Target capability request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ if (resp.chip_info_valid)
+ plat_priv->chip_info = resp.chip_info;
+ if (resp.board_info_valid)
+ plat_priv->board_info = resp.board_info;
+ else
+ plat_priv->board_info.board_id = 0xFF;
+ if (resp.soc_info_valid)
+ plat_priv->soc_info = resp.soc_info;
+ if (resp.fw_version_info_valid)
+ plat_priv->fw_version_info = resp.fw_version_info;
+
+	cnss_pr_dbg("Target capability: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x, fw_version: 0x%x, fw_build_timestamp: %s\n",
+ plat_priv->chip_info.chip_id,
+ plat_priv->chip_info.chip_family,
+ plat_priv->board_info.board_id, plat_priv->soc_info.soc_id,
+ plat_priv->fw_version_info.fw_version,
+ plat_priv->fw_version_info.fw_build_timestamp);
+
+ return 0;
+out:
+ CNSS_ASSERT(0);
+ return ret;
+}
+
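+/*
+ * Download the board data file to firmware: "bdwlan.elf" when no board
+ * ID was reported, otherwise "bdwlan.eXX" with the hex board ID. The
+ * blob is streamed in QMI_WLFW_MAX_DATA_SIZE_V01 sized segments; if the
+ * file is missing and bdf_bypass is set, a dummy payload is sent
+ * instead.
+ */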
+int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_bdf_download_req_msg_v01 *req;
+ struct wlfw_bdf_download_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ char filename[MAX_BDF_FILE_NAME];
+ const struct firmware *fw_entry;
+ const u8 *temp;
+ unsigned int remaining;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending BDF download message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (plat_priv->board_info.board_id == 0xFF)
+ snprintf(filename, sizeof(filename), DEFAULT_BDF_FILE_NAME);
+ else
+ snprintf(filename, sizeof(filename),
+ BDF_FILE_NAME_PREFIX "%02x",
+ plat_priv->board_info.board_id);
+
+ ret = request_firmware(&fw_entry, filename, &plat_priv->plat_dev->dev);
+ if (ret) {
+ cnss_pr_err("Failed to load BDF: %s\n", filename);
+ if (bdf_bypass) {
+ cnss_pr_info("bdf_bypass is enabled, sending dummy BDF\n");
+ temp = filename;
+ remaining = MAX_BDF_FILE_NAME;
+ goto bypass_bdf;
+ } else {
+ goto err_req_fw;
+ }
+ }
+
+ temp = fw_entry->data;
+ remaining = fw_entry->size;
+
+bypass_bdf:
+ cnss_pr_dbg("Downloading BDF: %s, size: %u\n", filename, remaining);
+
+ memset(&resp, 0, sizeof(resp));
+
+ req_desc.max_msg_len = WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_BDF_DOWNLOAD_REQ_V01;
+ req_desc.ei_array = wlfw_bdf_download_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_BDF_DOWNLOAD_RESP_V01;
+ resp_desc.ei_array = wlfw_bdf_download_resp_msg_v01_ei;
+
+ while (remaining) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = plat_priv->board_info.board_id;
+ req->total_size_valid = 1;
+ req->total_size = remaining;
+ req->seg_id_valid = 1;
+ req->data_valid = 1;
+ req->end_valid = 1;
+ req->bdf_type_valid = 1;
+ req->bdf_type = CNSS_BDF_ELF;
+
+ if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ memcpy(req->data, temp, req->data_len);
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc,
+ req, sizeof(*req), &resp_desc, &resp,
+ sizeof(resp), QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send BDF download request, err = %d\n",
+ ret);
+ goto err_send;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("BDF download request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto err_send;
+ }
+
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ }
+
+err_send:
+	/* fw_entry is NULL here if request_firmware() failed (bypass path) */
+	if (fw_entry)
+		release_firmware(fw_entry);
+err_req_fw:
+ kfree(req);
+out:
+ if (ret)
+ CNSS_ASSERT(0);
+ return ret;
+}
+
+int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_m3_info_req_msg_v01 req;
+ struct wlfw_m3_info_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ struct cnss_fw_mem *m3_mem = &plat_priv->m3_mem;
+ int ret = 0;
+
+ cnss_pr_dbg("Sending M3 information message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ if (!m3_mem->pa || !m3_mem->size) {
+ cnss_pr_err("Memory for M3 is not available!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cnss_pr_dbg("M3 memory, va: 0x%pK, pa: %pa, size: 0x%zx\n",
+ m3_mem->va, &m3_mem->pa, m3_mem->size);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.addr = plat_priv->m3_mem.pa;
+ req.size = plat_priv->m3_mem.size;
+
+ req_desc.max_msg_len = WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_M3_INFO_REQ_V01;
+ req_desc.ei_array = wlfw_m3_info_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_M3_INFO_RESP_V01;
+ resp_desc.ei_array = wlfw_m3_info_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send M3 information request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("M3 information request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ CNSS_ASSERT(0);
+ return ret;
+}
+
+int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
+ enum wlfw_driver_mode_enum_v01 mode)
+{
+ struct wlfw_wlan_mode_req_msg_v01 req;
+ struct wlfw_wlan_mode_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Sending mode message, state: 0x%lx, mode: %d\n",
+ plat_priv->driver_state, mode);
+
+ if (mode == QMI_WLFW_OFF_V01 &&
+ test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ cnss_pr_dbg("Recovery is in progress, ignore mode off request.\n");
+ return 0;
+ }
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.mode = mode;
+ req.hw_debug_valid = 1;
+ req.hw_debug = 0;
+
+ req_desc.max_msg_len = WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_WLAN_MODE_REQ_V01;
+ req_desc.ei_array = wlfw_wlan_mode_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_WLAN_MODE_RESP_V01;
+ resp_desc.ei_array = wlfw_wlan_mode_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ if (mode == QMI_WLFW_OFF_V01 && ret == -ENETRESET) {
+ cnss_pr_dbg("WLFW service is disconnected while sending mode off request.\n");
+ return 0;
+ }
+ cnss_pr_err("Failed to send mode request, mode: %d, err = %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("Mode request failed, mode: %d, result: %d err: %d\n",
+ mode, resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ return 0;
+out:
+ if (mode != QMI_WLFW_OFF_V01)
+ CNSS_ASSERT(0);
+ return ret;
+}
+
+int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
+ struct wlfw_wlan_cfg_req_msg_v01 *data)
+{
+ struct wlfw_wlan_cfg_req_msg_v01 req;
+ struct wlfw_wlan_cfg_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0;
+
+	if (!plat_priv)
+		return -ENODEV;
+
+	cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n",
+		    plat_priv->driver_state);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ memcpy(&req, data, sizeof(req));
+
+ req_desc.max_msg_len = WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_WLAN_CFG_REQ_V01;
+ req_desc.ei_array = wlfw_wlan_cfg_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_WLAN_CFG_RESP_V01;
+ resp_desc.ei_array = wlfw_wlan_cfg_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send WLAN config request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("WLAN config request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ return 0;
+out:
+ CNSS_ASSERT(0);
+ return ret;
+}
+
+int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv,
+ u32 offset, u32 mem_type,
+ u32 data_len, u8 *data)
+{
+ struct wlfw_athdiag_read_req_msg_v01 req;
+ struct wlfw_athdiag_read_resp_msg_v01 *resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (!plat_priv->qmi_wlfw_clnt)
+ return -EINVAL;
+
+ cnss_pr_dbg("athdiag read: state 0x%lx, offset %x, mem_type %x, data_len %u\n",
+ plat_priv->driver_state, offset, mem_type, data_len);
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ memset(&req, 0, sizeof(req));
+
+ req.offset = offset;
+ req.mem_type = mem_type;
+ req.data_len = data_len;
+
+ req_desc.max_msg_len = WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_ATHDIAG_READ_REQ_V01;
+ req_desc.ei_array = wlfw_athdiag_read_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_ATHDIAG_READ_RESP_V01;
+ resp_desc.ei_array = wlfw_athdiag_read_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req,
+ sizeof(req), &resp_desc, resp, sizeof(*resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send athdiag read request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("athdiag read request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = resp->resp.result;
+ goto out;
+ }
+
+ if (!resp->data_valid || resp->data_len != data_len) {
+ cnss_pr_err("athdiag read data is invalid, data_valid = %u, data_len = %u\n",
+ resp->data_valid, resp->data_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(data, resp->data, resp->data_len);
+
+out:
+ kfree(resp);
+ return ret;
+}
+
+int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv,
+ u32 offset, u32 mem_type,
+ u32 data_len, u8 *data)
+{
+ struct wlfw_athdiag_write_req_msg_v01 *req;
+ struct wlfw_athdiag_write_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ if (!plat_priv->qmi_wlfw_clnt)
+ return -EINVAL;
+
+ cnss_pr_dbg("athdiag write: state 0x%lx, offset %x, mem_type %x, data_len %u, data %p\n",
+ plat_priv->driver_state, offset, mem_type, data_len, data);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ req->offset = offset;
+ req->mem_type = mem_type;
+ req->data_len = data_len;
+ memcpy(req->data, data, data_len);
+
+ req_desc.max_msg_len = WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_ATHDIAG_WRITE_REQ_V01;
+ req_desc.ei_array = wlfw_athdiag_write_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_ATHDIAG_WRITE_RESP_V01;
+ resp_desc.ei_array = wlfw_athdiag_write_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, req,
+ sizeof(*req), &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Failed to send athdiag write request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("athdiag write request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
+int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
+ u8 fw_log_mode)
+{
+ int ret;
+ struct wlfw_ini_req_msg_v01 req;
+ struct wlfw_ini_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ cnss_pr_dbg("Sending ini sync request, state: 0x%lx, fw_log_mode: %d\n",
+ plat_priv->driver_state, fw_log_mode);
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.enablefwlog_valid = 1;
+ req.enablefwlog = fw_log_mode;
+
+ req_desc.max_msg_len = WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN;
+ req_desc.msg_id = QMI_WLFW_INI_REQ_V01;
+ req_desc.ei_array = wlfw_ini_req_msg_v01_ei;
+
+ resp_desc.max_msg_len = WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN;
+ resp_desc.msg_id = QMI_WLFW_INI_RESP_V01;
+ resp_desc.ei_array = wlfw_ini_resp_msg_v01_ei;
+
+ ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt,
+ &req_desc, &req, sizeof(req),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_WLFW_TIMEOUT_MS);
+ if (ret < 0) {
+ cnss_pr_err("Send INI req failed fw_log_mode: %d, ret: %d\n",
+ fw_log_mode, ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("QMI INI request rejected, fw_log_mode:%d result:%d error:%d\n",
+ fw_log_mode, resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static void cnss_wlfw_clnt_ind(struct qmi_handle *handle,
+ unsigned int msg_id, void *msg,
+ unsigned int msg_len, void *ind_cb_priv)
+{
+ struct cnss_plat_data *plat_priv = ind_cb_priv;
+
+ cnss_pr_dbg("Received QMI WLFW indication, msg_id: 0x%x, msg_len: %d\n",
+ msg_id, msg_len);
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL!\n");
+ return;
+ }
+
+ switch (msg_id) {
+ case QMI_WLFW_REQUEST_MEM_IND_V01:
+ cnss_wlfw_request_mem_ind_hdlr(plat_priv, msg, msg_len);
+ break;
+ case QMI_WLFW_FW_MEM_READY_IND_V01:
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_FW_MEM_READY,
+ false, NULL);
+ break;
+ case QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01:
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE,
+ false, NULL);
+ break;
+ case QMI_WLFW_FW_READY_IND_V01:
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_FW_READY,
+ false, NULL);
+ break;
+ case QMI_WLFW_PIN_CONNECT_RESULT_IND_V01:
+ cnss_qmi_pin_result_ind_hdlr(plat_priv, msg, msg_len);
+ break;
+ default:
+ cnss_pr_err("Invalid QMI WLFW indication, msg_id: 0x%x\n",
+ msg_id);
+ break;
+ }
+}
+
+unsigned int cnss_get_qmi_timeout(void)
+{
+ cnss_pr_dbg("QMI timeout is %u ms\n", QMI_WLFW_TIMEOUT_MS);
+
+ return QMI_WLFW_TIMEOUT_MS;
+}
+EXPORT_SYMBOL(cnss_get_qmi_timeout);
+
+int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ if (!plat_priv)
+ return -ENODEV;
+
+ plat_priv->qmi_wlfw_clnt =
+ qmi_handle_create(cnss_wlfw_clnt_notifier, plat_priv);
+ if (!plat_priv->qmi_wlfw_clnt) {
+ cnss_pr_err("Failed to create QMI client handle!\n");
+ ret = -ENOMEM;
+ goto err_create_handle;
+ }
+
+ ret = qmi_connect_to_service(plat_priv->qmi_wlfw_clnt,
+ WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01,
+ WLFW_SERVICE_INS_ID_V01);
+ if (ret < 0) {
+ cnss_pr_err("Failed to connect to QMI WLFW service, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_register_ind_cb(plat_priv->qmi_wlfw_clnt,
+ cnss_wlfw_clnt_ind, plat_priv);
+ if (ret < 0) {
+ cnss_pr_err("Failed to register QMI WLFW service indication callback, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ set_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state);
+
+ cnss_pr_info("QMI WLFW service connected, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ ret = cnss_wlfw_host_cap_send_sync(plat_priv);
+ if (ret < 0)
+ goto out;
+
+ ret = cnss_wlfw_ind_register_send_sync(plat_priv);
+ if (ret < 0)
+ goto out;
+
+ return 0;
+out:
+ qmi_handle_destroy(plat_priv->qmi_wlfw_clnt);
+ plat_priv->qmi_wlfw_clnt = NULL;
+err_create_handle:
+ CNSS_ASSERT(0);
+ return ret;
+}
+
+int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ qmi_handle_destroy(plat_priv->qmi_wlfw_clnt);
+ plat_priv->qmi_wlfw_clnt = NULL;
+
+ clear_bit(CNSS_QMI_WLFW_CONNECTED, &plat_priv->driver_state);
+
+ cnss_pr_info("QMI WLFW service disconnected, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ return 0;
+}
+
+int cnss_qmi_init(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ INIT_WORK(&plat_priv->qmi_recv_msg_work,
+ cnss_wlfw_clnt_notifier_work);
+
+ plat_priv->qmi_wlfw_clnt_nb.notifier_call =
+ cnss_wlfw_clnt_svc_event_notifier;
+
+ ret = qmi_svc_event_notifier_register(WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01,
+ WLFW_SERVICE_INS_ID_V01,
+ &plat_priv->qmi_wlfw_clnt_nb);
+ if (ret < 0)
+ cnss_pr_err("Failed to register QMI event notifier, err = %d\n",
+ ret);
+
+ return ret;
+}
+
+void cnss_qmi_deinit(struct cnss_plat_data *plat_priv)
+{
+ qmi_svc_event_notifier_unregister(WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01,
+ WLFW_SERVICE_INS_ID_V01,
+ &plat_priv->qmi_wlfw_clnt_nb);
+}
diff --git a/drivers/net/wireless/cnss2/qmi.h b/drivers/net/wireless/cnss2/qmi.h
new file mode 100644
index 000000000000..70d8d404c48e
--- /dev/null
+++ b/drivers/net/wireless/cnss2/qmi.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CNSS_QMI_H
+#define _CNSS_QMI_H
+
+#include "wlan_firmware_service_v01.h"
+
+struct cnss_plat_data;
+
+int cnss_qmi_init(struct cnss_plat_data *plat_priv);
+void cnss_qmi_deinit(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_server_exit(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_m3_dnld_send_sync(struct cnss_plat_data *plat_priv);
+int cnss_wlfw_wlan_mode_send_sync(struct cnss_plat_data *plat_priv,
+ enum wlfw_driver_mode_enum_v01 mode);
+int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
+ struct wlfw_wlan_cfg_req_msg_v01 *data);
+int cnss_wlfw_athdiag_read_send_sync(struct cnss_plat_data *plat_priv,
+ u32 offset, u32 mem_type,
+ u32 data_len, u8 *data);
+int cnss_wlfw_athdiag_write_send_sync(struct cnss_plat_data *plat_priv,
+ u32 offset, u32 mem_type,
+ u32 data_len, u8 *data);
+int cnss_wlfw_ini_send_sync(struct cnss_plat_data *plat_priv,
+ u8 fw_log_mode);
+
+#endif /* _CNSS_QMI_H */
diff --git a/drivers/net/wireless/cnss2/utils.c b/drivers/net/wireless/cnss2/utils.c
new file mode 100644
index 000000000000..9ffe386e3677
--- /dev/null
+++ b/drivers/net/wireless/cnss2/utils.c
@@ -0,0 +1,129 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define CNSS_MAX_CH_NUM 45
+
+static DEFINE_MUTEX(unsafe_channel_list_lock);
+static DEFINE_MUTEX(dfs_nol_info_lock);
+
+static struct cnss_unsafe_channel_list {
+ u16 unsafe_ch_count;
+ u16 unsafe_ch_list[CNSS_MAX_CH_NUM];
+} unsafe_channel_list;
+
+static struct cnss_dfs_nol_info {
+ void *dfs_nol_info;
+ u16 dfs_nol_info_len;
+} dfs_nol_info;
+
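+/*
+ * Mutex-protected caches of the unsafe-channel list and the DFS NOL
+ * blob; the setters and getters below are exported for the WLAN host
+ * driver to store and retrieve them.
+ */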
+int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count)
+{
+ mutex_lock(&unsafe_channel_list_lock);
+ if ((!unsafe_ch_list) || (ch_count > CNSS_MAX_CH_NUM)) {
+ mutex_unlock(&unsafe_channel_list_lock);
+ return -EINVAL;
+ }
+
+ unsafe_channel_list.unsafe_ch_count = ch_count;
+
+ if (ch_count != 0) {
+ memcpy((char *)unsafe_channel_list.unsafe_ch_list,
+ (char *)unsafe_ch_list, ch_count * sizeof(u16));
+ }
+ mutex_unlock(&unsafe_channel_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_set_wlan_unsafe_channel);
+
+int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list,
+ u16 *ch_count, u16 buf_len)
+{
+ mutex_lock(&unsafe_channel_list_lock);
+ if (!unsafe_ch_list || !ch_count) {
+ mutex_unlock(&unsafe_channel_list_lock);
+ return -EINVAL;
+ }
+
+ if (buf_len < (unsafe_channel_list.unsafe_ch_count * sizeof(u16))) {
+ mutex_unlock(&unsafe_channel_list_lock);
+ return -ENOMEM;
+ }
+
+ *ch_count = unsafe_channel_list.unsafe_ch_count;
+ memcpy((char *)unsafe_ch_list,
+ (char *)unsafe_channel_list.unsafe_ch_list,
+ unsafe_channel_list.unsafe_ch_count * sizeof(u16));
+ mutex_unlock(&unsafe_channel_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_get_wlan_unsafe_channel);
+
+int cnss_wlan_set_dfs_nol(const void *info, u16 info_len)
+{
+ void *temp;
+ struct cnss_dfs_nol_info *dfs_info;
+
+ mutex_lock(&dfs_nol_info_lock);
+ if (!info || !info_len) {
+ mutex_unlock(&dfs_nol_info_lock);
+ return -EINVAL;
+ }
+
+ temp = kmalloc(info_len, GFP_KERNEL);
+ if (!temp) {
+ mutex_unlock(&dfs_nol_info_lock);
+ return -ENOMEM;
+ }
+
+ memcpy(temp, info, info_len);
+ dfs_info = &dfs_nol_info;
+ kfree(dfs_info->dfs_nol_info);
+
+ dfs_info->dfs_nol_info = temp;
+ dfs_info->dfs_nol_info_len = info_len;
+ mutex_unlock(&dfs_nol_info_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(cnss_wlan_set_dfs_nol);
+
+int cnss_wlan_get_dfs_nol(void *info, u16 info_len)
+{
+ int len;
+ struct cnss_dfs_nol_info *dfs_info;
+
+ mutex_lock(&dfs_nol_info_lock);
+ if (!info || !info_len) {
+ mutex_unlock(&dfs_nol_info_lock);
+ return -EINVAL;
+ }
+
+ dfs_info = &dfs_nol_info;
+
+ if (!dfs_info->dfs_nol_info || dfs_info->dfs_nol_info_len == 0) {
+ mutex_unlock(&dfs_nol_info_lock);
+ return -ENOENT;
+ }
+
+ len = min(info_len, dfs_info->dfs_nol_info_len);
+
+ memcpy(info, dfs_info->dfs_nol_info, len);
+ mutex_unlock(&dfs_nol_info_lock);
+
+ return len;
+}
+EXPORT_SYMBOL(cnss_wlan_get_dfs_nol);
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
new file mode 100644
index 000000000000..84a4707e9cc3
--- /dev/null
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
@@ -0,0 +1,2168 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "wlan_firmware_service_v01.h"
+
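+/*
+ * QMI message descriptors for the WLAN firmware (WLFW) service: each
+ * elem_info array describes the TLV wire layout of one structure or
+ * message and is consumed by the QMI encode/decode core.
+ */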
+static struct elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ nentries),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ nbytes_max),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ flags),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ service_id),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+ id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_memory_region_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ region_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ secure_flag),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_rf_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_chip_info_s_v01,
+ chip_family),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_rf_board_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_board_info_s_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_soc_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_soc_info_s_v01,
+ soc_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct elem_info wlfw_fw_version_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_fw_version_info_s_v01,
+ fw_version),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_fw_version_info_s_v01,
+ fw_build_timestamp),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_ind_register_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_ind_register_req_msg_v01,
+ fw_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ msa_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ msa_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ pin_connect_result_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ pin_connect_result_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ request_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ request_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ cold_boot_cal_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ cold_boot_cal_done_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ rejuvenate_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ rejuvenate_enable),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_ind_register_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_ind_register_resp_msg_v01,
+ fw_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_ind_register_resp_msg_v01,
+ fw_status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ pwr_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ pwr_pin_result),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ phy_io_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ phy_io_pin_result),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ rf_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct
+ wlfw_pin_connect_result_ind_msg_v01,
+ rf_pin_result),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_driver_mode_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ hw_debug_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ hw_debug),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_wlan_mode_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ host_version_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ host_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg),
+ .ei_array = wlfw_ce_tgt_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SVC_V01,
+ .elem_size = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg),
+ .ei_array = wlfw_ce_svc_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01,
+ .elem_size = sizeof(struct wlfw_shadow_reg_cfg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg),
+ .ei_array = wlfw_shadow_reg_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01,
+ .elem_size = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2),
+ .ei_array = wlfw_shadow_reg_v2_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_wlan_cfg_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rf_chip_info_s_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ chip_info),
+ .ei_array = wlfw_rf_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ board_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rf_board_info_s_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ board_info),
+ .ei_array = wlfw_rf_board_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ soc_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_soc_info_s_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ soc_info),
+ .ei_array = wlfw_soc_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_version_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_fw_version_info_s_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_version_info),
+ .ei_array = wlfw_fw_version_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_build_id_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_build_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ num_macs_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ num_macs),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ bdf_type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ bdf_type),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_bdf_download_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_report_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ meta_data_len),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = QMI_WLFW_MAX_NUM_CAL_V01,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ meta_data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_report_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(
+ struct wlfw_initiate_cal_download_ind_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_cal_download_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_update_ind_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_initiate_cal_update_ind_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_update_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_update_req_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_update_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_msa_info_req_msg_v01,
+ msa_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_info_req_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ mem_region_info_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01,
+ .elem_size = sizeof(struct wlfw_memory_region_info_s_v01),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ mem_region_info),
+ .ei_array = wlfw_memory_region_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_ready_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_ini_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ini_req_msg_v01,
+ enablefwlog_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ini_req_msg_v01,
+ enablefwlog),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_ini_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_ini_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ mem_type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ wlfw_athdiag_read_resp_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_req_msg_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_req_msg_v01,
+ mem_type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .is_array = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_athdiag_write_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_vbatt_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_vbatt_req_msg_v01,
+ voltage_uv),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_vbatt_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
+ mac_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAC_ADDR_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .is_array = STATIC_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
+ mac_addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_mac_addr_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_host_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_host_cap_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_respond_mem_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ cause_for_rejuvenation_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ cause_for_rejuvenation),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ requesting_sub_system_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ requesting_sub_system),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ line_number_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ line_number),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ function_name_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ function_name),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ wlfw_rejuvenate_ack_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_req_msg_v01,
+ mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_req_msg_v01,
+ mask),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ prev_mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ prev_mask),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ curr_mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ curr_mask),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_m3_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_m3_info_req_msg_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_m3_info_req_msg_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+struct elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_m3_info_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+		.tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
new file mode 100644
index 000000000000..cb8225a7c3c3
--- /dev/null
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
@@ -0,0 +1,645 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef WLAN_FIRMWARE_SERVICE_V01_H
+#define WLAN_FIRMWARE_SERVICE_V01_H
+
+#include <linux/qmi_encdec.h>
+#include <soc/qcom/msm_qmi_interface.h>
+
+#define WLFW_SERVICE_ID_V01 0x45
+#define WLFW_SERVICE_VERS_V01 0x01
+
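+/*
+ * WLFW QMI message IDs. A request and its response share the same
+ * message ID; indications have IDs of their own.
+ */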
+#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
+#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
+#define QMI_WLFW_M3_INFO_REQ_V01 0x003C
+#define QMI_WLFW_CAP_REQ_V01 0x0024
+#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
+#define QMI_WLFW_M3_INFO_RESP_V01 0x003C
+#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
+#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
+#define QMI_WLFW_INI_RESP_V01 0x002F
+#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
+#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033
+#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MSA_READY_IND_V01 0x002B
+#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0038
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_REJUVENATE_IND_V01 0x0039
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
+#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_MSA_READY_RESP_V01 0x002E
+#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029
+#define QMI_WLFW_INI_REQ_V01 0x002F
+#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A
+#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D
+#define QMI_WLFW_MSA_READY_REQ_V01 0x002E
+#define QMI_WLFW_CAP_RESP_V01 0x0024
+#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A
+#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030
+#define QMI_WLFW_VBATT_REQ_V01 0x0032
+#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033
+#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLFW_VBATT_RESP_V01 0x0032
+#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D
+#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027
+#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030
+#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020
+
+#define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2
+#define QMI_WLFW_MAX_NUM_CAL_V01 5
+#define QMI_WLFW_MAX_DATA_SIZE_V01 6144
+#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_CE_V01 12
+#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
+#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 512
+#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
+#define QMI_WLFW_MAX_STR_LEN_V01 16
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLFW_MAC_ADDR_SIZE_V01 6
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01 36
+#define QMI_WLFW_MAX_NUM_SVC_V01 24
+
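+/* WLAN driver operating modes requested via QMI_WLFW_WLAN_MODE_REQ_V01 */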
+enum wlfw_driver_mode_enum_v01 {
+ WLFW_DRIVER_MODE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_MISSION_V01 = 0,
+ QMI_WLFW_FTM_V01 = 1,
+ QMI_WLFW_EPPING_V01 = 2,
+ QMI_WLFW_WALTEST_V01 = 3,
+ QMI_WLFW_OFF_V01 = 4,
+ QMI_WLFW_CCPM_V01 = 5,
+ QMI_WLFW_QVIT_V01 = 6,
+ QMI_WLFW_CALIBRATION_V01 = 7,
+ WLFW_DRIVER_MODE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_cal_temp_id_enum_v01 {
+ WLFW_CAL_TEMP_ID_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0,
+ QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1,
+ QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2,
+ QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3,
+ QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4,
+ WLFW_CAL_TEMP_ID_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum wlfw_pipedir_enum_v01 {
+ WLFW_PIPEDIR_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+ QMI_WLFW_PIPEDIR_IN_V01 = 1,
+ QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+ QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+ WLFW_PIPEDIR_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
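+/* Copy engine attribute flags used in the pipe configuration, followed
+ * by 64-bit firmware status and dynamic feature mask bit definitions.
+ */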
+#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00)
+#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01)
+#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02)
+#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((u32)0x04)
+#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((u32)0x08)
+#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((u32)0x10)
+
+#define QMI_WLFW_ALREADY_REGISTERED_V01 ((u64)0x01ULL)
+#define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL)
+#define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL)
+#define QMI_WLFW_FW_MEM_READY_V01 ((u64)0x08ULL)
+
+#define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL)
+
+struct wlfw_ce_tgt_pipe_cfg_s_v01 {
+ u32 pipe_num;
+ enum wlfw_pipedir_enum_v01 pipe_dir;
+ u32 nentries;
+ u32 nbytes_max;
+ u32 flags;
+};
+
+struct wlfw_ce_svc_pipe_cfg_s_v01 {
+ u32 service_id;
+ enum wlfw_pipedir_enum_v01 pipe_dir;
+ u32 pipe_num;
+};
+
+struct wlfw_shadow_reg_cfg_s_v01 {
+ u16 id;
+ u16 offset;
+};
+
+struct wlfw_shadow_reg_v2_cfg_s_v01 {
+ u32 addr;
+};
+
+struct wlfw_memory_region_info_s_v01 {
+ u64 region_addr;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct wlfw_rf_chip_info_s_v01 {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct wlfw_rf_board_info_s_v01 {
+ u32 board_id;
+};
+
+struct wlfw_soc_info_s_v01 {
+ u32 soc_id;
+};
+
+struct wlfw_fw_version_info_s_v01 {
+ u32 fw_version;
+ char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
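+/*
+ * Message structures. Optional TLVs follow the QMI convention of a
+ * <field>_valid flag immediately preceding the field it qualifies.
+ */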
+struct wlfw_ind_register_req_msg_v01 {
+ u8 fw_ready_enable_valid;
+ u8 fw_ready_enable;
+ u8 initiate_cal_download_enable_valid;
+ u8 initiate_cal_download_enable;
+ u8 initiate_cal_update_enable_valid;
+ u8 initiate_cal_update_enable;
+ u8 msa_ready_enable_valid;
+ u8 msa_ready_enable;
+ u8 pin_connect_result_enable_valid;
+ u8 pin_connect_result_enable;
+ u8 client_id_valid;
+ u32 client_id;
+ u8 request_mem_enable_valid;
+ u8 request_mem_enable;
+ u8 fw_mem_ready_enable_valid;
+ u8 fw_mem_ready_enable;
+ u8 cold_boot_cal_done_enable_valid;
+ u8 cold_boot_cal_done_enable;
+ u8 rejuvenate_enable_valid;
+ u32 rejuvenate_enable;
+};
+
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 46
+extern struct elem_info wlfw_ind_register_req_msg_v01_ei[];
+
+struct wlfw_ind_register_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 fw_status_valid;
+ u64 fw_status;
+};
+
+#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_ind_register_resp_msg_v01_ei[];
+
+struct wlfw_fw_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_ready_ind_msg_v01_ei[];
+
+struct wlfw_msa_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_msa_ready_ind_msg_v01_ei[];
+
+struct wlfw_pin_connect_result_ind_msg_v01 {
+ u8 pwr_pin_result_valid;
+ u32 pwr_pin_result;
+ u8 phy_io_pin_result_valid;
+ u32 phy_io_pin_result;
+ u8 rf_pin_result_valid;
+ u32 rf_pin_result;
+};
+
+#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
+extern struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
+
+struct wlfw_wlan_mode_req_msg_v01 {
+ enum wlfw_driver_mode_enum_v01 mode;
+ u8 hw_debug_valid;
+ u8 hw_debug;
+};
+
+#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_wlan_mode_req_msg_v01_ei[];
+
+struct wlfw_wlan_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_req_msg_v01 {
+ u8 host_version_valid;
+ char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1];
+ u8 tgt_cfg_valid;
+ u32 tgt_cfg_len;
+ struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01];
+ u8 svc_cfg_valid;
+ u32 svc_cfg_len;
+ struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01];
+ u8 shadow_reg_valid;
+ u32 shadow_reg_len;
+ struct wlfw_shadow_reg_cfg_s_v01
+ shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01];
+ u8 shadow_reg_v2_valid;
+ u32 shadow_reg_v2_len;
+ struct wlfw_shadow_reg_v2_cfg_s_v01
+ shadow_reg_v2[QMI_WLFW_MAX_NUM_SHADOW_REG_V2_V01];
+};
+
+#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
+extern struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
+
+struct wlfw_cap_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cap_req_msg_v01_ei[];
+
+struct wlfw_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 chip_info_valid;
+ struct wlfw_rf_chip_info_s_v01 chip_info;
+ u8 board_info_valid;
+ struct wlfw_rf_board_info_s_v01 board_info;
+ u8 soc_info_valid;
+ struct wlfw_soc_info_s_v01 soc_info;
+ u8 fw_version_info_valid;
+ struct wlfw_fw_version_info_s_v01 fw_version_info;
+ u8 fw_build_id_valid;
+ char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
+ u8 num_macs_valid;
+ u8 num_macs;
+};
+
+#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207
+extern struct elem_info wlfw_cap_resp_msg_v01_ei[];
+
+struct wlfw_bdf_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 bdf_type_valid;
+ u8 bdf_type;
+};
+
+#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182
+extern struct elem_info wlfw_bdf_download_req_msg_v01_ei[];
+
+struct wlfw_bdf_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_bdf_download_resp_msg_v01_ei[];
+
+struct wlfw_cal_report_req_msg_v01 {
+ u32 meta_data_len;
+ enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01];
+};
+
+#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 24
+extern struct elem_info wlfw_cal_report_req_msg_v01_ei[];
+
+struct wlfw_cal_report_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_cal_report_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_download_ind_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+};
+
+#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
+
+struct wlfw_cal_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
+extern struct elem_info wlfw_cal_download_req_msg_v01_ei[];
+
+struct wlfw_cal_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_cal_download_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_update_ind_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u32 total_size;
+};
+
+#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
+extern struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
+
+struct wlfw_cal_update_req_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u32 seg_id;
+};
+
+#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
+extern struct elem_info wlfw_cal_update_req_msg_v01_ei[];
+
+struct wlfw_cal_update_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
+extern struct elem_info wlfw_cal_update_resp_msg_v01_ei[];
+
+struct wlfw_msa_info_req_msg_v01 {
+ u64 msa_addr;
+ u32 size;
+};
+
+#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_msa_info_req_msg_v01_ei[];
+
+struct wlfw_msa_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u32 mem_region_info_len;
+ struct wlfw_memory_region_info_s_v01
+ mem_region_info[QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01];
+};
+
+#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
+extern struct elem_info wlfw_msa_info_resp_msg_v01_ei[];
+
+struct wlfw_msa_ready_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_msa_ready_req_msg_v01_ei[];
+
+struct wlfw_msa_ready_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_msa_ready_resp_msg_v01_ei[];
+
+struct wlfw_ini_req_msg_v01 {
+ u8 enablefwlog_valid;
+ u8 enablefwlog;
+};
+
+#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct elem_info wlfw_ini_req_msg_v01_ei[];
+
+struct wlfw_ini_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_ini_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_read_req_msg_v01 {
+ u32 offset;
+ u32 mem_type;
+ u32 data_len;
+};
+
+#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
+extern struct elem_info wlfw_athdiag_read_req_msg_v01_ei[];
+
+struct wlfw_athdiag_read_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 524
+extern struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_write_req_msg_v01 {
+ u32 offset;
+ u32 mem_type;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 531
+extern struct elem_info wlfw_athdiag_write_req_msg_v01_ei[];
+
+struct wlfw_athdiag_write_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
+
+struct wlfw_vbatt_req_msg_v01 {
+ u64 voltage_uv;
+};
+
+#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_vbatt_req_msg_v01_ei[];
+
+struct wlfw_vbatt_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_vbatt_resp_msg_v01_ei[];
+
+struct wlfw_mac_addr_req_msg_v01 {
+ u8 mac_addr_valid;
+ u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01];
+};
+
+#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
+extern struct elem_info wlfw_mac_addr_req_msg_v01_ei[];
+
+struct wlfw_mac_addr_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_mac_addr_resp_msg_v01_ei[];
+
+struct wlfw_host_cap_req_msg_v01 {
+ u8 daemon_support_valid;
+ u8 daemon_support;
+};
+
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4
+extern struct elem_info wlfw_host_cap_req_msg_v01_ei[];
+
+struct wlfw_host_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_host_cap_resp_msg_v01_ei[];
+
+struct wlfw_request_mem_ind_msg_v01 {
+ u32 size;
+};
+
+#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_request_mem_ind_msg_v01_ei[];
+
+struct wlfw_respond_mem_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_respond_mem_req_msg_v01_ei[];
+
+struct wlfw_respond_mem_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_respond_mem_resp_msg_v01_ei[];
+
+struct wlfw_fw_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_FW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[];
+
+struct wlfw_cold_boot_cal_done_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_COLD_BOOT_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ind_msg_v01 {
+ u8 cause_for_rejuvenation_valid;
+ u8 cause_for_rejuvenation;
+ u8 requesting_sub_system_valid;
+ u8 requesting_sub_system;
+ u8 line_number_valid;
+ u16 line_number;
+ u8 function_name_valid;
+ char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+};
+
+#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
+extern struct elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_req_msg_v01 {
+ u8 mask_valid;
+ u64 mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
+extern struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 prev_mask_valid;
+ u64 prev_mask;
+ u8 curr_mask_valid;
+ u64 curr_mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
+extern struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
+
+struct wlfw_m3_info_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+#define WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern struct elem_info wlfw_m3_info_req_msg_v01_ei[];
+
+struct wlfw_m3_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info wlfw_m3_info_resp_msg_v01_ei[];
+
+#endif
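
The request/response structures above follow the usual QMI optional-TLV convention: every optional field travels together with a <field>_valid flag, and the *_MAX_MSG_LEN defines give the encoded wire-size ceiling. A minimal sketch of how a client might fill wlfw_ind_register_req_msg_v01 before encoding it; the helper name and the chosen indications are illustrative only and assume the usual <linux/string.h> include:

static void wlfw_fill_ind_register_example(struct wlfw_ind_register_req_msg_v01 *req,
					   u32 client_id)
{
	/* Unset optional TLVs must keep their _valid flags at 0. */
	memset(req, 0, sizeof(*req));

	req->client_id_valid = 1;
	req->client_id = client_id;

	/* Request the firmware-ready indication. */
	req->fw_ready_enable_valid = 1;
	req->fw_ready_enable = 1;

	/* Request memory-request indications. */
	req->request_mem_enable_valid = 1;
	req->request_mem_enable = 1;
}
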
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index e17c65c1e4e7..209263ccced7 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -389,6 +389,7 @@ static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
msm_trigger_wdog_bite();
#endif
+ scm_disable_sdi();
halt_spmi_pmic_arbiter();
deassert_ps_hold();
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 5219df44cfec..ae8d475a9385 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -23,9 +23,13 @@ enum migrate_reason {
MR_SYSCALL, /* also applies to cpusets */
MR_MEMPOLICY_MBIND,
MR_NUMA_MISPLACED,
- MR_CMA
+ MR_CMA,
+ MR_TYPES
};
+/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
+extern char *migrate_reason_names[MR_TYPES];
+
#ifdef CONFIG_MIGRATION
extern void putback_movable_pages(struct list_head *l);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e599eb35c10e..7d0b5e7bcadb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -529,7 +529,6 @@ void put_page(struct page *page);
void put_pages_list(struct list_head *pages);
void split_page(struct page *page, unsigned int order);
-int split_free_page(struct page *page);
/*
* Compound pages have a destructor function. Provide a
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 9d1161a8d6b7..2b1be7efde55 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -71,6 +71,9 @@ enum {
*/
extern int *get_migratetype_fallbacks(int mtype);
+/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
+extern char * const migratetype_names[MIGRATE_TYPES];
+
#ifdef CONFIG_CMA
bool is_cma_pageblock(struct page *page);
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 17f118a82854..03f2a3e7d76d 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
struct pglist_data;
struct page_ext_operations {
@@ -44,8 +45,8 @@ struct page_ext {
#ifdef CONFIG_PAGE_OWNER
unsigned int order;
gfp_t gfp_mask;
- unsigned int nr_entries;
- unsigned long trace_entries[8];
+ int last_migrate_reason;
+ depot_stack_handle_t handle;
#endif
};
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index bf268fa92c5b..fec40271339f 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops;
static inline bool page_is_young(struct page *page)
{
- return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return false;
+
+ return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
static inline void set_page_young(struct page *page)
{
- set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return;
+
+ set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
static inline bool test_and_clear_page_young(struct page *page)
{
- return test_and_clear_bit(PAGE_EXT_YOUNG,
- &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return false;
+
+ return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
}
static inline bool page_is_idle(struct page *page)
{
- return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return false;
+
+ return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
static inline void set_page_idle(struct page *page)
{
- set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return;
+
+ set_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
static inline void clear_page_idle(struct page *page)
{
- clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags);
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return;
+
+ clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
}
#endif /* CONFIG_64BIT */
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index cacaabea8a09..30583ab0ffb1 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -1,38 +1,52 @@
#ifndef __LINUX_PAGE_OWNER_H
#define __LINUX_PAGE_OWNER_H
+#include <linux/jump_label.h>
+
#ifdef CONFIG_PAGE_OWNER
-extern bool page_owner_inited;
+extern struct static_key_false page_owner_inited;
extern struct page_ext_operations page_owner_ops;
extern void __reset_page_owner(struct page *page, unsigned int order);
extern void __set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask);
-extern gfp_t __get_page_owner_gfp(struct page *page);
+extern void __split_page_owner(struct page *page, unsigned int order);
+extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
+extern void __set_page_owner_migrate_reason(struct page *page, int reason);
+extern void __dump_page_owner(struct page *page);
static inline void reset_page_owner(struct page *page, unsigned int order)
{
- if (likely(!page_owner_inited))
- return;
-
- __reset_page_owner(page, order);
+ if (static_branch_unlikely(&page_owner_inited))
+ __reset_page_owner(page, order);
}
static inline void set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask)
{
- if (likely(!page_owner_inited))
- return;
-
- __set_page_owner(page, order, gfp_mask);
+ if (static_branch_unlikely(&page_owner_inited))
+ __set_page_owner(page, order, gfp_mask);
}
-static inline gfp_t get_page_owner_gfp(struct page *page)
+static inline void split_page_owner(struct page *page, unsigned int order)
{
- if (likely(!page_owner_inited))
- return 0;
-
- return __get_page_owner_gfp(page);
+ if (static_branch_unlikely(&page_owner_inited))
+ __split_page_owner(page, order);
+}
+static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+{
+ if (static_branch_unlikely(&page_owner_inited))
+ __copy_page_owner(oldpage, newpage);
+}
+static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+{
+ if (static_branch_unlikely(&page_owner_inited))
+ __set_page_owner_migrate_reason(page, reason);
+}
+static inline void dump_page_owner(struct page *page)
+{
+ if (static_branch_unlikely(&page_owner_inited))
+ __dump_page_owner(page);
}
#else
static inline void reset_page_owner(struct page *page, unsigned int order)
@@ -42,10 +56,18 @@ static inline void set_page_owner(struct page *page,
unsigned int order, gfp_t gfp_mask)
{
}
-static inline gfp_t get_page_owner_gfp(struct page *page)
+static inline void split_page_owner(struct page *page,
+ unsigned int order)
+{
+}
+static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+{
+}
+static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+{
+}
+static inline void dump_page_owner(struct page *page)
{
- return 0;
}
-
#endif /* CONFIG_PAGE_OWNER */
#endif /* __LINUX_PAGE_OWNER_H */
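
The wrappers above replace the plain page_owner_inited boolean with the jump-label static-key idiom, so the disabled case costs only a patched-out branch on the page allocation fast path. A self-contained sketch of that idiom with a hypothetical feature flag (not part of this patch):

#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(example_feature_key);

static inline void example_hot_path(void)
{
	/* The branch is patched out (a NOP) until the key is enabled. */
	if (static_branch_unlikely(&example_feature_key))
		pr_info("example feature path taken\n");
}

static void example_enable_feature(void)
{
	static_branch_enable(&example_feature_key);
}
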
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
new file mode 100644
index 000000000000..7978b3e2c1e1
--- /dev/null
+++ b/include/linux/stackdepot.h
@@ -0,0 +1,32 @@
+/*
+ * A generic stack depot implementation
+ *
+ * Author: Alexander Potapenko <glider@google.com>
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * Based on code by Dmitry Chernenkov.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_STACKDEPOT_H
+#define _LINUX_STACKDEPOT_H
+
+typedef u32 depot_stack_handle_t;
+
+struct stack_trace;
+
+depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
+
+void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
+
+#endif
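
A minimal usage sketch of this API (the helper names are illustrative, and CONFIG_STACKDEPOT plus CONFIG_STACKTRACE are assumed to be enabled): capture a trace once, keep only the 32-bit handle, and expand it again when reporting.

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

static depot_stack_handle_t example_record_stack(gfp_t flags)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
	};

	save_stack_trace(&trace);
	/* Returns 0 on failure; callers should keep a fallback handle. */
	return depot_save_stack(&trace, flags);
}

static void example_report_stack(depot_stack_handle_t handle)
{
	struct stack_trace trace;

	depot_fetch_stack(handle, &trace);
	print_stack_trace(&trace, 0);
}
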
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
new file mode 100644
index 000000000000..5409e1b15a25
--- /dev/null
+++ b/include/net/cnss2.h
@@ -0,0 +1,192 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _NET_CNSS2_H
+#define _NET_CNSS2_H
+
+#include <linux/pci.h>
+
+#define CNSS_MAX_FILE_NAME 20
+#define CNSS_MAX_TIMESTAMP_LEN 32
+
+enum cnss_bus_width_type {
+ CNSS_BUS_WIDTH_NONE,
+ CNSS_BUS_WIDTH_LOW,
+ CNSS_BUS_WIDTH_MEDIUM,
+ CNSS_BUS_WIDTH_HIGH
+};
+
+enum cnss_platform_cap_flag {
+ CNSS_HAS_EXTERNAL_SWREG = 0x01,
+ CNSS_HAS_UART_ACCESS = 0x02,
+};
+
+struct cnss_platform_cap {
+ u32 cap_flag;
+};
+
+struct cnss_fw_files {
+ char image_file[CNSS_MAX_FILE_NAME];
+ char board_data[CNSS_MAX_FILE_NAME];
+ char otp_data[CNSS_MAX_FILE_NAME];
+ char utf_file[CNSS_MAX_FILE_NAME];
+ char utf_board_data[CNSS_MAX_FILE_NAME];
+ char epping_file[CNSS_MAX_FILE_NAME];
+ char evicted_data[CNSS_MAX_FILE_NAME];
+};
+
+struct cnss_soc_info {
+ void __iomem *va;
+ phys_addr_t pa;
+ uint32_t chip_id;
+ uint32_t chip_family;
+ uint32_t board_id;
+ uint32_t soc_id;
+ uint32_t fw_version;
+ char fw_build_timestamp[CNSS_MAX_TIMESTAMP_LEN + 1];
+};
+
+struct cnss_wlan_runtime_ops {
+ int (*runtime_suspend)(struct pci_dev *pdev);
+ int (*runtime_resume)(struct pci_dev *pdev);
+};
+
+struct cnss_wlan_driver {
+ char *name;
+ int (*probe)(struct pci_dev *pdev, const struct pci_device_id *id);
+ void (*remove)(struct pci_dev *pdev);
+ int (*reinit)(struct pci_dev *pdev, const struct pci_device_id *id);
+ void (*shutdown)(struct pci_dev *pdev);
+ void (*crash_shutdown)(struct pci_dev *pdev);
+ int (*suspend)(struct pci_dev *pdev, pm_message_t state);
+ int (*resume)(struct pci_dev *pdev);
+ int (*suspend_noirq)(struct pci_dev *pdev);
+ int (*resume_noirq)(struct pci_dev *pdev);
+ void (*modem_status)(struct pci_dev *, int state);
+ void (*update_status)(struct pci_dev *pdev, uint32_t status);
+ struct cnss_wlan_runtime_ops *runtime_ops;
+ const struct pci_device_id *id_table;
+};
+
+enum cnss_driver_status {
+ CNSS_UNINITIALIZED,
+ CNSS_INITIALIZED,
+ CNSS_LOAD_UNLOAD,
+ CNSS_RECOVERY,
+};
+
+struct cnss_ce_tgt_pipe_cfg {
+ u32 pipe_num;
+ u32 pipe_dir;
+ u32 nentries;
+ u32 nbytes_max;
+ u32 flags;
+ u32 reserved;
+};
+
+struct cnss_ce_svc_pipe_cfg {
+ u32 service_id;
+ u32 pipe_dir;
+ u32 pipe_num;
+};
+
+struct cnss_shadow_reg_cfg {
+ u16 ce_id;
+ u16 reg_offset;
+};
+
+struct cnss_shadow_reg_v2_cfg {
+ u32 addr;
+};
+
+struct cnss_wlan_enable_cfg {
+ u32 num_ce_tgt_cfg;
+ struct cnss_ce_tgt_pipe_cfg *ce_tgt_cfg;
+ u32 num_ce_svc_pipe_cfg;
+ struct cnss_ce_svc_pipe_cfg *ce_svc_cfg;
+ u32 num_shadow_reg_cfg;
+ struct cnss_shadow_reg_cfg *shadow_reg_cfg;
+ u32 num_shadow_reg_v2_cfg;
+ struct cnss_shadow_reg_v2_cfg *shadow_reg_v2_cfg;
+};
+
+enum cnss_driver_mode {
+ CNSS_MISSION,
+ CNSS_FTM,
+ CNSS_EPPING,
+ CNSS_WALTEST,
+ CNSS_OFF,
+ CNSS_CCPM,
+ CNSS_QVIT,
+ CNSS_CALIBRATION,
+};
+
+enum cnss_recovery_reason {
+ CNSS_REASON_DEFAULT,
+ CNSS_REASON_LINK_DOWN,
+ CNSS_REASON_RDDM,
+ CNSS_REASON_TIMEOUT,
+};
+
+extern int cnss_wlan_register_driver(struct cnss_wlan_driver *driver);
+extern void cnss_wlan_unregister_driver(struct cnss_wlan_driver *driver);
+extern void cnss_device_crashed(void);
+extern int cnss_pci_link_down(struct device *dev);
+extern void cnss_schedule_recovery(struct device *dev,
+ enum cnss_recovery_reason reason);
+extern int cnss_self_recovery(struct device *dev,
+ enum cnss_recovery_reason reason);
+extern int cnss_force_fw_assert(struct device *dev);
+extern void *cnss_get_virt_ramdump_mem(unsigned long *size);
+extern int cnss_get_fw_files_for_target(struct cnss_fw_files *pfw_files,
+ u32 target_type, u32 target_version);
+extern int cnss_get_platform_cap(struct cnss_platform_cap *cap);
+extern int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info);
+extern void cnss_set_driver_status(enum cnss_driver_status driver_status);
+extern int cnss_request_bus_bandwidth(int bandwidth);
+extern int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count);
+extern int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 *ch_count,
+ u16 buf_len);
+extern int cnss_wlan_set_dfs_nol(const void *info, u16 info_len);
+extern int cnss_wlan_get_dfs_nol(void *info, u16 info_len);
+extern int cnss_power_up(struct device *dev);
+extern int cnss_power_down(struct device *dev);
+extern u8 *cnss_common_get_wlan_mac_address(struct device *dev, uint32_t *num);
+extern void cnss_request_pm_qos(u32 qos_val);
+extern void cnss_remove_pm_qos(void);
+extern void cnss_lock_pm_sem(void);
+extern void cnss_release_pm_sem(void);
+extern int cnss_wlan_pm_control(bool vote);
+extern int cnss_auto_suspend(void);
+extern int cnss_auto_resume(void);
+extern int cnss_get_user_msi_assignment(struct device *dev, char *user_name,
+ int *num_vectors,
+ uint32_t *user_base_data,
+ uint32_t *base_vector);
+extern int cnss_get_msi_irq(struct device *dev, unsigned int vector);
+extern void cnss_get_msi_address(struct device *dev, uint32_t *msi_addr_low,
+ uint32_t *msi_addr_high);
+extern int cnss_wlan_enable(struct device *dev,
+ struct cnss_wlan_enable_cfg *config,
+ enum cnss_driver_mode mode,
+ const char *host_version);
+extern int cnss_wlan_disable(struct device *dev, enum cnss_driver_mode mode);
+extern unsigned int cnss_get_qmi_timeout(void);
+extern int cnss_athdiag_read(struct device *dev, uint32_t offset,
+ uint32_t mem_type, uint32_t data_len,
+ uint8_t *output);
+extern int cnss_athdiag_write(struct device *dev, uint32_t offset,
+ uint32_t mem_type, uint32_t data_len,
+ uint8_t *input);
+extern int cnss_set_fw_log_mode(struct device *dev, uint8_t fw_log_mode);
+
+#endif /* _NET_CNSS2_H */
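
This header is the interface a WLAN host driver uses to register its PCIe callbacks with the CNSS2 platform driver instead of calling pci_register_driver() directly. A hedged sketch of such a registration; the driver name, callbacks and PCI IDs below are hypothetical placeholders, not values taken from this patch:

#include <linux/module.h>
#include <net/cnss2.h>

static int example_wlan_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	/* Map BARs, set up MSIs via cnss_get_user_msi_assignment(), etc. */
	return 0;
}

static void example_wlan_remove(struct pci_dev *pdev)
{
}

static const struct pci_device_id example_wlan_id_table[] = {
	{ PCI_DEVICE(0x17cb, 0x1104) },	/* hypothetical vendor/device ID */
	{ }
};

static struct cnss_wlan_driver example_wlan_driver = {
	.name		= "example_wlan",
	.probe		= example_wlan_probe,
	.remove		= example_wlan_remove,
	.id_table	= example_wlan_id_table,
};

static int __init example_wlan_init(void)
{
	return cnss_wlan_register_driver(&example_wlan_driver);
}

static void __exit example_wlan_exit(void)
{
	cnss_wlan_unregister_driver(&example_wlan_driver);
}

module_init(example_wlan_init);
module_exit(example_wlan_exit);
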
diff --git a/lib/Kconfig b/lib/Kconfig
index 7f6feb7a4687..4bb61d2e6746 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -547,4 +547,8 @@ config QMI_ENCDEC_DEBUG
library. This will log the information regarding the element
and message being encoded & decoded.
+config STACKDEPOT
+ bool
+ select STACKTRACE
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 20b74735508d..6da96c4f98a5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -244,6 +244,7 @@ config PAGE_OWNER
depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
select DEBUG_FS
select STACKTRACE
+ select STACKDEPOT
select PAGE_EXTENSION
help
This keeps track of what call chain is the owner of a page, may
diff --git a/lib/Makefile b/lib/Makefile
index 03a447a234cc..3ea641eb91bc 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -167,6 +167,8 @@ obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
obj-$(CONFIG_SG_SPLIT) += sg_split.o
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
+obj-$(CONFIG_STACKDEPOT) += stackdepot.o
+
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
fdt_empty_tree.o
$(foreach file, $(libfdt_files), \
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
new file mode 100644
index 000000000000..192134b225ca
--- /dev/null
+++ b/lib/stackdepot.c
@@ -0,0 +1,284 @@
+/*
+ * Generic stack depot for storing stack traces.
+ *
+ * Some debugging tools need to save stack traces of certain events which can
+ * be later presented to the user. For example, KASAN needs to save alloc and
+ * free stacks for each object, but storing two stack traces per object
+ * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for
+ * that).
+ *
+ * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
+ * and free stacks repeat a lot, we save about 100x space.
+ * Stacks are never removed from depot, so we store them contiguously one after
+ * another in a contiguous memory allocation.
+ *
+ * Author: Alexander Potapenko <glider@google.com>
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * Based on code by Dmitry Chernenkov.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ */
+
+#include <linux/gfp.h>
+#include <linux/jhash.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/stackdepot.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
+
+#define STACK_ALLOC_NULL_PROTECTION_BITS 1
+#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
+#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
+#define STACK_ALLOC_ALIGN 4
+#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
+ STACK_ALLOC_ALIGN)
+#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
+ STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
+#define STACK_ALLOC_SLABS_CAP 1024
+#define STACK_ALLOC_MAX_SLABS \
+ (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
+ (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
+
+/* The compact structure to store the reference to stacks. */
+union handle_parts {
+ depot_stack_handle_t handle;
+ struct {
+ u32 slabindex : STACK_ALLOC_INDEX_BITS;
+ u32 offset : STACK_ALLOC_OFFSET_BITS;
+ u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
+ };
+};
+
+struct stack_record {
+ struct stack_record *next; /* Link in the hashtable */
+ u32 hash; /* Hash in the hashtable */
+ u32 size; /* Number of frames in the stack */
+ union handle_parts handle;
+ unsigned long entries[1]; /* Variable-sized array of entries. */
+};
+
+static void *stack_slabs[STACK_ALLOC_MAX_SLABS];
+
+static int depot_index;
+static int next_slab_inited;
+static size_t depot_offset;
+static DEFINE_SPINLOCK(depot_lock);
+
+static bool init_stack_slab(void **prealloc)
+{
+ if (!*prealloc)
+ return false;
+ /*
+ * This smp_load_acquire() pairs with smp_store_release() to
+ * |next_slab_inited| below and in depot_alloc_stack().
+ */
+ if (smp_load_acquire(&next_slab_inited))
+ return true;
+ if (stack_slabs[depot_index] == NULL) {
+ stack_slabs[depot_index] = *prealloc;
+ } else {
+ stack_slabs[depot_index + 1] = *prealloc;
+ /*
+ * This smp_store_release pairs with smp_load_acquire() from
+ * |next_slab_inited| above and in depot_save_stack().
+ */
+ smp_store_release(&next_slab_inited, 1);
+ }
+ *prealloc = NULL;
+ return true;
+}
+
+/* Allocation of a new stack in raw storage */
+static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
+ u32 hash, void **prealloc, gfp_t alloc_flags)
+{
+ int required_size = offsetof(struct stack_record, entries) +
+ sizeof(unsigned long) * size;
+ struct stack_record *stack;
+
+ required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);
+
+ if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
+ if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
+ WARN_ONCE(1, "Stack depot reached limit capacity");
+ return NULL;
+ }
+ depot_index++;
+ depot_offset = 0;
+ /*
+ * smp_store_release() here pairs with smp_load_acquire() from
+ * |next_slab_inited| in depot_save_stack() and
+ * init_stack_slab().
+ */
+ if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
+ smp_store_release(&next_slab_inited, 0);
+ }
+ init_stack_slab(prealloc);
+ if (stack_slabs[depot_index] == NULL)
+ return NULL;
+
+ stack = stack_slabs[depot_index] + depot_offset;
+
+ stack->hash = hash;
+ stack->size = size;
+ stack->handle.slabindex = depot_index;
+ stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
+ stack->handle.valid = 1;
+ memcpy(stack->entries, entries, size * sizeof(unsigned long));
+ depot_offset += required_size;
+
+ return stack;
+}
+
+#define STACK_HASH_ORDER 18
+#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
+#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
+#define STACK_HASH_SEED 0x9747b28c
+
+static struct stack_record *stack_table[STACK_HASH_SIZE] = {
+ [0 ... STACK_HASH_SIZE - 1] = NULL
+};
+
+/* Calculate hash for a stack */
+static inline u32 hash_stack(unsigned long *entries, unsigned int size)
+{
+ return jhash2((u32 *)entries,
+ size * sizeof(unsigned long) / sizeof(u32),
+ STACK_HASH_SEED);
+}
+
+/* Find a stack that is equal to the one stored in entries in the hash */
+static inline struct stack_record *find_stack(struct stack_record *bucket,
+ unsigned long *entries, int size,
+ u32 hash)
+{
+ struct stack_record *found;
+
+ for (found = bucket; found; found = found->next) {
+ if (found->hash == hash &&
+ found->size == size &&
+ !memcmp(entries, found->entries,
+ size * sizeof(unsigned long))) {
+ return found;
+ }
+ }
+ return NULL;
+}
+
+void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
+{
+ union handle_parts parts = { .handle = handle };
+ void *slab = stack_slabs[parts.slabindex];
+ size_t offset = parts.offset << STACK_ALLOC_ALIGN;
+ struct stack_record *stack = slab + offset;
+
+ trace->nr_entries = trace->max_entries = stack->size;
+ trace->entries = stack->entries;
+ trace->skip = 0;
+}
+
+/**
+ * depot_save_stack - save stack in a stack depot.
+ * @trace - the stacktrace to save.
+ * @alloc_flags - flags for allocating additional memory if required.
+ *
+ * Returns the handle of the stack struct stored in depot.
+ */
+depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
+ gfp_t alloc_flags)
+{
+ u32 hash;
+ depot_stack_handle_t retval = 0;
+ struct stack_record *found = NULL, **bucket;
+ unsigned long flags;
+ struct page *page = NULL;
+ void *prealloc = NULL;
+
+ if (unlikely(trace->nr_entries == 0))
+ goto fast_exit;
+
+ hash = hash_stack(trace->entries, trace->nr_entries);
+ bucket = &stack_table[hash & STACK_HASH_MASK];
+
+ /*
+ * Fast path: look the stack trace up without locking.
+ * The smp_load_acquire() here pairs with smp_store_release() to
+ * |bucket| below.
+ */
+ found = find_stack(smp_load_acquire(bucket), trace->entries,
+ trace->nr_entries, hash);
+ if (found)
+ goto exit;
+
+ /*
+ * Check if the current or the next stack slab need to be initialized.
+ * If so, allocate the memory - we won't be able to do that under the
+ * lock.
+ *
+ * The smp_load_acquire() here pairs with smp_store_release() to
+ * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
+ */
+ if (unlikely(!smp_load_acquire(&next_slab_inited))) {
+ /*
+ * Zero out zone modifiers, as we don't have specific zone
+ * requirements. Keep the flags related to allocation in atomic
+ * contexts and I/O.
+ */
+ alloc_flags &= ~GFP_ZONEMASK;
+ alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
+ page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
+ if (page)
+ prealloc = page_address(page);
+ }
+
+ spin_lock_irqsave(&depot_lock, flags);
+
+ found = find_stack(*bucket, trace->entries, trace->nr_entries, hash);
+ if (!found) {
+ struct stack_record *new =
+ depot_alloc_stack(trace->entries, trace->nr_entries,
+ hash, &prealloc, alloc_flags);
+ if (new) {
+ new->next = *bucket;
+ /*
+ * This smp_store_release() pairs with
+ * smp_load_acquire() from |bucket| above.
+ */
+ smp_store_release(bucket, new);
+ found = new;
+ }
+ } else if (prealloc) {
+ /*
+ * We didn't need to store this stack trace, but let's keep
+ * the preallocated memory for the future.
+ */
+ WARN_ON(!init_stack_slab(&prealloc));
+ }
+
+ spin_unlock_irqrestore(&depot_lock, flags);
+exit:
+ if (prealloc) {
+ /* Nobody used this memory, ok to free it. */
+ free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
+ }
+ if (found)
+ retval = found->handle.handle;
+fast_exit:
+ return retval;
+}
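
As a worked example of the handle packing above, assuming 4 KB pages (PAGE_SHIFT = 12): each slab is 2^(STACK_ALLOC_ORDER + PAGE_SHIFT) = 16 KB and records are aligned to 2^STACK_ALLOC_ALIGN = 16 bytes, so the offset field needs 2 + 12 - 4 = 10 bits. Of the 32-bit handle, one bit is reserved as the null-protection/valid bit, leaving 32 - 1 - 10 = 21 bits for the slab index, which STACK_ALLOC_SLABS_CAP then caps at 1024 slabs, i.e. at most 1024 * 16 KB = 16 MB of deduplicated stack records.
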
diff --git a/mm/compaction.c b/mm/compaction.c
index 87ed50835881..f96a58e1843a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -19,6 +19,7 @@
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
+#include <linux/page_owner.h>
#include "internal.h"
#ifdef CONFIG_COMPACTION
@@ -59,13 +60,27 @@ static unsigned long release_freepages(struct list_head *freelist)
static void map_pages(struct list_head *list)
{
- struct page *page;
+ unsigned int i, order, nr_pages;
+ struct page *page, *next;
+ LIST_HEAD(tmp_list);
+
+ list_for_each_entry_safe(page, next, list, lru) {
+ list_del(&page->lru);
- list_for_each_entry(page, list, lru) {
- kasan_alloc_pages(page, 0);
- arch_alloc_page(page, 0);
- kernel_map_pages(page, 1, 1);
+ order = page_private(page);
+ nr_pages = 1 << order;
+
+ post_alloc_hook(page, order, __GFP_MOVABLE);
+ if (order)
+ split_page(page, order);
+
+ for (i = 0; i < nr_pages; i++) {
+ list_add(&page->lru, &tmp_list);
+ page++;
+ }
}
+
+ list_splice(&tmp_list, list);
}
static inline bool migrate_async_suitable(int migratetype)
@@ -442,12 +457,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
unsigned long flags = 0;
bool locked = false;
unsigned long blockpfn = *start_pfn;
+ unsigned int order;
cursor = pfn_to_page(blockpfn);
/* Isolate free pages. */
for (; blockpfn < end_pfn; blockpfn++, cursor++) {
- int isolated, i;
+ int isolated;
struct page *page = cursor;
/*
@@ -513,17 +529,17 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
goto isolate_fail;
}
- /* Found a free page, break it into order-0 pages */
- isolated = split_free_page(page);
+ /* Found a free page, will break it into order-0 pages */
+ order = page_order(page);
+ isolated = __isolate_free_page(page, order);
if (!isolated)
break;
+ set_page_private(page, order);
total_isolated += isolated;
cc->nr_freepages += isolated;
- for (i = 0; i < isolated; i++) {
- list_add(&page->lru, freelist);
- page++;
- }
+ list_add_tail(&page->lru, freelist);
+
if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
blockpfn += isolated;
break;
@@ -636,7 +652,7 @@ isolate_freepages_range(struct compact_control *cc,
*/
}
- /* split_free_page does not map the pages */
+ /* __isolate_free_page() does not map the pages */
map_pages(&freelist);
if (pfn < end_pfn) {
@@ -1085,7 +1101,7 @@ static void isolate_freepages(struct compact_control *cc)
}
}
- /* split_free_page does not map the pages */
+ /* __isolate_free_page() does not map the pages */
map_pages(freelist);
/*
diff --git a/mm/debug.c b/mm/debug.c
index 5cf26f8c69a3..3621385c09ac 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -9,6 +9,18 @@
#include <linux/mm.h>
#include <linux/trace_events.h>
#include <linux/memcontrol.h>
+#include <linux/migrate.h>
+#include <linux/page_owner.h>
+
+char *migrate_reason_names[MR_TYPES] = {
+ "compaction",
+ "memory_failure",
+ "memory_hotplug",
+ "syscall_or_cpuset",
+ "mempolicy_mbind",
+ "numa_misplaced",
+ "cma",
+};
static const struct trace_print_flags pageflag_names[] = {
{1UL << PG_locked, "locked" },
@@ -106,6 +118,7 @@ void dump_page_badflags(struct page *page, const char *reason,
void dump_page(struct page *page, const char *reason)
{
dump_page_badflags(page, reason, 0);
+ dump_page_owner(page);
}
EXPORT_SYMBOL(dump_page);
diff --git a/mm/internal.h b/mm/internal.h
index 7b9e313d9dea..46d27f378885 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -182,6 +182,8 @@ extern void prep_compound_page(struct page *page, unsigned int order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif
+extern void post_alloc_hook(struct page *page, unsigned int order,
+ gfp_t gfp_flags);
extern int user_min_free_kbytes;
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
diff --git a/mm/migrate.c b/mm/migrate.c
index 9a5ccfc71afc..85af2816b6d2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -39,6 +39,7 @@
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
+#include <linux/page_owner.h>
#include <asm/tlbflush.h>
@@ -668,6 +669,8 @@ void migrate_page_copy(struct page *newpage, struct page *page)
*/
if (PageWriteback(newpage))
end_page_writeback(newpage);
+
+ copy_page_owner(page, newpage);
}
EXPORT_SYMBOL(migrate_page_copy);
@@ -1097,6 +1100,9 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
goto out;
rc = __unmap_and_move(page, newpage, force, mode);
+ if (rc == MIGRATEPAGE_SUCCESS) {
+ set_page_owner_migrate_reason(newpage, reason);
+ }
out:
if (rc != -EAGAIN) {
@@ -1179,7 +1185,7 @@ put_new:
static int unmap_and_move_huge_page(new_page_t get_new_page,
free_page_t put_new_page, unsigned long private,
struct page *hpage, int force,
- enum migrate_mode mode)
+ enum migrate_mode mode, int reason)
{
int rc = -EAGAIN;
int *result = NULL;
@@ -1237,6 +1243,7 @@ put_anon:
if (rc == MIGRATEPAGE_SUCCESS) {
hugetlb_cgroup_migrate(hpage, new_hpage);
put_new_page = NULL;
+ set_page_owner_migrate_reason(new_hpage, reason);
}
unlock_page(hpage);
@@ -1311,7 +1318,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
if (PageHuge(page))
rc = unmap_and_move_huge_page(get_new_page,
put_new_page, private, page,
- pass > 2, mode);
+ pass > 2, mode, reason);
else
rc = unmap_and_move(get_new_page, put_new_page,
private, page, pass > 2, mode,
@@ -2001,6 +2008,7 @@ fail_putback:
set_page_memcg(new_page, page_memcg(page));
set_page_memcg(page, NULL);
page_remove_rmap(page);
+ set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
spin_unlock(ptl);
mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4fbb23c1cba7..6759192e69de 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -223,6 +223,20 @@ static char * const zone_names[MAX_NR_ZONES] = {
};
static void free_compound_page(struct page *page);
+
+char * const migratetype_names[MIGRATE_TYPES] = {
+ "Unmovable",
+ "Movable",
+ "Reclaimable",
+#ifdef CONFIG_CMA
+ "CMA",
+#endif
+ "HighAtomic",
+#ifdef CONFIG_MEMORY_ISOLATION
+ "Isolate",
+#endif
+};
+
compound_page_dtor * const compound_page_dtors[] = {
NULL,
free_compound_page,
@@ -459,6 +473,7 @@ static void bad_page(struct page *page, const char *reason,
printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
current->comm, page_to_pfn(page));
dump_page_badflags(page, reason, bad_flags);
+ dump_page_owner(page);
print_modules();
dump_stack();
@@ -569,6 +584,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
return;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+
__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
INIT_LIST_HEAD(&page->lru);
@@ -586,6 +604,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
return;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+
__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
set_page_private(page, 0);
@@ -1422,8 +1443,21 @@ static inline bool free_pages_prezeroed(void)
page_poisoning_enabled();
}
+inline void post_alloc_hook(struct page *page, unsigned int order,
+ gfp_t gfp_flags)
+{
+ set_page_private(page, 0);
+ set_page_refcounted(page);
+
+ kasan_alloc_pages(page, order);
+ arch_alloc_page(page, order);
+ kernel_map_pages(page, 1 << order, 1);
+ kernel_poison_pages(page, 1 << order, 1);
+ set_page_owner(page, order, gfp_flags);
+}
+
static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
- int alloc_flags)
+ int alloc_flags)
{
int i;
@@ -1433,13 +1467,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
return 1;
}
- set_page_private(page, 0);
- set_page_refcounted(page);
-
- kasan_alloc_pages(page, order);
- arch_alloc_page(page, order);
- kernel_map_pages(page, 1 << order, 1);
- kernel_poison_pages(page, 1 << order, 1);
+ post_alloc_hook(page, order, gfp_flags);
if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
for (i = 0; i < (1 << order); i++)
@@ -1448,8 +1476,6 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
if (order && (gfp_flags & __GFP_COMP))
prep_compound_page(page, order);
- set_page_owner(page, order, gfp_flags);
-
/*
* page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
* allocate the page. The expectation is that the caller is taking
@@ -2217,7 +2243,6 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
void split_page(struct page *page, unsigned int order)
{
int i;
- gfp_t gfp_mask;
VM_BUG_ON_PAGE(PageCompound(page), page);
VM_BUG_ON_PAGE(!page_count(page), page);
@@ -2231,12 +2256,9 @@ void split_page(struct page *page, unsigned int order)
split_page(virt_to_page(page[0].shadow), order);
#endif
- gfp_mask = get_page_owner_gfp(page);
- set_page_owner(page, 0, gfp_mask);
- for (i = 1; i < (1 << order); i++) {
+ for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i);
- set_page_owner(page + i, 0, gfp_mask);
- }
+ split_page_owner(page, order);
}
EXPORT_SYMBOL_GPL(split_page);
@@ -2266,8 +2288,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
zone->free_area[order].nr_free--;
rmv_page_order(page);
- set_page_owner(page, order, __GFP_MOVABLE);
-
/* Set the pageblock if the isolated page is at least a pageblock */
if (order >= pageblock_order - 1) {
struct page *endpage = page + (1 << order) - 1;
@@ -2285,33 +2305,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
}
/*
- * Similar to split_page except the page is already free. As this is only
- * being used for migration, the migratetype of the block also changes.
- * As this is called with interrupts disabled, the caller is responsible
- * for calling arch_alloc_page() and kernel_map_page() after interrupts
- * are enabled.
- *
- * Note: this is probably too low level an operation for use in drivers.
- * Please consult with lkml before using this in your driver.
- */
-int split_free_page(struct page *page)
-{
- unsigned int order;
- int nr_pages;
-
- order = page_order(page);
-
- nr_pages = __isolate_free_page(page, order);
- if (!nr_pages)
- return 0;
-
- /* Split into individual pages */
- set_page_refcounted(page);
- split_page(page, order);
- return nr_pages;
-}
-
-/*
* Allocate a page from the given zone. Use pcplists for order-0 allocations.
*/
static inline
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 3ecd3807c2c2..efb6c3c38c01 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -8,6 +8,7 @@
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/kasan.h>
+#include <linux/page_owner.h>
#include "internal.h"
static int set_migratetype_isolate(struct page *page,
@@ -106,10 +107,6 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
if (pfn_valid_within(page_to_pfn(buddy)) &&
!is_migrate_isolate_page(buddy)) {
__isolate_free_page(page, order);
- kasan_alloc_pages(page, order);
- arch_alloc_page(page, order);
- kernel_map_pages(page, (1 << order), 1);
- set_page_refcounted(page);
isolated_page = page;
}
}
@@ -128,8 +125,10 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
zone->nr_isolate_pageblock--;
out:
spin_unlock_irqrestore(&zone->lock, flags);
- if (isolated_page)
+ if (isolated_page) {
+ post_alloc_hook(page, order, __GFP_MOVABLE);
__free_pages(isolated_page, order);
+ }
}
static inline struct page *
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 45cccaa1ce32..3a9a358e7c63 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -5,11 +5,24 @@
#include <linux/bootmem.h>
#include <linux/stacktrace.h>
#include <linux/page_owner.h>
+#include <linux/jump_label.h>
+#include <linux/migrate.h>
+#include <linux/stackdepot.h>
+
#include "internal.h"
+/*
+ * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
+ * to use off-stack temporary storage
+ */
+#define PAGE_OWNER_STACK_DEPTH (16)
+
static bool page_owner_disabled =
!IS_ENABLED(CONFIG_PAGE_OWNER_ENABLE_DEFAULT);
-bool page_owner_inited __read_mostly;
+DEFINE_STATIC_KEY_FALSE(page_owner_inited);
+
+static depot_stack_handle_t dummy_handle;
+static depot_stack_handle_t failure_handle;
static void init_early_allocated_pages(void);
@@ -36,12 +49,42 @@ static bool need_page_owner(void)
return true;
}
+static noinline void register_dummy_stack(void)
+{
+ unsigned long entries[4];
+ struct stack_trace dummy;
+
+ dummy.nr_entries = 0;
+ dummy.max_entries = ARRAY_SIZE(entries);
+ dummy.entries = &entries[0];
+ dummy.skip = 0;
+
+ save_stack_trace(&dummy);
+ dummy_handle = depot_save_stack(&dummy, GFP_KERNEL);
+}
+
+static noinline void register_failure_stack(void)
+{
+ unsigned long entries[4];
+ struct stack_trace failure;
+
+ failure.nr_entries = 0;
+ failure.max_entries = ARRAY_SIZE(entries);
+ failure.entries = &entries[0];
+ failure.skip = 0;
+
+ save_stack_trace(&failure);
+ failure_handle = depot_save_stack(&failure, GFP_KERNEL);
+}
+
static void init_page_owner(void)
{
if (page_owner_disabled)
return;
- page_owner_inited = true;
+ register_dummy_stack();
+ register_failure_stack();
+ static_branch_enable(&page_owner_inited);
init_early_allocated_pages();
}
@@ -57,46 +100,141 @@ void __reset_page_owner(struct page *page, unsigned int order)
for (i = 0; i < (1 << order); i++) {
page_ext = lookup_page_ext(page + i);
+ if (unlikely(!page_ext))
+ continue;
__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
}
-void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
+static inline bool check_recursive_alloc(struct stack_trace *trace,
+ unsigned long ip)
{
- struct page_ext *page_ext = lookup_page_ext(page);
+ int i, count;
+
+ if (!trace->nr_entries)
+ return false;
+
+ for (i = 0, count = 0; i < trace->nr_entries; i++) {
+ if (trace->entries[i] == ip && ++count == 2)
+ return true;
+ }
+
+ return false;
+}
+
+static noinline depot_stack_handle_t save_stack(gfp_t flags)
+{
+ unsigned long entries[PAGE_OWNER_STACK_DEPTH];
struct stack_trace trace = {
.nr_entries = 0,
- .max_entries = ARRAY_SIZE(page_ext->trace_entries),
- .entries = &page_ext->trace_entries[0],
- .skip = 3,
+ .entries = entries,
+ .max_entries = PAGE_OWNER_STACK_DEPTH,
+ .skip = 0
};
+ depot_stack_handle_t handle;
save_stack_trace(&trace);
+ if (trace.nr_entries != 0 &&
+ trace.entries[trace.nr_entries-1] == ULONG_MAX)
+ trace.nr_entries--;
+
+ /*
+ * We need to check recursion here because our request to stackdepot
+ * could trigger memory allocation to save a new entry. New memory
+ * allocation would reach here and call depot_save_stack() again
+ * if we don't catch it. There is still not enough memory in stackdepot
+ * so it would try to allocate memory again and loop forever.
+ */
+ if (check_recursive_alloc(&trace, _RET_IP_))
+ return dummy_handle;
+
+ handle = depot_save_stack(&trace, flags);
+ if (!handle)
+ handle = failure_handle;
+
+ return handle;
+}
+
+noinline void __set_page_owner(struct page *page, unsigned int order,
+ gfp_t gfp_mask)
+{
+ struct page_ext *page_ext = lookup_page_ext(page);
+
+ if (unlikely(!page_ext))
+ return;
+ page_ext->handle = save_stack(gfp_mask);
page_ext->order = order;
page_ext->gfp_mask = gfp_mask;
- page_ext->nr_entries = trace.nr_entries;
+ page_ext->last_migrate_reason = -1;
__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
}
-gfp_t __get_page_owner_gfp(struct page *page)
+void __set_page_owner_migrate_reason(struct page *page, int reason)
+{
+ struct page_ext *page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+
+ page_ext->last_migrate_reason = reason;
+}
+
+void __split_page_owner(struct page *page, unsigned int order)
{
+ int i;
struct page_ext *page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ /*
+ * The caller just returns if there is no valid gfp,
+ * so return here too.
+ */
+ return;
- return page_ext->gfp_mask;
+ page_ext->order = 0;
+ for (i = 1; i < (1 << order); i++)
+ __copy_page_owner(page, page + i);
+}
+
+void __copy_page_owner(struct page *oldpage, struct page *newpage)
+{
+ struct page_ext *old_ext = lookup_page_ext(oldpage);
+ struct page_ext *new_ext = lookup_page_ext(newpage);
+
+ if (unlikely(!old_ext || !new_ext))
+ return;
+
+ new_ext->order = old_ext->order;
+ new_ext->gfp_mask = old_ext->gfp_mask;
+ new_ext->last_migrate_reason = old_ext->last_migrate_reason;
+ new_ext->handle = old_ext->handle;
+
+ /*
+ * We don't clear the bit on the oldpage as it's going to be freed
+ * after migration. Until then, the info can be useful in case of
+ * a bug, and the overall stats will be off a bit only temporarily.
+ * Also, migrate_misplaced_transhuge_page() can still fail the
+ * migration and then we want the oldpage to retain the info. But
+ * in that case we also don't need to explicitly clear the info from
+ * the new page, which will be freed.
+ */
+ __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
}
static ssize_t
print_page_owner(char __user *buf, size_t count, unsigned long pfn,
- struct page *page, struct page_ext *page_ext)
+ struct page *page, struct page_ext *page_ext,
+ depot_stack_handle_t handle)
{
int ret;
int pageblock_mt, page_mt;
char *kbuf;
+ unsigned long entries[PAGE_OWNER_STACK_DEPTH];
struct stack_trace trace = {
- .nr_entries = page_ext->nr_entries,
- .entries = &page_ext->trace_entries[0],
+ .nr_entries = 0,
+ .entries = entries,
+ .max_entries = PAGE_OWNER_STACK_DEPTH,
+ .skip = 0
};
kbuf = kmalloc(count, GFP_KERNEL);
@@ -104,8 +242,9 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
return -ENOMEM;
ret = snprintf(kbuf, count,
- "Page allocated via order %u, mask 0x%x\n",
- page_ext->order, page_ext->gfp_mask);
+ "Page allocated via order %u, mask %#x(%pGg)\n",
+ page_ext->order, page_ext->gfp_mask,
+ &page_ext->gfp_mask);
if (ret >= count)
goto err;
@@ -114,31 +253,29 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
pageblock_mt = get_pfnblock_migratetype(page, pfn);
page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
ret += snprintf(kbuf + ret, count - ret,
- "PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
+ "PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
pfn,
+ migratetype_names[page_mt],
pfn >> pageblock_order,
- pageblock_mt,
- pageblock_mt != page_mt ? "Fallback" : " ",
- PageLocked(page) ? "K" : " ",
- PageError(page) ? "E" : " ",
- PageReferenced(page) ? "R" : " ",
- PageUptodate(page) ? "U" : " ",
- PageDirty(page) ? "D" : " ",
- PageLRU(page) ? "L" : " ",
- PageActive(page) ? "A" : " ",
- PageSlab(page) ? "S" : " ",
- PageWriteback(page) ? "W" : " ",
- PageCompound(page) ? "C" : " ",
- PageSwapCache(page) ? "B" : " ",
- PageMappedToDisk(page) ? "M" : " ");
+ migratetype_names[pageblock_mt],
+ page->flags, &page->flags);
if (ret >= count)
goto err;
+ depot_fetch_stack(handle, &trace);
ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
if (ret >= count)
goto err;
+ if (page_ext->last_migrate_reason != -1) {
+ ret += snprintf(kbuf + ret, count - ret,
+ "Page has been migrated, last migrate reason: %s\n",
+ migrate_reason_names[page_ext->last_migrate_reason]);
+ if (ret >= count)
+ goto err;
+ }
+
ret += snprintf(kbuf + ret, count - ret, "\n");
if (ret >= count)
goto err;
@@ -154,14 +291,58 @@ err:
return -ENOMEM;
}
+void __dump_page_owner(struct page *page)
+{
+ struct page_ext *page_ext = lookup_page_ext(page);
+ unsigned long entries[PAGE_OWNER_STACK_DEPTH];
+ struct stack_trace trace = {
+ .nr_entries = 0,
+ .entries = entries,
+ .max_entries = PAGE_OWNER_STACK_DEPTH,
+ .skip = 0
+ };
+ depot_stack_handle_t handle;
+ gfp_t gfp_mask;
+ int mt;
+
+ if (unlikely(!page_ext)) {
+ pr_alert("There is not page extension available.\n");
+ return;
+ }
+ gfp_mask = page_ext->gfp_mask;
+ mt = gfpflags_to_migratetype(gfp_mask);
+
+ if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
+ pr_alert("page_owner info is not active (free page?)\n");
+ return;
+ }
+
+ handle = READ_ONCE(page_ext->handle);
+ if (!handle) {
+ pr_alert("page_owner info is not active (free page?)\n");
+ return;
+ }
+
+ depot_fetch_stack(handle, &trace);
+ pr_alert("page allocated via order %u, migratetype %s, "
+ "gfp_mask %#x(%pGg)\n", page_ext->order,
+ migratetype_names[mt], gfp_mask, &gfp_mask);
+ print_stack_trace(&trace, 0);
+
+ if (page_ext->last_migrate_reason != -1)
+ pr_alert("page has been migrated, last migrate reason: %s\n",
+ migrate_reason_names[page_ext->last_migrate_reason]);
+}
+
static ssize_t
read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
unsigned long pfn;
struct page *page;
struct page_ext *page_ext;
+ depot_stack_handle_t handle;
- if (!page_owner_inited)
+ if (!static_branch_unlikely(&page_owner_inited))
return -EINVAL;
page = NULL;
@@ -198,6 +379,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
}
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ continue;
/*
* Some pages could be missed by concurrent allocation or free,
@@ -206,10 +389,19 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
continue;
+ /*
+ * Access to page_ext->handle isn't synchronous so we should
+ * be careful to access it.
+ */
+ handle = READ_ONCE(page_ext->handle);
+ if (!handle)
+ continue;
+
/* Record the next PFN to read in the file offset */
*ppos = (pfn - min_low_pfn) + 1;
- return print_page_owner(buf, count, pfn, page, page_ext);
+ return print_page_owner(buf, count, pfn, page,
+ page_ext, handle);
}
return 0;
@@ -248,6 +440,9 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
page = pfn_to_page(pfn);
+ if (page_zone(page) != zone)
+ continue;
+
/*
* We are safe to check buddy flag and order, because
* this is init stage and only single thread runs.
@@ -261,6 +456,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
continue;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ continue;
/* Maybe overlapping zone */
if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
@@ -309,7 +506,7 @@ static int __init pageowner_init(void)
{
struct dentry *dentry;
- if (!page_owner_inited) {
+ if (!static_branch_unlikely(&page_owner_inited)) {
pr_info("page_owner is disabled\n");
return 0;
}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index d6b4817c4416..3c0796cd3f80 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -927,19 +927,6 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
#endif
#ifdef CONFIG_PROC_FS
-static char * const migratetype_names[MIGRATE_TYPES] = {
- "Unmovable",
- "Movable",
- "Reclaimable",
-#ifdef CONFIG_CMA
- "CMA",
-#endif
- "HighAtomic",
-#ifdef CONFIG_MEMORY_ISOLATION
- "Isolate",
-#endif
-};
-
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
struct zone *zone)
{
@@ -1103,6 +1090,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
continue;
page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ continue;
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
continue;
@@ -1140,7 +1129,7 @@ static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
#ifdef CONFIG_PAGE_OWNER
int mtype;
- if (!page_owner_inited)
+ if (!static_branch_unlikely(&page_owner_inited))
return;
drain_all_pages(NULL);
diff --git a/net/core/dev.c b/net/core/dev.c
index 50e77fe096f4..16467dc215d0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4560,8 +4560,13 @@ static void net_rps_send_ipi(struct softnet_data *remsd)
while (remsd) {
struct softnet_data *next = remsd->rps_ipi_next;
- if (cpu_online(remsd->cpu))
+ if (cpu_online(remsd->cpu)) {
smp_call_function_single_async(remsd->cpu, &remsd->csd);
+ } else {
+ rps_lock(remsd);
+ remsd->backlog.state = 0;
+ rps_unlock(remsd);
+ }
remsd = next;
}
#endif
diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c
index 77147b42d598..f1c055f3c243 100644
--- a/tools/vm/page_owner_sort.c
+++ b/tools/vm/page_owner_sort.c
@@ -79,12 +79,12 @@ static void add_list(char *buf, int len)
}
}
-#define BUF_SIZE 1024
+#define BUF_SIZE (128 * 1024)
int main(int argc, char **argv)
{
FILE *fin, *fout;
- char buf[BUF_SIZE];
+ char *buf;
int ret, i, count;
struct block_list *list2;
struct stat st;
@@ -107,6 +107,11 @@ int main(int argc, char **argv)
max_size = st.st_size / 100; /* hack ... */
list = malloc(max_size * sizeof(*list));
+ buf = malloc(BUF_SIZE);
+ if (!list || !buf) {
+ printf("Out of memory\n");
+ exit(1);
+ }
for ( ; ; ) {
ret = read_block(buf, BUF_SIZE, fin);