author    Skylar Chang <chiaweic@codeaurora.org>  2016-03-14 15:51:49 -0700
committer David Keitel <dkeitel@codeaurora.org>  2016-03-23 21:25:09 -0700
commit    f36ae7405af944bb43f807c3e59bcddf47fd41e4 (patch)
tree      b754fa2b6e2528147d1127e5ba862f25088603bb
parent    d2c1940363d41cdfea8ac42c99558472688d3166 (diff)
msm: ipa: snapshot of IPA changes

This snapshot is taken as of msm-3.18 commit d5809484b
(Merge "msm: ipa: fix race condition when teardown pipe").

Signed-off-by: Skylar Chang <chiaweic@codeaurora.org>
-rw-r--r--  Documentation/devicetree/bindings/platform/msm/ipa.txt | 22
-rw-r--r--  Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt | 2
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/msm/rndis_ipa.c | 7
-rw-r--r--  drivers/net/ethernet/msm/rndis_ipa_trace.h | 81
-rw-r--r--  drivers/platform/msm/ipa/Makefile | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_api.c | 149
-rw-r--r--  drivers/platform/msm/ipa/ipa_api.h | 26
-rw-r--r--  drivers/platform/msm/ipa/ipa_clients/Makefile | 1
-rw-r--r--  drivers/platform/msm/ipa/ipa_clients/ipa_usb.c (renamed from drivers/platform/msm/ipa/ipa_v3/ipa_usb.c) | 1997
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa.c | 309
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_client.c | 13
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 38
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_dp.c | 51
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 42
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_rm.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_trace.h | 135
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c | 25
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_utils.c | 7
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 57
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/Makefile | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 1190
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_client.c | 566
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 227
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_dma.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_dp.c | 447
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_flt.c | 283
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c | 118
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h | 269
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 411
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c | 135
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_intf.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c | 28
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_nat.c | 254
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_reg.h | 318
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rm.c | 14
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rm_inactivity_timer.c | 25
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c | 7
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 235
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_trace.h | 135
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc.c | 127
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c | 36
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c | 85
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 1038
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c | 768
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h | 395
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h | 351
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c | 1352
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h | 434
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h | 279
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 521
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/teth_bridge.c | 59
-rw-r--r--  include/linux/ipa.h | 329
-rw-r--r--  include/linux/ipa_usb.h | 321
-rw-r--r--  include/linux/msm-sps.h | 2
-rw-r--r--  include/linux/rndis_ipa.h | 4
-rw-r--r--  include/uapi/linux/msm_ipa.h | 4
60 files changed, 9313 insertions, 4446 deletions
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index 137ff6ee9f63..b4b74e98092b 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -97,6 +97,17 @@ Optional properties:
- clock-names: This property shall contain the clock input names used
by driver in same order as the clocks property. This should be "iface_clk"
+IPA SMP2P sub nodes
+
+-compatible: "qcom,smp2pgpio-map-ipa-1-out" - represents the out gpio from
+ ipa driver to modem.
+
+-compatible: "qcom,smp2pgpio-map-ipa-1-in" - represents the in gpio to
+ ipa driver from modem.
+
+-gpios: Binding to the gpio defined in XXX-smp2p.dtsi
+
+
Example:
qcom,ipa@fd4c0000 {
@@ -147,4 +158,15 @@ qcom,ipa@fd4c0000 {
qcom,descriptor-fifo-offset = <0xd00>;
qcom,descriptor-fifo-size = <0x300>;
};
+
+ /* smp2p gpio information */
+ qcom,smp2pgpio_map_ipa_1_out {
+ compatible = "qcom,smp2pgpio-map-ipa-1-out";
+ gpios = <&smp2pgpio_ipa_1_out 0 0>;
+ };
+
+ qcom,smp2pgpio_map_ipa_1_in {
+ compatible = "qcom,smp2pgpio-map-ipa-1-in";
+ gpios = <&smp2pgpio_ipa_1_in 0 0>;
+ };
};
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
index c9ca9c25ce73..c7024e07a71e 100644
--- a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
@@ -8,6 +8,8 @@ Required properties:
Optional:
- qcom,rmnet-ipa-ssr: determine if modem SSR is supported
- qcom,ipa-loaduC: indicate that ipa uC should be loaded
+- qcom,ipa-advertise-sg-support: determine how to respond to a query
+regarding scatter-gather capability
Example:
qcom,rmnet-ipa {
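For illustration only (this snippet is not part of the patch; the function and
variable names are hypothetical), a driver would typically consume such an
optional boolean property with of_property_read_bool():

	#include <linux/of.h>

	/* Hypothetical sketch: read the optional scatter-gather flag
	 * documented above. of_property_read_bool() returns true only
	 * when the property is present in the node.
	 */
	static bool rmnet_ipa_read_sg_support(struct device_node *node)
	{
		return of_property_read_bool(node,
				"qcom,ipa-advertise-sg-support");
	}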
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 071f84eb6f3f..7d7fe79da74d 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
+obj-$(CONFIG_ARCH_MSM) += msm/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/msm/rndis_ipa.c b/drivers/net/ethernet/msm/rndis_ipa.c
index ff0fc3d72547..52ce5536aed5 100644
--- a/drivers/net/ethernet/msm/rndis_ipa.c
+++ b/drivers/net/ethernet/msm/rndis_ipa.c
@@ -28,6 +28,9 @@
#include <linux/rndis_ipa.h>
#include <linux/workqueue.h>
+#define CREATE_TRACE_POINTS
+#include "rndis_ipa_trace.h"
+
#define DRV_NAME "RNDIS_IPA"
#define DEBUGFS_DIR_NAME "rndis_ipa"
#define DEBUGFS_AGGR_DIR_NAME "rndis_ipa_aggregation"
@@ -912,6 +915,7 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
}
skb = rndis_encapsulate_skb(skb);
+ trace_rndis_tx_dp(skb->protocol);
ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL);
if (ret) {
RNDIS_IPA_ERROR("ipa transmit failed (%d)\n", ret);
@@ -957,6 +961,8 @@ static void rndis_ipa_tx_complete_notify(void *private,
NULL_CHECK_NO_RETVAL(private);
+ trace_rndis_status_rcvd(skb->protocol);
+
RNDIS_IPA_DEBUG("Tx-complete, len=%d, skb->prot=%d, outstanding=%d\n",
skb->len, skb->protocol,
atomic_read(&rndis_ipa_ctx->outstanding_pkts));
@@ -1121,6 +1127,7 @@ static void rndis_ipa_packet_receive_notify(void *private,
return;
}
+ trace_rndis_netif_ni(skb->protocol);
result = netif_rx_ni(skb);
if (result)
RNDIS_IPA_ERROR("fail on netif_rx_ni\n");
diff --git a/drivers/net/ethernet/msm/rndis_ipa_trace.h b/drivers/net/ethernet/msm/rndis_ipa_trace.h
new file mode 100644
index 000000000000..c0fc573799f2
--- /dev/null
+++ b/drivers/net/ethernet/msm/rndis_ipa_trace.h
@@ -0,0 +1,81 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rndis_ipa
+#define TRACE_INCLUDE_FILE rndis_ipa_trace
+
+#if !defined(_RNDIS_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RNDIS_IPA_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(
+ rndis_netif_ni,
+
+ TP_PROTO(unsigned long proto),
+
+ TP_ARGS(proto),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, proto)
+ ),
+
+ TP_fast_assign(
+ __entry->proto = proto;
+ ),
+
+ TP_printk("proto =%lu\n", __entry->proto)
+);
+
+TRACE_EVENT(
+ rndis_tx_dp,
+
+ TP_PROTO(unsigned long proto),
+
+ TP_ARGS(proto),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, proto)
+ ),
+
+ TP_fast_assign(
+ __entry->proto = proto;
+ ),
+
+ TP_printk("proto =%lu\n", __entry->proto)
+);
+
+TRACE_EVENT(
+ rndis_status_rcvd,
+
+ TP_PROTO(unsigned long proto),
+
+ TP_ARGS(proto),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, proto)
+ ),
+
+ TP_fast_assign(
+ __entry->proto = proto;
+ ),
+
+ TP_printk("proto =%lu\n", __entry->proto)
+);
+
+#endif /* _RNDIS_IPA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
index d2f4991cc9f7..19ae5234a6d0 100644
--- a/drivers/platform/msm/ipa/Makefile
+++ b/drivers/platform/msm/ipa/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_IPA) += ipa_v2/
-obj-$(CONFIG_IPA3) += ipa_v3/
+obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/
obj-$(CONFIG_IPA) += ipa_api.o
obj-$(CONFIG_IPA3) += ipa_api.o
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 54d8189a0922..31db8ff2709f 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -408,7 +408,7 @@ int ipa_cfg_ep_holb_by_client(enum ipa_client_type client,
EXPORT_SYMBOL(ipa_cfg_ep_holb_by_client);
/**
- * ipa_cfg_ep_hdr() - IPA end-point Control configuration
+ * ipa_cfg_ep_ctrl() - IPA end-point Control configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
* @ipa_ep_cfg_ctrl: [in] IPA end-point configuration params
*
@@ -1443,6 +1443,22 @@ int ipa_uc_reg_rdyCB(
EXPORT_SYMBOL(ipa_uc_reg_rdyCB);
/**
+ * ipa_uc_dereg_rdyCB() - To de-register uC ready CB
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa_uc_dereg_rdyCB(void)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_dereg_rdyCB);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_dereg_rdyCB);
+
+/**
* ipa_rm_create_resource() - create resource
* @create_params: [in] parameters needed
* for resource initialization
@@ -2632,6 +2648,8 @@ static struct of_device_id ipa_plat_drv_match[] = {
{ .compatible = "qcom,ipa-smmu-ap-cb", },
{ .compatible = "qcom,ipa-smmu-wlan-cb", },
{ .compatible = "qcom,ipa-smmu-uc-cb", },
+ { .compatible = "qcom,smp2pgpio-map-ipa-1-in", },
+ { .compatible = "qcom,smp2pgpio-map-ipa-1-out", },
{}
};
@@ -2639,21 +2657,29 @@ static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p)
{
int result;
- pr_debug("ipa: IPA driver probing started\n");
-
- ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL);
- if (!ipa_api_ctrl)
- return -ENOMEM;
-
- /* Get IPA HW Version */
- result = of_property_read_u32(pdev_p->dev.of_node, "qcom,ipa-hw-ver",
- &ipa_api_hw_type);
- if ((result) || (ipa_api_hw_type == 0)) {
- pr_err("ipa: get resource failed for ipa-hw-ver!\n");
- result = -ENODEV;
- goto fail;
+ /*
+ * IPA probe function can be called multiple times as the same probe
+ * function handles multiple compatible strings
+ */
+ pr_debug("ipa: IPA driver probing started for %s\n",
+ pdev_p->dev.of_node->name);
+
+ if (!ipa_api_ctrl) {
+ ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL);
+ if (!ipa_api_ctrl)
+ return -ENOMEM;
+
+ /* Get IPA HW Version */
+ result = of_property_read_u32(pdev_p->dev.of_node,
+ "qcom,ipa-hw-ver", &ipa_api_hw_type);
+ if ((result) || (ipa_api_hw_type == 0)) {
+ pr_err("ipa: get resource failed for ipa-hw-ver!\n");
+ kfree(ipa_api_ctrl);
+ ipa_api_ctrl = 0;
+ return -ENODEV;
+ }
+ pr_debug("ipa: ipa_api_hw_type = %d", ipa_api_hw_type);
}
- pr_debug("ipa: ipa_api_hw_type = %d", ipa_api_hw_type);
/* call probe based on IPA HW version */
switch (ipa_api_hw_type) {
@@ -2663,30 +2689,20 @@ static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p)
case IPA_HW_v2_6L:
result = ipa_plat_drv_probe(pdev_p, ipa_api_ctrl,
ipa_plat_drv_match);
- if (result) {
- pr_err("ipa: ipa_plat_drv_probe failed\n");
- goto fail;
- }
break;
case IPA_HW_v3_0:
case IPA_HW_v3_1:
result = ipa3_plat_drv_probe(pdev_p, ipa_api_ctrl,
ipa_plat_drv_match);
- if (result) {
- pr_err("ipa: ipa3_plat_drv_probe failed\n");
- goto fail;
- }
break;
default:
pr_err("ipa: unsupported version %d\n", ipa_api_hw_type);
- result = -EPERM;
- goto fail;
+ return -EPERM;
}
- return 0;
-fail:
- kfree(ipa_api_ctrl);
- ipa_api_ctrl = 0;
+ if (result && result != -EPROBE_DEFER)
+ pr_err("ipa: ipa_plat_drv_probe failed\n");
+
return result;
}
@@ -2708,79 +2724,6 @@ static int ipa_ap_resume(struct device *dev)
return ret;
}
-int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
- struct ipa_usb_teth_params *teth_params,
- int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *),
- void *user_data)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_usb_init_teth_prot, teth_prot, teth_params,
- ipa_usb_notify_cb, user_data);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_usb_init_teth_prot);
-
-int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
- struct ipa_usb_xdci_chan_params *dl_chan_params,
- struct ipa_req_chan_out_params *ul_out_params,
- struct ipa_req_chan_out_params *dl_out_params,
- struct ipa_usb_xdci_connect_params *connect_params)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_usb_xdci_connect, ul_chan_params,
- dl_chan_params, ul_out_params, dl_out_params, connect_params);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_usb_xdci_connect);
-
-int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
- enum ipa_usb_teth_prot teth_prot)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_usb_xdci_disconnect, ul_clnt_hdl,
- dl_clnt_hdl, teth_prot);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_usb_xdci_disconnect);
-
-int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_usb_deinit_teth_prot, teth_prot);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_usb_deinit_teth_prot);
-
-int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
- enum ipa_usb_teth_prot teth_prot)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_usb_xdci_suspend, ul_clnt_hdl,
- dl_clnt_hdl, teth_prot);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_usb_xdci_suspend);
-
-int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl)
-{
- int ret;
-
- IPA_API_DISPATCH_RETURN(ipa_usb_xdci_resume, ul_clnt_hdl, dl_clnt_hdl);
-
- return ret;
-}
-EXPORT_SYMBOL(ipa_usb_xdci_resume);
-
int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data),
void *user_data)
{
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index ba66709d41a4..e108f0ca0bae 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -181,6 +181,8 @@ struct ipa_api_controller {
int (*ipa_uc_reg_rdyCB)(struct ipa_wdi_uc_ready_params *param);
+ int (*ipa_uc_dereg_rdyCB)(void);
+
int (*ipa_rm_create_resource)(
struct ipa_rm_create_params *create_params);
@@ -331,28 +333,6 @@ struct ipa_api_controller {
struct ipa_gsi_ep_config *(*ipa_get_gsi_ep_info)(int ipa_ep_idx);
- int (*ipa_usb_init_teth_prot)(enum ipa_usb_teth_prot teth_prot,
- struct ipa_usb_teth_params *teth_params,
- int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void*),
- void *user_data);
-
- int (*ipa_usb_xdci_connect)(
- struct ipa_usb_xdci_chan_params *ul_chan_params,
- struct ipa_usb_xdci_chan_params *dl_chan_params,
- struct ipa_req_chan_out_params *ul_out_params,
- struct ipa_req_chan_out_params *dl_out_params,
- struct ipa_usb_xdci_connect_params *connect_params);
-
- int (*ipa_usb_xdci_disconnect)(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
- enum ipa_usb_teth_prot teth_prot);
-
- int (*ipa_usb_deinit_teth_prot)(enum ipa_usb_teth_prot teth_prot);
-
- int (*ipa_usb_xdci_suspend)(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
- enum ipa_usb_teth_prot teth_prot);
-
- int (*ipa_usb_xdci_resume)(u32 ul_clnt_hdl, u32 dl_clnt_hdl);
-
int (*ipa_register_ipa_ready_cb)(void (*ipa_ready_cb)(void *user_data),
void *user_data);
diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile
new file mode 100644
index 000000000000..eb0e91b70690
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_IPA3) += ipa_usb.o
\ No newline at end of file
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index cef1729d0031..9b9ec31f34fc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -10,11 +10,15 @@
* GNU General Public License for more details.
*/
-#include "linux/ipa.h"
-#include "linux/rndis_ipa.h"
-#include "linux/ecm_ipa.h"
-#include "ipa_i.h"
#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/ipa.h>
+#include <linux/ipa_usb.h>
+#include <linux/rndis_ipa.h>
+#include <linux/ecm_ipa.h>
+#include "../ipa_v3/ipa_i.h"
#define IPA_USB_RM_TIMEOUT_MSEC 10000
#define IPA_USB_DEV_READY_TIMEOUT_MSEC 10000
@@ -25,11 +29,57 @@
#define IPA_USB_DL_CHAN_LOW_WEIGHT 0x5
#define IPA_USB_UL_CHAN_LOW_WEIGHT 0x4
+#define IPA_USB_MAX_MSG_LEN 4096
+
#define IPA_USB_DRV_NAME "ipa_usb"
+
+#define IPA_USB_IPC_LOG_PAGES 10
+#define IPA_USB_IPC_LOG(buf, fmt, args...) \
+ ipc_log_string((buf), \
+ DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
#define IPA_USB_DBG(fmt, args...) \
- pr_debug(IPA_USB_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+ do { \
+ pr_debug(IPA_USB_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ if (ipa3_usb_ctx) { \
+ IPA_USB_IPC_LOG(ipa3_usb_ctx->logbuf, fmt, ## args); \
+ IPA_USB_IPC_LOG(ipa3_usb_ctx->logbuf_low, \
+ fmt, ## args); \
+ } \
+ } while (0)
+
+#define IPA_USB_DBG_LOW(fmt, args...) \
+ do { \
+ pr_debug(IPA_USB_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ if (ipa3_usb_ctx && \
+ ipa3_usb_ctx->enable_low_prio_print) { \
+ IPA_USB_IPC_LOG(ipa3_usb_ctx->logbuf_low, \
+ fmt, ## args); \
+ } \
+ } while (0)
+
#define IPA_USB_ERR(fmt, args...) \
- pr_err(IPA_USB_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+ do { \
+ pr_err(IPA_USB_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ if (ipa3_usb_ctx) { \
+ IPA_USB_IPC_LOG(ipa3_usb_ctx->logbuf, fmt, ## args); \
+ IPA_USB_IPC_LOG(ipa3_usb_ctx->logbuf_low, \
+ fmt, ## args); \
+ } \
+ } while (0)
+
+#define IPA_USB_INFO(fmt, args...) \
+ do { \
+ pr_info(IPA_USB_DRV_NAME " %s:%d " fmt, \
+ __func__, __LINE__, ## args); \
+ if (ipa3_usb_ctx) { \
+ IPA_USB_IPC_LOG(ipa3_usb_ctx->logbuf, fmt, ## args); \
+ IPA_USB_IPC_LOG(ipa3_usb_ctx->logbuf_low, \
+ fmt, ## args); \
+ } \
+ } while (0)
struct ipa_usb_xdci_connect_params_internal {
enum ipa_usb_max_usb_packet_size max_pkt_size;
@@ -71,7 +121,6 @@ struct ipa3_usb_rm_context {
bool prod_valid;
bool cons_valid;
struct completion prod_comp;
- struct completion cons_comp;
enum ipa3_usb_cons_state cons_state;
/* consumer was requested*/
bool cons_requested;
@@ -90,31 +139,59 @@ enum ipa3_usb_state {
IPA_USB_RESUME_IN_PROGRESS
};
+enum ipa3_usb_transport_type {
+ IPA_USB_TRANSPORT_TETH,
+ IPA_USB_TRANSPORT_DPL,
+ IPA_USB_TRANSPORT_MAX
+};
+
+/* Get transport type from tethering protocol */
+#define IPA3_USB_GET_TTYPE(__teth_prot) \
+ (((__teth_prot) == IPA_USB_DIAG) ? \
+ IPA_USB_TRANSPORT_DPL : IPA_USB_TRANSPORT_TETH)
+
+/* Is the given transport type DPL? */
+#define IPA3_USB_IS_TTYPE_DPL(__ttype) \
+ ((__ttype) == IPA_USB_TRANSPORT_DPL)
+
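As a quick reference (illustration only, not part of the patch), the helpers
above map tethering protocols to transport types as follows:

	/*
	 * IPA3_USB_GET_TTYPE(IPA_USB_DIAG)  -> IPA_USB_TRANSPORT_DPL
	 * IPA3_USB_GET_TTYPE(IPA_USB_RNDIS) -> IPA_USB_TRANSPORT_TETH
	 * (likewise ECM, RMNET and MBIM, i.e. every non-DIAG protocol)
	 */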
struct finish_suspend_work_context {
struct work_struct work;
+ enum ipa3_usb_transport_type ttype;
u32 dl_clnt_hdl;
u32 ul_clnt_hdl;
};
+/**
+ * Transport type - could be either data tethering or DPL
+ * Each transport has its own RM resources and statuses
+ */
+struct ipa3_usb_transport_type_ctx {
+ struct ipa3_usb_rm_context rm_ctx;
+ int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data);
+ void *user_data;
+ enum ipa3_usb_state state;
+ struct finish_suspend_work_context finish_suspend_work;
+};
+
struct ipa3_usb_context {
struct ipa3_usb_teth_prot_context
teth_prot_ctx[IPA_USB_MAX_TETH_PROT_SIZE];
- int num_init_prot;
- enum ipa3_usb_teth_prot_state teth_bridge_state;
+ int num_init_prot; /* without dpl */
struct teth_bridge_init_params teth_bridge_params;
struct completion dev_ready_comp;
- struct ipa3_usb_rm_context rm_ctx;
- int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data);
- void *user_data;
- void *diag_user_data;
u32 qmi_req_id;
- enum ipa3_usb_state state;
- enum ipa3_usb_state diag_state;
spinlock_t state_lock;
bool dl_data_pending;
struct workqueue_struct *wq;
- struct finish_suspend_work_context finish_suspend_work;
struct mutex general_mutex;
+ struct ipa3_usb_transport_type_ctx
+ ttype_ctx[IPA_USB_TRANSPORT_MAX];
+ struct dentry *dfile_state_info;
+ struct dentry *dent;
+ struct dentry *dfile_enable_low_prio;
+ void *logbuf;
+ void *logbuf_low;
+ u32 enable_low_prio_print;
};
enum ipa3_usb_op {
@@ -128,15 +205,56 @@ enum ipa3_usb_op {
IPA_USB_RESUME
};
+struct ipa3_usb_status_dbg_info {
+ const char *teth_state;
+ const char *dpl_state;
+ int num_init_prot;
+ const char *inited_prots[IPA_USB_MAX_TETH_PROT_SIZE];
+ const char *teth_connected_prot;
+ const char *dpl_connected_prot;
+ const char *teth_cons_state;
+ const char *dpl_cons_state;
+};
+
static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work);
+static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work);
static void ipa3_usb_wq_notify_suspend_completed(struct work_struct *work);
+static void ipa3_usb_wq_dpl_notify_suspend_completed(struct work_struct *work);
static DECLARE_WORK(ipa3_usb_notify_remote_wakeup_work,
ipa3_usb_wq_notify_remote_wakeup);
+static DECLARE_WORK(ipa3_usb_dpl_notify_remote_wakeup_work,
+ ipa3_usb_wq_dpl_notify_remote_wakeup);
static DECLARE_WORK(ipa3_usb_notify_suspend_completed_work,
ipa3_usb_wq_notify_suspend_completed);
+static DECLARE_WORK(ipa3_usb_dpl_notify_suspend_completed_work,
+ ipa3_usb_wq_dpl_notify_suspend_completed);
struct ipa3_usb_context *ipa3_usb_ctx;
+static char *ipa3_usb_op_to_string(enum ipa3_usb_op op)
+{
+ switch (op) {
+ case IPA_USB_INIT_TETH_PROT:
+ return "IPA_USB_INIT_TETH_PROT";
+ case IPA_USB_REQUEST_CHANNEL:
+ return "IPA_USB_REQUEST_CHANNEL";
+ case IPA_USB_CONNECT:
+ return "IPA_USB_CONNECT";
+ case IPA_USB_DISCONNECT:
+ return "IPA_USB_DISCONNECT";
+ case IPA_USB_RELEASE_CHANNEL:
+ return "IPA_USB_RELEASE_CHANNEL";
+ case IPA_USB_DEINIT_TETH_PROT:
+ return "IPA_USB_DEINIT_TETH_PROT";
+ case IPA_USB_SUSPEND:
+ return "IPA_USB_SUSPEND";
+ case IPA_USB_RESUME:
+ return "IPA_USB_RESUME";
+ }
+
+ return "UNSUPPORTED";
+}
+
static char *ipa3_usb_state_to_string(enum ipa3_usb_state state)
{
switch (state) {
@@ -156,240 +274,198 @@ static char *ipa3_usb_state_to_string(enum ipa3_usb_state state)
return "IPA_USB_SUSPENDED";
case IPA_USB_RESUME_IN_PROGRESS:
return "IPA_USB_RESUME_IN_PROGRESS";
- default:
- return "UNSUPPORTED";
}
- return NULL;
+ return "UNSUPPORTED";
+}
+
+static char *ipa3_usb_notify_event_to_string(enum ipa_usb_notify_event event)
+{
+ switch (event) {
+ case IPA_USB_DEVICE_READY:
+ return "IPA_USB_DEVICE_READY";
+ case IPA_USB_REMOTE_WAKEUP:
+ return "IPA_USB_REMOTE_WAKEUP";
+ case IPA_USB_SUSPEND_COMPLETED:
+ return "IPA_USB_SUSPEND_COMPLETED";
+ }
+
+ return "UNSUPPORTED";
}
-static bool ipa3_usb_set_state(enum ipa3_usb_state new_state)
+static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
+ enum ipa3_usb_transport_type ttype)
{
unsigned long flags;
int state_legal = false;
+ enum ipa3_usb_state state;
+ struct ipa3_usb_rm_context *rm_ctx;
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ state = ipa3_usb_ctx->ttype_ctx[ttype].state;
switch (new_state) {
case IPA_USB_INVALID:
- if (ipa3_usb_ctx->state == IPA_USB_INITIALIZED)
+ if (state == IPA_USB_INITIALIZED)
state_legal = true;
break;
case IPA_USB_INITIALIZED:
- if (ipa3_usb_ctx->state == IPA_USB_STOPPED ||
- ipa3_usb_ctx->state == IPA_USB_INVALID ||
- ipa3_usb_ctx->state == IPA_USB_INITIALIZED)
+ if (state == IPA_USB_STOPPED || state == IPA_USB_INVALID ||
+ ((!IPA3_USB_IS_TTYPE_DPL(ttype)) &&
+ (state == IPA_USB_INITIALIZED)))
state_legal = true;
break;
case IPA_USB_CONNECTED:
- if (ipa3_usb_ctx->state == IPA_USB_INITIALIZED ||
- ipa3_usb_ctx->state == IPA_USB_STOPPED ||
- ipa3_usb_ctx->state == IPA_USB_RESUME_IN_PROGRESS ||
- ipa3_usb_ctx->state == IPA_USB_SUSPEND_REQUESTED)
+ if (state == IPA_USB_INITIALIZED ||
+ state == IPA_USB_STOPPED ||
+ state == IPA_USB_RESUME_IN_PROGRESS ||
+ /*
+ * In case of failure during suspend request
+ * handling, state is reverted to connected.
+ */
+ (err_permit && state == IPA_USB_SUSPEND_REQUESTED) ||
+ /*
+ * In case of failure during suspend completion
+ * handling, state is reverted to connected.
+ */
+ (err_permit && state == IPA_USB_SUSPEND_IN_PROGRESS))
state_legal = true;
- if (ipa3_usb_ctx->diag_state == IPA_USB_SUSPEND_REQUESTED ||
- (ipa3_usb_ctx->diag_state ==
- IPA_USB_SUSPEND_IN_PROGRESS) ||
- ipa3_usb_ctx->diag_state == IPA_USB_SUSPENDED ||
- (ipa3_usb_ctx->diag_state ==
- IPA_USB_RESUME_IN_PROGRESS)) {
- state_legal = false;
- IPA_USB_ERR("Cannot connect while DPL in Suspend\n");
- }
-
- if (state_legal) {
- if ((ipa3_usb_ctx->rm_ctx.cons_state ==
- IPA_USB_CONS_GRANTED) ||
- ipa3_usb_ctx->rm_ctx.cons_requested_released) {
- ipa3_usb_ctx->rm_ctx.cons_requested = false;
- ipa3_usb_ctx->rm_ctx.cons_requested_released =
- false;
- }
- /* Notify RM that consumer is granted */
- if (ipa3_usb_ctx->rm_ctx.cons_requested) {
- ipa3_rm_notify_completion(
- IPA_RM_RESOURCE_GRANTED,
- ipa3_usb_ctx->rm_ctx.cons_params.name);
- ipa3_usb_ctx->rm_ctx.cons_state =
- IPA_USB_CONS_GRANTED;
- ipa3_usb_ctx->rm_ctx.cons_requested = false;
- }
- }
break;
case IPA_USB_STOPPED:
- if (ipa3_usb_ctx->state == IPA_USB_SUSPEND_IN_PROGRESS ||
- ipa3_usb_ctx->state == IPA_USB_CONNECTED ||
- ipa3_usb_ctx->state == IPA_USB_SUSPENDED)
+ if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
+ state == IPA_USB_CONNECTED ||
+ state == IPA_USB_SUSPENDED)
state_legal = true;
break;
case IPA_USB_SUSPEND_REQUESTED:
- if (ipa3_usb_ctx->state == IPA_USB_CONNECTED)
+ if (state == IPA_USB_CONNECTED)
state_legal = true;
break;
case IPA_USB_SUSPEND_IN_PROGRESS:
- if (ipa3_usb_ctx->state == IPA_USB_SUSPEND_REQUESTED)
+ if (state == IPA_USB_SUSPEND_REQUESTED ||
+ /*
+ * In case of failure during resume, state is reverted
+ * to original, which could be suspend_in_progress.
+ * Allow it.
+ */
+ (err_permit && state == IPA_USB_RESUME_IN_PROGRESS))
state_legal = true;
break;
case IPA_USB_SUSPENDED:
- if (ipa3_usb_ctx->state == IPA_USB_SUSPEND_REQUESTED ||
- ipa3_usb_ctx->state == IPA_USB_SUSPEND_IN_PROGRESS)
+ if (state == IPA_USB_SUSPEND_REQUESTED ||
+ state == IPA_USB_SUSPEND_IN_PROGRESS ||
+ /*
+ * In case of failure during resume, state is reverted
+ * to original, which could be suspended. Allow it
+ */
+ (err_permit && state == IPA_USB_RESUME_IN_PROGRESS))
state_legal = true;
break;
case IPA_USB_RESUME_IN_PROGRESS:
- if (ipa3_usb_ctx->state == IPA_USB_SUSPEND_IN_PROGRESS ||
- ipa3_usb_ctx->state == IPA_USB_SUSPENDED)
+ if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
+ state == IPA_USB_SUSPENDED)
state_legal = true;
break;
default:
state_legal = false;
break;
+
}
if (state_legal) {
- if (ipa3_usb_ctx->state != new_state) {
- IPA_USB_DBG("ipa_usb state changed %s -> %s\n",
- ipa3_usb_state_to_string(ipa3_usb_ctx->state),
+ if (state != new_state) {
+ IPA_USB_DBG("ipa_usb %s state changed %s -> %s\n",
+ IPA3_USB_IS_TTYPE_DPL(ttype) ? "DPL" : "",
+ ipa3_usb_state_to_string(state),
ipa3_usb_state_to_string(new_state));
- ipa3_usb_ctx->state = new_state;
+ ipa3_usb_ctx->ttype_ctx[ttype].state = new_state;
}
} else {
IPA_USB_ERR("invalid state change %s -> %s\n",
- ipa3_usb_state_to_string(ipa3_usb_ctx->state),
+ ipa3_usb_state_to_string(state),
ipa3_usb_state_to_string(new_state));
}
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- return state_legal;
-}
-static bool ipa3_usb_set_diag_state(enum ipa3_usb_state new_state)
-{
- unsigned long flags;
- int state_legal = false;
-
- spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- IPA_USB_DBG("current diag state = %s\n",
- ipa3_usb_state_to_string(ipa3_usb_ctx->diag_state));
- switch (new_state) {
- case IPA_USB_INVALID:
- if (ipa3_usb_ctx->diag_state == IPA_USB_INITIALIZED)
- state_legal = true;
- break;
- case IPA_USB_INITIALIZED:
- if (ipa3_usb_ctx->diag_state == IPA_USB_INVALID ||
- ipa3_usb_ctx->diag_state == IPA_USB_STOPPED)
- state_legal = true;
- break;
- case IPA_USB_CONNECTED:
- if (ipa3_usb_ctx->diag_state == IPA_USB_INITIALIZED ||
- ipa3_usb_ctx->diag_state == IPA_USB_STOPPED)
- state_legal = true;
- if (ipa3_usb_ctx->state == IPA_USB_SUSPEND_REQUESTED ||
- ipa3_usb_ctx->state == IPA_USB_SUSPEND_IN_PROGRESS ||
- ipa3_usb_ctx->state == IPA_USB_SUSPENDED ||
- ipa3_usb_ctx->state == IPA_USB_RESUME_IN_PROGRESS) {
- state_legal = false;
- IPA_USB_ERR("Cannot connect DPL while in suspend\n");
+ if (state_legal && (new_state == IPA_USB_CONNECTED)) {
+ rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+ if ((rm_ctx->cons_state == IPA_USB_CONS_GRANTED) ||
+ rm_ctx->cons_requested_released) {
+ rm_ctx->cons_requested = false;
+ rm_ctx->cons_requested_released =
+ false;
}
- break;
- case IPA_USB_STOPPED:
- if (ipa3_usb_ctx->diag_state == IPA_USB_CONNECTED)
- state_legal = true;
- break;
- default:
- state_legal = false;
- break;
- }
- if (state_legal) {
- if (ipa3_usb_ctx->diag_state != new_state) {
- IPA_USB_DBG("DIAG state changed %s -> %s\n",
- ipa3_usb_state_to_string(
- ipa3_usb_ctx->diag_state),
- ipa3_usb_state_to_string(new_state));
- ipa3_usb_ctx->diag_state = new_state;
+ /* Notify RM that consumer is granted */
+ if (rm_ctx->cons_requested) {
+ ipa3_rm_notify_completion(
+ IPA_RM_RESOURCE_GRANTED,
+ rm_ctx->cons_params.name);
+ rm_ctx->cons_state = IPA_USB_CONS_GRANTED;
+ rm_ctx->cons_requested = false;
}
- } else {
- IPA_USB_ERR("invalid DIAG state change %s -> %s\n",
- ipa3_usb_state_to_string(ipa3_usb_ctx->diag_state),
- ipa3_usb_state_to_string(new_state));
}
+
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
return state_legal;
}
-static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op, bool is_diag)
+static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op,
+ enum ipa3_usb_transport_type ttype)
{
unsigned long flags;
bool is_legal = false;
+ enum ipa3_usb_state state;
+ bool is_dpl;
if (ipa3_usb_ctx == NULL) {
IPA_USB_ERR("ipa_usb_ctx is not initialized!\n");
return false;
}
+ is_dpl = IPA3_USB_IS_TTYPE_DPL(ttype);
+
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ state = ipa3_usb_ctx->ttype_ctx[ttype].state;
switch (op) {
case IPA_USB_INIT_TETH_PROT:
- if (!is_diag && (ipa3_usb_ctx->state == IPA_USB_INVALID ||
- ipa3_usb_ctx->state == IPA_USB_INITIALIZED))
- is_legal = true;
- if (is_diag && ipa3_usb_ctx->diag_state == IPA_USB_INVALID)
+ if (state == IPA_USB_INVALID ||
+ (!is_dpl && state == IPA_USB_INITIALIZED))
is_legal = true;
break;
case IPA_USB_REQUEST_CHANNEL:
- if (!is_diag && ipa3_usb_ctx->state == IPA_USB_INITIALIZED)
- is_legal = true;
- if (is_diag && ipa3_usb_ctx->diag_state == IPA_USB_INITIALIZED)
+ if (state == IPA_USB_INITIALIZED)
is_legal = true;
break;
case IPA_USB_CONNECT:
- if (!is_diag && (ipa3_usb_ctx->state == IPA_USB_INITIALIZED ||
- ipa3_usb_ctx->state == IPA_USB_STOPPED) &&
- (ipa3_usb_ctx->diag_state !=
- IPA_USB_SUSPEND_REQUESTED) &&
- (ipa3_usb_ctx->diag_state !=
- IPA_USB_SUSPEND_IN_PROGRESS) &&
- (ipa3_usb_ctx->diag_state != IPA_USB_SUSPENDED) &&
- (ipa3_usb_ctx->diag_state !=
- IPA_USB_RESUME_IN_PROGRESS))
- is_legal = true;
- if (is_diag && (ipa3_usb_ctx->diag_state == IPA_USB_INITIALIZED
- || ipa3_usb_ctx->diag_state == IPA_USB_STOPPED) &&
- ipa3_usb_ctx->state != IPA_USB_SUSPEND_REQUESTED &&
- ipa3_usb_ctx->state != IPA_USB_SUSPEND_IN_PROGRESS &&
- ipa3_usb_ctx->state != IPA_USB_SUSPENDED &&
- ipa3_usb_ctx->state != IPA_USB_RESUME_IN_PROGRESS)
+ if (state == IPA_USB_INITIALIZED || state == IPA_USB_STOPPED)
is_legal = true;
break;
case IPA_USB_DISCONNECT:
- if (!is_diag && (ipa3_usb_ctx->state == IPA_USB_CONNECTED ||
- ipa3_usb_ctx->state == IPA_USB_SUSPEND_IN_PROGRESS ||
- ipa3_usb_ctx->state == IPA_USB_SUSPENDED))
- is_legal = true;
- if (is_diag &&
- ipa3_usb_ctx->diag_state == IPA_USB_CONNECTED)
+ if (state == IPA_USB_CONNECTED ||
+ state == IPA_USB_SUSPEND_IN_PROGRESS ||
+ state == IPA_USB_SUSPENDED)
is_legal = true;
break;
case IPA_USB_RELEASE_CHANNEL:
- if (!is_diag && (ipa3_usb_ctx->state == IPA_USB_STOPPED ||
- ipa3_usb_ctx->state == IPA_USB_INITIALIZED))
- is_legal = true;
- if (is_diag &&
- (ipa3_usb_ctx->diag_state == IPA_USB_STOPPED ||
- ipa3_usb_ctx->diag_state == IPA_USB_INITIALIZED))
+ /* when releasing the 1st channel, state will already have changed */
+ if (state == IPA_USB_STOPPED ||
+ (!is_dpl && state == IPA_USB_INITIALIZED))
is_legal = true;
break;
case IPA_USB_DEINIT_TETH_PROT:
- if (!is_diag && ipa3_usb_ctx->state == IPA_USB_INITIALIZED)
- is_legal = true;
- if (is_diag && ipa3_usb_ctx->diag_state == IPA_USB_INITIALIZED)
+ /*
+ * For data tethering we should always allow deinit of an inited
+ * protocol. E.g. rmnet is inited and rndis is connected.
+ * USB can deinit rmnet first and then disconnect rndis
+ * on cable disconnect.
+ */
+ if (!is_dpl || state == IPA_USB_INITIALIZED)
is_legal = true;
break;
case IPA_USB_SUSPEND:
- if (ipa3_usb_ctx->state == IPA_USB_CONNECTED &&
- ipa3_usb_ctx->diag_state != IPA_USB_CONNECTED)
+ if (state == IPA_USB_CONNECTED)
is_legal = true;
break;
case IPA_USB_RESUME:
- if (ipa3_usb_ctx->state == IPA_USB_SUSPENDED ||
- ipa3_usb_ctx->state == IPA_USB_SUSPEND_IN_PROGRESS)
+ if (state == IPA_USB_SUSPENDED ||
+ state == IPA_USB_SUSPEND_IN_PROGRESS)
is_legal = true;
break;
default:
@@ -397,105 +473,104 @@ static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op, bool is_diag)
break;
}
+ if (!is_legal) {
+ IPA_USB_ERR("Illegal %s operation: state=%s operation=%s\n",
+ is_dpl ? "DPL" : "",
+ ipa3_usb_state_to_string(state),
+ ipa3_usb_op_to_string(op));
+ }
+
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
return is_legal;
}
-int ipa3_usb_init(void)
+static void ipa3_usb_notify_do(enum ipa3_usb_transport_type ttype,
+ enum ipa_usb_notify_event event)
{
- int i;
- unsigned long flags;
-
- IPA_USB_DBG("ipa3_usb_init: entry\n");
- ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL);
- if (ipa3_usb_ctx == NULL) {
- IPA_USB_ERR("failed to allocate memory\n");
- return -EFAULT;
- }
-
- memset(ipa3_usb_ctx, 0, sizeof(struct ipa3_usb_context));
-
- for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++)
- ipa3_usb_ctx->teth_prot_ctx[i].state =
- IPA_USB_TETH_PROT_INVALID;
- ipa3_usb_ctx->num_init_prot = 0;
- ipa3_usb_ctx->teth_bridge_state = IPA_USB_TETH_PROT_INVALID;
- init_completion(&ipa3_usb_ctx->dev_ready_comp);
-
- ipa3_usb_ctx->rm_ctx.prod_valid = false;
- ipa3_usb_ctx->rm_ctx.cons_valid = false;
- init_completion(&ipa3_usb_ctx->rm_ctx.prod_comp);
- init_completion(&ipa3_usb_ctx->rm_ctx.cons_comp);
-
- ipa3_usb_ctx->qmi_req_id = 0;
- spin_lock_init(&ipa3_usb_ctx->state_lock);
- ipa3_usb_ctx->dl_data_pending = false;
- mutex_init(&ipa3_usb_ctx->general_mutex);
- spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- ipa3_usb_ctx->state = IPA_USB_INVALID;
- ipa3_usb_ctx->diag_state = IPA_USB_INVALID;
- ipa3_usb_ctx->rm_ctx.cons_state = IPA_USB_CONS_RELEASED;
- ipa3_usb_ctx->user_data = NULL;
- ipa3_usb_ctx->diag_user_data = NULL;
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
- ipa3_usb_ctx->wq = create_singlethread_workqueue("ipa_usb_wq");
- if (!ipa3_usb_ctx->wq) {
- IPA_USB_ERR("failed to create workqueue\n");
- kfree(ipa3_usb_ctx);
- return -EFAULT;
- }
+ int (*cb)(enum ipa_usb_notify_event, void *user_data);
+ void *user_data;
+ int res;
- IPA_USB_DBG("ipa3_usb_init: exit\n");
+ IPA_USB_DBG_LOW("Trying to notify USB with %s\n",
+ ipa3_usb_notify_event_to_string(event));
- return 0;
-}
+ cb = ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb;
+ user_data = ipa3_usb_ctx->ttype_ctx[ttype].user_data;
-static void ipa3_usb_notify_device_ready(void *user_data)
-{
- if (ipa3_usb_ctx->ipa_usb_notify_cb) {
- ipa3_usb_ctx->ipa_usb_notify_cb(IPA_USB_DEVICE_READY,
- user_data);
- IPA_USB_DBG("invoked device_ready CB\n");
+ if (cb) {
+ res = cb(event, user_data);
+ IPA_USB_DBG_LOW("Notified USB with %s. is_dpl=%d result=%d\n",
+ ipa3_usb_notify_event_to_string(event),
+ IPA3_USB_IS_TTYPE_DPL(ttype), res);
}
}
+/*
+ * This call-back is called from ECM or RNDIS drivers.
+ * Both drivers are data tethering drivers and not DPL
+ */
void ipa3_usb_device_ready_notify_cb(void)
{
- IPA_USB_DBG("ipa3_usb_device_ready_notify_cb: entry\n");
- ipa3_usb_notify_device_ready(ipa3_usb_ctx->user_data);
- IPA_USB_DBG("ipa3_usb_device_ready_notify_cb: exit\n");
+ IPA_USB_DBG_LOW("entry\n");
+ ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH,
+ IPA_USB_DEVICE_READY);
+ IPA_USB_DBG_LOW("exit\n");
}
-void ipa3_usb_prod_notify_cb(void *user_data, enum ipa_rm_event event,
- unsigned long data)
+static void ipa3_usb_prod_notify_cb_do(enum ipa_rm_event event,
+ enum ipa3_usb_transport_type ttype)
{
- IPA_USB_DBG("ipa3_usb_prod_notify_cb: entry\n");
+ struct ipa3_usb_rm_context *rm_ctx;
+
+ IPA_USB_DBG_LOW("entry\n");
+
+ rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+
switch (event) {
case IPA_RM_RESOURCE_GRANTED:
- IPA_USB_DBG(":USB_PROD granted\n");
- complete_all(&ipa3_usb_ctx->rm_ctx.prod_comp);
+ IPA_USB_DBG(":%s granted\n",
+ ipa3_rm_resource_str(rm_ctx->prod_params.name));
+ complete_all(&rm_ctx->prod_comp);
break;
case IPA_RM_RESOURCE_RELEASED:
- IPA_USB_DBG(":USB_PROD released\n");
- complete_all(&ipa3_usb_ctx->rm_ctx.prod_comp);
+ IPA_USB_DBG(":%s released\n",
+ ipa3_rm_resource_str(rm_ctx->prod_params.name));
+ complete_all(&rm_ctx->prod_comp);
break;
}
- IPA_USB_DBG("ipa3_usb_prod_notify_cb: exit\n");
+ IPA_USB_DBG_LOW("exit\n");
+}
+
+static void ipa3_usb_prod_notify_cb(void *user_data, enum ipa_rm_event event,
+ unsigned long data)
+{
+ ipa3_usb_prod_notify_cb_do(event, IPA_USB_TRANSPORT_TETH);
+}
+
+static void ipa3_usb_dpl_dummy_prod_notify_cb(void *user_data,
+ enum ipa_rm_event event, unsigned long data)
+{
+ ipa3_usb_prod_notify_cb_do(event, IPA_USB_TRANSPORT_TETH);
}
static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work)
{
- ipa3_usb_ctx->ipa_usb_notify_cb(IPA_USB_REMOTE_WAKEUP,
- ipa3_usb_ctx->user_data);
- IPA_USB_DBG("invoked remote wakeup event\n");
+ ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH, IPA_USB_REMOTE_WAKEUP);
+}
+
+static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work)
+{
+ ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_REMOTE_WAKEUP);
}
static void ipa3_usb_wq_notify_suspend_completed(struct work_struct *work)
{
- ipa3_usb_ctx->ipa_usb_notify_cb(IPA_USB_SUSPEND_COMPLETED,
- ipa3_usb_ctx->user_data);
- IPA_USB_DBG("invoked suspend completed event\n");
+ ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH, IPA_USB_SUSPEND_COMPLETED);
+}
+
+static void ipa3_usb_wq_dpl_notify_suspend_completed(struct work_struct *work)
+{
+ ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_SUSPEND_COMPLETED);
}
static void ipa3_usb_wq_finish_suspend_work(struct work_struct *work)
@@ -503,129 +578,181 @@ static void ipa3_usb_wq_finish_suspend_work(struct work_struct *work)
struct finish_suspend_work_context *finish_suspend_work_ctx;
unsigned long flags;
int result = -EFAULT;
+ struct ipa3_usb_transport_type_ctx *tctx;
mutex_lock(&ipa3_usb_ctx->general_mutex);
- IPA_USB_DBG("ipa3_usb_wq_finish_suspend_work: entry\n");
+ IPA_USB_DBG_LOW("entry\n");
finish_suspend_work_ctx = container_of(work,
struct finish_suspend_work_context, work);
+ tctx = &ipa3_usb_ctx->ttype_ctx[finish_suspend_work_ctx->ttype];
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- if (ipa3_usb_ctx->state != IPA_USB_SUSPEND_IN_PROGRESS) {
+ if (tctx->state != IPA_USB_SUSPEND_IN_PROGRESS) {
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return;
}
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- /* Stop DL channel */
+ /* Stop DL/DPL channel */
result = ipa3_stop_gsi_channel(finish_suspend_work_ctx->dl_clnt_hdl);
if (result) {
- IPAERR("Error stopping DL channel: %d, resuming channel\n",
+ IPAERR("Error stopping DL/DPL channel: %d, resuming channel\n",
result);
ipa3_xdci_resume(finish_suspend_work_ctx->ul_clnt_hdl,
- finish_suspend_work_ctx->dl_clnt_hdl);
- /* Change ipa_usb state back to CONNECTED */
- if (!ipa3_usb_set_state(IPA_USB_CONNECTED))
+ finish_suspend_work_ctx->dl_clnt_hdl,
+ IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype));
+ /* Change state back to CONNECTED */
+ if (!ipa3_usb_set_state(IPA_USB_CONNECTED, true,
+ finish_suspend_work_ctx->ttype))
IPA_USB_ERR("failed to change state to connected\n");
queue_work(ipa3_usb_ctx->wq,
+ IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype) ?
+ &ipa3_usb_dpl_notify_remote_wakeup_work :
&ipa3_usb_notify_remote_wakeup_work);
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return;
}
/* Change ipa_usb state to SUSPENDED */
- if (!ipa3_usb_set_state(IPA_USB_SUSPENDED))
+ if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false,
+ finish_suspend_work_ctx->ttype))
IPA_USB_ERR("failed to change state to suspended\n");
queue_work(ipa3_usb_ctx->wq,
+ IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype) ?
+ &ipa3_usb_dpl_notify_suspend_completed_work :
&ipa3_usb_notify_suspend_completed_work);
- IPA_USB_DBG("ipa3_usb_wq_finish_suspend_work: exit\n");
+ IPA_USB_DBG_LOW("exit\n");
mutex_unlock(&ipa3_usb_ctx->general_mutex);
}
-int ipa3_usb_cons_request_resource_cb(void)
+static int ipa3_usb_cons_request_resource_cb_do(
+ enum ipa3_usb_transport_type ttype,
+ struct work_struct *remote_wakeup_work)
{
+ struct ipa3_usb_rm_context *rm_ctx;
unsigned long flags;
int result;
- IPA_USB_DBG("ipa3_usb_cons_request_resource_cb: entry\n");
+ IPA_USB_DBG_LOW("entry\n");
+ rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- switch (ipa3_usb_ctx->state) {
+ IPA_USB_DBG("state is %s\n",
+ ipa3_usb_state_to_string(
+ ipa3_usb_ctx->ttype_ctx[ttype].state));
+ switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
case IPA_USB_CONNECTED:
- ipa3_usb_ctx->rm_ctx.cons_state = IPA_USB_CONS_GRANTED;
+ rm_ctx->cons_state = IPA_USB_CONS_GRANTED;
result = 0;
break;
case IPA_USB_SUSPEND_REQUESTED:
- ipa3_usb_ctx->rm_ctx.cons_requested = true;
- if (ipa3_usb_ctx->rm_ctx.cons_state == IPA_USB_CONS_GRANTED)
+ rm_ctx->cons_requested = true;
+ if (rm_ctx->cons_state == IPA_USB_CONS_GRANTED)
result = 0;
else
result = -EINPROGRESS;
break;
case IPA_USB_SUSPEND_IN_PROGRESS:
- ipa3_usb_ctx->rm_ctx.cons_requested = true;
- queue_work(ipa3_usb_ctx->wq,
- &ipa3_usb_notify_remote_wakeup_work);
+ /*
+ * This case happens due to suspend interrupt.
+ * CONS is granted
+ */
+ if (!rm_ctx->cons_requested) {
+ rm_ctx->cons_requested = true;
+ queue_work(ipa3_usb_ctx->wq, remote_wakeup_work);
+ }
result = 0;
break;
case IPA_USB_SUSPENDED:
- ipa3_usb_ctx->rm_ctx.cons_requested = true;
- queue_work(ipa3_usb_ctx->wq,
- &ipa3_usb_notify_remote_wakeup_work);
+ if (!rm_ctx->cons_requested) {
+ rm_ctx->cons_requested = true;
+ queue_work(ipa3_usb_ctx->wq, remote_wakeup_work);
+ }
result = -EINPROGRESS;
break;
default:
- ipa3_usb_ctx->rm_ctx.cons_requested = true;
+ rm_ctx->cons_requested = true;
result = -EINPROGRESS;
break;
}
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- IPA_USB_DBG("ipa3_usb_cons_request_resource_cb: exit with %d\n",
- result);
+ IPA_USB_DBG_LOW("exit with %d\n", result);
return result;
}
-int ipa3_usb_cons_release_resource_cb(void)
+static int ipa3_usb_cons_request_resource_cb(void)
+{
+ return ipa3_usb_cons_request_resource_cb_do(IPA_USB_TRANSPORT_TETH,
+ &ipa3_usb_notify_remote_wakeup_work);
+}
+
+static int ipa3_usb_dpl_cons_request_resource_cb(void)
+{
+ return ipa3_usb_cons_request_resource_cb_do(IPA_USB_TRANSPORT_DPL,
+ &ipa3_usb_dpl_notify_remote_wakeup_work);
+}
+
+static int ipa3_usb_cons_release_resource_cb_do(
+ enum ipa3_usb_transport_type ttype)
{
unsigned long flags;
+ struct ipa3_usb_rm_context *rm_ctx;
- IPA_USB_DBG("ipa3_usb_cons_release_resource_cb: entry\n");
+ IPA_USB_DBG_LOW("entry\n");
+ rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- switch (ipa3_usb_ctx->state) {
+ IPA_USB_DBG("state is %s\n",
+ ipa3_usb_state_to_string(
+ ipa3_usb_ctx->ttype_ctx[ttype].state));
+ switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
case IPA_USB_SUSPEND_IN_PROGRESS:
- if (ipa3_usb_ctx->rm_ctx.cons_requested)
- ipa3_usb_ctx->rm_ctx.cons_requested_released = true;
+ /* Proceed with the suspend if no DL/DPL data */
+ if (rm_ctx->cons_requested)
+ rm_ctx->cons_requested_released = true;
else {
queue_work(ipa3_usb_ctx->wq,
- &ipa3_usb_ctx->finish_suspend_work.work);
+ &ipa3_usb_ctx->ttype_ctx[ttype].
+ finish_suspend_work.work);
}
break;
case IPA_USB_SUSPEND_REQUESTED:
- if (ipa3_usb_ctx->rm_ctx.cons_requested)
- ipa3_usb_ctx->rm_ctx.cons_requested_released = true;
+ if (rm_ctx->cons_requested)
+ rm_ctx->cons_requested_released = true;
break;
case IPA_USB_STOPPED:
case IPA_USB_RESUME_IN_PROGRESS:
- if (ipa3_usb_ctx->rm_ctx.cons_requested)
- ipa3_usb_ctx->rm_ctx.cons_requested = false;
+ if (rm_ctx->cons_requested)
+ rm_ctx->cons_requested = false;
break;
case IPA_USB_CONNECTED:
case IPA_USB_INITIALIZED:
break;
default:
IPA_USB_ERR("received cons_release_cb in bad state: %s!\n",
- ipa3_usb_state_to_string(ipa3_usb_ctx->state));
+ ipa3_usb_state_to_string(
+ ipa3_usb_ctx->ttype_ctx[ttype].state));
WARN_ON(1);
break;
}
- ipa3_usb_ctx->rm_ctx.cons_state = IPA_USB_CONS_RELEASED;
+ rm_ctx->cons_state = IPA_USB_CONS_RELEASED;
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- IPA_USB_DBG("ipa3_usb_cons_release_resource_cb: exit\n");
+ IPA_USB_DBG_LOW("exit\n");
return 0;
}
+static int ipa3_usb_cons_release_resource_cb(void)
+{
+ return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_TETH);
+}
+
+static int ipa3_usb_dpl_cons_release_resource_cb(void)
+{
+ return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_DPL);
+}
+
static char *ipa3_usb_teth_prot_to_string(enum ipa_usb_teth_prot teth_prot)
{
switch (teth_prot) {
@@ -637,12 +764,12 @@ static char *ipa3_usb_teth_prot_to_string(enum ipa_usb_teth_prot teth_prot)
case IPA_USB_MBIM:
return "teth_bridge";
case IPA_USB_DIAG:
- return "diag";
+ return "dpl";
default:
- return "unsupported";
+ break;
}
- return NULL;
+ return "unsupported";
}
static char *ipa3_usb_teth_bridge_prot_to_string(
@@ -654,39 +781,106 @@ static char *ipa3_usb_teth_bridge_prot_to_string(
case IPA_USB_MBIM:
return "mbim";
default:
- return "unsupported";
+ break;
}
- return NULL;
+ return "unsupported";
}
static int ipa3_usb_init_teth_bridge(void)
{
int result;
- if (ipa3_usb_ctx->teth_bridge_state != IPA_USB_TETH_PROT_INVALID)
- return 0;
-
result = teth_bridge_init(&ipa3_usb_ctx->teth_bridge_params);
if (result) {
IPA_USB_ERR("Failed to initialize teth_bridge.\n");
return result;
}
- ipa3_usb_ctx->teth_bridge_state = IPA_USB_TETH_PROT_INITIALIZED;
return 0;
}
-int ipa3_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
+static int ipa3_usb_create_rm_resources(enum ipa3_usb_transport_type ttype)
+{
+ struct ipa3_usb_rm_context *rm_ctx;
+ int result = -EFAULT;
+ bool created = false;
+
+ rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+
+ /* create PROD */
+ if (!rm_ctx->prod_valid) {
+ rm_ctx->prod_params.name = IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD :
+ IPA_RM_RESOURCE_USB_PROD;
+ rm_ctx->prod_params.floor_voltage = IPA_VOLTAGE_SVS;
+ rm_ctx->prod_params.reg_params.user_data = NULL;
+ rm_ctx->prod_params.reg_params.notify_cb =
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ ipa3_usb_dpl_dummy_prod_notify_cb :
+ ipa3_usb_prod_notify_cb;
+ rm_ctx->prod_params.request_resource = NULL;
+ rm_ctx->prod_params.release_resource = NULL;
+ result = ipa_rm_create_resource(&rm_ctx->prod_params);
+ if (result) {
+ IPA_USB_ERR("Failed to create %s RM resource\n",
+ ipa3_rm_resource_str(rm_ctx->prod_params.name));
+ return result;
+ }
+ rm_ctx->prod_valid = true;
+ created = true;
+ IPA_USB_DBG("Created %s RM resource\n",
+ ipa3_rm_resource_str(rm_ctx->prod_params.name));
+ }
+
+ /* Create CONS */
+ if (!rm_ctx->cons_valid) {
+ rm_ctx->cons_params.name = IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ IPA_RM_RESOURCE_USB_DPL_CONS :
+ IPA_RM_RESOURCE_USB_CONS;
+ rm_ctx->cons_params.floor_voltage = IPA_VOLTAGE_SVS;
+ rm_ctx->cons_params.reg_params.user_data = NULL;
+ rm_ctx->cons_params.reg_params.notify_cb = NULL;
+ rm_ctx->cons_params.request_resource =
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ ipa3_usb_dpl_cons_request_resource_cb :
+ ipa3_usb_cons_request_resource_cb;
+ rm_ctx->cons_params.release_resource =
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ ipa3_usb_dpl_cons_release_resource_cb :
+ ipa3_usb_cons_release_resource_cb;
+ result = ipa_rm_create_resource(&rm_ctx->cons_params);
+ if (result) {
+ IPA_USB_ERR("Failed to create %s RM resource\n",
+ ipa3_rm_resource_str(rm_ctx->cons_params.name));
+ goto create_cons_rsc_fail;
+ }
+ rm_ctx->cons_valid = true;
+ IPA_USB_DBG("Created %s RM resource\n",
+ ipa3_rm_resource_str(rm_ctx->cons_params.name));
+ }
+
+ return 0;
+
+create_cons_rsc_fail:
+ if (created) {
+ rm_ctx->prod_valid = false;
+ ipa_rm_delete_resource(rm_ctx->prod_params.name);
+ }
+ return result;
+}
+
+int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
struct ipa_usb_teth_params *teth_params,
int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
void *),
void *user_data)
{
int result = -EFAULT;
+ enum ipa3_usb_transport_type ttype;
mutex_lock(&ipa3_usb_ctx->general_mutex);
- IPA_USB_DBG("entry\n");
+ IPA_USB_DBG_LOW("entry\n");
if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE ||
((teth_prot == IPA_USB_RNDIS || teth_prot == IPA_USB_ECM) &&
teth_params == NULL) || ipa_usb_notify_cb == NULL ||
@@ -696,60 +890,35 @@ int ipa3_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
goto bad_params;
}
- if (!ipa3_usb_check_legal_op(IPA_USB_INIT_TETH_PROT,
- (teth_prot == IPA_USB_DIAG))) {
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_INIT_TETH_PROT, ttype)) {
IPA_USB_ERR("Illegal operation.\n");
result = -EPERM;
goto bad_params;
}
/* Create IPA RM USB resources */
- if (!ipa3_usb_ctx->rm_ctx.prod_valid) {
- ipa3_usb_ctx->rm_ctx.prod_params.name =
- IPA_RM_RESOURCE_USB_PROD;
- ipa3_usb_ctx->rm_ctx.prod_params.floor_voltage =
- IPA_VOLTAGE_SVS;
- ipa3_usb_ctx->rm_ctx.prod_params.reg_params.user_data = NULL;
- ipa3_usb_ctx->rm_ctx.prod_params.reg_params.notify_cb =
- ipa3_usb_prod_notify_cb;
- ipa3_usb_ctx->rm_ctx.prod_params.request_resource = NULL;
- ipa3_usb_ctx->rm_ctx.prod_params.release_resource = NULL;
- result = ipa_rm_create_resource(
- &ipa3_usb_ctx->rm_ctx.prod_params);
- if (result) {
- IPA_USB_ERR("Failed to create USB_PROD RM resource.\n");
- goto bad_params;
- }
- ipa3_usb_ctx->rm_ctx.prod_valid = true;
- IPA_USB_DBG("Created USB_PROD RM resource.\n");
+ result = ipa3_usb_create_rm_resources(ttype);
+ if (result) {
+ IPA_USB_ERR("Failed creating IPA RM USB resources\n");
+ goto bad_params;
}
- if (!ipa3_usb_ctx->rm_ctx.cons_valid) {
- ipa3_usb_ctx->rm_ctx.cons_params.name =
- IPA_RM_RESOURCE_USB_CONS;
- ipa3_usb_ctx->rm_ctx.cons_params.floor_voltage =
- IPA_VOLTAGE_SVS;
- ipa3_usb_ctx->rm_ctx.cons_params.reg_params.user_data = NULL;
- ipa3_usb_ctx->rm_ctx.cons_params.reg_params.notify_cb = NULL;
- ipa3_usb_ctx->rm_ctx.cons_params.request_resource =
- ipa3_usb_cons_request_resource_cb;
- ipa3_usb_ctx->rm_ctx.cons_params.release_resource =
- ipa3_usb_cons_release_resource_cb;
- result = ipa_rm_create_resource(
- &ipa3_usb_ctx->rm_ctx.cons_params);
- if (result) {
- IPA_USB_ERR("Failed to create USB_CONS RM resource.\n");
- goto create_cons_rsc_fail;
+ if (!ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb) {
+ ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb =
+ ipa_usb_notify_cb;
+ } else if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ if (ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb !=
+ ipa_usb_notify_cb) {
+ IPA_USB_ERR("Got different notify_cb\n");
+ result = -EINVAL;
+ goto bad_params;
}
- ipa3_usb_ctx->rm_ctx.cons_valid = true;
- IPA_USB_DBG("Created USB_CONS RM resource.\n");
- }
- if (!ipa3_usb_ctx->ipa_usb_notify_cb) {
- ipa3_usb_ctx->ipa_usb_notify_cb = ipa_usb_notify_cb;
- } else if (ipa3_usb_ctx->ipa_usb_notify_cb != ipa_usb_notify_cb) {
- IPA_USB_ERR("usb_notify_cb different from the current one\n");
+ } else {
+ IPA_USB_ERR("Already has dpl_notify_cb\n");
result = -EINVAL;
- goto init_rndis_ipa_fail;
+ goto bad_params;
}
/* Initialize tethering protocol */
@@ -781,10 +950,10 @@ int ipa3_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
teth_prot_ctx[teth_prot].
teth_prot_params.rndis);
if (result) {
- IPA_USB_ERR("Failed to initialize %s.\n",
+ IPA_USB_ERR("Failed to initialize %s\n",
ipa3_usb_teth_prot_to_string(
teth_prot));
- goto init_rndis_ipa_fail;
+ goto teth_prot_init_fail;
}
} else {
ipa3_usb_ctx->teth_prot_ctx[teth_prot].
@@ -802,10 +971,10 @@ int ipa3_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
result = ecm_ipa_init(&ipa3_usb_ctx->
teth_prot_ctx[teth_prot].teth_prot_params.ecm);
if (result) {
- IPA_USB_ERR("Failed to initialize %s.\n",
+ IPA_USB_ERR("Failed to initialize %s\n",
ipa3_usb_teth_prot_to_string(
teth_prot));
- goto init_rndis_ipa_fail;
+ goto teth_prot_init_fail;
}
}
ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
@@ -816,7 +985,6 @@ int ipa3_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
break;
case IPA_USB_RMNET:
case IPA_USB_MBIM:
- case IPA_USB_DIAG:
if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
IPA_USB_TETH_PROT_INVALID) {
IPA_USB_DBG("%s already initialized\n",
@@ -827,83 +995,92 @@ int ipa3_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data;
result = ipa3_usb_init_teth_bridge();
if (result)
- goto init_rndis_ipa_fail;
+ goto teth_prot_init_fail;
ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
IPA_USB_TETH_PROT_INITIALIZED;
ipa3_usb_ctx->num_init_prot++;
IPA_USB_DBG("initialized %s %s\n",
ipa3_usb_teth_prot_to_string(teth_prot),
- (teth_prot == IPA_USB_DIAG) ? "" :
ipa3_usb_teth_bridge_prot_to_string(teth_prot));
break;
+ case IPA_USB_DIAG:
+ if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+ IPA_USB_TETH_PROT_INVALID) {
+ IPA_USB_DBG("DPL already initialized\n");
+ result = -EPERM;
+ goto bad_params;
+ }
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data;
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+ IPA_USB_TETH_PROT_INITIALIZED;
+ IPA_USB_DBG("initialized DPL\n");
+ break;
default:
IPA_USB_ERR("unexpected tethering protocol\n");
result = -EINVAL;
goto bad_params;
}
- if (teth_prot != IPA_USB_DIAG) {
- if (!ipa3_usb_set_state(IPA_USB_INITIALIZED))
- IPA_USB_ERR("failed to change state to initialized\n");
- } else {
- if (!ipa3_usb_set_diag_state(IPA_USB_INITIALIZED))
- IPA_USB_ERR("failed to change diag state to init\n");
- }
+ if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype))
+ IPA_USB_ERR("failed to change state to initialized\n");
- IPA_USB_DBG("exit\n");
+ IPA_USB_DBG_LOW("exit\n");
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return 0;
-init_rndis_ipa_fail:
- if (ipa3_usb_ctx->num_init_prot == 0) {
- ipa3_usb_ctx->rm_ctx.cons_valid = false;
- ipa_rm_delete_resource(ipa3_usb_ctx->rm_ctx.cons_params.name);
- }
-create_cons_rsc_fail:
- if (ipa3_usb_ctx->num_init_prot == 0) {
- ipa3_usb_ctx->rm_ctx.prod_valid = false;
- ipa_rm_delete_resource(ipa3_usb_ctx->rm_ctx.prod_params.name);
+teth_prot_init_fail:
+ if ((IPA3_USB_IS_TTYPE_DPL(ttype))
+ || (ipa3_usb_ctx->num_init_prot == 0)) {
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid = false;
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid = false;
+ ipa_rm_delete_resource(
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name);
+ ipa_rm_delete_resource(
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name);
}
bad_params:
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return result;
}
+EXPORT_SYMBOL(ipa_usb_init_teth_prot);
void ipa3_usb_gsi_evt_err_cb(struct gsi_evt_err_notify *notify)
{
- IPA_USB_DBG("ipa3_usb_gsi_evt_err_cb: entry\n");
+ IPA_USB_DBG_LOW("entry\n");
if (!notify)
return;
IPA_USB_ERR("Received event error %d, description: %d\n",
notify->evt_id, notify->err_desc);
- IPA_USB_DBG("ipa3_usb_gsi_evt_err_cb: exit\n");
+ IPA_USB_DBG_LOW("exit\n");
}
void ipa3_usb_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
{
- IPA_USB_DBG("ipa3_usb_gsi_chan_err_cb: entry\n");
+ IPA_USB_DBG_LOW("entry\n");
if (!notify)
return;
IPA_USB_ERR("Received channel error %d, description: %d\n",
notify->evt_id, notify->err_desc);
- IPA_USB_DBG("ipa3_usb_gsi_chan_err_cb: exit\n");
+ IPA_USB_DBG_LOW("exit\n");
}
static bool ipa3_usb_check_chan_params(struct ipa_usb_xdci_chan_params *params)
{
- IPA_USB_DBG("gevntcount_low_addr = %x\n", params->gevntcount_low_addr);
- IPA_USB_DBG("gevntcount_hi_addr = %x\n", params->gevntcount_hi_addr);
- IPA_USB_DBG("dir = %d\n", params->dir);
- IPA_USB_DBG("xfer_ring_len = %d\n", params->xfer_ring_len);
- IPA_USB_DBG("xfer_ring_base_addr = %llx\n",
+ IPA_USB_DBG_LOW("gevntcount_low_addr = %x\n",
+ params->gevntcount_low_addr);
+ IPA_USB_DBG_LOW("gevntcount_hi_addr = %x\n",
+ params->gevntcount_hi_addr);
+ IPA_USB_DBG_LOW("dir = %d\n", params->dir);
+ IPA_USB_DBG_LOW("xfer_ring_len = %d\n", params->xfer_ring_len);
+ IPA_USB_DBG_LOW("xfer_ring_base_addr = %llx\n",
params->xfer_ring_base_addr);
- IPA_USB_DBG("last_trb_addr = %x\n",
+ IPA_USB_DBG_LOW("last_trb_addr = %x\n",
params->xfer_scratch.last_trb_addr);
- IPA_USB_DBG("const_buffer_size = %d\n",
+ IPA_USB_DBG_LOW("const_buffer_size = %d\n",
params->xfer_scratch.const_buffer_size);
- IPA_USB_DBG("depcmd_low_addr = %x\n",
+ IPA_USB_DBG_LOW("depcmd_low_addr = %x\n",
params->xfer_scratch.depcmd_low_addr);
- IPA_USB_DBG("depcmd_hi_addr = %x\n",
+ IPA_USB_DBG_LOW("depcmd_hi_addr = %x\n",
params->xfer_scratch.depcmd_hi_addr);
if (params->client >= IPA_CLIENT_MAX ||
@@ -911,14 +1088,17 @@ static bool ipa3_usb_check_chan_params(struct ipa_usb_xdci_chan_params *params)
params->xfer_ring_len % GSI_CHAN_RE_SIZE_16B ||
params->xfer_scratch.const_buffer_size < 1 ||
params->xfer_scratch.const_buffer_size > 31) {
+ IPA_USB_ERR("Invalid params\n");
return false;
}
switch (params->teth_prot) {
+ case IPA_USB_DIAG:
+ if (!IPA_CLIENT_IS_CONS(params->client)) {
+ IPA_USB_ERR("DPL supports only DL channel\n");
+ return false;
+ }
case IPA_USB_RNDIS:
case IPA_USB_ECM:
- case IPA_USB_RMNET:
- case IPA_USB_MBIM:
- case IPA_USB_DIAG:
if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
IPA_USB_TETH_PROT_INVALID) {
IPA_USB_ERR("%s is not initialized\n",
@@ -926,14 +1106,21 @@ static bool ipa3_usb_check_chan_params(struct ipa_usb_xdci_chan_params *params)
params->teth_prot));
return false;
}
- if ((params->teth_prot == IPA_USB_DIAG) &&
- !IPA_CLIENT_IS_CONS(params->client)) {
- IPA_USB_ERR("DIAG supports only DL channel\n");
+ break;
+ case IPA_USB_RMNET:
+ case IPA_USB_MBIM:
+ if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
+ IPA_USB_TETH_PROT_INVALID) {
+ IPA_USB_ERR("%s is not initialized\n",
+ ipa3_usb_teth_bridge_prot_to_string(
+ params->teth_prot));
return false;
}
break;
default:
- break;
+ IPA_USB_ERR("Unknown tethering protocol (%d)\n",
+ params->teth_prot);
+ return false;
}
return true;
}
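The transport-type split is the backbone of this change: DIAG (DPL) traffic gets its own per-transport context, while RNDIS, ECM, RMNET and MBIM share the data-tethering one. The real IPA3_USB_GET_TTYPE() and IPA3_USB_IS_TTYPE_DPL() helpers are defined earlier in ipa_usb.c, outside this hunk; a minimal sketch (not part of this patch, with illustrative names) of the mapping they are assumed to implement:

enum example_usb_transport_type {
	EXAMPLE_TRANSPORT_TETH,	/* RNDIS, ECM, RMNET, MBIM */
	EXAMPLE_TRANSPORT_DPL,	/* DIAG over DPL */
	EXAMPLE_TRANSPORT_MAX
};

/* DIAG maps to the DPL transport, every other protocol to data tethering */
#define EXAMPLE_GET_TTYPE(teth_prot) \
	(((teth_prot) == IPA_USB_DIAG) ? \
		EXAMPLE_TRANSPORT_DPL : EXAMPLE_TRANSPORT_TETH)

#define EXAMPLE_IS_TTYPE_DPL(ttype) \
	((ttype) == EXAMPLE_TRANSPORT_DPL)

The same DIAG-to-DPL mapping is spelled out inline in ipa3_usb_xdci_connect_internal() below, so the sketch only restates what that ternary already encodes.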
@@ -944,17 +1131,19 @@ static int ipa3_usb_request_xdci_channel(
{
int result = -EFAULT;
struct ipa_request_gsi_channel_params chan_params;
+ enum ipa3_usb_transport_type ttype;
- IPA_USB_DBG("entry\n");
+ IPA_USB_DBG_LOW("entry\n");
if (params == NULL || out_params == NULL ||
!ipa3_usb_check_chan_params(params)) {
- IPA_USB_ERR("bad parameters.\n");
+ IPA_USB_ERR("bad parameters\n");
return -EINVAL;
}
- if (!ipa3_usb_check_legal_op(IPA_USB_REQUEST_CHANNEL,
- (params->teth_prot == IPA_USB_DIAG))) {
- IPA_USB_ERR("Illegal operation.\n");
+ ttype = IPA3_USB_GET_TTYPE(params->teth_prot);
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_REQUEST_CHANNEL, ttype)) {
+ IPA_USB_ERR("Illegal operation\n");
return -EPERM;
}
@@ -1067,51 +1256,26 @@ static int ipa3_usb_request_xdci_channel(
return result;
}
- IPA_USB_DBG("exit\n");
- return 0;
-}
-
-static int ipa3_usb_release_diag_channel(u32 clnt_hdl)
-{
- int result = 0;
-
- /* Release DIAG channel */
- result = ipa3_release_gsi_channel(clnt_hdl);
- if (result) {
- IPA_USB_ERR("failed to release DIAG channel.\n");
- return result;
- }
-
- /* Change ipa_usb_diag state to INITIALIZED */
- if (!ipa3_usb_set_diag_state(IPA_USB_INITIALIZED))
- IPA_USB_ERR("failed to change DIAG state to initialized\n");
-
+ IPA_USB_DBG_LOW("exit\n");
return 0;
}
static int ipa3_usb_release_xdci_channel(u32 clnt_hdl,
- enum ipa_usb_teth_prot teth_prot)
+ enum ipa3_usb_transport_type ttype)
{
int result = 0;
- IPA_USB_DBG("ipa3_usb_release_xdci_channel: entry\n");
- if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) {
+ IPA_USB_DBG_LOW("entry\n");
+ if (ttype > IPA_USB_TRANSPORT_MAX) {
IPA_USB_ERR("bad parameter.\n");
return -EINVAL;
}
- if (!ipa3_usb_check_legal_op(IPA_USB_RELEASE_CHANNEL,
- (teth_prot == IPA_USB_DIAG))) {
+ if (!ipa3_usb_check_legal_op(IPA_USB_RELEASE_CHANNEL, ttype)) {
IPA_USB_ERR("Illegal operation.\n");
return -EPERM;
}
- if (teth_prot == IPA_USB_DIAG) {
- result = ipa3_usb_release_diag_channel(clnt_hdl);
- IPA_USB_DBG("ipa3_usb_release_xdci_channel: exit\n");
- return result;
- }
-
/* Release channel */
result = ipa3_release_gsi_channel(clnt_hdl);
if (result) {
@@ -1120,70 +1284,80 @@ static int ipa3_usb_release_xdci_channel(u32 clnt_hdl,
}
/* Change ipa_usb state to INITIALIZED */
- if (!ipa3_usb_set_state(IPA_USB_INITIALIZED))
+ if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype))
IPA_USB_ERR("failed to change state to initialized\n");
- IPA_USB_DBG("ipa3_usb_release_xdci_channel: exit\n");
+ IPA_USB_DBG_LOW("exit\n");
return 0;
}
-static int ipa3_usb_request_prod(void)
+static int ipa3_usb_request_prod(enum ipa3_usb_transport_type ttype)
{
int result;
+ struct ipa3_usb_rm_context *rm_ctx;
+ const char *rsrc_str;
- init_completion(&ipa3_usb_ctx->rm_ctx.prod_comp);
- IPA_USB_DBG("requesting USB_PROD\n");
- result = ipa3_rm_request_resource(
- ipa3_usb_ctx->rm_ctx.prod_params.name);
+ rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+ rsrc_str = ipa3_rm_resource_str(rm_ctx->prod_params.name);
+
+ IPA_USB_DBG_LOW("requesting %s\n", rsrc_str);
+ init_completion(&rm_ctx->prod_comp);
+ result = ipa3_rm_request_resource(rm_ctx->prod_params.name);
if (result) {
if (result != -EINPROGRESS) {
- IPA_USB_ERR("failed to request USB_PROD: %d\n", result);
+ IPA_USB_ERR("failed to request %s: %d\n",
+ rsrc_str, result);
return result;
}
- result = wait_for_completion_timeout(
- &ipa3_usb_ctx->rm_ctx.prod_comp,
- msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC));
+ result = wait_for_completion_timeout(&rm_ctx->prod_comp,
+ msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC));
if (result == 0) {
- IPA_USB_ERR("timeout request USB_PROD\n");
+ IPA_USB_ERR("timeout request %s\n", rsrc_str);
return -ETIME;
}
}
- IPA_USB_DBG("USB_PROD granted\n");
+ IPA_USB_DBG_LOW("%s granted\n", rsrc_str);
return 0;
}
-static int ipa3_usb_release_prod(void)
+static int ipa3_usb_release_prod(enum ipa3_usb_transport_type ttype)
{
int result;
+ struct ipa3_usb_rm_context *rm_ctx;
+ const char *rsrc_str;
- init_completion(&ipa3_usb_ctx->rm_ctx.prod_comp);
- IPA_USB_DBG("releasing USB_PROD\n");
- result = ipa_rm_release_resource(ipa3_usb_ctx->rm_ctx.prod_params.name);
+ rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+ rsrc_str = ipa3_rm_resource_str(rm_ctx->prod_params.name);
+
+ IPA_USB_DBG_LOW("releasing %s\n", rsrc_str);
+
+ init_completion(&rm_ctx->prod_comp);
+ result = ipa_rm_release_resource(rm_ctx->prod_params.name);
if (result) {
if (result != -EINPROGRESS) {
- IPA_USB_ERR("failed to release USB_PROD: %d\n", result);
+ IPA_USB_ERR("failed to release %s: %d\n",
+ rsrc_str, result);
return result;
}
- result = wait_for_completion_timeout(
- &ipa3_usb_ctx->rm_ctx.prod_comp,
+ result = wait_for_completion_timeout(&rm_ctx->prod_comp,
msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC));
if (result == 0) {
- IPA_USB_ERR("timeout release USB_PROD\n");
+ IPA_USB_ERR("timeout release %s\n", rsrc_str);
return -ETIME;
}
}
- IPA_USB_DBG("USB_PROD released\n");
+ IPA_USB_DBG_LOW("%s released\n", rsrc_str);
return 0;
}
static bool ipa3_usb_check_connect_params(
struct ipa_usb_xdci_connect_params_internal *params)
{
- IPA_USB_DBG("ul xferrscidx = %d\n", params->usb_to_ipa_xferrscidx);
- IPA_USB_DBG("dl xferrscidx = %d\n", params->ipa_to_usb_xferrscidx);
- IPA_USB_DBG("max_supported_bandwidth_mbps = %d\n",
+ IPA_USB_DBG_LOW("ul xferrscidx = %d\n", params->usb_to_ipa_xferrscidx);
+ IPA_USB_DBG_LOW("dl xferrscidx = %d\n", params->ipa_to_usb_xferrscidx);
+ IPA_USB_DBG_LOW("max_supported_bandwidth_mbps = %d\n",
params->max_supported_bandwidth_mbps);
if (params->max_pkt_size < IPA_USB_HIGH_SPEED_512B ||
@@ -1194,6 +1368,7 @@ static bool ipa3_usb_check_connect_params(
(params->usb_to_ipa_xferrscidx < 0 ||
params->usb_to_ipa_xferrscidx > 127)) ||
params->teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) {
+ IPA_USB_ERR("Invalid params\n");
return false;
}
@@ -1213,22 +1388,53 @@ static int ipa3_usb_connect_teth_bridge(
{
int result;
- if (ipa3_usb_ctx->teth_bridge_state != IPA_USB_TETH_PROT_INITIALIZED)
- return 0;
-
result = teth_bridge_connect(params);
if (result) {
- IPA_USB_ERR("failed to connect teth_bridge.\n");
- ipa3_usb_ctx->user_data = NULL;
+ IPA_USB_ERR("failed to connect teth_bridge (%s)\n",
+ params->tethering_mode == TETH_TETHERING_MODE_RMNET ?
+ "rmnet" : "mbim");
return result;
}
- ipa3_usb_ctx->teth_bridge_state = IPA_USB_TETH_PROT_CONNECTED;
+
+ return 0;
+}
+
+static int ipa3_usb_connect_dpl(void)
+{
+ int res = 0;
+
+	/*
+	 * Add the DPL dependencies to the RM dependency graph. The first
+	 * add_dependency call is synchronous in order to make sure the IPA
+	 * clocks are up before we continue and notify the USB driver that
+	 * it may proceed.
+	 */
+ res = ipa3_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (res < 0) {
+ IPA_USB_ERR("ipa3_rm_add_dependency_sync() failed.\n");
+ return res;
+ }
+
+	/*
+	 * This add_dependency call can't be synchronous since it would block
+	 * until DPL status is connected (which can happen only later in the
+	 * flow). The clocks are already up, so the call doesn't need to block.
+	 */
+ res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_USB_DPL_CONS);
+ if (res < 0 && res != -EINPROGRESS) {
+ IPA_USB_ERR("ipa3_rm_add_dependency() failed.\n");
+ ipa3_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ return res;
+ }
return 0;
}
static int ipa3_usb_connect_teth_prot(
- struct ipa_usb_xdci_connect_params_internal *params)
+ struct ipa_usb_xdci_connect_params_internal *params,
+ enum ipa3_usb_transport_type ttype)
{
int result;
struct teth_bridge_connect_params teth_bridge_params;
@@ -1244,7 +1450,7 @@ static int ipa3_usb_connect_teth_prot(
params->teth_prot));
break;
}
- ipa3_usb_ctx->user_data =
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data =
ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].user_data;
result = rndis_ipa_pipe_connect_notify(
params->usb_to_ipa_clnt_hdl,
@@ -1258,7 +1464,7 @@ static int ipa3_usb_connect_teth_prot(
IPA_USB_ERR("failed to connect %s.\n",
ipa3_usb_teth_prot_to_string(
params->teth_prot));
- ipa3_usb_ctx->user_data = NULL;
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
return result;
}
ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].state =
@@ -1275,7 +1481,7 @@ static int ipa3_usb_connect_teth_prot(
params->teth_prot));
break;
}
- ipa3_usb_ctx->user_data =
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data =
ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].user_data;
result = ecm_ipa_connect(params->usb_to_ipa_clnt_hdl,
params->ipa_to_usb_clnt_hdl,
@@ -1285,7 +1491,7 @@ static int ipa3_usb_connect_teth_prot(
IPA_USB_ERR("failed to connect %s.\n",
ipa3_usb_teth_prot_to_string(
params->teth_prot));
- ipa3_usb_ctx->user_data = NULL;
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
return result;
}
ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].state =
@@ -1307,7 +1513,7 @@ static int ipa3_usb_connect_teth_prot(
if (result)
return result;
- ipa3_usb_ctx->user_data =
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data =
ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].
user_data;
teth_bridge_params.ipa_usb_pipe_hdl =
@@ -1319,11 +1525,13 @@ static int ipa3_usb_connect_teth_prot(
(TETH_TETHERING_MODE_RMNET):(TETH_TETHERING_MODE_MBIM);
teth_bridge_params.client_type = IPA_CLIENT_USB_PROD;
result = ipa3_usb_connect_teth_bridge(&teth_bridge_params);
- if (result)
+ if (result) {
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
return result;
+ }
ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state =
IPA_USB_TETH_PROT_CONNECTED;
- ipa3_usb_notify_device_ready(ipa3_usb_ctx->user_data);
+ ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY);
IPA_USB_DBG("%s (%s) is connected.\n",
ipa3_usb_teth_prot_to_string(
params->teth_prot),
@@ -1336,32 +1544,29 @@ static int ipa3_usb_connect_teth_prot(
IPA_USB_DBG("%s is already connected.\n",
ipa3_usb_teth_prot_to_string(
params->teth_prot));
- return -EPERM;
+ break;
}
- result = ipa3_usb_init_teth_bridge();
- if (result)
- return result;
- ipa3_usb_ctx->diag_user_data =
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data =
ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].
user_data;
- teth_bridge_params.ipa_usb_pipe_hdl =
- params->ipa_to_usb_clnt_hdl;
- teth_bridge_params.usb_ipa_pipe_hdl =
- params->usb_to_ipa_clnt_hdl;
- teth_bridge_params.client_type = IPA_CLIENT_USB_CONS;
- result = ipa3_usb_connect_teth_bridge(&teth_bridge_params);
- if (result)
+ result = ipa3_usb_connect_dpl();
+ if (result) {
+ IPA_USB_ERR("Failed connecting DPL result=%d\n",
+ result);
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
return result;
+ }
ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state =
IPA_USB_TETH_PROT_CONNECTED;
- ipa3_usb_notify_device_ready(ipa3_usb_ctx->diag_user_data);
+ ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY);
IPA_USB_DBG("%s is connected.\n",
ipa3_usb_teth_prot_to_string(
params->teth_prot));
break;
default:
- break;
+ IPA_USB_ERR("Invalid tethering protocol\n");
+ return -EFAULT;
}
return 0;
@@ -1371,15 +1576,29 @@ static int ipa3_usb_disconnect_teth_bridge(void)
{
int result;
- if (ipa3_usb_ctx->teth_bridge_state != IPA_USB_TETH_PROT_CONNECTED)
- return 0;
-
result = teth_bridge_disconnect(IPA_CLIENT_USB_PROD);
if (result) {
IPA_USB_ERR("failed to disconnect teth_bridge.\n");
return result;
}
- ipa3_usb_ctx->teth_bridge_state = IPA_USB_TETH_PROT_INVALID;
+
+ return 0;
+}
+
+static int ipa3_usb_disconnect_dpl(void)
+{
+ int res;
+
+ /* Remove DPL RM dependency */
+ res = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (res)
+ IPA_USB_ERR("deleting DPL_DUMMY_PROD rsrc dependency fail\n");
+
+ res = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_USB_DPL_CONS);
+ if (res)
+		IPA_USB_ERR("deleting DPL_CONS rsrc dependency fail\n");
return 0;
}
@@ -1387,6 +1606,9 @@ static int ipa3_usb_disconnect_teth_bridge(void)
static int ipa3_usb_disconnect_teth_prot(enum ipa_usb_teth_prot teth_prot)
{
int result = 0;
+ enum ipa3_usb_transport_type ttype;
+
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
switch (teth_prot) {
case IPA_USB_RNDIS:
@@ -1413,7 +1635,6 @@ static int ipa3_usb_disconnect_teth_prot(enum ipa_usb_teth_prot teth_prot)
}
ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
IPA_USB_TETH_PROT_INITIALIZED;
- ipa3_usb_ctx->user_data = NULL;
IPA_USB_DBG("disconnected %s\n",
ipa3_usb_teth_prot_to_string(teth_prot));
break;
@@ -1421,19 +1642,17 @@ static int ipa3_usb_disconnect_teth_prot(enum ipa_usb_teth_prot teth_prot)
case IPA_USB_MBIM:
if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
IPA_USB_TETH_PROT_CONNECTED) {
- IPA_USB_DBG("%s is not connected.\n",
- ipa3_usb_teth_prot_to_string(teth_prot));
+ IPA_USB_DBG("%s (%s) is not connected.\n",
+ ipa3_usb_teth_prot_to_string(teth_prot),
+ ipa3_usb_teth_bridge_prot_to_string(teth_prot));
return -EPERM;
}
- if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state !=
- IPA_USB_TETH_PROT_CONNECTED) {
- result = ipa3_usb_disconnect_teth_bridge();
- if (result)
- break;
- }
+ result = ipa3_usb_disconnect_teth_bridge();
+ if (result)
+ break;
+
ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
IPA_USB_TETH_PROT_INITIALIZED;
- ipa3_usb_ctx->user_data = NULL;
IPA_USB_DBG("disconnected %s (%s)\n",
ipa3_usb_teth_prot_to_string(teth_prot),
ipa3_usb_teth_bridge_prot_to_string(teth_prot));
@@ -1445,17 +1664,11 @@ static int ipa3_usb_disconnect_teth_prot(enum ipa_usb_teth_prot teth_prot)
ipa3_usb_teth_prot_to_string(teth_prot));
return -EPERM;
}
- if ((ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RMNET].state !=
- IPA_USB_TETH_PROT_CONNECTED) &&
- (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_MBIM].state !=
- IPA_USB_TETH_PROT_CONNECTED)) {
- result = ipa3_usb_disconnect_teth_bridge();
- if (result)
- break;
- }
+ result = ipa3_usb_disconnect_dpl();
+ if (result)
+ break;
ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
IPA_USB_TETH_PROT_INITIALIZED;
- ipa3_usb_ctx->diag_user_data = NULL;
IPA_USB_DBG("disconnected %s\n",
ipa3_usb_teth_prot_to_string(teth_prot));
break;
@@ -1463,6 +1676,7 @@ static int ipa3_usb_disconnect_teth_prot(enum ipa_usb_teth_prot teth_prot)
break;
}
+ ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
return result;
}
@@ -1471,15 +1685,18 @@ static int ipa3_usb_xdci_connect_internal(
{
int result = -EFAULT;
struct ipa_rm_perf_profile profile;
+ enum ipa3_usb_transport_type ttype;
- IPA_USB_DBG("entry\n");
+ IPA_USB_DBG_LOW("entry\n");
if (params == NULL || !ipa3_usb_check_connect_params(params)) {
IPA_USB_ERR("bad parameters.\n");
return -EINVAL;
}
- if (!ipa3_usb_check_legal_op(IPA_USB_CONNECT,
- (params->teth_prot == IPA_USB_DIAG))) {
+ ttype = (params->teth_prot == IPA_USB_DIAG) ? IPA_USB_TRANSPORT_DPL :
+ IPA_USB_TRANSPORT_TETH;
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_CONNECT, ttype)) {
IPA_USB_ERR("Illegal operation.\n");
return -EPERM;
}
@@ -1491,24 +1708,30 @@ static int ipa3_usb_xdci_connect_internal(
return result;
}
- /* Set USB_PROD & USB_CONS perf profile */
+ /* Set RM PROD & CONS perf profile */
profile.max_supported_bandwidth_mbps =
- params->max_supported_bandwidth_mbps;
- result = ipa_rm_set_perf_profile(ipa3_usb_ctx->rm_ctx.prod_params.name,
- &profile);
+ params->max_supported_bandwidth_mbps;
+ result = ipa_rm_set_perf_profile(
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name,
+ &profile);
if (result) {
- IPA_USB_ERR("failed to set USB_PROD perf profile.\n");
+ IPA_USB_ERR("failed to set %s perf profile\n",
+ ipa3_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype].
+ rm_ctx.prod_params.name));
return result;
}
- result = ipa_rm_set_perf_profile(ipa3_usb_ctx->rm_ctx.cons_params.name,
- &profile);
+ result = ipa_rm_set_perf_profile(
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name,
+ &profile);
if (result) {
- IPA_USB_ERR("failed to set USB_CONS perf profile.\n");
+ IPA_USB_ERR("failed to set %s perf profile\n",
+ ipa3_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype].
+ rm_ctx.cons_params.name));
return result;
}
- /* Request USB_PROD */
- result = ipa3_usb_request_prod();
+ /* Request PROD */
+ result = ipa3_usb_request_prod(ttype);
if (result)
return result;
@@ -1523,37 +1746,29 @@ static int ipa3_usb_xdci_connect_internal(
}
}
- /* Start DL/Diag channel */
+ /* Start DL/DPL channel */
result = ipa3_xdci_connect(params->ipa_to_usb_clnt_hdl,
params->ipa_to_usb_xferrscidx,
params->ipa_to_usb_xferrscidx_valid);
if (result) {
- IPA_USB_ERR("failed to connect DL/Diag channel.\n");
+ IPA_USB_ERR("failed to connect DL/DPL channel.\n");
goto connect_dl_fail;
}
/* Connect tethering protocol */
- result = ipa3_usb_connect_teth_prot(params);
- if (result)
+ result = ipa3_usb_connect_teth_prot(params, ttype);
+ if (result) {
+ IPA_USB_ERR("failed to connect teth protocol\n");
goto connect_teth_prot_fail;
+ }
-
- if (params->teth_prot == IPA_USB_DIAG) {
- /* Change diag state to CONNECTED */
- if (!ipa3_usb_set_diag_state(IPA_USB_CONNECTED)) {
- IPA_USB_ERR(
- "failed to change diag state to connected\n");
- goto state_change_connected_fail;
- }
- } else {
- /* Change ipa_usb state to CONNECTED */
- if (!ipa3_usb_set_state(IPA_USB_CONNECTED)) {
- IPA_USB_ERR("failed to change state to connected\n");
- goto state_change_connected_fail;
- }
+ if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) {
+ IPA_USB_ERR(
+ "failed to change state to connected\n");
+ goto state_change_connected_fail;
}
- IPA_USB_DBG("exit\n");
+ IPA_USB_DBG_LOW("exit\n");
return 0;
state_change_connected_fail:
@@ -1569,11 +1784,253 @@ connect_dl_fail:
ipa3_reset_gsi_event_ring(params->usb_to_ipa_clnt_hdl);
}
connect_ul_fail:
- ipa3_usb_release_prod();
+ ipa3_usb_release_prod(ttype);
+ return result;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static char dbg_buff[IPA_USB_MAX_MSG_LEN];
+
+static char *ipa3_usb_cons_state_to_string(enum ipa3_usb_cons_state state)
+{
+ switch (state) {
+ case IPA_USB_CONS_GRANTED:
+ return "CONS_GRANTED";
+ case IPA_USB_CONS_RELEASED:
+ return "CONS_RELEASED";
+ }
+
+ return "UNSUPPORTED";
+}
+
+static int ipa3_usb_get_status_dbg_info(struct ipa3_usb_status_dbg_info *status)
+{
+ int res;
+ int i;
+ unsigned long flags;
+
+ IPA_USB_DBG_LOW("entry\n");
+
+ if (ipa3_usb_ctx == NULL) {
+ IPA_USB_ERR("IPA USB was not inited yet\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&ipa3_usb_ctx->general_mutex);
+
+ if (!status) {
+ IPA_USB_ERR("Invalid input\n");
+ res = -EINVAL;
+ goto bail;
+ }
+
+ memset(status, 0, sizeof(struct ipa3_usb_status_dbg_info));
+
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ status->teth_state = ipa3_usb_state_to_string(
+ ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].state);
+ status->dpl_state = ipa3_usb_state_to_string(
+ ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].state);
+ if (ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].rm_ctx.cons_valid)
+ status->teth_cons_state = ipa3_usb_cons_state_to_string(
+ ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].
+ rm_ctx.cons_state);
+ if (ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].rm_ctx.cons_valid)
+ status->dpl_cons_state = ipa3_usb_cons_state_to_string(
+ ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].
+ rm_ctx.cons_state);
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+ for (i = 0 ; i < IPA_USB_MAX_TETH_PROT_SIZE ; i++) {
+ if (ipa3_usb_ctx->teth_prot_ctx[i].state ==
+ IPA_USB_TETH_PROT_INITIALIZED) {
+ if ((i == IPA_USB_RMNET) || (i == IPA_USB_MBIM))
+ status->inited_prots[status->num_init_prot++] =
+ ipa3_usb_teth_bridge_prot_to_string(i);
+ else
+ status->inited_prots[status->num_init_prot++] =
+ ipa3_usb_teth_prot_to_string(i);
+ } else if (ipa3_usb_ctx->teth_prot_ctx[i].state ==
+ IPA_USB_TETH_PROT_CONNECTED) {
+ switch (i) {
+ case IPA_USB_RMNET:
+ case IPA_USB_MBIM:
+ status->teth_connected_prot =
+ ipa3_usb_teth_bridge_prot_to_string(i);
+ break;
+ case IPA_USB_DIAG:
+ status->dpl_connected_prot =
+ ipa3_usb_teth_prot_to_string(i);
+ break;
+ default:
+ status->teth_connected_prot =
+ ipa3_usb_teth_prot_to_string(i);
+ }
+ }
+ }
+
+ res = 0;
+ IPA_USB_DBG_LOW("exit\n");
+bail:
+ mutex_unlock(&ipa3_usb_ctx->general_mutex);
+ return res;
+}
+
+static ssize_t ipa3_read_usb_state_info(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ipa3_usb_status_dbg_info status;
+ int result;
+ int nbytes;
+ int cnt = 0;
+ int i;
+
+ result = ipa3_usb_get_status_dbg_info(&status);
+ if (result) {
+ nbytes = scnprintf(dbg_buff, IPA_USB_MAX_MSG_LEN,
+ "Fail to read IPA USB status\n");
+ cnt += nbytes;
+ } else {
+ nbytes = scnprintf(dbg_buff, IPA_USB_MAX_MSG_LEN,
+ "Tethering Data State: %s\n"
+ "DPL State: %s\n"
+ "Protocols in Initialized State: ",
+ status.teth_state,
+ status.dpl_state);
+ cnt += nbytes;
+
+ for (i = 0 ; i < status.num_init_prot ; i++) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_USB_MAX_MSG_LEN - cnt,
+ "%s ", status.inited_prots[i]);
+ cnt += nbytes;
+ }
+ nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+ status.num_init_prot ? "\n" : "None\n");
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+ "Protocols in Connected State: ");
+ cnt += nbytes;
+ if (status.teth_connected_prot) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_USB_MAX_MSG_LEN - cnt,
+ "%s ", status.teth_connected_prot);
+ cnt += nbytes;
+ }
+ if (status.dpl_connected_prot) {
+ nbytes = scnprintf(dbg_buff + cnt,
+ IPA_USB_MAX_MSG_LEN - cnt,
+ "%s ", status.dpl_connected_prot);
+ cnt += nbytes;
+ }
+ nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+ (status.teth_connected_prot ||
+ status.dpl_connected_prot) ? "\n" : "None\n");
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+ "USB Tethering Consumer State: %s\n",
+ status.teth_cons_state ?
+ status.teth_cons_state : "Invalid");
+ cnt += nbytes;
+
+ nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+ "DPL Consumer State: %s\n",
+ status.dpl_cons_state ? status.dpl_cons_state :
+ "Invalid");
+ cnt += nbytes;
+ }
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+const struct file_operations ipa3_ipa_usb_ops = {
+ .read = ipa3_read_usb_state_info,
+};
+
+void ipa_usb_debugfs_init(void)
+{
+ const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+ const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+ S_IWUSR | S_IWGRP;
+
+ ipa3_usb_ctx->dent = debugfs_create_dir("ipa_usb", 0);
+ if (IS_ERR(ipa3_usb_ctx->dent)) {
+ IPA_USB_ERR("fail to create folder in debug_fs.\n");
+ return;
+ }
+
+ ipa3_usb_ctx->dfile_state_info = debugfs_create_file("state_info",
+ read_only_mode, ipa3_usb_ctx->dent, 0,
+ &ipa3_ipa_usb_ops);
+ if (!ipa3_usb_ctx->dfile_state_info ||
+ IS_ERR(ipa3_usb_ctx->dfile_state_info)) {
+ IPA_USB_ERR("failed to create file for state_info\n");
+ goto fail;
+ }
+
+ ipa3_usb_ctx->dfile_enable_low_prio =
+ debugfs_create_u32("enable_low_prio_print",
+ read_write_mode, ipa3_usb_ctx->dent,
+ &ipa3_usb_ctx->enable_low_prio_print);
+ if (!ipa3_usb_ctx->dfile_enable_low_prio ||
+ IS_ERR(ipa3_usb_ctx->dfile_enable_low_prio)) {
+ IPA_USB_ERR("could not create enable_low_prio_print file\n");
+ goto fail;
+ }
+
+ return;
+
+fail:
+ debugfs_remove_recursive(ipa3_usb_ctx->dent);
+ ipa3_usb_ctx->dent = NULL;
+}
+
+static int ipa_usb_ipc_logging_init(void)
+{
+ int result;
+
+ ipa3_usb_ctx->logbuf = ipc_log_context_create(IPA_USB_IPC_LOG_PAGES,
+ "ipa_usb", 0);
+ if (ipa3_usb_ctx->logbuf == NULL) {
+ /* we can't use ipa_usb print macros on failures */
+ pr_err("ipa_usb: failed to get logbuf\n");
+ return -ENOMEM;
+ }
+
+ ipa3_usb_ctx->logbuf_low = ipc_log_context_create(IPA_USB_IPC_LOG_PAGES,
+ "ipa_usb_low", 0);
+ if (ipa3_usb_ctx->logbuf_low == NULL) {
+ pr_err("ipa_usb: failed to get logbuf_low\n");
+ result = -ENOMEM;
+ goto fail_logbuf_low;
+ }
+
+ return 0;
+
+fail_logbuf_low:
+ ipc_log_context_destroy(ipa3_usb_ctx->logbuf);
return result;
}
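These two ipc_log contexts back the IPA_USB_DBG/IPA_USB_DBG_LOW macros used throughout the file. Their real definitions sit earlier in ipa_usb.c and are not part of this hunk; a rough sketch (an assumption, using an illustrative name) of how the low-priority variant is expected to be gated by the enable_low_prio_print debugfs knob and routed to the "ipa_usb_low" context:

/* Illustrative only; the real IPA_USB_DBG_LOW macro is defined elsewhere */
#define EXAMPLE_USB_DBG_LOW(fmt, args...) \
	do { \
		if (ipa3_usb_ctx->enable_low_prio_print && \
		    ipa3_usb_ctx->logbuf_low) \
			ipc_log_string(ipa3_usb_ctx->logbuf_low, \
				fmt, ## args); \
	} while (0)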
-int ipa3_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
+void ipa_usb_debugfs_remove(void)
+{
+ if (IS_ERR(ipa3_usb_ctx->dent)) {
+		IPA_USB_ERR("ipa_usb_debugfs_remove: folder was not created.\n");
+ return;
+ }
+
+ debugfs_remove_recursive(ipa3_usb_ctx->dent);
+}
+#else /* CONFIG_DEBUG_FS */
+void ipa_usb_debugfs_init(void){}
+void ipa_usb_debugfs_remove(void){}
+#endif /* CONFIG_DEBUG_FS */
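Both debugfs nodes are meant to be poked from userspace: state_info dumps the current tethering/DPL state, and writing a non-zero value to enable_low_prio_print turns on the verbose IPA_USB_DBG_LOW output. A small userspace sketch, not part of the patch, that reads the state dump (the path assumes debugfs is mounted at /sys/kernel/debug):

/* Userspace sketch: dump the ipa_usb state_info debugfs node */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/ipa_usb/state_info", "r");

	if (!f) {
		perror("state_info");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}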
+
+
+
+int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
struct ipa_usb_xdci_chan_params *dl_chan_params,
struct ipa_req_chan_out_params *ul_out_params,
struct ipa_req_chan_out_params *dl_out_params,
@@ -1583,7 +2040,7 @@ int ipa3_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
struct ipa_usb_xdci_connect_params_internal conn_params;
mutex_lock(&ipa3_usb_ctx->general_mutex);
- IPA_USB_DBG("entry\n");
+ IPA_USB_DBG_LOW("entry\n");
if (connect_params == NULL || dl_chan_params == NULL ||
dl_out_params == NULL ||
(connect_params->teth_prot != IPA_USB_DIAG &&
@@ -1604,7 +2061,7 @@ int ipa3_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
result = ipa3_usb_request_xdci_channel(dl_chan_params, dl_out_params);
if (result) {
- IPA_USB_ERR("failed to allocate DL channel.\n");
+ IPA_USB_ERR("failed to allocate DL/DPL channel.\n");
goto alloc_dl_chan_fail;
}
@@ -1633,7 +2090,7 @@ int ipa3_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
goto connect_fail;
}
- IPA_USB_DBG("exit\n");
+ IPA_USB_DBG_LOW("exit\n");
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return 0;
@@ -1648,65 +2105,7 @@ bad_params:
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return result;
}
-
-static int ipa3_usb_xdci_disconnect_diag(u32 dl_clnt_hdl)
-{
- int result = 0;
- unsigned long flags;
-
- /* Stop DIAG channel */
- result = ipa3_xdci_disconnect(dl_clnt_hdl, false, -1);
- if (result) {
- IPA_USB_ERR("failed to disconnect DIAG channel.\n");
- return result;
- }
-
- /* Reset DIAG channel */
- result = ipa3_reset_gsi_channel(dl_clnt_hdl);
- if (result) {
- IPA_USB_ERR("failed to reset DIAG channel.\n");
- return result;
- }
-
- /* Reset DIAG event ring */
- result = ipa3_reset_gsi_event_ring(dl_clnt_hdl);
- if (result) {
- IPA_USB_ERR("failed to reset DIAG event ring.\n");
- return result;
- }
-
- /* Change diag state to STOPPED */
- if (!ipa3_usb_set_diag_state(IPA_USB_STOPPED))
- IPA_USB_ERR("failed to change diag state to stopped\n");
-
- result = ipa3_usb_release_xdci_channel(dl_clnt_hdl, IPA_USB_DIAG);
- if (result) {
- IPA_USB_ERR("failed to release DIAG channel.\n");
- return result;
- }
-
- /* Disconnect DIAG */
- result = ipa3_usb_disconnect_teth_prot(IPA_USB_DIAG);
- if (result) {
- IPA_USB_ERR("failed to disconnect DIAG tethering protocol\n");
- return result;
- }
-
- spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- if ((ipa3_usb_ctx->state != IPA_USB_CONNECTED) &&
- (ipa3_usb_ctx->state != IPA_USB_STOPPED)) {
- /* These are the only cases where data channels has USB_PROD */
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- result = ipa3_usb_release_prod();
- if (result) {
- IPA_USB_ERR("failed to release USB_PROD.\n");
- return result;
- }
- } else
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
- return 0;
-}
+EXPORT_SYMBOL(ipa_usb_xdci_connect);
static int ipa3_usb_check_disconnect_prot(enum ipa_usb_teth_prot teth_prot)
{
@@ -1725,89 +2124,71 @@ static int ipa3_usb_check_disconnect_prot(enum ipa_usb_teth_prot teth_prot)
return 0;
}
-int ipa3_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot)
{
int result = 0;
struct ipa_ep_cfg_holb holb_cfg;
unsigned long flags;
+ enum ipa3_usb_state orig_state;
+ enum ipa3_usb_transport_type ttype;
mutex_lock(&ipa3_usb_ctx->general_mutex);
- IPA_USB_DBG("entry\n");
+ IPA_USB_DBG_LOW("entry\n");
if (ipa3_usb_check_disconnect_prot(teth_prot)) {
result = -EINVAL;
goto bad_params;
}
- if (!ipa3_usb_check_legal_op(IPA_USB_DISCONNECT,
- (teth_prot == IPA_USB_DIAG))) {
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_DISCONNECT, ttype)) {
IPA_USB_ERR("Illegal operation.\n");
result = -EPERM;
goto bad_params;
}
- if (teth_prot == IPA_USB_DIAG) {
- result = ipa3_usb_xdci_disconnect_diag(dl_clnt_hdl);
- if (!result)
- IPA_USB_DBG("exit\n");
- mutex_unlock(&ipa3_usb_ctx->general_mutex);
- return result;
- }
-
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- if (ipa3_usb_ctx->state != IPA_USB_SUSPEND_IN_PROGRESS &&
- ipa3_usb_ctx->state != IPA_USB_SUSPENDED) {
+ if (ipa3_usb_ctx->ttype_ctx[ttype].state != IPA_USB_SUSPENDED) {
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- /* Stop UL channel */
- result = ipa3_xdci_disconnect(ul_clnt_hdl,
- (teth_prot == IPA_USB_RMNET ||
- teth_prot == IPA_USB_MBIM),
- ipa3_usb_ctx->qmi_req_id);
- if (result) {
- IPA_USB_ERR("failed to disconnect UL channel.\n");
- goto bad_params;
- }
- if (teth_prot == IPA_USB_RMNET ||
- teth_prot == IPA_USB_MBIM)
- ipa3_usb_ctx->qmi_req_id++;
- } else
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
- spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- if (ipa3_usb_ctx->state != IPA_USB_SUSPENDED) {
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- /* Stop DL channel */
+ /* Stop DL/DPL channel */
result = ipa3_xdci_disconnect(dl_clnt_hdl, false, -1);
if (result) {
- IPA_USB_ERR("failed to disconnect DL channel.\n");
+ IPA_USB_ERR("failed to disconnect DL/DPL channel.\n");
goto bad_params;
}
- } else
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
- spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- if (ipa3_usb_ctx->state == IPA_USB_SUSPENDED) {
+ } else {
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
memset(&holb_cfg, 0, sizeof(holb_cfg));
holb_cfg.en = IPA_HOLB_TMR_EN;
holb_cfg.tmr_val = 0;
ipa3_cfg_ep_holb(dl_clnt_hdl, &holb_cfg);
- } else
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
-
- /* Reset UL channel */
- result = ipa3_reset_gsi_channel(ul_clnt_hdl);
- if (result) {
- IPA_USB_ERR("failed to reset UL channel.\n");
- goto bad_params;
}
- /* Reset UL event ring */
- result = ipa3_reset_gsi_event_ring(ul_clnt_hdl);
- if (result) {
- IPA_USB_ERR("failed to reset UL event ring.\n");
- goto bad_params;
- }
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ orig_state = ipa3_usb_ctx->ttype_ctx[ttype].state;
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ if (orig_state != IPA_USB_SUSPEND_IN_PROGRESS &&
+ orig_state != IPA_USB_SUSPENDED) {
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
+ flags);
+ /* Stop UL channel */
+ result = ipa3_xdci_disconnect(ul_clnt_hdl,
+ (teth_prot == IPA_USB_RMNET ||
+ teth_prot == IPA_USB_MBIM),
+ ipa3_usb_ctx->qmi_req_id);
+ if (result) {
+ IPA_USB_ERR("failed disconnect UL channel\n");
+ goto bad_params;
+ }
+ if (teth_prot == IPA_USB_RMNET ||
+ teth_prot == IPA_USB_MBIM)
+ ipa3_usb_ctx->qmi_req_id++;
+ } else
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
+ flags);
+ } else
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
/* Reset DL channel */
result = ipa3_reset_gsi_channel(dl_clnt_hdl);
@@ -1823,17 +2204,35 @@ int ipa3_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
goto bad_params;
}
- /* Change ipa_usb state to STOPPED */
- if (!ipa3_usb_set_state(IPA_USB_STOPPED))
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ /* Reset UL channel */
+ result = ipa3_reset_gsi_channel(ul_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to reset UL channel.\n");
+ goto bad_params;
+ }
+
+ /* Reset UL event ring */
+ result = ipa3_reset_gsi_event_ring(ul_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to reset UL event ring.\n");
+ goto bad_params;
+ }
+ }
+
+ /* Change state to STOPPED */
+ if (!ipa3_usb_set_state(IPA_USB_STOPPED, false, ttype))
IPA_USB_ERR("failed to change state to stopped\n");
- result = ipa3_usb_release_xdci_channel(ul_clnt_hdl, teth_prot);
- if (result) {
- IPA_USB_ERR("failed to release UL channel.\n");
- goto bad_params;
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ result = ipa3_usb_release_xdci_channel(ul_clnt_hdl, ttype);
+ if (result) {
+ IPA_USB_ERR("failed to release UL channel.\n");
+ goto bad_params;
+ }
}
- result = ipa3_usb_release_xdci_channel(dl_clnt_hdl, teth_prot);
+ result = ipa3_usb_release_xdci_channel(dl_clnt_hdl, ttype);
if (result) {
IPA_USB_ERR("failed to release DL channel.\n");
goto bad_params;
@@ -1844,21 +2243,16 @@ int ipa3_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
if (result)
goto bad_params;
- spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- if (ipa3_usb_ctx->state != IPA_USB_SUSPEND_IN_PROGRESS &&
- ipa3_usb_ctx->state != IPA_USB_SUSPENDED &&
- ipa3_usb_ctx->diag_state != IPA_USB_CONNECTED &&
- ipa3_usb_ctx->diag_state != IPA_USB_STOPPED) {
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- result = ipa3_usb_release_prod();
+ if (orig_state != IPA_USB_SUSPEND_IN_PROGRESS &&
+ orig_state != IPA_USB_SUSPENDED) {
+ result = ipa3_usb_release_prod(ttype);
if (result) {
- IPA_USB_ERR("failed to release USB_PROD.\n");
+ IPA_USB_ERR("failed to release PROD.\n");
goto bad_params;
}
- } else
- spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+ }
- IPA_USB_DBG("exit\n");
+ IPA_USB_DBG_LOW("exit\n");
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return 0;
@@ -1867,19 +2261,29 @@ bad_params:
return result;
}
+EXPORT_SYMBOL(ipa_usb_xdci_disconnect);
-int ipa3_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
+int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
{
int result = -EFAULT;
+ enum ipa3_usb_transport_type ttype;
mutex_lock(&ipa3_usb_ctx->general_mutex);
- IPA_USB_DBG("entry\n");
+ IPA_USB_DBG_LOW("entry\n");
if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) {
IPA_USB_ERR("bad parameters.\n");
result = -EINVAL;
goto bad_params;
}
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_DEINIT_TETH_PROT, ttype)) {
+ IPA_USB_ERR("Illegal operation.\n");
+ result = -EPERM;
+ goto bad_params;
+ }
+
/* Clean-up tethering protocol */
switch (teth_prot) {
case IPA_USB_RNDIS:
@@ -1908,19 +2312,15 @@ int ipa3_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
break;
case IPA_USB_RMNET:
case IPA_USB_MBIM:
- if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state ==
- IPA_USB_TETH_PROT_CONNECTED) {
- IPA_USB_ERR("%s is connected\n",
- ipa3_usb_teth_prot_to_string(teth_prot));
+ if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+ IPA_USB_TETH_PROT_INITIALIZED) {
+ IPA_USB_ERR("%s (%s) is not initialized\n",
+ ipa3_usb_teth_prot_to_string(teth_prot),
+ ipa3_usb_teth_bridge_prot_to_string(teth_prot));
result = -EINVAL;
goto bad_params;
}
- if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state !=
- IPA_USB_TETH_PROT_CONNECTED) {
- result = ipa3_usb_disconnect_teth_bridge();
- if (result)
- goto bad_params;
- }
+
ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data =
NULL;
ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
@@ -1938,18 +2338,10 @@ int ipa3_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
result = -EINVAL;
goto bad_params;
}
- if ((ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RMNET].state !=
- IPA_USB_TETH_PROT_CONNECTED) &&
- (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_MBIM].state !=
- IPA_USB_TETH_PROT_CONNECTED)) {
- result = ipa3_usb_disconnect_teth_bridge();
- if (result)
- goto bad_params;
- }
- ipa3_usb_ctx->diag_user_data = NULL;
+ ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data =
+ NULL;
ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
IPA_USB_TETH_PROT_INVALID;
- ipa3_usb_ctx->num_init_prot--;
IPA_USB_DBG("deinitialized %s\n",
ipa3_usb_teth_prot_to_string(teth_prot));
break;
@@ -1959,22 +2351,20 @@ int ipa3_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
goto bad_params;
}
- if (teth_prot == IPA_USB_DIAG &&
- !ipa3_usb_set_diag_state(IPA_USB_INVALID)) {
- IPA_USB_ERR("failed to change diag state to invalid\n");
- }
- if (ipa3_usb_ctx->num_init_prot == 0) {
- if ((teth_prot != IPA_USB_DIAG) &&
- !ipa3_usb_set_state(IPA_USB_INVALID))
+ if (IPA3_USB_IS_TTYPE_DPL(ttype) ||
+ (ipa3_usb_ctx->num_init_prot == 0)) {
+ if (!ipa3_usb_set_state(IPA_USB_INVALID, false, ttype))
IPA_USB_ERR("failed to change state to invalid\n");
- ipa_rm_delete_resource(ipa3_usb_ctx->rm_ctx.prod_params.name);
- ipa3_usb_ctx->rm_ctx.prod_valid = false;
- ipa_rm_delete_resource(ipa3_usb_ctx->rm_ctx.cons_params.name);
- ipa3_usb_ctx->rm_ctx.cons_valid = false;
- ipa3_usb_ctx->ipa_usb_notify_cb = NULL;
+ ipa_rm_delete_resource(
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name);
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid = false;
+ ipa_rm_delete_resource(
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name);
+ ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid = false;
+ ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL;
}
- IPA_USB_DBG("exit\n");
+ IPA_USB_DBG_LOW("exit\n");
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return 0;
@@ -1982,40 +2372,49 @@ bad_params:
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return result;
}
+EXPORT_SYMBOL(ipa_usb_deinit_teth_prot);
-int ipa3_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
enum ipa_usb_teth_prot teth_prot)
{
int result = 0;
unsigned long flags;
enum ipa3_usb_cons_state curr_cons_state;
+ enum ipa3_usb_transport_type ttype;
mutex_lock(&ipa3_usb_ctx->general_mutex);
- IPA_USB_DBG("ipa3_usb_xdci_suspend: entry\n");
+ IPA_USB_DBG_LOW("entry\n");
if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) {
IPA_USB_ERR("bad parameters.\n");
result = -EINVAL;
goto bad_params;
}
- if (!ipa3_usb_check_legal_op(IPA_USB_SUSPEND, false)) {
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_SUSPEND, ttype)) {
IPA_USB_ERR("Illegal operation.\n");
result = -EPERM;
goto bad_params;
}
- /* Change ipa_usb state to SUSPEND_REQUESTED */
- if (!ipa3_usb_set_state(IPA_USB_SUSPEND_REQUESTED)) {
- IPA_USB_ERR("failed to change state to suspend_requested\n");
+ IPA_USB_DBG("Start suspend sequence: %s\n",
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ "DPL channel":"Data Tethering channels");
+
+ /* Change state to SUSPEND_REQUESTED */
+ if (!ipa3_usb_set_state(IPA_USB_SUSPEND_REQUESTED, false, ttype)) {
+ IPA_USB_ERR(
+ "fail changing state to suspend_req.\n");
result = -EFAULT;
goto bad_params;
}
- /* Stop UL channel & suspend DL EP */
+ /* Stop UL channel & suspend DL/DPL EP */
result = ipa3_xdci_suspend(ul_clnt_hdl, dl_clnt_hdl,
(teth_prot == IPA_USB_RMNET ||
teth_prot == IPA_USB_MBIM),
- ipa3_usb_ctx->qmi_req_id);
+ ipa3_usb_ctx->qmi_req_id, IPA3_USB_IS_TTYPE_DPL(ttype));
if (result) {
IPA_USB_ERR("failed to suspend\n");
goto suspend_fail;
@@ -2024,125 +2423,158 @@ int ipa3_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
teth_prot == IPA_USB_MBIM)
ipa3_usb_ctx->qmi_req_id++;
- result = ipa3_usb_release_prod();
+ result = ipa3_usb_release_prod(ttype);
if (result) {
- IPA_USB_ERR("failed to release USB_PROD.\n");
+ IPA_USB_ERR("failed to release PROD\n");
goto release_prod_fail;
}
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- curr_cons_state = ipa3_usb_ctx->rm_ctx.cons_state;
+ curr_cons_state = ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state;
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
if (curr_cons_state == IPA_USB_CONS_GRANTED) {
- /* Change ipa_usb state to SUSPEND_IN_PROGRESS */
- if (!ipa3_usb_set_state(IPA_USB_SUSPEND_IN_PROGRESS))
+ /* Change state to SUSPEND_IN_PROGRESS */
+ if (!ipa3_usb_set_state(IPA_USB_SUSPEND_IN_PROGRESS,
+ false, ttype))
IPA_USB_ERR("fail set state to suspend_in_progress\n");
- /* Check if DL data pending */
+
+ /* Check if DL/DPL data pending */
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- if (ipa3_usb_ctx->rm_ctx.cons_requested) {
- IPA_USB_DBG("DL data pending, invoke remote wakeup\n");
+ if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
+ IPA_USB_DBG(
+ "DL/DPL data pending, invoke remote wakeup\n");
queue_work(ipa3_usb_ctx->wq,
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ &ipa3_usb_dpl_notify_remote_wakeup_work :
&ipa3_usb_notify_remote_wakeup_work);
}
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- ipa3_usb_ctx->finish_suspend_work.dl_clnt_hdl = dl_clnt_hdl;
- ipa3_usb_ctx->finish_suspend_work.ul_clnt_hdl = ul_clnt_hdl;
- INIT_WORK(&ipa3_usb_ctx->finish_suspend_work.work,
+
+ ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.ttype =
+ ttype;
+ ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.dl_clnt_hdl =
+ dl_clnt_hdl;
+ ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.ul_clnt_hdl =
+ ul_clnt_hdl;
+ INIT_WORK(&ipa3_usb_ctx->ttype_ctx[ttype].
+ finish_suspend_work.work,
ipa3_usb_wq_finish_suspend_work);
+
result = -EINPROGRESS;
- IPA_USB_DBG("ipa3_usb_xdci_suspend: exit\n");
+ IPA_USB_DBG("exit with suspend_in_progress\n");
goto bad_params;
}
/* Stop DL channel */
result = ipa3_stop_gsi_channel(dl_clnt_hdl);
if (result) {
- IPAERR("Error stopping DL channel: %d\n", result);
+ IPAERR("Error stopping DL/DPL channel: %d\n", result);
result = -EFAULT;
goto release_prod_fail;
}
- /* Change ipa_usb state to SUSPENDED */
- if (!ipa3_usb_set_state(IPA_USB_SUSPENDED))
+ /* Change state to SUSPENDED */
+ if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false, ttype))
IPA_USB_ERR("failed to change state to suspended\n");
- /* Check if DL data pending */
+
+ /* Check if DL/DPL data pending */
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- if (ipa3_usb_ctx->rm_ctx.cons_requested) {
- IPA_USB_DBG("DL data is pending, invoking remote wakeup\n");
- queue_work(ipa3_usb_ctx->wq,
+ if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
+ IPA_USB_DBG_LOW(
+ "DL/DPL data is pending, invoking remote wakeup\n");
+ queue_work(ipa3_usb_ctx->wq, IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ &ipa3_usb_dpl_notify_remote_wakeup_work :
&ipa3_usb_notify_remote_wakeup_work);
}
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- IPA_USB_DBG("ipa3_usb_xdci_suspend: exit\n");
+ IPA_USB_DBG_LOW("exit\n");
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return 0;
release_prod_fail:
- ipa3_xdci_resume(ul_clnt_hdl, dl_clnt_hdl);
+ ipa3_xdci_resume(ul_clnt_hdl, dl_clnt_hdl,
+ IPA3_USB_IS_TTYPE_DPL(ttype));
suspend_fail:
- /* Change ipa_usb state back to CONNECTED */
- if (!ipa3_usb_set_state(IPA_USB_CONNECTED))
- IPA_USB_ERR("failed to change state to connected\n");
+ /* Change state back to CONNECTED */
+ if (!ipa3_usb_set_state(IPA_USB_CONNECTED, true, ttype))
+ IPA_USB_ERR("failed to change state back to connected\n");
bad_params:
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return result;
}
+EXPORT_SYMBOL(ipa_usb_xdci_suspend);
-int ipa3_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl)
+int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot)
{
int result = -EFAULT;
enum ipa3_usb_state prev_state;
unsigned long flags;
+ enum ipa3_usb_transport_type ttype;
mutex_lock(&ipa3_usb_ctx->general_mutex);
- IPA_USB_DBG("ipa3_usb_xdci_resume: entry\n");
+ IPA_USB_DBG_LOW("entry\n");
+
+ if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) {
+ IPA_USB_ERR("bad parameters.\n");
+ result = -EINVAL;
+ goto bad_params;
+ }
- if (!ipa3_usb_check_legal_op(IPA_USB_RESUME, false)) {
+ ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+ if (!ipa3_usb_check_legal_op(IPA_USB_RESUME, ttype)) {
IPA_USB_ERR("Illegal operation.\n");
result = -EPERM;
goto bad_params;
}
+ IPA_USB_DBG_LOW("Start resume sequence: %s\n",
+ IPA3_USB_IS_TTYPE_DPL(ttype) ?
+ "DPL channel" : "Data Tethering channels");
+
spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
- prev_state = ipa3_usb_ctx->state;
+ prev_state = ipa3_usb_ctx->ttype_ctx[ttype].state;
spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
- /* Change ipa_usb state to RESUME_IN_PROGRESS */
- if (!ipa3_usb_set_state(IPA_USB_RESUME_IN_PROGRESS)) {
+ /* Change state to RESUME_IN_PROGRESS */
+ if (!ipa3_usb_set_state(IPA_USB_RESUME_IN_PROGRESS, false, ttype)) {
IPA_USB_ERR("failed to change state to resume_in_progress\n");
result = -EFAULT;
goto bad_params;
}
/* Request USB_PROD */
- result = ipa3_usb_request_prod();
+ result = ipa3_usb_request_prod(ttype);
if (result)
goto prod_req_fail;
- /* Start UL channel */
- result = ipa3_start_gsi_channel(ul_clnt_hdl);
- if (result) {
- IPA_USB_ERR("failed to start UL channel.\n");
- goto start_ul_fail;
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ /* Start UL channel */
+ result = ipa3_start_gsi_channel(ul_clnt_hdl);
+ if (result) {
+ IPA_USB_ERR("failed to start UL channel.\n");
+ goto start_ul_fail;
+ }
}
if (prev_state != IPA_USB_SUSPEND_IN_PROGRESS) {
- /* Start DL channel */
+ /* Start DL/DPL channel */
result = ipa3_start_gsi_channel(dl_clnt_hdl);
if (result) {
- IPA_USB_ERR("failed to start DL channel.\n");
+ IPA_USB_ERR("failed to start DL/DPL channel.\n");
goto start_dl_fail;
}
}
- /* Change ipa_usb state to CONNECTED */
- if (!ipa3_usb_set_state(IPA_USB_CONNECTED)) {
+ /* Change state to CONNECTED */
+ if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) {
IPA_USB_ERR("failed to change state to connected\n");
result = -EFAULT;
goto state_change_connected_fail;
}
- IPA_USB_DBG("ipa3_usb_xdci_resume: exit\n");
+ IPA_USB_DBG_LOW("exit\n");
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return 0;
@@ -2150,20 +2582,107 @@ state_change_connected_fail:
if (prev_state != IPA_USB_SUSPEND_IN_PROGRESS) {
result = ipa3_stop_gsi_channel(dl_clnt_hdl);
if (result)
- IPA_USB_ERR("Error stopping DL channel: %d\n", result);
+ IPA_USB_ERR("Error stopping DL/DPL channel: %d\n",
+ result);
}
start_dl_fail:
- result = ipa3_stop_gsi_channel(ul_clnt_hdl);
- if (result)
- IPA_USB_ERR("Error stopping UL channel: %d\n", result);
+ if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+ result = ipa3_stop_gsi_channel(ul_clnt_hdl);
+ if (result)
+ IPA_USB_ERR("Error stopping UL channel: %d\n", result);
+ }
start_ul_fail:
- ipa3_usb_release_prod();
+ ipa3_usb_release_prod(ttype);
prod_req_fail:
- /* Change ipa_usb state back to prev_state */
- if (!ipa3_usb_set_state(prev_state))
+ /* Change state back to prev_state */
+ if (!ipa3_usb_set_state(prev_state, true, ttype))
IPA_USB_ERR("failed to change state back to %s\n",
ipa3_usb_state_to_string(prev_state));
bad_params:
mutex_unlock(&ipa3_usb_ctx->general_mutex);
return result;
}
+EXPORT_SYMBOL(ipa_usb_xdci_resume);
+
+static int __init ipa3_usb_init(void)
+{
+ int i;
+ unsigned long flags;
+ int res;
+
+ IPA_USB_DBG("entry\n");
+ ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL);
+ if (ipa3_usb_ctx == NULL) {
+ IPA_USB_ERR("failed to allocate memory\n");
+ IPA_USB_ERR(":ipa_usb init failed\n");
+ return -EFAULT;
+ }
+ memset(ipa3_usb_ctx, 0, sizeof(struct ipa3_usb_context));
+
+ res = ipa_usb_ipc_logging_init();
+ if (res) {
+		/* IPA_USB_ERR will crash on NULL deref if we use the macro */
+ pr_err("ipa_usb: failed to initialize ipc logging\n");
+ res = -EFAULT;
+ goto ipa_usb_init_ipc_log_fail;
+ }
+
+ for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++)
+ ipa3_usb_ctx->teth_prot_ctx[i].state =
+ IPA_USB_TETH_PROT_INVALID;
+ ipa3_usb_ctx->num_init_prot = 0;
+ init_completion(&ipa3_usb_ctx->dev_ready_comp);
+ ipa3_usb_ctx->qmi_req_id = 0;
+ spin_lock_init(&ipa3_usb_ctx->state_lock);
+ ipa3_usb_ctx->dl_data_pending = false;
+ mutex_init(&ipa3_usb_ctx->general_mutex);
+
+ for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) {
+ ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_valid = false;
+ ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_valid = false;
+ init_completion(&ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_comp);
+ ipa3_usb_ctx->ttype_ctx[i].user_data = NULL;
+ }
+
+ spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+ for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) {
+ ipa3_usb_ctx->ttype_ctx[i].state = IPA_USB_INVALID;
+ ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_state =
+ IPA_USB_CONS_RELEASED;
+ }
+ spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+ ipa3_usb_ctx->wq = create_singlethread_workqueue("ipa_usb_wq");
+ if (!ipa3_usb_ctx->wq) {
+ IPA_USB_ERR("failed to create workqueue\n");
+ res = -EFAULT;
+ goto ipa_usb_workqueue_fail;
+ }
+
+ ipa_usb_debugfs_init();
+
+ IPA_USB_INFO("exit: IPA_USB init success!\n");
+
+ return 0;
+
+ipa_usb_workqueue_fail:
+ IPA_USB_ERR(":init failed (%d)\n", -res);
+ ipc_log_context_destroy(ipa3_usb_ctx->logbuf);
+ ipc_log_context_destroy(ipa3_usb_ctx->logbuf_low);
+ipa_usb_init_ipc_log_fail:
+ kfree(ipa3_usb_ctx);
+ return res;
+}
+
+static void ipa3_usb_exit(void)
+{
+ IPA_USB_DBG_LOW("IPA_USB exit\n");
+ ipa_usb_debugfs_remove();
+ kfree(ipa3_usb_ctx);
+}
+
+arch_initcall(ipa3_usb_init);
+module_exit(ipa3_usb_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA USB client driver");
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 5d1c2de96ac9..c968eeec43a2 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -32,9 +32,14 @@
#include <linux/delay.h>
#include <linux/qcom_iommu.h>
#include <linux/time.h>
+#include <linux/hashtable.h>
+#include <linux/hash.h>
#include "ipa_i.h"
#include "ipa_rm_i.h"
+#define CREATE_TRACE_POINTS
+#include "ipa_trace.h"
+
#define IPA_SUMMING_THRESHOLD (0x10)
#define IPA_PIPE_MEM_START_OFST (0x0)
#define IPA_PIPE_MEM_SIZE (0x0)
@@ -53,6 +58,13 @@
#define CLEANUP_TAG_PROCESS_TIMEOUT 150
+#define IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
+
+#define IPA2_ACTIVE_CLIENT_LOG_TYPE_EP 0
+#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
+#define IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
+#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
+
#define IPA_AGGR_STR_IN_BYTES(str) \
(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
@@ -192,6 +204,8 @@ static bool smmu_present;
static bool arm_smmu;
static bool smmu_disable_htw;
+static char *active_clients_table_buf;
+
const char *ipa2_clients_strings[IPA_CLIENT_MAX] = {
__stringify(IPA_CLIENT_HSIC1_PROD),
__stringify(IPA_CLIENT_WLAN1_PROD),
@@ -264,23 +278,107 @@ const char *ipa2_clients_strings[IPA_CLIENT_MAX] = {
__stringify(IPA_CLIENT_TEST4_CONS),
};
+int ipa2_active_clients_log_print_buffer(char *buf, int size)
+{
+ int i;
+ int nbytes;
+ int cnt = 0;
+ int start_idx;
+ int end_idx;
+
+ start_idx = (ipa_ctx->ipa2_active_clients_logging.log_tail + 1) %
+ IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+ end_idx = ipa_ctx->ipa2_active_clients_logging.log_head;
+ for (i = start_idx; i != end_idx;
+ i = (i + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
+ nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
+ ipa_ctx->ipa2_active_clients_logging
+ .log_buffer[i]);
+ cnt += nbytes;
+ }
+
+ return cnt;
+}
+
+int ipa2_active_clients_log_print_table(char *buf, int size)
+{
+ int i;
+ struct ipa2_active_client_htable_entry *iterator;
+ int cnt = 0;
+
+ cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
+ hash_for_each(ipa_ctx->ipa2_active_clients_logging.htable, i,
+ iterator, list) {
+ switch (iterator->type) {
+ case IPA2_ACTIVE_CLIENT_LOG_TYPE_EP:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d ENDPOINT\n",
+ iterator->id_string, iterator->count);
+ break;
+ case IPA2_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d SIMPLE\n",
+ iterator->id_string, iterator->count);
+ break;
+ case IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d RESOURCE\n",
+ iterator->id_string, iterator->count);
+ break;
+ case IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d SPECIAL\n",
+ iterator->id_string, iterator->count);
+ break;
+ default:
+ IPAERR("Trying to print illegal active_clients type");
+ break;
+ }
+ }
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "\nTotal active clients count: %d\n",
+ ipa_ctx->ipa_active_clients.cnt);
+
+ return cnt;
+}
+
+static int ipa2_active_clients_panic_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ ipa_active_clients_lock();
+ ipa2_active_clients_log_print_table(active_clients_table_buf,
+ IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
+ IPAERR("%s", active_clients_table_buf);
+ ipa_active_clients_unlock();
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa2_active_clients_panic_blk = {
+ .notifier_call = ipa2_active_clients_panic_notifier,
+};
+
static int ipa2_active_clients_log_insert(const char *string)
{
+ int head;
+ int tail;
+
+ head = ipa_ctx->ipa2_active_clients_logging.log_head;
+ tail = ipa_ctx->ipa2_active_clients_logging.log_tail;
+
if (!ipa_ctx->ipa2_active_clients_logging.log_rdy)
return -EPERM;
- strlcpy(ipa_ctx->ipa2_active_clients_logging.log_buffer
- [ipa_ctx->ipa2_active_clients_logging.log_head],
- string,
+ memset(ipa_ctx->ipa2_active_clients_logging.log_buffer[head], '_',
+ IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN);
+ strlcpy(ipa_ctx->ipa2_active_clients_logging.log_buffer[head], string,
(size_t)IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN);
- ipa_ctx->ipa2_active_clients_logging.log_head =
- (ipa_ctx->ipa2_active_clients_logging.log_head + 1) %
- IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
- if (ipa_ctx->ipa2_active_clients_logging.log_tail ==
- ipa_ctx->ipa2_active_clients_logging.log_head) {
- ipa_ctx->ipa2_active_clients_logging.log_tail =
- (ipa_ctx->ipa2_active_clients_logging.log_tail + 1) %
- IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
- }
+ head = (head + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+ if (tail == head)
+ tail = (tail + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+
+ ipa_ctx->ipa2_active_clients_logging.log_tail = tail;
+ ipa_ctx->ipa2_active_clients_logging.log_head = head;
+
return 0;
}
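
The insert path above writes the new line at log_head, advances head modulo IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES, and bumps log_tail whenever the two meet, so the buffer always retains the most recent lines; the print path then walks from (tail + 1) up to head. A minimal standalone sketch of the same head/tail discipline follows (sizes and names are illustrative, not from the patch):

/* Illustrative sketch only -- not part of this patch. */
#include <linux/kernel.h>
#include <linux/string.h>

#define LOG_LINES	8
#define LOG_LINE_LEN	96

static char log_buf[LOG_LINES][LOG_LINE_LEN];
static int log_head;			/* next slot to be written */
static int log_tail = LOG_LINES - 1;	/* slot just before the oldest line */

static void log_insert(const char *str)
{
	strlcpy(log_buf[log_head], str, LOG_LINE_LEN);
	log_head = (log_head + 1) % LOG_LINES;
	if (log_tail == log_head)	/* buffer full: drop the oldest line */
		log_tail = (log_tail + 1) % LOG_LINES;
}

static int log_print(char *out, int size)
{
	int i, cnt = 0;

	for (i = (log_tail + 1) % LOG_LINES; i != log_head;
	     i = (i + 1) % LOG_LINES)
		cnt += scnprintf(out + cnt, size - cnt, "%s\n", log_buf[i]);
	return cnt;
}
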
@@ -292,6 +390,8 @@ static int ipa2_active_clients_log_init(void)
IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
sizeof(char[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN]),
GFP_KERNEL);
+ active_clients_table_buf = kzalloc(sizeof(
+ char[IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
if (ipa_ctx->ipa2_active_clients_logging.log_buffer == NULL) {
IPAERR("Active Clients Logging memory allocation failed");
goto bail;
@@ -304,6 +404,9 @@ static int ipa2_active_clients_log_init(void)
ipa_ctx->ipa2_active_clients_logging.log_head = 0;
ipa_ctx->ipa2_active_clients_logging.log_tail =
IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+ hash_init(ipa_ctx->ipa2_active_clients_logging.htable);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &ipa2_active_clients_panic_blk);
ipa_ctx->ipa2_active_clients_logging.log_rdy = 1;
return 0;
@@ -330,22 +433,6 @@ static void ipa2_active_clients_log_destroy(void)
IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
}
-void ipa2_active_clients_log_print_buffer(void)
-{
- int i;
-
- ipa_active_clients_lock();
- for (i = (ipa_ctx->ipa2_active_clients_logging.log_tail + 1) %
- IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
- i != ipa_ctx->ipa2_active_clients_logging.log_head;
- i = (i + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
- pr_err("%s\n", ipa_ctx->ipa2_active_clients_logging
- .log_buffer[i]);
- }
- ipa_active_clients_unlock();
-}
-
-
enum ipa_smmu_cb_type {
IPA_SMMU_CB_AP,
IPA_SMMU_CB_WLAN,
@@ -2527,7 +2614,7 @@ static void ipa_teardown_apps_pipes(void)
}
#ifdef CONFIG_COMPAT
-long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+long compat_ipa_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int retval = 0;
struct ipa_ioc_nat_alloc_mem32 nat_mem32;
@@ -2668,7 +2755,7 @@ static const struct file_operations ipa_drv_fops = {
.read = ipa_read,
.unlocked_ioctl = ipa_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_ipa3_ioctl,
+ .compat_ioctl = compat_ipa_ioctl,
#endif
};
@@ -2919,37 +3006,116 @@ static void ipa_start_tag_process(struct work_struct *work)
if (res)
IPAERR("ipa_tag_aggr_force_close failed %d\n", res);
- IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
+ IPA2_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
IPADBG("TAG process done\n");
}
/**
-* ipa_inc_client_enable_clks() - Increase active clients counter, and
-* enable ipa clocks if necessary
+* ipa2_active_clients_log_mod() - Log a modification in the active clients
+* reference count
*
-* Please do not use this API, use the wrapper macros instead (ipa_i.h)
-* IPA2_ACTIVE_CLIENTS_INC_XXXX();
+* This method logs any modification in the active clients reference count:
+* It logs the modification in the circular history buffer
+* It logs the modification in the hash table - looking for an entry,
+* creating one if needed and deleting one if needed.
*
-* Return codes:
-* None
+* @id: ipa2_active client logging info struct to hold the log information
+* @inc: a boolean variable to indicate whether the modification is an increase
+* or decrease
+* @int_ctx: a boolean variable to indicate whether this call is being made from
+* an interrupt context and therefore should allocate GFP_ATOMIC memory
+*
+* Method process:
+* - Hash the unique identifier string
+* - Find the hash in the table
+* 1)If found, increase or decrease the reference count
+* 2)If not found, allocate a new hash table entry struct and initialize it
+* - Remove and deallocate unneeded data structure
+* - Log the call in the circular history buffer (unless it is a simple call)
*/
-void ipa2_inc_client_enable_clks(struct ipa2_active_client_logging_info *id)
+void ipa2_active_clients_log_mod(struct ipa2_active_client_logging_info *id,
+ bool inc, bool int_ctx)
{
char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN];
unsigned long long t;
unsigned long nanosec_rem;
+ struct ipa2_active_client_htable_entry *hentry;
+ struct ipa2_active_client_htable_entry *hfound;
+ u32 hkey;
+ char str_to_hash[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN];
+
+ hfound = NULL;
+ memset(str_to_hash, 0, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
+ strlcpy(str_to_hash, id->id_string, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
+ hkey = arch_fast_hash(str_to_hash, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN,
+ 0);
+ hash_for_each_possible(ipa_ctx->ipa2_active_clients_logging.htable,
+ hentry, list, hkey) {
+ if (!strcmp(hentry->id_string, id->id_string)) {
+ hentry->count = hentry->count + (inc ? 1 : -1);
+ hfound = hentry;
+ }
+ }
+ if (hfound == NULL) {
+ hentry = NULL;
+ hentry = kzalloc(sizeof(
+ struct ipa2_active_client_htable_entry),
+ int_ctx ? GFP_ATOMIC : GFP_KERNEL);
+ if (hentry == NULL) {
+ IPAERR("failed allocating active clients hash entry");
+ return;
+ }
+ hentry->type = id->type;
+ strlcpy(hentry->id_string, id->id_string,
+ IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
+ INIT_HLIST_NODE(&hentry->list);
+ hentry->count = inc ? 1 : -1;
+ hash_add(ipa_ctx->ipa2_active_clients_logging.htable,
+ &hentry->list, hkey);
+ } else if (hfound->count == 0) {
+ hash_del(&hfound->list);
+ kfree(hfound);
+ }
- ipa_active_clients_lock();
if (id->type != SIMPLE) {
- t = cpu_clock(smp_processor_id());
+ t = local_clock();
nanosec_rem = do_div(t, 1000000000) / 1000;
snprintf(temp_str, IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN,
- "[%5lu.%06lu] ^ %s, %s: %d",
- (unsigned long)t, nanosec_rem,
- id->id_string, id->file, id->line);
+ inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
+ "[%5lu.%06lu] v %s, %s: %d",
+ (unsigned long)t, nanosec_rem,
+ id->id_string, id->file, id->line);
ipa2_active_clients_log_insert(temp_str);
}
+}
+
+void ipa2_active_clients_log_dec(struct ipa2_active_client_logging_info *id,
+ bool int_ctx)
+{
+ ipa2_active_clients_log_mod(id, false, int_ctx);
+}
+
+void ipa2_active_clients_log_inc(struct ipa2_active_client_logging_info *id,
+ bool int_ctx)
+{
+ ipa2_active_clients_log_mod(id, true, int_ctx);
+}
+
+/**
+* ipa_inc_client_enable_clks() - Increase active clients counter, and
+* enable ipa clocks if necessary
+*
+* Please do not use this API, use the wrapper macros instead (ipa_i.h)
+* IPA2_ACTIVE_CLIENTS_INC_XXXX();
+*
+* Return codes:
+* None
+*/
+void ipa2_inc_client_enable_clks(struct ipa2_active_client_logging_info *id)
+{
+ ipa_active_clients_lock();
+ ipa2_active_clients_log_inc(id, false);
ipa_ctx->ipa_active_clients.cnt++;
if (ipa_ctx->ipa_active_clients.cnt == 1)
ipa_enable_clks();
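
The kernel-doc above describes the bookkeeping: hash the client id string, look it up in the hash table, adjust the per-client count (allocating an entry on first use and freeing it once the count balances back to zero), and append a line to the circular history unless the vote is a SIMPLE one. A simplified, self-contained sketch of that pattern, with jhash() standing in for arch_fast_hash() and all names illustrative:

/* Illustrative sketch only -- simplified per-client refcount bookkeeping. */
#include <linux/hashtable.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/string.h>

#define NAME_LEN 40

struct refcnt_entry {
	struct hlist_node node;
	char name[NAME_LEN];
	int count;
};

static DEFINE_HASHTABLE(refcnt_table, 6);	/* 64 buckets */

static void refcnt_mod(const char *name, bool inc, bool atomic_ctx)
{
	struct refcnt_entry *e, *found = NULL;
	char key[NAME_LEN] = { 0 };
	u32 hkey;

	strlcpy(key, name, NAME_LEN);
	hkey = jhash(key, NAME_LEN, 0);

	hash_for_each_possible(refcnt_table, e, node, hkey) {
		if (!strcmp(e->name, name)) {
			e->count += inc ? 1 : -1;
			found = e;
		}
	}
	if (!found) {
		/* first vote from this client: create its entry */
		e = kzalloc(sizeof(*e), atomic_ctx ? GFP_ATOMIC : GFP_KERNEL);
		if (!e)
			return;
		strlcpy(e->name, name, NAME_LEN);
		e->count = inc ? 1 : -1;
		hash_add(refcnt_table, &e->node, hkey);
	} else if (found->count == 0) {
		/* votes balanced out: drop the entry from the table */
		hash_del(&found->node);
		kfree(found);
	}
}
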
@@ -2973,9 +3139,6 @@ int ipa2_inc_client_enable_clks_no_block(struct ipa2_active_client_logging_info
{
int res = 0;
unsigned long flags;
- char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN];
- unsigned long long t;
- unsigned long nanosec_rem;
if (ipa_active_clients_trylock(&flags) == 0)
return -EPERM;
@@ -2985,15 +3148,7 @@ int ipa2_inc_client_enable_clks_no_block(struct ipa2_active_client_logging_info
goto bail;
}
- if (id->type != SIMPLE) {
- t = cpu_clock(smp_processor_id());
- nanosec_rem = do_div(t, 1000000000) / 1000;
- snprintf(temp_str, IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN,
- "[%5lu.%06lu] ^ %s, %s: %d",
- (unsigned long)t, nanosec_rem,
- id->id_string, id->file, id->line);
- ipa2_active_clients_log_insert(temp_str);
- }
+ ipa2_active_clients_log_inc(id, true);
ipa_ctx->ipa_active_clients.cnt++;
IPADBG("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
@@ -3019,24 +3174,17 @@ bail:
*/
void ipa2_dec_client_disable_clks(struct ipa2_active_client_logging_info *id)
{
- char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN];
- unsigned long long t;
- unsigned long nanosec_rem;
+ struct ipa2_active_client_logging_info log_info;
ipa_active_clients_lock();
- if (id->type != SIMPLE) {
- t = cpu_clock(smp_processor_id());
- nanosec_rem = do_div(t, 1000000000) / 1000;
- snprintf(temp_str, IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN,
- "[%5lu.%06lu] v %s, %s: %d",
- (unsigned long)t, nanosec_rem,
- id->id_string, id->file, id->line);
- ipa2_active_clients_log_insert(temp_str);
- }
+ ipa2_active_clients_log_dec(id, false);
ipa_ctx->ipa_active_clients.cnt--;
IPADBG("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
if (ipa_ctx->ipa_active_clients.cnt == 0) {
if (ipa_ctx->tag_process_before_gating) {
+ IPA2_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
+ "TAG_PROCESS");
+ ipa2_active_clients_log_inc(&log_info, false);
ipa_ctx->tag_process_before_gating = false;
/*
* When TAG process ends, active clients will be
@@ -3058,15 +3206,21 @@ void ipa2_dec_client_disable_clks(struct ipa2_active_client_logging_info *id)
* Return codes:
* None
*/
-void ipa_inc_acquire_wakelock(void)
+void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client)
{
unsigned long flags;
+ if (ref_client >= IPA_WAKELOCK_REF_CLIENT_MAX)
+ return;
spin_lock_irqsave(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
- ipa_ctx->wakelock_ref_cnt.cnt++;
- if (ipa_ctx->wakelock_ref_cnt.cnt == 1)
+ if (ipa_ctx->wakelock_ref_cnt.cnt & (1 << ref_client))
+ IPAERR("client enum %d mask already set. ref cnt = %d\n",
+ ref_client, ipa_ctx->wakelock_ref_cnt.cnt);
+ ipa_ctx->wakelock_ref_cnt.cnt |= (1 << ref_client);
+ if (ipa_ctx->wakelock_ref_cnt.cnt)
__pm_stay_awake(&ipa_ctx->w_lock);
- IPADBG("active wakelock ref cnt = %d\n", ipa_ctx->wakelock_ref_cnt.cnt);
+ IPADBG("active wakelock ref cnt = %d client enum %d\n",
+ ipa_ctx->wakelock_ref_cnt.cnt, ref_client);
spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
}
@@ -3078,13 +3232,16 @@ void ipa_inc_acquire_wakelock(void)
* Return codes:
* None
*/
-void ipa_dec_release_wakelock(void)
+void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client)
{
unsigned long flags;
+ if (ref_client >= IPA_WAKELOCK_REF_CLIENT_MAX)
+ return;
spin_lock_irqsave(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
- ipa_ctx->wakelock_ref_cnt.cnt--;
- IPADBG("active wakelock ref cnt = %d\n", ipa_ctx->wakelock_ref_cnt.cnt);
+ ipa_ctx->wakelock_ref_cnt.cnt &= ~(1 << ref_client);
+ IPADBG("active wakelock ref cnt = %d client enum %d\n",
+ ipa_ctx->wakelock_ref_cnt.cnt, ref_client);
if (ipa_ctx->wakelock_ref_cnt.cnt == 0)
__pm_relax(&ipa_ctx->w_lock);
spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
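
ipa_inc_acquire_wakelock()/ipa_dec_release_wakelock() now track one bit per ipa_wakelock_ref_client instead of a raw counter, so a client that acquires or releases twice cannot push the count out of balance; the wakeup source is held while any bit is set and released when the mask drops to zero. A minimal sketch of that scheme under assumed names:

/* Illustrative sketch only -- per-client wakelock reference bits. */
#include <linux/bitops.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>

enum ref_client { REF_TX, REF_LAN_RX, REF_WAN_RX, REF_MAX };

static u32 ref_bits;
static DEFINE_SPINLOCK(ref_lock);
static struct wakeup_source ws;		/* assumed initialized elsewhere */

static void ref_acquire(enum ref_client c)
{
	unsigned long flags;

	if (c >= REF_MAX)
		return;
	spin_lock_irqsave(&ref_lock, flags);
	ref_bits |= BIT(c);		/* idempotent per client */
	if (ref_bits)
		__pm_stay_awake(&ws);
	spin_unlock_irqrestore(&ref_lock, flags);
}

static void ref_release(enum ref_client c)
{
	unsigned long flags;

	if (c >= REF_MAX)
		return;
	spin_lock_irqsave(&ref_lock, flags);
	ref_bits &= ~BIT(c);
	if (!ref_bits)			/* last holder gone: allow suspend */
		__pm_relax(&ws);
	spin_unlock_irqrestore(&ref_lock, flags);
}
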
@@ -3461,6 +3618,7 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
struct sps_bam_props bam_props = { 0 };
struct ipa_flt_tbl *flt_tbl;
struct ipa_rt_tbl_set *rset;
+ struct ipa2_active_client_logging_info log_info;
IPADBG("IPA Driver initialization started\n");
@@ -3584,6 +3742,8 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
mutex_init(&ipa_ctx->ipa_active_clients.mutex);
spin_lock_init(&ipa_ctx->ipa_active_clients.spinlock);
+ IPA2_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
+ ipa2_active_clients_log_inc(&log_info, false);
ipa_ctx->ipa_active_clients.cnt = 1;
/* Create workqueues for power management */
@@ -3617,6 +3777,7 @@ static int ipa_init(const struct ipa_plat_drv_res *resource_p,
bam_props.manage |= SPS_BAM_MGR_DEVICE_REMOTE;
if (ipa_ctx->smmu_present)
bam_props.options |= SPS_BAM_SMMU_EN;
+ bam_props.options |= SPS_BAM_CACHED_WP;
bam_props.ee = resource_p->ee;
bam_props.ipc_loglevel = 3;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
index d818660ef2b3..b3f50dd52528 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -407,7 +407,8 @@ int ipa2_connect(const struct ipa_connect_params *in,
}
if ((ipa_ctx->ipa_hw_type == IPA_HW_v2_0 ||
- ipa_ctx->ipa_hw_type == IPA_HW_v2_5) &&
+ ipa_ctx->ipa_hw_type == IPA_HW_v2_5 ||
+ ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) &&
IPA_CLIENT_IS_USB_CONS(in->client))
ep->connect.event_thresh = IPA_USB_EVENT_THRESHOLD;
else
@@ -539,6 +540,7 @@ int ipa2_disconnect(u32 clnt_hdl)
struct iommu_domain *smmu_domain;
struct ipa_disable_force_clear_datapath_req_msg_v01 req = {0};
int res;
+ enum ipa_client_type client_type;
if (unlikely(!ipa_ctx)) {
IPAERR("IPA driver was not initialized\n");
@@ -552,10 +554,9 @@ int ipa2_disconnect(u32 clnt_hdl)
}
ep = &ipa_ctx->ep[clnt_hdl];
-
+ client_type = ipa2_get_client_mapping(clnt_hdl);
if (!ep->keep_ipa_awake)
- IPA2_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
-
+ IPA2_ACTIVE_CLIENTS_INC_EP(client_type);
/* Set Disconnect in Progress flag. */
spin_lock(&ipa_ctx->disconnect_lock);
@@ -662,7 +663,7 @@ int ipa2_disconnect(u32 clnt_hdl)
memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
spin_unlock(&ipa_ctx->disconnect_lock);
- IPA2_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+ IPA2_ACTIVE_CLIENTS_DEC_EP(client_type);
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 02b8c6f77427..daf6091aad67 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -21,6 +21,9 @@
#define IPA_MAX_MSG_LEN 4096
#define IPA_DBG_CNTR_ON 127265
#define IPA_DBG_CNTR_OFF 127264
+#define IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE ((IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN \
+ * IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) \
+ + IPA_MAX_MSG_LEN)
#define IPA_DUMP_STATUS_FIELD(f) \
pr_err(#f "=0x%x\n", status->f)
@@ -108,6 +111,7 @@ static struct dentry *dfile_rm_stats;
static struct dentry *dfile_status_stats;
static struct dentry *dfile_active_clients;
static char dbg_buff[IPA_MAX_MSG_LEN];
+static char *active_clients_buf;
static s8 ep_reg_idx;
int _ipa_read_gen_reg_v1_1(char *buff, int max_len)
@@ -1552,9 +1556,23 @@ static ssize_t ipa_status_stats_read(struct file *file, char __user *ubuf,
static ssize_t ipa2_print_active_clients_log(struct file *file,
char __user *ubuf, size_t count, loff_t *ppos)
{
- ipa2_active_clients_log_print_buffer();
+ int cnt;
+ int table_size;
- return 0;
+ if (active_clients_buf == NULL) {
+ IPAERR("Active Clients buffer is not allocated");
+ return 0;
+ }
+ memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE);
+ ipa_active_clients_lock();
+ cnt = ipa2_active_clients_log_print_buffer(active_clients_buf,
+ IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE - IPA_MAX_MSG_LEN);
+ table_size = ipa2_active_clients_log_print_table(active_clients_buf
+ + cnt, IPA_MAX_MSG_LEN);
+ ipa_active_clients_unlock();
+
+ return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf,
+ cnt + table_size);
}
static ssize_t ipa2_clear_active_clients_log(struct file *file,
@@ -1682,13 +1700,19 @@ void ipa_debugfs_init(void)
goto fail;
}
- dfile_ep_reg = debugfs_create_file("active_clients",
+ dfile_active_clients = debugfs_create_file("active_clients",
read_write_mode, dent, 0, &ipa2_active_clients);
- if (!dfile_ep_reg || IS_ERR(dfile_active_clients)) {
- IPAERR("fail to create file for debug_fs ep_reg\n");
+ if (!dfile_active_clients || IS_ERR(dfile_active_clients)) {
+ IPAERR("fail to create file for debug_fs active_clients\n");
goto fail;
}
+ active_clients_buf = NULL;
+ active_clients_buf = kzalloc(IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE,
+ GFP_KERNEL);
+ if (active_clients_buf == NULL)
+ IPAERR("fail to allocate active clients memory buffer");
+
dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
&ipa_ep_reg_ops);
if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
@@ -1843,6 +1867,10 @@ void ipa_debugfs_remove(void)
IPAERR("ipa_debugfs_remove: folder was not created.\n");
return;
}
+ if (active_clients_buf != NULL) {
+ kfree(active_clients_buf);
+ active_clients_buf = NULL;
+ }
debugfs_remove_recursive(dent);
}
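
The new active_clients debugfs handler formats the history and the table into a buffer allocated once at init and hands it back through simple_read_from_buffer(), which takes care of *ppos and partial reads. A stripped-down sketch of the same read-handler shape (buffer name, size and content are illustrative):

/* Illustrative sketch only -- returning a formatted buffer via debugfs. */
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

#define DUMP_BUF_SIZE 4096
static char *dump_buf;			/* kzalloc'd once at debugfs init */

static ssize_t dump_read(struct file *file, char __user *ubuf,
			 size_t count, loff_t *ppos)
{
	int len;

	if (!dump_buf)
		return 0;
	len = scnprintf(dump_buf, DUMP_BUF_SIZE, "some state: %d\n", 42);
	return simple_read_from_buffer(ubuf, count, ppos, dump_buf, len);
}

static const struct file_operations dump_ops = {
	.open = simple_open,
	.read = dump_read,
};
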
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index fc382d6b110b..93abbfe0e6ac 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include "ipa_i.h"
+#include "ipa_trace.h"
#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
@@ -241,7 +242,6 @@ static void ipa_tx_switch_to_intr_mode(struct ipa_sys_context *sys)
}
atomic_set(&sys->curr_polling_state, 0);
ipa_handle_tx_core(sys, true, false);
- ipa_dec_release_wakelock();
return;
fail:
@@ -725,7 +725,6 @@ static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify)
IPAERR("sps_set_config() failed %d\n", ret);
break;
}
- ipa_inc_acquire_wakelock();
atomic_set(&sys->curr_polling_state, 1);
queue_work(sys->wq, &sys->work);
}
@@ -841,7 +840,7 @@ static void ipa_rx_switch_to_intr_mode(struct ipa_sys_context *sys)
}
atomic_set(&sys->curr_polling_state, 0);
ipa_handle_rx_core(sys, true, false);
- ipa_dec_release_wakelock();
+ ipa_dec_release_wakelock(sys->ep->wakelock_client);
return;
fail:
@@ -857,6 +856,16 @@ static void ipa_sps_irq_control(struct ipa_sys_context *sys, bool enable)
{
int ret;
+ /*
+ * Do not change sps config in case we are in polling mode as this
+ * indicates that sps driver already notified EOT event and sps config
+ * should not change until ipa driver processes the packet.
+ */
+ if (atomic_read(&sys->curr_polling_state)) {
+ IPADBG("in polling mode, do not change config\n");
+ return;
+ }
+
if (enable) {
ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
if (ret) {
@@ -959,8 +968,9 @@ static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
IPAERR("sps_set_config() failed %d\n", ret);
break;
}
- ipa_inc_acquire_wakelock();
+ ipa_inc_acquire_wakelock(sys->ep->wakelock_client);
atomic_set(&sys->curr_polling_state, 1);
+ trace_intr_to_poll(sys->ep->client);
queue_work(sys->wq, &sys->work);
}
break;
@@ -997,8 +1007,10 @@ static void ipa_handle_rx(struct ipa_sys_context *sys)
cnt = ipa_handle_rx_core(sys, true, true);
if (cnt == 0) {
inactive_cycles++;
+ trace_idle_sleep_enter(sys->ep->client);
usleep_range(POLLING_MIN_SLEEP_RX,
POLLING_MAX_SLEEP_RX);
+ trace_idle_sleep_exit(sys->ep->client);
} else {
inactive_cycles = 0;
}
@@ -1012,6 +1024,7 @@ static void ipa_handle_rx(struct ipa_sys_context *sys)
} while (inactive_cycles <= POLLING_INACTIVITY_RX);
+ trace_poll_to_intr(sys->ep->client);
ipa_rx_switch_to_intr_mode(sys);
IPA2_ACTIVE_CLIENTS_DEC_SIMPLE();
}
@@ -1309,14 +1322,6 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
*clnt_hdl = ipa_ep_idx;
- if (IPA_CLIENT_IS_CONS(sys_in->client))
- ipa_replenish_rx_cache(ep->sys);
-
- if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
- ipa_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
- atomic_inc(&ipa_ctx->wc_memb.active_clnt_cnt);
- }
-
if (nr_cpu_ids > 1 &&
(sys_in->client == IPA_CLIENT_APPS_LAN_CONS ||
sys_in->client == IPA_CLIENT_APPS_WAN_CONS)) {
@@ -1334,6 +1339,14 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
}
}
+ if (IPA_CLIENT_IS_CONS(sys_in->client))
+ ipa_replenish_rx_cache(ep->sys);
+
+ if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
+ ipa_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
+ atomic_inc(&ipa_ctx->wc_memb.active_clnt_cnt);
+ }
+
ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
if (ipa_ctx->modem_cfg_emb_pipe_flt &&
@@ -1414,6 +1427,8 @@ int ipa2_teardown_sys_pipe(u32 clnt_hdl)
} while (1);
}
+ if (IPA_CLIENT_IS_CONS(ep->client))
+ cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
flush_workqueue(ep->sys->wq);
sps_disconnect(ep->ep_hdl);
dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
@@ -2858,6 +2873,7 @@ static int ipa_assign_policy(struct ipa_sys_connect_params *in,
}
} else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
sys->ep->status.status_en = true;
+ sys->ep->wakelock_client = IPA_WAKELOCK_REF_CLIENT_MAX;
if (IPA_CLIENT_IS_PROD(in->client)) {
if (!sys->ep->skip_ep_cfg) {
sys->policy = IPA_POLICY_NOINTR_MODE;
@@ -2905,11 +2921,15 @@ static int ipa_assign_policy(struct ipa_sys_connect_params *in,
IPA_GENERIC_AGGR_BYTE_LIMIT;
in->ipa_ep_cfg.aggr.aggr_pkt_limit =
IPA_GENERIC_AGGR_PKT_LIMIT;
+ sys->ep->wakelock_client =
+ IPA_WAKELOCK_REF_CLIENT_LAN_RX;
} else if (in->client ==
IPA_CLIENT_APPS_WAN_CONS) {
sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
sys->rx_pool_sz =
ipa_ctx->wan_rx_ring_size;
+ sys->ep->wakelock_client =
+ IPA_WAKELOCK_REF_CLIENT_WAN_RX;
if (ipa_ctx->
ipa_client_apps_wan_cons_agg_gro) {
IPAERR("get close-by %u\n",
@@ -2980,9 +3000,12 @@ static int ipa_assign_policy(struct ipa_sys_connect_params *in,
if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
sys->pyld_hdlr = NULL;
+ sys->repl_hdlr = ipa_replenish_wlan_rx_cache;
sys->get_skb = ipa_get_skb_ipa_rx;
sys->free_skb = ipa_free_skb_rx;
in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+ sys->ep->wakelock_client =
+ IPA_WAKELOCK_REF_CLIENT_WLAN_RX;
} else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
IPADBG("assigning policy to client:%d",
in->client);
@@ -3007,6 +3030,8 @@ static int ipa_assign_policy(struct ipa_sys_connect_params *in,
sys->get_skb = ipa_get_skb_ipa_rx;
sys->free_skb = ipa_free_skb_rx;
sys->repl_hdlr = ipa_replenish_rx_cache;
+ sys->ep->wakelock_client =
+ IPA_WAKELOCK_REF_CLIENT_ODU_RX;
} else if (in->client ==
IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
IPADBG("assigning policy to client:%d",
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 52e2410126f4..f4148a5e52f1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -235,7 +235,9 @@
} while (0)
#define IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
-#define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 100
+#define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 96
+#define IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
+#define IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN 40
extern const char *ipa2_clients_strings[];
@@ -254,11 +256,19 @@ struct ipa2_active_client_logging_info {
enum ipa2_active_client_log_type type;
};
+struct ipa2_active_client_htable_entry {
+ struct hlist_node list;
+ char id_string[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN];
+ int count;
+ enum ipa2_active_client_log_type type;
+};
+
struct ipa2_active_clients_log_ctx {
char *log_buffer[IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES];
int log_head;
int log_tail;
bool log_rdy;
+ struct hlist_head htable[IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE];
};
@@ -596,6 +606,16 @@ struct ipa_status_stats {
int curr;
};
+enum ipa_wakelock_ref_client {
+ IPA_WAKELOCK_REF_CLIENT_TX = 0,
+ IPA_WAKELOCK_REF_CLIENT_LAN_RX = 1,
+ IPA_WAKELOCK_REF_CLIENT_WAN_RX = 2,
+ IPA_WAKELOCK_REF_CLIENT_WLAN_RX = 3,
+ IPA_WAKELOCK_REF_CLIENT_ODU_RX = 4,
+ IPA_WAKELOCK_REF_CLIENT_SPS = 5,
+ IPA_WAKELOCK_REF_CLIENT_MAX
+};
+
/**
* struct ipa_ep_context - IPA end point context
* @valid: flag indicating id EP context is valid
@@ -655,6 +675,7 @@ struct ipa_ep_context {
u32 rx_replenish_threshold;
bool disconnect_in_progress;
u32 qmi_request_sent;
+ enum ipa_wakelock_ref_client wakelock_client;
/* sys MUST be the last element of this struct */
struct ipa_sys_context *sys;
@@ -905,7 +926,7 @@ struct ipa_active_clients {
struct ipa_wakelock_ref_cnt {
spinlock_t spinlock;
- int cnt;
+ u32 cnt;
};
struct ipa_tag_completion {
@@ -1753,6 +1774,10 @@ int ipa2_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
* if uC not ready only, register callback
*/
int ipa2_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
+/*
+ * To de-register uC ready callback
+ */
+int ipa2_uc_dereg_rdyCB(void);
/*
* Resource manager
@@ -1945,7 +1970,12 @@ void ipa2_inc_client_enable_clks(struct ipa2_active_client_logging_info *id);
int ipa2_inc_client_enable_clks_no_block(struct ipa2_active_client_logging_info
*id);
void ipa2_dec_client_disable_clks(struct ipa2_active_client_logging_info *id);
-void ipa2_active_clients_log_print_buffer(void);
+void ipa2_active_clients_log_dec(struct ipa2_active_client_logging_info *id,
+ bool int_ctx);
+void ipa2_active_clients_log_inc(struct ipa2_active_client_logging_info *id,
+ bool int_ctx);
+int ipa2_active_clients_log_print_buffer(char *buf, int size);
+int ipa2_active_clients_log_print_table(char *buf, int size);
void ipa2_active_clients_log_clear(void);
int ipa_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev);
int __ipa_del_rt_rule(u32 rule_hdl);
@@ -2118,7 +2148,7 @@ void ipa_flow_control(enum ipa_client_type ipa_client, bool enable,
uint32_t qmap_id);
int ipa2_restore_suspend_handler(void);
void ipa_sps_irq_control_all(bool enable);
-void ipa_inc_acquire_wakelock(void);
-void ipa_dec_release_wakelock(void);
+void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client);
+void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client);
const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name);
#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rm.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rm.c
index 14930fd6f522..0425a6356f02 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rm.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
static const char *resource_name_to_str[IPA_RM_RESOURCE_MAX] = {
__stringify(IPA_RM_RESOURCE_Q6_PROD),
__stringify(IPA_RM_RESOURCE_USB_PROD),
+ __stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD),
__stringify(IPA_RM_RESOURCE_HSIC_PROD),
__stringify(IPA_RM_RESOURCE_STD_ECM_PROD),
__stringify(IPA_RM_RESOURCE_RNDIS_PROD),
@@ -29,6 +30,7 @@ static const char *resource_name_to_str[IPA_RM_RESOURCE_MAX] = {
__stringify(IPA_RM_RESOURCE_MHI_PROD),
__stringify(IPA_RM_RESOURCE_Q6_CONS),
__stringify(IPA_RM_RESOURCE_USB_CONS),
+ __stringify(IPA_RM_RESOURCE_USB_DPL_CONS),
__stringify(IPA_RM_RESOURCE_HSIC_CONS),
__stringify(IPA_RM_RESOURCE_WLAN_CONS),
__stringify(IPA_RM_RESOURCE_APPS_CONS),
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h
new file mode 100644
index 000000000000..d70abdfa0469
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h
@@ -0,0 +1,135 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ipa
+#define TRACE_INCLUDE_FILE ipa_trace
+
+#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _IPA_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(
+ intr_to_poll,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ poll_to_intr,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ idle_sleep_enter,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ idle_sleep_exit,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ rmnet_ipa_netifni,
+
+ TP_PROTO(unsigned long rx_pkt_cnt),
+
+ TP_ARGS(rx_pkt_cnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, rx_pkt_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->rx_pkt_cnt = rx_pkt_cnt;
+ ),
+
+ TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+ rmnet_ipa_netifrx,
+
+ TP_PROTO(unsigned long rx_pkt_cnt),
+
+ TP_ARGS(rx_pkt_cnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, rx_pkt_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->rx_pkt_cnt = rx_pkt_cnt;
+ ),
+
+ TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+#endif /* _IPA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
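
ipa_trace.h only declares the events; exactly one compilation unit must define CREATE_TRACE_POINTS before including it so the tracepoint bodies are emitted, while every other caller simply includes the header and invokes trace_<event>(). A minimal sketch of the consumer side, using the event names defined above:

/* Illustrative sketch only -- consuming the tracepoints defined above. */

/* In exactly one .c file, emit the tracepoint bodies: */
#define CREATE_TRACE_POINTS
#include "ipa_trace.h"

/* Any other file just includes "ipa_trace.h" and calls the hooks: */
static void rx_poll_example(unsigned long client)
{
	trace_intr_to_poll(client);	/* switching to polling mode */
	/* ... poll and process packets ... */
	trace_poll_to_intr(client);	/* back to interrupt mode */
}
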
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
index 43f50c8384bc..f66c978bfd53 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -732,7 +732,7 @@ int ipa_uc_reset_pipe(enum ipa_client_type ipa_client)
IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD", ep_idx);
ret = ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_RESET_PIPE, 0,
- true, 10*HZ);
+ false, 10*HZ);
return ret;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index 698d50c62ee2..f7148adb78a0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1450,6 +1450,20 @@ int ipa2_uc_reg_rdyCB(
return 0;
}
+/**
+ * ipa2_uc_dereg_rdyCB() - To de-register uC ready CB
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa2_uc_dereg_rdyCB(void)
+{
+ ipa_ctx->uc_wdi_ctx.uc_ready_cb = NULL;
+ ipa_ctx->uc_wdi_ctx.priv = NULL;
+
+ return 0;
+}
+
/**
* ipa2_uc_wdi_get_dbpa() - To retrieve
@@ -1519,9 +1533,16 @@ static void ipa_uc_wdi_loaded_handler(void)
return;
}
- if (ipa_ctx->uc_wdi_ctx.uc_ready_cb)
+ if (ipa_ctx->uc_wdi_ctx.uc_ready_cb) {
ipa_ctx->uc_wdi_ctx.uc_ready_cb(
ipa_ctx->uc_wdi_ctx.priv);
+
+ ipa_ctx->uc_wdi_ctx.uc_ready_cb =
+ NULL;
+ ipa_ctx->uc_wdi_ctx.priv = NULL;
+ }
+
+ return;
}
int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index acbafaf9d986..560a91004b91 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -569,6 +569,7 @@ int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource)
struct ipa_ep_cfg_ctrl suspend;
int ipa_ep_idx;
unsigned long flags;
+ struct ipa2_active_client_logging_info log_info;
if (ipa_active_clients_trylock(&flags) == 0)
return -EPERM;
@@ -606,6 +607,9 @@ int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource)
}
if (res == 0) {
+ IPA2_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
+ ipa_rm_resource_str(resource));
+ ipa2_active_clients_log_dec(&log_info, true);
ipa_ctx->ipa_active_clients.cnt--;
IPADBG("active clients = %d\n",
ipa_ctx->ipa_active_clients.cnt);
@@ -5004,6 +5008,7 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_get_smem_restr_bytes = ipa2_get_smem_restr_bytes;
api_ctrl->ipa_uc_wdi_get_dbpa = ipa2_uc_wdi_get_dbpa;
api_ctrl->ipa_uc_reg_rdyCB = ipa2_uc_reg_rdyCB;
+ api_ctrl->ipa_uc_dereg_rdyCB = ipa2_uc_dereg_rdyCB;
api_ctrl->ipa_create_wdi_mapping = ipa2_create_wdi_mapping;
api_ctrl->ipa_release_wdi_mapping = ipa2_release_wdi_mapping;
api_ctrl->ipa_rm_create_resource = ipa2_rm_create_resource;
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 0eea4fcdb26a..a601cf29c5c3 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -34,6 +34,8 @@
#include <linux/rmnet_ipa_fd_ioctl.h>
#include <linux/ipa.h>
+#include "ipa_trace.h"
+
#define WWAN_METADATA_SHFT 24
#define WWAN_METADATA_MASK 0xFF000000
#define WWAN_DATA_LEN 2000
@@ -88,6 +90,7 @@ enum wwan_device_status {
struct ipa_rmnet_plat_drv_res {
bool ipa_rmnet_ssr;
bool ipa_loaduC;
+ bool ipa_advertise_sg_support;
};
/**
@@ -1147,10 +1150,13 @@ static void apps_ipa_packet_receive_notify(void *priv,
skb->dev = ipa_netdevs[0];
skb->protocol = htons(ETH_P_MAP);
- if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH == 0)
+ if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH == 0) {
+ trace_rmnet_ipa_netifni(dev->stats.rx_packets);
result = netif_rx_ni(skb);
- else
+ } else {
+ trace_rmnet_ipa_netifrx(dev->stats.rx_packets);
result = netif_rx(skb);
+ }
if (result) {
pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_rx\n",
@@ -1161,6 +1167,8 @@ static void apps_ipa_packet_receive_notify(void *priv,
dev->stats.rx_bytes += packet_len;
}
+static struct ipa_rmnet_plat_drv_res ipa_rmnet_res = {0, };
+
/**
* ipa_wwan_ioctl() - I/O control for wwan network driver.
*
@@ -1289,6 +1297,15 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
sizeof(struct rmnet_ioctl_extended_s)))
rc = -EFAULT;
break;
+ /* GET SG support */
+ case RMNET_IOCTL_GET_SG_SUPPORT:
+ extend_ioctl_data.u.data =
+ ipa_rmnet_res.ipa_advertise_sg_support;
+ if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+ &extend_ioctl_data,
+ sizeof(struct rmnet_ioctl_extended_s)))
+ rc = -EFAULT;
+ break;
/* Get endpoint ID */
case RMNET_IOCTL_GET_EPID:
IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
@@ -1349,6 +1366,11 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
rmnet_mux_val.mux_id);
return rc;
}
+ if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
+ IPAWANERR("Exceed mux_channel limit(%d)\n",
+ rmnet_index);
+ return -EFAULT;
+ }
IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
extend_ioctl_data.u.rmnet_mux_val.mux_id,
extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
@@ -1391,7 +1413,8 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 8;
apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
- cs_offload_en = 1;
+ cs_offload_en =
+ IPA_ENABLE_CS_OFFLOAD_UL;
apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
cs_metadata_hdr_offset = 1;
} else {
@@ -1449,7 +1472,8 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if ((extend_ioctl_data.u.data) &
RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.
- cs_offload_en = 2;
+ cs_offload_en =
+ IPA_ENABLE_CS_OFFLOAD_DL;
if ((extend_ioctl_data.u.data) &
RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
@@ -1823,8 +1847,6 @@ static struct notifier_block ssr_notifier = {
.notifier_call = ssr_notifier_cb,
};
-static struct ipa_rmnet_plat_drv_res ipa_rmnet_res = {0, };
-
static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
{
@@ -1838,6 +1860,12 @@ static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
"qcom,ipa-loaduC");
pr_info("IPA ipa-loaduC = %s\n",
ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");
+
+ ipa_rmnet_drv_res->ipa_advertise_sg_support =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-advertise-sg-support");
+ pr_info("IPA SG support = %s\n",
+ ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
return 0;
}
@@ -2078,8 +2106,6 @@ static int ipa_wwan_remove(struct platform_device *pdev)
ipa_del_mux_qmap_hdrs();
if (ipa_qmi_ctx && ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false)
wwan_del_ul_flt_rule_to_ipa();
- /* clean up cached QMI msg/handlers */
- ipa_qmi_service_exit();
ipa_cleanup_deregister_intf();
atomic_set(&is_initialized, 0);
pr_info("rmnet_ipa completed deinitialization\n");
@@ -2207,6 +2233,9 @@ static int ssr_notifier_cb(struct notifier_block *this,
}
if (SUBSYS_BEFORE_POWERUP == code) {
pr_info("IPA received MPSS BEFORE_POWERUP\n");
+ if (atomic_read(&is_ssr))
+ /* clean up cached QMI msg/handlers */
+ ipa_qmi_service_exit();
ipa2_proxy_clk_vote();
pr_info("IPA BEFORE_POWERUP handling is complete\n");
return NOTIFY_DONE;
@@ -2469,20 +2498,20 @@ int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
{
struct ipa_get_data_stats_req_msg_v01 *req;
struct ipa_get_data_stats_resp_msg_v01 *resp;
- int pipe_len, rc = -ENOMEM;
+ int pipe_len, rc;
req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
GFP_KERNEL);
if (!req) {
- IPAWANERR("Can't allocate memory for stats message\n");
- return rc;
+ IPAWANERR("failed to allocate memory for stats message\n");
+ return -ENOMEM;
}
resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
GFP_KERNEL);
if (!resp) {
- IPAWANERR("Can't allocate memory for stats message\n");
+ IPAWANERR("failed to allocate memory for stats message\n");
kfree(req);
- return rc;
+ return -ENOMEM;
}
memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
index ccea22ebc50f..6452b12f553a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -1,7 +1,9 @@
+obj-$(CONFIG_IPA3) += ipahal/
+
obj-$(CONFIG_IPA3) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o odu_bridge.o \
ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o \
- ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_usb.o
+ ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o
obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index d32b41114f32..ce2d8e642314 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,6 +24,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/rbtree.h>
+#include <linux/of_gpio.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/msm-bus.h>
@@ -32,8 +33,19 @@
#include <linux/delay.h>
#include <linux/msm_gsi.h>
#include <linux/qcom_iommu.h>
+#include <linux/time.h>
+#include <linux/hashtable.h>
+#include <linux/hash.h>
#include "ipa_i.h"
#include "ipa_rm_i.h"
+#include "ipahal/ipahal.h"
+
+#define CREATE_TRACE_POINTS
+#include "ipa_trace.h"
+
+#define IPA_GPIO_IN_QUERY_CLK_IDX 0
+#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
+#define IPA_GPIO_OUT_CLK_VOTE_IDX 1
#define IPA_SUMMING_THRESHOLD (0x10)
#define IPA_PIPE_MEM_START_OFST (0x0)
@@ -53,6 +65,15 @@
#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
+#define IPA_IPC_LOG_PAGES 50
+
+#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
+
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
+
/* The relative location in /lib/firmware where the FWs will reside */
#define IPA_FWS_PATH "ipa/ipa_fws.elf"
@@ -194,6 +215,236 @@ static bool smmu_present;
static bool arm_smmu;
static bool smmu_disable_htw;
+static char *active_clients_table_buf;
+
+const char *ipa3_clients_strings[IPA_CLIENT_MAX] = {
+ __stringify(IPA_CLIENT_HSIC1_PROD),
+ __stringify(IPA_CLIENT_WLAN1_PROD),
+ __stringify(IPA_CLIENT_USB2_PROD),
+ __stringify(IPA_CLIENT_HSIC3_PROD),
+ __stringify(IPA_CLIENT_HSIC2_PROD),
+ __stringify(IPA_CLIENT_USB3_PROD),
+ __stringify(IPA_CLIENT_HSIC4_PROD),
+ __stringify(IPA_CLIENT_USB4_PROD),
+ __stringify(IPA_CLIENT_HSIC5_PROD),
+ __stringify(IPA_CLIENT_USB_PROD),
+ __stringify(IPA_CLIENT_A5_WLAN_AMPDU_PROD),
+ __stringify(IPA_CLIENT_A2_EMBEDDED_PROD),
+ __stringify(IPA_CLIENT_A2_TETHERED_PROD),
+ __stringify(IPA_CLIENT_APPS_LAN_WAN_PROD),
+ __stringify(IPA_CLIENT_APPS_CMD_PROD),
+ __stringify(IPA_CLIENT_ODU_PROD),
+ __stringify(IPA_CLIENT_MHI_PROD),
+ __stringify(IPA_CLIENT_Q6_LAN_PROD),
+ __stringify(IPA_CLIENT_Q6_WAN_PROD),
+ __stringify(IPA_CLIENT_Q6_CMD_PROD),
+ __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD),
+ __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD),
+ __stringify(IPA_CLIENT_Q6_DECOMP_PROD),
+ __stringify(IPA_CLIENT_Q6_DECOMP2_PROD),
+ __stringify(IPA_CLIENT_UC_USB_PROD),
+
+ /* Below PROD client type is only for test purpose */
+ __stringify(IPA_CLIENT_TEST_PROD),
+ __stringify(IPA_CLIENT_TEST1_PROD),
+ __stringify(IPA_CLIENT_TEST2_PROD),
+ __stringify(IPA_CLIENT_TEST3_PROD),
+ __stringify(IPA_CLIENT_TEST4_PROD),
+
+ __stringify(IPA_CLIENT_HSIC1_CONS),
+ __stringify(IPA_CLIENT_WLAN1_CONS),
+ __stringify(IPA_CLIENT_HSIC2_CONS),
+ __stringify(IPA_CLIENT_USB2_CONS),
+ __stringify(IPA_CLIENT_WLAN2_CONS),
+ __stringify(IPA_CLIENT_HSIC3_CONS),
+ __stringify(IPA_CLIENT_USB3_CONS),
+ __stringify(IPA_CLIENT_WLAN3_CONS),
+ __stringify(IPA_CLIENT_HSIC4_CONS),
+ __stringify(IPA_CLIENT_USB4_CONS),
+ __stringify(IPA_CLIENT_WLAN4_CONS),
+ __stringify(IPA_CLIENT_HSIC5_CONS),
+ __stringify(IPA_CLIENT_USB_CONS),
+ __stringify(IPA_CLIENT_USB_DPL_CONS),
+ __stringify(IPA_CLIENT_A2_EMBEDDED_CONS),
+ __stringify(IPA_CLIENT_A2_TETHERED_CONS),
+ __stringify(IPA_CLIENT_A5_LAN_WAN_CONS),
+ __stringify(IPA_CLIENT_APPS_LAN_CONS),
+ __stringify(IPA_CLIENT_APPS_WAN_CONS),
+ __stringify(IPA_CLIENT_ODU_EMB_CONS),
+ __stringify(IPA_CLIENT_ODU_TETH_CONS),
+ __stringify(IPA_CLIENT_MHI_CONS),
+ __stringify(IPA_CLIENT_Q6_LAN_CONS),
+ __stringify(IPA_CLIENT_Q6_WAN_CONS),
+ __stringify(IPA_CLIENT_Q6_DUN_CONS),
+ __stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS),
+ __stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS),
+ __stringify(IPA_CLIENT_Q6_DECOMP_CONS),
+ __stringify(IPA_CLIENT_Q6_DECOMP2_CONS),
+ __stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS),
+ /* Below CONS client type is only for test purpose */
+ __stringify(IPA_CLIENT_TEST_CONS),
+ __stringify(IPA_CLIENT_TEST1_CONS),
+ __stringify(IPA_CLIENT_TEST2_CONS),
+ __stringify(IPA_CLIENT_TEST3_CONS),
+ __stringify(IPA_CLIENT_TEST4_CONS),
+};
+
+int ipa3_active_clients_log_print_buffer(char *buf, int size)
+{
+ int i;
+ int nbytes;
+ int cnt = 0;
+ int start_idx;
+ int end_idx;
+
+ start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
+ IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+ end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
+ for (i = start_idx; i != end_idx;
+ i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
+ nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
+ ipa3_ctx->ipa3_active_clients_logging
+ .log_buffer[i]);
+ cnt += nbytes;
+ }
+
+ return cnt;
+}
+
+int ipa3_active_clients_log_print_table(char *buf, int size)
+{
+ int i;
+ struct ipa3_active_client_htable_entry *iterator;
+ int cnt = 0;
+
+ cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
+ hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
+ iterator, list) {
+ switch (iterator->type) {
+ case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d ENDPOINT\n",
+ iterator->id_string, iterator->count);
+ break;
+ case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d SIMPLE\n",
+ iterator->id_string, iterator->count);
+ break;
+ case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d RESOURCE\n",
+ iterator->id_string, iterator->count);
+ break;
+ case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "%-40s %-3d SPECIAL\n",
+ iterator->id_string, iterator->count);
+ break;
+ default:
+ IPAERR("Trying to print illegal active_clients type");
+ break;
+ }
+ }
+ cnt += scnprintf(buf + cnt, size - cnt,
+ "\nTotal active clients count: %d\n",
+ ipa3_ctx->ipa3_active_clients.cnt);
+
+ return cnt;
+}
+
+static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ ipa3_active_clients_lock();
+ ipa3_active_clients_log_print_table(active_clients_table_buf,
+ IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
+ IPAERR("%s", active_clients_table_buf);
+ ipa3_active_clients_unlock();
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa3_active_clients_panic_blk = {
+ .notifier_call = ipa3_active_clients_panic_notifier,
+};
+
+static int ipa3_active_clients_log_insert(const char *string)
+{
+ int head;
+ int tail;
+
+ if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy)
+ return -EPERM;
+
+ head = ipa3_ctx->ipa3_active_clients_logging.log_head;
+ tail = ipa3_ctx->ipa3_active_clients_logging.log_tail;
+
+ memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_',
+ IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
+ strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string,
+ (size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
+ head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+ if (tail == head)
+ tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+
+ ipa3_ctx->ipa3_active_clients_logging.log_tail = tail;
+ ipa3_ctx->ipa3_active_clients_logging.log_head = head;
+
+ return 0;
+}
+
+static int ipa3_active_clients_log_init(void)
+{
+ int i;
+
+ ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kzalloc(
+ IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
+ sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
+ GFP_KERNEL);
+ active_clients_table_buf = kzalloc(sizeof(
+ char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
+ if (ipa3_ctx->ipa3_active_clients_logging.log_buffer == NULL) {
+ pr_err("Active Clients Logging memory allocation failed");
+ goto bail;
+ }
+ for (i = 0; i < IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
+ ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] =
+ ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] +
+ (IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
+ }
+ ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+ ipa3_ctx->ipa3_active_clients_logging.log_tail =
+ IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+ hash_init(ipa3_ctx->ipa3_active_clients_logging.htable);
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &ipa3_active_clients_panic_blk);
+ ipa3_ctx->ipa3_active_clients_logging.log_rdy = 1;
+
+ return 0;
+
+bail:
+ return -ENOMEM;
+}
+
+void ipa3_active_clients_log_clear(void)
+{
+ ipa3_active_clients_lock();
+ ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+ ipa3_ctx->ipa3_active_clients_logging.log_tail =
+ IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+ ipa3_active_clients_unlock();
+}
+
+static void ipa3_active_clients_log_destroy(void)
+{
+ ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
+ kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
+ ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+ ipa3_ctx->ipa3_active_clients_logging.log_tail =
+ IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+}
+
enum ipa_smmu_cb_type {
IPA_SMMU_CB_AP,
IPA_SMMU_CB_WLAN,
@@ -255,7 +506,7 @@ static int ipa3_open(struct inode *inode, struct file *filp)
{
struct ipa3_context *ctx = NULL;
- IPADBG("ENTER\n");
+ IPADBG_LOW("ENTER\n");
ctx = container_of(inode->i_cdev, struct ipa3_context, cdev);
filp->private_data = ctx;
@@ -392,7 +643,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
return -ENOTTY;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
switch (cmd) {
case IPA_IOC_ALLOC_NAT_MEM:
@@ -1157,12 +1408,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
default: /* redundant, as cmd was checked against MAXNR */
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return -ENOTTY;
}
kfree(param);
-
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return retval;
}
@@ -1233,7 +1483,7 @@ static int ipa3_setup_exception_path(void)
{
struct ipa_ioc_add_hdr *hdr;
struct ipa_hdr_add *hdr_entry;
- struct ipa3_route route = { 0 };
+ struct ipahal_reg_route route = { 0 };
int ret;
/* install the basic exception header */
@@ -1286,7 +1536,8 @@ bail:
static int ipa3_init_smem_region(int memory_region_size,
int memory_region_offset)
{
- struct ipa3_hw_imm_cmd_dma_shared_mem cmd;
+ struct ipahal_imm_cmd_dma_shared_mem cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
struct ipa3_desc desc;
struct ipa3_mem_buffer mem;
int rc;
@@ -1307,15 +1558,22 @@ static int ipa3_init_smem_region(int memory_region_size,
}
memset(mem.base, 0, mem.size);
- cmd.skip_pipeline_clear = 0;
- cmd.pipeline_clear_options = IPA_HPS_CLEAR;
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
cmd.size = mem.size;
cmd.system_addr = mem.phys_base;
cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
memory_region_offset;
- desc.opcode = IPA_DMA_SHARED_MEM;
- desc.pyld = &cmd;
- desc.len = sizeof(cmd);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct dma_shared_mem imm cmd\n");
+ return -ENOMEM;
+ }
+ desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
desc.type = IPA_IMM_CMD_DESC;
rc = ipa3_send_cmd(1, &desc);
@@ -1324,6 +1582,7 @@ static int ipa3_init_smem_region(int memory_region_size,
rc = -EFAULT;
}
+ ipahal_destroy_imm_cmd(cmd_pyld);
dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
mem.phys_base);
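
The rewritten ipa3_init_smem_region() shows the ipahal immediate-command lifecycle: fill the abstract ipahal_imm_cmd_dma_shared_mem struct, construct the hardware payload with ipahal_construct_imm_cmd(), point the descriptor at the payload's data/len, send it, then free the payload with ipahal_destroy_imm_cmd(). Condensed into one sketch (helper name and arguments are illustrative):

/* Illustrative sketch only -- ipahal immediate-command lifecycle. */
static int zero_smem_region_sketch(struct ipa3_mem_buffer *mem, u32 local_ofst)
{
	struct ipahal_imm_cmd_dma_shared_mem cmd = { 0 };
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa3_desc desc = { 0 };
	int rc = 0;

	cmd.is_read = false;
	cmd.skip_pipeline_clear = false;
	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	cmd.size = mem->size;
	cmd.system_addr = mem->phys_base;
	cmd.local_addr = local_ofst;

	/* construct the HW-specific payload from the abstract command */
	pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
	if (!pyld)
		return -ENOMEM;

	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
	desc.pyld = pyld->data;
	desc.len = pyld->len;
	desc.type = IPA_IMM_CMD_DESC;

	if (ipa3_send_cmd(1, &desc))
		rc = -EFAULT;

	ipahal_destroy_imm_cmd(pyld);	/* payload no longer needed */
	return rc;
}
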
@@ -1343,13 +1602,13 @@ int ipa3_init_q6_smem(void)
{
int rc;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size),
IPA_MEM_PART(modem_ofst));
if (rc) {
IPAERR("failed to initialize Modem RAM memory\n");
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return rc;
}
@@ -1357,7 +1616,7 @@ int ipa3_init_q6_smem(void)
IPA_MEM_PART(modem_hdr_ofst));
if (rc) {
IPAERR("failed to initialize Modem HDRs RAM memory\n");
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return rc;
}
@@ -1365,7 +1624,7 @@ int ipa3_init_q6_smem(void)
IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
if (rc) {
IPAERR("failed to initialize Modem proc ctx RAM memory\n");
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return rc;
}
@@ -1373,38 +1632,36 @@ int ipa3_init_q6_smem(void)
IPA_MEM_PART(modem_comp_decomp_ofst));
if (rc) {
IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return rc;
}
-
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return rc;
}
-static void ipa3_free_buffer(void *user1, int user2)
+static void ipa3_destroy_imm(void *user1, int user2)
{
- kfree(user1);
+ ipahal_destroy_imm_cmd(user1);
}
static int ipa3_q6_pipe_delay(void)
{
- u32 reg_val = 0;
int client_idx;
int ep_idx;
+ struct ipa_ep_cfg_ctrl ep_ctrl;
+ memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
ep_idx = ipa3_get_ep_mapping(client_idx);
if (ep_idx == -1)
continue;
- IPA_SETFIELD_IN_REG(reg_val, 1,
- IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT,
- IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK);
+ ep_ctrl.ipa_ep_delay = 1;
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_CTRL_N_OFST(ep_idx), reg_val);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
+ ep_idx, &ep_ctrl);
}
}
@@ -1413,12 +1670,13 @@ static int ipa3_q6_pipe_delay(void)
static int ipa3_q6_avoid_holb(void)
{
- u32 reg_val;
int ep_idx;
int client_idx;
struct ipa_ep_cfg_ctrl avoid_holb;
+ struct ipa_ep_cfg_holb ep_holb;
memset(&avoid_holb, 0, sizeof(avoid_holb));
+ memset(&ep_holb, 0, sizeof(ep_holb));
avoid_holb.ipa_ep_suspend = true;
for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
@@ -1433,23 +1691,14 @@ static int ipa3_q6_avoid_holb(void)
* they are not valid, therefore, the above function
* will fail.
*/
- reg_val = 0;
- IPA_SETFIELD_IN_REG(reg_val, 0,
- IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_SHFT,
- IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v3_0(ep_idx),
- reg_val);
-
- reg_val = 0;
- IPA_SETFIELD_IN_REG(reg_val, 1,
- IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_SHFT,
- IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v3_0(ep_idx),
- reg_val);
+ ep_holb.tmr_val = 0;
+ ep_holb.en = 1;
+ ipahal_write_reg_n_fields(
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+ ep_idx, &ep_holb);
+ ipahal_write_reg_n_fields(
+ IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+ ep_idx, &ep_holb);
ipa3_cfg_ep_ctrl(ep_idx, &avoid_holb);
}
@@ -1481,7 +1730,8 @@ static u32 ipa3_get_max_flt_rt_cmds(u32 num_pipes)
static int ipa3_q6_clean_q6_tables(void)
{
struct ipa3_desc *desc;
- struct ipa3_hw_imm_cmd_dma_shared_mem *cmd = NULL;
+ struct ipahal_imm_cmd_dma_shared_mem cmd;
+ struct ipahal_imm_cmd_pyld **cmd_pyld;
int pipe_idx;
int num_cmds = 0;
int index;
@@ -1509,9 +1759,9 @@ static int ipa3_q6_clean_q6_tables(void)
goto bail_dma;
}
- cmd = kcalloc(max_cmds, sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem),
+ cmd_pyld = kcalloc(max_cmds, sizeof(struct ipahal_imm_cmd_pyld *),
GFP_KERNEL);
- if (!cmd) {
+ if (!cmd_pyld) {
IPAERR("failed to allocate memory\n");
retval = -ENOMEM;
goto bail_desc;
@@ -1531,37 +1781,53 @@ static int ipa3_q6_clean_q6_tables(void)
* Need to point v4 and v6 hash fltr tables to an
* empty table
*/
- cmd[num_cmds].skip_pipeline_clear = 0;
- cmd[num_cmds].pipeline_clear_options =
- IPA_FULL_PIPELINE_CLEAR;
- cmd[num_cmds].size = mem.size;
- cmd[num_cmds].system_addr = mem.phys_base;
- cmd[num_cmds].local_addr =
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = 0;
+ cmd.pipeline_clear_options =
+ IPAHAL_FULL_PIPELINE_CLEAR;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr =
ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v4_flt_hash_ofst) +
IPA_HW_TBL_HDR_WIDTH +
flt_idx * IPA_HW_TBL_HDR_WIDTH;
-
- desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmds].pyld = &cmd[num_cmds];
- desc[num_cmds].len = sizeof(*cmd);
+ cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld[num_cmds]) {
+ IPAERR("fail construct dma_shared_mem cmd\n");
+ retval = -ENOMEM;
+ goto bail_cmd;
+ }
+ desc[num_cmds].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
+ desc[num_cmds].len = cmd_pyld[num_cmds]->len;
desc[num_cmds].type = IPA_IMM_CMD_DESC;
num_cmds++;
- cmd[num_cmds].skip_pipeline_clear = 0;
- cmd[num_cmds].pipeline_clear_options =
- IPA_FULL_PIPELINE_CLEAR;
- cmd[num_cmds].size = mem.size;
- cmd[num_cmds].system_addr = mem.phys_base;
- cmd[num_cmds].local_addr =
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options =
+ IPAHAL_FULL_PIPELINE_CLEAR;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr =
ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v6_flt_hash_ofst) +
IPA_HW_TBL_HDR_WIDTH +
flt_idx * IPA_HW_TBL_HDR_WIDTH;
-
- desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmds].pyld = &cmd[num_cmds];
- desc[num_cmds].len = sizeof(*cmd);
+ cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld[num_cmds]) {
+ IPAERR("fail construct dma_shared_mem cmd\n");
+ retval = -ENOMEM;
+ goto bail_cmd;
+ }
+ desc[num_cmds].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
+ desc[num_cmds].len = cmd_pyld[num_cmds]->len;
desc[num_cmds].type = IPA_IMM_CMD_DESC;
num_cmds++;
@@ -1569,37 +1835,53 @@ static int ipa3_q6_clean_q6_tables(void)
* Need to point v4 and v6 non-hash fltr tables to an
* empty table
*/
- cmd[num_cmds].skip_pipeline_clear = 0;
- cmd[num_cmds].pipeline_clear_options =
- IPA_FULL_PIPELINE_CLEAR;
- cmd[num_cmds].size = mem.size;
- cmd[num_cmds].system_addr = mem.phys_base;
- cmd[num_cmds].local_addr =
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options =
+ IPAHAL_FULL_PIPELINE_CLEAR;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr =
ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v4_flt_nhash_ofst) +
IPA_HW_TBL_HDR_WIDTH +
flt_idx * IPA_HW_TBL_HDR_WIDTH;
-
- desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmds].pyld = &cmd[num_cmds];
- desc[num_cmds].len = sizeof(*cmd);
+ cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld[num_cmds]) {
+ IPAERR("fail construct dma_shared_mem cmd\n");
+ retval = -ENOMEM;
+ goto bail_cmd;
+ }
+ desc[num_cmds].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
+ desc[num_cmds].len = cmd_pyld[num_cmds]->len;
desc[num_cmds].type = IPA_IMM_CMD_DESC;
num_cmds++;
- cmd[num_cmds].skip_pipeline_clear = 0;
- cmd[num_cmds].pipeline_clear_options =
- IPA_FULL_PIPELINE_CLEAR;
- cmd[num_cmds].size = mem.size;
- cmd[num_cmds].system_addr = mem.phys_base;
- cmd[num_cmds].local_addr =
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = 0;
+ cmd.pipeline_clear_options =
+ IPAHAL_FULL_PIPELINE_CLEAR;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr =
ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v6_flt_nhash_ofst) +
IPA_HW_TBL_HDR_WIDTH +
flt_idx * IPA_HW_TBL_HDR_WIDTH;
-
- desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmds].pyld = &cmd[num_cmds];
- desc[num_cmds].len = sizeof(*cmd);
+ cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld[num_cmds]) {
+ IPAERR("fail construct dma_shared_mem cmd\n");
+ retval = -ENOMEM;
+ goto bail_cmd;
+ }
+ desc[num_cmds].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
+ desc[num_cmds].len = cmd_pyld[num_cmds]->len;
desc[num_cmds].type = IPA_IMM_CMD_DESC;
num_cmds++;
}
@@ -1611,33 +1893,47 @@ static int ipa3_q6_clean_q6_tables(void)
for (index = IPA_MEM_PART(v4_modem_rt_index_lo);
index <= IPA_MEM_PART(v4_modem_rt_index_hi);
index++) {
- cmd[num_cmds].skip_pipeline_clear = 0;
- cmd[num_cmds].pipeline_clear_options =
- IPA_FULL_PIPELINE_CLEAR;
- cmd[num_cmds].size = mem.size;
- cmd[num_cmds].system_addr = mem.phys_base;
- cmd[num_cmds].local_addr = ipa3_ctx->smem_restricted_bytes +
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v4_rt_hash_ofst) +
index * IPA_HW_TBL_HDR_WIDTH;
-
- desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmds].pyld = &cmd[num_cmds];
- desc[num_cmds].len = sizeof(*cmd);
+ cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld[num_cmds]) {
+ IPAERR("failed to construct dma_shared_mem imm cmd\n");
+ retval = -ENOMEM;
+ goto bail_cmd;
+ }
+ desc[num_cmds].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
+ desc[num_cmds].len = cmd_pyld[num_cmds]->len;
desc[num_cmds].type = IPA_IMM_CMD_DESC;
num_cmds++;
- cmd[num_cmds].skip_pipeline_clear = 0;
- cmd[num_cmds].pipeline_clear_options =
- IPA_FULL_PIPELINE_CLEAR;
- cmd[num_cmds].size = mem.size;
- cmd[num_cmds].system_addr = mem.phys_base;
- cmd[num_cmds].local_addr = ipa3_ctx->smem_restricted_bytes +
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v4_rt_nhash_ofst) +
index * IPA_HW_TBL_HDR_WIDTH;
-
- desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmds].pyld = &cmd[num_cmds];
- desc[num_cmds].len = sizeof(*cmd);
+ cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld[num_cmds]) {
+ IPAERR("failed to construct dma_shared_mem imm cmd\n");
+ retval = -ENOMEM;
+ goto bail_cmd;
+ }
+ desc[num_cmds].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
+ desc[num_cmds].len = cmd_pyld[num_cmds]->len;
desc[num_cmds].type = IPA_IMM_CMD_DESC;
num_cmds++;
}
@@ -1645,74 +1941,83 @@ static int ipa3_q6_clean_q6_tables(void)
for (index = IPA_MEM_PART(v6_modem_rt_index_lo);
index <= IPA_MEM_PART(v6_modem_rt_index_hi);
index++) {
- cmd[num_cmds].skip_pipeline_clear = 0;
- cmd[num_cmds].pipeline_clear_options =
- IPA_FULL_PIPELINE_CLEAR;
- cmd[num_cmds].size = mem.size;
- cmd[num_cmds].system_addr = mem.phys_base;
- cmd[num_cmds].local_addr = ipa3_ctx->smem_restricted_bytes +
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options =
+ IPAHAL_FULL_PIPELINE_CLEAR;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v6_rt_hash_ofst) +
index * IPA_HW_TBL_HDR_WIDTH;
-
- desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmds].pyld = &cmd[num_cmds];
- desc[num_cmds].len = sizeof(*cmd);
+ cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld[num_cmds]) {
+ IPAERR("failed to construct dma_shared_mem imm cmd\n");
+ retval = -ENOMEM;
+ goto bail_cmd;
+ }
+ desc[num_cmds].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
+ desc[num_cmds].len = cmd_pyld[num_cmds]->len;
desc[num_cmds].type = IPA_IMM_CMD_DESC;
num_cmds++;
- cmd[num_cmds].skip_pipeline_clear = 0;
- cmd[num_cmds].pipeline_clear_options =
- IPA_FULL_PIPELINE_CLEAR;
- cmd[num_cmds].size = mem.size;
- cmd[num_cmds].system_addr = mem.phys_base;
- cmd[num_cmds].local_addr = ipa3_ctx->smem_restricted_bytes +
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options =
+ IPAHAL_FULL_PIPELINE_CLEAR;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v6_rt_nhash_ofst) +
index * IPA_HW_TBL_HDR_WIDTH;
-
- desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmds].pyld = &cmd[num_cmds];
- desc[num_cmds].len = sizeof(*cmd);
+ cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+ if (!cmd_pyld[num_cmds]) {
+ IPAERR("failed to construct dma_shared_mem imm cmd\n");
+ retval = -ENOMEM;
+ goto bail_cmd;
+ }
+ desc[num_cmds].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
+ desc[num_cmds].len = cmd_pyld[num_cmds]->len;
desc[num_cmds].type = IPA_IMM_CMD_DESC;
num_cmds++;
}
retval = ipa3_send_cmd(num_cmds, desc);
if (retval) {
- IPAERR("failed to send immediate command (error %d)\n", retval);
+ IPAERR("failed to send immediate command (err %d)\n", retval);
retval = -EFAULT;
}
- kfree(cmd);
-
+bail_cmd:
+ for (index = 0; index < num_cmds; index++)
+ ipahal_destroy_imm_cmd(cmd_pyld[index]);
+ kfree(cmd_pyld);
bail_desc:
kfree(desc);
-
bail_dma:
dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
return retval;
}
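
A minimal sketch of the construct/send/destroy lifecycle that every table-zeroing command above now follows, assuming only the ipahal calls shown in this hunk (the helper name and the trimmed error handling are illustrative):

/* Sketch: zero one local table region via a DMA_SHARED_MEM immediate command */
static int dma_zero_table_sketch(struct ipa3_mem_buffer *mem, u32 local_ofst)
{
	struct ipahal_imm_cmd_dma_shared_mem cmd;
	struct ipahal_imm_cmd_pyld *cmd_pyld;
	struct ipa3_desc desc;
	int rc = 0;

	memset(&cmd, 0, sizeof(cmd));
	cmd.is_read = false;
	cmd.skip_pipeline_clear = false;
	cmd.pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
	cmd.size = mem->size;
	cmd.system_addr = mem->phys_base;	/* empty (zeroed) source table */
	cmd.local_addr = local_ofst;		/* destination in IPA local memory */

	/* construct the opaque H/W-format payload for this IPA version */
	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_DMA_SHARED_MEM,
		&cmd, false);
	if (!cmd_pyld)
		return -ENOMEM;

	memset(&desc, 0, sizeof(desc));
	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
	desc.pyld = cmd_pyld->data;
	desc.len = cmd_pyld->len;
	desc.type = IPA_IMM_CMD_DESC;

	if (ipa3_send_cmd(1, &desc))
		rc = -EFAULT;

	ipahal_destroy_imm_cmd(cmd_pyld);
	return rc;
}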
-static void ipa3_q6_disable_agg_reg(struct ipa3_register_write *reg_write,
- int ep_idx)
+static void ipa3_q6_disable_agg_reg(
+ struct ipahal_imm_cmd_register_write *reg_write, int ep_idx)
{
- reg_write->skip_pipeline_clear = 0;
- reg_write->pipeline_clear_options = IPA_FULL_PIPELINE_CLEAR;
-
- reg_write->offset = IPA_ENDP_INIT_AGGR_N_OFST_v3_0(ep_idx);
- reg_write->value =
- (1 & IPA_ENDP_INIT_AGGR_N_AGGR_FORCE_CLOSE_BMSK) <<
- IPA_ENDP_INIT_AGGR_N_AGGR_FORCE_CLOSE_SHFT;
- reg_write->value_mask =
- IPA_ENDP_INIT_AGGR_N_AGGR_FORCE_CLOSE_BMSK <<
- IPA_ENDP_INIT_AGGR_N_AGGR_FORCE_CLOSE_SHFT;
-
- reg_write->value |=
- ((0 & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) <<
- IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT);
- reg_write->value_mask |=
- ((IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK <<
- IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT));
+ struct ipahal_reg_valmask valmask;
+
+ reg_write->skip_pipeline_clear = false;
+ reg_write->pipeline_clear_options = IPAHAL_FULL_PIPELINE_CLEAR;
+ reg_write->offset =
+ ipahal_get_reg_n_ofst(IPA_ENDP_INIT_AGGR_n, ep_idx);
+ ipahal_get_disable_aggr_valmask(&valmask);
+ reg_write->value = valmask.val;
+ reg_write->value_mask = valmask.mask;
}
static int ipa3_q6_set_ex_path_dis_agg(void)
@@ -1722,8 +2027,10 @@ static int ipa3_q6_set_ex_path_dis_agg(void)
struct ipa3_desc *desc;
int num_descs = 0;
int index;
- struct ipa3_register_write *reg_write;
+ struct ipahal_imm_cmd_register_write reg_write;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
int retval;
+ struct ipahal_reg_valmask valmask;
desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
GFP_KERNEL);
@@ -1741,30 +2048,31 @@ static int ipa3_q6_set_ex_path_dis_agg(void)
if (ipa3_ctx->ep[ep_idx].valid &&
ipa3_ctx->ep[ep_idx].skip_ep_cfg) {
BUG_ON(num_descs >= ipa3_ctx->ipa_num_pipes);
- reg_write = kzalloc(sizeof(*reg_write), GFP_KERNEL);
- if (!reg_write) {
- IPAERR("failed to allocate memory\n");
+ reg_write.skip_pipeline_clear = false;
+ reg_write.pipeline_clear_options =
+ IPAHAL_FULL_PIPELINE_CLEAR;
+ reg_write.offset =
+ ipahal_get_reg_ofst(IPA_ENDP_STATUS_n);
+ ipahal_get_status_ep_valmask(
+ ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS),
+ &valmask);
+ reg_write.value = valmask.val;
+ reg_write.value_mask = valmask.mask;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct register_write cmd\n");
BUG();
}
- reg_write->skip_pipeline_clear = 0;
- reg_write->pipeline_clear_options =
- IPA_FULL_PIPELINE_CLEAR;
- reg_write->offset = IPA_ENDP_STATUS_n_OFST(ep_idx);
- reg_write->value =
- (ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS) &
- IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) <<
- IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
- reg_write->value_mask =
- IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK <<
- IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
-
- desc[num_descs].opcode = IPA_REGISTER_WRITE;
- desc[num_descs].pyld = reg_write;
- desc[num_descs].len = sizeof(*reg_write);
+
+ desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_REGISTER_WRITE);
desc[num_descs].type = IPA_IMM_CMD_DESC;
- desc[num_descs].callback = ipa3_free_buffer;
- desc[num_descs].user1 = reg_write;
+ desc[num_descs].callback = ipa3_destroy_imm;
+ desc[num_descs].user1 = cmd_pyld;
+ desc[num_descs].pyld = cmd_pyld->data;
+ desc[num_descs].len = cmd_pyld->len;
num_descs++;
}
}
@@ -1772,35 +2080,38 @@ static int ipa3_q6_set_ex_path_dis_agg(void)
/* Disable AGGR on IPA->Q6 pipes */
for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
- reg_write = kzalloc(sizeof(*reg_write), GFP_KERNEL);
- if (!reg_write) {
- IPAERR("failed to allocate memory\n");
+ ipa3_q6_disable_agg_reg(&reg_write,
+ ipa3_get_ep_mapping(client_idx));
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct register_write cmd\n");
BUG();
}
- ipa3_q6_disable_agg_reg(reg_write,
- ipa3_get_ep_mapping(client_idx));
-
- desc[num_descs].opcode = IPA_REGISTER_WRITE;
- desc[num_descs].pyld = reg_write;
- desc[num_descs].len = sizeof(*reg_write);
+ desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_REGISTER_WRITE);
desc[num_descs].type = IPA_IMM_CMD_DESC;
- desc[num_descs].callback = ipa3_free_buffer;
- desc[num_descs].user1 = reg_write;
+ desc[num_descs].callback = ipa3_destroy_imm;
+ desc[num_descs].user1 = cmd_pyld;
+ desc[num_descs].pyld = cmd_pyld->data;
+ desc[num_descs].len = cmd_pyld->len;
num_descs++;
}
}
/* Will wait 150msecs for IPA tag process completion */
retval = ipa3_tag_process(desc, num_descs,
- msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
+ msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
if (retval) {
IPAERR("TAG process failed! (error %d)\n", retval);
- /* For timeout error ipa3_free_buffer cb will free user1 */
+ /* For timeout error ipa3_destroy_imm cb will destroy user1 */
if (retval != -ETIME) {
for (index = 0; index < num_descs; index++)
- kfree(desc[index].user1);
+ if (desc[index].callback)
+ desc[index].callback(desc[index].user1,
+ desc[index].user2);
retval = -EINVAL;
}
}
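
The register-write descriptors above are freed asynchronously: the payload lives until the descriptor completes and ipa3_destroy_imm() runs as its callback. A minimal sketch of that pattern, using only names from this hunk (the helper itself is illustrative):

/* Sketch: queue a REGISTER_WRITE whose payload is released on completion */
static int queue_reg_write_sketch(struct ipa3_desc *desc,
	struct ipahal_imm_cmd_register_write *reg_write)
{
	struct ipahal_imm_cmd_pyld *cmd_pyld;

	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		reg_write, false);
	if (!cmd_pyld)
		return -ENOMEM;

	desc->opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
	desc->type = IPA_IMM_CMD_DESC;
	desc->pyld = cmd_pyld->data;
	desc->len = cmd_pyld->len;
	desc->callback = ipa3_destroy_imm;	/* frees cmd_pyld when desc completes */
	desc->user1 = cmd_pyld;
	return 0;
}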
@@ -1827,7 +2138,7 @@ int ipa3_q6_cleanup(void)
if (ipa3_ctx->uc_ctx.uc_zip_error)
BUG();
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("Q6");
if (ipa3_q6_pipe_delay()) {
IPAERR("Failed to delay Q6 pipes\n");
@@ -1898,7 +2209,7 @@ int _ipa_init_sram_v3_0(void)
phys_addr = ipa3_ctx->ipa_wrapper_base +
ipa3_ctx->ctrl->ipa_reg_base_ofst +
- IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
ipa3_ctx->smem_restricted_bytes / 4);
ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz);
@@ -1950,8 +2261,9 @@ int _ipa_init_hdr_v3_0(void)
{
struct ipa3_desc desc = { 0 };
struct ipa3_mem_buffer mem;
- struct ipa3_hdr_init_local cmd = { 0 };
- struct ipa3_hw_imm_cmd_dma_shared_mem dma_cmd = { 0 };
+ struct ipahal_imm_cmd_hdr_init_local cmd = {0};
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };
mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
@@ -1962,25 +2274,35 @@ int _ipa_init_hdr_v3_0(void)
}
memset(mem.base, 0, mem.size);
- cmd.hdr_table_src_addr = mem.phys_base;
+ cmd.hdr_table_addr = mem.phys_base;
cmd.size_hdr_table = mem.size;
- cmd.hdr_table_dst_addr = ipa3_ctx->smem_restricted_bytes +
+ cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(modem_hdr_ofst);
-
- desc.opcode = IPA_HDR_INIT_LOCAL;
- desc.pyld = &cmd;
- desc.len = sizeof(struct ipa3_hdr_init_local);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("fail to construct hdr_init_local imm cmd\n");
+ dma_free_coherent(ipa3_ctx->pdev,
+ mem.size, mem.base,
+ mem.phys_base);
+ return -EFAULT;
+ }
+ desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_HDR_INIT_LOCAL);
desc.type = IPA_IMM_CMD_DESC;
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
if (ipa3_send_cmd(1, &desc)) {
IPAERR("fail to send immediate command\n");
+ ipahal_destroy_imm_cmd(cmd_pyld);
dma_free_coherent(ipa3_ctx->pdev,
mem.size, mem.base,
mem.phys_base);
return -EFAULT;
}
+ ipahal_destroy_imm_cmd(cmd_pyld);
dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
@@ -1994,30 +2316,40 @@ int _ipa_init_hdr_v3_0(void)
memset(mem.base, 0, mem.size);
memset(&desc, 0, sizeof(desc));
- dma_cmd.skip_pipeline_clear = 0;
- dma_cmd.pipeline_clear_options = IPA_FULL_PIPELINE_CLEAR;
+ dma_cmd.is_read = false;
+ dma_cmd.skip_pipeline_clear = false;
+ dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
dma_cmd.system_addr = mem.phys_base;
dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
dma_cmd.size = mem.size;
- desc.opcode = IPA_DMA_SHARED_MEM;
- desc.pyld = &dma_cmd;
- desc.len = sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("fail to construct dma_shared_mem imm\n");
+ dma_free_coherent(ipa3_ctx->pdev,
+ mem.size, mem.base,
+ mem.phys_base);
+ return -EFAULT;
+ }
+ desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
desc.type = IPA_IMM_CMD_DESC;
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
if (ipa3_send_cmd(1, &desc)) {
IPAERR("fail to send immediate command\n");
+ ipahal_destroy_imm_cmd(cmd_pyld);
dma_free_coherent(ipa3_ctx->pdev,
mem.size,
mem.base,
mem.phys_base);
return -EFAULT;
}
+ ipahal_destroy_imm_cmd(cmd_pyld);
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFST,
- dma_cmd.local_addr);
+ ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr);
dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
@@ -2033,7 +2365,8 @@ int _ipa_init_rt4_v3(void)
{
struct ipa3_desc desc = { 0 };
struct ipa3_mem_buffer mem;
- struct ipa3_ip_v4_routing_init v4_cmd;
+ struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
u64 *entry;
int i;
int rc = 0;
@@ -2058,7 +2391,6 @@ int _ipa_init_rt4_v3(void)
entry++;
}
- desc.opcode = IPA_IP_V4_ROUTING_INIT;
v4_cmd.hash_rules_addr = mem.phys_base;
v4_cmd.hash_rules_size = mem.size;
v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
@@ -2071,10 +2403,19 @@ int _ipa_init_rt4_v3(void)
v4_cmd.hash_local_addr);
IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n",
v4_cmd.nhash_local_addr);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct ip_v4_rt_init imm cmd\n");
+ rc = -EPERM;
+ goto free_mem;
+ }
- desc.pyld = &v4_cmd;
- desc.len = sizeof(struct ipa3_ip_v4_routing_init);
+ desc.opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_ROUTING_INIT);
desc.type = IPA_IMM_CMD_DESC;
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
if (ipa3_send_cmd(1, &desc)) {
@@ -2082,6 +2423,9 @@ int _ipa_init_rt4_v3(void)
rc = -EFAULT;
}
+ ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
return rc;
}
@@ -2095,7 +2439,8 @@ int _ipa_init_rt6_v3(void)
{
struct ipa3_desc desc = { 0 };
struct ipa3_mem_buffer mem;
- struct ipa3_ip_v6_routing_init v6_cmd;
+ struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
u64 *entry;
int i;
int rc = 0;
@@ -2120,7 +2465,6 @@ int _ipa_init_rt6_v3(void)
entry++;
}
- desc.opcode = IPA_IP_V6_ROUTING_INIT;
v6_cmd.hash_rules_addr = mem.phys_base;
v6_cmd.hash_rules_size = mem.size;
v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
@@ -2133,10 +2477,19 @@ int _ipa_init_rt6_v3(void)
v6_cmd.hash_local_addr);
IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n",
v6_cmd.nhash_local_addr);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct ip_v6_rt_init imm cmd\n");
+ rc = -EPERM;
+ goto free_mem;
+ }
- desc.pyld = &v6_cmd;
- desc.len = sizeof(struct ipa3_ip_v6_routing_init);
+ desc.opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_ROUTING_INIT);
desc.type = IPA_IMM_CMD_DESC;
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
if (ipa3_send_cmd(1, &desc)) {
@@ -2144,6 +2497,9 @@ int _ipa_init_rt6_v3(void)
rc = -EFAULT;
}
+ ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
return rc;
}
@@ -2157,7 +2513,8 @@ int _ipa_init_flt4_v3(void)
{
struct ipa3_desc desc = { 0 };
struct ipa3_mem_buffer mem;
- struct ipa3_ip_v4_filter_init v4_cmd;
+ struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
u64 *entry;
int i;
int rc = 0;
@@ -2202,7 +2559,6 @@ int _ipa_init_flt4_v3(void)
entry++;
}
- desc.opcode = IPA_IP_V4_FILTER_INIT;
v4_cmd.hash_rules_addr = mem.phys_base;
v4_cmd.hash_rules_size = mem.size;
v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
@@ -2215,10 +2571,18 @@ int _ipa_init_flt4_v3(void)
v4_cmd.hash_local_addr);
IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n",
v4_cmd.nhash_local_addr);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct ip_v4_flt_init imm cmd\n");
+ rc = -EPERM;
+ goto free_mem;
+ }
- desc.pyld = &v4_cmd;
- desc.len = sizeof(struct ipa3_ip_v4_filter_init);
+ desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_FILTER_INIT);
desc.type = IPA_IMM_CMD_DESC;
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
if (ipa3_send_cmd(1, &desc)) {
@@ -2226,6 +2590,9 @@ int _ipa_init_flt4_v3(void)
rc = -EFAULT;
}
+ ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
return rc;
}
@@ -2239,7 +2606,8 @@ int _ipa_init_flt6_v3(void)
{
struct ipa3_desc desc = { 0 };
struct ipa3_mem_buffer mem;
- struct ipa3_ip_v6_filter_init v6_cmd;
+ struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
u64 *entry;
int i;
int rc = 0;
@@ -2284,7 +2652,6 @@ int _ipa_init_flt6_v3(void)
entry++;
}
- desc.opcode = IPA_IP_V6_FILTER_INIT;
v6_cmd.hash_rules_addr = mem.phys_base;
v6_cmd.hash_rules_size = mem.size;
v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
@@ -2298,9 +2665,18 @@ int _ipa_init_flt6_v3(void)
IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n",
v6_cmd.nhash_local_addr);
- desc.pyld = &v6_cmd;
- desc.len = sizeof(struct ipa3_ip_v6_filter_init);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct ip_v6_flt_init imm cmd\n");
+ rc = -EPERM;
+ goto free_mem;
+ }
+
+ desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_FILTER_INIT);
desc.type = IPA_IMM_CMD_DESC;
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
if (ipa3_send_cmd(1, &desc)) {
@@ -2308,6 +2684,9 @@ int _ipa_init_flt6_v3(void)
rc = -EFAULT;
}
+ ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
return rc;
}
@@ -2315,9 +2694,9 @@ int _ipa_init_flt6_v3(void)
static int ipa3_setup_flt_hash_tuple(void)
{
int pipe_idx;
- struct ipa3_hash_tuple tuple;
+ struct ipahal_reg_hash_tuple tuple;
- memset(&tuple, 0, sizeof(struct ipa3_hash_tuple));
+ memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes ; pipe_idx++) {
if (!ipa_is_ep_support_flt(pipe_idx))
@@ -2338,9 +2717,9 @@ static int ipa3_setup_flt_hash_tuple(void)
static int ipa3_setup_rt_hash_tuple(void)
{
int tbl_idx;
- struct ipa3_hash_tuple tuple;
+ struct ipahal_reg_hash_tuple tuple;
- memset(&tuple, 0, sizeof(struct ipa3_hash_tuple));
+ memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
for (tbl_idx = 0;
tbl_idx < max(IPA_MEM_PART(v6_rt_num_index),
@@ -2495,7 +2874,7 @@ static void ipa3_teardown_apps_pipes(void)
}
#ifdef CONFIG_COMPAT
-long compat_ipa_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int retval = 0;
struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
@@ -2640,7 +3019,7 @@ static const struct file_operations ipa3_drv_fops = {
.write = ipa3_write,
.unlocked_ioctl = ipa3_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_ipa_ioctl,
+ .compat_ioctl = compat_ipa3_ioctl,
#endif
};
@@ -2676,11 +3055,11 @@ static int ipa3_get_clks(struct device *dev)
*/
void _ipa_enable_clks_v3_0(void)
{
- IPADBG("enabling gcc_ipa_clk\n");
+ IPADBG_LOW("enabling gcc_ipa_clk\n");
if (ipa3_clk) {
clk_prepare(ipa3_clk);
clk_enable(ipa3_clk);
- IPADBG("curr_ipa_clk_rate=%d", ipa3_ctx->curr_ipa_clk_rate);
+ IPADBG_LOW("curr_ipa_clk_rate=%d", ipa3_ctx->curr_ipa_clk_rate);
clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
ipa3_uc_notify_clk_state(true);
} else {
@@ -2740,7 +3119,7 @@ void ipa3_enable_clks(void)
*/
void _ipa_disable_clks_v3_0(void)
{
- IPADBG("disabling gcc_ipa_clk\n");
+ IPADBG_LOW("disabling gcc_ipa_clk\n");
ipa3_suspend_apps_pipes(true);
ipa3_uc_notify_clk_state(false);
if (ipa3_clk)
@@ -2790,26 +3169,117 @@ static void ipa3_start_tag_process(struct work_struct *work)
res = ipa3_tag_aggr_force_close(-1);
if (res)
IPAERR("ipa3_tag_aggr_force_close failed %d\n", res);
-
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
IPADBG("TAG process done\n");
}
/**
+* ipa3_active_clients_log_mod() - Log a modification in the active clients
+* reference count
+*
+* This method logs any modification in the active clients reference count:
+* It records the change in the circular history buffer and in the hash
+* table - looking up the matching entry, creating one if needed and
+* deleting one when it is no longer needed.
+*
+* @id: active client logging info struct holding the log information
+* @inc: a boolean variable to indicate whether the modification is an increase
+* or decrease
+* @int_ctx: a boolean variable to indicate whether this call is being made from
+* an interrupt context and therefore should allocate GFP_ATOMIC memory
+*
+* Method process:
+* - Hash the unique identifier string
+* - Find the hash in the table
+* 1) If found, increase or decrease the reference count
+* 2) If not found, allocate a new hash table entry struct and initialize it
+* - Remove and free the entry once its reference count drops to zero
+* - Log the call in the circular history buffer (unless it is a simple call)
+*/
+void ipa3_active_clients_log_mod(struct ipa3_active_client_logging_info *id,
+ bool inc, bool int_ctx)
+{
+ char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN];
+ unsigned long long t;
+ unsigned long nanosec_rem;
+ struct ipa3_active_client_htable_entry *hentry;
+ struct ipa3_active_client_htable_entry *hfound;
+ u32 hkey;
+ char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
+
+ hfound = NULL;
+ memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+ strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+ hkey = arch_fast_hash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN,
+ 0);
+ hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable,
+ hentry, list, hkey) {
+ if (!strcmp(hentry->id_string, id->id_string)) {
+ hentry->count = hentry->count + (inc ? 1 : -1);
+ hfound = hentry;
+ }
+ }
+ if (hfound == NULL) {
+ hentry = NULL;
+ hentry = kzalloc(sizeof(
+ struct ipa3_active_client_htable_entry),
+ int_ctx ? GFP_ATOMIC : GFP_KERNEL);
+ if (hentry == NULL) {
+ IPAERR("failed allocating active clients hash entry");
+ return;
+ }
+ hentry->type = id->type;
+ strlcpy(hentry->id_string, id->id_string,
+ IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+ INIT_HLIST_NODE(&hentry->list);
+ hentry->count = inc ? 1 : -1;
+ hash_add(ipa3_ctx->ipa3_active_clients_logging.htable,
+ &hentry->list, hkey);
+ } else if (hfound->count == 0) {
+ hash_del(&hfound->list);
+ kfree(hfound);
+ }
+
+ if (id->type != SIMPLE) {
+ t = local_clock();
+ nanosec_rem = do_div(t, 1000000000) / 1000;
+ snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN,
+ inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
+ "[%5lu.%06lu] v %s, %s: %d",
+ (unsigned long)t, nanosec_rem,
+ id->id_string, id->file, id->line);
+ ipa3_active_clients_log_insert(temp_str);
+ }
+}
+
+void ipa3_active_clients_log_dec(struct ipa3_active_client_logging_info *id,
+ bool int_ctx)
+{
+ ipa3_active_clients_log_mod(id, false, int_ctx);
+}
+
+void ipa3_active_clients_log_inc(struct ipa3_active_client_logging_info *id,
+ bool int_ctx)
+{
+ ipa3_active_clients_log_mod(id, true, int_ctx);
+}
+
+/**
* ipa3_inc_client_enable_clks() - Increase active clients counter, and
* enable ipa clocks if necessary
*
* Return codes:
* None
*/
-void ipa3_inc_client_enable_clks(void)
+void ipa3_inc_client_enable_clks(struct ipa3_active_client_logging_info *id)
{
ipa3_active_clients_lock();
+ ipa3_active_clients_log_inc(id, false);
ipa3_ctx->ipa3_active_clients.cnt++;
if (ipa3_ctx->ipa3_active_clients.cnt == 1)
ipa3_enable_clks();
- IPADBG("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
+ IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
ipa3_active_clients_unlock();
}
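
A minimal usage sketch of the logging-aware clock vote these functions now implement, mirroring the converted call sites in this patch ("MY_USER" is a hypothetical identifier; the IPA_ACTIVE_CLIENTS_* macros live in the driver header and are not shown here):

static void clock_vote_sketch(void)
{
	struct ipa3_active_client_logging_info log_info;

	/* "MY_USER" is illustrative; real callers use an EP name or a tag */
	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "MY_USER");
	ipa3_inc_client_enable_clks(&log_info);	/* history: "[ts] ^ MY_USER, <file>: <line>" */

	/* ... touch IPA registers while the clocks are guaranteed on ... */

	ipa3_dec_client_disable_clks(&log_info);	/* history: "[ts] v MY_USER, <file>: <line>" */
}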
@@ -2821,7 +3291,8 @@ void ipa3_inc_client_enable_clks(void)
* Return codes: 0 for success
* -EPERM if an asynchronous action should have been done
*/
-int ipa3_inc_client_enable_clks_no_block(void)
+int ipa3_inc_client_enable_clks_no_block(struct ipa3_active_client_logging_info
+ *id)
{
int res = 0;
unsigned long flags;
@@ -2833,9 +3304,9 @@ int ipa3_inc_client_enable_clks_no_block(void)
res = -EPERM;
goto bail;
}
-
+ ipa3_active_clients_log_inc(id, true);
ipa3_ctx->ipa3_active_clients.cnt++;
- IPADBG("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
+ IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
bail:
ipa3_active_clients_trylock_unlock(&flags);
@@ -2853,11 +3324,14 @@ bail:
* Return codes:
* None
*/
-void ipa3_dec_client_disable_clks(void)
+void ipa3_dec_client_disable_clks(struct ipa3_active_client_logging_info *id)
{
+ struct ipa3_active_client_logging_info log_info;
+
ipa3_active_clients_lock();
+ ipa3_active_clients_log_dec(id, false);
ipa3_ctx->ipa3_active_clients.cnt--;
- IPADBG("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
+ IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
if (ipa3_ctx->tag_process_before_gating) {
ipa3_ctx->tag_process_before_gating = false;
@@ -2865,6 +3339,9 @@ void ipa3_dec_client_disable_clks(void)
* When TAG process ends, active clients will be
* decreased
*/
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
+ "TAG_PROCESS");
+ ipa3_active_clients_log_inc(&log_info, false);
ipa3_ctx->ipa3_active_clients.cnt = 1;
queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
} else {
@@ -2889,7 +3366,7 @@ void ipa3_inc_acquire_wakelock(void)
ipa3_ctx->wakelock_ref_cnt.cnt++;
if (ipa3_ctx->wakelock_ref_cnt.cnt == 1)
__pm_stay_awake(&ipa3_ctx->w_lock);
- IPADBG("active wakelock ref cnt = %d\n",
+ IPADBG_LOW("active wakelock ref cnt = %d\n",
ipa3_ctx->wakelock_ref_cnt.cnt);
spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
}
@@ -2908,7 +3385,7 @@ void ipa3_dec_release_wakelock(void)
spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
ipa3_ctx->wakelock_ref_cnt.cnt--;
- IPADBG("active wakelock ref cnt = %d\n",
+ IPADBG_LOW("active wakelock ref cnt = %d\n",
ipa3_ctx->wakelock_ref_cnt.cnt);
if (ipa3_ctx->wakelock_ref_cnt.cnt == 0)
__pm_relax(&ipa3_ctx->w_lock);
@@ -2921,7 +3398,7 @@ int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
enum ipa_voltage_level needed_voltage;
u32 clk_rate;
- IPADBG("floor_voltage=%d, bandwidth_mbps=%u",
+ IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u",
floor_voltage, bandwidth_mbps);
if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
@@ -2931,7 +3408,7 @@ int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
}
if (ipa3_ctx->enable_clock_scaling) {
- IPADBG("Clock scaling is enabled\n");
+ IPADBG_LOW("Clock scaling is enabled\n");
if (bandwidth_mbps >=
ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo)
needed_voltage = IPA_VOLTAGE_TURBO;
@@ -2941,7 +3418,7 @@ int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
else
needed_voltage = IPA_VOLTAGE_SVS;
} else {
- IPADBG("Clock scaling is disabled\n");
+ IPADBG_LOW("Clock scaling is disabled\n");
needed_voltage = IPA_VOLTAGE_NOMINAL;
}
@@ -2963,13 +3440,13 @@ int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
}
if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
- IPADBG("Same voltage\n");
+ IPADBG_LOW("Same voltage\n");
return 0;
}
ipa3_active_clients_lock();
ipa3_ctx->curr_ipa_clk_rate = clk_rate;
- IPADBG("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
+ IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
if (ipa3_ctx->ipa3_active_clients.cnt > 0) {
clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
@@ -2977,10 +3454,10 @@ int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
ipa3_ctx->ipa_bus_hdl, ipa3_get_bus_vote()))
WARN_ON(1);
} else {
- IPADBG("clocks are gated, not setting rate\n");
+ IPADBG_LOW("clocks are gated, not setting rate\n");
}
ipa3_active_clients_unlock();
- IPADBG("Done\n");
+ IPADBG_LOW("Done\n");
return 0;
}
@@ -3010,7 +3487,8 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
int res;
struct ipa_ep_cfg_holb holb_cfg;
- IPADBG("interrupt=%d, interrupt_data=%u\n", interrupt, suspend_data);
+ IPADBG("interrupt=%d, interrupt_data=%u\n",
+ interrupt, suspend_data);
memset(&holb_cfg, 0, sizeof(holb_cfg));
holb_cfg.tmr_val = 0;
@@ -3024,9 +3502,10 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
if (!atomic_read(
&ipa3_ctx->transport_pm.dec_clients)
) {
- ipa3_inc_client_enable_clks();
- IPADBG("Pipes un-suspended.\n");
- IPADBG("Enter poll mode.\n");
+ IPA_ACTIVE_CLIENTS_INC_EP(
+ ipa3_ctx->ep[i].client);
+ IPADBG_LOW("Pipes un-suspended.\n");
+ IPADBG_LOW("Enter poll mode.\n");
atomic_set(
&ipa3_ctx->transport_pm.dec_clients,
1);
@@ -3101,7 +3580,7 @@ static void ipa3_sps_release_resource(struct work_struct *work)
ipa3_sps_process_irq_schedule_rel();
} else {
atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE");
}
}
atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
@@ -3187,6 +3666,62 @@ static void ipa3_destroy_flt_tbl_idrs(void)
}
}
+static void ipa3_freeze_clock_vote_and_notify_modem(void)
+{
+ int res;
+ u32 ipa_clk_state;
+ struct ipa3_active_client_logging_info log_info;
+
+ if (ipa3_ctx->smp2p_info.res_sent)
+ return;
+
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE");
+ res = ipa3_inc_client_enable_clks_no_block(&log_info);
+ if (res)
+ ipa_clk_state = 0;
+ else
+ ipa_clk_state = 1;
+
+ if (ipa3_ctx->smp2p_info.out_base_id) {
+ gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
+ IPA_GPIO_OUT_CLK_VOTE_IDX, ipa_clk_state);
+ gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
+ IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 1);
+ ipa3_ctx->smp2p_info.res_sent = true;
+ } else {
+ IPAERR("smp2p out gpio not assigned\n");
+ }
+
+ IPADBG("IPA clocks are %s\n", ipa_clk_state ? "ON" : "OFF");
+}
+
+static int ipa3_panic_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ int res;
+
+ ipa3_freeze_clock_vote_and_notify_modem();
+
+ IPADBG("Calling uC panic handler\n");
+ res = ipa3_uc_panic_notifier(this, event, ptr);
+ if (res)
+ IPAERR("uC panic handler failed %d\n", res);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa3_panic_blk = {
+ .notifier_call = ipa3_panic_notifier,
+ /* IPA panic handler needs to run before modem shuts down */
+ .priority = INT_MAX,
+};
+
+static void ipa3_register_panic_hdlr(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &ipa3_panic_blk);
+}
+
static void ipa3_trigger_ipa_ready_cbs(void)
{
struct ipa3_ready_cb_info *info;
@@ -3205,8 +3740,7 @@ static int ipa3_gsi_pre_fw_load_init(void)
{
int result;
- /* Enable GSI */
- ipa_write_reg(ipa3_ctx->mmio, IPA_ENABLE_GSI_OFST, 1);
+ /* GSI already enabled by TZ */
result = gsi_configure_regs(ipa3_res.transport_mem_base,
ipa3_res.transport_mem_size,
@@ -3326,12 +3860,6 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
else
IPADBG(":wdi init ok\n");
- result = ipa3_usb_init();
- if (result)
- IPAERR(":ipa_usb init failed (%d)\n", -result);
- else
- IPADBG(":ipa_usb init ok\n");
-
ipa3_register_panic_hdlr();
ipa3_ctx->q6_proxy_clk_vote_valid = true;
@@ -3501,6 +4029,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
int i;
struct ipa3_flt_tbl *flt_tbl;
struct ipa3_rt_tbl_set *rset;
+ struct ipa3_active_client_logging_info log_info;
IPADBG("IPA Driver initialization started\n");
@@ -3517,6 +4046,20 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
goto fail_mem_ctx;
}
+ ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
+ if (ipa3_ctx->logbuf == NULL) {
+ IPAERR("failed to get logbuf\n");
+ result = -ENOMEM;
+ goto fail_logbuf;
+ }
+ ipa3_ctx->logbuf_low =
+ ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa_low", 0);
+ if (ipa3_ctx->logbuf_low == NULL) {
+ IPAERR("failed to get logbuf_low\n");
+ result = -ENOMEM;
+ goto fail_logbuf_low;
+ }
+
ipa3_ctx->pdev = ipa_dev;
ipa3_ctx->uc_pdev = ipa_dev;
ipa3_ctx->smmu_present = smmu_present;
@@ -3532,6 +4075,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
ipa3_ctx->transport_prototype = resource_p->transport_prototype;
ipa3_ctx->ee = resource_p->ee;
ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
+ ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
/* default aggregation parameters */
ipa3_ctx->aggregation_type = IPA_MBIM_16;
@@ -3571,6 +4115,9 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
IPADBG("Skipping bus scaling registration on Virtual plat\n");
}
+ if (ipa3_active_clients_log_init())
+ goto fail_init_active_client;
+
/* get IPA clocks */
result = ipa3_get_clks(master_dev);
if (result)
@@ -3595,6 +4142,12 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
goto fail_remap;
}
+ if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio)) {
+ IPAERR("fail to init ipahal\n");
+ result = -EFAULT;
+ goto fail_ipahal;
+ }
+
result = ipa3_init_hw();
if (result) {
IPAERR(":error initializing HW.\n");
@@ -3643,6 +4196,8 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
spin_lock_init(&ipa3_ctx->ipa3_active_clients.spinlock);
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
+ ipa3_active_clients_log_inc(&log_info, false);
ipa3_ctx->ipa3_active_clients.cnt = 1;
/* Assign resource limitation to each group */
@@ -3977,12 +4532,20 @@ fail_init_hw:
fail_remap:
ipa3_disable_clks();
fail_clk:
+ ipa3_active_clients_log_destroy();
+fail_init_active_client:
msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
-fail_bus_reg:
+fail_ipahal:
ipa3_bus_scale_table = NULL;
+fail_bus_reg:
+ ipahal_destroy();
fail_bind:
kfree(ipa3_ctx->ctrl);
fail_mem_ctrl:
+ ipc_log_context_destroy(ipa3_ctx->logbuf_low);
+fail_logbuf_low:
+ ipc_log_context_destroy(ipa3_ctx->logbuf);
+fail_logbuf:
kfree(ipa3_ctx);
ipa3_ctx = NULL;
fail_mem_ctx:
@@ -4342,6 +4905,66 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
return result;
}
+static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt)
+{
+ ipa3_freeze_clock_vote_and_notify_modem();
+
+ return IRQ_HANDLED;
+}
+
+static int ipa3_smp2p_probe(struct device *dev)
+{
+ struct device_node *node = dev->of_node;
+ int res;
+
+ IPADBG("node->name=%s\n", node->name);
+ if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) {
+ res = of_get_gpio(node, 0);
+ if (res < 0) {
+ IPADBG("of_get_gpio returned %d\n", res);
+ return res;
+ }
+
+ ipa3_ctx->smp2p_info.out_base_id = res;
+ IPADBG("smp2p out_base_id=%d\n",
+ ipa3_ctx->smp2p_info.out_base_id);
+ } else if (strcmp("qcom,smp2pgpio_map_ipa_1_in", node->name) == 0) {
+ int irq;
+
+ res = of_get_gpio(node, 0);
+ if (res < 0) {
+ IPADBG("of_get_gpio returned %d\n", res);
+ return res;
+ }
+
+ ipa3_ctx->smp2p_info.in_base_id = res;
+ IPADBG("smp2p in_base_id=%d\n",
+ ipa3_ctx->smp2p_info.in_base_id);
+
+ /* register for modem clk query */
+ irq = gpio_to_irq(ipa3_ctx->smp2p_info.in_base_id +
+ IPA_GPIO_IN_QUERY_CLK_IDX);
+ if (irq < 0) {
+ IPAERR("gpio_to_irq failed %d\n", irq);
+ return -ENODEV;
+ }
+ IPADBG("smp2p irq#=%d\n", irq);
+ res = request_irq(irq,
+ (irq_handler_t)ipa3_smp2p_modem_clk_query_isr,
+ IRQF_TRIGGER_RISING, "ipa_smp2p_clk_vote", dev);
+ if (res) {
+ IPAERR("fail to register smp2p irq=%d\n", irq);
+ return -ENODEV;
+ }
+ res = enable_irq_wake(ipa3_ctx->smp2p_info.in_base_id +
+ IPA_GPIO_IN_QUERY_CLK_IDX);
+ if (res)
+ IPAERR("failed to enable irq wake\n");
+ }
+
+ return 0;
+}
+
int ipa3_plat_drv_probe(struct platform_device *pdev_p,
struct ipa_api_controller *api_ctrl, struct of_device_id *pdrv_match)
{
@@ -4349,6 +4972,7 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p,
struct device *dev = &pdev_p->dev;
IPADBG("IPA driver probing started\n");
+ IPADBG("dev->of_node->name = %s\n", dev->of_node->name);
if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb"))
return ipa_smmu_ap_cb_probe(dev);
@@ -4359,6 +4983,14 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p,
if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb"))
return ipa_smmu_uc_cb_probe(dev);
+ if (of_device_is_compatible(dev->of_node,
+ "qcom,smp2pgpio-map-ipa-1-in"))
+ return ipa3_smp2p_probe(dev);
+
+ if (of_device_is_compatible(dev->of_node,
+ "qcom,smp2pgpio-map-ipa-1-out"))
+ return ipa3_smp2p_probe(dev);
+
master_dev = dev;
if (!ipa3_pdev)
ipa3_pdev = pdev_p;
@@ -4375,10 +5007,15 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p,
return result;
}
+ result = of_platform_populate(pdev_p->dev.of_node,
+ pdrv_match, NULL, &pdev_p->dev);
+ if (result) {
+ IPAERR("failed to populate platform\n");
+ return result;
+ }
+
if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
arm_smmu = true;
- result = of_platform_populate(pdev_p->dev.of_node,
- pdrv_match, NULL, &pdev_p->dev);
} else if (of_property_read_bool(pdev_p->dev.of_node,
"qcom,msm-smmu")) {
IPAERR("Legacy IOMMU not supported\n");
@@ -4390,16 +5027,16 @@ int ipa3_plat_drv_probe(struct platform_device *pdev_p,
IPAERR("DMA set mask failed\n");
return -EOPNOTSUPP;
}
+ }
- if (!ipa3_bus_scale_table)
- ipa3_bus_scale_table = msm_bus_cl_get_pdata(pdev_p);
+ if (!ipa3_bus_scale_table)
+ ipa3_bus_scale_table = msm_bus_cl_get_pdata(pdev_p);
- /* Proceed to real initialization */
- result = ipa3_pre_init(&ipa3_res, dev);
- if (result) {
- IPAERR("ipa3_init failed\n");
- return result;
- }
+ /* Proceed to real initialization */
+ result = ipa3_pre_init(&ipa3_res, dev);
+ if (result) {
+ IPAERR("ipa3_init failed\n");
+ return result;
}
return result;
@@ -4464,7 +5101,7 @@ static void ipa_gsi_request_resource(struct work_struct *work)
int ret;
/* request IPA clocks */
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
/* mark transport resource as granted */
spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
@@ -4481,6 +5118,7 @@ static void ipa_gsi_request_resource(struct work_struct *work)
void ipa_gsi_req_res_cb(void *user_data, bool *granted)
{
unsigned long flags;
+ struct ipa3_active_client_logging_info log_info;
spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
@@ -4491,7 +5129,8 @@ void ipa_gsi_req_res_cb(void *user_data, bool *granted)
if (ipa3_ctx->transport_pm.res_granted) {
*granted = true;
} else {
- if (ipa3_inc_client_enable_clks_no_block() == 0) {
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "GSI_RESOURCE");
+ if (ipa3_inc_client_enable_clks_no_block(&log_info) == 0) {
ipa3_ctx->transport_pm.res_granted = true;
*granted = true;
} else {
@@ -4517,7 +5156,7 @@ static void ipa_gsi_release_resource(struct work_struct *work)
}
spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
if (dec_clients)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("GSI_RESOURCE");
}
int ipa_gsi_rel_res_cb(void *user_data)
@@ -4556,7 +5195,6 @@ static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
break;
case GSI_PER_EVT_GENERAL_BREAK_POINT:
IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n");
- BUG();
break;
case GSI_PER_EVT_GENERAL_BUS_ERROR:
IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 6db327163477..347e32a0238a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,13 +29,14 @@
#define IPA_POLL_FOR_EMPTINESS_NUM 50
#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20
-#define IPA_POLL_FOR_CHANNEL_STOP_NUM 10
+#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
+#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200
/* xfer_rsc_idx should be 7 bits */
#define IPA_XFER_RSC_IDX_MAX 127
-static bool ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
- struct gsi_chan_info *chan_info);
+static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
+ bool *is_empty);
int ipa3_enable_data_path(u32 clnt_hdl)
{
@@ -43,7 +44,7 @@ int ipa3_enable_data_path(u32 clnt_hdl)
struct ipa_ep_cfg_holb holb_cfg;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
int res = 0;
- u32 reg_val = 0;
+ struct ipahal_reg_endp_init_rsrc_grp rsrc_grp;
IPADBG("Enabling data path\n");
if (IPA_CLIENT_IS_CONS(ep->client)) {
@@ -63,20 +64,19 @@ int ipa3_enable_data_path(u32 clnt_hdl)
ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
}
- /* Assign the resource group for pipe*/
- if (ipa_get_ep_group(ep->client) == -1) {
+ /* Assign the resource group for pipe */
+ memset(&rsrc_grp, 0, sizeof(rsrc_grp));
+ rsrc_grp.rsrc_grp = ipa_get_ep_group(ep->client);
+ if (rsrc_grp.rsrc_grp == -1) {
IPAERR("invalid group for client %d\n", ep->client);
WARN_ON(1);
return -EFAULT;
}
IPADBG("Setting group %d for pipe %d\n",
- ipa_get_ep_group(ep->client), clnt_hdl);
- IPA_SETFIELD_IN_REG(reg_val, ipa_get_ep_group(ep->client),
- IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT,
- IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK);
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_RSRC_GRP_n(clnt_hdl), reg_val);
+ rsrc_grp.rsrc_grp, clnt_hdl);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_RSRC_GRP_n, clnt_hdl,
+ &rsrc_grp);
return res;
}
@@ -86,7 +86,7 @@ int ipa3_disable_data_path(u32 clnt_hdl)
struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl];
struct ipa_ep_cfg_holb holb_cfg;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
- u32 aggr_init;
+ struct ipa_ep_cfg_aggr ep_aggr;
int res = 0;
IPADBG("Disabling data path\n");
@@ -105,10 +105,8 @@ int ipa3_disable_data_path(u32 clnt_hdl)
}
udelay(IPA_PKT_FLUSH_TO_US);
- aggr_init = ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_AGGR_N_OFST_v3_0(clnt_hdl));
- if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >>
- IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) == IPA_ENABLE_AGGR) {
+ ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, &ep_aggr);
+ if (ep_aggr.aggr_en) {
res = ipa3_tag_aggr_force_close(clnt_hdl);
if (res) {
IPAERR("tag process timeout, client:%d err:%d\n",
@@ -283,7 +281,7 @@ int ipa3_connect(const struct ipa_connect_params *in,
int ipa_ep_idx;
int result = -EFAULT;
struct ipa3_ep_context *ep;
- struct ipa3_ep_cfg_status ep_status;
+ struct ipahal_reg_ep_cfg_status ep_status;
unsigned long base;
struct iommu_domain *smmu_domain;
@@ -310,7 +308,7 @@ int ipa3_connect(const struct ipa_connect_params *in,
}
memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(in->client);
ep->skip_ep_cfg = in->skip_ep_cfg;
ep->valid = 1;
@@ -443,7 +441,7 @@ int ipa3_connect(const struct ipa_connect_params *in,
ipa3_install_dflt_flt_rules(ipa_ep_idx);
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);
@@ -497,7 +495,7 @@ desc_mem_alloc_fail:
sps_free_endpoint(ep->ep_hdl);
ipa_cfg_ep_fail:
memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
fail:
return result;
}
@@ -551,6 +549,7 @@ int ipa3_disconnect(u32 clnt_hdl)
struct iommu_domain *smmu_domain;
struct ipa_disable_force_clear_datapath_req_msg_v01 req = {0};
int res;
+ enum ipa_client_type client_type;
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
@@ -559,9 +558,9 @@ int ipa3_disconnect(u32 clnt_hdl)
}
ep = &ipa3_ctx->ep[clnt_hdl];
-
+ client_type = ipa3_get_client_mapping(clnt_hdl);
if (!ep->keep_ipa_awake)
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(client_type);
/* Set Disconnect in Progress flag. */
spin_lock(&ipa3_ctx->disconnect_lock);
@@ -661,8 +660,7 @@ int ipa3_disconnect(u32 clnt_hdl)
spin_lock(&ipa3_ctx->disconnect_lock);
memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
spin_unlock(&ipa3_ctx->disconnect_lock);
-
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
@@ -687,8 +685,7 @@ int ipa3_reset_endpoint(u32 clnt_hdl)
return -EFAULT;
}
ep = &ipa3_ctx->ep[clnt_hdl];
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
res = sps_disconnect(ep->ep_hdl);
if (res) {
IPAERR("sps_disconnect() failed, res=%d.\n", res);
@@ -703,8 +700,7 @@ int ipa3_reset_endpoint(u32 clnt_hdl)
}
bail:
- ipa3_dec_client_disable_clks();
-
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return res;
}
@@ -849,8 +845,7 @@ static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl,
int aggr_active_bitmap = 0;
IPADBG("Applying reset channel with open aggregation frame WA\n");
- ipa_write_reg(ipa3_ctx->mmio, IPA_AGGR_FORCE_CLOSE_OFST,
- (1 << clnt_hdl));
+ ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
/* Reset channel */
gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
@@ -899,8 +894,7 @@ static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl,
/* Wait for aggregation frame to be closed and stop channel*/
for (i = 0; i < IPA_POLL_AGGR_STATE_RETRIES_NUM; i++) {
- aggr_active_bitmap = ipa_read_reg(ipa3_ctx->mmio,
- IPA_STATE_AGGR_ACTIVE_OFST);
+ aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
if (!(aggr_active_bitmap & (1 << clnt_hdl)))
break;
msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
@@ -928,6 +922,12 @@ static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl,
goto start_chan_fail;
}
+ /*
+ * Sleep for 1 ms, as required by the H/W-verified
+ * sequence for resetting a GSI channel
+ */
+ msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
+
/* Restore channels properties */
result = ipa3_restore_channel_properties(ep, &orig_chan_props,
&orig_chan_scratch);
@@ -957,7 +957,7 @@ int ipa3_reset_gsi_channel(u32 clnt_hdl)
enum gsi_status gsi_res;
int aggr_active_bitmap = 0;
- IPADBG("ipa3_reset_gsi_channel: entry\n");
+ IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
IPAERR("Bad parameter.\n");
@@ -967,15 +967,13 @@ int ipa3_reset_gsi_channel(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
- ipa3_inc_client_enable_clks();
-
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
/*
* Check for open aggregation frame on Consumer EP -
* reset with open aggregation frame WA
*/
if (IPA_CLIENT_IS_CONS(ep->client)) {
- aggr_active_bitmap = ipa_read_reg(ipa3_ctx->mmio,
- IPA_STATE_AGGR_ACTIVE_OFST);
+ aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
if (aggr_active_bitmap & (1 << clnt_hdl)) {
result = ipa3_reset_with_open_aggr_frame_wa(clnt_hdl,
ep);
@@ -985,7 +983,11 @@ int ipa3_reset_gsi_channel(u32 clnt_hdl)
}
}
- /* Reset channel */
+ /*
+ * Reset channel
+ * If the reset is called right after a stop, wait 1 ms first
+ */
+ msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
if (gsi_res != GSI_STATUS_SUCCESS) {
IPAERR("Error resetting channel: %d\n", gsi_res);
@@ -995,14 +997,14 @@ int ipa3_reset_gsi_channel(u32 clnt_hdl)
finish_reset:
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
- IPADBG("ipa3_reset_gsi_channel: exit\n");
+ IPADBG("exit\n");
return 0;
reset_chan_fail:
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return result;
}
@@ -1012,7 +1014,7 @@ int ipa3_reset_gsi_event_ring(u32 clnt_hdl)
int result = -EFAULT;
enum gsi_status gsi_res;
- IPADBG("ipa3_reset_gsi_event_ring: entry\n");
+ IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
IPAERR("Bad parameter.\n");
@@ -1022,8 +1024,7 @@ int ipa3_reset_gsi_event_ring(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
- ipa3_inc_client_enable_clks();
-
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
/* Reset event ring */
gsi_res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
if (gsi_res != GSI_STATUS_SUCCESS) {
@@ -1033,14 +1034,14 @@ int ipa3_reset_gsi_event_ring(u32 clnt_hdl)
}
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
- IPADBG("ipa3_reset_gsi_event_ring: exit\n");
+ IPADBG("exit\n");
return 0;
reset_evt_fail:
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return result;
}
@@ -1058,7 +1059,7 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
int ipa_ep_idx;
int result = -EFAULT;
struct ipa3_ep_context *ep;
- struct ipa3_ep_cfg_status ep_status;
+ struct ipahal_reg_ep_cfg_status ep_status;
unsigned long gsi_dev_hdl;
enum gsi_status gsi_res;
struct ipa_gsi_ep_config gsi_ep_cfg;
@@ -1085,7 +1086,7 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
}
memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ep->skip_ep_cfg = params->skip_ep_cfg;
ep->valid = 1;
@@ -1185,7 +1186,7 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(params->client))
ipa3_install_dflt_flt_rules(ipa_ep_idx);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG("client %d (ep: %d) connected\n", params->client, ipa_ep_idx);
IPADBG("exit\n");
@@ -1198,7 +1199,7 @@ write_evt_scratch_fail:
gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
ipa_cfg_ep_fail:
memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
fail:
return result;
}
@@ -1211,7 +1212,7 @@ int ipa3_set_usb_max_packet_size(
IPADBG("entry\n");
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&dev_scratch, 0, sizeof(struct gsi_device_scratch));
dev_scratch.mhi_base_chan_idx_valid = false;
@@ -1224,8 +1225,7 @@ int ipa3_set_usb_max_packet_size(
IPAERR("Error writing device scratch: %d\n", gsi_res);
return -EFAULT;
}
-
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG("exit\n");
return 0;
@@ -1246,8 +1246,7 @@ int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
}
ep = &ipa3_ctx->ep[clnt_hdl];
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
if (xferrscidx_valid) {
ep->chan_scratch.xdci.xferrscidx = xferrscidx;
@@ -1264,18 +1263,37 @@ int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
goto write_chan_scratch_fail;
}
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPADBG("exit\n");
return 0;
write_chan_scratch_fail:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return result;
}
-static bool ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
- struct gsi_chan_info *chan_info)
+static int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
+ unsigned long chan_hdl)
+{
+ enum gsi_status gsi_res;
+
+ memset(gsi_chan_info, 0, sizeof(struct gsi_chan_info));
+ gsi_res = gsi_query_channel_info(chan_hdl, gsi_chan_info);
+ if (gsi_res != GSI_STATUS_SUCCESS) {
+ IPAERR("Error querying channel info: %d\n", gsi_res);
+ return -EFAULT;
+ }
+ if (!gsi_chan_info->evt_valid) {
+ IPAERR("Event info invalid\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static bool ipa3_is_xdci_channel_with_given_info_empty(
+ struct ipa3_ep_context *ep, struct gsi_chan_info *chan_info)
{
bool is_empty = false;
@@ -1303,6 +1321,28 @@ static bool ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
return is_empty;
}
+static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
+ bool *is_empty)
+{
+ struct gsi_chan_info chan_info;
+ int res;
+
+ if (!ep || !is_empty || !ep->valid) {
+ IPAERR("Input Error\n");
+ return -EFAULT;
+ }
+
+ res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl);
+ if (res) {
+ IPAERR("Failed to get GSI channel info\n");
+ return -EFAULT;
+ }
+
+ *is_empty = ipa3_is_xdci_channel_with_given_info_empty(ep, &chan_info);
+
+ return 0;
+}
+
static int ipa3_enable_force_clear(u32 request_id, bool throttle_source,
u32 source_pipe_bitmask)
{
@@ -1343,73 +1383,161 @@ static int ipa3_disable_force_clear(u32 request_id)
return 0;
}
-static int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
- unsigned long chan_hdl)
+/* Clocks should be voted for before invoking this function */
+static int ipa3_xdci_stop_gsi_channel(u32 clnt_hdl, bool *stop_in_proc)
{
- enum gsi_status gsi_res;
+ int res;
- memset(gsi_chan_info, 0, sizeof(struct gsi_chan_info));
- gsi_res = gsi_query_channel_info(chan_hdl, gsi_chan_info);
- if (gsi_res != GSI_STATUS_SUCCESS) {
- IPAERR("Error querying channel info: %d\n", gsi_res);
- return -EFAULT;
+ IPADBG("entry\n");
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 ||
+ !stop_in_proc) {
+ IPAERR("Bad parameter.\n");
+ return -EINVAL;
}
- if (!gsi_chan_info->evt_valid) {
- IPAERR("Event info invalid\n");
+
+ res = ipa3_stop_gsi_channel(clnt_hdl);
+ if (res != 0 && res != -GSI_STATUS_AGAIN &&
+ res != -GSI_STATUS_TIMED_OUT) {
+ IPAERR("xDCI stop channel failed res=%d\n", res);
return -EFAULT;
}
+ *stop_in_proc = res;
+
+ IPADBG("xDCI channel is %s (result=%d)\n",
+ res ? "STOP_IN_PROC/TimeOut" : "STOP", res);
+
+ IPADBG("exit\n");
return 0;
}
+/* Clocks should be voted for before invoking this function */
+static int ipa3_xdci_stop_gsi_ch_brute_force(u32 clnt_hdl,
+ bool *stop_in_proc)
+{
+ unsigned long jiffies_start;
+ unsigned long jiffies_timeout =
+ msecs_to_jiffies(IPA_CHANNEL_STOP_IN_PROC_TO_MSEC);
+ int res;
+
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0 ||
+ !stop_in_proc) {
+ IPAERR("Bad parameter.\n");
+ return -EINVAL;
+ }
+
+ jiffies_start = jiffies;
+ while (1) {
+ res = ipa3_xdci_stop_gsi_channel(clnt_hdl,
+ stop_in_proc);
+ if (res) {
+ IPAERR("failed to stop xDCI channel hdl=%d\n",
+ clnt_hdl);
+ return res;
+ }
+
+ if (!*stop_in_proc) {
+ IPADBG("xDCI channel STOP hdl=%d\n", clnt_hdl);
+ return res;
+ }
+
+ /*
+ * Give the previous stop request a chance to complete
+ * before retrying
+ */
+ udelay(IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC);
+
+ if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
+ IPADBG("timeout waiting for xDCI channel emptiness\n");
+ return res;
+ }
+ }
+}
+
/* Clocks should be voted for before invoking this function */
-static int ipa3_drain_ul_chan_data(struct ipa3_ep_context *ep, u32 qmi_req_id,
- u32 source_pipe_bitmask, bool should_force_clear)
+static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
+ u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl)
{
- int i;
- bool is_empty = false;
int result;
- struct gsi_chan_info gsi_chan_info;
+ bool is_empty = false;
+ int i;
+ bool stop_in_proc;
+ struct ipa3_ep_context *ep;
- result = ipa3_get_gsi_chan_info(&gsi_chan_info, ep->gsi_chan_hdl);
- if (result)
- return -EFAULT;
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("Bad parameter.\n");
+ return -EINVAL;
+ }
- do {
- for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
- is_empty = ipa3_is_xdci_channel_empty(ep,
- &gsi_chan_info);
- if (is_empty)
- break;
- udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
- }
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ /* first try to stop the channel */
+ result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+ &stop_in_proc);
+ if (result) {
+ IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+ clnt_hdl, ep->client);
+ goto exit;
+ }
+ if (!stop_in_proc)
+ goto exit;
+
+ /* if stop_in_proc, wait for the channel to empty */
+ for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
+ result = ipa3_is_xdci_channel_empty(ep, &is_empty);
+ if (result)
+ goto exit;
if (is_empty)
break;
- if (should_force_clear) {
- result = ipa3_enable_force_clear(qmi_req_id, true,
- source_pipe_bitmask);
- if (result)
- return -EFAULT;
- }
- for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
- is_empty = ipa3_is_xdci_channel_empty(ep,
- &gsi_chan_info);
- if (is_empty)
- break;
- udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
- }
- if (should_force_clear) {
- result = ipa3_disable_force_clear(qmi_req_id);
- if (result)
- return -EFAULT;
+ udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
+ }
+ /* if the channel is empty, try to stop it again */
+ if (is_empty) {
+ result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+ &stop_in_proc);
+ if (result) {
+ IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+ clnt_hdl, ep->client);
+ goto exit;
}
+ if (!stop_in_proc)
+ goto exit;
+ }
+ /* if still stop_in_proc or not empty, activate force clear */
+ if (should_force_clear) {
+ result = ipa3_enable_force_clear(qmi_req_id, true,
+ source_pipe_bitmask);
+ if (result)
+ goto exit;
+ }
+ /* with force clear, wait for emptiness */
+ for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
+ result = ipa3_is_xdci_channel_empty(ep, &is_empty);
+ if (result)
+ goto disable_force_clear_and_exit;
if (is_empty)
break;
- IPAERR("UL channel is not empty after draining it!\n");
- BUG();
- } while (0);
- return 0;
+ udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
+ }
+ /* try to stop for the last time */
+ result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+ &stop_in_proc);
+ if (result) {
+ IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+ clnt_hdl, ep->client);
+ goto disable_force_clear_and_exit;
+ }
+ result = stop_in_proc ? -EFAULT : 0;
+
+disable_force_clear_and_exit:
+ if (should_force_clear)
+ result = ipa3_disable_force_clear(qmi_req_id);
+exit:
+ return result;
}
int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
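The ipa3_xdci_stop_gsi_ch_brute_force()/ipa3_stop_ul_chan_with_data_drain() pair added above replaces the old single drain pass with a stop-retry-drain sequence. A minimal standalone sketch of the retry loop, not driver code; try_stop_channel(), STOP_RETRY_BUDGET and the fake success condition are illustrative stand-ins:

/*
 * Model of the brute-force stop: keep re-issuing the stop request until the
 * channel reports a clean STOP or the retry budget runs out, mirroring the
 * jiffies-based timeout in ipa3_xdci_stop_gsi_ch_brute_force().
 */
#include <stdbool.h>
#include <stdio.h>

#define STOP_RETRY_BUDGET 10	/* stands in for the msec timeout */

/* returns 0 on success; *in_proc set when HW answered STOP_IN_PROC/timeout */
static int try_stop_channel(int attempt, bool *in_proc)
{
	*in_proc = attempt < 3;		/* pretend the third retry lands */
	return 0;
}

static int stop_channel_brute_force(bool *stopped_cleanly)
{
	bool in_proc = true;
	int attempt;

	for (attempt = 0; attempt < STOP_RETRY_BUDGET; attempt++) {
		if (try_stop_channel(attempt, &in_proc))
			return -1;	/* hard failure from the stop request */
		if (!in_proc) {
			*stopped_cleanly = true;
			return 0;	/* channel reached STOP */
		}
		/* the driver udelay()s here to let the pending stop land */
	}
	*stopped_cleanly = false;	/* still STOP_IN_PROC after the budget */
	return 0;
}

int main(void)
{
	bool clean;

	if (stop_channel_brute_force(&clean) == 0)
		printf("channel %s\n", clean ? "stopped" : "still stopping");
	return 0;
}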
@@ -1418,7 +1546,7 @@ int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
int result;
u32 source_pipe_bitmask = 0;
- IPADBG("ipa3_xdci_disconnect: entry\n");
+ IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
IPAERR("Bad parameter.\n");
@@ -1428,33 +1556,40 @@ int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
ipa3_disable_data_path(clnt_hdl);
- /* Drain UL channel before stopping it */
if (!IPA_CLIENT_IS_CONS(ep->client)) {
- source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ep->client);
- result = ipa3_drain_ul_chan_data(ep, qmi_req_id,
- source_pipe_bitmask, should_force_clear);
- if (result)
- IPAERR("Error draining UL channel data: %d\n", result);
- }
-
- result = ipa3_stop_gsi_channel(clnt_hdl);
- if (result) {
- IPAERR("Error stopping channel: %d\n", result);
- goto stop_chan_fail;
+ IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n",
+ clnt_hdl, ep->client);
+ source_pipe_bitmask = 1 <<
+ ipa3_get_ep_mapping(ep->client);
+ result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
+ source_pipe_bitmask, should_force_clear, clnt_hdl);
+ if (result) {
+ IPAERR("Fail to stop UL channel with data drain\n");
+ WARN_ON(1);
+ goto stop_chan_fail;
+ }
+ } else {
+ IPADBG("Stopping CONS channel - hdl=%d clnt=%d\n",
+ clnt_hdl, ep->client);
+ result = ipa3_stop_gsi_channel(clnt_hdl);
+ if (result) {
+ IPAERR("Error stopping channel (CONS client): %d\n",
+ result);
+ goto stop_chan_fail;
+ }
}
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
- ipa3_dec_client_disable_clks();
-
- IPADBG("ipa3_xdci_disconnect: exit\n");
+ IPADBG("exit\n");
return 0;
stop_chan_fail:
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return result;
}
@@ -1464,7 +1599,7 @@ int ipa3_release_gsi_channel(u32 clnt_hdl)
int result = -EFAULT;
enum gsi_status gsi_res;
- IPADBG("ipa3_release_gsi_channel: entry\n");
+ IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
IPAERR("Bad parameter.\n");
@@ -1474,7 +1609,7 @@ int ipa3_release_gsi_channel(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
gsi_res = gsi_dealloc_channel(ep->gsi_chan_hdl);
if (gsi_res != GSI_STATUS_SUCCESS) {
@@ -1492,21 +1627,21 @@ int ipa3_release_gsi_channel(u32 clnt_hdl)
ipa3_delete_dflt_flt_rules(clnt_hdl);
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
- IPADBG("ipa3_release_gsi_channel: exit\n");
+ IPADBG("exit\n");
return 0;
dealloc_chan_fail:
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return result;
}
int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
- bool should_force_clear, u32 qmi_req_id)
+ bool should_force_clear, u32 qmi_req_id, bool is_dpl)
{
struct ipa3_ep_context *ul_ep, *dl_ep;
int result = -EFAULT;
@@ -1519,105 +1654,112 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
int aggr_active_bitmap = 0;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
- IPADBG("ipa3_xdci_suspend: entry\n");
- if (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
- ipa3_ctx->ep[ul_clnt_hdl].valid == 0 ||
- dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
- ipa3_ctx->ep[dl_clnt_hdl].valid == 0) {
+ /* In case of DPL, dl is the DPL channel/client */
+
+ IPADBG("entry\n");
+ if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[dl_clnt_hdl].valid == 0 ||
+ (!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) {
IPAERR("Bad parameter.\n");
return -EINVAL;
}
- ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
-
- if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake)
- ipa3_inc_client_enable_clks();
-
- result = ipa3_get_gsi_chan_info(&ul_gsi_chan_info,
- ul_ep->gsi_chan_hdl);
- if (result)
- goto query_chan_info_fail;
+ if (!is_dpl)
+ ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
result = ipa3_get_gsi_chan_info(&dl_gsi_chan_info,
dl_ep->gsi_chan_hdl);
if (result)
- goto query_chan_info_fail;
+ goto disable_clk_and_exit;
+
+ if (!is_dpl) {
+ result = ipa3_get_gsi_chan_info(&ul_gsi_chan_info,
+ ul_ep->gsi_chan_hdl);
+ if (result)
+ goto disable_clk_and_exit;
+ }
for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
if (!dl_data_pending && !ul_data_pending)
break;
- is_empty = ipa3_is_xdci_channel_empty(dl_ep,
- &dl_gsi_chan_info);
+ result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty);
+ if (result)
+ goto disable_clk_and_exit;
if (!is_empty) {
dl_data_pending = true;
break;
}
dl_data_pending = false;
- is_empty = ipa3_is_xdci_channel_empty(ul_ep,
- &ul_gsi_chan_info);
- ul_data_pending = is_empty ? false : true;
+ if (!is_dpl) {
+ result = ipa3_is_xdci_channel_empty(ul_ep, &is_empty);
+ if (result)
+ goto disable_clk_and_exit;
+ ul_data_pending = !is_empty;
+ } else {
+ ul_data_pending = false;
+ }
+
udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
}
if (!dl_data_pending) {
- aggr_active_bitmap = ipa_read_reg(ipa3_ctx->mmio,
- IPA_STATE_AGGR_ACTIVE_OFST);
+ aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
if (aggr_active_bitmap & (1 << dl_clnt_hdl)) {
- IPADBG("DL data pending due to open aggr. frame\n");
+ IPADBG("DL/DPL data pending due to open aggr. frame\n");
dl_data_pending = true;
}
}
if (dl_data_pending) {
- IPAERR("DL data pending, can't suspend\n");
+ IPAERR("DL/DPL data pending, can't suspend\n");
result = -EFAULT;
- goto query_chan_info_fail;
+ goto disable_clk_and_exit;
}
- /* Drain UL channel before stopping it */
- if (ul_data_pending) {
- source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
- result = ipa3_drain_ul_chan_data(ul_ep, qmi_req_id,
- source_pipe_bitmask, should_force_clear);
- if (result)
- IPAERR("Error draining UL channel data: %d\n", result);
- }
-
- /* Suspend the DL EP */
+ /* Suspend the DL/DPL EP */
memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
ep_cfg_ctrl.ipa_ep_suspend = true;
ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
/*
- * Check if DL channel is empty again, data could enter the channel
+ * Check if DL/DPL channel is empty again, data could enter the channel
* before its IPA EP was suspended
*/
- is_empty = ipa3_is_xdci_channel_empty(dl_ep, &dl_gsi_chan_info);
+ result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty);
+ if (result)
+ goto unsuspend_dl_and_exit;
if (!is_empty) {
- IPAERR("DL data pending, can't suspend\n");
- /* Unsuspend the DL EP */
- memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
- ep_cfg_ctrl.ipa_ep_suspend = false;
- ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+ IPAERR("DL/DPL data pending, can't suspend\n");
result = -EFAULT;
- goto query_chan_info_fail;
+ goto unsuspend_dl_and_exit;
}
- result = ipa3_stop_gsi_channel(ul_clnt_hdl);
- if (result) {
- IPAERR("Error stopping UL channel: %d\n", result);
- goto query_chan_info_fail;
+ /* STOP UL channel */
+ if (!is_dpl) {
+ source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
+ result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
+ source_pipe_bitmask, should_force_clear, ul_clnt_hdl);
+ if (result) {
+ IPAERR("Error stopping UL channel: result = %d\n",
+ result);
+ goto unsuspend_dl_and_exit;
+ }
}
- if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
- IPADBG("ipa3_xdci_suspend: exit\n");
+ IPADBG("exit\n");
return 0;
-query_chan_info_fail:
- if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+unsuspend_dl_and_exit:
+ /* Unsuspend the DL EP */
+ memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_suspend = false;
+ ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+disable_clk_and_exit:
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
return result;
}
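The reworked ipa3_xdci_suspend() above refuses to suspend while the DL/DPL channel still holds data, and only drains/stops the UL side when one exists (is_dpl == false). A simplified model of the emptiness gate, assuming POLL_FOR_EMPTINESS_NUM and chan_is_empty() as stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define POLL_FOR_EMPTINESS_NUM 5	/* assumed retry count */

static bool chan_is_empty(int poll)
{
	return poll >= 2;	/* pretend the channel drains after two polls */
}

/* 0: safe to suspend the DL/DPL EP, -1: data pending, abort the suspend */
static int check_dl_empty_before_suspend(void)
{
	int i;

	for (i = 0; i < POLL_FOR_EMPTINESS_NUM; i++) {
		if (chan_is_empty(i))
			return 0;
		/* the driver udelay()s between polls */
	}
	return -1;
}

int main(void)
{
	printf("suspend %s\n",
	       check_dl_empty_before_suspend() ? "aborted" : "allowed");
	return 0;
}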
@@ -1627,7 +1769,7 @@ int ipa3_start_gsi_channel(u32 clnt_hdl)
int result = -EFAULT;
enum gsi_status gsi_res;
- IPADBG("ipa3_start_gsi_channel: entry\n");
+ IPADBG("entry\n");
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
IPAERR("Bad parameters.\n");
@@ -1637,7 +1779,7 @@ int ipa3_start_gsi_channel(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
if (gsi_res != GSI_STATUS_SUCCESS) {
@@ -1646,52 +1788,54 @@ int ipa3_start_gsi_channel(u32 clnt_hdl)
}
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
- IPADBG("ipa3_start_gsi_channel: exit\n");
+ IPADBG("exit\n");
return 0;
start_chan_fail:
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return result;
}
-int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl)
+int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
{
struct ipa3_ep_context *ul_ep, *dl_ep;
enum gsi_status gsi_res;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
- IPADBG("ipa3_xdci_resume: entry\n");
- if (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
- ipa3_ctx->ep[ul_clnt_hdl].valid == 0 ||
- dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
- ipa3_ctx->ep[dl_clnt_hdl].valid == 0) {
+ /* In case of DPL, dl is the DPL channel/client */
+
+ IPADBG("entry\n");
+ if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[dl_clnt_hdl].valid == 0 ||
+ (!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) {
IPAERR("Bad parameter.\n");
return -EINVAL;
}
- ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
+ if (!is_dpl)
+ ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
- if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake)
- ipa3_inc_client_enable_clks();
-
- /* Unsuspend the DL EP */
+ /* Unsuspend the DL/DPL EP */
memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
- ep_cfg_ctrl.ipa_ep_suspend = true;
+ ep_cfg_ctrl.ipa_ep_suspend = false;
ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
/* Start UL channel */
- gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl);
- if (gsi_res != GSI_STATUS_SUCCESS)
- IPAERR("Error starting UL channel: %d\n", gsi_res);
+ if (!is_dpl) {
+ gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl);
+ if (gsi_res != GSI_STATUS_SUCCESS)
+ IPAERR("Error starting UL channel: %d\n", gsi_res);
+ }
- if (!ul_ep->keep_ipa_awake && !dl_ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
- IPADBG("ipa3_xdci_resume: exit\n");
+ IPADBG("exit\n");
return 0;
}
/**
@@ -1741,7 +1885,7 @@ int ipa3_clear_endpoint_delay(u32 clnt_hdl)
ep->qmi_request_sent = true;
}
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
/* Set disconnect in progress flag so further flow control events are
* not honored.
*/
@@ -1754,7 +1898,7 @@ int ipa3_clear_endpoint_delay(u32 clnt_hdl)
ep_ctrl.ipa_ep_suspend = false;
ipa3_cfg_ep_ctrl(clnt_hdl, &ep_ctrl);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 2915c9472a5d..dabf73029fa8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,9 +19,9 @@
#include "ipa_rm_i.h"
#define IPA_MAX_MSG_LEN 4096
-#define IPA_DBG_CNTR_ON 127265
-#define IPA_DBG_CNTR_OFF 127264
#define IPA_DBG_MAX_RULE_IN_TBL 128
+#define IPA_DBG_ACTIVE_CLIENT_BUF_SIZE ((IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN \
+ * IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) + IPA_MAX_MSG_LEN)
#define IPA_DUMP_STATUS_FIELD(f) \
pr_err(#f "=0x%x\n", status->f)
@@ -111,44 +111,37 @@ static struct dentry *dfile_msg;
static struct dentry *dfile_ip4_nat;
static struct dentry *dfile_rm_stats;
static struct dentry *dfile_status_stats;
+static struct dentry *dfile_active_clients;
static char dbg_buff[IPA_MAX_MSG_LEN];
+static char *active_clients_buf;
+
static s8 ep_reg_idx;
-/**
- * _ipa_read_gen_reg_v3_0() - Reads and prints IPA general configuration
- * registers
- *
- * Returns the number of characters printed
- */
-int _ipa_read_gen_reg_v3_0(char *buff, int max_len)
+
+static ssize_t ipa3_read_gen_reg(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
{
- return scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ int nbytes;
+ struct ipahal_reg_shared_mem_size smem_sz;
+
+ memset(&smem_sz, 0, sizeof(smem_sz));
+ ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
+
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"IPA_VERSION=0x%x\n"
"IPA_COMP_HW_VERSION=0x%x\n"
"IPA_ROUTE=0x%x\n"
"IPA_SHARED_MEM_RESTRICTED=0x%x\n"
"IPA_SHARED_MEM_SIZE=0x%x\n",
- ipa_read_reg(ipa3_ctx->mmio, IPA_VERSION_OFST),
- ipa_read_reg(ipa3_ctx->mmio, IPA_COMP_HW_VERSION_OFST),
- ipa_read_reg(ipa3_ctx->mmio, IPA_ROUTE_OFST_v3_0),
- ipa_read_reg_field(ipa3_ctx->mmio,
- IPA_SHARED_MEM_SIZE_OFST_v3_0,
- IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v3_0,
- IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v3_0),
- ipa_read_reg_field(ipa3_ctx->mmio,
- IPA_SHARED_MEM_SIZE_OFST_v3_0,
- IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v3_0,
- IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v3_0));
-}
+ ipahal_read_reg(IPA_VERSION),
+ ipahal_read_reg(IPA_COMP_HW_VERSION),
+ ipahal_read_reg(IPA_ROUTE),
+ smem_sz.shared_mem_baddr,
+ smem_sz.shared_mem_sz);
-static ssize_t ipa3_read_gen_reg(struct file *file, char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- int nbytes;
-
- ipa3_inc_client_enable_clks();
- nbytes = ipa3_ctx->ctrl->ipa3_read_gen_reg(dbg_buff, IPA_MAX_MSG_LEN);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
@@ -247,28 +240,17 @@ int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe)
"IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n"
"IPA_ENDP_INIT_DEAGGR_%u=0x%x\n"
"IPA_ENDP_INIT_CFG_%u=0x%x\n",
- pipe, ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_NAT_N_OFST_v3_0(pipe)),
- pipe, ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_HDR_N_OFST_v3_0(pipe)),
- pipe, ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_HDR_EXT_n_OFST_v3_0(pipe)),
- pipe, ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_MODE_N_OFST_v3_0(pipe)),
- pipe, ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_AGGR_N_OFST_v3_0(pipe)),
- pipe, ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_ROUTE_N_OFST_v3_0(pipe)),
- pipe, ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_CTRL_N_OFST(pipe)),
- pipe, ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v3_0(pipe)),
- pipe, ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v3_0(pipe)),
- pipe, ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_DEAGGR_n_OFST_v3_0(pipe)),
- pipe, ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_CFG_n_OFST(pipe)));
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_NAT_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_EXT_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_MODE_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_ROUTE_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CTRL_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_DEAGGR_n, pipe),
+ pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe));
}
static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf,
@@ -291,7 +273,7 @@ static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf,
end_idx = start_idx + 1;
}
pos = *ppos;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
for (i = start_idx; i < end_idx; i++) {
nbytes = ipa3_ctx->ctrl->ipa3_read_ep_reg(dbg_buff,
@@ -301,7 +283,7 @@ static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf,
ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
nbytes);
if (ret < 0) {
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return ret;
}
@@ -309,7 +291,7 @@ static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf,
ubuf += nbytes;
count -= nbytes;
}
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
*ppos = pos + size;
return size;
@@ -333,9 +315,9 @@ static ssize_t ipa3_write_keep_awake(struct file *file, const char __user *buf,
return -EFAULT;
if (option == 1)
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
else if (option == 0)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
else
return -EFAULT;
@@ -1250,25 +1232,12 @@ static ssize_t ipa3_read_wdi(struct file *file, char __user *ubuf,
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
-/**
- * _ipa_write_dbg_cnt_v3_0() - Configure IPA debug counter register
- *
- */
-void _ipa_write_dbg_cnt_v3_0(int option)
-{
- if (option == 1)
- ipa_write_reg(ipa3_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v3_0(0),
- IPA_DBG_CNTR_ON);
- else
- ipa_write_reg(ipa3_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v3_0(0),
- IPA_DBG_CNTR_OFF);
-}
-
static ssize_t ipa3_write_dbg_cnt(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long missing;
u32 option = 0;
+ struct ipahal_reg_debug_cnt_ctrl dbg_cnt_ctrl;
if (sizeof(dbg_buff) < count + 1)
return -EFAULT;
@@ -1281,36 +1250,36 @@ static ssize_t ipa3_write_dbg_cnt(struct file *file, const char __user *buf,
if (kstrtou32(dbg_buff, 0, &option))
return -EFAULT;
- ipa3_inc_client_enable_clks();
- ipa3_ctx->ctrl->ipa3_write_dbg_cnt(option);
- ipa3_dec_client_disable_clks();
-
- return count;
-}
-
-/**
- * _ipa_write_dbg_cnt_v3_0() - Read IPA debug counter register
- *
- */
-int _ipa_read_dbg_cnt_v3_0(char *buf, int max_len)
-{
- int regval;
+ memset(&dbg_cnt_ctrl, 0, sizeof(dbg_cnt_ctrl));
+ dbg_cnt_ctrl.type = DBG_CNT_TYPE_GENERAL;
+ dbg_cnt_ctrl.product = true;
+ dbg_cnt_ctrl.src_pipe = 0x1f;
+ dbg_cnt_ctrl.rule_idx_pipe_rule = false;
+ dbg_cnt_ctrl.rule_idx = 0;
+ if (option == 1)
+ dbg_cnt_ctrl.en = true;
+ else
+ dbg_cnt_ctrl.en = false;
- regval = ipa_read_reg(ipa3_ctx->mmio,
- IPA_DEBUG_CNT_REG_N_OFST_v3_0(0));
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipahal_write_reg_n_fields(IPA_DEBUG_CNT_CTRL_n, 0, &dbg_cnt_ctrl);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
- return scnprintf(buf, max_len,
- "IPA_DEBUG_CNT_REG_0=0x%x\n", regval);
+ return count;
}
static ssize_t ipa3_read_dbg_cnt(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
int nbytes;
+ u32 regval;
- ipa3_inc_client_enable_clks();
- nbytes = ipa3_ctx->ctrl->ipa3_read_dbg_cnt(dbg_buff, IPA_MAX_MSG_LEN);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ regval =
+ ipahal_read_reg_n(IPA_DEBUG_CNT_REG_n, 0);
+ nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+ "IPA_DEBUG_CNT_REG_0=0x%x\n", regval);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
}
@@ -1585,6 +1554,49 @@ static ssize_t ipa_status_stats_read(struct file *file, char __user *ubuf,
return 0;
}
+static ssize_t ipa3_print_active_clients_log(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ int cnt;
+ int table_size;
+
+ if (active_clients_buf == NULL) {
+ IPAERR("Active Clients buffer is not allocated");
+ return 0;
+ }
+ memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENT_BUF_SIZE);
+ ipa3_active_clients_lock();
+ cnt = ipa3_active_clients_log_print_buffer(active_clients_buf,
+ IPA_DBG_ACTIVE_CLIENT_BUF_SIZE - IPA_MAX_MSG_LEN);
+ table_size = ipa3_active_clients_log_print_table(active_clients_buf
+ + cnt, IPA_MAX_MSG_LEN);
+ ipa3_active_clients_unlock();
+
+ return simple_read_from_buffer(ubuf, count, ppos,
+ active_clients_buf, cnt + table_size);
+}
+
+static ssize_t ipa3_clear_active_clients_log(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ unsigned long missing;
+ s8 option = 0;
+
+ if (sizeof(dbg_buff) < count + 1)
+ return -EFAULT;
+
+ missing = copy_from_user(dbg_buff, ubuf, count);
+ if (missing)
+ return -EFAULT;
+
+ dbg_buff[count] = '\0';
+ if (kstrtos8(dbg_buff, 0, &option))
+ return -EFAULT;
+
+ ipa3_active_clients_log_clear();
+
+ return count;
+}
const struct file_operations ipa3_gen_reg_ops = {
.read = ipa3_read_gen_reg,
@@ -1665,6 +1677,11 @@ const struct file_operations ipa3_rm_stats = {
.read = ipa3_rm_read_stats,
};
+const struct file_operations ipa3_active_clients = {
+ .read = ipa3_print_active_clients_log,
+ .write = ipa3_clear_active_clients_log,
+};
+
void ipa3_debugfs_init(void)
{
const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
@@ -1694,6 +1711,19 @@ void ipa3_debugfs_init(void)
goto fail;
}
+ dfile_active_clients = debugfs_create_file("active_clients",
+ read_write_mode, dent, 0, &ipa3_active_clients);
+ if (!dfile_active_clients || IS_ERR(dfile_active_clients)) {
+ IPAERR("fail to create file for debug_fs active_clients\n");
+ goto fail;
+ }
+
+ active_clients_buf = NULL;
+ active_clients_buf = kzalloc(IPA_DBG_ACTIVE_CLIENT_BUF_SIZE,
+ GFP_KERNEL);
+ if (active_clients_buf == NULL)
+ IPAERR("fail to allocate active clients memory buffer");
+
dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
&ipa3_ep_reg_ops);
if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
@@ -1864,6 +1894,13 @@ void ipa3_debugfs_init(void)
goto fail;
}
+ file = debugfs_create_u32("enable_low_prio_print", read_write_mode,
+ dent, &ipa3_ctx->enable_low_prio_print);
+ if (!file) {
+ IPAERR("could not create enable_low_prio_print file\n");
+ goto fail;
+ }
+
return;
fail:
@@ -1876,6 +1913,10 @@ void ipa3_debugfs_remove(void)
IPAERR("ipa3_debugfs_remove: folder was not created.\n");
return;
}
+ if (active_clients_buf != NULL) {
+ kfree(active_clients_buf);
+ active_clients_buf = NULL;
+ }
debugfs_remove_recursive(dent);
}
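The debugfs additions above expose an active_clients node that dumps the recent vote log followed by the per-client table. An illustrative userspace read, assuming debugfs is mounted at /sys/kernel/debug and the driver's directory is the usual "ipa":

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/ipa/active_clients", "r");

	if (!f) {
		perror("open active_clients");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* recent votes plus the per-client table */
	fclose(f);
	return 0;
}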
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
index 42ef80218c21..966f279d863b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
@@ -259,7 +259,7 @@ int ipa3_dma_enable(void)
mutex_unlock(&ipa3_dma_ctx->enable_lock);
return -EPERM;
}
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA");
ipa3_dma_ctx->is_enabled = true;
mutex_unlock(&ipa3_dma_ctx->enable_lock);
@@ -322,7 +322,7 @@ int ipa3_dma_disable(void)
}
ipa3_dma_ctx->is_enabled = false;
spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA");
mutex_unlock(&ipa3_dma_ctx->enable_lock);
IPADMA_FUNC_EXIT();
return 0;
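Many per-packet prints in the data-path file below drop from IPADBG to IPADBG_LOW, gated by the new enable_low_prio_print debugfs knob. A compile-and-run sketch of that gating; only the knob name mirrors the driver, the macro bodies are stand-ins:

#include <stdio.h>

static unsigned int enable_low_prio_print;	/* toggled via debugfs in the driver */

#define DBG(fmt, ...)		printf("ipa: " fmt, ##__VA_ARGS__)
#define DBG_LOW(fmt, ...)	do { \
					if (enable_low_prio_print) \
						printf("ipa: " fmt, ##__VA_ARGS__); \
				} while (0)

int main(void)
{
	DBG("client connected\n");		/* always printed */
	DBG_LOW("rx avail for %d\n", 3);	/* suppressed by default */
	enable_low_prio_print = 1;
	DBG_LOW("rx avail for %d\n", 3);	/* now printed */
	return 0;
}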
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 77907eea5501..66d22817b0dc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,8 @@
#include <linux/netdevice.h>
#include <linux/msm_gsi.h>
#include "ipa_i.h"
+#include "ipa_trace.h"
+#include "ipahal/ipahal.h"
#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
@@ -37,6 +39,8 @@
#define IPA_GENERIC_RX_BUFF_SZ (IPA_GENERIC_RX_BUFF_BASE_SZ -\
(IPA_REAL_GENERIC_RX_BUFF_SZ - IPA_GENERIC_RX_BUFF_BASE_SZ))
+#define IPA_RX_BUFF_CLIENT_HEADROOM 256
+
#define IPA_WLAN_RX_POOL_SZ 100
#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
#define IPA_WLAN_RX_BUFF_SZ 2048
@@ -56,6 +60,7 @@ static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_work_func(struct work_struct *work);
+static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_wq_handle_rx(struct work_struct *work);
static void ipa3_wq_handle_tx(struct work_struct *work);
static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size);
@@ -73,7 +78,7 @@ static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
static int ipa_gsi_setup_channel(struct ipa3_ep_context *ep);
static int ipa_populate_tag_field(struct ipa3_desc *desc,
struct ipa3_tx_pkt_wrapper *tx_pkt,
- struct ipa3_ip_packet_tag_status **tag_ret);
+ struct ipahal_imm_cmd_pyld **tag_pyld_ret);
static int ipa_handle_rx_core_gsi(struct ipa3_sys_context *sys,
bool process_all, bool in_poll_state);
static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
@@ -88,7 +93,7 @@ static void ipa3_wq_write_done_common(struct ipa3_sys_context *sys,
int i, cnt;
cnt = tx_pkt->cnt;
- IPADBG("cnt: %d\n", cnt);
+ IPADBG_LOW("cnt: %d\n", cnt);
for (i = 0; i < cnt; i++) {
spin_lock_bh(&sys->spinlock);
if (unlikely(list_empty(&sys->head_desc_list))) {
@@ -256,7 +261,7 @@ static void ipa3_handle_tx(struct ipa3_sys_context *sys)
int inactive_cycles = 0;
int cnt;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
do {
cnt = ipa3_handle_tx_core(sys, true, true);
if (cnt == 0) {
@@ -269,7 +274,7 @@ static void ipa3_handle_tx(struct ipa3_sys_context *sys)
} while (inactive_cycles <= POLLING_INACTIVITY_TX);
ipa3_tx_switch_to_intr_mode(sys);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
static void ipa3_wq_handle_tx(struct work_struct *work)
@@ -358,7 +363,7 @@ int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
if (desc->type == IPA_IMM_CMD_DESC) {
sps_flags |= SPS_IOVEC_FLAG_IMME;
len = desc->opcode;
- IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
+ IPADBG_LOW("sending cmd=%d pyld_len=%d sps_flags=%x\n",
desc->opcode, desc->len, sps_flags);
IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
} else {
@@ -427,7 +432,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
bool in_atomic)
{
struct ipa3_tx_pkt_wrapper *tx_pkt, *tx_pkt_first;
- struct ipa3_ip_packet_tag_status *tag_ret = NULL;
+ struct ipahal_imm_cmd_pyld *tag_pyld_ret = NULL;
struct ipa3_tx_pkt_wrapper *next_pkt;
struct sps_transfer transfer = { 0 };
struct sps_iovec *iovec;
@@ -501,9 +506,11 @@ int ipa3_send(struct ipa3_sys_context *sys,
}
/* populate tag field */
- if (desc[i].opcode == IPA_IP_PACKET_TAG_STATUS) {
+ if (desc[i].opcode ==
+ ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS)) {
if (ipa_populate_tag_field(&desc[i], tx_pkt,
- &tag_ret)) {
+ &tag_pyld_ret)) {
IPAERR("Failed to populate tag field\n");
goto failure;
}
@@ -622,7 +629,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
return 0;
failure:
- kfree(tag_ret);
+ ipahal_destroy_imm_cmd(tag_pyld_ret);
tx_pkt = tx_pkt_first;
for (j = 0; j < i; j++) {
next_pkt = list_next_entry(tx_pkt, link);
@@ -675,7 +682,7 @@ static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
WARN_ON(1);
return;
}
- IPADBG("got ack for cmd=%d\n", desc->opcode);
+ IPADBG_LOW("got ack for cmd=%d\n", desc->opcode);
complete(&desc->xfer_done);
}
@@ -692,11 +699,12 @@ static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
{
struct ipa3_desc *desc;
- int result = 0;
+ int i, result = 0;
struct ipa3_sys_context *sys;
int ep_idx;
- IPADBG("sending command\n");
+ for (i = 0; i < num_desc; i++)
+ IPADBG("sending imm cmd %d\n", descr[i].opcode);
ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
if (-1 == ep_idx) {
@@ -705,8 +713,7 @@ int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
return -EFAULT;
}
sys = ipa3_ctx->ep[ep_idx].sys;
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
if (num_desc == 1) {
init_completion(&descr->xfer_done);
@@ -740,7 +747,7 @@ int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
}
bail:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return result;
}
@@ -757,7 +764,7 @@ static void ipa3_sps_irq_tx_notify(struct sps_event_notify *notify)
struct ipa3_sys_context *sys = (struct ipa3_sys_context *)notify->user;
int ret;
- IPADBG("event %d notified\n", notify->event_id);
+ IPADBG_LOW("event %d notified\n", notify->event_id);
switch (notify->event_id) {
case SPS_EVENT_EOT:
@@ -801,7 +808,7 @@ static void ipa3_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
{
struct ipa3_tx_pkt_wrapper *tx_pkt;
- IPADBG("event %d notified\n", notify->event_id);
+ IPADBG_LOW("event %d notified\n", notify->event_id);
switch (notify->event_id) {
case SPS_EVENT_EOT:
@@ -911,7 +918,7 @@ static void ipa3_sps_irq_rx_notify(struct sps_event_notify *notify)
struct ipa3_sys_context *sys = (struct ipa3_sys_context *)notify->user;
int ret;
- IPADBG("event %d notified\n", notify->event_id);
+ IPADBG_LOW("event %d notified\n", notify->event_id);
switch (notify->event_id) {
case SPS_EVENT_EOT:
@@ -934,6 +941,7 @@ static void ipa3_sps_irq_rx_notify(struct sps_event_notify *notify)
}
ipa3_inc_acquire_wakelock();
atomic_set(&sys->curr_polling_state, 1);
+ trace_intr_to_poll3(sys->ep->client);
queue_work(sys->wq, &sys->work);
}
break;
@@ -970,20 +978,23 @@ static void ipa3_handle_rx(struct ipa3_sys_context *sys)
int inactive_cycles = 0;
int cnt;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
do {
cnt = ipa3_handle_rx_core(sys, true, true);
if (cnt == 0) {
inactive_cycles++;
+ trace_idle_sleep_enter3(sys->ep->client);
usleep_range(POLLING_MIN_SLEEP_RX,
POLLING_MAX_SLEEP_RX);
+ trace_idle_sleep_exit3(sys->ep->client);
} else {
inactive_cycles = 0;
}
} while (inactive_cycles <= POLLING_INACTIVITY_RX);
+ trace_poll_to_intr3(sys->ep->client);
ipa3_rx_switch_to_intr_mode(sys);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
@@ -1039,8 +1050,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
}
ep = &ipa3_ctx->ep[ipa_ep_idx];
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
if (ep->valid == 1) {
if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
@@ -1065,7 +1075,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
ep->priv = sys_in->priv;
*clnt_hdl = ipa_ep_idx;
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
return 0;
}
@@ -1106,6 +1116,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
}
INIT_LIST_HEAD(&ep->sys->head_desc_list);
+ INIT_LIST_HEAD(&ep->sys->rcycl_list);
spin_lock_init(&ep->sys->spinlock);
} else {
memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
@@ -1242,17 +1253,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
*clnt_hdl = ipa_ep_idx;
- if (IPA_CLIENT_IS_CONS(sys_in->client))
- ipa3_replenish_rx_cache(ep->sys);
-
- if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
- ipa3_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
- atomic_inc(&ipa3_ctx->wc_memb.active_clnt_cnt);
- }
-
- if (nr_cpu_ids > 1 &&
- (sys_in->client == IPA_CLIENT_APPS_LAN_CONS ||
- sys_in->client == IPA_CLIENT_APPS_WAN_CONS)) {
+ if (ep->sys->repl_hdlr == ipa3_fast_replenish_rx_cache) {
ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
ep->sys->repl.cache = kzalloc(ep->sys->repl.capacity *
sizeof(void *), GFP_KERNEL);
@@ -1267,6 +1268,14 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
}
}
+ if (IPA_CLIENT_IS_CONS(sys_in->client))
+ ipa3_replenish_rx_cache(ep->sys);
+
+ if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
+ ipa3_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
+ atomic_inc(&ipa3_ctx->wc_memb.active_clnt_cnt);
+ }
+
ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
@@ -1277,7 +1286,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
}
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
ipa_ep_idx, ep->sys);
@@ -1300,7 +1309,7 @@ fail_wq:
kfree(ep->sys);
memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
fail_and_disable_clocks:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
fail_gen:
return result;
}
@@ -1326,7 +1335,7 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
ipa3_disable_data_path(clnt_hdl);
@@ -1342,6 +1351,8 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
} while (1);
}
+ if (IPA_CLIENT_IS_CONS(ep->client))
+ cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
flush_workqueue(ep->sys->wq);
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
result = ipa3_stop_gsi_channel(clnt_hdl);
@@ -1417,7 +1428,7 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
ipa3_cleanup_wlan_rx_common_cache();
ep->valid = 0;
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
@@ -1439,7 +1450,7 @@ static void ipa3_tx_comp_usr_notify_release(void *user1, int user2)
struct sk_buff *skb = (struct sk_buff *)user1;
int ep_idx = user2;
- IPADBG("skb=%p ep=%d\n", skb, ep_idx);
+ IPADBG_LOW("skb=%p ep=%d\n", skb, ep_idx);
IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_pkts_compl);
@@ -1452,7 +1463,7 @@ static void ipa3_tx_comp_usr_notify_release(void *user1, int user2)
static void ipa3_tx_cmd_comp(void *user1, int user2)
{
- kfree(user1);
+ ipahal_destroy_imm_cmd(user1);
}
/**
@@ -1487,7 +1498,8 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
{
struct ipa3_desc desc[3];
int dst_ep_idx;
- struct ipa3_ip_packet_init *cmd;
+ struct ipahal_imm_cmd_ip_packet_init cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
struct ipa3_sys_context *sys;
int src_ep_idx;
@@ -1536,24 +1548,26 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
if (dst_ep_idx != -1) {
/* SW data path */
- cmd = kzalloc(sizeof(struct ipa3_ip_packet_init), GFP_ATOMIC);
- if (!cmd) {
- IPAERR("failed to alloc immediate command object\n");
+ cmd.destination_pipe_index = dst_ep_idx;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_PACKET_INIT, &cmd, true);
+ if (unlikely(!cmd_pyld)) {
+ IPAERR("failed to construct ip_packet_init imm cmd\n");
goto fail_gen;
}
- cmd->destination_pipe_index = dst_ep_idx;
-
/* the tag field will be populated in ipa3_send() function */
- desc[0].opcode = IPA_IP_PACKET_TAG_STATUS;
+ desc[0].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
desc[0].type = IPA_IMM_CMD_DESC;
- desc[0].callback = ipa3_tag_free_buf;
- desc[1].opcode = IPA_IP_PACKET_INIT;
- desc[1].pyld = cmd;
- desc[1].len = sizeof(struct ipa3_ip_packet_init);
+ desc[0].callback = ipa3_tag_destroy_imm;
+ desc[1].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
+ desc[1].pyld = cmd_pyld->data;
+ desc[1].len = cmd_pyld->len;
desc[1].type = IPA_IMM_CMD_DESC;
desc[1].callback = ipa3_tx_cmd_comp;
- desc[1].user1 = cmd;
+ desc[1].user1 = cmd_pyld;
desc[2].pyld = skb->data;
desc[2].len = skb->len;
desc[2].type = IPA_DATA_DESC_SKB;
@@ -1575,9 +1589,11 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_sw_pkts);
} else {
/* HW data path */
- desc[0].opcode = IPA_IP_PACKET_TAG_STATUS;
+ desc[0].opcode =
+ ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
desc[0].type = IPA_IMM_CMD_DESC;
- desc[0].callback = ipa3_tag_free_buf;
+ desc[0].callback = ipa3_tag_destroy_imm;
desc[1].pyld = skb->data;
desc[1].len = skb->len;
desc[1].type = IPA_DATA_DESC_SKB;
@@ -1600,7 +1616,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
return 0;
fail_send:
- kfree(cmd);
+ ipahal_destroy_imm_cmd(cmd_pyld);
fail_gen:
return -EFAULT;
}
@@ -1697,7 +1713,7 @@ static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys)
struct gsi_xfer_elem gsi_xfer_elem_one;
u32 rx_len_cached = 0;
- IPADBG("\n");
+ IPADBG_LOW("\n");
spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
rx_len_cached = sys->len;
@@ -1948,6 +1964,113 @@ fail_kmem_cache_alloc:
msecs_to_jiffies(1));
}
+static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
+{
+ void *ptr;
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+ int ret;
+ int rx_len_cached = 0;
+ struct gsi_xfer_elem gsi_xfer_elem_one;
+ gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+ rx_len_cached = sys->len;
+
+ while (rx_len_cached < sys->rx_pool_sz) {
+ if (list_empty(&sys->rcycl_list)) {
+ rx_pkt = kmem_cache_zalloc(
+ ipa3_ctx->rx_pkt_wrapper_cache, flag);
+ if (!rx_pkt) {
+ IPAERR("failed to alloc rx wrapper\n");
+ goto fail_kmem_cache_alloc;
+ }
+
+ INIT_LIST_HEAD(&rx_pkt->link);
+ INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+ rx_pkt->sys = sys;
+
+ rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+ if (rx_pkt->data.skb == NULL) {
+ IPAERR("failed to alloc skb\n");
+ kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
+ rx_pkt);
+ goto fail_kmem_cache_alloc;
+ }
+ ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+ rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
+ ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+ if (rx_pkt->data.dma_addr == 0 ||
+ rx_pkt->data.dma_addr == ~0) {
+ IPAERR("dma_map_single failure %p for %p\n",
+ (void *)rx_pkt->data.dma_addr, ptr);
+ goto fail_dma_mapping;
+ }
+ } else {
+ spin_lock_bh(&sys->spinlock);
+ rx_pkt = list_first_entry(&sys->rcycl_list,
+ struct ipa3_rx_pkt_wrapper, link);
+ list_del(&rx_pkt->link);
+ spin_unlock_bh(&sys->spinlock);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+ rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
+ ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+ if (rx_pkt->data.dma_addr == 0 ||
+ rx_pkt->data.dma_addr == ~0) {
+ IPAERR("dma_map_single failure %p for %p\n",
+ (void *)rx_pkt->data.dma_addr, ptr);
+ goto fail_dma_mapping;
+ }
+ }
+
+ list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+ rx_len_cached = ++sys->len;
+ if (ipa3_ctx->transport_prototype ==
+ IPA_TRANSPORT_TYPE_GSI) {
+ memset(&gsi_xfer_elem_one, 0,
+ sizeof(gsi_xfer_elem_one));
+ gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+ gsi_xfer_elem_one.len = sys->rx_buff_sz;
+ gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+ gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+ gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+
+ ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
+ 1, &gsi_xfer_elem_one, true);
+ if (ret != GSI_STATUS_SUCCESS) {
+ IPAERR("failed to provide buffer: %d\n",
+ ret);
+ goto fail_provide_rx_buffer;
+ }
+ } else {
+ ret = sps_transfer_one(sys->ep->ep_hdl,
+ rx_pkt->data.dma_addr, sys->rx_buff_sz,
+ rx_pkt, 0);
+
+ if (ret) {
+ IPAERR("sps_transfer_one failed %d\n", ret);
+ goto fail_provide_rx_buffer;
+ }
+ }
+ }
+
+ return;
+fail_provide_rx_buffer:
+ rx_len_cached = --sys->len;
+ list_del(&rx_pkt->link);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+fail_dma_mapping:
+ spin_lock_bh(&sys->spinlock);
+ list_add_tail(&rx_pkt->link, &sys->rcycl_list);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ spin_unlock_bh(&sys->spinlock);
+fail_kmem_cache_alloc:
+ if (rx_len_cached == 0)
+ queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+ msecs_to_jiffies(1));
+}
+
static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
{
struct ipa3_rx_pkt_wrapper *rx_pkt;
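ipa3_replenish_rx_cache_recycle() above refills the RX ring from the new rcycl_list before falling back to fresh allocations. A standalone model of that reuse-first policy, using plain pointers instead of the kernel's list_head and skipping DMA mapping:

#include <stdio.h>
#include <stdlib.h>

struct rx_buf {
	struct rx_buf *next;
	char payload[64];
};

static struct rx_buf *recycle_list;

static struct rx_buf *get_rx_buf(void)
{
	struct rx_buf *b = recycle_list;

	if (b) {				/* fast path: recycled buffer */
		recycle_list = b->next;
		b->next = NULL;
		return b;
	}
	return calloc(1, sizeof(*b));		/* slow path: fresh allocation */
}

static void put_rx_buf(struct rx_buf *b)
{
	b->next = recycle_list;		/* park the buffer for the next refill */
	recycle_list = b;
}

int main(void)
{
	struct rx_buf *a = get_rx_buf();

	put_rx_buf(a);
	printf("reused: %s\n", get_rx_buf() == a ? "yes" : "no");
	return 0;
}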
@@ -2022,9 +2145,9 @@ static void ipa3_replenish_rx_work_func(struct work_struct *work)
dwork = container_of(work, struct delayed_work, work);
sys = container_of(dwork, struct ipa3_sys_context, replenish_rx_work);
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
sys->repl_hdlr(sys);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
/**
@@ -2047,6 +2170,15 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
}
+ list_for_each_entry_safe(rx_pkt, r,
+ &sys->rcycl_list, link) {
+ list_del(&rx_pkt->link);
+ dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+ sys->free_skb(rx_pkt->data.skb);
+ kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+ }
+
if (sys->repl.cache) {
head = atomic_read(&sys->repl.head_idx);
tail = atomic_read(&sys->repl.tail_idx);
@@ -2062,6 +2194,21 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
}
}
+static struct sk_buff *ipa3_skb_copy_for_client(struct sk_buff *skb, int len)
+{
+ struct sk_buff *skb2 = NULL;
+
+ skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
+ if (likely(skb2)) {
+ /* Set the data pointer */
+ skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
+ memcpy(skb2->data, skb->data, len);
+ skb2->len = len;
+ skb_set_tail_pointer(skb2, len);
+ }
+
+ return skb2;
+}
static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
struct ipa3_sys_context *sys)
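ipa3_skb_copy_for_client() above hands the LAN client a copy of just the received packet, with IPA_RX_BUFF_CLIENT_HEADROOM of headroom, instead of cloning the large RX skb and pinning it until the client frees it. A simplified userland model of the copy-with-headroom layout (the buffer struct and sizes are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CLIENT_HEADROOM 256

struct buf {
	unsigned char *mem;	/* underlying allocation */
	unsigned char *data;	/* start of packet data, after the headroom */
	size_t len;
};

static struct buf *copy_for_client(const unsigned char *pkt, size_t len)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	b->mem = malloc(CLIENT_HEADROOM + len);
	if (!b->mem) {
		free(b);
		return NULL;
	}
	b->data = b->mem + CLIENT_HEADROOM;	/* leave room for client headers */
	memcpy(b->data, pkt, len);
	b->len = len;
	return b;
}

int main(void)
{
	unsigned char pkt[] = { 0x45, 0x00, 0x00, 0x14 };
	struct buf *b = copy_for_client(pkt, sizeof(pkt));

	printf("copied %zu bytes with %d bytes headroom\n",
	       b ? b->len : (size_t)0, CLIENT_HEADROOM);
	return 0;
}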
@@ -2089,7 +2236,7 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
}
if (sys->len_partial) {
- IPADBG("len_partial %d\n", sys->len_partial);
+ IPADBG_LOW("len_partial %d\n", sys->len_partial);
buf = skb_push(skb, sys->len_partial);
memcpy(buf, sys->prev_skb->data, sys->len_partial);
sys->len_partial = 0;
@@ -2100,7 +2247,7 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
/* this pipe has TX comp (status only) + mux-ed LAN RX data
* (status+data) */
if (sys->len_rem) {
- IPADBG("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
+ IPADBG_LOW("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
sys->len_pad);
if (sys->len_rem <= skb->len) {
if (sys->prev_skb) {
@@ -2147,18 +2294,18 @@ static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
begin:
while (skb->len) {
drop_packet = false;
- IPADBG("LEN_REM %d\n", skb->len);
+ IPADBG_LOW("LEN_REM %d\n", skb->len);
if (skb->len < IPA_PKT_STATUS_SIZE) {
WARN_ON(sys->prev_skb != NULL);
- IPADBG("status straddles buffer\n");
+ IPADBG_LOW("status straddles buffer\n");
sys->prev_skb = skb;
sys->len_partial = skb->len;
return rc;
}
status = (struct ipa3_hw_pkt_status *)skb->data;
- IPADBG("STATUS opcode=%d src=%d dst=%d len=%d\n",
+ IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
status->status_opcode, status->endp_src_idx,
status->endp_dest_idx, status->pkt_len);
if (sys->status_stat) {
@@ -2191,7 +2338,7 @@ begin:
if (status->status_mask & IPA_HW_PKT_STATUS_MASK_TAG_VALID) {
struct ipa3_tag_completion *comp;
- IPADBG("TAG packet arrived\n");
+ IPADBG_LOW("TAG packet arrived\n");
if (status->tag == IPA_COOKIE) {
skb_pull(skb, IPA_PKT_STATUS_SIZE);
if (skb->len < sizeof(comp)) {
@@ -2208,11 +2355,11 @@ begin:
} else {
ptr = tag_to_pointer_wa(status->tag);
tx_pkt = (struct ipa3_tx_pkt_wrapper *)ptr;
- IPADBG("tx_pkt recv = %p\n", tx_pkt);
+ IPADBG_LOW("tx_pkt recv = %p\n", tx_pkt);
}
}
if (status->pkt_len == 0) {
- IPADBG("Skip aggr close status\n");
+ IPADBG_LOW("Skip aggr close status\n");
skb_pull(skb, IPA_PKT_STATUS_SIZE);
IPA_STATS_INC_CNT(ipa3_ctx->stats.aggr_close);
IPA_STATS_DEC_CNT(
@@ -2235,7 +2382,7 @@ begin:
if (skb->len == IPA_PKT_STATUS_SIZE &&
!status->exception) {
WARN_ON(sys->prev_skb != NULL);
- IPADBG("Ins header in next buffer\n");
+ IPADBG_LOW("Ins header in next buffer\n");
sys->prev_skb = skb;
sys->len_partial = skb->len;
return rc;
@@ -2246,20 +2393,22 @@ begin:
len = status->pkt_len + pad_len_byte +
IPA_SIZE_DL_CSUM_META_TRAILER;
- IPADBG("pad %d pkt_len %d len %d\n", pad_len_byte,
+ IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte,
status->pkt_len, len);
if (status->exception ==
IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR) {
- IPADBG("Dropping packet on DeAggr Exception\n");
+ IPADBG_LOW(
+ "Dropping packet on DeAggr Exception\n");
skb_pull(skb, len + IPA_PKT_STATUS_SIZE);
continue;
}
- skb2 = skb_clone(skb, GFP_KERNEL);
+ skb2 = ipa3_skb_copy_for_client(skb,
+ status->pkt_len + IPA_PKT_STATUS_SIZE);
if (likely(skb2)) {
if (skb->len < len + IPA_PKT_STATUS_SIZE) {
- IPADBG("SPL skb len %d len %d\n",
+ IPADBG_LOW("SPL skb len %d len %d\n",
skb->len, len);
sys->prev_skb = skb2;
sys->len_rem = len - skb->len +
@@ -2269,7 +2418,7 @@ begin:
} else {
skb_trim(skb2, status->pkt_len +
IPA_PKT_STATUS_SIZE);
- IPADBG("rx avail for %d\n",
+ IPADBG_LOW("rx avail for %d\n",
status->endp_dest_idx);
if (drop_packet)
dev_kfree_skb_any(skb2);
@@ -2288,7 +2437,7 @@ begin:
IPA_PKT_STATUS_SIZE);
}
} else {
- IPAERR("fail to clone\n");
+ IPAERR("fail to alloc skb\n");
if (skb->len < len) {
sys->prev_skb = NULL;
sys->len_rem = len - skb->len +
@@ -2302,11 +2451,12 @@ begin:
}
/* TX comp */
ipa3_wq_write_done_status(src_pipe, tx_pkt);
- IPADBG("tx comp imp for %d\n", src_pipe);
+ IPADBG_LOW("tx comp imp for %d\n", src_pipe);
} else {
/* TX comp */
ipa3_wq_write_done_status(status->endp_src_idx, tx_pkt);
- IPADBG("tx comp exp for %d\n", status->endp_src_idx);
+ IPADBG_LOW("tx comp exp for %d\n",
+ status->endp_src_idx);
skb_pull(skb, IPA_PKT_STATUS_SIZE);
IPA_STATS_INC_CNT(ipa3_ctx->stats.stat_compl);
IPA_STATS_DEC_CNT(
@@ -2314,7 +2464,6 @@ begin:
}
};
- sys->free_skb(skb);
return rc;
}
@@ -2342,13 +2491,13 @@ static void ipa3_wan_rx_handle_splt_pyld(struct sk_buff *skb,
{
struct sk_buff *skb2;
- IPADBG("rem %d skb %d\n", sys->len_rem, skb->len);
+ IPADBG_LOW("rem %d skb %d\n", sys->len_rem, skb->len);
if (sys->len_rem <= skb->len) {
if (sys->prev_skb) {
skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
sys->len_rem);
if (likely(skb2)) {
- IPADBG(
+ IPADBG_LOW(
"removing Status element from skb and sending to WAN client");
skb_pull(skb2, IPA_PKT_STATUS_SIZE);
skb2->truesize = skb2->len +
@@ -2400,14 +2549,14 @@ static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
ipa3_wan_rx_handle_splt_pyld(skb, sys);
while (skb->len) {
- IPADBG("LEN_REM %d\n", skb->len);
+ IPADBG_LOW("LEN_REM %d\n", skb->len);
if (skb->len < IPA_PKT_STATUS_SIZE) {
IPAERR("status straddles buffer\n");
WARN_ON(1);
goto bail;
}
status = (struct ipa3_hw_pkt_status *)skb->data;
- IPADBG("STATUS opcode=%d src=%d dst=%d len=%d\n",
+ IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
status->status_opcode, status->endp_src_idx,
status->endp_dest_idx, status->pkt_len);
@@ -2436,7 +2585,7 @@ static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
goto bail;
}
if (status->pkt_len == 0) {
- IPADBG("Skip aggr close status\n");
+ IPADBG_LOW("Skip aggr close status\n");
skb_pull(skb, IPA_PKT_STATUS_SIZE);
IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_pkts);
IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_aggr_close);
@@ -2463,11 +2612,11 @@ static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
/*QMAP is BE: convert the pkt_len field from BE to LE*/
pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff);
- IPADBG("pkt_len with pad %d\n", pkt_len_with_pad);
+ IPADBG_LOW("pkt_len with pad %d\n", pkt_len_with_pad);
/*get the CHECKSUM_PROCESS bit*/
checksum_trailer_exists = status->status_mask &
IPA_HW_PKT_STATUS_MASK_CKSUM_PROCESS;
- IPADBG("checksum_trailer_exists %d\n",
+ IPADBG_LOW("checksum_trailer_exists %d\n",
checksum_trailer_exists);
frame_len = IPA_PKT_STATUS_SIZE +
@@ -2475,7 +2624,7 @@ static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
pkt_len_with_pad;
if (checksum_trailer_exists)
frame_len += IPA_DL_CHECKSUM_LENGTH;
- IPADBG("frame_len %d\n", frame_len);
+ IPADBG_LOW("frame_len %d\n", frame_len);
skb2 = skb_clone(skb, GFP_KERNEL);
if (likely(skb2)) {
@@ -2484,16 +2633,16 @@ static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
* payload split across 2 buff
*/
if (skb->len < frame_len) {
- IPADBG("SPL skb len %d len %d\n",
+ IPADBG_LOW("SPL skb len %d len %d\n",
skb->len, frame_len);
sys->prev_skb = skb2;
sys->len_rem = frame_len - skb->len;
skb_pull(skb, skb->len);
} else {
skb_trim(skb2, frame_len);
- IPADBG("rx avail for %d\n",
+ IPADBG_LOW("rx avail for %d\n",
status->endp_dest_idx);
- IPADBG(
+ IPADBG_LOW(
"removing Status element from skb and sending to WAN client");
skb_pull(skb2, IPA_PKT_STATUS_SIZE);
skb2->truesize = skb2->len +
@@ -2563,12 +2712,22 @@ void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
------------------------------------------
*/
*(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
- IPADBG("meta_data: 0x%x cb: 0x%x\n",
+ IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
metadata, *(u32 *)rx_skb->cb);
ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
}
+static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
+{
+ rx_pkt->data.dma_addr = 0;
+ ipa3_skb_recycle(rx_pkt->data.skb);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ spin_lock_bh(&rx_pkt->sys->spinlock);
+ list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
+ spin_unlock_bh(&rx_pkt->sys->spinlock);
+}
+
static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
{
struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
@@ -2593,9 +2752,8 @@ static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
*(unsigned int *)rx_skb->cb = rx_skb->len;
rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
sys->pyld_hdlr(rx_skb, sys);
+ sys->free_rx_wrapper(rx_pkt_expected);
sys->repl_hdlr(sys);
- kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt_expected);
-
}
static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
@@ -2635,7 +2793,7 @@ static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
struct ipa3_mem_buffer *mem_info)
{
- IPADBG("ENTER.\n");
+ IPADBG_LOW("ENTER.\n");
if (unlikely(list_empty(&sys->head_desc_list))) {
IPAERR("descriptor list is empty!\n");
WARN_ON(1);
@@ -2643,7 +2801,7 @@ static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
}
sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
(unsigned long)(mem_info));
- IPADBG("EXIT\n");
+ IPADBG_LOW("EXIT\n");
}
static void ipa3_wq_rx_avail(struct work_struct *work)
@@ -2676,7 +2834,8 @@ void ipa3_sps_irq_rx_no_aggr_notify(struct sps_event_notify *notify)
if (IPA_CLIENT_IS_APPS_CONS(rx_pkt->sys->ep->client))
atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
rx_pkt->len = notify->data.transfer.iovec.size;
- IPADBG("event %d notified sys=%p len=%u\n", notify->event_id,
+ IPADBG_LOW("event %d notified sys=%p len=%u\n",
+ notify->event_id,
notify->user, rx_pkt->len);
queue_work(rx_pkt->sys->wq, &rx_pkt->work);
break;
@@ -2700,6 +2859,11 @@ static int ipa3_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
return 0;
}
+static void ipa3_free_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
+{
+ kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+}
+
static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
struct ipa3_sys_context *sys)
{
@@ -2763,19 +2927,25 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
IPA_GENERIC_AGGR_PKT_LIMIT;
if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
sys->pyld_hdlr = ipa3_lan_rx_pyld_hdlr;
+ sys->repl_hdlr =
+ ipa3_replenish_rx_cache_recycle;
+ sys->free_rx_wrapper =
+ ipa3_recycle_rx_wrapper;
sys->rx_pool_sz =
IPA_GENERIC_RX_POOL_SZ;
} else if (in->client ==
IPA_CLIENT_APPS_WAN_CONS) {
sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
+ sys->free_rx_wrapper = ipa3_free_rx_wrapper;
+ if (nr_cpu_ids > 1)
+ sys->repl_hdlr =
+ ipa3_fast_replenish_rx_cache;
+ else
+ sys->repl_hdlr =
+ ipa3_replenish_rx_cache;
sys->rx_pool_sz =
ipa3_ctx->wan_rx_ring_size;
}
- if (nr_cpu_ids > 1)
- sys->repl_hdlr =
- ipa3_fast_replenish_rx_cache;
- else
- sys->repl_hdlr = ipa3_replenish_rx_cache;
} else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
IPADBG("assigning policy to client:%d",
in->client);
@@ -2796,8 +2966,10 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
sys->pyld_hdlr = NULL;
+ sys->repl_hdlr = ipa3_replenish_wlan_rx_cache;
sys->get_skb = ipa3_get_skb_ipa_rx;
sys->free_skb = ipa3_free_skb_rx;
+ sys->free_rx_wrapper = ipa3_free_rx_wrapper;
in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
} else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
IPADBG("assigning policy to client:%d",
@@ -2821,6 +2993,7 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr;
sys->get_skb = ipa3_get_skb_ipa_rx;
sys->free_skb = ipa3_free_skb_rx;
+ sys->free_rx_wrapper = ipa3_free_rx_wrapper;
sys->repl_hdlr = ipa3_replenish_rx_cache;
} else if (in->client ==
IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
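In the policy assignment above, disposal of the rx wrapper becomes a per-pipe callback: the LAN consumer installs ipa3_recycle_rx_wrapper so used wrappers are parked on sys->rcycl_list for ipa3_replenish_rx_cache_recycle, while the WAN, WLAN and ODU consumers install ipa3_free_rx_wrapper, which returns the wrapper to the kmem_cache. A self-contained userspace illustration of that split (the demo_* names and types are stand-ins, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct demo_wrapper { struct demo_wrapper *next; };

struct demo_sys {
	struct demo_wrapper *rcycl_list;	/* recycled, ready for reuse */
	void (*free_rx_wrapper)(struct demo_sys *sys, struct demo_wrapper *w);
};

/* LAN-style policy: keep the wrapper for the replenisher to reuse */
static void demo_recycle_wrapper(struct demo_sys *sys, struct demo_wrapper *w)
{
	w->next = sys->rcycl_list;
	sys->rcycl_list = w;
}

/* default policy: hand the wrapper back to the allocator */
static void demo_free_wrapper(struct demo_sys *sys, struct demo_wrapper *w)
{
	(void)sys;
	free(w);
}

int main(void)
{
	struct demo_sys lan = { NULL, demo_recycle_wrapper };
	struct demo_sys wan = { NULL, demo_free_wrapper };
	struct demo_wrapper *a = calloc(1, sizeof(*a));
	struct demo_wrapper *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	lan.free_rx_wrapper(&lan, a);	/* parked on rcycl_list */
	wan.free_rx_wrapper(&wan, b);	/* freed immediately */
	printf("lan recycled a wrapper: %s\n", lan.rcycl_list ? "yes" : "no");
	free(lan.rcycl_list);
	return 0;
}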
@@ -2869,15 +3042,15 @@ static void ipa3_tx_client_rx_notify_release(void *user1, int user2)
struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
int ep_idx = user2;
- IPADBG("Received data desc anchor:%p\n", dd);
+ IPADBG_LOW("Received data desc anchor:%p\n", dd);
atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
/* wlan host driver waits till tx complete before unload */
- IPADBG("ep=%d fifo_desc_free_count=%d\n",
+ IPADBG_LOW("ep=%d fifo_desc_free_count=%d\n",
ep_idx, atomic_read(&ipa3_ctx->ep[ep_idx].avail_fifo_desc));
- IPADBG("calling client notify callback with priv:%p\n",
+ IPADBG_LOW("calling client notify callback with priv:%p\n",
ipa3_ctx->ep[ep_idx].priv);
if (ipa3_ctx->ep[ep_idx].client_notify) {
@@ -2936,7 +3109,7 @@ int ipa3_tx_dp_mul(enum ipa_client_type src,
u32 num_desc, cnt;
int ep_idx;
- IPADBG("Received data desc anchor:%p\n", data_desc);
+ IPADBG_LOW("Received data desc anchor:%p\n", data_desc);
spin_lock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
@@ -2945,7 +3118,7 @@ int ipa3_tx_dp_mul(enum ipa_client_type src,
IPAERR("dest EP does not exist.\n");
goto fail_send;
}
- IPADBG("ep idx:%d\n", ep_idx);
+ IPADBG_LOW("ep idx:%d\n", ep_idx);
sys = ipa3_ctx->ep[ep_idx].sys;
if (unlikely(ipa3_ctx->ep[ep_idx].valid == 0)) {
@@ -2959,7 +3132,7 @@ int ipa3_tx_dp_mul(enum ipa_client_type src,
list_for_each_entry(entry, &data_desc->link, link) {
num_desc++;
}
- IPADBG("Number of Data Descriptors:%d", num_desc);
+ IPADBG_LOW("Number of Data Descriptors:%d", num_desc);
if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
IPAERR("Insufficient data descriptors available\n");
@@ -2971,32 +3144,34 @@ int ipa3_tx_dp_mul(enum ipa_client_type src,
list_for_each_entry(entry, &data_desc->link, link) {
memset(desc, 0, 2 * sizeof(struct ipa3_desc));
- IPADBG("Parsing data desc :%d\n", cnt);
+ IPADBG_LOW("Parsing data desc :%d\n", cnt);
cnt++;
((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
(u8)sys->ep->cfg.meta.qmap_id;
/* the tag field will be populated in ipa3_send() function */
- desc[0].opcode = IPA_IP_PACKET_TAG_STATUS;
+ desc[0].opcode =
+ ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
desc[0].type = IPA_IMM_CMD_DESC;
- desc[0].callback = ipa3_tag_free_buf;
+ desc[0].callback = ipa3_tag_destroy_imm;
desc[1].pyld = entry->pyld_buffer;
desc[1].len = entry->pyld_len;
desc[1].type = IPA_DATA_DESC_SKB;
desc[1].user1 = data_desc;
desc[1].user2 = ep_idx;
- IPADBG("priv:%p pyld_buf:0x%p pyld_len:%d\n",
+ IPADBG_LOW("priv:%p pyld_buf:0x%p pyld_len:%d\n",
entry->priv, desc[1].pyld, desc[1].len);
/* In case of last descriptor populate callback */
if (cnt == num_desc) {
- IPADBG("data desc:%p\n", data_desc);
+ IPADBG_LOW("data desc:%p\n", data_desc);
desc[1].callback = ipa3_tx_client_rx_notify_release;
} else {
desc[1].callback = ipa3_tx_client_rx_pkt_status;
}
- IPADBG("calling ipa3_send_one()\n");
+ IPADBG_LOW("calling ipa3_send_one()\n");
if (ipa3_send(sys, 2, desc, true)) {
IPAERR("fail to send skb\n");
sys->ep->wstats.rx_pkt_leak += (cnt-1);
@@ -3008,7 +3183,7 @@ int ipa3_tx_dp_mul(enum ipa_client_type src,
atomic_dec(&sys->ep->avail_fifo_desc);
sys->ep->wstats.rx_pkts_rcvd++;
- IPADBG("ep=%d fifo desc=%d\n",
+ IPADBG_LOW("ep=%d fifo desc=%d\n",
ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
}
@@ -3072,8 +3247,7 @@ int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
}
ep = &ipa3_ctx->ep[ipa_ep_idx];
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
if (ep->valid == 1) {
if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
@@ -3100,7 +3274,7 @@ int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
ep->priv = sys_in->priv;
*clnt_hdl = ipa_ep_idx;
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
return 0;
}
@@ -3148,7 +3322,7 @@ int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
*ipa_bam_or_gsi_hdl = ipa3_ctx->bam_handle;
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
@@ -3158,7 +3332,7 @@ int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
fail_gen2:
fail_and_disable_clocks:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
fail_gen:
return result;
}
@@ -3176,12 +3350,12 @@ int ipa3_sys_teardown(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
ipa3_disable_data_path(clnt_hdl);
ep->valid = 0;
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
@@ -3257,7 +3431,7 @@ static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify)
{
struct ipa3_tx_pkt_wrapper *tx_pkt;
- IPADBG("event %d notified\n", notify->evt_id);
+ IPADBG_LOW("event %d notified\n", notify->evt_id);
switch (notify->evt_id) {
case GSI_CHAN_EVT_EOT:
@@ -3279,7 +3453,7 @@ static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
IPAERR("gsi notify is NULL.\n");
return;
}
- IPADBG("event %d notified\n", notify->evt_id);
+ IPADBG_LOW("event %d notified\n", notify->evt_id);
sys = (struct ipa3_sys_context *)notify->chan_user_data;
rx_pkt_expected = list_first_entry(&sys->head_desc_list,
@@ -3321,7 +3495,7 @@ static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
IPAERR("gsi notify is NULL.\n");
return;
}
- IPADBG("event %d notified\n", notify->evt_id);
+ IPADBG_LOW("event %d notified\n", notify->evt_id);
sys = (struct ipa3_sys_context *)notify->chan_user_data;
if (sys->ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
@@ -3485,31 +3659,30 @@ fail_alloc_evt_ring:
static int ipa_populate_tag_field(struct ipa3_desc *desc,
struct ipa3_tx_pkt_wrapper *tx_pkt,
- struct ipa3_ip_packet_tag_status **tag_ret)
+ struct ipahal_imm_cmd_pyld **tag_pyld_ret)
{
- struct ipa3_ip_packet_tag_status *tag = NULL;
- int tag_size;
+ struct ipahal_imm_cmd_pyld *tag_pyld;
+ struct ipahal_imm_cmd_ip_packet_tag_status tag_cmd = {0};
- tag = (struct ipa3_ip_packet_tag_status *)desc->pyld;
- tag_size = sizeof(struct ipa3_ip_packet_tag_status);
/* populate tag field only if it is NULL */
- if (tag == NULL) {
- tag = kzalloc(tag_size, GFP_ATOMIC);
- if (!tag) {
- IPAERR("Failed to alloc tag.\n");
+ if (desc->pyld == NULL) {
+ tag_cmd.tag = pointer_to_tag_wa(tx_pkt);
+ tag_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &tag_cmd, true);
+ if (unlikely(!tag_pyld)) {
+ IPAERR("Failed to construct ip_packet_tag_status\n");
return -EFAULT;
}
/*
* This is for 32-bit pointer, will need special
* handling if 64-bit pointer is used
*/
- tag->tag = pointer_to_tag_wa(tx_pkt);
- IPADBG("tx_pkt sent in tag: 0x%p\n", tx_pkt);
- desc->pyld = tag;
- desc->len = sizeof(*tag);
- desc->user1 = tag;
+ IPADBG_LOW("tx_pkt sent in tag: 0x%p\n", tx_pkt);
+ desc->pyld = tag_pyld->data;
+ desc->len = tag_pyld->len;
+ desc->user1 = tag_pyld;
- *tag_ret = tag;
+ *tag_pyld_ret = tag_pyld;
}
return 0;
}
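The hunk above replaces the open-coded tag-status payload with one built by ipahal, so the descriptor now points into an opaque ipahal_imm_cmd_pyld that must be destroyed after completion (ipa3_tag_destroy_imm is installed as the callback for that). A compact sketch of the construct/attach pattern, assuming it is compiled inside this driver with ipa_i.h and ipahal/ipahal.h in scope; demo_fill_tag_desc() is a hypothetical helper, not an existing driver function:

#include "ipa_i.h"
#include "ipahal/ipahal.h"

/* Hypothetical helper: build an IP_PACKET_TAG_STATUS immediate command
 * descriptor the same way ipa_populate_tag_field() does above. */
static int demo_fill_tag_desc(struct ipa3_desc *desc, u64 tag)
{
	struct ipahal_imm_cmd_ip_packet_tag_status tag_cmd = { 0 };
	struct ipahal_imm_cmd_pyld *pyld;

	tag_cmd.tag = tag;
	/* 'true' mirrors the atomic-context construction used above */
	pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_TAG_STATUS,
		&tag_cmd, true);
	if (!pyld)
		return -ENOMEM;

	desc->opcode =
		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
	desc->pyld = pyld->data;
	desc->len = pyld->len;
	desc->type = IPA_IMM_CMD_DESC;
	desc->user1 = pyld;	/* released later via ipahal_destroy_imm_cmd() */
	return 0;
}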
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 4ca6d0834f2c..a7c036887880 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,7 @@
*/
#include "ipa_i.h"
+#include "ipahal/ipahal.h"
#define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1)
#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
@@ -246,12 +247,12 @@ static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
}
}
- IPADBG("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
+ IPADBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
en_rule,
hdr->u.hdr.action,
hdr->u.hdr.rt_tbl_idx,
hdr->u.hdr.retain_hdr);
- IPADBG("priority=%d, rule_id=%d\n",
+ IPADBG_LOW("priority=%d, rule_id=%d\n",
hdr->u.hdr.priority,
hdr->u.hdr.rule_id);
@@ -274,7 +275,7 @@ static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
struct ipa3_flt_tbl *tbl;
int i;
- IPADBG("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt);
+ IPADBG_LOW("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt);
for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
if (!ipa_is_ep_support_flt(i))
@@ -282,7 +283,7 @@ static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
tbl = &ipa3_ctx->flt_tbl[i][ip];
if (tbl->prev_mem[rlt].phys_base) {
- IPADBG("reaping flt tbl (prev) pipe=%d\n", i);
+ IPADBG_LOW("reaping flt tbl (prev) pipe=%d\n", i);
dma_free_coherent(ipa3_ctx->pdev,
tbl->prev_mem[rlt].size,
tbl->prev_mem[rlt].base,
@@ -293,7 +294,8 @@ static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
if (list_empty(&tbl->head_flt_rule_list)) {
if (tbl->curr_mem[rlt].phys_base) {
- IPADBG("reaping flt tbl (curr) pipe=%d\n", i);
+ IPADBG_LOW("reaping flt tbl (curr) pipe=%d\n",
+ i);
dma_free_coherent(ipa3_ctx->pdev,
tbl->curr_mem[rlt].size,
tbl->curr_mem[rlt].base,
@@ -391,7 +393,7 @@ static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
IPAERR("failed to calculate HW FLT rule size\n");
return -EPERM;
}
- IPADBG("pipe %d hw_len %d priority %u\n",
+ IPADBG_LOW("pipe %d hw_len %d priority %u\n",
pipe_idx, entry->hw_len, entry->prio);
if (entry->rule.hashable)
@@ -402,7 +404,8 @@ static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
if ((tbl->sz[IPA_RULE_HASHABLE] +
tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
- IPADBG("flt tbl pipe %d is with zero total size\n", pipe_idx);
+ IPADBG_LOW("flt tbl pipe %d is with zero total size\n",
+ pipe_idx);
return 0;
}
@@ -412,7 +415,7 @@ static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
if (tbl->sz[IPA_RULE_NON_HASHABLE])
tbl->sz[IPA_RULE_NON_HASHABLE] += IPA_HW_TBL_HDR_WIDTH;
- IPADBG("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx,
+ IPADBG_LOW("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx,
tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
return 0;
@@ -648,7 +651,7 @@ static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
}
ipa_get_flt_tbl_lcl_bdy_size(ip, &hash_bdy_sz, &nhash_bdy_sz);
- IPADBG("total flt tbl local body sizes: hash %u nhash %u\n",
+ IPADBG_LOW("total flt tbl local body sizes: hash %u nhash %u\n",
hash_bdy_sz, nhash_bdy_sz);
hash_bdy->size = hash_bdy_sz + IPA_HW_TBL_BLK_SIZE_ALIGNMENT;
@@ -744,32 +747,31 @@ static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
/**
* ipa_flt_alloc_cmd_buffers() - alloc descriptors and imm cmds
- * buffers for headers and bodies updates via imm cmds
- * also allocate descriptor for the flushing imm cmd
+ * payload pointer buffers for the flt table headers and bodies,
+ * as well as a slot for the flush immediate command.
* @ipt: the ip address family type
* @desc: [OUT] descriptor buffer
- * @cmd: [OUT] imm commands buffer
+ * @cmd_pyld: [OUT] immediate command payload pointers buffer
*
* Return: 0 on success, negative on failure
*/
static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip,
- struct ipa3_desc **desc, struct ipa3_hw_imm_cmd_dma_shared_mem **cmd)
+ struct ipa3_desc **desc, struct ipahal_imm_cmd_pyld ***cmd_pyld)
{
u16 entries;
/* +3: 2 for bodies (hashable and non-hashable) and 1 for flushing */
entries = (ipa3_ctx->ep_flt_num) * 2 + 3;
+
*desc = kcalloc(entries, sizeof(**desc), GFP_ATOMIC);
if (*desc == NULL) {
IPAERR("fail to alloc desc blob ip %d\n", ip);
goto fail_desc_alloc;
}
- /* +2: for bodies (hashable and non-hashable) */
- entries = (ipa3_ctx->ep_flt_num) * 2 + 2;
- *cmd = kcalloc(entries, sizeof(**cmd), GFP_ATOMIC);
- if (*cmd == NULL) {
- IPAERR("fail to alloc cmd blob ip %d\n", ip);
+ *cmd_pyld = kcalloc(entries, sizeof(**cmd_pyld), GFP_ATOMIC);
+ if (*cmd_pyld == NULL) {
+ IPAERR("fail to alloc cmd pyld blob ip %d\n", ip);
goto fail_cmd_alloc;
}
@@ -791,18 +793,18 @@ fail_desc_alloc:
static bool ipa_flt_skip_pipe_config(int pipe)
{
if (ipa_is_modem_pipe(pipe)) {
- IPADBG("skip %d - modem owned pipe\n", pipe);
+ IPADBG_LOW("skip %d - modem owned pipe\n", pipe);
return true;
}
if (ipa3_ctx->skip_ep_cfg_shadow[pipe]) {
- IPADBG("skip %d\n", pipe);
+ IPADBG_LOW("skip %d\n", pipe);
return true;
}
if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == pipe
&& ipa3_ctx->modem_cfg_emb_pipe_flt)) {
- IPADBG("skip %d\n", pipe);
+ IPADBG_LOW("skip %d\n", pipe);
return true;
}
@@ -824,14 +826,17 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
struct ipa3_mem_buffer hash_hdr, nhash_hdr;
int rc = 0;
struct ipa3_desc *desc;
- struct ipa3_hw_imm_cmd_dma_shared_mem *mem_cmd;
- struct ipa3_register_write reg_write_cmd = {0};
+ struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+ struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
+ struct ipahal_imm_cmd_pyld **cmd_pyld;
int num_cmd = 0;
int i;
int hdr_idx;
u32 lcl_hash_hdr, lcl_nhash_hdr;
u32 lcl_hash_bdy, lcl_nhash_bdy;
bool lcl_hash, lcl_nhash;
+ struct ipahal_reg_fltrt_hash_flush flush;
+ struct ipahal_reg_valmask valmask;
if (ip == IPA_IP_v4) {
lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
@@ -878,29 +883,40 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
goto fail_size_valid;
}
- if (ipa_flt_alloc_cmd_buffers(ip, &desc, &mem_cmd)) {
+ if (ipa_flt_alloc_cmd_buffers(ip, &desc, &cmd_pyld)) {
rc = -ENOMEM;
goto fail_size_valid;
}
/* flushing ipa internal hashable flt rules cache */
- reg_write_cmd.skip_pipeline_clear = 0;
- reg_write_cmd.pipeline_clear_options = IPA_HPS_CLEAR;
- reg_write_cmd.offset = IPA_FILT_ROUT_HASH_FLUSH_OFST;
- reg_write_cmd.value = (ip == IPA_IP_v4) ?
- (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT) :
- (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT);
- reg_write_cmd.value_mask = reg_write_cmd.value;
- desc[0].opcode = IPA_REGISTER_WRITE;
- desc[0].pyld = &reg_write_cmd;
- desc[0].len = sizeof(reg_write_cmd);
+ memset(&flush, 0, sizeof(flush));
+ if (ip == IPA_IP_v4)
+ flush.v4_flt = true;
+ else
+ flush.v6_flt = true;
+ ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+ reg_write_cmd.skip_pipeline_clear = false;
+ reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
+ reg_write_cmd.value = valmask.val;
+ reg_write_cmd.value_mask = valmask.mask;
+ cmd_pyld[0] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
+ if (!cmd_pyld[0]) {
+ IPAERR("fail construct register_write imm cmd: IP %d\n", ip);
+ rc = -EFAULT;
+ goto fail_reg_write_construct;
+ }
+ desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[0].pyld = cmd_pyld[0]->data;
+ desc[0].len = cmd_pyld[0]->len;
desc[0].type = IPA_IMM_CMD_DESC;
num_cmd++;
hdr_idx = 0;
for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
if (!ipa_is_ep_support_flt(i)) {
- IPADBG("skip %d - not filtering pipe\n", i);
+ IPADBG_LOW("skip %d - not filtering pipe\n", i);
continue;
}
@@ -909,83 +925,115 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
continue;
}
- IPADBG("Prepare imm cmd for hdr at index %d for pipe %d\n",
+ IPADBG_LOW("Prepare imm cmd for hdr at index %d for pipe %d\n",
hdr_idx, i);
- mem_cmd[num_cmd-1].skip_pipeline_clear = 0;
- mem_cmd[num_cmd-1].pipeline_clear_options = IPA_HPS_CLEAR;
- mem_cmd[num_cmd-1].size = IPA_HW_TBL_HDR_WIDTH;
- mem_cmd[num_cmd-1].system_addr = nhash_hdr.phys_base +
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = IPA_HW_TBL_HDR_WIDTH;
+ mem_cmd.system_addr = nhash_hdr.phys_base +
hdr_idx * IPA_HW_TBL_HDR_WIDTH;
- mem_cmd[num_cmd-1].local_addr = lcl_nhash_hdr +
+ mem_cmd.local_addr = lcl_nhash_hdr +
hdr_idx * IPA_HW_TBL_HDR_WIDTH;
- desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmd].pyld = &mem_cmd[num_cmd-1];
- desc[num_cmd].len =
- sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+ ip);
+ goto fail_imm_cmd_construct;
+ }
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd++].type = IPA_IMM_CMD_DESC;
- mem_cmd[num_cmd-1].skip_pipeline_clear = 0;
- mem_cmd[num_cmd-1].pipeline_clear_options = IPA_HPS_CLEAR;
- mem_cmd[num_cmd-1].size = IPA_HW_TBL_HDR_WIDTH;
- mem_cmd[num_cmd-1].system_addr = hash_hdr.phys_base +
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = IPA_HW_TBL_HDR_WIDTH;
+ mem_cmd.system_addr = hash_hdr.phys_base +
hdr_idx * IPA_HW_TBL_HDR_WIDTH;
- mem_cmd[num_cmd-1].local_addr = lcl_hash_hdr +
+ mem_cmd.local_addr = lcl_hash_hdr +
hdr_idx * IPA_HW_TBL_HDR_WIDTH;
- desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmd].pyld = &mem_cmd[num_cmd-1];
- desc[num_cmd].len =
- sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+ ip);
+ goto fail_imm_cmd_construct;
+ }
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd++].type = IPA_IMM_CMD_DESC;
hdr_idx++;
}
if (lcl_nhash) {
- mem_cmd[num_cmd-1].skip_pipeline_clear = 0;
- mem_cmd[num_cmd-1].pipeline_clear_options = IPA_HPS_CLEAR;
- mem_cmd[num_cmd-1].size = nhash_bdy.size;
- mem_cmd[num_cmd-1].system_addr = nhash_bdy.phys_base;
- mem_cmd[num_cmd-1].local_addr = lcl_nhash_bdy;
- desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmd].pyld = &mem_cmd[num_cmd-1];
- desc[num_cmd].len =
- sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = nhash_bdy.size;
+ mem_cmd.system_addr = nhash_bdy.phys_base;
+ mem_cmd.local_addr = lcl_nhash_bdy;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+ ip);
+ goto fail_imm_cmd_construct;
+ }
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd++].type = IPA_IMM_CMD_DESC;
}
if (lcl_hash) {
- mem_cmd[num_cmd-1].skip_pipeline_clear = 0;
- mem_cmd[num_cmd-1].pipeline_clear_options = IPA_HPS_CLEAR;
- mem_cmd[num_cmd-1].size = hash_bdy.size;
- mem_cmd[num_cmd-1].system_addr = hash_bdy.phys_base;
- mem_cmd[num_cmd-1].local_addr = lcl_hash_bdy;
- desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmd].pyld = &mem_cmd[num_cmd-1];
- desc[num_cmd].len =
- sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = hash_bdy.size;
+ mem_cmd.system_addr = hash_bdy.phys_base;
+ mem_cmd.local_addr = lcl_hash_bdy;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+ ip);
+ goto fail_imm_cmd_construct;
+ }
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd++].type = IPA_IMM_CMD_DESC;
}
if (ipa3_send_cmd(num_cmd, desc)) {
IPAERR("fail to send immediate command\n");
rc = -EFAULT;
- goto fail_send_cmd;
+ goto fail_imm_cmd_construct;
}
- IPADBG("Hashable HEAD\n");
+ IPADBG_LOW("Hashable HEAD\n");
IPA_DUMP_BUFF(hash_hdr.base, hash_hdr.phys_base, hash_hdr.size);
- IPADBG("Non-Hashable HEAD\n");
+ IPADBG_LOW("Non-Hashable HEAD\n");
IPA_DUMP_BUFF(nhash_hdr.base, nhash_hdr.phys_base, nhash_hdr.size);
if (hash_bdy.size) {
- IPADBG("Hashable BODY\n");
+ IPADBG_LOW("Hashable BODY\n");
IPA_DUMP_BUFF(hash_bdy.base,
hash_bdy.phys_base, hash_bdy.size);
}
if (nhash_bdy.size) {
- IPADBG("Non-Hashable BODY\n");
+ IPADBG_LOW("Non-Hashable BODY\n");
IPA_DUMP_BUFF(nhash_bdy.base,
nhash_bdy.phys_base, nhash_bdy.size);
}
@@ -993,9 +1041,12 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
__ipa_reap_sys_flt_tbls(ip, IPA_RULE_HASHABLE);
__ipa_reap_sys_flt_tbls(ip, IPA_RULE_NON_HASHABLE);
-fail_send_cmd:
+fail_imm_cmd_construct:
+ for (i = 0 ; i < num_cmd ; i++)
+ ipahal_destroy_imm_cmd(cmd_pyld[i]);
+fail_reg_write_construct:
kfree(desc);
- kfree(mem_cmd);
+ kfree(cmd_pyld);
fail_size_valid:
dma_free_coherent(ipa3_ctx->pdev, hash_hdr.size,
hash_hdr.base, hash_hdr.phys_base);
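One detail worth noting in the flow above: fail_imm_cmd_construct is reached on the success path as well, because once ipa3_send_cmd() has returned, the constructed payload copies are no longer needed and every entry built so far must be destroyed. A distilled sketch of that send-then-always-release shape, assuming driver context; demo_send_and_release() is a hypothetical helper, not a driver function:

#include "ipa_i.h"
#include "ipahal/ipahal.h"

/* Hypothetical: send num_cmd descriptors, then release the ipahal
 * payload objects whether or not the send succeeded. */
static int demo_send_and_release(struct ipa3_desc *desc,
	struct ipahal_imm_cmd_pyld **cmd_pyld, int num_cmd)
{
	int rc = 0;
	int i;

	if (ipa3_send_cmd(num_cmd, desc))
		rc = -EFAULT;

	for (i = 0; i < num_cmd; i++)
		ipahal_destroy_imm_cmd(cmd_pyld[i]);

	return rc;
}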
@@ -1112,7 +1163,7 @@ static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
}
*rule_hdl = id;
entry->id = id;
- IPADBG("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
+ IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
return 0;
}
@@ -1320,7 +1371,7 @@ static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
return -EINVAL;
tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip];
- IPADBG("add ep flt rule ip=%d ep=%d\n", ip, ep);
+ IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep);
return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
}
@@ -1693,48 +1744,6 @@ void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
mutex_unlock(&ipa3_ctx->lock);
}
-static u32 ipa3_build_flt_tuple_mask(struct ipa3_hash_tuple *tpl)
-{
- u32 msk = 0;
-
- IPA_SETFIELD_IN_REG(msk, tpl->src_id,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK
- );
-
- IPA_SETFIELD_IN_REG(msk, tpl->src_ip_addr,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK
- );
-
- IPA_SETFIELD_IN_REG(msk, tpl->dst_ip_addr,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK
- );
-
- IPA_SETFIELD_IN_REG(msk, tpl->src_port,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK
- );
-
- IPA_SETFIELD_IN_REG(msk, tpl->dst_port,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK
- );
-
- IPA_SETFIELD_IN_REG(msk, tpl->protocol,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK
- );
-
- IPA_SETFIELD_IN_REG(msk, tpl->meta_data,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK
- );
-
- return msk;
-}
-
/**
* ipa3_set_flt_tuple_mask() - Sets the flt tuple masking for the given pipe
* Pipe must be for AP EP (not modem) and support filtering
@@ -1745,10 +1754,9 @@ static u32 ipa3_build_flt_tuple_mask(struct ipa3_hash_tuple *tpl)
* Returns: 0 on success, negative on failure
*
*/
-int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipa3_hash_tuple *tuple)
+int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple)
{
- u32 val;
- u32 mask;
+ struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
if (!tuple) {
IPAERR("bad tuple\n");
@@ -1770,19 +1778,11 @@ int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipa3_hash_tuple *tuple)
return -EINVAL;
}
- val = ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OFST(pipe_idx));
-
- val &= 0xFFFF0000; /* clear 16 LSBs - flt bits */
-
- mask = ipa3_build_flt_tuple_mask(tuple);
- mask &= 0x0000FFFF;
-
- val |= mask;
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OFST(pipe_idx),
- val);
+ ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+ pipe_idx, &fltrt_tuple);
+ fltrt_tuple.flt = *tuple;
+ ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+ pipe_idx, &fltrt_tuple);
return 0;
}
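With ipahal, updating the tuple mask becomes a field-level read-modify-write of IPA_ENDP_FILTER_ROUTER_HSH_CFG_n instead of manual shift/mask arithmetic on the raw register value. A small sketch of a caller enabling hashing on source and destination IP only, assuming ipahal_reg_hash_tuple keeps the same boolean members as the removed ipa3_hash_tuple; demo_hash_on_ip_only() is illustrative:

#include "ipa_i.h"

/* Hypothetical caller: hash filter lookups on src/dst IP address only. */
static int demo_hash_on_ip_only(int pipe_idx)
{
	struct ipahal_reg_hash_tuple tuple = { 0 };

	tuple.src_ip_addr = true;
	tuple.dst_ip_addr = true;
	/* ports, protocol, metadata and src_id stay unmasked (false) */

	return ipa3_set_flt_tuple_mask(pipe_idx, &tuple);
}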
@@ -1866,7 +1866,8 @@ int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
IPADBG("tbl_entry_in_hdr_ofst=0x%llx\n", tbl_entry_in_hdr_ofst);
tbl_entry_in_hdr = ipa3_ctx->mmio +
- IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(0) + tbl_entry_in_hdr_ofst;
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+ tbl_entry_in_hdr_ofst;
/* for tables resides in DDR access it from the virtual memory */
if (*tbl_entry_in_hdr & 0x1) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index 6e8e43096f8b..b8baa53ae9e9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,6 +11,7 @@
*/
#include "ipa_i.h"
+#include "ipahal/ipahal.h"
static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60};
static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};
@@ -43,7 +44,7 @@ static int ipa3_generate_hdr_hw_tbl(struct ipa3_mem_buffer *mem)
IPAERR("hdr tbl empty\n");
return -EPERM;
}
- IPADBG("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);
+ IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);
mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
&mem->phys_base, GFP_KERNEL);
@@ -57,7 +58,7 @@ static int ipa3_generate_hdr_hw_tbl(struct ipa3_mem_buffer *mem)
link) {
if (entry->is_hdr_proc_ctx)
continue;
- IPADBG("hdr of len %d ofst=%d\n", entry->hdr_len,
+ IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
entry->offset_entry->offset);
memcpy(mem->base + entry->offset_entry->offset, entry->hdr,
entry->hdr_len);
@@ -74,7 +75,7 @@ static void ipa3_hdr_proc_ctx_to_hw_format(struct ipa3_mem_buffer *mem,
list_for_each_entry(entry,
&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
link) {
- IPADBG("processing type %d ofst=%d\n",
+ IPADBG_LOW("processing type %d ofst=%d\n",
entry->type, entry->offset_entry->offset);
if (entry->type == IPA_HDR_PROC_NONE) {
struct ipa3_hdr_proc_ctx_add_hdr_seq *ctx;
@@ -88,7 +89,7 @@ static void ipa3_hdr_proc_ctx_to_hw_format(struct ipa3_mem_buffer *mem,
entry->hdr->phys_base :
hdr_base_addr +
entry->hdr->offset_entry->offset;
- IPADBG("header address 0x%x\n",
+ IPADBG_LOW("header address 0x%x\n",
ctx->hdr_add.hdr_addr);
ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
ctx->end.length = 0;
@@ -105,7 +106,7 @@ static void ipa3_hdr_proc_ctx_to_hw_format(struct ipa3_mem_buffer *mem,
entry->hdr->phys_base :
hdr_base_addr +
entry->hdr->offset_entry->offset;
- IPADBG("header address 0x%x\n",
+ IPADBG_LOW("header address 0x%x\n",
ctx->hdr_add.hdr_addr);
ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
ctx->cmd.length = 0;
@@ -117,7 +118,7 @@ static void ipa3_hdr_proc_ctx_to_hw_format(struct ipa3_mem_buffer *mem,
ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII;
else if (entry->type == IPA_HDR_PROC_802_3_TO_802_3)
ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3;
- IPADBG("command id %d\n", ctx->cmd.value);
+ IPADBG_LOW("command id %d\n", ctx->cmd.value);
ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
ctx->end.length = 0;
ctx->end.value = 0;
@@ -144,7 +145,7 @@ static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
/* make sure table is aligned */
mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
- IPADBG("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);
+ IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);
mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
&mem->phys_base, GFP_KERNEL);
@@ -177,10 +178,12 @@ int __ipa_commit_hdr_v3_0(void)
struct ipa3_mem_buffer hdr_mem;
struct ipa3_mem_buffer ctx_mem;
struct ipa3_mem_buffer aligned_ctx_mem;
- struct ipa3_hdr_init_system hdr_init_cmd = {0};
- struct ipa3_hw_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
- struct ipa3_hw_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
- struct ipa3_register_write reg_write_cmd = {0};
+ struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
+ struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
+ struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+ struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
+ struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
+ struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
int rc = -EFAULT;
u32 proc_ctx_size;
u32 proc_ctx_ofst;
@@ -205,17 +208,25 @@ int __ipa_commit_hdr_v3_0(void)
IPA_MEM_PART(apps_hdr_size));
goto end;
} else {
- dma_cmd_hdr.skip_pipeline_clear = 0;
- dma_cmd_hdr.pipeline_clear_options = IPA_HPS_CLEAR;
+ dma_cmd_hdr.is_read = false; /* write operation */
+ dma_cmd_hdr.skip_pipeline_clear = false;
+ dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
dma_cmd_hdr.system_addr = hdr_mem.phys_base;
dma_cmd_hdr.size = hdr_mem.size;
dma_cmd_hdr.local_addr =
ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(apps_hdr_ofst);
- desc[0].opcode = IPA_DMA_SHARED_MEM;
- desc[0].pyld = &dma_cmd_hdr;
- desc[0].len =
- sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
+ hdr_cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM,
+ &dma_cmd_hdr, false);
+ if (!hdr_cmd_pyld) {
+ IPAERR("fail construct dma_shared_mem cmd\n");
+ goto end;
+ }
+ desc[0].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[0].pyld = hdr_cmd_pyld->data;
+ desc[0].len = hdr_cmd_pyld->len;
}
} else {
if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
@@ -224,9 +235,17 @@ int __ipa_commit_hdr_v3_0(void)
goto end;
} else {
hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
- desc[0].opcode = IPA_HDR_INIT_SYSTEM;
- desc[0].pyld = &hdr_init_cmd;
- desc[0].len = sizeof(struct ipa3_hdr_init_system);
+ hdr_cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_HDR_INIT_SYSTEM,
+ &hdr_init_cmd, false);
+ if (!hdr_cmd_pyld) {
+ IPAERR("fail construct hdr_init_system cmd\n");
+ goto end;
+ }
+ desc[0].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_HDR_INIT_SYSTEM);
+ desc[0].pyld = hdr_cmd_pyld->data;
+ desc[0].len = hdr_cmd_pyld->len;
}
}
desc[0].type = IPA_IMM_CMD_DESC;
@@ -241,17 +260,25 @@ int __ipa_commit_hdr_v3_0(void)
proc_ctx_size);
goto end;
} else {
- dma_cmd_ctx.skip_pipeline_clear = 0;
- dma_cmd_ctx.pipeline_clear_options = IPA_HPS_CLEAR;
+ dma_cmd_ctx.is_read = false; /* Write operation */
+ dma_cmd_ctx.skip_pipeline_clear = false;
+ dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
dma_cmd_ctx.size = aligned_ctx_mem.size;
dma_cmd_ctx.local_addr =
ipa3_ctx->smem_restricted_bytes +
proc_ctx_ofst;
- desc[1].opcode = IPA_DMA_SHARED_MEM;
- desc[1].pyld = &dma_cmd_ctx;
- desc[1].len =
- sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
+ ctx_cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM,
+ &dma_cmd_ctx, false);
+ if (!ctx_cmd_pyld) {
+ IPAERR("fail construct dma_shared_mem cmd\n");
+ goto end;
+ }
+ desc[1].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[1].pyld = ctx_cmd_pyld->data;
+ desc[1].len = ctx_cmd_pyld->len;
}
} else {
proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
@@ -261,15 +288,26 @@ int __ipa_commit_hdr_v3_0(void)
proc_ctx_size_ddr);
goto end;
} else {
- reg_write_cmd.skip_pipeline_clear = 0;
- reg_write_cmd.pipeline_clear_options = IPA_HPS_CLEAR;
- reg_write_cmd.offset = IPA_SYS_PKT_PROC_CNTXT_BASE_OFST;
+ reg_write_cmd.skip_pipeline_clear = false;
+ reg_write_cmd.pipeline_clear_options =
+ IPAHAL_HPS_CLEAR;
+ reg_write_cmd.offset =
+ ipahal_get_reg_ofst(
+ IPA_SYS_PKT_PROC_CNTXT_BASE);
reg_write_cmd.value = aligned_ctx_mem.phys_base;
reg_write_cmd.value_mask =
~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
- desc[1].pyld = &reg_write_cmd;
- desc[1].opcode = IPA_REGISTER_WRITE;
- desc[1].len = sizeof(reg_write_cmd);
+ ctx_cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_REGISTER_WRITE,
+ &reg_write_cmd, false);
+ if (!ctx_cmd_pyld) {
+ IPAERR("fail construct register_write cmd\n");
+ goto end;
+ }
+ desc[1].opcode = ipahal_imm_cmd_get_opcode(
+ IPA_IMM_CMD_REGISTER_WRITE);
+ desc[1].pyld = ctx_cmd_pyld->data;
+ desc[1].len = ctx_cmd_pyld->len;
}
}
desc[1].type = IPA_IMM_CMD_DESC;
@@ -309,6 +347,12 @@ int __ipa_commit_hdr_v3_0(void)
}
end:
+ if (ctx_cmd_pyld)
+ ipahal_destroy_imm_cmd(ctx_cmd_pyld);
+
+ if (hdr_cmd_pyld)
+ ipahal_destroy_imm_cmd(hdr_cmd_pyld);
+
return rc;
}
@@ -323,7 +367,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
int id;
int needed_len;
- IPADBG("processing type %d hdr_hdl %d\n",
+ IPADBG_LOW("processing type %d hdr_hdl %d\n",
proc_ctx->type, proc_ctx->hdr_hdl);
if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
@@ -394,7 +438,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
entry->offset_entry = offset;
list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
htbl->proc_ctx_cnt++;
- IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
+ IPADBG_LOW("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
htbl->proc_ctx_cnt, offset->offset);
id = ipa3_id_alloc(entry);
@@ -510,12 +554,12 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
list_add(&entry->link, &htbl->head_hdr_entry_list);
htbl->hdr_cnt++;
if (entry->is_hdr_proc_ctx)
- IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+ IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
hdr->hdr_len,
htbl->hdr_cnt,
&entry->phys_base);
else
- IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
+ IPADBG_LOW("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
hdr->hdr_len,
htbl->hdr_cnt,
entry->offset_entry->offset);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
index fdde9eb04aa9..a01fced6d12a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -16,25 +16,6 @@
/* This header defines various HW related data types */
-/* immediate command op-codes */
-#define IPA_DECIPH_INIT (1)
-#define IPA_PPP_FRM_INIT (2)
-#define IPA_IP_V4_FILTER_INIT (3)
-#define IPA_IP_V6_FILTER_INIT (4)
-#define IPA_IP_V4_NAT_INIT (5)
-#define IPA_IP_V6_NAT_INIT (6)
-#define IPA_IP_V4_ROUTING_INIT (7)
-#define IPA_IP_V6_ROUTING_INIT (8)
-#define IPA_HDR_INIT_LOCAL (9)
-#define IPA_HDR_INIT_SYSTEM (10)
-#define IPA_DECIPH_SETUP (11)
-#define IPA_REGISTER_WRITE (12)
-#define IPA_NAT_DMA (14)
-#define IPA_IP_PACKET_INIT (16)
-#define IPA_DMA_SHARED_MEM (19)
-#define IPA_IP_PACKET_TAG_STATUS (20)
-#define IPA_DMA_TASK_32B_ADDR(num_buff) (17 + ((num_buff) << 8))
-
/* Processing context TLV type */
#define IPA_PROC_CTX_TLV_TYPE_END 0
#define IPA_PROC_CTX_TLV_TYPE_HDR_ADD 1
@@ -114,111 +95,6 @@ struct ipa3_rt_rule_hw_hdr {
};
/**
- * struct ipa3_ip_v4_filter_init - IPA_IP_V4_FILTER_INIT command payload
- * @hash_rules_addr: System memory address of IPv4 hashable rules
- * @hash_rules_size: Size in bytes of the hashable rules
- * @hash_local_addr: Shared memory address of IPv4 hashable rules
- * @nhash_rules_size: Size in bytes of the non-hashable rules
- * @nhash_local_addr: Shared memory address of IPv4 non-hashable rules
- * @rsvd: reserved
- * @nhash_rules_addr: System memory address of IPv4 non-hashable rules
- */
-struct ipa3_ip_v4_filter_init {
- u64 hash_rules_addr:64;
- u64 hash_rules_size:12;
- u64 hash_local_addr:16;
- u64 nhash_rules_size:12;
- u64 nhash_local_addr:16;
- u64 rsvd:8;
- u64 nhash_rules_addr:64;
-};
-
-/**
- * struct ipa3_ip_v6_filter_init - IPA_IP_V6_FILTER_INIT command payload
- * @hash_rules_addr: System memory address of IPv6 hashable rules
- * @hash_rules_size: Size in bytes of the hashable rules
- * @hash_local_addr: Shared memory address of IPv6 hashable rules
- * @nhash_rules_size: Size in bytes of the non-hashable rules
- * @nhash_local_addr: Shared memory address of IPv6 non-hashable rules
- * @rsvd: reserved
- * @nhash_rules_addr: System memory address of IPv6 non-hashable rules
- */
-struct ipa3_ip_v6_filter_init {
- u64 hash_rules_addr:64;
- u64 hash_rules_size:12;
- u64 hash_local_addr:16;
- u64 nhash_rules_size:12;
- u64 nhash_local_addr:16;
- u64 rsvd:8;
- u64 nhash_rules_addr:64;
-};
-
-/**
- * struct ipa3_ip_v4_routing_init - IPA_IP_V4_ROUTING_INIT command payload
- * @hash_rules_addr: System memory address of IPv4 hashable rules
- * @hash_rules_size: Size in bytes of the hashable rules
- * @hash_local_addr: Shared memory address of IPv4 hashable rules
- * @nhash_rules_size: Size in bytes of the non-hashable rules
- * @nhash_local_addr: Shared memory address of IPv4 non-hashable rules
- * @rsvd: reserved
- * @nhash_rules_addr: System memory address of IPv4 non-hashable rules
- */
-struct ipa3_ip_v4_routing_init {
- u64 hash_rules_addr:64;
- u64 hash_rules_size:12;
- u64 hash_local_addr:16;
- u64 nhash_rules_size:12;
- u64 nhash_local_addr:16;
- u64 rsvd:8;
- u64 nhash_rules_addr:64;
-};
-
-/**
- * struct ipa3_ip_v6_routing_init - IPA_IP_V6_ROUTING_INIT command payload
- * @hash_rules_addr: System memory address of IPv6 hashable rules
- * @hash_rules_size: Size in bytes of the hashable rules
- * @hash_local_addr: Shared memory address of IPv6 hashable rules
- * @nhash_rules_size: Size in bytes of the non-hashable rules
- * @nhash_local_addr: Shared memory address of IPv6 non-hashable rules
- * @rsvd: reserved
- * @nhash_rules_addr: System memory address of IPv6 non-hashable rules
- */
-struct ipa3_ip_v6_routing_init {
- u64 hash_rules_addr:64;
- u64 hash_rules_size:12;
- u64 hash_local_addr:16;
- u64 nhash_rules_size:12;
- u64 nhash_local_addr:16;
- u64 rsvd:8;
- u64 nhash_rules_addr:64;
-};
-
-/**
- * struct ipa3_hdr_init_local - IPA_HDR_INIT_LOCAL command payload
- * @hdr_table_src_addr: word address of header table in system memory where the
- * table starts (use as source for memory copying)
- * @size_hdr_table: size of the above (in bytes)
- * @hdr_table_dst_addr: header address in IPA sram (used as dst for memory copy)
- * @rsvd: reserved
- */
-struct ipa3_hdr_init_local {
- u64 hdr_table_src_addr:64;
- u64 size_hdr_table:12;
- u64 hdr_table_dst_addr:16;
- u64 rsvd:4;
-};
-
-/**
- * struct ipa3_hdr_init_system - IPA_HDR_INIT_SYSTEM command payload
- * @hdr_table_addr: word address of header table in system memory where the
- * table starts (use as source for memory copying)
- * @rsvd: reserved
- */
-struct ipa3_hdr_init_system {
- u64 hdr_table_addr:64;
-};
-
-/**
* struct ipa3_hdr_proc_ctx_tlv -
* HW structure of IPA processing context header - TLV part
* @type: 0 - end type
@@ -276,124 +152,6 @@ struct ipa3_a5_mux_hdr {
u32 metadata;
};
-/**
- * enum ipa_pipeline_clear_option - Values for pipeline_clear_options
- * @IPA_HPS_CLEAR: Wait for HPS clear. All queues except high priority queue
- * shall not be serviced until HPS is clear of packets or immediate commands.
- * The high priority Rx queue / Q6ZIP group shall still be serviced normally.
- *
- * @IPA_SRC_GRP_CLEAR: Wait for originating source group to be clear
- * (for no packet contexts allocated to the originating source group).
- * The source group / Rx queue shall not be serviced until all previously
- * allocated packet contexts are released. All other source groups/queues shall
- * be serviced normally.
- *
- * @IPA_FULL_PIPELINE_CLEAR: Wait for full pipeline to be clear.
- * All groups / Rx queues shall not be serviced until IPA pipeline is fully
- * clear. This should be used for debug only.
- */
-enum ipa_pipeline_clear_option {
- IPA_HPS_CLEAR,
- IPA_SRC_GRP_CLEAR,
- IPA_FULL_PIPELINE_CLEAR
-};
-
-/**
- * struct ipa3_register_write - IPA_REGISTER_WRITE command payload
- * @rsvd: reserved
- * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear
- * @offset: offset from IPA base address
- * @value: value to write to register
- * @value_mask: mask specifying which value bits to write to the register
- * @pipeline_clear_options: options for pipeline to clear
- */
-struct ipa3_register_write {
- u64 rsvd:15;
- u64 skip_pipeline_clear:1;
- u64 offset:16;
- u64 value:32;
- u64 value_mask:32;
- u64 pipeline_clear_options:2;
- u64 rsvd2:30;
-};
-
-/**
- * struct ipa3_nat_dma - IPA_NAT_DMA command payload
- * @table_index: NAT table index
- * @rsvd1: reserved
- * @base_addr: base address
- * @rsvd2: reserved
- * @offset: offset
- * @data: metadata
- * @rsvd3: reserved
- */
-struct ipa3_nat_dma {
- u64 table_index:3;
- u64 rsvd1:1;
- u64 base_addr:2;
- u64 rsvd2:2;
- u64 offset:32;
- u64 data:16;
- u64 rsvd3:8;
-};
-
-/**
- * struct ipa3_ip_packet_init - IPA_IP_PACKET_INIT command payload
- * @destination_pipe_index: destination pipe index
- * @rsvd1: reserved
- * @metadata: metadata
- * @rsvd2: reserved
- */
-struct ipa3_ip_packet_init {
- u64 destination_pipe_index:5;
- u64 rsvd1:3;
- u64 rsvd2:32;
- u64 rsvd3:24;
-};
-
-/**
- * struct ipa3_nat_dma - IPA_IP_V4_NAT_INIT command payload
- * @ipv4_rules_addr: ipv4 rules address
- * @ipv4_expansion_rules_addr: ipv4 expansion rules address
- * @index_table_addr: index tables address
- * @index_table_expansion_addr: index expansion table address
- * @table_index: index in table
- * @ipv4_rules_addr_type: ipv4 address type
- * @ipv4_expansion_rules_addr_type: ipv4 expansion address type
- * @index_table_addr_type: index table address type
- * @index_table_expansion_addr_type: index expansion table type
- * @size_base_tables: size of base tables
- * @size_expansion_tables: size of expansion tables
- * @rsvd2: reserved
- * @public_ip_addr: public IP address
- */
-struct ipa3_ip_v4_nat_init {
- u64 ipv4_rules_addr:64;
- u64 ipv4_expansion_rules_addr:64;
- u64 index_table_addr:64;
- u64 index_table_expansion_addr:64;
- u64 table_index:3;
- u64 rsvd1:1;
- u64 ipv4_rules_addr_type:1;
- u64 ipv4_expansion_rules_addr_type:1;
- u64 index_table_addr_type:1;
- u64 index_table_expansion_addr_type:1;
- u64 size_base_tables:12;
- u64 size_expansion_tables:10;
- u64 rsvd2:2;
- u64 public_ip_addr:32;
-};
-
-/**
- * struct ipa3_ip_packet_tag_status - IPA_IP_PACKET_TAG_STATUS command payload
- * @rsvd: reserved
- * @tag: tag value returned within status
- */
-struct ipa3_ip_packet_tag_status {
- u64 rsvd:16;
- u64 tag:48;
-};
-
/*! @brief Struct for the IPAv3.0 UL packet status header */
struct ipa3_hw_pkt_status {
u64 status_opcode:8;
@@ -474,29 +232,4 @@ enum ipa3_hw_pkt_status_exception {
IPA_HW_PKT_STATUS_EXCEPTION_MAX = 0xFF
};
-/*! @brief IPA_HW_IMM_CMD_DMA_SHARED_MEM Immediate Command Parameters */
-struct ipa3_hw_imm_cmd_dma_shared_mem {
- u64 reserved_1:16;
- u64 size:16;
- u64 local_addr:16;
- u64 direction:1;
- u64 skip_pipeline_clear:1;
- u64 pipeline_clear_options:2;
- u64 reserved_2:12;
- u64 system_addr:64;
-};
-
-/*! @brief IPA_HW_IMM_CMD_DMA_TASK_32B_ADDR Immediate Command Parameters */
-struct ipa3_hw_imm_cmd_dma_task_32b_addr {
- u64 reserved:11;
- u64 cmplt:1;
- u64 eof:1;
- u64 flsh:1;
- u64 lock:1;
- u64 unlock:1;
- u64 size1:16;
- u64 addr1:32;
- u64 packet_size:16;
-};
-
#endif /* _IPA_HW_DEFS_H */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index ff1ec3e93592..d6229a8ba704 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,16 +22,18 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/ipa.h>
+#include <linux/ipa_usb.h>
#include <linux/msm-sps.h>
#include <asm/dma-iommu.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
+#include <linux/ipc_logging.h>
#include <linux/firmware.h>
#include "ipa_hw_defs.h"
#include "ipa_ram_mmap.h"
-#include "ipa_reg.h"
#include "ipa_qmi_service.h"
#include "../ipa_api.h"
+#include "ipahal/ipahal_reg.h"
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
@@ -48,11 +50,38 @@
#define IPA_GENERIC_RX_POOL_SZ 192
#define IPA_MAX_STATUS_STAT_NUM 30
+#define __FILENAME__ \
+ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
+
+
+#define IPA_IPC_LOGGING(buf, fmt, args...) \
+ ipc_log_string((buf), \
+ DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
#define IPADBG(fmt, args...) \
- pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+ do { \
+ pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+ if (ipa3_ctx) { \
+ IPA_IPC_LOGGING(ipa3_ctx->logbuf, fmt, ## args); \
+ IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, fmt, ## args); \
+ } \
+ } while (0)
+
+#define IPADBG_LOW(fmt, args...) \
+ do { \
+ pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+ if (ipa3_ctx && ipa3_ctx->enable_low_prio_print) \
+ IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, fmt, ## args); \
+ } while (0)
+
#define IPAERR(fmt, args...) \
- pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+ do { \
+ pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+ if (ipa3_ctx) { \
+ IPA_IPC_LOGGING(ipa3_ctx->logbuf, fmt, ## args); \
+ IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, fmt, ## args); \
+ } \
+ } while (0)
#define WLAN_AMPDU_TX_EP 15
#define WLAN_PROD_TX_EP 19
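IPADBG_LOW differs from IPADBG only in how it reaches the IPC logs: both still call pr_debug(), but the low-priority variant is copied into logbuf_low only when enable_low_prio_print is set, which is why the per-packet prints in the data path above were downgraded to it. A small illustrative use, assuming driver context; demo_rx_trace() is hypothetical:

static void demo_rx_trace(u32 ep_idx, u32 len)
{
	/* rare and important: always lands in the IPC logs */
	if (len == 0)
		IPAERR("zero length packet on ep %u\n", ep_idx);

	/* per-packet noise: IPC-logged only with enable_low_prio_print */
	IPADBG_LOW("rx on ep %u len %u\n", ep_idx, len);
}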
@@ -128,9 +157,6 @@
#define IPA_CLIENT_IS_PROD(x) (x >= IPA_CLIENT_PROD && x < IPA_CLIENT_CONS)
#define IPA_CLIENT_IS_CONS(x) (x >= IPA_CLIENT_CONS && x < IPA_CLIENT_MAX)
-#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
-#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
- (reg |= ((val) << (shift)) & (mask))
#define IPA_HW_TABLE_ALIGNMENT(start_ofst) \
(((start_ofst) + 127) & ~127)
@@ -184,6 +210,123 @@
#define IPA_SLEEP_CLK_RATE_KHZ (32)
+#define IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client) \
+ log_info.file = __FILENAME__; \
+ log_info.line = __LINE__; \
+ log_info.type = EP; \
+ log_info.id_string = ipa3_clients_strings[client]
+
+#define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \
+ log_info.file = __FILENAME__; \
+ log_info.line = __LINE__; \
+ log_info.type = SIMPLE; \
+ log_info.id_string = __func__
+
+#define IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name) \
+ log_info.file = __FILENAME__; \
+ log_info.line = __LINE__; \
+ log_info.type = RESOURCE; \
+ log_info.id_string = resource_name
+
+#define IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str) \
+ log_info.file = __FILENAME__; \
+ log_info.line = __LINE__; \
+ log_info.type = SPECIAL; \
+ log_info.id_string = id_str
+
+#define IPA_ACTIVE_CLIENTS_INC_EP(client) \
+ do { \
+ struct ipa3_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
+ ipa3_inc_client_enable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_EP(client) \
+ do { \
+ struct ipa3_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
+ ipa3_dec_client_disable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_SIMPLE() \
+ do { \
+ struct ipa3_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
+ ipa3_inc_client_enable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_SIMPLE() \
+ do { \
+ struct ipa3_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
+ ipa3_dec_client_disable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_RESOURCE(resource_name) \
+ do { \
+ struct ipa3_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
+ ipa3_inc_client_enable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_RESOURCE(resource_name) \
+ do { \
+ struct ipa3_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
+ ipa3_dec_client_disable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_SPECIAL(id_str) \
+ do { \
+ struct ipa3_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
+ ipa3_inc_client_enable_clks(&log_info); \
+ } while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_SPECIAL(id_str) \
+ do { \
+ struct ipa3_active_client_logging_info log_info; \
+ IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
+ ipa3_dec_client_disable_clks(&log_info); \
+ } while (0)
+
+#define IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
+#define IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN 96
+#define IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
+#define IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN 40
+
+extern const char *ipa3_clients_strings[];
+
+enum ipa3_active_client_log_type {
+ EP,
+ SIMPLE,
+ RESOURCE,
+ SPECIAL,
+ INVALID
+};
+
+struct ipa3_active_client_logging_info {
+ const char *id_string;
+ char *file;
+ int line;
+ enum ipa3_active_client_log_type type;
+};
+
+struct ipa3_active_client_htable_entry {
+ struct hlist_node list;
+ char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
+ int count;
+ enum ipa3_active_client_log_type type;
+};
+
+struct ipa3_active_clients_log_ctx {
+ char *log_buffer[IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES];
+ int log_head;
+ int log_tail;
+ bool log_rdy;
+ struct hlist_head htable[IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE];
+};
+
struct ipa3_client_names {
enum ipa_client_type names[MAX_RESOURCE_TO_CLIENTS];
int length;
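Each of these wrappers fills an ipa3_active_client_logging_info with __FILENAME__, __LINE__, a type and an id string before taking or releasing the clock vote, which is what makes entries in the new active-clients log attributable to a caller. A minimal sketch of the vote/unvote bracket they form, mirroring how the reworked ipa3_sys_setup()/ipa3_sys_teardown() in ipa_dp.c use the EP variant; demo_do_work_for_client() is hypothetical:

static void demo_do_work_for_client(enum ipa_client_type client)
{
	/* vote: records file/line and the client's id string */
	IPA_ACTIVE_CLIENTS_INC_EP(client);

	/* ... access IPA registers or issue immediate commands ... */

	/* unvote: must pair with the INC above */
	IPA_ACTIVE_CLIENTS_DEC_EP(client);
}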
@@ -468,28 +611,6 @@ struct ipa3_rt_tbl_set {
};
/**
- * struct ipa3_ep_cfg_status - status configuration in IPA end-point
- * @status_en: Determines if end point supports Status Indications. SW should
- * set this bit in order to enable Statuses. Output Pipe - send
- * Status indications only if bit is set. Input Pipe - forward Status
- * indication to STATUS_ENDP only if bit is set. Valid for Input
- * and Output Pipes (IPA Consumer and Producer)
- * @status_ep: Statuses generated for this endpoint will be forwarded to the
- * specified Status End Point. Status endpoint needs to be
- * configured with STATUS_EN=1 Valid only for Input Pipes (IPA
- * Consumer)
- * @status_location: Location of PKT-STATUS on destination pipe.
- * If set to 0 (default), PKT-STATUS will be appended before the packet
- * for this endpoint. If set to 1, PKT-STATUS will be appended after the
- * packet for this endpoint. Valid only for Output Pipes (IPA Producer)
- */
-struct ipa3_ep_cfg_status {
- bool status_en;
- u8 status_ep;
- bool status_location;
-};
-
-/**
* struct ipa3_wlan_stats - Wlan stats for each wlan endpoint
* @rx_pkts_rcvd: Packets sent by wlan driver
* @rx_pkts_status_rcvd: Status packets received from ipa hw
@@ -591,7 +712,7 @@ struct ipa3_ep_context {
dma_addr_t phys_base;
struct ipa_ep_cfg cfg;
struct ipa_ep_cfg_holb holb;
- struct ipa3_ep_cfg_status status;
+ struct ipahal_reg_ep_cfg_status status;
u32 dst_pipe_index;
u32 rt_tbl_idx;
struct sps_connect connect;
@@ -650,7 +771,6 @@ struct ipa_request_gsi_channel_params {
union __packed gsi_channel_scratch chan_scratch;
};
-
enum ipa3_sys_pipe_policy {
IPA_POLICY_INTR_MODE,
IPA_POLICY_NOINTR_MODE,
@@ -683,6 +803,7 @@ struct ipa3_sys_context {
int (*pyld_hdlr)(struct sk_buff *skb, struct ipa3_sys_context *sys);
struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags);
void (*free_skb)(struct sk_buff *skb);
+ void (*free_rx_wrapper)(struct ipa3_rx_pkt_wrapper *rx_pkt);
u32 rx_buff_sz;
u32 rx_pool_sz;
struct sk_buff *prev_skb;
@@ -700,6 +821,7 @@ struct ipa3_sys_context {
/* ordering is important - mutable fields go above */
struct ipa3_ep_context *ep;
struct list_head head_desc_list;
+ struct list_head rcycl_list;
spinlock_t spinlock;
struct workqueue_struct *wq;
struct workqueue_struct *repl_wq;
@@ -973,6 +1095,47 @@ enum ipa3_hw_features {
};
/**
+ * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
+ * device
+ * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
+ */
+enum ipa3_hw_2_cpu_events {
+ IPA_HW_2_CPU_EVENT_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_HW_2_CPU_EVENT_LOG_INFO =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+};
+
+/**
+ * enum ipa3_hw_errors - Common error types.
+ * @IPA_HW_ERROR_NONE : No error persists
+ * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
+ * @IPA_HW_DMA_ERROR : Unexpected DMA error
+ * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
+ * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
+ * @IPA_HW_ZIP_ENGINE_ERROR : ZIP engine error
+ */
+enum ipa3_hw_errors {
+ IPA_HW_ERROR_NONE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+ IPA_HW_INVALID_DOORBELL_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+ IPA_HW_DMA_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+ IPA_HW_FATAL_SYSTEM_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+ IPA_HW_INVALID_OPCODE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+ IPA_HW_ZIP_ENGINE_ERROR =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
+ IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
+ IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7)
+};
+
+/**
* struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
* section in 128B shared memory located in offset zero of SW Partition in IPA
* SRAM.
@@ -1034,6 +1197,20 @@ union IpaHwFeatureInfoData_t {
} __packed;
/**
+ * union IpaHwErrorEventData_t - HW->CPU Common Events
+ * @errorType : Entered when a system error is detected by the HW. Type of
+ * error is specified by IPA_HW_ERRORS
+ * @reserved : Reserved
+ */
+union IpaHwErrorEventData_t {
+ struct IpaHwErrorEventParams_t {
+ u32 errorType:8;
+ u32 reserved:24;
+ } __packed params;
+ u32 raw32b;
+} __packed;
+
+/**
* struct IpaHwEventInfoData_t - Structure holding the parameters for
* statistics and config info
*
@@ -1184,6 +1361,7 @@ union IpaHwMhiDlUlSyncCmdData_t {
* @uc_status: The last status provided by the uC
* @uc_zip_error: uC has notified the APPS upon a ZIP engine error
* @uc_error_type: error type from uC error event
+ * @uc_error_timestamp: tag timer value sampled after the uC crashed
*/
struct ipa3_uc_ctx {
bool uc_inited;
@@ -1199,6 +1377,7 @@ struct ipa3_uc_ctx {
u32 uc_status;
bool uc_zip_error;
u32 uc_error_type;
+ u32 uc_error_timestamp;
};
/**
@@ -1239,27 +1418,11 @@ struct ipa3cm_client_info {
enum ipacm_client_enum client_enum;
bool uplink;
};
-/**
- * struct ipa3_hash_tuple - Hash tuple members for flt and rt
- * the fields tells if to be masked or not
- * @src_id: pipe number for flt, table index for rt
- * @src_ip_addr: IP source address
- * @dst_ip_addr: IP destination address
- * @src_port: L4 source port
- * @dst_port: L4 destination port
- * @protocol: IP protocol field
- * @meta_data: packet meta-data
- *
- */
-struct ipa3_hash_tuple {
- /* src_id: pipe in flt, tbl index in rt */
- bool src_id;
- bool src_ip_addr;
- bool dst_ip_addr;
- bool src_port;
- bool dst_port;
- bool protocol;
- bool meta_data;
+
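+/**
+ * struct ipa3_smp2p_info - SMP2P signaling info
+ * @out_base_id: base id of the outbound smp2p bits
+ * @in_base_id: base id of the inbound smp2p bits
+ * @res_sent: true if a response was already sent over smp2p
+ */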
+struct ipa3_smp2p_info {
+ u32 out_base_id;
+ u32 in_base_id;
+ bool res_sent;
};
/**
@@ -1342,10 +1505,13 @@ struct ipa3_ready_cb_info {
* @use_ipa_teth_bridge: use tethering bridge driver
* @ipa_bam_remote_mode: ipa bam is in remote mode
* @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules
+ * @logbuf: ipc log buffer for high priority messages
+ * @logbuf_low: ipc log buffer for low priority messages
* @ipa_bus_hdl: msm driver handle for the data path bus
* @ctrl: holds the core specific operations based on
* core version (vtable like)
* @enable_clock_scaling: clock scaling is enabled ?
+ * @enable_low_prio_print: enable low priority prints
* @curr_ipa_clk_rate: ipa3_clk current rate
* @wcstats: wlan common buffer stats
* @uc_ctx: uC interface context
@@ -1417,6 +1583,7 @@ struct ipa3_context {
struct gen_pool *pipe_mem_pool;
struct dma_pool *dma_pool;
struct ipa3_active_clients ipa3_active_clients;
+ struct ipa3_active_clients_log_ctx ipa3_active_clients_logging;
struct workqueue_struct *power_mgmt_wq;
struct workqueue_struct *transport_power_mgmt_wq;
bool tag_process_before_gating;
@@ -1439,6 +1606,8 @@ struct ipa3_context {
/* featurize if memory footprint becomes a concern */
struct ipa3_stats stats;
void *smem_pipe_mem;
+ void *logbuf;
+ void *logbuf_low;
u32 ipa_bus_hdl;
struct ipa3_controller *ctrl;
struct idr ipa_idr;
@@ -1446,6 +1615,7 @@ struct ipa3_context {
struct device *uc_pdev;
spinlock_t idr_lock;
u32 enable_clock_scaling;
+ u32 enable_low_prio_print;
u32 curr_ipa_clk_rate;
bool q6_proxy_clk_vote_valid;
u32 ipa_num_pipes;
@@ -1478,27 +1648,7 @@ struct ipa3_context {
bool ipa_initialization_complete;
struct list_head ipa_ready_cb_list;
struct completion init_completion_obj;
-};
-
-/**
- * struct ipa3_route - IPA route
- * @route_dis: route disable
- * @route_def_pipe: route default pipe
- * @route_def_hdr_table: route default header table
- * @route_def_hdr_ofst: route default header offset table
- * @route_frag_def_pipe: Default pipe to route fragmented exception
- * packets and frag new rule statues, if source pipe does not have
- * a notification status pipe defined.
- * @route_def_retain_hdr: default value of retain header. It is used
- * when no rule was hit
- */
-struct ipa3_route {
- u32 route_dis;
- u32 route_def_pipe;
- u32 route_def_hdr_table;
- u32 route_def_hdr_ofst;
- u8 route_frag_def_pipe;
- u32 route_def_retain_hdr;
+ struct ipa3_smp2p_info smp2p_info;
};
/**
@@ -1624,43 +1774,15 @@ struct ipa3_controller {
int (*ipa_init_rt6)(void);
int (*ipa_init_flt4)(void);
int (*ipa_init_flt6)(void);
- void (*ipa3_cfg_ep_hdr)(u32 pipe_number,
- const struct ipa_ep_cfg_hdr *ipa_ep_hdr_cfg);
- int (*ipa3_cfg_ep_hdr_ext)(u32 pipe_number,
- const struct ipa_ep_cfg_hdr_ext *ipa_ep_hdr_ext_cfg);
- void (*ipa3_cfg_ep_aggr)(u32 pipe_number,
- const struct ipa_ep_cfg_aggr *ipa_ep_agrr_cfg);
- int (*ipa3_cfg_ep_deaggr)(u32 pipe_index,
- const struct ipa_ep_cfg_deaggr *ep_deaggr);
- void (*ipa3_cfg_ep_nat)(u32 pipe_number,
- const struct ipa_ep_cfg_nat *ipa_ep_nat_cfg);
- void (*ipa3_cfg_ep_mode)(u32 pipe_number, u32 dst_pipe_number,
- const struct ipa_ep_cfg_mode *ep_mode);
- void (*ipa3_cfg_ep_route)(u32 pipe_index, u32 rt_tbl_index);
- void (*ipa3_cfg_ep_holb)(u32 pipe_index,
- const struct ipa_ep_cfg_holb *ep_holb);
- void (*ipa3_cfg_route)(struct ipa3_route *route);
- int (*ipa3_read_gen_reg)(char *buff, int max_len);
int (*ipa3_read_ep_reg)(char *buff, int max_len, int pipe);
- void (*ipa3_write_dbg_cnt)(int option);
- int (*ipa3_read_dbg_cnt)(char *buf, int max_len);
- void (*ipa3_cfg_ep_status)(u32 clnt_hdl,
- const struct ipa3_ep_cfg_status *ep_status);
int (*ipa3_commit_flt)(enum ipa_ip_type ip);
int (*ipa3_commit_rt)(enum ipa_ip_type ip);
int (*ipa_generate_rt_hw_rule)(enum ipa_ip_type ip,
struct ipa3_rt_entry *entry, u8 *buf);
int (*ipa3_commit_hdr)(void);
- void (*ipa3_cfg_ep_cfg)(u32 clnt_hdl,
- const struct ipa_ep_cfg_cfg *cfg);
- void (*ipa3_cfg_ep_metadata_mask)(u32 clnt_hdl,
- const struct ipa_ep_cfg_metadata_mask *metadata_mask);
void (*ipa3_enable_clks)(void);
void (*ipa3_disable_clks)(void);
struct msm_bus_scale_pdata *msm_bus_data_ptr;
-
- void (*ipa3_cfg_ep_metadata)(u32 pipe_number,
- const struct ipa_ep_cfg_metadata *);
};
extern struct ipa3_context *ipa3_ctx;
@@ -1697,36 +1819,9 @@ int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid);
int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id);
int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
- bool should_force_clear, u32 qmi_req_id);
-
-int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl);
-
-/*
- * USB
- */
-int ipa3_usb_init(void);
-
-int ipa3_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
- struct ipa_usb_teth_params *teth_params,
- int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
- void *),
- void *user_data);
-
-int ipa3_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
- struct ipa_usb_xdci_chan_params *dl_chan_params,
- struct ipa_req_chan_out_params *ul_out_params,
- struct ipa_req_chan_out_params *dl_out_params,
- struct ipa_usb_xdci_connect_params *connect_params);
-
-int ipa3_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
- enum ipa_usb_teth_prot teth_prot);
-
-int ipa3_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot);
+ bool should_force_clear, u32 qmi_req_id, bool is_dpl);
-int ipa3_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
- enum ipa_usb_teth_prot teth_prot);
-
-int ipa3_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl);
+int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl);
/*
* Resume / Suspend
@@ -1922,6 +2017,10 @@ int ipa3_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
* if uC not ready only, register callback
*/
int ipa3_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
+/*
+ * To de-register uC ready callback
+ */
+int ipa3_uc_dereg_rdyCB(void);
/*
* Resource manager
@@ -2110,7 +2209,7 @@ void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
#endif
int ipa3_controller_static_bind(struct ipa3_controller *controller,
enum ipa_hw_type ipa_hw_type);
-int ipa3_cfg_route(struct ipa3_route *route);
+int ipa3_cfg_route(struct ipahal_reg_route *route);
int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr);
int ipa3_cfg_filter(u32 disable);
int ipa3_pipe_mem_init(u32 start_ofst, u32 size);
@@ -2120,18 +2219,23 @@ int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary);
struct ipa3_context *ipa3_get_ctx(void);
void ipa3_enable_clks(void);
void ipa3_disable_clks(void);
-void ipa3_inc_client_enable_clks(void);
-int ipa3_inc_client_enable_clks_no_block(void);
-void ipa3_dec_client_disable_clks(void);
+void ipa3_inc_client_enable_clks(struct ipa3_active_client_logging_info *id);
+int ipa3_inc_client_enable_clks_no_block(struct ipa3_active_client_logging_info
+ *id);
+void ipa3_dec_client_disable_clks(struct ipa3_active_client_logging_info *id);
+void ipa3_active_clients_log_dec(struct ipa3_active_client_logging_info *id,
+ bool int_ctx);
+void ipa3_active_clients_log_inc(struct ipa3_active_client_logging_info *id,
+ bool int_ctx);
+int ipa3_active_clients_log_print_buffer(char *buf, int size);
+int ipa3_active_clients_log_print_table(char *buf, int size);
+void ipa3_active_clients_log_clear(void);
int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev);
int __ipa3_del_rt_rule(u32 rule_hdl);
int __ipa3_del_hdr(u32 hdr_hdl);
int __ipa3_release_hdr(u32 hdr_hdl);
int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl);
-int _ipa_read_gen_reg_v3_0(char *buff, int max_len);
int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe);
-void _ipa_write_dbg_cnt_v3_0(int option);
-int _ipa_read_dbg_cnt_v3_0(char *buf, int max_len);
void _ipa_enable_clks_v3_0(void);
void _ipa_disable_clks_v3_0(void);
struct device *ipa3_get_dma_dev(void);
@@ -2141,22 +2245,6 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
void *interrupt_data);
-static inline u32 ipa_read_reg(void *base, u32 offset)
-{
- return ioread32(base + offset);
-}
-
-static inline u32 ipa_read_reg_field(void *base, u32 offset,
- u32 mask, u32 shift)
-{
- return (ipa_read_reg(base, offset) & mask) >> shift;
-}
-
-static inline void ipa_write_reg(void *base, u32 offset, u32 val)
-{
- iowrite32(val, base + offset);
-}
-
int ipa_bridge_init(void);
void ipa_bridge_cleanup(void);
@@ -2204,9 +2292,7 @@ int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
u32 bandwidth_mbps);
int ipa3_cfg_ep_status(u32 clnt_hdl,
- const struct ipa3_ep_cfg_status *ipa_ep_cfg);
-int ipa3_cfg_aggr_cntr_granularity(u8 aggr_granularity);
-int ipa3_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity);
+ const struct ipahal_reg_ep_cfg_status *ipa_ep_cfg);
int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name name);
int ipa3_suspend_resource_sync(enum ipa_rm_resource_name name);
@@ -2239,7 +2325,6 @@ int ipa3_uc_loaded_check(void);
void ipa3_uc_load_notify(void);
int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
bool polling_mode, unsigned long timeout_jiffies);
-void ipa3_register_panic_hdlr(void);
void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
struct ipa3_uc_hdlrs *hdlrs);
int ipa3_create_nat_device(void);
@@ -2263,9 +2348,9 @@ int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected);
int ipa3_uc_mhi_stop_event_update_channel(int channelHandle);
int ipa3_uc_mhi_print_stats(char *dbg_buff, int size);
int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
-void ipa3_tag_free_buf(void *user1, int user2);
+void ipa3_tag_destroy_imm(void *user1, int user2);
struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(int ipa_ep_idx);
-void ipa3_uc_rg10_write_reg(void *base, u32 offset, u32 val);
+void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val);
u32 ipa3_get_num_pipes(void);
struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void);
@@ -2279,8 +2364,8 @@ int ipa3_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_name depends_on_name);
int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
-int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipa3_hash_tuple *tuple);
-int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipa3_hash_tuple *tuple);
+int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple);
+int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple);
void ipa3_set_resorce_groups_min_max_limits(void);
void ipa3_suspend_apps_pipes(bool suspend);
void ipa3_flow_control(enum ipa_client_type ipa_client, bool enable,
@@ -2298,10 +2383,14 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx,
struct ipa3_debugfs_rt_entry entry[],
int *num_entry);
int ipa3_calc_extra_wrd_bytes(const struct ipa_ipfltri_rule_eq *attrib);
+const char *ipa3_rm_resource_str(enum ipa_rm_resource_name resource_name);
int ipa3_restore_suspend_handler(void);
int ipa3_inject_dma_task_for_gsi(void);
+int ipa3_uc_panic_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr);
void ipa3_inc_acquire_wakelock(void);
void ipa3_dec_release_wakelock(void);
int ipa3_load_fws(const struct firmware *firmware);
int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data);
+const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
#endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
index 50ab8f8e76d5..45d3b13049bc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -111,20 +111,16 @@ static int ipa3_handle_interrupt(int irq_num, bool isr_context)
switch (interrupt_info.interrupt) {
case IPA_TX_SUSPEND_IRQ:
- IPADBG("processing TX_SUSPEND interrupt work-around\n");
+ IPADBG_LOW("processing TX_SUSPEND interrupt work-around\n");
ipa3_tx_suspend_interrupt_wa();
- if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
- suspend_data = ipa_read_reg(ipa3_ctx->mmio,
- IPA_IRQ_SUSPEND_INFO_EE_n_ADDR_v3_0(ipa_ee));
- IPADBG("get interrupt %d\n", suspend_data);
- } else {
- suspend_data = ipa_read_reg(ipa3_ctx->mmio,
- IPA_IRQ_SUSPEND_INFO_EE_n_ADDR_v3_1(ipa_ee));
- IPADBG("get interrupt %d\n", suspend_data);
+ suspend_data = ipahal_read_reg_n(IPA_IRQ_SUSPEND_INFO_EE_n,
+ ipa_ee);
+ IPADBG_LOW("get interrupt %d\n", suspend_data);
+
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
/* Clearing L2 interrupts status */
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_SUSPEND_IRQ_CLR_EE_n_ADDR(ipa_ee),
- suspend_data);
+ ipahal_write_reg_n(IPA_SUSPEND_IRQ_CLR_EE_n,
+ ipa_ee, suspend_data);
}
if (!ipa3_is_valid_ep(suspend_data))
return 0;
@@ -138,6 +134,20 @@ static int ipa3_handle_interrupt(int irq_num, bool isr_context)
suspend_interrupt_data->endpoints = suspend_data;
interrupt_data = suspend_interrupt_data;
break;
+ case IPA_UC_IRQ_0:
+ if (ipa3_ctx->apply_rg10_wa) {
+ /*
+ * Early detection of a uC crash. When the RG10
+ * workaround is enabled, the crash would otherwise go
+ * undetected: the interrupt is cleared via a uC
+ * register write before the uC event is processed, and
+ * that write times out because the uC has already
+ * crashed.
+ */
+ if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVENT_ERROR)
+ ipa3_ctx->uc_ctx.uc_failed = true;
+ }
+ break;
default:
break;
}
@@ -179,26 +189,25 @@ static void ipa3_enable_tx_suspend_wa(struct work_struct *work)
u32 suspend_bmask;
int irq_num;
- IPADBG("Enter\n");
+ IPADBG_LOW("Enter\n");
irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
BUG_ON(irq_num == -1);
/* make sure ipa hw is clocked on*/
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
- en = ipa_read_reg(ipa3_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
+ en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
suspend_bmask = 1 << irq_num;
/*enable TX_SUSPEND_IRQ*/
en |= suspend_bmask;
IPADBG("enable TX_SUSPEND_IRQ, IPA_IRQ_EN_EE reg, write val = %u\n"
, en);
- ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
- IPA_IRQ_EN_EE_n_ADDR(ipa_ee), en);
+ ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, en);
ipa3_process_interrupts(false);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
- IPADBG("Exit\n");
+ IPADBG_LOW("Exit\n");
}
static void ipa3_tx_suspend_interrupt_wa(void)
@@ -207,24 +216,23 @@ static void ipa3_tx_suspend_interrupt_wa(void)
u32 suspend_bmask;
int irq_num;
- IPADBG("Enter\n");
+ IPADBG_LOW("Enter\n");
irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
BUG_ON(irq_num == -1);
/*disable TX_SUSPEND_IRQ*/
- val = ipa_read_reg(ipa3_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
+ val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
suspend_bmask = 1 << irq_num;
val &= ~suspend_bmask;
IPADBG("Disabling TX_SUSPEND_IRQ, write val: %u to IPA_IRQ_EN_EE reg\n",
val);
- ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
- IPA_IRQ_EN_EE_n_ADDR(ipa_ee), val);
+ ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
- IPADBG(" processing suspend interrupt work-around, delayed work\n");
+ IPADBG_LOW(" processing suspend interrupt work-around, delayed work\n");
queue_delayed_work(ipa_interrupt_wq, &dwork_en_suspend_int,
msecs_to_jiffies(DIS_SUSPEND_INTERRUPT_TIMEOUT));
- IPADBG("Exit\n");
+ IPADBG_LOW("Exit\n");
}
static void ipa3_process_interrupts(bool isr_context)
@@ -235,11 +243,11 @@ static void ipa3_process_interrupts(bool isr_context)
u32 en;
unsigned long flags;
- IPADBG("Enter\n");
+ IPADBG_LOW("Enter\n");
spin_lock_irqsave(&suspend_wa_lock, flags);
- en = ipa_read_reg(ipa3_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
- reg = ipa_read_reg(ipa3_ctx->mmio, IPA_IRQ_STTS_EE_n_ADDR(ipa_ee));
+ en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+ reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
while (en & reg) {
bmsk = 1;
for (i = 0; i < IPA_IRQ_NUM_MAX; i++) {
@@ -257,26 +265,31 @@ static void ipa3_process_interrupts(bool isr_context)
}
bmsk = bmsk << 1;
}
- ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
- IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), reg);
- reg = ipa_read_reg(ipa3_ctx->mmio,
- IPA_IRQ_STTS_EE_n_ADDR(ipa_ee));
+ /*
+ * If the uC has failed, the interrupt cannot be cleared.
+ * The device will crash as part of handling the uC error event.
+ */
+ if (ipa3_ctx->apply_rg10_wa && ipa3_ctx->uc_ctx.uc_failed)
+ break;
+
+ ipa3_uc_rg10_write_reg(IPA_IRQ_CLR_EE_n, ipa_ee, reg);
+ reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
/* due to the suspend interrupt HW bug we must
 * read the EN register again, otherwise the while loop never ends
*/
- en = ipa_read_reg(ipa3_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
+ en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
}
spin_unlock_irqrestore(&suspend_wa_lock, flags);
- IPADBG("Exit\n");
+ IPADBG_LOW("Exit\n");
}
static void ipa3_interrupt_defer(struct work_struct *work)
{
IPADBG("processing interrupts in wq\n");
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ipa3_process_interrupts(false);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG("Done\n");
}
@@ -284,7 +297,7 @@ static irqreturn_t ipa3_isr(int irq, void *ctxt)
{
unsigned long flags;
- IPADBG("Enter\n");
+ IPADBG_LOW("Enter\n");
/* defer interrupt handling in case IPA is not clocked on */
if (ipa3_active_clients_trylock(&flags) == 0) {
IPADBG("defer interrupt processing\n");
@@ -299,7 +312,7 @@ static irqreturn_t ipa3_isr(int irq, void *ctxt)
}
ipa3_process_interrupts(true);
- IPADBG("Exit\n");
+ IPADBG_LOW("Exit\n");
bail:
ipa3_active_clients_trylock_unlock(&flags);
@@ -346,13 +359,12 @@ int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
ipa_interrupt_to_cb[irq_num].private_data = private_data;
ipa_interrupt_to_cb[irq_num].interrupt = interrupt;
- val = ipa_read_reg(ipa3_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
- IPADBG("read IPA_IRQ_EN_EE_n_ADDR register. reg = %d\n", val);
+ val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+ IPADBG("read IPA_IRQ_EN_EE_n register. reg = %d\n", val);
bmsk = 1 << irq_num;
val |= bmsk;
- ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
- IPA_IRQ_EN_EE_n_ADDR(ipa_ee), val);
- IPADBG("wrote IPA_IRQ_EN_EE_n_ADDR register. reg = %d\n", val);
+ ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
+ IPADBG("wrote IPA_IRQ_EN_EE_n register. reg = %d\n", val);
/* register SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt*/
if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
@@ -370,9 +382,8 @@ int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
val &= ~(1 << ep_idx);
}
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_SUSPEND_IRQ_EN_EE_n_ADDR(ipa_ee), val);
- IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n_ADDR reg = %d\n", val);
+ ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, val);
+ IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", val);
}
return 0;
}
@@ -411,16 +422,14 @@ int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt)
/* clean SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */
if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
(ipa3_ctx->ipa_hw_type == IPA_HW_v3_1)) {
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_SUSPEND_IRQ_EN_EE_n_ADDR(ipa_ee), 0);
- IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n_ADDR reg = %d\n", 0);
+ ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, 0);
+ IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", 0);
}
- val = ipa_read_reg(ipa3_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
+ val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
bmsk = 1 << irq_num;
val &= ~bmsk;
- ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
- IPA_IRQ_EN_EE_n_ADDR(ipa_ee), val);
+ ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
return 0;
}
@@ -439,7 +448,6 @@ int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt)
int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev)
{
int idx;
- u32 reg = 0xFFFFFFFF;
int res = 0;
ipa_ee = ee;
@@ -457,15 +465,6 @@ int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev)
return -ENOMEM;
}
- /* Clearing interrupts status */
- ipa3_uc_rg10_write_reg(ipa3_ctx->mmio,
- IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), reg);
-
- /* Clearing L2 interrupts status */
- if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_1)
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_SUSPEND_IRQ_CLR_EE_n_ADDR(ipa_ee), reg);
-
res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
IRQF_TRIGGER_RISING, "ipa", ipa_dev);
if (res) {
@@ -499,13 +498,11 @@ void ipa3_suspend_active_aggr_wa(u32 clnt_hdl)
struct ipa3_interrupt_work_wrap *work_data;
struct ipa_tx_suspend_irq_data *suspend_interrupt_data;
int irq_num;
- int aggr_active_bitmap = ipa_read_reg(ipa3_ctx->mmio,
- IPA_STATE_AGGR_ACTIVE_OFST);
+ int aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
if (aggr_active_bitmap & (1 << clnt_hdl)) {
/* force close aggregation */
- ipa_write_reg(ipa3_ctx->mmio, IPA_AGGR_FORCE_CLOSE_OFST,
- (1 << clnt_hdl));
+ ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
/* simulate suspend IRQ */
irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
@@ -516,7 +513,7 @@ void ipa3_suspend_active_aggr_wa(u32 clnt_hdl)
}
suspend_interrupt_data = kzalloc(
sizeof(*suspend_interrupt_data),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!suspend_interrupt_data) {
IPAERR("failed allocating suspend_interrupt_data\n");
return;
@@ -524,7 +521,7 @@ void ipa3_suspend_active_aggr_wa(u32 clnt_hdl)
suspend_interrupt_data->endpoints = 1 << clnt_hdl;
work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!work_data) {
IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
goto fail_alloc_work;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index 999e020f98df..32c5004dda95 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -523,7 +523,7 @@ ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
list_del(&msg->link);
}
- IPADBG("msg=%p\n", msg);
+ IPADBG_LOW("msg=%p\n", msg);
if (msg) {
locked = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index 1cf9c77e1d3e..5a3f2fcbf652 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1606,7 +1606,7 @@ static int ipa_mhi_start_gsi_channel(struct ipa3_mhi_channel_ctx *channel,
ch_props.ring_len = channel->ch_ctx_host.rlen;
ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
channel->ch_ctx_host.rbase);
- ch_props.use_db_eng = GSI_CHAN_DB_MODE;
+ ch_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
ch_props.low_weight = 1;
ch_props.err_cb = ipa_mhi_gsi_ch_err_cb;
@@ -2008,7 +2008,7 @@ int ipa3_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
IPA_MHI_DBG("channel_context_addr 0x%llx\n",
channel->channel_context_addr);
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
res = ipa_mhi_start_gsi_channel(channel, ipa_ep_idx);
@@ -2065,7 +2065,7 @@ int ipa3_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
ipa3_install_dflt_flt_rules(ipa_ep_idx);
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
IPA_MHI_DBG("client %d (ep: %d) connected\n", in->sys.client,
@@ -2080,7 +2080,7 @@ fail_ep_cfg:
fail_enable_dp:
ipa3_mhi_reset_channel(channel);
fail_start_channel:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
fail_init_channel:
memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
return -EPERM;
@@ -2137,8 +2137,7 @@ int ipa3_mhi_disconnect_pipe(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!ep->keep_ipa_awake)
- ipa3_inc_client_enable_clks();
-
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
res = ipa3_mhi_reset_channel(channel);
if (res) {
IPA_MHI_ERR("ipa3_mhi_reset_channel failed %d\n", res);
@@ -2155,8 +2154,7 @@ int ipa3_mhi_disconnect_pipe(u32 clnt_hdl)
ep->valid = 0;
ipa3_delete_dflt_flt_rules(clnt_hdl);
-
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
IPA_MHI_FUNC_EXIT();
@@ -2164,7 +2162,7 @@ int ipa3_mhi_disconnect_pipe(u32 clnt_hdl)
fail_reset_channel:
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return res;
}
@@ -2494,8 +2492,7 @@ static bool ipa3_mhi_has_open_aggr_frame(void)
int i;
int ipa_ep_idx;
- aggr_state_active = ipa_read_reg(ipa3_ctx->mmio,
- IPA_STATE_AGGR_ACTIVE_OFST);
+ aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
IPA_MHI_DBG("IPA_STATE_AGGR_ACTIVE_OFST 0x%x\n", aggr_state_active);
for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
@@ -2608,7 +2605,8 @@ int ipa3_mhi_suspend(bool force)
* hold IPA clocks and release them after all
* IPA RM resource are released to make sure tag process will not start
*/
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
IPA_MHI_DBG("release prod\n");
res = ipa3_mhi_release_prod();
if (res) {
@@ -2661,7 +2659,7 @@ int ipa3_mhi_suspend(bool force)
goto fail_release_cons;
}
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPA_MHI_FUNC_EXIT();
return 0;
@@ -2671,7 +2669,7 @@ fail_suspend_dl_channel:
fail_release_cons:
ipa3_mhi_request_prod();
fail_release_prod:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
fail_suspend_ul_channel:
ipa3_mhi_resume_ul_channels(true);
ipa3_mhi_set_state(IPA_MHI_STATE_STARTED);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index 725e0b91da77..67e9b397a8b4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,12 +17,11 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
#include "ipa_i.h"
+#include "ipahal/ipahal.h"
#define IPA_NAT_PHYS_MEM_OFFSET 0
#define IPA_NAT_PHYS_MEM_SIZE IPA_RAM_NAT_SIZE
-#define IPA_NAT_SYSTEM_MEMORY 0
-#define IPA_NAT_SHARED_MEMORY 1
#define IPA_NAT_TEMP_MEM_SIZE 128
static int ipa3_nat_vma_fault_remap(
@@ -91,8 +90,8 @@ static int ipa3_nat_mmap(struct file *filp, struct vm_area_struct *vma)
}
phys_addr = ipa3_ctx->ipa_wrapper_base +
ipa3_ctx->ctrl->ipa_reg_base_ofst +
- IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(
- IPA_NAT_PHYS_MEM_OFFSET);
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+ IPA_NAT_PHYS_MEM_OFFSET);
if (remap_pfn_range(
vma, vma->vm_start,
@@ -310,10 +309,10 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
#define TBL_ENTRY_SIZE 32
#define INDX_TBL_ENTRY_SIZE 4
- struct ipa3_register_write *reg_write_nop;
+ struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
struct ipa3_desc desc[2];
- struct ipa3_ip_v4_nat_init *cmd;
- u16 size = sizeof(struct ipa3_ip_v4_nat_init);
+ struct ipahal_imm_cmd_ip_v4_nat_init cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
int result;
u32 offset = 0;
size_t tmp;
@@ -398,37 +397,28 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
memset(&desc, 0, sizeof(desc));
/* NO-OP IC for ensuring that IPA pipeline is empty */
- reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
- if (!reg_write_nop) {
- IPAERR("no mem\n");
+ nop_cmd_pyld =
+ ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+ if (!nop_cmd_pyld) {
+ IPAERR("failed to construct NOP imm cmd\n");
result = -ENOMEM;
goto bail;
}
- reg_write_nop->skip_pipeline_clear = 0;
- reg_write_nop->pipeline_clear_options = IPA_HPS_CLEAR;
- reg_write_nop->value_mask = 0x0;
-
- desc[0].opcode = IPA_REGISTER_WRITE;
+ desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
desc[0].type = IPA_IMM_CMD_DESC;
desc[0].callback = NULL;
desc[0].user1 = NULL;
desc[0].user2 = 0;
- desc[0].pyld = (void *)reg_write_nop;
- desc[0].len = sizeof(*reg_write_nop);
+ desc[0].pyld = nop_cmd_pyld->data;
+ desc[0].len = nop_cmd_pyld->len;
- cmd = kmalloc(size, GFP_KERNEL);
- if (!cmd) {
- IPAERR("Failed to alloc immediate command object\n");
- result = -ENOMEM;
- goto free_nop;
- }
if (ipa3_ctx->nat_mem.vaddr) {
IPADBG("using system memory for nat table\n");
- cmd->ipv4_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
- cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
- cmd->index_table_addr_type = IPA_NAT_SYSTEM_MEMORY;
- cmd->index_table_expansion_addr_type = IPA_NAT_SYSTEM_MEMORY;
+ cmd.ipv4_rules_addr_shared = false;
+ cmd.ipv4_expansion_rules_addr_shared = false;
+ cmd.index_table_addr_shared = false;
+ cmd.index_table_expansion_addr_shared = false;
offset = UINT_MAX - ipa3_ctx->nat_mem.dma_handle;
@@ -448,62 +438,70 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
IPAERR("index_expn_offset: 0x%x\n",
init->index_expn_offset);
result = -EPERM;
- goto free_mem;
+ goto free_nop;
}
- cmd->ipv4_rules_addr =
+ cmd.ipv4_rules_addr =
ipa3_ctx->nat_mem.dma_handle + init->ipv4_rules_offset;
IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset);
- cmd->ipv4_expansion_rules_addr =
+ cmd.ipv4_expansion_rules_addr =
ipa3_ctx->nat_mem.dma_handle + init->expn_rules_offset;
IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset);
- cmd->index_table_addr =
+ cmd.index_table_addr =
ipa3_ctx->nat_mem.dma_handle + init->index_offset;
IPADBG("index_offset:0x%x\n", init->index_offset);
- cmd->index_table_expansion_addr =
+ cmd.index_table_expansion_addr =
ipa3_ctx->nat_mem.dma_handle + init->index_expn_offset;
IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
} else {
IPADBG("using shared(local) memory for nat table\n");
- cmd->ipv4_rules_addr_type = IPA_NAT_SHARED_MEMORY;
- cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SHARED_MEMORY;
- cmd->index_table_addr_type = IPA_NAT_SHARED_MEMORY;
- cmd->index_table_expansion_addr_type = IPA_NAT_SHARED_MEMORY;
+ cmd.ipv4_rules_addr_shared = true;
+ cmd.ipv4_expansion_rules_addr_shared = true;
+ cmd.index_table_addr_shared = true;
+ cmd.index_table_expansion_addr_shared = true;
- cmd->ipv4_rules_addr = init->ipv4_rules_offset +
+ cmd.ipv4_rules_addr = init->ipv4_rules_offset +
IPA_RAM_NAT_OFST;
- cmd->ipv4_expansion_rules_addr = init->expn_rules_offset +
+ cmd.ipv4_expansion_rules_addr = init->expn_rules_offset +
IPA_RAM_NAT_OFST;
- cmd->index_table_addr = init->index_offset +
+ cmd.index_table_addr = init->index_offset +
IPA_RAM_NAT_OFST;
- cmd->index_table_expansion_addr = init->index_expn_offset +
+ cmd.index_table_expansion_addr = init->index_expn_offset +
IPA_RAM_NAT_OFST;
}
- cmd->table_index = init->tbl_index;
- IPADBG("Table index:0x%x\n", cmd->table_index);
- cmd->size_base_tables = init->table_entries;
- IPADBG("Base Table size:0x%x\n", cmd->size_base_tables);
- cmd->size_expansion_tables = init->expn_table_entries;
- IPADBG("Expansion Table size:0x%x\n", cmd->size_expansion_tables);
- cmd->public_ip_addr = init->ip_addr;
- IPADBG("Public ip address:0x%x\n", cmd->public_ip_addr);
- desc[1].opcode = IPA_IP_V4_NAT_INIT;
+ cmd.table_index = init->tbl_index;
+ IPADBG("Table index:0x%x\n", cmd.table_index);
+ cmd.size_base_tables = init->table_entries;
+ IPADBG("Base Table size:0x%x\n", cmd.size_base_tables);
+ cmd.size_expansion_tables = init->expn_table_entries;
+ IPADBG("Expansion Table size:0x%x\n", cmd.size_expansion_tables);
+ cmd.public_ip_addr = init->ip_addr;
+ IPADBG("Public ip address:0x%x\n", cmd.public_ip_addr);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+ result = -EPERM;
+ goto free_nop;
+ }
+
+ desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_NAT_INIT);
desc[1].type = IPA_IMM_CMD_DESC;
desc[1].callback = NULL;
desc[1].user1 = NULL;
desc[1].user2 = 0;
- desc[1].pyld = (void *)cmd;
- desc[1].len = size;
+ desc[1].pyld = cmd_pyld->data;
+ desc[1].len = cmd_pyld->len;
IPADBG("posting v4 init command\n");
if (ipa3_send_cmd(2, desc)) {
IPAERR("Fail to send immediate command\n");
result = -EPERM;
- goto free_mem;
+ goto destroy_imm_cmd;
}
ipa3_ctx->nat_mem.public_ip_addr = init->ip_addr;
@@ -538,10 +536,10 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
IPADBG("return\n");
result = 0;
-free_mem:
- kfree(cmd);
+destroy_imm_cmd:
+ ipahal_destroy_imm_cmd(cmd_pyld);
free_nop:
- kfree(reg_write_nop);
+ ipahal_destroy_imm_cmd(nop_cmd_pyld);
bail:
return result;
}
@@ -558,8 +556,9 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
{
#define NUM_OF_DESC 2
- struct ipa3_register_write *reg_write_nop = NULL;
- struct ipa3_nat_dma *cmd = NULL;
+ struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
+ struct ipahal_imm_cmd_nat_dma cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
struct ipa3_desc *desc = NULL;
u16 size = 0, cnt = 0;
int ret = 0;
@@ -580,62 +579,53 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
goto bail;
}
- size = sizeof(struct ipa3_nat_dma);
- cmd = kzalloc(size, GFP_KERNEL);
- if (cmd == NULL) {
- IPAERR("Failed to alloc memory\n");
- ret = -ENOMEM;
- goto bail;
- }
-
/* NO-OP IC for ensuring that IPA pipeline is empty */
- reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
- if (!reg_write_nop) {
- IPAERR("Failed to alloc memory\n");
+ nop_cmd_pyld =
+ ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+ if (!nop_cmd_pyld) {
+ IPAERR("Failed to construct NOP imm cmd\n");
ret = -ENOMEM;
goto bail;
}
-
- reg_write_nop->skip_pipeline_clear = 0;
- reg_write_nop->pipeline_clear_options = IPA_HPS_CLEAR;
- reg_write_nop->value_mask = 0x0;
-
desc[0].type = IPA_IMM_CMD_DESC;
- desc[0].opcode = IPA_REGISTER_WRITE;
+ desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
desc[0].callback = NULL;
desc[0].user1 = NULL;
desc[0].user2 = 0;
- desc[0].len = sizeof(*reg_write_nop);
- desc[0].pyld = (void *)reg_write_nop;
+ desc[0].pyld = nop_cmd_pyld->data;
+ desc[0].len = nop_cmd_pyld->len;
for (cnt = 0; cnt < dma->entries; cnt++) {
- cmd->table_index = dma->dma[cnt].table_index;
- cmd->base_addr = dma->dma[cnt].base_addr;
- cmd->offset = dma->dma[cnt].offset;
- cmd->data = dma->dma[cnt].data;
-
+ cmd.table_index = dma->dma[cnt].table_index;
+ cmd.base_addr = dma->dma[cnt].base_addr;
+ cmd.offset = dma->dma[cnt].offset;
+ cmd.data = dma->dma[cnt].data;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_NAT_DMA, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("Fail to construct nat_dma imm cmd\n");
+ continue;
+ }
desc[1].type = IPA_IMM_CMD_DESC;
- desc[1].opcode = IPA_NAT_DMA;
+ desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_NAT_DMA);
desc[1].callback = NULL;
desc[1].user1 = NULL;
desc[1].user2 = 0;
- desc[1].len = sizeof(struct ipa3_nat_dma);
- desc[1].pyld = (void *)cmd;
+ desc[1].pyld = cmd_pyld->data;
+ desc[1].len = cmd_pyld->len;
ret = ipa3_send_cmd(NUM_OF_DESC, desc);
if (ret == -EPERM)
IPAERR("Fail to send immediate command %d\n", cnt);
+ ipahal_destroy_imm_cmd(cmd_pyld);
}
bail:
- if (cmd != NULL)
- kfree(cmd);
-
if (desc != NULL)
kfree(desc);
- if (reg_write_nop != NULL)
- kfree(reg_write_nop);
+ if (nop_cmd_pyld != NULL)
+ ipahal_destroy_imm_cmd(nop_cmd_pyld);
return ret;
}
@@ -677,18 +667,18 @@ void ipa3_nat_free_mem_and_device(struct ipa3_nat_mem *nat_ctx)
*/
int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
{
- struct ipa3_register_write *reg_write_nop;
+ struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
struct ipa3_desc desc[2];
- struct ipa3_ip_v4_nat_init *cmd;
- u16 size = sizeof(struct ipa3_ip_v4_nat_init);
- u8 mem_type = IPA_NAT_SHARED_MEMORY;
+ struct ipahal_imm_cmd_ip_v4_nat_init cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ bool mem_type_shared = true;
u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
int result;
IPADBG("\n");
if (ipa3_ctx->nat_mem.is_tmp_mem) {
IPAERR("using temp memory during nat del\n");
- mem_type = IPA_NAT_SYSTEM_MEMORY;
+ mem_type_shared = false;
base_addr = ipa3_ctx->nat_mem.tmp_dma_handle;
}
@@ -700,55 +690,52 @@ int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
memset(&desc, 0, sizeof(desc));
/* NO-OP IC for ensuring that IPA pipeline is empty */
- reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
- if (!reg_write_nop) {
- IPAERR("no mem\n");
+ nop_cmd_pyld =
+ ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+ if (!nop_cmd_pyld) {
+ IPAERR("Failed to construct NOP imm cmd\n");
result = -ENOMEM;
goto bail;
}
-
- reg_write_nop->skip_pipeline_clear = 0;
- reg_write_nop->pipeline_clear_options = IPA_HPS_CLEAR;
- reg_write_nop->value_mask = 0x0;
-
- desc[0].opcode = IPA_REGISTER_WRITE;
+ desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
desc[0].type = IPA_IMM_CMD_DESC;
desc[0].callback = NULL;
desc[0].user1 = NULL;
desc[0].user2 = 0;
- desc[0].pyld = (void *)reg_write_nop;
- desc[0].len = sizeof(*reg_write_nop);
-
- cmd = kmalloc(size, GFP_KERNEL);
- if (cmd == NULL) {
- IPAERR("Failed to alloc immediate command object\n");
- result = -ENOMEM;
- goto free_nop;
+ desc[0].pyld = nop_cmd_pyld->data;
+ desc[0].len = nop_cmd_pyld->len;
+
+ cmd.table_index = del->table_index;
+ cmd.ipv4_rules_addr = base_addr;
+ cmd.ipv4_rules_addr_shared = mem_type_shared;
+ cmd.ipv4_expansion_rules_addr = base_addr;
+ cmd.ipv4_expansion_rules_addr_shared = mem_type_shared;
+ cmd.index_table_addr = base_addr;
+ cmd.index_table_addr_shared = mem_type_shared;
+ cmd.index_table_expansion_addr = base_addr;
+ cmd.index_table_expansion_addr_shared = mem_type_shared;
+ cmd.size_base_tables = 0;
+ cmd.size_expansion_tables = 0;
+ cmd.public_ip_addr = 0;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+ result = -EPERM;
+ goto destroy_regwrt_imm_cmd;
}
- cmd->table_index = del->table_index;
- cmd->ipv4_rules_addr = base_addr;
- cmd->ipv4_rules_addr_type = mem_type;
- cmd->ipv4_expansion_rules_addr = base_addr;
- cmd->ipv4_expansion_rules_addr_type = mem_type;
- cmd->index_table_addr = base_addr;
- cmd->index_table_addr_type = mem_type;
- cmd->index_table_expansion_addr = base_addr;
- cmd->index_table_expansion_addr_type = mem_type;
- cmd->size_base_tables = 0;
- cmd->size_expansion_tables = 0;
- cmd->public_ip_addr = 0;
-
- desc[1].opcode = IPA_IP_V4_NAT_INIT;
+ desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_NAT_INIT);
desc[1].type = IPA_IMM_CMD_DESC;
desc[1].callback = NULL;
desc[1].user1 = NULL;
desc[1].user2 = 0;
- desc[1].pyld = (void *)cmd;
- desc[1].len = size;
+ desc[1].pyld = cmd_pyld->data;
+ desc[1].len = cmd_pyld->len;
+
if (ipa3_send_cmd(2, desc)) {
IPAERR("Fail to send immediate command\n");
result = -EPERM;
- goto free_mem;
+ goto destroy_imm_cmd;
}
ipa3_ctx->nat_mem.size_base_tables = 0;
@@ -762,10 +749,11 @@ int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
ipa3_nat_free_mem_and_device(&ipa3_ctx->nat_mem);
IPADBG("return\n");
result = 0;
-free_mem:
- kfree(cmd);
-free_nop:
- kfree(reg_write_nop);
+
+destroy_imm_cmd:
+ ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_regwrt_imm_cmd:
+ ipahal_destroy_imm_cmd(nop_cmd_pyld);
bail:
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 5d46ed1415d4..73c0ce10afe9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -590,11 +590,11 @@ int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
int rc;
/* check if the filter rules from IPACM is valid */
- if (req->filter_spec_list_len == 0) {
+ if (req->filter_spec_ex_list_len == 0) {
IPAWANDBG("IPACM pass zero rules to Q6\n");
} else {
IPAWANDBG("IPACM pass %d rules to Q6\n",
- req->filter_spec_list_len);
+ req->filter_spec_ex_list_len);
}
/* cache the qmi_filter_request */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipa_reg.h
deleted file mode 100644
index 4f06347ed50d..000000000000
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_reg.h
+++ /dev/null
@@ -1,318 +0,0 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __IPA_REG_H__
-#define __IPA_REG_H__
-
-/*
- * IPA HW 3.1 Registers
- */
-#define IPA_SUSPEND_IRQ_EN_EE_n_ADDR(n) (0x00003034 + 0x1000 * (n))
-#define IPA_SUSPEND_IRQ_CLR_EE_n_ADDR(n) (0x00003038 + 0x1000 * (n))
-/*
- * End of IPA 3.1 Registers
- */
-
-/*
-Common Registers
-*/
-
-#define IPA_IRQ_STTS_EE_n_ADDR(n) (0x00003008 + 0x1000 * (n))
-#define IPA_IRQ_EN_EE_n_ADDR(n) (0x0000300c + 0x1000 * (n))
-#define IPA_IRQ_CLR_EE_n_ADDR(n) (0x00003010 + 0x1000 * (n))
-
-#define IPA_IRQ_SUSPEND_INFO_EE_n_ADDR_v3_0(n) (0x00003098 + 0x1000 * (n))
-#define IPA_IRQ_SUSPEND_INFO_EE_n_ADDR_v3_1(n) (0x00003030 + 0x1000 * (n))
-
-#define IPA_BCR_OFST 0x000001D0
-#define IPA_COUNTER_CFG_OFST 0x000001f0
-#define IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK 0xF
-#define IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT 0x0
-#define IPA_COUNTER_CFG_AGGR_GRAN_BMSK 0x1F0
-#define IPA_COUNTER_CFG_AGGR_GRAN_SHFT 0x4
-
-#define IPA_ENABLED_PIPES_OFST 0x00000038
-
-#define IPA_REG_BASE_OFST_v3_0 0x00040000
-#define IPA_COMP_SW_RESET_OFST 0x00000040
-
-#define IPA_VERSION_OFST 0x00000034
-#define IPA_COMP_HW_VERSION_OFST 0x00000030
-
-#define IPA_SPARE_REG_1_OFST (0x00005090)
-#define IPA_SPARE_REG_2_OFST (0x00005094)
-
-#define IPA_SHARED_MEM_SIZE_OFST_v3_0 0x00000054
-#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v3_0 0xffff0000
-#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v3_0 0x10
-#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v3_0 0xffff
-#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v3_0 0x0
-
-#define IPA_ENDP_INIT_ROUTE_N_OFST_v3_0(n) (0x00000828 + 0x70 * (n))
-#define IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK 0x1f
-#define IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT 0x0
-
-#define IPA_ROUTE_OFST_v3_0 0x00000048
-
-#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
-#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
-#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
-#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
-#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
-#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0X40
-#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
-#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
-#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000
-#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11
-#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK 0x1000000
-#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT 0x18
-
-#define IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(n) (0x00007000 + 0x4 * (n))
-
-#define IPA_COMP_CFG_OFST 0x0000003C
-
-#define IPA_STATE_AGGR_ACTIVE_OFST 0x0000010C
-
-#define IPA_AGGR_FORCE_CLOSE_OFST 0x000001EC
-#define IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK 0x3FFFFFFF
-#define IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0
-
-#define IPA_ENDP_INIT_AGGR_N_OFST_v3_0(n) (0x00000824 + 0x70 * (n))
-#define IPA_ENDP_INIT_AGGR_N_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK 0x1000000
-#define IPA_ENDP_INIT_AGGR_N_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT 0x18
-#define IPA_ENDP_INIT_AGGR_N_AGGR_FORCE_CLOSE_BMSK 0x400000
-#define IPA_ENDP_INIT_AGGR_N_AGGR_FORCE_CLOSE_SHFT 0x16
-#define IPA_ENDP_INIT_AGGR_N_AGGR_SW_EOF_ACTIVE_BMSK 0x200000
-#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x15
-#define IPA_ENDP_INIT_AGGR_N_AGGR_PKT_LIMIT_BMSK 0x1f8000
-#define IPA_ENDP_INIT_AGGR_N_AGGR_PKT_LIMIT_SHFT 0xf
-#define IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK 0x7c00
-#define IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT 0xa
-#define IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK 0x3e0
-#define IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT 0x5
-#define IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK 0x1c
-#define IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT 0x2
-#define IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK 0x3
-#define IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT 0x0
-
-#define IPA_ENDP_INIT_MODE_N_OFST_v3_0(n) (0x00000820 + 0x70 * (n))
-#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v3_0 0x1f0
-#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v3_0 0x4
-#define IPA_ENDP_INIT_MODE_N_MODE_BMSK 0x7
-#define IPA_ENDP_INIT_MODE_N_MODE_SHFT 0x0
-
-#define IPA_ENDP_INIT_HDR_N_OFST_v3_0(n) (0x00000810 + 0x70 * (n))
-#define IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK 0x3f
-#define IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT 0x0
-#define IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
-#define IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
-#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
-#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT 0x14
-#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
-#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
-#define IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2 0x10000000
-#define IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2 0x1c
-#define IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2 0x8000000
-#define IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2 0x1b
-#define IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK 0x4000000
-#define IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT 0x1a
-#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK 0x40
-#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT 0x6
-#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT 0x7
-#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK 0x1f80
-
-#define IPA_ENDP_INIT_NAT_N_OFST_v3_0(n) (0x0000080C + 0x70 * (n))
-#define IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK 0x3
-#define IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT 0x0
-
-#define IPA_ENDP_INIT_HDR_EXT_n_OFST_v3_0(n) (0x00000814 + 0x70 * (n))
-#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1
-#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0
-#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2
-#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1
-#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4
-#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2
-#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8
-#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3
-#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0
-#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4
-#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa
-#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0 0x3c00
-
-#define IPA_SINGLE_NDP_MODE_OFST 0x00000068
-#define IPA_QCNCM_OFST 0x00000064
-
-#define IPA_ENDP_INIT_CTRL_N_OFST(n) (0x00000800 + 0x70 * (n))
-#define IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK 0x1
-#define IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT 0x0
-#define IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK 0x2
-#define IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT 0x1
-
-#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v3_0(n) (0x0000082c + 0x70 * (n))
-#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_RMSK 0x1
-#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_MAX 19
-#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_BMSK 0x1
-#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_SHFT 0x0
-
-#define IPA_ENDP_INIT_DEAGGR_n_OFST_v3_0(n) (0x00000834 + 0x70 * (n))
-#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F
-#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0
-#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x80
-#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x7
-#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00
-#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8
-#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000
-#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10
-
-#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v3_0(n) (0x00000830 + 0x70 * (n))
-#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_RMSK 0x1ff
-#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_MAX 19
-#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_BMSK 0x1ff
-#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_SHFT 0x0
-#define IPA_DEBUG_CNT_REG_N_OFST_v3_0(n) (0x00000600 + 0x4 * (n))
-#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff
-#define IPA_DEBUG_CNT_REG_N_MAX 15
-#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff
-#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0
-
-#define IPA_DEBUG_CNT_CTRL_N_OFST_v3_0(n) (0x00000640 + 0x4 * (n))
-#define IPA_DEBUG_CNT_CTRL_N_RMSK 0x1ff1f171
-#define IPA_DEBUG_CNT_CTRL_N_MAX 15
-#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_RULE_INDEX_BMSK 0x1ff00000
-#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_RULE_INDEX_SHFT 0x14
-#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000
-#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_SOURCE_PIPE_SHFT 0xc
-#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_PRODUCT_BMSK 0x100
-#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_PRODUCT_SHFT 0x8
-#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_TYPE_BMSK 0x70
-#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_TYPE_SHFT 0x4
-#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_EN_BMSK 0x1
-#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_EN_SHFT 0x0
-
-#define IPA_ENDP_STATUS_n_OFST(n) (0x00000840 + 0x70 * (n))
-#define IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK 0x100
-#define IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT 0x8
-#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
-#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1
-#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
-#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
-
-#define IPA_ENDP_INIT_CFG_n_OFST(n) (0x00000808 + 0x70 * (n))
-#define IPA_ENDP_INIT_CFG_n_RMSK 0x7f
-#define IPA_ENDP_INIT_CFG_n_MAXn 19
-#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78
-#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3
-#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6
-#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1
-#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1
-#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0
-
-#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(n) (0x00000818 + 0x70 * (n))
-#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_RMSK 0xffffffff
-#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_MAXn 19
-#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff
-#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0
-
-#define IPA_ENDP_INIT_HDR_METADATA_n_OFST(n) (0x0000081c + 0x70 * (n))
-#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000
-#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10
-
-#define IPA_ENDP_INIT_RSRC_GRP_n(n) (0x00000838 + 0x70 * (n))
-#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7
-#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0
-#define IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n(n) (0x00000400 + 0x20 * (n))
-#define IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n(n) (0x00000404 + 0x20 * (n))
-#define IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n(n) (0x00000408 + 0x20 * (n))
-#define IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n(n) (0x0000040C + 0x20 * (n))
-
-#define IPA_DST_RSRC_GRP_01_RSRC_TYPE_n(n) (0x00000500 + 0x20 * (n))
-#define IPA_DST_RSRC_GRP_23_RSRC_TYPE_n(n) (0x00000504 + 0x20 * (n))
-#define IPA_DST_RSRC_GRP_45_RSRC_TYPE_n(n) (0x00000508 + 0x20 * (n))
-#define IPA_DST_RSRC_GRP_67_RSRC_TYPE_n(n) (0x0000050c + 0x20 * (n))
-
-#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK 0xFF000000
-#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT 24
-#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK 0xFF0000
-#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT 16
-#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK 0xFF00
-#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT 8
-#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK 0xFF
-#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT 0
-
-#define IPA_RX_HPS_CLIENTS_MIN_DEPTH_0 0x000023C4
-#define IPA_RX_HPS_CLIENTS_MIN_DEPTH_1 0x000023C8
-#define IPA_RX_HPS_CLIENTS_MAX_DEPTH_0 0x000023CC
-#define IPA_RX_HPS_CLIENTS_MAX_DEPTH_1 0x000023D0
-
-#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n)))
-#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(n) (8 * (n))
-
-#define IPA_IRQ_EE_UC_n_OFFS(n) (0x0000301c + 0x1000 * (n))
-
-#define IPA_UC_MAILBOX_m_n_OFFS_v3_0(m, n) (0x00032000 + 0x80 * (m) + 0x4 * (n))
-
-#define IPA_SYS_PKT_PROC_CNTXT_BASE_OFST (0x000001e0)
-#define IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFST (0x000001e8)
-
-#define IPA_FILT_ROUT_HASH_FLUSH_OFST (0x00000090)
-#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT (12)
-#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT (8)
-#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT (4)
-#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT (0)
-
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OFST(n) (0x0000085C + 0x70 * (n))
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT 0
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK 0x1
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT 1
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK 0x2
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT 2
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK 0x4
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT 3
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK 0x8
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT 4
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK 0x10
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT 5
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK 0x20
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT 6
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK 0x40
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT 16
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK 0x10000
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT 17
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK 0x20000
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT 18
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK 0x40000
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT 19
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK 0x80000
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT 20
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK 0x100000
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT 21
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK 0x200000
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT 22
-#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK 0x400000
-
-#define IPA_ENDP_INIT_SEQ_n_OFST(n) (0x0000083C + 0x70*(n))
-#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_BMSK 0xf000
-#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_SHFT 0xc
-#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_BMSK 0xf00
-#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_SHFT 0x8
-#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_BMSK 0xf0
-#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_SHFT 0x4
-#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_BMSK 0xf
-#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_SHFT 0x0
-
-#define IPA_ENDP_GSI_CFG_TLV_n_OFST(n) (0x850 + 0x70 * (n))
-#define IPA_ENDP_GSI_CFG_AOS_n_OFST(n) (0x854 + 0x70 * (n))
-#define IPA_ENDP_GSI_CFG2_n_OFST(n) (0x858 + 0x70 * (n))
-#define IPA_ENDP_GSI_CFG1_n_OFST(n) (0x5504 + 0x4 * (n))
-#define IPA_ENABLE_GSI_OFST 0x5500
-
-#endif
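
Editorial note (not part of the patch): the *_SHFT/*_BMSK pairs removed above were consumed by the driver's old field-packing helper, which the ipahal layer now replaces. A rough sketch of that legacy pattern, assuming the helper macro packed a value into a register word roughly as shown:

	/* illustrative only -- ipahal now owns register field packing */
	#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
		((reg) |= ((val) << (shift)) & (mask))

	u32 reg_val = 0;
	IPA_SETFIELD_IN_REG(reg_val, tuple->src_id,
		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK);
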
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rm.c
index 0748a79c7ef4..3ee117a8502d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
static const char *ipa3_resource_name_to_str[IPA_RM_RESOURCE_MAX] = {
__stringify(IPA_RM_RESOURCE_Q6_PROD),
__stringify(IPA_RM_RESOURCE_USB_PROD),
+ __stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD),
__stringify(IPA_RM_RESOURCE_HSIC_PROD),
__stringify(IPA_RM_RESOURCE_STD_ECM_PROD),
__stringify(IPA_RM_RESOURCE_RNDIS_PROD),
@@ -29,6 +30,7 @@ static const char *ipa3_resource_name_to_str[IPA_RM_RESOURCE_MAX] = {
__stringify(IPA_RM_RESOURCE_MHI_PROD),
__stringify(IPA_RM_RESOURCE_Q6_CONS),
__stringify(IPA_RM_RESOURCE_USB_CONS),
+ __stringify(IPA_RM_RESOURCE_USB_DPL_CONS),
__stringify(IPA_RM_RESOURCE_HSIC_CONS),
__stringify(IPA_RM_RESOURCE_WLAN_CONS),
__stringify(IPA_RM_RESOURCE_APPS_CONS),
@@ -394,6 +396,7 @@ bail:
return result;
}
+
/**
* ipa3_rm_release_resource() - release resource
* @resource_name: [in] name of the requested resource
@@ -530,6 +533,9 @@ int ipa3_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
unsigned long flags;
struct ipa_rm_resource *resource;
+ IPADBG("resource: %s ", ipa3_rm_resource_str(resource_name));
+ if (profile)
+ IPADBG("BW: %d\n", profile->max_supported_bandwidth_mbps);
IPA_RM_DBG("%s\n", ipa3_rm_resource_str(resource_name));
spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
@@ -662,14 +668,16 @@ static void ipa3_rm_wq_resume_handler(struct work_struct *work)
IPA_RM_ERR("resource is not CONS\n");
return;
}
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_RESOURCE(ipa3_rm_resource_str(
+ ipa_rm_work->resource_name));
spin_lock_irqsave(&ipa3_rm_ctx->ipa_rm_lock, flags);
if (ipa3_rm_dep_graph_get_resource(ipa3_rm_ctx->dep_graph,
ipa_rm_work->resource_name,
&resource) != 0){
IPA_RM_ERR("resource does not exists\n");
spin_unlock_irqrestore(&ipa3_rm_ctx->ipa_rm_lock, flags);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa3_rm_resource_str(
+ ipa_rm_work->resource_name));
goto bail;
}
ipa3_rm_resource_consumer_request_work(
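
Editorial note (not part of the patch): the resume handler now tags its clock vote with the owning resource instead of taking an anonymous reference. A hedged sketch of the calling pattern used by the non-blocking variant, with names taken from this patch:

	struct ipa3_active_client_logging_info log_info;

	IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
		ipa3_rm_resource_str(resource_name));
	if (ipa3_inc_client_enable_clks_no_block(&log_info) != 0) {
		/* clocks not yet up: defer to the async resume work instead */
		return;
	}
	/* ... resource work with IPA clocks held ... */
	IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa3_rm_resource_str(resource_name));
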
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_inactivity_timer.c
index 0e9d3d3bbedb..cd72b058b00d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_inactivity_timer.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_inactivity_timer.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -70,22 +70,22 @@ static void ipa3_rm_inactivity_timer_func(struct work_struct *work)
work);
unsigned long flags;
- IPADBG("%s: timer expired for resource %d!\n", __func__,
+ IPADBG_LOW("%s: timer expired for resource %d!\n", __func__,
me->resource_name);
spin_lock_irqsave(
&ipa3_rm_it_handles[me->resource_name].lock, flags);
if (ipa3_rm_it_handles[me->resource_name].reschedule_work) {
- IPADBG("%s: setting delayed work\n", __func__);
+ IPADBG_LOW("%s: setting delayed work\n", __func__);
ipa3_rm_it_handles[me->resource_name].reschedule_work = false;
schedule_delayed_work(
&ipa3_rm_it_handles[me->resource_name].work,
ipa3_rm_it_handles[me->resource_name].jiffies);
} else if (ipa3_rm_it_handles[me->resource_name].resource_requested) {
- IPADBG("%s: not calling release\n", __func__);
+ IPADBG_LOW("%s: not calling release\n", __func__);
ipa3_rm_it_handles[me->resource_name].work_in_progress = false;
} else {
- IPADBG("%s: calling release_resource on resource %d!\n",
+ IPADBG_LOW("%s: calling release_resource on resource %d!\n",
__func__, me->resource_name);
ipa3_rm_release_resource(me->resource_name);
ipa3_rm_it_handles[me->resource_name].work_in_progress = false;
@@ -110,7 +110,7 @@ static void ipa3_rm_inactivity_timer_func(struct work_struct *work)
int ipa3_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
unsigned long msecs)
{
- IPADBG("%s: resource %d\n", __func__, resource_name);
+ IPADBG_LOW("%s: resource %d\n", __func__, resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
@@ -150,7 +150,7 @@ int ipa3_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
*/
int ipa3_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
{
- IPADBG("%s: resource %d\n", __func__, resource_name);
+ IPADBG_LOW("%s: resource %d\n", __func__, resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
@@ -190,7 +190,7 @@ int ipa3_rm_inactivity_timer_request_resource(
int ret;
unsigned long flags;
- IPADBG("%s: resource %d\n", __func__, resource_name);
+ IPADBG_LOW("%s: resource %d\n", __func__, resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
@@ -207,7 +207,8 @@ int ipa3_rm_inactivity_timer_request_resource(
ipa3_rm_it_handles[resource_name].resource_requested = true;
spin_unlock_irqrestore(&ipa3_rm_it_handles[resource_name].lock, flags);
ret = ipa3_rm_request_resource(resource_name);
- IPADBG("%s: resource %d: returning %d\n", __func__, resource_name, ret);
+ IPADBG_LOW("%s: resource %d: returning %d\n", __func__,
+ resource_name, ret);
return ret;
}
@@ -232,7 +233,7 @@ int ipa3_rm_inactivity_timer_release_resource(
{
unsigned long flags;
- IPADBG("%s: resource %d\n", __func__, resource_name);
+ IPADBG_LOW("%s: resource %d\n", __func__, resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
@@ -248,7 +249,7 @@ int ipa3_rm_inactivity_timer_release_resource(
spin_lock_irqsave(&ipa3_rm_it_handles[resource_name].lock, flags);
ipa3_rm_it_handles[resource_name].resource_requested = false;
if (ipa3_rm_it_handles[resource_name].work_in_progress) {
- IPADBG("%s: Timer already set, not scheduling again %d\n",
+ IPADBG_LOW("%s: Timer already set, not scheduling again %d\n",
__func__, resource_name);
ipa3_rm_it_handles[resource_name].reschedule_work = true;
spin_unlock_irqrestore(
@@ -257,7 +258,7 @@ int ipa3_rm_inactivity_timer_release_resource(
}
ipa3_rm_it_handles[resource_name].work_in_progress = true;
ipa3_rm_it_handles[resource_name].reschedule_work = false;
- IPADBG("%s: setting delayed work\n", __func__);
+ IPADBG_LOW("%s: setting delayed work\n", __func__);
schedule_delayed_work(&ipa3_rm_it_handles[resource_name].work,
ipa3_rm_it_handles[resource_name].jiffies);
spin_unlock_irqrestore(&ipa3_rm_it_handles[resource_name].lock, flags);
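
Editorial note (not part of the patch): these handles give clients a delayed release -- a request takes the resource immediately, while a release only schedules delayed work that drops it once the timer fires without a new request. A minimal usage sketch under that assumption (the resource name is illustrative):

	/* hold the resource for ~1000 ms after the last release */
	ipa3_rm_inactivity_timer_init(IPA_RM_RESOURCE_USB_PROD, 1000);

	ipa3_rm_inactivity_timer_request_resource(IPA_RM_RESOURCE_USB_PROD);
	/* ... move data ... */
	ipa3_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_USB_PROD);

	ipa3_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_USB_PROD);
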
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c
index db632f702974..4566b8c4ea84 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rm_resource.c
@@ -31,6 +31,7 @@ int ipa3_rm_prod_index(enum ipa_rm_resource_name resource_name)
switch (resource_name) {
case IPA_RM_RESOURCE_Q6_PROD:
case IPA_RM_RESOURCE_USB_PROD:
+ case IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD:
case IPA_RM_RESOURCE_HSIC_PROD:
case IPA_RM_RESOURCE_STD_ECM_PROD:
case IPA_RM_RESOURCE_RNDIS_PROD:
@@ -68,6 +69,7 @@ int ipa3_rm_cons_index(enum ipa_rm_resource_name resource_name)
case IPA_RM_RESOURCE_APPS_CONS:
case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
case IPA_RM_RESOURCE_MHI_CONS:
+ case IPA_RM_RESOURCE_USB_DPL_CONS:
break;
default:
result = IPA_RM_INDEX_INVALID;
@@ -149,6 +151,7 @@ int ipa3_rm_resource_consumer_request(
{
int result = 0;
enum ipa3_rm_resource_state prev_state;
+ struct ipa3_active_client_logging_info log_info;
IPA_RM_DBG("%s state: %d\n",
ipa3_rm_resource_str(consumer->resource.name),
@@ -161,8 +164,10 @@ int ipa3_rm_resource_consumer_request(
case IPA_RM_RELEASE_IN_PROGRESS:
reinit_completion(&consumer->request_consumer_in_progress);
consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
+ IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
+ ipa3_rm_resource_str(consumer->resource.name));
if (prev_state == IPA_RM_RELEASE_IN_PROGRESS ||
- ipa3_inc_client_enable_clks_no_block() != 0) {
+ ipa3_inc_client_enable_clks_no_block(&log_info) != 0) {
IPA_RM_DBG("async resume work for %s\n",
ipa3_rm_resource_str(consumer->resource.name));
ipa3_rm_wq_send_resume_cmd(consumer->resource.name,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 7fcf1824c654..b70fd03d492e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,7 @@
#include <linux/bitops.h>
#include <linux/idr.h>
#include "ipa_i.h"
+#include "ipahal/ipahal.h"
#define IPA_RT_INDEX_BITMAP_SIZE (32)
#define IPA_RT_STATUS_OF_ADD_FAILED (-1)
@@ -110,7 +111,7 @@ int __ipa_generate_rt_hw_rule_v3_0(enum ipa_ip_type ip,
return -EPERM;
}
- IPADBG("en_rule 0x%x\n", en_rule);
+ IPADBG_LOW("en_rule 0x%x\n", en_rule);
rule_hdr->u.hdr.en_rule = en_rule;
ipa3_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
@@ -271,7 +272,7 @@ static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
for (i = 0; i < IPA_RULE_TYPE_MAX; i++) {
if (tbl->prev_mem[i].phys_base) {
- IPADBG(
+ IPADBG_LOW(
"reaping sys rt tbl name=%s ip=%d rlt=%d\n",
tbl->name, ip, i);
dma_free_coherent(ipa3_ctx->pdev,
@@ -289,7 +290,7 @@ static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
for (i = 0; i < IPA_RULE_TYPE_MAX; i++) {
WARN_ON(tbl->prev_mem[i].phys_base != 0);
if (tbl->curr_mem[i].phys_base) {
- IPADBG(
+ IPADBG_LOW(
"reaping sys rt tbl name=%s ip=%d rlt=%d\n",
tbl->name, ip, i);
dma_free_coherent(ipa3_ctx->pdev,
@@ -399,7 +400,7 @@ static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip,
return -EPERM;
}
- IPADBG("RT rule id (handle) %d hw_len %u priority %u\n",
+ IPADBG_LOW("RT rule id (handle) %d hw_len %u priority %u\n",
entry->id, entry->hw_len, entry->prio);
if (entry->rule.hashable)
@@ -419,7 +420,7 @@ static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip,
if (tbl->sz[IPA_RULE_NON_HASHABLE])
tbl->sz[IPA_RULE_NON_HASHABLE] += IPA_HW_TBL_HDR_WIDTH;
- IPADBG("RT tbl index %u hash_sz %u non-hash sz %u\n", tbl->idx,
+ IPADBG_LOW("RT tbl index %u hash_sz %u non-hash sz %u\n", tbl->idx,
tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
return 0;
@@ -528,7 +529,7 @@ static int ipa_generate_rt_hw_tbl_img(enum ipa_ip_type ip,
}
ipa_get_rt_tbl_lcl_bdy_size(ip, &hash_bdy_sz, &nhash_bdy_sz);
- IPADBG("total rt tbl local body sizes: hash %u nhash %u\n",
+ IPADBG_LOW("total rt tbl local body sizes: hash %u nhash %u\n",
hash_bdy_sz, nhash_bdy_sz);
hash_bdy->size = hash_bdy_sz + IPA_HW_TBL_BLK_SIZE_ALIGNMENT;
@@ -634,8 +635,9 @@ static bool ipa_rt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
int __ipa_commit_rt_v3(enum ipa_ip_type ip)
{
struct ipa3_desc desc[5];
- struct ipa3_register_write reg_write_cmd = {0};
- struct ipa3_hw_imm_cmd_dma_shared_mem mem_cmd[4];
+ struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+ struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
+ struct ipahal_imm_cmd_pyld *cmd_pyld[5];
int num_cmd = 0;
struct ipa3_mem_buffer hash_bdy, nhash_bdy;
struct ipa3_mem_buffer hash_hdr, nhash_hdr;
@@ -644,9 +646,12 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
u32 lcl_hash_hdr, lcl_nhash_hdr;
u32 lcl_hash_bdy, lcl_nhash_bdy;
bool lcl_hash, lcl_nhash;
+ struct ipahal_reg_fltrt_hash_flush flush;
+ struct ipahal_reg_valmask valmask;
+ int i;
memset(desc, 0, sizeof(desc));
- memset(mem_cmd, 0, sizeof(mem_cmd));
+ memset(cmd_pyld, 0, sizeof(cmd_pyld));
if (ip == IPA_IP_v4) {
num_modem_rt_index =
@@ -700,64 +705,107 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
}
/* flushing ipa internal hashable rt rules cache */
- reg_write_cmd.skip_pipeline_clear = 0;
- reg_write_cmd.pipeline_clear_options = IPA_HPS_CLEAR;
- reg_write_cmd.offset = IPA_FILT_ROUT_HASH_FLUSH_OFST;
- reg_write_cmd.value = (ip == IPA_IP_v4) ?
- (1 << IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT) :
- (1 << IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT);
- reg_write_cmd.value_mask = reg_write_cmd.value;
- desc[num_cmd].opcode = IPA_REGISTER_WRITE;
- desc[num_cmd].pyld = &reg_write_cmd;
- desc[num_cmd].len = sizeof(reg_write_cmd);
+ memset(&flush, 0, sizeof(flush));
+ if (ip == IPA_IP_v4)
+ flush.v4_rt = true;
+ else
+ flush.v6_rt = true;
+ ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+ reg_write_cmd.skip_pipeline_clear = false;
+ reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
+ reg_write_cmd.value = valmask.val;
+ reg_write_cmd.value_mask = valmask.mask;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("fail construct register_write imm cmd. IP %d\n", ip);
+ goto fail_size_valid;
+ }
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
num_cmd++;
- mem_cmd[num_cmd - 1].skip_pipeline_clear = 0;
- mem_cmd[num_cmd - 1].pipeline_clear_options = IPA_HPS_CLEAR;
- mem_cmd[num_cmd - 1].size = nhash_hdr.size;
- mem_cmd[num_cmd - 1].system_addr = nhash_hdr.phys_base;
- mem_cmd[num_cmd - 1].local_addr = lcl_nhash_hdr;
- desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmd].pyld = &mem_cmd[num_cmd - 1];
- desc[num_cmd].len = sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = nhash_hdr.size;
+ mem_cmd.system_addr = nhash_hdr.phys_base;
+ mem_cmd.local_addr = lcl_nhash_hdr;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
+ goto fail_imm_cmd_construct;
+ }
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
num_cmd++;
- mem_cmd[num_cmd - 1].skip_pipeline_clear = 0;
- mem_cmd[num_cmd - 1].pipeline_clear_options = IPA_HPS_CLEAR;
- mem_cmd[num_cmd - 1].size = hash_hdr.size;
- mem_cmd[num_cmd - 1].system_addr = hash_hdr.phys_base;
- mem_cmd[num_cmd - 1].local_addr = lcl_hash_hdr;
- desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmd].pyld = &mem_cmd[num_cmd - 1];
- desc[num_cmd].len = sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = hash_hdr.size;
+ mem_cmd.system_addr = hash_hdr.phys_base;
+ mem_cmd.local_addr = lcl_hash_hdr;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
+ goto fail_imm_cmd_construct;
+ }
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
num_cmd++;
if (lcl_nhash) {
- mem_cmd[num_cmd - 1].skip_pipeline_clear = 0;
- mem_cmd[num_cmd - 1].pipeline_clear_options = IPA_HPS_CLEAR;
- mem_cmd[num_cmd - 1].size = nhash_bdy.size;
- mem_cmd[num_cmd - 1].system_addr = nhash_bdy.phys_base;
- mem_cmd[num_cmd - 1].local_addr = lcl_nhash_bdy;
- desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmd].pyld = &mem_cmd[num_cmd - 1];
- desc[num_cmd].len =
- sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = nhash_bdy.size;
+ mem_cmd.system_addr = nhash_bdy.phys_base;
+ mem_cmd.local_addr = lcl_nhash_bdy;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("fail construct dma_shared_mem cmd. IP %d\n",
+ ip);
+ goto fail_imm_cmd_construct;
+ }
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
num_cmd++;
}
if (lcl_hash) {
- mem_cmd[num_cmd - 1].skip_pipeline_clear = 0;
- mem_cmd[num_cmd - 1].pipeline_clear_options = IPA_HPS_CLEAR;
- mem_cmd[num_cmd - 1].size = hash_bdy.size;
- mem_cmd[num_cmd - 1].system_addr = hash_bdy.phys_base;
- mem_cmd[num_cmd - 1].local_addr = lcl_hash_bdy;
- desc[num_cmd].opcode = IPA_DMA_SHARED_MEM;
- desc[num_cmd].pyld = &mem_cmd[num_cmd - 1];
- desc[num_cmd].len =
- sizeof(struct ipa3_hw_imm_cmd_dma_shared_mem);
+ mem_cmd.is_read = false;
+ mem_cmd.skip_pipeline_clear = false;
+ mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ mem_cmd.size = hash_bdy.size;
+ mem_cmd.system_addr = hash_bdy.phys_base;
+ mem_cmd.local_addr = lcl_hash_bdy;
+ cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+ if (!cmd_pyld[num_cmd]) {
+ IPAERR("fail construct dma_shared_mem cmd. IP %d\n",
+ ip);
+ goto fail_imm_cmd_construct;
+ }
+ desc[num_cmd].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+ desc[num_cmd].len = cmd_pyld[num_cmd]->len;
desc[num_cmd].type = IPA_IMM_CMD_DESC;
num_cmd++;
}
@@ -765,13 +813,13 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
if (ipa3_send_cmd(num_cmd, desc)) {
IPAERR("fail to send immediate command\n");
rc = -EFAULT;
- goto fail_size_valid;
+ goto fail_imm_cmd_construct;
}
- IPADBG("Hashable HEAD\n");
+ IPADBG_LOW("Hashable HEAD\n");
IPA_DUMP_BUFF(hash_hdr.base, hash_hdr.phys_base, hash_hdr.size);
- IPADBG("Non-Hashable HEAD\n");
+ IPADBG_LOW("Non-Hashable HEAD\n");
IPA_DUMP_BUFF(nhash_hdr.base, nhash_hdr.phys_base, nhash_hdr.size);
if (hash_bdy.size) {
@@ -788,6 +836,9 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
__ipa_reap_sys_rt_tbls(ip);
+fail_imm_cmd_construct:
+ for (i = 0 ; i < num_cmd ; i++)
+ ipahal_destroy_imm_cmd(cmd_pyld[i]);
fail_size_valid:
dma_free_coherent(ipa3_ctx->pdev, hash_hdr.size,
hash_hdr.base, hash_hdr.phys_base);
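
Editorial note (not part of the patch): the commit path above now follows a construct/send/destroy lifecycle for each immediate command instead of pointing descriptors at stack-allocated register structs. A condensed sketch of that lifecycle for a single DMA_SHARED_MEM command, error handling trimmed:

	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
	struct ipahal_imm_cmd_pyld *pyld;
	struct ipa3_desc desc;
	int rc;

	mem_cmd.is_read = false;
	mem_cmd.skip_pipeline_clear = false;
	mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	mem_cmd.size = nhash_hdr.size;
	mem_cmd.system_addr = nhash_hdr.phys_base;
	mem_cmd.local_addr = lcl_nhash_hdr;

	pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_DMA_SHARED_MEM,
		&mem_cmd, false);
	if (!pyld)
		return -ENOMEM;

	memset(&desc, 0, sizeof(desc));
	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
	desc.pyld = pyld->data;
	desc.len = pyld->len;
	desc.type = IPA_IMM_CMD_DESC;

	/* payload must stay allocated until the send completes */
	rc = ipa3_send_cmd(1, &desc);
	ipahal_destroy_imm_cmd(pyld);
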
@@ -1717,48 +1768,6 @@ bail:
return result;
}
-static u32 ipa3_build_rt_tuple_mask(struct ipa3_hash_tuple *tpl)
-{
- u32 msk = 0;
-
- IPA_SETFIELD_IN_REG(msk, tpl->src_id,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK
- );
-
- IPA_SETFIELD_IN_REG(msk, tpl->src_ip_addr,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK
- );
-
- IPA_SETFIELD_IN_REG(msk, tpl->dst_ip_addr,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK
- );
-
- IPA_SETFIELD_IN_REG(msk, tpl->src_port,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK
- );
-
- IPA_SETFIELD_IN_REG(msk, tpl->dst_port,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK
- );
-
- IPA_SETFIELD_IN_REG(msk, tpl->protocol,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK
- );
-
- IPA_SETFIELD_IN_REG(msk, tpl->meta_data,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK
- );
-
- return msk;
-}
-
/**
* ipa3_set_rt_tuple_mask() - Sets the rt tuple masking for the given tbl
* table index must be for AP EP (not modem)
@@ -1769,10 +1778,9 @@ static u32 ipa3_build_rt_tuple_mask(struct ipa3_hash_tuple *tpl)
* Returns: 0 on success, negative on failure
*
*/
-int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipa3_hash_tuple *tuple)
+int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple)
{
- u32 val;
- u32 mask;
+ struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
if (!tuple) {
IPAERR("bad tuple\n");
@@ -1799,19 +1807,11 @@ int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipa3_hash_tuple *tuple)
return -EINVAL;
}
- val = ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OFST(tbl_idx));
-
- val &= 0x0000FFFF; /* clear 16 MSBs - rt bits */
-
- mask = ipa3_build_rt_tuple_mask(tuple);
- mask &= 0xFFFF0000;
-
- val |= mask;
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_OFST(tbl_idx),
- val);
+ ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+ tbl_idx, &fltrt_tuple);
+ fltrt_tuple.rt = *tuple;
+ ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+ tbl_idx, &fltrt_tuple);
return 0;
}
@@ -1885,10 +1885,11 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx,
IPADBG("tbl_entry_in_hdr_ofst=0x%llx\n", tbl_entry_in_hdr_ofst);
tbl_entry_in_hdr = ipa3_ctx->mmio +
- IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(0) + tbl_entry_in_hdr_ofst;
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+ tbl_entry_in_hdr_ofst;
/* for tables which reside in DDR access it from the virtual memory */
- if (*tbl_entry_in_hdr & 0x0) {
+ if (!(*tbl_entry_in_hdr & 0x1)) {
/* system */
struct ipa3_rt_tbl_set *set;
struct ipa3_rt_tbl *tbl;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
new file mode 100644
index 000000000000..5ea6c6daf240
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
@@ -0,0 +1,135 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ipa
+#define TRACE_INCLUDE_FILE ipa_trace
+
+#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _IPA_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(
+ intr_to_poll3,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ poll_to_intr3,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ idle_sleep_enter3,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ idle_sleep_exit3,
+
+ TP_PROTO(unsigned long client),
+
+ TP_ARGS(client),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, client)
+ ),
+
+ TP_fast_assign(
+ __entry->client = client;
+ ),
+
+ TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+ rmnet_ipa_netifni3,
+
+ TP_PROTO(unsigned long rx_pkt_cnt),
+
+ TP_ARGS(rx_pkt_cnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, rx_pkt_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->rx_pkt_cnt = rx_pkt_cnt;
+ ),
+
+ TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+ rmnet_ipa_netifrx3,
+
+ TP_PROTO(unsigned long rx_pkt_cnt),
+
+ TP_ARGS(rx_pkt_cnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, rx_pkt_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->rx_pkt_cnt = rx_pkt_cnt;
+ ),
+
+ TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+#endif /* _IPA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
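
Editorial note (not part of the patch): as with other kernel tracepoint headers, exactly one compilation unit in the driver is expected to define CREATE_TRACE_POINTS before including this file; callers then emit the events with the generated trace_* helpers. A minimal sketch:

	/* in exactly one .c file of the driver */
	#define CREATE_TRACE_POINTS
	#include "ipa_trace.h"

	static void rx_path_example(unsigned long rx_pkt_cnt)
	{
		trace_rmnet_ipa_netifni3(rx_pkt_cnt);	/* emit the event */
	}
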
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index 21eaf02917d6..b2b193d4c9e7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -79,47 +79,6 @@ enum ipa3_hw_2_cpu_responses {
};
/**
- * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
- * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
- * device
- * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
- */
-enum ipa3_hw_2_cpu_events {
- IPA_HW_2_CPU_EVENT_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
- IPA_HW_2_CPU_EVENT_LOG_INFO =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
-};
-
-/**
- * enum ipa3_hw_errors - Common error types.
- * @IPA_HW_ERROR_NONE : No error persists
- * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
- * @IPA_HW_DMA_ERROR : Unexpected DMA error
- * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
- * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
- * @IPA_HW_ZIP_ENGINE_ERROR : ZIP engine error
- */
-enum ipa3_hw_errors {
- IPA_HW_ERROR_NONE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
- IPA_HW_INVALID_DOORBELL_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
- IPA_HW_DMA_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
- IPA_HW_FATAL_SYSTEM_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
- IPA_HW_INVALID_OPCODE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
- IPA_HW_ZIP_ENGINE_ERROR =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
- IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
- IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
- FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7)
-};
-
-/**
* struct IpaHwResetPipeCmdData_t - Structure holding the parameters
* for IPA_CPU_2_HW_CMD_MEMCPY command.
*
@@ -181,20 +140,6 @@ union IpaHwCpuCmdCompletedResponseData_t {
} __packed;
/**
- * union IpaHwErrorEventData_t - HW->CPU Common Events
- * @errorType : Entered when a system error is detected by the HW. Type of
- * error is specified by IPA_HW_ERRORS
- * @reserved : Reserved
- */
-union IpaHwErrorEventData_t {
- struct IpaHwErrorEventParams_t {
- u32 errorType:8;
- u32 reserved:24;
- } __packed params;
- u32 raw32b;
-} __packed;
-
-/**
* union IpaHwUpdateFlagsCmdData_t - Structure holding the parameters for
* IPA_CPU_2_HW_CMD_UPDATE_FLAGS command
* @newFlags: SW flags defined the behavior of HW.
@@ -230,7 +175,7 @@ do { \
struct ipa3_uc_hdlrs ipa3_uc_hdlrs[IPA_HW_NUM_FEATURES] = { { 0 } };
-static inline const char *ipa_hw_error_str(enum ipa3_hw_errors err_type)
+const char *ipa_hw_error_str(enum ipa3_hw_errors err_type)
{
const char *str;
@@ -267,7 +212,7 @@ static void ipa3_log_evt_hdlr(void)
if (ipa3_ctx->uc_ctx.uc_event_top_ofst +
sizeof(struct IpaHwEventLogInfoData_t) >=
ipa3_ctx->ctrl->ipa_reg_base_ofst +
- IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(0) +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
ipa3_ctx->smem_sz) {
IPAERR("uc_top 0x%x outside SRAM\n",
ipa3_ctx->uc_ctx.uc_event_top_ofst);
@@ -352,7 +297,7 @@ static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
WARN_ON(private_data != ipa3_ctx);
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
IPADBG("uC evt opcode=%u\n",
ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
@@ -363,7 +308,7 @@ static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
IPAERR("Invalid feature %u for event %u\n",
feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return;
}
/* Feature specific handling */
@@ -383,6 +328,8 @@ static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
IPAERR("IPA has encountered a ZIP engine error\n");
ipa3_ctx->uc_ctx.uc_zip_error = true;
}
+ ipa3_ctx->uc_ctx.uc_error_timestamp =
+ ipahal_read_reg(IPA_TAG_TIMER);
BUG();
} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
IPA_HW_2_CPU_EVENT_LOG_INFO) {
@@ -393,14 +340,15 @@ static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
IPADBG("unsupported uC evt opcode=%u\n",
ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
}
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
-static int ipa3_uc_panic_notifier(struct notifier_block *this,
+int ipa3_uc_panic_notifier(struct notifier_block *this,
unsigned long event, void *ptr)
{
int result = 0;
+ struct ipa3_active_client_logging_info log_info;
IPADBG("this=%p evt=%lu ptr=%p\n", this, event, ptr);
@@ -408,7 +356,8 @@ static int ipa3_uc_panic_notifier(struct notifier_block *this,
if (result)
goto fail;
- if (ipa3_inc_client_enable_clks_no_block())
+ IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+ if (ipa3_inc_client_enable_clks_no_block(&log_info))
goto fail;
ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp =
@@ -418,32 +367,22 @@ static int ipa3_uc_panic_notifier(struct notifier_block *this,
wmb();
if (ipa3_ctx->apply_rg10_wa)
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_UC_MAILBOX_m_n_OFFS_v3_0(IPA_CPU_2_HW_CMD_MBOX_m,
- IPA_CPU_2_HW_CMD_MBOX_n), 0x1);
+ ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+ IPA_CPU_2_HW_CMD_MBOX_m,
+ IPA_CPU_2_HW_CMD_MBOX_n, 0x1);
else
- ipa_write_reg(ipa3_ctx->mmio, IPA_IRQ_EE_UC_n_OFFS(0), 0x1);
+ ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1);
/* give uc enough time to save state */
udelay(IPA_PKT_FLUSH_TO_US);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG("err_fatal issued\n");
fail:
return NOTIFY_DONE;
}
-static struct notifier_block ipa3_uc_panic_blk = {
- .notifier_call = ipa3_uc_panic_notifier,
-};
-
-void ipa3_register_panic_hdlr(void)
-{
- atomic_notifier_chain_register(&panic_notifier_list,
- &ipa3_uc_panic_blk);
-}
-
static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
void *private_data,
void *interrupt_data)
@@ -454,8 +393,7 @@ static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
int i;
WARN_ON(private_data != ipa3_ctx);
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
IPADBG("uC rsp opcode=%u\n",
ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
@@ -464,7 +402,7 @@ static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
IPAERR("Invalid feature %u for event %u\n",
feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return;
}
@@ -477,7 +415,7 @@ static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
IPADBG("feature %d specific response handler\n",
feature);
complete_all(&ipa3_ctx->uc_ctx.uc_completion);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return;
}
}
@@ -517,7 +455,7 @@ static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
IPAERR("Unsupported uC rsp opcode = %u\n",
ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
}
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
static int ipa3_uc_send_cmd_64b_param(u32 cmd_lo, u32 cmd_hi, u32 opcode,
@@ -558,11 +496,11 @@ send_cmd:
wmb();
if (ipa3_ctx->apply_rg10_wa)
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_UC_MAILBOX_m_n_OFFS_v3_0(IPA_CPU_2_HW_CMD_MBOX_m,
- IPA_CPU_2_HW_CMD_MBOX_n), 0x1);
+ ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+ IPA_CPU_2_HW_CMD_MBOX_m,
+ IPA_CPU_2_HW_CMD_MBOX_n, 0x1);
else
- ipa_write_reg(ipa3_ctx->mmio, IPA_IRQ_EE_UC_n_OFFS(0), 0x1);
+ ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1);
if (polling_mode) {
for (index = 0; index < IPA_UC_POLL_MAX_RETRY; index++) {
@@ -661,7 +599,7 @@ int ipa3_uc_interface_init(void)
phys_addr = ipa3_ctx->ipa_wrapper_base +
ipa3_ctx->ctrl->ipa_reg_base_ofst +
- IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(0);
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0);
ipa3_ctx->uc_ctx.uc_sram_mmio = ioremap(phys_addr,
IPA_RAM_UC_SMEM_SIZE);
if (!ipa3_ctx->uc_ctx.uc_sram_mmio) {
@@ -721,7 +659,7 @@ void ipa3_uc_load_notify(void)
if (!ipa3_ctx->apply_rg10_wa)
return;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ipa3_ctx->uc_ctx.uc_loaded = true;
IPADBG("IPA uC loaded\n");
@@ -739,7 +677,7 @@ void ipa3_uc_load_notify(void)
if (ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr)
ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr();
}
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
EXPORT_SYMBOL(ipa3_uc_load_notify);
@@ -900,20 +838,21 @@ int ipa3_uc_update_hw_flags(u32 flags)
* to a register will be proxied by the uC due to H/W limitation.
* This func should be called for RG10 registers only
*
- * @Parameters: Like ipa_write_reg() parameters
+ * @Parameters: Like ipahal_write_reg_n() parameters
*
*/
-void ipa3_uc_rg10_write_reg(void *base, u32 offset, u32 val)
+void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val)
{
int ret;
u32 paddr;
if (!ipa3_ctx->apply_rg10_wa)
- return ipa_write_reg(base, offset, val);
+ return ipahal_write_reg_n(reg, n, val);
+
/* calculate register physical address */
paddr = ipa3_ctx->ipa_wrapper_base + ipa3_ctx->ctrl->ipa_reg_base_ofst;
- paddr += offset;
+ paddr += ipahal_get_reg_n_ofst(reg, n);
IPADBG("Sending uC cmd to reg write: addr=0x%x val=0x%x\n",
paddr, val);
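
Editorial note (not part of the patch): with the ipahal conversion, ipa3_uc_rg10_write_reg() takes a register name and index rather than a raw mmio offset, and decides internally between a direct write and the uC-proxied write. A hedged usage sketch, register choice illustrative:

	/* falls back to ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type)
	 * unless apply_rg10_wa is set, in which case the write is proxied
	 * through a uC command aimed at the register's physical address
	 */
	ipa3_uc_rg10_write_reg(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
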
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
index fa73d548848d..d5ea3ae6dcd9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -554,7 +554,7 @@ static void ipa3_uc_mhi_event_log_info_hdlr(
if (ipa3_uc_mhi_ctx->mhi_uc_stats_ofst +
sizeof(struct IpaHwStatsMhiInfoData_t) >=
ipa3_ctx->ctrl->ipa_reg_base_ofst +
- IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(0) +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
ipa3_ctx->smem_sz) {
IPAERR("uc_mhi_stats 0x%x outside SRAM\n",
ipa3_uc_mhi_ctx->mhi_uc_stats_ofst);
@@ -629,7 +629,7 @@ int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
return -EFAULT;
}
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
res = ipa3_uc_update_hw_flags(0);
if (res) {
@@ -692,7 +692,7 @@ int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
res = 0;
disable_clks:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@@ -715,7 +715,7 @@ int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
return -EINVAL;
}
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
@@ -740,7 +740,7 @@ int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
res = 0;
disable_clks:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@@ -755,8 +755,7 @@ int ipa3_uc_mhi_reset_channel(int channelHandle)
IPAERR("Not initialized\n");
return -EFAULT;
}
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
@@ -778,7 +777,7 @@ int ipa3_uc_mhi_reset_channel(int channelHandle)
res = 0;
disable_clks:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@@ -792,8 +791,7 @@ int ipa3_uc_mhi_suspend_channel(int channelHandle)
IPAERR("Not initialized\n");
return -EFAULT;
}
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
@@ -815,7 +813,7 @@ int ipa3_uc_mhi_suspend_channel(int channelHandle)
res = 0;
disable_clks:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@@ -829,8 +827,7 @@ int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
IPAERR("Not initialized\n");
return -EFAULT;
}
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
@@ -853,7 +850,7 @@ int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
res = 0;
disable_clks:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@@ -866,8 +863,7 @@ int ipa3_uc_mhi_stop_event_update_channel(int channelHandle)
IPAERR("Not initialized\n");
return -EFAULT;
}
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&cmd, 0, sizeof(cmd));
cmd.params.channelHandle = channelHandle;
@@ -885,7 +881,7 @@ int ipa3_uc_mhi_stop_event_update_channel(int channelHandle)
res = 0;
disable_clks:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
@@ -903,7 +899,7 @@ int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd)
IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
cmd.params.ulMsiEventThreshold, cmd.params.dlMsiEventThreshold);
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
res = ipa3_uc_send_cmd(cmd.raw32b,
IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
@@ -914,7 +910,7 @@ int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd)
res = 0;
disable_clks:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 284d000a3935..dc069e15dd44 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -339,7 +339,7 @@ struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
if (ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst +
sizeof(struct IpaHwStatsWDIInfoData_t) >=
ipa3_ctx->ctrl->ipa_reg_base_ofst +
- IPA_SRAM_DIRECT_ACCESS_N_OFST_v3_0(0) +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
ipa3_ctx->smem_sz) {
IPAERR("uc_wdi_stats 0x%x outside SRAM\n",
ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
@@ -401,8 +401,7 @@ int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio);
return -EINVAL;
}
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
TX_STATS(num_pkts_processed);
TX_STATS(copy_engine_doorbell_value);
@@ -444,7 +443,7 @@ int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
RX_STATS(reserved1);
RX_STATS(reserved2);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
@@ -737,7 +736,7 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
}
memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx);
if (IPA_CLIENT_IS_CONS(in->sys.client)) {
@@ -825,10 +824,10 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
tx->num_tx_buffers = in->u.dl.num_tx_buffers;
tx->ipa_pipe_number = ipa_ep_idx;
out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
- IPA_REG_BASE_OFST_v3_0 +
- IPA_UC_MAILBOX_m_n_OFFS_v3_0(
- IPA_HW_WDI_TX_MBOX_START_INDEX/32,
- IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+ ipahal_get_reg_base() +
+ ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+ IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+ IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
} else {
rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base;
@@ -868,8 +867,8 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
rx->ipa_pipe_number = ipa_ep_idx;
out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
- IPA_REG_BASE_OFST_v3_0 +
- IPA_UC_MAILBOX_m_n_OFFS_v3_0(
+ ipahal_get_reg_base() +
+ ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
IPA_HW_WDI_RX_MBOX_START_INDEX/32,
IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
}
@@ -921,7 +920,7 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
ipa3_install_dflt_flt_rules(ipa_ep_idx);
if (!ep->keep_ipa_awake)
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
ep->wdi_state |= IPA_WDI_CONNECTED;
@@ -935,7 +934,7 @@ uc_timeout:
ipa_release_uc_smmu_mappings(in->sys.client);
dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
dma_alloc_fail:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
fail:
return result;
}
@@ -974,7 +973,7 @@ int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
}
if (!ep->keep_ipa_awake)
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
tear.params.ipa_pipe_number = clnt_hdl;
@@ -992,7 +991,7 @@ int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
ipa_release_uc_smmu_mappings(ep->client);
memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
@@ -1033,8 +1032,7 @@ int ipa3_enable_wdi_pipe(u32 clnt_hdl)
IPAERR("WDI channel bad state %d\n", ep->wdi_state);
return -EFAULT;
}
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
enable.params.ipa_pipe_number = clnt_hdl;
result = ipa3_uc_send_cmd(enable.raw32b,
@@ -1053,8 +1051,7 @@ int ipa3_enable_wdi_pipe(u32 clnt_hdl)
holb_cfg.tmr_val = 0;
result = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
}
-
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
ep->wdi_state |= IPA_WDI_ENABLED;
IPADBG("client (ep: %d) enabled\n", clnt_hdl);
@@ -1096,8 +1093,7 @@ int ipa3_disable_wdi_pipe(u32 clnt_hdl)
IPAERR("WDI channel bad state %d\n", ep->wdi_state);
return -EFAULT;
}
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
result = ipa3_disable_data_path(clnt_hdl);
if (result) {
@@ -1149,8 +1145,7 @@ int ipa3_disable_wdi_pipe(u32 clnt_hdl)
ep_cfg_ctrl.ipa_ep_delay = true;
ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
}
-
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
ep->wdi_state &= ~IPA_WDI_ENABLED;
IPADBG("client (ep: %d) disabled\n", clnt_hdl);
@@ -1191,8 +1186,7 @@ int ipa3_resume_wdi_pipe(u32 clnt_hdl)
IPAERR("WDI channel bad state %d\n", ep->wdi_state);
return -EFAULT;
}
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
resume.params.ipa_pipe_number = clnt_hdl;
result = ipa3_uc_send_cmd(resume.raw32b,
@@ -1303,7 +1297,7 @@ int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
}
ipa3_ctx->tag_process_before_gating = true;
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
ep->wdi_state &= ~IPA_WDI_RESUMED;
IPADBG("client (ep: %d) suspended\n", clnt_hdl);
@@ -1335,8 +1329,7 @@ int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
IPAERR("WDI channel bad state %d\n", ep->wdi_state);
return -EFAULT;
}
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
qmap.params.ipa_pipe_number = clnt_hdl;
qmap.params.qmap_id = qmap_id;
@@ -1349,8 +1342,7 @@ int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
result = -EFAULT;
goto uc_timeout;
}
-
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id);
@@ -1389,6 +1381,20 @@ int ipa3_uc_reg_rdyCB(
return 0;
}
+/**
+ * ipa3_uc_dereg_rdyCB() - To de-register uC ready CB
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa3_uc_dereg_rdyCB(void)
+{
+ ipa3_ctx->uc_wdi_ctx.uc_ready_cb = NULL;
+ ipa3_ctx->uc_wdi_ctx.priv = NULL;
+
+ return 0;
+}
+
/**
* ipa3_uc_wdi_get_dbpa() - To retrieve
@@ -1411,14 +1417,14 @@ int ipa3_uc_wdi_get_dbpa(
if (IPA_CLIENT_IS_CONS(param->client)) {
param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
- IPA_REG_BASE_OFST_v3_0 +
- IPA_UC_MAILBOX_m_n_OFFS_v3_0(
+ ipahal_get_reg_base() +
+ ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
IPA_HW_WDI_TX_MBOX_START_INDEX/32,
IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
} else {
param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
- IPA_REG_BASE_OFST_v3_0 +
- IPA_UC_MAILBOX_m_n_OFFS_v3_0(
+ ipahal_get_reg_base() +
+ ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
IPA_HW_WDI_RX_MBOX_START_INDEX/32,
IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
}
@@ -1433,9 +1439,16 @@ static void ipa3_uc_wdi_loaded_handler(void)
return;
}
- if (ipa3_ctx->uc_wdi_ctx.uc_ready_cb)
+ if (ipa3_ctx->uc_wdi_ctx.uc_ready_cb) {
ipa3_ctx->uc_wdi_ctx.uc_ready_cb(
ipa3_ctx->uc_wdi_ctx.priv);
+
+ ipa3_ctx->uc_wdi_ctx.uc_ready_cb =
+ NULL;
+ ipa3_ctx->uc_wdi_ctx.priv = NULL;
+ }
+
+ return;
}
int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
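
Editorial note (not part of the patch): the loaded handler above now treats the ready callback as one-shot -- it is cleared after firing -- and the new ipa3_uc_dereg_rdyCB() lets a client withdraw it beforehand. A hedged sketch of the client side, callback body illustrative:

	static void my_wdi_uc_ready_cb(void *priv)
	{
		/* uC is loaded: WDI pipes may now be connected */
	}

	/* ... callback registered earlier via ipa3_uc_reg_rdyCB(...) ... */

	/* tearing down before the uC ever loaded: withdraw the callback so
	 * the one-shot loaded handler cannot call into freed context
	 */
	ipa3_uc_dereg_rdyCB();
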
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 2b3a7d4e0794..9400ddb2e30a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,7 @@
#include <linux/msm_gsi.h>
#include <linux/elf.h>
#include "ipa_i.h"
+#include "ipahal/ipahal.h"
#define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
#define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
@@ -28,6 +29,9 @@
#define IPA_V3_0_BW_THRESHOLD_TURBO_MBPS (1200)
#define IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS (600)
+#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000
+#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10
+
/* Max pipes + ICs for TAG process */
#define IPA_TAG_MAX_DESC (IPA3_MAX_NUM_PIPES + 6)
@@ -40,13 +44,6 @@
#define IPA_EOT_COAL_GRAN_MIN (1)
#define IPA_EOT_COAL_GRAN_MAX (16)
-#define IPA_AGGR_BYTE_LIMIT (\
- IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
- IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT)
-#define IPA_AGGR_PKT_LIMIT (\
- IPA_ENDP_INIT_AGGR_N_AGGR_PKT_LIMIT_BMSK >> \
- IPA_ENDP_INIT_AGGR_N_AGGR_PKT_LIMIT_SHFT)
-
/* In IPAv3 only endpoints 0-3 can be configured to deaggregation */
#define IPA_EP_SUPPORTS_DEAGGR(idx) ((idx) >= 0 && (idx) <= 3)
@@ -445,6 +442,9 @@ int ipa3_get_clients_from_rm_resource(
case IPA_RM_RESOURCE_USB_CONS:
clients->names[i++] = IPA_CLIENT_USB_CONS;
break;
+ case IPA_RM_RESOURCE_USB_DPL_CONS:
+ clients->names[i++] = IPA_CLIENT_USB_DPL_CONS;
+ break;
case IPA_RM_RESOURCE_HSIC_CONS:
clients->names[i++] = IPA_CLIENT_HSIC1_CONS;
break;
@@ -497,12 +497,13 @@ bool ipa3_should_pipe_be_suspended(enum ipa_client_type client)
if (ep->keep_ipa_awake)
return false;
- if (client == IPA_CLIENT_USB_CONS ||
- client == IPA_CLIENT_MHI_CONS ||
- client == IPA_CLIENT_HSIC1_CONS ||
- client == IPA_CLIENT_WLAN1_CONS ||
- client == IPA_CLIENT_WLAN2_CONS ||
- client == IPA_CLIENT_WLAN3_CONS ||
+ if (client == IPA_CLIENT_USB_CONS ||
+ client == IPA_CLIENT_USB_DPL_CONS ||
+ client == IPA_CLIENT_MHI_CONS ||
+ client == IPA_CLIENT_HSIC1_CONS ||
+ client == IPA_CLIENT_WLAN1_CONS ||
+ client == IPA_CLIENT_WLAN2_CONS ||
+ client == IPA_CLIENT_WLAN3_CONS ||
client == IPA_CLIENT_WLAN4_CONS)
return true;
@@ -561,7 +562,7 @@ int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource)
/* before gating IPA clocks do TAG process */
ipa3_ctx->tag_process_before_gating = true;
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa3_rm_resource_str(resource));
return 0;
}
@@ -584,6 +585,7 @@ int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource)
struct ipa_ep_cfg_ctrl suspend;
int ipa_ep_idx;
unsigned long flags;
+ struct ipa3_active_client_logging_info log_info;
if (ipa3_active_clients_trylock(&flags) == 0)
return -EPERM;
@@ -622,6 +624,9 @@ int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource)
}
if (res == 0) {
+ IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
+ ipa3_rm_resource_str(resource));
+ ipa3_active_clients_log_dec(&log_info, true);
ipa3_ctx->ipa3_active_clients.cnt--;
IPADBG("active clients = %d\n",
ipa3_ctx->ipa3_active_clients.cnt);
@@ -691,14 +696,15 @@ int ipa3_resume_resource(enum ipa_rm_resource_name resource)
*/
void _ipa_sram_settings_read_v3_0(void)
{
- ipa3_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa3_ctx->mmio,
- IPA_SHARED_MEM_SIZE_OFST_v3_0,
- IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v3_0,
- IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v3_0);
- ipa3_ctx->smem_sz = ipa_read_reg_field(ipa3_ctx->mmio,
- IPA_SHARED_MEM_SIZE_OFST_v3_0,
- IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v3_0,
- IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v3_0);
+ struct ipahal_reg_shared_mem_size smem_sz;
+
+ memset(&smem_sz, 0, sizeof(smem_sz));
+
+ ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
+
+ ipa3_ctx->smem_restricted_bytes = smem_sz.shared_mem_baddr;
+ ipa3_ctx->smem_sz = smem_sz.shared_mem_sz;
+
/* reg fields are in 8B units */
ipa3_ctx->smem_restricted_bytes *= 8;
ipa3_ctx->smem_sz *= 8;
@@ -725,50 +731,13 @@ void _ipa_sram_settings_read_v3_0(void)
}
/**
- * _ipa_cfg_route_v3_0() - Configure the IPA default route register
- * @route: IPA route
- *
- * Returns: None
- */
-void _ipa_cfg_route_v3_0(struct ipa3_route *route)
-{
- u32 reg_val = 0;
-
- IPA_SETFIELD_IN_REG(reg_val, route->route_dis,
- IPA_ROUTE_ROUTE_DIS_SHFT,
- IPA_ROUTE_ROUTE_DIS_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe,
- IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
- IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table,
- IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
- IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst,
- IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
- IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, route->route_frag_def_pipe,
- IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT,
- IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, route->route_def_retain_hdr,
- IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT,
- IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio, IPA_ROUTE_OFST_v3_0, reg_val);
-}
-
-/**
* ipa3_cfg_route() - configure IPA route
* @route: IPA route
*
* Return codes:
* 0: success
*/
-int ipa3_cfg_route(struct ipa3_route *route)
+int ipa3_cfg_route(struct ipahal_reg_route *route)
{
IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n",
@@ -779,16 +748,19 @@ int ipa3_cfg_route(struct ipa3_route *route)
route->route_def_hdr_ofst,
route->route_frag_def_pipe);
+ IPADBG("default_retain_hdr=%d\n",
+ route->route_def_retain_hdr);
+
if (route->route_dis) {
IPAERR("Route disable is not supported!\n");
return -EPERM;
}
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
- ipa3_ctx->ctrl->ipa3_cfg_route(route);
+ ipahal_write_reg_fields(IPA_ROUTE, route);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
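
Editorial note (not part of the patch): ipa3_cfg_route() now writes the whole route register through ipahal_write_reg_fields() instead of a per-version ctrl callback. A sketch of a caller, assuming the field names of struct ipahal_reg_route match those printed above (pipe indices are illustrative):

	struct ipahal_reg_route route;

	memset(&route, 0, sizeof(route));
	route.route_dis = 0;			/* disable is rejected anyway */
	route.route_def_pipe = apps_lan_ep;	/* hypothetical default pipe */
	route.route_def_hdr_table = 0;
	route.route_def_hdr_ofst = 0;
	route.route_frag_def_pipe = apps_lan_ep;
	route.route_def_retain_hdr = 1;

	if (ipa3_cfg_route(&route))
		IPAERR("failed to set default route\n");
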
@@ -817,12 +789,12 @@ int ipa3_init_hw(void)
u32 ipa_version = 0;
/* Read IPA version and make sure we have access to the registers */
- ipa_version = ipa_read_reg(ipa3_ctx->mmio, IPA_VERSION_OFST);
+ ipa_version = ipahal_read_reg(IPA_VERSION);
if (ipa_version == 0)
return -EFAULT;
/* using old BCR configuration(IPAv2.6)*/
- ipa_write_reg(ipa3_ctx->mmio, IPA_BCR_OFST, IPA_BCR_REG_VAL);
+ ipahal_write_reg(IPA_BCR, IPA_BCR_REG_VAL);
return 0;
}
@@ -1872,25 +1844,25 @@ int ipa3_generate_hw_rule(enum ipa_ip_type ip,
* OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
*/
if (attrib->attrib_mask == 0) {
- IPADBG("building default rule\n");
+ IPADBG_LOW("building default rule\n");
*en_rule |= ipa_ofst_meq32[0];
extra_wrd_i = ipa3_write_8(0, extra_wrd_i); /* offset */
rest_wrd_i = ipa3_write_32(0, rest_wrd_i); /* mask */
rest_wrd_i = ipa3_write_32(0, rest_wrd_i); /* val */
}
- IPADBG("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start);
- IPADBG("extra_word_2 0x%llx\n",
+ IPADBG_LOW("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start);
+ IPADBG_LOW("extra_word_2 0x%llx\n",
*(u64 *)(extra_wrd_start + IPA_HW_TBL_WIDTH));
extra_wrd_i = ipa3_pad_to_64(extra_wrd_i);
sz = extra_wrd_i - extra_wrd_start;
- IPADBG("extra words params sz %d\n", sz);
+ IPADBG_LOW("extra words params sz %d\n", sz);
*buf = ipa3_copy_mem(extra_wrd_start, *buf, sz);
rest_wrd_i = ipa3_pad_to_64(rest_wrd_i);
sz = rest_wrd_i - rest_wrd_start;
- IPADBG("non extra words params sz %d\n", sz);
+ IPADBG_LOW("non extra words params sz %d\n", sz);
*buf = ipa3_copy_mem(rest_wrd_start, *buf, sz);
fail_err_check:
@@ -2577,16 +2549,14 @@ int ipa3_cfg_ep_seq(u32 clnt_hdl)
IPAERR("Configuring non-DMA SEQ type to DMA pipe\n");
BUG();
}
-
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
/* Configure sequencers type*/
IPADBG("set sequencers to sequence 0x%x, ep = %d\n", type,
clnt_hdl);
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_SEQ_n_OFST(clnt_hdl), type);
+ ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
} else {
IPADBG("should not set sequencer type of ep = %d\n", clnt_hdl);
}
@@ -2675,20 +2645,6 @@ const char *ipa3_get_nat_en_str(enum ipa_nat_en_type nat_en)
return "undefined";
}
-void _ipa_cfg_ep_nat_v3_0(u32 clnt_hdl,
- const struct ipa_ep_cfg_nat *ep_nat)
-{
- u32 reg_val = 0;
-
- IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en,
- IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT,
- IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_NAT_N_OFST_v3_0(clnt_hdl),
- reg_val);
-}
-
/**
* ipa3_cfg_ep_nat() - IPA end-point NAT configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
@@ -2721,36 +2677,15 @@ int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
/* copy over EP cfg */
ipa3_ctx->ep[clnt_hdl].cfg.nat = *ep_nat;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
- ipa3_ctx->ctrl->ipa3_cfg_ep_nat(clnt_hdl, ep_nat);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_NAT_n, clnt_hdl, ep_nat);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return 0;
}
-static void _ipa_cfg_ep_status_v3_0(u32 clnt_hdl,
- const struct ipa3_ep_cfg_status *ep_status)
-{
- u32 reg_val = 0;
-
- IPA_SETFIELD_IN_REG(reg_val, ep_status->status_en,
- IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
- IPA_ENDP_STATUS_n_STATUS_EN_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_status->status_ep,
- IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
- IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_status->status_location,
- IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT,
- IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_STATUS_n_OFST(clnt_hdl),
- reg_val);
-}
/**
* ipa3_cfg_ep_status() - IPA end-point status configuration
@@ -2761,7 +2696,8 @@ static void _ipa_cfg_ep_status_v3_0(u32 clnt_hdl,
*
* Note: Should not be called from atomic context
*/
-int ipa3_cfg_ep_status(u32 clnt_hdl, const struct ipa3_ep_cfg_status *ep_status)
+int ipa3_cfg_ep_status(u32 clnt_hdl,
+ const struct ipahal_reg_ep_cfg_status *ep_status)
{
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) {
@@ -2780,34 +2716,15 @@ int ipa3_cfg_ep_status(u32 clnt_hdl, const struct ipa3_ep_cfg_status *ep_status)
/* copy over EP cfg */
ipa3_ctx->ep[clnt_hdl].status = *ep_status;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
- ipa3_ctx->ctrl->ipa3_cfg_ep_status(clnt_hdl, ep_status);
+ ipahal_write_reg_n_fields(IPA_ENDP_STATUS_n, clnt_hdl, ep_status);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return 0;
}
-static void _ipa_cfg_ep_cfg_v3_0(u32 clnt_hdl,
- const struct ipa_ep_cfg_cfg *cfg)
-{
- u32 reg_val = 0;
-
- IPA_SETFIELD_IN_REG(reg_val, cfg->frag_offload_en,
- IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT,
- IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK);
- IPA_SETFIELD_IN_REG(reg_val, cfg->cs_offload_en,
- IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT,
- IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK);
- IPA_SETFIELD_IN_REG(reg_val, cfg->cs_metadata_hdr_offset,
- IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT,
- IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio, IPA_ENDP_INIT_CFG_n_OFST(clnt_hdl),
- reg_val);
-}
-
/**
* ipa3_cfg_ep_cfg() - IPA end-point cfg configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
@@ -2832,33 +2749,21 @@ int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
cfg->frag_offload_en,
cfg->cs_offload_en,
cfg->cs_metadata_hdr_offset);
+ IPADBG("gen_qmb_master_sel=%d\n",
+ cfg->gen_qmb_master_sel);
/* copy over EP cfg */
ipa3_ctx->ep[clnt_hdl].cfg.cfg = *cfg;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
- ipa3_ctx->ctrl->ipa3_cfg_ep_cfg(clnt_hdl, cfg);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_CFG_n, clnt_hdl, cfg);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return 0;
}
-static void _ipa_cfg_ep_metadata_mask_v3_0(u32 clnt_hdl,
- const struct ipa_ep_cfg_metadata_mask *metadata_mask)
-{
- u32 reg_val = 0;
-
- IPA_SETFIELD_IN_REG(reg_val, metadata_mask->metadata_mask,
- IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT,
- IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(clnt_hdl),
- reg_val);
-}
-
/**
* ipa3_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
@@ -2887,70 +2792,17 @@ int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl,
/* copy over EP cfg */
ipa3_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
- ipa3_ctx->ctrl->ipa3_cfg_ep_metadata_mask(clnt_hdl, metadata_mask);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+ clnt_hdl, metadata_mask);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return 0;
}
/**
- * ipa_cfg_ep_hdr_v3_0() - IPA end-point header configuration
- * @pipe_number:[in] opaque client handle assigned by IPA to client
- * @ipa_ep_cfg: [in] IPA end-point configuration params
- *
- * Returns: 0 on success, negative on failure
- *
- * Note: Should not be called from atomic context
- */
-void _ipa_cfg_ep_hdr_v3_0(u32 pipe_number,
- const struct ipa_ep_cfg_hdr *ep_hdr)
-{
- u32 reg_val = 0;
-
- IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_metadata_reg_valid,
- IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2,
- IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_remove_additional,
- IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2,
- IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_a5_mux,
- IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT,
- IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size,
- IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT,
- IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size_valid,
- IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT,
- IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_additional_const_len,
- IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT,
- IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata,
- IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT,
- IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata_valid,
- IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT,
- IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_len,
- IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT,
- IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_HDR_N_OFST_v3_0(pipe_number), reg_val);
-}
-
-/**
* ipa3_cfg_ep_hdr() - IPA end-point header configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
* @ipa_ep_cfg: [in] IPA end-point configuration params
@@ -2969,8 +2821,11 @@ int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
return -EINVAL;
}
- IPADBG("pipe=%d remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n",
+ IPADBG("pipe=%d metadata_reg_valid=%d\n",
clnt_hdl,
+ ep_hdr->hdr_metadata_reg_valid);
+
+ IPADBG("remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n",
ep_hdr->hdr_remove_additional,
ep_hdr->hdr_a5_mux,
ep_hdr->hdr_ofst_pkt_size);
@@ -2989,58 +2844,15 @@ int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
/* copy over EP cfg */
ep->cfg.hdr = *ep_hdr;
- ipa3_inc_client_enable_clks();
-
- ipa3_ctx->ctrl->ipa3_cfg_ep_hdr(clnt_hdl, &ep->cfg.hdr);
-
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
- return 0;
-}
-
-static int _ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
- const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext, u32 reg_val)
-{
- u8 hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1;
-
- IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_offset,
- IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT,
- IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_payload_len_inc_padding,
- IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT,
- IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad,
- IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT,
- IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl, &ep->cfg.hdr);
- IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_valid,
- IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT,
- IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, hdr_endianness,
- IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT,
- IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_HDR_EXT_n_OFST_v3_0(clnt_hdl), reg_val);
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return 0;
}
-static int _ipa_cfg_ep_hdr_ext_v3_0(u32 clnt_hdl,
- const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
-{
- u32 reg_val = 0;
-
- IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
- IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
- IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0);
-
- return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
-}
-
/**
* ipa3_cfg_ep_hdr_ext() - IPA end-point extended header configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
@@ -3082,17 +2894,18 @@ int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl,
/* copy over EP cfg */
ep->cfg.hdr_ext = *ep_hdr_ext;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
- ipa3_ctx->ctrl->ipa3_cfg_ep_hdr_ext(clnt_hdl, &ep->cfg.hdr_ext);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_EXT_n, clnt_hdl,
+ &ep->cfg.hdr_ext);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return 0;
}
/**
- * ipa3_cfg_ep_hdr() - IPA end-point Control configuration
+ * ipa3_cfg_ep_ctrl() - IPA end-point Control configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
* @ipa_ep_cfg_ctrl: [in] IPA end-point configuration params
*
@@ -3100,8 +2913,6 @@ int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl,
*/
int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
{
- u32 reg_val = 0;
-
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ep_ctrl == NULL) {
IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl);
return -EINVAL;
@@ -3112,16 +2923,7 @@ int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
ep_ctrl->ipa_ep_suspend,
ep_ctrl->ipa_ep_delay);
- IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_suspend,
- IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT,
- IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_delay,
- IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT,
- IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_CTRL_N_OFST(clnt_hdl), reg_val);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, clnt_hdl, ep_ctrl);
if (ep_ctrl->ipa_ep_suspend == true &&
IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client))
@@ -3130,73 +2932,6 @@ int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
return 0;
}
-/**
- * ipa3_cfg_aggr_cntr_granularity() - granularity of the AGGR timer configuration
- * @aggr_granularity: [in] defines the granularity of AGGR timers
- * number of units of 1/32msec
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_cfg_aggr_cntr_granularity(u8 aggr_granularity)
-{
- u32 reg_val = 0;
-
- if (aggr_granularity <= IPA_AGGR_GRAN_MIN ||
- aggr_granularity > IPA_AGGR_GRAN_MAX) {
- IPAERR("bad param, aggr_granularity = %d\n",
- aggr_granularity);
- return -EINVAL;
- }
- IPADBG("aggr_granularity=%d\n", aggr_granularity);
-
- reg_val = ipa_read_reg(ipa3_ctx->mmio, IPA_COUNTER_CFG_OFST);
- reg_val = (reg_val & ~IPA_COUNTER_CFG_AGGR_GRAN_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, aggr_granularity - 1,
- IPA_COUNTER_CFG_AGGR_GRAN_SHFT,
- IPA_COUNTER_CFG_AGGR_GRAN_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_COUNTER_CFG_OFST, reg_val);
-
- return 0;
-
-}
-
-/**
- * ipa3_cfg_eot_coal_cntr_granularity() - granularity of EOT_COAL timer
- * configuration
- * @eot_coal_granularity: defines the granularity of EOT_COAL timers
- * number of units of 1/32msec
- *
- * Returns: 0 on success, negative on failure
- */
-int ipa3_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity)
-{
- u32 reg_val = 0;
-
- if (eot_coal_granularity <= IPA_EOT_COAL_GRAN_MIN ||
- eot_coal_granularity > IPA_EOT_COAL_GRAN_MAX) {
- IPAERR("bad parm, eot_coal_granularity = %d\n",
- eot_coal_granularity);
- return -EINVAL;
- }
- IPADBG("eot_coal_granularity=%d\n", eot_coal_granularity);
-
- reg_val = ipa_read_reg(ipa3_ctx->mmio, IPA_COUNTER_CFG_OFST);
- reg_val = (reg_val & ~IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, eot_coal_granularity - 1,
- IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT,
- IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_COUNTER_CFG_OFST, reg_val);
-
- return 0;
-
-}
-
const char *ipa3_get_mode_type_str(enum ipa_mode_type mode)
{
switch (mode) {
@@ -3214,31 +2949,6 @@ const char *ipa3_get_mode_type_str(enum ipa_mode_type mode)
}
/**
- * _ipa_cfg_ep_mode_v3_0() - IPA end-point mode configuration
- * @pipe_number:[in] pipe number to set the mode on
- * @dst_pipe_number:[in] destination pipe, valid only when the mode is DMA
- * @ep_mode: [in] IPA end-point configuration params
- *
- * Returns: None
- */
-void _ipa_cfg_ep_mode_v3_0(u32 pipe_number, u32 dst_pipe_number,
- const struct ipa_ep_cfg_mode *ep_mode)
-{
- u32 reg_val = 0;
-
- IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode,
- IPA_ENDP_INIT_MODE_N_MODE_SHFT,
- IPA_ENDP_INIT_MODE_N_MODE_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number,
- IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v3_0,
- IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v3_0);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_MODE_N_OFST_v3_0(pipe_number), reg_val);
-}
-
-/**
* ipa3_cfg_ep_mode() - IPA end-point mode configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
* @ipa_ep_cfg: [in] IPA end-point configuration params
@@ -3251,6 +2961,7 @@ int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
{
int ep;
int type;
+ struct ipahal_reg_endp_init_mode init_mode;
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) {
@@ -3286,11 +2997,11 @@ int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
ipa3_ctx->ep[clnt_hdl].cfg.mode = *ep_mode;
ipa3_ctx->ep[clnt_hdl].dst_pipe_index = ep;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
- ipa3_ctx->ctrl->ipa3_cfg_ep_mode(clnt_hdl,
- ipa3_ctx->ep[clnt_hdl].dst_pipe_index,
- ep_mode);
+ init_mode.dst_pipe_number = ipa3_ctx->ep[clnt_hdl].dst_pipe_index;
+ init_mode.ep_mode = *ep_mode;
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_MODE_n, clnt_hdl, &init_mode);
/* Configure sequencers type for test clients*/
if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) {
@@ -3301,10 +3012,9 @@ int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
IPADBG(" set sequencers to sequance 0x%x, ep = %d\n", type,
clnt_hdl);
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_SEQ_n_OFST(clnt_hdl), type);
+ ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
}
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return 0;
}
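ipa3_cfg_ep_mode() now fills a struct ipahal_reg_endp_init_mode from the client's ipa_ep_cfg_mode and the resolved destination pipe instead of shifting values by hand. A hedged caller sketch; the .mode and .dst field names are assumed from the public ipa_ep_cfg_mode API and do not appear in this hunk:

    /* assumed fields of struct ipa_ep_cfg_mode: .mode and .dst */
    struct ipa_ep_cfg_mode mode_cfg = {
        .mode = IPA_DMA,                 /* DMA mode needs a destination pipe */
        .dst = IPA_CLIENT_APPS_LAN_CONS, /* mapped to a pipe index internally */
    };

    if (ipa3_cfg_ep_mode(clnt_hdl, &mode_cfg))
        IPAERR("failed to configure DMA mode on ep %d\n", clnt_hdl);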
@@ -3343,45 +3053,6 @@ const char *ipa3_get_aggr_type_str(enum ipa_aggr_type aggr_type)
}
/**
- * _ipa_cfg_ep_aggr_v3_0() - IPA end-point aggregation configuration
- * @pipe_number:[in] pipe number
- * @ep_aggr:[in] IPA end-point configuration params
- */
-void _ipa_cfg_ep_aggr_v3_0(u32 pipe_number,
- const struct ipa_ep_cfg_aggr *ep_aggr)
-{
- u32 reg_val = 0;
-
- IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en,
- IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT,
- IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr,
- IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT,
- IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit,
- IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT,
- IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit,
- IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT,
- IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_pkt_limit,
- IPA_ENDP_INIT_AGGR_N_AGGR_PKT_LIMIT_SHFT,
- IPA_ENDP_INIT_AGGR_N_AGGR_PKT_LIMIT_BMSK);
-
- /* set byte-limit aggregation behavior to soft-byte limit */
- IPA_SETFIELD_IN_REG(reg_val, 0,
- IPA_ENDP_INIT_AGGR_N_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT,
- IPA_ENDP_INIT_AGGR_N_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_AGGR_N_OFST_v3_0(pipe_number), reg_val);
-}
-
-/**
* ipa3_cfg_ep_aggr() - IPA end-point aggregation configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
* @ipa_ep_cfg: [in] IPA end-point configuration params
@@ -3414,42 +3085,22 @@ int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
ipa3_get_aggr_type_str(ep_aggr->aggr),
ep_aggr->aggr_byte_limit,
ep_aggr->aggr_time_limit);
+ IPADBG("hard_byte_limit_en=%d\n",
+ ep_aggr->aggr_hard_byte_limit_en);
/* copy over EP cfg */
ipa3_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
- ipa3_ctx->ctrl->ipa3_cfg_ep_aggr(clnt_hdl, ep_aggr);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, ep_aggr);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return 0;
}
/**
- * _ipa_cfg_ep_route_v3_0() - IPA end-point routing configuration
- * @pipe_index:[in] pipe index
- * @rt_tbl_index:[in] IPA end-point configuration params
- *
- * Returns: 0 on success, negative on failure
- *
- * Note: Should not be called from atomic context
- */
-void _ipa_cfg_ep_route_v3_0(u32 pipe_index, u32 rt_tbl_index)
-{
- int reg_val = 0;
-
- IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index,
- IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT,
- IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_ROUTE_N_OFST_v3_0(pipe_index),
- reg_val);
-}
-
-/**
* ipa3_cfg_ep_route() - IPA end-point routing configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
* @ipa_ep_cfg: [in] IPA end-point configuration params
@@ -3460,6 +3111,8 @@ void _ipa_cfg_ep_route_v3_0(u32 pipe_index, u32 rt_tbl_index)
*/
int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
{
+ struct ipahal_reg_endp_init_route init_rt;
+
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) {
IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
@@ -3494,43 +3147,17 @@ int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
ipa3_ctx->ep[clnt_hdl].rt_tbl_idx =
IPA_MEM_PART(v4_apps_rt_index_lo);
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
- ipa3_ctx->ctrl->ipa3_cfg_ep_route(clnt_hdl,
- ipa3_ctx->ep[clnt_hdl].rt_tbl_idx);
+ init_rt.route_table_index = ipa3_ctx->ep[clnt_hdl].rt_tbl_idx;
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_ROUTE_n, clnt_hdl, &init_rt);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return 0;
}
/**
- * _ipa_cfg_ep_holb_v3_0() - IPA end-point holb configuration
- *
- * If an IPA producer pipe is full, IPA HW by default will block
- * indefinitely till space opens up. During this time no packets
- * including those from unrelated pipes will be processed. Enabling
- * HOLB means IPA HW will be allowed to drop packets as/when needed
- * and indefinite blocking is avoided.
- *
- * @pipe_number [in] pipe number
- * @ep_holb: [in] IPA end-point configuration params
- *
- * Returns: 0 on success, negative on failure
- */
-void _ipa_cfg_ep_holb_v3_0(u32 pipe_number,
- const struct ipa_ep_cfg_holb *ep_holb)
-{
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v3_0(pipe_number),
- ep_holb->en);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v3_0(pipe_number),
- (u16)ep_holb->tmr_val);
-}
-
-/**
* ipa3_cfg_ep_holb() - IPA end-point holb configuration
*
* If an IPA producer pipe is full, IPA HW by default will block
@@ -3559,18 +3186,17 @@ int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
return -EINVAL;
}
- if (!ipa3_ctx->ctrl->ipa3_cfg_ep_holb) {
- IPAERR("HOLB is not supported for this IPA core\n");
- return -EINVAL;
- }
-
ipa3_ctx->ep[clnt_hdl].holb = *ep_holb;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl,
+ ep_holb);
- ipa3_ctx->ctrl->ipa3_cfg_ep_holb(clnt_hdl, ep_holb);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, clnt_hdl,
+ ep_holb);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl,
ep_holb->tmr_val);
@@ -3596,33 +3222,6 @@ int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client,
return ipa3_cfg_ep_holb(ipa3_get_ep_mapping(client), ep_holb);
}
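As the kernel-doc above explains, enabling HOLB lets the HW drop packets on a full pipe instead of stalling the whole pipeline. A short sketch of turning it on for a consumer client; the values are illustrative and .en/.tmr_val are the fields printed by the IPADBG in ipa3_cfg_ep_holb():

    struct ipa_ep_cfg_holb holb = {
        .en = 1,       /* allow HW to drop when the pipe is full */
        .tmr_val = 32, /* illustrative timer value, units are HW specific */
    };

    if (ipa3_cfg_ep_holb_by_client(IPA_CLIENT_USB_CONS, &holb))
        IPAERR("failed to enable HOLB on USB CONS\n");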
-static int _ipa_cfg_ep_deaggr_v3_0(u32 clnt_hdl,
- const struct ipa_ep_cfg_deaggr *ep_deaggr)
-{
- u32 reg_val = 0;
-
- IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->deaggr_hdr_len,
- IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT,
- IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_valid,
- IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT,
- IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_location,
- IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT,
- IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK);
-
- IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->max_packet_len,
- IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT,
- IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_DEAGGR_n_OFST_v3_0(clnt_hdl), reg_val);
-
- return 0;
-}
-
/**
* ipa3_cfg_ep_deaggr() - IPA end-point deaggregation configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
@@ -3660,29 +3259,16 @@ int ipa3_cfg_ep_deaggr(u32 clnt_hdl,
/* copy over EP cfg */
ep->cfg.deaggr = *ep_deaggr;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
- ipa3_ctx->ctrl->ipa3_cfg_ep_deaggr(clnt_hdl, &ep->cfg.deaggr);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_DEAGGR_n, clnt_hdl,
+ &ep->cfg.deaggr);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return 0;
}
-static void _ipa_cfg_ep_metadata_v3_0(u32 pipe_number,
- const struct ipa_ep_cfg_metadata *meta)
-{
- u32 reg_val = 0;
-
- IPA_SETFIELD_IN_REG(reg_val, meta->qmap_id,
- IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT,
- IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK);
-
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_HDR_METADATA_n_OFST(pipe_number),
- reg_val);
-}
-
/**
* ipa3_cfg_ep_metadata() - IPA end-point metadata configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
@@ -3694,6 +3280,8 @@ static void _ipa_cfg_ep_metadata_v3_0(u32 pipe_number,
*/
int ipa3_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
{
+ u32 qmap_id = 0;
+
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) {
IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
@@ -3706,14 +3294,19 @@ int ipa3_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
/* copy over EP cfg */
ipa3_ctx->ep[clnt_hdl].cfg.meta = *ep_md;
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
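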
+ qmap_id = (ep_md->qmap_id <<
+ IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT) &
+ IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK;
- ipa3_ctx->ctrl->ipa3_cfg_ep_metadata(clnt_hdl, ep_md);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_n, clnt_hdl,
+ ep_md);
ipa3_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1;
- ipa3_ctx->ctrl->ipa3_cfg_ep_hdr(clnt_hdl,
- &ipa3_ctx->ep[clnt_hdl].cfg.hdr);
+ ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl,
+ &ipa3_ctx->ep[clnt_hdl].cfg.hdr);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return 0;
}
@@ -3887,13 +3480,13 @@ int ipa3_pipe_mem_free(u32 ofst, u32 size)
*/
int ipa3_set_aggr_mode(enum ipa_aggr_mode mode)
{
- u32 reg_val;
+ struct ipahal_reg_qcncm qcncm;
- ipa3_inc_client_enable_clks();
- reg_val = ipa_read_reg(ipa3_ctx->mmio, IPA_QCNCM_OFST);
- ipa_write_reg(ipa3_ctx->mmio, IPA_QCNCM_OFST, (mode & 0x1) |
- (reg_val & 0xfffffffe));
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
+ qcncm.mode_en = mode;
+ ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
@@ -3911,18 +3504,17 @@ int ipa3_set_aggr_mode(enum ipa_aggr_mode mode)
*/
int ipa3_set_qcncm_ndp_sig(char sig[3])
{
- u32 reg_val;
+ struct ipahal_reg_qcncm qcncm;
if (sig == NULL) {
IPAERR("bad argument for ipa3_set_qcncm_ndp_sig/n");
return -EINVAL;
}
- ipa3_inc_client_enable_clks();
- reg_val = ipa_read_reg(ipa3_ctx->mmio, IPA_QCNCM_OFST);
- ipa_write_reg(ipa3_ctx->mmio, IPA_QCNCM_OFST, sig[0] << 20 |
- (sig[1] << 12) | (sig[2] << 4) |
- (reg_val & 0xf000000f));
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
+ qcncm.mode_val = ((sig[0] << 16) | (sig[1] << 8) | sig[2]);
+ ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
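The rewritten packing places the three signature bytes into the QCNCM mode_val field (sig[0] in bits 23:16, sig[1] in bits 15:8, sig[2] in bits 7:0) and leaves the register-absolute shifting to ipahal. A small worked example with an arbitrary signature:

    /* illustrative 3-byte NDP signature: 'I','P','S' packs to mode_val 0x495053 */
    char sig[3] = { 'I', 'P', 'S' };

    if (ipa3_set_qcncm_ndp_sig(sig))
        IPAERR("failed to set QCNCM NDP signature\n");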
@@ -3936,13 +3528,13 @@ int ipa3_set_qcncm_ndp_sig(char sig[3])
*/
int ipa3_set_single_ndp_per_mbim(bool enable)
{
- u32 reg_val;
+ struct ipahal_reg_single_ndp_mode mode;
- ipa3_inc_client_enable_clks();
- reg_val = ipa_read_reg(ipa3_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST);
- ipa_write_reg(ipa3_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST,
- (enable & 0x1) | (reg_val & 0xfffffffe));
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ ipahal_read_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
+ mode.single_ndp_en = enable;
+ ipahal_write_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
@@ -3987,14 +3579,14 @@ void ipa3_bam_reg_dump(void)
static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);
if (__ratelimit(&_rs)) {
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SIMPLE();
pr_err("IPA BAM START\n");
sps_get_bam_debug_info(ipa3_ctx->bam_handle, 93,
(SPS_BAM_PIPE(ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS))
|
SPS_BAM_PIPE(ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD))),
0, 2);
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
}
@@ -4227,41 +3819,25 @@ int ipa3_controller_static_bind(struct ipa3_controller *ctrl,
ctrl->ipa_init_rt6 = _ipa_init_rt6_v3;
ctrl->ipa_init_flt4 = _ipa_init_flt4_v3;
ctrl->ipa_init_flt6 = _ipa_init_flt6_v3;
- ctrl->ipa3_cfg_ep_hdr = _ipa_cfg_ep_hdr_v3_0;
- ctrl->ipa3_cfg_ep_nat = _ipa_cfg_ep_nat_v3_0;
- ctrl->ipa3_cfg_ep_aggr = _ipa_cfg_ep_aggr_v3_0;
- ctrl->ipa3_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v3_0;
- ctrl->ipa3_cfg_ep_mode = _ipa_cfg_ep_mode_v3_0;
- ctrl->ipa3_cfg_ep_route = _ipa_cfg_ep_route_v3_0;
- ctrl->ipa3_cfg_route = _ipa_cfg_route_v3_0;
- ctrl->ipa3_cfg_ep_status = _ipa_cfg_ep_status_v3_0;
- ctrl->ipa3_cfg_ep_cfg = _ipa_cfg_ep_cfg_v3_0;
- ctrl->ipa3_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v3_0;
ctrl->ipa_clk_rate_turbo = IPA_V3_0_CLK_RATE_TURBO;
ctrl->ipa_clk_rate_nominal = IPA_V3_0_CLK_RATE_NOMINAL;
ctrl->ipa_clk_rate_svs = IPA_V3_0_CLK_RATE_SVS;
- ctrl->ipa3_read_gen_reg = _ipa_read_gen_reg_v3_0;
ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v3_0;
- ctrl->ipa3_write_dbg_cnt = _ipa_write_dbg_cnt_v3_0;
- ctrl->ipa3_read_dbg_cnt = _ipa_read_dbg_cnt_v3_0;
ctrl->ipa3_commit_flt = __ipa_commit_flt_v3;
ctrl->ipa3_commit_rt = __ipa_commit_rt_v3;
ctrl->ipa3_commit_hdr = __ipa_commit_hdr_v3_0;
ctrl->ipa3_enable_clks = _ipa_enable_clks_v3_0;
ctrl->ipa3_disable_clks = _ipa_disable_clks_v3_0;
ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v3_0;
- ctrl->ipa3_cfg_ep_metadata = _ipa_cfg_ep_metadata_v3_0;
ctrl->clock_scaling_bw_threshold_nominal =
IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS;
ctrl->clock_scaling_bw_threshold_turbo =
IPA_V3_0_BW_THRESHOLD_TURBO_MBPS;
- ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v3_0;
+ ctrl->ipa_reg_base_ofst = ipahal_get_reg_base();
ctrl->ipa_init_sram = _ipa_init_sram_v3_0;
ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
- ctrl->ipa3_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v3_0;
- ctrl->ipa3_cfg_ep_holb = _ipa_cfg_ep_holb_v3_0;
ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v3_0;
return 0;
@@ -4318,9 +3894,9 @@ void ipa3_id_remove(u32 id)
spin_unlock(&ipa3_ctx->idr_lock);
}
-void ipa3_tag_free_buf(void *user1, int user2)
+void ipa3_tag_destroy_imm(void *user1, int user2)
{
- kfree(user1);
+ ipahal_destroy_imm_cmd(user1);
}
static void ipa3_tag_free_skb(void *user1, int user2)
@@ -4348,9 +3924,9 @@ int ipa3_tag_process(struct ipa3_desc desc[],
struct ipa3_sys_context *sys;
struct ipa3_desc *tag_desc;
int desc_idx = 0;
- struct ipa3_ip_packet_init *pkt_init;
- struct ipa3_register_write *reg_write_nop;
- struct ipa3_ip_packet_tag_status *status;
+ struct ipahal_imm_cmd_ip_packet_init pktinit_cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+ struct ipahal_imm_cmd_ip_packet_tag_status status;
int i;
struct sk_buff *dummy_skb;
int res;
@@ -4376,8 +3952,7 @@ int ipa3_tag_process(struct ipa3_desc desc[],
tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, GFP_KERNEL);
if (!tag_desc) {
IPAERR("failed to allocate memory\n");
- res = -ENOMEM;
- goto fail_alloc_desc;
+ return -ENOMEM;
}
/* Copy the required descriptors from the client now */
@@ -4388,60 +3963,57 @@ int ipa3_tag_process(struct ipa3_desc desc[],
}
/* NO-OP IC for ensuring that IPA pipeline is empty */
- reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
- if (!reg_write_nop) {
- IPAERR("no mem\n");
+ cmd_pyld = ipahal_construct_nop_imm_cmd(
+ false, IPAHAL_FULL_PIPELINE_CLEAR, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct NOP imm cmd\n");
res = -ENOMEM;
- goto fail_free_desc;
+ goto fail_free_tag_desc;
}
-
- reg_write_nop->skip_pipeline_clear = 0;
- reg_write_nop->pipeline_clear_options = IPA_FULL_PIPELINE_CLEAR;
- reg_write_nop->value_mask = 0x0;
-
- tag_desc[desc_idx].opcode = IPA_REGISTER_WRITE;
- tag_desc[desc_idx].pyld = reg_write_nop;
- tag_desc[desc_idx].len = sizeof(*reg_write_nop);
+ tag_desc[desc_idx].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ tag_desc[desc_idx].pyld = cmd_pyld->data;
+ tag_desc[desc_idx].len = cmd_pyld->len;
tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
- tag_desc[desc_idx].callback = ipa3_tag_free_buf;
- tag_desc[desc_idx].user1 = reg_write_nop;
+ tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+ tag_desc[desc_idx].user1 = cmd_pyld;
desc_idx++;
/* IP_PACKET_INIT IC for tag status to be sent to apps */
- pkt_init = kzalloc(sizeof(*pkt_init), GFP_KERNEL);
- if (!pkt_init) {
- IPAERR("failed to allocate memory\n");
+ pktinit_cmd.destination_pipe_index =
+ ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_PACKET_INIT, &pktinit_cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct ip_packet_init imm cmd\n");
res = -ENOMEM;
- goto fail_alloc_pkt_init;
+ goto fail_free_desc;
}
-
- pkt_init->destination_pipe_index =
- ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
-
- tag_desc[desc_idx].opcode = IPA_IP_PACKET_INIT;
- tag_desc[desc_idx].pyld = pkt_init;
- tag_desc[desc_idx].len = sizeof(*pkt_init);
+ tag_desc[desc_idx].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
+ tag_desc[desc_idx].pyld = cmd_pyld->data;
+ tag_desc[desc_idx].len = cmd_pyld->len;
tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
- tag_desc[desc_idx].callback = ipa3_tag_free_buf;
- tag_desc[desc_idx].user1 = pkt_init;
+ tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+ tag_desc[desc_idx].user1 = cmd_pyld;
desc_idx++;
/* status IC */
- status = kzalloc(sizeof(*status), GFP_KERNEL);
- if (!status) {
- IPAERR("no mem\n");
+ status.tag = IPA_COOKIE;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &status, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct ip_packet_tag_status imm cmd\n");
res = -ENOMEM;
goto fail_free_desc;
}
-
- status->tag = IPA_COOKIE;
-
- tag_desc[desc_idx].opcode = IPA_IP_PACKET_TAG_STATUS;
- tag_desc[desc_idx].pyld = status;
- tag_desc[desc_idx].len = sizeof(*status);
+ tag_desc[desc_idx].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+ tag_desc[desc_idx].pyld = cmd_pyld->data;
+ tag_desc[desc_idx].len = cmd_pyld->len;
tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
- tag_desc[desc_idx].callback = ipa3_tag_free_buf;
- tag_desc[desc_idx].user1 = status;
+ tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+ tag_desc[desc_idx].user1 = cmd_pyld;
desc_idx++;
comp = kzalloc(sizeof(*comp), GFP_KERNEL);
@@ -4460,7 +4032,7 @@ int ipa3_tag_process(struct ipa3_desc desc[],
if (!dummy_skb) {
IPAERR("failed to allocate memory\n");
res = -ENOMEM;
- goto fail_free_skb;
+ goto fail_free_comp;
}
memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp));
@@ -4477,7 +4049,7 @@ int ipa3_tag_process(struct ipa3_desc desc[],
if (res) {
IPAERR("failed to send TAG packets %d\n", res);
res = -ENOMEM;
- goto fail_send;
+ goto fail_free_comp;
}
kfree(tag_desc);
tag_desc = NULL;
@@ -4502,26 +4074,24 @@ int ipa3_tag_process(struct ipa3_desc desc[],
return 0;
-fail_send:
- dev_kfree_skb_any(dummy_skb);
- desc_idx--;
-fail_free_skb:
+fail_free_comp:
kfree(comp);
fail_free_desc:
/*
* Free only the first descriptors allocated here.
- * [pkt_init, status, nop]
+ * [nop, pkt_init, status, dummy_skb]
* The user is responsible to free his allocations
* in case of failure.
* The min is required because we may fail during
* of the initial allocations above
*/
- for (i = 0; i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS-1, desc_idx); i++)
- kfree(tag_desc[i].user1);
-
-fail_alloc_pkt_init:
+ for (i = descs_num;
+ i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS, desc_idx); i++)
+ if (tag_desc[i].callback)
+ tag_desc[i].callback(tag_desc[i].user1,
+ tag_desc[i].user2);
+fail_free_tag_desc:
kfree(tag_desc);
-fail_alloc_desc:
return res;
}
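The tag-process rewrite above replaces hand-built command structs with opaque payloads returned by ipahal. Collected from this hunk, the lifecycle every immediate-command descriptor now follows is:

    struct ipahal_imm_cmd_ip_packet_tag_status status = { .tag = IPA_COOKIE };
    struct ipahal_imm_cmd_pyld *cmd_pyld;
    struct ipa3_desc desc = { 0 };

    cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_TAG_STATUS,
        &status, false);
    if (!cmd_pyld)
        return -ENOMEM;

    desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
    desc.pyld = cmd_pyld->data;
    desc.len = cmd_pyld->len;
    desc.type = IPA_IMM_CMD_DESC;
    desc.callback = ipa3_tag_destroy_imm; /* frees the payload once sent */
    desc.user1 = cmd_pyld;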
@@ -4540,16 +4110,16 @@ static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[],
int desc_size, int start_pipe, int end_pipe)
{
int i;
- u32 aggr_init;
+ struct ipa_ep_cfg_aggr ep_aggr;
int desc_idx = 0;
int res;
- struct ipa3_register_write *reg_write_agg_close;
+ struct ipahal_imm_cmd_register_write reg_write_agg_close;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+ struct ipahal_reg_valmask valmask;
for (i = start_pipe; i < end_pipe; i++) {
- aggr_init = ipa_read_reg(ipa3_ctx->mmio,
- IPA_ENDP_INIT_AGGR_N_OFST_v3_0(i));
- if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >>
- IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) != IPA_ENABLE_AGGR)
+ ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, i, &ep_aggr);
+ if (!ep_aggr.aggr_en)
continue;
IPADBG("Force close ep: %d\n", i);
if (desc_idx + 1 > desc_size) {
@@ -4558,32 +4128,29 @@ static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[],
goto fail_no_desc;
}
- reg_write_agg_close = kzalloc(sizeof(*reg_write_agg_close),
- GFP_KERNEL);
- if (!reg_write_agg_close) {
- IPAERR("no mem\n");
+ reg_write_agg_close.skip_pipeline_clear = false;
+ reg_write_agg_close.pipeline_clear_options =
+ IPAHAL_FULL_PIPELINE_CLEAR;
+ reg_write_agg_close.offset =
+ ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
+ ipahal_get_aggr_force_close_valmask(1<<i, &valmask);
+ reg_write_agg_close.value = valmask.val;
+ reg_write_agg_close.value_mask = valmask.mask;
+ cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+ &reg_write_agg_close, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct register_write imm cmd\n");
res = -ENOMEM;
goto fail_alloc_reg_write_agg_close;
}
- reg_write_agg_close->skip_pipeline_clear = 0;
- reg_write_agg_close->pipeline_clear_options =
- IPA_FULL_PIPELINE_CLEAR;
-
- reg_write_agg_close->offset = IPA_AGGR_FORCE_CLOSE_OFST;
- IPA_SETFIELD_IN_REG(reg_write_agg_close->value, 1 << i,
- IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT,
- IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK);
- reg_write_agg_close->value_mask =
- IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK <<
- IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT;
-
- desc[desc_idx].opcode = IPA_REGISTER_WRITE;
- desc[desc_idx].pyld = reg_write_agg_close;
- desc[desc_idx].len = sizeof(*reg_write_agg_close);
+ desc[desc_idx].opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ desc[desc_idx].pyld = cmd_pyld->data;
+ desc[desc_idx].len = cmd_pyld->len;
desc[desc_idx].type = IPA_IMM_CMD_DESC;
- desc[desc_idx].callback = ipa3_tag_free_buf;
- desc[desc_idx].user1 = reg_write_agg_close;
+ desc[desc_idx].callback = ipa3_tag_destroy_imm;
+ desc[desc_idx].user1 = cmd_pyld;
desc_idx++;
}
@@ -4591,7 +4158,9 @@ static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[],
fail_alloc_reg_write_agg_close:
for (i = 0; i < desc_idx; i++)
- kfree(desc[desc_idx].user1);
+ if (desc[i].callback)
+ desc[i].callback(desc[i].user1,
+ desc[i].user2);
fail_no_desc:
return res;
}
@@ -4687,7 +4256,7 @@ bool ipa3_is_client_handle_valid(u32 clnt_hdl)
void ipa3_proxy_clk_unvote(void)
{
if (ipa3_is_ready() && ipa3_ctx->q6_proxy_clk_vote_valid) {
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
ipa3_ctx->q6_proxy_clk_vote_valid = false;
}
}
@@ -4700,7 +4269,7 @@ void ipa3_proxy_clk_unvote(void)
void ipa3_proxy_clk_vote(void)
{
if (ipa3_is_ready() && !ipa3_ctx->q6_proxy_clk_vote_valid) {
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
ipa3_ctx->q6_proxy_clk_vote_valid = true;
}
}
@@ -4751,7 +4320,7 @@ enum ipa_transport_type ipa3_get_transport_type(void)
u32 ipa3_get_num_pipes(void)
{
- return ipa_read_reg(ipa3_ctx->mmio, IPA_ENABLED_PIPES_OFST);
+ return ipahal_read_reg(IPA_ENABLED_PIPES);
}
/**
@@ -4762,19 +4331,18 @@ u32 ipa3_get_num_pipes(void)
int ipa3_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
{
int res = -1;
+ u32 limit;
/* checking if IPA-HW can support */
- if ((agg_size >> 10) >
- IPA_AGGR_BYTE_LIMIT) {
- IPAERR("IPA-AGG byte limit %d\n",
- IPA_AGGR_BYTE_LIMIT);
+ limit = ipahal_aggr_get_max_byte_limit();
+ if ((agg_size >> 10) > limit) {
+ IPAERR("IPA-AGG byte limit %d\n", limit);
IPAERR("exceed aggr_byte_limit\n");
return res;
- }
- if (agg_count >
- IPA_AGGR_PKT_LIMIT) {
- IPAERR("IPA-AGG pkt limit %d\n",
- IPA_AGGR_PKT_LIMIT);
+ }
+ limit = ipahal_aggr_get_max_pkt_limit();
+ if (agg_count > limit) {
+ IPAERR("IPA-AGG pkt limit %d\n", limit);
IPAERR("exceed aggr_pkt_limit\n");
return res;
}
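The aggregation limits are now queried from ipahal instead of fixed defines; since agg_size is shifted down by 10 bits before the comparison, ipahal_aggr_get_max_byte_limit() is expected to report the limit in kilobytes. A hedged numeric example:

    /* e.g. a 31 KB aggregation buffer holding up to 16 packets */
    if (ipa3_disable_apps_wan_cons_deaggr(31 * 1024, 16))
        IPAERR("WAN CONS deaggr parameters rejected\n");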
@@ -4866,6 +4434,7 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes;
api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa;
api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB;
+ api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB;
api_ctrl->ipa_rm_create_resource = ipa3_rm_create_resource;
api_ctrl->ipa_rm_delete_resource = ipa3_rm_delete_resource;
api_ctrl->ipa_rm_register = ipa3_rm_register;
@@ -4935,12 +4504,6 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_create_wdi_mapping = ipa3_create_wdi_mapping;
api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info;
api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel;
- api_ctrl->ipa_usb_init_teth_prot = ipa3_usb_init_teth_prot;
- api_ctrl->ipa_usb_xdci_connect = ipa3_usb_xdci_connect;
- api_ctrl->ipa_usb_xdci_disconnect = ipa3_usb_xdci_disconnect;
- api_ctrl->ipa_usb_deinit_teth_prot = ipa3_usb_deinit_teth_prot;
- api_ctrl->ipa_usb_xdci_suspend = ipa3_usb_xdci_suspend;
- api_ctrl->ipa_usb_xdci_resume = ipa3_usb_xdci_resume;
api_ctrl->ipa_register_ipa_ready_cb = ipa3_register_ipa_ready_cb;
return 0;
@@ -4973,24 +4536,28 @@ bool ipa_is_modem_pipe(int pipe_idx)
}
static void ipa3_write_rsrc_grp_type_reg(int group_index,
- enum ipa_rsrc_grp_type_src n, bool src, u32 val) {
+ enum ipa_rsrc_grp_type_src n, bool src,
+ struct ipahal_reg_rsrc_grp_cfg *val) {
if (src) {
switch (group_index) {
case IPA_GROUP_UL:
case IPA_GROUP_DL:
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n(n), val);
+ ipahal_write_reg_n_fields(
+ IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+ n, val);
break;
case IPA_GROUP_DIAG:
case IPA_GROUP_DMA:
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n(n), val);
+ ipahal_write_reg_n_fields(
+ IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+ n, val);
break;
case IPA_GROUP_Q6ZIP:
case IPA_GROUP_UC_RX_Q:
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n(n), val);
+ ipahal_write_reg_n_fields(
+ IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
+ n, val);
break;
default:
IPAERR(
@@ -5002,18 +4569,21 @@ static void ipa3_write_rsrc_grp_type_reg(int group_index,
switch (group_index) {
case IPA_GROUP_UL:
case IPA_GROUP_DL:
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_DST_RSRC_GRP_01_RSRC_TYPE_n(n), val);
+ ipahal_write_reg_n_fields(
+ IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+ n, val);
break;
case IPA_GROUP_DIAG:
case IPA_GROUP_DMA:
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_DST_RSRC_GRP_23_RSRC_TYPE_n(n), val);
+ ipahal_write_reg_n_fields(
+ IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+ n, val);
break;
case IPA_GROUP_Q6ZIP_GENERAL:
case IPA_GROUP_Q6ZIP_ENGINE:
- ipa_write_reg(ipa3_ctx->mmio,
- IPA_DST_RSRC_GRP_45_RSRC_TYPE_n(n), val);
+ ipahal_write_reg_n_fields(
+ IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
+ n, val);
break;
default:
IPAERR(
@@ -5027,8 +4597,7 @@ static void ipa3_write_rsrc_grp_type_reg(int group_index,
static void ipa3_configure_rx_hps_clients(int depth, bool min)
{
int i;
- int val;
- u32 reg;
+ struct ipahal_reg_rx_hps_clients val;
/*
* depth 0 contains 4 first clients out of 6
@@ -5036,28 +4605,24 @@ static void ipa3_configure_rx_hps_clients(int depth, bool min)
*/
for (i = 0 ; i < (depth ? 2 : 4) ; i++) {
if (min)
- val = ipa3_rsrc_rx_grp_config
+ val.client_minmax[i] =
+ ipa3_rsrc_rx_grp_config
[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
[!depth ? i : 4 + i].min;
else
- val = ipa3_rsrc_rx_grp_config
+ val.client_minmax[i] =
+ ipa3_rsrc_rx_grp_config
[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
[!depth ? i : 4 + i].max;
-
- IPA_SETFIELD_IN_REG(reg, val,
- IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(i),
- IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(i));
}
if (depth) {
- ipa_write_reg(ipa3_ctx->mmio,
- min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_1 :
- IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
- reg);
+ ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_1 :
+ IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
+ &val);
} else {
- ipa_write_reg(ipa3_ctx->mmio,
- min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_0 :
- IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
- reg);
+ ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_0 :
+ IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
+ &val);
}
}
@@ -5065,31 +4630,18 @@ void ipa3_set_resorce_groups_min_max_limits(void)
{
int i;
int j;
- u32 reg;
+ struct ipahal_reg_rsrc_grp_cfg val;
IPADBG("ENTER\n");
IPADBG("Assign source rsrc groups min-max limits\n");
for (i = 0; i < IPA_RSRC_GRP_TYPE_SRC_MAX; i++) {
for (j = 0; j < IPA_GROUP_MAX; j = j + 2) {
- reg = 0;
- IPA_SETFIELD_IN_REG(reg,
- ipa3_rsrc_src_grp_config[i][j].min,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK);
- IPA_SETFIELD_IN_REG(reg,
- ipa3_rsrc_src_grp_config[i][j].max,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK);
- IPA_SETFIELD_IN_REG(reg,
- ipa3_rsrc_src_grp_config[i][j + 1].min,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK);
- IPA_SETFIELD_IN_REG(reg,
- ipa3_rsrc_src_grp_config[i][j + 1].max,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK);
- ipa3_write_rsrc_grp_type_reg(j, i, true, reg);
+ val.x_min = ipa3_rsrc_src_grp_config[i][j].min;
+ val.x_max = ipa3_rsrc_src_grp_config[i][j].max;
+ val.y_min = ipa3_rsrc_src_grp_config[i][j + 1].min;
+ val.y_max = ipa3_rsrc_src_grp_config[i][j + 1].max;
+ ipa3_write_rsrc_grp_type_reg(j, i, true, &val);
}
}
@@ -5097,24 +4649,11 @@ void ipa3_set_resorce_groups_min_max_limits(void)
for (i = 0; i < IPA_RSRC_GRP_TYPE_DST_MAX; i++) {
for (j = 0; j < IPA_GROUP_MAX; j = j + 2) {
- reg = 0;
- IPA_SETFIELD_IN_REG(reg,
- ipa3_rsrc_dst_grp_config[i][j].min,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK);
- IPA_SETFIELD_IN_REG(reg,
- ipa3_rsrc_dst_grp_config[i][j].max,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK);
- IPA_SETFIELD_IN_REG(reg,
- ipa3_rsrc_dst_grp_config[i][j + 1].min,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK);
- IPA_SETFIELD_IN_REG(reg,
- ipa3_rsrc_dst_grp_config[i][j + 1].max,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT,
- IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK);
- ipa3_write_rsrc_grp_type_reg(j, i, false, reg);
+ val.x_min = ipa3_rsrc_dst_grp_config[i][j].min;
+ val.x_max = ipa3_rsrc_dst_grp_config[i][j].max;
+ val.y_min = ipa3_rsrc_dst_grp_config[i][j + 1].min;
+ val.y_max = ipa3_rsrc_dst_grp_config[i][j + 1].max;
+ ipa3_write_rsrc_grp_type_reg(j, i, false, &val);
}
}
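Each pass of the loops above programs one X/Y register pair: group j supplies the x_* fields and group j+1 the y_* fields of a single ipahal_reg_rsrc_grp_cfg. A minimal sketch for the UL/DL source pair; the limit values and the resource-type index 0 are assumptions:

    struct ipahal_reg_rsrc_grp_cfg cfg = {
        .x_min = 3, .x_max = 7, /* IPA_GROUP_UL */
        .y_min = 3, .y_max = 7, /* IPA_GROUP_DL */
    };

    /* 'true' selects the source resource-group register bank */
    ipa3_write_rsrc_grp_type_reg(IPA_GROUP_UL, 0, true, &cfg);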
@@ -5196,7 +4735,8 @@ void ipa3_suspend_apps_pipes(bool suspend)
int ipa3_inject_dma_task_for_gsi(void)
{
static struct ipa3_mem_buffer mem = {0};
- static struct ipa3_hw_imm_cmd_dma_task_32b_addr cmd = {0};
+ struct ipahal_imm_cmd_dma_task_32b_addr cmd = {0};
+ static struct ipahal_imm_cmd_pyld *cmd_pyld;
struct ipa3_desc desc = {0};
/* allocate the memory only for the very first time */
@@ -5211,16 +4751,24 @@ int ipa3_inject_dma_task_for_gsi(void)
IPAERR("no mem\n");
return -EFAULT;
}
-
+ }
+ if (!cmd_pyld) {
cmd.flsh = 1;
cmd.size1 = mem.size;
cmd.addr1 = mem.phys_base;
cmd.packet_size = mem.size;
+ cmd_pyld = ipahal_construct_imm_cmd(
+ IPA_IMM_CMD_DMA_TASK_32B_ADDR, &cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("failed to construct dma_task_32b_addr cmd\n");
+ return -EFAULT;
+ }
}
- desc.opcode = IPA_DMA_TASK_32B_ADDR(1);
- desc.pyld = &cmd;
- desc.len = sizeof(cmd);
+ desc.opcode = ipahal_imm_cmd_get_opcode_param(
+ IPA_IMM_CMD_DMA_TASK_32B_ADDR, 1);
+ desc.pyld = cmd_pyld->data;
+ desc.len = cmd_pyld->len;
desc.type = IPA_IMM_CMD_DESC;
IPADBG("sending 1B packet to IPA\n");
@@ -5256,7 +4804,7 @@ int ipa3_stop_gsi_channel(u32 clnt_hdl)
ep = &ipa3_ctx->ep[clnt_hdl];
- ipa3_inc_client_enable_clks();
+ IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
memset(&mem, 0, sizeof(mem));
@@ -5288,7 +4836,7 @@ int ipa3_stop_gsi_channel(u32 clnt_hdl)
IPAERR("Failed to stop GSI channel with retries\n");
res = -EFAULT;
end_sequence:
- ipa3_dec_client_disable_clks();
+ IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return res;
}
@@ -5318,7 +4866,7 @@ int ipa3_calc_extra_wrd_bytes(const struct ipa_ipfltri_rule_eq *attrib)
if (attrib->ihl_offset_eq_16_present)
num++;
- IPADBG("extra bytes number %d\n", num);
+ IPADBG_LOW("extra bytes number %d\n", num);
return num;
}
@@ -5567,16 +5115,14 @@ int ipa3_load_fws(const struct firmware *firmware)
/*
* The ELF program header will contain the starting
* address to which the firmware needs to copied.
- * TODO: Shall we rely on that, or rely on the order
- * of which the FWs reside in the ELF, and use
- * registers/defines in here?
*/
phdr = (struct elf32_phdr *)elf_phdr_ptr;
/*
* p_addr will contain the physical address to which the
* FW needs to be loaded.
- * p_memsz will contain the size of the FW image.
+ * p_memsz will contain the size of the IRAM.
+ * p_filesz will contain the size of the FW image.
*/
fw_mem_base = ioremap(phdr->p_paddr, phdr->p_memsz);
if (!fw_mem_base) {
@@ -5585,6 +5131,9 @@ int ipa3_load_fws(const struct firmware *firmware)
return -ENOMEM;
}
+ /* Set the entire region to 0s */
+ memset(fw_mem_base, 0, phdr->p_memsz);
+
/*
* p_offset will contain and absolute offset from the beginning
* of the ELF file.
@@ -5598,7 +5147,8 @@ int ipa3_load_fws(const struct firmware *firmware)
return -EFAULT;
}
- for (index = 0; index < phdr->p_memsz/sizeof(uint32_t);
+ /* Write the FW */
+ for (index = 0; index < phdr->p_filesz/sizeof(uint32_t);
index++) {
writel_relaxed(*elf_data_ptr, &fw_mem_base[index]);
elf_data_ptr++;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
new file mode 100644
index 000000000000..f927f68cfeb6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_IPA3) += ipa_hal.o
+
+ipa_hal-y := ipahal.o ipahal_reg.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
new file mode 100644
index 000000000000..0d8b3c28c113
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -0,0 +1,768 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipahal.h"
+#include "ipahal_i.h"
+#include "ipahal_reg_i.h"
+
+static int ipahal_imm_cmd_init(enum ipa_hw_type ipa_hw_type);
+
+struct ipahal_context *ipahal_ctx;
+
+static const char *ipahal_imm_cmd_name_to_str[IPA_IMM_CMD_MAX] = {
+ __stringify(IPA_IMM_CMD_IP_V4_FILTER_INIT),
+ __stringify(IPA_IMM_CMD_IP_V6_FILTER_INIT),
+ __stringify(IPA_IMM_CMD_IP_V4_NAT_INIT),
+ __stringify(IPA_IMM_CMD_IP_V4_ROUTING_INIT),
+ __stringify(IPA_IMM_CMD_IP_V6_ROUTING_INIT),
+ __stringify(IPA_IMM_CMD_HDR_INIT_LOCAL),
+ __stringify(IPA_IMM_CMD_HDR_INIT_SYSTEM),
+ __stringify(IPA_IMM_CMD_REGISTER_WRITE),
+ __stringify(IPA_IMM_CMD_NAT_DMA),
+ __stringify(IPA_IMM_CMD_IP_PACKET_INIT),
+ __stringify(IPA_IMM_CMD_DMA_SHARED_MEM),
+ __stringify(IPA_IMM_CMD_IP_PACKET_TAG_STATUS),
+ __stringify(IPA_IMM_CMD_DMA_TASK_32B_ADDR),
+};
+
+#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
+ (kzalloc((__size), ((__is_atomic_ctx)?GFP_ATOMIC:GFP_KERNEL)))
+
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_dma_task_32b_addr *data;
+ struct ipahal_imm_cmd_dma_task_32b_addr *dma_params =
+ (struct ipahal_imm_cmd_dma_task_32b_addr *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_dma_task_32b_addr *)pyld->data;
+
+ if (unlikely(dma_params->size1 & ~0xFFFF)) {
+ IPAHAL_ERR("Size1 is bigger than 16bit width 0x%x\n",
+ dma_params->size1);
+ WARN_ON(1);
+ }
+ if (unlikely(dma_params->packet_size & ~0xFFFF)) {
+ IPAHAL_ERR("Pkt size is bigger than 16bit width 0x%x\n",
+ dma_params->packet_size);
+ WARN_ON(1);
+ }
+ data->cmplt = dma_params->cmplt ? 1 : 0;
+ data->eof = dma_params->eof ? 1 : 0;
+ data->flsh = dma_params->flsh ? 1 : 0;
+ data->lock = dma_params->lock ? 1 : 0;
+ data->unlock = dma_params->unlock ? 1 : 0;
+ data->size1 = dma_params->size1;
+ data->addr1 = dma_params->addr1;
+ data->packet_size = dma_params->packet_size;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_tag_status(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_packet_tag_status *data;
+ struct ipahal_imm_cmd_ip_packet_tag_status *tag_params =
+ (struct ipahal_imm_cmd_ip_packet_tag_status *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_packet_tag_status *)pyld->data;
+
+ if (unlikely(tag_params->tag & ~0xFFFFFFFFFFFF)) {
+ IPAHAL_ERR("tag is bigger than 48bit width 0x%llx\n",
+ tag_params->tag);
+ WARN_ON(1);
+ }
+ data->tag = tag_params->tag;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_dma_shared_mem *data;
+ struct ipahal_imm_cmd_dma_shared_mem *mem_params =
+ (struct ipahal_imm_cmd_dma_shared_mem *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_dma_shared_mem *)pyld->data;
+
+ if (unlikely(mem_params->size & ~0xFFFF)) {
+ IPAHAL_ERR("Size is bigger than 16bit width 0x%x\n",
+ mem_params->size);
+ WARN_ON(1);
+ }
+ if (unlikely(mem_params->local_addr & ~0xFFFF)) {
+ IPAHAL_ERR("Local addr is bigger than 16bit width 0x%x\n",
+ mem_params->local_addr);
+ WARN_ON(1);
+ }
+ data->direction = mem_params->is_read ? 1 : 0;
+ data->size = mem_params->size;
+ data->local_addr = mem_params->local_addr;
+ data->system_addr = mem_params->system_addr;
+ data->skip_pipeline_clear = mem_params->skip_pipeline_clear ? 1 : 0;
+ switch (mem_params->pipeline_clear_options) {
+ case IPAHAL_HPS_CLEAR:
+ data->pipeline_clear_options = 0;
+ break;
+ case IPAHAL_SRC_GRP_CLEAR:
+ data->pipeline_clear_options = 1;
+ break;
+ case IPAHAL_FULL_PIPELINE_CLEAR:
+ data->pipeline_clear_options = 2;
+ break;
+ default:
+ IPAHAL_ERR("unsupported pipline clear option %d\n",
+ mem_params->pipeline_clear_options);
+ WARN_ON(1);
+ }
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_register_write *data;
+ struct ipahal_imm_cmd_register_write *regwrt_params =
+ (struct ipahal_imm_cmd_register_write *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_register_write *)pyld->data;
+
+ if (unlikely(regwrt_params->offset & ~0xFFFF)) {
+ IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n",
+ regwrt_params->offset);
+ WARN_ON(1);
+ }
+ data->offset = regwrt_params->offset;
+ data->value = regwrt_params->value;
+ data->value_mask = regwrt_params->value_mask;
+
+ data->skip_pipeline_clear = regwrt_params->skip_pipeline_clear ? 1 : 0;
+ switch (regwrt_params->pipeline_clear_options) {
+ case IPAHAL_HPS_CLEAR:
+ data->pipeline_clear_options = 0;
+ break;
+ case IPAHAL_SRC_GRP_CLEAR:
+ data->pipeline_clear_options = 1;
+ break;
+ case IPAHAL_FULL_PIPELINE_CLEAR:
+ data->pipeline_clear_options = 2;
+ break;
+ default:
+ IPAHAL_ERR("unsupported pipline clear option %d\n",
+ regwrt_params->pipeline_clear_options);
+ WARN_ON(1);
+ }
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_init(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_packet_init *data;
+ struct ipahal_imm_cmd_ip_packet_init *pktinit_params =
+ (struct ipahal_imm_cmd_ip_packet_init *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_packet_init *)pyld->data;
+
+ if (unlikely(pktinit_params->destination_pipe_index & ~0x1F)) {
+ IPAHAL_ERR("Dst pipe idx is bigger than 5bit width 0x%x\n",
+ pktinit_params->destination_pipe_index);
+ WARN_ON(1);
+ }
+ data->destination_pipe_index = pktinit_params->destination_pipe_index;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_nat_dma(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_nat_dma *data;
+ struct ipahal_imm_cmd_nat_dma *nat_params =
+ (struct ipahal_imm_cmd_nat_dma *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_nat_dma *)pyld->data;
+
+ data->table_index = nat_params->table_index;
+ data->base_addr = nat_params->base_addr;
+ data->offset = nat_params->offset;
+ data->data = nat_params->data;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_system(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_hdr_init_system *data;
+ struct ipahal_imm_cmd_hdr_init_system *syshdr_params =
+ (struct ipahal_imm_cmd_hdr_init_system *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_hdr_init_system *)pyld->data;
+
+ data->hdr_table_addr = syshdr_params->hdr_table_addr;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_local(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_hdr_init_local *data;
+ struct ipahal_imm_cmd_hdr_init_local *lclhdr_params =
+ (struct ipahal_imm_cmd_hdr_init_local *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_hdr_init_local *)pyld->data;
+
+ if (unlikely(lclhdr_params->size_hdr_table & ~0xFFF)) {
+ IPAHAL_ERR("Hdr tble size is bigger than 12bit width 0x%x\n",
+ lclhdr_params->size_hdr_table);
+ WARN_ON(1);
+ }
+ data->hdr_table_addr = lclhdr_params->hdr_table_addr;
+ data->size_hdr_table = lclhdr_params->size_hdr_table;
+ data->hdr_addr = lclhdr_params->hdr_addr;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_routing_init(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_v6_routing_init *data;
+ struct ipahal_imm_cmd_ip_v6_routing_init *rt6_params =
+ (struct ipahal_imm_cmd_ip_v6_routing_init *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_v6_routing_init *)pyld->data;
+
+ data->hash_rules_addr = rt6_params->hash_rules_addr;
+ data->hash_rules_size = rt6_params->hash_rules_size;
+ data->hash_local_addr = rt6_params->hash_local_addr;
+ data->nhash_rules_addr = rt6_params->nhash_rules_addr;
+ data->nhash_rules_size = rt6_params->nhash_rules_size;
+ data->nhash_local_addr = rt6_params->nhash_local_addr;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_routing_init(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_v4_routing_init *data;
+ struct ipahal_imm_cmd_ip_v4_routing_init *rt4_params =
+ (struct ipahal_imm_cmd_ip_v4_routing_init *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_v4_routing_init *)pyld->data;
+
+ data->hash_rules_addr = rt4_params->hash_rules_addr;
+ data->hash_rules_size = rt4_params->hash_rules_size;
+ data->hash_local_addr = rt4_params->hash_local_addr;
+ data->nhash_rules_addr = rt4_params->nhash_rules_addr;
+ data->nhash_rules_size = rt4_params->nhash_rules_size;
+ data->nhash_local_addr = rt4_params->nhash_local_addr;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_nat_init(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_v4_nat_init *data;
+ struct ipahal_imm_cmd_ip_v4_nat_init *nat4_params =
+ (struct ipahal_imm_cmd_ip_v4_nat_init *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_v4_nat_init *)pyld->data;
+
+ data->ipv4_rules_addr = nat4_params->ipv4_rules_addr;
+ data->ipv4_expansion_rules_addr =
+ nat4_params->ipv4_expansion_rules_addr;
+ data->index_table_addr = nat4_params->index_table_addr;
+ data->index_table_expansion_addr =
+ nat4_params->index_table_expansion_addr;
+ data->table_index = nat4_params->table_index;
+ data->ipv4_rules_addr_type =
+ nat4_params->ipv4_rules_addr_shared ? 1 : 0;
+ data->ipv4_expansion_rules_addr_type =
+ nat4_params->ipv4_expansion_rules_addr_shared ? 1 : 0;
+ data->index_table_addr_type =
+ nat4_params->index_table_addr_shared ? 1 : 0;
+ data->index_table_expansion_addr_type =
+ nat4_params->index_table_expansion_addr_shared ? 1 : 0;
+ data->size_base_tables = nat4_params->size_base_tables;
+ data->size_expansion_tables = nat4_params->size_expansion_tables;
+ data->public_ip_addr = nat4_params->public_ip_addr;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_filter_init(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_v6_filter_init *data;
+ struct ipahal_imm_cmd_ip_v6_filter_init *flt6_params =
+ (struct ipahal_imm_cmd_ip_v6_filter_init *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_v6_filter_init *)pyld->data;
+
+ data->hash_rules_addr = flt6_params->hash_rules_addr;
+ data->hash_rules_size = flt6_params->hash_rules_size;
+ data->hash_local_addr = flt6_params->hash_local_addr;
+ data->nhash_rules_addr = flt6_params->nhash_rules_addr;
+ data->nhash_rules_size = flt6_params->nhash_rules_size;
+ data->nhash_local_addr = flt6_params->nhash_local_addr;
+
+ return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_filter_init(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_pyld *pyld;
+ struct ipa_imm_cmd_hw_ip_v4_filter_init *data;
+ struct ipahal_imm_cmd_ip_v4_filter_init *flt4_params =
+ (struct ipahal_imm_cmd_ip_v4_filter_init *)params;
+
+ pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ if (unlikely(!pyld)) {
+ IPAHAL_ERR("kzalloc err\n");
+ return pyld;
+ }
+ pyld->len = sizeof(*data);
+ data = (struct ipa_imm_cmd_hw_ip_v4_filter_init *)pyld->data;
+
+ data->hash_rules_addr = flt4_params->hash_rules_addr;
+ data->hash_rules_size = flt4_params->hash_rules_size;
+ data->hash_local_addr = flt4_params->hash_local_addr;
+ data->nhash_rules_addr = flt4_params->nhash_rules_addr;
+ data->nhash_rules_size = flt4_params->nhash_rules_size;
+ data->nhash_local_addr = flt4_params->nhash_local_addr;
+
+ return pyld;
+}
+
+/*
+ * struct ipahal_imm_cmd_obj - immediate command H/W information for
+ * specific IPA version
+ * @construct - CB to construct imm command payload from abstracted structure
+ * @opcode - Immediate command OpCode
+ * @dyn_op - Does this command support a dynamic opcode?
+ * Some commands' opcodes are dynamic, where part of the opcode is
+ * supplied as a param. This flag indicates whether the specific command
+ * supports it or not.
+ */
+struct ipahal_imm_cmd_obj {
+ struct ipahal_imm_cmd_pyld *(*construct)(enum ipahal_imm_cmd_name cmd,
+ const void *params, bool is_atomic_ctx);
+ u16 opcode;
+ bool dyn_op;
+};
+
+/*
+ * This table contains the info regarding each immediate command for IPAv3
+ * and later.
+ * Information like: opcode and construct functions.
+ * All the information on the IMM CMDs for IPAv3 is statically defined below.
+ * If information is missing for some IMM CMD on some IPA version,
+ * the init function will fill it with the information from the previous
+ * IPA version.
+ * Information is considered missing if all of the fields are 0.
+ * If opcode is -1, this means that the IMM CMD is removed on the
+ * specific version.
+ */
+static struct ipahal_imm_cmd_obj
+ ipahal_imm_cmd_objs[IPA_HW_MAX][IPA_IMM_CMD_MAX] = {
+ /* IPAv3 */
+ [IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_FILTER_INIT] = {
+ ipa_imm_cmd_construct_ip_v4_filter_init,
+ 3, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_FILTER_INIT] = {
+ ipa_imm_cmd_construct_ip_v6_filter_init,
+ 4, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_NAT_INIT] = {
+ ipa_imm_cmd_construct_ip_v4_nat_init,
+ 5, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_ROUTING_INIT] = {
+ ipa_imm_cmd_construct_ip_v4_routing_init,
+ 7, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_ROUTING_INIT] = {
+ ipa_imm_cmd_construct_ip_v6_routing_init,
+ 8, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_LOCAL] = {
+ ipa_imm_cmd_construct_hdr_init_local,
+ 9, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_SYSTEM] = {
+ ipa_imm_cmd_construct_hdr_init_system,
+ 10, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_REGISTER_WRITE] = {
+ ipa_imm_cmd_construct_register_write,
+ 12, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_NAT_DMA] = {
+ ipa_imm_cmd_construct_nat_dma,
+ 14, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_INIT] = {
+ ipa_imm_cmd_construct_ip_packet_init,
+ 16, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_DMA_TASK_32B_ADDR] = {
+ ipa_imm_cmd_construct_dma_task_32b_addr,
+ 17, true},
+ [IPA_HW_v3_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
+ ipa_imm_cmd_construct_dma_shared_mem,
+ 19, false},
+ [IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_TAG_STATUS] = {
+ ipa_imm_cmd_construct_ip_packet_tag_status,
+ 20, false},
+};
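+
+/*
+ * Illustrative only, not part of this table: per the comment above, a later
+ * HW version (e.g. a hypothetical IPA_HW_v3_1 index) would override an entry
+ * by defining it explicitly, for example
+ *
+ *	[IPA_HW_v3_1][IPA_IMM_CMD_NAT_DMA] = {
+ *		ipa_imm_cmd_construct_nat_dma,
+ *		-1, false},
+ *
+ * where opcode -1 marks the command as removed on that version. All-zero
+ * entries are instead filled from the previous version by
+ * ipahal_imm_cmd_init() below.
+ */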
+
+/*
+ * ipahal_imm_cmd_init() - Build the Immediate command information table
+ * See ipahal_imm_cmd_objs[][] comments
+ */
+static int ipahal_imm_cmd_init(enum ipa_hw_type ipa_hw_type)
+{
+ int i;
+ int j;
+ struct ipahal_imm_cmd_obj zero_obj;
+
+ IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+ memset(&zero_obj, 0, sizeof(zero_obj));
+ for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+ for (j = 0; j < IPA_IMM_CMD_MAX ; j++) {
+ if (!memcmp(&ipahal_imm_cmd_objs[i+1][j], &zero_obj,
+ sizeof(struct ipahal_imm_cmd_obj))) {
+ memcpy(&ipahal_imm_cmd_objs[i+1][j],
+ &ipahal_imm_cmd_objs[i][j],
+ sizeof(struct ipahal_imm_cmd_obj));
+ } else {
+ /*
+ * explicitly overridden immediate command.
+ * Check validity
+ */
+ if (!ipahal_imm_cmd_objs[i+1][j].opcode) {
+ IPAHAL_ERR(
+ "imm_cmd=%s with zero opcode\n",
+ ipahal_imm_cmd_name_str(j));
+ WARN_ON(1);
+ }
+ if (!ipahal_imm_cmd_objs[i+1][j].construct) {
+ IPAHAL_ERR(
+ "imm_cmd=%s with NULL construct fun\n",
+ ipahal_imm_cmd_name_str(j));
+ WARN_ON(1);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * ipahal_imm_cmd_name_str() - returns a string that represents the imm cmd
+ * @cmd_name: [in] Immediate command name
+ */
+const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name)
+{
+ if (cmd_name < 0 || cmd_name >= IPA_IMM_CMD_MAX) {
+ IPAHAL_ERR("requested name of invalid imm_cmd=%d\n", cmd_name);
+ return "Invalid IMM_CMD";
+ }
+
+ return ipahal_imm_cmd_name_to_str[cmd_name];
+}
+
+/*
+ * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
+ */
+u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd)
+{
+ u32 opcode;
+
+ if (cmd >= IPA_IMM_CMD_MAX) {
+ IPAHAL_ERR("Invalid immediate command imm_cmd=%u\n", cmd);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ IPAHAL_DBG("Get opcode of IMM_CMD=%s\n", ipahal_imm_cmd_name_str(cmd));
+ opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
+ if (opcode == -1) {
+ IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
+ ipahal_imm_cmd_name_str(cmd));
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ return opcode;
+}
+
+/*
+ * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
+ * that supports dynamic opcode
+ * Some commands' opcodes are not totally fixed; part of the opcode is
+ * a supplied parameter. E.g. the Low-Byte is fixed and the Hi-Byte
+ * is a given parameter.
+ * This API will return the composed opcode of the command given
+ * the parameter.
+ * Note: Use this API only for immediate commands that support Dynamic Opcode
+ */
+u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param)
+{
+ u32 opcode;
+
+ if (cmd >= IPA_IMM_CMD_MAX) {
+ IPAHAL_ERR("Invalid immediate command IMM_CMD=%u\n", cmd);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ IPAHAL_DBG("Get opcode of IMM_CMD=%s\n", ipahal_imm_cmd_name_str(cmd));
+
+ if (!ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].dyn_op) {
+ IPAHAL_ERR("IMM_CMD=%s does not support dynamic opcode\n",
+ ipahal_imm_cmd_name_str(cmd));
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ /* Currently, dynamic opcode commands use a param that is set
+ * on the Opcode hi-byte (the lo-byte is fixed).
+ * If this changes in the future, make the opcode calculation
+ * a CB per command
+ */
+ if (param & ~0xFFFF) {
+ IPAHAL_ERR("IMM_CMD=%s opcode param is invalid\n",
+ ipahal_imm_cmd_name_str(cmd));
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
+ if (opcode == -1) {
+ IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
+ ipahal_imm_cmd_name_str(cmd));
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ if (opcode & ~0xFFFF) {
+ IPAHAL_ERR("IMM_CMD=%s opcode will be overridden\n",
+ ipahal_imm_cmd_name_str(cmd));
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ return (opcode + (param<<8));
+}
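+
+/*
+ * Worked example (illustrative): on IPAv3.0, IPA_IMM_CMD_DMA_TASK_32B_ADDR has
+ * fixed opcode 17 (0x11) and supports a dynamic opcode where the param is the
+ * number of buffers to process. With param = 2 the composed opcode returned
+ * here is 0x11 + (2 << 8) = 0x0211.
+ */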
+
+/*
+ * ipahal_construct_imm_cmd() - Construct immediate command
+ * This function builds an imm cmd payload that can be sent to IPA.
+ * The command payload will be allocated dynamically.
+ * When done using it, call ipahal_destroy_imm_cmd() to release it
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+ if (!params) {
+ IPAHAL_ERR("Input error: params=%p\n", params);
+ WARN_ON(1);
+ return NULL;
+ }
+
+ if (cmd >= IPA_IMM_CMD_MAX) {
+ IPAHAL_ERR("Invalid immediate command %u\n", cmd);
+ WARN_ON(1);
+ return NULL;
+ }
+
+ IPAHAL_DBG("construct IMM_CMD:%s\n", ipahal_imm_cmd_name_str(cmd));
+ return ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].construct(
+ cmd, params, is_atomic_ctx);
+}
+
+/*
+ * ipahal_construct_nop_imm_cmd() - Construct immediate command for NO-Op
+ * The core driver may want to inject NOP commands to IPA
+ * to ensure, e.g., pipeline clear before some other operation.
+ * The functionality given by this function can be reached by
+ * ipahal_construct_imm_cmd(). This function is a helper for the core driver
+ * to reach this NOP functionality easily.
+ * @skip_pipline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipline_clr_opt: options for pipeline clear waiting
+ * @is_atomic_ctx: is called in atomic context or can sleep?
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
+ bool skip_pipline_clear,
+ enum ipahal_pipeline_clear_option pipline_clr_opt,
+ bool is_atomic_ctx)
+{
+ struct ipahal_imm_cmd_register_write cmd;
+ struct ipahal_imm_cmd_pyld *cmd_pyld;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.skip_pipeline_clear = skip_pipline_clear;
+ cmd.pipeline_clear_options = pipline_clr_opt;
+ cmd.value_mask = 0x0;
+
+ cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+ &cmd, is_atomic_ctx);
+
+ if (!cmd_pyld)
+ IPAHAL_ERR("failed to construct register_write imm cmd\n");
+
+ return cmd_pyld;
+}
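+
+/*
+ * Usage sketch (illustrative, not part of this patch): a typical caller
+ * constructs the payload, hands pyld->data and pyld->len to the core
+ * driver's descriptor send path together with the command opcode, and
+ * releases the payload when done:
+ *
+ *	struct ipahal_imm_cmd_pyld *pyld;
+ *
+ *	pyld = ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+ *	if (!pyld)
+ *		return -ENOMEM;
+ *	opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+ *	... send pyld->data (pyld->len bytes) using opcode ...
+ *	ipahal_destroy_imm_cmd(pyld);
+ */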
+
+int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base)
+{
+ int result;
+
+ IPAHAL_DBG("Entry - IPA HW TYPE=%d base=%p\n",
+ ipa_hw_type, base);
+
+ ipahal_ctx = kzalloc(sizeof(*ipahal_ctx), GFP_KERNEL);
+ if (!ipahal_ctx) {
+ IPAHAL_ERR("kzalloc err for ipahal_ctx\n");
+ result = -ENOMEM;
+ goto bail_err_exit;
+ }
+
+ if (ipa_hw_type < IPA_HW_v3_0) {
+ IPAHAL_ERR("ipahal supported on IPAv3 and later only\n");
+ result = -EINVAL;
+ goto bail_free_ctx;
+ }
+
+ if (!base) {
+ IPAHAL_ERR("invalid memory io mapping addr\n");
+ result = -EINVAL;
+ goto bail_free_ctx;
+ }
+
+ ipahal_ctx->hw_type = ipa_hw_type;
+ ipahal_ctx->base = base;
+
+ if (ipahal_reg_init(ipa_hw_type)) {
+ IPAHAL_ERR("failed to init ipahal reg\n");
+ result = -EFAULT;
+ goto bail_free_ctx;
+ }
+
+ if (ipahal_imm_cmd_init(ipa_hw_type)) {
+ IPAHAL_ERR("failed to init ipahal imm cmd\n");
+ result = -EFAULT;
+ goto bail_free_ctx;
+ }
+
+ return 0;
+
+bail_free_ctx:
+ kfree(ipahal_ctx);
+ ipahal_ctx = NULL;
+bail_err_exit:
+ return result;
+}
+
+void ipahal_destroy(void)
+{
+ IPAHAL_DBG("Entry\n");
+
+ kfree(ipahal_ctx);
+ ipahal_ctx = NULL;
+}
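+
+/*
+ * Usage sketch (illustrative): the core driver is expected to call
+ * ipahal_init() once, before any other ipahal API, and ipahal_destroy()
+ * on teardown:
+ *
+ *	if (ipahal_init(ipa_hw_type, reg_base))
+ *		return -EFAULT;
+ *	...
+ *	ipahal_destroy();
+ *
+ * reg_base here is a hypothetical name for the caller's ioremapped IPA
+ * register base address.
+ */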
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
new file mode 100644
index 000000000000..5bdbcaf451a0
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -0,0 +1,395 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_H_
+#define _IPAHAL_H_
+
+#include <linux/msm_ipa.h>
+
+/*
+ * Immediate command names
+ *
+ * NOTE: Any change to this enum requires a matching change to the
+ *	ipahal_imm_cmd_name_to_str array as well.
+ */
+enum ipahal_imm_cmd_name {
+ IPA_IMM_CMD_IP_V4_FILTER_INIT,
+ IPA_IMM_CMD_IP_V6_FILTER_INIT,
+ IPA_IMM_CMD_IP_V4_NAT_INIT,
+ IPA_IMM_CMD_IP_V4_ROUTING_INIT,
+ IPA_IMM_CMD_IP_V6_ROUTING_INIT,
+ IPA_IMM_CMD_HDR_INIT_LOCAL,
+ IPA_IMM_CMD_HDR_INIT_SYSTEM,
+ IPA_IMM_CMD_REGISTER_WRITE,
+ IPA_IMM_CMD_NAT_DMA,
+ IPA_IMM_CMD_IP_PACKET_INIT,
+ IPA_IMM_CMD_DMA_SHARED_MEM,
+ IPA_IMM_CMD_IP_PACKET_TAG_STATUS,
+ IPA_IMM_CMD_DMA_TASK_32B_ADDR,
+ IPA_IMM_CMD_MAX,
+};
+
+/* Immediate commands abstracted structures */
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_filter_init - IP_V4_FILTER_INIT cmd payload
+ * Inits IPv4 filter block.
+ * @hash_rules_addr: Addr in sys mem where ipv4 hashable flt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
+ * be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
+ * be copied to
+ */
+struct ipahal_imm_cmd_ip_v4_filter_init {
+ u64 hash_rules_addr;
+ u32 hash_rules_size;
+ u32 hash_local_addr;
+ u64 nhash_rules_addr;
+ u32 nhash_rules_size;
+ u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v6_filter_init - IP_V6_FILTER_INIT cmd payload
+ * Inits IPv6 filter block.
+ * @hash_rules_addr: Addr in sys mem where ipv6 hashable flt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
+ * be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
+ * be copied to
+ */
+struct ipahal_imm_cmd_ip_v6_filter_init {
+ u64 hash_rules_addr;
+ u32 hash_rules_size;
+ u32 hash_local_addr;
+ u64 nhash_rules_addr;
+ u32 nhash_rules_size;
+ u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_nat_init - IP_V4_NAT_INIT cmd payload
+ * Inits IPv4 NAT block. Initiates the NAT table with its dimensions, location,
+ * cache address and other related parameters.
+ * @table_index: For future support of multiple NAT tables
+ * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
+ * @ipv4_rules_addr_shared: ipv4_rules_addr in shared mem (if not, then sys)
+ * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expansion NAT
+ * table starts. IPv4 NAT rules that result in NAT collision are located
+ * in this table.
+ * @ipv4_expansion_rules_addr_shared: ipv4_expansion_rules_addr in
+ * shared mem (if not, then sys)
+ * @index_table_addr: Addr in sys/shared mem where index table, which points
+ * to NAT table starts
+ * @index_table_addr_shared: index_table_addr in shared mem (if not, then sys)
+ * @index_table_expansion_addr: Addr in sys/shared mem where expansion index
+ * table starts
+ * @index_table_expansion_addr_shared: index_table_expansion_addr in
+ * shared mem (if not, then sys)
+ * @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
+ * @size_expansion_tables: Num of entries in NAT expansion tbl and expansion
+ * idx tbl (each)
+ * @public_ip_addr: public IP address
+ */
+struct ipahal_imm_cmd_ip_v4_nat_init {
+ u8 table_index;
+ u64 ipv4_rules_addr;
+ bool ipv4_rules_addr_shared;
+ u64 ipv4_expansion_rules_addr;
+ bool ipv4_expansion_rules_addr_shared;
+ u64 index_table_addr;
+ bool index_table_addr_shared;
+ u64 index_table_expansion_addr;
+ bool index_table_expansion_addr_shared;
+ u16 size_base_tables;
+ u16 size_expansion_tables;
+ u32 public_ip_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_routing_init - IP_V4_ROUTING_INIT cmd payload
+ * Inits IPv4 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in sys mem where ipv4 hashable rt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
+ * be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
+ * be copied to
+ */
+struct ipahal_imm_cmd_ip_v4_routing_init {
+ u64 hash_rules_addr;
+ u32 hash_rules_size;
+ u32 hash_local_addr;
+ u64 nhash_rules_addr;
+ u32 nhash_rules_size;
+ u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v6_routing_init - IP_V6_ROUTING_INIT cmd payload
+ * Inits IPv6 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in sys mem where ipv6 hashable rt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
+ * be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
+ * be copied to
+ */
+struct ipahal_imm_cmd_ip_v6_routing_init {
+ u64 hash_rules_addr;
+ u32 hash_rules_size;
+ u32 hash_local_addr;
+ u64 nhash_rules_addr;
+ u32 nhash_rules_size;
+ u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_hdr_init_local - HDR_INIT_LOCAL cmd payload
+ * Inits hdr table within local mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in sys mem where the table starts (SRC)
+ * @size_hdr_table: Size of the above (in bytes)
+ * @hdr_addr: header address in IPA sram (used as DST for memory copy)
+ */
+struct ipahal_imm_cmd_hdr_init_local {
+ u64 hdr_table_addr;
+ u32 size_hdr_table;
+ u32 hdr_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_hdr_init_system - HDR_INIT_SYSTEM cmd payload
+ * Inits hdr table within sys mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
+ */
+struct ipahal_imm_cmd_hdr_init_system {
+ u64 hdr_table_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_nat_dma - NAT_DMA cmd payload
+ * Perform DMA operation on NAT related mem addresses. Copy data into
+ * different locations within NAT associated tbls. (For add/remove NAT rules)
+ * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ */
+struct ipahal_imm_cmd_nat_dma {
+ u8 table_index;
+ u8 base_addr;
+ u32 offset;
+ u16 data;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_packet_init - IP_PACKET_INIT cmd payload
+ * Configuration for a specific IP pkt. Shall be sent prior to the IP pkt
+ * data. The pkt will not go through IP pkt processing.
+ * @destination_pipe_index: Destination pipe index (in case routing
+ * is enabled, this field will overwrite the rt rule)
+ */
+struct ipahal_imm_cmd_ip_packet_init {
+ u32 destination_pipe_index;
+};
+
+/*
+ * enum ipahal_pipeline_clear_option - Values for pipeline clear waiting options
+ * @IPAHAL_HPS_CLEAR: Wait for HPS clear. All queues except high priority queue
+ * shall not be serviced until HPS is clear of packets or immediate commands.
+ * The high priority Rx queue / Q6ZIP group shall still be serviced normally.
+ *
+ * @IPAHAL_SRC_GRP_CLEAR: Wait for originating source group to be clear
+ * (for no packet contexts allocated to the originating source group).
+ * The source group / Rx queue shall not be serviced until all previously
+ * allocated packet contexts are released. All other source groups/queues shall
+ * be serviced normally.
+ *
+ * @IPAHAL_FULL_PIPELINE_CLEAR: Wait for full pipeline to be clear.
+ * All groups / Rx queues shall not be serviced until IPA pipeline is fully
+ * clear. This should be used for debug only.
+ */
+enum ipahal_pipeline_clear_option {
+ IPAHAL_HPS_CLEAR,
+ IPAHAL_SRC_GRP_CLEAR,
+ IPAHAL_FULL_PIPELINE_CLEAR
+};
+
+/*
+ * struct ipahal_imm_cmd_register_write - REGISTER_WRITE cmd payload
+ * Write value to register. Allows reg changes to be synced with data packet
+ * and other immediate commands. Can be used to access the sram
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @skip_pipeline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipeline_clear_options: options for pipeline clear waiting
+ */
+struct ipahal_imm_cmd_register_write {
+ u32 offset;
+ u32 value;
+ u32 value_mask;
+ bool skip_pipeline_clear;
+ enum ipahal_pipeline_clear_option pipeline_clear_options;
+};
+
+/*
+ * struct ipahal_imm_cmd_dma_shared_mem - DMA_SHARED_MEM cmd payload
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @local_addr: Address in IPA local memory
+ * @is_read: Read operation from local memory? If not, then write.
+ * @skip_pipeline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipeline_clear_options: options for pipeline clear waiting
+ * @system_addr: Address in system memory
+ */
+struct ipahal_imm_cmd_dma_shared_mem {
+ u32 size;
+ u32 local_addr;
+ bool is_read;
+ bool skip_pipeline_clear;
+ enum ipahal_pipeline_clear_option pipeline_clear_options;
+ u64 system_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_packet_tag_status - IP_PACKET_TAG_STATUS cmd payload
+ * This cmd is used to allow SW to track HW processing by setting a TAG
+ * value that is passed back to SW inside Packet Status information.
+ * TAG info will be provided as part of Packet Status info generated for
+ * the next pkt transferred over the pipe.
+ * This immediate command must be followed by a packet in the same transfer.
+ * @tag: Tag that is provided back to SW
+ */
+struct ipahal_imm_cmd_ip_packet_tag_status {
+ u64 tag;
+};
+
+/*
+ * struct ipahal_imm_cmd_dma_task_32b_addr - IPA_DMA_TASK_32B_ADDR cmd payload
+ * Used by clients using 32bit addresses. Used to perform DMA operation on
+ * multiple descriptors.
+ * The Opcode is dynamic, where it holds the number of buffers to process
+ * @cmplt: Complete flag: If true, IPA interrupts SW when the entire
+ * DMA related data was completely xfered to its destination.
+ * @eof: End Of Frame flag: If true, IPA asserts the EOT to the
+ * dest client. This is used for aggr sequence
+ * @flsh: Flush flag: If true, the pkt will go through the IPA blocks but
+ * will not be xfered to dest client but rather will be discarded
+ * @lock: Lock pipe flag: If true, IPA will stop processing descriptors
+ * from other EPs in the same src grp (RX queue)
+ * @unlock: Unlock pipe flag: If true, IPA will stop exclusively
+ * servicing current EP out of the src EPs of the grp (RX queue)
+ * @size1: Size of buffer1 data
+ * @addr1: Pointer to buffer1 data
+ * @packet_size: Total packet size. If a pkt is sent using multiple DMA_TASKs,
+ * only the first one needs to have this field set. It will be ignored
+ * in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK
+ * must contain this field (2 or more buffers) or EOT.
+ */
+struct ipahal_imm_cmd_dma_task_32b_addr {
+ bool cmplt;
+ bool eof;
+ bool flsh;
+ bool lock;
+ bool unlock;
+ u32 size1;
+ u32 addr1;
+ u32 packet_size;
+};
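+
+/*
+ * Illustrative fill for a single-buffer transfer; the field values are an
+ * assumption based on the descriptions above, not taken from this patch:
+ *
+ *	struct ipahal_imm_cmd_dma_task_32b_addr task = {
+ *		.cmplt = true,
+ *		.size1 = len,
+ *		.addr1 = buf_dma_addr,
+ *		.packet_size = len,
+ *	};
+ */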
+
+/*
+ * struct ipahal_imm_cmd_pyld - Immediate cmd payload information
+ * @len: length of the buffer
+ * @data: buffer containing the immediate command payload. The buffer goes
+ * back to back with this structure
+ */
+struct ipahal_imm_cmd_pyld {
+ u16 len;
+ u8 data[0];
+};
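+
+/*
+ * Layout note: the construct functions in ipahal.c allocate this header and
+ * the H/W-format command in a single block,
+ *
+ *	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+ *
+ * so pyld->data points directly at the command payload that follows the
+ * header fields.
+ */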
+
+
+/* Immediate command Function APIs */
+
+/*
+ * ipahal_imm_cmd_name_str() - returns a string that represents the imm cmd
+ * @cmd_name: [in] Immediate command name
+ */
+const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name);
+
+/*
+ * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
+ */
+u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
+
+/*
+ * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
+ * that supports dynamic opcode
+ * Some commands' opcodes are not totally fixed; part of the opcode is
+ * a supplied parameter. E.g. the Low-Byte is fixed and the Hi-Byte
+ * is a given parameter.
+ * This API will return the composed opcode of the command given
+ * the parameter.
+ * Note: Use this API only for immediate commands that support Dynamic Opcode
+ */
+u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param);
+
+/*
+ * ipahal_construct_imm_cmd() - Construct immediate command
+ * This function builds an imm cmd payload that can be sent to IPA.
+ * The command payload will be allocated dynamically.
+ * When done using it, call ipahal_destroy_imm_cmd() to release it
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
+ enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx);
+
+/*
+ * ipahal_construct_nop_imm_cmd() - Construct immediate command for NO-Op
+ * The core driver may want to inject NOP commands to IPA
+ * to ensure, e.g., pipeline clear before some other operation.
+ * The functionality given by this function can be reached by
+ * ipahal_construct_imm_cmd(). This function is a helper for the core driver
+ * to reach this NOP functionality easily.
+ * @skip_pipline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipline_clr_opt: options for pipeline clear waiting
+ * @is_atomic_ctx: is called in atomic context or can sleep?
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
+ bool skip_pipline_clear,
+ enum ipahal_pipeline_clear_option pipline_clr_opt,
+ bool is_atomic_ctx);
+
+/*
+ * ipahal_destroy_imm_cmd() - Destroy/Release a payload that was built
+ * by the construction functions
+ */
+static inline void ipahal_destroy_imm_cmd(struct ipahal_imm_cmd_pyld *pyld)
+{
+ kfree(pyld);
+}
+
+int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base);
+void ipahal_destroy(void);
+
+#endif /* _IPAHAL_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
new file mode 100644
index 000000000000..b8d6c13c30ed
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -0,0 +1,351 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_I_H_
+#define _IPAHAL_I_H_
+
+#define IPAHAL_DRV_NAME "ipahal"
+#define IPAHAL_DBG(fmt, args...) \
+ pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#define IPAHAL_ERR(fmt, args...) \
+ pr_err(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+/*
+ * struct ipahal_context - HAL global context data
+ * @hw_type: IPA H/W type/version.
+ * @base: Base address to be used for accessing IPA memory. This is
+ * I/O memory mapped address.
+ */
+struct ipahal_context {
+ enum ipa_hw_type hw_type;
+ void __iomem *base;
+};
+
+extern struct ipahal_context *ipahal_ctx;
+
+
+
+/* Immediate commands H/W structures */
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_filter_init - IP_V4_FILTER_INIT command payload
+ * in H/W format.
+ * Inits IPv4 filter block.
+ * @hash_rules_addr: Addr in system mem where ipv4 hashable flt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
+ * be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
+ * be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v4_filter_init {
+ u64 hash_rules_addr:64;
+ u64 hash_rules_size:12;
+ u64 hash_local_addr:16;
+ u64 nhash_rules_size:12;
+ u64 nhash_local_addr:16;
+ u64 rsvd:8;
+ u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v6_filter_init - IP_V6_FILTER_INIT command payload
+ * in H/W format.
+ * Inits IPv6 filter block.
+ * @hash_rules_addr: Addr in system mem where ipv6 hashable flt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
+ * be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
+ * be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v6_filter_init {
+ u64 hash_rules_addr:64;
+ u64 hash_rules_size:12;
+ u64 hash_local_addr:16;
+ u64 nhash_rules_size:12;
+ u64 nhash_local_addr:16;
+ u64 rsvd:8;
+ u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_nat_init - IP_V4_NAT_INIT command payload
+ * in H/W format.
+ * Inits IPv4 NAT block. Initiates the NAT table with its dimensions, location,
+ * cache address and other related parameters.
+ * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
+ * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expansion NAT
+ * table starts. IPv4 NAT rules that result in NAT collision are located
+ * in this table.
+ * @index_table_addr: Addr in sys/shared mem where index table, which points
+ * to NAT table starts
+ * @index_table_expansion_addr: Addr in sys/shared mem where expansion index
+ * table starts
+ * @table_index: For future support of multiple NAT tables
+ * @rsvd1: reserved
+ * @ipv4_rules_addr_type: ipv4_rules_addr in sys or shared mem
+ * @ipv4_expansion_rules_addr_type: ipv4_expansion_rules_addr in
+ * sys or shared mem
+ * @index_table_addr_type: index_table_addr in sys or shared mem
+ * @index_table_expansion_addr_type: index_table_expansion_addr in
+ * sys or shared mem
+ * @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
+ * @size_expansion_tables: Num of entries in NAT expansion tbl and expansion
+ * idx tbl (each)
+ * @rsvd2: reserved
+ * @public_ip_addr: public IP address
+ */
+struct ipa_imm_cmd_hw_ip_v4_nat_init {
+ u64 ipv4_rules_addr:64;
+ u64 ipv4_expansion_rules_addr:64;
+ u64 index_table_addr:64;
+ u64 index_table_expansion_addr:64;
+ u64 table_index:3;
+ u64 rsvd1:1;
+ u64 ipv4_rules_addr_type:1;
+ u64 ipv4_expansion_rules_addr_type:1;
+ u64 index_table_addr_type:1;
+ u64 index_table_expansion_addr_type:1;
+ u64 size_base_tables:12;
+ u64 size_expansion_tables:10;
+ u64 rsvd2:2;
+ u64 public_ip_addr:32;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_routing_init - IP_V4_ROUTING_INIT command payload
+ * in H/W format.
+ * Inits IPv4 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in system mem where ipv4 hashable rt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
+ * be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
+ * be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v4_routing_init {
+ u64 hash_rules_addr:64;
+ u64 hash_rules_size:12;
+ u64 hash_local_addr:16;
+ u64 nhash_rules_size:12;
+ u64 nhash_local_addr:16;
+ u64 rsvd:8;
+ u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v6_routing_init - IP_V6_ROUTING_INIT command payload
+ * in H/W format.
+ * Inits IPv6 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in system mem where ipv6 hashable rt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
+ * be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
+ * be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v6_routing_init {
+ u64 hash_rules_addr:64;
+ u64 hash_rules_size:12;
+ u64 hash_local_addr:16;
+ u64 nhash_rules_size:12;
+ u64 nhash_local_addr:16;
+ u64 rsvd:8;
+ u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_hdr_init_local - HDR_INIT_LOCAL command payload
+ * in H/W format.
+ * Inits hdr table within local mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in sys mem where the table starts (SRC)
+ * @size_hdr_table: Size of the above (in bytes)
+ * @hdr_addr: header address in IPA sram (used as DST for memory copy)
+ * @rsvd: reserved
+ */
+struct ipa_imm_cmd_hw_hdr_init_local {
+ u64 hdr_table_addr:64;
+ u64 size_hdr_table:12;
+ u64 hdr_addr:16;
+ u64 rsvd:4;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_nat_dma - NAT_DMA command payload
+ * in H/W format
+ * Perform DMA operation on NAT related mem addresses. Copy data into
+ * different locations within NAT associated tbls. (For add/remove NAT rules)
+ * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
+ * @rsvd1: reserved
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ * @rsvd2: reserved
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ * @rsvd3: reserved
+ */
+struct ipa_imm_cmd_hw_nat_dma {
+ u64 table_index:3;
+ u64 rsvd1:1;
+ u64 base_addr:2;
+ u64 rsvd2:2;
+ u64 offset:32;
+ u64 data:16;
+ u64 rsvd3:8;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_hdr_init_system - HDR_INIT_SYSTEM command payload
+ * in H/W format.
+ * Inits hdr table within sys mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
+ */
+struct ipa_imm_cmd_hw_hdr_init_system {
+ u64 hdr_table_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_packet_init - IP_PACKET_INIT command payload
+ * in H/W format.
+ * Configuration for a specific IP pkt. Shall be sent prior to the IP pkt
+ * data. The pkt will not go through IP pkt processing.
+ * @destination_pipe_index: Destination pipe index (in case routing
+ * is enabled, this field will overwrite the rt rule)
+ * @rsvd: reserved
+ */
+struct ipa_imm_cmd_hw_ip_packet_init {
+ u64 destination_pipe_index:5;
+ u64 rsv1:59;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_register_write - REGISTER_WRITE command payload
+ * in H/W format.
+ * Write value to register. Allows reg changes to be synced with data packet
+ * and other immediate commands. Can be used to access the sram
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @pipeline_clear_options: options for pipeline to clear
+ * 0: HPS - no pkt inside HPS (not grp specific)
+ * 1: source group - The immediate cmd src grp does not use any pkt ctxs
+ * 2: Wait until no pkt reside inside IPA pipeline
+ * 3: reserved
+ * @rsvd: reserved - should be set to zero
+ */
+struct ipa_imm_cmd_hw_register_write {
+ u64 sw_rsvd:15;
+ u64 skip_pipeline_clear:1;
+ u64 offset:16;
+ u64 value:32;
+ u64 value_mask:32;
+ u64 pipeline_clear_options:2;
+ u64 rsvd:30;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload
+ * in H/W format.
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @local_addr: Address in IPA local memory
+ * @direction: Read or write?
+ * 0: IPA write, Write to local address from system address
+ * 1: IPA read, Read from local address to system address
+ * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
+ * @pipeline_clear_options: options for pipeline to clear
+ * 0: HPS - no pkt inside HPS (not grp specific)
+ * 1: source group - The immediate cmd src grp does not use any pkt ctxs
+ * 2: Wait until no pkt reside inside IPA pipeline
+ * 3: reserved
+ * @rsvd: reserved - should be set to zero
+ * @system_addr: Address in system memory
+ */
+struct ipa_imm_cmd_hw_dma_shared_mem {
+ u64 sw_rsvd:16;
+ u64 size:16;
+ u64 local_addr:16;
+ u64 direction:1;
+ u64 skip_pipeline_clear:1;
+ u64 pipeline_clear_options:2;
+ u64 rsvd:12;
+ u64 system_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_packet_tag_status -
+ * IP_PACKET_TAG_STATUS command payload in H/W format.
+ * This cmd is used to allow SW to track HW processing by setting a TAG
+ * value that is passed back to SW inside Packet Status information.
+ * TAG info will be provided as part of Packet Status info generated for
+ * the next pkt transferred over the pipe.
+ * This immediate command must be followed by a packet in the same transfer.
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @tag: Tag that is provided back to SW
+ */
+struct ipa_imm_cmd_hw_ip_packet_tag_status {
+ u64 sw_rsvd:16;
+ u64 tag:48;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_dma_task_32b_addr -
+ * IPA_DMA_TASK_32B_ADDR command payload in H/W format.
+ * Used by clients using 32bit addresses. Used to perform DMA operation on
+ * multiple descriptors.
+ * The Opcode is dynamic, where it holds the number of buffers to process
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @cmplt: Complete flag: When asserted, IPA will interrupt SW when the entire
+ * DMA related data was completely xfered to its destination.
+ * @eof: End Of Frame flag: When asserted, IPA will assert the EOT to the
+ * dest client. This is used for aggr sequence
+ * @flsh: Flush flag: When asserted, the pkt will go through the IPA blocks but
+ * will not be xfered to dest client but rather will be discarded
+ * @lock: Lock pipe flag: When asserted, IPA will stop processing descriptors
+ * from other EPs in the same src grp (RX queue)
+ * @unlock: Unlock pipe flag: When asserted, IPA will stop exclusively
+ * servicing current EP out of the src EPs of the grp (RX queue)
+ * @size1: Size of buffer1 data
+ * @addr1: Pointer to buffer1 data
+ * @packet_size: Total packet size. If a pkt is sent using multiple DMA_TASKs,
+ * only the first one needs to have this field set. It will be ignored
+ * in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK
+ * must contain this field (2 or more buffers) or EOT.
+ */
+struct ipa_imm_cmd_hw_dma_task_32b_addr {
+ u64 sw_rsvd:11;
+ u64 cmplt:1;
+ u64 eof:1;
+ u64 flsh:1;
+ u64 lock:1;
+ u64 unlock:1;
+ u64 size1:16;
+ u64 addr1:32;
+ u64 packet_size:16;
+};
+
+#endif /* _IPAHAL_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
new file mode 100644
index 000000000000..707289ab896b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -0,0 +1,1352 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/ipa.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipa.h>
+#include "ipahal_i.h"
+#include "ipahal_reg.h"
+#include "ipahal_reg_i.h"
+
+static const char *ipareg_name_to_str[IPA_REG_MAX] = {
+ __stringify(IPA_ROUTE),
+ __stringify(IPA_IRQ_STTS_EE_n),
+ __stringify(IPA_IRQ_EN_EE_n),
+ __stringify(IPA_IRQ_CLR_EE_n),
+ __stringify(IPA_IRQ_SUSPEND_INFO_EE_n),
+ __stringify(IPA_SUSPEND_IRQ_EN_EE_n),
+ __stringify(IPA_SUSPEND_IRQ_CLR_EE_n),
+ __stringify(IPA_BCR),
+ __stringify(IPA_ENABLED_PIPES),
+ __stringify(IPA_COMP_SW_RESET),
+ __stringify(IPA_VERSION),
+ __stringify(IPA_TAG_TIMER),
+ __stringify(IPA_COMP_HW_VERSION),
+ __stringify(IPA_SPARE_REG_1),
+ __stringify(IPA_SPARE_REG_2),
+ __stringify(IPA_COMP_CFG),
+ __stringify(IPA_STATE_AGGR_ACTIVE),
+ __stringify(IPA_ENDP_INIT_HDR_n),
+ __stringify(IPA_ENDP_INIT_HDR_EXT_n),
+ __stringify(IPA_ENDP_INIT_AGGR_n),
+ __stringify(IPA_AGGR_FORCE_CLOSE),
+ __stringify(IPA_ENDP_INIT_ROUTE_n),
+ __stringify(IPA_ENDP_INIT_MODE_n),
+ __stringify(IPA_ENDP_INIT_NAT_n),
+ __stringify(IPA_ENDP_INIT_CTRL_n),
+ __stringify(IPA_ENDP_INIT_HOL_BLOCK_EN_n),
+ __stringify(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n),
+ __stringify(IPA_ENDP_INIT_DEAGGR_n),
+ __stringify(IPA_ENDP_INIT_SEQ_n),
+ __stringify(IPA_DEBUG_CNT_REG_n),
+ __stringify(IPA_ENDP_INIT_CFG_n),
+ __stringify(IPA_IRQ_EE_UC_n),
+ __stringify(IPA_ENDP_INIT_HDR_METADATA_MASK_n),
+ __stringify(IPA_ENDP_INIT_HDR_METADATA_n),
+ __stringify(IPA_ENABLE_GSI),
+ __stringify(IPA_ENDP_INIT_RSRC_GRP_n),
+ __stringify(IPA_SHARED_MEM_SIZE),
+ __stringify(IPA_SRAM_DIRECT_ACCESS_n),
+ __stringify(IPA_DEBUG_CNT_CTRL_n),
+ __stringify(IPA_UC_MAILBOX_m_n),
+ __stringify(IPA_FILT_ROUT_HASH_FLUSH),
+ __stringify(IPA_SINGLE_NDP_MODE),
+ __stringify(IPA_QCNCM),
+ __stringify(IPA_SYS_PKT_PROC_CNTXT_BASE),
+ __stringify(IPA_LOCAL_PKT_PROC_CNTXT_BASE),
+ __stringify(IPA_ENDP_STATUS_n),
+ __stringify(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n),
+ __stringify(IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n),
+ __stringify(IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n),
+ __stringify(IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n),
+ __stringify(IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n),
+ __stringify(IPA_DST_RSRC_GRP_01_RSRC_TYPE_n),
+ __stringify(IPA_DST_RSRC_GRP_23_RSRC_TYPE_n),
+ __stringify(IPA_DST_RSRC_GRP_45_RSRC_TYPE_n),
+ __stringify(IPA_DST_RSRC_GRP_67_RSRC_TYPE_n),
+ __stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_0),
+ __stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_1),
+ __stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_0),
+ __stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_1),
+};
+
+static void ipareg_construct_dummy(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ IPAHAL_ERR("No construct function for %s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+}
+
+static void ipareg_parse_dummy(enum ipahal_reg_name reg,
+ void *fields, u32 val)
+{
+ IPAHAL_ERR("No parse function for %s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+}
+
+static void ipareg_construct_rx_hps_clients_depth1(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_rx_hps_clients *clients =
+ (struct ipahal_reg_rx_hps_clients *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(0));
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(1));
+}
+
+static void ipareg_construct_rx_hps_clients_depth0(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_rx_hps_clients *clients =
+ (struct ipahal_reg_rx_hps_clients *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(0));
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(1));
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(2),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(2));
+
+ IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3],
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(3),
+ IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(3));
+}
+
+static void ipareg_construct_rsrg_grp_xy(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_rsrc_grp_cfg *grp =
+ (struct ipahal_reg_rsrc_grp_cfg *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, grp->x_min,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK);
+ IPA_SETFIELD_IN_REG(*val, grp->x_max,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK);
+ IPA_SETFIELD_IN_REG(*val, grp->y_min,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK);
+ IPA_SETFIELD_IN_REG(*val, grp->y_max,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT,
+ IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK);
+}
+
+static void ipareg_construct_hash_cfg_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_fltrt_hash_tuple *tuple =
+ (struct ipahal_reg_fltrt_hash_tuple *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, tuple->flt.src_id,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->flt.src_ip_addr,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_ip_addr,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->flt.src_port,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_port,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->flt.protocol,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->flt.meta_data,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->undefined1,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->rt.src_id,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->rt.src_ip_addr,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_ip_addr,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->rt.src_port,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_port,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->rt.protocol,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->rt.meta_data,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK);
+ IPA_SETFIELD_IN_REG(*val, tuple->undefined2,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK);
+}
+
+static void ipareg_parse_hash_cfg_n(
+ enum ipahal_reg_name reg, void *fields, u32 val)
+{
+ struct ipahal_reg_fltrt_hash_tuple *tuple =
+ (struct ipahal_reg_fltrt_hash_tuple *)fields;
+
+ tuple->flt.src_id =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK);
+ tuple->flt.src_ip_addr =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK);
+ tuple->flt.dst_ip_addr =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK);
+ tuple->flt.src_port =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK);
+ tuple->flt.dst_port =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK);
+ tuple->flt.protocol =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK);
+ tuple->flt.meta_data =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK);
+ tuple->undefined1 =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK);
+ tuple->rt.src_id =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK);
+ tuple->rt.src_ip_addr =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK);
+ tuple->rt.dst_ip_addr =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK);
+ tuple->rt.src_port =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK);
+ tuple->rt.dst_port =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK);
+ tuple->rt.protocol =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK);
+ tuple->rt.meta_data =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK);
+ tuple->undefined2 =
+ IPA_GETFIELD_FROM_REG(val,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK);
+}
+
+static void ipareg_construct_endp_status_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_ep_cfg_status *ep_status =
+ (struct ipahal_reg_ep_cfg_status *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_status->status_en,
+ IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
+ IPA_ENDP_STATUS_n_STATUS_EN_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_status->status_ep,
+ IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
+ IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_status->status_location,
+ IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT,
+ IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK);
+}
+
+static void ipareg_construct_qcncm(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_qcncm *qcncm =
+ (struct ipahal_reg_qcncm *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, qcncm->mode_en ? 1 : 0,
+ IPA_QCNCM_MODE_EN_SHFT,
+ IPA_QCNCM_MODE_EN_BMSK);
+ IPA_SETFIELD_IN_REG(*val, qcncm->mode_val,
+ IPA_QCNCM_MODE_VAL_SHFT,
+ IPA_QCNCM_MODE_VAL_BMSK);
+ IPA_SETFIELD_IN_REG(*val, qcncm->undefined,
+ 0, IPA_QCNCM_MODE_VAL_BMSK);
+}
+
+static void ipareg_parse_qcncm(
+ enum ipahal_reg_name reg, void *fields, u32 val)
+{
+ struct ipahal_reg_qcncm *qcncm =
+ (struct ipahal_reg_qcncm *)fields;
+
+ memset(qcncm, 0, sizeof(struct ipahal_reg_qcncm));
+ qcncm->mode_en = IPA_GETFIELD_FROM_REG(val,
+ IPA_QCNCM_MODE_EN_SHFT,
+ IPA_QCNCM_MODE_EN_BMSK);
+ qcncm->mode_val = IPA_GETFIELD_FROM_REG(val,
+ IPA_QCNCM_MODE_VAL_SHFT,
+ IPA_QCNCM_MODE_VAL_BMSK);
+ qcncm->undefined = IPA_GETFIELD_FROM_REG(val,
+ 0, IPA_QCNCM_UNDEFINED1_BMSK);
+ qcncm->undefined |= IPA_GETFIELD_FROM_REG(val,
+ 0, IPA_QCNCM_MODE_UNDEFINED2_BMSK);
+}
+
+static void ipareg_construct_single_ndp_mode(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_single_ndp_mode *mode =
+ (struct ipahal_reg_single_ndp_mode *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, mode->single_ndp_en ? 1 : 0,
+ IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT,
+ IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, mode->undefined,
+ IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT,
+ IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK);
+}
+
+static void ipareg_parse_single_ndp_mode(
+ enum ipahal_reg_name reg, void *fields, u32 val)
+{
+ struct ipahal_reg_single_ndp_mode *mode =
+ (struct ipahal_reg_single_ndp_mode *)fields;
+
+ memset(mode, 0, sizeof(struct ipahal_reg_single_ndp_mode));
+ mode->single_ndp_en = IPA_GETFIELD_FROM_REG(val,
+ IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT,
+ IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK);
+ mode->undefined = IPA_GETFIELD_FROM_REG(val,
+ IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT,
+ IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK);
+}
+
+static void ipareg_construct_debug_cnt_ctrl_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_debug_cnt_ctrl *dbg_cnt_ctrl =
+ (struct ipahal_reg_debug_cnt_ctrl *)fields;
+ u8 type;
+
+ IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->en ? 1 : 0,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK);
+
+ switch (dbg_cnt_ctrl->type) {
+ case DBG_CNT_TYPE_IPV4_FLTR:
+ type = 0x0;
+ if (!dbg_cnt_ctrl->rule_idx_pipe_rule) {
+ IPAHAL_ERR("No FLT global rules\n");
+ WARN_ON(1);
+ }
+ break;
+ case DBG_CNT_TYPE_IPV4_ROUT:
+ type = 0x1;
+ break;
+ case DBG_CNT_TYPE_GENERAL:
+ type = 0x2;
+ break;
+ case DBG_CNT_TYPE_IPV6_FLTR:
+ type = 0x4;
+ if (!dbg_cnt_ctrl->rule_idx_pipe_rule) {
+ IPAHAL_ERR("No FLT global rules\n");
+ WARN_ON(1);
+ }
+ break;
+ case DBG_CNT_TYPE_IPV6_ROUT:
+ type = 0x5;
+ break;
+ default:
+ IPAHAL_ERR("Invalid dbg_cnt_ctrl type (%d) for %s\n",
+ dbg_cnt_ctrl->type, ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return;
+
+ }
+
+ IPA_SETFIELD_IN_REG(*val, type,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->product ? 1 : 0,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->src_pipe,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx_pipe_rule,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT,
+ IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK);
+}
+
+static void ipareg_parse_shared_mem_size(
+ enum ipahal_reg_name reg, void *fields, u32 val)
+{
+ struct ipahal_reg_shared_mem_size *smem_sz =
+ (struct ipahal_reg_shared_mem_size *)fields;
+
+ memset(smem_sz, 0, sizeof(struct ipahal_reg_shared_mem_size));
+ smem_sz->shared_mem_sz = IPA_GETFIELD_FROM_REG(val,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK);
+
+ smem_sz->shared_mem_baddr = IPA_GETFIELD_FROM_REG(val,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT,
+ IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK);
+}
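For illustration, a minimal sketch (not part of this patch) of how this parse callback is exercised through the generic read path; the local names are hypothetical:

	struct ipahal_reg_shared_mem_size smem;
	u32 raw;

	/* reads IPA_SHARED_MEM_SIZE (offset 0x54 in the IPAv3.0 table below)
	 * and fills smem via ipareg_parse_shared_mem_size()
	 */
	raw = ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem);

	/* per the register documentation, both fields are in 8-byte units */
	pr_debug("SW partition: baddr 0x%x size 0x%x (raw 0x%x)\n",
		 smem.shared_mem_baddr * 8, smem.shared_mem_sz * 8, raw);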
+
+static void ipareg_construct_endp_init_rsrc_grp_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp =
+ (struct ipahal_reg_endp_init_rsrc_grp *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp,
+ IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT,
+ IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_metadata_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_metadata *metadata =
+ (struct ipa_ep_cfg_metadata *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, metadata->qmap_id,
+ IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT,
+ IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_metadata_mask_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_metadata_mask *metadata_mask =
+ (struct ipa_ep_cfg_metadata_mask *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, metadata_mask->metadata_mask,
+ IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT,
+ IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK);
+}
+
+static void ipareg_construct_endp_init_cfg_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_cfg *cfg =
+ (struct ipa_ep_cfg_cfg *)fields;
+ u32 cs_offload_en;
+
+ switch (cfg->cs_offload_en) {
+ case IPA_DISABLE_CS_OFFLOAD:
+ cs_offload_en = 0;
+ break;
+ case IPA_ENABLE_CS_OFFLOAD_UL:
+ cs_offload_en = 1;
+ break;
+ case IPA_ENABLE_CS_OFFLOAD_DL:
+ cs_offload_en = 2;
+ break;
+ default:
+ IPAHAL_ERR("Invalid cs_offload_en value for %s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return;
+ }
+
+ IPA_SETFIELD_IN_REG(*val, cfg->frag_offload_en ? 1 : 0,
+ IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT,
+ IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK);
+ IPA_SETFIELD_IN_REG(*val, cs_offload_en,
+ IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT,
+ IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK);
+ IPA_SETFIELD_IN_REG(*val, cfg->cs_metadata_hdr_offset,
+ IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT,
+ IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK);
+ IPA_SETFIELD_IN_REG(*val, cfg->gen_qmb_master_sel,
+ IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT,
+ IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK);
+
+}
+
+static void ipareg_construct_endp_init_deaggr_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_deaggr *ep_deaggr =
+ (struct ipa_ep_cfg_deaggr *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_deaggr->deaggr_hdr_len,
+ IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT,
+ IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_deaggr->packet_offset_valid,
+ IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT,
+ IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_deaggr->packet_offset_location,
+ IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT,
+ IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_deaggr->max_packet_len,
+ IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT,
+ IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK);
+}
+
+static void ipareg_construct_endp_init_hol_block_en_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_holb *ep_holb =
+ (struct ipa_ep_cfg_holb *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_holb->en,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK);
+}
+
+static void ipareg_construct_endp_init_hol_block_timer_n(
+ enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_holb *ep_holb =
+ (struct ipa_ep_cfg_holb *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_holb->tmr_val,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK);
+}
+
+static void ipareg_construct_endp_init_ctrl_n(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_ctrl *ep_ctrl =
+ (struct ipa_ep_cfg_ctrl *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_suspend,
+ IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT,
+ IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_delay,
+ IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT,
+ IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK);
+}
+
+static void ipareg_construct_endp_init_nat_n(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_nat *ep_nat =
+ (struct ipa_ep_cfg_nat *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_nat->nat_en,
+ IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT,
+ IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK);
+}
+
+static void ipareg_construct_endp_init_mode_n(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipahal_reg_endp_init_mode *init_mode =
+ (struct ipahal_reg_endp_init_mode *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, init_mode->ep_mode.mode,
+ IPA_ENDP_INIT_MODE_n_MODE_SHFT,
+ IPA_ENDP_INIT_MODE_n_MODE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, init_mode->dst_pipe_number,
+ IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT,
+ IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK);
+}
+
+static void ipareg_construct_endp_init_route_n(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipahal_reg_endp_init_route *ep_init_rt =
+ (struct ipahal_reg_endp_init_route *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_init_rt->route_table_index,
+ IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT,
+ IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK);
+
+}
+
+static void ipareg_parse_endp_init_aggr_n(enum ipahal_reg_name reg,
+ void *fields, u32 val)
+{
+ struct ipa_ep_cfg_aggr *ep_aggr =
+ (struct ipa_ep_cfg_aggr *)fields;
+
+ memset(ep_aggr, 0, sizeof(struct ipa_ep_cfg_aggr));
+
+ ep_aggr->aggr_en =
+ (((val & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT)
+ == IPA_ENABLE_AGGR);
+ ep_aggr->aggr =
+ ((val & IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK) >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT);
+ ep_aggr->aggr_byte_limit =
+ ((val & IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK) >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT);
+ ep_aggr->aggr_time_limit =
+ ((val & IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK) >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT);
+ ep_aggr->aggr_pkt_limit =
+ ((val & IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK) >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT);
+ ep_aggr->aggr_hard_byte_limit_en =
+ ((val & IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK)
+ >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT);
+}
+
+static void ipareg_construct_endp_init_aggr_n(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_aggr *ep_aggr =
+ (struct ipa_ep_cfg_aggr *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_en,
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_byte_limit,
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_time_limit,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_pkt_limit,
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK);
+
+ /* hard_byte_limit is not supported on IPAv3 */
+ ep_aggr->aggr_hard_byte_limit_en = 0;
+ IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_hard_byte_limit_en,
+ IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT,
+ IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_ext_n(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_hdr_ext *ep_hdr_ext;
+ u8 hdr_endianness;
+
+ ep_hdr_ext = (struct ipa_ep_cfg_hdr_ext *)fields;
+ hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1;
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_pad_to_alignment,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_offset,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_payload_len_inc_padding,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_valid,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, hdr_endianness,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT,
+ IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_n(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipa_ep_cfg_hdr *ep_hdr;
+
+ ep_hdr = (struct ipa_ep_cfg_hdr *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_metadata_reg_valid,
+ IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2,
+ IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_remove_additional,
+ IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2,
+ IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_a5_mux,
+ IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size_valid,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_additional_const_len,
+ IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata_valid,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_len,
+ IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT,
+ IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK);
+}
+
+static void ipareg_construct_route(enum ipahal_reg_name reg,
+ const void *fields, u32 *val)
+{
+ struct ipahal_reg_route *route;
+
+ route = (struct ipahal_reg_route *)fields;
+
+ IPA_SETFIELD_IN_REG(*val, route->route_dis,
+ IPA_ROUTE_ROUTE_DIS_SHFT,
+ IPA_ROUTE_ROUTE_DIS_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, route->route_def_pipe,
+ IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+ IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, route->route_def_hdr_table,
+ IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+ IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, route->route_def_hdr_ofst,
+ IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+ IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, route->route_frag_def_pipe,
+ IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT,
+ IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK);
+
+ IPA_SETFIELD_IN_REG(*val, route->route_def_retain_hdr,
+ IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT,
+ IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK);
+}
+
+/*
+ * struct ipahal_reg_obj - Register H/W information for specific IPA version
+ * @construct - CB to construct register value from abstracted structure
+ * @parse - CB to parse register value to abstracted structure
+ * @offset - register offset relative to base address
+ * @n_ofst - N parameterized register sub-offset
+ */
+struct ipahal_reg_obj {
+ void (*construct)(enum ipahal_reg_name reg, const void *fields,
+ u32 *val);
+ void (*parse)(enum ipahal_reg_name reg, void *fields,
+ u32 val);
+ u32 offset;
+ u32 n_ofst;
+};
+
+/*
+ * This table holds the information for each register for IPAv3 and later:
+ * the register offset and its construct/parse functions.
+ * All IPAv3 register information is statically defined below.
+ * If information is missing for some register on a later IPA version,
+ * the init function fills it in from the previous IPA version.
+ * Information is considered missing if all of the fields are 0.
+ * An offset of -1 means that the register was removed on that
+ * specific version.
+ */
+static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
+ /* IPAv3 */
+ [IPA_HW_v3_0][IPA_ROUTE] = {
+ ipareg_construct_route, ipareg_parse_dummy,
+ 0x00000048, 0},
+ [IPA_HW_v3_0][IPA_IRQ_STTS_EE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00003008, 0x1000},
+ [IPA_HW_v3_0][IPA_IRQ_EN_EE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0000300c, 0x1000},
+ [IPA_HW_v3_0][IPA_IRQ_CLR_EE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00003010, 0x1000},
+ [IPA_HW_v3_0][IPA_IRQ_SUSPEND_INFO_EE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00003098, 0x1000},
+ [IPA_HW_v3_0][IPA_BCR] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x000001D0, 0},
+ [IPA_HW_v3_0][IPA_ENABLED_PIPES] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000038, 0},
+ [IPA_HW_v3_0][IPA_COMP_SW_RESET] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000040, 0},
+ [IPA_HW_v3_0][IPA_VERSION] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000034, 0},
+ [IPA_HW_v3_0][IPA_TAG_TIMER] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000060, 0 },
+ [IPA_HW_v3_0][IPA_COMP_HW_VERSION] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000030, 0},
+ [IPA_HW_v3_0][IPA_SPARE_REG_1] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00005090, 0},
+ [IPA_HW_v3_0][IPA_SPARE_REG_2] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00005094, 0},
+ [IPA_HW_v3_0][IPA_COMP_CFG] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0000003C, 0},
+ [IPA_HW_v3_0][IPA_STATE_AGGR_ACTIVE] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0000010C, 0},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_n] = {
+ ipareg_construct_endp_init_hdr_n, ipareg_parse_dummy,
+ 0x00000810, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_EXT_n] = {
+ ipareg_construct_endp_init_hdr_ext_n, ipareg_parse_dummy,
+ 0x00000814, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_AGGR_n] = {
+ ipareg_construct_endp_init_aggr_n,
+ ipareg_parse_endp_init_aggr_n,
+ 0x00000824, 0x70},
+ [IPA_HW_v3_0][IPA_AGGR_FORCE_CLOSE] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x000001EC, 0},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_ROUTE_n] = {
+ ipareg_construct_endp_init_route_n, ipareg_parse_dummy,
+ 0x00000828, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_MODE_n] = {
+ ipareg_construct_endp_init_mode_n, ipareg_parse_dummy,
+ 0x00000820, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_NAT_n] = {
+ ipareg_construct_endp_init_nat_n, ipareg_parse_dummy,
+ 0x0000080C, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_n] = {
+ ipareg_construct_endp_init_ctrl_n, ipareg_parse_dummy,
+ 0x00000800, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_EN_n] = {
+ ipareg_construct_endp_init_hol_block_en_n,
+ ipareg_parse_dummy,
+ 0x0000082c, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = {
+ ipareg_construct_endp_init_hol_block_timer_n,
+ ipareg_parse_dummy,
+ 0x00000830, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_DEAGGR_n] = {
+ ipareg_construct_endp_init_deaggr_n,
+ ipareg_parse_dummy,
+ 0x00000834, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_SEQ_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0000083C, 0x70},
+ [IPA_HW_v3_0][IPA_DEBUG_CNT_REG_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000600, 0x4},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_CFG_n] = {
+ ipareg_construct_endp_init_cfg_n, ipareg_parse_dummy,
+ 0x00000808, 0x70},
+ [IPA_HW_v3_0][IPA_IRQ_EE_UC_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x0000301c, 0x1000},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_MASK_n] = {
+ ipareg_construct_endp_init_hdr_metadata_mask_n,
+ ipareg_parse_dummy,
+ 0x00000818, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_n] = {
+ ipareg_construct_endp_init_hdr_metadata_n,
+ ipareg_parse_dummy,
+ 0x0000081c, 0x70},
+ [IPA_HW_v3_0][IPA_ENABLE_GSI] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x5500, 0},
+ [IPA_HW_v3_0][IPA_ENDP_INIT_RSRC_GRP_n] = {
+ ipareg_construct_endp_init_rsrc_grp_n,
+ ipareg_parse_dummy,
+ 0x00000838, 0x70},
+ [IPA_HW_v3_0][IPA_SHARED_MEM_SIZE] = {
+ ipareg_construct_dummy, ipareg_parse_shared_mem_size,
+ 0x00000054, 0},
+ [IPA_HW_v3_0][IPA_SRAM_DIRECT_ACCESS_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00007000, 0x4},
+ [IPA_HW_v3_0][IPA_DEBUG_CNT_CTRL_n] = {
+ ipareg_construct_debug_cnt_ctrl_n, ipareg_parse_dummy,
+ 0x00000640, 0x4},
+ [IPA_HW_v3_0][IPA_UC_MAILBOX_m_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00032000, 0x4},
+ [IPA_HW_v3_0][IPA_FILT_ROUT_HASH_FLUSH] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00000090, 0},
+ [IPA_HW_v3_0][IPA_SINGLE_NDP_MODE] = {
+ ipareg_construct_single_ndp_mode, ipareg_parse_single_ndp_mode,
+ 0x00000068, 0},
+ [IPA_HW_v3_0][IPA_QCNCM] = {
+ ipareg_construct_qcncm, ipareg_parse_qcncm,
+ 0x00000064, 0},
+ [IPA_HW_v3_0][IPA_SYS_PKT_PROC_CNTXT_BASE] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x000001e0, 0},
+ [IPA_HW_v3_0][IPA_LOCAL_PKT_PROC_CNTXT_BASE] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x000001e8, 0},
+ [IPA_HW_v3_0][IPA_ENDP_STATUS_n] = {
+ ipareg_construct_endp_status_n, ipareg_parse_dummy,
+ 0x00000840, 0x70},
+ [IPA_HW_v3_0][IPA_ENDP_FILTER_ROUTER_HSH_CFG_n] = {
+ ipareg_construct_hash_cfg_n, ipareg_parse_hash_cfg_n,
+ 0x0000085C, 0x70},
+ [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x00000400, 0x20},
+ [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x00000404, 0x20},
+ [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x00000408, 0x20},
+ [IPA_HW_v3_0][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x0000040C, 0x20},
+ [IPA_HW_v3_0][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x00000500, 0x20},
+ [IPA_HW_v3_0][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x00000504, 0x20},
+ [IPA_HW_v3_0][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x00000508, 0x20},
+ [IPA_HW_v3_0][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = {
+ ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+ 0x0000050c, 0x20},
+ [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = {
+ ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy,
+ 0x000023C4, 0},
+ [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = {
+ ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy,
+ 0x000023C8, 0},
+ [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = {
+ ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy,
+ 0x000023CC, 0},
+ [IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = {
+ ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy,
+ 0x000023D0, 0},
+
+
+ /* IPAv3.1 */
+ [IPA_HW_v3_1][IPA_IRQ_SUSPEND_INFO_EE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00003030, 0x1000},
+ [IPA_HW_v3_1][IPA_SUSPEND_IRQ_EN_EE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00003034, 0x1000},
+ [IPA_HW_v3_1][IPA_SUSPEND_IRQ_CLR_EE_n] = {
+ ipareg_construct_dummy, ipareg_parse_dummy,
+ 0x00003038, 0x1000},
+};
+
+/*
+ * ipahal_reg_init() - Build the registers information table
+ * See ipahal_reg_objs[][] comments
+ *
+ * Note: Global arrays are zero-initialized, so any register entry that is
+ * not explicitly overridden remains zero; this is how missing entries are
+ * recognized.
+ */
+int ipahal_reg_init(enum ipa_hw_type ipa_hw_type)
+{
+ int i;
+ int j;
+ struct ipahal_reg_obj zero_obj;
+
+ IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+ memset(&zero_obj, 0, sizeof(zero_obj));
+ for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+ for (j = 0; j < IPA_REG_MAX ; j++) {
+ if (!memcmp(&ipahal_reg_objs[i+1][j], &zero_obj,
+ sizeof(struct ipahal_reg_obj))) {
+ memcpy(&ipahal_reg_objs[i+1][j],
+ &ipahal_reg_objs[i][j],
+ sizeof(struct ipahal_reg_obj));
+ } else {
+ /*
+ * explicitly overridden register.
+ * Check validity
+ */
+ if (!ipahal_reg_objs[i+1][j].offset) {
+ IPAHAL_ERR(
+ "reg=%s with zero offset\n",
+ ipahal_reg_name_str(j));
+ WARN_ON(1);
+ }
+ if (!ipahal_reg_objs[i+1][j].construct) {
+ IPAHAL_ERR(
+ "reg=%s with NULL construct func\n",
+ ipahal_reg_name_str(j));
+ WARN_ON(1);
+ }
+ if (!ipahal_reg_objs[i+1][j].parse) {
+ IPAHAL_ERR(
+ "reg=%s with NULL parse func\n",
+ ipahal_reg_name_str(j));
+ WARN_ON(1);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
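To make the inheritance above concrete, a hedged sketch (not part of this patch): in the IPAv3.1 column of the table only the suspend-IRQ entries are overridden, so after init every other v3.1 entry is copied from v3.0:

	/* conceptual example, assuming ipahal_ctx->hw_type == IPA_HW_v3_1 */
	u32 ofst;

	ipahal_reg_init(IPA_HW_v3_1);

	/* explicitly overridden for v3.1: 0x3030 + 0x1000 * 0 */
	ofst = ipahal_get_reg_n_ofst(IPA_IRQ_SUSPEND_INFO_EE_n, 0);

	/* not overridden: inherited from the v3.0 entry, offset 0x48 */
	ofst = ipahal_get_reg_ofst(IPA_ROUTE);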
+
+/*
+ * ipahal_reg_name_str() - returns the string that represents the register
+ * @reg_name: [in] register name
+ */
+const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name)
+{
+ if (reg_name < 0 || reg_name >= IPA_REG_MAX) {
+ IPAHAL_ERR("requested name of invalid reg=%d\n", reg_name);
+ return "Invalid Register";
+ }
+
+ return ipareg_name_to_str[reg_name];
+}
+
+/*
+ * ipahal_read_reg_n() - Get the raw value of n parameterized reg
+ */
+u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n)
+{
+ u32 offset;
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ IPAHAL_DBG("read from %s n=%u\n",
+ ipahal_reg_name_str(reg), n);
+
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Read access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+ return ioread32(ipahal_ctx->base + offset);
+}
+
+/*
+ * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value
+ */
+void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val)
+{
+ u32 offset;
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ return;
+ }
+
+ IPAHAL_DBG("write to %s m=%u n=%u val=%u\n",
+ ipahal_reg_name_str(reg), m, n, val);
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Write access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return;
+ }
+ /*
+ * Currently there is one register with m and n parameters
+ * IPA_UC_MAILBOX_m_n. The m value of it is 0x80.
+ * If more such registers will be added in the future,
+ * we can move the m parameter to the table above.
+ */
+ offset += 0x80 * m;
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+ iowrite32(val, ipahal_ctx->base + offset);
+}
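As a worked example of the offset arithmetic (a sketch, not part of this patch): for IPA_UC_MAILBOX_m_n the IPAv3.0 table below gives base offset 0x32000 and n_ofst 0x4, and the m stride is the hard-coded 0x80 above, so:

	/* m = 1, n = 3:
	 * offset = 0x32000 + 0x80 * 1 + 0x4 * 3 = 0x3208c
	 * the value written here is arbitrary/illustrative
	 */
	ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n, 1, 3, 0x12345678);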
+
+/*
+ * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg
+ */
+u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields)
+{
+ u32 val = 0;
+ u32 offset;
+
+ if (!fields) {
+ IPAHAL_ERR("Input error fields=%p\n", fields);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ IPAHAL_DBG("read from %s n=%u and parse it\n",
+ ipahal_reg_name_str(reg), n);
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Read access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+ val = ioread32(ipahal_ctx->base + offset);
+ ipahal_reg_objs[ipahal_ctx->hw_type][reg].parse(reg, fields, val);
+
+ return val;
+}
+
+/*
+ * ipahal_write_reg_n_fields() - Write to n parameterized reg a parsed value
+ */
+void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n,
+ const void *fields)
+{
+ u32 val = 0;
+ u32 offset;
+
+ if (!fields) {
+ IPAHAL_ERR("Input error fields=%p\n", fields);
+ return;
+ }
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ return;
+ }
+
+ IPAHAL_DBG("write to %s n=%u after constructing it\n",
+ ipahal_reg_name_str(reg), n);
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Write access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return;
+ }
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+ ipahal_reg_objs[ipahal_ctx->hw_type][reg].construct(reg, fields, &val);
+
+ iowrite32(val, ipahal_ctx->base + offset);
+}
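A minimal usage sketch of the construct path (hypothetical caller, not part of this patch; pipe_idx stands for a valid endpoint index):

	struct ipa_ep_cfg_ctrl ep_ctrl;

	memset(&ep_ctrl, 0, sizeof(ep_ctrl));
	ep_ctrl.ipa_ep_suspend = true;

	/* ipareg_construct_endp_init_ctrl_n() builds the raw word, which is
	 * then written at offset 0x800 + 0x70 * pipe_idx (IPAv3.0 table)
	 */
	ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, pipe_idx, &ep_ctrl);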
+
+/*
+ * Get the offset of a m/n parameterized register
+ */
+u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n)
+{
+ u32 offset;
+
+ if (reg >= IPA_REG_MAX) {
+ IPAHAL_ERR("Invalid register reg=%u\n", reg);
+ WARN_ON(1);
+ return -EFAULT;
+ }
+
+ IPAHAL_DBG("get offset of %s m=%u n=%u\n",
+ ipahal_reg_name_str(reg), m, n);
+ offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+ if (offset == -1) {
+ IPAHAL_ERR("Access to obsolete reg=%s\n",
+ ipahal_reg_name_str(reg));
+ WARN_ON(1);
+ return -EFAULT;
+ }
+ /*
+ * Currently there is one register with m and n parameters
+ * IPA_UC_MAILBOX_m_n. The m value of it is 0x80.
+ * If more such registers will be added in the future,
+ * we can move the m parameter to the table above.
+ */
+ offset += 0x80 * m;
+ offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+
+ return offset;
+}
+
+u32 ipahal_get_reg_base(void)
+{
+ return 0x00040000;
+}
+
+
+/*
+ * Specific functions
+ * These functions supply specific register values for specific operations
+ * that cannot be reached by the generic functions.
+ * E.g. to disable aggregation, SW needs to write to specific bits of the
+ * AGGR register while leaving the other bits untouched. Such an operation
+ * is too specific to be defined generically, so dedicated functions are
+ * provided for it.
+ */
+
+void ipahal_get_disable_aggr_valmask(struct ipahal_reg_valmask *valmask)
+{
+ if (!valmask) {
+ IPAHAL_ERR("Input error\n");
+ return;
+ }
+
+ valmask->val = (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+ valmask->mask = IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+
+ valmask->val |= ((0 & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT);
+ valmask->mask |= ((IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK <<
+ IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT));
+}
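For illustration, one way a caller might apply this valmask as a read-modify-write on the AGGR register (a sketch, not part of this patch; pipe_idx is hypothetical):

	struct ipahal_reg_valmask vm;
	u32 v;

	ipahal_get_disable_aggr_valmask(&vm);
	v = ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, pipe_idx);
	v = (v & ~vm.mask) | (vm.val & vm.mask); /* touch only EN/FORCE_CLOSE */
	ipahal_write_reg_n(IPA_ENDP_INIT_AGGR_n, pipe_idx, v);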
+
+u32 ipahal_aggr_get_max_byte_limit(void)
+{
+ return
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT;
+}
+
+u32 ipahal_aggr_get_max_pkt_limit(void)
+{
+ return
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >>
+ IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT;
+}
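Given the IPAv3 field definitions used in this file, these helpers reduce to fixed maxima; the arithmetic for reference:

	/*
	 * max byte limit = 0x3e0    >> 0x5 = 31 (5-bit field)
	 * max pkt  limit = 0x1f8000 >> 0xf = 63 (6-bit field)
	 */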
+
+
+void ipahal_get_aggr_force_close_valmask(int ep_idx,
+ struct ipahal_reg_valmask *valmask)
+{
+ if (!valmask) {
+ IPAHAL_ERR("Input error\n");
+ return;
+ }
+
+ IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx,
+ IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT,
+ IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK);
+
+ valmask->mask =
+ IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK <<
+ IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT;
+}
+
+void ipahal_get_fltrt_hash_flush_valmask(
+ struct ipahal_reg_fltrt_hash_flush *flush,
+ struct ipahal_reg_valmask *valmask)
+{
+ if (!flush || !valmask) {
+ IPAHAL_ERR("Input error: flush=%p ; valmask=%p\n",
+ flush, valmask);
+ return;
+ }
+
+ memset(valmask, 0, sizeof(struct ipahal_reg_valmask));
+
+ if (flush->v6_rt)
+ valmask->val |=
+ (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT);
+ if (flush->v6_flt)
+ valmask->val |=
+ (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT);
+ if (flush->v4_rt)
+ valmask->val |=
+ (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT);
+ if (flush->v4_flt)
+ valmask->val |=
+ (1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT);
+
+ valmask->mask = valmask->val;
+}
+
+void ipahal_get_status_ep_valmask(int pipe_num,
+ struct ipahal_reg_valmask *valmask)
+{
+ if (!valmask) {
+ IPAHAL_ERR("Input error\n");
+ return;
+ }
+
+ valmask->val =
+ (pipe_num & IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) <<
+ IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+
+ valmask->mask =
+ IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK <<
+ IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
new file mode 100644
index 000000000000..5cc3623103eb
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -0,0 +1,434 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_REG_H_
+#define _IPAHAL_REG_H_
+
+#include <linux/ipa.h>
+
+/*
+ * Registers names
+ *
+ * NOTE: Any change to this enum requires a matching change to the
+ * ipareg_name_to_str array as well.
+ */
+enum ipahal_reg_name {
+ IPA_ROUTE,
+ IPA_IRQ_STTS_EE_n,
+ IPA_IRQ_EN_EE_n,
+ IPA_IRQ_CLR_EE_n,
+ IPA_IRQ_SUSPEND_INFO_EE_n,
+ IPA_SUSPEND_IRQ_EN_EE_n,
+ IPA_SUSPEND_IRQ_CLR_EE_n,
+ IPA_BCR,
+ IPA_ENABLED_PIPES,
+ IPA_COMP_SW_RESET,
+ IPA_VERSION,
+ IPA_TAG_TIMER,
+ IPA_COMP_HW_VERSION,
+ IPA_SPARE_REG_1,
+ IPA_SPARE_REG_2,
+ IPA_COMP_CFG,
+ IPA_STATE_AGGR_ACTIVE,
+ IPA_ENDP_INIT_HDR_n,
+ IPA_ENDP_INIT_HDR_EXT_n,
+ IPA_ENDP_INIT_AGGR_n,
+ IPA_AGGR_FORCE_CLOSE,
+ IPA_ENDP_INIT_ROUTE_n,
+ IPA_ENDP_INIT_MODE_n,
+ IPA_ENDP_INIT_NAT_n,
+ IPA_ENDP_INIT_CTRL_n,
+ IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+ IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+ IPA_ENDP_INIT_DEAGGR_n,
+ IPA_ENDP_INIT_SEQ_n,
+ IPA_DEBUG_CNT_REG_n,
+ IPA_ENDP_INIT_CFG_n,
+ IPA_IRQ_EE_UC_n,
+ IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+ IPA_ENDP_INIT_HDR_METADATA_n,
+ IPA_ENABLE_GSI,
+ IPA_ENDP_INIT_RSRC_GRP_n,
+ IPA_SHARED_MEM_SIZE,
+ IPA_SRAM_DIRECT_ACCESS_n,
+ IPA_DEBUG_CNT_CTRL_n,
+ IPA_UC_MAILBOX_m_n,
+ IPA_FILT_ROUT_HASH_FLUSH,
+ IPA_SINGLE_NDP_MODE,
+ IPA_QCNCM,
+ IPA_SYS_PKT_PROC_CNTXT_BASE,
+ IPA_LOCAL_PKT_PROC_CNTXT_BASE,
+ IPA_ENDP_STATUS_n,
+ IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+ IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+ IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+ IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
+ IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n,
+ IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+ IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+ IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
+ IPA_DST_RSRC_GRP_67_RSRC_TYPE_n,
+ IPA_RX_HPS_CLIENTS_MIN_DEPTH_0,
+ IPA_RX_HPS_CLIENTS_MIN_DEPTH_1,
+ IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
+ IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
+ IPA_REG_MAX,
+};
+
+/*
+ * struct ipahal_reg_route - IPA route register
+ * @route_dis: route disable
+ * @route_def_pipe: route default pipe
+ * @route_def_hdr_table: route default header table
+ * @route_def_hdr_ofst: route default header offset
+ * @route_frag_def_pipe: Default pipe to route fragmented exception
+ * packets and frag new rule statuses, if the source pipe does not have
+ * a notification status pipe defined.
+ * @route_def_retain_hdr: default value of retain header. It is used
+ * when no rule was hit.
+ */
+struct ipahal_reg_route {
+ u32 route_dis;
+ u32 route_def_pipe;
+ u32 route_def_hdr_table;
+ u32 route_def_hdr_ofst;
+ u8 route_frag_def_pipe;
+ u32 route_def_retain_hdr;
+};
+
+/*
+ * struct ipahal_reg_endp_init_route - IPA ENDP_INIT_ROUTE_n register
+ * @route_table_index: Default index of routing table (IPA Consumer).
+ */
+struct ipahal_reg_endp_init_route {
+ u32 route_table_index;
+};
+
+/*
+ * struct ipahal_reg_endp_init_rsrc_grp - IPA_ENDP_INIT_RSRC_GRP_n register
+ * @rsrc_grp: Index of group for this ENDP. If this ENDP is a source ENDP,
+ * the index is for a source resource group. If it is a destination ENDP,
+ * the index is for a destination resource group.
+ */
+struct ipahal_reg_endp_init_rsrc_grp {
+ u32 rsrc_grp;
+};
+
+/*
+ * struct ipahal_reg_endp_init_mode - IPA ENDP_INIT_MODE_n register
+ * @dst_pipe_number: Specifies the destination output pipe to which packets
+ * will be routed. Valid for DMA mode only and for Input
+ * Pipes only (IPA Consumer)
+ */
+struct ipahal_reg_endp_init_mode {
+ u32 dst_pipe_number;
+ struct ipa_ep_cfg_mode ep_mode;
+};
+
+/*
+ * struct ipahal_reg_shared_mem_size - IPA SHARED_MEM_SIZE register
+ * @shared_mem_sz: Available size [in 8-byte units] of the SW partition within
+ * IPA shared memory.
+ * @shared_mem_baddr: Offset of the SW partition within IPA
+ * shared memory [in 8-byte units]. To get the absolute address of the SW
+ * partition, add this offset to the IPA_SRAM_DIRECT_ACCESS_n base address.
+ */
+struct ipahal_reg_shared_mem_size {
+ u32 shared_mem_sz;
+ u32 shared_mem_baddr;
+};
+
+/*
+ * struct ipahal_reg_ep_cfg_status - status configuration in IPA end-point
+ * @status_en: Determines if end point supports Status Indications. SW should
+ * set this bit in order to enable Statuses. Output Pipe - send
+ * Status indications only if bit is set. Input Pipe - forward Status
+ * indication to STATUS_ENDP only if bit is set. Valid for Input
+ * and Output Pipes (IPA Consumer and Producer)
+ * @status_ep: Statuses generated for this endpoint will be forwarded to the
+ * specified Status End Point. Status endpoint needs to be
+ * configured with STATUS_EN=1 Valid only for Input Pipes (IPA
+ * Consumer)
+ * @status_location: Location of PKT-STATUS on destination pipe.
+ * If set to 0 (default), PKT-STATUS will be appended before the packet
+ * for this endpoint. If set to 1, PKT-STATUS will be appended after the
+ * packet for this endpoint. Valid only for Output Pipes (IPA Producer)
+ */
+struct ipahal_reg_ep_cfg_status {
+ bool status_en;
+ u8 status_ep;
+ bool status_location;
+};
+
+/*
+ * struct ipahal_reg_hash_tuple - Hash tuple members for flt and rt
+ * Each field tells whether it is to be masked or not
+ * @src_id: pipe number for flt, table index for rt
+ * @src_ip_addr: IP source address
+ * @dst_ip_addr: IP destination address
+ * @src_port: L4 source port
+ * @dst_port: L4 destination port
+ * @protocol: IP protocol field
+ * @meta_data: packet meta-data
+ *
+ */
+struct ipahal_reg_hash_tuple {
+ /* src_id: pipe in flt, tbl index in rt */
+ bool src_id;
+ bool src_ip_addr;
+ bool dst_ip_addr;
+ bool src_port;
+ bool dst_port;
+ bool protocol;
+ bool meta_data;
+};
+
+/*
+ * struct ipahal_reg_fltrt_hash_tuple - IPA hash tuple register
+ * @flt: Hash tuple info for filtering
+ * @rt: Hash tuple info for routing
+ * @undefinedX: Undefined/Unused bit fields set of the register
+ */
+struct ipahal_reg_fltrt_hash_tuple {
+ struct ipahal_reg_hash_tuple flt;
+ struct ipahal_reg_hash_tuple rt;
+ u32 undefined1;
+ u32 undefined2;
+};
+
+/*
+ * enum ipahal_reg_dbg_cnt_type - Debug Counter Type
+ * DBG_CNT_TYPE_IPV4_FLTR - Count IPv4 filtering rules
+ * DBG_CNT_TYPE_IPV4_ROUT - Count IPv4 routing rules
+ * DBG_CNT_TYPE_GENERAL - General counter
+ * DBG_CNT_TYPE_IPV6_FLTR - Count IPv6 filtering rules
+ * DBG_CNT_TYPE_IPV6_ROUT - Count IPv6 routing rules
+ */
+enum ipahal_reg_dbg_cnt_type {
+ DBG_CNT_TYPE_IPV4_FLTR,
+ DBG_CNT_TYPE_IPV4_ROUT,
+ DBG_CNT_TYPE_GENERAL,
+ DBG_CNT_TYPE_IPV6_FLTR,
+ DBG_CNT_TYPE_IPV6_ROUT,
+};
+
+/*
+ * struct ipahal_reg_debug_cnt_ctrl - IPA_DEBUG_CNT_CTRL_n register
+ * @en - Enable debug counter
+ * @type - Type of debug counting
+ * @product - False -> count bytes. True -> count packets
+ * @src_pipe - Specific pipe to match. If 0xFF, no specific pipe
+ * is matched
+ * @rule_idx_pipe_rule - Global Rule or Pipe Rule. If pipe, then indicated by
+ * src_pipe
+ * @rule_idx - Rule index. Irrelevant for type General
+ */
+struct ipahal_reg_debug_cnt_ctrl {
+ bool en;
+ enum ipahal_reg_dbg_cnt_type type;
+ bool product;
+ u8 src_pipe;
+ bool rule_idx_pipe_rule;
+ u8 rule_idx;
+};
+
+/*
+ * struct ipahal_reg_rsrc_grp_cfg - Min/Max values for two rsrc groups
+ * @x_min - first group min value
+ * @x_max - first group max value
+ * @y_min - second group min value
+ * @y_max - second group max value
+ */
+struct ipahal_reg_rsrc_grp_cfg {
+ u32 x_min;
+ u32 x_max;
+ u32 y_min;
+ u32 y_max;
+};
+
+/*
+ * struct ipahal_reg_rx_hps_clients - Min or Max values for RX HPS clients
+ * @client_minmax - Min or Max values. In case of depth 0 the 4 values
+ * are used. In case of depth 1, only the first 2 values are used
+ */
+struct ipahal_reg_rx_hps_clients {
+ u32 client_minmax[4];
+};
+
+/*
+ * struct ipahal_reg_valmask - holding values and masking for registers
+ * A HAL user may require only the value and mask of certain
+ * register fields.
+ * @val - The value
+ * @mask - The mask of the value
+ */
+struct ipahal_reg_valmask {
+ u32 val;
+ u32 mask;
+};
+
+/*
+ * struct ipahal_reg_fltrt_hash_flush - Flt/Rt flush configuration
+ * @v6_rt - Flush IPv6 Routing cache
+ * @v6_flt - Flush IPv6 Filtering cache
+ * @v4_rt - Flush IPv4 Routing cache
+ * @v4_flt - Flush IPv4 Filtering cache
+ */
+struct ipahal_reg_fltrt_hash_flush {
+ bool v6_rt;
+ bool v6_flt;
+ bool v4_rt;
+ bool v4_flt;
+};
+
+/*
+ * struct ipahal_reg_single_ndp_mode - IPA SINGLE_NDP_MODE register
+ * @single_ndp_en: When set to '1', IPA builds MBIM frames with up to 1
+ * NDP-header.
+ * @undefined: undefined bits of the register
+ */
+struct ipahal_reg_single_ndp_mode {
+ bool single_ndp_en;
+ u32 undefined;
+};
+
+/*
+ * struct ipahal_reg_qcncm - IPA QCNCM register
+ * @mode_en: When QCNCM_MODE_EN=1, IPA will use QCNCM signature.
+ * @mode_val: Used only when QCNCM_MODE_EN=1 and sets SW Signature in
+ * the NDP header.
+ * @undefined: undefined bits of the register
+ */
+struct ipahal_reg_qcncm {
+ bool mode_en;
+ u32 mode_val;
+ u32 undefined;
+};
+
+/*
+ * ipahal_reg_name_str() - returns the string that represents the register
+ * @reg_name: [in] register name
+ */
+const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name);
+
+/*
+ * ipahal_read_reg_n() - Get the raw value of n parameterized reg
+ */
+u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n);
+
+/*
+ * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value
+ */
+void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val);
+
+/*
+ * ipahal_write_reg_n() - Write to n parameterized reg a raw value
+ */
+static inline void ipahal_write_reg_n(enum ipahal_reg_name reg,
+ u32 n, u32 val)
+{
+ ipahal_write_reg_mn(reg, 0, n, val);
+}
+
+/*
+ * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg
+ */
+u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields);
+
+/*
+ * ipahal_write_reg_n_fields() - Write to n parameterized reg a parsed value
+ */
+void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n,
+ const void *fields);
+
+/*
+ * ipahal_read_reg() - Get the raw value of a reg
+ */
+static inline u32 ipahal_read_reg(enum ipahal_reg_name reg)
+{
+ return ipahal_read_reg_n(reg, 0);
+}
+
+/*
+ * ipahal_write_reg() - Write to reg a raw value
+ */
+static inline void ipahal_write_reg(enum ipahal_reg_name reg,
+ u32 val)
+{
+ ipahal_write_reg_mn(reg, 0, 0, val);
+}
+
+/*
+ * ipahal_read_reg_fields() - Get the parsed value of a reg
+ */
+static inline u32 ipahal_read_reg_fields(enum ipahal_reg_name reg, void *fields)
+{
+ return ipahal_read_reg_n_fields(reg, 0, fields);
+}
+
+/*
+ * ipahal_write_reg_fields() - Write to reg a parsed value
+ */
+static inline void ipahal_write_reg_fields(enum ipahal_reg_name reg,
+ const void *fields)
+{
+ ipahal_write_reg_n_fields(reg, 0, fields);
+}
+
+/*
+ * Get the offset of a m/n parameterized register
+ */
+u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n);
+
+/*
+ * Get the offset of a n parameterized register
+ */
+static inline u32 ipahal_get_reg_n_ofst(enum ipahal_reg_name reg, u32 n)
+{
+ return ipahal_get_reg_mn_ofst(reg, 0, n);
+}
+
+/*
+ * Get the offset of a register
+ */
+static inline u32 ipahal_get_reg_ofst(enum ipahal_reg_name reg)
+{
+ return ipahal_get_reg_mn_ofst(reg, 0, 0);
+}
+
+/*
+ * Get the register base address
+ */
+u32 ipahal_get_reg_base(void);
+
+/*
+ * Specific functions
+ * These functions supply specific register values for specific operations
+ * that cannot be reached by the generic functions.
+ * E.g. to disable aggregation, SW needs to write to specific bits of the
+ * AGGR register while leaving the other bits untouched. Such an operation
+ * is too specific to be defined generically, so dedicated functions are
+ * provided for it.
+ */
+void ipahal_get_disable_aggr_valmask(struct ipahal_reg_valmask *valmask);
+u32 ipahal_aggr_get_max_byte_limit(void);
+u32 ipahal_aggr_get_max_pkt_limit(void);
+void ipahal_get_aggr_force_close_valmask(int ep_idx,
+ struct ipahal_reg_valmask *valmask);
+void ipahal_get_fltrt_hash_flush_valmask(
+ struct ipahal_reg_fltrt_hash_flush *flush,
+ struct ipahal_reg_valmask *valmask);
+void ipahal_get_status_ep_valmask(int pipe_num,
+ struct ipahal_reg_valmask *valmask);
+
+#endif /* _IPAHAL_REG_H_ */
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
new file mode 100644
index 000000000000..8f46f654d314
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -0,0 +1,279 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_REG_I_H_
+#define _IPAHAL_REG_I_H_
+
+int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
+
+#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
+#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
+ (reg |= ((val) << (shift)) & (mask))
+#define IPA_GETFIELD_FROM_REG(reg, shift, mask) \
+ (((reg) & (mask)) >> (shift))
+
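As an illustration of these macros (a sketch, not part of this patch), using the IPA_ENDP_STATUS_n STATUS_ENDP field defined below (shift 0x1, mask 0x3e):

	u32 reg = 0, ep;

	IPA_SETFIELD_IN_REG(reg, 5, 0x1, 0x3e);     /* reg |= (5 << 1) & 0x3e = 0xa */
	ep = IPA_GETFIELD_FROM_REG(reg, 0x1, 0x3e); /* (0xa & 0x3e) >> 1 = 5 */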
+
+/* IPA_ROUTE register */
+#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
+#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11
+#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK 0x1000000
+#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT 0x18
+
+/* IPA_ENDP_INIT_HDR_n register */
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2 0x8000000
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2 0x1b
+#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2 0x10000000
+#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2 0x1c
+
+/* IPA_ENDP_INIT_HDR_EXT_n register */
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0 0x3c00
+
+/* IPA_ENDP_INIT_AGGR_n register */
+#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK 0x1000000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT 0x18
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x400000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x16
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK 0x200000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x15
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x1f8000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0xf
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x7c00
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xa
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x3e0
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
+
+/* IPA_AGGR_FORCE_CLOSE register */
+#define IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK 0x3fffffff
+#define IPA_AGGR_FORCE_CLOSE_OFST_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0
+
+/* IPA_ENDP_INIT_ROUTE_n register */
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT 0x0
+
+/* IPA_ENDP_INIT_MODE_n register */
+#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_BMSK 0x40000000
+#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_SHFT 0x1e
+#define IPA_ENDP_INIT_MODE_n_PAD_EN_BMSK 0x20000000
+#define IPA_ENDP_INIT_MODE_n_PAD_EN_SHFT 0x1d
+#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_BMSK 0x10000000
+#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_SHFT 0x1c
+#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_BMSK 0xffff000
+#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_SHFT 0xc
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x1f0
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x4
+#define IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x7
+#define IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
+
+/* IPA_ENDP_INIT_NAT_n register */
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_CTRL_n register */
+#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK 0x1
+#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT 0x0
+#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK 0x2
+#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT 0x1
+
+/* IPA_ENDP_INIT_HOL_BLOCK_EN_n register */
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX 19
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_HOL_BLOCK_TIMER_n register */
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT 0x0
+
+/* IPA_ENDP_INIT_DEAGGR_n register */
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x80
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x7
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0
+
+/* IPA_ENDP_INIT_SEQ_n register */
+#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_BMSK 0xf000
+#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_SHFT 0xc
+#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_BMSK 0xf00
+#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_SHFT 0x8
+#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_BMSK 0xf0
+#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_SHFT 0x4
+#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_BMSK 0xf
+#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_SHFT 0x0
+
+/* IPA_DEBUG_CNT_REG_n register */
+#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_MAX 15
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0
+
+/* IPA_ENDP_INIT_CFG_n register */
+#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK 0x100
+#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT 0x8
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_HDR_METADATA_MASK_n register */
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0
+
+/* IPA_ENDP_INIT_HDR_METADATA_n register */
+#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT 0x0
+
+/* IPA_ENDP_INIT_RSRC_GRP_n register */
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0
+
+/* IPA_SHARED_MEM_SIZE register */
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK 0xffff0000
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT 0x10
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK 0xffff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT 0x0
+
+/* IPA_DEBUG_CNT_CTRL_n register */
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK 0x10000000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT 0x1c
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK 0x0ff00000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT 0x14
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT 0xc
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK 0x100
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT 0x8
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK 0x70
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT 0x4
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK 0x1
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT 0x0
+
+/* IPA_FILT_ROUT_HASH_FLUSH register */
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT 12
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT 8
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT 4
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT 0
+
+/* IPA_SINGLE_NDP_MODE register */
+#define IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK 0xfffffffe
+#define IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT 0x1
+#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK 0x1
+#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT 0
+
+/* IPA_QCNCM register */
+#define IPA_QCNCM_MODE_UNDEFINED2_BMSK 0xf0000000
+#define IPA_QCNCM_MODE_UNDEFINED2_SHFT 0x1c
+#define IPA_QCNCM_MODE_VAL_BMSK 0xffffff0
+#define IPA_QCNCM_MODE_VAL_SHFT 0x4
+#define IPA_QCNCM_UNDEFINED1_BMSK 0xe
+#define IPA_QCNCM_UNDEFINED1_SHFT 0x1
+#define IPA_QCNCM_MODE_EN_BMSK 0x1
+#define IPA_QCNCM_MODE_EN_SHFT 0
+
+/* IPA_ENDP_STATUS_n register */
+#define IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK 0x100
+#define IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT 0x8
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
+
+/* IPA_ENDP_FILTER_ROUTER_HSH_CFG_n register */
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT 0
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK 0x1
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT 1
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK 0x2
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT 2
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK 0x4
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT 3
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK 0x8
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT 4
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK 0x10
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT 5
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK 0x20
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT 6
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK 0x40
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT 7
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK 0xff80
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT 16
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK 0x10000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT 17
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK 0x20000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT 18
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK 0x40000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT 19
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK 0x80000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT 20
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK 0x100000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT 21
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK 0x200000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT 22
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK 0x400000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT 23
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK 0xff800000
+
+/* IPA_RSRC_GRP_XY_RSRC_TYPE_n register */
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK 0xFF000000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT 24
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK 0xFF0000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT 16
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK 0xFF00
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT 8
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK 0xFF
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT 0
+
+/* IPA_IPA_IPA_RX_HPS_CLIENTS_MIN/MAX_DEPTH_0/1 registers */
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n)))
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(n) (8 * (n))
+
+#endif /* _IPAHAL_REG_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index f42e3c1c6924..c9f2c150e668 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -34,6 +34,8 @@
#include <linux/rmnet_ipa_fd_ioctl.h>
#include <linux/ipa.h>
+#include "ipa_trace.h"
+
#define WWAN_METADATA_SHFT 24
#define WWAN_METADATA_MASK 0xFF000000
#define WWAN_DATA_LEN 2000
@@ -46,7 +48,6 @@
#define DEFAULT_OUTSTANDING_LOW 32
#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
-#define IPA_WWAN_DEVICE_COUNT (1)
#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
@@ -55,20 +56,11 @@
#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
-static struct net_device *ipa3_netdevs[IPA_WWAN_DEVICE_COUNT];
-static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
-static u32 ipa3_qmap_hdr_hdl, ipa3_dflt_v4_wan_rt_hdl, ipa3_dflt_v6_wan_rt_hdl;
-static struct ipa3_rmnet_mux_val ipa3_mux_channel[MAX_NUM_OF_MUX_CHANNEL];
-static int ipa3_num_q6_rule, ipa3_old_num_q6_rule;
-static int ipa3_rmnet_index;
-static bool ipa3_egress_set, ipa3_a7_ul_flt_set;
-static struct workqueue_struct *ipa_rm_q6_workqueue; /* IPA_RM workqueue*/
-static atomic_t is_initialized;
-static atomic_t is_ssr;
-static void *ipa3_subsys_notify_handle;
-
-u32 apps_to_ipa3_hdl, ipa3_to_apps_hdl; /* get handler from ipa */
-static struct mutex ipa_to_apps_pipe_handle_guard;
+#define IPA_NETDEV() \
+ ((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? \
+ rmnet_ipa3_ctx->wwan_priv->net : NULL)
+
+
static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
static void ipa3_wwan_msg_free_cb(void*, u32, u32);
@@ -115,6 +107,30 @@ struct ipa3_wwan_private {
enum ipa3_wwan_device_status device_status;
};
+struct rmnet_ipa3_context {
+ struct ipa3_wwan_private *wwan_priv;
+ struct ipa_sys_connect_params apps_to_ipa_ep_cfg;
+ struct ipa_sys_connect_params ipa_to_apps_ep_cfg;
+ u32 qmap_hdr_hdl;
+ u32 dflt_v4_wan_rt_hdl;
+ u32 dflt_v6_wan_rt_hdl;
+ struct ipa3_rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
+ int num_q6_rules;
+ int old_num_q6_rules;
+ int rmnet_index;
+ bool egress_set;
+ bool a7_ul_flt_set;
+ struct workqueue_struct *rm_q6_wq;
+ atomic_t is_initialized;
+ atomic_t is_ssr;
+ void *subsys_notify_handle;
+ u32 apps_to_ipa3_hdl;
+ u32 ipa3_to_apps_hdl;
+ struct mutex ipa_to_apps_pipe_handle_guard;
+};
+
+static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
+
/**
* ipa3_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
*
@@ -157,7 +173,7 @@ static int ipa3_setup_a7_qmap_hdr(void)
ret = -EPERM;
goto bail;
}
- ipa3_qmap_hdr_hdl = hdr_entry->hdr_hdl;
+ rmnet_ipa3_ctx->qmap_hdr_hdl = hdr_entry->hdr_hdl;
ret = 0;
bail:
@@ -183,7 +199,7 @@ static void ipa3_del_a7_qmap_hdr(void)
del_hdr->commit = 1;
del_hdr->num_hdls = 1;
hdl_entry = &del_hdr->hdl[0];
- hdl_entry->hdl = ipa3_qmap_hdr_hdl;
+ hdl_entry->hdl = rmnet_ipa3_ctx->qmap_hdr_hdl;
ret = ipa3_del_hdr(del_hdr);
if (ret || hdl_entry->status)
@@ -191,7 +207,7 @@ static void ipa3_del_a7_qmap_hdr(void)
else
IPAWANDBG("hdrs deletion done\n");
- ipa3_qmap_hdr_hdl = 0;
+ rmnet_ipa3_ctx->qmap_hdr_hdl = 0;
kfree(del_hdr);
}
@@ -226,7 +242,7 @@ static void ipa3_del_qmap_hdr(uint32_t hdr_hdl)
else
IPAWANDBG("header deletion done\n");
- ipa3_qmap_hdr_hdl = 0;
+ rmnet_ipa3_ctx->qmap_hdr_hdl = 0;
kfree(del_hdr);
}
@@ -234,9 +250,9 @@ static void ipa3_del_mux_qmap_hdrs(void)
{
int index;
- for (index = 0; index < ipa3_rmnet_index; index++) {
- ipa3_del_qmap_hdr(ipa3_mux_channel[index].hdr_hdl);
- ipa3_mux_channel[index].hdr_hdl = 0;
+ for (index = 0; index < rmnet_ipa3_ctx->rmnet_index; index++) {
+ ipa3_del_qmap_hdr(rmnet_ipa3_ctx->mux_channel[index].hdr_hdl);
+ rmnet_ipa3_ctx->mux_channel[index].hdr_hdl = 0;
}
}
@@ -319,7 +335,7 @@ static int ipa3_setup_dflt_wan_rt_tables(void)
rt_rule_entry = &rt_rule->rules[0];
rt_rule_entry->at_rear = 1;
rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS;
- rt_rule_entry->rule.hdr_hdl = ipa3_qmap_hdr_hdl;
+ rt_rule_entry->rule.hdr_hdl = rmnet_ipa3_ctx->qmap_hdr_hdl;
if (ipa3_add_rt_rule(rt_rule)) {
IPAWANERR("fail to add dflt_wan v4 rule\n");
@@ -328,7 +344,7 @@ static int ipa3_setup_dflt_wan_rt_tables(void)
}
IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
- ipa3_dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
+ rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
/* setup a default v6 route to point to A5 */
rt_rule->ip = IPA_IP_v6;
@@ -338,7 +354,7 @@ static int ipa3_setup_dflt_wan_rt_tables(void)
return -EPERM;
}
IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
- ipa3_dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
+ rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
kfree(rt_rule);
return 0;
@@ -365,7 +381,7 @@ static void ipa3_del_dflt_wan_rt_tables(void)
rt_rule_entry = &rt_rule->hdl[0];
rt_rule_entry->status = -1;
- rt_rule_entry->hdl = ipa3_dflt_v4_wan_rt_hdl;
+ rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl;
IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
rt_rule_entry->hdl, IPA_IP_v4);
@@ -375,7 +391,7 @@ static void ipa3_del_dflt_wan_rt_tables(void)
}
rt_rule->ip = IPA_IP_v6;
- rt_rule_entry->hdl = ipa3_dflt_v6_wan_rt_hdl;
+ rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl;
IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
rt_rule_entry->hdl, IPA_IP_v6);
if (ipa3_del_rt_rule(rt_rule) ||
@@ -392,22 +408,24 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
int i, j;
if (rule_req->filter_spec_ex_list_valid == true) {
- ipa3_num_q6_rule = rule_req->filter_spec_ex_list_len;
- IPAWANDBG("Received (%d) install_flt_req\n", ipa3_num_q6_rule);
+ rmnet_ipa3_ctx->num_q6_rules =
+ rule_req->filter_spec_ex_list_len;
+ IPAWANDBG("Received (%d) install_flt_req\n",
+ rmnet_ipa3_ctx->num_q6_rules);
} else {
- ipa3_num_q6_rule = 0;
+ rmnet_ipa3_ctx->num_q6_rules = 0;
IPAWANERR("got no UL rules from modem\n");
return -EINVAL;
}
/* copy UL filter rules from Modem*/
- for (i = 0; i < ipa3_num_q6_rule; i++) {
+ for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
/* check if rules overside the cache*/
if (i == MAX_NUM_Q6_RULE) {
IPAWANERR("Reaching (%d) max cache ",
MAX_NUM_Q6_RULE);
IPAWANERR(" however total (%d)\n",
- ipa3_num_q6_rule);
+ rmnet_ipa3_ctx->num_q6_rules);
goto failure;
}
ipa3_qmi_ctx->q6_ul_filter_rule[i].ip =
@@ -566,7 +584,8 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
}
if (rule_req->xlat_filter_indices_list_valid) {
- if (rule_req->xlat_filter_indices_list_len > ipa3_num_q6_rule) {
+ if (rule_req->xlat_filter_indices_list_len >
+ rmnet_ipa3_ctx->num_q6_rules) {
IPAWANERR("Number of xlat indices is not valid: %d\n",
rule_req->xlat_filter_indices_list_len);
goto failure;
@@ -579,7 +598,7 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) {
if (rule_req->xlat_filter_indices_list[i]
- >= ipa3_num_q6_rule) {
+ >= rmnet_ipa3_ctx->num_q6_rules) {
IPAWANERR("Xlat rule idx is wrong: %d\n",
rule_req->xlat_filter_indices_list[i]);
goto failure;
@@ -595,7 +614,7 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
goto success;
failure:
- ipa3_num_q6_rule = 0;
+ rmnet_ipa3_ctx->num_q6_rules = 0;
memset(ipa3_qmi_ctx->q6_ul_filter_rule, 0,
sizeof(ipa3_qmi_ctx->q6_ul_filter_rule));
return -EINVAL;
@@ -631,7 +650,7 @@ static int ipa3_wwan_add_ul_flt_rule_to_ipa(void)
param->global = false;
param->num_rules = (uint8_t)1;
- for (i = 0; i < ipa3_num_q6_rule; i++) {
+ for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip;
memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
flt_rule_entry.at_rear = true;
@@ -669,8 +688,8 @@ static int ipa3_wwan_add_ul_flt_rule_to_ipa(void)
ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
req->install_status = QMI_RESULT_SUCCESS_V01;
req->rule_id_valid = 1;
- req->rule_id_len = ipa3_num_q6_rule;
- for (i = 0; i < ipa3_num_q6_rule; i++) {
+ req->rule_id_len = rmnet_ipa3_ctx->num_q6_rules;
+ for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
req->rule_id[i] =
ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id;
}
@@ -678,9 +697,9 @@ static int ipa3_wwan_add_ul_flt_rule_to_ipa(void)
IPAWANDBG("add filter rule index on A7-RX failed\n");
retval = -EFAULT;
}
- ipa3_old_num_q6_rule = ipa3_num_q6_rule;
+ rmnet_ipa3_ctx->old_num_q6_rules = rmnet_ipa3_ctx->num_q6_rules;
IPAWANDBG("add (%d) filter rule index on A7-RX\n",
- ipa3_old_num_q6_rule);
+ rmnet_ipa3_ctx->old_num_q6_rules);
kfree(param);
kfree(req);
return retval;
@@ -704,7 +723,7 @@ static int ipa3_wwan_del_ul_flt_rule_to_ipa(void)
param->commit = 1;
param->num_hdls = (uint8_t) 1;
- for (i = 0; i < ipa3_old_num_q6_rule; i++) {
+ for (i = 0; i < rmnet_ipa3_ctx->old_num_q6_rules; i++) {
param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip;
memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del));
flt_rule_entry.hdl = ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i];
@@ -720,8 +739,8 @@ static int ipa3_wwan_del_ul_flt_rule_to_ipa(void)
}
/* set UL filter-rule add-indication */
- ipa3_a7_ul_flt_set = false;
- ipa3_old_num_q6_rule = 0;
+ rmnet_ipa3_ctx->a7_ul_flt_set = false;
+ rmnet_ipa3_ctx->old_num_q6_rules = 0;
kfree(param);
return retval;
@@ -732,7 +751,7 @@ static int ipa3_find_mux_channel_index(uint32_t mux_id)
int i;
for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
- if (mux_id == ipa3_mux_channel[i].mux_id)
+ if (mux_id == rmnet_ipa3_ctx->mux_channel[i].mux_id)
return i;
}
return MAX_NUM_OF_MUX_CHANNEL;
@@ -743,8 +762,8 @@ static int find_vchannel_name_index(const char *vchannel_name)
int i;
for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
- if (0 == strcmp(ipa3_mux_channel[i].vchannel_name,
- vchannel_name))
+ if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+ vchannel_name) == 0)
return i;
}
return MAX_NUM_OF_MUX_CHANNEL;
@@ -766,15 +785,16 @@ static int ipa3_wwan_register_to_ipa(int index)
int ret = 0, i;
IPAWANDBG("index(%d) device[%s]:\n", index,
- ipa3_mux_channel[index].vchannel_name);
- if (!ipa3_mux_channel[index].mux_hdr_set) {
- ret = ipa3_add_qmap_hdr(ipa3_mux_channel[index].mux_id,
- &ipa3_mux_channel[index].hdr_hdl);
+ rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+ if (!rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set) {
+ ret = ipa3_add_qmap_hdr(
+ rmnet_ipa3_ctx->mux_channel[index].mux_id,
+ &rmnet_ipa3_ctx->mux_channel[index].hdr_hdl);
if (ret) {
IPAWANERR("ipa_add_mux_hdr failed (%d)\n", index);
return ret;
}
- ipa3_mux_channel[index].mux_hdr_set = true;
+ rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set = true;
}
tx_properties.prop = tx_ioc_properties;
tx_ipv4_property = &tx_properties.prop[0];
@@ -782,14 +802,14 @@ static int ipa3_wwan_register_to_ipa(int index)
tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
A2_MUX_HDR_NAME_V4_PREF,
- ipa3_mux_channel[index].mux_id);
+ rmnet_ipa3_ctx->mux_channel[index].mux_id);
tx_ipv6_property = &tx_properties.prop[1];
tx_ipv6_property->ip = IPA_IP_v6;
tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
/* no need use A2_MUX_HDR_NAME_V6_PREF, same header */
snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
A2_MUX_HDR_NAME_V4_PREF,
- ipa3_mux_channel[index].mux_id);
+ rmnet_ipa3_ctx->mux_channel[index].mux_id);
tx_properties.num_props = 2;
rx_properties.prop = rx_ioc_properties;
@@ -797,19 +817,19 @@ static int ipa3_wwan_register_to_ipa(int index)
rx_ipv4_property->ip = IPA_IP_v4;
rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
rx_ipv4_property->attrib.meta_data =
- ipa3_mux_channel[index].mux_id << WWAN_METADATA_SHFT;
+ rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT;
rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
rx_ipv6_property = &rx_properties.prop[1];
rx_ipv6_property->ip = IPA_IP_v6;
rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
rx_ipv6_property->attrib.meta_data =
- ipa3_mux_channel[index].mux_id << WWAN_METADATA_SHFT;
+ rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT;
rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
rx_properties.num_props = 2;
- pyld_sz = ipa3_num_q6_rule *
+ pyld_sz = rmnet_ipa3_ctx->num_q6_rules *
sizeof(struct ipa_ioc_ext_intf_prop);
ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL);
if (!ext_ioc_properties) {
@@ -820,12 +840,13 @@ static int ipa3_wwan_register_to_ipa(int index)
ext_properties.prop = ext_ioc_properties;
ext_properties.excp_pipe_valid = true;
ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS;
- ext_properties.num_props = ipa3_num_q6_rule;
- for (i = 0; i < ipa3_num_q6_rule; i++) {
+ ext_properties.num_props = rmnet_ipa3_ctx->num_q6_rules;
+ for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
memcpy(&(ext_properties.prop[i]),
&(ipa3_qmi_ctx->q6_ul_filter_rule[i]),
sizeof(struct ipa_ioc_ext_intf_prop));
- ext_properties.prop[i].mux_id = ipa3_mux_channel[index].mux_id;
+ ext_properties.prop[i].mux_id =
+ rmnet_ipa3_ctx->mux_channel[index].mux_id;
IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i,
ext_properties.prop[i].ip,
ext_properties.prop[i].rt_tbl_idx);
@@ -833,15 +854,15 @@ static int ipa3_wwan_register_to_ipa(int index)
ext_properties.prop[i].action,
ext_properties.prop[i].mux_id);
}
- ret = ipa3_register_intf_ext(ipa3_mux_channel[index].
+ ret = ipa3_register_intf_ext(rmnet_ipa3_ctx->mux_channel[index].
vchannel_name, &tx_properties,
&rx_properties, &ext_properties);
if (ret) {
IPAWANERR("[%s]:ipa3_register_intf failed %d\n",
- ipa3_mux_channel[index].vchannel_name, ret);
+ rmnet_ipa3_ctx->mux_channel[index].vchannel_name, ret);
goto fail;
}
- ipa3_mux_channel[index].ul_flt_reg = true;
+ rmnet_ipa3_ctx->mux_channel[index].ul_flt_reg = true;
fail:
kfree(ext_ioc_properties);
return ret;
@@ -852,21 +873,22 @@ static void ipa3_cleanup_deregister_intf(void)
int i;
int ret;
- for (i = 0; i < ipa3_rmnet_index; i++) {
- if (ipa3_mux_channel[i].ul_flt_reg) {
+ for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) {
+ if (rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg) {
ret = ipa3_deregister_intf(
- ipa3_mux_channel[i].vchannel_name);
+ rmnet_ipa3_ctx->mux_channel[i].vchannel_name);
if (ret < 0) {
IPAWANERR("de-register device %s(%d) failed\n",
- ipa3_mux_channel[i].vchannel_name,
+ rmnet_ipa3_ctx->mux_channel[i].
+ vchannel_name,
i);
return;
}
IPAWANDBG("de-register device %s(%d) success\n",
- ipa3_mux_channel[i].vchannel_name,
+ rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
i);
}
- ipa3_mux_channel[i].ul_flt_reg = false;
+ rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = false;
}
}
@@ -874,10 +896,10 @@ int ipa3_wwan_update_mux_channel_prop(void)
{
int ret = 0, i;
/* install UL filter rules */
- if (ipa3_egress_set) {
+ if (rmnet_ipa3_ctx->egress_set) {
if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
IPAWANDBG("setup UL filter rules\n");
- if (ipa3_a7_ul_flt_set) {
+ if (rmnet_ipa3_ctx->a7_ul_flt_set) {
IPAWANDBG("del previous UL filter rules\n");
/* delete rule hdlers */
ret = ipa3_wwan_del_ul_flt_rule_to_ipa();
@@ -892,29 +914,29 @@ int ipa3_wwan_update_mux_channel_prop(void)
if (ret)
IPAWANERR("failed to install UL rules\n");
else
- ipa3_a7_ul_flt_set = true;
+ rmnet_ipa3_ctx->a7_ul_flt_set = true;
}
/* update Tx/Rx/Ext property */
IPAWANDBG("update Tx/Rx/Ext property in IPA\n");
- if (ipa3_rmnet_index == 0) {
+ if (rmnet_ipa3_ctx->rmnet_index == 0) {
IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n");
return ret;
}
ipa3_cleanup_deregister_intf();
- for (i = 0; i < ipa3_rmnet_index; i++) {
+ for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) {
ret = ipa3_wwan_register_to_ipa(i);
if (ret < 0) {
IPAWANERR("failed to re-regist %s, mux %d, index %d\n",
- ipa3_mux_channel[i].vchannel_name,
- ipa3_mux_channel[i].mux_id,
+ rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+ rmnet_ipa3_ctx->mux_channel[i].mux_id,
i);
return -ENODEV;
}
IPAWANERR("dev(%s) has registered to IPA\n",
- ipa3_mux_channel[i].vchannel_name);
- ipa3_mux_channel[i].ul_flt_reg = true;
+ rmnet_ipa3_ctx->mux_channel[i].vchannel_name);
+ rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = true;
}
return ret;
}
@@ -1103,7 +1125,7 @@ static void apps_ipa_tx_complete_notify(void *priv,
return;
}
- if (dev != ipa3_netdevs[0]) {
+ if (dev != IPA_NETDEV()) {
IPAWANDBG("Received pre-SSR packet completion\n");
dev_kfree_skb_any(skb);
return;
@@ -1112,7 +1134,7 @@ static void apps_ipa_tx_complete_notify(void *priv,
wwan_ptr = netdev_priv(dev);
atomic_dec(&wwan_ptr->outstanding_pkts);
__netif_tx_lock_bh(netdev_get_tx_queue(dev, 0));
- if (!atomic_read(&is_ssr) &&
+ if (!atomic_read(&rmnet_ipa3_ctx->is_ssr) &&
netif_queue_stopped(wwan_ptr->net) &&
atomic_read(&wwan_ptr->outstanding_pkts) <
(wwan_ptr->outstanding_low)) {
@@ -1150,13 +1172,16 @@ static void apps_ipa_packet_receive_notify(void *priv,
return;
}
- skb->dev = ipa3_netdevs[0];
+ skb->dev = IPA_NETDEV();
skb->protocol = htons(ETH_P_MAP);
- if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH == 0)
+ if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH == 0) {
+ trace_rmnet_ipa_netifni3(dev->stats.rx_packets);
result = netif_rx_ni(skb);
- else
+ } else {
+ trace_rmnet_ipa_netifrx3(dev->stats.rx_packets);
result = netif_rx(skb);
+ }
if (result) {
pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_rx\n",
@@ -1190,6 +1215,8 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct ipa_wan_msg *wan_msg = NULL;
struct rmnet_ioctl_extended_s extend_ioctl_data;
struct rmnet_ioctl_data_s ioctl_data;
+ struct ipa3_rmnet_mux_val *mux_channel;
+ int rmnet_index;
IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd);
switch (cmd) {
@@ -1338,7 +1365,7 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Get driver name */
case RMNET_IOCTL_GET_DRIVER_NAME:
memcpy(&extend_ioctl_data.u.if_name,
- ipa3_netdevs[0]->name,
+ IPA_NETDEV()->name,
sizeof(IFNAMSIZ));
if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
&extend_ioctl_data,
@@ -1355,154 +1382,175 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
rmnet_mux_val.mux_id);
return rc;
}
+ if (rmnet_ipa3_ctx->rmnet_index
+ >= MAX_NUM_OF_MUX_CHANNEL) {
+ IPAWANERR("Exceed mux_channel limit(%d)\n",
+ rmnet_ipa3_ctx->rmnet_index);
+ return -EFAULT;
+ }
IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
extend_ioctl_data.u.rmnet_mux_val.mux_id,
extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
/* cache the mux name and id */
- ipa3_mux_channel[ipa3_rmnet_index].mux_id =
+ mux_channel = rmnet_ipa3_ctx->mux_channel;
+ rmnet_index = rmnet_ipa3_ctx->rmnet_index;
+
+ mux_channel[rmnet_index].mux_id =
extend_ioctl_data.u.rmnet_mux_val.mux_id;
- memcpy(ipa3_mux_channel[ipa3_rmnet_index].vchannel_name,
+ memcpy(mux_channel[rmnet_index].vchannel_name,
extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
- sizeof(ipa3_mux_channel[ipa3_rmnet_index]
+ sizeof(mux_channel[rmnet_index]
.vchannel_name));
IPAWANDBG("cashe device[%s:%d] in IPA_wan[%d]\n",
- ipa3_mux_channel[ipa3_rmnet_index]
- .vchannel_name,
- ipa3_mux_channel[ipa3_rmnet_index].mux_id,
- ipa3_rmnet_index);
+ mux_channel[rmnet_index].vchannel_name,
+ mux_channel[rmnet_index].mux_id,
+ rmnet_index);
/* check if UL filter rules coming*/
- if (ipa3_num_q6_rule != 0) {
+ if (rmnet_ipa3_ctx->num_q6_rules != 0) {
IPAWANERR("dev(%s) register to IPA\n",
extend_ioctl_data.u.rmnet_mux_val.
vchannel_name);
rc = ipa3_wwan_register_to_ipa(
- ipa3_rmnet_index);
+ rmnet_ipa3_ctx->rmnet_index);
if (rc < 0) {
IPAWANERR("device %s reg IPA failed\n",
extend_ioctl_data.u.
rmnet_mux_val.vchannel_name);
return -ENODEV;
}
- ipa3_mux_channel[ipa3_rmnet_index].
- mux_channel_set = true;
- ipa3_mux_channel[ipa3_rmnet_index].
- ul_flt_reg = true;
+ mux_channel[rmnet_index].mux_channel_set = true;
+ mux_channel[rmnet_index].ul_flt_reg = true;
} else {
IPAWANDBG("dev(%s) haven't registered to IPA\n",
extend_ioctl_data.u.
rmnet_mux_val.vchannel_name);
- ipa3_mux_channel[ipa3_rmnet_index].
- mux_channel_set = true;
- ipa3_mux_channel[ipa3_rmnet_index].
- ul_flt_reg = false;
+ mux_channel[rmnet_index].mux_channel_set = true;
+ mux_channel[rmnet_index].ul_flt_reg = false;
}
- ipa3_rmnet_index++;
+ rmnet_ipa3_ctx->rmnet_index++;
break;
case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
if ((extend_ioctl_data.u.data) &
RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
- apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 8;
- apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
- cs_offload_en = 1;
- apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
- cs_metadata_hdr_offset = 1;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+ ipa_ep_cfg.hdr.hdr_len = 8;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+ ipa_ep_cfg.cfg.cs_offload_en =
+ IPA_ENABLE_CS_OFFLOAD_UL;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+ ipa_ep_cfg.cfg.cs_metadata_hdr_offset
+ = 1;
} else {
- apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+ ipa_ep_cfg.hdr.hdr_len = 4;
}
if ((extend_ioctl_data.u.data) &
RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
- apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
- IPA_ENABLE_AGGR;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+ ipa_ep_cfg.aggr.aggr_en =
+ IPA_ENABLE_AGGR;
else
- apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
- IPA_BYPASS_AGGR;
- apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+ ipa_ep_cfg.aggr.aggr_en =
+ IPA_BYPASS_AGGR;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
hdr_ofst_metadata_valid = 1;
/* modem want offset at 0! */
- apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
- apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.dst =
- IPA_CLIENT_APPS_LAN_WAN_PROD;
- apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.mode = IPA_BASIC;
-
- apps_to_ipa_ep_cfg.client =
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
+ hdr_ofst_metadata = 0;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.
+ dst = IPA_CLIENT_APPS_LAN_WAN_PROD;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.
+ mode = IPA_BASIC;
+
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.client =
IPA_CLIENT_APPS_LAN_WAN_PROD;
- apps_to_ipa_ep_cfg.notify =
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.notify =
apps_ipa_tx_complete_notify;
- apps_to_ipa_ep_cfg.desc_fifo_sz =
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.desc_fifo_sz =
IPA_SYS_TX_DATA_DESC_FIFO_SZ;
- apps_to_ipa_ep_cfg.priv = dev;
+ rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.priv = dev;
- rc = ipa3_setup_sys_pipe(&apps_to_ipa_ep_cfg,
- &apps_to_ipa3_hdl);
+ rc = ipa3_setup_sys_pipe(
+ &rmnet_ipa3_ctx->apps_to_ipa_ep_cfg,
+ &rmnet_ipa3_ctx->apps_to_ipa3_hdl);
if (rc)
IPAWANERR("failed to config egress endpoint\n");
- if (ipa3_num_q6_rule != 0) {
+ if (rmnet_ipa3_ctx->num_q6_rules != 0) {
/* already got Q6 UL filter rules*/
if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt
== false)
rc = ipa3_wwan_add_ul_flt_rule_to_ipa();
else
rc = 0;
- ipa3_egress_set = true;
+ rmnet_ipa3_ctx->egress_set = true;
if (rc)
IPAWANERR("install UL rules failed\n");
else
- ipa3_a7_ul_flt_set = true;
+ rmnet_ipa3_ctx->a7_ul_flt_set = true;
} else {
/* wait Q6 UL filter rules*/
- ipa3_egress_set = true;
- IPAWANDBG("no UL-rules, ipa3_egress_set(%d)\n",
- ipa3_egress_set);
+ rmnet_ipa3_ctx->egress_set = true;
+ IPAWANDBG("no UL-rules, egress_set(%d)\n",
+ rmnet_ipa3_ctx->egress_set);
}
break;
case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/* Set IDF */
IPAWANDBG("get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
if ((extend_ioctl_data.u.data) &
RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
- ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.
- cs_offload_en = 2;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+ ipa_ep_cfg.cfg.cs_offload_en =
+ IPA_ENABLE_CS_OFFLOAD_DL;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+ hdr_len = 4;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
hdr_ofst_metadata_valid = 1;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.
hdr.hdr_ofst_metadata = 1;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
hdr_ofst_pkt_size_valid = 1;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.
hdr_ofst_pkt_size = 2;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
hdr_total_len_or_pad_valid = true;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
hdr_total_len_or_pad = 0;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
hdr_payload_len_inc_padding = true;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
hdr_total_len_or_pad_offset = 0;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.
hdr_little_endian = 0;
- ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.
- metadata_mask = 0xFF000000;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.ipa_ep_cfg.
+ metadata_mask.metadata_mask = 0xFF000000;
- ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
- ipa_to_apps_ep_cfg.notify =
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.client =
+ IPA_CLIENT_APPS_WAN_CONS;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.notify =
apps_ipa_packet_receive_notify;
- ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
- ipa_to_apps_ep_cfg.priv = dev;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.desc_fifo_sz =
+ IPA_SYS_DESC_FIFO_SZ;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.priv = dev;
- mutex_lock(&ipa_to_apps_pipe_handle_guard);
- if (atomic_read(&is_ssr)) {
+ mutex_lock(
+ &rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+ if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
IPAWANDBG("In SSR sequence/recovery\n");
- mutex_unlock(&ipa_to_apps_pipe_handle_guard);
+ mutex_unlock(&rmnet_ipa3_ctx->
+ ipa_to_apps_pipe_handle_guard);
rc = -EFAULT;
break;
}
rc = ipa3_setup_sys_pipe(
- &ipa_to_apps_ep_cfg, &ipa3_to_apps_hdl);
- mutex_unlock(&ipa_to_apps_pipe_handle_guard);
+ &rmnet_ipa3_ctx->ipa_to_apps_ep_cfg,
+ &rmnet_ipa3_ctx->ipa3_to_apps_hdl);
+ mutex_unlock(&rmnet_ipa3_ctx->
+ ipa_to_apps_pipe_handle_guard);
if (rc)
IPAWANERR("failed to configure ingress\n");
break;
@@ -1626,7 +1674,7 @@ static void ipa3_q6_prod_rm_request_resource(struct work_struct *work)
static int ipa3_q6_rm_request_resource(void)
{
- queue_delayed_work(ipa_rm_q6_workqueue,
+ queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
&ipa3_q6_con_rm_request, 0);
return 0;
}
@@ -1646,7 +1694,7 @@ static void ipa3_q6_prod_rm_release_resource(struct work_struct *work)
static int ipa3_q6_rm_release_resource(void)
{
- queue_delayed_work(ipa_rm_q6_workqueue,
+ queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
&ipa3_q6_con_rm_release, 0);
return 0;
}
@@ -1674,8 +1722,8 @@ static int ipa3_q6_initialize_rm(void)
int result;
/* Initialize IPA_RM workqueue */
- ipa_rm_q6_workqueue = create_singlethread_workqueue("clnt_req");
- if (!ipa_rm_q6_workqueue)
+ rmnet_ipa3_ctx->rm_q6_wq = create_singlethread_workqueue("clnt_req");
+ if (!rmnet_ipa3_ctx->rm_q6_wq)
return -ENOMEM;
memset(&create_params, 0, sizeof(create_params));
@@ -1723,7 +1771,7 @@ create_rsrc_err2:
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, result);
create_rsrc_err1:
- destroy_workqueue(ipa_rm_q6_workqueue);
+ destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
return result;
}
@@ -1745,15 +1793,15 @@ void ipa3_q6_deinitialize_rm(void)
if (ret < 0)
IPAWANERR("Error deleting resource %d, ret=%d\n",
IPA_RM_RESOURCE_Q6_PROD, ret);
- destroy_workqueue(ipa_rm_q6_workqueue);
+ destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
}
static void ipa3_wake_tx_queue(struct work_struct *work)
{
- if (ipa3_netdevs[0]) {
- __netif_tx_lock_bh(netdev_get_tx_queue(ipa3_netdevs[0], 0));
- netif_wake_queue(ipa3_netdevs[0]);
- __netif_tx_unlock_bh(netdev_get_tx_queue(ipa3_netdevs[0], 0));
+ if (IPA_NETDEV()) {
+ __netif_tx_lock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0));
+ netif_wake_queue(IPA_NETDEV());
+ __netif_tx_unlock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0));
}
}
@@ -1874,7 +1922,6 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
{
int ret, i;
struct net_device *dev;
- struct ipa3_wwan_private *wwan_ptr;
struct ipa_rm_create_params ipa_rm_params; /* IPA_RM */
struct ipa_rm_perf_profile profile; /* IPA_RM */
@@ -1905,17 +1952,19 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
}
/* initialize tx/rx endpoint setup */
- memset(&apps_to_ipa_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
- memset(&ipa_to_apps_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
+ memset(&rmnet_ipa3_ctx->apps_to_ipa_ep_cfg, 0,
+ sizeof(struct ipa_sys_connect_params));
+ memset(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg, 0,
+ sizeof(struct ipa_sys_connect_params));
/* initialize ex property setup */
- ipa3_num_q6_rule = 0;
- ipa3_old_num_q6_rule = 0;
- ipa3_rmnet_index = 0;
- ipa3_egress_set = false;
- ipa3_a7_ul_flt_set = false;
+ rmnet_ipa3_ctx->num_q6_rules = 0;
+ rmnet_ipa3_ctx->old_num_q6_rules = 0;
+ rmnet_ipa3_ctx->rmnet_index = 0;
+ rmnet_ipa3_ctx->egress_set = false;
+ rmnet_ipa3_ctx->a7_ul_flt_set = false;
for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++)
- memset(&ipa3_mux_channel[i], 0,
+ memset(&rmnet_ipa3_ctx->mux_channel[i], 0,
sizeof(struct ipa3_rmnet_mux_val));
/* start A7 QMI service/client */
@@ -1934,7 +1983,7 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
if (ret)
goto setup_dflt_wan_rt_tables_err;
- if (!atomic_read(&is_ssr)) {
+ if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
/* Start transport-driver fd ioctl for ipacm for first init */
ret = ipa3_wan_ioctl_init();
if (ret)
@@ -1954,18 +2003,19 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto alloc_netdev_err;
}
- ipa3_netdevs[0] = dev;
- wwan_ptr = netdev_priv(dev);
- memset(wwan_ptr, 0, sizeof(*wwan_ptr));
- IPAWANDBG("wwan_ptr (private) = %p", wwan_ptr);
- wwan_ptr->net = dev;
- wwan_ptr->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
- wwan_ptr->outstanding_low = DEFAULT_OUTSTANDING_LOW;
- atomic_set(&wwan_ptr->outstanding_pkts, 0);
- spin_lock_init(&wwan_ptr->lock);
- init_completion(&wwan_ptr->resource_granted_completion);
-
- if (!atomic_read(&is_ssr)) {
+ rmnet_ipa3_ctx->wwan_priv = netdev_priv(dev);
+ memset(rmnet_ipa3_ctx->wwan_priv, 0,
+ sizeof(*(rmnet_ipa3_ctx->wwan_priv)));
+ IPAWANDBG("wwan_ptr (private) = %p", rmnet_ipa3_ctx->wwan_priv);
+ rmnet_ipa3_ctx->wwan_priv->net = dev;
+ rmnet_ipa3_ctx->wwan_priv->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
+ rmnet_ipa3_ctx->wwan_priv->outstanding_low = DEFAULT_OUTSTANDING_LOW;
+ atomic_set(&rmnet_ipa3_ctx->wwan_priv->outstanding_pkts, 0);
+ spin_lock_init(&rmnet_ipa3_ctx->wwan_priv->lock);
+ init_completion(
+ &rmnet_ipa3_ctx->wwan_priv->resource_granted_completion);
+
+ if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
/* IPA_RM configuration starts */
ret = ipa3_q6_initialize_rm();
if (ret) {
@@ -2013,24 +2063,23 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
goto set_perf_err;
}
- IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n",
- ipa3_netdevs[0]->name);
+ IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n", dev->name);
if (ret) {
IPAWANERR("default configuration failed rc=%d\n",
ret);
goto config_err;
}
- atomic_set(&is_initialized, 1);
- if (!atomic_read(&is_ssr)) {
+ atomic_set(&rmnet_ipa3_ctx->is_initialized, 1);
+ if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
/* offline charging mode */
ipa3_proxy_clk_unvote();
}
- atomic_set(&is_ssr, 0);
+ atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
pr_info("rmnet_ipa completed initialization\n");
return 0;
config_err:
- unregister_netdev(ipa3_netdevs[0]);
+ unregister_netdev(dev);
set_perf_err:
ret = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
IPA_RM_RESOURCE_Q6_CONS);
@@ -2052,8 +2101,8 @@ timer_init_err:
create_rsrc_err:
ipa3_q6_deinitialize_rm();
q6_init_err:
- free_netdev(ipa3_netdevs[0]);
- ipa3_netdevs[0] = NULL;
+ free_netdev(dev);
+ rmnet_ipa3_ctx->wwan_priv = NULL;
alloc_netdev_err:
ipa3_wan_ioctl_deinit();
wan_ioctl_init_err:
@@ -2062,7 +2111,7 @@ setup_dflt_wan_rt_tables_err:
ipa3_del_a7_qmap_hdr();
setup_a7_qmap_hdr_err:
ipa3_qmi_service_exit();
- atomic_set(&is_ssr, 0);
+ atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
return ret;
}
@@ -2071,14 +2120,14 @@ static int ipa3_wwan_remove(struct platform_device *pdev)
int ret;
pr_info("rmnet_ipa started deinitialization\n");
- mutex_lock(&ipa_to_apps_pipe_handle_guard);
- ret = ipa3_teardown_sys_pipe(ipa3_to_apps_hdl);
+ mutex_lock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+ ret = ipa3_teardown_sys_pipe(rmnet_ipa3_ctx->ipa3_to_apps_hdl);
if (ret < 0)
IPAWANERR("Failed to teardown IPA->APPS pipe\n");
else
- ipa3_to_apps_hdl = -1;
- mutex_unlock(&ipa_to_apps_pipe_handle_guard);
- unregister_netdev(ipa3_netdevs[0]);
+ rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
+ mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+ unregister_netdev(IPA_NETDEV());
ret = ipa3_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
IPA_RM_RESOURCE_Q6_CONS);
if (ret < 0)
@@ -2096,20 +2145,18 @@ static int ipa3_wwan_remove(struct platform_device *pdev)
IPA_RM_RESOURCE_WWAN_0_PROD, ret);
cancel_work_sync(&ipa3_tx_wakequeue_work);
cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
- free_netdev(ipa3_netdevs[0]);
- ipa3_netdevs[0] = NULL;
+ free_netdev(IPA_NETDEV());
+ rmnet_ipa3_ctx->wwan_priv = NULL;
/* No need to remove wwan_ioctl during SSR */
- if (!atomic_read(&is_ssr))
+ if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
ipa3_wan_ioctl_deinit();
ipa3_del_dflt_wan_rt_tables();
ipa3_del_a7_qmap_hdr();
ipa3_del_mux_qmap_hdrs();
if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false)
ipa3_wwan_del_ul_flt_rule_to_ipa();
- /* clean up cached QMI msg/handlers */
- ipa3_qmi_service_exit();
ipa3_cleanup_deregister_intf();
- atomic_set(&is_initialized, 0);
+ atomic_set(&rmnet_ipa3_ctx->is_initialized, 0);
pr_info("rmnet_ipa completed deinitialization\n");
return 0;
}
@@ -2131,7 +2178,7 @@ static int ipa3_wwan_remove(struct platform_device *pdev)
*/
static int rmnet_ipa_ap_suspend(struct device *dev)
{
- struct net_device *netdev = ipa3_netdevs[0];
+ struct net_device *netdev = IPA_NETDEV();
struct ipa3_wwan_private *wwan_ptr = netdev_priv(netdev);
IPAWANDBG("Enter...\n");
@@ -2162,7 +2209,7 @@ static int rmnet_ipa_ap_suspend(struct device *dev)
*/
static int rmnet_ipa_ap_resume(struct device *dev)
{
- struct net_device *netdev = ipa3_netdevs[0];
+ struct net_device *netdev = IPA_NETDEV();
IPAWANDBG("Enter...\n");
netif_wake_queue(netdev);
@@ -2206,35 +2253,38 @@ static int ipa3_ssr_notifier_cb(struct notifier_block *this,
if (ipa3_rmnet_ctx.ipa_rmnet_ssr) {
if (SUBSYS_BEFORE_SHUTDOWN == code) {
pr_info("IPA received MPSS BEFORE_SHUTDOWN\n");
- atomic_set(&is_ssr, 1);
+ atomic_set(&rmnet_ipa3_ctx->is_ssr, 1);
ipa3_q6_cleanup();
- netif_stop_queue(ipa3_netdevs[0]);
+ netif_stop_queue(IPA_NETDEV());
ipa3_qmi_stop_workqueues();
ipa3_wan_ioctl_stop_qmi_messages();
ipa_stop_polling_stats();
- atomic_set(&is_ssr, 1);
- if (atomic_read(&is_initialized))
+ atomic_set(&rmnet_ipa3_ctx->is_ssr, 1);
+ if (atomic_read(&rmnet_ipa3_ctx->is_initialized))
platform_driver_unregister(&rmnet_ipa_driver);
pr_info("IPA BEFORE_SHUTDOWN handling is complete\n");
return NOTIFY_DONE;
}
if (SUBSYS_AFTER_SHUTDOWN == code) {
pr_info("IPA received MPSS AFTER_SHUTDOWN\n");
- if (atomic_read(&is_ssr))
+ if (atomic_read(&rmnet_ipa3_ctx->is_ssr))
ipa3_q6_pipe_reset();
pr_info("IPA AFTER_SHUTDOWN handling is complete\n");
return NOTIFY_DONE;
}
if (SUBSYS_AFTER_POWERUP == code) {
pr_info("IPA received MPSS AFTER_POWERUP\n");
- if (!atomic_read(&is_initialized)
- && atomic_read(&is_ssr))
+ if (!atomic_read(&rmnet_ipa3_ctx->is_initialized)
+ && atomic_read(&rmnet_ipa3_ctx->is_ssr))
platform_driver_register(&rmnet_ipa_driver);
pr_info("IPA AFTER_POWERUP handling is complete\n");
return NOTIFY_DONE;
}
if (SUBSYS_BEFORE_POWERUP == code) {
pr_info("IPA received MPSS BEFORE_POWERUP\n");
+ if (atomic_read(&rmnet_ipa3_ctx->is_ssr))
+ /* clean up cached QMI msg/handlers */
+ ipa3_qmi_service_exit();
ipa3_proxy_clk_vote();
pr_info("IPA BEFORE_POWERUP handling is complete\n");
return NOTIFY_DONE;
@@ -2427,7 +2477,7 @@ int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
return -EFAULT;
}
- mux_id = ipa3_mux_channel[index].mux_id;
+ mux_id = rmnet_ipa3_ctx->mux_channel[index].mux_id;
ipa3_rmnet_ctx.metered_mux_id = mux_id;
memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
@@ -2497,14 +2547,14 @@ int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
GFP_KERNEL);
if (!req) {
IPAWANERR("Can't allocate memory for stats message\n");
- return rc;
+ return -ENOMEM;
}
resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
GFP_KERNEL);
if (!resp) {
IPAWANERR("Can't allocate memory for stats message\n");
kfree(req);
- return rc;
+ return -ENOMEM;
}
memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
@@ -2666,14 +2716,14 @@ void ipa3_broadcast_quota_reach_ind(u32 mux_id)
}
/* posting msg for L-release for CNE */
res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
- "UPSTREAM=%s", ipa3_mux_channel[index].vchannel_name);
+ "UPSTREAM=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
if (IPA_QUOTA_REACH_IF_NAME_MAX_SIZE <= res) {
IPAWANERR("message too long (%d)", res);
return;
}
/* posting msg for M-release for CNE */
res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
- "INTERFACE=%s", ipa3_mux_channel[index].vchannel_name);
+ "INTERFACE=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
if (IPA_QUOTA_REACH_IF_NAME_MAX_SIZE <= res) {
IPAWANERR("message too long (%d)", res);
return;
@@ -2681,7 +2731,8 @@ void ipa3_broadcast_quota_reach_ind(u32 mux_id)
IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
alert_msg, iface_name_l, iface_name_m);
- kobject_uevent_env(&(ipa3_netdevs[0]->dev.kobj), KOBJ_CHANGE, envp);
+ kobject_uevent_env(&(IPA_NETDEV()->dev.kobj),
+ KOBJ_CHANGE, envp);
}
/**
@@ -2716,32 +2767,40 @@ void ipa3_q6_handshake_complete(bool ssr_bootup)
static int __init ipa3_wwan_init(void)
{
- atomic_set(&is_initialized, 0);
- atomic_set(&is_ssr, 0);
+ rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL);
+ if (!rmnet_ipa3_ctx) {
+ IPAWANERR("no memory\n");
+ return -ENOMEM;
+ }
- mutex_init(&ipa_to_apps_pipe_handle_guard);
- ipa3_to_apps_hdl = -1;
+ atomic_set(&rmnet_ipa3_ctx->is_initialized, 0);
+ atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
+
+ mutex_init(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+ rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
/* Register for Modem SSR */
- ipa3_subsys_notify_handle = subsys_notif_register_notifier(
+ rmnet_ipa3_ctx->subsys_notify_handle = subsys_notif_register_notifier(
SUBSYS_MODEM,
&ipa3_ssr_notifier);
- if (!IS_ERR(ipa3_subsys_notify_handle))
+ if (!IS_ERR(rmnet_ipa3_ctx->subsys_notify_handle))
return platform_driver_register(&rmnet_ipa_driver);
else
- return (int)PTR_ERR(ipa3_subsys_notify_handle);
- }
+ return (int)PTR_ERR(rmnet_ipa3_ctx->subsys_notify_handle);
+}
static void __exit ipa3_wwan_cleanup(void)
{
int ret;
- mutex_destroy(&ipa_to_apps_pipe_handle_guard);
- ret = subsys_notif_unregister_notifier(ipa3_subsys_notify_handle,
- &ipa3_ssr_notifier);
+ mutex_destroy(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+ ret = subsys_notif_unregister_notifier(
+ rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
if (ret)
IPAWANERR(
"Error subsys_notif_unregister_notifier system %s, ret=%d\n",
SUBSYS_MODEM, ret);
platform_driver_unregister(&rmnet_ipa_driver);
+ kfree(rmnet_ipa3_ctx);
+ rmnet_ipa3_ctx = NULL;
}
static void ipa3_wwan_msg_free_cb(void *buff, u32 len, u32 type)
diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
index 3bb07043a91b..b629ec740b1e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
@@ -98,8 +98,6 @@ static void teth_bridge_ipa_cb(void *priv, enum ipa_dp_evt_type evt,
*/
int ipa3_teth_bridge_init(struct teth_bridge_init_params *params)
{
- int res = 0;
-
TETH_DBG_FUNC_ENTRY();
if (!params) {
@@ -112,28 +110,8 @@ int ipa3_teth_bridge_init(struct teth_bridge_init_params *params)
params->private_data = NULL;
params->skip_ep_cfg = true;
- /* Build dependency graph */
- res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
- IPA_RM_RESOURCE_Q6_CONS);
- if (res < 0 && res != -EINPROGRESS) {
- TETH_ERR("ipa3_rm_add_dependency() failed.\n");
- goto bail;
- }
- res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
- IPA_RM_RESOURCE_USB_CONS);
- if (res < 0 && res != -EINPROGRESS) {
- ipa3_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
- IPA_RM_RESOURCE_Q6_CONS);
- TETH_ERR("ipa3_rm_add_dependency() failed.\n");
- goto bail;
- }
-
- res = 0;
- goto bail;
-
-bail:
TETH_DBG_FUNC_EXIT();
- return res;
+ return 0;
}
/**
@@ -162,7 +140,40 @@ int ipa3_teth_bridge_disconnect(enum ipa_client_type client)
*/
int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
{
- return 0;
+ int res = 0;
+
+ TETH_DBG_FUNC_ENTRY();
+
+ /* Build the dependency graph. The first add_dependency call is
+ * synchronous so that the IPA clocks are up before we continue and
+ * notify the USB driver that it may proceed.
+ */
+ res = ipa3_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ if (res < 0) {
+ TETH_ERR("ipa3_rm_add_dependency() failed.\n");
+ goto bail;
+ }
+
+ /* This add_dependency call can't be synchronous since it would block
+ * until the USB status is connected (which can happen only after the
+ * tethering bridge is connected). The clocks are already up, so the
+ * call doesn't need to block.
+ */
+ res = ipa3_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+ IPA_RM_RESOURCE_USB_CONS);
+ if (res < 0 && res != -EINPROGRESS) {
+ ipa3_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_Q6_CONS);
+ TETH_ERR("ipa3_rm_add_dependency() failed.\n");
+ goto bail;
+ }
+
+ res = 0;
+
+bail:
+ TETH_DBG_FUNC_EXIT();
+ return res;
}
static long ipa3_teth_bridge_ioctl(struct file *filp,
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 0c618d31d4ce..a1e32aa775a9 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -80,8 +80,8 @@ enum ipa_aggr_type {
* enum ipa_aggr_mode - global aggregation mode
*/
enum ipa_aggr_mode {
- IPA_MBIM,
- IPA_QCNCM,
+ IPA_MBIM_AGGR,
+ IPA_QCNCM_AGGR,
};
/**
@@ -239,6 +239,13 @@ struct ipa_ep_cfg_mode {
* aggregation closure. Valid for Output Pipes only (IPA
* Producer). EOF affects only Pipes configured for
* generic aggregation.
+ * @aggr_hard_byte_limit_en: If set to 1, byte-limit aggregation for this
+ * pipe will apply a hard-limit behavior which will not
+ * allow frames to be closed with more than byte-limit
+ * bytes. If set to 0, the previous byte-limit behavior
+ * applies - frames close once a packet causes the
+ * accumulated byte count to cross the byte-limit
+ * threshold (the closed frame will contain that packet).
*/
struct ipa_ep_cfg_aggr {
enum ipa_aggr_en_type aggr_en;
@@ -246,6 +253,7 @@ struct ipa_ep_cfg_aggr {
u32 aggr_byte_limit;
u32 aggr_time_limit;
u32 aggr_pkt_limit;
+ u32 aggr_hard_byte_limit_en;
};
/**
@@ -323,11 +331,16 @@ enum ipa_cs_offload {
* checksum meta info header (4 bytes) starts (UL). Values are 0-15, which
* mean 0 - 60 byte checksum header offset. Valid for input
* pipes only (IPA consumer)
+ * @gen_qmb_master_sel: Select bit for ENDP GEN-QMB master. This is used to
+ * separate DDR & PCIe transactions in order to limit them as
+ * a group (using the MAX_WRITES/READS limitation). Valid for input and
+ * output pipes (IPA consumer+producer)
*/
struct ipa_ep_cfg_cfg {
bool frag_offload_en;
enum ipa_cs_offload cs_offload_en;
u8 cs_metadata_hdr_offset;
+ u8 gen_qmb_master_sel;
};
/**
@@ -1130,151 +1143,6 @@ struct ipa_gsi_ep_config {
int ee;
};
-enum ipa_usb_teth_prot {
- IPA_USB_RNDIS = 0,
- IPA_USB_ECM = 1,
- IPA_USB_RMNET = 2,
- IPA_USB_MBIM = 3,
- IPA_USB_DIAG = 4,
- IPA_USB_MAX_TETH_PROT_SIZE
-};
-
-/**
- * ipa_usb_teth_params - parameters for RDNIS/ECM initialization API
- *
- * @host_ethaddr: host Ethernet address in network order
- * @device_ethaddr: device Ethernet address in network order
- */
-struct ipa_usb_teth_params {
- u8 host_ethaddr[ETH_ALEN];
- u8 device_ethaddr[ETH_ALEN];
-};
-
-enum ipa_usb_notify_event {
- IPA_USB_DEVICE_READY,
- IPA_USB_REMOTE_WAKEUP,
- IPA_USB_SUSPEND_COMPLETED
-};
-
-enum ipa_usb_max_usb_packet_size {
- IPA_USB_HIGH_SPEED_512B = 512,
- IPA_USB_SUPER_SPEED_1024B = 1024
-};
-
-/**
- * ipa_usb_xdci_chan_scratch - xDCI protocol SW config area of
- * channel scratch
- *
- * @last_trb_addr: Address (LSB - based on alignment restrictions) of
- * last TRB in queue. Used to identify roll over case
- * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregation
- * configuration). Must be aligned to max USB Packet Size.
- * Should be 1 <= const_buffer_size <= 31.
- * @depcmd_low_addr: Used to generate "Update Transfer" command
- * @depcmd_hi_addr: Used to generate "Update Transfer" command.
- */
-struct ipa_usb_xdci_chan_scratch {
- u16 last_trb_addr;
- u8 const_buffer_size;
- u32 depcmd_low_addr;
- u8 depcmd_hi_addr;
-};
-
-/**
- * ipa_usb_xdci_chan_params - xDCI channel related properties
- *
- * @client: type of "client"
- * @ipa_ep_cfg: IPA EP configuration
- * @keep_ipa_awake: when true, IPA will not be clock gated
- * @teth_prot: tethering protocol for which the channel is created
- * @gevntcount_low_addr: GEVNCOUNT low address for event scratch
- * @gevntcount_hi_addr: GEVNCOUNT high address for event scratch
- * @dir: channel direction
- * @xfer_ring_len: length of transfer ring in bytes (must be integral
- * multiple of transfer element size - 16B for xDCI)
- * @xfer_ring_base_addr: physical base address of transfer ring. Address must be
- * aligned to xfer_ring_len rounded to power of two
- * @xfer_scratch: parameters for xDCI channel scratch
- *
- */
-struct ipa_usb_xdci_chan_params {
- /* IPA EP params */
- enum ipa_client_type client;
- struct ipa_ep_cfg ipa_ep_cfg;
- bool keep_ipa_awake;
- enum ipa_usb_teth_prot teth_prot;
- /* event ring params */
- u32 gevntcount_low_addr;
- u8 gevntcount_hi_addr;
- /* transfer ring params */
- enum gsi_chan_dir dir;
- u16 xfer_ring_len;
- u64 xfer_ring_base_addr;
- struct ipa_usb_xdci_chan_scratch xfer_scratch;
-};
-
-/**
- * ipa_usb_chan_out_params - out parameters for channel request
- *
- * @clnt_hdl: opaque client handle assigned by IPA to client
- * @db_reg_phs_addr_lsb: Physical address of doorbell register where the 32
- * LSBs of the doorbell value should be written
- * @db_reg_phs_addr_msb: Physical address of doorbell register where the 32
- * MSBs of the doorbell value should be written
- *
- */
-struct ipa_req_chan_out_params {
- u32 clnt_hdl;
- u32 db_reg_phs_addr_lsb;
- u32 db_reg_phs_addr_msb;
-};
-
-/**
- * ipa_usb_teth_prot_params - parameters for connecting RNDIS
- *
- * @max_xfer_size_bytes_to_dev: max size of UL packets in bytes
- * @max_packet_number_to_dev: max number of UL aggregated packets
- * @max_xfer_size_bytes_to_host: max size of DL packets in bytes
- *
- */
-struct ipa_usb_teth_prot_params {
- u32 max_xfer_size_bytes_to_dev;
- u32 max_packet_number_to_dev;
- u32 max_xfer_size_bytes_to_host;
-};
-
-/**
- * ipa_usb_xdci_connect_params - parameters required to start IN, OUT
- * channels, and connect RNDIS/ECM/teth_bridge
- *
- * @max_pkt_size: high speed or full speed
- * @ipa_to_usb_xferrscidx: Transfer Resource Index (XferRscIdx) for IN channel.
- * The hardware-assigned transfer resource index for the
- * transfer, which was returned in response to the
- * Start Transfer command. This field is used for
- * "Update Transfer" command.
- * Should be 0 =< ipa_to_usb_xferrscidx <= 127.
- * @ipa_to_usb_xferrscidx_valid: true if xferRscIdx should be updated for IN
- * channel
- * @usb_to_ipa_xferrscidx: Transfer Resource Index (XferRscIdx) for OUT channel
- * Should be 0 =< usb_to_ipa_xferrscidx <= 127.
- * @usb_to_ipa_xferrscidx_valid: true if xferRscIdx should be updated for OUT
- * channel
- * @teth_prot: tethering protocol
- * @teth_prot_params: parameters for connecting the tethering protocol.
- * @max_supported_bandwidth_mbps: maximum bandwidth need of the client in Mbps
- */
-struct ipa_usb_xdci_connect_params {
- enum ipa_usb_max_usb_packet_size max_pkt_size;
- u8 ipa_to_usb_xferrscidx;
- bool ipa_to_usb_xferrscidx_valid;
- u8 usb_to_ipa_xferrscidx;
- bool usb_to_ipa_xferrscidx_valid;
- enum ipa_usb_teth_prot teth_prot;
- struct ipa_usb_teth_prot_params teth_prot_params;
- u32 max_supported_bandwidth_mbps;
-};
-
#if defined CONFIG_IPA || defined CONFIG_IPA3
/*
@@ -1465,6 +1333,10 @@ int ipa_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
* if uC not ready only, register callback
*/
int ipa_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
+/*
+ * To de-register uC ready callback
+ */
+int ipa_uc_dereg_rdyCB(void);
int ipa_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
int ipa_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
@@ -1580,117 +1452,6 @@ int ipa_mhi_resume(void);
void ipa_mhi_destroy(void);
/*
- * IPA_USB
- */
-
-/**
- * ipa_usb_init_teth_prot - Peripheral should call this function to initialize
- * RNDIS/ECM/teth_bridge, prior to calling ipa_usb_xdci_connect()
- *
- * @usb_teth_type: tethering protocol type
- * @teth_params: pointer to tethering protocol parameters.
- * Should be struct ipa_usb_teth_params for RNDIS/ECM,
- * or NULL for teth_bridge
- * @ipa_usb_notify_cb: will be called to notify USB driver on certain events
- * @user_data: cookie used for ipa_usb_notify_cb
- *
- * @Return 0 on success, negative on failure
- */
-int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
- struct ipa_usb_teth_params *teth_params,
- int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
- void *),
- void *user_data);
-
-/**
- * ipa_usb_xdci_connect - Peripheral should call this function to start IN &
- * OUT xDCI channels, and connect RNDIS/ECM/MBIM/RMNET.
- * For DIAG, only starts IN channel.
- *
- * @ul_chan_params: parameters for allocating UL xDCI channel. containing
- * required info on event and transfer rings, and IPA EP
- * configuration
- * @ul_out_params: [out] opaque client handle assigned by IPA to client & DB
- * registers physical address for UL channel
- * @dl_chan_params: parameters for allocating DL xDCI channel. containing
- * required info on event and transfer rings, and IPA EP
- * configuration
- * @dl_out_params: [out] opaque client handle assigned by IPA to client & DB
- * registers physical address for DL channel
- * @connect_params: handles and scratch params of the required channels,
- * tethering protocol and the tethering protocol parameters.
- *
- * Note: Should not be called from atomic context
- *
- * @Return 0 on success, negative on failure
- */
-int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
- struct ipa_usb_xdci_chan_params *dl_chan_params,
- struct ipa_req_chan_out_params *ul_out_params,
- struct ipa_req_chan_out_params *dl_out_params,
- struct ipa_usb_xdci_connect_params *connect_params);
-
-/**
- * ipa_usb_xdci_disconnect - Peripheral should call this function to stop
- * IN & OUT xDCI channels
- * For DIAG, only stops IN channel.
- *
- * @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect()
- * for OUT channel
- * @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect()
- * for IN channel
- * @teth_prot: tethering protocol
- *
- * Note: Should not be called from atomic context
- *
- * @Return 0 on success, negative on failure
- */
-int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
- enum ipa_usb_teth_prot teth_prot);
-
-/**
- * ipa_usb_deinit_teth_prot - Peripheral should call this function to deinit
- * RNDIS/ECM/MBIM/RMNET
- *
- * @teth_prot: tethering protocol
- *
- * @Return 0 on success, negative on failure
- */
-int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot);
-
-/**
- * ipa_usb_xdci_suspend - Peripheral should call this function to suspend
- * IN & OUT xDCI channels
- *
- * @ul_clnt_hdl: client handle previously obtained from
- * ipa_usb_xdci_connect() for OUT channel
- * @dl_clnt_hdl: client handle previously obtained from
- * ipa_usb_xdci_connect() for IN channel
- * @teth_prot: tethering protocol
- *
- * Note: Should not be called from atomic context
- *
- * @Return 0 on success, negative on failure
- */
-int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
- enum ipa_usb_teth_prot teth_prot);
-
-/**
- * ipa_usb_xdci_resume - Peripheral should call this function to resume
- * IN & OUT xDCI channels
- *
- * @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect()
- * for OUT channel
- * @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect()
- * for IN channel
- *
- * Note: Should not be called from atomic context
- *
- * @Return 0 on success, negative on failure
- */
-int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl);
-
-/*
* mux id
*/
int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in);
@@ -2166,6 +1927,11 @@ static inline int ipa_uc_reg_rdyCB(
return -EPERM;
}
+static inline int ipa_uc_dereg_rdyCB(void)
+{
+ return -EPERM;
+}
+
/*
* Resource manager
@@ -2414,51 +2180,6 @@ static inline void ipa_mhi_destroy(void)
}
/*
- * IPA_USB
- */
-
-static inline int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
- struct ipa_usb_teth_params *teth_params,
- int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
- void *),
- void *user_data)
-{
- return -EPERM;
-}
-
-static inline int ipa_usb_xdci_connect(
- struct ipa_usb_xdci_chan_params *ul_chan_params,
- struct ipa_usb_xdci_chan_params *dl_chan_params,
- struct ipa_req_chan_out_params *ul_out_params,
- struct ipa_req_chan_out_params *dl_out_params,
- struct ipa_usb_xdci_connect_params *connect_params)
-{
- return -EPERM;
-}
-
-static inline int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
- enum ipa_usb_teth_prot teth_prot)
-{
- return -EPERM;
-}
-
-static inline int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
-{
- return -EPERM;
-}
-
-static inline int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
- enum ipa_usb_teth_prot teth_prot)
-{
- return -EPERM;
-}
-
-static inline int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl)
-{
- return -EPERM;
-}
-
-/*
* mux id
*/
static inline int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
diff --git a/include/linux/ipa_usb.h b/include/linux/ipa_usb.h
new file mode 100644
index 000000000000..c3885c72e5ea
--- /dev/null
+++ b/include/linux/ipa_usb.h
@@ -0,0 +1,321 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_USB_H_
+#define _IPA_USB_H_
+
+enum ipa_usb_teth_prot {
+ IPA_USB_RNDIS = 0,
+ IPA_USB_ECM = 1,
+ IPA_USB_RMNET = 2,
+ IPA_USB_MBIM = 3,
+ IPA_USB_DIAG = 4,
+ IPA_USB_MAX_TETH_PROT_SIZE
+};
+
+/**
+ * ipa_usb_teth_params - parameters for RNDIS/ECM initialization API
+ *
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ */
+struct ipa_usb_teth_params {
+ u8 host_ethaddr[ETH_ALEN];
+ u8 device_ethaddr[ETH_ALEN];
+};
+
+enum ipa_usb_notify_event {
+ IPA_USB_DEVICE_READY,
+ IPA_USB_REMOTE_WAKEUP,
+ IPA_USB_SUSPEND_COMPLETED
+};
+
+enum ipa_usb_max_usb_packet_size {
+ IPA_USB_HIGH_SPEED_512B = 512,
+ IPA_USB_SUPER_SPEED_1024B = 1024
+};
+
+/**
+ * ipa_usb_teth_prot_params - parameters for connecting RNDIS
+ *
+ * @max_xfer_size_bytes_to_dev: max size of UL packets in bytes
+ * @max_packet_number_to_dev: max number of UL aggregated packets
+ * @max_xfer_size_bytes_to_host: max size of DL packets in bytes
+ *
+ */
+struct ipa_usb_teth_prot_params {
+ u32 max_xfer_size_bytes_to_dev;
+ u32 max_packet_number_to_dev;
+ u32 max_xfer_size_bytes_to_host;
+};
+
+/**
+ * ipa_usb_xdci_connect_params - parameters required to start IN, OUT
+ * channels, and connect RNDIS/ECM/teth_bridge
+ *
+ * @max_pkt_size: USB max packet size: high speed (512B) or super speed (1024B)
+ * @ipa_to_usb_xferrscidx: Transfer Resource Index (XferRscIdx) for IN channel.
+ * The hardware-assigned transfer resource index for the
+ * transfer, which was returned in response to the
+ * Start Transfer command. This field is used for
+ * "Update Transfer" command.
+ * Should be 0 <= ipa_to_usb_xferrscidx <= 127.
+ * @ipa_to_usb_xferrscidx_valid: true if xferRscIdx should be updated for IN
+ * channel
+ * @usb_to_ipa_xferrscidx: Transfer Resource Index (XferRscIdx) for OUT channel
+ * Should be 0 <= usb_to_ipa_xferrscidx <= 127.
+ * @usb_to_ipa_xferrscidx_valid: true if xferRscIdx should be updated for OUT
+ * channel
+ * @teth_prot: tethering protocol
+ * @teth_prot_params: parameters for connecting the tethering protocol.
+ * @max_supported_bandwidth_mbps: maximum bandwidth need of the client in Mbps
+ */
+struct ipa_usb_xdci_connect_params {
+ enum ipa_usb_max_usb_packet_size max_pkt_size;
+ u8 ipa_to_usb_xferrscidx;
+ bool ipa_to_usb_xferrscidx_valid;
+ u8 usb_to_ipa_xferrscidx;
+ bool usb_to_ipa_xferrscidx_valid;
+ enum ipa_usb_teth_prot teth_prot;
+ struct ipa_usb_teth_prot_params teth_prot_params;
+ u32 max_supported_bandwidth_mbps;
+};
+
+/**
+ * ipa_usb_xdci_chan_scratch - xDCI protocol SW config area of
+ * channel scratch
+ *
+ * @last_trb_addr: Address (LSB - based on alignment restrictions) of
+ * last TRB in queue. Used to identify roll over case
+ * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregation
+ * configuration). Must be aligned to max USB Packet Size.
+ * Should be 1 <= const_buffer_size <= 31.
+ * @depcmd_low_addr: Used to generate "Update Transfer" command
+ * @depcmd_hi_addr: Used to generate "Update Transfer" command.
+ */
+struct ipa_usb_xdci_chan_scratch {
+ u16 last_trb_addr;
+ u8 const_buffer_size;
+ u32 depcmd_low_addr;
+ u8 depcmd_hi_addr;
+};
+
+/**
+ * ipa_usb_xdci_chan_params - xDCI channel related properties
+ *
+ * @client: type of "client"
+ * @ipa_ep_cfg: IPA EP configuration
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ * @teth_prot: tethering protocol for which the channel is created
+ * @gevntcount_low_addr: GEVNTCOUNT low address for event scratch
+ * @gevntcount_hi_addr: GEVNTCOUNT high address for event scratch
+ * @dir: channel direction
+ * @xfer_ring_len: length of transfer ring in bytes (must be integral
+ * multiple of transfer element size - 16B for xDCI)
+ * @xfer_ring_base_addr: physical base address of transfer ring. Address must be
+ * aligned to xfer_ring_len rounded to power of two
+ * @xfer_scratch: parameters for xDCI channel scratch
+ *
+ */
+struct ipa_usb_xdci_chan_params {
+ /* IPA EP params */
+ enum ipa_client_type client;
+ struct ipa_ep_cfg ipa_ep_cfg;
+ bool keep_ipa_awake;
+ enum ipa_usb_teth_prot teth_prot;
+ /* event ring params */
+ u32 gevntcount_low_addr;
+ u8 gevntcount_hi_addr;
+ /* transfer ring params */
+ enum gsi_chan_dir dir;
+ u16 xfer_ring_len;
+ u64 xfer_ring_base_addr;
+ struct ipa_usb_xdci_chan_scratch xfer_scratch;
+};
+
+/**
+ * ipa_req_chan_out_params - out parameters for channel request
+ *
+ * @clnt_hdl: opaque client handle assigned by IPA to client
+ * @db_reg_phs_addr_lsb: Physical address of doorbell register where the 32
+ * LSBs of the doorbell value should be written
+ * @db_reg_phs_addr_msb: Physical address of doorbell register where the 32
+ * MSBs of the doorbell value should be written
+ *
+ */
+struct ipa_req_chan_out_params {
+ u32 clnt_hdl;
+ u32 db_reg_phs_addr_lsb;
+ u32 db_reg_phs_addr_msb;
+};
+
+#ifdef CONFIG_IPA3
+
+/**
+ * ipa_usb_init_teth_prot - Peripheral should call this function to initialize
+ * RNDIS/ECM/teth_bridge/DPL, prior to calling ipa_usb_xdci_connect()
+ *
+ * @teth_prot: tethering protocol type
+ * @teth_params: pointer to tethering protocol parameters.
+ * Should be struct ipa_usb_teth_params for RNDIS/ECM,
+ * or NULL for teth_bridge
+ * @ipa_usb_notify_cb: will be called to notify USB driver on certain events
+ * @user_data: cookie used for ipa_usb_notify_cb
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
+ struct ipa_usb_teth_params *teth_params,
+ int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
+ void *),
+ void *user_data);
+
+/**
+ * ipa_usb_xdci_connect - Peripheral should call this function to start IN &
+ * OUT xDCI channels, and connect RNDIS/ECM/MBIM/RMNET.
+ * For DPL, only starts IN channel.
+ *
+ * @ul_chan_params: parameters for allocating UL xDCI channel, containing
+ * required info on event and transfer rings, and IPA EP
+ * configuration
+ * @ul_out_params: [out] opaque client handle assigned by IPA to client & DB
+ * registers physical address for UL channel
+ * @dl_chan_params: parameters for allocating DL xDCI channel, containing
+ * required info on event and transfer rings, and IPA EP
+ * configuration
+ * @dl_out_params: [out] opaque client handle assigned by IPA to client & DB
+ * registers physical address for DL channel
+ * @connect_params: handles and scratch params of the required channels,
+ * tethering protocol and the tethering protocol parameters.
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
+ struct ipa_usb_xdci_chan_params *dl_chan_params,
+ struct ipa_req_chan_out_params *ul_out_params,
+ struct ipa_req_chan_out_params *dl_out_params,
+ struct ipa_usb_xdci_connect_params *connect_params);
+
+/**
+ * ipa_usb_xdci_disconnect - Peripheral should call this function to stop
+ * IN & OUT xDCI channels
+ * For DPL, only stops IN channel.
+ *
+ * @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect()
+ * for OUT channel
+ * @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect()
+ * for IN channel
+ * @teth_prot: tethering protocol
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot);
+
+/**
+ * ipa_usb_deinit_teth_prot - Peripheral should call this function to deinit
+ * RNDIS/ECM/MBIM/RMNET
+ *
+ * @teth_prot: tethering protocol
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot);
+
+/**
+ * ipa_usb_xdci_suspend - Peripheral should call this function to suspend
+ * IN & OUT or DPL xDCI channels
+ *
+ * @ul_clnt_hdl: client handle previously obtained from
+ * ipa_usb_xdci_connect() for OUT channel
+ * @dl_clnt_hdl: client handle previously obtained from
+ * ipa_usb_xdci_connect() for IN channel
+ * @teth_prot: tethering protocol
+ *
+ * Note: Should not be called from atomic context
+ * Note: for DPL, the UL client handle is ignored as irrelevant
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot);
+
+/**
+ * ipa_usb_xdci_resume - Peripheral should call this function to resume
+ * IN & OUT or DPL xDCI channels
+ *
+ * @ul_clnt_hdl: client handle received from ipa_usb_xdci_connect()
+ * for OUT channel
+ * @dl_clnt_hdl: client handle received from ipa_usb_xdci_connect()
+ * for IN channel
+ * @teth_prot: tethering protocol
+ *
+ * Note: Should not be called from atomic context
+ * Note: for DPL, the UL client handle is ignored as irrelevant
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot);
+
+#else /* CONFIG_IPA3 */
+
+static inline int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
+ struct ipa_usb_teth_params *teth_params,
+ int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
+ void *),
+ void *user_data)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_xdci_connect(
+ struct ipa_usb_xdci_chan_params *ul_chan_params,
+ struct ipa_usb_xdci_chan_params *dl_chan_params,
+ struct ipa_req_chan_out_params *ul_out_params,
+ struct ipa_req_chan_out_params *dl_out_params,
+ struct ipa_usb_xdci_connect_params *connect_params)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot)
+{
+ return -EPERM;
+}
+
+static inline int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+ enum ipa_usb_teth_prot teth_prot)
+{
+ return -EPERM;
+}
+
+
+#endif /* CONFIG_IPA3 */
+
+#endif /* _IPA_USB_H_ */
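
The new header is easiest to digest as an end-to-end call flow. The fragment below is an illustrative sketch and not part of the patch: it shows how a peripheral (gadget) driver might exercise the RNDIS path, registering the notify callback, initializing the protocol, filling the UL/DL channel parameters, connecting, and tearing down. The IPA_CLIENT_USB_PROD/IPA_CLIENT_USB_CONS client types are assumed from the existing msm_ipa.h enum; ring addresses, lengths, scratch values and the chosen sizes are placeholders a real driver derives from its xDCI controller programming.

/* Illustrative usage sketch (not part of this patch). Assumes the
 * IPA_CLIENT_USB_* client types from msm_ipa.h; ring/scratch fields
 * and the sizes below are placeholders from the xDCI controller setup.
 */
#include <linux/if_ether.h>
#include <linux/ipa.h>
#include <linux/ipa_usb.h>

static int example_notify_cb(enum ipa_usb_notify_event event, void *user_data)
{
	/* e.g. trigger remote-wakeup handling on IPA_USB_REMOTE_WAKEUP */
	return 0;
}

static int example_rndis_bringup(void)
{
	struct ipa_usb_teth_params teth = {
		.host_ethaddr   = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
		.device_ethaddr = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 },
	};
	struct ipa_usb_xdci_chan_params ul = { }, dl = { };
	struct ipa_req_chan_out_params ul_out, dl_out;
	struct ipa_usb_xdci_connect_params conn = { };
	int ret;

	ret = ipa_usb_init_teth_prot(IPA_USB_RNDIS, &teth,
				     example_notify_cb, NULL);
	if (ret)
		return ret;

	/* UL: USB -> IPA (OUT endpoint), DL: IPA -> USB (IN endpoint) */
	ul.client = IPA_CLIENT_USB_PROD;
	dl.client = IPA_CLIENT_USB_CONS;
	ul.teth_prot = dl.teth_prot = IPA_USB_RNDIS;
	/* dir, gevntcount_*, xfer_ring_* and xfer_scratch come from the
	 * xDCI event buffer and TRB ring programming; omitted here.
	 */

	conn.teth_prot = IPA_USB_RNDIS;
	conn.max_pkt_size = IPA_USB_SUPER_SPEED_1024B;
	conn.teth_prot_params.max_xfer_size_bytes_to_dev = 16384;
	conn.teth_prot_params.max_packet_number_to_dev = 1;
	conn.teth_prot_params.max_xfer_size_bytes_to_host = 16384;
	conn.max_supported_bandwidth_mbps = 1200;

	ret = ipa_usb_xdci_connect(&ul, &dl, &ul_out, &dl_out, &conn);
	if (ret)
		goto out_deinit;

	/* Data path is up. The driver rings doorbells at the physical
	 * addresses reported in {ul,dl}_out.db_reg_phs_addr_{lsb,msb};
	 * ipa_usb_xdci_suspend()/resume() may bracket idle periods.
	 */

	ret = ipa_usb_xdci_disconnect(ul_out.clnt_hdl, dl_out.clnt_hdl,
				      IPA_USB_RNDIS);
out_deinit:
	ipa_usb_deinit_teth_prot(IPA_USB_RNDIS);
	return ret;
}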
diff --git a/include/linux/msm-sps.h b/include/linux/msm-sps.h
index 4533289bcd4c..59a7b82d7808 100644
--- a/include/linux/msm-sps.h
+++ b/include/linux/msm-sps.h
@@ -1621,7 +1621,7 @@ static inline int sps_bam_process_irq(unsigned long dev)
}
static inline int sps_get_bam_addr(unsigned long dev, phys_addr_t *base,
- u32 *size);
+ u32 *size)
{
return -EPERM;
}
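
Jumping ahead to the msm_ipa.h hunk below: the two new RM resource names (IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD and IPA_RM_RESOURCE_USB_DPL_CONS) exist because DPL has only a single IN (device-to-host) channel, so a placeholder producer keeps the resource-manager dependency graph consistent. The sketch that follows shows how such dependencies could be wired with the existing ipa_rm_add_dependency() API; the Q6 pairing is an assumption for illustration only, since the dependencies actually registered for DPL are set up inside the IPA USB client.

/* Hedged sketch only: the pairing below is an assumption; the
 * dependencies actually used for DPL live in the IPA USB client code.
 */
#include <linux/ipa.h>

static int example_dpl_rm_dependencies(void)
{
	int ret;

	ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
				    IPA_RM_RESOURCE_Q6_CONS);
	if (ret)
		return ret;

	return ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
				     IPA_RM_RESOURCE_USB_DPL_CONS);
}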
diff --git a/include/linux/rndis_ipa.h b/include/linux/rndis_ipa.h
index 14cce939d485..c9e389ddbf2b 100644
--- a/include/linux/rndis_ipa.h
+++ b/include/linux/rndis_ipa.h
@@ -73,12 +73,12 @@ void rndis_ipa_cleanup(void *private);
#else /* CONFIG_RNDIS_IPA*/
-int rndis_ipa_init(struct ipa_usb_init_params *params)
+static inline int rndis_ipa_init(struct ipa_usb_init_params *params)
{
return 0;
}
-int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
+static inline int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
u32 ipa_to_usb_hdl,
u32 max_xfer_size_bytes_to_dev,
u32 max_packet_number_to_dev,
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index a1b1aaab3ff8..54a02107a06e 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -395,13 +395,14 @@ enum ipa_tethering_stats_event {
/**
* enum ipa_rm_resource_name - IPA RM clients identification names
*
- * Add new mapping to ipa_rm_dep_prod_index() / ipa_rm_dep_cons_index()
+ * Add new mapping to ipa_rm_prod_index() / ipa_rm_cons_index()
* when adding new entry to this enum.
*/
enum ipa_rm_resource_name {
IPA_RM_RESOURCE_PROD = 0,
IPA_RM_RESOURCE_Q6_PROD = IPA_RM_RESOURCE_PROD,
IPA_RM_RESOURCE_USB_PROD,
+ IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
IPA_RM_RESOURCE_HSIC_PROD,
IPA_RM_RESOURCE_STD_ECM_PROD,
IPA_RM_RESOURCE_RNDIS_PROD,
@@ -413,6 +414,7 @@ enum ipa_rm_resource_name {
IPA_RM_RESOURCE_Q6_CONS = IPA_RM_RESOURCE_PROD_MAX,
IPA_RM_RESOURCE_USB_CONS,
+ IPA_RM_RESOURCE_USB_DPL_CONS,
IPA_RM_RESOURCE_HSIC_CONS,
IPA_RM_RESOURCE_WLAN_CONS,
IPA_RM_RESOURCE_APPS_CONS,