-rw-r--r--	Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt	| 2
-rw-r--r--	Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt	| 2
-rw-r--r--	drivers/platform/msm/ipa/ipa_api.c	| 31
-rw-r--r--	drivers/platform/msm/ipa/ipa_api.h	| 4
-rw-r--r--	drivers/platform/msm/ipa/ipa_v2/ipa_dp.c	| 680
-rw-r--r--	drivers/platform/msm/ipa/ipa_v2/ipa_i.h	| 10
-rw-r--r--	drivers/platform/msm/ipa/ipa_v2/ipa_trace.h	| 19
-rw-r--r--	drivers/platform/msm/ipa/ipa_v2/ipa_utils.c	| 3
-rw-r--r--	drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c	| 129
-rw-r--r--	drivers/platform/msm/ipa/ipa_v3/ipa_dp.c	| 287
-rw-r--r--	drivers/platform/msm/ipa/ipa_v3/ipa_i.h	| 9
-rw-r--r--	drivers/platform/msm/ipa/ipa_v3/ipa_trace.h	| 20
-rw-r--r--	drivers/platform/msm/ipa/ipa_v3/ipa_utils.c	| 2
-rw-r--r--	drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c	| 103
-rw-r--r--	include/linux/ipa.h	| 15
15 files changed, 956 insertions(+), 360 deletions(-)
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
index c7024e07a71e..102b304f5fb3 100644
--- a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
@@ -10,6 +10,8 @@ Optional:
- qcom,ipa-loaduC: indicate that ipa uC should be loaded
- qcom,ipa-advertise-sg-support: determine how to respond to a query
regarding scatter-gather capability
+- qcom,ipa-napi-enable: Boolean flag to indicate whether the
+  NAPI framework is enabled
Example:
qcom,rmnet-ipa {
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
index 3f5531278700..7ee28664668b 100644
--- a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
@@ -10,6 +10,8 @@ Optional:
- qcom,ipa-loaduC: indicate that ipa uC should be loaded
- qcom,ipa-advertise-sg-support: determine how to respond to a query
regarding scatter-gather capability
+- qcom,ipa-napi-enable: Boolean flag to indicate whether the
+  NAPI framework is enabled
Example:
qcom,rmnet-ipa3 {
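For illustration, a board devicetree node that opts into NAPI simply adds the new boolean property alongside the existing optional ones; a minimal sketch (assuming the node's usual compatible string matches its name):

	qcom,rmnet-ipa3 {
		compatible = "qcom,rmnet-ipa3";
		qcom,ipa-napi-enable;
	};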
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 72249ca07886..82402d7d1545 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -2654,6 +2654,37 @@ void ipa_assert(void)
BUG();
}
+/**
+ * ipa_rx_poll() - Poll the rx packets from IPA HW in the
+ * softirq context
+ *
+ * @clnt_hdl: client handle of the polled pipe
+ * @budget: maximum number of packets to be polled in a single iteration
+ *
+ * Return codes: >= 0 : actual number of packets polled
+ *
+ */
+int ipa_rx_poll(u32 clnt_hdl, int budget)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_rx_poll, clnt_hdl, budget);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_rx_poll);
+
+/**
+ * ipa_recycle_wan_skb() - Recycle the WAN skb
+ *
+ * @skb: skb that needs to be recycled
+ *
+ */
+void ipa_recycle_wan_skb(struct sk_buff *skb)
+{
+ IPA_API_DISPATCH(ipa_recycle_wan_skb, skb);
+}
+EXPORT_SYMBOL(ipa_recycle_wan_skb);
+
static const struct dev_pm_ops ipa_pm_ops = {
.suspend_noirq = ipa_ap_suspend,
.resume_noirq = ipa_ap_resume,
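Both new wrappers follow this file's existing dispatch pattern: each call is forwarded to whichever per-generation controller (ipa2 or ipa3) was bound at probe time, so clients stay HW-agnostic. A simplified sketch of what an IPA_API_DISPATCH_RETURN-style macro conceptually does (illustrative only; the driver's actual macro definition is not shown in this diff):

	/* Illustrative sketch, not the driver's exact macro.
	 * ipa_api_ctrl is the controller bound by the per-HW
	 * ipa2/ipa3_bind_api_controller() at probe time.
	 */
	#define IPA_API_DISPATCH_RETURN(api, p...)		\
		do {						\
			if (!ipa_api_ctrl || !ipa_api_ctrl->api)\
				ret = -EPERM;			\
			else					\
				ret = ipa_api_ctrl->api(p);	\
		} while (0)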
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index e3fa4144cb84..862bdc475025 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -356,6 +356,10 @@ struct ipa_api_controller {
void *(*ipa_get_ipc_logbuf_low)(void);
+ int (*ipa_rx_poll)(u32 clnt_hdl, int budget);
+
+ void (*ipa_recycle_wan_skb)(struct sk_buff *skb);
+
};
#ifdef CONFIG_IPA
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 9a2797537712..510b4c584764 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -77,6 +77,7 @@ static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys,
struct sps_iovec *iovec);
static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
+static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys);
static void ipa_wq_write_done_common(struct ipa_sys_context *sys, u32 cnt)
{
@@ -762,6 +763,29 @@ static void ipa_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
}
/**
+ * ipa_poll_pkt() - Poll a packet from the SPS BAM
+ * Returns 0 to the caller on a successful poll,
+ * -EIO otherwise
+ *
+ */
+static int ipa_poll_pkt(struct ipa_sys_context *sys,
+ struct sps_iovec *iov)
+{
+ int ret;
+
+ ret = sps_get_iovec(sys->ep->ep_hdl, iov);
+ if (ret) {
+ IPAERR("sps_get_iovec failed %d\n", ret);
+ return ret;
+ }
+
+ if (iov->addr == 0)
+ return -EIO;
+
+ return 0;
+}
+
+/**
* ipa_handle_rx_core() - The core functionality of packet reception. This
* function is read from multiple code paths.
*
@@ -787,14 +811,10 @@ static int ipa_handle_rx_core(struct ipa_sys_context *sys, bool process_all,
if (cnt && !process_all)
break;
- ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
- if (ret) {
- IPAERR("sps_get_iovec failed %d\n", ret);
+ ret = ipa_poll_pkt(sys, &iov);
+ if (ret)
break;
- }
- if (iov.addr == 0)
- break;
if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
ipa_dma_memcpy_notify(sys, &iov);
else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
@@ -851,7 +871,8 @@ static void ipa_rx_switch_to_intr_mode(struct ipa_sys_context *sys)
goto fail;
}
atomic_set(&sys->curr_polling_state, 0);
- ipa_handle_rx_core(sys, true, false);
+ if (!sys->ep->napi_enabled)
+ ipa_handle_rx_core(sys, true, false);
ipa_dec_release_wakelock(sys->ep->wakelock_client);
return;
@@ -965,26 +986,30 @@ static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
case SPS_EVENT_EOT:
if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
- if (!atomic_read(&sys->curr_polling_state)) {
- ret = sps_get_config(sys->ep->ep_hdl,
+
+ if (atomic_read(&sys->curr_polling_state)) {
+ sys->ep->eot_in_poll_err++;
+ break;
+ }
+
+ ret = sps_get_config(sys->ep->ep_hdl,
&sys->ep->connect);
- if (ret) {
- IPAERR("sps_get_config() failed %d\n", ret);
- break;
- }
- sys->ep->connect.options = SPS_O_AUTO_ENABLE |
- SPS_O_ACK_TRANSFERS | SPS_O_POLL;
- ret = sps_set_config(sys->ep->ep_hdl,
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ break;
+ }
+ sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(sys->ep->ep_hdl,
&sys->ep->connect);
- if (ret) {
- IPAERR("sps_set_config() failed %d\n", ret);
- break;
- }
- ipa_inc_acquire_wakelock(sys->ep->wakelock_client);
- atomic_set(&sys->curr_polling_state, 1);
- trace_intr_to_poll(sys->ep->client);
- queue_work(sys->wq, &sys->work);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ break;
}
+ ipa_inc_acquire_wakelock(sys->ep->wakelock_client);
+ atomic_set(&sys->curr_polling_state, 1);
+ trace_intr_to_poll(sys->ep->client);
+ queue_work(sys->wq, &sys->work);
break;
default:
IPAERR("received unexpected event id %d\n", notify->event_id);
@@ -1041,6 +1066,58 @@ static void ipa_handle_rx(struct ipa_sys_context *sys)
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}
+/**
+ * ipa2_rx_poll() - Poll the rx packets from IPA HW. This
+ * function is executed in the softirq context
+ *
+ * if no packets are pending, the driver switches back to
+ * interrupt mode
+ *
+ * return number of polled packets, on error 0(zero)
+ */
+int ipa2_rx_poll(u32 clnt_hdl, int weight)
+{
+ struct ipa_ep_context *ep;
+ int ret;
+ int cnt = 0;
+ unsigned int delay = 1;
+ struct sps_iovec iov;
+
+ IPADBG("\n");
+ if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+ ipa_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm 0x%x\n", clnt_hdl);
+ return cnt;
+ }
+
+ ep = &ipa_ctx->ep[clnt_hdl];
+ while (cnt < weight &&
+ atomic_read(&ep->sys->curr_polling_state)) {
+
+ ret = ipa_poll_pkt(ep->sys, &iov);
+ if (ret)
+ break;
+
+ ipa_wq_rx_common(ep->sys, iov.size);
+ cnt += 5;
+ }
+
+ if (cnt == 0) {
+ ep->inactive_cycles++;
+ ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
+
+ if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
+ ep->switch_to_intr = true;
+ delay = 0;
+ }
+ queue_delayed_work(ep->sys->wq,
+ &ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
+ } else
+ ep->inactive_cycles = 0;
+
+ return cnt;
+}
+
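Note the weight accounting above: each successfully polled iovec charges 5 budget units rather than 1, presumably because a single aggregated WAN transfer can carry several QMAP packets. With the NAPI_WEIGHT of 60 defined in rmnet_ipa.c below, one poll pass therefore drains at most 60 / 5 = 12 aggregated transfers before yielding back to the NAPI core.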
static void switch_to_intr_rx_work_func(struct work_struct *work)
{
struct delayed_work *dwork;
@@ -1048,7 +1125,18 @@ static void switch_to_intr_rx_work_func(struct work_struct *work)
dwork = container_of(work, struct delayed_work, work);
sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
- ipa_handle_rx(sys);
+
+ if (sys->ep->napi_enabled) {
+ if (sys->ep->switch_to_intr) {
+ ipa_rx_switch_to_intr_mode(sys);
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
+ sys->ep->switch_to_intr = false;
+ sys->ep->inactive_cycles = 0;
+ } else
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_CLIENT_START_POLL, 0);
+ } else
+ ipa_handle_rx(sys);
}
/**
@@ -1196,6 +1284,7 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
}
INIT_LIST_HEAD(&ep->sys->head_desc_list);
+ INIT_LIST_HEAD(&ep->sys->rcycl_list);
spin_lock_init(&ep->sys->spinlock);
} else {
memset(ep->sys, 0, offsetof(struct ipa_sys_context, ep));
@@ -1211,6 +1300,7 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
ep->valid = 1;
ep->client = sys_in->client;
ep->client_notify = sys_in->notify;
+ ep->napi_enabled = sys_in->napi_enabled;
ep->priv = sys_in->priv;
ep->keep_ipa_awake = sys_in->keep_ipa_awake;
atomic_set(&ep->avail_fifo_desc,
@@ -1334,9 +1424,7 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
*clnt_hdl = ipa_ep_idx;
- if (nr_cpu_ids > 1 &&
- (sys_in->client == IPA_CLIENT_APPS_LAN_CONS ||
- sys_in->client == IPA_CLIENT_APPS_WAN_CONS)) {
+ if (ep->sys->repl_hdlr == ipa_fast_replenish_rx_cache) {
ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
ep->sys->repl.cache = kzalloc(ep->sys->repl.capacity *
sizeof(void *), GFP_KERNEL);
@@ -1425,7 +1513,12 @@ int ipa2_teardown_sys_pipe(u32 clnt_hdl)
IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
ipa_disable_data_path(clnt_hdl);
- ep->valid = 0;
+ if (ep->napi_enabled) {
+ ep->switch_to_intr = true;
+ do {
+ usleep_range(95, 105);
+ } while (atomic_read(&ep->sys->curr_polling_state));
+ }
if (IPA_CLIENT_IS_PROD(ep->client)) {
do {
@@ -1471,6 +1564,7 @@ int ipa2_teardown_sys_pipe(u32 clnt_hdl)
if (!atomic_read(&ipa_ctx->wc_memb.active_clnt_cnt))
ipa_cleanup_wlan_rx_common_cache();
+ ep->valid = 0;
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
@@ -1721,7 +1815,13 @@ static void ipa_wq_handle_rx(struct work_struct *work)
struct ipa_sys_context *sys;
sys = container_of(work, struct ipa_sys_context, work);
- ipa_handle_rx(sys);
+
+ if (sys->ep->napi_enabled) {
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_CLIENT_START_POLL, 0);
+ } else
+ ipa_handle_rx(sys);
}
static void ipa_wq_repl_rx(struct work_struct *work)
@@ -2024,6 +2124,63 @@ fail_kmem_cache_alloc:
msecs_to_jiffies(1));
}
+static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys)
+{
+ void *ptr;
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ int ret;
+ int rx_len_cached = 0;
+
+ rx_len_cached = sys->len;
+
+ while (rx_len_cached < sys->rx_pool_sz) {
+ spin_lock_bh(&sys->spinlock);
+ if (list_empty(&sys->rcycl_list))
+ goto fail_kmem_cache_alloc;
+
+ rx_pkt = list_first_entry(&sys->rcycl_list,
+ struct ipa_rx_pkt_wrapper, link);
+ list_del(&rx_pkt->link);
+ spin_unlock_bh(&sys->spinlock);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+ rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev,
+ ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+ if (rx_pkt->data.dma_addr == 0 ||
+ rx_pkt->data.dma_addr == ~0)
+ goto fail_dma_mapping;
+
+ list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+ rx_len_cached = ++sys->len;
+
+ ret = sps_transfer_one(sys->ep->ep_hdl,
+ rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
+
+ if (ret) {
+ IPAERR("sps_transfer_one failed %d\n", ret);
+ goto fail_sps_transfer;
+ }
+ }
+
+ return;
+fail_sps_transfer:
+ rx_len_cached = --sys->len;
+ list_del(&rx_pkt->link);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+fail_dma_mapping:
+ spin_lock_bh(&sys->spinlock);
+ list_add_tail(&rx_pkt->link, &sys->rcycl_list);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ spin_unlock_bh(&sys->spinlock);
+fail_kmem_cache_alloc:
+ spin_unlock_bh(&sys->spinlock);
+ if (rx_len_cached == 0)
+ queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+ msecs_to_jiffies(1));
+}
+
static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys)
{
struct ipa_rx_pkt_wrapper *rx_pkt;
@@ -2035,8 +2192,10 @@ static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys)
curr = atomic_read(&sys->repl.head_idx);
while (rx_len_cached < sys->rx_pool_sz) {
- if (curr == atomic_read(&sys->repl.tail_idx))
+ if (curr == atomic_read(&sys->repl.tail_idx)) {
+ queue_work(sys->repl_wq, &sys->repl_work);
break;
+ }
rx_pkt = sys->repl.cache[curr];
list_add_tail(&rx_pkt->link, &sys->head_desc_list);
@@ -2107,6 +2266,15 @@ static void ipa_cleanup_rx(struct ipa_sys_context *sys)
kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
}
+ list_for_each_entry_safe(rx_pkt, r,
+ &sys->rcycl_list, link) {
+ list_del(&rx_pkt->link);
+ dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+ sys->rx_buff_sz, DMA_FROM_DEVICE);
+ sys->free_skb(rx_pkt->data.skb);
+ kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+ }
+
if (sys->repl.cache) {
head = atomic_read(&sys->repl.head_idx);
tail = atomic_read(&sys->repl.tail_idx);
@@ -2471,6 +2639,10 @@ static int ipa_wan_rx_pyld_hdlr(struct sk_buff *skb,
IPA_RECEIVE, (unsigned long)(skb));
return rc;
}
+ if (sys->repl_hdlr == ipa_replenish_rx_cache_recycle) {
+ IPAERR("Recycle should enable only with GRO Aggr\n");
+ ipa_assert();
+ }
/*
* payload splits across 2 buff or more,
* take the start of the payload from prev_skb
@@ -2721,6 +2893,37 @@ void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
}
+void ipa2_recycle_wan_skb(struct sk_buff *skb)
+{
+ struct ipa_rx_pkt_wrapper *rx_pkt;
+ int ep_idx = ipa2_get_ep_mapping(
+ IPA_CLIENT_APPS_WAN_CONS);
+ gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
+ (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+
+ if (unlikely(ep_idx == -1)) {
+ IPAERR("dest EP does not exist\n");
+ ipa_assert();
+ }
+
+ rx_pkt = kmem_cache_zalloc(
+ ipa_ctx->rx_pkt_wrapper_cache, flag);
+ if (!rx_pkt)
+ ipa_assert();
+
+ INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
+ rx_pkt->sys = ipa_ctx->ep[ep_idx].sys;
+
+ rx_pkt->data.skb = skb;
+ rx_pkt->data.dma_addr = 0;
+ ipa_skb_recycle(rx_pkt->data.skb);
+ skb_reserve(rx_pkt->data.skb, IPA_HEADROOM);
+ INIT_LIST_HEAD(&rx_pkt->link);
+ spin_lock_bh(&rx_pkt->sys->spinlock);
+ list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
+ spin_unlock_bh(&rx_pkt->sys->spinlock);
+}
+
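ipa2_recycle_wan_skb() is the give-back half of the recycle scheme: rather than freeing a consumed WAN skb, the client returns it here; the skb is reset via ipa_skb_recycle(), its headroom re-reserved, and the wrapper parked on rcycl_list for ipa_replenish_rx_cache_recycle() to remap and repost. A minimal sketch of the client side (hypothetical consumer; everything except ipa_recycle_wan_skb() is illustrative):

	/* Hypothetical rx-complete path in a client built for the
	 * NAPI + recycle mode: hand the buffer back instead of freeing it.
	 */
	static void client_rx_done(struct sk_buff *skb)
	{
		/* payload already consumed/copied at this point */
		ipa_recycle_wan_skb(skb);	/* requeues onto sys->rcycl_list */
	}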
static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)
{
struct ipa_rx_pkt_wrapper *rx_pkt_expected;
@@ -2858,6 +3061,220 @@ static int ipa_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
return 0;
}
+static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
+ struct ipa_sys_context *sys)
+{
+ unsigned long int aggr_byte_limit;
+
+ sys->ep->status.status_en = true;
+ sys->ep->wakelock_client = IPA_WAKELOCK_REF_CLIENT_MAX;
+ if (IPA_CLIENT_IS_PROD(in->client)) {
+ if (!sys->ep->skip_ep_cfg) {
+ sys->policy = IPA_POLICY_NOINTR_MODE;
+ sys->sps_option = SPS_O_AUTO_ENABLE;
+ sys->sps_callback = NULL;
+ sys->ep->status.status_ep = ipa2_get_ep_mapping(
+ IPA_CLIENT_APPS_LAN_CONS);
+ if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client))
+ sys->ep->status.status_en = false;
+ } else {
+ sys->policy = IPA_POLICY_INTR_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE |
+ SPS_O_EOT);
+ sys->sps_callback =
+ ipa_sps_irq_tx_no_aggr_notify;
+ }
+ return 0;
+ }
+
+ aggr_byte_limit =
+ (unsigned long int)IPA_GENERIC_RX_BUFF_SZ(
+ ipa_adjust_ra_buff_base_sz(
+ in->ipa_ep_cfg.aggr.aggr_byte_limit));
+
+ if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
+ in->client == IPA_CLIENT_APPS_WAN_CONS) {
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+ | SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa_sps_irq_rx_notify;
+ INIT_WORK(&sys->work, ipa_wq_handle_rx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ switch_to_intr_rx_work_func);
+ INIT_DELAYED_WORK(&sys->replenish_rx_work,
+ replenish_rx_work_func);
+ INIT_WORK(&sys->repl_work, ipa_wq_repl_rx);
+ atomic_set(&sys->curr_polling_state, 0);
+ sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
+ IPA_GENERIC_RX_BUFF_BASE_SZ) -
+ IPA_HEADROOM;
+ sys->get_skb = ipa_get_skb_ipa_rx_headroom;
+ sys->free_skb = ipa_free_skb_rx;
+ in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+ in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
+ in->ipa_ep_cfg.aggr.aggr_time_limit =
+ IPA_GENERIC_AGGR_TIME_LIMIT;
+ if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
+ sys->pyld_hdlr = ipa_lan_rx_pyld_hdlr;
+ if (nr_cpu_ids > 1) {
+ sys->repl_hdlr =
+ ipa_fast_replenish_rx_cache;
+ sys->repl_trig_thresh =
+ sys->rx_pool_sz / 8;
+ } else {
+ sys->repl_hdlr =
+ ipa_replenish_rx_cache;
+ }
+ sys->rx_pool_sz =
+ IPA_GENERIC_RX_POOL_SZ;
+ in->ipa_ep_cfg.aggr.aggr_byte_limit =
+ IPA_GENERIC_AGGR_BYTE_LIMIT;
+ in->ipa_ep_cfg.aggr.aggr_pkt_limit =
+ IPA_GENERIC_AGGR_PKT_LIMIT;
+ sys->ep->wakelock_client =
+ IPA_WAKELOCK_REF_CLIENT_LAN_RX;
+ } else if (in->client ==
+ IPA_CLIENT_APPS_WAN_CONS) {
+ sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
+ if (in->napi_enabled) {
+ sys->repl_hdlr =
+ ipa_replenish_rx_cache_recycle;
+ sys->rx_pool_sz =
+ IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ } else {
+ if (nr_cpu_ids > 1) {
+ sys->repl_hdlr =
+ ipa_fast_replenish_rx_cache;
+ sys->repl_trig_thresh =
+ sys->rx_pool_sz / 8;
+ } else {
+ sys->repl_hdlr =
+ ipa_replenish_rx_cache;
+ }
+ sys->rx_pool_sz =
+ ipa_ctx->wan_rx_ring_size;
+ }
+ sys->ep->wakelock_client =
+ IPA_WAKELOCK_REF_CLIENT_WAN_RX;
+ in->ipa_ep_cfg.aggr.aggr_sw_eof_active
+ = true;
+ if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) {
+ IPAERR("get close-by %u\n",
+ ipa_adjust_ra_buff_base_sz(
+ in->ipa_ep_cfg.aggr.
+ aggr_byte_limit));
+ IPAERR("set rx_buff_sz %lu\n", aggr_byte_limit);
+ /* disable ipa_status */
+ sys->ep->status.
+ status_en = false;
+ sys->rx_buff_sz =
+ IPA_GENERIC_RX_BUFF_SZ(
+ ipa_adjust_ra_buff_base_sz(
+ in->ipa_ep_cfg.aggr.
+ aggr_byte_limit));
+ in->ipa_ep_cfg.aggr.
+ aggr_byte_limit =
+ sys->rx_buff_sz < in->
+ ipa_ep_cfg.aggr.aggr_byte_limit ?
+ IPA_ADJUST_AGGR_BYTE_LIMIT(
+ sys->rx_buff_sz) :
+ IPA_ADJUST_AGGR_BYTE_LIMIT(
+ in->ipa_ep_cfg.
+ aggr.aggr_byte_limit);
+ IPAERR("set aggr_limit %lu\n",
+ (unsigned long int)
+ in->ipa_ep_cfg.aggr.
+ aggr_byte_limit);
+ } else {
+ in->ipa_ep_cfg.aggr.
+ aggr_byte_limit =
+ IPA_GENERIC_AGGR_BYTE_LIMIT;
+ in->ipa_ep_cfg.aggr.
+ aggr_pkt_limit =
+ IPA_GENERIC_AGGR_PKT_LIMIT;
+ }
+ }
+ } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
+ IPADBG("assigning policy to client:%d",
+ in->client);
+
+ sys->ep->status.status_en = false;
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+ | SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa_sps_irq_rx_notify;
+ INIT_WORK(&sys->work, ipa_wq_handle_rx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ switch_to_intr_rx_work_func);
+ INIT_DELAYED_WORK(&sys->replenish_rx_work,
+ replenish_rx_work_func);
+ atomic_set(&sys->curr_polling_state, 0);
+ sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
+ sys->rx_pool_sz = in->desc_fifo_sz /
+ sizeof(struct sps_iovec) - 1;
+ if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
+ sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
+ sys->pyld_hdlr = NULL;
+ sys->repl_hdlr = ipa_replenish_wlan_rx_cache;
+ sys->get_skb = ipa_get_skb_ipa_rx;
+ sys->free_skb = ipa_free_skb_rx;
+ in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+ sys->ep->wakelock_client =
+ IPA_WAKELOCK_REF_CLIENT_WLAN_RX;
+ } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
+ IPADBG("assigning policy to client:%d",
+ in->client);
+
+ sys->ep->status.status_en = false;
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+ | SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa_sps_irq_rx_notify;
+ INIT_WORK(&sys->work, ipa_wq_handle_rx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ switch_to_intr_rx_work_func);
+ INIT_DELAYED_WORK(&sys->replenish_rx_work,
+ replenish_rx_work_func);
+ atomic_set(&sys->curr_polling_state, 0);
+ sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
+ sys->rx_pool_sz = in->desc_fifo_sz /
+ sizeof(struct sps_iovec) - 1;
+ if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
+ sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
+ sys->pyld_hdlr = ipa_odu_rx_pyld_hdlr;
+ sys->get_skb = ipa_get_skb_ipa_rx;
+ sys->free_skb = ipa_free_skb_rx;
+ sys->repl_hdlr = ipa_replenish_rx_cache;
+ sys->ep->wakelock_client =
+ IPA_WAKELOCK_REF_CLIENT_ODU_RX;
+ } else if (in->client ==
+ IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
+ IPADBG("assigning policy to client:%d",
+ in->client);
+ sys->ep->status.status_en = false;
+ sys->policy = IPA_POLICY_INTR_POLL_MODE;
+ sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+ | SPS_O_ACK_TRANSFERS);
+ sys->sps_callback = ipa_sps_irq_rx_notify;
+ INIT_WORK(&sys->work, ipa_wq_handle_rx);
+ INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+ switch_to_intr_rx_work_func);
+ } else if (in->client ==
+ IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
+ IPADBG("assigning policy to client:%d",
+ in->client);
+ sys->ep->status.status_en = false;
+ sys->policy = IPA_POLICY_NOINTR_MODE;
+ sys->sps_option = SPS_O_AUTO_ENABLE |
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ } else {
+ IPAERR("Need to install a RX pipe hdlr\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int ipa_assign_policy(struct ipa_sys_connect_params *in,
struct ipa_sys_context *sys)
{
@@ -2904,203 +3321,14 @@ static int ipa_assign_policy(struct ipa_sys_connect_params *in,
WARN_ON(1);
return -EINVAL;
}
- } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
- sys->ep->status.status_en = true;
- sys->ep->wakelock_client = IPA_WAKELOCK_REF_CLIENT_MAX;
- if (IPA_CLIENT_IS_PROD(in->client)) {
- if (!sys->ep->skip_ep_cfg) {
- sys->policy = IPA_POLICY_NOINTR_MODE;
- sys->sps_option = SPS_O_AUTO_ENABLE;
- sys->sps_callback = NULL;
- sys->ep->status.status_ep = ipa2_get_ep_mapping(
- IPA_CLIENT_APPS_LAN_CONS);
- if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client))
- sys->ep->status.status_en = false;
- } else {
- sys->policy = IPA_POLICY_INTR_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE |
- SPS_O_EOT);
- sys->sps_callback =
- ipa_sps_irq_tx_no_aggr_notify;
- }
- } else {
- if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
- in->client == IPA_CLIENT_APPS_WAN_CONS) {
- sys->policy = IPA_POLICY_INTR_POLL_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
- | SPS_O_ACK_TRANSFERS);
- sys->sps_callback = ipa_sps_irq_rx_notify;
- INIT_WORK(&sys->work, ipa_wq_handle_rx);
- INIT_DELAYED_WORK(&sys->switch_to_intr_work,
- switch_to_intr_rx_work_func);
- INIT_DELAYED_WORK(&sys->replenish_rx_work,
- replenish_rx_work_func);
- INIT_WORK(&sys->repl_work, ipa_wq_repl_rx);
- atomic_set(&sys->curr_polling_state, 0);
- sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
- IPA_GENERIC_RX_BUFF_BASE_SZ) -
- IPA_HEADROOM;
- sys->get_skb = ipa_get_skb_ipa_rx_headroom;
- sys->free_skb = ipa_free_skb_rx;
- in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
- in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
- in->ipa_ep_cfg.aggr.aggr_time_limit =
- IPA_GENERIC_AGGR_TIME_LIMIT;
- if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
- sys->pyld_hdlr = ipa_lan_rx_pyld_hdlr;
- sys->rx_pool_sz =
- IPA_GENERIC_RX_POOL_SZ;
- in->ipa_ep_cfg.aggr.aggr_byte_limit =
- IPA_GENERIC_AGGR_BYTE_LIMIT;
- in->ipa_ep_cfg.aggr.aggr_pkt_limit =
- IPA_GENERIC_AGGR_PKT_LIMIT;
- sys->ep->wakelock_client =
- IPA_WAKELOCK_REF_CLIENT_LAN_RX;
- } else if (in->client ==
- IPA_CLIENT_APPS_WAN_CONS) {
- sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
- sys->rx_pool_sz =
- ipa_ctx->wan_rx_ring_size;
- sys->ep->wakelock_client =
- IPA_WAKELOCK_REF_CLIENT_WAN_RX;
- in->ipa_ep_cfg.aggr.aggr_sw_eof_active
- = true;
- if (ipa_ctx->
- ipa_client_apps_wan_cons_agg_gro) {
- IPAERR("get close-by %u\n",
- ipa_adjust_ra_buff_base_sz(
- in->ipa_ep_cfg.aggr.
- aggr_byte_limit));
- IPAERR("set rx_buff_sz %lu\n",
- (unsigned long int)
- IPA_GENERIC_RX_BUFF_SZ(
- ipa_adjust_ra_buff_base_sz(
- in->ipa_ep_cfg.
- aggr.aggr_byte_limit)));
- /* disable ipa_status */
- sys->ep->status.
- status_en = false;
- sys->rx_buff_sz =
- IPA_GENERIC_RX_BUFF_SZ(
- ipa_adjust_ra_buff_base_sz(
- in->ipa_ep_cfg.aggr.
- aggr_byte_limit));
- in->ipa_ep_cfg.aggr.
- aggr_byte_limit =
- sys->rx_buff_sz < in->
- ipa_ep_cfg.aggr.
- aggr_byte_limit ?
- IPA_ADJUST_AGGR_BYTE_LIMIT(
- sys->rx_buff_sz) :
- IPA_ADJUST_AGGR_BYTE_LIMIT(
- in->ipa_ep_cfg.
- aggr.aggr_byte_limit);
- IPAERR("set aggr_limit %lu\n",
- (unsigned long int)
- in->ipa_ep_cfg.aggr.
- aggr_byte_limit);
- } else {
- in->ipa_ep_cfg.aggr.
- aggr_byte_limit =
- IPA_GENERIC_AGGR_BYTE_LIMIT;
- in->ipa_ep_cfg.aggr.
- aggr_pkt_limit =
- IPA_GENERIC_AGGR_PKT_LIMIT;
- }
- }
- sys->repl_trig_thresh = sys->rx_pool_sz / 8;
- if (nr_cpu_ids > 1)
- sys->repl_hdlr =
- ipa_fast_replenish_rx_cache;
- else
- sys->repl_hdlr = ipa_replenish_rx_cache;
- } else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
- IPADBG("assigning policy to client:%d",
- in->client);
-
- sys->ep->status.status_en = false;
- sys->policy = IPA_POLICY_INTR_POLL_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
- | SPS_O_ACK_TRANSFERS);
- sys->sps_callback = ipa_sps_irq_rx_notify;
- INIT_WORK(&sys->work, ipa_wq_handle_rx);
- INIT_DELAYED_WORK(&sys->switch_to_intr_work,
- switch_to_intr_rx_work_func);
- INIT_DELAYED_WORK(&sys->replenish_rx_work,
- replenish_rx_work_func);
- atomic_set(&sys->curr_polling_state, 0);
- sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
- sys->rx_pool_sz = in->desc_fifo_sz/
- sizeof(struct sps_iovec) - 1;
- if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
- sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
- sys->pyld_hdlr = NULL;
- sys->repl_hdlr = ipa_replenish_wlan_rx_cache;
- sys->get_skb = ipa_get_skb_ipa_rx;
- sys->free_skb = ipa_free_skb_rx;
- in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
- sys->ep->wakelock_client =
- IPA_WAKELOCK_REF_CLIENT_WLAN_RX;
- } else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
- IPADBG("assigning policy to client:%d",
- in->client);
-
- sys->ep->status.status_en = false;
- sys->policy = IPA_POLICY_INTR_POLL_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
- | SPS_O_ACK_TRANSFERS);
- sys->sps_callback = ipa_sps_irq_rx_notify;
- INIT_WORK(&sys->work, ipa_wq_handle_rx);
- INIT_DELAYED_WORK(&sys->switch_to_intr_work,
- switch_to_intr_rx_work_func);
- INIT_DELAYED_WORK(&sys->replenish_rx_work,
- replenish_rx_work_func);
- atomic_set(&sys->curr_polling_state, 0);
- sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
- sys->rx_pool_sz = in->desc_fifo_sz /
- sizeof(struct sps_iovec) - 1;
- if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
- sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
- sys->pyld_hdlr = ipa_odu_rx_pyld_hdlr;
- sys->get_skb = ipa_get_skb_ipa_rx;
- sys->free_skb = ipa_free_skb_rx;
- sys->repl_hdlr = ipa_replenish_rx_cache;
- sys->ep->wakelock_client =
- IPA_WAKELOCK_REF_CLIENT_ODU_RX;
- } else if (in->client ==
- IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
- IPADBG("assigning policy to client:%d",
- in->client);
- sys->ep->status.status_en = false;
- sys->policy = IPA_POLICY_INTR_POLL_MODE;
- sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
- | SPS_O_ACK_TRANSFERS);
- sys->sps_callback = ipa_sps_irq_rx_notify;
- INIT_WORK(&sys->work, ipa_wq_handle_rx);
- INIT_DELAYED_WORK(&sys->switch_to_intr_work,
- switch_to_intr_rx_work_func);
- } else if (in->client ==
- IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
- IPADBG("assigning policy to client:%d",
- in->client);
- sys->ep->status.status_en = false;
- sys->policy = IPA_POLICY_NOINTR_MODE;
- sys->sps_option = SPS_O_AUTO_ENABLE |
- SPS_O_ACK_TRANSFERS | SPS_O_POLL;
- } else {
- IPAERR("Need to install a RX pipe hdlr\n");
- WARN_ON(1);
- return -EINVAL;
- }
- }
- } else {
- IPAERR("Unsupported HW type %d\n", ipa_ctx->ipa_hw_type);
- WARN_ON(1);
- return -EINVAL;
- }
+ return 0;
+ } else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0)
+ return ipa_assign_policy_v2(in, sys);
- return 0;
+ IPAERR("Unsupported HW type %d\n", ipa_ctx->ipa_hw_type);
+ WARN_ON(1);
+ return -EINVAL;
}
/**
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index cb41f8429771..f0b25132df33 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -39,6 +39,8 @@
#define MTU_BYTE 1500
#define IPA_MAX_NUM_PIPES 0x14
+#define IPA_WAN_CONS_DESC_FIFO_SZ 0x5E80
+#define IPA_WAN_NAPI_CONS_RX_POOL_SZ 3000
#define IPA_SYS_DESC_FIFO_SZ 0x2000
#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
#define IPA_LAN_RX_HEADER_LENGTH (2)
@@ -515,6 +517,7 @@ enum ipa_wakelock_ref_client {
* @disconnect_in_progress: Indicates client disconnect in progress.
* @qmi_request_sent: Indicates whether QMI request to enable clear data path
* request is sent or not.
+ * @napi_enabled: when true, IPA calls the client callback to start polling
*/
struct ipa_ep_context {
int valid;
@@ -546,6 +549,10 @@ struct ipa_ep_context {
bool disconnect_in_progress;
u32 qmi_request_sent;
enum ipa_wakelock_ref_client wakelock_client;
+ bool napi_enabled;
+ bool switch_to_intr;
+ int inactive_cycles;
+ u32 eot_in_poll_err;
/* sys MUST be the last element of this struct */
struct ipa_sys_context *sys;
@@ -603,6 +610,7 @@ struct ipa_sys_context {
/* ordering is important - mutable fields go above */
struct ipa_ep_context *ep;
struct list_head head_desc_list;
+ struct list_head rcycl_list;
spinlock_t spinlock;
struct workqueue_struct *wq;
struct workqueue_struct *repl_wq;
@@ -1929,4 +1937,6 @@ void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client);
void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client);
int ipa_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
+int ipa2_rx_poll(u32 clnt_hdl, int budget);
+void ipa2_recycle_wan_skb(struct sk_buff *skb);
#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h
index d70abdfa0469..a03a49a33f97 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -127,6 +127,23 @@ TRACE_EVENT(
TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
);
+TRACE_EVENT(
+ rmnet_ipa_netif_rcv_skb,
+
+ TP_PROTO(unsigned long rx_pkt_cnt),
+
+ TP_ARGS(rx_pkt_cnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, rx_pkt_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->rx_pkt_cnt = rx_pkt_cnt;
+ ),
+
+ TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
#endif /* _IPA_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 9fc67548f6ac..ae709c54cec1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -5128,7 +5128,8 @@ int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
ipa2_set_required_perf_profile;
api_ctrl->ipa_get_ipc_logbuf = ipa2_get_ipc_logbuf;
api_ctrl->ipa_get_ipc_logbuf_low = ipa2_get_ipc_logbuf_low;
-
+ api_ctrl->ipa_rx_poll = ipa2_rx_poll;
+ api_ctrl->ipa_recycle_wan_skb = ipa2_recycle_wan_skb;
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index eeecc508e8db..9336250352f0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -59,6 +59,8 @@
#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
+#define NAPI_WEIGHT 60
+
static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
static u32 qmap_hdr_hdl, dflt_v4_wan_rt_hdl, dflt_v6_wan_rt_hdl;
@@ -76,6 +78,8 @@ static struct mutex ipa_to_apps_pipe_handle_guard;
static int wwan_add_ul_flt_rule_to_ipa(void);
static int wwan_del_ul_flt_rule_to_ipa(void);
static void ipa_wwan_msg_free_cb(void*, u32, u32);
+static void ipa_rmnet_rx_cb(void *priv);
+static int ipa_rmnet_poll(struct napi_struct *napi, int budget);
static void wake_tx_queue(struct work_struct *work);
static DECLARE_WORK(ipa_tx_wakequeue_work, wake_tx_queue);
@@ -93,8 +97,10 @@ struct ipa_rmnet_plat_drv_res {
bool ipa_rmnet_ssr;
bool ipa_loaduC;
bool ipa_advertise_sg_support;
+ bool ipa_napi_enable;
};
+static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
/**
* struct wwan_private - WWAN private data
* @net: network interface struct implemented by this driver
@@ -119,6 +125,7 @@ struct wwan_private {
spinlock_t lock;
struct completion resource_granted_completion;
enum wwan_device_status device_status;
+ struct napi_struct napi;
};
/**
@@ -936,6 +943,9 @@ static int __ipa_wwan_open(struct net_device *dev)
if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
reinit_completion(&wwan_ptr->resource_granted_completion);
wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
+
+ if (ipa_rmnet_res.ipa_napi_enable)
+ napi_enable(&(wwan_ptr->napi));
return 0;
}
@@ -970,6 +980,8 @@ static int __ipa_wwan_close(struct net_device *dev)
/* do not close wwan port once up, this causes
remote side to hang if tried to open again */
reinit_completion(&wwan_ptr->resource_granted_completion);
+ if (ipa_rmnet_res.ipa_napi_enable)
+ napi_disable(&(wwan_ptr->napi));
rc = ipa2_deregister_intf(dev->name);
if (rc) {
IPAWANERR("[%s]: ipa2_deregister_intf failed %d\n",
@@ -1168,38 +1180,49 @@ static void apps_ipa_packet_receive_notify(void *priv,
enum ipa_dp_evt_type evt,
unsigned long data)
{
- struct sk_buff *skb = (struct sk_buff *)data;
struct net_device *dev = (struct net_device *)priv;
- int result;
- unsigned int packet_len = skb->len;
- IPAWANDBG("Rx packet was received");
- if (evt != IPA_RECEIVE) {
- IPAWANERR("A none IPA_RECEIVE event in wan_ipa_receive\n");
- return;
- }
+ if (evt == IPA_RECEIVE) {
+ struct sk_buff *skb = (struct sk_buff *)data;
+ int result;
+ unsigned int packet_len = skb->len;
- skb->dev = ipa_netdevs[0];
- skb->protocol = htons(ETH_P_MAP);
+ IPAWANDBG("Rx packet was received");
+ skb->dev = ipa_netdevs[0];
+ skb->protocol = htons(ETH_P_MAP);
- if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH == 0) {
- trace_rmnet_ipa_netifni(dev->stats.rx_packets);
- result = netif_rx_ni(skb);
- } else {
- trace_rmnet_ipa_netifrx(dev->stats.rx_packets);
- result = netif_rx(skb);
- }
+ if (ipa_rmnet_res.ipa_napi_enable) {
+ trace_rmnet_ipa_netif_rcv_skb(dev->stats.rx_packets);
+ result = netif_receive_skb(skb);
+ } else {
+ if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
+ == 0) {
+ trace_rmnet_ipa_netifni(dev->stats.rx_packets);
+ result = netif_rx_ni(skb);
+ } else {
+ trace_rmnet_ipa_netifrx(dev->stats.rx_packets);
+ result = netif_rx(skb);
+ }
+ }
- if (result) {
- pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_rx\n",
- __func__, __LINE__);
- dev->stats.rx_dropped++;
- }
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += packet_len;
-}
+ if (result) {
+ pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
+ __func__, __LINE__);
+ dev->stats.rx_dropped++;
+ }
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += packet_len;
+ } else if (evt == IPA_CLIENT_START_POLL)
+ ipa_rmnet_rx_cb(priv);
+ else if (evt == IPA_CLIENT_COMP_NAPI) {
+ struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+ if (ipa_rmnet_res.ipa_napi_enable)
+ napi_complete(&(wwan_ptr->napi));
+ } else
+ IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
-static struct ipa_rmnet_plat_drv_res ipa_rmnet_res = {0, };
+}
/**
* ipa_wwan_ioctl() - I/O control for wwan network driver.
@@ -1555,9 +1578,17 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
ipa_to_apps_ep_cfg.notify =
apps_ipa_packet_receive_notify;
- ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
ipa_to_apps_ep_cfg.priv = dev;
+ ipa_to_apps_ep_cfg.napi_enabled =
+ ipa_rmnet_res.ipa_napi_enable;
+ if (ipa_to_apps_ep_cfg.napi_enabled)
+ ipa_to_apps_ep_cfg.desc_fifo_sz =
+ IPA_WAN_CONS_DESC_FIFO_SZ;
+ else
+ ipa_to_apps_ep_cfg.desc_fifo_sz =
+ IPA_SYS_DESC_FIFO_SZ;
+
mutex_lock(&ipa_to_apps_pipe_handle_guard);
if (atomic_read(&is_ssr)) {
IPAWANDBG("In SSR sequence/recovery\n");
@@ -1899,6 +1930,12 @@ static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
"qcom,ipa-advertise-sg-support");
pr_info("IPA SG support = %s\n",
ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
+
+ ipa_rmnet_drv_res->ipa_napi_enable =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-napi-enable");
+ pr_info("IPA Napi Enable = %s\n",
+ ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
return 0;
}
@@ -2044,6 +2081,12 @@ static int ipa_wwan_probe(struct platform_device *pdev)
if (ipa_rmnet_res.ipa_advertise_sg_support)
dev->hw_features |= NETIF_F_SG;
+ /* Enable NAPI support in netdevice. */
+ if (ipa_rmnet_res.ipa_napi_enable) {
+ netif_napi_add(dev, &(wwan_ptr->napi),
+ ipa_rmnet_poll, NAPI_WEIGHT);
+ }
+
ret = register_netdev(dev);
if (ret) {
IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
@@ -2068,6 +2111,8 @@ static int ipa_wwan_probe(struct platform_device *pdev)
pr_info("rmnet_ipa completed initialization\n");
return 0;
config_err:
+ if (ipa_rmnet_res.ipa_napi_enable)
+ netif_napi_del(&(wwan_ptr->napi));
unregister_netdev(ipa_netdevs[0]);
set_perf_err:
ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
@@ -2107,6 +2152,9 @@ setup_a7_qmap_hdr_err:
static int ipa_wwan_remove(struct platform_device *pdev)
{
int ret;
+ struct wwan_private *wwan_ptr;
+
+ wwan_ptr = netdev_priv(ipa_netdevs[0]);
pr_info("rmnet_ipa started deinitialization\n");
mutex_lock(&ipa_to_apps_pipe_handle_guard);
@@ -2115,6 +2163,8 @@ static int ipa_wwan_remove(struct platform_device *pdev)
IPAWANERR("Failed to teardown IPA->APPS pipe\n");
else
ipa_to_apps_hdl = -1;
+ if (ipa_rmnet_res.ipa_napi_enable)
+ netif_napi_del(&(wwan_ptr->napi));
mutex_unlock(&ipa_to_apps_pipe_handle_guard);
unregister_netdev(ipa_netdevs[0]);
ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
@@ -2802,6 +2852,31 @@ static void ipa_wwan_msg_free_cb(void *buff, u32 len, u32 type)
kfree(buff);
}
+static void ipa_rmnet_rx_cb(void *priv)
+{
+ struct net_device *dev = priv;
+ struct wwan_private *wwan_ptr;
+
+ IPAWANDBG("\n");
+
+ if (dev != ipa_netdevs[0]) {
+ IPAWANERR("Not matching with netdev\n");
+ return;
+ }
+
+ wwan_ptr = netdev_priv(dev);
+ napi_schedule(&(wwan_ptr->napi));
+}
+
+static int ipa_rmnet_poll(struct napi_struct *napi, int budget)
+{
+ int rcvd_pkts = 0;
+
+ rcvd_pkts = ipa_rx_poll(ipa_to_apps_hdl, NAPI_WEIGHT);
+ IPAWANDBG("rcvd packets: %d\n", rcvd_pkts);
+ return rcvd_pkts;
+}
+
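Note the inversion relative to a textbook NAPI driver: ipa_rmnet_poll() never calls napi_complete() itself; completion arrives asynchronously through the IPA_CLIENT_COMP_NAPI callback handled in apps_ipa_packet_receive_notify() above. For contrast, a conventional poll handler follows this generic skeleton (reference sketch only; hw_process_rx()/hw_enable_rx_irq() are hypothetical helpers, not part of this driver):

	static int generic_napi_poll(struct napi_struct *napi, int budget)
	{
		int done = hw_process_rx(napi, budget);	/* drain up to budget */

		if (done < budget) {
			napi_complete(napi);		/* back to IRQ mode */
			hw_enable_rx_irq(napi);
		}
		return done;
	}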
late_initcall(ipa_wwan_init);
module_exit(ipa_wwan_cleanup);
MODULE_DESCRIPTION("WWAN Network Interface");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index f5d202bfebce..f480cba044eb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1010,25 +1010,28 @@ static void ipa3_sps_irq_rx_notify(struct sps_event_notify *notify)
if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
if (!atomic_read(&sys->curr_polling_state)) {
- ret = sps_get_config(sys->ep->ep_hdl,
- &sys->ep->connect);
- if (ret) {
- IPAERR("sps_get_config() failed %d\n", ret);
- break;
- }
- sys->ep->connect.options = SPS_O_AUTO_ENABLE |
- SPS_O_ACK_TRANSFERS | SPS_O_POLL;
- ret = sps_set_config(sys->ep->ep_hdl,
- &sys->ep->connect);
- if (ret) {
- IPAERR("sps_set_config() failed %d\n", ret);
- break;
- }
- ipa3_inc_acquire_wakelock();
- atomic_set(&sys->curr_polling_state, 1);
- trace_intr_to_poll3(sys->ep->client);
- queue_work(sys->wq, &sys->work);
+ sys->ep->eot_in_poll_err++;
+ break;
}
+
+ ret = sps_get_config(sys->ep->ep_hdl,
+ &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_get_config() failed %d\n", ret);
+ break;
+ }
+ sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+ SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+ ret = sps_set_config(sys->ep->ep_hdl,
+ &sys->ep->connect);
+ if (ret) {
+ IPAERR("sps_set_config() failed %d\n", ret);
+ break;
+ }
+ ipa3_inc_acquire_wakelock();
+ atomic_set(&sys->curr_polling_state, 1);
+ trace_intr_to_poll3(sys->ep->client);
+ queue_work(sys->wq, &sys->work);
break;
default:
IPAERR("received unexpected event id %d\n", notify->event_id);
@@ -1089,7 +1092,18 @@ static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
dwork = container_of(work, struct delayed_work, work);
sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
- ipa3_handle_rx(sys);
+
+ if (sys->ep->napi_enabled) {
+ if (sys->ep->switch_to_intr) {
+ ipa3_rx_switch_to_intr_mode(sys);
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
+ sys->ep->switch_to_intr = false;
+ sys->ep->inactive_cycles = 0;
+ } else
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_CLIENT_START_POLL, 0);
+ } else
+ ipa3_handle_rx(sys);
}
/**
@@ -1217,6 +1231,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
ep->valid = 1;
ep->client = sys_in->client;
ep->client_notify = sys_in->notify;
+ ep->napi_enabled = sys_in->napi_enabled;
ep->priv = sys_in->priv;
ep->keep_ipa_awake = sys_in->keep_ipa_awake;
atomic_set(&ep->avail_fifo_desc,
@@ -1423,6 +1438,12 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
ipa3_disable_data_path(clnt_hdl);
+ if (ep->napi_enabled) {
+ ep->switch_to_intr = true;
+ do {
+ usleep_range(95, 105);
+ } while (atomic_read(&ep->sys->curr_polling_state));
+ }
if (IPA_CLIENT_IS_PROD(ep->client)) {
do {
@@ -1772,7 +1793,13 @@ static void ipa3_wq_handle_rx(struct work_struct *work)
struct ipa3_sys_context *sys;
sys = container_of(work, struct ipa3_sys_context, work);
- ipa3_handle_rx(sys);
+
+ if (sys->ep->napi_enabled) {
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
+ sys->ep->client_notify(sys->ep->priv,
+ IPA_CLIENT_START_POLL, 0);
+ } else
+ ipa3_handle_rx(sys);
}
static void ipa3_wq_repl_rx(struct work_struct *work)
@@ -2717,6 +2744,11 @@ static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
IPA_RECEIVE, (unsigned long)(skb));
return rc;
}
+ if (sys->repl_hdlr == ipa3_replenish_rx_cache_recycle) {
+ IPAERR("Recycle should enable only with GRO Aggr\n");
+ ipa_assert();
+ }
+
/*
* payload splits across 2 buff or more,
* take the start of the payload from prev_skb
@@ -2909,6 +2941,30 @@ static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
spin_unlock_bh(&rx_pkt->sys->spinlock);
}
+void ipa3_recycle_wan_skb(struct sk_buff *skb)
+{
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
+ int ep_idx = ipa3_get_ep_mapping(
+ IPA_CLIENT_APPS_WAN_CONS);
+ gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+ if (unlikely(ep_idx == -1)) {
+ IPAERR("dest EP does not exist\n");
+ ipa_assert();
+ }
+
+ rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+ flag);
+ if (!rx_pkt)
+ ipa_assert();
+
+ INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+ rx_pkt->sys = ipa3_ctx->ep[ep_idx].sys;
+
+ rx_pkt->data.skb = skb;
+ ipa3_recycle_rx_wrapper(rx_pkt);
+}
+
static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
{
struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
@@ -3123,14 +3179,22 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
IPA_CLIENT_APPS_WAN_CONS) {
sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
sys->free_rx_wrapper = ipa3_free_rx_wrapper;
- if (nr_cpu_ids > 1)
- sys->repl_hdlr =
- ipa3_fast_replenish_rx_cache;
- else
+ if (in->napi_enabled) {
sys->repl_hdlr =
- ipa3_replenish_rx_cache;
- sys->rx_pool_sz =
- ipa3_ctx->wan_rx_ring_size;
+ ipa3_replenish_rx_cache_recycle;
+ sys->rx_pool_sz =
+ IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ } else {
+ if (nr_cpu_ids > 1) {
+ sys->repl_hdlr =
+ ipa3_fast_replenish_rx_cache;
+ } else {
+ sys->repl_hdlr =
+ ipa3_replenish_rx_cache;
+ }
+ sys->rx_pool_sz =
+ ipa3_ctx->wan_rx_ring_size;
+ }
in->ipa_ep_cfg.aggr.aggr_sw_eof_active
= true;
if (ipa3_ctx->
@@ -3941,68 +4005,88 @@ static int ipa_populate_tag_field(struct ipa3_desc *desc,
return 0;
}
-static int ipa_handle_rx_core_gsi(struct ipa3_sys_context *sys,
- bool process_all, bool in_poll_state)
+static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
+ struct ipa_mem_buffer *mem_info)
{
int ret;
- int cnt = 0;
- struct ipa3_sys_context *sys_ptr;
- struct ipa3_rx_pkt_wrapper *rx_pkt;
struct gsi_chan_xfer_notify xfer_notify;
- struct ipa_mem_buffer mem_info = {0};
- enum ipa_client_type client;
+ struct ipa3_rx_pkt_wrapper *rx_pkt;
if (sys->ep->bytes_xfered_valid) {
- mem_info.phys_base = sys->ep->phys_base;
- mem_info.size = (u32)sys->ep->bytes_xfered;
- sys_ptr = sys;
- if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
- ipa3_dma_memcpy_notify(sys_ptr, &mem_info);
- else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
- ipa3_wlan_wq_rx_common(sys_ptr, mem_info.size);
- else
- ipa3_wq_rx_common(sys_ptr, mem_info.size);
-
- cnt++;
+ mem_info->phys_base = sys->ep->phys_base;
+ mem_info->size = (u32)sys->ep->bytes_xfered;
sys->ep->bytes_xfered_valid = false;
+ return GSI_STATUS_SUCCESS;
+ }
+
+ ret = gsi_poll_channel(sys->ep->gsi_chan_hdl,
+ &xfer_notify);
+ if (ret == GSI_STATUS_POLL_EMPTY)
+ return ret;
+ else if (ret != GSI_STATUS_SUCCESS) {
+ IPAERR("Poll channel err: %d\n", ret);
+ return ret;
}
+ rx_pkt = (struct ipa3_rx_pkt_wrapper *)
+ xfer_notify.xfer_user_data;
+ mem_info->phys_base = rx_pkt->data.dma_addr;
+ mem_info->size = xfer_notify.bytes_xfered;
+
+ return ret;
+}
+
+static int ipa_handle_rx_core_gsi(struct ipa3_sys_context *sys,
+ bool process_all, bool in_poll_state)
+{
+ int ret;
+ int cnt = 0;
+ struct ipa_mem_buffer mem_info = {0};
+
while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
!atomic_read(&sys->curr_polling_state))) {
if (cnt && !process_all)
break;
- ret = gsi_poll_channel(sys->ep->gsi_chan_hdl,
- &xfer_notify);
- if (ret == GSI_STATUS_POLL_EMPTY)
+ ret = ipa_poll_gsi_pkt(sys, &mem_info);
+ if (ret)
break;
- else if (ret == GSI_STATUS_SUCCESS) {
- sys_ptr = (struct ipa3_sys_context *)
- xfer_notify.chan_user_data;
- rx_pkt = (struct ipa3_rx_pkt_wrapper *)
- xfer_notify.xfer_user_data;
- mem_info.phys_base = rx_pkt->data.dma_addr;
- mem_info.size = xfer_notify.bytes_xfered;
-
- client = sys->ep->client;
- if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(client))
- ipa3_dma_memcpy_notify(sys_ptr, &mem_info);
- else if (IPA_CLIENT_IS_WLAN_CONS(client))
- ipa3_wlan_wq_rx_common(sys_ptr, mem_info.size);
- else
- ipa3_wq_rx_common(sys_ptr, mem_info.size);
- cnt++;
- } else
- IPAERR("Poll channel err: %d\n", ret);
+ if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
+ ipa3_dma_memcpy_notify(sys, &mem_info);
+ else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
+ ipa3_wlan_wq_rx_common(sys, mem_info.size);
+ else
+ ipa3_wq_rx_common(sys, mem_info.size);
+
+ cnt++;
}
return cnt;
}
+static int ipa_poll_sps_pkt(struct ipa3_sys_context *sys,
+ struct ipa_mem_buffer *mem_info)
+{
+ int ret;
+ struct sps_iovec iov;
+
+ ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+ if (ret) {
+ IPAERR("sps_get_iovec failed %d\n", ret);
+ return ret;
+ }
+
+ if (iov.addr == 0)
+ return -EIO;
+
+ mem_info->phys_base = iov.addr;
+ mem_info->size = iov.size;
+ return 0;
+}
+
static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
bool process_all, bool in_poll_state)
{
- struct sps_iovec iov;
int ret;
int cnt = 0;
struct ipa_mem_buffer mem_info = {0};
@@ -4012,17 +4096,10 @@ static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
if (cnt && !process_all)
break;
- ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
- if (ret) {
- IPAERR("sps_get_iovec failed %d\n", ret);
- break;
- }
-
- if (iov.addr == 0)
+ ret = ipa_poll_sps_pkt(sys, &mem_info);
+ if (ret)
break;
- mem_info.phys_base = iov.addr;
- mem_info.size = iov.size;
if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
ipa3_dma_memcpy_notify(sys, &mem_info);
else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
@@ -4032,6 +4109,64 @@ static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
cnt++;
}
+
+ return cnt;
+}
+
+/**
+ * ipa3_rx_poll() - Poll the rx packets from IPA HW. This
+ * function is executed in the softirq context
+ *
+ * if no packets are pending, the driver switches back to
+ * interrupt mode
+ *
+ * return number of polled packets, on error 0(zero)
+ */
+int ipa3_rx_poll(u32 clnt_hdl, int weight)
+{
+ struct ipa3_ep_context *ep;
+ int ret;
+ int cnt = 0;
+ unsigned int delay = 1;
+ struct ipa_mem_buffer mem_info = {0};
+
+ IPADBG("\n");
+ if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+ ipa3_ctx->ep[clnt_hdl].valid == 0) {
+ IPAERR("bad parm 0x%x\n", clnt_hdl);
+ return cnt;
+ }
+
+ ep = &ipa3_ctx->ep[clnt_hdl];
+
+ while (cnt < weight &&
+ atomic_read(&ep->sys->curr_polling_state)) {
+
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+ ret = ipa_poll_gsi_pkt(ep->sys, &mem_info);
+ else
+ ret = ipa_poll_sps_pkt(ep->sys, &mem_info);
+
+ if (ret)
+ break;
+
+ ipa3_wq_rx_common(ep->sys, mem_info.size);
+ cnt += 5;
+ }
+
+ if (cnt == 0) {
+ ep->inactive_cycles++;
+ ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
+
+ if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
+ ep->switch_to_intr = true;
+ delay = 0;
+ }
+ queue_delayed_work(ep->sys->wq,
+ &ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
+ } else
+ ep->inactive_cycles = 0;
+
return cnt;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index d50a25aa84f4..36cb5cbb8d34 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -41,6 +41,8 @@
#define MTU_BYTE 1500
#define IPA3_MAX_NUM_PIPES 31
+#define IPA_WAN_CONS_DESC_FIFO_SZ 0x5E80
+#define IPA_WAN_NAPI_CONS_RX_POOL_SZ 3000
#define IPA_SYS_DESC_FIFO_SZ 0x800
#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
#define IPA_LAN_RX_HEADER_LENGTH (2)
@@ -550,6 +552,7 @@ struct ipa3_status_stats {
* @disconnect_in_progress: Indicates client disconnect in progress.
* @qmi_request_sent: Indicates whether QMI request to enable clear data path
* request is sent or not.
+ * @napi_enabled: when true, IPA calls the client callback to start polling
*/
struct ipa3_ep_context {
int valid;
@@ -586,6 +589,10 @@ struct ipa3_ep_context {
u32 wdi_state;
bool disconnect_in_progress;
u32 qmi_request_sent;
+ bool napi_enabled;
+ bool switch_to_intr;
+ int inactive_cycles;
+ u32 eot_in_poll_err;
/* sys MUST be the last element of this struct */
struct ipa3_sys_context *sys;
@@ -2262,4 +2269,6 @@ int ipa3_load_fws(const struct firmware *firmware);
int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data);
const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
int ipa_gsi_ch20_wa(void);
+int ipa3_rx_poll(u32 clnt_hdl, int budget);
+void ipa3_recycle_wan_skb(struct sk_buff *skb);
#endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
index 5ea6c6daf240..b67899ba894e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -127,6 +127,24 @@ TRACE_EVENT(
TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
);
+TRACE_EVENT(
+ rmnet_ipa_netif_rcv_skb3,
+
+ TP_PROTO(unsigned long rx_pkt_cnt),
+
+ TP_ARGS(rx_pkt_cnt),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, rx_pkt_cnt)
+ ),
+
+ TP_fast_assign(
+ __entry->rx_pkt_cnt = rx_pkt_cnt;
+ ),
+
+ TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
#endif /* _IPA_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 2481d6e7bfb7..c2a70bca80b1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -4657,6 +4657,8 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
ipa3_set_required_perf_profile;
api_ctrl->ipa_get_ipc_logbuf = ipa3_get_ipc_logbuf;
api_ctrl->ipa_get_ipc_logbuf_low = ipa3_get_ipc_logbuf_low;
+ api_ctrl->ipa_rx_poll = ipa3_rx_poll;
+ api_ctrl->ipa_recycle_wan_skb = ipa3_recycle_wan_skb;
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 5dcb25876fd4..a4eab02cb571 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -57,6 +57,7 @@
#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
+#define NAPI_WEIGHT 60
#define IPA_NETDEV() \
((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? \
@@ -66,6 +67,8 @@
static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
static void ipa3_wwan_msg_free_cb(void*, u32, u32);
+static void ipa3_rmnet_rx_cb(void *priv);
+static int ipa3_rmnet_poll(struct napi_struct *napi, int budget);
static void ipa3_wake_tx_queue(struct work_struct *work);
static DECLARE_WORK(ipa3_tx_wakequeue_work, ipa3_wake_tx_queue);
@@ -83,6 +86,7 @@ struct ipa3_rmnet_plat_drv_res {
bool ipa_rmnet_ssr;
bool ipa_loaduC;
bool ipa_advertise_sg_support;
+ bool ipa_napi_enable;
};
/**
@@ -109,6 +113,7 @@ struct ipa3_wwan_private {
spinlock_t lock;
struct completion resource_granted_completion;
enum ipa3_wwan_device_status device_status;
+ struct napi_struct napi;
};
struct rmnet_ipa3_context {
@@ -134,6 +139,7 @@ struct rmnet_ipa3_context {
};
static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
+static struct ipa3_rmnet_plat_drv_res ipa3_rmnet_res;
/**
* ipa3_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
@@ -957,6 +963,9 @@ static int __ipa_wwan_open(struct net_device *dev)
if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
reinit_completion(&wwan_ptr->resource_granted_completion);
wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
+
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ napi_enable(&(wwan_ptr->napi));
return 0;
}
@@ -1189,39 +1198,47 @@ static void apps_ipa_packet_receive_notify(void *priv,
enum ipa_dp_evt_type evt,
unsigned long data)
{
- struct sk_buff *skb = (struct sk_buff *)data;
struct net_device *dev = (struct net_device *)priv;
- int result;
- unsigned int packet_len = skb->len;
- IPAWANDBG_LOW("Rx packet was received");
- if (evt != IPA_RECEIVE) {
- IPAWANERR("A none IPA_RECEIVE event in wan_ipa_receive\n");
- return;
- }
+ if (evt == IPA_RECEIVE) {
+ struct sk_buff *skb = (struct sk_buff *)data;
+ int result;
+ unsigned int packet_len = skb->len;
- skb->dev = IPA_NETDEV();
- skb->protocol = htons(ETH_P_MAP);
+ IPAWANDBG_LOW("Rx packet was received");
+ skb->dev = IPA_NETDEV();
+ skb->protocol = htons(ETH_P_MAP);
- if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH == 0) {
- trace_rmnet_ipa_netifni3(dev->stats.rx_packets);
- result = netif_rx_ni(skb);
- } else {
- trace_rmnet_ipa_netifrx3(dev->stats.rx_packets);
- result = netif_rx(skb);
- }
+ if (ipa3_rmnet_res.ipa_napi_enable) {
+ trace_rmnet_ipa_netif_rcv_skb3(dev->stats.rx_packets);
+ result = netif_receive_skb(skb);
+ } else {
+ if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
+ == 0) {
+ trace_rmnet_ipa_netifni3(dev->stats.rx_packets);
+ result = netif_rx_ni(skb);
+ } else {
+ trace_rmnet_ipa_netifrx3(dev->stats.rx_packets);
+ result = netif_rx(skb);
+ }
+ }
- if (result) {
- pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_rx\n",
- __func__, __LINE__);
- dev->stats.rx_dropped++;
- }
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += packet_len;
+ if (result) {
+ pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
+ __func__, __LINE__);
+ dev->stats.rx_dropped++;
+ }
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += packet_len;
+ } else if (evt == IPA_CLIENT_START_POLL)
+ ipa3_rmnet_rx_cb(priv);
+ else if (evt == IPA_CLIENT_COMP_NAPI) {
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ napi_complete(&(rmnet_ipa3_ctx->wwan_priv->napi));
+ } else
+ IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
}
-static struct ipa3_rmnet_plat_drv_res ipa3_rmnet_res = {0, };
-
/**
* ipa3_wwan_ioctl() - I/O control for wwan network driver.
*
@@ -1595,10 +1612,17 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
IPA_CLIENT_APPS_WAN_CONS;
rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.notify =
apps_ipa_packet_receive_notify;
- rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.desc_fifo_sz =
- IPA_SYS_DESC_FIFO_SZ;
rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.priv = dev;
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.napi_enabled =
+ ipa3_rmnet_res.ipa_napi_enable;
+ if (rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.napi_enabled)
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+ desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
+ else
+ rmnet_ipa3_ctx->ipa_to_apps_ep_cfg.
+ desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+
mutex_lock(
&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
@@ -2126,6 +2150,9 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
if (ipa3_rmnet_res.ipa_advertise_sg_support)
dev->hw_features |= NETIF_F_SG;
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ netif_napi_add(dev, &(rmnet_ipa3_ctx->wwan_priv->napi),
+ ipa3_rmnet_poll, NAPI_WEIGHT);
ret = register_netdev(dev);
if (ret) {
IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
@@ -2149,6 +2176,8 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
pr_info("rmnet_ipa completed initialization\n");
return 0;
config_err:
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
unregister_netdev(dev);
set_perf_err:
ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
@@ -2196,6 +2225,8 @@ static int ipa3_wwan_remove(struct platform_device *pdev)
IPAWANERR("Failed to teardown IPA->APPS pipe\n");
else
rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
+ if (ipa3_rmnet_res.ipa_napi_enable)
+ netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
unregister_netdev(IPA_NETDEV());
ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
@@ -2903,6 +2934,22 @@ static void ipa3_wwan_msg_free_cb(void *buff, u32 len, u32 type)
kfree(buff);
}
+static void ipa3_rmnet_rx_cb(void *priv)
+{
+ IPAWANDBG_LOW("\n");
+ napi_schedule(&(rmnet_ipa3_ctx->wwan_priv->napi));
+}
+
+static int ipa3_rmnet_poll(struct napi_struct *napi, int budget)
+{
+ int rcvd_pkts = 0;
+
+ rcvd_pkts = ipa_rx_poll(rmnet_ipa3_ctx->ipa3_to_apps_hdl,
+ NAPI_WEIGHT);
+ IPAWANDBG_LOW("rcvd packets: %d\n", rcvd_pkts);
+ return rcvd_pkts;
+}
+
late_initcall(ipa3_wwan_init);
module_exit(ipa3_wwan_cleanup);
MODULE_DESCRIPTION("WWAN Network Interface");
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index a0dd21d215d2..d152057af385 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -93,6 +93,8 @@ enum ipa_aggr_mode {
enum ipa_dp_evt_type {
IPA_RECEIVE,
IPA_WRITE_DONE,
+ IPA_CLIENT_START_POLL,
+ IPA_CLIENT_COMP_NAPI,
};
/**
@@ -538,6 +540,7 @@ struct ipa_ext_intf {
* @skip_ep_cfg: boolean field that determines if EP should be configured
* by IPA driver
* @keep_ipa_awake: when true, IPA will not be clock gated
+ * @napi_enabled: when true, IPA calls the client callback to start polling
*/
struct ipa_sys_connect_params {
struct ipa_ep_cfg ipa_ep_cfg;
@@ -547,6 +550,7 @@ struct ipa_sys_connect_params {
ipa_notify_cb notify;
bool skip_ep_cfg;
bool keep_ipa_awake;
+ bool napi_enabled;
};
/**
@@ -1233,6 +1237,8 @@ int ipa_tx_dp_mul(enum ipa_client_type dst,
struct ipa_tx_data_desc *data_desc);
void ipa_free_skb(struct ipa_rx_data *);
+int ipa_rx_poll(u32 clnt_hdl, int budget);
+void ipa_recycle_wan_skb(struct sk_buff *skb);
/*
* System pipes
@@ -1763,6 +1769,15 @@ static inline void ipa_free_skb(struct ipa_rx_data *rx_in)
return;
}
+static inline int ipa_rx_poll(u32 clnt_hdl, int budget)
+{
+ return -EPERM;
+}
+
+static inline void ipa_recycle_wan_skb(struct sk_buff *skb)
+{
+}
+
/*
* System pipes
*/