Diffstat (limited to 'net')
-rw-r--r--  net/Kconfig | 22
-rw-r--r--  net/Makefile | 2
-rw-r--r--  net/bluetooth/af_bluetooth.c | 18
-rw-r--r--  net/bluetooth/bnep/core.c | 2
-rw-r--r--  net/bluetooth/bnep/netdev.c | 4
-rw-r--r--  net/bluetooth/bnep/sock.c | 4
-rw-r--r--  net/bluetooth/cmtp/capi.c | 31
-rw-r--r--  net/bluetooth/cmtp/core.c | 10
-rw-r--r--  net/bluetooth/cmtp/sock.c | 4
-rw-r--r--  net/bluetooth/hci_conn.c | 42
-rw-r--r--  net/bluetooth/hci_core.c | 50
-rw-r--r--  net/bluetooth/hci_event.c | 8
-rw-r--r--  net/bluetooth/hci_sock.c | 20
-rw-r--r--  net/bluetooth/hci_sysfs.c | 4
-rw-r--r--  net/bluetooth/hidp/core.c | 21
-rw-r--r--  net/bluetooth/hidp/sock.c | 4
-rw-r--r--  net/bluetooth/l2cap_core.c | 191
-rw-r--r--  net/bluetooth/l2cap_sock.c | 44
-rw-r--r--  net/bluetooth/lib.c | 8
-rw-r--r--  net/bluetooth/mgmt.c | 14
-rw-r--r--  net/bluetooth/rfcomm/core.c | 130
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 47
-rw-r--r--  net/bluetooth/rfcomm/tty.c | 48
-rw-r--r--  net/bluetooth/sco.c | 64
-rw-r--r--  net/bluetooth/smp.c | 50
-rw-r--r--  net/core/Makefile | 1
-rw-r--r--  net/core/dev.c | 41
-rw-r--r--  net/core/flow_dissector.c | 35
-rw-r--r--  net/core/neighbour.c | 2
-rw-r--r--  net/core/net-procfs.c | 5
-rw-r--r--  net/core/skbuff.c | 26
-rw-r--r--  net/core/sock.c | 14
-rw-r--r--  net/core/sockev_nlmcast.c | 145
-rw-r--r--  net/ipc_router/Kconfig | 25
-rw-r--r--  net/ipc_router/Makefile | 7
-rw-r--r--  net/ipc_router/ipc_router_core.c | 4362
-rw-r--r--  net/ipc_router/ipc_router_private.h | 150
-rw-r--r--  net/ipc_router/ipc_router_security.c | 334
-rw-r--r--  net/ipc_router/ipc_router_security.h | 120
-rw-r--r--  net/ipc_router/ipc_router_socket.c | 687
-rw-r--r--  net/ipv4/af_inet.c | 27
-rw-r--r--  net/ipv4/fib_trie.c | 2
-rw-r--r--  net/ipv4/inet_connection_sock.c | 7
-rw-r--r--  net/ipv4/inet_lro.c | 316
-rw-r--r--  net/ipv4/ping.c | 2
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 30
-rw-r--r--  net/ipv4/tcp.c | 21
-rw-r--r--  net/ipv4/tcp_input.c | 3
-rw-r--r--  net/ipv4/tcp_ipv4.c | 12
-rw-r--r--  net/ipv4/tcp_timer.c | 34
-rw-r--r--  net/ipv4/udp.c | 13
-rw-r--r--  net/ipv4/xfrm4_policy.c | 11
-rw-r--r--  net/ipv6/addrconf.c | 30
-rw-r--r--  net/ipv6/datagram.c | 7
-rw-r--r--  net/ipv6/ip6_offload.c | 3
-rw-r--r--  net/ipv6/ip6_output.c | 3
-rw-r--r--  net/ipv6/route.c | 3
-rw-r--r--  net/ipv6/tcp_ipv6.c | 6
-rw-r--r--  net/ipv6/xfrm6_policy.c | 8
-rw-r--r--  net/mac80211/agg-rx.c | 25
-rw-r--r--  net/mac80211/agg-tx.c | 53
-rw-r--r--  net/mac80211/driver-ops.c | 10
-rw-r--r--  net/mac80211/driver-ops.h | 4
-rw-r--r--  net/mac80211/ieee80211_i.h | 1
-rw-r--r--  net/mac80211/iface.c | 1
-rw-r--r--  net/mac80211/rx.c | 23
-rw-r--r--  net/mac80211/sta_info.c | 1
-rw-r--r--  net/mac80211/trace.h | 43
-rw-r--r--  net/mac80211/tx.c | 8
-rw-r--r--  net/mac80211/util.c | 58
-rw-r--r--  net/netfilter/Kconfig | 14
-rw-r--r--  net/netfilter/Makefile | 1
-rw-r--r--  net/netfilter/core.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 18
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 6
-rw-r--r--  net/netfilter/xt_HARDIDLETIMER.c | 381
-rw-r--r--  net/netfilter/xt_IDLETIMER.c | 14
-rw-r--r--  net/netfilter/xt_qtaguid.c | 150
-rw-r--r--  net/netfilter/xt_quota2.c | 131
-rw-r--r--  net/netlink/af_netlink.c | 54
-rw-r--r--  net/netlink/af_netlink.h | 1
-rw-r--r--  net/netlink/genetlink.c | 2
-rw-r--r--  net/rmnet_data/Kconfig | 29
-rw-r--r--  net/rmnet_data/Makefile | 14
-rw-r--r--  net/rmnet_data/rmnet_data_config.c | 1243
-rw-r--r--  net/rmnet_data/rmnet_data_config.h | 132
-rw-r--r--  net/rmnet_data/rmnet_data_handlers.c | 771
-rw-r--r--  net/rmnet_data/rmnet_data_handlers.h | 25
-rw-r--r--  net/rmnet_data/rmnet_data_main.c | 62
-rw-r--r--  net/rmnet_data/rmnet_data_private.h | 77
-rw-r--r--  net/rmnet_data/rmnet_data_stats.c | 145
-rw-r--r--  net/rmnet_data/rmnet_data_stats.h | 62
-rw-r--r--  net/rmnet_data/rmnet_data_trace.h | 333
-rw-r--r--  net/rmnet_data/rmnet_data_vnd.c | 1115
-rw-r--r--  net/rmnet_data/rmnet_data_vnd.h | 41
-rw-r--r--  net/rmnet_data/rmnet_map.h | 150
-rw-r--r--  net/rmnet_data/rmnet_map_command.c | 210
-rw-r--r--  net/rmnet_data/rmnet_map_data.c | 757
-rw-r--r--  net/rose/rose_in.c | 3
-rw-r--r--  net/sched/sch_api.c | 39
-rw-r--r--  net/sched/sch_prio.c | 22
-rw-r--r--  net/socket.c | 60
-rw-r--r--  net/wireless/ap.c | 2
-rw-r--r--  net/wireless/chan.c | 20
-rw-r--r--  net/wireless/core.c | 57
-rw-r--r--  net/wireless/core.h | 14
-rw-r--r--  net/wireless/db.txt | 1508
-rw-r--r--  net/wireless/mlme.c | 24
-rw-r--r--  net/wireless/nl80211.c | 899
-rw-r--r--  net/wireless/nl80211.h | 4
-rw-r--r--  net/wireless/rdev-ops.h | 20
-rw-r--r--  net/wireless/reg.c | 61
-rw-r--r--  net/wireless/scan.c | 2
-rw-r--r--  net/wireless/sme.c | 110
-rw-r--r--  net/wireless/sysfs.c | 18
-rw-r--r--  net/wireless/trace.h | 29
-rw-r--r--  net/wireless/util.c | 185
-rw-r--r--  net/xfrm/xfrm_policy.c | 23
118 files changed, 15439 insertions, 1162 deletions
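Nearly all of the Bluetooth hunks that follow are a mechanical %p -> %pK conversion in BT_DBG()/BT_ERR() format strings. As a quick illustration (not part of the patch itself), %pK lets printk censor kernel addresses according to the kptr_restrict sysctl, whereas plain %p on kernels of this vintage (pre-4.15) prints the raw pointer:

    /* illustrative only, not from this diff */
    BT_DBG("sk %p", sk);   /* always prints the real address on these kernels */
    BT_DBG("sk %pK", sk);  /* prints zeros when kptr_restrict >= 1 and the
                            * reader lacks CAP_SYSLOG; raw address otherwise */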
diff --git a/net/Kconfig b/net/Kconfig
index ce9585cf343a..5cff5877d7d1 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -48,6 +48,16 @@ config COMPAT_NETLINK_MESSAGES
config NET_INGRESS
bool
+config DISABLE_NET_SKB_FRAG_CACHE
+ bool "Disable skb fragment cache"
+ help
+ Enabling this option ensures that when allocating skbs the network
+ skb fragment cache is not used.
+ Disabling use of the fragment cache can be useful on some low end
+ targets because it reduces memory pressure.
+
+ If you are unsure how to answer this question, answer N.
+
menu "Networking options"
source "net/packet/Kconfig"
@@ -239,6 +249,7 @@ source "net/mpls/Kconfig"
source "net/hsr/Kconfig"
source "net/switchdev/Kconfig"
source "net/l3mdev/Kconfig"
+source "net/rmnet_data/Kconfig"
config RPS
bool
@@ -303,6 +314,15 @@ config NET_FLOW_LIMIT
with many clients some protection against DoS by a single (spoofed)
flow that greatly exceeds average workload.
+config SOCKEV_NLMCAST
+ bool "Enable SOCKEV Netlink Multicast"
+ default n
+ ---help---
+ Default client for SOCKEV notifier events. Sends multicast netlink
+ messages whenever the socket event notifier is invoked. Enable if
+ user space entities need to be notified of socket events without
+ having to poll /proc
+
menu "Network testing"
config NET_PKTGEN
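SOCKEV_NLMCAST builds net/core/sockev_nlmcast.c (new in the diffstat above), which broadcasts socket events over a netlink multicast group so userspace need not poll /proc. A sketch of the userspace side; the netlink protocol and group numbers below are assumptions, since they are defined by the module and not shown here:

    /* sketch only -- NETLINK_SOCKEV and the group id are assumed values */
    #include <linux/netlink.h>
    #include <sys/socket.h>
    #include <stdio.h>

    #define NETLINK_SOCKEV 26  /* hypothetical protocol number */

    int main(void)
    {
            struct sockaddr_nl sa = { .nl_family = AF_NETLINK,
                                      .nl_groups = 1 /* hypothetical group */ };
            char buf[4096];
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCKEV);

            if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                    return 1;
            for (;;) {  /* one datagram arrives per socket event */
                    ssize_t n = recv(fd, buf, sizeof(buf), 0);
                    if (n > 0)
                            printf("sockev message, %zd bytes\n", n);
            }
    }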
@@ -389,6 +409,8 @@ config LWTUNNEL
weight tunnel endpoint. Tunnel encapsulation parameters are stored
with light weight tunnel state associated with fib routes.
+source "net/ipc_router/Kconfig"
+
endif # if NET
# Used by archs to tell that they support BPF_JIT
diff --git a/net/Makefile b/net/Makefile
index a5d04098dfce..e700aa62b1af 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -77,3 +77,5 @@ endif
ifneq ($(CONFIG_NET_L3_MASTER_DEV),)
obj-y += l3mdev/
endif
+obj-$(CONFIG_IPC_ROUTER) += ipc_router/
+obj-$(CONFIG_RMNET_DATA) += rmnet_data/
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 709ce9fb15f3..5edc3212fb2a 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -183,7 +183,7 @@ EXPORT_SYMBOL(bt_sock_unlink);
void bt_accept_enqueue(struct sock *parent, struct sock *sk)
{
- BT_DBG("parent %p, sk %p", parent, sk);
+ BT_DBG("parent %pK, sk %pK", parent, sk);
sock_hold(sk);
list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
@@ -194,7 +194,7 @@ EXPORT_SYMBOL(bt_accept_enqueue);
void bt_accept_unlink(struct sock *sk)
{
- BT_DBG("sk %p state %d", sk, sk->sk_state);
+ BT_DBG("sk %pK state %d", sk, sk->sk_state);
list_del_init(&bt_sk(sk)->accept_q);
bt_sk(sk)->parent->sk_ack_backlog--;
@@ -208,7 +208,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
struct list_head *p, *n;
struct sock *sk;
- BT_DBG("parent %p", parent);
+ BT_DBG("parent %pK", parent);
list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
@@ -248,7 +248,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
size_t copied;
int err;
- BT_DBG("sock %p sk %p len %zu", sock, sk, len);
+ BT_DBG("sock %pK sk %pK len %zu", sock, sk, len);
if (flags & MSG_OOB)
return -EOPNOTSUPP;
@@ -323,7 +323,7 @@ int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
if (flags & MSG_OOB)
return -EOPNOTSUPP;
- BT_DBG("sk %p size %zu", sk, size);
+ BT_DBG("sk %pK size %zu", sk, size);
lock_sock(sk);
@@ -439,7 +439,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
unsigned int mask = 0;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %pK, sk %pK", sock, sk);
poll_wait(file, sk_sleep(sk), wait);
@@ -483,7 +483,7 @@ int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
long amount;
int err;
- BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);
+ BT_DBG("sk %pK cmd %x arg %lx", sk, cmd, arg);
switch (cmd) {
case TIOCOUTQ:
@@ -530,7 +530,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
DECLARE_WAITQUEUE(wait, current);
int err = 0;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
add_wait_queue(sk_sleep(sk), &wait);
set_current_state(TASK_INTERRUPTIBLE);
@@ -567,7 +567,7 @@ int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
unsigned long timeo;
int err = 0;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 1641367e54ca..abeccb56fbbd 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -427,7 +427,7 @@ static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
int len = 0, il = 0;
u8 type = 0;
- BT_DBG("skb %p dev %p type %d", skb, skb->dev, skb->pkt_type);
+ BT_DBG("skb %pK dev %pK type %d", skb, skb->dev, skb->pkt_type);
if (!skb->dev) {
/* Control frame sent by us */
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 6ceb5d36a32b..f03713cd57e8 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -156,7 +156,7 @@ static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
return 0;
}
- BT_DBG("BNEP: filtered skb %p, proto 0x%.4x", skb, proto);
+ BT_DBG("BNEP: filtered skb %pK, proto 0x%.4x", skb, proto);
return 1;
}
#endif
@@ -167,7 +167,7 @@ static netdev_tx_t bnep_net_xmit(struct sk_buff *skb,
struct bnep_session *s = netdev_priv(dev);
struct sock *sk = s->sock->sk;
- BT_DBG("skb %p, dev %p", skb, dev);
+ BT_DBG("skb %pK, dev %pK", skb, dev);
#ifdef CONFIG_BT_BNEP_MC_FILTER
if (bnep_net_mc_filter(skb, s)) {
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index b5116fa9835e..5d1ec04e508a 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -37,7 +37,7 @@ static int bnep_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- BT_DBG("sock %p sk %p", sock, sk);
+ BT_DBG("sock %pK sk %pK", sock, sk);
if (!sk)
return 0;
@@ -197,7 +197,7 @@ static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
{
struct sock *sk;
- BT_DBG("sock %p", sock);
+ BT_DBG("sock %pK", sock);
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 9a50338772f3..c1cc231fe148 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -74,7 +74,7 @@ static struct cmtp_application *cmtp_application_add(struct cmtp_session *sessio
{
struct cmtp_application *app = kzalloc(sizeof(*app), GFP_KERNEL);
- BT_DBG("session %p application %p appl %d", session, app, appl);
+ BT_DBG("session %pK application %pK appl %d", session, app, appl);
if (!app)
return NULL;
@@ -89,7 +89,7 @@ static struct cmtp_application *cmtp_application_add(struct cmtp_session *sessio
static void cmtp_application_del(struct cmtp_session *session, struct cmtp_application *app)
{
- BT_DBG("session %p application %p", session, app);
+ BT_DBG("session %pK application %pK", session, app);
if (app) {
list_del(&app->list);
@@ -137,7 +137,7 @@ static void cmtp_send_capimsg(struct cmtp_session *session, struct sk_buff *skb)
{
struct cmtp_scb *scb = (void *) skb->cb;
- BT_DBG("session %p skb %p len %d", session, skb, skb->len);
+ BT_DBG("session %pK skb %pK len %d", session, skb, skb->len);
scb->id = -1;
scb->data = (CAPIMSG_COMMAND(skb->data) == CAPI_DATA_B3);
@@ -154,7 +154,8 @@ static void cmtp_send_interopmsg(struct cmtp_session *session,
struct sk_buff *skb;
unsigned char *s;
- BT_DBG("session %p subcmd 0x%02x appl %d msgnum %d", session, subcmd, appl, msgnum);
+ BT_DBG("session %pK subcmd 0x%02x appl %d msgnum %d",
+ session, subcmd, appl, msgnum);
skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC);
if (!skb) {
@@ -190,7 +191,7 @@ static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *s
__u16 appl, msgnum, func, info;
__u32 controller;
- BT_DBG("session %p skb %p len %d", session, skb, skb->len);
+ BT_DBG("session %pK skb %pK len %d", session, skb, skb->len);
switch (CAPIMSG_SUBCOMMAND(skb->data)) {
case CAPI_CONF:
@@ -323,7 +324,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
__u16 appl;
__u32 contr;
- BT_DBG("session %p skb %p len %d", session, skb, skb->len);
+ BT_DBG("session %pK skb %pK len %d", session, skb, skb->len);
if (skb->len < CAPI_MSG_BASELEN)
return;
@@ -361,7 +362,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
static int cmtp_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
{
- BT_DBG("ctrl %p data %p", ctrl, data);
+ BT_DBG("ctrl %pK data %pK", ctrl, data);
return 0;
}
@@ -370,7 +371,7 @@ static void cmtp_reset_ctr(struct capi_ctr *ctrl)
{
struct cmtp_session *session = ctrl->driverdata;
- BT_DBG("ctrl %p", ctrl);
+ BT_DBG("ctrl %pK", ctrl);
capi_ctr_down(ctrl);
@@ -387,8 +388,8 @@ static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_
unsigned char buf[8];
int err = 0, nconn, want = rp->level3cnt;
- BT_DBG("ctrl %p appl %d level3cnt %d datablkcnt %d datablklen %d",
- ctrl, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen);
+ BT_DBG("ctrl %pK appl %d level3cnt %d datablkcnt %d datablklen %d",
+ ctrl, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen);
application = cmtp_application_add(session, appl);
if (!application) {
@@ -452,7 +453,7 @@ static void cmtp_release_appl(struct capi_ctr *ctrl, __u16 appl)
struct cmtp_session *session = ctrl->driverdata;
struct cmtp_application *application;
- BT_DBG("ctrl %p appl %d", ctrl, appl);
+ BT_DBG("ctrl %pK appl %d", ctrl, appl);
application = cmtp_application_get(session, CMTP_APPLID, appl);
if (!application) {
@@ -478,7 +479,7 @@ static u16 cmtp_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
__u16 appl;
__u32 contr;
- BT_DBG("ctrl %p skb %p", ctrl, skb);
+ BT_DBG("ctrl %pK skb %pK", ctrl, skb);
appl = CAPIMSG_APPID(skb->data);
contr = CAPIMSG_CONTROL(skb->data);
@@ -543,7 +544,7 @@ int cmtp_attach_device(struct cmtp_session *session)
unsigned char buf[4];
long ret;
- BT_DBG("session %p", session);
+ BT_DBG("session %pK", session);
capimsg_setu32(buf, 0, 0);
@@ -585,7 +586,7 @@ int cmtp_attach_device(struct cmtp_session *session)
session->num = session->ctrl.cnr;
- BT_DBG("session %p num %d", session, session->num);
+ BT_DBG("session %pK num %d", session, session->num);
capimsg_setu32(buf, 0, 1);
@@ -606,7 +607,7 @@ int cmtp_attach_device(struct cmtp_session *session)
void cmtp_detach_device(struct cmtp_session *session)
{
- BT_DBG("session %p", session);
+ BT_DBG("session %pK", session);
detach_capi_ctr(&session->ctrl);
}
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 298ed37010e6..011747337858 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -109,7 +109,7 @@ static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const
struct sk_buff *skb = session->reassembly[id], *nskb;
int size;
- BT_DBG("session %p buf %p count %d", session, buf, count);
+ BT_DBG("session %pK buf %pK count %d", session, buf, count);
size = (skb) ? skb->len + count : count;
@@ -134,7 +134,7 @@ static inline int cmtp_recv_frame(struct cmtp_session *session, struct sk_buff *
__u8 hdr, hdrlen, id;
__u16 len;
- BT_DBG("session %p skb %p len %d", session, skb, skb->len);
+ BT_DBG("session %pK skb %pK len %d", session, skb, skb->len);
while (skb->len > 0) {
hdr = skb->data[0];
@@ -197,7 +197,7 @@ static int cmtp_send_frame(struct cmtp_session *session, unsigned char *data, in
struct kvec iv = { data, len };
struct msghdr msg;
- BT_DBG("session %p data %p len %d", session, data, len);
+ BT_DBG("session %pK data %pK len %d", session, data, len);
if (!len)
return 0;
@@ -213,7 +213,7 @@ static void cmtp_process_transmit(struct cmtp_session *session)
unsigned char *hdr;
unsigned int size, tail;
- BT_DBG("session %p", session);
+ BT_DBG("session %pK", session);
nskb = alloc_skb(session->mtu, GFP_ATOMIC);
if (!nskb) {
@@ -283,7 +283,7 @@ static int cmtp_session(void *arg)
struct sk_buff *skb;
wait_queue_t wait;
- BT_DBG("session %p", session);
+ BT_DBG("session %pK", session);
set_user_nice(current, -15);
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index ce86a7bae844..e91ce530ed05 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -50,7 +50,7 @@ static int cmtp_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- BT_DBG("sock %p sk %p", sock, sk);
+ BT_DBG("sock %pK sk %pK", sock, sk);
if (!sk)
return 0;
@@ -200,7 +200,7 @@ static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol,
{
struct sock *sk;
- BT_DBG("sock %p", sock);
+ BT_DBG("sock %pK", sock);
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 24e9410923d0..2ad1f7fb65a3 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -141,7 +141,7 @@ static void le_scan_cleanup(struct work_struct *work)
struct hci_dev *hdev = conn->hdev;
struct hci_conn *c = NULL;
- BT_DBG("%s hcon %p", hdev->name, conn);
+ BT_DBG("%s hcon %pK", hdev->name, conn);
hci_dev_lock(hdev);
@@ -165,7 +165,7 @@ static void le_scan_cleanup(struct work_struct *work)
static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
- BT_DBG("%s hcon %p", conn->hdev->name, conn);
+ BT_DBG("%s hcon %pK", conn->hdev->name, conn);
/* We can't call hci_conn_del/hci_conn_cleanup here since that
* could deadlock with another hci_conn_del() call that's holding
@@ -187,7 +187,7 @@ static void hci_acl_create_connection(struct hci_conn *conn)
struct inquiry_entry *ie;
struct hci_cp_create_conn cp;
- BT_DBG("hcon %p", conn);
+ BT_DBG("hcon %pK", conn);
conn->state = BT_CONNECT;
conn->out = true;
@@ -226,7 +226,7 @@ static void hci_acl_create_connection(struct hci_conn *conn)
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
- BT_DBG("hcon %p", conn);
+ BT_DBG("hcon %pK", conn);
/* When we are master of an established connection and it enters
* the disconnect timeout, then go ahead and try to read the
@@ -251,7 +251,7 @@ static void hci_add_sco(struct hci_conn *conn, __u16 handle)
struct hci_dev *hdev = conn->hdev;
struct hci_cp_add_sco cp;
- BT_DBG("hcon %p", conn);
+ BT_DBG("hcon %pK", conn);
conn->state = BT_CONNECT;
conn->out = true;
@@ -270,7 +270,7 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
struct hci_cp_setup_sync_conn cp;
const struct sco_param *param;
- BT_DBG("hcon %p", conn);
+ BT_DBG("hcon %pK", conn);
conn->state = BT_CONNECT;
conn->out = true;
@@ -356,7 +356,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
struct hci_dev *hdev = conn->hdev;
struct hci_cp_le_start_enc cp;
- BT_DBG("hcon %p", conn);
+ BT_DBG("hcon %pK", conn);
memset(&cp, 0, sizeof(cp));
@@ -376,7 +376,7 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
if (!sco)
return;
- BT_DBG("hcon %p", conn);
+ BT_DBG("hcon %pK", conn);
if (!status) {
if (lmp_esco_capable(conn->hdev))
@@ -395,7 +395,7 @@ static void hci_conn_timeout(struct work_struct *work)
disc_work.work);
int refcnt = atomic_read(&conn->refcnt);
- BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
+ BT_DBG("hcon %pK state %s", conn, state_to_string(conn->state));
WARN_ON(refcnt < 0);
@@ -426,7 +426,7 @@ static void hci_conn_idle(struct work_struct *work)
idle_work.work);
struct hci_dev *hdev = conn->hdev;
- BT_DBG("hcon %p mode %d", conn, conn->mode);
+ BT_DBG("hcon %pK mode %d", conn, conn->mode);
if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
return;
@@ -566,7 +566,7 @@ int hci_conn_del(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
+ BT_DBG("%s hcon %pK handle %d", hdev->name, conn, conn->handle);
cancel_delayed_work_sync(&conn->disc_work);
cancel_delayed_work_sync(&conn->auto_accept_work);
@@ -1147,7 +1147,7 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
- BT_DBG("hcon %p", conn);
+ BT_DBG("hcon %pK", conn);
/* In Secure Connections Only mode, it is required that Secure
* Connections is used and the link is encrypted with AES-CCM
@@ -1170,7 +1170,7 @@ int hci_conn_check_link_mode(struct hci_conn *conn)
/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
- BT_DBG("hcon %p", conn);
+ BT_DBG("hcon %pK", conn);
if (conn->pending_sec_level > sec_level)
sec_level = conn->pending_sec_level;
@@ -1207,7 +1207,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
- BT_DBG("hcon %p", conn);
+ BT_DBG("hcon %pK", conn);
if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
struct hci_cp_set_conn_encrypt cp;
@@ -1222,7 +1222,7 @@ static void hci_conn_encrypt(struct hci_conn *conn)
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
bool initiator)
{
- BT_DBG("hcon %p", conn);
+ BT_DBG("hcon %pK", conn);
if (conn->type == LE_LINK)
return smp_conn_security(conn, sec_level);
@@ -1291,7 +1291,7 @@ EXPORT_SYMBOL(hci_conn_security);
/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
- BT_DBG("hcon %p", conn);
+ BT_DBG("hcon %pK", conn);
/* Accept if non-secure or higher security level is required */
if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
@@ -1310,7 +1310,7 @@ EXPORT_SYMBOL(hci_conn_check_secure);
/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
- BT_DBG("hcon %p", conn);
+ BT_DBG("hcon %pK", conn);
if (role == conn->role)
return 1;
@@ -1331,7 +1331,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("hcon %p mode %d", conn, conn->mode);
+ BT_DBG("hcon %pK mode %d", conn, conn->mode);
if (conn->mode != HCI_CM_SNIFF)
goto timer;
@@ -1511,7 +1511,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
struct hci_dev *hdev = conn->hdev;
struct hci_chan *chan;
- BT_DBG("%s hcon %p", hdev->name, conn);
+ BT_DBG("%s hcon %pK", hdev->name, conn);
if (test_bit(HCI_CONN_DROP, &conn->flags)) {
BT_DBG("Refusing to create new hci_chan");
@@ -1536,7 +1536,7 @@ void hci_chan_del(struct hci_chan *chan)
struct hci_conn *conn = chan->conn;
struct hci_dev *hdev = conn->hdev;
- BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
+ BT_DBG("%s hcon %pK chan %pK", hdev->name, conn, chan);
list_del_rcu(&chan->list);
@@ -1555,7 +1555,7 @@ void hci_chan_list_flush(struct hci_conn *conn)
{
struct hci_chan *chan, *n;
- BT_DBG("hcon %p", conn);
+ BT_DBG("hcon %pK", conn);
list_for_each_entry_safe(chan, n, &conn->chan_list, list)
hci_chan_del(chan);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 62edbf1b114e..da4078651c22 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1131,7 +1131,7 @@ struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
struct discovery_state *cache = &hdev->discovery;
struct inquiry_entry *e;
- BT_DBG("cache %p, %pMR", cache, bdaddr);
+ BT_DBG("cache %pK, %pMR", cache, bdaddr);
list_for_each_entry(e, &cache->all, all) {
if (!bacmp(&e->data.bdaddr, bdaddr))
@@ -1147,7 +1147,7 @@ struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
struct discovery_state *cache = &hdev->discovery;
struct inquiry_entry *e;
- BT_DBG("cache %p, %pMR", cache, bdaddr);
+ BT_DBG("cache %pK, %pMR", cache, bdaddr);
list_for_each_entry(e, &cache->unknown, list) {
if (!bacmp(&e->data.bdaddr, bdaddr))
@@ -1164,7 +1164,7 @@ struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
struct discovery_state *cache = &hdev->discovery;
struct inquiry_entry *e;
- BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
+ BT_DBG("cache %pK bdaddr %pMR state %d", cache, bdaddr, state);
list_for_each_entry(e, &cache->resolve, list) {
if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
@@ -1202,7 +1202,7 @@ u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
struct inquiry_entry *ie;
u32 flags = 0;
- BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
+ BT_DBG("cache %pK, %pMR", cache, &data->bdaddr);
hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
@@ -1281,7 +1281,7 @@ static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
copied++;
}
- BT_DBG("cache %p, copied %d", cache, copied);
+ BT_DBG("cache %pK, copied %d", cache, copied);
return copied;
}
@@ -1402,7 +1402,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
{
int ret = 0;
- BT_DBG("%s %p", hdev->name, hdev);
+ BT_DBG("%s %pK", hdev->name, hdev);
hci_req_lock(hdev);
@@ -1639,7 +1639,7 @@ int hci_dev_do_close(struct hci_dev *hdev)
{
bool auto_off;
- BT_DBG("%s %p", hdev->name, hdev);
+ BT_DBG("%s %pK", hdev->name, hdev);
if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
@@ -1788,7 +1788,7 @@ static int hci_dev_do_reset(struct hci_dev *hdev)
{
int ret;
- BT_DBG("%s %p", hdev->name, hdev);
+ BT_DBG("%s %pK", hdev->name, hdev);
hci_req_lock(hdev);
@@ -2122,7 +2122,7 @@ static int hci_rfkill_set_block(void *data, bool blocked)
{
struct hci_dev *hdev = data;
- BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
+ BT_DBG("%pK name %s blocked %d", hdev, hdev->name, blocked);
if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
return -EBUSY;
@@ -3353,7 +3353,7 @@ int hci_register_dev(struct hci_dev *hdev)
sprintf(hdev->name, "hci%d", id);
hdev->id = id;
- BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+ BT_DBG("%pK name %s bus %d", hdev, hdev->name, hdev->bus);
hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
WQ_MEM_RECLAIM, 1, hdev->name);
@@ -3434,7 +3434,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
{
int id;
- BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
+ BT_DBG("%pK name %s bus %d", hdev, hdev->name, hdev->bus);
hci_dev_set_flag(hdev, HCI_UNREGISTER);
@@ -3577,7 +3577,7 @@ EXPORT_SYMBOL(hci_recv_diag);
int hci_register_cb(struct hci_cb *cb)
{
- BT_DBG("%p name %s", cb, cb->name);
+ BT_DBG("%pK name %s", cb, cb->name);
mutex_lock(&hci_cb_list_lock);
list_add_tail(&cb->list, &hci_cb_list);
@@ -3589,7 +3589,7 @@ EXPORT_SYMBOL(hci_register_cb);
int hci_unregister_cb(struct hci_cb *cb)
{
- BT_DBG("%p name %s", cb, cb->name);
+ BT_DBG("%pK name %s", cb, cb->name);
mutex_lock(&hci_cb_list_lock);
list_del(&cb->list);
@@ -3733,12 +3733,12 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
list = skb_shinfo(skb)->frag_list;
if (!list) {
/* Non fragmented */
- BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
+ BT_DBG("%s nonfrag skb %pK len %d", hdev->name, skb, skb->len);
skb_queue_tail(queue, skb);
} else {
/* Fragmented */
- BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
+ BT_DBG("%s frag %pK len %d", hdev->name, skb, skb->len);
skb_shinfo(skb)->frag_list = NULL;
@@ -3759,7 +3759,7 @@ static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
hci_add_acl_hdr(skb, conn->handle, flags);
- BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
+ BT_DBG("%s frag %pK len %d", hdev->name, skb, skb->len);
__skb_queue_tail(queue, skb);
} while (list);
@@ -3772,7 +3772,7 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
struct hci_dev *hdev = chan->conn->hdev;
- BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
+ BT_DBG("%s chan %pK flags 0x%4.4x", hdev->name, chan, flags);
hci_queue_acl(chan, &chan->data_q, skb, flags);
@@ -3859,7 +3859,7 @@ static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
} else
*quote = 0;
- BT_DBG("conn %p quote %d", conn, *quote);
+ BT_DBG("conn %pK quote %d", conn, *quote);
return conn;
}
@@ -3962,7 +3962,7 @@ static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
q = cnt / num;
*quote = q ? q : 1;
- BT_DBG("chan %p quote %d", chan, *quote);
+ BT_DBG("chan %pK quote %d", chan, *quote);
return chan;
}
@@ -4004,7 +4004,7 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
skb->priority = HCI_PRIO_MAX - 1;
- BT_DBG("chan %p skb %p promoted to %d", chan, skb,
+ BT_DBG("chan %pK skb %pK promoted to %d", chan, skb,
skb->priority);
}
@@ -4046,7 +4046,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)
(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
while (quote-- && (skb = skb_peek(&chan->data_q))) {
- BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ BT_DBG("chan %pK skb %pK len %d priority %u", chan, skb,
skb->len, skb->priority);
/* Stop if priority has changed */
@@ -4094,7 +4094,7 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)
while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
int blocks;
- BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ BT_DBG("chan %pK skb %pK len %d priority %u", chan, skb,
skb->len, skb->priority);
/* Stop if priority has changed */
@@ -4162,7 +4162,7 @@ static void hci_sched_sco(struct hci_dev *hdev)
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
- BT_DBG("skb %p len %d", skb, skb->len);
+ BT_DBG("skb %pK len %d", skb, skb->len);
hci_send_frame(hdev, skb);
conn->sent++;
@@ -4186,7 +4186,7 @@ static void hci_sched_esco(struct hci_dev *hdev)
while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
&quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
- BT_DBG("skb %p len %d", skb, skb->len);
+ BT_DBG("skb %pK len %d", skb, skb->len);
hci_send_frame(hdev, skb);
conn->sent++;
@@ -4220,7 +4220,7 @@ static void hci_sched_le(struct hci_dev *hdev)
while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
while (quote-- && (skb = skb_peek(&chan->data_q))) {
- BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ BT_DBG("chan %pK skb %pK len %d priority %u", chan, skb,
skb->len, skb->priority);
/* Stop if priority has changed */
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index d57c11c1c6b5..0e5bf7e61603 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1469,7 +1469,7 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
+ BT_DBG("%s bdaddr %pMR hcon %pK", hdev->name, &cp->bdaddr, conn);
if (status) {
if (conn && conn->state == BT_CONNECT) {
@@ -3235,7 +3235,7 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
break;
default:
- BT_ERR("Unknown type %d conn %p", conn->type, conn);
+ BT_ERR("Unknown type %d conn %pK", conn->type, conn);
break;
}
}
@@ -3306,7 +3306,7 @@ static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
break;
default:
- BT_ERR("Unknown type %d conn %p", conn->type, conn);
+ BT_ERR("Unknown type %d conn %pK", conn->type, conn);
break;
}
}
@@ -4381,7 +4381,7 @@ static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hchan->handle = le16_to_cpu(ev->handle);
- BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
+ BT_DBG("hcon %pK mgr %pK hchan %pK", hcon, hcon->amp_mgr, hchan);
mgr = hcon->amp_mgr;
if (mgr && mgr->bredr_chan) {
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index b1eb8c09a660..87fd1a07326b 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -155,7 +155,7 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
struct sock *sk;
struct sk_buff *skb_copy = NULL;
- BT_DBG("hdev %p len %d", hdev, skb->len);
+ BT_DBG("hdev %pK len %d", hdev, skb->len);
read_lock(&hci_sk_list.lock);
@@ -260,7 +260,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
if (!atomic_read(&monitor_promisc))
return;
- BT_DBG("hdev %p len %d", hdev, skb->len);
+ BT_DBG("hdev %pK len %d", hdev, skb->len);
switch (bt_cb(skb)->pkt_type) {
case HCI_COMMAND_PKT:
@@ -553,7 +553,7 @@ static int hci_sock_release(struct socket *sock)
struct sock *sk = sock->sk;
struct hci_dev *hdev;
- BT_DBG("sock %p sk %p", sock, sk);
+ BT_DBG("sock %pK sk %pK", sock, sk);
if (!sk)
return 0;
@@ -753,7 +753,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
struct hci_dev *hdev = NULL;
int len, err = 0;
- BT_DBG("sock %p sk %p", sock, sk);
+ BT_DBG("sock %pK sk %pK", sock, sk);
if (!addr)
return -EINVAL;
@@ -931,7 +931,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
struct hci_dev *hdev;
int err = 0;
- BT_DBG("sock %p sk %p", sock, sk);
+ BT_DBG("sock %pK sk %pK", sock, sk);
if (peer)
return -EOPNOTSUPP;
@@ -999,7 +999,7 @@ static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
struct sk_buff *skb;
int copied, err;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %pK, sk %pK", sock, sk);
if (flags & MSG_OOB)
return -EOPNOTSUPP;
@@ -1159,7 +1159,7 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
struct sk_buff *skb;
int err;
- BT_DBG("sock %p sk %p", sock, sk);
+ BT_DBG("sock %pK sk %pK", sock, sk);
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
@@ -1288,7 +1288,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
struct sock *sk = sock->sk;
int err = 0, opt = 0;
- BT_DBG("sk %p, opt %d", sk, optname);
+ BT_DBG("sk %pK, opt %d", sk, optname);
lock_sock(sk);
@@ -1371,7 +1371,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
struct sock *sk = sock->sk;
int len, opt, err = 0;
- BT_DBG("sk %p, opt %d", sk, optname);
+ BT_DBG("sk %pK, opt %d", sk, optname);
if (get_user(len, optlen))
return -EFAULT;
@@ -1461,7 +1461,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
{
struct sock *sk;
- BT_DBG("sock %p", sock);
+ BT_DBG("sock %pK", sock);
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 555982a78a58..4f78b28686ff 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -77,7 +77,7 @@ void hci_conn_init_sysfs(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
conn->dev.type = &bt_link;
conn->dev.class = bt_class;
@@ -90,7 +90,7 @@ void hci_conn_add_sysfs(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 0bec4588c3c8..f02ffe558a08 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -101,7 +101,7 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock,
struct sk_buff *skb;
struct sock *sk = sock->sk;
- BT_DBG("session %p data %p size %d", session, data, size);
+ BT_DBG("session %pK data %pK size %d", session, data, size);
if (atomic_read(&session->terminate))
return -EIO;
@@ -145,7 +145,7 @@ static int hidp_input_event(struct input_dev *dev, unsigned int type,
unsigned char newleds;
unsigned char hdr, data[2];
- BT_DBG("session %p type %d code %d value %d",
+ BT_DBG("session %pK type %d code %d value %d",
session, type, code, value);
if (type != EV_LED)
@@ -443,7 +443,7 @@ static void hidp_process_report(struct hidp_session *session,
static void hidp_process_handshake(struct hidp_session *session,
unsigned char param)
{
- BT_DBG("session %p param 0x%02x", session, param);
+ BT_DBG("session %pK param 0x%02x", session, param);
session->output_report_success = 0; /* default condition */
switch (param) {
@@ -486,7 +486,7 @@ static void hidp_process_handshake(struct hidp_session *session,
static void hidp_process_hid_control(struct hidp_session *session,
unsigned char param)
{
- BT_DBG("session %p param 0x%02x", session, param);
+ BT_DBG("session %pK param 0x%02x", session, param);
if (param == HIDP_CTRL_VIRTUAL_CABLE_UNPLUG) {
/* Flush the transmit queues */
@@ -502,7 +502,8 @@ static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
unsigned char param)
{
int done_with_skb = 1;
- BT_DBG("session %p skb %p len %d param 0x%02x", session, skb, skb->len, param);
+ BT_DBG("session %pK skb %pK len %d param 0x%02x",
+ session, skb, skb->len, param);
switch (param) {
case HIDP_DATA_RTYPE_INPUT:
@@ -547,7 +548,7 @@ static void hidp_recv_ctrl_frame(struct hidp_session *session,
unsigned char hdr, type, param;
int free_skb = 1;
- BT_DBG("session %p skb %p len %d", session, skb, skb->len);
+ BT_DBG("session %pK skb %pK len %d", session, skb, skb->len);
hdr = skb->data[0];
skb_pull(skb, 1);
@@ -583,7 +584,7 @@ static void hidp_recv_intr_frame(struct hidp_session *session,
{
unsigned char hdr;
- BT_DBG("session %p skb %p len %d", session, skb, skb->len);
+ BT_DBG("session %pK skb %pK len %d", session, skb, skb->len);
hdr = skb->data[0];
skb_pull(skb, 1);
@@ -611,7 +612,7 @@ static int hidp_send_frame(struct socket *sock, unsigned char *data, int len)
struct kvec iv = { data, len };
struct msghdr msg;
- BT_DBG("sock %p data %p len %d", sock, data, len);
+ BT_DBG("sock %pK data %pK len %d", sock, data, len);
if (!len)
return 0;
@@ -629,7 +630,7 @@ static void hidp_process_transmit(struct hidp_session *session,
struct sk_buff *skb;
int ret;
- BT_DBG("session %p", session);
+ BT_DBG("session %pK", session);
while ((skb = skb_dequeue(transmit))) {
ret = hidp_send_frame(sock, skb->data, skb->len);
@@ -1246,7 +1247,7 @@ static int hidp_session_thread(void *arg)
struct hidp_session *session = arg;
wait_queue_t ctrl_wait, intr_wait;
- BT_DBG("session %p", session);
+ BT_DBG("session %pK", session);
/* initialize runtime environment */
hidp_session_get(session);
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 008ba439bd62..c06f9a0107d6 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -33,7 +33,7 @@ static int hidp_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- BT_DBG("sock %p sk %p", sock, sk);
+ BT_DBG("sock %pK sk %pK", sock, sk);
if (!sk)
return 0;
@@ -230,7 +230,7 @@ static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
{
struct sock *sk;
- BT_DBG("sock %p", sock);
+ BT_DBG("sock %pK", sock);
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 66e8b6ee19a5..bd5937dc6fe9 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -249,7 +249,7 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
- BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
+ BT_DBG("chan %pK %s -> %s", chan, state_to_string(chan->state),
state_to_string(state));
chan->state = state;
@@ -400,7 +400,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
struct l2cap_conn *conn = chan->conn;
int reason;
- BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+ BT_DBG("chan %pK state %s", chan, state_to_string(chan->state));
mutex_lock(&conn->chan_lock);
l2cap_chan_lock(chan);
@@ -449,7 +449,7 @@ struct l2cap_chan *l2cap_chan_create(void)
/* This flag is cleared in l2cap_chan_ready() */
set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
return chan;
}
@@ -459,7 +459,7 @@ static void l2cap_chan_destroy(struct kref *kref)
{
struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
write_lock(&chan_list_lock);
list_del(&chan->global_l);
@@ -470,14 +470,14 @@ static void l2cap_chan_destroy(struct kref *kref)
void l2cap_chan_hold(struct l2cap_chan *c)
{
- BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
+ BT_DBG("chan %pK orig refcnt %d", c, atomic_read(&c->kref.refcount));
kref_get(&c->kref);
}
void l2cap_chan_put(struct l2cap_chan *c)
{
- BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
+ BT_DBG("chan %pK orig refcnt %d", c, atomic_read(&c->kref.refcount));
kref_put(&c->kref, l2cap_chan_destroy);
}
@@ -516,7 +516,7 @@ static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
- BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
+ BT_DBG("conn %pK, psm 0x%2.2x, dcid 0x%4.4x", conn,
__le16_to_cpu(chan->psm), chan->dcid);
conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
@@ -579,7 +579,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
__clear_chan_timer(chan);
- BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
+ BT_DBG("chan %pK, conn %pK, err %d, state %s", chan, conn, err,
state_to_string(chan->state));
chan->ops->teardown(chan, err);
@@ -608,7 +608,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
if (chan->hs_hchan) {
struct hci_chan *hs_hchan = chan->hs_hchan;
- BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
+ BT_DBG("chan %pK disconnect hs_hchan %pK", chan, hs_hchan);
amp_disconnect_logical_link(hs_hchan);
}
@@ -711,7 +711,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
struct l2cap_conn *conn = chan->conn;
- BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+ BT_DBG("chan %pK state %s", chan, state_to_string(chan->state));
switch (chan->state) {
case BT_LISTEN:
@@ -874,7 +874,7 @@ static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
struct hci_conn *hcon = chan->conn->hcon;
u16 flags;
- BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
+ BT_DBG("chan %pK, skb %pK len %d priority %u", chan, skb, skb->len,
skb->priority);
if (chan->hs_hcon && !__chan_is_moving(chan)) {
@@ -1061,7 +1061,7 @@ static void l2cap_send_sframe(struct l2cap_chan *chan,
struct sk_buff *skb;
u32 control_field;
- BT_DBG("chan %p, control %p", chan, control);
+ BT_DBG("chan %pK, control %pK", chan, control);
if (!control->sframe)
return;
@@ -1100,7 +1100,7 @@ static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
struct l2cap_ctrl control;
- BT_DBG("chan %p, poll %d", chan, poll);
+ BT_DBG("chan %pK, poll %d", chan, poll);
memset(&control, 0, sizeof(control));
control.sframe = 1;
@@ -1189,7 +1189,7 @@ static void l2cap_move_setup(struct l2cap_chan *chan)
{
struct sk_buff *skb;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
if (chan->mode != L2CAP_MODE_ERTM)
return;
@@ -1223,7 +1223,7 @@ static void l2cap_move_setup(struct l2cap_chan *chan)
static void l2cap_move_done(struct l2cap_chan *chan)
{
u8 move_role = chan->move_role;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
chan->move_state = L2CAP_MOVE_STABLE;
chan->move_role = L2CAP_MOVE_ROLE_NONE;
@@ -1302,7 +1302,7 @@ static void l2cap_le_start(struct l2cap_chan *chan)
static void l2cap_start_connection(struct l2cap_chan *chan)
{
if (__amp_capable(chan)) {
- BT_DBG("chan %p AMP capable: discover AMPs", chan);
+ BT_DBG("chan %pK AMP capable: discover AMPs", chan);
a2mp_discover_amp(chan);
} else if (chan->conn->hcon->type == LE_LINK) {
l2cap_le_start(chan);
@@ -1399,7 +1399,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
{
struct l2cap_chan *chan, *tmp;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
mutex_lock(&conn->chan_lock);
@@ -1477,7 +1477,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
struct hci_conn *hcon = conn->hcon;
struct hci_dev *hdev = hcon->hdev;
- BT_DBG("%s conn %p", hdev->name, conn);
+ BT_DBG("%s conn %pK", hdev->name, conn);
/* For outgoing pairing which doesn't necessarily have an
* associated socket (e.g. mgmt_pair_device).
@@ -1510,7 +1510,7 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
struct l2cap_chan *chan;
struct hci_conn *hcon = conn->hcon;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
if (hcon->type == ACL_LINK)
l2cap_request_info(conn);
@@ -1551,7 +1551,7 @@ static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
struct l2cap_chan *chan;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
mutex_lock(&conn->chan_lock);
@@ -1661,7 +1661,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
if (!conn)
return;
- BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+ BT_DBG("hcon %pK conn %pK, err %d", hcon, conn, err);
kfree_skb(conn->rx_skb);
@@ -1789,7 +1789,7 @@ static void l2cap_monitor_timeout(struct work_struct *work)
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
monitor_timer.work);
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
l2cap_chan_lock(chan);
@@ -1810,7 +1810,7 @@ static void l2cap_retrans_timeout(struct work_struct *work)
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
retrans_timer.work);
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
l2cap_chan_lock(chan);
@@ -1831,7 +1831,7 @@ static void l2cap_streaming_send(struct l2cap_chan *chan,
struct sk_buff *skb;
struct l2cap_ctrl *control;
- BT_DBG("chan %p, skbs %p", chan, skbs);
+ BT_DBG("chan %pK, skbs %pK", chan, skbs);
if (__chan_is_moving(chan))
return;
@@ -1870,7 +1870,7 @@ static int l2cap_ertm_send(struct l2cap_chan *chan)
struct l2cap_ctrl *control;
int sent = 0;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
if (chan->state != BT_CONNECTED)
return -ENOTCONN;
@@ -1941,7 +1941,7 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
struct sk_buff *tx_skb;
u16 seq;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
return;
@@ -2018,7 +2018,7 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
static void l2cap_retransmit(struct l2cap_chan *chan,
struct l2cap_ctrl *control)
{
- BT_DBG("chan %p, control %p", chan, control);
+ BT_DBG("chan %pK, control %pK", chan, control);
l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
l2cap_ertm_resend(chan);
@@ -2029,7 +2029,7 @@ static void l2cap_retransmit_all(struct l2cap_chan *chan,
{
struct sk_buff *skb;
- BT_DBG("chan %p, control %p", chan, control);
+ BT_DBG("chan %pK, control %pK", chan, control);
if (control->poll)
set_bit(CONN_SEND_FBIT, &chan->conn_state);
@@ -2065,7 +2065,7 @@ static void l2cap_send_ack(struct l2cap_chan *chan)
chan->last_acked_seq);
int threshold;
- BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
+ BT_DBG("chan %pK last_acked_seq %d buffer_seq %d",
chan, chan->last_acked_seq, chan->buffer_seq);
memset(&control, 0, sizeof(control));
@@ -2160,7 +2160,7 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
struct l2cap_hdr *lh;
- BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
+ BT_DBG("chan %pK psm 0x%2.2x len %zu", chan,
__le16_to_cpu(chan->psm), len);
count = min_t(unsigned int, (conn->mtu - hlen), len);
@@ -2192,7 +2192,7 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
int err, count;
struct l2cap_hdr *lh;
- BT_DBG("chan %p len %zu", chan, len);
+ BT_DBG("chan %pK len %zu", chan, len);
count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
@@ -2223,7 +2223,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
int err, count, hlen;
struct l2cap_hdr *lh;
- BT_DBG("chan %p len %zu", chan, len);
+ BT_DBG("chan %pK len %zu", chan, len);
if (!conn)
return ERR_PTR(-ENOTCONN);
@@ -2277,7 +2277,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
size_t pdu_len;
u8 sar;
- BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
+ BT_DBG("chan %pK, msg %pK, len %zu", chan, msg, len);
/* It is critical that ERTM PDUs fit in a single HCI fragment,
* so fragmented skbs are not used. The HCI layer's handling
@@ -2344,7 +2344,7 @@ static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
int err, count, hlen;
struct l2cap_hdr *lh;
- BT_DBG("chan %p len %zu", chan, len);
+ BT_DBG("chan %pK len %zu", chan, len);
if (!conn)
return ERR_PTR(-ENOTCONN);
@@ -2386,7 +2386,7 @@ static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
size_t pdu_len;
u16 sdu_len;
- BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
+ BT_DBG("chan %pK, msg %pK, len %zu", chan, msg, len);
sdu_len = len;
pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
@@ -2552,7 +2552,7 @@ static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
struct l2cap_ctrl control;
u16 seq;
- BT_DBG("chan %p, txseq %u", chan, txseq);
+ BT_DBG("chan %pK, txseq %u", chan, txseq);
memset(&control, 0, sizeof(control));
control.sframe = 1;
@@ -2574,7 +2574,7 @@ static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
struct l2cap_ctrl control;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
return;
@@ -2592,7 +2592,7 @@ static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
u16 initial_head;
u16 seq;
- BT_DBG("chan %p, txseq %u", chan, txseq);
+ BT_DBG("chan %pK, txseq %u", chan, txseq);
memset(&control, 0, sizeof(control));
control.sframe = 1;
@@ -2617,7 +2617,7 @@ static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
struct sk_buff *acked_skb;
u16 ackseq;
- BT_DBG("chan %p, reqseq %u", chan, reqseq);
+ BT_DBG("chan %pK, reqseq %u", chan, reqseq);
if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
return;
@@ -2646,7 +2646,7 @@ static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
chan->expected_tx_seq = chan->buffer_seq;
l2cap_seq_list_clear(&chan->srej_list);
@@ -2658,7 +2658,7 @@ static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
struct l2cap_ctrl *control,
struct sk_buff_head *skbs, u8 event)
{
- BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
+ BT_DBG("chan %pK, control %pK, skbs %pK, event %d", chan, control, skbs,
event);
switch (event) {
@@ -2730,7 +2730,7 @@ static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
struct l2cap_ctrl *control,
struct sk_buff_head *skbs, u8 event)
{
- BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
+ BT_DBG("chan %pK, control %pK, skbs %pK, event %d", chan, control, skbs,
event);
switch (event) {
@@ -2807,7 +2807,7 @@ static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
struct sk_buff_head *skbs, u8 event)
{
- BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
+ BT_DBG("chan %pK, control %pK, skbs %pK, event %d, state %d",
chan, control, skbs, event, chan->tx_state);
switch (chan->tx_state) {
@@ -2826,14 +2826,14 @@ static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
struct l2cap_ctrl *control)
{
- BT_DBG("chan %p, control %p", chan, control);
+ BT_DBG("chan %pK, control %pK", chan, control);
l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
struct l2cap_ctrl *control)
{
- BT_DBG("chan %p, control %p", chan, control);
+ BT_DBG("chan %pK, control %pK", chan, control);
l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
@@ -2843,7 +2843,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
struct sk_buff *nskb;
struct l2cap_chan *chan;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
mutex_lock(&conn->chan_lock);
@@ -2874,7 +2874,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
struct l2cap_hdr *lh;
int len, count;
- BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
+ BT_DBG("conn %pK, code 0x%2.2x, ident 0x%2.2x, len %u",
conn, code, ident, dlen);
if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
@@ -3033,7 +3033,7 @@ static void l2cap_ack_timeout(struct work_struct *work)
ack_timer.work);
u16 frames_to_ack;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
l2cap_chan_lock(chan);
@@ -3177,7 +3177,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
void *ptr = req->data;
u16 size;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
if (chan->num_conf_req || chan->num_conf_rsp)
goto done;
@@ -3306,7 +3306,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
u16 result = L2CAP_CONF_SUCCESS;
u16 size;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
@@ -3515,7 +3515,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
struct l2cap_conf_efs efs;
- BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
+ BT_DBG("chan %pK, rsp %pK, len %d, req %pK", chan, rsp, len, data);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
@@ -3620,7 +3620,7 @@ static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
rsp->scid = cpu_to_le16(chan->dcid);
rsp->result = cpu_to_le16(result);
@@ -3634,7 +3634,7 @@ void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
struct l2cap_le_conn_rsp rsp;
struct l2cap_conn *conn = chan->conn;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
rsp.dcid = cpu_to_le16(chan->scid);
rsp.mtu = cpu_to_le16(chan->imtu);
@@ -3663,7 +3663,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
else
rsp_code = L2CAP_CONN_RSP;
- BT_DBG("chan %p rsp_code %u", chan, rsp_code);
+ BT_DBG("chan %pK rsp_code %u", chan, rsp_code);
l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
@@ -3691,7 +3691,7 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
};
- BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
+ BT_DBG("chan %pK, rsp %pK, len %d", chan, rsp, len);
if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
return;
@@ -3994,7 +3994,7 @@ static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
{
struct l2cap_conn *conn = chan->conn;
- BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
+ BT_DBG("conn %pK chan %pK ident %d flags 0x%4.4x", conn, chan, ident,
flags);
clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
@@ -4491,7 +4491,8 @@ static int l2cap_create_channel_req(struct l2cap_conn *conn,
return 0;
}
- BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
+ BT_DBG("mgr %pK bredr_chan %pK hs_hcon %pK",
+ mgr, chan, hs_hcon);
mgr->bredr_chan = chan;
chan->hs_hcon = hs_hcon;
@@ -4520,7 +4521,7 @@ static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
struct l2cap_move_chan_req req;
u8 ident;
- BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
+ BT_DBG("chan %pK, dest_amp_id %d", chan, dest_amp_id);
ident = l2cap_get_ident(chan->conn);
chan->ident = ident;
@@ -4538,7 +4539,7 @@ static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
{
struct l2cap_move_chan_rsp rsp;
- BT_DBG("chan %p, result 0x%4.4x", chan, result);
+ BT_DBG("chan %pK, result 0x%4.4x", chan, result);
rsp.icid = cpu_to_le16(chan->dcid);
rsp.result = cpu_to_le16(result);
@@ -4551,7 +4552,7 @@ static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
{
struct l2cap_move_chan_cfm cfm;
- BT_DBG("chan %p, result 0x%4.4x", chan, result);
+ BT_DBG("chan %pK, result 0x%4.4x", chan, result);
chan->ident = l2cap_get_ident(chan->conn);
@@ -4568,7 +4569,7 @@ static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
{
struct l2cap_move_chan_cfm cfm;
- BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
+ BT_DBG("conn %pK, icid 0x%4.4x", conn, icid);
cfm.icid = cpu_to_le16(icid);
cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
@@ -4688,7 +4689,7 @@ static void l2cap_logical_finish_move(struct l2cap_chan *chan,
void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
u8 status)
{
- BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
+ BT_DBG("chan %pK, hchan %pK, status %d", chan, hchan, status);
if (status) {
l2cap_logical_fail(chan);
@@ -4707,7 +4708,7 @@ void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
void l2cap_move_start(struct l2cap_chan *chan)
{
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
if (chan->local_amp_id == AMP_ID_BREDR) {
if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
@@ -4727,7 +4728,7 @@ void l2cap_move_start(struct l2cap_chan *chan)
static void l2cap_do_create(struct l2cap_chan *chan, int result,
u8 local_amp_id, u8 remote_amp_id)
{
- BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
+ BT_DBG("chan %pK state %s %u -> %u", chan, state_to_string(chan->state),
local_amp_id, remote_amp_id);
chan->fcs = L2CAP_FCS_NONE;
@@ -4836,7 +4837,7 @@ void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
u8 local_amp_id = chan->local_amp_id;
u8 remote_amp_id = chan->remote_amp_id;
- BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
+ BT_DBG("chan %pK, result %d, local_amp_id %d, remote_amp_id %d",
chan, result, local_amp_id, remote_amp_id);
if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
@@ -5759,7 +5760,7 @@ static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
struct l2cap_ctrl control;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
memset(&control, 0, sizeof(control));
control.sframe = 1;
@@ -5914,7 +5915,7 @@ static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
* until a gap is encountered.
*/
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
struct sk_buff *skb;
@@ -5946,7 +5947,7 @@ static void l2cap_handle_srej(struct l2cap_chan *chan,
{
struct sk_buff *skb;
- BT_DBG("chan %p, control %p", chan, control);
+ BT_DBG("chan %pK, control %pK", chan, control);
if (control->reqseq == chan->next_tx_seq) {
BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
@@ -6004,7 +6005,7 @@ static void l2cap_handle_rej(struct l2cap_chan *chan,
{
struct sk_buff *skb;
- BT_DBG("chan %p, control %p", chan, control);
+ BT_DBG("chan %pK, control %pK", chan, control);
if (control->reqseq == chan->next_tx_seq) {
BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
@@ -6038,7 +6039,7 @@ static void l2cap_handle_rej(struct l2cap_chan *chan,
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
- BT_DBG("chan %p, txseq %d", chan, txseq);
+ BT_DBG("chan %pK, txseq %d", chan, txseq);
BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
chan->expected_tx_seq);
@@ -6129,7 +6130,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
int err = 0;
bool skb_in_use = false;
- BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+ BT_DBG("chan %pK, control %pK, skb %pK, event %d", chan, control, skb,
event);
switch (event) {
@@ -6185,7 +6186,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
*/
skb_queue_tail(&chan->srej_q, skb);
skb_in_use = true;
- BT_DBG("Queued %p (queue len %d)", skb,
+ BT_DBG("Queued %pK (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
clear_bit(CONN_SREJ_ACT, &chan->conn_state);
@@ -6249,7 +6250,7 @@ static int l2cap_rx_state_recv(struct l2cap_chan *chan,
}
if (skb && !skb_in_use) {
- BT_DBG("Freeing %p", skb);
+ BT_DBG("Freeing %pK", skb);
kfree_skb(skb);
}
@@ -6264,7 +6265,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
u16 txseq = control->txseq;
bool skb_in_use = false;
- BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+ BT_DBG("chan %pK, control %pK, skb %pK, event %d", chan, control, skb,
event);
switch (event) {
@@ -6275,7 +6276,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
l2cap_pass_to_tx(chan, control);
skb_queue_tail(&chan->srej_q, skb);
skb_in_use = true;
- BT_DBG("Queued %p (queue len %d)", skb,
+ BT_DBG("Queued %pK (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
chan->expected_tx_seq = __next_seq(chan, txseq);
@@ -6286,7 +6287,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
l2cap_pass_to_tx(chan, control);
skb_queue_tail(&chan->srej_q, skb);
skb_in_use = true;
- BT_DBG("Queued %p (queue len %d)", skb,
+ BT_DBG("Queued %pK (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
err = l2cap_rx_queued_iframes(chan);
@@ -6301,7 +6302,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
*/
skb_queue_tail(&chan->srej_q, skb);
skb_in_use = true;
- BT_DBG("Queued %p (queue len %d)", skb,
+ BT_DBG("Queued %pK (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
l2cap_pass_to_tx(chan, control);
@@ -6315,7 +6316,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
*/
skb_queue_tail(&chan->srej_q, skb);
skb_in_use = true;
- BT_DBG("Queued %p (queue len %d)", skb,
+ BT_DBG("Queued %pK (queue len %d)", skb,
skb_queue_len(&chan->srej_q));
l2cap_pass_to_tx(chan, control);
@@ -6392,7 +6393,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
}
if (skb && !skb_in_use) {
- BT_DBG("Freeing %p", skb);
+ BT_DBG("Freeing %pK", skb);
kfree_skb(skb);
}
@@ -6401,7 +6402,7 @@ static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
static int l2cap_finish_move(struct l2cap_chan *chan)
{
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
chan->rx_state = L2CAP_RX_STATE_RECV;
@@ -6419,7 +6420,7 @@ static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
{
int err;
- BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+ BT_DBG("chan %pK, control %pK, skb %pK, event %d", chan, control, skb,
event);
if (!control->poll)
@@ -6503,7 +6504,7 @@ static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
{
int err = 0;
- BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
+ BT_DBG("chan %pK, control %pK, skb %pK, event %d, state %d", chan,
control, skb, event, chan->rx_state);
if (__valid_reqseq(chan, control->reqseq)) {
@@ -6540,7 +6541,7 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
{
int err = 0;
- BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
+ BT_DBG("chan %pK, control %pK, skb %pK, state %d", chan, control, skb,
chan->rx_state);
if (l2cap_classify_txseq(chan, control->txseq) ==
@@ -6562,7 +6563,7 @@ static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
chan->sdu_len = 0;
if (skb) {
- BT_DBG("Freeing %p", skb);
+ BT_DBG("Freeing %pK", skb);
kfree_skb(skb);
}
}
@@ -6675,7 +6676,7 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
return_credits = le_max_credits - chan->rx_credits;
- BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
+ BT_DBG("chan %pK returning %u credits to sender", chan, return_credits);
chan->rx_credits += return_credits;
@@ -6800,7 +6801,7 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
}
}
- BT_DBG("chan %p, len %d", chan, skb->len);
+ BT_DBG("chan %pK, len %d", chan, skb->len);
/* If we receive data on a fixed channel before the info req/rsp
 * procedure is done, simply assume that the channel is supported
@@ -6840,7 +6841,7 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
goto done;
default:
- BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
+ BT_DBG("chan %pK: bad mode 0x%2.2x", chan, chan->mode);
break;
}
@@ -6865,7 +6866,7 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
if (!chan)
goto free_skb;
- BT_DBG("chan %p, len %d", chan, skb->len);
+ BT_DBG("chan %pK, len %d", chan, skb->len);
if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
goto drop;
@@ -6978,7 +6979,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
conn->hcon = hci_conn_get(hcon);
conn->hchan = hchan;
- BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
+ BT_DBG("hcon %pK conn %pK hchan %pK", hcon, conn, hchan);
switch (hcon->type) {
case LE_LINK:
@@ -7274,7 +7275,7 @@ static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
return;
- BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
+ BT_DBG("hcon %pK bdaddr %pMR status %d", hcon, &hcon->dst, status);
if (status) {
l2cap_conn_del(hcon, bt_to_errno(status));
@@ -7329,7 +7330,7 @@ int l2cap_disconn_ind(struct hci_conn *hcon)
{
struct l2cap_conn *conn = hcon->l2cap_data;
- BT_DBG("hcon %p", hcon);
+ BT_DBG("hcon %pK", hcon);
if (!conn)
return HCI_ERROR_REMOTE_USER_TERM;
@@ -7341,7 +7342,7 @@ static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
return;
- BT_DBG("hcon %p reason %d", hcon, reason);
+ BT_DBG("hcon %pK reason %d", hcon, reason);
l2cap_conn_del(hcon, bt_to_errno(reason));
}
@@ -7371,14 +7372,14 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
if (!conn)
return;
- BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
+ BT_DBG("conn %pK status 0x%2.2x encrypt %u", conn, status, encrypt);
mutex_lock(&conn->chan_lock);
list_for_each_entry(chan, &conn->chan_l, list) {
l2cap_chan_lock(chan);
- BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
+ BT_DBG("chan %pK scid 0x%4.4x state %s", chan, chan->scid,
state_to_string(chan->state));
if (chan->scid == L2CAP_CID_A2MP) {
@@ -7470,7 +7471,7 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
if (!conn)
goto drop;
- BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
+ BT_DBG("conn %pK len %d flags 0x%x", conn, skb->len, flags);
switch (flags) {
case ACL_START:
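
[Editor's note on the l2cap_core.c hunks above: the change is mechanical. Every %p that prints a kernel object (chan, conn, skb, hcon, mgr, hchan, ...) becomes %pK, while data-printing specifiers such as %pMR are left alone. On kernels of this generation %p emits the raw virtual address, so anyone able to read the log can defeat KASLR; %pK instead consults the kptr_restrict sysctl. A minimal sketch of the difference — a hypothetical throwaway module, not part of this patch:

/*
 * Minimal sketch, not part of the patch: with kptr_restrict=1, %pK prints
 * all zeros for readers lacking CAP_SYSLOG, while %p on kernels of this
 * era prints the raw address.
 */
#include <linux/module.h>
#include <linux/printk.h>

static int __init pk_demo_init(void)
{
	static int obj;

	pr_info("raw pointer:        %p\n", &obj);   /* leaks the address */
	pr_info("restricted pointer: %pK\n", &obj);  /* masked per kptr_restrict */
	return 0;
}

static void __exit pk_demo_exit(void) { }

module_init(pk_demo_init);
module_exit(pk_demo_exit);
MODULE_LICENSE("GPL");
]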
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index d9bbbded49ef..bd3c021932be 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -84,7 +84,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
struct sockaddr_l2 la;
int len, err = 0;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
@@ -178,7 +178,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
struct sockaddr_l2 la;
int len, err = 0;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (!addr || alen < sizeof(addr->sa_family) ||
addr->sa_family != AF_BLUETOOTH)
@@ -254,7 +254,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
int err = 0;
- BT_DBG("sk %p backlog %d", sk, backlog);
+ BT_DBG("sk %pK backlog %d", sk, backlog);
lock_sock(sk);
@@ -311,7 +311,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
- BT_DBG("sk %p timeo %ld", sk, timeo);
+ BT_DBG("sk %pK timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
@@ -348,7 +348,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock,
newsock->state = SS_CONNECTED;
- BT_DBG("new socket %p", nsk);
+ BT_DBG("new socket %pK", nsk);
done:
release_sock(sk);
@@ -362,7 +362,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr,
struct sock *sk = sock->sk;
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %pK, sk %pK", sock, sk);
if (peer && sk->sk_state != BT_CONNECTED &&
sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2 &&
@@ -398,7 +398,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
int len, err = 0;
u32 opt;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (get_user(len, optlen))
return -EFAULT;
@@ -500,7 +500,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
struct bt_power pwr;
int len, err = 0;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (level == SOL_L2CAP)
return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
@@ -636,7 +636,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
int len, err = 0;
u32 opt;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
lock_sock(sk);
@@ -750,7 +750,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
int len, err = 0;
u32 opt;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (level == SOL_L2CAP)
return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
@@ -951,7 +951,7 @@ static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg,
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
int err;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %pK, sk %pK", sock, sk);
err = sock_error(sk);
if (err)
@@ -1045,7 +1045,7 @@ static void l2cap_sock_kill(struct sock *sk)
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
- BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state));
+ BT_DBG("sk %pK state %s", sk, state_to_string(sk->sk_state));
/* Kill poor orphan */
@@ -1106,7 +1106,7 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
struct l2cap_conn *conn;
int err = 0;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %pK, sk %pK", sock, sk);
if (!sk)
return 0;
@@ -1125,7 +1125,7 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
/* prevent chan structure from being freed whilst unlocked */
l2cap_chan_hold(chan);
- BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+ BT_DBG("chan %pK state %s", chan, state_to_string(chan->state));
if (chan->mode == L2CAP_MODE_ERTM &&
chan->unacked_frames > 0 &&
@@ -1190,7 +1190,7 @@ static int l2cap_sock_release(struct socket *sock)
struct sock *sk = sock->sk;
int err;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %pK, sk %pK", sock, sk);
if (!sk)
return 0;
@@ -1208,14 +1208,14 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
- BT_DBG("parent %p state %s", parent,
+ BT_DBG("parent %pK state %s", parent,
state_to_string(parent->sk_state));
/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL))) {
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
- BT_DBG("child chan %p state %s", chan,
+ BT_DBG("child chan %pK state %s", chan,
state_to_string(chan->state));
l2cap_chan_lock(chan);
@@ -1305,7 +1305,7 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
struct sock *sk = chan->data;
struct sock *parent;
- BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+ BT_DBG("chan %pK state %s", chan, state_to_string(chan->state));
/* This callback can be called both for server (BT_LISTEN)
* sockets as well as "normal" ones. To avoid lockdep warnings
@@ -1392,7 +1392,7 @@ static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
parent = bt_sk(sk)->parent;
- BT_DBG("sk %p, parent %p", sk, parent);
+ BT_DBG("sk %pK, parent %pK", sk, parent);
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
@@ -1471,7 +1471,7 @@ static const struct l2cap_ops l2cap_chan_ops = {
static void l2cap_sock_destruct(struct sock *sk)
{
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (l2cap_pi(sk)->chan)
l2cap_chan_put(l2cap_pi(sk)->chan);
@@ -1502,7 +1502,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
struct l2cap_chan *chan = l2cap_pi(sk)->chan;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (parent) {
struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
@@ -1609,7 +1609,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
{
struct sock *sk;
- BT_DBG("sock %p", sock);
+ BT_DBG("sock %pK", sock);
sock->state = SS_UNCONNECTED;
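
[Editor's note: while reading these hunks, keep in mind that BT_DBG() is pr_debug() with a trailing newline (include/net/bluetooth/bluetooth.h, paraphrased below), i.e. dynamic debug. None of these sites print unless explicitly enabled, so the %pK conversion hardens the case where a debug-enabled build ships to users:

/* Paraphrase of the definition in include/net/bluetooth/bluetooth.h:
 * BT_DBG() wraps pr_debug(), so these format strings stay dormant until
 * dynamic debug turns them on -- but are still worth restricting.
 */
#define BT_DBG(fmt, ...)	pr_debug(fmt "\n", ##__VA_ARGS__)
]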
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index aa4cf64e32a6..e87b965230bf 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -145,7 +145,7 @@ void bt_info(const char *format, ...)
vaf.fmt = format;
vaf.va = &args;
- pr_info("%pV", &vaf);
+ pr_info("%pKV", &vaf);
va_end(args);
}
@@ -161,7 +161,7 @@ void bt_warn(const char *format, ...)
vaf.fmt = format;
vaf.va = &args;
- pr_warn("%pV", &vaf);
+ pr_warn("%pKV", &vaf);
va_end(args);
}
@@ -177,7 +177,7 @@ void bt_err(const char *format, ...)
vaf.fmt = format;
vaf.va = &args;
- pr_err("%pV", &vaf);
+ pr_err("%pKV", &vaf);
va_end(args);
}
@@ -193,7 +193,7 @@ void bt_err_ratelimited(const char *format, ...)
vaf.fmt = format;
vaf.va = &args;
- pr_err_ratelimited("%pV", &vaf);
+ pr_err_ratelimited("%pKV", &vaf);
va_end(args);
}
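
[Editor's note: the four lib.c hunks above deserve a second look. Unlike the BT_DBG changes they rewrite %pV, the nested-format specifier for struct va_format, and "%pKV" is not a combination lib/vsprintf.c recognizes: pointer() dispatches on the first character after %p, so the 'K' case wins, the trailing 'V' is silently consumed, and bt_info()/bt_warn()/bt_err()/bt_err_ratelimited() degrade to printing the (possibly zeroed) address of the on-stack vaf instead of the message. There is no restricted variant of %pV; a sketch of the helper with the specifier that actually works:

#include <linux/kernel.h>
#include <linux/printk.h>

/* Sketch mirroring lib.c's bt_info(): %pV expands vaf.fmt against vaf.va.
 * With "%pKV" the message text is lost entirely -- only &vaf is printed,
 * masked per kptr_restrict -- so %pV should be kept as-is here.
 */
void bt_info_sketch(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);
	vaf.fmt = format;
	vaf.va = &args;
	pr_info("%pV", &vaf);
	va_end(args);
}
]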
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index b1b0a1c0bd8d..db399c1662ab 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -281,7 +281,7 @@ static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_rp_read_version rp;
- BT_DBG("sock %p", sk);
+ BT_DBG("sock %pK", sk);
rp.version = MGMT_VERSION;
rp.revision = cpu_to_le16(MGMT_REVISION);
@@ -298,7 +298,7 @@ static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
size_t rp_size;
int i, err;
- BT_DBG("sock %p", sk);
+ BT_DBG("sock %pK", sk);
if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
num_commands = ARRAY_SIZE(mgmt_commands);
@@ -351,7 +351,7 @@ static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
u16 count;
int err;
- BT_DBG("sock %p", sk);
+ BT_DBG("sock %pK", sk);
read_lock(&hci_dev_list_lock);
@@ -411,7 +411,7 @@ static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
u16 count;
int err;
- BT_DBG("sock %p", sk);
+ BT_DBG("sock %pK", sk);
read_lock(&hci_dev_list_lock);
@@ -471,7 +471,7 @@ static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
u16 count;
int err;
- BT_DBG("sock %p", sk);
+ BT_DBG("sock %pK", sk);
read_lock(&hci_dev_list_lock);
@@ -588,7 +588,7 @@ static int read_config_info(struct sock *sk, struct hci_dev *hdev,
struct mgmt_rp_read_config_info rp;
u32 options = 0;
- BT_DBG("sock %p %s", sk, hdev->name);
+ BT_DBG("sock %pK %s", sk, hdev->name);
hci_dev_lock(hdev);
@@ -1373,7 +1373,7 @@ static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
{
struct mgmt_rp_read_info rp;
- BT_DBG("sock %p %s", sk, hdev->name);
+ BT_DBG("sock %pK %s", sk, hdev->name);
hci_dev_lock(hdev);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 29709fbfd1f5..6a83e1b14575 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -184,13 +184,13 @@ static inline int __check_fcs(u8 *data, int type, u8 fcs)
/* ---- L2CAP callbacks ---- */
static void rfcomm_l2state_change(struct sock *sk)
{
- BT_DBG("%p state %d", sk, sk->sk_state);
+ BT_DBG("%pK state %d", sk, sk->sk_state);
rfcomm_schedule();
}
static void rfcomm_l2data_ready(struct sock *sk)
{
- BT_DBG("%p", sk);
+ BT_DBG("%pK", sk);
rfcomm_schedule();
}
@@ -237,7 +237,7 @@ static void rfcomm_session_timeout(unsigned long arg)
{
struct rfcomm_session *s = (void *) arg;
- BT_DBG("session %p state %ld", s, s->state);
+ BT_DBG("session %pK state %ld", s, s->state);
set_bit(RFCOMM_TIMED_OUT, &s->flags);
rfcomm_schedule();
@@ -245,14 +245,14 @@ static void rfcomm_session_timeout(unsigned long arg)
static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout)
{
- BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout);
+ BT_DBG("session %pK state %ld timeout %ld", s, s->state, timeout);
mod_timer(&s->timer, jiffies + timeout);
}
static void rfcomm_session_clear_timer(struct rfcomm_session *s)
{
- BT_DBG("session %p state %ld", s, s->state);
+ BT_DBG("session %pK state %ld", s, s->state);
del_timer_sync(&s->timer);
}
@@ -262,7 +262,7 @@ static void rfcomm_dlc_timeout(unsigned long arg)
{
struct rfcomm_dlc *d = (void *) arg;
- BT_DBG("dlc %p state %ld", d, d->state);
+ BT_DBG("dlc %pK state %ld", d, d->state);
set_bit(RFCOMM_TIMED_OUT, &d->flags);
rfcomm_dlc_put(d);
@@ -271,7 +271,7 @@ static void rfcomm_dlc_timeout(unsigned long arg)
static void rfcomm_dlc_set_timer(struct rfcomm_dlc *d, long timeout)
{
- BT_DBG("dlc %p state %ld timeout %ld", d, d->state, timeout);
+ BT_DBG("dlc %pK state %ld timeout %ld", d, d->state, timeout);
if (!mod_timer(&d->timer, jiffies + timeout))
rfcomm_dlc_hold(d);
@@ -279,7 +279,7 @@ static void rfcomm_dlc_set_timer(struct rfcomm_dlc *d, long timeout)
static void rfcomm_dlc_clear_timer(struct rfcomm_dlc *d)
{
- BT_DBG("dlc %p state %ld", d, d->state);
+ BT_DBG("dlc %pK state %ld", d, d->state);
if (del_timer(&d->timer))
rfcomm_dlc_put(d);
@@ -287,7 +287,7 @@ static void rfcomm_dlc_clear_timer(struct rfcomm_dlc *d)
static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d)
{
- BT_DBG("%p", d);
+ BT_DBG("%pK", d);
d->state = BT_OPEN;
d->flags = 0;
@@ -315,14 +315,14 @@ struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio)
rfcomm_dlc_clear_state(d);
- BT_DBG("%p", d);
+ BT_DBG("%pK", d);
return d;
}
void rfcomm_dlc_free(struct rfcomm_dlc *d)
{
- BT_DBG("%p", d);
+ BT_DBG("%pK", d);
skb_queue_purge(&d->tx_queue);
kfree(d);
@@ -330,7 +330,7 @@ void rfcomm_dlc_free(struct rfcomm_dlc *d)
static void rfcomm_dlc_link(struct rfcomm_session *s, struct rfcomm_dlc *d)
{
- BT_DBG("dlc %p session %p", d, s);
+ BT_DBG("dlc %pK session %pK", d, s);
rfcomm_session_clear_timer(s);
rfcomm_dlc_hold(d);
@@ -342,7 +342,7 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d)
{
struct rfcomm_session *s = d->session;
- BT_DBG("dlc %p refcnt %d session %p", d, atomic_read(&d->refcnt), s);
+ BT_DBG("dlc %pK refcnt %d session %pK", d, atomic_read(&d->refcnt), s);
list_del(&d->list);
d->session = NULL;
@@ -374,7 +374,7 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst,
int err = 0;
u8 dlci;
- BT_DBG("dlc %p state %ld %pMR -> %pMR channel %d",
+ BT_DBG("dlc %pK state %ld %pMR -> %pMR channel %d",
d, d->state, src, dst, channel);
if (rfcomm_check_channel(channel))
@@ -454,8 +454,8 @@ static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
if (!s)
return 0;
- BT_DBG("dlc %p state %ld dlci %d err %d session %p",
- d, d->state, d->dlci, err, s);
+ BT_DBG("dlc %pK state %ld dlci %d err %d session %pK",
+ d, d->state, d->dlci, err, s);
switch (d->state) {
case BT_CONNECT:
@@ -505,7 +505,7 @@ int rfcomm_dlc_close(struct rfcomm_dlc *d, int err)
struct rfcomm_dlc *d_list;
struct rfcomm_session *s, *s_list;
- BT_DBG("dlc %p state %ld dlci %d err %d", d, d->state, d->dlci, err);
+ BT_DBG("dlc %pK state %ld dlci %d err %d", d, d->state, d->dlci, err);
rfcomm_lock();
@@ -559,7 +559,7 @@ int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb)
if (d->state != BT_CONNECTED)
return -ENOTCONN;
- BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len);
+ BT_DBG("dlc %pK mtu %d len %d", d, d->mtu, len);
if (len > d->mtu)
return -EINVAL;
@@ -576,7 +576,7 @@ void rfcomm_dlc_send_noerror(struct rfcomm_dlc *d, struct sk_buff *skb)
{
int len = skb->len;
- BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len);
+ BT_DBG("dlc %pK mtu %d len %d", d, d->mtu, len);
rfcomm_make_uih(skb, d->addr);
skb_queue_tail(&d->tx_queue, skb);
@@ -588,7 +588,7 @@ void rfcomm_dlc_send_noerror(struct rfcomm_dlc *d, struct sk_buff *skb)
void __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
{
- BT_DBG("dlc %p state %ld", d, d->state);
+ BT_DBG("dlc %pK state %ld", d, d->state);
if (!d->cfc) {
d->v24_sig |= RFCOMM_V24_FC;
@@ -599,7 +599,7 @@ void __rfcomm_dlc_throttle(struct rfcomm_dlc *d)
void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
{
- BT_DBG("dlc %p state %ld", d, d->state);
+ BT_DBG("dlc %pK state %ld", d, d->state);
if (!d->cfc) {
d->v24_sig &= ~RFCOMM_V24_FC;
@@ -615,8 +615,8 @@ void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d)
*/
int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig)
{
- BT_DBG("dlc %p state %ld v24_sig 0x%x",
- d, d->state, v24_sig);
+ BT_DBG("dlc %pK state %ld v24_sig 0x%x",
+ d, d->state, v24_sig);
if (test_bit(RFCOMM_RX_THROTTLED, &d->flags))
v24_sig |= RFCOMM_V24_FC;
@@ -633,8 +633,8 @@ int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig)
int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig)
{
- BT_DBG("dlc %p state %ld v24_sig 0x%x",
- d, d->state, d->v24_sig);
+ BT_DBG("dlc %pK state %ld v24_sig 0x%x",
+ d, d->state, d->v24_sig);
*v24_sig = d->v24_sig;
return 0;
@@ -648,7 +648,7 @@ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state)
if (!s)
return NULL;
- BT_DBG("session %p sock %p", s, sock);
+ BT_DBG("session %pK sock %pK", s, sock);
setup_timer(&s->timer, rfcomm_session_timeout, (unsigned long) s);
@@ -676,7 +676,7 @@ static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s)
{
int state = s->state;
- BT_DBG("session %p state %ld", s, s->state);
+ BT_DBG("session %pK state %ld", s, s->state);
list_del(&s->list);
@@ -714,7 +714,7 @@ static struct rfcomm_session *rfcomm_session_close(struct rfcomm_session *s,
s->state = BT_CLOSED;
- BT_DBG("session %p state %ld err %d", s, s->state, err);
+ BT_DBG("session %pK state %ld err %d", s, s->state, err);
/* Close all dlcs */
list_for_each_safe(p, n, &s->dlcs) {
@@ -800,7 +800,7 @@ static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
struct kvec iv = { data, len };
struct msghdr msg;
- BT_DBG("session %p len %d", s, len);
+ BT_DBG("session %pK len %d", s, len);
memset(&msg, 0, sizeof(msg));
@@ -809,7 +809,7 @@ static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len)
static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd)
{
- BT_DBG("%p cmd %u", s, cmd->ctrl);
+ BT_DBG("%pK cmd %u", s, cmd->ctrl);
return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd));
}
@@ -818,7 +818,7 @@ static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci)
{
struct rfcomm_cmd cmd;
- BT_DBG("%p dlci %d", s, dlci);
+ BT_DBG("%pK dlci %d", s, dlci);
cmd.addr = __addr(s->initiator, dlci);
cmd.ctrl = __ctrl(RFCOMM_SABM, 1);
@@ -832,7 +832,7 @@ static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci)
{
struct rfcomm_cmd cmd;
- BT_DBG("%p dlci %d", s, dlci);
+ BT_DBG("%pK dlci %d", s, dlci);
cmd.addr = __addr(!s->initiator, dlci);
cmd.ctrl = __ctrl(RFCOMM_UA, 1);
@@ -846,7 +846,7 @@ static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci)
{
struct rfcomm_cmd cmd;
- BT_DBG("%p dlci %d", s, dlci);
+ BT_DBG("%pK dlci %d", s, dlci);
cmd.addr = __addr(s->initiator, dlci);
cmd.ctrl = __ctrl(RFCOMM_DISC, 1);
@@ -861,7 +861,7 @@ static int rfcomm_queue_disc(struct rfcomm_dlc *d)
struct rfcomm_cmd *cmd;
struct sk_buff *skb;
- BT_DBG("dlc %p dlci %d", d, d->dlci);
+ BT_DBG("dlc %pK dlci %d", d, d->dlci);
skb = alloc_skb(sizeof(*cmd), GFP_KERNEL);
if (!skb)
@@ -882,7 +882,7 @@ static int rfcomm_send_dm(struct rfcomm_session *s, u8 dlci)
{
struct rfcomm_cmd cmd;
- BT_DBG("%p dlci %d", s, dlci);
+ BT_DBG("%pK dlci %d", s, dlci);
cmd.addr = __addr(!s->initiator, dlci);
cmd.ctrl = __ctrl(RFCOMM_DM, 1);
@@ -898,7 +898,7 @@ static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type)
struct rfcomm_mcc *mcc;
u8 buf[16], *ptr = buf;
- BT_DBG("%p cr %d type %d", s, cr, type);
+ BT_DBG("%pK cr %d type %d", s, cr, type);
hdr = (void *) ptr; ptr += sizeof(*hdr);
hdr->addr = __addr(s->initiator, 0);
@@ -924,7 +924,7 @@ static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d
struct rfcomm_pn *pn;
u8 buf[16], *ptr = buf;
- BT_DBG("%p cr %d dlci %d mtu %d", s, cr, d->dlci, d->mtu);
+ BT_DBG("%pK cr %d dlci %d mtu %d", s, cr, d->dlci, d->mtu);
hdr = (void *) ptr; ptr += sizeof(*hdr);
hdr->addr = __addr(s->initiator, 0);
@@ -969,10 +969,9 @@ int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
struct rfcomm_rpn *rpn;
u8 buf[16], *ptr = buf;
- BT_DBG("%p cr %d dlci %d bit_r 0x%x data_b 0x%x stop_b 0x%x parity 0x%x"
- " flwc_s 0x%x xon_c 0x%x xoff_c 0x%x p_mask 0x%x",
- s, cr, dlci, bit_rate, data_bits, stop_bits, parity,
- flow_ctrl_settings, xon_char, xoff_char, param_mask);
+ BT_DBG("%pK cr %d dlci %d bit_r 0x%x data_b 0x%x stop_b 0x%x parity 0x%x flwc_s 0x%x xon_c 0x%x xoff_c 0x%x p_mask 0x%x",
+ s, cr, dlci, bit_rate, data_bits, stop_bits, parity,
+ flow_ctrl_settings, xon_char, xoff_char, param_mask);
hdr = (void *) ptr; ptr += sizeof(*hdr);
hdr->addr = __addr(s->initiator, 0);
@@ -1004,7 +1003,7 @@ static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status)
struct rfcomm_rls *rls;
u8 buf[16], *ptr = buf;
- BT_DBG("%p cr %d status 0x%x", s, cr, status);
+ BT_DBG("%pK cr %d status 0x%x", s, cr, status);
hdr = (void *) ptr; ptr += sizeof(*hdr);
hdr->addr = __addr(s->initiator, 0);
@@ -1031,7 +1030,7 @@ static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig
struct rfcomm_msc *msc;
u8 buf[16], *ptr = buf;
- BT_DBG("%p cr %d v24 0x%x", s, cr, v24_sig);
+ BT_DBG("%pK cr %d v24 0x%x", s, cr, v24_sig);
hdr = (void *) ptr; ptr += sizeof(*hdr);
hdr->addr = __addr(s->initiator, 0);
@@ -1057,7 +1056,7 @@ static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr)
struct rfcomm_mcc *mcc;
u8 buf[16], *ptr = buf;
- BT_DBG("%p cr %d", s, cr);
+ BT_DBG("%pK cr %d", s, cr);
hdr = (void *) ptr; ptr += sizeof(*hdr);
hdr->addr = __addr(s->initiator, 0);
@@ -1079,7 +1078,7 @@ static int rfcomm_send_fcon(struct rfcomm_session *s, int cr)
struct rfcomm_mcc *mcc;
u8 buf[16], *ptr = buf;
- BT_DBG("%p cr %d", s, cr);
+ BT_DBG("%pK cr %d", s, cr);
hdr = (void *) ptr; ptr += sizeof(*hdr);
hdr->addr = __addr(s->initiator, 0);
@@ -1105,7 +1104,7 @@ static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int l
if (len > 125)
return -EINVAL;
- BT_DBG("%p cr %d", s, cr);
+ BT_DBG("%pK cr %d", s, cr);
hdr[0] = __addr(s->initiator, 0);
hdr[1] = __ctrl(RFCOMM_UIH, 0);
@@ -1132,7 +1131,7 @@ static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits)
struct rfcomm_hdr *hdr;
u8 buf[16], *ptr = buf;
- BT_DBG("%p addr %d credits %d", s, addr, credits);
+ BT_DBG("%pK addr %d credits %d", s, addr, credits);
hdr = (void *) ptr; ptr += sizeof(*hdr);
hdr->addr = addr;
@@ -1169,7 +1168,7 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr)
/* ---- RFCOMM frame reception ---- */
static struct rfcomm_session *rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
{
- BT_DBG("session %p state %ld dlci %d", s, s->state, dlci);
+ BT_DBG("session %pK state %ld dlci %d", s, s->state, dlci);
if (dlci) {
/* Data channel */
@@ -1223,7 +1222,7 @@ static struct rfcomm_session *rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci)
{
int err = 0;
- BT_DBG("session %p state %ld dlci %d", s, s->state, dlci);
+ BT_DBG("session %pK state %ld dlci %d", s, s->state, dlci);
if (dlci) {
/* Data DLC */
@@ -1253,7 +1252,7 @@ static struct rfcomm_session *rfcomm_recv_disc(struct rfcomm_session *s,
{
int err = 0;
- BT_DBG("session %p state %ld dlci %d", s, s->state, dlci);
+ BT_DBG("session %pK state %ld dlci %d", s, s->state, dlci);
if (dlci) {
struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci);
@@ -1288,7 +1287,7 @@ void rfcomm_dlc_accept(struct rfcomm_dlc *d)
struct sock *sk = d->session->sock->sk;
struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
- BT_DBG("dlc %p", d);
+ BT_DBG("dlc %pK", d);
rfcomm_send_ua(d->session, d->dlci);
@@ -1329,7 +1328,7 @@ static int rfcomm_recv_sabm(struct rfcomm_session *s, u8 dlci)
struct rfcomm_dlc *d;
u8 channel;
- BT_DBG("session %p state %ld dlci %d", s, s->state, dlci);
+ BT_DBG("session %pK state %ld dlci %d", s, s->state, dlci);
if (!dlci) {
rfcomm_send_ua(s, 0);
@@ -1370,8 +1369,8 @@ static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn)
{
struct rfcomm_session *s = d->session;
- BT_DBG("dlc %p state %ld dlci %d mtu %d fc 0x%x credits %d",
- d, d->state, d->dlci, pn->mtu, pn->flow_ctrl, pn->credits);
+ BT_DBG("dlc %pK state %ld dlci %d mtu %d fc 0x%x credits %d",
+ d, d->state, d->dlci, pn->mtu, pn->flow_ctrl, pn->credits);
if ((pn->flow_ctrl == 0xf0 && s->cfc != RFCOMM_CFC_DISABLED) ||
pn->flow_ctrl == 0xe0) {
@@ -1401,7 +1400,7 @@ static int rfcomm_recv_pn(struct rfcomm_session *s, int cr, struct sk_buff *skb)
struct rfcomm_dlc *d;
u8 dlci = pn->dlci;
- BT_DBG("session %p state %ld dlci %d", s, s->state, dlci);
+ BT_DBG("session %pK state %ld dlci %d", s, s->state, dlci);
if (!dlci)
return 0;
@@ -1617,7 +1616,7 @@ static int rfcomm_recv_mcc(struct rfcomm_session *s, struct sk_buff *skb)
type = __get_mcc_type(mcc->type);
len = __get_mcc_len(mcc->len);
- BT_DBG("%p type 0x%x cr %d", s, type, cr);
+ BT_DBG("%pK type 0x%x cr %d", s, type, cr);
skb_pull(skb, 2);
@@ -1672,7 +1671,7 @@ static int rfcomm_recv_data(struct rfcomm_session *s, u8 dlci, int pf, struct sk
{
struct rfcomm_dlc *d;
- BT_DBG("session %p state %ld dlci %d pf %d", s, s->state, dlci, pf);
+ BT_DBG("session %pK state %ld dlci %d pf %d", s, s->state, dlci, pf);
d = rfcomm_dlc_get(s, dlci);
if (!d) {
@@ -1774,7 +1773,7 @@ static void rfcomm_process_connect(struct rfcomm_session *s)
struct rfcomm_dlc *d;
struct list_head *p, *n;
- BT_DBG("session %p state %ld", s, s->state);
+ BT_DBG("session %pK state %ld", s, s->state);
list_for_each_safe(p, n, &s->dlcs) {
d = list_entry(p, struct rfcomm_dlc, list);
@@ -1798,8 +1797,8 @@ static int rfcomm_process_tx(struct rfcomm_dlc *d)
struct sk_buff *skb;
int err;
- BT_DBG("dlc %p state %ld cfc %d rx_credits %d tx_credits %d",
- d, d->state, d->cfc, d->rx_credits, d->tx_credits);
+ BT_DBG("dlc %pK state %ld cfc %d rx_credits %d tx_credits %d",
+ d, d->state, d->cfc, d->rx_credits, d->tx_credits);
/* Send pending MSC */
if (test_and_clear_bit(RFCOMM_MSC_PENDING, &d->flags))
@@ -1846,7 +1845,7 @@ static void rfcomm_process_dlcs(struct rfcomm_session *s)
struct rfcomm_dlc *d;
struct list_head *p, *n;
- BT_DBG("session %p state %ld", s, s->state);
+ BT_DBG("session %pK state %ld", s, s->state);
list_for_each_safe(p, n, &s->dlcs) {
d = list_entry(p, struct rfcomm_dlc, list);
@@ -1907,7 +1906,8 @@ static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s)
struct sock *sk = sock->sk;
struct sk_buff *skb;
- BT_DBG("session %p state %ld qlen %d", s, s->state, skb_queue_len(&sk->sk_receive_queue));
+ BT_DBG("session %pK state %ld qlen %d", s, s->state,
+ skb_queue_len(&sk->sk_receive_queue));
/* Get data directly from socket receive queue without copying it. */
while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
@@ -1937,7 +1937,7 @@ static void rfcomm_accept_connection(struct rfcomm_session *s)
if (list_empty(&bt_sk(sock->sk)->accept_q))
return;
- BT_DBG("session %p", s);
+ BT_DBG("session %pK", s);
err = kernel_accept(sock, &nsock, O_NONBLOCK);
if (err < 0)
@@ -1963,7 +1963,7 @@ static struct rfcomm_session *rfcomm_check_connection(struct rfcomm_session *s)
{
struct sock *sk = s->sock->sk;
- BT_DBG("%p state %ld", s, s->state);
+ BT_DBG("%pK state %ld", s, s->state);
switch (sk->sk_state) {
case BT_CONNECTED:
@@ -2116,7 +2116,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
struct rfcomm_dlc *d;
struct list_head *p, *n;
- BT_DBG("conn %p status 0x%02x encrypt 0x%02x", conn, status, encrypt);
+ BT_DBG("conn %pK status 0x%02x encrypt 0x%02x", conn, status, encrypt);
s = rfcomm_session_get(&conn->hdev->bdaddr, &conn->dst);
if (!s)
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 7511df72347f..e8922de3c725 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -68,7 +68,7 @@ static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
if (!sk)
return;
- BT_DBG("dlc %p state %ld err %d", d, d->state, err);
+ BT_DBG("dlc %pK state %ld err %d", d, d->state, err);
local_irq_save(flags);
bh_lock_sock(sk);
@@ -156,7 +156,7 @@ static void rfcomm_sock_destruct(struct sock *sk)
{
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
- BT_DBG("sk %p dlc %p", sk, d);
+ BT_DBG("sk %pK dlc %pK", sk, d);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
@@ -176,7 +176,7 @@ static void rfcomm_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
- BT_DBG("parent %p", parent);
+ BT_DBG("parent %pK", parent);
/* Close not yet accepted dlcs */
while ((sk = bt_accept_dequeue(parent, NULL))) {
@@ -196,7 +196,8 @@ static void rfcomm_sock_kill(struct sock *sk)
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
- BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
+ BT_DBG("sk %pK state %d refcnt %d", sk, sk->sk_state,
+ atomic_read(&sk->sk_refcnt));
/* Kill poor orphan */
bt_sock_unlink(&rfcomm_sk_list, sk);
@@ -208,7 +209,7 @@ static void __rfcomm_sock_close(struct sock *sk)
{
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
- BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
+ BT_DBG("sk %pK state %d socket %pK", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
case BT_LISTEN:
@@ -241,7 +242,7 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
{
struct rfcomm_pinfo *pi = rfcomm_pi(sk);
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (parent) {
sk->sk_type = parent->sk_type;
@@ -306,7 +307,7 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int
bt_sock_link(&rfcomm_sk_list, sk);
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
return sk;
}
@@ -315,7 +316,7 @@ static int rfcomm_sock_create(struct net *net, struct socket *sock,
{
struct sock *sk;
- BT_DBG("sock %p", sock);
+ BT_DBG("sock %pK", sock);
sock->state = SS_UNCONNECTED;
@@ -345,7 +346,7 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
len = min_t(unsigned int, sizeof(sa), addr_len);
memcpy(&sa, addr, len);
- BT_DBG("sk %p %pMR", sk, &sa.rc_bdaddr);
+ BT_DBG("sk %pK %pMR", sk, &sa.rc_bdaddr);
lock_sock(sk);
@@ -385,7 +386,7 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
int err = 0;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (alen < sizeof(struct sockaddr_rc) ||
addr->sa_family != AF_BLUETOOTH)
@@ -426,7 +427,7 @@ static int rfcomm_sock_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
int err = 0;
- BT_DBG("sk %p backlog %d", sk, backlog);
+ BT_DBG("sk %pK backlog %d", sk, backlog);
lock_sock(sk);
@@ -486,7 +487,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
- BT_DBG("sk %p timeo %ld", sk, timeo);
+ BT_DBG("sk %pK timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
@@ -523,7 +524,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
newsock->state = SS_CONNECTED;
- BT_DBG("new socket %p", nsk);
+ BT_DBG("new socket %pK", nsk);
done:
release_sock(sk);
@@ -535,7 +536,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %pK, sk %pK", sock, sk);
if (peer && sk->sk_state != BT_CONNECTED &&
sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2)
@@ -570,7 +571,7 @@ static int rfcomm_sock_sendmsg(struct socket *sock, struct msghdr *msg,
if (sk->sk_shutdown & SEND_SHUTDOWN)
return -EPIPE;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %pK, sk %pK", sock, sk);
lock_sock(sk);
@@ -650,7 +651,7 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __u
int err = 0;
u32 opt;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
lock_sock(sk);
@@ -693,7 +694,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
size_t len;
u32 opt;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (level == SOL_RFCOMM)
return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen);
@@ -762,7 +763,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
int len, err = 0;
u32 opt;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (get_user(len, optlen))
return -EFAULT;
@@ -834,7 +835,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c
struct bt_security sec;
int len, err = 0;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (level == SOL_RFCOMM)
return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen);
@@ -889,7 +890,7 @@ static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
struct sock *sk __maybe_unused = sock->sk;
int err;
- BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);
+ BT_DBG("sk %pK cmd %x arg %lx", sk, cmd, arg);
err = bt_sock_ioctl(sock, cmd, arg);
@@ -911,7 +912,7 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)
struct sock *sk = sock->sk;
int err = 0;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %pK, sk %pK", sock, sk);
if (!sk)
return 0;
@@ -934,7 +935,7 @@ static int rfcomm_sock_release(struct socket *sock)
struct sock *sk = sock->sk;
int err;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %pK, sk %pK", sock, sk);
if (!sk)
return 0;
@@ -956,7 +957,7 @@ int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc *
bdaddr_t src, dst;
int result = 0;
- BT_DBG("session %p channel %d", s, channel);
+ BT_DBG("session %pK channel %d", s, channel);
rfcomm_session_getaddr(s, &src, &dst);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 8e385a0ae60e..71f8126be12b 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -83,7 +83,7 @@ static void rfcomm_dev_destruct(struct tty_port *port)
struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
struct rfcomm_dlc *dlc = dev->dlc;
- BT_DBG("dev %p dlc %p", dev, dlc);
+ BT_DBG("dev %pK dlc %pK", dev, dlc);
rfcomm_dlc_lock(dlc);
/* Detach DLC if it's owned by this dev */
@@ -396,7 +396,7 @@ static int __rfcomm_create_dev(struct sock *sk, void __user *arg)
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
- BT_DBG("sk %p dev_id %d flags 0x%x", sk, req.dev_id, req.flags);
+ BT_DBG("sk %pK dev_id %d flags 0x%x", sk, req.dev_id, req.flags);
if (req.flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN))
return -EPERM;
@@ -581,7 +581,7 @@ static int rfcomm_get_dev_info(void __user *arg)
int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
- BT_DBG("cmd %d arg %p", cmd, arg);
+ BT_DBG("cmd %d arg %pK", cmd, arg);
switch (cmd) {
case RFCOMMCREATEDEV:
@@ -615,7 +615,7 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
return;
}
- BT_DBG("dlc %p len %d", dlc, skb->len);
+ BT_DBG("dlc %pK len %d", dlc, skb->len);
tty_insert_flip_string(&dev->port, skb->data, skb->len);
tty_flip_buffer_push(&dev->port);
@@ -629,7 +629,7 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
if (!dev)
return;
- BT_DBG("dlc %p dev %p err %d", dlc, dev, err);
+ BT_DBG("dlc %pK dev %pK err %d", dlc, dev, err);
dev->err = err;
if (dlc->state == BT_CONNECTED) {
@@ -646,7 +646,7 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
if (!dev)
return;
- BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig);
+ BT_DBG("dlc %pK dev %pK v24_sig 0x%02x", dlc, dev, v24_sig);
if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV))
tty_port_tty_hangup(&dev->port, true);
@@ -664,7 +664,7 @@ static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev)
struct sk_buff *skb;
int inserted = 0;
- BT_DBG("dev %p", dev);
+ BT_DBG("dev %pK", dev);
rfcomm_dlc_lock(dev->dlc);
@@ -749,9 +749,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
struct rfcomm_dev *dev = tty->driver_data;
int err;
- BT_DBG("tty %p id %d", tty, tty->index);
+ BT_DBG("tty %pK id %d", tty, tty->index);
- BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
+ BT_DBG("dev %pK dst %pMR channel %d opened %d", dev, &dev->dst,
dev->channel, dev->port.count);
err = tty_port_open(&dev->port, tty, filp);
@@ -774,8 +774,8 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
- BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
- dev->port.count);
+ BT_DBG("tty %pK dev %pK dlc %pK opened %d", tty, dev, dev->dlc,
+ dev->port.count);
tty_port_close(&dev->port, tty, filp);
}
@@ -787,7 +787,7 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in
struct sk_buff *skb;
int sent = 0, size;
- BT_DBG("tty %p count %d", tty, count);
+ BT_DBG("tty %pK count %d", tty, count);
while (count) {
size = min_t(uint, count, dlc->mtu);
@@ -817,14 +817,14 @@ static int rfcomm_tty_write_room(struct tty_struct *tty)
if (dev && dev->dlc)
room = rfcomm_room(dev);
- BT_DBG("tty %p room %d", tty, room);
+ BT_DBG("tty %pK room %d", tty, room);
return room;
}
static int rfcomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
- BT_DBG("tty %p cmd 0x%02x", tty, cmd);
+ BT_DBG("tty %pK cmd 0x%02x", tty, cmd);
switch (cmd) {
case TCGETS:
@@ -878,7 +878,7 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
- BT_DBG("tty %p termios %p", tty, old);
+ BT_DBG("tty %pK termios %pK", tty, old);
if (!dev || !dev->dlc || !dev->dlc->session)
return;
@@ -1010,7 +1010,7 @@ static void rfcomm_tty_throttle(struct tty_struct *tty)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
- BT_DBG("tty %p dev %p", tty, dev);
+ BT_DBG("tty %pK dev %pK", tty, dev);
rfcomm_dlc_throttle(dev->dlc);
}
@@ -1019,7 +1019,7 @@ static void rfcomm_tty_unthrottle(struct tty_struct *tty)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
- BT_DBG("tty %p dev %p", tty, dev);
+ BT_DBG("tty %pK dev %pK", tty, dev);
rfcomm_dlc_unthrottle(dev->dlc);
}
@@ -1028,7 +1028,7 @@ static int rfcomm_tty_chars_in_buffer(struct tty_struct *tty)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
- BT_DBG("tty %p dev %p", tty, dev);
+ BT_DBG("tty %pK dev %pK", tty, dev);
if (!dev || !dev->dlc)
return 0;
@@ -1043,7 +1043,7 @@ static void rfcomm_tty_flush_buffer(struct tty_struct *tty)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
- BT_DBG("tty %p dev %p", tty, dev);
+ BT_DBG("tty %pK dev %pK", tty, dev);
if (!dev || !dev->dlc)
return;
@@ -1054,19 +1054,19 @@ static void rfcomm_tty_flush_buffer(struct tty_struct *tty)
static void rfcomm_tty_send_xchar(struct tty_struct *tty, char ch)
{
- BT_DBG("tty %p ch %c", tty, ch);
+ BT_DBG("tty %pK ch %c", tty, ch);
}
static void rfcomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
{
- BT_DBG("tty %p timeout %d", tty, timeout);
+ BT_DBG("tty %pK timeout %d", tty, timeout);
}
static void rfcomm_tty_hangup(struct tty_struct *tty)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
- BT_DBG("tty %p dev %p", tty, dev);
+ BT_DBG("tty %pK dev %pK", tty, dev);
tty_port_hangup(&dev->port);
}
@@ -1075,7 +1075,7 @@ static int rfcomm_tty_tiocmget(struct tty_struct *tty)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
- BT_DBG("tty %p dev %p", tty, dev);
+ BT_DBG("tty %pK dev %pK", tty, dev);
return dev->modem_status;
}
@@ -1086,7 +1086,7 @@ static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigne
struct rfcomm_dlc *dlc = dev->dlc;
u8 v24_sig;
- BT_DBG("tty %p dev %p set 0x%02x clear 0x%02x", tty, dev, set, clear);
+ BT_DBG("tty %pK dev %pK set 0x%02x clear 0x%02x", tty, dev, set, clear);
rfcomm_dlc_get_modem_status(dlc, &v24_sig);
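
[Editor's note: throughout the series, %pMR arguments (&dev->dst, &hcon->dst, &sa.rc_bdaddr, ...) are deliberately untouched. That specifier dereferences the pointer and prints the six bytes of a bdaddr_t in Bluetooth's reversed byte order — the address's contents, not a kernel virtual address — so it reveals nothing about kernel layout. A hypothetical helper for illustration:

#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical helper: %pMR prints the six address bytes in reversed
 * order (colon-separated, e.g. ff:ee:dd:cc:bb:aa for memory order
 * aa..ff); the pointer value itself never reaches the log, so no %pK
 * treatment is needed.
 */
static void log_peer(const u8 bdaddr[6])
{
	pr_info("peer %pMR\n", bdaddr);
}
]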
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index f52bcbf2e58c..7d5457d64212 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -76,7 +76,7 @@ static void sco_sock_timeout(unsigned long arg)
{
struct sock *sk = (struct sock *)arg;
- BT_DBG("sock %p state %d", sk, sk->sk_state);
+ BT_DBG("sock %pK state %d", sk, sk->sk_state);
bh_lock_sock(sk);
sk->sk_err = ETIMEDOUT;
@@ -89,13 +89,13 @@ static void sco_sock_timeout(unsigned long arg)
static void sco_sock_set_timer(struct sock *sk, long timeout)
{
- BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout);
+ BT_DBG("sock %pK state %d timeout %ld", sk, sk->sk_state, timeout);
sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
static void sco_sock_clear_timer(struct sock *sk)
{
- BT_DBG("sock %p state %d", sk, sk->sk_state);
+ BT_DBG("sock %pK state %d", sk, sk->sk_state);
sk_stop_timer(sk, &sk->sk_timer);
}
@@ -122,7 +122,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
else
conn->mtu = 60;
- BT_DBG("hcon %p conn %p", hcon, conn);
+ BT_DBG("hcon %pK conn %pK", hcon, conn);
return conn;
}
@@ -135,7 +135,7 @@ static void sco_chan_del(struct sock *sk, int err)
conn = sco_pi(sk)->conn;
- BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
+ BT_DBG("sk %pK, conn %pK, err %d", sk, conn, err);
if (conn) {
sco_conn_lock(conn);
@@ -162,7 +162,7 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
if (!conn)
return;
- BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+ BT_DBG("hcon %pK conn %pK, err %d", hcon, conn, err);
/* Kill socket */
sco_conn_lock(conn);
@@ -186,7 +186,7 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
static void __sco_chan_add(struct sco_conn *conn, struct sock *sk,
struct sock *parent)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
sco_pi(sk)->conn = conn;
conn->sk = sk;
@@ -281,7 +281,7 @@ static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
if (len > conn->mtu)
return -EINVAL;
- BT_DBG("sk %p len %d", sk, len);
+ BT_DBG("sk %pK len %d", sk, len);
skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb)
@@ -308,7 +308,7 @@ static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
if (!sk)
goto drop;
- BT_DBG("sk %p len %d", sk, skb->len);
+ BT_DBG("sk %pK len %d", sk, skb->len);
if (sk->sk_state != BT_CONNECTED)
goto drop;
@@ -365,7 +365,7 @@ static struct sock *sco_get_sock_listen(bdaddr_t *src)
static void sco_sock_destruct(struct sock *sk)
{
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_write_queue);
@@ -375,7 +375,7 @@ static void sco_sock_cleanup_listen(struct sock *parent)
{
struct sock *sk;
- BT_DBG("parent %p", parent);
+ BT_DBG("parent %pK", parent);
/* Close not yet accepted channels */
while ((sk = bt_accept_dequeue(parent, NULL))) {
@@ -395,7 +395,7 @@ static void sco_sock_kill(struct sock *sk)
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
- BT_DBG("sk %p state %d", sk, sk->sk_state);
+ BT_DBG("sk %pK state %d", sk, sk->sk_state);
/* Kill poor orphan */
bt_sock_unlink(&sco_sk_list, sk);
@@ -405,7 +405,7 @@ static void sco_sock_kill(struct sock *sk)
static void __sco_sock_close(struct sock *sk)
{
- BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
+ BT_DBG("sk %pK state %d socket %pK", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
case BT_LISTEN:
@@ -449,7 +449,7 @@ static void sco_sock_close(struct sock *sk)
static void sco_sock_init(struct sock *sk, struct sock *parent)
{
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (parent) {
sk->sk_type = parent->sk_type;
@@ -497,7 +497,7 @@ static int sco_sock_create(struct net *net, struct socket *sock, int protocol,
{
struct sock *sk;
- BT_DBG("sock %p", sock);
+ BT_DBG("sock %pK", sock);
sock->state = SS_UNCONNECTED;
@@ -521,7 +521,7 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
struct sock *sk = sock->sk;
int err = 0;
- BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
+ BT_DBG("sk %pK %pMR", sk, &sa->sco_bdaddr);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
@@ -556,7 +556,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
struct sock *sk = sock->sk;
int err;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (alen < sizeof(struct sockaddr_sco) ||
addr->sa_family != AF_BLUETOOTH)
@@ -591,7 +591,7 @@ static int sco_sock_listen(struct socket *sock, int backlog)
bdaddr_t *src = &sco_pi(sk)->src;
int err = 0;
- BT_DBG("sk %p backlog %d", sk, backlog);
+ BT_DBG("sk %pK backlog %d", sk, backlog);
lock_sock(sk);
@@ -637,7 +637,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock,
timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
- BT_DBG("sk %p timeo %ld", sk, timeo);
+ BT_DBG("sk %pK timeo %ld", sk, timeo);
/* Wait for an incoming connection. (wake-one). */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
@@ -673,7 +673,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock,
newsock->state = SS_CONNECTED;
- BT_DBG("new socket %p", ch);
+ BT_DBG("new socket %pK", ch);
done:
release_sock(sk);
@@ -686,7 +686,7 @@ static int sco_sock_getname(struct socket *sock, struct sockaddr *addr,
struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
struct sock *sk = sock->sk;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %pK, sk %pK", sock, sk);
addr->sa_family = AF_BLUETOOTH;
*len = sizeof(struct sockaddr_sco);
@@ -705,7 +705,7 @@ static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg,
struct sock *sk = sock->sk;
int err;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %pK, sk %pK", sock, sk);
err = sock_error(sk);
if (err)
@@ -729,7 +729,7 @@ static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
conn->state = BT_CONFIG;
@@ -799,7 +799,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
struct bt_voice voice;
u32 opt;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
lock_sock(sk);
@@ -864,7 +864,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
struct sco_conninfo cinfo;
int len, err = 0;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (get_user(len, optlen))
return -EFAULT;
@@ -924,7 +924,7 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
int len, err = 0;
struct bt_voice voice;
- BT_DBG("sk %p", sk);
+ BT_DBG("sk %pK", sk);
if (level == SOL_SCO)
return sco_sock_getsockopt_old(sock, optname, optval, optlen);
@@ -971,7 +971,7 @@ static int sco_sock_shutdown(struct socket *sock, int how)
struct sock *sk = sock->sk;
int err = 0;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %pK, sk %pK", sock, sk);
if (!sk)
return 0;
@@ -1001,7 +1001,7 @@ static int sco_sock_release(struct socket *sock)
struct sock *sk = sock->sk;
int err = 0;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %pK, sk %pK", sock, sk);
if (!sk)
return 0;
@@ -1025,7 +1025,7 @@ static void sco_conn_ready(struct sco_conn *conn)
struct sock *parent;
struct sock *sk = conn->sk;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
if (sk) {
sco_sock_clear_timer(sk);
@@ -1112,7 +1112,7 @@ static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return;
- BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
+ BT_DBG("hcon %pK bdaddr %pMR status %d", hcon, &hcon->dst, status);
if (!status) {
struct sco_conn *conn;
@@ -1129,7 +1129,7 @@ static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
return;
- BT_DBG("hcon %p reason %d", hcon, reason);
+ BT_DBG("hcon %pK reason %d", hcon, reason);
sco_conn_del(hcon, bt_to_errno(reason));
}
@@ -1141,7 +1141,7 @@ void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
if (!conn)
goto drop;
- BT_DBG("conn %p len %d", conn, skb->len);
+ BT_DBG("conn %pK len %d", conn, skb->len);
if (skb->len) {
sco_recv_frame(conn, skb);
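
[Editor's note: for reference, the policy %pK applies is controlled by /proc/sys/kernel/kptr_restrict. The userspace sketch below loosely models the 0/1/2 semantics of restricted pointers on kernels of this generation; it is a simplification, not the vsprintf.c code — the real check also depends on whether the process triggering the formatting holds CAP_SYSLOG at printk time:

#include <stdbool.h>
#include <stddef.h>

/* Simplified model of the %pK decision (assumed semantics, hedged):
 *   0 -> behave like %p; 1 -> hide unless privileged; 2 -> always hide.
 * Returning NULL stands in for the kernel printing all zeros.
 */
static const void *pk_filter(const void *ptr, int kptr_restrict,
			     bool caller_has_cap_syslog)
{
	switch (kptr_restrict) {
	case 1:
		return caller_has_cap_syslog ? ptr : NULL;
	case 2:
		return NULL;
	default:
		return ptr;
	}
}
]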
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 4b175df35184..6f2c704e41ab 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -177,7 +177,7 @@ static int aes_cmac(struct crypto_hash *tfm, const u8 k[16], const u8 *m,
return -EFBIG;
if (!tfm) {
- BT_ERR("tfm %p", tfm);
+ BT_ERR("tfm %pK", tfm);
return -EINVAL;
}
@@ -380,7 +380,7 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
SMP_DBG("k %16phN r %16phN", k, r);
if (!tfm) {
- BT_ERR("tfm %p", tfm);
+ BT_ERR("tfm %pK", tfm);
return -EINVAL;
}
@@ -952,7 +952,7 @@ static u8 smp_confirm(struct smp_chan *smp)
struct smp_cmd_pairing_confirm cp;
int ret;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
ret = smp_c1(smp->tfm_aes, smp->tk, smp->prnd, smp->preq, smp->prsp,
conn->hcon->init_addr_type, &conn->hcon->init_addr,
@@ -983,7 +983,7 @@ static u8 smp_random(struct smp_chan *smp)
if (IS_ERR_OR_NULL(smp->tfm_aes))
return SMP_UNSPECIFIED;
- BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
+ BT_DBG("conn %pK %s", conn, conn->hcon->out ? "master" : "slave");
ret = smp_c1(smp->tfm_aes, smp->tk, smp->rrnd, smp->preq, smp->prsp,
hcon->init_addr_type, &hcon->init_addr,
@@ -1222,7 +1222,7 @@ static void smp_distribute_keys(struct smp_chan *smp)
struct hci_dev *hdev = hcon->hdev;
__u8 *keydist;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
rsp = (void *) &smp->prsp[1];
@@ -1352,7 +1352,7 @@ static void smp_timeout(struct work_struct *work)
security_timer.work);
struct l2cap_conn *conn = smp->conn;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
hci_disconnect(conn->hcon, HCI_ERROR_REMOTE_USER_TERM);
}
@@ -1714,7 +1714,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
u8 key_size, auth, sec_level;
int ret;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
if (skb->len < sizeof(*req))
return SMP_INVALID_PARAMS;
@@ -1899,7 +1899,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
u8 key_size, auth;
int ret;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
if (skb->len < sizeof(*rsp))
return SMP_INVALID_PARAMS;
@@ -2051,7 +2051,7 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
struct l2cap_chan *chan = conn->smp;
struct smp_chan *smp = chan->data;
- BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
+ BT_DBG("conn %pK %s", conn, conn->hcon->out ? "master" : "slave");
if (skb->len < sizeof(smp->pcnf))
return SMP_INVALID_PARAMS;
@@ -2097,7 +2097,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
u32 passkey;
int err;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
if (skb->len < sizeof(smp->rrnd))
return SMP_INVALID_PARAMS;
@@ -2232,7 +2232,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_chan *smp;
u8 sec_level, auth;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
if (skb->len < sizeof(*rp))
return SMP_INVALID_PARAMS;
@@ -2289,7 +2289,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
__u8 authreq;
int ret;
- BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
+ BT_DBG("conn %pK hcon %pK level 0x%2.2x", conn, hcon, sec_level);
/* This may be NULL if there's an unexpected disconnection */
if (!conn)
@@ -2396,7 +2396,7 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
struct l2cap_chan *chan = conn->smp;
struct smp_chan *smp = chan->data;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
if (skb->len < sizeof(*rp))
return SMP_INVALID_PARAMS;
@@ -2420,7 +2420,7 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_ltk *ltk;
u8 authenticated;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
if (skb->len < sizeof(*rp))
return SMP_INVALID_PARAMS;
@@ -2529,7 +2529,7 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_chan *smp = chan->data;
struct smp_csrk *csrk;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
if (skb->len < sizeof(*rp))
return SMP_INVALID_PARAMS;
@@ -2608,7 +2608,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_cmd_pairing_confirm cfm;
int err;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
if (skb->len < sizeof(*key))
return SMP_INVALID_PARAMS;
@@ -2721,7 +2721,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
u8 io_cap[3], r[16], e[16];
int err;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %pK", conn);
if (skb->len < sizeof(*check))
return SMP_INVALID_PARAMS;
@@ -2903,7 +2903,7 @@ static void smp_teardown_cb(struct l2cap_chan *chan, int err)
{
struct l2cap_conn *conn = chan->conn;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
if (chan->data)
smp_chan_destroy(conn);
@@ -2920,7 +2920,7 @@ static void bredr_pairing(struct l2cap_chan *chan)
struct smp_cmd_pairing req;
struct smp_chan *smp;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
/* Only new pairings are interesting */
if (!test_bit(HCI_CONN_NEW_LINK_KEY, &hcon->flags))
@@ -2986,7 +2986,7 @@ static void smp_resume_cb(struct l2cap_chan *chan)
struct l2cap_conn *conn = chan->conn;
struct hci_conn *hcon = conn->hcon;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
if (hcon->type == ACL_LINK) {
bredr_pairing(chan);
@@ -3009,7 +3009,7 @@ static void smp_ready_cb(struct l2cap_chan *chan)
struct l2cap_conn *conn = chan->conn;
struct hci_conn *hcon = conn->hcon;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
/* No need to call l2cap_chan_hold() here since we already own
* the reference taken in smp_new_conn_cb(). This is just the
@@ -3027,7 +3027,7 @@ static int smp_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
int err;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
err = smp_sig_channel(chan, skb);
if (err) {
@@ -3079,7 +3079,7 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
{
struct l2cap_chan *chan;
- BT_DBG("pchan %p", pchan);
+ BT_DBG("pchan %pK", pchan);
chan = l2cap_chan_create();
if (!chan)
@@ -3100,7 +3100,7 @@ static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
*/
atomic_set(&chan->nesting, L2CAP_NESTING_SMP);
- BT_DBG("created chan %p", chan);
+ BT_DBG("created chan %pK", chan);
return chan;
}
@@ -3205,7 +3205,7 @@ static void smp_del_chan(struct l2cap_chan *chan)
{
struct smp_dev *smp;
- BT_DBG("chan %p", chan);
+ BT_DBG("chan %pK", chan);
smp = chan->data;
if (smp) {
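The %p to %pK conversions in these Bluetooth files (and throughout this patch) make debug output honor the kernel.kptr_restrict sysctl: with kptr_restrict set, %pK shows zeros to readers without the required capability instead of raw kernel addresses. A minimal sketch of the difference:

	/* Sketch: with sysctl kernel.kptr_restrict = 1, the first line
	 * logs a real address only for privileged readers (zeros
	 * otherwise), while the second always exposes the raw pointer
	 * value on kernels of this vintage.
	 */
	pr_info("conn %pK\n", conn);	/* restricted: safe for debug logs */
	pr_info("conn %p\n", conn);	/* unrestricted: leaks the address */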
diff --git a/net/core/Makefile b/net/core/Makefile
index 086b01fbe1bd..87e2e186602b 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -24,3 +24,4 @@ obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
+obj-$(CONFIG_SOCKEV_NLMCAST) += sockev_nlmcast.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 0798a0f1b395..1d24d5e54ac0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -137,6 +137,8 @@
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
#include "net-sysfs.h"
@@ -2773,6 +2775,10 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
if (netif_needs_gso(skb, features)) {
struct sk_buff *segs;
+ __be16 src_port = tcp_hdr(skb)->source;
+ __be16 dest_port = tcp_hdr(skb)->dest;
+
+ trace_print_skb_gso(skb, src_port, dest_port);
segs = skb_gso_segment(skb, features);
if (IS_ERR(segs)) {
goto out_kfree_skb;
@@ -2812,7 +2818,7 @@ out_null:
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
{
- struct sk_buff *next, *head = NULL, *tail;
+ struct sk_buff *next, *head = NULL, *tail = NULL;
for (; skb != NULL; skb = next) {
next = skb->next;
@@ -4112,6 +4118,7 @@ static int napi_gro_complete(struct sk_buff *skb)
}
out:
+ __this_cpu_add(softnet_data.gro_coalesced, NAPI_GRO_CB(skb)->count > 1);
return netif_receive_skb_internal(skb);
}
@@ -4154,6 +4161,7 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
unsigned long diffs;
NAPI_GRO_CB(p)->flush = 0;
+ NAPI_GRO_CB(p)->flush_id = 0;
if (hash != skb_get_hash_raw(p)) {
NAPI_GRO_CB(p)->same_flow = 0;
@@ -4540,9 +4548,15 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd)
while (remsd) {
struct softnet_data *next = remsd->rps_ipi_next;
- if (cpu_online(remsd->cpu))
+ if (cpu_online(remsd->cpu)) {
smp_call_function_single_async(remsd->cpu,
&remsd->csd);
+ } else {
+ pr_err("%s() cpu offline\n", __func__);
+ rps_lock(remsd);
+ remsd->backlog.state = 0;
+ rps_unlock(remsd);
+ }
remsd = next;
}
} else
@@ -4585,8 +4599,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
local_irq_disable();
input_queue_head_incr(sd);
if (++work >= quota) {
- local_irq_enable();
- return work;
+ goto state_changed;
}
}
@@ -4603,14 +4616,17 @@ static int process_backlog(struct napi_struct *napi, int quota)
napi->state = 0;
rps_unlock(sd);
- break;
+ goto state_changed;
}
skb_queue_splice_tail_init(&sd->input_pkt_queue,
&sd->process_queue);
rps_unlock(sd);
}
+state_changed:
local_irq_enable();
+ napi_gro_flush(napi, false);
+ sd->current_napi = NULL;
return work;
}
@@ -4646,10 +4662,13 @@ EXPORT_SYMBOL(__napi_schedule_irqoff);
void __napi_complete(struct napi_struct *n)
{
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
list_del_init(&n->poll_list);
smp_mb__before_atomic();
+ sd->current_napi = NULL;
clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);
@@ -4802,6 +4821,15 @@ void netif_napi_del(struct napi_struct *napi)
}
EXPORT_SYMBOL(netif_napi_del);
+
+struct napi_struct *get_current_napi_context(void)
+{
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
+ return sd->current_napi;
+}
+EXPORT_SYMBOL(get_current_napi_context);
+
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
void *have;
@@ -4821,6 +4849,9 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
*/
work = 0;
if (test_bit(NAPI_STATE_SCHED, &n->state)) {
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
+ sd->current_napi = n;
work = n->poll(n, weight);
trace_napi_poll(n);
}
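The exported get_current_napi_context() lets code that runs inside another driver's poll (a demux layer, for example) recover the NAPI instance currently active on this CPU. A sketch of a hypothetical consumer (demux_deliver is illustrative, not part of this patch):

	/* Hypothetical RX path: feed a decapsulated skb into GRO using
	 * the NAPI context of the poll we are currently running under,
	 * falling back to netif_receive_skb() outside NAPI context.
	 */
	static void demux_deliver(struct sk_buff *skb)
	{
		struct napi_struct *napi = get_current_napi_context();

		if (napi)
			napi_gro_receive(napi, skb);
		else
			netif_receive_skb(skb);
	}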
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index ee9082792530..5ec5502d7360 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -18,6 +18,7 @@
#include <linux/mpls.h>
#include <net/flow_dissector.h>
#include <scsi/fc/fc_fcoe.h>
+#include <linux/net_map.h>
static bool dissector_uses_key(const struct flow_dissector *flow_dissector,
enum flow_dissector_key_id key_id)
@@ -338,6 +339,40 @@ mpls:
goto out_good;
}
+ case htons(ETH_P_MAP): {
+ struct {
+ struct rmnet_map_header_s map;
+ uint8_t proto;
+ } *map, _map;
+ unsigned int maplen;
+
+ map = skb_header_pointer(skb, nhoff, sizeof(_map), &_map);
+ if (!map)
+ return false;
+
+ /* Is MAP command? */
+ if (map->map.cd_bit)
+ return false;
+
+ /* Is aggregated frame? */
+ maplen = ntohs(map->map.pkt_len);
+ maplen += map->map.pad_len;
+ maplen += sizeof(struct rmnet_map_header_s);
+ if (maplen < skb->len)
+ return false;
+
+ nhoff += sizeof(struct rmnet_map_header_s);
+ switch (map->proto & RMNET_IP_VER_MASK) {
+ case RMNET_IPV4:
+ proto = htons(ETH_P_IP);
+ goto ip;
+ case RMNET_IPV6:
+ proto = htons(ETH_P_IPV6);
+ goto ipv6;
+ default:
+ return false;
+ }
+ }
case htons(ETH_P_FCOE):
key_control->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
/* fall through */
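The MAP case above dissects a frame only when the MAP header accounts for the entire skb, i.e. the frame is not an aggregate of several packets. A standalone restatement of that test, with rmnet_map_header_s assumed from the linux/net_map.h header included above:

	/* Sketch: returns true when the MAP header describes a frame
	 * that fills the whole skb; mirrors the maplen computation in
	 * the dissector case above.
	 */
	static bool map_frame_is_single(const struct rmnet_map_header_s *map,
					unsigned int skb_len)
	{
		unsigned int maplen = ntohs(map->pkt_len) + map->pad_len +
				      sizeof(*map);

		return maplen >= skb_len;
	}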
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 769cece9b00b..f1e575d7f21a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -687,7 +687,7 @@ void neigh_destroy(struct neighbour *neigh)
NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
if (!neigh->dead) {
- pr_warn("Destroying alive neighbour %p\n", neigh);
+ pr_warn("Destroying alive neighbour %pK\n", neigh);
dump_stack();
return;
}
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 2bf83299600a..77969b71a50a 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -159,10 +159,11 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
#endif
seq_printf(seq,
- "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
sd->processed, sd->dropped, sd->time_squeeze, 0,
0, 0, 0, 0, /* was fastroute */
- sd->cpu_collision, sd->received_rps, flow_limit_count);
+ sd->cpu_collision, sd->received_rps, flow_limit_count,
+ sd->gro_coalesced);
return 0;
}
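Each /proc/net/softnet_stat row now carries twelve hex fields, the last being the new gro_coalesced counter. A user-space sketch that reads it back, assuming the column order printed above:

	#include <stdio.h>

	int main(void)
	{
		unsigned int f[12];
		FILE *fp = fopen("/proc/net/softnet_stat", "r");
		int cpu = 0;

		if (!fp)
			return 1;
		/* One row per online CPU, twelve hex fields per row. */
		while (fscanf(fp, "%x %x %x %x %x %x %x %x %x %x %x %x",
			      &f[0], &f[1], &f[2], &f[3], &f[4], &f[5],
			      &f[6], &f[7], &f[8], &f[9], &f[10], &f[11]) == 12)
			printf("cpu%d: processed=%u gro_coalesced=%u\n",
			       cpu++, f[0], f[11]);
		fclose(fp);
		return 0;
	}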
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4968b5ddea69..5dd643d524d6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -208,6 +208,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
u8 *data;
bool pfmemalloc;
+ if (IS_ENABLED(CONFIG_FORCE_ALLOC_FROM_DMA_ZONE))
+ gfp_mask |= GFP_DMA;
+
cache = (flags & SKB_ALLOC_FCLONE)
? skbuff_fclone_cache : skbuff_head_cache;
@@ -358,6 +361,9 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
unsigned long flags;
void *data;
+ if (IS_ENABLED(CONFIG_FORCE_ALLOC_FROM_DMA_ZONE))
+ gfp_mask |= GFP_DMA;
+
local_irq_save(flags);
nc = this_cpu_ptr(&netdev_alloc_cache);
data = __alloc_page_frag(nc, fragsz, gfp_mask);
@@ -404,6 +410,7 @@ EXPORT_SYMBOL(napi_alloc_frag);
*
* %NULL is returned if there is no free memory.
*/
+#ifndef CONFIG_DISABLE_NET_SKB_FRAG_CACHE
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
gfp_t gfp_mask)
{
@@ -415,6 +422,9 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
len += NET_SKB_PAD;
+ if (IS_ENABLED(CONFIG_FORCE_ALLOC_FROM_DMA_ZONE))
+ gfp_mask |= GFP_DMA;
+
if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
(gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
@@ -458,6 +468,22 @@ skb_success:
skb_fail:
return skb;
}
+#else
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+ unsigned int length, gfp_t gfp_mask)
+{
+ struct sk_buff *skb = NULL;
+
+ skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
+ SKB_ALLOC_RX, NUMA_NO_NODE);
+ if (likely(skb)) {
+ skb_reserve(skb, NET_SKB_PAD);
+ skb->dev = dev;
+ }
+ return skb;
+}
+#endif
+
EXPORT_SYMBOL(__netdev_alloc_skb);
/**
diff --git a/net/core/sock.c b/net/core/sock.c
index f367df38c264..a84a154cdf0c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1439,8 +1439,12 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
}
EXPORT_SYMBOL(sk_alloc);
-void sk_destruct(struct sock *sk)
+/* Sockets having SOCK_RCU_FREE will call this function after one RCU
+ * grace period. This is the case for UDP sockets and TCP listeners.
+ */
+static void __sk_destruct(struct rcu_head *head)
{
+ struct sock *sk = container_of(head, struct sock, sk_rcu);
struct sk_filter *filter;
if (sk->sk_destruct)
@@ -1467,6 +1471,14 @@ void sk_destruct(struct sock *sk)
sk_prot_free(sk->sk_prot_creator, sk);
}
+void sk_destruct(struct sock *sk)
+{
+ if (sock_flag(sk, SOCK_RCU_FREE))
+ call_rcu(&sk->sk_rcu, __sk_destruct);
+ else
+ __sk_destruct(&sk->sk_rcu);
+}
+
static void __sk_free(struct sock *sk)
{
if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
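The sk_destruct() split above is the standard call_rcu() deferred-free idiom: objects that lockless readers may still reach are freed only after a grace period. A minimal sketch of the same pattern on a hypothetical structure:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct item {
		int value;
		struct rcu_head rcu;	/* storage for the deferred callback */
	};

	static void item_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct item, rcu));
	}

	static void item_release(struct item *it, bool rcu_readers)
	{
		if (rcu_readers)			/* readers may still hold it */
			call_rcu(&it->rcu, item_free_rcu);
		else					/* no readers: free now */
			item_free_rcu(&it->rcu);
	}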
diff --git a/net/core/sockev_nlmcast.c b/net/core/sockev_nlmcast.c
new file mode 100644
index 000000000000..749ffb81c87c
--- /dev/null
+++ b/net/core/sockev_nlmcast.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Default SOCKEV client implementation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/netlink.h>
+#include <linux/sockev.h>
+#include <net/sock.h>
+
+static int registration_status;
+static struct sock *socknlmsgsk;
+
+static void sockev_skmsg_recv(struct sk_buff *skb)
+{
+ pr_debug("%s(): Got unsolicited request\n", __func__);
+}
+
+static struct netlink_kernel_cfg nlcfg = {
+ .input = sockev_skmsg_recv
+};
+
+static void _sockev_event(unsigned long event, __u8 *evstr, int buflen)
+{
+ switch (event) {
+ case SOCKEV_SOCKET:
+ strlcpy(evstr, "SOCKEV_SOCKET", buflen);
+ break;
+ case SOCKEV_BIND:
+ strlcpy(evstr, "SOCKEV_BIND", buflen);
+ break;
+ case SOCKEV_LISTEN:
+ strlcpy(evstr, "SOCKEV_LISTEN", buflen);
+ break;
+ case SOCKEV_ACCEPT:
+ strlcpy(evstr, "SOCKEV_ACCEPT", buflen);
+ break;
+ case SOCKEV_CONNECT:
+ strlcpy(evstr, "SOCKEV_CONNECT", buflen);
+ break;
+ case SOCKEV_SHUTDOWN:
+ strlcpy(evstr, "SOCKEV_SHUTDOWN", buflen);
+ break;
+ default:
+ strlcpy(evstr, "UNKOWN", buflen);
+ }
+}
+
+static int sockev_client_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct sknlsockevmsg *smsg;
+ struct socket *sock;
+
+ sock = (struct socket *)data;
+ if (!socknlmsgsk || !sock || !sock->sk)
+ goto done;
+
+ if (sock->sk->sk_family != AF_INET && sock->sk->sk_family != AF_INET6)
+ goto done;
+
+ if (event != SOCKEV_BIND && event != SOCKEV_LISTEN)
+ goto done;
+
+ skb = nlmsg_new(sizeof(struct sknlsockevmsg), GFP_KERNEL);
+ if (skb == NULL)
+ goto done;
+
+ nlh = nlmsg_put(skb, 0, 0, event, sizeof(struct sknlsockevmsg), 0);
+ if (nlh == NULL) {
+ kfree_skb(skb);
+ goto done;
+ }
+
+ NETLINK_CB(skb).dst_group = SKNLGRP_SOCKEV;
+
+ smsg = nlmsg_data(nlh);
+ smsg->pid = current->pid;
+ _sockev_event(event, smsg->event, sizeof(smsg->event));
+ smsg->skfamily = sock->sk->sk_family;
+ smsg->skstate = sock->sk->sk_state;
+ smsg->skprotocol = sock->sk->sk_protocol;
+ smsg->sktype = sock->sk->sk_type;
+ smsg->skflags = sock->sk->sk_flags;
+
+ nlmsg_notify(socknlmsgsk, skb, 0, SKNLGRP_SOCKEV, 0, GFP_KERNEL);
+done:
+ return 0;
+}
+
+static struct notifier_block sockev_notifier_client = {
+ .notifier_call = sockev_client_cb,
+ .next = NULL,
+ .priority = 0
+};
+
+/* ***************** Startup/Shutdown *************************************** */
+
+static int __init sockev_client_init(void)
+{
+ int rc;
+ registration_status = 1;
+ rc = sockev_register_notify(&sockev_notifier_client);
+ if (rc != 0) {
+ registration_status = 0;
+ pr_err("%s(): Failed to register cb (%d)\n", __func__, rc);
+ }
+ socknlmsgsk = netlink_kernel_create(&init_net, NETLINK_SOCKEV, &nlcfg);
+ if (!socknlmsgsk) {
+ pr_err("%s(): Failed to initialize netlink socket\n", __func__);
+ if (registration_status)
+ sockev_unregister_notify(&sockev_notifier_client);
+ registration_status = 0;
+ return -ENOMEM;
+ }
+
+ return rc;
+}
+static void __exit sockev_client_exit(void)
+{
+ if (registration_status)
+ sockev_unregister_notify(&sockev_notifier_client);
+}
+module_init(sockev_client_init)
+module_exit(sockev_client_exit)
+MODULE_LICENSE("GPL v2");
+
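A user-space client can receive these broadcasts by joining the SOCKEV multicast group on a NETLINK_SOCKEV socket. A sketch of such a listener, assuming the NETLINK_SOCKEV protocol number, SKNLGRP_SOCKEV group and struct sknlsockevmsg layout exported by linux/sockev.h (not shown in this patch), and that the group number fits the 32-bit bind bitmask:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/sockev.h>

	int main(void)
	{
		struct sockaddr_nl sa = {
			.nl_family = AF_NETLINK,
			.nl_groups = 1 << (SKNLGRP_SOCKEV - 1), /* group bitmask */
		};
		char buf[4096];
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCKEV);

		if (fd < 0 || bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
			return 1;
		for (;;) {
			ssize_t len = recv(fd, buf, sizeof(buf), 0);
			struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

			if (len <= 0)
				break;
			for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
				struct sknlsockevmsg *ev = NLMSG_DATA(nlh);

				printf("event=%s pid=%d\n", ev->event, ev->pid);
			}
		}
		return 0;
	}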
diff --git a/net/ipc_router/Kconfig b/net/ipc_router/Kconfig
new file mode 100644
index 000000000000..30cd45a70208
--- /dev/null
+++ b/net/ipc_router/Kconfig
@@ -0,0 +1,25 @@
+#
+# IPC_ROUTER Configuration
+#
+
+menuconfig IPC_ROUTER
+ bool "IPC Router support"
+ help
+ IPC Router provides a connectionless message routing service
+ between multiple modules within a System-on-Chip (SoC). The
+ communicating entities can run on the same processor or on
+ different processors within the SoC. The IPC Router is designed
+ to route messages of any type and to support a broad network of
+ processors.
+
+ If in doubt, say N.
+
+config IPC_ROUTER_SECURITY
+ depends on IPC_ROUTER
+ bool "IPC Router Security support"
+ help
+ This feature enforces security rules configured by a security
+ script from user-space. Once configured with these rules, the
+ IPC Router ensures that the sender of a message to a service
+ belongs to the Linux group specified for that service by the
+ security script.
diff --git a/net/ipc_router/Makefile b/net/ipc_router/Makefile
new file mode 100644
index 000000000000..501688e42e3d
--- /dev/null
+++ b/net/ipc_router/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux IPC_ROUTER
+#
+
+obj-$(CONFIG_IPC_ROUTER) := ipc_router_core.o
+obj-$(CONFIG_IPC_ROUTER) += ipc_router_socket.o
+obj-$(CONFIG_IPC_ROUTER_SECURITY) += ipc_router_security.o
diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c
new file mode 100644
index 000000000000..d23799a5b260
--- /dev/null
+++ b/net/ipc_router/ipc_router_core.c
@@ -0,0 +1,4362 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/rwsem.h>
+#include <linux/ipc_logging.h>
+#include <linux/uaccess.h>
+#include <linux/ipc_router.h>
+#include <linux/ipc_router_xprt.h>
+#include <linux/kref.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include <asm/byteorder.h>
+
+#include <soc/qcom/smem_log.h>
+
+#include "ipc_router_private.h"
+#include "ipc_router_security.h"
+
+enum {
+ SMEM_LOG = 1U << 0,
+ RTR_DBG = 1U << 1,
+};
+
+static int msm_ipc_router_debug_mask;
+module_param_named(debug_mask, msm_ipc_router_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+#define MODULE_NAME "ipc_router"
+
+#define IPC_RTR_INFO_PAGES 6
+
+#define IPC_RTR_INFO(log_ctx, x...) do { \
+if (log_ctx) \
+ ipc_log_string(log_ctx, x); \
+if (msm_ipc_router_debug_mask & RTR_DBG) \
+ pr_info("[IPCRTR] "x); \
+} while (0)
+
+#define IPC_ROUTER_LOG_EVENT_TX 0x01
+#define IPC_ROUTER_LOG_EVENT_RX 0x02
+#define IPC_ROUTER_LOG_EVENT_TX_ERR 0x03
+#define IPC_ROUTER_LOG_EVENT_RX_ERR 0x04
+#define IPC_ROUTER_DUMMY_DEST_NODE 0xFFFFFFFF
+
+#define ipc_port_sk(port) ((struct sock *)(port))
+
+static LIST_HEAD(control_ports);
+static DECLARE_RWSEM(control_ports_lock_lha5);
+
+#define LP_HASH_SIZE 32
+static struct list_head local_ports[LP_HASH_SIZE];
+static DECLARE_RWSEM(local_ports_lock_lhc2);
+
+/* Server info is organized as a hash table. The server's service ID is
+ * used to index into the hash table. The instance IDs of most servers
+ * are 1 or 2, while the service IDs are well distributed, so using the
+ * service ID as the hash key optimizes the hash table operations such
+ * as add, lookup and destroy.
+ */
+#define SRV_HASH_SIZE 32
+static struct list_head server_list[SRV_HASH_SIZE];
+static DECLARE_RWSEM(server_list_lock_lha2);
+
+struct msm_ipc_server {
+ struct list_head list;
+ struct kref ref;
+ struct msm_ipc_port_name name;
+ char pdev_name[32];
+ int next_pdev_id;
+ int synced_sec_rule;
+ struct list_head server_port_list;
+};
+
+struct msm_ipc_server_port {
+ struct list_head list;
+ struct platform_device *pdev;
+ struct msm_ipc_port_addr server_addr;
+ struct msm_ipc_router_xprt_info *xprt_info;
+};
+
+struct msm_ipc_resume_tx_port {
+ struct list_head list;
+ uint32_t port_id;
+ uint32_t node_id;
+};
+
+struct ipc_router_conn_info {
+ struct list_head list;
+ uint32_t port_id;
+};
+
+enum {
+ RESET = 0,
+ VALID = 1,
+};
+
+#define RP_HASH_SIZE 32
+struct msm_ipc_router_remote_port {
+ struct list_head list;
+ struct kref ref;
+ struct mutex rport_lock_lhb2;
+ uint32_t node_id;
+ uint32_t port_id;
+ int status;
+ uint32_t tx_quota_cnt;
+ struct list_head resume_tx_port_list;
+ struct list_head conn_info_list;
+ void *sec_rule;
+ struct msm_ipc_server *server;
+};
+
+struct msm_ipc_router_xprt_info {
+ struct list_head list;
+ struct msm_ipc_router_xprt *xprt;
+ uint32_t remote_node_id;
+ uint32_t initialized;
+ struct list_head pkt_list;
+ struct wakeup_source ws;
+ struct mutex rx_lock_lhb2;
+ struct mutex tx_lock_lhb2;
+ uint32_t need_len;
+ uint32_t abort_data_read;
+ struct work_struct read_data;
+ struct workqueue_struct *workqueue;
+ void *log_ctx;
+ struct kref ref;
+ struct completion ref_complete;
+};
+
+#define RT_HASH_SIZE 4
+struct msm_ipc_routing_table_entry {
+ struct list_head list;
+ struct kref ref;
+ uint32_t node_id;
+ uint32_t neighbor_node_id;
+ struct list_head remote_port_list[RP_HASH_SIZE];
+ struct msm_ipc_router_xprt_info *xprt_info;
+ struct rw_semaphore lock_lha4;
+ unsigned long num_tx_bytes;
+ unsigned long num_rx_bytes;
+};
+
+#define LOG_CTX_NAME_LEN 32
+struct ipc_rtr_log_ctx {
+ struct list_head list;
+ char log_ctx_name[LOG_CTX_NAME_LEN];
+ void *log_ctx;
+};
+
+static struct list_head routing_table[RT_HASH_SIZE];
+static DECLARE_RWSEM(routing_table_lock_lha3);
+static int routing_table_inited;
+
+static void do_read_data(struct work_struct *work);
+
+static LIST_HEAD(xprt_info_list);
+static DECLARE_RWSEM(xprt_info_list_lock_lha5);
+
+static DEFINE_MUTEX(log_ctx_list_lock_lha0);
+static LIST_HEAD(log_ctx_list);
+static DEFINE_MUTEX(ipc_router_init_lock);
+static bool is_ipc_router_inited;
+static int ipc_router_core_init(void);
+#define IPC_ROUTER_INIT_TIMEOUT (10 * HZ)
+
+static uint32_t next_port_id;
+static DEFINE_MUTEX(next_port_id_lock_lhc1);
+static struct workqueue_struct *msm_ipc_router_workqueue;
+
+static void *local_log_ctx;
+static void *ipc_router_get_log_ctx(char *sub_name);
+static int process_resume_tx_msg(union rr_control_msg *msg,
+ struct rr_packet *pkt);
+static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr);
+static int ipc_router_get_xprt_info_ref(
+ struct msm_ipc_router_xprt_info *xprt_info);
+static void ipc_router_put_xprt_info_ref(
+ struct msm_ipc_router_xprt_info *xprt_info);
+static void ipc_router_release_xprt_info_ref(struct kref *ref);
+
+struct pil_vote_info {
+ void *pil_handle;
+ struct work_struct load_work;
+ struct work_struct unload_work;
+};
+
+#define PIL_SUBSYSTEM_NAME_LEN 32
+static char default_peripheral[PIL_SUBSYSTEM_NAME_LEN];
+
+enum {
+ DOWN,
+ UP,
+};
+
+static void init_routing_table(void)
+{
+ int i;
+
+ for (i = 0; i < RT_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&routing_table[i]);
+}
+
+/**
+ * ipc_router_calc_checksum() - compute the checksum for extended HELLO message
+ * @msg: Reference to the IPC Router HELLO message.
+ *
+ * Return: Computed checksum value, 0 if msg is NULL.
+ */
+static uint32_t ipc_router_calc_checksum(union rr_control_msg *msg)
+{
+ uint32_t checksum = 0;
+ int i, len;
+ uint16_t upper_nb;
+ uint16_t lower_nb;
+ void *hello;
+
+ if (!msg)
+ return checksum;
+ hello = msg;
+ len = sizeof(*msg);
+
+ for (i = 0; i < len/IPCR_WORD_SIZE; i++) {
+ lower_nb = (*((uint32_t *)hello)) & IPC_ROUTER_CHECKSUM_MASK;
+ upper_nb = ((*((uint32_t *)hello)) >> 16) &
+ IPC_ROUTER_CHECKSUM_MASK;
+ checksum = checksum + upper_nb + lower_nb;
+ hello = ((uint32_t *)hello) + 1;
+ }
+ while (checksum > 0xFFFF)
+ checksum = (checksum & IPC_ROUTER_CHECKSUM_MASK) +
+ ((checksum >> 16) & IPC_ROUTER_CHECKSUM_MASK);
+
+ checksum = ~checksum & IPC_ROUTER_CHECKSUM_MASK;
+ return checksum;
+}
+
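The folding loop above is a 16-bit end-around-carry (Internet-style) checksum over the 32-bit words of the message. A standalone sketch of the same fold, assuming IPC_ROUTER_CHECKSUM_MASK is 0xFFFF:

	/* Sum 16-bit halves of each 32-bit word, fold carries back in,
	 * invert. E.g. words {0x00010002, 0xFFFF0000} sum to 0x10002,
	 * fold to 0x0003, and the result is ~0x0003 & 0xFFFF = 0xFFFC.
	 */
	static uint32_t fold_checksum16(const uint32_t *words, int nwords)
	{
		uint32_t sum = 0;
		int i;

		for (i = 0; i < nwords; i++)
			sum += (words[i] & 0xFFFF) + ((words[i] >> 16) & 0xFFFF);
		while (sum > 0xFFFF)
			sum = (sum & 0xFFFF) + (sum >> 16);
		return ~sum & 0xFFFF;
	}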
+/**
+ * skb_copy_to_log_buf() - copy the required number of bytes from the skb_queue
+ * @skb_head: skb_queue head that contains the data.
+ * @pl_len: length of the payload to be copied.
+ * @hdr_offset: length of the header present in the first skb.
+ * @log_buf: output buffer that receives the copied payload bytes.
+ *
+ * This function copies the first @pl_len payload bytes from the skb_queue
+ * into @log_buf, skipping @hdr_offset header bytes in the first skb, so
+ * that they can be formatted into a log message by the caller.
+ */
+static void skb_copy_to_log_buf(struct sk_buff_head *skb_head,
+ unsigned int pl_len, unsigned int hdr_offset,
+ uint64_t *log_buf)
+{
+ struct sk_buff *temp_skb;
+ unsigned int copied_len = 0, copy_len = 0;
+ int remaining;
+
+ if (!skb_head) {
+ IPC_RTR_ERR("%s: NULL skb_head\n", __func__);
+ return;
+ }
+ temp_skb = skb_peek(skb_head);
+ if (unlikely(!temp_skb || !temp_skb->data)) {
+ IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+ return;
+ }
+
+ skb_queue_walk(skb_head, temp_skb) {
+ remaining = temp_skb->len - hdr_offset;
+ copy_len = remaining < (pl_len - copied_len) ?
+ remaining : (pl_len - copied_len);
+ memcpy((uint8_t *)log_buf + copied_len,
+ temp_skb->data + hdr_offset, copy_len);
+ copied_len += copy_len;
+ hdr_offset = 0;
+ if (copied_len >= pl_len)
+ break;
+ }
+}
+
+/**
+ * ipc_router_log_msg() - log all data messages exchanged
+ * @log_ctx: IPC Logging context specific to each transport.
+ * @xchng_type: Identifies whether the data was received or is to be sent.
+ * @data: IPC Router data packet or control message received or to be sent.
+ * @hdr: Reference to the router header.
+ * @port_ptr: Local IPC Router port.
+ * @rport_ptr: Remote IPC Router port.
+ *
+ * This function builds the log message that is passed on to the IPC
+ * logging framework. The logged messages correspond to the information
+ * exchanged between the IPC Router and its clients.
+ */
+static void ipc_router_log_msg(void *log_ctx, uint32_t xchng_type,
+ void *data, struct rr_header_v1 *hdr,
+ struct msm_ipc_port *port_ptr,
+ struct msm_ipc_router_remote_port *rport_ptr)
+{
+ struct sk_buff_head *skb_head = NULL;
+ union rr_control_msg *msg = NULL;
+ struct rr_packet *pkt = NULL;
+ uint64_t pl_buf = 0;
+ struct sk_buff *skb;
+ uint32_t buf_len = 8;
+ uint32_t svcId = 0;
+ uint32_t svcIns = 0;
+ unsigned int hdr_offset = 0;
+ uint32_t port_type = 0;
+
+ if (!log_ctx || !hdr || !data)
+ return;
+
+ if (hdr->type == IPC_ROUTER_CTRL_CMD_DATA) {
+ pkt = (struct rr_packet *)data;
+ skb_head = pkt->pkt_fragment_q;
+ skb = skb_peek(skb_head);
+ if (!skb || !skb->data) {
+ IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+ return;
+ }
+
+ if (skb_queue_len(skb_head) == 1 && skb->len < 8)
+ buf_len = skb->len;
+ if (xchng_type == IPC_ROUTER_LOG_EVENT_TX && hdr->dst_node_id
+ != IPC_ROUTER_NID_LOCAL) {
+ if (hdr->version == IPC_ROUTER_V1)
+ hdr_offset = sizeof(struct rr_header_v1);
+ else if (hdr->version == IPC_ROUTER_V2)
+ hdr_offset = sizeof(struct rr_header_v2);
+ }
+ skb_copy_to_log_buf(skb_head, buf_len, hdr_offset, &pl_buf);
+
+ if (port_ptr && rport_ptr && (port_ptr->type == CLIENT_PORT)
+ && (rport_ptr->server != NULL)) {
+ svcId = rport_ptr->server->name.service;
+ svcIns = rport_ptr->server->name.instance;
+ port_type = CLIENT_PORT;
+ } else if (port_ptr && (port_ptr->type == SERVER_PORT)) {
+ svcId = port_ptr->port_name.service;
+ svcIns = port_ptr->port_name.instance;
+ port_type = SERVER_PORT;
+ }
+ IPC_RTR_INFO(log_ctx,
+ "%s %s %s Len:0x%x T:0x%x CF:0x%x SVC:<0x%x:0x%x> SRC:<0x%x:0x%x> DST:<0x%x:0x%x> DATA: %08x %08x",
+ (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "" :
+ (xchng_type == IPC_ROUTER_LOG_EVENT_TX ?
+ current->comm : "")),
+ (port_type == CLIENT_PORT ? "CLI" : "SRV"),
+ (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+ (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
+ (xchng_type == IPC_ROUTER_LOG_EVENT_TX_ERR ? "TX_ERR" :
+ (xchng_type == IPC_ROUTER_LOG_EVENT_RX_ERR ? "RX_ERR" :
+ "UNKNOWN")))),
+ hdr->size, hdr->type, hdr->control_flag,
+ svcId, svcIns, hdr->src_node_id, hdr->src_port_id,
+ hdr->dst_node_id, hdr->dst_port_id,
+ (unsigned int)pl_buf, (unsigned int)(pl_buf>>32));
+
+ } else {
+ msg = (union rr_control_msg *)data;
+ if (msg->cmd == IPC_ROUTER_CTRL_CMD_NEW_SERVER ||
+ msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_SERVER)
+ IPC_RTR_INFO(log_ctx,
+ "CTL MSG: %s cmd:0x%x SVC:<0x%x:0x%x> ADDR:<0x%x:0x%x>",
+ (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+ (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
+ (xchng_type == IPC_ROUTER_LOG_EVENT_TX_ERR ? "TX_ERR" :
+ (xchng_type == IPC_ROUTER_LOG_EVENT_RX_ERR ? "RX_ERR" :
+ "UNKNOWN")))),
+ msg->cmd, msg->srv.service, msg->srv.instance,
+ msg->srv.node_id, msg->srv.port_id);
+ else if (msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT ||
+ msg->cmd == IPC_ROUTER_CTRL_CMD_RESUME_TX)
+ IPC_RTR_INFO(log_ctx,
+ "CTL MSG: %s cmd:0x%x ADDR: <0x%x:0x%x>",
+ (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+ (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
+ msg->cmd, msg->cli.node_id, msg->cli.port_id);
+ else if (msg->cmd == IPC_ROUTER_CTRL_CMD_HELLO && hdr)
+ IPC_RTR_INFO(log_ctx, "CTL MSG %s cmd:0x%x ADDR:0x%x",
+ (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+ (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
+ msg->cmd, hdr->src_node_id);
+ else
+ IPC_RTR_INFO(log_ctx, "%s UNKNOWN cmd:0x%x",
+ (xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+ (xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
+ msg->cmd);
+ }
+}
+
+/* Must be called with routing_table_lock_lha3 locked. */
+static struct msm_ipc_routing_table_entry *lookup_routing_table(
+ uint32_t node_id)
+{
+ uint32_t key = (node_id % RT_HASH_SIZE);
+ struct msm_ipc_routing_table_entry *rt_entry;
+
+ list_for_each_entry(rt_entry, &routing_table[key], list) {
+ if (rt_entry->node_id == node_id)
+ return rt_entry;
+ }
+ return NULL;
+}
+
+/**
+ * create_routing_table_entry() - Lookup and create a routing table entry
+ * @node_id: Node ID of the routing table entry to be created.
+ * @xprt_info: XPRT through which the node ID is reachable.
+ *
+ * @return: a reference to the routing table entry on success, NULL on failure.
+ */
+static struct msm_ipc_routing_table_entry *create_routing_table_entry(
+ uint32_t node_id, struct msm_ipc_router_xprt_info *xprt_info)
+{
+ int i;
+ struct msm_ipc_routing_table_entry *rt_entry;
+ uint32_t key;
+
+ down_write(&routing_table_lock_lha3);
+ rt_entry = lookup_routing_table(node_id);
+ if (rt_entry)
+ goto out_create_rtentry1;
+
+ rt_entry = kmalloc(sizeof(struct msm_ipc_routing_table_entry),
+ GFP_KERNEL);
+ if (!rt_entry) {
+ IPC_RTR_ERR("%s: rt_entry allocation failed for %d\n",
+ __func__, node_id);
+ goto out_create_rtentry2;
+ }
+
+ for (i = 0; i < RP_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&rt_entry->remote_port_list[i]);
+ init_rwsem(&rt_entry->lock_lha4);
+ kref_init(&rt_entry->ref);
+ rt_entry->node_id = node_id;
+ rt_entry->xprt_info = xprt_info;
+ if (xprt_info)
+ rt_entry->neighbor_node_id = xprt_info->remote_node_id;
+
+ key = (node_id % RT_HASH_SIZE);
+ list_add_tail(&rt_entry->list, &routing_table[key]);
+out_create_rtentry1:
+ kref_get(&rt_entry->ref);
+out_create_rtentry2:
+ up_write(&routing_table_lock_lha3);
+ return rt_entry;
+}
+
+/**
+ * ipc_router_get_rtentry_ref() - Get a reference to the routing table entry
+ * @node_id: Node ID of the routing table entry.
+ *
+ * @return: a reference to the routing table entry on success, NULL on failure.
+ *
+ * This function is used to obtain a reference to the routing table entry
+ * corresponding to a node ID.
+ */
+static struct msm_ipc_routing_table_entry *ipc_router_get_rtentry_ref(
+ uint32_t node_id)
+{
+ struct msm_ipc_routing_table_entry *rt_entry;
+
+ down_read(&routing_table_lock_lha3);
+ rt_entry = lookup_routing_table(node_id);
+ if (rt_entry)
+ kref_get(&rt_entry->ref);
+ up_read(&routing_table_lock_lha3);
+ return rt_entry;
+}
+
+/**
+ * ipc_router_release_rtentry() - Cleanup and release the routing table entry
+ * @ref: Reference to the entry.
+ *
+ * This function is called when all references to the routing table entry are
+ * released.
+ */
+void ipc_router_release_rtentry(struct kref *ref)
+{
+ struct msm_ipc_routing_table_entry *rt_entry =
+ container_of(ref, struct msm_ipc_routing_table_entry, ref);
+
+ /*
+ * All references to a routing entry will be put only under SSR.
+ * As part of SSR, all the internals of the routing table entry
+ * are cleaned. So just free the routing table entry.
+ */
+ kfree(rt_entry);
+}
+
+struct rr_packet *rr_read(struct msm_ipc_router_xprt_info *xprt_info)
+{
+ struct rr_packet *temp_pkt;
+
+ if (!xprt_info)
+ return NULL;
+
+ mutex_lock(&xprt_info->rx_lock_lhb2);
+ if (xprt_info->abort_data_read) {
+ mutex_unlock(&xprt_info->rx_lock_lhb2);
+ IPC_RTR_ERR("%s detected SSR & exiting now\n",
+ xprt_info->xprt->name);
+ return NULL;
+ }
+
+ if (list_empty(&xprt_info->pkt_list)) {
+ mutex_unlock(&xprt_info->rx_lock_lhb2);
+ return NULL;
+ }
+
+ temp_pkt = list_first_entry(&xprt_info->pkt_list,
+ struct rr_packet, list);
+ list_del(&temp_pkt->list);
+ if (list_empty(&xprt_info->pkt_list))
+ __pm_relax(&xprt_info->ws);
+ mutex_unlock(&xprt_info->rx_lock_lhb2);
+ return temp_pkt;
+}
+
+struct rr_packet *clone_pkt(struct rr_packet *pkt)
+{
+ struct rr_packet *cloned_pkt;
+ struct sk_buff *temp_skb, *cloned_skb;
+ struct sk_buff_head *pkt_fragment_q;
+
+ cloned_pkt = kzalloc(sizeof(struct rr_packet), GFP_KERNEL);
+ if (!cloned_pkt) {
+ IPC_RTR_ERR("%s: failure\n", __func__);
+ return NULL;
+ }
+ memcpy(&(cloned_pkt->hdr), &(pkt->hdr), sizeof(struct rr_header_v1));
+ if (pkt->opt_hdr.len > 0) {
+ cloned_pkt->opt_hdr.data = kmalloc(pkt->opt_hdr.len,
+ GFP_KERNEL);
+ if (!cloned_pkt->opt_hdr.data) {
+ IPC_RTR_ERR("%s: Memory allocation Failed\n", __func__);
+ } else {
+ cloned_pkt->opt_hdr.len = pkt->opt_hdr.len;
+ memcpy(cloned_pkt->opt_hdr.data, pkt->opt_hdr.data,
+ pkt->opt_hdr.len);
+ }
+ }
+
+ pkt_fragment_q = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL);
+ if (!pkt_fragment_q) {
+ IPC_RTR_ERR("%s: pkt_frag_q alloc failure\n", __func__);
+ kfree(cloned_pkt);
+ return NULL;
+ }
+ skb_queue_head_init(pkt_fragment_q);
+ kref_init(&cloned_pkt->ref);
+
+ skb_queue_walk(pkt->pkt_fragment_q, temp_skb) {
+ cloned_skb = skb_clone(temp_skb, GFP_KERNEL);
+ if (!cloned_skb)
+ goto fail_clone;
+ skb_queue_tail(pkt_fragment_q, cloned_skb);
+ }
+ cloned_pkt->pkt_fragment_q = pkt_fragment_q;
+ cloned_pkt->length = pkt->length;
+ return cloned_pkt;
+
+fail_clone:
+ while (!skb_queue_empty(pkt_fragment_q)) {
+ temp_skb = skb_dequeue(pkt_fragment_q);
+ kfree_skb(temp_skb);
+ }
+ kfree(pkt_fragment_q);
+ if (cloned_pkt->opt_hdr.len > 0)
+ kfree(cloned_pkt->opt_hdr.data);
+ kfree(cloned_pkt);
+ return NULL;
+}
+
+/**
+ * create_pkt() - Create a Router packet
+ * @data: SKB queue to be contained inside the packet.
+ *
+ * @return: pointer to packet on success, NULL on failure.
+ */
+struct rr_packet *create_pkt(struct sk_buff_head *data)
+{
+ struct rr_packet *pkt;
+ struct sk_buff *temp_skb;
+
+ pkt = kzalloc(sizeof(struct rr_packet), GFP_KERNEL);
+ if (!pkt) {
+ IPC_RTR_ERR("%s: failure\n", __func__);
+ return NULL;
+ }
+
+ if (data) {
+ pkt->pkt_fragment_q = data;
+ skb_queue_walk(pkt->pkt_fragment_q, temp_skb)
+ pkt->length += temp_skb->len;
+ } else {
+ pkt->pkt_fragment_q = kmalloc(sizeof(struct sk_buff_head),
+ GFP_KERNEL);
+ if (!pkt->pkt_fragment_q) {
+ IPC_RTR_ERR("%s: Couldn't alloc pkt_fragment_q\n",
+ __func__);
+ kfree(pkt);
+ return NULL;
+ }
+ skb_queue_head_init(pkt->pkt_fragment_q);
+ }
+ kref_init(&pkt->ref);
+ return pkt;
+}
+
+void release_pkt(struct rr_packet *pkt)
+{
+ struct sk_buff *temp_skb;
+
+ if (!pkt)
+ return;
+
+ if (!pkt->pkt_fragment_q) {
+ kfree(pkt);
+ return;
+ }
+
+ while (!skb_queue_empty(pkt->pkt_fragment_q)) {
+ temp_skb = skb_dequeue(pkt->pkt_fragment_q);
+ kfree_skb(temp_skb);
+ }
+ kfree(pkt->pkt_fragment_q);
+ if (pkt->opt_hdr.len > 0)
+ kfree(pkt->opt_hdr.data);
+ kfree(pkt);
+}
+
+static struct sk_buff_head *msm_ipc_router_buf_to_skb(void *buf,
+ unsigned int buf_len)
+{
+ struct sk_buff_head *skb_head;
+ struct sk_buff *skb;
+ int first = 1, offset = 0;
+ int skb_size, data_size;
+ void *data;
+ int last = 1;
+ int align_size;
+
+ skb_head = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL);
+ if (!skb_head) {
+ IPC_RTR_ERR("%s: Couldnot allocate skb_head\n", __func__);
+ return NULL;
+ }
+ skb_queue_head_init(skb_head);
+
+ data_size = buf_len;
+ align_size = ALIGN_SIZE(data_size);
+ while (offset != buf_len) {
+ skb_size = data_size;
+ if (first)
+ skb_size += IPC_ROUTER_HDR_SIZE;
+ if (last)
+ skb_size += align_size;
+
+ skb = alloc_skb(skb_size, GFP_KERNEL);
+ if (!skb) {
+ if (skb_size <= (PAGE_SIZE/2)) {
+ IPC_RTR_ERR("%s: cannot allocate skb\n",
+ __func__);
+ goto buf_to_skb_error;
+ }
+ data_size = data_size / 2;
+ last = 0;
+ continue;
+ }
+
+ if (first) {
+ skb_reserve(skb, IPC_ROUTER_HDR_SIZE);
+ first = 0;
+ }
+
+ data = skb_put(skb, data_size);
+ memcpy(data, buf + offset, data_size);
+ skb_queue_tail(skb_head, skb);
+ offset += data_size;
+ data_size = buf_len - offset;
+ last = 1;
+ }
+ return skb_head;
+
+buf_to_skb_error:
+ while (!skb_queue_empty(skb_head)) {
+ skb = skb_dequeue(skb_head);
+ kfree_skb(skb);
+ }
+ kfree(skb_head);
+ return NULL;
+}
+
+static void *msm_ipc_router_skb_to_buf(struct sk_buff_head *skb_head,
+ unsigned int len)
+{
+ struct sk_buff *temp;
+ unsigned int offset = 0, buf_len = 0, copy_len;
+ void *buf;
+
+ if (!skb_head) {
+ IPC_RTR_ERR("%s: NULL skb_head\n", __func__);
+ return NULL;
+ }
+
+ temp = skb_peek(skb_head);
+ buf_len = len;
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf) {
+ IPC_RTR_ERR("%s: cannot allocate buf\n", __func__);
+ return NULL;
+ }
+ skb_queue_walk(skb_head, temp) {
+ copy_len = buf_len < temp->len ? buf_len : temp->len;
+ memcpy(buf + offset, temp->data, copy_len);
+ offset += copy_len;
+ buf_len -= copy_len;
+ }
+ return buf;
+}
+
+void msm_ipc_router_free_skb(struct sk_buff_head *skb_head)
+{
+ struct sk_buff *temp_skb;
+
+ if (!skb_head)
+ return;
+
+ while (!skb_queue_empty(skb_head)) {
+ temp_skb = skb_dequeue(skb_head);
+ kfree_skb(temp_skb);
+ }
+ kfree(skb_head);
+}
+
+/**
+ * extract_optional_header() - Extract the optional header from skb
+ * @pkt: Packet structure into which the header has to be extracted.
+ * @opt_len: The optional header length in word size.
+ *
+ * @return: Length of optional header in bytes if success, zero otherwise.
+ */
+static int extract_optional_header(struct rr_packet *pkt, uint8_t opt_len)
+{
+ size_t offset = 0, buf_len = 0, copy_len, opt_hdr_len;
+ struct sk_buff *temp;
+ struct sk_buff_head *skb_head;
+
+ opt_hdr_len = opt_len * IPCR_WORD_SIZE;
+ pkt->opt_hdr.data = kmalloc(opt_hdr_len, GFP_KERNEL);
+ if (!pkt->opt_hdr.data) {
+ IPC_RTR_ERR("%s: Memory allocation Failed\n", __func__);
+ return 0;
+ }
+ skb_head = pkt->pkt_fragment_q;
+ buf_len = opt_hdr_len;
+ skb_queue_walk(skb_head, temp) {
+ copy_len = buf_len < temp->len ? buf_len : temp->len;
+ memcpy(pkt->opt_hdr.data + offset, temp->data, copy_len);
+ offset += copy_len;
+ buf_len -= copy_len;
+ skb_pull(temp, copy_len);
+ if (temp->len == 0) {
+ skb_dequeue(skb_head);
+ kfree_skb(temp);
+ }
+ }
+ pkt->opt_hdr.len = opt_hdr_len;
+ return opt_hdr_len;
+}
+
+/**
+ * extract_header_v1() - Extract IPC Router header of version 1
+ * @pkt: Packet structure into which the header has to be extracted.
+ * @skb: SKB from which the header has to be extracted.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int extract_header_v1(struct rr_packet *pkt, struct sk_buff *skb)
+{
+ if (!pkt || !skb) {
+ IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__);
+ return -EINVAL;
+ }
+
+ memcpy(&pkt->hdr, skb->data, sizeof(struct rr_header_v1));
+ skb_pull(skb, sizeof(struct rr_header_v1));
+ pkt->length -= sizeof(struct rr_header_v1);
+ return 0;
+}
+
+/**
+ * extract_header_v2() - Extract IPC Router header of version 2
+ * @pkt: Packet structure into which the header has to be extracted.
+ * @skb: SKB from which the header has to be extracted.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int extract_header_v2(struct rr_packet *pkt, struct sk_buff *skb)
+{
+ struct rr_header_v2 *hdr;
+ uint8_t opt_len;
+ size_t opt_hdr_len;
+ size_t total_hdr_size = sizeof(*hdr);
+
+ if (!pkt || !skb) {
+ IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__);
+ return -EINVAL;
+ }
+
+ hdr = (struct rr_header_v2 *)skb->data;
+ pkt->hdr.version = (uint32_t)hdr->version;
+ pkt->hdr.type = (uint32_t)hdr->type;
+ pkt->hdr.src_node_id = (uint32_t)hdr->src_node_id;
+ pkt->hdr.src_port_id = (uint32_t)hdr->src_port_id;
+ pkt->hdr.size = (uint32_t)hdr->size;
+ pkt->hdr.control_flag = (uint32_t)hdr->control_flag;
+ pkt->hdr.dst_node_id = (uint32_t)hdr->dst_node_id;
+ pkt->hdr.dst_port_id = (uint32_t)hdr->dst_port_id;
+ opt_len = hdr->opt_len;
+ skb_pull(skb, total_hdr_size);
+ if (opt_len > 0) {
+ opt_hdr_len = extract_optional_header(pkt, opt_len);
+ total_hdr_size += opt_hdr_len;
+ }
+ pkt->length -= total_hdr_size;
+ return 0;
+}
+
+/**
+ * extract_header() - Extract IPC Router header
+ * @pkt: Packet from which the header has to be extracted.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function will check if the header version is v1 or v2 and invoke
+ * the corresponding helper function to extract the IPC Router header.
+ */
+static int extract_header(struct rr_packet *pkt)
+{
+ struct sk_buff *temp_skb;
+ int ret;
+
+ if (!pkt) {
+ IPC_RTR_ERR("%s: NULL PKT\n", __func__);
+ return -EINVAL;
+ }
+
+ temp_skb = skb_peek(pkt->pkt_fragment_q);
+ if (!temp_skb || !temp_skb->data) {
+ IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+ return -EINVAL;
+ }
+
+ if (temp_skb->data[0] == IPC_ROUTER_V1) {
+ ret = extract_header_v1(pkt, temp_skb);
+ } else if (temp_skb->data[0] == IPC_ROUTER_V2) {
+ ret = extract_header_v2(pkt, temp_skb);
+ } else {
+ IPC_RTR_ERR("%s: Invalid Header version %02x\n",
+ __func__, temp_skb->data[0]);
+ print_hex_dump(KERN_ERR, "Header: ", DUMP_PREFIX_ADDRESS,
+ 16, 1, temp_skb->data, pkt->length, true);
+ return -EINVAL;
+ }
+ return ret;
+}
+
+/**
+ * calc_tx_header_size() - Calculate header size to be reserved in SKB
+ * @pkt: Packet in which the space for header has to be reserved.
+ * @dst_xprt_info: XPRT through which the destination is reachable.
+ *
+ * @return: required header size on success,
+ * standard Linux error codes on failure.
+ *
+ * This function is used to calculate the header size that has to be reserved
+ * in a transmit SKB. The header size is calculated based on the XPRT through
+ * which the destination node is reachable.
+ */
+static int calc_tx_header_size(struct rr_packet *pkt,
+ struct msm_ipc_router_xprt_info *dst_xprt_info)
+{
+ int hdr_size = 0;
+ int xprt_version = 0;
+ struct msm_ipc_router_xprt_info *xprt_info = dst_xprt_info;
+
+ if (!pkt) {
+ IPC_RTR_ERR("%s: NULL PKT\n", __func__);
+ return -EINVAL;
+ }
+
+ if (xprt_info)
+ xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);
+
+ if (xprt_version == IPC_ROUTER_V1) {
+ pkt->hdr.version = IPC_ROUTER_V1;
+ hdr_size = sizeof(struct rr_header_v1);
+ } else if (xprt_version == IPC_ROUTER_V2) {
+ pkt->hdr.version = IPC_ROUTER_V2;
+ hdr_size = sizeof(struct rr_header_v2) + pkt->opt_hdr.len;
+ } else {
+ IPC_RTR_ERR("%s: Invalid xprt_version %d\n",
+ __func__, xprt_version);
+ hdr_size = -EINVAL;
+ }
+
+ return hdr_size;
+}
+
+/**
+ * calc_rx_header_size() - Calculate the RX header size
+ * @xprt_info: XPRT info of the received message.
+ *
+ * @return: valid header size on success, INT_MAX on failure.
+ */
+static int calc_rx_header_size(struct msm_ipc_router_xprt_info *xprt_info)
+{
+ int xprt_version = 0;
+ int hdr_size = INT_MAX;
+
+ if (xprt_info)
+ xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);
+
+ if (xprt_version == IPC_ROUTER_V1)
+ hdr_size = sizeof(struct rr_header_v1);
+ else if (xprt_version == IPC_ROUTER_V2)
+ hdr_size = sizeof(struct rr_header_v2);
+ return hdr_size;
+}
+
+/**
+ * prepend_header_v1() - Prepend IPC Router header of version 1
+ * @pkt: Packet structure which contains the header info to be prepended.
+ * @hdr_size: Size of the header
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int prepend_header_v1(struct rr_packet *pkt, int hdr_size)
+{
+ struct sk_buff *temp_skb;
+ struct rr_header_v1 *hdr;
+
+ if (!pkt || hdr_size <= 0) {
+ IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ temp_skb = skb_peek(pkt->pkt_fragment_q);
+ if (!temp_skb || !temp_skb->data) {
+ IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+ return -EINVAL;
+ }
+
+ if (skb_headroom(temp_skb) < hdr_size) {
+ temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
+ if (!temp_skb) {
+ IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
+ __func__, hdr_size);
+ return -ENOMEM;
+ }
+ skb_reserve(temp_skb, hdr_size);
+ }
+
+ hdr = (struct rr_header_v1 *)skb_push(temp_skb, hdr_size);
+ memcpy(hdr, &pkt->hdr, hdr_size);
+ if (temp_skb != skb_peek(pkt->pkt_fragment_q))
+ skb_queue_head(pkt->pkt_fragment_q, temp_skb);
+ pkt->length += hdr_size;
+ return 0;
+}
+
+/**
+ * prepend_header_v2() - Prepend IPC Router header of version 2
+ * @pkt: Packet structure which contains the header info to be prepended.
+ * @hdr_size: Size of the header
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int prepend_header_v2(struct rr_packet *pkt, int hdr_size)
+{
+ struct sk_buff *temp_skb;
+ struct rr_header_v2 *hdr;
+
+ if (!pkt || hdr_size <= 0) {
+ IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ temp_skb = skb_peek(pkt->pkt_fragment_q);
+ if (!temp_skb || !temp_skb->data) {
+ IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+ return -EINVAL;
+ }
+
+ if (skb_headroom(temp_skb) < hdr_size) {
+ temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
+ if (!temp_skb) {
+ IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
+ __func__, hdr_size);
+ return -ENOMEM;
+ }
+ skb_reserve(temp_skb, hdr_size);
+ }
+
+ hdr = (struct rr_header_v2 *)skb_push(temp_skb, hdr_size);
+ hdr->version = (uint8_t)pkt->hdr.version;
+ hdr->type = (uint8_t)pkt->hdr.type;
+ hdr->control_flag = (uint8_t)pkt->hdr.control_flag;
+ hdr->size = (uint32_t)pkt->hdr.size;
+ hdr->src_node_id = (uint16_t)pkt->hdr.src_node_id;
+ hdr->src_port_id = (uint16_t)pkt->hdr.src_port_id;
+ hdr->dst_node_id = (uint16_t)pkt->hdr.dst_node_id;
+ hdr->dst_port_id = (uint16_t)pkt->hdr.dst_port_id;
+ if (pkt->opt_hdr.len > 0) {
+ hdr->opt_len = pkt->opt_hdr.len/IPCR_WORD_SIZE;
+ memcpy(hdr + 1, pkt->opt_hdr.data, pkt->opt_hdr.len);
+ } else {
+ hdr->opt_len = 0;
+ }
+ if (temp_skb != skb_peek(pkt->pkt_fragment_q))
+ skb_queue_head(pkt->pkt_fragment_q, temp_skb);
+ pkt->length += hdr_size;
+ return 0;
+}
+
+/**
+ * prepend_header() - Prepend IPC Router header
+ * @pkt: Packet structure which contains the header info to be prepended.
+ * @xprt_info: XPRT through which the packet is transmitted.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function prepends the header to the packet to be transmitted. The
+ * IPC Router header version to be prepended depends on the XPRT through
+ * which the destination is reachable.
+ */
+static int prepend_header(struct rr_packet *pkt,
+ struct msm_ipc_router_xprt_info *xprt_info)
+{
+ int hdr_size;
+ struct sk_buff *temp_skb;
+
+ if (!pkt) {
+ IPC_RTR_ERR("%s: NULL PKT\n", __func__);
+ return -EINVAL;
+ }
+
+ temp_skb = skb_peek(pkt->pkt_fragment_q);
+ if (!temp_skb || !temp_skb->data) {
+ IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+ return -EINVAL;
+ }
+
+ hdr_size = calc_tx_header_size(pkt, xprt_info);
+ if (hdr_size <= 0)
+ return hdr_size;
+
+ if (pkt->hdr.version == IPC_ROUTER_V1)
+ return prepend_header_v1(pkt, hdr_size);
+ else if (pkt->hdr.version == IPC_ROUTER_V2)
+ return prepend_header_v2(pkt, hdr_size);
+ else
+ return -EINVAL;
+}
+
+/**
+ * defragment_pkt() - Defragment and linearize the packet
+ * @pkt: Packet to be linearized.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * Some packets contain fragments of data over multiple SKBs. If an XPRT
+ * does not support fragmented writes, this function linearizes the
+ * multiple SKBs into a single SKB.
+ */
+static int defragment_pkt(struct rr_packet *pkt)
+{
+ struct sk_buff *dst_skb, *src_skb, *temp_skb;
+ int offset = 0, buf_len = 0, copy_len;
+ void *buf;
+ int align_size;
+
+ if (!pkt || pkt->length <= 0) {
+ IPC_RTR_ERR("%s: Invalid PKT\n", __func__);
+ return -EINVAL;
+ }
+
+ if (skb_queue_len(pkt->pkt_fragment_q) == 1)
+ return 0;
+
+ align_size = ALIGN_SIZE(pkt->length);
+ dst_skb = alloc_skb(pkt->length + align_size, GFP_KERNEL);
+ if (!dst_skb) {
+ IPC_RTR_ERR("%s: could not allocate one skb of size %d\n",
+ __func__, pkt->length);
+ return -ENOMEM;
+ }
+ buf = skb_put(dst_skb, pkt->length);
+ buf_len = pkt->length;
+
+ skb_queue_walk(pkt->pkt_fragment_q, src_skb) {
+ copy_len = buf_len < src_skb->len ? buf_len : src_skb->len;
+ memcpy(buf + offset, src_skb->data, copy_len);
+ offset += copy_len;
+ buf_len -= copy_len;
+ }
+
+ while (!skb_queue_empty(pkt->pkt_fragment_q)) {
+ temp_skb = skb_dequeue(pkt->pkt_fragment_q);
+ kfree_skb(temp_skb);
+ }
+ skb_queue_tail(pkt->pkt_fragment_q, dst_skb);
+ return 0;
+}
+
+static int post_pkt_to_port(struct msm_ipc_port *port_ptr,
+ struct rr_packet *pkt, int clone)
+{
+ struct rr_packet *temp_pkt = pkt;
+ void (*notify)(unsigned event, void *oob_data,
+ size_t oob_data_len, void *priv);
+ void (*data_ready)(struct sock *sk) = NULL;
+ struct sock *sk;
+ uint32_t pkt_type;
+
+ if (unlikely(!port_ptr || !pkt))
+ return -EINVAL;
+
+ if (clone) {
+ temp_pkt = clone_pkt(pkt);
+ if (!temp_pkt) {
+ IPC_RTR_ERR(
+ "%s: Error cloning packet for port %08x:%08x\n",
+ __func__, port_ptr->this_port.node_id,
+ port_ptr->this_port.port_id);
+ return -ENOMEM;
+ }
+ }
+
+ mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+ __pm_stay_awake(port_ptr->port_rx_ws);
+ list_add_tail(&temp_pkt->list, &port_ptr->port_rx_q);
+ wake_up(&port_ptr->port_rx_wait_q);
+ notify = port_ptr->notify;
+ pkt_type = temp_pkt->hdr.type;
+ sk = (struct sock *)port_ptr->endpoint;
+ if (sk) {
+ read_lock(&sk->sk_callback_lock);
+ data_ready = sk->sk_data_ready;
+ read_unlock(&sk->sk_callback_lock);
+ }
+ mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+ if (notify)
+ notify(pkt_type, NULL, 0, port_ptr->priv);
+ else if (sk && data_ready)
+ data_ready(sk);
+
+ return 0;
+}
+
+/**
+ * ipc_router_peek_pkt_size() - Peek into the packet header to get potential packet size
+ * @data: Starting address of the packet which points to router header.
+ *
+ * @return: potential packet size on success, < 0 on error.
+ *
+ * This function is used by the underlying transport abstraction layer to
+ * peek into the potential packet size of an incoming packet. This information
+ * is used to perform link-layer fragmentation and re-assembly.
+ */
+int ipc_router_peek_pkt_size(char *data)
+{
+ int size;
+
+ if (!data) {
+ pr_err("%s: NULL PKT\n", __func__);
+ return -EINVAL;
+ }
+
+ if (data[0] == IPC_ROUTER_V1)
+ size = ((struct rr_header_v1 *)data)->size +
+ sizeof(struct rr_header_v1);
+ else if (data[0] == IPC_ROUTER_V2)
+ size = ((struct rr_header_v2 *)data)->size +
+ ((struct rr_header_v2 *)data)->opt_len * IPCR_WORD_SIZE
+ + sizeof(struct rr_header_v2);
+ else
+ return -EINVAL;
+
+ size += ALIGN_SIZE(size);
+ return size;
+}
+
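A transport would typically use this peek while reassembling a packet from link-layer fragments; a hypothetical sketch (xprt_rx_assemble is illustrative, not part of this patch):

	/* Hypothetical transport RX path: read enough bytes to cover the
	 * router header, ask the router how big the full packet is, then
	 * keep reading until that many bytes have accumulated.
	 */
	static int xprt_rx_assemble(char *hdr_bytes, int avail)
	{
		int pkt_size = ipc_router_peek_pkt_size(hdr_bytes);

		if (pkt_size < 0)
			return pkt_size;	/* malformed header */
		return pkt_size - avail;	/* bytes still to read, if > 0 */
	}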
+static int post_control_ports(struct rr_packet *pkt)
+{
+ struct msm_ipc_port *port_ptr;
+
+ if (!pkt)
+ return -EINVAL;
+
+ down_read(&control_ports_lock_lha5);
+ list_for_each_entry(port_ptr, &control_ports, list)
+ post_pkt_to_port(port_ptr, pkt, 1);
+ up_read(&control_ports_lock_lha5);
+ return 0;
+}
+
+static uint32_t allocate_port_id(void)
+{
+ uint32_t port_id = 0, prev_port_id, key;
+ struct msm_ipc_port *port_ptr;
+
+ mutex_lock(&next_port_id_lock_lhc1);
+ prev_port_id = next_port_id;
+ down_read(&local_ports_lock_lhc2);
+ do {
+ next_port_id++;
+ if ((next_port_id & IPC_ROUTER_ADDRESS) == IPC_ROUTER_ADDRESS)
+ next_port_id = 1;
+
+ key = (next_port_id & (LP_HASH_SIZE - 1));
+ if (list_empty(&local_ports[key])) {
+ port_id = next_port_id;
+ break;
+ }
+ list_for_each_entry(port_ptr, &local_ports[key], list) {
+ if (port_ptr->this_port.port_id == next_port_id) {
+ port_id = next_port_id;
+ break;
+ }
+ }
+ if (!port_id) {
+ port_id = next_port_id;
+ break;
+ }
+ port_id = 0;
+ } while (next_port_id != prev_port_id);
+ up_read(&local_ports_lock_lhc2);
+ mutex_unlock(&next_port_id_lock_lhc1);
+
+ return port_id;
+}
+
+void msm_ipc_router_add_local_port(struct msm_ipc_port *port_ptr)
+{
+ uint32_t key;
+
+ if (!port_ptr)
+ return;
+
+ key = (port_ptr->this_port.port_id & (LP_HASH_SIZE - 1));
+ down_write(&local_ports_lock_lhc2);
+ list_add_tail(&port_ptr->list, &local_ports[key]);
+ up_write(&local_ports_lock_lhc2);
+}
+
+/**
+ * msm_ipc_router_create_raw_port() - Create an IPC Router port
+ * @endpoint: User-space socket information to be cached.
+ * @notify: Function to notify incoming events on the port. It is called
+ * with the event ID, any out-of-band data associated with the
+ * event, the length of that data, and the private data
+ * registered at port creation.
+ * @priv: Private data to be passed back during event notification.
+ *
+ * @return: Valid pointer to port on success, NULL on failure.
+ *
+ * This function is used to create an IPC Router port. The port is used for
+ * communication locally or outside the subsystem.
+ */
+struct msm_ipc_port *msm_ipc_router_create_raw_port(void *endpoint,
+ void (*notify)(unsigned event, void *oob_data,
+ size_t oob_data_len, void *priv),
+ void *priv)
+{
+ struct msm_ipc_port *port_ptr;
+
+ port_ptr = kzalloc(sizeof(struct msm_ipc_port), GFP_KERNEL);
+ if (!port_ptr)
+ return NULL;
+
+ port_ptr->this_port.node_id = IPC_ROUTER_NID_LOCAL;
+ port_ptr->this_port.port_id = allocate_port_id();
+ if (!port_ptr->this_port.port_id) {
+ IPC_RTR_ERR("%s: All port ids are in use\n", __func__);
+ kfree(port_ptr);
+ return NULL;
+ }
+
+ mutex_init(&port_ptr->port_lock_lhc3);
+ INIT_LIST_HEAD(&port_ptr->port_rx_q);
+ mutex_init(&port_ptr->port_rx_q_lock_lhc3);
+ init_waitqueue_head(&port_ptr->port_rx_wait_q);
+ snprintf(port_ptr->rx_ws_name, MAX_WS_NAME_SZ,
+ "ipc%08x_%s",
+ port_ptr->this_port.port_id,
+ current->comm);
+ port_ptr->port_rx_ws = wakeup_source_register(port_ptr->rx_ws_name);
+ if (!port_ptr->port_rx_ws) {
+ kfree(port_ptr);
+ return NULL;
+ }
+ init_waitqueue_head(&port_ptr->port_tx_wait_q);
+ kref_init(&port_ptr->ref);
+
+ port_ptr->endpoint = endpoint;
+ port_ptr->notify = notify;
+ port_ptr->priv = priv;
+
+ msm_ipc_router_add_local_port(port_ptr);
+ if (endpoint)
+ sock_hold(ipc_port_sk(endpoint));
+ return port_ptr;
+}
+
+/**
+ * ipc_router_get_port_ref() - Get a reference to the local port
+ * @port_id: Port ID of the local port on which a reference is taken.
+ *
+ * @return: If port is found, a reference to the port is returned.
+ * Else NULL is returned.
+ */
+static struct msm_ipc_port *ipc_router_get_port_ref(uint32_t port_id)
+{
+ int key = (port_id & (LP_HASH_SIZE - 1));
+ struct msm_ipc_port *port_ptr;
+
+ down_read(&local_ports_lock_lhc2);
+ list_for_each_entry(port_ptr, &local_ports[key], list) {
+ if (port_ptr->this_port.port_id == port_id) {
+ kref_get(&port_ptr->ref);
+ up_read(&local_ports_lock_lhc2);
+ return port_ptr;
+ }
+ }
+ up_read(&local_ports_lock_lhc2);
+ return NULL;
+}
+
+/**
+ * ipc_router_release_port() - Cleanup and release the port
+ * @ref: Reference to the port.
+ *
+ * This function is called when all references to the port are released.
+ */
+void ipc_router_release_port(struct kref *ref)
+{
+ struct rr_packet *pkt, *temp_pkt;
+ struct msm_ipc_port *port_ptr =
+ container_of(ref, struct msm_ipc_port, ref);
+
+ mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+ list_for_each_entry_safe(pkt, temp_pkt, &port_ptr->port_rx_q, list) {
+ list_del(&pkt->list);
+ release_pkt(pkt);
+ }
+ mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+ wakeup_source_unregister(port_ptr->port_rx_ws);
+ if (port_ptr->endpoint)
+ sock_put(ipc_port_sk(port_ptr->endpoint));
+ kfree(port_ptr);
+}
+
+/**
+ * ipc_router_get_rport_ref() - Get a reference to the remote port
+ * @node_id: Node ID corresponding to the remote port.
+ * @port_id: Port ID corresponding to the remote port.
+ *
+ * @return: a reference to the remote port on success, NULL on failure.
+ */
+static struct msm_ipc_router_remote_port *ipc_router_get_rport_ref(
+ uint32_t node_id, uint32_t port_id)
+{
+ struct msm_ipc_router_remote_port *rport_ptr;
+ struct msm_ipc_routing_table_entry *rt_entry;
+ int key = (port_id & (RP_HASH_SIZE - 1));
+
+ rt_entry = ipc_router_get_rtentry_ref(node_id);
+ if (!rt_entry) {
+ IPC_RTR_ERR("%s: Node is not up\n", __func__);
+ return NULL;
+ }
+
+ down_read(&rt_entry->lock_lha4);
+ list_for_each_entry(rport_ptr,
+ &rt_entry->remote_port_list[key], list) {
+ if (rport_ptr->port_id == port_id) {
+ kref_get(&rport_ptr->ref);
+ goto out_lookup_rmt_port1;
+ }
+ }
+ rport_ptr = NULL;
+out_lookup_rmt_port1:
+ up_read(&rt_entry->lock_lha4);
+ kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+ return rport_ptr;
+}
+
+/**
+ * ipc_router_create_rport() - Create a remote port
+ * @node_id: Node ID corresponding to the remote port.
+ * @port_id: Port ID corresponding to the remote port.
+ * @xprt_info: XPRT through which the concerned node is reachable.
+ *
+ * @return: a reference to the remote port on success, NULL on failure.
+ */
+static struct msm_ipc_router_remote_port *ipc_router_create_rport(
+ uint32_t node_id, uint32_t port_id,
+ struct msm_ipc_router_xprt_info *xprt_info)
+{
+ struct msm_ipc_router_remote_port *rport_ptr;
+ struct msm_ipc_routing_table_entry *rt_entry;
+ int key = (port_id & (RP_HASH_SIZE - 1));
+
+ rt_entry = create_routing_table_entry(node_id, xprt_info);
+ if (!rt_entry) {
+ IPC_RTR_ERR("%s: Node cannot be created\n", __func__);
+ return NULL;
+ }
+
+ down_write(&rt_entry->lock_lha4);
+ list_for_each_entry(rport_ptr,
+ &rt_entry->remote_port_list[key], list) {
+ if (rport_ptr->port_id == port_id)
+ goto out_create_rmt_port1;
+ }
+
+ rport_ptr = kmalloc(sizeof(struct msm_ipc_router_remote_port),
+ GFP_KERNEL);
+ if (!rport_ptr) {
+ IPC_RTR_ERR("%s: Remote port alloc failed\n", __func__);
+ goto out_create_rmt_port2;
+ }
+ rport_ptr->port_id = port_id;
+ rport_ptr->node_id = node_id;
+ rport_ptr->status = VALID;
+ rport_ptr->sec_rule = NULL;
+ rport_ptr->server = NULL;
+ rport_ptr->tx_quota_cnt = 0;
+ kref_init(&rport_ptr->ref);
+ mutex_init(&rport_ptr->rport_lock_lhb2);
+ INIT_LIST_HEAD(&rport_ptr->resume_tx_port_list);
+ INIT_LIST_HEAD(&rport_ptr->conn_info_list);
+ list_add_tail(&rport_ptr->list,
+ &rt_entry->remote_port_list[key]);
+out_create_rmt_port1:
+ kref_get(&rport_ptr->ref);
+out_create_rmt_port2:
+ up_write(&rt_entry->lock_lha4);
+ kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+ return rport_ptr;
+}
+
+/**
+ * msm_ipc_router_free_resume_tx_port() - Free the resume_tx ports
+ * @rport_ptr: Pointer to the remote port.
+ *
+ * This function deletes all the resume_tx ports associated with a remote port
+ * and frees the memory allocated to each resume_tx port.
+ *
+ * Must be called with rport_ptr->rport_lock_lhb2 locked.
+ */
+static void msm_ipc_router_free_resume_tx_port(
+ struct msm_ipc_router_remote_port *rport_ptr)
+{
+ struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
+
+ list_for_each_entry_safe(rtx_port, tmp_rtx_port,
+ &rport_ptr->resume_tx_port_list, list) {
+ list_del(&rtx_port->list);
+ kfree(rtx_port);
+ }
+}
+
+/**
+ * msm_ipc_router_lookup_resume_tx_port() - Lookup resume_tx port list
+ * @rport_ptr: Remote port whose resume_tx port list needs to be searched.
+ * @port_id: Port ID to be looked up in the list.
+ *
+ * @return: 1 if the port_id is found in the list, else 0.
+ *
+ * This function checks whether a local port already exists in a remote
+ * port's resume_tx list, so that the same port is not added to the list
+ * repeatedly.
+ *
+ * Must be called with rport_ptr->rport_lock_lhb2 locked.
+ */
+static int msm_ipc_router_lookup_resume_tx_port(
+ struct msm_ipc_router_remote_port *rport_ptr, uint32_t port_id)
+{
+ struct msm_ipc_resume_tx_port *rtx_port;
+
+ list_for_each_entry(rtx_port, &rport_ptr->resume_tx_port_list, list) {
+ if (port_id == rtx_port->port_id)
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * ipc_router_dummy_write_space() - Dummy write space available callback
+ * @sk: Socket pointer for which the callback is called.
+ */
+void ipc_router_dummy_write_space(struct sock *sk)
+{
+}
+
+/**
+ * post_resume_tx() - Post the resume_tx event
+ * @rport_ptr: Pointer to the remote port
+ * @pkt: The data packet that is received on a resume_tx event.
+ * @msg: Out-of-band data to be passed to kernel drivers.
+ *
+ * This function notifies every local port on the remote port's
+ * resume_tx_port_list that a resume_tx message has been received from the
+ * remote port pointed to by @rport_ptr. After posting the notification, it
+ * deletes each entry from the resume_tx_port_list of the remote port.
+ *
+ * Must be called with rport_ptr->rport_lock_lhb2 locked.
+ */
+static void post_resume_tx(struct msm_ipc_router_remote_port *rport_ptr,
+ struct rr_packet *pkt, union rr_control_msg *msg)
+{
+ struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
+ struct msm_ipc_port *local_port;
+ struct sock *sk;
+ void (*write_space)(struct sock *sk) = NULL;
+
+ list_for_each_entry_safe(rtx_port, tmp_rtx_port,
+ &rport_ptr->resume_tx_port_list, list) {
+ local_port = ipc_router_get_port_ref(rtx_port->port_id);
+ if (local_port && local_port->notify) {
+ wake_up(&local_port->port_tx_wait_q);
+ local_port->notify(IPC_ROUTER_CTRL_CMD_RESUME_TX, msg,
+ sizeof(*msg), local_port->priv);
+ } else if (local_port) {
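+			/* Socket-based ports have no notify callback: wake
+			 * any writer blocked in sk_write_space(), or fall
+			 * back to queueing the resume_tx packet on the port
+			 * so that a reader sees it.
+			 */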
+ wake_up(&local_port->port_tx_wait_q);
+ sk = ipc_port_sk(local_port->endpoint);
+ if (sk) {
+ read_lock(&sk->sk_callback_lock);
+ write_space = sk->sk_write_space;
+ read_unlock(&sk->sk_callback_lock);
+ }
+ if (write_space &&
+ write_space != ipc_router_dummy_write_space)
+ write_space(sk);
+ else
+ post_pkt_to_port(local_port, pkt, 1);
+ } else {
+			IPC_RTR_ERR("%s: Local Port %d not found\n",
+				__func__, rtx_port->port_id);
+ }
+ if (local_port)
+ kref_put(&local_port->ref, ipc_router_release_port);
+ list_del(&rtx_port->list);
+ kfree(rtx_port);
+ }
+}
+
+/**
+ * signal_rport_exit() - Signal the local ports of remote port exit
+ * @rport_ptr: Remote port that is exiting.
+ *
+ * This function is used to signal the local ports that are waiting
+ * to resume transmission to a remote port that is exiting.
+ */
+static void signal_rport_exit(struct msm_ipc_router_remote_port *rport_ptr)
+{
+ struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
+ struct msm_ipc_port *local_port;
+
+ mutex_lock(&rport_ptr->rport_lock_lhb2);
+ rport_ptr->status = RESET;
+ list_for_each_entry_safe(rtx_port, tmp_rtx_port,
+ &rport_ptr->resume_tx_port_list, list) {
+ local_port = ipc_router_get_port_ref(rtx_port->port_id);
+ if (local_port) {
+ wake_up(&local_port->port_tx_wait_q);
+ kref_put(&local_port->ref, ipc_router_release_port);
+ }
+ list_del(&rtx_port->list);
+ kfree(rtx_port);
+ }
+ mutex_unlock(&rport_ptr->rport_lock_lhb2);
+}
+
+/**
+ * ipc_router_release_rport() - Cleanup and release the remote port
+ * @ref: Reference to the remote port.
+ *
+ * This function is called when all references to the remote port are released.
+ */
+static void ipc_router_release_rport(struct kref *ref)
+{
+ struct msm_ipc_router_remote_port *rport_ptr =
+ container_of(ref, struct msm_ipc_router_remote_port, ref);
+
+ mutex_lock(&rport_ptr->rport_lock_lhb2);
+ msm_ipc_router_free_resume_tx_port(rport_ptr);
+ mutex_unlock(&rport_ptr->rport_lock_lhb2);
+ kfree(rport_ptr);
+}
+
+/**
+ * ipc_router_destroy_rport() - Destroy the remote port
+ * @rport_ptr: Pointer to the remote port to be destroyed.
+ */
+static void ipc_router_destroy_rport(
+ struct msm_ipc_router_remote_port *rport_ptr)
+{
+ uint32_t node_id;
+ struct msm_ipc_routing_table_entry *rt_entry;
+
+ if (!rport_ptr)
+ return;
+
+ node_id = rport_ptr->node_id;
+ rt_entry = ipc_router_get_rtentry_ref(node_id);
+ if (!rt_entry) {
+ IPC_RTR_ERR("%s: Node %d is not up\n", __func__, node_id);
+ return;
+ }
+ down_write(&rt_entry->lock_lha4);
+ list_del(&rport_ptr->list);
+ up_write(&rt_entry->lock_lha4);
+ signal_rport_exit(rport_ptr);
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+}
+
+/**
+ * msm_ipc_router_lookup_server() - Lookup server information
+ * @service: Service ID of the server info to be looked up.
+ * @instance: Instance ID of the server info to be looked up.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ *
+ * @return: If found Pointer to server structure, else NULL.
+ *
+ * Note1: Lock the server_list_lock_lha2 before calling this function.
+ * Note2: If the <node_id:port_id> are <0:0>, then the lookup is restricted
+ * to <service:instance>. Used only when a client wants to send a
+ * message to any QMI server.
+ */
+static struct msm_ipc_server *msm_ipc_router_lookup_server(
+ uint32_t service,
+ uint32_t instance,
+ uint32_t node_id,
+ uint32_t port_id)
+{
+ struct msm_ipc_server *server;
+ struct msm_ipc_server_port *server_port;
+ int key = (service & (SRV_HASH_SIZE - 1));
+
+ list_for_each_entry(server, &server_list[key], list) {
+ if ((server->name.service != service) ||
+ (server->name.instance != instance))
+ continue;
+ if ((node_id == 0) && (port_id == 0))
+ return server;
+ list_for_each_entry(server_port, &server->server_port_list,
+ list) {
+ if ((server_port->server_addr.node_id == node_id) &&
+ (server_port->server_addr.port_id == port_id))
+ return server;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * ipc_router_get_server_ref() - Get reference to the server
+ * @svc: Service ID for which the reference is required.
+ * @ins: Instance ID for which the reference is required.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ *
+ * @return: If found return reference to server, else NULL.
+ */
+static struct msm_ipc_server *ipc_router_get_server_ref(
+ uint32_t svc, uint32_t ins, uint32_t node_id, uint32_t port_id)
+{
+ struct msm_ipc_server *server;
+
+ down_read(&server_list_lock_lha2);
+ server = msm_ipc_router_lookup_server(svc, ins, node_id, port_id);
+ if (server)
+ kref_get(&server->ref);
+ up_read(&server_list_lock_lha2);
+ return server;
+}
+
+/**
+ * ipc_router_release_server() - Cleanup and release the server
+ * @ref: Reference to the server.
+ *
+ * This function is called when all references to the server are released.
+ */
+static void ipc_router_release_server(struct kref *ref)
+{
+ struct msm_ipc_server *server =
+ container_of(ref, struct msm_ipc_server, ref);
+
+ kfree(server);
+}
+
+/**
+ * msm_ipc_router_create_server() - Add server info to hash table
+ * @service: Service ID of the server info to be created.
+ * @instance: Instance ID of the server info to be created.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ * @xprt_info: XPRT through which the node hosting the server is reached.
+ *
+ * @return: Pointer to server structure on success, else NULL.
+ *
+ * This function adds the server info to the hash table. If the same
+ * server (i.e. <service_id:instance_id>) is hosted on different nodes,
+ * the instances are maintained as a list of "server_port" entries under
+ * the "server" structure.
+ */
+static struct msm_ipc_server *msm_ipc_router_create_server(
+ uint32_t service,
+ uint32_t instance,
+ uint32_t node_id,
+ uint32_t port_id,
+ struct msm_ipc_router_xprt_info *xprt_info)
+{
+ struct msm_ipc_server *server = NULL;
+ struct msm_ipc_server_port *server_port;
+ struct platform_device *pdev;
+ int key = (service & (SRV_HASH_SIZE - 1));
+
+ down_write(&server_list_lock_lha2);
+ server = msm_ipc_router_lookup_server(service, instance, 0, 0);
+ if (server) {
+ list_for_each_entry(server_port, &server->server_port_list,
+ list) {
+ if ((server_port->server_addr.node_id == node_id) &&
+ (server_port->server_addr.port_id == port_id))
+ goto return_server;
+ }
+ goto create_srv_port;
+ }
+
+ server = kzalloc(sizeof(struct msm_ipc_server), GFP_KERNEL);
+ if (!server) {
+ up_write(&server_list_lock_lha2);
+ IPC_RTR_ERR("%s: Server allocation failed\n", __func__);
+ return NULL;
+ }
+ server->name.service = service;
+ server->name.instance = instance;
+ server->synced_sec_rule = 0;
+ INIT_LIST_HEAD(&server->server_port_list);
+ kref_init(&server->ref);
+ list_add_tail(&server->list, &server_list[key]);
+ scnprintf(server->pdev_name, sizeof(server->pdev_name),
+ "SVC%08x:%08x", service, instance);
+ server->next_pdev_id = 1;
+
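+	/* Each <node:port> hosting the service gets its own server_port
+	 * entry and a platform device named "SVC<service>:<instance>", so
+	 * that platform drivers can bind when the service comes up.
+	 */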
+create_srv_port:
+ server_port = kzalloc(sizeof(struct msm_ipc_server_port), GFP_KERNEL);
+ pdev = platform_device_alloc(server->pdev_name, server->next_pdev_id);
+ if (!server_port || !pdev) {
+ kfree(server_port);
+ if (pdev)
+ platform_device_put(pdev);
+ if (list_empty(&server->server_port_list)) {
+ list_del(&server->list);
+ kfree(server);
+ }
+ up_write(&server_list_lock_lha2);
+ IPC_RTR_ERR("%s: Server Port allocation failed\n", __func__);
+ return NULL;
+ }
+ server_port->pdev = pdev;
+ server_port->server_addr.node_id = node_id;
+ server_port->server_addr.port_id = port_id;
+ server_port->xprt_info = xprt_info;
+ list_add_tail(&server_port->list, &server->server_port_list);
+ server->next_pdev_id++;
+ platform_device_add(server_port->pdev);
+
+return_server:
+ /* Add a reference so that the caller can put it back */
+ kref_get(&server->ref);
+ up_write(&server_list_lock_lha2);
+ return server;
+}
+
+/**
+ * ipc_router_destroy_server_nolock() - Remove server info from hash table
+ * @server: Server info to be removed.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ *
+ * This function removes the server_port identified using <node_id:port_id>
+ * from the server structure. If the server_port list under server structure
+ * is empty after removal, then remove the server structure from the server
+ * hash table. This function must be called with server_list_lock_lha2 locked.
+ */
+static void ipc_router_destroy_server_nolock(struct msm_ipc_server *server,
+ uint32_t node_id, uint32_t port_id)
+{
+ struct msm_ipc_server_port *server_port;
+ bool server_port_found = false;
+
+ if (!server)
+ return;
+
+ list_for_each_entry(server_port, &server->server_port_list, list) {
+ if ((server_port->server_addr.node_id == node_id) &&
+ (server_port->server_addr.port_id == port_id)) {
+ server_port_found = true;
+ break;
+ }
+ }
+	if (server_port_found) {
+ platform_device_unregister(server_port->pdev);
+ list_del(&server_port->list);
+ kfree(server_port);
+ }
+ if (list_empty(&server->server_port_list)) {
+ list_del(&server->list);
+ kref_put(&server->ref, ipc_router_release_server);
+ }
+}
+
+/**
+ * ipc_router_destroy_server() - Remove server info from hash table
+ * @server: Server info to be removed.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ *
+ * This function removes the server_port identified using <node_id:port_id>
+ * from the server structure. If the server_port list under server structure
+ * is empty after removal, then remove the server structure from the server
+ * hash table.
+ */
+static void ipc_router_destroy_server(struct msm_ipc_server *server,
+ uint32_t node_id, uint32_t port_id)
+{
+ down_write(&server_list_lock_lha2);
+ ipc_router_destroy_server_nolock(server, node_id, port_id);
+ up_write(&server_list_lock_lha2);
+}
+
+static int ipc_router_send_ctl_msg(
+ struct msm_ipc_router_xprt_info *xprt_info,
+ union rr_control_msg *msg,
+ uint32_t dst_node_id)
+{
+ struct rr_packet *pkt;
+ struct sk_buff *ipc_rtr_pkt;
+ struct rr_header_v1 *hdr;
+ int pkt_size;
+ void *data;
+ int ret = -EINVAL;
+
+ pkt = create_pkt(NULL);
+ if (!pkt) {
+ IPC_RTR_ERR("%s: pkt alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ pkt_size = IPC_ROUTER_HDR_SIZE + sizeof(*msg);
+ ipc_rtr_pkt = alloc_skb(pkt_size, GFP_KERNEL);
+ if (!ipc_rtr_pkt) {
+ IPC_RTR_ERR("%s: ipc_rtr_pkt alloc failed\n", __func__);
+ release_pkt(pkt);
+ return -ENOMEM;
+ }
+
+ skb_reserve(ipc_rtr_pkt, IPC_ROUTER_HDR_SIZE);
+ data = skb_put(ipc_rtr_pkt, sizeof(*msg));
+ memcpy(data, msg, sizeof(*msg));
+ skb_queue_tail(pkt->pkt_fragment_q, ipc_rtr_pkt);
+ pkt->length = sizeof(*msg);
+
+ hdr = &(pkt->hdr);
+ hdr->version = IPC_ROUTER_V1;
+ hdr->type = msg->cmd;
+ hdr->src_node_id = IPC_ROUTER_NID_LOCAL;
+ hdr->src_port_id = IPC_ROUTER_ADDRESS;
+ hdr->control_flag = 0;
+ hdr->size = sizeof(*msg);
+ if (hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX ||
+ (!xprt_info && dst_node_id == IPC_ROUTER_NID_LOCAL))
+ hdr->dst_node_id = dst_node_id;
+ else if (xprt_info)
+ hdr->dst_node_id = xprt_info->remote_node_id;
+ hdr->dst_port_id = IPC_ROUTER_ADDRESS;
+
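+	/* Route the control message: a resume_tx destined to the local node
+	 * goes to the flow-control handler, other local control messages
+	 * are posted to all control ports, and remote-bound messages are
+	 * written to the XPRT, which must have completed the HELLO
+	 * handshake unless the message itself is a HELLO.
+	 */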
+ if (dst_node_id == IPC_ROUTER_NID_LOCAL &&
+ msg->cmd != IPC_ROUTER_CTRL_CMD_RESUME_TX) {
+ ipc_router_log_msg(local_log_ctx,
+ IPC_ROUTER_LOG_EVENT_TX, msg, hdr, NULL, NULL);
+ ret = post_control_ports(pkt);
+ } else if (dst_node_id == IPC_ROUTER_NID_LOCAL &&
+ msg->cmd == IPC_ROUTER_CTRL_CMD_RESUME_TX) {
+ ipc_router_log_msg(local_log_ctx,
+ IPC_ROUTER_LOG_EVENT_TX, msg, hdr, NULL, NULL);
+ ret = process_resume_tx_msg(msg, pkt);
+ } else if (xprt_info && (msg->cmd == IPC_ROUTER_CTRL_CMD_HELLO ||
+ xprt_info->initialized)) {
+ mutex_lock(&xprt_info->tx_lock_lhb2);
+ ipc_router_log_msg(xprt_info->log_ctx,
+ IPC_ROUTER_LOG_EVENT_TX, msg, hdr, NULL, NULL);
+ ret = prepend_header(pkt, xprt_info);
+ if (ret < 0) {
+ mutex_unlock(&xprt_info->tx_lock_lhb2);
+ IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
+ release_pkt(pkt);
+ return ret;
+ }
+
+ ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
+ mutex_unlock(&xprt_info->tx_lock_lhb2);
+ }
+
+ release_pkt(pkt);
+ return ret;
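+	/* Flow control: a remote port accepts IPC_ROUTER_HIGH_RX_QUOTA
+	 * packets before the sender must wait for a resume_tx. Register the
+	 * sending port on the remote port's resume_tx list (at most once)
+	 * and sleep until the quota drains, the timeout expires or the
+	 * remote port is reset.
+	 */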
+}
+
+static int msm_ipc_router_send_server_list(uint32_t node_id,
+ struct msm_ipc_router_xprt_info *xprt_info)
+{
+ union rr_control_msg ctl;
+ struct msm_ipc_server *server;
+ struct msm_ipc_server_port *server_port;
+ int i;
+
+ if (!xprt_info || !xprt_info->initialized) {
+ IPC_RTR_ERR("%s: Xprt info not initialized\n", __func__);
+ return -EINVAL;
+ }
+
+ memset(&ctl, 0, sizeof(ctl));
+ ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
+
+ for (i = 0; i < SRV_HASH_SIZE; i++) {
+ list_for_each_entry(server, &server_list[i], list) {
+ ctl.srv.service = server->name.service;
+ ctl.srv.instance = server->name.instance;
+ list_for_each_entry(server_port,
+ &server->server_port_list, list) {
+ if (server_port->server_addr.node_id !=
+ node_id)
+ continue;
+
+ ctl.srv.node_id =
+ server_port->server_addr.node_id;
+ ctl.srv.port_id =
+ server_port->server_addr.port_id;
+ ipc_router_send_ctl_msg(xprt_info,
+ &ctl, IPC_ROUTER_DUMMY_DEST_NODE);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int broadcast_ctl_msg_locally(union rr_control_msg *msg)
+{
+ return ipc_router_send_ctl_msg(NULL, msg, IPC_ROUTER_NID_LOCAL);
+}
+
+static int broadcast_ctl_msg(union rr_control_msg *ctl)
+{
+ struct msm_ipc_router_xprt_info *xprt_info;
+
+ down_read(&xprt_info_list_lock_lha5);
+ list_for_each_entry(xprt_info, &xprt_info_list, list) {
+ ipc_router_send_ctl_msg(xprt_info, ctl,
+ IPC_ROUTER_DUMMY_DEST_NODE);
+ }
+ up_read(&xprt_info_list_lock_lha5);
+ broadcast_ctl_msg_locally(ctl);
+
+ return 0;
+}
+
+static int relay_ctl_msg(struct msm_ipc_router_xprt_info *xprt_info,
+ union rr_control_msg *ctl)
+{
+ struct msm_ipc_router_xprt_info *fwd_xprt_info;
+
+ if (!xprt_info || !ctl)
+ return -EINVAL;
+
+ down_read(&xprt_info_list_lock_lha5);
+ list_for_each_entry(fwd_xprt_info, &xprt_info_list, list) {
+ if (xprt_info->xprt->link_id != fwd_xprt_info->xprt->link_id)
+ ipc_router_send_ctl_msg(fwd_xprt_info, ctl,
+ IPC_ROUTER_DUMMY_DEST_NODE);
+ }
+ up_read(&xprt_info_list_lock_lha5);
+
+ return 0;
+}
+
+static int forward_msg(struct msm_ipc_router_xprt_info *xprt_info,
+ struct rr_packet *pkt)
+{
+ struct rr_header_v1 *hdr;
+ struct msm_ipc_router_xprt_info *fwd_xprt_info;
+ struct msm_ipc_routing_table_entry *rt_entry;
+ int ret = 0;
+ int fwd_xprt_option;
+
+ if (!xprt_info || !pkt)
+ return -EINVAL;
+
+ hdr = &(pkt->hdr);
+ rt_entry = ipc_router_get_rtentry_ref(hdr->dst_node_id);
+ if (!(rt_entry) || !(rt_entry->xprt_info)) {
+ IPC_RTR_ERR("%s: Routing table not initialized\n", __func__);
+ ret = -ENODEV;
+ goto fm_error1;
+ }
+
+ down_read(&rt_entry->lock_lha4);
+ fwd_xprt_info = rt_entry->xprt_info;
+ ret = ipc_router_get_xprt_info_ref(fwd_xprt_info);
+ if (ret < 0) {
+ IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
+ goto fm_error_xprt;
+ }
+ ret = prepend_header(pkt, fwd_xprt_info);
+ if (ret < 0) {
+ IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
+ goto fm_error2;
+ }
+ fwd_xprt_option = fwd_xprt_info->xprt->get_option(fwd_xprt_info->xprt);
+ if (!(fwd_xprt_option & FRAG_PKT_WRITE_ENABLE)) {
+ ret = defragment_pkt(pkt);
+ if (ret < 0)
+ goto fm_error2;
+ }
+
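+	/* Never bounce a packet back into the cluster it arrived from. */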
+ mutex_lock(&fwd_xprt_info->tx_lock_lhb2);
+ if (xprt_info->remote_node_id == fwd_xprt_info->remote_node_id) {
+ IPC_RTR_ERR("%s: Discarding Command to route back\n", __func__);
+ ret = -EINVAL;
+ goto fm_error3;
+ }
+
+ if (xprt_info->xprt->link_id == fwd_xprt_info->xprt->link_id) {
+ IPC_RTR_ERR("%s: DST in the same cluster\n", __func__);
+ ret = 0;
+ goto fm_error3;
+ }
+ fwd_xprt_info->xprt->write(pkt, pkt->length, fwd_xprt_info->xprt);
+ IPC_RTR_INFO(fwd_xprt_info->log_ctx,
+ "%s %s Len:0x%x T:0x%x CF:0x%x SRC:<0x%x:0x%x> DST:<0x%x:0x%x>\n",
+ "FWD", "TX", hdr->size, hdr->type, hdr->control_flag,
+ hdr->src_node_id, hdr->src_port_id,
+ hdr->dst_node_id, hdr->dst_port_id);
+
+fm_error3:
+ mutex_unlock(&fwd_xprt_info->tx_lock_lhb2);
+fm_error2:
+ ipc_router_put_xprt_info_ref(fwd_xprt_info);
+fm_error_xprt:
+ up_read(&rt_entry->lock_lha4);
+fm_error1:
+ if (rt_entry)
+ kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+ return ret;
+}
+
+static int msm_ipc_router_send_remove_client(struct comm_mode_info *mode_info,
+ uint32_t node_id, uint32_t port_id)
+{
+ union rr_control_msg msg;
+ struct msm_ipc_router_xprt_info *tmp_xprt_info;
+ int mode;
+ void *xprt_info;
+ int rc = 0;
+
+ if (!mode_info) {
+ IPC_RTR_ERR("%s: NULL mode_info\n", __func__);
+ return -EINVAL;
+ }
+ mode = mode_info->mode;
+ xprt_info = mode_info->xprt_info;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
+ msg.cli.node_id = node_id;
+ msg.cli.port_id = port_id;
+
+ if ((mode == SINGLE_LINK_MODE) && xprt_info) {
+ down_read(&xprt_info_list_lock_lha5);
+ list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) {
+ if (tmp_xprt_info != xprt_info)
+ continue;
+ ipc_router_send_ctl_msg(tmp_xprt_info, &msg,
+ IPC_ROUTER_DUMMY_DEST_NODE);
+ break;
+ }
+ up_read(&xprt_info_list_lock_lha5);
+ } else if ((mode == SINGLE_LINK_MODE) && !xprt_info) {
+ broadcast_ctl_msg_locally(&msg);
+ } else if (mode == MULTI_LINK_MODE) {
+ broadcast_ctl_msg(&msg);
+ } else if (mode != NULL_MODE) {
+		IPC_RTR_ERR(
+			"%s: Invalid mode(%d) + xprt_info(%p) for %08x:%08x\n",
+			__func__, mode, xprt_info, node_id, port_id);
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
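+/*
+ * Track the set of links a port has communicated over: the first XPRT seen
+ * puts the port in SINGLE_LINK_MODE; a different XPRT later promotes it to
+ * MULTI_LINK_MODE, so that cleanup messages such as REMOVE_CLIENT are
+ * broadcast on every link the port has used.
+ */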
+static void update_comm_mode_info(struct comm_mode_info *mode_info,
+ struct msm_ipc_router_xprt_info *xprt_info)
+{
+ if (!mode_info) {
+ IPC_RTR_ERR("%s: NULL mode_info\n", __func__);
+ return;
+ }
+
+ if (mode_info->mode == NULL_MODE) {
+ mode_info->xprt_info = xprt_info;
+ mode_info->mode = SINGLE_LINK_MODE;
+ } else if (mode_info->mode == SINGLE_LINK_MODE &&
+ mode_info->xprt_info != xprt_info) {
+ mode_info->mode = MULTI_LINK_MODE;
+ }
+}
+
+/**
+ * cleanup_rmt_server() - Cleanup server hosted in the remote port
+ * @xprt_info: XPRT through which this cleanup event is handled.
+ * @rport_ptr: Remote port that is being cleaned up.
+ * @server: Server that is hosted in the remote port.
+ */
+static void cleanup_rmt_server(struct msm_ipc_router_xprt_info *xprt_info,
+ struct msm_ipc_router_remote_port *rport_ptr,
+ struct msm_ipc_server *server)
+{
+ union rr_control_msg ctl;
+
+ memset(&ctl, 0, sizeof(ctl));
+ ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
+ ctl.srv.service = server->name.service;
+ ctl.srv.instance = server->name.instance;
+ ctl.srv.node_id = rport_ptr->node_id;
+ ctl.srv.port_id = rport_ptr->port_id;
+ if (xprt_info)
+ relay_ctl_msg(xprt_info, &ctl);
+ broadcast_ctl_msg_locally(&ctl);
+ ipc_router_destroy_server_nolock(server,
+ rport_ptr->node_id, rport_ptr->port_id);
+}
+
+static void cleanup_rmt_ports(struct msm_ipc_router_xprt_info *xprt_info,
+ struct msm_ipc_routing_table_entry *rt_entry)
+{
+ struct msm_ipc_router_remote_port *rport_ptr, *tmp_rport_ptr;
+ struct msm_ipc_server *server;
+ union rr_control_msg ctl;
+ int j;
+
+ memset(&ctl, 0, sizeof(ctl));
+ for (j = 0; j < RP_HASH_SIZE; j++) {
+ list_for_each_entry_safe(rport_ptr, tmp_rport_ptr,
+ &rt_entry->remote_port_list[j], list) {
+ list_del(&rport_ptr->list);
+ mutex_lock(&rport_ptr->rport_lock_lhb2);
+ server = rport_ptr->server;
+ rport_ptr->server = NULL;
+ mutex_unlock(&rport_ptr->rport_lock_lhb2);
+ ipc_router_reset_conn(rport_ptr);
+ if (server) {
+ cleanup_rmt_server(xprt_info, rport_ptr,
+ server);
+ server = NULL;
+ }
+
+ ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
+ ctl.cli.node_id = rport_ptr->node_id;
+ ctl.cli.port_id = rport_ptr->port_id;
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+
+ relay_ctl_msg(xprt_info, &ctl);
+ broadcast_ctl_msg_locally(&ctl);
+ }
+ }
+}
+
+static void msm_ipc_cleanup_routing_table(
+ struct msm_ipc_router_xprt_info *xprt_info)
+{
+ int i;
+ struct msm_ipc_routing_table_entry *rt_entry, *tmp_rt_entry;
+
+ if (!xprt_info) {
+ IPC_RTR_ERR("%s: Invalid xprt_info\n", __func__);
+ return;
+ }
+
+ down_write(&server_list_lock_lha2);
+ down_write(&routing_table_lock_lha3);
+ for (i = 0; i < RT_HASH_SIZE; i++) {
+ list_for_each_entry_safe(rt_entry, tmp_rt_entry,
+ &routing_table[i], list) {
+ down_write(&rt_entry->lock_lha4);
+ if (rt_entry->xprt_info != xprt_info) {
+ up_write(&rt_entry->lock_lha4);
+ continue;
+ }
+ cleanup_rmt_ports(xprt_info, rt_entry);
+ rt_entry->xprt_info = NULL;
+ up_write(&rt_entry->lock_lha4);
+ list_del(&rt_entry->list);
+ kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+ }
+ }
+ up_write(&routing_table_lock_lha3);
+ up_write(&server_list_lock_lha2);
+}
+
+/**
+ * sync_sec_rule() - Synchronize the security rule into the server structure
+ * @server: Server structure where the rule has to be synchronized.
+ * @rule: Security rule to be synchronized.
+ *
+ * This function is used to update the server structure with the security
+ * rule configured for the <service:instance> corresponding to that server.
+ */
+static void sync_sec_rule(struct msm_ipc_server *server, void *rule)
+{
+ struct msm_ipc_server_port *server_port;
+ struct msm_ipc_router_remote_port *rport_ptr = NULL;
+
+ list_for_each_entry(server_port, &server->server_port_list, list) {
+ rport_ptr = ipc_router_get_rport_ref(
+ server_port->server_addr.node_id,
+ server_port->server_addr.port_id);
+ if (!rport_ptr)
+ continue;
+ rport_ptr->sec_rule = rule;
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ }
+ server->synced_sec_rule = 1;
+}
+
+/**
+ * msm_ipc_sync_sec_rule() - Sync the security rule to the service
+ * @service: Service for which the rule has to be synchronized.
+ * @instance: Instance for which the rule has to be synchronized.
+ * @rule: Security rule to be synchronized.
+ *
+ * This function is used to synchronize the security rule with the server
+ * hash table, if the user-space script configures the rule after the
+ * service has come up. The rule is applied to a specific service and
+ * optionally to a specific instance.
+ */
+void msm_ipc_sync_sec_rule(uint32_t service, uint32_t instance, void *rule)
+{
+ int key = (service & (SRV_HASH_SIZE - 1));
+ struct msm_ipc_server *server;
+
+ down_write(&server_list_lock_lha2);
+ list_for_each_entry(server, &server_list[key], list) {
+ if (server->name.service != service)
+ continue;
+
+ if (server->name.instance != instance &&
+ instance != ALL_INSTANCE)
+ continue;
+
+ /* If the rule applies to all instances and if the specific
+ * instance of a service has a rule synchronized already,
+ * do not apply the rule for that specific instance.
+ */
+ if (instance == ALL_INSTANCE && server->synced_sec_rule)
+ continue;
+
+ sync_sec_rule(server, rule);
+ }
+ up_write(&server_list_lock_lha2);
+}
+
+/**
+ * msm_ipc_sync_default_sec_rule() - Default security rule to all services
+ * @rule: Security rule to be synchronized.
+ *
+ * This function is used to synchronize the security rule with the server
+ * hash table, if the user-space script configures the rule after the
+ * service has come up. The rule applies to all services that do not have
+ * a rule of their own defined.
+ */
+void msm_ipc_sync_default_sec_rule(void *rule)
+{
+ int key;
+ struct msm_ipc_server *server;
+
+ down_write(&server_list_lock_lha2);
+ for (key = 0; key < SRV_HASH_SIZE; key++) {
+ list_for_each_entry(server, &server_list[key], list) {
+ if (server->synced_sec_rule)
+ continue;
+
+ sync_sec_rule(server, rule);
+ }
+ }
+ up_write(&server_list_lock_lha2);
+}
+
+/**
+ * ipc_router_reset_conn() - Reset the connection to remote port
+ * @rport_ptr: Pointer to the remote port to be disconnected.
+ *
+ * This function is used to reset all the local ports that are connected to
+ * the remote port being passed.
+ */
+static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr)
+{
+ struct msm_ipc_port *port_ptr;
+ struct ipc_router_conn_info *conn_info, *tmp_conn_info;
+
+ mutex_lock(&rport_ptr->rport_lock_lhb2);
+ list_for_each_entry_safe(conn_info, tmp_conn_info,
+ &rport_ptr->conn_info_list, list) {
+ port_ptr = ipc_router_get_port_ref(conn_info->port_id);
+ if (port_ptr) {
+ mutex_lock(&port_ptr->port_lock_lhc3);
+ port_ptr->conn_status = CONNECTION_RESET;
+ mutex_unlock(&port_ptr->port_lock_lhc3);
+ wake_up(&port_ptr->port_rx_wait_q);
+ kref_put(&port_ptr->ref, ipc_router_release_port);
+ }
+
+ list_del(&conn_info->list);
+ kfree(conn_info);
+ }
+ mutex_unlock(&rport_ptr->rport_lock_lhb2);
+}
+
+/**
+ * ipc_router_set_conn() - Set the connection by initializing dest address
+ * @port_ptr: Local port in which the connection has to be set.
+ * @addr: Destination address of the connection.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+int ipc_router_set_conn(struct msm_ipc_port *port_ptr,
+ struct msm_ipc_addr *addr)
+{
+ struct msm_ipc_router_remote_port *rport_ptr;
+ struct ipc_router_conn_info *conn_info;
+
+ if (unlikely(!port_ptr || !addr))
+ return -EINVAL;
+
+ if (addr->addrtype != MSM_IPC_ADDR_ID) {
+ IPC_RTR_ERR("%s: Invalid Address type\n", __func__);
+ return -EINVAL;
+ }
+
+ if (port_ptr->type == SERVER_PORT) {
+ IPC_RTR_ERR("%s: Connection refused on a server port\n",
+ __func__);
+ return -ECONNREFUSED;
+ }
+
+ if (port_ptr->conn_status == CONNECTED) {
+ IPC_RTR_ERR("%s: Port %08x already connected\n",
+ __func__, port_ptr->this_port.port_id);
+ return -EISCONN;
+ }
+
+ conn_info = kzalloc(sizeof(struct ipc_router_conn_info), GFP_KERNEL);
+ if (!conn_info) {
+ IPC_RTR_ERR("%s: Error allocating conn_info\n", __func__);
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&conn_info->list);
+ conn_info->port_id = port_ptr->this_port.port_id;
+
+ rport_ptr = ipc_router_get_rport_ref(addr->addr.port_addr.node_id,
+ addr->addr.port_addr.port_id);
+ if (!rport_ptr) {
+ IPC_RTR_ERR("%s: Invalid remote endpoint\n", __func__);
+ kfree(conn_info);
+ return -ENODEV;
+ }
+ mutex_lock(&rport_ptr->rport_lock_lhb2);
+ list_add_tail(&conn_info->list, &rport_ptr->conn_info_list);
+ mutex_unlock(&rport_ptr->rport_lock_lhb2);
+
+ mutex_lock(&port_ptr->port_lock_lhc3);
+ memcpy(&port_ptr->dest_addr, &addr->addr.port_addr,
+ sizeof(struct msm_ipc_port_addr));
+ port_ptr->conn_status = CONNECTED;
+ mutex_unlock(&port_ptr->port_lock_lhc3);
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ return 0;
+}
+
+/**
+ * do_version_negotiation() - perform a version negotiation and set the version
+ * @xprt_info: Pointer to the IPC Router transport info structure.
+ * @msg: Pointer to the IPC Router HELLO message.
+ *
+ * This function performs the version negotiation by verifying the computed
+ * checksum first. If the checksum matches the magic number, it sets the
+ * negotiated IPC Router version in the transport.
+ */
+static void do_version_negotiation(struct msm_ipc_router_xprt_info *xprt_info,
+ union rr_control_msg *msg)
+{
+ uint32_t magic;
+ unsigned version;
+
+ if (!xprt_info)
+ return;
+ magic = ipc_router_calc_checksum(msg);
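+	/* fls() picks the highest version bit advertised by the peer;
+	 * subtracting one converts the bit position to a version number.
+	 */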
+ if (magic == IPC_ROUTER_HELLO_MAGIC) {
+ version = fls(msg->hello.versions & IPC_ROUTER_VER_BITMASK) - 1;
+		/* Bits 0 & 31 are reserved for future use */
+ if ((version > 0) &&
+ (version != (sizeof(version) * BITS_PER_BYTE - 1)) &&
+ xprt_info->xprt->set_version)
+ xprt_info->xprt->set_version(xprt_info->xprt, version);
+ }
+}
+
+static int process_hello_msg(struct msm_ipc_router_xprt_info *xprt_info,
+ union rr_control_msg *msg,
+ struct rr_header_v1 *hdr)
+{
+ int i, rc = 0;
+ union rr_control_msg ctl;
+ struct msm_ipc_routing_table_entry *rt_entry;
+
+ if (!hdr)
+ return -EINVAL;
+
+ xprt_info->remote_node_id = hdr->src_node_id;
+ rt_entry = create_routing_table_entry(hdr->src_node_id, xprt_info);
+ if (!rt_entry) {
+ IPC_RTR_ERR("%s: rt_entry allocation failed\n", __func__);
+ return -ENOMEM;
+ }
+ kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+
+ do_version_negotiation(xprt_info, msg);
+ /* Send a reply HELLO message */
+ memset(&ctl, 0, sizeof(ctl));
+ ctl.hello.cmd = IPC_ROUTER_CTRL_CMD_HELLO;
+ ctl.hello.checksum = IPC_ROUTER_HELLO_MAGIC;
+ ctl.hello.versions = (uint32_t)IPC_ROUTER_VER_BITMASK;
+ ctl.hello.checksum = ipc_router_calc_checksum(&ctl);
+ rc = ipc_router_send_ctl_msg(xprt_info, &ctl,
+ IPC_ROUTER_DUMMY_DEST_NODE);
+ if (rc < 0) {
+ IPC_RTR_ERR("%s: Error sending reply HELLO message\n",
+ __func__);
+ return rc;
+ }
+ xprt_info->initialized = 1;
+
+	/* Send the list of servers from the local node and from nodes
+	 * outside the mesh network of which this XPRT is a part.
+	 */
+ down_read(&server_list_lock_lha2);
+ down_read(&routing_table_lock_lha3);
+ for (i = 0; i < RT_HASH_SIZE; i++) {
+ list_for_each_entry(rt_entry, &routing_table[i], list) {
+ if ((rt_entry->node_id != IPC_ROUTER_NID_LOCAL) &&
+ (!rt_entry->xprt_info ||
+ (rt_entry->xprt_info->xprt->link_id ==
+ xprt_info->xprt->link_id)))
+ continue;
+ rc = msm_ipc_router_send_server_list(rt_entry->node_id,
+ xprt_info);
+ if (rc < 0) {
+ up_read(&routing_table_lock_lha3);
+ up_read(&server_list_lock_lha2);
+ return rc;
+ }
+ }
+ }
+ up_read(&routing_table_lock_lha3);
+ up_read(&server_list_lock_lha2);
+ return rc;
+}
+
+static int process_resume_tx_msg(union rr_control_msg *msg,
+ struct rr_packet *pkt)
+{
+ struct msm_ipc_router_remote_port *rport_ptr;
+
+ rport_ptr = ipc_router_get_rport_ref(msg->cli.node_id,
+ msg->cli.port_id);
+ if (!rport_ptr) {
+ IPC_RTR_ERR("%s: Unable to resume client\n", __func__);
+ return -ENODEV;
+ }
+ mutex_lock(&rport_ptr->rport_lock_lhb2);
+ rport_ptr->tx_quota_cnt = 0;
+ post_resume_tx(rport_ptr, pkt, msg);
+ mutex_unlock(&rport_ptr->rport_lock_lhb2);
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ return 0;
+}
+
+static int process_new_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
+ union rr_control_msg *msg, struct rr_packet *pkt)
+{
+ struct msm_ipc_routing_table_entry *rt_entry;
+ struct msm_ipc_server *server;
+ struct msm_ipc_router_remote_port *rport_ptr;
+
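+	/* The service version is carried in the instance field, so an
+	 * instance of 0 implies version 0, which is rejected as invalid.
+	 */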
+ if (msg->srv.instance == 0) {
+ IPC_RTR_ERR("%s: Server %08x create rejected, version = 0\n",
+ __func__, msg->srv.service);
+ return -EINVAL;
+ }
+
+ rt_entry = ipc_router_get_rtentry_ref(msg->srv.node_id);
+ if (!rt_entry) {
+ rt_entry = create_routing_table_entry(msg->srv.node_id,
+ xprt_info);
+ if (!rt_entry) {
+ IPC_RTR_ERR("%s: rt_entry allocation failed\n",
+ __func__);
+ return -ENOMEM;
+ }
+ }
+ kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+
+	/* If the remote port or the server already exists in the respective
+	 * table, the create functions below return a reference to the
+	 * existing entry.
+	 */
+ rport_ptr = ipc_router_create_rport(msg->srv.node_id,
+ msg->srv.port_id, xprt_info);
+ if (!rport_ptr)
+ return -ENOMEM;
+
+ server = msm_ipc_router_create_server(
+ msg->srv.service, msg->srv.instance,
+ msg->srv.node_id, msg->srv.port_id, xprt_info);
+ if (!server) {
+ IPC_RTR_ERR("%s: Server %08x:%08x Create failed\n",
+ __func__, msg->srv.service, msg->srv.instance);
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ ipc_router_destroy_rport(rport_ptr);
+ return -ENOMEM;
+ }
+ mutex_lock(&rport_ptr->rport_lock_lhb2);
+ rport_ptr->server = server;
+ mutex_unlock(&rport_ptr->rport_lock_lhb2);
+ rport_ptr->sec_rule = msm_ipc_get_security_rule(
+ msg->srv.service, msg->srv.instance);
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ kref_put(&server->ref, ipc_router_release_server);
+
+ /* Relay the new server message to other subsystems that do not belong
+ * to the cluster from which this message is received. Notify the
+ * local clients waiting for this service.
+ */
+ relay_ctl_msg(xprt_info, msg);
+ post_control_ports(pkt);
+ return 0;
+}
+
+static int process_rmv_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
+ union rr_control_msg *msg, struct rr_packet *pkt)
+{
+ struct msm_ipc_server *server;
+ struct msm_ipc_router_remote_port *rport_ptr;
+
+ server = ipc_router_get_server_ref(msg->srv.service, msg->srv.instance,
+ msg->srv.node_id, msg->srv.port_id);
+ rport_ptr = ipc_router_get_rport_ref(msg->srv.node_id,
+ msg->srv.port_id);
+ if (rport_ptr) {
+ mutex_lock(&rport_ptr->rport_lock_lhb2);
+ if (rport_ptr->server == server)
+ rport_ptr->server = NULL;
+ mutex_unlock(&rport_ptr->rport_lock_lhb2);
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ }
+
+ if (server) {
+ kref_put(&server->ref, ipc_router_release_server);
+ ipc_router_destroy_server(server, msg->srv.node_id,
+ msg->srv.port_id);
+		/*
+		 * Relay the remove server message to other subsystems that
+		 * do not belong to the cluster from which this message is
+		 * received. Notify the local clients communicating with the
+		 * service.
+		 */
+ relay_ctl_msg(xprt_info, msg);
+ post_control_ports(pkt);
+ }
+ return 0;
+}
+
+static int process_rmv_client_msg(struct msm_ipc_router_xprt_info *xprt_info,
+ union rr_control_msg *msg, struct rr_packet *pkt)
+{
+ struct msm_ipc_router_remote_port *rport_ptr;
+ struct msm_ipc_server *server;
+
+ rport_ptr = ipc_router_get_rport_ref(msg->cli.node_id,
+ msg->cli.port_id);
+ if (rport_ptr) {
+ mutex_lock(&rport_ptr->rport_lock_lhb2);
+ server = rport_ptr->server;
+ rport_ptr->server = NULL;
+ mutex_unlock(&rport_ptr->rport_lock_lhb2);
+ ipc_router_reset_conn(rport_ptr);
+ down_write(&server_list_lock_lha2);
+ if (server)
+ cleanup_rmt_server(NULL, rport_ptr, server);
+ up_write(&server_list_lock_lha2);
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ ipc_router_destroy_rport(rport_ptr);
+ }
+
+ relay_ctl_msg(xprt_info, msg);
+ post_control_ports(pkt);
+ return 0;
+}
+
+static int process_control_msg(struct msm_ipc_router_xprt_info *xprt_info,
+ struct rr_packet *pkt)
+{
+ union rr_control_msg *msg;
+ int rc = 0;
+ struct rr_header_v1 *hdr;
+
+ if (pkt->length != sizeof(*msg)) {
+ IPC_RTR_ERR("%s: r2r msg size %d != %zu\n",
+ __func__, pkt->length, sizeof(*msg));
+ return -EINVAL;
+ }
+
+ hdr = &(pkt->hdr);
+ msg = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, sizeof(*msg));
+ if (!msg) {
+ IPC_RTR_ERR("%s: Error extracting control msg\n", __func__);
+ return -ENOMEM;
+ }
+
+ ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_RX,
+ msg, hdr, NULL, NULL);
+
+ switch (msg->cmd) {
+ case IPC_ROUTER_CTRL_CMD_HELLO:
+ rc = process_hello_msg(xprt_info, msg, hdr);
+ break;
+ case IPC_ROUTER_CTRL_CMD_RESUME_TX:
+ rc = process_resume_tx_msg(msg, pkt);
+ break;
+ case IPC_ROUTER_CTRL_CMD_NEW_SERVER:
+ rc = process_new_server_msg(xprt_info, msg, pkt);
+ break;
+ case IPC_ROUTER_CTRL_CMD_REMOVE_SERVER:
+ rc = process_rmv_server_msg(xprt_info, msg, pkt);
+ break;
+ case IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT:
+ rc = process_rmv_client_msg(xprt_info, msg, pkt);
+ break;
+ default:
+ rc = -ENOSYS;
+ }
+ kfree(msg);
+ return rc;
+}
+
+static void do_read_data(struct work_struct *work)
+{
+ struct rr_header_v1 *hdr;
+ struct rr_packet *pkt = NULL;
+ struct msm_ipc_port *port_ptr;
+ struct msm_ipc_router_remote_port *rport_ptr;
+ int ret;
+
+ struct msm_ipc_router_xprt_info *xprt_info =
+ container_of(work,
+ struct msm_ipc_router_xprt_info,
+ read_data);
+
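+	/* Drain the XPRT: validate each packet, forward anything not
+	 * addressed to this node, hand control messages to the router core
+	 * and queue data packets on the destination local port.
+	 */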
+ while ((pkt = rr_read(xprt_info)) != NULL) {
+ if (pkt->length < calc_rx_header_size(xprt_info) ||
+ pkt->length > MAX_IPC_PKT_SIZE) {
+ IPC_RTR_ERR("%s: Invalid pkt length %d\n",
+ __func__, pkt->length);
+ goto read_next_pkt1;
+ }
+
+ ret = extract_header(pkt);
+ if (ret < 0)
+ goto read_next_pkt1;
+ hdr = &(pkt->hdr);
+
+ if ((hdr->dst_node_id != IPC_ROUTER_NID_LOCAL) &&
+ ((hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX) ||
+ (hdr->type == IPC_ROUTER_CTRL_CMD_DATA))) {
+ IPC_RTR_INFO(xprt_info->log_ctx,
+ "%s %s Len:0x%x T:0x%x CF:0x%x SRC:<0x%x:0x%x> DST:<0x%x:0x%x>\n",
+ "FWD", "RX", hdr->size, hdr->type, hdr->control_flag,
+ hdr->src_node_id, hdr->src_port_id,
+ hdr->dst_node_id, hdr->dst_port_id);
+ forward_msg(xprt_info, pkt);
+ goto read_next_pkt1;
+ }
+
+ if (hdr->type != IPC_ROUTER_CTRL_CMD_DATA) {
+ process_control_msg(xprt_info, pkt);
+ goto read_next_pkt1;
+ }
+
+ if (msm_ipc_router_debug_mask & SMEM_LOG) {
+ smem_log_event((SMEM_LOG_PROC_ID_APPS |
+ SMEM_LOG_IPC_ROUTER_EVENT_BASE |
+ IPC_ROUTER_LOG_EVENT_RX),
+ (hdr->src_node_id << 24) |
+ (hdr->src_port_id & 0xffffff),
+ (hdr->dst_node_id << 24) |
+ (hdr->dst_port_id & 0xffffff),
+ (hdr->type << 24) | (hdr->control_flag << 16) |
+ (hdr->size & 0xffff));
+ }
+
+ port_ptr = ipc_router_get_port_ref(hdr->dst_port_id);
+ if (!port_ptr) {
+ IPC_RTR_ERR("%s: No local port id %08x\n", __func__,
+ hdr->dst_port_id);
+ goto read_next_pkt1;
+ }
+
+ rport_ptr = ipc_router_get_rport_ref(hdr->src_node_id,
+ hdr->src_port_id);
+ if (!rport_ptr) {
+ rport_ptr = ipc_router_create_rport(hdr->src_node_id,
+ hdr->src_port_id, xprt_info);
+ if (!rport_ptr) {
+ IPC_RTR_ERR(
+ "%s: Rmt Prt %08x:%08x create failed\n",
+ __func__, hdr->src_node_id, hdr->src_port_id);
+ goto read_next_pkt2;
+ }
+ }
+
+ ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_RX,
+ pkt, hdr, port_ptr, rport_ptr);
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ post_pkt_to_port(port_ptr, pkt, 0);
+ kref_put(&port_ptr->ref, ipc_router_release_port);
+ continue;
+read_next_pkt2:
+ kref_put(&port_ptr->ref, ipc_router_release_port);
+read_next_pkt1:
+ release_pkt(pkt);
+ }
+}
+
+int msm_ipc_router_register_server(struct msm_ipc_port *port_ptr,
+ struct msm_ipc_addr *name)
+{
+ struct msm_ipc_server *server;
+ union rr_control_msg ctl;
+ struct msm_ipc_router_remote_port *rport_ptr;
+
+ if (!port_ptr || !name)
+ return -EINVAL;
+
+ if (port_ptr->type != CLIENT_PORT)
+ return -EINVAL;
+
+ if (name->addrtype != MSM_IPC_ADDR_NAME)
+ return -EINVAL;
+
+ rport_ptr = ipc_router_create_rport(IPC_ROUTER_NID_LOCAL,
+ port_ptr->this_port.port_id, NULL);
+ if (!rport_ptr) {
+ IPC_RTR_ERR("%s: RPort %08x:%08x creation failed\n", __func__,
+ IPC_ROUTER_NID_LOCAL, port_ptr->this_port.port_id);
+ return -ENOMEM;
+ }
+
+ server = msm_ipc_router_create_server(name->addr.port_name.service,
+ name->addr.port_name.instance,
+ IPC_ROUTER_NID_LOCAL,
+ port_ptr->this_port.port_id,
+ NULL);
+ if (!server) {
+ IPC_RTR_ERR("%s: Server %08x:%08x Create failed\n",
+ __func__, name->addr.port_name.service,
+ name->addr.port_name.instance);
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ ipc_router_destroy_rport(rport_ptr);
+ return -ENOMEM;
+ }
+
+ memset(&ctl, 0, sizeof(ctl));
+ ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
+ ctl.srv.service = server->name.service;
+ ctl.srv.instance = server->name.instance;
+ ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
+ ctl.srv.port_id = port_ptr->this_port.port_id;
+ broadcast_ctl_msg(&ctl);
+ mutex_lock(&port_ptr->port_lock_lhc3);
+ port_ptr->type = SERVER_PORT;
+ port_ptr->mode_info.mode = MULTI_LINK_MODE;
+ port_ptr->port_name.service = server->name.service;
+ port_ptr->port_name.instance = server->name.instance;
+ port_ptr->rport_info = rport_ptr;
+ mutex_unlock(&port_ptr->port_lock_lhc3);
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ kref_put(&server->ref, ipc_router_release_server);
+ return 0;
+}
+
+int msm_ipc_router_unregister_server(struct msm_ipc_port *port_ptr)
+{
+ struct msm_ipc_server *server;
+ union rr_control_msg ctl;
+ struct msm_ipc_router_remote_port *rport_ptr;
+
+ if (!port_ptr)
+ return -EINVAL;
+
+ if (port_ptr->type != SERVER_PORT) {
+ IPC_RTR_ERR("%s: Trying to unregister a non-server port\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (port_ptr->this_port.node_id != IPC_ROUTER_NID_LOCAL) {
+ IPC_RTR_ERR(
+ "%s: Trying to unregister a remote server locally\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ server = ipc_router_get_server_ref(port_ptr->port_name.service,
+ port_ptr->port_name.instance,
+ port_ptr->this_port.node_id,
+ port_ptr->this_port.port_id);
+ if (!server) {
+ IPC_RTR_ERR("%s: Server lookup failed\n", __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&port_ptr->port_lock_lhc3);
+ port_ptr->type = CLIENT_PORT;
+ rport_ptr = (struct msm_ipc_router_remote_port *)port_ptr->rport_info;
+ mutex_unlock(&port_ptr->port_lock_lhc3);
+ if (rport_ptr)
+ ipc_router_reset_conn(rport_ptr);
+ memset(&ctl, 0, sizeof(ctl));
+ ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
+ ctl.srv.service = server->name.service;
+ ctl.srv.instance = server->name.instance;
+ ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
+ ctl.srv.port_id = port_ptr->this_port.port_id;
+ kref_put(&server->ref, ipc_router_release_server);
+ ipc_router_destroy_server(server, port_ptr->this_port.node_id,
+ port_ptr->this_port.port_id);
+ broadcast_ctl_msg(&ctl);
+ return 0;
+}
+
+static int loopback_data(struct msm_ipc_port *src,
+ uint32_t port_id,
+ struct rr_packet *pkt)
+{
+ struct msm_ipc_port *port_ptr;
+ struct sk_buff *temp_skb;
+ int align_size;
+
+ if (!pkt) {
+ IPC_RTR_ERR("%s: Invalid pkt pointer\n", __func__);
+ return -EINVAL;
+ }
+
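+	/* Pad the tail fragment so that the total length stays word
+	 * aligned, mirroring what the XPRT write path does.
+	 */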
+ temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
+ align_size = ALIGN_SIZE(pkt->length);
+ skb_put(temp_skb, align_size);
+ pkt->length += align_size;
+
+ port_ptr = ipc_router_get_port_ref(port_id);
+ if (!port_ptr) {
+ IPC_RTR_ERR("%s: Local port %d not present\n",
+ __func__, port_id);
+ return -ENODEV;
+ }
+ post_pkt_to_port(port_ptr, pkt, 1);
+ update_comm_mode_info(&src->mode_info, NULL);
+ kref_put(&port_ptr->ref, ipc_router_release_port);
+
+ return pkt->hdr.size;
+}
+
+static int ipc_router_tx_wait(struct msm_ipc_port *src,
+ struct msm_ipc_router_remote_port *rport_ptr,
+ uint32_t *set_confirm_rx,
+ long timeout)
+{
+ struct msm_ipc_resume_tx_port *resume_tx_port;
+ int ret;
+
+ if (unlikely(!src || !rport_ptr))
+ return -EINVAL;
+
+ for (;;) {
+ mutex_lock(&rport_ptr->rport_lock_lhb2);
+ if (rport_ptr->status == RESET) {
+ mutex_unlock(&rport_ptr->rport_lock_lhb2);
+ IPC_RTR_ERR("%s: RPort %08x:%08x is in reset state\n",
+ __func__, rport_ptr->node_id, rport_ptr->port_id);
+ return -ENETRESET;
+ }
+
+ if (rport_ptr->tx_quota_cnt < IPC_ROUTER_HIGH_RX_QUOTA)
+ break;
+
+ if (msm_ipc_router_lookup_resume_tx_port(
+ rport_ptr, src->this_port.port_id))
+ goto check_timeo;
+
+ resume_tx_port =
+ kzalloc(sizeof(struct msm_ipc_resume_tx_port),
+ GFP_KERNEL);
+ if (!resume_tx_port) {
+ IPC_RTR_ERR("%s: Resume_Tx port allocation failed\n",
+ __func__);
+ mutex_unlock(&rport_ptr->rport_lock_lhb2);
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&resume_tx_port->list);
+ resume_tx_port->port_id = src->this_port.port_id;
+ resume_tx_port->node_id = src->this_port.node_id;
+ list_add_tail(&resume_tx_port->list,
+ &rport_ptr->resume_tx_port_list);
+check_timeo:
+ mutex_unlock(&rport_ptr->rport_lock_lhb2);
+ if (!timeout) {
+ return -EAGAIN;
+ } else if (timeout < 0) {
+ ret = wait_event_interruptible(src->port_tx_wait_q,
+ (rport_ptr->tx_quota_cnt !=
+ IPC_ROUTER_HIGH_RX_QUOTA ||
+ rport_ptr->status == RESET));
+ if (ret)
+ return ret;
+ } else {
+ ret = wait_event_interruptible_timeout(
+ src->port_tx_wait_q,
+ (rport_ptr->tx_quota_cnt !=
+ IPC_ROUTER_HIGH_RX_QUOTA ||
+ rport_ptr->status == RESET),
+ msecs_to_jiffies(timeout));
+ if (ret < 0) {
+ return ret;
+ } else if (ret == 0) {
+ IPC_RTR_ERR("%s: Resume_tx Timeout %08x:%08x\n",
+ __func__, rport_ptr->node_id,
+ rport_ptr->port_id);
+ return -ETIMEDOUT;
+ }
+ }
+ }
+ rport_ptr->tx_quota_cnt++;
+ if (rport_ptr->tx_quota_cnt == IPC_ROUTER_LOW_RX_QUOTA)
+ *set_confirm_rx = 1;
+ mutex_unlock(&rport_ptr->rport_lock_lhb2);
+ return 0;
+}
+
+static int msm_ipc_router_write_pkt(struct msm_ipc_port *src,
+ struct msm_ipc_router_remote_port *rport_ptr,
+ struct rr_packet *pkt,
+ long timeout)
+{
+ struct rr_header_v1 *hdr;
+ struct msm_ipc_router_xprt_info *xprt_info;
+ struct msm_ipc_routing_table_entry *rt_entry;
+ struct sk_buff *temp_skb;
+ int xprt_option;
+ int ret;
+ int align_size;
+ uint32_t set_confirm_rx = 0;
+
+ if (!rport_ptr || !src || !pkt)
+ return -EINVAL;
+
+ hdr = &(pkt->hdr);
+ hdr->version = IPC_ROUTER_V1;
+ hdr->type = IPC_ROUTER_CTRL_CMD_DATA;
+ hdr->src_node_id = src->this_port.node_id;
+ hdr->src_port_id = src->this_port.port_id;
+ hdr->size = pkt->length;
+ hdr->control_flag = 0;
+ hdr->dst_node_id = rport_ptr->node_id;
+ hdr->dst_port_id = rport_ptr->port_id;
+
+ ret = ipc_router_tx_wait(src, rport_ptr, &set_confirm_rx, timeout);
+ if (ret < 0)
+ return ret;
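+	/* The low rx quota threshold was crossed: request a confirm_rx so
+	 * that the receiver sends a resume_tx before the sender blocks.
+	 */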
+ if (set_confirm_rx)
+ hdr->control_flag |= CONTROL_FLAG_CONFIRM_RX;
+
+ if (hdr->dst_node_id == IPC_ROUTER_NID_LOCAL) {
+ ipc_router_log_msg(local_log_ctx,
+ IPC_ROUTER_LOG_EVENT_TX, pkt, hdr, src, rport_ptr);
+ ret = loopback_data(src, hdr->dst_port_id, pkt);
+ return ret;
+ }
+
+ rt_entry = ipc_router_get_rtentry_ref(hdr->dst_node_id);
+ if (!rt_entry) {
+ IPC_RTR_ERR("%s: Remote node %d not up\n",
+ __func__, hdr->dst_node_id);
+ return -ENODEV;
+ }
+ down_read(&rt_entry->lock_lha4);
+ xprt_info = rt_entry->xprt_info;
+ ret = ipc_router_get_xprt_info_ref(xprt_info);
+ if (ret < 0) {
+ IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
+ up_read(&rt_entry->lock_lha4);
+ kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+ return ret;
+ }
+ ret = prepend_header(pkt, xprt_info);
+ if (ret < 0) {
+ IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
+ goto out_write_pkt;
+ }
+ xprt_option = xprt_info->xprt->get_option(xprt_info->xprt);
+ if (!(xprt_option & FRAG_PKT_WRITE_ENABLE)) {
+ ret = defragment_pkt(pkt);
+ if (ret < 0)
+ goto out_write_pkt;
+ }
+
+ temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
+ align_size = ALIGN_SIZE(pkt->length);
+ skb_put(temp_skb, align_size);
+ pkt->length += align_size;
+ mutex_lock(&xprt_info->tx_lock_lhb2);
+ ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
+ mutex_unlock(&xprt_info->tx_lock_lhb2);
+out_write_pkt:
+ up_read(&rt_entry->lock_lha4);
+ kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+
+ if (ret < 0) {
+ IPC_RTR_ERR("%s: Write on XPRT failed\n", __func__);
+ ipc_router_log_msg(xprt_info->log_ctx,
+ IPC_ROUTER_LOG_EVENT_TX_ERR, pkt, hdr, src, rport_ptr);
+
+ ipc_router_put_xprt_info_ref(xprt_info);
+ return ret;
+ }
+ update_comm_mode_info(&src->mode_info, xprt_info);
+ ipc_router_log_msg(xprt_info->log_ctx,
+ IPC_ROUTER_LOG_EVENT_TX, pkt, hdr, src, rport_ptr);
+ if (msm_ipc_router_debug_mask & SMEM_LOG) {
+ smem_log_event((SMEM_LOG_PROC_ID_APPS |
+ SMEM_LOG_IPC_ROUTER_EVENT_BASE |
+ IPC_ROUTER_LOG_EVENT_TX),
+ (hdr->src_node_id << 24) |
+ (hdr->src_port_id & 0xffffff),
+ (hdr->dst_node_id << 24) |
+ (hdr->dst_port_id & 0xffffff),
+ (hdr->type << 24) | (hdr->control_flag << 16) |
+ (hdr->size & 0xffff));
+ }
+
+ ipc_router_put_xprt_info_ref(xprt_info);
+ return hdr->size;
+}
+
+int msm_ipc_router_send_to(struct msm_ipc_port *src,
+ struct sk_buff_head *data,
+ struct msm_ipc_addr *dest,
+ long timeout)
+{
+ uint32_t dst_node_id = 0, dst_port_id = 0;
+ struct msm_ipc_server *server;
+ struct msm_ipc_server_port *server_port;
+ struct msm_ipc_router_remote_port *rport_ptr = NULL;
+ struct msm_ipc_router_remote_port *src_rport_ptr = NULL;
+ struct rr_packet *pkt;
+ int ret;
+
+ if (!src || !data || !dest) {
+ IPC_RTR_ERR("%s: Invalid Parameters\n", __func__);
+ return -EINVAL;
+ }
+
+	/* Resolve the destination address */
+ if (dest->addrtype == MSM_IPC_ADDR_ID) {
+ dst_node_id = dest->addr.port_addr.node_id;
+ dst_port_id = dest->addr.port_addr.port_id;
+ } else if (dest->addrtype == MSM_IPC_ADDR_NAME) {
+ server = ipc_router_get_server_ref(
+ dest->addr.port_name.service,
+ dest->addr.port_name.instance,
+ 0, 0);
+ if (!server) {
+ IPC_RTR_ERR("%s: Destination not reachable\n",
+ __func__);
+ return -ENODEV;
+ }
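+		/* Several nodes may host the same <service:instance>; use
+		 * the first registered server port.
+		 */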
+ server_port = list_first_entry(&server->server_port_list,
+ struct msm_ipc_server_port,
+ list);
+ dst_node_id = server_port->server_addr.node_id;
+ dst_port_id = server_port->server_addr.port_id;
+ kref_put(&server->ref, ipc_router_release_server);
+ }
+
+ rport_ptr = ipc_router_get_rport_ref(dst_node_id, dst_port_id);
+ if (!rport_ptr) {
+ IPC_RTR_ERR("%s: Remote port not found\n", __func__);
+ return -ENODEV;
+ }
+
+ if (src->check_send_permissions) {
+ ret = src->check_send_permissions(rport_ptr->sec_rule);
+ if (ret <= 0) {
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ IPC_RTR_ERR("%s: permission failure for %s\n",
+ __func__, current->comm);
+ return -EPERM;
+ }
+ }
+
+ if (dst_node_id == IPC_ROUTER_NID_LOCAL && !src->rport_info) {
+ src_rport_ptr = ipc_router_create_rport(IPC_ROUTER_NID_LOCAL,
+ src->this_port.port_id, NULL);
+ if (!src_rport_ptr) {
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ IPC_RTR_ERR("%s: RPort creation failed\n", __func__);
+ return -ENOMEM;
+ }
+ mutex_lock(&src->port_lock_lhc3);
+ src->rport_info = src_rport_ptr;
+ mutex_unlock(&src->port_lock_lhc3);
+ kref_put(&src_rport_ptr->ref, ipc_router_release_rport);
+ }
+
+ pkt = create_pkt(data);
+ if (!pkt) {
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ IPC_RTR_ERR("%s: Pkt creation failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ ret = msm_ipc_router_write_pkt(src, rport_ptr, pkt, timeout);
+ kref_put(&rport_ptr->ref, ipc_router_release_rport);
+ if (ret < 0)
+ pkt->pkt_fragment_q = NULL;
+ release_pkt(pkt);
+
+ return ret;
+}
+
+int msm_ipc_router_send_msg(struct msm_ipc_port *src,
+ struct msm_ipc_addr *dest,
+ void *data, unsigned int data_len)
+{
+ struct sk_buff_head *out_skb_head;
+ int ret;
+
+ out_skb_head = msm_ipc_router_buf_to_skb(data, data_len);
+ if (!out_skb_head) {
+ IPC_RTR_ERR("%s: SKB conversion failed\n", __func__);
+ return -EFAULT;
+ }
+
+ ret = msm_ipc_router_send_to(src, out_skb_head, dest, 0);
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ IPC_RTR_ERR(
+ "%s: msm_ipc_router_send_to failed - ret: %d\n",
+ __func__, ret);
+ msm_ipc_router_free_skb(out_skb_head);
+ return ret;
+ }
+ return 0;
+}
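+
+/*
+ * Example (illustrative, not part of this patch): a kernel client could
+ * send a small buffer to a named service as sketched below. The port is
+ * assumed to come from msm_ipc_router_create_port(), and the service and
+ * instance IDs are placeholders.
+ *
+ *    struct msm_ipc_addr dest = {
+ *        .addrtype = MSM_IPC_ADDR_NAME,
+ *        .addr.port_name.service = 0x42,
+ *        .addr.port_name.instance = 1,
+ *    };
+ *    char payload[] = "ping";
+ *    int rc;
+ *
+ *    rc = msm_ipc_router_send_msg(port, &dest, payload, sizeof(payload));
+ *    if (rc < 0)
+ *        pr_err("ipc send failed: %d\n", rc);
+ */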
+
+/**
+ * msm_ipc_router_send_resume_tx() - Send Resume_Tx message
+ * @data: Pointer to received data packet that has confirm_rx bit set
+ *
+ * @return: On success, number of bytes transferred is returned, else
+ * standard Linux error code is returned.
+ *
+ * This function sends the Resume_Tx event to the remote node that
+ * sent the data with the confirm_rx field set. In a multi-hop
+ * scenario, it also ensures that the resume_tx event is routed to the
+ * correct destination node_id.
+ */
+static int msm_ipc_router_send_resume_tx(void *data)
+{
+ union rr_control_msg msg;
+ struct rr_header_v1 *hdr = (struct rr_header_v1 *)data;
+ struct msm_ipc_routing_table_entry *rt_entry;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = IPC_ROUTER_CTRL_CMD_RESUME_TX;
+ msg.cli.node_id = hdr->dst_node_id;
+ msg.cli.port_id = hdr->dst_port_id;
+ rt_entry = ipc_router_get_rtentry_ref(hdr->src_node_id);
+ if (!rt_entry) {
+ IPC_RTR_ERR("%s: %d Node is not present",
+ __func__, hdr->src_node_id);
+ return -ENODEV;
+ }
+ ret = ipc_router_get_xprt_info_ref(rt_entry->xprt_info);
+ if (ret < 0) {
+ IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
+ kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+ return ret;
+ }
+ ret = ipc_router_send_ctl_msg(rt_entry->xprt_info, &msg,
+ hdr->src_node_id);
+ ipc_router_put_xprt_info_ref(rt_entry->xprt_info);
+ kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+ if (ret < 0)
+ IPC_RTR_ERR(
+ "%s: Send Resume_Tx Failed SRC_NODE: %d SRC_PORT: %d DEST_NODE: %d",
+ __func__, hdr->dst_node_id, hdr->dst_port_id,
+ hdr->src_node_id);
+
+ return ret;
+}
+
+int msm_ipc_router_read(struct msm_ipc_port *port_ptr,
+ struct rr_packet **read_pkt,
+ size_t buf_len)
+{
+ struct rr_packet *pkt;
+
+ if (!port_ptr || !read_pkt)
+ return -EINVAL;
+
+ mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+ if (list_empty(&port_ptr->port_rx_q)) {
+ mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+ return -EAGAIN;
+ }
+
+ pkt = list_first_entry(&port_ptr->port_rx_q, struct rr_packet, list);
+ if ((buf_len) && (pkt->hdr.size > buf_len)) {
+ mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+ return -ETOOSMALL;
+ }
+ list_del(&pkt->list);
+ if (list_empty(&port_ptr->port_rx_q))
+ __pm_relax(port_ptr->port_rx_ws);
+ *read_pkt = pkt;
+ mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+ if (pkt->hdr.control_flag & CONTROL_FLAG_CONFIRM_RX)
+ msm_ipc_router_send_resume_tx(&pkt->hdr);
+
+ return pkt->length;
+}
+
+/**
+ * msm_ipc_router_rx_data_wait() - Wait for a new message destined for a local port.
+ * @port_ptr: Pointer to the local port
+ * @timeout: < 0 timeout indicates infinite wait until a message arrives.
+ * > 0 timeout indicates the wait time.
+ * 0 indicates that we do not wait.
+ * @return: 0 if there are pending messages to read,
+ * standard Linux error code otherwise.
+ *
+ * Checks for the availability of messages destined for a local port.
+ * If no messages are present, it waits as per @timeout.
+ */
+int msm_ipc_router_rx_data_wait(struct msm_ipc_port *port_ptr, long timeout)
+{
+ int ret = 0;
+
+ mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+ while (list_empty(&port_ptr->port_rx_q)) {
+ mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+ if (timeout < 0) {
+ ret = wait_event_interruptible(
+ port_ptr->port_rx_wait_q,
+ !list_empty(&port_ptr->port_rx_q));
+ if (ret)
+ return ret;
+ } else if (timeout > 0) {
+ timeout = wait_event_interruptible_timeout(
+ port_ptr->port_rx_wait_q,
+ !list_empty(&port_ptr->port_rx_q),
+ timeout);
+ if (timeout < 0)
+ return -EFAULT;
+ }
+ if (timeout == 0)
+ return -ENOMSG;
+ mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+ }
+ mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+
+ return ret;
+}
+
+/**
+ * msm_ipc_router_recv_from() - Receive messages destined for a local port.
+ * @port_ptr: Pointer to the local port
+ * @pkt: Pointer to the router-to-router packet
+ * @src: If valid, filled in with the source address of the message
+ * @timeout: < 0 timeout indicates infinite wait until a message arrives.
+ * > 0 timeout indicates the wait time.
+ * 0 indicates that we do not wait.
+ * @return: = Number of bytes read (on a successful read operation).
+ * = -ENOMSG (If there are no pending messages and timeout is 0).
+ * = -EINVAL (If either of the arguments, port_ptr or pkt, is invalid)
+ * = -EFAULT (If there are no pending messages when timeout is > 0
+ * and wait_event_interruptible_timeout has returned a value < 0)
+ * = -ERESTARTSYS (If there are no pending messages when timeout
+ * is < 0 and wait_event_interruptible was interrupted by a signal)
+ *
+ * This function reads the messages that are destined for a local port. It
+ * is used by modules that exist within the kernel and use IPC Router for
+ * transport. The function checks if there are any messages that are already
+ * received. If yes, it reads them, else it waits as per the timeout value.
+ * On a successful read, the return value of the function indicates the number
+ * of bytes that are read.
+ */
+int msm_ipc_router_recv_from(struct msm_ipc_port *port_ptr,
+ struct rr_packet **pkt,
+ struct msm_ipc_addr *src,
+ long timeout)
+{
+ int ret, data_len, align_size;
+ struct sk_buff *temp_skb;
+ struct rr_header_v1 *hdr = NULL;
+
+ if (!port_ptr || !pkt) {
+ IPC_RTR_ERR("%s: Invalid pointers being passed\n", __func__);
+ return -EINVAL;
+ }
+
+ *pkt = NULL;
+
+ ret = msm_ipc_router_rx_data_wait(port_ptr, timeout);
+ if (ret)
+ return ret;
+
+ ret = msm_ipc_router_read(port_ptr, pkt, 0);
+ if (ret <= 0 || !(*pkt))
+ return ret;
+
+ hdr = &((*pkt)->hdr);
+ if (src) {
+ src->addrtype = MSM_IPC_ADDR_ID;
+ src->addr.port_addr.node_id = hdr->src_node_id;
+ src->addr.port_addr.port_id = hdr->src_port_id;
+ }
+
+ data_len = hdr->size;
+ align_size = ALIGN_SIZE(data_len);
+ if (align_size) {
+ temp_skb = skb_peek_tail((*pkt)->pkt_fragment_q);
+ skb_trim(temp_skb, (temp_skb->len - align_size));
+ }
+ return data_len;
+}
+
+int msm_ipc_router_read_msg(struct msm_ipc_port *port_ptr,
+ struct msm_ipc_addr *src,
+ unsigned char **data,
+ unsigned int *len)
+{
+ struct rr_packet *pkt;
+ int ret;
+
+ ret = msm_ipc_router_recv_from(port_ptr, &pkt, src, 0);
+ if (ret < 0) {
+ if (ret != -ENOMSG)
+ IPC_RTR_ERR(
+ "%s: msm_ipc_router_recv_from failed - ret: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ *data = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, ret);
+ if (!(*data)) {
+ IPC_RTR_ERR("%s: Buf conversion failed\n", __func__);
+ release_pkt(pkt);
+ return -ENOMEM;
+ }
+
+ *len = ret;
+ release_pkt(pkt);
+ return 0;
+}
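+
+/*
+ * Example (illustrative, not part of this patch): polling for an
+ * incoming message with msm_ipc_router_read_msg(). A return of -ENOMSG
+ * means nothing is pending. On success the returned buffer comes from
+ * the SKB-to-buffer conversion and is assumed to be released with
+ * kfree().
+ *
+ *    struct msm_ipc_addr src;
+ *    unsigned char *data;
+ *    unsigned int len;
+ *    int rc;
+ *
+ *    rc = msm_ipc_router_read_msg(port, &src, &data, &len);
+ *    if (rc < 0)
+ *        return rc;
+ *    pr_info("%u bytes from node 0x%x port 0x%x\n", len,
+ *        src.addr.port_addr.node_id, src.addr.port_addr.port_id);
+ *    kfree(data);
+ */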
+
+/**
+ * msm_ipc_router_create_port() - Create an IPC Router port/endpoint
+ * @notify: Callback function to notify any event on the port. It is
+ * called with the event ID, any out-of-band data along with its
+ * length, and the private data registered at port creation.
+ * @priv: Private info to be passed while the notification is generated.
+ *
+ * @return: Pointer to the port on success, NULL on error.
+ */
+struct msm_ipc_port *msm_ipc_router_create_port(
+ void (*notify)(unsigned event, void *oob_data,
+ size_t oob_data_len, void *priv),
+ void *priv)
+{
+ struct msm_ipc_port *port_ptr;
+ int ret;
+
+ ret = ipc_router_core_init();
+ if (ret < 0) {
+ IPC_RTR_ERR("%s: Error %d initializing IPC Router\n",
+ __func__, ret);
+ return NULL;
+ }
+
+ port_ptr = msm_ipc_router_create_raw_port(NULL, notify, priv);
+ if (!port_ptr)
+ IPC_RTR_ERR("%s: port_ptr alloc failed\n", __func__);
+
+ return port_ptr;
+}
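+
+/*
+ * Example (illustrative, not part of this patch): creating a port with
+ * an event-notification callback. The callback signature must match the
+ * one expected by msm_ipc_router_create_port(); my_ctx is a placeholder
+ * for caller context.
+ *
+ *    static void my_notify(unsigned event, void *oob_data,
+ *                          size_t oob_data_len, void *priv)
+ *    {
+ *        pr_debug("ipc event %u, oob len %zu\n", event, oob_data_len);
+ *    }
+ *
+ *    port = msm_ipc_router_create_port(my_notify, my_ctx);
+ *    if (!port)
+ *        return -ENOMEM;
+ */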
+
+int msm_ipc_router_close_port(struct msm_ipc_port *port_ptr)
+{
+ union rr_control_msg msg;
+ struct msm_ipc_server *server;
+ struct msm_ipc_router_remote_port *rport_ptr;
+
+ if (!port_ptr)
+ return -EINVAL;
+
+ if (port_ptr->type == SERVER_PORT || port_ptr->type == CLIENT_PORT) {
+ down_write(&local_ports_lock_lhc2);
+ list_del(&port_ptr->list);
+ up_write(&local_ports_lock_lhc2);
+
+ mutex_lock(&port_ptr->port_lock_lhc3);
+ rport_ptr = (struct msm_ipc_router_remote_port *)
+ port_ptr->rport_info;
+ port_ptr->rport_info = NULL;
+ mutex_unlock(&port_ptr->port_lock_lhc3);
+ if (rport_ptr) {
+ ipc_router_reset_conn(rport_ptr);
+ ipc_router_destroy_rport(rport_ptr);
+ }
+
+ if (port_ptr->type == SERVER_PORT) {
+ memset(&msg, 0, sizeof(msg));
+ msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
+ msg.srv.service = port_ptr->port_name.service;
+ msg.srv.instance = port_ptr->port_name.instance;
+ msg.srv.node_id = port_ptr->this_port.node_id;
+ msg.srv.port_id = port_ptr->this_port.port_id;
+ broadcast_ctl_msg(&msg);
+ }
+
+ /* Server port could have been a client port earlier.
+ * Send REMOVE_CLIENT message in either case.
+ */
+ msm_ipc_router_send_remove_client(&port_ptr->mode_info,
+ port_ptr->this_port.node_id,
+ port_ptr->this_port.port_id);
+ } else if (port_ptr->type == CONTROL_PORT) {
+ down_write(&control_ports_lock_lha5);
+ list_del(&port_ptr->list);
+ up_write(&control_ports_lock_lha5);
+ } else if (port_ptr->type == IRSC_PORT) {
+ down_write(&local_ports_lock_lhc2);
+ list_del(&port_ptr->list);
+ up_write(&local_ports_lock_lhc2);
+ signal_irsc_completion();
+ }
+
+ if (port_ptr->type == SERVER_PORT) {
+ server = ipc_router_get_server_ref(
+ port_ptr->port_name.service,
+ port_ptr->port_name.instance,
+ port_ptr->this_port.node_id,
+ port_ptr->this_port.port_id);
+ if (server) {
+ kref_put(&server->ref, ipc_router_release_server);
+ ipc_router_destroy_server(server,
+ port_ptr->this_port.node_id,
+ port_ptr->this_port.port_id);
+ }
+ }
+
+ mutex_lock(&port_ptr->port_lock_lhc3);
+ rport_ptr = (struct msm_ipc_router_remote_port *)port_ptr->rport_info;
+ port_ptr->rport_info = NULL;
+ mutex_unlock(&port_ptr->port_lock_lhc3);
+ if (rport_ptr)
+ ipc_router_destroy_rport(rport_ptr);
+
+ kref_put(&port_ptr->ref, ipc_router_release_port);
+ return 0;
+}
+
+int msm_ipc_router_get_curr_pkt_size(struct msm_ipc_port *port_ptr)
+{
+ struct rr_packet *pkt;
+ int rc = 0;
+
+ if (!port_ptr)
+ return -EINVAL;
+
+ mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+ if (!list_empty(&port_ptr->port_rx_q)) {
+ pkt = list_first_entry(&port_ptr->port_rx_q,
+ struct rr_packet, list);
+ rc = pkt->hdr.size;
+ }
+ mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+
+ return rc;
+}
+
+int msm_ipc_router_bind_control_port(struct msm_ipc_port *port_ptr)
+{
+ if (unlikely(!port_ptr || port_ptr->type != CLIENT_PORT))
+ return -EINVAL;
+
+ down_write(&local_ports_lock_lhc2);
+ list_del(&port_ptr->list);
+ up_write(&local_ports_lock_lhc2);
+ port_ptr->type = CONTROL_PORT;
+ down_write(&control_ports_lock_lha5);
+ list_add_tail(&port_ptr->list, &control_ports);
+ up_write(&control_ports_lock_lha5);
+
+ return 0;
+}
+
+int msm_ipc_router_lookup_server_name(struct msm_ipc_port_name *srv_name,
+ struct msm_ipc_server_info *srv_info,
+ int num_entries_in_array,
+ uint32_t lookup_mask)
+{
+ struct msm_ipc_server *server;
+ struct msm_ipc_server_port *server_port;
+ int key, i = 0; /* i counts the entries found */
+
+ if (!srv_name) {
+ IPC_RTR_ERR("%s: Invalid srv_name\n", __func__);
+ return -EINVAL;
+ }
+
+ if (num_entries_in_array && !srv_info) {
+ IPC_RTR_ERR("%s: srv_info NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ down_read(&server_list_lock_lha2);
+ key = (srv_name->service & (SRV_HASH_SIZE - 1));
+ list_for_each_entry(server, &server_list[key], list) {
+ if ((server->name.service != srv_name->service) ||
+ ((server->name.instance & lookup_mask) !=
+ srv_name->instance))
+ continue;
+
+ list_for_each_entry(server_port,
+ &server->server_port_list, list) {
+ if (i < num_entries_in_array) {
+ srv_info[i].node_id =
+ server_port->server_addr.node_id;
+ srv_info[i].port_id =
+ server_port->server_addr.port_id;
+ srv_info[i].service = server->name.service;
+ srv_info[i].instance = server->name.instance;
+ }
+ i++;
+ }
+ }
+ up_read(&server_list_lock_lha2);
+
+ return i;
+}
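+
+/*
+ * Example (illustrative, not part of this patch): since the return value
+ * is the total number of matching server ports, a caller can size its
+ * result array in two passes. A full instance mask (0xFFFFFFFF) requires
+ * an exact instance match; clearing mask bits widens the lookup.
+ *
+ *    struct msm_ipc_port_name name = { .service = 0x42, .instance = 1 };
+ *    struct msm_ipc_server_info *info;
+ *    int n;
+ *
+ *    n = msm_ipc_router_lookup_server_name(&name, NULL, 0, 0xFFFFFFFF);
+ *    if (n <= 0)
+ *        return n;
+ *    info = kcalloc(n, sizeof(*info), GFP_KERNEL);
+ *    if (!info)
+ *        return -ENOMEM;
+ *    n = msm_ipc_router_lookup_server_name(&name, info, n, 0xFFFFFFFF);
+ */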
+
+int msm_ipc_router_close(void)
+{
+ struct msm_ipc_router_xprt_info *xprt_info, *tmp_xprt_info;
+
+ down_write(&xprt_info_list_lock_lha5);
+ list_for_each_entry_safe(xprt_info, tmp_xprt_info,
+ &xprt_info_list, list) {
+ xprt_info->xprt->close(xprt_info->xprt);
+ list_del(&xprt_info->list);
+ kfree(xprt_info);
+ }
+ up_write(&xprt_info_list_lock_lha5);
+ return 0;
+}
+
+/**
+ * pil_vote_load_worker() - Process vote to load the modem
+ *
+ * @work: Work item to process
+ *
+ * This function is called to process votes to load the modem that have been
+ * queued by msm_ipc_load_default_node().
+ */
+static void pil_vote_load_worker(struct work_struct *work)
+{
+ struct pil_vote_info *vote_info;
+
+ vote_info = container_of(work, struct pil_vote_info, load_work);
+ if (strlen(default_peripheral)) {
+ vote_info->pil_handle = subsystem_get(default_peripheral);
+ if (IS_ERR(vote_info->pil_handle)) {
+ IPC_RTR_ERR("%s: Failed to load %s\n",
+ __func__, default_peripheral);
+ vote_info->pil_handle = NULL;
+ }
+ } else {
+ vote_info->pil_handle = NULL;
+ }
+}
+
+/**
+ * pil_vote_unload_worker() - Process vote to unload the modem
+ *
+ * @work: Work item to process
+ *
+ * This function is called to process votes to unload the modem that have been
+ * queued by msm_ipc_unload_default_node().
+ */
+static void pil_vote_unload_worker(struct work_struct *work)
+{
+ struct pil_vote_info *vote_info;
+
+ vote_info = container_of(work, struct pil_vote_info, unload_work);
+
+ if (vote_info->pil_handle) {
+ subsystem_put(vote_info->pil_handle);
+ vote_info->pil_handle = NULL;
+ }
+ kfree(vote_info);
+}
+
+/**
+ * msm_ipc_load_default_node() - Queue a vote to load the modem.
+ *
+ * @return: PIL vote info structure on success, NULL on failure.
+ *
+ * This function places a work item that loads the modem on the
+ * single-threaded workqueue used for processing PIL votes to load
+ * or unload the modem.
+ */
+void *msm_ipc_load_default_node(void)
+{
+ struct pil_vote_info *vote_info;
+
+ vote_info = kmalloc(sizeof(*vote_info), GFP_KERNEL);
+ if (!vote_info)
+ return NULL;
+
+ INIT_WORK(&vote_info->load_work, pil_vote_load_worker);
+ queue_work(msm_ipc_router_workqueue, &vote_info->load_work);
+
+ return vote_info;
+}
+
+/**
+ * msm_ipc_unload_default_node() - Queue a vote to unload the modem.
+ *
+ * @pil_vote: PIL vote info structure, containing the PIL handle
+ * and work structure.
+ *
+ * This function places a work item that unloads the modem on the
+ * single-threaded workqueue used for processing PIL votes to load
+ * or unload the modem.
+ */
+void msm_ipc_unload_default_node(void *pil_vote)
+{
+ struct pil_vote_info *vote_info;
+
+ if (pil_vote) {
+ vote_info = (struct pil_vote_info *)pil_vote;
+ INIT_WORK(&vote_info->unload_work, pil_vote_unload_worker);
+ queue_work(msm_ipc_router_workqueue, &vote_info->unload_work);
+ }
+}
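+
+/*
+ * Example (illustrative, not part of this patch): the load and unload
+ * calls pair up around a window in which the modem must stay loaded.
+ * The returned handle is opaque; passing it back on unload frees it.
+ * The unload path tolerates both a NULL handle and a vote taken while
+ * no default peripheral was configured.
+ *
+ *    void *pil = msm_ipc_load_default_node();
+ *
+ *    ... communicate with the remote service ...
+ *
+ *    msm_ipc_unload_default_node(pil);
+ */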
+
+#if defined(CONFIG_DEBUG_FS)
+static void dump_routing_table(struct seq_file *s)
+{
+ int j;
+ struct msm_ipc_routing_table_entry *rt_entry;
+
+ seq_printf(s, "%-10s|%-20s|%-10s|\n",
+ "Node Id", "XPRT Name", "Next Hop");
+ seq_puts(s, "----------------------------------------------\n");
+ for (j = 0; j < RT_HASH_SIZE; j++) {
+ down_read(&routing_table_lock_lha3);
+ list_for_each_entry(rt_entry, &routing_table[j], list) {
+ down_read(&rt_entry->lock_lha4);
+ seq_printf(s, "0x%08x|", rt_entry->node_id);
+ if (rt_entry->node_id == IPC_ROUTER_NID_LOCAL)
+ seq_printf(s, "%-20s|0x%08x|\n",
+ "Loopback", rt_entry->node_id);
+ else
+ seq_printf(s, "%-20s|0x%08x|\n",
+ rt_entry->xprt_info->xprt->name,
+ rt_entry->node_id);
+ up_read(&rt_entry->lock_lha4);
+ }
+ up_read(&routing_table_lock_lha3);
+ }
+}
+
+static void dump_xprt_info(struct seq_file *s)
+{
+ struct msm_ipc_router_xprt_info *xprt_info;
+
+ seq_printf(s, "%-20s|%-10s|%-12s|%-15s|\n",
+ "XPRT Name", "Link ID",
+ "Initialized", "Remote Node Id");
+ seq_puts(s, "------------------------------------------------------------\n");
+ down_read(&xprt_info_list_lock_lha5);
+ list_for_each_entry(xprt_info, &xprt_info_list, list)
+ seq_printf(s, "%-20s|0x%08x|%-12s|0x%08x|\n",
+ xprt_info->xprt->name,
+ xprt_info->xprt->link_id,
+ (xprt_info->initialized ? "Y" : "N"),
+ xprt_info->remote_node_id);
+ up_read(&xprt_info_list_lock_lha5);
+}
+
+static void dump_servers(struct seq_file *s)
+{
+ int j;
+ struct msm_ipc_server *server;
+ struct msm_ipc_server_port *server_port;
+
+ seq_printf(s, "%-11s|%-11s|%-11s|%-11s|\n",
+ "Service", "Instance", "Node_id", "Port_id");
+ seq_puts(s, "------------------------------------------------------------\n");
+ down_read(&server_list_lock_lha2);
+ for (j = 0; j < SRV_HASH_SIZE; j++) {
+ list_for_each_entry(server, &server_list[j], list) {
+ list_for_each_entry(server_port,
+ &server->server_port_list,
+ list)
+ seq_printf(s, "0x%08x |0x%08x |0x%08x |0x%08x |\n",
+ server->name.service,
+ server->name.instance,
+ server_port->server_addr.node_id,
+ server_port->server_addr.port_id);
+ }
+ }
+ up_read(&server_list_lock_lha2);
+}
+
+static void dump_remote_ports(struct seq_file *s)
+{
+ int j, k;
+ struct msm_ipc_router_remote_port *rport_ptr;
+ struct msm_ipc_routing_table_entry *rt_entry;
+
+ seq_printf(s, "%-11s|%-11s|%-10s|\n",
+ "Node_id", "Port_id", "Quota_cnt");
+ seq_puts(s, "------------------------------------------------------------\n");
+ for (j = 0; j < RT_HASH_SIZE; j++) {
+ down_read(&routing_table_lock_lha3);
+ list_for_each_entry(rt_entry, &routing_table[j], list) {
+ down_read(&rt_entry->lock_lha4);
+ for (k = 0; k < RP_HASH_SIZE; k++) {
+ list_for_each_entry(rport_ptr,
+ &rt_entry->remote_port_list[k],
+ list)
+ seq_printf(s, "0x%08x |0x%08x |0x%08x|\n",
+ rport_ptr->node_id,
+ rport_ptr->port_id,
+ rport_ptr->tx_quota_cnt);
+ }
+ up_read(&rt_entry->lock_lha4);
+ }
+ up_read(&routing_table_lock_lha3);
+ }
+}
+
+static void dump_control_ports(struct seq_file *s)
+{
+ struct msm_ipc_port *port_ptr;
+
+ seq_printf(s, "%-11s|%-11s|\n",
+ "Node_id", "Port_id");
+ seq_puts(s, "------------------------------------------------------------\n");
+ down_read(&control_ports_lock_lha5);
+ list_for_each_entry(port_ptr, &control_ports, list)
+ seq_printf(s, "0x%08x |0x%08x |\n",
+ port_ptr->this_port.node_id,
+ port_ptr->this_port.port_id);
+ up_read(&control_ports_lock_lha5);
+}
+
+static void dump_local_ports(struct seq_file *s)
+{
+ int j;
+ struct msm_ipc_port *port_ptr;
+
+ seq_printf(s, "%-11s|%-11s|\n",
+ "Node_id", "Port_id");
+ seq_puts(s, "------------------------------------------------------------\n");
+ down_read(&local_ports_lock_lhc2);
+ for (j = 0; j < LP_HASH_SIZE; j++) {
+ list_for_each_entry(port_ptr, &local_ports[j], list) {
+ mutex_lock(&port_ptr->port_lock_lhc3);
+ seq_printf(s, "0x%08x |0x%08x |\n",
+ port_ptr->this_port.node_id,
+ port_ptr->this_port.port_id);
+ mutex_unlock(&port_ptr->port_lock_lhc3);
+ }
+ }
+ up_read(&local_ports_lock_lhc2);
+}
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+ void (*show)(struct seq_file *) = s->private;
+ show(s);
+ return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+ .open = debug_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+static void debug_create(const char *name, struct dentry *dent,
+ void (*show)(struct seq_file *))
+{
+ debugfs_create_file(name, 0444, dent, show, &debug_ops);
+}
+
+static void debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("msm_ipc_router", NULL);
+ if (IS_ERR_OR_NULL(dent))
+ return;
+
+ debug_create("dump_local_ports", dent, dump_local_ports);
+ debug_create("dump_remote_ports", dent, dump_remote_ports);
+ debug_create("dump_control_ports", dent, dump_control_ports);
+ debug_create("dump_servers", dent, dump_servers);
+ debug_create("dump_xprt_info", dent, dump_xprt_info);
+ debug_create("dump_routing_table", dent, dump_routing_table);
+}
+
+#else
+static void debugfs_init(void) {}
+#endif
+
+/**
+ * ipc_router_create_log_ctx() - Create and add the log context based on transport
+ * @name: subsystem name
+ *
+ * Return: a reference to the log context created
+ *
+ * This function creates an IPC log context for the given transport and
+ * adds it to a global list, from which it can be reused after a
+ * subsystem restart. Callers must hold log_ctx_list_lock_lha0.
+ */
+static void *ipc_router_create_log_ctx(char *name)
+{
+ struct ipc_rtr_log_ctx *sub_log_ctx;
+
+ sub_log_ctx = kmalloc(sizeof(struct ipc_rtr_log_ctx),
+ GFP_KERNEL);
+ if (!sub_log_ctx)
+ return NULL;
+ sub_log_ctx->log_ctx = ipc_log_context_create(
+ IPC_RTR_INFO_PAGES, name, 0);
+ if (!sub_log_ctx->log_ctx) {
+ IPC_RTR_ERR("%s: Unable to create IPC logging for [%s]",
+ __func__, name);
+ kfree(sub_log_ctx);
+ return NULL;
+ }
+ strlcpy(sub_log_ctx->log_ctx_name, name,
+ LOG_CTX_NAME_LEN);
+ INIT_LIST_HEAD(&sub_log_ctx->list);
+ list_add_tail(&sub_log_ctx->list, &log_ctx_list);
+ return sub_log_ctx->log_ctx;
+}
+
+static void ipc_router_log_ctx_init(void)
+{
+ mutex_lock(&log_ctx_list_lock_lha0);
+ local_log_ctx = ipc_router_create_log_ctx("local_IPCRTR");
+ mutex_unlock(&log_ctx_list_lock_lha0);
+}
+
+/**
+ * ipc_router_get_log_ctx() - Retrieves the ipc log context based on subsystem name.
+ * @sub_name: subsystem name
+ *
+ * Return: a reference to the log context
+ */
+static void *ipc_router_get_log_ctx(char *sub_name)
+{
+ void *log_ctx = NULL;
+ struct ipc_rtr_log_ctx *temp_log_ctx;
+
+ mutex_lock(&log_ctx_list_lock_lha0);
+ list_for_each_entry(temp_log_ctx, &log_ctx_list, list)
+ if (!strcmp(temp_log_ctx->log_ctx_name, sub_name)) {
+ log_ctx = temp_log_ctx->log_ctx;
+ mutex_unlock(&log_ctx_list_lock_lha0);
+ return log_ctx;
+ }
+ log_ctx = ipc_router_create_log_ctx(sub_name);
+ mutex_unlock(&log_ctx_list_lock_lha0);
+
+ return log_ctx;
+}
+
+/**
+ * ipc_router_get_xprt_info_ref() - Get a reference to the xprt_info structure
+ * @xprt_info: pointer to the xprt_info.
+ *
+ * @return: Zero on success, -ENODEV on failure.
+ *
+ * This function is used to obtain a reference to the xprt_info structure
+ * corresponding to the requested @xprt_info pointer.
+ */
+static int ipc_router_get_xprt_info_ref(
+ struct msm_ipc_router_xprt_info *xprt_info)
+{
+ int ret = -ENODEV;
+ struct msm_ipc_router_xprt_info *tmp_xprt_info;
+
+ if (!xprt_info)
+ return 0;
+
+ down_read(&xprt_info_list_lock_lha5);
+ list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) {
+ if (tmp_xprt_info == xprt_info) {
+ kref_get(&xprt_info->ref);
+ ret = 0;
+ break;
+ }
+ }
+ up_read(&xprt_info_list_lock_lha5);
+
+ return ret;
+}
+
+/**
+ * ipc_router_put_xprt_info_ref() - Put a reference to the xprt_info structure
+ * @xprt_info: pointer to the xprt_info.
+ *
+ * This function is used to put the reference to the xprt_info structure
+ * corresponding to the requested @xprt_info pointer.
+ */
+static void ipc_router_put_xprt_info_ref(
+ struct msm_ipc_router_xprt_info *xprt_info)
+{
+ if (xprt_info)
+ kref_put(&xprt_info->ref, ipc_router_release_xprt_info_ref);
+}
+
+/**
+ * ipc_router_release_xprt_info_ref() - release the xprt_info last reference
+ * @ref: Reference to the xprt_info structure.
+ *
+ * This function is called when all references to the xprt_info structure
+ * are released.
+ */
+static void ipc_router_release_xprt_info_ref(struct kref *ref)
+{
+ struct msm_ipc_router_xprt_info *xprt_info =
+ container_of(ref, struct msm_ipc_router_xprt_info, ref);
+
+ complete_all(&xprt_info->ref_complete);
+}
+
+static int msm_ipc_router_add_xprt(struct msm_ipc_router_xprt *xprt)
+{
+ struct msm_ipc_router_xprt_info *xprt_info;
+
+ xprt_info = kmalloc(sizeof(struct msm_ipc_router_xprt_info),
+ GFP_KERNEL);
+ if (!xprt_info)
+ return -ENOMEM;
+
+ xprt_info->xprt = xprt;
+ xprt_info->initialized = 0;
+ xprt_info->remote_node_id = -1;
+ INIT_LIST_HEAD(&xprt_info->pkt_list);
+ mutex_init(&xprt_info->rx_lock_lhb2);
+ mutex_init(&xprt_info->tx_lock_lhb2);
+ wakeup_source_init(&xprt_info->ws, xprt->name);
+ xprt_info->need_len = 0;
+ xprt_info->abort_data_read = 0;
+ INIT_WORK(&xprt_info->read_data, do_read_data);
+ INIT_LIST_HEAD(&xprt_info->list);
+ kref_init(&xprt_info->ref);
+ init_completion(&xprt_info->ref_complete);
+
+ xprt_info->workqueue = create_singlethread_workqueue(xprt->name);
+ if (!xprt_info->workqueue) {
+ kfree(xprt_info);
+ return -ENOMEM;
+ }
+
+ xprt_info->log_ctx = ipc_router_get_log_ctx(xprt->name);
+
+ if (!strcmp(xprt->name, "msm_ipc_router_loopback_xprt")) {
+ xprt_info->remote_node_id = IPC_ROUTER_NID_LOCAL;
+ xprt_info->initialized = 1;
+ }
+
+ IPC_RTR_INFO(xprt_info->log_ctx, "Adding xprt: [%s]\n",
+ xprt->name);
+ down_write(&xprt_info_list_lock_lha5);
+ list_add_tail(&xprt_info->list, &xprt_info_list);
+ up_write(&xprt_info_list_lock_lha5);
+
+ down_write(&routing_table_lock_lha3);
+ if (!routing_table_inited) {
+ init_routing_table();
+ routing_table_inited = 1;
+ }
+ up_write(&routing_table_lock_lha3);
+
+ xprt->priv = xprt_info;
+
+ return 0;
+}
+
+static void msm_ipc_router_remove_xprt(struct msm_ipc_router_xprt *xprt)
+{
+ struct msm_ipc_router_xprt_info *xprt_info;
+ struct rr_packet *temp_pkt, *pkt;
+
+ if (xprt && xprt->priv) {
+ xprt_info = xprt->priv;
+
+ IPC_RTR_INFO(xprt_info->log_ctx, "Removing xprt: [%s]\n",
+ xprt->name);
+ mutex_lock(&xprt_info->rx_lock_lhb2);
+ xprt_info->abort_data_read = 1;
+ mutex_unlock(&xprt_info->rx_lock_lhb2);
+ flush_workqueue(xprt_info->workqueue);
+ destroy_workqueue(xprt_info->workqueue);
+ mutex_lock(&xprt_info->rx_lock_lhb2);
+ list_for_each_entry_safe(pkt, temp_pkt,
+ &xprt_info->pkt_list, list) {
+ list_del(&pkt->list);
+ release_pkt(pkt);
+ }
+ mutex_unlock(&xprt_info->rx_lock_lhb2);
+
+ down_write(&xprt_info_list_lock_lha5);
+ list_del(&xprt_info->list);
+ up_write(&xprt_info_list_lock_lha5);
+
+ msm_ipc_cleanup_routing_table(xprt_info);
+
+ wakeup_source_trash(&xprt_info->ws);
+
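+ /*
+ * Drop the list's reference and wait for any transient holders
+ * (e.g. in-flight senders) to release theirs;
+ * ipc_router_release_xprt_info_ref() completes ref_complete
+ * once the refcount hits zero.
+ */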
+ ipc_router_put_xprt_info_ref(xprt_info);
+ wait_for_completion(&xprt_info->ref_complete);
+
+ xprt->priv = NULL;
+ kfree(xprt_info);
+ }
+}
+
+
+struct msm_ipc_router_xprt_work {
+ struct msm_ipc_router_xprt *xprt;
+ struct work_struct work;
+};
+
+static void xprt_open_worker(struct work_struct *work)
+{
+ struct msm_ipc_router_xprt_work *xprt_work =
+ container_of(work, struct msm_ipc_router_xprt_work, work);
+
+ msm_ipc_router_add_xprt(xprt_work->xprt);
+ kfree(xprt_work);
+}
+
+static void xprt_close_worker(struct work_struct *work)
+{
+ struct msm_ipc_router_xprt_work *xprt_work =
+ container_of(work, struct msm_ipc_router_xprt_work, work);
+
+ msm_ipc_router_remove_xprt(xprt_work->xprt);
+ xprt_work->xprt->sft_close_done(xprt_work->xprt);
+ kfree(xprt_work);
+}
+
+void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt,
+ unsigned event,
+ void *data)
+{
+ struct msm_ipc_router_xprt_info *xprt_info = xprt->priv;
+ struct msm_ipc_router_xprt_work *xprt_work;
+ struct rr_packet *pkt;
+ int ret;
+
+ ret = ipc_router_core_init();
+ if (ret < 0) {
+ IPC_RTR_ERR("%s: Error %d initializing IPC Router\n",
+ __func__, ret);
+ return;
+ }
+
+ switch (event) {
+ case IPC_ROUTER_XPRT_EVENT_OPEN:
+ xprt_work = kmalloc(sizeof(struct msm_ipc_router_xprt_work),
+ GFP_ATOMIC);
+ if (xprt_work) {
+ xprt_work->xprt = xprt;
+ INIT_WORK(&xprt_work->work, xprt_open_worker);
+ queue_work(msm_ipc_router_workqueue, &xprt_work->work);
+ } else {
+ IPC_RTR_ERR(
+ "%s: malloc failure - Couldn't notify OPEN event",
+ __func__);
+ }
+ break;
+
+ case IPC_ROUTER_XPRT_EVENT_CLOSE:
+ xprt_work = kmalloc(sizeof(struct msm_ipc_router_xprt_work),
+ GFP_ATOMIC);
+ if (xprt_work) {
+ xprt_work->xprt = xprt;
+ INIT_WORK(&xprt_work->work, xprt_close_worker);
+ queue_work(msm_ipc_router_workqueue, &xprt_work->work);
+ } else {
+ IPC_RTR_ERR(
+ "%s: malloc failure - Couldn't notify CLOSE event",
+ __func__);
+ }
+ break;
+ }
+
+ if (!data)
+ return;
+
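+ /*
+ * Packet delivery can race with the OPEN event, which is handled
+ * asynchronously by xprt_open_worker(). Poll until that worker
+ * has set xprt->priv before queueing the received packet.
+ */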
+ while (!xprt_info) {
+ msleep(100);
+ xprt_info = xprt->priv;
+ }
+
+ pkt = clone_pkt((struct rr_packet *)data);
+ if (!pkt)
+ return;
+
+ mutex_lock(&xprt_info->rx_lock_lhb2);
+ list_add_tail(&pkt->list, &xprt_info->pkt_list);
+ __pm_stay_awake(&xprt_info->ws);
+ mutex_unlock(&xprt_info->rx_lock_lhb2);
+ queue_work(xprt_info->workqueue, &xprt_info->read_data);
+}
+
+/**
+ * parse_devicetree() - parse device tree binding
+ *
+ * @node: pointer to device tree node
+ *
+ * @return: 0 on success, -ENODEV on failure.
+ */
+static int parse_devicetree(struct device_node *node)
+{
+ char *key;
+ const char *peripheral = NULL;
+
+ key = "qcom,default-peripheral";
+ peripheral = of_get_property(node, key, NULL);
+ if (peripheral)
+ strlcpy(default_peripheral, peripheral, PIL_SUBSYSTEM_NAME_LEN);
+
+ return 0;
+}
+
+/**
+ * ipc_router_probe() - Probe the IPC Router
+ *
+ * @pdev: Platform device corresponding to IPC Router.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to IPC Router.
+ */
+static int ipc_router_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ if (pdev && pdev->dev.of_node) {
+ ret = parse_devicetree(pdev->dev.of_node);
+ if (ret)
+ IPC_RTR_ERR("%s: Failed to parse device tree\n",
+ __func__);
+ }
+ return ret;
+}
+
+static const struct of_device_id ipc_router_match_table[] = {
+ { .compatible = "qcom,ipc_router" },
+ {},
+};
+
+static struct platform_driver ipc_router_driver = {
+ .probe = ipc_router_probe,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = ipc_router_match_table,
+ },
+};
+
+/**
+ * ipc_router_core_init() - Initialize all IPC Router core data structures
+ *
+ * Return: 0 on success, standard error code otherwise.
+ *
+ * This function initializes only the core data structures of the IPC Router
+ * module. The remaining initialization is done inside msm_ipc_router_init().
+ */
+static int ipc_router_core_init(void)
+{
+ int i;
+ int ret;
+ struct msm_ipc_routing_table_entry *rt_entry;
+
+ mutex_lock(&ipc_router_init_lock);
+ if (likely(is_ipc_router_inited)) {
+ mutex_unlock(&ipc_router_init_lock);
+ return 0;
+ }
+
+ debugfs_init();
+
+ for (i = 0; i < SRV_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&server_list[i]);
+
+ for (i = 0; i < LP_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&local_ports[i]);
+
+ down_write(&routing_table_lock_lha3);
+ if (!routing_table_inited) {
+ init_routing_table();
+ routing_table_inited = 1;
+ }
+ up_write(&routing_table_lock_lha3);
+ rt_entry = create_routing_table_entry(IPC_ROUTER_NID_LOCAL, NULL);
+ kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+
+ msm_ipc_router_workqueue =
+ create_singlethread_workqueue("msm_ipc_router");
+ if (!msm_ipc_router_workqueue) {
+ mutex_unlock(&ipc_router_init_lock);
+ return -ENOMEM;
+ }
+
+ ret = msm_ipc_router_security_init();
+ if (ret < 0)
+ IPC_RTR_ERR("%s: Security Init failed\n", __func__);
+ else
+ is_ipc_router_inited = true;
+ mutex_unlock(&ipc_router_init_lock);
+
+ return ret;
+}
+
+static int msm_ipc_router_init(void)
+{
+ int ret;
+
+ ret = ipc_router_core_init();
+ if (ret < 0)
+ return ret;
+
+ ret = platform_driver_register(&ipc_router_driver);
+ if (ret)
+ IPC_RTR_ERR(
+ "%s: ipc_router_driver register failed %d\n", __func__, ret);
+
+ ret = msm_ipc_router_init_sockets();
+ if (ret < 0)
+ IPC_RTR_ERR("%s: Init sockets failed\n", __func__);
+
+ ipc_router_log_ctx_init();
+ return ret;
+}
+
+module_init(msm_ipc_router_init);
+MODULE_DESCRIPTION("MSM IPC Router");
+MODULE_LICENSE("GPL v2");
diff --git a/net/ipc_router/ipc_router_private.h b/net/ipc_router/ipc_router_private.h
new file mode 100644
index 000000000000..ce6c84070402
--- /dev/null
+++ b/net/ipc_router/ipc_router_private.h
@@ -0,0 +1,150 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_ROUTER_PRIVATE_H
+#define _IPC_ROUTER_PRIVATE_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/msm_ipc.h>
+#include <linux/ipc_router.h>
+#include <linux/ipc_router_xprt.h>
+
+#include <net/sock.h>
+
+/* definitions for the R2R wire protocol */
+#define IPC_ROUTER_V1 1
+/*
+ * Ambiguous definition but will enable multiplexing IPC_ROUTER_V2 packets
+ * with an existing alternate transport in user-space, if needed.
+ */
+#define IPC_ROUTER_V2 3
+#define IPC_ROUTER_VER_BITMASK ((BIT(IPC_ROUTER_V1)) | (BIT(IPC_ROUTER_V2)))
+#define IPC_ROUTER_HELLO_MAGIC 0xE110
+#define IPC_ROUTER_CHECKSUM_MASK 0xFFFF
+
+#define IPC_ROUTER_ADDRESS 0x0000FFFF
+
+#define IPC_ROUTER_NID_LOCAL 1
+#define MAX_IPC_PKT_SIZE 66000
+
+#define IPC_ROUTER_LOW_RX_QUOTA 5
+#define IPC_ROUTER_HIGH_RX_QUOTA 10
+
+#define IPC_ROUTER_INFINITY -1
+#define DEFAULT_RCV_TIMEO IPC_ROUTER_INFINITY
+#define DEFAULT_SND_TIMEO IPC_ROUTER_INFINITY
+
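+/*
+ * Number of pad bytes needed to round x up to a 4-byte boundary,
+ * e.g. ALIGN_SIZE(5) == 3, ALIGN_SIZE(8) == 0.
+ */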
+#define ALIGN_SIZE(x) ((4 - ((x) & 3)) & 3)
+
+#define ALL_SERVICE 0xFFFFFFFF
+#define ALL_INSTANCE 0xFFFFFFFF
+
+#define CONTROL_FLAG_CONFIRM_RX 0x1
+#define CONTROL_FLAG_OPT_HDR 0x2
+
+enum {
+ CLIENT_PORT,
+ SERVER_PORT,
+ CONTROL_PORT,
+ IRSC_PORT,
+};
+
+enum {
+ NULL_MODE,
+ SINGLE_LINK_MODE,
+ MULTI_LINK_MODE,
+};
+
+enum {
+ CONNECTION_RESET = -1,
+ NOT_CONNECTED,
+ CONNECTED,
+};
+
+struct msm_ipc_sock {
+ struct sock sk;
+ struct msm_ipc_port *port;
+ void *default_node_vote_info;
+};
+
+/**
+ * msm_ipc_router_create_raw_port() - Create an IPC Router port
+ * @endpoint: User-space socket information to be cached.
+ * @notify: Function to notify incoming events on the port. It is called
+ * with the event ID, any out-of-band data along with its length, and
+ * the private data registered during port creation.
+ * @priv: Private data to be passed during the event notification.
+ *
+ * @return: Valid pointer to port on success, NULL on failure.
+ *
+ * This function is used to create an IPC Router port. The port is used for
+ * communication locally or outside the subsystem.
+ */
+struct msm_ipc_port *msm_ipc_router_create_raw_port(void *endpoint,
+ void (*notify)(unsigned event, void *oob_data,
+ size_t oob_data_len, void *priv),
+ void *priv);
+int msm_ipc_router_send_to(struct msm_ipc_port *src,
+ struct sk_buff_head *data,
+ struct msm_ipc_addr *dest,
+ long timeout);
+int msm_ipc_router_read(struct msm_ipc_port *port_ptr,
+ struct rr_packet **pkt,
+ size_t buf_len);
+
+int msm_ipc_router_recv_from(struct msm_ipc_port *port_ptr,
+ struct rr_packet **pkt,
+ struct msm_ipc_addr *src_addr,
+ long timeout);
+int msm_ipc_router_register_server(struct msm_ipc_port *server_port,
+ struct msm_ipc_addr *name);
+int msm_ipc_router_unregister_server(struct msm_ipc_port *server_port);
+
+int msm_ipc_router_init_sockets(void);
+void msm_ipc_router_exit_sockets(void);
+
+void msm_ipc_sync_sec_rule(uint32_t service, uint32_t instance, void *rule);
+
+void msm_ipc_sync_default_sec_rule(void *rule);
+
+int msm_ipc_router_rx_data_wait(struct msm_ipc_port *port_ptr, long timeout);
+
+void msm_ipc_router_free_skb(struct sk_buff_head *skb_head);
+
+/**
+ * ipc_router_set_conn() - Set the connection by initializing dest address
+ * @port_ptr: Local port in which the connection has to be set.
+ * @addr: Destination address of the connection.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+int ipc_router_set_conn(struct msm_ipc_port *port_ptr,
+ struct msm_ipc_addr *addr);
+
+void *msm_ipc_load_default_node(void);
+
+void msm_ipc_unload_default_node(void *pil);
+
+/**
+ * ipc_router_dummy_write_space() - Dummy write space available callback
+ * @sk: Socket pointer for which the callback is called.
+ */
+void ipc_router_dummy_write_space(struct sock *sk);
+
+#endif
diff --git a/net/ipc_router/ipc_router_security.c b/net/ipc_router/ipc_router_security.c
new file mode 100644
index 000000000000..539c72f8158a
--- /dev/null
+++ b/net/ipc_router/ipc_router_security.c
@@ -0,0 +1,334 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/fcntl.h>
+#include <linux/gfp.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipc.h>
+#include <linux/rwsem.h>
+#include <linux/uaccess.h>
+
+#include <net/sock.h>
+#include "ipc_router_private.h"
+#include "ipc_router_security.h"
+
+#define IRSC_COMPLETION_TIMEOUT_MS 30000
+#define SEC_RULES_HASH_SZ 32
+
+#ifndef SIZE_MAX
+#define SIZE_MAX ((size_t)-1)
+#endif
+
+struct security_rule {
+ struct list_head list;
+ uint32_t service_id;
+ uint32_t instance_id;
+ unsigned reserved;
+ int num_group_info;
+ kgid_t *group_id;
+};
+
+static DECLARE_RWSEM(security_rules_lock_lha4);
+static struct list_head security_rules[SEC_RULES_HASH_SZ];
+static DECLARE_COMPLETION(irsc_completion);
+
+/**
+ * wait_for_irsc_completion() - Wait for IPC Router Security Configuration
+ * (IRSC) to complete
+ */
+void wait_for_irsc_completion(void)
+{
+ unsigned long rem_jiffies;
+ do {
+ rem_jiffies = wait_for_completion_timeout(&irsc_completion,
+ msecs_to_jiffies(IRSC_COMPLETION_TIMEOUT_MS));
+ if (rem_jiffies)
+ return;
+ pr_err("%s: waiting for IPC Security Conf.\n", __func__);
+ } while (1);
+}
+
+/**
+ * signal_irsc_completion() - Signal the completion of IRSC
+ */
+void signal_irsc_completion(void)
+{
+ complete_all(&irsc_completion);
+}
+
+/**
+ * check_permissions() - Check whether the process has permissions to
+ * create an interface handle with IPC Router
+ *
+ * @return: true if the process has permissions, else false.
+ */
+int check_permissions(void)
+{
+ int rc = 0;
+ if (capable(CAP_NET_RAW) || capable(CAP_NET_BIND_SERVICE))
+ rc = 1;
+ return rc;
+}
+EXPORT_SYMBOL(check_permissions);
+
+/**
+ * msm_ipc_config_sec_rules() - Add a security rule to the database
+ * @arg: Pointer to the buffer containing the rule.
+ *
+ * @return: 0 if successfully added, < 0 for error.
+ *
+ * A security rule is defined using a <Service_ID: Group_ID> tuple. The rule
+ * implies that, in order to send a QMI message to service Service_ID, a
+ * user-space process must belong to the Linux group Group_ID.
+ */
+int msm_ipc_config_sec_rules(void *arg)
+{
+ struct config_sec_rules_args sec_rules_arg;
+ struct security_rule *rule, *temp_rule;
+ int key;
+ size_t kgroup_info_sz;
+ int ret;
+ size_t group_info_sz;
+ gid_t *group_id = NULL;
+ int loop;
+
+ if (!uid_eq(current_euid(), GLOBAL_ROOT_UID))
+ return -EPERM;
+
+ ret = copy_from_user(&sec_rules_arg, (void *)arg,
+ sizeof(sec_rules_arg));
+ if (ret)
+ return -EFAULT;
+
+ if (sec_rules_arg.num_group_info <= 0)
+ return -EINVAL;
+
+ if (sec_rules_arg.num_group_info > (SIZE_MAX / sizeof(gid_t))) {
+ pr_err("%s: Integer Overflow %zu * %d\n", __func__,
+ sizeof(gid_t), sec_rules_arg.num_group_info);
+ return -EINVAL;
+ }
+ group_info_sz = sec_rules_arg.num_group_info * sizeof(gid_t);
+
+ if (sec_rules_arg.num_group_info > (SIZE_MAX / sizeof(kgid_t))) {
+ pr_err("%s: Integer Overflow %zu * %d\n", __func__,
+ sizeof(kgid_t), sec_rules_arg.num_group_info);
+ return -EINVAL;
+ }
+ kgroup_info_sz = sec_rules_arg.num_group_info * sizeof(kgid_t);
+
+ rule = kzalloc(sizeof(struct security_rule), GFP_KERNEL);
+ if (!rule) {
+ pr_err("%s: security_rule alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ rule->group_id = kzalloc(kgroup_info_sz, GFP_KERNEL);
+ if (!rule->group_id) {
+ pr_err("%s: kgroup_id alloc failed\n", __func__);
+ kfree(rule);
+ return -ENOMEM;
+ }
+
+ group_id = kzalloc(group_info_sz, GFP_KERNEL);
+ if (!group_id) {
+ pr_err("%s: group_id alloc failed\n", __func__);
+ kfree(rule->group_id);
+ kfree(rule);
+ return -ENOMEM;
+ }
+
+ rule->service_id = sec_rules_arg.service_id;
+ rule->instance_id = sec_rules_arg.instance_id;
+ rule->reserved = sec_rules_arg.reserved;
+ rule->num_group_info = sec_rules_arg.num_group_info;
+ ret = copy_from_user(group_id, ((void *)(arg + sizeof(sec_rules_arg))),
+ group_info_sz);
+ if (ret) {
+ kfree(group_id);
+ kfree(rule->group_id);
+ kfree(rule);
+ return -EFAULT;
+ }
+ for (loop = 0; loop < rule->num_group_info; loop++)
+ rule->group_id[loop] = make_kgid(current_user_ns(),
+ group_id[loop]);
+ kfree(group_id);
+
+ key = rule->service_id & (SEC_RULES_HASH_SZ - 1);
+ down_write(&security_rules_lock_lha4);
+ if (rule->service_id == ALL_SERVICE) {
+ temp_rule = list_first_entry(&security_rules[key],
+ struct security_rule, list);
+ list_del(&temp_rule->list);
+ kfree(temp_rule->group_id);
+ kfree(temp_rule);
+ }
+ list_add_tail(&rule->list, &security_rules[key]);
+ up_write(&security_rules_lock_lha4);
+
+ if (rule->service_id == ALL_SERVICE)
+ msm_ipc_sync_default_sec_rule((void *)rule);
+ else
+ msm_ipc_sync_sec_rule(rule->service_id, rule->instance_id,
+ (void *)rule);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_ipc_config_sec_rules);
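+
+/*
+ * Example (illustrative, not part of this patch): @arg is expected to
+ * hold a struct config_sec_rules_args immediately followed by
+ * num_group_info gid_t entries. Assuming no padding between the two
+ * members, a caller could lay out the buffer as follows (the service
+ * and group IDs are placeholders):
+ *
+ *    struct {
+ *        struct config_sec_rules_args args;
+ *        gid_t gids[2];
+ *    } rule = {
+ *        .args.service_id = 0x42,
+ *        .args.instance_id = ALL_INSTANCE,
+ *        .args.num_group_info = 2,
+ *        .gids = { 1001, 3004 },
+ *    };
+ */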
+
+/**
+ * msm_ipc_add_default_rule() - Add default security rule
+ *
+ * @return: 0 on success, < 0 on error.
+ *
+ * This function ensures basic security when no security rule is defined
+ * for a service. The rule it installs can be overridden by a default
+ * security rule configured from a user-space script.
+ */
+static int msm_ipc_add_default_rule(void)
+{
+ struct security_rule *rule;
+ int key;
+
+ rule = kzalloc(sizeof(struct security_rule), GFP_KERNEL);
+ if (!rule) {
+ pr_err("%s: security_rule alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ rule->group_id = kzalloc(sizeof(*(rule->group_id)), GFP_KERNEL);
+ if (!rule->group_id) {
+ pr_err("%s: group_id alloc failed\n", __func__);
+ kfree(rule);
+ return -ENOMEM;
+ }
+
+ rule->service_id = ALL_SERVICE;
+ rule->instance_id = ALL_INSTANCE;
+ rule->num_group_info = 1;
+ *(rule->group_id) = AID_NET_RAW;
+ down_write(&security_rules_lock_lha4);
+ key = (ALL_SERVICE & (SEC_RULES_HASH_SZ - 1));
+ list_add_tail(&rule->list, &security_rules[key]);
+ up_write(&security_rules_lock_lha4);
+ return 0;
+}
+
+/**
+ * msm_ipc_get_security_rule() - Get the security rule corresponding to a
+ * service
+ * @service_id: Service ID for which the rule is to be retrieved.
+ * @instance_id: Instance ID for which the rule is to be retrieved.
+ *
+ * @return: Returns the rule info on success, NULL on error.
+ *
+ * This function is used when the service comes up and gets registered with
+ * the IPC Router.
+ */
+void *msm_ipc_get_security_rule(uint32_t service_id, uint32_t instance_id)
+{
+ int key;
+ struct security_rule *rule;
+
+ key = (service_id & (SEC_RULES_HASH_SZ - 1));
+ down_read(&security_rules_lock_lha4);
+ /* Return the rule for a specific <service:instance>, if found. */
+ list_for_each_entry(rule, &security_rules[key], list) {
+ if ((rule->service_id == service_id) &&
+ (rule->instance_id == instance_id)) {
+ up_read(&security_rules_lock_lha4);
+ return (void *)rule;
+ }
+ }
+
+ /* Return the rule for a specific service, if found. */
+ list_for_each_entry(rule, &security_rules[key], list) {
+ if ((rule->service_id == service_id) &&
+ (rule->instance_id == ALL_INSTANCE)) {
+ up_read(&security_rules_lock_lha4);
+ return (void *)rule;
+ }
+ }
+
+ /* Return the default rule, if no rule defined for a service. */
+ key = (ALL_SERVICE & (SEC_RULES_HASH_SZ - 1));
+ list_for_each_entry(rule, &security_rules[key], list) {
+ if ((rule->service_id == ALL_SERVICE) &&
+ (rule->instance_id == ALL_INSTANCE)) {
+ up_read(&security_rules_lock_lha4);
+ return (void *)rule;
+ }
+ }
+ up_read(&security_rules_lock_lha4);
+ return NULL;
+}
+EXPORT_SYMBOL(msm_ipc_get_security_rule);
+
+/**
+ * msm_ipc_check_send_permissions() - Check if the sending process has
+ * permissions specified as per the rule
+ * @data: Security rule to be checked.
+ *
+ * @return: true if the process has permissions, else false.
+ *
+ * This function is used to check if the currently executing process has
+ * permission to send a message to the remote entity. The security rule
+ * corresponding to the remote entity is specified by the "data" parameter.
+ */
+int msm_ipc_check_send_permissions(void *data)
+{
+ int i;
+ struct security_rule *rule = (struct security_rule *)data;
+
+ /* Source/Sender is Root user */
+ if (uid_eq(current_euid(), GLOBAL_ROOT_UID))
+ return 1;
+
+ /* Destination has no rules defined, possibly a client. */
+ if (!rule)
+ return 1;
+
+ for (i = 0; i < rule->num_group_info; i++) {
+ if (!gid_valid(rule->group_id[i]))
+ continue;
+ if (in_egroup_p(rule->group_id[i]))
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(msm_ipc_check_send_permissions);
+
+/**
+ * msm_ipc_router_security_init() - Initialize the security rule database
+ *
+ * @return: 0 if successful, < 0 for error.
+ */
+int msm_ipc_router_security_init(void)
+{
+ int i;
+
+ for (i = 0; i < SEC_RULES_HASH_SZ; i++)
+ INIT_LIST_HEAD(&security_rules[i]);
+
+ msm_ipc_add_default_rule();
+ return 0;
+}
+EXPORT_SYMBOL(msm_ipc_router_security_init);
diff --git a/net/ipc_router/ipc_router_security.h b/net/ipc_router/ipc_router_security.h
new file mode 100644
index 000000000000..002ae84d0b95
--- /dev/null
+++ b/net/ipc_router/ipc_router_security.h
@@ -0,0 +1,120 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_ROUTER_SECURITY_H
+#define _IPC_ROUTER_SECURITY_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+
+#ifdef CONFIG_IPC_ROUTER_SECURITY
+#include <linux/android_aid.h>
+
+/**
+ * check_permissions() - Check whether the process has permissions to
+ * create an interface handle with IPC Router
+ *
+ * @return: true if the process has permissions, else false.
+ */
+int check_permissions(void);
+
+/**
+ * msm_ipc_config_sec_rules() - Add a security rule to the database
+ * @arg: Pointer to the buffer containing the rule.
+ *
+ * @return: 0 if successfully added, < 0 for error.
+ *
+ * A security rule is defined using a <Service_ID: Group_ID> tuple. The rule
+ * implies that, in order to send a QMI message to service Service_ID, a
+ * user-space process must belong to the Linux group Group_ID.
+ */
+int msm_ipc_config_sec_rules(void *arg);
+
+/**
+ * msm_ipc_get_security_rule() - Get the security rule corresponding to a
+ * service
+ * @service_id: Service ID for which the rule is to be retrieved.
+ * @instance_id: Instance ID for which the rule is to be retrieved.
+ *
+ * @return: Returns the rule info on success, NULL on error.
+ *
+ * This function is used when the service comes up and gets registered with
+ * the IPC Router.
+ */
+void *msm_ipc_get_security_rule(uint32_t service_id, uint32_t instance_id);
+
+/**
+ * msm_ipc_check_send_permissions() - Check if the sending process has
+ * permissions specified as per the rule
+ * @data: Security rule to be checked.
+ *
+ * @return: true if the process has permissions, else false.
+ *
+ * This function is used to check if the currently executing process has
+ * permission to send a message to the remote entity. The security rule
+ * corresponding to the remote entity is specified by the "data" parameter.
+ */
+int msm_ipc_check_send_permissions(void *data);
+
+/**
+ * msm_ipc_router_security_init() - Initialize the security rule database
+ *
+ * @return: 0 if successful, < 0 for error.
+ */
+int msm_ipc_router_security_init(void);
+
+/**
+ * wait_for_irsc_completion() - Wait for IPC Router Security Configuration
+ * (IRSC) to complete
+ */
+void wait_for_irsc_completion(void);
+
+/**
+ * signal_irsc_completion() - Signal the completion of IRSC
+ */
+void signal_irsc_completion(void);
+
+#else
+
+static inline int check_permissions(void)
+{
+ return 1;
+}
+
+static inline int msm_ipc_config_sec_rules(void *arg)
+{
+ return -ENODEV;
+}
+
+static inline void *msm_ipc_get_security_rule(uint32_t service_id,
+ uint32_t instance_id)
+{
+ return NULL;
+}
+
+static inline int msm_ipc_check_send_permissions(void *data)
+{
+ return 1;
+}
+
+static inline int msm_ipc_router_security_init(void)
+{
+ return 0;
+}
+
+static inline void wait_for_irsc_completion(void) { }
+
+static inline void signal_irsc_completion(void) { }
+
+#endif
+#endif
diff --git a/net/ipc_router/ipc_router_socket.c b/net/ipc_router/ipc_router_socket.c
new file mode 100644
index 000000000000..b15356ae26fc
--- /dev/null
+++ b/net/ipc_router/ipc_router_socket.c
@@ -0,0 +1,687 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/fcntl.h>
+#include <linux/gfp.h>
+#include <linux/msm_ipc.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
+#include <linux/ipc_logging.h>
+#include <linux/string.h>
+#include <linux/atomic.h>
+#include <linux/ipc_router.h>
+
+#include <net/sock.h>
+
+#include "ipc_router_private.h"
+#include "ipc_router_security.h"
+
+#define msm_ipc_sk(sk) ((struct msm_ipc_sock *)(sk))
+#define msm_ipc_sk_port(sk) ((struct msm_ipc_port *)(msm_ipc_sk(sk)->port))
+
+#ifndef SIZE_MAX
+#define SIZE_MAX ((size_t)-1)
+#endif
+
+static int sockets_enabled;
+static struct proto msm_ipc_proto;
+static const struct proto_ops msm_ipc_proto_ops;
+static RAW_NOTIFIER_HEAD(ipcrtr_af_init_chain);
+static DEFINE_MUTEX(ipcrtr_af_init_lock);
+
+static struct sk_buff_head *msm_ipc_router_build_msg(struct msghdr *m,
+ size_t total_len)
+{
+ struct sk_buff_head *msg_head;
+ struct sk_buff *msg;
+ int first = 1;
+ int last = 1;
+ size_t data_size = 0;
+ size_t alloc_size, align_size;
+ void *data;
+ size_t total_copied_size = 0, copied_size;
+
+ if (iov_iter_count(&m->msg_iter) == total_len)
+ data_size = total_len;
+
+ if (!data_size)
+ return NULL;
+ align_size = ALIGN_SIZE(data_size);
+
+ msg_head = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL);
+ if (!msg_head) {
+ IPC_RTR_ERR("%s: cannot allocate skb_head\n", __func__);
+ return NULL;
+ }
+ skb_queue_head_init(msg_head);
+
+ while (total_copied_size < total_len) {
+ alloc_size = data_size;
+ if (first)
+ alloc_size += IPC_ROUTER_HDR_SIZE;
+ if (last)
+ alloc_size += align_size;
+
+ msg = alloc_skb(alloc_size, GFP_KERNEL);
+ if (!msg) {
+ if (alloc_size <= (PAGE_SIZE/2)) {
+ IPC_RTR_ERR("%s: cannot allocated skb\n",
+ __func__);
+ goto msg_build_failure;
+ }
+ data_size = data_size / 2;
+ last = 0;
+ continue;
+ }
+
+ if (first) {
+ skb_reserve(msg, IPC_ROUTER_HDR_SIZE);
+ first = 0;
+ }
+
+ data = skb_put(msg, data_size);
+ copied_size = copy_from_iter(data, data_size, &m->msg_iter);
+ if (copied_size != data_size) {
+ IPC_RTR_ERR("%s: copy_from_iter failed %zu %zu %zu\n",
+ __func__, alloc_size, data_size, copied_size);
+ kfree_skb(msg);
+ goto msg_build_failure;
+ }
+ skb_queue_tail(msg_head, msg);
+ total_copied_size += data_size;
+ data_size = total_len - total_copied_size;
+ last = 1;
+ }
+ return msg_head;
+
+msg_build_failure:
+ while (!skb_queue_empty(msg_head)) {
+ msg = skb_dequeue(msg_head);
+ kfree_skb(msg);
+ }
+ kfree(msg_head);
+ return NULL;
+}
+
+static int msm_ipc_router_extract_msg(struct msghdr *m,
+ struct rr_packet *pkt)
+{
+ struct sockaddr_msm_ipc *addr;
+ struct rr_header_v1 *hdr;
+ struct sk_buff *temp;
+ union rr_control_msg *ctl_msg;
+ int offset = 0, data_len = 0, copy_len, copied_len;
+
+ if (!m || !pkt) {
+ IPC_RTR_ERR("%s: Invalid pointers passed\n", __func__);
+ return -EINVAL;
+ }
+ addr = (struct sockaddr_msm_ipc *)m->msg_name;
+
+ hdr = &(pkt->hdr);
+ if (addr && (hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX)) {
+ temp = skb_peek(pkt->pkt_fragment_q);
+ ctl_msg = (union rr_control_msg *)(temp->data);
+ addr->family = AF_MSM_IPC;
+ addr->address.addrtype = MSM_IPC_ADDR_ID;
+ addr->address.addr.port_addr.node_id = ctl_msg->cli.node_id;
+ addr->address.addr.port_addr.port_id = ctl_msg->cli.port_id;
+ m->msg_namelen = sizeof(struct sockaddr_msm_ipc);
+ return offset;
+ }
+ if (addr && (hdr->type == IPC_ROUTER_CTRL_CMD_DATA)) {
+ addr->family = AF_MSM_IPC;
+ addr->address.addrtype = MSM_IPC_ADDR_ID;
+ addr->address.addr.port_addr.node_id = hdr->src_node_id;
+ addr->address.addr.port_addr.port_id = hdr->src_port_id;
+ m->msg_namelen = sizeof(struct sockaddr_msm_ipc);
+ }
+
+ data_len = hdr->size;
+ skb_queue_walk(pkt->pkt_fragment_q, temp) {
+ copy_len = data_len < temp->len ? data_len : temp->len;
+ copied_len = copy_to_iter(temp->data, copy_len, &m->msg_iter);
+ if (copy_len != copied_len) {
+ IPC_RTR_ERR("%s: Copy to user failed\n", __func__);
+ return -EFAULT;
+ }
+ offset += copy_len;
+ data_len -= copy_len;
+ }
+ return offset;
+}
+
+static int msm_ipc_router_create(struct net *net,
+ struct socket *sock,
+ int protocol,
+ int kern)
+{
+ struct sock *sk;
+ struct msm_ipc_port *port_ptr;
+
+ if (unlikely(protocol != 0)) {
+ IPC_RTR_ERR("%s: Protocol not supported\n", __func__);
+ return -EPROTONOSUPPORT;
+ }
+
+ switch (sock->type) {
+ case SOCK_DGRAM:
+ break;
+ default:
+ IPC_RTR_ERR("%s: Protocol type not supported\n", __func__);
+ return -EPROTOTYPE;
+ }
+
+ sk = sk_alloc(net, AF_MSM_IPC, GFP_KERNEL, &msm_ipc_proto, kern);
+ if (!sk) {
+ IPC_RTR_ERR("%s: sk_alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ sock->ops = &msm_ipc_proto_ops;
+ sock_init_data(sock, sk);
+ sk->sk_data_ready = NULL;
+ sk->sk_write_space = ipc_router_dummy_write_space;
+ sk->sk_rcvtimeo = DEFAULT_RCV_TIMEO;
+ sk->sk_sndtimeo = DEFAULT_SND_TIMEO;
+
+ port_ptr = msm_ipc_router_create_raw_port(sk, NULL, NULL);
+ if (!port_ptr) {
+ IPC_RTR_ERR("%s: port_ptr alloc failed\n", __func__);
+ sock_put(sk);
+ sock->sk = NULL;
+ return -ENOMEM;
+ }
+
+ port_ptr->check_send_permissions = msm_ipc_check_send_permissions;
+ msm_ipc_sk(sk)->port = port_ptr;
+ msm_ipc_sk(sk)->default_node_vote_info = NULL;
+
+ return 0;
+}
+
+int msm_ipc_router_bind(struct socket *sock, struct sockaddr *uaddr,
+ int uaddr_len)
+{
+ struct sockaddr_msm_ipc *addr = (struct sockaddr_msm_ipc *)uaddr;
+ struct sock *sk = sock->sk;
+ struct msm_ipc_port *port_ptr;
+ int ret;
+
+ if (!sk)
+ return -EINVAL;
+
+ if (!check_permissions()) {
+ IPC_RTR_ERR("%s: %s Do not have permissions\n",
+ __func__, current->comm);
+ return -EPERM;
+ }
+
+ if (!uaddr_len) {
+ IPC_RTR_ERR("%s: Invalid address length\n", __func__);
+ return -EINVAL;
+ }
+
+ if (addr->family != AF_MSM_IPC) {
+ IPC_RTR_ERR("%s: Address family is incorrect\n", __func__);
+ return -EAFNOSUPPORT;
+ }
+
+ if (addr->address.addrtype != MSM_IPC_ADDR_NAME) {
+ IPC_RTR_ERR("%s: Address type is incorrect\n", __func__);
+ return -EINVAL;
+ }
+
+ port_ptr = msm_ipc_sk_port(sk);
+ if (!port_ptr)
+ return -ENODEV;
+
+ if (!msm_ipc_sk(sk)->default_node_vote_info)
+ msm_ipc_sk(sk)->default_node_vote_info =
+ msm_ipc_load_default_node();
+ lock_sock(sk);
+
+ ret = msm_ipc_router_register_server(port_ptr, &addr->address);
+
+ release_sock(sk);
+ return ret;
+}
+
+static int ipc_router_connect(struct socket *sock, struct sockaddr *uaddr,
+ int uaddr_len, int flags)
+{
+ struct sockaddr_msm_ipc *addr = (struct sockaddr_msm_ipc *)uaddr;
+ struct sock *sk = sock->sk;
+ struct msm_ipc_port *port_ptr;
+ int ret;
+
+ if (!sk)
+ return -EINVAL;
+
+ if (uaddr_len <= 0) {
+ IPC_RTR_ERR("%s: Invalid address length\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!addr) {
+ IPC_RTR_ERR("%s: Invalid address\n", __func__);
+ return -EINVAL;
+ }
+
+ if (addr->family != AF_MSM_IPC) {
+ IPC_RTR_ERR("%s: Address family is incorrect\n", __func__);
+ return -EAFNOSUPPORT;
+ }
+
+ port_ptr = msm_ipc_sk_port(sk);
+ if (!port_ptr)
+ return -ENODEV;
+
+ lock_sock(sk);
+ ret = ipc_router_set_conn(port_ptr, &addr->address);
+ release_sock(sk);
+ return ret;
+}
+
+static int msm_ipc_router_sendmsg(struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct sock *sk = sock->sk;
+ struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
+ struct sockaddr_msm_ipc *dest = (struct sockaddr_msm_ipc *)m->msg_name;
+ struct sk_buff_head *msg;
+ int ret;
+ struct msm_ipc_addr dest_addr = {0};
+ long timeout;
+
+ if (dest) {
+ if (m->msg_namelen < sizeof(*dest) ||
+ dest->family != AF_MSM_IPC)
+ return -EINVAL;
+ memcpy(&dest_addr, &dest->address, sizeof(dest_addr));
+ } else {
+ if (port_ptr->conn_status == NOT_CONNECTED) {
+ return -EDESTADDRREQ;
+ } else if (port_ptr->conn_status < CONNECTION_RESET) {
+ return -ENETRESET;
+ } else {
+ memcpy(&dest_addr.addr.port_addr, &port_ptr->dest_addr,
+ sizeof(struct msm_ipc_port_addr));
+ dest_addr.addrtype = MSM_IPC_ADDR_ID;
+ }
+ }
+
+ if (total_len > MAX_IPC_PKT_SIZE)
+ return -EINVAL;
+
+ lock_sock(sk);
+ timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+ msg = msm_ipc_router_build_msg(m, total_len);
+ if (!msg) {
+ IPC_RTR_ERR("%s: Msg build failure\n", __func__);
+ ret = -ENOMEM;
+ goto out_sendmsg;
+ }
+ kmemleak_not_leak(msg);
+
+ if (port_ptr->type == CLIENT_PORT)
+ wait_for_irsc_completion();
+ ret = msm_ipc_router_send_to(port_ptr, msg, &dest_addr, timeout);
+ if (ret != total_len) {
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ IPC_RTR_ERR("%s: Send_to failure %d\n",
+ __func__, ret);
+ msm_ipc_router_free_skb(msg);
+ } else {
+ ret = -EFAULT;
+ }
+ }
+
+out_sendmsg:
+ release_sock(sk);
+ return ret;
+}
+
+static int msm_ipc_router_recvmsg(struct socket *sock,
+ struct msghdr *m, size_t buf_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
+ struct rr_packet *pkt;
+ long timeout;
+ int ret;
+
+ lock_sock(sk);
+ if (!buf_len) {
+ if (flags & MSG_PEEK)
+ ret = msm_ipc_router_get_curr_pkt_size(port_ptr);
+ else
+ ret = -EINVAL;
+ release_sock(sk);
+ return ret;
+ }
+ timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+
+ ret = msm_ipc_router_rx_data_wait(port_ptr, timeout);
+ if (ret) {
+ release_sock(sk);
+ if (ret == -ENOMSG)
+ m->msg_namelen = 0;
+ return ret;
+ }
+
+ ret = msm_ipc_router_read(port_ptr, &pkt, buf_len);
+ if (ret <= 0 || !pkt) {
+ release_sock(sk);
+ return ret;
+ }
+
+ ret = msm_ipc_router_extract_msg(m, pkt);
+ release_pkt(pkt);
+ release_sock(sk);
+ return ret;
+}
+
+static int msm_ipc_router_ioctl(struct socket *sock,
+ unsigned int cmd, unsigned long arg)
+{
+ struct sock *sk = sock->sk;
+ struct msm_ipc_port *port_ptr;
+ struct server_lookup_args server_arg;
+ struct msm_ipc_server_info *srv_info = NULL;
+ unsigned int n;
+ size_t srv_info_sz = 0;
+ int ret;
+
+ if (!sk)
+ return -EINVAL;
+
+ lock_sock(sk);
+ port_ptr = msm_ipc_sk_port(sock->sk);
+ if (!port_ptr) {
+ release_sock(sk);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case IPC_ROUTER_IOCTL_GET_VERSION:
+ n = IPC_ROUTER_V1;
+ ret = put_user(n, (unsigned int *)arg);
+ break;
+
+ case IPC_ROUTER_IOCTL_GET_MTU:
+ n = (MAX_IPC_PKT_SIZE - IPC_ROUTER_HDR_SIZE);
+ ret = put_user(n, (unsigned int *)arg);
+ break;
+
+ case IPC_ROUTER_IOCTL_GET_CURR_PKT_SIZE:
+ ret = msm_ipc_router_get_curr_pkt_size(port_ptr);
+ break;
+
+ case IPC_ROUTER_IOCTL_LOOKUP_SERVER:
+ if (!msm_ipc_sk(sk)->default_node_vote_info)
+ msm_ipc_sk(sk)->default_node_vote_info =
+ msm_ipc_load_default_node();
+
+ ret = copy_from_user(&server_arg, (void *)arg,
+ sizeof(server_arg));
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (server_arg.num_entries_in_array < 0) {
+ ret = -EINVAL;
+ break;
+ }
+ if (server_arg.num_entries_in_array) {
+ if (server_arg.num_entries_in_array >
+ (SIZE_MAX / sizeof(*srv_info))) {
+ IPC_RTR_ERR("%s: Integer Overflow %zu * %d\n",
+ __func__, sizeof(*srv_info),
+ server_arg.num_entries_in_array);
+ ret = -EINVAL;
+ break;
+ }
+ srv_info_sz = server_arg.num_entries_in_array *
+ sizeof(*srv_info);
+ srv_info = kmalloc(srv_info_sz, GFP_KERNEL);
+ if (!srv_info) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+ ret = msm_ipc_router_lookup_server_name(&server_arg.port_name,
+ srv_info, server_arg.num_entries_in_array,
+ server_arg.lookup_mask);
+ if (ret < 0) {
+ IPC_RTR_ERR("%s: Server not found\n", __func__);
+ ret = -ENODEV;
+ kfree(srv_info);
+ break;
+ }
+ server_arg.num_entries_found = ret;
+
+ ret = copy_to_user((void *)arg, &server_arg,
+ sizeof(server_arg));
+
+ n = min(server_arg.num_entries_found,
+ server_arg.num_entries_in_array);
+
+ if (ret == 0 && n) {
+ ret = copy_to_user((void *)(arg + sizeof(server_arg)),
+ srv_info, n * sizeof(*srv_info));
+ }
+
+ if (ret)
+ ret = -EFAULT;
+ kfree(srv_info);
+ break;
+
+ case IPC_ROUTER_IOCTL_BIND_CONTROL_PORT:
+ ret = msm_ipc_router_bind_control_port(port_ptr);
+ break;
+
+ case IPC_ROUTER_IOCTL_CONFIG_SEC_RULES:
+ ret = msm_ipc_config_sec_rules((void *)arg);
+ if (ret != -EPERM)
+ port_ptr->type = IRSC_PORT;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+ release_sock(sk);
+ return ret;
+}
+
+static unsigned int msm_ipc_router_poll(struct file *file,
+ struct socket *sock, poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ struct msm_ipc_port *port_ptr;
+ uint32_t mask = 0;
+
+ if (!sk)
+ return -EINVAL;
+
+ port_ptr = msm_ipc_sk_port(sk);
+ if (!port_ptr)
+ return -EINVAL;
+
+ poll_wait(file, &port_ptr->port_rx_wait_q, wait);
+
+ if (!list_empty(&port_ptr->port_rx_q))
+ mask |= (POLLRDNORM | POLLIN);
+
+ if (port_ptr->conn_status == CONNECTION_RESET)
+ mask |= (POLLHUP | POLLERR);
+
+ return mask;
+}
+
+static int msm_ipc_router_close(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct msm_ipc_port *port_ptr;
+ int ret;
+
+ if (!sk)
+ return -EINVAL;
+
+ lock_sock(sk);
+ port_ptr = msm_ipc_sk_port(sk);
+ if (!port_ptr) {
+ release_sock(sk);
+ return -EINVAL;
+ }
+ ret = msm_ipc_router_close_port(port_ptr);
+ msm_ipc_unload_default_node(msm_ipc_sk(sk)->default_node_vote_info);
+ release_sock(sk);
+ sock_put(sk);
+ sock->sk = NULL;
+
+ return ret;
+}
+
+/**
+ * register_ipcrtr_af_init_notifier() - Register for ipc router socket
+ * address family initialization callback
+ * @nb: Notifier block which will be notified when address family is
+ * initialized.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+int register_ipcrtr_af_init_notifier(struct notifier_block *nb)
+{
+ int ret;
+
+ if (!nb)
+ return -EINVAL;
+ mutex_lock(&ipcrtr_af_init_lock);
+ if (sockets_enabled)
+ nb->notifier_call(nb, IPCRTR_AF_INIT, NULL);
+ ret = raw_notifier_chain_register(&ipcrtr_af_init_chain, nb);
+ mutex_unlock(&ipcrtr_af_init_lock);
+ return ret;
+}
+EXPORT_SYMBOL(register_ipcrtr_af_init_notifier);
+
+/**
+ * unregister_ipcrtr_af_init_notifier() - Unregister for ipc router socket
+ * address family initialization callback
+ * @nb: Notifier block that was previously registered and should no
+ * longer be notified.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+int unregister_ipcrtr_af_init_notifier(struct notifier_block *nb)
+{
+ int ret;
+
+ if (!nb)
+ return -EINVAL;
+ ret = raw_notifier_chain_unregister(&ipcrtr_af_init_chain, nb);
+ return ret;
+}
+EXPORT_SYMBOL(unregister_ipcrtr_af_init_notifier);
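For context, a minimal sketch of a kernel client driving this hook; the callback, module wiring, and header path are illustrative assumptions. Note that registration replays IPCRTR_AF_INIT if the family came up first, as the code above shows.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/ipc_router.h>	/* assumed home of the declarations above */

static int my_ipcrtr_cb(struct notifier_block *nb, unsigned long event,
			void *data)
{
	if (event == IPCRTR_AF_INIT)
		pr_info("AF_MSM_IPC sockets are now usable\n");
	return NOTIFY_DONE;
}

static struct notifier_block my_ipcrtr_nb = {
	.notifier_call = my_ipcrtr_cb,
};

static int __init my_client_init(void)
{
	/* Fires immediately if sockets_enabled was already set. */
	return register_ipcrtr_af_init_notifier(&my_ipcrtr_nb);
}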
+
+static const struct net_proto_family msm_ipc_family_ops = {
+ .owner = THIS_MODULE,
+ .family = AF_MSM_IPC,
+ .create = msm_ipc_router_create
+};
+
+static const struct proto_ops msm_ipc_proto_ops = {
+ .family = AF_MSM_IPC,
+ .owner = THIS_MODULE,
+ .release = msm_ipc_router_close,
+ .bind = msm_ipc_router_bind,
+ .connect = ipc_router_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = msm_ipc_router_poll,
+ .ioctl = msm_ipc_router_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = msm_ipc_router_ioctl,
+#endif
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt,
+#ifdef CONFIG_COMPAT
+ .compat_setsockopt = sock_no_setsockopt,
+ .compat_getsockopt = sock_no_getsockopt,
+#endif
+ .sendmsg = msm_ipc_router_sendmsg,
+ .recvmsg = msm_ipc_router_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+static struct proto msm_ipc_proto = {
+ .name = "MSM_IPC",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct msm_ipc_sock),
+};
+
+int msm_ipc_router_init_sockets(void)
+{
+ int ret;
+
+ ret = proto_register(&msm_ipc_proto, 1);
+ if (ret) {
+ IPC_RTR_ERR("%s: Failed to register MSM_IPC protocol type\n",
+ __func__);
+ goto out_init_sockets;
+ }
+
+ ret = sock_register(&msm_ipc_family_ops);
+ if (ret) {
+ IPC_RTR_ERR("%s: Failed to register MSM_IPC socket type\n",
+ __func__);
+ proto_unregister(&msm_ipc_proto);
+ goto out_init_sockets;
+ }
+
+ mutex_lock(&ipcrtr_af_init_lock);
+ sockets_enabled = 1;
+ raw_notifier_call_chain(&ipcrtr_af_init_chain,
+ IPCRTR_AF_INIT, NULL);
+ mutex_unlock(&ipcrtr_af_init_lock);
+out_init_sockets:
+ return ret;
+}
+
+void msm_ipc_router_exit_sockets(void)
+{
+ if (!sockets_enabled)
+ return;
+
+ sock_unregister(msm_ipc_family_ops.family);
+ proto_unregister(&msm_ipc_proto);
+ mutex_lock(&ipcrtr_af_init_lock);
+ sockets_enabled = 0;
+ raw_notifier_call_chain(&ipcrtr_af_init_chain,
+ IPCRTR_AF_DEINIT, NULL);
+ mutex_unlock(&ipcrtr_af_init_lock);
+}
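A hedged userspace sketch of the socket API these ops expose; the UAPI header name and the service/instance layout inside port_name are assumptions, not shown in this patch:

#include <string.h>
#include <sys/socket.h>
#include <linux/msm_ipc.h>	/* assumed UAPI header for sockaddr_msm_ipc */

static void bind_example(void)
{
	struct sockaddr_msm_ipc name;
	int fd;

	fd = socket(AF_MSM_IPC, SOCK_DGRAM, 0);	/* protocol must be 0 */

	memset(&name, 0, sizeof(name));
	name.family = AF_MSM_IPC;
	name.address.addrtype = MSM_IPC_ADDR_NAME;	/* bind() requires a name */
	name.address.addr.port_name.service = 0x42;	/* hypothetical service ID */
	name.address.addr.port_name.instance = 1;	/* hypothetical instance */
	bind(fd, (struct sockaddr *)&name, sizeof(name));	/* registers a server */
}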
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 68bf7bdf7fdb..fc65a0167fbe 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -135,6 +135,8 @@ static inline int current_has_network(void)
}
#endif
+int sysctl_reserved_port_bind __read_mostly = 1;
+
/* The inetsw table contains everything that inet_create needs to
* build a new socket.
*/
@@ -1342,6 +1344,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
for (p = *head; p; p = p->next) {
struct iphdr *iph2;
+ u16 flush_id;
if (!NAPI_GRO_CB(p)->same_flow)
continue;
@@ -1365,14 +1368,24 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
(iph->tos ^ iph2->tos) |
((iph->frag_off ^ iph2->frag_off) & htons(IP_DF));
- /* Save the IP ID check to be included later when we get to
- * the transport layer so only the inner most IP ID is checked.
- * This is because some GSO/TSO implementations do not
- * correctly increment the IP ID for the outer hdrs.
- */
- NAPI_GRO_CB(p)->flush_id =
- ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id);
NAPI_GRO_CB(p)->flush |= flush;
+
+ /* We must save the offset as it is possible to have multiple
+ * flows using the same protocol and address pairs so we
+ * need to wait until we can validate this is part of the
+ * same flow with a 5-tuple or better to avoid unnecessary
+ * collisions between flows. We can support one of two
+ * possible scenarios, either a fixed value with DF bit set
+ * or an incrementing value with DF either set or unset.
+ * In the fixed-value case we end up losing the fact that
+ * the IP ID was fixed, but per RFC 6864 the actual value
+ * of the IP ID is meant to be ignored there anyway.
+ */
+ flush_id = (u16)(id - ntohs(iph2->id));
+ if (flush_id || !(iph2->frag_off & htons(IP_DF)))
+ NAPI_GRO_CB(p)->flush_id |= flush_id ^
+ NAPI_GRO_CB(p)->count;
}
NAPI_GRO_CB(skb)->flush |= flush;
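A worked example of the new arithmetic (values illustrative): suppose the flow's first held segment carried IP ID 1000 and count == 3 segments are aggregated. An incrementing-ID sender's next segment arrives with id == 1003, so flush_id = 1003 - 1000 = 3, and flush_id ^ count == 0, so nothing accumulates. A fixed-ID sender with DF set arrives with id == 1000, giving flush_id == 0, and the DF test skips the update entirely, as RFC 6864 permits. Only a fixed ID without DF, or an ID that jumps, leaves a non-zero residue in NAPI_GRO_CB(p)->flush_id for the transport layer to act on.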
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 7c52afb98c42..2059c2010f9c 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1694,7 +1694,7 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb)
lt = (struct trie *)local_tb->tb_data;
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
- struct key_vector *local_l = NULL, *local_tp;
+ struct key_vector *local_l = NULL, *local_tp = NULL;
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
struct fib_alias *new_fa;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index c97a2108cd61..012aa120b090 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -179,6 +179,13 @@ have_snum:
head = &hashinfo->bhash[inet_bhashfn(net, snum,
hashinfo->bhash_size)];
spin_lock(&head->lock);
+
+ if (inet_is_local_reserved_port(net, snum) &&
+ !sysctl_reserved_port_bind) {
+ ret = 1;
+ goto fail_unlock;
+ }
+
inet_bind_bucket_for_each(tb, &head->chain)
if (net_eq(ib_net(tb), net) && tb->port == snum)
goto tb_found;
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index f17ea49b28fb..8f26145c34e2 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -145,20 +145,37 @@ static __wsum lro_tcp_data_csum(struct iphdr *iph, struct tcphdr *tcph, int len)
}
static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb,
- struct iphdr *iph, struct tcphdr *tcph)
+ struct iphdr *iph, struct tcphdr *tcph,
+ struct net_lro_info *lro_info)
{
int nr_frags;
__be32 *ptr;
u32 tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
+ u64 hw_marked = 0;
+
+ if (lro_info)
+ hw_marked = lro_info->valid_fields;
nr_frags = skb_shinfo(skb)->nr_frags;
lro_desc->parent = skb;
lro_desc->next_frag = &(skb_shinfo(skb)->frags[nr_frags]);
lro_desc->iph = iph;
lro_desc->tcph = tcph;
- lro_desc->tcp_next_seq = ntohl(tcph->seq) + tcp_data_len;
- lro_desc->tcp_ack = tcph->ack_seq;
- lro_desc->tcp_window = tcph->window;
+
+ if (hw_marked & LRO_TCP_SEQ_NUM)
+ lro_desc->tcp_next_seq = lro_info->tcp_seq_num + tcp_data_len;
+ else
+ lro_desc->tcp_next_seq = ntohl(tcph->seq) + tcp_data_len;
+
+ if (hw_marked & LRO_TCP_ACK_NUM)
+ lro_desc->tcp_ack = htonl(lro_info->tcp_ack_num);
+ else
+ lro_desc->tcp_ack = tcph->ack_seq;
+
+ if (hw_marked & LRO_TCP_WIN)
+ lro_desc->tcp_window = htons(lro_info->tcp_win);
+ else
+ lro_desc->tcp_window = tcph->window;
lro_desc->pkt_aggr_cnt = 1;
lro_desc->ip_tot_len = ntohs(iph->tot_len);
@@ -173,8 +190,11 @@ static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb,
lro_desc->mss = tcp_data_len;
lro_desc->active = 1;
- lro_desc->data_csum = lro_tcp_data_csum(iph, tcph,
- tcp_data_len);
+ if (hw_marked & LRO_TCP_DATA_CSUM)
+ lro_desc->data_csum = lro_info->tcp_data_csum;
+ else
+ lro_desc->data_csum = lro_tcp_data_csum(iph, tcph,
+ tcp_data_len);
}
static inline void lro_clear_desc(struct net_lro_desc *lro_desc)
@@ -183,16 +203,29 @@ static inline void lro_clear_desc(struct net_lro_desc *lro_desc)
}
static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph,
- struct tcphdr *tcph, int tcp_data_len)
+ struct tcphdr *tcph, int tcp_data_len,
+ struct net_lro_info *lro_info)
{
struct sk_buff *parent = lro_desc->parent;
__be32 *topt;
+ u64 hw_marked = 0;
+
+ if (lro_info)
+ hw_marked = lro_info->valid_fields;
lro_desc->pkt_aggr_cnt++;
lro_desc->ip_tot_len += tcp_data_len;
lro_desc->tcp_next_seq += tcp_data_len;
- lro_desc->tcp_window = tcph->window;
- lro_desc->tcp_ack = tcph->ack_seq;
+
+ if (hw_marked & LRO_TCP_WIN)
+ lro_desc->tcp_window = htons(lro_info->tcp_win);
+ else
+ lro_desc->tcp_window = tcph->window;
+
+ if (hw_marked & LRO_TCP_ACK_NUM)
+ lro_desc->tcp_ack = htonl(lro_info->tcp_ack_num);
+ else
+ lro_desc->tcp_ack = tcph->ack_seq;
/* don't update tcp_rcv_tsval, would not work with PAWS */
if (lro_desc->tcp_saw_tstamp) {
@@ -200,10 +233,17 @@ static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph,
lro_desc->tcp_rcv_tsecr = *(topt + 2);
}
- lro_desc->data_csum = csum_block_add(lro_desc->data_csum,
- lro_tcp_data_csum(iph, tcph,
- tcp_data_len),
- parent->len);
+ if (hw_marked & LRO_TCP_DATA_CSUM)
+ lro_desc->data_csum = csum_block_add(lro_desc->data_csum,
+ lro_info->tcp_data_csum,
+ parent->len);
+ else
+ lro_desc->data_csum =
+ csum_block_add(lro_desc->data_csum,
+ lro_tcp_data_csum(iph,
+ tcph,
+ tcp_data_len),
+ parent->len);
parent->len += tcp_data_len;
parent->data_len += tcp_data_len;
@@ -212,12 +252,13 @@ static void lro_add_common(struct net_lro_desc *lro_desc, struct iphdr *iph,
}
static void lro_add_packet(struct net_lro_desc *lro_desc, struct sk_buff *skb,
- struct iphdr *iph, struct tcphdr *tcph)
+ struct iphdr *iph, struct tcphdr *tcph,
+ struct net_lro_info *lro_info)
{
struct sk_buff *parent = lro_desc->parent;
int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
- lro_add_common(lro_desc, iph, tcph, tcp_data_len);
+ lro_add_common(lro_desc, iph, tcph, tcp_data_len, lro_info);
skb_pull(skb, (skb->len - tcp_data_len));
parent->truesize += skb->truesize;
@@ -230,6 +271,29 @@ static void lro_add_packet(struct net_lro_desc *lro_desc, struct sk_buff *skb,
lro_desc->last_skb = skb;
}
+static void lro_add_frags(struct net_lro_desc *lro_desc,
+ int len, int hlen, int truesize,
+ struct skb_frag_struct *skb_frags,
+ struct iphdr *iph, struct tcphdr *tcph)
+{
+ struct sk_buff *skb = lro_desc->parent;
+ int tcp_data_len = TCP_PAYLOAD_LENGTH(iph, tcph);
+
+ lro_add_common(lro_desc, iph, tcph, tcp_data_len, NULL);
+
+ skb->truesize += truesize;
+
+ skb_frags[0].page_offset += hlen;
+ skb_frag_size_sub(&skb_frags[0], hlen);
+
+ while (tcp_data_len > 0) {
+ *lro_desc->next_frag = *skb_frags;
+ tcp_data_len -= skb_frag_size(skb_frags);
+ lro_desc->next_frag++;
+ skb_frags++;
+ skb_shinfo(skb)->nr_frags++;
+ }
+}
static int lro_check_tcp_conn(struct net_lro_desc *lro_desc,
struct iphdr *iph,
@@ -284,6 +348,8 @@ static void lro_flush(struct net_lro_mgr *lro_mgr,
if (lro_mgr->features & LRO_F_NAPI)
netif_receive_skb(lro_desc->parent);
+ else if (lro_mgr->features & LRO_F_NI)
+ netif_rx_ni(lro_desc->parent);
else
netif_rx(lro_desc->parent);
@@ -292,12 +358,13 @@ static void lro_flush(struct net_lro_mgr *lro_mgr,
}
static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
- void *priv)
+ void *priv, struct net_lro_info *lro_info)
{
struct net_lro_desc *lro_desc;
struct iphdr *iph;
struct tcphdr *tcph;
u64 flags;
+ u64 hw_marked = 0;
int vlan_hdr_len = 0;
if (!lro_mgr->get_skb_header ||
@@ -308,7 +375,14 @@ static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
goto out;
- lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
+ if (lro_info)
+ hw_marked = lro_info->valid_fields;
+
+ if (hw_marked & LRO_DESC)
+ lro_desc = lro_info->lro_desc;
+ else
+ lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
+
if (!lro_desc)
goto out;
@@ -317,22 +391,38 @@ static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
vlan_hdr_len = VLAN_HLEN;
if (!lro_desc->active) { /* start new lro session */
- if (lro_tcp_ip_check(iph, tcph, skb->len - vlan_hdr_len, NULL))
- goto out;
+ if (hw_marked & LRO_ELIGIBILITY_CHECKED) {
+ if (!lro_info->lro_eligible)
+ goto out;
+ } else {
+ if (lro_tcp_ip_check(iph, tcph,
+ skb->len - vlan_hdr_len, NULL))
+ goto out;
+ }
skb->ip_summed = lro_mgr->ip_summed_aggr;
- lro_init_desc(lro_desc, skb, iph, tcph);
+ lro_init_desc(lro_desc, skb, iph, tcph, lro_info);
LRO_INC_STATS(lro_mgr, aggregated);
return 0;
}
- if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
- goto out2;
+ if (hw_marked & LRO_TCP_SEQ_NUM) {
+ if (lro_desc->tcp_next_seq != lro_info->tcp_seq_num)
+ goto out2;
+ } else {
+ if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
+ goto out2;
+ }
- if (lro_tcp_ip_check(iph, tcph, skb->len, lro_desc))
- goto out2;
+ if (hw_marked & LRO_ELIGIBILITY_CHECKED) {
+ if (!lro_info->lro_eligible)
+ goto out2;
+ } else {
+ if (lro_tcp_ip_check(iph, tcph, skb->len, lro_desc))
+ goto out2;
+ }
- lro_add_packet(lro_desc, skb, iph, tcph);
+ lro_add_packet(lro_desc, skb, iph, tcph, lro_info);
LRO_INC_STATS(lro_mgr, aggregated);
if ((lro_desc->pkt_aggr_cnt >= lro_mgr->max_aggr) ||
@@ -348,19 +438,161 @@ out:
return 1;
}
+static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
+ struct skb_frag_struct *frags,
+ int len, int true_size,
+ void *mac_hdr,
+ int hlen, __wsum sum,
+ u32 ip_summed)
+{
+ struct sk_buff *skb;
+ struct skb_frag_struct *skb_frags;
+ int data_len = len;
+ int hdr_len = min(len, hlen);
+
+ skb = netdev_alloc_skb(lro_mgr->dev, hlen + lro_mgr->frag_align_pad);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, lro_mgr->frag_align_pad);
+ skb->len = len;
+ skb->data_len = len - hdr_len;
+ skb->truesize += true_size;
+ skb->tail += hdr_len;
+
+ memcpy(skb->data, mac_hdr, hdr_len);
+
+ skb_frags = skb_shinfo(skb)->frags;
+ while (data_len > 0) {
+ *skb_frags = *frags;
+ data_len -= skb_frag_size(frags);
+ skb_frags++;
+ frags++;
+ skb_shinfo(skb)->nr_frags++;
+ }
+
+ skb_shinfo(skb)->frags[0].page_offset += hdr_len;
+ skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hdr_len);
+
+ skb->ip_summed = ip_summed;
+ skb->csum = sum;
+ skb->protocol = eth_type_trans(skb, lro_mgr->dev);
+ return skb;
+}
+
+static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
+ struct skb_frag_struct *frags,
+ int len, int true_size,
+ void *priv, __wsum sum)
+{
+ struct net_lro_desc *lro_desc;
+ struct iphdr *iph;
+ struct tcphdr *tcph;
+ struct sk_buff *skb;
+ u64 flags;
+ void *mac_hdr;
+ int mac_hdr_len;
+ int hdr_len = LRO_MAX_PG_HLEN;
+ int vlan_hdr_len = 0;
+
+ if (!lro_mgr->get_frag_header ||
+ lro_mgr->get_frag_header(frags, (void *)&mac_hdr, (void *)&iph,
+ (void *)&tcph, &flags, priv)) {
+ mac_hdr = skb_frag_address(frags);
+ goto out1;
+ }
+
+ if (!(flags & LRO_IPV4) || !(flags & LRO_TCP))
+ goto out1;
+
+ hdr_len = (int)((void *)(tcph) + TCP_HDR_LEN(tcph) - mac_hdr);
+ mac_hdr_len = (int)((void *)(iph) - mac_hdr);
+
+ lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
+ if (!lro_desc)
+ goto out1;
+
+ if (!lro_desc->active) { /* start new lro session */
+ if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, NULL))
+ goto out1;
+
+ skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
+ hdr_len, 0, lro_mgr->ip_summed_aggr);
+ if (!skb)
+ goto out;
+
+ if ((skb->protocol == htons(ETH_P_8021Q)) &&
+ !(lro_mgr->features & LRO_F_EXTRACT_VLAN_ID))
+ vlan_hdr_len = VLAN_HLEN;
+
+ iph = (void *)(skb->data + vlan_hdr_len);
+ tcph = (void *)((u8 *)skb->data + vlan_hdr_len
+ + IP_HDR_LEN(iph));
+
+ lro_init_desc(lro_desc, skb, iph, tcph, NULL);
+ LRO_INC_STATS(lro_mgr, aggregated);
+ return NULL;
+ }
+
+ if (lro_desc->tcp_next_seq != ntohl(tcph->seq))
+ goto out2;
+
+ if (lro_tcp_ip_check(iph, tcph, len - mac_hdr_len, lro_desc))
+ goto out2;
+
+ lro_add_frags(lro_desc, len, hdr_len, true_size, frags, iph, tcph);
+ LRO_INC_STATS(lro_mgr, aggregated);
+
+ if ((skb_shinfo(lro_desc->parent)->nr_frags >= lro_mgr->max_aggr) ||
+ lro_desc->parent->len > (0xFFFF - lro_mgr->dev->mtu))
+ lro_flush(lro_mgr, lro_desc);
+
+ return NULL;
+
+out2: /* send aggregated packets to the stack */
+ lro_flush(lro_mgr, lro_desc);
+
+out1: /* Original packet has to be posted to the stack */
+ skb = lro_gen_skb(lro_mgr, frags, len, true_size, mac_hdr,
+ hdr_len, sum, lro_mgr->ip_summed);
+out:
+ return skb;
+}
+
void lro_receive_skb(struct net_lro_mgr *lro_mgr,
struct sk_buff *skb,
void *priv)
{
- if (__lro_proc_skb(lro_mgr, skb, priv)) {
+ if (__lro_proc_skb(lro_mgr, skb, priv, NULL)) {
if (lro_mgr->features & LRO_F_NAPI)
netif_receive_skb(skb);
+ else if (lro_mgr->features & LRO_F_NI)
+ netif_rx_ni(skb);
else
netif_rx(skb);
}
}
EXPORT_SYMBOL(lro_receive_skb);
+void lro_receive_frags(struct net_lro_mgr *lro_mgr,
+ struct skb_frag_struct *frags,
+ int len, int true_size, void *priv, __wsum sum)
+{
+ struct sk_buff *skb;
+
+ skb = __lro_proc_segment(lro_mgr, frags, len, true_size, priv, sum);
+ if (!skb)
+ return;
+
+ if (lro_mgr->features & LRO_F_NAPI)
+ netif_receive_skb(skb);
+ else if (lro_mgr->features & LRO_F_NI)
+ netif_rx_ni(skb);
+ else
+ netif_rx(skb);
+}
+EXPORT_SYMBOL(lro_receive_frags);
+
void lro_flush_all(struct net_lro_mgr *lro_mgr)
{
int i;
@@ -372,3 +604,35 @@ void lro_flush_all(struct net_lro_mgr *lro_mgr)
}
}
EXPORT_SYMBOL(lro_flush_all);
+
+void lro_flush_pkt(struct net_lro_mgr *lro_mgr, struct iphdr *iph,
+ struct tcphdr *tcph)
+{
+ struct net_lro_desc *lro_desc;
+
+ lro_desc = lro_get_desc(lro_mgr, lro_mgr->lro_arr, iph, tcph);
+ if (lro_desc && lro_desc->active)
+ lro_flush(lro_mgr, lro_desc);
+}
+EXPORT_SYMBOL(lro_flush_pkt);
+
+void lro_flush_desc(struct net_lro_mgr *lro_mgr, struct net_lro_desc *lro_desc)
+{
+ if (lro_desc->active)
+ lro_flush(lro_mgr, lro_desc);
+}
+EXPORT_SYMBOL(lro_flush_desc);
+
+void lro_receive_skb_ext(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
+ void *priv, struct net_lro_info *lro_info)
+{
+ if (__lro_proc_skb(lro_mgr, skb, priv, lro_info)) {
+ if (lro_mgr->features & LRO_F_NAPI)
+ netif_receive_skb(skb);
+ else if (lro_mgr->features & LRO_F_NI)
+ netif_rx_ni(skb);
+ else
+ netif_rx(skb);
+ }
+}
+EXPORT_SYMBOL(lro_receive_skb_ext);
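A hedged sketch of a driver with TCP parse offload feeding the hardware-validated fields in; the mydrv_* types and descriptor fields are hypothetical, and seq/ack/window are passed in host byte order to match the conversions above:

static void mydrv_rx_one(struct mydrv_adapter *adapter, struct sk_buff *skb,
			 struct mydrv_rx_desc *rx_desc)
{
	struct net_lro_info info = { 0 };

	info.valid_fields = LRO_ELIGIBILITY_CHECKED | LRO_TCP_SEQ_NUM |
			    LRO_TCP_ACK_NUM | LRO_TCP_WIN;
	info.lro_eligible = rx_desc->tcp_ok;	/* hypothetical HW verdict */
	info.tcp_seq_num = rx_desc->tcp_seq;	/* host order */
	info.tcp_ack_num = rx_desc->tcp_ack;	/* host order */
	info.tcp_win = rx_desc->tcp_window;	/* host order */
	lro_receive_skb_ext(&adapter->lro_mgr, skb, adapter, &info);
}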
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 495fefe6a898..e29249bc23b8 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -657,7 +657,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
void *user_icmph, size_t icmph_len) {
u8 type, code;
- if (len > 0xFFFF)
+ if (len > 0xFFFF || len < icmph_len)
return -EMSGSIZE;
/* Must have at least a full ICMP header. */
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 46123369144f..8233e27679f2 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -42,6 +42,10 @@ static int tcp_syn_retries_min = 1;
static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static int tcp_delack_seg_min = TCP_DELACK_MIN;
+static int tcp_delack_seg_max = 60;
+static int tcp_use_userconfig_min;
+static int tcp_use_userconfig_max = 1;
/* Update system visible IP port range */
static void set_local_port_range(struct net *net, int range[2])
@@ -821,6 +825,25 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec_minmax,
.extra1 = &one
},
+ {
+ .procname = "tcp_delack_seg",
+ .data = &sysctl_tcp_delack_seg,
+ .maxlen = sizeof(sysctl_tcp_delack_seg),
+ .mode = 0644,
+ .proc_handler = tcp_proc_delayed_ack_control,
+ .extra1 = &tcp_delack_seg_min,
+ .extra2 = &tcp_delack_seg_max,
+ },
+ {
+ .procname = "tcp_use_userconfig",
+ .data = &sysctl_tcp_use_userconfig,
+ .maxlen = sizeof(sysctl_tcp_use_userconfig),
+ .mode = 0644,
+ .proc_handler = tcp_use_userconfig_sysctl_handler,
+ .extra1 = &tcp_use_userconfig_min,
+ .extra2 = &tcp_use_userconfig_max,
+ },
+
{ }
};
@@ -903,6 +926,13 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_do_large_bitmap,
},
{
+ .procname = "reserved_port_bind",
+ .data = &sysctl_reserved_port_bind,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
.procname = "ip_no_pmtu_disc",
.data = &init_net.ipv4.sysctl_ip_no_pmtu_disc,
.maxlen = sizeof(int),
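A hedged userspace sketch of the new knob; the port number is illustrative and is assumed to have been listed in net.ipv4.ip_local_reserved_ports first, and the default of 1 preserves the old behaviour of allowing such binds:

#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Beforehand:
 *   echo 5000 > /proc/sys/net/ipv4/ip_local_reserved_ports
 *   echo 0    > /proc/sys/net/ipv4/reserved_port_bind
 */
static void try_reserved_bind(void)
{
	struct sockaddr_in a = { 0 };
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	a.sin_family = AF_INET;
	a.sin_port = htons(5000);		/* an explicitly reserved port */
	a.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
		perror("bind");		/* refused while the knob is 0 */
}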
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index dc173e0d2184..676566faf649 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -302,6 +302,12 @@ EXPORT_SYMBOL(sysctl_tcp_wmem);
atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
+int sysctl_tcp_delack_seg __read_mostly = TCP_DELACK_SEG;
+EXPORT_SYMBOL(sysctl_tcp_delack_seg);
+
+int sysctl_tcp_use_userconfig __read_mostly;
+EXPORT_SYMBOL(sysctl_tcp_use_userconfig);
+
/*
* Current number of TCP sockets.
*/
@@ -1398,8 +1404,11 @@ static void tcp_cleanup_rbuf(struct sock *sk, int copied)
/* Delayed ACKs frequently hit locked sockets during bulk
* receive. */
if (icsk->icsk_ack.blocked ||
- /* Once-per-two-segments ACK was not sent by tcp_input.c */
- tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
+ /* Once-per-sysctl_tcp_delack_seg segments
+ * ACK was not sent by tcp_input.c
+ */
+ tp->rcv_nxt - tp->rcv_wup > (icsk->icsk_ack.rcv_mss) *
+ sysctl_tcp_delack_seg ||
/*
* If this read emptied read buffer, we send ACK, if
* connection is not bidirectional, user drained
@@ -2711,6 +2720,14 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
rate64 = rate != ~0U ? rate : ~0ULL;
put_unaligned(rate64, &info->tcpi_max_pacing_rate);
+ /* Expose reference count for socket */
+ if (sk->sk_socket) {
+ struct file *filep = sk->sk_socket->file;
+
+ if (filep)
+ info->tcpi_count = file_count(filep);
+ }
+
do {
start = u64_stats_fetch_begin_irq(&tp->syncp);
put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
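A worked example of the relaxed threshold: with icsk_ack.rcv_mss at 1460 bytes and tcp_delack_seg raised to 4, tcp_cleanup_rbuf (and __tcp_ack_snd_check in tcp_input.c below) lets 4 * 1460 = 5840 unacknowledged received bytes accumulate before forcing an ACK, rather than acting as soon as a single rcv_mss is exceeded.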
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 35e97ff3054a..e7e227f6760f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4964,7 +4964,8 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
struct tcp_sock *tp = tcp_sk(sk);
/* More than one full frame received... */
- if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
+ if (((tp->rcv_nxt - tp->rcv_wup) > (inet_csk(sk)->icsk_ack.rcv_mss) *
+ sysctl_tcp_delack_seg &&
/* ... and right edge of window advances far enough.
* (tcp_recvmsg() will send ACK otherwise). Or...
*/
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 25309b137c43..51a77e20f6c6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -845,7 +845,8 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
struct sk_buff *skb;
/* First, grab a route. */
- if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
+ if (!dst && (dst = inet_csk_route_req(
+ (struct sock *)sk, &fl4, req)) == NULL)
return -1;
skb = tcp_make_synack(sk, dst, req, foc, attach_req);
@@ -1204,7 +1205,8 @@ static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
const struct request_sock *req,
bool *strict)
{
- struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
+ struct dst_entry *dst = inet_csk_route_req(
+ (struct sock *)sk, &fl->u.ip4, req);
if (strict) {
if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
@@ -2191,6 +2193,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
__be32 src = inet->inet_rcv_saddr;
__u16 destp = ntohs(inet->inet_dport);
__u16 srcp = ntohs(inet->inet_sport);
+ __u8 seq_state = sk->sk_state;
int rx_queue;
int state;
@@ -2210,6 +2213,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
timer_expires = jiffies;
}
+ if (inet->transparent)
+ seq_state |= 0x80;
+
state = sk_state_load(sk);
if (state == TCP_LISTEN)
rx_queue = sk->sk_ack_backlog;
@@ -2221,7 +2227,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
- i, src, srcp, dest, destp, state,
+ i, src, srcp, dest, destp, seq_state,
tp->write_seq - tp->snd_una,
rx_queue,
timer_active,
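A worked example of the encoding: an ESTABLISHED socket normally shows state 01 in the st column of /proc/net/tcp; the same socket with IP_TRANSPARENT set now shows 81 (0x01 | 0x80), so userspace tools can spot transparent sockets without a new field. The UDP and IPv6 variants below apply the same trick.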
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 193ba1fa8a9a..ce20968de667 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -32,6 +32,40 @@ int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;
+/* Function to reset tcp_ack related sysctls when the master control is reset */
+void set_tcp_default(void)
+{
+ sysctl_tcp_delack_seg = TCP_DELACK_SEG;
+}
+
+/* sysctl handler for the tcp_ack related delayed-ACK segment control */
+int tcp_proc_delayed_ack_control(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length,
+ loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+ /* ret is 0 if input validation succeeded and the value was
+ * written to the sysctl table; otherwise the stack keeps using
+ * the currently configured values.
+ */
+ return ret;
+}
+
+/* sysctl handler for the tcp_ack related master control */
+int tcp_use_userconfig_sysctl_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *length,
+ loff_t *ppos)
+{
+ int ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+ if (write && ret == 0) {
+ if (!sysctl_tcp_use_userconfig)
+ set_tcp_default();
+ }
+ return ret;
+}
+
static void tcp_write_err(struct sock *sk)
{
sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ad3d1534c524..254fcc7f1825 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -257,6 +257,11 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
} else {
hslot = udp_hashslot(udptable, net, snum);
spin_lock_bh(&hslot->lock);
+
+ if (inet_is_local_reserved_port(net, snum) &&
+ !sysctl_reserved_port_bind)
+ goto fail_unlock;
+
if (hslot->count > 10) {
int exist;
unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;
@@ -2448,14 +2453,20 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
int bucket)
{
struct inet_sock *inet = inet_sk(sp);
+ struct udp_sock *up = udp_sk(sp);
__be32 dest = inet->inet_daddr;
__be32 src = inet->inet_rcv_saddr;
__u16 destp = ntohs(inet->inet_dport);
__u16 srcp = ntohs(inet->inet_sport);
+ __u8 state = sp->sk_state;
+ if (up->encap_rcv)
+ state |= 0xF0;
+ else if (inet->transparent)
+ state |= 0x80;
seq_printf(f, "%5d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
- bucket, src, srcp, dest, destp, sp->sk_state,
+ bucket, src, srcp, dest, destp, state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 7b0edb37a115..ab7ab839b057 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -20,7 +20,7 @@
static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
- int tos, int oif,
+ int tos,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr)
{
@@ -29,7 +29,6 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
memset(fl4, 0, sizeof(*fl4));
fl4->daddr = daddr->a4;
fl4->flowi4_tos = tos;
- fl4->flowi4_oif = oif;
if (saddr)
fl4->saddr = saddr->a4;
@@ -42,22 +41,22 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
return ERR_CAST(rt);
}
-static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
+static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr)
{
struct flowi4 fl4;
- return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr);
+ return __xfrm4_dst_lookup(net, &fl4, tos, saddr, daddr);
}
-static int xfrm4_get_saddr(struct net *net, int oif,
+static int xfrm4_get_saddr(struct net *net,
xfrm_address_t *saddr, xfrm_address_t *daddr)
{
struct dst_entry *dst;
struct flowi4 fl4;
- dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr);
+ dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);
if (IS_ERR(dst))
return -EHOSTUNREACH;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 498a664b8dc9..01455f492e17 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -217,6 +217,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
},
.use_oif_addrs_only = 0,
.ignore_routes_with_linkdown = 0,
+ .accept_ra_prefix_route = 1,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -262,6 +263,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
},
.use_oif_addrs_only = 0,
.ignore_routes_with_linkdown = 0,
+ .accept_ra_prefix_route = 1,
};
/* Check if a valid qdisc is available */
@@ -2056,6 +2058,16 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
return addrconf_ifid_ieee1394(eui, dev);
case ARPHRD_TUNNEL6:
return addrconf_ifid_ip6tnl(eui, dev);
+ case ARPHRD_RAWIP: {
+ struct in6_addr lladdr;
+
+ if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
+ get_random_bytes(eui, 8);
+ else
+ memcpy(eui, lladdr.s6_addr + 8, 8);
+
+ return 0;
+ }
}
return -1;
}
@@ -2438,8 +2450,11 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
flags |= RTF_EXPIRES;
expires = jiffies_to_clock_t(rt_expires);
}
- addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
- dev, expires, flags);
+ if (dev->ip6_ptr->cnf.accept_ra_prefix_route) {
+ addrconf_prefix_route(&pinfo->prefix,
+ pinfo->prefix_len,
+ dev, expires, flags);
+ }
}
ip6_rt_put(rt);
}
@@ -3103,7 +3118,9 @@ static void addrconf_dev_config(struct net_device *dev)
(dev->type != ARPHRD_IEEE802154) &&
(dev->type != ARPHRD_IEEE1394) &&
(dev->type != ARPHRD_TUNNEL6) &&
- (dev->type != ARPHRD_6LOWPAN)) {
+ (dev->type != ARPHRD_6LOWPAN) &&
+ (dev->type != ARPHRD_RAWIP) &&
+ (dev->type != ARPHRD_INFINIBAND)) {
/* Alas, we support only Ethernet autoconfiguration. */
return;
}
@@ -5755,6 +5772,13 @@ static struct addrconf_sysctl_table
.proc_handler = proc_dointvec,
},
{
+ .procname = "accept_ra_prefix_route",
+ .data = &ipv6_devconf.accept_ra_prefix_route,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "stable_secret",
.data = &ipv6_devconf.stable_secret,
.maxlen = IPV6_MAX_STRLEN,
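A hedged userspace sketch of driving the new per-device knob; the interface name is illustrative, and the path follows the usual per-device conf layout:

#include <stdio.h>

/* With the knob at 0, router-advertised prefixes still produce SLAAC
 * addresses, but the kernel no longer installs the covering prefix
 * route itself; the route can then be installed from userspace instead.
 */
static void disable_ra_prefix_route(const char *ifname)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/proc/sys/net/ipv6/conf/%s/accept_ra_prefix_route", ifname);
	f = fopen(path, "w");
	if (f) {
		fputs("0", f);
		fclose(f);
	}
}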
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index cf2dfb222230..e9dcd7e587e2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -964,9 +964,14 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
__u16 srcp, __u16 destp, int bucket)
{
const struct in6_addr *dest, *src;
+ __u8 state = sp->sk_state;
dest = &sp->sk_v6_daddr;
src = &sp->sk_v6_rcv_saddr;
+
+ if (inet_sk(sp) && inet_sk(sp)->transparent)
+ state |= 0x80;
+
seq_printf(seq,
"%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
"%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
@@ -975,7 +980,7 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
- sp->sk_state,
+ state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 225f5f7f26ba..3feeca6a713d 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -239,9 +239,6 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
/* flush if Traffic Class fields are different */
NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
NAPI_GRO_CB(p)->flush |= flush;
-
- /* Clear flush_id, there's really no concept of ID in IPv6. */
- NAPI_GRO_CB(p)->flush_id = 0;
}
NAPI_GRO_CB(skb)->flush |= flush;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 58900c21e4e4..cd96a01032a2 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -891,8 +891,7 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
#ifdef CONFIG_IPV6_SUBTREES
ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
#endif
- (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
- (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
+ (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
dst_release(dst);
dst = NULL;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 9ba4e350c6a9..83ddc8074e55 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1049,9 +1049,6 @@ static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
saved_fn = fn;
- if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
- oif = 0;
-
redo_rt6_select:
rt = rt6_select(fn, oif, strict);
if (rt->rt6i_nsiblings)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8ed00c8a128a..a6079e7a6c6b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1712,6 +1712,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
int rx_queue;
int state;
+ __u8 state_seq = sp->sk_state;
dest = &sp->sk_v6_daddr;
src = &sp->sk_v6_rcv_saddr;
@@ -1743,6 +1744,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
*/
rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
+ if (inet->transparent)
+ state_seq |= 0x80;
+
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
"%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
@@ -1751,7 +1755,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
- state,
+ state_seq,
tp->write_seq - tp->snd_una,
rx_queue,
timer_active,
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index c074771a10f7..c23742462f02 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -27,7 +27,7 @@
static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
-static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
+static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr)
{
@@ -36,8 +36,6 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
int err;
memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_oif = oif;
- fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
if (saddr)
memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
@@ -53,13 +51,13 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
return dst;
}
-static int xfrm6_get_saddr(struct net *net, int oif,
+static int xfrm6_get_saddr(struct net *net,
xfrm_address_t *saddr, xfrm_address_t *daddr)
{
struct dst_entry *dst;
struct net_device *dev;
- dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr);
+ dst = xfrm6_dst_lookup(net, 0, NULL, daddr);
if (IS_ERR(dst))
return -EHOSTUNREACH;
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 367784be5df2..f598ff80b30e 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -7,6 +7,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -61,6 +62,14 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
{
struct ieee80211_local *local = sta->local;
struct tid_ampdu_rx *tid_rx;
+ struct ieee80211_ampdu_params params = {
+ .sta = &sta->sta,
+ .action = IEEE80211_AMPDU_RX_STOP,
+ .tid = tid,
+ .amsdu = false,
+ .timeout = 0,
+ .ssn = 0,
+ };
lockdep_assert_held(&sta->ampdu_mlme.mtx);
@@ -78,8 +87,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator",
(int)reason);
- if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
- &sta->sta, tid, NULL, 0, false))
+ if (drv_ampdu_action(local, sta->sdata, &params))
sdata_info(sta->sdata,
"HW problem - can not stop rx aggregation for %pM tid %d\n",
sta->sta.addr, tid);
@@ -237,6 +245,15 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
{
struct ieee80211_local *local = sta->sdata->local;
struct tid_ampdu_rx *tid_agg_rx;
+ struct ieee80211_ampdu_params params = {
+ .sta = &sta->sta,
+ .action = IEEE80211_AMPDU_RX_START,
+ .tid = tid,
+ .amsdu = false,
+ .timeout = timeout,
+ .ssn = start_seq_num,
+ };
+
int i, ret = -EOPNOTSUPP;
u16 status = WLAN_STATUS_REQUEST_DECLINED;
@@ -275,6 +292,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
/* make sure the size doesn't exceed the maximum supported by the hw */
if (buf_size > local->hw.max_rx_aggregation_subframes)
buf_size = local->hw.max_rx_aggregation_subframes;
+ params.buf_size = buf_size;
/* examine state machine */
mutex_lock(&sta->ampdu_mlme.mtx);
@@ -322,8 +340,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
for (i = 0; i < buf_size; i++)
__skb_queue_head_init(&tid_agg_rx->reorder_buf[i]);
- ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
- &sta->sta, tid, &start_seq_num, 0, false);
+ ret = drv_ampdu_action(local, sta->sdata, &params);
ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n",
sta->sta.addr, tid, ret);
if (ret) {
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index ff757181b0a8..4932e9f243a2 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -7,6 +7,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2007-2010, Intel Corporation
+ * Copyright(c) 2015 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -295,7 +296,14 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
{
struct ieee80211_local *local = sta->local;
struct tid_ampdu_tx *tid_tx;
- enum ieee80211_ampdu_mlme_action action;
+ struct ieee80211_ampdu_params params = {
+ .sta = &sta->sta,
+ .tid = tid,
+ .buf_size = 0,
+ .amsdu = false,
+ .timeout = 0,
+ .ssn = 0,
+ };
int ret;
lockdep_assert_held(&sta->ampdu_mlme.mtx);
@@ -304,10 +312,10 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
case AGG_STOP_DECLINED:
case AGG_STOP_LOCAL_REQUEST:
case AGG_STOP_PEER_REQUEST:
- action = IEEE80211_AMPDU_TX_STOP_CONT;
+ params.action = IEEE80211_AMPDU_TX_STOP_CONT;
break;
case AGG_STOP_DESTROY_STA:
- action = IEEE80211_AMPDU_TX_STOP_FLUSH;
+ params.action = IEEE80211_AMPDU_TX_STOP_FLUSH;
break;
default:
WARN_ON_ONCE(1);
@@ -330,9 +338,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
spin_unlock_bh(&sta->lock);
if (reason != AGG_STOP_DESTROY_STA)
return -EALREADY;
- ret = drv_ampdu_action(local, sta->sdata,
- IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
- &sta->sta, tid, NULL, 0, false);
+ params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT;
+ ret = drv_ampdu_action(local, sta->sdata, &params);
WARN_ON_ONCE(ret);
return 0;
}
@@ -381,8 +388,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
WLAN_BACK_INITIATOR;
tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;
- ret = drv_ampdu_action(local, sta->sdata, action,
- &sta->sta, tid, NULL, 0, false);
+ ret = drv_ampdu_action(local, sta->sdata, &params);
/* HW shall not deny going back to legacy */
if (WARN_ON(ret)) {
@@ -445,7 +451,14 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
struct tid_ampdu_tx *tid_tx;
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- u16 start_seq_num;
+ struct ieee80211_ampdu_params params = {
+ .sta = &sta->sta,
+ .action = IEEE80211_AMPDU_TX_START,
+ .tid = tid,
+ .buf_size = 0,
+ .amsdu = false,
+ .timeout = 0,
+ };
int ret;
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
@@ -467,10 +480,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
*/
synchronize_net();
- start_seq_num = sta->tid_seq[tid] >> 4;
-
- ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
- &sta->sta, tid, &start_seq_num, 0, false);
+ params.ssn = sta->tid_seq[tid] >> 4;
+ ret = drv_ampdu_action(local, sdata, &params);
if (ret) {
ht_dbg(sdata,
"BA request denied - HW unavailable for %pM tid %d\n",
@@ -499,7 +510,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
/* send AddBA request */
ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
- tid_tx->dialog_token, start_seq_num,
+ tid_tx->dialog_token, params.ssn,
IEEE80211_MAX_AMPDU_BUF,
tid_tx->timeout);
}
@@ -684,18 +695,24 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
struct sta_info *sta, u16 tid)
{
struct tid_ampdu_tx *tid_tx;
+ struct ieee80211_ampdu_params params = {
+ .sta = &sta->sta,
+ .action = IEEE80211_AMPDU_TX_OPERATIONAL,
+ .tid = tid,
+ .timeout = 0,
+ .ssn = 0,
+ };
lockdep_assert_held(&sta->ampdu_mlme.mtx);
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+ params.buf_size = tid_tx->buf_size;
+ params.amsdu = tid_tx->amsdu;
ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
sta->sta.addr, tid);
- drv_ampdu_action(local, sta->sdata,
- IEEE80211_AMPDU_TX_OPERATIONAL,
- &sta->sta, tid, NULL, tid_tx->buf_size,
- tid_tx->amsdu);
+ drv_ampdu_action(local, sta->sdata, &params);
/*
* synchronize with TX path, while splicing the TX path
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index ca1fe5576103..c258f1041d33 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -284,9 +284,7 @@ int drv_switch_vif_chanctx(struct ieee80211_local *local,
int drv_ampdu_action(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid,
- u16 *ssn, u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
int ret = -EOPNOTSUPP;
@@ -296,12 +294,10 @@ int drv_ampdu_action(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return -EIO;
- trace_drv_ampdu_action(local, sdata, action, sta, tid,
- ssn, buf_size, amsdu);
+ trace_drv_ampdu_action(local, sdata, params);
if (local->ops->ampdu_action)
- ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
- sta, tid, ssn, buf_size, amsdu);
+ ret = local->ops->ampdu_action(&local->hw, &sdata->vif, params);
trace_drv_return_int(local, ret);
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 154ce4b13406..18b0d65baff0 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -585,9 +585,7 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
int drv_ampdu_action(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid,
- u16 *ssn, u8 buf_size, bool amsdu);
+ struct ieee80211_ampdu_params *params);
static inline int drv_get_survey(struct ieee80211_local *local, int idx,
struct survey_info *survey)
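A hedged sketch of the driver side after this refactor; the mydrv_* helpers are hypothetical, but the fields mirror struct ieee80211_ampdu_params as consumed above:

static int mydrv_ampdu_action(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_ampdu_params *params)
{
	switch (params->action) {
	case IEEE80211_AMPDU_RX_START:
		return mydrv_rx_agg_start(hw, params->sta, params->tid,
					  params->ssn, params->buf_size);
	case IEEE80211_AMPDU_RX_STOP:
		return mydrv_rx_agg_stop(hw, params->sta, params->tid);
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		return mydrv_tx_agg_oper(hw, params->sta, params->tid,
					 params->buf_size, params->amsdu);
	default:
		return -EOPNOTSUPP;
	}
}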
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 6837a46ca4a2..67fede656ea5 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -808,6 +808,7 @@ enum txq_info_flags {
struct txq_info {
struct sk_buff_head queue;
unsigned long flags;
+ unsigned long byte_cnt;
/* keep last! */
struct ieee80211_txq txq;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index bcb0a1b64556..7fc1250c8d37 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -979,6 +979,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
spin_lock_bh(&txqi->queue.lock);
ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
+ txqi->byte_cnt = 0;
spin_unlock_bh(&txqi->queue.lock);
atomic_set(&sdata->txqs_len[txqi->txq.ac], 0);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 2b528389409f..4f4c45ba7b70 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -122,7 +122,8 @@ static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
hdr = (void *)(skb->data + rtap_vendor_space);
if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
- RX_FLAG_FAILED_PLCP_CRC))
+ RX_FLAG_FAILED_PLCP_CRC |
+ RX_FLAG_ONLY_MONITOR))
return true;
if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
@@ -507,7 +508,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
return NULL;
}
- if (!local->monitors) {
+ if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
if (should_drop_frame(origskb, present_fcs_len,
rtap_vendor_space)) {
dev_kfree_skb(origskb);
@@ -3453,6 +3454,7 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
* be called with rcu_read_lock protection.
*/
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+ struct ieee80211_sta *pubsta,
struct sk_buff *skb,
struct napi_struct *napi)
{
@@ -3462,7 +3464,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
__le16 fc;
struct ieee80211_rx_data rx;
struct ieee80211_sub_if_data *prev;
- struct sta_info *sta, *prev_sta;
struct rhash_head *tmp;
int err = 0;
@@ -3498,7 +3499,14 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
ieee80211_is_beacon(hdr->frame_control)))
ieee80211_scan_rx(local, skb);
- if (ieee80211_is_data(fc)) {
+ if (pubsta) {
+ rx.sta = container_of(pubsta, struct sta_info, sta);
+ rx.sdata = rx.sta->sdata;
+ if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
+ return;
+ goto out;
+ } else if (ieee80211_is_data(fc)) {
+ struct sta_info *sta, *prev_sta;
const struct bucket_table *tbl;
prev_sta = NULL;
@@ -3572,8 +3580,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
* This is the receive path handler. It is called by a low level driver when an
* 802.11 MPDU is received from the hardware.
*/
-void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct napi_struct *napi)
+void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
+ struct sk_buff *skb, struct napi_struct *napi)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_rate *rate = NULL;
@@ -3672,7 +3680,8 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb,
ieee80211_tpt_led_trig_rx(local,
((struct ieee80211_hdr *)skb->data)->frame_control,
skb->len);
- __ieee80211_rx_handle_packet(hw, skb, napi);
+
+ __ieee80211_rx_handle_packet(hw, pubsta, skb, napi);
rcu_read_unlock();
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 67066d048e6f..fe88071d4abb 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -115,6 +115,7 @@ static void __cleanup_single_sta(struct sta_info *sta)
ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
atomic_sub(n, &sdata->txqs_len[txqi->txq.ac]);
+ txqi->byte_cnt = 0;
}
}
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 56c6d6cfa5a1..913e959b03cf 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -80,7 +80,23 @@
#define KEY_PR_FMT " cipher:0x%x, flags=%#x, keyidx=%d, hw_key_idx=%d"
#define KEY_PR_ARG __entry->cipher, __entry->flags, __entry->keyidx, __entry->hw_key_idx
-
+#define AMPDU_ACTION_ENTRY __field(enum ieee80211_ampdu_mlme_action, \
+ ieee80211_ampdu_mlme_action) \
+ STA_ENTRY \
+ __field(u16, tid) \
+ __field(u16, ssn) \
+ __field(u8, buf_size) \
+ __field(bool, amsdu) \
+ __field(u16, timeout)
+#define AMPDU_ACTION_ASSIGN STA_NAMED_ASSIGN(params->sta); \
+ __entry->tid = params->tid; \
+ __entry->ssn = params->ssn; \
+ __entry->buf_size = params->buf_size; \
+ __entry->amsdu = params->amsdu; \
+ __entry->timeout = params->timeout;
+#define AMPDU_ACTION_PR_FMT STA_PR_FMT " tid %d, ssn %d, buf_size %u, amsdu %d, timeout %d"
+#define AMPDU_ACTION_PR_ARG STA_PR_ARG, __entry->tid, __entry->ssn, \
+ __entry->buf_size, __entry->amsdu, __entry->timeout
/*
* Tracing for driver callbacks.
@@ -970,38 +986,25 @@ DEFINE_EVENT(local_only_evt, drv_tx_last_beacon,
TRACE_EVENT(drv_ampdu_action,
TP_PROTO(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid,
- u16 *ssn, u8 buf_size, bool amsdu),
+ struct ieee80211_ampdu_params *params),
- TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size, amsdu),
+ TP_ARGS(local, sdata, params),
TP_STRUCT__entry(
LOCAL_ENTRY
- STA_ENTRY
- __field(u32, action)
- __field(u16, tid)
- __field(u16, ssn)
- __field(u8, buf_size)
- __field(bool, amsdu)
VIF_ENTRY
+ AMPDU_ACTION_ENTRY
),
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
- STA_ASSIGN;
- __entry->action = action;
- __entry->tid = tid;
- __entry->ssn = ssn ? *ssn : 0;
- __entry->buf_size = buf_size;
- __entry->amsdu = amsdu;
+ AMPDU_ACTION_ASSIGN;
),
TP_printk(
- LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d amsdu:%d",
- LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action,
- __entry->tid, __entry->buf_size, __entry->amsdu
+ LOCAL_PR_FMT VIF_PR_FMT AMPDU_ACTION_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, AMPDU_ACTION_PR_ARG
)
);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index ea5dfefd765a..a45248a4967b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1266,7 +1266,11 @@ static void ieee80211_drv_tx(struct ieee80211_local *local,
if (atomic_read(&sdata->txqs_len[ac]) >= local->hw.txq_ac_max_pending)
netif_stop_subqueue(sdata->dev, ac);
- skb_queue_tail(&txqi->queue, skb);
+ spin_lock_bh(&txqi->queue.lock);
+ txqi->byte_cnt += skb->len;
+ __skb_queue_tail(&txqi->queue, skb);
+ spin_unlock_bh(&txqi->queue.lock);
+
drv_wake_tx_queue(local, txqi);
return;
@@ -1294,6 +1298,8 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
if (!skb)
goto out;
+ txqi->byte_cnt -= skb->len;
+
atomic_dec(&sdata->txqs_len[ac]);
if (__netif_subqueue_stopped(sdata->dev, ac))
ieee80211_propagate_queue_wake(local, sdata->vif.hw_queue[ac]);
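A hedged sketch of a driver consuming the new byte accounting through ieee80211_txq_get_depth(), added to util.c further down; the scheduling policy shown is illustrative:

static void mydrv_wake_tx_queue(struct ieee80211_hw *hw,
				struct ieee80211_txq *txq)
{
	unsigned long frames, bytes;

	ieee80211_txq_get_depth(txq, &frames, &bytes);
	/* Illustrative policy: only pull queues holding a burst of data. */
	if (bytes >= 8192 || frames >= 16)
		mydrv_schedule_txq(hw, txq);
}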
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 33344f5a66a8..bc799a4b7cd1 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3198,10 +3198,11 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *sdata_iter;
enum nl80211_iftype iftype = sdata->wdev.iftype;
- int num[NUM_NL80211_IFTYPES];
struct ieee80211_chanctx *ctx;
- int num_different_channels = 0;
int total = 1;
+ struct iface_combination_params params = {
+ .radar_detect = radar_detect,
+ };
lockdep_assert_held(&local->chanctx_mtx);
@@ -3212,9 +3213,6 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
!chandef->chan))
return -EINVAL;
- if (chandef)
- num_different_channels = 1;
-
if (WARN_ON(iftype >= NUM_NL80211_IFTYPES))
return -EINVAL;
@@ -3225,24 +3223,26 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
return 0;
}
- memset(num, 0, sizeof(num));
+ if (chandef)
+ params.num_different_channels = 1;
if (iftype != NL80211_IFTYPE_UNSPECIFIED)
- num[iftype] = 1;
+ params.iftype_num[iftype] = 1;
list_for_each_entry(ctx, &local->chanctx_list, list) {
if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
continue;
- radar_detect |= ieee80211_chanctx_radar_detect(local, ctx);
+ params.radar_detect |=
+ ieee80211_chanctx_radar_detect(local, ctx);
if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) {
- num_different_channels++;
+ params.num_different_channels++;
continue;
}
if (chandef && chanmode == IEEE80211_CHANCTX_SHARED &&
cfg80211_chandef_compatible(chandef,
&ctx->conf.def))
continue;
- num_different_channels++;
+ params.num_different_channels++;
}
list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) {
@@ -3255,16 +3255,14 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
continue;
- num[wdev_iter->iftype]++;
+ params.iftype_num[wdev_iter->iftype]++;
total++;
}
- if (total == 1 && !radar_detect)
+ if (total == 1 && !params.radar_detect)
return 0;
- return cfg80211_check_combinations(local->hw.wiphy,
- num_different_channels,
- radar_detect, num);
+ return cfg80211_check_combinations(local->hw.wiphy, &params);
}
static void
@@ -3280,12 +3278,10 @@ ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c,
int ieee80211_max_num_channels(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
- int num[NUM_NL80211_IFTYPES] = {};
struct ieee80211_chanctx *ctx;
- int num_different_channels = 0;
- u8 radar_detect = 0;
u32 max_num_different_channels = 1;
int err;
+ struct iface_combination_params params = {0};
lockdep_assert_held(&local->chanctx_mtx);
@@ -3293,17 +3289,17 @@ int ieee80211_max_num_channels(struct ieee80211_local *local)
if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)
continue;
- num_different_channels++;
+ params.num_different_channels++;
- radar_detect |= ieee80211_chanctx_radar_detect(local, ctx);
+ params.radar_detect |=
+ ieee80211_chanctx_radar_detect(local, ctx);
}
list_for_each_entry_rcu(sdata, &local->interfaces, list)
- num[sdata->wdev.iftype]++;
+ params.iftype_num[sdata->wdev.iftype]++;
- err = cfg80211_iter_combinations(local->hw.wiphy,
- num_different_channels, radar_detect,
- num, ieee80211_iter_max_chans,
+ err = cfg80211_iter_combinations(local->hw.wiphy, &params,
+ ieee80211_iter_max_chans,
&max_num_different_channels);
if (err < 0)
return err;
@@ -3344,3 +3340,17 @@ void ieee80211_init_tx_queue(struct ieee80211_sub_if_data *sdata,
txqi->txq.ac = IEEE80211_AC_BE;
}
}
+
+void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
+ unsigned long *frame_cnt,
+ unsigned long *byte_cnt)
+{
+ struct txq_info *txqi = to_txq_info(txq);
+
+ if (frame_cnt)
+ *frame_cnt = txqi->queue.qlen;
+
+ if (byte_cnt)
+ *byte_cnt = txqi->byte_cnt;
+}
+EXPORT_SYMBOL(ieee80211_txq_get_depth);
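
A driver can use the new export to size or throttle its hardware queues. A
minimal sketch, assuming the txq pointer comes from the driver's own
wake_tx_queue handling (example_check_txq is a hypothetical helper):

	/* Query pending work on a mac80211 TX queue; either out-pointer
	 * may be NULL when only one of the two counters is needed.
	 */
	static void example_check_txq(struct ieee80211_txq *txq)
	{
		unsigned long frames, bytes;

		ieee80211_txq_get_depth(txq, &frames, &bytes);
		pr_debug("txq ac=%d: %lu frames, %lu bytes pending\n",
			 txq->ac, frames, bytes);
	}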
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 1959548b1161..f83c255d7da2 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -749,6 +749,20 @@ config NETFILTER_XT_TARGET_IDLETIMER
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_TARGET_HARDIDLETIMER
+ tristate "HARDIDLETIMER target support"
+ depends on NETFILTER_ADVANCED
+	help
+	  This option adds the `HARDIDLETIMER' target. Each matching packet
+	  resets the timer associated with the label specified when the rule
+	  is added. When the timer expires, it triggers a sysfs notification.
+	  The remaining time until expiration can be read via sysfs.
+	  Unlike IDLETIMER, HARDIDLETIMER sends the notification even when
+	  the CPU is suspended.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_TARGET_LED
tristate '"LED" target support'
depends on LEDS_CLASS && LEDS_TRIGGERS
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index ad6a8aa63b1f..fcdc061130d7 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -122,6 +122,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER) += xt_HARDIDLETIMER.o
# matches
obj-$(CONFIG_NETFILTER_XT_MATCH_ADDRTYPE) += xt_addrtype.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index f39276d1c2d7..67406d757001 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -123,7 +123,7 @@ EXPORT_SYMBOL(nf_register_net_hook);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
struct list_head *hook_list;
- struct nf_hook_entry *entry;
+ struct nf_hook_entry *entry = NULL;
struct nf_hook_ops *elem;
hook_list = nf_find_hook_list(net, reg);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 86a3c6f0c871..2efe0500bba9 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -237,7 +237,7 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
static void
clean_from_lists(struct nf_conn *ct)
{
- pr_debug("clean_from_lists(%p)\n", ct);
+ pr_debug("clean_from_lists(%pK)\n", ct);
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
@@ -330,7 +330,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
struct net *net = nf_ct_net(ct);
struct nf_conntrack_l4proto *l4proto;
- pr_debug("destroy_conntrack(%p)\n", ct);
+ pr_debug("destroy_conntrack(%pK)\n", ct);
NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
NF_CT_ASSERT(!timer_pending(&ct->timeout));
@@ -361,7 +361,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
if (ct->master)
nf_ct_put(ct->master);
- pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
+ pr_debug("destroy_conntrack: returning ct=%pK to slab\n", ct);
nf_conntrack_free(ct);
}
@@ -629,7 +629,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
* confirmed us.
*/
NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
- pr_debug("Confirming conntrack %p\n", ct);
+ pr_debug("Confirming conntrack %pK\n", ct);
/* We have to check the DYING flag after unlink to prevent
* a race against nf_ct_get_next_corpse() possibly called from
* user context, else we insert an already 'dead' hash, blocking
@@ -972,7 +972,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
spin_lock(&nf_conntrack_expect_lock);
exp = nf_ct_find_expectation(net, zone, tuple);
if (exp) {
- pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
+ pr_debug("conntrack: expectation arrives ct=%pK exp=%pK\n",
ct, exp);
/* Welcome, Mr. Bond. We've been expecting you... */
__set_bit(IPS_EXPECTED_BIT, &ct->status);
@@ -1063,14 +1063,14 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
} else {
/* Once we've had two way comms, always ESTABLISHED. */
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
- pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
+ pr_debug("nf_conntrack_in: normal packet for %pK\n", ct);
*ctinfo = IP_CT_ESTABLISHED;
} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
- pr_debug("nf_conntrack_in: related packet for %p\n",
+ pr_debug("nf_conntrack_in: related packet for %pK\n",
ct);
*ctinfo = IP_CT_RELATED;
} else {
- pr_debug("nf_conntrack_in: new packet for %p\n", ct);
+ pr_debug("nf_conntrack_in: new packet for %pK\n", ct);
*ctinfo = IP_CT_NEW;
}
*set_reply = 0;
@@ -1212,7 +1212,7 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
/* Should be unconfirmed, so not in hash table yet */
NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
- pr_debug("Altering reply tuple of %p to ", ct);
+ pr_debug("Altering reply tuple of %pK to ", ct);
nf_ct_dump_tuple(newreply);
ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 885b4aba3695..9178e9302525 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1425,7 +1425,8 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
SIP_HDR_VIA_UDP, NULL, &matchoff,
&matchlen, &addr, &port) > 0 &&
port != ct->tuplehash[dir].tuple.src.u.udp.port &&
- nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3))
+ nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3) &&
+ (dir == IP_CT_DIR_ORIGINAL))
ct_sip_info->forced_dport = port;
for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
@@ -1569,6 +1570,9 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff,
unsigned int dataoff, datalen;
const char *dptr;
+ if (nf_ct_disable_sip_alg)
+ return NF_ACCEPT;
+
/* No Data ? */
dataoff = protoff + sizeof(struct udphdr);
if (dataoff >= skb->len)
diff --git a/net/netfilter/xt_HARDIDLETIMER.c b/net/netfilter/xt_HARDIDLETIMER.c
new file mode 100644
index 000000000000..06322e4de632
--- /dev/null
+++ b/net/netfilter/xt_HARDIDLETIMER.c
@@ -0,0 +1,381 @@
+/*
+ * linux/net/netfilter/xt_HARDIDLETIMER.c
+ *
+ * Netfilter module to trigger a timer when a packet matches.
+ * After the timer expires, a kevent will be sent.
+ *
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * Copyright (C) 2004, 2010 Nokia Corporation
+ *
+ * Written by Timo Teras <ext-timo.teras@nokia.com>
+ *
+ * Converted to x_tables and reworked for upstream inclusion
+ * by Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/alarmtimer.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_HARDIDLETIMER.h>
+#include <linux/kdev_t.h>
+#include <linux/kobject.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <linux/sysfs.h>
+#include <net/net_namespace.h>
+
+struct hardidletimer_tg_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+};
+
+struct hardidletimer_tg {
+ struct list_head entry;
+ struct alarm alarm;
+ struct work_struct work;
+
+ struct kobject *kobj;
+ struct hardidletimer_tg_attr attr;
+
+ unsigned int refcnt;
+ bool send_nl_msg;
+ bool active;
+};
+
+static LIST_HEAD(hardidletimer_tg_list);
+static DEFINE_MUTEX(list_mutex);
+
+static struct kobject *hardidletimer_tg_kobj;
+
+static void notify_netlink_uevent(const char *iface,
+ struct hardidletimer_tg *timer)
+{
+ char iface_msg[NLMSG_MAX_SIZE];
+ char state_msg[NLMSG_MAX_SIZE];
+ char *envp[] = { iface_msg, state_msg, NULL };
+ int res;
+
+ res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
+ iface);
+ if (NLMSG_MAX_SIZE <= res) {
+		pr_err("message too long (%d)\n", res);
+ return;
+ }
+ res = snprintf(state_msg, NLMSG_MAX_SIZE, "STATE=%s",
+ timer->active ? "active" : "inactive");
+		pr_err("message too long (%d)\n", res);
+ pr_err("message too long (%d)", res);
+ return;
+ }
+ pr_debug("putting nlmsg: <%s> <%s>\n", iface_msg, state_msg);
+ kobject_uevent_env(hardidletimer_tg_kobj, KOBJ_CHANGE, envp);
+}
+
+static
+struct hardidletimer_tg *__hardidletimer_tg_find_by_label(const char *label)
+{
+ struct hardidletimer_tg *entry;
+
+ BUG_ON(!label);
+
+ list_for_each_entry(entry, &hardidletimer_tg_list, entry) {
+ if (!strcmp(label, entry->attr.attr.name))
+ return entry;
+ }
+
+ return NULL;
+}
+
+static ssize_t hardidletimer_tg_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct hardidletimer_tg *timer;
+ ktime_t expires;
+ struct timespec ktimespec;
+
+ memset(&ktimespec, 0, sizeof(struct timespec));
+ mutex_lock(&list_mutex);
+
+ timer = __hardidletimer_tg_find_by_label(attr->name);
+ if (timer) {
+ expires = alarm_expires_remaining(&timer->alarm);
+ ktimespec = ktime_to_timespec(expires);
+ }
+
+ mutex_unlock(&list_mutex);
+
+ if (ktimespec.tv_sec >= 0)
+ return snprintf(buf, PAGE_SIZE, "%ld\n", ktimespec.tv_sec);
+
+ if ((timer) && (timer->send_nl_msg))
+ return snprintf(buf, PAGE_SIZE, "0 %ld\n", ktimespec.tv_sec);
+ else
+ return snprintf(buf, PAGE_SIZE, "0\n");
+}
+
+static void hardidletimer_tg_work(struct work_struct *work)
+{
+ struct hardidletimer_tg *timer = container_of(work,
+ struct hardidletimer_tg, work);
+
+ sysfs_notify(hardidletimer_tg_kobj, NULL, timer->attr.attr.name);
+
+ if (timer->send_nl_msg)
+ notify_netlink_uevent(timer->attr.attr.name, timer);
+}
+
+static enum alarmtimer_restart hardidletimer_tg_alarmproc(struct alarm *alarm,
+ ktime_t now)
+{
+ struct hardidletimer_tg *timer = alarm->data;
+
+ pr_debug("alarm %s expired\n", timer->attr.attr.name);
+
+ timer->active = false;
+ schedule_work(&timer->work);
+ return ALARMTIMER_NORESTART;
+}
+
+static int hardidletimer_tg_create(struct hardidletimer_tg_info *info)
+{
+ int ret;
+ ktime_t tout;
+
+ info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+ if (!info->timer) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
+ if (!info->timer->attr.attr.name) {
+ ret = -ENOMEM;
+ goto out_free_timer;
+ }
+ info->timer->attr.attr.mode = S_IRUGO;
+ info->timer->attr.show = hardidletimer_tg_show;
+
+ ret = sysfs_create_file(hardidletimer_tg_kobj, &info->timer->attr.attr);
+ if (ret < 0) {
+		pr_debug("couldn't add file to sysfs\n");
+ goto out_free_attr;
+ }
+
+ list_add(&info->timer->entry, &hardidletimer_tg_list);
+
+ alarm_init(&info->timer->alarm, ALARM_BOOTTIME,
+ hardidletimer_tg_alarmproc);
+ info->timer->alarm.data = info->timer;
+ info->timer->refcnt = 1;
+ info->timer->send_nl_msg = (info->send_nl_msg == 0) ? false : true;
+ info->timer->active = true;
+ tout = ktime_set(info->timeout, 0);
+ alarm_start_relative(&info->timer->alarm, tout);
+
+ INIT_WORK(&info->timer->work, hardidletimer_tg_work);
+
+ return 0;
+
+out_free_attr:
+ kfree(info->timer->attr.attr.name);
+out_free_timer:
+ kfree(info->timer);
+out:
+ return ret;
+}
+
+/* The actual xt_tables plugin. */
+static unsigned int hardidletimer_tg_target(struct sk_buff *skb,
+ const struct xt_action_param *par)
+{
+ const struct hardidletimer_tg_info *info = par->targinfo;
+ ktime_t tout;
+
+ pr_debug("resetting timer %s, timeout period %u\n",
+ info->label, info->timeout);
+
+ BUG_ON(!info->timer);
+
+ if (info->timer->active == false) {
+ schedule_work(&info->timer->work);
+ pr_debug("Starting timer %s\n", info->label);
+ }
+
+ info->timer->active = true;
+ /* TODO: Avoid modifying timers on each packet */
+ tout = ktime_set(info->timeout, 0);
+ alarm_start_relative(&info->timer->alarm, tout);
+
+ return XT_CONTINUE;
+}
+
+static int hardidletimer_tg_checkentry(const struct xt_tgchk_param *par)
+{
+ struct hardidletimer_tg_info *info = par->targinfo;
+ int ret;
+ ktime_t tout;
+
+ pr_debug("checkentry targinfo %s\n", info->label);
+
+ if (info->timeout == 0) {
+ pr_debug("timeout value is zero\n");
+ return -EINVAL;
+ }
+
+ if (info->label[0] == '\0' ||
+ strnlen(info->label, MAX_HARDIDLETIMER_LABEL_SIZE)
+ == MAX_HARDIDLETIMER_LABEL_SIZE) {
+ pr_debug("label is empty or not nul-terminated\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&list_mutex);
+
+ info->timer = __hardidletimer_tg_find_by_label(info->label);
+ if (info->timer) {
+ info->timer->refcnt++;
+ if (info->timer->active == false) {
+ schedule_work(&info->timer->work);
+ pr_debug("Starting Checkentry timer\n");
+ }
+
+ info->timer->active = true;
+ tout = ktime_set(info->timeout, 0);
+ alarm_start_relative(&info->timer->alarm, tout);
+
+ pr_debug("increased refcnt of timer %s to %u\n",
+ info->label, info->timer->refcnt);
+ } else {
+ ret = hardidletimer_tg_create(info);
+ if (ret < 0) {
+ pr_debug("failed to create timer\n");
+ mutex_unlock(&list_mutex);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&list_mutex);
+
+ return 0;
+}
+
+static void hardidletimer_tg_destroy(const struct xt_tgdtor_param *par)
+{
+ const struct hardidletimer_tg_info *info = par->targinfo;
+
+ pr_debug("destroy targinfo %s\n", info->label);
+
+ mutex_lock(&list_mutex);
+
+ if (--info->timer->refcnt == 0) {
+ pr_debug("deleting timer %s\n", info->label);
+
+ list_del(&info->timer->entry);
+ alarm_cancel(&info->timer->alarm);
+ cancel_work_sync(&info->timer->work);
+ sysfs_remove_file(hardidletimer_tg_kobj,
+ &info->timer->attr.attr);
+ kfree(info->timer->attr.attr.name);
+ kfree(info->timer);
+ } else {
+ pr_debug("decreased refcnt of timer %s to %u\n",
+ info->label, info->timer->refcnt);
+ }
+
+ mutex_unlock(&list_mutex);
+}
+
+static struct xt_target hardidletimer_tg __read_mostly = {
+ .name = "HARDIDLETIMER",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .target = hardidletimer_tg_target,
+ .targetsize = sizeof(struct hardidletimer_tg_info),
+ .checkentry = hardidletimer_tg_checkentry,
+ .destroy = hardidletimer_tg_destroy,
+ .me = THIS_MODULE,
+};
+
+static struct class *hardidletimer_tg_class;
+
+static struct device *hardidletimer_tg_device;
+
+static int __init hardidletimer_tg_init(void)
+{
+ int err;
+
+ hardidletimer_tg_class = class_create(THIS_MODULE, "xt_hardidletimer");
+ err = PTR_ERR(hardidletimer_tg_class);
+ if (IS_ERR(hardidletimer_tg_class)) {
+ pr_debug("couldn't register device class\n");
+ goto out;
+ }
+
+ hardidletimer_tg_device = device_create(hardidletimer_tg_class, NULL,
+ MKDEV(0, 0), NULL, "timers");
+ err = PTR_ERR(hardidletimer_tg_device);
+ if (IS_ERR(hardidletimer_tg_device)) {
+ pr_debug("couldn't register system device\n");
+ goto out_class;
+ }
+
+ hardidletimer_tg_kobj = &hardidletimer_tg_device->kobj;
+
+ err = xt_register_target(&hardidletimer_tg);
+ if (err < 0) {
+ pr_debug("couldn't register xt target\n");
+ goto out_dev;
+ }
+
+ return 0;
+out_dev:
+ device_destroy(hardidletimer_tg_class, MKDEV(0, 0));
+out_class:
+ class_destroy(hardidletimer_tg_class);
+out:
+ return err;
+}
+
+static void __exit hardidletimer_tg_exit(void)
+{
+ xt_unregister_target(&hardidletimer_tg);
+
+ device_destroy(hardidletimer_tg_class, MKDEV(0, 0));
+ class_destroy(hardidletimer_tg_class);
+}
+
+module_init(hardidletimer_tg_init);
+module_exit(hardidletimer_tg_exit);
+
+MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>");
+MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
+MODULE_DESCRIPTION("Xtables: idle time monitor");
+MODULE_LICENSE("GPL v2");
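
Given the class and device created in hardidletimer_tg_init(), each timer
label appears as a read-only attribute under
/sys/class/xt_hardidletimer/timers/. A userspace poller could read the
remaining seconds along these lines (a sketch; "wlan0_idle" is a hypothetical
label chosen when the rule was added):

	#include <stdio.h>

	int main(void)
	{
		char buf[32];
		FILE *f = fopen("/sys/class/xt_hardidletimer/timers/wlan0_idle",
				"r");

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("remaining: %s", buf);	/* "0\n" once expired */
		fclose(f);
		return 0;
	}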
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 0975c993a94e..80b32de1d99c 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -49,6 +49,7 @@
#include <linux/notifier.h>
#include <net/net_namespace.h>
#include <net/sock.h>
+#include <net/inet_sock.h>
struct idletimer_tg_attr {
struct attribute attr;
@@ -75,6 +76,7 @@ struct idletimer_tg {
bool send_nl_msg;
bool active;
uid_t uid;
+ bool suspend_time_valid;
};
static LIST_HEAD(idletimer_tg_list);
@@ -244,8 +246,13 @@ static int idletimer_resume(struct notifier_block *notifier,
switch (pm_event) {
case PM_SUSPEND_PREPARE:
get_monotonic_boottime(&timer->last_suspend_time);
+ timer->suspend_time_valid = true;
break;
case PM_POST_SUSPEND:
+ if (!timer->suspend_time_valid)
+ break;
+ timer->suspend_time_valid = false;
+
spin_lock_bh(&timestamp_lock);
if (!timer->active) {
spin_unlock_bh(&timestamp_lock);
@@ -280,7 +287,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
{
int ret;
- info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+ info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL);
if (!info->timer) {
ret = -ENOMEM;
goto out;
@@ -354,8 +361,8 @@ static void reset_timer(const struct idletimer_tg_info *info,
/* Stores the uid resposible for waking up the radio */
if (skb && (skb->sk)) {
- timer->uid = from_kuid_munged(current_user_ns(),
- sock_i_uid(skb->sk));
+		timer->uid = from_kuid_munged(current_user_ns(),
+					      sock_i_uid(skb_to_full_sk(skb)));
}
/* checks if there is a pending inactive notification*/
@@ -456,6 +463,7 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
del_timer_sync(&info->timer->timer);
sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
unregister_pm_notifier(&info->timer->pm_nb);
+ cancel_work_sync(&info->timer->work);
kfree(info->timer->attr.attr.name);
kfree(info->timer);
} else {
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index 3bf0c59dab2f..ececa65868ef 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -1175,6 +1175,38 @@ static void iface_stat_update(struct net_device *net_dev, bool stash_only)
spin_unlock_bh(&iface_stat_list_lock);
}
+/* Guaranteed to return a net_device that has a name */
+static void get_dev_and_dir(const struct sk_buff *skb,
+ struct xt_action_param *par,
+ enum ifs_tx_rx *direction,
+ const struct net_device **el_dev)
+{
+ BUG_ON(!direction || !el_dev);
+
+ if (par->in) {
+ *el_dev = par->in;
+ *direction = IFS_RX;
+ } else if (par->out) {
+ *el_dev = par->out;
+ *direction = IFS_TX;
+ } else {
+ pr_err("qtaguid[%d]: %s(): no par->in/out?!!\n",
+ par->hooknum, __func__);
+ BUG();
+ }
+ if (unlikely(!(*el_dev)->name)) {
+ pr_err("qtaguid[%d]: %s(): no dev->name?!!\n",
+ par->hooknum, __func__);
+ BUG();
+ }
+ if (skb->dev && *el_dev != skb->dev) {
+ MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs par->%s=%p %s\n",
+ par->hooknum, skb->dev, skb->dev->name,
+ *direction == IFS_RX ? "in" : "out", *el_dev,
+ (*el_dev)->name);
+ }
+}
+
/*
* Update stats for the specified interface from the skb.
* Do nothing if the entry
@@ -1186,50 +1218,27 @@ static void iface_stat_update_from_skb(const struct sk_buff *skb,
{
struct iface_stat *entry;
const struct net_device *el_dev;
- enum ifs_tx_rx direction = par->in ? IFS_RX : IFS_TX;
+ enum ifs_tx_rx direction;
int bytes = skb->len;
int proto;
- if (!skb->dev) {
- MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
- el_dev = par->in ? : par->out;
- } else {
- const struct net_device *other_dev;
- el_dev = skb->dev;
- other_dev = par->in ? : par->out;
- if (el_dev != other_dev) {
- MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
- "par->(in/out)=%p %s\n",
- par->hooknum, el_dev, el_dev->name, other_dev,
- other_dev->name);
- }
- }
-
- if (unlikely(!el_dev)) {
- pr_err_ratelimited("qtaguid[%d]: %s(): no par->in/out?!!\n",
- par->hooknum, __func__);
- BUG();
- } else if (unlikely(!el_dev->name)) {
- pr_err_ratelimited("qtaguid[%d]: %s(): no dev->name?!!\n",
- par->hooknum, __func__);
- BUG();
- } else {
- proto = ipx_proto(skb, par);
- MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
- par->hooknum, el_dev->name, el_dev->type,
- par->family, proto);
- }
+ get_dev_and_dir(skb, par, &direction, &el_dev);
+ proto = ipx_proto(skb, par);
+ MT_DEBUG("qtaguid[%d]: iface_stat: %s(%s): "
+ "type=%d fam=%d proto=%d dir=%d\n",
+ par->hooknum, __func__, el_dev->name, el_dev->type,
+ par->family, proto, direction);
spin_lock_bh(&iface_stat_list_lock);
entry = get_iface_entry(el_dev->name);
if (entry == NULL) {
- IF_DEBUG("qtaguid: iface_stat: %s(%s): not tracked\n",
- __func__, el_dev->name);
+ IF_DEBUG("qtaguid[%d]: iface_stat: %s(%s): not tracked\n",
+ par->hooknum, __func__, el_dev->name);
spin_unlock_bh(&iface_stat_list_lock);
return;
}
- IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+ IF_DEBUG("qtaguid[%d]: %s(%s): entry=%p\n", par->hooknum, __func__,
el_dev->name, entry);
data_counters_update(&entry->totals_via_skb, 0, direction, proto,
@@ -1294,14 +1303,14 @@ static void if_tag_stat_update(const char *ifname, uid_t uid,
spin_lock_bh(&iface_stat_list_lock);
iface_entry = get_iface_entry(ifname);
if (!iface_entry) {
- pr_err_ratelimited("qtaguid: iface_stat: stat_update() "
+ pr_err_ratelimited("qtaguid: tag_stat: stat_update() "
"%s not found\n", ifname);
spin_unlock_bh(&iface_stat_list_lock);
return;
}
/* It is ok to process data when an iface_entry is inactive */
- MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n",
+ MT_DEBUG("qtaguid: tag_stat: stat_update() dev=%s entry=%p\n",
ifname, iface_entry);
/*
@@ -1318,7 +1327,7 @@ static void if_tag_stat_update(const char *ifname, uid_t uid,
tag = combine_atag_with_uid(acct_tag, uid);
uid_tag = make_tag_from_uid(uid);
}
- MT_DEBUG("qtaguid: iface_stat: stat_update(): "
+ MT_DEBUG("qtaguid: tag_stat: stat_update(): "
" looking for tag=0x%llx (uid=%u) in ife=%p\n",
tag, get_uid_from_tag(tag), iface_entry);
/* Loop over tag list under this interface for {acct_tag,uid_tag} */
@@ -1578,8 +1587,8 @@ static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
struct sock *sk;
unsigned int hook_mask = (1 << par->hooknum);
- MT_DEBUG("qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb,
- par->hooknum, par->family);
+ MT_DEBUG("qtaguid[%d]: find_sk(skb=%p) family=%d\n",
+ par->hooknum, skb, par->family);
/*
* Let's not abuse the the xt_socket_get*_sk(), or else it will
@@ -1600,8 +1609,8 @@ static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
}
if (sk) {
- MT_DEBUG("qtaguid: %p->sk_proto=%u "
- "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state);
+ MT_DEBUG("qtaguid[%d]: %p->sk_proto=%u->sk_state=%d\n",
+ par->hooknum, sk, sk->sk_protocol, sk->sk_state);
/*
* When in TCP_TIME_WAIT the sk is not a "struct sock" but
* "struct inet_timewait_sock" which is missing fields.
@@ -1619,37 +1628,19 @@ static void account_for_uid(const struct sk_buff *skb,
struct xt_action_param *par)
{
const struct net_device *el_dev;
+ enum ifs_tx_rx direction;
+ int proto;
- if (!skb->dev) {
- MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
- el_dev = par->in ? : par->out;
- } else {
- const struct net_device *other_dev;
- el_dev = skb->dev;
- other_dev = par->in ? : par->out;
- if (el_dev != other_dev) {
- MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
- "par->(in/out)=%p %s\n",
- par->hooknum, el_dev, el_dev->name, other_dev,
- other_dev->name);
- }
- }
-
- if (unlikely(!el_dev)) {
- pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
- } else if (unlikely(!el_dev->name)) {
- pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
- } else {
- int proto = ipx_proto(skb, par);
- MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
- par->hooknum, el_dev->name, el_dev->type,
- par->family, proto);
+ get_dev_and_dir(skb, par, &direction, &el_dev);
+ proto = ipx_proto(skb, par);
+ MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d dir=%d\n",
+ par->hooknum, el_dev->name, el_dev->type,
+ par->family, proto, direction);
- if_tag_stat_update(el_dev->name, uid,
- skb->sk ? skb->sk : alternate_sk,
- par->in ? IFS_RX : IFS_TX,
- proto, skb->len);
- }
+ if_tag_stat_update(el_dev->name, uid,
+ skb->sk ? skb->sk : alternate_sk,
+ direction,
+ proto, skb->len);
}
static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -1661,6 +1652,11 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
kuid_t sock_uid;
bool res;
bool set_sk_callback_lock = false;
+ /*
+ * TODO: unhack how to force just accounting.
+ * For now we only do tag stats when the uid-owner is not requested
+ */
+ bool do_tag_stat = !(info->match & XT_QTAGUID_UID);
if (unlikely(module_passive))
return (info->match ^ info->invert) == 0;
@@ -1734,12 +1730,7 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
* couldn't find the owner, so for now we just count them
* against the system.
*/
- /*
- * TODO: unhack how to force just accounting.
- * For now we only do iface stats when the uid-owner is not
- * requested.
- */
- if (!(info->match & XT_QTAGUID_UID))
+ if (do_tag_stat)
account_for_uid(skb, sk, 0, par);
MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
par->hooknum,
@@ -1754,18 +1745,15 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
filp = sk->sk_socket->file;
if (filp == NULL) {
MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
- account_for_uid(skb, sk, 0, par);
+ if (do_tag_stat)
+ account_for_uid(skb, sk, 0, par);
res = ((info->match ^ info->invert) &
(XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
atomic64_inc(&qtu_events.match_no_sk_file);
goto put_sock_ret_res;
}
sock_uid = filp->f_cred->fsuid;
- /*
- * TODO: unhack how to force just accounting.
- * For now we only do iface stats when the uid-owner is not requested
- */
- if (!(info->match & XT_QTAGUID_UID))
+ if (do_tag_stat)
account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid), par);
/*
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
index 834594aa0085..94663440d160 100644
--- a/net/netfilter/xt_quota2.c
+++ b/net/netfilter/xt_quota2.c
@@ -16,12 +16,16 @@
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <net/netlink.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_quota2.h>
+#define QUOTA2_SYSFS_WORK_MAX_SIZE 64
+#define QUOTA2_SYSFS_NUM_ENVP 3
+
#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
/* For compatibility, these definitions are copied from the
* deprecated header file <linux/netfilter_ipv4/ipt_ULOG.h> */
@@ -54,17 +58,16 @@ struct xt_quota_counter {
atomic_t ref;
char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
struct proc_dir_entry *procfs_entry;
+ char last_iface[QUOTA2_SYSFS_WORK_MAX_SIZE];
+ char last_prefix[QUOTA2_SYSFS_WORK_MAX_SIZE];
+ struct work_struct work;
};
-#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
-/* Harald's favorite number +1 :D From ipt_ULOG.C */
-static int qlog_nl_event = 112;
-module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(event_num,
- "Event number for NETLINK_NFLOG message. 0 disables log."
- "111 is what ipt_ULOG uses.");
-static struct sock *nflognl;
-#endif
+#define to_quota_counter(x) container_of(x, struct xt_quota_counter, work)
+
+static struct class *quota_class;
+static struct device *quota_device;
+static struct kobject *quota_kobj;
static LIST_HEAD(counter_list);
static DEFINE_SPINLOCK(counter_list_lock);
@@ -75,68 +78,39 @@ static kuid_t quota_list_uid = KUIDT_INIT(0);
static kgid_t quota_list_gid = KGIDT_INIT(0);
module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
-#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
-static void quota2_log(unsigned int hooknum,
- const struct sk_buff *skb,
- const struct net_device *in,
+static void quota2_work(struct work_struct *work)
+{
+ char alert_msg[QUOTA2_SYSFS_WORK_MAX_SIZE];
+ char iface_name[QUOTA2_SYSFS_WORK_MAX_SIZE];
+ char *envp[QUOTA2_SYSFS_NUM_ENVP] = {alert_msg, iface_name, NULL};
+ struct xt_quota_counter *counter = to_quota_counter(work);
+
+ snprintf(alert_msg, sizeof(alert_msg), "ALERT_NAME=%s", counter->name);
+ snprintf(iface_name, sizeof(iface_name), "INTERFACE=%s",
+ counter->last_iface);
+
+ kobject_uevent_env(quota_kobj, KOBJ_CHANGE, envp);
+}
+
+static void quota2_log(const struct net_device *in,
const struct net_device *out,
+ struct xt_quota_counter *q,
const char *prefix)
{
- ulog_packet_msg_t *pm;
- struct sk_buff *log_skb;
- size_t size;
- struct nlmsghdr *nlh;
-
- if (!qlog_nl_event)
+ if (!prefix)
return;
- size = NLMSG_SPACE(sizeof(*pm));
- size = max(size, (size_t)NLMSG_GOODSIZE);
- log_skb = alloc_skb(size, GFP_ATOMIC);
- if (!log_skb) {
- pr_err("xt_quota2: cannot alloc skb for logging\n");
- return;
- }
+ strlcpy(q->last_prefix, prefix, QUOTA2_SYSFS_WORK_MAX_SIZE);
- nlh = nlmsg_put(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
- sizeof(*pm), 0);
- if (!nlh) {
- pr_err("xt_quota2: nlmsg_put failed\n");
- kfree_skb(log_skb);
- return;
- }
- pm = nlmsg_data(nlh);
- if (skb->tstamp.tv64 == 0)
- __net_timestamp((struct sk_buff *)skb);
- pm->data_len = 0;
- pm->hook = hooknum;
- if (prefix != NULL)
- strlcpy(pm->prefix, prefix, sizeof(pm->prefix));
- else
- *(pm->prefix) = '\0';
if (in)
- strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name));
- else
- pm->indev_name[0] = '\0';
-
- if (out)
- strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
+ strlcpy(q->last_iface, in->name, QUOTA2_SYSFS_WORK_MAX_SIZE);
+ else if (out)
+ strlcpy(q->last_iface, out->name, QUOTA2_SYSFS_WORK_MAX_SIZE);
else
- pm->outdev_name[0] = '\0';
+ strlcpy(q->last_iface, "UNKNOWN", QUOTA2_SYSFS_WORK_MAX_SIZE);
- NETLINK_CB(log_skb).dst_group = 1;
- pr_debug("throwing 1 packets to netlink group 1\n");
- netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
-}
-#else
-static void quota2_log(unsigned int hooknum,
- const struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- const char *prefix)
-{
+ schedule_work(&q->work);
}
-#endif /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */
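
With the NETLINK_NFLOG path removed, quota transitions now surface as kobject
uevents carrying ALERT_NAME= and INTERFACE= variables (see quota2_work()
above). A userspace listener can receive them on the standard
NETLINK_KOBJECT_UEVENT socket; a minimal sketch, assuming membership of
multicast group 1 as udev uses:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>

	int main(void)
	{
		struct sockaddr_nl addr = { .nl_family = AF_NETLINK,
					    .nl_pid = getpid(),
					    .nl_groups = 1 };
		char buf[4096];
		char *p;
		ssize_t len;
		int fd = socket(AF_NETLINK, SOCK_DGRAM,
				NETLINK_KOBJECT_UEVENT);

		if (fd < 0 || bind(fd, (struct sockaddr *)&addr,
				   sizeof(addr)) < 0)
			return 1;
		while ((len = recv(fd, buf, sizeof(buf) - 1, 0)) > 0) {
			buf[len] = '\0';
			/* payload is NUL-separated KEY=VALUE strings */
			for (p = buf; p < buf + len; p += strlen(p) + 1)
				if (!strncmp(p, "ALERT_NAME=", 11) ||
				    !strncmp(p, "INTERFACE=", 10))
					printf("%s\n", p);
		}
		return 0;
	}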
static ssize_t quota_proc_read(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
@@ -193,6 +167,9 @@ q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
INIT_LIST_HEAD(&e->list);
atomic_set(&e->ref, 1);
strlcpy(e->name, q->name, sizeof(e->name));
+ strlcpy(e->last_prefix, "UNSET", sizeof(e->last_prefix));
+ strlcpy(e->last_iface, "UNSET", sizeof(e->last_iface));
+ INIT_WORK(&e->work, quota2_work);
}
return e;
}
@@ -326,11 +303,7 @@ quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
} else {
/* We are transitioning, log that fact. */
if (e->quota) {
- quota2_log(par->hooknum,
- skb,
- par->in,
- par->out,
- q->name);
+ quota2_log(par->in, par->out, e, q->name);
}
/* we do not allow even small packets from now on */
e->quota = 0;
@@ -368,11 +341,25 @@ static int __init quota_mt2_init(void)
int ret;
pr_debug("xt_quota2: init()");
-#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
- nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, NULL);
- if (!nflognl)
- return -ENOMEM;
-#endif
+ quota_class = class_create(THIS_MODULE, "xt_quota2");
+ ret = PTR_ERR(quota_class);
+ if (IS_ERR(quota_class)) {
+		pr_err("xt_quota2: couldn't create class\n");
+ return ret;
+ }
+
+ quota_device = device_create(quota_class, NULL, MKDEV(0, 0), NULL,
+ "counters");
+ ret = PTR_ERR(quota_device);
+ if (IS_ERR(quota_device)) {
+		pr_err("xt_quota2: couldn't create device\n");
+ device_destroy(quota_class, MKDEV(0, 0));
+ class_destroy(quota_class);
+ return ret;
+ }
+
+ quota_kobj = &quota_device->kobj;
proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
if (proc_xt_quota == NULL)
@@ -389,6 +376,8 @@ static void __exit quota_mt2_exit(void)
{
xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
remove_proc_entry("xt_quota", init_net.proc_net);
+ device_destroy(quota_class, MKDEV(0, 0));
+ class_destroy(quota_class);
}
module_init(quota_mt2_init);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 360700a2f46c..f4dd706c6cd1 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -926,15 +926,6 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
static void netlink_sock_destruct(struct sock *sk)
{
- struct netlink_sock *nlk = nlk_sk(sk);
-
- if (nlk->cb_running) {
- if (nlk->cb.done)
- nlk->cb.done(&nlk->cb);
- module_put(nlk->cb.module);
- kfree_skb(nlk->cb.skb);
- }
-
skb_queue_purge(&sk->sk_receive_queue);
#ifdef CONFIG_NETLINK_MMAP
if (1) {
@@ -959,14 +950,6 @@ static void netlink_sock_destruct(struct sock *sk)
WARN_ON(nlk_sk(sk)->groups);
}
-static void netlink_sock_destruct_work(struct work_struct *work)
-{
- struct netlink_sock *nlk = container_of(work, struct netlink_sock,
- work);
-
- sk_free(&nlk->sk);
-}
-
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
* SMP. Look, when several writers sleep and reader wakes them up, all but one
* immediately hit write lock and grab all the cpus. Exclusive sleep solves
@@ -1077,8 +1060,9 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
rcu_read_lock();
sk = __netlink_lookup(table, portid, net);
- if (sk)
- sock_hold(sk);
+ if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
+ sk = NULL;
+
rcu_read_unlock();
return sk;
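
The sock_hold() -> atomic_inc_not_zero() switch pairs with the SOCK_RCU_FREE
flag set in __netlink_create() below: a socket found in the RCU-protected hash
may already have dropped its last reference and be pending RCU free, so the
lookup must take a reference only if the count is still non-zero. The general
shape of the pattern, with hash_lookup() as a hypothetical stand-in:

	rcu_read_lock();
	obj = hash_lookup(key);			/* RCU-protected lookup */
	if (obj && !atomic_inc_not_zero(&obj->refcnt))
		obj = NULL;			/* lost the race with free */
	rcu_read_unlock();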
@@ -1205,6 +1189,7 @@ static int __netlink_create(struct net *net, struct socket *sock,
mutex_init(&nlk->pg_vec_lock);
#endif
+ sock_set_flag(sk, SOCK_RCU_FREE);
sk->sk_destruct = netlink_sock_destruct;
sk->sk_protocol = protocol;
return 0;
@@ -1269,23 +1254,6 @@ out_module:
goto out;
}
-static void deferred_put_nlk_sk(struct rcu_head *head)
-{
- struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
- struct sock *sk = &nlk->sk;
-
- if (!atomic_dec_and_test(&sk->sk_refcnt))
- return;
-
- if (nlk->cb_running && nlk->cb.done) {
- INIT_WORK(&nlk->work, netlink_sock_destruct_work);
- schedule_work(&nlk->work);
- return;
- }
-
- sk_free(sk);
-}
-
static int netlink_release(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -1358,7 +1326,19 @@ static int netlink_release(struct socket *sock)
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
local_bh_enable();
- call_rcu(&nlk->rcu, deferred_put_nlk_sk);
+ if (nlk->cb_running) {
+ mutex_lock(nlk->cb_mutex);
+ if (nlk->cb_running) {
+ if (nlk->cb.done)
+ nlk->cb.done(&nlk->cb);
+
+ module_put(nlk->cb.module);
+ kfree_skb(nlk->cb.skb);
+ nlk->cb_running = false;
+ }
+ mutex_unlock(nlk->cb_mutex);
+ }
+ sock_put(sk);
return 0;
}
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index df32cb92d9fc..b0c1ddc97260 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -53,7 +53,6 @@ struct netlink_sock {
#endif /* CONFIG_NETLINK_MMAP */
struct rhash_head node;
- struct rcu_head rcu;
struct work_struct work;
};
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index bc0e504f33a6..80649934cf3b 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -993,7 +993,7 @@ static struct genl_multicast_group genl_ctrl_groups[] = {
static int genl_bind(struct net *net, int group)
{
- int i, err = -ENOENT;
+ int i, err = 0;
down_read(&cb_lock);
for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
diff --git a/net/rmnet_data/Kconfig b/net/rmnet_data/Kconfig
new file mode 100644
index 000000000000..36d581771cb9
--- /dev/null
+++ b/net/rmnet_data/Kconfig
@@ -0,0 +1,29 @@
+#
+# RMNET Data and MAP driver
+#
+
+menuconfig RMNET_DATA
+ depends on NETDEVICES
+ bool "RmNet Data and MAP driver"
+ ---help---
+ If you say Y here, then the rmnet_data module will be statically
+ compiled into the kernel. The rmnet data module provides MAP
+	  functionality for embedded and bridged traffic.
+
+if RMNET_DATA
+
+config RMNET_DATA_FC
+ bool "RmNet Data Flow Control"
+ depends on NET_SCHED && NET_SCH_PRIO
+ ---help---
+ Say Y here if you want RmNet data to handle in-band flow control and
+ ioctl based flow control. This depends on net scheduler and prio queue
+ capability being present in the kernel. In-band flow control requires
+	  the MAP protocol to be used.
+
+config RMNET_DATA_DEBUG_PKT
+ bool "Packet Debug Logging"
+ ---help---
+ Say Y here if you want RmNet data to be able to log packets in main
+ system log. This should not be enabled on production builds as it can
+ impact system performance. Note that simply enabling it here will not
+ enable the logging; it must be enabled at run-time as well.
+endif # RMNET_DATA
diff --git a/net/rmnet_data/Makefile b/net/rmnet_data/Makefile
new file mode 100644
index 000000000000..ccb8b5b76d6c
--- /dev/null
+++ b/net/rmnet_data/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the RMNET Data module
+#
+
+rmnet_data-y := rmnet_data_main.o
+rmnet_data-y += rmnet_data_config.o
+rmnet_data-y += rmnet_data_vnd.o
+rmnet_data-y += rmnet_data_handlers.o
+rmnet_data-y += rmnet_map_data.o
+rmnet_data-y += rmnet_map_command.o
+rmnet_data-y += rmnet_data_stats.o
+obj-$(CONFIG_RMNET_DATA) += rmnet_data.o
+
+CFLAGS_rmnet_data_main.o := -I$(src)
diff --git a/net/rmnet_data/rmnet_data_config.c b/net/rmnet_data/rmnet_data_config.c
new file mode 100644
index 000000000000..fb4c60fc2203
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_config.c
@@ -0,0 +1,1243 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data configuration engine
+ *
+ */
+
+#include <net/sock.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/rmnet_data.h>
+#include <net/rmnet_config.h>
+#include "rmnet_data_config.h"
+#include "rmnet_data_handlers.h"
+#include "rmnet_data_vnd.h"
+#include "rmnet_data_private.h"
+#include "rmnet_data_trace.h"
+
+RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_CONFIG);
+
+/* ***************** Local Definitions and Declarations ********************* */
+static struct sock *nl_socket_handle;
+
+#ifndef RMNET_KERNEL_PRE_3_8
+static struct netlink_kernel_cfg rmnet_netlink_cfg = {
+ .input = rmnet_config_netlink_msg_handler
+};
+#endif
+
+static struct notifier_block rmnet_dev_notifier = {
+ .notifier_call = rmnet_config_notify_cb,
+ .next = 0,
+ .priority = 0
+};
+
+#define RMNET_NL_MSG_SIZE(Y) (sizeof(((struct rmnet_nl_msg_s *)0)->Y))
+
+struct rmnet_free_vnd_work {
+ struct work_struct work;
+ int vnd_id[RMNET_DATA_MAX_VND];
+ int count;
+};
+
+/* ***************** Init and Cleanup *************************************** */
+
+#ifdef RMNET_KERNEL_PRE_3_8
+static struct sock *_rmnet_config_start_netlink(void)
+{
+ return netlink_kernel_create(&init_net,
+ RMNET_NETLINK_PROTO,
+ 0,
+ rmnet_config_netlink_msg_handler,
+ NULL,
+ THIS_MODULE);
+}
+#else
+static struct sock *_rmnet_config_start_netlink(void)
+{
+ return netlink_kernel_create(&init_net,
+ RMNET_NETLINK_PROTO,
+ &rmnet_netlink_cfg);
+}
+#endif /* RMNET_KERNEL_PRE_3_8 */
+
+/**
+ * rmnet_config_init() - Startup init
+ *
+ * Registers netlink protocol with kernel and opens socket. Netlink handler is
+ * registered with kernel.
+ */
+int rmnet_config_init(void)
+{
+ int rc;
+ nl_socket_handle = _rmnet_config_start_netlink();
+ if (!nl_socket_handle) {
+ LOGE("%s", "Failed to init netlink socket");
+ return RMNET_INIT_ERROR;
+ }
+
+ rc = register_netdevice_notifier(&rmnet_dev_notifier);
+ if (rc != 0) {
+ LOGE("Failed to register device notifier; rc=%d", rc);
+ /* TODO: Cleanup the nl socket */
+ return RMNET_INIT_ERROR;
+ }
+
+ return 0;
+}
+
+/**
+ * rmnet_config_exit() - Cleans up all netlink related resources
+ */
+void rmnet_config_exit(void)
+{
+ int rc;
+ netlink_kernel_release(nl_socket_handle);
+ rc = unregister_netdevice_notifier(&rmnet_dev_notifier);
+ if (rc != 0)
+ LOGE("Failed to unregister device notifier; rc=%d", rc);
+}
+
+/* ***************** Helper Functions *************************************** */
+
+/**
+ * _rmnet_is_physical_endpoint_associated() - Determines if device is associated
+ * @dev: Device to check
+ *
+ * Compares the device's rx_handler callback pointer against the known function
+ *
+ * Return:
+ * - 1 if associated
+ * - 0 if NOT associated
+ */
+static inline int _rmnet_is_physical_endpoint_associated(struct net_device *dev)
+{
+ rx_handler_func_t *rx_handler;
+ rx_handler = rcu_dereference(dev->rx_handler);
+
+ if (rx_handler == rmnet_rx_handler)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * _rmnet_get_phys_ep_config() - Get physical ep config for an associated device
+ * @dev: Device to get endpoint configuration from
+ *
+ * Return:
+ * - pointer to configuration if successful
+ * - NULL if device is not associated
+ */
+struct rmnet_phys_ep_config *_rmnet_get_phys_ep_config
+ (struct net_device *dev)
+{
+ struct rmnet_phys_ep_conf_s *_rmnet_phys_ep_config;
+
+ if (_rmnet_is_physical_endpoint_associated(dev)) {
+ _rmnet_phys_ep_config = (struct rmnet_phys_ep_conf_s *)
+ rcu_dereference(dev->rx_handler_data);
+ if (_rmnet_phys_ep_config && _rmnet_phys_ep_config->config)
+ return (struct rmnet_phys_ep_config *)
+ _rmnet_phys_ep_config->config;
+		else
+			return NULL;
+	} else {
+		return NULL;
+	}
+}
+
+/**
+ * _rmnet_get_logical_ep() - Gets the logical end point configuration
+ * structure for a network device
+ * @dev: Device to get endpoint configuration from
+ * @config_id: Logical endpoint id on device
+ *
+ * Retrieves the logical_endpoint_config structure.
+ *
+ * Return:
+ * - End point configuration structure
+ * - NULL in case of an error
+ */
+struct rmnet_logical_ep_conf_s *_rmnet_get_logical_ep(struct net_device *dev,
+ int config_id)
+{
+ struct rmnet_phys_ep_config *config;
+ struct rmnet_logical_ep_conf_s *epconfig_l;
+
+ if (rmnet_vnd_is_vnd(dev))
+ epconfig_l = rmnet_vnd_get_le_config(dev);
+ else {
+ config = _rmnet_get_phys_ep_config(dev);
+
+ if (!config)
+ return NULL;
+
+ if (config_id == RMNET_LOCAL_LOGICAL_ENDPOINT)
+ epconfig_l = &config->local_ep;
+ else
+ epconfig_l = &config->muxed_ep[config_id];
+ }
+
+ return epconfig_l;
+}
+
+/* ***************** Netlink Handler **************************************** */
+#define _RMNET_NETLINK_NULL_CHECKS() do { if (!rmnet_header || !resp_rmnet) \
+ BUG(); \
+ } while (0)
+
+static void _rmnet_netlink_set_link_egress_data_format
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+ _RMNET_NETLINK_NULL_CHECKS();
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+ dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
+
+ if (!dev) {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ resp_rmnet->return_code =
+ rmnet_set_egress_data_format(dev,
+ rmnet_header->data_format.flags,
+ rmnet_header->data_format.agg_size,
+ rmnet_header->data_format.agg_count
+ );
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_set_link_ingress_data_format
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+ _RMNET_NETLINK_NULL_CHECKS();
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+ dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
+ if (!dev) {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ resp_rmnet->return_code = rmnet_set_ingress_data_format(
+ dev,
+ rmnet_header->data_format.flags,
+ rmnet_header->data_format.tail_spacing);
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_set_logical_ep_config
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev, *dev2;
+ _RMNET_NETLINK_NULL_CHECKS();
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+ if (rmnet_header->local_ep_config.ep_id < -1
+ || rmnet_header->local_ep_config.ep_id > 254) {
+ resp_rmnet->return_code = RMNET_CONFIG_BAD_ARGUMENTS;
+ return;
+ }
+
+ dev = dev_get_by_name(&init_net,
+ rmnet_header->local_ep_config.dev);
+
+ dev2 = dev_get_by_name(&init_net,
+ rmnet_header->local_ep_config.next_dev);
+
+
+ if (dev && dev2)
+ resp_rmnet->return_code =
+ rmnet_set_logical_endpoint_config(
+ dev,
+ rmnet_header->local_ep_config.ep_id,
+ rmnet_header->local_ep_config.operating_mode,
+ dev2);
+ else
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+
+ if (dev)
+ dev_put(dev);
+ if (dev2)
+ dev_put(dev2);
+}
+
+static void _rmnet_netlink_unset_logical_ep_config
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+ _RMNET_NETLINK_NULL_CHECKS();
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+ if (rmnet_header->local_ep_config.ep_id < -1
+ || rmnet_header->local_ep_config.ep_id > 254) {
+ resp_rmnet->return_code = RMNET_CONFIG_BAD_ARGUMENTS;
+ return;
+ }
+
+ dev = dev_get_by_name(&init_net,
+ rmnet_header->local_ep_config.dev);
+
+ if (dev) {
+ resp_rmnet->return_code =
+ rmnet_unset_logical_endpoint_config(
+ dev,
+ rmnet_header->local_ep_config.ep_id);
+ dev_put(dev);
+ } else {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ }
+}
+
+static void _rmnet_netlink_get_logical_ep_config
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+ _RMNET_NETLINK_NULL_CHECKS();
+
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+ if (rmnet_header->local_ep_config.ep_id < -1
+ || rmnet_header->local_ep_config.ep_id > 254) {
+ resp_rmnet->return_code = RMNET_CONFIG_BAD_ARGUMENTS;
+ return;
+ }
+
+ dev = dev_get_by_name(&init_net,
+ rmnet_header->local_ep_config.dev);
+
+ if (dev)
+ resp_rmnet->return_code =
+ rmnet_get_logical_endpoint_config(
+ dev,
+ rmnet_header->local_ep_config.ep_id,
+ &resp_rmnet->local_ep_config.operating_mode,
+ resp_rmnet->local_ep_config.next_dev,
+ sizeof(resp_rmnet->local_ep_config.next_dev));
+ else {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ if (resp_rmnet->return_code == RMNET_CONFIG_OK) {
+ /* Begin Data */
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+ resp_rmnet->arg_length = RMNET_NL_MSG_SIZE(local_ep_config);
+ }
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_associate_network_device
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+ _RMNET_NETLINK_NULL_CHECKS();
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+ dev = dev_get_by_name(&init_net, rmnet_header->data);
+ if (!dev) {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ resp_rmnet->return_code = rmnet_associate_network_device(dev);
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_unassociate_network_device
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+ _RMNET_NETLINK_NULL_CHECKS();
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+ dev = dev_get_by_name(&init_net, rmnet_header->data);
+ if (!dev) {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ resp_rmnet->return_code = rmnet_unassociate_network_device(dev);
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_get_network_device_associated
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+
+ _RMNET_NETLINK_NULL_CHECKS();
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+ dev = dev_get_by_name(&init_net, rmnet_header->data);
+ if (!dev) {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ resp_rmnet->return_code = _rmnet_is_physical_endpoint_associated(dev);
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_get_link_egress_data_format
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+ struct rmnet_phys_ep_config *config;
+ _RMNET_NETLINK_NULL_CHECKS();
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+
+ dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
+ if (!dev) {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ config = _rmnet_get_phys_ep_config(dev);
+ if (!config) {
+ resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST;
+ dev_put(dev);
+ return;
+ }
+
+ /* Begin Data */
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+ resp_rmnet->arg_length = RMNET_NL_MSG_SIZE(data_format);
+ resp_rmnet->data_format.flags = config->egress_data_format;
+ resp_rmnet->data_format.agg_count = config->egress_agg_count;
+ resp_rmnet->data_format.agg_size = config->egress_agg_size;
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_get_link_ingress_data_format
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ struct net_device *dev;
+ struct rmnet_phys_ep_config *config;
+ _RMNET_NETLINK_NULL_CHECKS();
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+
+ dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
+ if (!dev) {
+ resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+ return;
+ }
+
+ config = _rmnet_get_phys_ep_config(dev);
+ if (!config) {
+ resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST;
+ dev_put(dev);
+ return;
+ }
+
+ /* Begin Data */
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+ resp_rmnet->arg_length = RMNET_NL_MSG_SIZE(data_format);
+ resp_rmnet->data_format.flags = config->ingress_data_format;
+ resp_rmnet->data_format.tail_spacing = config->tail_spacing;
+ dev_put(dev);
+}
+
+static void _rmnet_netlink_get_vnd_name
+ (struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ int r;
+ _RMNET_NETLINK_NULL_CHECKS();
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+ r = rmnet_vnd_get_name(rmnet_header->vnd.id, resp_rmnet->vnd.vnd_name,
+ RMNET_MAX_STR_LEN);
+
+ if (r != 0) {
+ resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST;
+ return;
+ }
+
+ /* Begin Data */
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+ resp_rmnet->arg_length = RMNET_NL_MSG_SIZE(vnd);
+}
+
+static void _rmnet_netlink_add_del_vnd_tc_flow
+ (uint32_t command,
+ struct rmnet_nl_msg_s *rmnet_header,
+ struct rmnet_nl_msg_s *resp_rmnet)
+{
+ uint32_t id;
+ uint32_t map_flow_id;
+ uint32_t tc_flow_id;
+
+ _RMNET_NETLINK_NULL_CHECKS();
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+ id = rmnet_header->flow_control.id;
+ map_flow_id = rmnet_header->flow_control.map_flow_id;
+ tc_flow_id = rmnet_header->flow_control.tc_flow_id;
+
+ switch (command) {
+ case RMNET_NETLINK_ADD_VND_TC_FLOW:
+ resp_rmnet->return_code = rmnet_vnd_add_tc_flow(id,
+ map_flow_id,
+ tc_flow_id);
+ break;
+ case RMNET_NETLINK_DEL_VND_TC_FLOW:
+ resp_rmnet->return_code = rmnet_vnd_del_tc_flow(id,
+ map_flow_id,
+ tc_flow_id);
+ break;
+ default:
+ LOGM("Called with unhandled command %d", command);
+ resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST;
+ break;
+ }
+}
+
+/**
+ * rmnet_config_netlink_msg_handler() - Netlink message handler callback
+ * @skb: Packet containing netlink messages
+ *
+ * Standard kernel-expected format for a netlink message handler. Processes
+ * SKBs that contain RmNet-data-specific netlink messages.
+ */
+void rmnet_config_netlink_msg_handler(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlmsg_header, *resp_nlmsg;
+ struct rmnet_nl_msg_s *rmnet_header, *resp_rmnet;
+ int return_pid, response_data_length;
+ struct sk_buff *skb_response;
+
+ response_data_length = 0;
+ nlmsg_header = (struct nlmsghdr *) skb->data;
+ rmnet_header = (struct rmnet_nl_msg_s *) nlmsg_data(nlmsg_header);
+
+ if (!nlmsg_header->nlmsg_pid ||
+ (nlmsg_header->nlmsg_len < sizeof(struct nlmsghdr) +
+ sizeof(struct rmnet_nl_msg_s)))
+ return;
+
+ LOGL("Netlink message pid=%d, seq=%d, length=%d, rmnet_type=%d",
+ nlmsg_header->nlmsg_pid,
+ nlmsg_header->nlmsg_seq,
+ nlmsg_header->nlmsg_len,
+ rmnet_header->message_type);
+
+ return_pid = nlmsg_header->nlmsg_pid;
+
+ skb_response = nlmsg_new(sizeof(struct nlmsghdr)
+ + sizeof(struct rmnet_nl_msg_s),
+ GFP_KERNEL);
+
+ if (!skb_response) {
+ LOGH("%s", "Failed to allocate response buffer");
+ return;
+ }
+
+ resp_nlmsg = nlmsg_put(skb_response,
+ 0,
+ nlmsg_header->nlmsg_seq,
+ NLMSG_DONE,
+ sizeof(struct rmnet_nl_msg_s),
+ 0);
+
+ resp_rmnet = nlmsg_data(resp_nlmsg);
+
+ if (!resp_rmnet)
+ BUG();
+
+ resp_rmnet->message_type = rmnet_header->message_type;
+ rtnl_lock();
+ switch (rmnet_header->message_type) {
+ case RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE:
+ _rmnet_netlink_associate_network_device
+ (rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE:
+ _rmnet_netlink_unassociate_network_device
+ (rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_GET_NETWORK_DEVICE_ASSOCIATED:
+ _rmnet_netlink_get_network_device_associated
+ (rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT:
+ _rmnet_netlink_set_link_egress_data_format
+ (rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT:
+ _rmnet_netlink_get_link_egress_data_format
+ (rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT:
+ _rmnet_netlink_set_link_ingress_data_format
+ (rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT:
+ _rmnet_netlink_get_link_ingress_data_format
+ (rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_SET_LOGICAL_EP_CONFIG:
+ _rmnet_netlink_set_logical_ep_config(rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_UNSET_LOGICAL_EP_CONFIG:
+ _rmnet_netlink_unset_logical_ep_config(rmnet_header,
+ resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_GET_LOGICAL_EP_CONFIG:
+ _rmnet_netlink_get_logical_ep_config(rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_NEW_VND:
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+ resp_rmnet->return_code =
+ rmnet_create_vnd(rmnet_header->vnd.id);
+ break;
+
+ case RMNET_NETLINK_NEW_VND_WITH_PREFIX:
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+ resp_rmnet->return_code = rmnet_create_vnd_prefix(
+ rmnet_header->vnd.id,
+ rmnet_header->vnd.vnd_name);
+ break;
+
+ case RMNET_NETLINK_FREE_VND:
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+		/* Please check rmnet_vnd_free_dev documentation regarding
+		 * the below locking sequence
+		 */
+ rtnl_unlock();
+ resp_rmnet->return_code = rmnet_free_vnd(rmnet_header->vnd.id);
+ rtnl_lock();
+ break;
+
+ case RMNET_NETLINK_GET_VND_NAME:
+ _rmnet_netlink_get_vnd_name(rmnet_header, resp_rmnet);
+ break;
+
+ case RMNET_NETLINK_DEL_VND_TC_FLOW:
+ case RMNET_NETLINK_ADD_VND_TC_FLOW:
+ _rmnet_netlink_add_del_vnd_tc_flow(rmnet_header->message_type,
+ rmnet_header,
+ resp_rmnet);
+ break;
+
+ default:
+ resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+ resp_rmnet->return_code = RMNET_CONFIG_UNKNOWN_MESSAGE;
+ break;
+ }
+ rtnl_unlock();
+ nlmsg_unicast(nl_socket_handle, skb_response, return_pid);
+ LOGD("%s", "Done processing command");
+}
+
+/* ***************** Configuration API ************************************** */
+
+/**
+ * rmnet_unassociate_network_device() - Unassociate network device
+ * @dev: Device to unassociate
+ *
+ * Frees all structures generated for the device and unregisters the
+ * rx_handler.
+ * todo: needs to do some sanity verification first (is device in use, etc...)
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
+ * - RMNET_CONFIG_INVALID_REQUEST if device is not already associated
+ * - RMNET_CONFIG_DEVICE_IN_USE if device has logical ep that wasn't unset
+ * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ */
+int rmnet_unassociate_network_device(struct net_device *dev)
+{
+ struct rmnet_phys_ep_conf_s *config;
+ int config_id = RMNET_LOCAL_LOGICAL_ENDPOINT;
+ struct rmnet_logical_ep_conf_s *epconfig_l;
+
+	ASSERT_RTNL();
+
+	if (!dev)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	LOGL("(%s);", dev->name);
+
+ if (!_rmnet_is_physical_endpoint_associated(dev))
+ return RMNET_CONFIG_INVALID_REQUEST;
+
+ for (; config_id < RMNET_DATA_MAX_LOGICAL_EP; config_id++) {
+ epconfig_l = _rmnet_get_logical_ep(dev, config_id);
+ if (epconfig_l && epconfig_l->refcount)
+ return RMNET_CONFIG_DEVICE_IN_USE;
+ }
+
+ config = (struct rmnet_phys_ep_conf_s *)
+ rcu_dereference(dev->rx_handler_data);
+
+ if (!config)
+ return RMNET_CONFIG_UNKNOWN_ERROR;
+
+	/* config->config was allocated separately in
+	 * rmnet_associate_network_device(); free it as well
+	 */
+	kfree(config->config);
+	kfree(config);
+
+ netdev_rx_handler_unregister(dev);
+
+ /* Explicitly release the reference from the device */
+ dev_put(dev);
+ trace_rmnet_unassociate(dev);
+ return RMNET_CONFIG_OK;
+}
+
+/**
+ * rmnet_set_ingress_data_format() - Set ingress data format on network device
+ * @dev: Device to set ingress data format on
+ * @ingress_data_format: 32-bit unsigned bitmask of ingress format
+ * @tail_spacing: Guaranteed padding (bytes) when de-aggregating ingress frames
+ *
+ * Network device must already have association with RmNet Data driver
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
+ * - RMNET_CONFIG_INVALID_REQUEST if device is not associated
+ */
+int rmnet_set_ingress_data_format(struct net_device *dev,
+ uint32_t ingress_data_format,
+ uint8_t tail_spacing)
+{
+ struct rmnet_phys_ep_config *config;
+
+	ASSERT_RTNL();
+
+	if (!dev)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	LOGL("(%s,0x%08X);", dev->name, ingress_data_format);
+
+ config = _rmnet_get_phys_ep_config(dev);
+
+ if (!config)
+ return RMNET_CONFIG_INVALID_REQUEST;
+
+ config->ingress_data_format = ingress_data_format;
+ config->tail_spacing = tail_spacing;
+
+ return RMNET_CONFIG_OK;
+}
+
+/**
+ * rmnet_set_egress_data_format() - Set egress data format on network device
+ * @dev: Device to set egress data format on
+ * @egress_data_format: 32-bit unsigned bitmask of egress format
+ * @agg_size: Maximum size (bytes) of data to aggregate into one egress frame
+ * @agg_count: Maximum count (packets) to aggregate into one egress frame
+ *
+ * Network device must already have association with RmNet Data driver
+ * todo: Bounds check on agg_*
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
+ * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ */
+int rmnet_set_egress_data_format(struct net_device *dev,
+ uint32_t egress_data_format,
+ uint16_t agg_size,
+ uint16_t agg_count)
+{
+ struct rmnet_phys_ep_config *config;
+
+	ASSERT_RTNL();
+
+	if (!dev)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	LOGL("(%s,0x%08X, %d, %d);",
+	     dev->name, egress_data_format, agg_size, agg_count);
+
+ config = _rmnet_get_phys_ep_config(dev);
+
+ if (!config)
+ return RMNET_CONFIG_UNKNOWN_ERROR;
+
+ config->egress_data_format = egress_data_format;
+ config->egress_agg_size = agg_size;
+ config->egress_agg_count = agg_count;
+
+ return RMNET_CONFIG_OK;
+}
+
+/**
+ * rmnet_associate_network_device() - Associate network device
+ * @dev: Device to register with RmNet data
+ *
+ * Typically used on physical network devices. Registers RX handler and private
+ * metadata structures.
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
+ * - RMNET_CONFIG_INVALID_REQUEST if the device to be associated is a vnd
+ * - RMNET_CONFIG_DEVICE_IN_USE if dev rx_handler is already filled
+ * - RMNET_CONFIG_DEVICE_IN_USE if netdev_rx_handler_register() fails
+ */
+int rmnet_associate_network_device(struct net_device *dev)
+{
+ struct rmnet_phys_ep_conf_s *config;
+ struct rmnet_phys_ep_config *conf;
+ int rc;
+
+	ASSERT_RTNL();
+
+	if (!dev)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	LOGL("(%s);", dev->name);
+
+ if (_rmnet_is_physical_endpoint_associated(dev)) {
+ LOGM("%s is already regestered", dev->name);
+ return RMNET_CONFIG_DEVICE_IN_USE;
+ }
+
+ if (rmnet_vnd_is_vnd(dev)) {
+ LOGM("%s is a vnd", dev->name);
+ return RMNET_CONFIG_INVALID_REQUEST;
+ }
+
+	config = kzalloc(sizeof(*config), GFP_ATOMIC);
+	conf = kzalloc(sizeof(*conf), GFP_ATOMIC);
+
+	if (!config || !conf) {
+		kfree(config);
+		kfree(conf);
+		return RMNET_CONFIG_NOMEM;
+	}
+
+ config->config = conf;
+ conf->dev = dev;
+ spin_lock_init(&conf->agg_lock);
+ config->recycle = kfree_skb;
+
+ rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config);
+
+ if (rc) {
+ LOGM("netdev_rx_handler_register returns %d", rc);
+ kfree(config);
+ kfree(conf);
+ return RMNET_CONFIG_DEVICE_IN_USE;
+ }
+
+ /* Explicitly hold a reference to the device */
+ dev_hold(dev);
+ trace_rmnet_associate(dev);
+ return RMNET_CONFIG_OK;
+}
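+
+/* Illustrative call sequence (hypothetical caller; RTNL must be held, as
+ * each of these functions does ASSERT_RTNL()):
+ *
+ *	rtnl_lock();
+ *	rmnet_associate_network_device(phys_dev);
+ *	rmnet_set_ingress_data_format(phys_dev, RMNET_INGRESS_FORMAT_MAP, 0);
+ *	rmnet_create_vnd(1);
+ *	rtnl_unlock();
+ */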
+
+/**
+ * _rmnet_set_logical_endpoint_config() - Set logical endpoint config on device
+ * @dev: Device to set endpoint configuration on
+ * @config_id: logical endpoint id on device
+ * @epconfig: endpoint configuration structure to set
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ * - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null
+ * - RMNET_CONFIG_DEVICE_IN_USE if device already has a logical ep
+ * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range
+ */
+int _rmnet_set_logical_endpoint_config(struct net_device *dev,
+ int config_id,
+ struct rmnet_logical_ep_conf_s *epconfig)
+{
+ struct rmnet_logical_ep_conf_s *epconfig_l;
+
+ ASSERT_RTNL();
+
+ if (!dev)
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+ if (config_id < RMNET_LOCAL_LOGICAL_ENDPOINT
+ || config_id >= RMNET_DATA_MAX_LOGICAL_EP)
+ return RMNET_CONFIG_BAD_ARGUMENTS;
+
+ epconfig_l = _rmnet_get_logical_ep(dev, config_id);
+
+ if (!epconfig_l)
+ return RMNET_CONFIG_UNKNOWN_ERROR;
+
+ if (epconfig_l->refcount)
+ return RMNET_CONFIG_DEVICE_IN_USE;
+
+ memcpy(epconfig_l, epconfig, sizeof(struct rmnet_logical_ep_conf_s));
+ if (config_id == RMNET_LOCAL_LOGICAL_ENDPOINT)
+ epconfig_l->mux_id = 0;
+ else
+ epconfig_l->mux_id = config_id;
+
+ /* Explicitly hold a reference to the egress device */
+ dev_hold(epconfig_l->egress_dev);
+ return RMNET_CONFIG_OK;
+}
+
+/**
+ * _rmnet_unset_logical_endpoint_config() - Un-set the logical endpoint config
+ * on device
+ * @dev: Device to set endpoint configuration on
+ * @config_id: logical endpoint id on device
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_NO_SUCH_DEVICE if device is null or the endpoint is not
+ *   configured
+ * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range
+ */
+int _rmnet_unset_logical_endpoint_config(struct net_device *dev,
+ int config_id)
+{
+	struct rmnet_logical_ep_conf_s *epconfig_l = NULL;
+
+ ASSERT_RTNL();
+
+ if (!dev)
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+ if (config_id < RMNET_LOCAL_LOGICAL_ENDPOINT
+ || config_id >= RMNET_DATA_MAX_LOGICAL_EP)
+ return RMNET_CONFIG_BAD_ARGUMENTS;
+
+ epconfig_l = _rmnet_get_logical_ep(dev, config_id);
+
+ if (!epconfig_l || !epconfig_l->refcount)
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+ /* Explicitly release the reference from the egress device */
+ dev_put(epconfig_l->egress_dev);
+ memset(epconfig_l, 0, sizeof(struct rmnet_logical_ep_conf_s));
+
+ return RMNET_CONFIG_OK;
+}
+
+/**
+ * rmnet_set_logical_endpoint_config() - Set logical endpoint config on a device
+ * @dev: Device to set endpoint configuration on
+ * @config_id: logical endpoint id on device
+ * @rmnet_mode: endpoint mode. Values from: rmnet_config_endpoint_modes_e
+ * @egress_dev: device node to forward packet to once done processing in
+ * ingress/egress handlers
+ *
+ * Creates a logical_endpoint_config structure and fills in the information from
+ * function arguments. Calls _rmnet_set_logical_endpoint_config() to finish
+ * configuration. Network device must already have association with RmNet Data
+ * driver
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_BAD_EGRESS_DEVICE if egress device is null
+ * - RMNET_CONFIG_BAD_EGRESS_DEVICE if egress device is not handled by
+ * RmNet data module
+ * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ * - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null
+ * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range
+ */
+int rmnet_set_logical_endpoint_config(struct net_device *dev,
+ int config_id,
+ uint8_t rmnet_mode,
+ struct net_device *egress_dev)
+{
+ struct rmnet_logical_ep_conf_s epconfig;
+
+ LOGL("(%s, %d, %d, %s);",
+ dev->name, config_id, rmnet_mode, egress_dev->name);
+
+ if (!egress_dev
+ || ((!_rmnet_is_physical_endpoint_associated(egress_dev))
+ && (!rmnet_vnd_is_vnd(egress_dev)))) {
+ return RMNET_CONFIG_BAD_EGRESS_DEVICE;
+ }
+
+ memset(&epconfig, 0, sizeof(struct rmnet_logical_ep_conf_s));
+ epconfig.refcount = 1;
+ epconfig.rmnet_mode = rmnet_mode;
+ epconfig.egress_dev = egress_dev;
+
+ return _rmnet_set_logical_endpoint_config(dev, config_id, &epconfig);
+}
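+
+/* Illustrative use (hypothetical caller, RTNL held): deliver MAP mux id 3
+ * arriving on phys_dev to a VND, and route the VND's egress traffic back
+ * out through phys_dev:
+ *
+ *	rmnet_set_logical_endpoint_config(phys_dev, 3, RMNET_EPMODE_VND, vnd);
+ *	rmnet_set_logical_endpoint_config(vnd, RMNET_LOCAL_LOGICAL_ENDPOINT,
+ *					  RMNET_EPMODE_VND, phys_dev);
+ */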
+
+/**
+ * rmnet_unset_logical_endpoint_config() - Un-set logical endpoint configuration
+ * on a device
+ * @dev: Device to set endpoint configuration on
+ * @config_id: logical endpoint id on device
+ *
+ * Retrieves the logical_endpoint_config structure and frees the egress device.
+ * Network device must already have association with RmNet Data driver
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ * - RMNET_CONFIG_NO_SUCH_DEVICE device is not associated
+ * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range
+ */
+int rmnet_unset_logical_endpoint_config(struct net_device *dev,
+ int config_id)
+{
+ LOGL("(%s, %d);", dev->name, config_id);
+
+ if (!dev
+ || ((!_rmnet_is_physical_endpoint_associated(dev))
+ && (!rmnet_vnd_is_vnd(dev)))) {
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+ }
+
+ return _rmnet_unset_logical_endpoint_config(dev, config_id);
+}
+
+/**
+ * rmnet_get_logical_endpoint_config() - Get logical endpoint configuration
+ *                                       for a device
+ * @dev: Device to get endpoint configuration on
+ * @config_id: logical endpoint id on device
+ * @rmnet_mode: (output) logical endpoint mode
+ * @egress_dev_name: (output) logical endpoint egress device name
+ * @egress_dev_name_size: Maximum size of the egress_dev_name output buffer
+ *
+ * Retrieves the logical_endpoint_config structure.
+ * Network device must already have association with RmNet Data driver
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ * - RMNET_CONFIG_NO_SUCH_DEVICE device is not associated
+ * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range or
+ * if the provided buffer size for egress dev name is too short
+ */
+int rmnet_get_logical_endpoint_config(struct net_device *dev,
+ int config_id,
+ uint8_t *rmnet_mode,
+ uint8_t *egress_dev_name,
+ size_t egress_dev_name_size)
+{
+	struct rmnet_logical_ep_conf_s *epconfig_l = NULL;
+	size_t strlcpy_res = 0;
+
+	if (!dev)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	LOGL("(%s, %d);", dev->name, config_id);
+
+	if (!egress_dev_name || !rmnet_mode)
+		return RMNET_CONFIG_BAD_ARGUMENTS;
+ if (config_id < RMNET_LOCAL_LOGICAL_ENDPOINT
+ || config_id >= RMNET_DATA_MAX_LOGICAL_EP)
+ return RMNET_CONFIG_BAD_ARGUMENTS;
+
+ epconfig_l = _rmnet_get_logical_ep(dev, config_id);
+
+ if (!epconfig_l || !epconfig_l->refcount)
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+ *rmnet_mode = epconfig_l->rmnet_mode;
+
+ strlcpy_res = strlcpy(egress_dev_name, epconfig_l->egress_dev->name,
+ egress_dev_name_size);
+
+ if (strlcpy_res >= egress_dev_name_size)
+ return RMNET_CONFIG_BAD_ARGUMENTS;
+
+ return RMNET_CONFIG_OK;
+}
+
+/**
+ * rmnet_create_vnd() - Create virtual network device node
+ * @id: RmNet virtual device node id
+ *
+ * Return:
+ * - result of rmnet_vnd_create_dev()
+ */
+int rmnet_create_vnd(int id)
+{
+	struct net_device *dev;
+
+	ASSERT_RTNL();
+ LOGL("(%d);", id);
+ return rmnet_vnd_create_dev(id, &dev, NULL);
+}
+
+/**
+ * rmnet_create_vnd_prefix() - Create virtual network device node with prefix
+ * @id: RmNet virtual device node id
+ * @prefix: String prefix for device name
+ *
+ * Return:
+ * - result of rmnet_vnd_create_dev()
+ */
+int rmnet_create_vnd_prefix(int id, const char *prefix)
+{
+	struct net_device *dev;
+
+	ASSERT_RTNL();
+ LOGL("(%d, \"%s\");", id, prefix);
+ return rmnet_vnd_create_dev(id, &dev, prefix);
+}
+
+/**
+ * rmnet_free_vnd() - Free virtual network device node
+ * @id: RmNet virtual device node id
+ *
+ * Return:
+ * - result of rmnet_vnd_free_dev()
+ */
+int rmnet_free_vnd(int id)
+{
+ LOGL("(%d);", id);
+ return rmnet_vnd_free_dev(id);
+}
+
+static void _rmnet_free_vnd_later(struct work_struct *work)
+{
+ int i;
+	struct rmnet_free_vnd_work *fwork;
+
+	fwork = container_of(work, struct rmnet_free_vnd_work, work);
+
+ for (i = 0; i < fwork->count; i++)
+ rmnet_free_vnd(fwork->vnd_id[i]);
+ kfree(fwork);
+}
+
+/**
+ * rmnet_force_unassociate_device() - Force a device to unassociate
+ * @dev: Device to unassociate
+ *
+ * Return:
+ * - void
+ */
+static void rmnet_force_unassociate_device(struct net_device *dev)
+{
+ int i, j;
+ struct net_device *vndev;
+ struct rmnet_logical_ep_conf_s *cfg;
+ struct rmnet_free_vnd_work *vnd_work;
+
+	ASSERT_RTNL();
+
+ if (!dev)
+ BUG();
+
+ if (!_rmnet_is_physical_endpoint_associated(dev)) {
+ LOGM("%s", "Called on unassociated device, skipping");
+ return;
+ }
+
+ trace_rmnet_unregister_cb_clear_vnds(dev);
+ vnd_work = kmalloc(sizeof(*vnd_work), GFP_KERNEL);
+ if (!vnd_work) {
+ LOGH("%s", "Out of Memory");
+ return;
+ }
+ INIT_WORK(&vnd_work->work, _rmnet_free_vnd_later);
+ vnd_work->count = 0;
+
+ /* Check the VNDs for offending mappings */
+ for (i = 0, j = 0; i < RMNET_DATA_MAX_VND &&
+ j < RMNET_DATA_MAX_VND; i++) {
+ vndev = rmnet_vnd_get_by_id(i);
+ if (!vndev) {
+ LOGL("VND %d not in use; skipping", i);
+ continue;
+ }
+ cfg = rmnet_vnd_get_le_config(vndev);
+ if (!cfg) {
+ LOGH("Got NULL config from VND %d", i);
+			BUG();
+ }
+ if (cfg->refcount && (cfg->egress_dev == dev)) {
+ /* Make sure the device is down before clearing any of
+ * the mappings. Otherwise we could see a potential
+ * race condition if packets are actively being
+ * transmitted.
+ */
+ dev_close(vndev);
+ rmnet_unset_logical_endpoint_config(vndev,
+ RMNET_LOCAL_LOGICAL_ENDPOINT);
+ vnd_work->vnd_id[j] = i;
+ j++;
+ }
+ }
+ if (j > 0) {
+ vnd_work->count = j;
+ schedule_work(&vnd_work->work);
+ } else {
+ kfree(vnd_work);
+ }
+
+ /* Clear the mappings on the phys ep */
+ trace_rmnet_unregister_cb_clear_lepcs(dev);
+ rmnet_unset_logical_endpoint_config(dev, RMNET_LOCAL_LOGICAL_ENDPOINT);
+ for (i = 0; i < RMNET_DATA_MAX_LOGICAL_EP; i++)
+ rmnet_unset_logical_endpoint_config(dev, i);
+ rmnet_unassociate_network_device(dev);
+}
+
+/**
+ * rmnet_config_notify_cb() - Callback for netdevice notifier chain
+ * @nb: Notifier block data
+ * @event: Netdevice notifier event ID
+ * @data: Contains a net device for which we are getting notified
+ *
+ * Return:
+ * - NOTIFY_DONE in all cases
+ */
+int rmnet_config_notify_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(data);
+
+ if (!dev)
+ BUG();
+
+ LOGL("(..., %lu, %s)", event, dev->name);
+
+ switch (event) {
+ case NETDEV_UNREGISTER_FINAL:
+ case NETDEV_UNREGISTER:
+ trace_rmnet_unregister_cb_entry(dev);
+ LOGH("Kernel is trying to unregister %s", dev->name);
+ rmnet_force_unassociate_device(dev);
+ trace_rmnet_unregister_cb_exit(dev);
+ break;
+
+ default:
+ trace_rmnet_unregister_cb_unhandled(dev);
+ LOGD("Unhandeled event [%lu]", event);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
diff --git a/net/rmnet_data/rmnet_data_config.h b/net/rmnet_data/rmnet_data_config.h
new file mode 100644
index 000000000000..f19fbb378111
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_config.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2013-2014, 2016-2017 The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data configuration engine
+ *
+ */
+
+#ifndef _RMNET_DATA_CONFIG_H_
+#define _RMNET_DATA_CONFIG_H_
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/spinlock.h>
+#include <net/rmnet_config.h>
+
+#define RMNET_DATA_MAX_LOGICAL_EP 256
+
+/**
+ * struct rmnet_logical_ep_conf_s - Logical end-point configuration
+ *
+ * @refcount: Reference count for this endpoint. 0 signifies the endpoint is not
+ * configured for use
+ * @rmnet_mode: Specifies how the traffic should be finally delivered. Possible
+ * options are available in enum rmnet_config_endpoint_modes_e
+ * @mux_id: Virtual channel ID used by MAP protocol
+ * @flush_time: Timestamp of the last GRO flush on this endpoint
+ * @flush_byte_count: Bytes received on this endpoint since the last GRO flush
+ * @egress_dev: Next device to deliver the packet to. Exact usage of this
+ *              parameter depends on the rmnet_mode
+ */
+struct rmnet_logical_ep_conf_s {
+ uint8_t refcount;
+ uint8_t rmnet_mode;
+ uint8_t mux_id;
+ struct timespec flush_time;
+ unsigned int flush_byte_count;
+ struct net_device *egress_dev;
+};
+
+/**
+ * struct rmnet_phys_ep_config - Physical endpoint configuration
+ * One instance of this structure exists for each net_device associated
+ * with rmnet_data.
+ *
+ * @dev: The device which is associated with rmnet_data. Corresponds to this
+ *       specific instance of rmnet_phys_ep_config
+ * @local_ep: Default non-muxed endpoint. Used for non-MAP protocols/formats
+ * @muxed_ep: All multiplexed logical endpoints associated with this device
+ * @ingress_data_format: RMNET_INGRESS_FORMAT_* flags from rmnet_data.h
+ * @egress_data_format: RMNET_EGRESS_FORMAT_* flags from rmnet_data.h
+ *
+ * @egress_agg_size: Maximum size (bytes) of data which should be aggregated
+ * @egress_agg_count: Maximum count (packets) of data which should be aggregated
+ *                    Whichever of the two limits above is reached first
+ *                    closes the aggregate frame
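+ *                    (e.g. with egress_agg_size = 8192 and egress_agg_count
+ *                    = 10, an aggregate frame is shipped as soon as adding
+ *                    another packet would exceed either limit)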
+ * @tail_spacing: Guaranteed padding (bytes) when de-aggregating ingress frames
+ * @agg_time: Wall clock time when aggregated frame was created
+ * @agg_last: Last time the aggregation routing was invoked
+ */
+struct rmnet_phys_ep_config {
+ struct net_device *dev;
+ struct rmnet_logical_ep_conf_s local_ep;
+ struct rmnet_logical_ep_conf_s muxed_ep[RMNET_DATA_MAX_LOGICAL_EP];
+ uint32_t ingress_data_format;
+ uint32_t egress_data_format;
+
+ /* MAP specific */
+ uint16_t egress_agg_size;
+ uint16_t egress_agg_count;
+ uint8_t tail_spacing;
+	/* MAP aggregation state machine
+	 * - This is not strictly configuration and is updated at runtime.
+	 *   Make sure all of these are protected by the agg_lock
+	 */
+ spinlock_t agg_lock;
+ struct sk_buff *agg_skb;
+ uint8_t agg_state;
+ uint8_t agg_count;
+ struct timespec agg_time;
+ struct timespec agg_last;
+};
+
+int rmnet_config_init(void);
+void rmnet_config_exit(void);
+
+int rmnet_unassociate_network_device(struct net_device *dev);
+int rmnet_set_ingress_data_format(struct net_device *dev,
+ uint32_t ingress_data_format,
+ uint8_t tail_spacing);
+int rmnet_set_egress_data_format(struct net_device *dev,
+ uint32_t egress_data_format,
+ uint16_t agg_size,
+ uint16_t agg_count);
+int rmnet_associate_network_device(struct net_device *dev);
+int _rmnet_set_logical_endpoint_config(struct net_device *dev,
+ int config_id,
+ struct rmnet_logical_ep_conf_s *epconfig);
+int rmnet_set_logical_endpoint_config(struct net_device *dev,
+ int config_id,
+ uint8_t rmnet_mode,
+ struct net_device *egress_dev);
+int _rmnet_unset_logical_endpoint_config(struct net_device *dev,
+ int config_id);
+int rmnet_unset_logical_endpoint_config(struct net_device *dev,
+ int config_id);
+int _rmnet_get_logical_endpoint_config(struct net_device *dev,
+ int config_id,
+ struct rmnet_logical_ep_conf_s *epconfig);
+int rmnet_get_logical_endpoint_config(struct net_device *dev,
+ int config_id,
+ uint8_t *rmnet_mode,
+ uint8_t *egress_dev_name,
+ size_t egress_dev_name_size);
+void rmnet_config_netlink_msg_handler(struct sk_buff *skb);
+int rmnet_config_notify_cb(struct notifier_block *nb,
+ unsigned long event, void *data);
+int rmnet_create_vnd(int id);
+int rmnet_create_vnd_prefix(int id, const char *name);
+int rmnet_free_vnd(int id);
+
+struct rmnet_phys_ep_config *_rmnet_get_phys_ep_config
+ (struct net_device *dev);
+
+#endif /* _RMNET_DATA_CONFIG_H_ */
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
new file mode 100644
index 000000000000..cef9369eace5
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -0,0 +1,771 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data ingress/egress handler
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/rmnet_data.h>
+#include <linux/net_map.h>
+#include <linux/netdev_features.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/rmnet_config.h>
+#include "rmnet_data_private.h"
+#include "rmnet_data_config.h"
+#include "rmnet_data_vnd.h"
+#include "rmnet_map.h"
+#include "rmnet_data_stats.h"
+#include "rmnet_data_trace.h"
+
+RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_HANDLER);
+
+void rmnet_egress_handler(struct sk_buff *skb,
+ struct rmnet_logical_ep_conf_s *ep);
+
+#ifdef CONFIG_RMNET_DATA_DEBUG_PKT
+unsigned int dump_pkt_rx;
+module_param(dump_pkt_rx, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dump_pkt_rx, "Dump packets entering ingress handler");
+
+unsigned int dump_pkt_tx;
+module_param(dump_pkt_tx, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dump_pkt_tx, "Dump packets exiting egress handler");
+#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */
+
+/* Time in nanoseconds. This value must be less than one second. */
+long gro_flush_time __read_mostly = 10000L;
+module_param(gro_flush_time, long, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(gro_flush_time, "Flush GRO when spaced more than this");
+
+unsigned int gro_min_byte_thresh __read_mostly = 7500;
+module_param(gro_min_byte_thresh, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(gro_min_byte_thresh, "Min byte thresh to change flush time");
+
+unsigned int dynamic_gro_on __read_mostly = 1;
+module_param(dynamic_gro_on, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dynamic_gro_on, "Toggle to turn on dynamic gro logic");
+
+unsigned int upper_flush_time __read_mostly = 15000;
+module_param(upper_flush_time, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(upper_flush_time, "Upper limit on flush time");
+
+unsigned int upper_byte_limit __read_mostly = 10500;
+module_param(upper_byte_limit, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(upper_byte_limit, "Upper byte limit");
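+
+/* The dynamic GRO logic in rmnet_optional_gro_flush() below promotes or
+ * demotes gro_flush_time and gro_min_byte_thresh between their default and
+ * upper values based on the observed byte rate.
+ */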
+
+#define RMNET_DATA_IP_VERSION_4 0x40
+#define RMNET_DATA_IP_VERSION_6 0x60
+
+#define RMNET_DATA_GRO_RCV_FAIL 0
+#define RMNET_DATA_GRO_RCV_PASS 1
+
+/* ***************** Helper Functions *************************************** */
+
+/**
+ * __rmnet_data_set_skb_proto() - Set skb->protocol field
+ * @skb: packet being modified
+ *
+ * Peek at the first byte of the packet and set the protocol. There is no
+ * good way to determine if a packet has a MAP header. As of writing this,
+ * the reserved bit in the MAP frame will prevent it from overlapping with
+ * IPv4/IPv6 frames. This could change in the future!
+ */
+static inline void __rmnet_data_set_skb_proto(struct sk_buff *skb)
+{
+ switch (skb->data[0] & 0xF0) {
+ case RMNET_DATA_IP_VERSION_4:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case RMNET_DATA_IP_VERSION_6:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ skb->protocol = htons(ETH_P_MAP);
+ break;
+ }
+}
+
+#ifdef CONFIG_RMNET_DATA_DEBUG_PKT
+/**
+ * rmnet_print_packet() - Print packet / diagnostics
+ * @skb: Packet to print
+ * @dev: Name of interface
+ * @dir: Character representing direction (e.g. 'r' for receive); selects
+ *       the dump_pkt_rx or dump_pkt_tx module parameter as the print length
+ *
+ * This function prints out raw bytes in an SKB. Use of this will have major
+ * performance impacts and may even trigger watchdog resets if too much is being
+ * printed. Hence, this should always be compiled out unless absolutely needed.
+ */
+void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir)
+{
+ char buffer[200];
+ unsigned int len, printlen;
+ int i, buffloc = 0;
+
+ switch (dir) {
+ case 'r':
+ printlen = dump_pkt_rx;
+ break;
+
+ case 't':
+ printlen = dump_pkt_tx;
+ break;
+
+ default:
+ printlen = 0;
+ break;
+ }
+
+ if (!printlen)
+ return;
+
+ pr_err("[%s][%c] - PKT skb->len=%d skb->head=%pK skb->data=%pK\n",
+ dev, dir, skb->len, (void *)skb->head, (void *)skb->data);
+ pr_err("[%s][%c] - PKT skb->tail=%pK skb->end=%pK\n",
+ dev, dir, skb_tail_pointer(skb), skb_end_pointer(skb));
+
+ if (skb->len > 0)
+ len = skb->len;
+ else
+ len = ((unsigned int)(uintptr_t)skb->end) -
+ ((unsigned int)(uintptr_t)skb->data);
+
+ pr_err("[%s][%c] - PKT len: %d, printing first %d bytes\n",
+ dev, dir, len, printlen);
+
+ memset(buffer, 0, sizeof(buffer));
+ for (i = 0; (i < printlen) && (i < len); i++) {
+ if ((i%16) == 0) {
+ pr_err("[%s][%c] - PKT%s\n", dev, dir, buffer);
+ memset(buffer, 0, sizeof(buffer));
+ buffloc = 0;
+ buffloc += snprintf(&buffer[buffloc],
+ sizeof(buffer)-buffloc, "%04X:",
+ i);
+ }
+
+ buffloc += snprintf(&buffer[buffloc], sizeof(buffer)-buffloc,
+ " %02x", skb->data[i]);
+
+ }
+ pr_err("[%s][%c] - PKT%s\n", dev, dir, buffer);
+}
+#else
+void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir)
+{
+}
+#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */
+
+/* ***************** Generic handler **************************************** */
+
+/**
+ * rmnet_bridge_handler() - Bridge related functionality
+ *
+ * Return:
+ * - RX_HANDLER_CONSUMED in all cases
+ */
+static rx_handler_result_t rmnet_bridge_handler(struct sk_buff *skb,
+ struct rmnet_logical_ep_conf_s *ep)
+{
+ if (!ep->egress_dev) {
+ LOGD("Missing egress device for packet arriving on %s",
+ skb->dev->name);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_BRDG_NO_EGRESS);
+ } else {
+ rmnet_egress_handler(skb, ep);
+ }
+
+ return RX_HANDLER_CONSUMED;
+}
+
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+static void rmnet_reset_mac_header(struct sk_buff *skb)
+{
+ skb->mac_header = 0;
+ skb->mac_len = 0;
+}
+#else
+static void rmnet_reset_mac_header(struct sk_buff *skb)
+{
+ skb->mac_header = skb->network_header;
+ skb->mac_len = 0;
+}
+#endif /*NET_SKBUFF_DATA_USES_OFFSET*/
+
+/**
+ * rmnet_check_skb_can_gro() - Check if skb can be passed through GRO handler
+ *
+ * Determines whether to pass the skb to the GRO handler napi_gro_receive() or
+ * handle normally by passing to netif_receive_skb().
+ *
+ * Warning:
+ * This assumes that only TCP packets can be coalesced by the GRO handler which
+ * is not true in general. We lose the ability to use GRO for cases like UDP
+ * encapsulation protocols.
+ *
+ * Return:
+ * - RMNET_DATA_GRO_RCV_FAIL if packet is sent to netif_receive_skb()
+ * - RMNET_DATA_GRO_RCV_PASS if packet is sent to napi_gro_receive()
+ */
+static int rmnet_check_skb_can_gro(struct sk_buff *skb)
+{
+ switch (skb->data[0] & 0xF0) {
+ case RMNET_DATA_IP_VERSION_4:
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ return RMNET_DATA_GRO_RCV_PASS;
+ break;
+ case RMNET_DATA_IP_VERSION_6:
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ return RMNET_DATA_GRO_RCV_PASS;
+		break;
+ }
+
+ return RMNET_DATA_GRO_RCV_FAIL;
+}
+
+/**
+ * rmnet_optional_gro_flush() - Check if GRO handler needs to flush now
+ *
+ * Determines whether GRO handler needs to flush packets which it has
+ * coalesced so far.
+ *
+ * Tuning this parameter will trade TCP slow start performance for GRO coalesce
+ * ratio.
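+ *
+ * For example, with the default gro_flush_time of 10000 ns, coalesced
+ * packets are flushed whenever successive invocations are spaced more than
+ * 10 us apart (subject to the dynamic byte-count heuristics below).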
+ */
+static void rmnet_optional_gro_flush(struct napi_struct *napi,
+ struct rmnet_logical_ep_conf_s *ep,
+ unsigned int skb_size)
+{
+ struct timespec curr_time, diff;
+
+ if (!gro_flush_time)
+ return;
+
+ if (unlikely(ep->flush_time.tv_sec == 0)) {
+ getnstimeofday(&ep->flush_time);
+ ep->flush_byte_count = 0;
+ } else {
+ getnstimeofday(&(curr_time));
+ diff = timespec_sub(curr_time, ep->flush_time);
+ ep->flush_byte_count += skb_size;
+
+ if (dynamic_gro_on) {
+ if ((!(diff.tv_sec > 0) || diff.tv_nsec <=
+ gro_flush_time) &&
+ ep->flush_byte_count >=
+ gro_min_byte_thresh) {
+ /* Processed many bytes in a small time window.
+ * No longer need to flush so often and we can
+ * increase our byte limit
+ */
+ gro_flush_time = upper_flush_time;
+ gro_min_byte_thresh = upper_byte_limit;
+ } else if ((diff.tv_sec > 0 ||
+ diff.tv_nsec > gro_flush_time) &&
+ ep->flush_byte_count <
+ gro_min_byte_thresh) {
+				/* We have not hit our time limit and we are
+				 * not receiving many bytes. Demote ourselves
+				 * to the lowest limits and flush
+				 */
+ */
+ napi_gro_flush(napi, false);
+ getnstimeofday(&ep->flush_time);
+ ep->flush_byte_count = 0;
+ gro_flush_time = 10000L;
+ gro_min_byte_thresh = 7500L;
+ } else if ((diff.tv_sec > 0 ||
+ diff.tv_nsec > gro_flush_time) &&
+ ep->flush_byte_count >=
+ gro_min_byte_thresh) {
+				/* Above byte and time limit, therefore we can
+ * move/maintain our limits to be the max
+ * and flush
+ */
+ napi_gro_flush(napi, false);
+ getnstimeofday(&ep->flush_time);
+ ep->flush_byte_count = 0;
+ gro_flush_time = upper_flush_time;
+ gro_min_byte_thresh = upper_byte_limit;
+ }
+ /* else, below time limit and below
+ * byte thresh, so change nothing
+ */
+ } else if (diff.tv_sec > 0 ||
+ diff.tv_nsec >= gro_flush_time) {
+ napi_gro_flush(napi, false);
+ getnstimeofday(&ep->flush_time);
+ ep->flush_byte_count = 0;
+ }
+ }
+}
+
+/**
+ * __rmnet_deliver_skb() - Deliver skb
+ *
+ * Determines where to deliver skb. Options are: consume by network stack,
+ * pass to bridge handler, or pass to virtual network device
+ *
+ * Return:
+ * - RX_HANDLER_CONSUMED if packet forwarded or dropped
+ * - RX_HANDLER_PASS if packet is to be consumed by network stack as-is
+ */
+static rx_handler_result_t __rmnet_deliver_skb(struct sk_buff *skb,
+ struct rmnet_logical_ep_conf_s *ep)
+{
+ struct napi_struct *napi = NULL;
+ gro_result_t gro_res;
+ unsigned int skb_size;
+
+ trace___rmnet_deliver_skb(skb);
+ switch (ep->rmnet_mode) {
+ case RMNET_EPMODE_NONE:
+ return RX_HANDLER_PASS;
+
+ case RMNET_EPMODE_BRIDGE:
+ return rmnet_bridge_handler(skb, ep);
+
+ case RMNET_EPMODE_VND:
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+ switch (rmnet_vnd_rx_fixup(skb, skb->dev)) {
+ case RX_HANDLER_CONSUMED:
+ return RX_HANDLER_CONSUMED;
+
+ case RX_HANDLER_PASS:
+ skb->pkt_type = PACKET_HOST;
+ rmnet_reset_mac_header(skb);
+ if (rmnet_check_skb_can_gro(skb) &&
+ (skb->dev->features & NETIF_F_GRO)) {
+ napi = get_current_napi_context();
+ if (napi != NULL) {
+ skb_size = skb->len;
+ gro_res = napi_gro_receive(napi, skb);
+ trace_rmnet_gro_downlink(gro_res);
+					rmnet_optional_gro_flush(napi, ep,
+								 skb_size);
+ } else {
+ WARN_ONCE(1, "current napi is NULL\n");
+ netif_receive_skb(skb);
+ }
+ } else {
+ netif_receive_skb(skb);
+ }
+ return RX_HANDLER_CONSUMED;
+ }
+ return RX_HANDLER_PASS;
+
+ default:
+ LOGD("Unkown ep mode %d", ep->rmnet_mode);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_DELIVER_NO_EP);
+ return RX_HANDLER_CONSUMED;
+ }
+}
+
+/**
+ * rmnet_ingress_deliver_packet() - Ingress handler for raw IP and bridged
+ * MAP packets.
+ * @skb: Packet needing a destination.
+ * @config: Physical end point configuration that the packet arrived on.
+ *
+ * Return:
+ * - RX_HANDLER_CONSUMED if packet forwarded/dropped
+ * - RX_HANDLER_PASS if packet should be passed up the stack by caller
+ */
+static rx_handler_result_t rmnet_ingress_deliver_packet(struct sk_buff *skb,
+ struct rmnet_phys_ep_config *config)
+{
+ if (!config) {
+ LOGD("%s", "NULL physical EP provided");
+ kfree_skb(skb);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ if (!(config->local_ep.refcount)) {
+ LOGD("Packet on %s has no local endpoint configuration",
+ skb->dev->name);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_IPINGRESS_NO_EP);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ skb->dev = config->local_ep.egress_dev;
+
+ return __rmnet_deliver_skb(skb, &(config->local_ep));
+}
+
+/* ***************** MAP handler ******************************************** */
+
+/**
+ * _rmnet_map_ingress_handler() - Actual MAP ingress handler
+ * @skb: Packet being received
+ * @config: Physical endpoint configuration for the ingress device
+ *
+ * Most MAP ingress functions are processed here. Packets are processed
+ * individually; aggregated packets should use rmnet_map_ingress_handler()
+ *
+ * Return:
+ * - RX_HANDLER_CONSUMED if packet is dropped
+ * - result of __rmnet_deliver_skb() for all other cases
+ */
+static rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
+ struct rmnet_phys_ep_config *config)
+{
+ struct rmnet_logical_ep_conf_s *ep;
+ uint8_t mux_id;
+ uint16_t len;
+ int ckresult;
+
+ if (RMNET_MAP_GET_CD_BIT(skb)) {
+ if (config->ingress_data_format
+ & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
+ return rmnet_map_command(skb, config);
+
+ LOGM("MAP command packet on %s; %s", skb->dev->name,
+ "Not configured for MAP commands");
+ rmnet_kfree_skb(skb,
+ RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPC);
+ return RX_HANDLER_CONSUMED;
+ }
+
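+	/* Compute the true payload length up front: the MAP frame length
+	 * minus MAP padding and the configured tail spacing. It is applied
+	 * via skb_trim() once the MAP header has been pulled.
+	 */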
+ mux_id = RMNET_MAP_GET_MUX_ID(skb);
+ len = RMNET_MAP_GET_LENGTH(skb)
+ - RMNET_MAP_GET_PAD(skb)
+ - config->tail_spacing;
+
+ if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) {
+ LOGD("Got packet on %s with bad mux id %d",
+ skb->dev->name, mux_id);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ ep = &(config->muxed_ep[mux_id]);
+
+ if (!ep->refcount) {
+ LOGD("Packet on %s:%d; has no logical endpoint config",
+ skb->dev->name, mux_id);
+
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
+ skb->dev = ep->egress_dev;
+
+ if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
+ (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)) {
+ ckresult = rmnet_map_checksum_downlink_packet(skb);
+ trace_rmnet_map_checksum_downlink_packet(skb, ckresult);
+ rmnet_stats_dl_checksum(ckresult);
+ if (likely((ckresult == RMNET_MAP_CHECKSUM_OK)
+ || (ckresult == RMNET_MAP_CHECKSUM_SKIPPED)))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else if (ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION
+ && ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT
+ && ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET
+ && ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) {
+ rmnet_kfree_skb(skb,
+ RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM);
+ return RX_HANDLER_CONSUMED;
+ }
+ }
+
+ /* Subtract MAP header */
+ skb_pull(skb, sizeof(struct rmnet_map_header_s));
+ skb_trim(skb, len);
+ __rmnet_data_set_skb_proto(skb);
+ return __rmnet_deliver_skb(skb, ep);
+}
+
+/**
+ * rmnet_map_ingress_handler() - MAP ingress handler
+ * @skb: Packet being received
+ * @config: Physical endpoint configuration for the ingress device
+ *
+ * Called if and only if MAP is configured in the ingress device's ingress data
+ * format. Deaggregation is done here, actual MAP processing is done in
+ * _rmnet_map_ingress_handler().
+ *
+ * Return:
+ * - RX_HANDLER_CONSUMED for aggregated packets
+ * - RX_HANDLER_CONSUMED for dropped packets
+ * - result of _rmnet_map_ingress_handler() for all other cases
+ */
+static rx_handler_result_t rmnet_map_ingress_handler(struct sk_buff *skb,
+ struct rmnet_phys_ep_config *config)
+{
+ struct sk_buff *skbn;
+ int rc, co = 0;
+
+ if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
+ trace_rmnet_start_deaggregation(skb);
+		while ((skbn = rmnet_map_deaggregate(skb, config)) != NULL) {
+ _rmnet_map_ingress_handler(skbn, config);
+ co++;
+ }
+ trace_rmnet_end_deaggregation(skb, co);
+ LOGD("De-aggregated %d packets", co);
+ rmnet_stats_deagg_pkts(co);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF);
+ rc = RX_HANDLER_CONSUMED;
+ } else {
+ rc = _rmnet_map_ingress_handler(skb, config);
+ }
+
+ return rc;
+}
+
+/**
+ * rmnet_map_egress_handler() - MAP egress handler
+ * @skb: Packet being sent
+ * @config: Physical endpoint configuration for the egress device
+ * @ep: logical endpoint configuration of the packet originator
+ *       (e.g. RmNet virtual network device)
+ * @orig_dev: The originator vnd device
+ *
+ * Called if and only if MAP is configured in the egress device's egress data
+ * format. Will expand skb if there is insufficient headroom for MAP protocol.
+ * Note: headroom expansion will incur a performance penalty.
+ *
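+ * Resulting frame layout (sketch; the header structs are defined in
+ * rmnet_map.h):
+ *	[MAP header][optional UL checksum header][IP packet][pad bytes]
+ *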
+ * Return:
+ * - RMNET_MAP_SUCCESS on success
+ * - RMNET_MAP_CONSUMED if the packet was consumed by egress aggregation
+ * - 1 on failure
+ */
+static int rmnet_map_egress_handler(struct sk_buff *skb,
+ struct rmnet_phys_ep_config *config,
+ struct rmnet_logical_ep_conf_s *ep,
+ struct net_device *orig_dev)
+{
+ int required_headroom, additional_header_length, ckresult;
+ struct rmnet_map_header_s *map_header;
+
+ additional_header_length = 0;
+
+ required_headroom = sizeof(struct rmnet_map_header_s);
+ if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
+ (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)) {
+ required_headroom +=
+ sizeof(struct rmnet_map_ul_checksum_header_s);
+ additional_header_length +=
+ sizeof(struct rmnet_map_ul_checksum_header_s);
+ }
+
+ LOGD("headroom of %d bytes", required_headroom);
+
+ if (skb_headroom(skb) < required_headroom) {
+ if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) {
+ LOGD("Failed to add headroom of %d bytes",
+ required_headroom);
+ kfree_skb(skb);
+ return 1;
+ }
+ }
+
+ if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
+ (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)) {
+ ckresult = rmnet_map_checksum_uplink_packet
+ (skb, orig_dev, config->egress_data_format);
+ trace_rmnet_map_checksum_uplink_packet(orig_dev, ckresult);
+ rmnet_stats_ul_checksum(ckresult);
+ }
+
+ if ((!(config->egress_data_format &
+ RMNET_EGRESS_FORMAT_AGGREGATION)) ||
+ ((orig_dev->features & NETIF_F_GSO) && skb_is_nonlinear(skb)))
+ map_header = rmnet_map_add_map_header
+ (skb, additional_header_length, RMNET_MAP_NO_PAD_BYTES);
+ else
+ map_header = rmnet_map_add_map_header
+ (skb, additional_header_length, RMNET_MAP_ADD_PAD_BYTES);
+
+ if (!map_header) {
+ LOGD("%s", "Failed to add MAP header to egress packet");
+ kfree_skb(skb);
+ return 1;
+ }
+
+ if (config->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
+ if (ep->mux_id == 0xff)
+ map_header->mux_id = 0;
+ else
+ map_header->mux_id = ep->mux_id;
+ }
+
+ skb->protocol = htons(ETH_P_MAP);
+
+ if (config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
+ rmnet_map_aggregate(skb, config);
+ return RMNET_MAP_CONSUMED;
+ }
+
+ return RMNET_MAP_SUCCESS;
+}
+
+/* ***************** Ingress / Egress Entry Points ************************** */
+
+/**
+ * rmnet_ingress_handler() - Ingress handler entry point
+ * @skb: Packet being received
+ *
+ * Processes packet as per ingress data format for receiving device. Logical
+ * endpoint is determined from packet inspection. Packet is then sent to the
+ * egress device listed in the logical endpoint configuration.
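+ *
+ * Ingress pipeline (sketch): rmnet_rx_handler() -> rmnet_ingress_handler()
+ * -> rmnet_map_ingress_handler() (optional MAP de-aggregation) ->
+ * _rmnet_map_ingress_handler() (demux, checksum offload) ->
+ * __rmnet_deliver_skb() (VND, bridge, or stack delivery).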
+ *
+ * Return:
+ * - RX_HANDLER_PASS if packet is not processed by handler (caller must
+ * deal with the packet)
+ * - RX_HANDLER_CONSUMED if packet is forwarded or processed by MAP
+ */
+rx_handler_result_t rmnet_ingress_handler(struct sk_buff *skb)
+{
+ struct rmnet_phys_ep_config *config;
+ struct net_device *dev;
+ int rc;
+
+ if (!skb)
+ BUG();
+
+ dev = skb->dev;
+ trace_rmnet_ingress_handler(skb);
+ rmnet_print_packet(skb, dev->name, 'r');
+
+ config = _rmnet_get_phys_ep_config(skb->dev);
+
+ if (!config) {
+ LOGD("%s is not associated with rmnet_data", skb->dev->name);
+ kfree_skb(skb);
+ return RX_HANDLER_CONSUMED;
+ }
+
+	/* Sometimes devices operate in ethernet mode even though there is no
+	 * ethernet header. This causes the skb->protocol to contain a bogus
+ * value and the skb->data pointer to be off by 14 bytes. Fix it if
+ * configured to do so
+ */
+ if (config->ingress_data_format & RMNET_INGRESS_FIX_ETHERNET) {
+ skb_push(skb, RMNET_ETHERNET_HEADER_LENGTH);
+ __rmnet_data_set_skb_proto(skb);
+ }
+
+ if (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) {
+ rc = rmnet_map_ingress_handler(skb, config);
+ } else {
+ switch (ntohs(skb->protocol)) {
+ case ETH_P_MAP:
+ if (config->local_ep.rmnet_mode ==
+ RMNET_EPMODE_BRIDGE) {
+ rc = rmnet_ingress_deliver_packet(skb, config);
+ } else {
+ LOGD("MAP packet on %s; MAP not set",
+ dev->name);
+ rmnet_kfree_skb(skb,
+ RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPD);
+ rc = RX_HANDLER_CONSUMED;
+ }
+ break;
+
+ case ETH_P_ARP:
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ rc = rmnet_ingress_deliver_packet(skb, config);
+ break;
+
+ default:
+ LOGD("Unknown skb->proto 0x%04X\n",
+ ntohs(skb->protocol) & 0xFFFF);
+ rc = RX_HANDLER_PASS;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * rmnet_rx_handler() - Rx handler callback registered with kernel
+ * @pskb: Packet to be processed by rx handler
+ *
+ * Standard kernel-expected signature for rx handlers. Calls
+ * rmnet_ingress_handler() with correctly formatted arguments
+ *
+ * Return:
+ * - Whatever rmnet_ingress_handler() returns
+ */
+rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
+{
+ return rmnet_ingress_handler(*pskb);
+}
+
+/**
+ * rmnet_egress_handler() - Egress handler entry point
+ * @skb: packet to transmit
+ * @ep: logical endpoint configuration of the packet originator
+ *       (e.g. RmNet virtual network device)
+ *
+ * Modifies packet as per logical endpoint configuration and egress data format
+ * for egress device configured in logical endpoint. Packet is then transmitted
+ * on the egress device.
+ */
+void rmnet_egress_handler(struct sk_buff *skb,
+ struct rmnet_logical_ep_conf_s *ep)
+{
+ struct rmnet_phys_ep_config *config;
+ struct net_device *orig_dev;
+	int rc;
+
+	orig_dev = skb->dev;
+ skb->dev = ep->egress_dev;
+
+ config = _rmnet_get_phys_ep_config(skb->dev);
+
+ if (!config) {
+ LOGD("%s is not associated with rmnet_data", skb->dev->name);
+ kfree_skb(skb);
+ return;
+ }
+
+ LOGD("Packet going out on %s with egress format 0x%08X",
+ skb->dev->name, config->egress_data_format);
+
+ if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
+ switch (rmnet_map_egress_handler(skb, config, ep, orig_dev)) {
+ case RMNET_MAP_CONSUMED:
+ LOGD("%s", "MAP process consumed packet");
+ return;
+
+ case RMNET_MAP_SUCCESS:
+ break;
+
+ default:
+ LOGD("MAP egress failed on packet on %s",
+ skb->dev->name);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_EGR_MAPFAIL);
+ return;
+ }
+ }
+
+ if (ep->rmnet_mode == RMNET_EPMODE_VND)
+ rmnet_vnd_tx_fixup(skb, orig_dev);
+
+ rmnet_print_packet(skb, skb->dev->name, 't');
+ trace_rmnet_egress_handler(skb);
+ rc = dev_queue_xmit(skb);
+ if (rc != 0) {
+ LOGD("Failed to queue packet for transmission on [%s]",
+ skb->dev->name);
+ }
+ rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_EGRESS);
+}
diff --git a/net/rmnet_data/rmnet_data_handlers.h b/net/rmnet_data/rmnet_data_handlers.h
new file mode 100644
index 000000000000..42f9e6f4403e
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_handlers.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data ingress/egress handler
+ *
+ */
+
+#ifndef _RMNET_DATA_HANDLERS_H_
+#define _RMNET_DATA_HANDLERS_H_
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+#include "rmnet_data_config.h"
+
+void rmnet_egress_handler(struct sk_buff *skb,
+ struct rmnet_logical_ep_conf_s *ep);
+
+rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
+
+#endif /* _RMNET_DATA_HANDLERS_H_ */
diff --git a/net/rmnet_data/rmnet_data_main.c b/net/rmnet_data/rmnet_data_main.c
new file mode 100644
index 000000000000..d1754df94bf1
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_main.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * RMNET Data generic framework
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include "rmnet_data_private.h"
+#include "rmnet_data_config.h"
+#include "rmnet_data_vnd.h"
+
+/* ***************** Trace Points ******************************************* */
+#define CREATE_TRACE_POINTS
+#include "rmnet_data_trace.h"
+
+/* ***************** Module Parameters ************************************** */
+unsigned int rmnet_data_log_level = RMNET_LOG_LVL_ERR | RMNET_LOG_LVL_HI;
+module_param(rmnet_data_log_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rmnet_data_log_level, "Logging level");
+
+unsigned int rmnet_data_log_module_mask;
+module_param(rmnet_data_log_module_mask, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rmnet_data_log_module_mask, "Logging module mask");
+
+/* ***************** Startup/Shutdown *************************************** */
+
+/**
+ * rmnet_init() - Module initialization
+ *
+ * todo: check for (and init) startup errors
+ */
+static int __init rmnet_init(void)
+{
+ rmnet_config_init();
+ rmnet_vnd_init();
+
+ LOGL("%s", "RMNET Data driver loaded successfully");
+ return 0;
+}
+
+static void __exit rmnet_exit(void)
+{
+ rmnet_config_exit();
+ rmnet_vnd_exit();
+}
+
+module_init(rmnet_init);
+module_exit(rmnet_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/net/rmnet_data/rmnet_data_private.h b/net/rmnet_data/rmnet_data_private.h
new file mode 100644
index 000000000000..2979234999ba
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_private.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RMNET_DATA_PRIVATE_H_
+#define _RMNET_DATA_PRIVATE_H_
+
+#define RMNET_DATA_MAX_VND 32
+#define RMNET_DATA_MAX_PACKET_SIZE 16384
+#define RMNET_DATA_DFLT_PACKET_SIZE 1500
+#define RMNET_DATA_DEV_NAME_STR "rmnet_data"
+#define RMNET_DATA_NEEDED_HEADROOM 16
+#define RMNET_DATA_TX_QUEUE_LEN 1000
+#define RMNET_ETHERNET_HEADER_LENGTH 14
+
+extern unsigned int rmnet_data_log_level;
+extern unsigned int rmnet_data_log_module_mask;
+
+#define RMNET_INIT_OK 0
+#define RMNET_INIT_ERROR 1
+
+#define RMNET_LOG_LVL_DBG (1<<4)
+#define RMNET_LOG_LVL_LOW (1<<3)
+#define RMNET_LOG_LVL_MED (1<<2)
+#define RMNET_LOG_LVL_HI (1<<1)
+#define RMNET_LOG_LVL_ERR (1<<0)
+
+#define RMNET_LOG_MODULE(X) \
+ static uint32_t rmnet_mod_mask = X
+
+#define RMNET_DATA_LOGMASK_CONFIG (1<<0)
+#define RMNET_DATA_LOGMASK_HANDLER (1<<1)
+#define RMNET_DATA_LOGMASK_VND (1<<2)
+#define RMNET_DATA_LOGMASK_MAPD (1<<3)
+#define RMNET_DATA_LOGMASK_MAPC (1<<4)
+
+#define LOGE(fmt, ...) do { if (rmnet_data_log_level & RMNET_LOG_LVL_ERR) \
+ pr_err("[RMNET:ERR] %s(): " fmt "\n", __func__, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define LOGH(fmt, ...) do { if (rmnet_data_log_level & RMNET_LOG_LVL_HI) \
+ pr_err("[RMNET:HI] %s(): " fmt "\n" , __func__, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define LOGM(fmt, ...) do { if (rmnet_data_log_level & RMNET_LOG_LVL_MED) \
+ pr_warn("[RMNET:MED] %s(): " fmt "\n", __func__, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+#define LOGL(fmt, ...) do { if (unlikely( \
+			rmnet_data_log_level & RMNET_LOG_LVL_LOW)) \
+ pr_notice("[RMNET:LOW] %s(): " fmt "\n", __func__, \
+ ##__VA_ARGS__); \
+ } while (0)
+
+/* Don't use pr_debug as it is compiled out of the kernel. We can be sure of
+ * minimal impact as LOGD is not enabled by default.
+ */
+#define LOGD(fmt, ...) do { if (unlikely( \
+ (rmnet_data_log_level & RMNET_LOG_LVL_DBG) \
+ && (rmnet_data_log_module_mask & rmnet_mod_mask))) \
+ pr_notice("[RMNET:DBG] %s(): " fmt "\n", __func__, \
+ ##__VA_ARGS__); \
+ } while (0)
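+
+/* Typical use: a translation unit first selects its module bit with
+ * RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_HANDLER); LOGD() output is then gated
+ * on both rmnet_data_log_level and rmnet_data_log_module_mask, while the
+ * other macros are gated on rmnet_data_log_level alone.
+ */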
+
+#endif /* _RMNET_DATA_PRIVATE_H_ */
diff --git a/net/rmnet_data/rmnet_data_stats.c b/net/rmnet_data/rmnet_data_stats.c
new file mode 100644
index 000000000000..20f1628242c7
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_stats.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * RMNET Data statistics
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <net/rmnet_config.h>
+#include "rmnet_data_private.h"
+#include "rmnet_data_stats.h"
+#include "rmnet_data_config.h"
+#include "rmnet_map.h"
+
+enum rmnet_deagg_e {
+ RMNET_STATS_AGG_BUFF,
+ RMNET_STATS_AGG_PKT,
+ RMNET_STATS_AGG_MAX
+};
+
+static DEFINE_SPINLOCK(rmnet_skb_free_lock);
+unsigned long int skb_free[RMNET_STATS_SKBFREE_MAX];
+module_param_array(skb_free, ulong, 0, S_IRUGO);
+MODULE_PARM_DESC(skb_free, "SKBs dropped or freed");
+
+static DEFINE_SPINLOCK(rmnet_queue_xmit_lock);
+unsigned long int queue_xmit[RMNET_STATS_QUEUE_XMIT_MAX*2];
+module_param_array(queue_xmit, ulong, 0, S_IRUGO);
+MODULE_PARM_DESC(queue_xmit, "SKBs queued for transmit");
+
+static DEFINE_SPINLOCK(rmnet_deagg_count);
+unsigned long int deagg_count[RMNET_STATS_AGG_MAX];
+module_param_array(deagg_count, ulong, 0, S_IRUGO);
+MODULE_PARM_DESC(deagg_count, "SKBs De-aggregated");
+
+static DEFINE_SPINLOCK(rmnet_agg_count);
+unsigned long int agg_count[RMNET_STATS_AGG_MAX];
+module_param_array(agg_count, ulong, 0, S_IRUGO);
+MODULE_PARM_DESC(agg_count, "SKBs Aggregated");
+
+static DEFINE_SPINLOCK(rmnet_checksum_dl_stats);
+unsigned long int checksum_dl_stats[RMNET_MAP_CHECKSUM_ENUM_LENGTH];
+module_param_array(checksum_dl_stats, ulong, 0, S_IRUGO);
+MODULE_PARM_DESC(checksum_dl_stats, "Downlink Checksum Statistics");
+
+static DEFINE_SPINLOCK(rmnet_checksum_ul_stats);
+unsigned long int checksum_ul_stats[RMNET_MAP_CHECKSUM_ENUM_LENGTH];
+module_param_array(checksum_ul_stats, ulong, 0, S_IRUGO);
+MODULE_PARM_DESC(checksum_ul_stats, "Uplink Checksum Statistics");
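+
+/* All counter arrays above are exported read-only via module_param_array();
+ * assuming the module is named rmnet_data, they can be read from
+ * /sys/module/rmnet_data/parameters/ as comma-separated lists.
+ */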
+
+void rmnet_kfree_skb(struct sk_buff *skb, unsigned int reason)
+{
+ unsigned long flags;
+
+ if (reason >= RMNET_STATS_SKBFREE_MAX)
+ reason = RMNET_STATS_SKBFREE_UNKNOWN;
+
+ spin_lock_irqsave(&rmnet_skb_free_lock, flags);
+ skb_free[reason]++;
+ spin_unlock_irqrestore(&rmnet_skb_free_lock, flags);
+
+ if (likely(skb)) {
+ struct rmnet_phys_ep_conf_s *config;
+
+ config = (struct rmnet_phys_ep_conf_s *)rcu_dereference
+ (skb->dev->rx_handler_data);
+ if (likely(config))
+ config->recycle(skb);
+ else
+ kfree_skb(skb);
+ }
+}
+
+void rmnet_stats_queue_xmit(int rc, unsigned int reason)
+{
+ unsigned long flags;
+
+ if (rc != 0)
+ reason += RMNET_STATS_QUEUE_XMIT_MAX;
+ if (reason >= RMNET_STATS_QUEUE_XMIT_MAX*2)
+		reason = RMNET_STATS_QUEUE_XMIT_UNKNOWN;
+
+ spin_lock_irqsave(&rmnet_queue_xmit_lock, flags);
+ queue_xmit[reason]++;
+ spin_unlock_irqrestore(&rmnet_queue_xmit_lock, flags);
+}
+
+void rmnet_stats_agg_pkts(int aggcount)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rmnet_agg_count, flags);
+ agg_count[RMNET_STATS_AGG_BUFF]++;
+ agg_count[RMNET_STATS_AGG_PKT] += aggcount;
+ spin_unlock_irqrestore(&rmnet_agg_count, flags);
+}
+
+void rmnet_stats_deagg_pkts(int aggcount)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rmnet_deagg_count, flags);
+ deagg_count[RMNET_STATS_AGG_BUFF]++;
+ deagg_count[RMNET_STATS_AGG_PKT] += aggcount;
+ spin_unlock_irqrestore(&rmnet_deagg_count, flags);
+}
+
+void rmnet_stats_dl_checksum(unsigned int rc)
+{
+ unsigned long flags;
+
+ if (rc >= RMNET_MAP_CHECKSUM_ENUM_LENGTH)
+ rc = RMNET_MAP_CHECKSUM_ERR_UNKOWN;
+
+ spin_lock_irqsave(&rmnet_checksum_dl_stats, flags);
+ checksum_dl_stats[rc]++;
+ spin_unlock_irqrestore(&rmnet_checksum_dl_stats, flags);
+}
+
+void rmnet_stats_ul_checksum(unsigned int rc)
+{
+ unsigned long flags;
+
+ if (rc >= RMNET_MAP_CHECKSUM_ENUM_LENGTH)
+ rc = RMNET_MAP_CHECKSUM_ERR_UNKOWN;
+
+ spin_lock_irqsave(&rmnet_checksum_ul_stats, flags);
+ checksum_ul_stats[rc]++;
+ spin_unlock_irqrestore(&rmnet_checksum_ul_stats, flags);
+}
diff --git a/net/rmnet_data/rmnet_data_stats.h b/net/rmnet_data/rmnet_data_stats.h
new file mode 100644
index 000000000000..1581d9f0c5f6
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_stats.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * RMNET Data statistics
+ *
+ */
+
+#ifndef _RMNET_DATA_STATS_H_
+#define _RMNET_DATA_STATS_H_
+
+enum rmnet_skb_free_e {
+ RMNET_STATS_SKBFREE_UNKNOWN,
+ RMNET_STATS_SKBFREE_BRDG_NO_EGRESS,
+ RMNET_STATS_SKBFREE_DELIVER_NO_EP,
+ RMNET_STATS_SKBFREE_IPINGRESS_NO_EP,
+ RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX,
+ RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP,
+ RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF,
+ RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPD,
+ RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPC,
+ RMNET_STATS_SKBFREE_EGR_MAPFAIL,
+ RMNET_STATS_SKBFREE_VND_NO_EGRESS,
+ RMNET_STATS_SKBFREE_MAPC_BAD_MUX,
+ RMNET_STATS_SKBFREE_MAPC_MUX_NO_EP,
+ RMNET_STATS_SKBFREE_AGG_CPY_EXPAND,
+ RMNET_STATS_SKBFREE_AGG_INTO_BUFF,
+ RMNET_STATS_SKBFREE_DEAGG_MALFORMED,
+ RMNET_STATS_SKBFREE_DEAGG_CLONE_FAIL,
+ RMNET_STATS_SKBFREE_DEAGG_UNKOWN_IP_TYP,
+ RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0,
+ RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM,
+ RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED,
+ RMNET_STATS_SKBFREE_MAX
+};
+
+enum rmnet_queue_xmit_e {
+ RMNET_STATS_QUEUE_XMIT_UNKNOWN,
+ RMNET_STATS_QUEUE_XMIT_EGRESS,
+ RMNET_STATS_QUEUE_XMIT_AGG_FILL_BUFFER,
+ RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT,
+ RMNET_STATS_QUEUE_XMIT_AGG_CPY_EXP_FAIL,
+ RMNET_STATS_QUEUE_XMIT_AGG_SKIP,
+ RMNET_STATS_QUEUE_XMIT_MAX
+};
+
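+/* Layout note: queue_xmit[] (rmnet_data_stats.c) holds
+ * 2 * RMNET_STATS_QUEUE_XMIT_MAX counters. Indices [0, MAX) count
+ * successful dev_queue_xmit() calls per reason; a non-zero return code
+ * shifts the reason by MAX, so a failed egress transmit is counted in
+ * queue_xmit[RMNET_STATS_QUEUE_XMIT_EGRESS + RMNET_STATS_QUEUE_XMIT_MAX].
+ */
+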
+void rmnet_kfree_skb(struct sk_buff *skb, unsigned int reason);
+void rmnet_stats_queue_xmit(int rc, unsigned int reason);
+void rmnet_stats_deagg_pkts(int aggcount);
+void rmnet_stats_agg_pkts(int aggcount);
+void rmnet_stats_dl_checksum(unsigned int rc);
+void rmnet_stats_ul_checksum(unsigned int rc);
+#endif /* _RMNET_DATA_STATS_H_ */
diff --git a/net/rmnet_data/rmnet_data_trace.h b/net/rmnet_data/rmnet_data_trace.h
new file mode 100644
index 000000000000..9663979db13c
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_trace.h
@@ -0,0 +1,333 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rmnet_data
+#define TRACE_INCLUDE_FILE rmnet_data_trace
+
+#if !defined(_RMNET_DATA_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _RMNET_DATA_TRACE_H_
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(rmnet_handler_template,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field(void *, skbaddr)
+ __field(unsigned int, len)
+ __string(name, skb->dev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ __entry->len = skb->len;
+ __assign_str(name, skb->dev->name);
+ ),
+
+ TP_printk("dev=%s skbaddr=%pK len=%u",
+ __get_str(name), __entry->skbaddr, __entry->len)
+)
+
+DEFINE_EVENT(rmnet_handler_template, rmnet_egress_handler,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+
+DEFINE_EVENT(rmnet_handler_template, rmnet_ingress_handler,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+
+DEFINE_EVENT(rmnet_handler_template, rmnet_vnd_start_xmit,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+
+DEFINE_EVENT(rmnet_handler_template, __rmnet_deliver_skb,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+
+DECLARE_EVENT_CLASS(rmnet_tc_fc_template,
+
+ TP_PROTO(u32 tcm_handle, int qdisc_len, int is_enable),
+
+ TP_ARGS(tcm_handle, qdisc_len, is_enable),
+
+ TP_STRUCT__entry(
+ __field(u32, handle)
+ __field(int, qlen)
+ __field(int, enable)
+ ),
+
+ TP_fast_assign(
+ __entry->handle = tcm_handle;
+ __entry->qlen = qdisc_len;
+ __entry->enable = is_enable;
+ ),
+
+ TP_printk("tcm_handle=%d qdisc length=%d flow %s",
+ __entry->handle, __entry->qlen,
+ __entry->enable ? "enable" : "disable")
+)
+
+DEFINE_EVENT(rmnet_tc_fc_template, rmnet_fc_qmi,
+
+ TP_PROTO(u32 tcm_handle, int qdisc_len, int is_enable),
+
+ TP_ARGS(tcm_handle, qdisc_len, is_enable)
+);
+
+DEFINE_EVENT(rmnet_tc_fc_template, rmnet_fc_map,
+
+ TP_PROTO(u32 tcm_handle, int qdisc_len, int is_enable),
+
+ TP_ARGS(tcm_handle, qdisc_len, is_enable)
+);
+
+DECLARE_EVENT_CLASS(rmnet_aggregation_template,
+
+ TP_PROTO(struct sk_buff *skb, int num_agg_packets),
+
+ TP_ARGS(skb, num_agg_packets),
+
+ TP_STRUCT__entry(
+ __field(void *, skbaddr)
+ __field(unsigned int, len)
+ __string(name, skb->dev->name)
+ __field(int, num)
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ __entry->len = skb->len;
+ __assign_str(name, skb->dev->name);
+ __entry->num = num_agg_packets;
+ ),
+
+ TP_printk("dev=%s skbaddr=%pK len=%u agg_count: %d",
+ __get_str(name), __entry->skbaddr, __entry->len,
+ __entry->num)
+)
+
+DEFINE_EVENT(rmnet_aggregation_template, rmnet_map_aggregate,
+
+ TP_PROTO(struct sk_buff *skb, int num_agg_packets),
+
+ TP_ARGS(skb, num_agg_packets)
+);
+
+DEFINE_EVENT(rmnet_aggregation_template, rmnet_map_flush_packet_queue,
+
+ TP_PROTO(struct sk_buff *skb, int num_agg_packets),
+
+ TP_ARGS(skb, num_agg_packets)
+);
+
+TRACE_EVENT(rmnet_start_aggregation,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __string(name, skb->dev->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, skb->dev->name);
+ ),
+
+ TP_printk("dev: %s, aggregated first packet", __get_str(name))
+)
+
+TRACE_EVENT(rmnet_start_deaggregation,
+
+ TP_PROTO(struct sk_buff *skb),
+
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __string(name, skb->dev->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, skb->dev->name);
+ ),
+
+ TP_printk("dev: %s, deaggregated first packet", __get_str(name))
+)
+
+TRACE_EVENT(rmnet_end_deaggregation,
+
+ TP_PROTO(struct sk_buff *skb, int num_deagg_packets),
+
+ TP_ARGS(skb, num_deagg_packets),
+
+ TP_STRUCT__entry(
+ __string(name, skb->dev->name)
+ __field(int, num)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, skb->dev->name);
+ __entry->num = num_deagg_packets;
+ ),
+
+ TP_printk("dev: %s, deaggregate end count: %d",
+ __get_str(name), __entry->num)
+)
+
+TRACE_EVENT(rmnet_map_checksum_downlink_packet,
+
+ TP_PROTO(struct sk_buff *skb, int ckresult),
+
+ TP_ARGS(skb, ckresult),
+
+ TP_STRUCT__entry(
+ __string(name, skb->dev->name)
+ __field(int, res)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, skb->dev->name);
+ __entry->res = ckresult;
+ ),
+
+ TP_printk("DL checksum on dev=%s, res: %d",
+ __get_str(name), __entry->res)
+)
+
+TRACE_EVENT(rmnet_map_checksum_uplink_packet,
+
+ TP_PROTO(struct net_device *dev, int ckresult),
+
+ TP_ARGS(dev, ckresult),
+
+ TP_STRUCT__entry(
+ __string(name, dev->name)
+ __field(int, res)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev->name);
+ __entry->res = ckresult;
+ ),
+
+ TP_printk("UL checksum on dev=%s, res: %d",
+ __get_str(name), __entry->res)
+)
+
+DECLARE_EVENT_CLASS(rmnet_physdev_action_template,
+
+ TP_PROTO(struct net_device *dev),
+
+ TP_ARGS(dev),
+
+ TP_STRUCT__entry(
+ __string(name, dev->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev->name);
+ ),
+
+ TP_printk("Physical dev=%s", __get_str(name))
+)
+
+DEFINE_EVENT(rmnet_physdev_action_template, rmnet_unregister_cb_unhandled,
+
+ TP_PROTO(struct net_device *dev),
+
+ TP_ARGS(dev)
+);
+
+DEFINE_EVENT(rmnet_physdev_action_template, rmnet_unregister_cb_entry,
+
+ TP_PROTO(struct net_device *dev),
+
+ TP_ARGS(dev)
+);
+
+DEFINE_EVENT(rmnet_physdev_action_template, rmnet_unregister_cb_exit,
+
+ TP_PROTO(struct net_device *dev),
+
+ TP_ARGS(dev)
+);
+
+DEFINE_EVENT(rmnet_physdev_action_template, rmnet_unregister_cb_clear_vnds,
+
+ TP_PROTO(struct net_device *dev),
+
+ TP_ARGS(dev)
+);
+
+DEFINE_EVENT(rmnet_physdev_action_template, rmnet_unregister_cb_clear_lepcs,
+
+ TP_PROTO(struct net_device *dev),
+
+ TP_ARGS(dev)
+);
+
+DEFINE_EVENT(rmnet_physdev_action_template, rmnet_associate,
+
+ TP_PROTO(struct net_device *dev),
+
+ TP_ARGS(dev)
+);
+
+DEFINE_EVENT(rmnet_physdev_action_template, rmnet_unassociate,
+
+ TP_PROTO(struct net_device *dev),
+
+ TP_ARGS(dev)
+);
+
+TRACE_EVENT(rmnet_gro_downlink,
+
+ TP_PROTO(gro_result_t gro_res),
+
+ TP_ARGS(gro_res),
+
+ TP_STRUCT__entry(
+ __field(gro_result_t, res)
+ ),
+
+ TP_fast_assign(
+ __entry->res = gro_res;
+ ),
+
+ TP_printk("GRO res: %d", __entry->res)
+)
+
+#endif /* _RMNET_DATA_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
+
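+/* Usage sketch: with this header compiled in, the events above can be
+ * enabled at runtime through tracefs (exact mount point may vary by
+ * kernel configuration):
+ *
+ *   echo 1 > /sys/kernel/debug/tracing/events/rmnet_data/enable
+ *   cat /sys/kernel/debug/tracing/trace_pipe
+ */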
diff --git a/net/rmnet_data/rmnet_data_vnd.c b/net/rmnet_data/rmnet_data_vnd.c
new file mode 100644
index 000000000000..2819da9ae3f2
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_vnd.c
@@ -0,0 +1,1115 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * RMNET Data virtual network driver
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/rmnet_data.h>
+#include <linux/msm_rmnet.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/spinlock.h>
+#include <net/pkt_sched.h>
+#include <linux/atomic.h>
+#include <linux/net_map.h>
+#include "rmnet_data_config.h"
+#include "rmnet_data_handlers.h"
+#include "rmnet_data_private.h"
+#include "rmnet_map.h"
+#include "rmnet_data_vnd.h"
+#include "rmnet_data_stats.h"
+#include "rmnet_data_trace.h"
+
+RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_VND);
+
+#define RMNET_MAP_FLOW_NUM_TC_HANDLE 3
+#define RMNET_VND_UF_ACTION_ADD 0
+#define RMNET_VND_UF_ACTION_DEL 1
+enum {
+ RMNET_VND_UPDATE_FLOW_OK,
+ RMNET_VND_UPDATE_FLOW_NO_ACTION,
+ RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM,
+ RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT
+};
+
+struct net_device *rmnet_devices[RMNET_DATA_MAX_VND];
+
+struct rmnet_map_flow_mapping_s {
+ struct list_head list;
+ uint32_t map_flow_id;
+ uint32_t tc_flow_valid[RMNET_MAP_FLOW_NUM_TC_HANDLE];
+ uint32_t tc_flow_id[RMNET_MAP_FLOW_NUM_TC_HANDLE];
+ atomic_t v4_seq;
+ atomic_t v6_seq;
+};
+
+struct rmnet_vnd_private_s {
+ uint32_t qos_version;
+ struct rmnet_logical_ep_conf_s local_ep;
+
+ rwlock_t flow_map_lock;
+ struct list_head flow_head;
+ struct rmnet_map_flow_mapping_s root_flow;
+};
+
+#define RMNET_VND_FC_QUEUED 0
+#define RMNET_VND_FC_NOT_ENABLED 1
+#define RMNET_VND_FC_KMALLOC_ERR 2
+
+/* ***************** Helper Functions *************************************** */
+
+/**
+ * rmnet_vnd_add_qos_header() - Adds QoS header to front of skb->data
+ * @skb: Socket buffer ("packet") to modify
+ * @dev: Egress interface
+ *
+ * Does not check for sufficient headroom! Caller must make sure there is enough
+ * headroom.
+ */
+static void rmnet_vnd_add_qos_header(struct sk_buff *skb,
+ struct net_device *dev,
+ uint32_t qos_version)
+{
+ struct QMI_QOS_HDR_S *qmih;
+ struct qmi_qos_hdr8_s *qmi8h;
+
+ if (qos_version & RMNET_IOCTL_QOS_MODE_6) {
+ qmih = (struct QMI_QOS_HDR_S *)
+ skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
+ qmih->version = 1;
+ qmih->flags = 0;
+ qmih->flow_id = skb->mark;
+ } else if (qos_version & RMNET_IOCTL_QOS_MODE_8) {
+ qmi8h = (struct qmi_qos_hdr8_s *)
+ skb_push(skb, sizeof(struct qmi_qos_hdr8_s));
+ /* Flags are 0 always */
+ qmi8h->hdr.version = 0;
+ qmi8h->hdr.flags = 0;
+ memset(qmi8h->reserved, 0, sizeof(qmi8h->reserved));
+ qmi8h->hdr.flow_id = skb->mark;
+ } else {
+ LOGD("%s(): Bad QoS version configured\n", __func__);
+ }
+}
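+
+/* Resulting egress layout sketch for RMNET_IOCTL_QOS_MODE_6 (the MAP header,
+ * when configured, is pushed afterwards by the egress handler and therefore
+ * ends up in front):
+ *
+ *   [ MAP header | QoS header {version=1, flags=0, flow_id=skb->mark} | IP ]
+ */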
+
+/* ***************** RX/TX Fixup ******************************************** */
+
+/**
+ * rmnet_vnd_rx_fixup() - Virtual Network Device receive fixup hook
+ * @skb: Socket buffer ("packet") to modify
+ * @dev: Virtual network device
+ *
+ * Additional VND specific packet processing for ingress packets
+ *
+ * Return:
+ * - RX_HANDLER_PASS if packet should continue to process in stack
+ * - RX_HANDLER_CONSUMED if packet should not be processed in stack
+ *
+ */
+int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
+{
+ if (unlikely(!dev || !skb))
+ BUG();
+
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+
+ return RX_HANDLER_PASS;
+}
+
+/**
+ * rmnet_vnd_tx_fixup() - Virtual Network Device transmit fixup hook
+ * @skb: Socket buffer ("packet") to modify
+ * @dev: Virtual network device
+ *
+ * Additional VND specific packet processing for egress packets
+ *
+ * Return:
+ * - RX_HANDLER_PASS if packet should continue to be transmitted
+ * - RX_HANDLER_CONSUMED if packet should not be transmitted by stack
+ */
+int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
+{
+ if (unlikely(!dev || !skb))
+ BUG();
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ return RX_HANDLER_PASS;
+}
+
+/* ***************** Network Device Operations ****************************** */
+
+/**
+ * rmnet_vnd_start_xmit() - Transmit NDO callback
+ * @skb: Socket buffer ("packet") being sent from network stack
+ * @dev: Virtual Network Device
+ *
+ * Standard network driver operations hook to transmit packets on virtual
+ * network device. Called by network stack. Packet is not transmitted directly
+ * from here; instead it is given to the rmnet egress handler.
+ *
+ * Return:
+ * - NETDEV_TX_OK under all circumstances (cannot block/fail)
+ */
+static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct rmnet_vnd_private_s *dev_conf;
+ trace_rmnet_vnd_start_xmit(skb);
+ dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev);
+ if (dev_conf->local_ep.egress_dev) {
+ /* QoS header should come after MAP header */
+ if (dev_conf->qos_version)
+ rmnet_vnd_add_qos_header(skb,
+ dev,
+ dev_conf->qos_version);
+ skb_orphan(skb);
+ rmnet_egress_handler(skb, &dev_conf->local_ep);
+ } else {
+ dev->stats.tx_dropped++;
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_VND_NO_EGRESS);
+ }
+ return NETDEV_TX_OK;
+}
+
+/**
+ * rmnet_vnd_change_mtu() - Change MTU NDO callback
+ * @dev: Virtual network device
+ * @new_mtu: New MTU value to set (in bytes)
+ *
+ * Standard network driver operations hook to set the MTU. Called by kernel to
+ * set the device MTU. Rejects values less than zero or greater than
+ * RMNET_DATA_MAX_PACKET_SIZE.
+ *
+ * Return:
+ * - 0 if successful
+ * - -EINVAL if new_mtu is out of range
+ */
+static int rmnet_vnd_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < 0 || new_mtu > RMNET_DATA_MAX_PACKET_SIZE)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+#ifdef CONFIG_RMNET_DATA_FC
+static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
+ struct ifreq *ifr,
+ int cmd)
+{
+ struct rmnet_vnd_private_s *dev_conf;
+ int rc, qdisc_len = 0;
+ struct rmnet_ioctl_data_s ioctl_data;
+ rc = 0;
+ dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev);
+
+ switch (cmd) {
+
+ case RMNET_IOCTL_SET_QOS_ENABLE:
+ LOGM("RMNET_IOCTL_SET_QOS_ENABLE on %s", dev->name);
+ if (!dev_conf->qos_version)
+ dev_conf->qos_version = RMNET_IOCTL_QOS_MODE_6;
+ break;
+
+ case RMNET_IOCTL_SET_QOS_DISABLE:
+ LOGM("RMNET_IOCTL_SET_QOS_DISABLE on %s", dev->name);
+ dev_conf->qos_version = 0;
+ break;
+
+ case RMNET_IOCTL_GET_QOS: /* Get QoS header state */
+ LOGM("RMNET_IOCTL_GET_QOS on %s", dev->name);
+ ioctl_data.u.operation_mode = (dev_conf->qos_version ==
+ RMNET_IOCTL_QOS_MODE_6);
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+ sizeof(struct rmnet_ioctl_data_s)))
+ rc = -EFAULT;
+ break;
+
+ case RMNET_IOCTL_FLOW_ENABLE:
+ LOGL("RMNET_IOCTL_FLOW_ENABLE on %s", dev->name);
+ if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
+ sizeof(struct rmnet_ioctl_data_s))) {
+ rc = -EFAULT;
+ break;
+ }
+ qdisc_len = tc_qdisc_flow_control(dev,
+ ioctl_data.u.tcm_handle, 1);
+ trace_rmnet_fc_qmi(ioctl_data.u.tcm_handle, qdisc_len, 1);
+ break;
+
+ case RMNET_IOCTL_FLOW_DISABLE:
+ LOGL("RMNET_IOCTL_FLOW_DISABLE on %s", dev->name);
+ if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
+ sizeof(struct rmnet_ioctl_data_s))) {
+ rc = -EFAULT;
+ break;
+ }
+ qdisc_len = tc_qdisc_flow_control(dev,
+ ioctl_data.u.tcm_handle, 0);
+ trace_rmnet_fc_qmi(ioctl_data.u.tcm_handle, qdisc_len, 0);
+ break;
+
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+struct rmnet_vnd_fc_work {
+ struct work_struct work;
+ struct net_device *dev;
+ uint32_t tc_handle;
+ int enable;
+};
+
+static void _rmnet_vnd_wq_flow_control(struct work_struct *work)
+{
+ struct rmnet_vnd_fc_work *fcwork;
+ int qdisc_len = 0;
+ fcwork = (struct rmnet_vnd_fc_work *)work;
+
+ rtnl_lock();
+ qdisc_len = tc_qdisc_flow_control(fcwork->dev, fcwork->tc_handle,
+ fcwork->enable);
+ trace_rmnet_fc_map(fcwork->tc_handle, qdisc_len, fcwork->enable);
+ rtnl_unlock();
+
+ LOGL("[%s] handle:%08X enable:%d",
+ fcwork->dev->name, fcwork->tc_handle, fcwork->enable);
+
+ kfree(work);
+}
+
+static int _rmnet_vnd_do_flow_control(struct net_device *dev,
+ uint32_t tc_handle,
+ int enable)
+{
+ struct rmnet_vnd_fc_work *fcwork;
+
+ fcwork = kzalloc(sizeof(*fcwork), GFP_ATOMIC);
+ if (!fcwork)
+ return RMNET_VND_FC_KMALLOC_ERR;
+
+ INIT_WORK((struct work_struct *)fcwork, _rmnet_vnd_wq_flow_control);
+ fcwork->dev = dev;
+ fcwork->tc_handle = tc_handle;
+ fcwork->enable = enable;
+
+ schedule_work((struct work_struct *)fcwork);
+ return RMNET_VND_FC_QUEUED;
+}
+#else
+static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
+ struct ifreq *ifr,
+ int cmd)
+{
+ return -EINVAL;
+}
+
+static inline int _rmnet_vnd_do_flow_control(struct net_device *dev,
+ uint32_t tc_handle,
+ int enable)
+{
+ LOGD("[%s] called with no QoS support", dev->name);
+ return RMNET_VND_FC_NOT_ENABLED;
+}
+#endif /* CONFIG_RMNET_DATA_FC */
+
+static int rmnet_vnd_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
+{
+ struct rmnet_vnd_private_s *dev_conf;
+ struct rmnet_ioctl_extended_s ext_cmd;
+ int rc = 0;
+ dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev);
+
+ rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data,
+ sizeof(struct rmnet_ioctl_extended_s));
+ if (rc) {
+ LOGM("%s(): copy_from_user() failed\n", __func__);
+ return rc;
+ }
+
+ switch (ext_cmd.extended_ioctl) {
+ case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
+ ext_cmd.u.data = 0;
+ break;
+
+ case RMNET_IOCTL_GET_DRIVER_NAME:
+ strlcpy(ext_cmd.u.if_name, "rmnet_data",
+ sizeof(ext_cmd.u.if_name));
+ break;
+
+ case RMNET_IOCTL_GET_SUPPORTED_QOS_MODES:
+ ext_cmd.u.data = RMNET_IOCTL_QOS_MODE_6
+ | RMNET_IOCTL_QOS_MODE_8;
+ break;
+
+ case RMNET_IOCTL_GET_QOS_VERSION:
+ ext_cmd.u.data = dev_conf->qos_version;
+ break;
+
+ case RMNET_IOCTL_SET_QOS_VERSION:
+ if (ext_cmd.u.data == RMNET_IOCTL_QOS_MODE_6
+ || ext_cmd.u.data == RMNET_IOCTL_QOS_MODE_8
+ || ext_cmd.u.data == 0) {
+ dev_conf->qos_version = ext_cmd.u.data;
+ } else {
+ rc = -EINVAL;
+ goto done;
+ }
+ break;
+
+ default:
+ rc = -EINVAL;
+ goto done;
+ }
+
+ rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd,
+ sizeof(struct rmnet_ioctl_extended_s));
+ if (rc)
+ LOGM("%s(): copy_to_user() failed\n", __func__);
+
+done:
+ return rc;
+}
+
+/**
+ * rmnet_vnd_ioctl() - IOCTL NDO callback
+ * @dev: Virtual network device
+ * @ifreq: User data
+ * @cmd: IOCTL command value
+ *
+ * Standard network driver operations hook to process IOCTLs. Called by kernel
+ * to process non-standard IOCTLs for the device
+ *
+ * Return:
+ * - 0 if successful
+ * - -EINVAL if unknown IOCTL
+ */
+static int rmnet_vnd_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct rmnet_vnd_private_s *dev_conf;
+ int rc;
+ struct rmnet_ioctl_data_s ioctl_data;
+ dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev);
+
+ rc = _rmnet_vnd_do_qos_ioctl(dev, ifr, cmd);
+ if (rc != -EINVAL)
+ return rc;
+ rc = 0; /* Reset rc as it may contain -EINVAL from above */
+
+ switch (cmd) {
+
+ case RMNET_IOCTL_OPEN: /* Do nothing. Support legacy behavior */
+ LOGM("RMNET_IOCTL_OPEN on %s (ignored)", dev->name);
+ break;
+
+ case RMNET_IOCTL_CLOSE: /* Do nothing. Support legacy behavior */
+ LOGM("RMNET_IOCTL_CLOSE on %s (ignored)", dev->name);
+ break;
+
+ case RMNET_IOCTL_SET_LLP_ETHERNET:
+ LOGM("RMNET_IOCTL_SET_LLP_ETHERNET on %s (no support)",
+ dev->name);
+ rc = -EINVAL;
+ break;
+
+ case RMNET_IOCTL_SET_LLP_IP: /* Do nothing. Support legacy behavior */
+ LOGM("RMNET_IOCTL_SET_LLP_IP on %s (ignored)", dev->name);
+ break;
+
+ case RMNET_IOCTL_GET_LLP: /* Always return IP mode */
+ LOGM("RMNET_IOCTL_GET_LLP on %s", dev->name);
+ ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+ sizeof(struct rmnet_ioctl_data_s)))
+ rc = -EFAULT;
+ break;
+
+ case RMNET_IOCTL_EXTENDED:
+ rc = rmnet_vnd_ioctl_extended(dev, ifr);
+ break;
+
+ default:
+ LOGM("Unknown IOCTL 0x%08X", cmd);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static const struct net_device_ops rmnet_data_vnd_ops = {
+ .ndo_init = 0,
+ .ndo_start_xmit = rmnet_vnd_start_xmit,
+ .ndo_do_ioctl = rmnet_vnd_ioctl,
+ .ndo_change_mtu = rmnet_vnd_change_mtu,
+ .ndo_set_mac_address = 0,
+ .ndo_validate_addr = 0,
+};
+
+/**
+ * rmnet_vnd_setup() - net_device initialization callback
+ * @dev: Virtual network device
+ *
+ * Called by kernel whenever a new rmnet_data<n> device is created. Sets MTU,
+ * flags, ARP type, needed headroom, etc...
+ */
+static void rmnet_vnd_setup(struct net_device *dev)
+{
+ struct rmnet_vnd_private_s *dev_conf;
+ LOGM("Setting up device %s", dev->name);
+
+ /* Clear out private data */
+ dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev);
+ memset(dev_conf, 0, sizeof(struct rmnet_vnd_private_s));
+
+ dev->netdev_ops = &rmnet_data_vnd_ops;
+ dev->mtu = RMNET_DATA_DFLT_PACKET_SIZE;
+ dev->needed_headroom = RMNET_DATA_NEEDED_HEADROOM;
+ random_ether_addr(dev->dev_addr);
+ dev->tx_queue_len = RMNET_DATA_TX_QUEUE_LEN;
+
+ /* Raw IP mode */
+ dev->header_ops = 0; /* No header */
+ dev->type = ARPHRD_RAWIP;
+ dev->hard_header_len = 0;
+ dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+
+ /* Flow control */
+ rwlock_init(&dev_conf->flow_map_lock);
+ INIT_LIST_HEAD(&dev_conf->flow_head);
+}
+
+/**
+ * rmnet_vnd_disable_offload() - net_device initialization helper function
+ * @dev: Virtual network device
+ *
+ * Called during device initialization. Disables GRO.
+ */
+static void rmnet_vnd_disable_offload(struct net_device *dev)
+{
+ dev->wanted_features &= ~NETIF_F_GRO;
+ __netdev_update_features(dev);
+}
+
+/* ***************** Exposed API ******************************************** */
+
+/**
+ * rmnet_vnd_exit() - Shutdown cleanup hook
+ *
+ * Called by RmNet main on module unload. Cleans up data structures and
+ * unregisters/frees net_devices.
+ */
+void rmnet_vnd_exit(void)
+{
+ int i;
+ for (i = 0; i < RMNET_DATA_MAX_VND; i++)
+ if (rmnet_devices[i]) {
+ unregister_netdev(rmnet_devices[i]);
+ free_netdev(rmnet_devices[i]);
+ }
+}
+
+/**
+ * rmnet_vnd_init() - Init hook
+ *
+ * Called by RmNet main on module load. Initializes data structures
+ */
+int rmnet_vnd_init(void)
+{
+ memset(rmnet_devices, 0,
+ sizeof(struct net_device *) * RMNET_DATA_MAX_VND);
+ return 0;
+}
+
+/**
+ * rmnet_vnd_create_dev() - Create a new virtual network device node.
+ * @id: Virtual device node id
+ * @new_device: Pointer to newly created device node
+ * @prefix: Device name prefix
+ *
+ * Allocates structures for new virtual network devices. Sets the name of the
+ * new device and registers it with the network stack. Device will appear in
+ * ifconfig list after this is called. If the prefix is null, then
+ * RMNET_DATA_DEV_NAME_STR will be assumed.
+ *
+ * Return:
+ * - 0 if successful
+ * - RMNET_CONFIG_BAD_ARGUMENTS if id is out of range or prefix is too long
+ * - RMNET_CONFIG_DEVICE_IN_USE if id already in use
+ * - RMNET_CONFIG_NOMEM if net_device allocation failed
+ * - RMNET_CONFIG_UNKNOWN_ERROR if register_netdevice() fails
+ */
+int rmnet_vnd_create_dev(int id, struct net_device **new_device,
+ const char *prefix)
+{
+ struct net_device *dev;
+ char dev_prefix[IFNAMSIZ];
+ int p, rc = 0;
+
+ if (id < 0 || id >= RMNET_DATA_MAX_VND) {
+ *new_device = 0;
+ return RMNET_CONFIG_BAD_ARGUMENTS;
+ }
+
+ if (rmnet_devices[id] != 0) {
+ *new_device = 0;
+ return RMNET_CONFIG_DEVICE_IN_USE;
+ }
+
+ if (!prefix)
+ p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d",
+ RMNET_DATA_DEV_NAME_STR);
+ else
+ p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d",
+ prefix);
+ if (p >= (IFNAMSIZ-1)) {
+ LOGE("Specified prefix longer than IFNAMSIZ");
+ return RMNET_CONFIG_BAD_ARGUMENTS;
+ }
+
+ dev = alloc_netdev(sizeof(struct rmnet_vnd_private_s),
+ dev_prefix,
+ NET_NAME_ENUM,
+ rmnet_vnd_setup);
+ if (!dev) {
+ LOGE("Failed to to allocate netdev for id %d", id);
+ *new_device = 0;
+ return RMNET_CONFIG_NOMEM;
+ }
+
+ if (!prefix) {
+ /* Configuring DL checksum offload on rmnet_data interfaces */
+ dev->hw_features = NETIF_F_RXCSUM;
+ /* Configuring UL checksum offload on rmnet_data interfaces */
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ /* Configuring GRO on rmnet_data interfaces */
+ dev->hw_features |= NETIF_F_GRO;
+ /* Configuring Scatter-Gather on rmnet_data interfaces */
+ dev->hw_features |= NETIF_F_SG;
+ /* Configuring GSO on rmnet_data interfaces */
+ dev->hw_features |= NETIF_F_GSO;
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ }
+
+ rc = register_netdevice(dev);
+ if (rc != 0) {
+ LOGE("Failed to to register netdev [%s]", dev->name);
+ free_netdev(dev);
+ *new_device = 0;
+ return RMNET_CONFIG_UNKNOWN_ERROR;
+ } else {
+ rmnet_devices[id] = dev;
+ *new_device = dev;
+ }
+
+ rmnet_vnd_disable_offload(dev);
+
+ LOGM("Registered device %s", dev->name);
+ return rc;
+}
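+
+/* Usage sketch (assumption for illustration: the caller holds RTNL, as
+ * register_netdevice() requires). On success, *new_device points at the
+ * registered netdev, typically named "rmnet_data0" for id 0:
+ *
+ *   struct net_device *vnd;
+ *
+ *   if (rmnet_vnd_create_dev(0, &vnd, NULL) == 0)
+ *           LOGM("Created %s", vnd->name);
+ */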
+
+/**
+ * rmnet_vnd_free_dev() - free a virtual network device node.
+ * @id: Virtual device node id
+ *
+ * Unregisters the virtual network device node and frees it.
+ * unregister_netdev locks the rtnl mutex, so the mutex must not be locked
+ * by the caller of the function. unregister_netdev enqueues the request to
+ * unregister the device into a TODO queue. The requests in the TODO queue
+ * are only done after rtnl mutex is unlocked, therefore free_netdev has to
+ * be called after unlocking rtnl mutex.
+ *
+ * Return:
+ * - 0 if successful
+ * - RMNET_CONFIG_NO_SUCH_DEVICE if id is invalid or not in range
+ * - RMNET_CONFIG_DEVICE_IN_USE if device has logical ep that wasn't unset
+ */
+int rmnet_vnd_free_dev(int id)
+{
+ struct rmnet_logical_ep_conf_s *epconfig_l;
+ struct net_device *dev;
+
+ rtnl_lock();
+ if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
+ rtnl_unlock();
+ LOGM("Invalid id [%d]", id);
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+ }
+
+ epconfig_l = rmnet_vnd_get_le_config(rmnet_devices[id]);
+ if (epconfig_l && epconfig_l->refcount) {
+ rtnl_unlock();
+ return RMNET_CONFIG_DEVICE_IN_USE;
+ }
+
+ dev = rmnet_devices[id];
+ rmnet_devices[id] = 0;
+ rtnl_unlock();
+
+ if (dev) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ return 0;
+ } else {
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+ }
+}
+
+/**
+ * rmnet_vnd_get_name() - Gets the string name of a VND based on ID
+ * @id: Virtual device node id
+ * @name: Buffer to store name of virtual device node
+ * @name_len: Length of name buffer
+ *
+ * Copies the name of the virtual device node into the user's buffer. Returns
+ * an error if the buffer is null or too small to hold the device name.
+ *
+ * Return:
+ * - 0 if successful
+ * - -EINVAL if name is null
+ * - -EINVAL if id is invalid or not in range
+ * - -EINVAL if name is too small to hold things
+ */
+int rmnet_vnd_get_name(int id, char *name, int name_len)
+{
+ int p;
+
+ if (!name) {
+ LOGM("%s", "Bad arguments; name buffer null");
+ return -EINVAL;
+ }
+
+ if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
+ LOGM("Invalid id [%d]", id);
+ return -EINVAL;
+ }
+
+ p = strlcpy(name, rmnet_devices[id]->name, name_len);
+ if (p >= name_len) {
+ LOGM("Buffer to small (%d) to fit device name", name_len);
+ return -EINVAL;
+ }
+ LOGL("Found mapping [%d]->\"%s\"", id, name);
+
+ return 0;
+}
+
+/**
+ * rmnet_vnd_is_vnd() - Determine if net_device is an RmNet-owned virtual device
+ * @dev: Network device to test
+ *
+ * Searches through list of known RmNet virtual devices. This function is O(n)
+ * and should not be used in the data path.
+ *
+ * Return:
+ * - 0 if device is not an RmNet virtual device
+ * - (id + 1) if device is an RmNet virtual device
+ */
+int rmnet_vnd_is_vnd(struct net_device *dev)
+{
+ /*
+ * This is not an efficient search, but this will only be called in
+ * a configuration context, and the list is small.
+ */
+ int i;
+
+ if (!dev)
+ BUG();
+
+ for (i = 0; i < RMNET_DATA_MAX_VND; i++)
+ if (dev == rmnet_devices[i])
+ return i+1;
+
+ return 0;
+}
+
+/**
+ * rmnet_vnd_get_le_config() - Get the logical endpoint configuration
+ * @dev: Virtual device node
+ *
+ * Gets the logical endpoint configuration for a RmNet virtual network device
+ * node. Caller should confirm that the device is an RmNet VND before calling.
+ *
+ * Return:
+ * - Pointer to logical endpoint configuration structure
+ * - 0 (null) if dev is null
+ */
+struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev)
+{
+ struct rmnet_vnd_private_s *dev_conf;
+ if (!dev)
+ return 0;
+
+ dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev);
+ if (!dev_conf)
+ BUG();
+
+ return &dev_conf->local_ep;
+}
+
+/**
+ * _rmnet_vnd_get_flow_map() - Gets object representing a MAP flow handle
+ * @dev_conf: Private configuration structure for virtual network device
+ * @map_flow: MAP flow handle ID
+ *
+ * Loops through available flow mappings and compares the MAP flow handle.
+ * Returns when mapping is found.
+ *
+ * Return:
+ * - Null if no mapping was found
+ * - Pointer to mapping otherwise
+ */
+static struct rmnet_map_flow_mapping_s *_rmnet_vnd_get_flow_map
+ (struct rmnet_vnd_private_s *dev_conf,
+ uint32_t map_flow)
+{
+ struct list_head *p;
+ struct rmnet_map_flow_mapping_s *itm;
+
+ list_for_each(p, &(dev_conf->flow_head)) {
+ itm = list_entry(p, struct rmnet_map_flow_mapping_s, list);
+
+ if (unlikely(!itm))
+ BUG();
+
+ if (itm->map_flow_id == map_flow)
+ return itm;
+ }
+ return 0;
+}
+
+/**
+ * _rmnet_vnd_update_flow_map() - Add or remove individual TC flow handles
+ * @action: One of RMNET_VND_UF_ACTION_ADD / RMNET_VND_UF_ACTION_DEL
+ * @itm: Flow mapping object
+ * @map_flow: TC flow handle
+ *
+ * RMNET_VND_UF_ACTION_ADD:
+ * Will check for a free mapping slot in the mapping object. If one is found,
+ * valid for that slot will be set to 1 and the value will be set.
+ *
+ * RMNET_VND_UF_ACTION_DEL:
+ * Will check for matching tc handle. If found, valid for that slot will be
+ * set to 0 and the value will also be zeroed.
+ *
+ * Return:
+ * - RMNET_VND_UPDATE_FLOW_OK tc flow handle is added/removed ok
+ * - RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM if there are no more tc handles
+ * - RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT if flow mapping is now empty
+ * - RMNET_VND_UPDATE_FLOW_NO_ACTION if no action was taken
+ */
+static int _rmnet_vnd_update_flow_map(uint8_t action,
+ struct rmnet_map_flow_mapping_s *itm,
+ uint32_t tc_flow)
+{
+ int rc, i, j;
+ rc = RMNET_VND_UPDATE_FLOW_OK;
+
+ switch (action) {
+ case RMNET_VND_UF_ACTION_ADD:
+ rc = RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM;
+ for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
+ if (itm->tc_flow_valid[i] == 0) {
+ itm->tc_flow_valid[i] = 1;
+ itm->tc_flow_id[i] = tc_flow;
+ rc = RMNET_VND_UPDATE_FLOW_OK;
+ LOGD("{%pK}->tc_flow_id[%d]=%08X",
+ itm, i, tc_flow);
+ break;
+ }
+ }
+ break;
+
+ case RMNET_VND_UF_ACTION_DEL:
+ j = 0;
+ rc = RMNET_VND_UPDATE_FLOW_OK;
+ for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
+ if (itm->tc_flow_valid[i] == 1) {
+ if (itm->tc_flow_id[i] == tc_flow) {
+ itm->tc_flow_valid[i] = 0;
+ itm->tc_flow_id[i] = 0;
+ j++;
+ LOGD("{%pK}->tc_flow_id[%d]=0", itm, i);
+ }
+ } else {
+ j++;
+ }
+ }
+ if (j == RMNET_MAP_FLOW_NUM_TC_HANDLE)
+ rc = RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT;
+ break;
+
+ default:
+ rc = RMNET_VND_UPDATE_FLOW_NO_ACTION;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * rmnet_vnd_add_tc_flow() - Add a MAP/TC flow handle mapping
+ * @id: Virtual network device ID
+ * @map_flow: MAP flow handle
+ * @tc_flow: TC flow handle
+ *
+ * Checks for an existing flow mapping object corresponding to map_flow. If one
+ * is found, then it will try to add to the existing mapping object. Otherwise,
+ * a new mapping object is created.
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful
+ * - RMNET_CONFIG_TC_HANDLE_FULL if there is no more room in the map object
+ * - RMNET_CONFIG_NOMEM failed to allocate a new map object
+ */
+int rmnet_vnd_add_tc_flow(uint32_t id, uint32_t map_flow, uint32_t tc_flow)
+{
+ struct rmnet_map_flow_mapping_s *itm;
+ struct net_device *dev;
+ struct rmnet_vnd_private_s *dev_conf;
+ int r;
+ unsigned long flags;
+
+ if ((id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
+ LOGM("Invalid VND id [%d]", id);
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+ }
+
+ dev = rmnet_devices[id];
+ dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev);
+
+ if (!dev_conf)
+ BUG();
+
+ write_lock_irqsave(&dev_conf->flow_map_lock, flags);
+ itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow);
+ if (itm) {
+ r = _rmnet_vnd_update_flow_map(RMNET_VND_UF_ACTION_ADD,
+ itm, tc_flow);
+ if (r != RMNET_VND_UPDATE_FLOW_OK) {
+ write_unlock_irqrestore(&dev_conf->flow_map_lock,
+ flags);
+ return RMNET_CONFIG_TC_HANDLE_FULL;
+ }
+ write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
+ return RMNET_CONFIG_OK;
+ }
+ write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
+
+ itm = kzalloc(sizeof(*itm), GFP_KERNEL);
+ if (!itm) {
+ LOGM("%s", "Failure allocating flow mapping");
+ return RMNET_CONFIG_NOMEM;
+ }
+
+ itm->map_flow_id = map_flow;
+ itm->tc_flow_valid[0] = 1;
+ itm->tc_flow_id[0] = tc_flow;
+
+ /* atomic_set() is the dynamic counterpart of ATOMIC_INIT() */
+ atomic_set(&itm->v4_seq, 0);
+ atomic_set(&itm->v6_seq, 0);
+
+ write_lock_irqsave(&dev_conf->flow_map_lock, flags);
+ list_add(&(itm->list), &(dev_conf->flow_head));
+ write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
+
+ LOGD("Created flow mapping [%s][0x%08X][0x%08X]@%pK",
+ dev->name, itm->map_flow_id, itm->tc_flow_id[0], itm);
+
+ return RMNET_CONFIG_OK;
+}
+
+/**
+ * rmnet_vnd_del_tc_flow() - Delete a MAP/TC flow handle mapping
+ * @id: Virtual network device ID
+ * @map_flow: MAP flow handle
+ * @tc_flow: TC flow handle
+ *
+ * Checks for an existing flow mapping object corresponding to map_flow. If one
+ * is found, then it will try to remove the existing tc_flow mapping. If the
+ * mapping object no longer contains any mappings, then it is freed. Otherwise
+ * the mapping object is left in the list.
+ *
+ * Return:
+ * - RMNET_CONFIG_OK if successful or if there was no such tc_flow
+ * - RMNET_CONFIG_INVALID_REQUEST if there is no such map_flow
+ */
+int rmnet_vnd_del_tc_flow(uint32_t id, uint32_t map_flow, uint32_t tc_flow)
+{
+ struct rmnet_vnd_private_s *dev_conf;
+ struct net_device *dev;
+ struct rmnet_map_flow_mapping_s *itm;
+ int r;
+ unsigned long flags;
+ int rc = RMNET_CONFIG_OK;
+
+ if ((id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
+ LOGM("Invalid VND id [%d]", id);
+ return RMNET_CONFIG_NO_SUCH_DEVICE;
+ }
+
+ dev = rmnet_devices[id];
+ dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev);
+
+ if (!dev_conf)
+ BUG();
+
+ r = RMNET_VND_UPDATE_FLOW_NO_ACTION;
+ write_lock_irqsave(&dev_conf->flow_map_lock, flags);
+ itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow);
+ if (!itm) {
+ rc = RMNET_CONFIG_INVALID_REQUEST;
+ } else {
+ r = _rmnet_vnd_update_flow_map(RMNET_VND_UF_ACTION_DEL,
+ itm, tc_flow);
+ if (r == RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT)
+ list_del(&(itm->list));
+ }
+ write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
+
+ if (r == RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT) {
+ /* r can only be NO_VALID_LEFT when itm was found above */
+ LOGD("Removed flow mapping [%s][0x%08X]@%pK",
+ dev->name, itm->map_flow_id, itm);
+ kfree(itm);
+ }
+
+ return rc;
+}
+
+/**
+ * rmnet_vnd_do_flow_control() - Process flow control request
+ * @dev: Virtual network device node to do lookup on
+ * @map_flow_id: Flow ID from MAP message
+ * @v4_seq: pointer to IPv4 indication sequence number
+ * @v6_seq: pointer to IPv6 indication sequence number
+ * @enable: boolean to enable/disable flow.
+ *
+ * Return:
+ * - 0 if successful
+ * - 1 if no mapping is found
+ * - 2 if dev is not RmNet virtual network device node
+ */
+int rmnet_vnd_do_flow_control(struct net_device *dev,
+ uint32_t map_flow_id,
+ uint16_t v4_seq,
+ uint16_t v6_seq,
+ int enable)
+{
+ struct rmnet_vnd_private_s *dev_conf;
+ struct rmnet_map_flow_mapping_s *itm;
+ int error, i;
+ error = 0;
+
+ if (unlikely(!dev))
+ BUG();
+
+ if (!rmnet_vnd_is_vnd(dev))
+ return 2;
+
+ dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev);
+
+ if (unlikely(!dev_conf))
+ BUG();
+
+ read_lock(&dev_conf->flow_map_lock);
+ if (map_flow_id == 0xFFFFFFFF) {
+ itm = &(dev_conf->root_flow);
+ goto nolookup;
+ }
+
+ itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow_id);
+
+ if (!itm) {
+ LOGL("Got flow control request for unknown flow %08X",
+ map_flow_id);
+ goto fcdone;
+ }
+
+nolookup:
+ if (v4_seq == 0 || v4_seq >= atomic_read(&(itm->v4_seq))) {
+ atomic_set(&(itm->v4_seq), v4_seq);
+ if (map_flow_id == 0xFFFFFFFF) {
+ LOGD("Setting VND TX queue state to %d", enable);
+ /* Although we expect similar number of enable/disable
+ * commands, optimize for the disable. That is more
+ * latency sensitive than enable
+ */
+ if (unlikely(enable))
+ netif_wake_queue(dev);
+ else
+ netif_stop_queue(dev);
+ trace_rmnet_fc_map(0xFFFFFFFF, 0, enable);
+ goto fcdone;
+ }
+ for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
+ if (itm->tc_flow_valid[i] == 1) {
+ LOGD("Found [%s][0x%08X][%d:0x%08X]",
+ dev->name, itm->map_flow_id, i,
+ itm->tc_flow_id[i]);
+
+ _rmnet_vnd_do_flow_control(dev,
+ itm->tc_flow_id[i],
+ enable);
+ }
+ }
+ } else {
+ LOGD("Internal seq(%hd) higher than called(%hd)",
+ atomic_read(&(itm->v4_seq)), v4_seq);
+ }
+
+fcdone:
+ read_unlock(&dev_conf->flow_map_lock);
+
+ return error;
+}
+
+/**
+ * rmnet_vnd_get_by_id() - Get VND by array index ID
+ * @id: Virtual network device id [0:RMNET_DATA_MAX_VND]
+ *
+ * Return:
+ * - 0 if no device or ID out of range
+ * - otherwise return pointer to VND net_device struct
+ */
+struct net_device *rmnet_vnd_get_by_id(int id)
+{
+ if (id < 0 || id >= RMNET_DATA_MAX_VND) {
+ pr_err("Bug; VND ID out of bounds");
+ BUG();
+ return 0;
+ }
+ return rmnet_devices[id];
+}
diff --git a/net/rmnet_data/rmnet_data_vnd.h b/net/rmnet_data/rmnet_data_vnd.h
new file mode 100644
index 000000000000..22ffcfc2e08e
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_vnd.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Virtual Network Device APIs
+ *
+ */
+
+#include <linux/types.h>
+
+#ifndef _RMNET_DATA_VND_H_
+#define _RMNET_DATA_VND_H_
+
+int rmnet_vnd_do_flow_control(struct net_device *dev,
+ uint32_t map_flow_id,
+ uint16_t v4_seq,
+ uint16_t v6_seq,
+ int enable);
+struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev);
+int rmnet_vnd_get_name(int id, char *name, int name_len);
+int rmnet_vnd_create_dev(int id, struct net_device **new_device,
+ const char *prefix);
+int rmnet_vnd_free_dev(int id);
+int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
+int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
+int rmnet_vnd_is_vnd(struct net_device *dev);
+int rmnet_vnd_add_tc_flow(uint32_t id, uint32_t map_flow, uint32_t tc_flow);
+int rmnet_vnd_del_tc_flow(uint32_t id, uint32_t map_flow, uint32_t tc_flow);
+int rmnet_vnd_init(void);
+void rmnet_vnd_exit(void);
+struct net_device *rmnet_vnd_get_by_id(int id);
+
+#endif /* _RMNET_DATA_VND_H_ */
diff --git a/net/rmnet_data/rmnet_map.h b/net/rmnet_data/rmnet_map.h
new file mode 100644
index 000000000000..71abca122dd6
--- /dev/null
+++ b/net/rmnet_data/rmnet_map.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <net/rmnet_config.h>
+
+#ifndef _RMNET_MAP_H_
+#define _RMNET_MAP_H_
+
+struct rmnet_map_control_command_s {
+ uint8_t command_name;
+#ifndef RMNET_USE_BIG_ENDIAN_STRUCTS
+ uint8_t cmd_type:2;
+ uint8_t reserved:6;
+#else
+ uint8_t reserved:6;
+ uint8_t cmd_type:2;
+#endif /* RMNET_USE_BIG_ENDIAN_STRUCTS */
+ uint16_t reserved2;
+ uint32_t transaction_id;
+ union {
+ uint8_t data[65528];
+ struct {
+#ifndef RMNET_USE_BIG_ENDIAN_STRUCTS
+ uint16_t ip_family:2;
+ uint16_t reserved:14;
+#else
+ uint16_t reserved:14;
+ uint16_t ip_family:2;
+#endif /* RMNET_USE_BIG_ENDIAN_STRUCTS */
+ uint16_t flow_control_seq_num;
+ uint32_t qos_id;
+ } flow_control;
+ };
+} __aligned(1);
+
+struct rmnet_map_dl_checksum_trailer_s {
+ unsigned char reserved_h;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned char valid:1;
+ unsigned char reserved_l:7;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ unsigned char reserved_l:7;
+ unsigned char valid:1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ unsigned short checksum_start_offset;
+ unsigned short checksum_length;
+ unsigned short checksum_value;
+} __aligned(1);
+
+struct rmnet_map_ul_checksum_header_s {
+ unsigned short checksum_start_offset;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned short checksum_insert_offset:14;
+ unsigned short udp_ip4_ind:1;
+ unsigned short cks_en:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ unsigned short cks_en:1;
+ unsigned short udp_ip4_ind:1;
+ unsigned short checksum_insert_offset:14;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __aligned(1);
+
+enum rmnet_map_results_e {
+ RMNET_MAP_SUCCESS,
+ RMNET_MAP_CONSUMED,
+ RMNET_MAP_GENERAL_FAILURE,
+ RMNET_MAP_NOT_ENABLED,
+ RMNET_MAP_FAILED_AGGREGATION,
+ RMNET_MAP_FAILED_MUX
+};
+
+enum rmnet_map_mux_errors_e {
+ RMNET_MAP_MUX_SUCCESS,
+ RMNET_MAP_MUX_INVALID_MUX_ID,
+ RMNET_MAP_MUX_INVALID_PAD_LENGTH,
+ RMNET_MAP_MUX_INVALID_PKT_LENGTH,
+ /* This should always be the last element */
+ RMNET_MAP_MUX_ENUM_LENGTH
+};
+
+enum rmnet_map_checksum_errors_e {
+ RMNET_MAP_CHECKSUM_OK,
+ RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET,
+ RMNET_MAP_CHECKSUM_VALIDATION_FAILED,
+ RMNET_MAP_CHECKSUM_ERR_UNKOWN,
+ RMNET_MAP_CHECKSUM_ERR_NOT_DATA_PACKET,
+ RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER,
+ RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION,
+ RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT,
+ RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET,
+ RMNET_MAP_CHECKSUM_SKIPPED,
+ RMNET_MAP_CHECKSUM_SW,
+ /* This should always be the last element */
+ RMNET_MAP_CHECKSUM_ENUM_LENGTH
+};
+
+enum rmnet_map_commands_e {
+ RMNET_MAP_COMMAND_NONE,
+ RMNET_MAP_COMMAND_FLOW_DISABLE,
+ RMNET_MAP_COMMAND_FLOW_ENABLE,
+ /* These should always be the last 2 elements */
+ RMNET_MAP_COMMAND_UNKNOWN,
+ RMNET_MAP_COMMAND_ENUM_LENGTH
+};
+
+enum rmnet_map_agg_state_e {
+ RMNET_MAP_AGG_IDLE,
+ RMNET_MAP_TXFER_SCHEDULED
+};
+
+#define RMNET_MAP_COMMAND_REQUEST 0
+#define RMNET_MAP_COMMAND_ACK 1
+#define RMNET_MAP_COMMAND_UNSUPPORTED 2
+#define RMNET_MAP_COMMAND_INVALID 3
+
+#define RMNET_MAP_NO_PAD_BYTES 0
+#define RMNET_MAP_ADD_PAD_BYTES 1
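+
+/* On-the-wire layout sketch (struct rmnet_map_header_s itself is defined in
+ * one of the headers included above):
+ *
+ *   [ MAP header: mux_id, pad_len, pkt_len | payload | pad bytes ]
+ *
+ * pkt_len covers payload plus padding, so a receiver recovers the true
+ * payload length as ntohs(pkt_len) - pad_len.
+ */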
+
+uint8_t rmnet_map_demultiplex(struct sk_buff *skb);
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+ struct rmnet_phys_ep_config *config);
+
+struct rmnet_map_header_s *rmnet_map_add_map_header(struct sk_buff *skb,
+ int hdrlen, int pad);
+rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
+ struct rmnet_phys_ep_config *config);
+void rmnet_map_aggregate(struct sk_buff *skb,
+ struct rmnet_phys_ep_config *config);
+
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb);
+int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev, uint32_t egress_data_format);
+
+#endif /* _RMNET_MAP_H_ */
diff --git a/net/rmnet_data/rmnet_map_command.c b/net/rmnet_data/rmnet_map_command.c
new file mode 100644
index 000000000000..9dac2b27d4c3
--- /dev/null
+++ b/net/rmnet_data/rmnet_map_command.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rmnet_data.h>
+#include <linux/net_map.h>
+#include <net/pkt_sched.h>
+#include <net/rmnet_config.h>
+#include "rmnet_data_config.h"
+#include "rmnet_map.h"
+#include "rmnet_data_private.h"
+#include "rmnet_data_vnd.h"
+#include "rmnet_data_stats.h"
+
+RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_MAPC);
+
+unsigned long int rmnet_map_command_stats[RMNET_MAP_COMMAND_ENUM_LENGTH];
+module_param_array(rmnet_map_command_stats, ulong, 0, S_IRUGO);
+MODULE_PARM_DESC(rmnet_map_command_stats, "MAP command statistics");
+
+/**
+ * rmnet_map_do_flow_control() - Process MAP flow control command
+ * @skb: Socket buffer containing the MAP flow control message
+ * @config: Physical end-point configuration of ingress device
+ * @enable: boolean for enable/disable
+ *
+ * Process in-band MAP flow control messages. Assumes mux ID is mapped to a
+ * RmNet Data virtual network device.
+ *
+ * Return:
+ * - RMNET_MAP_COMMAND_UNSUPPORTED on any error
+ * - RMNET_MAP_COMMAND_ACK on success
+ * - RX_HANDLER_CONSUMED if the skb was freed because of a bad mux ID or a
+ * missing logical endpoint configuration
+ */
+static uint8_t rmnet_map_do_flow_control(struct sk_buff *skb,
+ struct rmnet_phys_ep_config *config,
+ int enable)
+{
+ struct rmnet_map_control_command_s *cmd;
+ struct net_device *vnd;
+ struct rmnet_logical_ep_conf_s *ep;
+ uint8_t mux_id;
+ uint16_t ip_family;
+ uint16_t fc_seq;
+ uint32_t qos_id;
+ int r;
+
+ if (unlikely(!skb || !config))
+ BUG();
+
+ mux_id = RMNET_MAP_GET_MUX_ID(skb);
+ cmd = RMNET_MAP_GET_CMD_START(skb);
+
+ if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) {
+ LOGD("Got packet on %s with bad mux id %d",
+ skb->dev->name, mux_id);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_BAD_MUX);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ ep = &(config->muxed_ep[mux_id]);
+
+ if (!ep->refcount) {
+ LOGD("Packet on %s:%d; has no logical endpoint config",
+ skb->dev->name, mux_id);
+
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_MUX_NO_EP);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ vnd = ep->egress_dev;
+
+ ip_family = cmd->flow_control.ip_family;
+ fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
+ qos_id = ntohl(cmd->flow_control.qos_id);
+
+ /* Ignore the ip family and pass the sequence number for both v4 and v6
+ * sequence. User space does not support creating dedicated flows for
+ * the 2 protocols
+ */
+ r = rmnet_vnd_do_flow_control(vnd, qos_id, fc_seq, fc_seq, enable);
+ LOGD("dev:%s, qos_id:0x%08X, ip_family:%hd, fc_seq %hd, en:%d",
+ skb->dev->name, qos_id, ip_family & 3, fc_seq, enable);
+
+ if (r) {
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED);
+ return RMNET_MAP_COMMAND_UNSUPPORTED;
+ } else {
+ return RMNET_MAP_COMMAND_ACK;
+ }
+}
+
+/**
+ * rmnet_map_send_ack() - Send N/ACK message for MAP commands
+ * @skb: Socket buffer containing the MAP command message
+ * @type: N/ACK message selector
+ * @config: Physical end-point configuration of ingress device
+ *
+ * skb is modified to contain the message type selector. The message is then
+ * transmitted on skb->dev. Note that this function grabs global Tx lock on
+ * skb->dev for latency reasons.
+ *
+ * Return:
+ * - void
+ */
+static void rmnet_map_send_ack(struct sk_buff *skb,
+ unsigned char type,
+ struct rmnet_phys_ep_config *config)
+{
+ struct rmnet_map_control_command_s *cmd;
+ int xmit_status;
+ int rc;
+
+ if (unlikely(!skb))
+ BUG();
+
+ skb->protocol = htons(ETH_P_MAP);
+
+ if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
+ (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)) {
+ if (unlikely(skb->len < (sizeof(struct rmnet_map_header_s)
+ + RMNET_MAP_GET_LENGTH(skb)
+ + sizeof(struct rmnet_map_dl_checksum_trailer_s)))) {
+ rmnet_stats_dl_checksum(
+ RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER);
+ return;
+ }
+
+ skb_trim(skb, skb->len -
+ sizeof(struct rmnet_map_dl_checksum_trailer_s));
+ }
+
+ cmd = RMNET_MAP_GET_CMD_START(skb);
+ cmd->cmd_type = type & 0x03;
+
+ netif_tx_lock(skb->dev);
+ xmit_status = skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
+ netif_tx_unlock(skb->dev);
+
+ LOGD("MAP command ACK=%hhu sent with rc: %d", type & 0x03, xmit_status);
+
+ if (xmit_status != NETDEV_TX_OK) {
+ rc = dev_queue_xmit(skb);
+ if (rc != 0) {
+ LOGD("Failed to queue packet for transmission on [%s]",
+ skb->dev->name);
+ }
+ }
+}
+
+/**
+ * rmnet_map_command() - Entry point for handling MAP commands
+ * @skb: Socket buffer containing the MAP command message
+ * @config: Physical end-point configuration of ingress device
+ *
+ * Process MAP command frame and send N/ACK message as appropriate. Message cmd
+ * name is decoded here and appropriate handler is called.
+ *
+ * Return:
+ * - RX_HANDLER_CONSUMED. Command frames are always consumed.
+ */
+rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
+ struct rmnet_phys_ep_config *config)
+{
+ struct rmnet_map_control_command_s *cmd;
+ unsigned char command_name;
+ unsigned char rc = 0;
+
+ if (unlikely(!skb))
+ BUG();
+
+ cmd = RMNET_MAP_GET_CMD_START(skb);
+ command_name = cmd->command_name;
+
+ if (command_name < RMNET_MAP_COMMAND_ENUM_LENGTH)
+ rmnet_map_command_stats[command_name]++;
+
+ switch (command_name) {
+ case RMNET_MAP_COMMAND_FLOW_ENABLE:
+ rc = rmnet_map_do_flow_control(skb, config, 1);
+ break;
+
+ case RMNET_MAP_COMMAND_FLOW_DISABLE:
+ rc = rmnet_map_do_flow_control(skb, config, 0);
+ break;
+
+ default:
+ rmnet_map_command_stats[RMNET_MAP_COMMAND_UNKNOWN]++;
+ LOGM("Uknown MAP command: %d", command_name);
+ rc = RMNET_MAP_COMMAND_UNSUPPORTED;
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED);
+ break;
+ }
+ if (rc == RMNET_MAP_COMMAND_ACK)
+ rmnet_map_send_ack(skb, rc, config);
+ return RX_HANDLER_CONSUMED;
+}
diff --git a/net/rmnet_data/rmnet_map_data.c b/net/rmnet_data/rmnet_map_data.c
new file mode 100644
index 000000000000..beff8332c731
--- /dev/null
+++ b/net/rmnet_data/rmnet_map_data.c
@@ -0,0 +1,757 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data MAP protocol
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rmnet_data.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/time.h>
+#include <linux/net_map.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <net/rmnet_config.h>
+#include "rmnet_data_config.h"
+#include "rmnet_map.h"
+#include "rmnet_data_private.h"
+#include "rmnet_data_stats.h"
+#include "rmnet_data_trace.h"
+
+RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_MAPD);
+
+/* ***************** Local Definitions ************************************** */
+
+long agg_time_limit __read_mostly = 1000000L;
+module_param(agg_time_limit, long, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(agg_time_limit, "Maximum time packets sit in the agg buf");
+
+long agg_bypass_time __read_mostly = 10000000L;
+module_param(agg_bypass_time, long, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this");
+
+struct agg_work {
+ struct delayed_work work;
+ struct rmnet_phys_ep_config *config;
+};
+
+#define RMNET_MAP_DEAGGR_SPACING 64
+#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING/2)
+/******************************************************************************/
+
+/**
+ * rmnet_map_add_map_header() - Adds MAP header to front of skb->data
+ * @skb: Socket buffer ("packet") to modify
+ * @hdrlen: Number of bytes of header data which should not be included in
+ * MAP length field
+ * @pad: Specify if padding the MAP packet to make it 4 byte aligned is
+ * necessary
+ *
+ * Padding is calculated and set appropriately in MAP header. Mux ID is
+ * initialized to 0.
+ *
+ * Return:
+ * - Pointer to MAP structure
+ * - 0 (null) if insufficient headroom
+ * - 0 (null) if insufficient tailroom for padding bytes
+ *
+ * todo: Parameterize skb alignment
+ */
+struct rmnet_map_header_s *rmnet_map_add_map_header(struct sk_buff *skb,
+ int hdrlen, int pad)
+{
+ uint32_t padding, map_datalen;
+ uint8_t *padbytes;
+ struct rmnet_map_header_s *map_header;
+
+ if (skb_headroom(skb) < sizeof(struct rmnet_map_header_s))
+ return 0;
+
+ map_datalen = skb->len - hdrlen;
+ map_header = (struct rmnet_map_header_s *)
+ skb_push(skb, sizeof(struct rmnet_map_header_s));
+ memset(map_header, 0, sizeof(struct rmnet_map_header_s));
+
+ if (pad == RMNET_MAP_NO_PAD_BYTES) {
+ map_header->pkt_len = htons(map_datalen);
+ return map_header;
+ }
+
+ padding = ALIGN(map_datalen, 4) - map_datalen;
+
+ if (padding == 0)
+ goto done;
+
+ if (skb_tailroom(skb) < padding)
+ return 0;
+
+ padbytes = (uint8_t *) skb_put(skb, padding);
+ LOGD("pad: %d", padding);
+ memset(padbytes, 0, padding);
+
+done:
+ map_header->pkt_len = htons(map_datalen + padding);
+ map_header->pad_len = padding & 0x3F;
+
+ return map_header;
+}
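+
+/* Worked example: a 1499 byte payload with pad == RMNET_MAP_ADD_PAD_BYTES
+ * gets ALIGN(1499, 4) - 1499 = 1 zero byte appended; pkt_len becomes
+ * htons(1500) and pad_len is set to 1.
+ */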
+
+/**
+ * rmnet_map_deaggregate() - Deaggregates a single packet
+ * @skb: Source socket buffer containing multiple MAP frames
+ * @config: Physical endpoint configuration of the ingress device
+ *
+ * A whole new buffer is allocated for each portion of an aggregated frame.
+ * Caller should keep calling deaggregate() on the source skb until 0 is
+ * returned, indicating that there are no more packets to deaggregate. Caller
+ * is responsible for freeing the original skb.
+ *
+ * Return:
+ * - Pointer to new skb
+ * - 0 (null) if no more aggregated packets
+ */
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+ struct rmnet_phys_ep_config *config)
+{
+ struct sk_buff *skbn;
+ struct rmnet_map_header_s *maph;
+ uint32_t packet_len;
+
+ if (skb->len == 0)
+ return 0;
+
+ maph = (struct rmnet_map_header_s *) skb->data;
+ packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header_s);
+
+ if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
+ (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4))
+ packet_len += sizeof(struct rmnet_map_dl_checksum_trailer_s);
+
+ if ((((int)skb->len) - ((int)packet_len)) < 0) {
+ LOGM("%s", "Got malformed packet. Dropping");
+ return 0;
+ }
+
+ skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
+ if (!skbn)
+ return 0;
+
+ skbn->dev = skb->dev;
+ skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
+ skb_put(skbn, packet_len);
+ memcpy(skbn->data, skb->data, packet_len);
+ skb_pull(skb, packet_len);
+
+ /* Some hardware can send us empty frames. Catch them */
+ if (ntohs(maph->pkt_len) == 0) {
+ LOGD("Dropping empty MAP frame");
+ rmnet_kfree_skb(skbn, RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0);
+ return 0;
+ }
+
+ return skbn;
+}
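+
+/* Minimal caller sketch (hypothetical; the real ingress path adds stats
+ * and trace bookkeeping):
+ *
+ *	while ((skbn = rmnet_map_deaggregate(skb, config)) != 0)
+ *		netif_receive_skb(skbn);
+ *	consume_skb(skb);
+ */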
+
+/**
+ * rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
+ * @work: struct agg_work containing delayed work and skb to flush
+ *
+ * This function is scheduled to run a specified number of jiffies after
+ * the last frame is transmitted by the network stack. When run, the buffer
+ * containing aggregated packets is finally transmitted on the underlying link.
+ *
+ */
+static void rmnet_map_flush_packet_queue(struct work_struct *work)
+{
+ struct agg_work *real_work;
+ struct rmnet_phys_ep_config *config;
+ unsigned long flags;
+ struct sk_buff *skb;
+ int rc, agg_count = 0;
+
+ skb = 0;
+ real_work = (struct agg_work *)work;
+ config = real_work->config;
+ LOGD("%s", "Entering flush thread");
+ spin_lock_irqsave(&config->agg_lock, flags);
+ if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
+ /* Buffer may have already been shipped out */
+ if (likely(config->agg_skb)) {
+ rmnet_stats_agg_pkts(config->agg_count);
+ if (config->agg_count > 1)
+ LOGL("Agg count: %d", config->agg_count);
+ skb = config->agg_skb;
+ agg_count = config->agg_count;
+ config->agg_skb = 0;
+ config->agg_count = 0;
+ memset(&(config->agg_time), 0, sizeof(struct timespec));
+ }
+ config->agg_state = RMNET_MAP_AGG_IDLE;
+ } else {
+ /* How did we get here? */
+ LOGE("Ran queued command when state %s",
+ "is idle. State machine likely broken");
+ }
+
+ spin_unlock_irqrestore(&config->agg_lock, flags);
+ if (skb) {
+ trace_rmnet_map_flush_packet_queue(skb, agg_count);
+ rc = dev_queue_xmit(skb);
+ rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
+ }
+ kfree(work);
+}
+
+/**
+ * rmnet_map_aggregate() - Software aggregates multiple packets
+ * @skb: current packet being transmitted
+ * @config: Physical endpoint configuration of the egress device
+ *
+ * Aggregates multiple SKBs into a single large SKB for transmission. MAP
+ * protocol is used to separate the packets in the buffer. This function
+ * consumes the argument SKB; the caller must not process it further.
+ */
+void rmnet_map_aggregate(struct sk_buff *skb,
+ struct rmnet_phys_ep_config *config)
+{
+ uint8_t *dest_buff;
+ struct agg_work *work;
+ unsigned long flags;
+ struct sk_buff *agg_skb;
+ struct timespec diff, last;
+ int size, rc, agg_count = 0;
+
+ BUG_ON(!skb || !config);
+ size = config->egress_agg_size - skb->len;
+
+ if (size < 2000) {
+ LOGL("Invalid length %d", size);
+ return;
+ }
+
+new_packet:
+ spin_lock_irqsave(&config->agg_lock, flags);
+
+ memcpy(&last, &(config->agg_last), sizeof(struct timespec));
+ getnstimeofday(&(config->agg_last));
+
+ if (!config->agg_skb) {
+ /* Check to see if we should agg first. If the traffic is very
+ * sparse, don't aggregate. We will need to tune this later
+ */
+ diff = timespec_sub(config->agg_last, last);
+
+ if ((diff.tv_sec > 0) || (diff.tv_nsec > agg_bypass_time)) {
+ spin_unlock_irqrestore(&config->agg_lock, flags);
+ LOGL("delta t: %ld.%09lu\tcount: bypass", diff.tv_sec,
+ diff.tv_nsec);
+ rmnet_stats_agg_pkts(1);
+ trace_rmnet_map_aggregate(skb, 0);
+ rc = dev_queue_xmit(skb);
+ rmnet_stats_queue_xmit(rc,
+ RMNET_STATS_QUEUE_XMIT_AGG_SKIP);
+ return;
+ }
+
+ config->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
+ if (!config->agg_skb) {
+ config->agg_skb = 0;
+ config->agg_count = 0;
+ memset(&(config->agg_time), 0, sizeof(struct timespec));
+ spin_unlock_irqrestore(&config->agg_lock, flags);
+ rmnet_stats_agg_pkts(1);
+ trace_rmnet_map_aggregate(skb, 0);
+ rc = dev_queue_xmit(skb);
+ rmnet_stats_queue_xmit(rc,
+ RMNET_STATS_QUEUE_XMIT_AGG_CPY_EXP_FAIL);
+ return;
+ }
+ config->agg_count = 1;
+ getnstimeofday(&(config->agg_time));
+ trace_rmnet_start_aggregation(skb);
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_CPY_EXPAND);
+ goto schedule;
+ }
+ diff = timespec_sub(config->agg_last, config->agg_time);
+
+ if (skb->len > (config->egress_agg_size - config->agg_skb->len)
+ || (config->agg_count >= config->egress_agg_count)
+ || (diff.tv_sec > 0) || (diff.tv_nsec > agg_time_limit)) {
+ rmnet_stats_agg_pkts(config->agg_count);
+ agg_skb = config->agg_skb;
+ agg_count = config->agg_count;
+ config->agg_skb = 0;
+ config->agg_count = 0;
+ memset(&(config->agg_time), 0, sizeof(struct timespec));
+ spin_unlock_irqrestore(&config->agg_lock, flags);
+ LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
+ diff.tv_nsec, agg_count);
+ trace_rmnet_map_aggregate(skb, agg_count);
+ rc = dev_queue_xmit(agg_skb);
+ rmnet_stats_queue_xmit(rc,
+ RMNET_STATS_QUEUE_XMIT_AGG_FILL_BUFFER);
+ goto new_packet;
+ }
+
+ dest_buff = skb_put(config->agg_skb, skb->len);
+ memcpy(dest_buff, skb->data, skb->len);
+ config->agg_count++;
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_INTO_BUFF);
+
+schedule:
+ if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work) {
+ LOGE("Failed to allocate work item for packet %s",
+ "transfer. DATA PATH LIKELY BROKEN!");
+ config->agg_state = RMNET_MAP_AGG_IDLE;
+ spin_unlock_irqrestore(&config->agg_lock, flags);
+ return;
+ }
+ INIT_DELAYED_WORK((struct delayed_work *)work,
+ rmnet_map_flush_packet_queue);
+ work->config = config;
+ config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
+ schedule_delayed_work((struct delayed_work *)work, 1);
+ }
+ spin_unlock_irqrestore(&config->agg_lock, flags);
+ return;
+}
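+
+/* State machine summary (describes the code above, no new behaviour):
+ * buffering the first packet moves agg_state from RMNET_MAP_AGG_IDLE to
+ * RMNET_MAP_TXFER_SCHEDULED and queues the flush worker one jiffy out;
+ * only rmnet_map_flush_packet_queue() returns the state to idle. A full
+ * buffer is shipped inline via dev_queue_xmit() and the walk restarts at
+ * new_packet with the timer still pending.
+ */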
+
+/* ***************** Checksum Offload ************************************** */
+
+static inline uint16_t *rmnet_map_get_checksum_field(unsigned char protocol,
+ const void *txporthdr)
+{
+ uint16_t *check = 0;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ check = &(((struct tcphdr *)txporthdr)->check);
+ break;
+
+ case IPPROTO_UDP:
+ check = &(((struct udphdr *)txporthdr)->check);
+ break;
+
+ default:
+ check = 0;
+ break;
+ }
+
+ return check;
+}
+
+static inline uint16_t rmnet_map_add_checksums(uint16_t val1, uint16_t val2)
+{
+ int sum = val1 + val2;
+
+ sum = (((sum & 0xFFFF0000) >> 16) + sum) & 0x0000FFFF;
+ return (uint16_t) (sum & 0x0000FFFF);
+}
+
+static inline uint16_t rmnet_map_subtract_checksums(uint16_t val1,
+ uint16_t val2)
+{
+ return rmnet_map_add_checksums(val1, ~val2);
+}
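+
+/* Worked example of the end-around carry (illustrative):
+ * rmnet_map_add_checksums(0xFFFF, 0x0001) gives sum = 0x10000, the carry
+ * folds back in and the result is 0x0001;
+ * rmnet_map_subtract_checksums(0x0001, 0x0001) adds the complement 0xFFFE
+ * and yields 0xFFFF, the one's complement zero.
+ */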
+
+/**
+ * rmnet_map_validate_ipv4_packet_checksum() - Validates TCP/UDP checksum
+ * value for IPv4 packet
+ * @map_payload: Pointer to the beginning of the map payload
+ * @cksum_trailer: Pointer to the checksum trailer
+ *
+ * Validates the TCP/UDP checksum for the packet using the checksum value
+ * from the checksum trailer added to the packet.
+ * The validation formula is the following:
+ * 1. Performs 1's complement over the checksum value from the trailer
+ * 2. Computes 1's complement checksum over IPv4 header and subtracts it from
+ * the value from step 1
+ * 3. Computes 1's complement checksum over IPv4 pseudo header and adds it to
+ * the value from step 2
+ * 4. Subtracts the checksum value from the TCP/UDP header from the value from
+ * step 3
+ * 5. Compares the value from step 4 to the checksum value from the TCP/UDP
+ * header
+ *
+ * Fragmentation and tunneling are not supported.
+ *
+ * Return: 0 if validation succeeded.
+ */
+static int rmnet_map_validate_ipv4_packet_checksum(unsigned char *map_payload,
+ struct rmnet_map_dl_checksum_trailer_s *cksum_trailer)
+{
+ struct iphdr *ip4h;
+ uint16_t *checksum_field;
+ void *txporthdr;
+ uint16_t pseudo_checksum;
+ uint16_t ip_hdr_checksum;
+ uint16_t checksum_value;
+ uint16_t ip_payload_checksum;
+ uint16_t ip_pseudo_payload_checksum;
+ uint16_t checksum_value_final;
+
+ ip4h = (struct iphdr *) map_payload;
+ if ((ntohs(ip4h->frag_off) & IP_MF)
+ || ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
+ return RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET;
+
+ txporthdr = map_payload + ip4h->ihl*4;
+
+ checksum_field = rmnet_map_get_checksum_field(ip4h->protocol,
+ txporthdr);
+
+ if (unlikely(!checksum_field))
+ return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT;
+
+ /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
+ if ((*checksum_field == 0) && (ip4h->protocol == IPPROTO_UDP))
+ return RMNET_MAP_CHECKSUM_SKIPPED;
+
+ checksum_value = ~ntohs(cksum_trailer->checksum_value);
+ ip_hdr_checksum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
+ ip_payload_checksum = rmnet_map_subtract_checksums(checksum_value,
+ ip_hdr_checksum);
+
+ pseudo_checksum = ~ntohs(csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+ (uint16_t)(ntohs(ip4h->tot_len) - ip4h->ihl*4),
+ (uint16_t)ip4h->protocol, 0));
+ ip_pseudo_payload_checksum = rmnet_map_add_checksums(
+ ip_payload_checksum, pseudo_checksum);
+
+ checksum_value_final = ~rmnet_map_subtract_checksums(
+ ip_pseudo_payload_checksum, ntohs(*checksum_field));
+
+ if (unlikely(checksum_value_final == 0)) {
+ switch (ip4h->protocol) {
+ case IPPROTO_UDP:
+ /* RFC 768 */
+ LOGD("DL4 1's complement rule for UDP checksum 0");
+ checksum_value_final = ~checksum_value_final;
+ break;
+
+ case IPPROTO_TCP:
+ if (*checksum_field == 0xFFFF) {
+ LOGD(
+ "DL4 Non-RFC compliant TCP checksum found");
+ checksum_value_final = ~checksum_value_final;
+ }
+ break;
+ }
+ }
+
+ LOGD(
+ "DL4 cksum: ~HW: %04X, field: %04X, pseudo header: %04X, final: %04X",
+ ~ntohs(cksum_trailer->checksum_value), ntohs(*checksum_field),
+ pseudo_checksum, checksum_value_final);
+
+ if (checksum_value_final == ntohs(*checksum_field))
+ return RMNET_MAP_CHECKSUM_OK;
+ else
+ return RMNET_MAP_CHECKSUM_VALIDATION_FAILED;
+}
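+
+/* Restating the five steps above in one's complement terms: the trailer
+ * carries the checksum of the whole IP packet, the IP header contribution
+ * is subtracted out, the pseudo header is added in, and the stored
+ * transport checksum is removed; the result matches the stored checksum
+ * exactly when the transport segment verifies.
+ */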
+
+/**
+ * rmnet_map_validate_ipv6_packet_checksum() - Validates TCP/UDP checksum
+ * value for IPv6 packet
+ * @map_payload: Pointer to the beginning of the map payload
+ * @cksum_trailer: Pointer to the checksum trailer
+ *
+ * Validates the TCP/UDP checksum for the packet using the checksum value
+ * from the checksum trailer added to the packet.
+ * The validation formula is the following:
+ * 1. Performs 1's complement over the checksum value from the trailer
+ * 2. Computes 1's complement checksum over IPv6 header and subtracts it from
+ * the value from step 1
+ * 3. Computes 1's complement checksum over IPv6 pseudo header and adds it to
+ * the value from step 2
+ * 4. Subtracts the checksum value from the TCP/UDP header from the value from
+ * step 3
+ * 5. Compares the value from step 4 to the checksum value from the TCP/UDP
+ * header
+ *
+ * Fragmentation, extension headers and tunneling are not supported.
+ *
+ * Return: 0 if validation succeeded.
+ */
+static int rmnet_map_validate_ipv6_packet_checksum(unsigned char *map_payload,
+ struct rmnet_map_dl_checksum_trailer_s *cksum_trailer)
+{
+ struct ipv6hdr *ip6h;
+ uint16_t *checksum_field;
+ void *txporthdr;
+ uint16_t pseudo_checksum;
+ uint16_t ip_hdr_checksum;
+ uint16_t checksum_value;
+ uint16_t ip_payload_checksum;
+ uint16_t ip_pseudo_payload_checksum;
+ uint16_t checksum_value_final;
+ uint32_t length;
+
+ ip6h = (struct ipv6hdr *) map_payload;
+
+ txporthdr = map_payload + sizeof(struct ipv6hdr);
+ checksum_field = rmnet_map_get_checksum_field(ip6h->nexthdr,
+ txporthdr);
+
+ if (unlikely(!checksum_field))
+ return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT;
+
+ checksum_value = ~ntohs(cksum_trailer->checksum_value);
+ ip_hdr_checksum = ~ntohs(ip_compute_csum(ip6h,
+ (int)(txporthdr - (void *)map_payload)));
+ ip_payload_checksum = rmnet_map_subtract_checksums(checksum_value,
+ ip_hdr_checksum);
+
+ length = (ip6h->nexthdr == IPPROTO_UDP) ?
+ ntohs(((struct udphdr *)txporthdr)->len) :
+ ntohs(ip6h->payload_len);
+ pseudo_checksum = ~ntohs(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ length, ip6h->nexthdr, 0));
+ ip_pseudo_payload_checksum = rmnet_map_add_checksums(
+ ip_payload_checksum, pseudo_checksum);
+
+ checksum_value_final = ~rmnet_map_subtract_checksums(
+ ip_pseudo_payload_checksum, ntohs(*checksum_field));
+
+ if (unlikely(checksum_value_final == 0)) {
+ switch (ip6h->nexthdr) {
+ case IPPROTO_UDP:
+ /* RFC 2460 section 8.1 */
+ LOGD("DL6 One's complement rule for UDP checksum 0");
+ checksum_value_final = ~checksum_value_final;
+ break;
+
+ case IPPROTO_TCP:
+ if (*checksum_field == 0xFFFF) {
+ LOGD(
+ "DL6 Non-RFC compliant TCP checksum found");
+ checksum_value_final = ~checksum_value_final;
+ }
+ break;
+ }
+ }
+
+ LOGD(
+ "DL6 cksum: ~HW: %04X, field: %04X, pseudo header: %04X, final: %04X",
+ ~ntohs(cksum_trailer->checksum_value), ntohs(*checksum_field),
+ pseudo_checksum, checksum_value_final);
+
+ if (checksum_value_final == ntohs(*checksum_field))
+ return RMNET_MAP_CHECKSUM_OK;
+ else
+ return RMNET_MAP_CHECKSUM_VALIDATION_FAILED;
+}
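+
+/* The IPv6 path mirrors the IPv4 identity above; only the pseudo header
+ * (csum_ipv6_magic()) and the UDP length source differ.
+ */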
+
+/**
+ * rmnet_map_checksum_downlink_packet() - Validates checksum on
+ * a downlink packet
+ * @skb: Pointer to the packet's skb.
+ *
+ * Validates packet checksums. Function takes a pointer to
+ * the beginning of a buffer which contains the entire MAP
+ * frame: MAP header + IP payload + padding + checksum trailer.
+ * Currently, only IPv4 and IPv6 are supported along with
+ * TCP & UDP. Fragmented or tunneled packets are not supported.
+ *
+ * Return:
+ * - RMNET_MAP_CHECKSUM_OK: Validation of checksum succeeded.
+ * - RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER: Skb buffer given is corrupted.
+ * - RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET: Valid flag is not set in the
+ * checksum trailer.
+ * - RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET: The packet is a fragment.
+ * - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT: The transport header is
+ * not TCP/UDP.
+ * - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
+ * - RMNET_MAP_CHECKSUM_VALIDATION_FAILED: In case the validation failed.
+ */
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb)
+{
+ struct rmnet_map_dl_checksum_trailer_s *cksum_trailer;
+ unsigned int data_len;
+ unsigned char *map_payload;
+ unsigned char ip_version;
+
+ data_len = RMNET_MAP_GET_LENGTH(skb);
+
+ if (unlikely(skb->len < (sizeof(struct rmnet_map_header_s) + data_len +
+ sizeof(struct rmnet_map_dl_checksum_trailer_s))))
+ return RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER;
+
+ cksum_trailer = (struct rmnet_map_dl_checksum_trailer_s *)
+ (skb->data + data_len
+ + sizeof(struct rmnet_map_header_s));
+
+ if (unlikely(!ntohs(cksum_trailer->valid)))
+ return RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET;
+
+ map_payload = (unsigned char *)(skb->data
+ + sizeof(struct rmnet_map_header_s));
+
+ ip_version = (*map_payload & 0xF0) >> 4;
+ if (ip_version == 0x04)
+ return rmnet_map_validate_ipv4_packet_checksum(map_payload,
+ cksum_trailer);
+ else if (ip_version == 0x06)
+ return rmnet_map_validate_ipv6_packet_checksum(map_payload,
+ cksum_trailer);
+
+ return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
+}
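+
+/* MAP frame layout assumed by the offset math above, with data_len taken
+ * from RMNET_MAP_GET_LENGTH() (IP packet plus padding):
+ *
+ *	| MAP header | IP packet + padding | checksum trailer |
+ */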
+
+static void rmnet_map_fill_ipv4_packet_ul_checksum_header(void *iphdr,
+ struct rmnet_map_ul_checksum_header_s *ul_header, struct sk_buff *skb)
+{
+ struct iphdr *ip4h = (struct iphdr *)iphdr;
+ unsigned short *hdr = (unsigned short *)ul_header;
+
+ ul_header->checksum_start_offset = htons((unsigned short)
+ (skb_transport_header(skb) - (unsigned char *)iphdr));
+ ul_header->checksum_insert_offset = skb->csum_offset;
+ ul_header->cks_en = 1;
+ if (ip4h->protocol == IPPROTO_UDP)
+ ul_header->udp_ip4_ind = 1;
+ else
+ ul_header->udp_ip4_ind = 0;
+ /* Changing checksum_insert_offset to network order */
+ hdr++;
+ *hdr = htons(*hdr);
+ skb->ip_summed = CHECKSUM_NONE;
+}
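+
+/* The hdr++/htons() above byte swaps the second 16 bit word of the UL
+ * header as a unit; by the layout assumed here it packs
+ * checksum_insert_offset together with the cks_en/udp_ip4_ind bits.
+ */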
+
+static void rmnet_map_fill_ipv6_packet_ul_checksum_header(void *iphdr,
+ struct rmnet_map_ul_checksum_header_s *ul_header, struct sk_buff *skb)
+{
+ unsigned short *hdr = (unsigned short *)ul_header;
+
+ ul_header->checksum_start_offset = htons((unsigned short)
+ (skb_transport_header(skb) - (unsigned char *)iphdr));
+ ul_header->checksum_insert_offset = skb->csum_offset;
+ ul_header->cks_en = 1;
+ ul_header->udp_ip4_ind = 0;
+ /* Changing checksum_insert_offset to network order */
+ hdr++;
+ *hdr = htons(*hdr);
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+{
+ struct iphdr *ip4h = (struct iphdr *)iphdr;
+ void *txporthdr;
+ uint16_t *csum;
+
+ txporthdr = iphdr + ip4h->ihl*4;
+
+ if ((ip4h->protocol == IPPROTO_TCP) ||
+ (ip4h->protocol == IPPROTO_UDP)) {
+ csum = (uint16_t *)rmnet_map_get_checksum_field(ip4h->protocol,
+ txporthdr);
+ *csum = ~(*csum);
+ }
+}
+
+static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+{
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+ void *txporthdr;
+ uint16_t *csum;
+
+ txporthdr = ip6hdr + sizeof(struct ipv6hdr);
+
+ if ((ip6h->nexthdr == IPPROTO_TCP) || (ip6h->nexthdr == IPPROTO_UDP)) {
+ csum = (uint16_t *)rmnet_map_get_checksum_field(ip6h->nexthdr,
+ txporthdr);
+ *csum = ~(*csum);
+ }
+}
+
+/**
+ * rmnet_map_checksum_uplink_packet() - Generates UL checksum
+ * meta info header
+ * @skb: Pointer to the packet's skb.
+ *
+ * Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ *
+ * Return:
+ * - RMNET_MAP_CHECKSUM_OK: Validation of checksum succeeded.
+ * - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
+ * - RMNET_MAP_CHECKSUM_SW: Unsupported packet for UL checksum offload.
+ */
+int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev, uint32_t egress_data_format)
+{
+ unsigned char ip_version;
+ struct rmnet_map_ul_checksum_header_s *ul_header;
+ void *iphdr;
+ int ret;
+
+ ul_header = (struct rmnet_map_ul_checksum_header_s *)
+ skb_push(skb, sizeof(struct rmnet_map_ul_checksum_header_s));
+
+ if (unlikely(!(orig_dev->features &
+ (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)))) {
+ ret = RMNET_MAP_CHECKSUM_SW;
+ goto sw_checksum;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ iphdr = (char *)ul_header +
+ sizeof(struct rmnet_map_ul_checksum_header_s);
+ ip_version = (*(char *)iphdr & 0xF0) >> 4;
+ if (ip_version == 0x04) {
+ rmnet_map_fill_ipv4_packet_ul_checksum_header(iphdr,
+ ul_header, skb);
+ if (egress_data_format &
+ RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
+ rmnet_map_complement_ipv4_txporthdr_csum_field(
+ iphdr);
+ return RMNET_MAP_CHECKSUM_OK;
+ } else if (ip_version == 0x06) {
+ rmnet_map_fill_ipv6_packet_ul_checksum_header(iphdr,
+ ul_header, skb);
+ if (egress_data_format &
+ RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
+ rmnet_map_complement_ipv6_txporthdr_csum_field(
+ iphdr);
+ return RMNET_MAP_CHECKSUM_OK;
+ } else {
+ ret = RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
+ goto sw_checksum;
+ }
+ } else {
+ ret = RMNET_MAP_CHECKSUM_SW;
+ goto sw_checksum;
+ }
+
+sw_checksum:
+ ul_header->checksum_start_offset = 0;
+ ul_header->checksum_insert_offset = 0;
+ ul_header->cks_en = 0;
+ ul_header->udp_ip4_ind = 0;
+ return ret;
+}
diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c
index 79c4abcfa6b4..fb31d2ea5a81 100644
--- a/net/rose/rose_in.c
+++ b/net/rose/rose_in.c
@@ -164,7 +164,8 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
rose_frames_acked(sk, nr);
if (ns == rose->vr) {
rose_start_idletimer(sk);
- if (sock_queue_rcv_skb(sk, skb) == 0) {
+ if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
+ sock_queue_rcv_skb(sk, skb) == 0) {
rose->vr = (rose->vr + 1) % ROSE_MODULUS;
queued = 1;
} else {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 95b560f0b253..b855352167b1 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1173,6 +1173,45 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
}
/*
+ * enable/disable flow on qdisc.
+ */
+int
+tc_qdisc_flow_control(struct net_device *dev, u32 tcm_handle, int enable_flow)
+{
+ struct Qdisc *q;
+ int qdisc_len = 0;
+ struct __qdisc_change_req {
+ struct nlattr attr;
+ struct tc_prio_qopt data;
+ } req = {
+ .attr = {sizeof(struct __qdisc_change_req), TCA_OPTIONS},
+ .data = {3, {1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1}, 1}
+ };
+
+ /* override flow bit */
+ req.data.enable_flow = enable_flow;
+
+ /* look up using tcm handle */
+ q = qdisc_lookup(dev, tcm_handle);
+
+ /* call registered change function */
+ if (likely(q && q->ops)) {
+ if (likely(q->ops->change)) {
+ qdisc_len = q->q.qlen;
+ if (q->ops->change(q, &req.attr) != 0)
+ pr_err("%s(): qdisc change failed", __func__);
+ } else {
+ WARN_ONCE(1, "%s(): called on queue which does %s",
+ __func__, "not support change() operation");
+ }
+ } else {
+ WARN_ONCE(1, "%s(): called on bad queue", __func__);
+ }
+ return qdisc_len;
+}
+EXPORT_SYMBOL(tc_qdisc_flow_control);
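+
+/* Hypothetical caller sketch; tcm_handle must be the concrete handle of
+ * the qdisc to pause, as configured from userspace:
+ *
+ *	qlen = tc_qdisc_flow_control(dev, tcm_handle, 0);	// pause
+ *	...
+ *	tc_qdisc_flow_control(dev, tcm_handle, 1);		// resume
+ */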
+
+/*
* Create/change qdisc.
*/
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index fee1b15506b2..0d4630b155fe 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -18,6 +18,7 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
+#include <linux/netdevice.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
@@ -27,6 +28,7 @@ struct prio_sched_data {
struct tcf_proto __rcu *filter_list;
u8 prio2band[TC_PRIO_MAX+1];
struct Qdisc *queues[TCQ_PRIO_BANDS];
+ u8 enable_flow;
};
@@ -98,6 +100,9 @@ static struct sk_buff *prio_peek(struct Qdisc *sch)
struct prio_sched_data *q = qdisc_priv(sch);
int prio;
+ if (!q->enable_flow)
+ return NULL;
+
for (prio = 0; prio < q->bands; prio++) {
struct Qdisc *qdisc = q->queues[prio];
struct sk_buff *skb = qdisc->ops->peek(qdisc);
@@ -112,6 +117,9 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch)
struct prio_sched_data *q = qdisc_priv(sch);
int prio;
+ if (!q->enable_flow)
+ return NULL;
+
for (prio = 0; prio < q->bands; prio++) {
struct Qdisc *qdisc = q->queues[prio];
struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
@@ -152,6 +160,7 @@ prio_reset(struct Qdisc *sch)
for (prio = 0; prio < q->bands; prio++)
qdisc_reset(q->queues[prio]);
sch->q.qlen = 0;
+ q->enable_flow = 1;
}
static void
@@ -170,6 +179,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
struct prio_sched_data *q = qdisc_priv(sch);
struct tc_prio_qopt *qopt;
int i;
+ int flow_change = 0;
if (nla_len(opt) < sizeof(*qopt))
return -EINVAL;
@@ -184,6 +194,10 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
}
sch_tree_lock(sch);
+ if (q->enable_flow != qopt->enable_flow) {
+ q->enable_flow = qopt->enable_flow;
+ flow_change = 1;
+ }
q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
@@ -219,6 +233,13 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
}
}
}
+
+ /* Schedule qdisc when flow re-enabled */
+ if (flow_change && q->enable_flow) {
+ if (!test_bit(__QDISC_STATE_DEACTIVATED,
+ &sch->state))
+ __netif_schedule(qdisc_root(sch));
+ }
return 0;
}
@@ -248,6 +269,7 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
struct tc_prio_qopt opt;
opt.bands = q->bands;
+ opt.enable_flow = q->enable_flow;
memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
diff --git a/net/socket.c b/net/socket.c
index 1f69e48b1501..bb11725c0e50 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -89,6 +89,8 @@
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/xattr.h>
+#include <linux/seemp_api.h>
+#include <linux/seemp_instrumentation.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -115,6 +117,9 @@ unsigned int sysctl_net_busy_poll __read_mostly;
static ssize_t sock_read_iter(struct kiocb *iocb, struct iov_iter *to);
static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
+
+static BLOCKING_NOTIFIER_HEAD(sockev_notifier_list);
+
static int sock_mmap(struct file *file, struct vm_area_struct *vma);
static int sock_close(struct inode *inode, struct file *file);
@@ -169,6 +174,14 @@ static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
static DEFINE_PER_CPU(int, sockets_in_use);
/*
+ * Socket Event framework helpers
+ */
+static void sockev_notify(unsigned long event, struct socket *sk)
+{
+ blocking_notifier_call_chain(&sockev_notifier_list, event, sk);
+}
+
+/*
* Support routines.
* Move socket addresses back and forth across the kernel/user
* divide and look after the messy bits.
@@ -1248,6 +1261,9 @@ SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
if (retval < 0)
goto out;
+ if (retval == 0)
+ sockev_notify(SOCKEV_SOCKET, sock);
+
retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
if (retval < 0)
goto out_release;
@@ -1392,6 +1408,13 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
(struct sockaddr *)
&address, addrlen);
}
+ if (!err) {
+ if (sock->sk)
+ sock_hold(sock->sk);
+ sockev_notify(SOCKEV_BIND, sock);
+ if (sock->sk)
+ sock_put(sock->sk);
+ }
fput_light(sock->file, fput_needed);
}
return err;
@@ -1419,6 +1442,13 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
if (!err)
err = sock->ops->listen(sock, backlog);
+ if (!err) {
+ if (sock->sk)
+ sock_hold(sock->sk);
+ sockev_notify(SOCKEV_LISTEN, sock);
+ if (sock->sk)
+ sock_put(sock->sk);
+ }
fput_light(sock->file, fput_needed);
}
return err;
@@ -1506,7 +1536,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
fd_install(newfd, newfile);
err = newfd;
-
+ if (!err)
+ sockev_notify(SOCKEV_ACCEPT, sock);
out_put:
fput_light(sock->file, fput_needed);
out:
@@ -1556,6 +1587,8 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen,
sock->file->f_flags);
+ if (!err)
+ sockev_notify(SOCKEV_CONNECT, sock);
out_put:
fput_light(sock->file, fput_needed);
out:
@@ -1641,6 +1674,13 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
struct iovec iov;
int fput_needed;
+ seemp_logk_sendto(fd, buff, len, flags, addr, addr_len);
+
+ if (len > INT_MAX)
+ len = INT_MAX;
+ if (unlikely(!access_ok(VERIFY_READ, buff, len)))
+ return -EFAULT;
+
err = import_single_range(WRITE, buff, len, &iov, &msg.msg_iter);
if (unlikely(err))
return err;
@@ -1697,9 +1737,14 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
int err, err2;
int fput_needed;
+ if (size > INT_MAX)
+ size = INT_MAX;
+ if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size)))
+ return -EFAULT;
err = import_single_range(READ, ubuf, size, &iov, &msg.msg_iter);
if (unlikely(err))
return err;
+
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (!sock)
goto out;
@@ -1813,6 +1858,7 @@ SYSCALL_DEFINE2(shutdown, int, fd, int, how)
sock = sockfd_lookup_light(fd, &err, &fput_needed);
if (sock != NULL) {
+ sockev_notify(SOCKEV_SHUTDOWN, sock);
err = security_socket_shutdown(sock, how);
if (!err)
err = sock->ops->shutdown(sock, how);
@@ -3310,3 +3356,15 @@ int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
return sock->ops->shutdown(sock, how);
}
EXPORT_SYMBOL(kernel_sock_shutdown);
+
+int sockev_register_notify(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&sockev_notifier_list, nb);
+}
+EXPORT_SYMBOL(sockev_register_notify);
+
+int sockev_unregister_notify(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&sockev_notifier_list, nb);
+}
+EXPORT_SYMBOL(sockev_unregister_notify);
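+
+/* Hypothetical consumer sketch; the notifier data is the struct socket
+ * the event fired on:
+ *
+ *	static int sockev_cb(struct notifier_block *nb,
+ *			     unsigned long event, void *data)
+ *	{
+ *		struct socket *sock = data;
+ *
+ *		if (event == SOCKEV_CONNECT)
+ *			pr_debug("connect on sock %p\n", sock);
+ *		return NOTIFY_DONE;
+ *	}
+ *
+ *	static struct notifier_block sockev_nb = {
+ *		.notifier_call = sockev_cb,
+ *	};
+ *
+ *	sockev_register_notify(&sockev_nb);
+ */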
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index bdad1f951561..91d02ac0f42f 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -25,8 +25,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
return -ENOENT;
err = rdev_stop_ap(rdev, dev);
+ wdev->beacon_interval = 0;
if (!err) {
- wdev->beacon_interval = 0;
memset(&wdev->chandef, 0, sizeof(wdev->chandef));
wdev->ssid_len = 0;
rdev_set_qos_map(rdev, dev, NULL);
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 59cabc9bce69..d5ccaeaa76e0 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -319,7 +319,8 @@ static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy,
if (!c)
return -EINVAL;
- if (c->flags & IEEE80211_CHAN_RADAR)
+ if ((c->flags & IEEE80211_CHAN_RADAR) &&
+ !(wiphy->flags & WIPHY_FLAG_DFS_OFFLOAD))
return 1;
}
return 0;
@@ -479,7 +480,9 @@ static bool cfg80211_get_chans_dfs_available(struct wiphy *wiphy,
if (c->flags & IEEE80211_CHAN_DISABLED)
return false;
- if ((c->flags & IEEE80211_CHAN_RADAR) &&
+ /* check for radar flags */
+ if ((!(wiphy->flags & WIPHY_FLAG_DFS_OFFLOAD)) &&
+ (c->flags & IEEE80211_CHAN_RADAR) &&
(c->dfs_state != NL80211_DFS_AVAILABLE))
return false;
}
@@ -590,10 +593,17 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
for (freq = start_freq; freq <= end_freq; freq += 20) {
c = ieee80211_get_channel(wiphy, freq);
- if (!c || c->flags & prohibited_flags)
+
+ if (!c)
return false;
- }
+ if ((!(wiphy->flags & WIPHY_FLAG_DFS_OFFLOAD)) &&
+ (c->flags & prohibited_flags & IEEE80211_CHAN_RADAR))
+ return false;
+
+ if (c->flags & prohibited_flags & ~IEEE80211_CHAN_RADAR)
+ return false;
+ }
return true;
}
@@ -739,7 +749,7 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
* and thus fail the GO instantiation, consider only the interfaces of
* the current registered device.
*/
- list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
struct ieee80211_channel *other_chan = NULL;
int r1, r2;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 8f0bac7e03c4..01d0c4eb06fc 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -3,6 +3,7 @@
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2015 Intel Deutschland GmbH
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -157,7 +158,7 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
if (!(rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK))
return -EOPNOTSUPP;
- list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (!wdev->netdev)
continue;
wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
@@ -171,7 +172,8 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
/* failed -- clean up to old netns */
net = wiphy_net(&rdev->wiphy);
- list_for_each_entry_continue_reverse(wdev, &rdev->wdev_list,
+ list_for_each_entry_continue_reverse(wdev,
+ &rdev->wiphy.wdev_list,
list) {
if (!wdev->netdev)
continue;
@@ -230,7 +232,7 @@ void cfg80211_shutdown_all_interfaces(struct wiphy *wiphy)
ASSERT_RTNL();
- list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (wdev->netdev) {
dev_close(wdev->netdev);
continue;
@@ -298,7 +300,8 @@ void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
kfree(item);
spin_unlock_irq(&rdev->destroy_list_lock);
- list_for_each_entry_safe(wdev, tmp, &rdev->wdev_list, list) {
+ list_for_each_entry_safe(wdev, tmp,
+ &rdev->wiphy.wdev_list, list) {
if (nlportid == wdev->owner_nlportid)
rdev_del_virtual_intf(rdev, wdev);
}
@@ -400,7 +403,7 @@ use_default_name:
dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
}
- INIT_LIST_HEAD(&rdev->wdev_list);
+ INIT_LIST_HEAD(&rdev->wiphy.wdev_list);
INIT_LIST_HEAD(&rdev->beacon_registrations);
spin_lock_init(&rdev->beacon_registrations_lock);
spin_lock_init(&rdev->bss_lock);
@@ -616,6 +619,13 @@ int wiphy_register(struct wiphy *wiphy)
!rdev->ops->set_mac_acl)))
return -EINVAL;
+ /* ensure only valid behaviours are flagged by the driver;
+ * hence subtract 2, as bit 0 is invalid.
+ */
+ if (WARN_ON(wiphy->bss_select_support &&
+ (wiphy->bss_select_support & ~(BIT(__NL80211_BSS_SELECT_ATTR_AFTER_LAST) - 2))))
+ return -EINVAL;
+
if (wiphy->addresses)
memcpy(wiphy->perm_addr, wiphy->addresses[0].addr, ETH_ALEN);
@@ -730,6 +740,36 @@ int wiphy_register(struct wiphy *wiphy)
nl80211_send_reg_change_event(&request);
}
+ /* Check that nobody globally advertises any capabilities they do not
+ * advertise on all possible interface types.
+ */
+ if (wiphy->extended_capabilities_len &&
+ wiphy->num_iftype_ext_capab &&
+ wiphy->iftype_ext_capab) {
+ u8 supported_on_all, j;
+ const struct wiphy_iftype_ext_capab *capab;
+
+ capab = wiphy->iftype_ext_capab;
+ for (j = 0; j < wiphy->extended_capabilities_len; j++) {
+ if (capab[0].extended_capabilities_len > j)
+ supported_on_all =
+ capab[0].extended_capabilities[j];
+ else
+ supported_on_all = 0x00;
+ for (i = 1; i < wiphy->num_iftype_ext_capab; i++) {
+ if (j >= capab[i].extended_capabilities_len) {
+ supported_on_all = 0x00;
+ break;
+ }
+ supported_on_all &=
+ capab[i].extended_capabilities[j];
+ }
+ if (WARN_ON(wiphy->extended_capabilities[j] &
+ ~supported_on_all))
+ break;
+ }
+ }
+
rdev->wiphy.registered = true;
rtnl_unlock();
@@ -782,7 +822,7 @@ void wiphy_unregister(struct wiphy *wiphy)
nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY);
rdev->wiphy.registered = false;
- WARN_ON(!list_empty(&rdev->wdev_list));
+ WARN_ON(!list_empty(&rdev->wiphy.wdev_list));
/*
* First remove the hardware from everywhere, this makes
@@ -905,7 +945,6 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev,
sched_scan_req = rtnl_dereference(rdev->sched_scan_req);
if (sched_scan_req && dev == sched_scan_req->dev)
__cfg80211_stop_sched_scan(rdev, false);
-
#ifdef CONFIG_CFG80211_WEXT
kfree(wdev->wext.ie);
wdev->wext.ie = NULL;
@@ -914,6 +953,7 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev,
#endif
cfg80211_disconnect(rdev, dev,
WLAN_REASON_DEAUTH_LEAVING, true);
+ cfg80211_mlme_down(rdev, dev);
break;
case NL80211_IFTYPE_MESH_POINT:
__cfg80211_leave_mesh(rdev, dev);
@@ -940,6 +980,7 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev,
/* invalid */
break;
}
+ wdev->beacon_interval = 0;
}
void cfg80211_leave(struct cfg80211_registered_device *rdev,
@@ -1004,7 +1045,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
spin_lock_init(&wdev->mgmt_registrations_lock);
wdev->identifier = ++rdev->wdev_id;
- list_add_rcu(&wdev->list, &rdev->wdev_list);
+ list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
rdev->devlist_generation++;
/* can only change netns with wiphy */
dev->features |= NETIF_F_NETNS_LOCAL;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 47ea169aa0a3..54865316358e 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -50,8 +50,7 @@ struct cfg80211_registered_device {
/* wiphy index, internal only */
int wiphy_idx;
- /* associated wireless interfaces, protected by rtnl or RCU */
- struct list_head wdev_list;
+ /* protected by RTNL */
int devlist_generation, wdev_id;
int opencount; /* also protected by devlist_mtx */
wait_queue_head_t dev_wait;
@@ -215,7 +214,9 @@ struct cfg80211_event {
const u8 *resp_ie;
size_t req_ie_len;
size_t resp_ie_len;
- u16 status;
+ struct cfg80211_bss *bss;
+ int status; /* -1 = failed; 0..65535 = status code */
+ enum nl80211_timeout_reason timeout_reason;
} cr;
struct {
const u8 *req_ie;
@@ -375,8 +376,9 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len,
- u16 status, bool wextev,
- struct cfg80211_bss *bss);
+ int status, bool wextev,
+ struct cfg80211_bss *bss,
+ enum nl80211_timeout_reason timeout_reason);
void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
size_t ie_len, u16 reason, bool from_ap);
int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
@@ -472,7 +474,7 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
u32 *mask);
int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
- u32 beacon_int);
+ enum nl80211_iftype iftype, u32 beacon_int);
void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
enum nl80211_iftype iftype, int num);
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index a2fc3a09ccdc..89130cf4db04 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -1,17 +1,1491 @@
-#
-# This file is a placeholder to prevent accidental build breakage if someone
-# enables CONFIG_CFG80211_INTERNAL_REGDB. Almost no one actually needs to
-# enable that build option.
-#
-# You should be using CRDA instead. It is even better if you use the CRDA
-# package provided by your distribution, since they will probably keep it
-# up-to-date on your behalf.
-#
-# If you _really_ intend to use CONFIG_CFG80211_INTERNAL_REGDB then you will
-# need to replace this file with one containing appropriately formatted
-# regulatory rules that cover the regulatory domains you will be using. Your
-# best option is to extract the db.txt file from the wireless-regdb git
-# repository:
-#
-# git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git
-#
+# This is the world regulatory domain
+country 00:
+ (2402 - 2472 @ 40), (20)
+ # Channel 12 - 13.
+ (2457 - 2482 @ 40), (20), PASSIVE-SCAN, NO-IBSS
+ # Channel 14. Only JP enables this and for 802.11b only
+ (2474 - 2494 @ 20), (20), PASSIVE-SCAN, NO-IBSS, NO-OFDM
+ # Channel 36 - 48
+ (5170 - 5250 @ 80), (20), PASSIVE-SCAN, NO-IBSS
+ (5250 - 5330 @ 80), (20), PASSIVE-SCAN, NO-IBSS
+ (5490 - 5710 @ 80), (20), PASSIVE-SCAN, NO-IBSS
+ # NB: 5260 MHz - 5700 MHz requires DFS
+ # Channel 149 - 165
+ (5735 - 5835 @ 80), (20), PASSIVE-SCAN, NO-IBSS
+ # IEEE 802.11ad (60GHz), channels 1..3
+ (57240 - 63720 @ 2160), (0)
+
+
+country AE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country AF: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country AI: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country AL: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5150 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5350 @ 80), (23), DFS, AUTO-BW
+ (5470 - 5710 @ 160), (30), DFS
+
+country AM: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 20), (18)
+ (5250 - 5330 @ 20), (18), DFS
+
+country AN: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country AR:
+ (2402 - 2482 @ 40), (36)
+ (5170 - 5330 @ 160), (23)
+ (5490 - 5590 @ 80), (36)
+ (5650 - 5730 @ 80), (36)
+ (5735 - 5835 @ 80), (36)
+
+country AS: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5850 @ 80), (30)
+
+country AT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country AU: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5590 @ 80), (24), DFS
+ (5650 - 5730 @ 80), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country AW: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country AZ: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (18), AUTO-BW
+ (5250 - 5330 @ 80), (18), DFS, AUTO-BW
+
+country BA: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country BB: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (30)
+
+country BD:
+ (2402 - 2482 @ 40), (20)
+ (5735 - 5835 @ 80), (30)
+
+country BE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country BF: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country BG: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country BH:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 20), (23)
+ (5735 - 5835 @ 20), (33)
+
+country BL: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country BM: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country BN: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (20), AUTO-BW
+ (5250 - 5330 @ 80), (20), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (20)
+
+country BO: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5250 - 5330 @ 80), (30), DFS
+ (5735 - 5835 @ 80), (30)
+
+country BR: DFS-FCC
+ (2402 - 2482 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country BS: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country BT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country BY: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country BZ:
+ (2402 - 2482 @ 40), (36)
+ (5170 - 5330 @ 160), (27)
+ (5490 - 5730 @ 160), (36)
+ (5735 - 5835 @ 80), (36)
+
+country CA: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5590 @ 80), (24), DFS
+ (5650 - 5730 @ 80), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country CF: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 40), (24)
+ (5250 - 5330 @ 40), (24), DFS
+ (5490 - 5730 @ 40), (24), DFS
+ (5735 - 5835 @ 40), (30)
+
+country CH: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+
+country CI: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country CL:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (20)
+ (5735 - 5835 @ 80), (20)
+
+country CN: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (30)
+ # 60 gHz band channels 1,4: 28dBm, channels 2,3: 44dBm
+ # ref: http://www.miit.gov.cn/n11293472/n11505629/n11506593/n11960250/n11960606/n11960700/n12330791.files/n12330790.pdf
+ (57240 - 59400 @ 2160), (28)
+ (59400 - 63720 @ 2160), (44)
+ (63720 - 65880 @ 2160), (28)
+
+country CO: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country CR: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 20), (24)
+ (5250 - 5330 @ 20), (24), DFS
+ (5490 - 5730 @ 20), (24), DFS
+ (5735 - 5835 @ 20), (30)
+
+country CX: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country CY: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+# Data from http://www.ctu.eu/164/download/VOR/VOR-12-08-2005-34.pdf
+# and http://www.ctu.eu/164/download/VOR/VOR-12-05-2007-6-AN.pdf
+country CZ: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+# Data from "Frequenznutzungsplan" (as published in April 2008), downloaded from
+# http://www.bundesnetzagentur.de/cae/servlet/contentblob/38448/publicationFile/2659/Frequenznutzungsplan2008_Id17448pdf.pdf
+# For the 5GHz range also see
+# http://www.bundesnetzagentur.de/cae/servlet/contentblob/38216/publicationFile/6579/WLAN5GHzVfg7_2010_28042010pdf.pdf
+
+country DE: DFS-ETSI
+ # entries 279004 and 280006
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country DK: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country DM: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (30)
+
+country DO: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (30)
+
+country DZ: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5670 @ 160), (23), DFS
+
+country EC: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 20), (24)
+ (5250 - 5330 @ 20), (24), DFS
+ (5490 - 5730 @ 20), (24), DFS
+ (5735 - 5835 @ 20), (30)
+
+country EE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country EG: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 40), (23)
+ (5250 - 5330 @ 40), (23), DFS
+
+country ES: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country ET: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country FI: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country FM: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country FR: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country GB: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country GD: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country GE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (18), AUTO-BW
+ (5250 - 5330 @ 80), (18), DFS, AUTO-BW
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country GF: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country GH: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country GL: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country GP: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country GR: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country GT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country GU: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country GY:
+ (2402 - 2482 @ 40), (30)
+ (5735 - 5835 @ 80), (30)
+
+country HK: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country HN:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (24)
+ (5490 - 5730 @ 160), (24)
+ (5735 - 5835 @ 80), (30)
+
+country HR: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country HT: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country HU: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 gHz band channels 1-4, ref: Etsi En 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country ID:
+ # ref: http://www.postel.go.id/content/ID/regulasi/standardisasi/kepdir/bwa%205,8%20ghz.pdf
+ (2402 - 2482 @ 40), (30)
+ (5735 - 5815 @ 20), (30)
+
+country IE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9ghz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country IL: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+
+country IN:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (23)
+ (5735 - 5835 @ 80), (30)
+
+country IR:
+ (2402 - 2482 @ 40), (20)
+ (5735 - 5835 @ 80), (30)
+
+country IS: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country IT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country JM: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country JO:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23)
+ (5735 - 5835 @ 80), (23)
+
+country JP: DFS-JP
+ (2402 - 2482 @ 40), (20)
+ (2474 - 2494 @ 20), (20), NO-OFDM
+ (5170 - 5250 @ 80), (20), AUTO-BW, NO-OUTDOOR
+ (5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR
+ (5490 - 5710 @ 160), (20), DFS
+ # 60 GHz band channels 2-4 at 10 mW,
+ # ref: http://www.arib.or.jp/english/html/overview/doc/1-STD-T74v1_1.pdf
+ (59000 - 66000 @ 2160), (10 mW)
+
+country KE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23)
+ (5490 - 5570 @ 80), (30), DFS
+ (5735 - 5775 @ 40), (23)
+
+country KH: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country KN: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ (5735 - 5815 @ 80), (30)
+
+country KR: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (20), AUTO-BW
+ (5250 - 5330 @ 80), (20), DFS, AUTO-BW
+ (5490 - 5710 @ 80), (30), DFS
+ (5735 - 5835 @ 80), (30)
+ # 60 GHz band channels 1-4,
+ # ref: http://www.law.go.kr/%ED%96%89%EC%A0%95%EA%B7%9C%EC%B9%99/%EB%AC%B4%EC%84%A0%EC%84%A4%EB%B9%84%EA%B7%9C%EC%B9%99
+ (57000 - 66000 @ 2160), (43)
+
+country KP: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (20)
+ (5250 - 5330 @ 80), (20), DFS
+ (5490 - 5630 @ 80), (30), DFS
+ (5735 - 5815 @ 80), (30)
+
+country KW: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+
+country KY: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country KZ:
+ (2402 - 2482 @ 40), (20)
+
+country LB: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country LC: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (20), AUTO-BW
+ (5250 - 5330 @ 80), (30), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ (5735 - 5815 @ 80), (30)
+
+country LI: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+
+country LK: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 20), (24)
+ (5250 - 5330 @ 20), (24), DFS
+ (5490 - 5730 @ 20), (24), DFS
+ (5735 - 5835 @ 20), (30)
+
+country LS: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country LT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country LU: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country LV: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country MA: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+
+country MC: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country MD: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country ME: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country MF: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country MH: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country MK: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country MN: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country MO: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country MP: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country MQ: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country MR: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country MT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country MU: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country MV: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (20), AUTO-BW
+ (5250 - 5330 @ 80), (20), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (20)
+
+country MW: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country MX: DFS-FCC
+ (2402 - 2482 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country MY: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5650 @ 160), (24), DFS
+ (5735 - 5815 @ 80), (24)
+
+country NA: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ (5735 - 5835 @ 80), (33)
+
+country NG: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5250 - 5330 @ 80), (30), DFS
+ (5735 - 5835 @ 80), (30)
+
+country NI: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country NL: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country NO: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country NP:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (20)
+ (5735 - 5835 @ 80), (20)
+
+country NZ: DFS-FCC
+ (2402 - 2482 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country OM: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country PA:
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (30), AUTO-BW
+ (5735 - 5835 @ 80), (36)
+
+country PE: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country PF: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country PG: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country PH: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country PK:
+ (2402 - 2482 @ 40), (30)
+ (5735 - 5835 @ 80), (30)
+
+country PL: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country PM: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country PR: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+# Public Safety FCCA, FCC4
+# 27 dBm [4.9 GHz 1/4 rate], 30 dBm [1/2 rate], 33 dBm [full rate]; 5 GHz same as FCC1
+# db.txt cannot express this per-rate limitation, so all 5 GHz channels are disabled for FCC4
+country PS: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (4940 - 4990 @ 40), (33)
+
+country PT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country PW: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country PY: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country QA:
+ (2402 - 2482 @ 40), (20)
+ (5735 - 5835 @ 80), (30)
+
+country RE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country RO: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+# Source:
+# http://www.ratel.rs/upload/documents/Plan_namene/Plan_namene-sl_glasnik.pdf
+country RS: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country RU:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (23)
+ (5490 - 5730 @ 160), (30)
+ (5735 - 5835 @ 80), (30)
+
+country RW: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country SA: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country SE: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country SG: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country SI: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country SK: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 5.9 GHz band
+ # reference: http://www.etsi.org/deliver/etsi_en/302500_302599/302571/01.02.00_20/en_302571v010200a.pdf
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country SN:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (24)
+ (5490 - 5730 @ 160), (24)
+ (5735 - 5835 @ 80), (30)
+
+country SR: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country SV: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 20), (23)
+ (5250 - 5330 @ 20), (23), DFS
+ (5735 - 5835 @ 20), (30)
+
+country SY:
+ (2402 - 2482 @ 40), (20)
+
+country TC: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country TD: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country TG: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 40), (23)
+ (5250 - 5330 @ 40), (23), DFS
+ (5490 - 5710 @ 40), (30), DFS
+
+country TH: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country TN: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+
+country TR: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country TT:
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (27)
+ (5490 - 5730 @ 160), (36)
+ (5735 - 5835 @ 80), (36)
+
+country TW: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country TZ:
+ (2402 - 2482 @ 40), (20)
+ (5735 - 5835 @ 80), (30)
+
+# Source:
+# #914 / 06 Sep 2007: http://www.ucrf.gov.ua/uk/doc/nkrz/1196068874
+# #1174 / 23 Oct 2008: http://www.nkrz.gov.ua/uk/activities/ruling/1225269361
+# (appendix 8)
+# The listed 5 GHz range is the lowest common denominator of all
+# related rules in the referenced laws; it is used because the
+# definitions there are ambiguous.
+country UA: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (20), AUTO-BW
+ (5250 - 5330 @ 80), (20), DFS, AUTO-BW
+ (5490 - 5670 @ 160), (20), DFS
+ (5735 - 5835 @ 80), (20)
+ # 60 GHz band channels 1-4, ref: ETSI EN 302 567
+ (57240 - 65880 @ 2160), (40), NO-OUTDOOR
+
+country UG: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country US: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+ # 5.9 GHz band
+ # reference: https://apps.fcc.gov/edocs_public/attachmatch/FCC-03-324A1.pdf
+ (5842 - 5863 @ 5), (30)
+ (5850 - 5870 @ 10), (30)
+ (5860 - 5880 @ 10), (30)
+ (5865 - 5885 @ 20), (30)
+ (5870 - 5890 @ 10), (30)
+ (5880 - 5900 @ 10), (30)
+ (5890 - 5910 @ 10), (30)
+ (5895 - 5915 @ 20), (30)
+ (5900 - 5920 @ 10), (30)
+ (5910 - 5930 @ 10), (30)
+ # 60 GHz band
+ # reference: http://cfr.regstoday.com/47cfr15.aspx#47_CFR_15p255
+ # channels 1-3, EIRP = 40 dBm (43 dBm peak)
+ (57240 - 63720 @ 2160), (40)
+
+country UY: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (30)
+
+country UZ: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (20), DFS, AUTO-BW
+
+country VC: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country VE: DFS-FCC
+ (2402 - 2482 @ 40), (30)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5735 - 5835 @ 80), (30)
+
+country VI: DFS-FCC
+ (2402 - 2472 @ 40), (30)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country VN: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country VU: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country WF: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country WS: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 40), (23)
+ (5250 - 5330 @ 40), (23), DFS
+ (5490 - 5710 @ 40), (30), DFS
+
+country XA: DFS-JP
+ (2402 - 2482 @ 40), (20)
+ (2474 - 2494 @ 20), (20), NO-OFDM
+ (5170 - 5250 @ 80), (20), NO-IR, AUTO-BW, NO-OUTDOOR
+ (5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR
+ (5490 - 5710 @ 160), (20), DFS
+ (59000 - 66000 @ 2160), (10 mW)
+
+country YE:
+ (2402 - 2482 @ 40), (20)
+
+country YT: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
+country ZA: DFS-FCC
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (24), AUTO-BW
+ (5250 - 5330 @ 80), (24), DFS, AUTO-BW
+ (5490 - 5730 @ 160), (24), DFS
+ (5735 - 5835 @ 80), (30)
+
+country ZW: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
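
Each rule line in the db.txt entries above follows the wireless-regdb syntax "(start-MHz - end-MHz @ max-bandwidth-MHz), (max-EIRP-dBm), FLAGS". The kernel consumes the compiled regulatory.bin rather than this text, but a minimal sketch of the tokenization may help read the entries; the helper below is hypothetical (not kernel code) and handles only the plain-dBm power form, not the JP-style "(10 mW)":

#include <stdio.h>
#include <string.h>

struct reg_rule {
	unsigned int start_khz, end_khz, max_bw_khz, max_eirp_mbm;
	int dfs, auto_bw;
};

/* Hypothetical helper: tokenize one db.txt rule line such as
 * "(5250 - 5330 @ 80), (23), DFS, AUTO-BW". */
static int parse_rule(const char *line, struct reg_rule *r)
{
	unsigned int start, end, bw, eirp;

	if (sscanf(line, " (%u - %u @ %u), (%u)",
		   &start, &end, &bw, &eirp) != 4)
		return -1;
	r->start_khz = start * 1000;	/* MHz -> kHz */
	r->end_khz = end * 1000;
	r->max_bw_khz = bw * 1000;
	r->max_eirp_mbm = eirp * 100;	/* dBm -> mBm, cfg80211's unit */
	r->dfs = !!strstr(line, "DFS");
	r->auto_bw = !!strstr(line, "AUTO-BW");
	return 0;
}

int main(void)
{
	struct reg_rule r;

	if (!parse_rule("(5250 - 5330 @ 80), (23), DFS, AUTO-BW", &r))
		printf("%u-%u kHz @ max %u kHz, %u mBm, dfs=%d auto_bw=%d\n",
		       r.start_khz, r.end_khz, r.max_bw_khz,
		       r.max_eirp_mbm, r.dfs, r.auto_bw);
	return 0;
}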
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index c0e02f72e931..1b97f978ccd6 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -48,7 +48,8 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
/* update current_bss etc., consumes the bss reference */
__cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
status_code,
- status_code == WLAN_STATUS_SUCCESS, bss);
+ status_code == WLAN_STATUS_SUCCESS, bss,
+ NL80211_TIMEOUT_UNSPECIFIED);
}
EXPORT_SYMBOL(cfg80211_rx_assoc_resp);
@@ -656,8 +657,25 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
return err;
}
- if (!ether_addr_equal(mgmt->sa, wdev_address(wdev)))
- return -EINVAL;
+ if (!ether_addr_equal(mgmt->sa, wdev_address(wdev))) {
+ /* Allow random TA to be used with Public Action frames if the
+ * driver has indicated support for this. Otherwise, only allow
+ * the local address to be used.
+ */
+ if (!ieee80211_is_action(mgmt->frame_control) ||
+ mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
+ return -EINVAL;
+ if (!wdev->current_bss &&
+ !wiphy_ext_feature_isset(
+ &rdev->wiphy,
+ NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA))
+ return -EINVAL;
+ if (wdev->current_bss &&
+ !wiphy_ext_feature_isset(
+ &rdev->wiphy,
+ NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED))
+ return -EINVAL;
+ }
/* Transmit the Action frame as requested by user space */
return rdev_mgmt_tx(rdev, wdev, params, cookie);
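
The hunk above relaxes the source-address check only for Public Action frames, gated on the two extended feature flags it references. A condensed, self-contained restatement of that decision (illustrative sketch, not the kernel function itself):

#include <stdbool.h>

/* Sketch: may the transmitter address differ from the wdev address?
 * Mirrors the policy added to cfg80211_mlme_mgmt_tx() above. */
static bool random_ta_allowed(bool is_public_action, bool connected,
			      bool feat_random_ta,
			      bool feat_random_ta_connected)
{
	if (!is_public_action)
		return false;		 /* only Public Action frames qualify */
	if (!connected)
		return feat_random_ta;	 /* NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA */
	return feat_random_ta_connected; /* ..._MGMT_TX_RANDOM_TA_CONNECTED */
}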
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 1f0de6d74daa..58b45366c42c 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -103,7 +103,7 @@ __cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs)
if (have_wdev_id && rdev->wiphy_idx != wiphy_idx)
continue;
- list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (have_ifidx && wdev->netdev &&
wdev->netdev->ifindex == ifidx) {
result = wdev;
@@ -149,7 +149,7 @@ __cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
tmp = cfg80211_rdev_by_wiphy_idx(wdev_id >> 32);
if (tmp) {
/* make sure wdev exists */
- list_for_each_entry(wdev, &tmp->wdev_list, list) {
+ list_for_each_entry(wdev, &tmp->wiphy.wdev_list, list) {
if (wdev->identifier != (u32)wdev_id)
continue;
found = true;
@@ -401,6 +401,14 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
[NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
[NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG },
+ [NL80211_ATTR_PBSS] = { .type = NLA_FLAG },
+ [NL80211_ATTR_BSS_SELECT] = { .type = NLA_NESTED },
+ [NL80211_ATTR_BSSID] = { .len = ETH_ALEN },
+ [NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] = { .type = NLA_S8 },
+ [NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = {
+ .len = sizeof(struct nl80211_bss_select_rssi_adjust)
+ },
+ [NL80211_ATTR_TIMEOUT_REASON] = { .type = NLA_U32 },
};
/* policy for the key attributes */
@@ -485,6 +493,15 @@ nl80211_plan_policy[NL80211_SCHED_SCAN_PLAN_MAX + 1] = {
[NL80211_SCHED_SCAN_PLAN_ITERATIONS] = { .type = NLA_U32 },
};
+static const struct nla_policy
+nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = {
+ [NL80211_BSS_SELECT_ATTR_RSSI] = { .type = NLA_FLAG },
+ [NL80211_BSS_SELECT_ATTR_BAND_PREF] = { .type = NLA_U32 },
+ [NL80211_BSS_SELECT_ATTR_RSSI_ADJUST] = {
+ .len = sizeof(struct nl80211_bss_select_rssi_adjust)
+ },
+};
+
static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct cfg80211_registered_device **rdev,
@@ -523,7 +540,7 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
*rdev = wiphy_to_rdev(wiphy);
*wdev = NULL;
- list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
+ list_for_each_entry(tmp, &(*rdev)->wiphy.wdev_list, list) {
if (tmp->identifier == cb->args[1]) {
*wdev = tmp;
break;
@@ -1002,6 +1019,10 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS,
c->radar_detect_regions)))
goto nla_put_failure;
+ if (c->beacon_int_min_gcd &&
+ nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD,
+ c->beacon_int_min_gcd))
+ goto nla_put_failure;
nla_nest_end(msg, nl_combi);
}
@@ -1252,7 +1273,7 @@ nl80211_send_mgmt_stypes(struct sk_buff *msg,
struct nl80211_dump_wiphy_state {
s64 filter_wiphy;
long start;
- long split_start, band_start, chan_start;
+ long split_start, band_start, chan_start, capa_start;
bool split;
};
@@ -1546,6 +1567,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
if (rdev->wiphy.features &
NL80211_FEATURE_SUPPORTS_WMM_ADMISSION)
CMD(add_tx_ts, ADD_TX_TS);
+ CMD(update_connect_params, UPDATE_CONNECT_PARAMS);
}
/* add into the if now */
#undef CMD
@@ -1730,6 +1752,66 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
rdev->wiphy.ext_features))
goto nla_put_failure;
+ state->split_start++;
+ break;
+ case 13:
+ if (rdev->wiphy.num_iftype_ext_capab &&
+ rdev->wiphy.iftype_ext_capab) {
+ struct nlattr *nested_ext_capab, *nested;
+
+ nested = nla_nest_start(msg,
+ NL80211_ATTR_IFTYPE_EXT_CAPA);
+ if (!nested)
+ goto nla_put_failure;
+
+ for (i = state->capa_start;
+ i < rdev->wiphy.num_iftype_ext_capab; i++) {
+ const struct wiphy_iftype_ext_capab *capab;
+
+ capab = &rdev->wiphy.iftype_ext_capab[i];
+
+ nested_ext_capab = nla_nest_start(msg, i);
+ if (!nested_ext_capab ||
+ nla_put_u32(msg, NL80211_ATTR_IFTYPE,
+ capab->iftype) ||
+ nla_put(msg, NL80211_ATTR_EXT_CAPA,
+ capab->extended_capabilities_len,
+ capab->extended_capabilities) ||
+ nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
+ capab->extended_capabilities_len,
+ capab->extended_capabilities_mask))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, nested_ext_capab);
+ if (state->split)
+ break;
+ }
+ nla_nest_end(msg, nested);
+ if (i < rdev->wiphy.num_iftype_ext_capab) {
+ state->capa_start = i + 1;
+ break;
+ }
+ }
+
+ if (rdev->wiphy.bss_select_support) {
+ struct nlattr *nested;
+ u32 bss_select_support = rdev->wiphy.bss_select_support;
+
+ nested = nla_nest_start(msg, NL80211_ATTR_BSS_SELECT);
+ if (!nested)
+ goto nla_put_failure;
+
+ i = 0;
+ while (bss_select_support) {
+ if ((bss_select_support & 1) &&
+ nla_put_flag(msg, i))
+ goto nla_put_failure;
+ i++;
+ bss_select_support >>= 1;
+ }
+ nla_nest_end(msg, nested);
+ }
+
/* done */
state->split_start = 0;
break;
@@ -2458,7 +2540,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
}
if_idx = 0;
- list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (if_idx < if_start) {
if_idx++;
continue;
@@ -2730,7 +2812,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
spin_lock_init(&wdev->mgmt_registrations_lock);
wdev->identifier = ++rdev->wdev_id;
- list_add_rcu(&wdev->list, &rdev->wdev_list);
+ list_add_rcu(&wdev->list, &rdev->wiphy.wdev_list);
rdev->devlist_generation++;
break;
default:
@@ -3203,6 +3285,291 @@ static int nl80211_set_mac_acl(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
+ u8 *rates, u8 rates_len)
+{
+ u8 i;
+ u32 mask = 0;
+
+ for (i = 0; i < rates_len; i++) {
+ int rate = (rates[i] & 0x7f) * 5;
+ int ridx;
+
+ for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
+ struct ieee80211_rate *srate =
+ &sband->bitrates[ridx];
+ if (rate == srate->bitrate) {
+ mask |= 1 << ridx;
+ break;
+ }
+ }
+ if (ridx == sband->n_bitrates)
+ return 0; /* rate not found */
+ }
+
+ return mask;
+}
+
+static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
+ u8 *rates, u8 rates_len,
+ u8 mcs[IEEE80211_HT_MCS_MASK_LEN])
+{
+ u8 i;
+
+ memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN);
+
+ for (i = 0; i < rates_len; i++) {
+ int ridx, rbit;
+
+ ridx = rates[i] / 8;
+ rbit = BIT(rates[i] % 8);
+
+ /* check validity */
+ if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN))
+ return false;
+
+ /* check availability */
+ if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
+ mcs[ridx] |= rbit;
+ else
+ return false;
+ }
+
+ return true;
+}
+
+static u16 vht_mcs_map_to_mcs_mask(u8 vht_mcs_map)
+{
+ u16 mcs_mask = 0;
+
+ switch (vht_mcs_map) {
+ case IEEE80211_VHT_MCS_NOT_SUPPORTED:
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ mcs_mask = 0x00FF;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ mcs_mask = 0x01FF;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ mcs_mask = 0x03FF;
+ break;
+ default:
+ break;
+ }
+
+ return mcs_mask;
+}
+
+static void vht_build_mcs_mask(u16 vht_mcs_map,
+ u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ u8 nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ vht_mcs_mask[nss] = vht_mcs_map_to_mcs_mask(vht_mcs_map & 0x03);
+ vht_mcs_map >>= 2;
+ }
+}
+
+static bool vht_set_mcs_mask(struct ieee80211_supported_band *sband,
+ struct nl80211_txrate_vht *txrate,
+ u16 mcs[NL80211_VHT_NSS_MAX])
+{
+ u16 tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u16 tx_mcs_mask[NL80211_VHT_NSS_MAX] = {};
+ u8 i;
+
+ if (!sband->vht_cap.vht_supported)
+ return false;
+
+ memset(mcs, 0, sizeof(u16) * NL80211_VHT_NSS_MAX);
+
+ /* Build vht_mcs_mask from VHT capabilities */
+ vht_build_mcs_mask(tx_mcs_map, tx_mcs_mask);
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i])
+ mcs[i] = txrate->mcs[i];
+ else
+ return false;
+ }
+
+ return true;
+}
+
+static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
+ [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
+ .len = NL80211_MAX_SUPP_RATES },
+ [NL80211_TXRATE_HT] = { .type = NLA_BINARY,
+ .len = NL80211_MAX_SUPP_HT_RATES },
+ [NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
+ [NL80211_TXRATE_GI] = { .type = NLA_U8 },
+};
+
+static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
+ struct cfg80211_bitrate_mask *mask)
+{
+ struct nlattr *tb[NL80211_TXRATE_MAX + 1];
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ int rem, i;
+ struct nlattr *tx_rates;
+ struct ieee80211_supported_band *sband;
+ u16 vht_tx_mcs_map;
+
+ memset(mask, 0, sizeof(*mask));
+ /* Default to all rates enabled */
+ for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+ sband = rdev->wiphy.bands[i];
+
+ if (!sband)
+ continue;
+
+ mask->control[i].legacy = (1 << sband->n_bitrates) - 1;
+ memcpy(mask->control[i].ht_mcs,
+ sband->ht_cap.mcs.rx_mask,
+ sizeof(mask->control[i].ht_mcs));
+
+ if (!sband->vht_cap.vht_supported)
+ continue;
+
+ vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs);
+ }
+
+ /* if no rates are given set it back to the defaults */
+ if (!info->attrs[NL80211_ATTR_TX_RATES])
+ goto out;
+
+ /* The nested attribute uses enum nl80211_band as the index. This maps
+ * directly to the enum ieee80211_band values used in cfg80211.
+ */
+ BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
+ nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
+ enum ieee80211_band band = nla_type(tx_rates);
+ int err;
+
+ if (band < 0 || band >= IEEE80211_NUM_BANDS)
+ return -EINVAL;
+ sband = rdev->wiphy.bands[band];
+ if (sband == NULL)
+ return -EINVAL;
+ err = nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
+ nla_len(tx_rates), nl80211_txattr_policy);
+ if (err)
+ return err;
+ if (tb[NL80211_TXRATE_LEGACY]) {
+ mask->control[band].legacy = rateset_to_mask(
+ sband,
+ nla_data(tb[NL80211_TXRATE_LEGACY]),
+ nla_len(tb[NL80211_TXRATE_LEGACY]));
+ if ((mask->control[band].legacy == 0) &&
+ nla_len(tb[NL80211_TXRATE_LEGACY]))
+ return -EINVAL;
+ }
+ if (tb[NL80211_TXRATE_HT]) {
+ if (!ht_rateset_to_mask(
+ sband,
+ nla_data(tb[NL80211_TXRATE_HT]),
+ nla_len(tb[NL80211_TXRATE_HT]),
+ mask->control[band].ht_mcs))
+ return -EINVAL;
+ }
+ if (tb[NL80211_TXRATE_VHT]) {
+ if (!vht_set_mcs_mask(
+ sband,
+ nla_data(tb[NL80211_TXRATE_VHT]),
+ mask->control[band].vht_mcs))
+ return -EINVAL;
+ }
+ if (tb[NL80211_TXRATE_GI]) {
+ mask->control[band].gi =
+ nla_get_u8(tb[NL80211_TXRATE_GI]);
+ if (mask->control[band].gi > NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+ }
+
+ if (mask->control[band].legacy == 0) {
+ /* don't allow empty legacy rates if HT or VHT
+ * are not even supported.
+ */
+ if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported ||
+ rdev->wiphy.bands[band]->vht_cap.vht_supported))
+ return -EINVAL;
+
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
+ if (mask->control[band].ht_mcs[i])
+ goto out;
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
+ if (mask->control[band].vht_mcs[i])
+ goto out;
+
+ /* legacy and MCS rates must not both be empty */
+ return -EINVAL;
+ }
+ }
+
+out:
+ return 0;
+}
+
+static int validate_beacon_tx_rate(struct cfg80211_registered_device *rdev,
+ enum nl80211_band band,
+ struct cfg80211_bitrate_mask *beacon_rate)
+{
+ u32 count_ht, count_vht, i;
+ u32 rate = beacon_rate->control[band].legacy;
+
+ /* Allow only one rate */
+ if (hweight32(rate) > 1)
+ return -EINVAL;
+
+ count_ht = 0;
+ for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++) {
+ if (hweight8(beacon_rate->control[band].ht_mcs[i]) > 1) {
+ return -EINVAL;
+ } else if (beacon_rate->control[band].ht_mcs[i]) {
+ count_ht++;
+ if (count_ht > 1)
+ return -EINVAL;
+ }
+ if (count_ht && rate)
+ return -EINVAL;
+ }
+
+ count_vht = 0;
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ if (hweight16(beacon_rate->control[band].vht_mcs[i]) > 1) {
+ return -EINVAL;
+ } else if (beacon_rate->control[band].vht_mcs[i]) {
+ count_vht++;
+ if (count_vht > 1)
+ return -EINVAL;
+ }
+ if (count_vht && rate)
+ return -EINVAL;
+ }
+
+ if ((count_ht && count_vht) || (!rate && !count_ht && !count_vht))
+ return -EINVAL;
+
+ if (rate &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_LEGACY))
+ return -EINVAL;
+ if (count_ht &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_HT))
+ return -EINVAL;
+ if (count_vht &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_BEACON_RATE_VHT))
+ return -EINVAL;
+
+ return 0;
+}
+
static int nl80211_parse_beacon(struct nlattr *attrs[],
struct cfg80211_beacon_data *bcn)
{
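
vht_build_mcs_mask() in the hunk above unpacks the 16-bit VHT MCS map two bits at a time, one field per spatial stream (0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = not supported). As a worked example, the common two-stream map 0xfffa yields mask 0x03ff for NSS 1-2 and 0 for NSS 3-8. A standalone sketch of the same unpacking:

#include <stdio.h>

/* Illustrative re-implementation of the two-bits-per-NSS unpacking
 * done by vht_build_mcs_mask(); prints the per-stream MCS masks. */
static unsigned short mcs_field_to_mask(unsigned int v)
{
	switch (v) {
	case 0: return 0x00ff;	/* MCS 0-7 */
	case 1: return 0x01ff;	/* MCS 0-8 */
	case 2: return 0x03ff;	/* MCS 0-9 */
	default: return 0;	/* not supported */
	}
}

int main(void)
{
	unsigned int map = 0xfffa;	/* NSS 1-2: MCS 0-9, NSS 3-8: none */
	int nss;

	for (nss = 1; nss <= 8; nss++, map >>= 2)
		printf("NSS%d: 0x%04x\n", nss, mcs_field_to_mask(map & 0x3));
	return 0;
}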
@@ -3266,7 +3633,7 @@ static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev;
bool ret = false;
- list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (wdev->iftype != NL80211_IFTYPE_AP &&
wdev->iftype != NL80211_IFTYPE_P2P_GO)
continue;
@@ -3341,7 +3708,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
params.dtim_period =
nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
- err = cfg80211_validate_beacon_int(rdev, params.beacon_interval);
+ err = cfg80211_validate_beacon_int(rdev, dev->ieee80211_ptr->iftype,
+ params.beacon_interval);
if (err)
return err;
@@ -3432,6 +3800,17 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
wdev->iftype))
return -EINVAL;
+ if (info->attrs[NL80211_ATTR_TX_RATES]) {
+ err = nl80211_parse_tx_bitrate_mask(info, &params.beacon_rate);
+ if (err)
+ return err;
+
+ err = validate_beacon_tx_rate(rdev, params.chandef.chan->band,
+ &params.beacon_rate);
+ if (err)
+ return err;
+ }
+
if (info->attrs[NL80211_ATTR_SMPS_MODE]) {
params.smps_mode =
nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]);
@@ -3455,6 +3834,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
params.smps_mode = NL80211_SMPS_OFF;
}
+ params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+ if (params.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ])
+ return -EOPNOTSUPP;
+
if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
params.acl = parse_acl_data(&rdev->wiphy, info);
if (IS_ERR(params.acl))
@@ -5738,6 +6121,73 @@ static int validate_scan_freqs(struct nlattr *freqs)
return n_channels;
}
+static bool is_band_valid(struct wiphy *wiphy, enum ieee80211_band b)
+{
+ return b < IEEE80211_NUM_BANDS && wiphy->bands[b];
+}
+
+static int parse_bss_select(struct nlattr *nla, struct wiphy *wiphy,
+ struct cfg80211_bss_selection *bss_select)
+{
+ struct nlattr *attr[NL80211_BSS_SELECT_ATTR_MAX + 1];
+ struct nlattr *nest;
+ int err;
+ bool found = false;
+ int i;
+
+ /* only process one nested attribute */
+ nest = nla_data(nla);
+ if (!nla_ok(nest, nla_len(nest)))
+ return -EINVAL;
+
+ err = nla_parse(attr, NL80211_BSS_SELECT_ATTR_MAX, nla_data(nest),
+ nla_len(nest), nl80211_bss_select_policy);
+ if (err)
+ return err;
+
+ /* only one attribute may be given */
+ for (i = 0; i <= NL80211_BSS_SELECT_ATTR_MAX; i++) {
+ if (attr[i]) {
+ if (found)
+ return -EINVAL;
+ found = true;
+ }
+ }
+
+ bss_select->behaviour = __NL80211_BSS_SELECT_ATTR_INVALID;
+
+ if (attr[NL80211_BSS_SELECT_ATTR_RSSI])
+ bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI;
+
+ if (attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]) {
+ bss_select->behaviour = NL80211_BSS_SELECT_ATTR_BAND_PREF;
+ bss_select->param.band_pref =
+ nla_get_u32(attr[NL80211_BSS_SELECT_ATTR_BAND_PREF]);
+ if (!is_band_valid(wiphy, bss_select->param.band_pref))
+ return -EINVAL;
+ }
+
+ if (attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]) {
+ struct nl80211_bss_select_rssi_adjust *adj_param;
+
+ adj_param = nla_data(attr[NL80211_BSS_SELECT_ATTR_RSSI_ADJUST]);
+ bss_select->behaviour = NL80211_BSS_SELECT_ATTR_RSSI_ADJUST;
+ bss_select->param.adjust.band = adj_param->band;
+ bss_select->param.adjust.delta = adj_param->delta;
+ if (!is_band_valid(wiphy, bss_select->param.adjust.band))
+ return -EINVAL;
+ }
+
+ /* user-space did not provide a behaviour attribute */
+ if (bss_select->behaviour == __NL80211_BSS_SELECT_ATTR_INVALID)
+ return -EINVAL;
+
+ if (!(wiphy->bss_select_support & BIT(bss_select->behaviour)))
+ return -EINVAL;
+
+ return 0;
+}
+
static int nl80211_parse_random_mac(struct nlattr **attrs,
u8 *mac_addr, u8 *mac_addr_mask)
{
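
parse_bss_select() above insists on exactly one nested behaviour attribute and checks it against wiphy->bss_select_support. For reference, a hedged sketch of how userspace could compose a matching nest with libnl; the helper name is hypothetical, error handling is minimal, and the attribute constants need nl80211 headers that include this change:

#include <stdint.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <linux/nl80211.h>

/* Sketch: attach a single band-preference behaviour to a message. */
static int put_bss_select_band_pref(struct nl_msg *msg, uint32_t band)
{
	struct nlattr *nest = nla_nest_start(msg, NL80211_ATTR_BSS_SELECT);

	if (!nest)
		return -1;
	/* exactly one behaviour attribute may be present (see above) */
	if (nla_put_u32(msg, NL80211_BSS_SELECT_ATTR_BAND_PREF, band))
		return -1;
	nla_nest_end(msg, nest);
	return 0;
}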
@@ -5976,6 +6426,25 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
request->no_cck =
nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
+ /* Initial implementation used NL80211_ATTR_MAC to set the specific
+ * BSSID to scan for. This was problematic because that same attribute
+ * was already used for another purpose (local random MAC address). The
+ * NL80211_ATTR_BSSID attribute was added to fix this. For backwards
+ * compatibility with older userspace components, also use the
+ * NL80211_ATTR_MAC value here if it can be determined to be used for
+ * the specific BSSID use case instead of the random MAC address
+ * (NL80211_ATTR_SCAN_FLAGS is used to enable random MAC address use).
+ */
+ if (info->attrs[NL80211_ATTR_BSSID])
+ memcpy(request->bssid,
+ nla_data(info->attrs[NL80211_ATTR_BSSID]), ETH_ALEN);
+ else if (!(request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) &&
+ info->attrs[NL80211_ATTR_MAC])
+ memcpy(request->bssid, nla_data(info->attrs[NL80211_ATTR_MAC]),
+ ETH_ALEN);
+ else
+ eth_broadcast_addr(request->bssid);
+
request->wdev = wdev;
request->wiphy = &rdev->wiphy;
request->scan_start = jiffies;
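
The comment in the hunk above explains why two attributes can name the scan BSSID; condensed, the precedence the new lines implement is (illustrative restatement, pointers standing in for the parsed attributes):

#include <linux/nl80211.h>

/* Sketch of the scan-BSSID precedence implemented above. */
static const unsigned char *
scan_bssid(const unsigned char *attr_bssid, const unsigned char *attr_mac,
	   unsigned int scan_flags, const unsigned char *bcast)
{
	if (attr_bssid)
		return attr_bssid;	/* explicit NL80211_ATTR_BSSID */
	if (attr_mac && !(scan_flags & NL80211_SCAN_FLAG_RANDOM_ADDR))
		return attr_mac;	/* legacy NL80211_ATTR_MAC usage */
	return bcast;			/* wildcard scan */
}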
@@ -6189,6 +6658,12 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
if (!n_plans || n_plans > wiphy->max_sched_scan_plans)
return ERR_PTR(-EINVAL);
+ if (!wiphy_ext_feature_isset(
+ wiphy, NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI) &&
+ (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] ||
+ attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]))
+ return ERR_PTR(-EINVAL);
+
request = kzalloc(sizeof(*request)
+ sizeof(*request->ssids) * n_ssids
+ sizeof(*request->match_sets) * n_match_sets
@@ -6394,6 +6869,26 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
request->delay =
nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]);
+ if (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]) {
+ request->relative_rssi = nla_get_s8(
+ attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]);
+ request->relative_rssi_set = true;
+ }
+
+ if (request->relative_rssi_set &&
+ attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]) {
+ struct nl80211_bss_select_rssi_adjust *rssi_adjust;
+
+ rssi_adjust = nla_data(
+ attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]);
+ request->rssi_adjust.band = rssi_adjust->band;
+ request->rssi_adjust.delta = rssi_adjust->delta;
+ if (!is_band_valid(wiphy, request->rssi_adjust.band)) {
+ err = -EINVAL;
+ goto out_free;
+ }
+ }
+
err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs);
if (err)
goto out_free;
@@ -6407,6 +6902,24 @@ out_free:
return ERR_PTR(err);
}
+static int nl80211_abort_scan(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev = info->user_ptr[1];
+
+ if (!rdev->ops->abort_scan)
+ return -EOPNOTSUPP;
+
+ if (rdev->scan_msg)
+ return 0;
+
+ if (!rdev->scan_req)
+ return -ENOENT;
+
+ rdev_abort_scan(rdev, wdev);
+ return 0;
+}
+
static int nl80211_start_sched_scan(struct sk_buff *skb,
struct genl_info *info)
{
@@ -6483,6 +6996,9 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
if (err)
return err;
+ if (rdev->wiphy.flags & WIPHY_FLAG_DFS_OFFLOAD)
+ return -EOPNOTSUPP;
+
if (netif_carrier_ok(dev))
return -EBUSY;
@@ -7409,12 +7925,14 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
ibss.beacon_interval = 100;
- if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
+ if (info->attrs[NL80211_ATTR_BEACON_INTERVAL])
ibss.beacon_interval =
nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
- if (ibss.beacon_interval < 1 || ibss.beacon_interval > 10000)
- return -EINVAL;
- }
+
+ err = cfg80211_validate_beacon_int(rdev, NL80211_IFTYPE_ADHOC,
+ ibss.beacon_interval);
+ if (err)
+ return err;
if (!rdev->ops->join_ibss)
return -EOPNOTSUPP;
@@ -7883,6 +8401,10 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
connect.mfp = NL80211_MFP_NO;
}
+ if (info->attrs[NL80211_ATTR_PREV_BSSID])
+ connect.prev_bssid =
+ nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
+
if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
connect.channel = nl80211_get_valid_chan(
wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -7948,6 +8470,27 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
connect.flags |= ASSOC_REQ_USE_RRM;
}
+ connect.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+ if (connect.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ]) {
+ kzfree(connkeys);
+ return -EOPNOTSUPP;
+ }
+
+ if (info->attrs[NL80211_ATTR_BSS_SELECT]) {
+ /* bss selection makes no sense if bssid is set */
+ if (connect.bssid) {
+ kzfree(connkeys);
+ return -EINVAL;
+ }
+
+ err = parse_bss_select(info->attrs[NL80211_ATTR_BSS_SELECT],
+ wiphy, &connect.bss_select);
+ if (err) {
+ kzfree(connkeys);
+ return err;
+ }
+ }
+
wdev_lock(dev->ieee80211_ptr);
err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL);
wdev_unlock(dev->ieee80211_ptr);
@@ -7956,6 +8499,37 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
return err;
}
+static int nl80211_update_connect_params(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_connect_params connect = {};
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ u32 changed = 0;
+ int ret;
+
+ if (!rdev->ops->update_connect_params)
+ return -EOPNOTSUPP;
+
+ if (info->attrs[NL80211_ATTR_IE]) {
+ if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
+ return -EINVAL;
+ connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+ connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ changed |= UPDATE_ASSOC_IES;
+ }
+
+ wdev_lock(dev->ieee80211_ptr);
+ if (!wdev->current_bss)
+ ret = -ENOLINK;
+ else
+ ret = rdev_update_connect_params(rdev, dev, &connect, changed);
+ wdev_unlock(dev->ieee80211_ptr);
+
+ return ret;
+}
+
static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -8209,237 +8783,21 @@ static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
return rdev_cancel_remain_on_channel(rdev, wdev, cookie);
}
-static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
- u8 *rates, u8 rates_len)
-{
- u8 i;
- u32 mask = 0;
-
- for (i = 0; i < rates_len; i++) {
- int rate = (rates[i] & 0x7f) * 5;
- int ridx;
- for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
- struct ieee80211_rate *srate =
- &sband->bitrates[ridx];
- if (rate == srate->bitrate) {
- mask |= 1 << ridx;
- break;
- }
- }
- if (ridx == sband->n_bitrates)
- return 0; /* rate not found */
- }
-
- return mask;
-}
-
-static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
- u8 *rates, u8 rates_len,
- u8 mcs[IEEE80211_HT_MCS_MASK_LEN])
-{
- u8 i;
-
- memset(mcs, 0, IEEE80211_HT_MCS_MASK_LEN);
-
- for (i = 0; i < rates_len; i++) {
- int ridx, rbit;
-
- ridx = rates[i] / 8;
- rbit = BIT(rates[i] % 8);
-
- /* check validity */
- if ((ridx < 0) || (ridx >= IEEE80211_HT_MCS_MASK_LEN))
- return false;
-
- /* check availability */
- if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
- mcs[ridx] |= rbit;
- else
- return false;
- }
-
- return true;
-}
-
-static u16 vht_mcs_map_to_mcs_mask(u8 vht_mcs_map)
-{
- u16 mcs_mask = 0;
-
- switch (vht_mcs_map) {
- case IEEE80211_VHT_MCS_NOT_SUPPORTED:
- break;
- case IEEE80211_VHT_MCS_SUPPORT_0_7:
- mcs_mask = 0x00FF;
- break;
- case IEEE80211_VHT_MCS_SUPPORT_0_8:
- mcs_mask = 0x01FF;
- break;
- case IEEE80211_VHT_MCS_SUPPORT_0_9:
- mcs_mask = 0x03FF;
- break;
- default:
- break;
- }
-
- return mcs_mask;
-}
-
-static void vht_build_mcs_mask(u16 vht_mcs_map,
- u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
-{
- u8 nss;
-
- for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
- vht_mcs_mask[nss] = vht_mcs_map_to_mcs_mask(vht_mcs_map & 0x03);
- vht_mcs_map >>= 2;
- }
-}
-
-static bool vht_set_mcs_mask(struct ieee80211_supported_band *sband,
- struct nl80211_txrate_vht *txrate,
- u16 mcs[NL80211_VHT_NSS_MAX])
-{
- u16 tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
- u16 tx_mcs_mask[NL80211_VHT_NSS_MAX] = {};
- u8 i;
-
- if (!sband->vht_cap.vht_supported)
- return false;
-
- memset(mcs, 0, sizeof(u16) * NL80211_VHT_NSS_MAX);
-
- /* Build vht_mcs_mask from VHT capabilities */
- vht_build_mcs_mask(tx_mcs_map, tx_mcs_mask);
-
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
- if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i])
- mcs[i] = txrate->mcs[i];
- else
- return false;
- }
-
- return true;
-}
-
-static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
- [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
- .len = NL80211_MAX_SUPP_RATES },
- [NL80211_TXRATE_HT] = { .type = NLA_BINARY,
- .len = NL80211_MAX_SUPP_HT_RATES },
- [NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
- [NL80211_TXRATE_GI] = { .type = NLA_U8 },
-};
-
static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
struct genl_info *info)
{
- struct nlattr *tb[NL80211_TXRATE_MAX + 1];
- struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct cfg80211_bitrate_mask mask;
- int rem, i;
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
- struct nlattr *tx_rates;
- struct ieee80211_supported_band *sband;
- u16 vht_tx_mcs_map;
+ int err;
if (!rdev->ops->set_bitrate_mask)
return -EOPNOTSUPP;
- memset(&mask, 0, sizeof(mask));
- /* Default to all rates enabled */
- for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
- sband = rdev->wiphy.bands[i];
-
- if (!sband)
- continue;
-
- mask.control[i].legacy = (1 << sband->n_bitrates) - 1;
- memcpy(mask.control[i].ht_mcs,
- sband->ht_cap.mcs.rx_mask,
- sizeof(mask.control[i].ht_mcs));
-
- if (!sband->vht_cap.vht_supported)
- continue;
-
- vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
- vht_build_mcs_mask(vht_tx_mcs_map, mask.control[i].vht_mcs);
- }
-
- /* if no rates are given set it back to the defaults */
- if (!info->attrs[NL80211_ATTR_TX_RATES])
- goto out;
-
- /*
- * The nested attribute uses enum nl80211_band as the index. This maps
- * directly to the enum ieee80211_band values used in cfg80211.
- */
- BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
- nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
- enum ieee80211_band band = nla_type(tx_rates);
- int err;
-
- if (band < 0 || band >= IEEE80211_NUM_BANDS)
- return -EINVAL;
- sband = rdev->wiphy.bands[band];
- if (sband == NULL)
- return -EINVAL;
- err = nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
- nla_len(tx_rates), nl80211_txattr_policy);
- if (err)
- return err;
- if (tb[NL80211_TXRATE_LEGACY]) {
- mask.control[band].legacy = rateset_to_mask(
- sband,
- nla_data(tb[NL80211_TXRATE_LEGACY]),
- nla_len(tb[NL80211_TXRATE_LEGACY]));
- if ((mask.control[band].legacy == 0) &&
- nla_len(tb[NL80211_TXRATE_LEGACY]))
- return -EINVAL;
- }
- if (tb[NL80211_TXRATE_HT]) {
- if (!ht_rateset_to_mask(
- sband,
- nla_data(tb[NL80211_TXRATE_HT]),
- nla_len(tb[NL80211_TXRATE_HT]),
- mask.control[band].ht_mcs))
- return -EINVAL;
- }
- if (tb[NL80211_TXRATE_VHT]) {
- if (!vht_set_mcs_mask(
- sband,
- nla_data(tb[NL80211_TXRATE_VHT]),
- mask.control[band].vht_mcs))
- return -EINVAL;
- }
- if (tb[NL80211_TXRATE_GI]) {
- mask.control[band].gi =
- nla_get_u8(tb[NL80211_TXRATE_GI]);
- if (mask.control[band].gi > NL80211_TXRATE_FORCE_LGI)
- return -EINVAL;
- }
-
- if (mask.control[band].legacy == 0) {
- /* don't allow empty legacy rates if HT or VHT
- * are not even supported.
- */
- if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported ||
- rdev->wiphy.bands[band]->vht_cap.vht_supported))
- return -EINVAL;
-
- for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
- if (mask.control[band].ht_mcs[i])
- goto out;
-
- for (i = 0; i < NL80211_VHT_NSS_MAX; i++)
- if (mask.control[band].vht_mcs[i])
- goto out;
-
- /* legacy and mcs rates may not be both empty */
- return -EINVAL;
- }
- }
+ err = nl80211_parse_tx_bitrate_mask(info, &mask);
+ if (err)
+ return err;
-out:
return rdev_set_bitrate_mask(rdev, dev, NULL, &mask);
}
@@ -8858,9 +9216,12 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
setup.beacon_interval =
nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
- if (setup.beacon_interval < 10 ||
- setup.beacon_interval > 10000)
- return -EINVAL;
+
+ err = cfg80211_validate_beacon_int(rdev,
+ NL80211_IFTYPE_MESH_POINT,
+ setup.beacon_interval);
+ if (err)
+ return err;
}
if (info->attrs[NL80211_ATTR_DTIM_PERIOD]) {
@@ -8906,6 +9267,17 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
return err;
}
+ if (info->attrs[NL80211_ATTR_TX_RATES] && setup.chandef.chan != NULL) {
+ err = nl80211_parse_tx_bitrate_mask(info, &setup.beacon_rate);
+ if (err)
+ return err;
+
+ err = validate_beacon_tx_rate(rdev, setup.chandef.chan->band,
+ &setup.beacon_rate);
+ if (err)
+ return err;
+ }
+
return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
}
@@ -9015,6 +9387,20 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg,
if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay))
return -ENOBUFS;
+ if (req->relative_rssi_set) {
+ struct nl80211_bss_select_rssi_adjust rssi_adjust;
+
+ if (nla_put_s8(msg, NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI,
+ req->relative_rssi))
+ return -ENOBUFS;
+
+ rssi_adjust.band = req->rssi_adjust.band;
+ rssi_adjust.delta = req->rssi_adjust.delta;
+ if (nla_put(msg, NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST,
+ sizeof(rssi_adjust), &rssi_adjust))
+ return -ENOBUFS;
+ }
+
freqs = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
if (!freqs)
return -ENOBUFS;
@@ -10173,7 +10559,7 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb,
*wdev = NULL;
if (cb->args[1]) {
- list_for_each_entry(tmp, &(*rdev)->wdev_list, list) {
+ list_for_each_entry(tmp, &wiphy->wdev_list, list) {
if (tmp->identifier == cb->args[1] - 1) {
*wdev = tmp;
break;
@@ -10949,6 +11335,14 @@ static const struct genl_ops nl80211_ops[] = {
NL80211_FLAG_NEED_RTNL,
},
{
+ .cmd = NL80211_CMD_ABORT_SCAN,
+ .doit = nl80211_abort_scan,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
.cmd = NL80211_CMD_GET_SCAN,
.policy = nl80211_policy,
.dumpit = nl80211_dump_scan,
@@ -11038,6 +11432,14 @@ static const struct genl_ops nl80211_ops[] = {
NL80211_FLAG_NEED_RTNL,
},
{
+ .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
+ .doit = nl80211_update_connect_params,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
.cmd = NL80211_CMD_DISCONNECT,
.doit = nl80211_disconnect,
.policy = nl80211_policy,
@@ -11863,7 +12265,9 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len,
- u16 status, gfp_t gfp)
+ int status,
+ enum nl80211_timeout_reason timeout_reason,
+ gfp_t gfp)
{
struct sk_buff *msg;
void *hdr;
@@ -11881,7 +12285,12 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
(bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) ||
- nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) ||
+ nla_put_u16(msg, NL80211_ATTR_STATUS_CODE,
+ status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
+ status) ||
+ (status < 0 &&
+ (nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
+ nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON, timeout_reason))) ||
(req_ie &&
nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
(resp_ie &&
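
The widened status argument above adopts a convention: a negative value cannot be a 16-bit IEEE 802.11 status code, so it is reported as WLAN_STATUS_UNSPECIFIED_FAILURE plus the TIMED_OUT flag and a timeout reason. A sketch of that mapping, using a hypothetical flattened view of the attributes:

    #define WLAN_STATUS_UNSPECIFIED_FAILURE 1  /* as in ieee80211.h */

    struct connect_report {          /* hypothetical flat view of the attrs */
        unsigned short status_code;  /* NL80211_ATTR_STATUS_CODE */
        int timed_out;               /* NL80211_ATTR_TIMED_OUT flag */
        unsigned int timeout_reason; /* NL80211_ATTR_TIMEOUT_REASON */
    };

    static void fill_connect_report(int status, unsigned int timeout_reason,
                                    struct connect_report *r)
    {
        r->status_code = status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE
                                    : (unsigned short)status;
        r->timed_out = status < 0;
        r->timeout_reason = status < 0 ? timeout_reason : 0;
    }
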
@@ -13180,7 +13589,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
schedule_work(&rdev->sched_scan_stop_wk);
}
- list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) {
+ list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) {
cfg80211_mlme_unregister_socket(wdev, notify->portid);
if (wdev->owner_nlportid == notify->portid)
@@ -13338,6 +13747,16 @@ void nl80211_send_ap_stopped(struct wireless_dev *wdev)
nlmsg_free(msg);
}
+void cfg80211_ap_stopped(struct net_device *netdev, gfp_t gfp)
+{
+ struct wireless_dev *wdev = netdev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+
+ nl80211_send_mlme_event(rdev, netdev, NULL, 0,
+ NL80211_CMD_STOP_AP, gfp, -1);
+}
+EXPORT_SYMBOL(cfg80211_ap_stopped);
+
/* initialisation/exit functions */
int nl80211_init(void)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 84d4edf1d545..a749c9be2836 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -55,7 +55,9 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len,
- u16 status, gfp_t gfp);
+ int status,
+ enum nl80211_timeout_reason timeout_reason,
+ gfp_t gfp);
void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index c23516d0f807..6bde2241bffa 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -427,6 +427,14 @@ static inline int rdev_scan(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline void rdev_abort_scan(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev)
+{
+ trace_rdev_abort_scan(&rdev->wiphy, wdev);
+ rdev->ops->abort_scan(&rdev->wiphy, wdev);
+ trace_rdev_return_void(&rdev->wiphy);
+}
+
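
rdev_abort_scan() follows the usual rdev-ops pattern of bracketing the driver callback with trace events, but it returns void and dereferences ops->abort_scan unconditionally; presumably the nl80211 handler rejects drivers that lack the callback first, along these lines:

    /* hypothetical caller-side guard; the real nl80211_abort_scan()
     * is not shown in this diff */
    static int nl80211_abort_scan_sketch(struct cfg80211_registered_device *rdev,
                                         struct wireless_dev *wdev)
    {
        if (!rdev->ops->abort_scan)
            return -EOPNOTSUPP;
        rdev_abort_scan(rdev, wdev);
        return 0;
    }
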
static inline int rdev_auth(struct cfg80211_registered_device *rdev,
struct net_device *dev,
struct cfg80211_auth_request *req)
@@ -482,6 +490,18 @@ static inline int rdev_connect(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline int
+rdev_update_connect_params(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_connect_params *sme, u32 changed)
+{
+ int ret;
+ trace_rdev_update_connect_params(&rdev->wiphy, dev, sme, changed);
+ ret = rdev->ops->update_connect_params(&rdev->wiphy, dev, sme, changed);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
static inline int rdev_disconnect(struct cfg80211_registered_device *rdev,
struct net_device *dev, u16 reason_code)
{
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 06d050da0d94..050d7948dd68 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -425,6 +425,11 @@ static bool is_user_regdom_saved(void)
return true;
}
+static bool is_cfg80211_regdom_intersected(void)
+{
+ return is_intersected_alpha2(get_cfg80211_regdom()->alpha2);
+}
+
static const struct ieee80211_regdomain *
reg_copy_regd(const struct ieee80211_regdomain *src_regd)
{
@@ -1676,12 +1681,48 @@ static void reg_leave_invalid_chans(struct wiphy *wiphy)
{
struct wireless_dev *wdev;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
-
+ struct net_device *dev;
+ struct cfg80211_sched_scan_request *sched_scan_req;
ASSERT_RTNL();
- list_for_each_entry(wdev, &rdev->wdev_list, list)
- if (!reg_wdev_chan_valid(wiphy, wdev))
- cfg80211_leave(rdev, wdev);
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
+ if (!reg_wdev_chan_valid(wiphy, wdev)) {
+ dev = wdev->netdev;
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ cfg80211_leave_ibss(rdev, dev, true);
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_STATION:
+ ASSERT_RTNL();
+ sched_scan_req = rtnl_dereference(rdev->sched_scan_req);
+ if (sched_scan_req && dev == sched_scan_req->dev)
+ __cfg80211_stop_sched_scan(rdev, false);
+
+ wdev_lock(wdev);
+#ifdef CONFIG_CFG80211_WEXT
+ kfree(wdev->wext.ie);
+ wdev->wext.ie = NULL;
+ wdev->wext.ie_len = 0;
+ wdev->wext.connect.auth_type =
+ NL80211_AUTHTYPE_AUTOMATIC;
+#endif
+ cfg80211_disconnect(rdev, dev,
+ WLAN_REASON_DEAUTH_LEAVING, true);
+ cfg80211_mlme_down(rdev, dev);
+ wdev_unlock(wdev);
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ cfg80211_leave_mesh(rdev, dev);
+ break;
+ case NL80211_IFTYPE_AP:
+ cfg80211_stop_ap(rdev, dev, false);
+ break;
+ default:
+ break;
+ }
+ wdev->beacon_interval = 0;
+ }
}
static void reg_check_chans_work(struct work_struct *work)
@@ -1941,9 +1982,14 @@ __reg_process_hint_user(struct regulatory_request *user_request)
*/
if ((lr->initiator == NL80211_REGDOM_SET_BY_CORE ||
lr->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
- lr->initiator == NL80211_REGDOM_SET_BY_USER) &&
- regdom_changes(lr->alpha2))
- return REG_REQ_IGNORE;
+ lr->initiator == NL80211_REGDOM_SET_BY_USER)) {
+ if (lr->intersect) {
+ if (!is_cfg80211_regdom_intersected())
+ return REG_REQ_IGNORE;
+ } else if (regdom_changes(lr->alpha2)) {
+ return REG_REQ_IGNORE;
+ }
+ }
if (!regdom_changes(user_request->alpha2))
return REG_REQ_ALREADY_SET;
@@ -2399,6 +2445,7 @@ int regulatory_hint_user(const char *alpha2,
return 0;
}
+EXPORT_SYMBOL(regulatory_hint_user);
int regulatory_hint_indoor(bool is_indoor, u32 portid)
{
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 6e7b86ca2abd..5b4906ad1b81 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1362,6 +1362,8 @@ int cfg80211_wext_siwscan(struct net_device *dev,
if (wiphy->bands[i])
creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
+ eth_broadcast_addr(creq->bssid);
+
rdev->scan_req = creq;
err = rdev_scan(rdev, creq);
if (err) {
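
The eth_broadcast_addr() call above fills the request's BSSID with 0xff bytes, so a scan that previously left the field zeroed now explicitly matches any BSS. A userspace equivalent, assuming the usual six-byte ETH_ALEN:

    #include <string.h>

    /* mirror of the kernel's eth_broadcast_addr(): ff:ff:ff:ff:ff:ff
     * acts as the wildcard BSSID for scan requests */
    static void broadcast_bssid(unsigned char addr[6])
    {
        memset(addr, 0xff, 6);
    }
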
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 18b4a652cf41..fe8a9062de98 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -34,10 +34,11 @@ struct cfg80211_conn {
CFG80211_CONN_SCAN_AGAIN,
CFG80211_CONN_AUTHENTICATE_NEXT,
CFG80211_CONN_AUTHENTICATING,
- CFG80211_CONN_AUTH_FAILED,
+ CFG80211_CONN_AUTH_FAILED_TIMEOUT,
CFG80211_CONN_ASSOCIATE_NEXT,
CFG80211_CONN_ASSOCIATING,
CFG80211_CONN_ASSOC_FAILED,
+ CFG80211_CONN_ASSOC_FAILED_TIMEOUT,
CFG80211_CONN_DEAUTH,
CFG80211_CONN_ABANDON,
CFG80211_CONN_CONNECTED,
@@ -48,6 +49,29 @@ struct cfg80211_conn {
bool auto_auth, prev_bssid_valid;
};
+static bool cfg80211_is_all_countryie_ignore(void)
+{
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
+ bool is_all_countryie_ignore = true;
+
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+ wdev_lock(wdev);
+ if (!(wdev->wiphy->regulatory_flags &
+ REGULATORY_COUNTRY_IE_IGNORE)) {
+ is_all_countryie_ignore = false;
+ wdev_unlock(wdev);
+ goto out;
+ }
+ wdev_unlock(wdev);
+ }
+ }
+
+out:
+ return is_all_countryie_ignore;
+}
+
static void cfg80211_sme_free(struct wireless_dev *wdev)
{
if (!wdev->conn)
@@ -120,6 +144,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
wdev->conn->params.ssid_len);
request->ssids[0].ssid_len = wdev->conn->params.ssid_len;
+ eth_broadcast_addr(request->bssid);
+
request->wdev = wdev;
request->wiphy = &rdev->wiphy;
request->scan_start = jiffies;
@@ -138,7 +164,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
return err;
}
-static int cfg80211_conn_do_work(struct wireless_dev *wdev)
+static int cfg80211_conn_do_work(struct wireless_dev *wdev,
+ enum nl80211_timeout_reason *treason)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct cfg80211_connect_params *params;
@@ -169,7 +196,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
NULL, 0,
params->key, params->key_len,
params->key_idx, NULL, 0);
- case CFG80211_CONN_AUTH_FAILED:
+ case CFG80211_CONN_AUTH_FAILED_TIMEOUT:
+ *treason = NL80211_TIMEOUT_AUTH;
return -ENOTCONN;
case CFG80211_CONN_ASSOCIATE_NEXT:
if (WARN_ON(!rdev->ops->assoc))
@@ -196,6 +224,9 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
WLAN_REASON_DEAUTH_LEAVING,
false);
return err;
+ case CFG80211_CONN_ASSOC_FAILED_TIMEOUT:
+ *treason = NL80211_TIMEOUT_ASSOC;
+ /* fall through */
case CFG80211_CONN_ASSOC_FAILED:
cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
NULL, 0,
@@ -221,10 +252,11 @@ void cfg80211_conn_work(struct work_struct *work)
container_of(work, struct cfg80211_registered_device, conn_work);
struct wireless_dev *wdev;
u8 bssid_buf[ETH_ALEN], *bssid = NULL;
+ enum nl80211_timeout_reason treason;
rtnl_lock();
- list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
if (!wdev->netdev)
continue;
@@ -242,12 +274,12 @@ void cfg80211_conn_work(struct work_struct *work)
memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN);
bssid = bssid_buf;
}
- if (cfg80211_conn_do_work(wdev)) {
+ treason = NL80211_TIMEOUT_UNSPECIFIED;
+ if (cfg80211_conn_do_work(wdev, &treason)) {
__cfg80211_connect_result(
wdev->netdev, bssid,
- NULL, 0, NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- false, NULL);
+ NULL, 0, NULL, 0, -1, false, NULL,
+ treason);
}
wdev_unlock(wdev);
}
@@ -267,7 +299,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
wdev->conn->params.bssid,
wdev->conn->params.ssid,
wdev->conn->params.ssid_len,
- IEEE80211_BSS_TYPE_ESS,
+ wdev->conn_bss_type,
IEEE80211_PRIVACY(wdev->conn->params.privacy));
if (!bss)
return NULL;
@@ -352,7 +384,8 @@ void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len)
} else if (status_code != WLAN_STATUS_SUCCESS) {
__cfg80211_connect_result(wdev->netdev, mgmt->bssid,
NULL, 0, NULL, 0,
- status_code, false, NULL);
+ status_code, false, NULL,
+ NL80211_TIMEOUT_UNSPECIFIED);
} else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) {
wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT;
schedule_work(&rdev->conn_work);
@@ -400,7 +433,7 @@ void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
if (!wdev->conn)
return;
- wdev->conn->state = CFG80211_CONN_AUTH_FAILED;
+ wdev->conn->state = CFG80211_CONN_AUTH_FAILED_TIMEOUT;
schedule_work(&rdev->conn_work);
}
@@ -422,7 +455,7 @@ void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev)
if (!wdev->conn)
return;
- wdev->conn->state = CFG80211_CONN_ASSOC_FAILED;
+ wdev->conn->state = CFG80211_CONN_ASSOC_FAILED_TIMEOUT;
schedule_work(&rdev->conn_work);
}
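
Splitting the failure states into explicit *_FAILED_TIMEOUT variants lets cfg80211_conn_do_work() tell its caller why the attempt died instead of collapsing everything into an unspecified failure. The resulting mapping, summarized with stand-in enums:

    /* stand-ins for the cfg80211 conn states and nl80211 reasons */
    enum conn_state { AUTH_FAILED_TIMEOUT, ASSOC_FAILED_TIMEOUT, OTHER };
    enum timeout_reason { TIMEOUT_UNSPECIFIED, TIMEOUT_AUTH, TIMEOUT_ASSOC };

    static enum timeout_reason conn_timeout_reason(enum conn_state s)
    {
        switch (s) {
        case AUTH_FAILED_TIMEOUT:
            return TIMEOUT_AUTH;    /* CFG80211_CONN_AUTH_FAILED_TIMEOUT */
        case ASSOC_FAILED_TIMEOUT:
            return TIMEOUT_ASSOC;   /* CFG80211_CONN_ASSOC_FAILED_TIMEOUT */
        default:
            return TIMEOUT_UNSPECIFIED;
        }
    }
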
@@ -554,7 +587,9 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
/* we're good if we have a matching bss struct */
if (bss) {
- err = cfg80211_conn_do_work(wdev);
+ enum nl80211_timeout_reason treason;
+
+ err = cfg80211_conn_do_work(wdev, &treason);
cfg80211_put_bss(wdev->wiphy, bss);
} else {
/* otherwise we'll need to scan for the AP first */
@@ -619,7 +654,7 @@ static bool cfg80211_is_all_idle(void)
* count as new regulatory hints.
*/
list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
- list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
wdev_lock(wdev);
if (wdev->conn || wdev->current_bss)
is_all_idle = false;
@@ -633,7 +668,8 @@ static bool cfg80211_is_all_idle(void)
static void disconnect_work(struct work_struct *work)
{
rtnl_lock();
- if (cfg80211_is_all_idle())
+ if (cfg80211_is_all_idle() &&
+ !cfg80211_is_all_countryie_ignore())
regulatory_hint_disconnect();
rtnl_unlock();
}
@@ -650,8 +686,9 @@ static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work);
void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len,
- u16 status, bool wextev,
- struct cfg80211_bss *bss)
+ int status, bool wextev,
+ struct cfg80211_bss *bss,
+ enum nl80211_timeout_reason timeout_reason)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
const u8 *country_ie;
@@ -670,7 +707,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev,
bssid, req_ie, req_ie_len,
resp_ie, resp_ie_len,
- status, GFP_KERNEL);
+ status, timeout_reason, GFP_KERNEL);
#ifdef CONFIG_CFG80211_WEXT
if (wextev) {
@@ -701,7 +738,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
wdev->ssid, wdev->ssid_len,
- IEEE80211_BSS_TYPE_ESS,
+ wdev->conn_bss_type,
IEEE80211_PRIVACY_ANY);
if (bss)
cfg80211_hold_bss(bss_from_pub(bss));
@@ -755,19 +792,33 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
kfree(country_ie);
}
-void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
- const u8 *req_ie, size_t req_ie_len,
- const u8 *resp_ie, size_t resp_ie_len,
- u16 status, gfp_t gfp)
+/* Consumes bss object one way or another */
+void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
+ struct cfg80211_bss *bss, const u8 *req_ie,
+ size_t req_ie_len, const u8 *resp_ie,
+ size_t resp_ie_len, int status, gfp_t gfp,
+ enum nl80211_timeout_reason timeout_reason)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct cfg80211_event *ev;
unsigned long flags;
+ if (bss) {
+ /* Make sure the bss entry provided by the driver is valid. */
+ struct cfg80211_internal_bss *ibss = bss_from_pub(bss);
+
+ if (WARN_ON(list_empty(&ibss->list))) {
+ cfg80211_put_bss(wdev->wiphy, bss);
+ return;
+ }
+ }
+
ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
- if (!ev)
+ if (!ev) {
+ cfg80211_put_bss(wdev->wiphy, bss);
return;
+ }
ev->type = EVENT_CONNECT_RESULT;
if (bssid)
@@ -782,14 +833,18 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
ev->cr.resp_ie_len = resp_ie_len;
memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len);
}
+ if (bss)
+ cfg80211_hold_bss(bss_from_pub(bss));
+ ev->cr.bss = bss;
ev->cr.status = status;
+ ev->cr.timeout_reason = timeout_reason;
spin_lock_irqsave(&wdev->event_lock, flags);
list_add_tail(&ev->list, &wdev->event_list);
spin_unlock_irqrestore(&wdev->event_lock, flags);
queue_work(cfg80211_wq, &rdev->event_work);
}
-EXPORT_SYMBOL(cfg80211_connect_result);
+EXPORT_SYMBOL(cfg80211_connect_bss);
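
With cfg80211_connect_result() replaced by cfg80211_connect_bss(), a driver that already holds the bss entry passes it straight through and the event path reuses it rather than looking the BSS up again. A hypothetical driver-side call on a successful association (ndev, bssid and the IE buffers are placeholder names):

    cfg80211_connect_bss(ndev, bssid, bss,
                         assoc_req_ies, assoc_req_ies_len,
                         assoc_resp_ies, assoc_resp_ies_len,
                         WLAN_STATUS_SUCCESS, GFP_KERNEL,
                         NL80211_TIMEOUT_UNSPECIFIED);

Note that bss must be an entry cfg80211 already tracks; the list_empty() check above rejects anything else.
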
/* Consumes bss object one way or another */
void __cfg80211_roamed(struct wireless_dev *wdev,
@@ -860,7 +915,7 @@ void cfg80211_roamed(struct net_device *dev,
bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid,
wdev->ssid_len,
- IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
+ wdev->conn_bss_type, IEEE80211_PRIVACY_ANY);
if (WARN_ON(!bss))
return;
@@ -1031,6 +1086,9 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
memcpy(wdev->ssid, connect->ssid, connect->ssid_len);
wdev->ssid_len = connect->ssid_len;
+ wdev->conn_bss_type = connect->pbss ? IEEE80211_BSS_TYPE_PBSS :
+ IEEE80211_BSS_TYPE_ESS;
+
if (!rdev->ops->connect)
err = cfg80211_sme_connect(wdev, connect, prev_bssid);
else
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 9cee0220665d..460c4b0e343c 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -87,14 +87,6 @@ static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env)
}
#ifdef CONFIG_PM_SLEEP
-static void cfg80211_leave_all(struct cfg80211_registered_device *rdev)
-{
- struct wireless_dev *wdev;
-
- list_for_each_entry(wdev, &rdev->wdev_list, list)
- cfg80211_leave(rdev, wdev);
-}
-
static int wiphy_suspend(struct device *dev)
{
struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
@@ -103,17 +95,9 @@ static int wiphy_suspend(struct device *dev)
rdev->suspend_at = get_seconds();
rtnl_lock();
- if (rdev->wiphy.registered) {
- if (!rdev->wiphy.wowlan_config)
- cfg80211_leave_all(rdev);
+ if (rdev->wiphy.registered)
if (rdev->ops->suspend)
- ret = rdev_suspend(rdev, rdev->wiphy.wowlan_config);
- if (ret == 1) {
- /* Driver refuse to configure wowlan */
- cfg80211_leave_all(rdev);
ret = rdev_suspend(rdev, NULL);
- }
- }
rtnl_unlock();
return ret;
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 0c392d36781b..b7bf5ba63555 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1221,6 +1221,7 @@ TRACE_EVENT(rdev_connect,
__field(bool, privacy)
__field(u32, wpa_versions)
__field(u32, flags)
+ MAC_ENTRY(prev_bssid)
),
TP_fast_assign(
WIPHY_ASSIGN;
@@ -1232,13 +1233,32 @@ TRACE_EVENT(rdev_connect,
__entry->privacy = sme->privacy;
__entry->wpa_versions = sme->crypto.wpa_versions;
__entry->flags = sme->flags;
+ MAC_ASSIGN(prev_bssid, sme->prev_bssid);
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", bssid: " MAC_PR_FMT
", ssid: %s, auth type: %d, privacy: %s, wpa versions: %u, "
- "flags: %u",
+ "flags: %u, previous bssid: " MAC_PR_FMT,
WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->ssid,
__entry->auth_type, BOOL_TO_STR(__entry->privacy),
- __entry->wpa_versions, __entry->flags)
+ __entry->wpa_versions, __entry->flags, MAC_PR_ARG(prev_bssid))
+);
+
+TRACE_EVENT(rdev_update_connect_params,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_connect_params *sme, u32 changed),
+ TP_ARGS(wiphy, netdev, sme, changed),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ __field(u32, changed)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ __entry->changed = changed;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", parameters changed: %u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->changed)
);
TRACE_EVENT(rdev_set_cqm_rssi_config,
@@ -2803,6 +2823,11 @@ TRACE_EVENT(cfg80211_ft_event,
WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(target_ap))
);
+DEFINE_EVENT(wiphy_wdev_evt, rdev_abort_scan,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+ TP_ARGS(wiphy, wdev)
+);
+
TRACE_EVENT(cfg80211_stop_iface,
TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
TP_ARGS(wiphy, wdev),
diff --git a/net/wireless/util.c b/net/wireless/util.c
index baf7218cec15..305370cfd1e0 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -13,6 +13,7 @@
#include <net/dsfield.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
+#include <linux/gcd.h>
#include "core.h"
#include "rdev-ops.h"
@@ -877,7 +878,7 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev)
ev->cr.resp_ie, ev->cr.resp_ie_len,
ev->cr.status,
ev->cr.status == WLAN_STATUS_SUCCESS,
- NULL);
+ ev->cr.bss, ev->cr.timeout_reason);
break;
case EVENT_ROAMED:
__cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie,
@@ -913,7 +914,7 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev)
ASSERT_RTNL();
- list_for_each_entry(wdev, &rdev->wdev_list, list)
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
cfg80211_process_wdev_events(wdev);
}
@@ -1082,7 +1083,7 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
58500000,
65000000,
78000000,
- 0,
+ 86500000,
},
{ 13500000,
27000000,
@@ -1485,31 +1486,57 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
}
EXPORT_SYMBOL(ieee80211_chandef_to_operating_class);
-int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
- u32 beacon_int)
+static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int,
+ u32 *beacon_int_gcd,
+ bool *beacon_int_different)
{
struct wireless_dev *wdev;
- int res = 0;
- if (!beacon_int)
- return -EINVAL;
+ *beacon_int_gcd = 0;
+ *beacon_int_different = false;
- list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ list_for_each_entry(wdev, &wiphy->wdev_list, list) {
if (!wdev->beacon_interval)
continue;
- if (wdev->beacon_interval != beacon_int) {
- res = -EINVAL;
- break;
+
+ if (!*beacon_int_gcd) {
+ *beacon_int_gcd = wdev->beacon_interval;
+ continue;
}
+
+ if (wdev->beacon_interval == *beacon_int_gcd)
+ continue;
+
+ *beacon_int_different = true;
+ *beacon_int_gcd = gcd(*beacon_int_gcd, wdev->beacon_interval);
}
- return res;
+ if (new_beacon_int && *beacon_int_gcd != new_beacon_int) {
+ if (*beacon_int_gcd)
+ *beacon_int_different = true;
+ *beacon_int_gcd = gcd(*beacon_int_gcd, new_beacon_int);
+ }
+}
+
+int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
+ enum nl80211_iftype iftype, u32 beacon_int)
+{
+ /*
+ * This is just a basic pre-condition check; if interface combinations
+ * are possible the driver must already be checking those with a call
+ * to cfg80211_check_combinations(), in which case we'll validate more
+ * through the cfg80211_calculate_bi_data() call and code in
+ * cfg80211_iter_combinations().
+ */
+
+ if (beacon_int < 10 || beacon_int > 10000)
+ return -EINVAL;
+
+ return 0;
}
int cfg80211_iter_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES],
+ struct iface_combination_params *params,
void (*iter)(const struct ieee80211_iface_combination *c,
void *data),
void *data)
@@ -1519,8 +1546,23 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
int i, j, iftype;
int num_interfaces = 0;
u32 used_iftypes = 0;
+ u32 beacon_int_gcd;
+ bool beacon_int_different;
- if (radar_detect) {
+ /*
+ * This is a bit strange, since the iteration used to rely only on
+ * the data given by the driver, but here it now relies on context,
+ * in form of the currently operating interfaces.
+ * This is OK for all current users, and saves us from having to
+ * push the GCD calculations into all the drivers.
+ * In the future, this should probably rely more on data that's in
+ * cfg80211 already - the only thing not would appear to be any new
+ * interfaces (while being brought up) and channel/radar data.
+ */
+ cfg80211_calculate_bi_data(wiphy, params->new_beacon_int,
+ &beacon_int_gcd, &beacon_int_different);
+
+ if (params->radar_detect) {
rcu_read_lock();
regdom = rcu_dereference(cfg80211_regdomain);
if (regdom)
@@ -1529,8 +1571,8 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
}
for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
- num_interfaces += iftype_num[iftype];
- if (iftype_num[iftype] > 0 &&
+ num_interfaces += params->iftype_num[iftype];
+ if (params->iftype_num[iftype] > 0 &&
!(wiphy->software_iftypes & BIT(iftype)))
used_iftypes |= BIT(iftype);
}
@@ -1544,7 +1586,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
if (num_interfaces > c->max_interfaces)
continue;
- if (num_different_channels > c->num_different_channels)
+ if (params->num_different_channels > c->num_different_channels)
continue;
limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
@@ -1559,16 +1601,17 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
all_iftypes |= limits[j].types;
if (!(limits[j].types & BIT(iftype)))
continue;
- if (limits[j].max < iftype_num[iftype])
+ if (limits[j].max < params->iftype_num[iftype])
goto cont;
- limits[j].max -= iftype_num[iftype];
+ limits[j].max -= params->iftype_num[iftype];
}
}
- if (radar_detect != (c->radar_detect_widths & radar_detect))
+ if (params->radar_detect !=
+ (c->radar_detect_widths & params->radar_detect))
goto cont;
- if (radar_detect && c->radar_detect_regions &&
+ if (params->radar_detect && c->radar_detect_regions &&
!(c->radar_detect_regions & BIT(region)))
goto cont;
@@ -1580,6 +1623,14 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
if ((all_iftypes & used_iftypes) != used_iftypes)
goto cont;
+ if (beacon_int_gcd) {
+ if (c->beacon_int_min_gcd &&
+ beacon_int_gcd < c->beacon_int_min_gcd)
+ goto cont;
+ if (!c->beacon_int_min_gcd && beacon_int_different)
+ goto cont;
+ }
+
/* This combination covered all interface types and
* supported the requested numbers, so we're good.
*/
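
Taken together, cfg80211_calculate_bi_data() and the beacon_int_min_gcd test implement: while every beaconing interface uses one interval, the GCD is that interval and beacon_int_different stays false; once intervals diverge, only a combination advertising a beacon_int_min_gcd at or below the running GCD is acceptable. A worked standalone example of the GCD bookkeeping:

    #include <stdio.h>

    static unsigned int gcd(unsigned int a, unsigned int b)
    {
        while (b) {
            unsigned int t = a % b;
            a = b;
            b = t;
        }
        return a;
    }

    int main(void)
    {
        /* hypothetical beacon intervals of three running interfaces */
        unsigned int bi[] = { 100, 100, 80 };
        unsigned int g = 0;
        int different = 0;
        unsigned int i;

        for (i = 0; i < sizeof(bi) / sizeof(bi[0]); i++) {
            if (!g) {
                g = bi[i];
                continue;
            }
            if (bi[i] == g)
                continue;
            different = 1;
            g = gcd(g, bi[i]);
        }
        /* prints gcd=20 different=1: only combinations with
         * beacon_int_min_gcd <= 20 pass the check above */
        printf("gcd=%u different=%d\n", g, different);
        return 0;
    }
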
@@ -1602,14 +1653,11 @@ cfg80211_iter_sum_ifcombs(const struct ieee80211_iface_combination *c,
}
int cfg80211_check_combinations(struct wiphy *wiphy,
- const int num_different_channels,
- const u8 radar_detect,
- const int iftype_num[NUM_NL80211_IFTYPES])
+ struct iface_combination_params *params)
{
int err, num = 0;
- err = cfg80211_iter_combinations(wiphy, num_different_channels,
- radar_detect, iftype_num,
+ err = cfg80211_iter_combinations(wiphy, params,
cfg80211_iter_sum_ifcombs, &num);
if (err)
return err;
@@ -1628,14 +1676,15 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
u8 radar_detect)
{
struct wireless_dev *wdev_iter;
- int num[NUM_NL80211_IFTYPES];
struct ieee80211_channel
*used_channels[CFG80211_MAX_NUM_DIFFERENT_CHANNELS];
struct ieee80211_channel *ch;
enum cfg80211_chan_mode chmode;
- int num_different_channels = 0;
int total = 1;
int i;
+ struct iface_combination_params params = {
+ .radar_detect = radar_detect,
+ };
ASSERT_RTNL();
@@ -1652,10 +1701,9 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
return 0;
}
- memset(num, 0, sizeof(num));
memset(used_channels, 0, sizeof(used_channels));
- num[iftype] = 1;
+ params.iftype_num[iftype] = 1;
/* TODO: We'll probably not need this anymore, since this
* should only be called with CHAN_MODE_UNDEFINED. There are
@@ -1668,14 +1716,14 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
case CHAN_MODE_SHARED:
WARN_ON(!chan);
used_channels[0] = chan;
- num_different_channels++;
+ params.num_different_channels++;
break;
case CHAN_MODE_EXCLUSIVE:
- num_different_channels++;
+ params.num_different_channels++;
break;
}
- list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
+ list_for_each_entry(wdev_iter, &rdev->wiphy.wdev_list, list) {
if (wdev_iter == wdev)
continue;
if (wdev_iter->iftype == NL80211_IFTYPE_P2P_DEVICE) {
@@ -1699,7 +1747,8 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
*/
mutex_lock_nested(&wdev_iter->mtx, 1);
__acquire(wdev_iter->mtx);
- cfg80211_get_chan_state(wdev_iter, &ch, &chmode, &radar_detect);
+ cfg80211_get_chan_state(wdev_iter, &ch, &chmode,
+ &params.radar_detect);
wdev_unlock(wdev_iter);
switch (chmode) {
@@ -1715,23 +1764,22 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
if (used_channels[i] == NULL) {
used_channels[i] = ch;
- num_different_channels++;
+ params.num_different_channels++;
}
break;
case CHAN_MODE_EXCLUSIVE:
- num_different_channels++;
+ params.num_different_channels++;
break;
}
- num[wdev_iter->iftype]++;
+ params.iftype_num[wdev_iter->iftype]++;
total++;
}
- if (total == 1 && !radar_detect)
+ if (total == 1 && !params.radar_detect)
return 0;
- return cfg80211_check_combinations(&rdev->wiphy, num_different_channels,
- radar_detect, num);
+ return cfg80211_check_combinations(&rdev->wiphy, &params);
}
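
The refactor replaces three parallel arguments with a single struct iface_combination_params, which also gives the beacon-interval data a place to live (new_beacon_int). A sketch of a driver-side combination check under the new signature; the interface counts here are hypothetical:

    struct iface_combination_params params = {
        .num_different_channels = 1,
    };
    int err;

    /* e.g. one AP plus one station on the same channel */
    params.iftype_num[NL80211_IFTYPE_AP] = 1;
    params.iftype_num[NL80211_IFTYPE_STATION] = 1;

    err = cfg80211_check_combinations(wiphy, &params);
    if (err)
        return err;
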
int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
@@ -1813,3 +1861,54 @@ EXPORT_SYMBOL(rfc1042_header);
const unsigned char bridge_tunnel_header[] __aligned(2) =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
EXPORT_SYMBOL(bridge_tunnel_header);
+
+bool cfg80211_is_gratuitous_arp_unsolicited_na(struct sk_buff *skb)
+{
+ const struct ethhdr *eth = (void *)skb->data;
+ const struct {
+ struct arphdr hdr;
+ u8 ar_sha[ETH_ALEN];
+ u8 ar_sip[4];
+ u8 ar_tha[ETH_ALEN];
+ u8 ar_tip[4];
+ } __packed *arp;
+ const struct ipv6hdr *ipv6;
+ const struct icmp6hdr *icmpv6;
+
+ switch (eth->h_proto) {
+ case cpu_to_be16(ETH_P_ARP):
+ /* can't say - but will probably be dropped later anyway */
+ if (!pskb_may_pull(skb, sizeof(*eth) + sizeof(*arp)))
+ return false;
+
+ arp = (void *)(eth + 1);
+
+ if ((arp->hdr.ar_op == cpu_to_be16(ARPOP_REPLY) ||
+ arp->hdr.ar_op == cpu_to_be16(ARPOP_REQUEST)) &&
+ !memcmp(arp->ar_sip, arp->ar_tip, sizeof(arp->ar_sip)))
+ return true;
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ /* can't say - but will probably be dropped later anyway */
+ if (!pskb_may_pull(skb, sizeof(*eth) + sizeof(*ipv6) +
+ sizeof(*icmpv6)))
+ return false;
+
+ ipv6 = (void *)(eth + 1);
+ icmpv6 = (void *)(ipv6 + 1);
+
+ if (icmpv6->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT &&
+ !memcmp(&ipv6->saddr, &ipv6->daddr, sizeof(ipv6->saddr)))
+ return true;
+ break;
+ default:
+ /*
+ * no need to support other protocols, proxy service isn't
+ * specified for any others
+ */
+ break;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(cfg80211_is_gratuitous_arp_unsolicited_na);
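
Both branches reduce to a single address comparison: a gratuitous ARP carries identical sender and target protocol addresses, and the unsolicited-NA case is flagged when the IPv6 source equals the destination. A standalone sketch of the ARP half, taking the opcode in host byte order for simplicity:

    #include <stdbool.h>
    #include <string.h>

    #define ARPOP_REQUEST 1
    #define ARPOP_REPLY   2

    /* gratuitous ARP: a request or reply whose sender and target IPv4
     * addresses match (the same test applied above) */
    static bool is_gratuitous_arp(unsigned short ar_op,
                                  const unsigned char sip[4],
                                  const unsigned char tip[4])
    {
        return (ar_op == ARPOP_REQUEST || ar_op == ARPOP_REPLY) &&
               !memcmp(sip, tip, 4);
    }
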
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b5e665b3cfb0..cf0193b74ae3 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -115,8 +115,7 @@ static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
rcu_read_unlock();
}
-static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
- int tos, int oif,
+static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
const xfrm_address_t *saddr,
const xfrm_address_t *daddr,
int family)
@@ -128,15 +127,14 @@ static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
if (unlikely(afinfo == NULL))
return ERR_PTR(-EAFNOSUPPORT);
- dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);
+ dst = afinfo->dst_lookup(net, tos, saddr, daddr);
xfrm_policy_put_afinfo(afinfo);
return dst;
}
-static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
- int tos, int oif,
+static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
xfrm_address_t *prev_saddr,
xfrm_address_t *prev_daddr,
int family)
@@ -155,7 +153,7 @@ static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
daddr = x->coaddr;
}
- dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family);
+ dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);
if (!IS_ERR(dst)) {
if (prev_saddr != saddr)
@@ -1395,15 +1393,15 @@ int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
}
static int
-xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
- xfrm_address_t *remote, unsigned short family)
+xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
+ unsigned short family)
{
int err;
struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
if (unlikely(afinfo == NULL))
return -EINVAL;
- err = afinfo->get_saddr(net, oif, local, remote);
+ err = afinfo->get_saddr(net, local, remote);
xfrm_policy_put_afinfo(afinfo);
return err;
}
@@ -1432,8 +1430,7 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
remote = &tmpl->id.daddr;
local = &tmpl->saddr;
if (xfrm_addr_any(local, tmpl->encap_family)) {
- error = xfrm_get_saddr(net, fl->flowi_oif,
- &tmp, remote,
+ error = xfrm_get_saddr(net, &tmp, remote,
tmpl->encap_family);
if (error)
goto fail;
@@ -1712,8 +1709,8 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
family = xfrm[i]->props.family;
- dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
- &saddr, &daddr, family);
+ dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
+ family);
err = PTR_ERR(dst);
if (IS_ERR(dst))
goto put_states;