| author | Harout Hedeshian <harouth@codeaurora.org> | 2013-08-09 09:10:08 -0600 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-22 11:04:54 -0700 |
| commit | 658fd52bad949b556ffc4013df6779adc06b237b (patch) | |
| tree | e1247498f98847bc0f92d8344f6ee0dd652999d7 /net/rmnet_data | |
| parent | 50de92cd6d2b57305664dd7e6488619b7290859f (diff) | |
RmNet Data: Initial release
The RmNet Data driver provides transport-agnostic MAP (multiplexing and
aggregation protocol) support in embedded and bridge modes. The module
provides virtual network devices which can be attached to any IP-mode
physical device. This will be used to provide all MAP functionality
on future hardware in a single consistent location (a sketch of the MAP
framing follows the sign-off trailers below).
CRs-Fixed: 525675
Change-Id: I739947c9c3de008974dd485a74e9953ba2cbb75e
Signed-off-by: Harout Hedeshian <harouth@codeaurora.org>
[subashab@codeaurora.org: resolve trivial merge conflicts]
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
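
For orientation, here is a minimal, standalone userspace sketch (not part of
the patch) of the 4-byte MAP framing the driver implements. The field layout
mirrors struct rmnet_map_header_s from net/rmnet_data/rmnet_map.h in the diff
below, assuming a little-endian build (the !RMNET_USE_BIG_ENDIAN_STRUCTS
case); the frame contents are made up for illustration.

```c
/*
 * Illustrative only -- not driver code.  Builds and parses one MAP data
 * frame the way rmnet_data expects it on the wire: a 4-byte header
 * (pad_len/reserved/cd_bit, mux_id, big-endian pkt_len) followed by the
 * raw IP payload plus optional padding.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* htons()/ntohs() */

struct map_header {		/* little-endian bitfield order */
	uint8_t  pad_len:6;	/* trailing pad bytes in the payload */
	uint8_t  reserved_bit:1;
	uint8_t  cd_bit:1;	/* 1 = MAP command, 0 = data */
	uint8_t  mux_id;	/* selects the logical endpoint / VND */
	uint16_t pkt_len;	/* payload + padding, network byte order */
} __attribute__((packed));

int main(void)
{
	uint8_t frame[sizeof(struct map_header) + 40] = { 0 };
	struct map_header *maph = (struct map_header *)frame;
	uint8_t *payload = frame + sizeof(*maph);

	/* Egress side: wrap a dummy 40-byte IPv4 packet for mux id 3. */
	maph->cd_bit  = 0;
	maph->mux_id  = 3;
	maph->pad_len = 0;
	maph->pkt_len = htons(40);
	payload[0]    = 0x45;	/* IPv4, IHL = 5 */

	/* Ingress side: the same fields the MAP ingress handler reads. */
	printf("mux_id=%u cd=%u payload_len=%u ip_version=%u\n",
	       maph->mux_id, maph->cd_bit,
	       ntohs(maph->pkt_len) - maph->pad_len, payload[0] >> 4);
	return 0;
}
```

A command frame (cd_bit = 1) carries struct rmnet_map_control_command_s
instead of an IP payload, which is how in-band flow control is signalled when
CONFIG_RMNET_DATA_FC is enabled.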
Diffstat (limited to 'net/rmnet_data')
| Mode | Path | Lines |
|---|---|---|
| -rw-r--r-- | net/rmnet_data/Kconfig | 29 |
| -rw-r--r-- | net/rmnet_data/Makefile | 11 |
| -rw-r--r-- | net/rmnet_data/rmnet_data_config.c | 673 |
| -rw-r--r-- | net/rmnet_data/rmnet_data_config.h | 69 |
| -rw-r--r-- | net/rmnet_data/rmnet_data_handlers.c | 511 |
| -rw-r--r-- | net/rmnet_data/rmnet_data_handlers.h | 25 |
| -rw-r--r-- | net/rmnet_data/rmnet_data_main.c | 54 |
| -rw-r--r-- | net/rmnet_data/rmnet_data_private.h | 57 |
| -rw-r--r-- | net/rmnet_data/rmnet_data_vnd.c | 529 |
| -rw-r--r-- | net/rmnet_data/rmnet_data_vnd.h | 33 |
| -rw-r--r-- | net/rmnet_data/rmnet_map.h | 152 |
| -rw-r--r-- | net/rmnet_data/rmnet_map_command.c | 89 |
| -rw-r--r-- | net/rmnet_data/rmnet_map_data.c | 256 |
13 files changed, 2488 insertions, 0 deletions
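
When the ingress format enables RMNET_INGRESS_FORMAT_DEAGGREGATION, several
MAP frames arrive concatenated in one buffer and rmnet_map_ingress_handler()
in the patch below splits them in a loop via rmnet_map_deaggregate(). The
following is a hedged userspace illustration of the walk implied by the
header layout, not the driver's implementation; next_map_frame() is a
hypothetical helper name.

```c
#include <stdint.h>
#include <stddef.h>

/*
 * Hypothetical helper (illustration only): given a buffer holding
 * back-to-back MAP frames, return a pointer to the next frame (or NULL
 * when the aggregate is exhausted or truncated) and report the current
 * frame's mux id and its payload length with padding subtracted -- the
 * same length computation _rmnet_map_ingress_handler() performs before
 * skb_trim().
 *
 * Usage sketch:
 *	const uint8_t *p = buf, *end = buf + len, *next;
 *	while ((next = next_map_frame(p, end, &mux, &plen)) != NULL) {
 *		handle(p + 4, plen, mux);	// this frame's payload
 *		p = next;
 *	}
 */
static const uint8_t *next_map_frame(const uint8_t *p, const uint8_t *end,
				     uint8_t *mux_id, uint16_t *payload_len)
{
	uint16_t pkt_len;

	if (end - p < 4)			/* no room for a MAP header */
		return NULL;

	pkt_len = ((uint16_t)p[2] << 8) | p[3];	/* big-endian on the wire */
	if (end - p < 4 + (ptrdiff_t)pkt_len)	/* truncated aggregate */
		return NULL;

	*mux_id      = p[1];
	*payload_len = pkt_len - (p[0] & 0x3F);	/* drop pad_len bytes */
	return p + 4 + pkt_len;			/* start of the next frame */
}
```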
diff --git a/net/rmnet_data/Kconfig b/net/rmnet_data/Kconfig new file mode 100644 index 000000000000..36d581771cb9 --- /dev/null +++ b/net/rmnet_data/Kconfig @@ -0,0 +1,29 @@ +# +# RMNET Data and MAP driver +# + +menuconfig RMNET_DATA + depends on NETDEVICES + bool "RmNet Data and MAP driver" + ---help--- + If you say Y here, then the rmnet_data module will be statically + compiled into the kernel. The rmnet data module provides MAP + functionality for embedded and bridged traffic. +if RMNET_DATA + +config RMNET_DATA_FC + bool "RmNet Data Flow Control" + depends on NET_SCHED && NET_SCH_PRIO + ---help--- + Say Y here if you want RmNet data to handle in-band flow control and + ioctl based flow control. This depends on net scheduler and prio queue + capability being present in the kernel. In-band flow control requires + MAP protocol be used. +config RMNET_DATA_DEBUG_PKT + bool "Packet Debug Logging" + ---help--- + Say Y here if you want RmNet data to be able to log packets in main + system log. This should not be enabled on production builds as it can + impact system performance. Note that simply enabling it here will not + enable the logging; it must be enabled at run-time as well. +endif # RMNET_DATA diff --git a/net/rmnet_data/Makefile b/net/rmnet_data/Makefile new file mode 100644 index 000000000000..5841495a1711 --- /dev/null +++ b/net/rmnet_data/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the RMNET Data module +# + +rmnet_data-y := rmnet_data_main.o +rmnet_data-y += rmnet_data_config.o +rmnet_data-y += rmnet_data_vnd.o +rmnet_data-y += rmnet_data_handlers.o +rmnet_data-y += rmnet_map_data.o +rmnet_data-y += rmnet_map_command.o +obj-$(CONFIG_RMNET_DATA) += rmnet_data.o diff --git a/net/rmnet_data/rmnet_data_config.c b/net/rmnet_data/rmnet_data_config.c new file mode 100644 index 000000000000..caa6cfb516c7 --- /dev/null +++ b/net/rmnet_data/rmnet_data_config.c @@ -0,0 +1,673 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * RMNET Data configuration engine + * + */ + +#include <net/sock.h> +#include <linux/module.h> +#include <linux/netlink.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> +#include <linux/rmnet_data.h> +#include "rmnet_data_config.h" +#include "rmnet_data_handlers.h" +#include "rmnet_data_vnd.h" +#include "rmnet_data_private.h" + +/* ***************** Local Definitions and Declarations ********************* */ +static struct sock *nl_socket_handle; + +#ifndef RMNET_KERNEL_PRE_3_8 +static struct netlink_kernel_cfg rmnet_netlink_cfg = { + .input = rmnet_config_netlink_msg_handler +}; +#endif + +#define RMNET_NL_MSG_SIZE(Y) (sizeof(((struct rmnet_nl_msg_s *)0)->Y)) + +/* ***************** Init and Cleanup *************************************** */ + +#ifdef RMNET_KERNEL_PRE_3_8 +static struct sock *_rmnet_config_start_netlink(void) +{ + return netlink_kernel_create(&init_net, + RMNET_NETLINK_PROTO, + 0, + rmnet_config_netlink_msg_handler, + NULL, + THIS_MODULE); +} +#else +static struct sock *_rmnet_config_start_netlink(void) +{ + return netlink_kernel_create(&init_net, + RMNET_NETLINK_PROTO, + &rmnet_netlink_cfg); +} +#endif /* RMNET_KERNEL_PRE_3_8 */ + +/** + * rmnet_config_init() - Startup init + * + * Registers netlink protocol with kernel and opens socket. Netlink handler is + * registered with kernel. + */ +int rmnet_config_init(void) +{ + nl_socket_handle = _rmnet_config_start_netlink(); + if (!nl_socket_handle) { + LOGE("%s(): Failed to init netlink socket", __func__); + return RMNET_INIT_ERROR; + } + return 0; +} + +/** + * rmnet_config_exit() - Cleans up all netlink related resources + */ +void rmnet_config_exit(void) +{ + netlink_kernel_release(nl_socket_handle); +} + +/* ***************** Helper Functions *************************************** */ + +/** + * _rmnet_is_physical_endpoint_associated() - Determines if device is associated + * @dev: Device to get check + * + * Compares device rx_handler callback pointer against known funtion + * + * Return: + * - 1 if associated + * - 0 if NOT associated + */ +static inline int _rmnet_is_physical_endpoint_associated(struct net_device *dev) +{ + rx_handler_func_t *rx_handler; + rx_handler = rcu_dereference(dev->rx_handler); + + if (rx_handler == rmnet_rx_handler) + return 1; + else + return 0; +} + +/** + * _rmnet_get_phys_ep_config() - Get physical ep config for an associated device + * @dev: Device to get endpoint configuration from + * + * Return: + * - pointer to configuration if successful + * - 0 (null) if device is not associated + */ +static inline struct rmnet_phys_ep_conf_s *_rmnet_get_phys_ep_config + (struct net_device *dev) +{ + if (_rmnet_is_physical_endpoint_associated(dev)) + return (struct rmnet_phys_ep_conf_s *) + rcu_dereference(dev->rx_handler_data); + else + return 0; +} + +/* ***************** Netlink Handler **************************************** */ +#define _RMNET_NETLINK_NULL_CHECKS() do { if (!rmnet_header || !resp_rmnet) \ + BUG(); \ + } while (0) + +static void _rmnet_netlink_set_link_egress_data_format + (struct rmnet_nl_msg_s *rmnet_header, + struct rmnet_nl_msg_s *resp_rmnet) +{ + struct net_device *dev; + _RMNET_NETLINK_NULL_CHECKS(); + + resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; + dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev); + + if (!dev) { + resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; + return; + } + + resp_rmnet->return_code = + rmnet_set_egress_data_format(dev, + rmnet_header->data_format.flags, + 
rmnet_header->data_format.agg_size, + rmnet_header->data_format.agg_count + ); +} + +static void _rmnet_netlink_set_link_ingress_data_format + (struct rmnet_nl_msg_s *rmnet_header, + struct rmnet_nl_msg_s *resp_rmnet) +{ + struct net_device *dev; + _RMNET_NETLINK_NULL_CHECKS(); + + resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; + + dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev); + if (!dev) { + resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; + return; + } + + resp_rmnet->return_code = + rmnet_set_ingress_data_format(dev, + rmnet_header->data_format.flags); +} + +static void _rmnet_netlink_set_logical_ep_config + (struct rmnet_nl_msg_s *rmnet_header, + struct rmnet_nl_msg_s *resp_rmnet) +{ + struct net_device *dev, *dev2; + _RMNET_NETLINK_NULL_CHECKS(); + + resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; + if (rmnet_header->local_ep_config.ep_id < -1 + || rmnet_header->local_ep_config.ep_id > 254) { + resp_rmnet->return_code = RMNET_CONFIG_BAD_ARGUMENTS; + return; + } + + dev = dev_get_by_name(&init_net, + rmnet_header->local_ep_config.dev); + + dev2 = dev_get_by_name(&init_net, + rmnet_header->local_ep_config.next_dev); + + + if (dev != 0 && dev2 != 0) + resp_rmnet->return_code = + rmnet_set_logical_endpoint_config( + dev, + rmnet_header->local_ep_config.ep_id, + rmnet_header->local_ep_config.operating_mode, + dev2); + else + resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; +} + +static void _rmnet_netlink_associate_network_device + (struct rmnet_nl_msg_s *rmnet_header, + struct rmnet_nl_msg_s *resp_rmnet) +{ + struct net_device *dev; + _RMNET_NETLINK_NULL_CHECKS(); + resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; + + dev = dev_get_by_name(&init_net, rmnet_header->data); + if (!dev) { + resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; + return; + } + + resp_rmnet->return_code = rmnet_associate_network_device(dev); +} + +static void _rmnet_netlink_unassociate_network_device + (struct rmnet_nl_msg_s *rmnet_header, + struct rmnet_nl_msg_s *resp_rmnet) +{ + struct net_device *dev; + _RMNET_NETLINK_NULL_CHECKS(); + resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; + + dev = dev_get_by_name(&init_net, rmnet_header->data); + if (!dev) { + resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; + return; + } + + resp_rmnet->return_code = rmnet_unassociate_network_device(dev); +} + +static inline void _rmnet_netlink_get_link_egress_data_format + (struct rmnet_nl_msg_s *rmnet_header, + struct rmnet_nl_msg_s *resp_rmnet) +{ + struct net_device *dev; + struct rmnet_phys_ep_conf_s *config; + _RMNET_NETLINK_NULL_CHECKS(); + resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; + + + dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev); + if (!dev) { + resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; + return; + } + + config = _rmnet_get_phys_ep_config(dev); + if (!config) { + resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST; + return; + } + + /* Begin Data */ + resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA; + resp_rmnet->arg_length = RMNET_NL_MSG_SIZE(data_format); + resp_rmnet->data_format.flags = config->egress_data_format; + resp_rmnet->data_format.agg_count = config->egress_agg_count; + resp_rmnet->data_format.agg_size = config->egress_agg_size; +} + +static inline void _rmnet_netlink_get_link_ingress_data_format + (struct rmnet_nl_msg_s *rmnet_header, + struct rmnet_nl_msg_s *resp_rmnet) +{ + struct net_device *dev; + struct rmnet_phys_ep_conf_s *config; + _RMNET_NETLINK_NULL_CHECKS(); + resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; + + + dev = 
dev_get_by_name(&init_net, rmnet_header->data_format.dev); + if (!dev) { + resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE; + return; + } + + config = _rmnet_get_phys_ep_config(dev); + if (!config) { + resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST; + return; + } + + /* Begin Data */ + resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA; + resp_rmnet->arg_length = RMNET_NL_MSG_SIZE(data_format); + resp_rmnet->data_format.flags = config->ingress_data_format; +} + +/** + * rmnet_config_netlink_msg_handler() - Netlink message handler callback + * @skb: Packet containing netlink messages + * + * Standard kernel-expected format for a netlink message handler. Processes SKBs + * which contain RmNet data specific netlink messages. + */ +void rmnet_config_netlink_msg_handler(struct sk_buff *skb) +{ + struct nlmsghdr *nlmsg_header, *resp_nlmsg; + struct rmnet_nl_msg_s *rmnet_header, *resp_rmnet; + int return_pid, response_data_length; + struct sk_buff *skb_response; + + response_data_length = 0; + nlmsg_header = (struct nlmsghdr *) skb->data; + rmnet_header = (struct rmnet_nl_msg_s *) nlmsg_data(nlmsg_header); + + LOGL("%s(): Netlink message pid=%d, seq=%d, length=%d, rmnet_type=%d\n", + __func__, + nlmsg_header->nlmsg_pid, + nlmsg_header->nlmsg_seq, + nlmsg_header->nlmsg_len, + rmnet_header->message_type); + + return_pid = nlmsg_header->nlmsg_pid; + + skb_response = nlmsg_new(sizeof(struct nlmsghdr) + + sizeof(struct rmnet_nl_msg_s), + GFP_KERNEL); + + if (!skb_response) { + LOGH("%s(): Failed to allocate response buffer\n", __func__); + return; + } + + resp_nlmsg = nlmsg_put(skb_response, + 0, + nlmsg_header->nlmsg_seq, + NLMSG_DONE, + sizeof(struct rmnet_nl_msg_s), + 0); + + resp_rmnet = nlmsg_data(resp_nlmsg); + + if (!resp_rmnet) + BUG(); + + resp_rmnet->message_type = rmnet_header->message_type; + rtnl_lock(); + switch (rmnet_header->message_type) { + case RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE: + _rmnet_netlink_associate_network_device + (rmnet_header, resp_rmnet); + break; + + case RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE: + _rmnet_netlink_unassociate_network_device + (rmnet_header, resp_rmnet); + break; + + case RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT: + _rmnet_netlink_set_link_egress_data_format + (rmnet_header, resp_rmnet); + break; + + case RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT: + _rmnet_netlink_get_link_egress_data_format + (rmnet_header, resp_rmnet); + break; + + case RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT: + _rmnet_netlink_set_link_ingress_data_format + (rmnet_header, resp_rmnet); + break; + + case RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT: + _rmnet_netlink_get_link_ingress_data_format + (rmnet_header, resp_rmnet); + break; + + case RMNET_NETLINK_SET_LOGICAL_EP_CONFIG: + _rmnet_netlink_set_logical_ep_config(rmnet_header, resp_rmnet); + break; + + case RMNET_NETLINK_NEW_VND: + resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; + resp_rmnet->return_code = + rmnet_create_vnd(rmnet_header->vnd.id); + break; + + default: + resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE; + resp_rmnet->return_code = RMNET_CONFIG_UNKNOWN_MESSAGE; + break; + } + rtnl_unlock(); + nlmsg_unicast(nl_socket_handle, skb_response, return_pid); + +} + +/* ***************** Configuration API ************************************** */ + +/** + * rmnet_unassociate_network_device() - Unassociate network device + * @dev: Device to unassociate + * + * Frees all structures generate for device. Unregisters rx_handler + * todo: needs to do some sanity verification first (is device in use, etc...) 
+ * + * Return: + * - RMNET_CONFIG_OK if successful + * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null + * - RMNET_CONFIG_INVALID_REQUEST if device is not already associated + * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null + */ +int rmnet_unassociate_network_device(struct net_device *dev) +{ + struct rmnet_phys_ep_conf_s *config; + ASSERT_RTNL(); + + LOGL("%s(%s);", __func__, dev->name); + + if (!dev) + return RMNET_CONFIG_NO_SUCH_DEVICE; + + if (!_rmnet_is_physical_endpoint_associated(dev)) + return RMNET_CONFIG_INVALID_REQUEST; + + config = (struct rmnet_phys_ep_conf_s *) + rcu_dereference(dev->rx_handler_data); + + if (!config) + return RMNET_CONFIG_UNKNOWN_ERROR; + + kfree(config); + + netdev_rx_handler_unregister(dev); + + return RMNET_CONFIG_OK; +} + +/** + * rmnet_set_ingress_data_format() - Set ingress data format on network device + * @dev: Device to ingress data format on + * @egress_data_format: 32-bit unsigned bitmask of ingress format + * + * Network device must already have association with RmNet Data driver + * + * Return: + * - RMNET_CONFIG_OK if successful + * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null + * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null + */ +int rmnet_set_ingress_data_format(struct net_device *dev, + uint32_t ingress_data_format) +{ + struct rmnet_phys_ep_conf_s *config; + ASSERT_RTNL(); + + LOGL("%s(%s,0x%08X);", __func__, dev->name, ingress_data_format); + + if (!dev) + return RMNET_CONFIG_NO_SUCH_DEVICE; + + config = _rmnet_get_phys_ep_config(dev); + + if (!config) + return RMNET_CONFIG_INVALID_REQUEST; + + config->ingress_data_format = ingress_data_format; + + return RMNET_CONFIG_OK; +} + +/** + * rmnet_set_egress_data_format() - Set egress data format on network device + * @dev: Device to egress data format on + * @egress_data_format: 32-bit unsigned bitmask of egress format + * + * Network device must already have association with RmNet Data driver + * todo: Bounds check on agg_* + * + * Return: + * - RMNET_CONFIG_OK if successful + * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null + * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null + */ +int rmnet_set_egress_data_format(struct net_device *dev, + uint32_t egress_data_format, + uint16_t agg_size, + uint16_t agg_count) +{ + struct rmnet_phys_ep_conf_s *config; + ASSERT_RTNL(); + + LOGL("%s(%s,0x%08X, %d, %d);", + __func__, dev->name, egress_data_format, agg_size, agg_count); + + if (!dev) + return RMNET_CONFIG_NO_SUCH_DEVICE; + + config = _rmnet_get_phys_ep_config(dev); + + if (!config) + return RMNET_CONFIG_UNKNOWN_ERROR; + + config->egress_data_format = egress_data_format; + config->egress_agg_size = agg_size; + config->egress_agg_count = agg_count; + + return RMNET_CONFIG_OK; +} + +/** + * rmnet_associate_network_device() - Associate network device + * @dev: Device to register with RmNet data + * + * Typically used on physical network devices. Registers RX handler and private + * metadata structures. 
+ * + * Return: + * - RMNET_CONFIG_OK if successful + * - RMNET_CONFIG_NO_SUCH_DEVICE dev is null + * - RMNET_CONFIG_DEVICE_IN_USE if dev rx_handler is already filled + * - RMNET_CONFIG_DEVICE_IN_USE if netdev_rx_handler_register() fails + */ +int rmnet_associate_network_device(struct net_device *dev) +{ + struct rmnet_phys_ep_conf_s *config; + int rc; + ASSERT_RTNL(); + + LOGL("%s(%s);", __func__, dev->name); + + if (!dev) + return RMNET_CONFIG_NO_SUCH_DEVICE; + + if (_rmnet_is_physical_endpoint_associated(dev)) { + LOGM("%s(): %s is already regestered\n", __func__, dev->name); + return RMNET_CONFIG_DEVICE_IN_USE; + } + + config = (struct rmnet_phys_ep_conf_s *) + kmalloc(sizeof(struct rmnet_phys_ep_conf_s), GFP_ATOMIC); + + if (!config) + return RMNET_CONFIG_NOMEM; + + memset(config, 0, sizeof(struct rmnet_phys_ep_conf_s)); + config->dev = dev; + spin_lock_init(&config->agg_lock); + + rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config); + + if (rc) { + LOGM("%s(): netdev_rx_handler_register returns %d\n", + __func__, rc); + kfree(config); + return RMNET_CONFIG_DEVICE_IN_USE; + } + + return RMNET_CONFIG_OK; +} + +/** + * _rmnet_set_logical_endpoint_config() - Set logical endpoing config on device + * @dev: Device to set endpoint configuration on + * @config_id: logical endpoint id on device + * @epconfig: endpoing configuration structure to set + * + * Return: + * - RMNET_CONFIG_OK if successful + * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null + * - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null + * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range + */ +int _rmnet_set_logical_endpoint_config(struct net_device *dev, + int config_id, + struct rmnet_logical_ep_conf_s *epconfig) +{ + struct rmnet_phys_ep_conf_s *config; + struct rmnet_logical_ep_conf_s *epconfig_l; + + ASSERT_RTNL(); + + if (!dev) + return RMNET_CONFIG_NO_SUCH_DEVICE; + + if (config_id < RMNET_LOCAL_LOGICAL_ENDPOINT + || config_id >= RMNET_DATA_MAX_LOGICAL_EP) + return RMNET_CONFIG_BAD_ARGUMENTS; + + if (rmnet_vnd_is_vnd(dev)) + epconfig_l = rmnet_vnd_get_le_config(dev); + else { + config = _rmnet_get_phys_ep_config(dev); + + if (!config) + return RMNET_CONFIG_UNKNOWN_ERROR; + + if (config_id == RMNET_LOCAL_LOGICAL_ENDPOINT) + epconfig_l = &config->local_ep; + else + epconfig_l = &config->muxed_ep[config_id]; + } + + memcpy(epconfig_l, epconfig, sizeof(struct rmnet_logical_ep_conf_s)); + if (config_id == RMNET_LOCAL_LOGICAL_ENDPOINT) + epconfig_l->mux_id = 0; + else + epconfig_l->mux_id = config_id; + + return RMNET_CONFIG_OK; +} + +/** + * rmnet_set_logical_endpoint_config() - Set logical endpoing configuration on a device + * @dev: Device to set endpoint configuration on + * @config_id: logical endpoint id on device + * @rmnet_mode: endpoint mode. Values from: rmnet_config_endpoint_modes_e + * @egress_device: device node to forward packet to once done processing in + * ingress/egress handlers + * + * Creates a logical_endpoint_config structure and fills in the information from + * function arguments. Calls _rmnet_set_logical_endpoint_config() to finish + * configuration. 
Network device must already have association with RmNet Data + * driver + * + * Return: + * - RMNET_CONFIG_OK if successful + * - RMNET_CONFIG_BAD_EGRESS_DEVICE if egress device is null + * - RMNET_CONFIG_BAD_EGRESS_DEVICE if egress device is not handled by + * RmNet data module + * - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null + * - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null + * - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range + */ +int rmnet_set_logical_endpoint_config(struct net_device *dev, + int config_id, + uint8_t rmnet_mode, + struct net_device *egress_dev) +{ + struct rmnet_logical_ep_conf_s epconfig; + + LOGL("%s(%s, %d, %d, %s);", + __func__, dev->name, config_id, rmnet_mode, egress_dev->name); + + if (!egress_dev + || ((!_rmnet_is_physical_endpoint_associated(egress_dev)) + && (!rmnet_vnd_is_vnd(egress_dev)))) { + return RMNET_CONFIG_BAD_EGRESS_DEVICE; + } + + memset(&epconfig, 0, sizeof(struct rmnet_logical_ep_conf_s)); + epconfig.refcount = 1; + epconfig.rmnet_mode = rmnet_mode; + epconfig.egress_dev = egress_dev; + + return _rmnet_set_logical_endpoint_config(dev, config_id, &epconfig); +} + +/** + * rmnet_create_vnd() - Create virtual network device node + * @id: RmNet virtual device node id + * + * Return: + * - result of rmnet_vnd_create_dev() + */ +int rmnet_create_vnd(int id) +{ + struct net_device *dev; + ASSERT_RTNL(); + LOGL("%s(%d);", __func__, id); + return rmnet_vnd_create_dev(id, &dev); +} diff --git a/net/rmnet_data/rmnet_data_config.h b/net/rmnet_data/rmnet_data_config.h new file mode 100644 index 000000000000..2f8efc59f45d --- /dev/null +++ b/net/rmnet_data/rmnet_data_config.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * RMNET Data configuration engine + * + */ + +#include <linux/types.h> +#include <linux/spinlock.h> + +#ifndef _RMNET_DATA_CONFIG_H_ +#define _RMNET_DATA_CONFIG_H_ + +#define RMNET_DATA_MAX_LOGICAL_EP 32 + +struct rmnet_logical_ep_conf_s { + uint8_t refcount; + uint8_t rmnet_mode; + uint8_t mux_id; + struct net_device *egress_dev; +}; + +struct rmnet_phys_ep_conf_s { + struct net_device *dev; + struct rmnet_logical_ep_conf_s local_ep; + struct rmnet_logical_ep_conf_s muxed_ep[RMNET_DATA_MAX_LOGICAL_EP]; + uint32_t ingress_data_format; + uint32_t egress_data_format; + + /* MAP specific */ + uint16_t egress_agg_size; + uint16_t egress_agg_count; + spinlock_t agg_lock; + struct sk_buff *agg_skb; + uint8_t agg_state; + uint8_t agg_count; +}; + +int rmnet_config_init(void); +void rmnet_config_exit(void); + +int rmnet_unassociate_network_device(struct net_device *dev); +int rmnet_set_ingress_data_format(struct net_device *dev, + uint32_t ingress_data_format); +int rmnet_set_egress_data_format(struct net_device *dev, + uint32_t egress_data_format, + uint16_t agg_size, + uint16_t agg_count); +int rmnet_associate_network_device(struct net_device *dev); +int _rmnet_set_logical_endpoint_config(struct net_device *dev, + int config_id, + struct rmnet_logical_ep_conf_s *epconfig); +int rmnet_set_logical_endpoint_config(struct net_device *dev, + int config_id, + uint8_t rmnet_mode, + struct net_device *egress_dev); +void rmnet_config_netlink_msg_handler (struct sk_buff *skb); +int rmnet_create_vnd(int id); + +#endif /* _RMNET_DATA_CONFIG_H_ */ diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c new file mode 100644 index 000000000000..5100db002f26 --- /dev/null +++ b/net/rmnet_data/rmnet_data_handlers.c @@ -0,0 +1,511 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET Data ingress/egress handler + * + */ + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/module.h> +#include <linux/rmnet_data.h> +#include "rmnet_data_private.h" +#include "rmnet_data_config.h" +#include "rmnet_data_vnd.h" +#include "rmnet_map.h" + +void rmnet_egress_handler(struct sk_buff *skb, + struct rmnet_logical_ep_conf_s *ep); + +#ifdef CONFIG_RMNET_DATA_DEBUG_PKT +unsigned int dump_pkt_rx; +module_param(dump_pkt_rx, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dump_pkt_rx, "Dump packets entering ingress handler"); + +unsigned int dump_pkt_tx; +module_param(dump_pkt_tx, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dump_pkt_tx, "Dump packets exiting egress handler"); +#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */ + +/* ***************** Helper Functions *************************************** */ + +/** + * __rmnet_data_set_skb_proto() - Set skb->protocol field + * @skb: packet being modified + * + * Peek at the first byte of the packet and set the protocol. There is not + * good way to determine if a packet has a MAP header. As of writing this, + * the reserved bit in the MAP frame will prevent it from overlapping with + * IPv4/IPv6 frames. This could change in the future! 
+ */ +static inline void __rmnet_data_set_skb_proto(struct sk_buff *skb) +{ + switch (skb->data[0] & 0xF0) { + case 0x40: /* IPv4 */ + skb->protocol = htons(ETH_P_IP); + break; + case 0x60: /* IPv6 */ + skb->protocol = htons(ETH_P_IPV6); + break; + default: + skb->protocol = htons(ETH_P_MAP); + break; + } +} + +#ifdef CONFIG_RMNET_DATA_DEBUG_PKT +/** + * rmnet_print_packet() - Print packet / diagnostics + * @skb: Packet to print + * @printlen: Number of bytes to print + * @dev: Name of interface + * @dir: Character representing direction (e.g.. 'r' for receive) + * + * This function prints out raw bytes in an SKB. Use of this will have major + * performance impacts and may even trigger watchdog resets if too much is being + * printed. Hence, this should always be compiled out unless absolutely needed. + */ +void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir) +{ + char buffer[200]; + unsigned int len, printlen; + int i, buffloc = 0; + + switch (dir) { + case 'r': + printlen = dump_pkt_rx; + break; + + case 't': + printlen = dump_pkt_tx; + break; + + default: + printlen = 0; + break; + } + + if (!printlen) + return; + + pr_err("[%s][%c] - PKT skb->len=%d skb->head=%p skb->data=%p skb->tail=%p skb->end=%p", + dev, dir, skb->len, skb->head, skb->data, skb->tail, skb->end); + + if (skb->len > 0) + len = skb->len; + else + len = ((unsigned int)skb->end) - ((unsigned int)skb->data); + + pr_err("[%s][%c] - PKT len: %d, printing first %d bytes", + dev, dir, len, printlen); + + memset(buffer, 0, sizeof(buffer)); + for (i = 0; (i < printlen) && (i < len); i++) { + if ((i%16) == 0) { + pr_err("[%s][%c] - PKT%s", dev, dir, buffer); + memset(buffer, 0, sizeof(buffer)); + buffloc = 0; + buffloc += snprintf(&buffer[buffloc], + sizeof(buffer)-buffloc, "%04X:", + i); + } + + buffloc += snprintf(&buffer[buffloc], sizeof(buffer)-buffloc, + " %02x", skb->data[i]); + + } + pr_err("[%s][%c] - PKT%s", dev, dir, buffer); +} +#else +void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir) +{ + return; +} +#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */ + +/* ***************** Generic handler **************************************** */ + +/** + * rmnet_bridge_handler() - Bridge related functionality + * + * Return: + * - RX_HANDLER_CONSUMED in all cases + */ +static rx_handler_result_t rmnet_bridge_handler(struct sk_buff *skb, + struct rmnet_logical_ep_conf_s *ep) +{ + if (!ep->egress_dev) { + LOGD("%s(): Missing egress device for packet arriving on %s", + __func__, skb->dev->name); + kfree_skb(skb); + } else { + rmnet_egress_handler(skb, ep); + } + + return RX_HANDLER_CONSUMED; +} + +/** + * __rmnet_deliver_skb() - Deliver skb + * + * Determines where to deliver skb. 
Options are: consume by network stack, + * pass to bridge handler, or pass to virtual network device + * + * Return: + * - RX_HANDLER_CONSUMED if packet forwarded or dropped + * - RX_HANDLER_PASS if packet is to be consumed by network stack as-is + */ +static rx_handler_result_t __rmnet_deliver_skb(struct sk_buff *skb, + struct rmnet_logical_ep_conf_s *ep) +{ + switch (ep->rmnet_mode) { + case RMNET_EPMODE_NONE: + return RX_HANDLER_PASS; + + case RMNET_EPMODE_BRIDGE: + return rmnet_bridge_handler(skb, ep); + + case RMNET_EPMODE_VND: + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + switch (rmnet_vnd_rx_fixup(skb, skb->dev)) { + case RX_HANDLER_CONSUMED: + return RX_HANDLER_CONSUMED; + + case RX_HANDLER_PASS: + skb->pkt_type = PACKET_HOST; + return RX_HANDLER_ANOTHER; + } + return RX_HANDLER_PASS; + + default: + LOGD("%s() unkown ep mode %d", __func__, + ep->rmnet_mode); + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } +} + +/** + * rmnet_ip_ingress_handler() - Ingress handler for all raw IP packets + * @skb: Packet needing a destination + * @config: Physical end point configuration that packet arived on. + * + * Return: + * - RX_HANDLER_CONSUMED if packet forwarded/dropped + * - RX_HANDLER_PASS if packet should be passed up the stack by caller + */ +static rx_handler_result_t rmnet_ip_ingress_handler(struct sk_buff *skb, + struct rmnet_phys_ep_conf_s *config) +{ + if (!config->local_ep.refcount) { + LOGD("%s(): Packet on %s has no local endpoint configuration\n", + __func__, skb->dev->name); + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + + skb->dev = config->local_ep.egress_dev; + + return __rmnet_deliver_skb(skb, &config->local_ep); +} + +/* ***************** MAP handler ******************************************** */ + +/** + * _rmnet_map_ingress_handler() - Actual MAP ingress handler + * @skb: Packet being received + * @config: Physical endpoint configuration for the ingress device + * + * Most MAP ingress functions are processed here. Packets are processed + * individually; aggregates packets should use rmnet_map_ingress_handler() + * + * Return: + * - RX_HANDLER_CONSUMED if packet is dropped + * - result of __rmnet_deliver_skb() for all other cases + */ +static rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb, + struct rmnet_phys_ep_conf_s *config) +{ + struct rmnet_logical_ep_conf_s *ep; + uint8_t mux_id; + uint16_t len; + + mux_id = RMNET_MAP_GET_MUX_ID(skb); + len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb); + + if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) { + LOGD("%s(): Got packet on %s with bad mux id %d\n", + __func__, skb->dev->name, mux_id); + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + + ep = &(config->muxed_ep[mux_id]); + + if (!ep->refcount) { + LOGD("%s(): Packet on %s:%d; has no logical endpoint config\n", + __func__, skb->dev->name, mux_id); + + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + + if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING) + skb->dev = ep->egress_dev; + + /* Subtract MAP header */ + skb_pull(skb, sizeof(struct rmnet_map_header_s)); + skb_trim(skb, len); + __rmnet_data_set_skb_proto(skb); + + return __rmnet_deliver_skb(skb, ep); +} + +/** + * rmnet_map_ingress_handler() - MAP ingress handler + * @skb: Packet being received + * @config: Physical endpoint configuration for the ingress device + * + * Called if and only if MAP is configured in the ingress device's ingress data + * format. 
Deaggregation is done here, actual MAP processing is done in + * _rmnet_map_ingress_handler(). + * + * Return: + * - RX_HANDLER_CONSUMED for aggregated packets + * - RX_HANDLER_CONSUMED for dropped packets + * - result of _rmnet_map_ingress_handler() for all other cases + */ +static rx_handler_result_t rmnet_map_ingress_handler(struct sk_buff *skb, + struct rmnet_phys_ep_conf_s *config) +{ + struct sk_buff *skbn; + int rc, co = 0; + + if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) { + while ((skbn = rmnet_map_deaggregate(skb, config)) != 0) { + LOGD("co=%d\n", co); + _rmnet_map_ingress_handler(skbn, config); + co++; + } + kfree_skb(skb); + rc = RX_HANDLER_CONSUMED; + } else { + rc = _rmnet_map_ingress_handler(skb, config); + } + + return rc; +} + +/** + * rmnet_map_egress_handler() - MAP egress handler + * @skb: Packet being sent + * @config: Physical endpoint configuration for the egress device + * @ep: logical endpoint configuration of the packet originator + * (e.g.. RmNet virtual network device) + * + * Called if and only if MAP is configured in the egress device's egress data + * format. Will expand skb if there is insufficient headroom for MAP protocol. + * Note: headroomexpansion will incur a performance penalty. + * + * Return: + * - 0 on success + * - 1 on failure + */ +static int rmnet_map_egress_handler(struct sk_buff *skb, + struct rmnet_phys_ep_conf_s *config, + struct rmnet_logical_ep_conf_s *ep) +{ + int required_headroom, additional_header_length; + struct rmnet_map_header_s *map_header; + + additional_header_length = 0; + + required_headroom = sizeof(struct rmnet_map_header_s); + + LOGD("%s(): headroom of %d bytes\n", __func__, required_headroom); + + if (skb_headroom(skb) < required_headroom) { + if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) { + LOGD("%s(): Failed to add headroom of %d bytes\n", + __func__, required_headroom); + return 1; + } + } + + map_header = rmnet_map_add_map_header(skb, additional_header_length); + + if (!map_header) { + LOGD("%s(): Failed to add MAP header to egress packet", + __func__); + return 1; + } + + if (config->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) { + if (ep->mux_id == 0xff) + map_header->mux_id = 0; + else + map_header->mux_id = ep->mux_id; + } + + if (config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) { + rmnet_map_aggregate(skb, config); + return RMNET_MAP_CONSUMED; + } + + return RMNET_MAP_SUCCESS; +} +/* ***************** Ingress / Egress Entry Points ************************** */ + +/** + * rmnet_ingress_handler() - Ingress handler entry point + * @skb: Packet being received + * + * Processes packet as per ingress data format for receiving device. Logical + * endpoint is determined from packet inspection. Packet is then sent to the + * egress device listed in the logical endpoint configuration. + * + * Return: + * - RX_HANDLER_PASS if packet is not processed by handler (caller must + * deal with the packet) + * - RX_HANDLER_CONSUMED if packet is forwarded or processed by MAP + */ +rx_handler_result_t rmnet_ingress_handler(struct sk_buff *skb) +{ + struct rmnet_phys_ep_conf_s *config; + struct net_device *dev; + int rc; + + if (!skb) + BUG(); + + dev = skb->dev; + rmnet_print_packet(skb, dev->name, 'r'); + + config = (struct rmnet_phys_ep_conf_s *) + rcu_dereference(skb->dev->rx_handler_data); + + /* Sometimes devices operate in ethernet mode even thouth there is no + * ethernet header. 
This causes the skb->protocol to contain a bogus + * value and the skb->data pointer to be off by 14 bytes. Fix it if + * configured to do so + */ + if (config->ingress_data_format & RMNET_INGRESS_FIX_ETHERNET) { + skb_push(skb, RMNET_ETHERNET_HEADER_LENGTH); + __rmnet_data_set_skb_proto(skb); + } + + if (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) { + if (RMNET_MAP_GET_CD_BIT(skb)) { + if (config->ingress_data_format + & RMNET_INGRESS_FORMAT_MAP_COMMANDS) { + rc = rmnet_map_command(skb, config); + } else { + LOGM("%s(): MAP command packet on %s; %s\n", + __func__, dev->name, + "Not configured for MAP commands"); + kfree_skb(skb); + return RX_HANDLER_CONSUMED; + } + } else { + rc = rmnet_map_ingress_handler(skb, config); + } + } else { + switch (ntohs(skb->protocol)) { + case ETH_P_MAP: + LOGD("%s(): MAP packet on %s; not configured for MAP\n", + __func__, dev->name); + kfree_skb(skb); + rc = RX_HANDLER_CONSUMED; + break; + + case ETH_P_ARP: + case ETH_P_IP: + case ETH_P_IPV6: + rc = rmnet_ip_ingress_handler(skb, config); + break; + + default: + LOGD("%s(): Unknown skb->proto 0x%04X\n", + __func__, ntohs(skb->protocol) & 0xFFFF); + rc = RX_HANDLER_PASS; + } + } + + return rc; +} + +/** + * rmnet_rx_handler() - Rx handler callback registered with kernel + * @pskb: Packet to be processed by rx handler + * + * Standard kernel-expected footprint for rx handlers. Calls + * rmnet_ingress_handler with correctly formatted arguments + * + * Return: + * - Whatever rmnet_ingress_handler() returns + */ +rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) +{ + return rmnet_ingress_handler(*pskb); +} + +/** + * rmnet_egress_handler() - Egress handler entry point + * @skb: packet to transmit + * @ep: logical endpoint configuration of the packet originator + * (e.g.. RmNet virtual network device) + * + * Modifies packet as per logical endpoint configuration and egress data format + * for egress device configured in logical endpoint. Packet is then transmitted + * on the egress device. + */ +void rmnet_egress_handler(struct sk_buff *skb, + struct rmnet_logical_ep_conf_s *ep) +{ + struct rmnet_phys_ep_conf_s *config; + struct net_device *orig_dev; + orig_dev = skb->dev; + skb->dev = ep->egress_dev; + + config = (struct rmnet_phys_ep_conf_s *) + rcu_dereference(skb->dev->rx_handler_data); + + LOGD("%s(): Packet going out on %s with egress format 0x%08X\n", + __func__, skb->dev->name, config->egress_data_format); + + if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP) { + switch (rmnet_map_egress_handler(skb, config, ep)) { + case RMNET_MAP_CONSUMED: + LOGD("%s(): MAP process consumed packet\n", __func__); + return; + + case RMNET_MAP_SUCCESS: + break; + + default: + LOGD("%s(): MAP egress failed on packet on %s", + __func__, skb->dev->name); + kfree_skb(skb); + return; + } + } + + if (ep->rmnet_mode == RMNET_EPMODE_VND) + rmnet_vnd_tx_fixup(skb, orig_dev); + + rmnet_print_packet(skb, skb->dev->name, 't'); + if (dev_queue_xmit(skb) != 0) { + LOGD("%s(): Failed to queue packet for transmission on [%s]\n", + __func__, skb->dev->name); + } +} diff --git a/net/rmnet_data/rmnet_data_handlers.h b/net/rmnet_data/rmnet_data_handlers.h new file mode 100644 index 000000000000..42f9e6f4403e --- /dev/null +++ b/net/rmnet_data/rmnet_data_handlers.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * RMNET Data ingress/egress handler + * + */ + +#ifndef _RMNET_DATA_HANDLERS_H_ +#define _RMNET_DATA_HANDLERS_H_ + +void rmnet_egress_handler(struct sk_buff *skb, + struct rmnet_logical_ep_conf_s *ep); + +rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb); + +#endif /* _RMNET_DATA_HANDLERS_H_ */ diff --git a/net/rmnet_data/rmnet_data_main.c b/net/rmnet_data/rmnet_data_main.c new file mode 100644 index 000000000000..f5c2e8723d7f --- /dev/null +++ b/net/rmnet_data/rmnet_data_main.c @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * + * RMNET Data generic framework + * + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/export.h> +#include "rmnet_data_private.h" +#include "rmnet_data_config.h" +#include "rmnet_data_vnd.h" + +/* ***************** Module Parameters ************************************** */ +unsigned int rmnet_data_log_level = RMNET_LOG_LVL_ERR | RMNET_LOG_LVL_HI; +module_param(rmnet_data_log_level, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(log_level, "Logging level"); + +/* ***************** Startup/Shutdown *************************************** */ + +/** + * rmnet_init() - Module initialization + * + * todo: check for (and init) startup errors + */ +static int __init rmnet_init(void) +{ + rmnet_config_init(); + rmnet_vnd_init(); + + LOGL("%s", "RMNET Data driver loaded successfully\n"); + return 0; +} + +static void __exit rmnet_exit(void) +{ + rmnet_config_exit(); + rmnet_vnd_exit(); +} + +module_init(rmnet_init) +module_exit(rmnet_exit) +MODULE_LICENSE("GPL v2"); diff --git a/net/rmnet_data/rmnet_data_private.h b/net/rmnet_data/rmnet_data_private.h new file mode 100644 index 000000000000..ae253b9db5fc --- /dev/null +++ b/net/rmnet_data/rmnet_data_private.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _RMNET_DATA_PRIVATE_H_ +#define _RMNET_DATA_PRIVATE_H_ + +#define RMNET_DATA_MAX_VND 32 +#define RMNET_DATA_MAX_PACKET_SIZE 16384 +#define RMNET_DATA_DFLT_PACKET_SIZE 1500 +#define RMNET_DATA_DEV_NAME_STR "rmnet_data%d" +#define RMNET_DATA_NEEDED_HEADROOM 16 +#define RMNET_ETHERNET_HEADER_LENGTH 14 + +extern unsigned int rmnet_data_log_level; + +#define RMNET_INIT_OK 0 +#define RMNET_INIT_ERROR 1 + +#define RMNET_LOG_LVL_DBG (1<<4) +#define RMNET_LOG_LVL_LOW (1<<3) +#define RMNET_LOG_LVL_MED (1<<2) +#define RMNET_LOG_LVL_HI (1<<1) +#define RMNET_LOG_LVL_ERR (1<<0) + +#define LOGE(fmt, ...) do { if (rmnet_data_log_level & RMNET_LOG_LVL_ERR) \ + pr_err(fmt, ##__VA_ARGS__); \ + } while (0) + +#define LOGH(fmt, ...) do { if (rmnet_data_log_level & RMNET_LOG_LVL_HI) \ + pr_err(fmt, ##__VA_ARGS__); \ + } while (0) + +#define LOGM(fmt, ...) do { if (rmnet_data_log_level & RMNET_LOG_LVL_MED) \ + pr_warn(fmt, ##__VA_ARGS__); \ + } while (0) + +#define LOGL(fmt, ...) do { if (unlikely \ + (rmnet_data_log_level & RMNET_LOG_LVL_LOW)) \ + pr_notice(fmt, ##__VA_ARGS__); \ + } while (0) + +#define LOGD(fmt, ...) do { if (unlikely \ + (rmnet_data_log_level & RMNET_LOG_LVL_DBG)) \ + pr_debug(fmt, ##__VA_ARGS__); \ + } while (0) + +#endif /* _RMNET_DATA_PRIVATE_H_ */ diff --git a/net/rmnet_data/rmnet_data_vnd.c b/net/rmnet_data/rmnet_data_vnd.c new file mode 100644 index 000000000000..f304de49404d --- /dev/null +++ b/net/rmnet_data/rmnet_data_vnd.c @@ -0,0 +1,529 @@ +/* + * Copyright (c) 2013, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * + * RMNET Data virtual network driver + * + */ + +#include <linux/types.h> +#include <linux/rmnet_data.h> +#include <linux/msm_rmnet.h> +#include <linux/etherdevice.h> +#include <linux/if_arp.h> +#include <linux/spinlock.h> +#include <net/pkt_sched.h> +#include "rmnet_data_config.h" +#include "rmnet_data_handlers.h" +#include "rmnet_data_private.h" +#include "rmnet_map.h" + +struct net_device *rmnet_devices[RMNET_DATA_MAX_VND]; + +struct rmnet_vnd_private_s { + uint8_t qos_mode:1; + uint8_t reserved:7; + struct rmnet_logical_ep_conf_s local_ep; + struct rmnet_map_flow_control_s flows; +}; + +/* ***************** Helper Functions *************************************** */ + +/** + * rmnet_vnd_add_qos_header() - Adds QoS header to front of skb->data + * @skb: Socket buffer ("packet") to modify + * @dev: Egress interface + * + * Does not check for sufficient headroom! Caller must make sure there is enough + * headroom. 
+ */ +static void rmnet_vnd_add_qos_header(struct sk_buff *skb, + struct net_device *dev) +{ + struct QMI_QOS_HDR_S *qmih; + + qmih = (struct QMI_QOS_HDR_S *) + skb_push(skb, sizeof(struct QMI_QOS_HDR_S)); + qmih->version = 1; + qmih->flags = 0; + qmih->flow_id = skb->mark; +} + +/* ***************** RX/TX Fixup ******************************************** */ + +/** + * rmnet_vnd_rx_fixup() - Virtual Network Device receive fixup hook + * @skb: Socket buffer ("packet") to modify + * @dev: Virtual network device + * + * Additional VND specific packet processing for ingress packets + * + * Return: + * - RX_HANDLER_PASS if packet should continue to process in stack + * - RX_HANDLER_CONSUMED if packet should not be processed in stack + * + */ +int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev) +{ + if (unlikely(!dev || !skb)) + BUG(); + + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + + return RX_HANDLER_PASS; +} + +/** + * rmnet_vnd_tx_fixup() - Virtual Network Device transmic fixup hook + * @skb: Socket buffer ("packet") to modify + * @dev: Virtual network device + * + * Additional VND specific packet processing for egress packets + * + * Return: + * - RX_HANDLER_PASS if packet should continue to be transmitted + * - RX_HANDLER_CONSUMED if packet should not be transmitted by stack + */ +int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev) +{ + struct rmnet_vnd_private_s *dev_conf; + dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev); + + if (unlikely(!dev || !skb)) + BUG(); + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + return RX_HANDLER_PASS; +} + +/* ***************** Network Device Operations ****************************** */ + +/** + * rmnet_vnd_start_xmit() - Transmit NDO callback + * @skb: Socket buffer ("packet") being sent from network stack + * @dev: Virtual Network Device + * + * Standard network driver operations hook to transmit packets on virtual + * network device. Called by network stack. Packet is not transmitted directly + * from here; instead it is given to the rmnet egress handler. + * + * Return: + * - NETDEV_TX_OK under all cirumstances (cannot block/fail) + */ +static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rmnet_vnd_private_s *dev_conf; + dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev); + if (dev_conf->local_ep.egress_dev) { + /* QoS header should come after MAP header */ + if (dev_conf->qos_mode) + rmnet_vnd_add_qos_header(skb, dev); + rmnet_egress_handler(skb, &dev_conf->local_ep); + } else { + dev->stats.tx_dropped++; + kfree_skb(skb); + } + return NETDEV_TX_OK; +} + +/** + * rmnet_vnd_change_mtu() - Change MTU NDO callback + * @dev: Virtual network device + * @new_mtu: New MTU value to set (in bytes) + * + * Standard network driver operations hook to set the MTU. Called by kernel to + * set the device MTU. 
Checks if desired MTU is less than zero or greater than + * RMNET_DATA_MAX_PACKET_SIZE; + * + * Return: + * - 0 if successful + * - -EINVAL if new_mtu is out of range + */ +static int rmnet_vnd_change_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu < 0 || new_mtu > RMNET_DATA_MAX_PACKET_SIZE) + return -EINVAL; + + dev->mtu = new_mtu; + return 0; +} + +#ifdef CONFIG_RMNET_DATA_FC +static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev, + struct ifreq *ifr, + int cmd) +{ + struct rmnet_vnd_private_s *dev_conf; + int rc; + rc = 0; + dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev); + + switch (cmd) { + + case RMNET_IOCTL_SET_QOS_ENABLE: + LOGM("%s(): RMNET_IOCTL_SET_QOS_ENABLE on %s\n", + __func__, dev->name); + dev_conf->qos_mode = 1; + break; + + case RMNET_IOCTL_SET_QOS_DISABLE: + LOGM("%s(): RMNET_IOCTL_SET_QOS_DISABLE on %s\n", + __func__, dev->name); + dev_conf->qos_mode = 0; + break; + + case RMNET_IOCTL_FLOW_ENABLE: + LOGL("%s(): RMNET_IOCTL_FLOW_ENABLE on %s\n", + __func__, dev->name); + tc_qdisc_flow_control(dev, (u32)ifr->ifr_data, 1); + break; + + case RMNET_IOCTL_FLOW_DISABLE: + LOGL("%s(): RMNET_IOCTL_FLOW_DISABLE on %s\n", + __func__, dev->name); + tc_qdisc_flow_control(dev, (u32)ifr->ifr_data, 0); + break; + + case RMNET_IOCTL_GET_QOS: /* Get QoS header state */ + LOGM("%s(): RMNET_IOCTL_GET_QOS on %s\n", + __func__, dev->name); + ifr->ifr_ifru.ifru_data = + (void *)(dev_conf->qos_mode == 1); + break; + + default: + rc = -EINVAL; + } + + return rc; +} +#else +static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev, + struct ifreq *ifr, + int cmd) +{ + return -EINVAL; +} +#endif /* CONFIG_RMNET_DATA_FC */ + +/** + * rmnet_vnd_ioctl() - IOCTL NDO callback + * @dev: Virtual network device + * @ifreq: User data + * @cmd: IOCTL command value + * + * Standard network driver operations hook to process IOCTLs. Called by kernel + * to process non-stanard IOCTLs for device + * + * Return: + * - 0 if successful + * - -EINVAL if unknown IOCTL + */ +static int rmnet_vnd_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct rmnet_vnd_private_s *dev_conf; + int rc; + rc = 0; + dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev); + + rc = _rmnet_vnd_do_qos_ioctl(dev, ifr, cmd); + if (rc != -EINVAL) + return rc; + rc = 0; /* Reset rc as it may contain -EINVAL from above */ + + switch (cmd) { + + case RMNET_IOCTL_OPEN: /* Do nothing. Support legacy behavior */ + LOGM("%s(): RMNET_IOCTL_OPEN on %s (ignored)\n", + __func__, dev->name); + break; + + case RMNET_IOCTL_CLOSE: /* Do nothing. Support legacy behavior */ + LOGM("%s(): RMNET_IOCTL_CLOSE on %s (ignored)\n", + __func__, dev->name); + break; + + case RMNET_IOCTL_SET_LLP_ETHERNET: + LOGM("%s(): RMNET_IOCTL_SET_LLP_ETHERNET on %s (no support)\n", + __func__, dev->name); + rc = -EINVAL; + break; + + case RMNET_IOCTL_SET_LLP_IP: /* Do nothing. 
Support legacy behavior */ + LOGM("%s(): RMNET_IOCTL_SET_LLP_IP on %s (ignored)\n", + __func__, dev->name); + break; + + case RMNET_IOCTL_GET_LLP: /* Always return IP mode */ + LOGM("%s(): RMNET_IOCTL_GET_LLP on %s\n", + __func__, dev->name); + ifr->ifr_ifru.ifru_data = (void *)(RMNET_MODE_LLP_IP); + break; + + default: + LOGH("%s(): Unkown IOCTL 0x%08X\n", __func__, cmd); + rc = -EINVAL; + } + + return rc; +} + +static const struct net_device_ops rmnet_data_vnd_ops = { + .ndo_init = 0, + .ndo_start_xmit = rmnet_vnd_start_xmit, + .ndo_do_ioctl = rmnet_vnd_ioctl, + .ndo_change_mtu = rmnet_vnd_change_mtu, + .ndo_set_mac_address = 0, + .ndo_validate_addr = 0, +}; + +/** + * rmnet_vnd_setup() - net_device initialization callback + * @dev: Virtual network device + * + * Called by kernel whenever a new rmnet_data<n> device is created. Sets MTU, + * flags, ARP type, needed headroom, etc... + * + * todo: What is watchdog_timeo? Do we need to explicitly set it? + */ +static void rmnet_vnd_setup(struct net_device *dev) +{ + struct rmnet_vnd_private_s *dev_conf; + LOGM("%s(): Setting up device %s\n", __func__, dev->name); + + /* Clear out private data */ + dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev); + memset(dev_conf, 0, sizeof(struct rmnet_vnd_private_s)); + + /* keep the default flags, just add NOARP */ + dev->flags |= IFF_NOARP; + dev->netdev_ops = &rmnet_data_vnd_ops; + dev->mtu = RMNET_DATA_DFLT_PACKET_SIZE; + dev->needed_headroom = RMNET_DATA_NEEDED_HEADROOM; + random_ether_addr(dev->dev_addr); + dev->watchdog_timeo = 1000; + + /* Raw IP mode */ + dev->header_ops = 0; /* No header */ + dev->type = ARPHRD_RAWIP; + dev->hard_header_len = 0; + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + + /* Flow control locks */ + rwlock_init(&dev_conf->flows.flow_map_lock); +} + +/* ***************** Exposed API ******************************************** */ + +/** + * rmnet_vnd_exit() - Shutdown cleanup hook + * + * Called by RmNet main on module unload. Cleans up data structures and + * unregisters/frees net_devices. + */ +void rmnet_vnd_exit(void) +{ + int i; + for (i = 0; i < RMNET_DATA_MAX_VND; i++) + if (rmnet_devices[i]) { + unregister_netdev(rmnet_devices[i]); + free_netdev(rmnet_devices[i]); + } +} + +/** + * rmnet_vnd_init() - Init hook + * + * Called by RmNet main on module load. Initializes data structures + */ +int rmnet_vnd_init(void) +{ + memset(rmnet_devices, 0, + sizeof(struct net_device *) * RMNET_DATA_MAX_VND); + return 0; +} + +/** + * rmnet_vnd_create_dev() - Create a new virtual network device node. + * @id: Virtual device node id + * @new_device: Pointer to newly created device node + * + * Allocates structures for new virtual network devices. Sets the name of the + * new device and registers it with the network stack. Device will appear in + * ifconfig list after this is called. 
+ * + * Return: + * - 0 if successful + * - -EINVAL if id is out of range, or id already in use + * - -EINVAL if net_device allocation failed + * - return code of register_netdevice() on other errors + */ +int rmnet_vnd_create_dev(int id, struct net_device **new_device) +{ + struct net_device *dev; + int rc = 0; + + if (id < 0 || id > RMNET_DATA_MAX_VND || rmnet_devices[id] != 0) { + *new_device = 0; + return -EINVAL; + } + + dev = alloc_netdev(sizeof(struct rmnet_vnd_private_s), + RMNET_DATA_DEV_NAME_STR, + rmnet_vnd_setup); + if (!dev) { + LOGE("%s(): Failed to to allocate netdev for id %d", + __func__, id); + *new_device = 0; + return -EINVAL; + } + + rc = register_netdevice(dev); + if (rc != 0) { + LOGE("%s(): Failed to to register netdev [%s]", + __func__, dev->name); + free_netdev(dev); + *new_device = 0; + } else { + rmnet_devices[id] = dev; + *new_device = dev; + } + + LOGM("%s(): Registered device %s\n", __func__, dev->name); + return rc; +} + +/** + * rmnet_vnd_is_vnd() - Determine if net_device is RmNet owned virtual devices + * @dev: Network device to test + * + * Searches through list of known RmNet virtual devices. This function is O(n) + * and should not be used in the data path. + * + * Return: + * - 0 if device is not RmNet virtual device + * - 1 if device is RmNet virtual device + */ +int rmnet_vnd_is_vnd(struct net_device *dev) +{ + /* + * This is not an efficient search, but, this will only be called in + * a configuration context, and the list is small. + */ + int i; + + if (!dev) + BUG(); + + for (i = 0; i < RMNET_DATA_MAX_VND; i++) + if (dev == rmnet_devices[i]) + return i+1; + + return 0; +} + +/** + * rmnet_vnd_get_le_config() - Get the logical endpoint configuration + * @dev: Virtual device node + * + * Gets the logical endpoint configuration for a RmNet virtual network device + * node. Caller should confirm that devices is a RmNet VND before calling. + * + * Return: + * - Pointer to logical endpoint configuration structure + * - 0 (null) if dev is null + */ +struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev) +{ + struct rmnet_vnd_private_s *dev_conf; + if (!dev) + return 0; + + dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev); + if (!dev_conf) + BUG(); + + return &dev_conf->local_ep; +} + +/** + * rmnet_vnd_get_flow_mapping() - Retrieve QoS flow mapping. + * @dev: Virtual network device node to do lookup on + * @map_flow_id: Flow ID + * @tc_handle: Pointer to TC qdisc flow handle. Results stored here + * @v4_seq: pointer to IPv4 indication sequence number. Caller can modify value + * @v6_seq: pointer to IPv6 indication sequence number. Caller can modify value + * + * Sets flow_map to 0 on error or if no flow is configured + * todo: Add flow specific mappings + * todo: Standardize return codes. 
+
+/**
+ * rmnet_vnd_get_flow_mapping() - Retrieve QoS flow mapping.
+ * @dev:          Virtual network device node to do lookup on
+ * @map_flow_id:  Flow ID
+ * @tc_handle:    Pointer to TC qdisc flow handle. Results stored here
+ * @v4_seq:       pointer to IPv4 indication sequence number. Caller can modify value
+ * @v6_seq:       pointer to IPv6 indication sequence number. Caller can modify value
+ *
+ * Sets *tc_handle to 0 on error or if no flow is configured
+ * todo: Add flow specific mappings
+ * todo: Standardize return codes.
+ *
+ * Return:
+ *      - 0 if successful
+ *      - 1 if no mapping is found
+ *      - 2 if dev is not an RmNet virtual network device node
+ */
+int rmnet_vnd_get_flow_mapping(struct net_device *dev,
+			       uint32_t map_flow_id,
+			       uint32_t *tc_handle,
+			       uint64_t **v4_seq,
+			       uint64_t **v6_seq)
+{
+	struct rmnet_vnd_private_s *dev_conf;
+	struct rmnet_map_flow_mapping_s *flowmap;
+	int i;
+	int error = 0;
+
+	if (!dev || !tc_handle)
+		BUG();
+
+	if (!rmnet_vnd_is_vnd(dev)) {
+		*tc_handle = 0;
+		return 2;
+	} else {
+		dev_conf = (struct rmnet_vnd_private_s *) netdev_priv(dev);
+	}
+
+	if (!dev_conf)
+		BUG();
+
+	if (map_flow_id == 0xFFFFFFFF) {
+		*tc_handle = dev_conf->flows.default_tc_handle;
+		*v4_seq = &dev_conf->flows.default_v4_seq;
+		*v6_seq = &dev_conf->flows.default_v6_seq;
+		if (*tc_handle == 0)
+			error = 1;
+	} else {
+		flowmap = &dev_conf->flows.flowmap[0];
+		error = 1;
+		for (i = 0; i < RMNET_MAP_MAX_FLOWS; i++) {
+			if ((flowmap[i].flow_id != 0)
+			    && (flowmap[i].flow_id == map_flow_id)) {
+				*tc_handle = flowmap[i].tc_handle;
+				*v4_seq = &flowmap[i].v4_seq;
+				*v6_seq = &flowmap[i].v6_seq;
+				error = 0;
+				break;
+			}
+		}
+		/* Only clear the results if no matching flow was found */
+		if (error) {
+			*v4_seq = 0;
+			*v6_seq = 0;
+			*tc_handle = 0;
+		}
+	}
+
+	return error;
+}
diff --git a/net/rmnet_data/rmnet_data_vnd.h b/net/rmnet_data/rmnet_data_vnd.h
new file mode 100644
index 000000000000..a916b2534a6d
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_vnd.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Virtual Network Device APIs
+ *
+ */
+
+#include <linux/types.h>
+
+#ifndef _RMNET_DATA_VND_H_
+#define _RMNET_DATA_VND_H_
+
+int rmnet_vnd_get_flow_mapping(struct net_device *dev, uint32_t map_flow_id,
+			       uint32_t *tc_handle, uint64_t **v4_seq,
+			       uint64_t **v6_seq);
+struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev);
+int rmnet_vnd_create_dev(int id, struct net_device **new_device);
+int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
+int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
+int rmnet_vnd_is_vnd(struct net_device *dev);
+int rmnet_vnd_init(void);
+void rmnet_vnd_exit(void);
+
+#endif /* _RMNET_DATA_VND_H_ */
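As a usage sketch for the VND API collected in this header, the configuration layer would drive it roughly as follows. This is illustrative only and not part of the patch; rmnet_example_bring_up_vnd() is a hypothetical helper, and the caller is assumed to hold RTNL because rmnet_vnd_create_dev() goes through register_netdevice().

/* Illustrative sketch only -- not part of this patch */
static int rmnet_example_bring_up_vnd(int id)
{
	struct net_device *dev;
	struct rmnet_logical_ep_conf_s *epconfig;
	int rc;

	rc = rmnet_vnd_create_dev(id, &dev);	/* registers rmnet_data<id> */
	if (rc != 0)
		return rc;

	if (!rmnet_vnd_is_vnd(dev))		/* O(n) sanity check */
		return -EINVAL;

	epconfig = rmnet_vnd_get_le_config(dev);
	if (!epconfig)
		return -EINVAL;

	/* ... associate epconfig with a physical endpoint here ... */
	return 0;
}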
diff --git a/net/rmnet_data/rmnet_map.h b/net/rmnet_data/rmnet_map.h
new file mode 100644
index 000000000000..5d599fa6e588
--- /dev/null
+++ b/net/rmnet_data/rmnet_map.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+
+#ifndef _RMNET_MAP_H_
+#define _RMNET_MAP_H_
+
+#define RMNET_MAP_MAX_FLOWS 8
+
+struct rmnet_map_header_s {
+#ifndef RMNET_USE_BIG_ENDIAN_STRUCTS
+	uint8_t  pad_len:6;
+	uint8_t  reserved_bit:1;
+	uint8_t  cd_bit:1;
+#else
+	uint8_t  cd_bit:1;
+	uint8_t  reserved_bit:1;
+	uint8_t  pad_len:6;
+#endif /* RMNET_USE_BIG_ENDIAN_STRUCTS */
+	uint8_t  mux_id;
+	uint16_t pkt_len;
+}  __aligned(1);
+
+struct rmnet_map_control_command_s {
+	uint8_t  command_name;
+#ifndef RMNET_USE_BIG_ENDIAN_STRUCTS
+	uint8_t  cmd_type:2;
+	uint8_t  reserved:6;
+#else
+	uint8_t  reserved:6;
+	uint8_t  cmd_type:2;
+#endif /* RMNET_USE_BIG_ENDIAN_STRUCTS */
+	uint16_t reserved2;
+	uint32_t transaction_id;
+	union {
+		uint8_t  data[65528];
+		struct {
+#ifndef RMNET_USE_BIG_ENDIAN_STRUCTS
+			uint16_t ip_family:2;
+			uint16_t reserved:14;
+#else
+			uint16_t reserved:14;
+			uint16_t ip_family:2;
+#endif /* RMNET_USE_BIG_ENDIAN_STRUCTS */
+			uint16_t flow_control_seq_num;
+			uint32_t qos_id;
+		} flow_control;
+	};
+}  __aligned(1);
+
+struct rmnet_map_flow_mapping_s {
+	uint32_t flow_id;
+	uint32_t tc_handle;
+	uint64_t v4_seq;
+	uint64_t v6_seq;
+};
+
+struct rmnet_map_flow_control_s {
+	rwlock_t flow_map_lock;
+	uint32_t default_tc_handle;
+	uint64_t default_v4_seq;
+	uint64_t default_v6_seq;
+	struct rmnet_map_flow_mapping_s flowmap[RMNET_MAP_MAX_FLOWS];
+};
+
+enum rmnet_map_results_e {
+	RMNET_MAP_SUCCESS,
+	RMNET_MAP_CONSUMED,
+	RMNET_MAP_GENERAL_FAILURE,
+	RMNET_MAP_NOT_ENABLED,
+	RMNET_MAP_FAILED_AGGREGATION,
+	RMNET_MAP_FAILED_MUX
+};
+
+enum rmnet_map_mux_errors_e {
+	RMNET_MAP_MUX_SUCCESS,
+	RMNET_MAP_MUX_INVALID_MUX_ID,
+	RMNET_MAP_MUX_INVALID_PAD_LENGTH,
+	RMNET_MAP_MUX_INVALID_PKT_LENGTH,
+	/* This should always be the last element */
+	RMNET_MAP_MUX_ENUM_LENGTH
+};
+
+enum rmnet_map_checksum_errors_e {
+	RMNET_MAP_CHECKSUM_OK,
+	RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET,
+	RMNET_MAP_CHECKSUM_VALIDATION_FAILED,
+	RMNET_MAP_CHECKSUM_ERROR_UNKOWN,
+	RMNET_MAP_CHECKSUM_ERROR_NOT_DATA_PACKET,
+	RMNET_MAP_CHECKSUM_ERROR_BAD_BUFFER,
+	RMNET_MAP_CHECKSUM_ERROR_UNKNOWN_IP_VERSION,
+	RMNET_MAP_CHECKSUM_ERROR_UNKNOWN_TRANSPORT,
+	/* This should always be the last element */
+	RMNET_MAP_CHECKSUM_ENUM_LENGTH
+};
+
+enum rmnet_map_commands_e {
+	RMNET_MAP_COMMAND_NONE,
+	RMNET_MAP_COMMAND_FLOW_DISABLE,
+	RMNET_MAP_COMMAND_FLOW_ENABLE,
+	/* These should always be the last 2 elements */
+	RMNET_MAP_COMMAND_UNKNOWN,
+	RMNET_MAP_COMMAND_ENUM_LENGTH
+};
+
+enum rmnet_map_agg_state_e {
+	RMNET_MAP_AGG_IDLE,
+	RMNET_MAP_TXFER_SCHEDULED
+};
+
+#define RMNET_MAP_P_ICMP4    0x01
+#define RMNET_MAP_P_TCP      0x06
+#define RMNET_MAP_P_UDP      0x11
+#define RMNET_MAP_P_ICMP6    0x3a
+
+#define RMNET_MAP_COMMAND_REQUEST     0
+#define RMNET_MAP_COMMAND_ACK         1
+#define RMNET_MAP_COMMAND_UNSUPPORTED 2
+#define RMNET_MAP_COMMAND_INVALID     3
+
+uint8_t rmnet_map_demultiplex(struct sk_buff *skb);
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_phys_ep_conf_s *config);
+
+#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header_s *)Y->data)->mux_id)
+#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header_s *)Y->data)->cd_bit)
+#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header_s *)Y->data)->pad_len)
+#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command_s *) \
+				    (Y->data + sizeof(struct rmnet_map_header_s)))
+#define RMNET_MAP_GET_LENGTH(Y) (ntohs( \
+	((struct rmnet_map_header_s *)Y->data)->pkt_len))
+
+struct rmnet_map_header_s *rmnet_map_add_map_header(struct sk_buff *skb,
+						    int hdrlen);
+rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
+				      struct rmnet_phys_ep_conf_s *config);
+void rmnet_map_aggregate(struct sk_buff *skb,
+			 struct rmnet_phys_ep_conf_s *config);
+
+#endif /* _RMNET_MAP_H_ */
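To make the header layout above concrete, here is a hypothetical validity check for a single MAP data frame sitting at skb->data, written in terms of the accessors just defined. It is illustrative only and not part of this patch; rmnet_example_map_frame_ok() is an assumed name, LOGD comes from the driver's rmnet_data_private.h, and the check relies on pkt_len covering payload plus pad bytes, as rmnet_map_add_map_header() below constructs it.

/* Illustrative sketch only -- not part of this patch */
static int rmnet_example_map_frame_ok(struct sk_buff *skb)
{
	uint32_t payload_len, frame_len;

	if (skb->len < sizeof(struct rmnet_map_header_s))
		return 0;

	/* pkt_len is big endian and counts payload plus trailing pad bytes */
	payload_len = RMNET_MAP_GET_LENGTH(skb);
	frame_len = payload_len + sizeof(struct rmnet_map_header_s);

	if (RMNET_MAP_GET_CD_BIT(skb))		/* command, not data */
		return 0;
	if (frame_len > skb->len)		/* truncated frame */
		return 0;
	if (RMNET_MAP_GET_PAD(skb) > payload_len)
		return 0;

	LOGD("mux_id %d, frame of %u bytes\n",
	     RMNET_MAP_GET_MUX_ID(skb), frame_len);
	return 1;
}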
diff --git a/net/rmnet_data/rmnet_map_command.c b/net/rmnet_data/rmnet_map_command.c
new file mode 100644
index 000000000000..68acf2d9561a
--- /dev/null
+++ b/net/rmnet_data/rmnet_map_command.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rmnet_data.h>
+#include <net/pkt_sched.h>
+#include "rmnet_data_config.h"
+#include "rmnet_map.h"
+#include "rmnet_data_private.h"
+
+unsigned long int rmnet_map_command_stats[RMNET_MAP_COMMAND_ENUM_LENGTH];
+module_param_array(rmnet_map_command_stats, ulong, 0, S_IRUGO);
+MODULE_PARM_DESC(rmnet_map_command_stats, "MAP command statistics");
+
+static uint8_t rmnet_map_do_flow_control(struct sk_buff *skb,
+					 struct rmnet_phys_ep_conf_s *config,
+					 int enable)
+{
+	return RMNET_MAP_COMMAND_UNSUPPORTED;
+}
+
+static void rmnet_map_send_ack(struct sk_buff *skb,
+			       unsigned char type)
+{
+	struct net_device *dev;
+	struct rmnet_map_control_command_s *cmd;
+	unsigned long flags;
+	int xmit_status;
+
+	if (!skb)
+		BUG();
+
+	dev = skb->dev;
+
+	cmd = RMNET_MAP_GET_CMD_START(skb);
+	cmd->cmd_type = type & 0x03;
+
+	spin_lock_irqsave(&(skb->dev->tx_global_lock), flags);
+	xmit_status = skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
+	spin_unlock_irqrestore(&(skb->dev->tx_global_lock), flags);
+}
+
+rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
+				      struct rmnet_phys_ep_conf_s *config)
+{
+	struct rmnet_map_control_command_s *cmd;
+	unsigned char command_name;
+	unsigned char rc = 0;
+
+	if (!skb)
+		BUG();
+
+	cmd = RMNET_MAP_GET_CMD_START(skb);
+	command_name = cmd->command_name;
+
+	if (command_name < RMNET_MAP_COMMAND_ENUM_LENGTH)
+		rmnet_map_command_stats[command_name]++;
+
+	switch (command_name) {
+	case RMNET_MAP_COMMAND_FLOW_ENABLE:
+		rc = rmnet_map_do_flow_control(skb, config, 1);
+		break;
+
+	case RMNET_MAP_COMMAND_FLOW_DISABLE:
+		rc = rmnet_map_do_flow_control(skb, config, 0);
+		break;
+
+	default:
+		rmnet_map_command_stats[RMNET_MAP_COMMAND_UNKNOWN]++;
+		LOGM("%s(): Unknown MAP command: %d\n", __func__, command_name);
+		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
+		break;
+	}
+	rmnet_map_send_ack(skb, rc);
+	return 0; /* TODO: handler_consumed */
+}
diff --git a/net/rmnet_data/rmnet_map_data.c b/net/rmnet_data/rmnet_map_data.c
new file mode 100644
index 000000000000..f3f65c9a3f5f
--- /dev/null
+++ b/net/rmnet_data/rmnet_map_data.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data MAP protocol
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rmnet_data.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include "rmnet_data_config.h"
+#include "rmnet_map.h"
+#include "rmnet_data_private.h"
+
+/* ***************** Local Definitions ************************************** */
+struct agg_work {
+	struct delayed_work work;
+	struct rmnet_phys_ep_conf_s *config;
+};
+
+/******************************************************************************/
+
+/**
+ * rmnet_map_add_map_header() - Adds MAP header to front of skb->data
+ * @skb:     Socket buffer ("packet") to modify
+ * @hdrlen:  Number of bytes of header data which should not be included in
+ *           MAP length field
+ *
+ * Padding is calculated and set appropriately in MAP header. Mux ID is
+ * initialized to 0.
+ *
+ * Return:
+ *      - Pointer to MAP structure
+ *      - 0 (null) if insufficient headroom
+ *      - 0 (null) if insufficient tailroom for padding bytes
+ *
+ * todo: Parameterize skb alignment
+ */
+struct rmnet_map_header_s *rmnet_map_add_map_header(struct sk_buff *skb,
+						    int hdrlen)
+{
+	uint32_t padding, map_datalen;
+	uint8_t *padbytes;
+	struct rmnet_map_header_s *map_header;
+
+	if (skb_headroom(skb) < sizeof(struct rmnet_map_header_s))
+		return 0;
+
+	map_datalen = skb->len - hdrlen;
+	map_header = (struct rmnet_map_header_s *)
+			skb_push(skb, sizeof(struct rmnet_map_header_s));
+	memset(map_header, 0, sizeof(struct rmnet_map_header_s));
+
+	padding = ALIGN(map_datalen, 4) - map_datalen;
+
+	if (skb_tailroom(skb) < padding)
+		return 0;
+
+	padbytes = (uint8_t *) skb_put(skb, padding);
+	LOGD("pad: %d\n", padding);
+	memset(padbytes, 0, padding);
+
+	map_header->pkt_len = htons(map_datalen + padding);
+	map_header->pad_len = padding & 0x3F;
+
+	return map_header;
+}
+
+/**
+ * rmnet_map_deaggregate() - Deaggregates a single packet
+ * @skb:        Source socket buffer containing multiple MAP frames
+ * @config:     Physical endpoint configuration of the ingress device
+ *
+ * Source skb is cloned with skb_clone(). The new skb data and tail pointers
+ * are modified to contain a single MAP frame. The clone is allocated with
+ * GFP_ATOMIC flags. User should keep calling deaggregate() on the source skb
+ * until 0 is returned, indicating that there are no more packets to
+ * deaggregate.
+ *
+ * Return:
+ *      - Pointer to new skb
+ *      - 0 (null) if no more aggregated packets
+ */
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_phys_ep_conf_s *config)
+{
+	struct sk_buff *skbn;
+	struct rmnet_map_header_s *maph;
+	uint32_t packet_len;
+	uint8_t ip_byte;
+
+	if (skb->len == 0)
+		return 0;
+
+	maph = (struct rmnet_map_header_s *) skb->data;
+	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header_s);
+	if ((((int)skb->len) - ((int)packet_len)) < 0) {
+		LOGM("%s(): Got malformed packet. Dropping\n", __func__);
+		return 0;
+	}
+
+	skbn = skb_clone(skb, GFP_ATOMIC);
+	if (!skbn)
+		return 0;
+
+	LOGD("Trimming to %d bytes\n", packet_len);
+	LOGD("before skbn->len = %d", skbn->len);
+	skb_trim(skbn, packet_len);
+	skb_pull(skb, packet_len);
+	LOGD("after skbn->len = %d", skbn->len);
+
+	/* Sanity check */
+	ip_byte = (skbn->data[4]) & 0xF0;
+	if (ip_byte != 0x40 && ip_byte != 0x60) {
+		LOGM("%s() Unknown IP type: 0x%02X\n", __func__, ip_byte);
+		kfree_skb(skbn);
+		return 0;
+	}
+
+	return skbn;
+}
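As the comment above suggests, a caller drains an aggregated buffer by looping on rmnet_map_deaggregate() until it returns 0. A minimal sketch of such an ingress loop follows; it is not part of this patch, and rmnet_example_deliver() is a hypothetical placeholder for per-frame demultiplexing and delivery.

/* Illustrative sketch only -- not part of this patch */
static void rmnet_example_deaggregate_all(struct sk_buff *skb,
					  struct rmnet_phys_ep_conf_s *config)
{
	struct sk_buff *skbn;

	/* Each call clones one MAP frame and pulls it off the source skb */
	while ((skbn = rmnet_map_deaggregate(skb, config)) != 0)
		rmnet_example_deliver(skbn, config);	/* hypothetical */

	/* Source buffer is exhausted (or malformed) once 0 is returned */
	consume_skb(skb);
}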
+
+/**
+ * rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
+ * @work:        struct agg_work containing delayed work and skb to flush
+ *
+ * This function is scheduled to run in a specified number of jiffies after
+ * the last frame transmitted by the network stack. When run, the buffer
+ * containing aggregated packets is finally transmitted on the underlying link.
+ *
+ */
+static void rmnet_map_flush_packet_queue(struct work_struct *work)
+{
+	struct agg_work *real_work;
+	struct rmnet_phys_ep_conf_s *config;
+	unsigned long flags;
+	struct sk_buff *skb;
+
+	skb = 0;
+	real_work = (struct agg_work *)work;
+	config = real_work->config;
+	LOGD("Entering flush thread\n");
+	spin_lock_irqsave(&config->agg_lock, flags);
+	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
+		if (likely(config->agg_skb)) {
+			/* Buffer may have already been shipped out */
+			if (config->agg_count > 1)
+				LOGL("Agg count: %d\n", config->agg_count);
+			skb = config->agg_skb;
+			config->agg_skb = 0;
+		}
+		config->agg_state = RMNET_MAP_AGG_IDLE;
+	} else {
+		/* How did we get here? */
+		LOGE("%s(): Ran queued command when state %s\n",
+		     __func__, "is idle. State machine likely broken");
+	}
+
+	spin_unlock_irqrestore(&config->agg_lock, flags);
+	if (skb)
+		dev_queue_xmit(skb);
+	kfree(work);
+}
+
+/**
+ * rmnet_map_aggregate() - Software aggregates multiple packets.
+ * @skb:        current packet being transmitted
+ * @config:     Physical endpoint configuration of the egress device
+ *
+ * Aggregates multiple SKBs into a single large SKB for transmission. MAP
+ * protocol is used to separate the packets in the buffer. This function
+ * consumes the argument SKB, which should not be processed further by any
+ * other function.
+ */
+void rmnet_map_aggregate(struct sk_buff *skb,
+			 struct rmnet_phys_ep_conf_s *config)
+{
+	uint8_t *dest_buff;
+	struct agg_work *work;
+	unsigned long flags;
+	struct sk_buff *agg_skb;
+	int size;
+
+	if (!skb || !config)
+		BUG();
+	size = config->egress_agg_size - skb->len;
+
+	if (size < 2000) {
+		LOGL("Invalid length %d\n", size);
+		return;
+	}
+
+new_packet:
+	spin_lock_irqsave(&config->agg_lock, flags);
+	if (!config->agg_skb) {
+		config->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
+		if (!config->agg_skb) {
+			config->agg_skb = 0;
+			config->agg_count = 0;
+			spin_unlock_irqrestore(&config->agg_lock, flags);
+			dev_queue_xmit(skb);
+			return;
+		}
+		config->agg_count = 1;
+		kfree_skb(skb);
+		goto schedule;
+	}
+
+	if (skb->len > (config->egress_agg_size - config->agg_skb->len)) {
+		if (config->agg_count > 1)
+			LOGL("Agg count: %d\n", config->agg_count);
+		agg_skb = config->agg_skb;
+		config->agg_skb = 0;
+		config->agg_count = 0;
+		spin_unlock_irqrestore(&config->agg_lock, flags);
+		dev_queue_xmit(agg_skb);
+		goto new_packet;
+	}
+
+	dest_buff = skb_put(config->agg_skb, skb->len);
+	memcpy(dest_buff, skb->data, skb->len);
+	config->agg_count++;
+	kfree_skb(skb);
+
+schedule:
+	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
+		work = (struct agg_work *)
+			kmalloc(sizeof(struct agg_work), GFP_ATOMIC);
+		if (!work) {
+			LOGE("%s(): Failed to allocate work item for packet %s",
+			     __func__, "transfer. DATA PATH LIKELY BROKEN!\n");
+			config->agg_state = RMNET_MAP_AGG_IDLE;
+			spin_unlock_irqrestore(&config->agg_lock, flags);
+			return;
+		}
+		INIT_DELAYED_WORK((struct delayed_work *)work,
+				  rmnet_map_flush_packet_queue);
+		work->config = config;
+		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
+		schedule_delayed_work((struct delayed_work *)work, 1);
+	}
+	spin_unlock_irqrestore(&config->agg_lock, flags);
+	return;
+}
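Putting the two egress helpers together, the transmit side of the MAP path can be sketched as follows. This is illustrative only and not part of the patch; rmnet_example_map_egress() is a hypothetical caller and the mux ID value is an arbitrary assumption.

/* Illustrative sketch only -- not part of this patch */
static void rmnet_example_map_egress(struct sk_buff *skb,
				     struct rmnet_phys_ep_conf_s *config)
{
	struct rmnet_map_header_s *map_header;

	/* Prepend the 4 byte MAP header and pad the payload to 4 bytes */
	map_header = rmnet_map_add_map_header(skb, 0);
	if (!map_header) {		/* insufficient head/tail room */
		kfree_skb(skb);
		return;
	}

	map_header->mux_id = 1;		/* hypothetical MAP channel */

	/* rmnet_map_aggregate() takes ownership of skb (see above) */
	rmnet_map_aggregate(skb, config);
}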