-rw-r--r--  drivers/media/platform/msm/Kconfig | 2
-rw-r--r--  drivers/media/platform/msm/Makefile | 2
-rw-r--r--  drivers/media/platform/msm/broadcast/Kconfig | 14
-rw-r--r--  drivers/media/platform/msm/broadcast/Makefile | 4
-rw-r--r--  drivers/media/platform/msm/broadcast/tspp.c | 3094
-rw-r--r--  drivers/media/platform/msm/dvb/Kconfig | 10
-rw-r--r--  drivers/media/platform/msm/dvb/Makefile | 2
-rw-r--r--  drivers/media/platform/msm/dvb/adapter/Makefile | 6
-rw-r--r--  drivers/media/platform/msm/dvb/adapter/mpq_adapter.c | 211
-rw-r--r--  drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c | 827
-rw-r--r--  drivers/media/platform/msm/dvb/demux/Kconfig | 46
-rw-r--r--  drivers/media/platform/msm/dvb/demux/Makefile | 14
-rw-r--r--  drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c | 5195
-rw-r--r--  drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h | 1027
-rw-r--r--  drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c | 280
-rw-r--r--  drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c | 1968
-rw-r--r--  drivers/media/platform/msm/dvb/demux/mpq_sdmx.c | 1023
-rw-r--r--  drivers/media/platform/msm/dvb/demux/mpq_sdmx.h | 368
-rw-r--r--  drivers/media/platform/msm/dvb/include/mpq_adapter.h | 199
-rw-r--r--  drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h | 41
-rw-r--r--  drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h | 462
-rw-r--r--  include/linux/qcom_tspp.h | 99
22 files changed, 14894 insertions, 0 deletions
diff --git a/drivers/media/platform/msm/Kconfig b/drivers/media/platform/msm/Kconfig
index 16060773ac96..e2523e06ab76 100644
--- a/drivers/media/platform/msm/Kconfig
+++ b/drivers/media/platform/msm/Kconfig
@@ -41,3 +41,5 @@ endif # MSMB_CAMERA
source "drivers/media/platform/msm/vidc/Kconfig"
source "drivers/media/platform/msm/sde/Kconfig"
+source "drivers/media/platform/msm/dvb/Kconfig"
+source "drivers/media/platform/msm/broadcast/Kconfig"
diff --git a/drivers/media/platform/msm/Makefile b/drivers/media/platform/msm/Makefile
index ff0369bdeca5..a3f802d3ce59 100644
--- a/drivers/media/platform/msm/Makefile
+++ b/drivers/media/platform/msm/Makefile
@@ -5,3 +5,5 @@
obj-$(CONFIG_MSMB_CAMERA) += camera_v2/
obj-$(CONFIG_MSM_VIDC_V4L2) += vidc/
obj-y += sde/
+obj-y += broadcast/
+obj-$(CONFIG_DVB_MPQ) += dvb/
diff --git a/drivers/media/platform/msm/broadcast/Kconfig b/drivers/media/platform/msm/broadcast/Kconfig
new file mode 100644
index 000000000000..cdd1b2091179
--- /dev/null
+++ b/drivers/media/platform/msm/broadcast/Kconfig
@@ -0,0 +1,14 @@
+#
+# MSM Broadcast subsystem drivers
+#
+
+config TSPP
+ depends on ARCH_QCOM
+ tristate "TSPP (Transport Stream Packet Processor) Support"
+ ---help---
+ Transport Stream Packet Processor v1 is used to offload the
+ processing of MPEG transport streams from the main processor.
+ It is used to process incoming transport streams from TSIF
+ to support use-cases such as transport stream live play
+ and recording.
+ This can also be compiled as a loadable module.
diff --git a/drivers/media/platform/msm/broadcast/Makefile b/drivers/media/platform/msm/broadcast/Makefile
new file mode 100644
index 000000000000..3735bdc212ad
--- /dev/null
+++ b/drivers/media/platform/msm/broadcast/Makefile
@@ -0,0 +1,4 @@
+#
+# Makefile for MSM Broadcast subsystem drivers.
+#
+obj-$(CONFIG_TSPP) += tspp.o
diff --git a/drivers/media/platform/msm/broadcast/tspp.c b/drivers/media/platform/msm/broadcast/tspp.c
new file mode 100644
index 000000000000..275b8b90af05
--- /dev/null
+++ b/drivers/media/platform/msm/broadcast/tspp.c
@@ -0,0 +1,3094 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h> /* Just for modules */
+#include <linux/kernel.h> /* Only for KERN_INFO */
+#include <linux/err.h> /* Error macros */
+#include <linux/list.h> /* Linked list */
+#include <linux/cdev.h>
+#include <linux/init.h> /* Needed for the macros */
+#include <linux/io.h> /* IO macros */
+#include <linux/device.h> /* Device drivers need this */
+#include <linux/sched.h> /* Externally defined globals */
+#include <linux/pm_runtime.h> /* Runtime power management */
+#include <linux/fs.h>
+#include <linux/uaccess.h> /* copy_to_user */
+#include <linux/slab.h> /* kfree, kzalloc */
+#include <linux/ioport.h> /* XXX_ mem_region */
+#include <linux/dma-mapping.h> /* dma_XXX */
+#include <linux/dmapool.h> /* DMA pools */
+#include <linux/delay.h> /* msleep */
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/poll.h> /* poll() file op */
+#include <linux/wait.h> /* wait() macros, sleeping */
+#include <linux/bitops.h> /* BIT() macro */
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/msm-sps.h> /* BAM stuff */
+#include <linux/wakelock.h> /* Locking functions */
+#include <linux/timer.h> /* Timer services */
+#include <linux/jiffies.h> /* Jiffies counter */
+#include <linux/qcom_tspp.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/string.h>
+#include <linux/msm-bus.h>
+#include <linux/interrupt.h> /* tasklet */
+
+/*
+ * General defines
+ */
+#define TSPP_TSIF_INSTANCES 2
+#define TSPP_GPIOS_PER_TSIF 4
+#define TSPP_FILTER_TABLES 3
+#define TSPP_MAX_DEVICES 1
+#define TSPP_NUM_CHANNELS 16
+#define TSPP_NUM_PRIORITIES 16
+#define TSPP_NUM_KEYS 8
+#define INVALID_CHANNEL 0xFFFFFFFF
+#define TSPP_BAM_DEFAULT_IPC_LOGLVL 2
+/*
+ * BAM descriptor FIFO size (in number of descriptors).
+ * This is the maximum number of descriptors allowed by SPS, which is 8K-1.
+ */
+#define TSPP_SPS_DESCRIPTOR_COUNT (8 * 1024 - 1)
+#define TSPP_PACKET_LENGTH 188
+#define TSPP_MIN_BUFFER_SIZE (TSPP_PACKET_LENGTH)
+
+/* Max descriptor buffer size allowed by SPS */
+#define TSPP_MAX_BUFFER_SIZE (32 * 1024 - 1)
+
+/*
+ * Returns whether to use DMA pool for TSPP output buffers.
+ * For buffers smaller than page size, using DMA pool
+ * provides better memory utilization as dma_alloc_coherent
+ * allocates minimum of page size.
+ */
+#define TSPP_USE_DMA_POOL(buff_size) ((buff_size) < PAGE_SIZE)
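As a rough illustration of the page-size heuristic above (a sketch only, not part of this commit; the helper name is hypothetical), a buffer allocator might branch on it like this:

static void *tspp_example_alloc_buf(struct device *dev, struct dma_pool *pool,
				    size_t size, dma_addr_t *phys)
{
	/* buffers smaller than a page come from the DMA pool */
	if (TSPP_USE_DMA_POOL(size))
		return dma_pool_alloc(pool, GFP_KERNEL, phys);
	/* page-sized and larger buffers use dma_alloc_coherent() directly */
	return dma_alloc_coherent(dev, size, phys, GFP_KERNEL);
}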
+
+/*
+ * Max allowed TSPP buffers/descriptors.
+ * If SPS desc FIFO holds X descriptors, we can queue up to X-1 descriptors.
+ */
+#define TSPP_NUM_BUFFERS (TSPP_SPS_DESCRIPTOR_COUNT - 1)
+#define TSPP_TSIF_DEFAULT_TIME_LIMIT 60
+#define SPS_DESCRIPTOR_SIZE 8
+#define MIN_ACCEPTABLE_BUFFER_COUNT 2
+#define TSPP_DEBUG(msg...)
+
+/*
+ * TSIF register offsets
+ */
+#define TSIF_STS_CTL_OFF (0x0)
+#define TSIF_TIME_LIMIT_OFF (0x4)
+#define TSIF_CLK_REF_OFF (0x8)
+#define TSIF_LPBK_FLAGS_OFF (0xc)
+#define TSIF_LPBK_DATA_OFF (0x10)
+#define TSIF_TEST_CTL_OFF (0x14)
+#define TSIF_TEST_MODE_OFF (0x18)
+#define TSIF_TEST_RESET_OFF (0x1c)
+#define TSIF_TEST_EXPORT_OFF (0x20)
+#define TSIF_TEST_CURRENT_OFF (0x24)
+
+#define TSIF_DATA_PORT_OFF (0x100)
+
+/* bits for TSIF_STS_CTL register */
+#define TSIF_STS_CTL_EN_IRQ BIT(28)
+#define TSIF_STS_CTL_PACK_AVAIL BIT(27)
+#define TSIF_STS_CTL_1ST_PACKET BIT(26)
+#define TSIF_STS_CTL_OVERFLOW BIT(25)
+#define TSIF_STS_CTL_LOST_SYNC BIT(24)
+#define TSIF_STS_CTL_TIMEOUT BIT(23)
+#define TSIF_STS_CTL_INV_SYNC BIT(21)
+#define TSIF_STS_CTL_INV_NULL BIT(20)
+#define TSIF_STS_CTL_INV_ERROR BIT(19)
+#define TSIF_STS_CTL_INV_ENABLE BIT(18)
+#define TSIF_STS_CTL_INV_DATA BIT(17)
+#define TSIF_STS_CTL_INV_CLOCK BIT(16)
+#define TSIF_STS_CTL_SPARE BIT(15)
+#define TSIF_STS_CTL_EN_NULL BIT(11)
+#define TSIF_STS_CTL_EN_ERROR BIT(10)
+#define TSIF_STS_CTL_LAST_BIT BIT(9)
+#define TSIF_STS_CTL_EN_TIME_LIM BIT(8)
+#define TSIF_STS_CTL_EN_TCR BIT(7)
+#define TSIF_STS_CTL_TEST_MODE BIT(6)
+#define TSIF_STS_CTL_MODE_2 BIT(5)
+#define TSIF_STS_CTL_EN_DM BIT(4)
+#define TSIF_STS_CTL_STOP BIT(3)
+#define TSIF_STS_CTL_START BIT(0)
+
+/*
+ * TSPP register offsets
+ */
+#define TSPP_RST 0x00
+#define TSPP_CLK_CONTROL 0x04
+#define TSPP_CONFIG 0x08
+#define TSPP_CONTROL 0x0C
+#define TSPP_PS_DISABLE 0x10
+#define TSPP_MSG_IRQ_STATUS 0x14
+#define TSPP_MSG_IRQ_MASK 0x18
+#define TSPP_IRQ_STATUS 0x1C
+#define TSPP_IRQ_MASK 0x20
+#define TSPP_IRQ_CLEAR 0x24
+#define TSPP_PIPE_ERROR_STATUS(_n) (0x28 + (_n << 2))
+#define TSPP_STATUS 0x68
+#define TSPP_CURR_TSP_HEADER 0x6C
+#define TSPP_CURR_PID_FILTER 0x70
+#define TSPP_SYSTEM_KEY(_n) (0x74 + (_n << 2))
+#define TSPP_CBC_INIT_VAL(_n) (0x94 + (_n << 2))
+#define TSPP_DATA_KEY_RESET 0x9C
+#define TSPP_KEY_VALID 0xA0
+#define TSPP_KEY_ERROR 0xA4
+#define TSPP_TEST_CTRL 0xA8
+#define TSPP_VERSION 0xAC
+#define TSPP_GENERICS 0xB0
+#define TSPP_NOP 0xB4
+
+/*
+ * Register bit definitions
+ */
+/* TSPP_RST */
+#define TSPP_RST_RESET BIT(0)
+
+/* TSPP_CLK_CONTROL */
+#define TSPP_CLK_CONTROL_FORCE_CRYPTO BIT(9)
+#define TSPP_CLK_CONTROL_FORCE_PES_PL BIT(8)
+#define TSPP_CLK_CONTROL_FORCE_PES_AF BIT(7)
+#define TSPP_CLK_CONTROL_FORCE_RAW_CTRL BIT(6)
+#define TSPP_CLK_CONTROL_FORCE_PERF_CNT BIT(5)
+#define TSPP_CLK_CONTROL_FORCE_CTX_SEARCH BIT(4)
+#define TSPP_CLK_CONTROL_FORCE_TSP_PROC BIT(3)
+#define TSPP_CLK_CONTROL_FORCE_CONS_AHB2MEM BIT(2)
+#define TSPP_CLK_CONTROL_FORCE_TS_AHB2MEM BIT(1)
+#define TSPP_CLK_CONTROL_SET_CLKON BIT(0)
+
+/* TSPP_CONFIG */
+#define TSPP_CONFIG_SET_PACKET_LENGTH(_a, _b) (_a = (_a & 0xF0) | \
+((_b & 0xF) << 8))
+#define TSPP_CONFIG_GET_PACKET_LENGTH(_a) ((_a >> 8) & 0xF)
+#define TSPP_CONFIG_DUP_WITH_DISC_EN BIT(7)
+#define TSPP_CONFIG_PES_SYNC_ERROR_MASK BIT(6)
+#define TSPP_CONFIG_PS_LEN_ERR_MASK BIT(5)
+#define TSPP_CONFIG_PS_CONT_ERR_UNSP_MASK BIT(4)
+#define TSPP_CONFIG_PS_CONT_ERR_MASK BIT(3)
+#define TSPP_CONFIG_PS_DUP_TSP_MASK BIT(2)
+#define TSPP_CONFIG_TSP_ERR_IND_MASK BIT(1)
+#define TSPP_CONFIG_TSP_SYNC_ERR_MASK BIT(0)
+
+/* TSPP_CONTROL */
+#define TSPP_CONTROL_PID_FILTER_LOCK BIT(5)
+#define TSPP_CONTROL_FORCE_KEY_CALC BIT(4)
+#define TSPP_CONTROL_TSP_CONS_SRC_DIS BIT(3)
+#define TSPP_CONTROL_TSP_TSIF1_SRC_DIS BIT(2)
+#define TSPP_CONTROL_TSP_TSIF0_SRC_DIS BIT(1)
+#define TSPP_CONTROL_PERF_COUNT_INIT BIT(0)
+
+/* TSPP_MSG_IRQ_STATUS + TSPP_MSG_IRQ_MASK */
+#define TSPP_MSG_TSPP_IRQ BIT(2)
+#define TSPP_MSG_TSIF_1_IRQ BIT(1)
+#define TSPP_MSG_TSIF_0_IRQ BIT(0)
+
+/* TSPP_IRQ_STATUS + TSPP_IRQ_MASK + TSPP_IRQ_CLEAR */
+#define TSPP_IRQ_STATUS_TSP_RD_CMPL BIT(19)
+#define TSPP_IRQ_STATUS_KEY_ERROR BIT(18)
+#define TSPP_IRQ_STATUS_KEY_SWITCHED_BAD BIT(17)
+#define TSPP_IRQ_STATUS_KEY_SWITCHED BIT(16)
+#define TSPP_IRQ_STATUS_PS_BROKEN(_n) BIT((_n))
+
+/* TSPP_PIPE_ERROR_STATUS */
+#define TSPP_PIPE_PES_SYNC_ERROR BIT(3)
+#define TSPP_PIPE_PS_LENGTH_ERROR BIT(2)
+#define TSPP_PIPE_PS_CONTINUITY_ERROR BIT(1)
+#define TSPP_PIP_PS_LOST_START BIT(0)
+
+/* TSPP_STATUS */
+#define TSPP_STATUS_TSP_PKT_AVAIL BIT(10)
+#define TSPP_STATUS_TSIF1_DM_REQ BIT(6)
+#define TSPP_STATUS_TSIF0_DM_REQ BIT(2)
+#define TSPP_CURR_FILTER_TABLE BIT(0)
+
+/* TSPP_GENERICS */
+#define TSPP_GENERICS_CRYPTO_GEN BIT(12)
+#define TSPP_GENERICS_MAX_CONS_PIPES BIT(7)
+#define TSPP_GENERICS_MAX_PIPES BIT(2)
+#define TSPP_GENERICS_TSIF_1_GEN BIT(1)
+#define TSPP_GENERICS_TSIF_0_GEN BIT(0)
+
+/*
+ * TSPP memory regions
+ */
+#define TSPP_PID_FILTER_TABLE0 0x800
+#define TSPP_PID_FILTER_TABLE1 0x880
+#define TSPP_PID_FILTER_TABLE2 0x900
+#define TSPP_GLOBAL_PERFORMANCE 0x980 /* see tspp_global_performance */
+#define TSPP_PIPE_CONTEXT 0x990 /* see tspp_pipe_context */
+#define TSPP_PIPE_PERFORMANCE 0x998 /* see tspp_pipe_performance */
+#define TSPP_TSP_BUFF_WORD(_n) (0xC10 + (_n << 2))
+#define TSPP_DATA_KEY 0xCD0
+
+struct debugfs_entry {
+ const char *name;
+ mode_t mode;
+ int offset;
+};
+
+static const struct debugfs_entry debugfs_tsif_regs[] = {
+ {"sts_ctl", S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
+ {"time_limit", S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
+ {"clk_ref", S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
+ {"lpbk_flags", S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
+ {"lpbk_data", S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
+ {"test_ctl", S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
+ {"test_mode", S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
+ {"test_reset", S_IWUSR, TSIF_TEST_RESET_OFF},
+ {"test_export", S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
+ {"test_current", S_IRUGO, TSIF_TEST_CURRENT_OFF},
+ {"data_port", S_IRUSR, TSIF_DATA_PORT_OFF},
+};
+
+static const struct debugfs_entry debugfs_tspp_regs[] = {
+ {"rst", S_IRUGO | S_IWUSR, TSPP_RST},
+ {"clk_control", S_IRUGO | S_IWUSR, TSPP_CLK_CONTROL},
+ {"config", S_IRUGO | S_IWUSR, TSPP_CONFIG},
+ {"control", S_IRUGO | S_IWUSR, TSPP_CONTROL},
+ {"ps_disable", S_IRUGO | S_IWUSR, TSPP_PS_DISABLE},
+ {"msg_irq_status", S_IRUGO | S_IWUSR, TSPP_MSG_IRQ_STATUS},
+ {"msg_irq_mask", S_IRUGO | S_IWUSR, TSPP_MSG_IRQ_MASK},
+ {"irq_status", S_IRUGO | S_IWUSR, TSPP_IRQ_STATUS},
+ {"irq_mask", S_IRUGO | S_IWUSR, TSPP_IRQ_MASK},
+ {"irq_clear", S_IRUGO | S_IWUSR, TSPP_IRQ_CLEAR},
+ /* {"pipe_error_status",S_IRUGO | S_IWUSR, TSPP_PIPE_ERROR_STATUS}, */
+ {"status", S_IRUGO | S_IWUSR, TSPP_STATUS},
+ {"curr_tsp_header", S_IRUGO | S_IWUSR, TSPP_CURR_TSP_HEADER},
+ {"curr_pid_filter", S_IRUGO | S_IWUSR, TSPP_CURR_PID_FILTER},
+ /* {"system_key", S_IRUGO | S_IWUSR, TSPP_SYSTEM_KEY}, */
+ /* {"cbc_init_val", S_IRUGO | S_IWUSR, TSPP_CBC_INIT_VAL}, */
+ {"data_key_reset", S_IRUGO | S_IWUSR, TSPP_DATA_KEY_RESET},
+ {"key_valid", S_IRUGO | S_IWUSR, TSPP_KEY_VALID},
+ {"key_error", S_IRUGO | S_IWUSR, TSPP_KEY_ERROR},
+ {"test_ctrl", S_IRUGO | S_IWUSR, TSPP_TEST_CTRL},
+ {"version", S_IRUGO | S_IWUSR, TSPP_VERSION},
+ {"generics", S_IRUGO | S_IWUSR, TSPP_GENERICS},
+ {"pid_filter_table0", S_IRUGO | S_IWUSR, TSPP_PID_FILTER_TABLE0},
+ {"pid_filter_table1", S_IRUGO | S_IWUSR, TSPP_PID_FILTER_TABLE1},
+ {"pid_filter_table2", S_IRUGO | S_IWUSR, TSPP_PID_FILTER_TABLE2},
+ {"tsp_total_num", S_IRUGO | S_IWUSR, TSPP_GLOBAL_PERFORMANCE},
+ {"tsp_ignored_num", S_IRUGO | S_IWUSR, TSPP_GLOBAL_PERFORMANCE + 4},
+ {"tsp_err_ind_num", S_IRUGO | S_IWUSR, TSPP_GLOBAL_PERFORMANCE + 8},
+ {"tsp_sync_err_num", S_IRUGO | S_IWUSR, TSPP_GLOBAL_PERFORMANCE + 16},
+ {"pipe_context", S_IRUGO | S_IWUSR, TSPP_PIPE_CONTEXT},
+ {"pipe_performance", S_IRUGO | S_IWUSR, TSPP_PIPE_PERFORMANCE},
+ {"data_key", S_IRUGO | S_IWUSR, TSPP_DATA_KEY}
+};
+
+struct tspp_pid_filter {
+ u32 filter; /* see FILTER_ macros */
+ u32 config; /* see FILTER_ macros */
+};
+
+/* tsp_info */
+#define FILTER_HEADER_ERROR_MASK BIT(7)
+#define FILTER_TRANS_END_DISABLE BIT(6)
+#define FILTER_DEC_ON_ERROR_EN BIT(5)
+#define FILTER_DECRYPT BIT(4)
+#define FILTER_HAS_ENCRYPTION(_p) (_p->config & FILTER_DECRYPT)
+#define FILTER_GET_PIPE_NUMBER0(_p) (_p->config & 0xF)
+#define FILTER_SET_PIPE_NUMBER0(_p, _b) (_p->config = \
+ (_p->config & ~0xF) | (_b & 0xF))
+#define FILTER_GET_PIPE_PROCESS0(_p) ((_p->filter >> 30) & 0x3)
+#define FILTER_SET_PIPE_PROCESS0(_p, _b) (_p->filter = \
+ (_p->filter & ~(0x3<<30)) | ((_b & 0x3) << 30))
+#define FILTER_GET_PIPE_PID(_p) ((_p->filter >> 13) & 0x1FFF)
+#define FILTER_SET_PIPE_PID(_p, _b) (_p->filter = \
+ (_p->filter & ~(0x1FFF<<13)) | ((_b & 0x1FFF) << 13))
+#define FILTER_GET_PID_MASK(_p) (_p->filter & 0x1FFF)
+#define FILTER_SET_PID_MASK(_p, _b) (_p->filter = \
+ (_p->filter & ~0x1FFF) | (_b & 0x1FFF))
+#define FILTER_GET_PIPE_PROCESS1(_p) ((_p->config >> 30) & 0x3)
+#define FILTER_SET_PIPE_PROCESS1(_p, _b) (_p->config = \
+ (_p->config & ~(0x3<<30)) | ((_b & 0x3) << 30))
+#define FILTER_GET_KEY_NUMBER(_p) ((_p->config >> 8) & 0x7)
+#define FILTER_SET_KEY_NUMBER(_p, _b) (_p->config = \
+ (_p->config & ~(0x7<<8)) | ((_b & 0x7) << 8))
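The FILTER_* accessors above pack one PID filter entry into the two 32-bit words of struct tspp_pid_filter. A minimal sketch of how an entry is composed (illustrative only, not part of this commit; it mirrors what tspp_add_filter() does further down in this file):

static void example_fill_filter(struct tspp_pid_filter *f, u32 pid,
				u32 pipe, enum tspp_mode mode)
{
	f->filter = 0;
	f->config = FILTER_TRANS_END_DISABLE;
	FILTER_SET_PIPE_PROCESS0(f, mode);	/* primary processing mode */
	FILTER_SET_PIPE_PID(f, pid);		/* 13-bit transport PID */
	FILTER_SET_PID_MASK(f, 0x1fff);		/* compare all PID bits */
	FILTER_SET_PIPE_NUMBER0(f, pipe);	/* destination pipe (channel) */
	FILTER_SET_PIPE_PROCESS1(f, TSPP_MODE_DISABLED);
}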
+
+struct tspp_global_performance_regs {
+ u32 tsp_total;
+ u32 tsp_ignored;
+ u32 tsp_error;
+ u32 tsp_sync;
+};
+
+struct tspp_pipe_context_regs {
+ u16 pes_bytes_left;
+ u16 count;
+ u32 tsif_suffix;
+} __packed;
+#define CONTEXT_GET_STATE(_a) (_a & 0x3)
+#define CONTEXT_UNSPEC_LENGTH BIT(11)
+#define CONTEXT_GET_CONT_COUNT(_a) ((_a >> 12) & 0xF)
+
+#define MSEC_TO_JIFFIES(msec) ((msec) * HZ / 1000)
+
+struct tspp_pipe_performance_regs {
+ u32 tsp_total;
+ u32 ps_duplicate_tsp;
+ u32 tsp_no_payload;
+ u32 tsp_broken_ps;
+ u32 ps_total_num;
+ u32 ps_continuity_error;
+ u32 ps_length_error;
+ u32 pes_sync_error;
+};
+
+struct tspp_tsif_device {
+ void __iomem *base;
+ u32 time_limit;
+ u32 ref_count;
+ enum tspp_tsif_mode mode;
+ int clock_inverse;
+ int data_inverse;
+ int sync_inverse;
+ int enable_inverse;
+ u32 tsif_irq;
+
+ /* debugfs */
+ struct dentry *dent_tsif;
+ struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
+ u32 stat_rx;
+ u32 stat_overflow;
+ u32 stat_lost_sync;
+ u32 stat_timeout;
+};
+
+enum tspp_buf_state {
+ TSPP_BUF_STATE_EMPTY, /* buffer has been allocated, but not waiting */
+ TSPP_BUF_STATE_WAITING, /* buffer is waiting to be filled */
+ TSPP_BUF_STATE_DATA, /* buffer is not empty and can be read */
+ TSPP_BUF_STATE_LOCKED /* buffer is being read by a client */
+};
+
+struct tspp_mem_buffer {
+ struct tspp_mem_buffer *next;
+ struct sps_mem_buffer sps;
+ struct tspp_data_descriptor desc; /* buffer descriptor for kernel api */
+ enum tspp_buf_state state;
+ size_t filled; /* how much data this buffer is holding */
+ int read_index; /* where to start reading data from */
+};
+
+/* this represents each char device 'channel' */
+struct tspp_channel {
+ struct tspp_device *pdev; /* can use container_of instead? */
+ struct sps_pipe *pipe;
+ struct sps_connect config;
+ struct sps_register_event event;
+ struct tspp_mem_buffer *data; /* list of buffers */
+ struct tspp_mem_buffer *read; /* first buffer ready to be read */
+ struct tspp_mem_buffer *waiting; /* first outstanding transfer */
+ struct tspp_mem_buffer *locked; /* buffer currently being read */
+ wait_queue_head_t in_queue; /* set when data is received */
+ u32 id; /* channel id (0-15) */
+ int used; /* is this channel in use? */
+ int key; /* which encryption key index is used */
+ u32 buffer_size; /* size of the sps transfer buffers */
+ u32 max_buffers; /* how many buffers should be allocated */
+ u32 buffer_count; /* how many buffers are actually allocated */
+ u32 filter_count; /* how many filters have been added to this channel */
+ u32 int_freq; /* generate interrupts every x descriptors */
+ enum tspp_source src;
+ enum tspp_mode mode;
+ tspp_notifier *notifier; /* used only with kernel api */
+ void *notify_data; /* data to be passed with the notifier */
+ u32 expiration_period_ms; /* notification on partially filled buffers */
+ struct timer_list expiration_timer;
+ struct dma_pool *dma_pool;
+ tspp_memfree *memfree; /* user defined memory free function */
+ void *user_info; /* user cookie passed to memory alloc/free function */
+};
+
+struct tspp_pid_filter_table {
+ struct tspp_pid_filter filter[TSPP_NUM_PRIORITIES];
+};
+
+struct tspp_key_entry {
+ u32 even_lsb;
+ u32 even_msb;
+ u32 odd_lsb;
+ u32 odd_msb;
+};
+
+struct tspp_key_table {
+ struct tspp_key_entry entry[TSPP_NUM_KEYS];
+};
+
+struct tspp_pinctrl {
+ struct pinctrl *pinctrl;
+
+ struct pinctrl_state *disabled;
+ struct pinctrl_state *tsif0_mode1;
+ struct pinctrl_state *tsif0_mode2;
+ struct pinctrl_state *tsif1_mode1;
+ struct pinctrl_state *tsif1_mode2;
+ struct pinctrl_state *dual_mode1;
+ struct pinctrl_state *dual_mode2;
+
+ bool tsif0_active;
+ bool tsif1_active;
+};
+
+/* this represents the actual hardware device */
+struct tspp_device {
+ struct list_head devlist; /* list of all devices */
+ struct platform_device *pdev;
+ void __iomem *base;
+ uint32_t tsif_bus_client;
+ unsigned int tspp_irq;
+ unsigned int bam_irq;
+ unsigned long bam_handle;
+ struct sps_bam_props bam_props;
+ struct wakeup_source ws;
+ spinlock_t spinlock;
+ struct tasklet_struct tlet;
+ struct tspp_tsif_device tsif[TSPP_TSIF_INSTANCES];
+ /* clocks */
+ struct clk *tsif_pclk;
+ struct clk *tsif_ref_clk;
+ /* regulators */
+ struct regulator *tsif_vreg;
+ /* data */
+ struct tspp_pid_filter_table *filters[TSPP_FILTER_TABLES];
+ struct tspp_channel channels[TSPP_NUM_CHANNELS];
+ struct tspp_key_table *tspp_key_table;
+ struct tspp_global_performance_regs *tspp_global_performance;
+ struct tspp_pipe_context_regs *tspp_pipe_context;
+ struct tspp_pipe_performance_regs *tspp_pipe_performance;
+ bool req_irqs;
+ /* pinctrl */
+ struct mutex mutex;
+ struct tspp_pinctrl pinctrl;
+
+ struct dentry *dent;
+ struct dentry *debugfs_regs[ARRAY_SIZE(debugfs_tspp_regs)];
+};
+
+static int tspp_key_entry;
+static u32 channel_id; /* next channel id number to assign */
+
+static LIST_HEAD(tspp_devices);
+
+/*** IRQ ***/
+static irqreturn_t tspp_isr(int irq, void *dev)
+{
+ struct tspp_device *device = dev;
+ u32 status, mask;
+ u32 data;
+
+ status = readl_relaxed(device->base + TSPP_IRQ_STATUS);
+ mask = readl_relaxed(device->base + TSPP_IRQ_MASK);
+ status &= mask;
+
+ if (!status) {
+ dev_warn(&device->pdev->dev, "Spurious interrupt");
+ return IRQ_NONE;
+ }
+
+ /* if (status & TSPP_IRQ_STATUS_TSP_RD_CMPL) */
+
+ if (status & TSPP_IRQ_STATUS_KEY_ERROR) {
+ /* read the key error info */
+ data = readl_relaxed(device->base + TSPP_KEY_ERROR);
+ dev_info(&device->pdev->dev, "key error 0x%x", data);
+ }
+ if (status & TSPP_IRQ_STATUS_KEY_SWITCHED_BAD) {
+ data = readl_relaxed(device->base + TSPP_KEY_VALID);
+ dev_info(&device->pdev->dev, "key invalidated: 0x%x", data);
+ }
+ if (status & TSPP_IRQ_STATUS_KEY_SWITCHED)
+ dev_info(&device->pdev->dev, "key switched");
+
+ if (status & 0xffff)
+ dev_info(&device->pdev->dev, "broken pipe %i", status & 0xffff);
+
+ writel_relaxed(status, device->base + TSPP_IRQ_CLEAR);
+
+ /*
+ * Before returning IRQ_HANDLED to the generic interrupt handling
+ * framework, make sure all operations, including clearing of
+ * the interrupt status registers in the hardware, have completed.
+ * Thus a barrier after clearing the interrupt status register
+ * is required to guarantee that the interrupt status register has
+ * really been cleared by the time we return from this handler.
+ */
+ wmb();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tsif_isr(int irq, void *dev)
+{
+ struct tspp_tsif_device *tsif_device = dev;
+ u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
+
+ if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
+ TSIF_STS_CTL_OVERFLOW |
+ TSIF_STS_CTL_LOST_SYNC |
+ TSIF_STS_CTL_TIMEOUT)))
+ return IRQ_NONE;
+
+ if (sts_ctl & TSIF_STS_CTL_OVERFLOW)
+ tsif_device->stat_overflow++;
+
+ if (sts_ctl & TSIF_STS_CTL_LOST_SYNC)
+ tsif_device->stat_lost_sync++;
+
+ if (sts_ctl & TSIF_STS_CTL_TIMEOUT)
+ tsif_device->stat_timeout++;
+
+ iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+
+ /*
+ * Before returning IRQ_HANDLED to the generic interrupt handling
+ * framework, make sure all operations, including clearing of
+ * the interrupt status registers in the hardware, have completed.
+ * Thus a barrier after clearing the interrupt status register
+ * is required to guarantee that the interrupt status register has
+ * really been cleared by the time we return from this handler.
+ */
+ wmb();
+ return IRQ_HANDLED;
+}
+
+/*** callbacks ***/
+static void tspp_sps_complete_cb(struct sps_event_notify *notify)
+{
+ struct tspp_device *pdev;
+
+ if (!notify || !notify->user)
+ return;
+
+ pdev = notify->user;
+ tasklet_schedule(&pdev->tlet);
+}
+
+static void tspp_expiration_timer(unsigned long data)
+{
+ struct tspp_device *pdev = (struct tspp_device *)data;
+
+ if (pdev)
+ tasklet_schedule(&pdev->tlet);
+}
+
+/*** tasklet ***/
+static void tspp_sps_complete_tlet(unsigned long data)
+{
+ int i;
+ int complete;
+ unsigned long flags;
+ struct sps_iovec iovec;
+ struct tspp_channel *channel;
+ struct tspp_device *device = (struct tspp_device *)data;
+
+ spin_lock_irqsave(&device->spinlock, flags);
+
+ for (i = 0; i < TSPP_NUM_CHANNELS; i++) {
+ complete = 0;
+ channel = &device->channels[i];
+
+ if (!channel->used || !channel->waiting)
+ continue;
+
+ /* stop the expiration timer */
+ if (channel->expiration_period_ms)
+ del_timer(&channel->expiration_timer);
+
+ /* get completions */
+ while (channel->waiting->state == TSPP_BUF_STATE_WAITING) {
+ if (sps_get_iovec(channel->pipe, &iovec) != 0) {
+ pr_err("tspp: Error in iovec on channel %i",
+ channel->id);
+ break;
+ }
+ if (iovec.size == 0)
+ break;
+
+ if (DESC_FULL_ADDR(iovec.flags, iovec.addr)
+ != channel->waiting->sps.phys_base)
+ pr_err("tspp: buffer mismatch %pa",
+ &channel->waiting->sps.phys_base);
+
+ complete = 1;
+ channel->waiting->state = TSPP_BUF_STATE_DATA;
+ channel->waiting->filled = iovec.size;
+ channel->waiting->read_index = 0;
+
+ if (channel->src == TSPP_SOURCE_TSIF0)
+ device->tsif[0].stat_rx++;
+ else if (channel->src == TSPP_SOURCE_TSIF1)
+ device->tsif[1].stat_rx++;
+
+ /* update the pointers */
+ channel->waiting = channel->waiting->next;
+ }
+
+ /* wake any waiting processes */
+ if (complete) {
+ wake_up_interruptible(&channel->in_queue);
+
+ /* call notifiers */
+ if (channel->notifier)
+ channel->notifier(channel->id,
+ channel->notify_data);
+ }
+
+ /* restart expiration timer */
+ if (channel->expiration_period_ms)
+ mod_timer(&channel->expiration_timer,
+ jiffies +
+ MSEC_TO_JIFFIES(
+ channel->expiration_period_ms));
+ }
+
+ spin_unlock_irqrestore(&device->spinlock, flags);
+}
+
+static int tspp_config_gpios(struct tspp_device *device,
+ enum tspp_source source,
+ int enable)
+{
+ int ret;
+ struct pinctrl_state *s;
+ struct tspp_pinctrl *p = &device->pinctrl;
+ bool mode2;
+
+ /*
+ * TSIF devices are handled separately; however, changing the pinctrl
+ * state must be protected against race conditions.
+ */
+ if (mutex_lock_interruptible(&device->mutex))
+ return -ERESTARTSYS;
+
+ switch (source) {
+ case TSPP_SOURCE_TSIF0:
+ mode2 = device->tsif[0].mode == TSPP_TSIF_MODE_2;
+ if (enable == p->tsif1_active) {
+ if (enable)
+ /* Both tsif enabled */
+ s = mode2 ? p->dual_mode2 : p->dual_mode1;
+ else
+ /* Both tsif disabled */
+ s = p->disabled;
+ } else if (enable) {
+ /* Only tsif0 is enabled */
+ s = mode2 ? p->tsif0_mode2 : p->tsif0_mode1;
+ } else {
+ /* Only tsif1 is enabled */
+ s = mode2 ? p->tsif1_mode2 : p->tsif1_mode1;
+ }
+
+ ret = pinctrl_select_state(p->pinctrl, s);
+ if (!ret)
+ p->tsif0_active = enable;
+ break;
+ case TSPP_SOURCE_TSIF1:
+ mode2 = device->tsif[1].mode == TSPP_TSIF_MODE_2;
+ if (enable == p->tsif0_active) {
+ if (enable)
+ /* Both tsif enabled */
+ s = mode2 ? p->dual_mode2 : p->dual_mode1;
+ else
+ /* Both tsif disabled */
+ s = p->disabled;
+ } else if (enable) {
+ /* Only tsif1 is enabled */
+ s = mode2 ? p->tsif1_mode2 : p->tsif1_mode1;
+ } else {
+ /* Only tsif0 is enabled */
+ s = mode2 ? p->tsif0_mode2 : p->tsif0_mode1;
+ }
+
+ ret = pinctrl_select_state(p->pinctrl, s);
+ if (!ret)
+ p->tsif1_active = enable;
+ break;
+ default:
+ pr_err("%s: invalid source %d\n", __func__, source);
+ mutex_unlock(&device->mutex);
+ return -EINVAL;
+ }
+
+ if (ret)
+ pr_err("%s: failed to change pinctrl state, ret=%d\n",
+ __func__, ret);
+
+ mutex_unlock(&device->mutex);
+ return ret;
+}
+
+static int tspp_get_pinctrl(struct tspp_device *device)
+{
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *state;
+
+ pinctrl = devm_pinctrl_get(&device->pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ pr_err("%s: Unable to get pinctrl handle\n", __func__);
+ return -EINVAL;
+ }
+ device->pinctrl.pinctrl = pinctrl;
+
+ state = pinctrl_lookup_state(pinctrl, "disabled");
+ if (IS_ERR(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "disabled");
+ return -EINVAL;
+ }
+ device->pinctrl.disabled = state;
+
+ state = pinctrl_lookup_state(pinctrl, "tsif0-mode1");
+ if (IS_ERR(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "tsif0-mode1");
+ return -EINVAL;
+ }
+ device->pinctrl.tsif0_mode1 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "tsif0-mode2");
+ if (IS_ERR(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "tsif0-mode2");
+ return -EINVAL;
+ }
+ device->pinctrl.tsif0_mode2 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "tsif1-mode1");
+ if (IS_ERR(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "tsif1-mode1");
+ return -EINVAL;
+ }
+ device->pinctrl.tsif1_mode1 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "tsif1-mode2");
+ if (IS_ERR(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "tsif1-mode2");
+ return -EINVAL;
+ }
+ device->pinctrl.tsif1_mode2 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "dual-tsif-mode1");
+ if (IS_ERR(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "dual-tsif-mode1");
+ return -EINVAL;
+ }
+ device->pinctrl.dual_mode1 = state;
+
+ state = pinctrl_lookup_state(pinctrl, "dual-tsif-mode2");
+ if (IS_ERR(state)) {
+ pr_err("%s: Unable to find state %s\n",
+ __func__, "dual-tsif-mode2");
+ return -EINVAL;
+ }
+ device->pinctrl.dual_mode2 = state;
+
+ device->pinctrl.tsif0_active = false;
+ device->pinctrl.tsif1_active = false;
+
+ return 0;
+}
+
+
+/*** Clock functions ***/
+static int tspp_clock_start(struct tspp_device *device)
+{
+ int rc;
+
+ if (device == NULL) {
+ pr_err("tspp: Can't start clocks, invalid device\n");
+ return -EINVAL;
+ }
+
+ if (device->tsif_bus_client) {
+ rc = msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 1);
+ if (rc) {
+ pr_err("tspp: Can't enable bus\n");
+ return -EBUSY;
+ }
+ }
+
+ if (device->tsif_vreg) {
+ rc = regulator_set_voltage(device->tsif_vreg,
+ RPM_REGULATOR_CORNER_SUPER_TURBO,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ if (rc) {
+ pr_err("Unable to set CX voltage.\n");
+ if (device->tsif_bus_client)
+ msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 0);
+ return rc;
+ }
+ }
+
+ if (device->tsif_pclk && clk_prepare_enable(device->tsif_pclk) != 0) {
+ pr_err("tspp: Can't start pclk");
+
+ if (device->tsif_vreg) {
+ regulator_set_voltage(device->tsif_vreg,
+ RPM_REGULATOR_CORNER_NONE,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ }
+
+ if (device->tsif_bus_client)
+ msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 0);
+ return -EBUSY;
+ }
+
+ if (device->tsif_ref_clk &&
+ clk_prepare_enable(device->tsif_ref_clk) != 0) {
+ pr_err("tspp: Can't start ref clk");
+ clk_disable_unprepare(device->tsif_pclk);
+ if (device->tsif_vreg) {
+ regulator_set_voltage(device->tsif_vreg,
+ RPM_REGULATOR_CORNER_NONE,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ }
+
+ if (device->tsif_bus_client)
+ msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 0);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void tspp_clock_stop(struct tspp_device *device)
+{
+ int rc;
+
+ if (device == NULL) {
+ pr_err("tspp: Can't stop clocks, invalid device\n");
+ return;
+ }
+
+ if (device->tsif_pclk)
+ clk_disable_unprepare(device->tsif_pclk);
+
+ if (device->tsif_ref_clk)
+ clk_disable_unprepare(device->tsif_ref_clk);
+
+ if (device->tsif_vreg) {
+ rc = regulator_set_voltage(device->tsif_vreg,
+ RPM_REGULATOR_CORNER_NONE,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ if (rc)
+ pr_err("Unable to set CX voltage.\n");
+ }
+
+ if (device->tsif_bus_client) {
+ rc = msm_bus_scale_client_update_request(
+ device->tsif_bus_client, 0);
+ if (rc)
+ pr_err("tspp: Can't disable bus\n");
+ }
+}
+
+/*** TSIF functions ***/
+static int tspp_start_tsif(struct tspp_tsif_device *tsif_device)
+{
+ int start_hardware = 0;
+ u32 ctl;
+
+ if (tsif_device->ref_count == 0) {
+ start_hardware = 1;
+ } else if (tsif_device->ref_count > 0) {
+ ctl = readl_relaxed(tsif_device->base + TSIF_STS_CTL_OFF);
+ if ((ctl & TSIF_STS_CTL_START) != 1) {
+ /* this hardware should already be running */
+ pr_warn("tspp: tsif hw not started but ref count > 0");
+ start_hardware = 1;
+ }
+ }
+
+ if (start_hardware) {
+ ctl = TSIF_STS_CTL_EN_IRQ |
+ TSIF_STS_CTL_EN_DM |
+ TSIF_STS_CTL_PACK_AVAIL |
+ TSIF_STS_CTL_OVERFLOW |
+ TSIF_STS_CTL_LOST_SYNC;
+
+ if (tsif_device->clock_inverse)
+ ctl |= TSIF_STS_CTL_INV_CLOCK;
+
+ if (tsif_device->data_inverse)
+ ctl |= TSIF_STS_CTL_INV_DATA;
+
+ if (tsif_device->sync_inverse)
+ ctl |= TSIF_STS_CTL_INV_SYNC;
+
+ if (tsif_device->enable_inverse)
+ ctl |= TSIF_STS_CTL_INV_ENABLE;
+
+ switch (tsif_device->mode) {
+ case TSPP_TSIF_MODE_LOOPBACK:
+ ctl |= TSIF_STS_CTL_EN_NULL |
+ TSIF_STS_CTL_EN_ERROR |
+ TSIF_STS_CTL_TEST_MODE;
+ break;
+ case TSPP_TSIF_MODE_1:
+ ctl |= TSIF_STS_CTL_EN_TIME_LIM |
+ TSIF_STS_CTL_EN_TCR;
+ break;
+ case TSPP_TSIF_MODE_2:
+ ctl |= TSIF_STS_CTL_EN_TIME_LIM |
+ TSIF_STS_CTL_EN_TCR |
+ TSIF_STS_CTL_MODE_2;
+ break;
+ default:
+ pr_warn("tspp: unknown tsif mode 0x%x",
+ tsif_device->mode);
+ }
+ writel_relaxed(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+ writel_relaxed(tsif_device->time_limit,
+ tsif_device->base + TSIF_TIME_LIMIT_OFF);
+ /* assure register configuration is done before starting TSIF */
+ wmb();
+ writel_relaxed(ctl | TSIF_STS_CTL_START,
+ tsif_device->base + TSIF_STS_CTL_OFF);
+ /* assure TSIF start configuration */
+ wmb();
+ }
+
+ ctl = readl_relaxed(tsif_device->base + TSIF_STS_CTL_OFF);
+ if (!(ctl & TSIF_STS_CTL_START))
+ return -EBUSY;
+
+ tsif_device->ref_count++;
+ return 0;
+}
+
+static void tspp_stop_tsif(struct tspp_tsif_device *tsif_device)
+{
+ if (tsif_device->ref_count == 0)
+ return;
+
+ tsif_device->ref_count--;
+
+ if (tsif_device->ref_count == 0) {
+ writel_relaxed(TSIF_STS_CTL_STOP,
+ tsif_device->base + TSIF_STS_CTL_OFF);
+ /* assure TSIF stop configuration */
+ wmb();
+ }
+}
+
+/*** local TSPP functions ***/
+static int tspp_channels_in_use(struct tspp_device *pdev)
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < TSPP_NUM_CHANNELS; i++)
+ count += (pdev->channels[i].used ? 1 : 0);
+
+ return count;
+}
+
+static struct tspp_device *tspp_find_by_id(int id)
+{
+ struct tspp_device *dev;
+
+ list_for_each_entry(dev, &tspp_devices, devlist) {
+ if (dev->pdev->id == id)
+ return dev;
+ }
+ return NULL;
+}
+
+static int tspp_get_key_entry(void)
+{
+ int i;
+
+ for (i = 0; i < TSPP_NUM_KEYS; i++) {
+ if (!(tspp_key_entry & (1 << i))) {
+ tspp_key_entry |= (1 << i);
+ return i;
+ }
+ }
+ return -1; /* no free key entry available */
+}
+
+static void tspp_free_key_entry(int entry)
+{
+ if (entry >= TSPP_NUM_KEYS) {
+ pr_err("tspp_free_key_entry: index out of bounds");
+ return;
+ }
+
+ tspp_key_entry &= ~(1 << entry);
+}
+
+static int tspp_alloc_buffer(u32 channel_id, struct tspp_data_descriptor *desc,
+ u32 size, struct dma_pool *dma_pool, tspp_allocator *alloc, void *user)
+{
+ if (size < TSPP_MIN_BUFFER_SIZE ||
+ size > TSPP_MAX_BUFFER_SIZE) {
+ pr_err("tspp: bad buffer size %i", size);
+ return -ENOMEM;
+ }
+
+ if (alloc) {
+ TSPP_DEBUG("tspp using alloc function");
+ desc->virt_base = alloc(channel_id, size,
+ &desc->phys_base, user);
+ } else {
+ if (!dma_pool)
+ desc->virt_base = dma_alloc_coherent(NULL, size,
+ &desc->phys_base, GFP_KERNEL);
+ else
+ desc->virt_base = dma_pool_alloc(dma_pool, GFP_KERNEL,
+ &desc->phys_base);
+
+ if (desc->virt_base == 0) {
+ pr_err("tspp: dma buffer allocation failed %i\n", size);
+ return -ENOMEM;
+ }
+ }
+
+ desc->size = size;
+ return 0;
+}
+
+static int tspp_queue_buffer(struct tspp_channel *channel,
+ struct tspp_mem_buffer *buffer)
+{
+ int rc;
+ u32 flags = 0;
+
+ /* make sure the interrupt frequency is valid */
+ if (channel->int_freq < 1)
+ channel->int_freq = 1;
+
+ /* generate interrupt according to requested frequency */
+ if (buffer->desc.id % channel->int_freq == channel->int_freq-1)
+ flags = SPS_IOVEC_FLAG_INT;
+
+ /* start the transfer */
+ rc = sps_transfer_one(channel->pipe,
+ buffer->sps.phys_base,
+ buffer->sps.size,
+ flags ? channel->pdev : NULL,
+ flags);
+ if (rc < 0)
+ return rc;
+
+ buffer->state = TSPP_BUF_STATE_WAITING;
+
+ return 0;
+}
+
+static int tspp_global_reset(struct tspp_device *pdev)
+{
+ u32 i, val;
+
+ /* stop all TSIFs */
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+ pdev->tsif[i].ref_count = 1; /* allows stopping hw */
+ tspp_stop_tsif(&pdev->tsif[i]); /* will reset ref_count to 0 */
+ pdev->tsif[i].time_limit = TSPP_TSIF_DEFAULT_TIME_LIMIT;
+ pdev->tsif[i].clock_inverse = 0;
+ pdev->tsif[i].data_inverse = 0;
+ pdev->tsif[i].sync_inverse = 0;
+ pdev->tsif[i].enable_inverse = 0;
+ }
+ writel_relaxed(TSPP_RST_RESET, pdev->base + TSPP_RST);
+ /* assure state is reset before continuing with configuration */
+ wmb();
+
+ /* TSPP tables */
+ for (i = 0; i < TSPP_FILTER_TABLES; i++)
+ memset_io(pdev->filters[i],
+ 0, sizeof(struct tspp_pid_filter_table));
+
+ /* disable all filters */
+ val = (2 << TSPP_NUM_CHANNELS) - 1;
+ writel_relaxed(val, pdev->base + TSPP_PS_DISABLE);
+
+ /* TSPP registers */
+ val = readl_relaxed(pdev->base + TSPP_CONTROL);
+ writel_relaxed(val | TSPP_CLK_CONTROL_FORCE_PERF_CNT,
+ pdev->base + TSPP_CONTROL);
+ /* assure tspp performance count clock is set to 0 */
+ wmb();
+ memset_io(pdev->tspp_global_performance, 0,
+ sizeof(struct tspp_global_performance_regs));
+ memset_io(pdev->tspp_pipe_context, 0,
+ sizeof(struct tspp_pipe_context_regs));
+ memset_io(pdev->tspp_pipe_performance, 0,
+ sizeof(struct tspp_pipe_performance_regs));
+ /* assure tspp pipe context registers are set to 0 */
+ wmb();
+ writel_relaxed(val & ~TSPP_CLK_CONTROL_FORCE_PERF_CNT,
+ pdev->base + TSPP_CONTROL);
+ /* assure tspp performance count clock is reset */
+ wmb();
+
+ val = readl_relaxed(pdev->base + TSPP_CONFIG);
+ val &= ~(TSPP_CONFIG_PS_LEN_ERR_MASK |
+ TSPP_CONFIG_PS_CONT_ERR_UNSP_MASK |
+ TSPP_CONFIG_PS_CONT_ERR_MASK);
+ TSPP_CONFIG_SET_PACKET_LENGTH(val, TSPP_PACKET_LENGTH);
+ writel_relaxed(val, pdev->base + TSPP_CONFIG);
+ writel_relaxed(0x0007ffff, pdev->base + TSPP_IRQ_MASK);
+ writel_relaxed(0x000fffff, pdev->base + TSPP_IRQ_CLEAR);
+ writel_relaxed(0, pdev->base + TSPP_RST);
+ /* assure tspp reset clear */
+ wmb();
+
+ tspp_key_entry = 0;
+
+ return 0;
+}
+
+static void tspp_channel_init(struct tspp_channel *channel,
+ struct tspp_device *pdev)
+{
+ channel->pdev = pdev;
+ channel->data = NULL;
+ channel->read = NULL;
+ channel->waiting = NULL;
+ channel->locked = NULL;
+ channel->id = channel_id++;
+ channel->used = 0;
+ channel->buffer_size = TSPP_MIN_BUFFER_SIZE;
+ channel->max_buffers = TSPP_NUM_BUFFERS;
+ channel->buffer_count = 0;
+ channel->filter_count = 0;
+ channel->int_freq = 1;
+ channel->src = TSPP_SOURCE_NONE;
+ channel->mode = TSPP_MODE_DISABLED;
+ channel->notifier = NULL;
+ channel->notify_data = NULL;
+ channel->expiration_period_ms = 0;
+ channel->memfree = NULL;
+ channel->user_info = NULL;
+ init_waitqueue_head(&channel->in_queue);
+}
+
+static void tspp_set_tsif_mode(struct tspp_channel *channel,
+ enum tspp_tsif_mode mode)
+{
+ int index;
+
+ switch (channel->src) {
+ case TSPP_SOURCE_TSIF0:
+ index = 0;
+ break;
+ case TSPP_SOURCE_TSIF1:
+ index = 1;
+ break;
+ default:
+ pr_warn("tspp: can't set mode for non-tsif source %d",
+ channel->src);
+ return;
+ }
+ channel->pdev->tsif[index].mode = mode;
+}
+
+static void tspp_set_signal_inversion(struct tspp_channel *channel,
+ int clock_inverse, int data_inverse,
+ int sync_inverse, int enable_inverse)
+{
+ int index;
+
+ switch (channel->src) {
+ case TSPP_SOURCE_TSIF0:
+ index = 0;
+ break;
+ case TSPP_SOURCE_TSIF1:
+ index = 1;
+ break;
+ default:
+ return;
+ }
+ channel->pdev->tsif[index].clock_inverse = clock_inverse;
+ channel->pdev->tsif[index].data_inverse = data_inverse;
+ channel->pdev->tsif[index].sync_inverse = sync_inverse;
+ channel->pdev->tsif[index].enable_inverse = enable_inverse;
+}
+
+static int tspp_is_buffer_size_aligned(u32 size, enum tspp_mode mode)
+{
+ u32 alignment;
+
+ switch (mode) {
+ case TSPP_MODE_RAW:
+ /* must be a multiple of 192 */
+ alignment = (TSPP_PACKET_LENGTH + 4);
+ if (size % alignment)
+ return 0;
+ return 1;
+
+ case TSPP_MODE_RAW_NO_SUFFIX:
+ /* must be a multiple of 188 */
+ alignment = TSPP_PACKET_LENGTH;
+ if (size % alignment)
+ return 0;
+ return 1;
+
+ case TSPP_MODE_DISABLED:
+ case TSPP_MODE_PES:
+ default:
+ /* no alignment requirement */
+ return 1;
+ }
+
+}
+
+static u32 tspp_align_buffer_size_by_mode(u32 size, enum tspp_mode mode)
+{
+ u32 new_size;
+ u32 alignment;
+
+ switch (mode) {
+ case TSPP_MODE_RAW:
+ /* must be a multiple of 192 */
+ alignment = (TSPP_PACKET_LENGTH + 4);
+ break;
+
+ case TSPP_MODE_RAW_NO_SUFFIX:
+ /* must be a multiple of 188 */
+ alignment = TSPP_PACKET_LENGTH;
+ break;
+
+ case TSPP_MODE_DISABLED:
+ case TSPP_MODE_PES:
+ default:
+ /* no alignment requirement - give the user what he asks for */
+ alignment = 1;
+ break;
+ }
+ /* align up */
+ new_size = (((size + alignment - 1) / alignment) * alignment);
+ return new_size;
+}
+
+static void tspp_destroy_buffers(u32 channel_id, struct tspp_channel *channel)
+{
+ int i;
+ struct tspp_mem_buffer *pbuf, *temp;
+
+ pbuf = channel->data;
+ for (i = 0; i < channel->buffer_count; i++) {
+ if (pbuf->desc.phys_base) {
+ if (channel->memfree) {
+ channel->memfree(channel_id,
+ pbuf->desc.size,
+ pbuf->desc.virt_base,
+ pbuf->desc.phys_base,
+ channel->user_info);
+ } else {
+ if (!channel->dma_pool)
+ dma_free_coherent(
+ &channel->pdev->pdev->dev,
+ pbuf->desc.size,
+ pbuf->desc.virt_base,
+ pbuf->desc.phys_base);
+ else
+ dma_pool_free(channel->dma_pool,
+ pbuf->desc.virt_base,
+ pbuf->desc.phys_base);
+ }
+ pbuf->desc.phys_base = 0;
+ }
+ pbuf->desc.virt_base = 0;
+ pbuf->state = TSPP_BUF_STATE_EMPTY;
+ temp = pbuf;
+ pbuf = pbuf->next;
+ kfree(temp);
+ }
+}
+
+static int msm_tspp_req_irqs(struct tspp_device *device)
+{
+ int rc;
+ int i;
+ int j;
+
+ rc = request_irq(device->tspp_irq, tspp_isr, IRQF_SHARED,
+ dev_name(&device->pdev->dev), device);
+ if (rc) {
+ dev_err(&device->pdev->dev,
+ "failed to request TSPP IRQ %d : %d",
+ device->tspp_irq, rc);
+ return rc;
+ }
+
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+ rc = request_irq(device->tsif[i].tsif_irq,
+ tsif_isr, IRQF_SHARED, dev_name(&device->pdev->dev),
+ &device->tsif[i]);
+ if (rc) {
+ dev_err(&device->pdev->dev,
+ "failed to request TSIF%d IRQ: %d",
+ i, rc);
+ goto failed;
+ }
+ }
+ device->req_irqs = true;
+ return 0;
+
+failed:
+ free_irq(device->tspp_irq, device);
+ for (j = 0; j < i; j++)
+ free_irq(device->tsif[j].tsif_irq, &device->tsif[j]);
+
+ return rc;
+}
+
+static inline void msm_tspp_free_irqs(struct tspp_device *device)
+{
+ int i;
+
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+ if (device->tsif[i].tsif_irq)
+ free_irq(device->tsif[i].tsif_irq, &device->tsif[i]);
+ }
+
+ if (device->tspp_irq)
+ free_irq(device->tspp_irq, device);
+ device->req_irqs = false;
+}
+
+/*** TSPP API functions ***/
+
+/**
+ * tspp_open_stream - open a TSPP stream for use.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @source: stream source parameters.
+ *
+ * Return error status
+ *
+ */
+int tspp_open_stream(u32 dev, u32 channel_id,
+ struct tspp_select_source *source)
+{
+ u32 val;
+ int rc;
+ struct tspp_device *pdev;
+ struct tspp_channel *channel;
+ bool req_irqs = false;
+
+ TSPP_DEBUG("tspp_open_stream %i %i %i %i",
+ dev, channel_id, source->source, source->mode);
+
+ if (dev >= TSPP_MAX_DEVICES) {
+ pr_err("tspp: device id out of range");
+ return -ENODEV;
+ }
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_str: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+ channel->src = source->source;
+ tspp_set_tsif_mode(channel, source->mode);
+ tspp_set_signal_inversion(channel, source->clk_inverse,
+ source->data_inverse, source->sync_inverse,
+ source->enable_inverse);
+
+ /* Request IRQ resources on first open */
+ if (!pdev->req_irqs && (source->source == TSPP_SOURCE_TSIF0 ||
+ source->source == TSPP_SOURCE_TSIF1)) {
+ rc = msm_tspp_req_irqs(pdev);
+ if (rc) {
+ pr_err("tspp: error requesting irqs\n");
+ return rc;
+ }
+ req_irqs = true;
+ }
+
+ switch (source->source) {
+ case TSPP_SOURCE_TSIF0:
+ if (tspp_config_gpios(pdev, channel->src, 1) != 0) {
+ rc = -EBUSY;
+ pr_err("tspp: error enabling tsif0 GPIOs\n");
+ goto free_irq;
+ }
+ /* make sure TSIF0 is running & enabled */
+ if (tspp_start_tsif(&pdev->tsif[0]) != 0) {
+ rc = -EBUSY;
+ pr_err("tspp: error starting tsif0");
+ goto free_irq;
+ }
+ if (pdev->tsif[0].ref_count == 1) {
+ val = readl_relaxed(pdev->base + TSPP_CONTROL);
+ writel_relaxed(val & ~TSPP_CONTROL_TSP_TSIF0_SRC_DIS,
+ pdev->base + TSPP_CONTROL);
+ /* Assure BAM TS PKT packet processing is enabled */
+ wmb();
+ }
+ break;
+ case TSPP_SOURCE_TSIF1:
+ if (tspp_config_gpios(pdev, channel->src, 1) != 0) {
+ rc = -EBUSY;
+ pr_err("tspp: error enabling tsif1 GPIOs\n");
+ goto free_irq;
+ }
+ /* make sure TSIF1 is running & enabled */
+ if (tspp_start_tsif(&pdev->tsif[1]) != 0) {
+ rc = -EBUSY;
+ pr_err("tspp: error starting tsif1");
+ goto free_irq;
+ }
+ if (pdev->tsif[1].ref_count == 1) {
+ val = readl_relaxed(pdev->base + TSPP_CONTROL);
+ writel_relaxed(val & ~TSPP_CONTROL_TSP_TSIF1_SRC_DIS,
+ pdev->base + TSPP_CONTROL);
+ /* Assure BAM TS PKT packet processing is enabled */
+ wmb();
+ }
+ break;
+ case TSPP_SOURCE_MEM:
+ break;
+ default:
+ pr_err("tspp: channel %i invalid source %i",
+ channel->id, source->source);
+ return -EBUSY;
+ }
+
+ return 0;
+
+free_irq:
+ /* Free irqs only if were requested during opening of this stream */
+ if (req_irqs)
+ msm_tspp_free_irqs(pdev);
+ return rc;
+}
+EXPORT_SYMBOL(tspp_open_stream);
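A minimal kernel-client usage sketch for this call (illustrative only, not part of this commit; the struct field names follow how tspp_open_stream() dereferences its source argument, which is declared in <linux/qcom_tspp.h>):

static int example_open_tsif0_stream(void)
{
	struct tspp_select_source src = {
		.source = TSPP_SOURCE_TSIF0,
		.mode = TSPP_TSIF_MODE_1,
		.clk_inverse = 0,
		.data_inverse = 0,
		.sync_inverse = 0,
		.enable_inverse = 0,
	};

	/* device 0, channel 0 */
	return tspp_open_stream(0, 0, &src);
}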
+
+/**
+ * tspp_close_stream - close a TSPP stream.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return error status
+ *
+ */
+int tspp_close_stream(u32 dev, u32 channel_id)
+{
+ u32 val;
+ u32 prev_ref_count = 0;
+ struct tspp_device *pdev;
+ struct tspp_channel *channel;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_cs: can't find device %i", dev);
+ return -EBUSY;
+ }
+ channel = &pdev->channels[channel_id];
+
+ switch (channel->src) {
+ case TSPP_SOURCE_TSIF0:
+ prev_ref_count = pdev->tsif[0].ref_count;
+ tspp_stop_tsif(&pdev->tsif[0]);
+ if (tspp_config_gpios(pdev, channel->src, 0) != 0)
+ pr_err("tspp: error disabling tsif0 GPIOs\n");
+
+ if (prev_ref_count == 1) {
+ val = readl_relaxed(pdev->base + TSPP_CONTROL);
+ writel_relaxed(val | TSPP_CONTROL_TSP_TSIF0_SRC_DIS,
+ pdev->base + TSPP_CONTROL);
+ /* Assure BAM TS PKT packet processing is disabled */
+ wmb();
+ }
+ break;
+ case TSPP_SOURCE_TSIF1:
+ prev_ref_count = pdev->tsif[1].ref_count;
+ tspp_stop_tsif(&pdev->tsif[1]);
+ if (tspp_config_gpios(pdev, channel->src, 0) != 0)
+ pr_err("tspp: error disabling tsif0 GPIOs\n");
+
+ if (prev_ref_count == 1) {
+ val = readl_relaxed(pdev->base + TSPP_CONTROL);
+ writel_relaxed(val | TSPP_CONTROL_TSP_TSIF1_SRC_DIS,
+ pdev->base + TSPP_CONTROL);
+ /* Assure BAM TS PKT packet processing is disabled */
+ wmb();
+ }
+ break;
+ case TSPP_SOURCE_MEM:
+ break;
+ case TSPP_SOURCE_NONE:
+ break;
+ }
+
+ channel->src = TSPP_SOURCE_NONE;
+
+ /* Free requested interrupts to save power */
+ if ((pdev->tsif[0].ref_count + pdev->tsif[1].ref_count) == 0 &&
+ prev_ref_count)
+ msm_tspp_free_irqs(pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_close_stream);
+
+static int tspp_init_sps_device(struct tspp_device *dev)
+{
+ int ret;
+
+ ret = sps_register_bam_device(&dev->bam_props, &dev->bam_handle);
+ if (ret) {
+ pr_err("tspp: failed to register bam device, err-%d\n", ret);
+ return ret;
+ }
+
+ ret = sps_device_reset(dev->bam_handle);
+ if (ret) {
+ sps_deregister_bam_device(dev->bam_handle);
+ pr_err("tspp: error resetting bam device, err=%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * tspp_open_channel - open a TSPP channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return error status
+ *
+ */
+int tspp_open_channel(u32 dev, u32 channel_id)
+{
+ int rc = 0;
+ struct sps_connect *config;
+ struct sps_register_event *event;
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_oc: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+
+ if (channel->used) {
+ pr_err("tspp channel already in use");
+ return -EBUSY;
+ }
+
+ config = &channel->config;
+ event = &channel->event;
+
+ /* start the clocks if needed */
+ if (tspp_channels_in_use(pdev) == 0) {
+ rc = tspp_clock_start(pdev);
+ if (rc)
+ return rc;
+
+ if (pdev->bam_handle == SPS_DEV_HANDLE_INVALID) {
+ rc = tspp_init_sps_device(pdev);
+ if (rc) {
+ pr_err("tspp: failed to init sps device, err=%d\n",
+ rc);
+ tspp_clock_stop(pdev);
+ return rc;
+ }
+ }
+
+ __pm_stay_awake(&pdev->ws);
+ }
+
+ /* mark it as used */
+ channel->used = 1;
+
+ /* start the bam */
+ channel->pipe = sps_alloc_endpoint();
+ if (channel->pipe == 0) {
+ pr_err("tspp: error allocating endpoint");
+ rc = -ENOMEM;
+ goto err_sps_alloc;
+ }
+
+ /* get default configuration */
+ sps_get_config(channel->pipe, config);
+
+ config->source = pdev->bam_handle;
+ config->destination = SPS_DEV_HANDLE_MEM;
+ config->mode = SPS_MODE_SRC;
+ config->options =
+ SPS_O_AUTO_ENABLE | /* connection is auto-enabled */
+ SPS_O_STREAMING | /* streaming mode */
+ SPS_O_DESC_DONE | /* interrupt on end of descriptor */
+ SPS_O_ACK_TRANSFERS | /* must use sps_get_iovec() */
+ SPS_O_HYBRID; /* Read actual descriptors in sps_get_iovec() */
+ config->src_pipe_index = channel->id;
+ config->desc.size =
+ TSPP_SPS_DESCRIPTOR_COUNT * SPS_DESCRIPTOR_SIZE;
+ config->desc.base = dma_alloc_coherent(&pdev->pdev->dev,
+ config->desc.size,
+ &config->desc.phys_base,
+ GFP_KERNEL);
+ if (config->desc.base == 0) {
+ pr_err("tspp: error allocating sps descriptors");
+ rc = -ENOMEM;
+ goto err_desc_alloc;
+ }
+
+ memset(config->desc.base, 0, config->desc.size);
+
+ rc = sps_connect(channel->pipe, config);
+ if (rc) {
+ pr_err("tspp: error connecting bam");
+ goto err_connect;
+ }
+
+ event->mode = SPS_TRIGGER_CALLBACK;
+ event->options = SPS_O_DESC_DONE;
+ event->callback = tspp_sps_complete_cb;
+ event->xfer_done = NULL;
+ event->user = pdev;
+
+ rc = sps_register_event(channel->pipe, event);
+ if (rc) {
+ pr_err("tspp: error registering event");
+ goto err_event;
+ }
+
+ init_timer(&channel->expiration_timer);
+ channel->expiration_timer.function = tspp_expiration_timer;
+ channel->expiration_timer.data = (unsigned long)pdev;
+ channel->expiration_timer.expires = 0xffffffffL;
+
+ rc = pm_runtime_get(&pdev->pdev->dev);
+ if (rc < 0) {
+ dev_err(&pdev->pdev->dev,
+ "Runtime PM: Unable to wake up tspp device, rc = %d",
+ rc);
+ }
+ return 0;
+
+err_event:
+ sps_disconnect(channel->pipe);
+err_connect:
+ dma_free_coherent(&pdev->pdev->dev, config->desc.size,
+ config->desc.base, config->desc.phys_base);
+err_desc_alloc:
+ sps_free_endpoint(channel->pipe);
+err_sps_alloc:
+ channel->used = 0;
+ return rc;
+}
+EXPORT_SYMBOL(tspp_open_channel);
+
+/**
+ * tspp_close_channel - close a TSPP channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return error status
+ *
+ */
+int tspp_close_channel(u32 dev, u32 channel_id)
+{
+ int i;
+ int id;
+ int table_idx;
+ u32 val;
+ unsigned long flags;
+
+ struct sps_connect *config;
+ struct tspp_device *pdev;
+ struct tspp_channel *channel;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_close: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+
+ /* if the channel is not used, we are done */
+ if (!channel->used)
+ return 0;
+
+ /*
+ * Need to protect access to used and waiting fields, as they are
+ * used by the tasklet which is invoked from interrupt context
+ */
+ spin_lock_irqsave(&pdev->spinlock, flags);
+ channel->used = 0;
+ channel->waiting = NULL;
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+ if (channel->expiration_period_ms)
+ del_timer(&channel->expiration_timer);
+
+ channel->notifier = NULL;
+ channel->notify_data = NULL;
+ channel->expiration_period_ms = 0;
+
+ config = &channel->config;
+ pdev = channel->pdev;
+
+ /* disable pipe (channel) */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+ writel_relaxed(val | (1 << channel->id), pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is set */
+ wmb();
+
+ /* unregister all filters for this channel */
+ for (table_idx = 0; table_idx < TSPP_FILTER_TABLES; table_idx++) {
+ for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+ struct tspp_pid_filter *filter =
+ &pdev->filters[table_idx]->filter[i];
+ id = FILTER_GET_PIPE_NUMBER0(filter);
+ if (id == channel->id) {
+ if (FILTER_HAS_ENCRYPTION(filter))
+ tspp_free_key_entry(
+ FILTER_GET_KEY_NUMBER(filter));
+ filter->config = 0;
+ filter->filter = 0;
+ }
+ }
+ }
+ channel->filter_count = 0;
+
+ /* disconnect the bam */
+ if (sps_disconnect(channel->pipe) != 0)
+ pr_warn("tspp: Error freeing sps endpoint (%i)", channel->id);
+
+ /* destroy the buffers */
+ dma_free_coherent(&pdev->pdev->dev, config->desc.size,
+ config->desc.base, config->desc.phys_base);
+
+ sps_free_endpoint(channel->pipe);
+
+ tspp_destroy_buffers(channel_id, channel);
+
+ dma_pool_destroy(channel->dma_pool);
+ channel->dma_pool = NULL;
+
+ channel->src = TSPP_SOURCE_NONE;
+ channel->mode = TSPP_MODE_DISABLED;
+ channel->memfree = NULL;
+ channel->user_info = NULL;
+ channel->buffer_count = 0;
+ channel->data = NULL;
+ channel->read = NULL;
+ channel->locked = NULL;
+
+ if (tspp_channels_in_use(pdev) == 0) {
+ sps_deregister_bam_device(pdev->bam_handle);
+ pdev->bam_handle = SPS_DEV_HANDLE_INVALID;
+
+ __pm_relax(&pdev->ws);
+ tspp_clock_stop(pdev);
+ }
+
+ pm_runtime_put(&pdev->pdev->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_close_channel);
+
+/**
+ * tspp_get_ref_clk_counter - return the TSIF clock reference (TCR) counter.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @source: The TSIF source from which the counter should be read
+ * @tcr_counter: the value of TCR counter
+ *
+ * Return error status
+ *
+ * TCR increments at a rate equal to 27 MHz/256 = 105.47 kHz.
+ * If the source is neither TSIF0 nor TSIF1, 0 is returned.
+ */
+int tspp_get_ref_clk_counter(u32 dev, enum tspp_source source, u32 *tcr_counter)
+{
+ struct tspp_device *pdev;
+ struct tspp_tsif_device *tsif_device;
+
+ if (!tcr_counter)
+ return -EINVAL;
+
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_get_ref_clk_counter: can't find device %i\n", dev);
+ return -ENODEV;
+ }
+
+ switch (source) {
+ case TSPP_SOURCE_TSIF0:
+ tsif_device = &pdev->tsif[0];
+ break;
+
+ case TSPP_SOURCE_TSIF1:
+ tsif_device = &pdev->tsif[1];
+ break;
+
+ default:
+ tsif_device = NULL;
+ break;
+ }
+
+ if (tsif_device && tsif_device->ref_count)
+ *tcr_counter = ioread32(tsif_device->base + TSIF_CLK_REF_OFF);
+ else
+ *tcr_counter = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_get_ref_clk_counter);
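Since the TCR ticks at 27 MHz / 256, one tick is 256/27 ≈ 9.48 us. A caller could convert a reading to microseconds roughly as follows (a sketch, not part of this commit; div_u64() comes from <linux/math64.h>):

static u64 example_tcr_to_us(u32 tcr_ticks)
{
	/* tick period = 256 / 27 MHz, so us = ticks * 256 / 27 */
	return div_u64((u64)tcr_ticks * 256, 27);
}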
+
+/**
+ * tspp_add_filter - add a TSPP filter to a channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @filter: TSPP filter parameters
+ *
+ * Return error status
+ *
+ */
+int tspp_add_filter(u32 dev, u32 channel_id,
+ struct tspp_filter *filter)
+{
+ int i, rc;
+ int other_channel;
+ int entry;
+ u32 val, pid, enabled;
+ struct tspp_device *pdev;
+ struct tspp_pid_filter p;
+ struct tspp_channel *channel;
+
+ TSPP_DEBUG("tspp: add filter");
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_add: can't find device %i", dev);
+ return -ENODEV;
+ }
+
+ channel = &pdev->channels[channel_id];
+
+ if (filter->source > TSPP_SOURCE_MEM) {
+ pr_err("tspp invalid source");
+ return -ENOSR;
+ }
+
+ if (filter->priority >= TSPP_NUM_PRIORITIES) {
+ pr_err("tspp invalid filter priority");
+ return -ENOSR;
+ }
+
+ channel->mode = filter->mode;
+ /*
+ * if buffers are already allocated, verify they fulfil
+ * the alignment requirements.
+ */
+ if ((channel->buffer_count > 0) &&
+ (!tspp_is_buffer_size_aligned(channel->buffer_size, channel->mode)))
+ pr_warn("tspp: buffers allocated with incorrect alignment\n");
+
+ if (filter->mode == TSPP_MODE_PES) {
+ for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+ struct tspp_pid_filter *tspp_filter =
+ &pdev->filters[channel->src]->filter[i];
+ pid = FILTER_GET_PIPE_PID((tspp_filter));
+ enabled = FILTER_GET_PIPE_PROCESS0(tspp_filter);
+ if (enabled && (pid == filter->pid)) {
+ other_channel =
+ FILTER_GET_PIPE_NUMBER0(tspp_filter);
+ pr_err("tspp: pid 0x%x already in use by channel %i",
+ filter->pid, other_channel);
+ return -EBADSLT;
+ }
+ }
+ }
+
+ /* make sure this priority is not already in use */
+ enabled = FILTER_GET_PIPE_PROCESS0(
+ (&(pdev->filters[channel->src]->filter[filter->priority])));
+ if (enabled) {
+ pr_err("tspp: filter priority %i source %i is already enabled\n",
+ filter->priority, channel->src);
+ return -ENOSR;
+ }
+
+ if (channel->mode == TSPP_MODE_PES) {
+ /*
+ * if we are already processing in PES mode, disable pipe
+ * (channel) and filter to be updated
+ */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+ writel_relaxed(val | (1 << channel->id),
+ pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is set */
+ wmb();
+ }
+
+ /* update entry */
+ p.filter = 0;
+ p.config = FILTER_TRANS_END_DISABLE;
+ FILTER_SET_PIPE_PROCESS0((&p), filter->mode);
+ FILTER_SET_PIPE_PID((&p), filter->pid);
+ FILTER_SET_PID_MASK((&p), filter->mask);
+ FILTER_SET_PIPE_NUMBER0((&p), channel->id);
+ FILTER_SET_PIPE_PROCESS1((&p), TSPP_MODE_DISABLED);
+ if (filter->decrypt) {
+ entry = tspp_get_key_entry();
+ if (entry == -1) {
+ pr_err("tspp: no more keys available!");
+ } else {
+ p.config |= FILTER_DECRYPT;
+ FILTER_SET_KEY_NUMBER((&p), entry);
+ }
+ }
+
+ pdev->filters[channel->src]->
+ filter[filter->priority].config = p.config;
+ pdev->filters[channel->src]->
+ filter[filter->priority].filter = p.filter;
+
+ /*
+ * allocate buffers if needed (i.e. if the user has not already called
+ * tspp_allocate_buffers() explicitly).
+ */
+ if (channel->buffer_count == 0) {
+ channel->buffer_size =
+ tspp_align_buffer_size_by_mode(channel->buffer_size,
+ channel->mode);
+ rc = tspp_allocate_buffers(dev, channel->id,
+ channel->max_buffers,
+ channel->buffer_size,
+ channel->int_freq, NULL, NULL, NULL);
+ if (rc != 0) {
+ pr_err("tspp: tspp_allocate_buffers failed\n");
+ return rc;
+ }
+ }
+
+ /* reenable pipe */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+ writel_relaxed(val & ~(1 << channel->id), pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is reset */
+ wmb();
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+
+ channel->filter_count++;
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_add_filter);
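+
+/*
+ * Illustrative usage sketch (not part of the driver): add a PES-mode filter
+ * on a channel that is assumed to have been opened already. The full 13-bit
+ * PID mask value (0x1fff) is an assumption made for this example.
+ */
+#if 0
+static int tspp_example_add_pes_filter(u32 dev, u32 channel_id, u16 pid)
+{
+ struct tspp_filter filter = {
+ .source = TSPP_SOURCE_TSIF0,
+ .priority = 0,
+ .mode = TSPP_MODE_PES,
+ .pid = pid,
+ .mask = 0x1fff, /* assumed full PID mask */
+ .decrypt = 0,
+ };
+
+ /* buffers are allocated implicitly if not allocated beforehand */
+ return tspp_add_filter(dev, channel_id, &filter);
+}
+#endif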
+
+/**
+ * tspp_remove_filter - remove a TSPP filter from a channel.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @filter: TSPP filter parameters
+ *
+ * Return error status
+ *
+ */
+int tspp_remove_filter(u32 dev, u32 channel_id,
+ struct tspp_filter *filter)
+{
+ int entry;
+ u32 val;
+ struct tspp_device *pdev;
+ int src;
+ struct tspp_pid_filter *tspp_filter;
+ struct tspp_channel *channel;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ if (!filter) {
+ pr_err("tspp: NULL filter pointer");
+ return -EINVAL;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_remove: can't find device %i", dev);
+ return -ENODEV;
+ }
+ if (filter->priority >= TSPP_NUM_PRIORITIES) {
+ pr_err("tspp invalid filter priority");
+ return -ENOSR;
+ }
+ channel = &pdev->channels[channel_id];
+
+ src = channel->src;
+ tspp_filter = &(pdev->filters[src]->filter[filter->priority]);
+
+ /* disable pipe (channel) */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+ writel_relaxed(val | (1 << channel->id),
+ pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is set */
+ wmb();
+
+ /* update data keys */
+ if (tspp_filter->config & FILTER_DECRYPT) {
+ entry = FILTER_GET_KEY_NUMBER(tspp_filter);
+ tspp_free_key_entry(entry);
+ }
+
+ /* update pid table */
+ tspp_filter->config = 0;
+ tspp_filter->filter = 0;
+
+ channel->filter_count--;
+
+ /* reenable pipe */
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+ writel_relaxed(val & ~(1 << channel->id),
+ pdev->base + TSPP_PS_DISABLE);
+ /* Assure PS_DISABLE register is reset */
+ wmb();
+ val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_remove_filter);
+
+/**
+ * tspp_set_key - set TSPP key in key table.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @key: TSPP key parameters
+ *
+ * Return error status
+ *
+ */
+int tspp_set_key(u32 dev, u32 channel_id, struct tspp_key *key)
+{
+ int i;
+ int id;
+ int key_index;
+ int data;
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_set: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+
+ /* read the key index used by this channel */
+ for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+ struct tspp_pid_filter *tspp_filter =
+ &(pdev->filters[channel->src]->filter[i]);
+ id = FILTER_GET_PIPE_NUMBER0(tspp_filter);
+ if (id == channel->id) {
+ if (FILTER_HAS_ENCRYPTION(tspp_filter)) {
+ key_index = FILTER_GET_KEY_NUMBER(tspp_filter);
+ break;
+ }
+ }
+ }
+ if (i == TSPP_NUM_PRIORITIES) {
+ pr_err("tspp: no encryption on this channel");
+ return -ENOKEY;
+ }
+
+ if (key->parity == TSPP_KEY_PARITY_EVEN) {
+ pdev->tspp_key_table->entry[key_index].even_lsb = key->lsb;
+ pdev->tspp_key_table->entry[key_index].even_msb = key->msb;
+ } else {
+ pdev->tspp_key_table->entry[key_index].odd_lsb = key->lsb;
+ pdev->tspp_key_table->entry[key_index].odd_msb = key->msb;
+ }
+ data = readl_relaxed(channel->pdev->base + TSPP_KEY_VALID);
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_set_key);
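+
+/*
+ * Illustrative usage sketch (not part of the driver): program the even key
+ * for a channel that already has a decrypting filter. Splitting a 64-bit key
+ * into 32-bit msb/lsb words is an assumption made only for this example.
+ */
+#if 0
+static int tspp_example_set_even_key(u32 dev, u32 channel_id, u64 key64)
+{
+ struct tspp_key key = {
+ .parity = TSPP_KEY_PARITY_EVEN,
+ .lsb = (u32)key64,
+ .msb = (u32)(key64 >> 32),
+ };
+
+ return tspp_set_key(dev, channel_id, &key);
+}
+#endif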
+
+/**
+ * tspp_register_notification - register TSPP channel notification function.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @notify: notification function
+ * @userdata: user data to pass to notification function
+ * @timer_ms: notification timer period (in msec) for partially filled buffers
+ *
+ * Return error status
+ *
+ */
+int tspp_register_notification(u32 dev, u32 channel_id,
+ tspp_notifier *notify, void *userdata, u32 timer_ms)
+{
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_reg: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+ channel->notifier = notify;
+ channel->notify_data = userdata;
+ channel->expiration_period_ms = timer_ms;
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_register_notification);
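+
+/*
+ * Illustrative usage sketch (not part of the driver): register a channel
+ * notification with a 100 ms timer for partially filled buffers. The
+ * notifier prototype (channel id plus user pointer) is an assumption taken
+ * from the public header and may differ.
+ */
+#if 0
+static void tspp_example_notify(int channel_id, void *user)
+{
+ /* typically schedules work that drains data via tspp_get_buffer() */
+}
+
+static int tspp_example_register_notify(u32 dev, u32 channel_id, void *ctx)
+{
+ return tspp_register_notification(dev, channel_id,
+ tspp_example_notify, ctx, 100);
+}
+#endif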
+
+/**
+ * tspp_unregister_notification - unregister TSPP channel notification function.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return error status
+ *
+ */
+int tspp_unregister_notification(u32 dev, u32 channel_id)
+{
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_unreg: can't find device %i", dev);
+ return -ENODEV;
+ }
+ channel = &pdev->channels[channel_id];
+ channel->notifier = NULL;
+ channel->notify_data = 0;
+ return 0;
+}
+EXPORT_SYMBOL(tspp_unregister_notification);
+
+/**
+ * tspp_get_buffer - get TSPP data buffer.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ *
+ * Return error status
+ *
+ */
+const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel_id)
+{
+ struct tspp_mem_buffer *buffer;
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+ unsigned long flags;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return NULL;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp_get: can't find device %i", dev);
+ return NULL;
+ }
+
+ spin_lock_irqsave(&pdev->spinlock, flags);
+
+ channel = &pdev->channels[channel_id];
+
+ if (!channel->read) {
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+ pr_warn("tspp: no buffer to get on channel %i!",
+ channel->id);
+ return NULL;
+ }
+
+ buffer = channel->read;
+ /* see if we have any buffers ready to read */
+ if (buffer->state != TSPP_BUF_STATE_DATA) {
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+ return NULL;
+ }
+
+ /* mark the buffer as busy */
+ buffer->state = TSPP_BUF_STATE_LOCKED;
+
+ /* increment the pointer along the list */
+ channel->read = channel->read->next;
+
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+ return &buffer->desc;
+}
+EXPORT_SYMBOL(tspp_get_buffer);
+
+/**
+ * tspp_release_buffer - release TSPP data buffer back to TSPP.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @descriptor_id: buffer descriptor ID
+ *
+ * Return error status
+ *
+ */
+int tspp_release_buffer(u32 dev, u32 channel_id, u32 descriptor_id)
+{
+ int i, found = 0;
+ struct tspp_mem_buffer *buffer;
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+ unsigned long flags;
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("tspp: channel id out of range");
+ return -ECHRNG;
+ }
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("tspp: can't find device %i", dev);
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&pdev->spinlock, flags);
+
+ channel = &pdev->channels[channel_id];
+
+ if (descriptor_id >= channel->buffer_count)
+ pr_warn("tspp: desc id looks weird 0x%08x", descriptor_id);
+
+ /* find the correct descriptor */
+ buffer = channel->locked;
+ for (i = 0; i < channel->buffer_count; i++) {
+ if (buffer->desc.id == descriptor_id) {
+ found = 1;
+ break;
+ }
+ buffer = buffer->next;
+ }
+ channel->locked = channel->locked->next;
+
+ if (!found) {
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+ pr_err("tspp: cant find desc %i", descriptor_id);
+ return -EINVAL;
+ }
+
+ /* make sure the buffer is in the expected state */
+ if (buffer->state != TSPP_BUF_STATE_LOCKED) {
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+ pr_err("tspp: buffer %i not locked", descriptor_id);
+ return -EINVAL;
+ }
+ /* unlock the buffer and requeue it */
+ buffer->state = TSPP_BUF_STATE_WAITING;
+
+ if (tspp_queue_buffer(channel, buffer))
+ pr_warn("tspp: can't requeue buffer");
+
+ spin_unlock_irqrestore(&pdev->spinlock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_release_buffer);
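+
+/*
+ * Illustrative usage sketch (not part of the driver): drain every buffer
+ * that is ready on a channel and hand it back to the TSPP. The commented-out
+ * consume_data() call is hypothetical and stands in for whatever the caller
+ * does with the payload.
+ */
+#if 0
+static void tspp_example_drain(u32 dev, u32 channel_id)
+{
+ const struct tspp_data_descriptor *desc;
+
+ while ((desc = tspp_get_buffer(dev, channel_id)) != NULL) {
+ /* consume_data(desc->virt_base, desc->size); */
+ tspp_release_buffer(dev, channel_id, desc->id);
+ }
+}
+#endif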
+
+/**
+ * tspp_allocate_buffers - allocate TSPP data buffers.
+ *
+ * @dev: TSPP device (up to TSPP_MAX_DEVICES)
+ * @channel_id: Channel ID number (up to TSPP_NUM_CHANNELS)
+ * @count: number of buffers to allocate
+ * @size: size of each buffer to allocate
+ * @int_freq: interrupt frequency
+ * @alloc: user defined memory allocator function. Pass NULL for default.
+ * @memfree: user defined memory free function. Pass NULL for default.
+ * @user: user data to pass to the memory allocator/free function
+ *
+ * Return error status
+ *
+ * The user can optionally call this function explicitly to allocate the TSPP
+ * data buffers. Alternatively, if the user did not call this function, it
+ * is called implicitly by tspp_add_filter().
+ */
+int tspp_allocate_buffers(u32 dev, u32 channel_id, u32 count, u32 size,
+ u32 int_freq, tspp_allocator *alloc,
+ tspp_memfree *memfree, void *user)
+{
+ struct tspp_channel *channel;
+ struct tspp_device *pdev;
+ struct tspp_mem_buffer *last = NULL;
+
+ TSPP_DEBUG("tspp_allocate_buffers");
+
+ if (channel_id >= TSPP_NUM_CHANNELS) {
+ pr_err("%s: channel id out of range", __func__);
+ return -ECHRNG;
+ }
+
+ pdev = tspp_find_by_id(dev);
+ if (!pdev) {
+ pr_err("%s: can't find device %i", __func__, dev);
+ return -ENODEV;
+ }
+
+ if (count < MIN_ACCEPTABLE_BUFFER_COUNT) {
+ pr_err("%s: tspp requires a minimum of %i buffers\n",
+ __func__, MIN_ACCEPTABLE_BUFFER_COUNT);
+ return -EINVAL;
+ }
+
+ if (count > TSPP_NUM_BUFFERS) {
+ pr_err("%s: tspp requires a maximum of %i buffers\n",
+ __func__, TSPP_NUM_BUFFERS);
+ return -EINVAL;
+ }
+
+ channel = &pdev->channels[channel_id];
+
+ /*
+ * allow buffer allocation only if there was no previous buffer
+ * allocation for this channel.
+ */
+ if (channel->buffer_count > 0) {
+ pr_err("%s: buffers already allocated for channel %u",
+ __func__, channel_id);
+ return -EINVAL;
+ }
+
+ channel->max_buffers = count;
+
+ /* set up interrupt frequency */
+ if (int_freq > channel->max_buffers) {
+ int_freq = channel->max_buffers;
+ pr_warn("%s: setting interrupt frequency to %u\n",
+ __func__, int_freq);
+ }
+ channel->int_freq = int_freq;
+ /*
+ * it is the responsibility of the caller of tspp_allocate_buffers(),
+ * whether the user or the driver, to make sure the size parameter
+ * is compatible with the channel mode.
+ */
+ channel->buffer_size = size;
+
+ /* save user defined memory free function for later use */
+ channel->memfree = memfree;
+ channel->user_info = user;
+
+ /*
+ * For small buffers, create a DMA pool so that memory
+ * is not wasted through dma_alloc_coherent.
+ */
+ if (TSPP_USE_DMA_POOL(channel->buffer_size)) {
+ channel->dma_pool = dma_pool_create("tspp",
+ &pdev->pdev->dev, channel->buffer_size, 0, 0);
+ if (!channel->dma_pool) {
+ pr_err("%s: Can't allocate memory pool\n", __func__);
+ return -ENOMEM;
+ }
+ } else {
+ channel->dma_pool = NULL;
+ }
+
+
+ for (channel->buffer_count = 0;
+ channel->buffer_count < channel->max_buffers;
+ channel->buffer_count++) {
+
+ /* allocate the descriptor */
+ struct tspp_mem_buffer *desc =
+ kmalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc) {
+ pr_warn("%s: Can't allocate desc %i",
+ __func__, channel->buffer_count);
+ break;
+ }
+
+ desc->desc.id = channel->buffer_count;
+ /* allocate the buffer */
+ if (tspp_alloc_buffer(channel_id, &desc->desc,
+ channel->buffer_size, channel->dma_pool,
+ alloc, user) != 0) {
+ kfree(desc);
+ pr_warn("%s: Can't allocate buffer %i",
+ __func__, channel->buffer_count);
+ break;
+ }
+
+ /* add the descriptor to the list */
+ desc->filled = 0;
+ desc->read_index = 0;
+ if (!channel->data) {
+ channel->data = desc;
+ desc->next = channel->data;
+ } else {
+ if (last != NULL)
+ last->next = desc;
+ }
+ last = desc;
+ desc->next = channel->data;
+
+ /* prepare the sps descriptor */
+ desc->sps.phys_base = desc->desc.phys_base;
+ desc->sps.base = desc->desc.virt_base;
+ desc->sps.size = desc->desc.size;
+
+ /* start the transfer */
+ if (tspp_queue_buffer(channel, desc))
+ pr_err("%s: can't queue buffer %i",
+ __func__, desc->desc.id);
+ }
+
+ if (channel->buffer_count < channel->max_buffers) {
+ /*
+ * we failed to allocate the requested number of buffers.
+ * we don't allow a partial success, so need to clean up here.
+ */
+ tspp_destroy_buffers(channel_id, channel);
+ channel->buffer_count = 0;
+
+ dma_pool_destroy(channel->dma_pool);
+ channel->dma_pool = NULL;
+ return -ENOMEM;
+ }
+
+ channel->waiting = channel->data;
+ channel->read = channel->data;
+ channel->locked = channel->data;
+
+ /* Now that buffers are scheduled to HW, kick data expiration timer */
+ if (channel->expiration_period_ms)
+ mod_timer(&channel->expiration_timer,
+ jiffies +
+ MSEC_TO_JIFFIES(
+ channel->expiration_period_ms));
+
+ return 0;
+}
+EXPORT_SYMBOL(tspp_allocate_buffers);
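+
+/*
+ * Illustrative usage sketch (not part of the driver): allocate channel
+ * buffers explicitly with the default allocator before adding any filter.
+ * The buffer count, the 32 KB size and the interrupt frequency are example
+ * values only and must respect the channel-mode alignment rules.
+ */
+#if 0
+static int tspp_example_alloc(u32 dev, u32 channel_id)
+{
+ /* 16 buffers of 32 KB each, interrupt every 4 buffers */
+ return tspp_allocate_buffers(dev, channel_id, 16, 32 * 1024, 4,
+ NULL, NULL, NULL);
+}
+#endif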
+
+/*** debugfs ***/
+static int debugfs_iomem_x32_set(void *data, u64 val)
+{
+ int rc;
+ int clock_started = 0;
+ struct tspp_device *pdev;
+
+ pdev = tspp_find_by_id(0);
+ if (!pdev) {
+ pr_err("%s: can't find device 0\n", __func__);
+ return 0;
+ }
+
+ if (tspp_channels_in_use(pdev) == 0) {
+ rc = tspp_clock_start(pdev);
+ if (rc) {
+ pr_err("%s: tspp_clock_start failed %d\n",
+ __func__, rc);
+ return 0;
+ }
+ clock_started = 1;
+ }
+
+ writel_relaxed(val, data);
+ /* Assure register write */
+ wmb();
+
+ if (clock_started)
+ tspp_clock_stop(pdev);
+ return 0;
+}
+
+static int debugfs_iomem_x32_get(void *data, u64 *val)
+{
+ int rc;
+ int clock_started = 0;
+ struct tspp_device *pdev;
+
+ pdev = tspp_find_by_id(0);
+ if (!pdev) {
+ pr_err("%s: can't find device 0\n", __func__);
+ *val = 0;
+ return 0;
+ }
+
+ if (tspp_channels_in_use(pdev) == 0) {
+ rc = tspp_clock_start(pdev);
+ if (rc) {
+ pr_err("%s: tspp_clock_start failed %d\n",
+ __func__, rc);
+ *val = 0;
+ return 0;
+ }
+ clock_started = 1;
+ }
+
+ *val = readl_relaxed(data);
+
+ if (clock_started)
+ tspp_clock_stop(pdev);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
+ debugfs_iomem_x32_set, "0x%08llx");
+
+static void tsif_debugfs_init(struct tspp_tsif_device *tsif_device,
+ int instance)
+{
+ char name[10];
+
+ snprintf(name, 10, "tsif%i", instance);
+ tsif_device->dent_tsif = debugfs_create_dir(
+ name, NULL);
+ if (tsif_device->dent_tsif) {
+ int i;
+ void __iomem *base = tsif_device->base;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
+ tsif_device->debugfs_tsif_regs[i] =
+ debugfs_create_file(
+ debugfs_tsif_regs[i].name,
+ debugfs_tsif_regs[i].mode,
+ tsif_device->dent_tsif,
+ base + debugfs_tsif_regs[i].offset,
+ &fops_iomem_x32);
+ }
+
+ debugfs_create_u32(
+ "stat_rx_chunks",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ tsif_device->dent_tsif,
+ &tsif_device->stat_rx);
+
+ debugfs_create_u32(
+ "stat_overflow",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ tsif_device->dent_tsif,
+ &tsif_device->stat_overflow);
+
+ debugfs_create_u32(
+ "stat_lost_sync",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ tsif_device->dent_tsif,
+ &tsif_device->stat_lost_sync);
+
+ debugfs_create_u32(
+ "stat_timeout",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ tsif_device->dent_tsif,
+ &tsif_device->stat_timeout);
+ }
+}
+
+static void tsif_debugfs_exit(struct tspp_tsif_device *tsif_device)
+{
+ int i;
+
+ debugfs_remove_recursive(tsif_device->dent_tsif);
+ tsif_device->dent_tsif = NULL;
+ for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
+ tsif_device->debugfs_tsif_regs[i] = NULL;
+}
+
+static void tspp_debugfs_init(struct tspp_device *device, int instance)
+{
+ char name[10];
+
+ snprintf(name, 10, "tspp%i", instance);
+ device->dent = debugfs_create_dir(
+ name, NULL);
+ if (device->dent) {
+ int i;
+ void __iomem *base = device->base;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_tspp_regs); i++)
+ device->debugfs_regs[i] =
+ debugfs_create_file(
+ debugfs_tspp_regs[i].name,
+ debugfs_tspp_regs[i].mode,
+ device->dent,
+ base + debugfs_tspp_regs[i].offset,
+ &fops_iomem_x32);
+ }
+}
+
+static void tspp_debugfs_exit(struct tspp_device *device)
+{
+ int i;
+
+ debugfs_remove_recursive(device->dent);
+ for (i = 0; i < ARRAY_SIZE(debugfs_tspp_regs); i++)
+ device->debugfs_regs[i] = NULL;
+}
+
+static int msm_tspp_map_irqs(struct platform_device *pdev,
+ struct tspp_device *device)
+{
+ int rc;
+
+ /* get IRQ numbers from platform information */
+
+ /* map TSPP IRQ */
+ rc = platform_get_irq_byname(pdev, "TSIF_TSPP_IRQ");
+ if (rc > 0) {
+ device->tspp_irq = rc;
+ } else {
+ dev_err(&pdev->dev, "failed to get TSPP IRQ");
+ return -EINVAL;
+ }
+
+ /* map TSIF IRQs */
+ rc = platform_get_irq_byname(pdev, "TSIF0_IRQ");
+ if (rc > 0) {
+ device->tsif[0].tsif_irq = rc;
+ } else {
+ dev_err(&pdev->dev, "failed to get TSIF0 IRQ");
+ return -EINVAL;
+ }
+
+ rc = platform_get_irq_byname(pdev, "TSIF1_IRQ");
+ if (rc > 0) {
+ device->tsif[1].tsif_irq = rc;
+ } else {
+ dev_err(&pdev->dev, "failed to get TSIF1 IRQ");
+ return -EINVAL;
+ }
+
+ /* map BAM IRQ */
+ rc = platform_get_irq_byname(pdev, "TSIF_BAM_IRQ");
+ if (rc > 0) {
+ device->bam_irq = rc;
+ } else {
+ dev_err(&pdev->dev, "failed to get TSPP BAM IRQ");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int msm_tspp_probe(struct platform_device *pdev)
+{
+ int rc = -ENODEV;
+ u32 version;
+ u32 i;
+ struct tspp_device *device;
+ struct resource *mem_tsif0;
+ struct resource *mem_tsif1;
+ struct resource *mem_tspp;
+ struct resource *mem_bam;
+ struct msm_bus_scale_pdata *tspp_bus_pdata = NULL;
+ unsigned long rate;
+
+ if (pdev->dev.of_node) {
+ /* ID is always 0 since there is only 1 instance of TSPP */
+ pdev->id = 0;
+ tspp_bus_pdata = msm_bus_cl_get_pdata(pdev);
+ } else {
+ /* must have device tree data */
+ pr_err("tspp: Device tree data not available\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* OK, we will use this device */
+ device = kzalloc(sizeof(struct tspp_device), GFP_KERNEL);
+ if (!device) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* set up references */
+ device->pdev = pdev;
+ platform_set_drvdata(pdev, device);
+
+ /* setup pin control */
+ rc = tspp_get_pinctrl(device);
+ if (rc) {
+ pr_err("tspp: failed to get pin control data, rc=%d\n", rc);
+ goto err_pinctrl;
+ }
+
+ /* register bus client */
+ if (tspp_bus_pdata) {
+ device->tsif_bus_client =
+ msm_bus_scale_register_client(tspp_bus_pdata);
+ if (!device->tsif_bus_client)
+ pr_err("tspp: Unable to register bus client\n");
+ } else {
+ device->tsif_bus_client = 0;
+ }
+
+ /* map regulators */
+ device->tsif_vreg = devm_regulator_get_optional(&pdev->dev, "vdd_cx");
+ if (IS_ERR(device->tsif_vreg)) {
+ rc = PTR_ERR(device->tsif_vreg);
+ device->tsif_vreg = NULL;
+ if (rc == -ENODEV) {
+ pr_notice("%s: vdd_cx regulator will not be used\n",
+ __func__);
+ } else {
+ dev_err(&pdev->dev,
+ "failed to get CX regulator, err=%d\n", rc);
+ goto err_regulator;
+ }
+ } else {
+ /* Set an initial voltage and enable the regulator */
+ rc = regulator_set_voltage(device->tsif_vreg,
+ RPM_REGULATOR_CORNER_NONE,
+ RPM_REGULATOR_CORNER_SUPER_TURBO);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to set CX voltage.\n");
+ goto err_regulator;
+ }
+
+ rc = regulator_enable(device->tsif_vreg);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to enable CX regulator.\n");
+ goto err_regulator;
+ }
+ }
+
+ /* map clocks */
+ device->tsif_pclk = clk_get(&pdev->dev, "iface_clk");
+ if (IS_ERR(device->tsif_pclk)) {
+ rc = PTR_ERR(device->tsif_pclk);
+ device->tsif_pclk = NULL;
+ goto err_pclock;
+ }
+
+ device->tsif_ref_clk = clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(device->tsif_ref_clk)) {
+ rc = PTR_ERR(device->tsif_ref_clk);
+ device->tsif_ref_clk = NULL;
+ goto err_refclock;
+ }
+ rate = clk_round_rate(device->tsif_ref_clk, 1);
+ rc = clk_set_rate(device->tsif_ref_clk, rate);
+ if (rc)
+ goto err_res_tsif0;
+
+ /* map I/O memory */
+ mem_tsif0 = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "MSM_TSIF0_PHYS");
+ if (!mem_tsif0) {
+ pr_err("tspp: Missing tsif0 MEM resource\n");
+ rc = -ENXIO;
+ goto err_res_tsif0;
+ }
+ device->tsif[0].base = ioremap(mem_tsif0->start,
+ resource_size(mem_tsif0));
+ if (!device->tsif[0].base) {
+ pr_err("tspp: ioremap failed\n");
+ rc = -ENOMEM;
+ goto err_map_tsif0;
+ }
+
+ mem_tsif1 = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "MSM_TSIF1_PHYS");
+ if (!mem_tsif1) {
+ dev_err(&pdev->dev, "Missing tsif1 MEM resource\n");
+ rc = -ENXIO;
+ goto err_res_tsif1;
+ }
+ device->tsif[1].base = ioremap(mem_tsif1->start,
+ resource_size(mem_tsif1));
+ if (!device->tsif[1].base) {
+ dev_err(&pdev->dev, "ioremap failed");
+ rc = -ENOMEM;
+ goto err_map_tsif1;
+ }
+
+ mem_tspp = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "MSM_TSPP_PHYS");
+ if (!mem_tspp) {
+ dev_err(&pdev->dev, "Missing MEM resource");
+ rc = -ENXIO;
+ goto err_res_dev;
+ }
+ device->base = ioremap(mem_tspp->start, resource_size(mem_tspp));
+ if (!device->base) {
+ dev_err(&pdev->dev, "ioremap failed");
+ rc = -ENOMEM;
+ goto err_map_dev;
+ }
+
+ mem_bam = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "MSM_TSPP_BAM_PHYS");
+ if (!mem_bam) {
+ pr_err("tspp: Missing bam MEM resource");
+ rc = -ENXIO;
+ goto err_res_bam;
+ }
+ memset(&device->bam_props, 0, sizeof(device->bam_props));
+ device->bam_props.phys_addr = mem_bam->start;
+ device->bam_props.virt_addr = ioremap(mem_bam->start,
+ resource_size(mem_bam));
+ if (!device->bam_props.virt_addr) {
+ dev_err(&pdev->dev, "ioremap failed");
+ rc = -ENOMEM;
+ goto err_map_bam;
+ }
+
+ rc = msm_tspp_map_irqs(pdev, device);
+ if (rc)
+ goto err_irq;
+ device->req_irqs = false;
+
+ /* power management */
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ tspp_debugfs_init(device, 0);
+
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+ tsif_debugfs_init(&device->tsif[i], i);
+
+ wakeup_source_init(&device->ws, dev_name(&pdev->dev));
+
+ /* set up pointers to ram-based 'registers' */
+ device->filters[0] = device->base + TSPP_PID_FILTER_TABLE0;
+ device->filters[1] = device->base + TSPP_PID_FILTER_TABLE1;
+ device->filters[2] = device->base + TSPP_PID_FILTER_TABLE2;
+ device->tspp_key_table = device->base + TSPP_DATA_KEY;
+ device->tspp_global_performance =
+ device->base + TSPP_GLOBAL_PERFORMANCE;
+ device->tspp_pipe_context =
+ device->base + TSPP_PIPE_CONTEXT;
+ device->tspp_pipe_performance =
+ device->base + TSPP_PIPE_PERFORMANCE;
+
+ device->bam_props.summing_threshold = 0x10;
+ device->bam_props.irq = device->bam_irq;
+ device->bam_props.manage = SPS_BAM_MGR_LOCAL;
+ /* add SPS BAM log level */
+ device->bam_props.ipc_loglevel = TSPP_BAM_DEFAULT_IPC_LOGLVL;
+
+ rc = tspp_clock_start(device);
+ if (rc != 0) {
+ dev_err(&pdev->dev, "Can't start clocks");
+ goto err_clock;
+ }
+
+ device->bam_handle = SPS_DEV_HANDLE_INVALID;
+
+ spin_lock_init(&device->spinlock);
+ mutex_init(&device->mutex);
+ tasklet_init(&device->tlet, tspp_sps_complete_tlet,
+ (unsigned long)device);
+
+ /* initialize everything to a known state */
+ tspp_global_reset(device);
+
+ version = readl_relaxed(device->base + TSPP_VERSION);
+ /*
+ * The TSPP version is reported either in bits [7:0] or, alternatively,
+ * the TSPP major version is reported in bits [31:28].
+ */
+ if ((version != 0x1) && (((version >> 28) & 0xF) != 0x1))
+ pr_warn("tspp: unrecognized hw version=%i", version);
+
+ /* initialize the channels */
+ for (i = 0; i < TSPP_NUM_CHANNELS; i++)
+ tspp_channel_init(&(device->channels[i]), device);
+
+ /* stop the clocks for power savings */
+ tspp_clock_stop(device);
+
+ /* everything is ok, so add the device to the list */
+ list_add_tail(&(device->devlist), &tspp_devices);
+ return 0;
+
+err_clock:
+ tspp_debugfs_exit(device);
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+ tsif_debugfs_exit(&device->tsif[i]);
+err_irq:
+ iounmap(device->bam_props.virt_addr);
+err_map_bam:
+err_res_bam:
+ iounmap(device->base);
+err_map_dev:
+err_res_dev:
+ iounmap(device->tsif[1].base);
+err_map_tsif1:
+err_res_tsif1:
+ iounmap(device->tsif[0].base);
+err_map_tsif0:
+err_res_tsif0:
+ if (device->tsif_ref_clk)
+ clk_put(device->tsif_ref_clk);
+err_refclock:
+ if (device->tsif_pclk)
+ clk_put(device->tsif_pclk);
+err_pclock:
+ if (device->tsif_vreg)
+ regulator_disable(device->tsif_vreg);
+err_regulator:
+ if (device->tsif_bus_client)
+ msm_bus_scale_unregister_client(device->tsif_bus_client);
+err_pinctrl:
+ kfree(device);
+
+out:
+ return rc;
+}
+
+static int msm_tspp_remove(struct platform_device *pdev)
+{
+ struct tspp_channel *channel;
+ u32 i;
+
+ struct tspp_device *device = platform_get_drvdata(pdev);
+
+ /* free the buffers, and delete the channels */
+ for (i = 0; i < TSPP_NUM_CHANNELS; i++) {
+ channel = &device->channels[i];
+ tspp_close_channel(device->pdev->id, i);
+ }
+
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+ tsif_debugfs_exit(&device->tsif[i]);
+
+ mutex_destroy(&device->mutex);
+
+ if (device->tsif_bus_client)
+ msm_bus_scale_unregister_client(device->tsif_bus_client);
+
+ wakeup_source_trash(&device->ws);
+ if (device->req_irqs)
+ msm_tspp_free_irqs(device);
+
+ iounmap(device->bam_props.virt_addr);
+ iounmap(device->base);
+ for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+ iounmap(device->tsif[i].base);
+
+ if (device->tsif_ref_clk)
+ clk_put(device->tsif_ref_clk);
+
+ if (device->tsif_pclk)
+ clk_put(device->tsif_pclk);
+
+ if (device->tsif_vreg)
+ regulator_disable(device->tsif_vreg);
+
+ pm_runtime_disable(&pdev->dev);
+
+ kfree(device);
+
+ return 0;
+}
+
+/*** power management ***/
+
+static int tspp_runtime_suspend(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: suspending...");
+ return 0;
+}
+
+static int tspp_runtime_resume(struct device *dev)
+{
+ dev_dbg(dev, "pm_runtime: resuming...");
+ return 0;
+}
+
+static const struct dev_pm_ops tspp_dev_pm_ops = {
+ .runtime_suspend = tspp_runtime_suspend,
+ .runtime_resume = tspp_runtime_resume,
+};
+
+static const struct of_device_id msm_match_table[] = {
+ {.compatible = "qcom,msm_tspp"},
+ {}
+};
+
+static struct platform_driver msm_tspp_driver = {
+ .probe = msm_tspp_probe,
+ .remove = msm_tspp_remove,
+ .driver = {
+ .name = "msm_tspp",
+ .pm = &tspp_dev_pm_ops,
+ .of_match_table = msm_match_table,
+ },
+};
+
+
+static int __init mod_init(void)
+{
+ int rc;
+
+ /* register the driver, and check hardware */
+ rc = platform_driver_register(&msm_tspp_driver);
+ if (rc)
+ pr_err("tspp: platform_driver_register failed: %d", rc);
+
+ return rc;
+}
+
+static void __exit mod_exit(void)
+{
+ /* delete low level driver */
+ platform_driver_unregister(&msm_tspp_driver);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("TSPP platform device");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/dvb/Kconfig b/drivers/media/platform/msm/dvb/Kconfig
new file mode 100644
index 000000000000..e205c8172075
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/Kconfig
@@ -0,0 +1,10 @@
+config DVB_MPQ
+ tristate "Qualcomm Technologies Inc Multimedia Processor DVB Adapter"
+ depends on ARCH_QCOM && DVB_CORE
+ default n
+
+ help
+ Support for the Qualcomm Technologies Inc. MPQ-based DVB adapter.
+ Say Y or M if you own such a device and want to use it.
+
+source "drivers/media/platform/msm/dvb/demux/Kconfig"
diff --git a/drivers/media/platform/msm/dvb/Makefile b/drivers/media/platform/msm/dvb/Makefile
new file mode 100644
index 000000000000..862ebca24db9
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_DVB_MPQ) += adapter/
+obj-$(CONFIG_DVB_MPQ_DEMUX) += demux/
diff --git a/drivers/media/platform/msm/dvb/adapter/Makefile b/drivers/media/platform/msm/dvb/adapter/Makefile
new file mode 100644
index 000000000000..f7da6b5b2f06
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/adapter/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -Idrivers/media/dvb-core/
+ccflags-y += -Idrivers/media/platform/msm/dvb/include/
+
+obj-$(CONFIG_DVB_MPQ) += mpq-adapter.o
+
+mpq-adapter-y := mpq_adapter.o mpq_stream_buffer.o
diff --git a/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c b/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c
new file mode 100644
index 000000000000..a3d2533485b5
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/adapter/mpq_adapter.c
@@ -0,0 +1,211 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include "mpq_adapter.h"
+#include "mpq_dvb_debug.h"
+
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+/* data-structure holding MPQ adapter information */
+static struct
+{
+ /* MPQ adapter registered to dvb-core */
+ struct dvb_adapter adapter;
+
+ /* mutex protecting the data-structure */
+ struct mutex mutex;
+
+ /* List of stream interfaces registered to the MPQ adapter */
+ struct {
+ /* pointer to the stream buffer used for data tunneling */
+ struct mpq_streambuffer *stream_buffer;
+
+ /* callback triggered when the stream interface is registered */
+ mpq_adapter_stream_if_callback callback;
+
+ /* parameter passed to the callback function */
+ void *user_param;
+ } interfaces[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
+} mpq_info;
+
+
+/**
+ * Initialize MPQ DVB adapter module.
+ *
+ * Return error status
+ */
+static int __init mpq_adapter_init(void)
+{
+ int i;
+ int result;
+
+ MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+ mutex_init(&mpq_info.mutex);
+
+ /* reset stream interfaces list */
+ for (i = 0; i < MPQ_ADAPTER_MAX_NUM_OF_INTERFACES; i++) {
+ mpq_info.interfaces[i].stream_buffer = NULL;
+ mpq_info.interfaces[i].callback = NULL;
+ }
+
+ /* register a new dvb-adapter with dvb-core */
+ result = dvb_register_adapter(&mpq_info.adapter,
+ "Qualcomm DVB adapter",
+ THIS_MODULE,
+ NULL,
+ adapter_nr);
+
+ if (result < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: dvb_register_adapter failed, errno %d\n",
+ __func__,
+ result);
+ }
+
+ return result;
+}
+
+
+/**
+ * Cleanup MPQ DVB adapter module.
+ */
+static void __exit mpq_adapter_exit(void)
+{
+ MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+ /* unregister the adapter from dvb-core */
+ dvb_unregister_adapter(&mpq_info.adapter);
+ mutex_destroy(&mpq_info.mutex);
+}
+
+struct dvb_adapter *mpq_adapter_get(void)
+{
+ return &mpq_info.adapter;
+}
+EXPORT_SYMBOL(mpq_adapter_get);
+
+
+int mpq_adapter_register_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ struct mpq_streambuffer *stream_buffer)
+{
+ int ret;
+
+ if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES) {
+ ret = -EINVAL;
+ goto register_failed;
+ }
+
+ if (mutex_lock_interruptible(&mpq_info.mutex)) {
+ ret = -ERESTARTSYS;
+ goto register_failed;
+ }
+
+ if (mpq_info.interfaces[interface_id].stream_buffer != NULL) {
+ /* already registered interface */
+ ret = -EINVAL;
+ goto register_failed_unlock_mutex;
+ }
+
+ mpq_info.interfaces[interface_id].stream_buffer = stream_buffer;
+ mutex_unlock(&mpq_info.mutex);
+
+ /*
+ * If callback is installed, trigger it to notify that
+ * stream interface was registered.
+ */
+ if (mpq_info.interfaces[interface_id].callback != NULL) {
+ mpq_info.interfaces[interface_id].callback(
+ interface_id,
+ mpq_info.interfaces[interface_id].user_param);
+ }
+
+ return 0;
+
+register_failed_unlock_mutex:
+ mutex_unlock(&mpq_info.mutex);
+register_failed:
+ return ret;
+}
+EXPORT_SYMBOL(mpq_adapter_register_stream_if);
+
+
+int mpq_adapter_unregister_stream_if(
+ enum mpq_adapter_stream_if interface_id)
+{
+ if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&mpq_info.mutex))
+ return -ERESTARTSYS;
+
+ /* clear the registered interface */
+ mpq_info.interfaces[interface_id].stream_buffer = NULL;
+
+ mutex_unlock(&mpq_info.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_adapter_unregister_stream_if);
+
+
+int mpq_adapter_get_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ struct mpq_streambuffer **stream_buffer)
+{
+ if ((interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES) ||
+ (stream_buffer == NULL))
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&mpq_info.mutex))
+ return -ERESTARTSYS;
+
+ *stream_buffer = mpq_info.interfaces[interface_id].stream_buffer;
+
+ mutex_unlock(&mpq_info.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_adapter_get_stream_if);
+
+
+int mpq_adapter_notify_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ mpq_adapter_stream_if_callback callback,
+ void *user_param)
+{
+ if (interface_id >= MPQ_ADAPTER_MAX_NUM_OF_INTERFACES)
+ return -EINVAL;
+
+ if (mutex_lock_interruptible(&mpq_info.mutex))
+ return -ERESTARTSYS;
+
+ mpq_info.interfaces[interface_id].callback = callback;
+ mpq_info.interfaces[interface_id].user_param = user_param;
+
+ mutex_unlock(&mpq_info.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_adapter_notify_stream_if);
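+
+/*
+ * Illustrative usage sketch (not part of the driver): ask to be notified
+ * when a stream interface is registered and then fetch its stream buffer.
+ * The void return type of the callback is an assumption taken from the way
+ * it is invoked above.
+ */
+#if 0
+static void mpq_example_if_ready(enum mpq_adapter_stream_if id, void *param)
+{
+ struct mpq_streambuffer *sbuff = NULL;
+
+ if (!mpq_adapter_get_stream_if(id, &sbuff) && sbuff)
+ MPQ_DVB_DBG_PRINT("stream interface %d is ready\n", id);
+}
+
+static int mpq_example_watch_if(enum mpq_adapter_stream_if id, void *ctx)
+{
+ return mpq_adapter_notify_stream_if(id, mpq_example_if_ready, ctx);
+}
+#endif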
+
+
+module_init(mpq_adapter_init);
+module_exit(mpq_adapter_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. MPQ adapter");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c b/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c
new file mode 100644
index 000000000000..97533081766a
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/adapter/mpq_stream_buffer.c
@@ -0,0 +1,827 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uaccess.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_stream_buffer.h"
+
+
+int mpq_streambuffer_init(
+ struct mpq_streambuffer *sbuff,
+ enum mpq_streambuffer_mode mode,
+ struct mpq_streambuffer_buffer_desc *data_buffers,
+ u32 data_buff_num,
+ void *packet_buff,
+ size_t packet_buff_size)
+{
+ if ((sbuff == NULL) || (data_buffers == NULL) ||
+ (packet_buff == NULL) || (data_buff_num == 0))
+ return -EINVAL;
+
+ if (data_buff_num > 1) {
+ if (mode != MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+ return -EINVAL;
+ /* Linear buffer group */
+ dvb_ringbuffer_init(
+ &sbuff->raw_data,
+ data_buffers,
+ data_buff_num *
+ sizeof(struct mpq_streambuffer_buffer_desc));
+ } else {
+ if (mode != MPQ_STREAMBUFFER_BUFFER_MODE_RING)
+ return -EINVAL;
+ /* Single ring-buffer */
+ dvb_ringbuffer_init(&sbuff->raw_data,
+ data_buffers[0].base, data_buffers[0].size);
+ }
+ sbuff->mode = mode;
+ sbuff->buffers = data_buffers;
+ sbuff->pending_buffers_count = 0;
+ sbuff->buffers_num = data_buff_num;
+ sbuff->cb = NULL;
+ dvb_ringbuffer_init(&sbuff->packet_data, packet_buff, packet_buff_size);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_init);
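+
+/*
+ * Illustrative usage sketch (not part of the driver): initialize a stream
+ * buffer in ring mode with a single data buffer. Because the descriptor is
+ * kept by the stream buffer, it lives in a (hypothetical) caller context
+ * rather than on the stack; all names and sizes here are example values.
+ */
+#if 0
+struct mpq_example_ctx {
+ struct mpq_streambuffer sbuff;
+ struct mpq_streambuffer_buffer_desc desc;
+};
+
+static int mpq_example_init_ring(struct mpq_example_ctx *ctx,
+ void *data, size_t data_size, void *pkt, size_t pkt_size)
+{
+ ctx->desc.base = data;
+ ctx->desc.size = data_size;
+
+ return mpq_streambuffer_init(&ctx->sbuff,
+ MPQ_STREAMBUFFER_BUFFER_MODE_RING,
+ &ctx->desc, 1, pkt, pkt_size);
+}
+#endif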
+
+void mpq_streambuffer_terminate(struct mpq_streambuffer *sbuff)
+{
+ spin_lock(&sbuff->packet_data.lock);
+ spin_lock(&sbuff->raw_data.lock);
+ sbuff->packet_data.error = -ENODEV;
+ sbuff->raw_data.error = -ENODEV;
+ spin_unlock(&sbuff->raw_data.lock);
+ spin_unlock(&sbuff->packet_data.lock);
+
+ wake_up_all(&sbuff->raw_data.queue);
+ wake_up_all(&sbuff->packet_data.queue);
+}
+EXPORT_SYMBOL(mpq_streambuffer_terminate);
+
+ssize_t mpq_streambuffer_pkt_next(
+ struct mpq_streambuffer *sbuff,
+ ssize_t idx, size_t *pktlen)
+{
+ ssize_t packet_idx;
+
+ spin_lock(&sbuff->packet_data.lock);
+
+ /* buffer was released, return no packet available */
+ if (sbuff->packet_data.error == -ENODEV) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ packet_idx = dvb_ringbuffer_pkt_next(&sbuff->packet_data, idx, pktlen);
+ spin_unlock(&sbuff->packet_data.lock);
+
+ return packet_idx;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_next);
+
+
+ssize_t mpq_streambuffer_pkt_read(
+ struct mpq_streambuffer *sbuff,
+ size_t idx,
+ struct mpq_streambuffer_packet_header *packet,
+ u8 *user_data)
+{
+ size_t ret;
+ size_t read_len;
+
+ spin_lock(&sbuff->packet_data.lock);
+
+ /* buffer was released, return no packet available */
+ if (sbuff->packet_data.error == -ENODEV) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ /* read-out the packet header first */
+ ret = dvb_ringbuffer_pkt_read(
+ &sbuff->packet_data, idx, 0,
+ (u8 *)packet,
+ sizeof(struct mpq_streambuffer_packet_header));
+
+ /* verify length, at least packet header should exist */
+ if (ret != sizeof(struct mpq_streambuffer_packet_header)) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -EINVAL;
+ }
+
+ read_len = ret;
+
+ /* read out private user-data, if any */
+ if ((packet->user_data_len) && (user_data != NULL)) {
+ ret = dvb_ringbuffer_pkt_read(
+ &sbuff->packet_data,
+ idx,
+ sizeof(struct mpq_streambuffer_packet_header),
+ user_data,
+ packet->user_data_len);
+
+ if (ret < 0) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return ret;
+ }
+
+ read_len += ret;
+ }
+
+ spin_unlock(&sbuff->packet_data.lock);
+
+ return read_len;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_read);
+
+
+int mpq_streambuffer_pkt_dispose(
+ struct mpq_streambuffer *sbuff,
+ size_t idx,
+ int dispose_data)
+{
+ int ret;
+ struct mpq_streambuffer_packet_header packet;
+
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->packet_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->packet_data.error == -ENODEV) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ /* read-out the packet header first */
+ ret = dvb_ringbuffer_pkt_read(&sbuff->packet_data, idx,
+ 0,
+ (u8 *)&packet,
+ sizeof(struct mpq_streambuffer_packet_header));
+
+ spin_unlock(&sbuff->packet_data.lock);
+
+ if (ret != sizeof(struct mpq_streambuffer_packet_header))
+ return -EINVAL;
+
+ if ((sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) ||
+ (dispose_data)) {
+ /* Advance the read pointer in the raw-data buffer first */
+ ret = mpq_streambuffer_data_read_dispose(sbuff,
+ packet.raw_data_len);
+ if (ret != 0)
+ return ret;
+ }
+
+ spin_lock(&sbuff->packet_data.lock);
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if ((sbuff->packet_data.error == -ENODEV) ||
+ (sbuff->raw_data.error == -ENODEV)) {
+ spin_unlock(&sbuff->raw_data.lock);
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ /* Move read pointer to the next linear buffer for subsequent reads */
+ if ((sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) &&
+ (packet.raw_data_len > 0)) {
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+
+ desc->write_ptr = 0;
+ desc->read_ptr = 0;
+
+ DVB_RINGBUFFER_SKIP(&sbuff->raw_data,
+ sizeof(struct mpq_streambuffer_buffer_desc));
+ sbuff->pending_buffers_count--;
+
+ wake_up_all(&sbuff->raw_data.queue);
+ }
+
+ /* Now dispose of the packet from the packet ring-buffer */
+ dvb_ringbuffer_pkt_dispose(&sbuff->packet_data, idx);
+
+ spin_unlock(&sbuff->raw_data.lock);
+ spin_unlock(&sbuff->packet_data.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_dispose);
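+
+/*
+ * Illustrative usage sketch (not part of the driver): the consumer walks the
+ * packet queue with pkt_next/pkt_read and disposes each packet together with
+ * its raw data once handled. Handling of the payload itself is omitted.
+ */
+#if 0
+static void mpq_example_consume(struct mpq_streambuffer *sbuff)
+{
+ struct mpq_streambuffer_packet_header hdr;
+ size_t len;
+ ssize_t idx;
+
+ idx = mpq_streambuffer_pkt_next(sbuff, -1, &len);
+ while (idx >= 0) {
+ if (mpq_streambuffer_pkt_read(sbuff, idx, &hdr, NULL) < 0)
+ break;
+ /* ... hand hdr.raw_data_len bytes of raw data to the user ... */
+ mpq_streambuffer_pkt_dispose(sbuff, idx, 1);
+ idx = mpq_streambuffer_pkt_next(sbuff, -1, &len);
+ }
+}
+#endif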
+
+int mpq_streambuffer_pkt_write(
+ struct mpq_streambuffer *sbuff,
+ struct mpq_streambuffer_packet_header *packet,
+ u8 *user_data)
+{
+ ssize_t idx;
+ size_t len;
+
+ if ((sbuff == NULL) || (packet == NULL))
+ return -EINVAL;
+
+ spin_lock(&sbuff->packet_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->packet_data.error == -ENODEV) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ /* Make sure we can go to the next linear buffer */
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR &&
+ sbuff->pending_buffers_count == sbuff->buffers_num &&
+ packet->raw_data_len) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENOSPC;
+ }
+
+ len = sizeof(struct mpq_streambuffer_packet_header) +
+ packet->user_data_len;
+
+ /* Make sure enough space available for packet header */
+ if (dvb_ringbuffer_free(&sbuff->packet_data) <
+ (len + DVB_RINGBUFFER_PKTHDRSIZE)) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENOSPC;
+ }
+
+ /* Start writing the packet header */
+ idx = dvb_ringbuffer_pkt_start(&sbuff->packet_data, len);
+
+ /* Write non-user private data header */
+ dvb_ringbuffer_write(&sbuff->packet_data,
+ (u8 *)packet,
+ sizeof(struct mpq_streambuffer_packet_header));
+
+ /* Write user's own private data header */
+ dvb_ringbuffer_write(&sbuff->packet_data,
+ user_data,
+ packet->user_data_len);
+
+ dvb_ringbuffer_pkt_close(&sbuff->packet_data, idx);
+
+ /* Move write pointer to next linear buffer for subsequent writes */
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR &&
+ packet->raw_data_len) {
+ DVB_RINGBUFFER_PUSH(&sbuff->raw_data,
+ sizeof(struct mpq_streambuffer_buffer_desc));
+ sbuff->pending_buffers_count++;
+ }
+
+ spin_unlock(&sbuff->packet_data.lock);
+ wake_up_all(&sbuff->packet_data.queue);
+
+ return idx;
+}
+EXPORT_SYMBOL(mpq_streambuffer_pkt_write);
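+
+/*
+ * Illustrative usage sketch (not part of the driver): the producer writes
+ * the raw payload first and then a packet header describing it, which is
+ * what makes the data visible to the packet reader. Error handling is
+ * trimmed to the minimum.
+ */
+#if 0
+static int mpq_example_produce(struct mpq_streambuffer *sbuff,
+ const u8 *payload, size_t len)
+{
+ struct mpq_streambuffer_packet_header hdr;
+ ssize_t ret;
+
+ memset(&hdr, 0, sizeof(hdr));
+ hdr.raw_data_len = len;
+ hdr.user_data_len = 0;
+
+ ret = mpq_streambuffer_data_write(sbuff, payload, len);
+ if (ret < 0)
+ return ret;
+
+ return mpq_streambuffer_pkt_write(sbuff, &hdr, NULL);
+}
+#endif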
+
+ssize_t mpq_streambuffer_data_write(
+ struct mpq_streambuffer *sbuff,
+ const u8 *buf, size_t len)
+{
+ int res;
+
+ if ((sbuff == NULL) || (buf == NULL))
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ if (unlikely(dvb_ringbuffer_free(&sbuff->raw_data) < len)) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENOSPC;
+ }
+ /*
+ * Secure buffers are not permitted to be mapped into kernel
+ * memory, and so buffer base address may be NULL
+ */
+ if (sbuff->raw_data.data == NULL) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -EPERM;
+ }
+ res = dvb_ringbuffer_write(&sbuff->raw_data, buf, len);
+ wake_up_all(&sbuff->raw_data.queue);
+ } else {
+ /* Linear buffer group */
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pwrite];
+
+ /*
+ * Secure buffers are not permitted to be mapped into kernel
+ * memory, and so buffer base address may be NULL
+ */
+ if (desc->base == NULL) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -EPERM;
+ }
+
+ if ((sbuff->pending_buffers_count == sbuff->buffers_num) ||
+ ((desc->size - desc->write_ptr) < len)) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: No space available! %d pending buffers out of %d total buffers. write_ptr=%d, size=%d\n",
+ __func__,
+ sbuff->pending_buffers_count,
+ sbuff->buffers_num,
+ desc->write_ptr,
+ desc->size);
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENOSPC;
+ }
+ memcpy(desc->base + desc->write_ptr, buf, len);
+ desc->write_ptr += len;
+ res = len;
+ }
+
+ spin_unlock(&sbuff->raw_data.lock);
+ return res;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_write);
+
+
+int mpq_streambuffer_data_write_deposit(
+ struct mpq_streambuffer *sbuff,
+ size_t len)
+{
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ if (unlikely(dvb_ringbuffer_free(&sbuff->raw_data) < len)) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENOSPC;
+ }
+
+ DVB_RINGBUFFER_PUSH(&sbuff->raw_data, len);
+ wake_up_all(&sbuff->raw_data.queue);
+ } else {
+ /* Linear buffer group */
+ struct mpq_streambuffer_buffer_desc *desc =
+ (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pwrite];
+
+ if ((sbuff->pending_buffers_count == sbuff->buffers_num) ||
+ ((desc->size - desc->write_ptr) < len)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: No space available!\n",
+ __func__);
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENOSPC;
+ }
+ desc->write_ptr += len;
+ }
+
+ spin_unlock(&sbuff->raw_data.lock);
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_write_deposit);
+
+
+ssize_t mpq_streambuffer_data_read(
+ struct mpq_streambuffer *sbuff,
+ u8 *buf, size_t len)
+{
+ ssize_t actual_len = 0;
+ u32 offset;
+
+ if ((sbuff == NULL) || (buf == NULL))
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ /*
+ * Secure buffers are not permitted to be mapped into kernel
+ * memory, and so buffer base address may be NULL
+ */
+ if (sbuff->raw_data.data == NULL) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -EPERM;
+ }
+
+ offset = sbuff->raw_data.pread;
+ actual_len = dvb_ringbuffer_avail(&sbuff->raw_data);
+ if (actual_len < len)
+ len = actual_len;
+ if (len)
+ dvb_ringbuffer_read(&sbuff->raw_data, buf, len);
+
+ wake_up_all(&sbuff->raw_data.queue);
+ } else {
+ /* Linear buffer group */
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+
+ /*
+ * Secure buffers are not permitted to be mapped into kernel
+ * memory, and so buffer base address may be NULL
+ */
+ if (desc->base == NULL) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -EPERM;
+ }
+
+ actual_len = (desc->write_ptr - desc->read_ptr);
+ if (actual_len < len)
+ len = actual_len;
+ memcpy(buf, desc->base + desc->read_ptr, len);
+ offset = desc->read_ptr;
+ desc->read_ptr += len;
+ }
+
+ spin_unlock(&sbuff->raw_data.lock);
+
+ if (sbuff->cb)
+ sbuff->cb(sbuff, offset, len, sbuff->cb_user_data);
+
+ return len;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_read);
+
+
+ssize_t mpq_streambuffer_data_read_user(
+ struct mpq_streambuffer *sbuff,
+ u8 __user *buf, size_t len)
+{
+ ssize_t actual_len = 0;
+ u32 offset;
+
+ if ((sbuff == NULL) || (buf == NULL))
+ return -EINVAL;
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV)
+ return -ENODEV;
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ /*
+ * Secure buffers are not permitted to be mapped into kernel
+ * memory, and so buffer base address may be NULL
+ */
+ if (sbuff->raw_data.data == NULL)
+ return -EPERM;
+
+ offset = sbuff->raw_data.pread;
+ actual_len = dvb_ringbuffer_avail(&sbuff->raw_data);
+ if (actual_len < len)
+ len = actual_len;
+ if (len)
+ dvb_ringbuffer_read_user(&sbuff->raw_data, buf, len);
+ wake_up_all(&sbuff->raw_data.queue);
+ } else {
+ /* Linear buffer group */
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+
+ /*
+ * Secure buffers are not permitted to be mapped into kernel
+ * memory, and so buffer base address may be NULL
+ */
+ if (desc->base == NULL)
+ return -EPERM;
+
+ actual_len = (desc->write_ptr - desc->read_ptr);
+ if (actual_len < len)
+ len = actual_len;
+ if (copy_to_user(buf, desc->base + desc->read_ptr, len))
+ return -EFAULT;
+
+ offset = desc->read_ptr;
+ desc->read_ptr += len;
+ }
+
+ if (sbuff->cb)
+ sbuff->cb(sbuff, offset, len, sbuff->cb_user_data);
+
+ return len;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_read_user);
+
+int mpq_streambuffer_data_read_dispose(
+ struct mpq_streambuffer *sbuff,
+ size_t len)
+{
+ u32 offset;
+
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ if (unlikely(dvb_ringbuffer_avail(&sbuff->raw_data) < len)) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -EINVAL;
+ }
+
+ offset = sbuff->raw_data.pread;
+ DVB_RINGBUFFER_SKIP(&sbuff->raw_data, len);
+ wake_up_all(&sbuff->raw_data.queue);
+ } else {
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+ offset = desc->read_ptr;
+
+ if ((desc->read_ptr + len) > desc->size)
+ desc->read_ptr = desc->size;
+ else
+ desc->read_ptr += len;
+ }
+
+ spin_unlock(&sbuff->raw_data.lock);
+
+ if (sbuff->cb)
+ sbuff->cb(sbuff, offset, len, sbuff->cb_user_data);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_read_dispose);
+
+
+int mpq_streambuffer_get_buffer_handle(
+ struct mpq_streambuffer *sbuff,
+ int read_buffer,
+ int *handle)
+{
+ struct mpq_streambuffer_buffer_desc *desc = NULL;
+
+ if ((sbuff == NULL) || (handle == NULL))
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ *handle = sbuff->buffers[0].handle;
+ } else {
+ if (read_buffer)
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+ else
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pwrite];
+ *handle = desc->handle;
+ }
+
+ spin_unlock(&sbuff->raw_data.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_get_buffer_handle);
+
+
+int mpq_streambuffer_register_data_dispose(
+ struct mpq_streambuffer *sbuff,
+ mpq_streambuffer_dispose_cb cb_func,
+ void *user_data)
+{
+ if ((sbuff == NULL) || (cb_func == NULL))
+ return -EINVAL;
+
+ sbuff->cb = cb_func;
+ sbuff->cb_user_data = user_data;
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_register_data_dispose);
+
+
+ssize_t mpq_streambuffer_data_free(
+ struct mpq_streambuffer *sbuff)
+{
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return dvb_ringbuffer_free(&sbuff->raw_data);
+ }
+
+ if (sbuff->pending_buffers_count == sbuff->buffers_num) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return 0;
+ }
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pwrite];
+
+ spin_unlock(&sbuff->raw_data.lock);
+
+ return desc->size - desc->write_ptr;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_free);
+
+
+ssize_t mpq_streambuffer_data_avail(
+ struct mpq_streambuffer *sbuff)
+{
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ ssize_t avail = dvb_ringbuffer_avail(&sbuff->raw_data);
+
+ spin_unlock(&sbuff->raw_data.lock);
+ return avail;
+ }
+
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+
+ spin_unlock(&sbuff->raw_data.lock);
+
+ return desc->write_ptr - desc->read_ptr;
+}
+EXPORT_SYMBOL(mpq_streambuffer_data_avail);
+
+int mpq_streambuffer_get_data_rw_offset(
+ struct mpq_streambuffer *sbuff,
+ u32 *read_offset,
+ u32 *write_offset)
+{
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->raw_data.error == -ENODEV) {
+ spin_unlock(&sbuff->raw_data.lock);
+ return -ENODEV;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_RING) {
+ if (read_offset)
+ *read_offset = sbuff->raw_data.pread;
+ if (write_offset)
+ *write_offset = sbuff->raw_data.pwrite;
+ } else {
+ struct mpq_streambuffer_buffer_desc *desc;
+
+ if (read_offset) {
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+ *read_offset = desc->read_ptr;
+ }
+ if (write_offset) {
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pwrite];
+ *write_offset = desc->write_ptr;
+ }
+ }
+
+ spin_unlock(&sbuff->raw_data.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(mpq_streambuffer_get_data_rw_offset);
+
+ssize_t mpq_streambuffer_metadata_free(struct mpq_streambuffer *sbuff)
+{
+ ssize_t free;
+
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->packet_data.lock);
+
+ /* check if buffer was released */
+ if (sbuff->packet_data.error == -ENODEV) {
+ spin_unlock(&sbuff->packet_data.lock);
+ return -ENODEV;
+ }
+
+ free = dvb_ringbuffer_free(&sbuff->packet_data);
+
+ spin_unlock(&sbuff->packet_data.lock);
+
+ return free;
+}
+EXPORT_SYMBOL(mpq_streambuffer_metadata_free);
+
+int mpq_streambuffer_flush(struct mpq_streambuffer *sbuff)
+{
+ struct mpq_streambuffer_buffer_desc *desc;
+ size_t len;
+ int idx;
+ int ret = 0;
+
+ if (sbuff == NULL)
+ return -EINVAL;
+
+ spin_lock(&sbuff->packet_data.lock);
+ spin_lock(&sbuff->raw_data.lock);
+
+ /* Check if buffer was released */
+ if (sbuff->packet_data.error == -ENODEV ||
+ sbuff->raw_data.error == -ENODEV) {
+ ret = -ENODEV;
+ goto end;
+ }
+
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+ while (sbuff->pending_buffers_count) {
+ desc = (struct mpq_streambuffer_buffer_desc *)
+ &sbuff->raw_data.data[sbuff->raw_data.pread];
+ desc->write_ptr = 0;
+ desc->read_ptr = 0;
+ DVB_RINGBUFFER_SKIP(&sbuff->raw_data,
+ sizeof(struct mpq_streambuffer_buffer_desc));
+ sbuff->pending_buffers_count--;
+ }
+ else
+ dvb_ringbuffer_flush(&sbuff->raw_data);
+
+ /*
+ * Dispose all packets (simply flushing is not enough since we want
+ * the packets' status to move to disposed).
+ */
+ do {
+ idx = dvb_ringbuffer_pkt_next(&sbuff->packet_data, -1, &len);
+ if (idx >= 0)
+ dvb_ringbuffer_pkt_dispose(&sbuff->packet_data, idx);
+ } while (idx >= 0);
+
+end:
+ spin_unlock(&sbuff->raw_data.lock);
+ spin_unlock(&sbuff->packet_data.lock);
+ return ret;
+}
+EXPORT_SYMBOL(mpq_streambuffer_flush);
diff --git a/drivers/media/platform/msm/dvb/demux/Kconfig b/drivers/media/platform/msm/dvb/demux/Kconfig
new file mode 100644
index 000000000000..319e2ab2eb96
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/Kconfig
@@ -0,0 +1,46 @@
+config DVB_MPQ_DEMUX
+ tristate "DVB Demux Device"
+ depends on DVB_MPQ && ION && ION_MSM
+ default n
+
+ help
+	  Support for the Qualcomm Technologies Inc. based DVB demux device.
+	  Say Y or M if you own such a device and want to use it.
+	  The demux device is used for stream playback either
+	  from the TSIF interface or from the DVR interface.
+
+config DVB_MPQ_NUM_DMX_DEVICES
+ int "Number of demux devices"
+ depends on DVB_MPQ_DEMUX
+ default 4
+ range 1 255
+
+ help
+	  Configure the number of demux devices.
+	  This depends on your use-cases for maximum concurrent stream playback.
+
+choice
+ prompt "Demux Hardware Plugin"
+ depends on DVB_MPQ_DEMUX
+ default DVB_MPQ_TSPP1
+ help
+	  Enable support for a specific demux HW plugin depending on the existing HW support.
+	  Depending on the enabled HW, demux may take advantage of HW capabilities
+	  to perform some tasks in HW instead of SW.
+
+ config DVB_MPQ_TSPP1
+ bool "TSPPv1 plugin"
+ depends on TSPP
+ help
+			Use this option if your HW has
+			Transport Stream Packet Processor (TSPP) version 1 support.
+			Demux may take advantage of HW capabilities to perform
+			some tasks in HW instead of SW.
+
+ config DVB_MPQ_SW
+ bool "Software plugin"
+ help
+ Use this option if your HW does not have any
+ TSPP hardware support. All demux tasks will be
+ performed in SW.
+endchoice
diff --git a/drivers/media/platform/msm/dvb/demux/Makefile b/drivers/media/platform/msm/dvb/demux/Makefile
new file mode 100644
index 000000000000..c08fa85a8d5d
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/Makefile
@@ -0,0 +1,14 @@
+
+ccflags-y += -Idrivers/media/dvb-core/
+ccflags-y += -Idrivers/media/platform/msm/dvb/include/
+ccflags-y += -Idrivers/misc/
+
+obj-$(CONFIG_DVB_MPQ_DEMUX) += mpq-dmx-hw-plugin.o
+
+mpq-dmx-hw-plugin-y := mpq_dmx_plugin_common.o
+
+mpq-dmx-hw-plugin-$(CONFIG_QSEECOM) += mpq_sdmx.o
+
+mpq-dmx-hw-plugin-$(CONFIG_DVB_MPQ_TSPP1) += mpq_dmx_plugin_tspp_v1.o
+
+mpq-dmx-hw-plugin-$(CONFIG_DVB_MPQ_SW) += mpq_dmx_plugin_sw.o
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
new file mode 100644
index 000000000000..d1e3c090c972
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.c
@@ -0,0 +1,5195 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/file.h>
+#include <linux/scatterlist.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_dmx_plugin_common.h"
+#include "mpq_sdmx.h"
+
+#define SDMX_MAJOR_VERSION_MATCH (8)
+
+/* Length of mandatory fields that must exist in header of video PES */
+#define PES_MANDATORY_FIELDS_LEN 9
+
+/* Index of first byte in TS packet holding STC */
+#define STC_LOCATION_IDX 188
+
+#define MAX_PES_LENGTH (SZ_64K)
+
+#define MAX_TS_PACKETS_FOR_SDMX_PROCESS (500)
+
+/*
+ * The PES header length field is 8 bits, so the PES header data following
+ * this field can be up to 256 bytes long.
+ * The preceding fields of the PES header total 9 bytes
+ * (including the PES header length field).
+ */
+#define MAX_PES_HEADER_LENGTH (256 + PES_MANDATORY_FIELDS_LEN)
+
+/* A TS packet with only an adaptation field can take up the entire TSP */
+#define MAX_TSP_ADAPTATION_LENGTH (184)
+
+#define MAX_SDMX_METADATA_LENGTH \
+ (TS_PACKET_HEADER_LENGTH + \
+ MAX_TSP_ADAPTATION_LENGTH + \
+ MAX_PES_HEADER_LENGTH)
+
+#define SDMX_METADATA_BUFFER_SIZE (64*1024)
+#define SDMX_SECTION_BUFFER_SIZE (64*1024)
+#define SDMX_PCR_BUFFER_SIZE (64*1024)
+
+/* Number of demux devices, defaults to the value set in the kernel configuration */
+static int mpq_demux_device_num = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+module_param(mpq_demux_device_num, int, S_IRUGO);
+
+/* ION heap IDs used for allocating video output buffer */
+static int video_secure_ion_heap = ION_CP_MM_HEAP_ID;
+module_param(video_secure_ion_heap, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(video_secure_ion_heap, "ION heap for secure video buffer allocation");
+
+static int video_nonsecure_ion_heap = ION_IOMMU_HEAP_ID;
+module_param(video_nonsecure_ion_heap, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(video_nonsecure_ion_heap, "ION heap for non-secure video buffer allocation");
+
+/* Value of TS packet scramble bits field for even key */
+static int mpq_sdmx_scramble_even = 0x2;
+module_param(mpq_sdmx_scramble_even, int, S_IRUGO | S_IWUSR);
+
+/* Value of TS packet scramble bits field for odd key */
+static int mpq_sdmx_scramble_odd = 0x3;
+module_param(mpq_sdmx_scramble_odd, int, S_IRUGO | S_IWUSR);
+
+/*
+ * Default action (discard or pass) taken when the scramble bits are not one
+ * of the pass-through / odd / even values.
+ * When set, packets are discarded; otherwise they are passed through.
+ */
+static int mpq_sdmx_scramble_default_discard = 1;
+module_param(mpq_sdmx_scramble_default_discard, int, S_IRUGO | S_IWUSR);
+
+/* Max number of TS packets allowed as input for a single sdmx process */
+static int mpq_sdmx_proc_limit = MAX_TS_PACKETS_FOR_SDMX_PROCESS;
+module_param(mpq_sdmx_proc_limit, int, S_IRUGO | S_IWUSR);
+
+/* Debug flag for secure demux process */
+static int mpq_sdmx_debug;
+module_param(mpq_sdmx_debug, int, S_IRUGO | S_IWUSR);
+
+/*
+ * Indicates whether the demux should search for frame boundaries
+ * and notify on video packets on a frame basis, or whether to provide
+ * only the video PES packet payloads as-is.
+ */
+static int video_framing = 1;
+module_param(video_framing, int, S_IRUGO | S_IWUSR);
+
+/* TSIF operation mode: 1 = TSIF_MODE_1, 2 = TSIF_MODE_2, 3 = TSIF_LOOPBACK */
+static int tsif_mode = 2;
+module_param(tsif_mode, int, S_IRUGO | S_IWUSR);
+
+/* Invert the TSIF clock signal */
+static int clock_inv;
+module_param(clock_inv, int, S_IRUGO | S_IWUSR);
+
+/* Global data-structure for managing demux devices */
+static struct
+{
+ /* ION demux client used for memory allocation */
+ struct ion_client *ion_client;
+
+ /* demux devices array */
+ struct mpq_demux *devices;
+
+	/* Stream buffer objects used for tunneling to decoders */
+ struct mpq_streambuffer
+ decoder_buffers[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
+
+ /* Indicates whether secure demux TZ application is available */
+ int secure_demux_app_loaded;
+} mpq_dmx_info;
+
+
+int mpq_dmx_get_param_scramble_odd(void)
+{
+ return mpq_sdmx_scramble_odd;
+}
+
+int mpq_dmx_get_param_scramble_even(void)
+{
+ return mpq_sdmx_scramble_even;
+}
+
+int mpq_dmx_get_param_scramble_default_discard(void)
+{
+ return mpq_sdmx_scramble_default_discard;
+}
+
+int mpq_dmx_get_param_tsif_mode(void)
+{
+ return tsif_mode;
+}
+
+int mpq_dmx_get_param_clock_inv(void)
+{
+ return clock_inv;
+}
+
+/* Check that PES header is valid and that it is a video PES */
+static int mpq_dmx_is_valid_video_pes(struct pes_packet_header *pes_header)
+{
+ /* start-code valid? */
+ if ((pes_header->packet_start_code_prefix_1 != 0) ||
+ (pes_header->packet_start_code_prefix_2 != 0) ||
+ (pes_header->packet_start_code_prefix_3 != 1))
+ return -EINVAL;
+
+ /* stream_id is video? */
+ if ((pes_header->stream_id & 0xF0) != 0xE0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Check if a framing pattern is a video frame pattern or a header pattern */
+static inline int mpq_dmx_is_video_frame(
+ enum dmx_video_codec codec,
+ u64 pattern_type)
+{
+ switch (codec) {
+ case DMX_VIDEO_CODEC_MPEG2:
+ if ((pattern_type == DMX_IDX_MPEG_I_FRAME_START) ||
+ (pattern_type == DMX_IDX_MPEG_P_FRAME_START) ||
+ (pattern_type == DMX_IDX_MPEG_B_FRAME_START))
+ return 1;
+ return 0;
+
+ case DMX_VIDEO_CODEC_H264:
+ if ((pattern_type == DMX_IDX_H264_IDR_START) ||
+ (pattern_type == DMX_IDX_H264_NON_IDR_START))
+ return 1;
+ return 0;
+
+ case DMX_VIDEO_CODEC_VC1:
+ if (pattern_type == DMX_IDX_VC1_FRAME_START)
+ return 1;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * mpq_dmx_get_pattern_params - Returns the required video
+ * patterns for framing operation based on video codec.
+ *
+ * @video_codec: the video codec.
+ * @patterns: a pointer to the pattern parameters, updated by this function.
+ * @patterns_num: number of patterns, updated by this function.
+ */
+static inline int mpq_dmx_get_pattern_params(
+ enum dmx_video_codec video_codec,
+ const struct dvb_dmx_video_patterns
+ *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM],
+ int *patterns_num)
+{
+ switch (video_codec) {
+ case DMX_VIDEO_CODEC_MPEG2:
+ patterns[0] = dvb_dmx_get_pattern(DMX_IDX_MPEG_SEQ_HEADER);
+ patterns[1] = dvb_dmx_get_pattern(DMX_IDX_MPEG_GOP);
+ patterns[2] = dvb_dmx_get_pattern(DMX_IDX_MPEG_I_FRAME_START);
+ patterns[3] = dvb_dmx_get_pattern(DMX_IDX_MPEG_P_FRAME_START);
+ patterns[4] = dvb_dmx_get_pattern(DMX_IDX_MPEG_B_FRAME_START);
+ *patterns_num = 5;
+ break;
+
+ case DMX_VIDEO_CODEC_H264:
+ patterns[0] = dvb_dmx_get_pattern(DMX_IDX_H264_SPS);
+ patterns[1] = dvb_dmx_get_pattern(DMX_IDX_H264_PPS);
+ patterns[2] = dvb_dmx_get_pattern(DMX_IDX_H264_IDR_START);
+ patterns[3] = dvb_dmx_get_pattern(DMX_IDX_H264_NON_IDR_START);
+ patterns[4] = dvb_dmx_get_pattern(DMX_IDX_H264_SEI);
+ *patterns_num = 5;
+ break;
+
+ case DMX_VIDEO_CODEC_VC1:
+ patterns[0] = dvb_dmx_get_pattern(DMX_IDX_VC1_SEQ_HEADER);
+ patterns[1] = dvb_dmx_get_pattern(DMX_IDX_VC1_ENTRY_POINT);
+ patterns[2] = dvb_dmx_get_pattern(DMX_IDX_VC1_FRAME_START);
+ *patterns_num = 3;
+ break;
+
+ default:
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ *patterns_num = 0;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * mpq_dmx_update_decoder_stat -
+ * Update decoder output statistics in debug-fs.
+ *
+ * @mpq_feed: decoder feed object
+ */
+void mpq_dmx_update_decoder_stat(struct mpq_feed *mpq_feed)
+{
+ struct timespec curr_time;
+ u64 delta_time_ms;
+ struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+ enum mpq_adapter_stream_if idx;
+
+ if (!dvb_dmx_is_video_feed(mpq_feed->dvb_demux_feed) ||
+ (mpq_feed->video_info.stream_interface >
+ MPQ_ADAPTER_VIDEO3_STREAM_IF))
+ return;
+
+ idx = mpq_feed->video_info.stream_interface;
+
+ curr_time = current_kernel_time();
+ if (unlikely(!mpq_demux->decoder_stat[idx].out_count)) {
+ mpq_demux->decoder_stat[idx].out_last_time = curr_time;
+ mpq_demux->decoder_stat[idx].out_count++;
+ return;
+ }
+
+	/* calculate the time delta between frames */
+ delta_time_ms = mpq_dmx_calc_time_delta(&curr_time,
+ &mpq_demux->decoder_stat[idx].out_last_time);
+
+ mpq_demux->decoder_stat[idx].out_interval_sum += (u32)delta_time_ms;
+
+ mpq_demux->decoder_stat[idx].out_interval_average =
+ mpq_demux->decoder_stat[idx].out_interval_sum /
+ mpq_demux->decoder_stat[idx].out_count;
+
+ if (delta_time_ms > mpq_demux->decoder_stat[idx].out_interval_max)
+ mpq_demux->decoder_stat[idx].out_interval_max = delta_time_ms;
+
+ mpq_demux->decoder_stat[idx].out_last_time = curr_time;
+ mpq_demux->decoder_stat[idx].out_count++;
+}
+
+/*
+ * mpq_dmx_update_sdmx_stat -
+ * Update SDMX statistics in debug-fs.
+ *
+ * @mpq_demux: mpq_demux object
+ * @bytes_processed: number of bytes processed by sdmx
+ * @process_start_time: time before sdmx process was triggered
+ * @process_end_time: time after sdmx process finished
+ */
+static inline void mpq_dmx_update_sdmx_stat(struct mpq_demux *mpq_demux,
+ u32 bytes_processed, struct timespec *process_start_time,
+ struct timespec *process_end_time)
+{
+ u32 packets_num;
+ u64 process_time;
+
+ mpq_demux->sdmx_process_count++;
+ packets_num = bytes_processed / mpq_demux->demux.ts_packet_size;
+ mpq_demux->sdmx_process_packets_sum += packets_num;
+ mpq_demux->sdmx_process_packets_average =
+ mpq_demux->sdmx_process_packets_sum /
+ mpq_demux->sdmx_process_count;
+
+ process_time =
+ mpq_dmx_calc_time_delta(process_end_time, process_start_time);
+
+ mpq_demux->sdmx_process_time_sum += process_time;
+ mpq_demux->sdmx_process_time_average =
+ mpq_demux->sdmx_process_time_sum /
+ mpq_demux->sdmx_process_count;
+
+ if ((mpq_demux->sdmx_process_count == 1) ||
+ (packets_num < mpq_demux->sdmx_process_packets_min))
+ mpq_demux->sdmx_process_packets_min = packets_num;
+
+ if ((mpq_demux->sdmx_process_count == 1) ||
+ (process_time > mpq_demux->sdmx_process_time_max))
+ mpq_demux->sdmx_process_time_max = process_time;
+}
+
+static int mpq_sdmx_log_level_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t mpq_sdmx_log_level_read(struct file *fp,
+ char __user *user_buffer, size_t count, loff_t *position)
+{
+ char user_str[16];
+ struct mpq_demux *mpq_demux = fp->private_data;
+ int ret;
+
+ ret = scnprintf(user_str, 16, "%d", mpq_demux->sdmx_log_level);
+ ret = simple_read_from_buffer(user_buffer, count, position,
+ user_str, ret+1);
+
+ return ret;
+}
+
+static ssize_t mpq_sdmx_log_level_write(struct file *fp,
+ const char __user *user_buffer, size_t count, loff_t *position)
+{
+ char user_str[16];
+ int ret;
+ int ret_count;
+ int level;
+ struct mpq_demux *mpq_demux = fp->private_data;
+
+ if (count >= 16)
+ return -EINVAL;
+
+ ret_count = simple_write_to_buffer(user_str, 16, position, user_buffer,
+ count);
+ if (ret_count < 0)
+ return ret_count;
+
+ ret = kstrtoint(user_str, 0, &level);
+ if (ret)
+ return ret;
+
+ if (level < SDMX_LOG_NO_PRINT || level > SDMX_LOG_VERBOSE)
+ return -EINVAL;
+
+ mutex_lock(&mpq_demux->mutex);
+ mpq_demux->sdmx_log_level = level;
+ if (mpq_demux->sdmx_session_handle != SDMX_INVALID_SESSION_HANDLE) {
+ ret = sdmx_set_log_level(mpq_demux->sdmx_session_handle,
+ mpq_demux->sdmx_log_level);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Could not set sdmx log level. ret = %d\n",
+ __func__, ret);
+ mutex_unlock(&mpq_demux->mutex);
+ return -EINVAL;
+ }
+ }
+
+ mutex_unlock(&mpq_demux->mutex);
+ return ret_count;
+}
+
+static const struct file_operations sdmx_debug_fops = {
+ .open = mpq_sdmx_log_level_open,
+ .read = mpq_sdmx_log_level_read,
+ .write = mpq_sdmx_log_level_write,
+ .owner = THIS_MODULE,
+};
+
+/* Extend dvb-demux debugfs with common plug-in entries */
+void mpq_dmx_init_debugfs_entries(struct mpq_demux *mpq_demux)
+{
+ int i;
+ char file_name[50];
+ struct dentry *debugfs_decoder_dir;
+
+ /*
+ * Extend dvb-demux debugfs with HW statistics.
+ * Note that destruction of debugfs directory is done
+ * when dvb-demux is terminated.
+ */
+ mpq_demux->hw_notification_count = 0;
+ mpq_demux->hw_notification_interval = 0;
+ mpq_demux->hw_notification_size = 0;
+ mpq_demux->hw_notification_min_size = 0xFFFFFFFF;
+
+ if (mpq_demux->demux.dmx.debugfs_demux_dir == NULL)
+ return;
+
+ debugfs_create_u32(
+ "hw_notification_interval",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->hw_notification_interval);
+
+ debugfs_create_u32(
+ "hw_notification_min_interval",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->hw_notification_min_interval);
+
+ debugfs_create_u32(
+ "hw_notification_count",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->hw_notification_count);
+
+ debugfs_create_u32(
+ "hw_notification_size",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->hw_notification_size);
+
+ debugfs_create_u32(
+ "hw_notification_min_size",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->hw_notification_min_size);
+
+ debugfs_decoder_dir = debugfs_create_dir("decoder",
+ mpq_demux->demux.dmx.debugfs_demux_dir);
+
+ for (i = 0;
+ debugfs_decoder_dir &&
+ (i < MPQ_ADAPTER_MAX_NUM_OF_INTERFACES);
+ i++) {
+ snprintf(file_name, 50, "decoder%d_drop_count", i);
+ debugfs_create_u32(
+ file_name,
+ S_IRUGO,
+ debugfs_decoder_dir,
+ &mpq_demux->decoder_stat[i].drop_count);
+
+ snprintf(file_name, 50, "decoder%d_out_count", i);
+ debugfs_create_u32(
+ file_name,
+ S_IRUGO,
+ debugfs_decoder_dir,
+ &mpq_demux->decoder_stat[i].out_count);
+
+ snprintf(file_name, 50, "decoder%d_out_interval_sum", i);
+ debugfs_create_u32(
+ file_name,
+ S_IRUGO,
+ debugfs_decoder_dir,
+ &mpq_demux->decoder_stat[i].out_interval_sum);
+
+ snprintf(file_name, 50, "decoder%d_out_interval_average", i);
+ debugfs_create_u32(
+ file_name,
+ S_IRUGO,
+ debugfs_decoder_dir,
+ &mpq_demux->decoder_stat[i].out_interval_average);
+
+ snprintf(file_name, 50, "decoder%d_out_interval_max", i);
+ debugfs_create_u32(
+ file_name,
+ S_IRUGO,
+ debugfs_decoder_dir,
+ &mpq_demux->decoder_stat[i].out_interval_max);
+
+ snprintf(file_name, 50, "decoder%d_ts_errors", i);
+ debugfs_create_u32(
+ file_name,
+ S_IRUGO,
+ debugfs_decoder_dir,
+ &mpq_demux->decoder_stat[i].ts_errors);
+
+ snprintf(file_name, 50, "decoder%d_cc_errors", i);
+ debugfs_create_u32(
+ file_name,
+ S_IRUGO,
+ debugfs_decoder_dir,
+ &mpq_demux->decoder_stat[i].cc_errors);
+ }
+
+ debugfs_create_u32(
+ "sdmx_process_count",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->sdmx_process_count);
+
+ debugfs_create_u32(
+ "sdmx_process_time_sum",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->sdmx_process_time_sum);
+
+ debugfs_create_u32(
+ "sdmx_process_time_average",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->sdmx_process_time_average);
+
+ debugfs_create_u32(
+ "sdmx_process_time_max",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->sdmx_process_time_max);
+
+ debugfs_create_u32(
+ "sdmx_process_packets_sum",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->sdmx_process_packets_sum);
+
+ debugfs_create_u32(
+ "sdmx_process_packets_average",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->sdmx_process_packets_average);
+
+ debugfs_create_u32(
+ "sdmx_process_packets_min",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ &mpq_demux->sdmx_process_packets_min);
+
+ debugfs_create_file("sdmx_log_level",
+ S_IRUGO | S_IWUSR | S_IWGRP,
+ mpq_demux->demux.dmx.debugfs_demux_dir,
+ mpq_demux,
+ &sdmx_debug_fops);
+}
+
+/* Update dvb-demux debugfs with HW notification statistics */
+void mpq_dmx_update_hw_statistics(struct mpq_demux *mpq_demux)
+{
+ struct timespec curr_time;
+ u64 delta_time_ms;
+
+ curr_time = current_kernel_time();
+ if (likely(mpq_demux->hw_notification_count)) {
+ /* calculate time-delta between notifications */
+ delta_time_ms = mpq_dmx_calc_time_delta(&curr_time,
+ &mpq_demux->last_notification_time);
+
+ mpq_demux->hw_notification_interval = delta_time_ms;
+
+ if ((mpq_demux->hw_notification_count == 1) ||
+ (mpq_demux->hw_notification_interval &&
+ mpq_demux->hw_notification_interval <
+ mpq_demux->hw_notification_min_interval))
+ mpq_demux->hw_notification_min_interval =
+ mpq_demux->hw_notification_interval;
+ }
+
+ mpq_demux->hw_notification_count++;
+ mpq_demux->last_notification_time = curr_time;
+}
+
+static void mpq_sdmx_check_app_loaded(void)
+{
+ int session;
+ u32 version;
+ int ret;
+
+ ret = sdmx_open_session(&session);
+ if (ret != SDMX_SUCCESS) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Could not initialize session with SDMX. ret = %d\n",
+ __func__, ret);
+ mpq_dmx_info.secure_demux_app_loaded = 0;
+ return;
+ }
+
+ /* Check proper sdmx major version */
+ ret = sdmx_get_version(session, &version);
+ if (ret != SDMX_SUCCESS) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Could not get sdmx version. ret = %d\n",
+ __func__, ret);
+ } else {
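+		/* The major version is encoded in the upper bits of the reported version value */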
+ if ((version >> 8) != SDMX_MAJOR_VERSION_MATCH)
+ MPQ_DVB_ERR_PRINT(
+ "%s: sdmx major version does not match. expected=%d, actual=%d\n",
+ __func__, SDMX_MAJOR_VERSION_MATCH,
+ (version >> 8));
+ else
+ MPQ_DVB_DBG_PRINT(
+ "%s: sdmx major version is ok = %d\n",
+ __func__, SDMX_MAJOR_VERSION_MATCH);
+ }
+
+ mpq_dmx_info.secure_demux_app_loaded = 1;
+ sdmx_close_session(session);
+}
+
+int mpq_dmx_plugin_init(mpq_dmx_init dmx_init_func)
+{
+ int i;
+ int j;
+ int result;
+ struct mpq_demux *mpq_demux;
+ struct dvb_adapter *mpq_adapter;
+ struct mpq_feed *feed;
+
+ MPQ_DVB_DBG_PRINT("%s executed, device num %d\n",
+ __func__,
+ mpq_demux_device_num);
+
+ mpq_adapter = mpq_adapter_get();
+
+ if (mpq_adapter == NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_adapter is not valid\n",
+ __func__);
+ result = -EPERM;
+ goto init_failed;
+ }
+
+ if (mpq_demux_device_num == 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_demux_device_num set to 0\n",
+ __func__);
+
+ result = -EPERM;
+ goto init_failed;
+ }
+
+ mpq_dmx_info.devices = NULL;
+ mpq_dmx_info.ion_client = NULL;
+
+ mpq_dmx_info.secure_demux_app_loaded = 0;
+
+ /* Allocate memory for all MPQ devices */
+ mpq_dmx_info.devices =
+ vzalloc(mpq_demux_device_num*sizeof(struct mpq_demux));
+
+ if (!mpq_dmx_info.devices) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: failed to allocate devices memory\n",
+ __func__);
+
+ result = -ENOMEM;
+ goto init_failed;
+ }
+
+ /*
+ * Create a new ION client used by demux to allocate memory
+ * for decoder's buffers.
+ */
+ mpq_dmx_info.ion_client =
+ msm_ion_client_create("demux_client");
+ if (IS_ERR_OR_NULL(mpq_dmx_info.ion_client)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: msm_ion_client_create\n",
+ __func__);
+
+ result = PTR_ERR(mpq_dmx_info.ion_client);
+ if (!result)
+ result = -ENOMEM;
+ mpq_dmx_info.ion_client = NULL;
+ goto init_failed_free_demux_devices;
+ }
+
+ /* Initialize and register all demux devices to the system */
+ for (i = 0; i < mpq_demux_device_num; i++) {
+ mpq_demux = mpq_dmx_info.devices+i;
+ mpq_demux->idx = i;
+
+ /* initialize demux source to memory by default */
+ mpq_demux->source = DMX_SOURCE_DVR0 + i;
+
+ /*
+		 * Give the plugin a pointer to the ION client so
+		 * that it can allocate memory from ION if it needs to.
+ */
+ mpq_demux->ion_client = mpq_dmx_info.ion_client;
+
+ mutex_init(&mpq_demux->mutex);
+
+ mpq_demux->num_secure_feeds = 0;
+ mpq_demux->num_active_feeds = 0;
+ mpq_demux->sdmx_filter_count = 0;
+ mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
+ mpq_demux->sdmx_eos = 0;
+ mpq_demux->sdmx_log_level = SDMX_LOG_NO_PRINT;
+
+ if (mpq_demux->demux.feednum > MPQ_MAX_DMX_FILES) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: err - actual feednum (%d) larger than max, enlarge MPQ_MAX_DMX_FILES!\n",
+ __func__,
+ mpq_demux->demux.feednum);
+ result = -EINVAL;
+ goto init_failed_free_demux_devices;
+ }
+
+ /* Initialize private feed info */
+ for (j = 0; j < MPQ_MAX_DMX_FILES; j++) {
+ feed = &mpq_demux->feeds[j];
+ memset(feed, 0, sizeof(*feed));
+ feed->sdmx_filter_handle = SDMX_INVALID_FILTER_HANDLE;
+ feed->mpq_demux = mpq_demux;
+ feed->session_id = 0;
+ }
+
+ /*
+ * mpq_demux_plugin_hw_init should be implemented
+ * by the specific plugin
+ */
+ result = dmx_init_func(mpq_adapter, mpq_demux);
+ if (result < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: dmx_init_func (errno=%d)\n",
+ __func__,
+ result);
+
+ goto init_failed_free_demux_devices;
+ }
+
+ mpq_demux->is_initialized = 1;
+
+ /*
+ * dvb-demux is now initialized,
+ * update back-pointers of private feeds
+ */
+ for (j = 0; j < MPQ_MAX_DMX_FILES; j++) {
+ feed = &mpq_demux->feeds[j];
+ feed->dvb_demux_feed = &mpq_demux->demux.feed[j];
+ mpq_demux->demux.feed[j].priv = feed;
+ }
+
+ /*
+ * Add capability of receiving input from memory.
+ * Every demux in our system may be connected to memory input,
+ * or any live input.
+ */
+ mpq_demux->fe_memory.source = DMX_MEMORY_FE;
+ result =
+ mpq_demux->demux.dmx.add_frontend(
+ &mpq_demux->demux.dmx,
+ &mpq_demux->fe_memory);
+
+ if (result < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: add_frontend (mem) failed (errno=%d)\n",
+ __func__,
+ result);
+
+ goto init_failed_free_demux_devices;
+ }
+ }
+
+ return 0;
+
+init_failed_free_demux_devices:
+ mpq_dmx_plugin_exit();
+init_failed:
+ return result;
+}
+
+void mpq_dmx_plugin_exit(void)
+{
+ int i;
+ struct mpq_demux *mpq_demux;
+
+ MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+ if (mpq_dmx_info.ion_client != NULL) {
+ ion_client_destroy(mpq_dmx_info.ion_client);
+ mpq_dmx_info.ion_client = NULL;
+ }
+
+ if (mpq_dmx_info.devices != NULL) {
+ for (i = 0; i < mpq_demux_device_num; i++) {
+ mpq_demux = mpq_dmx_info.devices + i;
+
+ if (!mpq_demux->is_initialized)
+ continue;
+
+ if (mpq_demux->mpq_dmx_plugin_release)
+ mpq_demux->mpq_dmx_plugin_release(mpq_demux);
+
+ mpq_demux->demux.dmx.remove_frontend(
+ &mpq_demux->demux.dmx,
+ &mpq_demux->fe_memory);
+
+ if (mpq_dmx_info.secure_demux_app_loaded)
+ mpq_sdmx_close_session(mpq_demux);
+ mutex_destroy(&mpq_demux->mutex);
+ dvb_dmxdev_release(&mpq_demux->dmxdev);
+ dvb_dmx_release(&mpq_demux->demux);
+ }
+
+ vfree(mpq_dmx_info.devices);
+ mpq_dmx_info.devices = NULL;
+ }
+}
+
+int mpq_dmx_set_source(
+ struct dmx_demux *demux,
+ const dmx_source_t *src)
+{
+ int i;
+ int dvr_index;
+ int dmx_index;
+ struct dvb_demux *dvb_demux = demux->priv;
+ struct mpq_demux *mpq_demux;
+
+ if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL)) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ mpq_demux = dvb_demux->priv;
+ if (mpq_demux == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * For dvr sources,
+ * verify that this source is connected to the respective demux
+ */
+ dmx_index = mpq_demux - mpq_dmx_info.devices;
+
+ if (*src >= DMX_SOURCE_DVR0) {
+ dvr_index = *src - DMX_SOURCE_DVR0;
+
+ if (dvr_index != dmx_index) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: can't connect demux%d to dvr%d\n",
+ __func__,
+ dmx_index,
+ dvr_index);
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * For front-end sources,
+ * verify that this source is not already set to different demux
+ */
+ for (i = 0; i < mpq_demux_device_num; i++) {
+ if ((&mpq_dmx_info.devices[i] != mpq_demux) &&
+ (mpq_dmx_info.devices[i].source == *src)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: demux%d source can't be set,\n"
+ "demux%d occupies this source already\n",
+ __func__,
+ dmx_index,
+ i);
+ return -EBUSY;
+ }
+ }
+
+ mpq_demux->source = *src;
+ return 0;
+}
+
+/**
+ * Takes an ION allocated buffer's file descriptor and handles the details of
+ * mapping it into kernel memory and obtaining an ION handle for it.
+ * Internal helper function.
+ *
+ * @client: ION client
+ * @handle: ION file descriptor to map
+ * @priv_handle: returned ION handle. Must be freed when no longer needed
+ * @kernel_mem: returned kernel mapped pointer
+ *
+ * Note: mapping might not be possible in secured heaps/buffers, and so NULL
+ * might be returned in kernel_mem
+ *
+ * Return error status
+ */
+static int mpq_map_buffer_to_kernel(
+ struct ion_client *client,
+ int handle,
+ struct ion_handle **priv_handle,
+ void **kernel_mem)
+{
+ struct ion_handle *ion_handle;
+ unsigned long ionflag = 0;
+ int ret;
+
+ if (client == NULL || priv_handle == NULL || kernel_mem == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ ion_handle = ion_import_dma_buf(client, handle);
+ if (IS_ERR_OR_NULL(ion_handle)) {
+ ret = PTR_ERR(ion_handle);
+ MPQ_DVB_ERR_PRINT("%s: ion_import_dma_buf failed %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+
+ goto map_buffer_failed;
+ }
+
+ ret = ion_handle_get_flags(client, ion_handle, &ionflag);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT("%s: ion_handle_get_flags failed %d\n",
+ __func__, ret);
+ goto map_buffer_failed_free_buff;
+ }
+
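+	/* Secure buffers cannot be mapped into the kernel, so kernel_mem is left NULL */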
+ if (ionflag & ION_FLAG_SECURE) {
+ MPQ_DVB_DBG_PRINT("%s: secured buffer\n", __func__);
+ *kernel_mem = NULL;
+ } else {
+ unsigned long tmp;
+ *kernel_mem = ion_map_kernel(client, ion_handle);
+ if (IS_ERR_OR_NULL(*kernel_mem)) {
+ ret = PTR_ERR(*kernel_mem);
+ MPQ_DVB_ERR_PRINT("%s: ion_map_kernel failed, ret=%d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto map_buffer_failed_free_buff;
+ }
+ ion_handle_get_size(client, ion_handle, &tmp);
+ MPQ_DVB_DBG_PRINT(
+ "%s: mapped to address 0x%p, size=%lu\n",
+ __func__, *kernel_mem, tmp);
+ }
+
+ *priv_handle = ion_handle;
+ return 0;
+
+map_buffer_failed_free_buff:
+ ion_free(client, ion_handle);
+map_buffer_failed:
+ return ret;
+}
+
+int mpq_dmx_map_buffer(struct dmx_demux *demux, struct dmx_buffer *dmx_buffer,
+ void **priv_handle, void **kernel_mem)
+{
+ struct dvb_demux *dvb_demux = demux->priv;
+ struct mpq_demux *mpq_demux;
+
+ if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL) ||
+ (priv_handle == NULL) || (kernel_mem == NULL)) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ mpq_demux = dvb_demux->priv;
+ if (mpq_demux == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ return mpq_map_buffer_to_kernel(
+ mpq_demux->ion_client,
+ dmx_buffer->handle,
+ (struct ion_handle **)priv_handle, kernel_mem);
+}
+
+int mpq_dmx_unmap_buffer(struct dmx_demux *demux,
+ void *priv_handle)
+{
+ struct dvb_demux *dvb_demux = demux->priv;
+ struct ion_handle *ion_handle = priv_handle;
+ struct mpq_demux *mpq_demux;
+ unsigned long ionflag = 0;
+ int ret;
+
+ if ((mpq_dmx_info.devices == NULL) || (dvb_demux == NULL) ||
+ (priv_handle == NULL)) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ mpq_demux = dvb_demux->priv;
+ if (mpq_demux == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = ion_handle_get_flags(mpq_demux->ion_client, ion_handle, &ionflag);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT("%s: ion_handle_get_flags failed %d\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+
+ if (!(ionflag & ION_FLAG_SECURE))
+ ion_unmap_kernel(mpq_demux->ion_client, ion_handle);
+
+ ion_free(mpq_demux->ion_client, ion_handle);
+
+ return 0;
+}
+
+int mpq_dmx_reuse_decoder_buffer(struct dvb_demux_feed *feed, int cookie)
+{
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+
+ MPQ_DVB_DBG_PRINT("%s: cookie=%d\n", __func__, cookie);
+
+ if (cookie < 0) {
+ MPQ_DVB_ERR_PRINT("%s: invalid cookie parameter\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dvb_dmx_is_video_feed(feed)) {
+ struct mpq_video_feed_info *feed_data;
+ struct mpq_feed *mpq_feed;
+ struct mpq_streambuffer *stream_buffer;
+ int ret;
+
+ mutex_lock(&mpq_demux->mutex);
+ mpq_feed = feed->priv;
+ feed_data = &mpq_feed->video_info;
+
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+ if (stream_buffer == NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid feed, feed_data->video_buffer is NULL\n",
+ __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ mutex_unlock(&mpq_demux->mutex);
+ return -EINVAL;
+ }
+
+ ret = mpq_streambuffer_pkt_dispose(stream_buffer, cookie, 1);
+ spin_unlock(&feed_data->video_buffer_lock);
+ mutex_unlock(&mpq_demux->mutex);
+
+ return ret;
+ }
+
+ /* else */
+ MPQ_DVB_ERR_PRINT("%s: Invalid feed type %d\n",
+ __func__, feed->pes_type);
+
+ return -EINVAL;
+}
+
+/**
+ * Handles the details of internal decoder buffer allocation via ION.
+ * Internal helper function.
+ * @mpq_demux: mpq demux instance providing the ION client
+ * @feed_data: decoder feed object
+ * @dec_buffs: buffer information
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_internal_buffers(
+ struct mpq_demux *mpq_demux,
+ struct mpq_video_feed_info *feed_data,
+ struct dmx_decoder_buffers *dec_buffs)
+{
+ struct ion_handle *temp_handle = NULL;
+ void *payload_buffer = NULL;
+ int actual_buffer_size = 0;
+ int ret = 0;
+
+ MPQ_DVB_DBG_PRINT("%s: Internal decoder buffer allocation\n", __func__);
+
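+	/* Round the requested buffer size up to a 4KB multiple */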
+ actual_buffer_size = dec_buffs->buffers_size;
+ actual_buffer_size += (SZ_4K - 1);
+ actual_buffer_size &= ~(SZ_4K - 1);
+
+ temp_handle = ion_alloc(mpq_demux->ion_client,
+ actual_buffer_size, SZ_4K,
+ ION_HEAP(video_secure_ion_heap) |
+ ION_HEAP(video_nonsecure_ion_heap),
+ mpq_demux->decoder_alloc_flags);
+
+ if (IS_ERR_OR_NULL(temp_handle)) {
+ ret = PTR_ERR(temp_handle);
+ MPQ_DVB_ERR_PRINT("%s: FAILED to allocate payload buffer %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ payload_buffer = ion_map_kernel(mpq_demux->ion_client, temp_handle);
+
+ if (IS_ERR_OR_NULL(payload_buffer)) {
+ ret = PTR_ERR(payload_buffer);
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to map payload buffer %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto init_failed_free_payload_buffer;
+ }
+ feed_data->buffer_desc.decoder_buffers_num = 1;
+ feed_data->buffer_desc.ion_handle[0] = temp_handle;
+ feed_data->buffer_desc.desc[0].base = payload_buffer;
+ feed_data->buffer_desc.desc[0].size = actual_buffer_size;
+ feed_data->buffer_desc.desc[0].read_ptr = 0;
+ feed_data->buffer_desc.desc[0].write_ptr = 0;
+ feed_data->buffer_desc.desc[0].handle =
+ ion_share_dma_buf_fd(mpq_demux->ion_client, temp_handle);
+ if (IS_ERR_VALUE(feed_data->buffer_desc.desc[0].handle)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to share payload buffer %d\n",
+ __func__, ret);
+ ret = -ENOMEM;
+ goto init_failed_unmap_payload_buffer;
+ }
+
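+	/* Hold a file reference on the shared fd; it is dropped in mpq_dmx_release_streambuffer() */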
+ feed_data->buffer_desc.shared_file = fget(
+ feed_data->buffer_desc.desc[0].handle);
+
+ return 0;
+
+init_failed_unmap_payload_buffer:
+ ion_unmap_kernel(mpq_demux->ion_client, temp_handle);
+ feed_data->buffer_desc.desc[0].base = NULL;
+init_failed_free_payload_buffer:
+ ion_free(mpq_demux->ion_client, temp_handle);
+ feed_data->buffer_desc.ion_handle[0] = NULL;
+ feed_data->buffer_desc.desc[0].size = 0;
+ feed_data->buffer_desc.decoder_buffers_num = 0;
+ feed_data->buffer_desc.shared_file = NULL;
+end:
+ return ret;
+}
+
+/**
+ * Handles the details of external decoder buffers allocated by the user.
+ * Each buffer is mapped into kernel memory, an ION handle is obtained, and
+ * the decoder feed object is updated with the related information.
+ * Internal helper function.
+ * @feed_data: decoder feed object
+ * @dec_buffs: buffer information
+ * @client: ION client
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_external_buffers(
+ struct mpq_video_feed_info *feed_data,
+ struct dmx_decoder_buffers *dec_buffs,
+ struct ion_client *client)
+{
+ struct ion_handle *temp_handle = NULL;
+ void *payload_buffer = NULL;
+ int actual_buffer_size = 0;
+ int ret = 0;
+ int i;
+
+ /*
+ * Payload buffer was allocated externally (through ION).
+ * Map the ion handles to kernel memory
+ */
+ MPQ_DVB_DBG_PRINT("%s: External decoder buffer allocation\n", __func__);
+
+ actual_buffer_size = dec_buffs->buffers_size;
+ if (!dec_buffs->is_linear) {
+ MPQ_DVB_DBG_PRINT("%s: Ex. Ring-buffer\n", __func__);
+ feed_data->buffer_desc.decoder_buffers_num = 1;
+ } else {
+ MPQ_DVB_DBG_PRINT("%s: Ex. Linear\n", __func__);
+ feed_data->buffer_desc.decoder_buffers_num =
+ dec_buffs->buffers_num;
+ }
+
+ for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+ ret = mpq_map_buffer_to_kernel(
+ client,
+ dec_buffs->handles[i],
+ &temp_handle,
+ &payload_buffer);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed mapping buffer %d\n",
+ __func__, i);
+ goto init_failed;
+ }
+ feed_data->buffer_desc.ion_handle[i] = temp_handle;
+ feed_data->buffer_desc.desc[i].base = payload_buffer;
+ feed_data->buffer_desc.desc[i].handle =
+ dec_buffs->handles[i];
+ feed_data->buffer_desc.desc[i].size =
+ dec_buffs->buffers_size;
+ feed_data->buffer_desc.desc[i].read_ptr = 0;
+ feed_data->buffer_desc.desc[i].write_ptr = 0;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Buffer #%d: base=0x%p, handle=%d, size=%d\n",
+ __func__, i,
+ feed_data->buffer_desc.desc[i].base,
+ feed_data->buffer_desc.desc[i].handle,
+ feed_data->buffer_desc.desc[i].size);
+ }
+
+ return 0;
+
+init_failed:
+ for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+ if (feed_data->buffer_desc.ion_handle[i]) {
+ if (feed_data->buffer_desc.desc[i].base) {
+ ion_unmap_kernel(client,
+ feed_data->buffer_desc.ion_handle[i]);
+ feed_data->buffer_desc.desc[i].base = NULL;
+ }
+ ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+ feed_data->buffer_desc.ion_handle[i] = NULL;
+ feed_data->buffer_desc.desc[i].size = 0;
+ }
+ }
+ return ret;
+}
+
+/**
+ * Handles the details of initializing the mpq_streambuffer object according
+ * to the user decoder buffer configuration: External/Internal buffers and
+ * ring/linear buffering mode.
+ * Internal helper function.
+ * @feed: dvb demux feed object, contains the buffers configuration
+ * @feed_data: decoder feed object
+ * @stream_buffer: stream buffer object to initialize
+ *
+ * Return error status
+ */
+static int mpq_dmx_init_streambuffer(
+ struct mpq_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct mpq_streambuffer *stream_buffer)
+{
+ int ret;
+ void *packet_buffer = NULL;
+ struct mpq_demux *mpq_demux = feed->mpq_demux;
+ struct ion_client *client = mpq_demux->ion_client;
+ struct dmx_decoder_buffers *dec_buffs = NULL;
+ enum mpq_streambuffer_mode mode;
+
+ dec_buffs = feed->dvb_demux_feed->feed.ts.decoder_buffers;
+
+ /* Allocate packet buffer holding the meta-data */
+ packet_buffer = vmalloc(VIDEO_META_DATA_BUFFER_SIZE);
+
+ if (packet_buffer == NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to allocate packets buffer\n",
+ __func__);
+
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ MPQ_DVB_DBG_PRINT("%s: dec_buffs: num=%d, size=%d, linear=%d\n",
+ __func__,
+ dec_buffs->buffers_num,
+ dec_buffs->buffers_size,
+ dec_buffs->is_linear);
+
+ if (dec_buffs->buffers_num == 0)
+ ret = mpq_dmx_init_internal_buffers(
+ mpq_demux, feed_data, dec_buffs);
+ else
+ ret = mpq_dmx_init_external_buffers(
+ feed_data, dec_buffs, client);
+
+ if (ret != 0)
+ goto init_failed_free_packet_buffer;
+
+ mode = dec_buffs->is_linear ? MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR :
+ MPQ_STREAMBUFFER_BUFFER_MODE_RING;
+ ret = mpq_streambuffer_init(
+ feed_data->video_buffer,
+ mode,
+ feed_data->buffer_desc.desc,
+ feed_data->buffer_desc.decoder_buffers_num,
+ packet_buffer,
+ VIDEO_META_DATA_BUFFER_SIZE);
+
+ if (ret != 0)
+ goto init_failed_free_packet_buffer;
+
+ goto end;
+
+
+init_failed_free_packet_buffer:
+ vfree(packet_buffer);
+end:
+ return ret;
+}
+
+static void mpq_dmx_release_streambuffer(
+ struct mpq_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct mpq_streambuffer *video_buffer,
+ struct ion_client *client)
+{
+ int buf_num = 0;
+ int i;
+ struct dmx_decoder_buffers *dec_buffs =
+ feed->dvb_demux_feed->feed.ts.decoder_buffers;
+
+ mpq_adapter_unregister_stream_if(feed_data->stream_interface);
+
+ mpq_streambuffer_terminate(video_buffer);
+
+ vfree(video_buffer->packet_data.data);
+
+ buf_num = feed_data->buffer_desc.decoder_buffers_num;
+
+ for (i = 0; i < buf_num; i++) {
+ if (feed_data->buffer_desc.ion_handle[i]) {
+ if (feed_data->buffer_desc.desc[i].base) {
+ ion_unmap_kernel(client,
+ feed_data->buffer_desc.ion_handle[i]);
+ feed_data->buffer_desc.desc[i].base = NULL;
+ }
+
+ /*
+			 * Un-share the buffer if the kernel is the one that
+			 * shared it.
+ */
+ if (!dec_buffs->buffers_num &&
+ feed_data->buffer_desc.shared_file) {
+ fput(feed_data->buffer_desc.shared_file);
+ feed_data->buffer_desc.shared_file = NULL;
+ }
+
+ ion_free(client, feed_data->buffer_desc.ion_handle[i]);
+ feed_data->buffer_desc.ion_handle[i] = NULL;
+ feed_data->buffer_desc.desc[i].size = 0;
+ }
+ }
+}
+
+int mpq_dmx_flush_stream_buffer(struct dvb_demux_feed *feed)
+{
+ struct mpq_feed *mpq_feed = feed->priv;
+ struct mpq_video_feed_info *feed_data = &mpq_feed->video_info;
+ struct mpq_streambuffer *sbuff;
+ int ret = 0;
+
+ if (!dvb_dmx_is_video_feed(feed)) {
+ MPQ_DVB_DBG_PRINT("%s: not a video feed, feed type=%d\n",
+ __func__, feed->pes_type);
+ return 0;
+ }
+
+ spin_lock(&feed_data->video_buffer_lock);
+
+ sbuff = feed_data->video_buffer;
+ if (sbuff == NULL) {
+ MPQ_DVB_DBG_PRINT("%s: feed_data->video_buffer is NULL\n",
+ __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return -ENODEV;
+ }
+
+ feed_data->pending_pattern_len = 0;
+
+ ret = mpq_streambuffer_flush(sbuff);
+ if (ret)
+ MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer_flush failed, ret=%d\n",
+ __func__, ret);
+
+ spin_unlock(&feed_data->video_buffer_lock);
+
+ return ret;
+}
+
+static int mpq_dmx_flush_buffer(struct dmx_ts_feed *ts_feed, size_t length)
+{
+ struct dvb_demux_feed *feed = (struct dvb_demux_feed *)ts_feed;
+ struct dvb_demux *demux = feed->demux;
+ int ret = 0;
+
+ if (mutex_lock_interruptible(&demux->mutex))
+ return -ERESTARTSYS;
+
+ dvbdmx_ts_reset_pes_state(feed);
+
+ if (dvb_dmx_is_video_feed(feed)) {
+ MPQ_DVB_DBG_PRINT("%s: flushing video buffer\n", __func__);
+
+ ret = mpq_dmx_flush_stream_buffer(feed);
+ }
+
+ mutex_unlock(&demux->mutex);
+ return ret;
+}
+
+/**
+ * mpq_dmx_init_video_feed - Initializes video feed information
+ * used to pass data directly to the decoder.
+ *
+ * @mpq_feed: The mpq feed object
+ *
+ * Return error code.
+ */
+int mpq_dmx_init_video_feed(struct mpq_feed *mpq_feed)
+{
+ int ret;
+ struct mpq_video_feed_info *feed_data = &mpq_feed->video_info;
+ struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+ struct mpq_streambuffer *stream_buffer;
+
+ /* get and store framing information if required */
+ if (video_framing) {
+ mpq_dmx_get_pattern_params(
+ mpq_feed->dvb_demux_feed->video_codec,
+ feed_data->patterns, &feed_data->patterns_num);
+ if (!feed_data->patterns_num) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to get framing pattern parameters\n",
+ __func__);
+
+ ret = -EINVAL;
+ goto init_failed_free_priv_data;
+ }
+ }
+
+ /* Register the new stream-buffer interface to MPQ adapter */
+ switch (mpq_feed->dvb_demux_feed->pes_type) {
+ case DMX_PES_VIDEO0:
+ feed_data->stream_interface =
+ MPQ_ADAPTER_VIDEO0_STREAM_IF;
+ break;
+
+ case DMX_PES_VIDEO1:
+ feed_data->stream_interface =
+ MPQ_ADAPTER_VIDEO1_STREAM_IF;
+ break;
+
+ case DMX_PES_VIDEO2:
+ feed_data->stream_interface =
+ MPQ_ADAPTER_VIDEO2_STREAM_IF;
+ break;
+
+ case DMX_PES_VIDEO3:
+ feed_data->stream_interface =
+ MPQ_ADAPTER_VIDEO3_STREAM_IF;
+ break;
+
+ default:
+ MPQ_DVB_ERR_PRINT(
+ "%s: Invalid pes type %d\n",
+ __func__,
+ mpq_feed->dvb_demux_feed->pes_type);
+ ret = -EINVAL;
+ goto init_failed_free_priv_data;
+ }
+
+ /* make sure not occupied already */
+ stream_buffer = NULL;
+ mpq_adapter_get_stream_if(
+ feed_data->stream_interface,
+ &stream_buffer);
+ if (stream_buffer != NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Video interface %d already occupied!\n",
+ __func__,
+ feed_data->stream_interface);
+ ret = -EBUSY;
+ goto init_failed_free_priv_data;
+ }
+
+ feed_data->video_buffer =
+ &mpq_dmx_info.decoder_buffers[feed_data->stream_interface];
+
+ ret = mpq_dmx_init_streambuffer(
+ mpq_feed, feed_data, feed_data->video_buffer);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_init_streambuffer failed, err = %d\n",
+ __func__, ret);
+ goto init_failed_free_priv_data;
+ }
+
+ ret = mpq_adapter_register_stream_if(
+ feed_data->stream_interface,
+ feed_data->video_buffer);
+
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_adapter_register_stream_if failed, err = %d\n",
+ __func__, ret);
+ goto init_failed_free_stream_buffer;
+ }
+
+ spin_lock_init(&feed_data->video_buffer_lock);
+
+ feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN;
+ feed_data->pes_header_offset = 0;
+ mpq_feed->dvb_demux_feed->pusi_seen = 0;
+ mpq_feed->dvb_demux_feed->peslen = 0;
+ feed_data->fullness_wait_cancel = 0;
+ mpq_streambuffer_get_data_rw_offset(feed_data->video_buffer, NULL,
+ &feed_data->frame_offset);
+ feed_data->last_pattern_offset = 0;
+ feed_data->pending_pattern_len = 0;
+ feed_data->last_framing_match_type = 0;
+ feed_data->found_sequence_header_pattern = 0;
+ memset(&feed_data->prefix_size, 0,
+ sizeof(struct dvb_dmx_video_prefix_size_masks));
+ feed_data->first_prefix_size = 0;
+ feed_data->saved_pts_dts_info.pts_exist = 0;
+ feed_data->saved_pts_dts_info.dts_exist = 0;
+ feed_data->new_pts_dts_info.pts_exist = 0;
+ feed_data->new_pts_dts_info.dts_exist = 0;
+ feed_data->saved_info_used = 1;
+ feed_data->new_info_exists = 0;
+ feed_data->first_pts_dts_copy = 1;
+ feed_data->tei_errs = 0;
+ feed_data->last_continuity = -1;
+ feed_data->continuity_errs = 0;
+ feed_data->ts_packets_num = 0;
+ feed_data->ts_dropped_bytes = 0;
+
+ mpq_demux->decoder_stat[feed_data->stream_interface].drop_count = 0;
+ mpq_demux->decoder_stat[feed_data->stream_interface].out_count = 0;
+ mpq_demux->decoder_stat[feed_data->stream_interface].
+ out_interval_sum = 0;
+ mpq_demux->decoder_stat[feed_data->stream_interface].
+ out_interval_max = 0;
+ mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors = 0;
+ mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors = 0;
+
+ return 0;
+
+init_failed_free_stream_buffer:
+ mpq_dmx_release_streambuffer(mpq_feed, feed_data,
+ feed_data->video_buffer, mpq_demux->ion_client);
+ mpq_adapter_unregister_stream_if(feed_data->stream_interface);
+init_failed_free_priv_data:
+ feed_data->video_buffer = NULL;
+ return ret;
+}
+
+/**
+ * mpq_dmx_terminate_video_feed - terminate video feed information
+ * that was previously initialized in mpq_dmx_init_video_feed
+ *
+ * @mpq_feed: The mpq feed used for the video TS packets
+ *
+ * Return error code.
+ */
+int mpq_dmx_terminate_video_feed(struct mpq_feed *mpq_feed)
+{
+ struct mpq_streambuffer *video_buffer;
+ struct mpq_video_feed_info *feed_data;
+ struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+
+ if (mpq_feed == NULL)
+ return -EINVAL;
+
+ feed_data = &mpq_feed->video_info;
+
+ spin_lock(&feed_data->video_buffer_lock);
+ video_buffer = feed_data->video_buffer;
+ feed_data->video_buffer = NULL;
+ wake_up_all(&video_buffer->raw_data.queue);
+ spin_unlock(&feed_data->video_buffer_lock);
+
+ mpq_dmx_release_streambuffer(mpq_feed, feed_data,
+ video_buffer, mpq_demux->ion_client);
+
+ return 0;
+}
+
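+/*
+ * Find another active feed that shares the same output ring buffer
+ * as the given feed (i.e. its peer in a recording filter).
+ */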
+struct dvb_demux_feed *mpq_dmx_peer_rec_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux_feed *tmp;
+ struct dvb_demux *dvb_demux = feed->demux;
+
+ list_for_each_entry(tmp, &dvb_demux->feed_list, list_head) {
+ if (tmp != feed && tmp->state == DMX_STATE_GO &&
+ tmp->feed.ts.buffer.ringbuff ==
+ feed->feed.ts.buffer.ringbuff) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: main feed pid=%d, secondary feed pid=%d\n",
+ __func__, tmp->pid, feed->pid);
+ return tmp;
+ }
+ }
+
+ return NULL;
+}
+
+static int mpq_sdmx_alloc_data_buf(struct mpq_feed *mpq_feed, size_t size)
+{
+ struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+ void *buf_base;
+ int ret;
+
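+	/* Allocate the sdmx output buffer from the QSECOM ION heap and wrap it in a ring buffer */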
+ mpq_feed->sdmx_buf_handle = ion_alloc(mpq_demux->ion_client,
+ size,
+ SZ_4K,
+ ION_HEAP(ION_QSECOM_HEAP_ID),
+ 0);
+ if (IS_ERR_OR_NULL(mpq_feed->sdmx_buf_handle)) {
+ ret = PTR_ERR(mpq_feed->sdmx_buf_handle);
+ mpq_feed->sdmx_buf_handle = NULL;
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to allocate sdmx buffer %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ buf_base = ion_map_kernel(mpq_demux->ion_client,
+ mpq_feed->sdmx_buf_handle);
+ if (IS_ERR_OR_NULL(buf_base)) {
+ ret = PTR_ERR(buf_base);
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to map sdmx buffer %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto failed_free_buf;
+ }
+
+ dvb_ringbuffer_init(&mpq_feed->sdmx_buf, buf_base, size);
+
+ return 0;
+
+failed_free_buf:
+ ion_free(mpq_demux->ion_client, mpq_feed->sdmx_buf_handle);
+ mpq_feed->sdmx_buf_handle = NULL;
+end:
+ return ret;
+}
+
+static int mpq_sdmx_free_data_buf(struct mpq_feed *mpq_feed)
+{
+ struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+
+ if (mpq_feed->sdmx_buf_handle) {
+ ion_unmap_kernel(mpq_demux->ion_client,
+ mpq_feed->sdmx_buf_handle);
+ mpq_feed->sdmx_buf.data = NULL;
+ ion_free(mpq_demux->ion_client,
+ mpq_feed->sdmx_buf_handle);
+ mpq_feed->sdmx_buf_handle = NULL;
+ }
+
+ return 0;
+}
+
+static int mpq_sdmx_init_metadata_buffer(struct mpq_demux *mpq_demux,
+ struct mpq_feed *feed, struct sdmx_buff_descr *metadata_buff_desc)
+{
+ void *metadata_buff_base;
+ ion_phys_addr_t temp;
+ int ret;
+ size_t size;
+
+ feed->metadata_buf_handle = ion_alloc(mpq_demux->ion_client,
+ SDMX_METADATA_BUFFER_SIZE,
+ SZ_4K,
+ ION_HEAP(ION_QSECOM_HEAP_ID),
+ 0);
+ if (IS_ERR_OR_NULL(feed->metadata_buf_handle)) {
+ ret = PTR_ERR(feed->metadata_buf_handle);
+ feed->metadata_buf_handle = NULL;
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to allocate metadata buffer %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ metadata_buff_base = ion_map_kernel(mpq_demux->ion_client,
+ feed->metadata_buf_handle);
+ if (IS_ERR_OR_NULL(metadata_buff_base)) {
+ ret = PTR_ERR(metadata_buff_base);
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to map metadata buffer %d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -ENOMEM;
+ goto failed_free_metadata_buf;
+ }
+
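+	/* Obtain the physical address and size so they can be passed to the secure demux via the descriptor */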
+ ret = ion_phys(mpq_demux->ion_client,
+ feed->metadata_buf_handle,
+ &temp,
+ &size);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to get physical address %d\n",
+ __func__, ret);
+ goto failed_unmap_metadata_buf;
+ }
+ metadata_buff_desc->size = size;
+ metadata_buff_desc->base_addr = (u64)temp;
+
+ dvb_ringbuffer_init(&feed->metadata_buf, metadata_buff_base,
+ SDMX_METADATA_BUFFER_SIZE);
+
+ return 0;
+
+failed_unmap_metadata_buf:
+ ion_unmap_kernel(mpq_demux->ion_client, feed->metadata_buf_handle);
+failed_free_metadata_buf:
+ ion_free(mpq_demux->ion_client, feed->metadata_buf_handle);
+ feed->metadata_buf_handle = NULL;
+end:
+ return ret;
+}
+
+static int mpq_sdmx_terminate_metadata_buffer(struct mpq_feed *mpq_feed)
+{
+ struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+
+ if (mpq_feed->metadata_buf_handle) {
+ ion_unmap_kernel(mpq_demux->ion_client,
+ mpq_feed->metadata_buf_handle);
+ mpq_feed->metadata_buf.data = NULL;
+ ion_free(mpq_demux->ion_client,
+ mpq_feed->metadata_buf_handle);
+ mpq_feed->metadata_buf_handle = NULL;
+ }
+
+ return 0;
+}
+
+int mpq_dmx_terminate_feed(struct dvb_demux_feed *feed)
+{
+ int ret = 0;
+ struct mpq_demux *mpq_demux;
+ struct mpq_feed *mpq_feed;
+ struct mpq_feed *main_rec_feed = NULL;
+ struct dvb_demux_feed *tmp;
+
+ if (feed == NULL)
+ return -EINVAL;
+
+ mpq_demux = feed->demux->priv;
+
+ mutex_lock(&mpq_demux->mutex);
+ mpq_feed = feed->priv;
+
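+	/* Release any secure demux (sdmx) resources held by this feed */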
+ if (mpq_feed->sdmx_filter_handle != SDMX_INVALID_FILTER_HANDLE) {
+ if (mpq_feed->filter_type == SDMX_RAW_FILTER) {
+ tmp = mpq_dmx_peer_rec_feed(feed);
+ if (tmp)
+ main_rec_feed = tmp->priv;
+ }
+
+ if (main_rec_feed) {
+ /* This feed is part of a recording filter */
+ MPQ_DVB_DBG_PRINT(
+ "%s: Removing raw pid %d from filter %d\n",
+ __func__, feed->pid,
+ mpq_feed->sdmx_filter_handle);
+ ret = sdmx_remove_raw_pid(
+ mpq_demux->sdmx_session_handle,
+ mpq_feed->sdmx_filter_handle, feed->pid);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: SDMX_remove_raw_pid failed. ret = %d\n",
+ __func__, ret);
+
+			/* If the feed we are removing was the primary feed,
+			 * another feed should now become the primary one.
+			 */
+ if (!mpq_feed->secondary_feed)
+ main_rec_feed->secondary_feed = 0;
+ } else {
+ MPQ_DVB_DBG_PRINT("%s: Removing filter %d, pid %d\n",
+ __func__, mpq_feed->sdmx_filter_handle,
+ feed->pid);
+ ret = sdmx_remove_filter(mpq_demux->sdmx_session_handle,
+ mpq_feed->sdmx_filter_handle);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: SDMX_remove_filter failed. ret = %d\n",
+ __func__, ret);
+ }
+
+ mpq_demux->sdmx_filter_count--;
+ mpq_feed->sdmx_filter_handle =
+ SDMX_INVALID_FILTER_HANDLE;
+ }
+
+ mpq_sdmx_close_session(mpq_demux);
+ mpq_demux->num_secure_feeds--;
+ }
+
+ if (dvb_dmx_is_video_feed(feed)) {
+ ret = mpq_dmx_terminate_video_feed(mpq_feed);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_terminate_video_feed failed. ret = %d\n",
+ __func__, ret);
+ }
+
+ if (mpq_feed->sdmx_buf_handle) {
+ wake_up_all(&mpq_feed->sdmx_buf.queue);
+ mpq_sdmx_free_data_buf(mpq_feed);
+ }
+
+ mpq_sdmx_terminate_metadata_buffer(mpq_feed);
+ mpq_demux->num_active_feeds--;
+
+ mutex_unlock(&mpq_demux->mutex);
+
+ return ret;
+}
+
+int mpq_dmx_decoder_fullness_init(struct dvb_demux_feed *feed)
+{
+ if (dvb_dmx_is_video_feed(feed)) {
+ struct mpq_feed *mpq_feed;
+ struct mpq_video_feed_info *feed_data;
+
+ mpq_feed = feed->priv;
+ feed_data = &mpq_feed->video_info;
+ feed_data->fullness_wait_cancel = 0;
+
+ return 0;
+ }
+
+ /* else */
+ MPQ_DVB_DBG_PRINT(
+ "%s: Invalid feed type %d\n",
+ __func__,
+ feed->pes_type);
+
+ return -EINVAL;
+}
+
+/**
+ * Returns whether the free space of the decoder's output
+ * buffer is larger than a specific number of bytes.
+ *
+ * @sbuff: MPQ stream buffer used for decoder data.
+ * @required_space: number of required free bytes in the buffer
+ *
+ * Return 1 if required free bytes are available, 0 otherwise.
+ */
+static inline int mpq_dmx_check_decoder_fullness(
+ struct mpq_streambuffer *sbuff,
+ size_t required_space)
+{
+ ssize_t free = mpq_streambuffer_data_free(sbuff);
+ ssize_t free_meta = mpq_streambuffer_metadata_free(sbuff);
+
+ /* Verify meta-data buffer can contain at least 1 packet */
+ if (free_meta < VIDEO_META_DATA_PACKET_SIZE)
+ return 0;
+
+ /*
+ * For linear buffers, verify there's enough space for this TSP
+	 * and an additional buffer is free, as framing might require one
+ * more buffer to be available.
+ */
+ if (sbuff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+ return (free >= required_space &&
+ sbuff->pending_buffers_count < sbuff->buffers_num-1);
+ else
+ /* Ring buffer mode */
+ return (free >= required_space);
+}
+
+/**
+ * Checks whether the decoder's output buffer has free space
+ * for a specific number of bytes; if not, the function waits
+ * until that amount of free space is available.
+ *
+ * @feed: decoder's feed object
+ * @required_space: number of required free bytes in the buffer
+ * @lock_feed: indicates whether the demux mutex should be taken before
+ * accessing the feed information. Set this to 0 if the caller already
+ * holds the mutex, and to 1 otherwise.
+ *
+ * Return 0 if the required space is available, or an error code
+ * if waiting for buffer fullness was aborted.
+ */
+static int mpq_dmx_decoder_fullness_check(
+ struct dvb_demux_feed *feed,
+ size_t required_space,
+ int lock_feed)
+{
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+ struct mpq_streambuffer *sbuff = NULL;
+ struct mpq_video_feed_info *feed_data;
+ struct mpq_feed *mpq_feed;
+ int ret = 0;
+
+ if (!dvb_dmx_is_video_feed(feed)) {
+ MPQ_DVB_DBG_PRINT("%s: Invalid feed type %d\n",
+ __func__,
+ feed->pes_type);
+ return -EINVAL;
+ }
+
+ if (lock_feed) {
+ mutex_lock(&mpq_demux->mutex);
+ } else if (!mutex_is_locked(&mpq_demux->mutex)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Mutex should have been locked\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ mpq_feed = feed->priv;
+ feed_data = &mpq_feed->video_info;
+
+ sbuff = feed_data->video_buffer;
+ if (sbuff == NULL) {
+ if (lock_feed)
+ mutex_unlock(&mpq_demux->mutex);
+ MPQ_DVB_ERR_PRINT("%s: mpq_streambuffer object is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((feed_data->video_buffer != NULL) &&
+ (!feed_data->fullness_wait_cancel) &&
+ (!mpq_dmx_check_decoder_fullness(sbuff, required_space))) {
+ DEFINE_WAIT(__wait);
+
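+		/*
+		 * Wait until enough space is available; the demux mutex is
+		 * released around schedule() and re-acquired afterwards.
+		 */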
+ for (;;) {
+ prepare_to_wait(&sbuff->raw_data.queue,
+ &__wait,
+ TASK_INTERRUPTIBLE);
+ if (!feed_data->video_buffer ||
+ feed_data->fullness_wait_cancel ||
+ mpq_dmx_check_decoder_fullness(sbuff,
+ required_space))
+ break;
+
+ if (!signal_pending(current)) {
+ mutex_unlock(&mpq_demux->mutex);
+ schedule();
+ mutex_lock(&mpq_demux->mutex);
+ continue;
+ }
+
+ ret = -ERESTARTSYS;
+ break;
+ }
+ finish_wait(&sbuff->raw_data.queue, &__wait);
+ }
+
+ if (ret < 0) {
+ if (lock_feed)
+ mutex_unlock(&mpq_demux->mutex);
+ return ret;
+ }
+
+ if ((feed_data->fullness_wait_cancel) ||
+ (feed_data->video_buffer == NULL)) {
+ if (lock_feed)
+ mutex_unlock(&mpq_demux->mutex);
+ return -EINVAL;
+ }
+
+ if (lock_feed)
+ mutex_unlock(&mpq_demux->mutex);
+ return 0;
+}
+
+int mpq_dmx_decoder_fullness_wait(
+ struct dvb_demux_feed *feed,
+ size_t required_space)
+{
+ return mpq_dmx_decoder_fullness_check(feed, required_space, 1);
+}
+
+int mpq_dmx_decoder_fullness_abort(struct dvb_demux_feed *feed)
+{
+ if (dvb_dmx_is_video_feed(feed)) {
+ struct mpq_feed *mpq_feed;
+ struct mpq_video_feed_info *feed_data;
+ struct dvb_ringbuffer *video_buff;
+
+ mpq_feed = feed->priv;
+ feed_data = &mpq_feed->video_info;
+
+ feed_data->fullness_wait_cancel = 1;
+
+ spin_lock(&feed_data->video_buffer_lock);
+ if (feed_data->video_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: video_buffer released\n",
+ __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ video_buff = &feed_data->video_buffer->raw_data;
+ wake_up_all(&video_buff->queue);
+ spin_unlock(&feed_data->video_buffer_lock);
+
+ return 0;
+ }
+
+ /* else */
+ MPQ_DVB_ERR_PRINT(
+ "%s: Invalid feed type %d\n",
+ __func__,
+ feed->pes_type);
+
+ return -EINVAL;
+}
+
+int mpq_dmx_parse_mandatory_pes_header(
+ struct dvb_demux_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header,
+ const u8 *buf,
+ u32 *ts_payload_offset,
+ int *bytes_avail)
+{
+ int left_size, copy_len;
+
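+	/* Accumulate the mandatory PES header fields, which may span several TS packets */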
+ if (feed_data->pes_header_offset < PES_MANDATORY_FIELDS_LEN) {
+ left_size =
+ PES_MANDATORY_FIELDS_LEN -
+ feed_data->pes_header_offset;
+
+ copy_len = (left_size > *bytes_avail) ?
+ *bytes_avail :
+ left_size;
+
+ memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+ (buf + *ts_payload_offset),
+ copy_len);
+
+ feed_data->pes_header_offset += copy_len;
+
+ if (left_size > *bytes_avail)
+ return -EINVAL;
+
+ /* else - we have the complete mandatory PES header fields */
+ *bytes_avail -= left_size;
+ *ts_payload_offset += left_size;
+
+ /* Make sure the PES packet is valid */
+ if (mpq_dmx_is_valid_video_pes(pes_header) < 0) {
+ /*
+ * Since the new PES header parsing
+ * failed, reset pusi_seen to drop all
+ * data until next PUSI
+ */
+ feed->pusi_seen = 0;
+ feed_data->pes_header_offset = 0;
+
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid packet\n",
+ __func__);
+
+ return -EINVAL;
+ }
+
+ feed_data->pes_header_left_bytes =
+ pes_header->pes_header_data_length;
+ }
+
+ return 0;
+}
+
+static inline void mpq_dmx_get_pts_dts(struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header)
+{
+ struct dmx_pts_dts_info *info = &(feed_data->new_pts_dts_info);
+
+ /* Get PTS/DTS information from PES header */
+
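+ /*
+ * The 33-bit PTS/DTS is split across the PES header into five bit
+ * groups (3 + 8 + 7 + 8 + 7 bits, with marker bits in between);
+ * the shifts below stitch pts_1..pts_5 (and dts_1..dts_5) back
+ * into a single 90kHz counter value.
+ */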
+ if ((pes_header->pts_dts_flag == 2) ||
+ (pes_header->pts_dts_flag == 3)) {
+ info->pts_exist = 1;
+
+ info->pts =
+ ((u64)pes_header->pts_1 << 30) |
+ ((u64)pes_header->pts_2 << 22) |
+ ((u64)pes_header->pts_3 << 15) |
+ ((u64)pes_header->pts_4 << 7) |
+ (u64)pes_header->pts_5;
+ } else {
+ info->pts_exist = 0;
+ info->pts = 0;
+ }
+
+ if (pes_header->pts_dts_flag == 3) {
+ info->dts_exist = 1;
+
+ info->dts =
+ ((u64)pes_header->dts_1 << 30) |
+ ((u64)pes_header->dts_2 << 22) |
+ ((u64)pes_header->dts_3 << 15) |
+ ((u64)pes_header->dts_4 << 7) |
+ (u64)pes_header->dts_5;
+ } else {
+ info->dts_exist = 0;
+ info->dts = 0;
+ }
+
+ feed_data->new_info_exists = 1;
+}
+
+int mpq_dmx_parse_remaining_pes_header(
+ struct dvb_demux_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header,
+ const u8 *buf,
+ u32 *ts_payload_offset,
+ int *bytes_avail)
+{
+ int left_size, copy_len;
+
+ /* Remaining header bytes that need to be processed? */
+ if (!feed_data->pes_header_left_bytes)
+ return 0;
+
+ /* Did we capture the PTS value (if it exists)? */
+ if ((*bytes_avail != 0) &&
+ (feed_data->pes_header_offset <
+ (PES_MANDATORY_FIELDS_LEN+5)) &&
+ ((pes_header->pts_dts_flag == 2) ||
+ (pes_header->pts_dts_flag == 3))) {
+
+ /* 5 more bytes should be there */
+ left_size =
+ PES_MANDATORY_FIELDS_LEN + 5 -
+ feed_data->pes_header_offset;
+
+ copy_len = (left_size > *bytes_avail) ?
+ *bytes_avail :
+ left_size;
+
+ memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+ (buf + *ts_payload_offset),
+ copy_len);
+
+ feed_data->pes_header_offset += copy_len;
+ feed_data->pes_header_left_bytes -= copy_len;
+
+ if (left_size > *bytes_avail)
+ return -EINVAL;
+
+ /* else - we have the PTS */
+ *bytes_avail -= copy_len;
+ *ts_payload_offset += copy_len;
+ }
+
+ /* Did we capture the DTS value (if it exists)? */
+ if ((*bytes_avail != 0) &&
+ (feed_data->pes_header_offset <
+ (PES_MANDATORY_FIELDS_LEN+10)) &&
+ (pes_header->pts_dts_flag == 3)) {
+
+ /* 5 more bytes should be there */
+ left_size =
+ PES_MANDATORY_FIELDS_LEN + 10 -
+ feed_data->pes_header_offset;
+
+ copy_len = (left_size > *bytes_avail) ?
+ *bytes_avail :
+ left_size;
+
+ memcpy((u8 *)((u8 *)pes_header + feed_data->pes_header_offset),
+ (buf + *ts_payload_offset),
+ copy_len);
+
+ feed_data->pes_header_offset += copy_len;
+ feed_data->pes_header_left_bytes -= copy_len;
+
+ if (left_size > *bytes_avail)
+ return -EINVAL;
+
+ /* else - we have the DTS */
+ *bytes_avail -= copy_len;
+ *ts_payload_offset += copy_len;
+ }
+
+ /* Any more header bytes left to process? */
+ if (feed_data->pes_header_left_bytes >= *bytes_avail) {
+ feed_data->pes_header_left_bytes -= *bytes_avail;
+ return -EINVAL;
+ }
+
+ /* get PTS/DTS information from PES header to be written later */
+ mpq_dmx_get_pts_dts(feed_data, pes_header);
+
+ /* Got PES header, process payload */
+ *bytes_avail -= feed_data->pes_header_left_bytes;
+ *ts_payload_offset += feed_data->pes_header_left_bytes;
+ feed_data->pes_header_left_bytes = 0;
+
+ return 0;
+}
+
+static void mpq_dmx_check_continuity(struct mpq_video_feed_info *feed_data,
+ int current_continuity,
+ int discontinuity_indicator)
+{
+ const int max_continuity = 0x0F; /* 4 bits in the TS packet header */
+
+ /* sanity check */
+ if (unlikely((current_continuity < 0) ||
+ (current_continuity > max_continuity))) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: received invalid continuity counter value %d\n",
+ __func__, current_continuity);
+ return;
+ }
+
+ /* reset last continuity */
+ if ((feed_data->last_continuity == -1) ||
+ (discontinuity_indicator)) {
+ feed_data->last_continuity = current_continuity;
+ return;
+ }
+
+ /* check for continuity errors */
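+ /* the 4-bit counter wraps from 15 back to 0, hence the masked increment */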
+ if (current_continuity !=
+ ((feed_data->last_continuity + 1) & max_continuity))
+ feed_data->continuity_errs++;
+
+ /* save for next time */
+ feed_data->last_continuity = current_continuity;
+}
+
+static inline void mpq_dmx_prepare_es_event_data(
+ struct mpq_streambuffer_packet_header *packet,
+ struct mpq_adapter_video_meta_data *meta_data,
+ struct mpq_video_feed_info *feed_data,
+ struct mpq_streambuffer *stream_buffer,
+ struct dmx_data_ready *data,
+ int cookie)
+{
+ struct dmx_pts_dts_info *pts_dts;
+
+ if (meta_data->packet_type == DMX_PES_PACKET) {
+ pts_dts = &meta_data->info.pes.pts_dts_info;
+ data->buf.stc = meta_data->info.pes.stc;
+ } else {
+ pts_dts = &meta_data->info.framing.pts_dts_info;
+ data->buf.stc = meta_data->info.framing.stc;
+ }
+
+ data->data_length = 0;
+ data->buf.handle = packet->raw_data_handle;
+ data->buf.cookie = cookie;
+ data->buf.offset = packet->raw_data_offset;
+ data->buf.len = packet->raw_data_len;
+ data->buf.pts_exists = pts_dts->pts_exist;
+ data->buf.pts = pts_dts->pts;
+ data->buf.dts_exists = pts_dts->dts_exist;
+ data->buf.dts = pts_dts->dts;
+ data->buf.tei_counter = feed_data->tei_errs;
+ data->buf.cont_err_counter = feed_data->continuity_errs;
+ data->buf.ts_packets_num = feed_data->ts_packets_num;
+ data->buf.ts_dropped_bytes = feed_data->ts_dropped_bytes;
+ data->status = DMX_OK_DECODER_BUF;
+
+ MPQ_DVB_DBG_PRINT("%s: cookie=%d\n", __func__, data->buf.cookie);
+
+ /* reset counters */
+ feed_data->ts_packets_num = 0;
+ feed_data->ts_dropped_bytes = 0;
+ feed_data->tei_errs = 0;
+ feed_data->continuity_errs = 0;
+}
+
+static int mpq_sdmx_dvr_buffer_desc(struct mpq_demux *mpq_demux,
+ struct sdmx_buff_descr *buf_desc)
+{
+ struct dvb_ringbuffer *rbuf = (struct dvb_ringbuffer *)
+ mpq_demux->demux.dmx.dvr_input.ringbuff;
+ struct ion_handle *ion_handle =
+ mpq_demux->demux.dmx.dvr_input.priv_handle;
+ ion_phys_addr_t phys_addr;
+ size_t len;
+ int ret;
+
+ ret = ion_phys(mpq_demux->ion_client, ion_handle, &phys_addr, &len);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to obtain physical address of input buffer. ret = %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ buf_desc->base_addr = (u64)phys_addr;
+ buf_desc->size = rbuf->size;
+
+ return 0;
+}
+
+static inline int mpq_dmx_notify_overflow(struct dvb_demux_feed *feed)
+{
+ struct dmx_data_ready data;
+
+ data.data_length = 0;
+ data.status = DMX_OVERRUN_ERROR;
+ return feed->data_ready_cb.ts(&feed->feed.ts, &data);
+}
+
+/**
+ * mpq_dmx_decoder_frame_closure - Helper function to handle closing current
+ * pending frame upon reaching EOS.
+ *
+ * @mpq_demux: mpq demux instance
+ * @mpq_feed: mpq feed object
+ */
+static void mpq_dmx_decoder_frame_closure(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed)
+{
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_streambuffer *stream_buffer;
+ struct mpq_adapter_video_meta_data meta_data;
+ struct mpq_video_feed_info *feed_data;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dmx_data_ready data;
+ int cookie;
+
+ feed_data = &mpq_feed->video_info;
+
+ /*
+ * spin-lock is taken to protect against manipulation of video
+ * output buffer by the API (terminate video feed, re-use of video
+ * buffers).
+ */
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+
+ if (stream_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT("%s: video_buffer released\n", __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return;
+ }
+
+ /* Report last pattern found */
+ if ((feed_data->pending_pattern_len) &&
+ mpq_dmx_is_video_frame(feed->video_codec,
+ feed_data->last_framing_match_type)) {
+ meta_data.packet_type = DMX_FRAMING_INFO_PACKET;
+ mpq_dmx_write_pts_dts(feed_data,
+ &(meta_data.info.framing.pts_dts_info));
+ mpq_dmx_save_pts_dts(feed_data);
+ packet.user_data_len =
+ sizeof(struct mpq_adapter_video_meta_data);
+ packet.raw_data_len = feed_data->pending_pattern_len;
+ packet.raw_data_offset = feed_data->frame_offset;
+ meta_data.info.framing.pattern_type =
+ feed_data->last_framing_match_type;
+ meta_data.info.framing.stc = feed_data->last_framing_match_stc;
+ meta_data.info.framing.continuity_error_counter =
+ feed_data->continuity_errs;
+ meta_data.info.framing.transport_error_indicator_counter =
+ feed_data->tei_errs;
+ meta_data.info.framing.ts_dropped_bytes =
+ feed_data->ts_dropped_bytes;
+ meta_data.info.framing.ts_packets_num =
+ feed_data->ts_packets_num;
+
+ mpq_streambuffer_get_buffer_handle(stream_buffer,
+ 0, /* current write buffer handle */
+ &packet.raw_data_handle);
+
+ mpq_dmx_update_decoder_stat(mpq_feed);
+
+ /* Writing meta-data that includes the framing information */
+ cookie = mpq_streambuffer_pkt_write(stream_buffer, &packet,
+ (u8 *)&meta_data);
+ if (cookie >= 0) {
+ mpq_dmx_prepare_es_event_data(&packet, &meta_data,
+ feed_data, stream_buffer, &data, cookie);
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ } else {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+ __func__, cookie);
+ }
+ }
+
+ spin_unlock(&feed_data->video_buffer_lock);
+}
+
+/**
+ * mpq_dmx_decoder_pes_closure - Helper function to handle closing current PES
+ * upon reaching EOS.
+ *
+ * @mpq_demux: mpq demux instance
+ * @mpq_feed: mpq feed object
+ */
+static void mpq_dmx_decoder_pes_closure(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed)
+{
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_streambuffer *stream_buffer;
+ struct mpq_adapter_video_meta_data meta_data;
+ struct mpq_video_feed_info *feed_data;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dmx_data_ready data;
+ int cookie;
+
+ feed_data = &mpq_feed->video_info;
+
+ /*
+ * spin-lock is taken to protect against manipulation of video
+ * output buffer by the API (terminate video feed, re-use of video
+ * buffers).
+ */
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+
+ if (stream_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT("%s: video_buffer released\n", __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return;
+ }
+
+ /*
+ * Close previous PES.
+ * Push new packet to the meta-data buffer.
+ */
+ if ((feed->pusi_seen) && (feed_data->pes_header_left_bytes == 0)) {
+ packet.raw_data_len = feed->peslen;
+ mpq_streambuffer_get_buffer_handle(stream_buffer,
+ 0, /* current write buffer handle */
+ &packet.raw_data_handle);
+ packet.raw_data_offset = feed_data->frame_offset;
+ packet.user_data_len =
+ sizeof(struct mpq_adapter_video_meta_data);
+
+ mpq_dmx_write_pts_dts(feed_data,
+ &(meta_data.info.pes.pts_dts_info));
+
+ meta_data.packet_type = DMX_PES_PACKET;
+ meta_data.info.pes.stc = feed_data->prev_stc;
+
+ mpq_dmx_update_decoder_stat(mpq_feed);
+
+ cookie = mpq_streambuffer_pkt_write(stream_buffer, &packet,
+ (u8 *)&meta_data);
+ if (cookie >= 0) {
+ /* Save write offset where new PES will begin */
+ mpq_streambuffer_get_data_rw_offset(stream_buffer, NULL,
+ &feed_data->frame_offset);
+ mpq_dmx_prepare_es_event_data(&packet, &meta_data,
+ feed_data, stream_buffer, &data, cookie);
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ } else {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+ __func__, cookie);
+ }
+ }
+ /* Reset PES info */
+ feed->peslen = 0;
+ feed_data->pes_header_offset = 0;
+ feed_data->pes_header_left_bytes = PES_MANDATORY_FIELDS_LEN;
+
+ spin_unlock(&feed_data->video_buffer_lock);
+}
+
+static int mpq_dmx_process_video_packet_framing(
+ struct dvb_demux_feed *feed,
+ const u8 *buf,
+ u64 curr_stc)
+{
+ int bytes_avail;
+ u32 ts_payload_offset;
+ struct mpq_video_feed_info *feed_data;
+ const struct ts_packet_header *ts_header;
+ struct mpq_streambuffer *stream_buffer;
+ struct pes_packet_header *pes_header;
+ struct mpq_demux *mpq_demux;
+ struct mpq_feed *mpq_feed;
+
+ struct dvb_dmx_video_patterns_results framing_res;
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_adapter_video_meta_data meta_data;
+ int bytes_written = 0;
+ int bytes_to_write = 0;
+ int found_patterns = 0;
+ int first_pattern = 0;
+ int i;
+ int is_video_frame = 0;
+ int pending_data_len = 0;
+ int ret = 0;
+ int discontinuity_indicator = 0;
+ struct dmx_data_ready data;
+
+ mpq_demux = feed->demux->priv;
+
+ mpq_feed = feed->priv;
+ feed_data = &mpq_feed->video_info;
+
+ /*
+ * spin-lock is taken to protect against manipulation of video
+ * output buffer by the API (terminate video feed, re-use of video
+ * buffers). Mutex on the video-feed cannot be held here
+ * since SW demux holds a spin-lock while calling write_to_decoder
+ */
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+
+ if (stream_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: video_buffer released\n",
+ __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ ts_header = (const struct ts_packet_header *)buf;
+
+ pes_header = &feed_data->pes_header;
+
+ /* Make sure this TS packet has a payload and is not scrambled */
+ if ((ts_header->sync_byte != 0x47) ||
+ (ts_header->adaptation_field_control == 0) ||
+ (ts_header->adaptation_field_control == 2) ||
+ (ts_header->transport_scrambling_control)) {
+ /* continue to next packet */
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ if (ts_header->payload_unit_start_indicator) { /* PUSI? */
+ if (feed->pusi_seen) { /* Did we see PUSI before? */
+ /*
+ * Double check that we are not in middle of
+ * previous PES header parsing.
+ */
+ if (feed_data->pes_header_left_bytes != 0)
+ MPQ_DVB_ERR_PRINT(
+ "%s: received PUSI while handling PES header of previous PES\n",
+ __func__);
+
+ feed->peslen = 0;
+ feed_data->pes_header_offset = 0;
+ feed_data->pes_header_left_bytes =
+ PES_MANDATORY_FIELDS_LEN;
+ } else {
+ feed->pusi_seen = 1;
+ }
+ }
+
+ /*
+ * Parse PES data only if PUSI was encountered,
+ * otherwise the data is dropped
+ */
+ if (!feed->pusi_seen) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0; /* drop and wait for next packets */
+ }
+
+ ts_payload_offset = sizeof(struct ts_packet_header);
+
+ /*
+ * Skip adaptation field if exists.
+ * Save discontinuity indicator if exists.
+ */
+ if (ts_header->adaptation_field_control == 3) {
+ const struct ts_adaptation_field *adaptation_field =
+ (const struct ts_adaptation_field *)(buf +
+ ts_payload_offset);
+
+ discontinuity_indicator =
+ adaptation_field->discontinuity_indicator;
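+ /* skip adaptation_field_length bytes plus the length byte itself */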
+ ts_payload_offset += buf[ts_payload_offset] + 1;
+ }
+
+ bytes_avail = TS_PACKET_SIZE - ts_payload_offset;
+
+ /* Get the mandatory fields of the video PES header */
+ if (mpq_dmx_parse_mandatory_pes_header(feed, feed_data,
+ pes_header, buf,
+ &ts_payload_offset,
+ &bytes_avail)) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ if (mpq_dmx_parse_remaining_pes_header(feed, feed_data,
+ pes_header, buf,
+ &ts_payload_offset,
+ &bytes_avail)) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ /*
+ * If we reached here,
+ * then we are now at the PES payload data
+ */
+ if (bytes_avail == 0) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ /*
+ * The decoder requires the demux to do framing,
+ * so search for the framing patterns now.
+ */
+ found_patterns = dvb_dmx_video_pattern_search(
+ feed_data->patterns,
+ feed_data->patterns_num,
+ (buf + ts_payload_offset),
+ bytes_avail,
+ &feed_data->prefix_size,
+ &framing_res);
+
+ if (!feed_data->found_sequence_header_pattern) {
+ for (i = 0; i < found_patterns; i++) {
+ if ((framing_res.info[i].type ==
+ DMX_IDX_MPEG_SEQ_HEADER) ||
+ (framing_res.info[i].type ==
+ DMX_IDX_H264_SPS) ||
+ (framing_res.info[i].type ==
+ DMX_IDX_VC1_SEQ_HEADER)) {
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Found Sequence Pattern, buf %p, i = %d, offset = %d, type = %lld\n",
+ __func__, buf, i,
+ framing_res.info[i].offset,
+ framing_res.info[i].type);
+
+ first_pattern = i;
+ feed_data->found_sequence_header_pattern = 1;
+ ts_payload_offset +=
+ framing_res.info[i].offset;
+ bytes_avail -= framing_res.info[i].offset;
+
+ if (framing_res.info[i].used_prefix_size) {
+ feed_data->first_prefix_size =
+ framing_res.info[i].
+ used_prefix_size;
+ }
+ break;
+ }
+ }
+ }
+
+ /*
+ * If decoder requires demux to do framing,
+ * pass data to decoder only after sequence header
+ * or equivalent is found. Otherwise the data is dropped.
+ */
+ if (!feed_data->found_sequence_header_pattern) {
+ feed_data->prev_stc = curr_stc;
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ /* Update error counters based on TS header */
+ feed_data->ts_packets_num++;
+ feed_data->tei_errs += ts_header->transport_error_indicator;
+ mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors +=
+ ts_header->transport_error_indicator;
+ mpq_dmx_check_continuity(feed_data,
+ ts_header->continuity_counter,
+ discontinuity_indicator);
+ mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors +=
+ feed_data->continuity_errs;
+
+ /* Need to back-up the PTS information of the very first frame */
+ if (feed_data->first_pts_dts_copy) {
+ for (i = first_pattern; i < found_patterns; i++) {
+ is_video_frame = mpq_dmx_is_video_frame(
+ feed->video_codec,
+ framing_res.info[i].type);
+
+ if (is_video_frame == 1) {
+ mpq_dmx_save_pts_dts(feed_data);
+ feed_data->first_pts_dts_copy = 0;
+ break;
+ }
+ }
+ }
+
+ /*
+ * Write the prefix used to find the first sequence pattern, if needed.
+ * feed_data->patterns[0]->pattern always contains the sequence
+ * header pattern.
+ */
+ if (feed_data->first_prefix_size) {
+ ret = mpq_streambuffer_data_write(stream_buffer,
+ feed_data->patterns[0]->pattern,
+ feed_data->first_prefix_size);
+ if (ret < 0) {
+ mpq_demux->decoder_stat
+ [feed_data->stream_interface].drop_count +=
+ feed_data->first_prefix_size;
+ feed_data->ts_dropped_bytes +=
+ feed_data->first_prefix_size;
+ MPQ_DVB_DBG_PRINT("%s: could not write prefix\n",
+ __func__);
+ if (ret == -ENOSPC)
+ mpq_dmx_notify_overflow(feed);
+ } else {
+ MPQ_DVB_DBG_PRINT(
+ "%s: Writing pattern prefix of size %d\n",
+ __func__, feed_data->first_prefix_size);
+ /*
+ * update the length of the data we report
+ * to include the size of the prefix that was used.
+ */
+ feed_data->pending_pattern_len +=
+ feed_data->first_prefix_size;
+ }
+ }
+
+ feed->peslen += bytes_avail;
+ pending_data_len += bytes_avail;
+
+ meta_data.packet_type = DMX_FRAMING_INFO_PACKET;
+ packet.user_data_len = sizeof(struct mpq_adapter_video_meta_data);
+
+ /*
+ * Go over all the patterns that were found in this packet.
+ * For each pattern found, write the relevant data to the data
+ * buffer, then write the respective meta-data.
+ * Each pattern can only be reported when the next pattern is found
+ * (in order to know the data length).
+ * There are three possible cases for each pattern:
+ * 1. This is the very first pattern we found in any TS packet in this
+ * feed.
+ * 2. This is the first pattern found in this TS packet, but we've
+ * already found patterns in previous packets.
+ * 3. This is not the first pattern in this packet, i.e., we've
+ * already found patterns in this TS packet.
+ */
+ for (i = first_pattern; i < found_patterns; i++) {
+ if (i == first_pattern) {
+ /*
+ * The way to identify the very first pattern:
+ * 1. It's the first pattern found in this packet.
+ * 2. The pending_pattern_len, which indicates the
+ * data length of the previous pattern that has
+ * not yet been reported, is usually 0. However,
+ * it may be larger than 0 if a prefix was used
+ * to find this pattern (i.e., the pattern was
+ * split over two TS packets). In that case,
+ * pending_pattern_len equals first_prefix_size.
+ * first_prefix_size is set to 0 later in this
+ * function.
+ */
+ if (feed_data->first_prefix_size ==
+ feed_data->pending_pattern_len) {
+ /*
+ * This is the very first pattern, so no
+ * previous pending frame data exists.
+ * Update frame info and skip to the
+ * next frame.
+ */
+ feed_data->last_framing_match_type =
+ framing_res.info[i].type;
+ feed_data->last_pattern_offset =
+ framing_res.info[i].offset;
+ if (framing_res.info[i].used_prefix_size)
+ feed_data->last_framing_match_stc =
+ feed_data->prev_stc;
+ else
+ feed_data->last_framing_match_stc =
+ curr_stc;
+ continue;
+ }
+ /*
+ * This is the first pattern in this
+ * packet and previous frame from
+ * previous packet is pending for report
+ */
+ bytes_to_write = framing_res.info[i].offset;
+ } else {
+ /* Previous pending frame is in the same packet */
+ bytes_to_write =
+ framing_res.info[i].offset -
+ feed_data->last_pattern_offset;
+ }
+
+ ret = mpq_streambuffer_data_write(
+ stream_buffer,
+ (buf + ts_payload_offset + bytes_written),
+ bytes_to_write);
+ if (ret < 0) {
+ mpq_demux->decoder_stat
+ [feed_data->stream_interface].drop_count +=
+ bytes_to_write;
+ feed_data->ts_dropped_bytes += bytes_to_write;
+ MPQ_DVB_DBG_PRINT(
+ "%s: Couldn't write %d bytes to data buffer, ret=%d\n",
+ __func__, bytes_to_write, ret);
+ if (ret == -ENOSPC)
+ mpq_dmx_notify_overflow(feed);
+ } else {
+ bytes_written += bytes_to_write;
+ pending_data_len -= bytes_to_write;
+ feed_data->pending_pattern_len += bytes_to_write;
+ }
+
+ is_video_frame = mpq_dmx_is_video_frame(
+ feed->video_codec,
+ feed_data->last_framing_match_type);
+ if (is_video_frame == 1) {
+ mpq_dmx_write_pts_dts(feed_data,
+ &(meta_data.info.framing.pts_dts_info));
+ mpq_dmx_save_pts_dts(feed_data);
+
+ packet.raw_data_len = feed_data->pending_pattern_len -
+ framing_res.info[i].used_prefix_size;
+ packet.raw_data_offset = feed_data->frame_offset;
+ meta_data.info.framing.pattern_type =
+ feed_data->last_framing_match_type;
+ meta_data.info.framing.stc =
+ feed_data->last_framing_match_stc;
+ meta_data.info.framing.continuity_error_counter =
+ feed_data->continuity_errs;
+ meta_data.info.framing.
+ transport_error_indicator_counter =
+ feed_data->tei_errs;
+ meta_data.info.framing.ts_dropped_bytes =
+ feed_data->ts_dropped_bytes;
+ meta_data.info.framing.ts_packets_num =
+ feed_data->ts_packets_num;
+
+ mpq_streambuffer_get_buffer_handle(
+ stream_buffer,
+ 0, /* current write buffer handle */
+ &packet.raw_data_handle);
+
+ mpq_dmx_update_decoder_stat(mpq_feed);
+
+ /*
+ * Write meta-data that includes the framing information
+ */
+ ret = mpq_streambuffer_pkt_write(stream_buffer, &packet,
+ (u8 *)&meta_data);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+ __func__, ret);
+ if (ret == -ENOSPC)
+ mpq_dmx_notify_overflow(feed);
+ } else {
+ mpq_dmx_prepare_es_event_data(
+ &packet, &meta_data, feed_data,
+ stream_buffer, &data, ret);
+
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+
+ if (feed_data->video_buffer->mode ==
+ MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR)
+ feed_data->frame_offset = 0;
+ else
+ mpq_streambuffer_get_data_rw_offset(
+ feed_data->video_buffer,
+ NULL,
+ &feed_data->frame_offset);
+ }
+
+ /*
+ * In linear buffer mode, after writing the packet we have
+ * switched over to a new linear buffer for the new frame.
+ * In that case, re-write the prefix of the existing frame
+ * if one exists.
+ */
+ if ((MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR ==
+ feed_data->video_buffer->mode) &&
+ framing_res.info[i].used_prefix_size) {
+ ret = mpq_streambuffer_data_write(stream_buffer,
+ feed_data->prev_pattern +
+ DVB_DMX_MAX_PATTERN_LEN -
+ framing_res.info[i].used_prefix_size,
+ framing_res.info[i].used_prefix_size);
+
+ if (ret < 0) {
+ feed_data->pending_pattern_len = 0;
+ mpq_demux->decoder_stat
+ [feed_data->stream_interface].
+ drop_count += bytes_avail;
+ feed_data->ts_dropped_bytes +=
+ framing_res.info[i].used_prefix_size;
+ if (ret == -ENOSPC)
+ mpq_dmx_notify_overflow(feed);
+ } else {
+ feed_data->pending_pattern_len =
+ framing_res.info[i].used_prefix_size;
+ }
+ } else {
+ s32 offset = (s32)feed_data->frame_offset;
+ u32 buff_size =
+ feed_data->video_buffer->buffers[0].size;
+
+ offset -= framing_res.info[i].used_prefix_size;
+ offset += (offset < 0) ? buff_size : 0;
+ feed_data->pending_pattern_len =
+ framing_res.info[i].used_prefix_size;
+
+ if (MPQ_STREAMBUFFER_BUFFER_MODE_RING ==
+ feed_data->video_buffer->mode) {
+ feed_data->frame_offset = (u32)offset;
+ }
+ }
+ }
+
+ /* save the last match for next time */
+ feed_data->last_framing_match_type =
+ framing_res.info[i].type;
+ feed_data->last_pattern_offset =
+ framing_res.info[i].offset;
+ if (framing_res.info[i].used_prefix_size)
+ feed_data->last_framing_match_stc = feed_data->prev_stc;
+ else
+ feed_data->last_framing_match_stc = curr_stc;
+ }
+
+ feed_data->prev_stc = curr_stc;
+ feed_data->first_prefix_size = 0;
+
+ /*
+ * Save the trailing of the TS packet as we might have a pattern
+ * split that we need to re-use when closing the next
+ * video linear buffer.
+ */
+ if (MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR ==
+ feed_data->video_buffer->mode)
+ memcpy(feed_data->prev_pattern,
+ buf + TS_PACKET_SIZE - DVB_DMX_MAX_PATTERN_LEN,
+ DVB_DMX_MAX_PATTERN_LEN);
+
+ if (pending_data_len) {
+ ret = mpq_streambuffer_data_write(
+ stream_buffer,
+ (buf + ts_payload_offset + bytes_written),
+ pending_data_len);
+
+ if (ret < 0) {
+ mpq_demux->decoder_stat
+ [feed_data->stream_interface].drop_count +=
+ pending_data_len;
+ feed_data->ts_dropped_bytes += pending_data_len;
+ MPQ_DVB_DBG_PRINT(
+ "%s: Couldn't write %d pending bytes to data buffer, ret=%d\n",
+ __func__, pending_data_len, ret);
+ if (ret == -ENOSPC)
+ mpq_dmx_notify_overflow(feed);
+ } else {
+ feed_data->pending_pattern_len += pending_data_len;
+ }
+ }
+
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+}
+
+static int mpq_dmx_process_video_packet_no_framing(
+ struct dvb_demux_feed *feed,
+ const u8 *buf,
+ u64 curr_stc)
+{
+ int bytes_avail;
+ u32 ts_payload_offset;
+ struct mpq_video_feed_info *feed_data;
+ const struct ts_packet_header *ts_header;
+ struct mpq_streambuffer *stream_buffer;
+ struct pes_packet_header *pes_header;
+ struct mpq_demux *mpq_demux;
+ struct mpq_feed *mpq_feed;
+ int discontinuity_indicator = 0;
+ struct dmx_data_ready data;
+ int cookie;
+ int ret;
+
+ mpq_demux = feed->demux->priv;
+ mpq_feed = feed->priv;
+ feed_data = &mpq_feed->video_info;
+
+ /*
+ * spin-lock is taken to protect against manipulation of video
+ * output buffer by the API (terminate video feed, re-use of video
+ * buffers). Mutex on the video-feed cannot be held here
+ * since SW demux holds a spin-lock while calling write_to_decoder
+ */
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+ if (stream_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: video_buffer released\n",
+ __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ ts_header = (const struct ts_packet_header *)buf;
+
+ pes_header = &feed_data->pes_header;
+
+ /* Make sure this TS packet has a payload and is not scrambled */
+ if ((ts_header->sync_byte != 0x47) ||
+ (ts_header->adaptation_field_control == 0) ||
+ (ts_header->adaptation_field_control == 2) ||
+ (ts_header->transport_scrambling_control)) {
+ /* continue to next packet */
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ if (ts_header->payload_unit_start_indicator) { /* PUSI? */
+ if (feed->pusi_seen) { /* Did we see PUSI before? */
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_adapter_video_meta_data meta_data;
+
+ /*
+ * Close previous PES.
+ * Push new packet to the meta-data buffer.
+ * Double check that we are not in middle of
+ * previous PES header parsing.
+ */
+
+ if (feed_data->pes_header_left_bytes == 0) {
+ packet.raw_data_len = feed->peslen;
+ mpq_streambuffer_get_buffer_handle(
+ stream_buffer,
+ 0, /* current write buffer handle */
+ &packet.raw_data_handle);
+ packet.raw_data_offset =
+ feed_data->frame_offset;
+ packet.user_data_len =
+ sizeof(struct
+ mpq_adapter_video_meta_data);
+
+ mpq_dmx_write_pts_dts(feed_data,
+ &(meta_data.info.pes.pts_dts_info));
+
+ /* Mark that we detected start of new PES */
+ feed_data->first_pts_dts_copy = 1;
+
+ meta_data.packet_type = DMX_PES_PACKET;
+ meta_data.info.pes.stc = feed_data->prev_stc;
+
+ mpq_dmx_update_decoder_stat(mpq_feed);
+
+ cookie = mpq_streambuffer_pkt_write(
+ stream_buffer, &packet,
+ (u8 *)&meta_data);
+ if (cookie < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+ __func__, cookie);
+ } else {
+ /*
+ * Save write offset where new PES
+ * will begin
+ */
+ mpq_streambuffer_get_data_rw_offset(
+ stream_buffer,
+ NULL,
+ &feed_data->frame_offset);
+
+ mpq_dmx_prepare_es_event_data(
+ &packet, &meta_data,
+ feed_data,
+ stream_buffer, &data, cookie);
+
+ feed->data_ready_cb.ts(&feed->feed.ts,
+ &data);
+ }
+ } else {
+ MPQ_DVB_ERR_PRINT(
+ "%s: received PUSI while handling PES header of previous PES\n",
+ __func__);
+ }
+
+ /* Reset PES info */
+ feed->peslen = 0;
+ feed_data->pes_header_offset = 0;
+ feed_data->pes_header_left_bytes =
+ PES_MANDATORY_FIELDS_LEN;
+ } else {
+ feed->pusi_seen = 1;
+ }
+
+ feed_data->prev_stc = curr_stc;
+ }
+
+ /*
+ * Parse PES data only if PUSI was encountered,
+ * otherwise the data is dropped
+ */
+ if (!feed->pusi_seen) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0; /* drop and wait for next packets */
+ }
+
+ ts_payload_offset = sizeof(struct ts_packet_header);
+
+ /*
+ * Skip adaptation field if exists.
+ * Save discontinuity indicator if exists.
+ */
+ if (ts_header->adaptation_field_control == 3) {
+ const struct ts_adaptation_field *adaptation_field =
+ (const struct ts_adaptation_field *)(buf +
+ ts_payload_offset);
+
+ discontinuity_indicator =
+ adaptation_field->discontinuity_indicator;
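+ /* skip adaptation_field_length bytes plus the length byte itself */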
+ ts_payload_offset += buf[ts_payload_offset] + 1;
+ }
+
+ bytes_avail = TS_PACKET_SIZE - ts_payload_offset;
+
+ /* Get the mandatory fields of the video PES header */
+ if (mpq_dmx_parse_mandatory_pes_header(feed, feed_data,
+ pes_header, buf,
+ &ts_payload_offset,
+ &bytes_avail)) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ if (mpq_dmx_parse_remaining_pes_header(feed, feed_data,
+ pes_header, buf,
+ &ts_payload_offset,
+ &bytes_avail)) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ /*
+ * If we reached here,
+ * then we are now at the PES payload data
+ */
+ if (bytes_avail == 0) {
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ /*
+ * Need to back-up the PTS information
+ * of the start of new PES
+ */
+ if (feed_data->first_pts_dts_copy) {
+ mpq_dmx_save_pts_dts(feed_data);
+ feed_data->first_pts_dts_copy = 0;
+ }
+
+ /* Update error counters based on TS header */
+ feed_data->ts_packets_num++;
+ feed_data->tei_errs += ts_header->transport_error_indicator;
+ mpq_demux->decoder_stat[feed_data->stream_interface].ts_errors +=
+ ts_header->transport_error_indicator;
+ mpq_dmx_check_continuity(feed_data,
+ ts_header->continuity_counter,
+ discontinuity_indicator);
+ mpq_demux->decoder_stat[feed_data->stream_interface].cc_errors +=
+ feed_data->continuity_errs;
+
+ ret = mpq_streambuffer_data_write(stream_buffer, buf+ts_payload_offset,
+ bytes_avail);
+ if (ret < 0) {
+ mpq_demux->decoder_stat
+ [feed_data->stream_interface].drop_count += bytes_avail;
+ feed_data->ts_dropped_bytes += bytes_avail;
+ if (ret == -ENOSPC)
+ mpq_dmx_notify_overflow(feed);
+ } else {
+ feed->peslen += bytes_avail;
+ }
+
+ spin_unlock(&feed_data->video_buffer_lock);
+
+ return 0;
+}
+
+int mpq_dmx_decoder_buffer_status(struct dvb_demux_feed *feed,
+ struct dmx_buffer_status *dmx_buffer_status)
+{
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+ struct mpq_video_feed_info *feed_data;
+ struct mpq_streambuffer *video_buff;
+ struct mpq_feed *mpq_feed;
+
+ if (!dvb_dmx_is_video_feed(feed)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Invalid feed type %d\n",
+ __func__,
+ feed->pes_type);
+ return -EINVAL;
+ }
+
+ mutex_lock(&mpq_demux->mutex);
+
+ mpq_feed = feed->priv;
+ feed_data = &mpq_feed->video_info;
+ video_buff = feed_data->video_buffer;
+ if (!video_buff) {
+ mutex_unlock(&mpq_demux->mutex);
+ return -EINVAL;
+ }
+
+ dmx_buffer_status->error = video_buff->raw_data.error;
+
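+ /*
+ * In linear group mode, fullness is reported in whole decoder
+ * buffers pending consumption; in ring-buffer mode it is reported
+ * at byte granularity.
+ */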
+ if (video_buff->mode == MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR) {
+ dmx_buffer_status->fullness =
+ video_buff->buffers[0].size *
+ video_buff->pending_buffers_count;
+ dmx_buffer_status->free_bytes =
+ video_buff->buffers[0].size *
+ (video_buff->buffers_num -
+ video_buff->pending_buffers_count);
+ dmx_buffer_status->size =
+ video_buff->buffers[0].size *
+ video_buff->buffers_num;
+ } else {
+ dmx_buffer_status->fullness =
+ mpq_streambuffer_data_avail(video_buff);
+ dmx_buffer_status->free_bytes =
+ mpq_streambuffer_data_free(video_buff);
+ dmx_buffer_status->size = video_buff->buffers[0].size;
+ }
+
+ mpq_streambuffer_get_data_rw_offset(
+ video_buff,
+ &dmx_buffer_status->read_offset,
+ &dmx_buffer_status->write_offset);
+
+ mutex_unlock(&mpq_demux->mutex);
+
+ return 0;
+}
+
+int mpq_dmx_process_video_packet(
+ struct dvb_demux_feed *feed,
+ const u8 *buf)
+{
+ u64 curr_stc;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+
+ if ((mpq_demux->source >= DMX_SOURCE_DVR0) &&
+ (mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) {
+ curr_stc = 0;
+ } else {
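+ /*
+ * Assemble the 24-bit timestamp located at STC_LOCATION_IDX;
+ * it ticks at 27MHz/256, so the multiplication below converts
+ * it back to 27MHz STC units.
+ */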
+ curr_stc = buf[STC_LOCATION_IDX + 2] << 16;
+ curr_stc += buf[STC_LOCATION_IDX + 1] << 8;
+ curr_stc += buf[STC_LOCATION_IDX];
+ curr_stc *= 256; /* convert from ~105.47 kHz to 27 MHz */
+ }
+
+ if (!video_framing)
+ return mpq_dmx_process_video_packet_no_framing(feed, buf,
+ curr_stc);
+ else
+ return mpq_dmx_process_video_packet_framing(feed, buf,
+ curr_stc);
+}
+
+int mpq_dmx_extract_pcr_and_dci(const u8 *buf, u64 *pcr, int *dci)
+{
+ const struct ts_packet_header *ts_header;
+ const struct ts_adaptation_field *adaptation_field;
+
+ if (buf == NULL || pcr == NULL || dci == NULL)
+ return 0;
+
+ ts_header = (const struct ts_packet_header *)buf;
+
+ /* Make sure this TS packet has an adaptation field */
+ if ((ts_header->sync_byte != 0x47) ||
+ (ts_header->adaptation_field_control == 0) ||
+ (ts_header->adaptation_field_control == 1) ||
+ ts_header->transport_error_indicator)
+ return 0;
+
+ adaptation_field = (const struct ts_adaptation_field *)
+ (buf + sizeof(struct ts_packet_header));
+
+ if ((!adaptation_field->adaptation_field_length) ||
+ (!adaptation_field->PCR_flag))
+ return 0; /* 0 adaptation field or no PCR */
+
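+ /*
+ * PCR = PCR_base * 300 + PCR_ext: the 33-bit base ticks at 90kHz
+ * and the 9-bit extension at 27MHz, yielding a 27MHz PCR value.
+ */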
+ *pcr = ((u64)adaptation_field->program_clock_reference_base_1) << 25;
+ *pcr += ((u64)adaptation_field->program_clock_reference_base_2) << 17;
+ *pcr += ((u64)adaptation_field->program_clock_reference_base_3) << 9;
+ *pcr += ((u64)adaptation_field->program_clock_reference_base_4) << 1;
+ *pcr += adaptation_field->program_clock_reference_base_5;
+ *pcr *= 300;
+ *pcr += (((u64)adaptation_field->program_clock_reference_ext_1) << 8) +
+ adaptation_field->program_clock_reference_ext_2;
+
+ *dci = adaptation_field->discontinuity_indicator;
+
+ return 1;
+}
+
+int mpq_dmx_process_pcr_packet(
+ struct dvb_demux_feed *feed,
+ const u8 *buf)
+{
+ u64 stc;
+ struct dmx_data_ready data;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+
+ if (mpq_dmx_extract_pcr_and_dci(buf, &data.pcr.pcr,
+ &data.pcr.disc_indicator_set) == 0)
+ return 0;
+
+ /*
+ * When we play from the front-end, the HW is configured to
+ * output the extra timestamp. When playing from DVR, no
+ * timestamp is available unless the packet format is
+ * 192-tail.
+ */
+ if ((mpq_demux->source >= DMX_SOURCE_DVR0) &&
+ (mpq_demux->demux.tsp_format != DMX_TSP_FORMAT_192_TAIL)) {
+ stc = 0;
+ } else {
+ stc = buf[STC_LOCATION_IDX + 2] << 16;
+ stc += buf[STC_LOCATION_IDX + 1] << 8;
+ stc += buf[STC_LOCATION_IDX];
+ stc *= 256; /* convert from ~105.47 kHz to 27 MHz */
+ }
+
+ data.data_length = 0;
+ data.pcr.stc = stc;
+ data.status = DMX_OK_PCR;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+
+ return 0;
+}
+
+int mpq_dmx_decoder_eos_cmd(struct mpq_feed *mpq_feed)
+{
+ struct mpq_video_feed_info *feed_data = &mpq_feed->video_info;
+ struct mpq_streambuffer *stream_buffer;
+ struct mpq_streambuffer_packet_header oob_packet;
+ struct mpq_adapter_video_meta_data oob_meta_data;
+ int ret;
+
+ spin_lock(&feed_data->video_buffer_lock);
+ stream_buffer = feed_data->video_buffer;
+
+ if (stream_buffer == NULL) {
+ MPQ_DVB_DBG_PRINT("%s: video_buffer released\n", __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return 0;
+ }
+
+ memset(&oob_packet, 0, sizeof(oob_packet));
+ oob_packet.user_data_len = sizeof(oob_meta_data);
+ oob_meta_data.packet_type = DMX_EOS_PACKET;
+
+ ret = mpq_streambuffer_pkt_write(stream_buffer, &oob_packet,
+ (u8 *)&oob_meta_data);
+
+ spin_unlock(&feed_data->video_buffer_lock);
+ return (ret < 0) ? ret : 0;
+}
+
+void mpq_dmx_convert_tts(struct dvb_demux_feed *feed,
+ const u8 timestamp[TIMESTAMP_LEN],
+ u64 *timestampIn27Mhz)
+{
+ if (unlikely(!timestampIn27Mhz))
+ return;
+
+ *timestampIn27Mhz = timestamp[2] << 16;
+ *timestampIn27Mhz += timestamp[1] << 8;
+ *timestampIn27Mhz += timestamp[0];
+ *timestampIn27Mhz *= 256; /* convert from ~105.47 kHz to 27 MHz */
+}
+
+int mpq_sdmx_open_session(struct mpq_demux *mpq_demux)
+{
+ enum sdmx_status ret = SDMX_SUCCESS;
+ enum sdmx_proc_mode proc_mode;
+ enum sdmx_pkt_format pkt_format;
+
+ MPQ_DVB_DBG_PRINT("%s: ref_count %d\n",
+ __func__, mpq_demux->sdmx_session_ref_count);
+
+ if (mpq_demux->sdmx_session_ref_count) {
+ /* session is already open */
+ mpq_demux->sdmx_session_ref_count++;
+ return ret;
+ }
+
+ proc_mode = (mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) ?
+ SDMX_PUSH_MODE : SDMX_PULL_MODE;
+ MPQ_DVB_DBG_PRINT(
+ "%s: Proc mode = %s\n",
+ __func__, SDMX_PUSH_MODE == proc_mode ? "Push" : "Pull");
+
+ if (mpq_demux->source < DMX_SOURCE_DVR0) {
+ pkt_format = SDMX_192_BYTE_PKT;
+ } else if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_188) {
+ pkt_format = SDMX_188_BYTE_PKT;
+ } else if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_192_TAIL) {
+ pkt_format = SDMX_192_BYTE_PKT;
+ } else {
+ MPQ_DVB_ERR_PRINT("%s: invalid tsp format\n", __func__);
+ return -EINVAL;
+ }
+
+ MPQ_DVB_DBG_PRINT("%s: (%s) source, packet format: %d\n",
+ __func__,
+ (mpq_demux->source < DMX_SOURCE_DVR0) ?
+ "frontend" : "DVR", pkt_format);
+
+ /* open session and set configuration */
+ ret = sdmx_open_session(&mpq_demux->sdmx_session_handle);
+ if (ret != SDMX_SUCCESS) {
+ MPQ_DVB_ERR_PRINT("%s: Could not open session. ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ MPQ_DVB_DBG_PRINT("%s: new session_handle = %d\n",
+ __func__, mpq_demux->sdmx_session_handle);
+
+ ret = sdmx_set_session_cfg(mpq_demux->sdmx_session_handle,
+ proc_mode,
+ SDMX_PKT_ENC_MODE,
+ pkt_format,
+ mpq_sdmx_scramble_odd,
+ mpq_sdmx_scramble_even);
+ if (ret != SDMX_SUCCESS) {
+ MPQ_DVB_ERR_PRINT("%s: Could not set session config. ret=%d\n",
+ __func__, ret);
+ sdmx_close_session(mpq_demux->sdmx_session_handle);
+ mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
+ return -EINVAL;
+ }
+
+ ret = sdmx_set_log_level(mpq_demux->sdmx_session_handle,
+ mpq_demux->sdmx_log_level);
+ if (ret != SDMX_SUCCESS) {
+ MPQ_DVB_ERR_PRINT("%s: Could not set log level. ret=%d\n",
+ __func__, ret);
+ /* Don't fail open session if just log level setting failed */
+ ret = 0;
+ }
+
+ mpq_demux->sdmx_process_count = 0;
+ mpq_demux->sdmx_process_time_sum = 0;
+ mpq_demux->sdmx_process_time_average = 0;
+ mpq_demux->sdmx_process_time_max = 0;
+ mpq_demux->sdmx_process_packets_sum = 0;
+ mpq_demux->sdmx_process_packets_average = 0;
+ mpq_demux->sdmx_process_packets_min = 0;
+
+ mpq_demux->sdmx_session_ref_count++;
+ return ret;
+}
+
+int mpq_sdmx_close_session(struct mpq_demux *mpq_demux)
+{
+ int ret = 0;
+ enum sdmx_status status;
+
+ MPQ_DVB_DBG_PRINT("%s: session_handle = %d, ref_count %d\n",
+ __func__,
+ mpq_demux->sdmx_session_handle,
+ mpq_demux->sdmx_session_ref_count);
+
+ if (!mpq_demux->sdmx_session_ref_count)
+ return -EINVAL;
+
+ if (mpq_demux->sdmx_session_ref_count == 1) {
+ status = sdmx_close_session(mpq_demux->sdmx_session_handle);
+ if (status != SDMX_SUCCESS) {
+ MPQ_DVB_ERR_PRINT("%s: sdmx_close_session failed %d\n",
+ __func__, status);
+ }
+ mpq_demux->sdmx_eos = 0;
+ mpq_demux->sdmx_session_handle = SDMX_INVALID_SESSION_HANDLE;
+ }
+
+ mpq_demux->sdmx_session_ref_count--;
+
+ return ret;
+}
+
+static int mpq_sdmx_get_buffer_chunks(struct mpq_demux *mpq_demux,
+ struct ion_handle *buff_handle,
+ u32 actual_buff_size,
+ struct sdmx_buff_descr buff_chunks[SDMX_MAX_PHYSICAL_CHUNKS])
+{
+ int i;
+ struct sg_table *sg_ptr;
+ struct scatterlist *sg;
+ u32 chunk_size;
+ int ret;
+
+ memset(buff_chunks, 0,
+ sizeof(struct sdmx_buff_descr) * SDMX_MAX_PHYSICAL_CHUNKS);
+
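+ /*
+ * The ion buffer may be physically scattered; walk its sg table
+ * and describe each contiguous chunk to sdmx, clamping the total
+ * to the actual buffer size.
+ */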
+ sg_ptr = ion_sg_table(mpq_demux->ion_client, buff_handle);
+ if (IS_ERR_OR_NULL(sg_ptr)) {
+ ret = PTR_ERR(sg_ptr);
+ MPQ_DVB_ERR_PRINT("%s: ion_sg_table failed, ret=%d\n",
+ __func__, ret);
+ if (!ret)
+ ret = -EINVAL;
+ return ret;
+ }
+
+ if (sg_ptr->nents == 0) {
+ MPQ_DVB_ERR_PRINT("%s: num of scattered entries is 0\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (sg_ptr->nents > SDMX_MAX_PHYSICAL_CHUNKS) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: num of scattered entries %d greater than max supported %d\n",
+ __func__, sg_ptr->nents, SDMX_MAX_PHYSICAL_CHUNKS);
+ return -EINVAL;
+ }
+
+ sg = sg_ptr->sgl;
+ for (i = 0; i < sg_ptr->nents; i++) {
+ buff_chunks[i].base_addr = (u64)sg_dma_address(sg);
+
+ if (sg->length > actual_buff_size)
+ chunk_size = actual_buff_size;
+ else
+ chunk_size = sg->length;
+
+ buff_chunks[i].size = chunk_size;
+ sg = sg_next(sg);
+ actual_buff_size -= chunk_size;
+ }
+
+ return 0;
+}
+
+static int mpq_sdmx_init_data_buffer(struct mpq_demux *mpq_demux,
+ struct mpq_feed *feed, u32 *num_buffers,
+ struct sdmx_data_buff_descr buf_desc[DMX_MAX_DECODER_BUFFER_NUM],
+ enum sdmx_buf_mode *buf_mode)
+{
+ struct dvb_demux_feed *dvbdmx_feed = feed->dvb_demux_feed;
+ struct dvb_ringbuffer *buffer;
+ struct mpq_video_feed_info *feed_data = &feed->video_info;
+ struct ion_handle *sdmx_buff;
+ int ret;
+ int i;
+
+ *buf_mode = SDMX_RING_BUF;
+
+ if (dvb_dmx_is_video_feed(feed->dvb_demux_feed)) {
+ if (feed_data->buffer_desc.decoder_buffers_num > 1)
+ *buf_mode = SDMX_LINEAR_GROUP_BUF;
+ *num_buffers = feed_data->buffer_desc.decoder_buffers_num;
+
+ for (i = 0; i < *num_buffers; i++) {
+ buf_desc[i].length =
+ feed_data->buffer_desc.desc[i].size;
+
+ ret = mpq_sdmx_get_buffer_chunks(mpq_demux,
+ feed_data->buffer_desc.ion_handle[i],
+ buf_desc[i].length,
+ buf_desc[i].buff_chunks);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_get_buffer_chunks failed\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ return 0;
+ }
+
+ *num_buffers = 1;
+ if (dvb_dmx_is_sec_feed(dvbdmx_feed) ||
+ dvb_dmx_is_pcr_feed(dvbdmx_feed)) {
+ buffer = &feed->sdmx_buf;
+ sdmx_buff = feed->sdmx_buf_handle;
+ } else {
+ buffer = (struct dvb_ringbuffer *)
+ dvbdmx_feed->feed.ts.buffer.ringbuff;
+ sdmx_buff = dvbdmx_feed->feed.ts.buffer.priv_handle;
+ }
+
+ if (sdmx_buff == NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Invalid buffer allocation\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ buf_desc[0].length = buffer->size;
+ ret = mpq_sdmx_get_buffer_chunks(mpq_demux, sdmx_buff,
+ buf_desc[0].length,
+ buf_desc[0].buff_chunks);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_get_buffer_chunks failed\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mpq_sdmx_filter_setup(struct mpq_demux *mpq_demux,
+ struct dvb_demux_feed *dvbdmx_feed)
+{
+ int ret = 0;
+ struct mpq_feed *feed;
+ struct mpq_feed *main_rec_feed = NULL;
+ struct dvb_demux_feed *tmp;
+ struct sdmx_buff_descr metadata_buff_desc;
+ struct sdmx_data_buff_descr *data_buff_desc = NULL;
+ u32 data_buf_num = DMX_MAX_DECODER_BUFFER_NUM;
+ enum sdmx_buf_mode buf_mode;
+ enum sdmx_raw_out_format ts_out_format = SDMX_188_OUTPUT;
+ u32 filter_flags = 0;
+
+ feed = dvbdmx_feed->priv;
+
+ if (dvb_dmx_is_sec_feed(dvbdmx_feed)) {
+ feed->filter_type = SDMX_SECTION_FILTER;
+ if (dvbdmx_feed->feed.sec.check_crc)
+ filter_flags |= SDMX_FILTER_FLAG_VERIFY_SECTION_CRC;
+ MPQ_DVB_DBG_PRINT("%s: SDMX_SECTION_FILTER\n", __func__);
+ } else if (dvb_dmx_is_pcr_feed(dvbdmx_feed)) {
+ feed->filter_type = SDMX_PCR_FILTER;
+ MPQ_DVB_DBG_PRINT("%s: SDMX_PCR_FILTER\n", __func__);
+ } else if (dvb_dmx_is_video_feed(dvbdmx_feed)) {
+ feed->filter_type = SDMX_SEPARATED_PES_FILTER;
+ MPQ_DVB_DBG_PRINT("%s: SDMX_SEPARATED_PES_FILTER\n", __func__);
+ } else if (dvb_dmx_is_rec_feed(dvbdmx_feed)) {
+ feed->filter_type = SDMX_RAW_FILTER;
+ switch (dvbdmx_feed->tsp_out_format) {
+ case (DMX_TSP_FORMAT_188):
+ ts_out_format = SDMX_188_OUTPUT;
+ break;
+ case (DMX_TSP_FORMAT_192_HEAD):
+ ts_out_format = SDMX_192_HEAD_OUTPUT;
+ break;
+ case (DMX_TSP_FORMAT_192_TAIL):
+ ts_out_format = SDMX_192_TAIL_OUTPUT;
+ break;
+ default:
+ MPQ_DVB_ERR_PRINT(
+ "%s: Unsupported TS output format %d\n",
+ __func__, dvbdmx_feed->tsp_out_format);
+ return -EINVAL;
+ }
+ MPQ_DVB_DBG_PRINT("%s: SDMX_RAW_FILTER\n", __func__);
+ } else {
+ feed->filter_type = SDMX_PES_FILTER;
+ MPQ_DVB_DBG_PRINT("%s: SDMX_PES_FILTER\n", __func__);
+ }
+
+ data_buff_desc = vmalloc(
+ sizeof(*data_buff_desc)*DMX_MAX_DECODER_BUFFER_NUM);
+ if (!data_buff_desc) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: failed to allocate memory for data buffer\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /*
+ * Recording feed sdmx filter handle lookup:
+ * In case this is a recording filter with multiple feeds,
+ * this feed is either the first feed of a new recording filter,
+ * or it is another feed of an existing filter for which a filter was
+ * already opened with sdmx. In the latter case, we need to look up the
+ * feed pool for an allocated feed with the same output buffer (meaning
+ * they belong to the same filter) and reuse the already allocated sdmx
+ * filter handle.
+ */
+ if (feed->filter_type == SDMX_RAW_FILTER) {
+ tmp = mpq_dmx_peer_rec_feed(dvbdmx_feed);
+ if (tmp)
+ main_rec_feed = tmp->priv;
+ }
+
+ /*
+ * If this PID is not part of existing recording filter,
+ * configure a new filter to SDMX.
+ */
+ if (!main_rec_feed) {
+ feed->secondary_feed = 0;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Adding new sdmx filter, pid %d, flags=0x%X, ts_out_format=%d\n",
+ __func__, dvbdmx_feed->pid, filter_flags,
+ ts_out_format);
+
+ /*
+ * Meta-data initialization.
+ * Recording filters do not need meta-data buffers.
+ */
+ if (dvb_dmx_is_rec_feed(dvbdmx_feed)) {
+ metadata_buff_desc.base_addr = 0;
+ metadata_buff_desc.size = 0;
+ } else {
+ ret = mpq_sdmx_init_metadata_buffer(mpq_demux, feed,
+ &metadata_buff_desc);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to initialize metadata buffer. ret=%d\n",
+ __func__, ret);
+ goto sdmx_filter_setup_failed;
+ }
+ }
+
+ ret = mpq_sdmx_init_data_buffer(mpq_demux, feed, &data_buf_num,
+ data_buff_desc, &buf_mode);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to initialize data buffer. ret=%d\n",
+ __func__, ret);
+ mpq_sdmx_terminate_metadata_buffer(feed);
+ goto sdmx_filter_setup_failed;
+ }
+ ret = sdmx_add_filter(mpq_demux->sdmx_session_handle,
+ dvbdmx_feed->pid,
+ feed->filter_type,
+ &metadata_buff_desc,
+ buf_mode,
+ data_buf_num,
+ data_buff_desc,
+ &feed->sdmx_filter_handle,
+ ts_out_format,
+ filter_flags);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: SDMX_add_filter failed. ret = %d\n",
+ __func__, ret);
+ ret = -ENODEV;
+ mpq_sdmx_terminate_metadata_buffer(feed);
+ goto sdmx_filter_setup_failed;
+ }
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: feed=0x%p, filter pid=%d, handle=%d, data buffer(s)=%d, size=%d\n",
+ __func__, feed, dvbdmx_feed->pid,
+ feed->sdmx_filter_handle,
+ data_buf_num, data_buff_desc[0].length);
+
+ mpq_demux->sdmx_filter_count++;
+ } else {
+ MPQ_DVB_DBG_PRINT(
+ "%s: Adding RAW pid to sdmx, pid %d\n",
+ __func__, dvbdmx_feed->pid);
+
+ feed->secondary_feed = 1;
+ feed->sdmx_filter_handle = main_rec_feed->sdmx_filter_handle;
+ ret = sdmx_add_raw_pid(mpq_demux->sdmx_session_handle,
+ feed->sdmx_filter_handle, dvbdmx_feed->pid);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to add raw pid, ret=%d\n",
+ __func__, ret);
+ ret = -ENODEV;
+ goto sdmx_filter_setup_failed;
+ }
+ }
+
+ /*
+ * If pid has a key ladder id associated, we need to
+ * set it to SDMX.
+ */
+ if (dvbdmx_feed->secure_mode.is_secured &&
+ dvbdmx_feed->cipher_ops.operations_count) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: set key-ladder %d to PID %d\n",
+ __func__,
+ dvbdmx_feed->cipher_ops.operations[0].key_ladder_id,
+ dvbdmx_feed->cipher_ops.pid);
+
+ ret = sdmx_set_kl_ind(mpq_demux->sdmx_session_handle,
+ dvbdmx_feed->cipher_ops.pid,
+ dvbdmx_feed->cipher_ops.operations[0].key_ladder_id);
+
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to set key ladder, ret=%d\n",
+ __func__, ret);
+ }
+ }
+
+ vfree(data_buff_desc);
+ return 0;
+
+sdmx_filter_setup_failed:
+ vfree(data_buff_desc);
+ return ret;
+}
+
+/**
+ * mpq_sdmx_init_feed - initialize secure demux related elements of mpq feed
+ *
+ * @mpq_demux: mpq_demux object
+ * @mpq_feed: mpq_feed object
+ *
+ * Note: the function assumes mpq_demux->mutex locking is done by caller.
+ */
+static int mpq_sdmx_init_feed(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed)
+{
+ int ret;
+
+ ret = mpq_sdmx_open_session(mpq_demux);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_open_session failed, ret=%d\n",
+ __func__, ret);
+
+ ret = -ENODEV;
+ goto init_sdmx_feed_failed;
+ }
+
+ /* PCR and sections have internal buffer for SDMX */
+ if (dvb_dmx_is_pcr_feed(mpq_feed->dvb_demux_feed))
+ ret = mpq_sdmx_alloc_data_buf(mpq_feed, SDMX_PCR_BUFFER_SIZE);
+ else if (dvb_dmx_is_sec_feed(mpq_feed->dvb_demux_feed))
+ ret = mpq_sdmx_alloc_data_buf(mpq_feed,
+ SDMX_SECTION_BUFFER_SIZE);
+ else
+ ret = 0;
+
+ if (ret) {
+ MPQ_DVB_ERR_PRINT("%s: init buffer failed, ret=%d\n",
+ __func__, ret);
+ goto init_sdmx_feed_failed_free_sdmx;
+ }
+
+ ret = mpq_sdmx_filter_setup(mpq_demux, mpq_feed->dvb_demux_feed);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_filter_setup failed, ret=%d\n",
+ __func__, ret);
+ goto init_sdmx_feed_failed_free_data_buff;
+ }
+
+ mpq_demux->num_secure_feeds++;
+ return 0;
+
+init_sdmx_feed_failed_free_data_buff:
+ mpq_sdmx_free_data_buf(mpq_feed);
+init_sdmx_feed_failed_free_sdmx:
+ mpq_sdmx_close_session(mpq_demux);
+init_sdmx_feed_failed:
+ return ret;
+}
+
+int mpq_dmx_init_mpq_feed(struct dvb_demux_feed *feed)
+{
+ int ret = 0;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+ struct mpq_feed *mpq_feed = feed->priv;
+
+ if (mutex_lock_interruptible(&mpq_demux->mutex))
+ return -ERESTARTSYS;
+
+ mpq_feed->sdmx_buf_handle = NULL;
+ mpq_feed->metadata_buf_handle = NULL;
+ mpq_feed->sdmx_filter_handle = SDMX_INVALID_FILTER_HANDLE;
+
+ if (feed->type != DMX_TYPE_SEC)
+ feed->feed.ts.flush_buffer = mpq_dmx_flush_buffer;
+
+ if (dvb_dmx_is_video_feed(feed)) {
+ ret = mpq_dmx_init_video_feed(mpq_feed);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_init_video_feed failed, ret=%d\n",
+ __func__, ret);
+ goto init_mpq_feed_end;
+ }
+ }
+
+ /*
+ * sdmx is not relevant for recording filters, which always use
+ * regular filters (non-sdmx)
+ */
+ if (!mpq_sdmx_is_loaded() || !feed->secure_mode.is_secured ||
+ dvb_dmx_is_rec_feed(feed)) {
+ if (!mpq_sdmx_is_loaded())
+ mpq_demux->sdmx_session_handle =
+ SDMX_INVALID_SESSION_HANDLE;
+ goto init_mpq_feed_end;
+ }
+
+ /* Initialization of secure demux filters (PES/PCR/Video/Section) */
+ ret = mpq_sdmx_init_feed(mpq_demux, mpq_feed);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_init_feed failed, ret=%d\n",
+ __func__, ret);
+ if (dvb_dmx_is_video_feed(feed))
+ mpq_dmx_terminate_video_feed(mpq_feed);
+ }
+
+init_mpq_feed_end:
+ if (!ret) {
+ mpq_demux->num_active_feeds++;
+ mpq_feed->session_id++;
+ }
+ mutex_unlock(&mpq_demux->mutex);
+ return ret;
+}
+
+/**
+ * Note: Called only when filter is in "GO" state - after feed has been started.
+ */
+int mpq_dmx_set_cipher_ops(struct dvb_demux_feed *feed,
+ struct dmx_cipher_operations *cipher_ops)
+{
+ struct mpq_feed *mpq_feed;
+ struct mpq_demux *mpq_demux;
+ int ret = 0;
+
+ if (!feed || !feed->priv || !cipher_ops) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid parameters\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ MPQ_DVB_DBG_PRINT("%s(%d, %d, %d)\n",
+ __func__, cipher_ops->pid,
+ cipher_ops->operations_count,
+ cipher_ops->operations[0].key_ladder_id);
+
+ if ((cipher_ops->operations_count > 1) ||
+ (cipher_ops->operations_count &&
+ cipher_ops->operations[0].encrypt)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Invalid cipher operations, count=%d, encrypt=%d\n",
+ __func__, cipher_ops->operations_count,
+ cipher_ops->operations[0].encrypt);
+ return -EINVAL;
+ }
+
+ if (!feed->secure_mode.is_secured) {
+ /*
+ * Filter is not configured as secured, setting cipher
+ * operations is not allowed.
+ */
+ MPQ_DVB_ERR_PRINT(
+ "%s: Cannot set cipher operations to non-secure filter\n",
+ __func__);
+ return -EPERM;
+ }
+
+ mpq_feed = feed->priv;
+ mpq_demux = mpq_feed->mpq_demux;
+
+ mutex_lock(&mpq_demux->mutex);
+
+ /*
+ * Feed is running in secure mode, this secure mode request is to
+ * update the key ladder id
+ */
+ if ((mpq_demux->sdmx_session_handle != SDMX_INVALID_SESSION_HANDLE) &&
+ cipher_ops->operations_count) {
+ ret = sdmx_set_kl_ind(mpq_demux->sdmx_session_handle,
+ cipher_ops->pid,
+ cipher_ops->operations[0].key_ladder_id);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: FAILED to set key ladder, ret=%d\n",
+ __func__, ret);
+ ret = -ENODEV;
+ }
+ }
+
+ mutex_unlock(&mpq_demux->mutex);
+
+ return ret;
+}
+
+static int mpq_sdmx_invalidate_buffer(struct mpq_feed *mpq_feed)
+{
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct mpq_video_feed_info *feed_data;
+ struct dvb_ringbuffer *buffer;
+ struct ion_handle *ion_handle;
+ int ret = 0;
+ int i;
+
+ if (!dvb_dmx_is_video_feed(feed)) {
+ if (dvb_dmx_is_sec_feed(feed) ||
+ dvb_dmx_is_pcr_feed(feed)) {
+ buffer = (struct dvb_ringbuffer *)
+ &mpq_feed->sdmx_buf;
+ ion_handle = mpq_feed->sdmx_buf_handle;
+ } else {
+ buffer = (struct dvb_ringbuffer *)
+ feed->feed.ts.buffer.ringbuff;
+ ion_handle = feed->feed.ts.buffer.priv_handle;
+ }
+
+ ret = msm_ion_do_cache_op(mpq_feed->mpq_demux->ion_client,
+ ion_handle, buffer->data,
+ buffer->size, ION_IOC_INV_CACHES);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: msm_ion_do_cache_op failed, ret = %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Video buffers */
+ feed_data = &mpq_feed->video_info;
+ for (i = 0; i < feed_data->buffer_desc.decoder_buffers_num; i++) {
+ if (feed_data->buffer_desc.desc[i].base) {
+ /* Non-secured buffer */
+ ret = msm_ion_do_cache_op(
+ mpq_feed->mpq_demux->ion_client,
+ feed_data->buffer_desc.ion_handle[i],
+ feed_data->buffer_desc.desc[i].base,
+ feed_data->buffer_desc.desc[i].size,
+ ION_IOC_INV_CACHES);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: msm_ion_do_cache_op failed, ret = %d\n",
+ __func__, ret);
+ }
+ }
+
+ return ret;
+}
+
+static void mpq_sdmx_prepare_filter_status(struct mpq_demux *mpq_demux,
+ struct sdmx_filter_status *filter_sts,
+ struct mpq_feed *mpq_feed)
+{
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct mpq_video_feed_info *feed_data;
+ struct mpq_streambuffer *sbuff;
+
+ filter_sts->filter_handle = mpq_feed->sdmx_filter_handle;
+ filter_sts->metadata_fill_count =
+ dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+ filter_sts->metadata_write_offset = mpq_feed->metadata_buf.pwrite;
+ filter_sts->error_indicators = 0;
+ filter_sts->status_indicators = 0;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Filter meta-data buffer status: fill count = %d, write_offset = %d\n",
+ __func__, filter_sts->metadata_fill_count,
+ filter_sts->metadata_write_offset);
+
+ if (!dvb_dmx_is_video_feed(feed)) {
+ struct dvb_ringbuffer *buffer;
+
+ if (dvb_dmx_is_sec_feed(feed) ||
+ dvb_dmx_is_pcr_feed(feed)) {
+ buffer = (struct dvb_ringbuffer *)
+ &mpq_feed->sdmx_buf;
+ } else {
+ buffer = (struct dvb_ringbuffer *)
+ feed->feed.ts.buffer.ringbuff;
+ }
+
+ filter_sts->data_fill_count = dvb_ringbuffer_avail(buffer);
+ filter_sts->data_write_offset = buffer->pwrite;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Filter buffers status: fill count = %d, write_offset = %d\n",
+ __func__, filter_sts->data_fill_count,
+ filter_sts->data_write_offset);
+
+ return;
+ }
+
+ /* Video feed - decoder buffers */
+ feed_data = &mpq_feed->video_info;
+
+ spin_lock(&mpq_feed->video_info.video_buffer_lock);
+ sbuff = feed_data->video_buffer;
+ if (sbuff == NULL) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: video_buffer released\n",
+ __func__);
+ spin_unlock(&feed_data->video_buffer_lock);
+ return;
+ }
+
+ if (feed_data->buffer_desc.decoder_buffers_num > 1) {
+ /* linear mode */
+ filter_sts->data_fill_count = sbuff->pending_buffers_count;
+ filter_sts->data_write_offset =
+ sbuff->raw_data.pwrite /
+ sizeof(struct mpq_streambuffer_buffer_desc);
+ } else {
+ /* ring buffer mode */
+ filter_sts->data_fill_count =
+ mpq_streambuffer_data_avail(sbuff);
+ mpq_streambuffer_get_data_rw_offset(sbuff, NULL,
+ &filter_sts->data_write_offset);
+ }
+
+ spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Decoder buffers filter status: fill count = %d, write_offset = %d\n",
+ __func__, filter_sts->data_fill_count,
+ filter_sts->data_write_offset);
+}
+
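+/*
+ * Match a single section filter against the section currently held in the
+ * feed's sdmx buffer and, on a match, deliver it through the section
+ * callback (handling ring-buffer wrap-around).
+ */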
+static int mpq_sdmx_section_filtering(struct mpq_feed *mpq_feed,
+ struct dvb_demux_filter *f,
+ struct sdmx_metadata_header *header)
+{
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ int ret;
+ u8 neq = 0;
+ u8 xor;
+ u8 tmp;
+ int i;
+
+ if (!mutex_is_locked(&mpq_feed->mpq_demux->mutex)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Mutex should have been locked\n",
+ __func__);
+ return -EINVAL;
+ }
+
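+ /*
+ * Standard dvb-demux section matching: bits covered by maskandmode
+ * must be equal to the filter value, while bits covered by
+ * maskandnotmode must differ in at least one position when doneq
+ * is set.
+ */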
+ for (i = 0; i < DVB_DEMUX_MASK_MAX; i++) {
+ tmp = DVB_RINGBUFFER_PEEK(&mpq_feed->sdmx_buf, i);
+ xor = f->filter.filter_value[i] ^ tmp;
+
+ if (f->maskandmode[i] & xor)
+ return 0;
+
+ neq |= f->maskandnotmode[i] & xor;
+ }
+
+ if (f->doneq && !neq)
+ return 0;
+
+ if (feed->demux->playback_mode == DMX_PB_MODE_PULL) {
+ mutex_unlock(&mpq_feed->mpq_demux->mutex);
+
+ ret = feed->demux->buffer_ctrl.sec(&f->filter,
+ header->payload_length, 1);
+
+ mutex_lock(&mpq_feed->mpq_demux->mutex);
+
+ if (ret) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: buffer_ctrl.sec aborted\n",
+ __func__);
+ return ret;
+ }
+
+ if (mpq_feed->sdmx_filter_handle ==
+ SDMX_INVALID_FILTER_HANDLE) {
+ MPQ_DVB_DBG_PRINT("%s: filter was stopped\n",
+ __func__);
+ return -ENODEV;
+ }
+ }
+
+ if (mpq_feed->sdmx_buf.pread + header->payload_length <
+ mpq_feed->sdmx_buf.size) {
+ feed->cb.sec(&mpq_feed->sdmx_buf.data[mpq_feed->sdmx_buf.pread],
+ header->payload_length,
+ NULL, 0, &f->filter, DMX_OK);
+ } else {
+ int split = mpq_feed->sdmx_buf.size - mpq_feed->sdmx_buf.pread;
+
+ feed->cb.sec(&mpq_feed->sdmx_buf.data[mpq_feed->sdmx_buf.pread],
+ split,
+ &mpq_feed->sdmx_buf.data[0],
+ header->payload_length - split,
+ &f->filter, DMX_OK);
+ }
+
+ return 0;
+}
+
+static int mpq_sdmx_check_ts_stall(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed,
+ struct sdmx_filter_status *sts,
+ size_t req,
+ int events_only)
+{
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ int ret;
+
+ if (!mutex_is_locked(&mpq_feed->mpq_demux->mutex)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Mutex should have been locked\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * In PULL mode we need to verify there is enough space for the dmxdev
+ * event. Also, if the data buffer is full we want to stall until some
+ * data is removed from it, to avoid calling sdmx when it cannot
+ * output data to the still-full buffer.
+ */
+ if (mpq_demux->demux.playback_mode == DMX_PB_MODE_PULL) {
+ MPQ_DVB_DBG_PRINT("%s: Stalling for events and %zu bytes\n",
+ __func__, req);
+
+ mutex_unlock(&mpq_demux->mutex);
+
+ ret = mpq_demux->demux.buffer_ctrl.ts(&feed->feed.ts, req, 1);
+ MPQ_DVB_DBG_PRINT("%s: stall result = %d\n",
+ __func__, ret);
+
+ mutex_lock(&mpq_demux->mutex);
+
+ if (mpq_feed->sdmx_filter_handle ==
+ SDMX_INVALID_FILTER_HANDLE) {
+ MPQ_DVB_DBG_PRINT("%s: filter was stopped\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Handle filter results for filters with no extra meta-data */
+static void mpq_sdmx_pes_filter_results(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed,
+ struct sdmx_filter_status *sts)
+{
+ int ret;
+ struct sdmx_metadata_header header;
+ struct sdmx_pes_counters counters;
+ struct dmx_data_ready data_event;
+ struct dmx_data_ready pes_event;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dvb_ringbuffer *buf = (struct dvb_ringbuffer *)
+ feed->feed.ts.buffer.ringbuff;
+ ssize_t bytes_avail;
+
+ if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+ goto pes_filter_check_overflow;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Meta: fill=%u, write=%u. Data: fill=%u, write=%u\n",
+ __func__, sts->metadata_fill_count, sts->metadata_write_offset,
+ sts->data_fill_count, sts->data_write_offset);
+
+ mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+
+ if ((sts->metadata_fill_count == 0) &&
+ (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) {
+ ssize_t free = dvb_ringbuffer_free(buf);
+
+ ret = 0;
+ if ((free + SZ_2K) < MAX_PES_LENGTH)
+ ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts,
+ free + SZ_2K, 0);
+ else
+ MPQ_DVB_ERR_PRINT(
+ "%s: Cannot stall when free space bigger than max PES size\n",
+ __func__);
+ if (ret) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: mpq_sdmx_check_ts_stall aborted\n",
+ __func__);
+ return;
+ }
+ }
+
+ while (sts->metadata_fill_count) {
+ bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+ if (bytes_avail < (sizeof(header) + sizeof(counters))) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: metadata_fill_count is %d less than required %zu bytes\n",
+ __func__,
+ sts->metadata_fill_count,
+ sizeof(header) + sizeof(counters));
+
+ /* clean-up remaining bytes to try to recover */
+ DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+ bytes_avail);
+ sts->metadata_fill_count = 0;
+ break;
+ }
+
+ dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&header,
+ sizeof(header));
+ MPQ_DVB_DBG_PRINT(
+ "%s: metadata header: start=%u, length=%u\n",
+ __func__, header.payload_start, header.payload_length);
+ sts->metadata_fill_count -= sizeof(header);
+
+ dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&counters,
+ sizeof(counters));
+ sts->metadata_fill_count -= sizeof(counters);
+
+ /* Notify new data in buffer */
+ data_event.status = DMX_OK;
+ data_event.data_length = header.payload_length;
+ ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts,
+ data_event.data_length, 0);
+ if (ret) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: mpq_sdmx_check_ts_stall aborted\n",
+ __func__);
+ return;
+ }
+
+ feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+
+ /* Notify new complete PES */
+ pes_event.status = DMX_OK_PES_END;
+ pes_event.pes_end.actual_length = header.payload_length;
+ pes_event.pes_end.start_gap = 0;
+ pes_event.data_length = 0;
+
+ /* Parse error indicators */
+ if (sts->error_indicators & SDMX_FILTER_ERR_INVALID_PES_LEN)
+ pes_event.pes_end.pes_length_mismatch = 1;
+ else
+ pes_event.pes_end.pes_length_mismatch = 0;
+
+ pes_event.pes_end.disc_indicator_set = 0;
+
+ pes_event.pes_end.stc = 0;
+ pes_event.pes_end.tei_counter = counters.transport_err_count;
+ pes_event.pes_end.cont_err_counter =
+ counters.continuity_err_count;
+ pes_event.pes_end.ts_packets_num =
+ counters.pes_ts_count;
+
+ ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts, 0, 1);
+ if (ret) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: mpq_sdmx_check_ts_stall aborted\n",
+ __func__);
+ return;
+ }
+ feed->data_ready_cb.ts(&feed->feed.ts, &pes_event);
+ }
+
+pes_filter_check_overflow:
+ if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) &&
+ (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) {
+ MPQ_DVB_ERR_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__);
+ mpq_dmx_notify_overflow(feed);
+ }
+
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ data_event.data_length = 0;
+ data_event.status = DMX_OK_EOS;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+ }
+}
+
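+/* Handle sdmx results for section filters */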
+static void mpq_sdmx_section_filter_results(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed,
+ struct sdmx_filter_status *sts)
+{
+ struct sdmx_metadata_header header;
+ struct dmx_data_ready event;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dvb_demux_filter *f;
+ struct dmx_section_feed *sec = &feed->feed.sec;
+ ssize_t bytes_avail;
+
+ /* Parse error indicators */
+ if (sts->error_indicators & SDMX_FILTER_ERR_SEC_VERIF_CRC32_FAIL) {
+ MPQ_DVB_DBG_PRINT("%s: Notify CRC err event\n", __func__);
+ event.status = DMX_CRC_ERROR;
+ event.data_length = 0;
+ dvb_dmx_notify_section_event(feed, &event, 1);
+ }
+
+ if (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)
+ MPQ_DVB_ERR_PRINT("%s: internal section buffer overflowed!\n",
+ __func__);
+
+ if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+ goto section_filter_check_eos;
+
+ mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+ mpq_feed->sdmx_buf.pwrite = sts->data_write_offset;
+
+ while (sts->metadata_fill_count) {
+ bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+ if (bytes_avail < sizeof(header)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: metadata_fill_count is %d less than required %zu bytes\n",
+ __func__,
+ sts->metadata_fill_count,
+ sizeof(header));
+
+ /* clean-up remaining bytes to try to recover */
+ DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+ bytes_avail);
+ sts->metadata_fill_count = 0;
+ break;
+ }
+
+ dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *) &header,
+ sizeof(header));
+ sts->metadata_fill_count -= sizeof(header);
+ MPQ_DVB_DBG_PRINT(
+ "%s: metadata header: start=%u, length=%u\n",
+ __func__, header.payload_start, header.payload_length);
+
+ f = feed->filter;
+ do {
+ if (mpq_sdmx_section_filtering(mpq_feed, f, &header))
+ return;
+ } while ((f = f->next) && sec->is_filtering);
+
+ DVB_RINGBUFFER_SKIP(&mpq_feed->sdmx_buf, header.payload_length);
+ }
+
+section_filter_check_eos:
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ event.data_length = 0;
+ event.status = DMX_OK_EOS;
+ dvb_dmx_notify_section_event(feed, &event, 1);
+ }
+}
+
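+/*
+ * Handle sdmx results for video (separated PES) filters: parse the metadata
+ * records, recover PTS/DTS from the original PES header and report the
+ * payload to the decoder through the stream buffer.
+ */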
+static void mpq_sdmx_decoder_filter_results(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed,
+ struct sdmx_filter_status *sts)
+{
+ struct sdmx_metadata_header header;
+ struct sdmx_pes_counters counters;
+ int pes_header_offset;
+ struct ts_packet_header *ts_header;
+ struct ts_adaptation_field *ts_adapt;
+ struct pes_packet_header *pes_header;
+ u8 metadata_buf[MAX_SDMX_METADATA_LENGTH];
+ struct mpq_streambuffer *sbuf;
+ int ret;
+ struct dmx_data_ready data_event;
+ struct dmx_data_ready data;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ ssize_t bytes_avail;
+
+ if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+ goto decoder_filter_check_flags;
+
+ /* Update meta data buffer write pointer */
+ mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+
+ if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PULL) &&
+ (sts->error_indicators & SDMX_FILTER_ERR_D_LIN_BUFS_FULL)) {
+ MPQ_DVB_DBG_PRINT("%s: Decoder stall...\n", __func__);
+
+ ret = mpq_dmx_decoder_fullness_check(
+ mpq_feed->dvb_demux_feed, 0, 0);
+ if (ret) {
+ /* we reach here if demuxing was aborted */
+ MPQ_DVB_DBG_PRINT(
+ "%s: mpq_dmx_decoder_fullness_check aborted\n",
+ __func__);
+ return;
+ }
+ }
+
+ while (sts->metadata_fill_count) {
+ struct mpq_streambuffer_packet_header packet;
+ struct mpq_adapter_video_meta_data meta_data;
+
+ bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+ if (bytes_avail < (sizeof(header) + sizeof(counters))) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: metadata_fill_count is %d less than required %zu bytes\n",
+ __func__,
+ sts->metadata_fill_count,
+ sizeof(header) + sizeof(counters));
+
+ /* clean-up remaining bytes to try to recover */
+ DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+ bytes_avail);
+ sts->metadata_fill_count = 0;
+ break;
+ }
+
+ /* Read metadata header */
+ dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&header,
+ sizeof(header));
+ sts->metadata_fill_count -= sizeof(header);
+ MPQ_DVB_DBG_PRINT(
+ "%s: metadata header: start=%u, length=%u, metadata=%u\n",
+ __func__, header.payload_start, header.payload_length,
+ header.metadata_length);
+
+ /* Read metadata - PES counters */
+ dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *)&counters,
+ sizeof(counters));
+ sts->metadata_fill_count -= sizeof(counters);
+
+ /* Read metadata - TS & PES headers */
+ bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+ if ((header.metadata_length < MAX_SDMX_METADATA_LENGTH) &&
+ (header.metadata_length >= sizeof(counters)) &&
+ (bytes_avail >=
+ (header.metadata_length - sizeof(counters)))) {
+ dvb_ringbuffer_read(&mpq_feed->metadata_buf,
+ metadata_buf,
+ header.metadata_length - sizeof(counters));
+ } else {
+ MPQ_DVB_ERR_PRINT(
+ "%s: meta-data size %d larger than available meta-data %zd or max allowed %d\n",
+ __func__, header.metadata_length,
+ bytes_avail,
+ MAX_SDMX_METADATA_LENGTH);
+
+ /* clean-up remaining bytes to try to recover */
+ DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+ bytes_avail);
+ sts->metadata_fill_count = 0;
+ break;
+ }
+
+ sts->metadata_fill_count -=
+ (header.metadata_length - sizeof(counters));
+
+ ts_header = (struct ts_packet_header *)&metadata_buf[0];
+ if (ts_header->adaptation_field_control == 1) {
+ ts_adapt = NULL;
+ pes_header_offset = sizeof(*ts_header);
+ } else {
+ ts_adapt = (struct ts_adaptation_field *)
+ &metadata_buf[sizeof(*ts_header)];
+ pes_header_offset = sizeof(*ts_header) + 1 +
+ ts_adapt->adaptation_field_length;
+ }
+ pes_header = (struct pes_packet_header *)
+ &metadata_buf[pes_header_offset];
+ meta_data.packet_type = DMX_PES_PACKET;
+ /* TODO - set to real STC when SDMX supports it */
+ meta_data.info.pes.stc = 0;
+
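+ /*
+ * Reassemble the 33-bit PTS/DTS values from the five bit-field
+ * chunks of the PES header (3+8+7+8+7 bits, MSB first).
+ */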
+ if (pes_header->pts_dts_flag & 0x2) {
+ meta_data.info.pes.pts_dts_info.pts_exist = 1;
+ meta_data.info.pes.pts_dts_info.pts =
+ ((u64)pes_header->pts_1 << 30) |
+ ((u64)pes_header->pts_2 << 22) |
+ ((u64)pes_header->pts_3 << 15) |
+ ((u64)pes_header->pts_4 << 7) |
+ (u64)pes_header->pts_5;
+ } else {
+ meta_data.info.pes.pts_dts_info.pts_exist = 0;
+ }
+
+ if (pes_header->pts_dts_flag & 0x1) {
+ meta_data.info.pes.pts_dts_info.dts_exist = 1;
+ meta_data.info.pes.pts_dts_info.dts =
+ ((u64)pes_header->dts_1 << 30) |
+ ((u64)pes_header->dts_2 << 22) |
+ ((u64)pes_header->dts_3 << 15) |
+ ((u64)pes_header->dts_4 << 7) |
+ (u64)pes_header->dts_5;
+ } else {
+ meta_data.info.pes.pts_dts_info.dts_exist = 0;
+ }
+
+ spin_lock(&mpq_feed->video_info.video_buffer_lock);
+
+ mpq_feed->video_info.tei_errs =
+ counters.transport_err_count;
+ mpq_feed->video_info.continuity_errs =
+ counters.continuity_err_count;
+ mpq_feed->video_info.ts_packets_num =
+ counters.pes_ts_count;
+ mpq_feed->video_info.ts_dropped_bytes =
+ counters.drop_count *
+ mpq_demux->demux.ts_packet_size;
+
+ sbuf = mpq_feed->video_info.video_buffer;
+ if (sbuf == NULL) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: video_buffer released\n",
+ __func__);
+ spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+ return;
+ }
+
+ if (!header.payload_length) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: warnning - video frame with 0 length, dropping\n",
+ __func__);
+ spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+ continue;
+ }
+
+ packet.raw_data_len = header.payload_length;
+ packet.user_data_len = sizeof(meta_data);
+ mpq_streambuffer_get_buffer_handle(sbuf, 0,
+ &packet.raw_data_handle);
+ mpq_streambuffer_get_data_rw_offset(sbuf,
+ NULL, &packet.raw_data_offset);
+ ret = mpq_streambuffer_data_write_deposit(sbuf,
+ header.payload_length);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_streambuffer_data_write_deposit failed. ret=%d\n",
+ __func__, ret);
+ }
+ mpq_dmx_update_decoder_stat(mpq_feed);
+ ret = mpq_streambuffer_pkt_write(sbuf, &packet,
+ (u8 *)&meta_data);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_streambuffer_pkt_write failed, ret=%d\n",
+ __func__, ret);
+ } else {
+ mpq_dmx_prepare_es_event_data(
+ &packet, &meta_data, &mpq_feed->video_info,
+ sbuf, &data, ret);
+ MPQ_DVB_DBG_PRINT("%s: Notify ES Event\n", __func__);
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ }
+
+ spin_unlock(&mpq_feed->video_info.video_buffer_lock);
+ }
+
+decoder_filter_check_flags:
+ if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) &&
+ (sts->error_indicators & SDMX_FILTER_ERR_D_LIN_BUFS_FULL)) {
+ MPQ_DVB_ERR_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__);
+ mpq_dmx_notify_overflow(mpq_feed->dvb_demux_feed);
+ }
+
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ /* Notify decoder via the stream buffer */
+ ret = mpq_dmx_decoder_eos_cmd(mpq_feed);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to notify decoder on EOS, ret=%d\n",
+ __func__, ret);
+
+ /* Notify user filter */
+ data_event.data_length = 0;
+ data_event.status = DMX_OK_EOS;
+ mpq_feed->dvb_demux_feed->data_ready_cb.ts(
+ &mpq_feed->dvb_demux_feed->feed.ts, &data_event);
+ }
+}
+
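+/* Handle sdmx results for PCR filters - extract and report PCR/STC pairs */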
+static void mpq_sdmx_pcr_filter_results(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed,
+ struct sdmx_filter_status *sts)
+{
+ int ret;
+ struct sdmx_metadata_header header;
+ struct dmx_data_ready data;
+ struct dvb_ringbuffer *rbuff = &mpq_feed->sdmx_buf;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ u8 buf[TS_PACKET_HEADER_LENGTH + MAX_TSP_ADAPTATION_LENGTH +
+ TIMESTAMP_LEN];
+ size_t stc_len = 0;
+ ssize_t bytes_avail;
+
+ if (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)
+ MPQ_DVB_ERR_PRINT("%s: internal PCR buffer overflowed!\n",
+ __func__);
+
+ if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+ goto pcr_filter_check_eos;
+
+ if (mpq_demux->demux.tsp_format == DMX_TSP_FORMAT_192_TAIL)
+ stc_len = 4;
+
+ mpq_feed->metadata_buf.pwrite = sts->metadata_write_offset;
+ rbuff->pwrite = sts->data_write_offset;
+
+ while (sts->metadata_fill_count) {
+ bytes_avail = dvb_ringbuffer_avail(&mpq_feed->metadata_buf);
+ if (bytes_avail < sizeof(header)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: metadata_fill_count is %d less than required %zu bytes\n",
+ __func__,
+ sts->metadata_fill_count,
+ sizeof(header));
+
+ /* clean-up remaining bytes to try to recover */
+ DVB_RINGBUFFER_SKIP(&mpq_feed->metadata_buf,
+ bytes_avail);
+ sts->metadata_fill_count = 0;
+ break;
+ }
+
+ dvb_ringbuffer_read(&mpq_feed->metadata_buf, (u8 *) &header,
+ sizeof(header));
+ MPQ_DVB_DBG_PRINT(
+ "%s: metadata header: start=%u, length=%u\n",
+ __func__, header.payload_start, header.payload_length);
+ sts->metadata_fill_count -= sizeof(header);
+
+ dvb_ringbuffer_read(rbuff, buf, header.payload_length);
+
+ if (mpq_dmx_extract_pcr_and_dci(buf, &data.pcr.pcr,
+ &data.pcr.disc_indicator_set)) {
+
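+ /*
+ * 192-byte packets carry a 4-byte tail: a 24-bit TTS (LSB
+ * first) followed by a TSIF flags byte.
+ */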
+ if (stc_len) {
+ data.pcr.stc =
+ buf[header.payload_length-2] << 16;
+ data.pcr.stc +=
+ buf[header.payload_length-3] << 8;
+ data.pcr.stc += buf[header.payload_length-4];
+ /* convert from 105.47 kHz to 27MHz */
+ data.pcr.stc *= 256;
+ } else {
+ data.pcr.stc = 0;
+ }
+
+ data.data_length = 0;
+ data.status = DMX_OK_PCR;
+ ret = mpq_sdmx_check_ts_stall(
+ mpq_demux, mpq_feed, sts, 0, 1);
+ if (ret) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: mpq_sdmx_check_ts_stall aborted\n",
+ __func__);
+ return;
+ }
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ }
+ }
+
+pcr_filter_check_eos:
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ data.data_length = 0;
+ data.status = DMX_OK_EOS;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data);
+ }
+}
+
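+/* Handle sdmx results for raw TS filters */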
+static void mpq_sdmx_raw_filter_results(struct mpq_demux *mpq_demux,
+ struct mpq_feed *mpq_feed,
+ struct sdmx_filter_status *sts)
+{
+ int ret;
+ ssize_t new_data;
+ struct dmx_data_ready data_event;
+ struct dvb_demux_feed *feed = mpq_feed->dvb_demux_feed;
+ struct dvb_ringbuffer *buf = (struct dvb_ringbuffer *)
+ feed->feed.ts.buffer.ringbuff;
+
+ if ((!sts->metadata_fill_count) && (!sts->data_fill_count))
+ goto raw_filter_check_flags;
+
+ new_data = sts->data_write_offset - buf->pwrite;
+ if (new_data < 0)
+ new_data += buf->size;
+
+ ret = mpq_sdmx_check_ts_stall(mpq_demux, mpq_feed, sts,
+ new_data + feed->demux->ts_packet_size, 0);
+ if (ret) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: mpq_sdmx_check_ts_stall aborted\n",
+ __func__);
+ return;
+ }
+
+ data_event.status = DMX_OK;
+ data_event.data_length = new_data;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+ MPQ_DVB_DBG_PRINT("%s: Callback DMX_OK, size=%d\n",
+ __func__, data_event.data_length);
+
+raw_filter_check_flags:
+ if ((mpq_demux->demux.playback_mode == DMX_PB_MODE_PUSH) &&
+ (sts->error_indicators & SDMX_FILTER_ERR_D_BUF_FULL)) {
+ MPQ_DVB_DBG_PRINT("%s: DMX_OVERRUN_ERROR\n", __func__);
+ mpq_dmx_notify_overflow(feed);
+ }
+
+ if (sts->status_indicators & SDMX_FILTER_STATUS_EOS) {
+ data_event.data_length = 0;
+ data_event.status = DMX_OK_EOS;
+ feed->data_ready_cb.ts(&feed->feed.ts, &data_event);
+ }
+}
+
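+/*
+ * Go over the filter status array returned by sdmx_process() and dispatch
+ * each filter's results to the handler matching its filter type.
+ */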
+static void mpq_sdmx_process_results(struct mpq_demux *mpq_demux)
+{
+ int i;
+ int sdmx_filters;
+ struct sdmx_filter_status *sts;
+ struct mpq_feed *mpq_feed;
+ u8 mpq_feed_idx;
+
+ sdmx_filters = mpq_demux->sdmx_filter_count;
+ for (i = 0; i < sdmx_filters; i++) {
+ sts = &mpq_demux->sdmx_filters_state.status[i];
+ MPQ_DVB_DBG_PRINT(
+ "%s: Filter: handle=%d, status=0x%x, errors=0x%x\n",
+ __func__, sts->filter_handle, sts->status_indicators,
+ sts->error_indicators);
+ MPQ_DVB_DBG_PRINT("%s: Metadata fill count=%d (write=%d)\n",
+ __func__, sts->metadata_fill_count,
+ sts->metadata_write_offset);
+ MPQ_DVB_DBG_PRINT("%s: Data fill count=%d (write=%d)\n",
+ __func__, sts->data_fill_count, sts->data_write_offset);
+
+ mpq_feed_idx = mpq_demux->sdmx_filters_state.mpq_feed_idx[i];
+ mpq_feed = &mpq_demux->feeds[mpq_feed_idx];
+ if ((mpq_feed->dvb_demux_feed->state != DMX_STATE_GO) ||
+ (sts->filter_handle != mpq_feed->sdmx_filter_handle) ||
+ mpq_feed->secondary_feed ||
+ (mpq_demux->sdmx_filters_state.session_id[i] !=
+ mpq_feed->session_id))
+ continue;
+
+ /* Invalidate output buffer before processing the results */
+ mpq_sdmx_invalidate_buffer(mpq_feed);
+
+ if (sts->error_indicators & SDMX_FILTER_ERR_MD_BUF_FULL)
+ MPQ_DVB_ERR_PRINT(
+ "%s: meta-data buff for pid %d overflowed!\n",
+ __func__, mpq_feed->dvb_demux_feed->pid);
+
+ switch (mpq_feed->filter_type) {
+ case SDMX_PCR_FILTER:
+ mpq_sdmx_pcr_filter_results(mpq_demux, mpq_feed, sts);
+ break;
+ case SDMX_PES_FILTER:
+ mpq_sdmx_pes_filter_results(mpq_demux, mpq_feed,
+ sts);
+ break;
+ case SDMX_SEPARATED_PES_FILTER:
+ mpq_sdmx_decoder_filter_results(mpq_demux, mpq_feed,
+ sts);
+ break;
+ case SDMX_SECTION_FILTER:
+ mpq_sdmx_section_filter_results(mpq_demux, mpq_feed,
+ sts);
+ break;
+ case SDMX_RAW_FILTER:
+ mpq_sdmx_raw_filter_results(mpq_demux, mpq_feed, sts);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
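+/*
+ * Run a single sdmx_process() call over the input buffer: build the filter
+ * status array, invoke the secure demux and then process its results.
+ * Returns the number of input bytes consumed by sdmx, or an error code.
+ */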
+static int mpq_sdmx_process_buffer(struct mpq_demux *mpq_demux,
+ struct sdmx_buff_descr *input,
+ u32 fill_count,
+ u32 read_offset)
+{
+ struct sdmx_filter_status *sts;
+ struct mpq_feed *mpq_feed;
+ u8 flags = 0;
+ u32 errors;
+ u32 status;
+ u32 prev_read_offset;
+ u32 prev_fill_count;
+ enum sdmx_status sdmx_res;
+ int i;
+ int filter_index = 0;
+ int bytes_read;
+ struct timespec process_start_time;
+ struct timespec process_end_time;
+
+ mutex_lock(&mpq_demux->mutex);
+
+ /*
+ * All active filters may have been closed and the sdmx session
+ * terminated, in which case there is nothing to process
+ */
+ if (mpq_demux->sdmx_session_handle == SDMX_INVALID_SESSION_HANDLE) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: sdmx filters aborted, filter-count %d, session %d\n",
+ __func__, mpq_demux->sdmx_filter_count,
+ mpq_demux->sdmx_session_handle);
+ mutex_unlock(&mpq_demux->mutex);
+ return 0;
+ }
+
+ /* Set input flags */
+ if (mpq_demux->sdmx_eos)
+ flags |= SDMX_INPUT_FLAG_EOS;
+ if (mpq_sdmx_debug)
+ flags |= SDMX_INPUT_FLAG_DBG_ENABLE;
+
+ /* Build up to date filter status array */
+ for (i = 0; i < MPQ_MAX_DMX_FILES; i++) {
+ mpq_feed = &mpq_demux->feeds[i];
+ if ((mpq_feed->sdmx_filter_handle != SDMX_INVALID_FILTER_HANDLE)
+ && (!mpq_feed->secondary_feed)) {
+ sts = mpq_demux->sdmx_filters_state.status +
+ filter_index;
+ mpq_sdmx_prepare_filter_status(mpq_demux, sts,
+ mpq_feed);
+ mpq_demux->sdmx_filters_state.mpq_feed_idx[filter_index]
+ = i;
+ mpq_demux->sdmx_filters_state.session_id[filter_index] =
+ mpq_feed->session_id;
+ filter_index++;
+ }
+ }
+
+ /* Sanity check */
+ if (filter_index != mpq_demux->sdmx_filter_count) {
+ mutex_unlock(&mpq_demux->mutex);
+ MPQ_DVB_ERR_PRINT(
+ "%s: Updated %d SDMX filters status but should be %d\n",
+ __func__, filter_index, mpq_demux->sdmx_filter_count);
+ return -ERESTART;
+ }
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: Before SDMX_process: input read_offset=%u, fill count=%u\n",
+ __func__, read_offset, fill_count);
+
+ process_start_time = current_kernel_time();
+
+ prev_read_offset = read_offset;
+ prev_fill_count = fill_count;
+ sdmx_res = sdmx_process(mpq_demux->sdmx_session_handle, flags, input,
+ &fill_count, &read_offset, &errors, &status,
+ mpq_demux->sdmx_filter_count,
+ mpq_demux->sdmx_filters_state.status);
+
+ process_end_time = current_kernel_time();
+ bytes_read = prev_fill_count - fill_count;
+
+ mpq_dmx_update_sdmx_stat(mpq_demux, bytes_read,
+ &process_start_time, &process_end_time);
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: SDMX result=%d, input_fill_count=%u, read_offset=%u, read %d bytes from input, status=0x%X, errors=0x%X\n",
+ __func__, sdmx_res, fill_count, read_offset, bytes_read,
+ status, errors);
+
+ if ((sdmx_res == SDMX_SUCCESS) ||
+ (sdmx_res == SDMX_STATUS_STALLED_IN_PULL_MODE)) {
+ if (sdmx_res == SDMX_STATUS_STALLED_IN_PULL_MODE)
+ MPQ_DVB_DBG_PRINT("%s: SDMX stalled for PULL mode\n",
+ __func__);
+
+ mpq_sdmx_process_results(mpq_demux);
+ } else {
+ MPQ_DVB_ERR_PRINT(
+ "%s: SDMX Process returned %d\n",
+ __func__, sdmx_res);
+ }
+
+ mutex_unlock(&mpq_demux->mutex);
+
+ return bytes_read;
+}
+
+int mpq_sdmx_process(struct mpq_demux *mpq_demux,
+ struct sdmx_buff_descr *input,
+ u32 fill_count,
+ u32 read_offset,
+ size_t tsp_size)
+{
+ int ret;
+ int todo;
+ int total_bytes_read = 0;
+ int limit = mpq_sdmx_proc_limit * tsp_size;
+
+ MPQ_DVB_DBG_PRINT(
+ "\n\n%s: read_offset=%u, fill_count=%u, tsp_size=%zu\n",
+ __func__, read_offset, fill_count, tsp_size);
+
+ while (fill_count >= tsp_size) {
+ todo = fill_count > limit ? limit : fill_count;
+ ret = mpq_sdmx_process_buffer(mpq_demux, input, todo,
+ read_offset);
+
+ if (mpq_demux->demux.sw_filter_abort) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Demuxing from DVR was aborted\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (ret > 0) {
+ total_bytes_read += ret;
+ fill_count -= ret;
+ read_offset += ret;
+ if (read_offset >= input->size)
+ read_offset -= input->size;
+ } else {
+ /*
+ * ret < 0: some error occurred
+ * ret == 0: not enough data (less than 1 TS packet)
+ */
+ if (ret < 0)
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_process_buffer failed, returned %d\n",
+ __func__, ret);
+ break;
+ }
+ }
+
+ return total_bytes_read;
+}
+
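+/*
+ * Pass DVR input data to the secure demux: build the input buffer
+ * descriptor, clean the cache so sdmx reads coherent data, and process it.
+ */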
+static int mpq_sdmx_write(struct mpq_demux *mpq_demux,
+ struct ion_handle *input_handle,
+ const char *buf,
+ size_t count)
+{
+ struct ion_handle *ion_handle =
+ mpq_demux->demux.dmx.dvr_input.priv_handle;
+ struct dvb_ringbuffer *rbuf = (struct dvb_ringbuffer *)
+ mpq_demux->demux.dmx.dvr_input.ringbuff;
+ struct sdmx_buff_descr buf_desc;
+ u32 read_offset;
+ int ret;
+
+ if (mpq_demux == NULL || input_handle == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = mpq_sdmx_dvr_buffer_desc(mpq_demux, &buf_desc);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to init input buffer descriptor. ret = %d\n",
+ __func__, ret);
+ return ret;
+ }
+ read_offset = mpq_demux->demux.dmx.dvr_input.ringbuff->pread;
+
+ /*
+ * We must flush the buffer before SDMX starts reading from it
+ * so that valid data is present in memory when it reads.
+ */
+ ret = msm_ion_do_cache_op(mpq_demux->ion_client,
+ ion_handle, rbuf->data,
+ rbuf->size, ION_IOC_CLEAN_CACHES);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: msm_ion_do_cache_op failed, ret = %d\n",
+ __func__, ret);
+
+ return mpq_sdmx_process(mpq_demux, &buf_desc, count,
+ read_offset, mpq_demux->demux.ts_packet_size);
+}
+
+int mpq_dmx_write(struct dmx_demux *demux, const char *buf, size_t count)
+{
+ struct dvb_demux *dvb_demux;
+ struct mpq_demux *mpq_demux;
+ int ret = count;
+
+ if (demux == NULL)
+ return -EINVAL;
+
+ dvb_demux = demux->priv;
+ mpq_demux = dvb_demux->priv;
+
+ /* Route through secure demux - process secure feeds if any exist */
+ if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) {
+ ret = mpq_sdmx_write(mpq_demux,
+ demux->dvr_input.priv_handle,
+ buf,
+ count);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_sdmx_write failed. ret = %d\n",
+ __func__, ret);
+ ret = count;
+ }
+ }
+
+ /*
+ * Route through sw filter - process non-secure feeds if any exist.
+ * The sw filter should process the same number of bytes the sdmx
+ * process managed to consume, unless some sdmx error occurred, in
+ * which case the whole buffer should be processed.
+ */
+ if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds)
+ dvb_dmx_swfilter_format(dvb_demux, buf, ret,
+ dvb_demux->tsp_format);
+
+ if (signal_pending(current))
+ return -EINTR;
+
+ return ret;
+}
+
+int mpq_sdmx_is_loaded(void)
+{
+ static int sdmx_load_checked;
+
+ if (!sdmx_load_checked) {
+ mpq_sdmx_check_app_loaded();
+ sdmx_load_checked = 1;
+ }
+
+ return mpq_dmx_info.secure_demux_app_loaded;
+}
+
+int mpq_dmx_oob_command(struct dvb_demux_feed *feed,
+ struct dmx_oob_command *cmd)
+{
+ struct mpq_feed *mpq_feed = feed->priv;
+ struct mpq_demux *mpq_demux = mpq_feed->mpq_demux;
+ struct dmx_data_ready event;
+ int ret = 0;
+
+ mutex_lock(&mpq_demux->mutex);
+ mpq_feed = feed->priv;
+
+ if (!dvb_dmx_is_video_feed(feed) && !dvb_dmx_is_pcr_feed(feed) &&
+ !feed->secure_mode.is_secured) {
+ mutex_unlock(&mpq_demux->mutex);
+ return 0;
+ }
+
+ event.data_length = 0;
+
+ switch (cmd->type) {
+ case DMX_OOB_CMD_EOS:
+ event.status = DMX_OK_EOS;
+ if (!feed->secure_mode.is_secured) {
+ if (dvb_dmx_is_video_feed(feed)) {
+ if (!video_framing)
+ mpq_dmx_decoder_pes_closure(mpq_demux,
+ mpq_feed);
+ else
+ mpq_dmx_decoder_frame_closure(mpq_demux,
+ mpq_feed);
+ ret = mpq_dmx_decoder_eos_cmd(mpq_feed);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: Couldn't write oob eos packet\n",
+ __func__);
+ }
+ ret = feed->data_ready_cb.ts(&feed->feed.ts, &event);
+ } else if (!mpq_demux->sdmx_eos) {
+ struct sdmx_buff_descr buf_desc;
+
+ mpq_demux->sdmx_eos = 1;
+ ret = mpq_sdmx_dvr_buffer_desc(mpq_demux, &buf_desc);
+ if (!ret) {
+ mutex_unlock(&mpq_demux->mutex);
+ mpq_sdmx_process_buffer(mpq_demux, &buf_desc,
+ 0, 0);
+ return 0;
+ }
+ }
+ break;
+ case DMX_OOB_CMD_MARKER:
+ event.status = DMX_OK_MARKER;
+ event.marker.id = cmd->params.marker.id;
+
+ if (feed->type == DMX_TYPE_SEC)
+ ret = dvb_dmx_notify_section_event(feed, &event, 1);
+ else
+ /* MPQ_TODO: Notify decoder via the stream buffer */
+ ret = feed->data_ready_cb.ts(&feed->feed.ts, &event);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ mutex_unlock(&mpq_demux->mutex);
+ return ret;
+}
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
new file mode 100644
index 000000000000..f36e9e7e7a23
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_common.h
@@ -0,0 +1,1027 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_DMX_PLUGIN_COMMON_H
+#define _MPQ_DMX_PLUGIN_COMMON_H
+
+#include <linux/msm_ion.h>
+
+#include "dvbdev.h"
+#include "dmxdev.h"
+#include "demux.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "mpq_adapter.h"
+#include "mpq_sdmx.h"
+
+#define TS_PACKET_SYNC_BYTE (0x47)
+#define TS_PACKET_SIZE (188)
+#define TS_PACKET_HEADER_LENGTH (4)
+
+/* Length of mandatory fields that must exist in header of video PES */
+#define PES_MANDATORY_FIELDS_LEN 9
+
+/*
+ * 500 PES header packets in the meta-data buffer,
+ * should be more than enough
+ */
+#define VIDEO_NUM_OF_PES_PACKETS 500
+
+#define VIDEO_META_DATA_PACKET_SIZE \
+ (DVB_RINGBUFFER_PKTHDRSIZE + \
+ sizeof(struct mpq_streambuffer_packet_header) + \
+ sizeof(struct mpq_adapter_video_meta_data))
+
+#define VIDEO_META_DATA_BUFFER_SIZE \
+ (VIDEO_NUM_OF_PES_PACKETS * VIDEO_META_DATA_PACKET_SIZE)
+
+/* Maximum number of open() requests that can be made on the demux device */
+#define MPQ_MAX_DMX_FILES 128
+
+/**
+ * TSIF alias name length
+ */
+#define TSIF_NAME_LENGTH 20
+
+/**
+ * struct ts_packet_header - Transport packet header
+ * as defined in MPEG2 transport stream standard.
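+ * The 13-bit PID is split into pid_msb (5 bits) and pid_lsb (8 bits);
+ * the full PID is (pid_msb << 8) | pid_lsb.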
+ */
+struct ts_packet_header {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned sync_byte:8;
+ unsigned transport_error_indicator:1;
+ unsigned payload_unit_start_indicator:1;
+ unsigned transport_priority:1;
+ unsigned pid_msb:5;
+ unsigned pid_lsb:8;
+ unsigned transport_scrambling_control:2;
+ unsigned adaptation_field_control:2;
+ unsigned continuity_counter:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned sync_byte:8;
+ unsigned pid_msb:5;
+ unsigned transport_priority:1;
+ unsigned payload_unit_start_indicator:1;
+ unsigned transport_error_indicator:1;
+ unsigned pid_lsb:8;
+ unsigned continuity_counter:4;
+ unsigned adaptation_field_control:2;
+ unsigned transport_scrambling_control:2;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+/**
+ * struct ts_adaptation_field - Adaptation field prefix
+ * as defined in MPEG2 transport stream standard.
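+ * The 33-bit PCR base is split across program_clock_reference_base_1..5
+ * (8+8+8+8+1 bits, MSB first).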
+ */
+struct ts_adaptation_field {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned adaptation_field_length:8;
+ unsigned discontinuity_indicator:1;
+ unsigned random_access_indicator:1;
+ unsigned elementary_stream_priority_indicator:1;
+ unsigned PCR_flag:1;
+ unsigned OPCR_flag:1;
+ unsigned splicing_point_flag:1;
+ unsigned transport_private_data_flag:1;
+ unsigned adaptation_field_extension_flag:1;
+ unsigned program_clock_reference_base_1:8;
+ unsigned program_clock_reference_base_2:8;
+ unsigned program_clock_reference_base_3:8;
+ unsigned program_clock_reference_base_4:8;
+ unsigned program_clock_reference_base_5:1;
+ unsigned reserved:6;
+ unsigned program_clock_reference_ext_1:1;
+ unsigned program_clock_reference_ext_2:8;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned adaptation_field_length:8;
+ unsigned adaptation_field_extension_flag:1;
+ unsigned transport_private_data_flag:1;
+ unsigned splicing_point_flag:1;
+ unsigned OPCR_flag:1;
+ unsigned PCR_flag:1;
+ unsigned elementary_stream_priority_indicator:1;
+ unsigned random_access_indicator:1;
+ unsigned discontinuity_indicator:1;
+ unsigned program_clock_reference_base_1:8;
+ unsigned program_clock_reference_base_2:8;
+ unsigned program_clock_reference_base_3:8;
+ unsigned program_clock_reference_base_4:8;
+ unsigned program_clock_reference_ext_1:1;
+ unsigned reserved:6;
+ unsigned program_clock_reference_base_5:1;
+ unsigned program_clock_reference_ext_2:8;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+
+/*
+ * PES packet header containing dts and/or pts values
+ * as defined in MPEG2 transport stream standard.
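+ * The 33-bit PTS and DTS values are split into the pts_1..pts_5 and
+ * dts_1..dts_5 fields (3+8+7+8+7 bits, MSB first), separated by marker bits.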
+ */
+struct pes_packet_header {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ unsigned packet_start_code_prefix_1:8;
+ unsigned packet_start_code_prefix_2:8;
+ unsigned packet_start_code_prefix_3:8;
+ unsigned stream_id:8;
+ unsigned pes_packet_length_msb:8;
+ unsigned pes_packet_length_lsb:8;
+ unsigned reserved_bits0:2;
+ unsigned pes_scrambling_control:2;
+ unsigned pes_priority:1;
+ unsigned data_alignment_indicator:1;
+ unsigned copyright:1;
+ unsigned original_or_copy:1;
+ unsigned pts_dts_flag:2;
+ unsigned escr_flag:1;
+ unsigned es_rate_flag:1;
+ unsigned dsm_trick_mode_flag:1;
+ unsigned additional_copy_info_flag:1;
+ unsigned pes_crc_flag:1;
+ unsigned pes_extension_flag:1;
+ unsigned pes_header_data_length:8;
+ unsigned reserved_bits1:4;
+ unsigned pts_1:3;
+ unsigned marker_bit0:1;
+ unsigned pts_2:8;
+ unsigned pts_3:7;
+ unsigned marker_bit1:1;
+ unsigned pts_4:8;
+ unsigned pts_5:7;
+ unsigned marker_bit2:1;
+ unsigned reserved_bits2:4;
+ unsigned dts_1:3;
+ unsigned marker_bit3:1;
+ unsigned dts_2:8;
+ unsigned dts_3:7;
+ unsigned marker_bit4:1;
+ unsigned dts_4:8;
+ unsigned dts_5:7;
+ unsigned marker_bit5:1;
+ unsigned reserved_bits3:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ unsigned packet_start_code_prefix_1:8;
+ unsigned packet_start_code_prefix_2:8;
+ unsigned packet_start_code_prefix_3:8;
+ unsigned stream_id:8;
+ unsigned pes_packet_length_lsb:8;
+ unsigned pes_packet_length_msb:8;
+ unsigned original_or_copy:1;
+ unsigned copyright:1;
+ unsigned data_alignment_indicator:1;
+ unsigned pes_priority:1;
+ unsigned pes_scrambling_control:2;
+ unsigned reserved_bits0:2;
+ unsigned pes_extension_flag:1;
+ unsigned pes_crc_flag:1;
+ unsigned additional_copy_info_flag:1;
+ unsigned dsm_trick_mode_flag:1;
+ unsigned es_rate_flag:1;
+ unsigned escr_flag:1;
+ unsigned pts_dts_flag:2;
+ unsigned pes_header_data_length:8;
+ unsigned marker_bit0:1;
+ unsigned pts_1:3;
+ unsigned reserved_bits1:4;
+ unsigned pts_2:8;
+ unsigned marker_bit1:1;
+ unsigned pts_3:7;
+ unsigned pts_4:8;
+ unsigned marker_bit2:1;
+ unsigned pts_5:7;
+ unsigned marker_bit3:1;
+ unsigned dts_1:3;
+ unsigned reserved_bits2:4;
+ unsigned dts_2:8;
+ unsigned marker_bit4:1;
+ unsigned dts_3:7;
+ unsigned dts_4:8;
+ unsigned marker_bit5:1;
+ unsigned dts_5:7;
+ unsigned reserved_bits3:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __packed;
+
+/**
+ * mpq_decoder_buffers_desc - decoder buffer(s) management information.
+ *
+ * @desc: Array of buffer descriptors as they are passed to mpq_streambuffer
+ * upon its initialization. These descriptors must remain valid as long as
+ * the mpq_streambuffer object is used.
+ * @ion_handle: Array of ION handles, one for each decoder buffer, used for
+ * kernel memory mapping or allocation. Handles are saved in order to release
+ * resources properly later on.
+ * @decoder_buffers_num: number of buffers that are managed, either externally
+ * or internally by the mpq_streambuffer object
+ * @shared_file: File handle of internally allocated video buffer shared
+ * with video consumer.
+ */
+struct mpq_decoder_buffers_desc {
+ struct mpq_streambuffer_buffer_desc desc[DMX_MAX_DECODER_BUFFER_NUM];
+ struct ion_handle *ion_handle[DMX_MAX_DECODER_BUFFER_NUM];
+ u32 decoder_buffers_num;
+ struct file *shared_file;
+};
+
+/*
+ * mpq_video_feed_info - private data used for video feed.
+ *
+ * @video_buffer: Holds the streamer buffer shared with
+ * the decoder for feeds having the data going to the decoder.
+ * @video_buffer_lock: Lock protecting the video output buffer.
+ * The lock protects against API calls to manipulate the output buffer
+ * (initialize, free, re-use buffers) and dvb-sw demux parsing the video
+ * data through mpq_dmx_process_video_packet().
+ * @buffer_desc: Holds decoder buffer(s) information used for stream buffer.
+ * @pes_header: Used for feeds that output data to decoder,
+ * holds PES header of current processed PES.
+ * @pes_header_left_bytes: Used for feeds that output data to decoder,
+ * holds remaining PES header bytes of current processed PES.
+ * @pes_header_offset: Holds the offset within the current processed
+ * pes header.
+ * @fullness_wait_cancel: Flag used to signal to abort waiting for
+ * decoder's fullness.
+ * @stream_interface: The ID of the video stream interface registered
+ * with this stream buffer.
+ * @patterns: pointer to the framing patterns to look for.
+ * @patterns_num: number of framing patterns.
+ * @prev_pattern: holds the trailing data of the last processed video packet.
+ * @frame_offset: Saves data buffer offset to which a new frame will be written
+ * @last_pattern_offset: Holds the previous pattern offset
+ * @pending_pattern_len: Accumulated number of data bytes that will be
+ * reported for this frame.
+ * @last_framing_match_type: Used for saving the type of
+ * the previous pattern match found in this video feed.
+ * @last_framing_match_stc: Used for saving the STC attached to TS packet
+ * of the previous pattern match found in this video feed.
+ * @found_sequence_header_pattern: Flag used to note that an MPEG-2
+ * Sequence Header, H.264 SPS or VC-1 Sequence Header pattern
+ * (whichever is relevant according to the video standard) had already
+ * been found.
+ * @prefix_size: a bit mask representing the size(s) of possible prefixes
+ * to the pattern, already found in the previous buffer. If bit 0 is set,
+ * a prefix of size 1 was found. If bit 1 is set, a prefix of size 2 was
+ * found, etc. This supports a prefix size of up to 32, which is more
+ * than we need. The search function updates prefix_size as needed
+ * for the next buffer search.
+ * @first_prefix_size: used to save the prefix size used to find the first
+ * pattern written to the stream buffer.
+ * @saved_pts_dts_info: used to save PTS/DTS information until it is written.
+ * @new_pts_dts_info: used to store PTS/DTS information from current PES header.
+ * @saved_info_used: indicates if saved PTS/DTS information was used.
+ * @new_info_exists: indicates if new PTS/DTS information exists in
+ * new_pts_dts_info that should be saved to saved_pts_dts_info.
+ * @first_pts_dts_copy: a flag used to indicate if PTS/DTS information needs
+ * to be copied from the currently parsed PES header to the saved_pts_dts_info.
+ * @tei_errs: Transport stream Transport Error Indicator (TEI) counter.
+ * @last_continuity: last continuity counter value found in TS packet header.
+ * Initialized to -1.
+ * @continuity_errs: Transport stream continuity error counter.
+ * @ts_packets_num: TS packets counter.
+ * @ts_dropped_bytes: counts the number of bytes dropped due to insufficient
+ * buffer space.
+ * @prev_stc: STC attached to the previous video TS packet
+ */
+struct mpq_video_feed_info {
+ struct mpq_streambuffer *video_buffer;
+ spinlock_t video_buffer_lock;
+ struct mpq_decoder_buffers_desc buffer_desc;
+ struct pes_packet_header pes_header;
+ u32 pes_header_left_bytes;
+ u32 pes_header_offset;
+ int fullness_wait_cancel;
+ enum mpq_adapter_stream_if stream_interface;
+const struct dvb_dmx_video_patterns *patterns[DVB_DMX_MAX_SEARCH_PATTERN_NUM];
+ int patterns_num;
+ char prev_pattern[DVB_DMX_MAX_PATTERN_LEN];
+ u32 frame_offset;
+ u32 last_pattern_offset;
+ u32 pending_pattern_len;
+ u64 last_framing_match_type;
+ u64 last_framing_match_stc;
+ int found_sequence_header_pattern;
+ struct dvb_dmx_video_prefix_size_masks prefix_size;
+ u32 first_prefix_size;
+ struct dmx_pts_dts_info saved_pts_dts_info;
+ struct dmx_pts_dts_info new_pts_dts_info;
+ int saved_info_used;
+ int new_info_exists;
+ int first_pts_dts_copy;
+ u32 tei_errs;
+ int last_continuity;
+ u32 continuity_errs;
+ u32 ts_packets_num;
+ u32 ts_dropped_bytes;
+ u64 prev_stc;
+};
+
+/**
+ * mpq feed object - mpq common plugin feed information
+ *
+ * @dvb_demux_feed: Back pointer to dvb demux level feed object
+ * @mpq_demux: Pointer to common mpq demux object
+ * @plugin_priv: Plugin specific private data
+ * @sdmx_filter_handle: Secure demux filter handle. Recording feeds may share
+ * the same filter handle
+ * @secondary_feed: Specifies if this feed shares a filter handle with
+ * other feeds
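+ * @filter_type: The secure demux filter type allocated for this feed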
+ * @metadata_buf: Ring buffer object for managing the metadata buffer
+ * @metadata_buf_handle: Allocation handle for the metadata buffer
+ * @session_id: Counter that is incremented every time feed is initialized
+ * through mpq_dmx_init_mpq_feed
+ * @sdmx_buf: Ring buffer object for intermediate output data from the sdmx
+ * @sdmx_buf_handle: Allocation handle for the sdmx intermediate data buffer
+ * @video_info: Video feed specific information
+ */
+struct mpq_feed {
+ struct dvb_demux_feed *dvb_demux_feed;
+ struct mpq_demux *mpq_demux;
+ void *plugin_priv;
+
+ /* Secure demux related */
+ int sdmx_filter_handle;
+ int secondary_feed;
+ enum sdmx_filter filter_type;
+ struct dvb_ringbuffer metadata_buf;
+ struct ion_handle *metadata_buf_handle;
+
+ u8 session_id;
+ struct dvb_ringbuffer sdmx_buf;
+ struct ion_handle *sdmx_buf_handle;
+
+ struct mpq_video_feed_info video_info;
+};
+
+/**
+ * struct mpq_demux - mpq demux information
+ * @idx: Instance index
+ * @demux: The dvb_demux instance used by mpq_demux
+ * @dmxdev: The dmxdev instance used by mpq_demux
+ * @fe_memory: Handle of front-end memory source to mpq_demux
+ * @source: The current source connected to the demux
+ * @is_initialized: Indicates whether this demux device was
+ * initialized or not.
+ * @ion_client: ION demux client used to allocate memory from ION.
+ * @mutex: Lock used to protect private feed data
+ * @feeds: mpq common feed object pool
+ * @num_active_feeds: Number of active mpq feeds
+ * @num_secure_feeds: Number of secure feeds (feeds with an associated sdmx
+ * filter) currently allocated.
+ * Used before each call to sdmx_process() to build up-to-date state.
+ * @sdmx_session_handle: Secure demux open session handle
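+ * @sdmx_session_ref_count: Reference count of the opened sdmx session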
+ * @sdmx_filter_count: Number of active secure demux filters
+ * @sdmx_eos: End-of-stream indication flag for current sdmx session
+ * @sdmx_filters_state: Array holding buffers status for each secure
+ * demux filter.
+ * @decoder_alloc_flags: ION flags to be used when allocating internally
+ * @plugin_priv: Underlying plugin's own private data
+ * @mpq_dmx_plugin_release: Underlying plugin's release function
+ * @hw_notification_interval: Notification interval in msec,
+ * exposed in debugfs.
+ * @hw_notification_min_interval: Minimum notification interval in msec,
+ * exposed in debugfs.
+ * @hw_notification_count: Notification count, exposed in debugfs.
+ * @hw_notification_size: Notification size in bytes, exposed in debugfs.
+ * @hw_notification_min_size: Minimum notification size in bytes,
+ * exposed in debugfs.
+ * @decoder_stat: Decoder output statistics, exposed in debugfs.
+ * @sdmx_process_count: Total number of times sdmx_process is called.
+ * @sdmx_process_time_sum: Total time sdmx_process takes.
+ * @sdmx_process_time_average: Average time sdmx_process takes.
+ * @sdmx_process_time_max: Max time sdmx_process takes.
+ * @sdmx_process_packets_sum: Total number of packets handled by sdmx_process.
+ * @sdmx_process_packets_average: Average number of packets handled by
+ * sdmx_process.
+ * @sdmx_process_packets_min: Minimum number of packets handled by
+ * sdmx_process.
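+ * @sdmx_log_level: Secure demux log level, exposed in debugfs.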
+ * @last_notification_time: Time of last HW notification.
+ */
+struct mpq_demux {
+ int idx;
+ struct dvb_demux demux;
+ struct dmxdev dmxdev;
+ struct dmx_frontend fe_memory;
+ dmx_source_t source;
+ int is_initialized;
+ struct ion_client *ion_client;
+ struct mutex mutex;
+ struct mpq_feed feeds[MPQ_MAX_DMX_FILES];
+ u32 num_active_feeds;
+ u32 num_secure_feeds;
+ int sdmx_session_handle;
+ int sdmx_session_ref_count;
+ int sdmx_filter_count;
+ int sdmx_eos;
+ struct {
+ /* SDMX filters status */
+ struct sdmx_filter_status status[MPQ_MAX_DMX_FILES];
+
+ /* Index of the feed respective to SDMX filter */
+ u8 mpq_feed_idx[MPQ_MAX_DMX_FILES];
+
+ /*
+ * Snapshot of session_id of the feed
+ * when SDMX process was called. This is used
+ * to identify whether the feed has been
+ * restarted when processing SDMX results.
+ * May happen when demux is stalled in playback
+ * from memory with PULL mode.
+ */
+ u8 session_id[MPQ_MAX_DMX_FILES];
+ } sdmx_filters_state;
+
+ unsigned int decoder_alloc_flags;
+
+ /* HW plugin specific */
+ void *plugin_priv;
+ int (*mpq_dmx_plugin_release)(struct mpq_demux *mpq_demux);
+
+ /* debug-fs */
+ u32 hw_notification_interval;
+ u32 hw_notification_min_interval;
+ u32 hw_notification_count;
+ u32 hw_notification_size;
+ u32 hw_notification_min_size;
+
+ struct {
+ /*
+ * Accumulated number of bytes
+ * dropped due to decoder buffer fullness.
+ */
+ u32 drop_count;
+
+ /* Counter incremented for each video frame output by demux */
+ u32 out_count;
+
+ /*
+ * Sum of intervals (msec) holding the time
+ * between two successive video frames output.
+ */
+ u32 out_interval_sum;
+
+ /*
+ * Average interval (msec) between two
+ * successive video frames output.
+ */
+ u32 out_interval_average;
+
+ /*
+ * Max interval (msec) between two
+ * successive video frames output.
+ */
+ u32 out_interval_max;
+
+ /* Counter for number of decoder packets with TEI bit set */
+ u32 ts_errors;
+
+ /*
+ * Counter for number of decoder packets
+ * with continuity counter errors.
+ */
+ u32 cc_errors;
+
+ /* Time of last video frame output */
+ struct timespec out_last_time;
+ } decoder_stat[MPQ_ADAPTER_MAX_NUM_OF_INTERFACES];
+
+ u32 sdmx_process_count;
+ u32 sdmx_process_time_sum;
+ u32 sdmx_process_time_average;
+ u32 sdmx_process_time_max;
+ u32 sdmx_process_packets_sum;
+ u32 sdmx_process_packets_average;
+ u32 sdmx_process_packets_min;
+ enum sdmx_log_level sdmx_log_level;
+
+ struct timespec last_notification_time;
+};
+
+/**
+ * mpq_dmx_init - initialization and registration function of
+ * single MPQ demux device
+ *
+ * @adapter: The adapter to register mpq_demux to
+ * @mpq_demux: The mpq demux to initialize
+ *
+ * Every HW plug-in needs to provide implementation of such
+ * function that will be called for each demux device on the
+ * module initialization. The function mpq_demux_plugin_init
+ * should be called during the HW plug-in module initialization.
+ */
+typedef int (*mpq_dmx_init)(struct dvb_adapter *mpq_adapter,
+ struct mpq_demux *demux);
+
+/**
+ * mpq_demux_plugin_init - Initialize demux devices and register
+ * them to the dvb adapter.
+ *
+ * @dmx_init_func: Pointer to the function to be used
+ * to initialize demux of the underlying HW plugin.
+ *
+ * Return error code
+ *
+ * Should be called at the HW plugin module initialization.
+ */
+int mpq_dmx_plugin_init(mpq_dmx_init dmx_init_func);
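+
+/*
+ * Illustrative usage from a HW plugin module, where my_plugin_dmx_init is
+ * a placeholder for the plugin's own mpq_dmx_init implementation:
+ *
+ *	static int __init my_plugin_module_init(void)
+ *	{
+ *		return mpq_dmx_plugin_init(my_plugin_dmx_init);
+ *	}
+ */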
+
+/**
+ * mpq_demux_plugin_exit - terminate demux devices.
+ *
+ * Should be called at the HW plugin module termination.
+ */
+void mpq_dmx_plugin_exit(void);
+
+/**
+ * mpq_dmx_set_source - implementation of the set_source routine.
+ *
+ * @demux: The demux device to set its source.
+ * @src: The source to be set.
+ *
+ * Return error code
+ *
+ * Can be used by the underlying plugins to implement kernel
+ * demux API set_source routine.
+ */
+int mpq_dmx_set_source(struct dmx_demux *demux, const dmx_source_t *src);
+
+/**
+ * mpq_dmx_map_buffer - map user-space buffer into kernel space.
+ *
+ * @demux: The demux device.
+ * @dmx_buffer: The demux buffer from user-space, assumes that
+ * buffer handle is ION file-handle.
+ * @priv_handle: Saves ION-handle of the buffer imported by this function.
+ * @kernel_mem: Saves kernel mapped address of the buffer.
+ *
+ * Return error code
+ *
+ * The function maps the buffer into kernel memory only if the buffer
+ * was not allocated with secure flag, otherwise the returned kernel
+ * memory address is set to NULL.
+ */
+int mpq_dmx_map_buffer(struct dmx_demux *demux, struct dmx_buffer *dmx_buffer,
+ void **priv_handle, void **kernel_mem);
+
+/**
+ * mpq_dmx_unmap_buffer - unmap user-space buffer from kernel space memory.
+ *
+ * @demux: The demux device.
+ * @priv_handle: ION-handle of the buffer returned from mpq_dmx_map_buffer.
+ *
+ * Return error code
+ *
+ * The function unmaps the buffer from kernel memory only if the buffer
+ * was not allocated with secure flag.
+ */
+int mpq_dmx_unmap_buffer(struct dmx_demux *demux, void *priv_handle);
+
+/**
+ * mpq_dmx_decoder_fullness_init - Initialize waiting
+ * mechanism on decoder's buffer fullness.
+ *
+ * @feed: The decoder's feed
+ *
+ * Return error code.
+ */
+int mpq_dmx_decoder_fullness_init(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_decoder_fullness_wait - Checks whether the decoder buffer
+ * has the required free space and, if not, waits for it.
+ *
+ * @feed: The decoder's feed
+ * @required_space: the required free space to wait for
+ *
+ * Return error code.
+ */
+int mpq_dmx_decoder_fullness_wait(struct dvb_demux_feed *feed,
+ size_t required_space);
+
+/**
+ * mpq_dmx_decoder_fullness_abort - Aborts any wait currently done
+ * on the decoder's buffer fullness. After calling this, the user
+ * must call mpq_dmx_decoder_fullness_init before waiting again.
+ *
+ * @feed: The decoder's feed
+ *
+ * Return error code.
+ */
+int mpq_dmx_decoder_fullness_abort(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_decoder_buffer_status - Returns the
+ * status of the decoder's buffer.
+ *
+ * @feed: The decoder's feed
+ * @dmx_buffer_status: Status of decoder's buffer
+ *
+ * Return error code.
+ */
+int mpq_dmx_decoder_buffer_status(struct dvb_demux_feed *feed,
+ struct dmx_buffer_status *dmx_buffer_status);
+
+/**
+ * mpq_dmx_reuse_decoder_buffer - release buffer passed to decoder for reuse
+ * by the stream-buffer.
+ *
+ * @feed: The decoder's feed.
+ * @cookie: stream-buffer handle of the buffer.
+ *
+ * Return error code
+ *
+ * The function releases the buffer provided by the stream-buffer
+ * connected to the decoder back to the stream-buffer for reuse.
+ */
+int mpq_dmx_reuse_decoder_buffer(struct dvb_demux_feed *feed, int cookie);
+
+/**
+ * mpq_dmx_process_video_packet - Assemble PES data and output it
+ * to the stream-buffer connected to the decoder.
+ *
+ * @feed: The feed used for the video TS packets
+ * @buf: The buffer holding video TS packet.
+ *
+ * Return error code.
+ *
+ * The function assumes it receives a buffer with a single TS packet
+ * of the relevant PID.
+ * If the output buffer is full during assembly, the function drops
+ * the packet and does not write it to the output buffer.
+ * Scrambled packets are bypassed.
+ */
+int mpq_dmx_process_video_packet(struct dvb_demux_feed *feed, const u8 *buf);
+
+/**
+ * mpq_dmx_process_pcr_packet - Extract PCR/STC pairs from
+ * a 192 bytes packet.
+ *
+ * @feed: The feed used for the PCR TS packets
+ * @buf: The buffer holding pcr/stc packet.
+ *
+ * Return error code.
+ *
+ * The function assumes it receives a buffer with a single TS packet
+ * of the relevant PID, and that the packet has a 4-byte
+ * suffix holding an extra timestamp in the following format:
+ *
+ * Byte3: TSIF flags
+ * Byte0-2: TTS, 0..2^24-1 at 105.47 kHz (27*10^6/256).
+ *
+ * The function calls back to dmxdev after extracting the PCR/STC
+ * pair.
+ */
+int mpq_dmx_process_pcr_packet(struct dvb_demux_feed *feed, const u8 *buf);
+
+/**
+ * mpq_dmx_extract_pcr_and_dci() - Extract the PCR field and discontinuity
+ * indicator from a TS packet buffer.
+ *
+ * @buf: TS packet buffer
+ * @pcr: returned PCR value
+ * @dci: returned discontinuity indicator
+ *
+ * Returns 1 if PCR was extracted, 0 otherwise.
+ */
+int mpq_dmx_extract_pcr_and_dci(const u8 *buf, u64 *pcr, int *dci);
+
+/**
+ * mpq_dmx_init_debugfs_entries -
+ * Extend dvb-demux debugfs with mpq related entries (HW statistics and secure
+ * demux log level).
+ *
+ * @mpq_demux: The mpq_demux device to initialize.
+ */
+void mpq_dmx_init_debugfs_entries(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_dmx_update_hw_statistics -
+ * Update dvb-demux debugfs with HW notification statistics.
+ *
+ * @mpq_demux: The mpq_demux device to update.
+ */
+void mpq_dmx_update_hw_statistics(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_dmx_set_cipher_ops - Handles setting of cipher operations
+ *
+ * @feed: The feed to set its cipher operations
+ * @cipher_ops: Cipher operations to be set
+ *
+ * This common function handles only the case of working with secure
+ * demux, where a single decrypt cipher operation is allowed.
+ *
+ * Return error code
+ */
+int mpq_dmx_set_cipher_ops(struct dvb_demux_feed *feed,
+ struct dmx_cipher_operations *cipher_ops);
+
+/**
+ * mpq_dmx_convert_tts - Convert the timestamp attached by the HW to each TS
+ * packet to 27MHz units.
+ *
+ * @feed: The feed with TTS attached
+ * @timestamp: Buffer holding the timestamp attached by the HW
+ * @timestampIn27Mhz: Timestamp result in 27MHz
+ *
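+ * The TTS runs at 27MHz/256 (~105.47kHz), so the conversion amounts to
+ * multiplying the 24-bit TTS value by 256.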
+ */
+void mpq_dmx_convert_tts(struct dvb_demux_feed *feed,
+ const u8 timestamp[TIMESTAMP_LEN],
+ u64 *timestampIn27Mhz);
+
+/**
+ * mpq_sdmx_open_session - Handle the details of opening a new secure demux
+ * session for the specified mpq demux instance. Multiple calls are allowed;
+ * reference counting ensures the session is opened only when needed.
+ *
+ * @mpq_demux: mpq demux instance
+ *
+ * Return error code
+ */
+int mpq_sdmx_open_session(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_sdmx_close_session - Closes the secure demux session. The session
+ * is closed only when its reference counter reaches 0.
+ *
+ * @mpq_demux: mpq demux instance
+ *
+ * Return error code
+ */
+int mpq_sdmx_close_session(struct mpq_demux *mpq_demux);
+
+/**
+ * mpq_dmx_init_mpq_feed - Initialize an mpq feed object
+ * The function allocates an mpq_feed object and saves it in the
+ * dvb_demux_feed priv field.
+ *
+ * @feed: A dvb demux level feed parent object
+ *
+ * Return error code
+ */
+int mpq_dmx_init_mpq_feed(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_terminate_feed - Destroy an mpq feed object
+ *
+ * @feed: A dvb demux level feed parent object
+ *
+ * Return error code
+ */
+int mpq_dmx_terminate_feed(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_init_video_feed() - Initializes video related data structures
+ *
+ * @mpq_feed: mpq_feed object to initialize
+ *
+ * Return error code
+ */
+int mpq_dmx_init_video_feed(struct mpq_feed *mpq_feed);
+
+/**
+ * mpq_dmx_terminate_video_feed() - Release video related feed resources
+ *
+ * @mpq_feed: mpq_feed object to terminate
+ *
+ * Return error code
+ */
+int mpq_dmx_terminate_video_feed(struct mpq_feed *mpq_feed);
+
+/**
+ * mpq_dmx_write - demux write() function implementation.
+ *
+ * A wrapper function used for writing new data into the demux via DVR.
+ * It decides where the new data should actually go: to the secure demux or to
+ * the regular dvb-demux software demux.
+ *
+ * @demux: demux interface
+ * @buf: input buffer
+ * @count: number of data bytes in input buffer
+ *
+ * Return number of bytes processed or error code
+ */
+int mpq_dmx_write(struct dmx_demux *demux, const char *buf, size_t count);
+
+/**
+ * mpq_sdmx_process - Perform demuxing process on the specified input buffer
+ * in the secure demux instance
+ *
+ * @mpq_demux: mpq demux instance
+ * @input: input buffer descriptor
+ * @fill_count: number of data bytes in input buffer that can be read
+ * @read_offset: offset in buffer for reading
+ * @tsp_size: size of single TS packet
+ *
+ * Return number of bytes read or error code
+ */
+int mpq_sdmx_process(struct mpq_demux *mpq_demux,
+ struct sdmx_buff_descr *input,
+ u32 fill_count,
+ u32 read_offset,
+ size_t tsp_size);
+
+/**
+ * mpq_sdmx_is_loaded - Returns 1 if the secure demux application is loaded,
+ * 0 otherwise. This function should be used to determine whether or not
+ * processing should take place in the SDMX.
+ */
+int mpq_sdmx_is_loaded(void);
+
+/**
+ * mpq_dmx_oob_command - Handles OOB command from dvb-demux.
+ *
+ * OOB marker commands trigger callback to the dmxdev.
+ * Handling of EOS command may trigger current (last on stream) PES/Frame to
+ * be reported, in addition to callback to the dmxdev.
+ * In case secure demux is active for the feed, EOS command is passed to the
+ * secure demux for handling.
+ *
+ * @feed: dvb demux feed object
+ * @cmd: oob command data
+ *
+ * returns 0 on success or error
+ */
+int mpq_dmx_oob_command(struct dvb_demux_feed *feed,
+ struct dmx_oob_command *cmd);
+
+/**
+ * mpq_dmx_peer_rec_feed() - For a recording filter with multiple feed objects,
+ * search for a feed object that shares the same filter as the specified feed
+ * object, and return it.
+ * This can be used to test whether the specified feed object is the first feed
+ * allocated for the recording filter, in which case the return value is NULL.
+ *
+ * @feed: dvb demux feed object
+ *
+ * Return the dvb_demux_feed sharing the same filter's buffer or NULL if no
+ * such is found.
+ */
+struct dvb_demux_feed *mpq_dmx_peer_rec_feed(struct dvb_demux_feed *feed);
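A hedged caller-side sketch of that test (illustrative only, assuming the mpq plugin headers; the helper name is made up):

static int example_is_first_rec_feed(struct dvb_demux_feed *feed)
{
	/* A NULL peer means no other feed shares this filter's buffer yet */
	return mpq_dmx_peer_rec_feed(feed) == NULL;
}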
+
+/**
+ * mpq_dmx_decoder_eos_cmd() - Report EOS event to the mpq_streambuffer
+ *
+ * @mpq_feed: Video mpq_feed object for notification
+ *
+ * Return error code
+ */
+int mpq_dmx_decoder_eos_cmd(struct mpq_feed *mpq_feed);
+
+/**
+ * mpq_dmx_parse_mandatory_pes_header() - Parse non-optional PES header fields
+ * from TS packet buffer and save results in the feed object.
+ *
+ * @feed: Video dvb demux feed object
+ * @feed_data: Structure where results will be saved
+ * @pes_header: Saved PES header
+ * @buf: Input buffer containing TS packet with the PES header
+ * @ts_payload_offset: Offset in 'buf' where payload begins
+ * @bytes_avail: Length of actual payload
+ *
+ * Return error code
+ */
+int mpq_dmx_parse_mandatory_pes_header(
+ struct dvb_demux_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header,
+ const u8 *buf,
+ u32 *ts_payload_offset,
+ int *bytes_avail);
+
+/**
+ * mpq_dmx_parse_remaining_pes_header() - Parse optional PES header fields
+ * from TS packet buffer and save results in the feed object.
+ * This function depends on mpq_dmx_parse_mandatory_pes_header being called
+ * first for state to be valid.
+ *
+ * @feed: Video dvb demux feed object
+ * @feed_data: Structure where results will be saved
+ * @pes_header: Saved PES header
+ * @buf: Input buffer containing TS packet with the PES header
+ * @ts_payload_offset: Offset in 'buf' where payload begins
+ * @bytes_avail: Length of actual payload
+ *
+ * Return error code
+ */
+int mpq_dmx_parse_remaining_pes_header(
+ struct dvb_demux_feed *feed,
+ struct mpq_video_feed_info *feed_data,
+ struct pes_packet_header *pes_header,
+ const u8 *buf,
+ u32 *ts_payload_offset,
+ int *bytes_avail);
+
+/**
+ * mpq_dmx_flush_stream_buffer() - Flush video stream buffer object of the
+ * specific video feed, both meta-data packets and data.
+ *
+ * @feed: dvb demux video feed object
+ *
+ * Return error code
+ */
+int mpq_dmx_flush_stream_buffer(struct dvb_demux_feed *feed);
+
+/**
+ * mpq_dmx_save_pts_dts() - Save the current PTS/DTS data
+ *
+ * @feed_data: Video feed structure where PTS/DTS is saved
+ */
+static inline void mpq_dmx_save_pts_dts(struct mpq_video_feed_info *feed_data)
+{
+ if (feed_data->new_info_exists) {
+ feed_data->saved_pts_dts_info.pts_exist =
+ feed_data->new_pts_dts_info.pts_exist;
+ feed_data->saved_pts_dts_info.pts =
+ feed_data->new_pts_dts_info.pts;
+ feed_data->saved_pts_dts_info.dts_exist =
+ feed_data->new_pts_dts_info.dts_exist;
+ feed_data->saved_pts_dts_info.dts =
+ feed_data->new_pts_dts_info.dts;
+
+ feed_data->new_info_exists = 0;
+ feed_data->saved_info_used = 0;
+ }
+}
+
+/**
+ * mpq_dmx_write_pts_dts() - Write out the saved PTS/DTS data and mark as used
+ *
+ * @feed_data: Video feed structure where PTS/DTS was saved
+ * @info: PTS/DTS structure to write to
+ */
+static inline void mpq_dmx_write_pts_dts(struct mpq_video_feed_info *feed_data,
+ struct dmx_pts_dts_info *info)
+{
+ if (!feed_data->saved_info_used) {
+ info->pts_exist = feed_data->saved_pts_dts_info.pts_exist;
+ info->pts = feed_data->saved_pts_dts_info.pts;
+ info->dts_exist = feed_data->saved_pts_dts_info.dts_exist;
+ info->dts = feed_data->saved_pts_dts_info.dts;
+
+ feed_data->saved_info_used = 1;
+ } else {
+ info->pts_exist = 0;
+ info->dts_exist = 0;
+ }
+}
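The two inline helpers above are intended to be used as a pair: save the freshly parsed PTS/DTS once per PES header, then write it into the packet that reports the corresponding frame; later writes report no PTS/DTS until new information is saved. A minimal usage sketch (illustrative only, the wrapper function is made up):

static void example_pts_dts_usage(struct mpq_video_feed_info *feed_data)
{
	struct dmx_pts_dts_info meta;

	mpq_dmx_save_pts_dts(feed_data);         /* latch the newly parsed info once */
	mpq_dmx_write_pts_dts(feed_data, &meta); /* first write: real PTS/DTS values */
	mpq_dmx_write_pts_dts(feed_data, &meta); /* later writes: pts/dts_exist are 0 */
}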
+
+/*
+ * mpq_dmx_calc_time_delta -
+ * Calculate delta in msec between two time snapshots.
+ *
+ * @curr_time: value of current time
+ * @prev_time: value of previous time
+ *
+ * Return time-delta in msec
+ */
+static inline u32 mpq_dmx_calc_time_delta(struct timespec *curr_time,
+ struct timespec *prev_time)
+{
+ struct timespec delta_time;
+ u64 delta_time_ms;
+
+ delta_time = timespec_sub(*curr_time, *prev_time);
+
+ delta_time_ms = ((u64)delta_time.tv_sec * MSEC_PER_SEC) +
+ delta_time.tv_nsec / NSEC_PER_MSEC;
+
+ return (u32)delta_time_ms;
+}
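A minimal usage sketch for the helper above (illustrative only; getrawmonotonic() is used here merely as one possible timespec source):

static void example_measure_ms(void)
{
	struct timespec start, end;
	u32 delta_ms;

	getrawmonotonic(&start);
	/* ... the operation being measured ... */
	getrawmonotonic(&end);

	delta_ms = mpq_dmx_calc_time_delta(&end, &start);
	pr_debug("operation took %u ms\n", delta_ms);
}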
+
+void mpq_dmx_update_decoder_stat(struct mpq_feed *mpq_feed);
+
+/* Return the common module parameter tsif_mode */
+int mpq_dmx_get_param_tsif_mode(void);
+
+/* Return the common module parameter clock_inv */
+int mpq_dmx_get_param_clock_inv(void);
+
+/* Return the common module parameter mpq_sdmx_scramble_odd */
+int mpq_dmx_get_param_scramble_odd(void);
+
+/* Return the common module parameter mpq_sdmx_scramble_even */
+int mpq_dmx_get_param_scramble_even(void);
+
+/* Return the common module parameter mpq_sdmx_scramble_default_discard */
+int mpq_dmx_get_param_scramble_default_discard(void);
+
+
+#endif /* _MPQ_DMX_PLUGIN_COMMON_H */
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
new file mode 100644
index 000000000000..b350e4f4144a
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
@@ -0,0 +1,280 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_dmx_plugin_common.h"
+
+
+static int mpq_sw_dmx_start_filtering(struct dvb_demux_feed *feed)
+{
+ int ret = -EINVAL;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+
+ MPQ_DVB_DBG_PRINT("%s(pid=%d) executed\n", __func__, feed->pid);
+
+ if (mpq_demux == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid mpq_demux handle\n", __func__);
+ goto out;
+ }
+
+ if (mpq_demux->source < DMX_SOURCE_DVR0) {
+ MPQ_DVB_ERR_PRINT("%s: only DVR source is supported (%d)\n",
+ __func__, mpq_demux->source);
+ goto out;
+ }
+
+ /*
+ * Always feed sections/PES starting from a new one and
+ * do not partially transfer data from an older one
+ */
+ feed->pusi_seen = 0;
+
+ ret = mpq_dmx_init_mpq_feed(feed);
+ if (ret)
+ MPQ_DVB_ERR_PRINT("%s: mpq_dmx_init_mpq_feed failed(%d)\n",
+ __func__, ret);
+out:
+ return ret;
+}
+
+static int mpq_sw_dmx_stop_filtering(struct dvb_demux_feed *feed)
+{
+ int ret;
+
+ MPQ_DVB_DBG_PRINT("%s(%d) executed\n", __func__, feed->pid);
+
+ ret = mpq_dmx_terminate_feed(feed);
+ if (ret)
+ MPQ_DVB_ERR_PRINT("%s: mpq_dmx_terminate_feed failed(%d)\n",
+ __func__, ret);
+
+ return ret;
+}
+
+static int mpq_sw_dmx_write_to_decoder(struct dvb_demux_feed *feed,
+ const u8 *buf, size_t len)
+{
+ /*
+ * It is assumed that this function is called once for each
+ * TS packet of the relevant feed.
+ */
+ if (len > (TIMESTAMP_LEN + TS_PACKET_SIZE))
+ MPQ_DVB_DBG_PRINT(
+ "%s: warnning - len larger than one packet\n",
+ __func__);
+
+ if (dvb_dmx_is_video_feed(feed))
+ return mpq_dmx_process_video_packet(feed, buf);
+
+ if (dvb_dmx_is_pcr_feed(feed))
+ return mpq_dmx_process_pcr_packet(feed, buf);
+
+ return 0;
+}
+
+static int mpq_sw_dmx_set_source(struct dmx_demux *demux,
+ const dmx_source_t *src)
+{
+ int ret = -EINVAL;
+
+ if (demux == NULL || demux->priv == NULL || src == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ goto out;
+ }
+
+ if (*src >= DMX_SOURCE_DVR0 && *src <= DMX_SOURCE_DVR3) {
+ ret = mpq_dmx_set_source(demux, src);
+ if (ret)
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_set_source(%d) failed, ret=%d\n",
+ __func__, *src, ret);
+ } else {
+ MPQ_DVB_ERR_PRINT("%s: not a DVR source\n", __func__);
+ }
+
+out:
+ return ret;
+}
+
+static int mpq_sw_dmx_get_caps(struct dmx_demux *demux, struct dmx_caps *caps)
+{
+ struct dvb_demux *dvb_demux = demux->priv;
+
+ if (dvb_demux == NULL || caps == NULL) {
+ MPQ_DVB_ERR_PRINT("%s: invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ caps->caps = DMX_CAP_PULL_MODE | DMX_CAP_VIDEO_DECODER_DATA |
+ DMX_CAP_TS_INSERTION | DMX_CAP_VIDEO_INDEXING |
+ DMX_CAP_AUTO_BUFFER_FLUSH;
+ caps->recording_max_video_pids_indexed = 0;
+ caps->num_decoders = MPQ_ADAPTER_MAX_NUM_OF_INTERFACES;
+ caps->num_demux_devices = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+ caps->num_pid_filters = MPQ_MAX_DMX_FILES;
+ caps->num_section_filters = dvb_demux->filternum;
+ caps->num_section_filters_per_pid = dvb_demux->filternum;
+ caps->section_filter_length = DMX_FILTER_SIZE;
+ caps->num_demod_inputs = 0;
+ caps->num_memory_inputs = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+ caps->max_bitrate = 192;
+ caps->demod_input_max_bitrate = 96;
+ caps->memory_input_max_bitrate = 96;
+ caps->num_cipher_ops = 1;
+
+ /* No STC support */
+ caps->max_stc = 0;
+
+ /* Buffer requirements */
+ caps->section.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->section.max_buffer_num = 1;
+ caps->section.max_size = 0xFFFFFFFF;
+ caps->section.size_alignment = 0;
+ caps->pes.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->pes.max_buffer_num = 1;
+ caps->pes.max_size = 0xFFFFFFFF;
+ caps->pes.size_alignment = 0;
+ caps->recording_188_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->recording_188_tsp.max_buffer_num = 1;
+ caps->recording_188_tsp.max_size = 0xFFFFFFFF;
+ caps->recording_188_tsp.size_alignment = 0;
+ caps->recording_192_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->recording_192_tsp.max_buffer_num = 1;
+ caps->recording_192_tsp.max_size = 0xFFFFFFFF;
+ caps->recording_192_tsp.size_alignment = 0;
+ caps->playback_188_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->playback_188_tsp.max_buffer_num = 1;
+ caps->playback_188_tsp.max_size = 0xFFFFFFFF;
+ caps->playback_188_tsp.size_alignment = 188;
+ caps->playback_192_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->playback_192_tsp.max_buffer_num = 1;
+ caps->playback_192_tsp.max_size = 0xFFFFFFFF;
+ caps->playback_192_tsp.size_alignment = 192;
+ caps->decoder.flags =
+ DMX_BUFFER_SECURED_IF_DECRYPTED |
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_LINEAR_GROUP_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->decoder.max_buffer_num = DMX_MAX_DECODER_BUFFER_NUM;
+ caps->decoder.max_size = 0xFFFFFFFF;
+ caps->decoder.size_alignment = SZ_4K;
+
+ return 0;
+}
+
+static int mpq_sw_dmx_init(struct dvb_adapter *mpq_adapter,
+ struct mpq_demux *mpq_demux)
+{
+ int ret;
+ struct dvb_demux *dvb_demux = &mpq_demux->demux;
+
+ /* Set the kernel-demux object capabilities */
+ mpq_demux->demux.dmx.capabilities =
+ DMX_TS_FILTERING |
+ DMX_PES_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING |
+ DMX_CRC_CHECKING |
+ DMX_TS_DESCRAMBLING;
+
+ mpq_demux->decoder_alloc_flags = ION_FLAG_CACHED;
+
+ /* Set dvb-demux "virtual" function pointers */
+ dvb_demux->priv = (void *)mpq_demux;
+ dvb_demux->filternum = MPQ_MAX_DMX_FILES;
+ dvb_demux->feednum = MPQ_MAX_DMX_FILES;
+ dvb_demux->start_feed = mpq_sw_dmx_start_filtering;
+ dvb_demux->stop_feed = mpq_sw_dmx_stop_filtering;
+ dvb_demux->write_to_decoder = mpq_sw_dmx_write_to_decoder;
+ dvb_demux->decoder_fullness_init = mpq_dmx_decoder_fullness_init;
+ dvb_demux->decoder_fullness_wait = mpq_dmx_decoder_fullness_wait;
+ dvb_demux->decoder_fullness_abort = mpq_dmx_decoder_fullness_abort;
+ dvb_demux->decoder_buffer_status = mpq_dmx_decoder_buffer_status;
+ dvb_demux->reuse_decoder_buffer = mpq_dmx_reuse_decoder_buffer;
+ dvb_demux->set_cipher_op = mpq_dmx_set_cipher_ops;
+ dvb_demux->oob_command = mpq_dmx_oob_command;
+ dvb_demux->convert_ts = mpq_dmx_convert_tts;
+ dvb_demux->flush_decoder_buffer = NULL;
+
+ /* Initialize dvb_demux object */
+ ret = dvb_dmx_init(dvb_demux);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT("%s: dvb_dmx_init failed, ret=%d\n",
+ __func__, ret);
+ goto init_failed;
+ }
+
+ /* Now initialize the dmx-dev object */
+ mpq_demux->dmxdev.filternum = MPQ_MAX_DMX_FILES;
+ mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx;
+ mpq_demux->dmxdev.capabilities = DMXDEV_CAP_DUPLEX;
+
+ mpq_demux->dmxdev.demux->set_source = mpq_sw_dmx_set_source;
+ mpq_demux->dmxdev.demux->get_stc = NULL;
+ mpq_demux->dmxdev.demux->get_caps = mpq_sw_dmx_get_caps;
+ mpq_demux->dmxdev.demux->map_buffer = mpq_dmx_map_buffer;
+ mpq_demux->dmxdev.demux->unmap_buffer = mpq_dmx_unmap_buffer;
+ mpq_demux->dmxdev.demux->write = mpq_dmx_write;
+ ret = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT("%s: dvb_dmxdev_init failed, ret=%d\n",
+ __func__, ret);
+ goto init_failed_dmx_release;
+ }
+
+ /* Extend dvb-demux debugfs with mpq demux statistics. */
+ mpq_dmx_init_debugfs_entries(mpq_demux);
+
+ return 0;
+
+init_failed_dmx_release:
+ dvb_dmx_release(dvb_demux);
+init_failed:
+ return ret;
+}
+
+static int __init mpq_dmx_sw_plugin_init(void)
+{
+ return mpq_dmx_plugin_init(mpq_sw_dmx_init);
+}
+
+static void __exit mpq_dmx_sw_plugin_exit(void)
+{
+ mpq_dmx_plugin_exit();
+}
+
+
+module_init(mpq_dmx_sw_plugin_init);
+module_exit(mpq_dmx_sw_plugin_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. demux software plugin");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
new file mode 100644
index 000000000000..be88bc1bf19f
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_tspp_v1.c
@@ -0,0 +1,1968 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/vmalloc.h>
+#include <linux/qcom_tspp.h>
+#include "mpq_dvb_debug.h"
+#include "mpq_dmx_plugin_common.h"
+
+#define TSIF_COUNT 2
+
+/* Max number of PID filters */
+#define TSPP_MAX_PID_FILTER_NUM 128
+
+/* Max number of user-defined HW PID filters */
+#define TSPP_MAX_HW_PID_FILTER_NUM 15
+
+/* HW index of the last entry in the TSPP HW filter table */
+#define TSPP_LAST_HW_FILTER_INDEX 15
+
+/* Number of filters required to accept all packets except NULL packets */
+#define TSPP_BLOCK_NULLS_FILTERS_NUM 13
+
+/* Max number of section filters */
+#define TSPP_MAX_SECTION_FILTER_NUM 128
+
+/* For each TSIF we use a single pipe holding the data after PID filtering */
+#define TSPP_CHANNEL 0
+
+/* The channel_id passed to the TSPP driver, based on TSIF number and channel type */
+#define TSPP_CHANNEL_ID(tsif, ch) ((tsif << 1) + ch)
+#define TSPP_GET_TSIF_NUM(ch_id) (ch_id >> 1)
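A quick stand-alone check of that mapping (not driver code; the macros are repeated locally so the snippet compiles on its own):

#include <assert.h>

#define TSPP_CHANNEL 0
#define TSPP_CHANNEL_ID(tsif, ch) ((tsif << 1) + ch)
#define TSPP_GET_TSIF_NUM(ch_id) (ch_id >> 1)

int main(void)
{
	/* TSIF0 -> channel 0, TSIF1 -> channel 2, and back again */
	assert(TSPP_CHANNEL_ID(0, TSPP_CHANNEL) == 0);
	assert(TSPP_CHANNEL_ID(1, TSPP_CHANNEL) == 2);
	assert(TSPP_GET_TSIF_NUM(2) == 1);
	return 0;
}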
+
+/* Mask set so that the PID filter cares about all PID bits */
+#define TSPP_PID_MASK 0x1FFF
+
+/* dvb-demux defines pid 0x2000 as full capture pid */
+#define TSPP_PASS_THROUGH_PID 0x2000
+
+/* NULL packets pid */
+#define TSPP_NULL_PACKETS_PID 0x1FFF
+
+#define TSPP_RAW_TTS_SIZE 192
+#define TSPP_RAW_SIZE 188
+
+#define MAX_BAM_DESCRIPTOR_SIZE (32 * 1024 - 1)
+
+#define MAX_BAM_DESCRIPTOR_COUNT (8 * 1024 - 2)
+
+#define TSPP_BUFFER_SIZE (500 * 1024) /* 500KB */
+
+#define TSPP_DEFAULT_DESCRIPTOR_SIZE (TSPP_RAW_TTS_SIZE)
+
+#define TSPP_BUFFER_COUNT(buffer_size) \
+ ((buffer_size) / tspp_desc_size)
+
+/* Rate at which TSPP notifies the demux that new packets were received,
+ * using the max descriptor size (170 packets).
+ * Assuming a 20MBit/sec stream with 170 packets per descriptor,
+ * there would be about 82 descriptors,
+ * meaning about 82 notifications per second.
+ */
+#define TSPP_NOTIFICATION_SIZE(desc_size) \
+ (MAX_BAM_DESCRIPTOR_SIZE / (desc_size))
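For reference, the figure of 170 packets per notification quoted in the comment above follows directly from the constants; a stand-alone check (not driver code):

#include <stdio.h>

int main(void)
{
	const int max_bam_descriptor_size = 32 * 1024 - 1; /* MAX_BAM_DESCRIPTOR_SIZE */
	const int raw_tts_size = 192;                      /* TSPP_RAW_TTS_SIZE */

	/* With the default single-packet descriptor, a notification covers
	 * 32767 / 192 = 170 descriptors, i.e. 170 packets.
	 */
	printf("packets per notification: %d\n",
	       max_bam_descriptor_size / raw_tts_size);
	return 0;
}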
+
+/* Channel timeout in msec */
+#define TSPP_CHANNEL_TIMEOUT 100
+
+enum mem_buffer_allocation_mode {
+ MPQ_DMX_TSPP_INTERNAL_ALLOC = 0,
+ MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC = 1
+};
+
+/* module parameters for load time configuration */
+static int allocation_mode = MPQ_DMX_TSPP_INTERNAL_ALLOC;
+static int tspp_out_buffer_size = TSPP_BUFFER_SIZE;
+static int tspp_desc_size = TSPP_DEFAULT_DESCRIPTOR_SIZE;
+static int tspp_notification_size =
+ TSPP_NOTIFICATION_SIZE(TSPP_DEFAULT_DESCRIPTOR_SIZE);
+static int tspp_channel_timeout = TSPP_CHANNEL_TIMEOUT;
+static int tspp_out_ion_heap = ION_QSECOM_HEAP_ID;
+
+module_param(allocation_mode, int, S_IRUGO | S_IWUSR);
+module_param(tspp_out_buffer_size, int, S_IRUGO | S_IWUSR);
+module_param(tspp_desc_size, int, S_IRUGO | S_IWUSR);
+module_param(tspp_notification_size, int, S_IRUGO | S_IWUSR);
+module_param(tspp_channel_timeout, int, S_IRUGO | S_IWUSR);
+module_param(tspp_out_ion_heap, int, S_IRUGO | S_IWUSR);
+
+/* The following structure holds singleton information
+ * required for dmx implementation on top of TSPP.
+ */
+static struct
+{
+ /* Information for each TSIF input processing */
+ struct {
+ /*
+ * TSPP pipe holding all TS packets after PID filtering.
+ * The following is reference count for number of feeds
+ * allocated on that pipe.
+ */
+ int channel_ref;
+
+ /* Counter for data notifications on the pipe */
+ atomic_t data_cnt;
+
+ /* flag to indicate control operation is in progress */
+ atomic_t control_op;
+
+ /* ION handle used for TSPP data buffer allocation */
+ struct ion_handle *ch_mem_heap_handle;
+
+ /* TSPP data buffer heap virtual base address */
+ void *ch_mem_heap_virt_base;
+
+ /* TSPP data buffer heap physical base address */
+ ion_phys_addr_t ch_mem_heap_phys_base;
+
+ /* Buffer allocation index */
+ int buff_index;
+
+ /* Number of buffers */
+ u32 buffer_count;
+
+ /*
+ * Array holding the IDs of the TSPP buffer descriptors in the
+ * current aggregate, in order to release these descriptors at
+ * the end of processing.
+ */
+ int *aggregate_ids;
+
+ /*
+ * Holds PIDs of allocated filters along with
+ * how many feeds are opened on the same PID. For
+ * TSPP HW filters, holds also the filter table index.
+ * When pid == -1, the entry is free.
+ */
+ struct {
+ int pid;
+ int ref_count;
+ int hw_index;
+ } filters[TSPP_MAX_PID_FILTER_NUM];
+
+ /* Indicates available/allocated filter table indexes */
+ int hw_indexes[TSPP_MAX_HW_PID_FILTER_NUM];
+
+ /* Number of currently allocated PID filters */
+ u16 current_filter_count;
+
+ /*
+ * Flag to indicate whether the user added a filter to accept
+ * NULL packets (PID = 0x1FFF)
+ */
+ int pass_nulls_flag;
+
+ /*
+ * Flag to indicate whether the user added a filter to accept
+ * all packets (PID = 0x2000)
+ */
+ int pass_all_flag;
+
+ /*
+ * Flag to indicate whether the filter that accepts
+ * all packets has already been added and is
+ * currently enabled
+ */
+ int accept_all_filter_exists_flag;
+
+ /* Thread processing TS packets from TSPP */
+ struct task_struct *thread;
+ wait_queue_head_t wait_queue;
+
+ /* TSIF alias */
+ char name[TSIF_NAME_LENGTH];
+
+ /* Pointer to the demux connected to this TSIF */
+ struct mpq_demux *mpq_demux;
+
+ /* Mutex protecting the data-structure */
+ struct mutex mutex;
+ } tsif[TSIF_COUNT];
+
+ /* ION client used for TSPP data buffer allocation */
+ struct ion_client *ion_client;
+} mpq_dmx_tspp_info;
+
+static void *tspp_mem_allocator(int channel_id, u32 size,
+ phys_addr_t *phys_base, void *user)
+{
+ void *virt_addr = NULL;
+ int i = TSPP_GET_TSIF_NUM(channel_id);
+
+ if (mpq_dmx_tspp_info.tsif[i].buff_index ==
+ mpq_dmx_tspp_info.tsif[i].buffer_count)
+ return NULL;
+
+ virt_addr =
+ (mpq_dmx_tspp_info.tsif[i].ch_mem_heap_virt_base +
+ (mpq_dmx_tspp_info.tsif[i].buff_index * size));
+
+ *phys_base =
+ (mpq_dmx_tspp_info.tsif[i].ch_mem_heap_phys_base +
+ (mpq_dmx_tspp_info.tsif[i].buff_index * size));
+
+ mpq_dmx_tspp_info.tsif[i].buff_index++;
+
+ return virt_addr;
+}
+
+static void tspp_mem_free(int channel_id, u32 size,
+ void *virt_base, phys_addr_t phys_base, void *user)
+{
+ int i = TSPP_GET_TSIF_NUM(channel_id);
+
+ /*
+ * The actual buffer heap free is done in mpq_dmx_tspp_plugin_exit().
+ * We only update the index here, so if this function is called
+ * repeatedly for all the buffers, tspp_mem_allocator() can be
+ * called again afterwards.
+ * Note: it would be incorrect to call tspp_mem_allocator()
+ * a few times, then call tspp_mem_free(), then call
+ * tspp_mem_allocator() again.
+ */
+ if (mpq_dmx_tspp_info.tsif[i].buff_index > 0)
+ mpq_dmx_tspp_info.tsif[i].buff_index--;
+}
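The pair above is essentially a bump allocator over the per-TSIF contiguous heap: each call hands out the next fixed-size slice, and the free callback only winds the index back. A stand-alone model of the same idea (illustrative only, user-space types):

#include <stddef.h>
#include <stdint.h>

struct bump_heap {
	uint8_t  *virt_base; /* start of the contiguous heap */
	uint64_t  phys_base; /* its physical base address    */
	int       index;     /* next free slice              */
	int       count;     /* total number of slices       */
};

static void *bump_alloc(struct bump_heap *h, size_t slice, uint64_t *phys)
{
	if (h->index == h->count)
		return NULL; /* heap exhausted */

	*phys = h->phys_base + (uint64_t)h->index * slice;
	return h->virt_base + (size_t)h->index++ * slice;
}

static void bump_free(struct bump_heap *h)
{
	/* like tspp_mem_free(): only valid when all slices are released together */
	if (h->index > 0)
		h->index--;
}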
+
+/**
+ * Returns a free HW filter index that can be used.
+ *
+ * @tsif: The TSIF to allocate filter from
+ *
+ * Return HW filter index or -ENOMEM if no filters available
+ */
+static int mpq_tspp_allocate_hw_filter_index(int tsif)
+{
+ int i;
+
+ for (i = 0; i < TSPP_MAX_HW_PID_FILTER_NUM; i++) {
+ if (mpq_dmx_tspp_info.tsif[tsif].hw_indexes[i] == 0) {
+ mpq_dmx_tspp_info.tsif[tsif].hw_indexes[i] = 1;
+ return i;
+ }
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * Releases a HW filter index for future reuse.
+ *
+ * @tsif: The TSIF from which the filter should be released
+ * @hw_index: The HW index to release
+ *
+ */
+static inline void mpq_tspp_release_hw_filter_index(int tsif, int hw_index)
+{
+ if ((hw_index >= 0) && (hw_index < TSPP_MAX_HW_PID_FILTER_NUM))
+ mpq_dmx_tspp_info.tsif[tsif].hw_indexes[hw_index] = 0;
+}
+
+
+/**
+ * Returns a free filter slot that can be used.
+ *
+ * @tsif: The TSIF to allocate filter from
+ *
+ * Return filter index or -ENOMEM if no filters available
+ */
+static int mpq_tspp_get_free_filter_slot(int tsif)
+{
+ int slot;
+
+ for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++)
+ if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == -1)
+ return slot;
+
+ return -ENOMEM;
+}
+
+/**
+ * Returns the filter index of a specific PID.
+ *
+ * @tsif: The TSIF to which the PID is allocated
+ * @pid: The PID to search for
+ *
+ * Return filter index or -EINVAL if no filter is allocated for this PID
+ */
+static int mpq_tspp_get_filter_slot(int tsif, int pid)
+{
+ int slot;
+
+ for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++)
+ if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == pid)
+ return slot;
+
+ return -EINVAL;
+}
+
+/**
+ * mpq_dmx_tspp_swfilter_desc - helper function
+ *
+ * Takes a tspp buffer descriptor and sends it to the SW filter for demuxing,
+ * one TS packet at a time.
+ *
+ * @mpq_demux - mpq demux object
+ * @tspp_data_desc - tspp buffer descriptor
+ */
+static inline void mpq_dmx_tspp_swfilter_desc(struct mpq_demux *mpq_demux,
+ const struct tspp_data_descriptor *tspp_data_desc)
+{
+ u32 notif_size;
+ int i;
+
+ notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
+ for (i = 0; i < notif_size; i++)
+ dvb_dmx_swfilter_packet(&mpq_demux->demux,
+ ((u8 *)tspp_data_desc->virt_base) +
+ i * TSPP_RAW_TTS_SIZE,
+ ((u8 *)tspp_data_desc->virt_base) +
+ i * TSPP_RAW_TTS_SIZE + TSPP_RAW_SIZE);
+}
+
+/**
+ * Demux TS packets from TSPP using the secure demux.
+ * The function assumes the buffer is physically contiguous
+ * and that TSPP descriptors are contiguous in memory.
+ *
+ * @tsif: The TSIF interface to process its packets
+ * @channel_id: the TSPP output pipe with the TS packets
+ */
+static void mpq_dmx_tspp_aggregated_process(int tsif, int channel_id)
+{
+ const struct tspp_data_descriptor *tspp_data_desc;
+ struct mpq_demux *mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
+ struct sdmx_buff_descr input;
+ size_t aggregate_len = 0;
+ size_t aggregate_count = 0;
+ phys_addr_t buff_start_addr_phys;
+ phys_addr_t buff_current_addr_phys = 0;
+ u32 notif_size;
+ int i;
+
+ while ((tspp_data_desc = tspp_get_buffer(0, channel_id)) != NULL) {
+ if (aggregate_count == 0)
+ buff_current_addr_phys = tspp_data_desc->phys_base;
+ notif_size = tspp_data_desc->size / TSPP_RAW_TTS_SIZE;
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids[aggregate_count] =
+ tspp_data_desc->id;
+ aggregate_len += tspp_data_desc->size;
+ aggregate_count++;
+ mpq_demux->hw_notification_size += notif_size;
+
+ /* Let SW filter process only if it might be relevant */
+ if (mpq_demux->num_active_feeds > mpq_demux->num_secure_feeds)
+ mpq_dmx_tspp_swfilter_desc(mpq_demux, tspp_data_desc);
+
+ }
+
+ if (!aggregate_count)
+ return;
+
+ buff_start_addr_phys =
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base;
+
+ input.base_addr = (u64)buff_start_addr_phys;
+ input.size = mpq_dmx_tspp_info.tsif[tsif].buffer_count * tspp_desc_size;
+
+ if (mpq_sdmx_is_loaded() && mpq_demux->sdmx_filter_count) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: SDMX Processing %zu descriptors: %zu bytes at start address 0x%llx, read offset %d\n",
+ __func__, aggregate_count, aggregate_len,
+ input.base_addr,
+ (int)(buff_current_addr_phys - buff_start_addr_phys));
+
+ mpq_sdmx_process(mpq_demux, &input, aggregate_len,
+ buff_current_addr_phys - buff_start_addr_phys,
+ TSPP_RAW_TTS_SIZE);
+ }
+
+ for (i = 0; i < aggregate_count; i++)
+ tspp_release_buffer(0, channel_id,
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids[i]);
+}
+
+
+/**
+ * Demux thread function handling data from a specific TSIF.
+ *
+ * @arg: TSIF number
+ */
+static int mpq_dmx_tspp_thread(void *arg)
+{
+ int tsif = (int)(uintptr_t)arg;
+ struct mpq_demux *mpq_demux;
+ const struct tspp_data_descriptor *tspp_data_desc;
+ atomic_t *data_cnt;
+ u32 notif_size;
+ int channel_id;
+ int ref_count;
+ int ret;
+
+ do {
+ ret = wait_event_interruptible(
+ mpq_dmx_tspp_info.tsif[tsif].wait_queue,
+ (atomic_read(&mpq_dmx_tspp_info.tsif[tsif].data_cnt) &&
+ !atomic_read(&mpq_dmx_tspp_info.tsif[tsif].control_op))
+ || kthread_should_stop());
+
+ if ((ret < 0) || kthread_should_stop()) {
+ MPQ_DVB_ERR_PRINT("%s: exit\n", __func__);
+ break;
+ }
+
+ /* Lock against the TSPP filters data-structure */
+ if (mutex_lock_interruptible(
+ &mpq_dmx_tspp_info.tsif[tsif].mutex))
+ return -ERESTARTSYS;
+
+ channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+
+ ref_count = mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+ data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;
+
+ /* Make sure channel is still active */
+ if (ref_count == 0) {
+ mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+ continue;
+ }
+
+ atomic_dec(data_cnt);
+
+ mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
+ mpq_demux->hw_notification_size = 0;
+
+ if (allocation_mode != MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC &&
+ mpq_sdmx_is_loaded())
+ pr_err_once(
+ "%s: TSPP Allocation mode does not support secure demux.\n",
+ __func__);
+
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC &&
+ mpq_sdmx_is_loaded()) {
+ mpq_dmx_tspp_aggregated_process(tsif, channel_id);
+ } else {
+ /*
+ * Go through all filled descriptors
+ * and perform demuxing on them
+ */
+ do {
+ if (atomic_read(&mpq_dmx_tspp_info.tsif[tsif].
+ control_op)) {
+ /* restore for next iteration */
+ atomic_inc(data_cnt);
+ break;
+ }
+ tspp_data_desc = tspp_get_buffer(0, channel_id);
+ if (!tspp_data_desc)
+ break;
+
+ notif_size = tspp_data_desc->size /
+ TSPP_RAW_TTS_SIZE;
+ mpq_demux->hw_notification_size += notif_size;
+
+ mpq_dmx_tspp_swfilter_desc(mpq_demux,
+ tspp_data_desc);
+ /*
+ * Notify TSPP that the buffer
+ * is no longer needed
+ */
+ tspp_release_buffer(0, channel_id,
+ tspp_data_desc->id);
+ } while (1);
+ }
+
+ if (mpq_demux->hw_notification_size &&
+ (mpq_demux->hw_notification_size <
+ mpq_demux->hw_notification_min_size))
+ mpq_demux->hw_notification_min_size =
+ mpq_demux->hw_notification_size;
+
+ mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+ } while (1);
+
+ return 0;
+}
+
+/**
+ * Callback function from TSPP when new data is ready.
+ *
+ * @channel_id: Channel with new TS packets
+ * @user: user-data holding TSIF number
+ */
+static void mpq_tspp_callback(int channel_id, void *user)
+{
+ int tsif = (int)(uintptr_t)user;
+ struct mpq_demux *mpq_demux;
+
+ /* Save statistics on TSPP notifications */
+ mpq_demux = mpq_dmx_tspp_info.tsif[tsif].mpq_demux;
+ mpq_dmx_update_hw_statistics(mpq_demux);
+
+ atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].data_cnt);
+ wake_up(&mpq_dmx_tspp_info.tsif[tsif].wait_queue);
+}
+
+/**
+ * Free memory of channel output of specific TSIF.
+ *
+ * @tsif: The TSIF id whose channel memory should be freed.
+ */
+static void mpq_dmx_channel_mem_free(int tsif)
+{
+ MPQ_DVB_DBG_PRINT("%s(%d)\n", __func__, tsif);
+
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base = 0;
+
+ if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) {
+ if (!IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].
+ ch_mem_heap_virt_base))
+ ion_unmap_kernel(mpq_dmx_tspp_info.ion_client,
+ mpq_dmx_tspp_info.tsif[tsif].
+ ch_mem_heap_handle);
+
+ ion_free(mpq_dmx_tspp_info.ion_client,
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);
+ }
+
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base = NULL;
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle = NULL;
+}
+
+/**
+ * Allocate memory for channel output of specific TSIF.
+ *
+ * @tsif: The TSIF id for which memory should be allocated.
+ *
+ * Return error status
+ */
+static int mpq_dmx_channel_mem_alloc(int tsif)
+{
+ int result;
+ size_t len;
+
+ MPQ_DVB_DBG_PRINT("%s(%d)\n", __func__, tsif);
+
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle =
+ ion_alloc(mpq_dmx_tspp_info.ion_client,
+ (mpq_dmx_tspp_info.tsif[tsif].buffer_count * tspp_desc_size),
+ SZ_4K,
+ ION_HEAP(tspp_out_ion_heap),
+ 0); /* non-cached */
+
+ if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) {
+ MPQ_DVB_ERR_PRINT("%s: ion_alloc() failed\n", __func__);
+ mpq_dmx_channel_mem_free(tsif);
+ return -ENOMEM;
+ }
+
+ /* save virtual base address of heap */
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base =
+ ion_map_kernel(mpq_dmx_tspp_info.ion_client,
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);
+ if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].
+ ch_mem_heap_virt_base)) {
+ MPQ_DVB_ERR_PRINT("%s: ion_map_kernel() failed\n", __func__);
+ mpq_dmx_channel_mem_free(tsif);
+ return -ENOMEM;
+ }
+
+ /* save physical base address of heap */
+ result = ion_phys(mpq_dmx_tspp_info.ion_client,
+ mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle,
+ &(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base), &len);
+ if (result < 0) {
+ MPQ_DVB_ERR_PRINT("%s: ion_phys() failed\n", __func__);
+ mpq_dmx_channel_mem_free(tsif);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * Add a filter to accept all packets as the last entry
+ * of the TSPP HW filter table.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return error status
+ */
+static int mpq_tspp_add_accept_all_filter(int channel_id,
+ enum tspp_source source)
+{
+ struct tspp_filter tspp_filter;
+ int tsif = TSPP_GET_TSIF_NUM(channel_id);
+ int ret;
+
+ MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+ __func__, channel_id, source);
+
+ if (mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag) {
+ MPQ_DVB_DBG_PRINT("%s: accept all filter already exists\n",
+ __func__);
+ return 0;
+ }
+
+ /* This filter will be the last entry in the table */
+ tspp_filter.priority = TSPP_LAST_HW_FILTER_INDEX;
+ /* Pass all pids - set mask to 0 */
+ tspp_filter.pid = 0;
+ tspp_filter.mask = 0;
+ /*
+ * Include TTS in RAW packets, if you change this to
+ * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+ * accordingly.
+ */
+ tspp_filter.mode = TSPP_MODE_RAW;
+ tspp_filter.source = source;
+ tspp_filter.decrypt = 0;
+
+ ret = tspp_add_filter(0, channel_id, &tspp_filter);
+ if (!ret) {
+ mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag = 1;
+ MPQ_DVB_DBG_PRINT(
+ "%s: accept all filter added successfully\n",
+ __func__);
+ }
+
+ return ret;
+}
+
+/**
+ * Remove the filter that accepts all packets from the last entry
+ * of the TSPP HW filter table.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return error status
+ */
+static int mpq_tspp_remove_accept_all_filter(int channel_id,
+ enum tspp_source source)
+{
+ struct tspp_filter tspp_filter;
+ int tsif = TSPP_GET_TSIF_NUM(channel_id);
+ int ret;
+
+ MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+ __func__, channel_id, source);
+
+ if (mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag == 0) {
+ MPQ_DVB_DBG_PRINT("%s: accept all filter doesn't exist\n",
+ __func__);
+ return 0;
+ }
+
+ tspp_filter.priority = TSPP_LAST_HW_FILTER_INDEX;
+
+ ret = tspp_remove_filter(0, channel_id, &tspp_filter);
+ if (!ret) {
+ mpq_dmx_tspp_info.tsif[tsif].accept_all_filter_exists_flag = 0;
+ MPQ_DVB_DBG_PRINT(
+ "%s: accept all filter removed successfully\n",
+ __func__);
+ }
+
+ return ret;
+}
+
+/**
+ * Add filters designed to accept all packets except NULL packets, i.e.
+ * packets with PID = 0x1FFF.
+ * This function is called after user-defined filters were removed,
+ * so it assumes that the first 13 HW filters in the TSPP filter
+ * table are free for use.
+ *
+ * @channel_id: Channel ID number.
+ * @source: TSPP source.
+ *
+ * Return 0 on success, -1 otherwise
+ */
+static int mpq_tspp_add_null_blocking_filters(int channel_id,
+ enum tspp_source source)
+{
+ struct tspp_filter tspp_filter;
+ int ret = 0;
+ int i, j;
+ u16 full_pid_mask = 0x1FFF;
+ u8 mask_shift;
+ u8 pid_shift;
+ int tsif = TSPP_GET_TSIF_NUM(channel_id);
+
+ MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+ __func__, channel_id, source);
+
+ /*
+ * Add a total of 13 filters that will accept packets with
+ * every PID other than 0x1FFF, which is the NULL PID.
+ *
+ * Filter 0: accept all PIDs with bit 12 clear, i.e.
+ * PID = 0x0000 .. 0x0FFF (4096 PIDs in total):
+ * Mask = 0x1000, PID = 0x0000.
+ *
+ * Filter 12: Accept PID 0x1FFE:
+ * Mask = 0x1FFF, PID = 0x1FFE.
+ *
+ * In general: For N = 0 .. 12,
+ * Filter <N>: accept all PIDs with <N> MSBits set and bit <N-1> clear.
+ * Filter <N> Mask = N+1 MSBits set, others clear.
+ * Filter <N> PID = <N> MSBits set, others clear.
+ */
+
+ /*
+ * Include TTS in RAW packets, if you change this to
+ * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+ * accordingly.
+ */
+ tspp_filter.mode = TSPP_MODE_RAW;
+ tspp_filter.source = source;
+ tspp_filter.decrypt = 0;
+
+ for (i = 0; i < TSPP_BLOCK_NULLS_FILTERS_NUM; i++) {
+ tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+ if (tspp_filter.priority != i) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: got unexpected HW index %d, expected %d\n",
+ __func__, tspp_filter.priority, i);
+ ret = -1;
+ break;
+ }
+ mask_shift = (TSPP_BLOCK_NULLS_FILTERS_NUM - 1 - i);
+ pid_shift = (TSPP_BLOCK_NULLS_FILTERS_NUM - i);
+ tspp_filter.mask =
+ ((full_pid_mask >> mask_shift) << mask_shift);
+ tspp_filter.pid = ((full_pid_mask >> pid_shift) << pid_shift);
+
+ if (tspp_add_filter(0, channel_id, &tspp_filter)) {
+ ret = -1;
+ break;
+ }
+ }
+
+ if (ret) {
+ /* cleanup on failure */
+ for (j = 0; j < i; j++) {
+ tspp_filter.priority = j;
+ mpq_tspp_release_hw_filter_index(tsif, j);
+ tspp_remove_filter(0, channel_id, &tspp_filter);
+ }
+ } else {
+ MPQ_DVB_DBG_PRINT(
+ "%s: NULL blocking filters added successfully\n",
+ __func__);
+ }
+
+ return ret;
+}
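The shift arithmetic used in mpq_tspp_add_null_blocking_filters() above can be verified in isolation; the stand-alone program below (not driver code) reproduces the same mask/PID computation and prints the 13 pairs, from mask 0x1000 / PID 0x0000 up to mask 0x1FFF / PID 0x1FFE, which together accept every PID except the NULL PID 0x1FFF:

#include <stdio.h>

#define BLOCK_NULLS_FILTERS_NUM 13 /* mirrors TSPP_BLOCK_NULLS_FILTERS_NUM */

int main(void)
{
	const unsigned int full_pid_mask = 0x1FFF;
	unsigned int i, mask_shift, pid_shift, mask, pid;

	for (i = 0; i < BLOCK_NULLS_FILTERS_NUM; i++) {
		mask_shift = BLOCK_NULLS_FILTERS_NUM - 1 - i;
		pid_shift = BLOCK_NULLS_FILTERS_NUM - i;
		mask = (full_pid_mask >> mask_shift) << mask_shift;
		pid = (full_pid_mask >> pid_shift) << pid_shift;
		printf("filter %2u: mask 0x%04X  pid 0x%04X\n", i, mask, pid);
	}
	return 0;
}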
+
+/**
+ * Remove filters designed to accept all packets except NULL packets, i.e.
+ * packets with PID = 0x1FFF.
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return 0 on success, -1 otherwise
+ */
+static int mpq_tspp_remove_null_blocking_filters(int channel_id,
+ enum tspp_source source)
+{
+ struct tspp_filter tspp_filter;
+ int tsif = TSPP_GET_TSIF_NUM(channel_id);
+ int ret = 0;
+ int i;
+
+ MPQ_DVB_DBG_PRINT("%s: executed, channel id = %d, source = %d\n",
+ __func__, channel_id, source);
+
+ for (i = 0; i < TSPP_BLOCK_NULLS_FILTERS_NUM; i++) {
+ tspp_filter.priority = i;
+ if (tspp_remove_filter(0, channel_id, &tspp_filter)) {
+ MPQ_DVB_ERR_PRINT("%s: failed to remove filter %d\n",
+ __func__, i);
+ ret = -1;
+ }
+
+ mpq_tspp_release_hw_filter_index(tsif, i);
+ }
+
+ return ret;
+}
+
+/**
+ * Add all current user-defined filters (up to 15) as HW filters
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return 0 on success, negative error value otherwise
+ */
+static int mpq_tspp_add_all_user_filters(int channel_id,
+ enum tspp_source source)
+{
+ struct tspp_filter tspp_filter;
+ int tsif = TSPP_GET_TSIF_NUM(channel_id);
+ int slot;
+ u16 added_count = 0;
+ u16 total_filters_count = 0;
+
+ MPQ_DVB_DBG_PRINT("%s: executed\n", __func__);
+
+ /*
+ * Include TTS in RAW packets, if you change this to
+ * TSPP_MODE_RAW_NO_SUFFIX you must also change TSPP_RAW_TTS_SIZE
+ * accordingly.
+ */
+ tspp_filter.mode = TSPP_MODE_RAW;
+ tspp_filter.source = source;
+ tspp_filter.decrypt = 0;
+
+ for (slot = 0; slot < TSPP_MAX_PID_FILTER_NUM; slot++) {
+ if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid == -1)
+ continue;
+
+ /*
+ * count total number of user filters to verify that it is
+ * exactly TSPP_MAX_HW_PID_FILTER_NUM as expected.
+ */
+ total_filters_count++;
+
+ if (added_count > TSPP_MAX_HW_PID_FILTER_NUM)
+ continue;
+
+ tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+
+ if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid ==
+ TSPP_PASS_THROUGH_PID) {
+ /* pass all pids */
+ tspp_filter.pid = 0;
+ tspp_filter.mask = 0;
+ } else {
+ tspp_filter.pid =
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid;
+ tspp_filter.mask = TSPP_PID_MASK;
+ }
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: adding HW filter, PID = %d, mask = 0x%X, index = %d\n",
+ __func__, tspp_filter.pid, tspp_filter.mask,
+ tspp_filter.priority);
+
+ if (!tspp_add_filter(0, channel_id, &tspp_filter)) {
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index =
+ tspp_filter.priority;
+ added_count++;
+ } else {
+ MPQ_DVB_ERR_PRINT("%s: tspp_add_filter failed\n",
+ __func__);
+ }
+ }
+
+ if ((added_count != TSPP_MAX_HW_PID_FILTER_NUM) ||
+ (added_count != total_filters_count))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * Remove all user-defined HW filters
+ *
+ * @channel_id: Channel ID number.
+ *
+ * @source: TSPP source.
+ *
+ * Return 0 on success, -1 otherwise
+ */
+static int mpq_tspp_remove_all_user_filters(int channel_id,
+ enum tspp_source source)
+{
+ struct tspp_filter tspp_filter;
+ int ret = 0;
+ int tsif = TSPP_GET_TSIF_NUM(channel_id);
+ int i;
+
+ MPQ_DVB_DBG_PRINT("%s: executed\n", __func__);
+
+ for (i = 0; i < TSPP_MAX_HW_PID_FILTER_NUM; i++) {
+ tspp_filter.priority = i;
+ MPQ_DVB_DBG_PRINT("%s: Removing HW filter %d\n",
+ __func__, tspp_filter.priority);
+ if (tspp_remove_filter(0, channel_id, &tspp_filter))
+ ret = -1;
+
+ mpq_tspp_release_hw_filter_index(tsif, i);
+ mpq_dmx_tspp_info.tsif[tsif].filters[i].hw_index = -1;
+ }
+
+ return ret;
+}
+
+/**
+ * Configure TSPP channel to filter the PID of new feed.
+ *
+ * @feed: The feed to configure the channel with
+ *
+ * Return error status
+ *
+ * The function checks whether the new PID can be added to an already
+ * allocated channel; if not, a new channel is allocated and configured.
+ */
+static int mpq_tspp_dmx_add_channel(struct dvb_demux_feed *feed)
+{
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+ struct tspp_select_source tspp_source;
+ struct tspp_filter tspp_filter;
+ int tsif;
+ int tsif_mode = mpq_dmx_get_param_tsif_mode();
+ int ret = 0;
+ int slot;
+ int channel_id;
+ int *channel_ref_count;
+ u32 buffer_size;
+ int restore_user_filters = 0;
+ int remove_accept_all_filter = 0;
+ int remove_null_blocking_filters = 0;
+ size_t agg_size;
+
+ tspp_source.clk_inverse = mpq_dmx_get_param_clock_inv();
+ tspp_source.data_inverse = 0;
+ tspp_source.sync_inverse = 0;
+ tspp_source.enable_inverse = 0;
+
+ MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid);
+
+ switch (tsif_mode) {
+ case 1:
+ tspp_source.mode = TSPP_TSIF_MODE_1;
+ break;
+ case 2:
+ tspp_source.mode = TSPP_TSIF_MODE_2;
+ break;
+ default:
+ tspp_source.mode = TSPP_TSIF_MODE_LOOPBACK;
+ break;
+ }
+
+ /* determine the TSIF we are reading from */
+ if (mpq_demux->source == DMX_SOURCE_FRONT0) {
+ tsif = 0;
+ tspp_source.source = TSPP_SOURCE_TSIF0;
+ } else if (mpq_demux->source == DMX_SOURCE_FRONT1) {
+ tsif = 1;
+ tspp_source.source = TSPP_SOURCE_TSIF1;
+ } else {
+ /* invalid source */
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid input source (%d)\n",
+ __func__,
+ mpq_demux->source);
+
+ return -EINVAL;
+ }
+
+ atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex)) {
+ atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ return -ERESTARTSYS;
+ }
+
+ /*
+ * It is possible that this PID was already requested before.
+ * This can happen if we play and record the same PES, or a PCR
+ * piggybacked on a video packet.
+ */
+ slot = mpq_tspp_get_filter_slot(tsif, feed->pid);
+ if (slot >= 0) {
+ /* PID already configured */
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+ goto out;
+ }
+
+
+ channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+ channel_ref_count = &mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+
+ /*
+ * Recalculate 'tspp_notification_size' and buffer count in case
+ * 'tspp_desc_size' or 'tspp_out_buffer_size' parameters have changed.
+ */
+ buffer_size = tspp_desc_size;
+ tspp_notification_size = TSPP_NOTIFICATION_SIZE(tspp_desc_size);
+ mpq_dmx_tspp_info.tsif[tsif].buffer_count =
+ TSPP_BUFFER_COUNT(tspp_out_buffer_size);
+ if (mpq_dmx_tspp_info.tsif[tsif].buffer_count >
+ MAX_BAM_DESCRIPTOR_COUNT)
+ mpq_dmx_tspp_info.tsif[tsif].buffer_count =
+ MAX_BAM_DESCRIPTOR_COUNT;
+
+ /* check if required TSPP pipe is already allocated or not */
+ if (*channel_ref_count == 0) {
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+ agg_size = mpq_dmx_tspp_info.tsif[tsif].buffer_count *
+ sizeof(int);
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids =
+ vzalloc(agg_size);
+ if (!mpq_dmx_tspp_info.tsif[tsif].aggregate_ids) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: Failed to allocate memory for buffer descriptors aggregation\n",
+ __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mpq_dmx_channel_mem_alloc(tsif);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_channel_mem_alloc(%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ ret);
+
+ goto add_channel_failed;
+ }
+ }
+
+ ret = tspp_open_channel(0, channel_id);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_open_channel(%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ ret);
+
+ goto add_channel_failed;
+ }
+
+ /* set TSPP source */
+ ret = tspp_open_stream(0, channel_id, &tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_select_source(%d,%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ tspp_source.source,
+ ret);
+
+ goto add_channel_close_ch;
+ }
+
+ /* register notification on TS packets */
+ tspp_register_notification(0,
+ channel_id,
+ mpq_tspp_callback,
+ (void *)(uintptr_t)tsif,
+ tspp_channel_timeout);
+
+ /*
+ * Register an allocator that allocates from contiguous memory,
+ * so that we can use a big notification size with the smallest
+ * descriptor size and still provide TZ with a single big buffer
+ * based on the notification size.
+ */
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+ ret = tspp_allocate_buffers(0, channel_id,
+ mpq_dmx_tspp_info.tsif[tsif].buffer_count,
+ buffer_size, tspp_notification_size,
+ tspp_mem_allocator, tspp_mem_free, NULL);
+ } else {
+ ret = tspp_allocate_buffers(0, channel_id,
+ mpq_dmx_tspp_info.tsif[tsif].buffer_count,
+ buffer_size, tspp_notification_size,
+ NULL, NULL, NULL);
+ }
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_allocate_buffers(%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ ret);
+
+ goto add_channel_unregister_notif;
+ }
+
+ mpq_dmx_tspp_info.tsif[tsif].mpq_demux = mpq_demux;
+ }
+
+ /* add new PID to the existing pipe */
+ slot = mpq_tspp_get_free_filter_slot(tsif);
+ if (slot < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_get_free_filter_slot(%d) failed\n",
+ __func__, tsif);
+
+ goto add_channel_unregister_notif;
+ }
+
+ if (feed->pid == TSPP_PASS_THROUGH_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1;
+ else if (feed->pid == TSPP_NULL_PACKETS_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1;
+
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid;
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+
+ tspp_filter.priority = -1;
+
+ if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <
+ TSPP_MAX_HW_PID_FILTER_NUM) {
+ /* HW filtering mode */
+ tspp_filter.priority = mpq_tspp_allocate_hw_filter_index(tsif);
+ if (tspp_filter.priority < 0)
+ goto add_channel_free_filter_slot;
+
+ if (feed->pid == TSPP_PASS_THROUGH_PID) {
+ /* pass all pids */
+ tspp_filter.pid = 0;
+ tspp_filter.mask = 0;
+ } else {
+ tspp_filter.pid = feed->pid;
+ tspp_filter.mask = TSPP_PID_MASK;
+ }
+
+ /*
+ * Include TTS in RAW packets, if you change this to
+ * TSPP_MODE_RAW_NO_SUFFIX you must also change
+ * TSPP_RAW_TTS_SIZE accordingly.
+ */
+ tspp_filter.mode = TSPP_MODE_RAW;
+ tspp_filter.source = tspp_source.source;
+ tspp_filter.decrypt = 0;
+ ret = tspp_add_filter(0, channel_id, &tspp_filter);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_add_filter(%d) failed (%d)\n",
+ __func__,
+ channel_id,
+ ret);
+
+ goto add_channel_free_filter_slot;
+ }
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index =
+ tspp_filter.priority;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: HW filtering mode: added TSPP HW filter, PID = %d, mask = 0x%X, index = %d\n",
+ __func__, tspp_filter.pid, tspp_filter.mask,
+ tspp_filter.priority);
+ } else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count ==
+ TSPP_MAX_HW_PID_FILTER_NUM) {
+ /* Crossing the threshold - from HW to SW filtering mode */
+
+ /* Add a temporary filter to accept all packets */
+ ret = mpq_tspp_add_accept_all_filter(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id, tspp_source.source);
+
+ goto add_channel_free_filter_slot;
+ }
+
+ /* Remove all existing user filters */
+ ret = mpq_tspp_remove_all_user_filters(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_all_user_filters(%d, %d) failed\n",
+ __func__, channel_id, tspp_source.source);
+
+ restore_user_filters = 1;
+ remove_accept_all_filter = 1;
+
+ goto add_channel_free_filter_slot;
+ }
+
+ /* Add HW filters to block NULL packets */
+ ret = mpq_tspp_add_null_blocking_filters(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_null_blocking_filters(%d, %d) failed\n",
+ __func__, channel_id, tspp_source.source);
+
+ restore_user_filters = 1;
+ remove_accept_all_filter = 1;
+
+ goto add_channel_free_filter_slot;
+ }
+
+ /* Remove filters that accepts all packets, if necessary */
+ if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) &&
+ (mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) {
+
+ ret = mpq_tspp_remove_accept_all_filter(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id,
+ tspp_source.source);
+
+ remove_null_blocking_filters = 1;
+ restore_user_filters = 1;
+ remove_accept_all_filter = 1;
+
+ goto add_channel_free_filter_slot;
+ }
+ }
+ } else {
+ /* Already working in SW filtering mode */
+ if (mpq_dmx_tspp_info.tsif[tsif].pass_all_flag ||
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag) {
+
+ ret = mpq_tspp_add_accept_all_filter(channel_id,
+ tspp_source.source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id,
+ tspp_source.source);
+
+ goto add_channel_free_filter_slot;
+ }
+ }
+ }
+
+ (*channel_ref_count)++;
+ mpq_dmx_tspp_info.tsif[tsif].current_filter_count++;
+
+ MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n",
+ __func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count);
+
+ goto out;
+
+add_channel_free_filter_slot:
+ /* restore internal database state */
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1;
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--;
+
+ /* release HW index if we allocated one */
+ if (tspp_filter.priority >= 0) {
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1;
+ mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority);
+ }
+
+ /* restore HW filter table state if necessary */
+ if (remove_null_blocking_filters)
+ mpq_tspp_remove_null_blocking_filters(channel_id,
+ tspp_source.source);
+
+ if (restore_user_filters)
+ mpq_tspp_add_all_user_filters(channel_id, tspp_source.source);
+
+ if (remove_accept_all_filter)
+ mpq_tspp_remove_accept_all_filter(channel_id,
+ tspp_source.source);
+
+ /* restore flags. we can only get here if we changed the flags. */
+ if (feed->pid == TSPP_PASS_THROUGH_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0;
+ else if (feed->pid == TSPP_NULL_PACKETS_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0;
+
+add_channel_unregister_notif:
+ if (*channel_ref_count == 0) {
+ tspp_unregister_notification(0, channel_id);
+ tspp_close_stream(0, channel_id);
+ }
+add_channel_close_ch:
+ if (*channel_ref_count == 0)
+ tspp_close_channel(0, channel_id);
+add_channel_failed:
+ if (*channel_ref_count == 0)
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+ vfree(mpq_dmx_tspp_info.tsif[tsif].aggregate_ids);
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids = NULL;
+ mpq_dmx_channel_mem_free(tsif);
+ }
+
+out:
+ mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+ atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ return ret;
+}
+
+/**
+ * Removes filter from TSPP.
+ *
+ * @feed: The feed to remove
+ *
+ * Return error status
+ *
+ * The function checks whether this is the only PID allocated within
+ * the channel; if so, the channel is closed as well.
+ */
+static int mpq_tspp_dmx_remove_channel(struct dvb_demux_feed *feed)
+{
+ int tsif;
+ int ret = 0;
+ int channel_id;
+ int slot;
+ atomic_t *data_cnt;
+ int *channel_ref_count;
+ enum tspp_source tspp_source;
+ struct tspp_filter tspp_filter;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+ int restore_null_blocking_filters = 0;
+ int remove_accept_all_filter = 0;
+ int remove_user_filters = 0;
+ int accept_all_filter_existed = 0;
+
+ MPQ_DVB_DBG_PRINT("%s: executed, PID = %d\n", __func__, feed->pid);
+
+ /* determine the TSIF we are reading from */
+ if (mpq_demux->source == DMX_SOURCE_FRONT0) {
+ tsif = 0;
+ tspp_source = TSPP_SOURCE_TSIF0;
+ } else if (mpq_demux->source == DMX_SOURCE_FRONT1) {
+ tsif = 1;
+ tspp_source = TSPP_SOURCE_TSIF1;
+ } else {
+ /* invalid source */
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid input source (%d)\n",
+ __func__,
+ mpq_demux->source);
+
+ return -EINVAL;
+ }
+
+ atomic_inc(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ if (mutex_lock_interruptible(&mpq_dmx_tspp_info.tsif[tsif].mutex)) {
+ atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ return -ERESTARTSYS;
+ }
+
+ channel_id = TSPP_CHANNEL_ID(tsif, TSPP_CHANNEL);
+ channel_ref_count = &mpq_dmx_tspp_info.tsif[tsif].channel_ref;
+ data_cnt = &mpq_dmx_tspp_info.tsif[tsif].data_cnt;
+
+ /* check if required TSPP pipe is already allocated or not */
+ if (*channel_ref_count == 0) {
+ /* invalid feed provided as the channel is not allocated */
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid feed (%d)\n",
+ __func__,
+ channel_id);
+
+ ret = -EINVAL;
+ goto out;
+ }
+
+ slot = mpq_tspp_get_filter_slot(tsif, feed->pid);
+
+ if (slot < 0) {
+ /* invalid feed provided as it has no filter allocated */
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_get_filter_slot failed (%d,%d)\n",
+ __func__,
+ feed->pid,
+ tsif);
+
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* since filter was found, ref_count > 0 so it's ok to decrement it */
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count--;
+
+ if (mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count) {
+ /*
+ * there are still references to this pid, do not
+ * remove the filter yet
+ */
+ goto out;
+ }
+
+ if (feed->pid == TSPP_PASS_THROUGH_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 0;
+ else if (feed->pid == TSPP_NULL_PACKETS_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 0;
+
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = -1;
+
+ if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count <=
+ TSPP_MAX_HW_PID_FILTER_NUM) {
+ /* staying in HW filtering mode */
+ tspp_filter.priority =
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index;
+ ret = tspp_remove_filter(0, channel_id, &tspp_filter);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: tspp_remove_filter failed (%d,%d)\n",
+ __func__,
+ channel_id,
+ tspp_filter.priority);
+
+ goto remove_channel_failed_restore_count;
+ }
+ mpq_tspp_release_hw_filter_index(tsif, tspp_filter.priority);
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].hw_index = -1;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s: HW filtering mode: Removed TSPP HW filter, PID = %d, index = %d\n",
+ __func__, feed->pid, tspp_filter.priority);
+ } else if (mpq_dmx_tspp_info.tsif[tsif].current_filter_count ==
+ (TSPP_MAX_HW_PID_FILTER_NUM + 1)) {
+ /* Crossing the threshold - from SW to HW filtering mode */
+
+ accept_all_filter_existed =
+ mpq_dmx_tspp_info.tsif[tsif].
+ accept_all_filter_exists_flag;
+
+ /* Add a temporary filter to accept all packets */
+ ret = mpq_tspp_add_accept_all_filter(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id, tspp_source);
+
+ goto remove_channel_failed_restore_count;
+ }
+
+ ret = mpq_tspp_remove_null_blocking_filters(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_null_blocking_filters(%d, %d) failed\n",
+ __func__, channel_id, tspp_source);
+
+ restore_null_blocking_filters = 1;
+ if (!accept_all_filter_existed)
+ remove_accept_all_filter = 1;
+
+ goto remove_channel_failed_restore_count;
+ }
+
+ ret = mpq_tspp_add_all_user_filters(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_add_all_user_filters(%d, %d) failed\n",
+ __func__, channel_id, tspp_source);
+
+ remove_user_filters = 1;
+ restore_null_blocking_filters = 1;
+ if (!accept_all_filter_existed)
+ remove_accept_all_filter = 1;
+
+ goto remove_channel_failed_restore_count;
+ }
+
+ ret = mpq_tspp_remove_accept_all_filter(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id, tspp_source);
+
+ remove_user_filters = 1;
+ restore_null_blocking_filters = 1;
+ if (!accept_all_filter_existed)
+ remove_accept_all_filter = 1;
+
+ goto remove_channel_failed_restore_count;
+ }
+ } else {
+ /* staying in SW filtering mode */
+ if ((mpq_dmx_tspp_info.tsif[tsif].pass_all_flag == 0) &&
+ (mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag == 0)) {
+
+ ret = mpq_tspp_remove_accept_all_filter(channel_id,
+ tspp_source);
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_tspp_remove_accept_all_filter(%d, %d) failed\n",
+ __func__, channel_id,
+ tspp_source);
+
+ goto remove_channel_failed_restore_count;
+ }
+ }
+ }
+
+ mpq_dmx_tspp_info.tsif[tsif].current_filter_count--;
+ (*channel_ref_count)--;
+
+ MPQ_DVB_DBG_PRINT("%s: success, current_filter_count = %d\n",
+ __func__, mpq_dmx_tspp_info.tsif[tsif].current_filter_count);
+
+ if (*channel_ref_count == 0) {
+ /* channel is not used any more, release it */
+ tspp_unregister_notification(0, channel_id);
+ tspp_close_stream(0, channel_id);
+ tspp_close_channel(0, channel_id);
+ atomic_set(data_cnt, 0);
+
+ if (allocation_mode == MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+ vfree(mpq_dmx_tspp_info.tsif[tsif].aggregate_ids);
+ mpq_dmx_tspp_info.tsif[tsif].aggregate_ids = NULL;
+ mpq_dmx_channel_mem_free(tsif);
+ }
+ }
+
+ goto out;
+
+remove_channel_failed_restore_count:
+ /* restore internal database state */
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].pid = feed->pid;
+ mpq_dmx_tspp_info.tsif[tsif].filters[slot].ref_count++;
+
+ if (remove_user_filters)
+ mpq_tspp_remove_all_user_filters(channel_id, tspp_source);
+
+ if (restore_null_blocking_filters)
+ mpq_tspp_add_null_blocking_filters(channel_id, tspp_source);
+
+ if (remove_accept_all_filter)
+ mpq_tspp_remove_accept_all_filter(channel_id, tspp_source);
+
+ /* restore flags. we can only get here if we changed the flags. */
+ if (feed->pid == TSPP_PASS_THROUGH_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_all_flag = 1;
+ else if (feed->pid == TSPP_NULL_PACKETS_PID)
+ mpq_dmx_tspp_info.tsif[tsif].pass_nulls_flag = 1;
+
+out:
+ mutex_unlock(&mpq_dmx_tspp_info.tsif[tsif].mutex);
+ atomic_dec(&mpq_dmx_tspp_info.tsif[tsif].control_op);
+ return ret;
+}
+
+static int mpq_tspp_dmx_start_filtering(struct dvb_demux_feed *feed)
+{
+ int ret;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+
+ MPQ_DVB_DBG_PRINT(
+ "%s(pid=%d) executed\n",
+ __func__,
+ feed->pid);
+
+ if (mpq_demux == NULL) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid mpq_demux handle\n",
+ __func__);
+
+ return -EINVAL;
+ }
+
+ if (mpq_demux->source < DMX_SOURCE_DVR0) {
+ /* source from TSPP, need to configure tspp pipe */
+ ret = mpq_tspp_dmx_add_channel(feed);
+
+ if (ret < 0) {
+ MPQ_DVB_DBG_PRINT(
+ "%s: mpq_tspp_dmx_add_channel failed(%d)\n",
+ __func__,
+ ret);
+ return ret;
+ }
+ }
+
+ /*
+ * Always feed sections/PES starting from a new one and
+	 * do not partially transfer data from an older one
+ */
+ feed->pusi_seen = 0;
+
+ ret = mpq_dmx_init_mpq_feed(feed);
+ if (ret) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_init_mpq_feed failed(%d)\n",
+ __func__,
+ ret);
+ if (mpq_demux->source < DMX_SOURCE_DVR0)
+ mpq_tspp_dmx_remove_channel(feed);
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mpq_tspp_dmx_stop_filtering(struct dvb_demux_feed *feed)
+{
+ int ret = 0;
+ struct mpq_demux *mpq_demux = feed->demux->priv;
+
+ MPQ_DVB_DBG_PRINT("%s(%d) executed\n", __func__, feed->pid);
+
+ mpq_dmx_terminate_feed(feed);
+
+ if (mpq_demux->source < DMX_SOURCE_DVR0) {
+ /* source from TSPP, need to configure tspp pipe */
+ ret = mpq_tspp_dmx_remove_channel(feed);
+ }
+
+ return ret;
+}
+
+static int mpq_tspp_dmx_write_to_decoder(
+ struct dvb_demux_feed *feed,
+ const u8 *buf,
+ size_t len)
+{
+ /*
+ * It is assumed that this function is called once for each
+ * TS packet of the relevant feed.
+ */
+ if (len > TSPP_RAW_TTS_SIZE)
+ MPQ_DVB_DBG_PRINT(
+			"%s: warning - len larger than one packet\n",
+ __func__);
+
+ if (dvb_dmx_is_video_feed(feed))
+ return mpq_dmx_process_video_packet(feed, buf);
+
+ if (dvb_dmx_is_pcr_feed(feed))
+ return mpq_dmx_process_pcr_packet(feed, buf);
+
+ return 0;
+}
+
+/**
+ * Returns demux capabilities of TSPPv1 plugin
+ *
+ * @demux: demux device
+ * @caps: Returned capabilities
+ *
+ * Return error code
+ */
+static int mpq_tspp_dmx_get_caps(struct dmx_demux *demux,
+ struct dmx_caps *caps)
+{
+ struct dvb_demux *dvb_demux = demux->priv;
+
+ if ((dvb_demux == NULL) || (caps == NULL)) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: invalid parameters\n",
+ __func__);
+
+ return -EINVAL;
+ }
+
+ caps->caps = DMX_CAP_PULL_MODE | DMX_CAP_VIDEO_DECODER_DATA |
+ DMX_CAP_TS_INSERTION | DMX_CAP_VIDEO_INDEXING |
+ DMX_CAP_AUTO_BUFFER_FLUSH;
+ caps->recording_max_video_pids_indexed = 0;
+ caps->num_decoders = MPQ_ADAPTER_MAX_NUM_OF_INTERFACES;
+ caps->num_demux_devices = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+ caps->num_pid_filters = TSPP_MAX_PID_FILTER_NUM;
+ caps->num_section_filters = dvb_demux->filternum;
+ caps->num_section_filters_per_pid = dvb_demux->filternum;
+ caps->section_filter_length = DMX_FILTER_SIZE;
+ caps->num_demod_inputs = TSIF_COUNT;
+ caps->num_memory_inputs = CONFIG_DVB_MPQ_NUM_DMX_DEVICES;
+ caps->max_bitrate = 192;
+ caps->demod_input_max_bitrate = 96;
+ caps->memory_input_max_bitrate = 96;
+ caps->num_cipher_ops = 1;
+
+	/* TSIF reports a 3-byte STC in units of 27MHz/256 */
+ caps->max_stc = (u64)0xFFFFFF * 256;
+
+ /* Buffer requirements */
+ caps->section.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->section.max_buffer_num = 1;
+ caps->section.max_size = 0xFFFFFFFF;
+ caps->section.size_alignment = 0;
+ caps->pes.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->pes.max_buffer_num = 1;
+ caps->pes.max_size = 0xFFFFFFFF;
+ caps->pes.size_alignment = 0;
+ caps->recording_188_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->recording_188_tsp.max_buffer_num = 1;
+ caps->recording_188_tsp.max_size = 0xFFFFFFFF;
+ caps->recording_188_tsp.size_alignment = 0;
+ caps->recording_192_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->recording_192_tsp.max_buffer_num = 1;
+ caps->recording_192_tsp.max_size = 0xFFFFFFFF;
+ caps->recording_192_tsp.size_alignment = 0;
+ caps->playback_188_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->playback_188_tsp.max_buffer_num = 1;
+ caps->playback_188_tsp.max_size = 0xFFFFFFFF;
+ caps->playback_188_tsp.size_alignment = 188;
+ caps->playback_192_tsp.flags =
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->playback_192_tsp.max_buffer_num = 1;
+ caps->playback_192_tsp.max_size = 0xFFFFFFFF;
+ caps->playback_192_tsp.size_alignment = 192;
+ caps->decoder.flags =
+ DMX_BUFFER_SECURED_IF_DECRYPTED |
+ DMX_BUFFER_EXTERNAL_SUPPORT |
+ DMX_BUFFER_INTERNAL_SUPPORT |
+ DMX_BUFFER_LINEAR_GROUP_SUPPORT |
+ DMX_BUFFER_CACHED;
+ caps->decoder.max_buffer_num = DMX_MAX_DECODER_BUFFER_NUM;
+ caps->decoder.max_size = 0xFFFFFFFF;
+ caps->decoder.size_alignment = SZ_4K;
+
+ return 0;
+}
+
+
+/**
+ * Reads TSIF STC from TSPP
+ *
+ * @demux: demux device
+ * @num: STC number. 0 for TSIF0 and 1 for TSIF1.
+ * @stc: STC value
+ * @base: divisor to get 90KHz value
+ *
+ * Return error code
+ */
+static int mpq_tspp_dmx_get_stc(struct dmx_demux *demux, unsigned int num,
+ u64 *stc, unsigned int *base)
+{
+ enum tspp_source source;
+ u32 tcr_counter;
+
+ if (!demux || !stc || !base)
+ return -EINVAL;
+
+ if (num == 0)
+ source = TSPP_SOURCE_TSIF0;
+ else if (num == 1)
+ source = TSPP_SOURCE_TSIF1;
+ else
+ return -EINVAL;
+
+ tspp_get_ref_clk_counter(0, source, &tcr_counter);
+
+ *stc = ((u64)tcr_counter) * 256; /* conversion to 27MHz */
+ *base = 300; /* divisor to get 90KHz clock from stc value */
+
+ return 0;
+}
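+
+/*
+ * Note (illustrative): given the stc and base returned above, a caller can
+ * derive a 90KHz timestamp with do_div(stc, base), since base is the
+ * divisor from the 27MHz STC domain down to 90KHz.
+ */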
+
+static int mpq_tspp_dmx_init(
+ struct dvb_adapter *mpq_adapter,
+ struct mpq_demux *mpq_demux)
+{
+ int result;
+
+ MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+ mpq_dmx_tspp_info.ion_client = mpq_demux->ion_client;
+
+ /* Set the kernel-demux object capabilities */
+ mpq_demux->demux.dmx.capabilities =
+ DMX_TS_FILTERING |
+ DMX_PES_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING |
+ DMX_CRC_CHECKING |
+ DMX_TS_DESCRAMBLING;
+
+ mpq_demux->decoder_alloc_flags = ION_FLAG_CACHED;
+
+ /* Set dvb-demux "virtual" function pointers */
+ mpq_demux->demux.priv = (void *)mpq_demux;
+ mpq_demux->demux.filternum = TSPP_MAX_SECTION_FILTER_NUM;
+ mpq_demux->demux.feednum = MPQ_MAX_DMX_FILES;
+ mpq_demux->demux.start_feed = mpq_tspp_dmx_start_filtering;
+ mpq_demux->demux.stop_feed = mpq_tspp_dmx_stop_filtering;
+ mpq_demux->demux.write_to_decoder = mpq_tspp_dmx_write_to_decoder;
+ mpq_demux->demux.decoder_fullness_init = mpq_dmx_decoder_fullness_init;
+ mpq_demux->demux.decoder_fullness_wait = mpq_dmx_decoder_fullness_wait;
+ mpq_demux->demux.decoder_fullness_abort =
+ mpq_dmx_decoder_fullness_abort;
+ mpq_demux->demux.decoder_buffer_status = mpq_dmx_decoder_buffer_status;
+ mpq_demux->demux.reuse_decoder_buffer = mpq_dmx_reuse_decoder_buffer;
+ mpq_demux->demux.set_cipher_op = mpq_dmx_set_cipher_ops;
+ mpq_demux->demux.oob_command = mpq_dmx_oob_command;
+ mpq_demux->demux.convert_ts = mpq_dmx_convert_tts;
+ mpq_demux->demux.flush_decoder_buffer = NULL;
+
+ /* Initialize dvb_demux object */
+ result = dvb_dmx_init(&mpq_demux->demux);
+ if (result < 0) {
+ MPQ_DVB_ERR_PRINT("%s: dvb_dmx_init failed\n", __func__);
+ goto init_failed;
+ }
+
+	/* Now initialize the dmx-dev object */
+ mpq_demux->dmxdev.filternum = MPQ_MAX_DMX_FILES;
+ mpq_demux->dmxdev.demux = &mpq_demux->demux.dmx;
+ mpq_demux->dmxdev.capabilities = DMXDEV_CAP_DUPLEX;
+
+ mpq_demux->dmxdev.demux->set_source = mpq_dmx_set_source;
+ mpq_demux->dmxdev.demux->get_stc = mpq_tspp_dmx_get_stc;
+ mpq_demux->dmxdev.demux->get_caps = mpq_tspp_dmx_get_caps;
+ mpq_demux->dmxdev.demux->map_buffer = mpq_dmx_map_buffer;
+ mpq_demux->dmxdev.demux->unmap_buffer = mpq_dmx_unmap_buffer;
+ mpq_demux->dmxdev.demux->write = mpq_dmx_write;
+ result = dvb_dmxdev_init(&mpq_demux->dmxdev, mpq_adapter);
+ if (result < 0) {
+ MPQ_DVB_ERR_PRINT("%s: dvb_dmxdev_init failed (errno=%d)\n",
+ __func__,
+ result);
+ goto init_failed_dmx_release;
+ }
+
+ /* Extend dvb-demux debugfs with TSPP statistics. */
+ mpq_dmx_init_debugfs_entries(mpq_demux);
+
+ return 0;
+
+init_failed_dmx_release:
+ dvb_dmx_release(&mpq_demux->demux);
+init_failed:
+ return result;
+}
+
+static int __init mpq_dmx_tspp_plugin_init(void)
+{
+ int i;
+ int j;
+ int ret;
+
+ MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+ for (i = 0; i < TSIF_COUNT; i++) {
+ mpq_dmx_tspp_info.tsif[i].aggregate_ids = NULL;
+ mpq_dmx_tspp_info.tsif[i].channel_ref = 0;
+ mpq_dmx_tspp_info.tsif[i].buff_index = 0;
+ mpq_dmx_tspp_info.tsif[i].ch_mem_heap_handle = NULL;
+ mpq_dmx_tspp_info.tsif[i].ch_mem_heap_virt_base = NULL;
+ mpq_dmx_tspp_info.tsif[i].ch_mem_heap_phys_base = 0;
+ atomic_set(&mpq_dmx_tspp_info.tsif[i].data_cnt, 0);
+ atomic_set(&mpq_dmx_tspp_info.tsif[i].control_op, 0);
+
+ for (j = 0; j < TSPP_MAX_PID_FILTER_NUM; j++) {
+ mpq_dmx_tspp_info.tsif[i].filters[j].pid = -1;
+ mpq_dmx_tspp_info.tsif[i].filters[j].ref_count = 0;
+ mpq_dmx_tspp_info.tsif[i].filters[j].hw_index = -1;
+ }
+
+ for (j = 0; j < TSPP_MAX_HW_PID_FILTER_NUM; j++)
+ mpq_dmx_tspp_info.tsif[i].hw_indexes[j] = 0;
+
+ mpq_dmx_tspp_info.tsif[i].current_filter_count = 0;
+ mpq_dmx_tspp_info.tsif[i].pass_nulls_flag = 0;
+ mpq_dmx_tspp_info.tsif[i].pass_all_flag = 0;
+ mpq_dmx_tspp_info.tsif[i].accept_all_filter_exists_flag = 0;
+
+ snprintf(mpq_dmx_tspp_info.tsif[i].name,
+ TSIF_NAME_LENGTH,
+ "dmx_tsif%d",
+ i);
+
+ init_waitqueue_head(&mpq_dmx_tspp_info.tsif[i].wait_queue);
+ mpq_dmx_tspp_info.tsif[i].thread =
+ kthread_run(
+ mpq_dmx_tspp_thread, (void *)(uintptr_t)i,
+ mpq_dmx_tspp_info.tsif[i].name);
+
+ if (IS_ERR(mpq_dmx_tspp_info.tsif[i].thread)) {
+ for (j = 0; j < i; j++) {
+ kthread_stop(mpq_dmx_tspp_info.tsif[j].thread);
+ mutex_destroy(&mpq_dmx_tspp_info.tsif[j].mutex);
+ }
+
+ MPQ_DVB_ERR_PRINT(
+ "%s: kthread_run failed\n",
+ __func__);
+
+ return -ENOMEM;
+ }
+
+ mutex_init(&mpq_dmx_tspp_info.tsif[i].mutex);
+ }
+
+ ret = mpq_dmx_plugin_init(mpq_tspp_dmx_init);
+
+ if (ret < 0) {
+ MPQ_DVB_ERR_PRINT(
+ "%s: mpq_dmx_plugin_init failed (errno=%d)\n",
+ __func__,
+ ret);
+
+ for (i = 0; i < TSIF_COUNT; i++) {
+ kthread_stop(mpq_dmx_tspp_info.tsif[i].thread);
+ mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
+ }
+ }
+
+ return ret;
+}
+
+static void __exit mpq_dmx_tspp_plugin_exit(void)
+{
+ int i;
+
+ MPQ_DVB_DBG_PRINT("%s executed\n", __func__);
+
+ for (i = 0; i < TSIF_COUNT; i++) {
+ mutex_lock(&mpq_dmx_tspp_info.tsif[i].mutex);
+
+ /*
+		 * Note: tspp_close_channel will also free the TSPP buffers,
+		 * even those we allocated ourselves, by calling our
+		 * free function.
+ */
+ if (mpq_dmx_tspp_info.tsif[i].channel_ref) {
+ tspp_unregister_notification(0,
+ TSPP_CHANNEL_ID(i, TSPP_CHANNEL));
+ tspp_close_channel(0,
+ TSPP_CHANNEL_ID(i, TSPP_CHANNEL));
+
+ if (allocation_mode ==
+ MPQ_DMX_TSPP_CONTIGUOUS_PHYS_ALLOC) {
+ vfree(mpq_dmx_tspp_info.tsif[i].aggregate_ids);
+ mpq_dmx_tspp_info.tsif[i].aggregate_ids = NULL;
+ mpq_dmx_channel_mem_free(i);
+ }
+ }
+
+ mutex_unlock(&mpq_dmx_tspp_info.tsif[i].mutex);
+ kthread_stop(mpq_dmx_tspp_info.tsif[i].thread);
+ mutex_destroy(&mpq_dmx_tspp_info.tsif[i].mutex);
+ }
+
+ mpq_dmx_plugin_exit();
+}
+
+
+module_init(mpq_dmx_tspp_plugin_init);
+module_exit(mpq_dmx_tspp_plugin_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies Inc. demux TSPP version 1 HW Plugin");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c
new file mode 100644
index 000000000000..625609a07f02
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.c
@@ -0,0 +1,1023 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include "qseecom_kernel.h"
+#include "mpq_sdmx.h"
+
+static struct qseecom_handle *sdmx_qseecom_handles[SDMX_MAX_SESSIONS];
+static struct mutex sdmx_lock[SDMX_MAX_SESSIONS];
+
+#define QSEECOM_SBUFF_SIZE SZ_128K
+
+enum sdmx_cmd_id {
+ SDMX_OPEN_SESSION_CMD,
+ SDMX_CLOSE_SESSION_CMD,
+ SDMX_SET_SESSION_CFG_CMD,
+ SDMX_ADD_FILTER_CMD,
+ SDMX_REMOVE_FILTER_CMD,
+ SDMX_SET_KL_IDX_CMD,
+ SDMX_ADD_RAW_PID_CMD,
+ SDMX_REMOVE_RAW_PID_CMD,
+ SDMX_PROCESS_CMD,
+ SDMX_GET_DBG_COUNTERS_CMD,
+ SDMX_RESET_DBG_COUNTERS_CMD,
+ SDMX_GET_VERSION_CMD,
+ SDMX_INVALIDATE_KL_CMD,
+ SDMX_SET_LOG_LEVEL_CMD
+};
+
+#pragma pack(push, sdmx, 1)
+
+struct sdmx_proc_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ u8 flags;
+ struct sdmx_buff_descr in_buf_descr;
+ u32 inp_fill_cnt;
+ u32 in_rd_offset;
+ u32 num_filters;
+ struct sdmx_filter_status filters_status[];
+};
+
+struct sdmx_proc_rsp {
+ enum sdmx_status ret;
+ u32 inp_fill_cnt;
+ u32 in_rd_offset;
+ u32 err_indicators;
+ u32 status_indicators;
+};
+
+struct sdmx_open_ses_req {
+ enum sdmx_cmd_id cmd_id;
+};
+
+struct sdmx_open_ses_rsp {
+ enum sdmx_status ret;
+ u32 session_handle;
+};
+
+struct sdmx_close_ses_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+};
+
+struct sdmx_close_ses_rsp {
+ enum sdmx_status ret;
+};
+
+struct sdmx_ses_cfg_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ enum sdmx_proc_mode process_mode;
+ enum sdmx_inp_mode input_mode;
+ enum sdmx_pkt_format packet_len;
+ u8 odd_scramble_bits;
+ u8 even_scramble_bits;
+};
+
+struct sdmx_ses_cfg_rsp {
+ enum sdmx_status ret;
+};
+
+struct sdmx_set_kl_ind_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ u32 pid;
+ u32 kl_index;
+};
+
+struct sdmx_set_kl_ind_rsp {
+ enum sdmx_status ret;
+};
+
+struct sdmx_add_filt_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ u32 pid;
+ enum sdmx_filter filter_type;
+ struct sdmx_buff_descr meta_data_buf;
+ enum sdmx_buf_mode buffer_mode;
+ enum sdmx_raw_out_format ts_out_format;
+ u32 flags;
+ u32 num_data_bufs;
+ struct sdmx_data_buff_descr data_bufs[];
+};
+
+struct sdmx_add_filt_rsp {
+ enum sdmx_status ret;
+ u32 filter_handle;
+};
+
+struct sdmx_rem_filt_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ u32 filter_handle;
+};
+
+struct sdmx_rem_filt_rsp {
+ enum sdmx_status ret;
+};
+
+struct sdmx_add_raw_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ u32 filter_handle;
+ u32 pid;
+};
+
+struct sdmx_add_raw_rsp {
+ enum sdmx_status ret;
+};
+
+struct sdmx_rem_raw_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ u32 filter_handle;
+ u32 pid;
+};
+
+struct sdmx_rem_raw_rsp {
+ enum sdmx_status ret;
+};
+
+struct sdmx_get_counters_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+ u32 num_filters;
+};
+
+struct sdmx_get_counters_rsp {
+ enum sdmx_status ret;
+ struct sdmx_session_dbg_counters session_counters;
+ u32 num_filters;
+ struct sdmx_filter_dbg_counters filter_counters[];
+};
+
+struct sdmx_rst_counters_req {
+ enum sdmx_cmd_id cmd_id;
+ u32 session_handle;
+};
+
+struct sdmx_rst_counters_rsp {
+ enum sdmx_status ret;
+};
+
+struct sdmx_get_version_req {
+ enum sdmx_cmd_id cmd_id;
+};
+
+struct sdmx_get_version_rsp {
+ enum sdmx_status ret;
+ int32_t version;
+};
+
+struct sdmx_set_log_level_req {
+ enum sdmx_cmd_id cmd_id;
+ enum sdmx_log_level level;
+ u32 session_handle;
+};
+
+struct sdmx_set_log_level_rsp {
+ enum sdmx_status ret;
+};
+
+#pragma pack(pop, sdmx)
+
+static int get_cmd_rsp_buffers(int handle_index,
+ void **cmd,
+ int *cmd_len,
+ void **rsp,
+ int *rsp_len)
+{
+ if (*cmd_len & QSEECOM_ALIGN_MASK)
+ *cmd_len = QSEECOM_ALIGN(*cmd_len);
+
+ if (*rsp_len & QSEECOM_ALIGN_MASK)
+ *rsp_len = QSEECOM_ALIGN(*rsp_len);
+
+ if ((*rsp_len + *cmd_len) > QSEECOM_SBUFF_SIZE) {
+ pr_err("%s: shared buffer too small to hold cmd=%d and rsp=%d\n",
+ __func__, *cmd_len, *rsp_len);
+ return SDMX_STATUS_OUT_OF_MEM;
+ }
+
+ *cmd = sdmx_qseecom_handles[handle_index]->sbuf;
+ *rsp = sdmx_qseecom_handles[handle_index]->sbuf + *cmd_len;
+ return SDMX_SUCCESS;
+}
+
+/*
+ * Returns the version of the secure demux application.
+ *
+ * @session_handle: handle of a secure demux instance.
+ * @version: returned version. Must not be NULL.
+ * Return error code
+ */
+int sdmx_get_version(int session_handle, int32_t *version)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_get_version_req *cmd;
+ struct sdmx_get_version_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+ (version == NULL))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_get_version_req);
+ rsp_len = sizeof(struct sdmx_get_version_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_GET_VERSION_CMD;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+ *version = rsp->version;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_get_version);
+
+/*
+ * Initializes a new secure demux instance and returns a handle of the instance.
+ *
+ * @session_handle: Returned instance handle. Must not be NULL.
+ * Return error code
+ */
+int sdmx_open_session(int *session_handle)
+{
+ int res, cmd_len, rsp_len;
+ enum sdmx_status ret, version_ret;
+ struct sdmx_open_ses_req *cmd;
+ struct sdmx_open_ses_rsp *rsp;
+ struct qseecom_handle *qseecom_handle = NULL;
+ int32_t version;
+
+ /* Input validation */
+ if (session_handle == NULL)
+ return SDMX_STATUS_GENERAL_FAILURE;
+
+ /* Start the TZ app */
+ res = qseecom_start_app(&qseecom_handle, "securemm",
+ QSEECOM_SBUFF_SIZE);
+
+ if (res < 0)
+ return SDMX_STATUS_GENERAL_FAILURE;
+
+ cmd_len = sizeof(struct sdmx_open_ses_req);
+ rsp_len = sizeof(struct sdmx_open_ses_rsp);
+
+ /* Get command and response buffers */
+ cmd = (struct sdmx_open_ses_req *)qseecom_handle->sbuf;
+
+ if (cmd_len & QSEECOM_ALIGN_MASK)
+ cmd_len = QSEECOM_ALIGN(cmd_len);
+
+	rsp = (struct sdmx_open_ses_rsp *)(qseecom_handle->sbuf + cmd_len);
+
+ if (rsp_len & QSEECOM_ALIGN_MASK)
+ rsp_len = QSEECOM_ALIGN(rsp_len);
+
+ /* Will be later overridden by SDMX response */
+ *session_handle = SDMX_INVALID_SESSION_HANDLE;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_OPEN_SESSION_CMD;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(qseecom_handle, (void *)cmd, cmd_len,
+ (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ qseecom_shutdown_app(&qseecom_handle);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ /* Parse response struct */
+ *session_handle = rsp->session_handle;
+
+ /* Initialize handle and mutex */
+ sdmx_qseecom_handles[*session_handle] = qseecom_handle;
+ mutex_init(&sdmx_lock[*session_handle]);
+ ret = rsp->ret;
+
+ /* Get and print the app version */
+ version_ret = sdmx_get_version(*session_handle, &version);
+ if (version_ret == SDMX_SUCCESS)
+ pr_info("TZ SDMX version is %x.%x\n", version >> 8,
+ version & 0xFF);
+ else
+ pr_err("Error reading TZ SDMX version\n");
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_open_session);
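+
+/*
+ * Typical call sequence (illustrative sketch only; pid, md_buf and data_buf
+ * below are hypothetical caller-provided values):
+ *
+ *	int session, filter;
+ *
+ *	if (sdmx_open_session(&session) != SDMX_SUCCESS)
+ *		return;
+ *	sdmx_set_session_cfg(session, SDMX_PUSH_MODE, SDMX_CLEAR_MODE,
+ *		SDMX_188_BYTE_PKT, 0x3, 0x2);
+ *	sdmx_add_filter(session, pid, SDMX_RAW_FILTER, &md_buf,
+ *		SDMX_RING_BUF, 1, &data_buf, &filter, SDMX_188_OUTPUT, 0);
+ *	... feed input using sdmx_process() ...
+ *	sdmx_remove_filter(session, filter);
+ *	sdmx_close_session(session);
+ */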
+
+/*
+ * Closes a secure demux instance.
+ *
+ * @session_handle: handle of a secure demux instance to close.
+ * Return error code
+ */
+int sdmx_close_session(int session_handle)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_close_ses_req *cmd;
+ struct sdmx_close_ses_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_close_ses_req);
+ rsp_len = sizeof(struct sdmx_close_ses_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_CLOSE_SESSION_CMD;
+ cmd->session_handle = session_handle;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+
+ /* Shutdown the TZ app (or at least free the current handle) */
+ res = qseecom_shutdown_app(&sdmx_qseecom_handles[session_handle]);
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ sdmx_qseecom_handles[session_handle] = NULL;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_close_session);
+
+/*
+ * Configures an open secure demux instance.
+ *
+ * @session_handle: secure demux instance
+ * @proc_mode: Defines secure demux's behavior in case of output
+ * buffer overflow.
+ * @inp_mode: Defines the input encryption settings.
+ * @pkt_format: TS packet length in input buffer.
+ * @odd_scramble_bits: Value of the scramble bits indicating the ODD key.
+ * @even_scramble_bits: Value of the scramble bits indicating the EVEN key.
+ * Return error code
+ */
+int sdmx_set_session_cfg(int session_handle,
+ enum sdmx_proc_mode proc_mode,
+ enum sdmx_inp_mode inp_mode,
+ enum sdmx_pkt_format pkt_format,
+ u8 odd_scramble_bits,
+ u8 even_scramble_bits)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_ses_cfg_req *cmd;
+ struct sdmx_ses_cfg_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_ses_cfg_req);
+ rsp_len = sizeof(struct sdmx_ses_cfg_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_SET_SESSION_CFG_CMD;
+ cmd->session_handle = session_handle;
+ cmd->process_mode = proc_mode;
+ cmd->input_mode = inp_mode;
+ cmd->packet_len = pkt_format;
+ cmd->odd_scramble_bits = odd_scramble_bits;
+ cmd->even_scramble_bits = even_scramble_bits;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_set_session_cfg);
+
+/*
+ * Creates a new secure demux filter and returns a filter handle
+ *
+ * @session_handle: secure demux instance
+ * @pid: pid to filter
+ * @filter_type: type of filtering
+ * @meta_data_buf: meta data buffer descriptor
+ * @data_buf_mode: data buffer mode (ring/linear)
+ * @num_data_bufs: number of data buffers (use 1 for a ring buffer)
+ * @data_bufs: data buffers descriptors array
+ * @filter_handle: returned filter handle
+ * @ts_out_format: output format for raw filters
+ * @flags: optional flags for filter
+ * (currently only clear section CRC verification is supported)
+ *
+ * Return error code
+ */
+int sdmx_add_filter(int session_handle,
+ u16 pid,
+	enum sdmx_filter filter_type,
+ struct sdmx_buff_descr *meta_data_buf,
+	enum sdmx_buf_mode data_buf_mode,
+ u32 num_data_bufs,
+ struct sdmx_data_buff_descr *data_bufs,
+ int *filter_handle,
+ enum sdmx_raw_out_format ts_out_format,
+ u32 flags)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_add_filt_req *cmd;
+ struct sdmx_add_filt_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+ (filter_handle == NULL))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_add_filt_req)
+ + num_data_bufs * sizeof(struct sdmx_data_buff_descr);
+ rsp_len = sizeof(struct sdmx_add_filt_rsp);
+
+ /* Will be later overridden by SDMX response */
+ *filter_handle = SDMX_INVALID_FILTER_HANDLE;
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_ADD_FILTER_CMD;
+ cmd->session_handle = session_handle;
+ cmd->pid = (u32)pid;
+	cmd->filter_type = filter_type;
+ cmd->ts_out_format = ts_out_format;
+ cmd->flags = flags;
+ if (meta_data_buf != NULL)
+ memcpy(&(cmd->meta_data_buf), meta_data_buf,
+ sizeof(struct sdmx_buff_descr));
+ else
+ memset(&(cmd->meta_data_buf), 0, sizeof(cmd->meta_data_buf));
+
+	cmd->buffer_mode = data_buf_mode;
+ cmd->num_data_bufs = num_data_bufs;
+ memcpy(cmd->data_bufs, data_bufs,
+ num_data_bufs * sizeof(struct sdmx_data_buff_descr));
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ /* Parse response struct */
+ *filter_handle = rsp->filter_handle;
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_add_filter);
+
+/*
+ * Removes a secure demux filter
+ *
+ * @session_handle: secure demux instance
+ * @filter_handle: filter handle to remove
+ *
+ * Return error code
+ */
+int sdmx_remove_filter(int session_handle, int filter_handle)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_rem_filt_req *cmd;
+ struct sdmx_rem_filt_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_rem_filt_req);
+ rsp_len = sizeof(struct sdmx_rem_filt_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_REMOVE_FILTER_CMD;
+ cmd->session_handle = session_handle;
+ cmd->filter_handle = filter_handle;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_remove_filter);
+
+/*
+ * Associates a key ladder index for the specified pid
+ *
+ * @session_handle: secure demux instance
+ * @pid: pid
+ * @key_ladder_index: key ladder index to associate to the pid
+ *
+ * Return error code
+ *
+ * Note: if pid already has some key ladder index associated, it will be
+ * overridden.
+ */
+int sdmx_set_kl_ind(int session_handle, u16 pid, u32 key_ladder_index)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_set_kl_ind_req *cmd;
+ struct sdmx_set_kl_ind_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_set_kl_ind_req);
+ rsp_len = sizeof(struct sdmx_set_kl_ind_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_SET_KL_IDX_CMD;
+ cmd->session_handle = session_handle;
+ cmd->pid = (u32)pid;
+ cmd->kl_index = key_ladder_index;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_set_kl_ind);
+
+/*
+ * Adds the specified pid to an existing raw (recording) filter
+ *
+ * @session_handle: secure demux instance
+ * @filter_handle: raw filter handle
+ * @pid: pid
+ *
+ * Return error code
+ */
+int sdmx_add_raw_pid(int session_handle, int filter_handle, u16 pid)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_add_raw_req *cmd;
+ struct sdmx_add_raw_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_add_raw_req);
+ rsp_len = sizeof(struct sdmx_add_raw_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_ADD_RAW_PID_CMD;
+ cmd->session_handle = session_handle;
+ cmd->filter_handle = filter_handle;
+ cmd->pid = (u32)pid;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_add_raw_pid);
+
+/*
+ * Removes the specified pid from a raw (recording) filter
+ *
+ * @session_handle: secure demux instance
+ * @filter_handle: raw filter handle
+ * @pid: pid
+ *
+ * Return error code
+ */
+int sdmx_remove_raw_pid(int session_handle, int filter_handle, u16 pid)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_rem_raw_req *cmd;
+ struct sdmx_rem_raw_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_rem_raw_req);
+ rsp_len = sizeof(struct sdmx_rem_raw_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_REMOVE_RAW_PID_CMD;
+ cmd->session_handle = session_handle;
+ cmd->filter_handle = filter_handle;
+ cmd->pid = (u32)pid;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_remove_raw_pid);
+
+/*
+ * Call secure demux to perform processing on the specified input buffer
+ *
+ * @session_handle: secure demux instance
+ * @flags: input flags. Currently only EOS marking is supported.
+ * @input_buf_desc: input buffer descriptor
+ * @input_fill_count: number of bytes available in input buffer
+ * @input_read_offset: offset inside input buffer where data starts
+ * @error_indicators: returned general error indicators
+ * @status_indicators: returned general status indicators
+ * @num_filters: number of filters in filter status array
+ * @filter_status: filter status descriptor array
+ *
+ * Return error code
+ */
+int sdmx_process(int session_handle, u8 flags,
+ struct sdmx_buff_descr *input_buf_desc,
+ u32 *input_fill_count,
+ u32 *input_read_offset,
+ u32 *error_indicators,
+ u32 *status_indicators,
+ u32 num_filters,
+ struct sdmx_filter_status *filter_status)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_proc_req *cmd;
+ struct sdmx_proc_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+ (input_buf_desc == NULL) ||
+ (input_fill_count == NULL) || (input_read_offset == NULL) ||
+ (error_indicators == NULL) || (status_indicators == NULL) ||
+ (filter_status == NULL))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_proc_req)
+ + num_filters * sizeof(struct sdmx_filter_status);
+ rsp_len = sizeof(struct sdmx_proc_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_PROCESS_CMD;
+ cmd->session_handle = session_handle;
+ cmd->flags = flags;
+ cmd->in_buf_descr.base_addr = input_buf_desc->base_addr;
+ cmd->in_buf_descr.size = input_buf_desc->size;
+ cmd->inp_fill_cnt = *input_fill_count;
+ cmd->in_rd_offset = *input_read_offset;
+ cmd->num_filters = num_filters;
+ memcpy(cmd->filters_status, filter_status,
+ num_filters * sizeof(struct sdmx_filter_status));
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ /* Parse response struct */
+ *input_fill_count = rsp->inp_fill_cnt;
+ *input_read_offset = rsp->in_rd_offset;
+ *error_indicators = rsp->err_indicators;
+ *status_indicators = rsp->status_indicators;
+ memcpy(filter_status, cmd->filters_status,
+ num_filters * sizeof(struct sdmx_filter_status));
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_process);
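+
+/*
+ * Illustrative note on driving sdmx_process() (hypothetical variable names):
+ * the caller passes the current fill count and read offset of its input
+ * buffer plus one sdmx_filter_status entry per active filter, and on return
+ * picks up the updated fill count / read offset together with the per-filter
+ * error and status indicator bitmaps, e.g.:
+ *
+ *	ret = sdmx_process(session, 0, &in_buf, &fill_count, &read_offset,
+ *		&errors, &status, num_filters, filter_status);
+ */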
+
+/*
+ * Returns session-level & filter-level debug counters
+ *
+ * @session_handle: secure demux instance
+ * @session_counters: returned session-level debug counters
+ * @num_filters: returned number of filters reported in filter_counters
+ * @filter_counters: returned filter-level debug counters array
+ *
+ * Return error code
+ */
+int sdmx_get_dbg_counters(int session_handle,
+ struct sdmx_session_dbg_counters *session_counters,
+ u32 *num_filters,
+ struct sdmx_filter_dbg_counters *filter_counters)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_get_counters_req *cmd;
+ struct sdmx_get_counters_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS) ||
+ (session_counters == NULL) || (num_filters == NULL) ||
+ (filter_counters == NULL))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_get_counters_req);
+ rsp_len = sizeof(struct sdmx_get_counters_rsp)
+ + *num_filters * sizeof(struct sdmx_filter_dbg_counters);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_GET_DBG_COUNTERS_CMD;
+ cmd->session_handle = session_handle;
+ cmd->num_filters = *num_filters;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ /* Parse response struct */
+ *session_counters = rsp->session_counters;
+ *num_filters = rsp->num_filters;
+ memcpy(filter_counters, rsp->filter_counters,
+ *num_filters * sizeof(struct sdmx_filter_dbg_counters));
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_get_dbg_counters);
+
+/*
+ * Reset debug counters
+ *
+ * @session_handle: secure demux instance
+ *
+ * Return error code
+ */
+int sdmx_reset_dbg_counters(int session_handle)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_rst_counters_req *cmd;
+ struct sdmx_rst_counters_rsp *rsp;
+ enum sdmx_status ret;
+
+ if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+ return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_rst_counters_req);
+ rsp_len = sizeof(struct sdmx_rst_counters_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_RESET_DBG_COUNTERS_CMD;
+ cmd->session_handle = session_handle;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+
+ ret = rsp->ret;
+out:
+ mutex_unlock(&sdmx_lock[session_handle]);
+
+ return ret;
+}
+EXPORT_SYMBOL(sdmx_reset_dbg_counters);
+
+/*
+ * Set debug log verbosity level
+ *
+ * @session_handle: secure demux instance
+ * @level: requested log level
+ *
+ * Return error code
+ */
+int sdmx_set_log_level(int session_handle, enum sdmx_log_level level)
+{
+ int res, cmd_len, rsp_len;
+ struct sdmx_set_log_level_req *cmd;
+ struct sdmx_set_log_level_rsp *rsp;
+	enum sdmx_status ret;
+
+	if ((session_handle < 0) || (session_handle >= SDMX_MAX_SESSIONS))
+		return SDMX_STATUS_INVALID_INPUT_PARAMS;
+
+ cmd_len = sizeof(struct sdmx_set_log_level_req);
+ rsp_len = sizeof(struct sdmx_set_log_level_rsp);
+
+ /* Lock shared memory */
+ mutex_lock(&sdmx_lock[session_handle]);
+
+ /* Get command and response buffers */
+ ret = get_cmd_rsp_buffers(session_handle, (void **)&cmd, &cmd_len,
+ (void **)&rsp, &rsp_len);
+ if (ret)
+ goto out;
+
+ /* Populate command struct */
+ cmd->cmd_id = SDMX_SET_LOG_LEVEL_CMD;
+ cmd->session_handle = session_handle;
+ cmd->level = level;
+
+ /* Issue QSEECom command */
+ res = qseecom_send_command(sdmx_qseecom_handles[session_handle],
+ (void *)cmd, cmd_len, (void *)rsp, rsp_len);
+ if (res < 0) {
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return SDMX_STATUS_GENERAL_FAILURE;
+ }
+ ret = rsp->ret;
+out:
+ /* Unlock */
+ mutex_unlock(&sdmx_lock[session_handle]);
+ return ret;
+}
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
new file mode 100644
index 000000000000..799f688d186a
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/demux/mpq_sdmx.h
@@ -0,0 +1,368 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_SDMX_H
+#define _MPQ_SDMX_H
+
+#include <linux/types.h>
+
+/* Constant declarations */
+#define SDMX_MAX_SESSIONS (4)
+#define SDMX_LOOPBACK_PID (0x2000)
+
+#define SDMX_MAX_PHYSICAL_CHUNKS (256)
+
+/* Filter-level error indicators */
+#define SDMX_FILTER_SUCCESS (0)
+#define SDMX_FILTER_ERR_MD_BUF_FULL BIT(0)
+#define SDMX_FILTER_ERR_D_BUF_FULL BIT(1)
+#define SDMX_FILTER_ERR_D_LIN_BUFS_FULL BIT(2)
+#define SDMX_FILTER_ERR_INVALID_SCRAMBLE_BITS BIT(3)
+#define SDMX_FILTER_ERR_KL_IND_NOT_SET BIT(4)
+#define SDMX_FILTER_ERR_CAS_DECRYPT_ERROR BIT(5)
+#define SDMX_FILTER_ERR_SEC_VERIF_CRC32_FAIL BIT(6)
+#define SDMX_FILTER_ERR_SEC_INTERNAL_MALLOC_FAIL BIT(7)
+#define SDMX_FILTER_ERR_SEC_LEN_INVALID BIT(8)
+#define SDMX_FILTER_ERR_SEC_PUSI_PTR_INVALID BIT(9)
+#define SDMX_FILTER_ERR_TS_SYNC_BYTE_INVALID BIT(10)
+#define SDMX_FILTER_ERR_TS_TRANSPORT_ERR BIT(11)
+#define SDMX_FILTER_ERR_CONT_CNT_INVALID BIT(12)
+#define SDMX_FILTER_ERR_CONT_CNT_DUPLICATE BIT(13)
+#define SDMX_FILTER_ERR_INVALID_PES_HDR BIT(14)
+#define SDMX_FILTER_ERR_INVALID_PES_LEN BIT(15)
+#define SDMX_FILTER_ERR_INVALID_PES_ENCRYPTION BIT(16)
+#define SDMX_FILTER_ERR_SECURITY_FAULT BIT(17)
+#define SDMX_FILTER_ERR_IN_NS_BUFFER BIT(18)
+
+/* Filter-level status indicators */
+#define SDMX_FILTER_STATUS_EOS BIT(0)
+#define SDMX_FILTER_STATUS_WR_PTR_CHANGED BIT(1)
+
+/* Filter-level flags */
+#define SDMX_FILTER_FLAG_VERIFY_SECTION_CRC BIT(0)
+
+#define SDMX_INVALID_SESSION_HANDLE (-1)
+#define SDMX_INVALID_FILTER_HANDLE (-1)
+
+/* Input flags */
+#define SDMX_INPUT_FLAG_EOS BIT(0)
+#define SDMX_INPUT_FLAG_DBG_ENABLE BIT(1)
+
+
+enum sdmx_buf_mode {
+ SDMX_RING_BUF,
+ SDMX_LINEAR_GROUP_BUF,
+};
+
+enum sdmx_proc_mode {
+ SDMX_PUSH_MODE,
+ SDMX_PULL_MODE,
+};
+
+enum sdmx_inp_mode {
+ SDMX_PKT_ENC_MODE,
+ SDMX_BULK_ENC_MODE,
+ SDMX_CLEAR_MODE,
+};
+
+enum sdmx_pkt_format {
+ SDMX_188_BYTE_PKT = 188,
+ SDMX_192_BYTE_PKT = 192,
+ SDMX_195_BYTE_PKT = 195,
+};
+
+enum sdmx_log_level {
+ SDMX_LOG_NO_PRINT,
+ SDMX_LOG_MSG_ERROR,
+ SDMX_LOG_DEBUG,
+ SDMX_LOG_VERBOSE
+};
+
+enum sdmx_status {
+ SDMX_SUCCESS = 0,
+ SDMX_STATUS_GENERAL_FAILURE = -1,
+ SDMX_STATUS_MAX_OPEN_SESSIONS_REACHED = -2,
+ SDMX_STATUS_INVALID_SESSION_HANDLE = -3,
+ SDMX_STATUS_INVALID_INPUT_PARAMS = -4,
+ SDMX_STATUS_UNSUPPORTED_MODE = -5,
+ SDMX_STATUS_INVALID_PID = -6,
+ SDMX_STATUS_OUT_OF_MEM = -7,
+ SDMX_STATUS_FILTER_EXISTS = -8,
+ SDMX_STATUS_INVALID_FILTER_HANDLE = -9,
+ SDMX_STATUS_MAX_RAW_PIDS_REACHED = -10,
+ SDMX_STATUS_SINGLE_PID_RAW_FILTER = -11,
+ SDMX_STATUS_INP_BUF_INVALID_PARAMS = -12,
+ SDMX_STATUS_INVALID_FILTER_CFG = -13,
+ SDMX_STATUS_STALLED_IN_PULL_MODE = -14,
+ SDMX_STATUS_SECURITY_FAULT = -15,
+ SDMX_STATUS_NS_BUFFER_ERROR = -16,
+};
+
+enum sdmx_filter {
+ SDMX_PES_FILTER, /* Other PES */
+ SDMX_SEPARATED_PES_FILTER, /* Separated PES (for decoder) */
+ SDMX_SECTION_FILTER, /* Section */
+ SDMX_PCR_FILTER, /* PCR */
+ SDMX_RAW_FILTER, /* Recording */
+};
+
+enum sdmx_raw_out_format {
+ SDMX_188_OUTPUT,
+ SDMX_192_HEAD_OUTPUT,
+ SDMX_192_TAIL_OUTPUT
+};
+
+#pragma pack(push, sdmx, 1)
+
+struct sdmx_session_dbg_counters {
+ /* Total number of TS-packets input to SDMX. */
+ u32 ts_pkt_in;
+
+ /* Total number of TS-packets filtered out by SDMX. */
+ u32 ts_pkt_out;
+};
+
+struct sdmx_filter_dbg_counters {
+ int filter_handle;
+
+ /* Number of TS-packets filtered. */
+ u32 ts_pkt_count;
+
+ /* Number of TS-packets with adaptation field only (no payload). */
+ u32 ts_pkt_no_payload;
+
+ /* Number of TS-packets with the discontinuity indicator set. */
+ u32 ts_pkt_discont;
+
+ /* Number of duplicate TS-packets detected. */
+ u32 ts_pkt_dup;
+
+ /* Number of packets not decrypted because the key wasn't ready. */
+ u32 ts_pkt_key_not_ready;
+};
+
+struct sdmx_pes_counters {
+ /* Number of TS packets with the TEI flag set */
+ u32 transport_err_count;
+
+ /* Number of TS packets with continuity counter errors */
+ u32 continuity_err_count;
+
+ /* Number of TS packets composing this PES frame */
+ u32 pes_ts_count;
+
+ /* Number of TS packets dropped due to full buffer */
+ u32 drop_count;
+};
+
+struct sdmx_buff_descr {
+ /* Physical address where buffer starts */
+ u64 base_addr;
+
+ /* Size of buffer */
+ u32 size;
+};
+
+struct sdmx_data_buff_descr {
+ /* Physical chunks of the buffer */
+ struct sdmx_buff_descr buff_chunks[SDMX_MAX_PHYSICAL_CHUNKS];
+
+ /* Length of buffer */
+ u32 length;
+};
+
+/*
+ * Data payload residing in the data buffers is described using this meta-data
+ * header. The header specifies where the payload is located in the data
+ * buffer and how big it is. Additional relevant meta-data may optionally
+ * follow the header immediately.
+ */
+struct sdmx_metadata_header {
+ /*
+ * Payload start offset inside data buffer. In case data is managed
+ * as a linear buffer group, this specifies buffer index.
+ */
+ u32 payload_start;
+
+ /* Payload length */
+ u32 payload_length;
+
+ /* Number of meta data bytes immediately following this header */
+ u32 metadata_length;
+};
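+
+/*
+ * Illustrative layout (assumption based on the description above): a consumer
+ * reads an sdmx_metadata_header from the meta-data buffer, then
+ * metadata_length bytes of packet-specific meta-data, and finds
+ * payload_length bytes of payload at offset (or buffer index) payload_start
+ * of the corresponding data buffer.
+ */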
+
+
+struct sdmx_filter_status {
+ /* Secure demux filter handle */
+ int filter_handle;
+
+ /*
+ * Number of pending bytes in filter's output data buffer.
+ * For linear buffer mode, this is number of buffers pending.
+ */
+ u32 data_fill_count;
+
+ /*
+ * Offset in data buffer for next data payload to be written.
+ * For linear buffer mode, this is a buffer index.
+ */
+ u32 data_write_offset;
+
+ /* Number of pending bytes in filter's output meta data buffer */
+ u32 metadata_fill_count;
+
+ /* Offset in meta data buffer for next metadata header to be written */
+ u32 metadata_write_offset;
+
+ /* Errors (bitmap) reported by secure demux for this filter */
+ u32 error_indicators;
+
+ /* General status (bitmap) reported by secure demux for this filter */
+ u32 status_indicators;
+};
+#pragma pack(pop, sdmx)
+
+#ifdef CONFIG_QSEECOM
+
+int sdmx_open_session(int *session_handle);
+
+int sdmx_close_session(int session_handle);
+
+int sdmx_get_version(int session_handle, int32_t *version);
+
+int sdmx_set_session_cfg(int session_handle, enum sdmx_proc_mode proc_mode,
+ enum sdmx_inp_mode inp_mode, enum sdmx_pkt_format pkt_format,
+ u8 odd_scramble_bits, u8 even_scramble_bits);
+
+int sdmx_add_filter(int session_handle, u16 pid, enum sdmx_filter filter_type,
+ struct sdmx_buff_descr *meta_data_buf, enum sdmx_buf_mode data_buf_mode,
+ u32 num_data_bufs, struct sdmx_data_buff_descr *data_bufs,
+ int *filter_handle, enum sdmx_raw_out_format ts_out_format, u32 flags);
+
+int sdmx_remove_filter(int session_handle, int filter_handle);
+
+int sdmx_set_kl_ind(int session_handle, u16 pid, u32 key_ladder_index);
+
+int sdmx_add_raw_pid(int session_handle, int filter_handle, u16 pid);
+
+int sdmx_remove_raw_pid(int session_handle, int filter_handle, u16 pid);
+
+int sdmx_process(int session_handle, u8 flags,
+ struct sdmx_buff_descr *input_buf_desc,
+ u32 *input_fill_count, u32 *input_read_offset,
+ u32 *error_indicators,
+ u32 *status_indicators,
+ u32 num_filters,
+ struct sdmx_filter_status *filter_status);
+
+int sdmx_get_dbg_counters(int session_handle,
+ struct sdmx_session_dbg_counters *session_counters,
+ u32 *num_filters,
+ struct sdmx_filter_dbg_counters *filter_counters);
+
+int sdmx_reset_dbg_counters(int session_handle);
+
+int sdmx_set_log_level(int session_handle, enum sdmx_log_level level);
+
+#else
+
+static inline int sdmx_open_session(int *session_handle)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_close_session(int session_handle)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_get_version(int session_handle, int32_t *version)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_set_session_cfg(int session_handle,
+ enum sdmx_proc_mode proc_mode,
+ enum sdmx_inp_mode inp_mode, enum sdmx_pkt_format pkt_format,
+ u8 odd_scramble_bits, u8 even_scramble_bits)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_add_filter(int session_handle, u16 pid,
+ enum sdmx_filter filter_type,
+ struct sdmx_buff_descr *meta_data_buf, enum sdmx_buf_mode data_buf_mode,
+ u32 num_data_bufs, struct sdmx_data_buff_descr *data_bufs,
+ int *filter_handle, enum sdmx_raw_out_format ts_out_format, u32 flags)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_remove_filter(int session_handle, int filter_handle)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_set_kl_ind(int session_handle, u16 pid,
+ u32 key_ladder_index)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_add_raw_pid(int session_handle, int filter_handle,
+ u16 pid)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_remove_raw_pid(int session_handle, int filter_handle,
+ u16 pid)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_process(int session_handle, u8 flags,
+ struct sdmx_buff_descr *input_buf_desc,
+ u32 *input_fill_count, u32 *input_read_offset,
+ u32 *error_indicators,
+ u32 *status_indicators,
+ u32 num_filters,
+ struct sdmx_filter_status *filter_status)
+{
+ *status_indicators = 0;
+ *error_indicators = 0;
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_get_dbg_counters(int session_handle,
+ struct sdmx_session_dbg_counters *session_counters,
+ u32 *num_filters,
+ struct sdmx_filter_dbg_counters *filter_counters)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_reset_dbg_counters(int session_handle)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+static inline int sdmx_set_log_level(int session_handle,
+ enum sdmx_log_level level)
+{
+ return SDMX_STATUS_GENERAL_FAILURE;
+}
+
+#endif
+
+#endif /* _MPQ_SDMX_H */
diff --git a/drivers/media/platform/msm/dvb/include/mpq_adapter.h b/drivers/media/platform/msm/dvb/include/mpq_adapter.h
new file mode 100644
index 000000000000..54e671c3b38d
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/include/mpq_adapter.h
@@ -0,0 +1,199 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_ADAPTER_H
+#define _MPQ_ADAPTER_H
+
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "mpq_stream_buffer.h"
+
+
+
+/** IDs of interfaces holding stream-buffers */
+enum mpq_adapter_stream_if {
+ /** Interface holding stream-buffer for video0 stream */
+ MPQ_ADAPTER_VIDEO0_STREAM_IF = 0,
+
+ /** Interface holding stream-buffer for video1 stream */
+ MPQ_ADAPTER_VIDEO1_STREAM_IF = 1,
+
+	/** Interface holding stream-buffer for video2 stream */
+ MPQ_ADAPTER_VIDEO2_STREAM_IF = 2,
+
+	/** Interface holding stream-buffer for video3 stream */
+ MPQ_ADAPTER_VIDEO3_STREAM_IF = 3,
+
+ /** Maximum number of interfaces holding stream-buffers */
+ MPQ_ADAPTER_MAX_NUM_OF_INTERFACES,
+};
+
+enum dmx_packet_type {
+ DMX_PES_PACKET,
+ DMX_FRAMING_INFO_PACKET,
+ DMX_EOS_PACKET,
+ DMX_MARKER_PACKET
+};
+
+struct dmx_pts_dts_info {
+ /** Indication whether PTS exist */
+ int pts_exist;
+
+ /** Indication whether DTS exist */
+ int dts_exist;
+
+ /** PTS value associated with the PES data if any */
+ u64 pts;
+
+ /** DTS value associated with the PES data if any */
+ u64 dts;
+};
+
+struct dmx_framing_packet_info {
+ /** framing pattern type, one of DMX_IDX_* definitions */
+ u64 pattern_type;
+
+ /** PTS/DTS information */
+ struct dmx_pts_dts_info pts_dts_info;
+
+ /** STC value attached to first TS packet holding the pattern */
+ u64 stc;
+
+ /*
+ * Number of TS packets with Transport Error Indicator (TEI)
+ * found while constructing the frame.
+ */
+ __u32 transport_error_indicator_counter;
+
+ /* Number of continuity errors found while constructing the frame */
+ __u32 continuity_error_counter;
+
+ /*
+ * Number of dropped bytes due to insufficient buffer space,
+ * since last reported frame.
+ */
+ __u32 ts_dropped_bytes;
+
+ /* Total number of TS packets holding the frame */
+ __u32 ts_packets_num;
+};
+
+struct dmx_pes_packet_info {
+ /** PTS/DTS information */
+ struct dmx_pts_dts_info pts_dts_info;
+
+ /** STC value attached to first TS packet holding the PES */
+ u64 stc;
+};
+
+struct dmx_marker_info {
+ /* marker id */
+ u64 id;
+};
+
+/** The meta-data used for video interface */
+struct mpq_adapter_video_meta_data {
+ /** meta-data packet type */
+ enum dmx_packet_type packet_type;
+
+ /** packet-type specific information */
+ union {
+ struct dmx_framing_packet_info framing;
+ struct dmx_pes_packet_info pes;
+ struct dmx_marker_info marker;
+ } info;
+} __packed;
+
+
+/** Callback function to notify on registrations of specific interfaces */
+typedef void (*mpq_adapter_stream_if_callback)(
+ enum mpq_adapter_stream_if interface_id,
+ void *user_param);
+
+
+/**
+ * mpq_adapter_get - Returns pointer to Qualcomm Technologies Inc. DVB adapter
+ *
+ * Return dvb adapter or NULL if it does not exist.
+ */
+struct dvb_adapter *mpq_adapter_get(void);
+
+
+/**
+ * mpq_adapter_register_stream_if - Register a stream interface.
+ *
+ * @interface_id: The interface id
+ * @stream_buffer: The buffer used for the interface
+ *
+ * Return error status
+ *
+ * A stream interface is used to connect two units in tunneling
+ * mode using the mpq_streambuffer implementation.
+ * The producer of the interface should register the new interface;
+ * the consumer may then get it using mpq_adapter_get_stream_if.
+ *
+ * Note that the function holds a pointer to this interface; the
+ * stream_buffer pointer is assumed to be valid as long as the
+ * interface is active.
+ */
+int mpq_adapter_register_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ struct mpq_streambuffer *stream_buffer);
+
+
+/**
+ * mpq_adapter_unregister_stream_if - Un-register a stream interface.
+ *
+ * @interface_id: The interface id
+ *
+ * Return error status
+ */
+int mpq_adapter_unregister_stream_if(
+ enum mpq_adapter_stream_if interface_id);
+
+
+/**
+ * mpq_adapter_get_stream_if - Get buffer used for a stream interface.
+ *
+ * @interface_id: The interface id
+ * @stream_buffer: The returned stream buffer
+ *
+ * Return error status
+ */
+int mpq_adapter_get_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ struct mpq_streambuffer **stream_buffer);
+
+
+/**
+ * mpq_adapter_notify_stream_if - Register notification
+ * to be triggered when a stream interface is registered.
+ *
+ * @interface_id: The interface id
+ * @callback: The callback to be triggered when the interface is registered
+ * @user_param: A parameter that is passed back to the callback function
+ * when triggered.
+ *
+ * Return error status
+ *
+ * A consumer may use this to be notified when the desired
+ * interface is registered in the system and query its information
+ * afterwards using mpq_adapter_get_stream_if.
+ * To remove the callback, call this function with NULL as the
+ * callback parameter.
+ */
+int mpq_adapter_notify_stream_if(
+ enum mpq_adapter_stream_if interface_id,
+ mpq_adapter_stream_if_callback callback,
+ void *user_param);
+
+#endif /* _MPQ_ADAPTER_H */
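For orientation, a minimal usage sketch of the adapter API declared above. The function names and the interface enum come from this header; the wrapper functions, the static stream buffer and the pr_info message are illustrative only.

	#include <linux/printk.h>
	#include "mpq_adapter.h"
	#include "mpq_stream_buffer.h"

	/* Illustrative producer: publish an already-initialized stream buffer
	 * on the video0 interface. The buffer must stay valid for as long as
	 * the interface is registered. */
	static struct mpq_streambuffer my_video0_sbuff;

	static int my_producer_publish(void)
	{
		return mpq_adapter_register_stream_if(
			MPQ_ADAPTER_VIDEO0_STREAM_IF, &my_video0_sbuff);
	}

	/* Illustrative consumer: get notified when video0 appears, then
	 * fetch its stream buffer from the callback. */
	static void my_video0_ready(enum mpq_adapter_stream_if id,
				    void *user_param)
	{
		struct mpq_streambuffer *sbuff;

		if (!mpq_adapter_get_stream_if(id, &sbuff))
			pr_info("video0 stream interface is ready\n");
	}

	static int my_consumer_subscribe(void)
	{
		return mpq_adapter_notify_stream_if(
			MPQ_ADAPTER_VIDEO0_STREAM_IF, my_video0_ready, NULL);
	}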
diff --git a/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h b/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h
new file mode 100644
index 000000000000..720b1b9cb70b
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/include/mpq_dvb_debug.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_DVB_DEBUG_H
+#define _MPQ_DVB_DEBUG_H
+
+/* Enable this line if you want to output debug printouts */
+#define MPG_DVB_DEBUG_ENABLE
+
+#undef MPQ_DVB_DBG_PRINT /* undef it, just in case */
+
+#ifdef MPG_DVB_DEBUG_ENABLE
+#define MPQ_DVB_ERR_PRINT(fmt, args...) pr_err(fmt, ## args)
+#define MPQ_DVB_WARN_PRINT(fmt, args...) pr_warn(fmt, ## args)
+#define MPQ_DVB_NOTICE_PRINT(fmt, args...) pr_notice(fmt, ## args)
+#define MPQ_DVB_DBG_PRINT(fmt, args...) pr_debug(fmt, ## args)
+#else /* MPG_DVB_DEBUG_ENABLE */
+#define MPQ_DVB_ERR_PRINT(fmt, args...)
+#define MPQ_DVB_WARN_PRINT(fmt, args...)
+#define MPQ_DVB_NOTICE_PRINT(fmt, args...)
+#define MPQ_DVB_DBG_PRINT(fmt, args...)
+#endif /* MPG_DVB_DEBUG_ENABLE */
+
+
+/*
+ * The following can be used to disable a specific printout
+ * by adding a letter to the end of MPQ_DVB_DBG_PRINT
+ */
+#undef MPQ_DVB_DBG_PRINTT
+#define MPQ_DVB_DBG_PRINTT(fmt, args...)
+
+#endif /* _MPQ_DVB_DEBUG_H */
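As a small illustration of the per-call-site disable mechanism described above (the format string and the 'pid' variable are placeholders):

	/* With MPG_DVB_DEBUG_ENABLE defined, this expands to pr_debug(): */
	MPQ_DVB_DBG_PRINT("%s: filter added, pid=%d\n", __func__, pid);

	/* Appending a letter routes this one call site to the empty
	 * MPQ_DVB_DBG_PRINTT macro, silencing it without touching others: */
	MPQ_DVB_DBG_PRINTT("%s: filter added, pid=%d\n", __func__, pid);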
diff --git a/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h b/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h
new file mode 100644
index 000000000000..b24dc1f3b5ff
--- /dev/null
+++ b/drivers/media/platform/msm/dvb/include/mpq_stream_buffer.h
@@ -0,0 +1,462 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MPQ_STREAM_BUFFER_H
+#define _MPQ_STREAM_BUFFER_H
+
+#include "dvb_ringbuffer.h"
+
+
+/**
+ * DOC: MPQ Stream Buffer
+ *
+ * A stream buffer implementation is used to transfer data between two units
+ * such as demux and decoders. The implementation relies on dvb_ringbuffer
+ * implementation. Refer to dvb_ringbuffer.h for details.
+ *
+ * The implementation uses two dvb_ringbuffers, one to pass the
+ * raw-data (PES payload for example) and the other to pass
+ * meta-data (information from PES header for example).
+ *
+ * The meta-data uses dvb_ringbuffer packet interface. Each meta-data
+ * packet points to the data buffer, and includes the offset to the data in the
+ * buffer, the size of raw-data described by the meta-data packet, and also the
+ * size of the user's own parameters, if any are required.
+ *
+ * Data can be managed in two ways: ring-buffer & linear buffers, as specified
+ * in initialization when calling the mpq_streambuffer_init function.
+ * For managing data as a ring buffer exactly 1 data buffer descriptor must be
+ * specified in initialization. For this mode, dvb_ringbuffer is used "as-is".
+ * For managing data in several linear buffers, an array of buffer descriptors
+ * must be passed.
+ * For both modes, the data descriptor(s) must remain valid throughout the
+ * life span of the mpq_streambuffer object.
+ * Apart from initialization, the API is the same for both modes.
+ *
+ * Contrary to dvb_ringbuffer implementation, this API makes sure there's
+ * enough data to read/write when making read/write operations.
+ * Users who need to flush/reset a specific buffer, or to check for bytes
+ * ready or space available for writing, should use the respective services
+ * in dvb_ringbuffer (dvb_ringbuffer_avail, dvb_ringbuffer_free,
+ * dvb_ringbuffer_reset, dvb_ringbuffer_flush,
+ * dvb_ringbuffer_flush_spinlock_wakeup).
+ *
+ * Concurrency protection is handled in the same manner as in
+ * dvb_ringbuffer implementation.
+ *
+ * Typical call flow from producer:
+ *
+ * - Start writing the raw-data of a new packet; the following call is
+ * repeated until the end of data of the specific packet
+ *
+ * mpq_streambuffer_data_write(...)
+ *
+ * - Now write a new packet describing the new available raw-data
+ * mpq_streambuffer_pkt_write(...)
+ *
+ * For linear buffer mode, writing a new packet with data size > 0 causes the
+ * current buffer to be marked as pending for reading, and triggers moving to
+ * the next available buffer, which then becomes the current write buffer.
+ *
+ * Typical call flow from consumer:
+ *
+ * - Poll for next available packet:
+ * mpq_streambuffer_pkt_next(&streambuff,-1,&len)
+ *
+ * Alternatively, the consumer can wait on an event for new data and then
+ * call mpq_streambuffer_pkt_next; waiting for data can be done as follows:
+ *
+ * wait_event_interruptible(
+ * streambuff->packet_data.queue,
+ * !dvb_ringbuffer_empty(&streambuff->packet_data) ||
+ * (streambuff->packet_data.error != 0));
+ *
+ * - Get the new packet information:
+ * mpq_streambuffer_pkt_read(..)
+ *
+ * - Read the raw-data of the new packet. Here you can use two methods:
+ *
+ * 1. Read the data to a user supplied buffer:
+ * mpq_streambuffer_data_read()
+ *
+ * In this case a memory copy is done and the read pointer of the raw
+ * data buffer is updated; the amount of raw-data is provided as part
+ * of the packet's information. The user should then call
+ * mpq_streambuffer_pkt_dispose with dispose_data set to 0, as the
+ * raw-data was already disposed.
+ * Note that a secure buffer cannot be accessed directly and an error
+ * will occur.
+ *
+ * 2. Access the data directly using the raw-data address. The address
+ * of the raw data is provided as part of the packet's information. The
+ * user should then call mpq_streambuffer_pkt_dispose with dispose_data
+ * set to 1 to dispose of the packet along with its raw-data.
+ *
+ * - Disposal of packets:
+ * mpq_streambuffer_pkt_dispose(...)
+ *
+ * For linear buffer mode, disposing of a packet with data size > 0,
+ * regardless of the 'dispose_data' parameter, causes the current buffer's
+ * data to be disposed and marked as free for writing, and triggers moving to
+ * the next available buffer, which then becomes the current read buffer.
+ */
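To make the producer flow above concrete, here is a rough sketch under stated assumptions: the payload pointer/length are placeholders, and the handling of raw_data_handle/raw_data_offset is simplified (in the real demux these come from the underlying buffer descriptors).

	#include "mpq_adapter.h"
	#include "mpq_stream_buffer.h"

	/* Illustrative producer step: write a chunk of PES payload, then
	 * publish a meta-data packet describing it. */
	static int my_push_pes(struct mpq_streambuffer *sbuff,
			       const u8 *payload, size_t len)
	{
		struct mpq_adapter_video_meta_data meta = {
			.packet_type = DMX_PES_PACKET,
		};
		struct mpq_streambuffer_packet_header pkt;
		ssize_t written;

		written = mpq_streambuffer_data_write(sbuff, payload, len);
		if (written < 0)
			return written;

		pkt.user_data_len = sizeof(meta);
		pkt.raw_data_len = written;
		pkt.raw_data_handle = 0;  /* simplified; normally the buffer handle */
		pkt.raw_data_offset = 0;  /* simplified; normally the write offset */

		return mpq_streambuffer_pkt_write(sbuff, &pkt, (u8 *)&meta);
	}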
+
+struct mpq_streambuffer;
+struct mpq_streambuffer_packet_header;
+
+typedef void (*mpq_streambuffer_dispose_cb) (
+ struct mpq_streambuffer *sbuff,
+ u32 offset,
+ size_t len,
+ void *user_data);
+
+enum mpq_streambuffer_mode {
+ MPQ_STREAMBUFFER_BUFFER_MODE_RING,
+ MPQ_STREAMBUFFER_BUFFER_MODE_LINEAR
+};
+
+/**
+ * struct mpq_streambuffer - mpq stream buffer representation
+ *
+ * @raw_data: The buffer used to hold raw-data, or linear buffer descriptors
+ * @packet_data: The buffer used to hold the meta-data
+ * @buffers: array of buffer descriptor(s) holding buffer initial & dynamic
+ * buffer information
+ * @mode: mpq_streambuffer buffer management work mode - Ring-buffer or Linear
+ * buffers
+ * @buffers_num: number of data buffers to manage
+ * @pending_buffers_count: for linear buffer management, counts the number of
+ * buffers that have been written and are pending to be read
+ * @cb: optional callback invoked when raw-data is disposed (registered via
+ * mpq_streambuffer_register_data_dispose)
+ * @cb_user_data: user data passed back to @cb
+ */
+struct mpq_streambuffer {
+ struct dvb_ringbuffer raw_data;
+ struct dvb_ringbuffer packet_data;
+ struct mpq_streambuffer_buffer_desc *buffers;
+ enum mpq_streambuffer_mode mode;
+ u32 buffers_num;
+ u32 pending_buffers_count;
+ mpq_streambuffer_dispose_cb cb;
+ void *cb_user_data;
+};
+
+/**
+ * struct mpq_streambuffer_buffer_desc - data buffer descriptor
+ * @handle: ION handle's file descriptor of buffer
+ * @base: kernel mapped address to start of buffer.
+ * Can be NULL for secured buffers
+ * @size: size of buffer
+ * @read_ptr: initial read pointer value (should normally be 0)
+ * @write_ptr: initial write pointer value (should normally be 0)
+ */
+struct mpq_streambuffer_buffer_desc {
+ int handle;
+ void *base;
+ u32 size;
+ u32 read_ptr;
+ u32 write_ptr;
+};
+
+/**
+ * struct mpq_streambuffer_packet_header - packet header saved in packet buffer
+ * @user_data_len: length of private user (meta) data
+ * @raw_data_handle: ION handle's file descriptor of raw-data buffer
+ * @raw_data_offset: offset of raw-data from start of buffer (0 for linear)
+ * @raw_data_len: size of raw-data in the raw-data buffer (can be 0)
+ *
+ * The packet structure that is saved in each packet-buffer:
+ * user_data_len
+ * raw_data_handle
+ * raw_data_offset
+ * raw_data_len
+ * private user-data bytes
+ */
+struct mpq_streambuffer_packet_header {
+ u32 user_data_len;
+ int raw_data_handle;
+ u32 raw_data_offset;
+ u32 raw_data_len;
+} __packed;
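A small illustrative helper for the record layout described above; it counts only the bytes the caller hands to mpq_streambuffer_pkt_write (fixed header plus private user data), not any framing added internally by the dvb_ringbuffer packet interface.

	/* Illustrative: payload size of one meta-data record as passed to
	 * mpq_streambuffer_pkt_write(). */
	static inline size_t my_pkt_payload_size(
		const struct mpq_streambuffer_packet_header *pkt)
	{
		return sizeof(*pkt) + pkt->user_data_len;
	}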
+
+/**
+ * mpq_streambuffer_init - Initialize a new stream buffer
+ *
+ * @sbuff: The buffer to initialize
+ * @mode: Buffer management mode: ring buffer or linear buffers
+ * @data_buffers: array of data buffer descriptor(s).
+ * Data descriptor(s) must remain valid throughout the life
+ * span of the mpq_streambuffer object
+ * @data_buff_num: number of data buffers in the array
+ * @packet_buff: The buffer holding meta-data
+ * @packet_buff_size: Size of meta-data buffer
+ *
+ * Return Error status, -EINVAL if any of the arguments are invalid
+ *
+ * Note:
+ * for data_buff_num > 1, the mpq_streambuffer object manages these buffers as
+ * a separate set of linear buffers. A linear buffer cannot wrap around and
+ * one can only write as many data bytes as the buffer's size; data will not
+ * spill over into the next free buffer.
+ */
+int mpq_streambuffer_init(
+ struct mpq_streambuffer *sbuff,
+ enum mpq_streambuffer_mode mode,
+ struct mpq_streambuffer_buffer_desc *data_buffers,
+ u32 data_buff_num,
+ void *packet_buff,
+ size_t packet_buff_size);
+
+/**
+ * mpq_streambuffer_terminate - Terminate stream buffer
+ *
+ * @sbuff: The buffer to terminate
+ *
+ * The function sets the buffers' error flags to ENODEV
+ * and wakes up any threads waiting on the buffer queues.
+ * Threads waiting on the buffer queues should check if
+ * error was set.
+ */
+void mpq_streambuffer_terminate(struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_pkt_next - Returns the index of the next available packet.
+ *
+ * @sbuff: The stream buffer
+ * @idx: Previous packet index, or -1 to return the index of the first
+ * available packet.
+ * @pktlen: The length of the ready packet
+ *
+ * Return index into the packet-buffer, -1 if the buffer is empty
+ *
+ * After getting the index, the user of this function can either
+ * access the packet buffer directly using the returned index
+ * or ask to read the data back from the buffer using mpq_streambuffer_pkt_read
+ */
+ssize_t mpq_streambuffer_pkt_next(
+ struct mpq_streambuffer *sbuff,
+ ssize_t idx, size_t *pktlen);
+
+/**
+ * mpq_streambuffer_pkt_read - Reads out the packet from the provided index.
+ *
+ * @sbuff: The stream buffer
+ * @idx: The index of the packet to be read
+ * @packet: The read packet's header
+ * @user_data: The read private user data
+ *
+ * Return The actual number of bytes read, -EINVAL if the packet is
+ * already disposed or the packet-data is invalid.
+ *
+ * The packet is not disposed after this function is called, to dispose it
+ * along with the raw-data it points to use mpq_streambuffer_pkt_dispose.
+ * If there is no private user-data, the user-data pointer can be NULL.
+ * The caller of this function must make sure that the private user-data
+ * buffer has enough space for the private user-data length
+ */
+ssize_t mpq_streambuffer_pkt_read(
+ struct mpq_streambuffer *sbuff,
+ size_t idx,
+ struct mpq_streambuffer_packet_header *packet,
+ u8 *user_data);
+
+/**
+ * mpq_streambuffer_pkt_dispose - Disposes a packet from the packet buffer
+ *
+ * @sbuff: The stream buffer
+ * @idx: The index of the packet to be disposed
+ * @dispose_data: Indicates whether to update the read pointer inside the
+ * raw-data buffer for the respective data pointed to by the packet.
+ *
+ * Return error status, -EINVAL if the packet-data is invalid
+ *
+ * The function updates the read pointer inside the raw-data buffer
+ * for the respective data pointed to by the packet if dispose_data is set.
+ */
+int mpq_streambuffer_pkt_dispose(
+ struct mpq_streambuffer *sbuff,
+ size_t idx,
+ int dispose_data);
+
+/**
+ * mpq_streambuffer_pkt_write - Write a new packet to the packet buffer.
+ *
+ * @sbuff: The stream buffer
+ * @packet: The packet header to write
+ * @user_data: The private user-data to be written
+ *
+ * Return error status, -ENOSPC if there's no space to write the packet
+ */
+int mpq_streambuffer_pkt_write(
+ struct mpq_streambuffer *sbuff,
+ struct mpq_streambuffer_packet_header *packet,
+ u8 *user_data);
+
+/**
+ * mpq_streambuffer_data_write - Write data to raw-data buffer
+ *
+ * @sbuff: The stream buffer
+ * @buf: The buffer holding the data to be written
+ * @len: The length of the data buffer
+ *
+ * Return The actual number of bytes written, or -ENOSPC if
+ * there is no space to write the data
+ */
+ssize_t mpq_streambuffer_data_write(
+ struct mpq_streambuffer *sbuff,
+ const u8 *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_write_deposit - Advances the raw-buffer write pointer.
+ * Assumes the raw-data was written by the user directly
+ *
+ * @sbuff: The stream buffer
+ * @len: The length of the raw-data that was already written
+ *
+ * Return error status
+ */
+int mpq_streambuffer_data_write_deposit(
+ struct mpq_streambuffer *sbuff,
+ size_t len);
+
+/**
+ * mpq_streambuffer_data_read - Reads out raw-data to the provided buffer.
+ *
+ * @sbuff: The stream buffer
+ * @buf: The buffer to read the raw-data into
+ * @len: The length of the buffer that will hold the raw-data
+ *
+ * Return The actual number of bytes read or error code
+ *
+ * This function copies the data from the ring-buffer to the
+ * provided buf parameter. The user can save the extra copy by accessing
+ * the data pointer directly and reading from it, then updating the
+ * read pointer by the amount of data that was read using
+ * mpq_streambuffer_data_read_dispose
+ */
+ssize_t mpq_streambuffer_data_read(
+ struct mpq_streambuffer *sbuff,
+ u8 *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_read_user
+ *
+ * Same as mpq_streambuffer_data_read except data can be copied to user-space
+ * buffer.
+ */
+ssize_t mpq_streambuffer_data_read_user(
+ struct mpq_streambuffer *sbuff,
+ u8 __user *buf, size_t len);
+
+/**
+ * mpq_streambuffer_data_read_dispose - Advances the raw-buffer read pointer.
+ * Assumes the raw-data was read by the user directly.
+ *
+ * @sbuff: The stream buffer
+ * @len: The length of the raw-data to be disposed
+ *
+ * Return error status, -EINVAL if the buffer does not hold enough data to
+ * be disposed
+ *
+ * The user can instead dispose a packet along with the data in the
+ * raw-data buffer using mpq_streambuffer_pkt_dispose.
+ */
+int mpq_streambuffer_data_read_dispose(
+ struct mpq_streambuffer *sbuff,
+ size_t len);
+/**
+ * mpq_streambuffer_get_buffer_handle - Returns the current linear buffer
+ * ION handle.
+ * @sbuff: The stream buffer
+ * @read_buffer: when set, a read buffer handle is requested; otherwise a
+ * write buffer handle is requested.
+ * For linear buffer mode read & write buffers may be different
+ * buffers. For ring buffer mode, the same (single) buffer handle
+ * is returned.
+ * @handle: returned handle
+ *
+ * Return error status
+ * -EINVAL if arguments are invalid.
+ * -EPERM if stream buffer specified was not initialized with linear support.
+ */
+int mpq_streambuffer_get_buffer_handle(
+ struct mpq_streambuffer *sbuff,
+ int read_buffer,
+ int *handle);
+
+/**
+ * mpq_streambuffer_data_free - Returns number of free bytes in data buffer.
+ * @sbuff: The stream buffer object
+ *
+ * Note: for linear buffer management this returns the number of free bytes
+ * in the current write buffer only.
+ */
+ssize_t mpq_streambuffer_data_free(
+ struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_data_avail - Returns number of bytes in data buffer that
+ * can be read.
+ * @sbuff: The stream buffer object
+ *
+ * Note: for linear buffer management this returns the number of data bytes
+ * in the current read buffer only.
+ */
+ssize_t mpq_streambuffer_data_avail(
+ struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_register_data_dispose - Registers a callback to notify on
+ * data disposal events.
+ * @sbuff: The stream buffer object
+ * @cb_func: user callback function
+ * @user_data: user data to be passed to callback function.
+ *
+ * Returns error status
+ * -EINVAL if arguments are invalid
+ */
+int mpq_streambuffer_register_data_dispose(
+ struct mpq_streambuffer *sbuff,
+ mpq_streambuffer_dispose_cb cb_func,
+ void *user_data);
+
+/**
+ * mpq_streambuffer_get_data_rw_offset - returns read/write offsets of the
+ * current data buffer.
+ * @sbuff: The stream buffer object
+ * @read_offset: returned read offset
+ * @write_offset: returned write offset
+ *
+ * Note: read offset or write offset may be NULL if not required.
+ * Returns error status
+ * -EINVAL if arguments are invalid
+ */
+int mpq_streambuffer_get_data_rw_offset(
+ struct mpq_streambuffer *sbuff,
+ u32 *read_offset,
+ u32 *write_offset);
+
+/**
+ * mpq_streambuffer_metadata_free - returns number of free bytes in the meta
+ * data buffer, or error status.
+ * @sbuff: the stream buffer object
+ */
+ssize_t mpq_streambuffer_metadata_free(struct mpq_streambuffer *sbuff);
+
+/**
+ * mpq_streambuffer_flush - flush both pending packets and data in buffer
+ *
+ * @sbuff: the stream buffer object
+ *
+ * Returns error status
+ */
+int mpq_streambuffer_flush(struct mpq_streambuffer *sbuff);
+
+#endif /* _MPQ_STREAM_BUFFER_H */
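Complementing the producer sketch earlier, here is a hedged consumer-side sketch of the call flow from the DOC comment: poll for a packet, read its header and meta-data, copy out the raw data, then dispose of the packet. The bounce-buffer size is arbitrary, and the meta-data cast assumes the producer wrote a struct mpq_adapter_video_meta_data as its private user data.

	#include <linux/kernel.h>
	#include "mpq_adapter.h"
	#include "mpq_stream_buffer.h"

	/* Illustrative consumer: take one packet, copy its raw data, dispose. */
	static void my_consume_one(struct mpq_streambuffer *sbuff)
	{
		struct mpq_streambuffer_packet_header pkt;
		struct mpq_adapter_video_meta_data meta;
		u8 data[4096];			/* arbitrary bounce buffer */
		size_t pktlen;
		ssize_t idx;

		idx = mpq_streambuffer_pkt_next(sbuff, -1, &pktlen);
		if (idx < 0)
			return;			/* no packet available */

		if (mpq_streambuffer_pkt_read(sbuff, idx, &pkt, (u8 *)&meta) < 0)
			return;

		if (mpq_streambuffer_data_read(sbuff, data,
				min_t(size_t, pkt.raw_data_len, sizeof(data))) < 0)
			return;

		/* Raw data was already consumed above, so dispose_data = 0. */
		mpq_streambuffer_pkt_dispose(sbuff, idx, 0);
	}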
diff --git a/include/linux/qcom_tspp.h b/include/linux/qcom_tspp.h
new file mode 100644
index 000000000000..28e6695fb057
--- /dev/null
+++ b/include/linux/qcom_tspp.h
@@ -0,0 +1,99 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_TSPP_H_
+#define _MSM_TSPP_H_
+
+struct tspp_data_descriptor {
+ void *virt_base; /* logical address of the actual data */
+ phys_addr_t phys_base; /* physical address of the actual data */
+ u32 size; /* size of buffer in bytes */
+ int id; /* unique identifier */
+ void *user; /* user-defined data */
+};
+
+enum tspp_key_parity {
+ TSPP_KEY_PARITY_EVEN,
+ TSPP_KEY_PARITY_ODD
+};
+
+struct tspp_key {
+ enum tspp_key_parity parity;
+ int lsb;
+ int msb;
+};
+
+enum tspp_source {
+ TSPP_SOURCE_TSIF0,
+ TSPP_SOURCE_TSIF1,
+ TSPP_SOURCE_MEM,
+ TSPP_SOURCE_NONE = -1
+};
+
+enum tspp_mode {
+ TSPP_MODE_DISABLED,
+ TSPP_MODE_PES,
+ TSPP_MODE_RAW,
+ TSPP_MODE_RAW_NO_SUFFIX
+};
+
+enum tspp_tsif_mode {
+ TSPP_TSIF_MODE_LOOPBACK, /* loopback mode */
+ TSPP_TSIF_MODE_1, /* without sync */
+ TSPP_TSIF_MODE_2 /* with sync signal */
+};
+
+struct tspp_filter {
+ int pid;
+ int mask;
+ enum tspp_mode mode;
+ unsigned int priority; /* 0 - 15 */
+ int decrypt;
+ enum tspp_source source;
+};
+
+struct tspp_select_source {
+ enum tspp_source source;
+ enum tspp_tsif_mode mode;
+ int clk_inverse;
+ int data_inverse;
+ int sync_inverse;
+ int enable_inverse;
+};
+
+typedef void (tspp_notifier)(int channel_id, void *user);
+typedef void* (tspp_allocator)(int channel_id, u32 size,
+ phys_addr_t *phys_base, void *user);
+typedef void (tspp_memfree)(int channel_id, u32 size,
+ void *virt_base, phys_addr_t phys_base, void *user);
+
+/* Kernel API functions */
+int tspp_open_stream(u32 dev, u32 channel_id,
+ struct tspp_select_source *source);
+int tspp_close_stream(u32 dev, u32 channel_id);
+int tspp_open_channel(u32 dev, u32 channel_id);
+int tspp_close_channel(u32 dev, u32 channel_id);
+int tspp_get_ref_clk_counter(u32 dev,
+ enum tspp_source source, u32 *tcr_counter);
+int tspp_add_filter(u32 dev, u32 channel_id, struct tspp_filter *filter);
+int tspp_remove_filter(u32 dev, u32 channel_id, struct tspp_filter *filter);
+int tspp_set_key(u32 dev, u32 channel_id, struct tspp_key *key);
+int tspp_register_notification(u32 dev, u32 channel_id, tspp_notifier *notify,
+ void *data, u32 timer_ms);
+int tspp_unregister_notification(u32 dev, u32 channel_id);
+const struct tspp_data_descriptor *tspp_get_buffer(u32 dev, u32 channel_id);
+int tspp_release_buffer(u32 dev, u32 channel_id, u32 descriptor_id);
+int tspp_allocate_buffers(u32 dev, u32 channel_id, u32 count,
+ u32 size, u32 int_freq, tspp_allocator *alloc,
+ tspp_memfree *memfree, void *user);
+
+#endif /* _MSM_TSPP_H_ */
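Finally, a rough sketch of how a kernel client might drive the TSPP API above. The device/channel numbers, PID/mask, mode and notification timeout are placeholders, and buffer allocation (tspp_allocate_buffers) and error unwinding are omitted for brevity.

	#include <linux/qcom_tspp.h>

	/* Illustrative notification handler: drain ready descriptors. */
	static void my_tspp_notify(int channel_id, void *user)
	{
		const struct tspp_data_descriptor *desc;

		while ((desc = tspp_get_buffer(0, channel_id)) != NULL) {
			/* ... process desc->virt_base / desc->size here ... */
			tspp_release_buffer(0, channel_id, desc->id);
		}
	}

	/* Illustrative setup: route TSIF0 into channel 0 and filter one PID. */
	static int my_tspp_setup(void)
	{
		struct tspp_select_source src = {
			.source = TSPP_SOURCE_TSIF0,
			.mode = TSPP_TSIF_MODE_2,	/* with sync signal */
		};
		struct tspp_filter flt = {
			.pid = 0x100,			/* placeholder PID */
			.mask = 0x1fff,
			.mode = TSPP_MODE_RAW,
			.priority = 0,
			.decrypt = 0,
			.source = TSPP_SOURCE_TSIF0,
		};
		int rc;

		rc = tspp_open_channel(0, 0);
		if (rc)
			return rc;
		rc = tspp_open_stream(0, 0, &src);
		if (rc)
			return rc;
		rc = tspp_add_filter(0, 0, &flt);
		if (rc)
			return rc;
		return tspp_register_notification(0, 0, my_tspp_notify, NULL, 100);
	}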