author    Mahesh Sivasubramanian <msivasub@codeaurora.org>    2016-01-11 14:27:35 -0700
committer David Keitel <dkeitel@codeaurora.org>    2016-03-22 11:08:34 -0700
commit    b68798fafa4ee121b3f6ce13a3a7ef92b6e960a6 (patch)
tree      cd2e81663b67c1e50d60b735f4b868def179aad9
parent    3da2a8c7d197a827076d241fe492cb63736d69ed (diff)
soc: qcom: Snapshot of thermal/LMH drivers
This snapshot is taken as of msm-3.18 commit e70ad0c (Promotion of
kernel.lnx.3.18-151201.). Include the necessary thermal_core changes to
convert long to int, in line with upstream kernel changes.

Change-Id: I642b666518fe72385794b743989a0f5e5120ec03
Conflicts:
	drivers/thermal/Makefile
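The "long to int" note above refers to the thermal core's temperature
callbacks; a rough sketch of the signature change this snapshot adapts to
(illustrative only, mirroring the upstream "use int for temperatures"
cleanup, not the full patch):

	/* before: temperatures passed around as long */
	int (*get_temp)(struct thermal_zone_device *, long *);
	/* after: temperatures consistently use int */
	int (*get_temp)(struct thermal_zone_device *, int *);

lmh_sensor_read() in lmh_interface.c below takes an int * for this reason.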
-rw-r--r-- drivers/thermal/Kconfig                    30
-rw-r--r-- drivers/thermal/Makefile                    3
-rw-r--r-- drivers/thermal/lmh_interface.c          1211
-rw-r--r-- drivers/thermal/lmh_interface.h           112
-rw-r--r-- drivers/thermal/lmh_lite.c               1408
-rw-r--r-- drivers/thermal/msm_thermal-dev.c         425
-rw-r--r-- drivers/thermal/msm_thermal.c            7214
-rw-r--r-- drivers/thermal/thermal_core.c             14
-rw-r--r-- include/linux/msm_thermal.h               333
-rw-r--r-- include/linux/thermal.h                     6
-rw-r--r-- include/uapi/linux/msm_thermal_ioctl.h     92
11 files changed, 10838 insertions, 10 deletions
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index bc155401728d..31f1ed7a26c2 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -175,6 +175,36 @@ config THERMAL_EMULATION
because userland can easily disable the thermal policy by simply
flooding this sysfs node with low temperature values.
+config LIMITS_MONITOR
+ bool "LMH monitor driver"
+ depends on THERMAL
+ help
+ Enable this to manage the Limits hardware (LMH) interrupts, throttling
+ intensities, and device profiles. The driver also registers the
+ Limits hardware's monitoring entities as sensors with the thermal
+ framework.
+
+config LIMITS_LITE_HW
+ bool "LMH Lite hardware driver"
+ depends on LIMITS_MONITOR
+ help
+ Enable this option to interact with the LMH Lite hardware. This
+ implements the APIs required to query the sensors supported by
+ LMH Lite, their throttling intensities, and the operating
+ profiles.
+
+config THERMAL_MONITOR
+ bool "Monitor thermal state and limit CPU Frequency"
+ depends on THERMAL_TSENS8974
+ depends on CPU_FREQ || CPU_FREQ_MSM
+ depends on PM_OPP
+ default n
+ help
+ This enables thermal monitoring capability in the kernel in the
+ absence of a system-wide thermal monitoring entity, or until such
+ an entity starts running in userspace. It monitors the TSENS
+ temperature and limits the maximum frequency of the CPU cores.
+
config HISI_THERMAL
tristate "Hisilicon thermal driver"
depends on (ARCH_HISI && CPU_THERMAL && OF) || COMPILE_TEST
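For reference, a defconfig fragment that enables all three new options might
look like this (a sketch; the THERMAL, TSENS, CPU_FREQ and PM_OPP
dependencies above must also be satisfied):

	CONFIG_LIMITS_MONITOR=y
	CONFIG_LIMITS_LITE_HW=y
	CONFIG_THERMAL_MONITOR=y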
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 53186c885f8d..cbfff2226bcd 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -51,3 +51,6 @@ obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
obj-$(CONFIG_THERMAL_QPNP) += qpnp-temp-alarm.o
obj-$(CONFIG_THERMAL_QPNP_ADC_TM) += qpnp-adc-tm.o
obj-$(CONFIG_THERMAL_TSENS8974) += msm8974-tsens.o
+obj-$(CONFIG_THERMAL_MONITOR) += msm_thermal.o msm_thermal-dev.o
+obj-$(CONFIG_LIMITS_MONITOR) += lmh_interface.o
+obj-$(CONFIG_LIMITS_LITE_HW) += lmh_lite.o
diff --git a/drivers/thermal/lmh_interface.c b/drivers/thermal/lmh_interface.c
new file mode 100644
index 000000000000..e7a873b6d426
--- /dev/null
+++ b/drivers/thermal/lmh_interface.c
@@ -0,0 +1,1211 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/sysfs.h>
+#include <linux/rwsem.h>
+#include <linux/debugfs.h>
+#include <linux/thermal.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include "lmh_interface.h"
+
+#define LMH_MON_NAME "lmh_monitor"
+#define LMH_ISR_POLL_DELAY "interrupt_poll_delay_msec"
+#define LMH_TRACE_ENABLE "hw_trace_enable"
+#define LMH_TRACE_INTERVAL "hw_trace_interval"
+#define LMH_DBGFS_DIR "debug"
+#define LMH_DBGFS_READ "data"
+#define LMH_DBGFS_CONFIG_READ "config"
+#define LMH_DBGFS_READ_TYPES "data_types"
+#define LMH_DBGFS_CONFIG_TYPES "config_types"
+#define LMH_TRACE_INTERVAL_XO_TICKS 250
+
+struct lmh_mon_threshold {
+ long value;
+ bool active;
+};
+
+struct lmh_device_data {
+ char device_name[LMH_NAME_MAX];
+ struct lmh_device_ops *device_ops;
+ uint32_t max_level;
+ int curr_level;
+ int *levels;
+ struct dentry *dev_parent;
+ struct dentry *max_lvl_fs;
+ struct dentry *curr_lvl_fs;
+ struct dentry *avail_lvl_fs;
+ struct list_head list_ptr;
+ struct rw_semaphore lock;
+ struct device dev;
+};
+
+struct lmh_mon_sensor_data {
+ struct list_head list_ptr;
+ char sensor_name[LMH_NAME_MAX];
+ struct lmh_sensor_ops *sensor_ops;
+ struct rw_semaphore lock;
+ struct lmh_mon_threshold trip[LMH_TRIP_MAX];
+ struct thermal_zone_device *tzdev;
+ enum thermal_device_mode mode;
+};
+
+struct lmh_mon_driver_data {
+ struct dentry *debugfs_parent;
+ struct dentry *poll_fs;
+ struct dentry *enable_hw_log;
+ struct dentry *hw_log_delay;
+ uint32_t hw_log_enable;
+ uint64_t hw_log_interval;
+ struct dentry *debug_dir;
+ struct dentry *debug_read;
+ struct dentry *debug_config;
+ struct dentry *debug_read_type;
+ struct dentry *debug_config_type;
+ struct lmh_debug_ops *debug_ops;
+};
+
+enum lmh_read_type {
+ LMH_DEBUG_READ_TYPE,
+ LMH_DEBUG_CONFIG_TYPE,
+ LMH_PROFILES,
+};
+
+static struct lmh_mon_driver_data *lmh_mon_data;
+static struct class lmh_class_info = {
+ .name = "msm_limits",
+};
+static DECLARE_RWSEM(lmh_mon_access_lock);
+static LIST_HEAD(lmh_sensor_list);
+static DECLARE_RWSEM(lmh_dev_access_lock);
+static LIST_HEAD(lmh_device_list);
+
+#define LMH_CREATE_DEBUGFS_FILE(_node, _name, _mode, _parent, _data, _ops, \
+ _ret) do { \
+ _node = debugfs_create_file(_name, _mode, _parent, \
+ _data, _ops); \
+ if (IS_ERR(_node)) { \
+ _ret = PTR_ERR(_node); \
+ pr_err("Error creating debugfs file:%s. err:%d\n", \
+ _name, _ret); \
+ } \
+ } while (0)
+
+#define LMH_CREATE_DEBUGFS_DIR(_node, _name, _parent, _ret) \
+ do { \
+ _node = debugfs_create_dir(_name, _parent); \
+ if (IS_ERR(_node)) { \
+ _ret = PTR_ERR(_node); \
+ pr_err("Error creating debugfs dir:%s. err:%d\n", \
+ _name, _ret); \
+ } \
+ } while (0)
+
+#define LMH_HW_LOG_FS(_name) \
+static int _name##_get(void *data, u64 *val) \
+{ \
+ *val = lmh_mon_data->_name; \
+ return 0; \
+} \
+static int _name##_set(void *data, u64 val) \
+{ \
+ struct lmh_mon_sensor_data *lmh_sensor = data; \
+ int ret = 0; \
+ lmh_mon_data->_name = val; \
+ if (lmh_mon_data->hw_log_enable) \
+ ret = lmh_sensor->sensor_ops->enable_hw_log( \
+ lmh_mon_data->hw_log_interval, \
+ lmh_mon_data->hw_log_enable); \
+ else \
+ ret = lmh_sensor->sensor_ops->disable_hw_log(); \
+ return ret; \
+} \
+DEFINE_SIMPLE_ATTRIBUTE(_name##_fops, _name##_get, _name##_set, \
+ "%llu\n");
+
+#define LMH_DEV_GET(_name) \
+static ssize_t _name##_get(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct lmh_device_data *lmh_dev = container_of(dev, \
+ struct lmh_device_data, dev); \
+ return snprintf(buf, LMH_NAME_MAX, "%d", lmh_dev->_name); \
+} \
+
+LMH_HW_LOG_FS(hw_log_enable);
+LMH_HW_LOG_FS(hw_log_interval);
+LMH_DEV_GET(max_level);
+LMH_DEV_GET(curr_level);
+
+static ssize_t curr_level_set(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct lmh_device_data *lmh_dev = container_of(dev,
+ struct lmh_device_data, dev);
+ int val = 0, ret = 0;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret < 0) {
+ pr_err("Invalid input [%s]. err:%d\n", buf, ret);
+ return ret;
+ }
+ return lmh_set_dev_level(lmh_dev->device_name, val);
+}
+
+static ssize_t avail_level_get(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lmh_device_data *lmh_dev = container_of(dev,
+ struct lmh_device_data, dev);
+ uint32_t *type_list = NULL;
+ int ret = 0, count = 0, lvl_buf_count = 0, idx = 0;
+ char *lvl_buf = NULL;
+
+ if (!lmh_dev || !lmh_dev->levels || !lmh_dev->max_level) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+ type_list = lmh_dev->levels;
+ lvl_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!lvl_buf) {
+ pr_err("Error allocating memory\n");
+ return -ENOMEM;
+ }
+ for (idx = 0; (idx < lmh_dev->max_level) &&
+ (lvl_buf_count < PAGE_SIZE); idx++) {
+ count = snprintf(lvl_buf + lvl_buf_count,
+ PAGE_SIZE - lvl_buf_count, "%d ",
+ type_list[idx]);
+ if (count + lvl_buf_count >= PAGE_SIZE) {
+ pr_err("overflow.\n");
+ break;
+ }
+ lvl_buf_count += count;
+ }
+ count = snprintf(lvl_buf + lvl_buf_count, PAGE_SIZE - lvl_buf_count,
+ "\n");
+ if (count + lvl_buf_count < PAGE_SIZE)
+ lvl_buf_count += count;
+
+ count = snprintf(buf, lvl_buf_count + 1, "%s", lvl_buf);
+ if (count > PAGE_SIZE) {
+ pr_err("copy to user buf failed\n");
+ ret = -EFAULT;
+ goto lvl_get_exit;
+ }
+
+lvl_get_exit:
+ kfree(lvl_buf);
+ return (ret) ? ret : count;
+}
+
+static int lmh_create_dev_sysfs(struct lmh_device_data *lmh_dev)
+{
+ int ret = 0;
+ static DEVICE_ATTR(level, 0600, curr_level_get, curr_level_set);
+ static DEVICE_ATTR(available_levels, 0400, avail_level_get, NULL);
+ static DEVICE_ATTR(total_levels, 0400, max_level_get, NULL);
+
+ lmh_dev->dev.class = &lmh_class_info;
+ dev_set_name(&lmh_dev->dev, "%s", lmh_dev->device_name);
+ ret = device_register(&lmh_dev->dev);
+ if (ret) {
+ pr_err("Error registering profile device. err:%d\n", ret);
+ return ret;
+ }
+ ret = device_create_file(&lmh_dev->dev, &dev_attr_level);
+ if (ret) {
+ pr_err("Error creating profile level sysfs node. err:%d\n",
+ ret);
+ goto dev_sysfs_exit;
+ }
+ ret = device_create_file(&lmh_dev->dev, &dev_attr_total_levels);
+ if (ret) {
+ pr_err("Error creating total level sysfs node. err:%d\n",
+ ret);
+ goto dev_sysfs_exit;
+ }
+ ret = device_create_file(&lmh_dev->dev, &dev_attr_available_levels);
+ if (ret) {
+ pr_err("Error creating available level sysfs node. err:%d\n",
+ ret);
+ goto dev_sysfs_exit;
+ }
+
+dev_sysfs_exit:
+ if (ret)
+ device_unregister(&lmh_dev->dev);
+ return ret;
+}
+
+static int lmh_create_debugfs_nodes(struct lmh_mon_sensor_data *lmh_sensor)
+{
+ int ret = 0;
+
+ lmh_mon_data->hw_log_enable = 0;
+ lmh_mon_data->hw_log_interval = LMH_TRACE_INTERVAL_XO_TICKS;
+ LMH_CREATE_DEBUGFS_FILE(lmh_mon_data->enable_hw_log, LMH_TRACE_ENABLE,
+ 0600, lmh_mon_data->debugfs_parent, (void *)lmh_sensor,
+ &hw_log_enable_fops, ret);
+ if (ret)
+ goto create_debugfs_exit;
+ LMH_CREATE_DEBUGFS_FILE(lmh_mon_data->hw_log_delay, LMH_TRACE_INTERVAL,
+ 0600, lmh_mon_data->debugfs_parent, (void *)lmh_sensor,
+ &hw_log_interval_fops, ret);
+ if (ret)
+ goto create_debugfs_exit;
+
+create_debugfs_exit:
+ if (ret)
+ debugfs_remove_recursive(lmh_mon_data->debugfs_parent);
+ return ret;
+}
+
+static struct lmh_mon_sensor_data *lmh_match_sensor_ops(
+ struct lmh_sensor_ops *ops)
+{
+ struct lmh_mon_sensor_data *lmh_sensor = NULL;
+
+ list_for_each_entry(lmh_sensor, &lmh_sensor_list, list_ptr) {
+ if (lmh_sensor->sensor_ops == ops)
+ return lmh_sensor;
+ }
+
+ return NULL;
+}
+
+static struct lmh_mon_sensor_data *lmh_match_sensor_name(char *sensor_name)
+{
+ struct lmh_mon_sensor_data *lmh_sensor = NULL;
+
+ list_for_each_entry(lmh_sensor, &lmh_sensor_list, list_ptr) {
+ if (!strncasecmp(lmh_sensor->sensor_name, sensor_name,
+ LMH_NAME_MAX))
+ return lmh_sensor;
+ }
+
+ return NULL;
+}
+
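+/*
+ * Trip semantics: a HIGH trip fires once the reading rises to its threshold,
+ * a LOW trip once the reading falls to it. A fired trip is disarmed
+ * (active = false) until userspace re-arms it through the thermal zone's
+ * activate_trip_type callback.
+ */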
+static void lmh_evaluate_and_notify(struct lmh_mon_sensor_data *lmh_sensor,
+ long val)
+{
+ int idx = 0, trip = 0;
+ bool cond = false;
+
+ for (idx = 0; idx < LMH_TRIP_MAX; idx++) {
+ if (!lmh_sensor->trip[idx].active)
+ continue;
+ if (idx == LMH_HIGH_TRIP) {
+ trip = THERMAL_TRIP_CONFIGURABLE_HI;
+ cond = (val >= lmh_sensor->trip[idx].value);
+ } else {
+ trip = THERMAL_TRIP_CONFIGURABLE_LOW;
+ cond = (val <= lmh_sensor->trip[idx].value);
+ }
+ if (cond) {
+ lmh_sensor->trip[idx].active = false;
+ thermal_sensor_trip(lmh_sensor->tzdev, trip, val);
+ }
+ }
+}
+
+void lmh_update_reading(struct lmh_sensor_ops *ops, long trip_val)
+{
+ struct lmh_mon_sensor_data *lmh_sensor = NULL;
+
+ if (!ops) {
+ pr_err("Invalid input\n");
+ return;
+ }
+
+ down_read(&lmh_mon_access_lock);
+ lmh_sensor = lmh_match_sensor_ops(ops);
+ if (!lmh_sensor) {
+ pr_err("Invalid ops\n");
+ goto interrupt_exit;
+ }
+ down_write(&lmh_sensor->lock);
+ pr_debug("Sensor:[%s] intensity:%ld\n", lmh_sensor->sensor_name,
+ trip_val);
+ lmh_evaluate_and_notify(lmh_sensor, trip_val);
+interrupt_exit:
+ if (lmh_sensor)
+ up_write(&lmh_sensor->lock);
+ up_read(&lmh_mon_access_lock);
+ return;
+}
+
+static int lmh_sensor_read(struct thermal_zone_device *dev, int *val)
+{
+ int ret = 0;
+ long temp = 0;
+ struct lmh_mon_sensor_data *lmh_sensor;
+
+ if (!val || !dev || !dev->devdata) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+ lmh_sensor = dev->devdata;
+ down_read(&lmh_mon_access_lock);
+ down_read(&lmh_sensor->lock);
+ /* thermal core passes an int; the LMH sensor ops still report a long */
+ ret = lmh_sensor->sensor_ops->read(lmh_sensor->sensor_ops, &temp);
+ if (ret) {
+ pr_err("Error reading sensor:%s. err:%d\n",
+ lmh_sensor->sensor_name, ret);
+ goto unlock_and_exit;
+ }
+ *val = temp;
+unlock_and_exit:
+ up_read(&lmh_sensor->lock);
+ up_read(&lmh_mon_access_lock);
+
+ return ret;
+}
+
+static int lmh_get_mode(struct thermal_zone_device *dev,
+ enum thermal_device_mode *mode)
+{
+ struct lmh_mon_sensor_data *lmh_sensor;
+
+ if (!dev || !dev->devdata || !mode) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+ lmh_sensor = dev->devdata;
+ *mode = lmh_sensor->mode;
+
+ return 0;
+}
+
+static int lmh_get_trip_type(struct thermal_zone_device *dev,
+ int trip, enum thermal_trip_type *type)
+{
+ if (!type || !dev || !dev->devdata || trip < 0
+ || trip >= LMH_TRIP_MAX) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+
+ switch (trip) {
+ case LMH_HIGH_TRIP:
+ *type = THERMAL_TRIP_CONFIGURABLE_HI;
+ break;
+ case LMH_LOW_TRIP:
+ *type = THERMAL_TRIP_CONFIGURABLE_LOW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int lmh_activate_trip(struct thermal_zone_device *dev,
+ int trip, enum thermal_trip_activation_mode mode)
+{
+ struct lmh_mon_sensor_data *lmh_sensor;
+
+ if (!dev || !dev->devdata || trip < 0 || trip >= LMH_TRIP_MAX) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+
+ lmh_sensor = dev->devdata;
+ down_read(&lmh_mon_access_lock);
+ down_write(&lmh_sensor->lock);
+ lmh_sensor->trip[trip].active = (mode ==
+ THERMAL_TRIP_ACTIVATION_ENABLED);
+ up_write(&lmh_sensor->lock);
+ up_read(&lmh_mon_access_lock);
+
+ return 0;
+}
+
+static int lmh_get_trip_value(struct thermal_zone_device *dev,
+ int trip, int *value)
+{
+ struct lmh_mon_sensor_data *lmh_sensor;
+
+ if (!dev || !dev->devdata || trip < 0 || trip >= LMH_TRIP_MAX
+ || !value) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+
+ lmh_sensor = dev->devdata;
+ down_read(&lmh_mon_access_lock);
+ down_read(&lmh_sensor->lock);
+ *value = lmh_sensor->trip[trip].value;
+ up_read(&lmh_sensor->lock);
+ up_read(&lmh_mon_access_lock);
+
+ return 0;
+}
+
+static int lmh_set_trip_value(struct thermal_zone_device *dev,
+ int trip, int value)
+{
+ struct lmh_mon_sensor_data *lmh_sensor;
+
+ if (!dev || !dev->devdata || trip < 0 || trip >= LMH_TRIP_MAX) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+
+ lmh_sensor = dev->devdata;
+ down_read(&lmh_mon_access_lock);
+ down_write(&lmh_sensor->lock);
+ lmh_sensor->trip[trip].value = value;
+ up_write(&lmh_sensor->lock);
+ up_read(&lmh_mon_access_lock);
+
+ return 0;
+}
+
+static struct thermal_zone_device_ops lmh_sens_ops = {
+ .get_temp = lmh_sensor_read,
+ .get_mode = lmh_get_mode,
+ .get_trip_type = lmh_get_trip_type,
+ .activate_trip_type = lmh_activate_trip,
+ .get_trip_temp = lmh_get_trip_value,
+ .set_trip_temp = lmh_set_trip_value,
+};
+
+static int lmh_register_sensor(struct lmh_mon_sensor_data *lmh_sensor)
+{
+ int ret = 0;
+
+ lmh_sensor->tzdev = thermal_zone_device_register(
+ lmh_sensor->sensor_name, LMH_TRIP_MAX,
+ (1 << LMH_TRIP_MAX) - 1, lmh_sensor, &lmh_sens_ops,
+ NULL, 0, 0);
+ if (IS_ERR_OR_NULL(lmh_sensor->tzdev)) {
+ ret = PTR_ERR(lmh_sensor->tzdev);
+ pr_err("Error registering sensor:[%s] with thermal. err:%d\n",
+ lmh_sensor->sensor_name, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int lmh_sensor_init(struct lmh_mon_sensor_data *lmh_sensor,
+ char *sensor_name, struct lmh_sensor_ops *ops)
+{
+ int idx = 0, ret = 0;
+
+ strlcpy(lmh_sensor->sensor_name, sensor_name, LMH_NAME_MAX);
+ lmh_sensor->sensor_ops = ops;
+ ops->new_value_notify = lmh_update_reading;
+ for (idx = 0; idx < LMH_TRIP_MAX; idx++) {
+ lmh_sensor->trip[idx].value = 0;
+ lmh_sensor->trip[idx].active = false;
+ }
+ init_rwsem(&lmh_sensor->lock);
+ if (list_empty(&lmh_sensor_list)
+ && !lmh_mon_data->enable_hw_log)
+ lmh_create_debugfs_nodes(lmh_sensor);
+ list_add_tail(&lmh_sensor->list_ptr, &lmh_sensor_list);
+
+ return ret;
+}
+
+int lmh_sensor_register(char *sensor_name, struct lmh_sensor_ops *ops)
+{
+ int ret = 0;
+ struct lmh_mon_sensor_data *lmh_sensor = NULL;
+
+ if (!sensor_name || !ops) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+
+ if (!ops->read || !ops->enable_hw_log || !ops->disable_hw_log) {
+ pr_err("Invalid ops input for sensor:%s\n", sensor_name);
+ return -EINVAL;
+ }
+ down_write(&lmh_mon_access_lock);
+ if (lmh_match_sensor_name(sensor_name)
+ || lmh_match_sensor_ops(ops)) {
+ ret = -EEXIST;
+ pr_err("Sensor[%s] exists\n", sensor_name);
+ goto register_exit;
+ }
+ lmh_sensor = kzalloc(sizeof(struct lmh_mon_sensor_data), GFP_KERNEL);
+ if (!lmh_sensor) {
+ pr_err("kzalloc failed\n");
+ ret = -ENOMEM;
+ goto register_exit;
+ }
+ ret = lmh_sensor_init(lmh_sensor, sensor_name, ops);
+ if (ret) {
+ pr_err("Error registering sensor:%s. err:%d\n", sensor_name,
+ ret);
+ kfree(lmh_sensor);
+ goto register_exit;
+ }
+
+ pr_debug("Registered Sensor:[%s]\n", sensor_name);
+
+register_exit:
+ up_write(&lmh_mon_access_lock);
+ if (ret)
+ return ret;
+ ret = lmh_register_sensor(lmh_sensor);
+ if (ret) {
+ pr_err("Thermal Zone register failed for Sensor:[%s]\n"
+ , sensor_name);
+ return ret;
+ }
+ pr_debug("Registered Sensor:[%s]\n", sensor_name);
+ return ret;
+}
+
+static void lmh_sensor_remove(struct lmh_sensor_ops *ops)
+{
+ struct lmh_mon_sensor_data *lmh_sensor = NULL;
+
+ lmh_sensor = lmh_match_sensor_ops(ops);
+ if (!lmh_sensor) {
+ pr_err("No match for the sensor\n");
+ goto deregister_exit;
+ }
+ down_write(&lmh_sensor->lock);
+ thermal_zone_device_unregister(lmh_sensor->tzdev);
+ list_del(&lmh_sensor->list_ptr);
+ up_write(&lmh_sensor->lock);
+ pr_debug("Deregistered sensor:[%s]\n", lmh_sensor->sensor_name);
+ kfree(lmh_sensor);
+
+deregister_exit:
+ return;
+}
+
+void lmh_sensor_deregister(struct lmh_sensor_ops *ops)
+{
+ if (!ops) {
+ pr_err("Invalid input\n");
+ return;
+ }
+
+ down_write(&lmh_mon_access_lock);
+ lmh_sensor_remove(ops);
+ up_write(&lmh_mon_access_lock);
+
+ return;
+}
+
+static struct lmh_device_data *lmh_match_device_name(char *device_name)
+{
+ struct lmh_device_data *lmh_device = NULL;
+
+ list_for_each_entry(lmh_device, &lmh_device_list, list_ptr) {
+ if (!strncasecmp(lmh_device->device_name, device_name,
+ LMH_NAME_MAX))
+ return lmh_device;
+ }
+
+ return NULL;
+}
+
+static struct lmh_device_data *lmh_match_device_ops(struct lmh_device_ops *ops)
+{
+ struct lmh_device_data *lmh_device = NULL;
+
+ list_for_each_entry(lmh_device, &lmh_device_list, list_ptr) {
+ if (lmh_device->device_ops == ops)
+ return lmh_device;
+ }
+
+ return NULL;
+}
+
+static int lmh_device_init(struct lmh_device_data *lmh_device,
+ char *device_name, struct lmh_device_ops *ops)
+{
+ int ret = 0;
+
+ ret = ops->get_curr_level(ops, &lmh_device->curr_level);
+ if (ret) {
+ pr_err("Error getting curr level for Device:[%s]. err:%d\n",
+ device_name, ret);
+ goto dev_init_exit;
+ }
+ ret = ops->get_available_levels(ops, NULL);
+ if (ret <= 0) {
+ pr_err("Error getting max level for Device:[%s]. err:%d\n",
+ device_name, ret);
+ ret = (!ret) ? -EINVAL : ret;
+ goto dev_init_exit;
+ }
+ lmh_device->max_level = ret;
+ lmh_device->levels = kzalloc(lmh_device->max_level * sizeof(int),
+ GFP_KERNEL);
+ if (!lmh_device->levels) {
+ pr_err("No memory\n");
+ ret = -ENOMEM;
+ goto dev_init_exit;
+ }
+ ret = ops->get_available_levels(ops, lmh_device->levels);
+ if (ret) {
+ pr_err("Error getting device:[%s] levels. err:%d\n",
+ device_name, ret);
+ goto dev_init_exit;
+ }
+ init_rwsem(&lmh_device->lock);
+ lmh_device->device_ops = ops;
+ strlcpy(lmh_device->device_name, device_name, LMH_NAME_MAX);
+ list_add_tail(&lmh_device->list_ptr, &lmh_device_list);
+ lmh_create_dev_sysfs(lmh_device);
+
+dev_init_exit:
+ if (ret)
+ kfree(lmh_device->levels);
+ return ret;
+}
+
+int lmh_get_all_dev_levels(char *device_name, int *val)
+{
+ int ret = 0;
+ struct lmh_device_data *lmh_device = NULL;
+
+ if (!device_name) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+ down_read(&lmh_dev_access_lock);
+ lmh_device = lmh_match_device_name(device_name);
+ if (!lmh_device) {
+ pr_err("Invalid device:%s\n", device_name);
+ ret = -EINVAL;
+ goto get_all_lvl_exit;
+ }
+ down_read(&lmh_device->lock);
+ if (!val) {
+ ret = lmh_device->max_level;
+ goto get_all_lvl_exit;
+ }
+ memcpy(val, lmh_device->levels,
+ sizeof(int) * lmh_device->max_level);
+
+get_all_lvl_exit:
+ if (lmh_device)
+ up_read(&lmh_device->lock);
+ up_read(&lmh_dev_access_lock);
+ return ret;
+}
+
+int lmh_set_dev_level(char *device_name, int curr_lvl)
+{
+ int ret = 0;
+ struct lmh_device_data *lmh_device = NULL;
+
+ if (!device_name) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+ down_read(&lmh_dev_access_lock);
+ lmh_device = lmh_match_device_name(device_name);
+ if (!lmh_device) {
+ pr_err("Invalid device:%s\n", device_name);
+ ret = -EINVAL;
+ goto set_dev_exit;
+ }
+ down_write(&lmh_device->lock);
+ curr_lvl = min(curr_lvl, lmh_device->levels[lmh_device->max_level - 1]);
+ curr_lvl = max(curr_lvl, lmh_device->levels[0]);
+ if (curr_lvl == lmh_device->curr_level)
+ goto set_dev_exit;
+ ret = lmh_device->device_ops->set_level(lmh_device->device_ops,
+ curr_lvl);
+ if (ret) {
+ pr_err("Error setting current level%d for device[%s]. err:%d\n",
+ curr_lvl, device_name, ret);
+ goto set_dev_exit;
+ }
+ pr_debug("Device:[%s] configured to level %d\n", device_name, curr_lvl);
+ lmh_device->curr_level = curr_lvl;
+
+set_dev_exit:
+ if (lmh_device)
+ up_write(&lmh_device->lock);
+ up_read(&lmh_dev_access_lock);
+ return ret;
+}
+
+int lmh_get_curr_level(char *device_name, int *val)
+{
+ int ret = 0;
+ struct lmh_device_data *lmh_device = NULL;
+
+ if (!device_name || !val) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+ down_read(&lmh_dev_access_lock);
+ lmh_device = lmh_match_device_name(device_name);
+ if (!lmh_device) {
+ pr_err("Invalid device:%s\n", device_name);
+ ret = -EINVAL;
+ goto get_curr_level;
+ }
+ down_read(&lmh_device->lock);
+ ret = lmh_device->device_ops->get_curr_level(lmh_device->device_ops,
+ &lmh_device->curr_level);
+ if (ret) {
+ pr_err("Error getting device[%s] current level. err:%d\n",
+ device_name, ret);
+ goto get_curr_level;
+ }
+ *val = lmh_device->curr_level;
+ pr_debug("Device:%s current level:%d\n", device_name, *val);
+
+get_curr_level:
+ if (lmh_device)
+ up_read(&lmh_device->lock);
+ up_read(&lmh_dev_access_lock);
+ return ret;
+}
+
+int lmh_device_register(char *device_name, struct lmh_device_ops *ops)
+{
+ int ret = 0;
+ struct lmh_device_data *lmh_device = NULL;
+
+ if (!device_name || !ops) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+
+ if (!ops->get_available_levels || !ops->get_curr_level
+ || !ops->set_level) {
+ pr_err("Invalid ops input for device:%s\n", device_name);
+ return -EINVAL;
+ }
+
+ down_write(&lmh_dev_access_lock);
+ if (lmh_match_device_name(device_name)
+ || lmh_match_device_ops(ops)) {
+ ret = -EEXIST;
+ pr_err("Device[%s] allready exists\n", device_name);
+ goto register_exit;
+ }
+ lmh_device = kzalloc(sizeof(struct lmh_device_data), GFP_KERNEL);
+ if (!lmh_device) {
+ pr_err("kzalloc failed\n");
+ ret = -ENOMEM;
+ goto register_exit;
+ }
+ ret = lmh_device_init(lmh_device, device_name, ops);
+ if (ret) {
+ pr_err("Error registering device:%s. err:%d\n", device_name,
+ ret);
+ kfree(lmh_device);
+ goto register_exit;
+ }
+
+ pr_debug("Registered Device:[%s] with %d levels\n", device_name,
+ lmh_device->max_level);
+
+register_exit:
+ up_write(&lmh_dev_access_lock);
+ return ret;
+}
+
+static void lmh_device_remove(struct lmh_device_ops *ops)
+{
+ struct lmh_device_data *lmh_device = NULL;
+
+ lmh_device = lmh_match_device_ops(ops);
+ if (!lmh_device) {
+ pr_err("No match for the device\n");
+ goto deregister_exit;
+ }
+ down_write(&lmh_device->lock);
+ list_del(&lmh_device->list_ptr);
+ pr_debug("Deregistered device:[%s]\n", lmh_device->device_name);
+ kfree(lmh_device->levels);
+ up_write(&lmh_device->lock);
+ kfree(lmh_device);
+
+deregister_exit:
+ return;
+}
+
+void lmh_device_deregister(struct lmh_device_ops *ops)
+{
+ if (!ops) {
+ pr_err("Invalid input\n");
+ return;
+ }
+
+ down_write(&lmh_dev_access_lock);
+ lmh_device_remove(ops);
+ up_write(&lmh_dev_access_lock);
+ return;
+}
+
+static int lmh_parse_and_extract(const char __user *user_buf, size_t count,
+ enum lmh_read_type type)
+{
+ char *local_buf = NULL, *token = NULL, *curr_ptr = NULL, *token1 = NULL;
+ char *next_line = NULL;
+ int ret = 0, data_ct = 0, i = 0, size = 0;
+ uint32_t *config_buf = NULL;
+
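+ /*
+ * Hypothetical input example: "0x1 0x0 0x10;" -- records are separated
+ * by ';' (or end with a final newline) and each record must contain at
+ * least three space-separated numeric fields.
+ */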
+ /* Allocate two extra bytes to add ';' and a NUL terminator */
+ local_buf = kzalloc(count + 2, GFP_KERNEL);
+ if (!local_buf) {
+ ret = -ENOMEM;
+ goto dfs_cfg_write_exit;
+ }
+ if (copy_from_user(local_buf, user_buf, count)) {
+ pr_err("user buf error\n");
+ ret = -EFAULT;
+ goto dfs_cfg_write_exit;
+ }
+ size = count + (strnchr(local_buf, count, '\n') ? 1 : 2);
+ local_buf[size - 2] = ';';
+ local_buf[size - 1] = '\0';
+ curr_ptr = next_line = local_buf;
+ while ((token1 = strnchr(next_line, local_buf + size - next_line, ';'))
+ != NULL) {
+ data_ct = 0;
+ *token1 = '\0';
+ curr_ptr = next_line;
+ next_line = token1 + 1;
+ for (token = (char *)curr_ptr; token &&
+ ((token = strnchr(token, next_line - token, ' '))
+ != NULL); token++)
+ data_ct++;
+ if (data_ct < 2) {
+ pr_err("Invalid format string:[%s]\n", curr_ptr);
+ ret = -EINVAL;
+ goto dfs_cfg_write_exit;
+ }
+ config_buf = kzalloc((++data_ct) * sizeof(uint32_t),
+ GFP_KERNEL);
+ if (!config_buf) {
+ ret = -ENOMEM;
+ goto dfs_cfg_write_exit;
+ }
+ pr_debug("Input:%s data_ct:%d\n", curr_ptr, data_ct);
+ for (i = 0, token = (char *)curr_ptr; token && (i < data_ct);
+ i++) {
+ token = strnchr(token, next_line - token, ' ');
+ if (token)
+ *token = '\0';
+ ret = kstrtouint(curr_ptr, 0, &config_buf[i]);
+ if (ret < 0) {
+ pr_err("Data[%s] scan error. err:%d\n",
+ curr_ptr, ret);
+ kfree(config_buf);
+ goto dfs_cfg_write_exit;
+ }
+ if (token)
+ curr_ptr = ++token;
+ }
+ switch (type) {
+ case LMH_DEBUG_READ_TYPE:
+ ret = lmh_mon_data->debug_ops->debug_config_read(
+ lmh_mon_data->debug_ops, config_buf, data_ct);
+ break;
+ case LMH_DEBUG_CONFIG_TYPE:
+ ret = lmh_mon_data->debug_ops->debug_config_lmh(
+ lmh_mon_data->debug_ops, config_buf, data_ct);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ kfree(config_buf);
+ if (ret) {
+ pr_err("Config error. type:%d err:%d\n", type, ret);
+ goto dfs_cfg_write_exit;
+ }
+ }
+
+dfs_cfg_write_exit:
+ kfree(local_buf);
+ return ret;
+}
+
+static ssize_t lmh_dbgfs_config_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ lmh_parse_and_extract(user_buf, count, LMH_DEBUG_CONFIG_TYPE);
+ return count;
+}
+
+static int lmh_dbgfs_data_read(struct seq_file *seq_fp, void *data)
+{
+ uint32_t *read_buf = NULL;
+ int ret = 0, i = 0;
+
+ ret = lmh_mon_data->debug_ops->debug_read(lmh_mon_data->debug_ops,
+ &read_buf);
+ if (ret <= 0 || !read_buf)
+ goto dfs_read_exit;
+
+ do {
+ seq_printf(seq_fp, "0x%x ", read_buf[i]);
+ i++;
+ if ((i % LMH_READ_LINE_LENGTH) == 0)
+ seq_puts(seq_fp, "\n");
+ } while (i < (ret / sizeof(uint32_t)));
+
+dfs_read_exit:
+ return (ret < 0) ? ret : 0;
+}
+
+static ssize_t lmh_dbgfs_data_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ lmh_parse_and_extract(user_buf, count, LMH_DEBUG_READ_TYPE);
+ return count;
+}
+
+static int lmh_dbgfs_data_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lmh_dbgfs_data_read, inode->i_private);
+}
+
+static int lmh_get_types(struct seq_file *seq_fp, enum lmh_read_type type)
+{
+ int ret = 0, idx = 0, size = 0;
+ uint32_t *type_list = NULL;
+
+ switch (type) {
+ case LMH_DEBUG_READ_TYPE:
+ ret = lmh_mon_data->debug_ops->debug_get_types(
+ lmh_mon_data->debug_ops, true, &type_list);
+ break;
+ case LMH_DEBUG_CONFIG_TYPE:
+ ret = lmh_mon_data->debug_ops->debug_get_types(
+ lmh_mon_data->debug_ops, false, &type_list);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret <= 0 || !type_list) {
+ pr_err("No device information. err:%d\n", ret);
+ return -ENODEV;
+ }
+ size = ret;
+ for (idx = 0; idx < size; idx++)
+ seq_printf(seq_fp, "0x%x ", type_list[idx]);
+ seq_puts(seq_fp, "\n");
+
+ return 0;
+}
+
+static int lmh_dbgfs_read_type(struct seq_file *seq_fp, void *data)
+{
+ return lmh_get_types(seq_fp, LMH_DEBUG_READ_TYPE);
+}
+
+static int lmh_dbgfs_read_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lmh_dbgfs_read_type, inode->i_private);
+}
+
+static int lmh_dbgfs_config_type(struct seq_file *seq_fp, void *data)
+{
+ return lmh_get_types(seq_fp, LMH_DEBUG_CONFIG_TYPE);
+}
+
+static int lmh_dbgfs_config_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lmh_dbgfs_config_type, inode->i_private);
+}
+
+static const struct file_operations lmh_dbgfs_config_fops = {
+ .write = lmh_dbgfs_config_write,
+};
+static const struct file_operations lmh_dbgfs_read_fops = {
+ .open = lmh_dbgfs_data_open,
+ .read = seq_read,
+ .write = lmh_dbgfs_data_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+static const struct file_operations lmh_dbgfs_read_type_fops = {
+ .open = lmh_dbgfs_read_type_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+static const struct file_operations lmh_dbgfs_config_type_fops = {
+ .open = lmh_dbgfs_config_type_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int lmh_debug_register(struct lmh_debug_ops *ops)
+{
+ int ret = 0;
+
+ if (!ops || !ops->debug_read || !ops->debug_config_read
+ || !ops->debug_get_types) {
+ pr_err("Invalid input");
+ ret = -EINVAL;
+ goto dbg_reg_exit;
+ }
+
+ lmh_mon_data->debug_ops = ops;
+ LMH_CREATE_DEBUGFS_DIR(lmh_mon_data->debug_dir, LMH_DBGFS_DIR,
+ lmh_mon_data->debugfs_parent, ret);
+ if (ret)
+ goto dbg_reg_exit;
+
+ LMH_CREATE_DEBUGFS_FILE(lmh_mon_data->debug_read, LMH_DBGFS_READ, 0600,
+ lmh_mon_data->debug_dir, NULL, &lmh_dbgfs_read_fops, ret);
+ if (!lmh_mon_data->debug_read) {
+ pr_err("Error creating" LMH_DBGFS_READ "entry.\n");
+ ret = -ENODEV;
+ goto dbg_reg_exit;
+ }
+ LMH_CREATE_DEBUGFS_FILE(lmh_mon_data->debug_config,
+ LMH_DBGFS_CONFIG_READ, 0200, lmh_mon_data->debug_dir, NULL,
+ &lmh_dbgfs_config_fops, ret);
+ if (!lmh_mon_data->debug_config) {
+ pr_err("Error creating" LMH_DBGFS_CONFIG_READ "entry\n");
+ ret = -ENODEV;
+ goto dbg_reg_exit;
+ }
+ LMH_CREATE_DEBUGFS_FILE(lmh_mon_data->debug_read_type,
+ LMH_DBGFS_READ_TYPES, 0400, lmh_mon_data->debug_dir, NULL,
+ &lmh_dbgfs_read_type_fops, ret);
+ if (!lmh_mon_data->debug_read_type) {
+ pr_err("Error creating" LMH_DBGFS_READ_TYPES "entry\n");
+ ret = -ENODEV;
+ goto dbg_reg_exit;
+ }
+ LMH_CREATE_DEBUGFS_FILE(lmh_mon_data->debug_config_type,
+ LMH_DBGFS_CONFIG_TYPES, 0400, lmh_mon_data->debug_dir, NULL,
+ &lmh_dbgfs_config_type_fops, ret);
+ if (!lmh_mon_data->debug_config_type) {
+ pr_err("Error creating" LMH_DBGFS_CONFIG_TYPES "entry\n");
+ ret = -ENODEV;
+ goto dbg_reg_exit;
+ }
+
+dbg_reg_exit:
+ if (ret) {
+ /* Clean up all the debugfs nodes */
+ debugfs_remove_recursive(lmh_mon_data->debug_dir);
+ lmh_mon_data->debug_ops = NULL;
+ }
+
+ return ret;
+}
+
+static int lmh_mon_init_driver(void)
+{
+ int ret = 0;
+
+ lmh_mon_data = kzalloc(sizeof(struct lmh_mon_driver_data),
+ GFP_KERNEL);
+ if (!lmh_mon_data) {
+ pr_err("No memory\n");
+ return -ENOMEM;
+ }
+
+ LMH_CREATE_DEBUGFS_DIR(lmh_mon_data->debugfs_parent, LMH_MON_NAME,
+ NULL, ret);
+ if (ret)
+ goto init_exit;
+ lmh_mon_data->poll_fs = debugfs_create_u32(LMH_ISR_POLL_DELAY, 0600,
+ lmh_mon_data->debugfs_parent, &lmh_poll_interval);
+ if (IS_ERR(lmh_mon_data->poll_fs))
+ pr_err("Error creating debugfs:[%s]. err:%ld\n",
+ LMH_ISR_POLL_DELAY, PTR_ERR(lmh_mon_data->poll_fs));
+
+init_exit:
+ if (ret == -ENODEV)
+ ret = 0;
+ return ret;
+}
+
+static int __init lmh_mon_init_call(void)
+{
+ int ret = 0;
+
+ ret = lmh_mon_init_driver();
+ if (ret) {
+ pr_err("Error initializing the debugfs. err:%d\n", ret);
+ return ret;
+ }
+ ret = class_register(&lmh_class_info);
+ if (ret)
+ debugfs_remove_recursive(lmh_mon_data->debugfs_parent);
+
+ return ret;
+}
+
+static void lmh_mon_cleanup(void)
+{
+ down_write(&lmh_mon_access_lock);
+ while (!list_empty(&lmh_sensor_list)) {
+ lmh_sensor_remove(list_first_entry(&lmh_sensor_list,
+ struct lmh_mon_sensor_data, list_ptr)->sensor_ops);
+ }
+ up_write(&lmh_mon_access_lock);
+ debugfs_remove_recursive(lmh_mon_data->debugfs_parent);
+ kfree(lmh_mon_data);
+}
+
+static void lmh_device_cleanup(void)
+{
+ down_write(&lmh_dev_access_lock);
+ while (!list_empty(&lmh_device_list)) {
+ lmh_device_remove(list_first_entry(&lmh_device_list,
+ struct lmh_device_data, list_ptr)->device_ops);
+ }
+ up_write(&lmh_dev_access_lock);
+}
+
+static void lmh_debug_cleanup(void)
+{
+ if (lmh_mon_data->debug_ops) {
+ debugfs_remove_recursive(lmh_mon_data->debug_dir);
+ lmh_mon_data->debug_ops = NULL;
+ }
+}
+
+static void __exit lmh_mon_exit(void)
+{
+ lmh_mon_cleanup();
+ lmh_device_cleanup();
+ lmh_debug_cleanup();
+ class_unregister(&lmh_class_info);
+}
+
+module_init(lmh_mon_init_call);
+module_exit(lmh_mon_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("LMH monitor driver");
+MODULE_ALIAS("platform:" LMH_MON_NAME);
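The msm_limits class registered above exposes each LMH device through the
level, total_levels and available_levels sysfs attributes. A minimal
userspace sketch, assuming a device registered under the name "lmh-profile"
(the name comes from the hardware driver, so treat it as an assumption):

	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		FILE *f;

		/* list the selectable profile levels */
		f = fopen("/sys/class/msm_limits/lmh-profile/available_levels", "r");
		if (f) {
			if (fgets(buf, sizeof(buf), f))
				printf("available: %s", buf);
			fclose(f);
		}
		/* request profile level 1 */
		f = fopen("/sys/class/msm_limits/lmh-profile/level", "w");
		if (f) {
			fprintf(f, "1");
			fclose(f);
		}
		return 0;
	}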
diff --git a/drivers/thermal/lmh_interface.h b/drivers/thermal/lmh_interface.h
new file mode 100644
index 000000000000..bf844b563cc7
--- /dev/null
+++ b/drivers/thermal/lmh_interface.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LMH_INTERFACE_H
+#define __LMH_INTERFACE_H
+
+#define LMH_NAME_MAX 20
+#define LMH_POLLING_MSEC 30
+#define LMH_READ_LINE_LENGTH 10
+
+enum lmh_trip_type {
+ LMH_LOW_TRIP,
+ LMH_HIGH_TRIP,
+ LMH_TRIP_MAX,
+};
+
+enum lmh_monitor_state {
+ LMH_ISR_DISABLED,
+ LMH_ISR_MONITOR,
+ LMH_ISR_POLLING,
+ LMH_ISR_NR,
+};
+
+struct lmh_sensor_ops {
+ int (*read)(struct lmh_sensor_ops *, long *);
+ int (*enable_hw_log)(uint32_t, uint32_t);
+ int (*disable_hw_log)(void);
+ void (*new_value_notify)(struct lmh_sensor_ops *, long);
+};
+
+struct lmh_device_ops {
+ int (*get_available_levels)(struct lmh_device_ops *, int *);
+ int (*get_curr_level)(struct lmh_device_ops *, int *);
+ int (*set_level)(struct lmh_device_ops *, int);
+};
+
+struct lmh_debug_ops {
+ int (*debug_read)(struct lmh_debug_ops *, uint32_t **);
+ int (*debug_config_read)(struct lmh_debug_ops *, uint32_t *, int);
+ int (*debug_config_lmh)(struct lmh_debug_ops *, uint32_t *, int);
+ int (*debug_get_types)(struct lmh_debug_ops *, bool, uint32_t **);
+};
+
+static int lmh_poll_interval = LMH_POLLING_MSEC;
+#ifdef CONFIG_LIMITS_MONITOR
+int lmh_get_all_dev_levels(char *, int *);
+int lmh_set_dev_level(char *, int);
+int lmh_get_curr_level(char *, int *);
+int lmh_sensor_register(char *, struct lmh_sensor_ops *);
+void lmh_sensor_deregister(struct lmh_sensor_ops *);
+int lmh_device_register(char *, struct lmh_device_ops *);
+void lmh_device_deregister(struct lmh_device_ops *);
+int lmh_debug_register(struct lmh_debug_ops *);
+void lmh_debug_deregister(struct lmh_debug_ops *ops);
+#else
+static inline int lmh_get_all_dev_levels(char *device_name, int *level)
+{
+ return -ENOSYS;
+}
+
+static inline int lmh_set_dev_level(char *device_name, int level)
+{
+ return -ENOSYS;
+}
+
+static inline int lmh_get_curr_level(char *device_name, int *level)
+{
+ return -ENOSYS;
+}
+
+static inline int lmh_sensor_register(char *sensor_name,
+ struct lmh_sensor_ops *ops)
+{
+ return -ENOSYS;
+}
+
+static inline void lmh_sensor_deregister(struct lmh_sensor_ops *ops)
+{
+ return;
+}
+
+static inline int lmh_device_register(char *device_name,
+ struct lmh_device_ops *ops)
+{
+ return -ENOSYS;
+}
+
+static inline void lmh_device_deregister(struct lmh_device_ops *ops)
+{
+ return;
+}
+
+static inline int lmh_debug_register(struct lmh_debug_ops *ops)
+{
+ return -ENOSYS;
+}
+
+static inline void lmh_debug_deregister(struct lmh_debug_ops *ops)
+{ }
+#endif
+
+#endif /*__LMH_INTERFACE_H*/
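A minimal sketch of a hardware driver registering a sensor against this
interface (all demo_* names are hypothetical). lmh_sensor_register() rejects
ops that lack read, enable_hw_log or disable_hw_log, and the framework fills
in new_value_notify during registration:

	static int demo_read(struct lmh_sensor_ops *ops, long *val)
	{
		*val = 0; /* current throttling intensity */
		return 0;
	}

	static int demo_enable_hw_log(uint32_t interval, uint32_t enable)
	{
		return 0;
	}

	static int demo_disable_hw_log(void)
	{
		return 0;
	}

	static struct lmh_sensor_ops demo_ops = {
		.read = demo_read,
		.enable_hw_log = demo_enable_hw_log,
		.disable_hw_log = demo_disable_hw_log,
	};

	/* in the driver's probe path */
	ret = lmh_sensor_register("demo_sensor", &demo_ops);

lmh_lite.c below follows this pattern in lmh_parse_sensor().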
diff --git a/drivers/thermal/lmh_lite.c b/drivers/thermal/lmh_lite.c
new file mode 100644
index 000000000000..0b2067a5dee1
--- /dev/null
+++ b/drivers/thermal/lmh_lite.c
@@ -0,0 +1,1408 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/regulator/consumer.h>
+#include <asm/cacheflush.h>
+#include <soc/qcom/scm.h>
+#include "lmh_interface.h"
+
+#define CREATE_TRACE_POINTS
+#define TRACE_MSM_LMH
+#include <trace/trace_thermal.h>
+
+#define LMH_DRIVER_NAME "lmh-lite-driver"
+#define LMH_INTERRUPT "lmh-interrupt"
+#define LMH_DEVICE "lmh-profile"
+#define LMH_MAX_SENSOR 10
+#define LMH_GET_PROFILE_SIZE 10
+#define LMH_SCM_PAYLOAD_SIZE 10
+#define LMH_DEFAULT_PROFILE 0
+#define LMH_DEBUG_READ_TYPE 0x0
+#define LMH_DEBUG_CONFIG_TYPE 0x1
+#define LMH_CHANGE_PROFILE 0x01
+#define LMH_GET_PROFILES 0x02
+#define LMH_CTRL_QPMDA 0x03
+#define LMH_TRIM_ERROR 0x04
+#define LMH_GET_INTENSITY 0x06
+#define LMH_GET_SENSORS 0x07
+#define LMH_DEBUG_SET 0x08
+#define LMH_DEBUG_READ_BUF_SIZE 0x09
+#define LMH_DEBUG_READ 0x0A
+#define LMH_DEBUG_GET_TYPE 0x0B
+#define MAX_TRACE_EVENT_MSG_LEN 50
+#define APCS_DPM_VOLTAGE_SCALE 0x09950804
+#define LMH_ODCM_MAX_COUNT 6
+
+#define LMH_CHECK_SCM_CMD(_cmd) \
+ do { \
+ if (!scm_is_call_available(SCM_SVC_LMH, _cmd)) { \
+ pr_err("SCM cmd:%d not available\n", _cmd); \
+ return -ENODEV; \
+ } \
+ } while (0)
+
+#define LMH_GET_RECURSSIVE_DATA(desc_arg, cmd_idx, cmd_buf, payload, next, \
+ size, cmd_id, dest_buf, ret) \
+ do { \
+ int idx = 0; \
+ desc_arg.args[cmd_idx] = cmd_buf.list_start = next; \
+ trace_lmh_event_call("GET_TYPE enter"); \
+ dmac_flush_range(payload, payload + sizeof(uint32_t) * \
+ LMH_SCM_PAYLOAD_SIZE); \
+ if (!is_scm_armv8()) { \
+ ret = scm_call(SCM_SVC_LMH, cmd_id, \
+ (void *) &cmd_buf, SCM_BUFFER_SIZE(cmd_buf), \
+ &size, SCM_BUFFER_SIZE(size)); \
+ } else { \
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, \
+ cmd_id), &desc_arg); \
+ size = desc_arg.ret[0]; \
+ } \
+ /* Have barrier before reading from TZ data */ \
+ mb(); \
+ trace_lmh_event_call("GET_TYPE exit"); \
+ if (ret) { \
+ pr_err("Error in SCM v%d get type. cmd:%x err:%d\n", \
+ (is_scm_armv8()) ? 8 : 7, cmd_id, ret); \
+ break; \
+ } \
+ if (!size) { \
+ pr_err("No LMH device supported.\n"); \
+ ret = -ENODEV; \
+ break; \
+ } \
+ if (!dest_buf) { \
+ dest_buf = devm_kzalloc(lmh_data->dev, \
+ sizeof(uint32_t) * size, GFP_KERNEL); \
+ if (!dest_buf) { \
+ ret = -ENOMEM; \
+ break; \
+ } \
+ } \
+ for (idx = next; \
+ idx < min((next + LMH_SCM_PAYLOAD_SIZE), size); \
+ idx++) \
+ dest_buf[idx] = payload[idx - next]; \
+ next += LMH_SCM_PAYLOAD_SIZE; \
+ } while (next < size)
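+/*
+ * Usage note: the macro above is expanded with a trailing ';' at its call
+ * site (see lmh_get_dev_info()); it re-issues the SCM command, copying
+ * LMH_SCM_PAYLOAD_SIZE words from 'payload' into 'dest_buf' per iteration,
+ * until all 'size' entries reported by TZ have been fetched.
+ */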
+
+struct __attribute__((__packed__)) lmh_sensor_info {
+ uint32_t name;
+ uint32_t node_id;
+ uint32_t intensity;
+ uint32_t max_intensity;
+ uint32_t type;
+};
+
+struct __attribute__((__packed__)) lmh_sensor_packet {
+ uint32_t count;
+ struct lmh_sensor_info sensor[LMH_MAX_SENSOR];
+};
+
+struct lmh_profile {
+ struct lmh_device_ops dev_ops;
+ uint32_t level_ct;
+ uint32_t curr_level;
+ uint32_t *levels;
+ uint32_t read_type_count;
+ uint32_t config_type_count;
+};
+
+struct lmh_debug {
+ struct lmh_debug_ops debug_ops;
+ uint32_t *read_type;
+ uint32_t *config_type;
+ uint32_t read_type_count;
+ uint32_t config_type_count;
+};
+
+struct lmh_driver_data {
+ struct device *dev;
+ struct workqueue_struct *poll_wq;
+ struct delayed_work poll_work;
+ uint32_t log_enabled;
+ uint32_t log_delay;
+ enum lmh_monitor_state intr_state;
+ uint32_t intr_reg_val;
+ uint32_t intr_status_val;
+ uint32_t trim_err_offset;
+ bool trim_err_disable;
+ void *intr_addr;
+ int irq_num;
+ int max_sensor_count;
+ struct lmh_profile dev_info;
+ struct lmh_debug debug_info;
+ struct regulator *regulator;
+ struct notifier_block dpm_notifier_blk;
+ void __iomem *dpm_voltage_scale_reg;
+ uint32_t odcm_thresh_mV;
+ void __iomem *odcm_reg[LMH_ODCM_MAX_COUNT];
+ bool odcm_enabled;
+};
+
+struct lmh_sensor_data {
+ char sensor_name[LMH_NAME_MAX];
+ uint32_t sensor_hw_name;
+ uint32_t sensor_hw_node_id;
+ int sensor_sw_id;
+ struct lmh_sensor_ops ops;
+ long last_read_value;
+ struct list_head list_ptr;
+};
+
+struct lmh_default_data {
+ uint32_t default_profile;
+ uint32_t odcm_reg_addr[LMH_ODCM_MAX_COUNT];
+};
+
+static struct lmh_default_data lmh_lite_data = {
+ .default_profile = 0,
+};
+static struct lmh_default_data lmh_v1_data = {
+ .default_profile = 1,
+ .odcm_reg_addr = { 0x09981030, /* CPU0 */
+ 0x09991030, /* CPU1 */
+ 0x099A1028, /* APC0_L2 */
+ 0x099B1030, /* CPU2 */
+ 0x099C1030, /* CPU3 */
+ 0x099D1028, /* APC1_l2 */
+ },
+};
+static struct lmh_default_data *lmh_hw_data;
+static struct lmh_driver_data *lmh_data;
+static DECLARE_RWSEM(lmh_sensor_access);
+static DEFINE_MUTEX(lmh_sensor_read);
+static DEFINE_MUTEX(lmh_odcm_access);
+static LIST_HEAD(lmh_sensor_list);
+
+static int lmh_read(struct lmh_sensor_ops *ops, long *val)
+{
+ struct lmh_sensor_data *lmh_sensor = container_of(ops,
+ struct lmh_sensor_data, ops);
+
+ mutex_lock(&lmh_sensor_read);
+ *val = lmh_sensor->last_read_value;
+ mutex_unlock(&lmh_sensor_read);
+
+ return 0;
+}
+
+static int lmh_ctrl_qpmda(uint32_t enable)
+{
+ int ret = 0;
+ struct scm_desc desc_arg;
+ struct {
+ uint32_t enable;
+ uint32_t rate;
+ } cmd_buf;
+
+ desc_arg.args[0] = cmd_buf.enable = enable;
+ desc_arg.args[1] = cmd_buf.rate = lmh_data->log_delay;
+ desc_arg.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
+ trace_lmh_event_call("CTRL_QPMDA enter");
+ if (!is_scm_armv8())
+ ret = scm_call(SCM_SVC_LMH, LMH_CTRL_QPMDA,
+ (void *) &cmd_buf, SCM_BUFFER_SIZE(cmd_buf), NULL, 0);
+ else
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+ LMH_CTRL_QPMDA), &desc_arg);
+ trace_lmh_event_call("CTRL_QPMDA exit");
+ if (ret) {
+ pr_err("Error in SCM v%d %s QPMDA call. err:%d\n",
+ (is_scm_armv8()) ? 8 : 7, (enable) ? "enable" :
+ "disable", ret);
+ goto ctrl_exit;
+ }
+
+ctrl_exit:
+ return ret;
+}
+
+static int lmh_disable_log(void)
+{
+ int ret = 0;
+
+ if (!lmh_data->log_enabled)
+ return ret;
+ ret = lmh_ctrl_qpmda(0);
+ if (ret)
+ goto disable_exit;
+ pr_debug("LMH hardware log disabled.\n");
+ lmh_data->log_enabled = 0;
+
+disable_exit:
+ return ret;
+}
+
+static int lmh_enable_log(uint32_t delay, uint32_t reg_val)
+{
+ int ret = 0;
+
+ if (lmh_data->log_enabled == reg_val && lmh_data->log_delay == delay)
+ return ret;
+
+ lmh_data->log_delay = delay;
+ ret = lmh_ctrl_qpmda(reg_val);
+ if (ret)
+ goto enable_exit;
+ pr_debug("LMH hardware log enabled[%u]. delay:%u\n", reg_val, delay);
+ lmh_data->log_enabled = reg_val;
+
+enable_exit:
+ return ret;
+}
+
+static void lmh_update(struct lmh_driver_data *lmh_dat,
+ struct lmh_sensor_data *lmh_sensor)
+{
+ if (lmh_sensor->last_read_value > 0 && !(lmh_dat->intr_status_val
+ & BIT(lmh_sensor->sensor_sw_id))) {
+ pr_debug("Sensor:[%s] interrupt triggered\n",
+ lmh_sensor->sensor_name);
+ trace_lmh_sensor_interrupt(lmh_sensor->sensor_name,
+ lmh_sensor->last_read_value);
+ lmh_dat->intr_status_val |= BIT(lmh_sensor->sensor_sw_id);
+ } else if (lmh_sensor->last_read_value == 0 && (lmh_dat->intr_status_val
+ & BIT(lmh_sensor->sensor_sw_id))) {
+ pr_debug("Sensor:[%s] interrupt clear\n",
+ lmh_sensor->sensor_name);
+ trace_lmh_sensor_interrupt(lmh_sensor->sensor_name,
+ lmh_sensor->last_read_value);
+ lmh_dat->intr_status_val ^= BIT(lmh_sensor->sensor_sw_id);
+ }
+ lmh_sensor->ops.new_value_notify(&lmh_sensor->ops,
+ lmh_sensor->last_read_value);
+}
+
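+/*
+ * Reads every registered LMH sensor in a single GET_INTENSITY SCM call and
+ * pushes the readings (scaled to a 0-100 percentage when TZ reports a
+ * max_intensity) up to the monitor layer via new_value_notify().
+ */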
+static void lmh_read_and_update(struct lmh_driver_data *lmh_dat)
+{
+ int ret = 0, idx = 0;
+ struct lmh_sensor_data *lmh_sensor = NULL;
+ static struct lmh_sensor_packet payload;
+ struct scm_desc desc_arg;
+ struct {
+ /* TZ is 32-bit right now */
+ uint32_t addr;
+ uint32_t size;
+ } cmd_buf;
+
+ mutex_lock(&lmh_sensor_read);
+ list_for_each_entry(lmh_sensor, &lmh_sensor_list, list_ptr)
+ lmh_sensor->last_read_value = 0;
+ payload.count = 0;
+ cmd_buf.addr = SCM_BUFFER_PHYS(&payload);
+ /* &payload may be a physical address > 4 GB */
+ desc_arg.args[0] = SCM_BUFFER_PHYS(&payload);
+ desc_arg.args[1] = cmd_buf.size
+ = SCM_BUFFER_SIZE(struct lmh_sensor_packet);
+ desc_arg.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+ trace_lmh_event_call("GET_INTENSITY enter");
+ dmac_flush_range(&payload, &payload + 1);
+ if (!is_scm_armv8())
+ ret = scm_call(SCM_SVC_LMH, LMH_GET_INTENSITY,
+ (void *) &cmd_buf, SCM_BUFFER_SIZE(cmd_buf), NULL, 0);
+ else
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+ LMH_GET_INTENSITY), &desc_arg);
+ /* Have memory barrier before we access the TZ data */
+ mb();
+ trace_lmh_event_call("GET_INTENSITY exit");
+ if (ret) {
+ pr_err("Error in SCM v%d read call. err:%d\n",
+ (is_scm_armv8()) ? 8 : 7, ret);
+ goto read_exit;
+ }
+
+ for (idx = 0; idx < payload.count; idx++) {
+ list_for_each_entry(lmh_sensor, &lmh_sensor_list, list_ptr) {
+
+ if (payload.sensor[idx].name
+ == lmh_sensor->sensor_hw_name
+ && (payload.sensor[idx].node_id
+ == lmh_sensor->sensor_hw_node_id)) {
+
+ lmh_sensor->last_read_value =
+ (payload.sensor[idx].max_intensity) ?
+ ((payload.sensor[idx].intensity * 100)
+ / payload.sensor[idx].max_intensity)
+ : payload.sensor[idx].intensity;
+ trace_lmh_sensor_reading(
+ lmh_sensor->sensor_name,
+ lmh_sensor->last_read_value);
+ break;
+ }
+ }
+ }
+
+read_exit:
+ mutex_unlock(&lmh_sensor_read);
+ list_for_each_entry(lmh_sensor, &lmh_sensor_list, list_ptr)
+ lmh_update(lmh_dat, lmh_sensor);
+
+ return;
+}
+
+static void lmh_poll(struct work_struct *work)
+{
+ struct lmh_driver_data *lmh_dat = container_of(work,
+ struct lmh_driver_data, poll_work.work);
+
+ down_write(&lmh_sensor_access);
+ if (lmh_dat->intr_state != LMH_ISR_POLLING)
+ goto poll_exit;
+ lmh_read_and_update(lmh_dat);
+ if (!lmh_dat->intr_status_val) {
+ lmh_dat->intr_state = LMH_ISR_MONITOR;
+ pr_debug("Zero throttling. Re-enabling interrupt\n");
+ trace_lmh_event_call("Lmh Interrupt Clear");
+ enable_irq(lmh_dat->irq_num);
+ goto poll_exit;
+ } else {
+ queue_delayed_work(lmh_dat->poll_wq, &lmh_dat->poll_work,
+ msecs_to_jiffies(lmh_poll_interval));
+ }
+
+poll_exit:
+ up_write(&lmh_sensor_access);
+ return;
+}
+
+static void lmh_trim_error(void)
+{
+ struct scm_desc desc_arg;
+ int ret = 0;
+
+ WARN_ON(1);
+ pr_err("LMH hardware trim error\n");
+ desc_arg.arginfo = SCM_ARGS(0);
+ trace_lmh_event_call("TRIM_ERROR enter");
+ if (!is_scm_armv8())
+ ret = scm_call(SCM_SVC_LMH, LMH_TRIM_ERROR, NULL, 0, NULL, 0);
+ else
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+ LMH_TRIM_ERROR), &desc_arg);
+ trace_lmh_event_call("TRIM_ERROR exit");
+ if (ret)
+ pr_err("Error in SCM v%d trim error call. err:%d\n",
+ (is_scm_armv8()) ? 8 : 7, ret);
+
+ return;
+}
+
+static irqreturn_t lmh_handle_isr(int irq, void *dev_id)
+{
+ disable_irq_nosync(irq);
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t lmh_isr_thread(int irq, void *data)
+{
+ struct lmh_driver_data *lmh_dat = data;
+
+ pr_debug("LMH Interrupt triggered\n");
+ trace_lmh_event_call("Lmh Interrupt");
+
+ down_write(&lmh_sensor_access);
+ if (lmh_dat->intr_state != LMH_ISR_MONITOR) {
+ pr_err("Invalid software state\n");
+ trace_lmh_event_call("Invalid software state");
+ WARN_ON(1);
+ goto isr_unlock_exit;
+ }
+ lmh_dat->intr_state = LMH_ISR_POLLING;
+ if (!lmh_dat->trim_err_disable) {
+ lmh_dat->intr_reg_val = readl_relaxed(lmh_dat->intr_addr);
+ pr_debug("Lmh hw interrupt:%d\n", lmh_dat->intr_reg_val);
+ if (lmh_dat->intr_reg_val & BIT(lmh_dat->trim_err_offset)) {
+ trace_lmh_event_call("Lmh trim error");
+ lmh_trim_error();
+ lmh_dat->intr_state = LMH_ISR_MONITOR;
+ goto decide_next_action;
+ }
+ }
+ lmh_read_and_update(lmh_dat);
+ if (!lmh_dat->intr_status_val) {
+ pr_debug("LMH not throttling. Enabling interrupt\n");
+ lmh_dat->intr_state = LMH_ISR_MONITOR;
+ trace_lmh_event_call("Lmh Zero throttle Interrupt Clear");
+ goto decide_next_action;
+ }
+
+decide_next_action:
+ if (lmh_dat->intr_state == LMH_ISR_POLLING)
+ queue_delayed_work(lmh_dat->poll_wq, &lmh_dat->poll_work,
+ msecs_to_jiffies(lmh_poll_interval));
+ else
+ enable_irq(lmh_dat->irq_num);
+
+isr_unlock_exit:
+ up_write(&lmh_sensor_access);
+ return IRQ_HANDLED;
+}
+
+static int lmh_get_sensor_devicetree(struct platform_device *pdev)
+{
+ int ret = 0, idx = 0;
+ char *key = NULL;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *lmh_intr_base = NULL;
+
+ lmh_data->trim_err_disable = false;
+ key = "qcom,lmh-trim-err-offset";
+ ret = of_property_read_u32(node, key,
+ &lmh_data->trim_err_offset);
+ if (ret) {
+ if (ret == -EINVAL) {
+ lmh_data->trim_err_disable = true;
+ ret = 0;
+ } else {
+ pr_err("Error reading:%s. err:%d\n", key, ret);
+ goto dev_exit;
+ }
+ }
+
+ lmh_data->regulator = devm_regulator_get(lmh_data->dev, "vdd-apss");
+ if (IS_ERR(lmh_data->regulator)) {
+ pr_err("unable to get vdd-apss regulator. err:%ld\n",
+ PTR_ERR(lmh_data->regulator));
+ lmh_data->regulator = NULL;
+ } else {
+ key = "qcom,lmh-odcm-disable-threshold-mA";
+ ret = of_property_read_u32(node, key,
+ &lmh_data->odcm_thresh_mV);
+ if (ret) {
+ pr_err("Error getting ODCM thresh. err:%d\n", ret);
+ ret = 0;
+ } else {
+ lmh_data->odcm_enabled = true;
+ for (; idx < LMH_ODCM_MAX_COUNT; idx++) {
+ lmh_data->odcm_reg[idx] =
+ devm_ioremap(&pdev->dev,
+ lmh_hw_data->odcm_reg_addr[idx], 4);
+ if (!lmh_data->odcm_reg[idx]) {
+ pr_err("Err mapping ODCM memory 0x%x\n",
+ lmh_hw_data->odcm_reg_addr[idx]);
+ lmh_data->odcm_enabled = false;
+ lmh_data->odcm_reg[0] = NULL;
+ break;
+ }
+ }
+ }
+ }
+
+ lmh_data->irq_num = platform_get_irq(pdev, 0);
+ if (lmh_data->irq_num < 0) {
+ ret = lmh_data->irq_num;
+ pr_err("Error getting IRQ number. err:%d\n", ret);
+ goto dev_exit;
+ }
+
+ ret = request_threaded_irq(lmh_data->irq_num, lmh_handle_isr,
+ lmh_isr_thread, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ LMH_INTERRUPT, lmh_data);
+ if (ret) {
+ pr_err("Error getting irq for LMH. err:%d\n", ret);
+ goto dev_exit;
+ }
+
+ if (!lmh_data->trim_err_disable) {
+ lmh_intr_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!lmh_intr_base) {
+ ret = -EINVAL;
+ pr_err("Error getting reg MEM for LMH.\n");
+ goto dev_exit;
+ }
+ lmh_data->intr_addr =
+ devm_ioremap(&pdev->dev, lmh_intr_base->start,
+ resource_size(lmh_intr_base));
+ if (!lmh_data->intr_addr) {
+ ret = -ENODEV;
+ pr_err("Error Mapping LMH memory address\n");
+ goto dev_exit;
+ }
+ }
+
+dev_exit:
+ return ret;
+}
+
+static void lmh_remove_sensors(void)
+{
+ struct lmh_sensor_data *curr_sensor = NULL, *prev_sensor = NULL;
+
+ down_write(&lmh_sensor_access);
+ list_for_each_entry_safe(prev_sensor, curr_sensor, &lmh_sensor_list,
+ list_ptr) {
+ list_del(&prev_sensor->list_ptr);
+ pr_debug("Deregistering Sensor:[%s]\n",
+ prev_sensor->sensor_name);
+ lmh_sensor_deregister(&prev_sensor->ops);
+ devm_kfree(lmh_data->dev, prev_sensor);
+ }
+ up_write(&lmh_sensor_access);
+}
+
+static int lmh_check_tz_debug_cmds(void)
+{
+ LMH_CHECK_SCM_CMD(LMH_DEBUG_SET);
+ LMH_CHECK_SCM_CMD(LMH_DEBUG_READ_BUF_SIZE);
+ LMH_CHECK_SCM_CMD(LMH_DEBUG_READ);
+ LMH_CHECK_SCM_CMD(LMH_DEBUG_GET_TYPE);
+
+ return 0;
+}
+
+static int lmh_check_tz_dev_cmds(void)
+{
+ LMH_CHECK_SCM_CMD(LMH_CHANGE_PROFILE);
+ LMH_CHECK_SCM_CMD(LMH_GET_PROFILES);
+
+ return 0;
+}
+
+static int lmh_check_tz_sensor_cmds(void)
+{
+ LMH_CHECK_SCM_CMD(LMH_CTRL_QPMDA);
+ if (!lmh_data->trim_err_disable)
+ LMH_CHECK_SCM_CMD(LMH_TRIM_ERROR);
+ LMH_CHECK_SCM_CMD(LMH_GET_INTENSITY);
+ LMH_CHECK_SCM_CMD(LMH_GET_SENSORS);
+
+ return 0;
+}
+
+static int lmh_parse_sensor(struct lmh_sensor_info *sens_info)
+{
+ int ret = 0, idx = 0, size = 0;
+ struct lmh_sensor_data *lmh_sensor = NULL;
+
+ lmh_sensor = devm_kzalloc(lmh_data->dev, sizeof(struct lmh_sensor_data),
+ GFP_KERNEL);
+ if (!lmh_sensor) {
+ pr_err("No payload\n");
+ return -ENOMEM;
+ }
+ size = sizeof(sens_info->name);
+ size = min(size, LMH_NAME_MAX);
+ memset(lmh_sensor->sensor_name, '\0', LMH_NAME_MAX);
+ while (size--)
+ lmh_sensor->sensor_name[idx++] = ((sens_info->name
+ & (0xFF << (size * 8))) >> (size * 8));
+ if (lmh_sensor->sensor_name[idx - 1] == '\0')
+ idx--;
+ lmh_sensor->sensor_name[idx++] = '_';
+ size = sizeof(sens_info->node_id);
+ if ((idx + size) > LMH_NAME_MAX)
+ size = LMH_NAME_MAX - idx - 1; /* keep the name NUL-terminated */
+ while (size--)
+ lmh_sensor->sensor_name[idx++] = ((sens_info->node_id
+ & (0xFF << (size * 8))) >> (size * 8));
+ pr_info("Registering sensor:[%s]\n", lmh_sensor->sensor_name);
+ lmh_sensor->ops.read = lmh_read;
+ lmh_sensor->ops.disable_hw_log = lmh_disable_log;
+ lmh_sensor->ops.enable_hw_log = lmh_enable_log;
+ lmh_sensor->sensor_sw_id = lmh_data->max_sensor_count++;
+ lmh_sensor->sensor_hw_name = sens_info->name;
+ lmh_sensor->sensor_hw_node_id = sens_info->node_id;
+ ret = lmh_sensor_register(lmh_sensor->sensor_name, &lmh_sensor->ops);
+ if (ret) {
+ pr_err("Sensor:[%s] registration failed. err:%d\n",
+ lmh_sensor->sensor_name, ret);
+ goto sens_exit;
+ }
+ list_add_tail(&lmh_sensor->list_ptr, &lmh_sensor_list);
+ pr_debug("Registered sensor:[%s] driver\n", lmh_sensor->sensor_name);
+
+sens_exit:
+ if (ret)
+ devm_kfree(lmh_data->dev, lmh_sensor);
+ return ret;
+}
+
+static int lmh_get_sensor_list(void)
+{
+ int ret = 0;
+ uint32_t size = 0, next = 0, idx = 0, count = 0;
+ struct scm_desc desc_arg;
+ struct lmh_sensor_packet *payload = NULL;
+ struct {
+ uint32_t addr;
+ uint32_t size;
+ } cmd_buf;
+ dma_addr_t payload_phys;
+ DEFINE_DMA_ATTRS(attrs);
+ struct device dev = {0};
+
+ dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+ dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+ payload = dma_alloc_attrs(&dev,
+ PAGE_ALIGN(sizeof(struct lmh_sensor_packet)),
+ &payload_phys, GFP_KERNEL, &attrs);
+ if (!payload) {
+ pr_err("No payload\n");
+ return -ENOMEM;
+ }
+
+ do {
+ payload->count = next;
+ cmd_buf.addr = payload_phys;
+ /* payload_phys may be a physical address > 4 GB */
+ desc_arg.args[0] = payload_phys;
+ desc_arg.args[1] = cmd_buf.size = SCM_BUFFER_SIZE(struct
+ lmh_sensor_packet);
+ desc_arg.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+ trace_lmh_event_call("GET_SENSORS enter");
+ if (!is_scm_armv8())
+ ret = scm_call(SCM_SVC_LMH, LMH_GET_SENSORS,
+ (void *) &cmd_buf,
+ SCM_BUFFER_SIZE(cmd_buf),
+ NULL, 0);
+ else
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+ LMH_GET_SENSORS), &desc_arg);
+ /* Have memory barrier before we access the TZ data */
+ mb();
+ trace_lmh_event_call("GET_SENSORS exit");
+ if (ret < 0) {
+ pr_err("Error in SCM v%d call. err:%d\n",
+ (is_scm_armv8()) ? 8 : 7, ret);
+ goto get_exit;
+ }
+ size = payload->count;
+ if (!size) {
+ pr_err("No LMH sensor supported\n");
+ ret = -ENODEV;
+ goto get_exit;
+ }
+ count = ((size - next) > LMH_MAX_SENSOR) ? LMH_MAX_SENSOR :
+ (size - next);
+ next += LMH_MAX_SENSOR;
+ for (idx = 0; idx < count; idx++) {
+ ret = lmh_parse_sensor(&payload->sensor[idx]);
+ if (ret)
+ goto get_exit;
+ }
+ } while (next < size);
+
+get_exit:
+	/* Free with the same size the buffer was allocated with */
+	dma_free_attrs(&dev, PAGE_ALIGN(sizeof(struct lmh_sensor_packet)),
+			payload, payload_phys, &attrs);
+ return ret;
+}
+
+static int lmh_set_level(struct lmh_device_ops *ops, int level)
+{
+ int ret = 0, idx = 0;
+ struct scm_desc desc_arg;
+ struct lmh_profile *lmh_dev;
+
+ if (level < 0 || !ops) {
+ pr_err("Invalid Input\n");
+ return -EINVAL;
+ }
+ lmh_dev = container_of(ops, struct lmh_profile, dev_ops);
+	for (idx = 0; idx < lmh_dev->level_ct; idx++)
+		if (level == lmh_dev->levels[idx])
+			break;
+ if (idx == lmh_dev->level_ct) {
+ pr_err("Invalid profile:[%d]\n", level);
+ return -EINVAL;
+ }
+ desc_arg.args[0] = level;
+ desc_arg.arginfo = SCM_ARGS(1, SCM_VAL);
+ if (!is_scm_armv8())
+ ret = scm_call_atomic1(SCM_SVC_LMH, LMH_CHANGE_PROFILE,
+ level);
+ else
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+ LMH_CHANGE_PROFILE), &desc_arg);
+ if (ret) {
+ pr_err("Error in SCM v%d switching profile:[%d]. err:%d\n",
+ (is_scm_armv8()) ? 8 : 7, level, ret);
+ return ret;
+ }
+ pr_debug("Device:[%s] Current level:%d\n", LMH_DEVICE, level);
+ lmh_dev->curr_level = level;
+
+	return ret;
+}
+
+static int lmh_get_all_level(struct lmh_device_ops *ops, int *level)
+{
+ struct lmh_profile *lmh_dev;
+
+ if (!ops) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+ lmh_dev = container_of(ops, struct lmh_profile, dev_ops);
+ if (!level)
+ return lmh_dev->level_ct;
+ memcpy(level, lmh_dev->levels, lmh_dev->level_ct * sizeof(uint32_t));
+
+ return 0;
+}
+
+
+static int lmh_get_level(struct lmh_device_ops *ops, int *level)
+{
+ struct lmh_profile *lmh_dev;
+
+ if (!level || !ops) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+ lmh_dev = container_of(ops, struct lmh_profile, dev_ops);
+
+ *level = lmh_dev->curr_level;
+
+ return 0;
+}
+
+static int lmh_get_dev_info(void)
+{
+ int ret = 0;
+ uint32_t size = 0, next = 0;
+ struct scm_desc desc_arg;
+ uint32_t *payload = NULL;
+ struct {
+ uint32_t list_addr;
+ uint32_t list_size;
+ uint32_t list_start;
+ } cmd_buf;
+
+ payload = devm_kzalloc(lmh_data->dev, sizeof(uint32_t) *
+ LMH_GET_PROFILE_SIZE, GFP_KERNEL);
+ if (!payload) {
+ pr_err("No payload\n");
+ ret = -ENOMEM;
+ goto get_dev_exit;
+ }
+
+ cmd_buf.list_addr = SCM_BUFFER_PHYS(payload);
+ /* &payload may be a physical address > 4 GB */
+ desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+ desc_arg.args[1] = cmd_buf.list_size =
+ SCM_BUFFER_SIZE(uint32_t) * LMH_GET_PROFILE_SIZE;
+ desc_arg.arginfo = SCM_ARGS(3, SCM_RW, SCM_VAL, SCM_VAL);
+ LMH_GET_RECURSSIVE_DATA(desc_arg, 2, cmd_buf, payload, next, size,
+ LMH_GET_PROFILES, lmh_data->dev_info.levels, ret);
+ if (ret)
+ goto get_dev_exit;
+ lmh_data->dev_info.level_ct = size;
+ lmh_data->dev_info.curr_level = LMH_DEFAULT_PROFILE;
+ ret = lmh_set_level(&lmh_data->dev_info.dev_ops,
+ lmh_hw_data->default_profile);
+ if (ret) {
+		pr_err("Error switching to default profile:%d, err:%d\n",
+			lmh_hw_data->default_profile, ret);
+ goto get_dev_exit;
+ }
+
+get_dev_exit:
+ if (ret)
+ devm_kfree(lmh_data->dev, lmh_data->dev_info.levels);
+ devm_kfree(lmh_data->dev, payload);
+ return ret;
+}
+
+static int lmh_device_init(void)
+{
+ int ret = 0;
+
+ if (lmh_check_tz_dev_cmds())
+ return -ENODEV;
+
+ ret = lmh_get_dev_info();
+ if (ret)
+ goto dev_init_exit;
+
+ lmh_data->dev_info.dev_ops.get_available_levels = lmh_get_all_level;
+ lmh_data->dev_info.dev_ops.get_curr_level = lmh_get_level;
+ lmh_data->dev_info.dev_ops.set_level = lmh_set_level;
+ ret = lmh_device_register(LMH_DEVICE, &lmh_data->dev_info.dev_ops);
+ if (ret) {
+ pr_err("Error registering device:[%s]. err:%d", LMH_DEVICE,
+ ret);
+ goto dev_init_exit;
+ }
+
+dev_init_exit:
+ return ret;
+}
+
+static int lmh_debug_read(struct lmh_debug_ops *ops, uint32_t **buf)
+{
+ int ret = 0, size = 0, tz_ret = 0;
+ static uint32_t curr_size;
+ struct scm_desc desc_arg;
+ static uint32_t *payload;
+ struct {
+ uint32_t buf_addr;
+ uint32_t buf_size;
+ } cmd_buf;
+
+ desc_arg.arginfo = SCM_ARGS(0);
+ trace_lmh_event_call("GET_DEBUG_READ_SIZE enter");
+ if (!is_scm_armv8()) {
+ ret = scm_call(SCM_SVC_LMH, LMH_DEBUG_READ_BUF_SIZE,
+ NULL, 0, &size, SCM_BUFFER_SIZE(size));
+ } else {
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+ LMH_DEBUG_READ_BUF_SIZE), &desc_arg);
+ size = desc_arg.ret[0];
+ }
+ trace_lmh_event_call("GET_DEBUG_READ_SIZE exit");
+ if (ret) {
+ pr_err("Error in SCM v%d get debug buffer size call. err:%d\n",
+ (is_scm_armv8()) ? 8 : 7, ret);
+ goto get_dbg_exit;
+ }
+ if (!size) {
+ pr_err("No Debug data to read.\n");
+ ret = -ENODEV;
+ goto get_dbg_exit;
+ }
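+	/*
+	 * TZ reports the debug buffer size in lines of LMH_READ_LINE_LENGTH
+	 * 32-bit words. The read buffer is cached across calls and only
+	 * reallocated when that size changes.
+	 */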
+ size = SCM_BUFFER_SIZE(uint32_t) * size * LMH_READ_LINE_LENGTH;
+ if (curr_size != size) {
+ if (payload)
+ devm_kfree(lmh_data->dev, payload);
+ payload = devm_kzalloc(lmh_data->dev, size, GFP_KERNEL);
+ if (!payload) {
+ pr_err("payload buffer alloc failed\n");
+ ret = -ENOMEM;
+ goto get_dbg_exit;
+ }
+ curr_size = size;
+ }
+
+ cmd_buf.buf_addr = SCM_BUFFER_PHYS(payload);
+ /* &payload may be a physical address > 4 GB */
+ desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+ desc_arg.args[1] = cmd_buf.buf_size = curr_size;
+ desc_arg.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+ trace_lmh_event_call("GET_DEBUG_READ enter");
+	dmac_flush_range(payload, (void *)payload + curr_size);
+ if (!is_scm_armv8()) {
+ ret = scm_call(SCM_SVC_LMH, LMH_DEBUG_READ,
+ (void *) &cmd_buf, SCM_BUFFER_SIZE(cmd_buf),
+ &tz_ret, SCM_BUFFER_SIZE(tz_ret));
+ } else {
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH,
+ LMH_DEBUG_READ), &desc_arg);
+ tz_ret = desc_arg.ret[0];
+ }
+ /* Have memory barrier before we access the TZ data */
+ mb();
+ trace_lmh_event_call("GET_DEBUG_READ exit");
+ if (ret) {
+ pr_err("Error in SCM v%d get debug read. err:%d\n",
+ (is_scm_armv8()) ? 8 : 7, ret);
+ goto get_dbg_exit;
+ }
+ if (tz_ret) {
+ pr_err("TZ API returned error. err:%d\n", tz_ret);
+ ret = tz_ret;
+ goto get_dbg_exit;
+ }
+ trace_lmh_debug_data("Debug read", payload,
+ curr_size / sizeof(uint32_t));
+
+get_dbg_exit:
+ if (ret && payload) {
+ devm_kfree(lmh_data->dev, payload);
+ payload = NULL;
+ curr_size = 0;
+ }
+ *buf = payload;
+
+ return (ret < 0) ? ret : curr_size;
+}
+
+static int lmh_debug_config_write(uint32_t cmd_id, uint32_t *buf, int size)
+{
+ int ret = 0, size_bytes = 0;
+ struct scm_desc desc_arg;
+ uint32_t *payload = NULL;
+ struct {
+ uint32_t buf_addr;
+ uint32_t buf_size;
+ uint32_t node;
+ uint32_t node_id;
+ uint32_t read_type;
+ } cmd_buf;
+
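+	/*
+	 * Expected buf layout: buf[0] = node, buf[1] = node id,
+	 * buf[2] = read/config type, buf[3..] = payload words copied into
+	 * the buffer passed to TZ.
+	 */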
+	trace_lmh_debug_data("Config LMH", buf, size);
+	/* buf must carry node, node id and read type before the payload */
+	if (size <= 3)
+		return -EINVAL;
+	size_bytes = (size - 3) * sizeof(uint32_t);
+ payload = devm_kzalloc(lmh_data->dev, size_bytes, GFP_KERNEL);
+ if (!payload) {
+ ret = -ENOMEM;
+ goto set_cfg_exit;
+ }
+ memcpy(payload, &buf[3], size_bytes);
+
+ cmd_buf.buf_addr = SCM_BUFFER_PHYS(payload);
+ /* &payload may be a physical address > 4 GB */
+ desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+ desc_arg.args[1] = cmd_buf.buf_size = size_bytes;
+ desc_arg.args[2] = cmd_buf.node = buf[0];
+ desc_arg.args[3] = cmd_buf.node_id = buf[1];
+ desc_arg.args[4] = cmd_buf.read_type = buf[2];
+ desc_arg.arginfo = SCM_ARGS(5, SCM_RO, SCM_VAL, SCM_VAL, SCM_VAL,
+ SCM_VAL);
+ trace_lmh_event_call("CONFIG_DEBUG_WRITE enter");
+	dmac_flush_range(payload, (void *)payload + size_bytes);
+ if (!is_scm_armv8())
+ ret = scm_call(SCM_SVC_LMH, cmd_id, (void *) &cmd_buf,
+ SCM_BUFFER_SIZE(cmd_buf), NULL, 0);
+ else
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, cmd_id), &desc_arg);
+ /* Have memory barrier before we access the TZ data */
+ mb();
+ trace_lmh_event_call("CONFIG_DEBUG_WRITE exit");
+ if (ret) {
+ pr_err("Error in SCM v%d config debug read. err:%d\n",
+ (is_scm_armv8()) ? 8 : 7, ret);
+ goto set_cfg_exit;
+ }
+
+set_cfg_exit:
+ return ret;
+}
+
+static int lmh_debug_config_read(struct lmh_debug_ops *ops, uint32_t *buf,
+ int size)
+{
+ return lmh_debug_config_write(LMH_DEBUG_SET, buf, size);
+}
+
+static int lmh_debug_get_types(struct lmh_debug_ops *ops, bool is_read,
+ uint32_t **buf)
+{
+ int ret = 0;
+ uint32_t size = 0, next = 0;
+ struct scm_desc desc_arg;
+ uint32_t *payload = NULL, *dest_buf = NULL;
+ struct {
+ uint32_t list_addr;
+ uint32_t list_size;
+ uint32_t cmd_type;
+ uint32_t list_start;
+ } cmd_buf;
+
+ if (is_read && lmh_data->debug_info.read_type) {
+ *buf = lmh_data->debug_info.read_type;
+ trace_lmh_debug_data("Data type",
+ lmh_data->debug_info.read_type,
+ lmh_data->debug_info.read_type_count);
+ return lmh_data->debug_info.read_type_count;
+ } else if (!is_read && lmh_data->debug_info.config_type) {
+ *buf = lmh_data->debug_info.config_type;
+ trace_lmh_debug_data("Config type",
+ lmh_data->debug_info.config_type,
+ lmh_data->debug_info.config_type_count);
+ return lmh_data->debug_info.config_type_count;
+ }
+ payload = devm_kzalloc(lmh_data->dev, sizeof(uint32_t) *
+ LMH_SCM_PAYLOAD_SIZE, GFP_KERNEL);
+ if (!payload) {
+ ret = -ENOMEM;
+ goto get_type_exit;
+ }
+ cmd_buf.list_addr = SCM_BUFFER_PHYS(payload);
+ /* &payload may be a physical address > 4 GB */
+ desc_arg.args[0] = SCM_BUFFER_PHYS(payload);
+ desc_arg.args[1] = cmd_buf.list_size =
+ SCM_BUFFER_SIZE(uint32_t) * LMH_SCM_PAYLOAD_SIZE;
+ desc_arg.args[2] = cmd_buf.cmd_type = (is_read) ?
+ LMH_DEBUG_READ_TYPE : LMH_DEBUG_CONFIG_TYPE;
+ desc_arg.arginfo = SCM_ARGS(4, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL);
+ LMH_GET_RECURSSIVE_DATA(desc_arg, 3, cmd_buf, payload, next, size,
+ LMH_DEBUG_GET_TYPE, dest_buf, ret);
+ if (ret)
+ goto get_type_exit;
+ pr_debug("Total %s types:%d\n", (is_read) ? "read" : "config", size);
+ if (is_read) {
+ lmh_data->debug_info.read_type = *buf = dest_buf;
+ lmh_data->debug_info.read_type_count = size;
+ trace_lmh_debug_data("Data type", dest_buf, size);
+ } else {
+ lmh_data->debug_info.config_type = *buf = dest_buf;
+ lmh_data->debug_info.config_type_count = size;
+ trace_lmh_debug_data("Config type", dest_buf, size);
+ }
+
+get_type_exit:
+	if (ret) {
+		devm_kfree(lmh_data->dev, lmh_data->debug_info.read_type);
+		devm_kfree(lmh_data->dev, lmh_data->debug_info.config_type);
+		/*
+		 * Clear the cached pointers so a later call cannot hand
+		 * back a freed buffer.
+		 */
+		lmh_data->debug_info.read_type = NULL;
+		lmh_data->debug_info.config_type = NULL;
+		lmh_data->debug_info.config_type_count = 0;
+		lmh_data->debug_info.read_type_count = 0;
+	}
+ devm_kfree(lmh_data->dev, payload);
+ return (ret) ? ret : size;
+}
+
+static int lmh_debug_lmh_config(struct lmh_debug_ops *ops, uint32_t *buf,
+ int size)
+{
+ return lmh_debug_config_write(LMH_DEBUG_SET, buf, size);
+}
+
+static void lmh_voltage_scale_set(uint32_t voltage)
+{
+ char trace_buf[MAX_TRACE_EVENT_MSG_LEN] = "";
+
+ mutex_lock(&scm_lmh_lock);
+ writel_relaxed(voltage, lmh_data->dpm_voltage_scale_reg);
+ mutex_unlock(&scm_lmh_lock);
+ snprintf(trace_buf, MAX_TRACE_EVENT_MSG_LEN,
+ "DPM voltage scale %d mV", voltage);
+ pr_debug("%s\n", trace_buf);
+ trace_lmh_event_call(trace_buf);
+}
+
+static void write_to_odcm(bool enable)
+{
+ uint32_t idx = 0, data = enable ? 1 : 0;
+
+ for (; idx < LMH_ODCM_MAX_COUNT; idx++)
+ writel_relaxed(data, lmh_data->odcm_reg[idx]);
+}
+
+static void evaluate_and_config_odcm(uint32_t rail_uV, unsigned long state)
+{
+ uint32_t rail_mV = rail_uV / 1000;
+ static bool prev_state, disable_odcm;
+
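+	/*
+	 * ODCM switching is two-phase: enable before a voltage increase is
+	 * applied, but delay a disable until the decrease has actually
+	 * completed (POST event); an aborted change restores the previous
+	 * state.
+	 */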
+ mutex_lock(&lmh_odcm_access);
+ switch (state) {
+ case REGULATOR_EVENT_VOLTAGE_CHANGE:
+ if (!disable_odcm)
+ break;
+ pr_debug("Disable ODCM\n");
+ write_to_odcm(false);
+ lmh_data->odcm_enabled = false;
+ disable_odcm = false;
+ break;
+ case REGULATOR_EVENT_PRE_VOLTAGE_CHANGE:
+ disable_odcm = false;
+ prev_state = lmh_data->odcm_enabled;
+ if (rail_mV > lmh_data->odcm_thresh_mV) {
+ if (lmh_data->odcm_enabled)
+ break;
+ /* Enable ODCM before the voltage increases */
+ pr_debug("Enable ODCM for voltage %u mV\n", rail_mV);
+ write_to_odcm(true);
+ lmh_data->odcm_enabled = true;
+ } else {
+ if (!lmh_data->odcm_enabled)
+ break;
+ /* Disable ODCM after the voltage decreases */
+ pr_debug("Disable ODCM for voltage %u mV\n", rail_mV);
+ disable_odcm = true;
+ }
+ break;
+ case REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE:
+ disable_odcm = false;
+ if (prev_state == lmh_data->odcm_enabled)
+ break;
+ pr_debug("Reverting ODCM state to %s\n",
+ prev_state ? "enabled" : "disabled");
+ write_to_odcm(prev_state);
+ lmh_data->odcm_enabled = prev_state;
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&lmh_odcm_access);
+}
+
+static int lmh_voltage_change_notifier(struct notifier_block *nb_data,
+ unsigned long event, void *data)
+{
+ uint32_t voltage = 0;
+ static uint32_t last_voltage;
+ static bool change_needed;
+
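+	/*
+	 * The DPM scale register must never report less than the actual
+	 * rail voltage: on the way up it is written before the regulator
+	 * changes, on the way down the write is deferred until the matching
+	 * POST_VOLTAGE_CHANGE confirms the final voltage.
+	 */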
+ if (event == REGULATOR_EVENT_VOLTAGE_CHANGE) {
+ /* Convert from uV to mV */
+ pr_debug("Received event POST_VOLTAGE_CHANGE\n");
+ voltage = ((unsigned long)data) / 1000;
+		if (change_needed && last_voltage == voltage) {
+			lmh_voltage_scale_set(voltage);
+			change_needed = false;
+		}
+ if (lmh_data->odcm_reg[0])
+ evaluate_and_config_odcm(0, event);
+ } else if (event == REGULATOR_EVENT_PRE_VOLTAGE_CHANGE) {
+ struct pre_voltage_change_data *change_data =
+ (struct pre_voltage_change_data *)data;
+ last_voltage = change_data->min_uV / 1000;
+ if (change_data->min_uV > change_data->old_uV)
+ /* Going from low to high apply change first */
+ lmh_voltage_scale_set(last_voltage);
+ else
+ /* Going from high to low apply change after */
+			change_needed = true;
+ pr_debug("Received event PRE_VOLTAGE_CHANGE\n");
+ pr_debug("max = %lu mV min = %lu mV previous = %lu mV\n",
+ change_data->max_uV / 1000, change_data->min_uV / 1000,
+ change_data->old_uV / 1000);
+
+ if (lmh_data->odcm_reg[0])
+ evaluate_and_config_odcm(change_data->max_uV, event);
+ } else if (event == REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE) {
+ pr_debug("Received event ABORT_VOLTAGE_CHANGE\n");
+ if (lmh_data->odcm_reg[0])
+ evaluate_and_config_odcm(0, event);
+ }
+
+ return NOTIFY_OK;
+}
+
+static void lmh_dpm_remove(void)
+{
+ if (!IS_ERR_OR_NULL(lmh_data->regulator) &&
+ lmh_data->dpm_notifier_blk.notifier_call != NULL) {
+ regulator_unregister_notifier(lmh_data->regulator,
+ &(lmh_data->dpm_notifier_blk));
+ lmh_data->regulator = NULL;
+ }
+}
+
+static void lmh_dpm_init(void)
+{
+ int ret = 0;
+
+ lmh_data->dpm_voltage_scale_reg = devm_ioremap(lmh_data->dev,
+ (phys_addr_t)APCS_DPM_VOLTAGE_SCALE, 4);
+ if (!lmh_data->dpm_voltage_scale_reg) {
+ ret = -ENODEV;
+ pr_err("Error mapping LMH DPM voltage scale register\n");
+ goto dpm_init_exit;
+ }
+
+ lmh_data->dpm_notifier_blk.notifier_call = lmh_voltage_change_notifier;
+ ret = regulator_register_notifier(lmh_data->regulator,
+ &(lmh_data->dpm_notifier_blk));
+ if (ret) {
+ pr_err("DPM regulator notification registration failed. err:%d\n",
+ ret);
+ goto dpm_init_exit;
+ }
+
+dpm_init_exit:
+ if (ret) {
+ if (lmh_data->dpm_notifier_blk.notifier_call)
+ regulator_unregister_notifier(lmh_data->regulator,
+ &(lmh_data->dpm_notifier_blk));
+ devm_regulator_put(lmh_data->regulator);
+ lmh_data->dpm_notifier_blk.notifier_call = NULL;
+ lmh_data->regulator = NULL;
+ }
+}
+
+static int lmh_debug_init(void)
+{
+ int ret = 0;
+
+ if (lmh_check_tz_debug_cmds()) {
+ pr_debug("Debug commands not available.\n");
+ return -ENODEV;
+ }
+
+ lmh_data->debug_info.debug_ops.debug_read = lmh_debug_read;
+ lmh_data->debug_info.debug_ops.debug_config_read
+ = lmh_debug_config_read;
+ lmh_data->debug_info.debug_ops.debug_config_lmh
+ = lmh_debug_lmh_config;
+ lmh_data->debug_info.debug_ops.debug_get_types
+ = lmh_debug_get_types;
+ ret = lmh_debug_register(&lmh_data->debug_info.debug_ops);
+ if (ret) {
+ pr_err("Error registering debug ops. err:%d\n", ret);
+ goto debug_init_exit;
+ }
+
+debug_init_exit:
+ return ret;
+}
+
+static int lmh_sensor_init(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ if (lmh_check_tz_sensor_cmds())
+ return -ENODEV;
+
+ down_write(&lmh_sensor_access);
+ ret = lmh_get_sensor_list();
+ if (ret)
+ goto init_exit;
+
+ lmh_data->intr_state = LMH_ISR_MONITOR;
+
+ ret = lmh_get_sensor_devicetree(pdev);
+ if (ret) {
+ pr_err("Error getting device tree data. err:%d\n", ret);
+ goto init_exit;
+ }
+ pr_debug("LMH Sensor Init complete\n");
+
+init_exit:
+ up_write(&lmh_sensor_access);
+ if (ret)
+ lmh_remove_sensors();
+
+ return ret;
+}
+
+static int lmh_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ if (lmh_data) {
+		pr_err("LMH hardware driver already initialized\n");
+ return -EEXIST;
+ }
+ lmh_data = devm_kzalloc(&pdev->dev, sizeof(struct lmh_driver_data),
+ GFP_KERNEL);
+ if (!lmh_data) {
+ pr_err("kzalloc failed\n");
+ return -ENOMEM;
+ }
+ lmh_data->dev = &pdev->dev;
+
+ lmh_data->poll_wq = alloc_workqueue("lmh_poll_wq", WQ_HIGHPRI, 0);
+ if (!lmh_data->poll_wq) {
+ pr_err("Error allocating workqueue\n");
+ ret = -ENOMEM;
+ goto probe_exit;
+ }
+ INIT_DEFERRABLE_WORK(&lmh_data->poll_work, lmh_poll);
+
+ ret = lmh_sensor_init(pdev);
+ if (ret) {
+ pr_err("Sensor Init failed. err:%d\n", ret);
+ goto probe_exit;
+ }
+ ret = lmh_device_init();
+ if (ret) {
+ pr_err("WARNING: Device Init failed. err:%d. LMH continues\n",
+ ret);
+ ret = 0;
+ }
+
+ if (lmh_data->regulator)
+ lmh_dpm_init();
+
+ ret = lmh_debug_init();
+ if (ret) {
+ pr_err("LMH debug init failed. err:%d\n", ret);
+ ret = 0;
+ }
+ platform_set_drvdata(pdev, lmh_data);
+
+ return ret;
+
+probe_exit:
+ if (lmh_data->poll_wq)
+ destroy_workqueue(lmh_data->poll_wq);
+ lmh_data = NULL;
+ return ret;
+}
+
+static int lmh_remove(struct platform_device *pdev)
+{
+ struct lmh_driver_data *lmh_dat = platform_get_drvdata(pdev);
+
+ destroy_workqueue(lmh_dat->poll_wq);
+ free_irq(lmh_dat->irq_num, lmh_dat);
+ lmh_remove_sensors();
+ lmh_device_deregister(&lmh_dat->dev_info.dev_ops);
+ lmh_dpm_remove();
+
+ return 0;
+}
+
+static const struct of_device_id lmh_match[] = {
+ {
+ .compatible = "qcom,lmh",
+ .data = (void *)&lmh_lite_data,
+ },
+ {
+ .compatible = "qcom,lmh_v1",
+ .data = (void *)&lmh_v1_data,
+ },
+ {},
+};
+
+static struct platform_driver lmh_driver = {
+ .probe = lmh_probe,
+ .remove = lmh_remove,
+ .driver = {
+ .name = LMH_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = lmh_match,
+ },
+};
+
+int __init lmh_init_driver(void)
+{
+ struct device_node *comp_node;
+
+ comp_node = of_find_matching_node(NULL, lmh_match);
+ if (comp_node) {
+ const struct of_device_id *match = of_match_node(lmh_match,
+ comp_node);
+ if (!match) {
+			pr_err("Couldn't find a match\n");
+ goto plt_register;
+ }
+ lmh_hw_data = (struct lmh_default_data *)match->data;
+ of_node_put(comp_node);
+ }
+
+plt_register:
+ return platform_driver_register(&lmh_driver);
+}
+
+static void __exit lmh_exit(void)
+{
+ platform_driver_unregister(&lmh_driver);
+}
+
+late_initcall(lmh_init_driver);
+module_exit(lmh_exit);
+
+MODULE_DESCRIPTION("LMH hardware interface");
+MODULE_ALIAS("platform:" LMH_DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/msm_thermal-dev.c b/drivers/thermal/msm_thermal-dev.c
new file mode 100644
index 000000000000..e1032bc03c61
--- /dev/null
+++ b/drivers/thermal/msm_thermal-dev.c
@@ -0,0 +1,425 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/msm_thermal_ioctl.h>
+#include <linux/msm_thermal.h>
+#include <linux/uaccess.h>
+#include <linux/cdev.h>
+#include <linux/semaphore.h>
+#include <linux/module.h>
+
+struct msm_thermal_ioctl_dev {
+ struct semaphore sem;
+ struct cdev char_dev;
+};
+
+static int msm_thermal_major;
+static struct class *thermal_class;
+static struct msm_thermal_ioctl_dev *msm_thermal_dev;
+static unsigned int freq_table_len[NR_CPUS], freq_table_set[NR_CPUS];
+static unsigned int voltage_table_set[NR_CPUS];
+static unsigned int *freq_table_ptr[NR_CPUS];
+static uint32_t *voltage_table_ptr[NR_CPUS];
+
+static int msm_thermal_ioctl_open(struct inode *node, struct file *filep)
+{
+ int ret = 0;
+ struct msm_thermal_ioctl_dev *dev;
+
+ dev = container_of(node->i_cdev, struct msm_thermal_ioctl_dev,
+ char_dev);
+ filep->private_data = dev;
+
+ return ret;
+}
+
+static int msm_thermal_ioctl_release(struct inode *node, struct file *filep)
+{
+ pr_debug("%s: IOCTL: release\n", KBUILD_MODNAME);
+ return 0;
+}
+
+static long validate_and_copy(unsigned int *cmd, unsigned long *arg,
+ struct msm_thermal_ioctl *query)
+{
+ long ret = 0, err_val = 0;
+
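+	/* Reject foreign ioctls before dereferencing any user pointer */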
+ if ((_IOC_TYPE(*cmd) != MSM_THERMAL_MAGIC_NUM) ||
+ (_IOC_NR(*cmd) >= MSM_CMD_MAX_NR)) {
+ ret = -ENOTTY;
+ goto validate_exit;
+ }
+
+ if (_IOC_DIR(*cmd) & _IOC_READ) {
+ err_val = !access_ok(VERIFY_WRITE, (void __user *)*arg,
+ _IOC_SIZE(*cmd));
+ } else if (_IOC_DIR(*cmd) & _IOC_WRITE) {
+ err_val = !access_ok(VERIFY_READ, (void __user *)*arg,
+ _IOC_SIZE(*cmd));
+ }
+ if (err_val) {
+ ret = -EFAULT;
+ goto validate_exit;
+ }
+
+ if (copy_from_user(query, (void __user *)(*arg),
+ sizeof(struct msm_thermal_ioctl))) {
+		ret = -EFAULT;
+ goto validate_exit;
+ }
+
+ if (query->size != sizeof(struct msm_thermal_ioctl)) {
+ pr_err("%s: Invalid input argument size\n", __func__);
+ ret = -EINVAL;
+ goto validate_exit;
+ }
+
+ switch (*cmd) {
+ case MSM_THERMAL_SET_CPU_MAX_FREQUENCY:
+ case MSM_THERMAL_SET_CPU_MIN_FREQUENCY:
+ if (query->cpu_freq.cpu_num >= num_possible_cpus()) {
+ pr_err("%s: Invalid CPU number: %u\n", __func__,
+ query->cpu_freq.cpu_num);
+ ret = -EINVAL;
+ goto validate_exit;
+ }
+		break;
+	case MSM_THERMAL_GET_CLUSTER_FREQUENCY_PLAN:
+		/* cluster_num indexes the NR_CPUS-sized plan caches */
+		if (query->clock_freq.cluster_num >= num_possible_cpus()) {
+			pr_err("%s: Invalid cluster number: %u\n", __func__,
+				query->clock_freq.cluster_num);
+			ret = -EINVAL;
+			goto validate_exit;
+		}
+		break;
+	case MSM_THERMAL_GET_CLUSTER_VOLTAGE_PLAN:
+		if (query->voltage.cluster_num >= num_possible_cpus()) {
+			pr_err("%s: Invalid cluster number: %u\n", __func__,
+				query->voltage.cluster_num);
+			ret = -EINVAL;
+			goto validate_exit;
+		}
+		break;
+ default:
+ break;
+ }
+
+validate_exit:
+ return ret;
+}
+
+static long msm_thermal_process_freq_table_req(struct msm_thermal_ioctl *query,
+ unsigned long *arg)
+{
+ long ret = 0;
+ uint32_t table_idx, idx = 0, cluster_id = query->clock_freq.cluster_num;
+ struct clock_plan_arg *clock_freq = &(query->clock_freq);
+
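+	/*
+	 * The frequency plan is handed to userspace in chunks of
+	 * MSM_IOCTL_FREQ_SIZE entries selected by set_idx; a request with
+	 * freq_table_len == 0 only queries the table length. The table is
+	 * fetched from msm_thermal once and cached per cluster.
+	 */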
+ if (!freq_table_len[cluster_id]) {
+ ret = msm_thermal_get_freq_plan_size(cluster_id,
+ &freq_table_len[cluster_id]);
+ if (ret) {
+ pr_err("%s: Cluster%d freq table length get err:%ld\n",
+ KBUILD_MODNAME, cluster_id, ret);
+ goto process_freq_exit;
+ }
+ if (!freq_table_len[cluster_id]) {
+ pr_err("%s: Cluster%d freq table empty\n",
+ KBUILD_MODNAME, cluster_id);
+ ret = -EAGAIN;
+ goto process_freq_exit;
+ }
+
+ freq_table_set[cluster_id] = freq_table_len[cluster_id]
+ / MSM_IOCTL_FREQ_SIZE;
+ if (freq_table_len[cluster_id] % MSM_IOCTL_FREQ_SIZE)
+ freq_table_set[cluster_id]++;
+
+ if (!freq_table_ptr[cluster_id]) {
+ freq_table_ptr[cluster_id] = kzalloc(
+ sizeof(unsigned int) *
+ freq_table_len[cluster_id], GFP_KERNEL);
+ if (!freq_table_ptr[cluster_id]) {
+ pr_err("%s: memory alloc failed\n",
+ KBUILD_MODNAME);
+ freq_table_len[cluster_id] = 0;
+ ret = -ENOMEM;
+ goto process_freq_exit;
+ }
+ }
+ ret = msm_thermal_get_cluster_freq_plan(cluster_id,
+ freq_table_ptr[cluster_id]);
+ if (ret) {
+ pr_err("%s: Error getting frequency table. err:%ld\n",
+ KBUILD_MODNAME, ret);
+ freq_table_len[cluster_id] = 0;
+ freq_table_set[cluster_id] = 0;
+ kfree(freq_table_ptr[cluster_id]);
+ freq_table_ptr[cluster_id] = NULL;
+ goto process_freq_exit;
+ }
+ }
+
+ if (!clock_freq->freq_table_len) {
+ clock_freq->freq_table_len = freq_table_len[cluster_id];
+ goto copy_and_return;
+ }
+ if (clock_freq->set_idx >= freq_table_set[cluster_id]) {
+ pr_err("%s: Invalid freq table set%d for cluster%d\n",
+ KBUILD_MODNAME, clock_freq->set_idx,
+ cluster_id);
+ ret = -EINVAL;
+ goto process_freq_exit;
+ }
+
+ table_idx = MSM_IOCTL_FREQ_SIZE * clock_freq->set_idx;
+ for (; table_idx < freq_table_len[cluster_id]
+ && idx < MSM_IOCTL_FREQ_SIZE; idx++, table_idx++) {
+ clock_freq->freq_table[idx] =
+ freq_table_ptr[cluster_id][table_idx];
+ }
+ clock_freq->freq_table_len = idx;
+
+copy_and_return:
+	/* copy_to_user() returns bytes not copied, not an errno */
+	if (copy_to_user((void __user *)(*arg), query,
+		sizeof(struct msm_thermal_ioctl))) {
+		pr_err("%s: copy_to_user failed\n", KBUILD_MODNAME);
+		ret = -EFAULT;
+		goto process_freq_exit;
+	}
+
+process_freq_exit:
+ return ret;
+}
+
+static long msm_thermal_process_voltage_table_req(
+ struct msm_thermal_ioctl *query,
+ unsigned long *arg)
+{
+ long ret = 0;
+ uint32_t table_idx = 0, idx = 0;
+ uint32_t cluster_id = query->voltage.cluster_num;
+ struct voltage_plan_arg *voltage = &(query->voltage);
+
+ if (!voltage_table_ptr[cluster_id]) {
+ if (!freq_table_len[cluster_id]) {
+ ret = msm_thermal_get_freq_plan_size(cluster_id,
+ &freq_table_len[cluster_id]);
+ if (ret) {
+ pr_err(
+ "%s: Cluster%d freq table len err:%ld\n",
+ KBUILD_MODNAME, cluster_id, ret);
+ goto process_volt_exit;
+ }
+ if (!freq_table_len[cluster_id]) {
+ pr_err("%s: Cluster%d freq table empty\n",
+ KBUILD_MODNAME, cluster_id);
+ ret = -EAGAIN;
+ goto process_volt_exit;
+ }
+ }
+ voltage_table_ptr[cluster_id] = kzalloc(
+ sizeof(uint32_t) *
+ freq_table_len[cluster_id], GFP_KERNEL);
+ if (!voltage_table_ptr[cluster_id]) {
+ pr_err("%s: memory alloc failed\n",
+ KBUILD_MODNAME);
+ ret = -ENOMEM;
+ goto process_volt_exit;
+ }
+ ret = msm_thermal_get_cluster_voltage_plan(cluster_id,
+ voltage_table_ptr[cluster_id]);
+ if (ret) {
+ pr_err("%s: Error getting voltage table. err:%ld\n",
+ KBUILD_MODNAME, ret);
+ kfree(voltage_table_ptr[cluster_id]);
+ voltage_table_ptr[cluster_id] = NULL;
+ goto process_volt_exit;
+ }
+ }
+
+ if (!voltage->voltage_table_len) {
+ voltage->voltage_table_len = freq_table_len[cluster_id];
+ goto copy_and_return;
+ }
+
+ voltage_table_set[cluster_id] = freq_table_len[cluster_id]
+ / MSM_IOCTL_FREQ_SIZE;
+ if (freq_table_len[cluster_id] % MSM_IOCTL_FREQ_SIZE)
+ voltage_table_set[cluster_id]++;
+
+ if (voltage->set_idx >= voltage_table_set[cluster_id]) {
+ pr_err("%s: Invalid voltage table set%d for cluster%d\n",
+ KBUILD_MODNAME, voltage->set_idx,
+ cluster_id);
+ ret = -EINVAL;
+ goto process_volt_exit;
+ }
+
+ table_idx = MSM_IOCTL_FREQ_SIZE * voltage->set_idx;
+ for (; table_idx < freq_table_len[cluster_id]
+ && idx < MSM_IOCTL_FREQ_SIZE; idx++, table_idx++) {
+ voltage->voltage_table[idx] =
+ voltage_table_ptr[cluster_id][table_idx];
+ }
+ voltage->voltage_table_len = idx;
+
+copy_and_return:
+	/* copy_to_user() returns bytes not copied, not an errno */
+	if (copy_to_user((void __user *)(*arg), query,
+		sizeof(struct msm_thermal_ioctl))) {
+		pr_err("%s: copy_to_user failed\n", KBUILD_MODNAME);
+		ret = -EFAULT;
+		goto process_volt_exit;
+	}
+
+process_volt_exit:
+ return ret;
+}
+
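+/*
+ * Single dispatch point for the msm_thermal ioctls. A userspace caller
+ * would do roughly the following (illustrative sketch; the device node
+ * name derives from MSM_THERMAL_IOCTL_NAME and max_khz is a placeholder):
+ *
+ *	struct msm_thermal_ioctl q = { .size = sizeof(q) };
+ *
+ *	q.cpu_freq.cpu_num = 0;
+ *	q.cpu_freq.freq_req = max_khz;
+ *	ioctl(fd, MSM_THERMAL_SET_CPU_MAX_FREQUENCY, &q);
+ */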
+static long msm_thermal_ioctl_process(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = 0;
+ struct msm_thermal_ioctl query;
+
+ pr_debug("%s: IOCTL: processing cmd:%u\n", KBUILD_MODNAME, cmd);
+
+ ret = validate_and_copy(&cmd, &arg, &query);
+ if (ret)
+ goto process_exit;
+
+ switch (cmd) {
+ case MSM_THERMAL_SET_CPU_MAX_FREQUENCY:
+ ret = msm_thermal_set_frequency(query.cpu_freq.cpu_num,
+ query.cpu_freq.freq_req, true);
+ break;
+ case MSM_THERMAL_SET_CPU_MIN_FREQUENCY:
+ ret = msm_thermal_set_frequency(query.cpu_freq.cpu_num,
+ query.cpu_freq.freq_req, false);
+ break;
+ case MSM_THERMAL_SET_CLUSTER_MAX_FREQUENCY:
+ ret = msm_thermal_set_cluster_freq(query.cpu_freq.cpu_num,
+ query.cpu_freq.freq_req, true);
+ break;
+ case MSM_THERMAL_SET_CLUSTER_MIN_FREQUENCY:
+ ret = msm_thermal_set_cluster_freq(query.cpu_freq.cpu_num,
+ query.cpu_freq.freq_req, false);
+ break;
+ case MSM_THERMAL_GET_CLUSTER_FREQUENCY_PLAN:
+ ret = msm_thermal_process_freq_table_req(&query, &arg);
+ break;
+ case MSM_THERMAL_GET_CLUSTER_VOLTAGE_PLAN:
+ ret = msm_thermal_process_voltage_table_req(&query, &arg);
+ break;
+ default:
+ ret = -ENOTTY;
+ goto process_exit;
+ }
+process_exit:
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long msm_thermal_compat_ioctl_process(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ arg = (unsigned long)compat_ptr(arg);
+ return msm_thermal_ioctl_process(filep, cmd, arg);
+}
+#endif /* CONFIG_COMPAT */
+
+static const struct file_operations msm_thermal_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_thermal_ioctl_open,
+ .unlocked_ioctl = msm_thermal_ioctl_process,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = msm_thermal_compat_ioctl_process,
+#endif /* CONFIG_COMPAT */
+ .release = msm_thermal_ioctl_release,
+};
+
+int msm_thermal_ioctl_init(void)
+{
+ int ret = 0;
+ dev_t thermal_dev;
+ struct device *therm_device;
+
+ ret = alloc_chrdev_region(&thermal_dev, 0, 1,
+ MSM_THERMAL_IOCTL_NAME);
+ if (ret < 0) {
+ pr_err("%s: Error in allocating char device region. Err:%d\n",
+ KBUILD_MODNAME, ret);
+ goto ioctl_init_exit;
+ }
+
+ msm_thermal_major = MAJOR(thermal_dev);
+
+ thermal_class = class_create(THIS_MODULE, "msm_thermal");
+ if (IS_ERR(thermal_class)) {
+ pr_err("%s: Error in creating class\n",
+ KBUILD_MODNAME);
+ ret = PTR_ERR(thermal_class);
+ goto ioctl_class_fail;
+ }
+
+ therm_device = device_create(thermal_class, NULL, thermal_dev, NULL,
+ MSM_THERMAL_IOCTL_NAME);
+ if (IS_ERR(therm_device)) {
+ pr_err("%s: Error in creating character device\n",
+ KBUILD_MODNAME);
+ ret = PTR_ERR(therm_device);
+ goto ioctl_dev_fail;
+ }
+	msm_thermal_dev = kzalloc(sizeof(struct msm_thermal_ioctl_dev),
+				GFP_KERNEL);
+	if (!msm_thermal_dev) {
+		pr_err("%s: Error allocating memory\n",
+			KBUILD_MODNAME);
+		ret = -ENOMEM;
+		goto ioctl_clean_all;
+	}
+
+	sema_init(&msm_thermal_dev->sem, 1);
+ cdev_init(&msm_thermal_dev->char_dev, &msm_thermal_fops);
+ ret = cdev_add(&msm_thermal_dev->char_dev, thermal_dev, 1);
+ if (ret < 0) {
+ pr_err("%s: Error in adding character device\n",
+ KBUILD_MODNAME);
+ goto ioctl_clean_all;
+ }
+
+ return ret;
+
+ioctl_clean_all:
+	/* kfree(NULL) is a no-op, so this also covers the alloc failure */
+	kfree(msm_thermal_dev);
+	msm_thermal_dev = NULL;
+	device_destroy(thermal_class, thermal_dev);
+ioctl_dev_fail:
+ class_destroy(thermal_class);
+ioctl_class_fail:
+ unregister_chrdev_region(thermal_dev, 1);
+ioctl_init_exit:
+ return ret;
+}
+
+void msm_thermal_ioctl_cleanup(void)
+{
+ uint32_t idx = 0;
+ dev_t thermal_dev = MKDEV(msm_thermal_major, 0);
+
+ if (!msm_thermal_dev) {
+ pr_err("%s: Thermal IOCTL cleanup already done\n",
+ KBUILD_MODNAME);
+ return;
+ }
+
+ for (; idx < num_possible_cpus(); idx++) {
+ kfree(freq_table_ptr[idx]);
+ kfree(voltage_table_ptr[idx]);
+ }
+ device_destroy(thermal_class, thermal_dev);
+ class_destroy(thermal_class);
+ cdev_del(&msm_thermal_dev->char_dev);
+ unregister_chrdev_region(thermal_dev, 1);
+ kfree(msm_thermal_dev);
+ msm_thermal_dev = NULL;
+ thermal_class = NULL;
+}
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
new file mode 100644
index 000000000000..0fa7188b2581
--- /dev/null
+++ b/drivers/thermal/msm_thermal.c
@@ -0,0 +1,7214 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/msm_tsens.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/msm_thermal.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/thermal.h>
+#include <linux/regulator/rpm-smd-regulator.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/msm_thermal_ioctl.h>
+#include <soc/qcom/rpm-smd.h>
+#include <soc/qcom/scm.h>
+#include <linux/debugfs.h>
+#include <linux/pm_opp.h>
+#include <linux/sched/rt.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <soc/qcom/msm-core.h>
+#include <linux/cpumask.h>
+#include <linux/suspend.h>
+#include <linux/uaccess.h>
+#include <linux/uio_driver.h>
+
+#define CREATE_TRACE_POINTS
+#define TRACE_MSM_THERMAL
+#include <trace/trace_thermal.h>
+
+#define MAX_CURRENT_UA 100000
+#define MAX_RAILS 5
+#define TSENS_NAME_FORMAT "tsens_tz_sensor%d"
+#define THERM_SECURE_BITE_CMD 8
+#define SENSOR_SCALING_FACTOR 1
+#define MSM_THERMAL_NAME "msm_thermal"
+#define MSM_TSENS_PRINT "log_tsens_temperature"
+#define CPU_BUF_SIZE 64
+#define CPU_DEVICE "cpu%d"
+#define MAX_DEBUGFS_CONFIG_LEN 32
+#define MSM_THERMAL_CONFIG "config"
+#define MSM_CONFIG_DATA "data"
+#define DEBUGFS_DISABLE_ALL_MIT "disable"
+#define DEBUGFS_CONFIG_UPDATE "update"
+#define MSM_THERMAL_THRESH "thresh_degc"
+#define MSM_THERMAL_THRESH_CLR "thresh_clr_degc"
+#define MSM_THERMAL_THRESH_UPDATE "update"
+#define DEVM_NAME_MAX 30
+#define HOTPLUG_RETRY_INTERVAL_MS 100
+#define UIO_VERSION "1.0"
+
+#define VALIDATE_AND_SET_MASK(_node, _key, _mask, _cpu) \
+ do { \
+ if (of_property_read_bool(_node, _key)) \
+ _mask |= BIT(_cpu); \
+ } while (0)
+
+#define THERM_CREATE_DEBUGFS_DIR(_node, _name, _parent, _ret) \
+ do { \
+ _node = debugfs_create_dir(_name, _parent); \
+ if (IS_ERR(_node)) { \
+ _ret = PTR_ERR(_node); \
+ pr_err("Error creating debugfs dir:%s. err:%d\n", \
+ _name, _ret); \
+ } \
+ } while (0)
+
+#define UPDATE_THRESHOLD_SET(_val, _trip) do { \
+ if (_trip == THERMAL_TRIP_CONFIGURABLE_HI) \
+ _val |= 1; \
+ else if (_trip == THERMAL_TRIP_CONFIGURABLE_LOW)\
+ _val |= 2; \
+} while (0)
+
+#define UPDATE_CPU_CONFIG_THRESHOLD(_mask, _id, _high, _low) \
+ do { \
+ int cpu; \
+ for_each_possible_cpu(cpu) { \
+ if (!(_mask & BIT(cpus[cpu].cpu))) \
+ continue; \
+ cpus[cpu].threshold[_id].temp = _high \
+ * tsens_scaling_factor; \
+ cpus[cpu].threshold[_id + 1].temp = _low \
+ * tsens_scaling_factor; \
+ set_and_activate_threshold( \
+ cpus[cpu].sensor_id, \
+ &cpus[cpu].threshold[_id]); \
+ set_and_activate_threshold( \
+ cpus[cpu].sensor_id, \
+ &cpus[cpu].threshold[_id + 1]); \
+ } \
+ } while (0)
+
+static struct msm_thermal_data msm_thermal_info;
+static struct delayed_work check_temp_work, retry_hotplug_work;
+static bool core_control_enabled;
+static uint32_t cpus_offlined;
+static cpumask_var_t cpus_previously_online;
+static DEFINE_MUTEX(core_control_mutex);
+static struct kobject *cc_kobj;
+static struct kobject *mx_kobj;
+static struct task_struct *hotplug_task;
+static struct task_struct *freq_mitigation_task;
+static struct task_struct *thermal_monitor_task;
+static struct completion hotplug_notify_complete;
+static struct completion freq_mitigation_complete;
+static struct completion thermal_monitor_complete;
+
+static int enabled;
+static int polling_enabled;
+static int rails_cnt;
+static int sensor_cnt;
+static int psm_rails_cnt;
+static int ocr_rail_cnt;
+static int limit_idx;
+static int limit_idx_low;
+static int limit_idx_high;
+static int max_tsens_num;
+static struct cpufreq_frequency_table *table;
+static uint32_t usefreq;
+static int freq_table_get;
+static bool vdd_rstr_enabled;
+static bool vdd_rstr_nodes_called;
+static bool vdd_rstr_probed;
+static bool sensor_info_nodes_called;
+static bool sensor_info_probed;
+static bool psm_enabled;
+static bool psm_nodes_called;
+static bool psm_probed;
+static bool freq_mitigation_enabled;
+static bool ocr_enabled;
+static bool ocr_nodes_called;
+static bool ocr_probed;
+static bool ocr_reg_init_defer;
+static bool hotplug_enabled;
+static bool interrupt_mode_enable;
+static bool msm_thermal_probed;
+static bool gfx_crit_phase_ctrl_enabled;
+static bool gfx_warm_phase_ctrl_enabled;
+static bool cx_phase_ctrl_enabled;
+static bool vdd_mx_enabled;
+static bool therm_reset_enabled;
+static bool online_core;
+static bool cluster_info_probed;
+static bool cluster_info_nodes_called;
+static bool in_suspend, retry_in_progress;
+static int *tsens_id_map;
+static int *zone_id_tsens_map;
+static DEFINE_MUTEX(vdd_rstr_mutex);
+static DEFINE_MUTEX(psm_mutex);
+static DEFINE_MUTEX(cx_mutex);
+static DEFINE_MUTEX(gfx_mutex);
+static DEFINE_MUTEX(ocr_mutex);
+static DEFINE_MUTEX(vdd_mx_mutex);
+static DEFINE_MUTEX(threshold_mutex);
+static uint32_t curr_gfx_band;
+static uint32_t curr_cx_band;
+static struct kobj_attribute cx_mode_attr;
+static struct kobj_attribute gfx_mode_attr;
+static struct kobj_attribute mx_enabled_attr;
+static struct attribute_group cx_attr_gp;
+static struct attribute_group gfx_attr_gp;
+static struct attribute_group mx_attr_group;
+static struct regulator *vdd_mx, *vdd_cx;
+static long *tsens_temp_at_panic;
+static u32 tsens_temp_print;
+static uint32_t bucket;
+static cpumask_t throttling_mask;
+static int tsens_scaling_factor = SENSOR_SCALING_FACTOR;
+
+static LIST_HEAD(devices_list);
+static LIST_HEAD(thresholds_list);
+static int mitigation = 1;
+
+enum thermal_threshold {
+ HOTPLUG_THRESHOLD_HIGH,
+ HOTPLUG_THRESHOLD_LOW,
+ FREQ_THRESHOLD_HIGH,
+ FREQ_THRESHOLD_LOW,
+ THRESHOLD_MAX_NR,
+};
+
+struct cluster_info {
+ int cluster_id;
+ uint32_t entity_count;
+ struct cluster_info *child_entity_ptr;
+ struct cluster_info *parent_ptr;
+ struct cpufreq_frequency_table *freq_table;
+ int freq_idx;
+ int freq_idx_low;
+ int freq_idx_high;
+ cpumask_t cluster_cores;
+ bool sync_cluster;
+ uint32_t limited_max_freq;
+ uint32_t limited_min_freq;
+};
+
+struct cpu_info {
+ uint32_t cpu;
+ const char *sensor_type;
+ enum sensor_id_type id_type;
+ uint32_t sensor_id;
+ bool offline;
+ bool user_offline;
+ bool hotplug_thresh_clear;
+ struct sensor_threshold threshold[THRESHOLD_MAX_NR];
+ bool max_freq;
+ uint32_t user_max_freq;
+ uint32_t shutdown_max_freq;
+ uint32_t suspend_max_freq;
+ uint32_t vdd_max_freq;
+ uint32_t user_min_freq;
+ uint32_t limited_max_freq;
+ uint32_t limited_min_freq;
+ bool freq_thresh_clear;
+ struct cluster_info *parent_ptr;
+};
+
+struct rail {
+ const char *name;
+ uint32_t freq_req;
+ uint32_t min_level;
+ uint32_t num_levels;
+ int32_t curr_level;
+ uint32_t levels[3];
+ struct kobj_attribute value_attr;
+ struct kobj_attribute level_attr;
+ struct regulator *reg;
+ struct attribute_group attr_gp;
+ uint32_t max_frequency_limit;
+ struct device_clnt_data *device_handle[NR_CPUS];
+ union device_request request[NR_CPUS];
+};
+
+struct msm_sensor_info {
+ const char *name;
+ const char *alias;
+ const char *type;
+ uint32_t scaling_factor;
+};
+
+struct psm_rail {
+ const char *name;
+ uint8_t init;
+ uint8_t mode;
+ struct kobj_attribute mode_attr;
+ struct rpm_regulator *reg;
+ struct regulator *phase_reg;
+ struct attribute_group attr_gp;
+};
+
+struct devmgr_devices {
+ struct device_manager_data *hotplug_dev;
+ struct device_manager_data *cpufreq_dev[NR_CPUS];
+};
+
+enum msm_thresh_list {
+ MSM_THERM_RESET,
+ MSM_VDD_RESTRICTION,
+ MSM_CX_PHASE_CTRL_HOT,
+ MSM_GFX_PHASE_CTRL_WARM,
+ MSM_GFX_PHASE_CTRL_HOT,
+ MSM_OCR,
+ MSM_VDD_MX_RESTRICTION,
+ MSM_LIST_MAX_NR,
+};
+
+enum msm_thermal_phase_ctrl {
+ MSM_CX_PHASE_CTRL,
+ MSM_GFX_PHASE_CTRL,
+ MSM_PHASE_CTRL_NR,
+};
+
+enum msm_temp_band {
+ MSM_COLD_CRITICAL = 1,
+ MSM_COLD,
+ MSM_COOL,
+ MSM_NORMAL,
+ MSM_WARM,
+ MSM_HOT,
+ MSM_HOT_CRITICAL,
+ MSM_TEMP_MAX_NR,
+};
+
+enum cpu_mit_type {
+ CPU_FREQ_MITIGATION = 0x1,
+ CPU_HOTPLUG_MITIGATION = 0x2,
+};
+
+enum cpu_config {
+ HOTPLUG_CONFIG,
+ CPUFREQ_CONFIG,
+ MAX_CPU_CONFIG
+};
+
+struct msm_thermal_debugfs_thresh_config {
+ char config_name[MAX_DEBUGFS_CONFIG_LEN];
+ long thresh;
+ long thresh_clr;
+ bool update;
+ void (*disable_config)(void);
+ struct dentry *dbg_config;
+ struct dentry *dbg_thresh;
+ struct dentry *dbg_thresh_clr;
+ struct dentry *dbg_thresh_update;
+};
+
+struct msm_thermal_debugfs_entry {
+ struct dentry *parent;
+ struct dentry *tsens_print;
+ struct dentry *config;
+ struct dentry *config_data;
+};
+
+static struct psm_rail *psm_rails;
+static struct psm_rail *ocr_rails;
+static struct rail *rails;
+static struct msm_sensor_info *sensors;
+static struct cpu_info cpus[NR_CPUS];
+static struct threshold_info *thresh;
+static bool mx_restr_applied;
+static struct cluster_info *core_ptr;
+static struct msm_thermal_debugfs_entry *msm_therm_debugfs;
+static struct devmgr_devices *devices;
+static struct msm_thermal_debugfs_thresh_config *mit_config;
+
+struct vdd_rstr_enable {
+ struct kobj_attribute ko_attr;
+ uint32_t enabled;
+};
+
+/* For SMPS only*/
+enum PMIC_SW_MODE {
+ PMIC_AUTO_MODE = RPM_REGULATOR_MODE_AUTO,
+ PMIC_IPEAK_MODE = RPM_REGULATOR_MODE_IPEAK,
+ PMIC_PWM_MODE = RPM_REGULATOR_MODE_HPM,
+};
+
+enum ocr_request {
+ OPTIMUM_CURRENT_MIN,
+ OPTIMUM_CURRENT_MAX,
+ OPTIMUM_CURRENT_NR,
+};
+
+static int thermal_config_debugfs_read(struct seq_file *m, void *data);
+static ssize_t thermal_config_debugfs_write(struct file *file,
+ const char __user *buffer,
+ size_t count, loff_t *ppos);
+
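+/*
+ * On a synchronous cluster all cores share one clock, so frequency limits
+ * are tracked on the parent cluster rather than per CPU.
+ */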
+#define SYNC_CORE(_cpu) \
+ (core_ptr && cpus[_cpu].parent_ptr->sync_cluster)
+
+#define VDD_RES_RO_ATTRIB(_rail, ko_attr, j, _name) \
+ ko_attr.attr.name = __stringify(_name); \
+ ko_attr.attr.mode = 0444; \
+ ko_attr.show = vdd_rstr_reg_##_name##_show; \
+ ko_attr.store = NULL; \
+ sysfs_attr_init(&ko_attr.attr); \
+ _rail.attr_gp.attrs[j] = &ko_attr.attr;
+
+#define VDD_RES_RW_ATTRIB(_rail, ko_attr, j, _name) \
+ ko_attr.attr.name = __stringify(_name); \
+ ko_attr.attr.mode = 0644; \
+ ko_attr.show = vdd_rstr_reg_##_name##_show; \
+ ko_attr.store = vdd_rstr_reg_##_name##_store; \
+ sysfs_attr_init(&ko_attr.attr); \
+ _rail.attr_gp.attrs[j] = &ko_attr.attr;
+
+#define VDD_RSTR_ENABLE_FROM_ATTRIBS(attr) \
+ (container_of(attr, struct vdd_rstr_enable, ko_attr));
+
+#define VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr) \
+ (container_of(attr, struct rail, value_attr));
+
+#define VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr) \
+ (container_of(attr, struct rail, level_attr));
+
+#define OCR_RW_ATTRIB(_rail, ko_attr, j, _name) \
+ ko_attr.attr.name = __stringify(_name); \
+ ko_attr.attr.mode = 0644; \
+ ko_attr.show = ocr_reg_##_name##_show; \
+ ko_attr.store = ocr_reg_##_name##_store; \
+ sysfs_attr_init(&ko_attr.attr); \
+ _rail.attr_gp.attrs[j] = &ko_attr.attr;
+
+#define PSM_RW_ATTRIB(_rail, ko_attr, j, _name) \
+ ko_attr.attr.name = __stringify(_name); \
+ ko_attr.attr.mode = 0644; \
+ ko_attr.show = psm_reg_##_name##_show; \
+ ko_attr.store = psm_reg_##_name##_store; \
+ sysfs_attr_init(&ko_attr.attr); \
+ _rail.attr_gp.attrs[j] = &ko_attr.attr;
+
+#define PSM_REG_MODE_FROM_ATTRIBS(attr) \
+ (container_of(attr, struct psm_rail, mode_attr));
+
+#define PHASE_RW_ATTR(_phase, _name, _attr, j, _attr_gr) \
+ _attr.attr.name = __stringify(_name); \
+ _attr.attr.mode = 0644; \
+ _attr.show = _phase##_phase_show; \
+ _attr.store = _phase##_phase_store; \
+ sysfs_attr_init(&_attr.attr); \
+ _attr_gr.attrs[j] = &_attr.attr;
+
+#define MX_RW_ATTR(ko_attr, _name, _attr_gp) \
+ ko_attr.attr.name = __stringify(_name); \
+ ko_attr.attr.mode = 0644; \
+ ko_attr.show = show_mx_##_name; \
+ ko_attr.store = store_mx_##_name; \
+ sysfs_attr_init(&ko_attr.attr); \
+ _attr_gp.attrs[0] = &ko_attr.attr;
+
+#define THERM_MITIGATION_DISABLE(_flag, _id) \
+ do { \
+ if (!_flag) \
+ return; \
+ if (_id >= 0) \
+ sensor_mgr_disable_threshold( \
+ &thresh[_id]); \
+ _flag = 0; \
+ } while (0)
+
+#define APPLY_VDD_RESTRICTION(vdd, level, name, ret) \
+ do { \
+ ret = regulator_set_voltage(vdd, level, INT_MAX); \
+ if (ret) { \
+ pr_err("Failed to vote %s to level %d, err %d\n", \
+ #name, level, ret); \
+ } else { \
+ ret = regulator_enable(vdd); \
+ if (ret) \
+ pr_err("Failed to enable %s, err %d\n", \
+ #name, ret); \
+ else \
+ pr_debug("Vote %s with level %d\n", \
+ #name, level); \
+ } \
+ } while (0)
+
+#define REMOVE_VDD_RESTRICTION(vdd, name, ret) \
+ do { \
+ ret = regulator_disable(vdd); \
+ if (ret) { \
+ pr_err("Failed to disable %s, error %d\n", \
+ #name, ret); \
+ } else { \
+ ret = regulator_set_voltage(vdd, 0, INT_MAX); \
+ if (ret) \
+ pr_err("Failed to remove %s vote, error %d\n",\
+ #name, ret); \
+ else \
+ pr_debug("Remove voting to %s\n", #name); \
+ } \
+ } while (0)
+
+static void uio_init(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct uio_info *uio_reg_info = NULL;
+ struct resource *clnt_res = NULL;
+ u32 mem_size = 0;
+	phys_addr_t mem_physical = 0;
+
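+	/*
+	 * Export the platform MEM resource through a UIO device so that
+	 * userspace can mmap() the region directly.
+	 */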
+ clnt_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!clnt_res) {
+ pr_debug("resource not found\n");
+ goto exit;
+ }
+ mem_size = resource_size(clnt_res);
+ if (mem_size == 0) {
+ pr_err("resource memory size is zero\n");
+ goto exit;
+ }
+
+ uio_reg_info = devm_kzalloc(&pdev->dev, sizeof(struct uio_info),
+ GFP_KERNEL);
+ if (!uio_reg_info)
+ goto exit;
+
+	mem_physical = clnt_res->start;
+
+ /* Setup device */
+ uio_reg_info->name = clnt_res->name;
+ uio_reg_info->version = UIO_VERSION;
+	uio_reg_info->mem[0].addr = mem_physical;
+ uio_reg_info->mem[0].size = mem_size;
+ uio_reg_info->mem[0].memtype = UIO_MEM_PHYS;
+
+ ret = uio_register_device(&pdev->dev, uio_reg_info);
+ if (ret) {
+ devm_kfree(&pdev->dev, uio_reg_info);
+ pr_err("uio register failed ret=%d\n", ret);
+ goto exit;
+ }
+ dev_set_drvdata(&pdev->dev, uio_reg_info);
+
+exit:
+ return;
+}
+
+static void get_cluster_mask(uint32_t cpu, cpumask_t *mask)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, mask);
+ if (core_ptr) {
+ for (i = 0; i < core_ptr->entity_count; i++) {
+ struct cluster_info *cluster_ptr =
+ &core_ptr->child_entity_ptr[i];
+			if (cpumask_test_cpu(cpu,
+				&cluster_ptr->cluster_cores)) {
+ cpumask_copy(mask,
+ &cluster_ptr->cluster_cores);
+ break;
+ }
+ }
+ }
+}
+
+static uint32_t get_core_max_freq(uint32_t cpu)
+{
+ int i;
+ uint32_t max_freq = 0;
+
+ if (core_ptr) {
+ for (i = 0; i < core_ptr->entity_count; i++) {
+ struct cluster_info *cluster_ptr =
+ &core_ptr->child_entity_ptr[i];
+			if (cpumask_test_cpu(cpu,
+				&cluster_ptr->cluster_cores)) {
+ if (cluster_ptr->freq_table)
+ max_freq =
+ cluster_ptr->freq_table
+ [cluster_ptr->freq_idx_high].frequency;
+ break;
+ }
+ }
+ } else {
+ if (table)
+ max_freq = table[limit_idx_high].frequency;
+ }
+
+ return max_freq;
+}
+
+static void cpus_previously_online_update(void)
+{
+ char buf[CPU_BUF_SIZE];
+
+ get_online_cpus();
+ cpumask_or(cpus_previously_online, cpus_previously_online,
+ cpu_online_mask);
+ put_online_cpus();
+ cpulist_scnprintf(buf, sizeof(buf), cpus_previously_online);
+ pr_debug("%s\n", buf);
+}
+
+static uint32_t get_core_min_freq(uint32_t cpu)
+{
+ int i;
+ uint32_t min_freq = UINT_MAX;
+
+ if (core_ptr) {
+ for (i = 0; i < core_ptr->entity_count; i++) {
+ struct cluster_info *cluster_ptr =
+ &core_ptr->child_entity_ptr[i];
+			if (cpumask_test_cpu(cpu,
+				&cluster_ptr->cluster_cores)) {
+ if (cluster_ptr->freq_table)
+ min_freq =
+ cluster_ptr->freq_table[0].frequency;
+ break;
+ }
+ }
+ } else {
+ if (table)
+ min_freq = table[0].frequency;
+ }
+
+ return min_freq;
+}
+
+static void msm_thermal_update_freq(bool is_shutdown, bool mitigate)
+{
+ uint32_t cpu;
+ bool update = false;
+
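+	/*
+	 * Shutdown/suspend mitigation pins each controlled CPU to its
+	 * minimum frequency; passing mitigate=false restores UINT_MAX,
+	 * i.e. no cap, and the mitigation task applies the change.
+	 */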
+ for_each_possible_cpu(cpu) {
+ if (msm_thermal_info.freq_mitig_control_mask
+ & BIT(cpu)) {
+ uint32_t *freq = (is_shutdown)
+ ? &cpus[cpu].shutdown_max_freq
+ : &cpus[cpu].suspend_max_freq;
+ uint32_t mitigation_freq = (mitigate) ?
+ get_core_min_freq(cpu) : UINT_MAX;
+
+ if (*freq == mitigation_freq)
+ continue;
+ *freq = mitigation_freq;
+ update = true;
+ pr_debug("%s mitigate CPU%u to %u\n",
+ (is_shutdown) ? "Shutdown" : "Suspend", cpu,
+ mitigation_freq);
+ }
+ }
+
+ if (!update)
+ goto notify_exit;
+
+ if (freq_mitigation_task)
+ complete(&freq_mitigation_complete);
+ else
+ pr_err("Freq mitigation task is not initialized\n");
+notify_exit:
+ return;
+}
+
+static int msm_thermal_power_down_callback(
+ struct notifier_block *nfb, unsigned long action, void *data)
+{
+ switch (action) {
+ case SYS_RESTART:
+ case SYS_POWER_OFF:
+ case SYS_HALT:
+ msm_thermal_update_freq(true, true);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int msm_thermal_suspend_callback(
+ struct notifier_block *nfb, unsigned long action, void *data)
+{
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ msm_thermal_update_freq(false, true);
+ in_suspend = true;
+ retry_in_progress = false;
+ cancel_delayed_work_sync(&retry_hotplug_work);
+ break;
+
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ msm_thermal_update_freq(false, false);
+ in_suspend = false;
+ if (hotplug_task)
+ complete(&hotplug_notify_complete);
+ else
+ pr_debug("Hotplug task not initialized\n");
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block msm_thermal_reboot_notifier = {
+ .notifier_call = msm_thermal_power_down_callback,
+};
+
+static struct device_manager_data *find_device_by_name(const char *device_name)
+{
+ struct device_manager_data *dev_mgr = NULL;
+
+ list_for_each_entry(dev_mgr, &devices_list, dev_ptr) {
+ if (strcmp(dev_mgr->device_name, device_name) == 0)
+ return dev_mgr;
+ }
+
+ return NULL;
+}
+
+static int validate_client(struct device_clnt_data *clnt)
+{
+ int ret = 0;
+ struct device_manager_data *dev_mgr = NULL;
+ struct device_clnt_data *client_ptr = NULL;
+
+ if (!clnt || !clnt->dev_mgr) {
+ pr_err("Invalid client\n");
+ ret = -EINVAL;
+ goto validate_exit;
+ }
+
+ list_for_each_entry(dev_mgr, &devices_list, dev_ptr) {
+ if (dev_mgr == clnt->dev_mgr)
+ break;
+ }
+ if (dev_mgr != clnt->dev_mgr) {
+ pr_err("Invalid device manager\n");
+ ret = -EINVAL;
+ goto validate_exit;
+ }
+
+ mutex_lock(&dev_mgr->clnt_lock);
+ list_for_each_entry(client_ptr, &dev_mgr->client_list, clnt_ptr) {
+ if (clnt == client_ptr)
+ break;
+ }
+ if (clnt != client_ptr) {
+ pr_err("Invalid client\n");
+ ret = -EINVAL;
+ goto validate_unlock;
+ }
+validate_unlock:
+ mutex_unlock(&dev_mgr->clnt_lock);
+
+validate_exit:
+ return ret;
+}
+
+static int devmgr_client_cpufreq_update(struct device_manager_data *dev_mgr)
+{
+ int ret = 0;
+ struct device_clnt_data *clnt = NULL;
+ uint32_t max_freq = UINT_MAX;
+ uint32_t min_freq = 0;
+
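+	/* Aggregate all active client votes: lowest max and highest min win */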
+ mutex_lock(&dev_mgr->clnt_lock);
+ list_for_each_entry(clnt, &dev_mgr->client_list, clnt_ptr) {
+ if (!clnt->req_active)
+ continue;
+ max_freq = min(max_freq, clnt->request.freq.max_freq);
+ min_freq = max(min_freq, clnt->request.freq.min_freq);
+ }
+ if (dev_mgr->active_req.freq.max_freq == max_freq &&
+ dev_mgr->active_req.freq.min_freq == min_freq) {
+ goto update_exit;
+ }
+ dev_mgr->active_req.freq.max_freq = max_freq;
+ dev_mgr->active_req.freq.min_freq = min_freq;
+
+ if (freq_mitigation_task) {
+ complete(&freq_mitigation_complete);
+ } else {
+ pr_err("Frequency mitigation task is not initialized\n");
+ ret = -ESRCH;
+ }
+
+update_exit:
+ mutex_unlock(&dev_mgr->clnt_lock);
+ return ret;
+}
+
+static int devmgr_client_hotplug_update(struct device_manager_data *dev_mgr)
+{
+ int ret = 0;
+ struct device_clnt_data *clnt = NULL;
+ cpumask_t offline_mask = CPU_MASK_NONE;
+
+ mutex_lock(&dev_mgr->clnt_lock);
+ list_for_each_entry(clnt, &dev_mgr->client_list, clnt_ptr) {
+ if (!clnt->req_active)
+ continue;
+ cpumask_or(&offline_mask, &offline_mask,
+ &clnt->request.offline_mask);
+ }
+ if (cpumask_equal(&dev_mgr->active_req.offline_mask, &offline_mask))
+ goto update_exit;
+
+ cpumask_copy(&dev_mgr->active_req.offline_mask, &offline_mask);
+
+ if (hotplug_task) {
+ complete(&hotplug_notify_complete);
+ } else {
+ pr_err("Hotplug task is not initialized\n");
+ ret = -ESRCH;
+ }
+
+update_exit:
+ mutex_unlock(&dev_mgr->clnt_lock);
+ return ret;
+}
+
+static int devmgr_hotplug_client_request_validate_and_update(
+ struct device_clnt_data *clnt,
+ union device_request *req,
+ enum device_req_type type)
+{
+ if (type != HOTPLUG_MITIGATION_REQ)
+ return -EINVAL;
+
+ cpumask_copy(&clnt->request.offline_mask, &req->offline_mask);
+
+ if (!cpumask_empty(&req->offline_mask))
+ clnt->req_active = true;
+ else
+ clnt->req_active = false;
+
+ return 0;
+}
+
+static int devmgr_cpufreq_client_request_validate_and_update(
+ struct device_clnt_data *clnt,
+ union device_request *req,
+ enum device_req_type type)
+{
+ if (type != CPUFREQ_MITIGATION_REQ)
+ return -EINVAL;
+
+ if (req->freq.max_freq < req->freq.min_freq) {
+ pr_err("Invalid Max and Min freq req. max:%u min:%u\n",
+ req->freq.max_freq, req->freq.min_freq);
+ return -EINVAL;
+ }
+
+ clnt->request.freq.max_freq = req->freq.max_freq;
+ clnt->request.freq.min_freq = req->freq.min_freq;
+
+ if ((req->freq.max_freq == CPUFREQ_MAX_NO_MITIGATION) &&
+ (req->freq.min_freq == CPUFREQ_MIN_NO_MITIGATION))
+ clnt->req_active = false;
+ else
+ clnt->req_active = true;
+
+ return 0;
+}
+
+int devmgr_client_request_mitigation(struct device_clnt_data *clnt,
+ enum device_req_type type,
+ union device_request *req)
+{
+ int ret = 0;
+ struct device_manager_data *dev_mgr = NULL;
+
+ if (!mitigation) {
+ pr_err("Thermal Mitigations disabled.\n");
+ goto req_exit;
+ }
+
+ if (!clnt || !req) {
+ pr_err("Invalid inputs for mitigation.\n");
+ ret = -EINVAL;
+ goto req_exit;
+ }
+
+ ret = validate_client(clnt);
+ if (ret) {
+ pr_err("Invalid mitigation client. ret:%d\n", ret);
+ goto req_exit;
+ }
+
+ if (!clnt->dev_mgr->request_validate) {
+ pr_err("Invalid dev mgr request update\n");
+ ret = -EINVAL;
+ goto req_exit;
+ }
+
+ dev_mgr = clnt->dev_mgr;
+ mutex_lock(&dev_mgr->clnt_lock);
+ ret = dev_mgr->request_validate(clnt, req, type);
+ if (ret) {
+ pr_err("Invalid client request\n");
+ goto req_unlock;
+ }
+
+req_unlock:
+ mutex_unlock(&dev_mgr->clnt_lock);
+ if (!ret && dev_mgr->update)
+ dev_mgr->update(dev_mgr);
+
+req_exit:
+ return ret;
+}
+
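+/*
+ * Register a mitigation client against a named mitigation device.
+ * Typical use (illustrative sketch; the device name must match an entry
+ * in devices_list, e.g. a per-CPU "cpu%d" cpufreq device, and max_khz is
+ * a placeholder):
+ *
+ *	struct device_clnt_data *clnt;
+ *	union device_request req;
+ *
+ *	clnt = devmgr_register_mitigation_client(dev, "cpu0", NULL);
+ *	if (IS_ERR(clnt))
+ *		return PTR_ERR(clnt);
+ *	req.freq.max_freq = max_khz;
+ *	req.freq.min_freq = CPUFREQ_MIN_NO_MITIGATION;
+ *	devmgr_client_request_mitigation(clnt, CPUFREQ_MITIGATION_REQ, &req);
+ */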
+struct device_clnt_data *devmgr_register_mitigation_client(struct device *dev,
+ const char *device_name,
+ void (*callback)(struct device_clnt_data *,
+ union device_request *, void *))
+{
+ struct device_clnt_data *client = NULL;
+ struct device_manager_data *dev_mgr = NULL;
+
+ if (!dev || !device_name) {
+ pr_err("Invalid input\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ dev_mgr = find_device_by_name(device_name);
+ if (!dev_mgr) {
+ pr_err("Invalid device %s\n", device_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ client = devm_kzalloc(dev,
+ sizeof(struct device_clnt_data), GFP_KERNEL);
+ if (!client) {
+ pr_err("Memory alloc failed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mutex_lock(&dev_mgr->clnt_lock);
+ client->dev_mgr = dev_mgr;
+ client->callback = callback;
+ list_add_tail(&client->clnt_ptr, &dev_mgr->client_list);
+ mutex_unlock(&dev_mgr->clnt_lock);
+
+ return client;
+}
+
+void devmgr_unregister_mitigation_client(struct device *dev,
+ struct device_clnt_data *clnt)
+{
+ int ret = 0;
+ struct device_manager_data *dev_mgr = NULL;
+
+ if (!clnt) {
+ pr_err("Invalid input\n");
+ return;
+ }
+
+ ret = validate_client(clnt);
+ if (ret)
+ return;
+
+ dev_mgr = clnt->dev_mgr;
+ mutex_lock(&dev_mgr->clnt_lock);
+ list_del(&clnt->clnt_ptr);
+ mutex_unlock(&dev_mgr->clnt_lock);
+ devm_kfree(dev, clnt);
+ if (dev_mgr->update)
+ dev_mgr->update(dev_mgr);
+}
+
+static int msm_thermal_cpufreq_callback(struct notifier_block *nfb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_policy *policy = data;
+ uint32_t max_freq_req, min_freq_req;
+
+ switch (event) {
+ case CPUFREQ_INCOMPATIBLE:
+ if (SYNC_CORE(policy->cpu)) {
+ max_freq_req =
+ cpus[policy->cpu].parent_ptr->limited_max_freq;
+ min_freq_req =
+ cpus[policy->cpu].parent_ptr->limited_min_freq;
+ } else {
+ max_freq_req = cpus[policy->cpu].limited_max_freq;
+ min_freq_req = cpus[policy->cpu].limited_min_freq;
+ }
+ pr_debug("mitigating CPU%d to freq max: %u min: %u\n",
+ policy->cpu, max_freq_req, min_freq_req);
+
+ cpufreq_verify_within_limits(policy, min_freq_req,
+ max_freq_req);
+
+ if (max_freq_req < min_freq_req)
+ pr_err("Invalid frequency request Max:%u Min:%u\n",
+ max_freq_req, min_freq_req);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block msm_thermal_cpufreq_notifier = {
+ .notifier_call = msm_thermal_cpufreq_callback,
+};
+
+static void update_cpu_freq(int cpu)
+{
+ int ret = 0;
+ cpumask_t mask;
+
+ get_cluster_mask(cpu, &mask);
+ if (cpu_online(cpu)) {
+ if ((cpumask_intersects(&mask, &throttling_mask))
+ && (cpus[cpu].limited_max_freq
+ >= get_core_max_freq(cpu))) {
+ cpumask_xor(&throttling_mask, &mask, &throttling_mask);
+ set_cpu_throttled(&mask, false);
+ } else if (!cpumask_intersects(&mask, &throttling_mask)) {
+ cpumask_or(&throttling_mask, &mask, &throttling_mask);
+ set_cpu_throttled(&mask, true);
+ }
+ trace_thermal_pre_frequency_mit(cpu,
+ cpus[cpu].limited_max_freq,
+ cpus[cpu].limited_min_freq);
+ ret = cpufreq_update_policy(cpu);
+ trace_thermal_post_frequency_mit(cpu,
+ cpufreq_quick_get_max(cpu),
+ cpus[cpu].limited_min_freq);
+ if (ret)
+ pr_err("Unable to update policy for cpu:%d. err:%d\n",
+ cpu, ret);
+ }
+}
+
+static int * __init get_sync_cluster(struct device *dev, int *cnt)
+{
+ int *sync_cluster = NULL, cluster_cnt = 0, ret = 0;
+ char *key = "qcom,synchronous-cluster-id";
+
+ if (!of_get_property(dev->of_node, key, &cluster_cnt)
+ || cluster_cnt <= 0 || !core_ptr)
+ return NULL;
+
+ cluster_cnt /= sizeof(__be32);
+ if (cluster_cnt > core_ptr->entity_count) {
+ pr_err("Invalid cluster count:%d\n", cluster_cnt);
+ return NULL;
+ }
+ sync_cluster = devm_kzalloc(dev, sizeof(int) * cluster_cnt, GFP_KERNEL);
+ if (!sync_cluster) {
+ pr_err("Memory alloc failed\n");
+ return NULL;
+ }
+
+ ret = of_property_read_u32_array(dev->of_node, key, sync_cluster,
+ cluster_cnt);
+ if (ret) {
+ pr_err("Error in reading property:%s. err:%d\n", key, ret);
+ devm_kfree(dev, sync_cluster);
+ return NULL;
+ }
+ *cnt = cluster_cnt;
+
+ return sync_cluster;
+}
+
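+/*
+ * Mark a cluster as frequency-synchronous if its ID appears in the
+ * "qcom,synchronous-cluster-id" list, and point each member CPU back at
+ * its parent cluster structure.
+ */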
+static void update_cpu_datastructure(struct cluster_info *cluster_ptr,
+ int *sync_cluster, int sync_cluster_cnt)
+{
+ int i = 0;
+ bool is_sync_cluster = false;
+
+ for (i = 0; (sync_cluster) && (i < sync_cluster_cnt); i++) {
+ if (cluster_ptr->cluster_id != sync_cluster[i])
+ continue;
+ is_sync_cluster = true;
+ break;
+ }
+
+ cluster_ptr->sync_cluster = is_sync_cluster;
+ pr_debug("Cluster ID:%d Sync cluster:%s Sibling mask:%lu\n",
+ cluster_ptr->cluster_id, is_sync_cluster ? "Yes" : "No",
+ *cluster_ptr->cluster_cores.bits);
+ for_each_cpu_mask(i, cluster_ptr->cluster_cores) {
+ cpus[i].parent_ptr = cluster_ptr;
+ }
+}
+
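+/*
+ * sysfs output format (one entry per cluster):
+ * "<cluster id>:<sibling cpumask bits>:<sync cluster flag> "
+ */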
+static ssize_t cluster_info_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ uint32_t i = 0;
+ ssize_t tot_size = 0, size = 0;
+
+ for (; i < core_ptr->entity_count; i++) {
+ struct cluster_info *cluster_ptr =
+ &core_ptr->child_entity_ptr[i];
+
+ size = snprintf(&buf[tot_size], PAGE_SIZE - tot_size,
+ "%d:%lu:%d ", cluster_ptr->cluster_id,
+ *cluster_ptr->cluster_cores.bits,
+ cluster_ptr->sync_cluster);
+ if ((tot_size + size) >= PAGE_SIZE) {
+ pr_err("Not enough buffer size");
+ break;
+ }
+ tot_size += size;
+ }
+
+ return tot_size;
+}
+
+static int thermal_config_debugfs_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, thermal_config_debugfs_read,
+ inode->i_private);
+}
+
+static const struct file_operations thermal_debugfs_config_ops = {
+ .open = thermal_config_debugfs_open,
+ .read = seq_read,
+ .write = thermal_config_debugfs_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int create_config_debugfs(
+ struct msm_thermal_debugfs_thresh_config *config_ptr,
+ struct dentry *parent)
+{
+ int ret = 0;
+
+ if (!strlen(config_ptr->config_name))
+ return -ENODEV;
+
+ THERM_CREATE_DEBUGFS_DIR(config_ptr->dbg_config,
+ config_ptr->config_name, parent, ret);
+ if (ret)
+ goto create_exit;
+
+ config_ptr->dbg_thresh = debugfs_create_u64(MSM_THERMAL_THRESH,
+ 0600, config_ptr->dbg_config, (u64 *)&config_ptr->thresh);
+ if (IS_ERR(config_ptr->dbg_thresh)) {
+ ret = PTR_ERR(config_ptr->dbg_thresh);
+ pr_err("Error creating thresh debugfs:[%s]. error:%d\n",
+ config_ptr->config_name, ret);
+ goto create_exit;
+ }
+
+ config_ptr->dbg_thresh_clr = debugfs_create_u64(MSM_THERMAL_THRESH_CLR,
+ 0600, config_ptr->dbg_config, (u64 *)&config_ptr->thresh_clr);
+ if (IS_ERR(config_ptr->dbg_thresh_clr)) {
+ ret = PTR_ERR(config_ptr->dbg_thresh_clr);
+ pr_err("Error creating thresh_clr debugfs:[%s]. error:%d\n",
+ config_ptr->config_name, ret);
+ goto create_exit;
+ }
+
+ config_ptr->dbg_thresh_update = debugfs_create_bool(
+ MSM_THERMAL_THRESH_UPDATE, 0600, config_ptr->dbg_config,
+ (u32 *)&config_ptr->update);
+ if (IS_ERR(config_ptr->dbg_thresh_update)) {
+ ret = PTR_ERR(config_ptr->dbg_thresh_update);
+ pr_err("Error creating enable debugfs:[%s]. error:%d\n",
+ config_ptr->config_name, ret);
+ goto create_exit;
+ }
+
+create_exit:
+ if (ret)
+ debugfs_remove_recursive(parent);
+
+ return ret;
+}
+
+static int create_thermal_debugfs(void)
+{
+ int ret = 0, idx = 0;
+
+ if (msm_therm_debugfs)
+ return ret;
+
+ msm_therm_debugfs = devm_kzalloc(&msm_thermal_info.pdev->dev,
+ sizeof(struct msm_thermal_debugfs_entry), GFP_KERNEL);
+ if (!msm_therm_debugfs) {
+ ret = -ENOMEM;
+ pr_err("Memory alloc failed. err:%d\n", ret);
+ return ret;
+ }
+
+ THERM_CREATE_DEBUGFS_DIR(msm_therm_debugfs->parent, MSM_THERMAL_NAME,
+ NULL, ret);
+ if (ret)
+ goto create_exit;
+
+ msm_therm_debugfs->tsens_print = debugfs_create_bool(MSM_TSENS_PRINT,
+ 0600, msm_therm_debugfs->parent, &tsens_temp_print);
+ if (IS_ERR(msm_therm_debugfs->tsens_print)) {
+ ret = PTR_ERR(msm_therm_debugfs->tsens_print);
+ pr_err("Error creating debugfs:[%s]. err:%d\n",
+ MSM_TSENS_PRINT, ret);
+ goto create_exit;
+ }
+
+ THERM_CREATE_DEBUGFS_DIR(msm_therm_debugfs->config, MSM_THERMAL_CONFIG,
+ msm_therm_debugfs->parent, ret);
+ if (ret)
+ goto create_exit;
+
+ msm_therm_debugfs->config_data = debugfs_create_file(MSM_CONFIG_DATA,
+ 0600, msm_therm_debugfs->config, NULL,
+ &thermal_debugfs_config_ops);
+ if (!msm_therm_debugfs->config_data) {
+ ret = -ENODEV;
+ pr_err("Error creating debugfs:[%s]\n",
+ MSM_CONFIG_DATA);
+ goto create_exit;
+ }
+ for (idx = 0; idx < MSM_LIST_MAX_NR + MAX_CPU_CONFIG; idx++)
+ create_config_debugfs(&mit_config[idx],
+ msm_therm_debugfs->config);
+
+create_exit:
+ if (ret) {
+ debugfs_remove_recursive(msm_therm_debugfs->parent);
+ devm_kfree(&msm_thermal_info.pdev->dev, msm_therm_debugfs);
+ }
+ return ret;
+}
+
+static struct kobj_attribute cluster_info_attr = __ATTR_RO(cluster_info);
+static int create_cpu_topology_sysfs(void)
+{
+ int ret = 0;
+ struct kobject *module_kobj = NULL;
+
+ if (!cluster_info_probed) {
+ cluster_info_nodes_called = true;
+ return ret;
+ }
+ if (!core_ptr)
+ return ret;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("cannot find kobject\n");
+ return -ENODEV;
+ }
+
+ sysfs_attr_init(&cluster_info_attr.attr);
+ ret = sysfs_create_file(module_kobj, &cluster_info_attr.attr);
+ if (ret) {
+ pr_err("cannot create cluster info attr group. err:%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
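+/*
+ * Parse "qcom,synchronous-cluster-map", which is laid out per cluster as:
+ * <cluster-id> <core count N> <N cpu phandles...>
+ * and build the cluster-id array and per-cluster sibling cpumasks.
+ */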
+static int get_device_tree_cluster_info(struct device *dev, int *cluster_id,
+ cpumask_t *cluster_cpus)
+{
+ int idx = 0, ret = 0, max_entry = 0, core_cnt = 0, c_idx = 0, cpu = 0;
+ uint32_t val = 0;
+ char *key = "qcom,synchronous-cluster-map";
+ struct device_node *core_phandle = NULL;
+
+ if (!of_get_property(dev->of_node, key, &max_entry)
+ || max_entry <= 0) {
+ pr_debug("Property %s not defined.\n", key);
+ return -ENODEV;
+ }
+ max_entry /= sizeof(__be32);
+
+ for (idx = 0; idx < max_entry; idx++, c_idx++) {
+ /* Read Cluster ID */
+ ret = of_property_read_u32_index(dev->of_node, key, idx++,
+ &val);
+ if (ret) {
+ pr_err("Error reading index%d. err:%d\n", idx - 1,
+ ret);
+ return -EINVAL;
+ }
+ /* Read number of cores inside a cluster */
+ cluster_id[c_idx] = val;
+ cpumask_clear(&cluster_cpus[c_idx]);
+ ret = of_property_read_u32_index(dev->of_node, key, idx,
+ &val);
+ if (ret || val < 1) {
+ pr_err("Invalid core count[%d] for Cluster%d. err:%d\n"
+ , val, cluster_id[c_idx - 1], ret);
+ return -EINVAL;
+ }
+ core_cnt = val + idx;
+ /* map the cores to logical CPUs and get sibling mask */
+ for (; core_cnt != idx; core_cnt--) {
+ core_phandle = of_parse_phandle(dev->of_node, key,
+ core_cnt);
+ if (!core_phandle) {
+ pr_debug("Invalid phandle. core%d cluster%d\n",
+ core_cnt, cluster_id[c_idx - 1]);
+ continue;
+ }
+
+ for_each_possible_cpu(cpu) {
+ if (of_get_cpu_node(cpu, NULL)
+ == core_phandle)
+ break;
+ }
+ if (cpu >= num_possible_cpus()) {
+ pr_debug("Skipping core%d in cluster%d\n",
+ core_cnt, cluster_id[c_idx - 1]);
+ continue;
+ }
+ cpumask_set_cpu(cpu, &cluster_cpus[c_idx]);
+ }
+ idx += val;
+ }
+
+ return c_idx;
+}
+
+static int get_kernel_cluster_info(int *cluster_id, cpumask_t *cluster_cpus)
+{
+ uint32_t _cpu, cluster_index, cluster_cnt;
+
+ for (_cpu = 0, cluster_cnt = 0; _cpu < num_possible_cpus(); _cpu++) {
+ if (topology_physical_package_id(_cpu) < 0) {
+ pr_err("CPU%d topology not initialized.\n", _cpu);
+ return -ENODEV;
+ }
+ /* Do not use the sibling cpumask from the topology module.
+ ** The kernel topology module updates the sibling cpumask
+ ** only when the cores are brought online for the first time.
+ ** KTM instead figures out the sibling cpumask using the
+ ** cluster and core ID mapping.
+ */
+ for (cluster_index = 0; cluster_index < num_possible_cpus();
+ cluster_index++) {
+ if (cluster_id[cluster_index] == -1) {
+ cluster_id[cluster_index] =
+ topology_physical_package_id(_cpu);
+ cpumask_clear(&cluster_cpus[cluster_index]);
+ cpumask_set_cpu(_cpu,
+ &cluster_cpus[cluster_index]);
+ cluster_cnt++;
+ break;
+ }
+ if (cluster_id[cluster_index] ==
+ topology_physical_package_id(_cpu)) {
+ cpumask_set_cpu(_cpu,
+ &cluster_cpus[cluster_index]);
+ break;
+ }
+ }
+ }
+
+ return cluster_cnt;
+}
+
+static void update_cpu_topology(struct device *dev)
+{
+ int cluster_id[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
+ cpumask_t cluster_cpus[NR_CPUS];
+ uint32_t i;
+ int cluster_cnt, sync_cluster_cnt = 0;
+ struct cluster_info *temp_ptr = NULL;
+ int *sync_cluster_id = NULL;
+
+ cluster_info_probed = true;
+ cluster_cnt = get_kernel_cluster_info(cluster_id, cluster_cpus);
+ if (cluster_cnt <= 0) {
+ cluster_cnt = get_device_tree_cluster_info(dev, cluster_id,
+ cluster_cpus);
+ if (cluster_cnt <= 0) {
+ core_ptr = NULL;
+ pr_debug("Cluster Info not defined. KTM continues.\n");
+ return;
+ }
+ }
+
+ core_ptr = devm_kzalloc(dev, sizeof(struct cluster_info), GFP_KERNEL);
+ if (!core_ptr) {
+ pr_err("Memory alloc failed\n");
+ return;
+ }
+ core_ptr->parent_ptr = NULL;
+ core_ptr->entity_count = cluster_cnt;
+ core_ptr->cluster_id = -1;
+ core_ptr->sync_cluster = false;
+
+ sync_cluster_id = get_sync_cluster(dev, &sync_cluster_cnt);
+ if (!sync_cluster_id) {
+ devm_kfree(dev, core_ptr);
+ core_ptr = NULL;
+ return;
+ }
+ temp_ptr = devm_kzalloc(dev, sizeof(struct cluster_info) * cluster_cnt,
+ GFP_KERNEL);
+ if (!temp_ptr) {
+ pr_err("Memory alloc failed\n");
+ devm_kfree(dev, core_ptr);
+ core_ptr = NULL;
+ return;
+ }
+
+ for (i = 0; i < cluster_cnt; i++) {
+ pr_debug("Cluster_ID:%d CPU's:%lu\n", cluster_id[i],
+ *cpumask_bits(&cluster_cpus[i]));
+ temp_ptr[i].cluster_id = cluster_id[i];
+ temp_ptr[i].parent_ptr = core_ptr;
+ cpumask_copy(&temp_ptr[i].cluster_cores, &cluster_cpus[i]);
+ temp_ptr[i].limited_max_freq = UINT_MAX;
+ temp_ptr[i].limited_min_freq = 0;
+ temp_ptr[i].freq_idx = 0;
+ temp_ptr[i].freq_idx_low = 0;
+ temp_ptr[i].freq_idx_high = 0;
+ temp_ptr[i].freq_table = NULL;
+ temp_ptr[i].entity_count = cpumask_weight(&cluster_cpus[i]);
+ temp_ptr[i].child_entity_ptr = NULL;
+ update_cpu_datastructure(&temp_ptr[i], sync_cluster_id,
+ sync_cluster_cnt);
+ }
+ core_ptr->child_entity_ptr = temp_ptr;
+}
+
+static int get_cpu_freq_plan_len(int cpu)
+{
+ int table_len = 0;
+ struct device *cpu_dev = NULL;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("Error in get CPU%d device\n", cpu);
+ goto exit;
+ }
+
+ rcu_read_lock();
+ table_len = dev_pm_opp_get_opp_count(cpu_dev);
+ if (table_len <= 0) {
+ pr_err("Error reading CPU%d freq table len. error:%d\n",
+ cpu, table_len);
+ table_len = 0;
+ goto unlock_and_exit;
+ }
+
+unlock_and_exit:
+ rcu_read_unlock();
+
+exit:
+ return table_len;
+}
+
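+/*
+ * Walk the CPU's OPP table in ascending order (find_freq_ceil with an
+ * incrementing search frequency) and fill freq_table_ptr, converting
+ * each OPP from Hz to kHz. Returns the number of entries filled.
+ */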
+static int get_cpu_freq_plan(int cpu,
+ struct cpufreq_frequency_table *freq_table_ptr)
+{
+ int table_len = 0;
+ struct dev_pm_opp *opp = NULL;
+ unsigned long freq = 0;
+ struct device *cpu_dev = NULL;
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("Error in get CPU%d device\n", cpu);
+ goto exit;
+ }
+
+ rcu_read_lock();
+ while (!IS_ERR(opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq))) {
+ /* Convert from Hz to kHz */
+ freq_table_ptr[table_len].frequency = freq / 1000;
+ pr_debug("cpu%d freq %d :%d\n", cpu, table_len,
+ freq_table_ptr[table_len].frequency);
+ freq++;
+ table_len++;
+ }
+ rcu_read_unlock();
+
+exit:
+ return table_len;
+}
+
+static int init_cluster_freq_table(void)
+{
+ uint32_t _cluster = 0;
+ int table_len = 0;
+ int ret = 0;
+ struct cluster_info *cluster_ptr = NULL;
+
+ for (; _cluster < core_ptr->entity_count; _cluster++, table_len = 0) {
+ cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
+ if (cluster_ptr->freq_table)
+ continue;
+
+ table_len = get_cpu_freq_plan_len(
+ first_cpu(cluster_ptr->cluster_cores));
+ if (!table_len) {
+ ret = -EAGAIN;
+ continue;
+ }
+ cluster_ptr->freq_idx_low = 0;
+ cluster_ptr->freq_idx_high = cluster_ptr->freq_idx =
+ table_len - 1;
+ if (cluster_ptr->freq_idx_high < 0
+ || (cluster_ptr->freq_idx_high
+ < cluster_ptr->freq_idx_low)) {
+ cluster_ptr->freq_idx = cluster_ptr->freq_idx_low =
+ cluster_ptr->freq_idx_high = 0;
+ WARN(1, "Cluster%d frequency table length:%d\n",
+ cluster_ptr->cluster_id, table_len);
+ ret = -EINVAL;
+ goto exit;
+ }
+ cluster_ptr->freq_table = kzalloc(
+ sizeof(struct cpufreq_frequency_table) * table_len,
+ GFP_KERNEL);
+ if (!cluster_ptr->freq_table) {
+ pr_err("memory alloc failed\n");
+ cluster_ptr->freq_idx = cluster_ptr->freq_idx_low =
+ cluster_ptr->freq_idx_high = 0;
+ ret = -ENOMEM;
+ goto exit;
+ }
+ table_len = get_cpu_freq_plan(
+ first_cpu(cluster_ptr->cluster_cores),
+ cluster_ptr->freq_table);
+ if (!table_len) {
+ kfree(cluster_ptr->freq_table);
+ cluster_ptr->freq_table = NULL;
+ pr_err("Error reading cluster%d cpufreq table\n",
+ cluster_ptr->cluster_id);
+ ret = -EAGAIN;
+ continue;
+ }
+ }
+
+exit:
+ return ret;
+}
+
+static void update_cluster_freq(void)
+{
+ int online_cpu = -1;
+ struct cluster_info *cluster_ptr = NULL;
+ uint32_t _cluster = 0, _cpu = 0, max = UINT_MAX, min = 0;
+
+ if (!core_ptr)
+ return;
+
+ for (; _cluster < core_ptr->entity_count; _cluster++, _cpu = 0,
+ online_cpu = -1, max = UINT_MAX, min = 0) {
+ /*
+ ** If a cluster is synchronous, go over the frequency limits
+ ** of each core in that cluster and aggregate the minimum
+ ** and maximum frequencies. After aggregating, request a
+ ** frequency update on the first online core in that cluster.
+ ** The cpufreq driver takes care of updating the frequency of
+ ** the other cores in a synchronous cluster.
+ */
+ cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
+
+ if (!cluster_ptr->sync_cluster)
+ continue;
+ for_each_cpu_mask(_cpu, cluster_ptr->cluster_cores) {
+ if (online_cpu == -1 && cpu_online(_cpu))
+ online_cpu = _cpu;
+ max = min(max, cpus[_cpu].limited_max_freq);
+ min = max(min, cpus[_cpu].limited_min_freq);
+ }
+ if (cluster_ptr->limited_max_freq == max
+ && cluster_ptr->limited_min_freq == min)
+ continue;
+ cluster_ptr->limited_max_freq = max;
+ cluster_ptr->limited_min_freq = min;
+ if (online_cpu != -1)
+ update_cpu_freq(online_cpu);
+ }
+}
+
+static void do_cluster_freq_ctrl(long temp)
+{
+ uint32_t _cluster = 0;
+ int _cpu = -1, freq_idx = 0;
+ bool mitigate = false;
+ struct cluster_info *cluster_ptr = NULL;
+
+ if (temp >= msm_thermal_info.limit_temp_degC)
+ mitigate = true;
+ else if (temp < msm_thermal_info.limit_temp_degC -
+ msm_thermal_info.temp_hysteresis_degC)
+ mitigate = false;
+ else
+ return;
+
+ get_online_cpus();
+ for (; _cluster < core_ptr->entity_count; _cluster++) {
+ cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
+ if (!cluster_ptr->freq_table)
+ continue;
+
+ if (mitigate)
+ freq_idx = max_t(int, cluster_ptr->freq_idx_low,
+ (cluster_ptr->freq_idx
+ - msm_thermal_info.bootup_freq_step));
+ else
+ freq_idx = min_t(int, cluster_ptr->freq_idx_high,
+ (cluster_ptr->freq_idx
+ + msm_thermal_info.bootup_freq_step));
+ if (freq_idx == cluster_ptr->freq_idx)
+ continue;
+
+ cluster_ptr->freq_idx = freq_idx;
+ for_each_cpu_mask(_cpu, cluster_ptr->cluster_cores) {
+ if (!(msm_thermal_info.bootup_freq_control_mask
+ & BIT(_cpu)))
+ continue;
+ pr_info("Limiting CPU%d max frequency to %u. Temp:%ld\n"
+ , _cpu
+ , cluster_ptr->freq_table[freq_idx].frequency
+ , temp);
+ cpus[_cpu].limited_max_freq = min(
+ cluster_ptr->freq_table[freq_idx].frequency,
+ cpus[_cpu].vdd_max_freq);
+ }
+ }
+ if (_cpu != -1)
+ update_cluster_freq();
+ put_online_cpus();
+}
+
+/* If freq table exists, then we can send freq request */
+static int check_freq_table(void)
+{
+ int ret = 0;
+ static bool invalid_table;
+ int table_len = 0;
+
+ if (invalid_table)
+ return -EINVAL;
+ if (freq_table_get)
+ return 0;
+
+ if (core_ptr) {
+ ret = init_cluster_freq_table();
+ if (!ret)
+ freq_table_get = 1;
+ else if (ret == -EINVAL)
+ invalid_table = true;
+ goto exit;
+ }
+
+ table_len = get_cpu_freq_plan_len(0);
+ if (!table_len)
+ return -EINVAL;
+
+ table = kzalloc(sizeof(struct cpufreq_frequency_table)
+ * table_len, GFP_KERNEL);
+ if (!table) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ table_len = get_cpu_freq_plan(0, table);
+ if (!table_len) {
+ pr_err("error reading cpufreq table\n");
+ ret = -EINVAL;
+ goto free_and_exit;
+ }
+
+ limit_idx_low = 0;
+ limit_idx_high = limit_idx = table_len - 1;
+ if (limit_idx_high < 0 || limit_idx_high < limit_idx_low) {
+ invalid_table = true;
+ limit_idx_low = limit_idx_high = limit_idx = 0;
+ WARN(1, "CPU0 frequency table length:%d\n", table_len);
+ ret = -EINVAL;
+ goto free_and_exit;
+ }
+ freq_table_get = 1;
+
+free_and_exit:
+ if (ret) {
+ kfree(table);
+ table = NULL;
+ }
+
+exit:
+ return ret;
+}
+
+static int update_cpu_min_freq_all(struct rail *apss_rail, uint32_t min)
+{
+ uint32_t cpu = 0, _cluster = 0, max_freq = UINT_MAX;
+ int ret = 0;
+ struct cluster_info *cluster_ptr = NULL;
+ bool valid_table = false;
+
+ if (!freq_table_get) {
+ ret = check_freq_table();
+ if (ret && !core_ptr) {
+ pr_err("Fail to get freq table. err:%d\n", ret);
+ return ret;
+ }
+ }
+ if (min != apss_rail->min_level)
+ max_freq = apss_rail->max_frequency_limit;
+
+ get_online_cpus();
+ /* Clamp min and max_freq against the freq table bounds in case min exceeds the allowed max */
+ if (core_ptr) {
+ for (; _cluster < core_ptr->entity_count; _cluster++) {
+ cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
+ if (!cluster_ptr->freq_table)
+ continue;
+ valid_table = true;
+ min = min(min,
+ cluster_ptr->freq_table[
+ cluster_ptr->freq_idx_high].frequency);
+ max_freq = max(max_freq, cluster_ptr->freq_table[
+ cluster_ptr->freq_idx_low].frequency);
+ }
+ if (!valid_table)
+ goto update_freq_exit;
+ } else {
+ min = min(min, table[limit_idx_high].frequency);
+ max_freq = max(max_freq, table[limit_idx_low].frequency);
+ }
+
+ pr_debug("Requesting min freq:%u max freq:%u for all CPU's\n",
+ min, max_freq);
+ if (freq_mitigation_task) {
+ if (!apss_rail->device_handle[0]) {
+ pr_err("device manager handle not registered\n");
+ ret = -ENODEV;
+ goto update_freq_exit;
+ }
+ for_each_possible_cpu(cpu) {
+ cpus[cpu].vdd_max_freq = max_freq;
+ apss_rail->request[cpu].freq.max_freq = max_freq;
+ apss_rail->request[cpu].freq.min_freq = min;
+ ret = devmgr_client_request_mitigation(
+ apss_rail->device_handle[cpu],
+ CPUFREQ_MITIGATION_REQ,
+ &apss_rail->request[cpu]);
+ }
+ } else if (core_ptr) {
+ for (_cluster = 0; _cluster < core_ptr->entity_count;
+ _cluster++) {
+ cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
+ if (!cluster_ptr->freq_table)
+ continue;
+ for_each_cpu_mask(cpu, cluster_ptr->cluster_cores) {
+ cpus[cpu].limited_min_freq = min;
+ cpus[cpu].vdd_max_freq = max_freq;
+ cpus[cpu].limited_max_freq = min(
+ cluster_ptr->freq_table[
+ cluster_ptr->freq_idx].frequency,
+ cpus[cpu].vdd_max_freq);
+ if (!SYNC_CORE(cpu))
+ update_cpu_freq(cpu);
+ }
+ update_cluster_freq();
+ }
+ } else {
+ for_each_possible_cpu(cpu) {
+ cpus[cpu].limited_min_freq = min;
+ cpus[cpu].vdd_max_freq = max_freq;
+ cpus[cpu].limited_max_freq =
+ min(table[limit_idx].frequency,
+ cpus[cpu].vdd_max_freq);
+ if (!SYNC_CORE(cpu))
+ update_cpu_freq(cpu);
+ }
+ update_cluster_freq();
+ }
+
+update_freq_exit:
+ put_online_cpus();
+ return ret;
+}
+
+static int vdd_restriction_apply_freq(struct rail *r, int level)
+{
+ int ret = 0;
+
+ if (level == r->curr_level)
+ return ret;
+
+ /* level = -1: disable, level = 0,1,2..n: enable */
+ if (level == -1) {
+ ret = update_cpu_min_freq_all(r, r->min_level);
+ if (ret)
+ return ret;
+ else
+ r->curr_level = -1;
+ } else if (level >= 0 && level < (r->num_levels)) {
+ ret = update_cpu_min_freq_all(r, r->levels[level]);
+ if (ret)
+ return ret;
+ else
+ r->curr_level = level;
+ } else {
+ pr_err("level input:%d is not within range\n", level);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int vdd_restriction_apply_voltage(struct rail *r, int level)
+{
+ int ret = 0;
+
+ if (r->reg == NULL) {
+ pr_err("%s don't have regulator handle. can't apply vdd\n",
+ r->name);
+ return -EFAULT;
+ }
+ if (level == r->curr_level)
+ return ret;
+
+ /* level = -1: disable, level = 0,1,2..n: enable */
+ if (level == -1) {
+ ret = regulator_set_voltage(r->reg, r->min_level,
+ r->levels[r->num_levels - 1]);
+ if (!ret)
+ r->curr_level = -1;
+ pr_debug("Requested min level for %s. curr level: %d\n",
+ r->name, r->curr_level);
+ } else if (level >= 0 && level < (r->num_levels)) {
+ ret = regulator_set_voltage(r->reg, r->levels[level],
+ r->levels[r->num_levels - 1]);
+ if (!ret)
+ r->curr_level = level;
+ pr_debug("Requesting level %d for %s. curr level: %d\n",
+ r->levels[level], r->name, r->levels[r->curr_level]);
+ } else {
+ pr_err("level input:%d is not within range\n", level);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Setting all rails the same mode */
+static int psm_set_mode_all(int mode)
+{
+ int i = 0;
+ int fail_cnt = 0;
+ int ret = 0;
+
+ pr_debug("Requesting PMIC Mode: %d\n", mode);
+ for (i = 0; i < psm_rails_cnt; i++) {
+ if (psm_rails[i].mode != mode) {
+ ret = rpm_regulator_set_mode(psm_rails[i].reg, mode);
+ if (ret) {
+ pr_err("Cannot set mode:%d for %s. err:%d",
+ mode, psm_rails[i].name, ret);
+ fail_cnt++;
+ } else
+ psm_rails[i].mode = mode;
+ }
+ }
+
+ return fail_cnt ? (-EFAULT) : ret;
+}
+
+static ssize_t vdd_rstr_en_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", en->enabled);
+}
+
+static ssize_t vdd_rstr_en_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ int i = 0;
+ uint8_t en_cnt = 0;
+ uint8_t dis_cnt = 0;
+ uint32_t val = 0;
+ struct kernel_param kp;
+ struct vdd_rstr_enable *en = VDD_RSTR_ENABLE_FROM_ATTRIBS(attr);
+
+ mutex_lock(&vdd_rstr_mutex);
+ kp.arg = &val;
+ ret = param_set_bool(buf, &kp);
+ if (ret) {
+ pr_err("Invalid input %s for enabled\n", buf);
+ goto done_vdd_rstr_en;
+ }
+
+ if ((val == 0) && (en->enabled == 0))
+ goto done_vdd_rstr_en;
+
+ for (i = 0; i < rails_cnt; i++) {
+ if (rails[i].freq_req == 1 && freq_table_get)
+ ret = vdd_restriction_apply_freq(&rails[i],
+ (val) ? 0 : -1);
+ else
+ ret = vdd_restriction_apply_voltage(&rails[i],
+ (val) ? 0 : -1);
+
+ /*
+ * Even if fail to set one rail, still try to set the
+ * others. Continue the loop
+ */
+ if (ret)
+ pr_err("Set vdd restriction for %s failed\n",
+ rails[i].name);
+ else {
+ if (val)
+ en_cnt++;
+ else
+ dis_cnt++;
+ }
+ }
+ /* As long as one rail is enabled, vdd rstr is enabled */
+ if (val && en_cnt)
+ en->enabled = 1;
+ else if (!val && (dis_cnt == rails_cnt))
+ en->enabled = 0;
+ pr_debug("%s vdd restriction. curr: %d\n",
+ (val) ? "Enable" : "Disable", en->enabled);
+
+done_vdd_rstr_en:
+ mutex_unlock(&vdd_rstr_mutex);
+ return count;
+}
+
+static int send_temperature_band(enum msm_thermal_phase_ctrl phase,
+ enum msm_temp_band req_band)
+{
+ int ret = 0;
+ uint32_t msg_id;
+ struct msm_rpm_request *rpm_req;
+ unsigned int band = req_band;
+ uint32_t key, resource, resource_id;
+
+ if (phase < 0 || phase >= MSM_PHASE_CTRL_NR ||
+ req_band <= 0 || req_band >= MSM_TEMP_MAX_NR) {
+ pr_err("Invalid input\n");
+ ret = -EINVAL;
+ goto phase_ctrl_exit;
+ }
+ switch (phase) {
+ case MSM_CX_PHASE_CTRL:
+ key = msm_thermal_info.cx_phase_request_key;
+ break;
+ case MSM_GFX_PHASE_CTRL:
+ key = msm_thermal_info.gfx_phase_request_key;
+ break;
+ default:
+ goto phase_ctrl_exit;
+ }
+
+ resource = msm_thermal_info.phase_rpm_resource_type;
+ resource_id = msm_thermal_info.phase_rpm_resource_id;
+ pr_debug("Sending %s temperature band %d\n",
+ (phase == MSM_CX_PHASE_CTRL) ? "CX" : "GFX",
+ req_band);
+ rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET,
+ resource, resource_id, 1);
+ if (!rpm_req) {
+ pr_err("Creating RPM request failed\n");
+ ret = -ENXIO;
+ goto phase_ctrl_exit;
+ }
+
+ ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)&band,
+ (int)sizeof(band));
+ if (ret) {
+ pr_err("Adding KVP data failed. err:%d\n", ret);
+ goto free_rpm_handle;
+ }
+
+ msg_id = msm_rpm_send_request(rpm_req);
+ if (!msg_id) {
+ pr_err("RPM send request failed\n");
+ ret = -ENXIO;
+ goto free_rpm_handle;
+ }
+
+ ret = msm_rpm_wait_for_ack(msg_id);
+ if (ret) {
+ pr_err("RPM wait for ACK failed. err:%d\n", ret);
+ goto free_rpm_handle;
+ }
+
+free_rpm_handle:
+ msm_rpm_free_request(rpm_req);
+phase_ctrl_exit:
+ return ret;
+}
+
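+/*
+ * Pack up to the first four characters of a string into a little-endian
+ * uint32_t, as used for RPM resource and key encoding.
+ */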
+static uint32_t msm_thermal_str_to_int(const char *inp)
+{
+ int i, len;
+ uint32_t output = 0;
+
+ len = strnlen(inp, sizeof(uint32_t));
+ for (i = 0; i < len; i++)
+ output |= inp[i] << (i * 8);
+
+ return output;
+}
+
+static ssize_t sensor_info_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int i;
+ ssize_t tot_size = 0, size = 0;
+
+ for (i = 0; i < sensor_cnt; i++) {
+ size = snprintf(&buf[tot_size], PAGE_SIZE - tot_size,
+ "%s:%s:%s:%d ",
+ sensors[i].type, sensors[i].name,
+ sensors[i].alias ? : "",
+ sensors[i].scaling_factor);
+ if (tot_size + size >= PAGE_SIZE) {
+ pr_err("Not enough buffer size\n");
+ break;
+ }
+ tot_size += size;
+ }
+ if (tot_size)
+ buf[tot_size - 1] = '\n';
+
+ return tot_size;
+}
+
+static struct vdd_rstr_enable vdd_rstr_en = {
+ .ko_attr.attr.name = __stringify(enabled),
+ .ko_attr.attr.mode = 0644,
+ .ko_attr.show = vdd_rstr_en_show,
+ .ko_attr.store = vdd_rstr_en_store,
+ .enabled = 1,
+};
+
+static struct attribute *vdd_rstr_en_attribs[] = {
+ &vdd_rstr_en.ko_attr.attr,
+ NULL,
+};
+
+static struct attribute_group vdd_rstr_en_attribs_gp = {
+ .attrs = vdd_rstr_en_attribs,
+};
+
+static ssize_t vdd_rstr_reg_value_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int val = 0;
+ struct rail *reg = VDD_RSTR_REG_VALUE_FROM_ATTRIBS(attr);
+ /* -1:disabled, -2:fail to get regulator handle */
+ if (reg->curr_level < 0)
+ val = reg->curr_level;
+ else
+ val = reg->levels[reg->curr_level];
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t vdd_rstr_reg_level_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);
+ return snprintf(buf, PAGE_SIZE, "%d\n", reg->curr_level);
+}
+
+static ssize_t vdd_rstr_reg_level_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ int val = 0;
+
+ struct rail *reg = VDD_RSTR_REG_LEVEL_FROM_ATTRIBS(attr);
+
+ mutex_lock(&vdd_rstr_mutex);
+ if (vdd_rstr_en.enabled == 0)
+ goto done_store_level;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret) {
+ pr_err("Invalid input %s for level\n", buf);
+ goto done_store_level;
+ }
+
+ if (val < 0 || val > reg->num_levels - 1) {
+ pr_err(" Invalid number %d for level\n", val);
+ goto done_store_level;
+ }
+
+ if (val != reg->curr_level) {
+ if (reg->freq_req == 1 && freq_table_get)
+ update_cpu_min_freq_all(reg, reg->levels[val]);
+ else {
+ ret = vdd_restriction_apply_voltage(reg, val);
+ if (ret) {
+ pr_err("Set vdd restriction for regulator %s failed. err:%d\n",
+ reg->name, ret);
+ goto done_store_level;
+ }
+ }
+ reg->curr_level = val;
+ pr_debug("Request level %d for %s\n",
+ reg->curr_level, reg->name);
+ }
+
+done_store_level:
+ mutex_unlock(&vdd_rstr_mutex);
+ return count;
+}
+
+static int request_optimum_current(struct psm_rail *rail, enum ocr_request req)
+{
+ int ret = 0;
+
+ if ((!rail) || (req >= OPTIMUM_CURRENT_NR) ||
+ (req < 0)) {
+ pr_err("Invalid input %d\n", req);
+ ret = -EINVAL;
+ goto request_ocr_exit;
+ }
+
+ ret = regulator_set_optimum_mode(rail->phase_reg,
+ (req == OPTIMUM_CURRENT_MAX) ? MAX_CURRENT_UA : 0);
+ if (ret < 0) {
+ pr_err("Optimum current request failed. err:%d\n", ret);
+ goto request_ocr_exit;
+ }
+ ret = 0; /*regulator_set_optimum_mode returns the mode on success*/
+ pr_debug("Requested optimum current mode: %d\n", req);
+
+request_ocr_exit:
+ return ret;
+}
+
+static int ocr_set_mode_all(enum ocr_request req)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < ocr_rail_cnt; i++) {
+ if (ocr_rails[i].mode == req)
+ continue;
+ ret = request_optimum_current(&ocr_rails[i], req);
+ if (ret)
+ goto ocr_set_mode_exit;
+ ocr_rails[i].mode = req;
+ }
+
+ocr_set_mode_exit:
+ return ret;
+}
+
+static ssize_t ocr_reg_mode_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
+ return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode);
+}
+
+static ssize_t ocr_reg_mode_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ int val = 0;
+ struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
+
+ if (!ocr_enabled)
+ return count;
+
+ mutex_lock(&ocr_mutex);
+ ret = kstrtoint(buf, 10, &val);
+ if (ret) {
+ pr_err("Invalid input %s for mode. err:%d\n",
+ buf, ret);
+ goto done_ocr_store;
+ }
+
+ if ((val != OPTIMUM_CURRENT_MAX) &&
+ (val != OPTIMUM_CURRENT_MIN)) {
+ pr_err("Invalid value %d for mode\n", val);
+ goto done_ocr_store;
+ }
+
+ if (val != reg->mode) {
+ ret = request_optimum_current(reg, val);
+ if (ret)
+ goto done_ocr_store;
+ reg->mode = val;
+ }
+
+done_ocr_store:
+ mutex_unlock(&ocr_mutex);
+ return count;
+}
+
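+/*
+ * Common sysfs store handler for the CX and GFX phase band requests:
+ * validate the requested band and forward it to the RPM only when it
+ * differs from the currently cached band.
+ */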
+static ssize_t store_phase_request(const char *buf, size_t count, bool is_cx)
+{
+ int ret = 0, val;
+ struct mutex *phase_mutex = (is_cx) ? (&cx_mutex) : (&gfx_mutex);
+ enum msm_thermal_phase_ctrl phase_req = (is_cx) ? MSM_CX_PHASE_CTRL :
+ MSM_GFX_PHASE_CTRL;
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret) {
+ pr_err("Invalid input %s for %s temperature band\n",
+ buf, (is_cx) ? "CX" : "GFX");
+ goto phase_store_exit;
+ }
+ if ((val <= 0) || (val >= MSM_TEMP_MAX_NR)) {
+ pr_err("Invalid input %d for %s temperature band\n",
+ val, (is_cx) ? "CX" : "GFX");
+ ret = -EINVAL;
+ goto phase_store_exit;
+ }
+ mutex_lock(phase_mutex);
+ if (val != ((is_cx) ? curr_cx_band : curr_gfx_band)) {
+ ret = send_temperature_band(phase_req, val);
+ if (!ret) {
+ *((is_cx) ? &curr_cx_band : &curr_gfx_band) = val;
+ } else {
+ pr_err("Failed to send %d temp. band to %s rail\n", val,
+ (is_cx) ? "CX" : "GFX");
+ goto phase_store_unlock_exit;
+ }
+ }
+ ret = count;
+phase_store_unlock_exit:
+ mutex_unlock(phase_mutex);
+phase_store_exit:
+ return ret;
+}
+
+#define show_phase(_name, _variable) \
+static ssize_t _name##_phase_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ return snprintf(buf, PAGE_SIZE, "%u\n", _variable); \
+}
+
+#define store_phase(_name, _variable, _iscx) \
+static ssize_t _name##_phase_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, const char *buf, size_t count) \
+{ \
+ return store_phase_request(buf, count, _iscx); \
+}
+
+show_phase(gfx, curr_gfx_band)
+show_phase(cx, curr_cx_band)
+store_phase(gfx, curr_gfx_band, false)
+store_phase(cx, curr_cx_band, true)
+
+static ssize_t psm_reg_mode_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
+ return snprintf(buf, PAGE_SIZE, "%d\n", reg->mode);
+}
+
+static ssize_t psm_reg_mode_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ int val = 0;
+ struct psm_rail *reg = PSM_REG_MODE_FROM_ATTRIBS(attr);
+
+ mutex_lock(&psm_mutex);
+ ret = kstrtoint(buf, 10, &val);
+ if (ret) {
+ pr_err("Invalid input %s for mode\n", buf);
+ goto done_psm_store;
+ }
+
+ if ((val != PMIC_PWM_MODE) && (val != PMIC_AUTO_MODE)) {
+ pr_err("Invalid number %d for mode\n", val);
+ goto done_psm_store;
+ }
+
+ if (val != reg->mode) {
+ ret = rpm_regulator_set_mode(reg->reg, val);
+ if (ret) {
+ pr_err("Fail to set Mode:%d for %s. err:%d\n",
+ val, reg->name, ret);
+ goto done_psm_store;
+ }
+ reg->mode = val;
+ }
+
+done_psm_store:
+ mutex_unlock(&psm_mutex);
+ return count;
+}
+
+static int check_sensor_id(int sensor_id)
+{
+ int i = 0;
+ bool hw_id_found = false;
+ int ret = 0;
+
+ for (i = 0; i < max_tsens_num; i++) {
+ if (sensor_id == tsens_id_map[i]) {
+ hw_id_found = true;
+ break;
+ }
+ }
+ if (!hw_id_found) {
+ pr_err("Invalid sensor hw id:%d\n", sensor_id);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
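+/*
+ * Translate a thermal zone id to the underlying TSENS hardware id using
+ * the zone_id_tsens_map built at probe time; falls back to a 1:1 mapping
+ * when the map is not initialized.
+ */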
+static int zone_id_to_tsen_id(int zone_id, int *tsens_id)
+{
+ int i = 0;
+ int ret = 0;
+
+ if (!zone_id_tsens_map) {
+ pr_debug("zone_id_tsens_map is not initialized.\n");
+ *tsens_id = zone_id;
+ return ret;
+ }
+
+ for (i = 0; i < max_tsens_num; i++) {
+ if (zone_id == zone_id_tsens_map[i]) {
+ *tsens_id = tsens_id_map[i];
+ break;
+ }
+ }
+ if (i == max_tsens_num) {
+ pr_err("Invalid sensor zone id:%d\n", zone_id);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int create_sensor_zone_id_map(void)
+{
+ int i = 0;
+ int zone_id = -1;
+
+ zone_id_tsens_map = devm_kzalloc(&msm_thermal_info.pdev->dev,
+ sizeof(int) * max_tsens_num, GFP_KERNEL);
+
+ if (!zone_id_tsens_map) {
+ pr_err("Cannot allocate memory for zone_id_tsens_map\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < max_tsens_num; i++) {
+ char tsens_name[TSENS_NAME_MAX] = "";
+
+ snprintf(tsens_name, TSENS_NAME_MAX, TSENS_NAME_FORMAT,
+ tsens_id_map[i]);
+ zone_id = sensor_get_id(tsens_name);
+ if (zone_id < 0) {
+ pr_err("Error getting zone id for %s. err:%d\n",
+ tsens_name, zone_id);
+ goto fail;
+ } else {
+ zone_id_tsens_map[i] = zone_id;
+ }
+ }
+ return 0;
+
+fail:
+ devm_kfree(&msm_thermal_info.pdev->dev, zone_id_tsens_map);
+ return zone_id;
+}
+
+static int create_sensor_id_map(struct device *dev)
+{
+ int i = 0;
+ int ret = 0;
+
+ tsens_id_map = devm_kzalloc(dev,
+ sizeof(int) * max_tsens_num, GFP_KERNEL);
+
+ if (!tsens_id_map) {
+ pr_err("Cannot allocate memory for tsens_id_map\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < max_tsens_num; i++) {
+ ret = tsens_get_hw_id_mapping(i, &tsens_id_map[i]);
+ /* If return -ENXIO, hw_id is default in sequence */
+ if (ret) {
+ if (ret == -ENXIO) {
+ tsens_id_map[i] = i;
+ ret = 0;
+ } else {
+ pr_err("Failed to get hw id for id:%d.err:%d\n",
+ i, ret);
+ goto fail;
+ }
+ }
+ }
+
+ return ret;
+fail:
+ devm_kfree(dev, tsens_id_map);
+ return ret;
+}
+
+/* 1:enable, 0:disable */
+static int vdd_restriction_apply_all(int en)
+{
+ int i = 0;
+ int en_cnt = 0;
+ int dis_cnt = 0;
+ int fail_cnt = 0;
+ int ret = 0;
+
+ for (i = 0; i < rails_cnt; i++) {
+ if (rails[i].freq_req == 1) {
+ if (freq_table_get)
+ ret = vdd_restriction_apply_freq(&rails[i],
+ en ? 0 : -1);
+ else
+ continue;
+ } else {
+ ret = vdd_restriction_apply_voltage(&rails[i],
+ en ? 0 : -1);
+ }
+ if (ret) {
+ pr_err("Failed to %s for %s. err:%d",
+ (en) ? "enable" : "disable",
+ rails[i].name, ret);
+ fail_cnt++;
+ } else {
+ if (en)
+ en_cnt++;
+ else
+ dis_cnt++;
+ }
+ }
+
+ /* As long as one rail is enabled, vdd rstr is enabled */
+ if (en && en_cnt)
+ vdd_rstr_en.enabled = 1;
+ else if (!en && (dis_cnt == rails_cnt))
+ vdd_rstr_en.enabled = 0;
+
+ /*
+ * Check fail_cnt again to make sure all of the rails are applied
+ * restriction successfully or not
+ */
+ if (fail_cnt)
+ return -EFAULT;
+ return ret;
+}
+
+static int set_and_activate_threshold(uint32_t sensor_id,
+ struct sensor_threshold *threshold)
+{
+ int ret = 0;
+
+ ret = sensor_set_trip(sensor_id, threshold);
+ if (ret != 0) {
+ pr_err("sensor:%u Error in setting trip:%d. err:%d\n",
+ sensor_id, threshold->trip, ret);
+ goto set_done;
+ }
+
+ ret = sensor_activate_trip(sensor_id, threshold, true);
+ if (ret != 0) {
+ pr_err("sensor:%u Error in enabling trip:%d. err:%d\n",
+ sensor_id, threshold->trip, ret);
+ goto set_done;
+ }
+
+set_done:
+ return ret;
+}
+
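+/*
+ * Read a temperature either through the thermal zone layer
+ * (THERM_ZONE_ID) or directly from the TSENS driver (THERM_TSENS_ID),
+ * then apply the optional scaling factor so callers see degC.
+ */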
+static int therm_get_temp(uint32_t id, enum sensor_id_type type, long *temp)
+{
+ int ret = 0;
+ struct tsens_device tsens_dev;
+
+ if (!temp) {
+ pr_err("Invalid value\n");
+ ret = -EINVAL;
+ goto get_temp_exit;
+ }
+
+ switch (type) {
+ case THERM_ZONE_ID:
+ ret = sensor_get_temp(id, temp);
+ if (ret) {
+ pr_err("Unable to read thermal zone sensor:%d\n", id);
+ goto get_temp_exit;
+ }
+ break;
+ case THERM_TSENS_ID:
+ tsens_dev.sensor_num = id;
+ ret = tsens_get_temp(&tsens_dev, temp);
+ if (ret) {
+ pr_err("Unable to read TSENS sensor:%d\n",
+ tsens_dev.sensor_num);
+ goto get_temp_exit;
+ }
+ break;
+ default:
+ pr_err("Invalid type\n");
+ ret = -EINVAL;
+ goto get_temp_exit;
+ }
+
+ if (tsens_scaling_factor)
+ *temp = *temp / tsens_scaling_factor;
+
+get_temp_exit:
+ return ret;
+}
+
+static int msm_thermal_panic_callback(struct notifier_block *nfb,
+ unsigned long event, void *data)
+{
+ int i;
+
+ for (i = 0; i < max_tsens_num; i++) {
+ therm_get_temp(tsens_id_map[i],
+ THERM_TSENS_ID,
+ &tsens_temp_at_panic[i]);
+ if (tsens_temp_print)
+ pr_err("tsens%d temperature:%ldC\n",
+ tsens_id_map[i], tsens_temp_at_panic[i]);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block msm_thermal_panic_notifier = {
+ .notifier_call = msm_thermal_panic_callback,
+};
+
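+/*
+ * Program and arm the high/low trip points for a zone. A high trip is
+ * armed only when its threshold is at or above the current reading, and
+ * a low trip only when it is at or below the reading.
+ */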
+int sensor_mgr_set_threshold(uint32_t zone_id,
+ struct sensor_threshold *threshold)
+{
+ int i = 0, ret = 0;
+ long temp;
+
+ if (!threshold) {
+ pr_err("Invalid input\n");
+ ret = -EINVAL;
+ goto set_threshold_exit;
+ }
+
+ ret = therm_get_temp(zone_id, THERM_ZONE_ID, &temp);
+ if (ret) {
+ pr_err("Unable to read temperature for zone:%d. err:%d\n",
+ zone_id, ret);
+ goto set_threshold_exit;
+ }
+ pr_debug("Sensor:[%d] temp:[%ld]\n", zone_id, temp);
+ while (i < MAX_THRESHOLD) {
+ switch (threshold[i].trip) {
+ case THERMAL_TRIP_CONFIGURABLE_HI:
+ if (threshold[i].temp / tsens_scaling_factor >= temp) {
+ ret = set_and_activate_threshold(zone_id,
+ &threshold[i]);
+ if (ret)
+ goto set_threshold_exit;
+ UPDATE_THRESHOLD_SET(ret,
+ THERMAL_TRIP_CONFIGURABLE_HI);
+ }
+ break;
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
+ if (threshold[i].temp / tsens_scaling_factor <= temp) {
+ ret = set_and_activate_threshold(zone_id,
+ &threshold[i]);
+ if (ret)
+ goto set_threshold_exit;
+ UPDATE_THRESHOLD_SET(ret,
+ THERMAL_TRIP_CONFIGURABLE_LOW);
+ }
+ break;
+ default:
+ pr_err("zone:%u Invalid trip:%d\n", zone_id,
+ threshold[i].trip);
+ break;
+ }
+ i++;
+ }
+set_threshold_exit:
+ return ret;
+}
+
+static int apply_vdd_mx_restriction(void)
+{
+ int ret_mx = 0, ret_cx = 0;
+
+ if (mx_restr_applied)
+ goto done;
+
+ APPLY_VDD_RESTRICTION(vdd_mx, msm_thermal_info.vdd_mx_min, mx, ret_mx);
+ if (vdd_cx)
+ APPLY_VDD_RESTRICTION(vdd_cx, msm_thermal_info.vdd_cx_min,
+ cx, ret_cx);
+ if (!ret_mx && !ret_cx)
+ mx_restr_applied = true;
+
+done:
+ return (ret_mx | ret_cx);
+}
+
+static int remove_vdd_mx_restriction(void)
+{
+ int ret_mx = 0, ret_cx = 0;
+
+ if (!mx_restr_applied)
+ goto done;
+
+ REMOVE_VDD_RESTRICTION(vdd_mx, mx, ret_mx);
+ if (vdd_cx)
+ REMOVE_VDD_RESTRICTION(vdd_cx, cx, ret_cx);
+ if (!ret_mx && !ret_cx)
+ mx_restr_applied = false;
+
+done:
+ return (ret_mx | ret_cx);
+}
+
+static int do_vdd_mx(void)
+{
+ long temp = 0;
+ int ret = 0;
+ int i = 0;
+ int dis_cnt = 0;
+
+ if (!vdd_mx_enabled)
+ return ret;
+
+ mutex_lock(&vdd_mx_mutex);
+ for (i = 0; i < thresh[MSM_VDD_MX_RESTRICTION].thresh_ct; i++) {
+ ret = therm_get_temp(
+ thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i].sensor_id,
+ thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i].id_type,
+ &temp);
+ if (ret) {
+ pr_err("Unable to read TSENS sensor:%d, err:%d\n",
+ thresh[MSM_VDD_MX_RESTRICTION].thresh_list[i].
+ sensor_id, ret);
+ dis_cnt++;
+ continue;
+ }
+ if (temp <= msm_thermal_info.vdd_mx_temp_degC) {
+ ret = apply_vdd_mx_restriction();
+ if (ret)
+ pr_err("Failed to apply mx restriction\n");
+ goto exit;
+ } else if (temp >= (msm_thermal_info.vdd_mx_temp_degC +
+ msm_thermal_info.vdd_mx_temp_hyst_degC)) {
+ dis_cnt++;
+ }
+ }
+
+ if ((dis_cnt == thresh[MSM_VDD_MX_RESTRICTION].thresh_ct)) {
+ ret = remove_vdd_mx_restriction();
+ if (ret)
+ pr_err("Failed to remove vdd mx restriction\n");
+ }
+
+exit:
+ mutex_unlock(&vdd_mx_mutex);
+ return ret;
+}
+
+static void vdd_mx_notify(struct therm_threshold *trig_thresh)
+{
+ static uint32_t mx_sens_status;
+ int ret;
+
+ pr_debug("Sensor%d trigger recevied for type %d\n",
+ trig_thresh->sensor_id,
+ trig_thresh->trip_triggered);
+
+ if (!vdd_mx_enabled)
+ return;
+
+ mutex_lock(&vdd_mx_mutex);
+
+ switch (trig_thresh->trip_triggered) {
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
+ mx_sens_status |= BIT(trig_thresh->sensor_id);
+ break;
+ case THERMAL_TRIP_CONFIGURABLE_HI:
+ if (mx_sens_status & BIT(trig_thresh->sensor_id))
+ mx_sens_status ^= BIT(trig_thresh->sensor_id);
+ break;
+ default:
+ pr_err("Unsupported trip type\n");
+ break;
+ }
+
+ if (mx_sens_status) {
+ ret = apply_vdd_mx_restriction();
+ if (ret)
+ pr_err("Failed to apply mx restriction\n");
+ } else {
+ ret = remove_vdd_mx_restriction();
+ if (ret)
+ pr_err("Failed to remove vdd mx restriction\n");
+ }
+ mutex_unlock(&vdd_mx_mutex);
+ sensor_mgr_set_threshold(trig_thresh->sensor_id,
+ trig_thresh->threshold);
+}
+
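+/*
+ * Thermal watchdog bite: log the offending sensor and temperature, then
+ * ask the secure monitor (SCM) to reset the system.
+ */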
+static void msm_thermal_bite(int zone_id, long temp)
+{
+ struct scm_desc desc;
+ int tsens_id = 0;
+ int ret = 0;
+
+ ret = zone_id_to_tsen_id(zone_id, &tsens_id);
+ if (ret < 0) {
+ pr_err("Zone:%d reached temperature:%ld. Err = %d System reset\n",
+ zone_id, temp, ret);
+ } else {
+ pr_err("Tsens:%d reached temperature:%ld. System reset\n",
+ tsens_id, temp);
+ }
+ if (!is_scm_armv8()) {
+ scm_call_atomic1(SCM_SVC_BOOT, THERM_SECURE_BITE_CMD, 0);
+ } else {
+ desc.args[0] = 0;
+ desc.arginfo = SCM_ARGS(1);
+ scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
+ THERM_SECURE_BITE_CMD), &desc);
+ }
+}
+
+static int do_therm_reset(void)
+{
+ int ret = 0, i;
+ long temp = 0;
+
+ if (!therm_reset_enabled)
+ return ret;
+
+ for (i = 0; i < thresh[MSM_THERM_RESET].thresh_ct; i++) {
+ ret = therm_get_temp(
+ thresh[MSM_THERM_RESET].thresh_list[i].sensor_id,
+ thresh[MSM_THERM_RESET].thresh_list[i].id_type,
+ &temp);
+ if (ret) {
+ pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+ thresh[MSM_THERM_RESET].thresh_list[i].sensor_id,
+ ret);
+ continue;
+ }
+
+ if (temp >= msm_thermal_info.therm_reset_temp_degC)
+ msm_thermal_bite(
+ thresh[MSM_THERM_RESET].thresh_list[i].sensor_id, temp);
+ }
+
+ return ret;
+}
+
+static void therm_reset_notify(struct therm_threshold *thresh_data)
+{
+ long temp;
+ int ret = 0;
+
+ if (!therm_reset_enabled)
+ return;
+
+ if (!thresh_data) {
+ pr_err("Invalid input\n");
+ return;
+ }
+
+ switch (thresh_data->trip_triggered) {
+ case THERMAL_TRIP_CONFIGURABLE_HI:
+ ret = therm_get_temp(thresh_data->sensor_id,
+ thresh_data->id_type, &temp);
+ if (ret)
+ pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+ thresh_data->sensor_id, ret);
+ msm_thermal_bite(thresh_data->sensor_id, temp);
+ break;
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
+ break;
+ default:
+ pr_err("Invalid trip type\n");
+ break;
+ }
+ sensor_mgr_set_threshold(thresh_data->sensor_id,
+ thresh_data->threshold);
+}
+
+static void retry_hotplug(struct work_struct *work)
+{
+ mutex_lock(&core_control_mutex);
+ if (retry_in_progress) {
+ pr_debug("Retrying hotplug\n");
+ retry_in_progress = false;
+ complete(&hotplug_notify_complete);
+ }
+ mutex_unlock(&core_control_mutex);
+}
+
+#ifdef CONFIG_SMP
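+/*
+ * Hotplug-based mitigation: above core_limit_temp_degC, offline one
+ * eligible core per invocation (highest numbered first, never CPU0);
+ * below the hysteresis band, bring previously offlined cores back up.
+ */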
+static void __ref do_core_control(long temp)
+{
+ int i = 0;
+ int ret = 0;
+
+ if (!core_control_enabled)
+ return;
+
+ mutex_lock(&core_control_mutex);
+ if (msm_thermal_info.core_control_mask &&
+ temp >= msm_thermal_info.core_limit_temp_degC) {
+ for (i = num_possible_cpus() - 1; i > 0; i--) {
+ if (!(msm_thermal_info.core_control_mask & BIT(i)))
+ continue;
+ if (cpus_offlined & BIT(i) && !cpu_online(i))
+ continue;
+ pr_info("Set Offline: CPU%d Temp: %ld\n",
+ i, temp);
+ if (cpu_online(i)) {
+ trace_thermal_pre_core_offline(i);
+ ret = cpu_down(i);
+ if (ret)
+ pr_err("Error %d offline core %d\n",
+ ret, i);
+ trace_thermal_post_core_offline(i,
+ cpumask_test_cpu(i, cpu_online_mask));
+ }
+ cpus_offlined |= BIT(i);
+ break;
+ }
+ } else if (msm_thermal_info.core_control_mask && cpus_offlined &&
+ temp <= (msm_thermal_info.core_limit_temp_degC -
+ msm_thermal_info.core_temp_hysteresis_degC)) {
+ for (i = 0; i < num_possible_cpus(); i++) {
+ if (!(cpus_offlined & BIT(i)))
+ continue;
+ cpus_offlined &= ~BIT(i);
+ pr_info("Allow Online CPU%d Temp: %ld\n",
+ i, temp);
+ /*
+ * If this core is already online, then bring up the
+ * next offlined core.
+ */
+ if (cpu_online(i))
+ continue;
+ /* If this core wasn't previously online, don't put it
+ online */
+ if (!(cpumask_test_cpu(i, cpus_previously_online)))
+ continue;
+ trace_thermal_pre_core_online(i);
+ ret = cpu_up(i);
+ if (ret)
+ pr_err("Error %d online core %d\n",
+ ret, i);
+ trace_thermal_post_core_online(i,
+ cpumask_test_cpu(i, cpu_online_mask));
+ break;
+ }
+ }
+ mutex_unlock(&core_control_mutex);
+}
+/* Call with core_control_mutex locked */
+static int __ref update_offline_cores(int val)
+{
+ uint32_t cpu = 0;
+ int ret = 0;
+ uint32_t previous_cpus_offlined = 0;
+ bool pend_hotplug_req = false;
+
+ if (!core_control_enabled)
+ return 0;
+
+ previous_cpus_offlined = cpus_offlined;
+ cpus_offlined = msm_thermal_info.core_control_mask & val;
+
+ for_each_possible_cpu(cpu) {
+ if (cpus_offlined & BIT(cpu)) {
+ if (!cpu_online(cpu))
+ continue;
+ trace_thermal_pre_core_offline(cpu);
+ ret = cpu_down(cpu);
+ if (ret) {
+ pr_err_ratelimited(
+ "Unable to offline CPU%d. err:%d\n",
+ cpu, ret);
+ pend_hotplug_req = true;
+ } else {
+ pr_debug("Offlined CPU%d\n", cpu);
+ }
+ trace_thermal_post_core_offline(cpu,
+ cpumask_test_cpu(cpu, cpu_online_mask));
+ } else if (online_core && (previous_cpus_offlined & BIT(cpu))) {
+ if (cpu_online(cpu))
+ continue;
+ /* If this core wasn't previously online, don't put it
+ online */
+ if (!(cpumask_test_cpu(cpu, cpus_previously_online)))
+ continue;
+ trace_thermal_pre_core_online(cpu);
+ ret = cpu_up(cpu);
+ if (ret && ret == notifier_to_errno(NOTIFY_BAD)) {
+ pr_debug("Onlining CPU%d is vetoed\n", cpu);
+ } else if (ret) {
+ cpus_offlined |= BIT(cpu);
+ pend_hotplug_req = true;
+ pr_err_ratelimited(
+ "Unable to online CPU%d. err:%d\n",
+ cpu, ret);
+ } else {
+ pr_debug("Onlined CPU%d\n", cpu);
+ trace_thermal_post_core_online(cpu,
+ cpumask_test_cpu(cpu, cpu_online_mask));
+ }
+ }
+ }
+
+ if (pend_hotplug_req && !in_suspend && !retry_in_progress) {
+ retry_in_progress = true;
+ schedule_delayed_work(&retry_hotplug_work,
+ msecs_to_jiffies(HOTPLUG_RETRY_INTERVAL_MS));
+ }
+
+ return ret;
+}
+
+static __ref int do_hotplug(void *data)
+{
+ int ret = 0;
+ uint32_t cpu = 0, mask = 0;
+ struct device_clnt_data *clnt = NULL;
+ struct sched_param param = {.sched_priority = MAX_RT_PRIO-2};
+
+ if (!core_control_enabled) {
+ pr_debug("Core control disabled\n");
+ return -EINVAL;
+ }
+
+ sched_setscheduler(current, SCHED_FIFO, &param);
+ while (!kthread_should_stop()) {
+ while (wait_for_completion_interruptible(
+ &hotplug_notify_complete) != 0)
+ ;
+ reinit_completion(&hotplug_notify_complete);
+ mask = 0;
+
+ mutex_lock(&core_control_mutex);
+ for_each_possible_cpu(cpu) {
+ if (hotplug_enabled &&
+ cpus[cpu].hotplug_thresh_clear) {
+ ret =
+ sensor_mgr_set_threshold(cpus[cpu].sensor_id,
+ &cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH]);
+
+ if (cpus[cpu].offline
+ && !IS_LOW_THRESHOLD_SET(ret))
+ cpus[cpu].offline = 0;
+ cpus[cpu].hotplug_thresh_clear = false;
+ }
+ if (cpus[cpu].offline || cpus[cpu].user_offline)
+ mask |= BIT(cpu);
+ }
+ if (devices && devices->hotplug_dev) {
+ mutex_lock(&devices->hotplug_dev->clnt_lock);
+ for_each_cpu_mask(cpu,
+ devices->hotplug_dev->active_req.offline_mask)
+ mask |= BIT(cpu);
+ mutex_unlock(&devices->hotplug_dev->clnt_lock);
+ }
+ if (mask != cpus_offlined)
+ update_offline_cores(mask);
+ mutex_unlock(&core_control_mutex);
+
+ if (devices && devices->hotplug_dev) {
+ union device_request req;
+
+ req.offline_mask = CPU_MASK_NONE;
+ mutex_lock(&devices->hotplug_dev->clnt_lock);
+ for_each_cpu_mask(cpu,
+ devices->hotplug_dev->active_req.offline_mask)
+ if (mask & BIT(cpu))
+ cpumask_test_and_set_cpu(cpu,
+ &req.offline_mask);
+
+ list_for_each_entry(clnt,
+ &devices->hotplug_dev->client_list,
+ clnt_ptr) {
+ if (clnt->callback)
+ clnt->callback(clnt, &req,
+ clnt->usr_data);
+ }
+ mutex_unlock(&devices->hotplug_dev->clnt_lock);
+ }
+ sysfs_notify(cc_kobj, NULL, "cpus_offlined");
+ }
+
+ return ret;
+}
+#else
+static void __ref do_core_control(long temp)
+{
+ return;
+}
+
+static __ref int do_hotplug(void *data)
+{
+ return 0;
+}
+
+static int __ref update_offline_cores(int val)
+{
+ return 0;
+}
+#endif
+
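+/*
+ * GFX rail phase control: move between the NORMAL, WARM and HOT_CRITICAL
+ * temperature bands with hysteresis, and notify the RPM whenever the
+ * band changes.
+ */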
+static int do_gfx_phase_cond(void)
+{
+ long temp = 0;
+ int ret = 0;
+ uint32_t new_req_band = curr_gfx_band;
+
+ if (!gfx_warm_phase_ctrl_enabled && !gfx_crit_phase_ctrl_enabled)
+ return ret;
+
+ mutex_lock(&gfx_mutex);
+ if (gfx_warm_phase_ctrl_enabled) {
+ ret = therm_get_temp(
+ thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->sensor_id,
+ thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->id_type,
+ &temp);
+ if (ret) {
+ pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+ thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->sensor_id,
+ ret);
+ goto gfx_phase_cond_exit;
+ }
+ } else {
+ ret = therm_get_temp(
+ thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->sensor_id,
+ thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->id_type,
+ &temp);
+ if (ret) {
+ pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+ thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->sensor_id,
+ ret);
+ goto gfx_phase_cond_exit;
+ }
+ }
+
+ switch (curr_gfx_band) {
+ case MSM_HOT_CRITICAL:
+ if (temp < (msm_thermal_info.gfx_phase_hot_temp_degC -
+ msm_thermal_info.gfx_phase_hot_temp_hyst_degC))
+ new_req_band = MSM_WARM;
+ break;
+ case MSM_WARM:
+ if (temp >= msm_thermal_info.gfx_phase_hot_temp_degC)
+ new_req_band = MSM_HOT_CRITICAL;
+ else if (temp < (msm_thermal_info.gfx_phase_warm_temp_degC -
+ msm_thermal_info.gfx_phase_warm_temp_hyst_degC))
+ new_req_band = MSM_NORMAL;
+ break;
+ case MSM_NORMAL:
+ if (temp >= msm_thermal_info.gfx_phase_warm_temp_degC)
+ new_req_band = MSM_WARM;
+ break;
+ default:
+ if (temp >= msm_thermal_info.gfx_phase_hot_temp_degC)
+ new_req_band = MSM_HOT_CRITICAL;
+ else if (temp >= msm_thermal_info.gfx_phase_warm_temp_degC)
+ new_req_band = MSM_WARM;
+ else
+ new_req_band = MSM_NORMAL;
+ break;
+ }
+
+ if (new_req_band != curr_gfx_band) {
+ ret = send_temperature_band(MSM_GFX_PHASE_CTRL, new_req_band);
+ if (!ret) {
+ pr_debug("Reached %d band. Temp:%ld\n", new_req_band,
+ temp);
+ curr_gfx_band = new_req_band;
+ } else {
+ pr_err("Error sending temp. band:%d. Temp:%ld. err:%d",
+ new_req_band, temp, ret);
+ }
+ }
+
+gfx_phase_cond_exit:
+ mutex_unlock(&gfx_mutex);
+ return ret;
+}
+
+static int do_cx_phase_cond(void)
+{
+ long temp = 0;
+ int i, ret = 0, dis_cnt = 0;
+
+ if (!cx_phase_ctrl_enabled)
+ return ret;
+
+ mutex_lock(&cx_mutex);
+ for (i = 0; i < thresh[MSM_CX_PHASE_CTRL_HOT].thresh_ct; i++) {
+ ret = therm_get_temp(
+ thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].sensor_id,
+ thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].id_type,
+ &temp);
+ if (ret) {
+ pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+ thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list[i].sensor_id,
+ ret);
+ dis_cnt++;
+ continue;
+ }
+
+ if (temp >= msm_thermal_info.cx_phase_hot_temp_degC) {
+ if (curr_cx_band != MSM_HOT_CRITICAL) {
+ ret = send_temperature_band(MSM_CX_PHASE_CTRL,
+ MSM_HOT_CRITICAL);
+ if (!ret) {
+ pr_debug("band:HOT_CRITICAL Temp:%ld\n",
+ temp);
+ curr_cx_band = MSM_HOT_CRITICAL;
+ } else {
+ pr_err("Error %d sending HOT_CRITICAL",
+ ret);
+ }
+ }
+ goto cx_phase_cond_exit;
+ } else if (temp < (msm_thermal_info.cx_phase_hot_temp_degC -
+ msm_thermal_info.cx_phase_hot_temp_hyst_degC))
+ dis_cnt++;
+ }
+ if (dis_cnt == max_tsens_num && curr_cx_band != MSM_WARM) {
+ ret = send_temperature_band(MSM_CX_PHASE_CTRL, MSM_WARM);
+ if (!ret) {
+ pr_debug("band:WARM Temp:%ld\n", temp);
+ curr_cx_band = MSM_WARM;
+ } else {
+ pr_err("Error sending WARM temp band. err:%d",
+ ret);
+ }
+ }
+cx_phase_cond_exit:
+ mutex_unlock(&cx_mutex);
+ return ret;
+}
+
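+/*
+ * Optimum current request (OCR) mitigation: request the maximum current
+ * mode when any monitored sensor exceeds ocr_temp_degC, and drop back to
+ * the minimum mode once every sensor is below the hysteresis point.
+ */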
+static int do_ocr(void)
+{
+ long temp = 0;
+ int ret = 0;
+ int i = 0, j = 0;
+ int pfm_cnt = 0;
+
+ if (!ocr_enabled)
+ return ret;
+
+ mutex_lock(&ocr_mutex);
+ for (i = 0; i < thresh[MSM_OCR].thresh_ct; i++) {
+ ret = therm_get_temp(
+ thresh[MSM_OCR].thresh_list[i].sensor_id,
+ thresh[MSM_OCR].thresh_list[i].id_type,
+ &temp);
+ if (ret) {
+ pr_err("Unable to read TSENS sensor %d. err:%d\n",
+ thresh[MSM_OCR].thresh_list[i].sensor_id,
+ ret);
+ pfm_cnt++;
+ continue;
+ }
+
+ if (temp > msm_thermal_info.ocr_temp_degC) {
+ if (ocr_rails[0].init != OPTIMUM_CURRENT_NR)
+ for (j = 0; j < ocr_rail_cnt; j++)
+ ocr_rails[j].init = OPTIMUM_CURRENT_NR;
+ ret = ocr_set_mode_all(OPTIMUM_CURRENT_MAX);
+ if (ret)
+ pr_err("Error setting max ocr. err:%d\n",
+ ret);
+ else
+ pr_debug("Requested MAX OCR. tsens:%d Temp:%ld",
+ thresh[MSM_OCR].thresh_list[i].sensor_id, temp);
+ goto do_ocr_exit;
+ } else if (temp <= (msm_thermal_info.ocr_temp_degC -
+ msm_thermal_info.ocr_temp_hyst_degC))
+ pfm_cnt++;
+ }
+
+ if (pfm_cnt == thresh[MSM_OCR].thresh_ct ||
+ ocr_rails[0].init != OPTIMUM_CURRENT_NR) {
+ /* 'init' not equal to OPTIMUM_CURRENT_NR means this is the
+ ** first polling iteration after device probe. During the first
+ ** iteration, if the temperature is below the set point, clear
+ ** the max current request made and reset 'init'.
+ */
+ if (ocr_rails[0].init != OPTIMUM_CURRENT_NR)
+ for (j = 0; j < ocr_rail_cnt; j++)
+ ocr_rails[j].init = OPTIMUM_CURRENT_NR;
+ ret = ocr_set_mode_all(OPTIMUM_CURRENT_MIN);
+ if (ret) {
+ pr_err("Error setting min ocr. err:%d\n",
+ ret);
+ goto do_ocr_exit;
+ } else {
+ pr_debug("Requested MIN OCR. Temp:%ld", temp);
+ }
+ }
+do_ocr_exit:
+ mutex_unlock(&ocr_mutex);
+ return ret;
+}
+
+static int do_vdd_restriction(void)
+{
+ long temp = 0;
+ int ret = 0;
+ int i = 0;
+ int dis_cnt = 0;
+
+ if (!vdd_rstr_enabled)
+ return ret;
+
+ if (usefreq && !freq_table_get) {
+ if (check_freq_table() && !core_ptr)
+ return ret;
+ }
+ mutex_lock(&vdd_rstr_mutex);
+ for (i = 0; i < thresh[MSM_VDD_RESTRICTION].thresh_ct; i++) {
+ ret = therm_get_temp(
+ thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id,
+ thresh[MSM_VDD_RESTRICTION].thresh_list[i].id_type,
+ &temp);
+ if (ret) {
+ pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+ thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id,
+ ret);
+ dis_cnt++;
+ continue;
+ }
+ if (temp <= msm_thermal_info.vdd_rstr_temp_degC) {
+ ret = vdd_restriction_apply_all(1);
+ if (ret) {
+ pr_err("Enable vdd rstr for all failed. err:%d\n",
+ ret);
+ goto exit;
+ }
+ pr_debug("Enabled Vdd Restriction tsens:%d. Temp:%ld\n",
+ thresh[MSM_VDD_RESTRICTION].thresh_list[i].sensor_id,
+ temp);
+ goto exit;
+ } else if (temp > msm_thermal_info.vdd_rstr_temp_hyst_degC)
+ dis_cnt++;
+ }
+ if (dis_cnt == max_tsens_num) {
+ ret = vdd_restriction_apply_all(0);
+ if (ret) {
+ pr_err("Disable vdd rstr for all failed. err:%d\n",
+ ret);
+ goto exit;
+ }
+ pr_debug("Disabled Vdd Restriction\n");
+ }
+exit:
+ mutex_unlock(&vdd_rstr_mutex);
+ return ret;
+}
+
+static int do_psm(void)
+{
+ long temp = 0;
+ int ret = 0;
+ int i = 0;
+ int auto_cnt = 0;
+
+ if (!psm_enabled)
+ return ret;
+
+ mutex_lock(&psm_mutex);
+ for (i = 0; i < max_tsens_num; i++) {
+ ret = therm_get_temp(tsens_id_map[i], THERM_TSENS_ID, &temp);
+ if (ret) {
+ pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+ tsens_id_map[i], ret);
+ auto_cnt++;
+ continue;
+ }
+
+ /*
+ * As long as one sensor is above the threshold, set PWM mode
+ * on all rails and stop the loop. Set auto mode only when all
+ * sensors are below the threshold.
+ */
+ if (temp > msm_thermal_info.psm_temp_degC) {
+ ret = psm_set_mode_all(PMIC_PWM_MODE);
+ if (ret) {
+ pr_err("Set pwm mode for all failed. err:%d\n",
+ ret);
+ goto exit;
+ }
+ pr_debug("Requested PMIC PWM Mode tsens:%d. Temp:%ld\n",
+ tsens_id_map[i], temp);
+ break;
+ } else if (temp <= msm_thermal_info.psm_temp_hyst_degC)
+ auto_cnt++;
+ }
+
+ if (auto_cnt == max_tsens_num) {
+ ret = psm_set_mode_all(PMIC_AUTO_MODE);
+ if (ret) {
+ pr_err("Set auto mode for all failed. err:%d\n", ret);
+ goto exit;
+ }
+ pr_debug("Requested PMIC AUTO Mode\n");
+ }
+
+exit:
+ mutex_unlock(&psm_mutex);
+ return ret;
+}
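+/*
+ * Worked example (illustrative, assumed set points): with
+ * psm_temp_degC = 85 and psm_temp_hyst_degC = 80, one sensor above 85C
+ * forces PWM mode on every PSM rail immediately; AUTO mode is restored
+ * only after all sensors read 80C or less. Like the vdd restriction
+ * hysteresis above, psm_temp_hyst_degC is an absolute temperature.
+ */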
+
+static void do_freq_control(long temp)
+{
+ uint32_t cpu = 0;
+ uint32_t max_freq = cpus[cpu].limited_max_freq;
+
+ if (core_ptr)
+ return do_cluster_freq_ctrl(temp);
+ if (!freq_table_get)
+ return;
+
+ if (temp >= msm_thermal_info.limit_temp_degC) {
+ if (limit_idx == limit_idx_low)
+ return;
+
+ limit_idx -= msm_thermal_info.bootup_freq_step;
+ if (limit_idx < limit_idx_low)
+ limit_idx = limit_idx_low;
+ } else if (temp < msm_thermal_info.limit_temp_degC -
+ msm_thermal_info.temp_hysteresis_degC) {
+ if (limit_idx == limit_idx_high)
+ return;
+
+ limit_idx += msm_thermal_info.bootup_freq_step;
+ if (limit_idx >= limit_idx_high)
+ limit_idx = limit_idx_high;
+ }
+
+ /* Update new limits */
+ get_online_cpus();
+ max_freq = table[limit_idx].frequency;
+ if (max_freq == cpus[cpu].limited_max_freq) {
+ put_online_cpus();
+ return;
+ }
+
+ for_each_possible_cpu(cpu) {
+ if (!(msm_thermal_info.bootup_freq_control_mask & BIT(cpu)))
+ continue;
+ pr_info("Limiting CPU%d max frequency to %u. Temp:%ld\n",
+ cpu, max_freq, temp);
+ cpus[cpu].limited_max_freq =
+ min(max_freq, cpus[cpu].vdd_max_freq);
+ if (!SYNC_CORE(cpu))
+ update_cpu_freq(cpu);
+ }
+ update_cluster_freq();
+ put_online_cpus();
+}
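+/*
+ * Worked example (illustrative values): with limit_temp_degC = 60,
+ * temp_hysteresis_degC = 10 and bootup_freq_step = 2, a 61C reading
+ * moves limit_idx down two table entries (clamped at limit_idx_low),
+ * lowering the cap for every CPU in bootup_freq_control_mask; readings
+ * in [50C, 60C) leave the index alone; below 50C it steps back up two
+ * entries, clamped at limit_idx_high.
+ */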
+
+static void check_temp(struct work_struct *work)
+{
+ long temp = 0;
+ int ret = 0;
+
+ do_therm_reset();
+
+ ret = therm_get_temp(msm_thermal_info.sensor_id, THERM_TSENS_ID, &temp);
+ if (ret) {
+ pr_err("Unable to read TSENS sensor:%d. err:%d\n",
+ msm_thermal_info.sensor_id, ret);
+ goto reschedule;
+ }
+ do_core_control(temp);
+ do_vdd_mx();
+ do_psm();
+ do_gfx_phase_cond();
+ do_cx_phase_cond();
+ do_ocr();
+
+	/*
+	 * All mitigation involving CPU frequency should be placed below
+	 * this check. The mitigation following this frequency table check
+	 * should be able to handle the failure case.
+	 */
+ if (!freq_table_get)
+ check_freq_table();
+
+ do_vdd_restriction();
+ do_freq_control(temp);
+
+reschedule:
+ if (polling_enabled)
+ schedule_delayed_work(&check_temp_work,
+ msecs_to_jiffies(msm_thermal_info.poll_ms));
+}
+
+static int __ref msm_thermal_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ uint32_t cpu = (uintptr_t)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ if (!cpumask_test_and_set_cpu(cpu, cpus_previously_online))
+ pr_debug("Total prev cores online tracked %u\n",
+ cpumask_weight(cpus_previously_online));
+ if (core_control_enabled &&
+ (msm_thermal_info.core_control_mask & BIT(cpu)) &&
+ (cpus_offlined & BIT(cpu))) {
+ pr_debug("Preventing CPU%d from coming online.\n",
+ cpu);
+ return NOTIFY_BAD;
+ }
+ break;
+ case CPU_DOWN_PREPARE:
+ if (!cpumask_test_and_set_cpu(cpu, cpus_previously_online))
+ pr_debug("Total prev cores online tracked %u\n",
+ cpumask_weight(cpus_previously_online));
+ break;
+ case CPU_ONLINE:
+ if (core_control_enabled &&
+ (msm_thermal_info.core_control_mask & BIT(cpu)) &&
+ (cpus_offlined & BIT(cpu))) {
+ if (hotplug_task) {
+ pr_debug("Re-evaluate and hotplug CPU%d\n",
+ cpu);
+ complete(&hotplug_notify_complete);
+ } else {
+ /*
+ * This will be auto-corrected next time
+ * do_core_control() is called
+ */
+ pr_err("CPU%d online, after thermal veto\n",
+ cpu);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ pr_debug("voting for CPU%d to be online\n", cpu);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata msm_thermal_cpu_notifier = {
+ .notifier_call = msm_thermal_cpu_callback,
+};
+static int hotplug_notify(enum thermal_trip_type type, int temp, void *data)
+{
+ struct cpu_info *cpu_node = (struct cpu_info *)data;
+
+ pr_info_ratelimited("%s reach temp threshold: %d\n",
+ cpu_node->sensor_type, temp);
+
+ if (!(msm_thermal_info.core_control_mask & BIT(cpu_node->cpu)))
+ return 0;
+ switch (type) {
+ case THERMAL_TRIP_CONFIGURABLE_HI:
+ if (!(cpu_node->offline))
+ cpu_node->offline = 1;
+ break;
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
+ if (cpu_node->offline)
+ cpu_node->offline = 0;
+ break;
+ default:
+ break;
+ }
+ if (hotplug_task) {
+ cpu_node->hotplug_thresh_clear = true;
+ complete(&hotplug_notify_complete);
+ } else
+ pr_err("Hotplug task is not initialized\n");
+ return 0;
+}
+/* Adjust cpus offlined bit based on temperature reading. */
+static int hotplug_init_cpu_offlined(void)
+{
+ long temp = 0;
+ uint32_t cpu = 0;
+
+ if (!hotplug_enabled)
+ return 0;
+
+ mutex_lock(&core_control_mutex);
+ for_each_possible_cpu(cpu) {
+ if (!(msm_thermal_info.core_control_mask & BIT(cpus[cpu].cpu)))
+ continue;
+ if (therm_get_temp(cpus[cpu].sensor_id, cpus[cpu].id_type,
+ &temp)) {
+ pr_err("Unable to read TSENS sensor:%d.\n",
+ cpus[cpu].sensor_id);
+ mutex_unlock(&core_control_mutex);
+ return -EINVAL;
+ }
+
+ if (temp >= msm_thermal_info.hotplug_temp_degC)
+ cpus[cpu].offline = 1;
+ else if (temp <= (msm_thermal_info.hotplug_temp_degC -
+ msm_thermal_info.hotplug_temp_hysteresis_degC))
+ cpus[cpu].offline = 0;
+ }
+ mutex_unlock(&core_control_mutex);
+
+ if (hotplug_task)
+ complete(&hotplug_notify_complete);
+ else {
+ pr_err("Hotplug task is not initialized\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void hotplug_init(void)
+{
+ uint32_t cpu = 0;
+ struct sensor_threshold *hi_thresh = NULL, *low_thresh = NULL;
+
+ if (hotplug_task)
+ return;
+
+ if (!hotplug_enabled)
+ goto init_kthread;
+
+ for_each_possible_cpu(cpu) {
+ cpus[cpu].sensor_id =
+ sensor_get_id((char *)cpus[cpu].sensor_type);
+ cpus[cpu].id_type = THERM_ZONE_ID;
+ if (!(msm_thermal_info.core_control_mask & BIT(cpus[cpu].cpu)))
+ continue;
+
+ hi_thresh = &cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH];
+ low_thresh = &cpus[cpu].threshold[HOTPLUG_THRESHOLD_LOW];
+ hi_thresh->temp = (msm_thermal_info.hotplug_temp_degC)
+ * tsens_scaling_factor;
+ hi_thresh->trip = THERMAL_TRIP_CONFIGURABLE_HI;
+ low_thresh->temp = (msm_thermal_info.hotplug_temp_degC -
+ msm_thermal_info.hotplug_temp_hysteresis_degC)
+ * tsens_scaling_factor;
+ low_thresh->trip = THERMAL_TRIP_CONFIGURABLE_LOW;
+ hi_thresh->notify = low_thresh->notify = hotplug_notify;
+ hi_thresh->data = low_thresh->data = (void *)&cpus[cpu];
+
+ sensor_mgr_set_threshold(cpus[cpu].sensor_id, hi_thresh);
+ }
+init_kthread:
+ init_completion(&hotplug_notify_complete);
+ hotplug_task = kthread_run(do_hotplug, NULL, "msm_thermal:hotplug");
+ if (IS_ERR(hotplug_task)) {
+ pr_err("Failed to create do_hotplug thread. err:%ld\n",
+ PTR_ERR(hotplug_task));
+ return;
+ }
+	/*
+	 * Adjust the cpus offlined bit when hotplug initializes so that
+	 * the new cpus offlined state is based on the hotplug threshold
+	 * range.
+	 */
+ if (hotplug_init_cpu_offlined())
+ kthread_stop(hotplug_task);
+}
+
+static __ref int do_freq_mitigation(void *data)
+{
+ int ret = 0;
+ uint32_t cpu = 0, max_freq_req = 0, min_freq_req = 0;
+ struct sched_param param = {.sched_priority = MAX_RT_PRIO-1};
+ struct device_clnt_data *clnt = NULL;
+ struct device_manager_data *cpu_dev = NULL;
+
+ sched_setscheduler(current, SCHED_FIFO, &param);
+ while (!kthread_should_stop()) {
+ while (wait_for_completion_interruptible(
+ &freq_mitigation_complete) != 0)
+ ;
+ reinit_completion(&freq_mitigation_complete);
+
+ for_each_possible_cpu(cpu) {
+ max_freq_req = (cpus[cpu].max_freq) ?
+ msm_thermal_info.freq_limit :
+ UINT_MAX;
+ max_freq_req = min(max_freq_req,
+ cpus[cpu].user_max_freq);
+
+ max_freq_req = min(max_freq_req,
+ cpus[cpu].shutdown_max_freq);
+
+ max_freq_req = min(max_freq_req,
+ cpus[cpu].suspend_max_freq);
+
+ if (devices && devices->cpufreq_dev[cpu]) {
+ cpu_dev = devices->cpufreq_dev[cpu];
+ mutex_lock(&cpu_dev->clnt_lock);
+ max_freq_req = min(max_freq_req,
+ cpu_dev->active_req.freq.max_freq);
+ min_freq_req =
+ cpu_dev->active_req.freq.min_freq;
+ mutex_unlock(&cpu_dev->clnt_lock);
+ }
+
+ if ((max_freq_req == cpus[cpu].limited_max_freq)
+ && (min_freq_req ==
+ cpus[cpu].limited_min_freq))
+ goto reset_threshold;
+
+ cpus[cpu].limited_max_freq = max_freq_req;
+ cpus[cpu].limited_min_freq = min_freq_req;
+ if (!SYNC_CORE(cpu))
+ update_cpu_freq(cpu);
+reset_threshold:
+ if (!SYNC_CORE(cpu) &&
+ devices && devices->cpufreq_dev[cpu]) {
+ union device_request req;
+
+ req.freq.max_freq = max_freq_req;
+ req.freq.min_freq = min_freq_req;
+ cpu_dev = devices->cpufreq_dev[cpu];
+ mutex_lock(&cpu_dev->clnt_lock);
+ list_for_each_entry(clnt,
+ &cpu_dev->client_list,
+ clnt_ptr) {
+ if (clnt->callback)
+ clnt->callback(clnt,
+ &req,
+ clnt->usr_data);
+ }
+ mutex_unlock(&cpu_dev->clnt_lock);
+ }
+ if (freq_mitigation_enabled &&
+ cpus[cpu].freq_thresh_clear) {
+ ret =
+ sensor_mgr_set_threshold(cpus[cpu].sensor_id,
+ &cpus[cpu].threshold[FREQ_THRESHOLD_HIGH]);
+
+ if (cpus[cpu].max_freq
+ && !IS_LOW_THRESHOLD_SET(ret)) {
+ cpus[cpu].max_freq = false;
+ complete(&freq_mitigation_complete);
+ }
+ cpus[cpu].freq_thresh_clear = false;
+ }
+ }
+ update_cluster_freq();
+ }
+ return ret;
+}
+
+static int freq_mitigation_notify(enum thermal_trip_type type,
+ int temp, void *data)
+{
+ struct cpu_info *cpu_node = (struct cpu_info *) data;
+
+ pr_debug("%s reached temp threshold: %d\n",
+ cpu_node->sensor_type, temp);
+
+ if (!(msm_thermal_info.freq_mitig_control_mask &
+ BIT(cpu_node->cpu)))
+ return 0;
+
+ switch (type) {
+ case THERMAL_TRIP_CONFIGURABLE_HI:
+ if (!cpu_node->max_freq) {
+ pr_info_ratelimited(
+ "Mitigating CPU%d frequency to %d\n",
+ cpu_node->cpu, msm_thermal_info.freq_limit);
+
+ cpu_node->max_freq = true;
+ }
+ break;
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
+ if (cpu_node->max_freq) {
+ pr_info_ratelimited(
+ "Removing frequency mitigation for CPU%d\n",
+ cpu_node->cpu);
+
+ cpu_node->max_freq = false;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (freq_mitigation_task) {
+ cpu_node->freq_thresh_clear = true;
+ complete(&freq_mitigation_complete);
+ } else {
+ pr_err("Frequency mitigation task is not initialized\n");
+ }
+
+ return 0;
+}
+
+static void freq_mitigation_init(void)
+{
+ uint32_t cpu = 0;
+ struct sensor_threshold *hi_thresh = NULL, *low_thresh = NULL;
+
+ if (freq_mitigation_task)
+ return;
+ if (!freq_mitigation_enabled)
+ goto init_freq_thread;
+
+ for_each_possible_cpu(cpu) {
+ /*
+ * Hotplug may not be enabled,
+ * make sure core sensor id is initialized.
+ */
+ cpus[cpu].sensor_id =
+ sensor_get_id((char *)cpus[cpu].sensor_type);
+ cpus[cpu].id_type = THERM_ZONE_ID;
+ if (!(msm_thermal_info.freq_mitig_control_mask & BIT(cpu)))
+ continue;
+ hi_thresh = &cpus[cpu].threshold[FREQ_THRESHOLD_HIGH];
+ low_thresh = &cpus[cpu].threshold[FREQ_THRESHOLD_LOW];
+
+ hi_thresh->temp = msm_thermal_info.freq_mitig_temp_degc
+ * tsens_scaling_factor;
+ hi_thresh->trip = THERMAL_TRIP_CONFIGURABLE_HI;
+ low_thresh->temp = (msm_thermal_info.freq_mitig_temp_degc -
+ msm_thermal_info.freq_mitig_temp_hysteresis_degc)
+ * tsens_scaling_factor;
+ low_thresh->trip = THERMAL_TRIP_CONFIGURABLE_LOW;
+ hi_thresh->notify = low_thresh->notify =
+ freq_mitigation_notify;
+ hi_thresh->data = low_thresh->data = (void *)&cpus[cpu];
+
+ sensor_mgr_set_threshold(cpus[cpu].sensor_id, hi_thresh);
+ }
+init_freq_thread:
+ init_completion(&freq_mitigation_complete);
+ freq_mitigation_task = kthread_run(do_freq_mitigation, NULL,
+ "msm_thermal:freq_mitig");
+
+ if (IS_ERR(freq_mitigation_task)) {
+ pr_err("Failed to create frequency mitigation thread. err:%ld\n",
+ PTR_ERR(freq_mitigation_task));
+ return;
+ }
+}
+
+int msm_thermal_get_freq_plan_size(uint32_t cluster, unsigned int *table_len)
+{
+ uint32_t i = 0;
+ struct cluster_info *cluster_ptr = NULL;
+
+ if (!core_ptr) {
+ pr_err("Topology ptr not initialized\n");
+ return -ENODEV;
+ }
+ if (!table_len) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+ if (!freq_table_get)
+ check_freq_table();
+
+ for (; i < core_ptr->entity_count; i++) {
+ cluster_ptr = &core_ptr->child_entity_ptr[i];
+ if (cluster_ptr->cluster_id == cluster) {
+ if (!cluster_ptr->freq_table) {
+ pr_err("Cluster%d clock plan not initialized\n",
+ cluster);
+ return -EINVAL;
+ }
+ *table_len = cluster_ptr->freq_idx_high + 1;
+ return 0;
+ }
+ }
+
+ pr_err("Invalid cluster ID:%d\n", cluster);
+ return -EINVAL;
+}
+
+int msm_thermal_get_cluster_voltage_plan(uint32_t cluster, uint32_t *table_ptr)
+{
+ int i = 0, corner = 0;
+ struct dev_pm_opp *opp = NULL;
+ unsigned int table_len = 0;
+ struct device *cpu_dev = NULL;
+ struct cluster_info *cluster_ptr = NULL;
+
+ if (!core_ptr) {
+ pr_err("Topology ptr not initialized\n");
+ return -ENODEV;
+ }
+ if (!table_ptr) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+ if (!freq_table_get)
+ check_freq_table();
+
+ for (i = 0; i < core_ptr->entity_count; i++) {
+ cluster_ptr = &core_ptr->child_entity_ptr[i];
+ if (cluster_ptr->cluster_id == cluster)
+ break;
+ }
+ if (i == core_ptr->entity_count) {
+ pr_err("Invalid cluster ID:%d\n", cluster);
+ return -EINVAL;
+ }
+ if (!cluster_ptr->freq_table) {
+ pr_err("Cluster%d clock plan not initialized\n", cluster);
+ return -EINVAL;
+ }
+
+ cpu_dev = get_cpu_device(first_cpu(cluster_ptr->cluster_cores));
+ table_len = cluster_ptr->freq_idx_high + 1;
+
+	rcu_read_lock();
+	for (i = 0; i < table_len; i++) {
+		opp = dev_pm_opp_find_freq_exact(cpu_dev,
+			cluster_ptr->freq_table[i].frequency * 1000, true);
+		if (IS_ERR(opp)) {
+			pr_err("Error on OPP freq :%d\n",
+				cluster_ptr->freq_table[i].frequency);
+			rcu_read_unlock();
+			return -EINVAL;
+		}
+		corner = dev_pm_opp_get_voltage(opp);
+		if (corner == 0) {
+			pr_err("Bad voltage corner for OPP freq :%d\n",
+				cluster_ptr->freq_table[i].frequency);
+			rcu_read_unlock();
+			return -EINVAL;
+		}
+		table_ptr[i] = corner / 1000;
+		pr_debug("Cluster:%d freq:%d KHz voltage:%d mV\n",
+			cluster, cluster_ptr->freq_table[i].frequency,
+			table_ptr[i]);
+	}
+	rcu_read_unlock();
+
+ return 0;
+}
+
+int msm_thermal_get_cluster_freq_plan(uint32_t cluster, unsigned int *table_ptr)
+{
+ uint32_t i = 0;
+ struct cluster_info *cluster_ptr = NULL;
+
+ if (!core_ptr) {
+ pr_err("Topology ptr not initialized\n");
+ return -ENODEV;
+ }
+ if (!table_ptr) {
+ pr_err("Invalid input\n");
+ return -EINVAL;
+ }
+ if (!freq_table_get)
+ check_freq_table();
+
+ for (; i < core_ptr->entity_count; i++) {
+ cluster_ptr = &core_ptr->child_entity_ptr[i];
+ if (cluster_ptr->cluster_id == cluster)
+ break;
+ }
+ if (i == core_ptr->entity_count) {
+ pr_err("Invalid cluster ID:%d\n", cluster);
+ return -EINVAL;
+ }
+ if (!cluster_ptr->freq_table) {
+ pr_err("Cluster%d clock plan not initialized\n", cluster);
+ return -EINVAL;
+ }
+
+ for (i = 0; i <= cluster_ptr->freq_idx_high; i++)
+ table_ptr[i] = cluster_ptr->freq_table[i].frequency;
+
+ return 0;
+}
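+/*
+ * Usage sketch (illustrative; cluster id 0 and the print are
+ * assumptions): size the buffer first, then fetch the plan.
+ *
+ *	unsigned int len = 0, *freqs;
+ *
+ *	if (msm_thermal_get_freq_plan_size(0, &len) || !len)
+ *		return;
+ *	freqs = kcalloc(len, sizeof(*freqs), GFP_KERNEL);
+ *	if (freqs && !msm_thermal_get_cluster_freq_plan(0, freqs))
+ *		pr_info("cluster0 fmax:%u KHz\n", freqs[len - 1]);
+ *	kfree(freqs);
+ */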
+
+int msm_thermal_set_cluster_freq(uint32_t cluster, uint32_t freq, bool is_max)
+{
+ int ret = 0;
+ uint32_t i = 0;
+ struct cluster_info *cluster_ptr = NULL;
+ bool notify = false;
+
+ if (!mitigation) {
+ pr_err("Thermal Mitigations disabled.\n");
+ return -ENODEV;
+ }
+
+ if (!core_ptr) {
+ pr_err("Topology ptr not initialized\n");
+ return -ENODEV;
+ }
+
+ for (; i < core_ptr->entity_count; i++) {
+ cluster_ptr = &core_ptr->child_entity_ptr[i];
+ if (cluster_ptr->cluster_id != cluster)
+ continue;
+ if (!cluster_ptr->sync_cluster) {
+ pr_err("Cluster%d is not synchronous\n", cluster);
+ return -EINVAL;
+ } else {
+ pr_debug("Update Cluster%d %s frequency to %d\n",
+ cluster, (is_max) ? "max" : "min", freq);
+ break;
+ }
+ }
+ if (i == core_ptr->entity_count) {
+ pr_err("Invalid cluster ID:%d\n", cluster);
+ return -EINVAL;
+ }
+
+ for_each_cpu_mask(i, cluster_ptr->cluster_cores) {
+ uint32_t *freq_ptr = (is_max) ? &cpus[i].user_max_freq
+ : &cpus[i].user_min_freq;
+ if (*freq_ptr == freq)
+ continue;
+ notify = true;
+ *freq_ptr = freq;
+ }
+
+ if (freq_mitigation_task) {
+ if (notify)
+ complete(&freq_mitigation_complete);
+ } else {
+ pr_err("Frequency mitigation task is not initialized\n");
+ return -ESRCH;
+ }
+
+ return ret;
+}
+
+int msm_thermal_set_frequency(uint32_t cpu, uint32_t freq, bool is_max)
+{
+ int ret = 0;
+
+ if (!mitigation) {
+ pr_err("Thermal Mitigations disabled.\n");
+ goto set_freq_exit;
+ }
+
+ if (cpu >= num_possible_cpus()) {
+ pr_err("Invalid input\n");
+ ret = -EINVAL;
+ goto set_freq_exit;
+ }
+
+ pr_debug("Userspace requested %s frequency %u for CPU%u\n",
+ (is_max) ? "Max" : "Min", freq, cpu);
+ if (is_max) {
+ if (cpus[cpu].user_max_freq == freq)
+ goto set_freq_exit;
+
+ cpus[cpu].user_max_freq = freq;
+ } else {
+ if (cpus[cpu].user_min_freq == freq)
+ goto set_freq_exit;
+
+ cpus[cpu].user_min_freq = freq;
+ }
+
+ if (freq_mitigation_task) {
+ complete(&freq_mitigation_complete);
+ } else {
+ pr_err("Frequency mitigation task is not initialized\n");
+ ret = -ESRCH;
+ goto set_freq_exit;
+ }
+
+set_freq_exit:
+ return ret;
+}
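+/*
+ * Usage sketch (illustrative; the 1190400 KHz cap is an assumption):
+ * cap CPU0, then lift the cap by writing back the no-mitigation
+ * defaults (UINT_MAX for max, 0 for min).
+ *
+ *	ret = msm_thermal_set_frequency(0, 1190400, true);
+ *	...
+ *	ret = msm_thermal_set_frequency(0, UINT_MAX, true);
+ */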
+
+int therm_set_threshold(struct threshold_info *thresh_inp)
+{
+ int ret = 0, i = 0, err = 0;
+ struct therm_threshold *thresh_ptr;
+
+ if (!thresh_inp) {
+ pr_err("Invalid input\n");
+ ret = -EINVAL;
+ goto therm_set_exit;
+ }
+
+ thresh_inp->thresh_triggered = false;
+ for (i = 0; i < thresh_inp->thresh_ct; i++) {
+ thresh_ptr = &thresh_inp->thresh_list[i];
+ thresh_ptr->trip_triggered = -1;
+ err = sensor_mgr_set_threshold(thresh_ptr->sensor_id,
+ thresh_ptr->threshold);
+ if (err < 0) {
+ ret = err;
+ err = 0;
+ }
+ }
+
+therm_set_exit:
+ return ret;
+}
+
+static void cx_phase_ctrl_notify(struct therm_threshold *trig_thresh)
+{
+ static uint32_t cx_sens_status;
+ int ret = 0;
+
+ if (!cx_phase_ctrl_enabled)
+ return;
+
+ if (trig_thresh->trip_triggered < 0)
+ goto cx_phase_ctrl_exit;
+
+ mutex_lock(&cx_mutex);
+ pr_debug("sensor:%d reached %s thresh for CX\n",
+ tsens_id_map[trig_thresh->sensor_id],
+ (trig_thresh->trip_triggered == THERMAL_TRIP_CONFIGURABLE_HI) ?
+ "hot critical" : "warm");
+
+ switch (trig_thresh->trip_triggered) {
+ case THERMAL_TRIP_CONFIGURABLE_HI:
+ cx_sens_status |= BIT(trig_thresh->sensor_id);
+ break;
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
+ if (cx_sens_status & BIT(trig_thresh->sensor_id))
+ cx_sens_status ^= BIT(trig_thresh->sensor_id);
+ break;
+ default:
+ pr_err("Unsupported trip type\n");
+ goto cx_phase_unlock_exit;
+ break;
+ }
+
+ if ((cx_sens_status && (curr_cx_band == MSM_HOT_CRITICAL)) ||
+ (!cx_sens_status && (curr_cx_band == MSM_WARM)))
+ goto cx_phase_unlock_exit;
+ ret = send_temperature_band(MSM_CX_PHASE_CTRL, (cx_sens_status) ?
+ MSM_HOT_CRITICAL : MSM_WARM);
+ if (!ret)
+ curr_cx_band = (cx_sens_status) ? MSM_HOT_CRITICAL : MSM_WARM;
+
+cx_phase_unlock_exit:
+ mutex_unlock(&cx_mutex);
+cx_phase_ctrl_exit:
+ sensor_mgr_set_threshold(trig_thresh->sensor_id,
+ trig_thresh->threshold);
+ return;
+}
+
+static void gfx_phase_ctrl_notify(struct therm_threshold *trig_thresh)
+{
+ uint32_t new_req_band = curr_gfx_band;
+ int ret = 0;
+
+ if (!gfx_warm_phase_ctrl_enabled && !gfx_crit_phase_ctrl_enabled)
+ return;
+
+ if (trig_thresh->trip_triggered < 0)
+ goto gfx_phase_ctrl_exit;
+
+ mutex_lock(&gfx_mutex);
+ if (gfx_crit_phase_ctrl_enabled) {
+ switch (
+ thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list->trip_triggered) {
+ case THERMAL_TRIP_CONFIGURABLE_HI:
+ new_req_band = MSM_HOT_CRITICAL;
+ pr_debug(
+ "sensor:%d reached hot critical thresh for GFX\n",
+ tsens_id_map[trig_thresh->sensor_id]);
+ goto notify_new_band;
+ break;
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
+ new_req_band = MSM_WARM;
+ pr_debug("sensor:%d reached warm thresh for GFX\n",
+ tsens_id_map[trig_thresh->sensor_id]);
+ goto notify_new_band;
+ break;
+ default:
+ break;
+ }
+ }
+ if (gfx_warm_phase_ctrl_enabled) {
+ switch (
+ thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list->trip_triggered) {
+ case THERMAL_TRIP_CONFIGURABLE_HI:
+ new_req_band = MSM_WARM;
+ pr_debug("sensor:%d reached warm thresh for GFX\n",
+ tsens_id_map[trig_thresh->sensor_id]);
+ goto notify_new_band;
+ break;
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
+ new_req_band = MSM_NORMAL;
+ pr_debug("sensor:%d reached normal thresh for GFX\n",
+ tsens_id_map[trig_thresh->sensor_id]);
+ goto notify_new_band;
+ break;
+ default:
+ break;
+ }
+ }
+
+notify_new_band:
+ if (new_req_band != curr_gfx_band) {
+ ret = send_temperature_band(MSM_GFX_PHASE_CTRL, new_req_band);
+ if (!ret)
+ curr_gfx_band = new_req_band;
+ }
+ mutex_unlock(&gfx_mutex);
+gfx_phase_ctrl_exit:
+ switch (curr_gfx_band) {
+ case MSM_HOT_CRITICAL:
+ if (gfx_crit_phase_ctrl_enabled)
+ therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_HOT]);
+ break;
+ case MSM_NORMAL:
+ if (gfx_warm_phase_ctrl_enabled)
+ therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_WARM]);
+ break;
+ case MSM_WARM:
+ default:
+ if (gfx_crit_phase_ctrl_enabled)
+ therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_HOT]);
+ if (gfx_warm_phase_ctrl_enabled)
+ therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_WARM]);
+ break;
+ }
+ return;
+}
+
+static void vdd_restriction_notify(struct therm_threshold *trig_thresh)
+{
+ int ret = 0;
+ static uint32_t vdd_sens_status;
+
+ if (!vdd_rstr_enabled)
+ return;
+ if (!trig_thresh) {
+ pr_err("Invalid input\n");
+ return;
+ }
+ if (trig_thresh->trip_triggered < 0)
+ goto set_and_exit;
+
+ mutex_lock(&vdd_rstr_mutex);
+ pr_debug("sensor:%d reached %s thresh for Vdd restriction\n",
+ tsens_id_map[trig_thresh->sensor_id],
+ (trig_thresh->trip_triggered == THERMAL_TRIP_CONFIGURABLE_HI) ?
+ "high" : "low");
+ switch (trig_thresh->trip_triggered) {
+ case THERMAL_TRIP_CONFIGURABLE_HI:
+ if (vdd_sens_status & BIT(trig_thresh->sensor_id))
+ vdd_sens_status ^= BIT(trig_thresh->sensor_id);
+ break;
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
+ vdd_sens_status |= BIT(trig_thresh->sensor_id);
+ break;
+ default:
+ pr_err("Unsupported trip type\n");
+ goto unlock_and_exit;
+ break;
+ }
+
+ ret = vdd_restriction_apply_all((vdd_sens_status) ? 1 : 0);
+ if (ret) {
+		pr_err("%s vdd rstr voltage for all failed\n",
+			(vdd_sens_status) ?
+			"Enable" : "Disable");
+ goto unlock_and_exit;
+ }
+
+unlock_and_exit:
+ mutex_unlock(&vdd_rstr_mutex);
+set_and_exit:
+ sensor_mgr_set_threshold(trig_thresh->sensor_id,
+ trig_thresh->threshold);
+ return;
+}
+
+static void ocr_notify(struct therm_threshold *trig_thresh)
+{
+ int ret = 0;
+ static uint32_t ocr_sens_status;
+
+ if (!ocr_enabled)
+ return;
+ if (!trig_thresh) {
+ pr_err("Invalid input\n");
+ return;
+ }
+ if (trig_thresh->trip_triggered < 0)
+ goto set_and_exit;
+
+ mutex_lock(&ocr_mutex);
+ pr_debug("sensor%d reached %d thresh for Optimum current request\n",
+ tsens_id_map[trig_thresh->sensor_id],
+ trig_thresh->trip_triggered);
+ switch (trig_thresh->trip_triggered) {
+ case THERMAL_TRIP_CONFIGURABLE_HI:
+ ocr_sens_status |= BIT(trig_thresh->sensor_id);
+ break;
+ case THERMAL_TRIP_CONFIGURABLE_LOW:
+ if (ocr_sens_status & BIT(trig_thresh->sensor_id))
+ ocr_sens_status ^= BIT(trig_thresh->sensor_id);
+ break;
+ default:
+ pr_err("Unsupported trip type\n");
+ goto unlock_and_exit;
+ break;
+ }
+
+ ret = ocr_set_mode_all(ocr_sens_status ? OPTIMUM_CURRENT_MAX :
+ OPTIMUM_CURRENT_MIN);
+ if (ret) {
+ pr_err("%s Optimum current mode for all failed. err:%d\n",
+ (ocr_sens_status) ?
+ "Enable" : "Disable", ret);
+ goto unlock_and_exit;
+ }
+
+unlock_and_exit:
+ mutex_unlock(&ocr_mutex);
+set_and_exit:
+ sensor_mgr_set_threshold(trig_thresh->sensor_id,
+ trig_thresh->threshold);
+ return;
+}
+
+static __ref int do_thermal_monitor(void *data)
+{
+ int ret = 0, j;
+ struct therm_threshold *sensor_list;
+ struct threshold_info *thresholds = NULL;
+
+ while (!kthread_should_stop()) {
+ while (wait_for_completion_interruptible(
+ &thermal_monitor_complete) != 0)
+ ;
+ reinit_completion(&thermal_monitor_complete);
+
+ mutex_lock(&threshold_mutex);
+ list_for_each_entry(thresholds, &thresholds_list, list_ptr) {
+ if (!thresholds->thresh_triggered)
+ continue;
+ thresholds->thresh_triggered = false;
+ for (j = 0; j < thresholds->thresh_ct; j++) {
+ sensor_list = &thresholds->thresh_list[j];
+ if (sensor_list->trip_triggered < 0)
+ continue;
+ sensor_list->notify(sensor_list);
+ sensor_list->trip_triggered = -1;
+ }
+ }
+ mutex_unlock(&threshold_mutex);
+ }
+ return ret;
+}
+
+static int vdd_rstr_apss_freq_dev_init(void)
+{
+ int idx = 0, ret = 0;
+ char device_str[DEVM_NAME_MAX] = "";
+ struct rail *r = NULL;
+
+ for (idx = 0; idx < rails_cnt; idx++) {
+ if (rails[idx].freq_req) {
+ r = &rails[idx];
+ break;
+ }
+ }
+ if (!r) {
+ pr_err("APSS rail not initialized\n");
+ return -ENODEV;
+ }
+
+ for_each_possible_cpu(idx) {
+ if (r->device_handle[idx])
+ continue;
+ snprintf(device_str, DEVM_NAME_MAX, CPU_DEVICE, idx);
+ r->device_handle[idx]
+ = devmgr_register_mitigation_client(
+ &msm_thermal_info.pdev->dev,
+ device_str, NULL);
+ if (IS_ERR(r->device_handle[idx])) {
+ ret = PTR_ERR(r->device_handle[idx]);
+ pr_err("Error registering %s handle. err:%d\n",
+ device_str, ret);
+ goto freq_init_exit;
+ }
+ r->request[idx].freq.max_freq = CPUFREQ_MAX_NO_MITIGATION;
+ r->request[idx].freq.min_freq = CPUFREQ_MIN_NO_MITIGATION;
+ }
+
+freq_init_exit:
+ if (ret) {
+ for_each_possible_cpu(idx) {
+ devmgr_unregister_mitigation_client(
+ &msm_thermal_info.pdev->dev,
+ r->device_handle[idx]);
+ r->device_handle[idx] = NULL;
+ }
+ }
+ return ret;
+}
+
+static int convert_to_zone_id(struct threshold_info *thresh_inp)
+{
+ int ret = 0, i, zone_id;
+ struct therm_threshold *thresh_array;
+
+ if (!thresh_inp) {
+ pr_err("Invalid input\n");
+ ret = -EINVAL;
+ goto convert_to_exit;
+ }
+ thresh_array = thresh_inp->thresh_list;
+
+ for (i = 0; i < thresh_inp->thresh_ct; i++) {
+ char tsens_name[TSENS_NAME_MAX] = "";
+
+ if (thresh_array[i].id_type == THERM_ZONE_ID)
+ continue;
+ snprintf(tsens_name, TSENS_NAME_MAX, TSENS_NAME_FORMAT,
+ thresh_array[i].sensor_id);
+ zone_id = sensor_get_id(tsens_name);
+ if (zone_id < 0) {
+			pr_err("Error getting zone id for %s. err:%d\n",
+				tsens_name, zone_id);
+			ret = zone_id;
+ goto convert_to_exit;
+ }
+ thresh_array[i].sensor_id = zone_id;
+ thresh_array[i].id_type = THERM_ZONE_ID;
+ }
+
+convert_to_exit:
+ return ret;
+}
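+/*
+ * Example (illustrative): an entry with sensor_id 5 and id_type
+ * THERM_TSENS_ID has its name built via TSENS_NAME_FORMAT, is renamed
+ * to the matching thermal zone id, and its id_type flipped to
+ * THERM_ZONE_ID, so later sensor_mgr_set_threshold() calls always
+ * operate on zone ids.
+ */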
+
+int sensor_mgr_convert_id_and_set_threshold(struct threshold_info *thresh_inp)
+{
+ int ret = 0;
+
+ if (!thresh_inp) {
+ pr_err("Invalid input\n");
+ ret = -EINVAL;
+ goto therm_set_exit;
+ }
+ ret = convert_to_zone_id(thresh_inp);
+ if (ret)
+ goto therm_set_exit;
+ ret = therm_set_threshold(thresh_inp);
+
+therm_set_exit:
+ return ret;
+}
+
+static void thermal_monitor_init(void)
+{
+ if (thermal_monitor_task)
+ return;
+
+ init_completion(&thermal_monitor_complete);
+ thermal_monitor_task = kthread_run(do_thermal_monitor, NULL,
+ "msm_thermal:therm_monitor");
+ if (IS_ERR(thermal_monitor_task)) {
+ pr_err("Failed to create thermal monitor thread. err:%ld\n",
+ PTR_ERR(thermal_monitor_task));
+ goto init_exit;
+ }
+
+ if (therm_reset_enabled &&
+ !(convert_to_zone_id(&thresh[MSM_THERM_RESET])))
+ therm_set_threshold(&thresh[MSM_THERM_RESET]);
+
+ if ((cx_phase_ctrl_enabled) &&
+ !(convert_to_zone_id(&thresh[MSM_CX_PHASE_CTRL_HOT])))
+ therm_set_threshold(&thresh[MSM_CX_PHASE_CTRL_HOT]);
+
+ if (vdd_rstr_enabled) {
+ if (vdd_rstr_apss_freq_dev_init())
+ pr_err("vdd APSS mitigation device init failed\n");
+ else if (!(convert_to_zone_id(&thresh[MSM_VDD_RESTRICTION])))
+ therm_set_threshold(&thresh[MSM_VDD_RESTRICTION]);
+ }
+
+ if ((gfx_warm_phase_ctrl_enabled) &&
+ !(convert_to_zone_id(&thresh[MSM_GFX_PHASE_CTRL_WARM]))) {
+ therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_WARM]);
+ }
+
+ if ((gfx_crit_phase_ctrl_enabled) &&
+ !(convert_to_zone_id(&thresh[MSM_GFX_PHASE_CTRL_HOT]))) {
+ therm_set_threshold(&thresh[MSM_GFX_PHASE_CTRL_HOT]);
+ }
+
+ if ((ocr_enabled) &&
+ !(convert_to_zone_id(&thresh[MSM_OCR])))
+ therm_set_threshold(&thresh[MSM_OCR]);
+
+ if (vdd_mx_enabled &&
+ !(convert_to_zone_id(&thresh[MSM_VDD_MX_RESTRICTION])))
+ therm_set_threshold(&thresh[MSM_VDD_MX_RESTRICTION]);
+
+init_exit:
+ return;
+}
+
+static int msm_thermal_notify(enum thermal_trip_type type, int temp, void *data)
+{
+ struct therm_threshold *thresh_data = (struct therm_threshold *)data;
+
+ if (thermal_monitor_task) {
+ thresh_data->trip_triggered = type;
+ thresh_data->parent->thresh_triggered = true;
+ complete(&thermal_monitor_complete);
+ } else {
+ pr_err("Thermal monitor task is not initialized\n");
+ }
+ return 0;
+}
+
+int sensor_mgr_init_threshold(struct threshold_info *thresh_inp,
+ int sensor_id, int32_t high_temp, int32_t low_temp,
+ void (*callback)(struct therm_threshold *))
+{
+ int ret = 0, i;
+ struct therm_threshold *thresh_ptr;
+
+ if (!callback || !thresh_inp
+ || sensor_id == -ENODEV) {
+ pr_err("Invalid input\n");
+ ret = -EINVAL;
+ goto init_thresh_exit;
+ }
+ if (thresh_inp->thresh_list) {
+ pr_info("threshold id already initialized\n");
+ goto init_thresh_exit;
+ }
+
+ thresh_inp->thresh_ct = (sensor_id == MONITOR_ALL_TSENS) ?
+ max_tsens_num : 1;
+ thresh_inp->thresh_triggered = false;
+ thresh_inp->thresh_list = kzalloc(sizeof(struct therm_threshold) *
+ thresh_inp->thresh_ct, GFP_KERNEL);
+ if (!thresh_inp->thresh_list) {
+ pr_err("kzalloc failed for thresh\n");
+ ret = -ENOMEM;
+ goto init_thresh_exit;
+ }
+
+ thresh_ptr = thresh_inp->thresh_list;
+ if (sensor_id == MONITOR_ALL_TSENS) {
+ for (i = 0; i < max_tsens_num; i++) {
+ thresh_ptr[i].sensor_id = tsens_id_map[i];
+ thresh_ptr[i].id_type = THERM_TSENS_ID;
+ thresh_ptr[i].notify = callback;
+ thresh_ptr[i].trip_triggered = -1;
+ thresh_ptr[i].parent = thresh_inp;
+ thresh_ptr[i].threshold[0].temp =
+ high_temp * tsens_scaling_factor;
+ thresh_ptr[i].threshold[0].trip =
+ THERMAL_TRIP_CONFIGURABLE_HI;
+ thresh_ptr[i].threshold[1].temp =
+ low_temp * tsens_scaling_factor;
+ thresh_ptr[i].threshold[1].trip =
+ THERMAL_TRIP_CONFIGURABLE_LOW;
+ thresh_ptr[i].threshold[0].notify =
+ thresh_ptr[i].threshold[1].notify = msm_thermal_notify;
+ thresh_ptr[i].threshold[0].data =
+ thresh_ptr[i].threshold[1].data =
+ (void *)&thresh_ptr[i];
+ }
+ } else {
+ thresh_ptr->sensor_id = sensor_id;
+ thresh_ptr->id_type = THERM_TSENS_ID;
+ thresh_ptr->notify = callback;
+ thresh_ptr->trip_triggered = -1;
+ thresh_ptr->parent = thresh_inp;
+ thresh_ptr->threshold[0].temp = high_temp * tsens_scaling_factor;
+ thresh_ptr->threshold[0].trip =
+ THERMAL_TRIP_CONFIGURABLE_HI;
+ thresh_ptr->threshold[1].temp = low_temp * tsens_scaling_factor;
+ thresh_ptr->threshold[1].trip =
+ THERMAL_TRIP_CONFIGURABLE_LOW;
+ thresh_ptr->threshold[0].notify =
+ thresh_ptr->threshold[1].notify = msm_thermal_notify;
+ thresh_ptr->threshold[0].data =
+ thresh_ptr->threshold[1].data = (void *)thresh_ptr;
+ }
+ mutex_lock(&threshold_mutex);
+ list_add_tail(&thresh_inp->list_ptr, &thresholds_list);
+ mutex_unlock(&threshold_mutex);
+
+init_thresh_exit:
+ return ret;
+}
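+/*
+ * Usage sketch (illustrative; "my_notify", "my_thresh" and the 80/70
+ * degC trip points are assumptions): monitor every TSENS sensor and
+ * re-arm the trip from the callback, mirroring the notify handlers
+ * above.
+ *
+ *	static void my_notify(struct therm_threshold *t)
+ *	{
+ *		pr_debug("sensor:%d tripped:%d\n", t->sensor_id,
+ *			t->trip_triggered);
+ *		sensor_mgr_set_threshold(t->sensor_id, t->threshold);
+ *	}
+ *
+ *	static struct threshold_info my_thresh;
+ *
+ *	ret = sensor_mgr_init_threshold(&my_thresh, MONITOR_ALL_TSENS,
+ *			80, 70, my_notify);
+ *	if (!ret)
+ *		ret = sensor_mgr_convert_id_and_set_threshold(&my_thresh);
+ */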
+
+void sensor_mgr_disable_threshold(struct threshold_info *thresh_inp)
+{
+ int i;
+ struct therm_threshold *thresh_ptr;
+
+ mutex_lock(&threshold_mutex);
+ for (i = 0; i < thresh_inp->thresh_ct; i++) {
+ thresh_ptr = &thresh_inp->thresh_list[i];
+ thresh_ptr->trip_triggered = -1;
+ sensor_cancel_trip(thresh_ptr->sensor_id,
+ &thresh_ptr->threshold[0]);
+ sensor_cancel_trip(thresh_ptr->sensor_id,
+ &thresh_ptr->threshold[1]);
+ }
+ thresh_inp->thresh_triggered = false;
+ mutex_unlock(&threshold_mutex);
+}
+
+void sensor_mgr_remove_threshold(struct threshold_info *thresh_inp)
+{
+ sensor_mgr_disable_threshold(thresh_inp);
+ mutex_lock(&threshold_mutex);
+ kfree(thresh_inp->thresh_list);
+ thresh_inp->thresh_list = NULL;
+ thresh_inp->thresh_ct = 0;
+ list_del(&thresh_inp->list_ptr);
+ mutex_unlock(&threshold_mutex);
+}
+
+static int msm_thermal_add_gfx_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ struct kobject *gfx_kobj = NULL;
+ int ret = 0;
+
+ if (!gfx_warm_phase_ctrl_enabled && !gfx_crit_phase_ctrl_enabled)
+ return -EINVAL;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("cannot find kobject\n");
+ ret = -ENOENT;
+ goto gfx_node_exit;
+ }
+
+ gfx_kobj = kobject_create_and_add("gfx_phase_ctrl", module_kobj);
+ if (!gfx_kobj) {
+ pr_err("cannot create gfx kobject\n");
+ ret = -ENOMEM;
+ goto gfx_node_exit;
+ }
+
+ gfx_attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 2, GFP_KERNEL);
+ if (!gfx_attr_gp.attrs) {
+ pr_err("kzalloc failed\n");
+ ret = -ENOMEM;
+ goto gfx_node_fail;
+ }
+
+ PHASE_RW_ATTR(gfx, temp_band, gfx_mode_attr, 0, gfx_attr_gp);
+ gfx_attr_gp.attrs[1] = NULL;
+
+ ret = sysfs_create_group(gfx_kobj, &gfx_attr_gp);
+ if (ret) {
+ pr_err("cannot create GFX attribute group. err:%d\n", ret);
+ goto gfx_node_fail;
+ }
+
+gfx_node_fail:
+ if (ret) {
+ kobject_put(gfx_kobj);
+ kfree(gfx_attr_gp.attrs);
+ gfx_attr_gp.attrs = NULL;
+ }
+gfx_node_exit:
+ return ret;
+}
+
+static int msm_thermal_add_cx_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ struct kobject *cx_kobj = NULL;
+ int ret = 0;
+
+ if (!cx_phase_ctrl_enabled)
+ return -EINVAL;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("cannot find kobject\n");
+ ret = -ENOENT;
+ goto cx_node_exit;
+ }
+
+ cx_kobj = kobject_create_and_add("cx_phase_ctrl", module_kobj);
+ if (!cx_kobj) {
+ pr_err("cannot create cx kobject\n");
+ ret = -ENOMEM;
+ goto cx_node_exit;
+ }
+
+ cx_attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 2, GFP_KERNEL);
+ if (!cx_attr_gp.attrs) {
+ pr_err("kzalloc failed\n");
+ ret = -ENOMEM;
+ goto cx_node_fail;
+ }
+
+ PHASE_RW_ATTR(cx, temp_band, cx_mode_attr, 0, cx_attr_gp);
+ cx_attr_gp.attrs[1] = NULL;
+
+ ret = sysfs_create_group(cx_kobj, &cx_attr_gp);
+ if (ret) {
+ pr_err("cannot create CX attribute group. err:%d\n", ret);
+ goto cx_node_fail;
+ }
+
+cx_node_fail:
+ if (ret) {
+ kobject_put(cx_kobj);
+ kfree(cx_attr_gp.attrs);
+ cx_attr_gp.attrs = NULL;
+ }
+cx_node_exit:
+ return ret;
+}
+
+/*
+ * We reset the cpu frequency limits here. The core online/offline
+ * status is carried over to the process stopping msm_thermal, as we
+ * don't want to online a core and bring back the thermal issues.
+ */
+static void __ref disable_msm_thermal(void)
+{
+ uint32_t cpu = 0;
+
+ /* make sure check_temp is no longer running */
+ cancel_delayed_work_sync(&check_temp_work);
+
+ get_online_cpus();
+ for_each_possible_cpu(cpu) {
+ if (cpus[cpu].limited_max_freq == UINT_MAX &&
+ cpus[cpu].limited_min_freq == 0)
+ continue;
+ pr_info("Max frequency reset for CPU%d\n", cpu);
+ cpus[cpu].limited_max_freq = UINT_MAX;
+ cpus[cpu].vdd_max_freq = UINT_MAX;
+ cpus[cpu].limited_min_freq = 0;
+ if (!SYNC_CORE(cpu))
+ update_cpu_freq(cpu);
+ }
+ update_cluster_freq();
+ put_online_cpus();
+}
+
+static void interrupt_mode_init(void)
+{
+ if (!msm_thermal_probed) {
+ interrupt_mode_enable = true;
+ return;
+ }
+ if (polling_enabled) {
+ polling_enabled = 0;
+ create_sensor_zone_id_map();
+ disable_msm_thermal();
+ hotplug_init();
+ freq_mitigation_init();
+ thermal_monitor_init();
+ msm_thermal_add_cx_nodes();
+ msm_thermal_add_gfx_nodes();
+ }
+}
+
+static int __ref set_enabled(const char *val, const struct kernel_param *kp)
+{
+ int ret = 0;
+
+ ret = param_set_bool(val, kp);
+ if (!enabled)
+ interrupt_mode_init();
+ else
+ pr_info("no action for enabled = %d\n",
+ enabled);
+
+ pr_info("enabled = %d\n", enabled);
+
+ return ret;
+}
+
+static struct kernel_param_ops module_ops = {
+ .set = set_enabled,
+ .get = param_get_bool,
+};
+
+module_param_cb(enabled, &module_ops, &enabled, 0644);
+MODULE_PARM_DESC(enabled, "enforce thermal limit on cpu");
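+/*
+ * Usage sketch (illustrative; assumes KBUILD_MODNAME is "msm_thermal"):
+ * clearing the parameter switches from boot-time polling to
+ * interrupt-driven monitoring; the transition is one-way.
+ *
+ *	echo 0 > /sys/module/msm_thermal/parameters/enabled
+ */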
+
+static ssize_t show_cc_enabled(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", core_control_enabled);
+}
+
+static ssize_t __ref store_cc_enabled(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ int val = 0;
+ uint32_t cpu = 0;
+
+ if (!mitigation) {
+ pr_err("Thermal Mitigations disabled.\n");
+ goto done_store_cc;
+ }
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret) {
+ pr_err("Invalid input %s. err:%d\n", buf, ret);
+ goto done_store_cc;
+ }
+
+ if (core_control_enabled == !!val)
+ goto done_store_cc;
+
+ core_control_enabled = !!val;
+ if (core_control_enabled) {
+ pr_info("Core control enabled\n");
+ cpus_previously_online_update();
+ register_cpu_notifier(&msm_thermal_cpu_notifier);
+ /*
+ * Re-evaluate thermal core condition, update current status
+ * and set threshold for all cpus.
+ */
+ hotplug_init_cpu_offlined();
+ mutex_lock(&core_control_mutex);
+ update_offline_cores(cpus_offlined);
+ if (hotplug_enabled) {
+ for_each_possible_cpu(cpu) {
+ if (!(msm_thermal_info.core_control_mask &
+ BIT(cpus[cpu].cpu)))
+ continue;
+ sensor_mgr_set_threshold(cpus[cpu].sensor_id,
+ &cpus[cpu].threshold[HOTPLUG_THRESHOLD_HIGH]);
+ }
+ }
+ mutex_unlock(&core_control_mutex);
+ } else {
+ pr_info("Core control disabled\n");
+ unregister_cpu_notifier(&msm_thermal_cpu_notifier);
+ }
+
+done_store_cc:
+ return count;
+}
+
+static ssize_t show_cpus_offlined(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", cpus_offlined);
+}
+
+static ssize_t __ref store_cpus_offlined(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ uint32_t val = 0;
+ uint32_t cpu;
+
+ if (!mitigation) {
+ pr_err("Thermal Mitigations disabled.\n");
+ goto done_cc;
+ }
+ mutex_lock(&core_control_mutex);
+ ret = kstrtouint(buf, 10, &val);
+ if (ret) {
+ pr_err("Invalid input %s. err:%d\n", buf, ret);
+ goto done_cc;
+ }
+
+ if (polling_enabled) {
+ pr_err("Ignoring request; polling thread is enabled.\n");
+ goto done_cc;
+ }
+
+ for_each_possible_cpu(cpu) {
+ if (!(msm_thermal_info.core_control_mask & BIT(cpu)))
+ continue;
+ cpus[cpu].user_offline = !!(val & BIT(cpu));
+ pr_debug("\"%s\"(PID:%i) requests %s CPU%d.\n", current->comm,
+ current->pid, (cpus[cpu].user_offline) ? "offline" :
+ "online", cpu);
+ }
+
+ if (hotplug_task)
+ complete(&hotplug_notify_complete);
+ else
+ pr_err("Hotplug task is not initialized\n");
+done_cc:
+ mutex_unlock(&core_control_mutex);
+ return count;
+}
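+/*
+ * Usage sketch (illustrative; paths assume KBUILD_MODNAME is
+ * "msm_thermal" and that interrupt mode is active): request CPUs 1-3
+ * offline through the core_control nodes. 14 == BIT(1)|BIT(2)|BIT(3);
+ * bits outside core_control_mask are silently ignored.
+ *
+ *	echo 1 > /sys/module/msm_thermal/core_control/enabled
+ *	echo 14 > /sys/module/msm_thermal/core_control/cpus_offlined
+ */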
+
+static __refdata struct kobj_attribute cc_enabled_attr =
+__ATTR(enabled, 0644, show_cc_enabled, store_cc_enabled);
+
+static __refdata struct kobj_attribute cpus_offlined_attr =
+__ATTR(cpus_offlined, 0644, show_cpus_offlined, store_cpus_offlined);
+
+static __refdata struct attribute *cc_attrs[] = {
+ &cc_enabled_attr.attr,
+ &cpus_offlined_attr.attr,
+ NULL,
+};
+
+static __refdata struct attribute_group cc_attr_group = {
+ .attrs = cc_attrs,
+};
+static __init int msm_thermal_add_cc_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ int ret = 0;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("cannot find kobject\n");
+ ret = -ENOENT;
+ goto done_cc_nodes;
+ }
+
+ cc_kobj = kobject_create_and_add("core_control", module_kobj);
+ if (!cc_kobj) {
+ pr_err("cannot create core control kobj\n");
+ ret = -ENOMEM;
+ goto done_cc_nodes;
+ }
+
+ ret = sysfs_create_group(cc_kobj, &cc_attr_group);
+ if (ret) {
+ pr_err("cannot create sysfs group. err:%d\n", ret);
+ goto done_cc_nodes;
+ }
+
+ return 0;
+
+done_cc_nodes:
+ if (cc_kobj)
+		kobject_put(cc_kobj);
+ return ret;
+}
+
+static ssize_t show_mx_enabled(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", vdd_mx_enabled);
+}
+
+static ssize_t __ref store_mx_enabled(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ int val = 0;
+
+ ret = kstrtoint(buf, 10, &val);
+ if (ret) {
+ pr_err("Invalid input %s\n", buf);
+ goto done_store_mx;
+ }
+
+ if (vdd_mx_enabled == !!val)
+ goto done_store_mx;
+
+ vdd_mx_enabled = !!val;
+
+ mutex_lock(&vdd_mx_mutex);
+ if (!vdd_mx_enabled)
+ remove_vdd_mx_restriction();
+ else if (!(convert_to_zone_id(&thresh[MSM_VDD_MX_RESTRICTION])))
+ therm_set_threshold(&thresh[MSM_VDD_MX_RESTRICTION]);
+ mutex_unlock(&vdd_mx_mutex);
+
+done_store_mx:
+ return count;
+}
+
+static __init int msm_thermal_add_mx_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ int ret = 0;
+
+ if (!vdd_mx_enabled)
+ return -EINVAL;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("cannot find kobject for module\n");
+ ret = -ENOENT;
+ goto done_mx_nodes;
+ }
+
+ mx_kobj = kobject_create_and_add("vdd_mx", module_kobj);
+ if (!mx_kobj) {
+ pr_err("cannot create mx restriction kobj\n");
+ ret = -ENOMEM;
+ goto done_mx_nodes;
+ }
+
+ mx_attr_group.attrs = kzalloc(sizeof(struct attribute *) * 2,
+ GFP_KERNEL);
+ if (!mx_attr_group.attrs) {
+ ret = -ENOMEM;
+		pr_err("cannot allocate memory for mx_attr_group.attrs\n");
+ goto done_mx_nodes;
+ }
+
+ MX_RW_ATTR(mx_enabled_attr, enabled, mx_attr_group);
+ mx_attr_group.attrs[1] = NULL;
+
+ ret = sysfs_create_group(mx_kobj, &mx_attr_group);
+ if (ret) {
+ pr_err("cannot create group\n");
+ goto done_mx_nodes;
+ }
+
+done_mx_nodes:
+ if (ret) {
+ if (mx_kobj)
+			kobject_put(mx_kobj);
+ kfree(mx_attr_group.attrs);
+ }
+ return ret;
+}
+
+static void msm_thermal_panic_notifier_init(struct device *dev)
+{
+ int i;
+
+ tsens_temp_at_panic = devm_kzalloc(dev,
+ sizeof(long) * max_tsens_num,
+ GFP_KERNEL);
+ if (!tsens_temp_at_panic) {
+ pr_err("kzalloc failed\n");
+ return;
+ }
+
+ for (i = 0; i < max_tsens_num; i++)
+ tsens_temp_at_panic[i] = LONG_MIN;
+
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &msm_thermal_panic_notifier);
+}
+
+int msm_thermal_pre_init(struct device *dev)
+{
+ int ret = 0;
+
+ if (tsens_is_ready() <= 0) {
+ pr_err("Tsens driver is not ready yet\n");
+ return -EPROBE_DEFER;
+ }
+
+ ret = tsens_get_max_sensor_num(&max_tsens_num);
+ if (ret < 0) {
+ pr_err("failed to get max sensor number, err:%d\n", ret);
+ return ret;
+ }
+
+ if (create_sensor_id_map(dev)) {
+ pr_err("Creating sensor id map failed\n");
+ ret = -EINVAL;
+ goto pre_init_exit;
+ }
+
+	if (!thresh) {
+		/* kzalloc already zero-fills, so no memset is needed */
+		thresh = kzalloc(
+			sizeof(struct threshold_info) * MSM_LIST_MAX_NR,
+			GFP_KERNEL);
+		if (!thresh) {
+			pr_err("kzalloc failed\n");
+			ret = -ENOMEM;
+			goto pre_init_exit;
+		}
+	}
+ mit_config = devm_kzalloc(dev,
+ sizeof(struct msm_thermal_debugfs_thresh_config)
+ * (MSM_LIST_MAX_NR + MAX_CPU_CONFIG), GFP_KERNEL);
+ if (!mit_config) {
+ pr_err("kzalloc failed\n");
+ ret = -ENOMEM;
+ goto pre_init_exit;
+ }
+
+pre_init_exit:
+ return ret;
+}
+
+static int devmgr_devices_init(struct platform_device *pdev)
+{
+ int ret = 0;
+ uint32_t cpu;
+ struct device_manager_data *dev_mgr = NULL;
+
+ devices = devm_kzalloc(&pdev->dev,
+ sizeof(struct devmgr_devices),
+ GFP_KERNEL);
+ if (!devices) {
+ pr_err("Malloc failed for devmgr devices\n");
+ ret = -ENOMEM;
+ goto device_exit;
+ }
+ if (num_possible_cpus() > 1) {
+ /* Add hotplug device */
+ dev_mgr = devm_kzalloc(&pdev->dev,
+ sizeof(struct device_manager_data),
+ GFP_KERNEL);
+ if (!dev_mgr) {
+ pr_err("Malloc failed for hotplug device\n");
+ ret = -ENOMEM;
+ goto device_exit;
+ }
+ snprintf(dev_mgr->device_name,
+ TSENS_NAME_MAX, HOTPLUG_DEVICE);
+ dev_mgr->request_validate =
+ devmgr_hotplug_client_request_validate_and_update;
+ dev_mgr->update = devmgr_client_hotplug_update;
+ HOTPLUG_NO_MITIGATION(&dev_mgr->active_req.offline_mask);
+ mutex_init(&dev_mgr->clnt_lock);
+ INIT_LIST_HEAD(&dev_mgr->client_list);
+ list_add_tail(&dev_mgr->dev_ptr, &devices_list);
+ devices->hotplug_dev = dev_mgr;
+ }
+ /* Add cpu devices */
+ for_each_possible_cpu(cpu) {
+ dev_mgr = devm_kzalloc(&pdev->dev,
+ sizeof(struct device_manager_data),
+ GFP_KERNEL);
+ if (!dev_mgr) {
+ pr_err("Malloc failed for cpu%d device\n", cpu);
+ ret = -ENOMEM;
+ goto device_exit;
+ }
+ snprintf(dev_mgr->device_name, TSENS_NAME_MAX, CPU_DEVICE, cpu);
+ dev_mgr->request_validate =
+ devmgr_cpufreq_client_request_validate_and_update;
+ dev_mgr->update = devmgr_client_cpufreq_update;
+ dev_mgr->active_req.freq.max_freq = CPUFREQ_MAX_NO_MITIGATION;
+ dev_mgr->active_req.freq.min_freq = CPUFREQ_MIN_NO_MITIGATION;
+ mutex_init(&dev_mgr->clnt_lock);
+ INIT_LIST_HEAD(&dev_mgr->client_list);
+ list_add_tail(&dev_mgr->dev_ptr, &devices_list);
+ devices->cpufreq_dev[cpu] = dev_mgr;
+ }
+device_exit:
+ if (ret) {
+ if (devices) {
+ if (devices->hotplug_dev)
+ devm_kfree(&pdev->dev,
+ devices->hotplug_dev);
+ for_each_possible_cpu(cpu) {
+ if (devices->cpufreq_dev[cpu])
+ devm_kfree(&pdev->dev,
+ devices->cpufreq_dev[cpu]);
+ }
+ }
+ }
+ return ret;
+}
+
+static void msm_thermal_init_cpu_mit(enum cpu_mit_type cpu_mit)
+{
+ uint32_t cpu;
+
+ for_each_possible_cpu(cpu) {
+ cpus[cpu].cpu = cpu;
+ if (cpu_mit & CPU_HOTPLUG_MITIGATION) {
+ cpus[cpu].offline = 0;
+ cpus[cpu].user_offline = 0;
+ cpus[cpu].hotplug_thresh_clear = false;
+ }
+ if (cpu_mit & CPU_FREQ_MITIGATION) {
+ cpus[cpu].max_freq = false;
+ cpus[cpu].user_max_freq = UINT_MAX;
+ cpus[cpu].shutdown_max_freq = UINT_MAX;
+ cpus[cpu].suspend_max_freq = UINT_MAX;
+ cpus[cpu].user_min_freq = 0;
+ cpus[cpu].limited_max_freq = UINT_MAX;
+ cpus[cpu].limited_min_freq = 0;
+ cpus[cpu].freq_thresh_clear = false;
+ }
+ }
+}
+
+int msm_thermal_init(struct msm_thermal_data *pdata)
+{
+ int ret = 0;
+
+	BUG_ON(!pdata);
+	ret = devmgr_devices_init(pdata->pdev);
+	if (ret)
+		pr_err("cannot initialize devm devices. err:%d\n", ret);
+
+	msm_thermal_init_cpu_mit(CPU_FREQ_MITIGATION | CPU_HOTPLUG_MITIGATION);
+	memcpy(&msm_thermal_info, pdata, sizeof(struct msm_thermal_data));
+
+ if (check_sensor_id(msm_thermal_info.sensor_id)) {
+ pr_err("Invalid sensor:%d for polling\n",
+ msm_thermal_info.sensor_id);
+ return -EINVAL;
+ }
+
+ enabled = 1;
+ polling_enabled = 1;
+ ret = cpufreq_register_notifier(&msm_thermal_cpufreq_notifier,
+ CPUFREQ_POLICY_NOTIFIER);
+ if (ret)
+ pr_err("cannot register cpufreq notifier. err:%d\n", ret);
+
+ register_reboot_notifier(&msm_thermal_reboot_notifier);
+ pm_notifier(msm_thermal_suspend_callback, 0);
+ INIT_DELAYED_WORK(&retry_hotplug_work, retry_hotplug);
+ INIT_DELAYED_WORK(&check_temp_work, check_temp);
+ schedule_delayed_work(&check_temp_work, 0);
+
+ if (num_possible_cpus() > 1) {
+ cpus_previously_online_update();
+ register_cpu_notifier(&msm_thermal_cpu_notifier);
+ }
+ msm_thermal_panic_notifier_init(&pdata->pdev->dev);
+
+ return ret;
+}
+
+static int ocr_reg_init(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i, j;
+
+ for (i = 0; i < ocr_rail_cnt; i++) {
+		/*
+		 * Check if vdd_restriction has already initialized any
+		 * regulator handle. If so, use the same handle.
+		 */
+ for (j = 0; j < rails_cnt; j++) {
+ if (!strcmp(ocr_rails[i].name, rails[j].name)) {
+ if (rails[j].reg == NULL)
+ break;
+ ocr_rails[i].phase_reg = rails[j].reg;
+ goto reg_init;
+ }
+
+ }
+ ocr_rails[i].phase_reg = devm_regulator_get(&pdev->dev,
+ ocr_rails[i].name);
+ if (IS_ERR_OR_NULL(ocr_rails[i].phase_reg)) {
+ ret = PTR_ERR(ocr_rails[i].phase_reg);
+ if (ret != -EPROBE_DEFER) {
+ pr_err("Could not get regulator: %s, err:%d\n",
+ ocr_rails[i].name, ret);
+ ocr_rails[i].phase_reg = NULL;
+ ocr_rails[i].mode = 0;
+ ocr_rails[i].init = 0;
+ }
+ return ret;
+ }
+reg_init:
+ ocr_rails[i].mode = OPTIMUM_CURRENT_MIN;
+ }
+ return ret;
+}
+
+static int vdd_restriction_reg_init(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < rails_cnt; i++) {
+ if (rails[i].freq_req == 1) {
+ usefreq |= BIT(i);
+ check_freq_table();
+ /*
+ * Restrict frequency by default until we have made
+ * our first temp reading
+ */
+ if (freq_table_get)
+ ret = vdd_restriction_apply_freq(&rails[i], 0);
+ else
+ pr_info("Defer vdd rstr freq init.\n");
+ } else {
+ rails[i].reg = devm_regulator_get(&pdev->dev,
+ rails[i].name);
+ if (IS_ERR_OR_NULL(rails[i].reg)) {
+ ret = PTR_ERR(rails[i].reg);
+ if (ret != -EPROBE_DEFER) {
+				pr_err("could not get regulator: %s. err:%d\n",
+					rails[i].name, ret);
+ rails[i].reg = NULL;
+ rails[i].curr_level = -2;
+ return ret;
+ }
+ pr_info("Defer regulator %s probe\n",
+ rails[i].name);
+ return ret;
+ }
+			/*
+			 * Restrict voltage by default until we have made
+			 * our first temp reading.
+			 */
+ ret = vdd_restriction_apply_voltage(&rails[i], 0);
+ }
+ }
+
+ return ret;
+}
+
+static int psm_reg_init(struct platform_device *pdev)
+{
+ int ret = 0;
+ int i = 0;
+ int j = 0;
+
+ for (i = 0; i < psm_rails_cnt; i++) {
+ psm_rails[i].reg = rpm_regulator_get(&pdev->dev,
+ psm_rails[i].name);
+ if (IS_ERR_OR_NULL(psm_rails[i].reg)) {
+ ret = PTR_ERR(psm_rails[i].reg);
+ if (ret != -EPROBE_DEFER) {
+ pr_err("couldn't get rpm regulator %s. err%d\n",
+ psm_rails[i].name, ret);
+ psm_rails[i].reg = NULL;
+ goto psm_reg_exit;
+ }
+ pr_info("Defer regulator %s probe\n",
+ psm_rails[i].name);
+ return ret;
+ }
+ /* Apps default vote for PWM mode */
+ psm_rails[i].init = PMIC_PWM_MODE;
+ ret = rpm_regulator_set_mode(psm_rails[i].reg,
+ psm_rails[i].init);
+ if (ret) {
+ pr_err("Cannot set PMIC PWM mode. err:%d\n", ret);
+ return ret;
+ } else
+ psm_rails[i].mode = PMIC_PWM_MODE;
+ }
+
+ return ret;
+
+psm_reg_exit:
+ if (ret) {
+ for (j = 0; j < i; j++) {
+ if (psm_rails[j].reg != NULL)
+ rpm_regulator_put(psm_rails[j].reg);
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t bucket_info_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = 0;
+ uint32_t val = 0;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret) {
+		pr_err("Invalid input:%s. ret:%d\n", buf, ret);
+ goto done_store;
+ }
+
+ bucket = val & 0xff;
+ pr_debug("\"%s\"(PID:%i) request cluster:%d bucket:%d\n",
+ current->comm, current->pid, (bucket & 0xf0) >> 4,
+ bucket & 0xf);
+
+done_store:
+ return count;
+}
+
+static ssize_t bucket_info_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", bucket);
+}
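+/*
+ * Worked example (illustrative): writing 18 (0x12) stores bucket =
+ * 0x12; the debug print decodes cluster (0x12 & 0xf0) >> 4 = 1 and
+ * bucket 0x12 & 0xf = 2. Only the low byte of the input is kept.
+ */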
+
+static struct kobj_attribute bucket_info_attr =
+ __ATTR_RW(bucket_info);
+static int msm_thermal_add_bucket_info_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ int ret = 0;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("cannot find kobject\n");
+ return -ENOENT;
+ }
+ sysfs_attr_init(&bucket_info_attr.attr);
+ ret = sysfs_create_file(module_kobj, &bucket_info_attr.attr);
+ if (ret) {
+ pr_err(
+ "cannot create bucket info kobject attribute. err:%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct kobj_attribute sensor_info_attr =
+ __ATTR_RO(sensor_info);
+static int msm_thermal_add_sensor_info_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ int ret = 0;
+
+ if (!sensor_info_probed) {
+ sensor_info_nodes_called = true;
+ return ret;
+ }
+ if (sensor_info_probed && sensor_cnt == 0)
+ return ret;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("cannot find kobject\n");
+ return -ENOENT;
+ }
+ sysfs_attr_init(&sensor_info_attr.attr);
+ ret = sysfs_create_file(module_kobj, &sensor_info_attr.attr);
+ if (ret) {
+ pr_err(
+ "cannot create sensor info kobject attribute. err:%d\n",
+ ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int msm_thermal_add_vdd_rstr_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ struct kobject *vdd_rstr_kobj = NULL;
+ struct kobject *vdd_rstr_reg_kobj[MAX_RAILS] = {0};
+ int rc = 0;
+ int i = 0;
+
+ if (!vdd_rstr_probed) {
+ vdd_rstr_nodes_called = true;
+ return rc;
+ }
+
+ if (vdd_rstr_probed && rails_cnt == 0)
+ return rc;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("cannot find kobject\n");
+ rc = -ENOENT;
+ goto thermal_sysfs_add_exit;
+ }
+
+ vdd_rstr_kobj = kobject_create_and_add("vdd_restriction", module_kobj);
+ if (!vdd_rstr_kobj) {
+ pr_err("cannot create vdd_restriction kobject\n");
+ rc = -ENOMEM;
+ goto thermal_sysfs_add_exit;
+ }
+
+ rc = sysfs_create_group(vdd_rstr_kobj, &vdd_rstr_en_attribs_gp);
+ if (rc) {
+ pr_err("cannot create kobject attribute group. err:%d\n", rc);
+ rc = -ENOMEM;
+ goto thermal_sysfs_add_exit;
+ }
+
+ for (i = 0; i < rails_cnt; i++) {
+ vdd_rstr_reg_kobj[i] = kobject_create_and_add(rails[i].name,
+ vdd_rstr_kobj);
+ if (!vdd_rstr_reg_kobj[i]) {
+ pr_err("cannot create kobject for %s\n",
+ rails[i].name);
+ rc = -ENOMEM;
+ goto thermal_sysfs_add_exit;
+ }
+
+ rails[i].attr_gp.attrs = kzalloc(sizeof(struct attribute *) * 3,
+ GFP_KERNEL);
+ if (!rails[i].attr_gp.attrs) {
+ pr_err("kzalloc failed\n");
+ rc = -ENOMEM;
+ goto thermal_sysfs_add_exit;
+ }
+
+ VDD_RES_RW_ATTRIB(rails[i], rails[i].level_attr, 0, level);
+ VDD_RES_RO_ATTRIB(rails[i], rails[i].value_attr, 1, value);
+ rails[i].attr_gp.attrs[2] = NULL;
+
+ rc = sysfs_create_group(vdd_rstr_reg_kobj[i],
+ &rails[i].attr_gp);
+ if (rc) {
+ pr_err("cannot create attribute group for %s. err:%d\n",
+ rails[i].name, rc);
+ goto thermal_sysfs_add_exit;
+ }
+ }
+
+ return rc;
+
+thermal_sysfs_add_exit:
+ if (rc) {
+ for (i = 0; i < rails_cnt; i++) {
+ kobject_del(vdd_rstr_reg_kobj[i]);
+ kfree(rails[i].attr_gp.attrs);
+ }
+ if (vdd_rstr_kobj)
+ kobject_del(vdd_rstr_kobj);
+ }
+ return rc;
+}
+
+static int msm_thermal_add_ocr_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ struct kobject *ocr_kobj = NULL;
+ struct kobject *ocr_reg_kobj[MAX_RAILS] = {0};
+ int rc = 0;
+ int i = 0;
+
+ if (!ocr_probed) {
+ ocr_nodes_called = true;
+ return rc;
+ }
+
+ if (ocr_probed && ocr_rail_cnt == 0)
+ return rc;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("Cannot find kobject\n");
+ rc = -ENOENT;
+ goto ocr_node_exit;
+ }
+
+ ocr_kobj = kobject_create_and_add("opt_curr_req", module_kobj);
+ if (!ocr_kobj) {
+ pr_err("Cannot create ocr kobject\n");
+ rc = -ENOMEM;
+ goto ocr_node_exit;
+ }
+
+ for (i = 0; i < ocr_rail_cnt; i++) {
+ ocr_reg_kobj[i] = kobject_create_and_add(ocr_rails[i].name,
+ ocr_kobj);
+ if (!ocr_reg_kobj[i]) {
+ pr_err("Cannot create kobject for %s\n",
+ ocr_rails[i].name);
+ rc = -ENOMEM;
+ goto ocr_node_exit;
+ }
+ ocr_rails[i].attr_gp.attrs = kzalloc(
+ sizeof(struct attribute *) * 2, GFP_KERNEL);
+ if (!ocr_rails[i].attr_gp.attrs) {
+ pr_err("Fail to allocate memory for attribute for %s\n",
+ ocr_rails[i].name);
+ rc = -ENOMEM;
+ goto ocr_node_exit;
+ }
+
+ OCR_RW_ATTRIB(ocr_rails[i], ocr_rails[i].mode_attr, 0, mode);
+ ocr_rails[i].attr_gp.attrs[1] = NULL;
+
+ rc = sysfs_create_group(ocr_reg_kobj[i], &ocr_rails[i].attr_gp);
+ if (rc) {
+ pr_err("Cannot create attribute group for %s. err:%d\n",
+ ocr_rails[i].name, rc);
+ goto ocr_node_exit;
+ }
+ }
+
+ocr_node_exit:
+ if (rc) {
+ for (i = 0; i < ocr_rail_cnt; i++) {
+ if (ocr_reg_kobj[i])
+ kobject_del(ocr_reg_kobj[i]);
+ kfree(ocr_rails[i].attr_gp.attrs);
+ ocr_rails[i].attr_gp.attrs = NULL;
+ }
+ if (ocr_kobj)
+ kobject_del(ocr_kobj);
+ }
+ return rc;
+}
+
+static int msm_thermal_add_psm_nodes(void)
+{
+ struct kobject *module_kobj = NULL;
+ struct kobject *psm_kobj = NULL;
+ struct kobject *psm_reg_kobj[MAX_RAILS] = {0};
+ int rc = 0;
+ int i = 0;
+
+ if (!psm_probed) {
+ psm_nodes_called = true;
+ return rc;
+ }
+
+ if (psm_probed && psm_rails_cnt == 0)
+ return rc;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ pr_err("cannot find kobject\n");
+ rc = -ENOENT;
+ goto psm_node_exit;
+ }
+
+ psm_kobj = kobject_create_and_add("pmic_sw_mode", module_kobj);
+ if (!psm_kobj) {
+ pr_err("cannot create psm kobject\n");
+ rc = -ENOMEM;
+ goto psm_node_exit;
+ }
+
+ for (i = 0; i < psm_rails_cnt; i++) {
+ psm_reg_kobj[i] = kobject_create_and_add(psm_rails[i].name,
+ psm_kobj);
+ if (!psm_reg_kobj[i]) {
+ pr_err("cannot create kobject for %s\n",
+ psm_rails[i].name);
+ rc = -ENOMEM;
+ goto psm_node_exit;
+ }
+		psm_rails[i].attr_gp.attrs = kzalloc(
+			sizeof(struct attribute *) * 2, GFP_KERNEL);
+ if (!psm_rails[i].attr_gp.attrs) {
+ pr_err("kzalloc failed\n");
+ rc = -ENOMEM;
+ goto psm_node_exit;
+ }
+
+ PSM_RW_ATTRIB(psm_rails[i], psm_rails[i].mode_attr, 0, mode);
+ psm_rails[i].attr_gp.attrs[1] = NULL;
+
+ rc = sysfs_create_group(psm_reg_kobj[i], &psm_rails[i].attr_gp);
+ if (rc) {
+ pr_err("cannot create attribute group for %s. err:%d\n",
+ psm_rails[i].name, rc);
+ goto psm_node_exit;
+ }
+ }
+
+ return rc;
+
+psm_node_exit:
+ if (rc) {
+ for (i = 0; i < psm_rails_cnt; i++) {
+ kobject_del(psm_reg_kobj[i]);
+ kfree(psm_rails[i].attr_gp.attrs);
+ }
+ if (psm_kobj)
+ kobject_del(psm_kobj);
+ }
+ return rc;
+}
+
+static void thermal_cpu_freq_mit_disable(void)
+{
+ uint32_t cpu = 0, th_cnt = 0;
+ struct device_manager_data *dev_mgr = NULL;
+ struct device_clnt_data *clnt = NULL;
+ char device_name[TSENS_NAME_MAX] = {0};
+
+ freq_mitigation_enabled = 0;
+ msm_thermal_init_cpu_mit(CPU_FREQ_MITIGATION);
+ for_each_possible_cpu(cpu) {
+ for (th_cnt = FREQ_THRESHOLD_HIGH;
+ th_cnt <= FREQ_THRESHOLD_LOW; th_cnt++)
+ sensor_cancel_trip(cpus[cpu].sensor_id,
+ &cpus[cpu].threshold[th_cnt]);
+
+ snprintf(device_name, TSENS_NAME_MAX, CPU_DEVICE, cpu);
+ dev_mgr = find_device_by_name(device_name);
+ if (!dev_mgr) {
+ pr_err("Invalid device %s\n", device_name);
+ return;
+ }
+ mutex_lock(&dev_mgr->clnt_lock);
+ list_for_each_entry(clnt, &dev_mgr->client_list, clnt_ptr) {
+ if (!clnt->req_active)
+ continue;
+ clnt->request.freq.max_freq
+ = CPUFREQ_MAX_NO_MITIGATION;
+ clnt->request.freq.min_freq
+ = CPUFREQ_MIN_NO_MITIGATION;
+ }
+ dev_mgr->active_req.freq.max_freq = CPUFREQ_MAX_NO_MITIGATION;
+ dev_mgr->active_req.freq.min_freq = CPUFREQ_MIN_NO_MITIGATION;
+ mutex_unlock(&dev_mgr->clnt_lock);
+ }
+ if (freq_mitigation_task)
+ complete(&freq_mitigation_complete);
+ else
+ pr_err("Freq mit task is not initialized\n");
+}
+
+static void thermal_cpu_hotplug_mit_disable(void)
+{
+ uint32_t cpu = 0, th_cnt = 0;
+ struct device_manager_data *dev_mgr = NULL;
+ struct device_clnt_data *clnt = NULL;
+
+ mutex_lock(&core_control_mutex);
+ hotplug_enabled = 0;
+ msm_thermal_init_cpu_mit(CPU_HOTPLUG_MITIGATION);
+ for_each_possible_cpu(cpu) {
+ if (!(msm_thermal_info.core_control_mask & BIT(cpu)))
+ continue;
+
+ for (th_cnt = HOTPLUG_THRESHOLD_HIGH;
+ th_cnt <= HOTPLUG_THRESHOLD_LOW; th_cnt++)
+ sensor_cancel_trip(cpus[cpu].sensor_id,
+ &cpus[cpu].threshold[th_cnt]);
+ }
+
+ dev_mgr = find_device_by_name(HOTPLUG_DEVICE);
+ if (!dev_mgr) {
+ pr_err("Invalid device %s\n", HOTPLUG_DEVICE);
+ mutex_unlock(&core_control_mutex);
+ return;
+ }
+ mutex_lock(&dev_mgr->clnt_lock);
+ list_for_each_entry(clnt, &dev_mgr->client_list, clnt_ptr) {
+ if (!clnt->req_active)
+ continue;
+ HOTPLUG_NO_MITIGATION(&clnt->request.offline_mask);
+ }
+ HOTPLUG_NO_MITIGATION(&dev_mgr->active_req.offline_mask);
+ mutex_unlock(&dev_mgr->clnt_lock);
+
+ if (hotplug_task)
+ complete(&hotplug_notify_complete);
+ else
+ pr_err("Hotplug task is not initialized\n");
+
+ mutex_unlock(&core_control_mutex);
+}
+
+static void thermal_reset_disable(void)
+{
+ THERM_MITIGATION_DISABLE(therm_reset_enabled, MSM_THERM_RESET);
+}
+
+static void thermal_mx_mit_disable(void)
+{
+ int ret = 0;
+
+ THERM_MITIGATION_DISABLE(vdd_mx_enabled, MSM_VDD_MX_RESTRICTION);
+ ret = remove_vdd_mx_restriction();
+ if (ret)
+ pr_err("Failed to remove vdd mx restriction\n");
+}
+
+static void thermal_vdd_mit_disable(void)
+{
+ int ret = 0;
+
+ THERM_MITIGATION_DISABLE(vdd_rstr_enabled, MSM_VDD_RESTRICTION);
+ ret = vdd_restriction_apply_all(0);
+ if (ret)
+ pr_err("Disable vdd rstr for all failed. err:%d\n", ret);
+}
+
+static void thermal_psm_mit_disable(void)
+{
+ int ret = 0;
+
+ THERM_MITIGATION_DISABLE(psm_enabled, -1);
+ ret = psm_set_mode_all(PMIC_AUTO_MODE);
+ if (ret)
+ pr_err("Set auto mode for all failed. err:%d\n", ret);
+}
+
+static void thermal_ocr_mit_disable(void)
+{
+ int ret = 0;
+
+ THERM_MITIGATION_DISABLE(ocr_enabled, MSM_OCR);
+ ret = ocr_set_mode_all(OPTIMUM_CURRENT_MAX);
+ if (ret)
+ pr_err("Set max optimum current failed. err:%d\n", ret);
+}
+
+static void thermal_cx_phase_ctrl_mit_disable(void)
+{
+ int ret = 0;
+
+ THERM_MITIGATION_DISABLE(cx_phase_ctrl_enabled, MSM_CX_PHASE_CTRL_HOT);
+ ret = send_temperature_band(MSM_CX_PHASE_CTRL, MSM_WARM);
+ if (ret)
+ pr_err("cx band set to WARM failed. err:%d\n", ret);
+}
+
+static void thermal_gfx_phase_warm_ctrl_mit_disable(void)
+{
+ int ret = 0;
+
+ if (gfx_warm_phase_ctrl_enabled) {
+ THERM_MITIGATION_DISABLE(gfx_warm_phase_ctrl_enabled,
+ MSM_GFX_PHASE_CTRL_WARM);
+ ret = send_temperature_band(MSM_GFX_PHASE_CTRL, MSM_NORMAL);
+		if (ret)
+ pr_err("gfx phase set to NORMAL failed. err:%d\n",
+ ret);
+ }
+}
+
+static void thermal_gfx_phase_crit_ctrl_mit_disable(void)
+{
+ int ret = 0;
+
+ if (gfx_crit_phase_ctrl_enabled) {
+ THERM_MITIGATION_DISABLE(gfx_crit_phase_ctrl_enabled,
+ MSM_GFX_PHASE_CTRL_HOT);
+ ret = send_temperature_band(MSM_GFX_PHASE_CTRL, MSM_NORMAL);
+		if (ret)
+ pr_err("gfx phase set to NORMAL failed. err:%d\n",
+ ret);
+ }
+}
+
+static int probe_vdd_mx(struct device_node *node,
+ struct msm_thermal_data *data, struct platform_device *pdev)
+{
+ int ret = 0;
+ char *key = NULL;
+
+ key = "qcom,disable-vdd-mx";
+ if (of_property_read_bool(node, key)) {
+ vdd_mx_enabled = false;
+ return ret;
+ }
+
+ key = "qcom,mx-restriction-temp";
+ ret = of_property_read_u32(node, key, &data->vdd_mx_temp_degC);
+ if (ret)
+ goto read_node_done;
+
+ key = "qcom,mx-restriction-temp-hysteresis";
+ ret = of_property_read_u32(node, key, &data->vdd_mx_temp_hyst_degC);
+ if (ret)
+ goto read_node_done;
+
+ key = "qcom,mx-retention-min";
+ ret = of_property_read_u32(node, key, &data->vdd_mx_min);
+ if (ret)
+ goto read_node_done;
+
+ vdd_mx = devm_regulator_get(&pdev->dev, "vdd-mx");
+ if (IS_ERR_OR_NULL(vdd_mx)) {
+ ret = PTR_ERR(vdd_mx);
+ if (ret != -EPROBE_DEFER) {
+ pr_err(
+ "Could not get regulator: vdd-mx, err:%d\n", ret);
+ }
+ goto read_node_done;
+ }
+
+ key = "qcom,cx-retention-min";
+ ret = of_property_read_u32(node, key, &data->vdd_cx_min);
+ if (!ret) {
+ vdd_cx = devm_regulator_get(&pdev->dev, "vdd-cx");
+ if (IS_ERR_OR_NULL(vdd_cx)) {
+ ret = PTR_ERR(vdd_cx);
+ if (ret != -EPROBE_DEFER) {
+ pr_err(
+ "Could not get regulator: vdd-cx, err:%d\n",
+ ret);
+ }
+ goto read_node_done;
+ }
+ }
+
+ ret = sensor_mgr_init_threshold(&thresh[MSM_VDD_MX_RESTRICTION],
+ MONITOR_ALL_TSENS,
+ data->vdd_mx_temp_degC + data->vdd_mx_temp_hyst_degC,
+ data->vdd_mx_temp_degC, vdd_mx_notify);
+
+read_node_done:
+ if (!ret) {
+ vdd_mx_enabled = true;
+ snprintf(mit_config[MSM_VDD_MX_RESTRICTION].config_name,
+ MAX_DEBUGFS_CONFIG_LEN, "mx");
+ mit_config[MSM_VDD_MX_RESTRICTION].disable_config
+ = thermal_mx_mit_disable;
+ } else if (ret != -EPROBE_DEFER) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s. KTM continues\n",
+ __func__, node->full_name, key);
+ }
+
+ return ret;
+}
+
+static int probe_vdd_rstr(struct device_node *node,
+ struct msm_thermal_data *data, struct platform_device *pdev)
+{
+ int ret = 0;
+ int i = 0;
+ int arr_size;
+ char *key = NULL;
+ struct device_node *child_node = NULL;
+
+ rails = NULL;
+
+ key = "qcom,disable-vdd-rstr";
+ if (of_property_read_bool(node, key)) {
+ vdd_rstr_probed = true;
+ vdd_rstr_enabled = false;
+ rails_cnt = 0;
+ return ret;
+ }
+
+ key = "qcom,vdd-restriction-temp";
+ ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_degC);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,vdd-restriction-temp-hysteresis";
+ ret = of_property_read_u32(node, key, &data->vdd_rstr_temp_hyst_degC);
+ if (ret)
+ goto read_node_fail;
+
+ for_each_child_of_node(node, child_node) {
+ rails_cnt++;
+ }
+
+ if (rails_cnt == 0)
+ goto read_node_fail;
+ if (rails_cnt >= MAX_RAILS) {
+ pr_err("Too many rails:%d.\n", rails_cnt);
+ return -EFAULT;
+ }
+
+ rails = kzalloc(sizeof(struct rail) * rails_cnt,
+ GFP_KERNEL);
+ if (!rails) {
+ pr_err("Fail to allocate memory for rails.\n");
+ return -ENOMEM;
+ }
+
+ i = 0;
+ for_each_child_of_node(node, child_node) {
+ key = "qcom,vdd-rstr-reg";
+ ret = of_property_read_string(child_node, key, &rails[i].name);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,levels";
+ if (!of_get_property(child_node, key, &arr_size))
+ goto read_node_fail;
+ rails[i].num_levels = arr_size/sizeof(__be32);
+ if (rails[i].num_levels >
+ sizeof(rails[i].levels)/sizeof(uint32_t)) {
+ pr_err("Array size:%d too large for index:%d\n",
+ rails[i].num_levels, i);
+ return -EFAULT;
+ }
+ ret = of_property_read_u32_array(child_node, key,
+ rails[i].levels, rails[i].num_levels);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,freq-req";
+ rails[i].freq_req = of_property_read_bool(child_node, key);
+ if (rails[i].freq_req) {
+ rails[i].min_level = 0;
+ key = "qcom,max-freq-level";
+ ret = of_property_read_u32(child_node, key,
+ &rails[i].max_frequency_limit);
+ if (ret)
+ rails[i].max_frequency_limit
+ = UINT_MAX;
+ ret = 0;
+ } else {
+ key = "qcom,min-level";
+ ret = of_property_read_u32(child_node, key,
+ &rails[i].min_level);
+ if (ret)
+ goto read_node_fail;
+ }
+
+ rails[i].curr_level = -1;
+ rails[i].reg = NULL;
+ i++;
+ }
+
+ if (rails_cnt) {
+ ret = vdd_restriction_reg_init(pdev);
+ if (ret) {
+ pr_err("Err regulator init. err:%d. KTM continues.\n",
+ ret);
+ goto read_node_fail;
+ }
+ ret = sensor_mgr_init_threshold(&thresh[MSM_VDD_RESTRICTION],
+ MONITOR_ALL_TSENS,
+ data->vdd_rstr_temp_hyst_degC, data->vdd_rstr_temp_degC,
+ vdd_restriction_notify);
+ if (ret) {
+ pr_err("Error in initializing thresholds. err:%d\n",
+ ret);
+ goto read_node_fail;
+ }
+ vdd_rstr_enabled = true;
+ snprintf(mit_config[MSM_VDD_RESTRICTION].config_name,
+ MAX_DEBUGFS_CONFIG_LEN, "vdd");
+ mit_config[MSM_VDD_RESTRICTION].disable_config
+ = thermal_vdd_mit_disable;
+ }
+read_node_fail:
+ vdd_rstr_probed = true;
+ if (ret) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ __func__, node->full_name, key, ret);
+ kfree(rails);
+ rails_cnt = 0;
+ }
+ if (ret == -EPROBE_DEFER)
+ vdd_rstr_probed = false;
+ return ret;
+}
+
+static int create_alias_name(int _cpu, struct device_node *limits,
+ struct platform_device *pdev)
+{
+ char device_name[DEVM_NAME_MAX] = "";
+ int sensor_idx = 0, sensor_ct = 0, idx = 0, err = 0;
+ struct device_node *tsens = NULL;
+ const char *sensor_name = NULL;
+
+ if (!sensors) {
+ pr_debug("sensor info not defined\n");
+ return -ENOSYS;
+ }
+ snprintf(device_name, DEVM_NAME_MAX, CPU_DEVICE, _cpu);
+
+ if (!of_get_property(limits, "qcom,temperature-sensor", &sensor_ct)
+ || sensor_ct <= 0) {
+ pr_err("Sensor not defined\n");
+ return -ENODEV;
+ }
+ sensor_ct /= sizeof(__be32);
+ do {
+ tsens = of_parse_phandle(limits, "qcom,temperature-sensor",
+ idx);
+ if (!tsens) {
+ pr_err("No temperature sensor defined for CPU%d\n",
+ _cpu);
+ return -ENODEV;
+ }
+
+ err = of_property_read_string(tsens, "qcom,sensor-name",
+ &sensor_name);
+ if (err) {
+ pr_err("Sensor name not populated for CPU%d. err:%d\n",
+ _cpu, err);
+ return -ENODEV;
+ }
+ for (sensor_idx = 0; sensor_idx < sensor_cnt; sensor_idx++) {
+ char cat_str[DEVM_NAME_MAX] = "";
+
+ if (strcmp(sensors[sensor_idx].name, sensor_name))
+ continue;
+ if (!sensors[sensor_idx].alias) {
+ sensors[sensor_idx].alias = devm_kzalloc(
+ &pdev->dev, DEVM_NAME_MAX, GFP_KERNEL);
+ if (!sensors[sensor_idx].alias) {
+ pr_err("Memory alloc failed\n");
+ return -ENOMEM;
+ }
+ strlcpy((char *)sensors[sensor_idx].alias,
+ device_name, DEVM_NAME_MAX);
+ if (sensor_ct > 1) {
+					/*
+					 * Multiple sensors monitoring a
+					 * single device
+					 */
+ snprintf(cat_str, DEVM_NAME_MAX, "_%d",
+ idx);
+ strlcat((char *)
+ sensors[sensor_idx].alias,
+ cat_str, DEVM_NAME_MAX);
+ }
+ } else {
+ /* Single sensor monitoring multiple devices */
+ snprintf(cat_str, DEVM_NAME_MAX,
+ "-"CPU_DEVICE, _cpu);
+ strlcat((char *)sensors[sensor_idx].alias,
+ cat_str, DEVM_NAME_MAX);
+ }
+ break;
+ }
+ idx++;
+ } while (idx < sensor_ct);
+
+ return 0;
+}
+
+static int fetch_cpu_mitigation_info(struct msm_thermal_data *data,
+		struct platform_device *pdev)
+{
+	int _cpu = 0, err = 0;
+ struct device_node *cpu_node = NULL, *limits = NULL, *tsens = NULL;
+
+ for_each_possible_cpu(_cpu) {
+ const char *sensor_name = NULL;
+
+ cpu_node = of_get_cpu_node(_cpu, NULL);
+ if (!cpu_node) {
+ pr_err("No CPU phandle for CPU%d\n", _cpu);
+ __WARN();
+ continue;
+ }
+ limits = of_parse_phandle(cpu_node, "qcom,limits-info", 0);
+ if (!limits) {
+ pr_err("No mitigation info defined for CPU%d\n", _cpu);
+ continue;
+ }
+ VALIDATE_AND_SET_MASK(limits, "qcom,boot-frequency-mitigate",
+ data->bootup_freq_control_mask, _cpu);
+ VALIDATE_AND_SET_MASK(limits,
+ "qcom,emergency-frequency-mitigate",
+ data->freq_mitig_control_mask, _cpu);
+ VALIDATE_AND_SET_MASK(limits, "qcom,hotplug-mitigation-enable",
+ data->core_control_mask, _cpu);
+
+ tsens = of_parse_phandle(limits, "qcom,temperature-sensor", 0);
+ if (!tsens) {
+ pr_err("No temperature sensor defined for CPU%d\n",
+ _cpu);
+ continue;
+ }
+
+ err = of_property_read_string(tsens, "qcom,sensor-name",
+ &sensor_name);
+ if (err) {
+ pr_err("Sensor name not populated for CPU%d. err:%d\n",
+ _cpu, err);
+ continue;
+ }
+ cpus[_cpu].sensor_type = devm_kzalloc(&pdev->dev,
+ strlen(sensor_name) + 1, GFP_KERNEL);
+ if (!cpus[_cpu].sensor_type) {
+ pr_err("Memory alloc failed\n");
+ err = -ENOMEM;
+ goto fetch_mitig_exit;
+ }
+ strlcpy((char *) cpus[_cpu].sensor_type, sensor_name,
+ strlen(sensor_name) + 1);
+ create_alias_name(_cpu, limits, pdev);
+ }
+
+fetch_mitig_exit:
+ return err;
+}
+
+static void probe_sensor_info(struct device_node *node,
+ struct msm_thermal_data *data, struct platform_device *pdev)
+{
+ int err = 0;
+ int i = 0;
+ char *key = NULL;
+ struct device_node *child_node = NULL;
+ struct device_node *np = NULL;
+ int scale_tsens_found = 0;
+
+ key = "qcom,disable-sensor-info";
+ if (of_property_read_bool(node, key)) {
+ sensor_info_probed = true;
+ return;
+ }
+
+ np = of_find_compatible_node(NULL, NULL, "qcom,sensor-information");
+ if (!np) {
+ dev_info(&pdev->dev,
+			"%s:unable to find DT for sensor-information. KTM continues\n",
+ __func__);
+ sensor_info_probed = true;
+ return;
+ }
+ sensor_cnt = of_get_child_count(np);
+ if (sensor_cnt == 0) {
+ err = -ENODEV;
+ goto read_node_fail;
+ }
+
+ sensors = devm_kzalloc(&pdev->dev,
+ sizeof(struct msm_sensor_info) * sensor_cnt,
+ GFP_KERNEL);
+ if (!sensors) {
+ pr_err("Fail to allocate memory for sensor_info.\n");
+ err = -ENOMEM;
+ goto read_node_fail;
+ }
+
+ for_each_child_of_node(np, child_node) {
+ const char *alias_name = NULL;
+
+ key = "qcom,sensor-type";
+ err = of_property_read_string(child_node,
+ key, &sensors[i].type);
+ if (err)
+ goto read_node_fail;
+
+ key = "qcom,sensor-name";
+ err = of_property_read_string(child_node,
+ key, &sensors[i].name);
+ if (err)
+ goto read_node_fail;
+
+ key = "qcom,alias-name";
+ of_property_read_string(child_node, key, &alias_name);
+ if (alias_name && !strnstr(alias_name, "cpu",
+ strlen(alias_name)))
+ sensors[i].alias = alias_name;
+
+ key = "qcom,scaling-factor";
+ err = of_property_read_u32(child_node, key,
+ &sensors[i].scaling_factor);
+ if (err || sensors[i].scaling_factor == 0) {
+ sensors[i].scaling_factor = SENSOR_SCALING_FACTOR;
+ err = 0;
+ }
+ if (scale_tsens_found == 0) {
+ if (!strcmp(sensors[i].type, "tsens")) {
+ scale_tsens_found = 1;
+ tsens_scaling_factor =
+ sensors[i].scaling_factor;
+ }
+ }
+ i++;
+ }
+
+read_node_fail:
+ sensor_info_probed = true;
+ if (err) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ __func__, np->full_name, key, err);
+ devm_kfree(&pdev->dev, sensors);
+ }
+}
+
+static int probe_ocr(struct device_node *node, struct msm_thermal_data *data,
+ struct platform_device *pdev)
+{
+ int ret = 0;
+ int j = 0;
+ char *key = NULL;
+
+ if (ocr_probed) {
+ pr_info("Nodes already probed\n");
+ goto read_ocr_exit;
+ }
+ ocr_rails = NULL;
+
+ key = "qcom,disable-ocr";
+ if (of_property_read_bool(node, key)) {
+ ocr_probed = true;
+ ocr_enabled = false;
+ ocr_rail_cnt = 0;
+ goto read_ocr_exit;
+ }
+
+ key = "qcom,pmic-opt-curr-temp";
+ ret = of_property_read_u32(node, key, &data->ocr_temp_degC);
+ if (ret)
+ goto read_ocr_fail;
+
+ key = "qcom,pmic-opt-curr-temp-hysteresis";
+ ret = of_property_read_u32(node, key, &data->ocr_temp_hyst_degC);
+ if (ret)
+ goto read_ocr_fail;
+
+ key = "qcom,pmic-opt-curr-regs";
+ ocr_rail_cnt = of_property_count_strings(node, key);
+	if (ocr_rail_cnt <= 0) {
+		pr_err("Invalid ocr rail count. err:%d\n", ocr_rail_cnt);
+		ret = ocr_rail_cnt ? ocr_rail_cnt : -ENODEV;
+		ocr_rail_cnt = 0;
+		goto read_ocr_fail;
+	}
+ ocr_rails = kzalloc(sizeof(struct psm_rail) * ocr_rail_cnt,
+ GFP_KERNEL);
+ if (!ocr_rails) {
+ pr_err("Fail to allocate memory for ocr rails\n");
+ ocr_rail_cnt = 0;
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < ocr_rail_cnt; j++) {
+ ret = of_property_read_string_index(node, key, j,
+ &ocr_rails[j].name);
+ if (ret)
+ goto read_ocr_fail;
+ ocr_rails[j].phase_reg = NULL;
+ ocr_rails[j].init = OPTIMUM_CURRENT_MAX;
+ }
+
+ key = "qcom,pmic-opt-curr-sensor-id";
+ ret = of_property_read_u32(node, key, &data->ocr_sensor_id);
+ if (ret) {
+		pr_info("ocr sensor is not configured, using all TSENS. err:%d\n",
+ ret);
+ data->ocr_sensor_id = MONITOR_ALL_TSENS;
+ }
+
+ ret = ocr_reg_init(pdev);
+ if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ ocr_reg_init_defer = true;
+			pr_info("ocr reg init is deferred\n");
+ } else {
+ pr_err(
+ "Failed to get regulators. KTM continues. err:%d\n",
+ ret);
+ goto read_ocr_fail;
+ }
+ }
+
+ ret = sensor_mgr_init_threshold(&thresh[MSM_OCR], data->ocr_sensor_id,
+ data->ocr_temp_degC,
+ data->ocr_temp_degC - data->ocr_temp_hyst_degC,
+ ocr_notify);
+ if (ret)
+ goto read_ocr_fail;
+
+ if (!ocr_reg_init_defer)
+ ocr_enabled = true;
+ ocr_nodes_called = false;
+ /*
+ * Vote for max optimum current by default until we have made
+ * our first temp reading
+ */
+ if (ocr_enabled) {
+ ret = ocr_set_mode_all(OPTIMUM_CURRENT_MAX);
+ if (ret) {
+ pr_err("Set max optimum current failed. err:%d\n",
+ ret);
+ ocr_enabled = false;
+ }
+ }
+
+read_ocr_fail:
+ ocr_probed = true;
+ if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ ret = 0;
+ goto read_ocr_exit;
+ }
+ dev_err(
+ &pdev->dev,
+ "%s:Failed reading node=%s, key=%s err:%d. KTM continues\n",
+ __func__, node->full_name, key, ret);
+ kfree(ocr_rails);
+ ocr_rails = NULL;
+ ocr_rail_cnt = 0;
+ } else {
+ snprintf(mit_config[MSM_OCR].config_name,
+ MAX_DEBUGFS_CONFIG_LEN, "ocr");
+ mit_config[MSM_OCR].disable_config = thermal_ocr_mit_disable;
+ }
+
+read_ocr_exit:
+ return ret;
+}
+
+static int probe_psm(struct device_node *node, struct msm_thermal_data *data,
+ struct platform_device *pdev)
+{
+ int ret = 0;
+ int j = 0;
+ char *key = NULL;
+
+ psm_rails = NULL;
+
+ key = "qcom,disable-psm";
+ if (of_property_read_bool(node, key)) {
+ psm_probed = true;
+ psm_enabled = false;
+ psm_rails_cnt = 0;
+ return ret;
+ }
+
+ key = "qcom,pmic-sw-mode-temp";
+ ret = of_property_read_u32(node, key, &data->psm_temp_degC);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,pmic-sw-mode-temp-hysteresis";
+ ret = of_property_read_u32(node, key, &data->psm_temp_hyst_degC);
+ if (ret)
+ goto read_node_fail;
+
+	key = "qcom,pmic-sw-mode-regs";
+	psm_rails_cnt = of_property_count_strings(node, key);
+	if (psm_rails_cnt <= 0) {
+		pr_err("Invalid psm rail count. err:%d\n", psm_rails_cnt);
+		psm_rails_cnt = 0;
+		ret = -ENODEV;
+		goto read_node_fail;
+	}
+	psm_rails = kzalloc(sizeof(struct psm_rail) * psm_rails_cnt,
+			GFP_KERNEL);
+	if (!psm_rails) {
+		pr_err("Fail to allocate memory for psm rails\n");
+		psm_rails_cnt = 0;
+		return -ENOMEM;
+	}
+
+ for (j = 0; j < psm_rails_cnt; j++) {
+ ret = of_property_read_string_index(node, key, j,
+ &psm_rails[j].name);
+ if (ret)
+ goto read_node_fail;
+ }
+
+ if (psm_rails_cnt) {
+ ret = psm_reg_init(pdev);
+ if (ret) {
+ pr_err("Err regulator init. err:%d. KTM continues.\n",
+ ret);
+ goto read_node_fail;
+ }
+ psm_enabled = true;
+ }
+
+read_node_fail:
+ psm_probed = true;
+ if (ret) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ __func__, node->full_name, key, ret);
+ kfree(psm_rails);
+ psm_rails_cnt = 0;
+ }
+ if (ret == -EPROBE_DEFER)
+ psm_probed = false;
+ return ret;
+}
+
+static int probe_cc(struct device_node *node, struct msm_thermal_data *data,
+ struct platform_device *pdev)
+{
+ char *key = NULL;
+ int ret = 0;
+
+ if (num_possible_cpus() > 1) {
+ core_control_enabled = 1;
+ hotplug_enabled = 1;
+ }
+
+ key = "qcom,core-limit-temp";
+ ret = of_property_read_u32(node, key, &data->core_limit_temp_degC);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,core-temp-hysteresis";
+ ret = of_property_read_u32(node, key, &data->core_temp_hysteresis_degC);
+ if (ret)
+ goto read_node_fail;
+
+ key = "qcom,hotplug-temp";
+ ret = of_property_read_u32(node, key, &data->hotplug_temp_degC);
+ if (ret)
+ goto hotplug_node_fail;
+
+ key = "qcom,hotplug-temp-hysteresis";
+ ret = of_property_read_u32(node, key,
+ &data->hotplug_temp_hysteresis_degC);
+ if (ret)
+ goto hotplug_node_fail;
+
+read_node_fail:
+ if (ret) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ KBUILD_MODNAME, node->full_name, key, ret);
+ core_control_enabled = 0;
+ } else {
+ snprintf(mit_config[MSM_LIST_MAX_NR + HOTPLUG_CONFIG]
+ .config_name, MAX_DEBUGFS_CONFIG_LEN,
+ "hotplug");
+ mit_config[MSM_LIST_MAX_NR + HOTPLUG_CONFIG].disable_config
+ = thermal_cpu_hotplug_mit_disable;
+ }
+
+ return ret;
+
+hotplug_node_fail:
+ if (ret) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ KBUILD_MODNAME, node->full_name, key, ret);
+ hotplug_enabled = 0;
+ }
+
+ return ret;
+}
+
+static int probe_gfx_phase_ctrl(struct device_node *node,
+ struct msm_thermal_data *data,
+ struct platform_device *pdev)
+{
+ char *key = NULL;
+ const char *tmp_str = NULL;
+ int ret = 0;
+
+ key = "qcom,disable-gfx-phase-ctrl";
+ if (of_property_read_bool(node, key)) {
+ gfx_crit_phase_ctrl_enabled = false;
+ gfx_warm_phase_ctrl_enabled = false;
+ return ret;
+ }
+
+ key = "qcom,gfx-sensor-id";
+ ret = of_property_read_u32(node, key,
+ &data->gfx_sensor);
+ if (ret)
+ goto probe_gfx_exit;
+
+ key = "qcom,gfx-phase-resource-key";
+ ret = of_property_read_string(node, key,
+ &tmp_str);
+ if (ret)
+ goto probe_gfx_exit;
+ data->gfx_phase_request_key = msm_thermal_str_to_int(tmp_str);
+
+ key = "qcom,gfx-phase-warm-temp";
+ ret = of_property_read_u32(node, key,
+ &data->gfx_phase_warm_temp_degC);
+ if (ret) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ KBUILD_MODNAME, node->full_name, key, ret);
+ data->gfx_phase_warm_temp_degC = INT_MIN;
+ goto probe_gfx_crit;
+ }
+
+ key = "qcom,gfx-phase-warm-temp-hyst";
+ ret = of_property_read_u32(node, key,
+ &data->gfx_phase_warm_temp_hyst_degC);
+ if (ret) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ KBUILD_MODNAME, node->full_name, key, ret);
+ goto probe_gfx_crit;
+ }
+
+ ret = sensor_mgr_init_threshold(&thresh[MSM_GFX_PHASE_CTRL_WARM],
+ data->gfx_sensor,
+ data->gfx_phase_warm_temp_degC, data->gfx_phase_warm_temp_degC -
+ data->gfx_phase_warm_temp_hyst_degC,
+ gfx_phase_ctrl_notify);
+ if (ret) {
+ pr_err("init WARM threshold failed. err:%d\n", ret);
+ goto probe_gfx_crit;
+ }
+ gfx_warm_phase_ctrl_enabled = true;
+ snprintf(mit_config[MSM_GFX_PHASE_CTRL_WARM].config_name,
+ MAX_DEBUGFS_CONFIG_LEN, "gfx_phase_warm");
+ mit_config[MSM_GFX_PHASE_CTRL_WARM].disable_config
+ = thermal_gfx_phase_warm_ctrl_mit_disable;
+
+probe_gfx_crit:
+ key = "qcom,gfx-phase-hot-crit-temp";
+ ret = of_property_read_u32(node, key,
+ &data->gfx_phase_hot_temp_degC);
+ if (ret) {
+ data->gfx_phase_hot_temp_degC = INT_MAX;
+ goto probe_gfx_exit;
+ }
+
+ key = "qcom,gfx-phase-hot-crit-temp-hyst";
+ ret = of_property_read_u32(node, key,
+ &data->gfx_phase_hot_temp_hyst_degC);
+ if (ret)
+ goto probe_gfx_exit;
+
+ ret = sensor_mgr_init_threshold(&thresh[MSM_GFX_PHASE_CTRL_HOT],
+ data->gfx_sensor,
+ data->gfx_phase_hot_temp_degC, data->gfx_phase_hot_temp_degC -
+ data->gfx_phase_hot_temp_hyst_degC,
+ gfx_phase_ctrl_notify);
+ if (ret) {
+ pr_err("init HOT threshold failed. err:%d\n", ret);
+ goto probe_gfx_exit;
+ }
+
+ gfx_crit_phase_ctrl_enabled = true;
+ snprintf(mit_config[MSM_GFX_PHASE_CTRL_HOT].config_name,
+ MAX_DEBUGFS_CONFIG_LEN, "gfx_phase_crit");
+ mit_config[MSM_GFX_PHASE_CTRL_HOT].disable_config
+ = thermal_gfx_phase_crit_ctrl_mit_disable;
+
+probe_gfx_exit:
+ if (ret) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ KBUILD_MODNAME, node->full_name, key, ret);
+ }
+ return ret;
+}
+
+static int probe_cx_phase_ctrl(struct device_node *node,
+ struct msm_thermal_data *data,
+ struct platform_device *pdev)
+{
+ char *key = NULL;
+ const char *tmp_str;
+ int ret = 0;
+
+ key = "qcom,disable-cx-phase-ctrl";
+ if (of_property_read_bool(node, key)) {
+ cx_phase_ctrl_enabled = false;
+ return ret;
+ }
+
+ key = "qcom,rpm-phase-resource-type";
+ ret = of_property_read_string(node, key,
+ &tmp_str);
+ if (ret)
+ goto probe_cx_exit;
+ data->phase_rpm_resource_type = msm_thermal_str_to_int(tmp_str);
+
+ key = "qcom,rpm-phase-resource-id";
+ ret = of_property_read_u32(node, key,
+ &data->phase_rpm_resource_id);
+ if (ret)
+ goto probe_cx_exit;
+
+ key = "qcom,cx-phase-resource-key";
+ ret = of_property_read_string(node, key,
+ &tmp_str);
+ if (ret)
+ goto probe_cx_exit;
+ data->cx_phase_request_key = msm_thermal_str_to_int(tmp_str);
+
+ key = "qcom,cx-phase-hot-crit-temp";
+ ret = of_property_read_u32(node, key,
+ &data->cx_phase_hot_temp_degC);
+ if (ret)
+ goto probe_cx_exit;
+
+ key = "qcom,cx-phase-hot-crit-temp-hyst";
+ ret = of_property_read_u32(node, key,
+ &data->cx_phase_hot_temp_hyst_degC);
+ if (ret)
+ goto probe_cx_exit;
+
+ ret = sensor_mgr_init_threshold(&thresh[MSM_CX_PHASE_CTRL_HOT],
+ MONITOR_ALL_TSENS,
+ data->cx_phase_hot_temp_degC, data->cx_phase_hot_temp_degC -
+ data->cx_phase_hot_temp_hyst_degC,
+ cx_phase_ctrl_notify);
+ if (ret) {
+ pr_err("init HOT threshold failed. err:%d\n", ret);
+ goto probe_cx_exit;
+ }
+
+ cx_phase_ctrl_enabled = true;
+ snprintf(mit_config[MSM_CX_PHASE_CTRL_HOT].config_name,
+ MAX_DEBUGFS_CONFIG_LEN, "cx_phase");
+ mit_config[MSM_CX_PHASE_CTRL_HOT].disable_config
+ = thermal_cx_phase_ctrl_mit_disable;
+
+probe_cx_exit:
+ if (ret) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s err=%d. KTM continues\n",
+ KBUILD_MODNAME, node->full_name, key, ret);
+ cx_phase_ctrl_enabled = false;
+ }
+ return ret;
+}
+
+static int probe_therm_reset(struct device_node *node,
+ struct msm_thermal_data *data,
+ struct platform_device *pdev)
+{
+ char *key = NULL;
+ int ret = 0;
+
+ key = "qcom,therm-reset-temp";
+ ret = of_property_read_u32(node, key, &data->therm_reset_temp_degC);
+ if (ret)
+ goto PROBE_RESET_EXIT;
+
+ ret = sensor_mgr_init_threshold(&thresh[MSM_THERM_RESET],
+ MONITOR_ALL_TSENS,
+ data->therm_reset_temp_degC, data->therm_reset_temp_degC - 10,
+ therm_reset_notify);
+ if (ret) {
+ pr_err("Therm reset data structure init failed\n");
+ goto PROBE_RESET_EXIT;
+ }
+
+ therm_reset_enabled = true;
+ snprintf(mit_config[MSM_THERM_RESET].config_name,
+ MAX_DEBUGFS_CONFIG_LEN, "reset");
+ mit_config[MSM_THERM_RESET].disable_config
+ = thermal_reset_disable;
+
+PROBE_RESET_EXIT:
+ if (ret) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s err=%d. KTM continues\n",
+ __func__, node->full_name, key, ret);
+ therm_reset_enabled = false;
+ }
+ return ret;
+}
+
+static int probe_freq_mitigation(struct device_node *node,
+ struct msm_thermal_data *data,
+ struct platform_device *pdev)
+{
+ char *key = NULL;
+ int ret = 0;
+
+ key = "qcom,freq-mitigation-temp";
+ ret = of_property_read_u32(node, key, &data->freq_mitig_temp_degc);
+ if (ret)
+ goto PROBE_FREQ_EXIT;
+
+ key = "qcom,freq-mitigation-temp-hysteresis";
+ ret = of_property_read_u32(node, key,
+ &data->freq_mitig_temp_hysteresis_degc);
+ if (ret)
+ goto PROBE_FREQ_EXIT;
+
+ key = "qcom,freq-mitigation-value";
+ ret = of_property_read_u32(node, key, &data->freq_limit);
+ if (ret)
+ goto PROBE_FREQ_EXIT;
+
+ freq_mitigation_enabled = 1;
+ snprintf(mit_config[MSM_LIST_MAX_NR + CPUFREQ_CONFIG].config_name,
+ MAX_DEBUGFS_CONFIG_LEN, "cpufreq");
+ mit_config[MSM_LIST_MAX_NR + CPUFREQ_CONFIG].disable_config
+ = thermal_cpu_freq_mit_disable;
+
+PROBE_FREQ_EXIT:
+ if (ret) {
+ dev_info(&pdev->dev,
+ "%s:Failed reading node=%s, key=%s. err=%d. KTM continues\n",
+ __func__, node->full_name, key, ret);
+ freq_mitigation_enabled = 0;
+ }
+ return ret;
+}
+
+static void thermal_boot_config_read(struct seq_file *m, void *data)
+{
+	seq_puts(m, "---------Boot Mitigation------------\n");
+ seq_printf(m, "tsens sensor:tsens_tz_sensor%d\n",
+ msm_thermal_info.sensor_id);
+ seq_printf(m, "polling rate:%d ms\n", msm_thermal_info.poll_ms);
+ seq_printf(m, "frequency threshold:%d degC\n",
+ msm_thermal_info.limit_temp_degC);
+ seq_printf(m, "frequency threshold clear:%d degC\n",
+ msm_thermal_info.limit_temp_degC
+ - msm_thermal_info.temp_hysteresis_degC);
+ seq_printf(m, "frequency step:%d\n",
+ msm_thermal_info.bootup_freq_step);
+ seq_printf(m, "frequency mask:0x%x\n",
+ msm_thermal_info.bootup_freq_control_mask);
+ seq_printf(m, "hotplug threshold:%d degC\n",
+ msm_thermal_info.core_limit_temp_degC);
+ seq_printf(m, "hotplug threshold clear:%d degC\n",
+ msm_thermal_info.core_limit_temp_degC
+ - msm_thermal_info.core_temp_hysteresis_degC);
+ seq_printf(m, "hotplug mask:0x%x\n",
+ msm_thermal_info.core_control_mask);
+ seq_printf(m, "reset threshold:%d degC\n",
+ msm_thermal_info.therm_reset_temp_degC);
+}
+
+static void thermal_emergency_config_read(struct seq_file *m, void *data)
+{
+ int cpu = 0;
+
+ seq_puts(m, "\n---------Emergency Mitigation------------\n");
+ for_each_possible_cpu(cpu)
+ seq_printf(m, "cpu%d sensor:%s\n", cpu, cpus[cpu].sensor_type);
+ seq_printf(m, "frequency threshold:%d degC\n",
+ msm_thermal_info.freq_mitig_temp_degc);
+ seq_printf(m, "frequency threshold clr:%d degC\n",
+ msm_thermal_info.freq_mitig_temp_degc
+ - msm_thermal_info.freq_mitig_temp_hysteresis_degc);
+ seq_printf(m, "frequency value:%d KHz\n",
+ msm_thermal_info.freq_limit);
+ seq_printf(m, "frequency mask:0x%x\n",
+ msm_thermal_info.freq_mitig_control_mask);
+ seq_printf(m, "hotplug threshold:%d degC\n",
+ msm_thermal_info.hotplug_temp_degC);
+ seq_printf(m, "hotplug threshold clr:%d degC\n",
+ msm_thermal_info.hotplug_temp_degC
+ - msm_thermal_info.hotplug_temp_hysteresis_degC);
+ seq_printf(m, "hotplug mask:0x%x\n",
+ msm_thermal_info.core_control_mask);
+ seq_printf(m, "online hotplug core:%s\n", online_core
+ ? "true" : "false");
+}
+
+static void thermal_mx_config_read(struct seq_file *m, void *data)
+{
+ if (vdd_mx_enabled) {
+ seq_puts(m, "\n---------Mx Retention------------\n");
+ seq_printf(m, "threshold:%d degC\n",
+ msm_thermal_info.vdd_mx_temp_degC);
+ seq_printf(m, "threshold clear:%d degC\n",
+ msm_thermal_info.vdd_mx_temp_degC
+ + msm_thermal_info.vdd_mx_temp_hyst_degC);
+ seq_printf(m, "mx retention value:%d\n",
+ msm_thermal_info.vdd_mx_min);
+ if (vdd_cx)
+ seq_printf(m, "cx retention value:%d\n",
+ msm_thermal_info.vdd_cx_min);
+ }
+}
+
+static void thermal_vdd_config_read(struct seq_file *m, void *data)
+{
+ int i = 0;
+
+ if (vdd_rstr_enabled) {
+ seq_puts(m, "\n---------VDD restriction------------\n");
+ seq_printf(m, "threshold:%d degC\n",
+ msm_thermal_info.vdd_rstr_temp_degC);
+ seq_printf(m, "threshold clear:%d degC\n",
+ msm_thermal_info.vdd_rstr_temp_hyst_degC);
+ for (i = 0; i < rails_cnt; i++) {
+ if (!strcmp(rails[i].name, "vdd-dig")
+ && rails[i].num_levels)
+ seq_printf(m, "vdd_dig restriction value:%d\n",
+ rails[i].levels[0]);
+ if (!strcmp(rails[i].name, "vdd-gfx")
+ && rails[i].num_levels)
+ seq_printf(m, "vdd_gfx restriction value:%d\n",
+ rails[i].levels[0]);
+ if (!strcmp(rails[i].name, "vdd-apps")
+ && rails[i].num_levels)
+ seq_printf(m,
+ "vdd_apps restriction value:%d KHz\n",
+ rails[i].levels[0]);
+ }
+ }
+}
+
+static void thermal_psm_config_read(struct seq_file *m, void *data)
+{
+ if (psm_enabled) {
+ seq_puts(m, "\n------PMIC Software Mode(PSM)-------\n");
+ seq_printf(m, "threshold:%d degC\n",
+ msm_thermal_info.psm_temp_degC);
+ seq_printf(m, "threshold clear:%d degC\n",
+ msm_thermal_info.psm_temp_degC
+ - msm_thermal_info.psm_temp_hyst_degC);
+ }
+}
+
+static void thermal_ocr_config_read(struct seq_file *m, void *data)
+{
+ if (ocr_enabled) {
+ seq_puts(m, "\n-----Optimum Current Request(OCR)-----\n");
+ seq_printf(m, "threshold:%d degC\n",
+ msm_thermal_info.ocr_temp_degC);
+ seq_printf(m, "threshold clear:%d degC\n",
+ msm_thermal_info.ocr_temp_degC
+ - msm_thermal_info.ocr_temp_hyst_degC);
+ seq_printf(m, "tsens sensor:tsens_tz_sensor%d\n",
+ msm_thermal_info.ocr_sensor_id);
+ }
+}
+
+static void thermal_phase_ctrl_config_read(struct seq_file *m, void *data)
+{
+ if (cx_phase_ctrl_enabled) {
+ seq_puts(m, "\n---------Phase control------------\n");
+ seq_printf(m, "cx hot critical threshold:%d degC\n",
+ msm_thermal_info.cx_phase_hot_temp_degC);
+ seq_printf(m, "cx hot critical threshold clear:%d degC\n",
+ msm_thermal_info.cx_phase_hot_temp_degC
+ - msm_thermal_info.cx_phase_hot_temp_hyst_degC);
+ }
+ if (gfx_crit_phase_ctrl_enabled) {
+ seq_printf(m, "gfx hot critical threshold:%d degC\n",
+ msm_thermal_info.gfx_phase_hot_temp_degC);
+ seq_printf(m, "gfx hot critical threshold clear:%d degC\n",
+ msm_thermal_info.gfx_phase_hot_temp_degC
+ - msm_thermal_info.gfx_phase_hot_temp_hyst_degC);
+ }
+ if (gfx_warm_phase_ctrl_enabled) {
+ seq_printf(m, "gfx warm threshold:%d degC\n",
+ msm_thermal_info.gfx_phase_warm_temp_degC);
+ seq_printf(m, "gfx warm threshold clear:%d degC\n",
+ msm_thermal_info.gfx_phase_warm_temp_degC
+ - msm_thermal_info.gfx_phase_warm_temp_hyst_degC);
+ }
+ if (gfx_crit_phase_ctrl_enabled || gfx_warm_phase_ctrl_enabled)
+ seq_printf(m, "gfx tsens sensor:tsens_tz_sensor%d\n",
+ msm_thermal_info.gfx_sensor);
+}
+
+static void thermal_disable_all_mitigation(void)
+{
+ thermal_cpu_freq_mit_disable();
+ thermal_cpu_hotplug_mit_disable();
+ thermal_reset_disable();
+ thermal_mx_mit_disable();
+ thermal_vdd_mit_disable();
+ thermal_psm_mit_disable();
+ thermal_ocr_mit_disable();
+ thermal_cx_phase_ctrl_mit_disable();
+ thermal_gfx_phase_warm_ctrl_mit_disable();
+ thermal_gfx_phase_crit_ctrl_mit_disable();
+}
+
+static void enable_config(int config_id)
+{
+ switch (config_id) {
+ case MSM_THERM_RESET:
+ therm_reset_enabled = 1;
+ break;
+ case MSM_VDD_RESTRICTION:
+ vdd_rstr_enabled = 1;
+ break;
+ case MSM_CX_PHASE_CTRL_HOT:
+ cx_phase_ctrl_enabled = 1;
+ break;
+ case MSM_GFX_PHASE_CTRL_WARM:
+ gfx_warm_phase_ctrl_enabled = 1;
+ break;
+ case MSM_GFX_PHASE_CTRL_HOT:
+ gfx_crit_phase_ctrl_enabled = 1;
+ break;
+ case MSM_OCR:
+ ocr_enabled = 1;
+ break;
+ case MSM_VDD_MX_RESTRICTION:
+ vdd_mx_enabled = 1;
+ break;
+ case MSM_LIST_MAX_NR + HOTPLUG_CONFIG:
+ hotplug_enabled = 1;
+ break;
+ case MSM_LIST_MAX_NR + CPUFREQ_CONFIG:
+ freq_mitigation_enabled = 1;
+ break;
+ default:
+ pr_err("Bad config:%d\n", config_id);
+ break;
+ }
+}
+
+static void thermal_update_mit_threshold(
+ struct msm_thermal_debugfs_thresh_config *config, int max_mit)
+{
+ int idx = 0, i = 0;
+
+ for (idx = 0; idx < max_mit; idx++) {
+ if (!config[idx].update)
+ continue;
+ config[idx].disable_config();
+ enable_config(idx);
+ if (idx >= MSM_LIST_MAX_NR) {
+ if (idx == MSM_LIST_MAX_NR + HOTPLUG_CONFIG)
+ UPDATE_CPU_CONFIG_THRESHOLD(
+ msm_thermal_info.core_control_mask,
+ HOTPLUG_THRESHOLD_HIGH,
+ config[idx].thresh,
+ config[idx].thresh_clr);
+ else if (idx == MSM_LIST_MAX_NR + CPUFREQ_CONFIG)
+ UPDATE_CPU_CONFIG_THRESHOLD(
+ msm_thermal_info
+ .freq_mitig_control_mask,
+ FREQ_THRESHOLD_HIGH,
+ config[idx].thresh,
+ config[idx].thresh_clr);
+ } else {
+ for (i = 0; i < thresh[idx].thresh_ct; i++) {
+ thresh[idx].thresh_list[i].threshold[0].temp
+ = config[idx].thresh
+ * tsens_scaling_factor;
+ thresh[idx].thresh_list[i].threshold[1].temp
+ = config[idx].thresh_clr
+ * tsens_scaling_factor;
+ set_and_activate_threshold(
+ thresh[idx].thresh_list[i].sensor_id,
+ &thresh[idx].thresh_list[i]
+ .threshold[0]);
+ set_and_activate_threshold(
+ thresh[idx].thresh_list[i].sensor_id,
+ &thresh[idx].thresh_list[i]
+ .threshold[1]);
+ }
+ }
+ config[idx].update = 0;
+ }
+}
+
+static ssize_t thermal_config_debugfs_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+{
+ int ret = 0;
+ char config_string[MAX_DEBUGFS_CONFIG_LEN] = { '\0' };
+
+ if (!mitigation || count > (MAX_DEBUGFS_CONFIG_LEN - 1)) {
+ pr_err("Invalid parameters\n");
+ return -EINVAL;
+ }
+
+ if (copy_from_user(config_string, buffer, count)) {
+ pr_err("Error reading debugfs command\n");
+ ret = -EFAULT;
+ goto exit_debugfs_write;
+ }
+ pr_debug("Debugfs config command string: %s\n", config_string);
+ if (!strcmp(config_string, DEBUGFS_DISABLE_ALL_MIT)) {
+ mitigation = 0;
+ pr_err("KTM mitigations disabled via debugfs\n");
+ thermal_disable_all_mitigation();
+ } else if (!strcmp(config_string, DEBUGFS_CONFIG_UPDATE)) {
+ thermal_update_mit_threshold(mit_config, MSM_LIST_MAX_NR
+ + MAX_CPU_CONFIG);
+ }
+
+exit_debugfs_write:
+ if (!ret)
+ return count;
+ return ret;
+}
+
+static int thermal_config_debugfs_read(struct seq_file *m, void *data)
+{
+ if (!mitigation) {
+ seq_puts(m, "KTM Mitigations Disabled\n");
+ return 0;
+ }
+ thermal_boot_config_read(m, data);
+ thermal_emergency_config_read(m, data);
+ thermal_mx_config_read(m, data);
+ thermal_vdd_config_read(m, data);
+ thermal_psm_config_read(m, data);
+ thermal_ocr_config_read(m, data);
+ thermal_phase_ctrl_config_read(m, data);
+
+ return 0;
+}
+
+static int msm_thermal_dev_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ char *key = NULL;
+ struct device_node *node = pdev->dev.of_node;
+ struct msm_thermal_data data;
+
+ if (!mitigation)
+ return ret;
+
+ memset(&data, 0, sizeof(struct msm_thermal_data));
+ data.pdev = pdev;
+
+ ret = msm_thermal_pre_init(&pdev->dev);
+ if (ret) {
+ pr_err("thermal pre init failed. err:%d\n", ret);
+ goto fail;
+ }
+
+ key = "qcom,sensor-id";
+ ret = of_property_read_u32(node, key, &data.sensor_id);
+ if (ret)
+ goto fail;
+
+ key = "qcom,poll-ms";
+ ret = of_property_read_u32(node, key, &data.poll_ms);
+ if (ret)
+ goto fail;
+
+ key = "qcom,limit-temp";
+ ret = of_property_read_u32(node, key, &data.limit_temp_degC);
+ if (ret)
+ goto fail;
+
+ key = "qcom,temp-hysteresis";
+ ret = of_property_read_u32(node, key, &data.temp_hysteresis_degC);
+ if (ret)
+ goto fail;
+
+ key = "qcom,freq-step";
+ ret = of_property_read_u32(node, key, &data.bootup_freq_step);
+ if (ret)
+ goto fail;
+
+ key = "qcom,online-hotplug-core";
+ if (of_property_read_bool(node, key))
+ online_core = true;
+ else
+ online_core = false;
+
+ probe_sensor_info(node, &data, pdev);
+ ret = probe_cc(node, &data, pdev);
+
+ ret = probe_freq_mitigation(node, &data, pdev);
+ ret = probe_cx_phase_ctrl(node, &data, pdev);
+ ret = probe_gfx_phase_ctrl(node, &data, pdev);
+ ret = probe_therm_reset(node, &data, pdev);
+
+ ret = probe_vdd_mx(node, &data, pdev);
+ if (ret == -EPROBE_DEFER)
+ goto fail;
+ /*
+ * Probe optional properties below. Call probe_psm before
+ * probe_vdd_rstr because rpm_regulator_get has to be called
+	 * before devm_regulator_get.
+	 * probe_ocr should be called after probe_vdd_rstr to reuse the
+	 * regulator handle. Calling devm_regulator_get more than once
+	 * will fail.
+ */
+ ret = probe_psm(node, &data, pdev);
+ if (ret == -EPROBE_DEFER)
+ goto fail;
+
+ update_cpu_topology(&pdev->dev);
+ ret = probe_vdd_rstr(node, &data, pdev);
+ if (ret == -EPROBE_DEFER)
+ goto fail;
+ ret = probe_ocr(node, &data, pdev);
+
+	ret = fetch_cpu_mitigation_info(&data, pdev);
+ if (ret) {
+ pr_err("Error fetching CPU mitigation information. err:%d\n",
+ ret);
+ goto probe_exit;
+ }
+
+ /*
+	 * In case the sysfs add-node calls came in before the probe
+	 * function, make sure the sysfs nodes are created again.
+ */
+ if (psm_nodes_called) {
+ msm_thermal_add_psm_nodes();
+ psm_nodes_called = false;
+ }
+ if (vdd_rstr_nodes_called) {
+ msm_thermal_add_vdd_rstr_nodes();
+ vdd_rstr_nodes_called = false;
+ }
+ if (sensor_info_nodes_called) {
+ msm_thermal_add_sensor_info_nodes();
+ sensor_info_nodes_called = false;
+ }
+ if (ocr_nodes_called) {
+ msm_thermal_add_ocr_nodes();
+ ocr_nodes_called = false;
+ }
+ if (cluster_info_nodes_called) {
+ create_cpu_topology_sysfs();
+ cluster_info_nodes_called = false;
+ }
+ msm_thermal_ioctl_init();
+ ret = msm_thermal_init(&data);
+ msm_thermal_probed = true;
+
+ if (interrupt_mode_enable) {
+ interrupt_mode_init();
+ interrupt_mode_enable = false;
+ }
+
+ return ret;
+fail:
+ if (ret)
+ pr_err("Failed reading node=%s, key=%s. err:%d\n",
+ node->full_name, key, ret);
+probe_exit:
+ return ret;
+}
+
+static int msm_thermal_dev_exit(struct platform_device *inp_dev)
+{
+	int i = 0;
+	uint32_t cpu = 0;
+	uint32_t _cluster = 0;
+ struct cluster_info *cluster_ptr = NULL;
+ struct uio_info *info = dev_get_drvdata(&inp_dev->dev);
+ struct rail *r = NULL;
+
+ uio_unregister_device(info);
+ unregister_reboot_notifier(&msm_thermal_reboot_notifier);
+ if (msm_therm_debugfs && msm_therm_debugfs->parent)
+ debugfs_remove_recursive(msm_therm_debugfs->parent);
+ msm_thermal_ioctl_cleanup();
+ if (thresh) {
+ if (vdd_rstr_enabled) {
+ sensor_mgr_remove_threshold(
+ &thresh[MSM_VDD_RESTRICTION]);
+ kfree(thresh[MSM_VDD_RESTRICTION].thresh_list);
+ for (i = 0; i < rails_cnt; i++) {
+ if (!rails[i].freq_req)
+ continue;
+ r = &rails[i];
+				for_each_possible_cpu(cpu) {
+					devmgr_unregister_mitigation_client(
+						&msm_thermal_info.pdev->dev,
+						r->device_handle[cpu]);
+					r->device_handle[cpu] = NULL;
+ }
+ }
+ kfree(rails);
+ }
+ if (cx_phase_ctrl_enabled) {
+ sensor_mgr_remove_threshold(
+ &thresh[MSM_CX_PHASE_CTRL_HOT]);
+ kfree(thresh[MSM_CX_PHASE_CTRL_HOT].thresh_list);
+ }
+ if (gfx_warm_phase_ctrl_enabled) {
+ sensor_mgr_remove_threshold(
+ &thresh[MSM_GFX_PHASE_CTRL_WARM]);
+ kfree(thresh[MSM_GFX_PHASE_CTRL_WARM].thresh_list);
+ }
+ if (gfx_crit_phase_ctrl_enabled) {
+ sensor_mgr_remove_threshold(
+ &thresh[MSM_GFX_PHASE_CTRL_HOT]);
+ kfree(thresh[MSM_GFX_PHASE_CTRL_HOT].thresh_list);
+ }
+ if (ocr_enabled) {
+ for (i = 0; i < ocr_rail_cnt; i++)
+ kfree(ocr_rails[i].attr_gp.attrs);
+ kfree(ocr_rails);
+ ocr_rails = NULL;
+ sensor_mgr_remove_threshold(
+ &thresh[MSM_OCR]);
+ kfree(thresh[MSM_OCR].thresh_list);
+ }
+ if (vdd_mx_enabled) {
+ kfree(mx_kobj);
+ kfree(mx_attr_group.attrs);
+ sensor_mgr_remove_threshold(
+ &thresh[MSM_VDD_MX_RESTRICTION]);
+ kfree(thresh[MSM_VDD_MX_RESTRICTION].thresh_list);
+ }
+ kfree(thresh);
+ thresh = NULL;
+ }
+ kfree(table);
+ if (core_ptr) {
+ for (; _cluster < core_ptr->entity_count; _cluster++) {
+ cluster_ptr = &core_ptr->child_entity_ptr[_cluster];
+ kfree(cluster_ptr->freq_table);
+ }
+ }
+
+ return 0;
+}
+
+static int __init ktm_params(char *str)
+{
+ if (str != NULL && !strcmp(str, "disable")) {
+ pr_info("KTM Disabled at Boot\n");
+ mitigation = 0;
+ }
+
+ return 0;
+}
+
+early_param("qcomthermal", ktm_params);
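+
+/*
+ * Illustrative usage (a sketch only, not part of the driver itself): KTM
+ * mitigation can be disabled at boot by appending the parameter below to
+ * the kernel command line:
+ *
+ *	qcomthermal=disable
+ */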
+
+static const struct of_device_id msm_thermal_match_table[] = {
+ {.compatible = "qcom,msm-thermal"},
+ {},
+};
+
+static struct platform_driver msm_thermal_device_driver = {
+ .probe = msm_thermal_dev_probe,
+ .driver = {
+ .name = "msm-thermal",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_thermal_match_table,
+ },
+ .remove = msm_thermal_dev_exit,
+};
+
+int __init msm_thermal_device_init(void)
+{
+ return platform_driver_register(&msm_thermal_device_driver);
+}
+arch_initcall(msm_thermal_device_init);
+
+int __init msm_thermal_late_init(void)
+{
+ if (!msm_thermal_probed)
+ return 0;
+
+ if (num_possible_cpus() > 1)
+ msm_thermal_add_cc_nodes();
+ msm_thermal_add_psm_nodes();
+ msm_thermal_add_vdd_rstr_nodes();
+ msm_thermal_add_sensor_info_nodes();
+ if (ocr_reg_init_defer) {
+ if (!ocr_reg_init(msm_thermal_info.pdev)) {
+ ocr_enabled = true;
+ msm_thermal_add_ocr_nodes();
+ }
+ }
+ msm_thermal_add_mx_nodes();
+ interrupt_mode_init();
+ create_cpu_topology_sysfs();
+ create_thermal_debugfs();
+ msm_thermal_add_bucket_info_nodes();
+ uio_init(msm_thermal_info.pdev);
+
+ return 0;
+}
+late_initcall(msm_thermal_late_init);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 5d1071b372fd..60455d9a28d2 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -271,8 +271,8 @@ static void init_sensor_trip(struct sensor_info *sensor)
static int __update_sensor_thresholds(struct sensor_info *sensor)
{
- long max_of_low_thresh = LONG_MIN;
- long min_of_high_thresh = LONG_MAX;
+ int max_of_low_thresh = INT_MIN;
+ int min_of_high_thresh = INT_MAX;
struct sensor_threshold *pos, *var;
int ret = 0;
@@ -300,7 +300,7 @@ static int __update_sensor_thresholds(struct sensor_info *sensor)
}
}
- pr_debug("sensor %d: Thresholds: max of low: %ld min of high: %ld\n",
+ pr_debug("sensor %d: Thresholds: max of low: %d min of high: %d\n",
sensor->sensor_id, max_of_low_thresh,
min_of_high_thresh);
@@ -346,7 +346,7 @@ static int __update_sensor_thresholds(struct sensor_info *sensor)
goto update_done;
}
- pr_debug("sensor %d: low: %ld high: %ld\n",
+ pr_debug("sensor %d: low: %d high: %d\n",
sensor->sensor_id,
sensor->threshold_min, sensor->threshold_max);
@@ -422,7 +422,7 @@ int thermal_sensor_trip(struct thermal_zone_device *tz,
}
EXPORT_SYMBOL(thermal_sensor_trip);
-int sensor_get_temp(uint32_t sensor_id, long *temp)
+int sensor_get_temp(uint32_t sensor_id, int *temp)
{
struct sensor_info *sensor = get_sensor(sensor_id);
int ret = 0;
@@ -565,8 +565,8 @@ int sensor_init(struct thermal_zone_device *tz)
sensor->sensor_id = tz->id;
sensor->tz = tz;
- sensor->threshold_min = LONG_MIN;
- sensor->threshold_max = LONG_MAX;
+ sensor->threshold_min = INT_MIN;
+ sensor->threshold_max = INT_MAX;
sensor->max_idx = -1;
sensor->min_idx = -1;
mutex_init(&sensor->lock);
diff --git a/include/linux/msm_thermal.h b/include/linux/msm_thermal.h
new file mode 100644
index 000000000000..ab9f70002630
--- /dev/null
+++ b/include/linux/msm_thermal.h
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_THERMAL_H
+#define __MSM_THERMAL_H
+
+#include <linux/thermal.h>
+
+#define MAX_THRESHOLD 2
+#define TSENS_NAME_MAX 20
+#define MONITOR_ALL_TSENS -1
+#define HOTPLUG_DEVICE "hotplug"
+#define CPU0_DEVICE "cpu0"
+#define CPU1_DEVICE "cpu1"
+#define CPU2_DEVICE "cpu2"
+#define CPU3_DEVICE "cpu3"
+#define CPU4_DEVICE "cpu4"
+#define CPU5_DEVICE "cpu5"
+#define CPU6_DEVICE "cpu6"
+#define CPU7_DEVICE "cpu7"
+#define CPUFREQ_MAX_NO_MITIGATION UINT_MAX
+#define CPUFREQ_MIN_NO_MITIGATION 0
+#define HOTPLUG_NO_MITIGATION(_mask) cpumask_clear(_mask)
+
+#define IS_HI_THRESHOLD_SET(_val) (_val & 1)
+#define IS_LOW_THRESHOLD_SET(_val) (_val & 2)
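+
+/*
+ * Illustrative interpretation (a sketch only): sensor_mgr_init_threshold
+ * below returns a bit mask on success, so a return value of 3 means both
+ * the high and the low thresholds were set.
+ */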
+
+struct msm_thermal_data {
+ struct platform_device *pdev;
+ uint32_t sensor_id;
+ uint32_t poll_ms;
+ int32_t limit_temp_degC;
+ int32_t temp_hysteresis_degC;
+ uint32_t bootup_freq_step;
+ uint32_t bootup_freq_control_mask;
+ int32_t core_limit_temp_degC;
+ int32_t core_temp_hysteresis_degC;
+ int32_t hotplug_temp_degC;
+ int32_t hotplug_temp_hysteresis_degC;
+ uint32_t core_control_mask;
+ uint32_t freq_mitig_temp_degc;
+ uint32_t freq_mitig_temp_hysteresis_degc;
+ uint32_t freq_mitig_control_mask;
+ uint32_t freq_limit;
+ int32_t vdd_rstr_temp_degC;
+ int32_t vdd_rstr_temp_hyst_degC;
+ int32_t vdd_mx_min;
+ int32_t vdd_cx_min;
+ int32_t psm_temp_degC;
+ int32_t psm_temp_hyst_degC;
+ int32_t ocr_temp_degC;
+ int32_t ocr_temp_hyst_degC;
+ uint32_t ocr_sensor_id;
+ int32_t phase_rpm_resource_type;
+ int32_t phase_rpm_resource_id;
+ int32_t gfx_phase_warm_temp_degC;
+ int32_t gfx_phase_warm_temp_hyst_degC;
+ int32_t gfx_phase_hot_temp_degC;
+ int32_t gfx_phase_hot_temp_hyst_degC;
+ int32_t gfx_sensor;
+ int32_t gfx_phase_request_key;
+ int32_t cx_phase_hot_temp_degC;
+ int32_t cx_phase_hot_temp_hyst_degC;
+ int32_t cx_phase_request_key;
+ int32_t vdd_mx_temp_degC;
+ int32_t vdd_mx_temp_hyst_degC;
+ int32_t therm_reset_temp_degC;
+};
+
+enum sensor_id_type {
+ THERM_ZONE_ID,
+ THERM_TSENS_ID,
+ THERM_ID_MAX_NR,
+};
+
+struct threshold_info;
+struct therm_threshold {
+ int32_t sensor_id;
+ enum sensor_id_type id_type;
+ struct sensor_threshold threshold[MAX_THRESHOLD];
+ int32_t trip_triggered;
+ void (*notify)(struct therm_threshold *);
+ struct threshold_info *parent;
+};
+
+struct threshold_info {
+ uint32_t thresh_ct;
+ bool thresh_triggered;
+ struct list_head list_ptr;
+ struct therm_threshold *thresh_list;
+};
+
+enum device_req_type {
+ DEVICE_REQ_NONE = -1,
+ HOTPLUG_MITIGATION_REQ,
+ CPUFREQ_MITIGATION_REQ,
+ DEVICE_REQ_MAX,
+};
+
+/**
+ * For a frequency mitigation request, if the client is interested
+ * in only one of max_freq or min_freq, it must still populate the
+ * other field with its default value when making the request.
+ * Default values for the request structure variables:
+ * max_freq = UINT_MAX;
+ * min_freq = 0;
+ * offline_mask = CPU_MASK_NONE;
+ */
+struct cpufreq_request {
+ uint32_t max_freq;
+ uint32_t min_freq;
+};
+
+union device_request {
+ struct cpufreq_request freq;
+ cpumask_t offline_mask;
+};
+
+struct device_clnt_data;
+struct device_manager_data {
+ char device_name[TSENS_NAME_MAX];
+ union device_request active_req;
+ struct list_head client_list;
+ struct list_head dev_ptr;
+ struct mutex clnt_lock;
+ int (*request_validate)(struct device_clnt_data *,
+ union device_request *,
+ enum device_req_type);
+ int (*update)(struct device_manager_data *);
+ void *data;
+};
+
+struct device_clnt_data {
+ struct device_manager_data *dev_mgr;
+ bool req_active;
+ union device_request request;
+ struct list_head clnt_ptr;
+ void (*callback)(struct device_clnt_data *,
+ union device_request *req, void *);
+ void *usr_data;
+};
+
+#ifdef CONFIG_THERMAL_MONITOR
+extern int msm_thermal_ioctl_init(void);
+extern void msm_thermal_ioctl_cleanup(void);
+extern int msm_thermal_init(struct msm_thermal_data *pdata);
+extern int msm_thermal_device_init(void);
+extern int msm_thermal_set_frequency(uint32_t cpu, uint32_t freq,
+ bool is_max);
+extern int msm_thermal_set_cluster_freq(uint32_t cluster, uint32_t freq,
+ bool is_max);
+extern int msm_thermal_get_freq_plan_size(uint32_t cluster,
+ unsigned int *table_len);
+extern int msm_thermal_get_cluster_freq_plan(uint32_t cluster,
+ unsigned int *table_ptr);
+extern int msm_thermal_get_cluster_voltage_plan(uint32_t cluster,
+ uint32_t *table_ptr);
+/**
+ * sensor_mgr_init_threshold - Initialize thresholds data structure for
+ * sensor(s) with high and low thresholds and
+ * threshold callback.
+ *
+ * @thresh_inp: Client threshold data structure.
+ * @sensor_id: Sensor h/w ID to be monitored. Use MONITOR_ALL_TSENS
+ * to monitor all temperature sensors.
+ *
+ * @high_temp: Trigger threshold value for sensor_id or all sensors.
+ * @low_temp: Clear threshold value for sensor_id or all sensors.
+ * @callback: Callback pointer for threshold notification.
+ *
+ * Returns which threshold is set on success, or a negative error number
+ * on failure. The IS_HI_THRESHOLD_SET/IS_LOW_THRESHOLD_SET macros can be
+ * used to decipher which threshold was set.
+ */
+extern int sensor_mgr_init_threshold(struct threshold_info *thresh_inp,
+ int sensor_id, int32_t high_temp,
+ int32_t low_temp,
+ void (*callback)(struct therm_threshold *));
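+/*
+ * Illustrative call sequence (a sketch only; my_thresh, my_thresh_notify
+ * and the 85/75 degC trip values are hypothetical client definitions):
+ *
+ *	ret = sensor_mgr_init_threshold(&my_thresh, MONITOR_ALL_TSENS,
+ *			85, 75, my_thresh_notify);
+ *	if (ret < 0)
+ *		pr_err("threshold init failed. err:%d\n", ret);
+ *	else if (IS_HI_THRESHOLD_SET(ret))
+ *		pr_debug("high threshold armed\n");
+ */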
+/**
+ * sensor_mgr_convert_id_and_set_threshold - Accepts a sensor h/w ID,
+ *                                           converts it to the sensor zone
+ *                                           ID, and sets thermal thresholds
+ *                                           for the sensors listed in the
+ *                                           threshold info.
+ *
+ * @thresh_inp: Client threshold data structure.
+ *
+ * Returns zero on success, negative error number on failure.
+ */
+extern int sensor_mgr_convert_id_and_set_threshold(
+ struct threshold_info *thresh_inp);
+/**
+ * sensor_mgr_set_threshold - Sets thermal threshold trips for a sensor.
+ *
+ * @zone_id: Thermal zone ID for the sensor.
+ * @threshold: threshold info for the sensor.
+ *
+ * Returns zero on success, negative error number on failure.
+ */
+extern int sensor_mgr_set_threshold(uint32_t zone_id,
+ struct sensor_threshold *threshold);
+/**
+ * sensor_mgr_remove_threshold - Cancels threshold notification and
+ *                               removes the threshold from the sensor
+ *                               manager threshold list.
+ *
+ * @thresh_inp: The threshold info which needs to be removed.
+ */
+extern void sensor_mgr_remove_threshold(struct threshold_info *thresh_inp);
+/**
+ * devmgr_register_mitigation_client - Register a client for a device and
+ *                                     get a handle for mitigation.
+ * @dev: Client device structure.
+ * @device_name: Mitigation device name which the client is interested
+ *               in mitigating.
+ * @callback: Optional callback pointer for device change notification,
+ *            otherwise pass NULL.
+ *
+ * Returns a client handle structure for that device on success, or an
+ * error pointer (check with IS_ERR()) containing the error number.
+ */
+extern struct device_clnt_data *devmgr_register_mitigation_client(
+ struct device *dev,
+ const char *device_name,
+ void (*callback)(struct device_clnt_data *,
+ union device_request *, void *));
+/**
+ * devmgr_client_request_mitigation - Set a valid mitigation for a
+ *                                    registered device.
+ * @clnt: Client handle for the device.
+ * @type: Type of device request, as enumerated above.
+ * @req: Valid mitigation request.
+ *
+ * Returns zero on successful mitigation update, or negative error number.
+ */
+extern int devmgr_client_request_mitigation(struct device_clnt_data *clnt,
+ enum device_req_type type,
+ union device_request *req);
+/**
+ * devmgr_unregister_mitigation_client - Unregister mitigation device
+ * @dev: Client device structure.
+ * @clnt: Client handle for device.
+ */
+extern void devmgr_unregister_mitigation_client(
+ struct device *dev,
+ struct device_clnt_data *clnt);
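+/*
+ * Illustrative mitigation sequence (a sketch only; pdev and the 1200000
+ * kHz frequency cap are hypothetical):
+ *
+ *	struct device_clnt_data *clnt;
+ *	union device_request req;
+ *
+ *	clnt = devmgr_register_mitigation_client(&pdev->dev, CPU0_DEVICE,
+ *			NULL);
+ *	if (IS_ERR(clnt))
+ *		return PTR_ERR(clnt);
+ *	req.freq.max_freq = 1200000;
+ *	req.freq.min_freq = CPUFREQ_MIN_NO_MITIGATION;
+ *	devmgr_client_request_mitigation(clnt, CPUFREQ_MITIGATION_REQ, &req);
+ *	devmgr_unregister_mitigation_client(&pdev->dev, clnt);
+ */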
+#else
+static inline int msm_thermal_init(struct msm_thermal_data *pdata)
+{
+ return -ENOSYS;
+}
+static inline int msm_thermal_device_init(void)
+{
+ return -ENOSYS;
+}
+static inline int msm_thermal_set_frequency(uint32_t cpu, uint32_t freq,
+ bool is_max)
+{
+ return -ENOSYS;
+}
+static inline int msm_thermal_set_cluster_freq(uint32_t cluster, uint32_t freq,
+ bool is_max)
+{
+ return -ENOSYS;
+}
+static inline int msm_thermal_get_freq_plan_size(uint32_t cluster,
+ unsigned int *table_len)
+{
+ return -ENOSYS;
+}
+static inline int msm_thermal_get_cluster_freq_plan(uint32_t cluster,
+ unsigned int *table_ptr)
+{
+ return -ENOSYS;
+}
+static inline int msm_thermal_get_cluster_voltage_plan(uint32_t cluster,
+ uint32_t *table_ptr)
+{
+ return -ENOSYS;
+}
+static inline int sensor_mgr_init_threshold(struct threshold_info *thresh_inp,
+ int sensor_id, int32_t high_temp,
+ int32_t low_temp,
+ void (*callback)(struct therm_threshold *))
+{
+ return -ENOSYS;
+}
+static inline int sensor_mgr_convert_id_and_set_threshold(
+ struct threshold_info *thresh_inp)
+{
+ return -ENOSYS;
+}
+static inline int sensor_mgr_set_threshold(uint32_t zone_id,
+ struct sensor_threshold *threshold)
+{
+ return -ENOSYS;
+}
+static inline void sensor_mgr_remove_threshold(
+ struct threshold_info *thresh_inp)
+{
+}
+static inline struct device_clnt_data *devmgr_register_mitigation_client(
+ struct device *dev,
+ const char *device_name,
+ void (*callback)(struct device_clnt_data *,
+ union device_request *, void *))
+{
+ return NULL;
+}
+static inline int devmgr_client_request_mitigation(
+ struct device_clnt_data *clnt,
+ enum device_req_type type,
+ union device_request *req)
+{
+ return -ENOSYS;
+}
+static inline void devmgr_unregister_mitigation_client(
+ struct device *dev,
+ struct device_clnt_data *clnt)
+{
+}
+#endif
+
+#endif /*__MSM_THERMAL_H*/
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 34016d986815..e7f3180bcb95 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -165,8 +165,8 @@ struct sensor_threshold {
struct sensor_info {
uint32_t sensor_id;
struct thermal_zone_device *tz;
- long threshold_min;
- long threshold_max;
+ int threshold_min;
+ int threshold_max;
int max_idx;
int min_idx;
struct list_head sensor_list;
@@ -450,7 +450,7 @@ struct thermal_instance *get_thermal_instance(struct thermal_zone_device *,
void thermal_cdev_update(struct thermal_cooling_device *);
void thermal_notify_framework(struct thermal_zone_device *, int);
-int sensor_get_temp(uint32_t sensor_id, long *temp);
+int sensor_get_temp(uint32_t sensor_id, int *temp);
int sensor_get_id(char *name);
int sensor_set_trip(uint32_t sensor_id, struct sensor_threshold *threshold);
int sensor_cancel_trip(uint32_t sensor_id, struct sensor_threshold *threshold);
diff --git a/include/uapi/linux/msm_thermal_ioctl.h b/include/uapi/linux/msm_thermal_ioctl.h
new file mode 100644
index 000000000000..18caab69ca1e
--- /dev/null
+++ b/include/uapi/linux/msm_thermal_ioctl.h
@@ -0,0 +1,92 @@
+#ifndef _MSM_THERMAL_IOCTL_H
+#define _MSM_THERMAL_IOCTL_H
+
+#include <linux/ioctl.h>
+
+#define MSM_THERMAL_IOCTL_NAME "msm_thermal_query"
+#define MSM_IOCTL_FREQ_SIZE 16
+
+struct __attribute__((__packed__)) cpu_freq_arg {
+ uint32_t cpu_num;
+ uint32_t freq_req;
+};
+
+struct __attribute__((__packed__)) clock_plan_arg {
+ uint32_t cluster_num;
+	/*
+	** A value of zero for freq_table_len will fetch the length of the
+	** cluster frequency table. A non-zero value will fetch the frequency
+	** table contents.
+	*/
+ uint32_t freq_table_len;
+	/*
+	** For clusters with a frequency table length greater than
+	** MSM_IOCTL_FREQ_SIZE, the frequency table is fetched from the kernel
+	** in multiple sets or iterations. The set_idx variable indicates
+	** which set/part of the frequency table the user is requesting.
+	** The set index value starts from zero. A set index value of 'Z'
+	** will fetch MSM_IOCTL_FREQ_SIZE or the maximum available number of
+	** frequency values (if it is less than MSM_IOCTL_FREQ_SIZE)
+	** from the frequency table, starting from the index
+	** (Z * MSM_IOCTL_FREQ_SIZE).
+	** For example, in a device supporting 19 different frequencies, a set
+	** index value of 0 will fetch the first 16 (MSM_IOCTL_FREQ_SIZE)
+	** frequencies starting from index 0, and a set value of 1 will fetch
+	** the remaining 3 frequencies starting from index 16.
+	** A successful get will populate freq_table_len with the
+	** number of frequency table entries fetched.
+	*/
+ uint32_t set_idx;
+ unsigned int freq_table[MSM_IOCTL_FREQ_SIZE];
+};
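+
+/*
+ * Illustrative userspace fetch loop (a sketch only; fd is a hypothetical
+ * descriptor for the msm_thermal_query device node):
+ *
+ *	struct msm_thermal_ioctl q = { .size = sizeof(q) };
+ *	uint32_t total, set;
+ *
+ *	q.clock_freq.cluster_num = 0;
+ *	q.clock_freq.freq_table_len = 0;
+ *	ioctl(fd, MSM_THERMAL_GET_CLUSTER_FREQUENCY_PLAN, &q);
+ *	total = q.clock_freq.freq_table_len;
+ *	for (set = 0; set * MSM_IOCTL_FREQ_SIZE < total; set++) {
+ *		q.clock_freq.freq_table_len = total;
+ *		q.clock_freq.set_idx = set;
+ *		ioctl(fd, MSM_THERMAL_GET_CLUSTER_FREQUENCY_PLAN, &q);
+ *		(q.clock_freq.freq_table now holds freq_table_len entries)
+ *	}
+ */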
+
+struct __attribute__((__packed__)) voltage_plan_arg {
+ uint32_t cluster_num;
+ uint32_t voltage_table_len;
+ uint32_t set_idx;
+ uint32_t voltage_table[MSM_IOCTL_FREQ_SIZE];
+};
+
+struct __attribute__((__packed__)) msm_thermal_ioctl {
+ uint32_t size;
+ union {
+ struct cpu_freq_arg cpu_freq;
+ struct clock_plan_arg clock_freq;
+ struct voltage_plan_arg voltage;
+ };
+};
+
+enum {
+ /*Set CPU Frequency*/
+ MSM_SET_CPU_MAX_FREQ = 0x00,
+ MSM_SET_CPU_MIN_FREQ = 0x01,
+ /*Set cluster frequency*/
+ MSM_SET_CLUSTER_MAX_FREQ = 0x02,
+ MSM_SET_CLUSTER_MIN_FREQ = 0x03,
+ /*Get cluster frequency plan*/
+ MSM_GET_CLUSTER_FREQ_PLAN = 0x04,
+ /*Get cluster voltage plan */
+ MSM_GET_CLUSTER_VOLTAGE_PLAN = 0x05,
+ MSM_CMD_MAX_NR,
+};
+
+#define MSM_THERMAL_MAGIC_NUM 0xCA /*Unique magic number*/
+
+#define MSM_THERMAL_SET_CPU_MAX_FREQUENCY _IOW(MSM_THERMAL_MAGIC_NUM,\
+ MSM_SET_CPU_MAX_FREQ, struct msm_thermal_ioctl)
+
+#define MSM_THERMAL_SET_CPU_MIN_FREQUENCY _IOW(MSM_THERMAL_MAGIC_NUM,\
+ MSM_SET_CPU_MIN_FREQ, struct msm_thermal_ioctl)
+
+#define MSM_THERMAL_SET_CLUSTER_MAX_FREQUENCY _IOW(MSM_THERMAL_MAGIC_NUM,\
+ MSM_SET_CLUSTER_MAX_FREQ, struct msm_thermal_ioctl)
+
+#define MSM_THERMAL_SET_CLUSTER_MIN_FREQUENCY _IOW(MSM_THERMAL_MAGIC_NUM,\
+ MSM_SET_CLUSTER_MIN_FREQ, struct msm_thermal_ioctl)
+
+#define MSM_THERMAL_GET_CLUSTER_FREQUENCY_PLAN _IOR(MSM_THERMAL_MAGIC_NUM,\
+ MSM_GET_CLUSTER_FREQ_PLAN, struct msm_thermal_ioctl)
+
+#define MSM_THERMAL_GET_CLUSTER_VOLTAGE_PLAN _IOR(MSM_THERMAL_MAGIC_NUM,\
+ MSM_GET_CLUSTER_VOLTAGE_PLAN, struct msm_thermal_ioctl)
+#endif