path: root/drivers/cpuidle/lpm-levels.h
/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
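
/*
 * lpm-levels.h - data structures describing the low power mode (LPM)
 * hierarchy (per-CPU levels and cluster levels) used by the lpm-levels
 * cpuidle driver.
 */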

#include <soc/qcom/pm.h>
#include <soc/qcom/spm.h>

#define NR_LPM_LEVELS 8
#define MAXSAMPLES 5
#define CLUST_SMPL_INVLD_TIME 40000

extern bool use_psci;

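/* Mapping between a low power mode value and its name string. */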
struct lpm_lookup_table {
	uint32_t modes;
	const char *mode_name;
};

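/* Power and latency characteristics of a low power level. */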
struct power_params {
	uint32_t latency_us;		/* Enter + Exit latency */
	uint32_t ss_power;		/* Steady state power */
	uint32_t energy_overhead;	/* Enter + exit over head */
	uint32_t time_overhead_us;	/* Enter + exit overhead */
	uint32_t residencies[NR_LPM_LEVELS];
	uint32_t min_residency;
	uint32_t max_residency;
};

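/* One CPU low power level and its entry parameters. */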
struct lpm_cpu_level {
	const char *name;
	enum msm_pm_sleep_mode mode;
	bool use_bc_timer;
	struct power_params pwr;
	unsigned int psci_id;
	bool is_reset;
	bool jtag_save_restore;
	bool hyp_psci;
	int reset_level;
};

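/* The set of low power levels available to the CPUs of one cluster. */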
struct lpm_cpu {
	struct lpm_cpu_level levels[NR_LPM_LEVELS];
	int nlevels;
	unsigned int psci_mode_shift;
	unsigned int psci_mode_mask;
	struct lpm_cluster *parent;
};

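/* Sysfs knobs that enable or disable a level for idle and for suspend. */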
struct lpm_level_avail {
	bool idle_enabled;
	bool suspend_enabled;
	struct kobject *kobj;
	struct kobj_attribute idle_enabled_attr;
	struct kobj_attribute suspend_enabled_attr;
	void *data;
	int idx;
	bool cpu_node;
};

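/* One cluster-wide low power level. */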
struct lpm_cluster_level {
	const char *level_name;
	int *mode;			/* SPM mode to enter */
	int min_child_level;
	struct cpumask num_cpu_votes;
	struct power_params pwr;
	bool notify_rpm;
	bool disable_dynamic_routing;
	bool sync_level;
	bool last_core_only;
	struct lpm_level_avail available;
	unsigned int psci_id;
	bool is_reset;
	int reset_level;
};

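/* Hook used to program a low power mode on one SPM device. */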
struct low_power_ops {
	struct msm_spm_device *spm;
	int (*set_mode)(struct low_power_ops *ops, int mode, bool notify_rpm);
	enum msm_pm_l2_scm_flag tz_flag;
};

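/* Ring buffer of recent cluster sleep samples (residency, mode, entry time). */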
struct cluster_history {
	uint32_t resi[MAXSAMPLES];
	int mode[MAXSAMPLES];
	int64_t stime[MAXSAMPLES];
	uint32_t hptr;
	uint32_t hinvalid;
	uint32_t htmr_wkup;
	uint64_t entry_time;
	int entry_idx;
	int nsamp;
	int flag;
};

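/* A node in the cluster topology parsed from the device tree. */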
struct lpm_cluster {
	struct list_head list;
	struct list_head child;
	const char *cluster_name;
	const char **name;
	unsigned long aff_level; /* Affinity level of the node */
	struct low_power_ops *lpm_dev;
	int ndevices;
	struct lpm_cluster_level levels[NR_LPM_LEVELS];
	int nlevels;
	enum msm_pm_l2_scm_flag l2_flag;
	int min_child_level;
	int default_level;
	int last_level;
	struct lpm_cpu *cpu;
	struct cpuidle_driver *drv;
	spinlock_t sync_lock;
	struct cpumask child_cpus;
	struct cpumask num_children_in_sync;
	struct lpm_cluster *parent;
	struct lpm_stats *stats;
	unsigned int psci_mode_shift;
	unsigned int psci_mode_mask;
	bool no_saw_devices;
	struct cluster_history history;
	struct hrtimer histtimer;
};

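/* set_mode callbacks (matching struct low_power_ops) for L2, system and L3. */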
int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
int set_system_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
int set_l3_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
void lpm_suspend_wake_time(uint64_t wakeup_time);

struct lpm_cluster *lpm_of_parse_cluster(struct platform_device *pdev);
void free_cluster_node(struct lpm_cluster *cluster);
void cluster_dt_walkthrough(struct lpm_cluster *cluster);

int create_cluster_lvl_nodes(struct lpm_cluster *p, struct kobject *kobj);
bool lpm_cpu_mode_allow(unsigned int cpu,
		unsigned int mode, bool from_idle);
bool lpm_cluster_mode_allow(struct lpm_cluster *cluster,
		unsigned int mode, bool from_idle);
uint32_t *get_per_cpu_max_residency(int cpu);
uint32_t *get_per_cpu_min_residency(int cpu);
extern struct lpm_cluster *lpm_root_node;

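/*
 * is_IPI_pending() - true if any CPU in @mask has an IPI pending.
 * UP builds have no cross-CPU IPIs, so the check is trivially false there.
 */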
#ifdef CONFIG_SMP
extern DEFINE_PER_CPU(bool, pending_ipi);
static inline bool is_IPI_pending(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (per_cpu(pending_ipi, cpu))
			return true;
	}
	return false;
}
#else
static inline bool is_IPI_pending(const struct cpumask *mask)
{
	return false;
}
#endif
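
/*
 * Illustrative use of is_IPI_pending() -- a hedged sketch, not a call site
 * defined by this header: before committing a set of CPUs to a shared low
 * power mode, a caller may bail out if an IPI is already pending for any of
 * them, for example:
 *
 *	if (is_IPI_pending(&cluster->num_children_in_sync))
 *		return;
 */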