LineageOS/android_kernel_motorola_exynos9610: drivers/cpuidle/dt_idle_states.c
/*
 * DT idle states parsing code.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) "DT idle-states: " fmt

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "dt_idle_states.h"

static int init_state_node(struct cpuidle_state *idle_state,
			   const struct of_device_id *matches,
			   struct device_node *state_node)
{
	int err;
	const struct of_device_id *match_id;
	const char *desc;

	match_id = of_match_node(matches, state_node);
	if (!match_id)
		return -ENODEV;
	/*
	 * CPUidle drivers are expected to initialize the const void *data
	 * pointer of the passed in struct of_device_id array to the idle
	 * state enter function.
	 */
	idle_state->enter = match_id->data;
	/*
	 * Since this is not a "coupled" state, it's safe to assume interrupts
	 * won't be enabled when it exits, allowing the tick to be frozen
	 * safely. So enter() can also be used as the enter_freeze() callback.
	 */
	idle_state->enter_freeze = match_id->data;

	err = of_property_read_u32(state_node, "wakeup-latency-us",
				   &idle_state->exit_latency);
	if (err) {
		u32 entry_latency, exit_latency;

		err = of_property_read_u32(state_node, "entry-latency-us",
					   &entry_latency);
		if (err) {
			pr_debug(" * %s missing entry-latency-us property\n",
				 state_node->full_name);
			return -EINVAL;
		}

		err = of_property_read_u32(state_node, "exit-latency-us",
					   &exit_latency);
		if (err) {
			pr_debug(" * %s missing exit-latency-us property\n",
				 state_node->full_name);
			return -EINVAL;
		}
		/*
		 * If wakeup-latency-us is missing, default to entry+exit
		 * latencies as defined in idle states bindings
		 */
		idle_state->exit_latency = entry_latency + exit_latency;
	}

	err = of_property_read_u32(state_node, "min-residency-us",
				   &idle_state->target_residency);
	if (err) {
		pr_debug(" * %s missing min-residency-us property\n",
			 state_node->full_name);
		return -EINVAL;
	}

	err = of_property_read_string(state_node, "idle-state-name", &desc);
	if (err)
		desc = state_node->name;

	idle_state->flags = 0;
	if (of_property_read_bool(state_node, "local-timer-stop"))
		idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP;
	/*
	 * TODO:
	 *	replace with kstrdup and pointer assignment when name
	 *	and desc become string pointers
	 */
	strncpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN - 1);
	strncpy(idle_state->desc, desc, CPUIDLE_DESC_LEN - 1);
	return 0;
}
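
/*
 * For illustration only: a minimal sketch of the kind of matches table a
 * CPUidle driver hands to dt_init_idle_driver() so that the of_match_node()
 * lookup in init_state_node() above resolves ->data to the state enter
 * callback. The "foo" names below are hypothetical and do not refer to an
 * in-tree driver:
 *
 *	static int foo_enter_idle_state(struct cpuidle_device *dev,
 *					struct cpuidle_driver *drv,
 *					int idx);
 *
 *	static const struct of_device_id foo_idle_state_match[] = {
 *		{ .compatible = "foo,idle-state",
 *		  .data = foo_enter_idle_state },
 *		{ },
 *	};
 */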

/*
 * Check that the idle state is uniform across all CPUs in the CPUidle driver
 * cpumask
 */
static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
			     const cpumask_t *cpumask)
{
	int cpu;
	struct device_node *cpu_node, *curr_state_node;
	bool valid = true;

	/*
	 * Compare idle state phandles for index idx on all CPUs in the
	 * CPUidle driver cpumask. Start from next logical cpu following
	 * cpumask_first(cpumask) since that's the CPU state_node was
	 * retrieved from. If a mismatch is found bail out straight
	 * away since we certainly hit a firmware misconfiguration.
	 */
	for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
	     cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
		cpu_node = of_cpu_device_node_get(cpu);
		curr_state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
						   idx);
		if (state_node != curr_state_node)
			valid = false;

		of_node_put(curr_state_node);
		of_node_put(cpu_node);
		if (!valid)
			break;
	}

	return valid;
}

/**
 * dt_init_idle_driver() - Parse the DT idle states and initialize the
 *			   idle driver states array
 * @drv:	  Pointer to CPU idle driver to be initialized
 * @matches:	  Array of of_device_id match structures to search in for
 *		  compatible idle state nodes. The data pointer for each valid
 *		  struct of_device_id entry in the matches array must point to
 *		  a function with the following signature, that corresponds to
 *		  the CPUidle state enter function signature:
 *
 *		  int (*)(struct cpuidle_device *dev,
 *			  struct cpuidle_driver *drv,
 *			  int index);
 *
 * @start_idx: First idle state index to be initialized
 *
 * If DT idle states are detected and are valid the state count and states
 * array entries in the cpuidle driver are initialized accordingly starting
 * from index start_idx.
 *
 * Return: number of valid DT idle states parsed, <0 on failure
 */
int dt_init_idle_driver(struct cpuidle_driver *drv,
			const struct of_device_id *matches,
			unsigned int start_idx)
{
	struct cpuidle_state *idle_state;
	struct device_node *state_node, *cpu_node;
	int i, err = 0;
	const cpumask_t *cpumask;
	unsigned int state_idx = start_idx;

	if (state_idx >= CPUIDLE_STATE_MAX)
		return -EINVAL;
	/*
	 * We get the idle states for the first logical cpu in the
	 * driver mask (or cpu_possible_mask if the driver cpumask is not set)
	 * and we check through idle_state_valid() if they are uniform
	 * across CPUs, otherwise we hit a firmware misconfiguration.
	 */
	cpumask = drv->cpumask ? : cpu_possible_mask;
	cpu_node = of_cpu_device_node_get(cpumask_first(cpumask));

	for (i = 0; ; i++) {
		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
		if (!state_node)
			break;

		if (!of_device_is_available(state_node)) {
			/* drop the reference taken by of_parse_phandle() */
			of_node_put(state_node);
			continue;
		}

		if (!idle_state_valid(state_node, i, cpumask)) {
			pr_warn("%s idle state not valid, bailing out\n",
				state_node->full_name);
			err = -EINVAL;
			break;
		}

		if (state_idx == CPUIDLE_STATE_MAX) {
			pr_warn("State index reached static CPU idle driver states array size\n");
			break;
		}

		idle_state = &drv->states[state_idx++];
		err = init_state_node(idle_state, matches, state_node);
		if (err) {
			pr_err("Parsing idle state node %s failed with err %d\n",
			       state_node->full_name, err);
			err = -EINVAL;
			break;
		}
		of_node_put(state_node);
	}

	of_node_put(state_node);
	of_node_put(cpu_node);
	if (err)
		return err;
	/*
	 * Update the driver state count only if some valid DT idle states
	 * were detected
	 */
	if (i)
		drv->state_count = state_idx;

	/*
	 * Return the number of present and valid DT idle states, which can
	 * also be 0 on platforms with missing DT idle states or legacy DT
	 * configuration predating the DT idle states bindings.
	 */
	return i;
}
EXPORT_SYMBOL_GPL(dt_init_idle_driver);
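
/*
 * Usage sketch (hypothetical, not part of this file): a platform CPUidle
 * driver typically provides a built-in state at index 0, asks this helper to
 * fill in the remaining states from DT starting at index 1, and bails out if
 * no valid DT idle states are found, consistent with the return value
 * documented above (<= 0 means nothing usable was parsed):
 *
 *	static struct cpuidle_driver foo_idle_driver = {
 *		.name = "foo_idle",
 *		.states[0] = { ... },	(built-in WFI-style state, index 0)
 *	};
 *
 *	static int __init foo_idle_init(void)
 *	{
 *		int ret;
 *
 *		ret = dt_init_idle_driver(&foo_idle_driver,
 *					  foo_idle_state_match, 1);
 *		if (ret <= 0)
 *			return ret ? : -ENODEV;
 *
 *		return cpuidle_register(&foo_idle_driver, NULL);
 *	}
 */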