target/linux/ipq806x/patches-6.1/111-v5.19-02-PM-devfreq-Add-cpu-based-scaling-support-to-passive-.patch
From a03dacb0316f74400846aaf144d6c73f4217ca08 Mon Sep 17 00:00:00 2001
From: Saravana Kannan <skannan@codeaurora.org>
Date: Tue, 2 Mar 2021 15:58:21 +0900
Subject: [PATCH 2/5] PM / devfreq: Add cpu based scaling support to passive
 governor

Many CPU architectures have caches that can scale independently of the
CPUs. Frequency scaling of the caches is necessary to make sure that the
cache is not a performance bottleneck that leads to poor performance and
power. The same idea applies to RAM/DDR.

To achieve this, this patch adds support for cpu based scaling to the
passive governor. This is accomplished by taking the current frequency
of each CPU frequency domain and then adjusting the frequency of the cache
(or any devfreq device) based on the frequency of the CPUs. It listens
to CPU frequency transition notifiers to keep itself up to date on the
current CPU frequency.

To decide the frequency of the device, the governor does one of the
following:
* Derives the optimal devfreq device opp from the required-opps property
  of the parent cpu opp_table.

* Scales the device frequency in proportion to the CPU frequency. So, if
  the CPUs are running at their max frequency, the device runs at its
  max frequency. If the CPUs are running at their min frequency, the
  device runs at its min frequency. It is interpolated for frequencies
  in between, as sketched below.

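For illustration, with cpu_min/cpu_max/cpu_cur taken from the parent
cpufreq policy and dev_min/dev_max from the devfreq device's frequency
range, the proportional case reduces to the two lines used by
get_target_freq_with_cpufreq() in this patch:

  cpu_percent = ((cpu_cur - cpu_min) * 100) / (cpu_max - cpu_min);
  freq = dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);
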
Tested-by: Chen-Yu Tsai <wenst@chromium.org>
Tested-by: Johnson Wang <johnson.wang@mediatek.com>
Signed-off-by: Saravana Kannan <skannan@codeaurora.org>
[Sibi: Integrated cpu-freqmap governor into passive_governor]
Signed-off-by: Sibi Sankar <sibis@codeaurora.org>
[Chanwoo: Fix conflict with latest code and cleanup code]
Signed-off-by: Chanwoo Choi <cw00.choi@samsung.com>
---
 drivers/devfreq/governor.h         |  22 +++
 drivers/devfreq/governor_passive.c | 298 +++++++++++++++++++++++++++--
 include/linux/devfreq.h            |  17 +-
 3 files changed, 323 insertions(+), 14 deletions(-)

--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -48,6 +48,28 @@
 #define DEVFREQ_GOV_ATTR_TIMER BIT(1)

 /**
+ * struct devfreq_cpu_data - Hold the per-cpu data
+ * @dev: reference to cpu device.
+ * @first_cpu: the cpumask of the first cpu of a policy.
+ * @opp_table: reference to cpu opp table.
+ * @cur_freq: the current frequency of the cpu.
+ * @min_freq: the min frequency of the cpu.
+ * @max_freq: the max frequency of the cpu.
+ *
+ * This structure stores the required cpu_data of a cpu.
+ * This is auto-populated by the governor.
+ */
+struct devfreq_cpu_data {
+	struct device *dev;
+	unsigned int first_cpu;
+
+	struct opp_table *opp_table;
+	unsigned int cur_freq;
+	unsigned int min_freq;
+	unsigned int max_freq;
+};
+
+/**
  * struct devfreq_governor - Devfreq policy governor
  * @node: list node - contains registered devfreq governors
  * @name: Governor's name
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -8,11 +8,85 @@
  */

 #include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/devfreq.h>
 #include "governor.h"

-static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
+#define HZ_PER_KHZ	1000
+
+static unsigned long get_target_freq_by_required_opp(struct device *p_dev,
+						struct opp_table *p_opp_table,
+						struct opp_table *opp_table,
+						unsigned long *freq)
+{
+	struct dev_pm_opp *opp = NULL, *p_opp = NULL;
+	unsigned long target_freq;
+
+	if (!p_dev || !p_opp_table || !opp_table || !freq)
+		return 0;
+
+	p_opp = devfreq_recommended_opp(p_dev, freq, 0);
+	if (IS_ERR(p_opp))
+		return 0;
+
+	opp = dev_pm_opp_xlate_required_opp(p_opp_table, opp_table, p_opp);
+	dev_pm_opp_put(p_opp);
+
+	if (IS_ERR(opp))
+		return 0;
+
+	target_freq = dev_pm_opp_get_freq(opp);
+	dev_pm_opp_put(opp);
+
+	return target_freq;
+}
+
+static int get_target_freq_with_cpufreq(struct devfreq *devfreq,
+					unsigned long *target_freq)
+{
+	struct devfreq_passive_data *p_data =
+				(struct devfreq_passive_data *)devfreq->data;
+	struct devfreq_cpu_data *parent_cpu_data;
+	unsigned long cpu, cpu_cur, cpu_min, cpu_max, cpu_percent;
+	unsigned long dev_min, dev_max;
+	unsigned long freq = 0;
+
+	for_each_online_cpu(cpu) {
+		parent_cpu_data = p_data->parent_cpu_data[cpu];
+		if (!parent_cpu_data || parent_cpu_data->first_cpu != cpu)
+			continue;
+
+		/* Get target freq via required opps */
+		cpu_cur = parent_cpu_data->cur_freq * HZ_PER_KHZ;
+		freq = get_target_freq_by_required_opp(parent_cpu_data->dev,
+					parent_cpu_data->opp_table,
+					devfreq->opp_table, &cpu_cur);
+		if (freq) {
+			*target_freq = max(freq, *target_freq);
+			continue;
+		}
+
+		/* Use interpolation if required opps is not available */
+		devfreq_get_freq_range(devfreq, &dev_min, &dev_max);
+
+		cpu_min = parent_cpu_data->min_freq;
+		cpu_max = parent_cpu_data->max_freq;
+		cpu_cur = parent_cpu_data->cur_freq;
+
+		cpu_percent = ((cpu_cur - cpu_min) * 100) / (cpu_max - cpu_min);
+		freq = dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);
+
+		*target_freq = max(freq, *target_freq);
+	}
+
+	return 0;
+}
+
+static int get_target_freq_with_devfreq(struct devfreq *devfreq,
 					unsigned long *freq)
 {
 	struct devfreq_passive_data *p_data
@@ -99,6 +173,181 @@ no_required_opp:
 	return 0;
 }

+static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
+					unsigned long *freq)
+{
+	struct devfreq_passive_data *p_data =
+			(struct devfreq_passive_data *)devfreq->data;
+	int ret;
+
+	if (!p_data)
+		return -EINVAL;
+
+	/*
+	 * If the devfreq device with passive governor has the specific method
+	 * to determine the next frequency, should use the get_target_freq()
+	 * of struct devfreq_passive_data.
+	 */
+	if (p_data->get_target_freq)
+		return p_data->get_target_freq(devfreq, freq);
+
+	switch (p_data->parent_type) {
+	case DEVFREQ_PARENT_DEV:
+		ret = get_target_freq_with_devfreq(devfreq, freq);
+		break;
+	case CPUFREQ_PARENT_DEV:
+		ret = get_target_freq_with_cpufreq(devfreq, freq);
+		break;
+	default:
+		ret = -EINVAL;
+		dev_err(&devfreq->dev, "Invalid parent type\n");
+		break;
+	}
+
+	return ret;
+}
+
+static int cpufreq_passive_notifier_call(struct notifier_block *nb,
+					unsigned long event, void *ptr)
+{
+	struct devfreq_passive_data *p_data =
+			container_of(nb, struct devfreq_passive_data, nb);
+	struct devfreq *devfreq = (struct devfreq *)p_data->this;
+	struct devfreq_cpu_data *parent_cpu_data;
+	struct cpufreq_freqs *freqs = ptr;
+	unsigned int cur_freq;
+	int ret;
+
+	if (event != CPUFREQ_POSTCHANGE || !freqs ||
+	    !p_data->parent_cpu_data[freqs->policy->cpu])
+		return 0;
+
+	parent_cpu_data = p_data->parent_cpu_data[freqs->policy->cpu];
+	if (parent_cpu_data->cur_freq == freqs->new)
+		return 0;
+
+	cur_freq = parent_cpu_data->cur_freq;
+	parent_cpu_data->cur_freq = freqs->new;
+
+	mutex_lock(&devfreq->lock);
+	ret = devfreq_update_target(devfreq, freqs->new);
+	mutex_unlock(&devfreq->lock);
+	if (ret) {
+		parent_cpu_data->cur_freq = cur_freq;
+		dev_err(&devfreq->dev, "failed to update the frequency.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq)
+{
+	struct devfreq_passive_data *p_data
+			= (struct devfreq_passive_data *)devfreq->data;
+	struct devfreq_cpu_data *parent_cpu_data;
+	int cpu, ret;
+
+	if (p_data->nb.notifier_call) {
+		ret = cpufreq_unregister_notifier(&p_data->nb,
+					CPUFREQ_TRANSITION_NOTIFIER);
+		if (ret < 0)
+			return ret;
+	}
+
+	for_each_possible_cpu(cpu) {
+		parent_cpu_data = p_data->parent_cpu_data[cpu];
+		if (!parent_cpu_data)
+			continue;
+
+		if (parent_cpu_data->opp_table)
+			dev_pm_opp_put_opp_table(parent_cpu_data->opp_table);
+		kfree(parent_cpu_data);
+	}
+
+	return 0;
+}
+
+static int cpufreq_passive_register_notifier(struct devfreq *devfreq)
+{
+	struct devfreq_passive_data *p_data
+			= (struct devfreq_passive_data *)devfreq->data;
+	struct device *dev = devfreq->dev.parent;
+	struct opp_table *opp_table = NULL;
+	struct devfreq_cpu_data *parent_cpu_data;
+	struct cpufreq_policy *policy;
+	struct device *cpu_dev;
+	unsigned int cpu;
+	int ret;
+
+	p_data->nb.notifier_call = cpufreq_passive_notifier_call;
+	ret = cpufreq_register_notifier(&p_data->nb, CPUFREQ_TRANSITION_NOTIFIER);
+	if (ret) {
+		dev_err(dev, "failed to register cpufreq notifier\n");
+		p_data->nb.notifier_call = NULL;
+		goto err;
+	}
+
+	for_each_possible_cpu(cpu) {
+		if (p_data->parent_cpu_data[cpu])
+			continue;
+
+		policy = cpufreq_cpu_get(cpu);
+		if (!policy) {
+			ret = -EPROBE_DEFER;
+			goto err;
+		}
+
+		parent_cpu_data = kzalloc(sizeof(*parent_cpu_data),
+						GFP_KERNEL);
+		if (!parent_cpu_data) {
+			ret = -ENOMEM;
+			goto err_put_policy;
+		}
+
+		cpu_dev = get_cpu_device(cpu);
+		if (!cpu_dev) {
+			dev_err(dev, "failed to get cpu device\n");
+			ret = -ENODEV;
+			goto err_free_cpu_data;
+		}
+
+		opp_table = dev_pm_opp_get_opp_table(cpu_dev);
+		if (IS_ERR(opp_table)) {
+			dev_err(dev, "failed to get opp_table of cpu%d\n", cpu);
+			ret = PTR_ERR(opp_table);
+			goto err_free_cpu_data;
+		}
+
+		parent_cpu_data->dev = cpu_dev;
+		parent_cpu_data->opp_table = opp_table;
+		parent_cpu_data->first_cpu = cpumask_first(policy->related_cpus);
+		parent_cpu_data->cur_freq = policy->cur;
+		parent_cpu_data->min_freq = policy->cpuinfo.min_freq;
+		parent_cpu_data->max_freq = policy->cpuinfo.max_freq;
+
+		p_data->parent_cpu_data[cpu] = parent_cpu_data;
+		cpufreq_cpu_put(policy);
+	}
+
+	mutex_lock(&devfreq->lock);
+	ret = devfreq_update_target(devfreq, 0L);
+	mutex_unlock(&devfreq->lock);
+	if (ret)
+		dev_err(dev, "failed to update the frequency\n");
+
+	return ret;
+
+err_free_cpu_data:
+	kfree(parent_cpu_data);
+err_put_policy:
+	cpufreq_cpu_put(policy);
+err:
+	WARN_ON(cpufreq_passive_unregister_notifier(devfreq));
+
+	return ret;
+}
+
 static int devfreq_passive_notifier_call(struct notifier_block *nb,
 				unsigned long event, void *ptr)
 {
@@ -131,30 +380,55 @@ static int devfreq_passive_notifier_call
 	return NOTIFY_DONE;
 }

-static int devfreq_passive_event_handler(struct devfreq *devfreq,
-				unsigned int event, void *data)
+static int devfreq_passive_unregister_notifier(struct devfreq *devfreq)
+{
+	struct devfreq_passive_data *p_data
+			= (struct devfreq_passive_data *)devfreq->data;
+	struct devfreq *parent = (struct devfreq *)p_data->parent;
+	struct notifier_block *nb = &p_data->nb;
+
+	return devfreq_unregister_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
+}
+
+static int devfreq_passive_register_notifier(struct devfreq *devfreq)
 {
 	struct devfreq_passive_data *p_data
 			= (struct devfreq_passive_data *)devfreq->data;
 	struct devfreq *parent = (struct devfreq *)p_data->parent;
 	struct notifier_block *nb = &p_data->nb;
-	int ret = 0;

 	if (!parent)
 		return -EPROBE_DEFER;

+	nb->notifier_call = devfreq_passive_notifier_call;
+	return devfreq_register_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
+}
+
+static int devfreq_passive_event_handler(struct devfreq *devfreq,
+					unsigned int event, void *data)
+{
+	struct devfreq_passive_data *p_data
+			= (struct devfreq_passive_data *)devfreq->data;
+	int ret = -EINVAL;
+
+	if (!p_data)
+		return -EINVAL;
+
+	if (!p_data->this)
+		p_data->this = devfreq;
+
 	switch (event) {
 	case DEVFREQ_GOV_START:
-		if (!p_data->this)
-			p_data->this = devfreq;
-
-		nb->notifier_call = devfreq_passive_notifier_call;
-		ret = devfreq_register_notifier(parent, nb,
-					DEVFREQ_TRANSITION_NOTIFIER);
+		if (p_data->parent_type == DEVFREQ_PARENT_DEV)
+			ret = devfreq_passive_register_notifier(devfreq);
+		else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
+			ret = cpufreq_passive_register_notifier(devfreq);
 		break;
 	case DEVFREQ_GOV_STOP:
-		WARN_ON(devfreq_unregister_notifier(parent, nb,
-					DEVFREQ_TRANSITION_NOTIFIER));
+		if (p_data->parent_type == DEVFREQ_PARENT_DEV)
+			WARN_ON(devfreq_passive_unregister_notifier(devfreq));
+		else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
+			WARN_ON(cpufreq_passive_unregister_notifier(devfreq));
 		break;
 	default:
 		break;
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -38,6 +38,7 @@ enum devfreq_timer {

 struct devfreq;
 struct devfreq_governor;
+struct devfreq_cpu_data;
 struct thermal_cooling_device;

 /**
@@ -289,6 +290,11 @@ struct devfreq_simple_ondemand_data {
 #endif

 #if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
+enum devfreq_parent_dev_type {
+	DEVFREQ_PARENT_DEV,
+	CPUFREQ_PARENT_DEV,
+};
+
 /**
  * struct devfreq_passive_data - ``void *data`` fed to struct devfreq
  *	and devfreq_add_device
@@ -300,8 +306,11 @@ struct devfreq_simple_ondemand_data {
  *	using governors except for passive governor.
  *	If the devfreq device has the specific method to decide
  *	the next frequency, should use this callback.
- * @this: the devfreq instance of own device.
- * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER list
+ * @parent_type: the parent type of the device.
+ * @this: the devfreq instance of own device.
+ * @nb: the notifier block for DEVFREQ_TRANSITION_NOTIFIER or
+ *	CPUFREQ_TRANSITION_NOTIFIER list.
+ * @parent_cpu_data: the state min/max/current frequency of all online cpu's.
  *
  * The devfreq_passive_data have to set the devfreq instance of parent
  * device with governors except for the passive governor. But, don't need to
@@ -315,9 +324,13 @@ struct devfreq_passive_data {
 	/* Optional callback to decide the next frequency of passvice device */
 	int (*get_target_freq)(struct devfreq *this, unsigned long *freq);

+	/* Should set the type of parent device */
+	enum devfreq_parent_dev_type parent_type;
+
 	/* For passive governor's internal use. Don't need to set them */
 	struct devfreq *this;
 	struct notifier_block nb;
+	struct devfreq_cpu_data *parent_cpu_data[NR_CPUS];
 };
 #endif
