PM / devfreq: register governors with devfreq framework
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / devfreq / devfreq.c
CommitLineData
a3c98b8b
MH
1/*
2 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
3 * for Non-CPU Devices.
4 *
5 * Copyright (C) 2011 Samsung Electronics
6 * MyungJoo Ham <myungjoo.ham@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/errno.h>
16#include <linux/err.h>
17#include <linux/init.h>
952f6d13 18#include <linux/module.h>
a3c98b8b 19#include <linux/slab.h>
952f6d13 20#include <linux/stat.h>
a3c98b8b
MH
21#include <linux/opp.h>
22#include <linux/devfreq.h>
23#include <linux/workqueue.h>
24#include <linux/platform_device.h>
25#include <linux/list.h>
26#include <linux/printk.h>
27#include <linux/hrtimer.h>
28#include "governor.h"
29
/* sysfs class under which every devfreq device is registered */
static struct class *devfreq_class;

/*
 * devfreq core provides delayed work based load monitoring helper
 * functions. Governors can use these or can implement their own
 * monitoring mechanism.
 */
static struct workqueue_struct *devfreq_wq;

/* The list of all device-devfreq governors */
static LIST_HEAD(devfreq_governor_list);
/* The list of all device-devfreq */
static LIST_HEAD(devfreq_list);
/* Protects both devfreq_list and devfreq_governor_list */
static DEFINE_MUTEX(devfreq_list_lock);
44
45/**
46 * find_device_devfreq() - find devfreq struct using device pointer
47 * @dev: device pointer used to lookup device devfreq.
48 *
49 * Search the list of device devfreqs and return the matched device's
50 * devfreq info. devfreq_list_lock should be held by the caller.
51 */
52static struct devfreq *find_device_devfreq(struct device *dev)
53{
54 struct devfreq *tmp_devfreq;
55
56 if (unlikely(IS_ERR_OR_NULL(dev))) {
57 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
58 return ERR_PTR(-EINVAL);
59 }
60 WARN(!mutex_is_locked(&devfreq_list_lock),
61 "devfreq_list_lock must be locked.");
62
63 list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
64 if (tmp_devfreq->dev.parent == dev)
65 return tmp_devfreq;
66 }
67
68 return ERR_PTR(-ENODEV);
69}
70
e552bbaf
JL
71/**
72 * devfreq_get_freq_level() - Lookup freq_table for the frequency
73 * @devfreq: the devfreq instance
74 * @freq: the target frequency
75 */
76static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
77{
78 int lev;
79
80 for (lev = 0; lev < devfreq->profile->max_state; lev++)
81 if (freq == devfreq->profile->freq_table[lev])
82 return lev;
83
84 return -EINVAL;
85}
86
87/**
88 * devfreq_update_status() - Update statistics of devfreq behavior
89 * @devfreq: the devfreq instance
90 * @freq: the update target frequency
91 */
92static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
93{
94 int lev, prev_lev;
95 unsigned long cur_time;
96
97 lev = devfreq_get_freq_level(devfreq, freq);
98 if (lev < 0)
99 return lev;
100
101 cur_time = jiffies;
102 devfreq->time_in_state[lev] +=
103 cur_time - devfreq->last_stat_updated;
104 if (freq != devfreq->previous_freq) {
105 prev_lev = devfreq_get_freq_level(devfreq,
106 devfreq->previous_freq);
107 devfreq->trans_table[(prev_lev *
108 devfreq->profile->max_state) + lev]++;
109 devfreq->total_trans++;
110 }
111 devfreq->last_stat_updated = cur_time;
112
113 return 0;
114}
115
3aa173b8
NM
116/**
117 * find_devfreq_governor() - find devfreq governor from name
118 * @name: name of the governor
119 *
120 * Search the list of devfreq governors and return the matched
121 * governor's pointer. devfreq_list_lock should be held by the caller.
122 */
123static struct devfreq_governor *find_devfreq_governor(const char *name)
124{
125 struct devfreq_governor *tmp_governor;
126
127 if (unlikely(IS_ERR_OR_NULL(name))) {
128 pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
129 return ERR_PTR(-EINVAL);
130 }
131 WARN(!mutex_is_locked(&devfreq_list_lock),
132 "devfreq_list_lock must be locked.");
133
134 list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
135 if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
136 return tmp_governor;
137 }
138
139 return ERR_PTR(-ENODEV);
140}
141
7e6fdd4b
RV
142/* Load monitoring helper functions for governors use */
143
a3c98b8b
MH
/**
 * update_devfreq() - Reevaluate the device and configure frequency.
 * @devfreq:	the devfreq instance.
 *
 * Asks the governor for a target frequency, clamps it to the user's
 * min/max limits, programs it via the profile's target() callback and
 * records the transition statistics.
 *
 * Note: Lock devfreq->lock before calling update_devfreq
 * This function is exported for governors.
 */
int update_devfreq(struct devfreq *devfreq)
{
	unsigned long freq;
	int err = 0;
	u32 flags = 0;

	if (!mutex_is_locked(&devfreq->lock)) {
		WARN(true, "devfreq->lock must be locked by the caller.\n");
		return -EINVAL;
	}

	/* Reevaluate the proper frequency */
	err = devfreq->governor->get_target_freq(devfreq, &freq);
	if (err)
		return err;

	/*
	 * Adjust the frequency with user freq and QoS.
	 *
	 * List from the highest priority:
	 * max_freq (probably called by thermal when it's too hot)
	 * min_freq
	 * max_freq is applied last so it wins over min_freq.
	 */

	if (devfreq->min_freq && freq < devfreq->min_freq) {
		freq = devfreq->min_freq;
		flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
	}
	if (devfreq->max_freq && freq > devfreq->max_freq) {
		freq = devfreq->max_freq;
		flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
	}

	err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
	if (err)
		return err;

	/* Fold this change into the trans_stat statistics, if enabled */
	if (devfreq->profile->freq_table)
		if (devfreq_update_status(devfreq, freq))
			dev_err(&devfreq->dev,
				"Couldn't update frequency transition information.\n");

	devfreq->previous_freq = freq;
	return err;
}
EXPORT_SYMBOL(update_devfreq);
a3c98b8b 197
7e6fdd4b
RV
198/**
199 * devfreq_monitor() - Periodically poll devfreq objects.
200 * @work: the work struct used to run devfreq_monitor periodically.
201 *
202 */
203static void devfreq_monitor(struct work_struct *work)
204{
205 int err;
206 struct devfreq *devfreq = container_of(work,
207 struct devfreq, work.work);
208
209 mutex_lock(&devfreq->lock);
210 err = update_devfreq(devfreq);
211 if (err)
212 dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
213
214 queue_delayed_work(devfreq_wq, &devfreq->work,
215 msecs_to_jiffies(devfreq->profile->polling_ms));
216 mutex_unlock(&devfreq->lock);
217}
218
219/**
220 * devfreq_monitor_start() - Start load monitoring of devfreq instance
221 * @devfreq: the devfreq instance.
222 *
223 * Helper function for starting devfreq device load monitoing. By
224 * default delayed work based monitoring is supported. Function
225 * to be called from governor in response to DEVFREQ_GOV_START
226 * event when device is added to devfreq framework.
227 */
228void devfreq_monitor_start(struct devfreq *devfreq)
229{
230 INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
231 if (devfreq->profile->polling_ms)
232 queue_delayed_work(devfreq_wq, &devfreq->work,
233 msecs_to_jiffies(devfreq->profile->polling_ms));
234}
235
/**
 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 * @devfreq:	the devfreq instance.
 *
 * Helper function to stop devfreq device load monitoring. Function
 * to be called from governor in response to DEVFREQ_GOV_STOP
 * event when device is removed from devfreq framework.
 */
void devfreq_monitor_stop(struct devfreq *devfreq)
{
	/* Synchronous cancel: any in-flight devfreq_monitor() finishes first */
	cancel_delayed_work_sync(&devfreq->work);
}
248
249/**
250 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
251 * @devfreq: the devfreq instance.
252 *
253 * Helper function to suspend devfreq device load monitoing. Function
254 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
255 * event or when polling interval is set to zero.
256 *
257 * Note: Though this function is same as devfreq_monitor_stop(),
258 * intentionally kept separate to provide hooks for collecting
259 * transition statistics.
260 */
261void devfreq_monitor_suspend(struct devfreq *devfreq)
262{
263 mutex_lock(&devfreq->lock);
264 if (devfreq->stop_polling) {
265 mutex_unlock(&devfreq->lock);
266 return;
267 }
268
269 devfreq->stop_polling = true;
270 mutex_unlock(&devfreq->lock);
271 cancel_delayed_work_sync(&devfreq->work);
272}
273
274/**
275 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
276 * @devfreq: the devfreq instance.
277 *
278 * Helper function to resume devfreq device load monitoing. Function
279 * to be called from governor in response to DEVFREQ_GOV_RESUME
280 * event or when polling interval is set to non-zero.
281 */
282void devfreq_monitor_resume(struct devfreq *devfreq)
283{
284 mutex_lock(&devfreq->lock);
285 if (!devfreq->stop_polling)
286 goto out;
287
288 if (!delayed_work_pending(&devfreq->work) &&
289 devfreq->profile->polling_ms)
290 queue_delayed_work(devfreq_wq, &devfreq->work,
291 msecs_to_jiffies(devfreq->profile->polling_ms));
292 devfreq->stop_polling = false;
293
294out:
295 mutex_unlock(&devfreq->lock);
296}
297
/**
 * devfreq_interval_update() - Update device devfreq monitoring interval
 * @devfreq:	the devfreq instance.
 * @delay:	new polling interval to be set.
 *
 * Helper function to set new load monitoring polling interval. Function
 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
 */
void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
{
	unsigned int cur_delay = devfreq->profile->polling_ms;
	unsigned int new_delay = *delay;

	mutex_lock(&devfreq->lock);
	devfreq->profile->polling_ms = new_delay;

	/* monitoring is suspended; the new value takes effect on resume */
	if (devfreq->stop_polling)
		goto out;

	/* if new delay is zero, stop polling */
	if (!new_delay) {
		mutex_unlock(&devfreq->lock);
		/*
		 * cancel_delayed_work_sync() must run unlocked: the work
		 * item (devfreq_monitor) takes devfreq->lock itself.
		 */
		cancel_delayed_work_sync(&devfreq->work);
		return;
	}

	/* if current delay is zero, start polling with new delay */
	if (!cur_delay) {
		queue_delayed_work(devfreq_wq, &devfreq->work,
			msecs_to_jiffies(devfreq->profile->polling_ms));
		goto out;
	}

	/* if current delay is greater than new delay, restart polling */
	if (cur_delay > new_delay) {
		mutex_unlock(&devfreq->lock);
		cancel_delayed_work_sync(&devfreq->work);
		mutex_lock(&devfreq->lock);
		/* state may have changed while the lock was dropped */
		if (!devfreq->stop_polling)
			queue_delayed_work(devfreq_wq, &devfreq->work,
			      msecs_to_jiffies(devfreq->profile->polling_ms));
	}
out:
	mutex_unlock(&devfreq->lock);
}
343
a3c98b8b
MH
344/**
345 * devfreq_notifier_call() - Notify that the device frequency requirements
346 * has been changed out of devfreq framework.
c5b4a1c1
NM
347 * @nb: the notifier_block (supposed to be devfreq->nb)
348 * @type: not used
349 * @devp: not used
a3c98b8b
MH
350 *
351 * Called by a notifier that uses devfreq->nb.
352 */
353static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
354 void *devp)
355{
356 struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
357 int ret;
358
359 mutex_lock(&devfreq->lock);
360 ret = update_devfreq(devfreq);
361 mutex_unlock(&devfreq->lock);
362
363 return ret;
364}
365
/**
 * _remove_devfreq() - Remove devfreq from the list and release its resources.
 * @devfreq:	the devfreq struct
 * @skip:	skip calling device_unregister().
 *
 * Shared by devfreq_remove_device() (@skip == false) and by the device
 * release callback devfreq_dev_release() (@skip == true); @skip breaks
 * the device_unregister() -> release -> _remove_devfreq() recursion so
 * the device is not unregistered twice.
 */
static void _remove_devfreq(struct devfreq *devfreq, bool skip)
{
	mutex_lock(&devfreq_list_lock);
	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
		/* already removed from the list by an earlier call */
		mutex_unlock(&devfreq_list_lock);
		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
		return;
	}
	list_del(&devfreq->node);
	mutex_unlock(&devfreq_list_lock);

	/* let the governor stop its load monitoring before teardown */
	devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL);

	if (devfreq->profile->exit)
		devfreq->profile->exit(devfreq->dev.parent);

	if (!skip && get_device(&devfreq->dev)) {
		device_unregister(&devfreq->dev);
		put_device(&devfreq->dev);
	}

	mutex_destroy(&devfreq->lock);
	kfree(devfreq);
}
395
/**
 * devfreq_dev_release() - Callback for struct device to release the device.
 * @dev:	the devfreq device
 *
 * This calls _remove_devfreq() if _remove_devfreq() is not called.
 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
 * well as by others unregistering the device.
 */
static void devfreq_dev_release(struct device *dev)
{
	struct devfreq *devfreq = to_devfreq(dev);

	/* skip = true: the device is already being unregistered */
	_remove_devfreq(devfreq, true);
}
410
/**
 * devfreq_add_device() - Add devfreq feature to the device
 * @dev:	the device to add devfreq feature.
 * @profile:	device-specific profile to run devfreq.
 * @governor:	the policy to choose frequency.
 * @data:	private data for the governor. The devfreq framework does not
 *		touch this value.
 *
 * Returns the new devfreq instance, or an ERR_PTR on failure.
 */
struct devfreq *devfreq_add_device(struct device *dev,
				   struct devfreq_dev_profile *profile,
				   const struct devfreq_governor *governor,
				   void *data)
{
	struct devfreq *devfreq;
	int err = 0;

	if (!dev || !profile || !governor) {
		dev_err(dev, "%s: Invalid parameters.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* only one devfreq instance per device is allowed */
	mutex_lock(&devfreq_list_lock);
	devfreq = find_device_devfreq(dev);
	mutex_unlock(&devfreq_list_lock);
	if (!IS_ERR(devfreq)) {
		dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
		err = -EINVAL;
		goto err_out;
	}

	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
	if (!devfreq) {
		dev_err(dev, "%s: Unable to create devfreq for the device\n",
			__func__);
		err = -ENOMEM;
		goto err_out;
	}

	mutex_init(&devfreq->lock);
	mutex_lock(&devfreq->lock);
	devfreq->dev.parent = dev;
	devfreq->dev.class = devfreq_class;
	devfreq->dev.release = devfreq_dev_release;
	devfreq->profile = profile;
	devfreq->governor = governor;
	devfreq->previous_freq = profile->initial_freq;
	devfreq->data = data;
	devfreq->nb.notifier_call = devfreq_notifier_call;

	/*
	 * trans_stat buffers; devm-managed, so freed with the parent
	 * device rather than with the devfreq object.
	 * NOTE(review): allocation failures are not checked here, so
	 * devfreq_update_status() could dereference NULL — confirm
	 * whether that is acceptable for this tree.
	 */
	devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) *
						devfreq->profile->max_state *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
						devfreq->profile->max_state,
						GFP_KERNEL);
	devfreq->last_stat_updated = jiffies;

	dev_set_name(&devfreq->dev, dev_name(dev));
	err = device_register(&devfreq->dev);
	if (err) {
		/* drop the reference taken by device_register() */
		put_device(&devfreq->dev);
		mutex_unlock(&devfreq->lock);
		goto err_dev;
	}

	mutex_unlock(&devfreq->lock);

	mutex_lock(&devfreq_list_lock);
	list_add(&devfreq->node, &devfreq_list);
	mutex_unlock(&devfreq_list_lock);

	/* hand the device to its governor; this typically starts polling */
	err = devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_START, NULL);
	if (err) {
		dev_err(dev, "%s: Unable to start governor for the device\n",
			__func__);
		goto err_init;
	}

	return devfreq;

err_init:
	list_del(&devfreq->node);
	device_unregister(&devfreq->dev);
err_dev:
	kfree(devfreq);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL(devfreq_add_device);
a3c98b8b
MH
502
503/**
504 * devfreq_remove_device() - Remove devfreq feature from a device.
c5b4a1c1 505 * @devfreq: the devfreq instance to be removed
a3c98b8b
MH
506 */
507int devfreq_remove_device(struct devfreq *devfreq)
508{
509 if (!devfreq)
510 return -EINVAL;
511
7e6fdd4b 512 _remove_devfreq(devfreq, false);
a3c98b8b
MH
513
514 return 0;
515}
7e6fdd4b 516EXPORT_SYMBOL(devfreq_remove_device);
a3c98b8b 517
206c30cf
RV
/**
 * devfreq_suspend_device() - Suspend devfreq of a device.
 * @devfreq:	the devfreq instance to be suspended
 *
 * Returns 0 on success or the governor's error code.
 */
int devfreq_suspend_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	/* the governor decides what suspend means (usually stop polling) */
	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);
531
/**
 * devfreq_resume_device() - Resume devfreq of a device.
 * @devfreq:	the devfreq instance to be resumed
 *
 * Returns 0 on success or the governor's error code.
 */
int devfreq_resume_device(struct devfreq *devfreq)
{
	if (!devfreq)
		return -EINVAL;

	/* the governor decides what resume means (usually restart polling) */
	return devfreq->governor->event_handler(devfreq,
				DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);
545
3aa173b8
NM
546/**
547 * devfreq_add_governor() - Add devfreq governor
548 * @governor: the devfreq governor to be added
549 */
550int devfreq_add_governor(struct devfreq_governor *governor)
551{
552 struct devfreq_governor *g;
553 int err = 0;
554
555 if (!governor) {
556 pr_err("%s: Invalid parameters.\n", __func__);
557 return -EINVAL;
558 }
559
560 mutex_lock(&devfreq_list_lock);
561 g = find_devfreq_governor(governor->name);
562 if (!IS_ERR(g)) {
563 pr_err("%s: governor %s already registered\n", __func__,
564 g->name);
565 err = -EINVAL;
566 goto err_out;
567 }
568
569 list_add(&governor->node, &devfreq_governor_list);
570
571err_out:
572 mutex_unlock(&devfreq_list_lock);
573
574 return err;
575}
576EXPORT_SYMBOL(devfreq_add_governor);
577
578/**
579 * devfreq_remove_device() - Remove devfreq feature from a device.
580 * @governor: the devfreq governor to be removed
581 */
582int devfreq_remove_governor(struct devfreq_governor *governor)
583{
584 struct devfreq_governor *g;
585 int err = 0;
586
587 if (!governor) {
588 pr_err("%s: Invalid parameters.\n", __func__);
589 return -EINVAL;
590 }
591
592 mutex_lock(&devfreq_list_lock);
593 g = find_devfreq_governor(governor->name);
594 if (IS_ERR(g)) {
595 pr_err("%s: governor %s not registered\n", __func__,
596 g->name);
597 err = -EINVAL;
598 goto err_out;
599 }
600
601 list_del(&governor->node);
602err_out:
603 mutex_unlock(&devfreq_list_lock);
604
605 return err;
606}
607EXPORT_SYMBOL(devfreq_remove_governor);
608
9005b650
MH
/* sysfs "governor": name of the governor driving this device */
static ssize_t show_governor(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}
614
615static ssize_t show_freq(struct device *dev,
616 struct device_attribute *attr, char *buf)
7f98a905
RV
617{
618 unsigned long freq;
619 struct devfreq *devfreq = to_devfreq(dev);
620
621 if (devfreq->profile->get_cur_freq &&
622 !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
623 return sprintf(buf, "%lu\n", freq);
624
625 return sprintf(buf, "%lu\n", devfreq->previous_freq);
626}
627
/* sysfs "target_freq": the last frequency devfreq asked the device for */
static ssize_t show_target_freq(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
633
/* sysfs "polling_interval" (read): current polling period in ms */
static ssize_t show_polling_interval(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}
639
640static ssize_t store_polling_interval(struct device *dev,
641 struct device_attribute *attr,
642 const char *buf, size_t count)
643{
644 struct devfreq *df = to_devfreq(dev);
645 unsigned int value;
646 int ret;
647
648 ret = sscanf(buf, "%u", &value);
649 if (ret != 1)
12e26265 650 return -EINVAL;
9005b650 651
7e6fdd4b 652 df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
9005b650
MH
653 ret = count;
654
9005b650
MH
655 return ret;
656}
657
6530b9de
MH
658static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
659 const char *buf, size_t count)
660{
661 struct devfreq *df = to_devfreq(dev);
662 unsigned long value;
663 int ret;
664 unsigned long max;
665
666 ret = sscanf(buf, "%lu", &value);
667 if (ret != 1)
12e26265 668 return -EINVAL;
6530b9de
MH
669
670 mutex_lock(&df->lock);
671 max = df->max_freq;
672 if (value && max && value > max) {
673 ret = -EINVAL;
674 goto unlock;
675 }
676
677 df->min_freq = value;
678 update_devfreq(df);
679 ret = count;
680unlock:
681 mutex_unlock(&df->lock);
6530b9de
MH
682 return ret;
683}
684
/* sysfs "min_freq" (read): the user's lower limit (0 = unset) */
static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
}
690
691static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
692 const char *buf, size_t count)
693{
694 struct devfreq *df = to_devfreq(dev);
695 unsigned long value;
696 int ret;
697 unsigned long min;
698
699 ret = sscanf(buf, "%lu", &value);
700 if (ret != 1)
12e26265 701 return -EINVAL;
6530b9de
MH
702
703 mutex_lock(&df->lock);
704 min = df->min_freq;
705 if (value && min && value < min) {
706 ret = -EINVAL;
707 goto unlock;
708 }
709
710 df->max_freq = value;
711 update_devfreq(df);
712 ret = count;
713unlock:
714 mutex_unlock(&df->lock);
6530b9de
MH
715 return ret;
716}
717
/* sysfs "max_freq" (read): the user's upper limit (0 = unset) */
static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
}
723
d287de85
NM
/*
 * sysfs "available_frequencies": space-separated list of the parent
 * device's OPP frequencies in ascending order.
 */
static ssize_t show_available_freqs(struct device *d,
				    struct device_attribute *attr,
				    char *buf)
{
	struct devfreq *df = to_devfreq(d);
	struct device *dev = df->dev.parent;
	struct opp *opp;
	ssize_t count = 0;
	unsigned long freq = 0;

	/* OPP table accesses must be in an RCU read-side section */
	rcu_read_lock();
	do {
		/* next OPP at or above freq; ERR_PTR ends the walk */
		opp = opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
				   "%lu ", freq);
		freq++;	/* step past this OPP so ceil() finds the next */
	} while (1);
	rcu_read_unlock();

	/* Truncate the trailing space */
	if (count)
		count--;

	count += sprintf(&buf[count], "\n");

	return count;
}
754
e552bbaf
JL
/*
 * sysfs "trans_stat": transition-count matrix between frequency levels
 * plus per-level residency time.
 *
 * NOTE(review): the output is built with unbounded sprintf(); a large
 * max_state could overrun the PAGE_SIZE sysfs buffer — confirm that
 * max_state stays small for all users of this tree.
 */
static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct devfreq *devfreq = to_devfreq(dev);
	ssize_t len;
	int i, j, err;
	unsigned int max_state = devfreq->profile->max_state;

	/* fold the time since the last update into the statistics first */
	err = devfreq_update_status(devfreq, devfreq->previous_freq);
	if (err)
		return 0;

	len = sprintf(buf, " From : To\n");
	len += sprintf(buf + len, " :");
	for (i = 0; i < max_state; i++)
		len += sprintf(buf + len, "%8u",
				devfreq->profile->freq_table[i]);

	len += sprintf(buf + len, " time(ms)\n");

	for (i = 0; i < max_state; i++) {
		/* mark the row of the current frequency with '*' */
		if (devfreq->profile->freq_table[i]
					== devfreq->previous_freq) {
			len += sprintf(buf + len, "*");
		} else {
			len += sprintf(buf + len, " ");
		}
		len += sprintf(buf + len, "%8u:",
				devfreq->profile->freq_table[i]);
		for (j = 0; j < max_state; j++)
			len += sprintf(buf + len, "%8u",
				devfreq->trans_table[(i * max_state) + j]);
		len += sprintf(buf + len, "%10u\n",
			jiffies_to_msecs(devfreq->time_in_state[i]));
	}

	len += sprintf(buf + len, "Total transition : %u\n",
			devfreq->total_trans);
	return len;
}
795
9005b650
MH
/* default sysfs attributes installed on every devfreq device */
static struct device_attribute devfreq_attrs[] = {
	__ATTR(governor, S_IRUGO, show_governor, NULL),
	__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
	__ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL),
	__ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
	__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
	       store_polling_interval),
	__ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
	__ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
	__ATTR(trans_stat, S_IRUGO, show_trans_table, NULL),
	{ },
};
808
a3c98b8b
MH
809static int __init devfreq_init(void)
810{
811 devfreq_class = class_create(THIS_MODULE, "devfreq");
812 if (IS_ERR(devfreq_class)) {
813 pr_err("%s: couldn't create class\n", __FILE__);
814 return PTR_ERR(devfreq_class);
815 }
7e6fdd4b
RV
816
817 devfreq_wq = create_freezable_workqueue("devfreq_wq");
818 if (IS_ERR(devfreq_wq)) {
819 class_destroy(devfreq_class);
820 pr_err("%s: couldn't create workqueue\n", __FILE__);
821 return PTR_ERR(devfreq_wq);
822 }
9005b650 823 devfreq_class->dev_attrs = devfreq_attrs;
7e6fdd4b 824
a3c98b8b
MH
825 return 0;
826}
827subsys_initcall(devfreq_init);
828
/* devfreq_exit() - Tear down the class and workqueue created at init */
static void __exit devfreq_exit(void)
{
	class_destroy(devfreq_class);
	destroy_workqueue(devfreq_wq);
}
module_exit(devfreq_exit);
835
836/*
837 * The followings are helper functions for devfreq user device drivers with
838 * OPP framework.
839 */
840
841/**
842 * devfreq_recommended_opp() - Helper function to get proper OPP for the
843 * freq value given to target callback.
c5b4a1c1
NM
844 * @dev: The devfreq user device. (parent of devfreq)
845 * @freq: The frequency given to target function
846 * @flags: Flags handed from devfreq framework.
a3c98b8b
MH
847 *
848 */
ab5f299f
MH
849struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
850 u32 flags)
a3c98b8b 851{
ab5f299f 852 struct opp *opp;
a3c98b8b 853
ab5f299f
MH
854 if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
855 /* The freq is an upper bound. opp should be lower */
a3c98b8b 856 opp = opp_find_freq_floor(dev, freq);
ab5f299f
MH
857
858 /* If not available, use the closest opp */
859 if (opp == ERR_PTR(-ENODEV))
860 opp = opp_find_freq_ceil(dev, freq);
861 } else {
862 /* The freq is an lower bound. opp should be higher */
863 opp = opp_find_freq_ceil(dev, freq);
864
865 /* If not available, use the closest opp */
866 if (opp == ERR_PTR(-ENODEV))
867 opp = opp_find_freq_floor(dev, freq);
868 }
869
a3c98b8b
MH
870 return opp;
871}
872
873/**
874 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
875 * for any changes in the OPP availability
876 * changes
c5b4a1c1
NM
877 * @dev: The devfreq user device. (parent of devfreq)
878 * @devfreq: The devfreq object.
a3c98b8b
MH
879 */
880int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
881{
882 struct srcu_notifier_head *nh = opp_get_notifier(dev);
883
884 if (IS_ERR(nh))
885 return PTR_ERR(nh);
886 return srcu_notifier_chain_register(nh, &devfreq->nb);
887}
888
889/**
890 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
891 * notified for any changes in the OPP
892 * availability changes anymore.
c5b4a1c1
NM
893 * @dev: The devfreq user device. (parent of devfreq)
894 * @devfreq: The devfreq object.
a3c98b8b
MH
895 *
896 * At exit() callback of devfreq_dev_profile, this must be included if
897 * devfreq_recommended_opp is used.
898 */
899int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
900{
901 struct srcu_notifier_head *nh = opp_get_notifier(dev);
902
903 if (IS_ERR(nh))
904 return PTR_ERR(nh);
905 return srcu_notifier_chain_unregister(nh, &devfreq->nb);
906}
907
/* Module metadata */
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("devfreq class support");
MODULE_LICENSE("GPL");