/*
 * arch/s390/appldata/appldata_base.c
 *
 * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
 * Exports appldata_register_ops() and appldata_unregister_ops() for the
 * data gathering modules.
 *
 * Copyright IBM Corp. 2003, 2009
 *
 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT "appldata"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <asm/appldata.h>
#include <asm/timer.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/smp.h>

#include "appldata.h"


#define APPLDATA_CPU_INTERVAL	10000	/* default (CPU) time for
					   sampling interval in
					   milliseconds */

#define TOD_MICRO	0x01000		/* nr. of TOD clock units
					   for 1 microsecond */
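/*
 * Note: on s390 the TOD clock increments bit 51 once per microsecond, so
 * one microsecond corresponds to 2^12 = 0x1000 TOD clock units; that is
 * where TOD_MICRO comes from.
 */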

static struct platform_device *appldata_pdev;

/*
 * /proc entries (sysctl)
 */
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
static int appldata_timer_handler(struct ctl_table *ctl, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos);
static int appldata_interval_handler(struct ctl_table *ctl, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos);

static struct ctl_table_header *appldata_sysctl_header;
static struct ctl_table appldata_table[] = {
	{
		.procname	= "timer",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= appldata_timer_handler,
	},
	{
		.procname	= "interval",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= appldata_interval_handler,
	},
	{ },
};

static struct ctl_table appldata_dir_table[] = {
	{
		.procname	= appldata_proc_name,
		.maxlen		= 0,
		.mode		= S_IRUGO | S_IXUGO,
		.child		= appldata_table,
	},
	{ },
};
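/*
 * The tables above surface as /proc/sys/appldata/timer and
 * /proc/sys/appldata/interval. A usage sketch from the shell:
 *
 *	echo 5000 > /proc/sys/appldata/interval		# sample every 5000 ms
 *	echo 1 > /proc/sys/appldata/timer		# start the timers
 *	echo 0 > /proc/sys/appldata/timer		# stop them again
 */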

/*
 * Timer
 */
static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
static atomic_t appldata_expire_count = ATOMIC_INIT(0);

static DEFINE_SPINLOCK(appldata_timer_lock);
static int appldata_interval = APPLDATA_CPU_INTERVAL;
static int appldata_timer_active;
static int appldata_timer_suspended;

/*
 * Work queue
 */
static struct workqueue_struct *appldata_wq;
static void appldata_work_fn(struct work_struct *work);
static DECLARE_WORK(appldata_work, appldata_work_fn);


/*
 * Ops list
 */
static DEFINE_MUTEX(appldata_ops_mutex);
static LIST_HEAD(appldata_ops_list);


/*************************** timer, work, DIAG *******************************/
/*
 * appldata_timer_function()
 *
 * schedule work and reschedule timer
 */
static void appldata_timer_function(unsigned long data)
{
	if (atomic_dec_and_test(&appldata_expire_count)) {
		atomic_set(&appldata_expire_count, num_online_cpus());
		queue_work(appldata_wq, (struct work_struct *) data);
	}
}
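/*
 * How the per-CPU timers cooperate: every online CPU runs a virtual
 * (CPU-time based) timer whose period is appldata_interval divided by the
 * number of online CPUs, and each expiry decrements appldata_expire_count.
 * Only the expiry that brings the count to zero queues the work and resets
 * the count, so the data gathering runs roughly once per appldata_interval
 * of CPU time consumed across all online CPUs.
 */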

/*
 * appldata_work_fn()
 *
 * call data gathering function for each (active) module
 */
static void appldata_work_fn(struct work_struct *work)
{
	struct list_head *lh;
	struct appldata_ops *ops;

	get_online_cpus();
	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		ops = list_entry(lh, struct appldata_ops, list);
		if (ops->active == 1)
			ops->callback(ops->data);
	}
	mutex_unlock(&appldata_ops_mutex);
	put_online_cpus();
}

/*
 * appldata_diag()
 *
 * prepare parameter list, issue DIAG 0xDC
 */
int appldata_diag(char record_nr, u16 function, unsigned long buffer,
		  u16 length, char *mod_lvl)
{
	struct appldata_product_id id = {
		.prod_nr    = {0xD3, 0xC9, 0xD5, 0xE4,
			       0xE7, 0xD2, 0xD9},	/* "LINUXKR" */
		.prod_fn    = 0xD5D3,			/* "NL" */
		.version_nr = 0xF2F6,			/* "26" */
		.release_nr = 0xF0F1,			/* "01" */
	};

	id.record_nr = record_nr;
	id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
	return appldata_asm(&id, function, (void *) buffer, length);
}
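/*
 * The product id constants above are EBCDIC encodings, as expected by the
 * z/VM monitor stream (0xD3 = 'L', 0xC9 = 'I', ..., 0xF0-0xF9 = '0'-'9').
 * Callers in this file invoke appldata_diag() with
 * APPLDATA_START_INTERVAL_REC to begin and APPLDATA_STOP_REC to end the
 * collection of a module's record.
 */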
/************************ timer, work, DIAG <END> ****************************/


/****************************** /proc stuff **********************************/

/*
 * __appldata_mod_vtimer_wrap()
 *
 * wrapper function for mod_virt_timer_periodic(), needed because
 * smp_call_function_single() passes only a single pointer argument to the
 * called function.
 */
static void __appldata_mod_vtimer_wrap(void *p)
{
	struct {
		struct vtimer_list *timer;
		u64 expires;
	} *args = p;

	mod_virt_timer_periodic(args->timer, args->expires);
}

#define APPLDATA_ADD_TIMER	0
#define APPLDATA_DEL_TIMER	1
#define APPLDATA_MOD_TIMER	2

/*
 * __appldata_vtimer_setup()
 *
 * Add, delete or modify virtual timers on all online cpus.
 * The caller needs to get the appldata_timer_lock spinlock.
 */
static void
__appldata_vtimer_setup(int cmd)
{
	u64 per_cpu_interval;
	int i;

	switch (cmd) {
	case APPLDATA_ADD_TIMER:
		if (appldata_timer_active)
			break;
		per_cpu_interval = (u64) (appldata_interval*1000 /
					  num_online_cpus()) * TOD_MICRO;
		for_each_online_cpu(i) {
			per_cpu(appldata_timer, i).expires = per_cpu_interval;
			smp_call_function_single(i, add_virt_timer_periodic,
						 &per_cpu(appldata_timer, i),
						 1);
		}
		appldata_timer_active = 1;
		break;
	case APPLDATA_DEL_TIMER:
		for_each_online_cpu(i)
			del_virt_timer(&per_cpu(appldata_timer, i));
		if (!appldata_timer_active)
			break;
		appldata_timer_active = 0;
		atomic_set(&appldata_expire_count, num_online_cpus());
		break;
	case APPLDATA_MOD_TIMER:
		per_cpu_interval = (u64) (appldata_interval*1000 /
					  num_online_cpus()) * TOD_MICRO;
		if (!appldata_timer_active)
			break;
		for_each_online_cpu(i) {
			struct {
				struct vtimer_list *timer;
				u64 expires;
			} args;

			args.timer = &per_cpu(appldata_timer, i);
			args.expires = per_cpu_interval;
			smp_call_function_single(i, __appldata_mod_vtimer_wrap,
						 &args, 1);
		}
	}
}
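/*
 * Worked example for the interval arithmetic above: with the default
 * appldata_interval of 10000 ms and 4 online CPUs, each per-CPU timer is
 * given 10000 * 1000 / 4 = 2500000 microseconds of CPU time, i.e. an
 * expires value of 2500000 * 0x1000 TOD clock units.
 */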

/*
 * appldata_timer_handler()
 *
 * Start/Stop timer, show status of timer (0 = not active, 1 = active)
 */
static int
appldata_timer_handler(struct ctl_table *ctl, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len;
	char buf[2];

	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		/* strncpy()/strnlen(): "1\n" plus NUL would overflow buf[2] */
		strncpy(buf, appldata_timer_active ? "1\n" : "0\n",
			ARRAY_SIZE(buf));
		len = strnlen(buf, ARRAY_SIZE(buf));
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
		return -EFAULT;
	get_online_cpus();
	spin_lock(&appldata_timer_lock);
	if (buf[0] == '1')
		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
	else if (buf[0] == '0')
		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
	spin_unlock(&appldata_timer_lock);
	put_online_cpus();
out:
	*lenp = len;
	*ppos += len;
	return 0;
}

/*
 * appldata_interval_handler()
 *
 * Set (CPU) timer interval for collection of data (in milliseconds), show
 * current timer interval.
 */
static int
appldata_interval_handler(struct ctl_table *ctl, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len, interval;
	char buf[16];

	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		len = sprintf(buf, "%i\n", appldata_interval);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	len = *lenp;
	if (len > sizeof(buf) - 1)
		len = sizeof(buf) - 1;
	if (copy_from_user(buf, buffer, len))
		return -EFAULT;
	/* NUL-terminate the copied bytes so sscanf() cannot read past buf */
	buf[len] = '\0';
	interval = 0;
	sscanf(buf, "%i", &interval);
	if (interval <= 0)
		return -EINVAL;

	get_online_cpus();
	spin_lock(&appldata_timer_lock);
	appldata_interval = interval;
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
	put_online_cpus();
out:
	*lenp = len;
	*ppos += len;
	return 0;
}

/*
 * appldata_generic_handler()
 *
 * Generic start/stop monitoring and DIAG, show status of
 * monitoring (0 = not in process, 1 = in process)
 */
static int
appldata_generic_handler(struct ctl_table *ctl, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct appldata_ops *ops = NULL, *tmp_ops;
	int rc, len, found;
	char buf[2];
	struct list_head *lh;

	found = 0;
	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		tmp_ops = list_entry(lh, struct appldata_ops, list);
		if (&tmp_ops->ctl_table[2] == ctl)
			found = 1;
	}
	if (!found) {
		mutex_unlock(&appldata_ops_mutex);
		return -ENODEV;
	}
	ops = ctl->data;
	if (!try_module_get(ops->owner)) {	/* protect this function */
		mutex_unlock(&appldata_ops_mutex);
		return -ENODEV;
	}
	mutex_unlock(&appldata_ops_mutex);

	if (!*lenp || *ppos) {
		*lenp = 0;
		module_put(ops->owner);
		return 0;
	}
	if (!write) {
		/* strncpy()/strnlen(): "1\n" plus NUL would overflow buf[2] */
		strncpy(buf, ops->active ? "1\n" : "0\n", ARRAY_SIZE(buf));
		len = strnlen(buf, ARRAY_SIZE(buf));
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len)) {
			module_put(ops->owner);
			return -EFAULT;
		}
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer,
			   len > sizeof(buf) ? sizeof(buf) : len)) {
		module_put(ops->owner);
		return -EFAULT;
	}

	mutex_lock(&appldata_ops_mutex);
	if ((buf[0] == '1') && (ops->active == 0)) {
		/* take an extra reference to protect the work queue callback */
		if (!try_module_get(ops->owner)) {
			mutex_unlock(&appldata_ops_mutex);
			/* drop the reference taken at function entry */
			module_put(ops->owner);
			return -ENODEV;
		}
		ops->callback(ops->data);	/* init record */
		rc = appldata_diag(ops->record_nr,
				   APPLDATA_START_INTERVAL_REC,
				   (unsigned long) ops->data, ops->size,
				   ops->mod_lvl);
		if (rc != 0) {
			pr_err("Starting the data collection for %s "
			       "failed with rc=%d\n", ops->name, rc);
			module_put(ops->owner);
		} else
			ops->active = 1;
	} else if ((buf[0] == '0') && (ops->active == 1)) {
		ops->active = 0;
		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
				   (unsigned long) ops->data, ops->size,
				   ops->mod_lvl);
		if (rc != 0)
			pr_err("Stopping the data collection for %s "
			       "failed with rc=%d\n", ops->name, rc);
		module_put(ops->owner);	/* drop the work queue reference */
	}
	mutex_unlock(&appldata_ops_mutex);
out:
	*lenp = len;
	*ppos += len;
	module_put(ops->owner);
	return 0;
}
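/*
 * Each registered module gets its own file, /proc/sys/appldata/<ops->name>,
 * backed by appldata_generic_handler(). For a module named "mem", for
 * example, writing '1' to /proc/sys/appldata/mem starts the collection of
 * its record, writing '0' stops it, and reading the file shows whether
 * collection is currently active.
 */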

/*************************** /proc stuff <END> *******************************/


/************************* module-ops management *****************************/
/*
 * appldata_register_ops()
 *
 * update ops list, register /proc/sys entries
 */
int appldata_register_ops(struct appldata_ops *ops)
{
	if (ops->size > APPLDATA_MAX_REC_SIZE)
		return -EINVAL;

	/*
	 * Four table entries: [0] is the "appldata" directory, [2] is the
	 * module's own entry; [1] and [3] stay zeroed and terminate the
	 * directory and child tables respectively.
	 */
	ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL);
	if (!ops->ctl_table)
		return -ENOMEM;

	mutex_lock(&appldata_ops_mutex);
	list_add(&ops->list, &appldata_ops_list);
	mutex_unlock(&appldata_ops_mutex);

	ops->ctl_table[0].procname = appldata_proc_name;
	ops->ctl_table[0].maxlen   = 0;
	ops->ctl_table[0].mode     = S_IRUGO | S_IXUGO;
	ops->ctl_table[0].child    = &ops->ctl_table[2];

	ops->ctl_table[2].procname = ops->name;
	ops->ctl_table[2].mode     = S_IRUGO | S_IWUSR;
	ops->ctl_table[2].proc_handler = appldata_generic_handler;
	ops->ctl_table[2].data = ops;

	ops->sysctl_header = register_sysctl_table(ops->ctl_table);
	if (!ops->sysctl_header)
		goto out;
	return 0;
out:
	mutex_lock(&appldata_ops_mutex);
	list_del(&ops->list);
	mutex_unlock(&appldata_ops_mutex);
	kfree(ops->ctl_table);
	return -ENOMEM;
}
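/*
 * Usage sketch for a data gathering module (hypothetical names, not part
 * of this file); the fields shown are the ones this file actually uses:
 *
 *	static struct mydata_record record;		(module's record layout)
 *
 *	static struct appldata_ops mydata_ops = {
 *		.name	   = "mydata",
 *		.record_nr = APPLDATA_RECORD_MYDATA_ID,	(hypothetical id)
 *		.size	   = sizeof(record),
 *		.callback  = &mydata_fill_record,	(hypothetical callback)
 *		.data	   = &record,
 *		.owner	   = THIS_MODULE,
 *		.mod_lvl   = {0xF0, 0xF0},		(EBCDIC "00")
 *	};
 *
 *	rc = appldata_register_ops(&mydata_ops);
 *
 * with a matching appldata_unregister_ops(&mydata_ops) on module exit.
 */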

/*
 * appldata_unregister_ops()
 *
 * update ops list, unregister /proc entries
 */
void appldata_unregister_ops(struct appldata_ops *ops)
{
	mutex_lock(&appldata_ops_mutex);
	list_del(&ops->list);
	mutex_unlock(&appldata_ops_mutex);
	unregister_sysctl_table(ops->sysctl_header);
	kfree(ops->ctl_table);
}
/********************** module-ops management <END> **************************/


/**************************** suspend / resume *******************************/
static int appldata_freeze(struct device *dev)
{
	struct appldata_ops *ops;
	int rc;
	struct list_head *lh;

	get_online_cpus();
	spin_lock(&appldata_timer_lock);
	if (appldata_timer_active) {
		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
		appldata_timer_suspended = 1;
	}
	spin_unlock(&appldata_timer_lock);
	put_online_cpus();

	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		ops = list_entry(lh, struct appldata_ops, list);
		if (ops->active == 1) {
			rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
					   (unsigned long) ops->data, ops->size,
					   ops->mod_lvl);
			if (rc != 0)
				pr_err("Stopping the data collection for %s "
				       "failed with rc=%d\n", ops->name, rc);
		}
	}
	mutex_unlock(&appldata_ops_mutex);
	return 0;
}

static int appldata_restore(struct device *dev)
{
	struct appldata_ops *ops;
	int rc;
	struct list_head *lh;

	get_online_cpus();
	spin_lock(&appldata_timer_lock);
	if (appldata_timer_suspended) {
		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
		appldata_timer_suspended = 0;
	}
	spin_unlock(&appldata_timer_lock);
	put_online_cpus();

	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		ops = list_entry(lh, struct appldata_ops, list);
		if (ops->active == 1) {
			ops->callback(ops->data);	/* init record */
			rc = appldata_diag(ops->record_nr,
					   APPLDATA_START_INTERVAL_REC,
					   (unsigned long) ops->data, ops->size,
					   ops->mod_lvl);
			if (rc != 0) {
				pr_err("Starting the data collection for %s "
				       "failed with rc=%d\n", ops->name, rc);
			}
		}
	}
	mutex_unlock(&appldata_ops_mutex);
	return 0;
}

static int appldata_thaw(struct device *dev)
{
	return appldata_restore(dev);
}

static const struct dev_pm_ops appldata_pm_ops = {
	.freeze		= appldata_freeze,
	.thaw		= appldata_thaw,
	.restore	= appldata_restore,
};

static struct platform_driver appldata_pdrv = {
	.driver = {
		.name	= "appldata",
		.owner	= THIS_MODULE,
		.pm	= &appldata_pm_ops,
	},
};
/************************* suspend / resume <END> ****************************/


/******************************* init / exit *********************************/

static void __cpuinit appldata_online_cpu(int cpu)
{
	init_virt_timer(&per_cpu(appldata_timer, cpu));
	per_cpu(appldata_timer, cpu).function = appldata_timer_function;
	per_cpu(appldata_timer, cpu).data = (unsigned long) &appldata_work;
	atomic_inc(&appldata_expire_count);
	spin_lock(&appldata_timer_lock);
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
}

static void __cpuinit appldata_offline_cpu(int cpu)
{
	del_virt_timer(&per_cpu(appldata_timer, cpu));
	if (atomic_dec_and_test(&appldata_expire_count)) {
		atomic_set(&appldata_expire_count, num_online_cpus());
		queue_work(appldata_wq, &appldata_work);
	}
	spin_lock(&appldata_timer_lock);
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
}

static int __cpuinit appldata_cpu_notify(struct notifier_block *self,
					 unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		appldata_online_cpu((long) hcpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		appldata_offline_cpu((long) hcpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata appldata_nb = {
	.notifier_call = appldata_cpu_notify,
};
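/*
 * CPU hotplug: a CPU coming online gets its own virtual timer and joins
 * the expire-count round robin; a CPU going offline has its timer deleted.
 * In both cases the remaining timers are re-spread via APPLDATA_MOD_TIMER
 * so that the aggregate sampling interval stays at appldata_interval.
 */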

/*
 * appldata_init()
 *
 * init timer, register /proc entries
 */
static int __init appldata_init(void)
{
	int i, rc;

	rc = platform_driver_register(&appldata_pdrv);
	if (rc)
		return rc;

	appldata_pdev = platform_device_register_simple("appldata", -1, NULL,
							0);
	if (IS_ERR(appldata_pdev)) {
		rc = PTR_ERR(appldata_pdev);
		goto out_driver;
	}
	appldata_wq = create_singlethread_workqueue("appldata");
	if (!appldata_wq) {
		rc = -ENOMEM;
		goto out_device;
	}

	get_online_cpus();
	for_each_online_cpu(i)
		appldata_online_cpu(i);
	put_online_cpus();

	/* Register cpu hotplug notifier */
	register_hotcpu_notifier(&appldata_nb);

	appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
	return 0;

out_device:
	platform_device_unregister(appldata_pdev);
out_driver:
	platform_driver_unregister(&appldata_pdrv);
	return rc;
}

__initcall(appldata_init);

/**************************** init / exit <END> ******************************/

EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops);
EXPORT_SYMBOL_GPL(appldata_diag);

#ifdef CONFIG_SWAP
EXPORT_SYMBOL_GPL(si_swapinfo);
#endif
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait);