#ifdef CONFIG_MT_SCHED
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include "mt_sched_drv.h"

#define SCHED_DEV_NAME "sched"

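/*
 * Per-task backup of the affinity requested through this driver.  Entries
 * are kept on a global list so that the requested mask can be re-applied
 * from the CPU hotplug notifier when a CPU in the mask comes back online.
 */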
struct mt_task {
	pid_t pid;
	struct task_struct *p;
	struct cpumask mask;
	struct list_head list;
};

static struct mt_task mt_task_head;
static DEFINE_SPINLOCK(mt_sched_spinlock);

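/*
 * Copy a cpumask from user space, zero-filling when the user buffer is
 * shorter than cpumask_size() and truncating when it is longer.
 */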
static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
			     struct cpumask *new_mask)
{
	if (len < cpumask_size())
		cpumask_clear(new_mask);
	else if (len > cpumask_size())
		len = cpumask_size();

	return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
}

/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 */
static struct task_struct *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_vpid(pid) : current;
}

/*
 * check the target process has a UID that matches the current process's
 */
static bool check_same_owner(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred;
	bool match;

	rcu_read_lock();
	pcred = __task_cred(p);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
	match = (cred->euid == pcred->euid || cred->euid == pcred->uid);
#else
	match = (uid_eq(cred->euid, pcred->euid) || uid_eq(cred->euid, pcred->uid));
#endif
	rcu_read_unlock();
	return match;
}

66 * check the task link list. If the task is exit, delete the task.
67 */
68 static void mt_sched_check_tasks(void)
69 {
70 struct mt_task *tmp, *tmp2;
71 unsigned long irq_flags;
72
73 spin_lock_irqsave(&mt_sched_spinlock, irq_flags);
74 list_for_each_entry_safe(tmp, tmp2, &mt_task_head.list, list) {
75 if (tmp->pid != tmp->p->pid) {
76 list_del(&(tmp->list));
77 kfree(tmp);
78 }
79 }
80 spin_unlock_irqrestore(&mt_sched_spinlock, irq_flags);
81 }
82
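/*
 * Record (or update) the backup affinity mask for @p.  Stale entries for
 * exited tasks are dropped on the way through the list.
 */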
static long __mt_sched_addaffinity(struct task_struct *p, const struct cpumask *new_mask)
{
	struct mt_task *new;
	struct mt_task *tmp, *tmp2;
	unsigned long irq_flags;
	int find = 0;

	new = kmalloc(sizeof(struct mt_task), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	INIT_LIST_HEAD(&(new->list));
	new->pid = p->pid;
	new->p = p;
	cpumask_copy(&new->mask, new_mask);

	spin_lock_irqsave(&mt_sched_spinlock, irq_flags);
	list_for_each_entry_safe(tmp, tmp2, &mt_task_head.list, list) {
		if (tmp->pid != tmp->p->pid) {
			list_del(&(tmp->list));
			kfree(tmp);
			continue;
		}
		if (!find && (tmp->p == p)) {
			find = 1;
			cpumask_copy(&tmp->mask, new_mask);
		}
	}

	if (!find)
		list_add(&(new->list), &(mt_task_head.list));

	spin_unlock_irqrestore(&mt_sched_spinlock, irq_flags);

	if (find)
		kfree(new);

	return 0;
}

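/*
 * Set the CPU affinity of the task identified by @pid, mirroring
 * sched_setaffinity() (permission and cpuset checks included), and then
 * store the requested mask in the backup list via __mt_sched_addaffinity().
 */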
static long __mt_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
	cpumask_var_t cpus_allowed, new_mask;
	struct task_struct *p;
	int retval;

	get_online_cpus();
	rcu_read_lock();

	p = find_process_by_pid(pid);
	if (!p) {
		rcu_read_unlock();
		put_online_cpus();
		pr_debug("MT_SCHED: setaffinity find process %d fail\n", pid);
		return -ESRCH;
	}

	/* Prevent p going away */
	get_task_struct(p);
	rcu_read_unlock();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
	if (p->flags & PF_NO_SETAFFINITY) {
		retval = -EINVAL;
		pr_debug("MT_SCHED: setaffinity flags PF_NO_SETAFFINITY fail\n");
		goto out_put_task;
	}
#endif
	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
		retval = -ENOMEM;
		pr_debug("MT_SCHED: setaffinity alloc_cpumask_var for cpus_allowed fail\n");
		goto out_put_task;
	}
	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
		retval = -ENOMEM;
		pr_debug("MT_SCHED: setaffinity alloc_cpumask_var for new_mask fail\n");
		goto out_free_cpus_allowed;
	}
	retval = -EPERM;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
	if (!check_same_owner(p) && !ns_capable(task_user_ns(p), CAP_SYS_NICE)) {
		pr_debug("MT_SCHED: setaffinity check_same_owner and task_ns_capable fail\n");
		goto out_unlock;
	}
#else
	if (!check_same_owner(p)) {
		rcu_read_lock();
		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
			rcu_read_unlock();
			pr_debug("MT_SCHED: setaffinity check_same_owner and task_ns_capable fail\n");
			goto out_unlock;
		}
		rcu_read_unlock();
	}
#endif

	retval = security_task_setscheduler(p);
	if (retval) {
		pr_debug("MT_SCHED: setaffinity security_task_setscheduler fail, status: %d\n",
			 retval);
		goto out_unlock;
	}

	cpuset_cpus_allowed(p, cpus_allowed);
	cpumask_and(new_mask, in_mask, cpus_allowed);
again:
	retval = set_cpus_allowed_ptr(p, new_mask);
	if (retval)
		pr_debug("MT_SCHED: set_cpus_allowed_ptr status %d\n", retval);

	if (!retval) {
		cpuset_cpus_allowed(p, cpus_allowed);
		if (!cpumask_subset(new_mask, cpus_allowed)) {
			/*
			 * We must have raced with a concurrent cpuset
			 * update. Just reset the cpus_allowed to the
			 * cpuset's cpus_allowed
			 */
			cpumask_copy(new_mask, cpus_allowed);
			goto again;
		}
	}

	/* Record the mask so the CPU hotplug notifier can restore it later. */
	if ((!retval) || (!cpumask_intersects(new_mask, cpu_active_mask)))
		retval = __mt_sched_addaffinity(p, new_mask);

out_unlock:
	free_cpumask_var(new_mask);
out_free_cpus_allowed:
	free_cpumask_var(cpus_allowed);
out_put_task:
	put_task_struct(p);
	put_online_cpus();

	if (retval)
		pr_debug("MT_SCHED: setaffinity status %d\n", retval);

	return retval;
}

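/*
 * Report both the task's effective online affinity (@mask) and the backup
 * mask recorded by this driver (@mt_mask, cleared if no entry exists).
 */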
static long __mt_sched_getaffinity(pid_t pid, struct cpumask *mask, struct cpumask *mt_mask)
{
	struct task_struct *p;
	struct mt_task *tmp;
	unsigned long irq_flags;
	unsigned long flags;
	int retval;

	get_online_cpus();
	rcu_read_lock();

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p) {
		pr_debug("MT_SCHED: getaffinity find process %d fail\n", pid);
		goto out_unlock;
	}

	retval = security_task_getscheduler(p);
	if (retval) {
		pr_debug("MT_SCHED: getaffinity security_task_getscheduler fail, status: %d\n",
			 retval);
		goto out_unlock;
	}

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);

	/* Also report the backup mask recorded by this driver, if any. */
	cpumask_clear(mt_mask);
	spin_lock_irqsave(&mt_sched_spinlock, irq_flags);
	list_for_each_entry(tmp, &mt_task_head.list, list) {
		if ((p == tmp->p) && (p->pid == tmp->pid)) {
			cpumask_copy(mt_mask, &tmp->mask);
			break;
		}
	}
	spin_unlock_irqrestore(&mt_sched_spinlock, irq_flags);

out_unlock:
	rcu_read_unlock();
	put_online_cpus();

	if (retval)
		pr_debug("MT_SCHED: getaffinity status %d\n", retval);

	return retval;
}

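/*
 * Remove the backup entry for @pid (the calling task if @pid is 0).
 */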
static long __mt_sched_exitaffinity(pid_t pid)
{
	struct mt_task *tmp, *tmp2;
	unsigned long irq_flags;
	int find = 0;

	if (!pid)
		pid = current->pid;

	spin_lock_irqsave(&mt_sched_spinlock, irq_flags);
	list_for_each_entry_safe(tmp, tmp2, &mt_task_head.list, list) {
		if (pid == tmp->pid) {
			list_del(&(tmp->list));
			kfree(tmp);
			find = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&mt_sched_spinlock, irq_flags);
	if (!find) {
		pr_debug("MT_SCHED: exit affinity find process %d fail.\n", pid);
		return -ESRCH;
	}
	return 0;
}

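/*
 * ioctl entry point for the character device created below (device_create()
 * names the node "mtk_sched").  IOCTL_SETAFFINITY, IOCTL_GETAFFINITY,
 * IOCTL_EXITAFFINITY and struct ioctl_arg are assumed to come from
 * mt_sched_drv.h; a caller is expected to fill struct ioctl_arg with a pid,
 * a mask length in bytes and user-space mask pointer(s).
 */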
static long sched_ioctl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int retval;
	pid_t pid;
	struct ioctl_arg data;
	cpumask_var_t new_mask, mask, mt_mask;
	int len;

	memset(&data, 0, sizeof(data));
	switch (cmd) {
	case IOCTL_SETAFFINITY:
		if (copy_from_user(&data, (int __user *)arg, sizeof(data))) {
			retval = -EFAULT;
			goto done;
		}

		if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
			return -ENOMEM;

		retval = get_user_cpu_mask(data.mask, data.len, new_mask);
		if (retval == 0)
			retval = __mt_sched_setaffinity(data.pid, new_mask);
		if (retval)
			pr_debug("MT_SCHED: setaffinity status %d\n", retval);
		free_cpumask_var(new_mask);
		break;

	case IOCTL_GETAFFINITY:
		if (copy_from_user(&data, (int __user *)arg, sizeof(data))) {
			retval = -EFAULT;
			goto done;
		}

		len = data.len;

		if ((len * BITS_PER_BYTE) < nr_cpu_ids)
			return -EINVAL;
		if (len & (sizeof(unsigned int) - 1))
			return -EINVAL;

		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;

		if (!alloc_cpumask_var(&mt_mask, GFP_KERNEL)) {
			retval = -ENOMEM;
			goto getaffinity_free1;
		}

		retval = __mt_sched_getaffinity(data.pid, mask, mt_mask);
		if (retval == 0) {
			size_t retlen = min_t(size_t, len, cpumask_size());

			if (copy_to_user((int __user *)data.mask, mask, retlen)) {
				retval = -EFAULT;
				goto getaffinity_free;
			} else {
				retval = retlen;
			}

			if (copy_to_user((int __user *)data.mt_mask, mt_mask, retlen))
				retval = -EFAULT;
			else
				retval = retlen;
		}
getaffinity_free:
		free_cpumask_var(mt_mask);
getaffinity_free1:
		free_cpumask_var(mask);
		break;

	case IOCTL_EXITAFFINITY:
		if (copy_from_user(&pid, (int __user *)arg, sizeof(pid))) {
			retval = -EFAULT;
			goto done;
		}

		retval = __mt_sched_exitaffinity(pid);
		break;
	default:
		retval = -ENOTTY;
		break;
	}
done:
	return retval;
}

const struct file_operations sched_ioctl_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = sched_ioctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = sched_ioctl_compat,
#endif
};

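/*
 * Character device plumbing: a dynamically allocated major number, a cdev
 * bound to sched_ioctl_fops and a "scheddrv" class with a "mtk_sched" node.
 */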
static struct cdev *sched_ioctl_cdev;
static dev_t sched_dev_num;
struct class *sched_class;
static int __init sched_ioctl_init(void)
{
	int ret;
	struct device *class_dev = NULL;

	if (alloc_chrdev_region(&sched_dev_num, 0, 1, SCHED_DEV_NAME)) {
		pr_debug("MT_SCHED: Device major number allocation failed\n");
		return -EAGAIN;
	}

	sched_ioctl_cdev = cdev_alloc();
	if (NULL == sched_ioctl_cdev) {
		pr_debug("MT_SCHED: cdev_alloc failed\n");
		ret = -ENOMEM;
		goto out_err2;
	}

	cdev_init(sched_ioctl_cdev, &sched_ioctl_fops);
	sched_ioctl_cdev->owner = THIS_MODULE;
	ret = cdev_add(sched_ioctl_cdev, sched_dev_num, 1);
	if (ret) {
		pr_debug("MT_SCHED: Char device add failed\n");
		goto out_err2;
	}

	sched_class = class_create(THIS_MODULE, "scheddrv");
	if (IS_ERR(sched_class)) {
		pr_debug("Unable to create class, err = %d\n", (int)PTR_ERR(sched_class));
		ret = PTR_ERR(sched_class);
		goto out_err1;
	}
	class_dev = device_create(sched_class, NULL, sched_dev_num, NULL, "mtk_sched");

	pr_alert("MT_SCHED: Init complete, device major number = %d\n", MAJOR(sched_dev_num));

	goto out;

out_err1:
	cdev_del(sched_ioctl_cdev);
out_err2:
	unregister_chrdev_region(sched_dev_num, 1);
out:
	return ret;
}

/**
 * /proc/mtk_sched/affinity_status
 */
static int sched_status_show(struct seq_file *seq, void *v);
static int sched_status_open(struct inode *inode, struct file *file);
static unsigned int sched_status_poll(struct file *file, poll_table *wait);
static const struct file_operations sched_status_fops = {
	.open = sched_status_open,
	.read = seq_read,
	.poll = sched_status_poll,
	.llseek = seq_lseek,
	.release = single_release
};

static int sched_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, sched_status_show, inode->i_private);
}

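/*
 * Dump the online CPUs plus, for every tracked task, its current online
 * affinity ("REAL") and the mask backed up by this driver ("BACKUP").
 */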
static int sched_status_show(struct seq_file *seq, void *v)
{
	struct mt_task *tmp;
	struct cpumask mask;
	unsigned long irq_flags;
	int i;

	for_each_online_cpu(i)
		seq_printf(seq, "CPU%d:\t\tonline\n", i);

	mt_sched_check_tasks();
	spin_lock_irqsave(&mt_sched_spinlock, irq_flags);
	seq_puts(seq, "\n PID REAL BACKUP CMD\n");
	list_for_each_entry(tmp, &mt_task_head.list, list) {
		cpumask_and(&mask, &tmp->p->cpus_allowed, cpu_online_mask);
		seq_printf(seq, "%5d %4lu %4lu %s\n", tmp->pid, *mask.bits, *tmp->mask.bits,
			   tmp->p->comm);
	}
	spin_unlock_irqrestore(&mt_sched_spinlock, irq_flags);

	return 0;
}

static unsigned int sched_status_poll(struct file *file, poll_table *wait)
{
	return 0;
}

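/*
 * Create /proc/mtk_sched/affinity_status.
 */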
static int __init sched_proc_init(void)
{
	struct proc_dir_entry *pe;

	if (!proc_mkdir("mtk_sched", NULL))
		return -ENOMEM;

	pe = proc_create("mtk_sched/affinity_status", 0444, NULL, &sched_status_fops);
	if (!pe)
		return -ENOMEM;

	return 0;
}

/**
 * sched_cpu_notify - sched CPU notifier callback function.
 */
static int sched_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct mt_task *tmp;
	unsigned long irq_flags;

	switch (action) {
	case CPU_ONLINE:
		/* Re-apply the backup mask of every task that asked for this CPU. */
		spin_lock_irqsave(&mt_sched_spinlock, irq_flags);
		list_for_each_entry(tmp, &mt_task_head.list, list) {
			if (cpumask_test_cpu(cpu, &(tmp->mask))) {
				if (tmp->pid == tmp->p->pid)
					cpumask_copy(&tmp->p->cpus_allowed, &(tmp->mask));
			}
		}
		spin_unlock_irqrestore(&mt_sched_spinlock, irq_flags);

		break;
	case CPU_DOWN_FAILED:
		break;
	case CPU_DOWN_PREPARE:
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block sched_cpu_nb = {
	.notifier_call = sched_cpu_notify,
};

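/*
 * Module init: set up the backup list before the char device, the proc
 * entry and the CPU hotplug notifier are registered.
 */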
static int __init sched_module_init(void)
{
	int ret;
	unsigned long irq_flags;

	/* Initialise the backup list before any ioctl or notifier can walk it. */
	spin_lock_irqsave(&mt_sched_spinlock, irq_flags);
	INIT_LIST_HEAD(&(mt_task_head.list));
	spin_unlock_irqrestore(&mt_sched_spinlock, irq_flags);

	ret = sched_ioctl_init();
	if (ret)
		return ret;

	ret = sched_proc_init();
	if (ret)
		return ret;

	ret = register_cpu_notifier(&sched_cpu_nb);
	if (ret)
		return ret;

	return ret;
}

static void sched_module_exit(void)
{
	unregister_cpu_notifier(&sched_cpu_nb);
	remove_proc_entry("mtk_sched/affinity_status", NULL);
	remove_proc_entry("mtk_sched", NULL);
	device_destroy(sched_class, sched_dev_num);
	class_destroy(sched_class);
	cdev_del(sched_ioctl_cdev);
	unregister_chrdev_region(sched_dev_num, 1);
	pr_alert("MT_SCHED: driver removed.\n");
}
module_init(sched_module_init);
module_exit(sched_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("YaTing Chang <yt.chang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek sched affinity driver");

#endif /* CONFIG_MT_SCHED */