X-Git-Url: https://git.stricted.de/?p=GitHub%2Fmt8127%2Fandroid_kernel_alcatel_ttab.git;a=blobdiff_plain;f=kernel%2Fworkqueue.c;h=1cafae743885236e74d2b3b7857a5244b972ecf4;hp=3f8558f85a40c13b273fbe7a216e6f6142938706;hb=HEAD;hpb=ae49cb71c5f4a51dded50f66c2dcf9a764285148

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3f8558f85a40..1cafae743885 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -507,6 +507,13 @@ static inline void debug_work_activate(struct work_struct *work) { }
 static inline void debug_work_deactivate(struct work_struct *work) { }
 #endif
 
+#ifdef CONFIG_MTK_WQ_DEBUG
+extern void mttrace_workqueue_execute_work(struct work_struct *work);
+extern void mttrace_workqueue_activate_work(struct work_struct *work);
+extern void mttrace_workqueue_queue_work(unsigned int req_cpu, struct work_struct *work);
+extern void mttrace_workqueue_execute_end(struct work_struct *work);
+#endif //CONFIG_MTK_WQ_DEBUG
+
 /* allocate ID and assign it to @pool */
 static int worker_pool_assign_id(struct worker_pool *pool)
 {
@@ -1106,6 +1113,9 @@ static void pwq_activate_delayed_work(struct work_struct *work)
         struct pool_workqueue *pwq = get_work_pwq(work);
 
         trace_workqueue_activate_work(work);
+#ifdef CONFIG_MTK_WQ_DEBUG
+        mttrace_workqueue_activate_work(work);
+#endif //CONFIG_MTK_WQ_DEBUG
         move_linked_works(work, &pwq->pool->worklist, NULL);
         __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
         pwq->nr_active++;
@@ -1393,6 +1403,9 @@ retry:
 
         /* pwq determined, queue */
         trace_workqueue_queue_work(req_cpu, pwq, work);
+#ifdef CONFIG_MTK_WQ_DEBUG
+        mttrace_workqueue_queue_work(cpu, work);
+#endif //CONFIG_MTK_WQ_DEBUG
 
         if (WARN_ON(!list_empty(&work->entry))) {
                 spin_unlock(&pwq->pool->lock);
@@ -1404,6 +1417,9 @@ retry:
 
         if (likely(pwq->nr_active < pwq->max_active)) {
                 trace_workqueue_activate_work(work);
+#ifdef CONFIG_MTK_WQ_DEBUG
+                mttrace_workqueue_activate_work(work);
+#endif //CONFIG_MTK_WQ_DEBUG
                 pwq->nr_active++;
                 worklist = &pwq->pool->worklist;
         } else {
@@ -2132,6 +2148,9 @@ __acquires(&pool->lock)
         bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
         int work_color;
         struct worker *collision;
+        unsigned long long exec_start;
+        char func[128];
+
 #ifdef CONFIG_LOCKDEP
         /*
          * It is permissible to free the struct work_struct from
@@ -2201,13 +2220,29 @@ __acquires(&pool->lock)
 
         lock_map_acquire_read(&pwq->wq->lockdep_map);
         lock_map_acquire(&lockdep_map);
+
+        exec_start = sched_clock();
+        sprintf(func, "%pf", work->func);
+
         trace_workqueue_execute_start(work);
+#ifdef CONFIG_MTK_WQ_DEBUG
+        mttrace_workqueue_execute_work(work);
+#endif //CONFIG_MTK_WQ_DEBUG
+
         worker->current_func(work);
+
         /*
          * While we must be careful to not use "work" after this, the trace
          * point will only record its address.
          */
         trace_workqueue_execute_end(work);
+#ifdef CONFIG_MTK_WQ_DEBUG
+        mttrace_workqueue_execute_end(work);
+#endif //CONFIG_MTK_WQ_DEBUG
+
+        if ((sched_clock() - exec_start) > 1000000000) // dump log if execute more than 1 sec
+                pr_warning("WQ warning! work (%s, %p) execute more than 1 sec, time: %llu ns\n", func, work, sched_clock() - exec_start);
+
         lock_map_release(&lockdep_map);
         lock_map_release(&pwq->wq->lockdep_map);
 
@@ -3399,7 +3434,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
          * attributes breaks ordering guarantee.  Disallow exposing ordered
          * workqueues.
          */
-        if (WARN_ON(wq->flags & __WQ_ORDERED))
+        if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
                 return -EINVAL;
 
         wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
@@ -3964,8 +3999,12 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
                 return -EINVAL;
 
         /* creating multiple pwqs breaks ordering guarantee */
-        if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
-                return -EINVAL;
+        if (!list_empty(&wq->pwqs)) {
+                if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+                        return -EINVAL;
+
+                wq->flags &= ~__WQ_ORDERED;
+        }
 
         pwq_tbl = kzalloc(wq_numa_tbl_len * sizeof(pwq_tbl[0]), GFP_KERNEL);
         new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
@@ -4411,13 +4450,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
         struct pool_workqueue *pwq;
 
         /* disallow meddling with max_active for ordered workqueues */
-        if (WARN_ON(wq->flags & __WQ_ORDERED))
+        if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
                 return;
 
         max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
         mutex_lock(&wq->mutex);
 
+        wq->flags &= ~__WQ_ORDERED;
         wq->saved_max_active = max_active;
 
         for_each_pwq(pwq, wq)
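
A note on the MTK additions above: the four mttrace_workqueue_* hooks are only declared extern in this file; their definitions live elsewhere in the MediaTek vendor tree and are not part of this diff. As a sketch of what definitions matching those declarations could look like (hypothetical stubs, not the actual CONFIG_MTK_WQ_DEBUG implementation):

    /*
     * Hypothetical stubs; the signatures are taken from the extern
     * declarations in the diff above, the bodies are invented here.
     */
    #include <linux/printk.h>
    #include <linux/workqueue.h>

    void mttrace_workqueue_queue_work(unsigned int req_cpu,
                                      struct work_struct *work)
    {
            /* record which CPU the work item was queued for */
            pr_debug("wq: queue %pf on cpu %u\n", work->func, req_cpu);
    }

    void mttrace_workqueue_activate_work(struct work_struct *work)
    {
            pr_debug("wq: activate %pf\n", work->func);
    }

    void mttrace_workqueue_execute_work(struct work_struct *work)
    {
            pr_debug("wq: execute %pf\n", work->func);
    }

    void mttrace_workqueue_execute_end(struct work_struct *work)
    {
            pr_debug("wq: execute end %pf\n", work->func);
    }

The process_one_work() hunk also shows why the function name is snapshotted with sprintf(func, "%pf", work->func) before worker->current_func(work) runs: a work item may free itself during execution, so only its address (%p) and the pre-captured name are safe to use in the over-one-second pr_warning afterwards.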
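The remaining hunks track the upstream split between implicitly and explicitly ordered workqueues: only workqueues flagged __WQ_ORDERED_EXPLICIT keep the hard WARN_ON, while apply_workqueue_attrs() and workqueue_set_max_active() now clear __WQ_ORDERED on implicitly ordered ones instead of failing. A minimal usage sketch, assuming the matching workqueue.h change in which alloc_ordered_workqueue() sets both flags (the module and workqueue names here are invented for illustration):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static int __init example_init(void)
    {
            /* alloc_ordered_workqueue() sets __WQ_ORDERED and
             * __WQ_ORDERED_EXPLICIT, so ordering stays enforced */
            example_wq = alloc_ordered_workqueue("example_wq", 0);
            if (!example_wq)
                    return -ENOMEM;

            /* explicitly ordered: this trips the WARN_ON in
             * workqueue_set_max_active() and is ignored */
            workqueue_set_max_active(example_wq, 4);
            return 0;
    }

    static void __exit example_exit(void)
    {
            destroy_workqueue(example_wq);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

The practical effect is that ordinary unbound workqueues that were only implicitly ordered can again have their attributes and max_active adjusted, while workqueues created through alloc_ordered_workqueue() retain their ordering guarantee.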