Merge tag 'v3.10.106' into update
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5c053e79d90820c296fc722a3de0e5dfdfc9e8c9..62e48a3e1c69c76896834d7bb0499fdea4a28c26 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -472,7 +472,7 @@ struct ring_buffer_per_cpu {
        raw_spinlock_t                  reader_lock;    /* serialize readers */
        arch_spinlock_t                 lock;
        struct lock_class_key           lock_key;
-       unsigned int                    nr_pages;
+       unsigned long                   nr_pages;
        struct list_head                *pages;
        struct buffer_page              *head_page;     /* read from head */
        struct buffer_page              *tail_page;     /* write to tail */
@@ -492,7 +492,7 @@ struct ring_buffer_per_cpu {
        u64                             write_stamp;
        u64                             read_stamp;
        /* ring buffer pages to update, > 0 to add, < 0 to remove */
-       int                             nr_pages_to_update;
+       long                            nr_pages_to_update;
        struct list_head                new_pages; /* new pages to add */
        struct work_struct              update_pages_work;
        struct completion               update_done;
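Both fields widen from int to long. The requested buffer size arrives as an unsigned long, so on 64-bit kernels a request above roughly INT_MAX pages (around 8 TiB with 4 KiB pages) would silently truncate an int page count; the helper signatures below are widened to match. A minimal sketch of the truncation, using DIV_ROUND_UP() from <linux/kernel.h> and an illustrative BUF_PAGE_SIZE of 4080 bytes (PAGE_SIZE minus the page header):

	unsigned long size = 1UL << 43;          /* ~8 TiB requested */
	int bad   = DIV_ROUND_UP(size, 4080);    /* ~2.16e9 pages: wraps past INT_MAX */
	long good = DIV_ROUND_UP(size, 4080);    /* same count, representable in a long */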
@@ -1129,10 +1129,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
        return 0;
 }
 
-static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
+static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
 {
-       int i;
        struct buffer_page *bpage, *tmp;
+       long i;
 
        for (i = 0; i < nr_pages; i++) {
 #if !defined (CONFIG_MTK_EXTMEM)
@@ -1179,7 +1179,7 @@ free_pages:
 }
 
 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
-                            unsigned nr_pages)
+                            unsigned long nr_pages)
 {
        LIST_HEAD(pages);
 
@@ -1204,7 +1204,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 }
 
 static struct ring_buffer_per_cpu *
-rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
+rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
        struct buffer_page *bpage;
@@ -1312,8 +1312,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
                                        struct lock_class_key *key)
 {
        struct ring_buffer *buffer;
+       long nr_pages;
        int bsize;
-       int cpu, nr_pages;
+       int cpu;
 
        /* keep it in its own cache line */
        buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@@ -1436,12 +1437,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
 }
 
 static int
-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
+rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 {
        struct list_head *tail_page, *to_remove, *next_page;
        struct buffer_page *to_remove_page, *tmp_iter_page;
        struct buffer_page *last_page, *first_page;
-       unsigned int nr_removed;
+       unsigned long nr_removed;
        unsigned long head_bit;
        int page_entries;
 
@@ -1657,7 +1658,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
                        int cpu_id)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
-       unsigned nr_pages;
+       unsigned long nr_pages;
        int cpu, err = 0;
 
        /*
@@ -1671,14 +1672,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
            !cpumask_test_cpu(cpu_id, buffer->cpumask))
                return size;
 
-       size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
-       size *= BUF_PAGE_SIZE;
+       nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 
        /* we need a minimum of two pages */
-       if (size < BUF_PAGE_SIZE * 2)
-               size = BUF_PAGE_SIZE * 2;
+       if (nr_pages < 2)
+               nr_pages = 2;
 
-       nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+       size = nr_pages * BUF_PAGE_SIZE;
 
        /*
         * Don't succeed if resizing is disabled, as a reader might be
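The resize math is reordered to compute the page count exactly once: round the requested byte size up to whole pages, clamp to the two-page minimum, then derive the size actually granted. The old sequence rounded the size up, clamped in bytes, and divided again at the end. Worked through with the same illustrative BUF_PAGE_SIZE of 4080 bytes:

	nr_pages = DIV_ROUND_UP(5000, 4080);   /* = 2, already meets the two-page minimum */
	size     = nr_pages * BUF_PAGE_SIZE;   /* caller is granted 8160 bytes */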
@@ -1976,12 +1976,6 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
                goto again;
 }
 
-static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
-{
-       cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
-       cpu_buffer->reader_page->read = 0;
-}
-
 static void rb_inc_iter(struct ring_buffer_iter *iter)
 {
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
@@ -3436,11 +3430,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
 {
        struct ring_buffer_per_cpu *cpu_buffer;
+       struct buffer_page *reader;
+       struct buffer_page *head_page;
+       struct buffer_page *commit_page;
+       unsigned commit;
 
        cpu_buffer = iter->cpu_buffer;
 
-       return iter->head_page == cpu_buffer->commit_page &&
-               iter->head == rb_commit_index(cpu_buffer);
+       /* Remember, trace recording is off when iterator is in use */
+       reader = cpu_buffer->reader_page;
+       head_page = cpu_buffer->head_page;
+       commit_page = cpu_buffer->commit_page;
+       commit = rb_page_commit(commit_page);
+
+       return ((iter->head_page == commit_page && iter->head == commit) ||
+               (iter->head_page == reader && commit_page == head_page &&
+                head_page->read == commit &&
+                iter->head == rb_page_commit(cpu_buffer->reader_page)));
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
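Previously the iterator only counted as empty when it sat on the commit page at the commit index. An iterator that has advanced onto the reader page can be just as finished: if the commit page is still the head page, every committed entry there has been read, and the iterator has walked the whole reader page, nothing is left, so the rewrite accepts either condition. A sketch of the caller pattern this serves, using the non-consuming iterator API (recording stays off while the iterator is live, per the comment above):

	struct ring_buffer_iter *iter = ring_buffer_read_start(buffer, cpu);
	struct ring_buffer_event *event;
	u64 ts;

	while (!ring_buffer_iter_empty(iter)) {
		event = ring_buffer_read(iter, &ts);   /* returns the next event, advances iter */
		/* inspect the event; the buffer itself is left untouched */
	}
	ring_buffer_read_finish(iter);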
 
@@ -3619,7 +3625,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 
        /* Finally update the reader page to the new head */
        cpu_buffer->reader_page = reader;
-       rb_reset_reader_page(cpu_buffer);
+       cpu_buffer->reader_page->read = 0;
 
        if (overwrite != cpu_buffer->last_overrun) {
                cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
@@ -3629,6 +3635,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        goto again;
 
  out:
+       /* Update the read_stamp on the first event */
+       if (reader && reader->read == 0)
+               cpu_buffer->read_stamp = reader->page->time_stamp;
+
        arch_spin_unlock(&cpu_buffer->lock);
        local_irq_restore(flags);
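With rb_reset_reader_page() gone, the swap path above only zeroes reader_page->read; read_stamp is now taken at the out: label, and only while the page is still unread (reader->read == 0). Stamping at swap time could record a page time_stamp that the first real commit on that page had not yet filled in; deferring until the page is actually about to be read picks up the stamp belonging to its first event. That stamp then seeds the running timestamp, roughly:

	/* sketch: readers build absolute times from the page stamp plus per-event deltas */
	u64 ts = cpu_buffer->read_stamp;   /* stamp of the reader page's first event */
	ts += event->time_delta;           /* each subsequent event contributes a delta */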
 
@@ -4637,8 +4647,9 @@ static int rb_cpu_notify(struct notifier_block *self,
        struct ring_buffer *buffer =
                container_of(self, struct ring_buffer, cpu_notify);
        long cpu = (long)hcpu;
-       int cpu_i, nr_pages_same;
-       unsigned int nr_pages;
+       long nr_pages_same;
+       int cpu_i;
+       unsigned long nr_pages;
 
        switch (action) {
        case CPU_UP_PREPARE:
@@ -4866,9 +4877,9 @@ static __init int test_ringbuffer(void)
                rb_data[cpu].cnt = cpu;
                rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
                                                 "rbtester/%d", cpu);
-               if (WARN_ON(!rb_threads[cpu])) {
+               if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
                        pr_cont("FAILED\n");
-                       ret = -1;
+                       ret = PTR_ERR(rb_threads[cpu]);
                        goto out_free;
                }
 
@@ -4878,9 +4889,9 @@ static __init int test_ringbuffer(void)
 
        /* Now create the rb hammer! */
        rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
-       if (WARN_ON(!rb_hammer)) {
+       if (WARN_ON(IS_ERR(rb_hammer))) {
                pr_cont("FAILED\n");
-               ret = -1;
+               ret = PTR_ERR(rb_hammer);
                goto out_free;
        }