[PATCH] ppc64: Simplify counting of lpevents, remove lpevent_count from paca
author Michael Ellerman <michael@ellerman.id.au>
Thu, 30 Jun 2005 05:16:09 +0000 (15:16 +1000)
committer Paul Mackerras <paulus@samba.org>
Thu, 30 Jun 2005 05:16:09 +0000 (15:16 +1000)
Currently there's a per-cpu count of lpevents processed, a per-queue (ie.
global) total count, and a count by event type.

Replace all that with a count by event type for each cpu. We only need to add
it up in the proc code.
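
In other words, this is the usual per-cpu counter idiom. A condensed sketch of
what the diff below does (same names as the patch; kernel per-cpu API of this
era, error handling omitted):

	#include <linux/percpu.h>
	#include <linux/cpumask.h>

	/* One counter per event type on every cpu, no shared counters. */
	DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);

	/* Fast path: bump the local cpu's counter for this event's type. */
	__get_cpu_var(hvlpevent_counts)[event->xType]++;

	/* Reporting path (/proc): sum across online cpus, per type. */
	unsigned long sum = 0;
	int cpu, type;
	for (type = 0; type < HvLpEvent_Type_NumTypes; type++)
		for_each_online_cpu(cpu)
			sum += per_cpu(hvlpevent_counts, cpu)[type];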

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
arch/ppc64/kernel/ItLpQueue.c
include/asm-ppc64/iSeries/ItLpQueue.h
include/asm-ppc64/paca.h

index a849f6775ad6068685fcf6afbbfe8e0a84ce32cc..4a6ab9de629d3ad2b42fa1a9dccb3a2fedd9ffa4 100644 (file)
@@ -28,7 +28,9 @@
  */
 struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
 
-static char *event_types[9] = {
+DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);
+
+static char *event_types[HvLpEvent_Type_NumTypes] = {
        "Hypervisor\t\t",
        "Machine Facilities\t",
        "Session Manager\t",
@@ -129,7 +131,6 @@ static void hvlpevent_clear_valid( struct HvLpEvent * event )
 
 void process_hvlpevents(struct pt_regs *regs)
 {
-       unsigned numIntsProcessed = 0;
        struct HvLpEvent * nextLpEvent;
 
        /* If we have recursed, just return */
@@ -144,8 +145,6 @@ void process_hvlpevents(struct pt_regs *regs)
        for (;;) {
                nextLpEvent = get_next_hvlpevent();
                if ( nextLpEvent ) {
-                       ++numIntsProcessed;
-                       hvlpevent_queue.xLpIntCount++;
                        /* Call appropriate handler here, passing 
                         * a pointer to the LpEvent.  The handler
                         * must make a copy of the LpEvent if it
@@ -160,7 +159,7 @@ void process_hvlpevents(struct pt_regs *regs)
                         * here!
                         */
                        if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes )
-                               hvlpevent_queue.xLpIntCountByType[nextLpEvent->xType]++;
+                               __get_cpu_var(hvlpevent_counts)[nextLpEvent->xType]++;
                        if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes &&
                             lpEventHandler[nextLpEvent->xType] ) 
                                lpEventHandler[nextLpEvent->xType](nextLpEvent, regs);
@@ -181,8 +180,6 @@ void process_hvlpevents(struct pt_regs *regs)
        ItLpQueueInProcess = 0;
        mb();
        clear_inUse();
-
-       get_paca()->lpevent_count += numIntsProcessed;
 }
 
 static int set_spread_lpevents(char *str)
@@ -228,20 +225,37 @@ void setup_hvlpevent_queue(void)
 
 static int proc_lpevents_show(struct seq_file *m, void *v)
 {
-       unsigned int i;
+       int cpu, i;
+       unsigned long sum;
+       static unsigned long cpu_totals[NR_CPUS];
+
+       /* FIXME: do we care that there's no locking here? */
+       sum = 0;
+       for_each_online_cpu(cpu) {
+               cpu_totals[cpu] = 0;
+               for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
+                       cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
+               }
+               sum += cpu_totals[cpu];
+       }
 
        seq_printf(m, "LpEventQueue 0\n");
-       seq_printf(m, "  events processed:\t%lu\n",
-                  (unsigned long)hvlpevent_queue.xLpIntCount);
+       seq_printf(m, "  events processed:\t%lu\n", sum);
 
-       for (i = 0; i < 9; ++i)
-               seq_printf(m, "    %s %10lu\n", event_types[i],
-                          (unsigned long)hvlpevent_queue.xLpIntCountByType[i]);
+       for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
+               sum = 0;
+               for_each_online_cpu(cpu) {
+                       sum += per_cpu(hvlpevent_counts, cpu)[i];
+               }
+
+               seq_printf(m, "    %s %10lu\n", event_types[i], sum);
+       }
 
        seq_printf(m, "\n  events processed by processor:\n");
 
-       for_each_online_cpu(i)
-               seq_printf(m, "    CPU%02d  %10u\n", i, paca[i].lpevent_count);
+       for_each_online_cpu(cpu) {
+               seq_printf(m, "    CPU%02d  %10lu\n", cpu, cpu_totals[cpu]);
+       }
 
        return 0;
 }
index 6ba74c0d910b40b2ad78cbad4f37fdac150ea283..51db08852dba7bf41215058044ea2a0c67392b53 100644 (file)
@@ -70,8 +70,6 @@ struct hvlpevent_queue {
        u8      xIndex;                 // 0x28 unique sequential index.
        u8      xSlicRsvd[3];           // 0x29-2b
        u32     xInUseWord;             // 0x2C
-       u64     xLpIntCount;            // 0x30 Total Lp Int msgs processed
-       u64     xLpIntCountByType[9];   // 0x38-0x7F Event counts by type
 };
 
 extern struct hvlpevent_queue hvlpevent_queue;
index 0146f51684e993ba36ec3bb0f16ec863ece401fc..2f0f36f73d38507d548a14ff221b96fe42ffce85 100644 (file)
@@ -89,7 +89,6 @@ struct paca_struct {
        u64 next_jiffy_update_tb;       /* TB value for next jiffy update */
        u64 saved_r1;                   /* r1 save for RTAS calls */
        u64 saved_msr;                  /* MSR saved here by enter_rtas */
-       u32 lpevent_count;              /* lpevents processed  */
        u8 proc_enabled;                /* irq soft-enable flag */
 
        /* not yet used */