return nextLpEvent;
}
+unsigned long spread_lpevents = 1;
+
int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue )
{
- int retval = 0;
- struct HvLpEvent * nextLpEvent;
- if ( lpQueue ) {
- nextLpEvent = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
- retval = nextLpEvent->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
- }
- return retval;
+ struct HvLpEvent *next_event;
+
+ if (smp_processor_id() >= spread_lpevents)
+ return 0;
+
+ next_event = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr;
+ return next_event->xFlags.xValid | lpQueue->xPlicOverflowIntPending;
}
void ItLpQueue_clearValid( struct HvLpEvent * event )
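With the per-paca lpqueue_ptr gone, every CPU now sees the single global xItLpQueue, so the spread is enforced inside ItLpQueue_isLpIntPending() itself: any CPU whose id is at or above spread_lpevents simply reports nothing pending and never touches the queue. A minimal userspace sketch of that gate (the types, the smp_processor_id() stand-in, and the values are illustrative, not the kernel's):

#include <stdio.h>

struct hv_lp_event { int valid; };
struct lp_queue {
	struct hv_lp_event *cur_event;
	int overflow_pending;
};

static unsigned long spread_lpevents = 1;	/* default: CPU 0 only */
static int current_cpu;				/* stand-in for smp_processor_id() */

static int lp_int_pending(struct lp_queue *q)
{
	/* CPUs outside the spread never report work, so they never
	 * touch the (single, global) queue. */
	if (current_cpu >= (int)spread_lpevents)
		return 0;
	return q->cur_event->valid | q->overflow_pending;
}

int main(void)
{
	struct hv_lp_event ev = { .valid = 1 };
	struct lp_queue q = { .cur_event = &ev, .overflow_pending = 0 };

	for (current_cpu = 0; current_cpu < 4; current_cpu++)
		printf("cpu %d pending: %d\n", current_cpu, lp_int_pending(&q));
	return 0;
}

With the default spread_lpevents = 1, only cpu 0 ever reports pending work, matching the old behaviour where only paca[0] carried a queue pointer by default.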
static int set_spread_lpevents(char *str)
{
- unsigned long i;
unsigned long val = simple_strtoul(str, NULL, 0);
+ extern unsigned long spread_lpevents;
/*
* The parameter is the number of processors to share in processing
* lp events.
*/
if ((val > 0) && (val <= NR_CPUS)) {
- for (i = 1; i < val; ++i)
- paca[i].lpqueue_ptr = paca[0].lpqueue_ptr;
-
+ spread_lpevents = val;
printk("lpevent processing spread over %ld processors\n", val);
} else {
printk("invalid spread_lpevents %ld\n", val);
while (1) {
if (lpaca->lppaca.shared_proc) {
- if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
+ if (ItLpQueue_isLpIntPending(&xItLpQueue))
process_iSeries_events();
if (!need_resched())
yield_shared_processor();
while (!need_resched()) {
HMT_medium();
- if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr))
+ if (ItLpQueue_isLpIntPending(&xItLpQueue))
process_iSeries_events();
HMT_low();
}
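The shared-processor idle loop keeps its shape: drain any pending lp events, then yield the physical processor back to the hypervisor when there is nothing runnable; only the queue argument changes from lpaca->lpqueue_ptr to the global &xItLpQueue. The same poll-then-yield structure, modeled as a bounded, compilable C sketch with stand-in stubs:

#include <stdbool.h>
#include <stdio.h>

static int pending = 3;			/* pretend three events are queued */

static bool lp_int_pending(void) { return pending > 0; }
static void process_events(void)  { printf("processed event\n"); pending--; }
static bool need_resched(void)    { return pending > 0; }
static void yield_shared_processor(void) { printf("yield to hypervisor\n"); }

int main(void)
{
	int iterations = 5;		/* bounded so the sketch terminates */

	/* Same shape as the loop in the hunk: poll for lp events,
	 * process them, and only give the physical CPU away when
	 * there is nothing left to do. */
	while (iterations--) {
		if (lp_int_pending())
			process_events();
		if (!need_resched())
			yield_shared_processor();
	}
	return 0;
}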
void do_IRQ(struct pt_regs *regs)
{
struct paca_struct *lpaca;
- struct ItLpQueue *lpq;
irq_enter();
iSeries_smp_message_recv(regs);
}
#endif /* CONFIG_SMP */
- lpq = lpaca->lpqueue_ptr;
- if (lpq && ItLpQueue_isLpIntPending(lpq))
- lpevent_count += ItLpQueue_process(lpq, regs);
+ if (ItLpQueue_isLpIntPending(&xItLpQueue))
+ lpevent_count += ItLpQueue_process(&xItLpQueue, regs);
irq_exit();
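In do_IRQ() the lpq local and its NULL check disappear: the global xItLpQueue always exists, so the interrupt path tests and drains it directly, accumulating into lpevent_count. The timer_interrupt() hunk further down makes the identical substitution. A stand-alone model of that check-then-drain pattern (all names are stand-ins for the kernel's):

#include <stdio.h>

struct lp_queue { int pending; };

static struct lp_queue xItLpQueue = { .pending = 2 };
static unsigned long lpevent_count;

static int queue_pending(struct lp_queue *q) { return q->pending > 0; }

static unsigned long queue_process(struct lp_queue *q)
{
	unsigned long n = 0;

	while (q->pending > 0) {	/* drain and count events */
		q->pending--;
		n++;
	}
	return n;
}

int main(void)
{
	/* The interrupt path: check the single global queue directly,
	 * no per-CPU pointer and hence no NULL check. */
	if (queue_pending(&xItLpQueue))
		lpevent_count += queue_process(&xItLpQueue);

	printf("lpevent_count = %lu\n", lpevent_count);
	return 0;
}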
/* We need to poll here as we are not yet taking interrupts */
while (rtc_data.busy) {
extern unsigned long lpevent_count;
- struct ItLpQueue *lpq = get_paca()->lpqueue_ptr;
- if (lpq && ItLpQueue_isLpIntPending(lpq))
- lpevent_count += ItLpQueue_process(lpq, NULL);
+ if (ItLpQueue_isLpIntPending(&xItLpQueue))
+ lpevent_count += ItLpQueue_process(&xItLpQueue, NULL);
}
return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
}
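The rtc wait is the one caller that polls with NULL for the register frame: it runs before the kernel is taking interrupts, so nothing else will drain the queue for it. A small sketch of a busy-wait that passes NULL where an interrupt frame would normally go (the handler and the busy flag are invented for illustration):

#include <stddef.h>
#include <stdio.h>

struct pt_regs { int dummy; };

static int busy = 2;			/* stand-in for rtc_data.busy */
static unsigned long lpevent_count;

/* A handler that tolerates a NULL register frame, as ItLpQueue_process()
 * must when called from the early busy-wait path. */
static unsigned long process(struct pt_regs *regs)
{
	printf("processing %s an interrupt frame\n",
	       regs ? "with" : "without");
	busy--;
	return 1;
}

int main(void)
{
	/* Poll: interrupts are not enabled yet, so we drain the queue
	 * ourselves until the response we are waiting on arrives. */
	while (busy)
		lpevent_count += process(NULL);
	return 0;
}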
#ifdef CONFIG_PPC_ISERIES
#define EXTRA_INITS(number, lpq) \
.lppaca_ptr = &paca[number].lppaca, \
- .lpqueue_ptr = (lpq), /* &xItLpQueue, */ \
.reg_save_ptr = &paca[number].reg_save, \
.reg_save = { \
.xDesc = 0xd397d9e2, /* "LpRS" */ \
set_dec(next_dec);
#ifdef CONFIG_PPC_ISERIES
- {
- struct ItLpQueue *lpq = lpaca->lpqueue_ptr;
- if (lpq && ItLpQueue_isLpIntPending(lpq))
- lpevent_count += ItLpQueue_process(lpq, regs);
- }
+ if (ItLpQueue_isLpIntPending(&xItLpQueue))
+ lpevent_count += ItLpQueue_process(&xItLpQueue, regs);
#endif
/* collect purr register values often, for accurate calculations */
#include <asm/types.h>
#include <asm/lppaca.h>
#include <asm/iSeries/ItLpRegSave.h>
-#include <asm/iSeries/ItLpQueue.h>
#include <asm/mmu.h>
register struct paca_struct *local_paca asm("r13");
u16 paca_index; /* Logical processor number */
u32 default_decr; /* Default decrementer value */
- struct ItLpQueue *lpqueue_ptr; /* LpQueue handled by this CPU */
u64 kernel_toc; /* Kernel TOC address */
u64 stab_real; /* Absolute address of segment table */
u64 stab_addr; /* Virtual address of segment table */