#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
#else
__asm__ __volatile__ (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() barrier()
#endif
#endif
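/*
 * Illustrative sketch, not part of the patch: cpu_relax() is the
 * primitive callers drop into tight polling loops, while
 * cpu_relax_yield() is the variant for loops that may want to hand the
 * CPU back to a hypervisor. The wait_for_flag() helper below is
 * hypothetical and only shows the typical calling pattern.
 */
static inline void wait_for_flag(volatile int *flag)
{
	while (!*flag)
		cpu_relax();	/* arch-specific pause/barrier while spinning */
}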
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
}
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
#define cpu_sync_pipeline() asm volatile("sub pc, -2" : : : "memory")
struct cpu_context {
#define cpu_relax() smp_mb()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
/* Get the Silicon Revision of the chip */
static inline uint32_t __pure bfin_revid(void)
#define cpu_relax() do { } while (0)
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
extern const struct seq_operations cpuinfo_op;
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
void default_idle(void);
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
/* data cache prefetch */
#define ARCH_HAS_PREFETCH
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
#define HARD_RESET_NOW() ({ \
local_irq_disable(); \
#define cpu_relax() __vmyield()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
/*
* Decides where the kernel will search for a free chunk of vm space during
#define cpu_relax() ia64_hint(ia64_hint_pause)
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
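/*
 * Annotation, not part of the patch: ia64_hint(ia64_hint_pause) emits
 * the "hint @pause" instruction, the Itanium counterpart of the x86
 * PAUSE hint, telling the core that the code is in a spin-wait loop.
 */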
static inline int
ia64_get_irr(unsigned int vector)
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
#endif /* _ASM_M32R_PROCESSOR_H */
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
#endif
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
extern void setup_priv(void);
# define cpu_relax() barrier()
# define cpu_relax_yield() cpu_relax()
-# define cpu_relax_lowlatency() cpu_relax()
#define task_pt_regs(tsk) \
(((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
/*
* Return_address is a replacement for __builtin_return_address(count)
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
/*
* User space process size: 1.75GB (default).
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
#endif /* __ASSEMBLY__ */
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
#endif /* __ASSEMBLY__ */
#endif /* __ASM_OPENRISC_PROCESSOR_H */
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
/*
* parisc_requires_coherency() is used to identify the combined VIPT/PIPT
#endif
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
/* Check that a certain kernel stack pointer is valid in task_struct p */
int validate_sp(unsigned long sp, struct task_struct *p,
void cpu_relax_yield(void);
#define cpu_relax() barrier()
-#define cpu_relax_lowlatency() barrier()
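/*
 * Annotation, not part of the patch: this hunk (s390, judging by the
 * ECAG defines below) is the one place where cpu_relax_yield() stays an
 * out-of-line function, since it may yield the CPU to the hypervisor,
 * while cpu_relax() itself remains a plain compiler barrier.
 */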
#define ECAG_CACHE_ATTRIBUTE 0
#define ECAG_CPU_ATTRIBUTE 1
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
#define release_thread(thread) do {} while (0)
/*
#define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory")
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
void default_idle(void);
void stop_this_cpu(void *);
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
extern void (*sparc_idle)(void);
".previous" \
::: "memory")
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
/* Prefetch support. This is tuned for UltraSPARC-III and later.
* UltraSPARC-I will treat these as nops, and UltraSPARC-II has
}
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
/* Info on this processor (see fs/proc/cpuinfo.c) */
struct seq_operations;
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
}
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
#define cpu_relax() rep_nop()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
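/*
 * Annotation, not part of the patch: rep_nop() expands to "rep; nop",
 * i.e. the x86 PAUSE instruction, which throttles the spin loop and is
 * friendlier to the sibling hyper-thread than a bare compiler barrier.
 */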
#define task_pt_regs(t) (&(t)->thread.regs)
#define cpu_relax() barrier()
#define cpu_relax_yield() cpu_relax()
-#define cpu_relax_lowlatency() cpu_relax()
/* Special register access. */