#include <asm/irq_regs.h>
struct irq_desc;
-typedef void fastcall (*irq_flow_handler_t)(unsigned int irq,
+typedef void (*irq_flow_handler_t)(unsigned int irq,
struct irq_desc *desc);
/*
* Built-in IRQ handlers for various IRQ types,
* callable via desc->chip->handle_irq()
*/
-extern void fastcall handle_level_irq(unsigned int irq, struct irq_desc *desc);
-extern void fastcall handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
-extern void fastcall handle_edge_irq(unsigned int irq, struct irq_desc *desc);
-extern void fastcall handle_simple_irq(unsigned int irq, struct irq_desc *desc);
-extern void fastcall handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
-extern void fastcall handle_bad_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
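With the fastcall annotation gone, these flow handlers are plain C functions. A minimal sketch of how a platform interrupt controller wires one of them up, assuming the 2.6.2x genirq API; the chip, its callbacks and all names here are illustrative, not part of the patch:

#include <linux/init.h>
#include <linux/irq.h>

static void example_ack(unsigned int irq)    { /* acknowledge at the controller */ }
static void example_mask(unsigned int irq)   { /* mask the line in hardware */ }
static void example_unmask(unsigned int irq) { /* unmask the line in hardware */ }

static struct irq_chip example_chip = {
	.name	= "example",
	.ack	= example_ack,
	.mask	= example_mask,
	.unmask	= example_unmask,
};

static void __init example_init_irq(unsigned int irq)
{
	/* handle_level_irq() becomes the flow handler for this line */
	set_irq_chip_and_handler(irq, &example_chip, handle_level_irq);
}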
/*
* Monolithic do_IRQ implementation.
- * (is an explicit fastcall, because i386 4KSTACKS calls it from assembly)
*/
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
-extern fastcall unsigned int __do_IRQ(unsigned int irq);
+extern unsigned int __do_IRQ(unsigned int irq);
#endif
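__do_IRQ() is the old-style entry point that walks the irqaction list itself. A rough sketch of how an architecture's low-level interrupt path typically called it in this era; the function name and surrounding details are illustrative:

#include <linux/hardirq.h>
#include <linux/irq.h>
#include <linux/linkage.h>
#include <asm/irq_regs.h>

asmlinkage void example_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	__do_IRQ(irq);		/* dispatch to the registered handlers for this irq */
	irq_exit();

	set_irq_regs(old_regs);
}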
extern void oops_enter(void);
extern void oops_exit(void);
extern int oops_may_print(void);
-fastcall NORET_TYPE void do_exit(long error_code)
+NORET_TYPE void do_exit(long error_code)
ATTRIB_NORET;
NORET_TYPE void complete_and_exit(struct completion *, long)
ATTRIB_NORET;
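do_exit() and complete_and_exit() never return, which is what NORET_TYPE/ATTRIB_NORET tell the compiler. A small sketch of the usual pattern in this era, a kernel thread signalling its creator on the way out; the names are made up for illustration:

#include <linux/completion.h>
#include <linux/kernel.h>

static DECLARE_COMPLETION(example_thread_done);

static int example_thread(void *unused)
{
	/* ... do the thread's work until it is told to stop ... */

	/* wake whoever waits on example_thread_done, then exit with code 0 */
	complete_and_exit(&example_thread_done, 0);
}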
/**
* mutex_is_locked - is the mutex locked?
* @lock: the mutex to be queried
*
* Returns 1 if the mutex is locked, 0 if unlocked.
*/
-static inline int fastcall mutex_is_locked(struct mutex *lock)
+static inline int mutex_is_locked(struct mutex *lock)
{
return atomic_read(&lock->count) != 1;
}
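mutex_is_locked() reads the counter without taking the lock, so it is mainly useful for assertions. A sketch of that use, with illustrative names, assuming the ordinary mutex API:

#include <linux/kernel.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static int example_counter;

/* caller must hold example_lock */
static void example_update_locked(int delta)
{
	WARN_ON(!mutex_is_locked(&example_lock));
	example_counter += delta;
}

static void example_update(int delta)
{
	mutex_lock(&example_lock);
	example_update_locked(delta);
	mutex_unlock(&example_lock);
}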
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
#else
-extern void fastcall mutex_lock(struct mutex *lock);
-extern int __must_check fastcall mutex_lock_interruptible(struct mutex *lock);
-extern int __must_check fastcall mutex_lock_killable(struct mutex *lock);
+extern void mutex_lock(struct mutex *lock);
+extern int __must_check mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check mutex_lock_killable(struct mutex *lock);
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
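The _nested() variants only matter with lockdep enabled: the subclass tells the validator that taking two locks of the same class in a fixed order is intentional. A sketch of the classic parent-before-child case; the structure and names are hypothetical:

#include <linux/mutex.h>

struct example_node {
	struct mutex lock;
	struct example_node *parent;
};

static void example_lock_pair(struct example_node *node)
{
	mutex_lock(&node->parent->lock);
	/* same lock class as the parent's, so annotate the nesting for lockdep */
	mutex_lock_nested(&node->lock, SINGLE_DEPTH_NESTING);
	/* ... both held, always parent before child ... */
	mutex_unlock(&node->lock);
	mutex_unlock(&node->parent->lock);
}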
/*
* NOTE: mutex_trylock() follows the spin_trylock() convention,
* not the down_trylock() convention!
*/
-extern int fastcall mutex_trylock(struct mutex *lock);
-extern void fastcall mutex_unlock(struct mutex *lock);
+extern int mutex_trylock(struct mutex *lock);
+extern void mutex_unlock(struct mutex *lock);
#endif
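Because mutex_trylock() follows the spin_trylock() convention, a non-zero return means the lock was acquired and must be released. A short sketch with illustrative names:

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static int example_try_work(void)
{
	if (!mutex_trylock(&example_lock))
		return -EBUSY;		/* lock is held by someone else */

	/* ... critical section ... */

	mutex_unlock(&example_lock);
	return 0;
}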
#include <linux/list.h>
#ifdef CONFIG_DEBUG_PREEMPT
- extern void fastcall add_preempt_count(int val);
- extern void fastcall sub_preempt_count(int val);
+ extern void add_preempt_count(int val);
+ extern void sub_preempt_count(int val);
#else
# define add_preempt_count(val) do { preempt_count() += (val); } while (0)
# define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
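add_preempt_count()/sub_preempt_count() are the primitives underneath preempt_disable() and preempt_enable(); ordinary code uses only the latter pair, for example around per-CPU data. A sketch with illustrative names, assuming the 2.6.2x per-CPU API:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, example_events);

static void example_count_event(void)
{
	preempt_disable();		/* bumps the preempt count, then a compiler barrier */
	__get_cpu_var(example_events)++;
	preempt_enable();
}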
#define LOCK_SECTION_END \
".previous\n\t"
-#define __lockfunc fastcall __attribute__((section(".spinlock.text")))
+#define __lockfunc __attribute__((section(".spinlock.text")))
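__lockfunc now only places the out-of-line lock routines into the .spinlock.text section, so backtrace and latency code can recognise lock-acquisition frames. A simplified sketch of how the tag is applied, loosely modelled on kernel/spinlock.c of this era; the function name and body are illustrative:

#include <linux/spinlock.h>

void __lockfunc example_spin_lock(spinlock_t *lock)
{
	preempt_disable();		/* no preemption while the lock is held */
	_raw_spin_lock(lock);		/* architecture-level acquisition */
}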
/*
* Pull the raw_spinlock_t and raw_rwlock_t definitions:
*/
struct timer_list _name = \
TIMER_INITIALIZER(_function, _expires, _data)
-void fastcall init_timer(struct timer_list * timer);
-void fastcall init_timer_deferrable(struct timer_list *timer);
+void init_timer(struct timer_list *timer);
+void init_timer_deferrable(struct timer_list *timer);
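In this era's timer API, init_timer() prepares the timer_list, the callback takes an unsigned long cookie, and mod_timer() arms it. A sketch with illustrative names; setup_timer(), shown next in the excerpt, folds the init call and the two assignments into one helper:

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list example_timer;

static void example_timeout(unsigned long data)
{
	/* runs in softirq context when the timer expires */
}

static void example_start(void)
{
	init_timer(&example_timer);
	example_timer.function = example_timeout;
	example_timer.data     = 0;
	mod_timer(&example_timer, jiffies + HZ);	/* fire in about one second */
}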
static inline void setup_timer(struct timer_list * timer,
void (*function)(unsigned long),