"extern inline" doesn't make much sense.
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Miles Bader <miles@gnu.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
-extern __inline__ int atomic_add_return (int i, volatile atomic_t *v)
+static inline int atomic_add_return (int i, volatile atomic_t *v)
{
unsigned long flags;
int res;
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*/
-extern __inline__ unsigned long ffz (unsigned long word)
+static inline unsigned long ffz (unsigned long word)
{
unsigned long result = 0;
"m" (*((const char *)(addr) + ((nr) >> 3)))); \
__test_bit_res; \
})
-extern __inline__ int __test_bit (int nr, const void *addr)
+static inline int __test_bit (int nr, const void *addr)
{
int res;
__asm__ __volatile__ ("tst1 %1, [%2]; setf nz, %0"
#define find_first_zero_bit(addr, size) \
find_next_zero_bit ((addr), (size), 0)
-extern __inline__ int find_next_zero_bit(const void *addr, int size, int offset)
+static inline int find_next_zero_bit(const void *addr, int size, int offset)
{
unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
unsigned long result = offset & ~31UL;
#include <asm/param.h>
-extern __inline__ void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
{
if (loops)
__asm__ __volatile__ ("1: add -1, %0; bnz 1b"
extern unsigned long loops_per_jiffy;
-extern __inline__ void udelay(unsigned long usecs)
+static inline void udelay(unsigned long usecs)
{
register unsigned long full_loops, part_loops;
#ifndef __V850_HW_IRQ_H__
#define __V850_HW_IRQ_H__
-extern inline void hw_resend_irq (struct hw_interrupt_type *h, unsigned int i)
+static inline void hw_resend_irq (struct hw_interrupt_type *h, unsigned int i)
{
}
/* Do necessary setup to start up a newly executed thread. */
-extern inline void start_thread (struct pt_regs *regs,
+static inline void start_thread (struct pt_regs *regs,
unsigned long pc, unsigned long usp)
{
regs->pc = pc;
}
/* Free all resources held by a thread. */
-extern inline void release_thread (struct task_struct *dead_task)
+static inline void release_thread (struct task_struct *dead_task)
{
}
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC (name,1)
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC (name,0)
-extern inline void sema_init (struct semaphore *sem, int val)
+static inline void sema_init (struct semaphore *sem, int val)
{
*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
}
extern int __down_trylock (struct semaphore * sem);
extern void __up (struct semaphore * sem);
-extern inline void down (struct semaphore * sem)
+static inline void down (struct semaphore * sem)
{
might_sleep();
if (atomic_dec_return (&sem->count) < 0)
__down (sem);
}
-extern inline int down_interruptible (struct semaphore * sem)
+static inline int down_interruptible (struct semaphore * sem)
{
int ret = 0;
might_sleep();
return ret;
}
-extern inline int down_trylock (struct semaphore *sem)
+static inline int down_trylock (struct semaphore *sem)
{
int ret = 0;
if (atomic_dec_return (&sem->count) < 0)
return ret;
}
-extern inline void up (struct semaphore * sem)
+static inline void up (struct semaphore * sem)
{
if (atomic_inc_return (&sem->count) <= 0)
__up (sem);
((__typeof__ (*(ptr)))__xchg ((unsigned long)(with), (ptr), sizeof (*(ptr))))
#define tas(ptr) (xchg ((ptr), 1))
-extern inline unsigned long __xchg (unsigned long with,
+static inline unsigned long __xchg (unsigned long with,
__volatile__ void *ptr, int size)
{
unsigned long tmp, flags;
BUG ();
}
-extern inline void flush_tlb_kernel_page(unsigned long addr)
+static inline void flush_tlb_kernel_page(unsigned long addr)
{
BUG ();
}
-extern inline void flush_tlb_pgtables(struct mm_struct *mm,
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
BUG ();
#define VERIFY_READ 0
#define VERIFY_WRITE 1
-extern inline int access_ok (int type, const void *addr, unsigned long size)
+static inline int access_ok (int type, const void *addr, unsigned long size)
{
/* XXX I guess we should check against real ram bounds at least, and
possibly make sure ADDR is not within the kernel.
})
-extern inline void __put_unaligned_2(__u32 __v, register __u8 *__p)
+static inline void __put_unaligned_2(__u32 __v, register __u8 *__p)
{
*__p++ = __v;
*__p++ = __v >> 8;
}
-extern inline void __put_unaligned_4(__u32 __v, register __u8 *__p)
+static inline void __put_unaligned_4(__u32 __v, register __u8 *__p)
{
__put_unaligned_2(__v >> 16, __p + 2);
__put_unaligned_2(__v, __p);
}
-extern inline void __put_unaligned_8(const unsigned long long __v, register __u8 *__p)
+static inline void __put_unaligned_8(const unsigned long long __v, register __u8 *__p)
{
/*
* tradeoff: 8 bytes of stack for all unaligned puts (2