/*
* #define byte(x, nr) ((unsigned char)((x) >> (nr*8)))
*/
-inline static u8
+static inline u8
byte(const u32 x, const unsigned n)
{
return x >> (n << 3);
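/* Worked example (illustrative only): byte(0x12345678, 1) shifts right by
 * 1 << 3 = 8 bits and the u8 return type truncates the result to 0x56;
 * byte(0x12345678, 0) yields 0x78, matching the byte(x, nr) macro quoted above. */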
/* Busy wait until FLAG goes low. Return 0 on timeout. */
-inline static int flag_low(int flag, unsigned long timeout)
+static inline int flag_low(int flag, unsigned long timeout)
{
int flag_high;
unsigned long count = 0;
/* Wait for command execution status. Choice between busy waiting
and sleeping. Return value <0 indicates timeout. */
-inline static int get_exec_status(int busy_waiting)
+static inline int get_exec_status(int busy_waiting)
{
unsigned char exec_status;
/* Wait busy for extra byte of data that a command returns.
Return value <0 indicates timeout. */
-inline static int get_data(int short_timeout)
+static inline int get_data(int short_timeout)
{
unsigned char data;
/* Facilities for asynchronous operation */
/* Read status/data availability flags FL_STEN and FL_DTEN */
-inline static int stdt_flags(void)
+static inline int stdt_flags(void)
{
return inb(STATUS_PORT) & FL_STDT;
}
/* Fetch status that has previously been waited for. <0 means not available */
-inline static int fetch_status(void)
+static inline int fetch_status(void)
{
unsigned char status;
/* Fetch data that has previously been waited for. */
-inline static void fetch_data(char *buf, int n)
+static inline void fetch_data(char *buf, int n)
{
insb(DATA_PORT, buf, n);
DEBUG((DEBUG_DRIVE_IF, "fetched 0x%x bytes", n));
/* Flush status and data fifos */
-inline static void flush_data(void)
+static inline void flush_data(void)
{
while ((inb(STATUS_PORT) & FL_STDT) != FL_STDT)
inb(DATA_PORT);
/* Send a simple command and wait for response. Command codes < COMFETCH
are quick response commands */
-inline static int exec_cmd(int cmd)
+static inline int exec_cmd(int cmd)
{
int ack = send_cmd(cmd);
if (ack < 0)
/* Send a command with parameters. Don't wait for the response,
* which consists of data blocks read from the CD. */
-inline static int exec_read_cmd(int cmd, struct cdrom_msf *params)
+static inline int exec_read_cmd(int cmd, struct cdrom_msf *params)
{
int ack = send_cmd(cmd);
if (ack < 0)
/* Send a seek command with parameters and wait for response */
-inline static int exec_seek_cmd(int cmd, struct cdrom_msf *params)
+static inline int exec_seek_cmd(int cmd, struct cdrom_msf *params)
{
int ack = send_cmd(cmd);
if (ack < 0)
/* Send a command with parameters and wait for response */
-inline static int exec_long_cmd(int cmd, struct cdrom_msf *params)
+static inline int exec_long_cmd(int cmd, struct cdrom_msf *params)
{
int ack = exec_read_cmd(cmd, params);
if (ack < 0)
/* Binary to BCD (2 digits) */
-inline static void single_bin2bcd(u_char *p)
+static inline void single_bin2bcd(u_char *p)
{
DEBUG((DEBUG_CONV, "bin2bcd %02d", *p));
*p = (*p % 10) | ((*p / 10) << 4);
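/* Worked example (illustrative): for *p == 59, 59 % 10 = 9 and
 * (59 / 10) << 4 = 0x50, so *p becomes 0x59, the two-digit BCD form. */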
/* Two BCD digits to binary */
-inline static u_char bcd2bin(u_char bcd)
+static inline u_char bcd2bin(u_char bcd)
{
DEBUG((DEBUG_CONV, "bcd2bin %x%02x", bcd));
return (bcd >> 4) * 10 + (bcd & 0x0f);
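/* Worked example (illustrative): bcd2bin(0x59) = 0x5 * 10 + 0x9 = 59,
 * the inverse of single_bin2bcd() above. */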
static volatile int buf_bn[N_BUFS], next_bn;
static volatile int buf_in = 0, buf_out = NOBUF;
-inline static void opt_invalidate_buffers(void)
+static inline void opt_invalidate_buffers(void)
{
int i;
return status;
}
-inline static int dmi_checksum(u8 *buf)
+static inline int dmi_checksum(u8 *buf)
{
u8 sum=0;
int a;
* Pack active and recovery counts into single byte representation
* used by controller
*/
-inline static u8 pack_nibbles (u8 upper, u8 lower)
+static inline u8 pack_nibbles (u8 upper, u8 lower)
{
return ((upper & 0x0f) << 4) | (lower & 0x0f);
}
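/* Worked example (illustrative): pack_nibbles(0x3, 0x7) = (0x3 << 4) | 0x7
 * = 0x37; only the low nibble of each argument is kept. */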
return IRQ_HANDLED;
}
-inline static void
+static inline void
release_ioregs(struct IsdnCardState *cs, int mask)
{
release_region(cs->hw.avm.cfg_reg, 8);
}
}
-inline static void
+static inline void
enqueue_super(struct PStack *st,
struct sk_buff *skb)
{
return IRQ_HANDLED;
}
-inline static void
+static inline void
release_ioregs(struct IsdnCardState *cs, int mask)
{
if (mask & 1)
return NULL;
}
-inline static sector_t calc_dev_sboffset(struct block_device *bdev)
+static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
return MD_NEW_SIZE_BLOCKS(size);
msleep(125);
}
-inline static int radio_function(struct inode *inode, struct file *file,
+static inline int radio_function(struct inode *inode, struct file *file,
unsigned int cmd, void *arg)
{
struct video_device *dev = video_devdata(file);
module_init(maestro_radio_init);
module_exit(maestro_radio_exit);
-inline static __u16 radio_power_on(struct radio_device *dev)
+static inline __u16 radio_power_on(struct radio_device *dev)
{
register __u16 io=dev->io;
register __u32 ofreq;
}
-inline static int radio_function(struct inode *inode, struct file *file,
+static inline int radio_function(struct inode *inode, struct file *file,
unsigned int cmd, void *arg)
{
struct video_device *dev = video_devdata(file);
* Tasklets
*/
-inline static struct mmc_data* wbsd_get_data(struct wbsd_host* host)
+static inline struct mmc_data* wbsd_get_data(struct wbsd_host* host)
{
WARN_ON(!host->mrq);
if (!host->mrq)
static int start_receive(struct net_device *, pcb_struct *);
-inline static void adapter_reset(struct net_device *dev)
+static inline void adapter_reset(struct net_device *dev)
{
unsigned long timeout;
elp_device *adapter = dev->priv;
static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int plip_preempt(void *handle);
static void plip_wakeup(void *handle);
-\f
+
enum plip_connection_state {
PLIP_CN_NONE=0,
PLIP_CN_RECEIVE,
atomic_t kill_timer;
struct semaphore killed_timer_sem;
};
-\f
-inline static void enable_parport_interrupts (struct net_device *dev)
+
+static inline void enable_parport_interrupts (struct net_device *dev)
{
if (dev->irq != -1)
{
}
}
-inline static void disable_parport_interrupts (struct net_device *dev)
+static inline void disable_parport_interrupts (struct net_device *dev)
{
if (dev->irq != -1)
{
}
}
-inline static void write_data (struct net_device *dev, unsigned char data)
+static inline void write_data (struct net_device *dev, unsigned char data)
{
struct parport *port =
((struct net_local *)dev->priv)->pardev->port;
port->ops->write_data (port, data);
}
-inline static unsigned char read_status (struct net_device *dev)
+static inline unsigned char read_status (struct net_device *dev)
{
struct parport *port =
((struct net_local *)dev->priv)->pardev->port;
return port->ops->read_status (port);
}
-\f
+
/* Entry point of PLIP driver.
Probe the hardware, and register/initialize the driver.
spin_lock_init(&nl->lock);
}
-\f
+
/* Bottom half handler for the delayed request.
This routine is kicked by do_timer().
Request `plip_bh' to be invoked. */
return TIMEOUT;
}
-\f
+
static int
plip_none(struct net_device *dev, struct net_local *nl,
struct plip_local *snd, struct plip_local *rcv)
/* PLIP_RECEIVE --- receive a byte (two nibbles)
Returns OK on success, TIMEOUT on timeout */
-inline static int
+static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
enum plip_nibble_state *ns_p, unsigned char *data_p)
{
return htons(ETH_P_802_2);
}
-
/* PLIP_RECEIVE_PACKET --- receive a packet */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
/* PLIP_SEND --- send a byte (two nibbles)
Returns OK on success, TIMEOUT when timeout */
-inline static int
+static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
enum plip_nibble_state *ns_p, unsigned char data)
{
return OK;
}
-\f
+
/* Handle the parallel port interrupts. */
static void
plip_interrupt(int irq, void *dev_id, struct pt_regs * regs)
spin_unlock_irq(&nl->lock);
}
-\f
+
static int
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
}
return 0;
}
-\f
+
static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
static int timid;
* the rest of the logic from the result of sleep/wakeup
*/
-inline static void mac_wol_reset(struct mac_regs __iomem * regs)
+static inline void mac_wol_reset(struct mac_regs __iomem * regs)
{
/* Turn off SWPTAG right after leaving power mode */
* CHECK ME: locking
*/
-inline static int velocity_get_ip(struct velocity_info *vptr)
+static inline int velocity_get_ip(struct velocity_info *vptr)
{
struct in_device *in_dev = (struct in_device *) vptr->dev->ip_ptr;
struct in_ifaddr *ifa;
enable_MAC(ai, &rsp, 1);
}
-inline static u8 hexVal(char c) {
+static inline u8 hexVal(char c) {
if (c>='0' && c<='9') return c -= '0';
if (c>='a' && c<='f') return c -= 'a'-10;
if (c>='A' && c<='F') return c -= 'A'-10;
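/* Worked example (illustrative): hexVal('b') = 'b' - ('a' - 10)
 * = 98 - 87 = 11; decimal digits and upper-case letters map likewise. */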
vfree(cpu_buffer[i].buffer);
}
}
-
-
+
int alloc_cpu_buffers(void)
{
int i;
free_cpu_buffers();
return -ENOMEM;
}
-
void start_cpu_work(void)
{
}
}
-
void end_cpu_work(void)
{
int i;
flush_scheduled_work();
}
-
/* Resets the cpu buffer to a sane state. */
void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf)
{
cpu_buf->last_task = NULL;
}
-
/* compute number of available slots in cpu_buffer queue */
static unsigned long nr_available_slots(struct oprofile_cpu_buffer const * b)
{
return tail + (b->buffer_size - head) - 1;
}
-
static void increment_head(struct oprofile_cpu_buffer * b)
{
unsigned long new_head = b->head_pos + 1;
b->head_pos = 0;
}
-
-
-
-inline static void
+static inline void
add_sample(struct oprofile_cpu_buffer * cpu_buf,
unsigned long pc, unsigned long event)
{
increment_head(cpu_buf);
}
-
-inline static void
+static inline void
add_code(struct oprofile_cpu_buffer * buffer, unsigned long value)
{
add_sample(buffer, ESCAPE_CODE, value);
}
-
/* This must be safe from any context. It's safe writing here
* because of the head/tail separation of the writer and reader
* of the CPU buffer.
return 1;
}
-
static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
{
cpu_buf->tracing = 0;
}
-
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
oprofile_end_trace(cpu_buf);
}
-
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
log_sample(cpu_buf, pc, is_kernel, event);
}
-
void oprofile_add_trace(unsigned long pc)
{
struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
add_sample(cpu_buf, pc, 0);
}
-
-
/*
* This serves to avoid cpu buffer overflow, and makes sure
* the task mortuary progresses
/************************* OUTBOUND ROUTINES *******************************/
-inline static int
+static inline int
qdio_get_outbound_buffer_frontier(struct qdio_q *q)
{
int f,f_mod_no;
}
/* all buffers are processed */
-inline static int
+static inline int
qdio_is_outbound_q_done(struct qdio_q *q)
{
int no_used;
return (no_used==0);
}
-inline static int
+static inline int
qdio_has_outbound_q_moved(struct qdio_q *q)
{
int i;
}
}
-inline static void
+static inline void
qdio_kick_outbound_q(struct qdio_q *q)
{
int result;
}
}
-inline static void
+static inline void
qdio_kick_outbound_handler(struct qdio_q *q)
{
int start, end, real_end, count;
/************************* INBOUND ROUTINES *******************************/
-inline static int
+static inline int
qdio_get_inbound_buffer_frontier(struct qdio_q *q)
{
int f,f_mod_no;
return q->first_to_check;
}
-inline static int
+static inline int
qdio_has_inbound_q_moved(struct qdio_q *q)
{
int i;
}
/* means, no more buffers to be filled */
-inline static int
+static inline int
tiqdio_is_inbound_q_done(struct qdio_q *q)
{
int no_used;
return 0;
}
-inline static int
+static inline int
qdio_is_inbound_q_done(struct qdio_q *q)
{
int no_used;
}
}
-inline static void
+static inline void
qdio_kick_inbound_handler(struct qdio_q *q)
{
int count, start, end, real_end, i;
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
-inline static __u8
+static inline __u8
qeth_get_ipa_adp_type(enum qeth_link_types link_type)
{
switch (link_type) {
}
}
-inline static int
+static inline int
qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size)
{
struct sk_buff *new_skb = NULL;
}
return 0;
}
+
static inline struct sk_buff *
qeth_pskb_unshare(struct sk_buff *skb, int pri)
{
return nskb;
}
-
-inline static void *
+static inline void *
qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size)
{
void *hdr;
}
-inline static int
+static inline int
qeth_get_hlen(__u8 link_type)
{
#ifdef CONFIG_QETH_IPV6
#endif /* CONFIG_QETH_IPV6 */
}
-inline static unsigned short
+static inline unsigned short
qeth_get_netdev_flags(struct qeth_card *card)
{
if (card->options.layer2)
}
}
-inline static int
+static inline int
qeth_get_initial_mtu_for_card(struct qeth_card * card)
{
switch (card->info.type) {
}
}
-inline static int
+static inline int
qeth_get_max_mtu_for_card(int cardtype)
{
switch (cardtype) {
}
}
-inline static int
+static inline int
qeth_get_mtu_out_of_mpc(int cardtype)
{
switch (cardtype) {
}
}
-inline static int
+static inline int
qeth_get_mtu_outof_framesize(int framesize)
{
switch (framesize) {
}
}
-inline static int
+static inline int
qeth_mtu_is_valid(struct qeth_card * card, int mtu)
{
switch (card->info.type) {
}
}
-inline static int
+static inline int
qeth_get_arphdr_type(int cardtype, int linktype)
{
switch (cardtype) {
}
#ifdef CONFIG_QETH_PERF_STATS
-inline static int
+static inline int
qeth_get_micros(void)
{
return (int) (get_clock() >> 12);
/* Find cmd in SRB list */
-inline static struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
+static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
struct list_head *head)
{
struct ScsiReqBlk *i;
mdelay(10*amount);
}
-inline static void fdomain_make_bus_idle( void )
+static inline void fdomain_make_bus_idle( void )
{
outb(0, port_base + SCSI_Cntl);
outb(0, port_base + SCSI_Mode_Cntl);
static void mts_transfer_cleanup( struct urb *transfer );
static void mts_do_sg(struct urb * transfer, struct pt_regs *regs);
-
-inline static
+static inline
void mts_int_submit_urb (struct urb* transfer,
int pipe,
void* data,
* Utility functions
*/
-inline static u32 RD32(unsigned char __iomem *base, s32 off)
+static inline u32 RD32(unsigned char __iomem *base, s32 off)
{
return fb_readl(base + off);
}
-inline static void WR32(unsigned char __iomem *base, s32 off, u32 v)
+static inline void WR32(unsigned char __iomem *base, s32 off, u32 v)
{
fb_writel(v, base + off);
}
-inline static u32 pm2_RD(struct pm2fb_par* p, s32 off)
+static inline u32 pm2_RD(struct pm2fb_par* p, s32 off)
{
return RD32(p->v_regs, off);
}
-inline static void pm2_WR(struct pm2fb_par* p, s32 off, u32 v)
+static inline void pm2_WR(struct pm2fb_par* p, s32 off, u32 v)
{
WR32(p->v_regs, off, v);
}
-inline static u32 pm2_RDAC_RD(struct pm2fb_par* p, s32 idx)
+static inline u32 pm2_RDAC_RD(struct pm2fb_par* p, s32 idx)
{
int index = PM2R_RD_INDEXED_DATA;
switch (p->type) {
return pm2_RD(p, index);
}
-inline static void pm2_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
+static inline void pm2_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
{
int index = PM2R_RD_INDEXED_DATA;
switch (p->type) {
pm2_WR(p, index, v);
}
-inline static void pm2v_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
+static inline void pm2v_RDAC_WR(struct pm2fb_par* p, s32 idx, u32 v)
{
pm2_WR(p, PM2VR_RD_INDEX_LOW, idx & 0xff);
mb();
#ifdef CONFIG_FB_PM2_FIFO_DISCONNECT
#define WAIT_FIFO(p,a)
#else
-inline static void WAIT_FIFO(struct pm2fb_par* p, u32 a)
+static inline void WAIT_FIFO(struct pm2fb_par* p, u32 a)
{
while( pm2_RD(p, PM2R_IN_FIFO_SPACE) < a );
mb();
}
/* lock the current transaction */
-inline static void lock_journal(struct super_block *p_s_sb)
+static inline void lock_journal(struct super_block *p_s_sb)
{
PROC_INFO_INC(p_s_sb, journal.lock_journal);
down(&SB_JOURNAL(p_s_sb)->j_lock);
}
/* unlock the current transaction */
-inline static void unlock_journal(struct super_block *p_s_sb)
+static inline void unlock_journal(struct super_block *p_s_sb)
{
up(&SB_JOURNAL(p_s_sb)->j_lock);
}
/*
* lowlevel functions
*/
-inline static int vx_test_and_ack(vx_core_t *chip)
+static inline int vx_test_and_ack(vx_core_t *chip)
{
snd_assert(chip->ops->test_and_ack, return -ENXIO);
return chip->ops->test_and_ack(chip);
}
-inline static void vx_validate_irq(vx_core_t *chip, int enable)
+static inline void vx_validate_irq(vx_core_t *chip, int enable)
{
snd_assert(chip->ops->validate_irq, return);
chip->ops->validate_irq(chip, enable);
}
-inline static unsigned char snd_vx_inb(vx_core_t *chip, int reg)
+static inline unsigned char snd_vx_inb(vx_core_t *chip, int reg)
{
snd_assert(chip->ops->in8, return 0);
return chip->ops->in8(chip, reg);
}
-inline static unsigned int snd_vx_inl(vx_core_t *chip, int reg)
+static inline unsigned int snd_vx_inl(vx_core_t *chip, int reg)
{
snd_assert(chip->ops->in32, return 0);
return chip->ops->in32(chip, reg);
}
-inline static void snd_vx_outb(vx_core_t *chip, int reg, unsigned char val)
+static inline void snd_vx_outb(vx_core_t *chip, int reg, unsigned char val)
{
snd_assert(chip->ops->out8, return);
chip->ops->out8(chip, reg, val);
}
-inline static void snd_vx_outl(vx_core_t *chip, int reg, unsigned int val)
+static inline void snd_vx_outl(vx_core_t *chip, int reg, unsigned int val)
{
snd_assert(chip->ops->out32, return);
chip->ops->out32(chip, reg, val);
/*
* pseudo-DMA transfer
*/
-inline static void vx_pseudo_dma_write(vx_core_t *chip, snd_pcm_runtime_t *runtime,
+static inline void vx_pseudo_dma_write(vx_core_t *chip, snd_pcm_runtime_t *runtime,
vx_pipe_t *pipe, int count)
{
snd_assert(chip->ops->dma_write, return);
chip->ops->dma_write(chip, runtime, pipe, count);
}
-inline static void vx_pseudo_dma_read(vx_core_t *chip, snd_pcm_runtime_t *runtime,
+static inline void vx_pseudo_dma_read(vx_core_t *chip, snd_pcm_runtime_t *runtime,
vx_pipe_t *pipe, int count)
{
snd_assert(chip->ops->dma_read, return);
* as real UNIX machines always do it. This avoids all headaches about
* daylight saving times and warping kernel clocks.
*/
-inline static void warp_clock(void)
+static inline void warp_clock(void)
{
write_seqlock_irq(&xtime_lock);
wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
* All Rights Reserved.
*
*/
-inline static s64 divremdi3(s64 x, s64 y, int type)
+static inline s64 divremdi3(s64 x, s64 y, int type)
{
u64 a = (x < 0) ? -x : x;
u64 b = (y < 0) ? -y : y;
#define is_nonblock_mode(mode) ((mode) & SNDRV_SEQ_OSS_FILE_NONBLOCK)
/* dispatch event */
-inline static int
+static inline int
snd_seq_oss_dispatch(seq_oss_devinfo_t *dp, snd_seq_event_t *ev, int atomic, int hop)
{
return snd_seq_kernel_client_dispatch(dp->cseq, ev, atomic, hop);
}
/* ioctl */
-inline static int
+static inline int
snd_seq_oss_control(seq_oss_devinfo_t *dp, unsigned int type, void *arg)
{
return snd_seq_kernel_client_ctl(dp->cseq, type, arg);
}
/* fill the addresses in header */
-inline static void
+static inline void
snd_seq_oss_fill_addr(seq_oss_devinfo_t *dp, snd_seq_event_t *ev,
int dest_client, int dest_port)
{
#define semaphore_of(fp) ((fp)->f_dentry->d_inode->i_sem)
-inline static int snd_seq_pool_available(pool_t *pool)
+static inline int snd_seq_pool_available(pool_t *pool)
{
return pool->total_elements - atomic_read(&pool->counter);
}
-inline static int snd_seq_output_ok(pool_t *pool)
+static inline int snd_seq_output_ok(pool_t *pool)
{
return snd_seq_pool_available(pool) >= pool->room;
}
/*
* initialize record
*/
-inline static void reset_encode(snd_midi_event_t *dev)
+static inline void reset_encode(snd_midi_event_t *dev)
{
dev->read = 0;
dev->qlen = 0;
static snd_card_t *snd_serial_cards[SNDRV_CARDS] = SNDRV_DEFAULT_PTR;
-inline static void snd_uart16550_add_timer(snd_uart16550_t *uart)
+static inline void snd_uart16550_add_timer(snd_uart16550_t *uart)
{
if (! uart->timer_running) {
/* timer 38600bps * 10bit * 16byte */
}
}
-inline static void snd_uart16550_del_timer(snd_uart16550_t *uart)
+static inline void snd_uart16550_del_timer(snd_uart16550_t *uart)
{
if (uart->timer_running) {
del_timer(&uart->buffer_timer);
}
/* This macro is only used in snd_uart16550_io_loop */
-inline static void snd_uart16550_buffer_output(snd_uart16550_t *uart)
+static inline void snd_uart16550_buffer_output(snd_uart16550_t *uart)
{
unsigned short buff_out = uart->buff_out;
if( uart->buff_in_count > 0 ) {
return 0;
};
-inline static int snd_uart16550_buffer_can_write( snd_uart16550_t *uart, int Num )
+static inline int snd_uart16550_buffer_can_write( snd_uart16550_t *uart, int Num )
{
if( uart->buff_in_count + Num < TX_BUFF_SIZE )
return 1;
return 0;
}
-inline static int snd_uart16550_write_buffer(snd_uart16550_t *uart, unsigned char byte)
+static inline int snd_uart16550_write_buffer(snd_uart16550_t *uart, unsigned char byte)
{
unsigned short buff_in = uart->buff_in;
if( uart->buff_in_count < TX_BUFF_SIZE ) {
* This is therefore much slower than need be, but is at least
* working.
*/
-inline static void
+static inline void
write_word(emu8000_t *emu, int *offset, unsigned short data)
{
if (emu8000_reset_addr) {
/* All the burgundy functions: */
/* Waits for busy flag to clear */
-inline static void
+static inline void
awacs_burgundy_busy_wait(void)
{
int count = 50; /* > 2 samples at 44k1 */
udelay(1) ;
}
-inline static void
+static inline void
awacs_burgundy_extend_wait(void)
{
int count = 50 ; /* > 2 samples at 44k1 */
/* read/write operations for dword register */
-inline static void snd_cmipci_write(cmipci_t *cm, unsigned int cmd, unsigned int data)
+static inline void snd_cmipci_write(cmipci_t *cm, unsigned int cmd, unsigned int data)
{
outl(data, cm->iobase + cmd);
}
-inline static unsigned int snd_cmipci_read(cmipci_t *cm, unsigned int cmd)
+
+static inline unsigned int snd_cmipci_read(cmipci_t *cm, unsigned int cmd)
{
return inl(cm->iobase + cmd);
}
/* read/write operations for word register */
-inline static void snd_cmipci_write_w(cmipci_t *cm, unsigned int cmd, unsigned short data)
+static inline void snd_cmipci_write_w(cmipci_t *cm, unsigned int cmd, unsigned short data)
{
outw(data, cm->iobase + cmd);
}
-inline static unsigned short snd_cmipci_read_w(cmipci_t *cm, unsigned int cmd)
+
+static inline unsigned short snd_cmipci_read_w(cmipci_t *cm, unsigned int cmd)
{
return inw(cm->iobase + cmd);
}
/* read/write operations for byte register */
-inline static void snd_cmipci_write_b(cmipci_t *cm, unsigned int cmd, unsigned char data)
+static inline void snd_cmipci_write_b(cmipci_t *cm, unsigned int cmd, unsigned char data)
{
outb(data, cm->iobase + cmd);
}
-inline static unsigned char snd_cmipci_read_b(cmipci_t *cm, unsigned int cmd)
+static inline unsigned char snd_cmipci_read_b(cmipci_t *cm, unsigned int cmd)
{
return inb(cm->iobase + cmd);
}
}
}
-inline static void snd_cs4281_delay_long(void)
+static inline void snd_cs4281_delay_long(void)
{
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
}
/* calculate buffer pointer from offset address */
-inline static void *offset_ptr(emu10k1_t *emu, int page, int offset)
+static inline void *offset_ptr(emu10k1_t *emu, int page, int offset)
{
char *ptr;
snd_assert(page >= 0 && page < emu->max_cache_pages, return NULL);
chip->maestro_map[reg] = data;
}
-inline static void maestro_write(es1968_t *chip, u16 reg, u16 data)
+static inline void maestro_write(es1968_t *chip, u16 reg, u16 data)
{
unsigned long flags;
spin_lock_irqsave(&chip->reg_lock, flags);
return chip->maestro_map[reg];
}
-inline static u16 maestro_read(es1968_t *chip, u16 reg)
+static inline u16 maestro_read(es1968_t *chip, u16 reg)
{
unsigned long flags;
u16 result;
apu_data_set(chip, data);
}
-inline static void apu_set_register(es1968_t *chip, u16 channel, u8 reg, u16 data)
+static inline void apu_set_register(es1968_t *chip, u16 channel, u8 reg, u16 data)
{
unsigned long flags;
spin_lock_irqsave(&chip->reg_lock, flags);
return __maestro_read(chip, IDR0_DATA_PORT);
}
-inline static u16 apu_get_register(es1968_t *chip, u16 channel, u8 reg)
+static inline u16 apu_get_register(es1968_t *chip, u16 channel, u8 reg)
{
unsigned long flags;
u16 v;
}
/* get current pointer */
-inline static unsigned int
+static inline unsigned int
snd_es1968_get_dma_ptr(es1968_t *chip, esschan_t *es)
{
unsigned int offset;
}
/* spin lock held */
-inline static void snd_es1968_trigger_apu(es1968_t *esm, int apu, int mode)
+static inline void snd_es1968_trigger_apu(es1968_t *esm, int apu, int mode)
{
/* set the APU mode */
__apu_set_register(esm, apu, 0,
schedule_timeout(((msec) * HZ) / 1000);\
} while (0)
-inline static void snd_m3_outw(m3_t *chip, u16 value, unsigned long reg)
+static inline void snd_m3_outw(m3_t *chip, u16 value, unsigned long reg)
{
outw(value, chip->iobase + reg);
}
-inline static u16 snd_m3_inw(m3_t *chip, unsigned long reg)
+static inline u16 snd_m3_inw(m3_t *chip, unsigned long reg)
{
return inw(chip->iobase + reg);
}
-inline static void snd_m3_outb(m3_t *chip, u8 value, unsigned long reg)
+static inline void snd_m3_outb(m3_t *chip, u8 value, unsigned long reg)
{
outb(value, chip->iobase + reg);
}
-inline static u8 snd_m3_inb(m3_t *chip, unsigned long reg)
+static inline u8 snd_m3_inb(m3_t *chip, unsigned long reg)
{
return inb(chip->iobase + reg);
}
* lowlvel stuffs
*/
-inline static u8
+static inline u8
snd_nm256_readb(nm256_t *chip, int offset)
{
return readb(chip->cport + offset);
}
-inline static u16
+static inline u16
snd_nm256_readw(nm256_t *chip, int offset)
{
return readw(chip->cport + offset);
}
-inline static u32
+static inline u32
snd_nm256_readl(nm256_t *chip, int offset)
{
return readl(chip->cport + offset);
}
-inline static void
+static inline void
snd_nm256_writeb(nm256_t *chip, int offset, u8 val)
{
writeb(val, chip->cport + offset);
}
-inline static void
+static inline void
snd_nm256_writew(nm256_t *chip, int offset, u16 val)
{
writew(val, chip->cport + offset);
}
-inline static void
+static inline void
snd_nm256_writel(nm256_t *chip, int offset, u32 val)
{
writel(val, chip->cport + offset);
}
-inline static void
+static inline void
snd_nm256_write_buffer(nm256_t *chip, void *src, int offset, int size)
{
offset -= chip->buffer_start;
}
-inline static void
+static inline void
snd_nm256_intr_check(nm256_t *chip)
{
if (chip->badintrcount++ > 1000) {
/*
* delay for 1 tick
*/
-inline static void do_delay(trident_t *chip)
+static inline void do_delay(trident_t *chip)
{
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
#endif /* PAGE_SIZE */
/* calculate buffer pointer from offset address */
-inline static void *offset_ptr(trident_t *trident, int offset)
+static inline void *offset_ptr(trident_t *trident, int offset)
{
char *ptr;
ptr = page_to_ptr(trident, get_aligned_page(offset));
[VX_GPIOC] = 0, /* on the PLX */
};
-inline static unsigned long vx2_reg_addr(vx_core_t *_chip, int reg)
+static inline unsigned long vx2_reg_addr(vx_core_t *_chip, int reg)
{
struct snd_vx222 *chip = (struct snd_vx222 *)_chip;
return chip->port[vx2_reg_index[reg]] + vx2_reg_offset[reg];
/*
* vx_release_pseudo_dma - disable the pseudo-DMA mode
*/
-inline static void vx2_release_pseudo_dma(vx_core_t *chip)
+static inline void vx2_release_pseudo_dma(vx_core_t *chip)
{
/* HREQ pin disabled. */
vx_outl(chip, ICR, 0);
};
-inline static unsigned long vxp_reg_addr(vx_core_t *_chip, int reg)
+static inline unsigned long vxp_reg_addr(vx_core_t *_chip, int reg)
{
struct snd_vxpocket *chip = (struct snd_vxpocket *)_chip;
return chip->port + vxp_reg_offset[reg];
/* Waits for busy flag to clear */
-inline static void
+static inline void
snd_pmac_burgundy_busy_wait(pmac_t *chip)
{
int timeout = 50;
printk(KERN_DEBUG "burgundy_busy_wait: timeout\n");
}
-inline static void
+static inline void
snd_pmac_burgundy_extend_wait(pmac_t *chip)
{
int timeout;
/*
* wait while run status is on
*/
-inline static void
+static inline void
snd_pmac_wait_ack(pmac_stream_t *rec)
{
int timeout = 50000;
/*
* stop the DMA transfer
*/
-inline static void snd_pmac_dma_stop(pmac_stream_t *rec)
+static inline void snd_pmac_dma_stop(pmac_stream_t *rec)
{
out_le32(&rec->dma->control, (RUN|WAKE|FLUSH|PAUSE) << 16);
snd_pmac_wait_ack(rec);
/*
* set the command pointer address
*/
-inline static void snd_pmac_dma_set_command(pmac_stream_t *rec, pmac_dbdma_t *cmd)
+static inline void snd_pmac_dma_set_command(pmac_stream_t *rec, pmac_dbdma_t *cmd)
{
out_le32(&rec->dma->cmdptr, cmd->addr);
}
/*
* start the DMA
*/
-inline static void snd_pmac_dma_run(pmac_stream_t *rec, int status)
+static inline void snd_pmac_dma_run(pmac_stream_t *rec, int status)
{
out_le32(&rec->dma->control, status | (status << 16));
}
* convert a sampling rate into our full speed format (fs/1000 in Q16.16)
* this will overflow at approx 524 kHz
*/
-inline static unsigned get_usb_full_speed_rate(unsigned int rate)
+static inline unsigned get_usb_full_speed_rate(unsigned int rate)
{
return ((rate << 13) + 62) / 125;
}
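/* Worked example (illustrative): for rate = 44100 Hz this gives
 * ((44100 << 13) + 62) / 125 = 2890138, i.e. 44.1 in Q16.16
 * (44.1 * 65536 = 2890137.6), which is fs/1000 as noted above. */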
* convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
* this will overflow at approx 4 MHz
*/
-inline static unsigned get_usb_high_speed_rate(unsigned int rate)
+static inline unsigned get_usb_high_speed_rate(unsigned int rate)
{
return ((rate << 10) + 62) / 125;
}
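/* Worked example (illustrative): for rate = 44100 Hz this gives
 * ((44100 << 10) + 62) / 125 = 361267, i.e. 44100/8000 = 5.5125
 * in Q16.16 (5.5125 * 65536 = 361267.2). */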
/* convert our full speed USB rate into sampling rate in Hz */
-inline static unsigned get_full_speed_hz(unsigned int usb_rate)
+static inline unsigned get_full_speed_hz(unsigned int usb_rate)
{
return (usb_rate * 125 + (1 << 12)) >> 13;
}
/* convert our high speed USB rate into sampling rate in Hz */
-inline static unsigned get_high_speed_hz(unsigned int usb_rate)
+static inline unsigned get_high_speed_hz(unsigned int usb_rate)
{
return (usb_rate * 125 + (1 << 9)) >> 10;
}
}
/* channel = 0: master, 1 = first channel */
-inline static int get_cur_mix_value(usb_mixer_elem_info_t *cval, int channel, int *value)
+static inline int get_cur_mix_value(usb_mixer_elem_info_t *cval, int channel, int *value)
{
return get_ctl_value(cval, GET_CUR, (cval->control << 8) | channel, value);
}
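/* Illustrative note: per the USB audio class, wValue carries the control
 * selector in its high byte and the channel in its low byte, so e.g.
 * control 2 (volume) on channel 1 becomes (2 << 8) | 1 = 0x0201 here. */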
return set_ctl_value(cval, SET_CUR, validx, value);
}
-inline static int set_cur_mix_value(usb_mixer_elem_info_t *cval, int channel, int value)
+static inline int set_cur_mix_value(usb_mixer_elem_info_t *cval, int channel, int value)
{
return set_ctl_value(cval, SET_CUR, (cval->control << 8) | channel, value);
}