{
int i;
- for (i=0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
+ for (i = 0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
return 0;
}
{
int i;
- for (i=0; i < ARRAY_SIZE(dev_self); i++) {
+ for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
if (dev_self[i])
w83977af_close(dev_self[i]);
}
/* Lock the port that we need */
if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
pr_debug("%s(), can't get iobase of 0x%03x\n",
- __func__ , iobase);
+ __func__, iobase);
return -ENODEV;
}
*/
dev = alloc_irdadev(sizeof(struct w83977af_ir));
if (dev == NULL) {
- printk( KERN_ERR "IrDA: Can't allocate memory for "
+ printk(KERN_ERR "IrDA: Can't allocate memory for "
"IrDA control block!\n");
err = -ENOMEM;
goto err_out;
/* The only value we must override is the baudrate */
/* FIXME: The HP HDLS-1100 does not support 1152000! */
- self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
- IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);
+ self->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 | IR_57600 |
+ IR_115200 | IR_576000 | IR_1152000 | (IR_4000000 << 8);
/* The HP HDLS-1100 needs 1 ms according to the specs */
self->qos.min_turn_time.bits = qos_mtt_bits;
/* Release the PORT that this driver is using */
pr_debug("%s(), Releasing Region %03x\n",
- __func__ , self->io.fir_base);
+ __func__, self->io.fir_base);
release_region(self->io.fir_base, self->io.fir_ext);
if (self->tx_buff.head)
int version;
int i;
- for (i=0; i < 2; i++) {
+ for (i = 0; i < 2; i++) {
#ifdef CONFIG_USE_W977_PNP
/* Enter PnP configuration mode */
w977_efm_enter(efbase[i]);
w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
/* Netwinder uses 1 higher than Linux */
- w977_write_reg(0x74, dma+1, efbase[i]);
+ w977_write_reg(0x74, dma + 1, efbase[i]);
#else
w977_write_reg(0x74, dma, efbase[i]);
#endif /* CONFIG_ARCH_NETWINDER */
#endif /* CONFIG_USE_W977_PNP */
/* Disable Advanced mode */
switch_bank(iobase, SET2);
- outb(iobase+2, 0x00);
+ outb(iobase + 2, 0x00);
/* Turn on UART (global) interrupts */
switch_bank(iobase, SET0);
- outb(HCR_EN_IRQ, iobase+HCR);
+ outb(HCR_EN_IRQ, iobase + HCR);
/* Switch to advanced mode */
switch_bank(iobase, SET2);
- outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);
+ outb(inb(iobase + ADCR1) | ADCR1_ADV_SL, iobase + ADCR1);
/* Set default IR-mode */
switch_bank(iobase, SET0);
- outb(HCR_SIR, iobase+HCR);
+ outb(HCR_SIR, iobase + HCR);
/* Read the Advanced IR ID */
switch_bank(iobase, SET3);
- version = inb(iobase+AUID);
+ version = inb(iobase + AUID);
/* Should be 0x1? */
if (0x10 == (version & 0xf0)) {
/* Set FIFO size to 32 */
switch_bank(iobase, SET2);
- outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
+ outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);
/* Set FIFO threshold to TX17, RX16 */
switch_bank(iobase, SET0);
- outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
- UFR_EN_FIFO,iobase+UFR);
+ outb(UFR_RXTL | UFR_TXTL | UFR_TXF_RST | UFR_RXF_RST |
+ UFR_EN_FIFO, iobase + UFR);
/* Receiver frame length */
switch_bank(iobase, SET4);
- outb(2048 & 0xff, iobase+6);
- outb((2048 >> 8) & 0x1f, iobase+7);
+ outb(2048 & 0xff, iobase + 6);
+ outb((2048 >> 8) & 0x1f, iobase + 7);
/*
* Init HP HSDL-1100 transceiver.
* CIRRX pin 40 connected to pin 37
*/
switch_bank(iobase, SET7);
- outb(0x40, iobase+7);
+ outb(0x40, iobase + 7);
net_info_ratelimited("W83977AF (IR) driver loaded. Version: 0x%02x\n",
version);
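Only the high nibble of the Advanced IR ID is compared here, which is what the terse "Should be 0x1?" comment is getting at: a chip reporting 0x12 passes (0x12 & 0xf0 == 0x10) while 0x20 would not. Restated with the driver's own names:
/* Sketch of the ID check above: only the high nibble of AUID matters,
 * e.g. 0x12 & 0xf0 == 0x10 passes, 0x20 & 0xf0 == 0x20 does not. */
version = inb(iobase + AUID);
if ((version & 0xf0) == 0x10)
net_info_ratelimited("W83977AF (IR) driver loaded. Version: 0x%02x\n",
version);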
self->io.speed = speed;
/* Save current bank */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Disable interrupts */
switch_bank(iobase, SET0);
- outb(0, iobase+ICR);
+ outb(0, iobase + ICR);
/* Select Set 2 */
switch_bank(iobase, SET2);
- outb(0x00, iobase+ABHL);
+ outb(0x00, iobase + ABHL);
switch (speed) {
- case 9600: outb(0x0c, iobase+ABLL); break;
- case 19200: outb(0x06, iobase+ABLL); break;
- case 38400: outb(0x03, iobase+ABLL); break;
- case 57600: outb(0x02, iobase+ABLL); break;
- case 115200: outb(0x01, iobase+ABLL); break;
+ case 9600: outb(0x0c, iobase + ABLL); break;
+ case 19200: outb(0x06, iobase + ABLL); break;
+ case 38400: outb(0x03, iobase + ABLL); break;
+ case 57600: outb(0x02, iobase + ABLL); break;
+ case 115200: outb(0x01, iobase + ABLL); break;
case 576000:
ir_mode = HCR_MIR_576;
pr_debug("%s(), handling baud of 576000\n", __func__);
break;
default:
ir_mode = HCR_FIR;
- pr_debug("%s(), unknown baud rate of %d\n", __func__ , speed);
+ pr_debug("%s(), unknown baud rate of %d\n", __func__, speed);
break;
}
/* Set speed mode */
switch_bank(iobase, SET0);
- outb(ir_mode, iobase+HCR);
+ outb(ir_mode, iobase + HCR);
/* set FIFO size to 32 */
switch_bank(iobase, SET2);
- outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);
+ outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);
/* set FIFO threshold to TX17, RX16 */
switch_bank(iobase, SET0);
- outb(0x00, iobase+UFR); /* Reset */
- outb(UFR_EN_FIFO, iobase+UFR); /* First we must enable FIFO */
- outb(0xa7, iobase+UFR);
+ outb(0x00, iobase + UFR); /* Reset */
+ outb(UFR_EN_FIFO, iobase + UFR); /* First we must enable FIFO */
+ outb(0xa7, iobase + UFR);
netif_wake_queue(self->netdev);
/* Enable some interrupts so we can receive frames */
switch_bank(iobase, SET0);
if (speed > PIO_MAX_SPEED) {
- outb(ICR_EFSFI, iobase+ICR);
+ outb(ICR_EFSFI, iobase + ICR);
w83977af_dma_receive(self);
} else
- outb(ICR_ERBRI, iobase+ICR);
+ outb(ICR_ERBRI, iobase + ICR);
/* Restore SSR */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
}
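The speed-change hunk follows the access discipline used throughout the driver: read SSR to remember the caller's register bank, select the needed set with switch_bank(), program the registers, then write the saved value back to SSR. A minimal sketch of that pattern (the helper name is purely illustrative):
/* Illustrative only: save bank, program, restore bank. */
static void banked_poke(int iobase)
{
__u8 set;
set = inb(iobase + SSR); /* save caller's bank */
switch_bank(iobase, SET0); /* select the set we need */
outb(0, iobase + ICR); /* program register(s) here */
outb(set, iobase + SSR); /* restore the saved bank */
}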
/*
iobase = self->io.fir_base;
- pr_debug("%s(%ld), skb->len=%d\n", __func__ , jiffies,
+ pr_debug("%s(%ld), skb->len=%d\n", __func__, jiffies,
(int)skb->len);
/* Lock transmit buffer */
}
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Decide if we should use PIO or DMA transfer */
if (self->io.speed > PIO_MAX_SPEED) {
self->tx_buff.len = skb->len;
mtt = irda_get_mtt(skb);
- pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
+ pr_debug("%s(%ld), mtt=%d\n", __func__, jiffies, mtt);
if (mtt > 1000)
- mdelay(mtt/1000);
+ mdelay(mtt / 1000);
else if (mtt)
udelay(mtt);
/* Enable DMA interrupt */
switch_bank(iobase, SET0);
- outb(ICR_EDMAI, iobase+ICR);
+ outb(ICR_EDMAI, iobase + ICR);
w83977af_dma_write(self, iobase);
} else {
self->tx_buff.data = self->tx_buff.head;
/* Add interrupt on tx low level (will fire immediately) */
switch_bank(iobase, SET0);
- outb(ICR_ETXTHI, iobase+ICR);
+ outb(ICR_ETXTHI, iobase + ICR);
}
dev_kfree_skb(skb);
/* Restore set register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return NETDEV_TX_OK;
}
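irda_get_mtt() returns the required minimum turnaround time in microseconds, so the transmit path above converts anything longer than a millisecond for mdelay() and hands shorter waits to udelay(). Restated with worked values:
/* mtt is in microseconds: mtt = 10000 -> mdelay(10),
 * mtt = 500 -> udelay(500), mtt = 0 -> no delay at all. */
mtt = irda_get_mtt(skb);
if (mtt > 1000)
mdelay(mtt / 1000);
else if (mtt)
udelay(mtt);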
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
__u8 set;
- pr_debug("%s(), len=%d\n", __func__ , self->tx_buff.len);
+ pr_debug("%s(), len=%d\n", __func__, self->tx_buff.len);
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Disable DMA */
switch_bank(iobase, SET0);
- outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
/* Choose transmit DMA channel */
switch_bank(iobase, SET2);
- outb(ADCR1_D_CHSW|/*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase+ADCR1);
+ outb(ADCR1_D_CHSW | /*ADCR1_DMA_F|*/ADCR1_ADV_SL, iobase + ADCR1);
irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
DMA_MODE_WRITE);
self->io.direction = IO_XMIT;
/* Enable DMA */
switch_bank(iobase, SET0);
- outb(inb(iobase+HCR) | HCR_EN_DMA | HCR_TX_WT, iobase+HCR);
+ outb(inb(iobase + HCR) | HCR_EN_DMA | HCR_TX_WT, iobase + HCR);
/* Restore set register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
}
/*
__u8 set;
/* Save current bank */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
switch_bank(iobase, SET0);
- if (!(inb_p(iobase+USR) & USR_TSRE)) {
+ if (!(inb_p(iobase + USR) & USR_TSRE)) {
pr_debug("%s(), warning, FIFO not empty yet!\n", __func__);
fifo_size -= 17;
pr_debug("%s(), %d bytes left in tx fifo\n",
- __func__ , fifo_size);
+ __func__, fifo_size);
}
/* Fill FIFO with current frame */
while ((fifo_size-- > 0) && (actual < len)) {
/* Transmit next byte */
- outb(buf[actual++], iobase+TBR);
+ outb(buf[actual++], iobase + TBR);
}
pr_debug("%s(), fifo_size %d ; %d sent of %d\n",
- __func__ , fifo_size, actual, len);
+ __func__, fifo_size, actual, len);
/* Restore bank */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return actual;
}
int iobase;
__u8 set;
- pr_debug("%s(%ld)\n", __func__ , jiffies);
+ pr_debug("%s(%ld)\n", __func__, jiffies);
IRDA_ASSERT(self != NULL, return;);
iobase = self->io.fir_base;
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Disable DMA */
switch_bank(iobase, SET0);
- outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
/* Check for underrun! */
- if (inb(iobase+AUDR) & AUDR_UNDR) {
+ if (inb(iobase + AUDR) & AUDR_UNDR) {
pr_debug("%s(), Transmit underrun!\n", __func__);
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;
/* Clear the bit by writing 1 to it */
- outb(AUDR_UNDR, iobase+AUDR);
+ outb(AUDR_UNDR, iobase + AUDR);
} else
self->netdev->stats.tx_packets++;
netif_wake_queue(self->netdev);
/* Restore set */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
}
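The underrun flag tested above is a write-1-to-clear status bit: writing AUDR_UNDR back to AUDR acknowledges the event, while writing 0 would leave it latched. The idiom in isolation:
/* Write-1-to-clear: the latched bit is cleared by writing it back. */
if (inb(iobase + AUDR) & AUDR_UNDR)
outb(AUDR_UNDR, iobase + AUDR);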
/*
pr_debug("%s\n", __func__);
- iobase= self->io.fir_base;
+ iobase = self->io.fir_base;
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Disable DMA */
switch_bank(iobase, SET0);
- outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
+ outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);
/* Choose DMA Rx, DMA Fairness, and Advanced mode */
switch_bank(iobase, SET2);
- outb((inb(iobase+ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/|ADCR1_ADV_SL,
- iobase+ADCR1);
+ outb((inb(iobase + ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/ | ADCR1_ADV_SL,
+ iobase + ADCR1);
self->io.direction = IO_RECV;
self->rx_buff.data = self->rx_buff.head;
* be finished transmitting yet
*/
switch_bank(iobase, SET0);
- outb(UFR_RXTL|UFR_TXTL|UFR_RXF_RST|UFR_EN_FIFO, iobase+UFR);
+ outb(UFR_RXTL | UFR_TXTL | UFR_RXF_RST | UFR_EN_FIFO, iobase + UFR);
self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;
/* Enable DMA */
switch_bank(iobase, SET0);
#ifdef CONFIG_ARCH_NETWINDER
- hcr = inb(iobase+HCR);
- outb(hcr | HCR_EN_DMA, iobase+HCR);
+ hcr = inb(iobase + HCR);
+ outb(hcr | HCR_EN_DMA, iobase + HCR);
enable_dma(self->io.dma);
spin_unlock_irqrestore(&self->lock, flags);
#else
- outb(inb(iobase+HCR) | HCR_EN_DMA, iobase+HCR);
+ outb(inb(iobase + HCR) | HCR_EN_DMA, iobase + HCR);
#endif
/* Restore set */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return 0;
}
iobase = self->io.fir_base;
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
iobase = self->io.fir_base;
/* Read status FIFO */
switch_bank(iobase, SET5);
- while ((status = inb(iobase+FS_FO)) & FS_FO_FSFDR) {
+ while ((status = inb(iobase + FS_FO)) & FS_FO_FSFDR) {
st_fifo->entries[st_fifo->tail].status = status;
- st_fifo->entries[st_fifo->tail].len = inb(iobase+RFLFL);
- st_fifo->entries[st_fifo->tail].len |= inb(iobase+RFLFH) << 8;
+ st_fifo->entries[st_fifo->tail].len = inb(iobase + RFLFL);
+ st_fifo->entries[st_fifo->tail].len |= inb(iobase + RFLFH) << 8;
st_fifo->tail++;
st_fifo->len++;
} else {
/* Check if we have transferred all data to memory */
switch_bank(iobase, SET0);
- if (inb(iobase+USR) & USR_RDR) {
+ if (inb(iobase + USR) & USR_RDR) {
udelay(80); /* Should be enough!? */
}
- skb = dev_alloc_skb(len+1);
+ skb = dev_alloc_skb(len + 1);
if (skb == NULL) {
printk(KERN_INFO
"%s(), memory squeeze, dropping frame.\n", __func__);
/* Restore set register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return FALSE;
}
/* Copy frame without CRC */
if (self->io.speed < 4000000) {
- skb_put(skb, len-2);
+ skb_put(skb, len - 2);
skb_copy_to_linear_data(skb,
self->rx_buff.data,
len - 2);
} else {
- skb_put(skb, len-4);
+ skb_put(skb, len - 4);
skb_copy_to_linear_data(skb,
self->rx_buff.data,
len - 4);
}
}
/* Restore set register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return TRUE;
}
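This hunk only shows the frame being copied with its CRC stripped (two trailing CRC bytes below 4 Mb/s, four at 4 Mb/s). The receive path normally ends by handing the skb to the stack; a hedged sketch of that step, which is not part of the hunk above:
/* Assumed continuation, not shown in the diff. */
skb->dev = self->netdev;
skb_reset_mac_header(skb);
skb->protocol = htons(ETH_P_IRDA);
netif_rx(skb);
self->netdev->stats.rx_packets++;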
/* Receive all characters in Rx FIFO */
do {
- byte = inb(iobase+RBR);
+ byte = inb(iobase + RBR);
async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
byte);
- } while (inb(iobase+USR) & USR_RDR); /* Data available */
+ } while (inb(iobase + USR) & USR_RDR); /* Data available */
}
/*
__u8 set;
int iobase;
- pr_debug("%s(), isr=%#x\n", __func__ , isr);
+ pr_debug("%s(), isr=%#x\n", __func__, isr);
iobase = self->io.fir_base;
/* Transmit FIFO low on data */
if (self->tx_buff.len > 0) {
new_icr |= ICR_ETXTHI;
} else {
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
switch_bank(iobase, SET0);
- outb(AUDR_SFEND, iobase+AUDR);
- outb(set, iobase+SSR);
+ outb(AUDR_SFEND, iobase + AUDR);
+ outb(set, iobase + SSR);
self->netdev->stats.tx_packets++;
int iobase;
iobase = self->io.fir_base;
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* End of frame detected in FIFO */
- if (isr & (ISR_FEND_I|ISR_FSF_I)) {
+ if (isr & (ISR_FEND_I | ISR_FSF_I)) {
if (w83977af_dma_receive_complete(self)) {
/* Wait for next status FIFO interrupt */
/* Set timer value, resolution 1 ms */
switch_bank(iobase, SET4);
- outb(0x01, iobase+TMRL); /* 1 ms */
- outb(0x00, iobase+TMRH);
+ outb(0x01, iobase + TMRL); /* 1 ms */
+ outb(0x00, iobase + TMRH);
/* Start timer */
- outb(IR_MSL_EN_TMR, iobase+IR_MSL);
+ outb(IR_MSL_EN_TMR, iobase + IR_MSL);
new_icr |= ICR_ETMRI;
}
if (isr & ISR_TMR_I) {
/* Disable timer */
switch_bank(iobase, SET4);
- outb(0, iobase+IR_MSL);
+ outb(0, iobase + IR_MSL);
/* Clear timer event */
/* switch_bank(iobase, SET0); */
}
/* Restore set */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return new_icr;
}
iobase = self->io.fir_base;
/* Save current bank */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
switch_bank(iobase, SET0);
- icr = inb(iobase+ICR);
- isr = inb(iobase+ISR) & icr; /* Mask out the interesting ones */
+ icr = inb(iobase + ICR);
+ isr = inb(iobase + ISR) & icr; /* Mask out the interesting ones */
- outb(0, iobase+ICR); /* Disable interrupts */
+ outb(0, iobase + ICR); /* Disable interrupts */
if (isr) {
/* Dispatch interrupt handler for the current speed */
- if (self->io.speed > PIO_MAX_SPEED )
+ if (self->io.speed > PIO_MAX_SPEED)
icr = w83977af_fir_interrupt(self, isr);
else
icr = w83977af_sir_interrupt(self, isr);
}
- outb(icr, iobase+ICR); /* Restore (new) interrupts */
- outb(set, iobase+SSR); /* Restore bank register */
+ outb(icr, iobase + ICR); /* Restore (new) interrupts */
+ outb(set, iobase + SSR); /* Restore bank register */
return IRQ_RETVAL(isr);
}
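Pulled together, the interrupt entry point reads: save the bank, mask the pending sources with the currently enabled ones, disable interrupts while handling, dispatch to the SIR or FIR handler by speed, then re-enable with the mask the handler returned and restore the bank. A compact restatement of that flow:
/* Restated flow of the hunks above, using the driver's names. */
set = inb(iobase + SSR);
switch_bank(iobase, SET0);
icr = inb(iobase + ICR);
isr = inb(iobase + ISR) & icr; /* only enabled sources */
outb(0, iobase + ICR); /* mask while handling */
if (isr)
icr = (self->io.speed > PIO_MAX_SPEED) ?
w83977af_fir_interrupt(self, isr) :
w83977af_sir_interrupt(self, isr);
outb(icr, iobase + ICR); /* new enable mask */
outb(set, iobase + SSR); /* restore saved bank */
return IRQ_RETVAL(isr);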
iobase = self->io.fir_base;
/* Check if rx FIFO is not empty */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
switch_bank(iobase, SET2);
- if ((inb(iobase+RXFDTH) & 0x3f) != 0) {
+ if ((inb(iobase + RXFDTH) & 0x3f) != 0) {
/* We are receiving something */
status = TRUE;
}
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
} else
status = (self->rx_buff.state != OUTSIDE_FRAME);
iobase = self->io.fir_base;
if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
- (void *) dev)) {
+ (void *)dev)) {
return -EAGAIN;
}
/*
}
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Enable some interrupts so we can receive frames again */
switch_bank(iobase, SET0);
if (self->io.speed > 115200) {
- outb(ICR_EFSFI, iobase+ICR);
+ outb(ICR_EFSFI, iobase + ICR);
w83977af_dma_receive(self);
} else
- outb(ICR_ERBRI, iobase+ICR);
+ outb(ICR_ERBRI, iobase + ICR);
/* Restore bank register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
/* Ready to play! */
netif_start_queue(dev);
disable_dma(self->io.dma);
/* Save current set */
- set = inb(iobase+SSR);
+ set = inb(iobase + SSR);
/* Disable interrupts */
switch_bank(iobase, SET0);
- outb(0, iobase+ICR);
+ outb(0, iobase + ICR);
free_irq(self->io.irq, dev);
free_dma(self->io.dma);
/* Restore bank register */
- outb(set, iobase+SSR);
+ outb(set, iobase + SSR);
return 0;
}
*/
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct if_irda_req *irq = (struct if_irda_req *) rq;
+ struct if_irda_req *irq = (struct if_irda_req *)rq;
struct w83977af_ir *self;
unsigned long flags;
int ret = 0;
IRDA_ASSERT(self != NULL, return -1;);
- pr_debug("%s(), %s, (cmd=0x%X)\n", __func__ , dev->name, cmd);
+ pr_debug("%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);
spin_lock_irqsave(&self->lock, flags);