int init_loops;
struct timer_list setup_timer;
+ /* Flag set when the hw is ready to send the next packet */
int tx_ready;
- struct list_head tx_queue[NL_NUM_OF_PRIORITIES];
- /* True if any packets are queued for transmission */
+ /* Count of pending packets to be sent */
int tx_queued;
+ struct list_head tx_queue[NL_NUM_OF_PRIORITIES];
int rx_bytes_queued;
struct list_head rx_queue;
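For reference, the affected fields read as follows once the hunk above is applied; the containing structure is the driver's per-device hardware state (assumed here to be struct ipw_hardware), with unrelated fields elided:

struct ipw_hardware {
	/* ... */
	int init_loops;
	struct timer_list setup_timer;
	/* Flag set when the hw is ready to send the next packet */
	int tx_ready;
	/* Count of pending packets to be sent */
	int tx_queued;
	struct list_head tx_queue[NL_NUM_OF_PRIORITIES];
	int rx_bytes_queued;
	struct list_head rx_queue;
	/* ... */
};
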
spin_lock_irqsave(&hw->spinlock, flags);
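+ /* hw is busy until the interrupt handler sets tx_ready again */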
+ hw->tx_ready = 0;
+
if (hw->hw_version == HW_VERSION_1) {
outw((unsigned short) length, hw->base_port + IODWR);
spin_lock_irqsave(&hw->spinlock, flags);
list_add(&packet->queue, &hw->tx_queue[0]);
+ hw->tx_queued++;
spin_unlock_irqrestore(&hw->spinlock, flags);
} else {
if (packet->packet_callback)
unsigned long flags;
spin_lock_irqsave(&hw->spinlock, flags);
- if (hw->tx_queued && hw->tx_ready != 0) {
+ if (hw->tx_queued && hw->tx_ready) {
int priority;
struct ipw_tx_packet *packet = NULL;
- hw->tx_ready--;
-
/* Pick a packet */
for (priority = 0; priority < priority_limit; priority++) {
if (!list_empty(&hw->tx_queue[priority])) {
packet = list_first_entry(&hw->tx_queue[priority],
struct ipw_tx_packet,
queue);
+ hw->tx_queued--;
list_del(&packet->queue);
break;
}
}

if (!packet) {
spin_unlock_irqrestore(&hw->spinlock, flags);
return 0;
}
+
spin_unlock_irqrestore(&hw->spinlock, flags);
/* Send */
if (irqn & IR_TXINTR) {
ack |= IR_TXINTR;
spin_lock_irqsave(&hw->spinlock, flags);
- hw->tx_ready++;
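+ /* Transmit done: allow the next packet to be sent */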
+ hw->tx_ready = 1;
spin_unlock_irqrestore(&hw->spinlock, flags);
}
/* Received data */
if (memrxdone & MEMRX_RX_DONE) {
writew(0, &hw->memory_info_regs->memreg_rx_done);
spin_lock_irqsave(&hw->spinlock, flags);
- hw->tx_ready++;
+ hw->tx_ready = 1;
spin_unlock_irqrestore(&hw->spinlock, flags);
tx = 1;
}
spin_lock_irqsave(&hw->spinlock, flags);
list_add_tail(&packet->queue, &hw->tx_queue[priority]);
- hw->tx_queued = 1;
+ hw->tx_queued++;
spin_unlock_irqrestore(&hw->spinlock, flags);
flush_packets_to_hw(hw);
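
Taken together, the hunks turn tx_ready from a counter into a flag and tx_queued from a flag into a counter. The following sketch condenses the three sites above into one place purely for illustration; it is not part of the patch, and function boundaries and error handling are omitted:

/* Producer (queueing path): add a packet and kick the TX machinery */
spin_lock_irqsave(&hw->spinlock, flags);
list_add_tail(&packet->queue, &hw->tx_queue[priority]);
hw->tx_queued++;			/* one more pending packet */
spin_unlock_irqrestore(&hw->spinlock, flags);
flush_packets_to_hw(hw);

/* Consumer (send path): send only if work is queued and the hw is idle */
spin_lock_irqsave(&hw->spinlock, flags);
if (hw->tx_queued && hw->tx_ready) {
	/* pick the highest-priority packet, hw->tx_queued--, list_del() */
}
spin_unlock_irqrestore(&hw->spinlock, flags);
/* before writing to the hardware the send path clears hw->tx_ready */

/* Interrupt handler: the hw reports it can take more data */
spin_lock_irqsave(&hw->spinlock, flags);
hw->tx_ready = 1;			/* flag, not a counter */
spin_unlock_irqrestore(&hw->spinlock, flags);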