/* Napi polling weight */
#define MVNETA_RX_POLL_WEIGHT 64
-/*
- * The two bytes Marvell header. Either contains a special value used
+/* The two-byte Marvell header. Either contains a special value used
* by Marvell switches when a specific hardware mode is enabled (not
* supported by this driver) or is filled automatically by zeroes on
* the RX side. Those two bytes being at the front of the Ethernet
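For illustration only, a hedged sketch of what consuming that header on RX can look like; MVNETA_MH_SIZE and the surrounding skb handling are assumptions, not taken from this patch:

	/* Sketch (assumed names): the HW fills the two MH bytes with
	 * zeroes on RX, so the driver just advances past them before
	 * handing the frame to the stack.
	 */
	skb_pull(skb, MVNETA_MH_SIZE);	/* assumed MVNETA_MH_SIZE == 2 */
	skb->protocol = eth_type_trans(skb, dev);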
unsigned int speed;
};
-/*
- * The mvneta_tx_desc and mvneta_rx_desc structures describe the
+/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
* layout of the transmit and reception DMA descriptors, and their
* layout is therefore defined by the hardware design
*/
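As an illustration of what "layout defined by the hardware design" means in practice, a hypothetical descriptor of this general shape; the field names and widths below are assumptions, not the driver's actual layout:

	/* Hypothetical example: every field sits at a fixed offset and
	 * width dictated by the silicon, so the struct must never be
	 * reordered or repacked.
	 */
	struct example_tx_desc {
		u32 command;		/* TX options/flags consumed by the HW */
		u16 reserved1;
		u16 data_size;		/* length of the buffer to transmit */
		u32 buf_phys_addr;	/* DMA address the HW fetches from */
		u32 reserved2[5];	/* pads the descriptor to its HW size */
	};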
int size;
/* Number of currently used TX DMA descriptor in the
- * descriptor ring */
+ * descriptor ring
+ */
int count;
/* Array of transmitted skb */
/* Rx descriptors helper methods */
-/*
- * Checks whether the given RX descriptor is both the first and the
+/* Checks whether the given RX descriptor is both the first and the
* last descriptor for the RX packet. Each RX packet is currently
* received through a single RX descriptor, so not having each RX
* descriptor with its first and last bits set is an error
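A hedged sketch of that check; the status field and the FIRST/LAST bit names are assumptions:

	/* Sketch (assumed bit names): a valid single-descriptor packet
	 * must carry both the FIRST and the LAST flag in its status word.
	 */
	static int example_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
	{
		u32 mask = MVNETA_RXD_FIRST_DESC | MVNETA_RXD_LAST_DESC;

		return (desc->status & mask) == mask;
	}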
int ndescs)
{
/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
- * be added at once */
+ * be added at once
+ */
while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
(MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}
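Since only 255 descriptors can be published per register write, the update shown above has to be chunked; a sketch of the full pattern, using only registers named in this patch (MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT is an assumed name):

	/* Sketch: publish ndescs non-occupied descriptors in <= 255 chunks */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			    MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}
	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);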
-/*
- * Update num of rx desc called upon return from rx path or
+/* Update num of rx desc called upon return from rx path or
* from mvneta_rxq_drop_pkts().
*/
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
u32 val;
/* Only 255 descriptors can be added at once ; Assume caller
- process TX desriptors in quanta less than 256 */
+	 * processes TX descriptors in quanta less than 256
+ */
val = pend_desc;
mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}
}
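The comment above relies on callers staying below 256 pending descriptors per call; if that assumption ever broke, a defensive chunked variant would look like this sketch:

	/* Sketch: chunk the pending-descriptor update to <= 255 per write */
	do {
		int val = min(pend_desc, 255);

		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		pend_desc -= val;
	} while (pend_desc > 0);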
/* Release the last allocated TX descriptor. Useful to handle DMA
- * mapping failures in the TX path. */
+ * mapping failures in the TX path.
+ */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
if (txq->next_desc_to_proc == 0)
} while (val & 0xff);
/* Stop Tx port activity. Check port Tx activity. Issue stop
- command for active channels only */
+ * command for active channels only
+ */
val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
if (val != 0)
mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
/* Set CPU queue access map - all CPUs have access to all RX
- queues and to all TX queues */
+ * queues and to all TX queues
+ */
for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
mvreg_write(pp, MVNETA_CPU_MAP(cpu),
(MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
mvneta_set_ucast_addr(pp, addr[5], queue);
}
-/*
- * Set the number of packets that will be received before
- * RX interrupt will be generated by HW.
+/* Set the number of packets that will be received before an RX
+ * interrupt is generated by the HW.
*/
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
struct mvneta_rx_queue *rxq, u32 value)
rxq->pkts_coal = value;
}
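A minimal sketch of what such a setter typically does; MVNETA_RXQ_THRESHOLD_REG is an assumed register name:

	/* Sketch (assumed register): program the packet threshold, then
	 * cache the value so it can be reported back via ethtool.
	 */
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), value);
	rxq->pkts_coal = value;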
-/*
- * Set the time delay in usec before
- * RX interrupt will be generated by HW.
+/* Set the time delay in usec before an RX interrupt is generated
+ * by the HW.
*/
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
struct mvneta_rx_queue *rxq, u32 value)
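The usec value presumably has to be scaled to clock cycles before being programmed; a sketch under that assumption (pp->clk_rate and the register name are assumptions):

	/* Sketch: convert usec to core-clock cycles, then arm the timer */
	u32 val = (pp->clk_rate / USEC_PER_SEC) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;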
return sent_desc;
}
-/*
- * Get number of sent descriptors and decrement counter.
+/* Get number of sent descriptors and decrement counter.
* The number of sent descriptors is returned.
*/
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
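A hedged sketch of the read-then-acknowledge pattern such a helper implements; the status register and the mask/shift names are assumptions:

	/* Sketch (assumed names): read the HW's sent counter, write the
	 * count back so the HW decrements it, and return it to the caller.
	 */
	u32 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	int sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
			MVNETA_TXQ_SENT_DESC_SHIFT;

	if (sent_desc)
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id),
			    sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT);
	return sent_desc;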
u32 command;
/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
- G_L4_chk, L4_type; required only for checksum
- calculation */
+ * G_L4_chk, L4_type; required only for checksum
+ * calculation
+ */
command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
return MVNETA_TX_L4_CSUM_NOT;
}
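For illustration, a sketch of how the remaining L3/L4 bits might be OR-ed into the command word after the two shifts above; every flag name except MVNETA_TX_L4_CSUM_NOT is an assumption:

	/* Sketch: fold the L3/L4 type and checksum-request bits in */
	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;		/* assumed flag */
	else
		command |= MVNETA_TX_L3_IP6;		/* assumed flag */

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;	/* assumed flag */
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;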
-/*
- * Returns rx queue pointer (find last set bit) according to causeRxTx
+/* Returns the rx queue pointer (find last set bit) according to the causeRxTx
* value
*/
static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
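"Find last set bit" maps directly onto the kernel's fls(); a sketch, assuming the per-queue RX cause bits start at bit 8 (as the mask handling later in this patch suggests) and that pp->rxqs is the queue array:

	/* Sketch: highest pending RX queue wins (RX bits assumed at 8..15) */
	int queue = fls(cause >> 8) - 1;

	return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];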
error:
/* Release all descriptors that were used to map fragments of
- * this packet, as well as the corresponding DMA mappings */
+ * this packet, as well as the corresponding DMA mappings
+ */
for (i = i - 1; i >= 0; i--) {
tx_desc = txq->descs + i;
dma_unmap_single(pp->dev->dev.parent,
mvneta_txq_done(pp, txq);
/* If after calling mvneta_txq_done, count equals
- frags, we need to set the timer */
+ * frags, we need to set the timer
+ */
if (txq->count == frags && frags > 0)
mvneta_add_tx_done_timer(pp);
return tx_done;
}
-/*
- * Compute crc8 of the specified address, using a unique algorithm ,
+/* Compute crc8 of the specified address, using a unique algorithm,
* according to hw spec, different than generic crc8 algorithm
*/
static int mvneta_addr_crc(unsigned char *addr)
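A sketch of the general shape a bit-serial, hardware-specific CRC-8 takes; the polynomial used below is an assumption, not the value from the Marvell spec:

	/* Sketch: bit-serial CRC-8 over the six MAC address bytes */
	int crc = 0;
	int i, j;

	for (i = 0; i < ETH_ALEN; i++) {
		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--)
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;	/* assumed polynomial */
	}
	return crc;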
cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
MVNETA_RX_INTR_MASK(rxq_number);
- /*
- * For the case where the last mvneta_poll did not process all
+ /* For the case where the last mvneta_poll did not process all
* RX packets
*/
cause_rx_tx |= pp->cause_rx_tx;
rx_done += count;
budget -= count;
if (budget > 0) {
- /* set off the rx bit of the corresponding bit
- in the cause rx tx register, so that next
- iteration will find the next rx queue where
- packets are received on */
+				/* clear the rx bit of the
+				 * corresponding queue in the cause
+				 * rx tx register, so that the next
+				 * iteration will find the next rx
+				 * queue where packets are received
+				 */
cause_rx_tx &= ~((1 << rxq->id) << 8);
}
}
}
/* Add this number of RX descriptors as non occupied (ready to
- get packets) */
+ * get packets)
+ */
mvneta_rxq_non_occup_desc_add(pp, rxq, i);
return i;
return -EINVAL;
}
- /* 9676 == 9700 - 20 and rounding to 8 */
+ /* 9676 == 9700 - 20 and rounding to 8 */
if (mtu > 9676) {
netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
mtu = 9676;
if (!netif_running(dev))
return 0;
- /*
- * The interface is running, so we have to force a
+ /* The interface is running, so we have to force a
* reallocation of the RXQs
*/
mvneta_stop_dev(pp);
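The forced reallocation presumably follows a stop / resize / restart sequence; a sketch under that assumption (the cleanup/setup helpers and MVNETA_RX_PKT_SIZE are assumed names):

	/* Sketch: quiesce, rebuild the RX queues for the new MTU, restart */
	mvneta_stop_dev(pp);

	mvneta_cleanup_rxqs(pp);		/* assumed helper */
	pp->pkt_size = MVNETA_RX_PKT_SIZE(mtu);	/* assumed macro */
	ret = mvneta_setup_rxqs(pp);		/* assumed helper */
	if (ret)
		return ret;

	mvneta_start_dev(pp);			/* assumed counterpart */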
int phy_mode;
int err;
- /*
- * Our multiqueue support is not complete, so for now, only
+ /* Our multiqueue support is not complete, so for now, only
* allow the usage of the first RX queue
*/
if (rxq_def != 0) {