/* DMA channel register entry bit positions and sizes */
#define DMA_CH_CR_PBLX8_INDEX 16
#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_CR_SPH_INDEX 24
+#define DMA_CH_CR_SPH_WIDTH 1
#define DMA_CH_IER_AIE_INDEX 15
#define DMA_CH_IER_AIE_WIDTH 1
#define DMA_CH_IER_FBEE_INDEX 12
#define MAC_RCR_CST_WIDTH 1
#define MAC_RCR_DCRCC_INDEX 3
#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_HDSMS_INDEX 12
+#define MAC_RCR_HDSMS_WIDTH 3
#define MAC_RCR_IPC_INDEX 9
#define MAC_RCR_IPC_WIDTH 1
#define MAC_RCR_JE_INDEX 8
#define RX_NORMAL_DESC0_OVT_INDEX 0
#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC2_HL_INDEX 0
+#define RX_NORMAL_DESC2_HL_WIDTH 10
#define RX_NORMAL_DESC3_CDA_INDEX 27
#define RX_NORMAL_DESC3_CDA_WIDTH 1
#define RX_NORMAL_DESC3_CTXT_INDEX 30
#define RX_NORMAL_DESC3_ES_WIDTH 1
#define RX_NORMAL_DESC3_ETLT_INDEX 16
#define RX_NORMAL_DESC3_ETLT_WIDTH 4
+#define RX_NORMAL_DESC3_FD_INDEX 29
+#define RX_NORMAL_DESC3_FD_WIDTH 1
#define RX_NORMAL_DESC3_INTE_INDEX 30
#define RX_NORMAL_DESC3_INTE_WIDTH 1
#define RX_NORMAL_DESC3_LD_INDEX 28
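The new SPH, HDSMS, HL, and FD definitions follow the driver's existing convention: each _INDEX gives a field's bit offset within a 32-bit register or descriptor word and each _WIDTH its size in bits, and the XGMAC_*_BITS macro family used later in the patch consumes these pairs. As a rough sketch of the shift-and-mask access such a pair implies (illustrative only; field_get is a hypothetical helper, not a driver macro):

#include <linux/types.h>

/* Extract a field, e.g. the received header length, from a descriptor
 * word that has already been converted with le32_to_cpu().
 */
static inline unsigned int field_get(u32 word, unsigned int index,
				     unsigned int width)
{
	return (word >> index) & ((1U << width) - 1);
}

/* usage sketch: hdr_len = field_get(desc2, RX_NORMAL_DESC2_HL_INDEX,
 *				     RX_NORMAL_DESC2_HL_WIDTH);
 */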
ring->rdata = NULL;
}
- if (ring->rx_pa.pages) {
- dma_unmap_page(pdata->dev, ring->rx_pa.pages_dma,
- ring->rx_pa.pages_len, DMA_FROM_DEVICE);
- put_page(ring->rx_pa.pages);
-
- ring->rx_pa.pages = NULL;
- ring->rx_pa.pages_len = 0;
- ring->rx_pa.pages_offset = 0;
- ring->rx_pa.pages_dma = 0;
+ if (ring->rx_hdr_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+ ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_hdr_pa.pages);
+
+ ring->rx_hdr_pa.pages = NULL;
+ ring->rx_hdr_pa.pages_len = 0;
+ ring->rx_hdr_pa.pages_offset = 0;
+ ring->rx_hdr_pa.pages_dma = 0;
+ }
+
+ if (ring->rx_buf_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+ ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_buf_pa.pages);
+
+ ring->rx_buf_pa.pages = NULL;
+ ring->rx_buf_pa.pages_len = 0;
+ ring->rx_buf_pa.pages_offset = 0;
+ ring->rx_buf_pa.pages_dma = 0;
}
if (ring->rdesc) {
return ret;
}
-static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
- struct xgbe_ring *ring,
- struct xgbe_ring_data *rdata)
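+/* Allocate a group of pages of at most the requested order, falling back
+ * to lower orders if allocation fails, and DMA-map the result for receive
+ * use.  The mapping details are recorded in the xgbe_page_alloc tracker.
+ */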
+static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ struct xgbe_page_alloc *pa, gfp_t gfp, int order)
{
- if (!ring->rx_pa.pages) {
- struct page *pages = NULL;
- dma_addr_t pages_dma;
- gfp_t gfp;
- int order, ret;
-
- /* Try to obtain pages, decreasing order if necessary */
- gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP;
- order = max_t(int, PAGE_ALLOC_COSTLY_ORDER, 1);
- while (--order >= 0) {
- pages = alloc_pages(gfp, order);
- if (pages)
- break;
- }
- if (!pages)
- return -ENOMEM;
+ struct page *pages = NULL;
+ dma_addr_t pages_dma;
+ int ret;
- /* Map the pages */
- pages_dma = dma_map_page(pdata->dev, pages, 0,
- PAGE_SIZE << order, DMA_FROM_DEVICE);
- ret = dma_mapping_error(pdata->dev, pages_dma);
- if (ret) {
- put_page(pages);
- return ret;
- }
+ /* Try to obtain pages, decreasing order if necessary */
+ gfp |= __GFP_COLD | __GFP_COMP;
+ while (order >= 0) {
+ pages = alloc_pages(gfp, order);
+ if (pages)
+ break;
- /* Set the values for this ring */
- ring->rx_pa.pages = pages;
- ring->rx_pa.pages_len = PAGE_SIZE << order;
- ring->rx_pa.pages_offset = 0;
- ring->rx_pa.pages_dma = pages_dma;
+ order--;
}
+ if (!pages)
+ return -ENOMEM;
- get_page(ring->rx_pa.pages);
- rdata->rx_pa = ring->rx_pa;
+ /* Map the pages */
+ pages_dma = dma_map_page(pdata->dev, pages, 0,
+ PAGE_SIZE << order, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(pdata->dev, pages_dma);
+ if (ret) {
+ put_page(pages);
+ return ret;
+ }
- rdata->rx_dma = ring->rx_pa.pages_dma + ring->rx_pa.pages_offset;
- rdata->rx_dma_len = pdata->rx_buf_size;
+ pa->pages = pages;
+ pa->pages_len = PAGE_SIZE << order;
+ pa->pages_offset = 0;
+ pa->pages_dma = pages_dma;
- ring->rx_pa.pages_offset += pdata->rx_buf_size;
- if ((ring->rx_pa.pages_offset + pdata->rx_buf_size) >
- ring->rx_pa.pages_len) {
+ return 0;
+}
+
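+/* Carve a len-sized slice out of the current page allocation for one ring
+ * entry: take a page reference, record the slice's DMA address and length,
+ * and, once the pages are used up, pass unmap responsibility to this entry
+ * and clear the tracker so a fresh allocation is done next time.
+ */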
+static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
+ struct xgbe_page_alloc *pa,
+ unsigned int len)
+{
+ get_page(pa->pages);
+ bd->pa = *pa;
+
+ bd->dma = pa->pages_dma + pa->pages_offset;
+ bd->dma_len = len;
+
+ pa->pages_offset += len;
+ if ((pa->pages_offset + len) > pa->pages_len) {
/* This data descriptor is responsible for unmapping page(s) */
- rdata->rx_unmap = ring->rx_pa;
+ bd->pa_unmap = *pa;
/* Get a new allocation next time */
- ring->rx_pa.pages = NULL;
- ring->rx_pa.pages_len = 0;
- ring->rx_pa.pages_offset = 0;
- ring->rx_pa.pages_dma = 0;
+ pa->pages = NULL;
+ pa->pages_len = 0;
+ pa->pages_offset = 0;
+ pa->pages_dma = 0;
+ }
+}
+
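+/* Make sure the ring has header and payload page pools, then assign this
+ * ring entry a header slice (XGBE_SKB_ALLOC_SIZE bytes) and a payload
+ * slice (rx_buf_size bytes) from those pools.
+ */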
+static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring,
+ struct xgbe_ring_data *rdata)
+{
+ int order, ret;
+
+ if (!ring->rx_hdr_pa.pages) {
+ ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (!ring->rx_buf_pa.pages) {
+ order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+ ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
+ order);
+ if (ret)
+ return ret;
}
+ /* Set up the header page info */
+ xgbe_set_buffer_data(&rdata->rx_hdr, &ring->rx_hdr_pa,
+ XGBE_SKB_ALLOC_SIZE);
+
+ /* Set up the buffer page info */
+ xgbe_set_buffer_data(&rdata->rx_buf, &ring->rx_buf_pa,
+ pdata->rx_buf_size);
+
return 0;
}
rdata->skb = NULL;
}
- if (rdata->rx_pa.pages)
- put_page(rdata->rx_pa.pages);
+ if (rdata->rx_hdr.pa.pages)
+ put_page(rdata->rx_hdr.pa.pages);
- if (rdata->rx_unmap.pages) {
- dma_unmap_page(pdata->dev, rdata->rx_unmap.pages_dma,
- rdata->rx_unmap.pages_len, DMA_FROM_DEVICE);
- put_page(rdata->rx_unmap.pages);
+ if (rdata->rx_hdr.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, rdata->rx_hdr.pa_unmap.pages_dma,
+ rdata->rx_hdr.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(rdata->rx_hdr.pa_unmap.pages);
}
- memset(&rdata->rx_pa, 0, sizeof(rdata->rx_pa));
- memset(&rdata->rx_unmap, 0, sizeof(rdata->rx_unmap));
+ if (rdata->rx_buf.pa.pages)
+ put_page(rdata->rx_buf.pa.pages);
+
+ if (rdata->rx_buf.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, rdata->rx_buf.pa_unmap.pages_dma,
+ rdata->rx_buf.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(rdata->rx_buf.pa_unmap.pages);
+ }
- rdata->rx_dma = 0;
- rdata->rx_dma_len = 0;
+ memset(&rdata->rx_hdr, 0, sizeof(rdata->rx_hdr));
+ memset(&rdata->rx_buf, 0, sizeof(rdata->rx_buf));
rdata->tso_header = 0;
rdata->len = 0;
}
}
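+/* Enable split header processing on every DMA channel with an Rx ring and
+ * program the maximum header size into the MAC Rx configuration register.
+ */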
+static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
+ }
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+}
+
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
unsigned int max_q_count, q_count;
struct xgbe_ring_desc *rdesc = rdata->rdesc;
/* Reset the Rx descriptor
- * Set buffer 1 (lo) address to dma address (lo)
- * Set buffer 1 (hi) address to dma address (hi)
- * Set buffer 2 (lo) address to zero
- * Set buffer 2 (hi) address to zero and set control bits
- * OWN and INTE
+ * Set buffer 1 (lo) address to header dma address (lo)
+ * Set buffer 1 (hi) address to header dma address (hi)
+ * Set buffer 2 (lo) address to buffer dma address (lo)
+ * Set buffer 2 (hi) address to buffer dma address (hi) and
+ * set control bits OWN and INTE
*/
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx_dma));
- rdesc->desc2 = 0;
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx_hdr.dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx_hdr.dma));
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx_buf.dma));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx_buf.dma));
- rdesc->desc3 = 0;
- if (rdata->interrupt)
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
+ rdata->interrupt ? 1 : 0);
/* Since the Rx DMA engine is likely running, make sure everything
* is written to the descriptor(s) before setting the OWN bit
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
CONTEXT_NEXT, 1);
+ /* Get the header length */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+ rdata->hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
+ RX_NORMAL_DESC2, HL);
+
/* Get the packet length */
rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
xgbe_config_tso_mode(pdata);
+ xgbe_config_sph_mode(pdata);
desc_if->wrapper_tx_desc_init(pdata);
desc_if->wrapper_rx_desc_init(pdata);
xgbe_enable_dma_interrupts(pdata);
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
struct xgbe_ring_data *rdata,
- unsigned int len)
+ unsigned int *len)
{
struct net_device *netdev = pdata->netdev;
struct sk_buff *skb;
u8 *packet;
unsigned int copy_len;
- skb = netdev_alloc_skb_ip_align(netdev, XGBE_SKB_ALLOC_SIZE);
+ skb = netdev_alloc_skb_ip_align(netdev, rdata->rx_hdr.dma_len);
if (!skb)
return NULL;
- packet = page_address(rdata->rx_pa.pages) + rdata->rx_pa.pages_offset;
- copy_len = min_t(unsigned int, XGBE_SKB_ALLOC_SIZE, len);
+ packet = page_address(rdata->rx_hdr.pa.pages) +
+ rdata->rx_hdr.pa.pages_offset;
+ copy_len = (rdata->hdr_len) ? rdata->hdr_len : *len;
+ copy_len = min(rdata->rx_hdr.dma_len, copy_len);
skb_copy_to_linear_data(skb, packet, copy_len);
skb_put(skb, copy_len);
- rdata->rx_pa.pages_offset += copy_len;
- len -= copy_len;
- if (len)
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rdata->rx_pa.pages,
- rdata->rx_pa.pages_offset,
- len, rdata->rx_dma_len);
- else
- put_page(rdata->rx_pa.pages);
+ *len -= copy_len;
return skb;
}
ring->cur++;
ring->dirty++;
- dma_sync_single_for_cpu(pdata->dev, rdata->rx_dma,
- rdata->rx_dma_len,
- DMA_FROM_DEVICE);
-
incomplete = XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES,
INCOMPLETE);
len += put_len;
if (!skb) {
- skb = xgbe_create_skb(pdata, rdata, put_len);
+ dma_sync_single_for_cpu(pdata->dev,
+ rdata->rx_hdr.dma,
+ rdata->rx_hdr.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb = xgbe_create_skb(pdata, rdata, &put_len);
if (!skb) {
error = 1;
goto read_again;
}
- } else {
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rdata->rx_pa.pages,
- rdata->rx_pa.pages_offset,
- put_len, rdata->rx_dma_len);
}
- rdata->rx_pa.pages = NULL;
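+ /* Attach any remaining payload from the buffer page to the skb as a page fragment */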
+ if (put_len) {
+ dma_sync_single_for_cpu(pdata->dev,
+ rdata->rx_buf.dma,
+ rdata->rx_buf.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ rdata->rx_buf.pa.pages,
+ rdata->rx_buf.pa.pages_offset,
+ put_len, rdata->rx_buf.dma_len);
+ rdata->rx_buf.pa.pages = NULL;
+ }
}
if (incomplete || context_next)
while (count--) {
rdata = XGBE_GET_DESC_DATA(ring, idx);
rdesc = rdata->rdesc;
- DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
- (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
- le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
- le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+ pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
idx++;
}
}
void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
unsigned int idx)
{
- DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
- le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
- le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+ pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
+ le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
+ le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
}
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define XGBE_RX_BUF_ALIGN 64
#define XGBE_SKB_ALLOC_SIZE 256
+#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with SKB_ALLOC_SIZE */
#define XGBE_MAX_DMA_CHANNELS 16
#define XGBE_MAX_QUEUES 16
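XGBE_SPH_HDSMS_SIZE is the value written to the MAC_RCR HDSMS field by xgbe_config_sph_mode(). Assuming the hardware encodes the split boundary as 64 << HDSMS bytes (an assumption about the field encoding, not stated in this patch), a value of 2 selects a 256-byte header, which is why the define must track XGBE_SKB_ALLOC_SIZE above. A minimal sketch of a build-time check under that assumption, using a hypothetical helper:

#include <linux/bug.h>

/* Sketch only, assuming HDSMS selects a 64 << HDSMS byte header split;
 * xgbe_check_sph_size() is a hypothetical helper that could be called
 * from, e.g., xgbe_config_sph_mode().
 */
static inline void xgbe_check_sph_size(void)
{
	BUILD_BUG_ON((64 << XGBE_SPH_HDSMS_SIZE) != XGBE_SKB_ALLOC_SIZE);
}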
dma_addr_t pages_dma;
};
+/* Ring entry buffer data */
+struct xgbe_buffer_data {
+ struct xgbe_page_alloc pa;
+ struct xgbe_page_alloc pa_unmap;
+
+ dma_addr_t dma;
+ unsigned int dma_len;
+};
+
/* Structure used to hold information related to the descriptor
 * and the packet associated with the descriptor (always use
 * the XGBE_GET_DESC_DATA macro to access this data from the ring)
unsigned int skb_dma_len; /* Length of SKB DMA area */
unsigned int tso_header; /* TSO header indicator */
- struct xgbe_page_alloc rx_pa; /* Rx buffer page allocation */
- struct xgbe_page_alloc rx_unmap;
-
- dma_addr_t rx_dma; /* DMA address of Rx buffer */
- unsigned int rx_dma_len; /* Length of the Rx DMA buffer */
+ struct xgbe_buffer_data rx_hdr; /* Header locations */
+ struct xgbe_buffer_data rx_buf; /* Payload locations */
+ unsigned short hdr_len; /* Length of received header */
unsigned short len; /* Length of received Rx packet */
unsigned int interrupt; /* Interrupt indicator */
struct xgbe_ring_data *rdata;
/* Page allocation for RX buffers */
- struct xgbe_page_alloc rx_pa;
+ struct xgbe_page_alloc rx_hdr_pa;
+ struct xgbe_page_alloc rx_buf_pa;
/* Ring index values
* cur - Tx: index of descriptor to be used for current transfer