*ndesc = le32_to_cpu(desc->next_desc);
}

-static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc)
+static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc)
{
*pad0 = le32_to_cpu(desc->pad[0]);
*pad1 = le32_to_cpu(desc->pad[1]);
- *pad2 = le32_to_cpu(desc->pad[2]);
-}
-
-static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc)
-{
- u64 pad64;
-
- pad64 = le32_to_cpu(desc->pad[0]) +
- ((u64)le32_to_cpu(desc->pad[1]) << 32);
- *padptr = (void *)(uintptr_t)pad64;
}

static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
desc->packet_info = cpu_to_le32(pkt_info);
}

-static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc)
+static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc)
{
desc->pad[0] = cpu_to_le32(pad0);
desc->pad[1] = cpu_to_le32(pad1);
- desc->pad[2] = cpu_to_le32(pad1);
}
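
With pad[2] gone, get_pad_info()/set_pad_info() are again a plain two-word load/store, and the call sites below pass (u32 *)&ptr so that pad[0] lands directly in a pointer variable. A minimal userspace sketch of that round trip, assuming what the driver itself assumes, namely 32-bit pointers; the struct and helpers here only mirror the shape of the kernel code, and the cpu_to_le32()/le32_to_cpu() conversions are left out:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;
    struct fake_desc { u32 pad[2]; };  /* stand-in for the knav_dma_desc pad words */

    static void set_pad_info(u32 pad0, u32 pad1, struct fake_desc *d)
    {
            d->pad[0] = pad0;       /* the kernel helper wraps these in cpu_to_le32() */
            d->pad[1] = pad1;
    }

    static void get_pad_info(u32 *pad0, u32 *pad1, struct fake_desc *d)
    {
            *pad0 = d->pad[0];      /* ... and le32_to_cpu() on the way out */
            *pad1 = d->pad[1];
    }

    int main(void)
    {
            static char buffer[64];
            struct fake_desc desc;
            void *buf_ptr = NULL;
            u32 buf_len = 0;

            /* store: one pointer-sized word plus a length, as in the rx refill */
            set_pad_info((u32)(uintptr_t)buffer, sizeof(buffer), &desc);

            /* load: only a 32-bit pointer survives the (u32 *)&buf_ptr read-back */
            if (sizeof(void *) == sizeof(u32)) {
                    get_pad_info((u32 *)&buf_ptr, &buf_len, &desc);
                    assert(buf_ptr == (void *)buffer);
                    assert(buf_len == sizeof(buffer));
            }
            printf("pad = {%#x, %u}\n", (unsigned)desc.pad[0], (unsigned)desc.pad[1]);
            return 0;
    }

The sizeof check makes the assumption explicit: on a 64-bit build only the low half of the pointer would survive, which is exactly the case the removed get_pad_ptr()/lower_32_bits()/upper_32_bits() code was trying to handle.
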
static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
dma_addr_t dma_desc, dma_buf;
unsigned int buf_len, dma_sz = sizeof(*ndesc);
void *buf_ptr;
- u32 pad[2];
u32 tmp;
get_words(&dma_desc, 1, &desc->next_desc);
break;
}
get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
- get_pad_ptr(&buf_ptr, ndesc);
+ get_pad_info((u32 *)&buf_ptr, &buf_len, ndesc);
dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(buf_ptr);
knav_pool_desc_put(netcp->rx_pool, desc);
}
-
- get_pad_info(&pad[0], &pad[1], &buf_len, desc);
- buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32));
+ get_pad_info((u32 *)&buf_ptr, &buf_len, desc);
if (buf_ptr)
netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
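
Tearing down a chain, the restored code must hand each recovered buffer back to the allocator it came from: chained fragments are whole pages (__free_page() above), while the head buffer goes through netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr). Assuming netcp_frag_free() keeps the usual split between the page-frag allocator and kfree() (its body is not part of this excerpt), a stand-in sketch of that dispatch:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096u     /* illustrative; the kernel macro is per-arch */

    /* stand-ins for skb_free_frag() and kfree() */
    static void stub_free_frag(void *p) { printf("frag free %p\n", p); free(p); }
    static void stub_kfree(void *p)     { printf("kfree     %p\n", p); free(p); }

    /* mirrors the shape of netcp_frag_free(bool is_frag, void *ptr) */
    static void frag_free(bool is_frag, void *ptr)
    {
            if (is_frag)
                    stub_free_frag(ptr);    /* page-frag allocator buffers */
            else
                    stub_kfree(ptr);        /* oversized kmalloc buffers */
    }

    int main(void)
    {
            unsigned int buf_len = 1664;    /* a typical primary rx buffer */
            void *buf_ptr = malloc(buf_len);

            /* the call above: netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr) */
            frag_free(buf_len <= PAGE_SIZE, buf_ptr);
            return 0;
    }
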
dma_addr_t dma_desc, dma_buff;
struct netcp_packet p_info;
struct sk_buff *skb;
- u32 pad[2];
void *org_buf_ptr;
+ u32 tmp;
dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
if (!dma_desc)
}
get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
- get_pad_info(&pad[0], &pad[1], &org_buf_len, desc);
- org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32));
+ get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc);
if (unlikely(!org_buf_ptr)) {
dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
/* Fill in the page fragment list */
while (dma_desc) {
struct page *page;
- void *ptr;
ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
if (unlikely(!ndesc)) {
}
get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
- get_pad_ptr(&ptr, ndesc);
- page = ptr;
+ get_pad_info((u32 *)&page, &tmp, ndesc);
if (likely(dma_buff && buf_len && page)) {
dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
unsigned int buf_len, dma_sz;
dma_addr_t dma;
void *buf_ptr;
+ u32 tmp;
/* Allocate descriptor */
while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
}
get_org_pkt_info(&dma, &buf_len, desc);
- get_pad_ptr(&buf_ptr, desc);
+ get_pad_info((u32 *)&buf_ptr, &tmp, desc);
if (unlikely(!dma)) {
dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
struct page *page;
dma_addr_t dma;
void *bufptr;
- u32 pad[3];
+ u32 pad[2];
/* Allocate descriptor */
hwdesc = knav_pool_desc_get(netcp->rx_pool);
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bufptr = netdev_alloc_frag(primary_buf_len);
- pad[2] = primary_buf_len;
+ pad[1] = primary_buf_len;
if (unlikely(!bufptr)) {
dev_warn_ratelimited(netcp->ndev_dev,
if (unlikely(dma_mapping_error(netcp->dev, dma)))
goto fail;
- pad[0] = lower_32_bits((uintptr_t)bufptr);
- pad[1] = upper_32_bits((uintptr_t)bufptr);
-
+ pad[0] = (u32)bufptr;
} else {
/* Allocate a secondary receive queue entry */
page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
}
buf_len = PAGE_SIZE;
dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
- pad[0] = lower_32_bits(dma);
- pad[1] = upper_32_bits(dma);
- pad[2] = 0;
+ pad[0] = (u32)page;
+ pad[1] = 0;
}
desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
KNAV_DMA_DESC_RETQ_SHIFT;
set_org_pkt_info(dma, buf_len, hwdesc);
- set_pad_info(pad[0], pad[1], pad[2], hwdesc);
+ set_pad_info(pad[0], pad[1], hwdesc);
set_desc_info(desc_info, pkt_info, hwdesc);
/* Push to FDQs */
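
The refill path encodes two descriptor flavors into the same pad words: primary entries carry the netdev_alloc_frag() pointer plus its length in pad[1], secondary entries carry the struct page pointer with pad[1] = 0. The rx paths above depend on exactly this layout when they read page, buf_ptr, and the lengths back out. A small sketch of both encodings, again with 32-bit pointers assumed and the cpu_to_le32() step skipped:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;
    struct fake_desc { u32 pad[2]; };  /* stand-in for the knav_dma_desc pad words */

    int main(void)
    {
            static char frag[1664];    /* primary: netdev_alloc_frag() buffer */
            static char page[4096];    /* secondary: stand-in for a struct page */
            struct fake_desc primary, secondary;

            /* primary: pad[0] = (u32)bufptr, pad[1] = primary_buf_len */
            primary.pad[0] = (u32)(uintptr_t)frag;
            primary.pad[1] = sizeof(frag);

            /* secondary: pad[0] = (u32)page, pad[1] = 0 */
            secondary.pad[0] = (u32)(uintptr_t)page;
            secondary.pad[1] = 0;

            printf("primary   pad = {%#x, %u}\n",
                   (unsigned)primary.pad[0], (unsigned)primary.pad[1]);
            printf("secondary pad = {%#x, %u}\n",
                   (unsigned)secondary.pad[0], (unsigned)secondary.pad[1]);
            return 0;
    }
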
unsigned int budget)
{
struct knav_dma_desc *desc;
- void *ptr;
struct sk_buff *skb;
unsigned int dma_sz;
dma_addr_t dma;
int pkts = 0;
+ u32 tmp;
while (budget--) {
dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
continue;
}
- get_pad_ptr(&ptr, desc);
- skb = ptr;
+ get_pad_info((u32 *)&skb, &tmp, desc);
netcp_free_tx_desc_chain(netcp, desc, dma_sz);
if (!skb) {
dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
}
set_words(&tmp, 1, &desc->packet_info);
- tmp = lower_32_bits((uintptr_t)&skb);
- set_words(&tmp, 1, &desc->pad[0]);
- tmp = upper_32_bits((uintptr_t)&skb);
- set_words(&tmp, 1, &desc->pad[1]);
+ set_words((u32 *)&skb, 1, &desc->pad[0]);
if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
tmp = tx_pipe->switch_to_port;
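
The tx side restores the same single-word scheme for the sk_buff pointer: submit stores it with set_words((u32 *)&skb, 1, &desc->pad[0]) and the completion handler reads it back with get_pad_info((u32 *)&skb, &tmp, desc). The removed lines, by contrast, split (uintptr_t)&skb, the address of the local variable, across pad[0]/pad[1], so completion would have recovered a stack address rather than the skb itself. A sketch of the restored round trip; set_words() below is a shape-alike stand-in without the le32 conversion, a char array stands in for struct sk_buff, and the (u32 *)&skb punning is tolerable in the kernel because it builds with -fno-strict-aliasing:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;
    struct fake_desc { u32 pad[2]; };

    /* shape-alike for the driver's set_words(); no cpu_to_le32() here */
    static void set_words(const u32 *words, int num_words, u32 *dst)
    {
            for (int i = 0; i < num_words; i++)
                    dst[i] = words[i];
    }

    int main(void)
    {
            static char fake_skb[1];        /* stand-in for a struct sk_buff */
            char *skb = fake_skb;
            char *out = NULL;
            struct fake_desc desc = { { 0, 0 } };

            /* submit: store the pointer value itself, one 32-bit word */
            set_words((u32 *)&skb, 1, &desc.pad[0]);

            /* completion: the get_pad_info((u32 *)&skb, &tmp, desc) read-back */
            *(u32 *)&out = desc.pad[0];

            printf("stored %p, recovered %p (identical on 32-bit)\n",
                   (void *)skb, (void *)out);
            return 0;
    }
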