page_alloc->page_size = PAGE_SIZE << order;
page_alloc->page = page;
page_alloc->dma = dma;
- page_alloc->page_offset = frag_info->frag_align;
+ page_alloc->page_offset = 0;
/* Not doing get_page() for each frag is a big win
* on asymmetric workloads. Note we cannot use atomic_set().
*/
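
With page_offset now fixed at 0, the allocator hands out whole (possibly
higher-order) pages and carves them into stride-sized fragments, so the
page reference count can be bumped once per page rather than once per
fragment. atomic_set() is unusable because other parts of the kernel may
still hold references that a blind store would clobber. The line doing the
batched update is elided above; a minimal sketch of the idea, with the
hypothetical helper name and parameters standing in for the real code
(page_ref_add() is the stock kernel helper for adjusting a page refcount):

static void sketch_take_frag_refs(struct page *pg, u32 pg_size, u32 stride)
{
	/* One pg_size-byte page yields pg_size / stride fragments; take
	 * all but one extra reference so each fragment owns exactly one.
	 */
	page_ref_add(pg, pg_size / stride - 1);
}
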
priv->frag_info[i].frag_size =
	(eff_mtu > buf_size + frag_sizes[i]) ?
	frag_sizes[i] : eff_mtu - buf_size;
priv->frag_info[i].frag_prefix_size = buf_size;
- if (!i) {
- priv->frag_info[i].frag_align = NET_IP_ALIGN;
- priv->frag_info[i].frag_stride =
- ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
- } else {
- priv->frag_info[i].frag_align = 0;
- priv->frag_info[i].frag_stride =
- ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
- }
+ priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i],
+ SMP_CACHE_BYTES);
buf_size += priv->frag_info[i].frag_size;
i++;
}
eff_mtu, priv->num_frags);
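
To see what the sizing loop above produces, here is a standalone userspace
sketch of the same greedy split; the frag_sizes table and eff_mtu value are
hypothetical stand-ins, and ALIGN mirrors the kernel's power-of-two macro:

#include <stdio.h>

#define SMP_CACHE_BYTES 64
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int frag_sizes[] = { 1536, 4096, 4096, 4096 };	/* hypothetical */
	int eff_mtu = 9022;	/* hypothetical jumbo-frame buffer size */
	int buf_size = 0, i = 0;

	while (buf_size < eff_mtu) {
		/* Take a full frag if it fits, else just the remainder. */
		int size = (eff_mtu > buf_size + frag_sizes[i]) ?
			   frag_sizes[i] : eff_mtu - buf_size;

		printf("frag:%d size:%d prefix:%d stride:%d\n",
		       i, size, buf_size, ALIGN(size, SMP_CACHE_BYTES));
		buf_size += size;
		i++;
	}
	printf("num_frags:%d\n", i);
	return 0;
}

With these inputs the 9022 bytes split into three fragments of 1536, 4096
and 3390 bytes; only the last stride is rounded (3390 up to 3392), since
the other sizes already sit on cache-line multiples.
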
for (i = 0; i < priv->num_frags; i++) {
en_err(priv,
- " frag:%d - size:%d prefix:%d align:%d stride:%d\n",
+ " frag:%d - size:%d prefix:%d stride:%d\n",
i,
priv->frag_info[i].frag_size,
priv->frag_info[i].frag_prefix_size,
- priv->frag_info[i].frag_align,
priv->frag_info[i].frag_stride);
}
}
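
Given the hypothetical layout from the sketch above, the dump loop would
emit lines of this shape (values illustrative, taken from that example):

 frag:0 - size:1536 prefix:0 stride:1536
 frag:1 - size:4096 prefix:1536 stride:4096
 frag:2 - size:3390 prefix:5632 stride:3392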