mb();
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
- drm_clflush_page(sg_iter.page);
+ drm_clflush_page(sg_page_iter_page(&sg_iter));
mb();
return;
struct sg_page_iter sg_iter;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
- return sg_iter.page;
+ return sg_page_iter_page(&sg_iter);
return NULL;
}
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
offset >> PAGE_SHIFT) {
- struct page *page = sg_iter.page;
+ struct page *page = sg_page_iter_page(&sg_iter);
if (remain <= 0)
break;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
offset >> PAGE_SHIFT) {
- struct page *page = sg_iter.page;
+ struct page *page = sg_page_iter_page(&sg_iter);
int partial_cacheline_write;
if (remain <= 0)
obj->dirty = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- struct page *page = sg_iter.page;
+ struct page *page = sg_page_iter_page(&sg_iter);
if (obj->dirty)
set_page_dirty(page);
err_pages:
sg_mark_end(sg);
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
- page_cache_release(sg_iter.page);
+ page_cache_release(sg_page_iter_page(&sg_iter));
sg_free_table(st);
kfree(st);
return PTR_ERR(page);
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
- pages[i++] = sg_iter.page;
+ pages[i++] = sg_page_iter_page(&sg_iter);
obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
drm_free_large(pages);
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
dma_addr_t page_addr;
- page_addr = sg_dma_address(sg_iter.sg) +
- (sg_iter.sg_pgoffset << PAGE_SHIFT);
+ page_addr = sg_page_iter_dma_address(&sg_iter);
pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
cache_level);
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
dma_addr_t addr;
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
- addr = sg_dma_address(sg_iter.sg) +
- (sg_iter.sg_pgoffset << PAGE_SHIFT);
+ addr = sg_page_iter_dma_address(&sg_iter);
iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
i++;
}
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- struct page *page = sg_iter.page;
+ struct page *page = sg_page_iter_page(&sg_iter);
char new_bit_17 = page_to_phys(page) >> 17;
if ((new_bit_17 & 0x1) !=
(test_bit(i, obj->bit_17) != 0)) {
i = 0;
for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- if (page_to_phys(sg_iter.page) & (1 << 17))
+ if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17))
__set_bit(i, obj->bit_17);
else
__clear_bit(i, obj->bit_17);
* sg page iterator
*
* Iterates over sg entries page-by-page. On each successful iteration,
- * @piter->page points to the current page, @piter->sg to the sg holding this
- * page and @piter->sg_pgoffset to the page's page offset within the sg. The
- * iteration will stop either when a maximum number of sg entries was reached
- * or a terminating sg (sg_last(sg) == true) was reached.
+ * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter)
+ * to get the current page and its dma address. @piter->sg will point to the
+ * sg holding this page and @piter->sg_pgoffset to the page's page offset
+ * within the sg. The iteration will stop either when the maximum number of
+ * sg entries is reached or when a terminating sg (sg_last(sg) == true) is
+ * reached.
*/
struct sg_page_iter {
- struct page *page; /* current page */
struct scatterlist *sg; /* sg holding the page */
unsigned int sg_pgoffset; /* page offset within the sg */
void __sg_page_iter_start(struct sg_page_iter *piter,
struct scatterlist *sglist, unsigned int nents,
unsigned long pgoffset);
+/**
+ * sg_page_iter_page - get the current page held by the page iterator
+ * @piter: page iterator holding the page
+ */
+static inline struct page *sg_page_iter_page(struct sg_page_iter *piter)
+{
+ return nth_page(sg_page(piter->sg), piter->sg_pgoffset);
+}
+
+/**
+ * sg_page_iter_dma_address - get the dma address of the current page held by
+ * the page iterator.
+ * @piter: page iterator holding the page
+ */
+static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter)
+{
+ return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT);
+}
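As a minimal usage sketch (not part of this patch; the DMA-mapped struct sg_table pointer "st" is an assumed caller-side variable), the two helpers above are meant to be paired with for_each_sg_page(); sg_page_iter_dma_address() only yields a meaningful value once the list has been mapped with dma_map_sg():

	/* assumes <linux/scatterlist.h>; "st" is a hypothetical, already DMA-mapped sg_table */
	struct sg_page_iter sg_iter;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);       /* current page */
		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);  /* its DMA address */

		/* ... operate on one PAGE_SIZE chunk at a time ... */
	}

Computing the page on demand in sg_page_iter_page() is what lets the patch drop the cached page field from struct sg_page_iter and the nth_page() lookup from __sg_page_iter_next() below.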
/**
* for_each_sg_page - iterate over the pages of the given sg list
piter->__pg_advance = 0;
piter->__nents = nents;
- piter->page = NULL;
piter->sg = sglist;
piter->sg_pgoffset = pgoffset;
}
if (!--piter->__nents || !piter->sg)
return false;
}
- piter->page = nth_page(sg_page(piter->sg), piter->sg_pgoffset);
return true;
}
miter->__remaining = min_t(unsigned long, miter->__remaining,
PAGE_SIZE - miter->__offset);
}
- miter->page = miter->piter.page;
+ miter->page = sg_page_iter_page(&miter->piter);
miter->consumed = miter->length = miter->__remaining;
if (miter->__flags & SG_MITER_ATOMIC)