bool hfi1_can_pin_pages(struct hfi1_devdata *, u32, u32);
int hfi1_acquire_user_pages(unsigned long, size_t, bool, struct page **);
-void hfi1_release_user_pages(struct page **, size_t, bool);
+void hfi1_release_user_pages(struct mm_struct *, struct page **, size_t, bool);
static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
{
 * for example), unpin all unmapped pages so we can pin them next time.
*/
if (mapped_pages != pinned)
- hfi1_release_user_pages(&pages[mapped_pages],
+ hfi1_release_user_pages(current->mm, &pages[mapped_pages],
pinned - mapped_pages,
false);
bail:
pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len,
PCI_DMA_FROMDEVICE);
- hfi1_release_user_pages(node->pages, node->npages, true);
+ hfi1_release_user_pages(current->mm, node->pages, node->npages, true);
node->grp->used--;
node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
return ret;
}
-void hfi1_release_user_pages(struct page **p, size_t npages, bool dirty)
+void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
+ size_t npages, bool dirty)
{
size_t i;
put_page(p[i]);
}
- if (current->mm) { /* during close after signal, mm can be NULL */
- down_write(&current->mm->mmap_sem);
- current->mm->pinned_vm -= npages;
- up_write(&current->mm->mmap_sem);
+ if (mm) { /* during close after signal, mm can be NULL */
+ down_write(&mm->mmap_sem);
+ mm->pinned_vm -= npages;
+ up_write(&mm->mmap_sem);
}
}
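/*
 * Illustrative sketch, not part of this patch: the acquire-side
 * counterpart of the release path above, with a hypothetical name.
 * Pinning charges the pages to mm->pinned_vm under mmap_sem, which is
 * exactly the accounting that hfi1_release_user_pages() undoes. A
 * partial pin (pinned < npages) is returned to the caller, which then
 * releases the unmapped tail as in the hunk further up.
 */
static int example_acquire_user_pages(unsigned long vaddr, size_t npages,
				      bool writable, struct page **pages)
{
	int pinned;

	/* May pin fewer than npages; the caller must cope with that. */
	pinned = get_user_pages_fast(vaddr, npages, writable, pages);
	if (pinned <= 0)
		return pinned;

	down_write(&current->mm->mmap_sem);
	current->mm->pinned_vm += pinned;
	up_write(&current->mm->mmap_sem);

	return pinned;
}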
static void user_sdma_free_request(struct user_sdma_request *, bool);
static int pin_vector_pages(struct user_sdma_request *,
struct user_sdma_iovec *);
-static void unpin_vector_pages(struct page **, unsigned);
+static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned);
static int check_header_template(struct user_sdma_request *,
struct hfi1_pkt_header *, u32, u32);
static int set_txreq_header(struct user_sdma_request *,
goto bail;
}
if (pinned != npages) {
- unpin_vector_pages(pages, pinned);
+ unpin_vector_pages(current->mm, pages, pinned);
ret = -EFAULT;
goto bail;
}
return ret;
}
-static void unpin_vector_pages(struct page **pages, unsigned npages)
+static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+ unsigned npages)
{
- hfi1_release_user_pages(pages, npages, 0);
+ hfi1_release_user_pages(mm, pages, npages, 0);
kfree(pages);
}
struct sdma_mmu_node *node =
container_of(mnode, struct sdma_mmu_node, rb);
- if (!notifier)
- unpin_vector_pages(node->pages, node->npages);
+ unpin_vector_pages(notifier ? NULL : current->mm, node->pages,
+ node->npages);
+ /*
+ * If called by the MMU notifier, we have to adjust the pinned
+ * page count ourselves.
+ */
+ if (notifier)
+ current->mm->pinned_vm -= node->npages;
kfree(node);
}
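/*
 * Hedged sketch, not part of this patch: what the explicit mm argument
 * enables. The example_pin_record structure and both names below are
 * hypothetical. A caller that remembers which mm the pages were charged
 * to can release them from any context (close after a signal, MMU
 * notifier), instead of assuming current->mm still points at the
 * pinning task's mm.
 */
struct example_pin_record {
	struct mm_struct *saved_mm;	/* mm the pages were charged to */
	struct page **pages;
	size_t npages;
};

static void example_release_record(struct example_pin_record *rec)
{
	/* A NULL saved_mm skips the pinned_vm accounting, as in the release path above. */
	hfi1_release_user_pages(rec->saved_mm, rec->pages, rec->npages, true);
	kfree(rec->pages);
}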