From: Mitko Haralanov
Date: Tue, 8 Mar 2016 19:15:33 +0000 (-0800)
Subject: IB/hfi1: Specify mm when releasing pages
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=bd3a8947de916534722b0861d865d3a809c0743c;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

IB/hfi1: Specify mm when releasing pages

This change adds a pointer to the process mm_struct when calling
hfi1_release_user_pages(). Previously, the function used the mm_struct
of the current process to adjust the number of pinned pages. However,
in some cases, namely when unpinning pages due to an MMU notifier call,
we do not want to drop into that code block, as it would cause a
deadlock (the MMU notifiers take the process' mmap_sem prior to calling
the callbacks). By allowing the caller to specify the pointer to the
mm_struct, the caller has finer control over that part of
hfi1_release_user_pages().

Reviewed-by: Dennis Dalessandro
Reviewed-by: Dean Luick
Signed-off-by: Mitko Haralanov
Signed-off-by: Jubin John
Signed-off-by: Doug Ledford
---

diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/staging/rdma/hfi1/hfi.h
index ff3b37ad89a3..3dc644d92e3a 100644
--- a/drivers/staging/rdma/hfi1/hfi.h
+++ b/drivers/staging/rdma/hfi1/hfi.h
@@ -1666,7 +1666,7 @@ void shutdown_led_override(struct hfi1_pportdata *ppd);
 
 bool hfi1_can_pin_pages(struct hfi1_devdata *, u32, u32);
 int hfi1_acquire_user_pages(unsigned long, size_t, bool, struct page **);
-void hfi1_release_user_pages(struct page **, size_t, bool);
+void hfi1_release_user_pages(struct mm_struct *, struct page **, size_t, bool);
 
 static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
 {
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c
index bf670cbf82da..591605a13243 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.c
+++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c
@@ -550,7 +550,7 @@ nomem:
 	 * for example), unpin all unmapped pages so we can pin them nex time.
 	 */
 	if (mapped_pages != pinned)
-		hfi1_release_user_pages(&pages[mapped_pages],
+		hfi1_release_user_pages(current->mm, &pages[mapped_pages],
 					pinned - mapped_pages,
 					false);
 bail:
@@ -923,7 +923,7 @@ static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt,
 
 	pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len,
 			 PCI_DMA_FROMDEVICE);
-	hfi1_release_user_pages(node->pages, node->npages, true);
+	hfi1_release_user_pages(current->mm, node->pages, node->npages, true);
 
 	node->grp->used--;
 	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/staging/rdma/hfi1/user_pages.c
index bd7a8ab0d635..88e10b5f55f1 100644
--- a/drivers/staging/rdma/hfi1/user_pages.c
+++ b/drivers/staging/rdma/hfi1/user_pages.c
@@ -116,7 +116,8 @@ int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable,
 	return ret;
 }
 
-void hfi1_release_user_pages(struct page **p, size_t npages, bool dirty)
+void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
+			     size_t npages, bool dirty)
 {
 	size_t i;
 
@@ -126,9 +127,9 @@ void hfi1_release_user_pages(struct page **p, size_t npages, bool dirty)
 		put_page(p[i]);
 	}
 
-	if (current->mm) { /* during close after signal, mm can be NULL */
-		down_write(&current->mm->mmap_sem);
-		current->mm->pinned_vm -= npages;
-		up_write(&current->mm->mmap_sem);
+	if (mm) { /* during close after signal, mm can be NULL */
+		down_write(&mm->mmap_sem);
+		mm->pinned_vm -= npages;
+		up_write(&mm->mmap_sem);
 	}
 }
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
index a53edb96ca50..bf55a41d151a 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -277,7 +277,7 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
 static void user_sdma_free_request(struct user_sdma_request *, bool);
 static int pin_vector_pages(struct user_sdma_request *,
			    struct user_sdma_iovec *);
-static void unpin_vector_pages(struct page **, unsigned);
+static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned);
 static int check_header_template(struct user_sdma_request *,
				 struct hfi1_pkt_header *, u32, u32);
 static int set_txreq_header(struct user_sdma_request *,
@@ -1072,7 +1072,7 @@ static int pin_vector_pages(struct user_sdma_request *req,
 		goto bail;
 	}
 	if (pinned != npages) {
-		unpin_vector_pages(pages, pinned);
+		unpin_vector_pages(current->mm, pages, pinned);
 		ret = -EFAULT;
 		goto bail;
 	}
@@ -1097,9 +1097,10 @@ bail:
 	return ret;
 }
 
-static void unpin_vector_pages(struct page **pages, unsigned npages)
+static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+			       unsigned npages)
 {
-	hfi1_release_user_pages(pages, npages, 0);
+	hfi1_release_user_pages(mm, pages, npages, 0);
 	kfree(pages);
 }
 
@@ -1502,8 +1503,14 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
 	struct sdma_mmu_node *node =
		container_of(mnode, struct sdma_mmu_node, rb);
 
-	if (!notifier)
-		unpin_vector_pages(node->pages, node->npages);
+	unpin_vector_pages(notifier ? NULL : current->mm, node->pages,
+			   node->npages);
+	/*
+	 * If called by the MMU notifier, we have to adjust the pinned
+	 * page count ourselves.
+	 */
+	if (notifier)
+		current->mm->pinned_vm -= node->npages;
 	kfree(node);
 }
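
Editor's note: a minimal sketch of the calling convention this patch establishes.
It is not part of the patch; the two wrapper functions below are hypothetical and
exist only to contrast the two contexts the commit message describes.

	/*
	 * Process context: it is safe to take mmap_sem here, so pass
	 * current->mm and let hfi1_release_user_pages() adjust pinned_vm
	 * under the lock.
	 */
	static void example_unpin_process_ctx(struct page **pages, size_t npages)
	{
		hfi1_release_user_pages(current->mm, pages, npages, false);
	}

	/*
	 * MMU notifier context: the notifier already holds the process'
	 * mmap_sem, so taking it again would deadlock.  Pass NULL so the
	 * locked pinned_vm update is skipped, then adjust the counter
	 * directly, as sdma_rb_remove() does in the hunk above.
	 */
	static void example_unpin_notifier_ctx(struct page **pages, size_t npages)
	{
		hfi1_release_user_pages(NULL, pages, npages, false);
		current->mm->pinned_vm -= npages;
	}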