/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Referred to sometimes as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (__fmr_unmap).
 */

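/* A minimal sketch of that verb pattern (not part of this file's
 * logic; see fmr_op_map and __fmr_unmap for the real calls),
 * assuming physaddrs[] already holds npages page addresses:
 *
 *	LIST_HEAD(l);
 *
 *	rc = ib_map_phys_fmr(fmr, physaddrs, npages, physaddrs[0]);
 *	... advertise fmr->rkey to the peer for RDMA READ/WRITE ...
 *	list_add(&fmr->list, &l);
 *	rc = ib_unmap_fmr(&l);
 */
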
#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)

/* Access mode of externally registered pages */
enum {
	RPCRDMA_FMR_ACCESS_FLAGS	= IB_ACCESS_REMOTE_WRITE |
					  IB_ACCESS_REMOTE_READ,
};

bool
fmr_is_supported(struct rpcrdma_ia *ia)
{
	if (!ia->ri_device->alloc_fmr) {
		pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
			ia->ri_device->name);
		return false;
	}
	return true;
}

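/* Set up one MW: allocate the FMR itself plus the physical-address
 * and scatterlist arrays that fmr_op_map fills in. max_maps is 1,
 * so an FMR must be unmapped before it can be mapped again.
 */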
static int
fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
{
	static struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};

	mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
				       sizeof(u64), GFP_KERNEL);
	if (!mw->fmr.fm_physaddrs)
		goto out_free;

	mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
			    sizeof(*mw->mw_sg), GFP_KERNEL);
	if (!mw->mw_sg)
		goto out_free;

	sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);

	mw->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
				     &fmr_attr);
	if (IS_ERR(mw->fmr.fm_mr))
		goto out_fmr_err;

	return 0;

out_fmr_err:
	dprintk("RPC:       %s: ib_alloc_fmr returned %ld\n", __func__,
		PTR_ERR(mw->fmr.fm_mr));

out_free:
	kfree(mw->mw_sg);
	kfree(mw->fmr.fm_physaddrs);
	return -ENOMEM;
}

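/* Invalidate one FMR. ib_unmap_fmr() operates on a list of FMRs,
 * so the FMR is placed on a single-entry local list for the call.
 */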
static int
__fmr_unmap(struct rpcrdma_mw *mw)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mw->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del_init(&mw->fmr.fm_mr->list);
	return rc;
}

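/* Release all resources attached to one MW: the arrays allocated
 * by fmr_op_init_mr, the FMR itself, and finally the MW.
 */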
static void
fmr_op_release_mr(struct rpcrdma_mw *r)
{
	int rc;

	/* Ensure MW is not on any rl_registered list */
	if (!list_empty(&r->mw_list))
		list_del(&r->mw_list);

	kfree(r->fmr.fm_physaddrs);
	kfree(r->mw_sg);

	/* In case this one was left mapped, try to unmap it
	 * to prevent dealloc_fmr from failing with EBUSY
	 */
	rc = __fmr_unmap(r);
	if (rc)
		pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
		       r, rc);

	rc = ib_dealloc_fmr(r->fmr.fm_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
		       r, rc);

	kfree(r);
}

/* Reset of a single FMR: invalidate it, then DMA unmap its pages
 * and return it to the free MW list; on failure, orphan the MW.
 */
static void
fmr_op_recover_mr(struct rpcrdma_mw *mw)
{
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	int rc;

	/* ORDER: invalidate first */
	rc = __fmr_unmap(mw);

	/* ORDER: then DMA unmap */
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (rc)
		goto out_release;

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mw);
	r_xprt->rx_stats.mrs_orphaned++;

	spin_lock(&r_xprt->rx_buf.rb_mwlock);
	list_del(&mw->mw_all);
	spin_unlock(&r_xprt->rx_buf.rb_mwlock);

	fmr_op_release_mr(mw);
}

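/* On transport open, compute the RPC-over-RDMA header sizes: a
 * full-sized payload may need as many as RPCRDMA_MAX_DATA_SEGS /
 * RPCRDMA_MAX_FMR_SGES chunk segments.
 */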
static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
						      RPCRDMA_MAX_DATA_SEGS /
						      RPCRDMA_MAX_FMR_SGES));
	return 0;
}

/* FMR mode conveys up to 64 pages of payload per chunk segment;
 * with 4KB pages, that is up to 256KB per segment.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}

/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing, struct rpcrdma_mw **out)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mw *mw;
	u64 *dma_pages;

	mw = rpcrdma_get_mw(r_xprt);
	if (!mw)
		return -ENOBUFS;

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes: an FMR maps one physically
		 * contiguous, page-aligned range, so stop at the
		 * first segment that does not abut a page boundary.
		 */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_nents = i;
	mw->mw_dir = rpcrdma_data_dir(writing);
	if (i == 0)
		goto out_dmamap_err;

	if (!ib_dma_map_sg(r_xprt->rx_ia.ri_device,
			   mw->mw_sg, mw->mw_nents, mw->mw_dir))
		goto out_dmamap_err;

	for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
		dma_pages[i] = sg_dma_address(&mw->mw_sg[i]);
	rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
			     dma_pages[0]);
	if (rc)
		goto out_maperr;

	mw->mw_handle = mw->fmr.fm_mr->rkey;
	mw->mw_length = len;
	mw->mw_offset = dma_pages[0] + pageoff;

	*out = mw;
	return mw->mw_nents;

out_dmamap_err:
	pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
	       mw->mw_sg, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return -EIO;

out_maperr:
	pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
	       len, (unsigned long long)dma_pages[0],
	       pageoff, mw->mw_nents, rc);
	rpcrdma_defer_mr_recovery(mw);
	return -EIO;
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that req->rl_registered is not empty.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct rpcrdma_mw *mw, *tmp;
	LIST_HEAD(unmap_list);
	int rc;

	dprintk("RPC:       %s: req %p\n", __func__, req);

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * ib_unmap_fmr() is slow, so use a single call instead
	 * of one call per mapped FMR.
	 */
	list_for_each_entry(mw, &req->rl_registered, mw_list)
		list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);
	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		goto out_reset;

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
	list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
		list_del_init(&mw->mw_list);
		list_del_init(&mw->fmr.fm_mr->list);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);
	}

	return;

out_reset:
	pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);

	list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) {
		list_del_init(&mw->fmr.fm_mr->list);
		fmr_op_recover_mr(mw);
	}
}

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		  bool sync)
{
	struct rpcrdma_mw *mw;

	while (!list_empty(&req->rl_registered)) {
		mw = list_first_entry(&req->rl_registered,
				      struct rpcrdma_mw, mw_list);
		list_del_init(&mw->mw_list);

		if (sync)
			fmr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map				= fmr_op_map,
	.ro_unmap_sync			= fmr_op_unmap_sync,
	.ro_unmap_safe			= fmr_op_unmap_safe,
	.ro_recover_mr			= fmr_op_recover_mr,
	.ro_open			= fmr_op_open,
	.ro_maxpages			= fmr_op_maxpages,
	.ro_init_mr			= fmr_op_init_mr,
	.ro_release_mr			= fmr_op_release_mr,
	.ro_displayname			= "fmr",
};
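
/* These methods are reached through ia->ri_ops. A sketch of the
 * selection made at transport setup (the actual switch lives in
 * verbs.c; shown here only for orientation):
 *
 *	if (fmr_is_supported(&xprt->rx_ia))
 *		xprt->rx_ia.ri_ops = &rpcrdma_fmr_memreg_ops;
 */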