/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SUNRPC_XPRT_RDMA_H
#define _LINUX_SUNRPC_XPRT_RDMA_H

#include <linux/wait.h>			/* wait_queue_head_t, etc */
#include <linux/spinlock.h>		/* spinlock_t, etc */
#include <linux/atomic.h>		/* atomic_t, etc */
#include <linux/workqueue.h>		/* struct work_struct */

#include <rdma/rdma_cm.h>		/* RDMA connection api */
#include <rdma/ib_verbs.h>		/* RDMA verbs api */

#include <linux/sunrpc/clnt.h>		/* rpc_xprt */
#include <linux/sunrpc/rpc_rdma.h>	/* RPC/RDMA protocol */
#include <linux/sunrpc/xprtrdma.h>	/* xprt parameters */

#define RDMA_RESOLVE_TIMEOUT	(5000)	/* 5 seconds */
#define RDMA_CONNECT_RETRY_MAX	(2)	/* retries if no listener backlog */

#define RPCRDMA_BIND_TO		(60U * HZ)
#define RPCRDMA_INIT_REEST_TO	(5U * HZ)
#define RPCRDMA_MAX_REEST_TO	(30U * HZ)
#define RPCRDMA_IDLE_DISC_TO	(5U * 60 * HZ)

/*
 * Interface Adapter -- one per transport instance
 */
struct rpcrdma_ia {
	const struct rpcrdma_memreg_ops	*ri_ops;
	rwlock_t		ri_qplock;
	struct ib_device	*ri_device;
	struct rdma_cm_id	*ri_id;
	struct ib_pd		*ri_pd;
	struct ib_mr		*ri_dma_mr;
	struct completion	ri_done;
	int			ri_async_rc;
	unsigned int		ri_max_frmr_depth;
	struct ib_qp_attr	ri_qp_attr;
	struct ib_qp_init_attr	ri_qp_init_attr;
};

/*
 * RDMA Endpoint -- one per transport instance
 */

struct rpcrdma_ep {
	atomic_t		rep_cqcount;
	int			rep_cqinit;
	int			rep_connected;
	struct ib_qp_init_attr	rep_attr;
	wait_queue_head_t	rep_connect_wait;
	struct rdma_conn_param	rep_remote_cma;
	struct sockaddr_storage	rep_remote_addr;
	struct delayed_work	rep_connect_worker;
};

#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
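
/* These macros moderate send completion signaling: only every
 * rep_cqinit-th Send WR requests a completion. A usage sketch
 * (hypothetical caller building a Send WR; the real logic lives
 * in verbs.c):
 *
 *	if (DECR_CQCOUNT(ep) > 0)
 *		send_wr.send_flags = 0;
 *	else {
 *		INIT_CQCOUNT(ep);
 *		send_wr.send_flags = IB_SEND_SIGNALED;
 *	}
 */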

/* Pre-allocate extra Work Requests for handling backward receives
 * and sends. This is a fixed value because the Work Queues are
 * allocated when the forward channel is set up.
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
#define RPCRDMA_BACKWARD_WRS	(8)
#else
#define RPCRDMA_BACKWARD_WRS	(0)
#endif

/* Registered buffer -- registered kmalloc'd memory for RDMA SEND/RECV
 *
 * The structure below appears at the front of a large region of
 * kmalloc'd memory, which always starts on a good alignment boundary.
 */

struct rpcrdma_regbuf {
	size_t			rg_size;
	struct rpcrdma_req	*rg_owner;
	struct ib_sge		rg_iov;
	__be32			rg_base[0] __attribute__ ((aligned(256)));
};

static inline u64
rdmab_addr(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.addr;
}

static inline u32
rdmab_length(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.length;
}

static inline u32
rdmab_lkey(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.lkey;
}

static inline struct rpcrdma_msg *
rdmab_to_msg(struct rpcrdma_regbuf *rb)
{
	return (struct rpcrdma_msg *)rb->rg_base;
}
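
/* Usage sketch (hypothetical caller): a regbuf is allocated and
 * DMA-mapped once, and these accessors fill the SGE of a Send or
 * Receive Work Request from its cached iov:
 *
 *	rb = rpcrdma_alloc_regbuf(ia, size, GFP_KERNEL);
 *	if (IS_ERR(rb))
 *		return PTR_ERR(rb);
 *	sge->addr   = rdmab_addr(rb);
 *	sge->length = rdmab_length(rb);
 *	sge->lkey   = rdmab_lkey(rb);
 */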

#define RPCRDMA_DEF_GFP		(GFP_NOIO | __GFP_NOWARN)

/* To ensure a transport can always make forward progress,
 * the number of RDMA segments allowed in header chunk lists
 * is capped at 8. This prevents less-capable devices and
 * memory registrations from overrunning the Send buffer
 * while building chunk lists.
 *
 * Elements of the Read list take up more room than the
 * Write list or Reply chunk. 8 read segments means the Read
 * list (or Write list or Reply chunk) cannot consume more
 * than
 *
 * ((8 + 2) * read segment size) + 1 XDR words, or 244 bytes.
 *
 * And the fixed part of the header is another 24 bytes.
 *
 * The smallest inline threshold is 1024 bytes, ensuring that
 * at least 750 bytes are available for RPC messages.
 */
#define RPCRDMA_MAX_HDR_SEGS	(8)
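
/* Worked numbers behind the 244-byte figure above (a sketch; the
 * six-word segment size follows from the Read list XDR in rpc_rdma.h,
 * where each entry carries a discriminator, position, handle, length,
 * and a two-word offset; the extra two segments mirror the head and
 * tail iovecs, cf. RPCRDMA_MAX_SEGS):
 *
 *	(8 + 2) * 6 + 1 = 61 XDR words = 244 bytes
 *
 * With the 24-byte fixed header, 1024 - 244 - 24 = 756 bytes remain,
 * hence "at least 750".
 */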

/*
 * struct rpcrdma_rep -- this structure encapsulates state required to recv
 * and complete a reply, asynchronously. It needs several pieces of
 * state:
 *	o recv buffer (posted to provider)
 *	o ib_sge (also donated to provider)
 *	o status of reply (length, success or not)
 *	o bookkeeping state to get run by the reply handler (list, etc)
 *
 * These are allocated during initialization, per-transport instance;
 * reply processing is deferred to a workqueue (via rr_work) rather
 * than run directly in the receive completion handler.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 */

#define RPCRDMA_MAX_DATA_SEGS	((1 * 1024 * 1024) / PAGE_SIZE)
#define RPCRDMA_MAX_SEGS	(RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */
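
/* With 4KB pages, RPCRDMA_MAX_DATA_SEGS evaluates to 256: enough
 * segments to convey a 1MB payload one page at a time, plus two
 * more for the xdr_buf head and tail.
 */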

struct rpcrdma_buffer;

struct rpcrdma_rep {
	struct ib_cqe		rr_cqe;
	unsigned int		rr_len;
	struct ib_device	*rr_device;
	struct rpcrdma_xprt	*rr_rxprt;
	struct work_struct	rr_work;
	struct list_head	rr_list;
	struct rpcrdma_regbuf	*rr_rdmabuf;
};

#define RPCRDMA_BAD_LEN		(~0U)

/*
 * struct rpcrdma_mw - external memory region metadata
 *
 * An external memory region is any buffer or page that is registered
 * on the fly (i.e., not pre-registered).
 *
 * Each rpcrdma_buffer has a list of free MWs anchored in rb_mws. While
 * marshaling, the registration ops (->ro_map) grab one per chunk
 * segment (via rpcrdma_get_mw) to keep track of registration metadata
 * while each RPC is pending. The deregistration ops (->ro_unmap_sync,
 * ->ro_unmap) use this metadata to unmap and release these resources
 * when an RPC is complete.
 */
enum rpcrdma_frmr_state {
	FRMR_IS_INVALID,	/* ready to be used */
	FRMR_IS_VALID,		/* in use */
	FRMR_IS_STALE,		/* failed completion */
};

struct rpcrdma_frmr {
	struct scatterlist		*sg;
	int				sg_nents;
	struct ib_mr			*fr_mr;
	struct ib_cqe			fr_cqe;
	enum rpcrdma_frmr_state		fr_state;
	struct completion		fr_linv_done;
	struct work_struct		fr_work;
	struct rpcrdma_xprt		*fr_xprt;
	union {
		struct ib_reg_wr	fr_regwr;
		struct ib_send_wr	fr_invwr;
	};
};

struct rpcrdma_fmr {
	struct ib_fmr		*fmr;
	u64			*physaddrs;
};

struct rpcrdma_mw {
	union {
		struct rpcrdma_fmr	fmr;
		struct rpcrdma_frmr	frmr;
	};
	struct list_head	mw_list;
	struct list_head	mw_all;
};

/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 *
 * It includes pre-registered buffer memory for send AND recv.
 * The recv buffer, however, is not owned by this structure, and
 * is "donated" to the hardware when a recv is posted. When a
 * reply is handled, the recv buffer used is given back to the
 * struct rpcrdma_req associated with the request.
 *
 * In addition to the basic memory, this structure includes an array
 * of iovs for send operations. The reason is that the iovs passed to
 * ib_post_{send,recv} must not be modified until the work request
 * completes.
 *
 * NOTES:
 *   o RPCRDMA_MAX_SEGS is the max number of addressable chunk elements we
 *     marshal. The number needed varies depending on the iov lists that
 *     are passed to us, the memory registration mode we are in, and if
 *     physical addressing is used, the layout.
 */

struct rpcrdma_mr_seg {			/* chunk descriptors */
	struct rpcrdma_mw *rl_mw;	/* registered MR */
	u64		mr_base;	/* registration result */
	u32		mr_rkey;	/* registration result */
	u32		mr_len;		/* length of chunk or segment */
	int		mr_nsegs;	/* number of segments in chunk or 0 */
	enum dma_data_direction	mr_dir;	/* segment mapping direction */
	dma_addr_t	mr_dma;		/* segment mapping address */
	size_t		mr_dmalen;	/* segment mapping length */
	struct page	*mr_page;	/* owning page, if any */
	char		*mr_offset;	/* kva if no page, else offset */
};

#define RPCRDMA_MAX_IOVS	(2)

struct rpcrdma_req {
	struct list_head	rl_free;
	unsigned int		rl_niovs;
	unsigned int		rl_nchunks;
	unsigned int		rl_connect_cookie;
	struct rpcrdma_buffer	*rl_buffer;
	struct rpcrdma_rep	*rl_reply;	/* holder for reply buffer */
	struct ib_sge		rl_send_iov[RPCRDMA_MAX_IOVS];
	struct rpcrdma_regbuf	*rl_rdmabuf;
	struct rpcrdma_regbuf	*rl_sendbuf;
	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];

	struct ib_cqe		rl_cqe;
	struct list_head	rl_all;
	bool			rl_backchannel;
};

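/* rpcr_to_rdmar() recovers the owning rpcrdma_req from an rpc_rqst:
 * rq_buffer points at the rg_base[] array of the request's send
 * regbuf, so container_of() walks back to the regbuf header, whose
 * rg_owner field identifies the rpcrdma_req.
 */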
static inline struct rpcrdma_req *
rpcr_to_rdmar(struct rpc_rqst *rqst)
{
	void *buffer = rqst->rq_buffer;
	struct rpcrdma_regbuf *rb;

	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base);
	return rb->rg_owner;
}

/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance
 */
struct rpcrdma_buffer {
	spinlock_t		rb_mwlock;	/* protect rb_mws list */
	struct list_head	rb_mws;
	struct list_head	rb_all;
	char			*rb_pool;

	spinlock_t		rb_lock;	/* protect buf lists */
	struct list_head	rb_send_bufs;
	struct list_head	rb_recv_bufs;
	u32			rb_max_requests;
	atomic_t		rb_credits;	/* most recent credit grant */

	u32			rb_bc_srv_max_requests;
	spinlock_t		rb_reqslock;	/* protect rb_allreqs */
	struct list_head	rb_allreqs;

	u32			rb_bc_max_requests;
};
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)

/*
 * Internal structure for transport instance creation. This
 * exists primarily for modularity.
 *
 * This data should be set with mount options
 */
struct rpcrdma_create_data_internal {
	struct sockaddr_storage	addr;	/* RDMA server address */
	unsigned int	max_requests;	/* max requests (slots) in flight */
	unsigned int	rsize;		/* mount rsize - max read hdr+data */
	unsigned int	wsize;		/* mount wsize - max write hdr+data */
	unsigned int	inline_rsize;	/* max non-rdma read data payload */
	unsigned int	inline_wsize;	/* max non-rdma write data payload */
	unsigned int	padding;	/* non-rdma write header padding */
};

#define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
	(rpcx_to_rdmad(rq->rq_xprt).inline_rsize)

#define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
	(rpcx_to_rdmad(rq->rq_xprt).inline_wsize)

#define RPCRDMA_INLINE_PAD_VALUE(rq)\
	rpcx_to_rdmad(rq->rq_xprt).padding

/*
 * Statistics for RPCRDMA
 */
struct rpcrdma_stats {
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;

	unsigned long long	total_rdma_request;
	unsigned long long	total_rdma_reply;

	unsigned long long	pullup_copy_count;
	unsigned long long	fixup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
	unsigned long		nomsg_call_count;
	unsigned long		bcall_count;
};

/*
 * Per-registration mode operations
 */
struct rpcrdma_xprt;
struct rpcrdma_memreg_ops {
	int		(*ro_map)(struct rpcrdma_xprt *,
				  struct rpcrdma_mr_seg *, int, bool);
	void		(*ro_unmap_sync)(struct rpcrdma_xprt *,
					 struct rpcrdma_req *);
	int		(*ro_unmap)(struct rpcrdma_xprt *,
				    struct rpcrdma_mr_seg *);
	int		(*ro_open)(struct rpcrdma_ia *,
				   struct rpcrdma_ep *,
				   struct rpcrdma_create_data_internal *);
	size_t		(*ro_maxpages)(struct rpcrdma_xprt *);
	int		(*ro_init)(struct rpcrdma_xprt *);
	void		(*ro_destroy)(struct rpcrdma_buffer *);
	const char	*ro_displayname;
};
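
/* Dispatch sketch (hypothetical caller): a transport invokes its
 * registration mode through the ri_ops pointer in its interface
 * adapter, for example to map segments for a Read or Write chunk:
 *
 *	n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing);
 *	if (n < 0)
 *		return n;
 */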

extern const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops;
extern const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops;
extern const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops;

/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		rx_xprt;
	struct rpcrdma_ia	rx_ia;
	struct rpcrdma_ep	rx_ep;
	struct rpcrdma_buffer	rx_buf;
	struct rpcrdma_create_data_internal rx_data;
	struct delayed_work	rx_connect_worker;
	struct rpcrdma_stats	rx_stats;
};

#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, rx_xprt)
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)

/* Setting this to 0 ensures interoperability with early servers.
 * Setting this to 1 enhances certain unaligned read/write performance.
 * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
extern int xprt_rdma_pad_optimize;

/*
 * Interface Adapter calls - xprtrdma/verbs.c
 */
int rpcrdma_ia_open(struct rpcrdma_xprt *, struct sockaddr *, int);
void rpcrdma_ia_close(struct rpcrdma_ia *);

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
		      struct rpcrdma_create_data_internal *);
void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);

int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
		    struct rpcrdma_req *);
int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
			 struct rpcrdma_rep *);

/*
 * Buffer calls - xprtrdma/verbs.c
 */
struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
struct rpcrdma_rep *rpcrdma_create_rep(struct rpcrdma_xprt *);
void rpcrdma_destroy_req(struct rpcrdma_ia *, struct rpcrdma_req *);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);

struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);

struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
					    size_t, gfp_t);
void rpcrdma_free_regbuf(struct rpcrdma_ia *,
			 struct rpcrdma_regbuf *);

int rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *, unsigned int);

int frwr_alloc_recovery_wq(void);
void frwr_destroy_recovery_wq(void);

int rpcrdma_alloc_wq(void);
void rpcrdma_destroy_wq(void);

/*
 * Wrappers for chunk registration, shared by read/write chunk code.
 */

void rpcrdma_mapping_error(struct rpcrdma_mr_seg *);

static inline enum dma_data_direction
rpcrdma_data_dir(bool writing)
{
	return writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

static inline void
rpcrdma_map_one(struct ib_device *device, struct rpcrdma_mr_seg *seg,
		enum dma_data_direction direction)
{
	seg->mr_dir = direction;
	seg->mr_dmalen = seg->mr_len;

	if (seg->mr_page)
		seg->mr_dma = ib_dma_map_page(device,
				seg->mr_page, offset_in_page(seg->mr_offset),
				seg->mr_dmalen, seg->mr_dir);
	else
		seg->mr_dma = ib_dma_map_single(device,
				seg->mr_offset,
				seg->mr_dmalen, seg->mr_dir);

	if (ib_dma_mapping_error(device, seg->mr_dma))
		rpcrdma_mapping_error(seg);
}

static inline void
rpcrdma_unmap_one(struct ib_device *device, struct rpcrdma_mr_seg *seg)
{
	if (seg->mr_page)
		ib_dma_unmap_page(device,
				  seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
	else
		ib_dma_unmap_single(device,
				    seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
}
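
/* Pairing sketch (hypothetical registration code): each segment is
 * DMA-mapped before its memory is registered, and unmapped once the
 * registration has been invalidated:
 *
 *	rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
 *	... register seg->mr_dma / seg->mr_dmalen with the device ...
 *	rpcrdma_unmap_one(ia->ri_device, seg);
 */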

/*
 * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
 */
void rpcrdma_connect_worker(struct work_struct *);
void rpcrdma_conn_func(struct rpcrdma_ep *);
void rpcrdma_reply_handler(struct rpcrdma_rep *);

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */
int rpcrdma_marshal_req(struct rpc_rqst *);

/* RPC/RDMA module init - xprtrdma/transport.c
 */
extern unsigned int xprt_rdma_max_inline_read;
void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
int xprt_rdma_init(void);
void xprt_rdma_cleanup(void);

/* Backchannel calls - xprtrdma/backchannel.c
 */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
int xprt_rdma_bc_up(struct svc_serv *, struct net *);
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
int rpcrdma_bc_marshal_reply(struct rpc_rqst *);
void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
void xprt_rdma_bc_destroy(struct rpc_xprt *, unsigned int);
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */

extern struct xprt_class xprt_rdma_bc;

#endif				/* _LINUX_SUNRPC_XPRT_RDMA_H */