RPC/RDMA: fix connect/reconnect resource leak.
net/sunrpc/xprtrdma/xprt_rdma.h
/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_SUNRPC_XPRT_RDMA_H
#define _LINUX_SUNRPC_XPRT_RDMA_H

#include <linux/wait.h> 		/* wait_queue_head_t, etc */
#include <linux/spinlock.h> 		/* spinlock_t, etc */
#include <asm/atomic.h>			/* atomic_t, etc */

#include <rdma/rdma_cm.h>		/* RDMA connection api */
#include <rdma/ib_verbs.h>		/* RDMA verbs api */

#include <linux/sunrpc/clnt.h> 		/* rpc_xprt */
#include <linux/sunrpc/rpc_rdma.h> 	/* RPC/RDMA protocol */
#include <linux/sunrpc/xprtrdma.h> 	/* xprt parameters */

/*
 * Interface Adapter -- one per transport instance
 */
struct rpcrdma_ia {
	struct rdma_cm_id 	*ri_id;
	struct ib_pd		*ri_pd;
	struct ib_mr		*ri_bind_mem;
	u32			ri_dma_lkey;
	int			ri_have_dma_lkey;
	struct completion	ri_done;
	int			ri_async_rc;
	enum rpcrdma_memreg	ri_memreg_strategy;
};

/*
 * RDMA Endpoint -- one per transport instance
 */

struct rpcrdma_ep {
	atomic_t		rep_cqcount;
	int			rep_cqinit;
	int			rep_connected;
	struct rpcrdma_ia	*rep_ia;
	struct ib_cq		*rep_cq;
	struct ib_qp_init_attr	rep_attr;
	wait_queue_head_t 	rep_connect_wait;
	struct ib_sge		rep_pad;	/* holds zeroed pad */
	struct ib_mr		*rep_pad_mr;	/* holds zeroed pad */
	void			(*rep_func)(struct rpcrdma_ep *);
	struct rpc_xprt		*rep_xprt;	/* for rep_func */
	struct rdma_conn_param	rep_remote_cma;
	struct sockaddr_storage	rep_remote_addr;
};

#define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
#define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)

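/*
 * Illustrative sketch (not part of this header): these counters let the
 * send path request a signaled completion only once every rep_cqinit
 * posts, rather than on every Work Request, keeping completion-queue
 * interrupts rare. A posting routine might use them like this; names
 * other than the two macros above are hypothetical:
 *
 *	struct ib_send_wr send_wr;
 *
 *	memset(&send_wr, 0, sizeof send_wr);
 *	if (DECR_CQCOUNT(ep) <= 0) {
 *		// batch exhausted: request one completion and reset
 *		INIT_CQCOUNT(ep);
 *		send_wr.send_flags = IB_SEND_SIGNALED;
 *	}
 *	// ... fill in the sge list, then ib_post_send() ...
 */
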
/*
 * struct rpcrdma_rep -- this structure encapsulates state required to recv
 * and complete a reply, asynchronously. It needs several pieces of
 * state:
 *   o recv buffer (posted to provider)
 *   o ib_sge (also donated to provider)
 *   o status of reply (length, success or not)
 *   o bookkeeping state to get run by tasklet (list, etc)
 *
 * These are allocated during initialization, per-transport instance;
 * however, the tasklet execution list itself is global, as it should
 * always be pretty short.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 */

/* temporary static scatter/gather max */
#define RPCRDMA_MAX_DATA_SEGS	(8)	/* max scatter/gather */
#define RPCRDMA_MAX_SEGS 	(RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */
#define MAX_RPCRDMAHDR	(\
	/* max supported RPC/RDMA header */ \
	sizeof(struct rpcrdma_msg) + (2 * sizeof(u32)) + \
	(sizeof(struct rpcrdma_read_chunk) * RPCRDMA_MAX_SEGS) + sizeof(u32))

struct rpcrdma_buffer;

struct rpcrdma_rep {
	unsigned int	rr_len;		/* actual received reply length */
	struct rpcrdma_buffer *rr_buffer; /* home base for this structure */
	struct rpc_xprt	*rr_xprt;	/* needed for request/reply matching */
	void (*rr_func)(struct rpcrdma_rep *);/* called by tasklet in softint */
	struct list_head rr_list;	/* tasklet list */
	wait_queue_head_t rr_unbind;	/* optional unbind wait */
	struct ib_sge	rr_iov;		/* for posting */
	struct ib_mr	*rr_handle;	/* handle for mem in rr_iov */
	char	rr_base[MAX_RPCRDMAHDR]; /* minimal inline receive buffer */
};

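/*
 * Illustrative sketch of the receive flow described above; the list and
 * tasklet names here are hypothetical placeholders for the statics in
 * verbs.c. On a receive completion, the upcall records the reply
 * length and defers the rest to the global tasklet:
 *
 *	rep->rr_len = wc->byte_len;
 *	list_add_tail(&rep->rr_list, &global_reply_list);
 *	tasklet_schedule(&reply_tasklet);
 *
 * The tasklet then invokes rep->rr_func(rep), which normally points at
 * rpcrdma_reply_handler() to match the reply to its rpc_rqst.
 */
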
/*
 * struct rpcrdma_req -- structure central to the request/reply sequence.
 *
 * N of these are associated with a transport instance, and stored in
 * struct rpcrdma_buffer. N is the max number of outstanding requests.
 *
 * It includes pre-registered buffer memory for send AND recv.
 * The recv buffer, however, is not owned by this structure, and
 * is "donated" to the hardware when a recv is posted. When a
 * reply is handled, the recv buffer used is given back to the
 * struct rpcrdma_req associated with the request.
 *
 * In addition to the basic memory, this structure includes an array
 * of iovs for send operations. The reason is that the iovs passed to
 * ib_post_{send,recv} must not be modified until the work request
 * completes.
 *
 * NOTES:
 *   o RPCRDMA_MAX_SEGS is the max number of addressable chunk elements we
 *     marshal. The number needed varies depending on the iov lists that
 *     are passed to us, the memory registration mode we are in, and if
 *     physical addressing is used, the layout.
 */

struct rpcrdma_mr_seg {			/* chunk descriptors */
	union {				/* chunk memory handles */
		struct ib_mr	*rl_mr;		/* if registered directly */
		struct rpcrdma_mw {		/* if registered from region */
			union {
				struct ib_mw	*mw;
				struct ib_fmr	*fmr;
				struct {
					struct ib_fast_reg_page_list *fr_pgl;
					struct ib_mr *fr_mr;
				} frmr;
			} r;
			struct list_head mw_list;
		} *rl_mw;
	} mr_chunk;
	u64		mr_base;	/* registration result */
	u32		mr_rkey;	/* registration result */
	u32		mr_len;		/* length of chunk or segment */
	int		mr_nsegs;	/* number of segments in chunk or 0 */
	enum dma_data_direction	mr_dir;	/* segment mapping direction */
	dma_addr_t	mr_dma;		/* segment mapping address */
	size_t		mr_dmalen;	/* segment mapping length */
	struct page	*mr_page;	/* owning page, if any */
	char		*mr_offset;	/* kva if no page, else offset */
};

struct rpcrdma_req {
	size_t 		rl_size;	/* actual length of buffer */
	unsigned int	rl_niovs;	/* 0, 2 or 4 */
	unsigned int	rl_nchunks;	/* non-zero if chunks */
	unsigned int	rl_connect_cookie;	/* retry detection */
	struct rpcrdma_buffer *rl_buffer; /* home base for this structure */
	struct rpcrdma_rep	*rl_reply;/* holder for reply buffer */
	struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];/* chunk segments */
	struct ib_sge	rl_send_iov[4];	/* for active requests */
	struct ib_sge	rl_iov;		/* for posting */
	struct ib_mr	*rl_handle;	/* handle for mem in rl_iov */
	char		rl_base[MAX_RPCRDMAHDR]; /* start of actual buffer */
	__u32 		rl_xdr_buf[0];	/* start of returned rpc rq_buffer */
};
#define rpcr_to_rdmar(r) \
	container_of((r)->rq_buffer, struct rpcrdma_req, rl_xdr_buf[0])

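/*
 * Illustrative sketch (not part of this header): because the transport
 * hands out rq_buffer from the rl_xdr_buf[] flexible array above, any
 * code holding an rpc_rqst can recover the owning rpcrdma_req via
 * container_of(). The function name here is hypothetical:
 *
 *	static struct rpcrdma_req *
 *	example_owning_req(struct rpc_rqst *rqst)
 *	{
 *		return rpcr_to_rdmar(rqst);
 *	}
 */
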
/*
 * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
 * inline requests/replies, and client/server credits.
 *
 * One of these is associated with a transport instance
 */
struct rpcrdma_buffer {
	spinlock_t	rb_lock;	/* protects indexes */
	atomic_t	rb_credits;	/* most recent server credits */
	unsigned long	rb_cwndscale;	/* cached framework rpc_cwndscale */
	int		rb_max_requests;/* client max requests */
	struct list_head rb_mws;	/* optional memory windows/fmrs/frmrs */
	int		rb_send_index;
	struct rpcrdma_req	**rb_send_bufs;
	int		rb_recv_index;
	struct rpcrdma_rep	**rb_recv_bufs;
	char		*rb_pool;
};
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)

/*
 * Internal structure for transport instance creation. This
 * exists primarily for modularity.
 *
 * This data should be set with mount options
 */
struct rpcrdma_create_data_internal {
	struct sockaddr_storage	addr;	/* RDMA server address */
	unsigned int	max_requests;	/* max requests (slots) in flight */
	unsigned int	rsize;		/* mount rsize - max read hdr+data */
	unsigned int	wsize;		/* mount wsize - max write hdr+data */
	unsigned int	inline_rsize;	/* max non-rdma read data payload */
	unsigned int	inline_wsize;	/* max non-rdma write data payload */
	unsigned int	padding;	/* non-rdma write header padding */
};

#define RPCRDMA_INLINE_READ_THRESHOLD(rq) \
	(rpcx_to_rdmad(rq->rq_task->tk_xprt).inline_rsize)

#define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\
	(rpcx_to_rdmad(rq->rq_task->tk_xprt).inline_wsize)

#define RPCRDMA_INLINE_PAD_VALUE(rq)\
	rpcx_to_rdmad(rq->rq_task->tk_xprt).padding

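/*
 * Illustrative sketch (not part of this header): a marshaling routine
 * such as rpcrdma_marshal_req() can compare a request's send size
 * against these thresholds to choose between a pure inline send and
 * chunking. A hedged outline, not the actual code in rpc_rdma.c:
 *
 *	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst)) {
 *		// whole call message fits in the pre-registered
 *		// inline buffer; no chunk lists needed
 *	} else {
 *		// payload too large: marshal read/write chunks and
 *		// register them via rpcrdma_register_external()
 *	}
 */
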
/*
 * Statistics for RPCRDMA
 */
struct rpcrdma_stats {
	unsigned long		read_chunk_count;
	unsigned long		write_chunk_count;
	unsigned long		reply_chunk_count;

	unsigned long long	total_rdma_request;
	unsigned long long	total_rdma_reply;

	unsigned long long	pullup_copy_count;
	unsigned long long	fixup_copy_count;
	unsigned long		hardway_register_count;
	unsigned long		failed_marshal_count;
	unsigned long		bad_reply_count;
};

/*
 * RPCRDMA transport -- encapsulates the structures above for
 * integration with RPC.
 *
 * The contained structures are embedded, not pointers,
 * for convenience. This structure need not be visible externally.
 *
 * It is allocated and initialized during mount, and released
 * during unmount.
 */
struct rpcrdma_xprt {
	struct rpc_xprt		xprt;
	struct rpcrdma_ia	rx_ia;
	struct rpcrdma_ep	rx_ep;
	struct rpcrdma_buffer	rx_buf;
	struct rpcrdma_create_data_internal rx_data;
	struct delayed_work	rdma_connect;
	struct rpcrdma_stats	rx_stats;
};

#define rpcx_to_rdmax(x) container_of(x, struct rpcrdma_xprt, xprt)
#define rpcx_to_rdmad(x) (rpcx_to_rdmax(x)->rx_data)

/* Setting this to 0 ensures interoperability with early servers.
 * Setting this to 1 enhances certain unaligned read/write performance.
 * Default is 0, see sysctl entry and rpc_rdma.c rpcrdma_convert_iovs() */
extern int xprt_rdma_pad_optimize;

/*
 * Interface Adapter calls - xprtrdma/verbs.c
 */
int rpcrdma_ia_open(struct rpcrdma_xprt *, struct sockaddr *, int);
void rpcrdma_ia_close(struct rpcrdma_ia *);

/*
 * Endpoint calls - xprtrdma/verbs.c
 */
int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
int rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);

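/*
 * Illustrative sketch (not part of this header) of the setup and
 * teardown pairing these calls imply; per the commit subject, an
 * unpaired create on a reconnect path is exactly the kind of resource
 * leak being fixed. Error handling elided; "r_xprt", "sap" and
 * "memreg" are hypothetical locals:
 *
 *	rc = rpcrdma_ia_open(r_xprt, sap, memreg);
 *	rc = rpcrdma_ep_create(&r_xprt->rx_ep, &r_xprt->rx_ia,
 *				&r_xprt->rx_data);
 *	rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
 *	...
 *	rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia);
 *	rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
 *	rpcrdma_ia_close(&r_xprt->rx_ia);
 */
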
int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_req *);
int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *,
				struct rpcrdma_rep *);

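/*
 * Illustrative sketch (not part of this header): the receive for the
 * reply is posted before the call itself, so a fast reply can never
 * arrive with no buffer ready for it. A hedged outline, error paths
 * elided:
 *
 *	if (req->rl_reply)
 *		rc = rpcrdma_ep_post_recv(ia, ep, req->rl_reply);
 *	rc = rpcrdma_ep_post(ia, ep, req);
 */
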
/*
 * Buffer calls - xprtrdma/verbs.c
 */
int rpcrdma_buffer_create(struct rpcrdma_buffer *, struct rpcrdma_ep *,
				struct rpcrdma_ia *,
				struct rpcrdma_create_data_internal *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);

struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
void rpcrdma_recv_buffer_get(struct rpcrdma_req *);
void rpcrdma_recv_buffer_put(struct rpcrdma_rep *);

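/*
 * Illustrative sketch (not part of this header): the get/put calls pair
 * up around the life of one RPC. A hedged outline, error handling
 * elided:
 *
 *	struct rpcrdma_req *req;
 *
 *	req = rpcrdma_buffer_get(&r_xprt->rx_buf); // at allocate time
 *	...				// marshal, post, handle reply
 *	rpcrdma_recv_buffer_put(rep);	// return the reply buffer
 *	rpcrdma_buffer_put(req);	// return the request at free time
 */
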
int rpcrdma_register_internal(struct rpcrdma_ia *, void *, int,
				struct ib_mr **, struct ib_sge *);
int rpcrdma_deregister_internal(struct rpcrdma_ia *,
				struct ib_mr *, struct ib_sge *);

int rpcrdma_register_external(struct rpcrdma_mr_seg *,
				int, int, struct rpcrdma_xprt *);
int rpcrdma_deregister_external(struct rpcrdma_mr_seg *,
				struct rpcrdma_xprt *, void *);

/*
 * RPC/RDMA connection management calls - xprtrdma/rpc_rdma.c
 */
void rpcrdma_conn_func(struct rpcrdma_ep *);
void rpcrdma_reply_handler(struct rpcrdma_rep *);

/*
 * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
 */
int rpcrdma_marshal_req(struct rpc_rqst *);

#endif				/* _LINUX_SUNRPC_XPRT_RDMA_H */
342#endif /* _LINUX_SUNRPC_XPRT_RDMA_H */