/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iw_cxgb4.h"

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                      struct c4iw_dev_ucontext *uctx)
{
        /*
         * uP clears EQ contexts when the connection exits rdma mode,
         * so no need to post a RESET WR for these EQs.
         */
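        /*
         * Teardown mirrors create_qp() below in reverse: free the queue
         * memory, return the RQT reservation, then release both qpids.
         */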
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          wq->rq.memsize, wq->rq.queue,
                          pci_unmap_addr(&wq->rq, mapping));
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          wq->sq.memsize, wq->sq.queue,
                          pci_unmap_addr(&wq->sq, mapping));
        c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
        kfree(wq->rq.sw_rq);
        kfree(wq->sq.sw_sq);
        c4iw_put_qpid(rdev, wq->rq.qid, uctx);
        c4iw_put_qpid(rdev, wq->sq.qid, uctx);
        return 0;
}

static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                     struct t4_cq *rcq, struct t4_cq *scq,
                     struct c4iw_dev_ucontext *uctx)
{
        int user = (uctx != &rdev->uctx);
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
        int ret;
        int eqsize;

        wq->sq.qid = c4iw_get_qpid(rdev, uctx);
        if (!wq->sq.qid)
                return -ENOMEM;

        wq->rq.qid = c4iw_get_qpid(rdev, uctx);
        if (!wq->rq.qid)
                goto err1;

        if (!user) {
                wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
                                       GFP_KERNEL);
                if (!wq->sq.sw_sq)
                        goto err2;

                wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
                                       GFP_KERNEL);
                if (!wq->rq.sw_rq)
                        goto err3;
        }

        /*
         * RQT must be a power of 2.
         */
        wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
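        /*
         * Worked example: an RQ of 100 entries reserves a 128-entry RQT,
         * since roundup_pow_of_two(100) == 128.
         */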
        wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
        if (!wq->rq.rqt_hwaddr)
                goto err4;

        wq->sq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
                                          wq->sq.memsize, &(wq->sq.dma_addr),
                                          GFP_KERNEL);
        if (!wq->sq.queue)
                goto err5;
        memset(wq->sq.queue, 0, wq->sq.memsize);
        pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

        wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
                                          wq->rq.memsize, &(wq->rq.dma_addr),
                                          GFP_KERNEL);
        if (!wq->rq.queue)
                goto err6;
        PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
             __func__, wq->sq.queue,
             (unsigned long long)virt_to_phys(wq->sq.queue),
             wq->rq.queue,
             (unsigned long long)virt_to_phys(wq->rq.queue));
        memset(wq->rq.queue, 0, wq->rq.memsize);
        pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

        wq->db = rdev->lldi.db_reg;
        wq->gts = rdev->lldi.gts_reg;
        if (user) {
                wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
                             (wq->sq.qid << rdev->qpshift);
                wq->sq.udb &= PAGE_MASK;
                wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
                             (wq->rq.qid << rdev->qpshift);
                wq->rq.udb &= PAGE_MASK;
        }
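        /*
         * For user-mode QPs the doorbell addresses computed above are bus
         * addresses in BAR2 (pci resource 2), one region per qid spaced
         * by 1 << qpshift, masked to a page boundary so userspace can
         * mmap them via the keys handed out in c4iw_create_qp().
         */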
        wq->rdev = rdev;
        wq->rq.msn = 1;

        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + 2 * sizeof *res;

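        /*
         * A single FW_RI_RES_WR carries two resource records here, one
         * for the SQ and one for the RQ; hence "2 * sizeof *res" and
         * V_FW_RI_RES_WR_NRES(2) below.
         */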
        skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
        if (!skb) {
                ret = -ENOMEM;
                goto err7;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP(FW_RI_RES_WR) |
                        V_FW_RI_RES_WR_NRES(2) |
                        FW_WR_COMPL(1));
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (u64)&wr_wait;
        res = res_wr->res;
        res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
        res->u.sqrq.op = FW_RI_RES_OP_WRITE;

        /*
         * eqsize is the number of 64B entries plus the status page size.
         */
        eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
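        /*
         * Illustrative numbers only: if sq.size were 128 and each SQ
         * entry took 4 of the 64B slots, eqsize would be 512 plus the
         * status page entries.
         */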

        res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
                V_FW_RI_RES_WR_HOSTFCMODE(0) |  /* no host cidx updates */
                V_FW_RI_RES_WR_CPRIO(0) |       /* don't keep in chip cache */
                V_FW_RI_RES_WR_PCIECHN(0) |     /* set by uP at ri_init time */
                V_FW_RI_RES_WR_IQID(scq->cqid));
        res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
                V_FW_RI_RES_WR_DCAEN(0) |
                V_FW_RI_RES_WR_DCACPU(0) |
                V_FW_RI_RES_WR_FBMIN(3) |
                V_FW_RI_RES_WR_FBMAX(3) |
                V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
                V_FW_RI_RES_WR_CIDXFTHRESH(0) |
                V_FW_RI_RES_WR_EQSIZE(eqsize));
        res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
        res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
        res++;
        res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
        res->u.sqrq.op = FW_RI_RES_OP_WRITE;

        /*
         * eqsize is the number of 64B entries plus the status page size.
         */
        eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
        res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
                V_FW_RI_RES_WR_HOSTFCMODE(0) |  /* no host cidx updates */
                V_FW_RI_RES_WR_CPRIO(0) |       /* don't keep in chip cache */
                V_FW_RI_RES_WR_PCIECHN(0) |     /* set by uP at ri_init time */
                V_FW_RI_RES_WR_IQID(rcq->cqid));
        res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
                V_FW_RI_RES_WR_DCAEN(0) |
                V_FW_RI_RES_WR_DCACPU(0) |
                V_FW_RI_RES_WR_FBMIN(3) |
                V_FW_RI_RES_WR_FBMAX(3) |
                V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
                V_FW_RI_RES_WR_CIDXFTHRESH(0) |
                V_FW_RI_RES_WR_EQSIZE(eqsize));
        res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
        res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

        c4iw_init_wr_wait(&wr_wait);

        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
                goto err7;
        wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
        if (!wr_wait.done) {
                printk(KERN_ERR MOD "Device %s not responding!\n",
                       pci_name(rdev->lldi.pdev));
                rdev->flags = T4_FATAL_ERROR;
                ret = -EIO;
        } else
                ret = wr_wait.ret;
        if (ret)
                goto err7;

        PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
             __func__, wq->sq.qid, wq->rq.qid, wq->db,
             (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

        return 0;
err7:
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          wq->rq.memsize, wq->rq.queue,
                          pci_unmap_addr(&wq->rq, mapping));
err6:
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          wq->sq.memsize, wq->sq.queue,
                          pci_unmap_addr(&wq->sq, mapping));
err5:
        c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
        kfree(wq->rq.sw_rq);
err3:
        kfree(wq->sq.sw_sq);
err2:
        c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
        c4iw_put_qpid(rdev, wq->sq.qid, uctx);
        return -ENOMEM;
}

static int build_rdma_send(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
        int i;
        u32 plen;
        int size;
        u8 *datap;

        if (wr->num_sge > T4_MAX_SEND_SGE)
                return -EINVAL;
        switch (wr->opcode) {
        case IB_WR_SEND:
                if (wr->send_flags & IB_SEND_SOLICITED)
                        wqe->send.sendop_pkd = cpu_to_be32(
                                V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
                else
                        wqe->send.sendop_pkd = cpu_to_be32(
                                V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
                wqe->send.stag_inv = 0;
                break;
        case IB_WR_SEND_WITH_INV:
                if (wr->send_flags & IB_SEND_SOLICITED)
                        wqe->send.sendop_pkd = cpu_to_be32(
                                V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
                else
                        wqe->send.sendop_pkd = cpu_to_be32(
                                V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
                wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
                break;

        default:
                return -EINVAL;
        }
        plen = 0;
        if (wr->num_sge) {
                if (wr->send_flags & IB_SEND_INLINE) {
                        datap = (u8 *)wqe->send.u.immd_src[0].data;
                        for (i = 0; i < wr->num_sge; i++) {
                                if ((plen + wr->sg_list[i].length) >
                                    T4_MAX_SEND_INLINE) {
                                        return -EMSGSIZE;
                                }
                                plen += wr->sg_list[i].length;
                                memcpy(datap,
                                       (void *)(unsigned long)wr->sg_list[i].addr,
                                       wr->sg_list[i].length);
                                datap += wr->sg_list[i].length;
                        }
                        wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
                        wqe->send.u.immd_src[0].r1 = 0;
                        wqe->send.u.immd_src[0].r2 = 0;
                        wqe->send.u.immd_src[0].immdlen = cpu_to_be32(plen);
                        size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
                               plen;
                } else {
                        for (i = 0; i < wr->num_sge; i++) {
                                if ((plen + wr->sg_list[i].length) < plen)
                                        return -EMSGSIZE;
                                plen += wr->sg_list[i].length;
                                wqe->send.u.isgl_src[0].sge[i].stag =
                                        cpu_to_be32(wr->sg_list[i].lkey);
                                wqe->send.u.isgl_src[0].sge[i].len =
                                        cpu_to_be32(wr->sg_list[i].length);
                                wqe->send.u.isgl_src[0].sge[i].to =
                                        cpu_to_be64(wr->sg_list[i].addr);
                        }
                        wqe->send.u.isgl_src[0].op = FW_RI_DATA_ISGL;
                        wqe->send.u.isgl_src[0].r1 = 0;
                        wqe->send.u.isgl_src[0].nsge = cpu_to_be16(wr->num_sge);
                        wqe->send.u.isgl_src[0].r2 = 0;
                        size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
                               wr->num_sge * sizeof(struct fw_ri_sge);
                }
        } else {
                wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
                wqe->send.u.immd_src[0].r1 = 0;
                wqe->send.u.immd_src[0].r2 = 0;
                wqe->send.u.immd_src[0].immdlen = 0;
                size = sizeof wqe->send + sizeof(struct fw_ri_immd);
        }
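        /*
         * The firmware expresses WR lengths in 16-byte units, so round
         * the total size up to the next 16-byte multiple.
         */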
        *len16 = DIV_ROUND_UP(size, 16);
        wqe->send.plen = cpu_to_be32(plen);
        return 0;
}

static int build_rdma_write(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
        int i;
        u32 plen;
        int size;
        u8 *datap;

        if (wr->num_sge > T4_MAX_WRITE_SGE)
                return -EINVAL;
        wqe->write.r2 = 0;
        wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
        wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
        plen = 0;
        if (wr->num_sge) {
                if (wr->send_flags & IB_SEND_INLINE) {
                        datap = (u8 *)wqe->write.u.immd_src[0].data;
                        for (i = 0; i < wr->num_sge; i++) {
                                if ((plen + wr->sg_list[i].length) >
                                    T4_MAX_WRITE_INLINE) {
                                        return -EMSGSIZE;
                                }
                                plen += wr->sg_list[i].length;
                                memcpy(datap,
                                       (void *)(unsigned long)wr->sg_list[i].addr,
                                       wr->sg_list[i].length);
                                datap += wr->sg_list[i].length;
                        }
                        wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
                        wqe->write.u.immd_src[0].r1 = 0;
                        wqe->write.u.immd_src[0].r2 = 0;
                        wqe->write.u.immd_src[0].immdlen = cpu_to_be32(plen);
                        size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
                               plen;
                } else {
                        for (i = 0; i < wr->num_sge; i++) {
                                if ((plen + wr->sg_list[i].length) < plen)
                                        return -EMSGSIZE;
                                plen += wr->sg_list[i].length;
                                wqe->write.u.isgl_src[0].sge[i].stag =
                                        cpu_to_be32(wr->sg_list[i].lkey);
                                wqe->write.u.isgl_src[0].sge[i].len =
                                        cpu_to_be32(wr->sg_list[i].length);
                                wqe->write.u.isgl_src[0].sge[i].to =
                                        cpu_to_be64(wr->sg_list[i].addr);
                        }
                        wqe->write.u.isgl_src[0].op = FW_RI_DATA_ISGL;
                        wqe->write.u.isgl_src[0].r1 = 0;
                        wqe->write.u.isgl_src[0].nsge =
                                cpu_to_be16(wr->num_sge);
                        wqe->write.u.isgl_src[0].r2 = 0;
                        size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
                               wr->num_sge * sizeof(struct fw_ri_sge);
                }
        } else {
                wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
                wqe->write.u.immd_src[0].r1 = 0;
                wqe->write.u.immd_src[0].r2 = 0;
                wqe->write.u.immd_src[0].immdlen = 0;
                size = sizeof wqe->write + sizeof(struct fw_ri_immd);
        }
        *len16 = DIV_ROUND_UP(size, 16);
        wqe->write.plen = cpu_to_be32(plen);
        return 0;
}

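/*
 * Build an RDMA read WR.  A request with no SGE is encoded as a 0-byte
 * read; the STag value of 2 used in that path appears to be a
 * reserved/dummy STag expected by the firmware (inferred from the code,
 * not from firmware documentation).
 */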
static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
        if (wr->num_sge > 1)
                return -EINVAL;
        if (wr->num_sge) {
                wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
                wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
                                                        >> 32));
                wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
                wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
                wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
                wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
                                                         >> 32));
                wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
        } else {
                wqe->read.stag_src = cpu_to_be32(2);
                wqe->read.to_src_hi = 0;
                wqe->read.to_src_lo = 0;
                wqe->read.stag_sink = cpu_to_be32(2);
                wqe->read.plen = 0;
                wqe->read.to_sink_hi = 0;
                wqe->read.to_sink_lo = 0;
        }
        wqe->read.r2 = 0;
        wqe->read.r5 = 0;
        *len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
        return 0;
}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
                           struct ib_recv_wr *wr, u8 *len16)
{
        int i;
        int plen = 0;

        for (i = 0; i < wr->num_sge; i++) {
                if ((plen + wr->sg_list[i].length) < plen)
                        return -EMSGSIZE;
                plen += wr->sg_list[i].length;
                wqe->recv.isgl.sge[i].stag =
                        cpu_to_be32(wr->sg_list[i].lkey);
                wqe->recv.isgl.sge[i].len =
                        cpu_to_be32(wr->sg_list[i].length);
                wqe->recv.isgl.sge[i].to =
                        cpu_to_be64(wr->sg_list[i].addr);
        }
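        /* Zero out any unused SGE slots so stale entries are never seen. */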
        for (; i < T4_MAX_RECV_SGE; i++) {
                wqe->recv.isgl.sge[i].stag = 0;
                wqe->recv.isgl.sge[i].len = 0;
                wqe->recv.isgl.sge[i].to = 0;
        }
        wqe->recv.isgl.op = FW_RI_DATA_ISGL;
        wqe->recv.isgl.r1 = 0;
        wqe->recv.isgl.nsge = cpu_to_be16(wr->num_sge);
        wqe->recv.isgl.r2 = 0;
        *len16 = DIV_ROUND_UP(sizeof wqe->recv +
                              wr->num_sge * sizeof(struct fw_ri_sge), 16);
        return 0;
}

static int build_fastreg(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
        struct fw_ri_immd *imdp;
        __be64 *p;
        int i;
        int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);

        if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
                return -EINVAL;

        wqe->fr.qpbinde_to_dcacpu = 0;
        wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
        wqe->fr.addr_type = FW_RI_VA_BASED_TO;
        wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
        wqe->fr.len_hi = 0;
        wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
        wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
        wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
        wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
                                        0xffffffff);
        if (pbllen > T4_MAX_FR_IMMD) {
                struct c4iw_fr_page_list *c4pl =
                        to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
                struct fw_ri_dsgl *sglp;

                sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
                sglp->op = FW_RI_DATA_DSGL;
                sglp->r1 = 0;
                sglp->nsge = cpu_to_be16(1);
                sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
                sglp->len0 = cpu_to_be32(pbllen);

                *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *sglp, 16);
        } else {
                imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
                imdp->op = FW_RI_DATA_IMMD;
                imdp->r1 = 0;
                imdp->r2 = 0;
                imdp->immdlen = cpu_to_be32(pbllen);
                p = (__be64 *)(imdp + 1);
                for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++)
                        *p = cpu_to_be64(
                                (u64)wr->wr.fast_reg.page_list->page_list[i]);
                *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen,
                                      16);
        }
        return 0;
}

static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
                          u8 *len16)
{
        wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
        wqe->inv.r2 = 0;
        *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
        return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
        PDBG("%s ib_qp %p\n", __func__, qp);
        atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
        PDBG("%s ib_qp %p\n", __func__, qp);
        if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
                wake_up(&(to_c4iw_qp(qp)->wait));
}

int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr)
{
        int err = 0;
        u8 len16 = 0;
        enum fw_wr_opcodes fw_opcode = 0;
        enum fw_ri_wr_flags fw_flags;
        struct c4iw_qp *qhp;
        union t4_wr *wqe;
        u32 num_wrs;
        struct t4_swsqe *swsqe;
        unsigned long flag;
        u16 idx = 0;

        qhp = to_c4iw_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
        if (t4_wq_in_error(&qhp->wq)) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        num_wrs = t4_sq_avail(&qhp->wq);
        if (num_wrs == 0) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -ENOMEM;
        }
        while (wr) {
                if (num_wrs == 0) {
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }
                wqe = &qhp->wq.sq.queue[qhp->wq.sq.pidx];
                fw_flags = 0;
                if (wr->send_flags & IB_SEND_SOLICITED)
                        fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
                if (wr->send_flags & IB_SEND_SIGNALED)
                        fw_flags |= FW_RI_COMPLETION_FLAG;
                swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
                switch (wr->opcode) {
                case IB_WR_SEND_WITH_INV:
                case IB_WR_SEND:
                        if (wr->send_flags & IB_SEND_FENCE)
                                fw_flags |= FW_RI_READ_FENCE_FLAG;
                        fw_opcode = FW_RI_SEND_WR;
                        if (wr->opcode == IB_WR_SEND)
                                swsqe->opcode = FW_RI_SEND;
                        else
                                swsqe->opcode = FW_RI_SEND_WITH_INV;
                        err = build_rdma_send(wqe, wr, &len16);
                        break;
                case IB_WR_RDMA_WRITE:
                        fw_opcode = FW_RI_RDMA_WRITE_WR;
                        swsqe->opcode = FW_RI_RDMA_WRITE;
                        err = build_rdma_write(wqe, wr, &len16);
                        break;
                case IB_WR_RDMA_READ:
                        fw_opcode = FW_RI_RDMA_READ_WR;
                        swsqe->opcode = FW_RI_READ_REQ;
                        fw_flags = 0;
                        err = build_rdma_read(wqe, wr, &len16);
                        if (err)
                                break;
                        swsqe->read_len = wr->sg_list[0].length;
                        if (!qhp->wq.sq.oldest_read)
                                qhp->wq.sq.oldest_read = swsqe;
                        break;
                case IB_WR_FAST_REG_MR:
                        fw_opcode = FW_RI_FR_NSMR_WR;
                        swsqe->opcode = FW_RI_FAST_REGISTER;
                        err = build_fastreg(wqe, wr, &len16);
                        break;
                case IB_WR_LOCAL_INV:
                        fw_opcode = FW_RI_INV_LSTAG_WR;
                        swsqe->opcode = FW_RI_LOCAL_INV;
                        err = build_inv_stag(wqe, wr, &len16);
                        break;
                default:
                        PDBG("%s post of type=%d TBD!\n", __func__,
                             wr->opcode);
                        err = -EINVAL;
                }
                if (err) {
                        *bad_wr = wr;
                        break;
                }
                swsqe->idx = qhp->wq.sq.pidx;
                swsqe->complete = 0;
                swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
                swsqe->wr_id = wr->wr_id;

                init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

                PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
                     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
                     swsqe->opcode, swsqe->read_len);
                wr = wr->next;
                num_wrs--;
                t4_sq_produce(&qhp->wq);
                idx++;
        }
        if (t4_wq_db_enabled(&qhp->wq))
                t4_ring_sq_db(&qhp->wq, idx);
        spin_unlock_irqrestore(&qhp->lock, flag);
        return err;
}

int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr)
{
        int err = 0;
        struct c4iw_qp *qhp;
        union t4_recv_wr *wqe;
        u32 num_wrs;
        u8 len16 = 0;
        unsigned long flag;
        u16 idx = 0;

        qhp = to_c4iw_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
        if (t4_wq_in_error(&qhp->wq)) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        num_wrs = t4_rq_avail(&qhp->wq);
        if (num_wrs == 0) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -ENOMEM;
        }
        while (wr) {
                if (wr->num_sge > T4_MAX_RECV_SGE) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }
                wqe = &qhp->wq.rq.queue[qhp->wq.rq.pidx];
                if (num_wrs)
                        err = build_rdma_recv(qhp, wqe, wr, &len16);
                else
                        err = -ENOMEM;
                if (err) {
                        *bad_wr = wr;
                        break;
                }

                qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

                wqe->recv.opcode = FW_RI_RECV_WR;
                wqe->recv.r1 = 0;
                wqe->recv.wrid = qhp->wq.rq.pidx;
                wqe->recv.r2[0] = 0;
                wqe->recv.r2[1] = 0;
                wqe->recv.r2[2] = 0;
                wqe->recv.len16 = len16;
                if (len16 < 5)
                        wqe->flits[8] = 0;

                PDBG("%s cookie 0x%llx pidx %u\n", __func__,
                     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
                t4_rq_produce(&qhp->wq);
                wr = wr->next;
                num_wrs--;
                idx++;
        }
        if (t4_wq_db_enabled(&qhp->wq))
                t4_ring_rq_db(&qhp->wq, idx);
        spin_unlock_irqrestore(&qhp->lock, flag);
        return err;
}

int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
        return -ENOSYS;
}

static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
                                    u8 *ecode)
{
        int status;
        int tagged;
        int opcode;
        int rqtype;
        int send_inv;

        if (!err_cqe) {
                *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
                *ecode = 0;
                return;
        }

        status = CQE_STATUS(err_cqe);
        opcode = CQE_OPCODE(err_cqe);
        rqtype = RQ_TYPE(err_cqe);
        send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
                   (opcode == FW_RI_SEND_WITH_SE_INV);
        tagged = (opcode == FW_RI_RDMA_WRITE) ||
                 (rqtype && (opcode == FW_RI_READ_RESP));

        switch (status) {
        case T4_ERR_STAG:
                if (send_inv) {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                        *ecode = RDMAP_CANT_INV_STAG;
                } else {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                        *ecode = RDMAP_INV_STAG;
                }
                break;
        case T4_ERR_PDID:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                if ((opcode == FW_RI_SEND_WITH_INV) ||
                    (opcode == FW_RI_SEND_WITH_SE_INV))
                        *ecode = RDMAP_CANT_INV_STAG;
                else
                        *ecode = RDMAP_STAG_NOT_ASSOC;
                break;
        case T4_ERR_QPID:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                *ecode = RDMAP_STAG_NOT_ASSOC;
                break;
        case T4_ERR_ACCESS:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                *ecode = RDMAP_ACC_VIOL;
                break;
        case T4_ERR_WRAP:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                *ecode = RDMAP_TO_WRAP;
                break;
        case T4_ERR_BOUND:
                if (tagged) {
                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                        *ecode = DDPT_BASE_BOUNDS;
                } else {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                        *ecode = RDMAP_BASE_BOUNDS;
                }
                break;
        case T4_ERR_INVALIDATE_SHARED_MR:
        case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_CANT_INV_STAG;
                break;
        case T4_ERR_ECC:
        case T4_ERR_ECC_PSTAG:
        case T4_ERR_INTERNAL_ERR:
                *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
                *ecode = 0;
                break;
        case T4_ERR_OUT_OF_RQE:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MSN_NOBUF;
                break;
        case T4_ERR_PBL_ADDR_BOUND:
                *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                *ecode = DDPT_BASE_BOUNDS;
                break;
        case T4_ERR_CRC:
                *layer_type = LAYER_MPA|DDP_LLP;
                *ecode = MPA_CRC_ERR;
                break;
        case T4_ERR_MARKER:
                *layer_type = LAYER_MPA|DDP_LLP;
                *ecode = MPA_MARKER_ERR;
                break;
        case T4_ERR_PDU_LEN_ERR:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_MSG_TOOBIG;
                break;
        case T4_ERR_DDP_VERSION:
                if (tagged) {
                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                        *ecode = DDPT_INV_VERS;
                } else {
                        *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                        *ecode = DDPU_INV_VERS;
                }
                break;
        case T4_ERR_RDMA_VERSION:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_INV_VERS;
                break;
        case T4_ERR_OPCODE:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_INV_OPCODE;
                break;
        case T4_ERR_DDP_QUEUE_NUM:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_QN;
                break;
        case T4_ERR_MSN:
        case T4_ERR_MSN_GAP:
        case T4_ERR_MSN_RANGE:
        case T4_ERR_IRD_OVERFLOW:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MSN_RANGE;
                break;
        case T4_ERR_TBIT:
                *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
                *ecode = 0;
                break;
        case T4_ERR_MO:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MO;
                break;
        default:
                *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
                *ecode = 0;
                break;
        }
}

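/*
 * Post a zero-byte RDMA read directly on the offload queue, bypassing
 * the SQ.  The STag/offset values of 1 appear to be dummies that merely
 * satisfy the WR format for a zero-length transfer.
 */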
int c4iw_post_zb_read(struct c4iw_qp *qhp)
{
        union t4_wr *wqe;
        struct sk_buff *skb;
        u8 len16;

        PDBG("%s enter\n", __func__);
        skb = alloc_skb(40, GFP_KERNEL);
        if (!skb) {
                printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
                return -ENOMEM;
        }
        set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

        wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
        memset(wqe, 0, sizeof wqe->read);
        wqe->read.r2 = cpu_to_be64(0);
        wqe->read.stag_sink = cpu_to_be32(1);
        wqe->read.to_sink_hi = cpu_to_be32(0);
        wqe->read.to_sink_lo = cpu_to_be32(1);
        wqe->read.stag_src = cpu_to_be32(1);
        wqe->read.plen = cpu_to_be32(0);
        wqe->read.to_src_hi = cpu_to_be32(0);
        wqe->read.to_src_lo = cpu_to_be32(1);
        len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
        init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);

        return c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
                           gfp_t gfp)
{
        struct fw_ri_wr *wqe;
        struct sk_buff *skb;
        struct terminate_message *term;

        PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
             qhp->ep->hwtid);

        skb = alloc_skb(sizeof *wqe, gfp);
        if (!skb)
                return;
        set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

        wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
        memset(wqe, 0, sizeof *wqe);
        wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
        wqe->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID(qhp->ep->hwtid) |
                FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

        wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
        wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
        term = (struct terminate_message *)wqe->u.terminate.termmsg;
        build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
        c4iw_ofld_send(&qhp->rhp->rdev, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
                       struct c4iw_cq *schp, unsigned long *flag)
{
        int count;
        int flushed;

        PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
        /* take a ref on the qhp since we must release the lock */
        atomic_inc(&qhp->refcnt);
        spin_unlock_irqrestore(&qhp->lock, *flag);

        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&rchp->lock, *flag);
        spin_lock(&qhp->lock);
        c4iw_flush_hw_cq(&rchp->cq);
        c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
        flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&rchp->lock, *flag);
        if (flushed)
                (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);

        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&schp->lock, *flag);
        spin_lock(&qhp->lock);
        c4iw_flush_hw_cq(&schp->cq);
        c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
        flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&schp->lock, *flag);
        if (flushed)
                (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);

        /* deref */
        if (atomic_dec_and_test(&qhp->refcnt))
                wake_up(&qhp->wait);

        spin_lock_irqsave(&qhp->lock, *flag);
}

static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
{
        struct c4iw_cq *rchp, *schp;

        rchp = get_chp(qhp->rhp, qhp->attr.rcq);
        schp = get_chp(qhp->rhp, qhp->attr.scq);

        if (qhp->ibqp.uobject) {
                t4_set_wq_in_error(&qhp->wq);
                t4_set_cq_in_error(&rchp->cq);
                if (schp != rchp)
                        t4_set_cq_in_error(&schp->cq);
                return;
        }
        __flush_qp(qhp, rchp, schp, flag);
}

static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
        struct fw_ri_wr *wqe;
        int ret;
        struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;

        PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
             qhp->ep->hwtid);

        skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
        if (!skb)
                return -ENOMEM;
        set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

        wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
        memset(wqe, 0, sizeof *wqe);
        wqe->op_compl = cpu_to_be32(
                FW_WR_OP(FW_RI_INIT_WR) |
                FW_WR_COMPL(1));
        wqe->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID(qhp->ep->hwtid) |
                FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
        wqe->cookie = (u64)&wr_wait;

        wqe->u.fini.type = FW_RI_TYPE_FINI;
        c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(&rhp->rdev, skb);
        if (ret)
                goto out;

        wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
        if (!wr_wait.done) {
                printk(KERN_ERR MOD "Device %s not responding!\n",
                       pci_name(rhp->rdev.lldi.pdev));
                rhp->rdev.flags = T4_FATAL_ERROR;
                ret = -EIO;
        } else {
                ret = wr_wait.ret;
                if (ret)
                        printk(KERN_WARNING MOD
                               "%s: Abnormal close qpid %d ret %u\n",
                               pci_name(rhp->rdev.lldi.pdev), qhp->wq.sq.qid,
                               ret);
        }
out:
        PDBG("%s ret %d\n", __func__, ret);
        return ret;
}

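/*
 * Build the RTR template used for the MPA peer-to-peer modes: when this
 * side is the initiator, rdma_init() below hands the firmware a 0-byte
 * WRITE or READ WR, which presumably goes out as the first RTR message
 * on the connection.
 */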
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
        memset(&init->u, 0, sizeof init->u);
        switch (p2p_type) {
        case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
                init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
                init->u.write.stag_sink = cpu_to_be32(1);
                init->u.write.to_sink = cpu_to_be64(1);
                init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
                init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
                                                   sizeof(struct fw_ri_immd),
                                                   16);
                break;
        case FW_RI_INIT_P2PTYPE_READ_REQ:
                init->u.write.opcode = FW_RI_RDMA_READ_WR;
                init->u.read.stag_src = cpu_to_be32(1);
                init->u.read.to_src_lo = cpu_to_be32(1);
                init->u.read.stag_sink = cpu_to_be32(1);
                init->u.read.to_sink_lo = cpu_to_be32(1);
                init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
                break;
        }
}

static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
        struct fw_ri_wr *wqe;
        int ret;
        struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;

        PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
             qhp->ep->hwtid);

        skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
        if (!skb)
                return -ENOMEM;
        set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

        wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
        memset(wqe, 0, sizeof *wqe);
        wqe->op_compl = cpu_to_be32(
                FW_WR_OP(FW_RI_INIT_WR) |
                FW_WR_COMPL(1));
        wqe->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID(qhp->ep->hwtid) |
                FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

        wqe->cookie = (u64)&wr_wait;

        wqe->u.init.type = FW_RI_TYPE_INIT;
        wqe->u.init.mpareqbit_p2ptype =
                V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
                V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
        wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
        if (qhp->attr.mpa_attr.recv_marker_enabled)
                wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
        if (qhp->attr.mpa_attr.xmit_marker_enabled)
                wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
        if (qhp->attr.mpa_attr.crc_enabled)
                wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

        wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
                              FW_RI_QP_RDMA_WRITE_ENABLE |
                              FW_RI_QP_BIND_ENABLE;
        if (!qhp->ibqp.uobject)
                wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
                                       FW_RI_QP_STAG0_ENABLE;
        wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
        wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
        wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
        wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
        wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
        wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
        wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
        wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
        wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
        wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
        wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
        wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
        wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
                                           rhp->rdev.lldi.vr->rq.start);
        if (qhp->attr.mpa_attr.initiator)
                build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

        c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(&rhp->rdev, skb);
        if (ret)
                goto out;

        wait_event_timeout(wr_wait.wait, wr_wait.done, C4IW_WR_TO);
        if (!wr_wait.done) {
                printk(KERN_ERR MOD "Device %s not responding!\n",
                       pci_name(rhp->rdev.lldi.pdev));
                rhp->rdev.flags = T4_FATAL_ERROR;
                ret = -EIO;
        } else
                ret = wr_wait.ret;
out:
        PDBG("%s ret %d\n", __func__, ret);
        return ret;
}

int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                   enum c4iw_qp_attr_mask mask,
                   struct c4iw_qp_attributes *attrs,
                   int internal)
{
        int ret = 0;
        struct c4iw_qp_attributes newattr = qhp->attr;
        unsigned long flag;
        int disconnect = 0;
        int terminate = 0;
        int abort = 0;
        int free = 0;
        struct c4iw_ep *ep = NULL;

        PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
             qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
             (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

        spin_lock_irqsave(&qhp->lock, flag);

        /* Process attr changes if in IDLE */
        if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
                if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
                        ret = -EIO;
                        goto out;
                }
                if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
                        newattr.enable_rdma_read = attrs->enable_rdma_read;
                if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
                        newattr.enable_rdma_write = attrs->enable_rdma_write;
                if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
                        newattr.enable_bind = attrs->enable_bind;
                if (mask & C4IW_QP_ATTR_MAX_ORD) {
                        if (attrs->max_ord > c4iw_max_read_depth) {
                                ret = -EINVAL;
                                goto out;
                        }
                        newattr.max_ord = attrs->max_ord;
                }
                if (mask & C4IW_QP_ATTR_MAX_IRD) {
                        if (attrs->max_ird > c4iw_max_read_depth) {
                                ret = -EINVAL;
                                goto out;
                        }
                        newattr.max_ird = attrs->max_ird;
                }
                qhp->attr = newattr;
        }

        if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
                goto out;
        if (qhp->attr.state == attrs->next_state)
                goto out;

        switch (qhp->attr.state) {
        case C4IW_QP_STATE_IDLE:
                switch (attrs->next_state) {
                case C4IW_QP_STATE_RTS:
                        if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
                                ret = -EINVAL;
                                goto out;
                        }
                        if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
                                ret = -EINVAL;
                                goto out;
                        }
                        qhp->attr.mpa_attr = attrs->mpa_attr;
                        qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
                        qhp->ep = qhp->attr.llp_stream_handle;
                        qhp->attr.state = C4IW_QP_STATE_RTS;

                        /*
                         * Ref the endpoint here and deref when we
                         * disassociate the endpoint from the QP.  This
                         * happens in CLOSING->IDLE transition or *->ERROR
                         * transition.
                         */
                        c4iw_get_ep(&qhp->ep->com);
                        spin_unlock_irqrestore(&qhp->lock, flag);
                        ret = rdma_init(rhp, qhp);
                        spin_lock_irqsave(&qhp->lock, flag);
                        if (ret)
                                goto err;
                        break;
                case C4IW_QP_STATE_ERROR:
                        qhp->attr.state = C4IW_QP_STATE_ERROR;
                        flush_qp(qhp, &flag);
                        break;
                default:
                        ret = -EINVAL;
                        goto out;
                }
                break;
        case C4IW_QP_STATE_RTS:
                switch (attrs->next_state) {
                case C4IW_QP_STATE_CLOSING:
                        BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
                        qhp->attr.state = C4IW_QP_STATE_CLOSING;
                        if (!internal) {
                                abort = 0;
                                disconnect = 1;
                                ep = qhp->ep;
                                c4iw_get_ep(&ep->com);
                        }
                        spin_unlock_irqrestore(&qhp->lock, flag);
                        ret = rdma_fini(rhp, qhp);
                        spin_lock_irqsave(&qhp->lock, flag);
                        if (ret) {
                                ep = qhp->ep;
                                c4iw_get_ep(&ep->com);
                                disconnect = abort = 1;
                                goto err;
                        }
                        break;
                case C4IW_QP_STATE_TERMINATE:
                        qhp->attr.state = C4IW_QP_STATE_TERMINATE;
                        if (qhp->ibqp.uobject)
                                t4_set_wq_in_error(&qhp->wq);
                        ep = qhp->ep;
                        c4iw_get_ep(&ep->com);
                        terminate = 1;
                        disconnect = 1;
                        break;
                case C4IW_QP_STATE_ERROR:
                        qhp->attr.state = C4IW_QP_STATE_ERROR;
                        if (!internal) {
                                abort = 1;
                                disconnect = 1;
                                ep = qhp->ep;
                                c4iw_get_ep(&ep->com);
                        }
                        goto err;
                        break;
                default:
                        ret = -EINVAL;
                        goto out;
                }
                break;
        case C4IW_QP_STATE_CLOSING:
                if (!internal) {
                        ret = -EINVAL;
                        goto out;
                }
                switch (attrs->next_state) {
                case C4IW_QP_STATE_IDLE:
                        flush_qp(qhp, &flag);
                        qhp->attr.state = C4IW_QP_STATE_IDLE;
                        qhp->attr.llp_stream_handle = NULL;
                        c4iw_put_ep(&qhp->ep->com);
                        qhp->ep = NULL;
                        wake_up(&qhp->wait);
                        break;
                case C4IW_QP_STATE_ERROR:
                        goto err;
                default:
                        ret = -EINVAL;
                        goto err;
                }
                break;
        case C4IW_QP_STATE_ERROR:
                if (attrs->next_state != C4IW_QP_STATE_IDLE) {
                        ret = -EINVAL;
                        goto out;
                }
                if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
                        ret = -EINVAL;
                        goto out;
                }
                qhp->attr.state = C4IW_QP_STATE_IDLE;
                break;
        case C4IW_QP_STATE_TERMINATE:
                if (!internal) {
                        ret = -EINVAL;
                        goto out;
                }
                goto err;
                break;
        default:
                printk(KERN_ERR "%s in a bad state %d\n",
                       __func__, qhp->attr.state);
                ret = -EINVAL;
                goto err;
                break;
        }
        goto out;
err:
        PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
             qhp->wq.sq.qid);

        /* disassociate the LLP connection */
        qhp->attr.llp_stream_handle = NULL;
        ep = qhp->ep;
        qhp->ep = NULL;
        qhp->attr.state = C4IW_QP_STATE_ERROR;
        free = 1;
        wake_up(&qhp->wait);
        BUG_ON(!ep);
        flush_qp(qhp, &flag);
out:
        spin_unlock_irqrestore(&qhp->lock, flag);

        if (terminate)
                post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

        /*
         * If disconnect is 1, then we need to initiate a disconnect
         * on the EP.  This can be a normal close (RTS->CLOSING) or
         * an abnormal close (RTS/CLOSING->ERROR).
         */
        if (disconnect) {
                c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
                                   GFP_KERNEL);
                c4iw_put_ep(&ep->com);
        }

        /*
         * If free is 1, then we've disassociated the EP from the QP
         * and we need to dereference the EP.
         */
        if (free)
                c4iw_put_ep(&ep->com);

        PDBG("%s exit state %d\n", __func__, qhp->attr.state);
        return ret;
}

int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
        struct c4iw_dev *rhp;
        struct c4iw_qp *qhp;
        struct c4iw_qp_attributes attrs;
        struct c4iw_ucontext *ucontext;

        qhp = to_c4iw_qp(ib_qp);
        rhp = qhp->rhp;

        attrs.next_state = C4IW_QP_STATE_ERROR;
        c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
        wait_event(qhp->wait, !qhp->ep);

        remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
        remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
        atomic_dec(&qhp->refcnt);
        wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

        ucontext = ib_qp->uobject ?
                   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
        destroy_qp(&rhp->rdev, &qhp->wq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

        PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
        kfree(qhp);
        return 0;
}

struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata)
{
        struct c4iw_dev *rhp;
        struct c4iw_qp *qhp;
        struct c4iw_pd *php;
        struct c4iw_cq *schp;
        struct c4iw_cq *rchp;
        struct c4iw_create_qp_resp uresp;
        int sqsize, rqsize;
        struct c4iw_ucontext *ucontext;
        int ret;
        struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4;

        PDBG("%s ib_pd %p\n", __func__, pd);

        if (attrs->qp_type != IB_QPT_RC)
                return ERR_PTR(-EINVAL);

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
        rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
        if (!schp || !rchp)
                return ERR_PTR(-EINVAL);

        if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
                return ERR_PTR(-EINVAL);

        rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
        if (rqsize > T4_MAX_RQ_SIZE)
                return ERR_PTR(-E2BIG);

        sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
        if (sqsize > T4_MAX_SQ_SIZE)
                return ERR_PTR(-E2BIG);
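        /*
         * Example: max_send_wr = 50 gives sqsize = roundup(51, 16) = 64;
         * the +1 presumably keeps one slot unusable so a full ring can be
         * distinguished from an empty one.
         */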

        ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

        qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
        if (!qhp)
                return ERR_PTR(-ENOMEM);
        qhp->wq.sq.size = sqsize;
        qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
        qhp->wq.rq.size = rqsize;
        qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;

        if (ucontext) {
                qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
                qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
        }

        PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
             __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

        ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        if (ret)
                goto err1;

        attrs->cap.max_recv_wr = rqsize - 1;
        attrs->cap.max_send_wr = sqsize - 1;
        attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

        qhp->rhp = rhp;
        qhp->attr.pd = php->pdid;
        qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
        qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
        qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
        qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
        qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
        qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
        qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
        qhp->attr.state = C4IW_QP_STATE_IDLE;
        qhp->attr.next_state = C4IW_QP_STATE_IDLE;
        qhp->attr.enable_rdma_read = 1;
        qhp->attr.enable_rdma_write = 1;
        qhp->attr.enable_bind = 1;
        qhp->attr.max_ord = 1;
        qhp->attr.max_ird = 1;
        spin_lock_init(&qhp->lock);
        init_waitqueue_head(&qhp->wait);
        atomic_set(&qhp->refcnt, 1);

        ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
        if (ret)
                goto err2;

        ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.rq.qid);
        if (ret)
                goto err3;

        if (udata) {
                mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
                if (!mm1) {
                        ret = -ENOMEM;
                        goto err4;
                }
                mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                if (!mm2) {
                        ret = -ENOMEM;
                        goto err5;
                }
                mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
                if (!mm3) {
                        ret = -ENOMEM;
                        goto err6;
                }
                mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
                if (!mm4) {
                        ret = -ENOMEM;
                        goto err7;
                }

                uresp.qid_mask = rhp->rdev.qpmask;
                uresp.sqid = qhp->wq.sq.qid;
                uresp.sq_size = qhp->wq.sq.size;
                uresp.sq_memsize = qhp->wq.sq.memsize;
                uresp.rqid = qhp->wq.rq.qid;
                uresp.rq_size = qhp->wq.rq.size;
                uresp.rq_memsize = qhp->wq.rq.memsize;
                spin_lock(&ucontext->mmap_lock);
                uresp.sq_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.rq_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.sq_db_gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.rq_db_gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
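                /*
                 * Four mmap cookies are handed back to userspace, one
                 * page apart: SQ memory, RQ memory, and the two
                 * doorbell/GTS pages.  Userspace passes each key as the
                 * mmap() offset, and insert_mmap() below records what
                 * the key maps to.
                 */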
                ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
                if (ret)
                        goto err8;
                mm1->key = uresp.sq_key;
                mm1->addr = virt_to_phys(qhp->wq.sq.queue);
                mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
                insert_mmap(ucontext, mm1);
                mm2->key = uresp.rq_key;
                mm2->addr = virt_to_phys(qhp->wq.rq.queue);
                mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
                insert_mmap(ucontext, mm2);
                mm3->key = uresp.sq_db_gts_key;
                mm3->addr = qhp->wq.sq.udb;
                mm3->len = PAGE_SIZE;
                insert_mmap(ucontext, mm3);
                mm4->key = uresp.rq_db_gts_key;
                mm4->addr = qhp->wq.rq.udb;
                mm4->len = PAGE_SIZE;
                insert_mmap(ucontext, mm4);
        }
        qhp->ibqp.qp_num = qhp->wq.sq.qid;
        init_timer(&(qhp->timer));
        PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
             __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
             qhp->wq.sq.qid);
        return &qhp->ibqp;
err8:
        kfree(mm4);
err7:
        kfree(mm3);
err6:
        kfree(mm2);
err5:
        kfree(mm1);
err4:
        remove_handle(rhp, &rhp->qpidr, qhp->wq.rq.qid);
err3:
        remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
        destroy_qp(&rhp->rdev, &qhp->wq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
        kfree(qhp);
        return ERR_PTR(ret);
}

int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata)
{
        struct c4iw_dev *rhp;
        struct c4iw_qp *qhp;
        enum c4iw_qp_attr_mask mask = 0;
        struct c4iw_qp_attributes attrs;

        PDBG("%s ib_qp %p\n", __func__, ibqp);

        /* iwarp does not support the RTR state */
        if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
                attr_mask &= ~IB_QP_STATE;

        /* Make sure we still have something left to do */
        if (!attr_mask)
                return 0;

        memset(&attrs, 0, sizeof attrs);
        qhp = to_c4iw_qp(ibqp);
        rhp = qhp->rhp;

        attrs.next_state = c4iw_convert_state(attr->qp_state);
        attrs.enable_rdma_read = (attr->qp_access_flags &
                                  IB_ACCESS_REMOTE_READ) ? 1 : 0;
        attrs.enable_rdma_write = (attr->qp_access_flags &
                                   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

        mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
        mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
                (C4IW_QP_ATTR_ENABLE_RDMA_READ |
                 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
                 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

        return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
        PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
        return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}