IB/ehca: Use common error code mapping instead of specific ones
[GitHub/mt8127/android_kernel_alcatel_ttab.git] drivers/infiniband/hw/ehca/ehca_mrmw.c
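The "common error code mapping" in the subject line is the ehca2ib_return_code() helper from ehca_tools.h, which the error paths below call after each failed hCall instead of hand-picking an errno per call site. A minimal sketch of such a mapping, assuming the usual pSeries hypervisor return codes (hedged; the in-tree helper may translate more codes):

static inline int ehca2ib_return_code(u64 ehca_rc)
{
	switch (ehca_rc) {
	case H_SUCCESS:
		return 0;
	case H_BUSY:
		return -EBUSY;
	case H_NO_MEM:
		return -ENOMEM;
	default:
		return -EINVAL;
	}
}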
1 /*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * MR/MW functions
5 *
6 * Authors: Dietmar Decker <ddecker@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42 #include <rdma/ib_umem.h>
43
44 #include <asm/current.h>
45
46 #include "ehca_iverbs.h"
47 #include "ehca_mrmw.h"
48 #include "hcp_if.h"
49 #include "hipz_hw.h"
50
51 static struct kmem_cache *mr_cache;
52 static struct kmem_cache *mw_cache;
53
54 static struct ehca_mr *ehca_mr_new(void)
55 {
56 struct ehca_mr *me;
57
58 me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
59 if (me) {
60 spin_lock_init(&me->mrlock);
61 } else
62 ehca_gen_err("alloc failed");
63
64 return me;
65 }
66
67 static void ehca_mr_delete(struct ehca_mr *me)
68 {
69 kmem_cache_free(mr_cache, me);
70 }
71
72 static struct ehca_mw *ehca_mw_new(void)
73 {
74 struct ehca_mw *me;
75
76 me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
77 if (me) {
78 spin_lock_init(&me->mwlock);
79 } else
80 ehca_gen_err("alloc failed");
81
82 return me;
83 }
84
85 static void ehca_mw_delete(struct ehca_mw *me)
86 {
87 kmem_cache_free(mw_cache, me);
88 }
89
90 /*----------------------------------------------------------------------*/
91
92 struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
93 {
94 struct ib_mr *ib_mr;
95 int ret;
96 struct ehca_mr *e_maxmr;
97 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
98 struct ehca_shca *shca =
99 container_of(pd->device, struct ehca_shca, ib_device);
100
101 if (shca->maxmr) {
102 e_maxmr = ehca_mr_new();
103 if (!e_maxmr) {
104 ehca_err(&shca->ib_device, "out of memory");
105 ib_mr = ERR_PTR(-ENOMEM);
106 goto get_dma_mr_exit0;
107 }
108
109 		ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
110 mr_access_flags, e_pd,
111 &e_maxmr->ib.ib_mr.lkey,
112 &e_maxmr->ib.ib_mr.rkey);
113 if (ret) {
114 ehca_mr_delete(e_maxmr);
115 ib_mr = ERR_PTR(ret);
116 goto get_dma_mr_exit0;
117 }
118 ib_mr = &e_maxmr->ib.ib_mr;
119 } else {
120 		ehca_err(&shca->ib_device, "no internal max-MR exists!");
121 ib_mr = ERR_PTR(-EINVAL);
122 goto get_dma_mr_exit0;
123 }
124
125 get_dma_mr_exit0:
126 if (IS_ERR(ib_mr))
127 ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x ",
128 PTR_ERR(ib_mr), pd, mr_access_flags);
129 return ib_mr;
130 } /* end ehca_get_dma_mr() */
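/*
 * Editorial usage sketch (hedged, not part of the original file): a kernel
 * consumer reaches ehca_get_dma_mr() through the generic verbs layer, e.g.
 *
 *	struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *
 * which dispatches here via the ib_device function table the driver fills
 * in at probe time.
 */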
131
132 /*----------------------------------------------------------------------*/
133
134 struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
135 struct ib_phys_buf *phys_buf_array,
136 int num_phys_buf,
137 int mr_access_flags,
138 u64 *iova_start)
139 {
140 struct ib_mr *ib_mr;
141 int ret;
142 struct ehca_mr *e_mr;
143 struct ehca_shca *shca =
144 container_of(pd->device, struct ehca_shca, ib_device);
145 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
146
147 u64 size;
148 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
149 u32 num_pages_mr;
150 u32 num_pages_4k; /* 4k portion "pages" */
151
152 if ((num_phys_buf <= 0) || !phys_buf_array) {
153 ehca_err(pd->device, "bad input values: num_phys_buf=%x "
154 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
155 ib_mr = ERR_PTR(-EINVAL);
156 goto reg_phys_mr_exit0;
157 }
158 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
159 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
160 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
161 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
162 /*
163 * Remote Write Access requires Local Write Access
164 * Remote Atomic Access requires Local Write Access
165 */
166 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
167 mr_access_flags);
168 ib_mr = ERR_PTR(-EINVAL);
169 goto reg_phys_mr_exit0;
170 }
171
172 /* check physical buffer list and calculate size */
173 ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
174 iova_start, &size);
175 if (ret) {
176 ib_mr = ERR_PTR(ret);
177 goto reg_phys_mr_exit0;
178 }
179 if ((size == 0) ||
180 (((u64)iova_start + size) < (u64)iova_start)) {
181 ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
182 size, iova_start);
183 ib_mr = ERR_PTR(-EINVAL);
184 goto reg_phys_mr_exit0;
185 }
186
187 e_mr = ehca_mr_new();
188 if (!e_mr) {
189 ehca_err(pd->device, "out of memory");
190 ib_mr = ERR_PTR(-ENOMEM);
191 goto reg_phys_mr_exit0;
192 }
193
194 /* determine number of MR pages */
195 num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size +
196 PAGE_SIZE - 1) / PAGE_SIZE);
197 num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size +
198 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
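	/*
	 * Editorial worked example (assuming PAGE_SIZE = EHCA_PAGESIZE = 4K):
	 * iova_start = 0x10000100 and size = 0x3000 yield
	 * num_pages_mr = (0x100 + 0x3000 + 0xfff) / 0x1000 = 4, because the
	 * region touches four page frames although it is only three pages long.
	 */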
199
200 /* register MR on HCA */
201 if (ehca_mr_is_maxmr(size, iova_start)) {
202 e_mr->flags |= EHCA_MR_FLAG_MAXMR;
203 ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
204 e_pd, &e_mr->ib.ib_mr.lkey,
205 &e_mr->ib.ib_mr.rkey);
206 if (ret) {
207 ib_mr = ERR_PTR(ret);
208 goto reg_phys_mr_exit1;
209 }
210 } else {
211 pginfo.type = EHCA_MR_PGI_PHYS;
212 pginfo.num_pages = num_pages_mr;
213 pginfo.num_4k = num_pages_4k;
214 pginfo.num_phys_buf = num_phys_buf;
215 pginfo.phys_buf_array = phys_buf_array;
216 pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
217 EHCA_PAGESIZE);
218
219 ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
220 e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
221 &e_mr->ib.ib_mr.rkey);
222 if (ret) {
223 ib_mr = ERR_PTR(ret);
224 goto reg_phys_mr_exit1;
225 }
226 }
227
228 /* successful registration of all pages */
229 return &e_mr->ib.ib_mr;
230
231 reg_phys_mr_exit1:
232 ehca_mr_delete(e_mr);
233 reg_phys_mr_exit0:
234 if (IS_ERR(ib_mr))
235 ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
236 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
237 PTR_ERR(ib_mr), pd, phys_buf_array,
238 num_phys_buf, mr_access_flags, iova_start);
239 return ib_mr;
240 } /* end ehca_reg_phys_mr() */
241
242 /*----------------------------------------------------------------------*/
243
244 struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
245 int mr_access_flags, struct ib_udata *udata)
246 {
247 	struct ib_mr *ib_mr;
248 	struct ehca_mr *e_mr;
249 	struct ehca_shca *shca;
250 	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
251 	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
252 	int ret;
253 	u32 num_pages_mr;
254 	u32 num_pages_4k; /* 4k portion "pages" */
255 
256 	if (!pd) { /* must be checked before pd->device is dereferenced */
257 		ehca_gen_err("bad pd=%p", pd);
258 		return ERR_PTR(-EFAULT);
259 	}
260 	shca = container_of(pd->device, struct ehca_shca, ib_device);
261 
262 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
263 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
264 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
265 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
266 /*
267 * Remote Write Access requires Local Write Access
268 * Remote Atomic Access requires Local Write Access
269 */
270 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
271 mr_access_flags);
272 ib_mr = ERR_PTR(-EINVAL);
273 goto reg_user_mr_exit0;
274 }
275
276 if (length == 0 || virt + length < virt) {
277 ehca_err(pd->device, "bad input values: length=%lx "
278 "virt_base=%lx", length, virt);
279 ib_mr = ERR_PTR(-EINVAL);
280 goto reg_user_mr_exit0;
281 }
282
283 e_mr = ehca_mr_new();
284 if (!e_mr) {
285 ehca_err(pd->device, "out of memory");
286 ib_mr = ERR_PTR(-ENOMEM);
287 goto reg_user_mr_exit0;
288 }
289
290 e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
291 mr_access_flags);
292 if (IS_ERR(e_mr->umem)) {
293 ib_mr = (void *) e_mr->umem;
294 goto reg_user_mr_exit1;
295 }
296
297 if (e_mr->umem->page_size != PAGE_SIZE) {
298 ehca_err(pd->device, "page size not supported, "
299 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
300 ib_mr = ERR_PTR(-EINVAL);
301 goto reg_user_mr_exit2;
302 }
303
304 /* determine number of MR pages */
305 num_pages_mr = (((virt % PAGE_SIZE) + length + PAGE_SIZE - 1) /
306 PAGE_SIZE);
307 num_pages_4k = (((virt % EHCA_PAGESIZE) + length + EHCA_PAGESIZE - 1) /
308 EHCA_PAGESIZE);
309
310 /* register MR on HCA */
311 pginfo.type = EHCA_MR_PGI_USER;
312 pginfo.num_pages = num_pages_mr;
313 pginfo.num_4k = num_pages_4k;
314 pginfo.region = e_mr->umem;
315 pginfo.next_4k = e_mr->umem->offset / EHCA_PAGESIZE;
316 pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
317 (&e_mr->umem->chunk_list),
318 list);
319
320 	ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags, e_pd,
321 &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
322 if (ret) {
323 ib_mr = ERR_PTR(ret);
324 goto reg_user_mr_exit2;
325 }
326
327 /* successful registration of all pages */
328 return &e_mr->ib.ib_mr;
329
330 reg_user_mr_exit2:
331 ib_umem_release(e_mr->umem);
332 reg_user_mr_exit1:
333 ehca_mr_delete(e_mr);
334 reg_user_mr_exit0:
335 if (IS_ERR(ib_mr))
336 ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x"
337 " udata=%p",
338 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
339 return ib_mr;
340 } /* end ehca_reg_user_mr() */
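/*
 * Editorial usage sketch (hedged): from user space this entry point is
 * reached through libibverbs, e.g.
 *
 *	struct ibv_mr *mr = ibv_reg_mr(pd, buf, len,
 *				       IBV_ACCESS_LOCAL_WRITE |
 *				       IBV_ACCESS_REMOTE_WRITE);
 *
 * Requesting IBV_ACCESS_REMOTE_WRITE without IBV_ACCESS_LOCAL_WRITE is
 * rejected with EINVAL by the flag check above, mirroring the IB spec rule
 * that remote write access requires local write access.
 */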
341
342 /*----------------------------------------------------------------------*/
343
344 int ehca_rereg_phys_mr(struct ib_mr *mr,
345 int mr_rereg_mask,
346 struct ib_pd *pd,
347 struct ib_phys_buf *phys_buf_array,
348 int num_phys_buf,
349 int mr_access_flags,
350 u64 *iova_start)
351 {
352 int ret;
353
354 struct ehca_shca *shca =
355 container_of(mr->device, struct ehca_shca, ib_device);
356 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
357 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
358 u64 new_size;
359 u64 *new_start;
360 u32 new_acl;
361 struct ehca_pd *new_pd;
362 u32 tmp_lkey, tmp_rkey;
363 unsigned long sl_flags;
364 u32 num_pages_mr = 0;
365 u32 num_pages_4k = 0; /* 4k portion "pages" */
366 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
367 u32 cur_pid = current->tgid;
368
369 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
370 (my_pd->ownpid != cur_pid)) {
371 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
372 cur_pid, my_pd->ownpid);
373 ret = -EINVAL;
374 goto rereg_phys_mr_exit0;
375 }
376
377 if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
378 /* TODO not supported, because PHYP rereg hCall needs pages */
379 ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
380 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
381 ret = -EINVAL;
382 goto rereg_phys_mr_exit0;
383 }
384
385 if (mr_rereg_mask & IB_MR_REREG_PD) {
386 if (!pd) {
387 ehca_err(mr->device, "rereg with bad pd, pd=%p "
388 "mr_rereg_mask=%x", pd, mr_rereg_mask);
389 ret = -EINVAL;
390 goto rereg_phys_mr_exit0;
391 }
392 }
393
394 if ((mr_rereg_mask &
395 ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
396 (mr_rereg_mask == 0)) {
397 ret = -EINVAL;
398 goto rereg_phys_mr_exit0;
399 }
400
401 /* check other parameters */
402 if (e_mr == shca->maxmr) {
403 /* should be impossible, however reject to be sure */
404 ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
405 "shca->maxmr=%p mr->lkey=%x",
406 mr, shca->maxmr, mr->lkey);
407 ret = -EINVAL;
408 goto rereg_phys_mr_exit0;
409 }
410 if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
411 if (e_mr->flags & EHCA_MR_FLAG_FMR) {
412 ehca_err(mr->device, "not supported for FMR, mr=%p "
413 "flags=%x", mr, e_mr->flags);
414 ret = -EINVAL;
415 goto rereg_phys_mr_exit0;
416 }
417 if (!phys_buf_array || num_phys_buf <= 0) {
418 ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
419 " phys_buf_array=%p num_phys_buf=%x",
420 mr_rereg_mask, phys_buf_array, num_phys_buf);
421 ret = -EINVAL;
422 goto rereg_phys_mr_exit0;
423 }
424 }
425 if ((mr_rereg_mask & IB_MR_REREG_ACCESS) && /* change ACL */
426 (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
427 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
428 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
429 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
430 /*
431 * Remote Write Access requires Local Write Access
432 * Remote Atomic Access requires Local Write Access
433 */
434 ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
435 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
436 ret = -EINVAL;
437 goto rereg_phys_mr_exit0;
438 }
439
440 /* set requested values dependent on rereg request */
441 spin_lock_irqsave(&e_mr->mrlock, sl_flags);
442 new_start = e_mr->start; /* new == old address */
443 new_size = e_mr->size; /* new == old length */
444 new_acl = e_mr->acl; /* new == old access control */
445 	new_pd = container_of(mr->pd, struct ehca_pd, ib_pd); /* new == old PD */
446
447 if (mr_rereg_mask & IB_MR_REREG_TRANS) {
448 new_start = iova_start; /* change address */
449 /* check physical buffer list and calculate size */
450 ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
451 num_phys_buf, iova_start,
452 &new_size);
453 if (ret)
454 goto rereg_phys_mr_exit1;
455 if ((new_size == 0) ||
456 (((u64)iova_start + new_size) < (u64)iova_start)) {
457 ehca_err(mr->device, "bad input values: new_size=%lx "
458 "iova_start=%p", new_size, iova_start);
459 ret = -EINVAL;
460 goto rereg_phys_mr_exit1;
461 }
462 num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size +
463 PAGE_SIZE - 1) / PAGE_SIZE);
464 num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size +
465 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
466 pginfo.type = EHCA_MR_PGI_PHYS;
467 pginfo.num_pages = num_pages_mr;
468 pginfo.num_4k = num_pages_4k;
469 pginfo.num_phys_buf = num_phys_buf;
470 pginfo.phys_buf_array = phys_buf_array;
471 pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
472 EHCA_PAGESIZE);
473 }
474 if (mr_rereg_mask & IB_MR_REREG_ACCESS)
475 new_acl = mr_access_flags;
476 if (mr_rereg_mask & IB_MR_REREG_PD)
477 new_pd = container_of(pd, struct ehca_pd, ib_pd);
478
479 ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
480 new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
481 if (ret)
482 goto rereg_phys_mr_exit1;
483
484 /* successful reregistration */
485 if (mr_rereg_mask & IB_MR_REREG_PD)
486 mr->pd = pd;
487 mr->lkey = tmp_lkey;
488 mr->rkey = tmp_rkey;
489
490 rereg_phys_mr_exit1:
491 spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
492 rereg_phys_mr_exit0:
493 if (ret)
494 ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
495 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
496 "iova_start=%p",
497 ret, mr, mr_rereg_mask, pd, phys_buf_array,
498 num_phys_buf, mr_access_flags, iova_start);
499 return ret;
500 } /* end ehca_rereg_phys_mr() */
501
502 /*----------------------------------------------------------------------*/
503
504 int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
505 {
506 int ret = 0;
507 u64 h_ret;
508 struct ehca_shca *shca =
509 container_of(mr->device, struct ehca_shca, ib_device);
510 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
511 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
512 u32 cur_pid = current->tgid;
513 unsigned long sl_flags;
514 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
515
516 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
517 (my_pd->ownpid != cur_pid)) {
518 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
519 cur_pid, my_pd->ownpid);
520 ret = -EINVAL;
521 goto query_mr_exit0;
522 }
523
524 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
525 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
526 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
527 ret = -EINVAL;
528 goto query_mr_exit0;
529 }
530
531 memset(mr_attr, 0, sizeof(struct ib_mr_attr));
532 spin_lock_irqsave(&e_mr->mrlock, sl_flags);
533
534 h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
535 if (h_ret != H_SUCCESS) {
536 ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
537 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
538 h_ret, mr, shca->ipz_hca_handle.handle,
539 e_mr->ipz_mr_handle.handle, mr->lkey);
540 ret = ehca2ib_return_code(h_ret);
541 goto query_mr_exit1;
542 }
543 mr_attr->pd = mr->pd;
544 mr_attr->device_virt_addr = hipzout.vaddr;
545 mr_attr->size = hipzout.len;
546 mr_attr->lkey = hipzout.lkey;
547 mr_attr->rkey = hipzout.rkey;
548 ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);
549
550 query_mr_exit1:
551 spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
552 query_mr_exit0:
553 if (ret)
554 ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
555 ret, mr, mr_attr);
556 return ret;
557 } /* end ehca_query_mr() */
558
559 /*----------------------------------------------------------------------*/
560
561 int ehca_dereg_mr(struct ib_mr *mr)
562 {
563 int ret = 0;
564 u64 h_ret;
565 struct ehca_shca *shca =
566 container_of(mr->device, struct ehca_shca, ib_device);
567 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
568 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
569 u32 cur_pid = current->tgid;
570
571 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
572 (my_pd->ownpid != cur_pid)) {
573 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
574 cur_pid, my_pd->ownpid);
575 ret = -EINVAL;
576 goto dereg_mr_exit0;
577 }
578
579 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
580 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
581 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
582 ret = -EINVAL;
583 goto dereg_mr_exit0;
584 } else if (e_mr == shca->maxmr) {
585 /* should be impossible, however reject to be sure */
586 ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
587 "shca->maxmr=%p mr->lkey=%x",
588 mr, shca->maxmr, mr->lkey);
589 ret = -EINVAL;
590 goto dereg_mr_exit0;
591 }
592
593 /* TODO: BUSY: MR still has bound window(s) */
594 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
595 if (h_ret != H_SUCCESS) {
596 ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
597 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
598 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
599 e_mr->ipz_mr_handle.handle, mr->lkey);
600 ret = ehca2ib_return_code(h_ret);
601 goto dereg_mr_exit0;
602 }
603
604 if (e_mr->umem)
605 ib_umem_release(e_mr->umem);
606
607 /* successful deregistration */
608 ehca_mr_delete(e_mr);
609
610 dereg_mr_exit0:
611 if (ret)
612 ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
613 return ret;
614 } /* end ehca_dereg_mr() */
615
616 /*----------------------------------------------------------------------*/
617
618 struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
619 {
620 struct ib_mw *ib_mw;
621 u64 h_ret;
622 struct ehca_mw *e_mw;
623 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
624 struct ehca_shca *shca =
625 container_of(pd->device, struct ehca_shca, ib_device);
626 struct ehca_mw_hipzout_parms hipzout = {{0},0};
627
628 e_mw = ehca_mw_new();
629 if (!e_mw) {
630 ib_mw = ERR_PTR(-ENOMEM);
631 goto alloc_mw_exit0;
632 }
633
634 h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
635 e_pd->fw_pd, &hipzout);
636 if (h_ret != H_SUCCESS) {
637 ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
638 "shca=%p hca_hndl=%lx mw=%p",
639 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
640 ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
641 goto alloc_mw_exit1;
642 }
643 /* successful MW allocation */
644 e_mw->ipz_mw_handle = hipzout.handle;
645 e_mw->ib_mw.rkey = hipzout.rkey;
646 return &e_mw->ib_mw;
647
648 alloc_mw_exit1:
649 ehca_mw_delete(e_mw);
650 alloc_mw_exit0:
651 if (IS_ERR(ib_mw))
652 ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
653 return ib_mw;
654 } /* end ehca_alloc_mw() */
655
656 /*----------------------------------------------------------------------*/
657
658 int ehca_bind_mw(struct ib_qp *qp,
659 struct ib_mw *mw,
660 struct ib_mw_bind *mw_bind)
661 {
662 /* TODO: not supported up to now */
663 ehca_gen_err("bind MW currently not supported by HCAD");
664
665 return -EPERM;
666 } /* end ehca_bind_mw() */
667
668 /*----------------------------------------------------------------------*/
669
670 int ehca_dealloc_mw(struct ib_mw *mw)
671 {
672 u64 h_ret;
673 struct ehca_shca *shca =
674 container_of(mw->device, struct ehca_shca, ib_device);
675 struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
676
677 h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
678 if (h_ret != H_SUCCESS) {
679 ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
680 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
681 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
682 e_mw->ipz_mw_handle.handle);
683 return ehca2ib_return_code(h_ret);
684 }
685 /* successful deallocation */
686 ehca_mw_delete(e_mw);
687 return 0;
688 } /* end ehca_dealloc_mw() */
689
690 /*----------------------------------------------------------------------*/
691
692 struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
693 int mr_access_flags,
694 struct ib_fmr_attr *fmr_attr)
695 {
696 struct ib_fmr *ib_fmr;
697 struct ehca_shca *shca =
698 container_of(pd->device, struct ehca_shca, ib_device);
699 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
700 struct ehca_mr *e_fmr;
701 int ret;
702 u32 tmp_lkey, tmp_rkey;
703 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
704
705 /* check other parameters */
706 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
707 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
708 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
709 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
710 /*
711 * Remote Write Access requires Local Write Access
712 * Remote Atomic Access requires Local Write Access
713 */
714 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
715 mr_access_flags);
716 ib_fmr = ERR_PTR(-EINVAL);
717 goto alloc_fmr_exit0;
718 }
719 if (mr_access_flags & IB_ACCESS_MW_BIND) {
720 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
721 mr_access_flags);
722 ib_fmr = ERR_PTR(-EINVAL);
723 goto alloc_fmr_exit0;
724 }
725 if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
726 ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
727 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
728 fmr_attr->max_pages, fmr_attr->max_maps,
729 fmr_attr->page_shift);
730 ib_fmr = ERR_PTR(-EINVAL);
731 goto alloc_fmr_exit0;
732 }
733 if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
734 ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
735 ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
736 fmr_attr->page_shift);
737 ib_fmr = ERR_PTR(-EINVAL);
738 goto alloc_fmr_exit0;
739 }
740
741 e_fmr = ehca_mr_new();
742 if (!e_fmr) {
743 ib_fmr = ERR_PTR(-ENOMEM);
744 goto alloc_fmr_exit0;
745 }
746 e_fmr->flags |= EHCA_MR_FLAG_FMR;
747
748 /* register MR on HCA */
749 ret = ehca_reg_mr(shca, e_fmr, NULL,
750 fmr_attr->max_pages * (1 << fmr_attr->page_shift),
751 mr_access_flags, e_pd, &pginfo,
752 &tmp_lkey, &tmp_rkey);
753 if (ret) {
754 ib_fmr = ERR_PTR(ret);
755 goto alloc_fmr_exit1;
756 }
757
758 /* successful */
759 e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
760 e_fmr->fmr_max_pages = fmr_attr->max_pages;
761 e_fmr->fmr_max_maps = fmr_attr->max_maps;
762 e_fmr->fmr_map_cnt = 0;
763 return &e_fmr->ib.ib_fmr;
764
765 alloc_fmr_exit1:
766 ehca_mr_delete(e_fmr);
767 alloc_fmr_exit0:
768 if (IS_ERR(ib_fmr))
769 ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
770 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
771 mr_access_flags, fmr_attr);
772 return ib_fmr;
773 } /* end ehca_alloc_fmr() */
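/*
 * Editorial example for the page_shift check above: with EHCA_PAGESIZE = 4K
 * and a 64K kernel PAGE_SIZE, only fmr_attr->page_shift = 12 or 16 is
 * accepted, since (1 << page_shift) must equal one of the two page sizes.
 */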
774
775 /*----------------------------------------------------------------------*/
776
777 int ehca_map_phys_fmr(struct ib_fmr *fmr,
778 u64 *page_list,
779 int list_len,
780 u64 iova)
781 {
782 int ret;
783 struct ehca_shca *shca =
784 container_of(fmr->device, struct ehca_shca, ib_device);
785 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
786 struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
787 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
788 u32 tmp_lkey, tmp_rkey;
789
790 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
791 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
792 e_fmr, e_fmr->flags);
793 ret = -EINVAL;
794 goto map_phys_fmr_exit0;
795 }
796 ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
797 if (ret)
798 goto map_phys_fmr_exit0;
799 if (iova % e_fmr->fmr_page_size) {
800 		/* iova must be a multiple of the FMR page size */
801 ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
802 iova, e_fmr->fmr_page_size);
803 ret = -EINVAL;
804 goto map_phys_fmr_exit0;
805 }
806 if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
807 /* HCAD does not limit the maps, however trace this anyway */
808 ehca_info(fmr->device, "map limit exceeded, fmr=%p "
809 "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
810 fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
811 }
812
813 pginfo.type = EHCA_MR_PGI_FMR;
814 pginfo.num_pages = list_len;
815 pginfo.num_4k = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
816 pginfo.page_list = page_list;
817 pginfo.next_4k = ((iova & (e_fmr->fmr_page_size-1)) /
818 EHCA_PAGESIZE);
819
820 	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
821 list_len * e_fmr->fmr_page_size,
822 e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
823 if (ret)
824 goto map_phys_fmr_exit0;
825
826 /* successful reregistration */
827 e_fmr->fmr_map_cnt++;
828 e_fmr->ib.ib_fmr.lkey = tmp_lkey;
829 e_fmr->ib.ib_fmr.rkey = tmp_rkey;
830 return 0;
831
832 map_phys_fmr_exit0:
833 if (ret)
834 ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
835 "iova=%lx",
836 ret, fmr, page_list, list_len, iova);
837 return ret;
838 } /* end ehca_map_phys_fmr() */
839
840 /*----------------------------------------------------------------------*/
841
842 int ehca_unmap_fmr(struct list_head *fmr_list)
843 {
844 int ret = 0;
845 struct ib_fmr *ib_fmr;
846 struct ehca_shca *shca = NULL;
847 struct ehca_shca *prev_shca;
848 struct ehca_mr *e_fmr;
849 u32 num_fmr = 0;
850 u32 unmap_fmr_cnt = 0;
851
852 	/* check that all FMRs belong to the same SHCA, and check internal flag */
853 list_for_each_entry(ib_fmr, fmr_list, list) {
854 prev_shca = shca;
855 if (!ib_fmr) {
856 ehca_gen_err("bad fmr=%p in list", ib_fmr);
857 ret = -EINVAL;
858 goto unmap_fmr_exit0;
859 }
860 shca = container_of(ib_fmr->device, struct ehca_shca,
861 ib_device);
862 e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
863 if ((shca != prev_shca) && prev_shca) {
864 ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
865 "prev_shca=%p e_fmr=%p",
866 shca, prev_shca, e_fmr);
867 ret = -EINVAL;
868 goto unmap_fmr_exit0;
869 }
870 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
871 ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
872 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
873 ret = -EINVAL;
874 goto unmap_fmr_exit0;
875 }
876 num_fmr++;
877 }
878
879 /* loop over all FMRs to unmap */
880 list_for_each_entry(ib_fmr, fmr_list, list) {
881 unmap_fmr_cnt++;
882 e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
883 shca = container_of(ib_fmr->device, struct ehca_shca,
884 ib_device);
885 ret = ehca_unmap_one_fmr(shca, e_fmr);
886 if (ret) {
887 /* unmap failed, stop unmapping of rest of FMRs */
888 ehca_err(&shca->ib_device, "unmap of one FMR failed, "
889 "stop rest, e_fmr=%p num_fmr=%x "
890 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
891 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
892 goto unmap_fmr_exit0;
893 }
894 }
895
896 unmap_fmr_exit0:
897 if (ret)
898 ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
899 ret, fmr_list, num_fmr, unmap_fmr_cnt);
900 return ret;
901 } /* end ehca_unmap_fmr() */
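/*
 * Editorial usage sketch (hedged): callers batch FMRs on a list head and
 * invoke the generic verb, which dispatches here, e.g.
 *
 *	LIST_HEAD(fmr_list);
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ret = ib_unmap_fmr(&fmr_list);
 *
 * All FMRs on the list must belong to the same HCA, as enforced above.
 */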
902
903 /*----------------------------------------------------------------------*/
904
905 int ehca_dealloc_fmr(struct ib_fmr *fmr)
906 {
907 int ret;
908 u64 h_ret;
909 struct ehca_shca *shca =
910 container_of(fmr->device, struct ehca_shca, ib_device);
911 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
912
913 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
914 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
915 e_fmr, e_fmr->flags);
916 ret = -EINVAL;
917 goto free_fmr_exit0;
918 }
919
920 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
921 if (h_ret != H_SUCCESS) {
922 ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
923 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
924 h_ret, e_fmr, shca->ipz_hca_handle.handle,
925 e_fmr->ipz_mr_handle.handle, fmr->lkey);
926 ret = ehca2ib_return_code(h_ret);
927 goto free_fmr_exit0;
928 }
929 /* successful deregistration */
930 ehca_mr_delete(e_fmr);
931 return 0;
932
933 free_fmr_exit0:
934 if (ret)
935 ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
936 return ret;
937 } /* end ehca_dealloc_fmr() */
938
939 /*----------------------------------------------------------------------*/
940
941 int ehca_reg_mr(struct ehca_shca *shca,
942 struct ehca_mr *e_mr,
943 u64 *iova_start,
944 u64 size,
945 int acl,
946 struct ehca_pd *e_pd,
947 struct ehca_mr_pginfo *pginfo,
948 u32 *lkey, /*OUT*/
949 u32 *rkey) /*OUT*/
950 {
951 int ret;
952 u64 h_ret;
953 u32 hipz_acl;
954 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
955
956 ehca_mrmw_map_acl(acl, &hipz_acl);
957 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
958 if (ehca_use_hp_mr == 1)
959 hipz_acl |= 0x00000001;
960
961 h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
962 (u64)iova_start, size, hipz_acl,
963 e_pd->fw_pd, &hipzout);
964 if (h_ret != H_SUCCESS) {
965 ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
966 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
967 ret = ehca2ib_return_code(h_ret);
968 goto ehca_reg_mr_exit0;
969 }
970
971 e_mr->ipz_mr_handle = hipzout.handle;
972
973 ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
974 if (ret)
975 goto ehca_reg_mr_exit1;
976
977 /* successful registration */
978 e_mr->num_pages = pginfo->num_pages;
979 e_mr->num_4k = pginfo->num_4k;
980 e_mr->start = iova_start;
981 e_mr->size = size;
982 e_mr->acl = acl;
983 *lkey = hipzout.lkey;
984 *rkey = hipzout.rkey;
985 return 0;
986
987 ehca_reg_mr_exit1:
988 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
989 if (h_ret != H_SUCCESS) {
990 ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
991 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
992 "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
993 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
994 hipzout.lkey, pginfo, pginfo->num_pages,
995 pginfo->num_4k, ret);
996 ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
997 "not recoverable");
998 }
999 ehca_reg_mr_exit0:
1000 if (ret)
1001 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
1002 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
1003 "num_pages=%lx num_4k=%lx",
1004 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
1005 pginfo->num_pages, pginfo->num_4k);
1006 return ret;
1007 } /* end ehca_reg_mr() */
1008
1009 /*----------------------------------------------------------------------*/
1010
1011 int ehca_reg_mr_rpages(struct ehca_shca *shca,
1012 struct ehca_mr *e_mr,
1013 struct ehca_mr_pginfo *pginfo)
1014 {
1015 int ret = 0;
1016 u64 h_ret;
1017 u32 rnum;
1018 u64 rpage;
1019 u32 i;
1020 u64 *kpage;
1021
1022 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1023 if (!kpage) {
1024 ehca_err(&shca->ib_device, "kpage alloc failed");
1025 ret = -ENOMEM;
1026 goto ehca_reg_mr_rpages_exit0;
1027 }
1028
1029 /* max 512 pages per shot */
1030 for (i = 0; i < ((pginfo->num_4k + 512 - 1) / 512); i++) {
1031
1032 if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
1033 rnum = pginfo->num_4k % 512; /* last shot */
1034 if (rnum == 0)
1035 rnum = 512; /* last shot is full */
1036 } else
1037 rnum = 512;
1038
1039 if (rnum > 1) {
1040 ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
1041 if (ret) {
1042 ehca_err(&shca->ib_device, "ehca_set_pagebuf "
1043 "bad rc, ret=%x rnum=%x kpage=%p",
1044 ret, rnum, kpage);
1045 ret = -EFAULT;
1046 goto ehca_reg_mr_rpages_exit1;
1047 }
1048 rpage = virt_to_abs(kpage);
1049 if (!rpage) {
1050 ehca_err(&shca->ib_device, "kpage=%p i=%x",
1051 kpage, i);
1052 ret = -EFAULT;
1053 goto ehca_reg_mr_rpages_exit1;
1054 }
1055 } else { /* rnum==1 */
1056 ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
1057 if (ret) {
1058 ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
1059 "bad rc, ret=%x i=%x", ret, i);
1060 ret = -EFAULT;
1061 goto ehca_reg_mr_rpages_exit1;
1062 }
1063 }
1064
1065 h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
1066 0, /* pagesize 4k */
1067 0, rpage, rnum);
1068
1069 if (i == ((pginfo->num_4k + 512 - 1) / 512) - 1) {
1070 /*
1071 * check for 'registration complete'==H_SUCCESS
1072 * and for 'page registered'==H_PAGE_REGISTERED
1073 */
1074 if (h_ret != H_SUCCESS) {
1075 ehca_err(&shca->ib_device, "last "
1076 "hipz_reg_rpage_mr failed, h_ret=%lx "
1077 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
1078 " lkey=%x", h_ret, e_mr, i,
1079 shca->ipz_hca_handle.handle,
1080 e_mr->ipz_mr_handle.handle,
1081 e_mr->ib.ib_mr.lkey);
1082 ret = ehca2ib_return_code(h_ret);
1083 break;
1084 } else
1085 ret = 0;
1086 } else if (h_ret != H_PAGE_REGISTERED) {
1087 ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
1088 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
1089 "mr_hndl=%lx", h_ret, e_mr, i,
1090 e_mr->ib.ib_mr.lkey,
1091 shca->ipz_hca_handle.handle,
1092 e_mr->ipz_mr_handle.handle);
1093 ret = ehca2ib_return_code(h_ret);
1094 break;
1095 } else
1096 ret = 0;
1097 } /* end for(i) */
1098
1099
1100 ehca_reg_mr_rpages_exit1:
1101 ehca_free_fw_ctrlblock(kpage);
1102 ehca_reg_mr_rpages_exit0:
1103 if (ret)
1104 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
1105 "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo,
1106 pginfo->num_pages, pginfo->num_4k);
1107 return ret;
1108 } /* end ehca_reg_mr_rpages() */
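/*
 * Editorial example of the 512-page batching above: num_4k = 1300 is
 * registered in (1300 + 511) / 512 = 3 hcalls with rnum = 512, 512 and 276;
 * for num_4k = 1024 the remainder is 0, so the last shot is full
 * (rnum = 512) and must complete with H_SUCCESS rather than
 * H_PAGE_REGISTERED.
 */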
1109
1110 /*----------------------------------------------------------------------*/
1111
1112 inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
1113 struct ehca_mr *e_mr,
1114 u64 *iova_start,
1115 u64 size,
1116 u32 acl,
1117 struct ehca_pd *e_pd,
1118 struct ehca_mr_pginfo *pginfo,
1119 u32 *lkey, /*OUT*/
1120 u32 *rkey) /*OUT*/
1121 {
1122 int ret;
1123 u64 h_ret;
1124 u32 hipz_acl;
1125 u64 *kpage;
1126 u64 rpage;
1127 struct ehca_mr_pginfo pginfo_save;
1128 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1129
1130 ehca_mrmw_map_acl(acl, &hipz_acl);
1131 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1132
1133 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1134 if (!kpage) {
1135 ehca_err(&shca->ib_device, "kpage alloc failed");
1136 ret = -ENOMEM;
1137 goto ehca_rereg_mr_rereg1_exit0;
1138 }
1139
1140 pginfo_save = *pginfo;
1141 ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
1142 if (ret) {
1143 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
1144 "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
1145 e_mr, pginfo, pginfo->type, pginfo->num_pages,
1146 			pginfo->num_4k, kpage);
1147 goto ehca_rereg_mr_rereg1_exit1;
1148 }
1149 rpage = virt_to_abs(kpage);
1150 if (!rpage) {
1151 ehca_err(&shca->ib_device, "kpage=%p", kpage);
1152 ret = -EFAULT;
1153 goto ehca_rereg_mr_rereg1_exit1;
1154 }
1155 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
1156 (u64)iova_start, size, hipz_acl,
1157 e_pd->fw_pd, rpage, &hipzout);
1158 if (h_ret != H_SUCCESS) {
1159 /*
1160 * reregistration unsuccessful, try it again with the 3 hCalls,
1161 * e.g. this is required in case H_MR_CONDITION
1162 * (MW bound or MR is shared)
1163 */
1164 ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
1165 "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
1166 *pginfo = pginfo_save;
1167 ret = -EAGAIN;
1168 	} else if ((u64 *)hipzout.vaddr != iova_start) {
1169 ehca_err(&shca->ib_device, "PHYP changed iova_start in "
1170 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
1171 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
1172 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
1173 e_mr->ib.ib_mr.lkey, hipzout.lkey);
1174 ret = -EFAULT;
1175 } else {
1176 /*
1177 * successful reregistration
1178 * note: start and start_out are identical for eServer HCAs
1179 */
1180 e_mr->num_pages = pginfo->num_pages;
1181 e_mr->num_4k = pginfo->num_4k;
1182 e_mr->start = iova_start;
1183 e_mr->size = size;
1184 e_mr->acl = acl;
1185 *lkey = hipzout.lkey;
1186 *rkey = hipzout.rkey;
1187 }
1188
1189 ehca_rereg_mr_rereg1_exit1:
1190 ehca_free_fw_ctrlblock(kpage);
1191 ehca_rereg_mr_rereg1_exit0:
1192 	if (ret && (ret != -EAGAIN))
1193 ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
1194 "pginfo=%p num_pages=%lx num_4k=%lx",
1195 ret, *lkey, *rkey, pginfo, pginfo->num_pages,
1196 pginfo->num_4k);
1197 return ret;
1198 } /* end ehca_rereg_mr_rereg1() */
1199
1200 /*----------------------------------------------------------------------*/
1201
1202 int ehca_rereg_mr(struct ehca_shca *shca,
1203 struct ehca_mr *e_mr,
1204 u64 *iova_start,
1205 u64 size,
1206 int acl,
1207 struct ehca_pd *e_pd,
1208 struct ehca_mr_pginfo *pginfo,
1209 u32 *lkey,
1210 u32 *rkey)
1211 {
1212 int ret = 0;
1213 u64 h_ret;
1214 int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
1215 int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
1216
1217 /* first determine reregistration hCall(s) */
1218 if ((pginfo->num_4k > 512) || (e_mr->num_4k > 512) ||
1219 (pginfo->num_4k > e_mr->num_4k)) {
1220 ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
1221 "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
1222 rereg_1_hcall = 0;
1223 rereg_3_hcall = 1;
1224 }
1225
1226 if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
1227 rereg_1_hcall = 0;
1228 rereg_3_hcall = 1;
1229 e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
1230 ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
1231 e_mr);
1232 }
1233
1234 if (rereg_1_hcall) {
1235 ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
1236 acl, e_pd, pginfo, lkey, rkey);
1237 if (ret) {
1238 if (ret == -EAGAIN)
1239 rereg_3_hcall = 1;
1240 else
1241 goto ehca_rereg_mr_exit0;
1242 }
1243 }
1244
1245 if (rereg_3_hcall) {
1246 struct ehca_mr save_mr;
1247
1248 /* first deregister old MR */
1249 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
1250 if (h_ret != H_SUCCESS) {
1251 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1252 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
1253 "mr->lkey=%x",
1254 h_ret, e_mr, shca->ipz_hca_handle.handle,
1255 e_mr->ipz_mr_handle.handle,
1256 e_mr->ib.ib_mr.lkey);
1257 ret = ehca2ib_return_code(h_ret);
1258 goto ehca_rereg_mr_exit0;
1259 }
1260 /* clean ehca_mr_t, without changing struct ib_mr and lock */
1261 save_mr = *e_mr;
1262 ehca_mr_deletenew(e_mr);
1263
1264 /* set some MR values */
1265 e_mr->flags = save_mr.flags;
1266 e_mr->fmr_page_size = save_mr.fmr_page_size;
1267 e_mr->fmr_max_pages = save_mr.fmr_max_pages;
1268 e_mr->fmr_max_maps = save_mr.fmr_max_maps;
1269 e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
1270
1271 ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
1272 e_pd, pginfo, lkey, rkey);
1273 if (ret) {
1274 u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
1275 memcpy(&e_mr->flags, &(save_mr.flags),
1276 sizeof(struct ehca_mr) - offset);
1277 goto ehca_rereg_mr_exit0;
1278 }
1279 }
1280
1281 ehca_rereg_mr_exit0:
1282 if (ret)
1283 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
1284 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
1285 "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
1286 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
1287 acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
1288 rereg_1_hcall, rereg_3_hcall);
1289 return ret;
1290 } /* end ehca_rereg_mr() */
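/*
 * Editorial note on the hcall selection above (hedged): the single-hcall
 * path (hipz_h_reregister_pmr) is only attempted when both the old and the
 * new MR fit into one 512-entry page list and the MR does not grow; larger
 * or growing MRs and max-MRs take the 3-hcall sequence free resource +
 * alloc resource + register rpages, and a failed Rereg1 attempt falls back
 * to Rereg3 via -EAGAIN.
 */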
1291
1292 /*----------------------------------------------------------------------*/
1293
1294 int ehca_unmap_one_fmr(struct ehca_shca *shca,
1295 struct ehca_mr *e_fmr)
1296 {
1297 int ret = 0;
1298 u64 h_ret;
1299 int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */
1300 int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
1301 struct ehca_pd *e_pd =
1302 container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
1303 struct ehca_mr save_fmr;
1304 u32 tmp_lkey, tmp_rkey;
1305 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
1306 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1307
1308 /* first check if reregistration hCall can be used for unmap */
1309 if (e_fmr->fmr_max_pages > 512) {
1310 rereg_1_hcall = 0;
1311 rereg_3_hcall = 1;
1312 }
1313
1314 if (rereg_1_hcall) {
1315 /*
1316 * note: after using rereg hcall with len=0,
1317 * rereg hcall must be used again for registering pages
1318 */
1319 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
1320 0, 0, e_pd->fw_pd, 0, &hipzout);
1321 if (h_ret != H_SUCCESS) {
1322 /*
1323 * should not happen, because length checked above,
1324 * FMRs are not shared and no MW bound to FMRs
1325 */
1326 ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
1327 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
1328 "mr_hndl=%lx lkey=%x lkey_out=%x",
1329 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1330 e_fmr->ipz_mr_handle.handle,
1331 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
1332 rereg_3_hcall = 1;
1333 } else {
1334 /* successful reregistration */
1335 e_fmr->start = NULL;
1336 e_fmr->size = 0;
1337 tmp_lkey = hipzout.lkey;
1338 tmp_rkey = hipzout.rkey;
1339 }
1340 }
1341
1342 if (rereg_3_hcall) {
1343 		/* pre-free state is captured in save_fmr below for error restore */
1344
1345 /* first free old FMR */
1346 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
1347 if (h_ret != H_SUCCESS) {
1348 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1349 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
1350 "lkey=%x",
1351 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1352 e_fmr->ipz_mr_handle.handle,
1353 e_fmr->ib.ib_fmr.lkey);
1354 ret = ehca2ib_return_code(h_ret);
1355 goto ehca_unmap_one_fmr_exit0;
1356 }
1357 /* clean ehca_mr_t, without changing lock */
1358 save_fmr = *e_fmr;
1359 ehca_mr_deletenew(e_fmr);
1360
1361 /* set some MR values */
1362 e_fmr->flags = save_fmr.flags;
1363 e_fmr->fmr_page_size = save_fmr.fmr_page_size;
1364 e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
1365 e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
1366 e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
1367 e_fmr->acl = save_fmr.acl;
1368
1369 pginfo.type = EHCA_MR_PGI_FMR;
1370 pginfo.num_pages = 0;
1371 pginfo.num_4k = 0;
1372 ret = ehca_reg_mr(shca, e_fmr, NULL,
1373 (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
1374 e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
1375 &tmp_rkey);
1376 if (ret) {
1377 u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
1378 			memcpy(&e_fmr->flags, &(save_fmr.flags),
1379 sizeof(struct ehca_mr) - offset);
1380 goto ehca_unmap_one_fmr_exit0;
1381 }
1382 }
1383
1384 ehca_unmap_one_fmr_exit0:
1385 if (ret)
1386 ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
1387 "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
1388 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
1389 rereg_1_hcall, rereg_3_hcall);
1390 return ret;
1391 } /* end ehca_unmap_one_fmr() */
1392
1393 /*----------------------------------------------------------------------*/
1394
1395 int ehca_reg_smr(struct ehca_shca *shca,
1396 struct ehca_mr *e_origmr,
1397 struct ehca_mr *e_newmr,
1398 u64 *iova_start,
1399 int acl,
1400 struct ehca_pd *e_pd,
1401 u32 *lkey, /*OUT*/
1402 u32 *rkey) /*OUT*/
1403 {
1404 int ret = 0;
1405 u64 h_ret;
1406 u32 hipz_acl;
1407 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1408
1409 ehca_mrmw_map_acl(acl, &hipz_acl);
1410 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1411
1412 h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1413 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1414 &hipzout);
1415 if (h_ret != H_SUCCESS) {
1416 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
1417 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
1418 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
1419 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
1420 shca->ipz_hca_handle.handle,
1421 e_origmr->ipz_mr_handle.handle,
1422 e_origmr->ib.ib_mr.lkey);
1423 ret = ehca2ib_return_code(h_ret);
1424 goto ehca_reg_smr_exit0;
1425 }
1426 /* successful registration */
1427 e_newmr->num_pages = e_origmr->num_pages;
1428 e_newmr->num_4k = e_origmr->num_4k;
1429 e_newmr->start = iova_start;
1430 e_newmr->size = e_origmr->size;
1431 e_newmr->acl = acl;
1432 e_newmr->ipz_mr_handle = hipzout.handle;
1433 *lkey = hipzout.lkey;
1434 *rkey = hipzout.rkey;
1435 return 0;
1436
1437 ehca_reg_smr_exit0:
1438 if (ret)
1439 ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
1440 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
1441 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
1442 return ret;
1443 } /* end ehca_reg_smr() */
1444
1445 /*----------------------------------------------------------------------*/
1446
1447 /* register internal max-MR to internal SHCA */
1448 int ehca_reg_internal_maxmr(
1449 struct ehca_shca *shca,
1450 struct ehca_pd *e_pd,
1451 struct ehca_mr **e_maxmr) /*OUT*/
1452 {
1453 int ret;
1454 struct ehca_mr *e_mr;
1455 u64 *iova_start;
1456 u64 size_maxmr;
1457 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
1458 struct ib_phys_buf ib_pbuf;
1459 u32 num_pages_mr;
1460 u32 num_pages_4k; /* 4k portion "pages" */
1461
1462 e_mr = ehca_mr_new();
1463 if (!e_mr) {
1464 ehca_err(&shca->ib_device, "out of memory");
1465 ret = -ENOMEM;
1466 goto ehca_reg_internal_maxmr_exit0;
1467 }
1468 e_mr->flags |= EHCA_MR_FLAG_MAXMR;
1469
1470 /* register internal max-MR on HCA */
1471 size_maxmr = (u64)high_memory - PAGE_OFFSET;
1472 	iova_start = (u64 *)KERNELBASE;
1473 ib_pbuf.addr = 0;
1474 ib_pbuf.size = size_maxmr;
1475 num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr +
1476 PAGE_SIZE - 1) / PAGE_SIZE);
1477 num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr +
1478 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
1479
1480 pginfo.type = EHCA_MR_PGI_PHYS;
1481 pginfo.num_pages = num_pages_mr;
1482 pginfo.num_4k = num_pages_4k;
1483 pginfo.num_phys_buf = 1;
1484 pginfo.phys_buf_array = &ib_pbuf;
1485
1486 ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
1487 &pginfo, &e_mr->ib.ib_mr.lkey,
1488 &e_mr->ib.ib_mr.rkey);
1489 if (ret) {
1490 ehca_err(&shca->ib_device, "reg of internal max MR failed, "
1491 "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x "
1492 "num_pages_4k=%x", e_mr, iova_start, size_maxmr,
1493 num_pages_mr, num_pages_4k);
1494 goto ehca_reg_internal_maxmr_exit1;
1495 }
1496
1497 /* successful registration of all pages */
1498 e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
1499 e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
1500 e_mr->ib.ib_mr.uobject = NULL;
1501 atomic_inc(&(e_pd->ib_pd.usecnt));
1502 atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
1503 *e_maxmr = e_mr;
1504 return 0;
1505
1506 ehca_reg_internal_maxmr_exit1:
1507 ehca_mr_delete(e_mr);
1508 ehca_reg_internal_maxmr_exit0:
1509 if (ret)
1510 ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
1511 ret, shca, e_pd, e_maxmr);
1512 return ret;
1513 } /* end ehca_reg_internal_maxmr() */
1514
1515 /*----------------------------------------------------------------------*/
1516
1517 int ehca_reg_maxmr(struct ehca_shca *shca,
1518 struct ehca_mr *e_newmr,
1519 u64 *iova_start,
1520 int acl,
1521 struct ehca_pd *e_pd,
1522 u32 *lkey,
1523 u32 *rkey)
1524 {
1525 u64 h_ret;
1526 struct ehca_mr *e_origmr = shca->maxmr;
1527 u32 hipz_acl;
1528 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1529
1530 ehca_mrmw_map_acl(acl, &hipz_acl);
1531 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1532
1533 h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1534 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1535 &hipzout);
1536 if (h_ret != H_SUCCESS) {
1537 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
1538 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
1539 h_ret, e_origmr, shca->ipz_hca_handle.handle,
1540 e_origmr->ipz_mr_handle.handle,
1541 e_origmr->ib.ib_mr.lkey);
1542 return ehca2ib_return_code(h_ret);
1543 }
1544 /* successful registration */
1545 e_newmr->num_pages = e_origmr->num_pages;
1546 e_newmr->num_4k = e_origmr->num_4k;
1547 e_newmr->start = iova_start;
1548 e_newmr->size = e_origmr->size;
1549 e_newmr->acl = acl;
1550 e_newmr->ipz_mr_handle = hipzout.handle;
1551 *lkey = hipzout.lkey;
1552 *rkey = hipzout.rkey;
1553 return 0;
1554 } /* end ehca_reg_maxmr() */
1555
1556 /*----------------------------------------------------------------------*/
1557
1558 int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
1559 {
1560 int ret;
1561 struct ehca_mr *e_maxmr;
1562 struct ib_pd *ib_pd;
1563
1564 if (!shca->maxmr) {
1565 ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
1566 ret = -EINVAL;
1567 goto ehca_dereg_internal_maxmr_exit0;
1568 }
1569
1570 e_maxmr = shca->maxmr;
1571 ib_pd = e_maxmr->ib.ib_mr.pd;
1572 shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
1573
1574 ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
1575 if (ret) {
1576 ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
1577 "ret=%x e_maxmr=%p shca=%p lkey=%x",
1578 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
1579 shca->maxmr = e_maxmr;
1580 goto ehca_dereg_internal_maxmr_exit0;
1581 }
1582
1583 atomic_dec(&ib_pd->usecnt);
1584
1585 ehca_dereg_internal_maxmr_exit0:
1586 if (ret)
1587 ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
1588 ret, shca, shca->maxmr);
1589 return ret;
1590 } /* end ehca_dereg_internal_maxmr() */
1591
1592 /*----------------------------------------------------------------------*/
1593
1594 /*
1595  * check the physical buffer array of an MR verb for validity and
1596  * calculate the MR size
1597 */
1598 int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
1599 int num_phys_buf,
1600 u64 *iova_start,
1601 u64 *size)
1602 {
1603 struct ib_phys_buf *pbuf = phys_buf_array;
1604 u64 size_count = 0;
1605 u32 i;
1606
1607 if (num_phys_buf == 0) {
1608 ehca_gen_err("bad phys buf array len, num_phys_buf=0");
1609 return -EINVAL;
1610 }
1611 /* check first buffer */
1612 if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
1613 ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
1614 "pbuf->addr=%lx pbuf->size=%lx",
1615 iova_start, pbuf->addr, pbuf->size);
1616 return -EINVAL;
1617 }
1618 if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
1619 (num_phys_buf > 1)) {
1620 ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
1621 "pbuf->size=%lx", pbuf->addr, pbuf->size);
1622 return -EINVAL;
1623 }
1624
1625 for (i = 0; i < num_phys_buf; i++) {
1626 if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
1627 ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
1628 "pbuf->size=%lx",
1629 i, pbuf->addr, pbuf->size);
1630 return -EINVAL;
1631 }
1632 if (((i > 0) && /* not 1st */
1633 (i < (num_phys_buf - 1)) && /* not last */
1634 (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
1635 ehca_gen_err("bad size, i=%x pbuf->size=%lx",
1636 i, pbuf->size);
1637 return -EINVAL;
1638 }
1639 size_count += pbuf->size;
1640 pbuf++;
1641 }
1642
1643 *size = size_count;
1644 return 0;
1645 } /* end ehca_mr_chk_buf_and_calc_size() */
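/*
 * Editorial example (assuming 4K pages): a valid three-entry array is
 *	{ addr = 0x1200, size = 0x0e00 },
 *	{ addr = 0x2000, size = 0x1000 },
 *	{ addr = 0x3000, size = 0x0480 }
 * with iova_start offset 0x200: only the first buffer may start and only
 * the last may end off a page boundary, and *size becomes 0x2280.
 */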
1646
1647 /*----------------------------------------------------------------------*/
1648
1649 /* check the page list of a map-FMR verb for validity */
1650 int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
1651 u64 *page_list,
1652 int list_len)
1653 {
1654 u32 i;
1655 u64 *page;
1656
1657 if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
1658 ehca_gen_err("bad list_len, list_len=%x "
1659 "e_fmr->fmr_max_pages=%x fmr=%p",
1660 list_len, e_fmr->fmr_max_pages, e_fmr);
1661 return -EINVAL;
1662 }
1663
1664 /* each page must be aligned */
1665 page = page_list;
1666 for (i = 0; i < list_len; i++) {
1667 if (*page % e_fmr->fmr_page_size) {
1668 ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
1669 "fmr_page_size=%x", i, *page, page, e_fmr,
1670 e_fmr->fmr_page_size);
1671 return -EINVAL;
1672 }
1673 page++;
1674 }
1675
1676 return 0;
1677 } /* end ehca_fmr_check_page_list() */
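/*
 * Editorial example: with fmr_page_size = 4K a page_list entry of
 * 0x10001200 fails the check above (0x10001200 % 0x1000 = 0x200), while
 * 0x10001000 passes.
 */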
1678
1679 /*----------------------------------------------------------------------*/
1680
1681 /* setup page buffer from page info */
1682 int ehca_set_pagebuf(struct ehca_mr *e_mr,
1683 struct ehca_mr_pginfo *pginfo,
1684 u32 number,
1685 u64 *kpage)
1686 {
1687 int ret = 0;
1688 struct ib_umem_chunk *prev_chunk;
1689 struct ib_umem_chunk *chunk;
1690 struct ib_phys_buf *pbuf;
1691 u64 *fmrlist;
1692 u64 num4k, pgaddr, offs4k;
1693 u32 i = 0;
1694 u32 j = 0;
1695
1696 if (pginfo->type == EHCA_MR_PGI_PHYS) {
1697 /* loop over desired phys_buf_array entries */
1698 while (i < number) {
1699 pbuf = pginfo->phys_buf_array + pginfo->next_buf;
1700 num4k = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size +
1701 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
1702 offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
1703 while (pginfo->next_4k < offs4k + num4k) {
1704 /* sanity check */
1705 if ((pginfo->page_cnt >= pginfo->num_pages) ||
1706 (pginfo->page_4k_cnt >= pginfo->num_4k)) {
1707 ehca_gen_err("page_cnt >= num_pages, "
1708 "page_cnt=%lx "
1709 "num_pages=%lx "
1710 "page_4k_cnt=%lx "
1711 "num_4k=%lx i=%x",
1712 pginfo->page_cnt,
1713 pginfo->num_pages,
1714 pginfo->page_4k_cnt,
1715 pginfo->num_4k, i);
1716 ret = -EFAULT;
1717 goto ehca_set_pagebuf_exit0;
1718 }
1719 *kpage = phys_to_abs(
1720 (pbuf->addr & EHCA_PAGEMASK)
1721 + (pginfo->next_4k * EHCA_PAGESIZE));
1722 				if (!(*kpage) && pbuf->addr) {
1723 ehca_gen_err("pbuf->addr=%lx "
1724 "pbuf->size=%lx "
1725 "next_4k=%lx", pbuf->addr,
1726 pbuf->size,
1727 pginfo->next_4k);
1728 ret = -EFAULT;
1729 goto ehca_set_pagebuf_exit0;
1730 }
1731 (pginfo->page_4k_cnt)++;
1732 (pginfo->next_4k)++;
1733 if (pginfo->next_4k %
1734 (PAGE_SIZE / EHCA_PAGESIZE) == 0)
1735 (pginfo->page_cnt)++;
1736 kpage++;
1737 i++;
1738 if (i >= number) break;
1739 }
1740 if (pginfo->next_4k >= offs4k + num4k) {
1741 (pginfo->next_buf)++;
1742 pginfo->next_4k = 0;
1743 }
1744 }
1745 } else if (pginfo->type == EHCA_MR_PGI_USER) {
1746 /* loop over desired chunk entries */
1747 chunk = pginfo->next_chunk;
1748 prev_chunk = pginfo->next_chunk;
1749 list_for_each_entry_continue(chunk,
1750 (&(pginfo->region->chunk_list)),
1751 list) {
1752 for (i = pginfo->next_nmap; i < chunk->nmap; ) {
1753 pgaddr = ( page_to_pfn(chunk->page_list[i].page)
1754 << PAGE_SHIFT );
1755 *kpage = phys_to_abs(pgaddr +
1756 (pginfo->next_4k *
1757 EHCA_PAGESIZE));
1758 				if (!(*kpage)) {
1759 ehca_gen_err("pgaddr=%lx "
1760 "chunk->page_list[i]=%lx "
1761 "i=%x next_4k=%lx mr=%p",
1762 pgaddr,
1763 (u64)sg_dma_address(
1764 &chunk->
1765 page_list[i]),
1766 i, pginfo->next_4k, e_mr);
1767 ret = -EFAULT;
1768 goto ehca_set_pagebuf_exit0;
1769 }
1770 (pginfo->page_4k_cnt)++;
1771 (pginfo->next_4k)++;
1772 kpage++;
1773 if (pginfo->next_4k %
1774 (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
1775 (pginfo->page_cnt)++;
1776 (pginfo->next_nmap)++;
1777 pginfo->next_4k = 0;
1778 i++;
1779 }
1780 j++;
1781 if (j >= number) break;
1782 }
1783 if ((pginfo->next_nmap >= chunk->nmap) &&
1784 (j >= number)) {
1785 pginfo->next_nmap = 0;
1786 prev_chunk = chunk;
1787 break;
1788 } else if (pginfo->next_nmap >= chunk->nmap) {
1789 pginfo->next_nmap = 0;
1790 prev_chunk = chunk;
1791 } else if (j >= number)
1792 break;
1793 else
1794 prev_chunk = chunk;
1795 }
		pginfo->next_chunk =
			list_prepare_entry(prev_chunk,
					   (&(pginfo->region->chunk_list)),
					   list);
	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
		/* loop over desired page_list entries */
		fmrlist = pginfo->page_list + pginfo->next_listelem;
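		/*
		 * each FMR page may be larger than an HCA page: one list
		 * element expands to fmr_page_size/EHCA_PAGESIZE consecutive
		 * 4K entries (e.g. 16 for a 64K FMR page size)
		 */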
		for (i = 0; i < number; i++) {
			*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
					     pginfo->next_4k * EHCA_PAGESIZE);
			if (!(*kpage)) {
				ehca_gen_err("*fmrlist=%lx fmrlist=%p "
					     "next_listelem=%lx next_4k=%lx",
					     *fmrlist, fmrlist,
					     pginfo->next_listelem,
					     pginfo->next_4k);
				ret = -EFAULT;
				goto ehca_set_pagebuf_exit0;
			}
			(pginfo->page_4k_cnt)++;
			(pginfo->next_4k)++;
			kpage++;
			if (pginfo->next_4k %
			    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
				(pginfo->page_cnt)++;
				(pginfo->next_listelem)++;
				fmrlist++;
				pginfo->next_4k = 0;
			}
		}
	} else {
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		goto ehca_set_pagebuf_exit0;
	}

ehca_set_pagebuf_exit0:
	if (ret)
		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
			     "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
			     "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
			     "next_listelem=%lx region=%p next_chunk=%p "
			     "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
			     pginfo->num_pages, pginfo->num_4k,
			     pginfo->next_buf, pginfo->next_4k, number, kpage,
			     pginfo->page_cnt, pginfo->page_4k_cnt, i,
			     pginfo->next_listelem, pginfo->region,
			     pginfo->next_chunk, pginfo->next_nmap);
	return ret;
} /* end ehca_set_pagebuf() */

/*----------------------------------------------------------------------*/

/* set up one page from the page info page buffer */
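/*
 * Single-page variant of ehca_set_pagebuf(): fills exactly one rpage
 * entry and advances the same pginfo cursor fields as the batched
 * version.
 */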
int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo,
		       u64 *rpage)
{
	int ret = 0;
	struct ib_phys_buf *tmp_pbuf;
	u64 *fmrlist;
	struct ib_umem_chunk *chunk;
	struct ib_umem_chunk *prev_chunk;
	u64 pgaddr, num4k, offs4k;

	if (pginfo->type == EHCA_MR_PGI_PHYS) {
		/* sanity check */
		if ((pginfo->page_cnt >= pginfo->num_pages) ||
		    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
			ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
				     "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
				     pginfo->page_cnt, pginfo->num_pages,
				     pginfo->page_4k_cnt, pginfo->num_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf;
		num4k = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
		offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
		*rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
				     (pginfo->next_4k * EHCA_PAGESIZE));
		if (!(*rpage) && tmp_pbuf->addr) {
			ehca_gen_err("tmp_pbuf->addr=%lx"
				     " tmp_pbuf->size=%lx next_4k=%lx",
				     tmp_pbuf->addr, tmp_pbuf->size,
				     pginfo->next_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		(pginfo->page_4k_cnt)++;
		(pginfo->next_4k)++;
		if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
			(pginfo->page_cnt)++;
		if (pginfo->next_4k >= offs4k + num4k) {
			(pginfo->next_buf)++;
			pginfo->next_4k = 0;
		}
	} else if (pginfo->type == EHCA_MR_PGI_USER) {
		chunk = pginfo->next_chunk;
		prev_chunk = pginfo->next_chunk;
		list_for_each_entry_continue(chunk,
					     (&(pginfo->region->chunk_list)),
					     list) {
			pgaddr = page_to_pfn(chunk->page_list[
						     pginfo->next_nmap].page)
				<< PAGE_SHIFT;
			*rpage = phys_to_abs(pgaddr +
					     (pginfo->next_4k * EHCA_PAGESIZE));
			if (!(*rpage)) {
				ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
					     " next_nmap=%lx next_4k=%lx mr=%p",
					     pgaddr, (u64)sg_dma_address(
						     &chunk->page_list[
							     pginfo->
							     next_nmap]),
					     pginfo->next_nmap, pginfo->next_4k,
					     e_mr);
				ret = -EFAULT;
				goto ehca_set_pagebuf_1_exit0;
			}
			(pginfo->page_4k_cnt)++;
			(pginfo->next_4k)++;
			if (pginfo->next_4k %
			    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
				(pginfo->page_cnt)++;
				(pginfo->next_nmap)++;
				pginfo->next_4k = 0;
			}
			if (pginfo->next_nmap >= chunk->nmap) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
			}
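			/* single-page call: stop after the first entry */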
			break;
		}
		pginfo->next_chunk =
			list_prepare_entry(prev_chunk,
					   (&(pginfo->region->chunk_list)),
					   list);
	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
		fmrlist = pginfo->page_list + pginfo->next_listelem;
		*rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
				     pginfo->next_4k * EHCA_PAGESIZE);
		if (!(*rpage)) {
			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
				     "next_listelem=%lx next_4k=%lx",
				     *fmrlist, fmrlist, pginfo->next_listelem,
				     pginfo->next_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		(pginfo->page_4k_cnt)++;
		(pginfo->next_4k)++;
		if (pginfo->next_4k %
		    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
			(pginfo->page_cnt)++;
			(pginfo->next_listelem)++;
			pginfo->next_4k = 0;
		}
	} else {
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		goto ehca_set_pagebuf_1_exit0;
	}

ehca_set_pagebuf_1_exit0:
	if (ret)
		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
			     "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
			     "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
			     "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
			     pginfo, pginfo->type, pginfo->num_pages,
			     pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
			     rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
			     pginfo->next_listelem, pginfo->region,
			     pginfo->next_chunk, pginfo->next_nmap);
	return ret;
} /* end ehca_set_pagebuf_1() */

/*----------------------------------------------------------------------*/

/*
 * check whether an MR is a max-MR, i.e. one that spans all of memory;
 * returns 1 if it is a max-MR, else 0
 */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start)
{
	/*
	 * an MR is treated as a max-MR only if it covers the whole kernel
	 * linear mapping, i.e. starts at KERNELBASE and spans up to
	 * high_memory:
	 */
	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
	    (iova_start == (void *)KERNELBASE)) {
		ehca_gen_dbg("this is a max-MR");
		return 1;
	} else
		return 0;
} /* end ehca_mr_is_maxmr() */

/*----------------------------------------------------------------------*/

/*
 * map IB access control bits to their hipz equivalents;
 * this routine is used for both MR and MW
 */
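/*
 * e.g. an ib_acl of IB_ACCESS_REMOTE_READ | IB_ACCESS_LOCAL_WRITE
 * yields HIPZ_ACCESSCTRL_R_READ | HIPZ_ACCESSCTRL_L_WRITE
 */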
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl)
{
	*hipz_acl = 0;
	if (ib_acl & IB_ACCESS_REMOTE_READ)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
	if (ib_acl & IB_ACCESS_MW_BIND)
		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */

/*----------------------------------------------------------------------*/

/*
 * sets the page size in the hipz access control for MR/MW;
 * currently a no-op because the HCA supports only 4K pages
 */
void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
{
	return; /* HCA supports only 4k */
} /* end ehca_mrmw_set_pgsize_hipz_acl() */

/*----------------------------------------------------------------------*/

/*
 * reverse map hipz access control bits back to IB access flags;
 * this routine is used for both MR and MW
 */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl) /*OUT*/
{
	*ib_acl = 0;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
		*ib_acl |= IB_ACCESS_REMOTE_READ;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
		*ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */

/*----------------------------------------------------------------------*/

/*
 * MR destructor and constructor:
 * used in the reregister MR verb; sets all fields of a struct ehca_mr
 * to 0, except for struct ib_mr and the spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
	mr->flags = 0;
	mr->num_pages = 0;
	mr->num_4k = 0;
	mr->acl = 0;
	mr->start = NULL;
	mr->fmr_page_size = 0;
	mr->fmr_max_pages = 0;
	mr->fmr_max_maps = 0;
	mr->fmr_map_cnt = 0;
	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
	memset(&mr->galpas, 0, sizeof(mr->galpas));
	mr->nr_of_pages = 0;
	mr->pagearray = NULL;
} /* end ehca_mr_deletenew() */

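/*
 * create the slab caches backing ehca_mr_new() and ehca_mw_new();
 * expected to be called once during driver initialization
 */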
int ehca_init_mrmw_cache(void)
{
	mr_cache = kmem_cache_create("ehca_cache_mr",
				     sizeof(struct ehca_mr), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!mr_cache)
		return -ENOMEM;
	mw_cache = kmem_cache_create("ehca_cache_mw",
				     sizeof(struct ehca_mw), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!mw_cache) {
		kmem_cache_destroy(mr_cache);
		mr_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}

void ehca_cleanup_mrmw_cache(void)
{
	if (mr_cache)
		kmem_cache_destroy(mr_cache);
	if (mw_cache)
		kmem_cache_destroy(mw_cache);
}