/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <rdma/ib_umem.h>

#include <asm/current.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"

#define NUM_CHUNKS(length, chunk_size) \
	(((length) + ((chunk_size) - 1)) / (chunk_size))
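
/*
 * Example (illustration only): NUM_CHUNKS(0x2100, 0x1000) evaluates to
 * (0x2100 + 0xfff) / 0x1000 = 3, i.e. a length is always rounded up to
 * whole chunks, so a partially used trailing page still costs a chunk.
 */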

/* max number of rpages (per hcall register_rpages) */
#define MAX_RPAGES 512

static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;

static struct ehca_mr *ehca_mr_new(void)
{
	struct ehca_mr *me;

	me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mrlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
	struct ehca_mw *me;

	me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
	if (me)
		spin_lock_init(&me->mwlock);
	else
		ehca_gen_err("alloc failed");

	return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_maxmr;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);

	if (shca->maxmr) {
		e_maxmr = ehca_mr_new();
		if (!e_maxmr) {
			ehca_err(&shca->ib_device, "out of memory");
			ib_mr = ERR_PTR(-ENOMEM);
			goto get_dma_mr_exit0;
		}

		ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
				     mr_access_flags, e_pd,
				     &e_maxmr->ib.ib_mr.lkey,
				     &e_maxmr->ib.ib_mr.rkey);
		if (ret) {
			ehca_mr_delete(e_maxmr);
			ib_mr = ERR_PTR(ret);
			goto get_dma_mr_exit0;
		}
		ib_mr = &e_maxmr->ib.ib_mr;
	} else {
		ehca_err(&shca->ib_device, "no internal max-MR exist!");
		ib_mr = ERR_PTR(-EINVAL);
		goto get_dma_mr_exit0;
	}

get_dma_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x ",
			 PTR_ERR(ib_mr), pd, mr_access_flags);
	return ib_mr;
} /* end ehca_get_dma_mr() */
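
/*
 * Usage sketch (illustration only, not part of the driver): a kernel
 * client reaches this entry point through the core verbs layer, e.g.
 *
 *	struct ib_mr *mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *
 * On eHCA this hands back a shared instance of the internal max-MR
 * registered at KERNELBASE.
 */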

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
			       struct ib_phys_buf *phys_buf_array,
			       int num_phys_buf,
			       int mr_access_flags,
			       u64 *iova_start)
{
	struct ib_mr *ib_mr;
	int ret;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	u64 size;

	if ((num_phys_buf <= 0) || !phys_buf_array) {
		ehca_err(pd->device, "bad input values: num_phys_buf=%x "
			 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	/* check physical buffer list and calculate size */
	ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
					    iova_start, &size);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_phys_mr_exit0;
	}
	if ((size == 0) ||
	    (((u64)iova_start + size) < (u64)iova_start)) {
		ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
			 size, iova_start);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_phys_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_phys_mr_exit0;
	}

	/* register MR on HCA */
	if (ehca_mr_is_maxmr(size, iova_start)) {
		e_mr->flags |= EHCA_MR_FLAG_MAXMR;
		ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
				     e_pd, &e_mr->ib.ib_mr.lkey,
				     &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	} else {
		struct ehca_mr_pginfo pginfo;
		u32 num_kpages;
		u32 num_hwpages;

		num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
					PAGE_SIZE);
		num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) +
					 size, EHCA_PAGESIZE);
		memset(&pginfo, 0, sizeof(pginfo));
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_kpages = num_kpages;
		pginfo.num_hwpages = num_hwpages;
		pginfo.u.phy.num_phys_buf = num_phys_buf;
		pginfo.u.phy.phys_buf_array = phys_buf_array;
		pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
				      EHCA_PAGESIZE);

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
				  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
				  &e_mr->ib.ib_mr.rkey);
		if (ret) {
			ib_mr = ERR_PTR(ret);
			goto reg_phys_mr_exit1;
		}
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
	ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
			 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
			 PTR_ERR(ib_mr), pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ib_mr;
} /* end ehca_reg_phys_mr() */
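
/*
 * Usage sketch (illustration only, hypothetical values): registering a
 * single physically contiguous buffer through the core verbs layer:
 *
 *	struct ib_phys_buf pbuf = {
 *		.addr = buf_phys_addr,
 *		.size = buf_size,
 *	};
 *	u64 iova = buf_phys_addr;
 *	struct ib_mr *mr = ib_reg_phys_mr(pd, &pbuf, 1,
 *					  IB_ACCESS_LOCAL_WRITE |
 *					  IB_ACCESS_REMOTE_READ, &iova);
 */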

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
			       int mr_access_flags, struct ib_udata *udata)
{
	struct ib_mr *ib_mr;
	struct ehca_mr *e_mr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	int ret;
	u32 num_kpages;
	u32 num_hwpages;

	if (!pd) {
		ehca_gen_err("bad pd=%p", pd);
		return ERR_PTR(-EFAULT);
	}

	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	if (length == 0 || virt + length < virt) {
		ehca_err(pd->device, "bad input values: length=%lx "
			 "virt_base=%lx", length, virt);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit0;
	}

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(pd->device, "out of memory");
		ib_mr = ERR_PTR(-ENOMEM);
		goto reg_user_mr_exit0;
	}

	e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
				 mr_access_flags);
	if (IS_ERR(e_mr->umem)) {
		ib_mr = (void *) e_mr->umem;
		goto reg_user_mr_exit1;
	}

	if (e_mr->umem->page_size != PAGE_SIZE) {
		ehca_err(pd->device, "page size not supported, "
			 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
		ib_mr = ERR_PTR(-EINVAL);
		goto reg_user_mr_exit2;
	}

	/* determine number of MR pages */
	num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
	num_hwpages = NUM_CHUNKS((virt % EHCA_PAGESIZE) + length,
				 EHCA_PAGESIZE);

	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_USER;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.u.usr.region = e_mr->umem;
	pginfo.next_hwpage = e_mr->umem->offset / EHCA_PAGESIZE;
	pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
						     (&e_mr->umem->chunk_list),
						     list);

	ret = ehca_reg_mr(shca, e_mr, (u64 *) virt, length, mr_access_flags, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ib_mr = ERR_PTR(ret);
		goto reg_user_mr_exit2;
	}

	/* successful registration of all pages */
	return &e_mr->ib.ib_mr;

reg_user_mr_exit2:
	ib_umem_release(e_mr->umem);
reg_user_mr_exit1:
	ehca_mr_delete(e_mr);
reg_user_mr_exit0:
	if (IS_ERR(ib_mr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x"
			 " udata=%p",
			 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
	return ib_mr;
} /* end ehca_reg_user_mr() */
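
/*
 * Worked example for the page math above (hypothetical values, 4K
 * PAGE_SIZE and EHCA_PAGESIZE): for virt = 0x10000100 and
 * length = 0x8000, NUM_CHUNKS(0x100 + 0x8000, 0x1000) = 9, i.e. the
 * unaligned start pulls in one page more than the length alone suggests.
 */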

/*----------------------------------------------------------------------*/

int ehca_rereg_phys_mr(struct ib_mr *mr,
		       int mr_rereg_mask,
		       struct ib_pd *pd,
		       struct ib_phys_buf *phys_buf_array,
		       int num_phys_buf,
		       int mr_access_flags,
		       u64 *iova_start)
{
	int ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u64 new_size;
	u64 *new_start;
	u32 new_acl;
	struct ehca_pd *new_pd;
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
	u32 num_kpages = 0;
	u32 num_hwpages = 0;
	struct ehca_mr_pginfo pginfo;
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
		/* TODO not supported, because PHYP rereg hCall needs pages */
		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (mr_rereg_mask & IB_MR_REREG_PD) {
		if (!pd) {
			ehca_err(mr->device, "rereg with bad pd, pd=%p "
				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}

	if ((mr_rereg_mask &
	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
	    (mr_rereg_mask == 0)) {
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* check other parameters */
	if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
			ehca_err(mr->device, "not supported for FMR, mr=%p "
				 "flags=%x", mr, e_mr->flags);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
		if (!phys_buf_array || num_phys_buf <= 0) {
			ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
				 " phys_buf_array=%p num_phys_buf=%x",
				 mr_rereg_mask, phys_buf_array, num_phys_buf);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}
	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&	/* change ACL */
	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* set requested values dependent on rereg request */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
	new_start = e_mr->start;	/* new == old address */
	new_size = e_mr->size;	/* new == old length */
	new_acl = e_mr->acl;	/* new == old access control */
	new_pd = container_of(mr->pd, struct ehca_pd, ib_pd); /* new == old PD */

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		new_start = iova_start;	/* change address */
		/* check physical buffer list and calculate size */
		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
						    num_phys_buf, iova_start,
						    &new_size);
		if (ret)
			goto rereg_phys_mr_exit1;
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			ehca_err(mr->device, "bad input values: new_size=%lx "
				 "iova_start=%p", new_size, iova_start);
			ret = -EINVAL;
			goto rereg_phys_mr_exit1;
		}
		num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
					new_size, PAGE_SIZE);
		num_hwpages = NUM_CHUNKS(((u64)new_start % EHCA_PAGESIZE) +
					 new_size, EHCA_PAGESIZE);
		memset(&pginfo, 0, sizeof(pginfo));
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_kpages = num_kpages;
		pginfo.num_hwpages = num_hwpages;
		pginfo.u.phy.num_phys_buf = num_phys_buf;
		pginfo.u.phy.phys_buf_array = phys_buf_array;
		pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
				      EHCA_PAGESIZE);
	}
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		new_pd = container_of(pd, struct ehca_pd, ib_pd);

	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto rereg_phys_mr_exit1;

	/* successful reregistration */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mr->pd = pd;
	mr->lkey = tmp_lkey;
	mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
			 "iova_start=%p",
			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ret;
} /* end ehca_rereg_phys_mr() */

/*----------------------------------------------------------------------*/

int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;
	unsigned long sl_flags;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);

	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto query_mr_exit1;
	}
	mr_attr->pd = mr->pd;
	mr_attr->device_virt_addr = hipzout.vaddr;
	mr_attr->size = hipzout.len;
	mr_attr->lkey = hipzout.lkey;
	mr_attr->rkey = hipzout.rkey;
	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
			 ret, mr, mr_attr);
	return ret;
} /* end ehca_query_mr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_mr(struct ib_mr *mr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;

	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	} else if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto dereg_mr_exit0;
	}

	/* TODO: BUSY: MR still has bound window(s) */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
			 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
			 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto dereg_mr_exit0;
	}

	if (e_mr->umem)
		ib_umem_release(e_mr->umem);

	/* successful deregistration */
	ehca_mr_delete(e_mr);

dereg_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
	return ret;
} /* end ehca_dereg_mr() */

/*----------------------------------------------------------------------*/

struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *ib_mw;
	u64 h_ret;
	struct ehca_mw *e_mw;
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_mw_hipzout_parms hipzout = {{0},0};

	e_mw = ehca_mw_new();
	if (!e_mw) {
		ib_mw = ERR_PTR(-ENOMEM);
		goto alloc_mw_exit0;
	}

	h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
			 "shca=%p hca_hndl=%lx mw=%p",
			 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
		ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
		goto alloc_mw_exit1;
	}
	/* successful MW allocation */
	e_mw->ipz_mw_handle = hipzout.handle;
	e_mw->ib_mw.rkey = hipzout.rkey;
	return &e_mw->ib_mw;

alloc_mw_exit1:
	ehca_mw_delete(e_mw);
alloc_mw_exit0:
	if (IS_ERR(ib_mw))
		ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
	return ib_mw;
} /* end ehca_alloc_mw() */

/*----------------------------------------------------------------------*/

int ehca_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	/* TODO: not supported up to now */
	ehca_gen_err("bind MW currently not supported by HCAD");

	return -EPERM;
} /* end ehca_bind_mw() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_mw(struct ib_mw *mw)
{
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mw->device, struct ehca_shca, ib_device);
	struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

	h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
	if (h_ret != H_SUCCESS) {
		ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
			 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
			 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
			 e_mw->ipz_mw_handle.handle);
		return ehca2ib_return_code(h_ret);
	}
	/* successful deallocation */
	ehca_mw_delete(e_mw);
	return 0;
} /* end ehca_dealloc_mw() */

/*----------------------------------------------------------------------*/

struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
			      int mr_access_flags,
			      struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca =
		container_of(pd->device, struct ehca_shca, ib_device);
	struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_mr *e_fmr;
	int ret;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;

	/* check other parameters */
	if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	    ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	     !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (mr_access_flags & IB_ACCESS_MW_BIND) {
		ehca_err(pd->device, "bad input values: mr_access_flags=%x",
			 mr_access_flags);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
		ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
			 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
			 fmr_attr->max_pages, fmr_attr->max_maps,
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}
	if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
	    ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
		ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
			 fmr_attr->page_shift);
		ib_fmr = ERR_PTR(-EINVAL);
		goto alloc_fmr_exit0;
	}

	e_fmr = ehca_mr_new();
	if (!e_fmr) {
		ib_fmr = ERR_PTR(-ENOMEM);
		goto alloc_fmr_exit0;
	}
	e_fmr->flags |= EHCA_MR_FLAG_FMR;

	/* register MR on HCA */
	memset(&pginfo, 0, sizeof(pginfo));
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  fmr_attr->max_pages * (1 << fmr_attr->page_shift),
			  mr_access_flags, e_pd, &pginfo,
			  &tmp_lkey, &tmp_rkey);
	if (ret) {
		ib_fmr = ERR_PTR(ret);
		goto alloc_fmr_exit1;
	}

	/* successful */
	e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
	e_fmr->fmr_max_pages = fmr_attr->max_pages;
	e_fmr->fmr_max_maps = fmr_attr->max_maps;
	e_fmr->fmr_map_cnt = 0;
	return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
	ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
	if (IS_ERR(ib_fmr))
		ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
			 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
			 mr_access_flags, fmr_attr);
	return ib_fmr;
} /* end ehca_alloc_fmr() */
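
/*
 * Usage sketch (illustration only, hypothetical values): an FMR user
 * fills struct ib_fmr_attr before calling the core verbs layer; note
 * the page_shift restriction checked above (4K or PAGE_SIZE):
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages  = 64,
 *		.max_maps   = 32,
 *		.page_shift = 12,
 *	};
 *	struct ib_fmr *fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
 */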

/*----------------------------------------------------------------------*/

int ehca_map_phys_fmr(struct ib_fmr *fmr,
		      u64 *page_list,
		      int list_len,
		      u64 iova)
{
	int ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
	struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
	struct ehca_mr_pginfo pginfo;
	u32 tmp_lkey, tmp_rkey;

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
	if (ret)
		goto map_phys_fmr_exit0;
	if (iova % e_fmr->fmr_page_size) {
		/* only whole-numbered pages */
		ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
			 iova, e_fmr->fmr_page_size);
		ret = -EINVAL;
		goto map_phys_fmr_exit0;
	}
	if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
		/* HCAD does not limit the maps, however trace this anyway */
		ehca_info(fmr->device, "map limit exceeded, fmr=%p "
			  "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
			  fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
	}

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	pginfo.num_kpages = list_len;
	pginfo.num_hwpages = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
	pginfo.u.fmr.page_list = page_list;
	pginfo.next_hwpage = ((iova & (e_fmr->fmr_page_size-1)) /
			      EHCA_PAGESIZE);
	pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;

	ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
			    list_len * e_fmr->fmr_page_size,
			    e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto map_phys_fmr_exit0;

	/* successful reregistration */
	e_fmr->fmr_map_cnt++;
	e_fmr->ib.ib_fmr.lkey = tmp_lkey;
	e_fmr->ib.ib_fmr.rkey = tmp_rkey;
	return 0;

map_phys_fmr_exit0:
	if (ret)
		ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
			 "iova=%lx",
			 ret, fmr, page_list, list_len, iova);
	return ret;
} /* end ehca_map_phys_fmr() */
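
/*
 * Worked example for the pginfo set-up above (hypothetical values):
 * with list_len = 8 and fmr_page_size = 65536 (a 64K PAGE_SIZE build),
 * num_kpages = 8 and num_hwpages = 8 * (65536 / 4096) = 128 hardware
 * pages of 4K each are registered per map.
 */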

/*----------------------------------------------------------------------*/

int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check all FMR belong to same SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		if (!ib_fmr) {
			ehca_gen_err("bad fmr=%p in list", ib_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
	int ret;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(fmr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

	if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
			 e_fmr, e_fmr->flags);
		ret = -EINVAL;
		goto free_fmr_exit0;
	}

	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
			 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle, fmr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto free_fmr_exit0;
	}
	/* successful deregistration */
	ehca_mr_delete(e_fmr);
	return 0;

free_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
	return ret;
} /* end ehca_dealloc_fmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
	if (ehca_use_hp_mr == 1)	/* module option: high performance MRs */
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
			 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_kpages = pginfo->num_kpages;
	e_mr->num_hwpages = pginfo->num_hwpages;
	e_mr->start = iova_start;
	e_mr->size = size;
	e_mr->acl = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%x",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%lx num_hwpages=%lx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr() */

/*----------------------------------------------------------------------*/

int ehca_reg_mr_rpages(struct ehca_shca *shca,
		       struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo)
{
	int ret = 0;
	u64 h_ret;
	u32 rnum;
	u64 rpage;
	u32 i;
	u64 *kpage;

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_reg_mr_rpages_exit0;
	}

	/* max 512 pages per shot */
	for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {

		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
			if (rnum == 0)
				rnum = MAX_RPAGES;	/* last shot is full */
		} else
			rnum = MAX_RPAGES;

		ret = ehca_set_pagebuf(pginfo, rnum, kpage);
		if (ret) {
			ehca_err(&shca->ib_device, "ehca_set_pagebuf "
				 "bad rc, ret=%x rnum=%x kpage=%p",
				 ret, rnum, kpage);
			goto ehca_reg_mr_rpages_exit1;
		}

		if (rnum > 1) {
			rpage = virt_to_abs(kpage);
			if (!rpage) {
				ehca_err(&shca->ib_device, "kpage=%p i=%x",
					 kpage, i);
				ret = -EFAULT;
				goto ehca_reg_mr_rpages_exit1;
			}
		} else
			rpage = *kpage;

		h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
						 0, /* pagesize 4k */
						 0, rpage, rnum);

		if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
			/*
			 * check for 'registration complete'==H_SUCCESS
			 * and for 'page registered'==H_PAGE_REGISTERED
			 */
			if (h_ret != H_SUCCESS) {
				ehca_err(&shca->ib_device, "last "
					 "hipz_reg_rpage_mr failed, h_ret=%lx "
					 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
					 " lkey=%x", h_ret, e_mr, i,
					 shca->ipz_hca_handle.handle,
					 e_mr->ipz_mr_handle.handle,
					 e_mr->ib.ib_mr.lkey);
				ret = ehca2ib_return_code(h_ret);
				break;
			} else
				ret = 0;
		} else if (h_ret != H_PAGE_REGISTERED) {
			ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
				 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
				 "mr_hndl=%lx", h_ret, e_mr, i,
				 e_mr->ib.ib_mr.lkey,
				 shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle);
			ret = ehca2ib_return_code(h_ret);
			break;
		} else
			ret = 0;
	} /* end for(i) */

ehca_reg_mr_rpages_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
			 "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr,
			 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
	return ret;
} /* end ehca_reg_mr_rpages() */
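
/*
 * Worked example for the chunking above (hypothetical count): for
 * pginfo->num_hwpages = 1300 and MAX_RPAGES = 512, the loop issues
 * NUM_CHUNKS(1300, 512) = 3 hCalls with rnum = 512, 512 and
 * 1300 % 512 = 276; only the final hCall may return H_SUCCESS, all
 * earlier ones must return H_PAGE_REGISTERED.
 */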

/*----------------------------------------------------------------------*/

inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx "
			 "kpage=%p", e_mr, pginfo, pginfo->type,
			 pginfo->num_kpages, pginfo->num_hwpages, kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = virt_to_abs(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;
		ret = -EAGAIN;
	} else if ((u64 *)hipzout.vaddr != iova_start) {
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
			 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_kpages = pginfo->num_kpages;
		e_mr->num_hwpages = pginfo->num_hwpages;
		e_mr->start = iova_start;
		e_mr->size = size;
		e_mr->acl = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
	}

ehca_rereg_mr_rereg1_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
	if ( ret && (ret != -EAGAIN) )
		ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
			 "pginfo=%p num_kpages=%lx num_hwpages=%lx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
			 pginfo->num_hwpages);
	return ret;
} /* end ehca_rereg_mr_rereg1() */

/*----------------------------------------------------------------------*/

int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_hwpages > MAX_RPAGES) ||
	    (e_mr->num_hwpages > MAX_RPAGES) ||
	    (pginfo->num_hwpages > e_mr->num_hwpages)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, "
			 "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x",
			 pginfo->num_hwpages, e_mr->num_hwpages);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca2ib_return_code(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey);
		if (ret) {
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */
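
/*
 * Worked example for the Rereg1/Rereg3 decision above (hypothetical
 * counts): shrinking an MR from 400 to 100 hwpages satisfies all three
 * conditions (both counts <= MAX_RPAGES = 512, no growth), so a single
 * hipz_h_reregister_pmr suffices; growing it to 600 hwpages violates
 * the first condition and forces the deregister-plus-reregister path.
 */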

/*----------------------------------------------------------------------*/

int ehca_unmap_one_fmr(struct ehca_shca *shca,
		       struct ehca_mr *e_fmr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_pd *e_pd =
		container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
	struct ehca_mr save_fmr;
	u32 tmp_lkey, tmp_rkey;
	struct ehca_mr_pginfo pginfo;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
	struct ehca_mr save_mr;

	if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
		/*
		 * note: after using rereg hcall with len=0,
		 * rereg hcall must be used again for registering pages
		 */
		h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
					      0, 0, e_pd->fw_pd, 0, &hipzout);
		if (h_ret == H_SUCCESS) {
			/* successful reregistration */
			e_fmr->start = NULL;
			e_fmr->size = 0;
			tmp_lkey = hipzout.lkey;
			tmp_rkey = hipzout.rkey;
			return 0;
		}
		/*
		 * should not happen, because length checked above,
		 * FMRs are not shared and no MW bound to FMRs
		 */
		ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
			 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
			 "mr_hndl=%lx lkey=%x lkey_out=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
		/* try free and rereg */
	}

	/* first free old FMR */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_free_mr failed, "
			 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
			 "lkey=%x",
			 h_ret, e_fmr, shca->ipz_hca_handle.handle,
			 e_fmr->ipz_mr_handle.handle,
			 e_fmr->ib.ib_fmr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_unmap_one_fmr_exit0;
	}
	/* clean ehca_mr_t, without changing lock */
	save_fmr = *e_fmr;
	ehca_mr_deletenew(e_fmr);

	/* set some MR values */
	e_fmr->flags = save_fmr.flags;
	e_fmr->fmr_page_size = save_fmr.fmr_page_size;
	e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
	e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
	e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
	e_fmr->acl = save_fmr.acl;

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_FMR;
	pginfo.num_kpages = 0;
	pginfo.num_hwpages = 0;
	ret = ehca_reg_mr(shca, e_fmr, NULL,
			  (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
			  e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
			  &tmp_rkey);
	if (ret) {
		u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
		memcpy(&e_fmr->flags, &(save_mr.flags),
		       sizeof(struct ehca_mr) - offset);
		goto ehca_unmap_one_fmr_exit0;
	}

ehca_unmap_one_fmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
			 "fmr_max_pages=%x",
			 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
	return ret;
} /* end ehca_unmap_one_fmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_smr(struct ehca_shca *shca,
		 struct ehca_mr *e_origmr,
		 struct ehca_mr *e_newmr,
		 u64 *iova_start,
		 int acl,
		 struct ehca_pd *e_pd,
		 u32 *lkey, /*OUT*/
		 u32 *rkey) /*OUT*/
{
	int ret = 0;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
			 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
			 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
			 shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_smr_exit0;
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_smr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
			 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
			 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
	return ret;
} /* end ehca_reg_smr() */

/*----------------------------------------------------------------------*/

/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
	struct ehca_shca *shca,
	struct ehca_pd *e_pd,
	struct ehca_mr **e_maxmr)  /*OUT*/
{
	int ret;
	struct ehca_mr *e_mr;
	u64 *iova_start;
	u64 size_maxmr;
	struct ehca_mr_pginfo pginfo;
	struct ib_phys_buf ib_pbuf;
	u32 num_kpages;
	u32 num_hwpages;

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(&shca->ib_device, "out of memory");
		ret = -ENOMEM;
		goto ehca_reg_internal_maxmr_exit0;
	}
	e_mr->flags |= EHCA_MR_FLAG_MAXMR;

	/* register internal max-MR on HCA */
	size_maxmr = (u64)high_memory - PAGE_OFFSET;
	iova_start = (u64 *)KERNELBASE;
	ib_pbuf.addr = 0;
	ib_pbuf.size = size_maxmr;
	num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
				PAGE_SIZE);
	num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size_maxmr,
				 EHCA_PAGESIZE);

	memset(&pginfo, 0, sizeof(pginfo));
	pginfo.type = EHCA_MR_PGI_PHYS;
	pginfo.num_kpages = num_kpages;
	pginfo.num_hwpages = num_hwpages;
	pginfo.u.phy.num_phys_buf = 1;
	pginfo.u.phy.phys_buf_array = &ib_pbuf;

	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
			 "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
			 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
			 num_kpages, num_hwpages);
		goto ehca_reg_internal_maxmr_exit1;
	}

	/* successful registration of all pages */
	e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
	e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
	e_mr->ib.ib_mr.uobject = NULL;
	atomic_inc(&(e_pd->ib_pd.usecnt));
	atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
	*e_maxmr = e_mr;
	return 0;

ehca_reg_internal_maxmr_exit1:
	ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
			 ret, shca, e_pd, e_maxmr);
	return ret;
} /* end ehca_reg_internal_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_maxmr(struct ehca_shca *shca,
		   struct ehca_mr *e_newmr,
		   u64 *iova_start,
		   int acl,
		   struct ehca_pd *e_pd,
		   u32 *lkey,
		   u32 *rkey)
{
	u64 h_ret;
	struct ehca_mr *e_origmr = shca->maxmr;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
				    (u64)iova_start, hipz_acl, e_pd->fw_pd,
				    &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
			 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, e_origmr, shca->ipz_hca_handle.handle,
			 e_origmr->ipz_mr_handle.handle,
			 e_origmr->ib.ib_mr.lkey);
		return ehca2ib_return_code(h_ret);
	}
	/* successful registration */
	e_newmr->num_kpages = e_origmr->num_kpages;
	e_newmr->num_hwpages = e_origmr->num_hwpages;
	e_newmr->start = iova_start;
	e_newmr->size = e_origmr->size;
	e_newmr->acl = acl;
	e_newmr->ipz_mr_handle = hipzout.handle;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;
} /* end ehca_reg_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
	int ret;
	struct ehca_mr *e_maxmr;
	struct ib_pd *ib_pd;

	if (!shca->maxmr) {
		ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
		ret = -EINVAL;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	e_maxmr = shca->maxmr;
	ib_pd = e_maxmr->ib.ib_mr.pd;
	shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */

	ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
	if (ret) {
		ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
			 "ret=%x e_maxmr=%p shca=%p lkey=%x",
			 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
		shca->maxmr = e_maxmr;
		goto ehca_dereg_internal_maxmr_exit0;
	}

	atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
			 ret, shca, shca->maxmr);
	return ret;
} /* end ehca_dereg_internal_maxmr() */

/*----------------------------------------------------------------------*/

/*
 * check physical buffer array of MR verbs for validness and
 * calculates MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
				  int num_phys_buf,
				  u64 *iova_start,
				  u64 *size)
{
	struct ib_phys_buf *pbuf = phys_buf_array;
	u64 size_count = 0;
	u32 i;

	if (num_phys_buf == 0) {
		ehca_gen_err("bad phys buf array len, num_phys_buf=0");
		return -EINVAL;
	}
	/* check first buffer */
	if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
		ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
			     "pbuf->addr=%lx pbuf->size=%lx",
			     iova_start, pbuf->addr, pbuf->size);
		return -EINVAL;
	}
	if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
	    (num_phys_buf > 1)) {
		ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
			     "pbuf->size=%lx", pbuf->addr, pbuf->size);
		return -EINVAL;
	}

	for (i = 0; i < num_phys_buf; i++) {
		if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
			ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
				     "pbuf->size=%lx",
				     i, pbuf->addr, pbuf->size);
			return -EINVAL;
		}
		if (((i > 0) &&	/* not 1st */
		     (i < (num_phys_buf - 1)) &&	/* not last */
		     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
			ehca_gen_err("bad size, i=%x pbuf->size=%lx",
				     i, pbuf->size);
			return -EINVAL;
		}
		size_count += pbuf->size;
		pbuf++;
	}

	*size = size_count;
	return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */
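
/*
 * Worked example (hypothetical values, 4K PAGE_SIZE): the two-element
 * list { addr = 0x1000, size = 0x2000 } and { addr = 0x3000,
 * size = 0x800 } passes all checks above for an iova_start with
 * matching page offset, and *size is returned as 0x2800.
 */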

/*----------------------------------------------------------------------*/

/* check page list of map FMR verb for validness */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
			     u64 *page_list,
			     int list_len)
{
	u32 i;
	u64 *page;

	if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
		ehca_gen_err("bad list_len, list_len=%x "
			     "e_fmr->fmr_max_pages=%x fmr=%p",
			     list_len, e_fmr->fmr_max_pages, e_fmr);
		return -EINVAL;
	}

	/* each page must be aligned */
	page = page_list;
	for (i = 0; i < list_len; i++) {
		if (*page % e_fmr->fmr_page_size) {
			ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
				     "fmr_page_size=%x", i, *page, page, e_fmr,
				     e_fmr->fmr_page_size);
			return -EINVAL;
		}
		page++;
	}

	return 0;
} /* end ehca_fmr_check_page_list() */

/*----------------------------------------------------------------------*/

/* PAGE_SIZE >= pginfo->hwpage_size */
static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	u64 pgaddr;
	u32 i = 0;
	u32 j = 0;

	/* loop over desired chunk entries */
	chunk      = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
	list_for_each_entry_continue(
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
			pgaddr = page_to_pfn(chunk->page_list[i].page)
				<< PAGE_SHIFT;
			*kpage = phys_to_abs(pgaddr +
					     (pginfo->next_hwpage *
					      EHCA_PAGESIZE));
			if ( !(*kpage) ) {
				ehca_gen_err("pgaddr=%lx "
					     "chunk->page_list[i]=%lx "
					     "i=%x next_hwpage=%lx",
					     pgaddr, (u64)sg_dma_address(
						     &chunk->page_list[i]),
					     i, pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			kpage++;
			if (pginfo->next_hwpage %
			    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
				(pginfo->kpage_cnt)++;
				(pginfo->u.usr.next_nmap)++;
				pginfo->next_hwpage = 0;
				i++;
			}
			j++;
			if (j >= number) break;
		}
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
		    (j >= number)) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
			break;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
		} else if (j >= number)
			break;
		else
			prev_chunk = chunk;
	}
	pginfo->u.usr.next_chunk =
		list_prepare_entry(prev_chunk,
				   (&(pginfo->u.usr.region->chunk_list)),
				   list);
	return ret;
}

int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
			  u32 number,
			  u64 *kpage)
{
	int ret = 0;
	struct ib_phys_buf *pbuf;
	u64 num_hw, offs_hw;
	u32 i = 0;

	/* loop over desired phys_buf_array entries */
	while (i < number) {
		pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
		num_hw = NUM_CHUNKS((pbuf->addr % EHCA_PAGESIZE) +
				    pbuf->size, EHCA_PAGESIZE);
		offs_hw = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
		while (pginfo->next_hwpage < offs_hw + num_hw) {
			/* sanity check */
			if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
			    (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
				ehca_gen_err("kpage_cnt >= num_kpages, "
					     "kpage_cnt=%lx num_kpages=%lx "
					     "hwpage_cnt=%lx "
					     "num_hwpages=%lx i=%x",
					     pginfo->kpage_cnt,
					     pginfo->num_kpages,
					     pginfo->hwpage_cnt,
					     pginfo->num_hwpages, i);
				return -EFAULT;
			}
			*kpage = phys_to_abs(
				(pbuf->addr & EHCA_PAGEMASK)
				+ (pginfo->next_hwpage * EHCA_PAGESIZE));
			if ( !(*kpage) && pbuf->addr ) {
				ehca_gen_err("pbuf->addr=%lx "
					     "pbuf->size=%lx "
					     "next_hwpage=%lx", pbuf->addr,
					     pbuf->size,
					     pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			if (pginfo->next_hwpage %
			    (PAGE_SIZE / EHCA_PAGESIZE) == 0)
				(pginfo->kpage_cnt)++;
			kpage++;
			i++;
			if (i >= number) break;
		}
		if (pginfo->next_hwpage >= offs_hw + num_hw) {
			(pginfo->u.phy.next_buf)++;
			pginfo->next_hwpage = 0;
		}
	}
	return ret;
}

int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
			 u32 number,
			 u64 *kpage)
{
	int ret = 0;
	u64 *fmrlist;
	u32 i;

	/* loop over desired page_list entries */
	fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
	for (i = 0; i < number; i++) {
		*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
				     pginfo->next_hwpage * EHCA_PAGESIZE);
		if ( !(*kpage) ) {
			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
				     "next_listelem=%lx next_hwpage=%lx",
				     *fmrlist, fmrlist,
				     pginfo->u.fmr.next_listelem,
				     pginfo->next_hwpage);
			return -EFAULT;
		}
		(pginfo->hwpage_cnt)++;
		(pginfo->next_hwpage)++;
		kpage++;
		if (pginfo->next_hwpage %
		    (pginfo->u.fmr.fmr_pgsize / EHCA_PAGESIZE) == 0) {
			(pginfo->kpage_cnt)++;
			(pginfo->u.fmr.next_listelem)++;
			fmrlist++;
			pginfo->next_hwpage = 0;
		}
	}
	return ret;
}

/* setup page buffer from page info */
int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret;

	switch (pginfo->type) {
	case EHCA_MR_PGI_PHYS:
		ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_USER:
		ret = ehca_set_pagebuf_user1(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_FMR:
		ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
		break;
	default:
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		break;
	}
	return ret;
} /* end ehca_set_pagebuf() */

/*----------------------------------------------------------------------*/

/*
 * check MR if it is a max-MR, i.e. uses whole memory
 * in case it's a max-MR 1 is returned, else 0
 */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start)
{
	/* a MR is treated as max-MR only if it fits following: */
	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
	    (iova_start == (void *)KERNELBASE)) {
		ehca_gen_dbg("this is a max-MR");
		return 1;
	} else
		return 0;
} /* end ehca_mr_is_maxmr() */

/*----------------------------------------------------------------------*/

/* map access control for MR/MW. This routine is used for MR and MW. */
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl)
{
	*hipz_acl = 0;
	if (ib_acl & IB_ACCESS_REMOTE_READ)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
	if (ib_acl & IB_ACCESS_MW_BIND)
		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */
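
/*
 * Example: ib_acl = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ maps
 * to hipz_acl = HIPZ_ACCESSCTRL_L_WRITE | HIPZ_ACCESSCTRL_R_READ; all
 * other firmware ACL bits stay clear.
 */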

/*----------------------------------------------------------------------*/

/* sets page size in hipz access control for MR/MW. */
void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
{
	return; /* HCA supports only 4k */
} /* end ehca_mrmw_set_pgsize_hipz_acl() */

/*----------------------------------------------------------------------*/

/*
 * reverse map access control for MR/MW.
 * This routine is used for MR and MW.
 */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl) /*OUT*/
{
	*ib_acl = 0;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
		*ib_acl |= IB_ACCESS_REMOTE_READ;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
		*ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */

/*----------------------------------------------------------------------*/

/*
 * MR destructor and constructor
 * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
 * except struct ib_mr and spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
	mr->flags = 0;
	mr->num_kpages = 0;
	mr->num_hwpages = 0;
	mr->acl = 0;
	mr->start = NULL;
	mr->fmr_page_size = 0;
	mr->fmr_max_pages = 0;
	mr->fmr_max_maps = 0;
	mr->fmr_map_cnt = 0;
	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
	memset(&mr->galpas, 0, sizeof(mr->galpas));
} /* end ehca_mr_deletenew() */

int ehca_init_mrmw_cache(void)
{
	mr_cache = kmem_cache_create("ehca_cache_mr",
				     sizeof(struct ehca_mr), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!mr_cache)
		return -ENOMEM;
	mw_cache = kmem_cache_create("ehca_cache_mw",
				     sizeof(struct ehca_mw), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL, NULL);
	if (!mw_cache) {
		kmem_cache_destroy(mr_cache);
		mr_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}

void ehca_cleanup_mrmw_cache(void)
{
	if (mr_cache)
		kmem_cache_destroy(mr_cache);
	if (mw_cache)
		kmem_cache_destroy(mw_cache);
}