IB/ehca: Support large page MRs
drivers/infiniband/hw/ehca/ehca_mrmw.c
/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  MR/MW functions
 *
 *  Authors: Dietmar Decker <ddecker@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 *  OpenIB BSD License
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  Redistributions of source code must retain the above copyright notice, this
 *  list of conditions and the following disclaimer.
 *
 *  Redistributions in binary form must reproduce the above copyright notice,
 *  this list of conditions and the following disclaimer in the documentation
 *  and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 *  WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 *  OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 *  ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rdma/ib_umem.h>

#include <asm/current.h>

#include "ehca_iverbs.h"
#include "ehca_mrmw.h"
#include "hcp_if.h"
#include "hipz_hw.h"

#define NUM_CHUNKS(length, chunk_size) \
        (((length) + (chunk_size - 1)) / (chunk_size))
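/* NUM_CHUNKS() rounds up: e.g. NUM_CHUNKS(0x2001, 0x1000) == 3 */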
/* max number of rpages (per hcall register_rpages) */
#define MAX_RPAGES 512

static struct kmem_cache *mr_cache;
static struct kmem_cache *mw_cache;

enum ehca_mr_pgsize {
        EHCA_MR_PGSIZE4K  = 0x1000L,
        EHCA_MR_PGSIZE64K = 0x10000L,
        EHCA_MR_PGSIZE1M  = 0x100000L,
        EHCA_MR_PGSIZE16M = 0x1000000L
};

extern int ehca_mr_largepage;

static u32 ehca_encode_hwpage_size(u32 pgsize)
{
        u32 idx = 0;
        pgsize >>= 12;
        /*
         * map mr page size into hw code:
         * 0, 1, 2, 3 for 4K, 64K, 1M, 16M
         */
        while (!(pgsize & 1)) {
                idx++;
                pgsize >>= 4;
        }
        return idx;
}
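
/*
 * For illustration: ehca_encode_hwpage_size(0x1000) == 0 (4K),
 * 0x10000 -> 1 (64K), 0x100000 -> 2 (1M), 0x1000000 -> 3 (16M);
 * each 16x step in page size increments the hw code by one.
 */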

static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
{
        if (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)
                return EHCA_MR_PGSIZE16M;
        return EHCA_MR_PGSIZE4K;
}

static struct ehca_mr *ehca_mr_new(void)
{
        struct ehca_mr *me;

        me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
        if (me)
                spin_lock_init(&me->mrlock);
        else
                ehca_gen_err("alloc failed");

        return me;
}

static void ehca_mr_delete(struct ehca_mr *me)
{
        kmem_cache_free(mr_cache, me);
}

static struct ehca_mw *ehca_mw_new(void)
{
        struct ehca_mw *me;

        me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
        if (me)
                spin_lock_init(&me->mwlock);
        else
                ehca_gen_err("alloc failed");

        return me;
}

static void ehca_mw_delete(struct ehca_mw *me)
{
        kmem_cache_free(mw_cache, me);
}

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
        struct ib_mr *ib_mr;
        int ret;
        struct ehca_mr *e_maxmr;
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
        struct ehca_shca *shca =
                container_of(pd->device, struct ehca_shca, ib_device);

        if (shca->maxmr) {
                e_maxmr = ehca_mr_new();
                if (!e_maxmr) {
                        ehca_err(&shca->ib_device, "out of memory");
                        ib_mr = ERR_PTR(-ENOMEM);
                        goto get_dma_mr_exit0;
                }

                ret = ehca_reg_maxmr(shca, e_maxmr, (u64 *)KERNELBASE,
                                     mr_access_flags, e_pd,
                                     &e_maxmr->ib.ib_mr.lkey,
                                     &e_maxmr->ib.ib_mr.rkey);
                if (ret) {
                        ehca_mr_delete(e_maxmr);
                        ib_mr = ERR_PTR(ret);
                        goto get_dma_mr_exit0;
                }
                ib_mr = &e_maxmr->ib.ib_mr;
        } else {
                ehca_err(&shca->ib_device, "no internal max-MR exist!");
                ib_mr = ERR_PTR(-EINVAL);
                goto get_dma_mr_exit0;
        }

get_dma_mr_exit0:
        if (IS_ERR(ib_mr))
                ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x",
                         PTR_ERR(ib_mr), pd, mr_access_flags);
        return ib_mr;
} /* end ehca_get_dma_mr() */

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
                               struct ib_phys_buf *phys_buf_array,
                               int num_phys_buf,
                               int mr_access_flags,
                               u64 *iova_start)
{
        struct ib_mr *ib_mr;
        int ret;
        struct ehca_mr *e_mr;
        struct ehca_shca *shca =
                container_of(pd->device, struct ehca_shca, ib_device);
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);

        u64 size;

        if ((num_phys_buf <= 0) || !phys_buf_array) {
                ehca_err(pd->device, "bad input values: num_phys_buf=%x "
                         "phys_buf_array=%p", num_phys_buf, phys_buf_array);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_phys_mr_exit0;
        }
        if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
            ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
                /*
                 * Remote Write Access requires Local Write Access
                 * Remote Atomic Access requires Local Write Access
                 */
                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
                         mr_access_flags);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_phys_mr_exit0;
        }

        /* check physical buffer list and calculate size */
        ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
                                            iova_start, &size);
        if (ret) {
                ib_mr = ERR_PTR(ret);
                goto reg_phys_mr_exit0;
        }
        if ((size == 0) ||
            (((u64)iova_start + size) < (u64)iova_start)) {
                ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
                         size, iova_start);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_phys_mr_exit0;
        }

        e_mr = ehca_mr_new();
        if (!e_mr) {
                ehca_err(pd->device, "out of memory");
                ib_mr = ERR_PTR(-ENOMEM);
                goto reg_phys_mr_exit0;
        }

        /* register MR on HCA */
        if (ehca_mr_is_maxmr(size, iova_start)) {
                e_mr->flags |= EHCA_MR_FLAG_MAXMR;
                ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
                                     e_pd, &e_mr->ib.ib_mr.lkey,
                                     &e_mr->ib.ib_mr.rkey);
                if (ret) {
                        ib_mr = ERR_PTR(ret);
                        goto reg_phys_mr_exit1;
                }
        } else {
                struct ehca_mr_pginfo pginfo;
                u32 num_kpages;
                u32 num_hwpages;
                u64 hw_pgsize;

                num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
                                        PAGE_SIZE);
                /* for kernel space we try most possible pgsize */
                hw_pgsize = ehca_get_max_hwpage_size(shca);
                num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size,
                                         hw_pgsize);
                memset(&pginfo, 0, sizeof(pginfo));
                pginfo.type = EHCA_MR_PGI_PHYS;
                pginfo.num_kpages = num_kpages;
                pginfo.hwpage_size = hw_pgsize;
                pginfo.num_hwpages = num_hwpages;
                pginfo.u.phy.num_phys_buf = num_phys_buf;
                pginfo.u.phy.phys_buf_array = phys_buf_array;
                pginfo.next_hwpage =
                        ((u64)iova_start & ~(hw_pgsize - 1)) / hw_pgsize;

                ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
                                  e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
                                  &e_mr->ib.ib_mr.rkey);
                if (ret) {
                        ib_mr = ERR_PTR(ret);
                        goto reg_phys_mr_exit1;
                }
        }

        /* successful registration of all pages */
        return &e_mr->ib.ib_mr;

reg_phys_mr_exit1:
        ehca_mr_delete(e_mr);
reg_phys_mr_exit0:
        if (IS_ERR(ib_mr))
                ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
                         "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
                         PTR_ERR(ib_mr), pd, phys_buf_array,
                         num_phys_buf, mr_access_flags, iova_start);
        return ib_mr;
} /* end ehca_reg_phys_mr() */

/*----------------------------------------------------------------------*/

struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                               u64 virt, int mr_access_flags,
                               struct ib_udata *udata)
{
        struct ib_mr *ib_mr;
        struct ehca_mr *e_mr;
        struct ehca_shca *shca =
                container_of(pd->device, struct ehca_shca, ib_device);
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
        struct ehca_mr_pginfo pginfo;
        int ret;
        u32 num_kpages;
        u32 num_hwpages;
        u64 hwpage_size;

        if (!pd) {
                ehca_gen_err("bad pd=%p", pd);
                return ERR_PTR(-EFAULT);
        }

        if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
            ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
                /*
                 * Remote Write Access requires Local Write Access
                 * Remote Atomic Access requires Local Write Access
                 */
                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
                         mr_access_flags);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_user_mr_exit0;
        }

        if (length == 0 || virt + length < virt) {
                ehca_err(pd->device, "bad input values: length=%lx "
                         "virt_base=%lx", length, virt);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_user_mr_exit0;
        }

        e_mr = ehca_mr_new();
        if (!e_mr) {
                ehca_err(pd->device, "out of memory");
                ib_mr = ERR_PTR(-ENOMEM);
                goto reg_user_mr_exit0;
        }

        e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
                                 mr_access_flags);
        if (IS_ERR(e_mr->umem)) {
                ib_mr = (void *)e_mr->umem;
                goto reg_user_mr_exit1;
        }

        if (e_mr->umem->page_size != PAGE_SIZE) {
                ehca_err(pd->device, "page size not supported, "
                         "e_mr->umem->page_size=%x", e_mr->umem->page_size);
                ib_mr = ERR_PTR(-EINVAL);
                goto reg_user_mr_exit2;
        }

        /* determine number of MR pages */
        num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
        /* select proper hw_pgsize */
        if (ehca_mr_largepage &&
            (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_16M)) {
                if (length <= EHCA_MR_PGSIZE4K
                    && PAGE_SIZE == EHCA_MR_PGSIZE4K)
                        hwpage_size = EHCA_MR_PGSIZE4K;
                else if (length <= EHCA_MR_PGSIZE64K)
                        hwpage_size = EHCA_MR_PGSIZE64K;
                else if (length <= EHCA_MR_PGSIZE1M)
                        hwpage_size = EHCA_MR_PGSIZE1M;
                else
                        hwpage_size = EHCA_MR_PGSIZE16M;
        } else
                hwpage_size = EHCA_MR_PGSIZE4K;
        ehca_dbg(pd->device, "hwpage_size=%lx", hwpage_size);
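        /*
         * E.g. with 16M hw pages available, a 100 KB region is mapped
         * with 1M hw pages and a 5 MB region with 16M hw pages; without
         * large page support everything stays at 4K.
         */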

reg_user_mr_fallback:
        num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
        /* register MR on HCA */
        memset(&pginfo, 0, sizeof(pginfo));
        pginfo.type = EHCA_MR_PGI_USER;
        pginfo.hwpage_size = hwpage_size;
        pginfo.num_kpages = num_kpages;
        pginfo.num_hwpages = num_hwpages;
        pginfo.u.usr.region = e_mr->umem;
        pginfo.next_hwpage = e_mr->umem->offset / hwpage_size;
        pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
                                                     (&e_mr->umem->chunk_list),
                                                     list);

        ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
                          e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
                          &e_mr->ib.ib_mr.rkey);
        if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
                ehca_warn(pd->device, "failed to register mr "
                          "with hwpage_size=%lx", hwpage_size);
                ehca_info(pd->device, "try to register mr with "
                          "kpage_size=%lx", PAGE_SIZE);
                /*
                 * this means kpages are not contiguous for a hw page
                 * try kernel page size as fallback solution
                 */
                hwpage_size = PAGE_SIZE;
                goto reg_user_mr_fallback;
        }
        if (ret) {
                ib_mr = ERR_PTR(ret);
                goto reg_user_mr_exit2;
        }

        /* successful registration of all pages */
        return &e_mr->ib.ib_mr;

reg_user_mr_exit2:
        ib_umem_release(e_mr->umem);
reg_user_mr_exit1:
        ehca_mr_delete(e_mr);
reg_user_mr_exit0:
        if (IS_ERR(ib_mr))
                ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x"
                         " udata=%p",
                         PTR_ERR(ib_mr), pd, mr_access_flags, udata);
        return ib_mr;
} /* end ehca_reg_user_mr() */

/*----------------------------------------------------------------------*/

int ehca_rereg_phys_mr(struct ib_mr *mr,
                       int mr_rereg_mask,
                       struct ib_pd *pd,
                       struct ib_phys_buf *phys_buf_array,
                       int num_phys_buf,
                       int mr_access_flags,
                       u64 *iova_start)
{
        int ret;

        struct ehca_shca *shca =
                container_of(mr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
        struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
        u64 new_size;
        u64 *new_start;
        u32 new_acl;
        struct ehca_pd *new_pd;
        u32 tmp_lkey, tmp_rkey;
        unsigned long sl_flags;
        u32 num_kpages = 0;
        u32 num_hwpages = 0;
        struct ehca_mr_pginfo pginfo;
        u32 cur_pid = current->tgid;

        if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
            (my_pd->ownpid != cur_pid)) {
                ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
                         cur_pid, my_pd->ownpid);
                ret = -EINVAL;
                goto rereg_phys_mr_exit0;
        }

        if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
                /* TODO not supported, because PHYP rereg hCall needs pages */
                ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
                         "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
                ret = -EINVAL;
                goto rereg_phys_mr_exit0;
        }

        if (mr_rereg_mask & IB_MR_REREG_PD) {
                if (!pd) {
                        ehca_err(mr->device, "rereg with bad pd, pd=%p "
                                 "mr_rereg_mask=%x", pd, mr_rereg_mask);
                        ret = -EINVAL;
                        goto rereg_phys_mr_exit0;
                }
        }

        if ((mr_rereg_mask &
             ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
            (mr_rereg_mask == 0)) {
                ret = -EINVAL;
                goto rereg_phys_mr_exit0;
        }

        /* check other parameters */
        if (e_mr == shca->maxmr) {
                /* should be impossible, however reject to be sure */
                ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
                         "shca->maxmr=%p mr->lkey=%x",
                         mr, shca->maxmr, mr->lkey);
                ret = -EINVAL;
                goto rereg_phys_mr_exit0;
        }
        if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
                if (e_mr->flags & EHCA_MR_FLAG_FMR) {
                        ehca_err(mr->device, "not supported for FMR, mr=%p "
                                 "flags=%x", mr, e_mr->flags);
                        ret = -EINVAL;
                        goto rereg_phys_mr_exit0;
                }
                if (!phys_buf_array || num_phys_buf <= 0) {
                        ehca_err(mr->device, "bad input values mr_rereg_mask=%x"
                                 " phys_buf_array=%p num_phys_buf=%x",
                                 mr_rereg_mask, phys_buf_array, num_phys_buf);
                        ret = -EINVAL;
                        goto rereg_phys_mr_exit0;
                }
        }
        if ((mr_rereg_mask & IB_MR_REREG_ACCESS) && /* change ACL */
            (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
              !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
             ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
              !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
                /*
                 * Remote Write Access requires Local Write Access
                 * Remote Atomic Access requires Local Write Access
                 */
                ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
                         "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
                ret = -EINVAL;
                goto rereg_phys_mr_exit0;
        }

        /* set requested values dependent on rereg request */
        spin_lock_irqsave(&e_mr->mrlock, sl_flags);
        new_start = e_mr->start;
        new_size = e_mr->size;
        new_acl = e_mr->acl;
        new_pd = container_of(mr->pd, struct ehca_pd, ib_pd);

        if (mr_rereg_mask & IB_MR_REREG_TRANS) {
                u64 hw_pgsize = ehca_get_max_hwpage_size(shca);

                new_start = iova_start; /* change address */
                /* check physical buffer list and calculate size */
                ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
                                                    num_phys_buf, iova_start,
                                                    &new_size);
                if (ret)
                        goto rereg_phys_mr_exit1;
                if ((new_size == 0) ||
                    (((u64)iova_start + new_size) < (u64)iova_start)) {
                        ehca_err(mr->device, "bad input values: new_size=%lx "
                                 "iova_start=%p", new_size, iova_start);
                        ret = -EINVAL;
                        goto rereg_phys_mr_exit1;
                }
                num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
                                        new_size, PAGE_SIZE);
                num_hwpages = NUM_CHUNKS(((u64)new_start % hw_pgsize) +
                                         new_size, hw_pgsize);
                memset(&pginfo, 0, sizeof(pginfo));
                pginfo.type = EHCA_MR_PGI_PHYS;
                pginfo.num_kpages = num_kpages;
                pginfo.hwpage_size = hw_pgsize;
                pginfo.num_hwpages = num_hwpages;
                pginfo.u.phy.num_phys_buf = num_phys_buf;
                pginfo.u.phy.phys_buf_array = phys_buf_array;
                pginfo.next_hwpage =
                        ((u64)iova_start & ~(hw_pgsize - 1)) / hw_pgsize;
        }
        if (mr_rereg_mask & IB_MR_REREG_ACCESS)
                new_acl = mr_access_flags;
        if (mr_rereg_mask & IB_MR_REREG_PD)
                new_pd = container_of(pd, struct ehca_pd, ib_pd);

        ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
                            new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
        if (ret)
                goto rereg_phys_mr_exit1;

        /* successful reregistration */
        if (mr_rereg_mask & IB_MR_REREG_PD)
                mr->pd = pd;
        mr->lkey = tmp_lkey;
        mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
        spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
        if (ret)
                ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
                         "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
                         "iova_start=%p",
                         ret, mr, mr_rereg_mask, pd, phys_buf_array,
                         num_phys_buf, mr_access_flags, iova_start);
        return ret;
} /* end ehca_rereg_phys_mr() */

/*----------------------------------------------------------------------*/

int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
        int ret = 0;
        u64 h_ret;
        struct ehca_shca *shca =
                container_of(mr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
        struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
        u32 cur_pid = current->tgid;
        unsigned long sl_flags;
        struct ehca_mr_hipzout_parms hipzout;

        if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
            (my_pd->ownpid != cur_pid)) {
                ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
                         cur_pid, my_pd->ownpid);
                ret = -EINVAL;
                goto query_mr_exit0;
        }

        if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
                ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
                         "e_mr->flags=%x", mr, e_mr, e_mr->flags);
                ret = -EINVAL;
                goto query_mr_exit0;
        }

        memset(mr_attr, 0, sizeof(struct ib_mr_attr));
        spin_lock_irqsave(&e_mr->mrlock, sl_flags);

        h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
                         "hca_hndl=%lx mr_hndl=%lx lkey=%x",
                         h_ret, mr, shca->ipz_hca_handle.handle,
                         e_mr->ipz_mr_handle.handle, mr->lkey);
                ret = ehca2ib_return_code(h_ret);
                goto query_mr_exit1;
        }
        mr_attr->pd = mr->pd;
        mr_attr->device_virt_addr = hipzout.vaddr;
        mr_attr->size = hipzout.len;
        mr_attr->lkey = hipzout.lkey;
        mr_attr->rkey = hipzout.rkey;
        ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
        spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
        if (ret)
                ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
                         ret, mr, mr_attr);
        return ret;
} /* end ehca_query_mr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_mr(struct ib_mr *mr)
{
        int ret = 0;
        u64 h_ret;
        struct ehca_shca *shca =
                container_of(mr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
        struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
        u32 cur_pid = current->tgid;

        if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
            (my_pd->ownpid != cur_pid)) {
                ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
                         cur_pid, my_pd->ownpid);
                ret = -EINVAL;
                goto dereg_mr_exit0;
        }

        if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
                ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
                         "e_mr->flags=%x", mr, e_mr, e_mr->flags);
                ret = -EINVAL;
                goto dereg_mr_exit0;
        } else if (e_mr == shca->maxmr) {
                /* should be impossible, however reject to be sure */
                ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
                         "shca->maxmr=%p mr->lkey=%x",
                         mr, shca->maxmr, mr->lkey);
                ret = -EINVAL;
                goto dereg_mr_exit0;
        }

        /* TODO: BUSY: MR still has bound window(s) */
        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
        if (h_ret != H_SUCCESS) {
                ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
                         "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
                         h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
                         e_mr->ipz_mr_handle.handle, mr->lkey);
                ret = ehca2ib_return_code(h_ret);
                goto dereg_mr_exit0;
        }

        if (e_mr->umem)
                ib_umem_release(e_mr->umem);

        /* successful deregistration */
        ehca_mr_delete(e_mr);

dereg_mr_exit0:
        if (ret)
                ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
        return ret;
} /* end ehca_dereg_mr() */

/*----------------------------------------------------------------------*/

struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
{
        struct ib_mw *ib_mw;
        u64 h_ret;
        struct ehca_mw *e_mw;
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
        struct ehca_shca *shca =
                container_of(pd->device, struct ehca_shca, ib_device);
        struct ehca_mw_hipzout_parms hipzout;

        e_mw = ehca_mw_new();
        if (!e_mw) {
                ib_mw = ERR_PTR(-ENOMEM);
                goto alloc_mw_exit0;
        }

        h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
                                         e_pd->fw_pd, &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
                         "shca=%p hca_hndl=%lx mw=%p",
                         h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
                ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
                goto alloc_mw_exit1;
        }
        /* successful MW allocation */
        e_mw->ipz_mw_handle = hipzout.handle;
        e_mw->ib_mw.rkey = hipzout.rkey;
        return &e_mw->ib_mw;

alloc_mw_exit1:
        ehca_mw_delete(e_mw);
alloc_mw_exit0:
        if (IS_ERR(ib_mw))
                ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
        return ib_mw;
} /* end ehca_alloc_mw() */

/*----------------------------------------------------------------------*/

int ehca_bind_mw(struct ib_qp *qp,
                 struct ib_mw *mw,
                 struct ib_mw_bind *mw_bind)
{
        /* TODO: not supported up to now */
        ehca_gen_err("bind MW currently not supported by HCAD");

        return -EPERM;
} /* end ehca_bind_mw() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_mw(struct ib_mw *mw)
{
        u64 h_ret;
        struct ehca_shca *shca =
                container_of(mw->device, struct ehca_shca, ib_device);
        struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);

        h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
        if (h_ret != H_SUCCESS) {
                ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
                         "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
                         h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
                         e_mw->ipz_mw_handle.handle);
                return ehca2ib_return_code(h_ret);
        }
        /* successful deallocation */
        ehca_mw_delete(e_mw);
        return 0;
} /* end ehca_dealloc_mw() */

/*----------------------------------------------------------------------*/

struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
                              int mr_access_flags,
                              struct ib_fmr_attr *fmr_attr)
{
        struct ib_fmr *ib_fmr;
        struct ehca_shca *shca =
                container_of(pd->device, struct ehca_shca, ib_device);
        struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
        struct ehca_mr *e_fmr;
        int ret;
        u32 tmp_lkey, tmp_rkey;
        struct ehca_mr_pginfo pginfo;
        u64 hw_pgsize;

        /* check other parameters */
        if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
            ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
             !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
                /*
                 * Remote Write Access requires Local Write Access
                 * Remote Atomic Access requires Local Write Access
                 */
                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
                         mr_access_flags);
                ib_fmr = ERR_PTR(-EINVAL);
                goto alloc_fmr_exit0;
        }
        if (mr_access_flags & IB_ACCESS_MW_BIND) {
                ehca_err(pd->device, "bad input values: mr_access_flags=%x",
                         mr_access_flags);
                ib_fmr = ERR_PTR(-EINVAL);
                goto alloc_fmr_exit0;
        }
        if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
                ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
                         "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
                         fmr_attr->max_pages, fmr_attr->max_maps,
                         fmr_attr->page_shift);
                ib_fmr = ERR_PTR(-EINVAL);
                goto alloc_fmr_exit0;
        }
        hw_pgsize = ehca_get_max_hwpage_size(shca);
        if ((1 << fmr_attr->page_shift) != hw_pgsize) {
                ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
                         fmr_attr->page_shift);
                ib_fmr = ERR_PTR(-EINVAL);
                goto alloc_fmr_exit0;
        }

        e_fmr = ehca_mr_new();
        if (!e_fmr) {
                ib_fmr = ERR_PTR(-ENOMEM);
                goto alloc_fmr_exit0;
        }
        e_fmr->flags |= EHCA_MR_FLAG_FMR;

        /* register MR on HCA */
        memset(&pginfo, 0, sizeof(pginfo));
        /*
         * pginfo.num_hwpages==0, ie register_rpages() will not be called
         * but deferred to map_phys_fmr()
         */
        ret = ehca_reg_mr(shca, e_fmr, NULL,
                          fmr_attr->max_pages * (1 << fmr_attr->page_shift),
                          mr_access_flags, e_pd, &pginfo,
                          &tmp_lkey, &tmp_rkey);
        if (ret) {
                ib_fmr = ERR_PTR(ret);
                goto alloc_fmr_exit1;
        }

        /* successful */
        e_fmr->hwpage_size = hw_pgsize;
        e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
        e_fmr->fmr_max_pages = fmr_attr->max_pages;
        e_fmr->fmr_max_maps = fmr_attr->max_maps;
        e_fmr->fmr_map_cnt = 0;
        return &e_fmr->ib.ib_fmr;

alloc_fmr_exit1:
        ehca_mr_delete(e_fmr);
alloc_fmr_exit0:
        if (IS_ERR(ib_fmr))
                ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
                         "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
                         mr_access_flags, fmr_attr);
        return ib_fmr;
} /* end ehca_alloc_fmr() */

/*----------------------------------------------------------------------*/

int ehca_map_phys_fmr(struct ib_fmr *fmr,
                      u64 *page_list,
                      int list_len,
                      u64 iova)
{
        int ret;
        struct ehca_shca *shca =
                container_of(fmr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
        struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
        struct ehca_mr_pginfo pginfo;
        u32 tmp_lkey, tmp_rkey;

        if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
                ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
                         e_fmr, e_fmr->flags);
                ret = -EINVAL;
                goto map_phys_fmr_exit0;
        }
        ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
        if (ret)
                goto map_phys_fmr_exit0;
        if (iova % e_fmr->fmr_page_size) {
                /* only whole-numbered pages */
                ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
                         iova, e_fmr->fmr_page_size);
                ret = -EINVAL;
                goto map_phys_fmr_exit0;
        }
        if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
                /* HCAD does not limit the maps, however trace this anyway */
                ehca_info(fmr->device, "map limit exceeded, fmr=%p "
                          "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
                          fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
        }

        memset(&pginfo, 0, sizeof(pginfo));
        pginfo.type = EHCA_MR_PGI_FMR;
        pginfo.num_kpages = list_len;
        pginfo.hwpage_size = e_fmr->hwpage_size;
        pginfo.num_hwpages =
                list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
        pginfo.u.fmr.page_list = page_list;
        pginfo.next_hwpage =
                (iova & (e_fmr->fmr_page_size - 1)) / pginfo.hwpage_size;
        pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;

        ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
                            list_len * e_fmr->fmr_page_size,
                            e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
        if (ret)
                goto map_phys_fmr_exit0;

        /* successful reregistration */
        e_fmr->fmr_map_cnt++;
        e_fmr->ib.ib_fmr.lkey = tmp_lkey;
        e_fmr->ib.ib_fmr.rkey = tmp_rkey;
        return 0;

map_phys_fmr_exit0:
        if (ret)
                ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
                         "iova=%lx", ret, fmr, page_list, list_len, iova);
        return ret;
} /* end ehca_map_phys_fmr() */

/*----------------------------------------------------------------------*/

int ehca_unmap_fmr(struct list_head *fmr_list)
{
        int ret = 0;
        struct ib_fmr *ib_fmr;
        struct ehca_shca *shca = NULL;
        struct ehca_shca *prev_shca;
        struct ehca_mr *e_fmr;
        u32 num_fmr = 0;
        u32 unmap_fmr_cnt = 0;

        /* check all FMR belong to same SHCA, and check internal flag */
        list_for_each_entry(ib_fmr, fmr_list, list) {
                prev_shca = shca;
                if (!ib_fmr) {
                        ehca_gen_err("bad fmr=%p in list", ib_fmr);
                        ret = -EINVAL;
                        goto unmap_fmr_exit0;
                }
                shca = container_of(ib_fmr->device, struct ehca_shca,
                                    ib_device);
                e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
                if ((shca != prev_shca) && prev_shca) {
                        ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
                                 "prev_shca=%p e_fmr=%p",
                                 shca, prev_shca, e_fmr);
                        ret = -EINVAL;
                        goto unmap_fmr_exit0;
                }
                if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
                        ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
                                 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
                        ret = -EINVAL;
                        goto unmap_fmr_exit0;
                }
                num_fmr++;
        }

        /* loop over all FMRs to unmap */
        list_for_each_entry(ib_fmr, fmr_list, list) {
                unmap_fmr_cnt++;
                e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
                shca = container_of(ib_fmr->device, struct ehca_shca,
                                    ib_device);
                ret = ehca_unmap_one_fmr(shca, e_fmr);
                if (ret) {
                        /* unmap failed, stop unmapping of rest of FMRs */
                        ehca_err(&shca->ib_device, "unmap of one FMR failed, "
                                 "stop rest, e_fmr=%p num_fmr=%x "
                                 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
                                 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
                        goto unmap_fmr_exit0;
                }
        }

unmap_fmr_exit0:
        if (ret)
                ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
                             ret, fmr_list, num_fmr, unmap_fmr_cnt);
        return ret;
} /* end ehca_unmap_fmr() */

/*----------------------------------------------------------------------*/

int ehca_dealloc_fmr(struct ib_fmr *fmr)
{
        int ret;
        u64 h_ret;
        struct ehca_shca *shca =
                container_of(fmr->device, struct ehca_shca, ib_device);
        struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);

        if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
                ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
                         e_fmr, e_fmr->flags);
                ret = -EINVAL;
                goto free_fmr_exit0;
        }

        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
        if (h_ret != H_SUCCESS) {
                ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
                         "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
                         h_ret, e_fmr, shca->ipz_hca_handle.handle,
                         e_fmr->ipz_mr_handle.handle, fmr->lkey);
                ret = ehca2ib_return_code(h_ret);
                goto free_fmr_exit0;
        }
        /* successful deregistration */
        ehca_mr_delete(e_fmr);
        return 0;

free_fmr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
        return ret;
} /* end ehca_dealloc_fmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_mr(struct ehca_shca *shca,
                struct ehca_mr *e_mr,
                u64 *iova_start,
                u64 size,
                int acl,
                struct ehca_pd *e_pd,
                struct ehca_mr_pginfo *pginfo,
                u32 *lkey, /*OUT*/
                u32 *rkey) /*OUT*/
{
        int ret;
        u64 h_ret;
        u32 hipz_acl;
        struct ehca_mr_hipzout_parms hipzout;

        ehca_mrmw_map_acl(acl, &hipz_acl);
        ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
        if (ehca_use_hp_mr == 1)
                hipz_acl |= 0x00000001;

        h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
                                         (u64)iova_start, size, hipz_acl,
                                         e_pd->fw_pd, &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
                         "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
                ret = ehca2ib_return_code(h_ret);
                goto ehca_reg_mr_exit0;
        }

        e_mr->ipz_mr_handle = hipzout.handle;

        ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
        if (ret)
                goto ehca_reg_mr_exit1;

        /* successful registration */
        e_mr->num_kpages = pginfo->num_kpages;
        e_mr->num_hwpages = pginfo->num_hwpages;
        e_mr->hwpage_size = pginfo->hwpage_size;
        e_mr->start = iova_start;
        e_mr->size = size;
        e_mr->acl = acl;
        *lkey = hipzout.lkey;
        *rkey = hipzout.rkey;
        return 0;

ehca_reg_mr_exit1:
        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
                         "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
                         "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%x",
                         h_ret, shca, e_mr, iova_start, size, acl, e_pd,
                         hipzout.lkey, pginfo, pginfo->num_kpages,
                         pginfo->num_hwpages, ret);
                ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
                         "not recoverable");
        }
ehca_reg_mr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
                         "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
                         "num_kpages=%lx num_hwpages=%lx",
                         ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
                         pginfo->num_kpages, pginfo->num_hwpages);
        return ret;
} /* end ehca_reg_mr() */

/*----------------------------------------------------------------------*/

int ehca_reg_mr_rpages(struct ehca_shca *shca,
                       struct ehca_mr *e_mr,
                       struct ehca_mr_pginfo *pginfo)
{
        int ret = 0;
        u64 h_ret;
        u32 rnum;
        u64 rpage;
        u32 i;
        u64 *kpage;

        if (!pginfo->num_hwpages) /* in case of fmr */
                return 0;

        kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!kpage) {
                ehca_err(&shca->ib_device, "kpage alloc failed");
                ret = -ENOMEM;
                goto ehca_reg_mr_rpages_exit0;
        }

        /* max MAX_RPAGES ehca mr pages per register call */
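        /*
         * E.g. num_hwpages = 1025 takes NUM_CHUNKS(1025, 512) = 3 hCalls,
         * transferring 512, 512 and 1 page addresses respectively.
         */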
        for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {

                if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
                        rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
                        if (rnum == 0)
                                rnum = MAX_RPAGES; /* last shot is full */
                } else
                        rnum = MAX_RPAGES;

                ret = ehca_set_pagebuf(pginfo, rnum, kpage);
                if (ret) {
                        ehca_err(&shca->ib_device, "ehca_set_pagebuf "
                                 "bad rc, ret=%x rnum=%x kpage=%p",
                                 ret, rnum, kpage);
                        goto ehca_reg_mr_rpages_exit1;
                }

                if (rnum > 1) {
                        rpage = virt_to_abs(kpage);
                        if (!rpage) {
                                ehca_err(&shca->ib_device, "kpage=%p i=%x",
                                         kpage, i);
                                ret = -EFAULT;
                                goto ehca_reg_mr_rpages_exit1;
                        }
                } else
                        rpage = *kpage;

                h_ret = hipz_h_register_rpage_mr(
                        shca->ipz_hca_handle, e_mr,
                        ehca_encode_hwpage_size(pginfo->hwpage_size),
                        0, rpage, rnum);

                if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
                        /*
                         * check for 'registration complete'==H_SUCCESS
                         * and for 'page registered'==H_PAGE_REGISTERED
                         */
                        if (h_ret != H_SUCCESS) {
                                ehca_err(&shca->ib_device, "last "
                                         "hipz_reg_rpage_mr failed, h_ret=%lx "
                                         "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
                                         " lkey=%x", h_ret, e_mr, i,
                                         shca->ipz_hca_handle.handle,
                                         e_mr->ipz_mr_handle.handle,
                                         e_mr->ib.ib_mr.lkey);
                                ret = ehca2ib_return_code(h_ret);
                                break;
                        } else
                                ret = 0;
                } else if (h_ret != H_PAGE_REGISTERED) {
                        ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
                                 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
                                 "mr_hndl=%lx", h_ret, e_mr, i,
                                 e_mr->ib.ib_mr.lkey,
                                 shca->ipz_hca_handle.handle,
                                 e_mr->ipz_mr_handle.handle);
                        ret = ehca2ib_return_code(h_ret);
                        break;
                } else
                        ret = 0;
        } /* end for(i) */


ehca_reg_mr_rpages_exit1:
        ehca_free_fw_ctrlblock(kpage);
ehca_reg_mr_rpages_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
                         "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr,
                         pginfo, pginfo->num_kpages, pginfo->num_hwpages);
        return ret;
} /* end ehca_reg_mr_rpages() */

/*----------------------------------------------------------------------*/

inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
                                struct ehca_mr *e_mr,
                                u64 *iova_start,
                                u64 size,
                                u32 acl,
                                struct ehca_pd *e_pd,
                                struct ehca_mr_pginfo *pginfo,
                                u32 *lkey, /*OUT*/
                                u32 *rkey) /*OUT*/
{
        int ret;
        u64 h_ret;
        u32 hipz_acl;
        u64 *kpage;
        u64 rpage;
        struct ehca_mr_pginfo pginfo_save;
        struct ehca_mr_hipzout_parms hipzout;

        ehca_mrmw_map_acl(acl, &hipz_acl);
        ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);

        kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!kpage) {
                ehca_err(&shca->ib_device, "kpage alloc failed");
                ret = -ENOMEM;
                goto ehca_rereg_mr_rereg1_exit0;
        }

        pginfo_save = *pginfo;
        ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
        if (ret) {
                ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
                         "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx "
                         "kpage=%p", e_mr, pginfo, pginfo->type,
                         pginfo->num_kpages, pginfo->num_hwpages, kpage);
                goto ehca_rereg_mr_rereg1_exit1;
        }
        rpage = virt_to_abs(kpage);
        if (!rpage) {
                ehca_err(&shca->ib_device, "kpage=%p", kpage);
                ret = -EFAULT;
                goto ehca_rereg_mr_rereg1_exit1;
        }
        h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
                                      (u64)iova_start, size, hipz_acl,
                                      e_pd->fw_pd, rpage, &hipzout);
        if (h_ret != H_SUCCESS) {
                /*
                 * reregistration unsuccessful, try it again with the 3 hCalls,
                 * e.g. this is required in case H_MR_CONDITION
                 * (MW bound or MR is shared)
                 */
                ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
                          "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
                *pginfo = pginfo_save;
                ret = -EAGAIN;
        } else if ((u64 *)hipzout.vaddr != iova_start) {
                ehca_err(&shca->ib_device, "PHYP changed iova_start in "
                         "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
                         "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
                         hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
                         e_mr->ib.ib_mr.lkey, hipzout.lkey);
                ret = -EFAULT;
        } else {
                /*
                 * successful reregistration
                 * note: start and start_out are identical for eServer HCAs
                 */
                e_mr->num_kpages = pginfo->num_kpages;
                e_mr->num_hwpages = pginfo->num_hwpages;
                e_mr->hwpage_size = pginfo->hwpage_size;
                e_mr->start = iova_start;
                e_mr->size = size;
                e_mr->acl = acl;
                *lkey = hipzout.lkey;
                *rkey = hipzout.rkey;
        }

ehca_rereg_mr_rereg1_exit1:
        ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
        if (ret && (ret != -EAGAIN))
                ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
                         "pginfo=%p num_kpages=%lx num_hwpages=%lx",
                         ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
                         pginfo->num_hwpages);
        return ret;
} /* end ehca_rereg_mr_rereg1() */

/*----------------------------------------------------------------------*/

int ehca_rereg_mr(struct ehca_shca *shca,
                  struct ehca_mr *e_mr,
                  u64 *iova_start,
                  u64 size,
                  int acl,
                  struct ehca_pd *e_pd,
                  struct ehca_mr_pginfo *pginfo,
                  u32 *lkey,
                  u32 *rkey)
{
        int ret = 0;
        u64 h_ret;
        int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
        int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

        /* first determine reregistration hCall(s) */
        if ((pginfo->num_hwpages > MAX_RPAGES) ||
            (e_mr->num_hwpages > MAX_RPAGES) ||
            (pginfo->num_hwpages > e_mr->num_hwpages)) {
                ehca_dbg(&shca->ib_device, "Rereg3 case, "
                         "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x",
                         pginfo->num_hwpages, e_mr->num_hwpages);
                rereg_1_hcall = 0;
                rereg_3_hcall = 1;
        }

        if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
                rereg_1_hcall = 0;
                rereg_3_hcall = 1;
                e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
                ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
                         e_mr);
        }

        if (rereg_1_hcall) {
                ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
                                           acl, e_pd, pginfo, lkey, rkey);
                if (ret) {
                        if (ret == -EAGAIN)
                                rereg_3_hcall = 1;
                        else
                                goto ehca_rereg_mr_exit0;
                }
        }

        if (rereg_3_hcall) {
                struct ehca_mr save_mr;

                /* first deregister old MR */
                h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
                if (h_ret != H_SUCCESS) {
                        ehca_err(&shca->ib_device, "hipz_free_mr failed, "
                                 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
                                 "mr->lkey=%x",
                                 h_ret, e_mr, shca->ipz_hca_handle.handle,
                                 e_mr->ipz_mr_handle.handle,
                                 e_mr->ib.ib_mr.lkey);
                        ret = ehca2ib_return_code(h_ret);
                        goto ehca_rereg_mr_exit0;
                }
                /* clean ehca_mr_t, without changing struct ib_mr and lock */
                save_mr = *e_mr;
                ehca_mr_deletenew(e_mr);

                /* set some MR values */
                e_mr->flags = save_mr.flags;
                e_mr->hwpage_size = save_mr.hwpage_size;
                e_mr->fmr_page_size = save_mr.fmr_page_size;
                e_mr->fmr_max_pages = save_mr.fmr_max_pages;
                e_mr->fmr_max_maps = save_mr.fmr_max_maps;
                e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

                ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
                                  e_pd, pginfo, lkey, rkey);
                if (ret) {
                        u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
                        memcpy(&e_mr->flags, &(save_mr.flags),
                               sizeof(struct ehca_mr) - offset);
                        goto ehca_rereg_mr_exit0;
                }
        }

ehca_rereg_mr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
                         "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
                         "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
                         "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
                         acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
                         rereg_1_hcall, rereg_3_hcall);
        return ret;
} /* end ehca_rereg_mr() */

/*----------------------------------------------------------------------*/

int ehca_unmap_one_fmr(struct ehca_shca *shca,
                       struct ehca_mr *e_fmr)
{
        int ret = 0;
        u64 h_ret;
        struct ehca_pd *e_pd =
                container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
        struct ehca_mr save_fmr;
        u32 tmp_lkey, tmp_rkey;
        struct ehca_mr_pginfo pginfo;
        struct ehca_mr_hipzout_parms hipzout;
        struct ehca_mr save_mr;

        if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
                /*
                 * note: after using rereg hcall with len=0,
                 * rereg hcall must be used again for registering pages
                 */
                h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
                                              0, 0, e_pd->fw_pd, 0, &hipzout);
                if (h_ret == H_SUCCESS) {
                        /* successful reregistration */
                        e_fmr->start = NULL;
                        e_fmr->size = 0;
                        tmp_lkey = hipzout.lkey;
                        tmp_rkey = hipzout.rkey;
                        return 0;
                }
                /*
                 * should not happen, because length checked above,
                 * FMRs are not shared and no MW bound to FMRs
                 */
                ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
                         "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
                         "mr_hndl=%lx lkey=%x lkey_out=%x",
                         h_ret, e_fmr, shca->ipz_hca_handle.handle,
                         e_fmr->ipz_mr_handle.handle,
                         e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
                /* try free and rereg */
        }

        /* first free old FMR */
        h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "hipz_free_mr failed, "
                         "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
                         "lkey=%x",
                         h_ret, e_fmr, shca->ipz_hca_handle.handle,
                         e_fmr->ipz_mr_handle.handle,
                         e_fmr->ib.ib_fmr.lkey);
                ret = ehca2ib_return_code(h_ret);
                goto ehca_unmap_one_fmr_exit0;
        }
        /* clean ehca_mr_t, without changing lock */
        save_fmr = *e_fmr;
        ehca_mr_deletenew(e_fmr);

        /* set some MR values */
        e_fmr->flags = save_fmr.flags;
        e_fmr->hwpage_size = save_fmr.hwpage_size;
        e_fmr->fmr_page_size = save_fmr.fmr_page_size;
        e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
        e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
        e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
        e_fmr->acl = save_fmr.acl;

        memset(&pginfo, 0, sizeof(pginfo));
        pginfo.type = EHCA_MR_PGI_FMR;
        ret = ehca_reg_mr(shca, e_fmr, NULL,
                          (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
                          e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
                          &tmp_rkey);
        if (ret) {
                u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
                memcpy(&e_fmr->flags, &(save_mr.flags),
                       sizeof(struct ehca_mr) - offset);
        }

ehca_unmap_one_fmr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
                         "fmr_max_pages=%x",
                         ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
        return ret;
} /* end ehca_unmap_one_fmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_smr(struct ehca_shca *shca,
                 struct ehca_mr *e_origmr,
                 struct ehca_mr *e_newmr,
                 u64 *iova_start,
                 int acl,
                 struct ehca_pd *e_pd,
                 u32 *lkey, /*OUT*/
                 u32 *rkey) /*OUT*/
{
        int ret = 0;
        u64 h_ret;
        u32 hipz_acl;
        struct ehca_mr_hipzout_parms hipzout;

        ehca_mrmw_map_acl(acl, &hipz_acl);
        ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

        h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
                                    (u64)iova_start, hipz_acl, e_pd->fw_pd,
                                    &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
                         "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
                         "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
                         h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
                         shca->ipz_hca_handle.handle,
                         e_origmr->ipz_mr_handle.handle,
                         e_origmr->ib.ib_mr.lkey);
                ret = ehca2ib_return_code(h_ret);
                goto ehca_reg_smr_exit0;
        }
        /* successful registration */
        e_newmr->num_kpages = e_origmr->num_kpages;
        e_newmr->num_hwpages = e_origmr->num_hwpages;
        e_newmr->hwpage_size = e_origmr->hwpage_size;
        e_newmr->start = iova_start;
        e_newmr->size = e_origmr->size;
        e_newmr->acl = acl;
        e_newmr->ipz_mr_handle = hipzout.handle;
        *lkey = hipzout.lkey;
        *rkey = hipzout.rkey;
        return 0;

ehca_reg_smr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
                         "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
                         ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
        return ret;
} /* end ehca_reg_smr() */

/*----------------------------------------------------------------------*/

/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
        struct ehca_shca *shca,
        struct ehca_pd *e_pd,
        struct ehca_mr **e_maxmr) /*OUT*/
{
        int ret;
        struct ehca_mr *e_mr;
        u64 *iova_start;
        u64 size_maxmr;
        struct ehca_mr_pginfo pginfo;
        struct ib_phys_buf ib_pbuf;
        u32 num_kpages;
        u32 num_hwpages;
        u64 hw_pgsize;

        e_mr = ehca_mr_new();
        if (!e_mr) {
                ehca_err(&shca->ib_device, "out of memory");
                ret = -ENOMEM;
                goto ehca_reg_internal_maxmr_exit0;
        }
        e_mr->flags |= EHCA_MR_FLAG_MAXMR;

        /* register internal max-MR on HCA */
        size_maxmr = (u64)high_memory - PAGE_OFFSET;
        iova_start = (u64 *)KERNELBASE;
        ib_pbuf.addr = 0;
        ib_pbuf.size = size_maxmr;
        num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
                                PAGE_SIZE);
        hw_pgsize = ehca_get_max_hwpage_size(shca);
        num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
                                 hw_pgsize);

        memset(&pginfo, 0, sizeof(pginfo));
        pginfo.type = EHCA_MR_PGI_PHYS;
        pginfo.num_kpages = num_kpages;
        pginfo.num_hwpages = num_hwpages;
        pginfo.hwpage_size = hw_pgsize;
        pginfo.u.phy.num_phys_buf = 1;
        pginfo.u.phy.phys_buf_array = &ib_pbuf;

        ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
                          &pginfo, &e_mr->ib.ib_mr.lkey,
                          &e_mr->ib.ib_mr.rkey);
        if (ret) {
                ehca_err(&shca->ib_device, "reg of internal max MR failed, "
                         "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
                         "num_hwpages=%x", e_mr, iova_start, size_maxmr,
                         num_kpages, num_hwpages);
                goto ehca_reg_internal_maxmr_exit1;
        }

        /* successful registration of all pages */
        e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
        e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
        e_mr->ib.ib_mr.uobject = NULL;
        atomic_inc(&(e_pd->ib_pd.usecnt));
        atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
        *e_maxmr = e_mr;
        return 0;

ehca_reg_internal_maxmr_exit1:
        ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
                         ret, shca, e_pd, e_maxmr);
        return ret;
} /* end ehca_reg_internal_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_reg_maxmr(struct ehca_shca *shca,
                   struct ehca_mr *e_newmr,
                   u64 *iova_start,
                   int acl,
                   struct ehca_pd *e_pd,
                   u32 *lkey,
                   u32 *rkey)
{
        u64 h_ret;
        struct ehca_mr *e_origmr = shca->maxmr;
        u32 hipz_acl;
        struct ehca_mr_hipzout_parms hipzout;

        ehca_mrmw_map_acl(acl, &hipz_acl);
        ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);

        h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
                                    (u64)iova_start, hipz_acl, e_pd->fw_pd,
                                    &hipzout);
        if (h_ret != H_SUCCESS) {
                ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
                         "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
                         h_ret, e_origmr, shca->ipz_hca_handle.handle,
                         e_origmr->ipz_mr_handle.handle,
                         e_origmr->ib.ib_mr.lkey);
                return ehca2ib_return_code(h_ret);
        }
        /* successful registration */
        e_newmr->num_kpages = e_origmr->num_kpages;
        e_newmr->num_hwpages = e_origmr->num_hwpages;
        e_newmr->hwpage_size = e_origmr->hwpage_size;
        e_newmr->start = iova_start;
        e_newmr->size = e_origmr->size;
        e_newmr->acl = acl;
        e_newmr->ipz_mr_handle = hipzout.handle;
        *lkey = hipzout.lkey;
        *rkey = hipzout.rkey;
        return 0;
} /* end ehca_reg_maxmr() */

/*----------------------------------------------------------------------*/

int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
{
        int ret;
        struct ehca_mr *e_maxmr;
        struct ib_pd *ib_pd;

        if (!shca->maxmr) {
                ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
                ret = -EINVAL;
                goto ehca_dereg_internal_maxmr_exit0;
        }

        e_maxmr = shca->maxmr;
        ib_pd = e_maxmr->ib.ib_mr.pd;
        shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */

        ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
        if (ret) {
                ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
                         "ret=%x e_maxmr=%p shca=%p lkey=%x",
                         ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
                shca->maxmr = e_maxmr;
                goto ehca_dereg_internal_maxmr_exit0;
        }

        atomic_dec(&ib_pd->usecnt);

ehca_dereg_internal_maxmr_exit0:
        if (ret)
                ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
                         ret, shca, shca->maxmr);
        return ret;
} /* end ehca_dereg_internal_maxmr() */

/*----------------------------------------------------------------------*/

/*
 * check physical buffer array of MR verbs for validness and
 * calculates MR size
 */
int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
                                  int num_phys_buf,
                                  u64 *iova_start,
                                  u64 *size)
{
        struct ib_phys_buf *pbuf = phys_buf_array;
        u64 size_count = 0;
        u32 i;

        if (num_phys_buf == 0) {
                ehca_gen_err("bad phys buf array len, num_phys_buf=0");
                return -EINVAL;
        }
        /* check first buffer */
        if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
                ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
                             "pbuf->addr=%lx pbuf->size=%lx",
                             iova_start, pbuf->addr, pbuf->size);
                return -EINVAL;
        }
        if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
            (num_phys_buf > 1)) {
                ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
                             "pbuf->size=%lx", pbuf->addr, pbuf->size);
                return -EINVAL;
        }

        for (i = 0; i < num_phys_buf; i++) {
                if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
                        ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
                                     "pbuf->size=%lx",
                                     i, pbuf->addr, pbuf->size);
                        return -EINVAL;
                }
                if (((i > 0) && /* not 1st */
                     (i < (num_phys_buf - 1)) && /* not last */
                     (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
                        ehca_gen_err("bad size, i=%x pbuf->size=%lx",
                                     i, pbuf->size);
                        return -EINVAL;
                }
                size_count += pbuf->size;
                pbuf++;
        }

        *size = size_count;
        return 0;
} /* end ehca_mr_chk_buf_and_calc_size() */

/*----------------------------------------------------------------------*/

/* check page list of map FMR verb for validness */
int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
                             u64 *page_list,
                             int list_len)
{
        u32 i;
        u64 *page;

        if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
                ehca_gen_err("bad list_len, list_len=%x "
                             "e_fmr->fmr_max_pages=%x fmr=%p",
                             list_len, e_fmr->fmr_max_pages, e_fmr);
                return -EINVAL;
        }

        /* each page must be aligned */
        page = page_list;
        for (i = 0; i < list_len; i++) {
                if (*page % e_fmr->fmr_page_size) {
                        ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
                                     "fmr_page_size=%x", i, *page, page, e_fmr,
                                     e_fmr->fmr_page_size);
                        return -EINVAL;
                }
                page++;
        }

        return 0;
} /* end ehca_fmr_check_page_list() */

/*----------------------------------------------------------------------*/

/* PAGE_SIZE >= pginfo->hwpage_size */
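/* e.g. one 64K kernel page supplies sixteen 4K hw pages */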
1763 static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
1764 u32 number,
1765 u64 *kpage)
1766 {
1767 int ret = 0;
1768 struct ib_umem_chunk *prev_chunk;
1769 struct ib_umem_chunk *chunk;
1770 u64 pgaddr;
1771 u32 i = 0;
1772 u32 j = 0;
1773 int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
1774
1775 /* loop over desired chunk entries */
1776 chunk = pginfo->u.usr.next_chunk;
1777 prev_chunk = pginfo->u.usr.next_chunk;
1778 list_for_each_entry_continue(
1779 chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
1780 for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
1781 pgaddr = page_to_pfn(chunk->page_list[i].page)
1782 << PAGE_SHIFT ;
1783 *kpage = phys_to_abs(pgaddr +
1784 (pginfo->next_hwpage *
1785 pginfo->hwpage_size));
1786 if ( !(*kpage) ) {
1787 ehca_gen_err("pgaddr=%lx "
1788 "chunk->page_list[i]=%lx "
1789 "i=%x next_hwpage=%lx",
1790 pgaddr, (u64)sg_dma_address(
1791 &chunk->page_list[i]),
1792 i, pginfo->next_hwpage);
1793 return -EFAULT;
1794 }
1795 (pginfo->hwpage_cnt)++;
1796 (pginfo->next_hwpage)++;
1797 kpage++;
1798 if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
1799 (pginfo->kpage_cnt)++;
1800 (pginfo->u.usr.next_nmap)++;
1801 pginfo->next_hwpage = 0;
1802 i++;
1803 }
1804 j++;
1805 if (j >= number) break;
1806 }
1807 if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
1808 (j >= number)) {
1809 pginfo->u.usr.next_nmap = 0;
1810 prev_chunk = chunk;
1811 break;
1812 } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
1813 pginfo->u.usr.next_nmap = 0;
1814 prev_chunk = chunk;
1815 } else if (j >= number)
1816 break;
1817 else
1818 prev_chunk = chunk;
1819 }
1820 pginfo->u.usr.next_chunk =
1821 list_prepare_entry(prev_chunk,
1822 (&(pginfo->u.usr.region->chunk_list)),
1823 list);
1824 return ret;
1825 }
1826
1827 /*
1828 * check given pages for contiguous layout
1829 * last page addr is returned in prev_pgaddr for further check
1830 */
1831 static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
1832 int start_idx, int end_idx,
1833 u64 *prev_pgaddr)
1834 {
1835 int t;
1836 for (t = start_idx; t <= end_idx; t++) {
1837 u64 pgaddr = page_to_pfn(page_list[t].page) << PAGE_SHIFT;
1838 ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
1839 *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
1840 if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
1841 ehca_gen_err("uncontiguous page found pgaddr=%lx "
1842 "prev_pgaddr=%lx page_list_i=%x",
1843 pgaddr, *prev_pgaddr, t);
1844 return -EINVAL;
1845 }
1846 *prev_pgaddr = pgaddr;
1847 }
1848 return 0;
1849 }
1850
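/*
 * Fill the kpage array for a user MR whose hardware page size exceeds
 * the kernel page size.  Several kernel pages are gathered into each
 * hardware page, so they must be physically contiguous; the helper
 * above verifies this before a hardware page is emitted.
 */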
/* PAGE_SIZE < pginfo->hwpage_size */
static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
				  u32 number,
				  u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	u64 pgaddr, prev_pgaddr;
	u32 i = 0;
	u32 j = 0;
	int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
	int nr_kpages = kpages_per_hwpage;

	/* loop over desired chunk entries */
	chunk = pginfo->u.usr.next_chunk;
	prev_chunk = pginfo->u.usr.next_chunk;
	list_for_each_entry_continue(
		chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
		for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
			if (nr_kpages == kpages_per_hwpage) {
				pgaddr = (page_to_pfn(chunk->page_list[i].page)
					  << PAGE_SHIFT);
				*kpage = phys_to_abs(pgaddr);
				if (!(*kpage)) {
					ehca_gen_err("pgaddr=%lx i=%x",
						     pgaddr, i);
					ret = -EFAULT;
					return ret;
				}
				/*
				 * The first page in a hwpage must be aligned;
				 * the first MR page is exempt from this rule.
				 */
				if (pgaddr & (pginfo->hwpage_size - 1)) {
					if (pginfo->hwpage_cnt) {
						ehca_gen_err(
							"invalid alignment "
							"pgaddr=%lx i=%x "
							"mr_pgsize=%lx",
							pgaddr, i,
							pginfo->hwpage_size);
						ret = -EFAULT;
						return ret;
					}
					/* first MR page */
					pginfo->kpage_cnt =
						(pgaddr &
						 (pginfo->hwpage_size - 1)) >>
						PAGE_SHIFT;
					nr_kpages -= pginfo->kpage_cnt;
					*kpage = phys_to_abs(
						pgaddr &
						~(pginfo->hwpage_size - 1));
				}
				ehca_gen_dbg("kpage=%lx chunk_page=%lx "
					     "value=%016lx", *kpage, pgaddr,
					     *(u64 *)abs_to_virt(
						     phys_to_abs(pgaddr)));
				prev_pgaddr = pgaddr;
				i++;
				pginfo->kpage_cnt++;
				pginfo->u.usr.next_nmap++;
				nr_kpages--;
				if (!nr_kpages)
					goto next_kpage;
				continue;
			}
			if (i + nr_kpages > chunk->nmap) {
				ret = ehca_check_kpages_per_ate(
					chunk->page_list, i,
					chunk->nmap - 1, &prev_pgaddr);
				if (ret)
					return ret;
				pginfo->kpage_cnt += chunk->nmap - i;
				pginfo->u.usr.next_nmap += chunk->nmap - i;
				nr_kpages -= chunk->nmap - i;
				break;
			}

			ret = ehca_check_kpages_per_ate(chunk->page_list, i,
							i + nr_kpages - 1,
							&prev_pgaddr);
			if (ret)
				return ret;
			i += nr_kpages;
			pginfo->kpage_cnt += nr_kpages;
			pginfo->u.usr.next_nmap += nr_kpages;
next_kpage:
			nr_kpages = kpages_per_hwpage;
			(pginfo->hwpage_cnt)++;
			kpage++;
			j++;
			if (j >= number)
				break;
		}
		if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
		    (j >= number)) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
			break;
		} else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
			pginfo->u.usr.next_nmap = 0;
			prev_chunk = chunk;
		} else if (j >= number)
			break;
		else
			prev_chunk = chunk;
	}
	pginfo->u.usr.next_chunk =
		list_prepare_entry(prev_chunk,
				   (&(pginfo->u.usr.region->chunk_list)),
				   list);
	return ret;
}

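/*
 * Fill the kpage array with hardware page addresses derived from a
 * phys_buf_array, i.e. for physical MRs.  pginfo keeps track of the
 * current buffer and hardware page across calls.
 */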
int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
			  u32 number,
			  u64 *kpage)
{
	int ret = 0;
	struct ib_phys_buf *pbuf;
	u64 num_hw, offs_hw;
	u32 i = 0;

	/* loop over desired phys_buf_array entries */
	while (i < number) {
		pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
		num_hw = NUM_CHUNKS((pbuf->addr % pginfo->hwpage_size) +
				    pbuf->size, pginfo->hwpage_size);
		offs_hw = (pbuf->addr & ~(pginfo->hwpage_size - 1)) /
			pginfo->hwpage_size;
		while (pginfo->next_hwpage < offs_hw + num_hw) {
			/* sanity check */
			if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
			    (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
				ehca_gen_err("kpage_cnt/hwpage_cnt exceeded, "
					     "kpage_cnt=%lx num_kpages=%lx "
					     "hwpage_cnt=%lx "
					     "num_hwpages=%lx i=%x",
					     pginfo->kpage_cnt,
					     pginfo->num_kpages,
					     pginfo->hwpage_cnt,
					     pginfo->num_hwpages, i);
				return -EFAULT;
			}
			*kpage = phys_to_abs(
				(pbuf->addr & ~(pginfo->hwpage_size - 1)) +
				(pginfo->next_hwpage * pginfo->hwpage_size));
			if (!(*kpage) && pbuf->addr) {
				ehca_gen_err("pbuf->addr=%lx pbuf->size=%lx "
					     "next_hwpage=%lx", pbuf->addr,
					     pbuf->size, pginfo->next_hwpage);
				return -EFAULT;
			}
			(pginfo->hwpage_cnt)++;
			(pginfo->next_hwpage)++;
			if (PAGE_SIZE >= pginfo->hwpage_size) {
				if (pginfo->next_hwpage %
				    (PAGE_SIZE / pginfo->hwpage_size) == 0)
					(pginfo->kpage_cnt)++;
			} else
				pginfo->kpage_cnt += pginfo->hwpage_size /
					PAGE_SIZE;
			kpage++;
			i++;
			if (i >= number)
				break;
		}
		if (pginfo->next_hwpage >= offs_hw + num_hw) {
			(pginfo->u.phy.next_buf)++;
			pginfo->next_hwpage = 0;
		}
	}
	return ret;
}

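/*
 * Fill the kpage array from the page list of a mapped FMR.  When the
 * FMR page size is smaller than the hardware page size, several FMR
 * pages are folded into one hardware page and must therefore be
 * physically contiguous.
 */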
int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
			 u32 number,
			 u64 *kpage)
{
	int ret = 0;
	u64 *fmrlist;
	u32 i;

	/* loop over desired page_list entries */
	fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
	for (i = 0; i < number; i++) {
		*kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) +
				     pginfo->next_hwpage * pginfo->hwpage_size);
		if (!(*kpage)) {
			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
				     "next_listelem=%lx next_hwpage=%lx",
				     *fmrlist, fmrlist,
				     pginfo->u.fmr.next_listelem,
				     pginfo->next_hwpage);
			return -EFAULT;
		}
		(pginfo->hwpage_cnt)++;
		if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
			if (pginfo->next_hwpage %
			    (pginfo->u.fmr.fmr_pgsize /
			     pginfo->hwpage_size) == 0) {
				(pginfo->kpage_cnt)++;
				(pginfo->u.fmr.next_listelem)++;
				fmrlist++;
				pginfo->next_hwpage = 0;
			} else
				(pginfo->next_hwpage)++;
		} else {
			unsigned int cnt_per_hwpage = pginfo->hwpage_size /
				pginfo->u.fmr.fmr_pgsize;
			unsigned int j;
			u64 prev = *kpage;
			/* check if the addresses are contiguous */
			for (j = 1; j < cnt_per_hwpage; j++) {
				u64 p = phys_to_abs(fmrlist[j] &
						    ~(pginfo->hwpage_size - 1));
				if (prev + pginfo->u.fmr.fmr_pgsize != p) {
					ehca_gen_err("non-contiguous fmr pages "
						     "found prev=%lx p=%lx "
						     "idx=%x", prev, p, i + j);
					return -EINVAL;
				}
				prev = p;
			}
			pginfo->kpage_cnt += cnt_per_hwpage;
			pginfo->u.fmr.next_listelem += cnt_per_hwpage;
			fmrlist += cnt_per_hwpage;
		}
		kpage++;
	}
	return ret;
}

/* setup page buffer from page info */
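/*
 * Typical call pattern, sketched after the registration paths in this
 * file (the kpage buffer is a firmware control block with room for
 * MAX_RPAGES entries; error handling omitted):
 *
 *	u64 *kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
 *
 *	ret = ehca_set_pagebuf(pginfo, MAX_RPAGES, kpage);
 *	... hand kpage to the firmware via a register_rpages hcall ...
 *	ehca_free_fw_ctrlblock(kpage);
 */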
int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret;

	switch (pginfo->type) {
	case EHCA_MR_PGI_PHYS:
		ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_USER:
		ret = PAGE_SIZE >= pginfo->hwpage_size ?
			ehca_set_pagebuf_user1(pginfo, number, kpage) :
			ehca_set_pagebuf_user2(pginfo, number, kpage);
		break;
	case EHCA_MR_PGI_FMR:
		ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
		break;
	default:
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		break;
	}
	return ret;
} /* end ehca_set_pagebuf() */

/*----------------------------------------------------------------------*/

/*
 * check whether an MR is a max-MR, i.e. one spanning all of memory;
 * returns 1 if it is a max-MR, 0 otherwise
 */
int ehca_mr_is_maxmr(u64 size,
		     u64 *iova_start)
{
	/* an MR is treated as a max-MR only if it meets the following: */
	if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
	    (iova_start == (void *)KERNELBASE)) {
		ehca_gen_dbg("this is a max-MR");
		return 1;
	} else
		return 0;
} /* end ehca_mr_is_maxmr() */

/*----------------------------------------------------------------------*/

/* map access control for MR/MW. This routine is used for MR and MW. */
void ehca_mrmw_map_acl(int ib_acl,
		       u32 *hipz_acl)
{
	*hipz_acl = 0;
	if (ib_acl & IB_ACCESS_REMOTE_READ)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
	if (ib_acl & IB_ACCESS_REMOTE_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
	if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
		*hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
	if (ib_acl & IB_ACCESS_LOCAL_WRITE)
		*hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
	if (ib_acl & IB_ACCESS_MW_BIND)
		*hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
} /* end ehca_mrmw_map_acl() */

/*----------------------------------------------------------------------*/

/* sets page size in hipz access control for MR/MW. */
void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
{
	*hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
} /* end ehca_mrmw_set_pgsize_hipz_acl() */

/*----------------------------------------------------------------------*/

/*
 * reverse map access control for MR/MW.
 * This routine is used for MR and MW.
 */
void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
			       int *ib_acl) /*OUT*/
{
	*ib_acl = 0;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
		*ib_acl |= IB_ACCESS_REMOTE_READ;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
		*ib_acl |= IB_ACCESS_REMOTE_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
		*ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
	if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
		*ib_acl |= IB_ACCESS_LOCAL_WRITE;
	if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
		*ib_acl |= IB_ACCESS_MW_BIND;
} /* end ehca_mrmw_reverse_map_acl() */

/*----------------------------------------------------------------------*/

/*
 * MR destructor and constructor
 * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
 * except struct ib_mr and spinlock
 */
void ehca_mr_deletenew(struct ehca_mr *mr)
{
	mr->flags = 0;
	mr->num_kpages = 0;
	mr->num_hwpages = 0;
	mr->acl = 0;
	mr->start = NULL;
	mr->fmr_page_size = 0;
	mr->fmr_max_pages = 0;
	mr->fmr_max_maps = 0;
	mr->fmr_map_cnt = 0;
	memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
	memset(&mr->galpas, 0, sizeof(mr->galpas));
} /* end ehca_mr_deletenew() */

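/*
 * Create the slab caches for MR and MW objects.  Called once at module
 * load; returns -ENOMEM and leaves no cache behind if either
 * allocation fails.
 */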
int ehca_init_mrmw_cache(void)
{
	mr_cache = kmem_cache_create("ehca_cache_mr",
				     sizeof(struct ehca_mr), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!mr_cache)
		return -ENOMEM;
	mw_cache = kmem_cache_create("ehca_cache_mw",
				     sizeof(struct ehca_mw), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!mw_cache) {
		kmem_cache_destroy(mr_cache);
		mr_cache = NULL;
		return -ENOMEM;
	}
	return 0;
}

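/*
 * Destroy the MR/MW slab caches.  Called at module unload; the NULL
 * checks keep this safe if initialization failed part-way.
 */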
void ehca_cleanup_mrmw_cache(void)
{
	if (mr_cache)
		kmem_cache_destroy(mr_cache);
	if (mw_cache)
		kmem_cache_destroy(mw_cache);
}