IB/ehca: Restructure ehca_set_pagebuf()
drivers/infiniband/hw/ehca/ehca_mrmw.c
1 /*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * MR/MW functions
5 *
6 * Authors: Dietmar Decker <ddecker@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
42 #include <rdma/ib_umem.h>
43
44 #include <asm/current.h>
45
46 #include "ehca_iverbs.h"
47 #include "ehca_mrmw.h"
48 #include "hcp_if.h"
49 #include "hipz_hw.h"
50
 51 #define NUM_CHUNKS(length, chunk_size) \
 52 (((length) + ((chunk_size) - 1)) / (chunk_size))
53 /* max number of rpages (per hcall register_rpages) */
54 #define MAX_RPAGES 512
55
56 static struct kmem_cache *mr_cache;
57 static struct kmem_cache *mw_cache;
58
59 static struct ehca_mr *ehca_mr_new(void)
60 {
61 struct ehca_mr *me;
62
63 me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
 64 if (me)
 65 spin_lock_init(&me->mrlock);
 66 else
67 ehca_gen_err("alloc failed");
68
69 return me;
70 }
71
72 static void ehca_mr_delete(struct ehca_mr *me)
73 {
74 kmem_cache_free(mr_cache, me);
75 }
76
77 static struct ehca_mw *ehca_mw_new(void)
78 {
79 struct ehca_mw *me;
80
81 me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
 82 if (me)
 83 spin_lock_init(&me->mwlock);
 84 else
85 ehca_gen_err("alloc failed");
86
87 return me;
88 }
89
90 static void ehca_mw_delete(struct ehca_mw *me)
91 {
92 kmem_cache_free(mw_cache, me);
93 }
94
95 /*----------------------------------------------------------------------*/
96
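/*
 * Get a DMA MR for the given PD: hand out a shared copy of the internal
 * max-MR, which covers all of kernel memory and is set up beforehand via
 * ehca_reg_internal_maxmr(). Fails if no internal max-MR exists.
 */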
97 struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
98 {
99 struct ib_mr *ib_mr;
100 int ret;
101 struct ehca_mr *e_maxmr;
102 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
103 struct ehca_shca *shca =
104 container_of(pd->device, struct ehca_shca, ib_device);
105
106 if (shca->maxmr) {
107 e_maxmr = ehca_mr_new();
108 if (!e_maxmr) {
109 ehca_err(&shca->ib_device, "out of memory");
110 ib_mr = ERR_PTR(-ENOMEM);
111 goto get_dma_mr_exit0;
112 }
113
114 ret = ehca_reg_maxmr(shca, e_maxmr, (u64*)KERNELBASE,
115 mr_access_flags, e_pd,
116 &e_maxmr->ib.ib_mr.lkey,
117 &e_maxmr->ib.ib_mr.rkey);
118 if (ret) {
119 ehca_mr_delete(e_maxmr);
120 ib_mr = ERR_PTR(ret);
121 goto get_dma_mr_exit0;
122 }
123 ib_mr = &e_maxmr->ib.ib_mr;
124 } else {
 125 ehca_err(&shca->ib_device, "no internal max-MR exists!");
126 ib_mr = ERR_PTR(-EINVAL);
127 goto get_dma_mr_exit0;
128 }
129
130 get_dma_mr_exit0:
131 if (IS_ERR(ib_mr))
132 ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x ",
133 PTR_ERR(ib_mr), pd, mr_access_flags);
134 return ib_mr;
135 } /* end ehca_get_dma_mr() */
136
137 /*----------------------------------------------------------------------*/
138
139 struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
140 struct ib_phys_buf *phys_buf_array,
141 int num_phys_buf,
142 int mr_access_flags,
143 u64 *iova_start)
144 {
145 struct ib_mr *ib_mr;
146 int ret;
147 struct ehca_mr *e_mr;
148 struct ehca_shca *shca =
149 container_of(pd->device, struct ehca_shca, ib_device);
150 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
151
152 u64 size;
153
154 if ((num_phys_buf <= 0) || !phys_buf_array) {
155 ehca_err(pd->device, "bad input values: num_phys_buf=%x "
156 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
157 ib_mr = ERR_PTR(-EINVAL);
158 goto reg_phys_mr_exit0;
159 }
160 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
161 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
162 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
163 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
164 /*
165 * Remote Write Access requires Local Write Access
166 * Remote Atomic Access requires Local Write Access
167 */
168 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
169 mr_access_flags);
170 ib_mr = ERR_PTR(-EINVAL);
171 goto reg_phys_mr_exit0;
172 }
173
174 /* check physical buffer list and calculate size */
175 ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
176 iova_start, &size);
177 if (ret) {
178 ib_mr = ERR_PTR(ret);
179 goto reg_phys_mr_exit0;
180 }
181 if ((size == 0) ||
182 (((u64)iova_start + size) < (u64)iova_start)) {
183 ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
184 size, iova_start);
185 ib_mr = ERR_PTR(-EINVAL);
186 goto reg_phys_mr_exit0;
187 }
188
189 e_mr = ehca_mr_new();
190 if (!e_mr) {
191 ehca_err(pd->device, "out of memory");
192 ib_mr = ERR_PTR(-ENOMEM);
193 goto reg_phys_mr_exit0;
194 }
195
196 /* register MR on HCA */
197 if (ehca_mr_is_maxmr(size, iova_start)) {
198 e_mr->flags |= EHCA_MR_FLAG_MAXMR;
199 ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
200 e_pd, &e_mr->ib.ib_mr.lkey,
201 &e_mr->ib.ib_mr.rkey);
202 if (ret) {
203 ib_mr = ERR_PTR(ret);
204 goto reg_phys_mr_exit1;
205 }
206 } else {
207 struct ehca_mr_pginfo pginfo;
208 u32 num_kpages;
209 u32 num_hwpages;
210
211 num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size,
212 PAGE_SIZE);
213 num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) +
214 size, EHCA_PAGESIZE);
215 memset(&pginfo, 0, sizeof(pginfo));
216 pginfo.type = EHCA_MR_PGI_PHYS;
217 pginfo.num_kpages = num_kpages;
218 pginfo.num_hwpages = num_hwpages;
219 pginfo.u.phy.num_phys_buf = num_phys_buf;
220 pginfo.u.phy.phys_buf_array = phys_buf_array;
221 pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
222 EHCA_PAGESIZE);
223
224 ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
225 e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
226 &e_mr->ib.ib_mr.rkey);
227 if (ret) {
228 ib_mr = ERR_PTR(ret);
229 goto reg_phys_mr_exit1;
230 }
231 }
232
233 /* successful registration of all pages */
234 return &e_mr->ib.ib_mr;
235
236 reg_phys_mr_exit1:
237 ehca_mr_delete(e_mr);
238 reg_phys_mr_exit0:
239 if (IS_ERR(ib_mr))
240 ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
241 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
242 PTR_ERR(ib_mr), pd, phys_buf_array,
243 num_phys_buf, mr_access_flags, iova_start);
244 return ib_mr;
245 } /* end ehca_reg_phys_mr() */
246
247 /*----------------------------------------------------------------------*/
248
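/*
 * Register a user memory region: validate access flags and length, pin the
 * user pages via ib_umem_get() and register the resulting chunk list with
 * the HCA (pginfo type EHCA_MR_PGI_USER).
 */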
249 struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
250 int mr_access_flags, struct ib_udata *udata)
251 {
252 struct ib_mr *ib_mr;
253 struct ehca_mr *e_mr;
254 struct ehca_shca *shca =
255 container_of(pd->device, struct ehca_shca, ib_device);
256 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
257 struct ehca_mr_pginfo pginfo;
258 int ret;
259 u32 num_kpages;
260 u32 num_hwpages;
261
262 if (!pd) {
263 ehca_gen_err("bad pd=%p", pd);
264 return ERR_PTR(-EFAULT);
265 }
266
267 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
268 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
269 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
270 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
271 /*
272 * Remote Write Access requires Local Write Access
273 * Remote Atomic Access requires Local Write Access
274 */
275 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
276 mr_access_flags);
277 ib_mr = ERR_PTR(-EINVAL);
278 goto reg_user_mr_exit0;
279 }
280
281 if (length == 0 || virt + length < virt) {
282 ehca_err(pd->device, "bad input values: length=%lx "
283 "virt_base=%lx", length, virt);
284 ib_mr = ERR_PTR(-EINVAL);
285 goto reg_user_mr_exit0;
286 }
287
288 e_mr = ehca_mr_new();
289 if (!e_mr) {
290 ehca_err(pd->device, "out of memory");
291 ib_mr = ERR_PTR(-ENOMEM);
292 goto reg_user_mr_exit0;
293 }
294
295 e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
296 mr_access_flags);
297 if (IS_ERR(e_mr->umem)) {
298 ib_mr = (void *) e_mr->umem;
299 goto reg_user_mr_exit1;
300 }
301
302 if (e_mr->umem->page_size != PAGE_SIZE) {
303 ehca_err(pd->device, "page size not supported, "
304 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
305 ib_mr = ERR_PTR(-EINVAL);
306 goto reg_user_mr_exit2;
307 }
308
309 /* determine number of MR pages */
310 num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
311 num_hwpages = NUM_CHUNKS((virt % EHCA_PAGESIZE) + length,
312 EHCA_PAGESIZE);
313
314 /* register MR on HCA */
315 memset(&pginfo, 0, sizeof(pginfo));
316 pginfo.type = EHCA_MR_PGI_USER;
317 pginfo.num_kpages = num_kpages;
318 pginfo.num_hwpages = num_hwpages;
319 pginfo.u.usr.region = e_mr->umem;
320 pginfo.next_hwpage = e_mr->umem->offset / EHCA_PAGESIZE;
321 pginfo.u.usr.next_chunk = list_prepare_entry(pginfo.u.usr.next_chunk,
322 (&e_mr->umem->chunk_list),
323 list);
324
325 ret = ehca_reg_mr(shca, e_mr, (u64*) virt, length, mr_access_flags, e_pd,
326 &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
327 if (ret) {
328 ib_mr = ERR_PTR(ret);
329 goto reg_user_mr_exit2;
330 }
331
332 /* successful registration of all pages */
333 return &e_mr->ib.ib_mr;
334
335 reg_user_mr_exit2:
336 ib_umem_release(e_mr->umem);
337 reg_user_mr_exit1:
338 ehca_mr_delete(e_mr);
339 reg_user_mr_exit0:
340 if (IS_ERR(ib_mr))
341 ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x"
342 " udata=%p",
343 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
344 return ib_mr;
345 } /* end ehca_reg_user_mr() */
346
347 /*----------------------------------------------------------------------*/
348
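/*
 * Reregister a physical MR. Only requests that include IB_MR_REREG_TRANS
 * are supported, since the PHYP rereg hCall needs a page list; the PD and
 * access rights may be changed in the same call.
 */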
349 int ehca_rereg_phys_mr(struct ib_mr *mr,
350 int mr_rereg_mask,
351 struct ib_pd *pd,
352 struct ib_phys_buf *phys_buf_array,
353 int num_phys_buf,
354 int mr_access_flags,
355 u64 *iova_start)
356 {
357 int ret;
358
359 struct ehca_shca *shca =
360 container_of(mr->device, struct ehca_shca, ib_device);
361 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
362 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
363 u64 new_size;
364 u64 *new_start;
365 u32 new_acl;
366 struct ehca_pd *new_pd;
367 u32 tmp_lkey, tmp_rkey;
368 unsigned long sl_flags;
369 u32 num_kpages = 0;
370 u32 num_hwpages = 0;
371 struct ehca_mr_pginfo pginfo;
372 u32 cur_pid = current->tgid;
373
374 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
375 (my_pd->ownpid != cur_pid)) {
376 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
377 cur_pid, my_pd->ownpid);
378 ret = -EINVAL;
379 goto rereg_phys_mr_exit0;
380 }
381
382 if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
383 /* TODO not supported, because PHYP rereg hCall needs pages */
384 ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
385 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
386 ret = -EINVAL;
387 goto rereg_phys_mr_exit0;
388 }
389
390 if (mr_rereg_mask & IB_MR_REREG_PD) {
391 if (!pd) {
392 ehca_err(mr->device, "rereg with bad pd, pd=%p "
393 "mr_rereg_mask=%x", pd, mr_rereg_mask);
394 ret = -EINVAL;
395 goto rereg_phys_mr_exit0;
396 }
397 }
398
399 if ((mr_rereg_mask &
400 ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
401 (mr_rereg_mask == 0)) {
402 ret = -EINVAL;
403 goto rereg_phys_mr_exit0;
404 }
405
406 /* check other parameters */
407 if (e_mr == shca->maxmr) {
408 /* should be impossible, however reject to be sure */
409 ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
410 "shca->maxmr=%p mr->lkey=%x",
411 mr, shca->maxmr, mr->lkey);
412 ret = -EINVAL;
413 goto rereg_phys_mr_exit0;
414 }
415 if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
416 if (e_mr->flags & EHCA_MR_FLAG_FMR) {
417 ehca_err(mr->device, "not supported for FMR, mr=%p "
418 "flags=%x", mr, e_mr->flags);
419 ret = -EINVAL;
420 goto rereg_phys_mr_exit0;
421 }
422 if (!phys_buf_array || num_phys_buf <= 0) {
423 ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
424 " phys_buf_array=%p num_phys_buf=%x",
425 mr_rereg_mask, phys_buf_array, num_phys_buf);
426 ret = -EINVAL;
427 goto rereg_phys_mr_exit0;
428 }
429 }
430 if ((mr_rereg_mask & IB_MR_REREG_ACCESS) && /* change ACL */
431 (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
432 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
433 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
434 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
435 /*
436 * Remote Write Access requires Local Write Access
437 * Remote Atomic Access requires Local Write Access
438 */
439 ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
440 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
441 ret = -EINVAL;
442 goto rereg_phys_mr_exit0;
443 }
444
445 /* set requested values dependent on rereg request */
446 spin_lock_irqsave(&e_mr->mrlock, sl_flags);
447 new_start = e_mr->start; /* new == old address */
448 new_size = e_mr->size; /* new == old length */
449 new_acl = e_mr->acl; /* new == old access control */
450 new_pd = container_of(mr->pd,struct ehca_pd,ib_pd); /*new == old PD*/
451
452 if (mr_rereg_mask & IB_MR_REREG_TRANS) {
453 new_start = iova_start; /* change address */
454 /* check physical buffer list and calculate size */
455 ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
456 num_phys_buf, iova_start,
457 &new_size);
458 if (ret)
459 goto rereg_phys_mr_exit1;
460 if ((new_size == 0) ||
461 (((u64)iova_start + new_size) < (u64)iova_start)) {
462 ehca_err(mr->device, "bad input values: new_size=%lx "
463 "iova_start=%p", new_size, iova_start);
464 ret = -EINVAL;
465 goto rereg_phys_mr_exit1;
466 }
467 num_kpages = NUM_CHUNKS(((u64)new_start % PAGE_SIZE) +
468 new_size, PAGE_SIZE);
469 num_hwpages = NUM_CHUNKS(((u64)new_start % EHCA_PAGESIZE) +
470 new_size, EHCA_PAGESIZE);
471 memset(&pginfo, 0, sizeof(pginfo));
472 pginfo.type = EHCA_MR_PGI_PHYS;
473 pginfo.num_kpages = num_kpages;
474 pginfo.num_hwpages = num_hwpages;
475 pginfo.u.phy.num_phys_buf = num_phys_buf;
476 pginfo.u.phy.phys_buf_array = phys_buf_array;
477 pginfo.next_hwpage = (((u64)iova_start & ~PAGE_MASK) /
478 EHCA_PAGESIZE);
479 }
480 if (mr_rereg_mask & IB_MR_REREG_ACCESS)
481 new_acl = mr_access_flags;
482 if (mr_rereg_mask & IB_MR_REREG_PD)
483 new_pd = container_of(pd, struct ehca_pd, ib_pd);
484
485 ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
486 new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
487 if (ret)
488 goto rereg_phys_mr_exit1;
489
490 /* successful reregistration */
491 if (mr_rereg_mask & IB_MR_REREG_PD)
492 mr->pd = pd;
493 mr->lkey = tmp_lkey;
494 mr->rkey = tmp_rkey;
495
496 rereg_phys_mr_exit1:
497 spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
498 rereg_phys_mr_exit0:
499 if (ret)
500 ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
501 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
502 "iova_start=%p",
503 ret, mr, mr_rereg_mask, pd, phys_buf_array,
504 num_phys_buf, mr_access_flags, iova_start);
505 return ret;
506 } /* end ehca_rereg_phys_mr() */
507
508 /*----------------------------------------------------------------------*/
509
510 int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
511 {
512 int ret = 0;
513 u64 h_ret;
514 struct ehca_shca *shca =
515 container_of(mr->device, struct ehca_shca, ib_device);
516 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
517 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
518 u32 cur_pid = current->tgid;
519 unsigned long sl_flags;
520 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
521
522 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
523 (my_pd->ownpid != cur_pid)) {
524 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
525 cur_pid, my_pd->ownpid);
526 ret = -EINVAL;
527 goto query_mr_exit0;
528 }
529
530 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
531 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
532 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
533 ret = -EINVAL;
534 goto query_mr_exit0;
535 }
536
537 memset(mr_attr, 0, sizeof(struct ib_mr_attr));
538 spin_lock_irqsave(&e_mr->mrlock, sl_flags);
539
540 h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
541 if (h_ret != H_SUCCESS) {
542 ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
543 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
544 h_ret, mr, shca->ipz_hca_handle.handle,
545 e_mr->ipz_mr_handle.handle, mr->lkey);
546 ret = ehca2ib_return_code(h_ret);
547 goto query_mr_exit1;
548 }
549 mr_attr->pd = mr->pd;
550 mr_attr->device_virt_addr = hipzout.vaddr;
551 mr_attr->size = hipzout.len;
552 mr_attr->lkey = hipzout.lkey;
553 mr_attr->rkey = hipzout.rkey;
554 ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);
555
556 query_mr_exit1:
557 spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
558 query_mr_exit0:
559 if (ret)
560 ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
561 ret, mr, mr_attr);
562 return ret;
563 } /* end ehca_query_mr() */
564
565 /*----------------------------------------------------------------------*/
566
567 int ehca_dereg_mr(struct ib_mr *mr)
568 {
569 int ret = 0;
570 u64 h_ret;
571 struct ehca_shca *shca =
572 container_of(mr->device, struct ehca_shca, ib_device);
573 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
574 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
575 u32 cur_pid = current->tgid;
576
577 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
578 (my_pd->ownpid != cur_pid)) {
579 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
580 cur_pid, my_pd->ownpid);
581 ret = -EINVAL;
582 goto dereg_mr_exit0;
583 }
584
585 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
586 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
587 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
588 ret = -EINVAL;
589 goto dereg_mr_exit0;
590 } else if (e_mr == shca->maxmr) {
591 /* should be impossible, however reject to be sure */
592 ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
593 "shca->maxmr=%p mr->lkey=%x",
594 mr, shca->maxmr, mr->lkey);
595 ret = -EINVAL;
596 goto dereg_mr_exit0;
597 }
598
599 /* TODO: BUSY: MR still has bound window(s) */
600 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
601 if (h_ret != H_SUCCESS) {
602 ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
603 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
604 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
605 e_mr->ipz_mr_handle.handle, mr->lkey);
606 ret = ehca2ib_return_code(h_ret);
607 goto dereg_mr_exit0;
608 }
609
610 if (e_mr->umem)
611 ib_umem_release(e_mr->umem);
612
613 /* successful deregistration */
614 ehca_mr_delete(e_mr);
615
616 dereg_mr_exit0:
617 if (ret)
618 ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
619 return ret;
620 } /* end ehca_dereg_mr() */
621
622 /*----------------------------------------------------------------------*/
623
624 struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
625 {
626 struct ib_mw *ib_mw;
627 u64 h_ret;
628 struct ehca_mw *e_mw;
629 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
630 struct ehca_shca *shca =
631 container_of(pd->device, struct ehca_shca, ib_device);
632 struct ehca_mw_hipzout_parms hipzout = {{0},0};
633
634 e_mw = ehca_mw_new();
635 if (!e_mw) {
636 ib_mw = ERR_PTR(-ENOMEM);
637 goto alloc_mw_exit0;
638 }
639
640 h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
641 e_pd->fw_pd, &hipzout);
642 if (h_ret != H_SUCCESS) {
643 ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
644 "shca=%p hca_hndl=%lx mw=%p",
645 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
646 ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
647 goto alloc_mw_exit1;
648 }
649 /* successful MW allocation */
650 e_mw->ipz_mw_handle = hipzout.handle;
651 e_mw->ib_mw.rkey = hipzout.rkey;
652 return &e_mw->ib_mw;
653
654 alloc_mw_exit1:
655 ehca_mw_delete(e_mw);
656 alloc_mw_exit0:
657 if (IS_ERR(ib_mw))
658 ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
659 return ib_mw;
660 } /* end ehca_alloc_mw() */
661
662 /*----------------------------------------------------------------------*/
663
664 int ehca_bind_mw(struct ib_qp *qp,
665 struct ib_mw *mw,
666 struct ib_mw_bind *mw_bind)
667 {
668 /* TODO: not supported up to now */
669 ehca_gen_err("bind MW currently not supported by HCAD");
670
671 return -EPERM;
672 } /* end ehca_bind_mw() */
673
674 /*----------------------------------------------------------------------*/
675
676 int ehca_dealloc_mw(struct ib_mw *mw)
677 {
678 u64 h_ret;
679 struct ehca_shca *shca =
680 container_of(mw->device, struct ehca_shca, ib_device);
681 struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
682
683 h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
684 if (h_ret != H_SUCCESS) {
685 ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
686 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
687 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
688 e_mw->ipz_mw_handle.handle);
689 return ehca2ib_return_code(h_ret);
690 }
691 /* successful deallocation */
692 ehca_mw_delete(e_mw);
693 return 0;
694 } /* end ehca_dealloc_mw() */
695
696 /*----------------------------------------------------------------------*/
697
698 struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
699 int mr_access_flags,
700 struct ib_fmr_attr *fmr_attr)
701 {
702 struct ib_fmr *ib_fmr;
703 struct ehca_shca *shca =
704 container_of(pd->device, struct ehca_shca, ib_device);
705 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
706 struct ehca_mr *e_fmr;
707 int ret;
708 u32 tmp_lkey, tmp_rkey;
709 struct ehca_mr_pginfo pginfo;
710
711 /* check other parameters */
712 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
713 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
714 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
715 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
716 /*
717 * Remote Write Access requires Local Write Access
718 * Remote Atomic Access requires Local Write Access
719 */
720 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
721 mr_access_flags);
722 ib_fmr = ERR_PTR(-EINVAL);
723 goto alloc_fmr_exit0;
724 }
725 if (mr_access_flags & IB_ACCESS_MW_BIND) {
726 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
727 mr_access_flags);
728 ib_fmr = ERR_PTR(-EINVAL);
729 goto alloc_fmr_exit0;
730 }
731 if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
732 ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
733 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
734 fmr_attr->max_pages, fmr_attr->max_maps,
735 fmr_attr->page_shift);
736 ib_fmr = ERR_PTR(-EINVAL);
737 goto alloc_fmr_exit0;
738 }
739 if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
740 ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
741 ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
742 fmr_attr->page_shift);
743 ib_fmr = ERR_PTR(-EINVAL);
744 goto alloc_fmr_exit0;
745 }
746
747 e_fmr = ehca_mr_new();
748 if (!e_fmr) {
749 ib_fmr = ERR_PTR(-ENOMEM);
750 goto alloc_fmr_exit0;
751 }
752 e_fmr->flags |= EHCA_MR_FLAG_FMR;
753
754 /* register MR on HCA */
755 memset(&pginfo, 0, sizeof(pginfo));
756 ret = ehca_reg_mr(shca, e_fmr, NULL,
757 fmr_attr->max_pages * (1 << fmr_attr->page_shift),
758 mr_access_flags, e_pd, &pginfo,
759 &tmp_lkey, &tmp_rkey);
760 if (ret) {
761 ib_fmr = ERR_PTR(ret);
762 goto alloc_fmr_exit1;
763 }
764
765 /* successful */
766 e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
767 e_fmr->fmr_max_pages = fmr_attr->max_pages;
768 e_fmr->fmr_max_maps = fmr_attr->max_maps;
769 e_fmr->fmr_map_cnt = 0;
770 return &e_fmr->ib.ib_fmr;
771
772 alloc_fmr_exit1:
773 ehca_mr_delete(e_fmr);
774 alloc_fmr_exit0:
775 if (IS_ERR(ib_fmr))
776 ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
777 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
778 mr_access_flags, fmr_attr);
779 return ib_fmr;
780 } /* end ehca_alloc_fmr() */
781
782 /*----------------------------------------------------------------------*/
783
784 int ehca_map_phys_fmr(struct ib_fmr *fmr,
785 u64 *page_list,
786 int list_len,
787 u64 iova)
788 {
789 int ret;
790 struct ehca_shca *shca =
791 container_of(fmr->device, struct ehca_shca, ib_device);
792 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
793 struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
794 struct ehca_mr_pginfo pginfo;
795 u32 tmp_lkey, tmp_rkey;
796
797 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
798 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
799 e_fmr, e_fmr->flags);
800 ret = -EINVAL;
801 goto map_phys_fmr_exit0;
802 }
803 ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
804 if (ret)
805 goto map_phys_fmr_exit0;
806 if (iova % e_fmr->fmr_page_size) {
 807 /* iova must be aligned to the FMR page size */
808 ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
809 iova, e_fmr->fmr_page_size);
810 ret = -EINVAL;
811 goto map_phys_fmr_exit0;
812 }
813 if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
814 /* HCAD does not limit the maps, however trace this anyway */
815 ehca_info(fmr->device, "map limit exceeded, fmr=%p "
816 "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
817 fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
818 }
819
820 memset(&pginfo, 0, sizeof(pginfo));
821 pginfo.type = EHCA_MR_PGI_FMR;
822 pginfo.num_kpages = list_len;
823 pginfo.num_hwpages = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
824 pginfo.u.fmr.page_list = page_list;
825 pginfo.next_hwpage = ((iova & (e_fmr->fmr_page_size-1)) /
826 EHCA_PAGESIZE);
827 pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
828
829 ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova,
830 list_len * e_fmr->fmr_page_size,
831 e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
832 if (ret)
833 goto map_phys_fmr_exit0;
834
835 /* successful reregistration */
836 e_fmr->fmr_map_cnt++;
837 e_fmr->ib.ib_fmr.lkey = tmp_lkey;
838 e_fmr->ib.ib_fmr.rkey = tmp_rkey;
839 return 0;
840
841 map_phys_fmr_exit0:
842 if (ret)
843 ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
844 "iova=%lx",
845 ret, fmr, page_list, list_len, iova);
846 return ret;
847 } /* end ehca_map_phys_fmr() */
848
849 /*----------------------------------------------------------------------*/
850
851 int ehca_unmap_fmr(struct list_head *fmr_list)
852 {
853 int ret = 0;
854 struct ib_fmr *ib_fmr;
855 struct ehca_shca *shca = NULL;
856 struct ehca_shca *prev_shca;
857 struct ehca_mr *e_fmr;
858 u32 num_fmr = 0;
859 u32 unmap_fmr_cnt = 0;
860
 861 /* check that all FMRs belong to the same SHCA, and check the internal FMR flag */
862 list_for_each_entry(ib_fmr, fmr_list, list) {
863 prev_shca = shca;
864 if (!ib_fmr) {
865 ehca_gen_err("bad fmr=%p in list", ib_fmr);
866 ret = -EINVAL;
867 goto unmap_fmr_exit0;
868 }
869 shca = container_of(ib_fmr->device, struct ehca_shca,
870 ib_device);
871 e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
872 if ((shca != prev_shca) && prev_shca) {
873 ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
874 "prev_shca=%p e_fmr=%p",
875 shca, prev_shca, e_fmr);
876 ret = -EINVAL;
877 goto unmap_fmr_exit0;
878 }
879 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
880 ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
881 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
882 ret = -EINVAL;
883 goto unmap_fmr_exit0;
884 }
885 num_fmr++;
886 }
887
888 /* loop over all FMRs to unmap */
889 list_for_each_entry(ib_fmr, fmr_list, list) {
890 unmap_fmr_cnt++;
891 e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
892 shca = container_of(ib_fmr->device, struct ehca_shca,
893 ib_device);
894 ret = ehca_unmap_one_fmr(shca, e_fmr);
895 if (ret) {
896 /* unmap failed, stop unmapping of rest of FMRs */
897 ehca_err(&shca->ib_device, "unmap of one FMR failed, "
898 "stop rest, e_fmr=%p num_fmr=%x "
899 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
900 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
901 goto unmap_fmr_exit0;
902 }
903 }
904
905 unmap_fmr_exit0:
906 if (ret)
907 ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
908 ret, fmr_list, num_fmr, unmap_fmr_cnt);
909 return ret;
910 } /* end ehca_unmap_fmr() */
911
912 /*----------------------------------------------------------------------*/
913
914 int ehca_dealloc_fmr(struct ib_fmr *fmr)
915 {
916 int ret;
917 u64 h_ret;
918 struct ehca_shca *shca =
919 container_of(fmr->device, struct ehca_shca, ib_device);
920 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
921
922 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
923 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
924 e_fmr, e_fmr->flags);
925 ret = -EINVAL;
926 goto free_fmr_exit0;
927 }
928
929 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
930 if (h_ret != H_SUCCESS) {
931 ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
932 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
933 h_ret, e_fmr, shca->ipz_hca_handle.handle,
934 e_fmr->ipz_mr_handle.handle, fmr->lkey);
935 ret = ehca2ib_return_code(h_ret);
936 goto free_fmr_exit0;
937 }
938 /* successful deregistration */
939 ehca_mr_delete(e_fmr);
940 return 0;
941
942 free_fmr_exit0:
943 if (ret)
944 ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
945 return ret;
946 } /* end ehca_dealloc_fmr() */
947
948 /*----------------------------------------------------------------------*/
949
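/*
 * Register an MR with the HCA: allocate the MR resource via
 * hipz_h_alloc_resource_mr() and register all hw pages with
 * ehca_reg_mr_rpages(). If the rpage registration fails, the MR resource
 * is freed again.
 */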
950 int ehca_reg_mr(struct ehca_shca *shca,
951 struct ehca_mr *e_mr,
952 u64 *iova_start,
953 u64 size,
954 int acl,
955 struct ehca_pd *e_pd,
956 struct ehca_mr_pginfo *pginfo,
957 u32 *lkey, /*OUT*/
958 u32 *rkey) /*OUT*/
959 {
960 int ret;
961 u64 h_ret;
962 u32 hipz_acl;
963 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
964
965 ehca_mrmw_map_acl(acl, &hipz_acl);
966 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
967 if (ehca_use_hp_mr == 1)
968 hipz_acl |= 0x00000001;
969
970 h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
971 (u64)iova_start, size, hipz_acl,
972 e_pd->fw_pd, &hipzout);
973 if (h_ret != H_SUCCESS) {
974 ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
975 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
976 ret = ehca2ib_return_code(h_ret);
977 goto ehca_reg_mr_exit0;
978 }
979
980 e_mr->ipz_mr_handle = hipzout.handle;
981
982 ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
983 if (ret)
984 goto ehca_reg_mr_exit1;
985
986 /* successful registration */
987 e_mr->num_kpages = pginfo->num_kpages;
988 e_mr->num_hwpages = pginfo->num_hwpages;
989 e_mr->start = iova_start;
990 e_mr->size = size;
991 e_mr->acl = acl;
992 *lkey = hipzout.lkey;
993 *rkey = hipzout.rkey;
994 return 0;
995
996 ehca_reg_mr_exit1:
997 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
998 if (h_ret != H_SUCCESS) {
999 ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
1000 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
1001 "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%x",
1002 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
1003 hipzout.lkey, pginfo, pginfo->num_kpages,
1004 pginfo->num_hwpages, ret);
1005 ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
1006 "not recoverable");
1007 }
1008 ehca_reg_mr_exit0:
1009 if (ret)
1010 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
1011 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
1012 "num_kpages=%lx num_hwpages=%lx",
1013 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
1014 pginfo->num_kpages, pginfo->num_hwpages);
1015 return ret;
1016 } /* end ehca_reg_mr() */
1017
1018 /*----------------------------------------------------------------------*/
1019
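/*
 * Register the hw pages described by pginfo for an already allocated MR.
 * Pages are passed to firmware in chunks of at most MAX_RPAGES; every hCall
 * but the last must return H_PAGE_REGISTERED, the final one H_SUCCESS.
 */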
1020 int ehca_reg_mr_rpages(struct ehca_shca *shca,
1021 struct ehca_mr *e_mr,
1022 struct ehca_mr_pginfo *pginfo)
1023 {
1024 int ret = 0;
1025 u64 h_ret;
1026 u32 rnum;
1027 u64 rpage;
1028 u32 i;
1029 u64 *kpage;
1030
1031 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1032 if (!kpage) {
1033 ehca_err(&shca->ib_device, "kpage alloc failed");
1034 ret = -ENOMEM;
1035 goto ehca_reg_mr_rpages_exit0;
1036 }
1037
1038 /* max 512 pages per shot */
1039 for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
1040
1041 if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
1042 rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
1043 if (rnum == 0)
1044 rnum = MAX_RPAGES; /* last shot is full */
1045 } else
1046 rnum = MAX_RPAGES;
1047
1048 ret = ehca_set_pagebuf(pginfo, rnum, kpage);
1049 if (ret) {
1050 ehca_err(&shca->ib_device, "ehca_set_pagebuf "
1051 "bad rc, ret=%x rnum=%x kpage=%p",
1052 ret, rnum, kpage);
1053 goto ehca_reg_mr_rpages_exit1;
1054 }
1055
1056 if (rnum > 1) {
1057 rpage = virt_to_abs(kpage);
1058 if (!rpage) {
1059 ehca_err(&shca->ib_device, "kpage=%p i=%x",
1060 kpage, i);
1061 ret = -EFAULT;
1062 goto ehca_reg_mr_rpages_exit1;
1063 }
1064 } else
1065 rpage = *kpage;
1066
1067 h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
1068 0, /* pagesize 4k */
1069 0, rpage, rnum);
1070
1071 if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
1072 /*
1073 * check for 'registration complete'==H_SUCCESS
1074 * and for 'page registered'==H_PAGE_REGISTERED
1075 */
1076 if (h_ret != H_SUCCESS) {
1077 ehca_err(&shca->ib_device, "last "
1078 "hipz_reg_rpage_mr failed, h_ret=%lx "
1079 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
1080 " lkey=%x", h_ret, e_mr, i,
1081 shca->ipz_hca_handle.handle,
1082 e_mr->ipz_mr_handle.handle,
1083 e_mr->ib.ib_mr.lkey);
1084 ret = ehca2ib_return_code(h_ret);
1085 break;
1086 } else
1087 ret = 0;
1088 } else if (h_ret != H_PAGE_REGISTERED) {
1089 ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
1090 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
1091 "mr_hndl=%lx", h_ret, e_mr, i,
1092 e_mr->ib.ib_mr.lkey,
1093 shca->ipz_hca_handle.handle,
1094 e_mr->ipz_mr_handle.handle);
1095 ret = ehca2ib_return_code(h_ret);
1096 break;
1097 } else
1098 ret = 0;
1099 } /* end for(i) */
1100
1101
1102 ehca_reg_mr_rpages_exit1:
1103 ehca_free_fw_ctrlblock(kpage);
1104 ehca_reg_mr_rpages_exit0:
1105 if (ret)
1106 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
1107 "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr,
1108 pginfo, pginfo->num_kpages, pginfo->num_hwpages);
1109 return ret;
1110 } /* end ehca_reg_mr_rpages() */
1111
1112 /*----------------------------------------------------------------------*/
1113
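/*
 * Fast-path reregistration using a single hipz_h_reregister_pmr() call.
 * Returns -EAGAIN (with pginfo restored) if the hCall fails, so the caller
 * can fall back to the dereg/reg sequence ("Rereg3").
 */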
1114 inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
1115 struct ehca_mr *e_mr,
1116 u64 *iova_start,
1117 u64 size,
1118 u32 acl,
1119 struct ehca_pd *e_pd,
1120 struct ehca_mr_pginfo *pginfo,
1121 u32 *lkey, /*OUT*/
1122 u32 *rkey) /*OUT*/
1123 {
1124 int ret;
1125 u64 h_ret;
1126 u32 hipz_acl;
1127 u64 *kpage;
1128 u64 rpage;
1129 struct ehca_mr_pginfo pginfo_save;
1130 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1131
1132 ehca_mrmw_map_acl(acl, &hipz_acl);
1133 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1134
1135 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
1136 if (!kpage) {
1137 ehca_err(&shca->ib_device, "kpage alloc failed");
1138 ret = -ENOMEM;
1139 goto ehca_rereg_mr_rereg1_exit0;
1140 }
1141
1142 pginfo_save = *pginfo;
1143 ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
1144 if (ret) {
1145 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
1146 "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx "
1147 "kpage=%p", e_mr, pginfo, pginfo->type,
1148 pginfo->num_kpages, pginfo->num_hwpages, kpage);
1149 goto ehca_rereg_mr_rereg1_exit1;
1150 }
1151 rpage = virt_to_abs(kpage);
1152 if (!rpage) {
1153 ehca_err(&shca->ib_device, "kpage=%p", kpage);
1154 ret = -EFAULT;
1155 goto ehca_rereg_mr_rereg1_exit1;
1156 }
1157 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
1158 (u64)iova_start, size, hipz_acl,
1159 e_pd->fw_pd, rpage, &hipzout);
1160 if (h_ret != H_SUCCESS) {
1161 /*
 1162 * reregistration unsuccessful, try it again with the 3 hCalls;
 1163 * this is required e.g. in case of H_MR_CONDITION
 1164 * (MW bound or MR is shared)
1165 */
1166 ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
1167 "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
1168 *pginfo = pginfo_save;
1169 ret = -EAGAIN;
1170 } else if ((u64*)hipzout.vaddr != iova_start) {
1171 ehca_err(&shca->ib_device, "PHYP changed iova_start in "
1172 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
1173 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
1174 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
1175 e_mr->ib.ib_mr.lkey, hipzout.lkey);
1176 ret = -EFAULT;
1177 } else {
1178 /*
1179 * successful reregistration
1180 * note: start and start_out are identical for eServer HCAs
1181 */
1182 e_mr->num_kpages = pginfo->num_kpages;
1183 e_mr->num_hwpages = pginfo->num_hwpages;
1184 e_mr->start = iova_start;
1185 e_mr->size = size;
1186 e_mr->acl = acl;
1187 *lkey = hipzout.lkey;
1188 *rkey = hipzout.rkey;
1189 }
1190
1191 ehca_rereg_mr_rereg1_exit1:
1192 ehca_free_fw_ctrlblock(kpage);
1193 ehca_rereg_mr_rereg1_exit0:
1194 if ( ret && (ret != -EAGAIN) )
1195 ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
1196 "pginfo=%p num_kpages=%lx num_hwpages=%lx",
1197 ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
1198 pginfo->num_hwpages);
1199 return ret;
1200 } /* end ehca_rereg_mr_rereg1() */
1201
1202 /*----------------------------------------------------------------------*/
1203
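/*
 * Reregister an MR. Uses the single-hCall path (Rereg1) when the page counts
 * allow it; otherwise, or if Rereg1 returns -EAGAIN, the MR is freed and
 * registered again from scratch (Rereg3, i.e. 3 hCalls).
 */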
1204 int ehca_rereg_mr(struct ehca_shca *shca,
1205 struct ehca_mr *e_mr,
1206 u64 *iova_start,
1207 u64 size,
1208 int acl,
1209 struct ehca_pd *e_pd,
1210 struct ehca_mr_pginfo *pginfo,
1211 u32 *lkey,
1212 u32 *rkey)
1213 {
1214 int ret = 0;
1215 u64 h_ret;
1216 int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
1217 int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
1218
1219 /* first determine reregistration hCall(s) */
1220 if ((pginfo->num_hwpages > MAX_RPAGES) ||
1221 (e_mr->num_hwpages > MAX_RPAGES) ||
1222 (pginfo->num_hwpages > e_mr->num_hwpages)) {
1223 ehca_dbg(&shca->ib_device, "Rereg3 case, "
1224 "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x",
1225 pginfo->num_hwpages, e_mr->num_hwpages);
1226 rereg_1_hcall = 0;
1227 rereg_3_hcall = 1;
1228 }
1229
1230 if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
1231 rereg_1_hcall = 0;
1232 rereg_3_hcall = 1;
1233 e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
1234 ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
1235 e_mr);
1236 }
1237
1238 if (rereg_1_hcall) {
1239 ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
1240 acl, e_pd, pginfo, lkey, rkey);
1241 if (ret) {
1242 if (ret == -EAGAIN)
1243 rereg_3_hcall = 1;
1244 else
1245 goto ehca_rereg_mr_exit0;
1246 }
1247 }
1248
1249 if (rereg_3_hcall) {
1250 struct ehca_mr save_mr;
1251
1252 /* first deregister old MR */
1253 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
1254 if (h_ret != H_SUCCESS) {
1255 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1256 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
1257 "mr->lkey=%x",
1258 h_ret, e_mr, shca->ipz_hca_handle.handle,
1259 e_mr->ipz_mr_handle.handle,
1260 e_mr->ib.ib_mr.lkey);
1261 ret = ehca2ib_return_code(h_ret);
1262 goto ehca_rereg_mr_exit0;
1263 }
1264 /* clean ehca_mr_t, without changing struct ib_mr and lock */
1265 save_mr = *e_mr;
1266 ehca_mr_deletenew(e_mr);
1267
1268 /* set some MR values */
1269 e_mr->flags = save_mr.flags;
1270 e_mr->fmr_page_size = save_mr.fmr_page_size;
1271 e_mr->fmr_max_pages = save_mr.fmr_max_pages;
1272 e_mr->fmr_max_maps = save_mr.fmr_max_maps;
1273 e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
1274
1275 ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
1276 e_pd, pginfo, lkey, rkey);
1277 if (ret) {
1278 u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
1279 memcpy(&e_mr->flags, &(save_mr.flags),
1280 sizeof(struct ehca_mr) - offset);
1281 goto ehca_rereg_mr_exit0;
1282 }
1283 }
1284
1285 ehca_rereg_mr_exit0:
1286 if (ret)
1287 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
1288 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
1289 "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
1290 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
1291 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
1292 rereg_1_hcall, rereg_3_hcall);
1293 return ret;
1294 } /* end ehca_rereg_mr() */
1295
1296 /*----------------------------------------------------------------------*/
1297
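/*
 * Unmap one FMR: first try a zero-length hipz_h_reregister_pmr(); if that
 * fails, free the MR resource and register it again empty, restoring the
 * saved FMR attributes.
 */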
1298 int ehca_unmap_one_fmr(struct ehca_shca *shca,
1299 struct ehca_mr *e_fmr)
1300 {
1301 int ret = 0;
1302 u64 h_ret;
1303 struct ehca_pd *e_pd =
1304 container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
1305 struct ehca_mr save_fmr;
1306 u32 tmp_lkey, tmp_rkey;
1307 struct ehca_mr_pginfo pginfo;
1308 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1310
1311 if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
1312 /*
1313 * note: after using rereg hcall with len=0,
1314 * rereg hcall must be used again for registering pages
1315 */
1316 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
1317 0, 0, e_pd->fw_pd, 0, &hipzout);
1318 if (h_ret == H_SUCCESS) {
1319 /* successful reregistration */
1320 e_fmr->start = NULL;
1321 e_fmr->size = 0;
1322 tmp_lkey = hipzout.lkey;
1323 tmp_rkey = hipzout.rkey;
1324 return 0;
1325 }
1326 /*
1327 * should not happen, because length checked above,
1328 * FMRs are not shared and no MW bound to FMRs
1329 */
1330 ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
1331 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
1332 "mr_hndl=%lx lkey=%x lkey_out=%x",
1333 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1334 e_fmr->ipz_mr_handle.handle,
1335 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
1336 /* try free and rereg */
1337 }
1338
1339 /* first free old FMR */
1340 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
1341 if (h_ret != H_SUCCESS) {
1342 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1343 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
1344 "lkey=%x",
1345 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1346 e_fmr->ipz_mr_handle.handle,
1347 e_fmr->ib.ib_fmr.lkey);
1348 ret = ehca2ib_return_code(h_ret);
1349 goto ehca_unmap_one_fmr_exit0;
1350 }
1351 /* clean ehca_mr_t, without changing lock */
1352 save_fmr = *e_fmr;
1353 ehca_mr_deletenew(e_fmr);
1354
1355 /* set some MR values */
1356 e_fmr->flags = save_fmr.flags;
1357 e_fmr->fmr_page_size = save_fmr.fmr_page_size;
1358 e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
1359 e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
1360 e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
1361 e_fmr->acl = save_fmr.acl;
1362
1363 memset(&pginfo, 0, sizeof(pginfo));
1364 pginfo.type = EHCA_MR_PGI_FMR;
1365 pginfo.num_kpages = 0;
1366 pginfo.num_hwpages = 0;
1367 ret = ehca_reg_mr(shca, e_fmr, NULL,
1368 (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
1369 e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
1370 &tmp_rkey);
1371 if (ret) {
1372 u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
 1373 memcpy(&e_fmr->flags, &(save_fmr.flags),
1374 sizeof(struct ehca_mr) - offset);
1375 goto ehca_unmap_one_fmr_exit0;
1376 }
1377
1378 ehca_unmap_one_fmr_exit0:
1379 if (ret)
1380 ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
1381 "fmr_max_pages=%x",
1382 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
1383 return ret;
1384 } /* end ehca_unmap_one_fmr() */
1385
1386 /*----------------------------------------------------------------------*/
1387
1388 int ehca_reg_smr(struct ehca_shca *shca,
1389 struct ehca_mr *e_origmr,
1390 struct ehca_mr *e_newmr,
1391 u64 *iova_start,
1392 int acl,
1393 struct ehca_pd *e_pd,
1394 u32 *lkey, /*OUT*/
1395 u32 *rkey) /*OUT*/
1396 {
1397 int ret = 0;
1398 u64 h_ret;
1399 u32 hipz_acl;
1400 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1401
1402 ehca_mrmw_map_acl(acl, &hipz_acl);
1403 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1404
1405 h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1406 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1407 &hipzout);
1408 if (h_ret != H_SUCCESS) {
1409 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
1410 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
1411 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
1412 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
1413 shca->ipz_hca_handle.handle,
1414 e_origmr->ipz_mr_handle.handle,
1415 e_origmr->ib.ib_mr.lkey);
1416 ret = ehca2ib_return_code(h_ret);
1417 goto ehca_reg_smr_exit0;
1418 }
1419 /* successful registration */
1420 e_newmr->num_kpages = e_origmr->num_kpages;
1421 e_newmr->num_hwpages = e_origmr->num_hwpages;
1422 e_newmr->start = iova_start;
1423 e_newmr->size = e_origmr->size;
1424 e_newmr->acl = acl;
1425 e_newmr->ipz_mr_handle = hipzout.handle;
1426 *lkey = hipzout.lkey;
1427 *rkey = hipzout.rkey;
1428 return 0;
1429
1430 ehca_reg_smr_exit0:
1431 if (ret)
1432 ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
1433 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
1434 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
1435 return ret;
1436 } /* end ehca_reg_smr() */
1437
1438 /*----------------------------------------------------------------------*/
1439
1440 /* register internal max-MR to internal SHCA */
1441 int ehca_reg_internal_maxmr(
1442 struct ehca_shca *shca,
1443 struct ehca_pd *e_pd,
1444 struct ehca_mr **e_maxmr) /*OUT*/
1445 {
1446 int ret;
1447 struct ehca_mr *e_mr;
1448 u64 *iova_start;
1449 u64 size_maxmr;
1450 struct ehca_mr_pginfo pginfo;
1451 struct ib_phys_buf ib_pbuf;
1452 u32 num_kpages;
1453 u32 num_hwpages;
1454
1455 e_mr = ehca_mr_new();
1456 if (!e_mr) {
1457 ehca_err(&shca->ib_device, "out of memory");
1458 ret = -ENOMEM;
1459 goto ehca_reg_internal_maxmr_exit0;
1460 }
1461 e_mr->flags |= EHCA_MR_FLAG_MAXMR;
1462
1463 /* register internal max-MR on HCA */
1464 size_maxmr = (u64)high_memory - PAGE_OFFSET;
1465 iova_start = (u64*)KERNELBASE;
1466 ib_pbuf.addr = 0;
1467 ib_pbuf.size = size_maxmr;
1468 num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
1469 PAGE_SIZE);
1470 num_hwpages = NUM_CHUNKS(((u64)iova_start % EHCA_PAGESIZE) + size_maxmr,
1471 EHCA_PAGESIZE);
1472
1473 memset(&pginfo, 0, sizeof(pginfo));
1474 pginfo.type = EHCA_MR_PGI_PHYS;
1475 pginfo.num_kpages = num_kpages;
1476 pginfo.num_hwpages = num_hwpages;
1477 pginfo.u.phy.num_phys_buf = 1;
1478 pginfo.u.phy.phys_buf_array = &ib_pbuf;
1479
1480 ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
1481 &pginfo, &e_mr->ib.ib_mr.lkey,
1482 &e_mr->ib.ib_mr.rkey);
1483 if (ret) {
1484 ehca_err(&shca->ib_device, "reg of internal max MR failed, "
1485 "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x "
1486 "num_hwpages=%x", e_mr, iova_start, size_maxmr,
1487 num_kpages, num_hwpages);
1488 goto ehca_reg_internal_maxmr_exit1;
1489 }
1490
1491 /* successful registration of all pages */
1492 e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
1493 e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
1494 e_mr->ib.ib_mr.uobject = NULL;
1495 atomic_inc(&(e_pd->ib_pd.usecnt));
1496 atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
1497 *e_maxmr = e_mr;
1498 return 0;
1499
1500 ehca_reg_internal_maxmr_exit1:
1501 ehca_mr_delete(e_mr);
1502 ehca_reg_internal_maxmr_exit0:
1503 if (ret)
1504 ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
1505 ret, shca, e_pd, e_maxmr);
1506 return ret;
1507 } /* end ehca_reg_internal_maxmr() */
1508
1509 /*----------------------------------------------------------------------*/
1510
1511 int ehca_reg_maxmr(struct ehca_shca *shca,
1512 struct ehca_mr *e_newmr,
1513 u64 *iova_start,
1514 int acl,
1515 struct ehca_pd *e_pd,
1516 u32 *lkey,
1517 u32 *rkey)
1518 {
1519 u64 h_ret;
1520 struct ehca_mr *e_origmr = shca->maxmr;
1521 u32 hipz_acl;
1522 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1523
1524 ehca_mrmw_map_acl(acl, &hipz_acl);
1525 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1526
1527 h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1528 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1529 &hipzout);
1530 if (h_ret != H_SUCCESS) {
1531 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
1532 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
1533 h_ret, e_origmr, shca->ipz_hca_handle.handle,
1534 e_origmr->ipz_mr_handle.handle,
1535 e_origmr->ib.ib_mr.lkey);
1536 return ehca2ib_return_code(h_ret);
1537 }
1538 /* successful registration */
1539 e_newmr->num_kpages = e_origmr->num_kpages;
1540 e_newmr->num_hwpages = e_origmr->num_hwpages;
1541 e_newmr->start = iova_start;
1542 e_newmr->size = e_origmr->size;
1543 e_newmr->acl = acl;
1544 e_newmr->ipz_mr_handle = hipzout.handle;
1545 *lkey = hipzout.lkey;
1546 *rkey = hipzout.rkey;
1547 return 0;
1548 } /* end ehca_reg_maxmr() */
1549
1550 /*----------------------------------------------------------------------*/
1551
1552 int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
1553 {
1554 int ret;
1555 struct ehca_mr *e_maxmr;
1556 struct ib_pd *ib_pd;
1557
1558 if (!shca->maxmr) {
1559 ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
1560 ret = -EINVAL;
1561 goto ehca_dereg_internal_maxmr_exit0;
1562 }
1563
1564 e_maxmr = shca->maxmr;
1565 ib_pd = e_maxmr->ib.ib_mr.pd;
1566 shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
1567
1568 ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
1569 if (ret) {
1570 ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
1571 "ret=%x e_maxmr=%p shca=%p lkey=%x",
1572 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
1573 shca->maxmr = e_maxmr;
1574 goto ehca_dereg_internal_maxmr_exit0;
1575 }
1576
1577 atomic_dec(&ib_pd->usecnt);
1578
1579 ehca_dereg_internal_maxmr_exit0:
1580 if (ret)
1581 ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
1582 ret, shca, shca->maxmr);
1583 return ret;
1584 } /* end ehca_dereg_internal_maxmr() */
1585
1586 /*----------------------------------------------------------------------*/
1587
1588 /*
 1589 * check the physical buffer array of MR verbs for validity and
 1590 * calculate the MR size
1591 */
1592 int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
1593 int num_phys_buf,
1594 u64 *iova_start,
1595 u64 *size)
1596 {
1597 struct ib_phys_buf *pbuf = phys_buf_array;
1598 u64 size_count = 0;
1599 u32 i;
1600
1601 if (num_phys_buf == 0) {
1602 ehca_gen_err("bad phys buf array len, num_phys_buf=0");
1603 return -EINVAL;
1604 }
1605 /* check first buffer */
1606 if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
1607 ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
1608 "pbuf->addr=%lx pbuf->size=%lx",
1609 iova_start, pbuf->addr, pbuf->size);
1610 return -EINVAL;
1611 }
1612 if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
1613 (num_phys_buf > 1)) {
1614 ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
1615 "pbuf->size=%lx", pbuf->addr, pbuf->size);
1616 return -EINVAL;
1617 }
1618
1619 for (i = 0; i < num_phys_buf; i++) {
1620 if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
1621 ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
1622 "pbuf->size=%lx",
1623 i, pbuf->addr, pbuf->size);
1624 return -EINVAL;
1625 }
1626 if (((i > 0) && /* not 1st */
1627 (i < (num_phys_buf - 1)) && /* not last */
1628 (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
1629 ehca_gen_err("bad size, i=%x pbuf->size=%lx",
1630 i, pbuf->size);
1631 return -EINVAL;
1632 }
1633 size_count += pbuf->size;
1634 pbuf++;
1635 }
1636
1637 *size = size_count;
1638 return 0;
1639 } /* end ehca_mr_chk_buf_and_calc_size() */
1640
1641 /*----------------------------------------------------------------------*/
1642
1643 /* check page list of map FMR verb for validness */
1644 int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
1645 u64 *page_list,
1646 int list_len)
1647 {
1648 u32 i;
1649 u64 *page;
1650
1651 if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
1652 ehca_gen_err("bad list_len, list_len=%x "
1653 "e_fmr->fmr_max_pages=%x fmr=%p",
1654 list_len, e_fmr->fmr_max_pages, e_fmr);
1655 return -EINVAL;
1656 }
1657
1658 /* each page must be aligned */
1659 page = page_list;
1660 for (i = 0; i < list_len; i++) {
1661 if (*page % e_fmr->fmr_page_size) {
1662 ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
1663 "fmr_page_size=%x", i, *page, page, e_fmr,
1664 e_fmr->fmr_page_size);
1665 return -EINVAL;
1666 }
1667 page++;
1668 }
1669
1670 return 0;
1671 } /* end ehca_fmr_check_page_list() */
1672
1673 /*----------------------------------------------------------------------*/
1674
1675 /* PAGE_SIZE >= pginfo->hwpage_size */
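/*
 * Fill kpage[] with hw page addresses taken from the umem chunk list,
 * splitting each kernel page into PAGE_SIZE/EHCA_PAGESIZE hw pages and
 * remembering the position (next_chunk/next_nmap/next_hwpage) in pginfo.
 */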
1676 static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
1677 u32 number,
1678 u64 *kpage)
1679 {
1680 int ret = 0;
1681 struct ib_umem_chunk *prev_chunk;
1682 struct ib_umem_chunk *chunk;
1683 u64 pgaddr;
1684 u32 i = 0;
1685 u32 j = 0;
1686
1687 /* loop over desired chunk entries */
1688 chunk = pginfo->u.usr.next_chunk;
1689 prev_chunk = pginfo->u.usr.next_chunk;
1690 list_for_each_entry_continue(
1691 chunk, (&(pginfo->u.usr.region->chunk_list)), list) {
1692 for (i = pginfo->u.usr.next_nmap; i < chunk->nmap; ) {
1693 pgaddr = page_to_pfn(chunk->page_list[i].page)
1694 << PAGE_SHIFT ;
1695 *kpage = phys_to_abs(pgaddr +
1696 (pginfo->next_hwpage *
1697 EHCA_PAGESIZE));
1698 if ( !(*kpage) ) {
1699 ehca_gen_err("pgaddr=%lx "
1700 "chunk->page_list[i]=%lx "
1701 "i=%x next_hwpage=%lx",
1702 pgaddr, (u64)sg_dma_address(
1703 &chunk->page_list[i]),
1704 i, pginfo->next_hwpage);
1705 return -EFAULT;
1706 }
1707 (pginfo->hwpage_cnt)++;
1708 (pginfo->next_hwpage)++;
1709 kpage++;
1710 if (pginfo->next_hwpage %
1711 (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
1712 (pginfo->kpage_cnt)++;
1713 (pginfo->u.usr.next_nmap)++;
1714 pginfo->next_hwpage = 0;
1715 i++;
1716 }
1717 j++;
1718 if (j >= number) break;
1719 }
1720 if ((pginfo->u.usr.next_nmap >= chunk->nmap) &&
1721 (j >= number)) {
1722 pginfo->u.usr.next_nmap = 0;
1723 prev_chunk = chunk;
1724 break;
1725 } else if (pginfo->u.usr.next_nmap >= chunk->nmap) {
1726 pginfo->u.usr.next_nmap = 0;
1727 prev_chunk = chunk;
1728 } else if (j >= number)
1729 break;
1730 else
1731 prev_chunk = chunk;
1732 }
1733 pginfo->u.usr.next_chunk =
1734 list_prepare_entry(prev_chunk,
1735 (&(pginfo->u.usr.region->chunk_list)),
1736 list);
1737 return ret;
1738 }
1739
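/*
 * Fill kpage[] with hw page addresses taken from the phys_buf_array,
 * advancing next_buf/next_hwpage in pginfo across calls.
 */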
1740 int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
1741 u32 number,
1742 u64 *kpage)
1743 {
1744 int ret = 0;
1745 struct ib_phys_buf *pbuf;
1746 u64 num_hw, offs_hw;
1747 u32 i = 0;
1748
1749 /* loop over desired phys_buf_array entries */
1750 while (i < number) {
1751 pbuf = pginfo->u.phy.phys_buf_array + pginfo->u.phy.next_buf;
1752 num_hw = NUM_CHUNKS((pbuf->addr % EHCA_PAGESIZE) +
1753 pbuf->size, EHCA_PAGESIZE);
1754 offs_hw = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
1755 while (pginfo->next_hwpage < offs_hw + num_hw) {
1756 /* sanity check */
1757 if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
1758 (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
1759 ehca_gen_err("kpage_cnt >= num_kpages, "
1760 "kpage_cnt=%lx num_kpages=%lx "
1761 "hwpage_cnt=%lx "
1762 "num_hwpages=%lx i=%x",
1763 pginfo->kpage_cnt,
1764 pginfo->num_kpages,
1765 pginfo->hwpage_cnt,
1766 pginfo->num_hwpages, i);
1767 return -EFAULT;
1768 }
1769 *kpage = phys_to_abs(
1770 (pbuf->addr & EHCA_PAGEMASK)
1771 + (pginfo->next_hwpage * EHCA_PAGESIZE));
1772 if ( !(*kpage) && pbuf->addr ) {
1773 ehca_gen_err("pbuf->addr=%lx "
1774 "pbuf->size=%lx "
1775 "next_hwpage=%lx", pbuf->addr,
1776 pbuf->size,
1777 pginfo->next_hwpage);
1778 return -EFAULT;
1779 }
1780 (pginfo->hwpage_cnt)++;
1781 (pginfo->next_hwpage)++;
1782 if (pginfo->next_hwpage %
1783 (PAGE_SIZE / EHCA_PAGESIZE) == 0)
1784 (pginfo->kpage_cnt)++;
1785 kpage++;
1786 i++;
1787 if (i >= number) break;
1788 }
1789 if (pginfo->next_hwpage >= offs_hw + num_hw) {
1790 (pginfo->u.phy.next_buf)++;
1791 pginfo->next_hwpage = 0;
1792 }
1793 }
1794 return ret;
1795 }
1796
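/*
 * Fill kpage[] with hw page addresses taken from the FMR page list,
 * splitting each FMR page into fmr_pgsize/EHCA_PAGESIZE hw pages.
 */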
1797 int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
1798 u32 number,
1799 u64 *kpage)
1800 {
1801 int ret = 0;
1802 u64 *fmrlist;
1803 u32 i;
1804
1805 /* loop over desired page_list entries */
1806 fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
1807 for (i = 0; i < number; i++) {
1808 *kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
1809 pginfo->next_hwpage * EHCA_PAGESIZE);
1810 if ( !(*kpage) ) {
1811 ehca_gen_err("*fmrlist=%lx fmrlist=%p "
1812 "next_listelem=%lx next_hwpage=%lx",
1813 *fmrlist, fmrlist,
1814 pginfo->u.fmr.next_listelem,
1815 pginfo->next_hwpage);
1816 return -EFAULT;
1817 }
1818 (pginfo->hwpage_cnt)++;
1819 (pginfo->next_hwpage)++;
1820 kpage++;
1821 if (pginfo->next_hwpage %
1822 (pginfo->u.fmr.fmr_pgsize / EHCA_PAGESIZE) == 0) {
1823 (pginfo->kpage_cnt)++;
1824 (pginfo->u.fmr.next_listelem)++;
1825 fmrlist++;
1826 pginfo->next_hwpage = 0;
1827 }
1828 }
1829 return ret;
1830 }
1831
1832 /* setup page buffer from page info */
1833 int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
1834 u32 number,
1835 u64 *kpage)
1836 {
1837 int ret;
1838
1839 switch (pginfo->type) {
1840 case EHCA_MR_PGI_PHYS:
1841 ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
1842 break;
1843 case EHCA_MR_PGI_USER:
1844 ret = ehca_set_pagebuf_user1(pginfo, number, kpage);
1845 break;
1846 case EHCA_MR_PGI_FMR:
1847 ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
1848 break;
1849 default:
1850 ehca_gen_err("bad pginfo->type=%x", pginfo->type);
1851 ret = -EFAULT;
1852 break;
1853 }
1854 return ret;
1855 } /* end ehca_set_pagebuf() */
1856
1857 /*----------------------------------------------------------------------*/
1858
1859 /*
 1860 * check whether an MR covers the whole of kernel memory, i.e. is a max-MR;
 1861 * returns 1 if it is a max-MR, else 0
1862 */
1863 int ehca_mr_is_maxmr(u64 size,
1864 u64 *iova_start)
1865 {
 1866 /* an MR is treated as a max-MR only if it meets the following: */
1867 if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
1868 (iova_start == (void*)KERNELBASE)) {
1869 ehca_gen_dbg("this is a max-MR");
1870 return 1;
1871 } else
1872 return 0;
1873 } /* end ehca_mr_is_maxmr() */
1874
1875 /*----------------------------------------------------------------------*/
1876
1877 /* map access control for MR/MW. This routine is used for MR and MW. */
1878 void ehca_mrmw_map_acl(int ib_acl,
1879 u32 *hipz_acl)
1880 {
1881 *hipz_acl = 0;
1882 if (ib_acl & IB_ACCESS_REMOTE_READ)
1883 *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
1884 if (ib_acl & IB_ACCESS_REMOTE_WRITE)
1885 *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
1886 if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
1887 *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
1888 if (ib_acl & IB_ACCESS_LOCAL_WRITE)
1889 *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
1890 if (ib_acl & IB_ACCESS_MW_BIND)
1891 *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
1892 } /* end ehca_mrmw_map_acl() */
1893
1894 /*----------------------------------------------------------------------*/
1895
1896 /* sets page size in hipz access control for MR/MW. */
1897 void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
1898 {
1899 return; /* HCA supports only 4k */
1900 } /* end ehca_mrmw_set_pgsize_hipz_acl() */
1901
1902 /*----------------------------------------------------------------------*/
1903
1904 /*
1905 * reverse map access control for MR/MW.
1906 * This routine is used for MR and MW.
1907 */
1908 void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
1909 int *ib_acl) /*OUT*/
1910 {
1911 *ib_acl = 0;
1912 if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
1913 *ib_acl |= IB_ACCESS_REMOTE_READ;
1914 if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
1915 *ib_acl |= IB_ACCESS_REMOTE_WRITE;
1916 if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
1917 *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
1918 if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
1919 *ib_acl |= IB_ACCESS_LOCAL_WRITE;
1920 if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
1921 *ib_acl |= IB_ACCESS_MW_BIND;
1922 } /* end ehca_mrmw_reverse_map_acl() */
1923
1924
1925 /*----------------------------------------------------------------------*/
1926
1927 /*
1928 * MR destructor and constructor
 1929 * used in the Reregister MR verb; sets all fields of struct ehca_mr to 0,
 1930 * except the embedded struct ib_mr and the spinlock
1931 */
1932 void ehca_mr_deletenew(struct ehca_mr *mr)
1933 {
1934 mr->flags = 0;
1935 mr->num_kpages = 0;
1936 mr->num_hwpages = 0;
1937 mr->acl = 0;
1938 mr->start = NULL;
1939 mr->fmr_page_size = 0;
1940 mr->fmr_max_pages = 0;
1941 mr->fmr_max_maps = 0;
1942 mr->fmr_map_cnt = 0;
1943 memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
1944 memset(&mr->galpas, 0, sizeof(mr->galpas));
1945 } /* end ehca_mr_deletenew() */
1946
1947 int ehca_init_mrmw_cache(void)
1948 {
1949 mr_cache = kmem_cache_create("ehca_cache_mr",
1950 sizeof(struct ehca_mr), 0,
1951 SLAB_HWCACHE_ALIGN,
1952 NULL, NULL);
1953 if (!mr_cache)
1954 return -ENOMEM;
1955 mw_cache = kmem_cache_create("ehca_cache_mw",
1956 sizeof(struct ehca_mw), 0,
1957 SLAB_HWCACHE_ALIGN,
1958 NULL, NULL);
1959 if (!mw_cache) {
1960 kmem_cache_destroy(mr_cache);
1961 mr_cache = NULL;
1962 return -ENOMEM;
1963 }
1964 return 0;
1965 }
1966
1967 void ehca_cleanup_mrmw_cache(void)
1968 {
1969 if (mr_cache)
1970 kmem_cache_destroy(mr_cache);
1971 if (mw_cache)
1972 kmem_cache_destroy(mw_cache);
1973 }