drivers/staging/rdma/ehca/ehca_mrmw.c
1 /*
2  *  IBM eServer eHCA Infiniband device driver for Linux on POWER
3  *
4  *  MR/MW functions
5  *
6  *  Authors: Dietmar Decker <ddecker@de.ibm.com>
7  *           Christoph Raisch <raisch@de.ibm.com>
8  *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
9  *
10  *  Copyright (c) 2005 IBM Corporation
11  *
12  *  All rights reserved.
13  *
14  *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
15  *  BSD.
16  *
17  * OpenIB BSD License
18  *
19  * Redistribution and use in source and binary forms, with or without
20  * modification, are permitted provided that the following conditions are met:
21  *
22  * Redistributions of source code must retain the above copyright notice, this
23  * list of conditions and the following disclaimer.
24  *
25  * Redistributions in binary form must reproduce the above copyright notice,
26  * this list of conditions and the following disclaimer in the documentation
27  * and/or other materials
28  * provided with the distribution.
29  *
30  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
31  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
34  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
37  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40  * POSSIBILITY OF SUCH DAMAGE.
41  */
42
43 #include <linux/slab.h>
44 #include <rdma/ib_umem.h>
45
46 #include "ehca_iverbs.h"
47 #include "ehca_mrmw.h"
48 #include "hcp_if.h"
49 #include "hipz_hw.h"
50
51 #define NUM_CHUNKS(length, chunk_size) \
52         (((length) + (chunk_size - 1)) / (chunk_size))
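/*
 * For illustration: NUM_CHUNKS() is a plain ceiling division, e.g.
 * NUM_CHUNKS(5000, 4096) == 2 -- a 5000 byte range spans two 4K chunks.
 */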
53
54 /* max number of rpages (per hcall register_rpages) */
55 #define MAX_RPAGES 512
56
57 /* DMEM toleration management */
58 #define EHCA_SECTSHIFT        SECTION_SIZE_BITS
59 #define EHCA_SECTSIZE          (1UL << EHCA_SECTSHIFT)
60 #define EHCA_HUGEPAGESHIFT     34
61 #define EHCA_HUGEPAGE_SIZE     (1UL << EHCA_HUGEPAGESHIFT)
62 #define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
63 #define EHCA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
64 #define EHCA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
65 #define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
66 #define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
67 #define EHCA_TOP_MAP_SIZE (0x10000)               /* currently fixed map size */
68 #define EHCA_DIR_MAP_SIZE (0x10000)
69 #define EHCA_ENT_MAP_SIZE (0x10000)
70 #define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
71
72 static unsigned long ehca_mr_len;
73
74 /*
75  * Memory map data structures
76  */
77 struct ehca_dir_bmap {
78         u64 ent[EHCA_MAP_ENTRIES];
79 };
80 struct ehca_top_bmap {
81         struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
82 };
83 struct ehca_bmap {
84         struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
85 };
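/*
 * For orientation: a physical address is resolved into this three-level
 * map by converting it to a memory-section number and splitting that
 * number into top/dir/idx fields, conceptually:
 *
 *	sect = addr >> EHCA_SECTSHIFT;
 *	top  = sect >> EHCA_TOP_INDEX_SHIFT;
 *	dir  = (sect >> EHCA_DIR_INDEX_SHIFT) & EHCA_INDEX_MASK;
 *	idx  = sect & EHCA_INDEX_MASK;
 *
 * ehca_calc_sectbase() below performs the inverse of this split.
 */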
86
87 static struct ehca_bmap *ehca_bmap;
88
89 static struct kmem_cache *mr_cache;
90 static struct kmem_cache *mw_cache;
91
92 enum ehca_mr_pgsize {
93         EHCA_MR_PGSIZE4K  = 0x1000L,
94         EHCA_MR_PGSIZE64K = 0x10000L,
95         EHCA_MR_PGSIZE1M  = 0x100000L,
96         EHCA_MR_PGSIZE16M = 0x1000000L
97 };
98
99 #define EHCA_MR_PGSHIFT4K  12
100 #define EHCA_MR_PGSHIFT64K 16
101 #define EHCA_MR_PGSHIFT1M  20
102 #define EHCA_MR_PGSHIFT16M 24
103
104 static u64 ehca_map_vaddr(void *caddr);
105
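/*
 * Encode an MR hardware page size for the rpage hCalls:
 * 4K -> 0, 64K -> 1, 1M -> 2, 16M -> 3, i.e. (log2(pgsize) - 12) / 4.
 */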
106 static u32 ehca_encode_hwpage_size(u32 pgsize)
107 {
108         int log = ilog2(pgsize);
109         WARN_ON(log < 12 || log > 24 || log & 3);
110         return (log - 12) / 4;
111 }
112
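/*
 * shca->hca_cap_mr_pgsize is a bitmask of the MR page sizes the HCA
 * supports; rounding it down to a power of two yields the largest one.
 */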
113 static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
114 {
115         return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
116 }
117
118 static struct ehca_mr *ehca_mr_new(void)
119 {
120         struct ehca_mr *me;
121
122         me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
123         if (me)
124                 spin_lock_init(&me->mrlock);
125         else
126                 ehca_gen_err("alloc failed");
127
128         return me;
129 }
130
131 static void ehca_mr_delete(struct ehca_mr *me)
132 {
133         kmem_cache_free(mr_cache, me);
134 }
135
136 static struct ehca_mw *ehca_mw_new(void)
137 {
138         struct ehca_mw *me;
139
140         me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
141         if (me)
142                 spin_lock_init(&me->mwlock);
143         else
144                 ehca_gen_err("alloc failed");
145
146         return me;
147 }
148
149 static void ehca_mw_delete(struct ehca_mw *me)
150 {
151         kmem_cache_free(mw_cache, me);
152 }
153
154 /*----------------------------------------------------------------------*/
155
156 struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
157 {
158         struct ib_mr *ib_mr;
159         int ret;
160         struct ehca_mr *e_maxmr;
161         struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
162         struct ehca_shca *shca =
163                 container_of(pd->device, struct ehca_shca, ib_device);
164
165         if (shca->maxmr) {
166                 e_maxmr = ehca_mr_new();
167                 if (!e_maxmr) {
168                         ehca_err(&shca->ib_device, "out of memory");
169                         ib_mr = ERR_PTR(-ENOMEM);
170                         goto get_dma_mr_exit0;
171                 }
172
173                 ret = ehca_reg_maxmr(shca, e_maxmr,
174                                      (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)),
175                                      mr_access_flags, e_pd,
176                                      &e_maxmr->ib.ib_mr.lkey,
177                                      &e_maxmr->ib.ib_mr.rkey);
178                 if (ret) {
179                         ehca_mr_delete(e_maxmr);
180                         ib_mr = ERR_PTR(ret);
181                         goto get_dma_mr_exit0;
182                 }
183                 ib_mr = &e_maxmr->ib.ib_mr;
184         } else {
185                 ehca_err(&shca->ib_device, "no internal max-MR exists!");
186                 ib_mr = ERR_PTR(-EINVAL);
187                 goto get_dma_mr_exit0;
188         }
189
190 get_dma_mr_exit0:
191         if (IS_ERR(ib_mr))
192                 ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
193                          PTR_ERR(ib_mr), pd, mr_access_flags);
194         return ib_mr;
195 } /* end ehca_get_dma_mr() */
196
197 /*----------------------------------------------------------------------*/
198
199 struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
200                                u64 virt, int mr_access_flags,
201                                struct ib_udata *udata)
202 {
203         struct ib_mr *ib_mr;
204         struct ehca_mr *e_mr;
205         struct ehca_shca *shca =
206                 container_of(pd->device, struct ehca_shca, ib_device);
207         struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
208         struct ehca_mr_pginfo pginfo;
209         int ret, page_shift;
210         u32 num_kpages;
211         u32 num_hwpages;
212         u64 hwpage_size;
213
214         if (!pd) {
215                 ehca_gen_err("bad pd=%p", pd);
216                 return ERR_PTR(-EFAULT);
217         }
218
219         if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
220              !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
221             ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
222              !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
223                 /*
224                  * Remote Write Access requires Local Write Access
225                  * Remote Atomic Access requires Local Write Access
226                  */
227                 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
228                          mr_access_flags);
229                 ib_mr = ERR_PTR(-EINVAL);
230                 goto reg_user_mr_exit0;
231         }
232
233         if (length == 0 || virt + length < virt) {
234                 ehca_err(pd->device, "bad input values: length=%llx "
235                          "virt_base=%llx", length, virt);
236                 ib_mr = ERR_PTR(-EINVAL);
237                 goto reg_user_mr_exit0;
238         }
239
240         e_mr = ehca_mr_new();
241         if (!e_mr) {
242                 ehca_err(pd->device, "out of memory");
243                 ib_mr = ERR_PTR(-ENOMEM);
244                 goto reg_user_mr_exit0;
245         }
246
247         e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
248                                  mr_access_flags, 0);
249         if (IS_ERR(e_mr->umem)) {
250                 ib_mr = (void *)e_mr->umem;
251                 goto reg_user_mr_exit1;
252         }
253
254         if (e_mr->umem->page_size != PAGE_SIZE) {
255                 ehca_err(pd->device, "page size not supported, "
256                          "e_mr->umem->page_size=%x", e_mr->umem->page_size);
257                 ib_mr = ERR_PTR(-EINVAL);
258                 goto reg_user_mr_exit2;
259         }
260
261         /* determine number of MR pages */
262         num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
263         /* select proper hw_pgsize */
264         page_shift = PAGE_SHIFT;
265         if (e_mr->umem->hugetlb) {
266                 /* determine page_shift, clamp between 4K and 16M */
267                 page_shift = (fls64(length - 1) + 3) & ~3;
268                 page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
269                                  EHCA_MR_PGSHIFT16M);
270         }
271         hwpage_size = 1UL << page_shift;
272
273         /* now that we have the desired page size, shift until it's
274          * supported, too. 4K is always supported, so this terminates.
275          */
276         while (!(hwpage_size & shca->hca_cap_mr_pgsize))
277                 hwpage_size >>= 4;
278
279 reg_user_mr_fallback:
280         num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
281         /* register MR on HCA */
282         memset(&pginfo, 0, sizeof(pginfo));
283         pginfo.type = EHCA_MR_PGI_USER;
284         pginfo.hwpage_size = hwpage_size;
285         pginfo.num_kpages = num_kpages;
286         pginfo.num_hwpages = num_hwpages;
287         pginfo.u.usr.region = e_mr->umem;
288         pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;
289         pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
290         ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
291                           e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
292                           &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
293         if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
294                 ehca_warn(pd->device, "failed to register mr "
295                           "with hwpage_size=%llx", hwpage_size);
296                 ehca_info(pd->device, "try to register mr with "
297                           "kpage_size=%lx", PAGE_SIZE);
298                 /*
299                  * the kpages backing this range are not contiguous within
300                  * a hw page, so retry with the kernel page size as fallback
301                  */
302                 hwpage_size = PAGE_SIZE;
303                 goto reg_user_mr_fallback;
304         }
305         if (ret) {
306                 ib_mr = ERR_PTR(ret);
307                 goto reg_user_mr_exit2;
308         }
309
310         /* successful registration of all pages */
311         return &e_mr->ib.ib_mr;
312
313 reg_user_mr_exit2:
314         ib_umem_release(e_mr->umem);
315 reg_user_mr_exit1:
316         ehca_mr_delete(e_mr);
317 reg_user_mr_exit0:
318         if (IS_ERR(ib_mr))
319                 ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
320                          PTR_ERR(ib_mr), pd, mr_access_flags, udata);
321         return ib_mr;
322 } /* end ehca_reg_user_mr() */
323
324 /*----------------------------------------------------------------------*/
325
326 int ehca_dereg_mr(struct ib_mr *mr)
327 {
328         int ret = 0;
329         u64 h_ret;
330         struct ehca_shca *shca =
331                 container_of(mr->device, struct ehca_shca, ib_device);
332         struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
333
334         if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
335                 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
336                          "e_mr->flags=%x", mr, e_mr, e_mr->flags);
337                 ret = -EINVAL;
338                 goto dereg_mr_exit0;
339         } else if (e_mr == shca->maxmr) {
340                 /* should be impossible, however reject to be sure */
341                 ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
342                          "shca->maxmr=%p mr->lkey=%x",
343                          mr, shca->maxmr, mr->lkey);
344                 ret = -EINVAL;
345                 goto dereg_mr_exit0;
346         }
347
348         /* TODO: BUSY: MR still has bound window(s) */
349         h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
350         if (h_ret != H_SUCCESS) {
351                 ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
352                          "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
353                          h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
354                          e_mr->ipz_mr_handle.handle, mr->lkey);
355                 ret = ehca2ib_return_code(h_ret);
356                 goto dereg_mr_exit0;
357         }
358
359         if (e_mr->umem)
360                 ib_umem_release(e_mr->umem);
361
362         /* successful deregistration */
363         ehca_mr_delete(e_mr);
364
365 dereg_mr_exit0:
366         if (ret)
367                 ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
368         return ret;
369 } /* end ehca_dereg_mr() */
370
371 /*----------------------------------------------------------------------*/
372
373 struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
374 {
375         struct ib_mw *ib_mw;
376         u64 h_ret;
377         struct ehca_mw *e_mw;
378         struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
379         struct ehca_shca *shca =
380                 container_of(pd->device, struct ehca_shca, ib_device);
381         struct ehca_mw_hipzout_parms hipzout;
382
383         if (type != IB_MW_TYPE_1)
384                 return ERR_PTR(-EINVAL);
385
386         e_mw = ehca_mw_new();
387         if (!e_mw) {
388                 ib_mw = ERR_PTR(-ENOMEM);
389                 goto alloc_mw_exit0;
390         }
391
392         h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
393                                          e_pd->fw_pd, &hipzout);
394         if (h_ret != H_SUCCESS) {
395                 ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
396                          "shca=%p hca_hndl=%llx mw=%p",
397                          h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
398                 ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
399                 goto alloc_mw_exit1;
400         }
401         /* successful MW allocation */
402         e_mw->ipz_mw_handle = hipzout.handle;
403         e_mw->ib_mw.rkey    = hipzout.rkey;
404         return &e_mw->ib_mw;
405
406 alloc_mw_exit1:
407         ehca_mw_delete(e_mw);
408 alloc_mw_exit0:
409         if (IS_ERR(ib_mw))
410                 ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
411         return ib_mw;
412 } /* end ehca_alloc_mw() */
413
414 /*----------------------------------------------------------------------*/
415
416 int ehca_dealloc_mw(struct ib_mw *mw)
417 {
418         u64 h_ret;
419         struct ehca_shca *shca =
420                 container_of(mw->device, struct ehca_shca, ib_device);
421         struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
422
423         h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
424         if (h_ret != H_SUCCESS) {
425                 ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
426                          "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
427                          h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
428                          e_mw->ipz_mw_handle.handle);
429                 return ehca2ib_return_code(h_ret);
430         }
431         /* successful deallocation */
432         ehca_mw_delete(e_mw);
433         return 0;
434 } /* end ehca_dealloc_mw() */
435
436 /*----------------------------------------------------------------------*/
437
438 struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
439                               int mr_access_flags,
440                               struct ib_fmr_attr *fmr_attr)
441 {
442         struct ib_fmr *ib_fmr;
443         struct ehca_shca *shca =
444                 container_of(pd->device, struct ehca_shca, ib_device);
445         struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
446         struct ehca_mr *e_fmr;
447         int ret;
448         u32 tmp_lkey, tmp_rkey;
449         struct ehca_mr_pginfo pginfo;
450         u64 hw_pgsize;
451
452         /* check other parameters */
453         if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
454              !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
455             ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
456              !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
457                 /*
458                  * Remote Write Access requires Local Write Access
459                  * Remote Atomic Access requires Local Write Access
460                  */
461                 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
462                          mr_access_flags);
463                 ib_fmr = ERR_PTR(-EINVAL);
464                 goto alloc_fmr_exit0;
465         }
466         if (mr_access_flags & IB_ACCESS_MW_BIND) {
467                 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
468                          mr_access_flags);
469                 ib_fmr = ERR_PTR(-EINVAL);
470                 goto alloc_fmr_exit0;
471         }
472         if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
473                 ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
474                          "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
475                          fmr_attr->max_pages, fmr_attr->max_maps,
476                          fmr_attr->page_shift);
477                 ib_fmr = ERR_PTR(-EINVAL);
478                 goto alloc_fmr_exit0;
479         }
480
481         hw_pgsize = 1 << fmr_attr->page_shift;
482         if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
483                 ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
484                          fmr_attr->page_shift);
485                 ib_fmr = ERR_PTR(-EINVAL);
486                 goto alloc_fmr_exit0;
487         }
488
489         e_fmr = ehca_mr_new();
490         if (!e_fmr) {
491                 ib_fmr = ERR_PTR(-ENOMEM);
492                 goto alloc_fmr_exit0;
493         }
494         e_fmr->flags |= EHCA_MR_FLAG_FMR;
495
496         /* register MR on HCA */
497         memset(&pginfo, 0, sizeof(pginfo));
498         pginfo.hwpage_size = hw_pgsize;
499         /*
500          * pginfo.num_hwpages==0, ie register_rpages() will not be called
501          * but deferred to map_phys_fmr()
502          */
503         ret = ehca_reg_mr(shca, e_fmr, NULL,
504                           fmr_attr->max_pages * (1 << fmr_attr->page_shift),
505                           mr_access_flags, e_pd, &pginfo,
506                           &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
507         if (ret) {
508                 ib_fmr = ERR_PTR(ret);
509                 goto alloc_fmr_exit1;
510         }
511
512         /* successful */
513         e_fmr->hwpage_size = hw_pgsize;
514         e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
515         e_fmr->fmr_max_pages = fmr_attr->max_pages;
516         e_fmr->fmr_max_maps = fmr_attr->max_maps;
517         e_fmr->fmr_map_cnt = 0;
518         return &e_fmr->ib.ib_fmr;
519
520 alloc_fmr_exit1:
521         ehca_mr_delete(e_fmr);
522 alloc_fmr_exit0:
523         return ib_fmr;
524 } /* end ehca_alloc_fmr() */
525
526 /*----------------------------------------------------------------------*/
527
528 int ehca_map_phys_fmr(struct ib_fmr *fmr,
529                       u64 *page_list,
530                       int list_len,
531                       u64 iova)
532 {
533         int ret;
534         struct ehca_shca *shca =
535                 container_of(fmr->device, struct ehca_shca, ib_device);
536         struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
537         struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
538         struct ehca_mr_pginfo pginfo;
539         u32 tmp_lkey, tmp_rkey;
540
541         if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
542                 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
543                          e_fmr, e_fmr->flags);
544                 ret = -EINVAL;
545                 goto map_phys_fmr_exit0;
546         }
547         ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
548         if (ret)
549                 goto map_phys_fmr_exit0;
550         if (iova % e_fmr->fmr_page_size) {
551                 /* iova must be aligned to the FMR page size */
552                 ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
553                          iova, e_fmr->fmr_page_size);
554                 ret = -EINVAL;
555                 goto map_phys_fmr_exit0;
556         }
557         if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
558                 /* HCAD does not limit the maps, however trace this anyway */
559                 ehca_info(fmr->device, "map limit exceeded, fmr=%p "
560                           "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
561                           fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
562         }
563
564         memset(&pginfo, 0, sizeof(pginfo));
565         pginfo.type = EHCA_MR_PGI_FMR;
566         pginfo.num_kpages = list_len;
567         pginfo.hwpage_size = e_fmr->hwpage_size;
568         pginfo.num_hwpages =
569                 list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
570         pginfo.u.fmr.page_list = page_list;
571         pginfo.next_hwpage =
572                 (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
573         pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
574
575         ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
576                             list_len * e_fmr->fmr_page_size,
577                             e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
578         if (ret)
579                 goto map_phys_fmr_exit0;
580
581         /* successful reregistration */
582         e_fmr->fmr_map_cnt++;
583         e_fmr->ib.ib_fmr.lkey = tmp_lkey;
584         e_fmr->ib.ib_fmr.rkey = tmp_rkey;
585         return 0;
586
587 map_phys_fmr_exit0:
588         if (ret)
589                 ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
590                          "iova=%llx", ret, fmr, page_list, list_len, iova);
591         return ret;
592 } /* end ehca_map_phys_fmr() */
593
594 /*----------------------------------------------------------------------*/
595
596 int ehca_unmap_fmr(struct list_head *fmr_list)
597 {
598         int ret = 0;
599         struct ib_fmr *ib_fmr;
600         struct ehca_shca *shca = NULL;
601         struct ehca_shca *prev_shca;
602         struct ehca_mr *e_fmr;
603         u32 num_fmr = 0;
604         u32 unmap_fmr_cnt = 0;
605
606         /* check that all FMRs belong to the same SHCA, and check the internal flag */
607         list_for_each_entry(ib_fmr, fmr_list, list) {
608                 prev_shca = shca;
609                 shca = container_of(ib_fmr->device, struct ehca_shca,
610                                     ib_device);
611                 e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
612                 if ((shca != prev_shca) && prev_shca) {
613                         ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
614                                  "prev_shca=%p e_fmr=%p",
615                                  shca, prev_shca, e_fmr);
616                         ret = -EINVAL;
617                         goto unmap_fmr_exit0;
618                 }
619                 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
620                         ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
621                                  "e_fmr->flags=%x", e_fmr, e_fmr->flags);
622                         ret = -EINVAL;
623                         goto unmap_fmr_exit0;
624                 }
625                 num_fmr++;
626         }
627
628         /* loop over all FMRs to unmap */
629         list_for_each_entry(ib_fmr, fmr_list, list) {
630                 unmap_fmr_cnt++;
631                 e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
632                 shca = container_of(ib_fmr->device, struct ehca_shca,
633                                     ib_device);
634                 ret = ehca_unmap_one_fmr(shca, e_fmr);
635                 if (ret) {
636                         /* unmap failed, stop unmapping of rest of FMRs */
637                         ehca_err(&shca->ib_device, "unmap of one FMR failed, "
638                                  "stop rest, e_fmr=%p num_fmr=%x "
639                                  "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
640                                  unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
641                         goto unmap_fmr_exit0;
642                 }
643         }
644
645 unmap_fmr_exit0:
646         if (ret)
647                 ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
648                              ret, fmr_list, num_fmr, unmap_fmr_cnt);
649         return ret;
650 } /* end ehca_unmap_fmr() */
651
652 /*----------------------------------------------------------------------*/
653
654 int ehca_dealloc_fmr(struct ib_fmr *fmr)
655 {
656         int ret;
657         u64 h_ret;
658         struct ehca_shca *shca =
659                 container_of(fmr->device, struct ehca_shca, ib_device);
660         struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
661
662         if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
663                 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
664                          e_fmr, e_fmr->flags);
665                 ret = -EINVAL;
666                 goto free_fmr_exit0;
667         }
668
669         h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
670         if (h_ret != H_SUCCESS) {
671                 ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
672                          "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
673                          h_ret, e_fmr, shca->ipz_hca_handle.handle,
674                          e_fmr->ipz_mr_handle.handle, fmr->lkey);
675                 ret = ehca2ib_return_code(h_ret);
676                 goto free_fmr_exit0;
677         }
678         /* successful deregistration */
679         ehca_mr_delete(e_fmr);
680         return 0;
681
682 free_fmr_exit0:
683         if (ret)
684                 ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
685         return ret;
686 } /* end ehca_dealloc_fmr() */
687
688 /*----------------------------------------------------------------------*/
689
690 static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
691                                    struct ehca_mr *e_mr,
692                                    struct ehca_mr_pginfo *pginfo);
693
694 int ehca_reg_mr(struct ehca_shca *shca,
695                 struct ehca_mr *e_mr,
696                 u64 *iova_start,
697                 u64 size,
698                 int acl,
699                 struct ehca_pd *e_pd,
700                 struct ehca_mr_pginfo *pginfo,
701                 u32 *lkey, /*OUT*/
702                 u32 *rkey, /*OUT*/
703                 enum ehca_reg_type reg_type)
704 {
705         int ret;
706         u64 h_ret;
707         u32 hipz_acl;
708         struct ehca_mr_hipzout_parms hipzout;
709
710         ehca_mrmw_map_acl(acl, &hipz_acl);
711         ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
712         if (ehca_use_hp_mr == 1)
713                 hipz_acl |= 0x00000001;
714
715         h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
716                                          (u64)iova_start, size, hipz_acl,
717                                          e_pd->fw_pd, &hipzout);
718         if (h_ret != H_SUCCESS) {
719                 ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
720                          "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
721                 ret = ehca2ib_return_code(h_ret);
722                 goto ehca_reg_mr_exit0;
723         }
724
725         e_mr->ipz_mr_handle = hipzout.handle;
726
727         if (reg_type == EHCA_REG_BUSMAP_MR)
728                 ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
729         else if (reg_type == EHCA_REG_MR)
730                 ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
731         else
732                 ret = -EINVAL;
733
734         if (ret)
735                 goto ehca_reg_mr_exit1;
736
737         /* successful registration */
738         e_mr->num_kpages = pginfo->num_kpages;
739         e_mr->num_hwpages = pginfo->num_hwpages;
740         e_mr->hwpage_size = pginfo->hwpage_size;
741         e_mr->start = iova_start;
742         e_mr->size = size;
743         e_mr->acl = acl;
744         *lkey = hipzout.lkey;
745         *rkey = hipzout.rkey;
746         return 0;
747
748 ehca_reg_mr_exit1:
749         h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
750         if (h_ret != H_SUCCESS) {
751                 ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
752                          "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
753                          "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
754                          h_ret, shca, e_mr, iova_start, size, acl, e_pd,
755                          hipzout.lkey, pginfo, pginfo->num_kpages,
756                          pginfo->num_hwpages, ret);
757                 ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
758                          "not recoverable");
759         }
760 ehca_reg_mr_exit0:
761         if (ret)
762                 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
763                          "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
764                          "num_kpages=%llx num_hwpages=%llx",
765                          ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
766                          pginfo->num_kpages, pginfo->num_hwpages);
767         return ret;
768 } /* end ehca_reg_mr() */
769
770 /*----------------------------------------------------------------------*/
771
772 int ehca_reg_mr_rpages(struct ehca_shca *shca,
773                        struct ehca_mr *e_mr,
774                        struct ehca_mr_pginfo *pginfo)
775 {
776         int ret = 0;
777         u64 h_ret;
778         u32 rnum;
779         u64 rpage;
780         u32 i;
781         u64 *kpage;
782
783         if (!pginfo->num_hwpages) /* in case of fmr */
784                 return 0;
785
786         kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
787         if (!kpage) {
788                 ehca_err(&shca->ib_device, "kpage alloc failed");
789                 ret = -ENOMEM;
790                 goto ehca_reg_mr_rpages_exit0;
791         }
792
793         /* max MAX_RPAGES ehca mr pages per register call */
794         for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
795
796                 if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
797                         rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
798                         if (rnum == 0)
799                                 rnum = MAX_RPAGES;      /* last shot is full */
800                 } else
801                         rnum = MAX_RPAGES;
802
803                 ret = ehca_set_pagebuf(pginfo, rnum, kpage);
804                 if (ret) {
805                         ehca_err(&shca->ib_device, "ehca_set_pagebuf "
806                                  "bad rc, ret=%i rnum=%x kpage=%p",
807                                  ret, rnum, kpage);
808                         goto ehca_reg_mr_rpages_exit1;
809                 }
810
811                 if (rnum > 1) {
812                         rpage = __pa(kpage);
813                         if (!rpage) {
814                                 ehca_err(&shca->ib_device, "kpage=%p i=%x",
815                                          kpage, i);
816                                 ret = -EFAULT;
817                                 goto ehca_reg_mr_rpages_exit1;
818                         }
819                 } else
820                         rpage = *kpage;
821
822                 h_ret = hipz_h_register_rpage_mr(
823                         shca->ipz_hca_handle, e_mr,
824                         ehca_encode_hwpage_size(pginfo->hwpage_size),
825                         0, rpage, rnum);
826
827                 if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
828                         /*
829                          * check for 'registration complete'==H_SUCCESS
830                          * and for 'page registered'==H_PAGE_REGISTERED
831                          */
832                         if (h_ret != H_SUCCESS) {
833                                 ehca_err(&shca->ib_device, "last "
834                                          "hipz_reg_rpage_mr failed, h_ret=%lli "
835                                          "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
836                                          " lkey=%x", h_ret, e_mr, i,
837                                          shca->ipz_hca_handle.handle,
838                                          e_mr->ipz_mr_handle.handle,
839                                          e_mr->ib.ib_mr.lkey);
840                                 ret = ehca2ib_return_code(h_ret);
841                                 break;
842                         } else
843                                 ret = 0;
844                 } else if (h_ret != H_PAGE_REGISTERED) {
845                         ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
846                                  "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
847                                  "mr_hndl=%llx", h_ret, e_mr, i,
848                                  e_mr->ib.ib_mr.lkey,
849                                  shca->ipz_hca_handle.handle,
850                                  e_mr->ipz_mr_handle.handle);
851                         ret = ehca2ib_return_code(h_ret);
852                         break;
853                 } else
854                         ret = 0;
855         } /* end for(i) */
856
857
858 ehca_reg_mr_rpages_exit1:
859         ehca_free_fw_ctrlblock(kpage);
860 ehca_reg_mr_rpages_exit0:
861         if (ret)
862                 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
863                          "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
864                          pginfo, pginfo->num_kpages, pginfo->num_hwpages);
865         return ret;
866 } /* end ehca_reg_mr_rpages() */
867
868 /*----------------------------------------------------------------------*/
869
870 inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
871                                 struct ehca_mr *e_mr,
872                                 u64 *iova_start,
873                                 u64 size,
874                                 u32 acl,
875                                 struct ehca_pd *e_pd,
876                                 struct ehca_mr_pginfo *pginfo,
877                                 u32 *lkey, /*OUT*/
878                                 u32 *rkey) /*OUT*/
879 {
880         int ret;
881         u64 h_ret;
882         u32 hipz_acl;
883         u64 *kpage;
884         u64 rpage;
885         struct ehca_mr_pginfo pginfo_save;
886         struct ehca_mr_hipzout_parms hipzout;
887
888         ehca_mrmw_map_acl(acl, &hipz_acl);
889         ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
890
891         kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
892         if (!kpage) {
893                 ehca_err(&shca->ib_device, "kpage alloc failed");
894                 ret = -ENOMEM;
895                 goto ehca_rereg_mr_rereg1_exit0;
896         }
897
898         pginfo_save = *pginfo;
899         ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
900         if (ret) {
901                 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
902                          "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
903                          "kpage=%p", e_mr, pginfo, pginfo->type,
904                          pginfo->num_kpages, pginfo->num_hwpages, kpage);
905                 goto ehca_rereg_mr_rereg1_exit1;
906         }
907         rpage = __pa(kpage);
908         if (!rpage) {
909                 ehca_err(&shca->ib_device, "kpage=%p", kpage);
910                 ret = -EFAULT;
911                 goto ehca_rereg_mr_rereg1_exit1;
912         }
913         h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
914                                       (u64)iova_start, size, hipz_acl,
915                                       e_pd->fw_pd, rpage, &hipzout);
916         if (h_ret != H_SUCCESS) {
917                 /*
918                  * reregistration unsuccessful; try it again with the 3-hCall
919                  * path, e.g. this is required in case of H_MR_CONDITION
920                  * (MW bound or MR is shared)
921                  */
922                 ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
923                           "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
924                 *pginfo = pginfo_save;
925                 ret = -EAGAIN;
926         } else if ((u64 *)hipzout.vaddr != iova_start) {
927                 ehca_err(&shca->ib_device, "PHYP changed iova_start in "
928                          "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
929                          "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
930                          hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
931                          e_mr->ib.ib_mr.lkey, hipzout.lkey);
932                 ret = -EFAULT;
933         } else {
934                 /*
935                  * successful reregistration
936                  * note: start and start_out are identical for eServer HCAs
937                  */
938                 e_mr->num_kpages = pginfo->num_kpages;
939                 e_mr->num_hwpages = pginfo->num_hwpages;
940                 e_mr->hwpage_size = pginfo->hwpage_size;
941                 e_mr->start = iova_start;
942                 e_mr->size = size;
943                 e_mr->acl = acl;
944                 *lkey = hipzout.lkey;
945                 *rkey = hipzout.rkey;
946         }
947
948 ehca_rereg_mr_rereg1_exit1:
949         ehca_free_fw_ctrlblock(kpage);
950 ehca_rereg_mr_rereg1_exit0:
951         if ( ret && (ret != -EAGAIN) )
952                 ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
953                          "pginfo=%p num_kpages=%llx num_hwpages=%llx",
954                          ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
955                          pginfo->num_hwpages);
956         return ret;
957 } /* end ehca_rereg_mr_rereg1() */
958
959 /*----------------------------------------------------------------------*/
960
961 int ehca_rereg_mr(struct ehca_shca *shca,
962                   struct ehca_mr *e_mr,
963                   u64 *iova_start,
964                   u64 size,
965                   int acl,
966                   struct ehca_pd *e_pd,
967                   struct ehca_mr_pginfo *pginfo,
968                   u32 *lkey,
969                   u32 *rkey)
970 {
971         int ret = 0;
972         u64 h_ret;
973         int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
974         int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
975
976         /* first determine reregistration hCall(s) */
977         if ((pginfo->num_hwpages > MAX_RPAGES) ||
978             (e_mr->num_hwpages > MAX_RPAGES) ||
979             (pginfo->num_hwpages > e_mr->num_hwpages)) {
980                 ehca_dbg(&shca->ib_device, "Rereg3 case, "
981                          "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
982                          pginfo->num_hwpages, e_mr->num_hwpages);
983                 rereg_1_hcall = 0;
984                 rereg_3_hcall = 1;
985         }
986
987         if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
988                 rereg_1_hcall = 0;
989                 rereg_3_hcall = 1;
990                 e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
991                 ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
992                          e_mr);
993         }
994
995         if (rereg_1_hcall) {
996                 ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
997                                            acl, e_pd, pginfo, lkey, rkey);
998                 if (ret) {
999                         if (ret == -EAGAIN)
1000                                 rereg_3_hcall = 1;
1001                         else
1002                                 goto ehca_rereg_mr_exit0;
1003                 }
1004         }
1005
1006         if (rereg_3_hcall) {
1007                 struct ehca_mr save_mr;
1008
1009                 /* first deregister old MR */
1010                 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
1011                 if (h_ret != H_SUCCESS) {
1012                         ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1013                                  "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
1014                                  "mr->lkey=%x",
1015                                  h_ret, e_mr, shca->ipz_hca_handle.handle,
1016                                  e_mr->ipz_mr_handle.handle,
1017                                  e_mr->ib.ib_mr.lkey);
1018                         ret = ehca2ib_return_code(h_ret);
1019                         goto ehca_rereg_mr_exit0;
1020                 }
1021                 /* clean ehca_mr_t, without changing struct ib_mr and lock */
1022                 save_mr = *e_mr;
1023                 ehca_mr_deletenew(e_mr);
1024
1025                 /* set some MR values */
1026                 e_mr->flags = save_mr.flags;
1027                 e_mr->hwpage_size = save_mr.hwpage_size;
1028                 e_mr->fmr_page_size = save_mr.fmr_page_size;
1029                 e_mr->fmr_max_pages = save_mr.fmr_max_pages;
1030                 e_mr->fmr_max_maps = save_mr.fmr_max_maps;
1031                 e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
1032
1033                 ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
1034                                   e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
1035                 if (ret) {
1036                         u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
1037                         memcpy(&e_mr->flags, &(save_mr.flags),
1038                                sizeof(struct ehca_mr) - offset);
1039                         goto ehca_rereg_mr_exit0;
1040                 }
1041         }
1042
1043 ehca_rereg_mr_exit0:
1044         if (ret)
1045                 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
1046                          "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
1047                          "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
1048                          "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
1049                          acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
1050                          rereg_1_hcall, rereg_3_hcall);
1051         return ret;
1052 } /* end ehca_rereg_mr() */
1053
1054 /*----------------------------------------------------------------------*/
1055
1056 int ehca_unmap_one_fmr(struct ehca_shca *shca,
1057                        struct ehca_mr *e_fmr)
1058 {
1059         int ret = 0;
1060         u64 h_ret;
1061         struct ehca_pd *e_pd =
1062                 container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
1063         struct ehca_mr save_fmr;
1064         u32 tmp_lkey, tmp_rkey;
1065         struct ehca_mr_pginfo pginfo;
1066         struct ehca_mr_hipzout_parms hipzout;
1068
1069         if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
1070                 /*
1071                  * note: after using rereg hcall with len=0,
1072                  * rereg hcall must be used again for registering pages
1073                  */
1074                 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
1075                                               0, 0, e_pd->fw_pd, 0, &hipzout);
1076                 if (h_ret == H_SUCCESS) {
1077                         /* successful reregistration */
1078                         e_fmr->start = NULL;
1079                         e_fmr->size = 0;
1080                         tmp_lkey = hipzout.lkey;
1081                         tmp_rkey = hipzout.rkey;
1082                         return 0;
1083                 }
1084                 /*
1085                  * should not happen, because length checked above,
1086                  * FMRs are not shared and no MW bound to FMRs
1087                  */
1088                 ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
1089                          "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
1090                          "mr_hndl=%llx lkey=%x lkey_out=%x",
1091                          h_ret, e_fmr, shca->ipz_hca_handle.handle,
1092                          e_fmr->ipz_mr_handle.handle,
1093                          e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
1094                 /* try free and rereg */
1095         }
1096
1097         /* first free old FMR */
1098         h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
1099         if (h_ret != H_SUCCESS) {
1100                 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1101                          "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
1102                          "lkey=%x",
1103                          h_ret, e_fmr, shca->ipz_hca_handle.handle,
1104                          e_fmr->ipz_mr_handle.handle,
1105                          e_fmr->ib.ib_fmr.lkey);
1106                 ret = ehca2ib_return_code(h_ret);
1107                 goto ehca_unmap_one_fmr_exit0;
1108         }
1109         /* clean ehca_mr_t, without changing lock */
1110         save_fmr = *e_fmr;
1111         ehca_mr_deletenew(e_fmr);
1112
1113         /* set some MR values */
1114         e_fmr->flags = save_fmr.flags;
1115         e_fmr->hwpage_size = save_fmr.hwpage_size;
1116         e_fmr->fmr_page_size = save_fmr.fmr_page_size;
1117         e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
1118         e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
1119         e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
1120         e_fmr->acl = save_fmr.acl;
1121
1122         memset(&pginfo, 0, sizeof(pginfo));
1123         pginfo.type = EHCA_MR_PGI_FMR;
1124         ret = ehca_reg_mr(shca, e_fmr, NULL,
1125                           (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
1126                           e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
1127                           &tmp_rkey, EHCA_REG_MR);
1128         if (ret) {
1129                 u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
1130                 memcpy(&e_fmr->flags, &(save_fmr.flags),
1131                        sizeof(struct ehca_mr) - offset);
1132         }
1133
1134 ehca_unmap_one_fmr_exit0:
1135         if (ret)
1136                 ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
1137                          "fmr_max_pages=%x",
1138                          ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
1139         return ret;
1140 } /* end ehca_unmap_one_fmr() */
1141
1142 /*----------------------------------------------------------------------*/
1143
1144 int ehca_reg_smr(struct ehca_shca *shca,
1145                  struct ehca_mr *e_origmr,
1146                  struct ehca_mr *e_newmr,
1147                  u64 *iova_start,
1148                  int acl,
1149                  struct ehca_pd *e_pd,
1150                  u32 *lkey, /*OUT*/
1151                  u32 *rkey) /*OUT*/
1152 {
1153         int ret = 0;
1154         u64 h_ret;
1155         u32 hipz_acl;
1156         struct ehca_mr_hipzout_parms hipzout;
1157
1158         ehca_mrmw_map_acl(acl, &hipz_acl);
1159         ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
1160
1161         h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1162                                     (u64)iova_start, hipz_acl, e_pd->fw_pd,
1163                                     &hipzout);
1164         if (h_ret != H_SUCCESS) {
1165                 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
1166                          "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
1167                          "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
1168                          h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
1169                          shca->ipz_hca_handle.handle,
1170                          e_origmr->ipz_mr_handle.handle,
1171                          e_origmr->ib.ib_mr.lkey);
1172                 ret = ehca2ib_return_code(h_ret);
1173                 goto ehca_reg_smr_exit0;
1174         }
1175         /* successful registration */
1176         e_newmr->num_kpages = e_origmr->num_kpages;
1177         e_newmr->num_hwpages = e_origmr->num_hwpages;
1178         e_newmr->hwpage_size   = e_origmr->hwpage_size;
1179         e_newmr->start = iova_start;
1180         e_newmr->size = e_origmr->size;
1181         e_newmr->acl = acl;
1182         e_newmr->ipz_mr_handle = hipzout.handle;
1183         *lkey = hipzout.lkey;
1184         *rkey = hipzout.rkey;
1185         return 0;
1186
1187 ehca_reg_smr_exit0:
1188         if (ret)
1189                 ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
1190                          "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
1191                          ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
1192         return ret;
1193 } /* end ehca_reg_smr() */
1194
1195 /*----------------------------------------------------------------------*/
1196 static inline void *ehca_calc_sectbase(int top, int dir, int idx)
1197 {
1198         unsigned long ret = idx;
1199         ret |= dir << EHCA_DIR_INDEX_SHIFT;
1200         ret |= top << EHCA_TOP_INDEX_SHIFT;
1201         return __va(ret << SECTION_SIZE_BITS);
1202 }
1203
1204 #define ehca_bmap_valid(entry) \
1205         ((u64)entry != (u64)EHCA_INVAL_ADDR)
1206
1207 static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
1208                                struct ehca_shca *shca, struct ehca_mr *mr,
1209                                struct ehca_mr_pginfo *pginfo)
1210 {
1211         u64 h_ret = 0;
1212         unsigned long page = 0;
1213         u64 rpage = __pa(kpage);
1214         int page_count;
1215
1216         void *sectbase = ehca_calc_sectbase(top, dir, idx);
1217         if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
1218                 ehca_err(&shca->ib_device, "reg_mr_section will probably fail: "
1219                                            "hwpage_size does not fit to "
1220                                            "section start address");
1221         }
1222         page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
1223
1224         while (page < page_count) {
1225                 u64 rnum;
1226                 for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
1227                      rnum++) {
1228                         void *pg = sectbase + ((page++) * pginfo->hwpage_size);
1229                         kpage[rnum] = __pa(pg);
1230                 }
1231
1232                 h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
1233                         ehca_encode_hwpage_size(pginfo->hwpage_size),
1234                         0, rpage, rnum);
1235
1236                 if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
1237                         ehca_err(&shca->ib_device, "register_rpage_mr failed");
1238                         return h_ret;
1239                 }
1240         }
1241         return h_ret;
1242 }
1243
1244 static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
1245                                 struct ehca_shca *shca, struct ehca_mr *mr,
1246                                 struct ehca_mr_pginfo *pginfo)
1247 {
1248         u64 hret = H_SUCCESS;
1249         int idx;
1250
1251         for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
1252                 if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
1253                         continue;
1254
1255                 hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
1256                                            pginfo);
1257                 if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
1258                                 return hret;
1259         }
1260         return hret;
1261 }
1262
1263 static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
1264                                     struct ehca_mr *mr,
1265                                     struct ehca_mr_pginfo *pginfo)
1266 {
1267         u64 hret = H_SUCCESS;
1268         int dir;
1269
1270         for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
1271                 if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
1272                         continue;
1273
1274                 hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
1275                 if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
1276                                 return hret;
1277         }
1278         return hret;
1279 }
1280
1281 /* register internal max-MR to internal SHCA */
1282 int ehca_reg_internal_maxmr(
1283         struct ehca_shca *shca,
1284         struct ehca_pd *e_pd,
1285         struct ehca_mr **e_maxmr)  /*OUT*/
1286 {
1287         int ret;
1288         struct ehca_mr *e_mr;
1289         u64 *iova_start;
1290         u64 size_maxmr;
1291         struct ehca_mr_pginfo pginfo;
1292         u32 num_kpages;
1293         u32 num_hwpages;
1294         u64 hw_pgsize;
1295
1296         if (!ehca_bmap) {
1297                 ret = -EFAULT;
1298                 goto ehca_reg_internal_maxmr_exit0;
1299         }
1300
1301         e_mr = ehca_mr_new();
1302         if (!e_mr) {
1303                 ehca_err(&shca->ib_device, "out of memory");
1304                 ret = -ENOMEM;
1305                 goto ehca_reg_internal_maxmr_exit0;
1306         }
1307         e_mr->flags |= EHCA_MR_FLAG_MAXMR;
1308
1309         /* register internal max-MR on HCA */
1310         size_maxmr = ehca_mr_len;
1311         iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
1312         num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
1313                                 PAGE_SIZE);
1314         hw_pgsize = ehca_get_max_hwpage_size(shca);
1315         num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
1316                                  hw_pgsize);
1317
1318         memset(&pginfo, 0, sizeof(pginfo));
1319         pginfo.type = EHCA_MR_PGI_PHYS;
1320         pginfo.num_kpages = num_kpages;
1321         pginfo.num_hwpages = num_hwpages;
1322         pginfo.hwpage_size = hw_pgsize;
1323         pginfo.u.phy.addr = 0;
1324         pginfo.u.phy.size = size_maxmr;
1325
1326         ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
1327                           &pginfo, &e_mr->ib.ib_mr.lkey,
1328                           &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
1329         if (ret) {
1330                 ehca_err(&shca->ib_device, "reg of internal max MR failed, "
1331                          "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
1332                          "num_hwpages=%x", e_mr, iova_start, size_maxmr,
1333                          num_kpages, num_hwpages);
1334                 goto ehca_reg_internal_maxmr_exit1;
1335         }
1336
1337         /* successful registration of all pages */
1338         e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
1339         e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
1340         e_mr->ib.ib_mr.uobject = NULL;
1341         atomic_inc(&(e_pd->ib_pd.usecnt));
1342         *e_maxmr = e_mr;
1343         return 0;
1344
1345 ehca_reg_internal_maxmr_exit1:
1346         ehca_mr_delete(e_mr);
1347 ehca_reg_internal_maxmr_exit0:
1348         if (ret)
1349                 ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
1350                          ret, shca, e_pd, e_maxmr);
1351         return ret;
1352 } /* end ehca_reg_internal_maxmr() */
1353
1354 /*----------------------------------------------------------------------*/
1355
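/*
 * Register a shared MR based on the internal max-MR; the new MR inherits
 * size and page counts from the original max-MR and is bound to e_pd.
 */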
1356 int ehca_reg_maxmr(struct ehca_shca *shca,
1357                    struct ehca_mr *e_newmr,
1358                    u64 *iova_start,
1359                    int acl,
1360                    struct ehca_pd *e_pd,
1361                    u32 *lkey,
1362                    u32 *rkey)
1363 {
1364         u64 h_ret;
1365         struct ehca_mr *e_origmr = shca->maxmr;
1366         u32 hipz_acl;
1367         struct ehca_mr_hipzout_parms hipzout;
1368
1369         ehca_mrmw_map_acl(acl, &hipz_acl);
1370         ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
1371
1372         h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1373                                     (u64)iova_start, hipz_acl, e_pd->fw_pd,
1374                                     &hipzout);
1375         if (h_ret != H_SUCCESS) {
1376                 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
1377                          "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
1378                          h_ret, e_origmr, shca->ipz_hca_handle.handle,
1379                          e_origmr->ipz_mr_handle.handle,
1380                          e_origmr->ib.ib_mr.lkey);
1381                 return ehca2ib_return_code(h_ret);
1382         }
1383         /* successful registration */
1384         e_newmr->num_kpages = e_origmr->num_kpages;
1385         e_newmr->num_hwpages = e_origmr->num_hwpages;
1386         e_newmr->hwpage_size = e_origmr->hwpage_size;
1387         e_newmr->start = iova_start;
1388         e_newmr->size = e_origmr->size;
1389         e_newmr->acl = acl;
1390         e_newmr->ipz_mr_handle = hipzout.handle;
1391         *lkey = hipzout.lkey;
1392         *rkey = hipzout.rkey;
1393         return 0;
1394 } /* end ehca_reg_maxmr() */
1395
1396 /*----------------------------------------------------------------------*/
1397
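/*
 * Deregister the internal max-MR of the SHCA and drop the reference it
 * holds on its protection domain.
 */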
1398 int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
1399 {
1400         int ret;
1401         struct ehca_mr *e_maxmr;
1402         struct ib_pd *ib_pd;
1403
1404         if (!shca->maxmr) {
1405                 ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
1406                 ret = -EINVAL;
1407                 goto ehca_dereg_internal_maxmr_exit0;
1408         }
1409
1410         e_maxmr = shca->maxmr;
1411         ib_pd = e_maxmr->ib.ib_mr.pd;
1412         shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
1413
1414         ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
1415         if (ret) {
1416                 ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
1417                          "ret=%i e_maxmr=%p shca=%p lkey=%x",
1418                          ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
1419                 shca->maxmr = e_maxmr;
1420                 goto ehca_dereg_internal_maxmr_exit0;
1421         }
1422
1423         atomic_dec(&ib_pd->usecnt);
1424
1425 ehca_dereg_internal_maxmr_exit0:
1426         if (ret)
1427                 ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
1428                          ret, shca, shca->maxmr);
1429         return ret;
1430 } /* end ehca_dereg_internal_maxmr() */
1431
1432 /*----------------------------------------------------------------------*/
1433
1434 /* check the page list of the map FMR verb for validity */
1435 int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
1436                              u64 *page_list,
1437                              int list_len)
1438 {
1439         u32 i;
1440         u64 *page;
1441
1442         if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
1443                 ehca_gen_err("bad list_len, list_len=%x "
1444                              "e_fmr->fmr_max_pages=%x fmr=%p",
1445                              list_len, e_fmr->fmr_max_pages, e_fmr);
1446                 return -EINVAL;
1447         }
1448
1449         /* each page must be aligned */
1450         page = page_list;
1451         for (i = 0; i < list_len; i++) {
1452                 if (*page % e_fmr->fmr_page_size) {
1453                         ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
1454                                      "fmr_page_size=%x", i, *page, page, e_fmr,
1455                                      e_fmr->fmr_page_size);
1456                         return -EINVAL;
1457                 }
1458                 page++;
1459         }
1460
1461         return 0;
1462 } /* end ehca_fmr_check_page_list() */
1463
1464 /*----------------------------------------------------------------------*/
1465
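/*
 * Fill the kpage buffer from the umem scatterlist for the case where a
 * kernel page holds one or more hw pages.
 */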
1466 /* PAGE_SIZE >= pginfo->hwpage_size */
1467 static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
1468                                   u32 number,
1469                                   u64 *kpage)
1470 {
1471         int ret = 0;
1472         u64 pgaddr;
1473         u32 j = 0;
1474         int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
1475         struct scatterlist **sg = &pginfo->u.usr.next_sg;
1476
1477         while (*sg != NULL) {
1478                 pgaddr = page_to_pfn(sg_page(*sg))
1479                         << PAGE_SHIFT;
1480                 *kpage = pgaddr + (pginfo->next_hwpage *
1481                                    pginfo->hwpage_size);
1482                 if (!(*kpage)) {
1483                         ehca_gen_err("pgaddr=%llx "
1484                                      "sg_dma_address=%llx "
1485                                      "entry=%llx next_hwpage=%llx",
1486                                      pgaddr, (u64)sg_dma_address(*sg),
1487                                      pginfo->u.usr.next_nmap,
1488                                      pginfo->next_hwpage);
1489                         return -EFAULT;
1490                 }
1491                 (pginfo->hwpage_cnt)++;
1492                 (pginfo->next_hwpage)++;
1493                 kpage++;
1494                 if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
1495                         (pginfo->kpage_cnt)++;
1496                         (pginfo->u.usr.next_nmap)++;
1497                         pginfo->next_hwpage = 0;
1498                         *sg = sg_next(*sg);
1499                 }
1500                 j++;
1501                 if (j >= number)
1502                         break;
1503         }
1504
1505         return ret;
1506 }
1507
1508 /*
1509  * check that the given pages are laid out contiguously;
1510  * the last page address is returned in prev_pgaddr for the next check
1511  */
1512 static int ehca_check_kpages_per_ate(struct scatterlist **sg,
1513                                      int num_pages,
1514                                      u64 *prev_pgaddr)
1515 {
1516         for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) {
1517                 u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT;
1518                 if (ehca_debug_level >= 3)
1519                         ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
1520                                      *(u64 *)__va(pgaddr));
1521                 if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
1522                         ehca_gen_err("uncontiguous page found pgaddr=%llx "
1523                                      "prev_pgaddr=%llx entries_left_in_hwpage=%x",
1524                                      pgaddr, *prev_pgaddr, num_pages);
1525                         return -EINVAL;
1526                 }
1527                 *prev_pgaddr = pgaddr;
1528         }
1529         return 0;
1530 }
1531
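/*
 * Fill the kpage buffer from the umem scatterlist for the case where one
 * hw page spans several kernel pages, which must then be physically
 * contiguous.
 */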
1532 /* PAGE_SIZE < pginfo->hwpage_size */
1533 static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
1534                                   u32 number,
1535                                   u64 *kpage)
1536 {
1537         int ret = 0;
1538         u64 pgaddr, prev_pgaddr;
1539         u32 j = 0;
1540         int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
1541         int nr_kpages = kpages_per_hwpage;
1542         struct scatterlist **sg = &pginfo->u.usr.next_sg;
1543
1544         while (*sg != NULL) {
1545
1546                 if (nr_kpages == kpages_per_hwpage) {
1547                         pgaddr = (page_to_pfn(sg_page(*sg))
1548                                    << PAGE_SHIFT);
1549                         *kpage = pgaddr;
1550                         if (!(*kpage)) {
1551                                 ehca_gen_err("pgaddr=%llx entry=%llx",
1552                                              pgaddr, pginfo->u.usr.next_nmap);
1553                                 ret = -EFAULT;
1554                                 return ret;
1555                         }
1556                         /*
1557                          * The first page in a hwpage must be aligned;
1558                          * the first MR page is exempt from this rule.
1559                          */
1560                         if (pgaddr & (pginfo->hwpage_size - 1)) {
1561                                 if (pginfo->hwpage_cnt) {
1562                                         ehca_gen_err(
1563                                                 "invalid alignment "
1564                                                 "pgaddr=%llx entry=%llx "
1565                                                 "mr_pgsize=%llx",
1566                                                 pgaddr, pginfo->u.usr.next_nmap,
1567                                                 pginfo->hwpage_size);
1568                                         ret = -EFAULT;
1569                                         return ret;
1570                                 }
1571                                 /* first MR page */
1572                                 pginfo->kpage_cnt =
1573                                         (pgaddr &
1574                                          (pginfo->hwpage_size - 1)) >>
1575                                         PAGE_SHIFT;
1576                                 nr_kpages -= pginfo->kpage_cnt;
1577                                 *kpage = pgaddr &
1578                                          ~(pginfo->hwpage_size - 1);
1579                         }
1580                         if (ehca_debug_level >= 3) {
1581                                 u64 val = *(u64 *)__va(pgaddr);
1582                                 ehca_gen_dbg("kpage=%llx page=%llx "
1583                                              "value=%016llx",
1584                                              *kpage, pgaddr, val);
1585                         }
1586                         prev_pgaddr = pgaddr;
1587                         *sg = sg_next(*sg);
1588                         pginfo->kpage_cnt++;
1589                         pginfo->u.usr.next_nmap++;
1590                         nr_kpages--;
1591                         if (!nr_kpages)
1592                                 goto next_kpage;
1593                         continue;
1594                 }
1595
1596                 ret = ehca_check_kpages_per_ate(sg, nr_kpages,
1597                                                 &prev_pgaddr);
1598                 if (ret)
1599                         return ret;
1600                 pginfo->kpage_cnt += nr_kpages;
1601                 pginfo->u.usr.next_nmap += nr_kpages;
1602
1603 next_kpage:
1604                 nr_kpages = kpages_per_hwpage;
1605                 (pginfo->hwpage_cnt)++;
1606                 kpage++;
1607                 j++;
1608                 if (j >= number)
1609                         break;
1610         }
1611
1612         return ret;
1613 }
1614
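/*
 * Fill the kpage buffer for a physically contiguous region described by
 * pginfo->u.phy, advancing the hwpage and kpage counters as it goes.
 */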
1615 static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
1616                                  u32 number, u64 *kpage)
1617 {
1618         int ret = 0;
1619         u64 addr = pginfo->u.phy.addr;
1620         u64 size = pginfo->u.phy.size;
1621         u64 num_hw, offs_hw;
1622         u32 i = 0;
1623
1624         num_hw  = NUM_CHUNKS((addr % pginfo->hwpage_size) + size,
1625                                 pginfo->hwpage_size);
1626         offs_hw = (addr & ~(pginfo->hwpage_size - 1)) / pginfo->hwpage_size;
1627
1628         while (pginfo->next_hwpage < offs_hw + num_hw) {
1629                 /* sanity check */
1630                 if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
1631                     (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
1632                         ehca_gen_err("kpage_cnt >= num_kpages, "
1633                                      "kpage_cnt=%llx num_kpages=%llx "
1634                                      "hwpage_cnt=%llx "
1635                                      "num_hwpages=%llx i=%x",
1636                                      pginfo->kpage_cnt,
1637                                      pginfo->num_kpages,
1638                                      pginfo->hwpage_cnt,
1639                                      pginfo->num_hwpages, i);
1640                         return -EFAULT;
1641                 }
1642                 *kpage = (addr & ~(pginfo->hwpage_size - 1)) +
1643                          (pginfo->next_hwpage * pginfo->hwpage_size);
1644                 if (!(*kpage) && addr) {
1645                         ehca_gen_err("addr=%llx size=%llx "
1646                                      "next_hwpage=%llx", addr,
1647                                      size, pginfo->next_hwpage);
1648                         return -EFAULT;
1649                 }
1650                 (pginfo->hwpage_cnt)++;
1651                 (pginfo->next_hwpage)++;
1652                 if (PAGE_SIZE >= pginfo->hwpage_size) {
1653                         if (pginfo->next_hwpage %
1654                             (PAGE_SIZE / pginfo->hwpage_size) == 0)
1655                                 (pginfo->kpage_cnt)++;
1656                 } else
1657                         pginfo->kpage_cnt += pginfo->hwpage_size /
1658                                 PAGE_SIZE;
1659                 kpage++;
1660                 i++;
1661                 if (i >= number) break;
1662         }
1663         if (pginfo->next_hwpage >= offs_hw + num_hw) {
1664                 pginfo->next_hwpage = 0;
1665         }
1666
1667         return ret;
1668 }
1669
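/*
 * Fill the kpage buffer from the FMR page list; if the FMR page size is
 * smaller than the hw page size, the listed pages must be contiguous.
 */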
1670 static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
1671                                 u32 number, u64 *kpage)
1672 {
1673         int ret = 0;
1674         u64 *fmrlist;
1675         u32 i;
1676
1677         /* loop over desired page_list entries */
1678         fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
1679         for (i = 0; i < number; i++) {
1680                 *kpage = (*fmrlist & ~(pginfo->hwpage_size - 1)) +
1681                            pginfo->next_hwpage * pginfo->hwpage_size;
1682                 if (!(*kpage)) {
1683                         ehca_gen_err("*fmrlist=%llx fmrlist=%p "
1684                                      "next_listelem=%llx next_hwpage=%llx",
1685                                      *fmrlist, fmrlist,
1686                                      pginfo->u.fmr.next_listelem,
1687                                      pginfo->next_hwpage);
1688                         return -EFAULT;
1689                 }
1690                 (pginfo->hwpage_cnt)++;
1691                 if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
1692                         if (pginfo->next_hwpage %
1693                             (pginfo->u.fmr.fmr_pgsize /
1694                              pginfo->hwpage_size) == 0) {
1695                                 (pginfo->kpage_cnt)++;
1696                                 (pginfo->u.fmr.next_listelem)++;
1697                                 fmrlist++;
1698                                 pginfo->next_hwpage = 0;
1699                         } else
1700                                 (pginfo->next_hwpage)++;
1701                 } else {
1702                         unsigned int cnt_per_hwpage = pginfo->hwpage_size /
1703                                 pginfo->u.fmr.fmr_pgsize;
1704                         unsigned int j;
1705                         u64 prev = *kpage;
1706                         /* check if adrs are contiguous */
1707                         for (j = 1; j < cnt_per_hwpage; j++) {
1708                                 u64 p = fmrlist[j] & ~(pginfo->hwpage_size - 1);
1709                                 if (prev + pginfo->u.fmr.fmr_pgsize != p) {
1710                                         ehca_gen_err("uncontiguous fmr pages "
1711                                                      "found prev=%llx p=%llx "
1712                                                      "idx=%x", prev, p, i + j);
1713                                         return -EINVAL;
1714                                 }
1715                                 prev = p;
1716                         }
1717                         pginfo->kpage_cnt += cnt_per_hwpage;
1718                         pginfo->u.fmr.next_listelem += cnt_per_hwpage;
1719                         fmrlist += cnt_per_hwpage;
1720                 }
1721                 kpage++;
1722         }
1723         return ret;
1724 }
1725
1726 /* setup page buffer from page info */
1727 int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
1728                      u32 number,
1729                      u64 *kpage)
1730 {
1731         int ret;
1732
1733         switch (pginfo->type) {
1734         case EHCA_MR_PGI_PHYS:
1735                 ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
1736                 break;
1737         case EHCA_MR_PGI_USER:
1738                 ret = PAGE_SIZE >= pginfo->hwpage_size ?
1739                         ehca_set_pagebuf_user1(pginfo, number, kpage) :
1740                         ehca_set_pagebuf_user2(pginfo, number, kpage);
1741                 break;
1742         case EHCA_MR_PGI_FMR:
1743                 ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
1744                 break;
1745         default:
1746                 ehca_gen_err("bad pginfo->type=%x", pginfo->type);
1747                 ret = -EFAULT;
1748                 break;
1749         }
1750         return ret;
1751 } /* end ehca_set_pagebuf() */
1752
1753 /*----------------------------------------------------------------------*/
1754
1755 /*
1756  * check whether an MR is a max-MR, i.e. one that spans all of memory;
1757  * returns 1 if it is a max-MR, else 0
1758  */
1759 int ehca_mr_is_maxmr(u64 size,
1760                      u64 *iova_start)
1761 {
1762         /* an MR is treated as a max-MR only if it meets the following: */
1763         if ((size == ehca_mr_len) &&
1764             (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) {
1765                 ehca_gen_dbg("this is a max-MR");
1766                 return 1;
1767         } else
1768                 return 0;
1769 } /* end ehca_mr_is_maxmr() */
1770
1771 /*----------------------------------------------------------------------*/
1772
1773 /* map access control for MR/MW. This routine is used for MR and MW. */
1774 void ehca_mrmw_map_acl(int ib_acl,
1775                        u32 *hipz_acl)
1776 {
1777         *hipz_acl = 0;
1778         if (ib_acl & IB_ACCESS_REMOTE_READ)
1779                 *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
1780         if (ib_acl & IB_ACCESS_REMOTE_WRITE)
1781                 *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
1782         if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
1783                 *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
1784         if (ib_acl & IB_ACCESS_LOCAL_WRITE)
1785                 *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
1786         if (ib_acl & IB_ACCESS_MW_BIND)
1787                 *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
1788 } /* end ehca_mrmw_map_acl() */
1789
1790 /*----------------------------------------------------------------------*/
1791
1792 /* sets page size in hipz access control for MR/MW. */
1793 void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
1794 {
1795         *hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
1796 } /* end ehca_mrmw_set_pgsize_hipz_acl() */
1797
1798 /*----------------------------------------------------------------------*/
1799
1800 /*
1801  * reverse map access control for MR/MW.
1802  * This routine is used for MR and MW.
1803  */
1804 void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
1805                                int *ib_acl) /*OUT*/
1806 {
1807         *ib_acl = 0;
1808         if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
1809                 *ib_acl |= IB_ACCESS_REMOTE_READ;
1810         if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
1811                 *ib_acl |= IB_ACCESS_REMOTE_WRITE;
1812         if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
1813                 *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
1814         if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
1815                 *ib_acl |= IB_ACCESS_LOCAL_WRITE;
1816         if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
1817                 *ib_acl |= IB_ACCESS_MW_BIND;
1818 } /* end ehca_mrmw_reverse_map_acl() */
1819
1820
1821 /*----------------------------------------------------------------------*/
1822
1823 /*
1824  * MR destructor and constructor:
1825  * used by the reregister MR verb; resets all fields of struct ehca_mr
1826  * to 0 except the embedded struct ib_mr and the spinlock
1827  */
1828 void ehca_mr_deletenew(struct ehca_mr *mr)
1829 {
1830         mr->flags = 0;
1831         mr->num_kpages = 0;
1832         mr->num_hwpages = 0;
1833         mr->acl = 0;
1834         mr->start = NULL;
1835         mr->fmr_page_size = 0;
1836         mr->fmr_max_pages = 0;
1837         mr->fmr_max_maps = 0;
1838         mr->fmr_map_cnt = 0;
1839         memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
1840         memset(&mr->galpas, 0, sizeof(mr->galpas));
1841 } /* end ehca_mr_deletenew() */
1842
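/* create the slab caches used for MR and MW objects */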
1843 int ehca_init_mrmw_cache(void)
1844 {
1845         mr_cache = kmem_cache_create("ehca_cache_mr",
1846                                      sizeof(struct ehca_mr), 0,
1847                                      SLAB_HWCACHE_ALIGN,
1848                                      NULL);
1849         if (!mr_cache)
1850                 return -ENOMEM;
1851         mw_cache = kmem_cache_create("ehca_cache_mw",
1852                                      sizeof(struct ehca_mw), 0,
1853                                      SLAB_HWCACHE_ALIGN,
1854                                      NULL);
1855         if (!mw_cache) {
1856                 kmem_cache_destroy(mr_cache);
1857                 mr_cache = NULL;
1858                 return -ENOMEM;
1859         }
1860         return 0;
1861 }
1862
1863 void ehca_cleanup_mrmw_cache(void)
1864 {
1865         kmem_cache_destroy(mr_cache);
1866         kmem_cache_destroy(mw_cache);
1867 }
1868
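/*
 * Allocate the dir-level block below a top-level bus-map entry, if it
 * does not exist yet, and mark all of its entries invalid.
 */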
1869 static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
1870                                      int dir)
1871 {
1872         if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
1873                 ehca_top_bmap->dir[dir] =
1874                         kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
1875                 if (!ehca_top_bmap->dir[dir])
1876                         return -ENOMEM;
1877                 /* Initialize the map block to 0xFF, i.e. EHCA_INVAL_ADDR */
1878                 memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
1879         }
1880         return 0;
1881 }
1882
1883 static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
1884 {
1885         if (!ehca_bmap_valid(ehca_bmap->top[top])) {
1886                 ehca_bmap->top[top] =
1887                         kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
1888                 if (!ehca_bmap->top[top])
1889                         return -ENOMEM;
1890                 /* Initialize the map block to 0xFF, i.e. EHCA_INVAL_ADDR */
1891                 memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
1892         }
1893         return ehca_init_top_bmap(ehca_bmap->top[top], dir);
1894 }
1895
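/* extract one bus-map index level from a section number or address */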
1896 static inline int ehca_calc_index(unsigned long i, unsigned long s)
1897 {
1898         return (i >> s) & EHCA_INDEX_MASK;
1899 }
1900
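/* free all allocated blocks of the toleration bus map */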
1901 void ehca_destroy_busmap(void)
1902 {
1903         int top, dir;
1904
1905         if (!ehca_bmap)
1906                 return;
1907
1908         for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
1909                 if (!ehca_bmap_valid(ehca_bmap->top[top]))
1910                         continue;
1911                 for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
1912                         if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
1913                                 continue;
1914
1915                         kfree(ehca_bmap->top[top]->dir[dir]);
1916                 }
1917
1918                 kfree(ehca_bmap->top[top]);
1919         }
1920
1921         kfree(ehca_bmap);
1922         ehca_bmap = NULL;
1923 }
1924
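/*
 * Enter a range of system RAM into the bus map: every memory section the
 * range covers is assigned the next free bus address, and ehca_mr_len
 * grows by one section size per entry.
 */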
1925 static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
1926 {
1927         unsigned long i, start_section, end_section;
1928         int top, dir, idx;
1929
1930         if (!nr_pages)
1931                 return 0;
1932
1933         if (!ehca_bmap) {
1934                 ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
1935                 if (!ehca_bmap)
1936                         return -ENOMEM;
1937                 /* Initialize the map block to 0xFF, i.e. EHCA_INVAL_ADDR */
1938                 memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
1939         }
1940
1941         start_section = (pfn * PAGE_SIZE) / EHCA_SECTSIZE;
1942         end_section = ((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
1943         for (i = start_section; i < end_section; i++) {
1944                 int ret;
1945                 top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
1946                 dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
1947                 idx = i & EHCA_INDEX_MASK;
1948
1949                 ret = ehca_init_bmap(ehca_bmap, top, dir);
1950                 if (ret) {
1951                         ehca_destroy_busmap();
1952                         return ret;
1953                 }
1954                 ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
1955                 ehca_mr_len += EHCA_SECTSIZE;
1956         }
1957         return 0;
1958 }
1959
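/* check whether pfn is the aligned start of a 16GB hugepage */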
1960 static int ehca_is_hugepage(unsigned long pfn)
1961 {
1962         int page_order;
1963
1964         if (pfn & EHCA_HUGEPAGE_PFN_MASK)
1965                 return 0;
1966
1967         page_order = compound_order(pfn_to_page(pfn));
1968         if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
1969                 return 0;
1970
1971         return 1;
1972 }
1973
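/*
 * Callback for walk_system_ram_range(): enter regular memory into the
 * bus map while skipping over 16GB hugepages.
 */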
1974 static int ehca_create_busmap_callback(unsigned long initial_pfn,
1975                                        unsigned long total_nr_pages, void *arg)
1976 {
1977         int ret;
1978         unsigned long pfn, start_pfn, end_pfn, nr_pages;
1979
1980         if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
1981                 return ehca_update_busmap(initial_pfn, total_nr_pages);
1982
1983         /* Given chunk is >= 16GB -> check for hugepages */
1984         start_pfn = initial_pfn;
1985         end_pfn = initial_pfn + total_nr_pages;
1986         pfn = start_pfn;
1987
1988         while (pfn < end_pfn) {
1989                 if (ehca_is_hugepage(pfn)) {
1990                         /* Add mem found in front of the hugepage */
1991                         nr_pages = pfn - start_pfn;
1992                         ret = ehca_update_busmap(start_pfn, nr_pages);
1993                         if (ret)
1994                                 return ret;
1995                         /* Skip the hugepage */
1996                         pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
1997                         start_pfn = pfn;
1998                 } else
1999                         pfn += (EHCA_SECTSIZE / PAGE_SIZE);
2000         }
2001
2002         /* Add mem found behind the hugepage(s)  */
2003         nr_pages = pfn - start_pfn;
2004         return ehca_update_busmap(start_pfn, nr_pages);
2005 }
2006
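/* build the toleration bus map by walking all of system RAM */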
2007 int ehca_create_busmap(void)
2008 {
2009         int ret;
2010
2011         ehca_mr_len = 0;
2012         ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
2013                                    ehca_create_busmap_callback);
2014         return ret;
2015 }
2016
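/*
 * Register all memory represented in the bus map as rpages of the given
 * MR, one top-level block at a time.
 */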
2017 static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
2018                                    struct ehca_mr *e_mr,
2019                                    struct ehca_mr_pginfo *pginfo)
2020 {
2021         int top;
2022         u64 hret = H_SUCCESS, *kpage;
2023
2024         kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
2025         if (!kpage) {
2026                 ehca_err(&shca->ib_device, "kpage alloc failed");
2027                 return -ENOMEM;
2028         }
2029         for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
2030                 if (!ehca_bmap_valid(ehca_bmap->top[top]))
2031                         continue;
2032                 hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
2033                 if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
2034                         break;
2035         }
2036
2037         ehca_free_fw_ctrlblock(kpage);
2038
2039         if (hret == H_SUCCESS)
2040                 return 0; /* Everything is fine */
2041 
2042         ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
2043                  "h_ret=%lli e_mr=%p top=%x lkey=%x "
2044                  "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
2045                  e_mr->ib.ib_mr.lkey,
2046                  shca->ipz_hca_handle.handle,
2047                  e_mr->ipz_mr_handle.handle);
2048         return ehca2ib_return_code(hret);
2050 }
2051
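/*
 * Translate a kernel virtual address into the bus address assigned in the
 * bus map; returns EHCA_INVAL_ADDR if the address is not mapped.
 */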
2052 static u64 ehca_map_vaddr(void *caddr)
2053 {
2054         int top, dir, idx;
2055         unsigned long abs_addr, offset;
2056         u64 entry;
2057
2058         if (!ehca_bmap)
2059                 return EHCA_INVAL_ADDR;
2060
2061         abs_addr = __pa(caddr);
2062         top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
2063         if (!ehca_bmap_valid(ehca_bmap->top[top]))
2064                 return EHCA_INVAL_ADDR;
2065
2066         dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
2067         if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
2068                 return EHCA_INVAL_ADDR;
2069
2070         idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);
2071
2072         entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
2073         if (ehca_bmap_valid(entry)) {
2074                 offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
2075                 return entry | offset;
2076         } else
2077                 return EHCA_INVAL_ADDR;
2078 }
2079
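/*
 * Software DMA mapping operations: map operations translate CPU addresses
 * through the toleration bus map, so the corresponding unmap operations
 * are no-ops.
 */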
2080 static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
2081 {
2082         return dma_addr == EHCA_INVAL_ADDR;
2083 }
2084
2085 static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
2086                                size_t size, enum dma_data_direction direction)
2087 {
2088         if (cpu_addr)
2089                 return ehca_map_vaddr(cpu_addr);
2090         else
2091                 return EHCA_INVAL_ADDR;
2092 }
2093
2094 static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
2095                                   enum dma_data_direction direction)
2096 {
2097         /* This is only a stub; nothing to be done here */
2098 }
2099
2100 static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
2101                              unsigned long offset, size_t size,
2102                              enum dma_data_direction direction)
2103 {
2104         u64 addr;
2105
2106         if (offset + size > PAGE_SIZE)
2107                 return EHCA_INVAL_ADDR;
2108
2109         addr = ehca_map_vaddr(page_address(page));
2110         if (!ehca_dma_mapping_error(dev, addr))
2111                 addr += offset;
2112
2113         return addr;
2114 }
2115
2116 static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
2117                                 enum dma_data_direction direction)
2118 {
2119         /* This is only a stub; nothing to be done here */
2120 }
2121
2122 static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
2123                            int nents, enum dma_data_direction direction)
2124 {
2125         struct scatterlist *sg;
2126         int i;
2127
2128         for_each_sg(sgl, sg, nents, i) {
2129                 u64 addr;
2130                 addr = ehca_map_vaddr(sg_virt(sg));
2131                 if (ehca_dma_mapping_error(dev, addr))
2132                         return 0;
2133
2134                 sg->dma_address = addr;
2135                 sg->dma_length = sg->length;
2136         }
2137         return nents;
2138 }
2139
2140 static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
2141                               int nents, enum dma_data_direction direction)
2142 {
2143         /* This is only a stub; nothing to be done here */
2144 }
2145
2146 static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
2147                                          size_t size,
2148                                          enum dma_data_direction dir)
2149 {
2150         dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
2151 }
2152
2153 static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
2154                                             size_t size,
2155                                             enum dma_data_direction dir)
2156 {
2157         dma_sync_single_for_device(dev->dma_device, addr, size, dir);
2158 }
2159
2160 static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
2161                                      u64 *dma_handle, gfp_t flag)
2162 {
2163         struct page *p;
2164         void *addr = NULL;
2165         u64 dma_addr;
2166
2167         p = alloc_pages(flag, get_order(size));
2168         if (p) {
2169                 addr = page_address(p);
2170                 dma_addr = ehca_map_vaddr(addr);
2171                 if (ehca_dma_mapping_error(dev, dma_addr)) {
2172                         free_pages((unsigned long)addr, get_order(size));
2173                         return NULL;
2174                 }
2175                 if (dma_handle)
2176                         *dma_handle = dma_addr;
2177                 return addr;
2178         }
2179         return NULL;
2180 }
2181
2182 static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
2183                                    void *cpu_addr, u64 dma_handle)
2184 {
2185         if (cpu_addr && size)
2186                 free_pages((unsigned long)cpu_addr, get_order(size));
2187 }
2188
2189
2190 struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
2191         .mapping_error          = ehca_dma_mapping_error,
2192         .map_single             = ehca_dma_map_single,
2193         .unmap_single           = ehca_dma_unmap_single,
2194         .map_page               = ehca_dma_map_page,
2195         .unmap_page             = ehca_dma_unmap_page,
2196         .map_sg                 = ehca_dma_map_sg,
2197         .unmap_sg               = ehca_dma_unmap_sg,
2198         .sync_single_for_cpu    = ehca_dma_sync_single_for_cpu,
2199         .sync_single_for_device = ehca_dma_sync_single_for_device,
2200         .alloc_coherent         = ehca_dma_alloc_coherent,
2201         .free_coherent          = ehca_dma_free_coherent,
2202 };