/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 2934 2005-07-29 17:31:49Z roland $
 */

#include <ib_verbs.h>
#include <ib_cache.h>
#include "mthca_dev.h"
#include "mx_abi.h"

#if defined(EVENT_TRACING)
#ifdef offsetof
#undef offsetof
#endif
#include "mt_verbs.tmh"
#endif

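/*
 * ibv_um_close - final cleanup for a user-mode HCA context.
 * Marks the context as being removed, refuses to proceed while CQ/QP/AV
 * resources still hold references on it (usecnt != 0), then frees the
 * context's PD and the mthca ucontext itself.
 */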
void ibv_um_close(struct ib_ucontext *h_um_ca)
{
        int err;
        ib_api_status_t status;
        struct ib_ucontext *context_p = (struct ib_ucontext *)h_um_ca;
        PREP_IBDEV_FOR_PRINT(context_p->device)

        HCA_ENTER(HCA_DBG_SHIM);

        context_p->is_removing = TRUE;

        if (atomic_read(&context_p->usecnt)) {
                HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
                        ("resources are not released (cnt %d)\n", context_p->usecnt));
                status = IB_RESOURCE_BUSY;
                goto err_usage;
        }

        err = ibv_dealloc_pd(context_p->pd);
        if (err) {
                HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
                        ("ibv_dealloc_pd failed (%d)\n", err));
                status = errno_to_iberr(err);
        }

        err = mthca_dealloc_ucontext(context_p);
        if (err) {
                HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
                        ("mthca_dealloc_ucontext failed (%d)\n", err));
                status = errno_to_iberr(err);
                goto err_dealloc_ucontext;
        }

        HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,
                ("pcs %p\n", PsGetCurrentProcess()));
        status = IB_SUCCESS;
        goto end;

err_dealloc_ucontext:
err_usage:
end:
        HCA_PRINT_EXIT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
                ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
        return;
}

/* Protection domains */

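/*
 * A PD keeps a reference count (usecnt) of the AHs, QPs, CQs, MRs and MWs
 * created on it; ibv_dealloc_pd() refuses to destroy a PD that is still
 * referenced and returns -EBUSY.
 *
 * Illustrative call sequence (sketch only; the variable names are
 * placeholders and error handling is elided):
 *
 *      pd = ibv_alloc_pd(ibdev, ucontext, p_umv_buf);
 *      if (!IS_ERR(pd)) {
 *              ... create QPs/CQs/MRs on pd ...
 *              err = ibv_dealloc_pd(pd);   // -EBUSY while still referenced
 *      }
 */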
struct ib_pd *ibv_alloc_pd(struct ib_device *device,
        struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf)
{
        struct ib_pd *pd;

        pd = device->alloc_pd(device, context, p_umv_buf);

        if (!IS_ERR(pd)) {
                pd->device  = device;
                pd->ucontext = context;
                atomic_set(&pd->usecnt, 0);
                HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_CQ, ("PD%d use cnt %d, pd_handle %p, ctx %p \n",
                        ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
        }

        return pd;
}

int ibv_dealloc_pd(struct ib_pd *pd)
{
        if (atomic_read(&pd->usecnt)) {
                HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_CQ, ("resources are not released (cnt %d)\n", pd->usecnt));
                return -EBUSY;
        }

        HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_CQ, ("PD%d use cnt %d, pd_handle %p, ctx %p \n",
                ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
        return pd->device->dealloc_pd(pd);
}

/* Address handles */

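/*
 * ibv_create_ah - create an address handle on a PD.
 * For user-mode calls (context != NULL and a umv buffer is supplied) the
 * ibv_create_ah command carries the parameters of the user buffer for the
 * AV; that region is registered as an MR first, and its keys and handle
 * are returned to user mode in ibv_create_ah_resp.
 */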
struct ib_ah *ibv_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
        struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf)
{
        int err;
        struct ib_ah *ah;
        struct ib_mr *ib_mr = NULL;
        u64 start = 0;

        // for user-mode calls we also need to register an MR
        if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
                struct ibv_create_ah *create_ah = (struct ibv_create_ah *)(void*)p_umv_buf->p_inout_buf;

                // register the user memory region
                ib_mr = ibv_reg_mr(
                        pd,
                        create_ah->mr.access_flags,
                        (void*)(ULONG_PTR)create_ah->mr.start,
                        create_ah->mr.length, create_ah->mr.hca_va, TRUE);
                if (IS_ERR(ib_mr)) {
                        err = PTR_ERR(ib_mr);
                        HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_AV, ("ibv_reg_mr failed (%d)\n", err));
                        goto err_alloc_mr;
                }

                start = create_ah->mr.start;
        }

        ah = pd->device->create_ah(pd, ah_attr);

        if (IS_ERR(ah)) {
                err = PTR_ERR(ah);
                HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_AV, ("create_ah failed (%d)\n", err));
                goto err_create_ah;
        }

        // fill results
        ah->device  = pd->device;
        ah->pd      = pd;
        ah->ucontext = context;
        atomic_inc(&pd->usecnt);
        HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_AV, ("PD%d use cnt %d, pd_handle %p, ctx %p \n",
                ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
        if (context)
                atomic_inc(&context->usecnt);

        // fill results for user
        if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
                struct ibv_create_ah_resp *create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf;
                ah->ib_mr = ib_mr;
                create_ah_resp->start = start;
                create_ah_resp->mr.lkey = ib_mr->lkey;
                create_ah_resp->mr.rkey = ib_mr->rkey;
                create_ah_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr;
                p_umv_buf->output_size = sizeof(struct ibv_create_ah_resp);
        }

        return ah;

err_create_ah:
        if (ib_mr)
                ibv_dereg_mr(ib_mr);
err_alloc_mr:
        if (p_umv_buf && p_umv_buf->command)
                p_umv_buf->status = IB_ERROR;
        return ERR_PTR(err);
}

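/*
 * ibv_create_ah_from_wc - build an address handle that reaches the sender
 * of a received UD work completion, using the WC (and the GRH, when
 * IB_RECV_OPT_GRH_VALID is set) to fill in the ib_ah_attr.
 */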
struct ib_ah *ibv_create_ah_from_wc(struct ib_pd *pd, struct _ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num)
{
        struct ib_ah_attr ah_attr;
        u32 flow_class;
        u16 gid_index;
        int ret;

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid = wc->recv.ud.remote_lid;
        ah_attr.sl = wc->recv.ud.remote_sl;
        ah_attr.src_path_bits = wc->recv.ud.path_bits;
        ah_attr.port_num = port_num;

        if (wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID) {
                ah_attr.ah_flags = IB_AH_GRH;
                ah_attr.grh.dgid = grh->dgid;

                ret = ib_find_cached_gid(pd->device, &grh->sgid, &port_num,
                                         &gid_index);
                if (ret)
                        return ERR_PTR(ret);

                ah_attr.grh.sgid_index = (u8) gid_index;
                flow_class = cl_ntoh32(grh->version_tclass_flow);
                ah_attr.grh.flow_label = flow_class & 0xFFFFF;
                ah_attr.grh.traffic_class = (u8)((flow_class >> 20) & 0xFF);
                ah_attr.grh.hop_limit = grh->hop_limit;
        }

        return ibv_create_ah(pd, &ah_attr, NULL, NULL);
}

int ibv_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->modify_ah ?
                ah->device->modify_ah(ah, ah_attr) :
                -ENOSYS;
}

int ibv_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->query_ah ?
                ah->device->query_ah(ah, ah_attr) :
                -ENOSYS;
}

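/*
 * release_user_cq_qp_resources - undo the per-object user-mode bookkeeping:
 * deregister the MR that was created together with the CQ/QP/AV, drop the
 * ucontext reference, and finish the deferred ibv_um_close() once the last
 * reference of a context that is being removed goes away.
 */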
static void release_user_cq_qp_resources(
        struct ib_ucontext *ucontext,
        struct ib_mr *ib_mr)
{
        if (ucontext) {
                ibv_dereg_mr(ib_mr);
                atomic_dec(&ucontext->usecnt);
                if (!atomic_read(&ucontext->usecnt) && ucontext->is_removing) {
                        HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("User resources are released. Removing context\n"));
                        ibv_um_close(ucontext);
                }
        }
}

int ibv_destroy_ah(struct ib_ah *ah)
{
        struct ib_pd *pd;
        int ret;
        struct ib_ucontext *ucontext;
        struct ib_mr *ib_mr;

        HCA_ENTER(HCA_DBG_AV);
        pd = ah->pd;
        ucontext = ah->ucontext;
        ib_mr = ah->ib_mr;

        ret = ah->device->destroy_ah(ah);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_AV, ("PD%d use cnt %d, pd_handle %p, ctx %p \n",
                        ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
        }
        release_user_cq_qp_resources(ucontext, ib_mr);
        HCA_EXIT(HCA_DBG_AV);
        return ret;
}

/* Shared receive queues */

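/*
 * ibv_create_srq - create a shared receive queue on a PD.  Only the kernel
 * path is used here (no umv buffer is passed down); the PD reference is
 * taken on success and dropped again in ibv_destroy_srq().
 */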
struct ib_srq *ibv_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr)
{
        struct ib_srq *srq;

        if (!pd->device->create_srq)
                return ERR_PTR(-ENOSYS);

        srq = pd->device->create_srq(pd, srq_init_attr, NULL);

        if (!IS_ERR(srq)) {
                srq->device        = pd->device;
                srq->pd            = pd;
                srq->uobject       = NULL;
                srq->event_handler = srq_init_attr->event_handler;
                srq->srq_context   = srq_init_attr->srq_context;
                atomic_inc(&pd->usecnt);
                atomic_set(&srq->usecnt, 0);
                HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP, ("PD%d use cnt %d \n",
                        ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
        }

        return srq;
}

int ibv_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask)
{
        return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
}

int ibv_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr)
{
        return srq->device->query_srq ?
                srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}

int ibv_destroy_srq(struct ib_srq *srq)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_read(&srq->usecnt))
                return -EBUSY;

        pd = srq->pd;

        ret = srq->device->destroy_srq(srq);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM, ("PD%d use cnt %d \n",
                        ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
        }

        return ret;
}

/* Queue pairs */

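/*
 * ibv_create_qp - create a queue pair on a PD.
 * For user-mode calls the ibv_create_qp command carries the parameters of
 * the user buffer backing the QP; that buffer is registered as an MR before
 * the QP itself is created, and the resulting keys, handles and actual
 * queue sizes are returned in ibv_create_qp_resp.
 *
 * Kernel-mode sketch (illustrative only; the attribute values and variable
 * names are hypothetical and error handling is elided):
 *
 *      struct ib_qp_init_attr attr;
 *      struct ib_qp *qp;
 *
 *      RtlZeroMemory(&attr, sizeof attr);
 *      attr.send_cq = scq;
 *      attr.recv_cq = rcq;
 *      attr.qp_type = IB_QPT_UNRELIABLE_DGRM;
 *      qp = ibv_create_qp(pd, &attr, NULL, NULL);
 *      if (!IS_ERR(qp)) {
 *              ... ibv_modify_qp() through the QP state machine ...
 *              ibv_destroy_qp(qp);
 *      }
 */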
struct ib_qp *ibv_create_qp(struct ib_pd *pd,
        struct ib_qp_init_attr *qp_init_attr,
        struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf)
{
        int err;
        struct ib_qp *ib_qp;
        struct ib_mr *ib_mr = NULL;
        u64 user_handle = 0;

        HCA_ENTER(HCA_DBG_QP);

        // for user-mode calls we also need to register an MR
        if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
                struct ibv_create_qp *create_qp = (struct ibv_create_qp *)(void*)p_umv_buf->p_inout_buf;

                // register the user memory region
                ib_mr = ibv_reg_mr(
                        (struct ib_pd *)(ULONG_PTR)create_qp->mr.pd_handle,
                        create_qp->mr.access_flags,
                        (void*)(ULONG_PTR)create_qp->mr.start,
                        create_qp->mr.length, create_qp->mr.hca_va, TRUE);
                if (IS_ERR(ib_mr)) {
                        err = PTR_ERR(ib_mr);
                        HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_QP, ("ibv_reg_mr failed (%d)\n", err));
                        goto err_alloc_mr;
                }
                create_qp->lkey = ib_mr->lkey;
                user_handle = create_qp->user_handle;
        }

        ib_qp = pd->device->create_qp(pd, qp_init_attr, p_umv_buf);

        if (IS_ERR(ib_qp)) {
                err = PTR_ERR(ib_qp);
                HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_QP, ("create_qp failed (%d)\n", err));
                goto err_create_qp;
        }

        // fill results
        ib_qp->device        = pd->device;
        ib_qp->pd            = pd;
        ib_qp->send_cq       = qp_init_attr->send_cq;
        ib_qp->recv_cq       = qp_init_attr->recv_cq;
        ib_qp->srq           = qp_init_attr->srq;
        ib_qp->ucontext      = context;
        ib_qp->event_handler = qp_init_attr->event_handler;
        ib_qp->qp_context    = qp_init_attr->qp_context;
        ib_qp->qp_type       = qp_init_attr->qp_type;
        atomic_inc(&pd->usecnt);
        atomic_inc(&qp_init_attr->send_cq->usecnt);
        atomic_inc(&qp_init_attr->recv_cq->usecnt);
        if (qp_init_attr->srq)
                atomic_inc(&qp_init_attr->srq->usecnt);
        if (context)
                atomic_inc(&context->usecnt);
        HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP, ("PD%d use cnt %d, pd_handle %p, ctx %p \n",
                ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));

        HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP,
                ("uctx %p, qhndl %p, qnum %#x, q_num  %#x, scq %#x:%#x, rcq %#x:%#x \n",
                pd->ucontext, ib_qp, ((struct mthca_qp*)ib_qp)->qpn, ib_qp->qp_num,
                ((struct mthca_cq*)ib_qp->send_cq)->cqn, ib_qp->send_cq->cqe,
                ((struct mthca_cq*)ib_qp->recv_cq)->cqn, ib_qp->recv_cq->cqe));

        // fill results for user
        if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
                struct mthca_qp *qp = (struct mthca_qp *)ib_qp;
                struct ibv_create_qp_resp *create_qp_resp = (struct ibv_create_qp_resp *)(void*)p_umv_buf->p_inout_buf;
                ib_qp->ib_mr = ib_mr;
                create_qp_resp->qpn = ib_qp->qp_num;
                create_qp_resp->user_handle = user_handle;
                create_qp_resp->mr.lkey = ib_mr->lkey;
                create_qp_resp->mr.rkey = ib_mr->rkey;
                create_qp_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr;
                create_qp_resp->qp_handle = (__u64)(ULONG_PTR)qp;
                create_qp_resp->max_send_wr = qp->sq.max;
                create_qp_resp->max_recv_wr = qp->rq.max;
                create_qp_resp->max_send_sge = qp->sq.max_gs;
                create_qp_resp->max_recv_sge = qp->rq.max_gs;
                create_qp_resp->max_inline_data = qp->max_inline_data;
                p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp);
        }

        return ib_qp;

err_create_qp:
        if (ib_mr)
                ibv_dereg_mr(ib_mr);
err_alloc_mr:
        if (p_umv_buf && p_umv_buf->command)
                p_umv_buf->status = IB_ERROR;
        HCA_EXIT(HCA_DBG_QP);
        return ERR_PTR(err);
}

int ibv_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask)
{
        return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
}

int ibv_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr)
{
        return qp->device->query_qp ?
                qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
                -ENOSYS;
}

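/*
 * ibv_destroy_qp - destroy a queue pair and drop the references it holds
 * on its PD, send/receive CQs and (optionally) SRQ.  For user-mode QPs the
 * MR registered in ibv_create_qp() is deregistered and the ucontext
 * reference is released via release_user_cq_qp_resources().
 */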
int ibv_destroy_qp(struct ib_qp *qp)
{
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
        int ret;
        struct ib_ucontext *ucontext;
        struct ib_mr *ib_mr;

        pd  = qp->pd;
        scq = qp->send_cq;
        rcq = qp->recv_cq;
        srq = qp->srq;
        ucontext = pd->ucontext;
        ib_mr = qp->ib_mr;

        ret = qp->device->destroy_qp(qp);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                atomic_dec(&scq->usecnt);
                atomic_dec(&rcq->usecnt);
                HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM, ("PD%d use cnt %d, pd_handle %p, ctx %p \n",
                        ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
                if (srq)
                        atomic_dec(&srq->usecnt);
                release_user_cq_qp_resources(ucontext, ib_mr);
        }

        return ret;
}

/* Completion queues */

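/*
 * ibv_create_cq - create a completion queue.
 * As with QPs, a user-mode call first registers the MR described in the
 * ibv_create_cq command (the user buffer supplied for the CQ), takes the
 * requested cqe count from the command, and returns keys, handles and the
 * actual cqe count in ibv_create_cq_resp.
 */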
struct ib_cq *ibv_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe,
                           struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf)
{
        int err;
        struct ib_cq *cq;
        struct ib_mr *ib_mr = NULL;
        u64 user_handle = 0;

        // for user-mode calls we also need to register an MR
        if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
                struct ibv_create_cq *create_cq = (struct ibv_create_cq *)(void*)p_umv_buf->p_inout_buf;

                // register the user memory region
                ib_mr = ibv_reg_mr(
                        (struct ib_pd *)(ULONG_PTR)create_cq->mr.pd_handle,
                        create_cq->mr.access_flags,
                        (void*)(ULONG_PTR)create_cq->mr.start,
                        create_cq->mr.length, create_cq->mr.hca_va, TRUE);
                if (IS_ERR(ib_mr)) {
                        err = PTR_ERR(ib_mr);
                        HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_LOW, ("ibv_reg_mr failed (%d)\n", err));
                        goto err_alloc_mr;
                }
                user_handle = create_cq->user_handle;
                create_cq->lkey = ib_mr->lkey;
                cqe = create_cq->cqe;
        }

        // create the CQ
        cq = device->create_cq(device, cqe, context, p_umv_buf);
        if (IS_ERR(cq)) {
                err = PTR_ERR(cq);
                HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_LOW, ("create_cq failed (%d)\n", err));
                goto err_create_cq;
        }

        cq->device        = device;
        cq->ucontext      = context;
        cq->comp_handler  = comp_handler;
        cq->event_handler = event_handler;
        cq->cq_context    = cq_context;
        atomic_set(&cq->usecnt, 0);
        if (context)
                atomic_inc(&context->usecnt);

        // fill results for user
        if (context && p_umv_buf && p_umv_buf->p_inout_buf) {
                struct ibv_create_cq_resp *create_cq_resp = (struct ibv_create_cq_resp *)(void*)p_umv_buf->p_inout_buf;
                cq->ib_mr = ib_mr;
                create_cq_resp->user_handle = user_handle;
                create_cq_resp->mr.lkey = ib_mr->lkey;
                create_cq_resp->mr.rkey = ib_mr->rkey;
                create_cq_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr;
                create_cq_resp->cq_handle = (u64)(ULONG_PTR)cq;
                create_cq_resp->cqe = cq->cqe;
                p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp);
        }

        return cq;

err_create_cq:
        if (ib_mr)
                ibv_dereg_mr(ib_mr);
err_alloc_mr:
        if (p_umv_buf && p_umv_buf->command)
                p_umv_buf->status = IB_ERROR;
        return ERR_PTR(err);
}

int ibv_destroy_cq(struct ib_cq *cq)
{
        int ret;
        struct ib_ucontext *ucontext = cq->ucontext;
        struct ib_mr *ib_mr = cq->ib_mr;

        if (atomic_read(&cq->usecnt))
                return -EBUSY;

        ret = cq->device->destroy_cq(cq);

        release_user_cq_qp_resources(ucontext, ib_mr);

        return ret;
}

int ibv_resize_cq(struct ib_cq *cq,
                 int cqe)
{
        int ret;

        if (!cq->device->resize_cq)
                return -ENOSYS;

        ret = cq->device->resize_cq(cq, &cqe);
        if (!ret)
                cq->cqe = cqe;

        return ret;
}

/* Memory regions */

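/*
 * ibv_reg_mr - register a user memory region on a PD.
 * Only user-mode registration is supported here (um_call must be TRUE);
 * kernel callers get -ENOSYS.  On success a PD reference is taken and the
 * MR use count starts at zero.
 */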
struct ib_mr *ibv_reg_mr(struct ib_pd *pd,
        mthca_qp_access_t mr_access_flags,
        void* __ptr64 vaddr,
        uint64_t length,
        uint64_t hca_va,
        boolean_t um_call
        )
{
        struct ib_mr *ib_mr;
        int err;
        HCA_ENTER(HCA_DBG_MEMORY);
        /* sanity check */
        if (!um_call) {
                err = -ENOSYS;
                HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY, ("ibv_reg_mr for kernel mode is not supported (%d)\n", err));
                goto err_not_supported;
        }

        ib_mr = pd->device->reg_user_mr(pd, vaddr, length, hca_va, mr_access_flags);
        if (IS_ERR(ib_mr)) {
                err = PTR_ERR(ib_mr);
                HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY, ("mthca_reg_user_mr failed (%d)\n", err));
                goto err_reg_user_mr;
        }

        ib_mr->device  = pd->device;
        ib_mr->pd      = pd;
        atomic_inc(&pd->usecnt);
        atomic_set(&ib_mr->usecnt, 0);
        HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_MEMORY, ("PD%d use cnt %d, pd_handle %p, ctx %p \n",
                ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
        HCA_EXIT(HCA_DBG_MEMORY);
        return ib_mr;

err_reg_user_mr:
err_not_supported:
        HCA_EXIT(HCA_DBG_MEMORY);
        return ERR_PTR(err);
}

struct ib_mr *ibv_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t mr_access_flags)
{
        struct ib_mr *mr;

        mr = pd->device->get_dma_mr(pd, mr_access_flags);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
                HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_MEMORY, ("PD%d use cnt %d \n",
                        ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
        }

        return mr;
}

struct ib_mr *ibv_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             mthca_qp_access_t mr_access_flags,
                             u64 *iova_start)
{
        struct ib_mr *mr;

        mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
                                     mr_access_flags, iova_start);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
                HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM, ("PD%d use cnt %d \n",
                        ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
        }

        return mr;
}

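/*
 * ibv_rereg_phys_mr - re-register a physical MR in place (new translation,
 * access flags and/or PD, as selected by mr_rereg_mask).  The MR must not
 * be in use (usecnt == 0); when IB_MR_REREG_PD is set the PD reference
 * moves from the old PD to the new one.
 */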
int ibv_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     mthca_qp_access_t mr_access_flags,
                     u64 *iova_start)
{
        struct ib_pd *old_pd;
        int ret;

        if (!mr->device->rereg_phys_mr)
                return -ENOSYS;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        old_pd = mr->pd;

        ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
                                        phys_buf_array, num_phys_buf,
                                        mr_access_flags, iova_start);

        if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
                atomic_dec(&old_pd->usecnt);
                atomic_inc(&pd->usecnt);
                HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM, ("PD%d use cnt %d \n",
                        ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
        }

        return ret;
}

int ibv_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
        return mr->device->query_mr ?
                mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}

int ibv_dereg_mr(struct ib_mr *mr)
{
        int ret;
        struct ib_pd *pd;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        pd = mr->pd;
        ret = mr->device->dereg_mr(mr);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM, ("PD%d use cnt %d, pd_handle %p, ctx %p \n",
                        ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext));
        }

        return ret;
}

/* Memory windows */

struct ib_mw *ibv_alloc_mw(struct ib_pd *pd)
{
        struct ib_mw *mw;

        if (!pd->device->alloc_mw)
                return ERR_PTR(-ENOSYS);

        mw = pd->device->alloc_mw(pd);
        if (!IS_ERR(mw)) {
                mw->device  = pd->device;
                mw->pd      = pd;
                atomic_inc(&pd->usecnt);
                HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM, ("PD%d use cnt %d \n",
                        ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
        }

        return mw;
}

int ibv_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd;
        int ret;

        pd = mw->pd;
        ret = mw->device->dealloc_mw(mw);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM, ("PD%d use cnt %d \n",
                        ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
        }

        return ret;
}

775 /* "Fast" memory regions */
776
777 struct ib_fmr *ibv_alloc_fmr(struct ib_pd *pd,
778                             mthca_qp_access_t mr_access_flags,
779                             struct ib_fmr_attr *fmr_attr)
780 {
781         struct ib_fmr *fmr;
782
783         if (!pd->device->alloc_fmr)
784                 return ERR_PTR(-ENOSYS);
785
786         fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
787         if (!IS_ERR(fmr)) {
788                 fmr->device = pd->device;
789                 fmr->pd     = pd;
790                 atomic_inc(&pd->usecnt);
791                 HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM        ,("PD%d use cnt %d \n", 
792                         ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
793         }
794
795         return fmr;
796 }
797
798 int ibv_unmap_fmr(struct list_head *fmr_list)
799 {
800         struct ib_fmr *fmr;
801
802         if (list_empty(fmr_list))
803                 return 0;
804
805         fmr = list_entry(fmr_list->next, struct ib_fmr, list);
806         return fmr->device->unmap_fmr(fmr_list);
807 }
808
809 int ibv_dealloc_fmr(struct ib_fmr *fmr)
810 {
811         struct ib_pd *pd;
812         int ret;
813
814         pd = fmr->pd;
815         ret = fmr->device->dealloc_fmr(fmr);
816         if (!ret) {
817                 atomic_dec(&pd->usecnt);
818                 HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM        ,("PD%d use cnt %d \n", 
819                         ((struct mthca_pd*)pd)->pd_num, pd->usecnt));
820         }
821
822         return ret;
823 }
824
825 /* Multicast groups */
826
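/*
 * ibv_attach_mcast / ibv_detach_mcast - attach a UD QP to (or detach it
 * from) a multicast group.  The GID must be a multicast GID (raw[0] ==
 * 0xff) and the QP must be of type IB_QPT_UNRELIABLE_DGRM; anything else
 * is rejected with -EINVAL.
 */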
int ibv_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        if (!qp->device->attach_mcast)
                return -ENOSYS;
        if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UNRELIABLE_DGRM)
                return -EINVAL;

        return qp->device->attach_mcast(qp, gid, lid);
}

int ibv_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        if (!qp->device->detach_mcast)
                return -ENOSYS;
        if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UNRELIABLE_DGRM)
                return -EINVAL;

        return qp->device->detach_mcast(qp, gid, lid);
}