[MTHCA] bug fixes:
hw/mthca/user/mlnx_uvp_verbs.c
/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */

#include <mt_l2w.h>

#include "mlnx_uvp.h"
#include "mx_abi.h"
#include "mthca_wqe.h"

#if defined(EVENT_TRACING)
#include "mlnx_uvp_verbs.tmh"
#endif

struct ibv_pd *mthca_alloc_pd(struct ibv_context *context, struct ibv_alloc_pd_resp *resp)
{
        struct mthca_pd           *pd;

        pd = cl_zalloc(sizeof *pd);
        if (!pd)
                goto err_malloc;

        if (!mthca_is_memfree(context)) {
                pd->ah_list = NULL;
                pd->ah_mutex = CreateMutex( NULL, FALSE, NULL );
                if (!pd->ah_mutex)
                        goto err_mutex;
        }

        /* fill response fields */
        pd->ibv_pd.context = context;
        pd->ibv_pd.handle = resp->pd_handle;
        pd->pdn = resp->pdn;

        return &pd->ibv_pd;

err_mutex:
        cl_free(pd);
err_malloc:
        return NULL;
}

int mthca_free_pd(struct ibv_pd *ibv_pd)
{
        struct mthca_pd *pd = to_mpd(ibv_pd);

        if (!mthca_is_memfree(ibv_pd->context)) {
                struct mthca_ah_page *page, *next_page;

                /* free the pages cached for UD address handles */
                WaitForSingleObject( pd->ah_mutex, INFINITE );
                for (page = pd->ah_list; page; page = next_page) {
                        next_page = page->next;
#ifdef NOT_USE_VIRTUAL_ALLOC
                        cl_free(page->buf);
#else
                        VirtualFree( page->buf, 0, MEM_RELEASE);
#endif
                        cl_free(page);
                }
                ReleaseMutex( pd->ah_mutex );
                CloseHandle(pd->ah_mutex);
        }
        cl_free(pd);
        return 0;
}
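
/*
 * Usage sketch (illustration only, not part of this file): the uvp
 * entry points are split around the kernel transition, so a caller
 * first issues the alloc ioctl to the kernel and then hands the
 * response to mthca_alloc_pd().  The caller-side variable names
 * below are hypothetical.
 *
 *      struct ibv_alloc_pd_resp resp;
 *      struct ibv_pd *pd;
 *
 *      // ... kernel transition fills resp.pd_handle / resp.pdn ...
 *      pd = mthca_alloc_pd(context, &resp);
 *      if (!pd)
 *              return -ENOMEM;
 *      // ... use pd ...
 *      mthca_free_pd(pd);
 */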

/* allocate create_cq infrastructure and fill its request parameters structure */
struct ibv_cq *mthca_create_cq_pre(struct ibv_context *context, int *p_cqe,
                               struct ibv_create_cq *req)
{
        struct mthca_cq            *cq;
        int                         nent;

        /* Sanity check CQ size before proceeding */
        if (*p_cqe > 131072)
                goto exit;

        cq = cl_zalloc(sizeof *cq);
        if (!cq)
                goto exit;

        cl_spinlock_construct(&cq->lock);
        if (cl_spinlock_init(&cq->lock))
                goto err;

        /* find the smallest power of two greater than the requested size */
        for (nent = 1; nent <= *p_cqe; nent <<= 1)
                ; /* nothing */

        if (posix_memalign(&cq->buf, g_page_size,
                        align(nent * MTHCA_CQ_ENTRY_SIZE, g_page_size)))
                goto err_memalign;

        mthca_init_cq_buf(cq, nent);

        if (mthca_is_memfree(context)) {
                cq->set_ci_db_index = mthca_alloc_db(to_mctx(context)->db_tab,
                                                     MTHCA_DB_TYPE_CQ_SET_CI,
                                                     &cq->set_ci_db);
                if (cq->set_ci_db_index < 0)
                        goto err_unreg;

                cq->arm_db_index    = mthca_alloc_db(to_mctx(context)->db_tab,
                                                     MTHCA_DB_TYPE_CQ_ARM,
                                                     &cq->arm_db);
                if (cq->arm_db_index < 0)
                        goto err_set_db;

                cq->u_arm_db_index  = mthca_alloc_db(to_mctx(context)->db_tab,
                                                     MTHCA_DB_TYPE_CQ_ARM,
                                                     &cq->p_u_arm_sn);
                if (cq->u_arm_db_index < 0)
                        goto err_arm_db;

                *cq->p_u_arm_sn = 1;

                req->arm_db_page  = db_align(cq->arm_db);
                req->set_db_page  = db_align(cq->set_ci_db);
                req->u_arm_db_page  = (uint64_t)(ULONG_PTR)cq->p_u_arm_sn;
                req->arm_db_index = cq->arm_db_index;
                req->set_db_index = cq->set_ci_db_index;
                req->u_arm_db_index = cq->u_arm_db_index;
        }

        req->mr.start = (uint64_t)(ULONG_PTR)cq->buf;
        req->mr.length = nent * MTHCA_CQ_ENTRY_SIZE;
        req->mr.hca_va = 0;
        req->mr.pd_handle    = to_mctx(context)->pd->handle;
        req->mr.pdn = to_mpd(to_mctx(context)->pd)->pdn;
        req->mr.access_flags = MTHCA_ACCESS_LOCAL_WRITE;
        req->user_handle = (uint64_t)(ULONG_PTR)cq;
        req->cqe = *p_cqe;
        *p_cqe = nent - 1;      /* report the usable capacity back to the caller */
        return &cq->ibv_cq;

err_arm_db:
        if (mthca_is_memfree(context))
                mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_ARM,
                        cq->arm_db_index);

err_set_db:
        if (mthca_is_memfree(context))
                mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
                        cq->set_ci_db_index);

err_unreg:
#ifdef NOT_USE_VIRTUAL_ALLOC
        cl_free(cq->buf);
#else
        VirtualFree( cq->buf, 0, MEM_RELEASE);
#endif

err_memalign:
        cl_spinlock_destroy(&cq->lock);

err:
        cl_free(cq);

exit:
        return ERR_PTR(-ENOMEM);
}

struct ibv_cq *mthca_create_cq_post(struct ibv_context *context,
                               struct ibv_create_cq_resp *resp)
{
        struct mthca_cq   *cq;

        cq = (struct mthca_cq *)(ULONG_PTR)resp->user_handle;

        cq->cqn = resp->cqn;
        cq->mr.handle = resp->mr.mr_handle;
        cq->mr.lkey = resp->mr.lkey;
        cq->mr.rkey = resp->mr.rkey;
        cq->mr.pd = to_mctx(context)->pd;
        cq->mr.context = context;
        cq->ibv_cq.cqe = resp->cqe;
        cq->ibv_cq.handle = resp->cq_handle;
        cq->ibv_cq.context = context;

        if (mthca_is_memfree(context)) {
                mthca_set_db_qn(cq->set_ci_db, MTHCA_DB_TYPE_CQ_SET_CI, cq->cqn);
                mthca_set_db_qn(cq->arm_db,    MTHCA_DB_TYPE_CQ_ARM,    cq->cqn);
        }

        return &cq->ibv_cq;
}
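
/*
 * Usage sketch (illustration only): CQ creation is split into a "pre"
 * call that builds the ioctl request and a "post" call that consumes
 * the kernel response.  The middle step is hypothetical pseudocode;
 * the real dispatch lives in the IBAL/uvp glue, not in this file.
 *
 *      struct ibv_create_cq req;
 *      struct ibv_create_cq_resp resp;
 *      int cqe = 256;
 *      struct ibv_cq *cq;
 *
 *      cq = mthca_create_cq_pre(context, &cqe, &req);
 *      if (IS_ERR(cq))
 *              return PTR_ERR(cq);
 *      // ... send req to the kernel, receive resp ...
 *      cq = mthca_create_cq_post(context, &resp);
 */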

int mthca_destroy_cq(struct ibv_cq *cq)
{
        if (mthca_is_memfree(cq->context)) {
                mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_ARM,
                              to_mcq(cq)->u_arm_db_index);
                mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
                              to_mcq(cq)->set_ci_db_index);
                mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_ARM,
                              to_mcq(cq)->arm_db_index);
        }

#ifdef NOT_USE_VIRTUAL_ALLOC
        cl_free(to_mcq(cq)->buf);
#else
        VirtualFree( to_mcq(cq)->buf, 0, MEM_RELEASE);
#endif

        cl_spinlock_destroy(&to_mcq(cq)->lock);
        cl_free(to_mcq(cq));

        return 0;
}

int align_queue_size(struct ibv_context *context, int size, int spare)
{
        int ret;

        /*
         * If someone asks for a 0-sized queue, presumably they're not
         * going to use it.  So don't mess with their size.
         */
        if (!size)
                return 0;

        if (mthca_is_memfree(context)) {
                /* round size + spare up to the next power of two */
                for (ret = 1; ret < size + spare; ret <<= 1)
                        ; /* nothing */

                return ret;
        } else
                return size + spare;
}
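
/*
 * Worked example (illustration only): on a memfree HCA,
 * align_queue_size(ctx, 100, 1) rounds 101 up to the next power of
 * two and returns 128; on a Tavor-mode HCA it simply returns 101.
 */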

struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd,
        struct ibv_qp_init_attr *attr, struct ibv_create_qp *req)
{
        struct mthca_qp       *qp;
        struct ibv_context *context = pd->context;
        int                    ret = -ENOMEM;

        UVP_ENTER(UVP_DBG_QP);
        /* Sanity check QP size before proceeding */
        if (attr->cap.max_send_wr     > 65536 ||
            attr->cap.max_recv_wr     > 65536 ||
            attr->cap.max_send_sge    > 64    ||
            attr->cap.max_recv_sge    > 64    ||
            attr->cap.max_inline_data > 1024) {
                ret = -EINVAL;
                UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("sanity checks failed (%d)\n",ret));
                goto exit;
        }

        qp = cl_zalloc(sizeof *qp);
        if (!qp) {
                UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_zalloc failed (%d)\n",ret));
                goto err_nomem;
        }

        qp->sq.max = align_queue_size(context, attr->cap.max_send_wr, 0);
        qp->rq.max = align_queue_size(context, attr->cap.max_recv_wr, 0);

        if (mthca_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp)) {
                UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("mthca_alloc_qp_buf failed (%d)\n",ret));
                goto err_nomem;
        }

        mthca_init_qp_indices(qp);

        cl_spinlock_construct(&qp->sq.lock);
        if (cl_spinlock_init(&qp->sq.lock)) {
                ret = -EFAULT;
                UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_spinlock_init failed for sq (%d)\n",ret));
                goto err_spinlock_sq;
        }

        cl_spinlock_construct(&qp->rq.lock);
        if (cl_spinlock_init(&qp->rq.lock)) {
                ret = -EFAULT;
                UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_spinlock_init failed for rq (%d)\n",ret));
                goto err_spinlock_rq;
        }

        if (mthca_is_memfree(context)) {
                qp->sq.db_index = mthca_alloc_db(to_mctx(context)->db_tab,
                                                 MTHCA_DB_TYPE_SQ,
                                                 &qp->sq.db);
                if (qp->sq.db_index < 0)
                        goto err_sq_db;

                qp->rq.db_index = mthca_alloc_db(to_mctx(context)->db_tab,
                                                 MTHCA_DB_TYPE_RQ,
                                                 &qp->rq.db);
                if (qp->rq.db_index < 0)
                        goto err_rq_db;

                req->sq_db_page  = db_align(qp->sq.db);
                req->rq_db_page  = db_align(qp->rq.db);
                req->sq_db_index = qp->sq.db_index;
                req->rq_db_index = qp->rq.db_index;
        }

        /* fill in the remaining QP fields */
        qp->ibv_qp.pd = pd;
        qp->ibv_qp.send_cq = attr->send_cq;
        qp->ibv_qp.recv_cq = attr->recv_cq;
        qp->ibv_qp.srq = attr->srq;
        qp->ibv_qp.state = IBV_QPS_RESET;
        qp->ibv_qp.qp_type = attr->qp_type;

        /* fill in the remaining request fields */
        req->mr.start = (uint64_t)(ULONG_PTR)qp->buf;
        req->mr.length = qp->buf_size;
        req->mr.hca_va = 0;
        req->mr.pd_handle    = pd->handle;
        req->mr.pdn = to_mpd(pd)->pdn;
        req->mr.access_flags = 0;       /* local read only */
        req->user_handle = (uint64_t)(ULONG_PTR)qp;
        req->send_cq_handle = attr->send_cq->handle;
        req->recv_cq_handle = attr->recv_cq->handle;
        req->srq_handle = (attr->srq) ? attr->srq->handle : 0;
        req->max_send_wr = attr->cap.max_send_wr;
        req->max_recv_wr = attr->cap.max_recv_wr;
        req->max_send_sge = attr->cap.max_send_sge;
        req->max_recv_sge = attr->cap.max_recv_sge;
        req->max_inline_data = attr->cap.max_inline_data;
        req->sq_sig_all = (uint8_t)attr->sq_sig_all;
        req->qp_type = attr->qp_type;
        req->is_srq = !!attr->srq;

        UVP_EXIT(UVP_DBG_QP);
        return &qp->ibv_qp;

err_rq_db:
        if (mthca_is_memfree(context))
                mthca_free_db(to_mctx(context)->db_tab,
                        MTHCA_DB_TYPE_SQ, qp->sq.db_index);

err_sq_db:
        cl_spinlock_destroy(&qp->rq.lock);

err_spinlock_rq:
        cl_spinlock_destroy(&qp->sq.lock);

err_spinlock_sq:
        cl_free(qp->wrid);
#ifdef NOT_USE_VIRTUAL_ALLOC
        cl_free(qp->buf);
#else
        VirtualFree( qp->buf, 0, MEM_RELEASE);
#endif

err_nomem:
        cl_free(qp);

exit:
        UVP_EXIT(UVP_DBG_QP);
        return ERR_PTR(ret);
}

struct ibv_qp *mthca_create_qp_post(struct ibv_pd *pd,
        struct ibv_create_qp_resp *resp)
{
        struct mthca_qp       *qp;
        int                    ret;

        UVP_ENTER(UVP_DBG_QP);
        qp = (struct mthca_qp *)(ULONG_PTR)resp->user_handle;

        qp->ibv_qp.handle    = resp->qp_handle;
        qp->ibv_qp.qp_num    = resp->qpn;
        qp->sq.max           = resp->max_send_wr;
        qp->rq.max           = resp->max_recv_wr;
        qp->sq.max_gs        = resp->max_send_sge;
        qp->rq.max_gs        = resp->max_recv_sge;
        qp->max_inline_data  = resp->max_inline_data;
        qp->mr.handle = resp->mr.mr_handle;
        qp->mr.lkey = resp->mr.lkey;
        qp->mr.rkey = resp->mr.rkey;
        qp->mr.pd = pd;
        qp->mr.context = pd->context;

        if (mthca_is_memfree(pd->context)) {
                mthca_set_db_qn(qp->sq.db, MTHCA_DB_TYPE_SQ, qp->ibv_qp.qp_num);
                mthca_set_db_qn(qp->rq.db, MTHCA_DB_TYPE_RQ, qp->ibv_qp.qp_num);
        }

        ret = mthca_store_qp(to_mctx(pd->context), qp->ibv_qp.qp_num, qp);
        if (ret)
                goto err_store_qp;

        UVP_EXIT(UVP_DBG_QP);
        return &qp->ibv_qp;

err_store_qp:
        UVP_EXIT(UVP_DBG_QP);
        return ERR_PTR(ret);
}
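
/*
 * Usage sketch (illustration only): QP creation follows the same
 * pre/ioctl/post protocol as CQ creation above.  The middle step is
 * hypothetical pseudocode standing in for the kernel transition.
 *
 *      struct ibv_create_qp req;
 *      struct ibv_create_qp_resp resp;
 *      struct ibv_qp *qp;
 *
 *      qp = mthca_create_qp_pre(pd, &init_attr, &req);
 *      if (IS_ERR(qp))
 *              return PTR_ERR(qp);
 *      // ... send req to the kernel, receive resp ...
 *      qp = mthca_create_qp_post(pd, &resp);
 */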

int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
                    enum ibv_qp_attr_mask attr_mask)
{
        int ret = 0;

        if (attr_mask & IBV_QP_STATE)
                qp->state = attr->qp_state;

        if ((attr_mask & IBV_QP_STATE) &&
            (attr->qp_state == IBV_QPS_RESET)) {
                mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
                               qp->srq ? to_msrq(qp->srq) : NULL);
                if (qp->send_cq != qp->recv_cq)
                        mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);

                mthca_init_qp_indices(to_mqp(qp));

                if (mthca_is_memfree(qp->pd->context)) {
                        *to_mqp(qp)->sq.db = 0;
                        *to_mqp(qp)->rq.db = 0;
                }
        }

        return ret;
}
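
/*
 * Note (illustration only): this routine handles just the userspace
 * side effects of a state change; the HCA itself is modified by the
 * kernel.  For example, after the kernel moves a QP back to RESET, a
 * hypothetical caller would invoke
 *
 *      struct ibv_qp_attr attr;
 *      attr.qp_state = IBV_QPS_RESET;
 *      mthca_modify_qp(qp, &attr, IBV_QP_STATE);
 *
 * so that stale completions are purged from both CQs and the ring
 * indices and doorbell records start from a clean state.
 */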

void mthca_destroy_qp_pre(struct ibv_qp *qp)
{
        mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
                       qp->srq ? to_msrq(qp->srq) : NULL);
        if (qp->send_cq != qp->recv_cq)
                mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);

        /* always take the send CQ lock first to keep lock ordering consistent */
        cl_spinlock_acquire(&to_mcq(qp->send_cq)->lock);
        if (qp->send_cq != qp->recv_cq)
                cl_spinlock_acquire(&to_mcq(qp->recv_cq)->lock);
        mthca_clear_qp(to_mctx(qp->pd->context), qp->qp_num);
        if (qp->send_cq != qp->recv_cq)
                cl_spinlock_release(&to_mcq(qp->recv_cq)->lock);
        cl_spinlock_release(&to_mcq(qp->send_cq)->lock);
}

void mthca_destroy_qp_post(struct ibv_qp *qp, int ret)
{
        if (ret) {
                /* the kernel failed to destroy the QP: put it back into the table */
                cl_spinlock_acquire(&to_mcq(qp->send_cq)->lock);
                if (qp->send_cq != qp->recv_cq)
                        cl_spinlock_acquire(&to_mcq(qp->recv_cq)->lock);
                mthca_store_qp(to_mctx(qp->pd->context), qp->qp_num, to_mqp(qp));
                if (qp->send_cq != qp->recv_cq)
                        cl_spinlock_release(&to_mcq(qp->recv_cq)->lock);
                cl_spinlock_release(&to_mcq(qp->send_cq)->lock);
        } else {
                if (mthca_is_memfree(qp->pd->context)) {
                        mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_RQ,
                                      to_mqp(qp)->rq.db_index);
                        mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_SQ,
                                      to_mqp(qp)->sq.db_index);
                }

                cl_spinlock_destroy(&to_mqp(qp)->sq.lock);
                cl_spinlock_destroy(&to_mqp(qp)->rq.lock);

#ifdef NOT_USE_VIRTUAL_ALLOC
                cl_free(to_mqp(qp)->buf);
#else
                VirtualFree( to_mqp(qp)->buf, 0, MEM_RELEASE);
#endif
                cl_free(to_mqp(qp)->wrid);
                cl_free(to_mqp(qp));
        }
}
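
/*
 * Usage sketch (illustration only): QP teardown mirrors creation.
 * A hypothetical caller removes the QP from the userspace table,
 * issues the destroy ioctl, and then reports the kernel status to
 * the "post" half so it can either free the resources or undo the
 * removal:
 *
 *      mthca_destroy_qp_pre(qp);
 *      ret = ...;              // kernel destroy-QP transition
 *      mthca_destroy_qp_post(qp, ret);
 */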

int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
{
#ifdef WIN_TO_BE_CHANGED
        return ibv_cmd_attach_mcast(qp, gid, lid);
#else
        return -ENOSYS;
#endif
}

int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
{
#ifdef WIN_TO_BE_CHANGED
        return ibv_cmd_detach_mcast(qp, gid, lid);
#else
        return -ENOSYS;
#endif
}