[MTHCA] bugfix: passing huge size values to create_cq/resize_cq causes hang in align_...
[mirror/winof/.git] / hw / mthca / user / mlnx_uvp_verbs.c
1 /*
2  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  *
33  * $Id: verbs.c 4182 2005-11-28 21:14:30Z roland $
34  */
35
36 #include <mt_l2w.h>
37
38 #include "mlnx_uvp.h"
39 #include "mx_abi.h"
40
41 #if defined(EVENT_TRACING)
42 #include "mlnx_uvp_verbs.tmh"
43 #endif
44
45 struct ibv_pd *mthca_alloc_pd(struct ibv_context *context, struct ibv_alloc_pd_resp *resp)
46 {
47         struct mthca_pd           *pd;
48
49         pd = cl_malloc(sizeof *pd);
50         if (!pd)
51                 goto err_malloc;
52
53         if (!mthca_is_memfree(context)) {
54                 pd->ah_list = NULL;
55                 pd->ah_mutex = CreateMutex( NULL, FALSE, NULL );
56                 if (!pd->ah_mutex) 
57                         goto err_mutex;
58         }
59
60         /* fill response fields */
61         pd->ibv_pd.context = context;   
62         pd->ibv_pd.handle = resp->pd_handle;
63         pd->pdn = resp->pdn;
64
65         return &pd->ibv_pd;
66
67 err_mutex:
68         cl_free(pd);
69 err_malloc:
70         return NULL;
71 }
72
73 int mthca_free_pd(struct ibv_pd *ibv_pd)
74 {
75         struct mthca_pd *pd = to_mpd(ibv_pd);
76         if (!mthca_is_memfree(ibv_pd->context)) {
77                 struct mthca_ah_page *page, *next_page;
78                 WaitForSingleObject( pd->ah_mutex, INFINITE );
79                 for (page = pd->ah_list; page; page = next_page) {
80                         next_page = page->next;
81                         #ifdef NOT_USE_VIRTUAL_ALLOC    
82                                 cl_free(page->buf);
83                         #else
84                                 VirtualFree( page->buf, 0, MEM_RELEASE);
85                         #endif
86                         cl_free(page);
87                 }
88                 ReleaseMutex( pd->ah_mutex );
89                 CloseHandle(pd->ah_mutex);
90         }
91         cl_free(pd);
92         return 0;
93 }
94
95 /* allocate create_cq infrastructure  and fill it's request parameters structure */
96 struct ibv_cq *mthca_create_cq_pre(struct ibv_context *context, int *p_cqe,
97                                struct ibv_create_cq *req)
98 {
99         struct mthca_cq            *cq;
100         int                         nent;
101         int                         ret;
102
103         /* Sanity check CQ size before proceeding */
104         if (*p_cqe > 131072)
105                 goto exit;
106
107         cq = cl_malloc(sizeof *cq);
108         if (!cq)
109                 goto exit;
110
111         cl_spinlock_construct(&cq->lock);
112         if (cl_spinlock_init(&cq->lock))
113                 goto err;
114
115         for (nent = 1; nent <= *p_cqe; nent <<= 1)
116                 ; /* nothing */
117
118         if (posix_memalign(&cq->buf, g_page_size,
119                         align(nent * MTHCA_CQ_ENTRY_SIZE, g_page_size)))
120                 goto err;
121
122         mthca_init_cq_buf(cq, nent);
123
124         if (mthca_is_memfree(context)) {
125                 cq->set_ci_db_index = mthca_alloc_db(to_mctx(context)->db_tab,
126                                                      MTHCA_DB_TYPE_CQ_SET_CI,
127                                                      &cq->set_ci_db);
128                 if (cq->set_ci_db_index < 0)
129                         goto err_unreg;
130
131                 cq->arm_db_index    = mthca_alloc_db(to_mctx(context)->db_tab,
132                                                      MTHCA_DB_TYPE_CQ_ARM,
133                                                      &cq->arm_db);
134                 if (cq->arm_db_index < 0)
135                         goto err_set_db;
136
137                 cq->u_arm_db_index    = mthca_alloc_db(to_mctx(context)->db_tab,
138                                                      MTHCA_DB_TYPE_CQ_ARM,
139                                                      &cq->p_u_arm_sn);
140                 if (cq->u_arm_db_index < 0)
141                         goto err_arm_db;
142
143                 *cq->p_u_arm_sn = 1;
144
145                 req->arm_db_page  = db_align(cq->arm_db);
146                 req->set_db_page  = db_align(cq->set_ci_db);
147                 req->u_arm_db_page  = (uint64_t)(ULONG_PTR)cq->p_u_arm_sn;
148                 req->arm_db_index = cq->arm_db_index;
149                 req->set_db_index = cq->set_ci_db_index;
150                 req->u_arm_db_index = cq->u_arm_db_index;
151         }
152
153         req->mr.start = (uint64_t)(ULONG_PTR)cq->buf;
154         req->mr.length = nent * MTHCA_CQ_ENTRY_SIZE;
155         req->mr.hca_va = 0;
156         req->mr.pd_handle    = to_mctx(context)->pd->handle;
157         req->mr.pdn = to_mpd(to_mctx(context)->pd)->pdn;
158         req->mr.access_flags = MTHCA_ACCESS_LOCAL_WRITE;
159         req->user_handle = (uint64_t)(ULONG_PTR)cq;
160 #if 1   
161         req->cqe = *p_cqe;
162         *p_cqe = nent-1;
163 //      *p_cqe = *p_cqe;        // return the same value
164 //      cq->ibv_cq.cqe = nent -1;
165 #else
166         req->cqe = nent;
167         *p_cqe = *p_cqe;        // return the same value
168 #endif
169         return &cq->ibv_cq;
170
171 err_arm_db:
172         if (mthca_is_memfree(context))
173                 mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
174                         cq->arm_db_index);
175
176 err_set_db:
177         if (mthca_is_memfree(context))
178                 mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
179                         cq->set_ci_db_index);
180
181 err_unreg:
182         cl_free(cq->buf);
183
184 err:
185         cl_free(cq);
186 exit:
187         return ERR_PTR(-ENOMEM);
188 }
189
190 struct ibv_cq *mthca_create_cq_post(struct ibv_context *context, 
191                                struct ibv_create_cq_resp *resp)
192 {
193         struct mthca_cq   *cq;
194         int                         ret;
195
196         cq = (struct mthca_cq *)(ULONG_PTR)resp->user_handle;
197
198         cq->cqn = resp->cqn;
199         cq->mr.handle = resp->mr.mr_handle;
200         cq->mr.lkey = resp->mr.lkey;
201         cq->mr.rkey = resp->mr.rkey;
202         cq->mr.pd = to_mctx(context)->pd;
203         cq->mr.context = context;
204         cq->ibv_cq.cqe = resp->cqe;
205         cq->ibv_cq.handle = resp->cq_handle;
206         cq->ibv_cq.context = context;
207
208         if (mthca_is_memfree(context)) {
209                 mthca_set_db_qn(cq->set_ci_db, MTHCA_DB_TYPE_CQ_SET_CI, cq->cqn);
210                 mthca_set_db_qn(cq->arm_db,    MTHCA_DB_TYPE_CQ_ARM,    cq->cqn);
211         }
212
213         return &cq->ibv_cq;
214
215 }
216
217 int mthca_destroy_cq(struct ibv_cq *cq)
218 {
219         int ret;
220
221         if (mthca_is_memfree(cq->context)) {
222                 mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
223                               to_mcq(cq)->u_arm_db_index);
224                 mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
225                               to_mcq(cq)->set_ci_db_index);
226                 mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_ARM,
227                               to_mcq(cq)->arm_db_index);
228         }
229
230 #ifdef NOT_USE_VIRTUAL_ALLOC    
231         cl_free(to_mcq(cq)->buf);
232 #else
233         VirtualFree( to_mcq(cq)->buf, 0, MEM_RELEASE);
234 #endif
235
236         
237         cl_spinlock_destroy(&((struct mthca_cq *)cq)->lock);
238         cl_free(to_mcq(cq));
239
240         return 0;
241 }
242
/* Round a work-queue depth up for the HCA: memfree HCAs need a
 * power-of-two size (including 'spare' extra entries), older HCAs just
 * take size + spare.  A size of 0 is passed through unchanged.
 * The doubling loop is clamped at 2^30: without the clamp, a huge
 * 'size' makes 'ret <<= 1' overflow and the loop spin forever (the
 * hang seen with huge create_cq/resize_cq values); callers' sanity
 * checks reject such sizes before they reach the hardware anyway. */
static int align_queue_size(struct ibv_context *context, int size, int spare)
{
	int ret;

	/*
	 * If someone asks for a 0-sized queue, presumably they're not
	 * going to use it.  So don't mess with their size.
	 */
	if (!size)
		return 0;

	if (mthca_is_memfree(context)) {
		/* overflow clamp: keep ret <<= 1 inside int range */
		if (size > (1 << 30) - spare)
			return 1 << 30;

		for (ret = 1; ret < size + spare; ret <<= 1)
			; /* nothing */

		return ret;
	} else
		return size + spare;
}
262
/* First (pre-ioctl) stage of QP creation: validate the requested
 * capabilities, allocate the userspace QP with its work-queue buffer,
 * spinlocks and (on memfree HCAs) SQ/RQ doorbell records, then fill
 * the ioctl request for the kernel call.
 * Returns the new ibv_qp on success, ERR_PTR(-errno) on failure. */
struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd, 
	struct ibv_qp_init_attr *attr, struct ibv_create_qp *req)
{
	struct mthca_qp       *qp;
	struct ibv_context *context = pd->context;
	int                    ret = -ENOMEM;

	UVP_ENTER(UVP_DBG_QP);
	/* Sanity check QP size before proceeding: reject depths/SGE
	 * counts the hardware cannot support (also guards the
	 * power-of-two rounding in align_queue_size from overflow). */
	if (attr->cap.max_send_wr     > 65536 ||
	    attr->cap.max_recv_wr     > 65536 ||
	    attr->cap.max_send_sge    > 64    ||
	    attr->cap.max_recv_sge    > 64    ||
	    attr->cap.max_inline_data > 1024) {
		ret = -EINVAL;
		UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("sanity checks  failed (%d)\n",ret));
		goto exit;
		}

	qp = cl_malloc(sizeof *qp);
	if (!qp) {
		UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_malloc  failed (%d)\n",ret));
		goto err_nomem;
	}	

	/* round queue depths up as the HCA requires (power of 2 on memfree) */
	qp->sq.max = align_queue_size(context, attr->cap.max_send_wr, 0);
	qp->rq.max = align_queue_size(context, attr->cap.max_recv_wr, 0);

	/* allocates qp->buf and qp->wrid; both are released on error below */
	if (mthca_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp)) {
		UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("mthca_alloc_qp_buf  failed (%d)\n",ret));
		goto err_nomem;
	} 

	mthca_init_qp_indices(qp);

	cl_spinlock_construct(&qp->sq.lock);
	cl_spinlock_construct(&qp->rq.lock);
	if (cl_spinlock_init(&qp->sq.lock) || cl_spinlock_init(&qp->rq.lock)) {
		ret = -EFAULT;
		UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_spinlock_init failed (%d)\n",ret));
		goto err_spinlock;
	}

	/* memfree HCAs track queue state through doorbell records */
	if (mthca_is_memfree(context)) {
		qp->sq.db_index = mthca_alloc_db(to_mctx(context)->db_tab,
						 MTHCA_DB_TYPE_SQ,
						 &qp->sq.db);
		if (qp->sq.db_index < 0)
			goto err_spinlock;

		qp->rq.db_index = mthca_alloc_db(to_mctx(context)->db_tab,
						 MTHCA_DB_TYPE_RQ,
						 &qp->rq.db);
		if (qp->rq.db_index < 0)
			goto err_sq_db;

		req->sq_db_page  = db_align(qp->sq.db);
		req->rq_db_page  = db_align(qp->rq.db);
		req->sq_db_index = qp->sq.db_index;
		req->rq_db_index = qp->rq.db_index;
	}

	// fill the rest qp fields
	qp->ibv_qp	.pd = pd;
	qp->ibv_qp.send_cq = attr->send_cq;
	qp->ibv_qp.recv_cq = attr->recv_cq;
	qp->ibv_qp.srq = attr->srq;
	qp->ibv_qp.state = IBV_QPS_RESET;
	qp->ibv_qp.qp_type = attr->qp_type;

	// fill the rest request fields
	req->mr.start = (uint64_t)(ULONG_PTR)qp->buf;
	req->mr.length = qp->buf_size;
	req->mr.hca_va = 0;
	req->mr.pd_handle    = pd->handle;
	req->mr.pdn = to_mpd(pd)->pdn;
	req->mr.access_flags = 0;	//local read
	req->user_handle = (uint64_t)(ULONG_PTR)qp;	/* round-tripped by create_qp_post */
	req->send_cq_handle = attr->send_cq->handle;
	req->recv_cq_handle = attr->recv_cq->handle;
	req->srq_handle = (attr->srq) ? attr->srq->handle : 0;
	req->max_send_wr = attr->cap.max_send_wr;
	req->max_recv_wr = attr->cap.max_recv_wr;
	req->max_send_sge = attr->cap.max_send_sge;
	req->max_recv_sge = attr->cap.max_recv_sge;
	req->max_inline_data = attr->cap.max_inline_data;
	req->sq_sig_all = (uint8_t)attr->sq_sig_all;
	req->qp_type = attr->qp_type;
	req->is_srq = !!attr->srq;


	UVP_EXIT(UVP_DBG_QP);
	return &qp->ibv_qp;

	/* error unwinding: release in reverse order of acquisition */
err_sq_db:
	if (mthca_is_memfree(context))
		mthca_free_db(to_mctx(context)->db_tab, 
			MTHCA_DB_TYPE_SQ, qp->sq.db_index);

err_spinlock:
	cl_free(qp->wrid);
#ifdef NOT_USE_VIRTUAL_ALLOC	
	cl_free(qp->buf);
#else
	VirtualFree( qp->buf, 0, MEM_RELEASE);
#endif

err_nomem:
	cl_free(qp);

exit:
	
	UVP_EXIT(UVP_DBG_QP);
	return ERR_PTR(ret);
}
378
379 struct ibv_qp *mthca_create_qp_post(struct ibv_pd *pd, 
380         struct ibv_create_qp_resp *resp)
381 {
382         struct mthca_qp       *qp;
383         int                    ret;
384         UVP_ENTER(UVP_DBG_QP);
385         qp = (struct mthca_qp *)(ULONG_PTR)resp->user_handle;
386
387         qp->ibv_qp.handle                       = resp->qp_handle;
388         qp->ibv_qp.qp_num               = resp->qpn;
389         qp->sq.max                              = resp->max_send_wr;
390         qp->rq.max                              = resp->max_recv_wr;
391         qp->sq.max_gs                   = resp->max_send_sge;
392         qp->rq.max_gs                   = resp->max_recv_sge;
393         qp->max_inline_data     = resp->max_inline_data;
394         qp->mr.handle = resp->mr.mr_handle;
395         qp->mr.lkey = resp->mr.lkey;
396         qp->mr.rkey = resp->mr.rkey;
397         qp->mr.pd = pd;
398         qp->mr.context = pd->context;
399
400         if (mthca_is_memfree(pd->context)) {
401                 mthca_set_db_qn(qp->sq.db, MTHCA_DB_TYPE_SQ, qp->ibv_qp.qp_num);
402                 mthca_set_db_qn(qp->rq.db, MTHCA_DB_TYPE_RQ, qp->ibv_qp.qp_num);
403         }
404
405         ret = mthca_store_qp(to_mctx(pd->context), qp->ibv_qp.qp_num, qp);
406         if (ret)
407                 goto err_store_qp;
408
409         UVP_EXIT(UVP_DBG_QP);
410         return &qp->ibv_qp;
411
412 err_store_qp:
413         UVP_EXIT(UVP_DBG_QP);
414         return ERR_PTR(ret);
415 }
416
417
418 int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
419                     enum ibv_qp_attr_mask attr_mask)
420 {
421         int ret = 0;
422
423         if (attr_mask & IBV_QP_STATE)
424                 qp->state = attr->qp_state;
425
426         if ((attr_mask & IBV_QP_STATE) &&
427             (attr->qp_state == IBV_QPS_RESET)) {
428                 mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
429                                qp->srq ? to_msrq(qp->srq) : NULL);
430                 if (qp->send_cq != qp->recv_cq)
431                         mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);
432
433                 mthca_init_qp_indices(to_mqp(qp));
434
435                 if (mthca_is_memfree(qp->pd->context)) {
436                         *to_mqp(qp)->sq.db = 0;
437                         *to_mqp(qp)->rq.db = 0;
438                 }
439         }
440
441         return ret;
442 }
443
444
445 void mthca_destroy_qp_pre(struct ibv_qp *qp)
446 {
447         int ret;
448
449         mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
450                        qp->srq ? to_msrq(qp->srq) : NULL);
451         if (qp->send_cq != qp->recv_cq)
452                 mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);
453
454         cl_spinlock_acquire(&to_mcq(qp->send_cq)->lock);
455         if (qp->send_cq != qp->recv_cq)
456                 cl_spinlock_acquire(&to_mcq(qp->recv_cq)->lock);
457         mthca_clear_qp(to_mctx(qp->pd->context), qp->qp_num);
458         if (qp->send_cq != qp->recv_cq)
459                 cl_spinlock_release(&to_mcq(qp->recv_cq)->lock);
460         cl_spinlock_release(&to_mcq(qp->send_cq)->lock);
461 }
462
463 void mthca_destroy_qp_post(struct ibv_qp *qp, int ret)
464 {
465         if (ret) {
466                 cl_spinlock_acquire(&to_mcq(qp->send_cq)->lock);
467                 if (qp->send_cq != qp->recv_cq)
468                         cl_spinlock_acquire(&to_mcq(qp->recv_cq)->lock);
469                 mthca_store_qp(to_mctx(qp->pd->context), qp->qp_num, to_mqp(qp));
470                 if (qp->send_cq != qp->recv_cq)
471                         cl_spinlock_release(&to_mcq(qp->recv_cq)->lock);
472                 cl_spinlock_release(&to_mcq(qp->send_cq)->lock);
473         }
474         else {
475                 if (mthca_is_memfree(qp->pd->context)) {
476                         mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_RQ,
477                                       to_mqp(qp)->rq.db_index);
478                         mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_SQ,
479                                       to_mqp(qp)->sq.db_index);
480                 }
481
482                 cl_spinlock_destroy(&((struct mthca_qp *)qp)->sq.lock);
483                 cl_spinlock_destroy(&((struct mthca_qp *)qp)->rq.lock);
484
485 #ifdef NOT_USE_VIRTUAL_ALLOC    
486                 cl_free(to_mqp(qp)->buf);
487 #else
488                 VirtualFree( to_mqp(qp)->buf, 0, MEM_RELEASE);
489 #endif
490                 cl_free(to_mqp(qp)->wrid);
491                 cl_free(to_mqp(qp));
492         }
493
494 }
495
496 int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
497 {
498 #ifdef WIN_TO_BE_CHANGED
499         return ibv_cmd_attach_mcast(qp, gid, lid);
500 #else
501         return -ENOSYS;
502 #endif
503 }
504
505 int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
506 {
507 #ifdef WIN_TO_BE_CHANGED
508         return ibv_cmd_detach_mcast(qp, gid, lid);
509 #else
510         return -ENOSYS;
511 #endif
512 }