2ee6cafc4d4ddd83e7fc84064382dc44df94074b
[mirror/winof/.git] / hw / mthca / user / mlnx_uvp_verbs.c
1 /*
2  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  *
33  * $Id: verbs.c 4182 2005-11-28 21:14:30Z roland $
34  */
35
36 #include <mt_l2w.h>
37
38 #include "mlnx_uvp.h"
39 #include "mx_abi.h"
40
41 #if defined(EVENT_TRACING)
42 #include "mlnx_uvp_verbs.tmh"
43 #endif
44
45 struct ibv_pd *mthca_alloc_pd(struct ibv_context *context, struct ibv_alloc_pd_resp *resp)
46 {
47         struct mthca_pd           *pd;
48
49         pd = cl_malloc(sizeof *pd);
50         if (!pd)
51                 goto err_malloc;
52
53         if (!mthca_is_memfree(context)) {
54                 pd->ah_list = NULL;
55                 pd->ah_mutex = CreateMutex( NULL, FALSE, NULL );
56                 if (!pd->ah_mutex) 
57                         goto err_mutex;
58         }
59
60         /* fill response fields */
61         pd->ibv_pd.context = context;   
62         pd->ibv_pd.handle = resp->pd_handle;
63         pd->pdn = resp->pdn;
64
65         return &pd->ibv_pd;
66
67 err_mutex:
68         cl_free(pd);
69 err_malloc:
70         return NULL;
71 }
72
73 int mthca_free_pd(struct ibv_pd *ibv_pd)
74 {
75         struct mthca_pd *pd = to_mpd(ibv_pd);
76         if (!mthca_is_memfree(ibv_pd->context)) 
77                 CloseHandle(pd->ah_mutex);
78         cl_free(pd);
79         return 0;
80 }
81
82 /* allocate create_cq infrastructure  and fill it's request parameters structure */
83 struct ibv_cq *mthca_create_cq_pre(struct ibv_context *context, int *p_cqe,
84                                struct ibv_create_cq *req)
85 {
86         struct mthca_cq            *cq;
87         int                         nent;
88         int                         ret;
89
90         cq = cl_malloc(sizeof *cq);
91         if (!cq)
92                 goto exit;
93
94         cl_spinlock_construct(&cq->lock);
95         if (cl_spinlock_init(&cq->lock))
96                 goto err;
97
98         for (nent = 1; nent <= *p_cqe; nent <<= 1)
99                 ; /* nothing */
100
101         if (posix_memalign(&cq->buf, g_page_size,
102                            align(nent * MTHCA_CQ_ENTRY_SIZE, g_page_size)))
103                 goto err;
104
105         mthca_init_cq_buf(cq, nent);
106
107         if (mthca_is_memfree(context)) {
108                 cq->arm_sn          = 1;
109                 cq->set_ci_db_index = mthca_alloc_db(to_mctx(context)->db_tab,
110                                                      MTHCA_DB_TYPE_CQ_SET_CI,
111                                                      &cq->set_ci_db);
112                 if (cq->set_ci_db_index < 0)
113                         goto err_unreg;
114
115                 cq->arm_db_index    = mthca_alloc_db(to_mctx(context)->db_tab,
116                                                      MTHCA_DB_TYPE_CQ_ARM,
117                                                      &cq->arm_db);
118                 if (cq->arm_db_index < 0)
119                         goto err_set_db;
120
121                 req->arm_db_page  = db_align(cq->arm_db);
122                 req->set_db_page  = db_align(cq->set_ci_db);
123                 req->arm_db_index = cq->arm_db_index;
124                 req->set_db_index = cq->set_ci_db_index;
125         }
126
127         req->mr.start = (uint64_t)(ULONG_PTR)cq->buf;
128         req->mr.length = nent * MTHCA_CQ_ENTRY_SIZE;
129         req->mr.hca_va = 0;
130         req->mr.pd_handle    = to_mctx(context)->pd->handle;
131         req->mr.pdn = to_mpd(to_mctx(context)->pd)->pdn;
132         req->mr.access_flags = MTHCA_ACCESS_LOCAL_WRITE;
133         req->user_handle = (uint64_t)(ULONG_PTR)cq;
134 #if 1   
135         req->cqe = *p_cqe;
136         *p_cqe = nent-1;
137 //      *p_cqe = *p_cqe;        // return the same value
138 //      cq->ibv_cq.cqe = nent -1;
139 #else
140         req->cqe = nent;
141         *p_cqe = *p_cqe;        // return the same value
142 #endif
143         return &cq->ibv_cq;
144
145 err_set_db:
146         if (mthca_is_memfree(context))
147                 mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
148                               cq->set_ci_db_index);
149
150 err_unreg:
151         cl_free(cq->buf);
152
153 err:
154         cl_free(cq);
155 exit:
156         return ERR_PTR(-ENOMEM);
157 }
158
159 struct ibv_cq *mthca_create_cq_post(struct ibv_context *context, 
160                                struct ibv_create_cq_resp *resp)
161 {
162         struct mthca_cq   *cq;
163         int                         ret;
164
165         cq = (struct mthca_cq *)(ULONG_PTR)resp->user_handle;
166
167         cq->cqn = resp->cqn;
168         cq->mr.handle = resp->mr.mr_handle;
169         cq->mr.lkey = resp->mr.lkey;
170         cq->mr.rkey = resp->mr.rkey;
171         cq->mr.pd = to_mctx(context)->pd;
172         cq->mr.context = context;
173         cq->ibv_cq.cqe = resp->cqe;
174         cq->ibv_cq.handle = resp->cq_handle;
175         cq->ibv_cq.context = context;
176
177         if (mthca_is_memfree(context)) {
178                 mthca_set_db_qn(cq->set_ci_db, MTHCA_DB_TYPE_CQ_SET_CI, cq->cqn);
179                 mthca_set_db_qn(cq->arm_db,    MTHCA_DB_TYPE_CQ_ARM,    cq->cqn);
180         }
181
182         return &cq->ibv_cq;
183
184 }
185
186 int mthca_destroy_cq(struct ibv_cq *cq)
187 {
188         int ret;
189
190         if (mthca_is_memfree(cq->context)) {
191                 mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
192                               to_mcq(cq)->set_ci_db_index);
193                 mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_ARM,
194                               to_mcq(cq)->arm_db_index);
195         }
196
197 #ifdef NOT_USE_VIRTUAL_ALLOC    
198         cl_free(to_mcq(cq)->buf);
199 #else
200         VirtualFree( to_mcq(cq)->buf, 0, MEM_RELEASE);
201 #endif
202
203         
204         cl_spinlock_destroy(&((struct mthca_cq *)cq)->lock);
205         cl_free(to_mcq(cq));
206
207         return 0;
208 }
209
/* Compute the allocated depth for a work queue of 'size' entries plus
 * 'spare' extra slots.  Memfree HCAs get the next power of two >=
 * size + spare; older HCAs simply get size + spare.  A zero size is
 * returned unchanged. */
static int align_queue_size(struct ibv_context *context, int size, int spare)
{
	int aligned;

	/*
	 * If someone asks for a 0-sized queue, presumably they're not
	 * going to use it.  So don't mess with their size.
	 */
	if (size == 0)
		return 0;

	if (!mthca_is_memfree(context))
		return size + spare;

	for (aligned = 1; aligned < size + spare; aligned <<= 1)
		; /* nothing */

	return aligned;
}
229
230 struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd, 
231         struct ibv_qp_init_attr *attr, struct ibv_create_qp *req)
232 {
233         struct mthca_qp       *qp;
234         struct ibv_context *context = pd->context;
235         int                    ret;
236
237         UVP_ENTER(UVP_DBG_QP);
238         /* Sanity check QP size before proceeding */
239         if (attr->cap.max_send_wr     > 65536 ||
240             attr->cap.max_recv_wr     > 65536 ||
241             attr->cap.max_send_sge    > 64    ||
242             attr->cap.max_recv_sge    > 64    ||
243             attr->cap.max_inline_data > 1024) {
244                 ret = -EINVAL;
245                 UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("sanity checks  failed (%d)\n",ret));
246                 goto exit;
247                 }
248
249         qp = cl_malloc(sizeof *qp);
250         if (!qp) {
251                 ret = -ENOMEM;
252                 UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_malloc  failed (%d)\n",ret));
253                 goto err_nomem;
254         }       
255
256         qp->sq.max = align_queue_size(context, attr->cap.max_send_wr, 0);
257         qp->rq.max = align_queue_size(context, attr->cap.max_recv_wr, 0);
258
259         if (mthca_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp)) {
260                 ret = -ENOMEM;
261                 UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("mthca_alloc_qp_buf  failed (%d)\n",ret));
262                 goto err_nomem;
263         } 
264
265         mthca_init_qp_indices(qp);
266
267         cl_spinlock_construct(&qp->sq.lock);
268         cl_spinlock_construct(&qp->rq.lock);
269         if (cl_spinlock_init(&qp->sq.lock) || cl_spinlock_init(&qp->rq.lock)) {
270                 ret = -EFAULT;
271                 UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_spinlock_init failed (%d)\n",ret));
272                 goto err_spinlock;
273         }
274
275         if (mthca_is_memfree(context)) {
276                 qp->sq.db_index = mthca_alloc_db(to_mctx(context)->db_tab,
277                                                  MTHCA_DB_TYPE_SQ,
278                                                  &qp->sq.db);
279                 if (qp->sq.db_index < 0)
280                         goto err_spinlock;
281
282                 qp->rq.db_index = mthca_alloc_db(to_mctx(context)->db_tab,
283                                                  MTHCA_DB_TYPE_RQ,
284                                                  &qp->rq.db);
285                 if (qp->rq.db_index < 0)
286                         goto err_sq_db;
287
288                 req->sq_db_page  = db_align(qp->sq.db);
289                 req->rq_db_page  = db_align(qp->rq.db);
290                 req->sq_db_index = qp->sq.db_index;
291                 req->rq_db_index = qp->rq.db_index;
292         }
293
294         // fill the rest qp fields
295         qp->ibv_qp      .pd = pd;
296         qp->ibv_qp.send_cq = attr->send_cq;
297         qp->ibv_qp.recv_cq = attr->recv_cq;
298         qp->ibv_qp.srq = attr->srq;
299         qp->ibv_qp.state = IBV_QPS_RESET;
300         qp->ibv_qp.qp_type = attr->qp_type;
301
302         // fill the rest request fields
303         req->mr.start = (uint64_t)(ULONG_PTR)qp->buf;
304         req->mr.length = qp->buf_size;
305         req->mr.hca_va = 0;
306         req->mr.pd_handle    = pd->handle;
307         req->mr.pdn = to_mpd(pd)->pdn;
308         req->mr.access_flags = 0;       //local read
309         req->user_handle = (uint64_t)(ULONG_PTR)qp;
310         req->send_cq_handle = attr->send_cq->handle;
311         req->recv_cq_handle = attr->recv_cq->handle;
312         req->srq_handle = (attr->srq) ? attr->srq->handle : 0;
313         req->max_send_wr = attr->cap.max_send_wr;
314         req->max_recv_wr = attr->cap.max_recv_wr;
315         req->max_send_sge = attr->cap.max_send_sge;
316         req->max_recv_sge = attr->cap.max_recv_sge;
317         req->max_inline_data = attr->cap.max_inline_data;
318         req->sq_sig_all = (uint8_t)attr->sq_sig_all;
319         req->qp_type = attr->qp_type;
320         req->is_srq = !!attr->srq;
321
322
323         UVP_EXIT(UVP_DBG_QP);
324         return &qp->ibv_qp;
325
326 err_sq_db:
327         if (mthca_is_memfree(context))
328                 mthca_free_db(to_mctx(context)->db_tab, 
329                         MTHCA_DB_TYPE_SQ, qp->sq.db_index);
330
331 err_spinlock:
332         cl_free(qp->wrid);
333 #ifdef NOT_USE_VIRTUAL_ALLOC    
334         cl_free(qp->buf);
335 #else
336         VirtualFree( qp->buf, 0, MEM_RELEASE);
337 #endif
338
339 err_nomem:
340         cl_free(qp);
341
342 exit:
343         
344         UVP_EXIT(UVP_DBG_QP);
345         return ERR_PTR(ret);
346 }
347
348 struct ibv_qp *mthca_create_qp_post(struct ibv_pd *pd, 
349         struct ibv_create_qp_resp *resp)
350 {
351         struct mthca_qp       *qp;
352         int                    ret;
353         UVP_ENTER(UVP_DBG_QP);
354         qp = (struct mthca_qp *)(ULONG_PTR)resp->user_handle;
355
356         qp->ibv_qp.handle                       = resp->qp_handle;
357         qp->ibv_qp.qp_num               = resp->qpn;
358         qp->sq.max                              = resp->max_send_wr;
359         qp->rq.max                              = resp->max_recv_wr;
360         qp->sq.max_gs                   = resp->max_send_sge;
361         qp->rq.max_gs                   = resp->max_recv_sge;
362         qp->max_inline_data     = resp->max_inline_data;
363         qp->mr.handle = resp->mr.mr_handle;
364         qp->mr.lkey = resp->mr.lkey;
365         qp->mr.rkey = resp->mr.rkey;
366         qp->mr.pd = pd;
367         qp->mr.context = pd->context;
368
369         if (mthca_is_memfree(pd->context)) {
370                 mthca_set_db_qn(qp->sq.db, MTHCA_DB_TYPE_SQ, qp->ibv_qp.qp_num);
371                 mthca_set_db_qn(qp->rq.db, MTHCA_DB_TYPE_RQ, qp->ibv_qp.qp_num);
372         }
373
374         ret = mthca_store_qp(to_mctx(pd->context), qp->ibv_qp.qp_num, qp);
375         if (ret)
376                 goto err_store_qp;
377
378         UVP_EXIT(UVP_DBG_QP);
379         return &qp->ibv_qp;
380
381 err_store_qp:
382         UVP_EXIT(UVP_DBG_QP);
383         return ERR_PTR(ret);
384 }
385
386
387 int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
388                     enum ibv_qp_attr_mask attr_mask)
389 {
390         int ret = 0;
391
392         if (attr_mask & IBV_QP_STATE)
393                 qp->state = attr->qp_state;
394
395         if ((attr_mask & IBV_QP_STATE) &&
396             (attr->qp_state == IBV_QPS_RESET)) {
397                 mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
398                                qp->srq ? to_msrq(qp->srq) : NULL);
399                 if (qp->send_cq != qp->recv_cq)
400                         mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);
401
402                 mthca_init_qp_indices(to_mqp(qp));
403
404                 if (mthca_is_memfree(qp->pd->context)) {
405                         *to_mqp(qp)->sq.db = 0;
406                         *to_mqp(qp)->rq.db = 0;
407                 }
408         }
409
410         return ret;
411 }
412
413 int mthca_destroy_qp(struct ibv_qp *qp)
414 {
415         int ret;
416
417         mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
418                        qp->srq ? to_msrq(qp->srq) : NULL);
419         if (qp->send_cq != qp->recv_cq)
420                 mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);
421
422         cl_spinlock_acquire(&to_mcq(qp->send_cq)->lock);
423         if (qp->send_cq != qp->recv_cq)
424                 cl_spinlock_acquire(&to_mcq(qp->recv_cq)->lock);
425         mthca_clear_qp(to_mctx(qp->pd->context), qp->qp_num);
426         if (qp->send_cq != qp->recv_cq)
427                 cl_spinlock_release(&to_mcq(qp->recv_cq)->lock);
428         cl_spinlock_release(&to_mcq(qp->send_cq)->lock);
429
430         if (mthca_is_memfree(qp->pd->context)) {
431                 mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_RQ,
432                               to_mqp(qp)->rq.db_index);
433                 mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_SQ,
434                               to_mqp(qp)->sq.db_index);
435         }
436
437         cl_spinlock_destroy(&((struct mthca_qp *)qp)->sq.lock);
438         cl_spinlock_destroy(&((struct mthca_qp *)qp)->rq.lock);
439
440 #ifdef NOT_USE_VIRTUAL_ALLOC    
441         cl_free(to_mqp(qp)->buf);
442 #else
443         VirtualFree( to_mqp(qp)->buf, 0, MEM_RELEASE);
444 #endif
445         cl_free(to_mqp(qp)->wrid);
446         cl_free(to_mqp(qp));
447
448         return 0;
449 }
450
/* Attach a QP to the multicast group identified by (gid, lid).
 * NOTE(review): the ibv_cmd path is not wired up in this Windows port
 * (WIN_TO_BE_CHANGED is normally undefined), so this currently always
 * returns -ENOSYS. */
int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
{
#ifdef WIN_TO_BE_CHANGED
	return ibv_cmd_attach_mcast(qp, gid, lid);
#else
	return -ENOSYS;
#endif
}
459
/* Detach a QP from the multicast group identified by (gid, lid).
 * NOTE(review): as with mthca_attach_mcast, the ibv_cmd path is not
 * wired up in this Windows port (WIN_TO_BE_CHANGED normally undefined),
 * so this currently always returns -ENOSYS. */
int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
{
#ifdef WIN_TO_BE_CHANGED
	return ibv_cmd_detach_mcast(qp, gid, lid);
#else
	return -ENOSYS;
#endif
}