40a360dbb4b66fa3e2c38ede6a06f6c75f1429b6
[mirror/winof/.git] / hw / mthca / user / mlnx_uvp_verbs.c
1 /*
2  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  *
33  * $Id: verbs.c 4182 2005-11-28 21:14:30Z roland $
34  */
35
36 #include <mt_l2w.h>
37
38 #include "mlnx_uvp.h"
39 #include "mx_abi.h"
40
41 #if defined(EVENT_TRACING)
42 #include "mlnx_uvp_verbs.tmh"
43 #endif
44
45 struct ibv_pd *mthca_alloc_pd(struct ibv_context *context, struct ibv_alloc_pd_resp *resp)
46 {
47         struct mthca_pd           *pd;
48
49         pd = cl_malloc(sizeof *pd);
50         if (!pd)
51                 goto err_malloc;
52
53         if (!mthca_is_memfree(context)) {
54                 pd->ah_list = NULL;
55                 pd->ah_mutex = CreateMutex( NULL, FALSE, NULL );
56                 if (!pd->ah_mutex) 
57                         goto err_mutex;
58         }
59
60         /* fill response fields */
61         pd->ibv_pd.context = context;   
62         pd->ibv_pd.handle = resp->pd_handle;
63         pd->pdn = resp->pdn;
64
65         return &pd->ibv_pd;
66
67 err_mutex:
68         cl_free(pd);
69 err_malloc:
70         return NULL;
71 }
72
73 int mthca_free_pd(struct ibv_pd *ibv_pd)
74 {
75         struct mthca_pd *pd = to_mpd(ibv_pd);
76         if (!mthca_is_memfree(ibv_pd->context)) {
77                 struct mthca_ah_page *page, *next_page;
78                 WaitForSingleObject( pd->ah_mutex, INFINITE );
79                 for (page = pd->ah_list; page; page = next_page) {
80                         next_page = page->next;
81                         #ifdef NOT_USE_VIRTUAL_ALLOC    
82                                 cl_free(page->buf);
83                         #else
84                                 VirtualFree( page->buf, 0, MEM_RELEASE);
85                         #endif
86                         cl_free(page);
87                 }
88                 ReleaseMutex( pd->ah_mutex );
89                 CloseHandle(pd->ah_mutex);
90         }
91         cl_free(pd);
92         return 0;
93 }
94
95 /* allocate create_cq infrastructure  and fill it's request parameters structure */
96 struct ibv_cq *mthca_create_cq_pre(struct ibv_context *context, int *p_cqe,
97                                struct ibv_create_cq *req)
98 {
99         struct mthca_cq            *cq;
100         int                         nent;
101         int                         ret;
102
103         cq = cl_malloc(sizeof *cq);
104         if (!cq)
105                 goto exit;
106
107         cl_spinlock_construct(&cq->lock);
108         if (cl_spinlock_init(&cq->lock))
109                 goto err;
110
111         for (nent = 1; nent <= *p_cqe; nent <<= 1)
112                 ; /* nothing */
113
114         if (posix_memalign(&cq->buf, g_page_size,
115                            align(nent * MTHCA_CQ_ENTRY_SIZE, g_page_size)))
116                 goto err;
117
118         mthca_init_cq_buf(cq, nent);
119
120         if (mthca_is_memfree(context)) {
121                 cq->set_ci_db_index = mthca_alloc_db(to_mctx(context)->db_tab,
122                                                      MTHCA_DB_TYPE_CQ_SET_CI,
123                                                      &cq->set_ci_db);
124                 if (cq->set_ci_db_index < 0)
125                         goto err_unreg;
126
127                 cq->arm_db_index    = mthca_alloc_db(to_mctx(context)->db_tab,
128                                                      MTHCA_DB_TYPE_CQ_ARM,
129                                                      &cq->arm_db);
130                 if (cq->arm_db_index < 0)
131                         goto err_set_db;
132
133                 cq->u_arm_db_index    = mthca_alloc_db(to_mctx(context)->db_tab,
134                                                      MTHCA_DB_TYPE_CQ_ARM,
135                                                      &cq->p_u_arm_sn);
136                 if (cq->u_arm_db_index < 0)
137                         goto err_arm_db;
138
139                 *cq->p_u_arm_sn = 1;
140
141                 req->arm_db_page  = db_align(cq->arm_db);
142                 req->set_db_page  = db_align(cq->set_ci_db);
143                 req->u_arm_db_page  = (uint64_t)(ULONG_PTR)cq->p_u_arm_sn;
144                 req->arm_db_index = cq->arm_db_index;
145                 req->set_db_index = cq->set_ci_db_index;
146                 req->u_arm_db_index = cq->u_arm_db_index;
147         }
148
149         req->mr.start = (uint64_t)(ULONG_PTR)cq->buf;
150         req->mr.length = nent * MTHCA_CQ_ENTRY_SIZE;
151         req->mr.hca_va = 0;
152         req->mr.pd_handle    = to_mctx(context)->pd->handle;
153         req->mr.pdn = to_mpd(to_mctx(context)->pd)->pdn;
154         req->mr.access_flags = MTHCA_ACCESS_LOCAL_WRITE;
155         req->user_handle = (uint64_t)(ULONG_PTR)cq;
156 #if 1   
157         req->cqe = *p_cqe;
158         *p_cqe = nent-1;
159 //      *p_cqe = *p_cqe;        // return the same value
160 //      cq->ibv_cq.cqe = nent -1;
161 #else
162         req->cqe = nent;
163         *p_cqe = *p_cqe;        // return the same value
164 #endif
165         return &cq->ibv_cq;
166
167 err_arm_db:
168         if (mthca_is_memfree(context))
169                 mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
170                               cq->arm_db_index);
171
172 err_set_db:
173         if (mthca_is_memfree(context))
174                 mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
175                               cq->set_ci_db_index);
176
177 err_unreg:
178         cl_free(cq->buf);
179
180 err:
181         cl_free(cq);
182 exit:
183         return ERR_PTR(-ENOMEM);
184 }
185
186 struct ibv_cq *mthca_create_cq_post(struct ibv_context *context, 
187                                struct ibv_create_cq_resp *resp)
188 {
189         struct mthca_cq   *cq;
190         int                         ret;
191
192         cq = (struct mthca_cq *)(ULONG_PTR)resp->user_handle;
193
194         cq->cqn = resp->cqn;
195         cq->mr.handle = resp->mr.mr_handle;
196         cq->mr.lkey = resp->mr.lkey;
197         cq->mr.rkey = resp->mr.rkey;
198         cq->mr.pd = to_mctx(context)->pd;
199         cq->mr.context = context;
200         cq->ibv_cq.cqe = resp->cqe;
201         cq->ibv_cq.handle = resp->cq_handle;
202         cq->ibv_cq.context = context;
203
204         if (mthca_is_memfree(context)) {
205                 mthca_set_db_qn(cq->set_ci_db, MTHCA_DB_TYPE_CQ_SET_CI, cq->cqn);
206                 mthca_set_db_qn(cq->arm_db,    MTHCA_DB_TYPE_CQ_ARM,    cq->cqn);
207         }
208
209         return &cq->ibv_cq;
210
211 }
212
213 int mthca_destroy_cq(struct ibv_cq *cq)
214 {
215         int ret;
216
217         if (mthca_is_memfree(cq->context)) {
218                 mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
219                               to_mcq(cq)->u_arm_db_index);
220                 mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
221                               to_mcq(cq)->set_ci_db_index);
222                 mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_ARM,
223                               to_mcq(cq)->arm_db_index);
224         }
225
226 #ifdef NOT_USE_VIRTUAL_ALLOC    
227         cl_free(to_mcq(cq)->buf);
228 #else
229         VirtualFree( to_mcq(cq)->buf, 0, MEM_RELEASE);
230 #endif
231
232         
233         cl_spinlock_destroy(&((struct mthca_cq *)cq)->lock);
234         cl_free(to_mcq(cq));
235
236         return 0;
237 }
238
/*
 * Compute the effective depth for a work queue: the requested size
 * plus some spare entries, rounded up to a power of two on mem-free
 * HCAs (which require power-of-two queues).  A zero request is
 * returned unchanged — the queue is presumably unused.
 */
static int align_queue_size(struct ibv_context *context, int size, int spare)
{
	int n;

	if (!size)
		return 0;

	if (!mthca_is_memfree(context))
		return size + spare;

	for (n = 1; n < size + spare; n <<= 1)
		; /* nothing */

	return n;
}
258
259 struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd, 
260         struct ibv_qp_init_attr *attr, struct ibv_create_qp *req)
261 {
262         struct mthca_qp       *qp;
263         struct ibv_context *context = pd->context;
264         int                    ret = -ENOMEM;
265
266         UVP_ENTER(UVP_DBG_QP);
267         /* Sanity check QP size before proceeding */
268         if (attr->cap.max_send_wr     > 65536 ||
269             attr->cap.max_recv_wr     > 65536 ||
270             attr->cap.max_send_sge    > 64    ||
271             attr->cap.max_recv_sge    > 64    ||
272             attr->cap.max_inline_data > 1024) {
273                 ret = -EINVAL;
274                 UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("sanity checks  failed (%d)\n",ret));
275                 goto exit;
276                 }
277
278         qp = cl_malloc(sizeof *qp);
279         if (!qp) {
280                 UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_malloc  failed (%d)\n",ret));
281                 goto err_nomem;
282         }       
283
284         qp->sq.max = align_queue_size(context, attr->cap.max_send_wr, 0);
285         qp->rq.max = align_queue_size(context, attr->cap.max_recv_wr, 0);
286
287         if (mthca_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp)) {
288                 UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("mthca_alloc_qp_buf  failed (%d)\n",ret));
289                 goto err_nomem;
290         } 
291
292         mthca_init_qp_indices(qp);
293
294         cl_spinlock_construct(&qp->sq.lock);
295         cl_spinlock_construct(&qp->rq.lock);
296         if (cl_spinlock_init(&qp->sq.lock) || cl_spinlock_init(&qp->rq.lock)) {
297                 ret = -EFAULT;
298                 UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_spinlock_init failed (%d)\n",ret));
299                 goto err_spinlock;
300         }
301
302         if (mthca_is_memfree(context)) {
303                 qp->sq.db_index = mthca_alloc_db(to_mctx(context)->db_tab,
304                                                  MTHCA_DB_TYPE_SQ,
305                                                  &qp->sq.db);
306                 if (qp->sq.db_index < 0)
307                         goto err_spinlock;
308
309                 qp->rq.db_index = mthca_alloc_db(to_mctx(context)->db_tab,
310                                                  MTHCA_DB_TYPE_RQ,
311                                                  &qp->rq.db);
312                 if (qp->rq.db_index < 0)
313                         goto err_sq_db;
314
315                 req->sq_db_page  = db_align(qp->sq.db);
316                 req->rq_db_page  = db_align(qp->rq.db);
317                 req->sq_db_index = qp->sq.db_index;
318                 req->rq_db_index = qp->rq.db_index;
319         }
320
321         // fill the rest qp fields
322         qp->ibv_qp      .pd = pd;
323         qp->ibv_qp.send_cq = attr->send_cq;
324         qp->ibv_qp.recv_cq = attr->recv_cq;
325         qp->ibv_qp.srq = attr->srq;
326         qp->ibv_qp.state = IBV_QPS_RESET;
327         qp->ibv_qp.qp_type = attr->qp_type;
328
329         // fill the rest request fields
330         req->mr.start = (uint64_t)(ULONG_PTR)qp->buf;
331         req->mr.length = qp->buf_size;
332         req->mr.hca_va = 0;
333         req->mr.pd_handle    = pd->handle;
334         req->mr.pdn = to_mpd(pd)->pdn;
335         req->mr.access_flags = 0;       //local read
336         req->user_handle = (uint64_t)(ULONG_PTR)qp;
337         req->send_cq_handle = attr->send_cq->handle;
338         req->recv_cq_handle = attr->recv_cq->handle;
339         req->srq_handle = (attr->srq) ? attr->srq->handle : 0;
340         req->max_send_wr = attr->cap.max_send_wr;
341         req->max_recv_wr = attr->cap.max_recv_wr;
342         req->max_send_sge = attr->cap.max_send_sge;
343         req->max_recv_sge = attr->cap.max_recv_sge;
344         req->max_inline_data = attr->cap.max_inline_data;
345         req->sq_sig_all = (uint8_t)attr->sq_sig_all;
346         req->qp_type = attr->qp_type;
347         req->is_srq = !!attr->srq;
348
349
350         UVP_EXIT(UVP_DBG_QP);
351         return &qp->ibv_qp;
352
353 err_sq_db:
354         if (mthca_is_memfree(context))
355                 mthca_free_db(to_mctx(context)->db_tab, 
356                         MTHCA_DB_TYPE_SQ, qp->sq.db_index);
357
358 err_spinlock:
359         cl_free(qp->wrid);
360 #ifdef NOT_USE_VIRTUAL_ALLOC    
361         cl_free(qp->buf);
362 #else
363         VirtualFree( qp->buf, 0, MEM_RELEASE);
364 #endif
365
366 err_nomem:
367         cl_free(qp);
368
369 exit:
370         
371         UVP_EXIT(UVP_DBG_QP);
372         return ERR_PTR(ret);
373 }
374
375 struct ibv_qp *mthca_create_qp_post(struct ibv_pd *pd, 
376         struct ibv_create_qp_resp *resp)
377 {
378         struct mthca_qp       *qp;
379         int                    ret;
380         UVP_ENTER(UVP_DBG_QP);
381         qp = (struct mthca_qp *)(ULONG_PTR)resp->user_handle;
382
383         qp->ibv_qp.handle                       = resp->qp_handle;
384         qp->ibv_qp.qp_num               = resp->qpn;
385         qp->sq.max                              = resp->max_send_wr;
386         qp->rq.max                              = resp->max_recv_wr;
387         qp->sq.max_gs                   = resp->max_send_sge;
388         qp->rq.max_gs                   = resp->max_recv_sge;
389         qp->max_inline_data     = resp->max_inline_data;
390         qp->mr.handle = resp->mr.mr_handle;
391         qp->mr.lkey = resp->mr.lkey;
392         qp->mr.rkey = resp->mr.rkey;
393         qp->mr.pd = pd;
394         qp->mr.context = pd->context;
395
396         if (mthca_is_memfree(pd->context)) {
397                 mthca_set_db_qn(qp->sq.db, MTHCA_DB_TYPE_SQ, qp->ibv_qp.qp_num);
398                 mthca_set_db_qn(qp->rq.db, MTHCA_DB_TYPE_RQ, qp->ibv_qp.qp_num);
399         }
400
401         ret = mthca_store_qp(to_mctx(pd->context), qp->ibv_qp.qp_num, qp);
402         if (ret)
403                 goto err_store_qp;
404
405         UVP_EXIT(UVP_DBG_QP);
406         return &qp->ibv_qp;
407
408 err_store_qp:
409         UVP_EXIT(UVP_DBG_QP);
410         return ERR_PTR(ret);
411 }
412
413
414 int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
415                     enum ibv_qp_attr_mask attr_mask)
416 {
417         int ret = 0;
418
419         if (attr_mask & IBV_QP_STATE)
420                 qp->state = attr->qp_state;
421
422         if ((attr_mask & IBV_QP_STATE) &&
423             (attr->qp_state == IBV_QPS_RESET)) {
424                 mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
425                                qp->srq ? to_msrq(qp->srq) : NULL);
426                 if (qp->send_cq != qp->recv_cq)
427                         mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);
428
429                 mthca_init_qp_indices(to_mqp(qp));
430
431                 if (mthca_is_memfree(qp->pd->context)) {
432                         *to_mqp(qp)->sq.db = 0;
433                         *to_mqp(qp)->rq.db = 0;
434                 }
435         }
436
437         return ret;
438 }
439
440
441 void mthca_destroy_qp_pre(struct ibv_qp *qp)
442 {
443         int ret;
444
445         mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
446                        qp->srq ? to_msrq(qp->srq) : NULL);
447         if (qp->send_cq != qp->recv_cq)
448                 mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);
449
450         cl_spinlock_acquire(&to_mcq(qp->send_cq)->lock);
451         if (qp->send_cq != qp->recv_cq)
452                 cl_spinlock_acquire(&to_mcq(qp->recv_cq)->lock);
453         mthca_clear_qp(to_mctx(qp->pd->context), qp->qp_num);
454         if (qp->send_cq != qp->recv_cq)
455                 cl_spinlock_release(&to_mcq(qp->recv_cq)->lock);
456         cl_spinlock_release(&to_mcq(qp->send_cq)->lock);
457 }
458
459 void mthca_destroy_qp_post(struct ibv_qp *qp, int ret)
460 {
461         if (ret) {
462                 cl_spinlock_acquire(&to_mcq(qp->send_cq)->lock);
463                 if (qp->send_cq != qp->recv_cq)
464                         cl_spinlock_acquire(&to_mcq(qp->recv_cq)->lock);
465                 mthca_store_qp(to_mctx(qp->pd->context), qp->qp_num, to_mqp(qp));
466                 if (qp->send_cq != qp->recv_cq)
467                         cl_spinlock_release(&to_mcq(qp->recv_cq)->lock);
468                 cl_spinlock_release(&to_mcq(qp->send_cq)->lock);
469         }
470         else {
471                 if (mthca_is_memfree(qp->pd->context)) {
472                         mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_RQ,
473                                       to_mqp(qp)->rq.db_index);
474                         mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_SQ,
475                                       to_mqp(qp)->sq.db_index);
476                 }
477
478                 cl_spinlock_destroy(&((struct mthca_qp *)qp)->sq.lock);
479                 cl_spinlock_destroy(&((struct mthca_qp *)qp)->rq.lock);
480
481 #ifdef NOT_USE_VIRTUAL_ALLOC    
482                 cl_free(to_mqp(qp)->buf);
483 #else
484                 VirtualFree( to_mqp(qp)->buf, 0, MEM_RELEASE);
485 #endif
486                 cl_free(to_mqp(qp)->wrid);
487                 cl_free(to_mqp(qp));
488         }
489
490 }
491
492 int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
493 {
494 #ifdef WIN_TO_BE_CHANGED
495         return ibv_cmd_attach_mcast(qp, gid, lid);
496 #else
497         return -ENOSYS;
498 #endif
499 }
500
501 int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
502 {
503 #ifdef WIN_TO_BE_CHANGED
504         return ibv_cmd_detach_mcast(qp, gid, lid);
505 #else
506         return -ENOSYS;
507 #endif
508 }