/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "mthca_wqe.h"

#if defined(EVENT_TRACING)
#include "mlnx_uvp_verbs.tmh"
#endif
struct ibv_pd *mthca_alloc_pd(struct ibv_context *context, struct ibv_alloc_pd_resp *resp)
	pd = cl_zalloc(sizeof *pd);

	if (!mthca_is_memfree(context)) {
		pd->ah_mutex = CreateMutex( NULL, FALSE, NULL );

	/* fill response fields */
	pd->ibv_pd.context = context;
	pd->ibv_pd.handle = resp->pd_handle;
int mthca_free_pd(struct ibv_pd *ibv_pd)
	struct mthca_pd *pd = to_mpd(ibv_pd);
	if (!mthca_is_memfree(ibv_pd->context)) {
		struct mthca_ah_page *page, *next_page;
		WaitForSingleObject( pd->ah_mutex, INFINITE );
		for (page = pd->ah_list; page; page = next_page) {
			next_page = page->next;
#ifdef NOT_USE_VIRTUAL_ALLOC
			cl_free(page->buf);
#else
			VirtualFree( page->buf, 0, MEM_RELEASE);
#endif
		ReleaseMutex( pd->ah_mutex );
		CloseHandle(pd->ah_mutex);
/* allocate create_cq infrastructure and fill its request parameters structure */
struct ibv_cq *mthca_create_cq_pre(struct ibv_context *context, int *p_cqe,
				   struct ibv_create_cq *req)
	/* Sanity check CQ size before proceeding */

	cq = cl_zalloc(sizeof *cq);

	cl_spinlock_construct(&cq->lock);
	if (cl_spinlock_init(&cq->lock))

	for (nent = 1; nent <= *p_cqe; nent <<= 1)
		; /* nothing */

	if (posix_memalign(&cq->buf, g_page_size,
			   align(nent * MTHCA_CQ_ENTRY_SIZE, g_page_size)))

	mthca_init_cq_buf(cq, nent);

	if (mthca_is_memfree(context)) {
		cq->set_ci_db_index = mthca_alloc_db(to_mctx(context)->db_tab,
						     MTHCA_DB_TYPE_CQ_SET_CI,
		if (cq->set_ci_db_index < 0)

		cq->arm_db_index = mthca_alloc_db(to_mctx(context)->db_tab,
						  MTHCA_DB_TYPE_CQ_ARM,
		if (cq->arm_db_index < 0)

		cq->u_arm_db_index = mthca_alloc_db(to_mctx(context)->db_tab,
						    MTHCA_DB_TYPE_CQ_ARM,
		if (cq->u_arm_db_index < 0)

		req->arm_db_page = db_align(cq->arm_db);
		req->set_db_page = db_align(cq->set_ci_db);
		req->u_arm_db_page = (uint64_t)(ULONG_PTR)cq->p_u_arm_sn;
		req->arm_db_index = cq->arm_db_index;
		req->set_db_index = cq->set_ci_db_index;
		req->u_arm_db_index = cq->u_arm_db_index;

	req->mr.start = (uint64_t)(ULONG_PTR)cq->buf;
	req->mr.length = nent * MTHCA_CQ_ENTRY_SIZE;
	req->mr.pd_handle = to_mctx(context)->pd->handle;
	req->mr.pdn = to_mpd(to_mctx(context)->pd)->pdn;
	req->mr.access_flags = MTHCA_ACCESS_LOCAL_WRITE;
	req->user_handle = (uint64_t)(ULONG_PTR)cq;
	/* return the requested CQE count unchanged; cq->ibv_cq.cqe is filled
	 * in later from the kernel response (see mthca_create_cq_post) */
	*p_cqe = *p_cqe;
	if (mthca_is_memfree(context))
		mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,

	if (mthca_is_memfree(context))
		mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
			      cq->set_ci_db_index);

	cl_spinlock_destroy(&cq->lock);

	return ERR_PTR(-ENOMEM);
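
/*
 * Illustrative usage sketch only, assuming the caller owns the transport to
 * the kernel driver; the helper name send_create_cq_ioctl() is hypothetical.
 * mthca_create_cq_pre() builds the ibv_create_cq request, and
 * mthca_create_cq_post() below consumes the kernel's ibv_create_cq_resp:
 *
 *	struct ibv_create_cq      req;
 *	struct ibv_create_cq_resp resp;
 *	struct ibv_cq *cq;
 *
 *	cq = mthca_create_cq_pre(context, &n_cqe, &req);
 *	if (!IS_ERR(cq) && !send_create_cq_ioctl(context, &req, &resp))
 *		cq = mthca_create_cq_post(context, &resp);
 */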
struct ibv_cq *mthca_create_cq_post(struct ibv_context *context,
				    struct ibv_create_cq_resp *resp)
	cq = (struct mthca_cq *)(ULONG_PTR)resp->user_handle;

	cq->mr.handle = resp->mr.mr_handle;
	cq->mr.lkey = resp->mr.lkey;
	cq->mr.rkey = resp->mr.rkey;
	cq->mr.pd = to_mctx(context)->pd;
	cq->mr.context = context;
	cq->ibv_cq.cqe = resp->cqe;
	cq->ibv_cq.handle = resp->cq_handle;
	cq->ibv_cq.context = context;

	if (mthca_is_memfree(context)) {
		mthca_set_db_qn(cq->set_ci_db, MTHCA_DB_TYPE_CQ_SET_CI, cq->cqn);
		mthca_set_db_qn(cq->arm_db, MTHCA_DB_TYPE_CQ_ARM, cq->cqn);
int mthca_destroy_cq(struct ibv_cq *cq)
	if (mthca_is_memfree(cq->context)) {
		mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
			      to_mcq(cq)->u_arm_db_index);
		mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
			      to_mcq(cq)->set_ci_db_index);
		mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_ARM,
			      to_mcq(cq)->arm_db_index);

#ifdef NOT_USE_VIRTUAL_ALLOC
	cl_free(to_mcq(cq)->buf);
#else
	VirtualFree( to_mcq(cq)->buf, 0, MEM_RELEASE);
#endif

	cl_spinlock_destroy(&((struct mthca_cq *)cq)->lock);
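
/*
 * Rounds a requested queue depth up to a size the HCA can use.  For
 * example, on a mem-free HCA a request of size = 100 with spare = 1 is
 * rounded up by the shift loop below to 128, the first power of two that
 * is >= size + spare.
 */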
int align_queue_size(struct ibv_context *context, int size, int spare)
	/*
	 * If someone asks for a 0-sized queue, presumably they're not
	 * going to use it. So don't mess with their size.
	 */

	if (mthca_is_memfree(context)) {
		for (ret = 1; ret < size + spare; ret <<= 1)
			; /* nothing */
struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd,
				   struct ibv_qp_init_attr *attr, struct ibv_create_qp *req)
	struct ibv_context *context = pd->context;

	UVP_ENTER(UVP_DBG_QP);
	/* Sanity check QP size before proceeding */
	if (attr->cap.max_send_wr > 65536 ||
	    attr->cap.max_recv_wr > 65536 ||
	    attr->cap.max_send_sge > 64 ||
	    attr->cap.max_recv_sge > 64 ||
	    attr->cap.max_inline_data > 1024) {
		UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("sanity checks failed (%d)\n",ret));
	qp = cl_zalloc(sizeof *qp);
		UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_zalloc failed (%d)\n",ret));
	qp->sq.max = align_queue_size(context, attr->cap.max_send_wr, 0);
	qp->rq.max = align_queue_size(context, attr->cap.max_recv_wr, 0);

	if (mthca_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp)) {
		UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("mthca_alloc_qp_buf failed (%d)\n",ret));

	mthca_init_qp_indices(qp);

	cl_spinlock_construct(&qp->sq.lock);
	if (cl_spinlock_init(&qp->sq.lock)) {
		UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_spinlock_init failed for sq (%d)\n",ret));
		goto err_spinlock_sq;

	cl_spinlock_construct(&qp->rq.lock);
	if (cl_spinlock_init(&qp->rq.lock)) {
		UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_spinlock_init failed for rq (%d)\n",ret));
		goto err_spinlock_rq;

	if (mthca_is_memfree(context)) {
		qp->sq.db_index = mthca_alloc_db(to_mctx(context)->db_tab,
		if (qp->sq.db_index < 0)

		qp->rq.db_index = mthca_alloc_db(to_mctx(context)->db_tab,
		if (qp->rq.db_index < 0)

		req->sq_db_page = db_align(qp->sq.db);
		req->rq_db_page = db_align(qp->rq.db);
		req->sq_db_index = qp->sq.db_index;
		req->rq_db_index = qp->rq.db_index;
	// fill in the rest of the qp fields
	qp->ibv_qp.send_cq = attr->send_cq;
	qp->ibv_qp.recv_cq = attr->recv_cq;
	qp->ibv_qp.srq = attr->srq;
	qp->ibv_qp.state = IBV_QPS_RESET;
	qp->ibv_qp.qp_type = attr->qp_type;

	// fill in the rest of the request fields
	req->mr.start = (uint64_t)(ULONG_PTR)qp->buf;
	req->mr.length = qp->buf_size;
	req->mr.pd_handle = pd->handle;
	req->mr.pdn = to_mpd(pd)->pdn;
	req->mr.access_flags = 0;	// local read only
	req->user_handle = (uint64_t)(ULONG_PTR)qp;
	req->send_cq_handle = attr->send_cq->handle;
	req->recv_cq_handle = attr->recv_cq->handle;
	req->srq_handle = (attr->srq) ? attr->srq->handle : 0;
	req->max_send_wr = attr->cap.max_send_wr;
	req->max_recv_wr = attr->cap.max_recv_wr;
	req->max_send_sge = attr->cap.max_send_sge;
	req->max_recv_sge = attr->cap.max_recv_sge;
	req->max_inline_data = attr->cap.max_inline_data;
	req->sq_sig_all = (uint8_t)attr->sq_sig_all;
	req->qp_type = attr->qp_type;
	req->is_srq = !!attr->srq;

	UVP_EXIT(UVP_DBG_QP);
	if (mthca_is_memfree(context))
		mthca_free_db(to_mctx(context)->db_tab,
			      MTHCA_DB_TYPE_SQ, qp->sq.db_index);

	cl_spinlock_destroy(&qp->rq.lock);

	cl_spinlock_destroy(&qp->sq.lock);

#ifdef NOT_USE_VIRTUAL_ALLOC
	cl_free(qp->buf);
#else
	VirtualFree( qp->buf, 0, MEM_RELEASE);
#endif

	UVP_EXIT(UVP_DBG_QP);
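
/*
 * Illustrative only: the QP analogue of the CQ pre/post pair above.  The
 * kernel round trip in the middle is the caller's responsibility (the
 * helper name send_create_qp_ioctl() is hypothetical):
 *
 *	struct ibv_create_qp      req;
 *	struct ibv_create_qp_resp resp;
 *	struct ibv_qp *qp;
 *
 *	qp = mthca_create_qp_pre(pd, &init_attr, &req);
 *	if (!IS_ERR(qp) && !send_create_qp_ioctl(pd->context, &req, &resp))
 *		qp = mthca_create_qp_post(pd, &resp);
 *
 * Note that mthca_create_qp_post() takes the final queue capabilities
 * (max_send_wr, max_recv_wr, max_send_sge, max_recv_sge, max_inline_data)
 * from the kernel response, not from the values requested in init_attr.
 */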
struct ibv_qp *mthca_create_qp_post(struct ibv_pd *pd,
				    struct ibv_create_qp_resp *resp)
	UVP_ENTER(UVP_DBG_QP);
	qp = (struct mthca_qp *)(ULONG_PTR)resp->user_handle;

	qp->ibv_qp.handle = resp->qp_handle;
	qp->ibv_qp.qp_num = resp->qpn;
	qp->sq.max = resp->max_send_wr;
	qp->rq.max = resp->max_recv_wr;
	qp->sq.max_gs = resp->max_send_sge;
	qp->rq.max_gs = resp->max_recv_sge;
	qp->max_inline_data = resp->max_inline_data;
	qp->mr.handle = resp->mr.mr_handle;
	qp->mr.lkey = resp->mr.lkey;
	qp->mr.rkey = resp->mr.rkey;
	qp->mr.context = pd->context;

	if (mthca_is_memfree(pd->context)) {
		mthca_set_db_qn(qp->sq.db, MTHCA_DB_TYPE_SQ, qp->ibv_qp.qp_num);
		mthca_set_db_qn(qp->rq.db, MTHCA_DB_TYPE_RQ, qp->ibv_qp.qp_num);

	ret = mthca_store_qp(to_mctx(pd->context), qp->ibv_qp.qp_num, qp);

	UVP_EXIT(UVP_DBG_QP);

	UVP_EXIT(UVP_DBG_QP);
int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
		    enum ibv_qp_attr_mask attr_mask)
	if (attr_mask & IBV_QP_STATE)
		qp->state = attr->qp_state;

	if ((attr_mask & IBV_QP_STATE) &&
	    (attr->qp_state == IBV_QPS_RESET)) {
		mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
			       qp->srq ? to_msrq(qp->srq) : NULL);
		if (qp->send_cq != qp->recv_cq)
			mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);

		mthca_init_qp_indices(to_mqp(qp));

		if (mthca_is_memfree(qp->pd->context)) {
			*to_mqp(qp)->sq.db = 0;
			*to_mqp(qp)->rq.db = 0;
void mthca_destroy_qp_pre(struct ibv_qp *qp)
	mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
		       qp->srq ? to_msrq(qp->srq) : NULL);
	if (qp->send_cq != qp->recv_cq)
		mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);

	/* hold both CQ locks (send CQ first) while removing the QP from the
	 * context's QP table, so CQ pollers never see a half-removed QP */
	cl_spinlock_acquire(&to_mcq(qp->send_cq)->lock);
	if (qp->send_cq != qp->recv_cq)
		cl_spinlock_acquire(&to_mcq(qp->recv_cq)->lock);
	mthca_clear_qp(to_mctx(qp->pd->context), qp->qp_num);
	if (qp->send_cq != qp->recv_cq)
		cl_spinlock_release(&to_mcq(qp->recv_cq)->lock);
	cl_spinlock_release(&to_mcq(qp->send_cq)->lock);
void mthca_destroy_qp_post(struct ibv_qp *qp, int ret)
	cl_spinlock_acquire(&to_mcq(qp->send_cq)->lock);
	if (qp->send_cq != qp->recv_cq)
		cl_spinlock_acquire(&to_mcq(qp->recv_cq)->lock);
	mthca_store_qp(to_mctx(qp->pd->context), qp->qp_num, to_mqp(qp));
	if (qp->send_cq != qp->recv_cq)
		cl_spinlock_release(&to_mcq(qp->recv_cq)->lock);
	cl_spinlock_release(&to_mcq(qp->send_cq)->lock);

	if (mthca_is_memfree(qp->pd->context)) {
		mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_RQ,
			      to_mqp(qp)->rq.db_index);
		mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_SQ,
			      to_mqp(qp)->sq.db_index);

	cl_spinlock_destroy(&((struct mthca_qp *)qp)->sq.lock);
	cl_spinlock_destroy(&((struct mthca_qp *)qp)->rq.lock);

#ifdef NOT_USE_VIRTUAL_ALLOC
	cl_free(to_mqp(qp)->buf);
#else
	VirtualFree( to_mqp(qp)->buf, 0, MEM_RELEASE);
#endif

	cl_free(to_mqp(qp)->wrid);
int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
#ifdef WIN_TO_BE_CHANGED
	return ibv_cmd_attach_mcast(qp, gid, lid);

int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid)
#ifdef WIN_TO_BE_CHANGED
	return ibv_cmd_detach_mcast(qp, gid, lid);