/*
 * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
 * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "hca_data.h"
#include "hca_debug.h"

static cl_spinlock_t hob_lock;

u_int32_t g_mlnx_dbg_lvl = CL_DBG_ERROR;
/* Verbose alternative (normally disabled): additional MLNX_DBG_* flags can be OR-ed in, e.g.
 * u_int32_t g_mlnx_dbg_lvl = CL_DBG_ERROR |
 *     // MLNX_DBG_DIRECT |
 *     0;
 */

u_int32_t g_mlnx_dpc2thread = 0;

#ifdef MODULE_LICENSE
MODULE_LICENSE("Proprietary");
#endif

MODULE_PARM(g_mlnx_dbg_lvl, "i");
MODULE_PARM(g_mlnx_dpc2thread, "i");

cl_qlist_t mlnx_hca_list;
//mlnx_hca_t mlnx_hca_array[MLNX_MAX_HCA];
//uint32_t mlnx_num_hca = 0;

mlnx_hob_t mlnx_hob_array[MLNX_NUM_HOBKL];        // kernel HOB - one per HCA (cmdif access)

mlnx_hobul_t *mlnx_hobul_array[MLNX_NUM_HOBUL];   // kernel HOBUL - one per HCA (kar access)

/* User verb library name */
/* TODO: Move to linux osd file.
char mlnx_uvp_lib_name[MAX_LIB_NAME] = {"libmlnx_uvp.so"};
*/
    IN    cl_async_proc_item_t    *async_item_p );

    IN    void                    *pfn_comp_cb,

    IN    cl_async_proc_item_t    *async_item_p );

// ### Callback Interface
    IN    HH_hca_hndl_t       hh_hndl,
    IN    HH_cq_hndl_t        hh_cq,
    IN    void                *private_data);

    IN    HH_hca_hndl_t       hh_hndl,
    IN    HH_event_record_t   *hh_er_p,
    IN    void                *private_data);
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN    mlnx_hca_t    *p_hca )
    cl_spinlock_acquire( &hob_lock );
    cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item );
    cl_spinlock_release( &hob_lock );

    IN    mlnx_hca_t    *p_hca )
    cl_spinlock_acquire( &hob_lock );
    cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item );
    cl_spinlock_release( &hob_lock );

mlnx_hca_from_guid(
    IN    ib_net64_t    guid )
    cl_list_item_t    *p_item;
    mlnx_hca_t        *p_hca = NULL;

    cl_spinlock_acquire( &hob_lock );
    p_item = cl_qlist_head( &mlnx_hca_list );
    while( p_item != cl_qlist_end( &mlnx_hca_list ) )
        p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );
        if( p_hca->guid == guid )
        p_item = cl_qlist_next( p_item );
    cl_spinlock_release( &hob_lock );
mlnx_names_from_guid(
    IN    ib_net64_t    guid,
    OUT   char          **hca_name_p,
    OUT   char          **dev_name_p)

    if (!hca_name_p) return;
    if (!dev_name_p) return;

    for (idx = 0; idx < mlnx_num_hca; idx++)
        if (mlnx_hca_array[idx].ifx.guid == guid)
            *hca_name_p = mlnx_hca_array[idx].hca_name_p;
            *dev_name_p = mlnx_hca_array[idx].dev_name_p;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_hobs_init( void )

    cl_qlist_init( &mlnx_hca_list );

    for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)
        mlnx_hob_array[idx].hh_hndl = NULL;
        mlnx_hob_array[idx].comp_cb_p = NULL;
        mlnx_hob_array[idx].async_cb_p = NULL;
        mlnx_hob_array[idx].ca_context = NULL;
        mlnx_hob_array[idx].async_proc_mgr_p = NULL;
        mlnx_hob_array[idx].cl_device_h = NULL;
        // mlnx_hob_array[idx].port_lmc_p = NULL;
        mlnx_hob_array[idx].index = idx;
        mlnx_hob_array[idx].mark = E_MARK_INVALID;
    return cl_spinlock_init( &hob_lock );
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN    mlnx_hca_t    *p_hca,
    OUT   mlnx_hob_t    **hob_pp)

    ib_api_status_t status = IB_ERROR;
    mlnx_cache_t *p_cache;

    p_cache = (mlnx_cache_t*)cl_pzalloc( sizeof(mlnx_cache_t) * 2 );
        return IB_INSUFFICIENT_MEMORY;

    cl_spinlock_acquire(&hob_lock);
    for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)
        if (!mlnx_hob_array[idx].hh_hndl)
            mlnx_hob_array[idx].hh_hndl = p_hca->hh_hndl;
            mlnx_hob_array[idx].mark = E_MARK_CA;
            if (hob_pp) *hob_pp = &mlnx_hob_array[idx];
            status = IB_SUCCESS;
    cl_spinlock_release(&hob_lock);

    if (IB_SUCCESS == status)
        (*hob_pp)->cache = p_cache;
        cl_free( p_cache );

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN    mlnx_hob_t             *hob_p,
    IN    ci_completion_cb_t     comp_cb_p,
    IN    ci_async_event_cb_t    async_cb_p,
    IN    const void* const      ib_context)

    cl_status_t cl_status;

    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

    // Setup the callbacks
    if (!hob_p->async_proc_mgr_p)
        hob_p->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) );
        if( !hob_p->async_proc_mgr_p )
            return IB_INSUFFICIENT_MEMORY;
        cl_async_proc_construct( hob_p->async_proc_mgr_p );
        cl_status = cl_async_proc_init( hob_p->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" );
        if( cl_status != CL_SUCCESS )
            cl_async_proc_destroy( hob_p->async_proc_mgr_p );
            cl_free(hob_p->async_proc_mgr_p);
            hob_p->async_proc_mgr_p = NULL;
            return IB_INSUFFICIENT_RESOURCES;

    if (hob_p->hh_hndl)
        THH_hob_set_async_eventh(hob_p->hh_hndl,
            &hob_p->index); // This is the context our CB wants to receive
        THH_hob_set_comp_eventh( hob_p->hh_hndl,
            &hob_p->index); // This is the context our CB wants to receive
        hob_p->comp_cb_p = comp_cb_p;
        hob_p->async_cb_p = async_cb_p;
        hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL
        CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hca_idx %d context 0x%p\n", hob_p - mlnx_hob_array, ib_context));
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_hobs_get_context(
    IN    mlnx_hob_t    *hob_p,
    OUT   void          **context_p)

    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

    if (hob_p->hh_hndl)
        if (context_p) *context_p = &hob_p->index;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN    mlnx_hob_t    *hob_p)

    cl_async_proc_t *p_async_proc;
    mlnx_cache_t *p_cache;

    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

    cl_spinlock_acquire( &hob_lock );
    hob_p->mark = E_MARK_INVALID;

    p_async_proc = hob_p->async_proc_mgr_p;
    hob_p->async_proc_mgr_p = NULL;

    p_cache = hob_p->cache;
    hob_p->cache = NULL;

    hob_p->hh_hndl = NULL;
    hob_p->comp_cb_p = NULL;
    hob_p->async_cb_p = NULL;
    hob_p->ca_context = NULL;
    hob_p->cl_device_h = NULL;
    cl_spinlock_release( &hob_lock );

    cl_async_proc_destroy( p_async_proc );
    cl_free( p_async_proc );
    cl_free( p_cache );

    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hobs_remove idx %d hh_hndl 0x%p\n", hob_p - mlnx_hob_array, hob_p->hh_hndl));
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN    HH_hca_hndl_t    hndl,
    OUT   mlnx_hob_t       **hca_p)

    cl_spinlock_acquire( &hob_lock );
    for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)
        if (hndl == mlnx_hob_array[idx].hh_hndl)
            *hca_p = &mlnx_hob_array[idx];
            cl_spinlock_release( &hob_lock );
    cl_spinlock_release( &hob_lock );

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_hobs_get_handle(
    IN    mlnx_hob_t       *hob_p,
    OUT   HH_hca_hndl_t    *hndl_p)

    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

    *hndl_p = hob_p->hh_hndl;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_hobs_get_hobul(
    IN    mlnx_hob_t    *hob_p)

    if ((hob_p - mlnx_hob_array) >= MLNX_NUM_HOBKL)
    return mlnx_hobul_array[hob_p->index];
static int priv_ceil_log2(u_int32_t n)

    for (shift = 31; shift > 0; shift--)
        if (n & (1 << shift)) break;

    if (((unsigned)1 << shift) < n) shift++;
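/*
 * Illustrative values (derived from the loop above): priv_ceil_log2(1) == 0,
 * priv_ceil_log2(1000) == 10 and priv_ceil_log2(1024) == 10 - i.e. the ceiling
 * of log2(n).  The result is what the table-sizing code below feeds into
 * MASK32() when sizing the CQ/QP/PD lookup tables.
 */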
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN    mlnx_hob_t       *hob_p,
    IN    HH_hca_hndl_t    hh_hndl,
    IN    void             *resources_p)

    mlnx_hobul_t *hobul_p;
    HH_hca_dev_t *hca_ul_info;
    ib_api_status_t status;
    VAPI_hca_cap_t hca_caps;
#if MLNX_COMP_MODEL == 1
    static uint32_t proc_num = 0;
#endif

    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

    if (NULL == (hobul_p = cl_zalloc( sizeof(mlnx_hobul_t))))
        return IB_INSUFFICIENT_MEMORY;

    // The following will NULL all pointers/sizes (used in cleanup)
    // cl_memclr(hobul_p, sizeof (mlnx_hobul_t));

    hobul_p->hh_hndl = hh_hndl;

    if (HH_OK != THHUL_hob_create(resources_p, hh_hndl->dev_id, &hobul_p->hhul_hndl))
        status = IB_INSUFFICIENT_RESOURCES;

    hca_ul_info = (HH_hca_dev_t *)hh_hndl;

    hobul_p->vendor_id = hca_ul_info->vendor_id;
    hobul_p->device_id = hca_ul_info->dev_id;
    hobul_p->hca_ul_resources_p = resources_p;
    hobul_p->cq_ul_resources_sz = hca_ul_info->cq_ul_resources_sz;
    hobul_p->qp_ul_resources_sz = hca_ul_info->qp_ul_resources_sz;
    hobul_p->pd_ul_resources_sz = hca_ul_info->pd_ul_resources_sz;

    if (HH_OK != THH_hob_query(hh_hndl, &hca_caps))

    hobul_p->cq_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_cq));
    hobul_p->qp_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_qp)); // Currently mask = 0xFFFF
    hobul_p->max_pd = MASK32(priv_ceil_log2(hca_caps.max_pd_num)) + 1;
    hobul_p->max_cq = hobul_p->cq_idx_mask + 1;
    hobul_p->max_qp = hobul_p->qp_idx_mask + 1;
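    /*
     * Sizing note (illustrative, assuming MASK32(n) expands to ((1 << n) - 1)):
     * with hca_caps.max_num_cq == 0x10000, priv_ceil_log2() yields 16, the mask
     * is 0xFFFF and max_cq becomes 0x10000.  The completion path later computes
     * "cq_num = hh_cq & cq_idx_mask" to index cq_info_tbl, so the table size and
     * the mask must stay consistent.
     */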
    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: sizes cq 0x%x qp 0x%x pd 0x%x\n", hca_caps.max_num_cq, hca_caps.max_num_qp, hca_caps.max_pd_num));

    /* create and initialize the data structure for CQs */
    hobul_p->cq_info_tbl = cl_zalloc(hobul_p->max_cq * sizeof (cq_info_t));
    /* create and initialize the data structure for QPs */
    hobul_p->qp_info_tbl = cl_zalloc(hobul_p->max_qp * sizeof (qp_info_t));
    /* create and initialize the data structure for PDs */
    hobul_p->pd_info_tbl = cl_zalloc(hobul_p->max_pd * sizeof (pd_info_t));

    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: alloc failed? cq=%d qp=%d pd=%d\n",
        !hobul_p->cq_info_tbl, !hobul_p->qp_info_tbl, !hobul_p->pd_info_tbl));

    if (!hobul_p->pd_info_tbl ||
        !hobul_p->qp_info_tbl ||
        !hobul_p->cq_info_tbl)
        status = IB_INSUFFICIENT_MEMORY;

    /* Initialize all mutexes. */
    for( i = 0; i < hobul_p->max_cq; i++ )
        cl_mutex_construct( &hobul_p->cq_info_tbl[i].mutex );
#if MLNX_COMP_MODEL
        KeInitializeDpc( &hobul_p->cq_info_tbl[i].dpc,
            mlnx_comp_dpc, &hobul_p->cq_info_tbl[i] );
#if MLNX_COMP_MODEL == 1
        KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[i].dpc,
            (CCHAR)(proc_num++ % cl_proc_count()) );
#endif /* MLNX_COMP_MODEL == 1 */
#endif /* MLNX_COMP_MODEL */

    for( i = 0; i < hobul_p->max_qp; i++ )
        cl_mutex_construct( &hobul_p->qp_info_tbl[i].mutex );

    for( i = 0; i < hobul_p->max_pd; i++ )
        cl_mutex_construct( &hobul_p->pd_info_tbl[i].mutex );

    for( i = 0; i < hobul_p->max_cq; i++ )
        if( cl_mutex_init( &hobul_p->cq_info_tbl[i].mutex ) != CL_SUCCESS )

    for( i = 0; i < hobul_p->max_qp; i++ )
        if( cl_mutex_init( &hobul_p->qp_info_tbl[i].mutex ) != CL_SUCCESS )

    for( i = 0; i < hobul_p->max_pd; i++ )
        if( cl_mutex_init( &hobul_p->pd_info_tbl[i].mutex ) != CL_SUCCESS )

    hobul_p->log2_mpt_size = ((THH_hca_ul_resources_t *)resources_p)->log2_mpt_size;
    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("log2_mpt_size = %d\n", hobul_p->log2_mpt_size));

    cl_spinlock_acquire(&hob_lock);
    mlnx_hobul_array[hob_p->index] = hobul_p;
    cl_spinlock_release(&hob_lock);

    if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl );
    if (hobul_p->pd_info_tbl)
        for( i = 0; i < hobul_p->max_pd; i++ )
            cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex );
        cl_free(hobul_p->pd_info_tbl);
    if (hobul_p->qp_info_tbl)
        for( i = 0; i < hobul_p->max_qp; i++ )
            cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex );
        cl_free(hobul_p->qp_info_tbl);
    if (hobul_p->cq_info_tbl)
        for( i = 0; i < hobul_p->max_cq; i++ )
            cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex );
        cl_free(hobul_p->cq_info_tbl);
    if (hobul_p) cl_free( hobul_p);
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN    mlnx_hob_t    *hob_p,
    OUT   void          **resources_p )

    mlnx_hobul_t *hobul_p;

    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

    hobul_p = mlnx_hobul_array[hob_p->index];

    if (hobul_p && resources_p)
        *resources_p = hobul_p->hca_ul_resources_p;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN    mlnx_hob_t    *hob_p)

    mlnx_hobul_t *hobul_p;

    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

    cl_spinlock_acquire(&hob_lock);
    hobul_p = mlnx_hobul_array[hob_p->index];
    mlnx_hobul_array[hob_p->index] = NULL;
    cl_spinlock_release(&hob_lock);

    if (!hobul_p) return;

    if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl );
    if (hobul_p->pd_info_tbl)
        for( i = 0; i < hobul_p->max_pd; i++ )
            cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex );
        cl_free(hobul_p->pd_info_tbl);
    if (hobul_p->qp_info_tbl)
        for( i = 0; i < hobul_p->max_qp; i++ )
            cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex );
        cl_free(hobul_p->qp_info_tbl);
    if (hobul_p->cq_info_tbl)
        for( i = 0; i < hobul_p->max_cq; i++ )
            KeRemoveQueueDpc( &hobul_p->cq_info_tbl[i].dpc );
            cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex );
        cl_free(hobul_p->cq_info_tbl);
    if (hobul_p) cl_free( hobul_p);
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_map_vapi_event_type(
    IN    unsigned            event_id,
    OUT   ENUM_EVENT_CLASS    *event_class_p)

    case VAPI_QP_PATH_MIGRATED:
        if (event_class_p) *event_class_p = E_EV_QP;
        return IB_AE_QP_APM;

    case VAPI_QP_COMM_ESTABLISHED:
        if (event_class_p) *event_class_p = E_EV_QP;
        return IB_AE_QP_COMM;

    case VAPI_SEND_QUEUE_DRAINED:
        if (event_class_p) *event_class_p = E_EV_QP;
        return IB_AE_SQ_DRAINED;

    case VAPI_CQ_ERROR:
        if (event_class_p) *event_class_p = E_EV_CQ;
        return IB_AE_CQ_ERROR;

    case VAPI_LOCAL_WQ_INV_REQUEST_ERROR:
        if (event_class_p) *event_class_p = E_EV_QP;
        return IB_AE_WQ_REQ_ERROR;

    case VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR:
        if (event_class_p) *event_class_p = E_EV_QP;
        return IB_AE_WQ_ACCESS_ERROR;

    case VAPI_LOCAL_WQ_CATASTROPHIC_ERROR:
        if (event_class_p) *event_class_p = E_EV_QP;
        return IB_AE_QP_FATAL;

    case VAPI_PATH_MIG_REQ_ERROR:
        if (event_class_p) *event_class_p = E_EV_QP;
        return IB_AE_QP_APM_ERROR;

    case VAPI_LOCAL_CATASTROPHIC_ERROR:
        if (event_class_p) *event_class_p = E_EV_CA;
        return IB_AE_LOCAL_FATAL;

    case VAPI_PORT_ERROR:
        /*
         * In tavor_hca\src\Hca\hcahal\tavor\eventp\event_irqh.c:
         * TAVOR_IF_EV_TYPE_PORT_ERR maps one of two port events:
         *   - TAVOR_IF_SUB_EV_PORT_DOWN
         *   - TAVOR_IF_SUB_EV_PORT_UP
         * These map to (respectively)
         *   - VAPI_PORT_ERROR
         *   - VAPI_PORT_ACTIVE
         */
        if (event_class_p) *event_class_p = E_EV_CA;
        return IB_AE_PORT_DOWN; /* INIT, ARMED, DOWN */

    case VAPI_PORT_ACTIVE:
        if (event_class_p) *event_class_p = E_EV_CA;
        return IB_AE_PORT_ACTIVE; /* ACTIVE STATE */

        CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n",
            event_id, VAPI_PORT_ACTIVE, IB_AE_LOCAL_FATAL));
        if (event_class_p) *event_class_p = E_EV_CA;
        return IB_AE_LOCAL_FATAL;
mlnx_conv_vapi_event(
    IN    HH_event_record_t    *hh_event_p,
    IN    ib_event_rec_t       *ib_event_p,
    OUT   ENUM_EVENT_CLASS     *event_class_p)

    // ib_event_p->context is handled by the caller
    ib_event_p->type = mlnx_map_vapi_event_type(hh_event_p->etype, event_class_p);

    // no traps currently generated
    // ib_event_p->trap_info.lid = ;
    // ib_event_p->trap_info.port_guid = ;
    // ib_event_p->trap_info.port_num = hh_er;
    IN    HH_hca_hndl_t        hh_hndl,
    IN    HH_event_record_t    *hh_er_p,
    IN    void                 *private_data)

    mlnx_cb_data_t cb_data;
    mlnx_cb_data_t *cb_data_p;

    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC CB %p (0x%x)\n",
        private_data, (private_data) ? *(u_int32_t *)private_data : 0xB5));

    if (!private_data || !hh_er_p) return;

    obj_idx = *(u_int32_t *)private_data;
    if (obj_idx >= MLNX_NUM_HOBKL) return;

    hob_p = mlnx_hob_array + obj_idx;

    // g_mlnx_dpc2thread will be initialized as a module parameter (default - disabled(0))
    if (g_mlnx_dpc2thread)
        cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t));
        if (!cb_data_p) return;

        cb_data_p->hh_hndl = hh_hndl;
        cb_data_p->private_data = private_data;
        cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t));
        cb_data_p->async_item.pfn_callback = mlnx_async_dpc;
        cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item );

        cb_data_p = &cb_data;

        cb_data_p->hh_hndl = hh_hndl;
        cb_data_p->private_data = private_data;
        cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t));
        mlnx_async_dpc( &cb_data_p->async_item );
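/*
 * Dispatch note (descriptive): when g_mlnx_dpc2thread is non-zero the event
 * record is copied into a heap-allocated mlnx_cb_data_t and queued to the
 * HOB's cl_async_proc_t worker ("CBthread"), which later runs mlnx_async_dpc
 * and frees the buffer; when it is zero (the default) a stack copy is used and
 * mlnx_async_dpc runs synchronously in the caller's context.
 */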
    IN    cl_async_proc_item_t    *async_item_p )

    HH_event_record_t *hh_er_p;
    mlnx_hobul_t *hobul_p;
    mlnx_cb_data_t *cb_data_p;
    ENUM_EVENT_CLASS event_class;
    ib_event_rec_t event_r;

    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("ASYNC DPC %p\n", async_item_p));

    cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item );

    if (!cb_data_p) return;

    hh_er_p = &cb_data_p->hh_er;
    obj_idx = *(u_int32_t *)cb_data_p->private_data;
    hob_p = mlnx_hob_array + obj_idx;
    hobul_p = mlnx_hobul_array[obj_idx];

    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC type %d ca_context %p\n",
        hh_er_p->etype, hob_p->ca_context));

        !hob_p->async_cb_p)

    cl_memclr(&event_r, sizeof(event_r));
    mlnx_conv_vapi_event(hh_er_p, &event_r, &event_class);

    switch(event_class)
        event_r.context = (void *)hob_p->ca_context;

        obj_idx = hh_er_p->event_modifier.qpn;
        if (obj_idx < hobul_p->max_qp)
            event_r.context = (void *)hobul_p->qp_info_tbl[obj_idx].qp_context;
            CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad qpn 0x%x max 0x%x\n", obj_idx, hobul_p->max_qp));

        obj_idx = hh_er_p->event_modifier.cq;
        if (obj_idx < hobul_p->max_cq)
            event_r.context = (void *)hobul_p->cq_info_tbl[obj_idx].cq_context;
            CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad cqn 0x%x max 0x%x\n", obj_idx, hobul_p->max_cq));

        // CL_ASSERT(0); // This shouldn't happen
        CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC unknown event_class 0x%x\n", event_class));

    // Call the registered CB
    (*hob_p->async_cb_p)(&event_r);

    if (g_mlnx_dpc2thread)
        cl_free(cb_data_p);
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN    HH_hca_hndl_t    hh_hndl,
    IN    HH_cq_hndl_t     hh_cq,
    IN    void             *private_data)

#if MLNX_COMP_MODEL
    mlnx_hobul_t *hobul_p;
#if MLNX_COMP_MODEL == 2
    static uint32_t proc_num = 0;
#endif

    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data));

    UNUSED_PARAM( hh_hndl );

    hca_idx = *(u_int32_t *)private_data;
    hob_p = mlnx_hob_array + hca_idx;
    hobul_p = mlnx_hobul_array[hca_idx];
    cq_num = hh_cq & hobul_p->cq_idx_mask;

    if (NULL != hob_p && NULL != hobul_p &&
        hob_p->hh_hndl && hob_p->comp_cb_p)
        if (cq_num < hobul_p->max_cq)
#if MLNX_COMP_MODEL == 2
            KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[cq_num].dpc,
                (CCHAR)(proc_num++ % cl_proc_count()) );
#endif /* MLNX_COMP_MODEL == 2 */
            KeInsertQueueDpc( &hobul_p->cq_info_tbl[cq_num].dpc,
            HCA_TRACE( HCA_DBG_ERROR, ("CQ index out of range!!!\n") );
#else /* MLNX_COMP_MODEL */
    mlnx_cb_data_t cb_data;
    mlnx_cb_data_t *cb_data_p;

    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data));

    if (!private_data) return;

    obj_idx = *(u_int32_t *)private_data;
    hob_p = mlnx_hob_array + obj_idx;
    if (!hob_p) return;

    if (g_mlnx_dpc2thread)
        cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t));
        if (!cb_data_p) return;

        cb_data_p->hh_hndl = hh_hndl;
        cb_data_p->hh_cq = hh_cq;
        cb_data_p->private_data = private_data;

        cb_data_p->async_item.pfn_callback = mlnx_comp_dpc;

        // Report completion through async_proc
        cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item );

        cb_data_p = &cb_data;

        cb_data_p->hh_hndl = hh_hndl;
        cb_data_p->hh_cq = hh_cq;
        cb_data_p->private_data = private_data;

        // Report completion directly from DPC (verbs should NOT sleep)
        mlnx_comp_dpc( &cb_data_p->async_item );
#endif /* MLNX_COMP_MODEL */
#if MLNX_COMP_MODEL
    mlnx_hob_t *hob_p = (mlnx_hob_t*)arg1;
    UNUSED_PARAM( p_dpc );
    UNUSED_PARAM( unused );

    hob_p->comp_cb_p( (void*)((cq_info_t*)context)->cq_context );

#else /* MLNX_COMP_MODEL */
    IN    cl_async_proc_item_t    *async_item_p )

    mlnx_hobul_t *hobul_p;
    mlnx_cb_data_t *cb_data_p;

    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP DPC %p\n", async_item_p));

    cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item );
    if (!cb_data_p) return;

    hca_idx = *(u_int32_t *)cb_data_p->private_data;
    hob_p = mlnx_hob_array + hca_idx;
    hobul_p = mlnx_hobul_array[hca_idx];
    cq_num = (u_int32_t)cb_data_p->hh_cq & hobul_p->cq_idx_mask;

    if (NULL != hob_p && NULL != hobul_p &&
        hob_p->hh_hndl && hob_p->comp_cb_p)
        if (cq_num < hobul_p->max_cq)
            (*hob_p->comp_cb_p)((void *)hobul_p->cq_info_tbl[cq_num].cq_context);

    if (g_mlnx_dpc2thread)
        cl_free(cb_data_p);

#endif /* MLNX_COMP_MODEL */
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN    ib_access_t    ibal_acl)

    VAPI_mrw_acl_t vapi_acl = 0;

    if (ibal_acl & IB_AC_RDMA_READ) vapi_acl |= VAPI_EN_REMOTE_READ;
    if (ibal_acl & IB_AC_RDMA_WRITE) vapi_acl |= VAPI_EN_REMOTE_WRITE;
    if (ibal_acl & IB_AC_ATOMIC) vapi_acl |= VAPI_EN_REMOTE_ATOM;
    if (ibal_acl & IB_AC_LOCAL_WRITE) vapi_acl |= VAPI_EN_LOCAL_WRITE;
    if (ibal_acl & IB_AC_MW_BIND) vapi_acl |= VAPI_EN_MEMREG_BIND;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN    VAPI_mrw_acl_t    vapi_acl)

    ib_access_t ibal_acl = 0;

    if (vapi_acl & VAPI_EN_REMOTE_READ) ibal_acl |= IB_AC_RDMA_READ;
    if (vapi_acl & VAPI_EN_REMOTE_WRITE) ibal_acl |= IB_AC_RDMA_WRITE;
    if (vapi_acl & VAPI_EN_REMOTE_ATOM) ibal_acl |= IB_AC_ATOMIC;
    if (vapi_acl & VAPI_EN_LOCAL_WRITE) ibal_acl |= IB_AC_LOCAL_WRITE;
    if (vapi_acl & VAPI_EN_MEMREG_BIND) ibal_acl |= IB_AC_MW_BIND;
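/*
 * Example (descriptive): the two helpers above are symmetric, so an IBAL mask
 * of (IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE) maps to
 * (VAPI_EN_LOCAL_WRITE | VAPI_EN_REMOTE_WRITE) and converting back yields the
 * original IBAL mask; flags outside the five handled bits are silently dropped.
 */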
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
static VAPI_rdma_atom_acl_t
    IN    ib_access_t    ibal_acl)

    VAPI_rdma_atom_acl_t vapi_qp_acl = 0;

    if (ibal_acl & IB_AC_RDMA_WRITE) vapi_qp_acl |= VAPI_EN_REM_WRITE;
    if (ibal_acl & IB_AC_RDMA_READ) vapi_qp_acl |= VAPI_EN_REM_READ;
    if (ibal_acl & IB_AC_ATOMIC) vapi_qp_acl |= VAPI_EN_REM_ATOMIC_OP;

    return vapi_qp_acl;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
static ib_access_t
    IN    VAPI_rdma_atom_acl_t    vapi_qp_acl)

    ib_access_t ibal_acl = IB_AC_LOCAL_WRITE;

    if (vapi_qp_acl & VAPI_EN_REM_WRITE) ibal_acl |= IB_AC_RDMA_WRITE;
    if (vapi_qp_acl & VAPI_EN_REM_READ) ibal_acl |= IB_AC_RDMA_READ;
    if (vapi_qp_acl & VAPI_EN_REM_ATOMIC_OP) ibal_acl |= IB_AC_ATOMIC;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN    mlnx_mro_t    *mro_p,
    IN    boolean_t     um_call )

    MOSAL_iobuf_t old_iobuf;

    if( um_call )
        mro_p->mr_prot_ctx = MOSAL_get_current_prot_ctx();
    else
        mro_p->mr_prot_ctx = MOSAL_get_kernel_prot_ctx();

    // Save pointer to existing locked region.
    old_iobuf = mro_p->mr_iobuf;

    if (MT_OK != MOSAL_iobuf_register((MT_virt_addr_t)mro_p->mr_start,
        (MT_size_t)mro_p->mr_size,
        mro_p->mr_prot_ctx,
        mro_p->mr_mosal_perm,

    if( MT_OK != MOSAL_iobuf_deregister( old_iobuf ) )

    return IB_SUCCESS;
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_conv_ibal_mr_create(
    IN    u_int32_t                pd_idx,
    IN OUT mlnx_mro_t              *mro_p,
    IN    VAPI_mr_change_t         change_flags,
    IN    ib_mr_create_t const     *p_mr_create,
    IN    boolean_t                um_call,
    OUT   HH_mr_t                  *mr_props_p )

    ib_api_status_t status;

    /* Set ACL information first since it is used to lock the region. */
    if( change_flags & VAPI_MR_CHANGE_ACL )
        mro_p->mr_acl = map_ibal_acl( p_mr_create->access_ctrl );
        // This computation should be externalized by THH
        mro_p->mr_mosal_perm =
            ((mro_p->mr_acl & VAPI_EN_LOCAL_WRITE) ? MOSAL_PERM_WRITE : 0);

    if( change_flags & VAPI_MR_CHANGE_TRANS )
        CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("addr 0x%p size %"PRId64"\n", (void *)p_mr_create->vaddr, p_mr_create->length));
        // Build TPT entries
        mro_p->mr_start = (IB_virt_addr_t)p_mr_create->vaddr;
        mro_p->mr_size = p_mr_create->length;
        if (IB_SUCCESS != (status = mlnx_lock_region(mro_p, um_call)))

    /* Now fill in the MR properties. */
    mr_props_p->start = mro_p->mr_start;
    mr_props_p->size = mro_p->mr_size;
    mr_props_p->acl = mro_p->mr_acl;
    mr_props_p->pd = pd_idx;

    mr_props_p->tpt.tpt_type = HH_TPT_IOBUF;
    mr_props_p->tpt.tpt.iobuf = mro_p->mr_iobuf;

    return IB_SUCCESS;
/////////////////////////////////////////////////////////
// On entry mro_p->mr_start holds the pmr address
/////////////////////////////////////////////////////////
mlnx_conv_ibal_pmr_create(
    IN    u_int32_t                  pd_idx,
    IN    mlnx_mro_t                 *mro_p,
    IN    ib_phys_create_t const     *p_pmr_create,
    OUT   HH_mr_t                    *mr_props_p )

    VAPI_phy_addr_t* buf_lst = NULL;
    VAPI_size_t* sz_lst = NULL;
    u_int32_t page_shift = priv_ceil_log2(p_pmr_create->hca_page_size);
    u_int64_t page_mask = (1 << page_shift) - 1;
    u_int64_t tot_sz = 0;

    CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl,
        ("PRE: addr %p size 0x%"PRIx64" shift %d\n",
        (void *)(uintn_t)mro_p->mr_start, p_pmr_create->length, page_mask));
    mro_p->mr_start = (mro_p->mr_start & ~page_mask) | (p_pmr_create->buf_offset & page_mask);
    CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl,
        ("POST: addr %p\n", (void *)(uintn_t)mro_p->mr_start));

    mr_props_p->start = mro_p->mr_start;
    mr_props_p->size = p_pmr_create->length;
    mr_props_p->acl = map_ibal_acl(p_pmr_create->access_ctrl);
    mr_props_p->pd = pd_idx;

    mro_p->mr_size = mr_props_p->size;
    // mro_p->mr_first_page_addr = 0;
    // mro_p->mr_num_pages = (mro_p->mr_end >> PAGESHIFT) + 1 - (mro_p->mr_start >> PAGESHIFT);
    // CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st pg addr 0x%p pages %d\n",
    //     (void *)mro_p->mr_first_page_addr, p_pmr_create->num_bufs));
    CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st phys addr 0x%"PRIx64" phys pages %d\n",
        p_pmr_create->range_array[0].base_addr, p_pmr_create->num_ranges));

    // Build TPT entries
    if (!p_pmr_create->range_array)
        return IB_INVALID_PARAMETER;

    if (p_pmr_create->hca_page_size !=
        MT_DOWN_ALIGNX_PHYS(p_pmr_create->hca_page_size, page_shift))
        CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf size is not page aligned\n"));
        return IB_INVALID_PARAMETER;

    for (i = 0; i < p_pmr_create->num_ranges; i++)
        uint64_t start_addr = p_pmr_create->range_array[i].base_addr;
        uint64_t end_addr = start_addr + p_pmr_create->range_array[i].size;

        if( end_addr < start_addr ) {
            CL_TRACE( CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf end < start\n") );
            return IB_INVALID_PARAMETER;

            MT_DOWN_ALIGNX_PHYS(start_addr, page_shift))
            CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf start address is not page aligned\n"));
            return IB_INVALID_PARAMETER;

        tot_sz += p_pmr_create->range_array[i].size;

    if( tot_sz < p_pmr_create->length + p_pmr_create->buf_offset )
        HCA_TRACE_EXIT( HCA_DBG_ERROR,
            ("length(0x%"PRIx64") + buf offset(0x%"PRIx64") larger than sum "
            "of phys ranges(0x%"PRIx64")\n",
            p_pmr_create->length, p_pmr_create->buf_offset, tot_sz) );
        return IB_INVALID_PARAMETER;

    if( p_pmr_create->buf_offset > p_pmr_create->range_array[0].size )
        HCA_TRACE_EXIT( HCA_DBG_ERROR,
            ("buf offset(0x%x) > than 1st phy range size(0x%"PRIx64")\n",
            p_pmr_create->buf_offset, p_pmr_create->range_array[0].size) );
        return IB_INVALID_PARAMETER;

    /* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */
    buf_lst = (VAPI_phy_addr_t*)cl_pzalloc( sizeof(VAPI_phy_addr_t)*(p_pmr_create->num_ranges));
        HCA_TRACE_EXIT( HCA_DBG_ERROR,
            ("Failed to allocate range address list.\n") );
        return IB_INSUFFICIENT_MEMORY;

    /* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */
    sz_lst = (VAPI_size_t*)cl_pzalloc( sizeof(VAPI_size_t)*(p_pmr_create->num_ranges));
        cl_free( buf_lst );
        HCA_TRACE_EXIT( HCA_DBG_ERROR,
            ("Failed to allocate range size list.\n") );
        return IB_INSUFFICIENT_MEMORY;

    for (i = 0; i < p_pmr_create->num_ranges; i++)
        buf_lst[i] = p_pmr_create->range_array[i].base_addr;
        sz_lst[i] = p_pmr_create->range_array[i].size;

    mr_props_p->tpt.tpt_type = HH_TPT_BUF;
    mr_props_p->tpt.num_entries = p_pmr_create->num_ranges;
    mr_props_p->tpt.tpt.buf_lst.buf_sz_lst = sz_lst;
    mr_props_p->tpt.tpt.buf_lst.phys_buf_lst = buf_lst;
    mr_props_p->tpt.tpt.buf_lst.iova_offset = p_pmr_create->buf_offset;

    return IB_SUCCESS;
mlnx_gid_to_index(
    IN    HH_hca_hndl_t    hh_hndl,
    IN    u_int8_t         port_num,
    IN    u_int8_t         *raw_gid)

    ib_gid_t *gid_table_p = NULL;
    u_int8_t index = 0; // default return value

    gid_table_p = cl_zalloc( 64*sizeof(ib_gid_t));

    mlnx_get_hca_gid_tbl(hh_hndl, port_num, 64, gid_table_p);

    for (i = 0; i < 64; i++)
        if (!cl_memcmp(raw_gid, gid_table_p[i].raw, sizeof(ib_gid_t)))
            CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("1: found GID at index %d\n", i));

    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("2: found GID at index %d\n", index));

    cl_free( gid_table_p);
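/*
 * Behaviour note (descriptive): the helper scans at most 64 entries of the
 * port's GID table for a byte-wise match of raw_gid and falls back to index 0
 * when nothing matches, so callers such as the IBAL-to-VAPI AV converter below
 * always get a usable sgid_index even for an unknown source GID.
 */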
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_conv_ibal_av(
    IN    HH_hca_hndl_t          hh_hndl,
    IN    const ib_av_attr_t     *ibal_av_p,
    OUT   VAPI_ud_av_t           *vapi_av_p)

    vapi_av_p->port = ibal_av_p->port_num;
    vapi_av_p->sl = ibal_av_p->sl;
    vapi_av_p->dlid = cl_ntoh16 (ibal_av_p->dlid);

    vapi_av_p->static_rate =
        (ibal_av_p->static_rate == IB_PATH_RECORD_RATE_10_GBS? 0 : 3);
    ib_grh_get_ver_class_flow( ibal_av_p->grh.ver_class_flow, NULL,
        &vapi_av_p->traffic_class, &vapi_av_p->flow_label );
    vapi_av_p->src_path_bits = ibal_av_p->path_bits; // PATH:
    //vapi_av_p->src_path_bits = 0;

    /* For global destination or Multicast address:*/
    if (ibal_av_p->grh_valid)
        vapi_av_p->grh_flag = TRUE;
        vapi_av_p->hop_limit = ibal_av_p->grh.hop_limit;
        // CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("raw %p, &raw %p\n", ibal_av_p->grh.src_gid.raw, &ibal_av_p->grh.src_gid.raw));
        vapi_av_p->sgid_index = mlnx_gid_to_index(hh_hndl, ibal_av_p->port_num, (u_int8_t *)ibal_av_p->grh.src_gid.raw);
        cl_memcpy(vapi_av_p->dgid, ibal_av_p->grh.dest_gid.raw, sizeof(vapi_av_p->dgid));
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_conv_vapi_av(
    IN    HH_hca_hndl_t          hh_hndl,
    IN    const VAPI_ud_av_t     *vapi_av_p,
    OUT   ib_av_attr_t           *ibal_av_p)

    ibal_av_p->port_num = vapi_av_p->port;
    ibal_av_p->sl = vapi_av_p->sl;
    ibal_av_p->dlid = cl_ntoh16(vapi_av_p->dlid);

    /* For global destination or Multicast address:*/
    ibal_av_p->grh_valid = vapi_av_p->grh_flag;

    ibal_av_p->grh.ver_class_flow = ib_grh_set_ver_class_flow( ver,
        vapi_av_p->traffic_class,
        vapi_av_p->flow_label);
    ibal_av_p->grh.hop_limit = vapi_av_p->hop_limit;

    THH_hob_get_sgid(hh_hndl,
        vapi_av_p->sgid_index,
        &ibal_av_p->grh.src_gid.raw);

    cl_memcpy(ibal_av_p->grh.dest_gid.raw, vapi_av_p->dgid, sizeof(vapi_av_p->dgid));

    ibal_av_p->static_rate = (vapi_av_p->static_rate?
        IB_PATH_RECORD_RATE_2_5_GBS : IB_PATH_RECORD_RATE_10_GBS);
    ibal_av_p->path_bits = vapi_av_p->src_path_bits;
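/*
 * Rate mapping example (descriptive): the two AV converters above treat the
 * static rate as a two-way switch - IB_PATH_RECORD_RATE_10_GBS maps to the
 * VAPI value 0 (presumably full link rate) and anything else to 3, while the
 * reverse path turns any non-zero VAPI rate into IB_PATH_RECORD_RATE_2_5_GBS
 * and zero into IB_PATH_RECORD_RATE_10_GBS; intermediate rates are therefore
 * not preserved by a round trip.
 */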
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_map_vapi_cqe_status(
    IN    VAPI_wc_status_t    vapi_status)

    switch (vapi_status)
    case IB_COMP_SUCCESS: return IB_WCS_SUCCESS;
    case IB_COMP_LOC_LEN_ERR: return IB_WCS_LOCAL_LEN_ERR;
    case IB_COMP_LOC_QP_OP_ERR: return IB_WCS_LOCAL_OP_ERR;
    case IB_COMP_LOC_PROT_ERR: return IB_WCS_LOCAL_PROTECTION_ERR;
    case IB_COMP_WR_FLUSH_ERR: return IB_WCS_WR_FLUSHED_ERR;
    case IB_COMP_MW_BIND_ERR: return IB_WCS_MEM_WINDOW_BIND_ERR;
    case IB_COMP_REM_INV_REQ_ERR: return IB_WCS_REM_INVALID_REQ_ERR;
    case IB_COMP_REM_ACCESS_ERR: return IB_WCS_REM_ACCESS_ERR;
    case IB_COMP_REM_OP_ERR: return IB_WCS_REM_OP_ERR;
    case IB_COMP_RETRY_EXC_ERR: return IB_WCS_TIMEOUT_RETRY_ERR;
    case IB_COMP_RNR_RETRY_EXC_ERR: return IB_WCS_RNR_RETRY_ERR;
    case IB_COMP_REM_ABORT_ERR: return IB_WCS_REM_ACCESS_ERR; // ???
    case IB_COMP_FATAL_ERR: return IB_WCS_REM_ACCESS_ERR; // ???
    case IB_COMP_GENERAL_ERR: return IB_WCS_REM_ACCESS_ERR; // ???

        CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n",
            vapi_status, IB_COMP_GENERAL_ERR, IB_WCS_REM_ACCESS_ERR));
        return IB_WCS_REM_ACCESS_ERR;
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_map_vapi_cqe_type(
    IN    VAPI_cqe_opcode_t    opcode)

    case VAPI_CQE_SQ_SEND_DATA: return IB_WC_SEND;
    case VAPI_CQE_SQ_RDMA_WRITE: return IB_WC_RDMA_WRITE;
    case VAPI_CQE_SQ_RDMA_READ: return IB_WC_RDMA_READ;
    case VAPI_CQE_SQ_COMP_SWAP: return IB_WC_COMPARE_SWAP;
    case VAPI_CQE_SQ_FETCH_ADD: return IB_WC_FETCH_ADD;
    case VAPI_CQE_SQ_BIND_MRW: return IB_WC_MW_BIND;
    case VAPI_CQE_RQ_SEND_DATA: return IB_WC_RECV;
    case VAPI_CQE_RQ_RDMA_WITH_IMM: return IB_WC_RECV_RDMA_WRITE;
        return IB_WC_SEND;

/////////////////////////////////////////////////////////
// Map Remote Node Addr Type
/////////////////////////////////////////////////////////
mlnx_map_vapi_rna_type(
    IN    VAPI_remote_node_addr_type_t    rna)

    case VAPI_RNA_UD: return IB_QPT_UNRELIABLE_DGRM;
    case VAPI_RNA_RAW_ETY: return IB_QPT_RAW_ETHER;
    case VAPI_RNA_RAW_IPV6: return IB_QPT_RAW_IPV6;
        return IB_QPT_RELIABLE_CONN;
//////////////////////////////////////////////////////////////
// Convert from VAPI memory-region attributes to IBAL
//////////////////////////////////////////////////////////////
mlnx_conv_vapi_mr_attr(
    IN    ib_pd_handle_t     pd_h,
    IN    HH_mr_info_t       *mr_info_p,
    OUT   ib_mr_attr_t       *mr_query_p)

    mr_query_p->h_pd = pd_h;
    mr_query_p->local_lb = mr_info_p->local_start;
    mr_query_p->local_ub = mr_info_p->local_start + mr_info_p->local_size;
    mr_query_p->remote_lb = mr_info_p->remote_start;
    mr_query_p->remote_ub = mr_info_p->remote_start + mr_info_p->remote_size;

    mr_query_p->access_ctrl = map_vapi_acl(mr_info_p->acl);
    mr_query_p->lkey = mr_info_p->lkey;
    mr_query_p->rkey = mr_info_p->rkey;

//////////////////////////////////////////////////////////////
// Convert from IBAL memory-window bind request to VAPI
//////////////////////////////////////////////////////////////
mlnx_conv_bind_req(
    IN    HHUL_qp_hndl_t         hhul_qp_hndl,
    IN    ib_bind_wr_t* const    p_mw_bind,
    OUT   HHUL_mw_bind_t         *bind_prop_p)

    bind_prop_p->qp = hhul_qp_hndl;
    bind_prop_p->id = p_mw_bind->wr_id;
    bind_prop_p->acl = map_ibal_acl(p_mw_bind->access_ctrl);
    bind_prop_p->size = p_mw_bind->local_ds.length;
    bind_prop_p->start = (VAPI_virt_addr_t)(MT_virt_addr_t)p_mw_bind->local_ds.vaddr;
    bind_prop_p->mr_lkey = p_mw_bind->local_ds.lkey;
    bind_prop_p->comp_type =
        (p_mw_bind->send_opt & IB_SEND_OPT_SIGNALED) ? VAPI_SIGNALED : VAPI_UNSIGNALED;
/////////////////////////////////////////////////////////
// Map IBAL qp type to VAPI transport and special qp_type
/////////////////////////////////////////////////////////
mlnx_map_ibal_qp_type(
    IN    ib_qp_type_t          ibal_qpt,
    OUT   VAPI_special_qp_t     *vapi_qp_type_p)

    case IB_QPT_RELIABLE_CONN:
        if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;

    case IB_QPT_UNRELIABLE_CONN:
        if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;

    case IB_QPT_UNRELIABLE_DGRM:
        if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;

        if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP;

        if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;

    case IB_QPT_RAW_IPV6:
        if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_IPV6_QP; // TBD: ??

    case IB_QPT_RAW_ETHER:
        if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP; // TBD: ??

        if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;

    case IB_QPT_QP0_ALIAS:
        if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP;

    case IB_QPT_QP1_ALIAS:
        if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;

        CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map ibal_qp_type %d (last known %d) returning %d\n",
            ibal_qpt, IB_QPT_QP1_ALIAS, IB_TS_RAW));
        if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP;
/////////////////////////////////////////////////////////
// QP and CQ value must be handled by caller
/////////////////////////////////////////////////////////
mlnx_conv_qp_create_attr(
    IN    const ib_qp_create_t     *create_attr_p,
    OUT   HHUL_qp_init_attr_t      *init_attr_p,
    OUT   VAPI_special_qp_t        *vapi_qp_type_p)

    init_attr_p->ts_type = mlnx_map_ibal_qp_type(create_attr_p->qp_type, vapi_qp_type_p);

    init_attr_p->qp_cap.max_oust_wr_sq = create_attr_p->sq_depth;
    init_attr_p->qp_cap.max_oust_wr_rq = create_attr_p->rq_depth;
    init_attr_p->qp_cap.max_sg_size_sq = create_attr_p->sq_sge;
    init_attr_p->qp_cap.max_sg_size_rq = create_attr_p->rq_sge;

    init_attr_p->sq_sig_type = (create_attr_p->sq_signaled) ? VAPI_SIGNAL_ALL_WR : VAPI_SIGNAL_REQ_WR;
    init_attr_p->rq_sig_type = VAPI_SIGNAL_ALL_WR;

    init_attr_p->srq = HHUL_INVAL_SRQ_HNDL;
/////////////////////////////////////////////////////////
// NOTE: ibal_qp_state is non linear - so we cannot use a LUT
/////////////////////////////////////////////////////////
mlnx_map_ibal_qp_state(
    IN    ib_qp_state_t    ibal_qp_state)

    VAPI_qp_state_t vapi_qp_state = VAPI_RESET;

    if (ibal_qp_state & IB_QPS_RESET) vapi_qp_state = VAPI_RESET;
    else if (ibal_qp_state & IB_QPS_INIT) vapi_qp_state = VAPI_INIT;
    else if (ibal_qp_state & IB_QPS_RTR) vapi_qp_state = VAPI_RTR;
    else if (ibal_qp_state & IB_QPS_RTS) vapi_qp_state = VAPI_RTS;
    else if (ibal_qp_state & IB_QPS_SQD) vapi_qp_state = VAPI_SQD;
    else if (ibal_qp_state & IB_QPS_SQERR) vapi_qp_state = VAPI_SQE;
    else if (ibal_qp_state & IB_QPS_ERROR) vapi_qp_state = VAPI_ERR;

    return vapi_qp_state;
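/*
 * Mapping note (descriptive): the IBAL state values are tested as individual
 * bits here ("non linear", per the comment above), which is why an if/else
 * chain is used instead of a lookup table.  For example, a request carrying
 * the IB_QPS_RTS bit maps to VAPI_RTS; if several state bits were set, the
 * first match in the chain above wins.
 */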
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_map_vapi_qp_state(
    IN    VAPI_qp_state_t    vapi_qp_state)

    switch (vapi_qp_state)
    case VAPI_RESET: return IB_QPS_RESET;
    case VAPI_INIT: return IB_QPS_INIT;
    case VAPI_RTR: return IB_QPS_RTR;
    case VAPI_RTS: return IB_QPS_RTS;
    case VAPI_SQD: return IB_QPS_SQD;
    case VAPI_SQE: return IB_QPS_SQERR;
    case VAPI_ERR: return IB_QPS_ERROR;
    // TBD: IB_QPS_SQD_DRAINING
    // TBD: IB_QPS_SQD_DRAINED

        CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_qp_state %d (last known %d) returning %d\n",
            vapi_qp_state, VAPI_ERR, IB_QPS_INIT));
        return IB_QPS_INIT;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_map_vapi_apm_state(
    IN    VAPI_mig_state_t    vapi_apm_state)

    switch (vapi_apm_state)
    case VAPI_MIGRATED: return IB_APM_MIGRATED;
    case VAPI_REARM: return IB_APM_REARM;
    case VAPI_ARMED: return IB_APM_ARMED;

        CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_apm_state %d (last known %d) returning %d\n",
            vapi_apm_state, VAPI_ARMED, 0));
/////////////////////////////////////////////////////////
// UNUSED: IBAL uses same encoding as THH
/////////////////////////////////////////////////////////
u_int32_t ibal_mtu_to_vapi(u_int32_t ibal_mtu)

    u_int32_t mtu = 0;

    // MTU256=1, MTU512=2, MTU1024=3
    while (ibal_mtu >>= 1) mtu++;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
u_int32_t vapi_mtu_to_ibal(u_int32_t vapi_mtu)

    return (1 << (vapi_mtu + 7));
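/*
 * Worked example (descriptive; the tail of ibal_mtu_to_vapi() is not shown
 * above and presumably returns mtu - 7): an IBAL byte MTU of 256 shifts down
 * to mtu == 8, giving the VAPI code 1; 1024 gives code 3, matching the
 * "MTU256=1, MTU512=2, MTU1024=3" comment.  The reverse helper agrees:
 * vapi_mtu_to_ibal(1) == 1 << 8 == 256 and vapi_mtu_to_ibal(3) == 1024.
 */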
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_conv_vapi_qp_attr(
    IN    HH_hca_hndl_t      hh_hndl,
    IN    VAPI_qp_attr_t     *hh_qp_attr_p,
    OUT   ib_qp_attr_t       *qp_attr_p)

    qp_attr_p->access_ctrl = map_vapi_qp_acl(hh_qp_attr_p->remote_atomic_flags);
    qp_attr_p->pkey_index = (uint16_t)hh_qp_attr_p->pkey_ix;
    qp_attr_p->sq_depth = hh_qp_attr_p->cap.max_oust_wr_sq;
    qp_attr_p->rq_depth = hh_qp_attr_p->cap.max_oust_wr_rq;
    qp_attr_p->sq_sge = hh_qp_attr_p->cap.max_sg_size_sq;
    qp_attr_p->rq_sge = hh_qp_attr_p->cap.max_sg_size_rq;
    qp_attr_p->sq_max_inline = hh_qp_attr_p->cap.max_inline_data_sq;
    qp_attr_p->init_depth = hh_qp_attr_p->ous_dst_rd_atom; // outstanding outgoing
    qp_attr_p->resp_res = hh_qp_attr_p->qp_ous_rd_atom; // outstanding as target (in)

    qp_attr_p->num = cl_ntoh32(hh_qp_attr_p->qp_num);
    CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_qpn 0x%x = hh_qpn 0x%x\n",
        qp_attr_p->num,
        hh_qp_attr_p->qp_num));

    qp_attr_p->dest_num = cl_ntoh32(hh_qp_attr_p->dest_qp_num);
    CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_dest 0x%x = hh_dest 0x%x\n",
        qp_attr_p->dest_num,
        hh_qp_attr_p->dest_qp_num));
    qp_attr_p->qkey = cl_ntoh32 (hh_qp_attr_p->qkey);

    qp_attr_p->sq_psn = cl_ntoh32 (hh_qp_attr_p->sq_psn);
    qp_attr_p->rq_psn = cl_ntoh32 (hh_qp_attr_p->rq_psn);

    qp_attr_p->primary_port = hh_qp_attr_p->port;
    qp_attr_p->alternate_port = hh_qp_attr_p->alt_port;

    qp_attr_p->state = mlnx_map_vapi_qp_state(hh_qp_attr_p->qp_state);
    qp_attr_p->apm_state = mlnx_map_vapi_apm_state(hh_qp_attr_p->path_mig_state);

    mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->av, &qp_attr_p->primary_av);
    qp_attr_p->primary_av.conn.path_mtu = (u_int8_t)hh_qp_attr_p->path_mtu;
    qp_attr_p->primary_av.conn.local_ack_timeout = hh_qp_attr_p->timeout;
    qp_attr_p->primary_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count;
    qp_attr_p->primary_av.conn.rnr_retry_cnt = hh_qp_attr_p->rnr_retry;

    mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->alt_av, &qp_attr_p->alternate_av);
    qp_attr_p->alternate_av.conn.path_mtu = (u_int8_t)hh_qp_attr_p->path_mtu;
    qp_attr_p->alternate_av.conn.local_ack_timeout = hh_qp_attr_p->timeout;
    qp_attr_p->alternate_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count;
    qp_attr_p->alternate_av.conn.rnr_retry_cnt = hh_qp_attr_p->rnr_retry;

    /*
     *   QP_ATTR_EN_SQD_ASYN_NOTIF
     * + QP_ATTR_REMOTE_ATOMIC_FLAGS
     * + QP_ATTR_RETRY_COUNT
     * + QP_ATTR_RNR_RETRY
     *   QP_ATTR_QP_OUS_RD_ATOM
     * - QP_ATTR_ALT_PATH
     * + QP_ATTR_MIN_RNR_TIMER
     *   QP_ATTR_OUS_DST_RD_ATOM
     *   QP_ATTR_PATH_MIG_STATE
     */
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_conv_qp_modify_attr(
    IN    HH_hca_hndl_t            hh_hndl,
    IN    ib_qp_type_t             qp_type,
    IN    const ib_qp_mod_t        *modify_attr_p,
    OUT   VAPI_qp_attr_t           *qp_attr_p,
    OUT   VAPI_qp_attr_mask_t      *attr_mask_p)

    qp_attr_p->qp_state = mlnx_map_ibal_qp_state(modify_attr_p->req_state);
    *attr_mask_p = QP_ATTR_QP_STATE;

    switch(modify_attr_p->req_state)
    case IB_QPS_RESET:

        *attr_mask_p |= QP_ATTR_PORT |

        qp_attr_p->port = modify_attr_p->state.init.primary_port;
        qp_attr_p->qkey = cl_ntoh32 (modify_attr_p->state.init.qkey);
        qp_attr_p->pkey_ix = modify_attr_p->state.init.pkey_index;
        if (IB_QPT_RELIABLE_CONN == qp_type)
            *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;
            qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.init.access_ctrl);
            qp_attr_p->remote_atomic_flags = 0;

        /* VAPI doesn't support modifying the WQE depth ever. */
        if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||
            modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH )
            return IB_UNSUPPORTED;

        *attr_mask_p |= QP_ATTR_RQ_PSN |
            QP_ATTR_DEST_QP_NUM |
            QP_ATTR_QP_OUS_RD_ATOM |
            QP_ATTR_MIN_RNR_TIMER |

        qp_attr_p->rq_psn = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);
        qp_attr_p->dest_qp_num = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);
        qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rtr.resp_res;

        if (modify_attr_p->state.rtr.opts & IB_MOD_QP_RNR_NAK_TIMEOUT)
            qp_attr_p->min_rnr_timer = modify_attr_p->state.rtr.rnr_nak_timeout;
            qp_attr_p->min_rnr_timer = 0;

        CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("modify_qp: hh_dest 0x%x = ibal_dest 0x%x\n",
            qp_attr_p->dest_qp_num, modify_attr_p->state.rtr.dest_qp));

        // Convert primary RC AV (mandatory)
        cl_memclr(&qp_attr_p->av, sizeof(VAPI_ud_av_t));
        mlnx_conv_ibal_av(hh_hndl,
            &modify_attr_p->state.rtr.primary_av, &qp_attr_p->av);

        if (IB_QPT_RELIABLE_CONN == qp_type)
            *attr_mask_p |= QP_ATTR_PATH_MTU;
            qp_attr_p->path_mtu = modify_attr_p->state.rtr.primary_av.conn.path_mtu; // MTU
            qp_attr_p->timeout = modify_attr_p->state.rtr.primary_av.conn.local_ack_timeout; // XXX: conv
            qp_attr_p->retry_count = modify_attr_p->state.rtr.primary_av.conn.seq_err_retry_cnt;
            qp_attr_p->rnr_retry = modify_attr_p->state.rtr.primary_av.conn.rnr_retry_cnt;

        // Convert Remote Atomic Flags
        if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL)
            *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;
            qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rtr.access_ctrl);

        // Convert alternate RC AV
        if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV)
            *attr_mask_p |= QP_ATTR_ALT_PATH;
            cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t));
            mlnx_conv_ibal_av(hh_hndl,
                &modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_av);

            if (IB_QPT_RELIABLE_CONN == qp_type)
                qp_attr_p->alt_timeout = modify_attr_p->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv

                /* Not compliant with spec 1.1! Data already set before */
                qp_attr_p->retry_count = modify_attr_p->state.rtr.alternate_av.conn.seq_err_retry_cnt;
                qp_attr_p->rnr_retry = modify_attr_p->state.rtr.alternate_av.conn.rnr_retry_cnt;

        /* VAPI doesn't support modifying the WQE depth ever. */
        if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||
            modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH )
            return IB_UNSUPPORTED;

        *attr_mask_p |= QP_ATTR_SQ_PSN |
            QP_ATTR_RETRY_COUNT |
            QP_ATTR_RNR_RETRY |
            QP_ATTR_OUS_DST_RD_ATOM |
            QP_ATTR_MIN_RNR_TIMER;

        qp_attr_p->sq_psn = cl_ntoh32 (modify_attr_p->state.rts.sq_psn);

        if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL)
            *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;
            qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rts.access_ctrl);

        qp_attr_p->timeout = modify_attr_p->state.rts.local_ack_timeout; // XXX: conv
        qp_attr_p->ous_dst_rd_atom = modify_attr_p->state.rts.init_depth;
        qp_attr_p->retry_count = modify_attr_p->state.rts.retry_cnt;
        qp_attr_p->rnr_retry = modify_attr_p->state.rts.rnr_retry_cnt;
        qp_attr_p->min_rnr_timer = modify_attr_p->state.rts.rnr_nak_timeout;

        // Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)
        if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {
            *attr_mask_p |= QP_ATTR_QP_OUS_RD_ATOM;
            qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rts.resp_res;

        // Convert alternate RC AV
        if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV)
            *attr_mask_p |= QP_ATTR_ALT_PATH;
            cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t));
            mlnx_conv_ibal_av(hh_hndl,
                &modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_av);
            if (IB_QPT_RELIABLE_CONN == qp_type)
                qp_attr_p->alt_timeout = modify_attr_p->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv

                /* Not compliant with spec 1.1! Data already set before */
                qp_attr_p->retry_count = modify_attr_p->state.rts.alternate_av.conn.seq_err_retry_cnt;
                qp_attr_p->rnr_retry = modify_attr_p->state.rts.alternate_av.conn.rnr_retry_cnt;

    // TBD: The following are treated equally (SQ Drain)
    case IB_QPS_SQD_DRAINING:
    case IB_QPS_SQD_DRAINED:
        *attr_mask_p |= QP_ATTR_EN_SQD_ASYN_NOTIF;
        qp_attr_p->en_sqd_asyn_notif = (MT_bool)modify_attr_p->state.sqd.sqd_event;

    case IB_QPS_SQERR:
    case IB_QPS_ERROR:
    case IB_QPS_TIME_WAIT:

    CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("CL: conv_qp_modify: new state %d attr_mask 0x%x\n", qp_attr_p->qp_state, *attr_mask_p));
    return IB_SUCCESS;
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
static VAPI_wr_opcode_t
map_ibal_send_opcode(
    IN    ib_wr_type_t    ibal_opcode,
    IN    boolean_t       imm)

    VAPI_wr_opcode_t vapi_opcode;

    switch (ibal_opcode)
    case WR_SEND: vapi_opcode = VAPI_SEND;
    case WR_RDMA_WRITE: vapi_opcode = VAPI_RDMA_WRITE;
    case WR_RDMA_READ: vapi_opcode = VAPI_RDMA_READ;
    case WR_COMPARE_SWAP: vapi_opcode = VAPI_ATOMIC_CMP_AND_SWP;
    case WR_FETCH_ADD: vapi_opcode = VAPI_ATOMIC_FETCH_AND_ADD;
    default: vapi_opcode = VAPI_SEND;

    if (imm && (VAPI_SEND == vapi_opcode || VAPI_RDMA_WRITE == vapi_opcode)) vapi_opcode++;
    return vapi_opcode;
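/*
 * Opcode note (descriptive, relies on the VAPI enum layout): the final
 * "vapi_opcode++" assumes that the *_WITH_IMM opcode immediately follows its
 * base opcode in VAPI_wr_opcode_t, so WR_SEND with IB_SEND_OPT_IMMEDIATE
 * becomes VAPI_SEND_WITH_IMM and WR_RDMA_WRITE becomes
 * VAPI_RDMA_WRITE_WITH_IMM.  Atomic and read opcodes are left untouched.
 */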
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
// Convert an IBAL send work request into a VAPI send descriptor.
ib_api_status_t
mlnx_conv_send_desc(
	IN	IB_ts_t			transport,
	IN	const ib_send_wr_t	*ibal_send_wqe_p,
	OUT	VAPI_sr_desc_t		*vapi_send_desc_p)
{
	boolean_t	imm = FALSE;
	u_int32_t	idx;
	register VAPI_sg_lst_entry_t	*sg_lst_p;
	register ib_local_ds_t		*ds_array;

	switch (transport)
	{
	case IB_TS_UD:
		CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "UD"));
		{
			mlnx_avo_t *avo_p = (mlnx_avo_t *)ibal_send_wqe_p->dgrm.ud.h_av;

			vapi_send_desc_p->remote_qp   = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qp);
			vapi_send_desc_p->remote_qkey = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qkey);

			if (!avo_p || avo_p->mark != E_MARK_AV)
				return IB_INVALID_AV_HANDLE;

			vapi_send_desc_p->remote_ah = avo_p->h_av; // was ah.hhul
		}
		break;

	case IB_TS_RC:
		CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "RC"));
		// vapi_send_desc_p->remote_qp   = 0;
		// vapi_send_desc_p->remote_qkey = 0;
		vapi_send_desc_p->remote_addr = ibal_send_wqe_p->remote_ops.vaddr;
		vapi_send_desc_p->r_key       = ibal_send_wqe_p->remote_ops.rkey;
		vapi_send_desc_p->compare_add = ibal_send_wqe_p->remote_ops.atomic1;
		vapi_send_desc_p->swap        = ibal_send_wqe_p->remote_ops.atomic2;
		break;

	default: // TBD: RAW, RD
		return IB_UNSUPPORTED;
	}

	imm = (0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_IMMEDIATE));
	vapi_send_desc_p->fence     = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_FENCE));
	vapi_send_desc_p->set_se    = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SOLICITED));
	vapi_send_desc_p->comp_type = (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SIGNALED) ?
		VAPI_SIGNALED : VAPI_UNSIGNALED;

	vapi_send_desc_p->id     = ibal_send_wqe_p->wr_id;
	vapi_send_desc_p->opcode = map_ibal_send_opcode(ibal_send_wqe_p->wr_type, imm);

	vapi_send_desc_p->imm_data = cl_ntoh32 (ibal_send_wqe_p->immediate_data);

	vapi_send_desc_p->sg_lst_len = ibal_send_wqe_p->num_ds;

	// Copy the scatter/gather list into the caller-supplied descriptor array.
	sg_lst_p = vapi_send_desc_p->sg_lst_p;
	ds_array = ibal_send_wqe_p->ds_array;
	for (idx = 0; idx < ibal_send_wqe_p->num_ds; idx++)
	{
		sg_lst_p->addr = ds_array->vaddr;
		sg_lst_p->len  = ds_array->length;
		sg_lst_p->lkey = ds_array->lkey;
		// CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_send (conv) addr %Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey));
		sg_lst_p++;
		ds_array++;
	}

	CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("send: rqpn 0x%x rkey 0x%x\n",
		vapi_send_desc_p->remote_qp,
		vapi_send_desc_p->remote_qkey));
	return IB_SUCCESS;
}
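/*
 * Note: vapi_send_desc_p->sg_lst_p must point to caller-provided storage with
 * room for ibal_send_wqe_p->num_ds entries; the loop above copies the IBAL
 * data segments into it but does not allocate or bounds-check it.
 */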
\r
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
// Convert an IBAL receive work request into a VAPI receive descriptor.
ib_api_status_t
mlnx_conv_recv_desc(
	IN	const ib_recv_wr_t	*ibal_recv_wqe_p,
	OUT	VAPI_rr_desc_t		*vapi_recv_desc_p)
{
	u_int32_t	idx;
	register VAPI_sg_lst_entry_t	*sg_lst_p;
	register ib_local_ds_t		*ds_array;

	vapi_recv_desc_p->id         = ibal_recv_wqe_p->wr_id;
	vapi_recv_desc_p->sg_lst_len = ibal_recv_wqe_p->num_ds;
	vapi_recv_desc_p->opcode     = VAPI_RECEIVE;
	vapi_recv_desc_p->comp_type  = VAPI_SIGNALED;

	// Copy the scatter/gather list into the caller-supplied descriptor array.
	sg_lst_p = vapi_recv_desc_p->sg_lst_p;
	ds_array = ibal_recv_wqe_p->ds_array;
	for (idx = 0; idx < ibal_recv_wqe_p->num_ds; idx++)
	{
		sg_lst_p->addr = ds_array->vaddr;
		sg_lst_p->len  = ds_array->length;
		sg_lst_p->lkey = ds_array->lkey;
		// CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_recv (conv) addr 0x%Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey));
		sg_lst_p++;
		ds_array++;
	}

	return IB_SUCCESS;
}
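/*
 * Note: receive descriptors are always built with comp_type = VAPI_SIGNALED,
 * so every posted receive generates a completion entry; IBAL receive work
 * requests carry no per-WR signaling option to map.
 */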
\r
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
// Translate a VAPI port capability mask into the IBAL port capability flags.
static void
vapi_port_cap_to_ibal(
	IN	IB_port_cap_mask_t	vapi_port_cap,
	OUT	ib_port_cap_t		*ibal_port_cap_p)
{
	if (vapi_port_cap & IB_CAP_MASK_IS_CONN_MGMT_SUP)
		ibal_port_cap_p->cm = TRUE;
	if (vapi_port_cap & IB_CAP_MASK_IS_SNMP_TUNN_SUP)
		ibal_port_cap_p->snmp = TRUE;
	if (vapi_port_cap & IB_CAP_MASK_IS_DEVICE_MGMT_SUP)
		ibal_port_cap_p->dev_mgmt = TRUE;
	if (vapi_port_cap & IB_CAP_MASK_IS_VENDOR_CLS_SUP)
		ibal_port_cap_p->vend = TRUE;
	if (vapi_port_cap & IB_CAP_MASK_IS_SM_DISABLED)
		ibal_port_cap_p->sm_disable = TRUE;
	if (vapi_port_cap & IB_CAP_MASK_IS_SM)
		ibal_port_cap_p->sm = TRUE;
}
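/*
 * Note: only capability bits that are set are translated to TRUE; the caller
 * is expected to zero the ib_port_cap_t first, as mlnx_conv_vapi_hca_cap does
 * with cl_memclr() before invoking this helper.
 */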
\r
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
// Fill an IBAL ib_ca_attr_t from the VAPI HCA capabilities and per-port attributes.
void
mlnx_conv_vapi_hca_cap(
	IN	HH_hca_dev_t	*hca_info_p,
	IN	VAPI_hca_cap_t	*vapi_hca_cap_p,
	IN	VAPI_hca_port_t	*vapi_hca_ports,
	OUT	ib_ca_attr_t	*ca_attr_p)
{
	u_int8_t	port_num;
	VAPI_hca_port_t	*vapi_port_p;
	ib_port_attr_t	*ibal_port_p;

	ca_attr_p->vend_id  = hca_info_p->vendor_id;
	ca_attr_p->dev_id   = (uint16_t)hca_info_p->dev_id;
	ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver;

	ca_attr_p->ca_guid   = *(UNALIGNED64 u_int64_t *)vapi_hca_cap_p->node_guid;
	ca_attr_p->num_ports = vapi_hca_cap_p->phys_port_num;
	ca_attr_p->max_qps   = vapi_hca_cap_p->max_num_qp;
	ca_attr_p->max_wrs   = vapi_hca_cap_p->max_qp_ous_wr;
	ca_attr_p->max_sges  = vapi_hca_cap_p->max_num_sg_ent;
	ca_attr_p->max_rd_sges = vapi_hca_cap_p->max_num_sg_ent_rd;
	ca_attr_p->max_cqs   = vapi_hca_cap_p->max_num_cq;
	ca_attr_p->max_cqes  = vapi_hca_cap_p->max_num_ent_cq;
	ca_attr_p->max_pds   = vapi_hca_cap_p->max_pd_num;
	ca_attr_p->init_regions     = vapi_hca_cap_p->max_num_mr;
	ca_attr_p->init_windows     = vapi_hca_cap_p->max_mw_num;
	ca_attr_p->init_region_size = vapi_hca_cap_p->max_mr_size;
	ca_attr_p->max_addr_handles = vapi_hca_cap_p->max_ah_num;
	ca_attr_p->atomicity        = vapi_hca_cap_p->atomic_cap;
	ca_attr_p->max_partitions   = vapi_hca_cap_p->max_pkeys;
	ca_attr_p->max_qp_resp_res  = vapi_hca_cap_p->max_qp_ous_rd_atom;
	ca_attr_p->max_resp_res     = vapi_hca_cap_p->max_res_rd_atom;
	ca_attr_p->max_qp_init_depth = vapi_hca_cap_p->max_qp_init_rd_atom;
	ca_attr_p->max_ipv6_qps     = vapi_hca_cap_p->max_raw_ipv6_qp;
	ca_attr_p->max_ether_qps    = vapi_hca_cap_p->max_raw_ethy_qp;
	ca_attr_p->max_mcast_grps   = vapi_hca_cap_p->max_mcast_grp_num;
	ca_attr_p->max_mcast_qps    = vapi_hca_cap_p->max_total_mcast_qp_attach_num;
	ca_attr_p->max_qps_per_mcast_grp = vapi_hca_cap_p->max_mcast_qp_attach_num;
	ca_attr_p->local_ack_delay  = vapi_hca_cap_p->local_ca_ack_delay;
	ca_attr_p->bad_pkey_ctr_support = vapi_hca_cap_p->flags & VAPI_BAD_PKEY_COUNT_CAP;
	ca_attr_p->bad_qkey_ctr_support = vapi_hca_cap_p->flags & VAPI_BAD_QKEY_COUNT_CAP;
	ca_attr_p->raw_mcast_support    = vapi_hca_cap_p->flags & VAPI_RAW_MULTI_CAP;
	ca_attr_p->apm_support          = vapi_hca_cap_p->flags & VAPI_AUTO_PATH_MIG_CAP;
	ca_attr_p->av_port_check        = vapi_hca_cap_p->flags & VAPI_UD_AV_PORT_ENFORCE_CAP;
	ca_attr_p->change_primary_port  = vapi_hca_cap_p->flags & VAPI_CHANGE_PHY_PORT_CAP;
	ca_attr_p->modify_wr_depth      = vapi_hca_cap_p->flags & VAPI_RESIZE_OUS_WQE_CAP;
	ca_attr_p->hw_agents            = FALSE; // in the context of IBAL, the agents are implemented on the host

	ca_attr_p->num_page_sizes = 1;
	ca_attr_p->p_page_size[0] = PAGESIZE; // TBD: extract an array of page sizes from HCA cap

	for (port_num = 0; port_num < vapi_hca_cap_p->phys_port_num; port_num++)
	{
		// Set up the port pointers
		ibal_port_p = &ca_attr_p->p_port_attr[port_num];
		vapi_port_p = &vapi_hca_ports[port_num];

		// Port capabilities
		cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));
		vapi_port_cap_to_ibal(vapi_port_p->capability_mask, &ibal_port_p->cap);

		// Port attributes
		ibal_port_p->port_num  = port_num + 1;
		ibal_port_p->port_guid = ibal_port_p->p_gid_table[0].unicast.interface_id;
		ibal_port_p->lid       = cl_ntoh16(vapi_port_p->lid);
		ibal_port_p->lmc       = vapi_port_p->lmc;
		ibal_port_p->max_vls   = vapi_port_p->max_vl_num;
		ibal_port_p->sm_lid    = cl_ntoh16(vapi_port_p->sm_lid);
		ibal_port_p->sm_sl     = vapi_port_p->sm_sl;
		ibal_port_p->link_state = (vapi_port_p->state != 0) ? (uint8_t)vapi_port_p->state : IB_LINK_DOWN;
		ibal_port_p->num_gids  = vapi_port_p->gid_tbl_len;
		ibal_port_p->num_pkeys = vapi_port_p->pkey_tbl_len;
		ibal_port_p->pkey_ctr  = (uint16_t)vapi_port_p->bad_pkey_counter;
		ibal_port_p->qkey_ctr  = (uint16_t)vapi_port_p->qkey_viol_counter;
		ibal_port_p->max_msg_size = vapi_port_p->max_msg_sz;
		ibal_port_p->mtu       = (u_int8_t)vapi_port_p->max_mtu;

		ibal_port_p->subnet_timeout = 5; // TBD: currently 128us
		// ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec

		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("Port %d port_guid 0x%"PRIx64"\n",
			ibal_port_p->port_num, ibal_port_p->port_guid));
	}
}
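/*
 * Note: IBAL port numbers are 1-based (port_num + 1 above) while vapi_hca_ports
 * and p_port_attr are indexed from 0. port_guid is read from p_gid_table[0],
 * so the GID table is assumed to be populated before this conversion runs.
 */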
\r
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
// Read the P_Key table of a port through the THH HOB interface.
ib_api_status_t
mlnx_get_hca_pkey_tbl(
	IN	HH_hca_hndl_t	hh_hndl,
	IN	u_int8_t	port_num,
	IN	u_int16_t	num_entries,
	OUT	void		*table_p)
{
	u_int16_t	size;
	ib_net16_t	*pkey_p;

	if (HH_OK != THH_hob_get_pkey_tbl( hh_hndl, port_num, num_entries, &size, table_p))
		return IB_ERROR;

	pkey_p = (ib_net16_t *)table_p;
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("port %d pkey0 0x%x pkey1 0x%x\n", port_num, pkey_p[0], pkey_p[1]));
	return IB_SUCCESS;
}
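/*
 * Note (assumption): num_entries is taken as the capacity of table_p in P_Key
 * entries; THH_hob_get_pkey_tbl reports the actual table length through 'size',
 * which is currently not propagated back to the caller.
 */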
\r
// Read the GID table of a port through the THH HOB interface.
ib_api_status_t
mlnx_get_hca_gid_tbl(
	IN	HH_hca_hndl_t	hh_hndl,
	IN	u_int8_t	port_num,
	IN	u_int16_t	num_entries,
	OUT	void		*table_p)
{
	u_int16_t	size;

	if (HH_OK != THH_hob_get_gid_tbl( hh_hndl, port_num, num_entries, &size, table_p))
		return IB_ERROR;

	return IB_SUCCESS;
}