/*
 * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
 * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "hca_data.h"
#include "hca_debug.h"

static cl_spinlock_t hob_lock;

#if 1
u_int32_t g_mlnx_dbg_lvl = CL_DBG_ERROR;
#else
u_int32_t g_mlnx_dbg_lvl = CL_DBG_ERROR |
    // MLNX_DBG_DIRECT |
    0;
#endif

u_int32_t g_mlnx_dpc2thread = 0;

#ifdef MODULE_LICENSE
MODULE_LICENSE("Proprietary");
#endif

MODULE_PARM(g_mlnx_dbg_lvl, "i");
MODULE_PARM(g_mlnx_dpc2thread, "i");

cl_qlist_t mlnx_hca_list;
//mlnx_hca_t mlnx_hca_array[MLNX_MAX_HCA];
//uint32_t mlnx_num_hca = 0;

mlnx_hob_t   mlnx_hob_array[MLNX_NUM_HOBKL];    // kernel HOB - one per HCA (cmdif access)
mlnx_hobul_t *mlnx_hobul_array[MLNX_NUM_HOBUL]; // kernel HOBUL - one per HCA (kar access)
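/*
 * NOTE: as the code below reads, each HCA is tracked by two parallel entries
 * sharing the same index: a mlnx_hob_t slot holding the HH handle and the
 * IBAL completion/async callbacks, and a mlnx_hobul_t holding the user-level
 * CQ/QP/PD tables sized from the device caps.  That index is also the
 * private_data context handed back to the HH event callbacks.
 */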
\r
/* User verb library name */
/* TODO: Move to linux osd file. */
char mlnx_uvp_lib_name[MAX_LIB_NAME] = {"libmlnx_uvp.so"};

    IN  cl_async_proc_item_t  *async_item_p );

    IN  void  *pfn_comp_cb,
    IN  cl_async_proc_item_t  *async_item_p );

// ### Callback Interface
    IN  HH_hca_hndl_t  hh_hndl,
    IN  HH_cq_hndl_t   hh_cq,
    IN  void           *private_data);

    IN  HH_hca_hndl_t      hh_hndl,
    IN  HH_event_record_t  *hh_er_p,
    IN  void               *private_data);

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN  mlnx_hca_t  *p_hca )

    cl_spinlock_acquire( &hob_lock );
    cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item );
    cl_spinlock_release( &hob_lock );

    IN  mlnx_hca_t  *p_hca )

    cl_spinlock_acquire( &hob_lock );
    cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item );
    cl_spinlock_release( &hob_lock );

mlnx_hca_from_guid(
    IN  ib_net64_t  guid )

    cl_list_item_t  *p_item;
    mlnx_hca_t      *p_hca = NULL;

    cl_spinlock_acquire( &hob_lock );
    p_item = cl_qlist_head( &mlnx_hca_list );
    while( p_item != cl_qlist_end( &mlnx_hca_list ) )
        p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );
        if( p_hca->guid == guid )
        p_item = cl_qlist_next( p_item );
    cl_spinlock_release( &hob_lock );

mlnx_hca_from_hh_hndl(
    IN  HH_hca_hndl_t  hh_hndl )

    cl_list_item_t  *p_item;
    mlnx_hca_t      *p_hca = NULL;

    cl_spinlock_acquire( &hob_lock );
    p_item = cl_qlist_head( &mlnx_hca_list );
    while( p_item != cl_qlist_end( &mlnx_hca_list ) )
        p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );
        if( p_hca->hh_hndl == hh_hndl )
        p_item = cl_qlist_next( p_item );
    cl_spinlock_release( &hob_lock );

mlnx_names_from_guid(
    IN   ib_net64_t  guid,
    OUT  char        **hca_name_p,
    OUT  char        **dev_name_p)

    if (!hca_name_p) return;
    if (!dev_name_p) return;

    for (idx = 0; idx < mlnx_num_hca; idx++)
        if (mlnx_hca_array[idx].ifx.guid == guid)
            *hca_name_p = mlnx_hca_array[idx].hca_name_p;
            *dev_name_p = mlnx_hca_array[idx].dev_name_p;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_hobs_init( void )

    cl_qlist_init( &mlnx_hca_list );

    for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)
        mlnx_hob_array[idx].hh_hndl = NULL;
        mlnx_hob_array[idx].comp_cb_p = NULL;
        mlnx_hob_array[idx].async_cb_p = NULL;
        mlnx_hob_array[idx].ca_context = NULL;
        mlnx_hob_array[idx].async_proc_mgr_p = NULL;
        mlnx_hob_array[idx].cl_device_h = NULL;
        // mlnx_hob_array[idx].port_lmc_p = NULL;
        mlnx_hob_array[idx].index = idx;
        mlnx_hob_array[idx].mark = E_MARK_INVALID;

    return cl_spinlock_init( &hob_lock );

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN   mlnx_hca_t  *p_hca,
    OUT  mlnx_hob_t  **hob_pp)

    ib_api_status_t  status = IB_ERROR;
    mlnx_cache_t     *p_cache;

    p_cache = (mlnx_cache_t*)cl_pzalloc( sizeof(mlnx_cache_t) * 2 );
        return IB_INSUFFICIENT_MEMORY;

    cl_spinlock_acquire(&hob_lock);
    for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)
        if (!mlnx_hob_array[idx].hh_hndl)
            mlnx_hob_array[idx].hh_hndl = p_hca->hh_hndl;
            mlnx_hob_array[idx].mark = E_MARK_CA;
            if (hob_pp) *hob_pp = &mlnx_hob_array[idx];
            status = IB_SUCCESS;
    cl_spinlock_release(&hob_lock);

    if (IB_SUCCESS == status)
        (*hob_pp)->cache = p_cache;
        cl_free( p_cache );

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN  mlnx_hob_t           *hob_p,
    IN  ci_completion_cb_t   comp_cb_p,
    IN  ci_async_event_cb_t  async_cb_p,
    IN  const void* const    ib_context)

    cl_status_t  cl_status;

    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

    // Setup the callbacks
    if (!hob_p->async_proc_mgr_p)
        hob_p->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) );
        if( !hob_p->async_proc_mgr_p )
            return IB_INSUFFICIENT_MEMORY;
        cl_async_proc_construct( hob_p->async_proc_mgr_p );
        cl_status = cl_async_proc_init( hob_p->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" );
        if( cl_status != CL_SUCCESS )
            cl_async_proc_destroy( hob_p->async_proc_mgr_p );
            cl_free(hob_p->async_proc_mgr_p);
            hob_p->async_proc_mgr_p = NULL;
            return IB_INSUFFICIENT_RESOURCES;

    if (hob_p->hh_hndl)
        THH_hob_set_async_eventh(hob_p->hh_hndl,
            &hob_p->index); // This is the context our CB wants to receive
        THH_hob_set_comp_eventh( hob_p->hh_hndl,
            &hob_p->index); // This is the context our CB wants to receive
        hob_p->comp_cb_p = comp_cb_p;
        hob_p->async_cb_p = async_cb_p;
        hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL
        CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hca_idx %d context 0x%p\n", hob_p - mlnx_hob_array, ib_context));

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_hobs_get_context(
    IN   mlnx_hob_t  *hob_p,
    OUT  void        **context_p)

    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

    if (hob_p->hh_hndl)
        if (context_p) *context_p = &hob_p->index;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN  mlnx_hob_t  *hob_p)

    cl_async_proc_t  *p_async_proc;
    mlnx_cache_t     *p_cache;

    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

    cl_spinlock_acquire( &hob_lock );

    hob_p->mark = E_MARK_INVALID;

    p_async_proc = hob_p->async_proc_mgr_p;
    hob_p->async_proc_mgr_p = NULL;

    p_cache = hob_p->cache;
    hob_p->cache = NULL;

    hob_p->hh_hndl = NULL;
    hob_p->comp_cb_p = NULL;
    hob_p->async_cb_p = NULL;
    hob_p->ca_context = NULL;
    hob_p->cl_device_h = NULL;

    cl_spinlock_release( &hob_lock );

    cl_async_proc_destroy( p_async_proc );
    cl_free( p_async_proc );

    cl_free( p_cache );

    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hobs_remove idx %d hh_hndl 0x%p\n", hob_p - mlnx_hob_array, hob_p->hh_hndl));

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN   HH_hca_hndl_t  hndl,
    OUT  mlnx_hob_t     **hca_p)

    cl_spinlock_acquire( &hob_lock );
    for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)
        if (hndl == mlnx_hob_array[idx].hh_hndl)
            *hca_p = &mlnx_hob_array[idx];
            cl_spinlock_release( &hob_lock );
    cl_spinlock_release( &hob_lock );

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_hobs_get_handle(
    IN   mlnx_hob_t     *hob_p,
    OUT  HH_hca_hndl_t  *hndl_p)

    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

    *hndl_p = hob_p->hh_hndl;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_hobs_get_hobul(
    IN  mlnx_hob_t  *hob_p)

    if ((hob_p - mlnx_hob_array) >= MLNX_NUM_HOBKL)
    return mlnx_hobul_array[hob_p->index];

static int priv_ceil_log2(u_int32_t n)

    for (shift = 31; shift > 0; shift--)
        if (n & (1 << shift)) break;

    if (((unsigned)1 << shift) < n) shift++;
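/*
 * Illustrative example: the loop leaves 'shift' at ceil(log2(n)); e.g.
 * priv_ceil_log2(1000) -> 10 and priv_ceil_log2(4096) -> 12, which is how the
 * CQ/QP/PD table sizes below are rounded up to powers of two.
 */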
\r
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN  mlnx_hob_t     *hob_p,
    IN  HH_hca_hndl_t  hh_hndl,
    IN  void           *resources_p)

    mlnx_hobul_t     *hobul_p;
    HH_hca_dev_t     *hca_ul_info;
    ib_api_status_t  status;
    VAPI_hca_cap_t   hca_caps;
#if MLNX_COMP_MODEL == 1
    static uint32_t  proc_num = 0;
#endif

    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

    if (NULL == (hobul_p = cl_zalloc( sizeof(mlnx_hobul_t))))
        return IB_INSUFFICIENT_MEMORY;

    // The following will NULL all pointers/sizes (used in cleanup)
    // cl_memclr(hobul_p, sizeof (mlnx_hobul_t));

    hobul_p->hh_hndl = hh_hndl;

    if (HH_OK != THHUL_hob_create(resources_p, hh_hndl->dev_id, &hobul_p->hhul_hndl))
        status = IB_INSUFFICIENT_RESOURCES;

    hca_ul_info = (HH_hca_dev_t *)hh_hndl;

    hobul_p->vendor_id = hca_ul_info->vendor_id;
    hobul_p->device_id = hca_ul_info->dev_id;
    hobul_p->hca_ul_resources_p = resources_p;
    hobul_p->cq_ul_resources_sz = hca_ul_info->cq_ul_resources_sz;
    hobul_p->qp_ul_resources_sz = hca_ul_info->qp_ul_resources_sz;
    hobul_p->pd_ul_resources_sz = hca_ul_info->pd_ul_resources_sz;

    if (HH_OK != THH_hob_query(hh_hndl, &hca_caps))

    hobul_p->cq_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_cq));
    hobul_p->qp_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_qp)); // Currently mask = 0xFFFF
    hobul_p->max_pd = MASK32(priv_ceil_log2(hca_caps.max_pd_num)) + 1;
    hobul_p->max_cq = hobul_p->cq_idx_mask + 1;
    hobul_p->max_qp = hobul_p->qp_idx_mask + 1;

    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: sizes cq 0x%x qp 0x%x pd 0x%x\n", hca_caps.max_num_cq, hca_caps.max_num_qp, hca_caps.max_pd_num));
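/*
 * Illustrative note: MASK32(priv_ceil_log2(n)) rounds the device limit up to
 * a power of two and turns it into an index mask.  For example, if
 * max_num_qp is 64K, priv_ceil_log2 gives 16, qp_idx_mask becomes 0xFFFF and
 * max_qp becomes 0x10000, so a QP number can later be reduced to a table
 * index with (qpn & qp_idx_mask), as the async DPC below does.
 */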
\r
    /* create and initialize the data structure for CQs */
    hobul_p->cq_info_tbl = cl_zalloc(hobul_p->max_cq * sizeof (cq_info_t));

    /* create and initialize the data structure for QPs */
    hobul_p->qp_info_tbl = cl_zalloc(hobul_p->max_qp * sizeof (qp_info_t));

    /* create and initialize the data structure for PDs */
    hobul_p->pd_info_tbl = cl_zalloc(hobul_p->max_pd * sizeof (pd_info_t));

    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: alloc failed? cq=%d qp=%d pd=%d\n",
        !hobul_p->cq_info_tbl, !hobul_p->qp_info_tbl, !hobul_p->pd_info_tbl));

    if (!hobul_p->pd_info_tbl ||
        !hobul_p->qp_info_tbl ||
        !hobul_p->cq_info_tbl)
        status = IB_INSUFFICIENT_MEMORY;

    /* Initialize all mutexes. */
    for( i = 0; i < hobul_p->max_cq; i++ )
        cl_mutex_construct( &hobul_p->cq_info_tbl[i].mutex );
#if MLNX_COMP_MODEL
        KeInitializeDpc( &hobul_p->cq_info_tbl[i].dpc,
            mlnx_comp_dpc, &hobul_p->cq_info_tbl[i] );
#if MLNX_COMP_MODEL == 1
        KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[i].dpc,
            (CCHAR)(proc_num++ % cl_proc_count()) );
#endif /* MLNX_COMP_MODEL == 1 */
#endif /* MLNX_COMP_MODEL */
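/*
 * Note (a reading of the code above and of mlnx_comp_cb below): under
 * MLNX_COMP_MODEL every CQ owns a KDPC whose context is its cq_info_t entry.
 * With model 1 the target processor is fixed here at table-creation time,
 * spreading CQs round-robin over the CPUs via proc_num++ % cl_proc_count();
 * with model 2 the target processor is chosen again on every completion
 * inside mlnx_comp_cb.
 */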
\r
    for( i = 0; i < hobul_p->max_qp; i++ )
        cl_mutex_construct( &hobul_p->qp_info_tbl[i].mutex );

    for( i = 0; i < hobul_p->max_pd; i++ )
        cl_mutex_construct( &hobul_p->pd_info_tbl[i].mutex );

    for( i = 0; i < hobul_p->max_cq; i++ )
        if( cl_mutex_init( &hobul_p->cq_info_tbl[i].mutex ) != CL_SUCCESS )

    for( i = 0; i < hobul_p->max_qp; i++ )
        if( cl_mutex_init( &hobul_p->qp_info_tbl[i].mutex ) != CL_SUCCESS )

    for( i = 0; i < hobul_p->max_pd; i++ )
        if( cl_mutex_init( &hobul_p->pd_info_tbl[i].mutex ) != CL_SUCCESS )

    hobul_p->log2_mpt_size = ((THH_hca_ul_resources_t *)resources_p)->log2_mpt_size;
    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("log2_mpt_size = %d\n", hobul_p->log2_mpt_size));

    cl_spinlock_acquire(&hob_lock);
    mlnx_hobul_array[hob_p->index] = hobul_p;
    cl_spinlock_release(&hob_lock);

    if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl );
    if (hobul_p->pd_info_tbl)
        for( i = 0; i < hobul_p->max_pd; i++ )
            cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex );
        cl_free(hobul_p->pd_info_tbl);
    if (hobul_p->qp_info_tbl)
        for( i = 0; i < hobul_p->max_qp; i++ )
            cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex );
        cl_free(hobul_p->qp_info_tbl);
    if (hobul_p->cq_info_tbl)
        for( i = 0; i < hobul_p->max_cq; i++ )
            cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex );
        cl_free(hobul_p->cq_info_tbl);
    if (hobul_p) cl_free( hobul_p);

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN   mlnx_hob_t  *hob_p,
    OUT  void        **resources_p )

    mlnx_hobul_t  *hobul_p;

    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

    hobul_p = mlnx_hobul_array[hob_p->index];

    if (hobul_p && resources_p)
        *resources_p = hobul_p->hca_ul_resources_p;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN  mlnx_hob_t  *hob_p)

    mlnx_hobul_t  *hobul_p;

    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

    cl_spinlock_acquire(&hob_lock);
    hobul_p = mlnx_hobul_array[hob_p->index];
    mlnx_hobul_array[hob_p->index] = NULL;
    cl_spinlock_release(&hob_lock);

    if (!hobul_p) return;

    if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl );
    if (hobul_p->pd_info_tbl)
        for( i = 0; i < hobul_p->max_pd; i++ )
            cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex );
        cl_free(hobul_p->pd_info_tbl);
    if (hobul_p->qp_info_tbl)
        for( i = 0; i < hobul_p->max_qp; i++ )
            cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex );
        cl_free(hobul_p->qp_info_tbl);
    if (hobul_p->cq_info_tbl)
        for( i = 0; i < hobul_p->max_cq; i++ )
            KeRemoveQueueDpc( &hobul_p->cq_info_tbl[i].dpc );
            cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex );
        cl_free(hobul_p->cq_info_tbl);
    if (hobul_p) cl_free( hobul_p);

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_map_vapi_event_type(
    IN   unsigned          event_id,
    OUT  ENUM_EVENT_CLASS  *event_class_p)

    case VAPI_QP_PATH_MIGRATED:
        if (event_class_p) *event_class_p = E_EV_QP;
        return IB_AE_QP_APM;

    case VAPI_QP_COMM_ESTABLISHED:
        if (event_class_p) *event_class_p = E_EV_QP;
        return IB_AE_QP_COMM;

    case VAPI_SEND_QUEUE_DRAINED:
        if (event_class_p) *event_class_p = E_EV_QP;
        return IB_AE_SQ_DRAINED;

    case VAPI_CQ_ERROR:
        if (event_class_p) *event_class_p = E_EV_CQ;
        return IB_AE_CQ_ERROR;

    case VAPI_LOCAL_WQ_INV_REQUEST_ERROR:
        if (event_class_p) *event_class_p = E_EV_QP;
        return IB_AE_WQ_REQ_ERROR;

    case VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR:
        if (event_class_p) *event_class_p = E_EV_QP;
        return IB_AE_WQ_ACCESS_ERROR;

    case VAPI_LOCAL_WQ_CATASTROPHIC_ERROR:
        if (event_class_p) *event_class_p = E_EV_QP;
        return IB_AE_QP_FATAL;

    case VAPI_PATH_MIG_REQ_ERROR:
        if (event_class_p) *event_class_p = E_EV_QP;
        return IB_AE_QP_APM_ERROR;

    case VAPI_LOCAL_CATASTROPHIC_ERROR:
        if (event_class_p) *event_class_p = E_EV_CA;
        return IB_AE_LOCAL_FATAL;

    case VAPI_PORT_ERROR:
        /*
         * In tavor_hca\src\Hca\hcahal\tavor\eventp\event_irqh.c:
         * TAVOR_IF_EV_TYPE_PORT_ERR maps one of two port events:
         *   - TAVOR_IF_SUB_EV_PORT_DOWN
         *   - TAVOR_IF_SUB_EV_PORT_UP
         * These map to (respectively)
         *   - VAPI_PORT_ERROR
         *   - VAPI_PORT_ACTIVE
         */
        if (event_class_p) *event_class_p = E_EV_CA;
        return IB_AE_PORT_DOWN; /* INIT, ARMED, DOWN */

    case VAPI_PORT_ACTIVE:
        if (event_class_p) *event_class_p = E_EV_CA;
        return IB_AE_PORT_ACTIVE; /* ACTIVE STATE */

    case VAPI_CLIENT_REREGISTER:
        if (event_class_p) *event_class_p = E_EV_CA;
        return IB_AE_CLIENT_REREGISTER; /* ACTIVE STATE */

        CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n",
            event_id, VAPI_PORT_ACTIVE, IB_AE_LOCAL_FATAL));
        if (event_class_p) *event_class_p = E_EV_CA;
        return IB_AE_LOCAL_FATAL;

mlnx_conv_vapi_event(
    IN   HH_event_record_t  *hh_event_p,
    IN   ib_event_rec_t     *ib_event_p,
    OUT  ENUM_EVENT_CLASS   *event_class_p)

    // ib_event_p->context is handled by the caller

    ib_event_p->type = mlnx_map_vapi_event_type(hh_event_p->etype, event_class_p);

    // no traps currently generated
    // ib_event_p->trap_info.lid = ;
    // ib_event_p->trap_info.port_guid = ;
    // ib_event_p->trap_info.port_num = hh_er;

    IN  HH_hca_hndl_t      hh_hndl,
    IN  HH_event_record_t  *hh_er_p,
    IN  void               *private_data)

    mlnx_cb_data_t  cb_data;
    mlnx_cb_data_t  *cb_data_p;

    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC CB %p (0x%x)\n",
        private_data, (private_data) ? *(u_int32_t *)private_data : 0xB5));

    if (!private_data || !hh_er_p) return;

    obj_idx = *(u_int32_t *)private_data;
    if (obj_idx >= MLNX_NUM_HOBKL) return;

    hob_p = mlnx_hob_array + obj_idx;

    // g_mlnx_dpc2thread will be initialized as a module parameter (default - disabled(0))
    if (g_mlnx_dpc2thread)
        cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t));
        if (!cb_data_p) return;

        cb_data_p->hh_hndl = hh_hndl;
        cb_data_p->private_data = private_data;
        cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t));
        cb_data_p->async_item.pfn_callback = mlnx_async_dpc;
        cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item );

        cb_data_p = &cb_data;

        cb_data_p->hh_hndl = hh_hndl;
        cb_data_p->private_data = private_data;
        cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t));
        mlnx_async_dpc( &cb_data_p->async_item );

    IN  cl_async_proc_item_t  *async_item_p )

    HH_event_record_t  *hh_er_p;
    mlnx_hobul_t       *hobul_p;
    mlnx_cb_data_t     *cb_data_p;
    ENUM_EVENT_CLASS   event_class;
    ib_event_rec_t     event_r;

    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("ASYNC DPC %p\n", async_item_p));

    cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item );

    if (!cb_data_p) return;

    hh_er_p = &cb_data_p->hh_er;
    obj_idx = *(u_int32_t *)cb_data_p->private_data;
    hob_p = mlnx_hob_array + obj_idx;
    hobul_p = mlnx_hobul_array[obj_idx];

    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC type %d ca_context %p\n",
        hh_er_p->etype, hob_p->ca_context));

        !hob_p->async_cb_p)

    cl_memclr(&event_r, sizeof(event_r));
    mlnx_conv_vapi_event(hh_er_p, &event_r, &event_class);

    switch(event_class)
        event_r.context = (void *)hob_p->ca_context;

        obj_idx = hh_er_p->event_modifier.qpn & hobul_p->qp_idx_mask;
        if (obj_idx < hobul_p->max_qp)
            event_r.context = (void *)hobul_p->qp_info_tbl[obj_idx].qp_context;
            CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad qpn 0x%x max 0x%x\n", obj_idx, hobul_p->max_qp));

        obj_idx = hh_er_p->event_modifier.cq & hobul_p->cq_idx_mask;
        if (obj_idx < hobul_p->max_cq)
            event_r.context = (void *)hobul_p->cq_info_tbl[obj_idx].cq_context;
            CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad cqn 0x%x max 0x%x\n", obj_idx, hobul_p->max_cq));

        // CL_ASSERT(0); // This shouldn't happen
        CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC unknown event_class 0x%x\n", event_class));

    // Call the registered CB
    (*hob_p->async_cb_p)(&event_r);

    if (g_mlnx_dpc2thread)
        cl_free(cb_data_p);

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN  HH_hca_hndl_t  hh_hndl,
    IN  HH_cq_hndl_t   hh_cq,
    IN  void           *private_data)

#if MLNX_COMP_MODEL
    mlnx_hobul_t  *hobul_p;
#if MLNX_COMP_MODEL == 2
    static uint32_t  proc_num = 0;
#endif

    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data));

    UNUSED_PARAM( hh_hndl );

    hca_idx = *(u_int32_t *)private_data;
    hob_p = mlnx_hob_array + hca_idx;
    hobul_p = mlnx_hobul_array[hca_idx];
    cq_num = hh_cq & hobul_p->cq_idx_mask;

    if (NULL != hob_p && NULL != hobul_p &&
        hob_p->hh_hndl && hob_p->comp_cb_p)
        if (cq_num < hobul_p->max_cq)
#if MLNX_COMP_MODEL == 2
            KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[cq_num].dpc,
                (CCHAR)(proc_num++ % cl_proc_count()) );
#endif /* MLNX_COMP_MODEL == 2 */
            KeInsertQueueDpc( &hobul_p->cq_info_tbl[cq_num].dpc,
            HCA_TRACE( HCA_DBG_ERROR, ("CQ index out of range!!!\n") );

#else /* MLNX_COMP_MODEL */
    mlnx_cb_data_t  cb_data;
    mlnx_cb_data_t  *cb_data_p;

    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data));

    if (!private_data) return;

    obj_idx = *(u_int32_t *)private_data;
    hob_p = mlnx_hob_array + obj_idx;
    if (!hob_p) return;

    if (g_mlnx_dpc2thread)
        cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t));
        if (!cb_data_p) return;

        cb_data_p->hh_hndl = hh_hndl;
        cb_data_p->hh_cq = hh_cq;
        cb_data_p->private_data = private_data;

        cb_data_p->async_item.pfn_callback = mlnx_comp_dpc;

        // Report completion through async_proc
        cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item );

        cb_data_p = &cb_data;

        cb_data_p->hh_hndl = hh_hndl;
        cb_data_p->hh_cq = hh_cq;
        cb_data_p->private_data = private_data;

        // Report completion directly from DPC (verbs should NOT sleep)
        mlnx_comp_dpc( &cb_data_p->async_item );

#endif /* MLNX_COMP_MODEL */
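/*
 * Note (a reading of the two builds above): completion dispatch is selected at
 * compile time.  With MLNX_COMP_MODEL, mlnx_comp_cb only queues the per-CQ
 * KDPC (re-targeting it per completion when MLNX_COMP_MODEL == 2) and
 * mlnx_comp_dpc below runs at DISPATCH_LEVEL.  Without it, the callback either
 * bounces through the cl_async_proc worker thread (g_mlnx_dpc2thread != 0) or
 * calls mlnx_comp_dpc inline, in which case the IBAL completion handler must
 * not sleep.
 */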
\r
#if MLNX_COMP_MODEL
    mlnx_hob_t *hob_p = (mlnx_hob_t*)arg1;
    UNUSED_PARAM( p_dpc );
    UNUSED_PARAM( unused );

    hob_p->comp_cb_p( (void*)((cq_info_t*)context)->cq_context );

#else /* MLNX_COMP_MODEL */
    IN  cl_async_proc_item_t  *async_item_p )

    mlnx_hobul_t    *hobul_p;
    mlnx_cb_data_t  *cb_data_p;

    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP DPC %p\n", async_item_p));

    cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item );
    if (!cb_data_p) return;

    hca_idx = *(u_int32_t *)cb_data_p->private_data;
    hob_p = mlnx_hob_array + hca_idx;
    hobul_p = mlnx_hobul_array[hca_idx];
    cq_num = (u_int32_t)cb_data_p->hh_cq & hobul_p->cq_idx_mask;

    if (NULL != hob_p && NULL != hobul_p &&
        hob_p->hh_hndl && hob_p->comp_cb_p)
        if (cq_num < hobul_p->max_cq)
            (*hob_p->comp_cb_p)((void *)hobul_p->cq_info_tbl[cq_num].cq_context);

    if (g_mlnx_dpc2thread)
        cl_free(cb_data_p);

#endif /* MLNX_COMP_MODEL */

// ### Conversions

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN  ib_access_t  ibal_acl)

    VAPI_mrw_acl_t  vapi_acl = 0;

    if (ibal_acl & IB_AC_RDMA_READ)   vapi_acl |= VAPI_EN_REMOTE_READ;
    if (ibal_acl & IB_AC_RDMA_WRITE)  vapi_acl |= VAPI_EN_REMOTE_WRITE;
    if (ibal_acl & IB_AC_ATOMIC)      vapi_acl |= VAPI_EN_REMOTE_ATOM;
    if (ibal_acl & IB_AC_LOCAL_WRITE) vapi_acl |= VAPI_EN_LOCAL_WRITE;
    if (ibal_acl & IB_AC_MW_BIND)     vapi_acl |= VAPI_EN_MEMREG_BIND;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN  VAPI_mrw_acl_t  vapi_acl)

    ib_access_t  ibal_acl = 0;

    if (vapi_acl & VAPI_EN_REMOTE_READ)  ibal_acl |= IB_AC_RDMA_READ;
    if (vapi_acl & VAPI_EN_REMOTE_WRITE) ibal_acl |= IB_AC_RDMA_WRITE;
    if (vapi_acl & VAPI_EN_REMOTE_ATOM)  ibal_acl |= IB_AC_ATOMIC;
    if (vapi_acl & VAPI_EN_LOCAL_WRITE)  ibal_acl |= IB_AC_LOCAL_WRITE;
    if (vapi_acl & VAPI_EN_MEMREG_BIND)  ibal_acl |= IB_AC_MW_BIND;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
static VAPI_rdma_atom_acl_t
    IN  ib_access_t  ibal_acl)

    VAPI_rdma_atom_acl_t  vapi_qp_acl = 0;

    if (ibal_acl & IB_AC_RDMA_WRITE) vapi_qp_acl |= VAPI_EN_REM_WRITE;
    if (ibal_acl & IB_AC_RDMA_READ)  vapi_qp_acl |= VAPI_EN_REM_READ;
    if (ibal_acl & IB_AC_ATOMIC)     vapi_qp_acl |= VAPI_EN_REM_ATOMIC_OP;

    return vapi_qp_acl;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
static ib_access_t
    IN  VAPI_rdma_atom_acl_t  vapi_qp_acl)

    ib_access_t  ibal_acl = IB_AC_LOCAL_WRITE;

    if (vapi_qp_acl & VAPI_EN_REM_WRITE)     ibal_acl |= IB_AC_RDMA_WRITE;
    if (vapi_qp_acl & VAPI_EN_REM_READ)      ibal_acl |= IB_AC_RDMA_READ;
    if (vapi_qp_acl & VAPI_EN_REM_ATOMIC_OP) ibal_acl |= IB_AC_ATOMIC;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
    IN  mlnx_mro_t  *mro_p,
    IN  boolean_t   um_call )

    MOSAL_iobuf_t  old_iobuf;

    if( um_call )
        mro_p->mr_prot_ctx = MOSAL_get_current_prot_ctx();
    else
        mro_p->mr_prot_ctx = MOSAL_get_kernel_prot_ctx();

    // Save pointer to existing locked region.
    old_iobuf = mro_p->mr_iobuf;

    if (MT_OK != MOSAL_iobuf_register((MT_virt_addr_t)mro_p->mr_start,
        (MT_size_t)mro_p->mr_size,
        mro_p->mr_prot_ctx,
        mro_p->mr_mosal_perm,

    if( MT_OK != MOSAL_iobuf_deregister( old_iobuf ) )

    return IB_SUCCESS;
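/*
 * Note (as this locking routine reads): when a region is re-locked, the new
 * iobuf is registered with MOSAL first and only then is the previously saved
 * iobuf deregistered, so the old pinning is not dropped before the
 * replacement registration has succeeded.
 */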
\r
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_conv_ibal_mr_create(
    IN      u_int32_t             pd_idx,
    IN OUT  mlnx_mro_t            *mro_p,
    IN      VAPI_mr_change_t      change_flags,
    IN      ib_mr_create_t const  *p_mr_create,
    IN      boolean_t             um_call,
    OUT     HH_mr_t               *mr_props_p )

    ib_api_status_t  status;

    /* Set ACL information first since it is used to lock the region. */
    if( change_flags & VAPI_MR_CHANGE_ACL )
        mro_p->mr_acl = map_ibal_acl( p_mr_create->access_ctrl );
        // This computation should be externalized by THH
        mro_p->mr_mosal_perm =
            ((mro_p->mr_acl & VAPI_EN_LOCAL_WRITE) ? MOSAL_PERM_WRITE : 0);

    if( change_flags & VAPI_MR_CHANGE_TRANS )
        CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("addr 0x%p size %"PRId64"\n", (void *)p_mr_create->vaddr, p_mr_create->length));
        // Build TPT entries
        mro_p->mr_start = (IB_virt_addr_t)p_mr_create->vaddr;
        mro_p->mr_size = p_mr_create->length;
        if (IB_SUCCESS != (status = mlnx_lock_region(mro_p, um_call)))

    /* Now fill in the MR properties. */
    mr_props_p->start = mro_p->mr_start;
    mr_props_p->size = mro_p->mr_size;
    mr_props_p->acl = mro_p->mr_acl;
    mr_props_p->pd = pd_idx;

    mr_props_p->tpt.tpt_type = HH_TPT_IOBUF;
    mr_props_p->tpt.tpt.iobuf = mro_p->mr_iobuf;

    return IB_SUCCESS;

/////////////////////////////////////////////////////////
// On entry mro_p->mr_start holds the pmr address
/////////////////////////////////////////////////////////
mlnx_conv_ibal_pmr_create(
    IN   u_int32_t               pd_idx,
    IN   mlnx_mro_t              *mro_p,
    IN   ib_phys_create_t const  *p_pmr_create,
    OUT  HH_mr_t                 *mr_props_p )

    VAPI_phy_addr_t*  buf_lst = NULL;
    VAPI_size_t*      sz_lst = NULL;
    u_int32_t         page_shift = priv_ceil_log2(p_pmr_create->hca_page_size);
    u_int64_t         page_mask = (1 << page_shift) - 1;
    u_int64_t         tot_sz = 0;

    CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl,
        ("PRE: addr %p size 0x%"PRIx64" shift %d\n",
        (void *)(uintn_t)mro_p->mr_start, p_pmr_create->length, page_shift));
    mro_p->mr_start = (mro_p->mr_start & ~page_mask) | (p_pmr_create->buf_offset & page_mask);
    CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl,
        ("POST: addr %p\n", (void *)(uintn_t)mro_p->mr_start));
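/*
 * Illustrative example: with a 4KB HCA page (page_shift = 12,
 * page_mask = 0xFFF), an incoming mr_start of 0x7000 and a buf_offset of
 * 0x234 combine to 0x7234 -- the page-aligned part of the caller's address
 * plus the byte offset of the data within the first physical page.
 */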
\r
    mr_props_p->start = mro_p->mr_start;
    mr_props_p->size = p_pmr_create->length;
    mr_props_p->acl = map_ibal_acl(p_pmr_create->access_ctrl);
    mr_props_p->pd = pd_idx;

    mro_p->mr_size = mr_props_p->size;
    // mro_p->mr_first_page_addr = 0;
    // mro_p->mr_num_pages = (mro_p->mr_end >> PAGESHIFT) + 1 - (mro_p->mr_start >> PAGESHIFT);
    // CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st pg addr 0x%p pages %d\n",
    //     (void *)mro_p->mr_first_page_addr, p_pmr_create->num_bufs));
    CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st phys addr 0x%"PRIx64" phys pages %d\n",
        p_pmr_create->range_array[0].base_addr, p_pmr_create->num_ranges));

    // Build TPT entries
    if (!p_pmr_create->range_array)
        return IB_INVALID_PARAMETER;

    if (p_pmr_create->hca_page_size !=
        MT_DOWN_ALIGNX_PHYS(p_pmr_create->hca_page_size, page_shift))
        CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf size is not page aligned\n"));
        return IB_INVALID_PARAMETER;

    for (i = 0; i < p_pmr_create->num_ranges; i++)
        uint64_t start_addr = p_pmr_create->range_array[i].base_addr;
        uint64_t end_addr = start_addr + p_pmr_create->range_array[i].size;

        if( end_addr < start_addr ) {
            CL_TRACE( CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf end < start\n") );
            return IB_INVALID_PARAMETER;

            MT_DOWN_ALIGNX_PHYS(start_addr, page_shift))
            CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf start address is not page aligned\n"));
            return IB_INVALID_PARAMETER;

        tot_sz += p_pmr_create->range_array[i].size;

    if( tot_sz < p_pmr_create->length + p_pmr_create->buf_offset )
        HCA_TRACE_EXIT( HCA_DBG_ERROR,
            ("length(0x%"PRIx64") + buf offset(0x%"PRIx64") larger than sum "
            "of phys ranges(0x%"PRIx64")\n",
            p_pmr_create->length, p_pmr_create->buf_offset, tot_sz) );
        return IB_INVALID_PARAMETER;

    if( p_pmr_create->buf_offset > p_pmr_create->range_array[0].size )
        HCA_TRACE_EXIT( HCA_DBG_ERROR,
            ("buf offset(0x%x) > than 1st phy range size(0x%"PRIx64")\n",
            p_pmr_create->buf_offset, p_pmr_create->range_array[0].size) );
        return IB_INVALID_PARAMETER;

    /* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */
    buf_lst = (VAPI_phy_addr_t*)cl_pzalloc( sizeof(VAPI_phy_addr_t)*(p_pmr_create->num_ranges));
        HCA_TRACE_EXIT( HCA_DBG_ERROR,
            ("Failed to allocate range address list.\n") );
        return IB_INSUFFICIENT_MEMORY;

    /* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */
    sz_lst = (VAPI_size_t*)cl_pzalloc( sizeof(VAPI_size_t)*(p_pmr_create->num_ranges));
        cl_free( buf_lst );
        HCA_TRACE_EXIT( HCA_DBG_ERROR,
            ("Failed to allocate range size list.\n") );
        return IB_INSUFFICIENT_MEMORY;

    for (i = 0; i < p_pmr_create->num_ranges; i++)
        buf_lst[i] = p_pmr_create->range_array[i].base_addr;
        sz_lst[i] = p_pmr_create->range_array[i].size;

    mr_props_p->tpt.tpt_type = HH_TPT_BUF;
    mr_props_p->tpt.num_entries = p_pmr_create->num_ranges;
    mr_props_p->tpt.tpt.buf_lst.buf_sz_lst = sz_lst;
    mr_props_p->tpt.tpt.buf_lst.phys_buf_lst = buf_lst;
    mr_props_p->tpt.tpt.buf_lst.iova_offset = p_pmr_create->buf_offset;

    return IB_SUCCESS;

mlnx_gid_to_index(
    IN  HH_hca_hndl_t  hh_hndl,
    IN  u_int8_t       port_num,
    IN  u_int8_t       *raw_gid)

    ib_gid_t  *gid_table_p = NULL;
    u_int8_t  index = 0; // default return value

    gid_table_p = cl_zalloc( 64*sizeof(ib_gid_t));

    mlnx_get_hca_gid_tbl(hh_hndl, port_num, 64, gid_table_p);

    for (i = 0; i < 64; i++)
        if (!cl_memcmp(raw_gid, gid_table_p[i].raw, sizeof(ib_gid_t)))
            CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("1: found GID at index %d\n", i));

    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("2: found GID at index %d\n", index));

    cl_free( gid_table_p);

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_conv_ibal_av(
    IN   HH_hca_hndl_t       hh_hndl,
    IN   const ib_av_attr_t  *ibal_av_p,
    OUT  VAPI_ud_av_t        *vapi_av_p)

    vapi_av_p->port = ibal_av_p->port_num;
    vapi_av_p->sl = ibal_av_p->sl;
    vapi_av_p->dlid = cl_ntoh16 (ibal_av_p->dlid);

    vapi_av_p->static_rate =
        (ibal_av_p->static_rate == IB_PATH_RECORD_RATE_10_GBS ? 0 : 3);
    ib_grh_get_ver_class_flow( ibal_av_p->grh.ver_class_flow, NULL,
        &vapi_av_p->traffic_class, &vapi_av_p->flow_label );
    vapi_av_p->src_path_bits = ibal_av_p->path_bits; // PATH:
    //vapi_av_p->src_path_bits = 0;

    /* For global destination or Multicast address: */
    if (ibal_av_p->grh_valid)
        vapi_av_p->grh_flag = TRUE;
        vapi_av_p->hop_limit = ibal_av_p->grh.hop_limit;
        // CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("raw %p, &raw %p\n", ibal_av_p->grh.src_gid.raw, &ibal_av_p->grh.src_gid.raw));
        vapi_av_p->sgid_index = mlnx_gid_to_index(hh_hndl, ibal_av_p->port_num, (u_int8_t *)ibal_av_p->grh.src_gid.raw);
        cl_memcpy(vapi_av_p->dgid, ibal_av_p->grh.dest_gid.raw, sizeof(vapi_av_p->dgid));

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_conv_vapi_av(
    IN   HH_hca_hndl_t       hh_hndl,
    IN   const VAPI_ud_av_t  *vapi_av_p,
    OUT  ib_av_attr_t        *ibal_av_p)

    ibal_av_p->port_num = vapi_av_p->port;
    ibal_av_p->sl = vapi_av_p->sl;
    ibal_av_p->dlid = cl_ntoh16(vapi_av_p->dlid);

    /* For global destination or Multicast address: */
    ibal_av_p->grh_valid = vapi_av_p->grh_flag;

    ibal_av_p->grh.ver_class_flow = ib_grh_set_ver_class_flow( ver,
        vapi_av_p->traffic_class,
        vapi_av_p->flow_label);
    ibal_av_p->grh.hop_limit = vapi_av_p->hop_limit;

    THH_hob_get_sgid(hh_hndl,
        vapi_av_p->sgid_index,
        &ibal_av_p->grh.src_gid.raw);

    cl_memcpy(ibal_av_p->grh.dest_gid.raw, vapi_av_p->dgid, sizeof(vapi_av_p->dgid));

    ibal_av_p->static_rate = (vapi_av_p->static_rate ?
        IB_PATH_RECORD_RATE_2_5_GBS : IB_PATH_RECORD_RATE_10_GBS);
    ibal_av_p->path_bits = vapi_av_p->src_path_bits;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_map_vapi_cqe_status(
    IN  VAPI_wc_status_t  vapi_status)

    switch (vapi_status)
        case IB_COMP_SUCCESS:           return IB_WCS_SUCCESS;
        case IB_COMP_LOC_LEN_ERR:       return IB_WCS_LOCAL_LEN_ERR;
        case IB_COMP_LOC_QP_OP_ERR:     return IB_WCS_LOCAL_OP_ERR;
        case IB_COMP_LOC_PROT_ERR:      return IB_WCS_LOCAL_PROTECTION_ERR;
        case IB_COMP_WR_FLUSH_ERR:      return IB_WCS_WR_FLUSHED_ERR;
        case IB_COMP_MW_BIND_ERR:       return IB_WCS_MEM_WINDOW_BIND_ERR;
        case IB_COMP_REM_INV_REQ_ERR:   return IB_WCS_REM_INVALID_REQ_ERR;
        case IB_COMP_REM_ACCESS_ERR:    return IB_WCS_REM_ACCESS_ERR;
        case IB_COMP_REM_OP_ERR:        return IB_WCS_REM_OP_ERR;
        case IB_COMP_RETRY_EXC_ERR:     return IB_WCS_TIMEOUT_RETRY_ERR;
        case IB_COMP_RNR_RETRY_EXC_ERR: return IB_WCS_RNR_RETRY_ERR;
        case IB_COMP_REM_ABORT_ERR:     return IB_WCS_REM_ACCESS_ERR; // ???
        case IB_COMP_FATAL_ERR:         return IB_WCS_REM_ACCESS_ERR; // ???
        case IB_COMP_GENERAL_ERR:       return IB_WCS_REM_ACCESS_ERR; // ???

            CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n",
                vapi_status, IB_COMP_GENERAL_ERR, IB_WCS_REM_ACCESS_ERR));
            return IB_WCS_REM_ACCESS_ERR;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_map_vapi_cqe_type(
    IN  VAPI_cqe_opcode_t  opcode)

        case VAPI_CQE_SQ_SEND_DATA:     return IB_WC_SEND;
        case VAPI_CQE_SQ_RDMA_WRITE:    return IB_WC_RDMA_WRITE;
        case VAPI_CQE_SQ_RDMA_READ:     return IB_WC_RDMA_READ;
        case VAPI_CQE_SQ_COMP_SWAP:     return IB_WC_COMPARE_SWAP;
        case VAPI_CQE_SQ_FETCH_ADD:     return IB_WC_FETCH_ADD;
        case VAPI_CQE_SQ_BIND_MRW:      return IB_WC_MW_BIND;
        case VAPI_CQE_RQ_SEND_DATA:     return IB_WC_RECV;
        case VAPI_CQE_RQ_RDMA_WITH_IMM: return IB_WC_RECV_RDMA_WRITE;

            return IB_WC_SEND;

/////////////////////////////////////////////////////////
// Map Remote Node Addr Type
/////////////////////////////////////////////////////////
mlnx_map_vapi_rna_type(
    IN  VAPI_remote_node_addr_type_t  rna)

        case VAPI_RNA_UD:       return IB_QPT_UNRELIABLE_DGRM;
        case VAPI_RNA_RAW_ETY:  return IB_QPT_RAW_ETHER;
        case VAPI_RNA_RAW_IPV6: return IB_QPT_RAW_IPV6;

            return IB_QPT_RELIABLE_CONN;

//////////////////////////////////////////////////////////////
// Convert from VAPI memory-region attributes to IBAL
//////////////////////////////////////////////////////////////
mlnx_conv_vapi_mr_attr(
    IN   ib_pd_handle_t  pd_h,
    IN   HH_mr_info_t    *mr_info_p,
    OUT  ib_mr_attr_t    *mr_query_p)

    mr_query_p->h_pd = pd_h;
    mr_query_p->local_lb = mr_info_p->local_start;
    mr_query_p->local_ub = mr_info_p->local_start + mr_info_p->local_size;
    mr_query_p->remote_lb = mr_info_p->remote_start;
    mr_query_p->remote_ub = mr_info_p->remote_start + mr_info_p->remote_size;

    mr_query_p->access_ctrl = map_vapi_acl(mr_info_p->acl);
    mr_query_p->lkey = mr_info_p->lkey;
    mr_query_p->rkey = cl_hton32(mr_info_p->rkey);

//////////////////////////////////////////////////////////////
// Convert from IBAL memory-window bind request to VAPI
//////////////////////////////////////////////////////////////
mlnx_conv_bind_req(
    IN   HHUL_qp_hndl_t       hhul_qp_hndl,
    IN   ib_bind_wr_t* const  p_mw_bind,
    OUT  HHUL_mw_bind_t       *bind_prop_p)

    bind_prop_p->qp = hhul_qp_hndl;
    bind_prop_p->id = p_mw_bind->wr_id;
    bind_prop_p->acl = map_ibal_acl(p_mw_bind->access_ctrl);
    bind_prop_p->size = p_mw_bind->local_ds.length;
    bind_prop_p->start = (VAPI_virt_addr_t)(MT_virt_addr_t)p_mw_bind->local_ds.vaddr;
    bind_prop_p->mr_lkey = p_mw_bind->local_ds.lkey;
    bind_prop_p->comp_type =
        (p_mw_bind->send_opt & IB_SEND_OPT_SIGNALED) ? VAPI_SIGNALED : VAPI_UNSIGNALED;

/////////////////////////////////////////////////////////
// Map IBAL qp type to VAPI transport and special qp_type
/////////////////////////////////////////////////////////
mlnx_map_ibal_qp_type(
    IN   ib_qp_type_t       ibal_qpt,
    OUT  VAPI_special_qp_t  *vapi_qp_type_p)

        case IB_QPT_RELIABLE_CONN:
            if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;

        case IB_QPT_UNRELIABLE_CONN:
            if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;

        case IB_QPT_UNRELIABLE_DGRM:
            if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;

            if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP;

            if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;

        case IB_QPT_RAW_IPV6:
            if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_IPV6_QP; // TBD: ??

        case IB_QPT_RAW_ETHER:
            if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP; // TBD: ??

            if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;

        case IB_QPT_QP0_ALIAS:
            if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP;

        case IB_QPT_QP1_ALIAS:
            if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;

            CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map ibal_qp_type %d (last known %d) returning %d\n",
                ibal_qpt, IB_QPT_QP1_ALIAS, IB_TS_RAW));
            if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP;

/////////////////////////////////////////////////////////
// QP and CQ value must be handled by caller
/////////////////////////////////////////////////////////
mlnx_conv_qp_create_attr(
    IN   const ib_qp_create_t  *create_attr_p,
    OUT  HHUL_qp_init_attr_t   *init_attr_p,
    OUT  VAPI_special_qp_t     *vapi_qp_type_p)

    init_attr_p->ts_type = mlnx_map_ibal_qp_type(create_attr_p->qp_type, vapi_qp_type_p);

    init_attr_p->qp_cap.max_oust_wr_sq = create_attr_p->sq_depth;
    init_attr_p->qp_cap.max_oust_wr_rq = create_attr_p->rq_depth;
    init_attr_p->qp_cap.max_sg_size_sq = create_attr_p->sq_sge;
    init_attr_p->qp_cap.max_sg_size_rq = create_attr_p->rq_sge;

    init_attr_p->sq_sig_type = (create_attr_p->sq_signaled) ? VAPI_SIGNAL_ALL_WR : VAPI_SIGNAL_REQ_WR;
    init_attr_p->rq_sig_type = VAPI_SIGNAL_ALL_WR;

    init_attr_p->srq = HHUL_INVAL_SRQ_HNDL;

/////////////////////////////////////////////////////////
// NOTE: ibal_qp_state is non-linear - so we cannot use a LUT
/////////////////////////////////////////////////////////
mlnx_map_ibal_qp_state(
    IN  ib_qp_state_t  ibal_qp_state)

    VAPI_qp_state_t  vapi_qp_state = VAPI_RESET;

    if      (ibal_qp_state & IB_QPS_RESET) vapi_qp_state = VAPI_RESET;
    else if (ibal_qp_state & IB_QPS_INIT)  vapi_qp_state = VAPI_INIT;
    else if (ibal_qp_state & IB_QPS_RTR)   vapi_qp_state = VAPI_RTR;
    else if (ibal_qp_state & IB_QPS_RTS)   vapi_qp_state = VAPI_RTS;
    else if (ibal_qp_state & IB_QPS_SQD)   vapi_qp_state = VAPI_SQD;
    else if (ibal_qp_state & IB_QPS_SQERR) vapi_qp_state = VAPI_SQE;
    else if (ibal_qp_state & IB_QPS_ERROR) vapi_qp_state = VAPI_ERR;

    return vapi_qp_state;
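/*
 * Note: ib_qp_state_t values appear to be individual bit flags rather than
 * consecutive integers (hence the "non-linear" remark above), so the mapping
 * tests each state bit in turn instead of indexing a lookup table.
 */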
\r
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_map_vapi_qp_state(
    IN  VAPI_qp_state_t  vapi_qp_state)

    switch (vapi_qp_state)
        case VAPI_RESET: return IB_QPS_RESET;
        case VAPI_INIT:  return IB_QPS_INIT;
        case VAPI_RTR:   return IB_QPS_RTR;
        case VAPI_RTS:   return IB_QPS_RTS;
        case VAPI_SQD:   return IB_QPS_SQD;
        case VAPI_SQE:   return IB_QPS_SQERR;
        case VAPI_ERR:   return IB_QPS_ERROR;
        // TBD: IB_QPS_SQD_DRAINING
        // TBD: IB_QPS_SQD_DRAINED

            CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_qp_state %d (last known %d) returning %d\n",
                vapi_qp_state, VAPI_ERR, IB_QPS_INIT));
            return IB_QPS_INIT;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_map_vapi_apm_state(
    IN  VAPI_mig_state_t  vapi_apm_state)

    switch (vapi_apm_state)
        case VAPI_MIGRATED: return IB_APM_MIGRATED;
        case VAPI_REARM:    return IB_APM_REARM;
        case VAPI_ARMED:    return IB_APM_ARMED;

            CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_apm_state %d (last known %d) returning %d\n",
                vapi_apm_state, VAPI_ARMED, 0));

/////////////////////////////////////////////////////////
// UNUSED: IBAL uses same encoding as THH
/////////////////////////////////////////////////////////
u_int32_t ibal_mtu_to_vapi(u_int32_t ibal_mtu)

    u_int32_t  mtu = 0;

    // MTU256=1, MTU512=2, MTU1024=3
    while (ibal_mtu >>= 1) mtu++;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
u_int32_t vapi_mtu_to_ibal(u_int32_t vapi_mtu)

    return (1 << (vapi_mtu + 7));
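/*
 * Illustrative example: the encoding used here is MTU_code = log2(bytes) - 7,
 * so vapi_mtu_to_ibal(1) == 1 << 8 == 256 and vapi_mtu_to_ibal(3) == 1024,
 * matching the MTU256=1, MTU512=2, MTU1024=3 table noted above.  These
 * helpers appear to be kept only for reference since IBAL already uses the
 * same encoding as THH.
 */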
\r
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_conv_vapi_qp_attr(
    IN   HH_hca_hndl_t   hh_hndl,
    IN   VAPI_qp_attr_t  *hh_qp_attr_p,
    OUT  ib_qp_attr_t    *qp_attr_p)

    qp_attr_p->access_ctrl = map_vapi_qp_acl(hh_qp_attr_p->remote_atomic_flags);
    qp_attr_p->pkey_index = (uint16_t)hh_qp_attr_p->pkey_ix;
    qp_attr_p->sq_depth = hh_qp_attr_p->cap.max_oust_wr_sq;
    qp_attr_p->rq_depth = hh_qp_attr_p->cap.max_oust_wr_rq;
    qp_attr_p->sq_sge = hh_qp_attr_p->cap.max_sg_size_sq;
    qp_attr_p->rq_sge = hh_qp_attr_p->cap.max_sg_size_rq;
    qp_attr_p->sq_max_inline = hh_qp_attr_p->cap.max_inline_data_sq;
    qp_attr_p->init_depth = hh_qp_attr_p->ous_dst_rd_atom; // outstanding outgoing
    qp_attr_p->resp_res = hh_qp_attr_p->qp_ous_rd_atom;    // outstanding as target (in)

    qp_attr_p->num = cl_ntoh32(hh_qp_attr_p->qp_num);
    CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_qpn 0x%x = hh_qpn 0x%x\n",
        qp_attr_p->num,
        hh_qp_attr_p->qp_num));

    qp_attr_p->dest_num = cl_ntoh32(hh_qp_attr_p->dest_qp_num);
    CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_dest 0x%x = hh_dest 0x%x\n",
        qp_attr_p->dest_num,
        hh_qp_attr_p->dest_qp_num));
    qp_attr_p->qkey = cl_ntoh32 (hh_qp_attr_p->qkey);

    qp_attr_p->sq_psn = cl_ntoh32 (hh_qp_attr_p->sq_psn);
    qp_attr_p->rq_psn = cl_ntoh32 (hh_qp_attr_p->rq_psn);

    qp_attr_p->primary_port = hh_qp_attr_p->port;
    qp_attr_p->alternate_port = hh_qp_attr_p->alt_port;

    qp_attr_p->state = mlnx_map_vapi_qp_state(hh_qp_attr_p->qp_state);
    qp_attr_p->apm_state = mlnx_map_vapi_apm_state(hh_qp_attr_p->path_mig_state);

    mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->av, &qp_attr_p->primary_av);
    qp_attr_p->primary_av.conn.path_mtu = (u_int8_t)hh_qp_attr_p->path_mtu;
    qp_attr_p->primary_av.conn.local_ack_timeout = hh_qp_attr_p->timeout;
    qp_attr_p->primary_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count;
    qp_attr_p->primary_av.conn.rnr_retry_cnt = hh_qp_attr_p->rnr_retry;

    mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->alt_av, &qp_attr_p->alternate_av);
    qp_attr_p->alternate_av.conn.path_mtu = (u_int8_t)hh_qp_attr_p->path_mtu;
    qp_attr_p->alternate_av.conn.local_ack_timeout = hh_qp_attr_p->timeout;
    qp_attr_p->alternate_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count;
    qp_attr_p->alternate_av.conn.rnr_retry_cnt = hh_qp_attr_p->rnr_retry;

/*
    QP_ATTR_EN_SQD_ASYN_NOTIF
    + QP_ATTR_REMOTE_ATOMIC_FLAGS
    + QP_ATTR_RETRY_COUNT
    + QP_ATTR_RNR_RETRY
    QP_ATTR_QP_OUS_RD_ATOM
    - QP_ATTR_ALT_PATH
    + QP_ATTR_MIN_RNR_TIMER
    QP_ATTR_OUS_DST_RD_ATOM
    QP_ATTR_PATH_MIG_STATE
*/

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_conv_qp_modify_attr(
    IN   HH_hca_hndl_t        hh_hndl,
    IN   ib_qp_type_t         qp_type,
    IN   const ib_qp_mod_t    *modify_attr_p,
    OUT  VAPI_qp_attr_t       *qp_attr_p,
    OUT  VAPI_qp_attr_mask_t  *attr_mask_p)

    qp_attr_p->qp_state = mlnx_map_ibal_qp_state(modify_attr_p->req_state);
    *attr_mask_p = QP_ATTR_QP_STATE;

    switch(modify_attr_p->req_state)
        case IB_QPS_RESET:

            *attr_mask_p |= QP_ATTR_PORT |

            qp_attr_p->port = modify_attr_p->state.init.primary_port;
            qp_attr_p->qkey = cl_ntoh32 (modify_attr_p->state.init.qkey);
            qp_attr_p->pkey_ix = modify_attr_p->state.init.pkey_index;
            if (IB_QPT_RELIABLE_CONN == qp_type)
                *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;
                qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.init.access_ctrl);

                qp_attr_p->remote_atomic_flags = 0;

            /* VAPI doesn't support modifying the WQE depth ever. */
            if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||
                modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH )
                return IB_UNSUPPORTED;

            *attr_mask_p |= QP_ATTR_RQ_PSN |
                QP_ATTR_DEST_QP_NUM |
                QP_ATTR_QP_OUS_RD_ATOM |
                QP_ATTR_MIN_RNR_TIMER |

            qp_attr_p->rq_psn = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);
            qp_attr_p->dest_qp_num = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);
            qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rtr.resp_res;

            qp_attr_p->min_rnr_timer = modify_attr_p->state.rtr.rnr_nak_timeout;

            CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("modify_qp: hh_dest 0x%x = ibal_dest 0x%x\n",
                qp_attr_p->dest_qp_num, modify_attr_p->state.rtr.dest_qp));

            // Convert primary RC AV (mandatory)
            cl_memclr(&qp_attr_p->av, sizeof(VAPI_ud_av_t));
            mlnx_conv_ibal_av(hh_hndl,
                &modify_attr_p->state.rtr.primary_av, &qp_attr_p->av);

            if (IB_QPT_RELIABLE_CONN == qp_type)
                *attr_mask_p |= QP_ATTR_PATH_MTU;
                qp_attr_p->path_mtu = modify_attr_p->state.rtr.primary_av.conn.path_mtu; // MTU
                *attr_mask_p |= QP_ATTR_TIMEOUT;
                qp_attr_p->timeout = modify_attr_p->state.rtr.primary_av.conn.local_ack_timeout; // XXX: conv
                *attr_mask_p |= QP_ATTR_RETRY_COUNT;
                qp_attr_p->retry_count = modify_attr_p->state.rtr.primary_av.conn.seq_err_retry_cnt;
                *attr_mask_p |= QP_ATTR_RNR_RETRY;
                qp_attr_p->rnr_retry = modify_attr_p->state.rtr.primary_av.conn.rnr_retry_cnt;

            // Convert Remote Atomic Flags
            if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL)
                *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;
                qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rtr.access_ctrl);

            // Convert alternate RC AV
            if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV)
                *attr_mask_p |= QP_ATTR_ALT_PATH;
                cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t));
                mlnx_conv_ibal_av(hh_hndl,
                    &modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_av);

                if (IB_QPT_RELIABLE_CONN == qp_type)
                    qp_attr_p->alt_timeout = modify_attr_p->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv

                    /* Not compliant with spec 1.1: these fields were already set above. */
                    qp_attr_p->retry_count = modify_attr_p->state.rtr.alternate_av.conn.seq_err_retry_cnt;
                    qp_attr_p->rnr_retry = modify_attr_p->state.rtr.alternate_av.conn.rnr_retry_cnt;

            /* VAPI doesn't support modifying the WQE depth ever. */
            if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||
                modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH )
                return IB_UNSUPPORTED;

            *attr_mask_p |= QP_ATTR_SQ_PSN |
                QP_ATTR_RETRY_COUNT |
                QP_ATTR_RNR_RETRY |
                QP_ATTR_OUS_DST_RD_ATOM |
                QP_ATTR_MIN_RNR_TIMER;

            qp_attr_p->sq_psn = cl_ntoh32 (modify_attr_p->state.rts.sq_psn);

            if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL)
                *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;
                qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rts.access_ctrl);

            qp_attr_p->timeout = modify_attr_p->state.rts.local_ack_timeout; // XXX: conv
            qp_attr_p->ous_dst_rd_atom = modify_attr_p->state.rts.init_depth;
            qp_attr_p->retry_count = modify_attr_p->state.rts.retry_cnt;
            qp_attr_p->rnr_retry = modify_attr_p->state.rts.rnr_retry_cnt;
            qp_attr_p->min_rnr_timer = modify_attr_p->state.rts.rnr_nak_timeout;

            // Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)
            if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {
                *attr_mask_p |= QP_ATTR_QP_OUS_RD_ATOM;
                qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rts.resp_res;

            // Convert alternate RC AV
            if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV)
                *attr_mask_p |= QP_ATTR_ALT_PATH;
                cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t));
                mlnx_conv_ibal_av(hh_hndl,
                    &modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_av);
                if (IB_QPT_RELIABLE_CONN == qp_type)
                    qp_attr_p->alt_timeout = modify_attr_p->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv

                    /* Not compliant with spec 1.1: these fields were already set above. */
                    qp_attr_p->retry_count = modify_attr_p->state.rts.alternate_av.conn.seq_err_retry_cnt;
                    qp_attr_p->rnr_retry = modify_attr_p->state.rts.alternate_av.conn.rnr_retry_cnt;

        // TBD: The following are treated equally (SQ Drain)
        case IB_QPS_SQD_DRAINING:
        case IB_QPS_SQD_DRAINED:
            *attr_mask_p |= QP_ATTR_EN_SQD_ASYN_NOTIF;
            qp_attr_p->en_sqd_asyn_notif = (MT_bool)modify_attr_p->state.sqd.sqd_event;

        case IB_QPS_SQERR:
        case IB_QPS_ERROR:
        case IB_QPS_TIME_WAIT:

    CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("CL: conv_qp_modify: new state %d attr_mask 0x%x\n", qp_attr_p->qp_state, *attr_mask_p));
    return IB_SUCCESS;

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
static VAPI_wr_opcode_t
map_ibal_send_opcode(
    IN  ib_wr_type_t  ibal_opcode,
    IN  boolean_t     imm )

    VAPI_wr_opcode_t  vapi_opcode;

    switch (ibal_opcode)
        case WR_SEND:         vapi_opcode = VAPI_SEND;
        case WR_RDMA_WRITE:   vapi_opcode = VAPI_RDMA_WRITE;
        case WR_RDMA_READ:    vapi_opcode = VAPI_RDMA_READ;
        case WR_COMPARE_SWAP: vapi_opcode = VAPI_ATOMIC_CMP_AND_SWP;
        case WR_FETCH_ADD:    vapi_opcode = VAPI_ATOMIC_FETCH_AND_ADD;
        default:              vapi_opcode = VAPI_SEND;

    if (imm && (VAPI_SEND == vapi_opcode || VAPI_RDMA_WRITE == vapi_opcode)) vapi_opcode++;
    return vapi_opcode;
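/*
 * Note: the vapi_opcode++ above appears to rely on the VAPI enum placing each
 * *_WITH_IMM opcode immediately after its plain counterpart (VAPI_SEND ->
 * VAPI_SEND_WITH_IMM, VAPI_RDMA_WRITE -> VAPI_RDMA_WRITE_WITH_IMM), so a work
 * request flagged with IB_SEND_OPT_IMMEDIATE is promoted without a second
 * switch statement.
 */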
\r
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
ib_api_status_t
mlnx_conv_send_desc(
    IN  IB_ts_t             transport,
    IN  const ib_send_wr_t *ibal_send_wqe_p,
    OUT VAPI_sr_desc_t     *vapi_send_desc_p)
{
    boolean_t                     imm = FALSE;
    u_int32_t                     idx;
    register VAPI_sg_lst_entry_t *sg_lst_p;
    register ib_local_ds_t       *ds_array;

    switch (transport)
    {
    case IB_TS_UD:
        CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "UD"));
        {
            mlnx_avo_t *avo_p = (mlnx_avo_t *)ibal_send_wqe_p->dgrm.ud.h_av;

            vapi_send_desc_p->remote_qp   = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qp);
            vapi_send_desc_p->remote_qkey = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qkey);

            if (!avo_p || avo_p->mark != E_MARK_AV)
                return IB_INVALID_AV_HANDLE;

            vapi_send_desc_p->remote_ah = avo_p->h_av; // was ah.hhul
        }
        break;

    case IB_TS_RC:
        CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "RC"));
        // vapi_send_desc_p->remote_qp   = 0;
        // vapi_send_desc_p->remote_qkey = 0;
        vapi_send_desc_p->remote_addr = ibal_send_wqe_p->remote_ops.vaddr;
        vapi_send_desc_p->r_key       = ibal_send_wqe_p->remote_ops.rkey;
        vapi_send_desc_p->compare_add = ibal_send_wqe_p->remote_ops.atomic1;
        vapi_send_desc_p->swap        = ibal_send_wqe_p->remote_ops.atomic2;
        break;

    default: // TBD: RAW, RD
        return IB_UNSUPPORTED;
    }

    imm = (0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_IMMEDIATE));
    vapi_send_desc_p->fence     = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_FENCE));
    vapi_send_desc_p->set_se    = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SOLICITED));
    vapi_send_desc_p->comp_type = (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SIGNALED) ?
        VAPI_SIGNALED : VAPI_UNSIGNALED;

    vapi_send_desc_p->id       = ibal_send_wqe_p->wr_id;
    vapi_send_desc_p->opcode   = map_ibal_send_opcode(ibal_send_wqe_p->wr_type, imm);
    vapi_send_desc_p->imm_data = cl_ntoh32 (ibal_send_wqe_p->immediate_data);

    vapi_send_desc_p->sg_lst_len = ibal_send_wqe_p->num_ds;

    sg_lst_p = vapi_send_desc_p->sg_lst_p;
    ds_array = ibal_send_wqe_p->ds_array;
    for (idx = 0; idx < ibal_send_wqe_p->num_ds; idx++)
    {
        sg_lst_p->addr = ds_array->vaddr;
        sg_lst_p->len  = ds_array->length;
        sg_lst_p->lkey = ds_array->lkey;
        // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_send (conv) addr %Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey));
        sg_lst_p++;
        ds_array++;
    }

    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("send: rqpn 0x%x rkey 0x%x\n",
        vapi_send_desc_p->remote_qp,
        vapi_send_desc_p->remote_qkey));
    return IB_SUCCESS;
}
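/*
 * Note on mlnx_conv_send_desc() above: the converter copies the IBAL data
 * segments into vapi_send_desc_p->sg_lst_p, so the caller must point
 * sg_lst_p at storage with room for at least num_ds entries before calling.
 * A minimal, hypothetical caller-side sketch (compiled out; the post_send
 * path elsewhere in this driver is the authoritative user):
 */
#if 0
static void example_post_send_conversion( IN ib_send_wr_t *wr_p )
{
    VAPI_sr_desc_t      send_desc;
    VAPI_sg_lst_entry_t sg_list[8];   /* 8 is an arbitrary illustrative depth */
    ib_api_status_t     status;

    send_desc.sg_lst_p = sg_list;     /* caller-owned SG storage (see note above) */
    status = mlnx_conv_send_desc(IB_TS_RC, wr_p, &send_desc);
    if (IB_SUCCESS == status) {
        /* send_desc is now ready to hand to the VAPI/HHUL post-send call */
    }
}
#endif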
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
ib_api_status_t
mlnx_conv_recv_desc(
    IN  const ib_recv_wr_t *ibal_recv_wqe_p,
    OUT VAPI_rr_desc_t     *vapi_recv_desc_p)
{
    u_int32_t                     idx;
    register VAPI_sg_lst_entry_t *sg_lst_p;
    register ib_local_ds_t       *ds_array;

    vapi_recv_desc_p->id         = ibal_recv_wqe_p->wr_id;
    vapi_recv_desc_p->sg_lst_len = ibal_recv_wqe_p->num_ds;
    vapi_recv_desc_p->opcode     = VAPI_RECEIVE;
    vapi_recv_desc_p->comp_type  = VAPI_SIGNALED;

    sg_lst_p = vapi_recv_desc_p->sg_lst_p;
    ds_array = ibal_recv_wqe_p->ds_array;
    for (idx = 0; idx < ibal_recv_wqe_p->num_ds; idx++)
    {
        sg_lst_p->addr = ds_array->vaddr;
        sg_lst_p->len  = ds_array->length;
        sg_lst_p->lkey = ds_array->lkey;
        // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_recv (conv) addr 0x%Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey));
        sg_lst_p++;
        ds_array++;
    }

    return IB_SUCCESS;
}
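/*
 * Note on mlnx_conv_recv_desc() above: receive work requests are always
 * converted with comp_type = VAPI_SIGNALED, i.e. every receive generates a
 * completion regardless of any option flags on the IBAL WR.  As with the
 * send path, sg_lst_p must point at caller-provided storage large enough
 * for num_ds entries.
 */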
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
static void
vapi_port_cap_to_ibal(
    IN  IB_port_cap_mask_t  vapi_port_cap,
    OUT ib_port_cap_t      *ibal_port_cap_p)
{
    if (vapi_port_cap & IB_CAP_MASK_IS_CONN_MGMT_SUP)
        ibal_port_cap_p->cm = TRUE;
    if (vapi_port_cap & IB_CAP_MASK_IS_SNMP_TUNN_SUP)
        ibal_port_cap_p->snmp = TRUE;
    if (vapi_port_cap & IB_CAP_MASK_IS_DEVICE_MGMT_SUP)
        ibal_port_cap_p->dev_mgmt = TRUE;
    if (vapi_port_cap & IB_CAP_MASK_IS_VENDOR_CLS_SUP)
        ibal_port_cap_p->vend = TRUE;
    if (vapi_port_cap & IB_CAP_MASK_IS_SM_DISABLED)
        ibal_port_cap_p->sm_disable = TRUE;
    if (vapi_port_cap & IB_CAP_MASK_IS_SM)
        ibal_port_cap_p->sm = TRUE;
    if (vapi_port_cap & IB_CAP_MASK_IS_CLIENT_REREGISTRATION_SUP)
        ibal_port_cap_p->client_reregister = TRUE;
}
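/*
 * Note on vapi_port_cap_to_ibal() above: only the capability bits that are
 * present get set; nothing is ever cleared, so the caller is expected to
 * zero the ib_port_cap_t first (see the cl_memclr() in
 * mlnx_conv_vapi_hca_cap() below).
 */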
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
void
mlnx_conv_vapi_hca_cap(
    IN  HH_hca_dev_t    *hca_info_p,
    IN  VAPI_hca_cap_t  *vapi_hca_cap_p,
    IN  VAPI_hca_port_t *vapi_hca_ports,
    OUT ib_ca_attr_t    *ca_attr_p)
{
    u_int8_t         port_num;
    VAPI_hca_port_t *vapi_port_p;
    ib_port_attr_t  *ibal_port_p;

    ca_attr_p->vend_id  = hca_info_p->vendor_id;
    ca_attr_p->dev_id   = (uint16_t)hca_info_p->dev_id;
    ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver;
    ca_attr_p->fw_ver   = hca_info_p->fw_ver;

    ca_attr_p->ca_guid           = *(UNALIGNED64 u_int64_t *)vapi_hca_cap_p->node_guid;
    ca_attr_p->num_ports         = vapi_hca_cap_p->phys_port_num;
    ca_attr_p->max_qps           = vapi_hca_cap_p->max_num_qp;
    ca_attr_p->max_wrs           = vapi_hca_cap_p->max_qp_ous_wr;
    ca_attr_p->max_sges          = vapi_hca_cap_p->max_num_sg_ent;
    ca_attr_p->max_rd_sges       = vapi_hca_cap_p->max_num_sg_ent_rd;
    ca_attr_p->max_cqs           = vapi_hca_cap_p->max_num_cq;
    ca_attr_p->max_cqes          = vapi_hca_cap_p->max_num_ent_cq;
    ca_attr_p->max_pds           = vapi_hca_cap_p->max_pd_num;
    ca_attr_p->init_regions      = vapi_hca_cap_p->max_num_mr;
    ca_attr_p->init_windows      = vapi_hca_cap_p->max_mw_num;
    ca_attr_p->init_region_size  = vapi_hca_cap_p->max_mr_size;
    ca_attr_p->max_addr_handles  = vapi_hca_cap_p->max_ah_num;
    ca_attr_p->atomicity         = vapi_hca_cap_p->atomic_cap;
    ca_attr_p->max_partitions    = vapi_hca_cap_p->max_pkeys;
    ca_attr_p->max_qp_resp_res   = vapi_hca_cap_p->max_qp_ous_rd_atom;
    ca_attr_p->max_resp_res      = vapi_hca_cap_p->max_res_rd_atom;
    ca_attr_p->max_qp_init_depth = vapi_hca_cap_p->max_qp_init_rd_atom;
    ca_attr_p->max_ipv6_qps      = vapi_hca_cap_p->max_raw_ipv6_qp;
    ca_attr_p->max_ether_qps     = vapi_hca_cap_p->max_raw_ethy_qp;
    ca_attr_p->max_mcast_grps    = vapi_hca_cap_p->max_mcast_grp_num;
    ca_attr_p->max_mcast_qps     = vapi_hca_cap_p->max_total_mcast_qp_attach_num;
    ca_attr_p->max_qps_per_mcast_grp = vapi_hca_cap_p->max_mcast_qp_attach_num;
    ca_attr_p->local_ack_delay       = vapi_hca_cap_p->local_ca_ack_delay;
    ca_attr_p->bad_pkey_ctr_support  = vapi_hca_cap_p->flags & VAPI_BAD_PKEY_COUNT_CAP;
    ca_attr_p->bad_qkey_ctr_support  = vapi_hca_cap_p->flags & VAPI_BAD_QKEY_COUNT_CAP;
    ca_attr_p->raw_mcast_support     = vapi_hca_cap_p->flags & VAPI_RAW_MULTI_CAP;
    ca_attr_p->apm_support           = vapi_hca_cap_p->flags & VAPI_AUTO_PATH_MIG_CAP;
    ca_attr_p->av_port_check         = vapi_hca_cap_p->flags & VAPI_UD_AV_PORT_ENFORCE_CAP;
    ca_attr_p->change_primary_port   = vapi_hca_cap_p->flags & VAPI_CHANGE_PHY_PORT_CAP;
    ca_attr_p->modify_wr_depth       = vapi_hca_cap_p->flags & VAPI_RESIZE_OUS_WQE_CAP;
    ca_attr_p->hw_agents             = FALSE; // in the context of IBAL, the agent is implemented on the host

    ca_attr_p->num_page_sizes = 1;
    ca_attr_p->p_page_size[0] = PAGESIZE;     // TBD: extract an array of page sizes from the HCA capabilities
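    /*
     * Convert the per-port attributes.  The VAPI port array is 0-based while
     * IBAL port numbers are 1-based, hence "port_num + 1" below; the port
     * GUID is taken from entry 0 of the port's GID table.
     */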
    for (port_num = 0; port_num < vapi_hca_cap_p->phys_port_num; port_num++)
    {
        // Set up the port pointers
        ibal_port_p = &ca_attr_p->p_port_attr[port_num];
        vapi_port_p = &vapi_hca_ports[port_num];

        // Port capabilities
        cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));
        vapi_port_cap_to_ibal(vapi_port_p->capability_mask, &ibal_port_p->cap);

        ibal_port_p->port_num   = port_num + 1;
        ibal_port_p->port_guid  = ibal_port_p->p_gid_table[0].unicast.interface_id;
        ibal_port_p->lid        = cl_ntoh16(vapi_port_p->lid);
        ibal_port_p->lmc        = vapi_port_p->lmc;
        ibal_port_p->max_vls    = vapi_port_p->max_vl_num;
        ibal_port_p->sm_lid     = cl_ntoh16(vapi_port_p->sm_lid);
        ibal_port_p->sm_sl      = vapi_port_p->sm_sl;
        ibal_port_p->link_state = (vapi_port_p->state != 0) ? (uint8_t)vapi_port_p->state : IB_LINK_DOWN;
        ibal_port_p->num_gids   = vapi_port_p->gid_tbl_len;
        ibal_port_p->num_pkeys  = vapi_port_p->pkey_tbl_len;
        ibal_port_p->pkey_ctr   = (uint16_t)vapi_port_p->bad_pkey_counter;
        ibal_port_p->qkey_ctr   = (uint16_t)vapi_port_p->qkey_viol_counter;
        ibal_port_p->max_msg_size = vapi_port_p->max_msg_sz;
        ibal_port_p->mtu        = (u_int8_t)vapi_port_p->max_mtu;

        ibal_port_p->subnet_timeout = 5;       // TBD: currently 128us
        // ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec

        CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("Port %d port_guid 0x%"PRIx64"\n",
            ibal_port_p->port_num, ibal_port_p->port_guid));
    }
}
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
ib_api_status_t
mlnx_get_hca_pkey_tbl(
    IN  HH_hca_hndl_t  hh_hndl,
    IN  u_int8_t       port_num,
    IN  u_int16_t      num_entries,
    OUT void          *table_p)
{
    u_int16_t   size;
    ib_net16_t *pkey_p;

    if (HH_OK != THH_hob_get_pkey_tbl( hh_hndl, port_num, num_entries, &size, table_p))
        return IB_ERROR;

    pkey_p = (ib_net16_t *)table_p;
    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("port %d pkey0 0x%x pkey1 0x%x\n", port_num, pkey_p[0], pkey_p[1]));

    return IB_SUCCESS;
}
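/*
 * Note on mlnx_get_hca_pkey_tbl() above: table_p must be caller-allocated
 * with room for num_entries P_Keys; the entries are returned as ib_net16_t,
 * i.e. in network byte order, and the actual count reported by THH in
 * "size" is currently not passed back to the caller.
 */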
ib_api_status_t
mlnx_get_hca_gid_tbl(
    IN  HH_hca_hndl_t  hh_hndl,
    IN  u_int8_t       port_num,
    IN  u_int16_t      num_entries,
    OUT void          *table_p)
{
    u_int16_t size;

    if (HH_OK != THH_hob_get_gid_tbl( hh_hndl, port_num, num_entries, &size, table_p))
        return IB_ERROR;

    return IB_SUCCESS;
}
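/*
 * Illustrative sketch only, compiled out: querying the port tables with
 * caller-allocated storage.  The array sizes below are arbitrary; real
 * callers size them from ib_port_attr_t::num_gids / num_pkeys, and the
 * hh_hndl and port number are assumed to come from the surrounding HOB
 * context.
 */
#if 0
static void example_query_port_tables( IN HH_hca_hndl_t hh_hndl )
{
    ib_gid_t   gid_tbl[32];
    ib_net16_t pkey_tbl[32];

    if (IB_SUCCESS == mlnx_get_hca_gid_tbl(hh_hndl, 1, 32, gid_tbl) &&
        IB_SUCCESS == mlnx_get_hca_pkey_tbl(hh_hndl, 1, 32, pkey_tbl))
    {
        /* gid_tbl[0].unicast.interface_id is the port GUID (see
           mlnx_conv_vapi_hca_cap above) */
    }
}
#endif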