[MT23108] RNR NAK timeout is required in RTR transition.
[mirror/winof/.git] / hw / mt23108 / kernel / hca_data.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
4  *\r
5  * This software is available to you under the OpenIB.org BSD license\r
6  * below:\r
7  *\r
8  *     Redistribution and use in source and binary forms, with or\r
9  *     without modification, are permitted provided that the following\r
10  *     conditions are met:\r
11  *\r
12  *      - Redistributions of source code must retain the above\r
13  *        copyright notice, this list of conditions and the following\r
14  *        disclaimer.\r
15  *\r
16  *      - Redistributions in binary form must reproduce the above\r
17  *        copyright notice, this list of conditions and the following\r
18  *        disclaimer in the documentation and/or other materials\r
19  *        provided with the distribution.\r
20  *\r
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
22  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
24  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
25  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
26  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
27  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
28  * SOFTWARE.\r
29  *\r
30  * $Id$\r
31  */\r
32 \r
33 \r
34 #include "hca_data.h"\r
35 #include "hca_debug.h"\r
36 \r
/* Protects mlnx_hca_list and the mlnx_hob_array / mlnx_hobul_array slot tables. */
static cl_spinlock_t	hob_lock;

#if 1
/* Debug level bitmask; errors only in the shipping build. */
u_int32_t		g_mlnx_dbg_lvl = CL_DBG_ERROR ;
#else
/* Verbose debug mask used during bring-up; kept for reference. */
u_int32_t		g_mlnx_dbg_lvl = CL_DBG_ERROR |
	MLNX_DBG_QPN |
	MLNX_DBG_MEM |
	MLNX_DBG_INFO |
	MLNX_DBG_TRACE |
	// MLNX_DBG_DIRECT |
	0;
#endif

/* Nonzero routes async/completion callbacks through the async-proc thread
 * instead of invoking them directly (see mlnx_async_cb). Default disabled. */
u_int32_t		g_mlnx_dpc2thread = 0;

#ifdef MODULE_LICENSE
MODULE_LICENSE("Proprietary");
#endif

/* Expose both knobs as integer module parameters. */
MODULE_PARM(g_mlnx_dbg_lvl, "i");
MODULE_PARM(g_mlnx_dpc2thread, "i");

/* List of registered HCAs, guarded by hob_lock. */
cl_qlist_t		mlnx_hca_list;
//mlnx_hca_t		mlnx_hca_array[MLNX_MAX_HCA];
//uint32_t		mlnx_num_hca = 0;

mlnx_hob_t		mlnx_hob_array[MLNX_NUM_HOBKL];		// kernel HOB - one per HCA (cmdif access)

mlnx_hobul_t	*mlnx_hobul_array[MLNX_NUM_HOBUL];	// kernel HOBUL - one per HCA (kar access)

/* User verb library name */
/* TODO: Move to linux osd file.
char			mlnx_uvp_lib_name[MAX_LIB_NAME] = {"libmlnx_uvp.so"};
*/
72 \r
/* Deferred handler for asynchronous HCA events (runs in async-proc thread
 * when g_mlnx_dpc2thread is set, otherwise called inline from mlnx_async_cb). */
static void
mlnx_async_dpc(
	IN				cl_async_proc_item_t		*async_item_p );

#if MLNX_COMP_MODEL
/* Completion handler with NT kernel DPC signature (one DPC per CQ). */
static void
mlnx_comp_dpc(
	IN				PRKDPC						p_dpc,
	IN				void						*context,
	IN				void						*pfn_comp_cb,
	IN				void						*unused );
#else
/* Completion handler queued to the async-proc thread pool. */
static void
mlnx_comp_dpc(
	IN				cl_async_proc_item_t		*async_item_p );
#endif

// ### Callback Interface
/* CQ completion callback registered with the HH layer (THH). */
static void
mlnx_comp_cb(
	IN				HH_hca_hndl_t				hh_hndl,
	IN				HH_cq_hndl_t				hh_cq,
	IN				void						*private_data);

/* Async event callback registered with the HH layer (THH). */
static void
mlnx_async_cb(
	IN				HH_hca_hndl_t				hh_hndl,
	IN				HH_event_record_t			*hh_er_p,
	IN				void						*private_data);
103 /////////////////////////////////////////////////////////\r
104 // ### HCA\r
105 /////////////////////////////////////////////////////////\r
/* Append an HCA object to the global HCA list under hob_lock.
 * p_hca must outlive its list membership; removal is mlnx_hca_remove. */
void
mlnx_hca_insert(
	IN				mlnx_hca_t					*p_hca )
{
	cl_spinlock_acquire( &hob_lock );
	cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item );
	cl_spinlock_release( &hob_lock );
}
114 \r
/* Unlink an HCA object from the global HCA list under hob_lock.
 * The caller must ensure p_hca was previously inserted. */
void
mlnx_hca_remove(
	IN				mlnx_hca_t					*p_hca )
{
	cl_spinlock_acquire( &hob_lock );
	cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item );
	cl_spinlock_release( &hob_lock );
}
123 \r
124 mlnx_hca_t*\r
125 mlnx_hca_from_guid(\r
126         IN                              ib_net64_t                                      guid )\r
127 {\r
128         cl_list_item_t  *p_item;\r
129         mlnx_hca_t              *p_hca = NULL;\r
130 \r
131         cl_spinlock_acquire( &hob_lock );\r
132         p_item = cl_qlist_head( &mlnx_hca_list );\r
133         while( p_item != cl_qlist_end( &mlnx_hca_list ) )\r
134         {\r
135                 p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );\r
136                 if( p_hca->guid == guid )\r
137                         break;\r
138                 p_item = cl_qlist_next( p_item );\r
139                 p_hca = NULL;\r
140         }\r
141         cl_spinlock_release( &hob_lock );\r
142         return p_hca;\r
143 }\r
144 \r
145 mlnx_hca_t*\r
146 mlnx_hca_from_hh_hndl(\r
147         IN                              HH_hca_hndl_t                                   hh_hndl )\r
148 {\r
149         cl_list_item_t  *p_item;\r
150         mlnx_hca_t              *p_hca = NULL;\r
151 \r
152         cl_spinlock_acquire( &hob_lock );\r
153         p_item = cl_qlist_head( &mlnx_hca_list );\r
154         while( p_item != cl_qlist_end( &mlnx_hca_list ) )\r
155         {\r
156                 p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );\r
157                 if( p_hca->hh_hndl == hh_hndl )\r
158                         break;\r
159                 p_item = cl_qlist_next( p_item );\r
160                 p_hca = NULL;\r
161         }\r
162         cl_spinlock_release( &hob_lock );\r
163         return p_hca;\r
164 }\r
165 \r
166 \r
167 /*\r
168 void\r
169 mlnx_names_from_guid(\r
170         IN                              ib_net64_t                                      guid,\r
171                 OUT                     char                                            **hca_name_p,\r
172                 OUT                     char                                            **dev_name_p)\r
173 {\r
174         unsigned int idx;\r
175 \r
176         if (!hca_name_p) return;\r
177         if (!dev_name_p) return;\r
178 \r
179         for (idx = 0; idx < mlnx_num_hca; idx++)\r
180         {\r
181                 if (mlnx_hca_array[idx].ifx.guid == guid)\r
182                 {\r
183                         *hca_name_p = mlnx_hca_array[idx].hca_name_p;\r
184                         *dev_name_p = mlnx_hca_array[idx].dev_name_p;\r
185                 }\r
186         }\r
187 }\r
188 */\r
189 \r
190 /////////////////////////////////////////////////////////\r
191 // ### HOB\r
192 /////////////////////////////////////////////////////////\r
193 cl_status_t\r
194 mlnx_hobs_init( void )\r
195 {\r
196         u_int32_t idx;\r
197 \r
198         cl_qlist_init( &mlnx_hca_list );\r
199 \r
200         for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)\r
201         {\r
202                 mlnx_hob_array[idx].hh_hndl = NULL;\r
203                 mlnx_hob_array[idx].comp_cb_p = NULL;\r
204                 mlnx_hob_array[idx].async_cb_p = NULL;\r
205                 mlnx_hob_array[idx].ca_context = NULL;\r
206                 mlnx_hob_array[idx].async_proc_mgr_p = NULL;\r
207                 mlnx_hob_array[idx].cl_device_h = NULL;\r
208                 // mlnx_hob_array[idx].port_lmc_p = NULL;\r
209                 mlnx_hob_array[idx].index = idx;\r
210                 mlnx_hob_array[idx].mark = E_MARK_INVALID;\r
211         }\r
212         return cl_spinlock_init( &hob_lock );\r
213 }\r
214 \r
215 /////////////////////////////////////////////////////////\r
216 /////////////////////////////////////////////////////////\r
217 ib_api_status_t\r
218 mlnx_hobs_insert(\r
219         IN                              mlnx_hca_t                                      *p_hca,\r
220                 OUT                     mlnx_hob_t                                      **hob_pp)\r
221 {\r
222         u_int32_t idx;\r
223         ib_api_status_t status = IB_ERROR;\r
224         mlnx_cache_t    *p_cache;\r
225 \r
226         p_cache = (mlnx_cache_t*)cl_pzalloc( sizeof(mlnx_cache_t) * 2 );\r
227         if( !p_cache )\r
228                 return IB_INSUFFICIENT_MEMORY;\r
229 \r
230         cl_spinlock_acquire(&hob_lock);\r
231         for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)\r
232         {\r
233                 if (!mlnx_hob_array[idx].hh_hndl)\r
234                 {\r
235                         mlnx_hob_array[idx].hh_hndl = p_hca->hh_hndl;\r
236                         mlnx_hob_array[idx].mark = E_MARK_CA;\r
237                         if (hob_pp) *hob_pp = &mlnx_hob_array[idx];\r
238                         status = IB_SUCCESS;\r
239                         break;\r
240                 }\r
241         }\r
242         cl_spinlock_release(&hob_lock);\r
243 \r
244         if (IB_SUCCESS == status)\r
245                 (*hob_pp)->cache = p_cache;\r
246         else\r
247                 cl_free( p_cache );\r
248 \r
249         return status;\r
250 }\r
251 \r
252 /////////////////////////////////////////////////////////\r
253 /////////////////////////////////////////////////////////\r
/* Register IBAL completion/async callbacks on a HOB and hook them up to the
 * HH layer.
 *
 * Lazily creates the HOB's async-proc manager (MLNX_NUM_CB_THR threads) on
 * first use; on manager init failure the manager is torn down and
 * IB_INSUFFICIENT_RESOURCES is returned. With a valid hh_hndl, registers
 * mlnx_async_cb / mlnx_comp_cb with THH using &hob_p->index as the context,
 * then records the IBAL callbacks and ca_context on the HOB.
 *
 * Returns IB_SUCCESS, IB_INSUFFICIENT_MEMORY, IB_INSUFFICIENT_RESOURCES,
 * or IB_ERROR when the HOB has no HH handle. */
ib_api_status_t
mlnx_hobs_set_cb(
	IN				mlnx_hob_t					*hob_p, 
	IN				ci_completion_cb_t			comp_cb_p,
	IN				ci_async_event_cb_t			async_cb_p,
	IN		const	void* const					ib_context)
{
	cl_status_t		cl_status;

	// Verify handle
	CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

	// Setup the callbacks
	if (!hob_p->async_proc_mgr_p)
	{
		hob_p->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) );
		if( !hob_p->async_proc_mgr_p )
		{
			return IB_INSUFFICIENT_MEMORY;
		}
		cl_async_proc_construct( hob_p->async_proc_mgr_p );
		cl_status = cl_async_proc_init( hob_p->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" );
		if( cl_status != CL_SUCCESS )
		{
			/* Roll back so a later call can retry the lazy init. */
			cl_async_proc_destroy( hob_p->async_proc_mgr_p );
			cl_free(hob_p->async_proc_mgr_p);
			hob_p->async_proc_mgr_p = NULL;
			return IB_INSUFFICIENT_RESOURCES;
		}
	}

	if (hob_p->hh_hndl)
	{
		THH_hob_set_async_eventh(hob_p->hh_hndl,
			mlnx_async_cb,
			&hob_p->index); // This is the context our CB wants to receive
		THH_hob_set_comp_eventh( hob_p->hh_hndl,
			mlnx_comp_cb,
			&hob_p->index); // This is the context our CB wants to receive
		hob_p->comp_cb_p  = comp_cb_p;
		hob_p->async_cb_p = async_cb_p;
		hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL
		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hca_idx %d context 0x%p\n", hob_p - mlnx_hob_array, ib_context));
		return IB_SUCCESS;
	}
	return IB_ERROR;
}
301 \r
302 /////////////////////////////////////////////////////////\r
303 /////////////////////////////////////////////////////////\r
304 ib_api_status_t\r
305 mlnx_hobs_get_context(\r
306         IN                              mlnx_hob_t                                      *hob_p,\r
307                 OUT                     void                                            **context_p)\r
308 {\r
309         // Verify handle\r
310         CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
311 \r
312         if (hob_p->hh_hndl)\r
313         {\r
314                 if (context_p) *context_p = &hob_p->index;\r
315                 return IB_SUCCESS;\r
316         }\r
317         return IB_ERROR;\r
318 }\r
319 \r
320 /////////////////////////////////////////////////////////\r
321 /////////////////////////////////////////////////////////\r
322 void\r
323 mlnx_hobs_remove(\r
324         IN                              mlnx_hob_t                                      *hob_p)\r
325 {\r
326         cl_async_proc_t *p_async_proc;\r
327         mlnx_cache_t    *p_cache;\r
328 \r
329         // Verify handle\r
330         CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
331 \r
332         cl_spinlock_acquire( &hob_lock );\r
333 \r
334         hob_p->mark = E_MARK_INVALID;\r
335 \r
336         p_async_proc = hob_p->async_proc_mgr_p;\r
337         hob_p->async_proc_mgr_p = NULL;\r
338 \r
339         p_cache = hob_p->cache;\r
340         hob_p->cache = NULL;\r
341 \r
342         hob_p->hh_hndl = NULL;\r
343         hob_p->comp_cb_p = NULL;\r
344         hob_p->async_cb_p = NULL;\r
345         hob_p->ca_context = NULL;\r
346         hob_p->cl_device_h = NULL;\r
347 \r
348         cl_spinlock_release( &hob_lock );\r
349 \r
350         if( p_async_proc )\r
351         {\r
352                 cl_async_proc_destroy( p_async_proc );\r
353                 cl_free( p_async_proc );\r
354         }\r
355 \r
356         if( p_cache )\r
357                 cl_free( p_cache );\r
358 \r
359         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hobs_remove idx %d hh_hndl 0x%p\n", hob_p - mlnx_hob_array, hob_p->hh_hndl));\r
360 }\r
361 \r
362 /////////////////////////////////////////////////////////\r
363 /////////////////////////////////////////////////////////\r
364 ib_api_status_t\r
365 mlnx_hobs_lookup(\r
366         IN                              HH_hca_hndl_t                           hndl,\r
367                 OUT                     mlnx_hob_t                                      **hca_p)\r
368 {\r
369         u_int32_t idx;\r
370 \r
371         if (!hca_p)\r
372                 return IB_ERROR;\r
373 \r
374         cl_spinlock_acquire( &hob_lock );\r
375         for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)\r
376         {\r
377                 if (hndl == mlnx_hob_array[idx].hh_hndl)\r
378                 {\r
379                         *hca_p = &mlnx_hob_array[idx];\r
380                         cl_spinlock_release( &hob_lock );\r
381                         return IB_SUCCESS;\r
382                 }\r
383         }\r
384         cl_spinlock_release( &hob_lock );\r
385         return IB_ERROR;\r
386 }\r
387 \r
388 /////////////////////////////////////////////////////////\r
389 /////////////////////////////////////////////////////////\r
390 void\r
391 mlnx_hobs_get_handle(\r
392         IN                              mlnx_hob_t                                      *hob_p,\r
393                 OUT                     HH_hca_hndl_t                           *hndl_p)\r
394 {\r
395         // Verify handle\r
396         CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
397 \r
398         if (hndl_p)\r
399                 *hndl_p = hob_p->hh_hndl;\r
400 }\r
401 \r
402 /////////////////////////////////////////////////////////\r
403 /////////////////////////////////////////////////////////\r
404 mlnx_hobul_t *\r
405 mlnx_hobs_get_hobul(\r
406         IN                              mlnx_hob_t                                      *hob_p)\r
407 {\r
408         // Verify handle\r
409         if ((hob_p - mlnx_hob_array) >= MLNX_NUM_HOBKL)\r
410                 return NULL;\r
411 \r
412         return mlnx_hobul_array[hob_p->index];\r
413 }\r
414 \r
415 \r
/* Return ceil(log2(n)): the number of bits needed so that (1 << result) >= n.
 * Yields 0 for n == 0 and n == 1; e.g. 2 -> 1, 3 -> 2, 4 -> 2, 5 -> 3.
 *
 * Fix: the shift constants are now u_int32_t. The original computed
 * (1 << 31) with a signed int, which is undefined behavior in C
 * (left-shift overflow of a signed value, C11 6.5.7). */
static int priv_ceil_log2(u_int32_t n)
{
	int shift;

	/* Locate the highest set bit (defaults to 0 if only bit 0, or none, is set). */
	for (shift = 31; shift > 0; shift--)
		if (n & ((u_int32_t)1 << shift)) break;

	/* Round up when n is not an exact power of two. */
	if (((u_int32_t)1 << shift) < n) shift++;

	return shift;
}
427 \r
428 /////////////////////////////////////////////////////////\r
429 // ### HOBUL\r
430 /////////////////////////////////////////////////////////\r
/* Create and register the user-level companion object (HOBUL) for a HOB.
 *
 * Sequence: allocate the zeroed hobul, create the THHUL hob, copy sizing
 * info from the HH device structure, query HCA capabilities, size the
 * CQ/QP/PD tables from the capability counts (rounded up to powers of two),
 * allocate and mutex-initialize the tables, then publish the hobul in
 * mlnx_hobul_array under hob_lock. Any failure unwinds through 'cleanup'.
 *
 * Returns IB_SUCCESS, IB_INSUFFICIENT_MEMORY, IB_INSUFFICIENT_RESOURCES,
 * or IB_ERROR. */
ib_api_status_t
mlnx_hobul_new(
	IN				mlnx_hob_t					*hob_p,
	IN				HH_hca_hndl_t				hh_hndl,
	IN				void						*resources_p)
{
	mlnx_hobul_t		*hobul_p;
	HH_hca_dev_t		*hca_ul_info;
	ib_api_status_t		status;
	VAPI_hca_cap_t		hca_caps;
	u_int32_t			i;
#if MLNX_COMP_MODEL == 1
	/* Round-robin counter used to spread CQ DPCs across processors. */
	static uint32_t		proc_num = 0;
#endif

	// Verify handle
	CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

	if (NULL == (hobul_p = cl_zalloc( sizeof(mlnx_hobul_t))))
		return IB_INSUFFICIENT_MEMORY;

	// The following will NULL all pointers/sizes (used in cleanup)
//	cl_memclr(hobul_p, sizeof (mlnx_hobul_t));

	hobul_p->hh_hndl = hh_hndl;

	if (HH_OK != THHUL_hob_create(resources_p, hh_hndl->dev_id, &hobul_p->hhul_hndl))
	{
		status = IB_INSUFFICIENT_RESOURCES;
		goto cleanup;
	}

	/* The HH handle doubles as a pointer to the HH device descriptor. */
	hca_ul_info = (HH_hca_dev_t *)hh_hndl;

	if (hca_ul_info)
	{
		hobul_p->vendor_id = hca_ul_info->vendor_id;
		hobul_p->device_id = hca_ul_info->dev_id;
		hobul_p->hca_ul_resources_p = resources_p;
		hobul_p->cq_ul_resources_sz = hca_ul_info->cq_ul_resources_sz;
		hobul_p->qp_ul_resources_sz = hca_ul_info->qp_ul_resources_sz;
		hobul_p->pd_ul_resources_sz = hca_ul_info->pd_ul_resources_sz;
	}

	if (HH_OK != THH_hob_query(hh_hndl, &hca_caps))
	{
		status = IB_ERROR;
		goto cleanup;
	}

	/* Index masks: capability counts rounded up to the next power of two, minus one. */
	hobul_p->cq_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_cq));
	hobul_p->qp_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_qp)); // Currently mask = 0xFFFF
	hobul_p->max_pd = MASK32(priv_ceil_log2(hca_caps.max_pd_num)) + 1;
	hobul_p->max_cq = hobul_p->cq_idx_mask + 1;
	hobul_p->max_qp = hobul_p->qp_idx_mask + 1;

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: sizes cq 0%x qp 0%x pd 0%x\n", hca_caps.max_num_cq, hca_caps.max_num_qp, hca_caps.max_pd_num));

	/* create and initialize the data stucture for CQs */
	hobul_p->cq_info_tbl = cl_zalloc(hobul_p->max_cq * sizeof (cq_info_t));

	/* create and initialize the data stucture for QPs */
	hobul_p->qp_info_tbl = cl_zalloc(hobul_p->max_qp * sizeof (qp_info_t));

	/* create and initialize the data stucture for PDs */
	hobul_p->pd_info_tbl = cl_zalloc(hobul_p->max_pd * sizeof (pd_info_t));

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: alloc failed?  cq=%d qp=%d pd=%d\n",
		!hobul_p->cq_info_tbl, !hobul_p->qp_info_tbl, !hobul_p->pd_info_tbl));

	if (!hobul_p->pd_info_tbl ||
		!hobul_p->qp_info_tbl ||
		!hobul_p->cq_info_tbl)
	{
		status = IB_INSUFFICIENT_MEMORY;
		goto cleanup;
	}

	/* Initialize all mutexes. */
	/* Construct first (cannot fail) so cleanup can safely destroy every entry. */
	for( i = 0; i < hobul_p->max_cq; i++ )
	{
		cl_mutex_construct( &hobul_p->cq_info_tbl[i].mutex );
#if MLNX_COMP_MODEL
		KeInitializeDpc( &hobul_p->cq_info_tbl[i].dpc,
			mlnx_comp_dpc, &hobul_p->cq_info_tbl[i] );
#if MLNX_COMP_MODEL == 1
		/* Distribute CQ completion DPCs round-robin over all processors. */
		KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[i].dpc,
			(CCHAR)(proc_num++ % cl_proc_count()) );
#endif	/* MLNX_COMP_MODEL == 1 */
#endif	/* MLNX_COMP_MODEL */
	}

	for( i = 0; i < hobul_p->max_qp; i++ )
		cl_mutex_construct( &hobul_p->qp_info_tbl[i].mutex );

	for( i = 0; i < hobul_p->max_pd; i++ )
		cl_mutex_construct( &hobul_p->pd_info_tbl[i].mutex );

	/* Now perform the fallible init passes. */
	for( i = 0; i < hobul_p->max_cq; i++ )
	{
		if( cl_mutex_init( &hobul_p->cq_info_tbl[i].mutex ) != CL_SUCCESS )
		{
			status = IB_ERROR;
			goto cleanup;
		}
	}

	for( i = 0; i < hobul_p->max_qp; i++ )
	{
		if( cl_mutex_init( &hobul_p->qp_info_tbl[i].mutex ) != CL_SUCCESS )
		{
			status = IB_ERROR;
			goto cleanup;
		}
	}

	for( i = 0; i < hobul_p->max_pd; i++ )
	{
		if( cl_mutex_init( &hobul_p->pd_info_tbl[i].mutex ) != CL_SUCCESS )
		{
			status = IB_ERROR;
			goto cleanup;
		}
	}

	hobul_p->log2_mpt_size = ((THH_hca_ul_resources_t *)resources_p)->log2_mpt_size;
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("log2_mpt_size = %d\n", hobul_p->log2_mpt_size));

	/* Publish the finished hobul; readers index by the HOB's slot number. */
	cl_spinlock_acquire(&hob_lock);
	mlnx_hobul_array[hob_p->index] = hobul_p;
	cl_spinlock_release(&hob_lock);

	return IB_SUCCESS;

cleanup:
	/* Unwind in reverse order; cl_zalloc guarantees unallocated fields are NULL. */
	if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl );
	if (hobul_p->pd_info_tbl)
	{
		for( i = 0; i < hobul_p->max_pd; i++ )
			cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex );
		cl_free(hobul_p->pd_info_tbl);
	}
	if (hobul_p->qp_info_tbl)
	{
		for( i = 0; i < hobul_p->max_qp; i++ )
			cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex );
		cl_free(hobul_p->qp_info_tbl);
	}
	if (hobul_p->cq_info_tbl)
	{
		for( i = 0; i < hobul_p->max_cq; i++ )
			cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex );
		cl_free(hobul_p->cq_info_tbl);
	}
	if (hobul_p) cl_free( hobul_p);
	return status;
}
588 \r
589 /////////////////////////////////////////////////////////\r
590 /////////////////////////////////////////////////////////\r
591 void\r
592 mlnx_hobul_get(\r
593         IN                              mlnx_hob_t                                      *hob_p,\r
594                 OUT                     void                                            **resources_p )\r
595 {\r
596         mlnx_hobul_t            *hobul_p;\r
597 \r
598         // Verify handle\r
599         CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
600 \r
601         hobul_p = mlnx_hobul_array[hob_p->index];\r
602 \r
603         if (hobul_p && resources_p)\r
604         {\r
605                 *resources_p = hobul_p->hca_ul_resources_p;\r
606         }\r
607 }\r
608 \r
609 /////////////////////////////////////////////////////////\r
610 /////////////////////////////////////////////////////////\r
611 void\r
612 mlnx_hobul_delete(\r
613         IN                              mlnx_hob_t                                      *hob_p)\r
614 {\r
615         mlnx_hobul_t            *hobul_p;\r
616         u_int32_t                       i;\r
617 \r
618         // Verify handle\r
619         CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
620 \r
621         cl_spinlock_acquire(&hob_lock);\r
622         hobul_p = mlnx_hobul_array[hob_p->index];\r
623         mlnx_hobul_array[hob_p->index] = NULL;\r
624         cl_spinlock_release(&hob_lock);\r
625 \r
626         if (!hobul_p) return;\r
627 \r
628         if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl );\r
629         if (hobul_p->pd_info_tbl)\r
630         {\r
631                 for( i = 0; i < hobul_p->max_pd; i++ )\r
632                         cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex );\r
633                 cl_free(hobul_p->pd_info_tbl);\r
634         }\r
635         if (hobul_p->qp_info_tbl)\r
636         {\r
637                 for( i = 0; i < hobul_p->max_qp; i++ )\r
638                         cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex );\r
639                 cl_free(hobul_p->qp_info_tbl);\r
640         }\r
641         if (hobul_p->cq_info_tbl)\r
642         {\r
643                 for( i = 0; i < hobul_p->max_cq; i++ )\r
644                 {\r
645                         KeRemoveQueueDpc( &hobul_p->cq_info_tbl[i].dpc );\r
646                         cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex );\r
647                 }\r
648                 cl_free(hobul_p->cq_info_tbl);\r
649         }\r
650         if (hobul_p) cl_free( hobul_p);\r
651 }\r
652 \r
653 /////////////////////////////////////////////////////////\r
654 // ### Callbacks\r
655 /////////////////////////////////////////////////////////\r
656 \r
/* Map a VAPI/HH async event code to the IBAL ib_async_event_t value and
 * (optionally) classify it as a QP, CQ, or CA-level event via *event_class_p.
 * Unknown codes are logged and reported as a CA-level IB_AE_LOCAL_FATAL. */
ib_async_event_t
mlnx_map_vapi_event_type(
	IN				unsigned					event_id,
		OUT			ENUM_EVENT_CLASS			*event_class_p)
{
	switch (event_id)
	{
	case VAPI_QP_PATH_MIGRATED:
		if (event_class_p) *event_class_p = E_EV_QP;
		return IB_AE_QP_APM;

	case VAPI_QP_COMM_ESTABLISHED:
		if (event_class_p) *event_class_p = E_EV_QP;
		return IB_AE_QP_COMM;

	case VAPI_SEND_QUEUE_DRAINED:
		if (event_class_p) *event_class_p = E_EV_QP;
		return IB_AE_SQ_DRAINED;

	case VAPI_CQ_ERROR:
		if (event_class_p) *event_class_p = E_EV_CQ;
		return IB_AE_CQ_ERROR;

	case VAPI_LOCAL_WQ_INV_REQUEST_ERROR:
		if (event_class_p) *event_class_p = E_EV_QP;
		return IB_AE_WQ_REQ_ERROR;

	case VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR:
		if (event_class_p) *event_class_p = E_EV_QP;
		return IB_AE_WQ_ACCESS_ERROR;

	case VAPI_LOCAL_WQ_CATASTROPHIC_ERROR:
		if (event_class_p) *event_class_p = E_EV_QP;
		return IB_AE_QP_FATAL;

	case VAPI_PATH_MIG_REQ_ERROR:
		if (event_class_p) *event_class_p = E_EV_QP;
		return IB_AE_QP_APM_ERROR;

	case VAPI_LOCAL_CATASTROPHIC_ERROR:
		if (event_class_p) *event_class_p = E_EV_CA;
		return IB_AE_LOCAL_FATAL;

	case VAPI_PORT_ERROR:
		/*
		 * In tavor_hca\src\Hca\hcahal\tavor\eventp\event_irqh.c:
		 * TAVOR_IF_EV_TYPE_PORT_ERR maps one of two port events:
		 *	- TAVOR_IF_SUB_EV_PORT_DOWN
		 *	- TAVOR_IF_SUB_EV_PORT_UP
		 * 
		 * These map to (respectively)
		 *	- VAPI_PORT_ERROR
		 *	- VAPI_PORT_ACTIVE
		 */
		if (event_class_p) *event_class_p = E_EV_CA;
		return IB_AE_PORT_DOWN; /* INIT, ARMED, DOWN */

	case VAPI_PORT_ACTIVE:
		if (event_class_p) *event_class_p = E_EV_CA;
		return IB_AE_PORT_ACTIVE; /* ACTIVE STATE */

	case VAPI_CLIENT_REREGISTER:
		if (event_class_p) *event_class_p = E_EV_CA;
		return IB_AE_CLIENT_REREGISTER; /* ACTIVE STATE */

	default:
		/* Unrecognized code: report conservatively as a fatal CA event. */
		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n",
			event_id, VAPI_PORT_ACTIVE, IB_AE_LOCAL_FATAL));
		if (event_class_p) *event_class_p = E_EV_CA;
		return IB_AE_LOCAL_FATAL;
	}
}
729 \r
/* Translate an HH event record into an IBAL event record: only the event
 * type is mapped (via mlnx_map_vapi_event_type); the event class is
 * reported through *event_class_p. The caller fills ib_event_p->context,
 * and no trap info is populated (no traps currently generated). */
void
mlnx_conv_vapi_event(
	IN				HH_event_record_t			*hh_event_p,
	IN				ib_event_rec_t				*ib_event_p,
		OUT			ENUM_EVENT_CLASS			*event_class_p)
{

	// ib_event_p->context is handled by the caller
	//
	ib_event_p->type = mlnx_map_vapi_event_type(hh_event_p->etype, event_class_p);

	// no traps currently generated
	// ib_event_p->trap_info.lid  =  ;
	// ib_event_p->trap_info.port_guid = ;
	// ib_event_p->trap_info.port_num  = hh_er;
}
746 \r
/* Async event callback registered with the HH layer (see mlnx_hobs_set_cb).
 * private_data points at the HOB's slot index (set at registration time).
 *
 * When g_mlnx_dpc2thread is set, the event record is copied to a
 * heap-allocated mlnx_cb_data_t and queued to the HOB's async-proc thread
 * (mlnx_async_dpc runs later; it owns the allocation — assumed freed there,
 * TODO confirm in mlnx_async_dpc). Otherwise the handler runs synchronously
 * on a stack copy. Invalid private_data, event record, or slot index is
 * silently ignored, as is allocation failure (event dropped). */
void
mlnx_async_cb(
	IN				HH_hca_hndl_t				hh_hndl,
	IN				HH_event_record_t			*hh_er_p,
	IN				void						*private_data)
{
	u_int32_t			obj_idx;
	mlnx_hob_t			*hob_p;

	mlnx_cb_data_t		cb_data;
	mlnx_cb_data_t		*cb_data_p;

	CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC CB %p (0x%x)\n",
		private_data, (private_data) ? *(u_int32_t *)private_data : 0xB5));

	if (!private_data || !hh_er_p) return;

	obj_idx =  *(u_int32_t *)private_data;
	if (obj_idx >= MLNX_NUM_HOBKL) return;

	hob_p = mlnx_hob_array + obj_idx;

	// g_mlnx_dpc2thread will be initialized as a module paramter (default - disabled(0))
	if (g_mlnx_dpc2thread)
	{
		/* Defer to the async-proc thread; the copy must be heap-allocated
		 * because this stack frame unwinds before the DPC runs. */
		cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t));
		if (!cb_data_p) return;

		cb_data_p->hh_hndl		= hh_hndl;
		cb_data_p->private_data	= private_data;
		cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t));
		cb_data_p->async_item.pfn_callback = mlnx_async_dpc;
		cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item );
	} else
	{
		/* Synchronous path: dispatch immediately using a stack copy. */
		cb_data_p = &cb_data;

		cb_data_p->hh_hndl		= hh_hndl;
		cb_data_p->private_data	= private_data;
		cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t));
		mlnx_async_dpc( &cb_data_p->async_item );
	}
}
790 \r
/*
 * Worker for mlnx_async_cb: translates a VAPI/HH asynchronous event
 * record into an IBAL event and delivers it to the consumer callback
 * registered on the HCA object.  Runs on the async-proc thread when
 * g_mlnx_dpc2thread is set, otherwise inline in the HH callback.
 *
 * async_item_p is embedded in a mlnx_cb_data_t (heap-allocated in
 * dpc2thread mode, caller stack otherwise).
 */
static void
mlnx_async_dpc(
	IN				cl_async_proc_item_t		*async_item_p )
{
	HH_event_record_t	*hh_er_p;
	u_int32_t			obj_idx;
	mlnx_hob_t			*hob_p;
	mlnx_hobul_t		*hobul_p;
	mlnx_cb_data_t		*cb_data_p;

	ENUM_EVENT_CLASS	event_class;
	ib_event_rec_t		event_r;

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("ASYNC DPC %p\n", async_item_p));

	// Recover the enclosing callback-data record.
	cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item );

	if (!cb_data_p) return;

	hh_er_p =  &cb_data_p->hh_er;
	// obj_idx was range-checked against MLNX_NUM_HOBKL by mlnx_async_cb.
	obj_idx =  *(u_int32_t *)cb_data_p->private_data;
	hob_p = mlnx_hob_array + obj_idx;
	hobul_p = mlnx_hobul_array[obj_idx];

	// NOTE(review): this trace reads hob_p->ca_context before the
	// validity checks below; hob_p itself is always a valid array
	// element here, so the access is safe even if the handle is stale.
	CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC type %d ca_context %p\n",
		hh_er_p->etype, hob_p->ca_context));

	if (!hob_p ||
		!hobul_p ||
		!hob_p->hh_hndl ||
		!hob_p->async_cb_p)
	{
		goto cleanup;
	}

	cl_memclr(&event_r, sizeof(event_r));
	// Map the VAPI event type to an IBAL event and classify it (CA/QP/CQ).
	mlnx_conv_vapi_event(hh_er_p, &event_r, &event_class);

	// Attach the consumer context that matches the event class.
	switch(event_class)
	{
	case E_EV_CA:
		event_r.context = (void *)hob_p->ca_context;
		break;

	case E_EV_QP:
		{
			// Low bits of the QP number select the QP info-table slot.
			obj_idx = hh_er_p->event_modifier.qpn & hobul_p->qp_idx_mask;
			if (obj_idx < hobul_p->max_qp)
				event_r.context = (void *)hobul_p->qp_info_tbl[obj_idx].qp_context;
			else
			{
				CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad qpn 0x%x max 0x%x\n", obj_idx, hobul_p->max_qp));
				goto cleanup;
			}
		}
		break;

	case E_EV_CQ:
		{
			// Low bits of the CQ number select the CQ info-table slot.
			obj_idx = hh_er_p->event_modifier.cq & hobul_p->cq_idx_mask;
			if (obj_idx < hobul_p->max_cq)
				event_r.context = (void *)hobul_p->cq_info_tbl[obj_idx].cq_context;
			else
			{
				CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad cqn 0x%x max 0x%x\n", obj_idx, hobul_p->max_cq));
				goto cleanup;
			}
		}
		break;

	case E_EV_LAST:
	default:
		// Unknown class: the event is still delivered below with a
		// NULL context (historical behavior).
		// CL_ASSERT(0); // This shouldn't happen
		CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC unknown event_class 0x%x\n", event_class));
		break;
	}

	// Call the registered CB
	(*hob_p->async_cb_p)(&event_r);
	// Fall Through
cleanup:
	// The record was heap-allocated by mlnx_async_cb only in
	// dpc2thread mode; otherwise it lives on the caller's stack.
	if (g_mlnx_dpc2thread)
	{
		cl_free(cb_data_p);
	}
}
877 \r
878 /////////////////////////////////////////////////////////\r
879 /////////////////////////////////////////////////////////\r
/*
 * Completion callback registered with HH; invoked when a CQ raises a
 * completion event.  With MLNX_COMP_MODEL set, completions are queued
 * to a per-CQ kernel DPC (model 2 additionally round-robins the DPC
 * across processors); otherwise they are funneled through the generic
 * async-proc / inline-DPC path like async events.
 */
void
mlnx_comp_cb(
	IN				HH_hca_hndl_t				hh_hndl,
	IN				HH_cq_hndl_t				hh_cq,
	IN				void						*private_data)
{
#if MLNX_COMP_MODEL
	u_int32_t			cq_num;
	u_int32_t			hca_idx;
	mlnx_hob_t			*hob_p;
	mlnx_hobul_t		*hobul_p;
#if MLNX_COMP_MODEL == 2
	// Round-robin counter used to spread per-CQ DPCs over processors.
	static uint32_t		proc_num = 0;
#endif

	CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data));

	UNUSED_PARAM( hh_hndl );

	// private_data carries the HCA index (set at registration time).
	hca_idx = *(u_int32_t *)private_data;
	hob_p   = mlnx_hob_array + hca_idx;
	hobul_p = mlnx_hobul_array[hca_idx];
	// Low bits of the HH CQ handle select the CQ info-table slot.
	cq_num  = hh_cq & hobul_p->cq_idx_mask;

	if (NULL != hob_p && NULL != hobul_p &&
		hob_p->hh_hndl && hob_p->comp_cb_p)
	{
		if (cq_num < hobul_p->max_cq)
		{
#if MLNX_COMP_MODEL == 2
			KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[cq_num].dpc,
				(CCHAR)(proc_num++ % cl_proc_count()) );
#endif	/* MLNX_COMP_MODEL == 2 */
			KeInsertQueueDpc( &hobul_p->cq_info_tbl[cq_num].dpc,
				hob_p, NULL );
		}
		else
		{
			HCA_TRACE( HCA_DBG_ERROR, ("CQ index out of range!!!\n") );
		}
	}
#else	/* MLNX_COMP_MODEL */
	u_int32_t			obj_idx;
	mlnx_hob_t			*hob_p;

	mlnx_cb_data_t		cb_data;
	mlnx_cb_data_t		*cb_data_p;

	CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data));

	if (!private_data) return;

	obj_idx =  *(u_int32_t *)private_data;
	hob_p = mlnx_hob_array + obj_idx;
	if (!hob_p) return;

	if (g_mlnx_dpc2thread)
	{
		// Heap-allocate: the record must outlive this callback.
		cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t));
		if (!cb_data_p) return;

		cb_data_p->hh_hndl      = hh_hndl;
		cb_data_p->hh_cq        = hh_cq;
		cb_data_p->private_data = private_data;

		cb_data_p->async_item.pfn_callback = mlnx_comp_dpc;

		// Report completion through async_proc
		cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item );

	} else
	{
		cb_data_p = &cb_data;

		cb_data_p->hh_hndl      = hh_hndl;
		cb_data_p->hh_cq        = hh_cq;
		cb_data_p->private_data = private_data;

		// Report completion directly from DPC (verbs should NOT sleep)
		mlnx_comp_dpc( &cb_data_p->async_item );
	}
#endif	/* MLNX_COMP_MODEL */
}
963 \r
#if MLNX_COMP_MODEL
/*
 * Per-CQ completion DPC (models 1/2).  'context' is the cq_info_t the
 * DPC was initialized with; 'arg1' carries the owning HCA object
 * (queued by mlnx_comp_cb).  Forwards the consumer's CQ context to the
 * registered completion callback.
 */
static void
mlnx_comp_dpc(
	IN				PRKDPC						p_dpc,
	IN				void						*context,
	IN				void						*arg1,
	IN				void						*unused )
{
	mlnx_hob_t		*hob_p = (mlnx_hob_t*)arg1;
	UNUSED_PARAM( p_dpc );
	UNUSED_PARAM( unused );

	hob_p->comp_cb_p( (void*)((cq_info_t*)context)->cq_context );
}
#else	/* MLNX_COMP_MODEL */
/*
 * Async-proc completion worker (model 0).  Recovers the callback-data
 * record, resolves the CQ consumer context from the CQ info table and
 * invokes the registered completion callback; frees the record when it
 * was heap-allocated by mlnx_comp_cb.
 */
static void
mlnx_comp_dpc(
	IN				cl_async_proc_item_t		*async_item_p )
{
	u_int32_t			cq_num;
	u_int32_t			hca_idx;
	mlnx_hob_t			*hob_p;
	mlnx_hobul_t		*hobul_p;
	mlnx_cb_data_t		*cb_data_p;

	CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP DPC %p\n", async_item_p));

	// Recover the enclosing callback-data record.
	cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item );
	if (!cb_data_p) return;

	hca_idx = *(u_int32_t *)cb_data_p->private_data;
	hob_p   = mlnx_hob_array + hca_idx;
	hobul_p = mlnx_hobul_array[hca_idx];
	// Low bits of the HH CQ handle select the CQ info-table slot.
	cq_num  = (u_int32_t)cb_data_p->hh_cq & hobul_p->cq_idx_mask;

	if (NULL != hob_p && NULL != hobul_p &&
		hob_p->hh_hndl && hob_p->comp_cb_p)
	{
		if (cq_num < hobul_p->max_cq)
		{
			(*hob_p->comp_cb_p)((void *)hobul_p->cq_info_tbl[cq_num].cq_context);
		}
	}

	// Heap-allocated by mlnx_comp_cb only in dpc2thread mode.
	if (g_mlnx_dpc2thread)
	{
		cl_free(cb_data_p);
	}
}
#endif	/* MLNX_COMP_MODEL */
1014 \r
1015 // ### Conversions\r
1016 \r
1017 /////////////////////////////////////////////////////////\r
1018 /////////////////////////////////////////////////////////\r
1019 VAPI_mrw_acl_t\r
1020 map_ibal_acl(\r
1021         IN                              ib_access_t                                     ibal_acl)\r
1022 {\r
1023         VAPI_mrw_acl_t          vapi_acl = 0;\r
1024 \r
1025         if (ibal_acl & IB_AC_RDMA_READ)   vapi_acl |= VAPI_EN_REMOTE_READ;\r
1026         if (ibal_acl & IB_AC_RDMA_WRITE)  vapi_acl |= VAPI_EN_REMOTE_WRITE;\r
1027         if (ibal_acl & IB_AC_ATOMIC)      vapi_acl |= VAPI_EN_REMOTE_ATOM;\r
1028         if (ibal_acl & IB_AC_LOCAL_WRITE) vapi_acl |= VAPI_EN_LOCAL_WRITE;\r
1029         if (ibal_acl & IB_AC_MW_BIND)     vapi_acl |= VAPI_EN_MEMREG_BIND;\r
1030 \r
1031         return vapi_acl;\r
1032 }\r
1033 \r
1034 /////////////////////////////////////////////////////////\r
1035 /////////////////////////////////////////////////////////\r
1036 ib_access_t\r
1037 map_vapi_acl(\r
1038         IN                              VAPI_mrw_acl_t                          vapi_acl)\r
1039 {\r
1040         ib_access_t ibal_acl = 0;\r
1041 \r
1042         if (vapi_acl & VAPI_EN_REMOTE_READ)  ibal_acl |= IB_AC_RDMA_READ;\r
1043         if (vapi_acl & VAPI_EN_REMOTE_WRITE) ibal_acl |= IB_AC_RDMA_WRITE;\r
1044         if (vapi_acl & VAPI_EN_REMOTE_ATOM)  ibal_acl |= IB_AC_ATOMIC;\r
1045         if (vapi_acl & VAPI_EN_LOCAL_WRITE)  ibal_acl |= IB_AC_LOCAL_WRITE;\r
1046         if (vapi_acl & VAPI_EN_MEMREG_BIND)  ibal_acl |= IB_AC_MW_BIND;\r
1047 \r
1048         return ibal_acl;\r
1049 }\r
1050 \r
1051 /////////////////////////////////////////////////////////\r
1052 /////////////////////////////////////////////////////////\r
1053 static VAPI_rdma_atom_acl_t \r
1054 map_ibal_qp_acl(\r
1055         IN                              ib_access_t                                     ibal_acl)\r
1056 {\r
1057         VAPI_rdma_atom_acl_t vapi_qp_acl = 0;\r
1058 \r
1059         if (ibal_acl & IB_AC_RDMA_WRITE) vapi_qp_acl |= VAPI_EN_REM_WRITE;\r
1060         if (ibal_acl & IB_AC_RDMA_READ)  vapi_qp_acl |= VAPI_EN_REM_READ;\r
1061         if (ibal_acl & IB_AC_ATOMIC)     vapi_qp_acl |= VAPI_EN_REM_ATOMIC_OP;\r
1062 \r
1063         return vapi_qp_acl;\r
1064 \r
1065 }\r
1066 \r
1067 /////////////////////////////////////////////////////////\r
1068 /////////////////////////////////////////////////////////\r
1069 static ib_access_t\r
1070 map_vapi_qp_acl(\r
1071         IN                              VAPI_rdma_atom_acl_t            vapi_qp_acl)\r
1072 {\r
1073         ib_access_t     ibal_acl = IB_AC_LOCAL_WRITE;\r
1074 \r
1075         if (vapi_qp_acl & VAPI_EN_REM_WRITE)     ibal_acl |= IB_AC_RDMA_WRITE;\r
1076         if (vapi_qp_acl & VAPI_EN_REM_READ)      ibal_acl |= IB_AC_RDMA_READ;\r
1077         if (vapi_qp_acl & VAPI_EN_REM_ATOMIC_OP) ibal_acl |= IB_AC_ATOMIC;\r
1078 \r
1079         return ibal_acl;\r
1080 }\r
1081 \r
1082 \r
1083 /////////////////////////////////////////////////////////\r
1084 /////////////////////////////////////////////////////////\r
1085 ib_api_status_t\r
1086 mlnx_lock_region(\r
1087         IN                              mlnx_mro_t                                      *mro_p,\r
1088         IN                              boolean_t                                       um_call )\r
1089 {\r
1090         MOSAL_iobuf_t   old_iobuf;\r
1091 \r
1092         // Find context\r
1093         if( um_call )\r
1094                 mro_p->mr_prot_ctx = MOSAL_get_current_prot_ctx();\r
1095         else\r
1096                 mro_p->mr_prot_ctx = MOSAL_get_kernel_prot_ctx();\r
1097 \r
1098         // Save pointer to existing locked region.\r
1099         old_iobuf = mro_p->mr_iobuf;\r
1100 \r
1101         // Lock Region\r
1102         if (MT_OK != MOSAL_iobuf_register((MT_virt_addr_t)mro_p->mr_start,\r
1103                 (MT_size_t)mro_p->mr_size,\r
1104                 mro_p->mr_prot_ctx,\r
1105                 mro_p->mr_mosal_perm,\r
1106                 &mro_p->mr_iobuf,\r
1107                 0 ))\r
1108         {\r
1109                 return IB_ERROR;\r
1110         }\r
1111 \r
1112         if( old_iobuf )\r
1113         {\r
1114                 if( MT_OK != MOSAL_iobuf_deregister( old_iobuf ) )\r
1115                         return IB_ERROR;\r
1116         }\r
1117 \r
1118         return IB_SUCCESS;\r
1119 }\r
1120 \r
1121 \r
1122 /////////////////////////////////////////////////////////\r
1123 /////////////////////////////////////////////////////////\r
/*
 * Build HH memory-region properties from an IBAL ib_mr_create_t
 * request, (re)locking the underlying virtual region when the
 * translation is being changed.
 *
 * change_flags selects which attribute groups to refresh (ACL and/or
 * translation); pd_idx is the protection-domain index stamped into the
 * resulting properties.  Returns IB_SUCCESS, or the failure status of
 * mlnx_lock_region when pinning fails.
 */
ib_api_status_t
mlnx_conv_ibal_mr_create(
	IN				u_int32_t					pd_idx,
	IN	OUT			mlnx_mro_t					*mro_p,
	IN				VAPI_mr_change_t			change_flags,
	IN				ib_mr_create_t const		*p_mr_create,
	IN				boolean_t					um_call,
		OUT			HH_mr_t						*mr_props_p )
{
	ib_api_status_t		status;

	/* Set ACL information first since it is used to lock the region. */
	if( change_flags & VAPI_MR_CHANGE_ACL )
	{
		mro_p->mr_acl = map_ibal_acl( p_mr_create->access_ctrl );
		// This computation should be externalized by THH
		// MOSAL needs write permission only when local write is enabled.
		mro_p->mr_mosal_perm =
			MOSAL_PERM_READ |
			((mro_p->mr_acl & VAPI_EN_LOCAL_WRITE) ? MOSAL_PERM_WRITE : 0);
	}

	if( change_flags & VAPI_MR_CHANGE_TRANS )
	{
		CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("addr 0x%p size %"PRId64"\n", (void *)p_mr_create->vaddr, p_mr_create->length));
		// Build TPT entries
		mro_p->mr_start = (IB_virt_addr_t)p_mr_create->vaddr;
		mro_p->mr_size = p_mr_create->length;
		// Pin the pages; this replaces any previously locked iobuf.
		if (IB_SUCCESS != (status = mlnx_lock_region(mro_p, um_call)))
		{
			return status;
		}
	}

	/* Now fill in the MR properties. */
	mr_props_p->start = mro_p->mr_start;
	mr_props_p->size = mro_p->mr_size;
	mr_props_p->acl = mro_p->mr_acl;
	mr_props_p->pd = pd_idx;

	// Setup MTT info
	mr_props_p->tpt.tpt_type = HH_TPT_IOBUF;
	mr_props_p->tpt.tpt.iobuf = mro_p->mr_iobuf;

	return IB_SUCCESS;
}
1169 \r
1170 /////////////////////////////////////////////////////////\r
1171 // On entry mro_p->mr_start holds the pmr address\r
1172 /////////////////////////////////////////////////////////\r
1173 ib_api_status_t\r
1174 mlnx_conv_ibal_pmr_create(\r
1175         IN                              u_int32_t                                       pd_idx,\r
1176         IN                              mlnx_mro_t                                      *mro_p,\r
1177         IN                              ib_phys_create_t const          *p_pmr_create,\r
1178                 OUT                     HH_mr_t                                         *mr_props_p )\r
1179 {\r
1180         VAPI_phy_addr_t*        buf_lst = NULL;\r
1181         VAPI_size_t*            sz_lst = NULL;\r
1182         u_int32_t                       i;\r
1183         u_int32_t                       page_shift = priv_ceil_log2(p_pmr_create->hca_page_size);\r
1184         u_int64_t                       page_mask = (1 << page_shift) - 1;\r
1185         u_int64_t                       tot_sz = 0;\r
1186 \r
1187         CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, \r
1188                 ("PRE: addr %p size 0x%"PRIx64" shift %d\n",\r
1189                 (void *)(uintn_t)mro_p->mr_start, p_pmr_create->length, page_mask));\r
1190         mro_p->mr_start = (mro_p->mr_start & ~page_mask) | (p_pmr_create->buf_offset & page_mask);\r
1191         CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, \r
1192                 ("POST: addr %p\n", (void *)(uintn_t)mro_p->mr_start));\r
1193 \r
1194         mr_props_p->start = mro_p->mr_start;\r
1195         mr_props_p->size = p_pmr_create->length;\r
1196         mr_props_p->acl = map_ibal_acl(p_pmr_create->access_ctrl);\r
1197         mr_props_p->pd = pd_idx;\r
1198 \r
1199 #ifdef _DEBUG_\r
1200         mro_p->mr_size           = mr_props_p->size;\r
1201 //      mro_p->mr_first_page_addr = 0;\r
1202 //      mro_p->mr_num_pages       = (mro_p->mr_end >> PAGESHIFT) + 1 - (mro_p->mr_start >> PAGESHIFT);\r
1203 //      CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st pg addr 0x%p pages %d\n",\r
1204 //              (void *)mro_p->mr_first_page_addr, p_pmr_create->num_bufs));\r
1205         CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st phys addr 0x%"PRIx64" phys pages %d\n",\r
1206                 p_pmr_create->range_array[0].base_addr, p_pmr_create->num_ranges));\r
1207 #endif\r
1208 \r
1209         // Build TPT entries\r
1210         if (!p_pmr_create->range_array)\r
1211         {\r
1212                 return IB_INVALID_PARAMETER;\r
1213         }\r
1214 \r
1215         if (p_pmr_create->hca_page_size !=\r
1216                 MT_DOWN_ALIGNX_PHYS(p_pmr_create->hca_page_size, page_shift))\r
1217         {\r
1218                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf size is not page aligned\n"));\r
1219                 return IB_INVALID_PARAMETER;\r
1220         }\r
1221 \r
1222         for (i = 0; i < p_pmr_create->num_ranges; i++)\r
1223         {\r
1224                 uint64_t        start_addr = p_pmr_create->range_array[i].base_addr;\r
1225                 uint64_t        end_addr = start_addr + p_pmr_create->range_array[i].size;\r
1226 \r
1227                 if( end_addr < start_addr ) {\r
1228                         CL_TRACE( CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf end < start\n") );\r
1229                         return IB_INVALID_PARAMETER;\r
1230                 }\r
1231 \r
1232                 if (start_addr !=\r
1233                         MT_DOWN_ALIGNX_PHYS(start_addr, page_shift))\r
1234                 {\r
1235                         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf start adrs is not page aligned\n"));\r
1236                         return IB_INVALID_PARAMETER;\r
1237                 }\r
1238 \r
1239                 tot_sz += p_pmr_create->range_array[i].size;\r
1240         }\r
1241 \r
1242         if( tot_sz < p_pmr_create->length + p_pmr_create->buf_offset )\r
1243         {\r
1244                 HCA_TRACE_EXIT( HCA_DBG_ERROR, \r
1245                         ("length(0x"PRIx64") + buf offset(0x"PRIx64") larger than sum "\r
1246                         "of phys ranges(0x"PRIx64")\n",\r
1247                         p_pmr_create->length, p_pmr_create->buf_offset, tot_sz) );\r
1248                 return IB_INVALID_PARAMETER;\r
1249         }\r
1250 \r
1251         if( p_pmr_create->buf_offset > p_pmr_create->range_array[0].size )\r
1252         {\r
1253                 HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
1254                         ("buf offset(0x%x) > than 1st phy range size(0x"PRIx64")\n",\r
1255                         p_pmr_create->buf_offset, p_pmr_create->range_array[0].size) );\r
1256                 return IB_INVALID_PARAMETER;\r
1257         }\r
1258 \r
1259         /* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */\r
1260         buf_lst = (VAPI_phy_addr_t*)cl_pzalloc( sizeof(VAPI_phy_addr_t)*(p_pmr_create->num_ranges));\r
1261         if (!buf_lst)\r
1262         {\r
1263                 HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
1264                         ("Failed to allocate range address list.\n") );\r
1265                 return IB_INSUFFICIENT_MEMORY;\r
1266         }\r
1267 \r
1268 \r
1269         /* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */\r
1270         sz_lst = (VAPI_size_t*)cl_pzalloc( sizeof(VAPI_size_t)*(p_pmr_create->num_ranges));\r
1271         if (!sz_lst)\r
1272         {\r
1273                 cl_free( buf_lst );\r
1274                 HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
1275                         ("Failed to allocate range size list.\n") );\r
1276                 return IB_INSUFFICIENT_MEMORY;\r
1277         }\r
1278 \r
1279         for (i = 0; i < p_pmr_create->num_ranges; i++)\r
1280         {\r
1281                 buf_lst[i] = p_pmr_create->range_array[i].base_addr;\r
1282                 sz_lst[i] = p_pmr_create->range_array[i].size;\r
1283         }\r
1284 \r
1285         mr_props_p->tpt.tpt_type = HH_TPT_BUF;\r
1286         mr_props_p->tpt.num_entries = p_pmr_create->num_ranges;\r
1287         mr_props_p->tpt.tpt.buf_lst.buf_sz_lst = sz_lst;\r
1288         mr_props_p->tpt.tpt.buf_lst.phys_buf_lst = buf_lst; \r
1289         mr_props_p->tpt.tpt.buf_lst.iova_offset = p_pmr_create->buf_offset;\r
1290 \r
1291         return IB_SUCCESS;\r
1292 }\r
1293 \r
1294 \r
1295 u_int8_t\r
1296 mlnx_gid_to_index(\r
1297         IN                              HH_hca_hndl_t                           hh_hndl,\r
1298         IN                              u_int8_t                                        port_num,\r
1299         IN                              u_int8_t                                        *raw_gid)\r
1300 {\r
1301         ib_gid_t        *gid_table_p = NULL;\r
1302         u_int8_t        index = 0; // default return value\r
1303         u_int8_t        i;\r
1304 \r
1305         gid_table_p = cl_zalloc( 64*sizeof(ib_gid_t));\r
1306 \r
1307         mlnx_get_hca_gid_tbl(hh_hndl, port_num, 64, gid_table_p);\r
1308 \r
1309         for (i = 0; i < 64; i++)\r
1310         {\r
1311                 if (!cl_memcmp(raw_gid, gid_table_p[i].raw, sizeof(ib_gid_t)))\r
1312                 {\r
1313                         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("1: found GID at index %d\n", i));\r
1314                         index = i;\r
1315                         break;\r
1316                 }\r
1317         }\r
1318 \r
1319         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("2: found GID at index %d\n", index));\r
1320 \r
1321         cl_free( gid_table_p);\r
1322         return index;\r
1323 }\r
1324 \r
1325 /////////////////////////////////////////////////////////\r
1326 /////////////////////////////////////////////////////////\r
/*
 * Convert an IBAL address vector into the VAPI UD address vector used
 * by THH.  GRH (global routing) fields are filled in only when
 * ibal_av_p->grh_valid is set.
 */
void
mlnx_conv_ibal_av(
	IN				HH_hca_hndl_t				hh_hndl,
	IN		const	ib_av_attr_t				*ibal_av_p,
		OUT			VAPI_ud_av_t				*vapi_av_p)
{
	vapi_av_p->port = ibal_av_p->port_num;
	vapi_av_p->sl   = ibal_av_p->sl;
	vapi_av_p->dlid = cl_ntoh16 (ibal_av_p->dlid);

	// Only the 10 Gb/s rate maps to full rate (0); any other rate is
	// encoded as static-rate value 3.
	vapi_av_p->static_rate   =
		(ibal_av_p->static_rate == IB_PATH_RECORD_RATE_10_GBS? 0 : 3);
	ib_grh_get_ver_class_flow( ibal_av_p->grh.ver_class_flow, NULL,
		&vapi_av_p->traffic_class, &vapi_av_p->flow_label );
	vapi_av_p->src_path_bits = ibal_av_p->path_bits; // PATH:
	//vapi_av_p->src_path_bits = 0;

	/* For global destination or Multicast address:*/
	if (ibal_av_p->grh_valid)
	{
		vapi_av_p->grh_flag = TRUE;
		vapi_av_p->hop_limit     = ibal_av_p->grh.hop_limit;
		// CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("raw %p, &raw %p\n", ibal_av_p->grh.src_gid.raw, &ibal_av_p->grh.src_gid.raw));
		// Resolve the source-GID index from the port's GID table.
		vapi_av_p->sgid_index    = mlnx_gid_to_index(hh_hndl, ibal_av_p->port_num, (u_int8_t *)ibal_av_p->grh.src_gid.raw);
		cl_memcpy(vapi_av_p->dgid, ibal_av_p->grh.dest_gid.raw, sizeof(vapi_av_p->dgid));
	}
}
1354 \r
1355 /////////////////////////////////////////////////////////\r
1356 /////////////////////////////////////////////////////////\r
1357 void\r
1358 mlnx_conv_vapi_av(\r
1359         IN                              HH_hca_hndl_t                           hh_hndl,\r
1360         IN              const   VAPI_ud_av_t                            *vapi_av_p,\r
1361                 OUT                     ib_av_attr_t                            *ibal_av_p)\r
1362 {\r
1363         uint8_t         ver;\r
1364 \r
1365         ibal_av_p->port_num = vapi_av_p->port;\r
1366         ibal_av_p->sl       = vapi_av_p->sl;\r
1367         ibal_av_p->dlid     = cl_ntoh16(vapi_av_p->dlid);\r
1368 \r
1369         /* For global destination or Multicast address:*/\r
1370         ibal_av_p->grh_valid = vapi_av_p->grh_flag;\r
1371 \r
1372         ver = 2;\r
1373         ibal_av_p->grh.ver_class_flow = ib_grh_set_ver_class_flow( ver,\r
1374                 vapi_av_p->traffic_class,\r
1375                 vapi_av_p->flow_label);\r
1376         ibal_av_p->grh.hop_limit = vapi_av_p->hop_limit;\r
1377 \r
1378         THH_hob_get_sgid(hh_hndl,\r
1379                 vapi_av_p->port,\r
1380                 vapi_av_p->sgid_index,\r
1381                 &ibal_av_p->grh.src_gid.raw);\r
1382 \r
1383         cl_memcpy(ibal_av_p->grh.dest_gid.raw, vapi_av_p->dgid, sizeof(vapi_av_p->dgid));\r
1384 \r
1385         ibal_av_p->static_rate = (vapi_av_p->static_rate?\r
1386                 IB_PATH_RECORD_RATE_2_5_GBS : IB_PATH_RECORD_RATE_10_GBS);\r
1387         ibal_av_p->path_bits   = vapi_av_p->src_path_bits;\r
1388 }\r
1389 \r
1390 /////////////////////////////////////////////////////////\r
1391 /////////////////////////////////////////////////////////\r
1392 int\r
1393 mlnx_map_vapi_cqe_status(\r
1394         IN                              VAPI_wc_status_t                        vapi_status)\r
1395 {\r
1396         switch (vapi_status)\r
1397         {\r
1398         case IB_COMP_SUCCESS:           return IB_WCS_SUCCESS;\r
1399         case IB_COMP_LOC_LEN_ERR:       return IB_WCS_LOCAL_LEN_ERR;\r
1400         case IB_COMP_LOC_QP_OP_ERR:     return IB_WCS_LOCAL_OP_ERR;\r
1401         case IB_COMP_LOC_PROT_ERR:      return IB_WCS_LOCAL_PROTECTION_ERR;\r
1402         case IB_COMP_WR_FLUSH_ERR:      return IB_WCS_WR_FLUSHED_ERR;\r
1403         case IB_COMP_MW_BIND_ERR:       return IB_WCS_MEM_WINDOW_BIND_ERR;\r
1404         case IB_COMP_REM_INV_REQ_ERR:   return IB_WCS_REM_INVALID_REQ_ERR;\r
1405         case IB_COMP_REM_ACCESS_ERR:    return IB_WCS_REM_ACCESS_ERR;\r
1406         case IB_COMP_REM_OP_ERR:        return IB_WCS_REM_OP_ERR;\r
1407         case IB_COMP_RETRY_EXC_ERR:     return IB_WCS_TIMEOUT_RETRY_ERR;\r
1408         case IB_COMP_RNR_RETRY_EXC_ERR: return IB_WCS_RNR_RETRY_ERR;\r
1409         case IB_COMP_REM_ABORT_ERR:     return IB_WCS_REM_ACCESS_ERR; // ???\r
1410         case IB_COMP_FATAL_ERR:         return IB_WCS_REM_ACCESS_ERR; // ???\r
1411         case IB_COMP_GENERAL_ERR:       return IB_WCS_REM_ACCESS_ERR; // ???\r
1412         default:\r
1413                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n",\r
1414                         vapi_status, IB_COMP_GENERAL_ERR, IB_WCS_REM_ACCESS_ERR));\r
1415                 return IB_WCS_REM_ACCESS_ERR;\r
1416         }\r
1417 }\r
1418 \r
1419 /////////////////////////////////////////////////////////\r
1420 /////////////////////////////////////////////////////////\r
1421 int\r
1422 mlnx_map_vapi_cqe_type(\r
1423         IN                              VAPI_cqe_opcode_t                       opcode)\r
1424 {\r
1425         switch (opcode)\r
1426         {\r
1427         case VAPI_CQE_SQ_SEND_DATA:     return IB_WC_SEND;\r
1428         case VAPI_CQE_SQ_RDMA_WRITE:    return IB_WC_RDMA_WRITE;\r
1429         case VAPI_CQE_SQ_RDMA_READ:     return IB_WC_RDMA_READ;\r
1430         case VAPI_CQE_SQ_COMP_SWAP:     return IB_WC_COMPARE_SWAP;\r
1431         case VAPI_CQE_SQ_FETCH_ADD:     return IB_WC_FETCH_ADD;\r
1432         case VAPI_CQE_SQ_BIND_MRW:      return IB_WC_MW_BIND;\r
1433         case VAPI_CQE_RQ_SEND_DATA:     return IB_WC_RECV;\r
1434         case VAPI_CQE_RQ_RDMA_WITH_IMM: return IB_WC_RECV_RDMA_WRITE;\r
1435         default:\r
1436                 return IB_WC_SEND;\r
1437         }\r
1438 }\r
1439 \r
1440 /////////////////////////////////////////////////////////\r
1441 // Map Remote Node Addr Type\r
1442 /////////////////////////////////////////////////////////\r
1443 int\r
1444 mlnx_map_vapi_rna_type(\r
1445         IN                              VAPI_remote_node_addr_type_t    rna)\r
1446 {\r
1447         switch (rna)\r
1448         {\r
1449         case VAPI_RNA_UD:       return IB_QPT_UNRELIABLE_DGRM;\r
1450         case VAPI_RNA_RAW_ETY:  return IB_QPT_RAW_ETHER;\r
1451         case VAPI_RNA_RAW_IPV6: return IB_QPT_RAW_IPV6;\r
1452         default:\r
1453                 return IB_QPT_RELIABLE_CONN;\r
1454         }\r
1455 }\r
1456 \r
1457 //////////////////////////////////////////////////////////////\r
1458 // Convert from VAPI memory-region attributes to IBAL \r
1459 //////////////////////////////////////////////////////////////\r
/*
 * Fill an IBAL ib_mr_attr_t from the HH memory-region info.
 * Bounds are computed as start + size for both the local and remote
 * views of the region.
 */
void
mlnx_conv_vapi_mr_attr(
	IN				ib_pd_handle_t				pd_h,
	IN				HH_mr_info_t				*mr_info_p,
		OUT			ib_mr_attr_t				*mr_query_p)
{
	mr_query_p->h_pd = pd_h;
	mr_query_p->local_lb  = mr_info_p->local_start;
	mr_query_p->local_ub  = mr_info_p->local_start + mr_info_p->local_size;
	mr_query_p->remote_lb = mr_info_p->remote_start;
	mr_query_p->remote_ub = mr_info_p->remote_start + mr_info_p->remote_size;

	mr_query_p->access_ctrl = map_vapi_acl(mr_info_p->acl);
	mr_query_p->lkey = mr_info_p->lkey;
	// NOTE(review): only rkey is byte-swapped here (presumably IBAL
	// keeps rkey in network order) while lkey is passed through as-is
	// -- confirm this asymmetry is intentional.
	mr_query_p->rkey = cl_hton32(mr_info_p->rkey);
}
1476 \r
1477 //////////////////////////////////////////////////////////////\r
1478 // Convert from IBAL memory-window bind request to VAPI \r
1479 //////////////////////////////////////////////////////////////\r
1480 void\r
1481 mlnx_conv_bind_req(\r
1482         IN                              HHUL_qp_hndl_t                          hhul_qp_hndl,\r
1483         IN                              ib_bind_wr_t* const                     p_mw_bind,\r
1484                 OUT                     HHUL_mw_bind_t                          *bind_prop_p)\r
1485 {\r
1486         bind_prop_p->qp = hhul_qp_hndl;\r
1487         bind_prop_p->id  = p_mw_bind->wr_id;\r
1488         bind_prop_p->acl  = map_ibal_acl(p_mw_bind->access_ctrl);\r
1489         bind_prop_p->size  = p_mw_bind->local_ds.length;\r
1490         bind_prop_p->start  = (VAPI_virt_addr_t)(MT_virt_addr_t)p_mw_bind->local_ds.vaddr;\r
1491         bind_prop_p->mr_lkey = p_mw_bind->local_ds.lkey;\r
1492         bind_prop_p->comp_type = \r
1493                 (p_mw_bind->send_opt & IB_SEND_OPT_SIGNALED) ? VAPI_SIGNALED : VAPI_UNSIGNALED;\r
1494 }\r
1495 \r
1496 \r
1497 /////////////////////////////////////////////////////////\r
1498 // Map IBAL qp type to VAPI transport and special qp_type\r
1499 /////////////////////////////////////////////////////////\r
1500 int\r
1501 mlnx_map_ibal_qp_type(\r
1502         IN                              ib_qp_type_t                            ibal_qpt,\r
1503                 OUT                     VAPI_special_qp_t                       *vapi_qp_type_p)\r
1504 {\r
1505         switch (ibal_qpt)\r
1506         {\r
1507         case IB_QPT_RELIABLE_CONN:\r
1508                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;\r
1509                 return IB_TS_RC;\r
1510 \r
1511         case IB_QPT_UNRELIABLE_CONN:\r
1512                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;\r
1513                 return IB_TS_UC;\r
1514 \r
1515         case IB_QPT_UNRELIABLE_DGRM:\r
1516                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;\r
1517                 return IB_TS_UD;\r
1518 \r
1519         case IB_QPT_QP0:\r
1520                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP;\r
1521                 return IB_TS_UD;\r
1522 \r
1523         case IB_QPT_QP1:\r
1524                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;\r
1525                 return IB_TS_UD;\r
1526 \r
1527         case IB_QPT_RAW_IPV6:\r
1528                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_IPV6_QP; // TBD: ??\r
1529                 return IB_TS_RAW;\r
1530 \r
1531         case IB_QPT_RAW_ETHER:\r
1532                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP;  // TBD: ??\r
1533                 return IB_TS_RAW;\r
1534 \r
1535         case IB_QPT_MAD:\r
1536                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;\r
1537                 return IB_TS_UD;\r
1538 \r
1539         case IB_QPT_QP0_ALIAS:\r
1540                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP;\r
1541                 return IB_TS_UD;\r
1542 \r
1543         case IB_QPT_QP1_ALIAS:\r
1544                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;\r
1545                 return IB_TS_UD;\r
1546 \r
1547         default:\r
1548                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map ibal_qp_type %d (last known %d) returning %d\n",\r
1549                         ibal_qpt, IB_QPT_QP1_ALIAS, IB_TS_RAW));\r
1550                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP;\r
1551                 return IB_TS_RAW;\r
1552         }\r
1553 }\r
1554 \r
1555 /////////////////////////////////////////////////////////\r
1556 // QP and CQ value must be handled by caller\r
1557 /////////////////////////////////////////////////////////\r
1558 void\r
1559 mlnx_conv_qp_create_attr(\r
1560         IN              const   ib_qp_create_t                          *create_attr_p,\r
1561                 OUT                     HHUL_qp_init_attr_t                     *init_attr_p,\r
1562                 OUT                     VAPI_special_qp_t                       *vapi_qp_type_p)\r
1563 {\r
1564         init_attr_p->ts_type = mlnx_map_ibal_qp_type(create_attr_p->qp_type, vapi_qp_type_p);\r
1565 \r
1566         init_attr_p->qp_cap.max_oust_wr_sq = create_attr_p->sq_depth;\r
1567         init_attr_p->qp_cap.max_oust_wr_rq = create_attr_p->rq_depth;\r
1568         init_attr_p->qp_cap.max_sg_size_sq = create_attr_p->sq_sge;\r
1569         init_attr_p->qp_cap.max_sg_size_rq = create_attr_p->rq_sge;\r
1570 \r
1571         init_attr_p->sq_sig_type = (create_attr_p->sq_signaled) ? VAPI_SIGNAL_ALL_WR : VAPI_SIGNAL_REQ_WR;\r
1572         init_attr_p->rq_sig_type = VAPI_SIGNAL_ALL_WR;\r
1573 \r
1574         init_attr_p->srq = HHUL_INVAL_SRQ_HNDL;\r
1575 }\r
1576 \r
1577 /////////////////////////////////////////////////////////\r
1578 // NOTE: ibal_qp_state is non linear - so we cannot use a LUT\r
1579 /////////////////////////////////////////////////////////\r
1580 VAPI_qp_state_t\r
1581 mlnx_map_ibal_qp_state(\r
1582         IN                              ib_qp_state_t                           ibal_qp_state)\r
1583 {\r
1584         VAPI_qp_state_t vapi_qp_state = VAPI_RESET;\r
1585 \r
1586         if      (ibal_qp_state & IB_QPS_RESET) vapi_qp_state = VAPI_RESET;\r
1587         else if (ibal_qp_state & IB_QPS_INIT)  vapi_qp_state = VAPI_INIT;\r
1588         else if (ibal_qp_state & IB_QPS_RTR)   vapi_qp_state = VAPI_RTR;\r
1589         else if (ibal_qp_state & IB_QPS_RTS)   vapi_qp_state = VAPI_RTS;\r
1590         else if (ibal_qp_state & IB_QPS_SQD)   vapi_qp_state = VAPI_SQD;\r
1591         else if (ibal_qp_state & IB_QPS_SQERR) vapi_qp_state = VAPI_SQE;\r
1592         else if (ibal_qp_state & IB_QPS_ERROR) vapi_qp_state = VAPI_ERR;\r
1593 \r
1594         return vapi_qp_state;\r
1595 }\r
1596 \r
1597 /////////////////////////////////////////////////////////\r
1598 /////////////////////////////////////////////////////////\r
1599 ib_qp_state_t\r
1600 mlnx_map_vapi_qp_state(\r
1601         IN                              VAPI_qp_state_t                         vapi_qp_state)\r
1602 {\r
1603         switch (vapi_qp_state)\r
1604         {\r
1605         case VAPI_RESET: return IB_QPS_RESET;\r
1606         case VAPI_INIT:  return IB_QPS_INIT;\r
1607         case VAPI_RTR:   return IB_QPS_RTR;\r
1608         case VAPI_RTS:   return IB_QPS_RTS;\r
1609         case VAPI_SQD:   return IB_QPS_SQD;\r
1610         case VAPI_SQE:   return IB_QPS_SQERR;\r
1611         case VAPI_ERR:   return IB_QPS_ERROR;\r
1612                 // TBD: IB_QPS_SQD_DRAINING\r
1613                 // TBD: IB_QPS_SQD_DRAINED\r
1614         default:\r
1615                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_qp_state %d (last known %d) returning %d\n",\r
1616                         vapi_qp_state, VAPI_ERR, IB_QPS_INIT));\r
1617                 return IB_QPS_INIT;\r
1618         }\r
1619 }\r
1620 \r
1621 /////////////////////////////////////////////////////////\r
1622 /////////////////////////////////////////////////////////\r
1623 ib_apm_state_t\r
1624 mlnx_map_vapi_apm_state(\r
1625         IN                              VAPI_mig_state_t                        vapi_apm_state)\r
1626 {\r
1627         switch (vapi_apm_state)\r
1628         {\r
1629         case VAPI_MIGRATED: return IB_APM_MIGRATED;\r
1630         case VAPI_REARM:    return IB_APM_REARM;\r
1631         case VAPI_ARMED:    return IB_APM_ARMED;\r
1632 \r
1633         default:\r
1634                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_apm_state %d (last known %d) returning %d\n",\r
1635                         vapi_apm_state, VAPI_ARMED, 0));\r
1636                 return 0;\r
1637         }\r
1638 }\r
1639 \r
#if 0
/////////////////////////////////////////////////////////
// UNUSED: IBAL uses same encoding as THH
/////////////////////////////////////////////////////////
// Compiled out: IBAL and THH share the same MTU encoding, so no
// translation is required.  Kept for reference only.
static
u_int32_t ibal_mtu_to_vapi(u_int32_t ibal_mtu)
{
	u_int32_t mtu = 0;

	// MTU256=1, MTU512=2, MTU1024=3
	// i.e. log2(ibal_mtu) - 7: 256 -> 8 - 7 = 1, etc.
	while (ibal_mtu >>= 1) mtu++;
	return mtu - 7;
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
// Inverse of the above: encoded value -> byte count, 2^(vapi_mtu + 7).
static
u_int32_t vapi_mtu_to_ibal(u_int32_t vapi_mtu)
{
	return (1 << (vapi_mtu + 7));
}
#endif
1662 \r
1663 /////////////////////////////////////////////////////////\r
1664 /////////////////////////////////////////////////////////\r
1665 void\r
1666 mlnx_conv_vapi_qp_attr(\r
1667         IN                              HH_hca_hndl_t                           hh_hndl,\r
1668         IN                              VAPI_qp_attr_t                          *hh_qp_attr_p,\r
1669                 OUT                     ib_qp_attr_t                            *qp_attr_p)\r
1670 {\r
1671         qp_attr_p->access_ctrl     = map_vapi_qp_acl(hh_qp_attr_p->remote_atomic_flags);\r
1672         qp_attr_p->pkey_index      = (uint16_t)hh_qp_attr_p->pkey_ix;\r
1673         qp_attr_p->sq_depth        = hh_qp_attr_p->cap.max_oust_wr_sq;\r
1674         qp_attr_p->rq_depth        = hh_qp_attr_p->cap.max_oust_wr_rq;\r
1675         qp_attr_p->sq_sge          = hh_qp_attr_p->cap.max_sg_size_sq;\r
1676         qp_attr_p->rq_sge          = hh_qp_attr_p->cap.max_sg_size_rq;\r
1677         qp_attr_p->sq_max_inline   = hh_qp_attr_p->cap.max_inline_data_sq;\r
1678         qp_attr_p->init_depth      = hh_qp_attr_p->ous_dst_rd_atom; // outstanding outgoing\r
1679         qp_attr_p->resp_res        = hh_qp_attr_p->qp_ous_rd_atom;  // outstanding as target (in)\r
1680 \r
1681         qp_attr_p->num             = cl_ntoh32(hh_qp_attr_p->qp_num);\r
1682         CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_qpn 0x%x = hh_qpn 0x%x\n",\r
1683                 qp_attr_p->num,\r
1684                 hh_qp_attr_p->qp_num));\r
1685 \r
1686         qp_attr_p->dest_num        = cl_ntoh32(hh_qp_attr_p->dest_qp_num);\r
1687         CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_dest 0x%x = hh_dest 0x%x\n",\r
1688                 qp_attr_p->dest_num,\r
1689                 hh_qp_attr_p->dest_qp_num));\r
1690         qp_attr_p->qkey            = cl_ntoh32 (hh_qp_attr_p->qkey);\r
1691 \r
1692         qp_attr_p->sq_psn          = cl_ntoh32 (hh_qp_attr_p->sq_psn);\r
1693         qp_attr_p->rq_psn          = cl_ntoh32 (hh_qp_attr_p->rq_psn);\r
1694 \r
1695         qp_attr_p->primary_port    = hh_qp_attr_p->port;\r
1696         qp_attr_p->alternate_port  = hh_qp_attr_p->alt_port;\r
1697 \r
1698         qp_attr_p->state           = mlnx_map_vapi_qp_state(hh_qp_attr_p->qp_state);\r
1699         qp_attr_p->apm_state       = mlnx_map_vapi_apm_state(hh_qp_attr_p->path_mig_state);\r
1700 \r
1701         mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->av, &qp_attr_p->primary_av);\r
1702         qp_attr_p->primary_av.conn.path_mtu          = (u_int8_t)hh_qp_attr_p->path_mtu;\r
1703         qp_attr_p->primary_av.conn.local_ack_timeout = hh_qp_attr_p->timeout; \r
1704         qp_attr_p->primary_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count;\r
1705         qp_attr_p->primary_av.conn.rnr_retry_cnt     = hh_qp_attr_p->rnr_retry;\r
1706 \r
1707         mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->alt_av, &qp_attr_p->alternate_av);\r
1708         qp_attr_p->alternate_av.conn. path_mtu         = (u_int8_t)hh_qp_attr_p->path_mtu;\r
1709         qp_attr_p->alternate_av.conn.local_ack_timeout = hh_qp_attr_p->timeout;\r
1710         qp_attr_p->alternate_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count;\r
1711         qp_attr_p->alternate_av.conn.rnr_retry_cnt     = hh_qp_attr_p->rnr_retry;\r
1712 }\r
1713 #if 0\r
1714 XXX:\r
1715 QP_ATTR_QP_STATE\r
1716 QP_ATTR_EN_SQD_ASYN_NOTIF\r
1717 QP_ATTR_QP_NUM\r
1718 + QP_ATTR_REMOTE_ATOMIC_FLAGS\r
1719 + QP_ATTR_PKEY_IX\r
1720 + QP_ATTR_PORT\r
1721 + QP_ATTR_QKEY\r
1722 + QP_ATTR_RQ_PSN\r
1723 + QP_ATTR_AV\r
1724 \r
1725 QP_ATTR_PATH_MTU\r
1726 + QP_ATTR_TIMEOUT\r
1727 + QP_ATTR_RETRY_COUNT\r
1728 + QP_ATTR_RNR_RETRY\r
1729 QP_ATTR_QP_OUS_RD_ATOM\r
1730 \r
1731 - QP_ATTR_ALT_PATH\r
1732 \r
1733 + QP_ATTR_MIN_RNR_TIMER\r
1734 QP_ATTR_SQ_PSN\r
1735 QP_ATTR_OUS_DST_RD_ATOM\r
1736 QP_ATTR_PATH_MIG_STATE\r
1737 QP_ATTR_CAP\r
1738 #endif\r
1739 \r
1740 /////////////////////////////////////////////////////////\r
1741 /////////////////////////////////////////////////////////\r
ib_api_status_t
mlnx_conv_qp_modify_attr(
	IN				HH_hca_hndl_t					hh_hndl,
	IN				ib_qp_type_t					qp_type,
	IN		const	ib_qp_mod_t						*modify_attr_p,
		OUT			VAPI_qp_attr_t					*qp_attr_p, 
		OUT			VAPI_qp_attr_mask_t				*attr_mask_p)
{
	/*
	 * Convert an IBAL modify-QP request into a VAPI attribute structure
	 * (*qp_attr_p) plus the mask (*attr_mask_p) of attributes that are
	 * valid for the requested state transition; the verbs layer consumes
	 * only the masked fields.  Returns IB_UNSUPPORTED when the request
	 * asks for something VAPI cannot do (resizing the WQE depth), and
	 * IB_SUCCESS otherwise.
	 */

	qp_attr_p->qp_state = mlnx_map_ibal_qp_state(modify_attr_p->req_state);
	*attr_mask_p = QP_ATTR_QP_STATE;

	switch(modify_attr_p->req_state)
	{
	case IB_QPS_RESET:
		/* Nothing beyond the target state is needed for RESET. */
		break;

	case IB_QPS_INIT:
		/* RESET->INIT: port, Q_Key and P_Key index are mandatory. */
		*attr_mask_p |= QP_ATTR_PORT |
			QP_ATTR_QKEY |
			QP_ATTR_PKEY_IX ;

		qp_attr_p->port    = modify_attr_p->state.init.primary_port;
		qp_attr_p->qkey    = cl_ntoh32 (modify_attr_p->state.init.qkey);
		qp_attr_p->pkey_ix = modify_attr_p->state.init.pkey_index;
		/* Remote-access flags apply only to RC QPs. */
		if (IB_QPT_RELIABLE_CONN == qp_type)
		{
			*attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;
			qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.init.access_ctrl);
		} else
		{
			qp_attr_p->remote_atomic_flags = 0;
		}
		break;

	case IB_QPS_RTR:
		/* VAPI doesn't support modifying the WQE depth ever. */
		if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||
			modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH )
		{
			return IB_UNSUPPORTED;
		}

		/*
		 * INIT->RTR mandatory attributes.  Note: the RNR NAK timer
		 * (QP_ATTR_MIN_RNR_TIMER) is required in this transition.
		 */
		*attr_mask_p |= QP_ATTR_RQ_PSN |
			QP_ATTR_DEST_QP_NUM |
			QP_ATTR_QP_OUS_RD_ATOM |
			QP_ATTR_MIN_RNR_TIMER |
			QP_ATTR_AV ;

		qp_attr_p->rq_psn          = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);
		qp_attr_p->dest_qp_num     = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);
		qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rtr.resp_res;

		qp_attr_p->min_rnr_timer   = modify_attr_p->state.rtr.rnr_nak_timeout;

#if 1
		CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("modify_qp: hh_dest 0x%x = ibal_dest 0x%x\n",
			qp_attr_p->dest_qp_num, modify_attr_p->state.rtr.dest_qp));
#endif

		// Convert primary RC AV (mandatory)
		cl_memclr(&qp_attr_p->av, sizeof(VAPI_ud_av_t));
		mlnx_conv_ibal_av(hh_hndl,
			&modify_attr_p->state.rtr.primary_av, &qp_attr_p->av);

		/* RC-only path parameters carried inside the primary AV. */
		if (IB_QPT_RELIABLE_CONN == qp_type)
		{
			*attr_mask_p |= QP_ATTR_PATH_MTU;
			qp_attr_p->path_mtu     = modify_attr_p->state.rtr.primary_av.conn.path_mtu; // MTU
			*attr_mask_p |= QP_ATTR_TIMEOUT;
			qp_attr_p->timeout     = modify_attr_p->state.rtr.primary_av.conn.local_ack_timeout; // XXX: conv
			*attr_mask_p |= QP_ATTR_RETRY_COUNT;
			qp_attr_p->retry_count = modify_attr_p->state.rtr.primary_av.conn.seq_err_retry_cnt;
			*attr_mask_p |= QP_ATTR_RNR_RETRY;
			qp_attr_p->rnr_retry   = modify_attr_p->state.rtr.primary_av.conn.rnr_retry_cnt;
		}

		// Convert Remote Atomic Flags
		if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL)
		{
			*attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;
			qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rtr.access_ctrl);
		}

		// Convert alternate RC AV
		if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV)
		{
			*attr_mask_p |= QP_ATTR_ALT_PATH;
			cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t));
			mlnx_conv_ibal_av(hh_hndl,
				&modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_av);

			if (IB_QPT_RELIABLE_CONN == qp_type)
			{
				/* Only the timeout differs per path; retry counts are shared. */
				qp_attr_p->alt_timeout     = modify_attr_p->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv
#if 0
				/* Incompliant with spec 1.1! Data already set before */
				qp_attr_p->retry_count = modify_attr_p->state.rtr.alternate_av.conn.seq_err_retry_cnt;
				qp_attr_p->rnr_retry   = modify_attr_p->state.rtr.alternate_av.conn.rnr_retry_cnt;
#endif
			}
		}
		break;

	case IB_QPS_RTS:
		/* VAPI doesn't support modifying the WQE depth ever. */
		if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||
			modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH )
		{
			return IB_UNSUPPORTED;
		}

		/* RTR->RTS (and SQD->RTS) mandatory attributes. */
		*attr_mask_p |= QP_ATTR_SQ_PSN |
			QP_ATTR_RETRY_COUNT |
			QP_ATTR_RNR_RETRY |
			QP_ATTR_TIMEOUT|
			QP_ATTR_OUS_DST_RD_ATOM |
			QP_ATTR_MIN_RNR_TIMER;

		qp_attr_p->sq_psn = cl_ntoh32 (modify_attr_p->state.rts.sq_psn);

		if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL)
		{
			*attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;
			qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rts.access_ctrl);
		}

		qp_attr_p->timeout     = modify_attr_p->state.rts.local_ack_timeout; // XXX: conv
		qp_attr_p->ous_dst_rd_atom = modify_attr_p->state.rts.init_depth;
		qp_attr_p->retry_count = modify_attr_p->state.rts.retry_cnt;
		qp_attr_p->rnr_retry   = modify_attr_p->state.rts.rnr_retry_cnt;
		qp_attr_p->min_rnr_timer   = modify_attr_p->state.rts.rnr_nak_timeout;

		// Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)
		if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {
			*attr_mask_p |= QP_ATTR_QP_OUS_RD_ATOM;
			qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rts.resp_res;
		}

		// Convert alternate RC AV
		if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV)
		{
			*attr_mask_p |= QP_ATTR_ALT_PATH;
			cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t));
			mlnx_conv_ibal_av(hh_hndl,
				&modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_av);
			if (IB_QPT_RELIABLE_CONN == qp_type)
			{
				qp_attr_p->alt_timeout     = modify_attr_p->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv
#if 0
				/* Incompliant with spec 1.1! Data already set before */
				qp_attr_p->retry_count = modify_attr_p->state.rts.alternate_av.conn.seq_err_retry_cnt;
				qp_attr_p->rnr_retry   = modify_attr_p->state.rts.alternate_av.conn.rnr_retry_cnt;
#endif
			}
		}
		break;

		// TBD: The following are treated equally (SQ Drain)
	case IB_QPS_SQD:
	case IB_QPS_SQD_DRAINING:
	case IB_QPS_SQD_DRAINED:
		/* Send-queue drain: only the async-notification flag is carried. */
		*attr_mask_p |= QP_ATTR_EN_SQD_ASYN_NOTIF;
		qp_attr_p->en_sqd_asyn_notif = (MT_bool)modify_attr_p->state.sqd.sqd_event;
		break;

	case IB_QPS_SQERR:
	case IB_QPS_ERROR:
	case IB_QPS_TIME_WAIT:
	default:
		/* Error/unknown targets carry no extra attributes. */
		break;
	}
	CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("CL: conv_qp_modify: new state %d attr_mask 0x%x\n", qp_attr_p->qp_state, *attr_mask_p));
	return IB_SUCCESS;
}
1917 \r
1918 /////////////////////////////////////////////////////////\r
1919 /////////////////////////////////////////////////////////\r
1920 static VAPI_wr_opcode_t\r
1921 map_ibal_send_opcode(\r
1922         IN                              ib_wr_type_t                            ibal_opcode,\r
1923         IN                              boolean_t                                       imm)\r
1924 {\r
1925         VAPI_wr_opcode_t                vapi_opcode;\r
1926 \r
1927         switch (ibal_opcode)\r
1928         {\r
1929         case WR_SEND:         vapi_opcode = VAPI_SEND;\r
1930                 break;\r
1931         case WR_RDMA_WRITE:   vapi_opcode = VAPI_RDMA_WRITE;\r
1932                 break;\r
1933         case WR_RDMA_READ:    vapi_opcode = VAPI_RDMA_READ;\r
1934                 break;\r
1935         case WR_COMPARE_SWAP: vapi_opcode = VAPI_ATOMIC_CMP_AND_SWP;\r
1936                 break;\r
1937         case WR_FETCH_ADD:    vapi_opcode = VAPI_ATOMIC_FETCH_AND_ADD;\r
1938                 break;\r
1939         default:              vapi_opcode = VAPI_SEND;\r
1940                 break;\r
1941         }\r
1942         if (imm && (VAPI_SEND == vapi_opcode || VAPI_RDMA_WRITE == vapi_opcode)) vapi_opcode++;\r
1943         return vapi_opcode;\r
1944 }\r
1945 \r
1946 /////////////////////////////////////////////////////////\r
1947 /////////////////////////////////////////////////////////\r
1948 ib_api_status_t\r
1949 mlnx_conv_send_desc(\r
1950         IN                              IB_ts_t                                         transport,\r
1951         IN              const   ib_send_wr_t                            *ibal_send_wqe_p,\r
1952                 OUT                     VAPI_sr_desc_t                          *vapi_send_desc_p)\r
1953 {\r
1954         boolean_t                                               imm = FALSE;\r
1955         u_int32_t                                               idx;\r
1956         register VAPI_sg_lst_entry_t    *sg_lst_p;\r
1957         register ib_local_ds_t                  *ds_array;\r
1958 \r
1959 \r
1960         switch (transport)\r
1961         {\r
1962         case IB_TS_UD:\r
1963                 CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "UD"));\r
1964                 {\r
1965                         mlnx_avo_t *avo_p = (mlnx_avo_t *)ibal_send_wqe_p->dgrm.ud.h_av;\r
1966 \r
1967                         vapi_send_desc_p->remote_qp  = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qp);\r
1968                         vapi_send_desc_p->remote_qkey = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qkey);\r
1969 \r
1970                         if (!avo_p || avo_p->mark != E_MARK_AV)\r
1971                                 return IB_INVALID_AV_HANDLE;\r
1972 \r
1973                         vapi_send_desc_p->remote_ah = avo_p->h_av; // was ah.hhul\r
1974                         break;\r
1975                 }\r
1976 \r
1977         case IB_TS_RC:\r
1978                 CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "RC"));\r
1979                 // vapi_send_desc_p->remote_qp   = 0;\r
1980                 // vapi_send_desc_p->remote_qkey = 0;\r
1981                 vapi_send_desc_p->remote_addr = ibal_send_wqe_p->remote_ops.vaddr;\r
1982                 vapi_send_desc_p->r_key       = ibal_send_wqe_p->remote_ops.rkey;\r
1983                 vapi_send_desc_p->compare_add = ibal_send_wqe_p->remote_ops.atomic1;\r
1984                 vapi_send_desc_p->swap        = ibal_send_wqe_p->remote_ops.atomic2;\r
1985                 break;\r
1986 \r
1987         default: // TBD: RAW, RD\r
1988                 return IB_UNSUPPORTED;\r
1989         }\r
1990 \r
1991         imm = (0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_IMMEDIATE));\r
1992         vapi_send_desc_p->fence      = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_FENCE));\r
1993         vapi_send_desc_p->set_se     = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SOLICITED));\r
1994         vapi_send_desc_p->comp_type  = (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SIGNALED) ?\r
1995 VAPI_SIGNALED : VAPI_UNSIGNALED;\r
1996 \r
1997         vapi_send_desc_p->id = ibal_send_wqe_p->wr_id;\r
1998         vapi_send_desc_p->opcode = map_ibal_send_opcode(ibal_send_wqe_p->wr_type, imm);\r
1999 \r
2000         if (imm)\r
2001                 vapi_send_desc_p->imm_data = cl_ntoh32 (ibal_send_wqe_p->immediate_data);\r
2002 \r
2003         vapi_send_desc_p->sg_lst_len = ibal_send_wqe_p->num_ds;\r
2004 \r
2005         sg_lst_p = vapi_send_desc_p->sg_lst_p;\r
2006         ds_array = ibal_send_wqe_p->ds_array;\r
2007         for (idx = 0; idx < ibal_send_wqe_p->num_ds; idx++)\r
2008         {\r
2009                 sg_lst_p->addr = ds_array->vaddr;\r
2010                 sg_lst_p->len  = ds_array->length;\r
2011                 sg_lst_p->lkey = ds_array->lkey;\r
2012                 // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_send (conv) addr %Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey));\r
2013                 sg_lst_p++;\r
2014                 ds_array++;\r
2015         }\r
2016         CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("send: rqpn 0x%x rkey 0x%x\n", \r
2017                 vapi_send_desc_p->remote_qp,\r
2018                 vapi_send_desc_p->remote_qkey));\r
2019         return IB_SUCCESS;\r
2020 }\r
2021 \r
2022 /////////////////////////////////////////////////////////\r
2023 /////////////////////////////////////////////////////////\r
2024 ib_api_status_t\r
2025 mlnx_conv_recv_desc(\r
2026         IN              const   ib_recv_wr_t                            *ibal_recv_wqe_p,\r
2027                 OUT                     VAPI_rr_desc_t                          *vapi_recv_desc_p)\r
2028 {\r
2029         u_int32_t                                               idx;\r
2030         register VAPI_sg_lst_entry_t    *sg_lst_p;\r
2031         register ib_local_ds_t                  *ds_array;\r
2032 \r
2033         vapi_recv_desc_p->id         = ibal_recv_wqe_p->wr_id;\r
2034         vapi_recv_desc_p->sg_lst_len = ibal_recv_wqe_p->num_ds;\r
2035         vapi_recv_desc_p->opcode     = VAPI_RECEIVE;\r
2036         vapi_recv_desc_p->comp_type  = VAPI_SIGNALED;\r
2037 \r
2038         sg_lst_p = vapi_recv_desc_p->sg_lst_p;\r
2039         ds_array = ibal_recv_wqe_p->ds_array;\r
2040         for (idx = 0; idx < ibal_recv_wqe_p->num_ds; idx++)\r
2041         {\r
2042                 sg_lst_p->addr = ds_array->vaddr;\r
2043                 sg_lst_p->len  = ds_array->length;\r
2044                 sg_lst_p->lkey = ds_array->lkey;\r
2045                 // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_recv (conv) addr 0x%Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey));\r
2046                 sg_lst_p++;\r
2047                 ds_array++;\r
2048         }\r
2049 \r
2050         return IB_SUCCESS;\r
2051 }\r
2052 \r
2053 /////////////////////////////////////////////////////////\r
2054 /////////////////////////////////////////////////////////\r
2055 void\r
2056 vapi_port_cap_to_ibal(\r
2057         IN                              IB_port_cap_mask_t                      vapi_port_cap,\r
2058                 OUT                     ib_port_cap_t                           *ibal_port_cap_p)\r
2059 {\r
2060         if (vapi_port_cap & IB_CAP_MASK_IS_CONN_MGMT_SUP)\r
2061                 ibal_port_cap_p->cm = TRUE;\r
2062         if (vapi_port_cap & IB_CAP_MASK_IS_SNMP_TUNN_SUP)\r
2063                 ibal_port_cap_p->snmp = TRUE;\r
2064         if (vapi_port_cap & IB_CAP_MASK_IS_DEVICE_MGMT_SUP)\r
2065                 ibal_port_cap_p->dev_mgmt = TRUE;\r
2066         if (vapi_port_cap & IB_CAP_MASK_IS_VENDOR_CLS_SUP)\r
2067                 ibal_port_cap_p->vend = TRUE;\r
2068         if (vapi_port_cap & IB_CAP_MASK_IS_SM_DISABLED)\r
2069                 ibal_port_cap_p->sm_disable = TRUE;\r
2070         if (vapi_port_cap & IB_CAP_MASK_IS_SM)\r
2071                 ibal_port_cap_p->sm = TRUE;\r
2072         if (vapi_port_cap & IB_CAP_MASK_IS_CLIENT_REREGISTRATION_SUP)\r
2073                 ibal_port_cap_p->client_reregister= TRUE;\r
2074 }\r
2075 \r
2076 /////////////////////////////////////////////////////////\r
2077 /////////////////////////////////////////////////////////\r
2078 void\r
2079 mlnx_conv_vapi_hca_cap(\r
2080         IN                              HH_hca_dev_t                            *hca_info_p,\r
2081         IN                              VAPI_hca_cap_t                          *vapi_hca_cap_p,\r
2082         IN                              VAPI_hca_port_t                         *vapi_hca_ports,\r
2083                 OUT                     ib_ca_attr_t                            *ca_attr_p)\r
2084 {\r
2085         u_int8_t                        port_num;\r
2086         VAPI_hca_port_t         *vapi_port_p;\r
2087         ib_port_attr_t          *ibal_port_p;\r
2088 \r
2089         ca_attr_p->vend_id  = hca_info_p->vendor_id;\r
2090         ca_attr_p->dev_id   = (uint16_t)hca_info_p->dev_id;\r
2091         ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver;\r
2092         ca_attr_p->fw_ver = hca_info_p->fw_ver;\r
2093 \r
2094         ca_attr_p->ca_guid   = *(UNALIGNED64 u_int64_t *)vapi_hca_cap_p->node_guid;\r
2095         ca_attr_p->num_ports = vapi_hca_cap_p->phys_port_num;\r
2096         ca_attr_p->max_qps   = vapi_hca_cap_p->max_num_qp;\r
2097         ca_attr_p->max_wrs   = vapi_hca_cap_p->max_qp_ous_wr;\r
2098         ca_attr_p->max_sges   = vapi_hca_cap_p->max_num_sg_ent;\r
2099         ca_attr_p->max_rd_sges = vapi_hca_cap_p->max_num_sg_ent_rd;\r
2100         ca_attr_p->max_cqs    = vapi_hca_cap_p->max_num_cq;\r
2101         ca_attr_p->max_cqes  = vapi_hca_cap_p->max_num_ent_cq;\r
2102         ca_attr_p->max_pds    = vapi_hca_cap_p->max_pd_num;\r
2103         ca_attr_p->init_regions = vapi_hca_cap_p->max_num_mr;\r
2104         ca_attr_p->init_windows = vapi_hca_cap_p->max_mw_num;\r
2105         ca_attr_p->init_region_size = vapi_hca_cap_p->max_mr_size;\r
2106         ca_attr_p->max_addr_handles = vapi_hca_cap_p->max_ah_num;\r
2107         ca_attr_p->atomicity     = vapi_hca_cap_p->atomic_cap;\r
2108         ca_attr_p->max_partitions = vapi_hca_cap_p->max_pkeys;\r
2109         ca_attr_p->max_qp_resp_res = vapi_hca_cap_p->max_qp_ous_rd_atom;\r
2110         ca_attr_p->max_resp_res    = vapi_hca_cap_p->max_res_rd_atom;\r
2111         ca_attr_p->max_qp_init_depth = vapi_hca_cap_p->max_qp_init_rd_atom;\r
2112         ca_attr_p->max_ipv6_qps    = vapi_hca_cap_p->max_raw_ipv6_qp;\r
2113         ca_attr_p->max_ether_qps   = vapi_hca_cap_p->max_raw_ethy_qp;\r
2114         ca_attr_p->max_mcast_grps  = vapi_hca_cap_p->max_mcast_grp_num;\r
2115         ca_attr_p->max_mcast_qps   = vapi_hca_cap_p->max_total_mcast_qp_attach_num;\r
2116         ca_attr_p->max_qps_per_mcast_grp = vapi_hca_cap_p->max_mcast_qp_attach_num;\r
2117         ca_attr_p->local_ack_delay = vapi_hca_cap_p->local_ca_ack_delay;\r
2118         ca_attr_p->bad_pkey_ctr_support = vapi_hca_cap_p->flags & VAPI_BAD_PKEY_COUNT_CAP;\r
2119         ca_attr_p->bad_qkey_ctr_support = vapi_hca_cap_p->flags & VAPI_BAD_QKEY_COUNT_CAP;\r
2120         ca_attr_p->raw_mcast_support    = vapi_hca_cap_p->flags & VAPI_RAW_MULTI_CAP;\r
2121         ca_attr_p->apm_support          = vapi_hca_cap_p->flags & VAPI_AUTO_PATH_MIG_CAP;\r
2122         ca_attr_p->av_port_check        = vapi_hca_cap_p->flags & VAPI_UD_AV_PORT_ENFORCE_CAP;\r
2123         ca_attr_p->change_primary_port  = vapi_hca_cap_p->flags & VAPI_CHANGE_PHY_PORT_CAP;\r
2124         ca_attr_p->modify_wr_depth      = vapi_hca_cap_p->flags & VAPI_RESIZE_OUS_WQE_CAP;\r
2125         ca_attr_p->hw_agents            = FALSE; // in the context of IBAL then agent is implemented on the host\r
2126 \r
2127         ca_attr_p->num_page_sizes = 1;\r
2128         ca_attr_p->p_page_size[0] = PAGESIZE; // TBD: extract an array of page sizes from HCA cap\r
2129 \r
2130         for (port_num = 0; port_num < vapi_hca_cap_p->phys_port_num; port_num++)\r
2131         {\r
2132                 // Setup port pointers\r
2133                 ibal_port_p = &ca_attr_p->p_port_attr[port_num];\r
2134                 vapi_port_p = &vapi_hca_ports[port_num];\r
2135 \r
2136                 // Port Cabapilities\r
2137                 cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));\r
2138                 vapi_port_cap_to_ibal(vapi_port_p->capability_mask, &ibal_port_p->cap);\r
2139 \r
2140                 // Port Atributes\r
2141                 ibal_port_p->port_num   = port_num + 1;\r
2142                 ibal_port_p->port_guid  = ibal_port_p->p_gid_table[0].unicast.interface_id;\r
2143                 ibal_port_p->lid        = cl_ntoh16(vapi_port_p->lid);\r
2144                 ibal_port_p->lmc        = vapi_port_p->lmc;\r
2145                 ibal_port_p->max_vls    = vapi_port_p->max_vl_num;\r
2146                 ibal_port_p->sm_lid     = cl_ntoh16(vapi_port_p->sm_lid);\r
2147                 ibal_port_p->sm_sl      = vapi_port_p->sm_sl;\r
2148                 ibal_port_p->link_state = (vapi_port_p->state != 0) ? (uint8_t)vapi_port_p->state : IB_LINK_DOWN;\r
2149                 ibal_port_p->num_gids   = vapi_port_p->gid_tbl_len;\r
2150                 ibal_port_p->num_pkeys  = vapi_port_p->pkey_tbl_len;\r
2151                 ibal_port_p->pkey_ctr   = (uint16_t)vapi_port_p->bad_pkey_counter;\r
2152                 ibal_port_p->qkey_ctr   = (uint16_t)vapi_port_p->qkey_viol_counter;\r
2153                 ibal_port_p->max_msg_size = vapi_port_p->max_msg_sz;\r
2154                 ibal_port_p->mtu = (u_int8_t)vapi_port_p->max_mtu;\r
2155 \r
2156                 ibal_port_p->subnet_timeout = 5; // TBD: currently 128us\r
2157                 // ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec\r
2158 #if 0\r
2159                 CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("Port %d port_guid 0x%"PRIx64"\n",\r
2160                         ibal_port_p->port_num, ibal_port_p->port_guid));\r
2161 #endif\r
2162         }\r
2163 }\r
2164 \r
2165 /////////////////////////////////////////////////////////\r
2166 /////////////////////////////////////////////////////////\r
2167 ib_api_status_t\r
2168 mlnx_get_hca_pkey_tbl(\r
2169         IN                              HH_hca_hndl_t                           hh_hndl,\r
2170         IN                              u_int8_t                                        port_num,\r
2171         IN                              u_int16_t                                       num_entries,\r
2172                 OUT                     void*                                           table_p)\r
2173 {\r
2174         u_int16_t               size;\r
2175         ib_net16_t              *pkey_p;\r
2176 \r
2177                 if (HH_OK != THH_hob_get_pkey_tbl( hh_hndl, port_num, num_entries, &size, table_p))\r
2178                 return IB_ERROR;\r
2179 \r
2180         pkey_p = (ib_net16_t *)table_p;\r
2181 #if 0\r
2182         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("port %d pkey0 0x%x pkey1 0x%x\n", port_num, pkey_p[0], pkey_p[1]));\r
2183 #endif\r
2184         return IB_SUCCESS;\r
2185 }\r
2186 \r
2187 ib_api_status_t\r
2188 mlnx_get_hca_gid_tbl(\r
2189         IN                              HH_hca_hndl_t                           hh_hndl,\r
2190         IN                              u_int8_t                                        port_num,\r
2191         IN                              u_int16_t                                       num_entries,\r
2192                 OUT                     void*                                           table_p)\r
2193 {\r
2194         u_int16_t               size;\r
2195 \r
2196         if (HH_OK != THH_hob_get_gid_tbl( hh_hndl, port_num, num_entries, &size, table_p))\r
2197                 return IB_ERROR;\r
2198 \r
2199         return IB_SUCCESS;\r
2200 }\r