[HCA] Add client reregister support.
hw/mt23108/kernel/hca_data.c
/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */


#include "hca_data.h"
#include "hca_debug.h"

static cl_spinlock_t	hob_lock;

#if 1
u_int32_t		g_mlnx_dbg_lvl = CL_DBG_ERROR;
#else
u_int32_t		g_mlnx_dbg_lvl = CL_DBG_ERROR |
	MLNX_DBG_QPN |
	MLNX_DBG_MEM |
	MLNX_DBG_INFO |
	MLNX_DBG_TRACE |
	// MLNX_DBG_DIRECT |
	0;
#endif

u_int32_t		g_mlnx_dpc2thread = 0;

#ifdef MODULE_LICENSE
MODULE_LICENSE("Proprietary");
#endif

MODULE_PARM(g_mlnx_dbg_lvl, "i");
MODULE_PARM(g_mlnx_dpc2thread, "i");

cl_qlist_t		mlnx_hca_list;
//mlnx_hca_t		mlnx_hca_array[MLNX_MAX_HCA];
//uint32_t		mlnx_num_hca = 0;

mlnx_hob_t		mlnx_hob_array[MLNX_NUM_HOBKL];		// kernel HOB - one per HCA (cmdif access)

mlnx_hobul_t	*mlnx_hobul_array[MLNX_NUM_HOBUL];	// kernel HOBUL - one per HCA (kar access)

/* User verb library name */
/* TODO: Move to linux osd file.
char			mlnx_uvp_lib_name[MAX_LIB_NAME] = {"libmlnx_uvp.so"};
*/

static void
mlnx_async_dpc(
	IN				cl_async_proc_item_t		*async_item_p );

#if MLNX_COMP_MODEL
static void
mlnx_comp_dpc(
	IN				PRKDPC						p_dpc,
	IN				void						*context,
	IN				void						*arg1,		// hob pointer; name matches the definition below
	IN				void						*unused );
#else
static void
mlnx_comp_dpc(
	IN				cl_async_proc_item_t		*async_item_p );
#endif

// ### Callback Interface
static void
mlnx_comp_cb(
	IN				HH_hca_hndl_t				hh_hndl,
	IN				HH_cq_hndl_t				hh_cq,
	IN				void						*private_data);

static void
mlnx_async_cb(
	IN				HH_hca_hndl_t				hh_hndl,
	IN				HH_event_record_t			*hh_er_p,
	IN				void						*private_data);

/////////////////////////////////////////////////////////
// ### HCA
/////////////////////////////////////////////////////////
void
mlnx_hca_insert(
	IN				mlnx_hca_t					*p_hca )
{
	cl_spinlock_acquire( &hob_lock );
	cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item );
	cl_spinlock_release( &hob_lock );
}

void
mlnx_hca_remove(
	IN				mlnx_hca_t					*p_hca )
{
	cl_spinlock_acquire( &hob_lock );
	cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item );
	cl_spinlock_release( &hob_lock );
}

mlnx_hca_t*
mlnx_hca_from_guid(
	IN				ib_net64_t					guid )
{
	cl_list_item_t	*p_item;
	mlnx_hca_t		*p_hca = NULL;

	cl_spinlock_acquire( &hob_lock );
	p_item = cl_qlist_head( &mlnx_hca_list );
	while( p_item != cl_qlist_end( &mlnx_hca_list ) )
	{
		p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );
		if( p_hca->guid == guid )
			break;
		p_item = cl_qlist_next( p_item );
		p_hca = NULL;
	}
	cl_spinlock_release( &hob_lock );
	return p_hca;
}

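/*
 * Editor's note: a minimal usage sketch of the HCA list API above. The GUID
 * value and the surrounding registration flow are hypothetical; this block
 * is illustrative only and compiled out.
 */
#if 0
static void
example_hca_lookup( void )
{
	mlnx_hca_t	*p_hca;
	ib_net64_t	guid = CL_HTON64( 0x0002c90000000001ULL );	// hypothetical GUID

	// mlnx_hca_insert() must have been called for this HCA beforehand;
	// the lookup walks mlnx_hca_list under hob_lock.
	p_hca = mlnx_hca_from_guid( guid );
	if( p_hca )
	{
		// ... use p_hca, then remove it when the device goes away ...
		mlnx_hca_remove( p_hca );
	}
}
#endif
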
/*
void
mlnx_names_from_guid(
	IN				ib_net64_t					guid,
		OUT			char						**hca_name_p,
		OUT			char						**dev_name_p)
{
	unsigned int idx;

	if (!hca_name_p) return;
	if (!dev_name_p) return;

	for (idx = 0; idx < mlnx_num_hca; idx++)
	{
		if (mlnx_hca_array[idx].ifx.guid == guid)
		{
			*hca_name_p = mlnx_hca_array[idx].hca_name_p;
			*dev_name_p = mlnx_hca_array[idx].dev_name_p;
		}
	}
}
*/

/////////////////////////////////////////////////////////
// ### HOB
/////////////////////////////////////////////////////////
cl_status_t
mlnx_hobs_init( void )
{
	u_int32_t idx;

	cl_qlist_init( &mlnx_hca_list );

	for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)
	{
		mlnx_hob_array[idx].hh_hndl = NULL;
		mlnx_hob_array[idx].comp_cb_p = NULL;
		mlnx_hob_array[idx].async_cb_p = NULL;
		mlnx_hob_array[idx].ca_context = NULL;
		mlnx_hob_array[idx].async_proc_mgr_p = NULL;
		mlnx_hob_array[idx].cl_device_h = NULL;
		// mlnx_hob_array[idx].port_lmc_p = NULL;
		mlnx_hob_array[idx].index = idx;
		mlnx_hob_array[idx].mark = E_MARK_INVALID;
	}
	return cl_spinlock_init( &hob_lock );
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
ib_api_status_t
mlnx_hobs_insert(
	IN				mlnx_hca_t					*p_hca,
		OUT			mlnx_hob_t					**hob_pp)
{
	u_int32_t idx;
	ib_api_status_t status = IB_ERROR;
	mlnx_hob_t		*hob_p = NULL;
	mlnx_cache_t	*p_cache;

	p_cache = (mlnx_cache_t*)cl_pzalloc( sizeof(mlnx_cache_t) * 2 );
	if( !p_cache )
		return IB_INSUFFICIENT_MEMORY;

	cl_spinlock_acquire(&hob_lock);
	for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)
	{
		if (!mlnx_hob_array[idx].hh_hndl)
		{
			mlnx_hob_array[idx].hh_hndl = p_hca->hh_hndl;
			mlnx_hob_array[idx].mark = E_MARK_CA;
			hob_p = &mlnx_hob_array[idx];
			if (hob_pp) *hob_pp = hob_p;
			status = IB_SUCCESS;
			break;
		}
	}
	cl_spinlock_release(&hob_lock);

	if (IB_SUCCESS == status)
		hob_p->cache = p_cache;	// was (*hob_pp)->cache, which dereferenced NULL when hob_pp was omitted
	else
		cl_free( p_cache );

	return status;
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
ib_api_status_t
mlnx_hobs_set_cb(
	IN				mlnx_hob_t					*hob_p,
	IN				ci_completion_cb_t			comp_cb_p,
	IN				ci_async_event_cb_t			async_cb_p,
	IN		const	void* const					ib_context)
{
	cl_status_t		cl_status;

	// Verify handle
	CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

	// Set up the callbacks
	if (!hob_p->async_proc_mgr_p)
	{
		hob_p->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) );
		if( !hob_p->async_proc_mgr_p )
		{
			return IB_INSUFFICIENT_MEMORY;
		}
		cl_async_proc_construct( hob_p->async_proc_mgr_p );
		cl_status = cl_async_proc_init( hob_p->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" );
		if( cl_status != CL_SUCCESS )
		{
			cl_async_proc_destroy( hob_p->async_proc_mgr_p );
			cl_free(hob_p->async_proc_mgr_p);
			hob_p->async_proc_mgr_p = NULL;
			return IB_INSUFFICIENT_RESOURCES;
		}
	}

	if (hob_p->hh_hndl)
	{
		THH_hob_set_async_eventh(hob_p->hh_hndl,
			mlnx_async_cb,
			&hob_p->index); // This is the context our CB wants to receive
		THH_hob_set_comp_eventh( hob_p->hh_hndl,
			mlnx_comp_cb,
			&hob_p->index); // This is the context our CB wants to receive
		hob_p->comp_cb_p  = comp_cb_p;
		hob_p->async_cb_p = async_cb_p;
		hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL
		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hca_idx %d context 0x%p\n", hob_p - mlnx_hob_array, ib_context));
		return IB_SUCCESS;
	}
	return IB_ERROR;
}

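/*
 * Editor's note: a sketch of the expected HOB bring-up order, assuming
 * hypothetical IBAL-side handlers my_comp_handler/my_event_handler and an
 * already-opened HH handle inside p_hca. Illustrative only, compiled out.
 */
#if 0
static ib_api_status_t
example_hob_setup(
	IN				mlnx_hca_t					*p_hca,
	IN				ci_completion_cb_t			my_comp_handler,
	IN				ci_async_event_cb_t			my_event_handler,
	IN		const	void* const					ca_context )
{
	mlnx_hob_t		*hob_p;
	ib_api_status_t	status;

	// 1. Claim a HOB slot for this HCA.
	status = mlnx_hobs_insert( p_hca, &hob_p );
	if( IB_SUCCESS != status )
		return status;

	// 2. Hook HH async/completion events and remember the IBAL context.
	//    From here on, mlnx_async_cb/mlnx_comp_cb forward into IBAL.
	return mlnx_hobs_set_cb( hob_p, my_comp_handler, my_event_handler, ca_context );
}
#endif
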
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
ib_api_status_t
mlnx_hobs_get_context(
	IN				mlnx_hob_t					*hob_p,
		OUT			void						**context_p)
{
	// Verify handle
	CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

	if (hob_p->hh_hndl)
	{
		if (context_p) *context_p = &hob_p->index;
		return IB_SUCCESS;
	}
	return IB_ERROR;
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
void
mlnx_hobs_remove(
	IN				mlnx_hob_t					*hob_p)
{
	cl_async_proc_t	*p_async_proc;
	mlnx_cache_t	*p_cache;
	HH_hca_hndl_t	hh_hndl;

	// Verify handle
	CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

	cl_spinlock_acquire( &hob_lock );

	hob_p->mark = E_MARK_INVALID;

	p_async_proc = hob_p->async_proc_mgr_p;
	hob_p->async_proc_mgr_p = NULL;

	p_cache = hob_p->cache;
	hob_p->cache = NULL;

	hh_hndl = hob_p->hh_hndl;	// capture for the trace below; the field is cleared here
	hob_p->hh_hndl = NULL;
	hob_p->comp_cb_p = NULL;
	hob_p->async_cb_p = NULL;
	hob_p->ca_context = NULL;
	hob_p->cl_device_h = NULL;

	cl_spinlock_release( &hob_lock );

	if( p_async_proc )
	{
		cl_async_proc_destroy( p_async_proc );
		cl_free( p_async_proc );
	}

	if( p_cache )
		cl_free( p_cache );

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hobs_remove idx %d hh_hndl 0x%p\n", hob_p - mlnx_hob_array, hh_hndl));
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
ib_api_status_t
mlnx_hobs_lookup(
	IN				HH_hca_hndl_t				hndl,
		OUT			mlnx_hob_t					**hca_p)
{
	u_int32_t idx;

	if (!hca_p)
		return IB_ERROR;

	cl_spinlock_acquire( &hob_lock );
	for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)
	{
		if (hndl == mlnx_hob_array[idx].hh_hndl)
		{
			*hca_p = &mlnx_hob_array[idx];
			cl_spinlock_release( &hob_lock );
			return IB_SUCCESS;
		}
	}
	cl_spinlock_release( &hob_lock );
	return IB_ERROR;
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
void
mlnx_hobs_get_handle(
	IN				mlnx_hob_t					*hob_p,
		OUT			HH_hca_hndl_t				*hndl_p)
{
	// Verify handle
	CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

	if (hndl_p)
		*hndl_p = hob_p->hh_hndl;
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
mlnx_hobul_t *
mlnx_hobs_get_hobul(
	IN				mlnx_hob_t					*hob_p)
{
	// Verify handle
	if ((hob_p - mlnx_hob_array) >= MLNX_NUM_HOBKL)
		return NULL;

	return mlnx_hobul_array[hob_p->index];
}


static int priv_ceil_log2(u_int32_t n)
{
	int shift;

	for (shift = 31; shift > 0; shift--)
		if (n & (1 << shift)) break;

	if (((unsigned)1 << shift) < n) shift++;

	return shift;
}

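/*
 * Editor's note: worked values for priv_ceil_log2(), which feeds the index
 * masks below (MASK32(n) is assumed to expand to ((1 << n) - 1), as its use
 * alongside "mask = 0xFFFF" suggests). Illustrative only, compiled out.
 *
 *	priv_ceil_log2(1)       == 0
 *	priv_ceil_log2(0x10000) == 16	-> MASK32(16) == 0xFFFF (qp_idx_mask)
 *	priv_ceil_log2(0x10001) == 17	(rounds up for non-powers of two)
 */
#if 0
static void
example_idx_mask( void )
{
	u_int32_t max_num_qp  = 0x10000;	// hypothetical device capability
	u_int32_t qp_idx_mask = MASK32( priv_ceil_log2( max_num_qp ) );

	CL_ASSERT( qp_idx_mask == 0xFFFF );
}
#endif
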
/////////////////////////////////////////////////////////
// ### HOBUL
/////////////////////////////////////////////////////////
ib_api_status_t
mlnx_hobul_new(
	IN				mlnx_hob_t					*hob_p,
	IN				HH_hca_hndl_t				hh_hndl,
	IN				void						*resources_p)
{
	mlnx_hobul_t		*hobul_p;
	HH_hca_dev_t		*hca_ul_info;
	ib_api_status_t		status;
	VAPI_hca_cap_t		hca_caps;
	u_int32_t			i;
#if MLNX_COMP_MODEL == 1
	static uint32_t		proc_num = 0;
#endif

	// Verify handle
	CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

	if (NULL == (hobul_p = cl_zalloc( sizeof(mlnx_hobul_t))))
		return IB_INSUFFICIENT_MEMORY;

	// cl_zalloc above NULLs all pointers/sizes (relied upon in cleanup)

	hobul_p->hh_hndl = hh_hndl;

	if (HH_OK != THHUL_hob_create(resources_p, hh_hndl->dev_id, &hobul_p->hhul_hndl))
	{
		status = IB_INSUFFICIENT_RESOURCES;
		goto cleanup;
	}

	hca_ul_info = (HH_hca_dev_t *)hh_hndl;

	if (hca_ul_info)
	{
		hobul_p->vendor_id = hca_ul_info->vendor_id;
		hobul_p->device_id = hca_ul_info->dev_id;
		hobul_p->hca_ul_resources_p = resources_p;
		hobul_p->cq_ul_resources_sz = hca_ul_info->cq_ul_resources_sz;
		hobul_p->qp_ul_resources_sz = hca_ul_info->qp_ul_resources_sz;
		hobul_p->pd_ul_resources_sz = hca_ul_info->pd_ul_resources_sz;
	}

	if (HH_OK != THH_hob_query(hh_hndl, &hca_caps))
	{
		status = IB_ERROR;
		goto cleanup;
	}

	hobul_p->cq_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_cq));
	hobul_p->qp_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_qp)); // Currently mask = 0xFFFF
	hobul_p->max_pd = MASK32(priv_ceil_log2(hca_caps.max_pd_num)) + 1;
	hobul_p->max_cq = hobul_p->cq_idx_mask + 1;
	hobul_p->max_qp = hobul_p->qp_idx_mask + 1;

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: sizes cq 0x%x qp 0x%x pd 0x%x\n", hca_caps.max_num_cq, hca_caps.max_num_qp, hca_caps.max_pd_num));

	/* create and initialize the data structure for CQs */
	hobul_p->cq_info_tbl = cl_zalloc(hobul_p->max_cq * sizeof (cq_info_t));

	/* create and initialize the data structure for QPs */
	hobul_p->qp_info_tbl = cl_zalloc(hobul_p->max_qp * sizeof (qp_info_t));

	/* create and initialize the data structure for PDs */
	hobul_p->pd_info_tbl = cl_zalloc(hobul_p->max_pd * sizeof (pd_info_t));

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: alloc failed?  cq=%d qp=%d pd=%d\n",
		!hobul_p->cq_info_tbl, !hobul_p->qp_info_tbl, !hobul_p->pd_info_tbl));

	if (!hobul_p->pd_info_tbl ||
		!hobul_p->qp_info_tbl ||
		!hobul_p->cq_info_tbl)
	{
		status = IB_INSUFFICIENT_MEMORY;
		goto cleanup;
	}

	/* Initialize all mutexes. */
	for( i = 0; i < hobul_p->max_cq; i++ )
	{
		cl_mutex_construct( &hobul_p->cq_info_tbl[i].mutex );
#if MLNX_COMP_MODEL
		KeInitializeDpc( &hobul_p->cq_info_tbl[i].dpc,
			mlnx_comp_dpc, &hobul_p->cq_info_tbl[i] );
#if MLNX_COMP_MODEL == 1
		KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[i].dpc,
			(CCHAR)(proc_num++ % cl_proc_count()) );
#endif	/* MLNX_COMP_MODEL == 1 */
#endif	/* MLNX_COMP_MODEL */
	}

	for( i = 0; i < hobul_p->max_qp; i++ )
		cl_mutex_construct( &hobul_p->qp_info_tbl[i].mutex );

	for( i = 0; i < hobul_p->max_pd; i++ )
		cl_mutex_construct( &hobul_p->pd_info_tbl[i].mutex );

	for( i = 0; i < hobul_p->max_cq; i++ )
	{
		if( cl_mutex_init( &hobul_p->cq_info_tbl[i].mutex ) != CL_SUCCESS )
		{
			status = IB_ERROR;
			goto cleanup;
		}
	}

	for( i = 0; i < hobul_p->max_qp; i++ )
	{
		if( cl_mutex_init( &hobul_p->qp_info_tbl[i].mutex ) != CL_SUCCESS )
		{
			status = IB_ERROR;
			goto cleanup;
		}
	}

	for( i = 0; i < hobul_p->max_pd; i++ )
	{
		if( cl_mutex_init( &hobul_p->pd_info_tbl[i].mutex ) != CL_SUCCESS )
		{
			status = IB_ERROR;
			goto cleanup;
		}
	}

	hobul_p->log2_mpt_size = ((THH_hca_ul_resources_t *)resources_p)->log2_mpt_size;
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("log2_mpt_size = %d\n", hobul_p->log2_mpt_size));

	cl_spinlock_acquire(&hob_lock);
	mlnx_hobul_array[hob_p->index] = hobul_p;
	cl_spinlock_release(&hob_lock);

	return IB_SUCCESS;

cleanup:
	if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl );
	if (hobul_p->pd_info_tbl)
	{
		for( i = 0; i < hobul_p->max_pd; i++ )
			cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex );
		cl_free(hobul_p->pd_info_tbl);
	}
	if (hobul_p->qp_info_tbl)
	{
		for( i = 0; i < hobul_p->max_qp; i++ )
			cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex );
		cl_free(hobul_p->qp_info_tbl);
	}
	if (hobul_p->cq_info_tbl)
	{
		for( i = 0; i < hobul_p->max_cq; i++ )
			cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex );
		cl_free(hobul_p->cq_info_tbl);
	}
	cl_free( hobul_p );	// hobul_p is always non-NULL here
	return status;
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
void
mlnx_hobul_get(
	IN				mlnx_hob_t					*hob_p,
		OUT			void						**resources_p )
{
	mlnx_hobul_t		*hobul_p;

	// Verify handle
	CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

	hobul_p = mlnx_hobul_array[hob_p->index];

	if (hobul_p && resources_p)
	{
		*resources_p = hobul_p->hca_ul_resources_p;
	}
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
void
mlnx_hobul_delete(
	IN				mlnx_hob_t					*hob_p)
{
	mlnx_hobul_t		*hobul_p;
	u_int32_t			i;

	// Verify handle
	CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

	cl_spinlock_acquire(&hob_lock);
	hobul_p = mlnx_hobul_array[hob_p->index];
	mlnx_hobul_array[hob_p->index] = NULL;
	cl_spinlock_release(&hob_lock);

	if (!hobul_p) return;

	if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl );
	if (hobul_p->pd_info_tbl)
	{
		for( i = 0; i < hobul_p->max_pd; i++ )
			cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex );
		cl_free(hobul_p->pd_info_tbl);
	}
	if (hobul_p->qp_info_tbl)
	{
		for( i = 0; i < hobul_p->max_qp; i++ )
			cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex );
		cl_free(hobul_p->qp_info_tbl);
	}
	if (hobul_p->cq_info_tbl)
	{
		for( i = 0; i < hobul_p->max_cq; i++ )
		{
#if MLNX_COMP_MODEL
			// The per-CQ DPC is only initialized when MLNX_COMP_MODEL is set.
			KeRemoveQueueDpc( &hobul_p->cq_info_tbl[i].dpc );
#endif	/* MLNX_COMP_MODEL */
			cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex );
		}
		cl_free(hobul_p->cq_info_tbl);
	}
	cl_free( hobul_p );	// hobul_p is non-NULL here
}

/////////////////////////////////////////////////////////
// ### Callbacks
/////////////////////////////////////////////////////////

ib_async_event_t
mlnx_map_vapi_event_type(
	IN				unsigned					event_id,
		OUT			ENUM_EVENT_CLASS			*event_class_p)
{
	switch (event_id)
	{
	case VAPI_QP_PATH_MIGRATED:
		if (event_class_p) *event_class_p = E_EV_QP;
		return IB_AE_QP_APM;

	case VAPI_QP_COMM_ESTABLISHED:
		if (event_class_p) *event_class_p = E_EV_QP;
		return IB_AE_QP_COMM;

	case VAPI_SEND_QUEUE_DRAINED:
		if (event_class_p) *event_class_p = E_EV_QP;
		return IB_AE_SQ_DRAINED;

	case VAPI_CQ_ERROR:
		if (event_class_p) *event_class_p = E_EV_CQ;
		return IB_AE_CQ_ERROR;

	case VAPI_LOCAL_WQ_INV_REQUEST_ERROR:
		if (event_class_p) *event_class_p = E_EV_QP;
		return IB_AE_WQ_REQ_ERROR;

	case VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR:
		if (event_class_p) *event_class_p = E_EV_QP;
		return IB_AE_WQ_ACCESS_ERROR;

	case VAPI_LOCAL_WQ_CATASTROPHIC_ERROR:
		if (event_class_p) *event_class_p = E_EV_QP;
		return IB_AE_QP_FATAL;

	case VAPI_PATH_MIG_REQ_ERROR:
		if (event_class_p) *event_class_p = E_EV_QP;
		return IB_AE_QP_APM_ERROR;

	case VAPI_LOCAL_CATASTROPHIC_ERROR:
		if (event_class_p) *event_class_p = E_EV_CA;
		return IB_AE_LOCAL_FATAL;

	case VAPI_PORT_ERROR:
		/*
		 * In tavor_hca\src\Hca\hcahal\tavor\eventp\event_irqh.c:
		 * TAVOR_IF_EV_TYPE_PORT_ERR maps one of two port events:
		 *	- TAVOR_IF_SUB_EV_PORT_DOWN
		 *	- TAVOR_IF_SUB_EV_PORT_UP
		 *
		 * These map to (respectively)
		 *	- VAPI_PORT_ERROR
		 *	- VAPI_PORT_ACTIVE
		 */
		if (event_class_p) *event_class_p = E_EV_CA;
		return IB_AE_PORT_DOWN; /* INIT, ARMED, DOWN */

	case VAPI_PORT_ACTIVE:
		if (event_class_p) *event_class_p = E_EV_CA;
		return IB_AE_PORT_ACTIVE; /* ACTIVE STATE */

	case VAPI_CLIENT_REREGISTER:
		if (event_class_p) *event_class_p = E_EV_CA;
		return IB_AE_CLIENT_REREGISTER; /* client reregistration request */

	default:
		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n",
			event_id, VAPI_CLIENT_REREGISTER, IB_AE_LOCAL_FATAL));
		if (event_class_p) *event_class_p = E_EV_CA;
		return IB_AE_LOCAL_FATAL;
	}
}

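/*
 * Editor's note: the client reregister support added by this change maps the
 * VAPI event onto the IBAL CA-class event, so IBAL sees it with the CA
 * context. A minimal check of that mapping (illustrative only, compiled out):
 */
#if 0
static void
example_client_rereg_mapping( void )
{
	ENUM_EVENT_CLASS	event_class;
	ib_async_event_t	ae;

	ae = mlnx_map_vapi_event_type( VAPI_CLIENT_REREGISTER, &event_class );
	CL_ASSERT( ae == IB_AE_CLIENT_REREGISTER );
	CL_ASSERT( event_class == E_EV_CA );	// delivered with hob_p->ca_context
}
#endif
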
void
mlnx_conv_vapi_event(
	IN				HH_event_record_t			*hh_event_p,
	IN				ib_event_rec_t				*ib_event_p,
		OUT			ENUM_EVENT_CLASS			*event_class_p)
{

	// ib_event_p->context is handled by the caller
	//
	ib_event_p->type = mlnx_map_vapi_event_type(hh_event_p->etype, event_class_p);

	// no traps currently generated
	// ib_event_p->trap_info.lid  =  ;
	// ib_event_p->trap_info.port_guid = ;
	// ib_event_p->trap_info.port_num  = hh_er;
}

void
mlnx_async_cb(
	IN				HH_hca_hndl_t				hh_hndl,
	IN				HH_event_record_t			*hh_er_p,
	IN				void						*private_data)
{
	u_int32_t			obj_idx;
	mlnx_hob_t			*hob_p;

	mlnx_cb_data_t		cb_data;
	mlnx_cb_data_t		*cb_data_p;

	CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC CB %p (0x%x)\n",
		private_data, (private_data) ? *(u_int32_t *)private_data : 0xB5));

	if (!private_data || !hh_er_p) return;

	obj_idx = *(u_int32_t *)private_data;
	if (obj_idx >= MLNX_NUM_HOBKL) return;

	hob_p = mlnx_hob_array + obj_idx;

	// g_mlnx_dpc2thread is initialized as a module parameter (default: disabled(0))
	if (g_mlnx_dpc2thread)
	{
		cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t));
		if (!cb_data_p) return;

		cb_data_p->hh_hndl      = hh_hndl;
		cb_data_p->private_data = private_data;
		cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t));
		cb_data_p->async_item.pfn_callback = mlnx_async_dpc;
		cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item );
	} else
	{
		cb_data_p = &cb_data;

		cb_data_p->hh_hndl      = hh_hndl;
		cb_data_p->private_data = private_data;
		cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t));
		mlnx_async_dpc( &cb_data_p->async_item );
	}
}

static void
mlnx_async_dpc(
	IN				cl_async_proc_item_t		*async_item_p )
{
	HH_event_record_t	*hh_er_p;
	u_int32_t			obj_idx;
	mlnx_hob_t			*hob_p;
	mlnx_hobul_t		*hobul_p;
	mlnx_cb_data_t		*cb_data_p;

	ENUM_EVENT_CLASS	event_class;
	ib_event_rec_t		event_r;

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("ASYNC DPC %p\n", async_item_p));

	cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item );

	if (!cb_data_p) return;

	hh_er_p = &cb_data_p->hh_er;
	obj_idx = *(u_int32_t *)cb_data_p->private_data;
	hob_p = mlnx_hob_array + obj_idx;
	hobul_p = mlnx_hobul_array[obj_idx];

	CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC type %d ca_context %p\n",
		hh_er_p->etype, hob_p->ca_context));

	if (!hob_p ||
		!hobul_p ||
		!hob_p->hh_hndl ||
		!hob_p->async_cb_p)
	{
		goto cleanup;
	}

	cl_memclr(&event_r, sizeof(event_r));
	mlnx_conv_vapi_event(hh_er_p, &event_r, &event_class);

	switch(event_class)
	{
	case E_EV_CA:
		event_r.context = (void *)hob_p->ca_context;
		break;

	case E_EV_QP:
		{
			obj_idx = hh_er_p->event_modifier.qpn;
			if (obj_idx < hobul_p->max_qp)
				event_r.context = (void *)hobul_p->qp_info_tbl[obj_idx].qp_context;
			else
			{
				CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad qpn 0x%x max 0x%x\n", obj_idx, hobul_p->max_qp));
				goto cleanup;
			}
		}
		break;

	case E_EV_CQ:
		{
			obj_idx = hh_er_p->event_modifier.cq;
			if (obj_idx < hobul_p->max_cq)
				event_r.context = (void *)hobul_p->cq_info_tbl[obj_idx].cq_context;
			else
			{
				CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad cqn 0x%x max 0x%x\n", obj_idx, hobul_p->max_cq));
				goto cleanup;
			}
		}
		break;

	case E_EV_LAST:
	default:
		// CL_ASSERT(0); // This shouldn't happen
		CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC unknown event_class 0x%x\n", event_class));
		break;
	}

	// Call the registered CB
	(*hob_p->async_cb_p)(&event_r);
	// Fall through
cleanup:
	if (g_mlnx_dpc2thread)
	{
		cl_free(cb_data_p);
	}
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
void
mlnx_comp_cb(
	IN				HH_hca_hndl_t				hh_hndl,
	IN				HH_cq_hndl_t				hh_cq,
	IN				void						*private_data)
{
#if MLNX_COMP_MODEL
	u_int32_t			cq_num;
	u_int32_t			hca_idx;
	mlnx_hob_t			*hob_p;
	mlnx_hobul_t		*hobul_p;
#if MLNX_COMP_MODEL == 2
	static uint32_t		proc_num = 0;
#endif

	CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data));

	UNUSED_PARAM( hh_hndl );

	hca_idx = *(u_int32_t *)private_data;
	hob_p   = mlnx_hob_array + hca_idx;
	hobul_p = mlnx_hobul_array[hca_idx];
	cq_num  = hh_cq & hobul_p->cq_idx_mask;

	if (NULL != hob_p && NULL != hobul_p &&
		hob_p->hh_hndl && hob_p->comp_cb_p)
	{
		if (cq_num < hobul_p->max_cq)
		{
#if MLNX_COMP_MODEL == 2
			KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[cq_num].dpc,
				(CCHAR)(proc_num++ % cl_proc_count()) );
#endif	/* MLNX_COMP_MODEL == 2 */
			KeInsertQueueDpc( &hobul_p->cq_info_tbl[cq_num].dpc,
				hob_p, NULL );
		}
		else
		{
			HCA_TRACE( HCA_DBG_ERROR, ("CQ index out of range!!!\n") );
		}
	}
#else	/* MLNX_COMP_MODEL */
	u_int32_t			obj_idx;
	mlnx_hob_t			*hob_p;

	mlnx_cb_data_t		cb_data;
	mlnx_cb_data_t		*cb_data_p;

	CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data));

	if (!private_data) return;

	obj_idx = *(u_int32_t *)private_data;
	hob_p = mlnx_hob_array + obj_idx;
	if (!hob_p) return;

	if (g_mlnx_dpc2thread)
	{
		cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t));
		if (!cb_data_p) return;

		cb_data_p->hh_hndl      = hh_hndl;
		cb_data_p->hh_cq        = hh_cq;
		cb_data_p->private_data = private_data;

		cb_data_p->async_item.pfn_callback = mlnx_comp_dpc;

		// Report completion through async_proc
		cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item );

	} else
	{
		cb_data_p = &cb_data;

		cb_data_p->hh_hndl      = hh_hndl;
		cb_data_p->hh_cq        = hh_cq;
		cb_data_p->private_data = private_data;

		// Report completion directly from DPC (verbs should NOT sleep)
		mlnx_comp_dpc( &cb_data_p->async_item );
	}
#endif	/* MLNX_COMP_MODEL */
}

#if MLNX_COMP_MODEL
static void
mlnx_comp_dpc(
	IN				PRKDPC						p_dpc,
	IN				void						*context,
	IN				void						*arg1,
	IN				void						*unused )
{
	mlnx_hob_t		*hob_p = (mlnx_hob_t*)arg1;
	UNUSED_PARAM( p_dpc );
	UNUSED_PARAM( unused );

	hob_p->comp_cb_p( (void*)((cq_info_t*)context)->cq_context );
}
#else	/* MLNX_COMP_MODEL */
static void
mlnx_comp_dpc(
	IN				cl_async_proc_item_t		*async_item_p )
{
	u_int32_t			cq_num;
	u_int32_t			hca_idx;
	mlnx_hob_t			*hob_p;
	mlnx_hobul_t		*hobul_p;
	mlnx_cb_data_t		*cb_data_p;

	CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP DPC %p\n", async_item_p));

	cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item );
	if (!cb_data_p) return;

	hca_idx = *(u_int32_t *)cb_data_p->private_data;
	hob_p   = mlnx_hob_array + hca_idx;
	hobul_p = mlnx_hobul_array[hca_idx];
	cq_num  = (u_int32_t)cb_data_p->hh_cq & hobul_p->cq_idx_mask;

	if (NULL != hob_p && NULL != hobul_p &&
		hob_p->hh_hndl && hob_p->comp_cb_p)
	{
		if (cq_num < hobul_p->max_cq)
		{
			(*hob_p->comp_cb_p)((void *)hobul_p->cq_info_tbl[cq_num].cq_context);
		}
	}

	if (g_mlnx_dpc2thread)
	{
		cl_free(cb_data_p);
	}
}
#endif	/* MLNX_COMP_MODEL */

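/*
 * Editor's note: in both completion models above, the net effect of a HH
 * completion on a CQ is one call into IBAL's completion handler with that
 * CQ's stored context. A condensed sketch of the dispatch (illustrative
 * only, compiled out):
 */
#if 0
static void
example_comp_dispatch(
	IN				mlnx_hob_t					*hob_p,
	IN				mlnx_hobul_t				*hobul_p,
	IN				HH_cq_hndl_t				hh_cq )
{
	u_int32_t cq_num = hh_cq & hobul_p->cq_idx_mask;	// strip high consumer bits

	if( cq_num < hobul_p->max_cq )
		(*hob_p->comp_cb_p)( (void *)hobul_p->cq_info_tbl[cq_num].cq_context );
}
#endif
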
// ### Conversions

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
VAPI_mrw_acl_t
map_ibal_acl(
	IN				ib_access_t					ibal_acl)
{
	VAPI_mrw_acl_t		vapi_acl = 0;

	if (ibal_acl & IB_AC_RDMA_READ)   vapi_acl |= VAPI_EN_REMOTE_READ;
	if (ibal_acl & IB_AC_RDMA_WRITE)  vapi_acl |= VAPI_EN_REMOTE_WRITE;
	if (ibal_acl & IB_AC_ATOMIC)      vapi_acl |= VAPI_EN_REMOTE_ATOM;
	if (ibal_acl & IB_AC_LOCAL_WRITE) vapi_acl |= VAPI_EN_LOCAL_WRITE;
	if (ibal_acl & IB_AC_MW_BIND)     vapi_acl |= VAPI_EN_MEMREG_BIND;

	return vapi_acl;
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
ib_access_t
map_vapi_acl(
	IN				VAPI_mrw_acl_t				vapi_acl)
{
	ib_access_t ibal_acl = 0;

	if (vapi_acl & VAPI_EN_REMOTE_READ)  ibal_acl |= IB_AC_RDMA_READ;
	if (vapi_acl & VAPI_EN_REMOTE_WRITE) ibal_acl |= IB_AC_RDMA_WRITE;
	if (vapi_acl & VAPI_EN_REMOTE_ATOM)  ibal_acl |= IB_AC_ATOMIC;
	if (vapi_acl & VAPI_EN_LOCAL_WRITE)  ibal_acl |= IB_AC_LOCAL_WRITE;
	if (vapi_acl & VAPI_EN_MEMREG_BIND)  ibal_acl |= IB_AC_MW_BIND;

	return ibal_acl;
}

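/*
 * Editor's note: map_ibal_acl()/map_vapi_acl() are inverses over the five
 * flags they handle, so a round trip preserves the access bits, e.g.
 * (illustrative only, compiled out):
 */
#if 0
static void
example_acl_round_trip( void )
{
	ib_access_t ibal_acl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_READ | IB_AC_RDMA_WRITE;

	CL_ASSERT( map_vapi_acl( map_ibal_acl( ibal_acl ) ) == ibal_acl );
}
#endif
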
/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
static VAPI_rdma_atom_acl_t
map_ibal_qp_acl(
	IN				ib_access_t					ibal_acl)
{
	VAPI_rdma_atom_acl_t vapi_qp_acl = 0;

	if (ibal_acl & IB_AC_RDMA_WRITE) vapi_qp_acl |= VAPI_EN_REM_WRITE;
	if (ibal_acl & IB_AC_RDMA_READ)  vapi_qp_acl |= VAPI_EN_REM_READ;
	if (ibal_acl & IB_AC_ATOMIC)     vapi_qp_acl |= VAPI_EN_REM_ATOMIC_OP;

	return vapi_qp_acl;
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
static ib_access_t
map_vapi_qp_acl(
	IN				VAPI_rdma_atom_acl_t		vapi_qp_acl)
{
	ib_access_t	ibal_acl = IB_AC_LOCAL_WRITE;

	if (vapi_qp_acl & VAPI_EN_REM_WRITE)     ibal_acl |= IB_AC_RDMA_WRITE;
	if (vapi_qp_acl & VAPI_EN_REM_READ)      ibal_acl |= IB_AC_RDMA_READ;
	if (vapi_qp_acl & VAPI_EN_REM_ATOMIC_OP) ibal_acl |= IB_AC_ATOMIC;

	return ibal_acl;
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
ib_api_status_t
mlnx_lock_region(
	IN				mlnx_mro_t					*mro_p,
	IN				boolean_t					um_call )
{
	MOSAL_iobuf_t	old_iobuf;

	// Find context
	if( um_call )
		mro_p->mr_prot_ctx = MOSAL_get_current_prot_ctx();
	else
		mro_p->mr_prot_ctx = MOSAL_get_kernel_prot_ctx();

	// Save pointer to existing locked region.
	old_iobuf = mro_p->mr_iobuf;

	// Lock Region
	if (MT_OK != MOSAL_iobuf_register((MT_virt_addr_t)mro_p->mr_start,
		(MT_size_t)mro_p->mr_size,
		mro_p->mr_prot_ctx,
		mro_p->mr_mosal_perm,
		&mro_p->mr_iobuf,
		0 ))
	{
		return IB_ERROR;
	}

	if( old_iobuf )
	{
		if( MT_OK != MOSAL_iobuf_deregister( old_iobuf ) )
			return IB_ERROR;
	}

	return IB_SUCCESS;
}

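/*
 * Editor's note: mlnx_lock_region() deliberately registers the new iobuf
 * before deregistering any previously locked one, so a re-lock (e.g. for a
 * region resize) never leaves a window with no pages pinned. A hedged
 * re-lock sketch (hypothetical caller; field types assumed from their use
 * above; illustrative only, compiled out):
 */
#if 0
static ib_api_status_t
example_relock(
	IN				mlnx_mro_t					*mro_p,
	IN				IB_virt_addr_t				new_start,
	IN				uint64_t					new_size )
{
	mro_p->mr_start = new_start;
	mro_p->mr_size  = new_size;
	// The old iobuf, if any, is released only after the new one is pinned.
	return mlnx_lock_region( mro_p, TRUE /* um_call */ );
}
#endif
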

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
ib_api_status_t
mlnx_conv_ibal_mr_create(
	IN				u_int32_t					pd_idx,
	IN	OUT			mlnx_mro_t					*mro_p,
	IN				VAPI_mr_change_t			change_flags,
	IN				ib_mr_create_t const		*p_mr_create,
	IN				boolean_t					um_call,
		OUT			HH_mr_t						*mr_props_p )
{
	ib_api_status_t		status;

	/* Set ACL information first since it is used to lock the region. */
	if( change_flags & VAPI_MR_CHANGE_ACL )
	{
		mro_p->mr_acl = map_ibal_acl( p_mr_create->access_ctrl );
		// This computation should be externalized by THH
		mro_p->mr_mosal_perm =
			MOSAL_PERM_READ |
			((mro_p->mr_acl & VAPI_EN_LOCAL_WRITE) ? MOSAL_PERM_WRITE : 0);
	}

	if( change_flags & VAPI_MR_CHANGE_TRANS )
	{
		CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("addr 0x%p size %"PRId64"\n", (void *)p_mr_create->vaddr, p_mr_create->length));
		// Build TPT entries
		mro_p->mr_start = (IB_virt_addr_t)p_mr_create->vaddr;
		mro_p->mr_size = p_mr_create->length;
		if (IB_SUCCESS != (status = mlnx_lock_region(mro_p, um_call)))
		{
			return status;
		}
	}

	/* Now fill in the MR properties. */
	mr_props_p->start = mro_p->mr_start;
	mr_props_p->size = mro_p->mr_size;
	mr_props_p->acl = mro_p->mr_acl;
	mr_props_p->pd = pd_idx;

	// Setup MTT info
	mr_props_p->tpt.tpt_type = HH_TPT_IOBUF;
	mr_props_p->tpt.tpt.iobuf = mro_p->mr_iobuf;

	return IB_SUCCESS;
}

/////////////////////////////////////////////////////////
// On entry mro_p->mr_start holds the pmr address
/////////////////////////////////////////////////////////
ib_api_status_t
mlnx_conv_ibal_pmr_create(
	IN				u_int32_t					pd_idx,
	IN				mlnx_mro_t					*mro_p,
	IN				ib_phys_create_t const		*p_pmr_create,
		OUT			HH_mr_t						*mr_props_p )
{
	VAPI_phy_addr_t*	buf_lst = NULL;
	VAPI_size_t*		sz_lst = NULL;
	u_int32_t			i;
	u_int32_t			page_shift = priv_ceil_log2(p_pmr_create->hca_page_size);
	u_int64_t			page_mask = ((u_int64_t)1 << page_shift) - 1;	// 64-bit shift; a 32-bit shift would overflow for page_shift >= 31
	u_int64_t			tot_sz = 0;

	CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl,
		("PRE: addr %p size 0x%"PRIx64" shift %d\n",
		(void *)(uintn_t)mro_p->mr_start, p_pmr_create->length, page_shift));
	mro_p->mr_start = (mro_p->mr_start & ~page_mask) | (p_pmr_create->buf_offset & page_mask);
	CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl,
		("POST: addr %p\n", (void *)(uintn_t)mro_p->mr_start));

	mr_props_p->start = mro_p->mr_start;
	mr_props_p->size = p_pmr_create->length;
	mr_props_p->acl = map_ibal_acl(p_pmr_create->access_ctrl);
	mr_props_p->pd = pd_idx;

#ifdef _DEBUG_
	mro_p->mr_size		= mr_props_p->size;
//	mro_p->mr_first_page_addr = 0;
//	mro_p->mr_num_pages       = (mro_p->mr_end >> PAGESHIFT) + 1 - (mro_p->mr_start >> PAGESHIFT);
//	CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st pg addr 0x%p pages %d\n",
//		(void *)mro_p->mr_first_page_addr, p_pmr_create->num_bufs));
	CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st phys addr 0x%"PRIx64" phys pages %d\n",
		p_pmr_create->range_array[0].base_addr, p_pmr_create->num_ranges));
#endif

	// Build TPT entries
	if (!p_pmr_create->range_array)
	{
		return IB_INVALID_PARAMETER;
	}

	if (p_pmr_create->hca_page_size !=
		MT_DOWN_ALIGNX_PHYS(p_pmr_create->hca_page_size, page_shift))
	{
		CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf size is not page aligned\n"));
		return IB_INVALID_PARAMETER;
	}

	for (i = 0; i < p_pmr_create->num_ranges; i++)
	{
		uint64_t	start_addr = p_pmr_create->range_array[i].base_addr;
		uint64_t	end_addr = start_addr + p_pmr_create->range_array[i].size;

		if( end_addr < start_addr )
		{
			CL_TRACE( CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf end < start\n") );
			return IB_INVALID_PARAMETER;
		}

		if (start_addr !=
			MT_DOWN_ALIGNX_PHYS(start_addr, page_shift))
		{
			CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf start address is not page aligned\n"));
			return IB_INVALID_PARAMETER;
		}

		tot_sz += p_pmr_create->range_array[i].size;
	}

	if( tot_sz < p_pmr_create->length + p_pmr_create->buf_offset )
	{
		HCA_TRACE_EXIT( HCA_DBG_ERROR,
			("length(0x%"PRIx64") + buf offset(0x%"PRIx64") larger than sum "
			"of phys ranges(0x%"PRIx64")\n",
			p_pmr_create->length, p_pmr_create->buf_offset, tot_sz) );
		return IB_INVALID_PARAMETER;
	}

	if( p_pmr_create->buf_offset > p_pmr_create->range_array[0].size )
	{
		HCA_TRACE_EXIT( HCA_DBG_ERROR,
			("buf offset(0x%"PRIx64") greater than 1st phys range size(0x%"PRIx64")\n",
			p_pmr_create->buf_offset, p_pmr_create->range_array[0].size) );
		return IB_INVALID_PARAMETER;
	}

	/* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */
	buf_lst = (VAPI_phy_addr_t*)cl_pzalloc( sizeof(VAPI_phy_addr_t)*(p_pmr_create->num_ranges));
	if (!buf_lst)
	{
		HCA_TRACE_EXIT( HCA_DBG_ERROR,
			("Failed to allocate range address list.\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */
	sz_lst = (VAPI_size_t*)cl_pzalloc( sizeof(VAPI_size_t)*(p_pmr_create->num_ranges));
	if (!sz_lst)
	{
		cl_free( buf_lst );
		HCA_TRACE_EXIT( HCA_DBG_ERROR,
			("Failed to allocate range size list.\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	for (i = 0; i < p_pmr_create->num_ranges; i++)
	{
		buf_lst[i] = p_pmr_create->range_array[i].base_addr;
		sz_lst[i] = p_pmr_create->range_array[i].size;
	}

	mr_props_p->tpt.tpt_type = HH_TPT_BUF;
	mr_props_p->tpt.num_entries = p_pmr_create->num_ranges;
	mr_props_p->tpt.tpt.buf_lst.buf_sz_lst = sz_lst;
	mr_props_p->tpt.tpt.buf_lst.phys_buf_lst = buf_lst;
	mr_props_p->tpt.tpt.buf_lst.iova_offset = p_pmr_create->buf_offset;

	return IB_SUCCESS;
}

u_int8_t
mlnx_gid_to_index(
	IN				HH_hca_hndl_t				hh_hndl,
	IN				u_int8_t					port_num,
	IN				u_int8_t					*raw_gid)
{
	ib_gid_t	*gid_table_p = NULL;
	u_int8_t	index = 0; // default return value
	u_int8_t	i;

	gid_table_p = cl_zalloc( 64*sizeof(ib_gid_t));
	if (!gid_table_p)
		return index;	// allocation failed; fall back to the default index

	mlnx_get_hca_gid_tbl(hh_hndl, port_num, 64, gid_table_p);

	for (i = 0; i < 64; i++)
	{
		if (!cl_memcmp(raw_gid, gid_table_p[i].raw, sizeof(ib_gid_t)))
		{
			CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("1: found GID at index %d\n", i));
			index = i;
			break;
		}
	}

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("2: returning GID index %d\n", index));

	cl_free( gid_table_p);
	return index;
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
void
mlnx_conv_ibal_av(
	IN				HH_hca_hndl_t				hh_hndl,
	IN		const	ib_av_attr_t				*ibal_av_p,
		OUT			VAPI_ud_av_t				*vapi_av_p)
{
	vapi_av_p->port = ibal_av_p->port_num;
	vapi_av_p->sl   = ibal_av_p->sl;
	vapi_av_p->dlid = cl_ntoh16 (ibal_av_p->dlid);

	vapi_av_p->static_rate   =
		(ibal_av_p->static_rate == IB_PATH_RECORD_RATE_10_GBS ? 0 : 3);
	ib_grh_get_ver_class_flow( ibal_av_p->grh.ver_class_flow, NULL,
		&vapi_av_p->traffic_class, &vapi_av_p->flow_label );
	vapi_av_p->src_path_bits = ibal_av_p->path_bits; // PATH:
	//vapi_av_p->src_path_bits = 0;

	/* For global destination or Multicast address: */
	if (ibal_av_p->grh_valid)
	{
		vapi_av_p->grh_flag = TRUE;
		vapi_av_p->hop_limit     = ibal_av_p->grh.hop_limit;
		// CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("raw %p, &raw %p\n", ibal_av_p->grh.src_gid.raw, &ibal_av_p->grh.src_gid.raw));
		vapi_av_p->sgid_index    = mlnx_gid_to_index(hh_hndl, ibal_av_p->port_num, (u_int8_t *)ibal_av_p->grh.src_gid.raw);
		cl_memcpy(vapi_av_p->dgid, ibal_av_p->grh.dest_gid.raw, sizeof(vapi_av_p->dgid));
	}
}

/////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////
void
mlnx_conv_vapi_av(
	IN				HH_hca_hndl_t				hh_hndl,
	IN		const	VAPI_ud_av_t				*vapi_av_p,
		OUT			ib_av_attr_t				*ibal_av_p)
{
	uint8_t		ver;

	ibal_av_p->port_num = vapi_av_p->port;
	ibal_av_p->sl       = vapi_av_p->sl;
	ibal_av_p->dlid     = cl_ntoh16(vapi_av_p->dlid);

	/* For global destination or Multicast address: */
	ibal_av_p->grh_valid = vapi_av_p->grh_flag;

	ver = 2;
	ibal_av_p->grh.ver_class_flow = ib_grh_set_ver_class_flow( ver,
		vapi_av_p->traffic_class,
		vapi_av_p->flow_label);
	ibal_av_p->grh.hop_limit = vapi_av_p->hop_limit;

	THH_hob_get_sgid(hh_hndl,
		vapi_av_p->port,
		vapi_av_p->sgid_index,
		&ibal_av_p->grh.src_gid.raw);

	cl_memcpy(ibal_av_p->grh.dest_gid.raw, vapi_av_p->dgid, sizeof(vapi_av_p->dgid));

	ibal_av_p->static_rate = (vapi_av_p->static_rate ?
		IB_PATH_RECORD_RATE_2_5_GBS : IB_PATH_RECORD_RATE_10_GBS);
	ibal_av_p->path_bits   = vapi_av_p->src_path_bits;
}

1368 /////////////////////////////////////////////////////////\r
1369 /////////////////////////////////////////////////////////\r
1370 int\r
1371 mlnx_map_vapi_cqe_status(\r
1372         IN                              VAPI_wc_status_t                        vapi_status)\r
1373 {\r
1374         switch (vapi_status)\r
1375         {\r
1376         case IB_COMP_SUCCESS:           return IB_WCS_SUCCESS;\r
1377         case IB_COMP_LOC_LEN_ERR:       return IB_WCS_LOCAL_LEN_ERR;\r
1378         case IB_COMP_LOC_QP_OP_ERR:     return IB_WCS_LOCAL_OP_ERR;\r
1379         case IB_COMP_LOC_PROT_ERR:      return IB_WCS_LOCAL_PROTECTION_ERR;\r
1380         case IB_COMP_WR_FLUSH_ERR:      return IB_WCS_WR_FLUSHED_ERR;\r
1381         case IB_COMP_MW_BIND_ERR:       return IB_WCS_MEM_WINDOW_BIND_ERR;\r
1382         case IB_COMP_REM_INV_REQ_ERR:   return IB_WCS_REM_INVALID_REQ_ERR;\r
1383         case IB_COMP_REM_ACCESS_ERR:    return IB_WCS_REM_ACCESS_ERR;\r
1384         case IB_COMP_REM_OP_ERR:        return IB_WCS_REM_OP_ERR;\r
1385         case IB_COMP_RETRY_EXC_ERR:     return IB_WCS_TIMEOUT_RETRY_ERR;\r
1386         case IB_COMP_RNR_RETRY_EXC_ERR: return IB_WCS_RNR_RETRY_ERR;\r
1387 	case IB_COMP_REM_ABORT_ERR:     return IB_WCS_REM_ACCESS_ERR; // approximate - no exact IBAL equivalent\r
1388 	case IB_COMP_FATAL_ERR:         return IB_WCS_REM_ACCESS_ERR; // approximate - no exact IBAL equivalent\r
1389 	case IB_COMP_GENERAL_ERR:       return IB_WCS_REM_ACCESS_ERR; // approximate - no exact IBAL equivalent\r
1390         default:\r
1391                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n",\r
1392                         vapi_status, IB_COMP_GENERAL_ERR, IB_WCS_REM_ACCESS_ERR));\r
1393                 return IB_WCS_REM_ACCESS_ERR;\r
1394         }\r
1395 }\r
1396 \r
1397 /////////////////////////////////////////////////////////\r
1398 /////////////////////////////////////////////////////////\r
1399 int\r
1400 mlnx_map_vapi_cqe_type(\r
1401         IN                              VAPI_cqe_opcode_t                       opcode)\r
1402 {\r
1403         switch (opcode)\r
1404         {\r
1405         case VAPI_CQE_SQ_SEND_DATA:     return IB_WC_SEND;\r
1406         case VAPI_CQE_SQ_RDMA_WRITE:    return IB_WC_RDMA_WRITE;\r
1407         case VAPI_CQE_SQ_RDMA_READ:     return IB_WC_RDMA_READ;\r
1408         case VAPI_CQE_SQ_COMP_SWAP:     return IB_WC_COMPARE_SWAP;\r
1409         case VAPI_CQE_SQ_FETCH_ADD:     return IB_WC_FETCH_ADD;\r
1410         case VAPI_CQE_SQ_BIND_MRW:      return IB_WC_MW_BIND;\r
1411         case VAPI_CQE_RQ_SEND_DATA:     return IB_WC_RECV;\r
1412         case VAPI_CQE_RQ_RDMA_WITH_IMM: return IB_WC_RECV_RDMA_WRITE;\r
1413         default:\r
1414                 return IB_WC_SEND;\r
1415         }\r
1416 }\r
1417 \r
1418 /////////////////////////////////////////////////////////\r
1419 // Map Remote Node Addr Type\r
1420 /////////////////////////////////////////////////////////\r
1421 int\r
1422 mlnx_map_vapi_rna_type(\r
1423         IN                              VAPI_remote_node_addr_type_t    rna)\r
1424 {\r
1425         switch (rna)\r
1426         {\r
1427         case VAPI_RNA_UD:       return IB_QPT_UNRELIABLE_DGRM;\r
1428         case VAPI_RNA_RAW_ETY:  return IB_QPT_RAW_ETHER;\r
1429         case VAPI_RNA_RAW_IPV6: return IB_QPT_RAW_IPV6;\r
1430         default:\r
1431                 return IB_QPT_RELIABLE_CONN;\r
1432         }\r
1433 }\r
1434 \r
1435 //////////////////////////////////////////////////////////////\r
1436 // Convert from VAPI memory-region attributes to IBAL \r
1437 //////////////////////////////////////////////////////////////\r
1438 void\r
1439 mlnx_conv_vapi_mr_attr(\r
1440         IN                              ib_pd_handle_t                          pd_h,\r
1441         IN                              HH_mr_info_t                            *mr_info_p,\r
1442                 OUT                     ib_mr_attr_t                            *mr_query_p)\r
1443 {\r
1444         mr_query_p->h_pd = pd_h;\r
1445         mr_query_p->local_lb  = mr_info_p->local_start;\r
1446         mr_query_p->local_ub  = mr_info_p->local_start + mr_info_p->local_size;\r
1447         mr_query_p->remote_lb = mr_info_p->remote_start;\r
1448         mr_query_p->remote_ub = mr_info_p->remote_start + mr_info_p->remote_size;\r
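	// Note: the *_ub bounds are start + size, i.e. exclusive upper bounds.\r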
1449 \r
1450         mr_query_p->access_ctrl = map_vapi_acl(mr_info_p->acl);\r
1451         mr_query_p->lkey = mr_info_p->lkey;\r
1452         mr_query_p->rkey = mr_info_p->rkey;\r
1453 }\r
1454 \r
1455 //////////////////////////////////////////////////////////////\r
1456 // Convert from IBAL memory-window bind request to VAPI \r
1457 //////////////////////////////////////////////////////////////\r
1458 void\r
1459 mlnx_conv_bind_req(\r
1460         IN                              HHUL_qp_hndl_t                          hhul_qp_hndl,\r
1461         IN                              ib_bind_wr_t* const                     p_mw_bind,\r
1462                 OUT                     HHUL_mw_bind_t                          *bind_prop_p)\r
1463 {\r
1464 	bind_prop_p->qp        = hhul_qp_hndl;\r
1465 	bind_prop_p->id        = p_mw_bind->wr_id;\r
1466 	bind_prop_p->acl       = map_ibal_acl(p_mw_bind->access_ctrl);\r
1467 	bind_prop_p->size      = p_mw_bind->local_ds.length;\r
1468 	bind_prop_p->start     = (VAPI_virt_addr_t)(MT_virt_addr_t)p_mw_bind->local_ds.vaddr;\r
1469 	bind_prop_p->mr_lkey   = p_mw_bind->local_ds.lkey;\r
1470 	bind_prop_p->comp_type =\r
1471 		(p_mw_bind->send_opt & IB_SEND_OPT_SIGNALED) ? VAPI_SIGNALED : VAPI_UNSIGNALED;\r
1472 }\r
1473 \r
1474 \r
1475 /////////////////////////////////////////////////////////\r
1476 // Map IBAL qp type to VAPI transport and special qp_type\r
1477 /////////////////////////////////////////////////////////\r
1478 int\r
1479 mlnx_map_ibal_qp_type(\r
1480         IN                              ib_qp_type_t                            ibal_qpt,\r
1481                 OUT                     VAPI_special_qp_t                       *vapi_qp_type_p)\r
1482 {\r
1483         switch (ibal_qpt)\r
1484         {\r
1485         case IB_QPT_RELIABLE_CONN:\r
1486                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;\r
1487                 return IB_TS_RC;\r
1488 \r
1489         case IB_QPT_UNRELIABLE_CONN:\r
1490                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;\r
1491                 return IB_TS_UC;\r
1492 \r
1493         case IB_QPT_UNRELIABLE_DGRM:\r
1494                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;\r
1495                 return IB_TS_UD;\r
1496 \r
1497         case IB_QPT_QP0:\r
1498                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP;\r
1499                 return IB_TS_UD;\r
1500 \r
1501         case IB_QPT_QP1:\r
1502                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;\r
1503                 return IB_TS_UD;\r
1504 \r
1505         case IB_QPT_RAW_IPV6:\r
1506                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_IPV6_QP; // TBD: ??\r
1507                 return IB_TS_RAW;\r
1508 \r
1509         case IB_QPT_RAW_ETHER:\r
1510                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP;  // TBD: ??\r
1511                 return IB_TS_RAW;\r
1512 \r
1513         case IB_QPT_MAD:\r
1514                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;\r
1515                 return IB_TS_UD;\r
1516 \r
1517         case IB_QPT_QP0_ALIAS:\r
1518                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP;\r
1519                 return IB_TS_UD;\r
1520 \r
1521         case IB_QPT_QP1_ALIAS:\r
1522                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;\r
1523                 return IB_TS_UD;\r
1524 \r
1525         default:\r
1526                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map ibal_qp_type %d (last known %d) returning %d\n",\r
1527                         ibal_qpt, IB_QPT_QP1_ALIAS, IB_TS_RAW));\r
1528                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP;\r
1529                 return IB_TS_RAW;\r
1530         }\r
1531 }\r
1532 \r
1533 /////////////////////////////////////////////////////////\r
1534 // QP and CQ values must be set by the caller\r
1535 /////////////////////////////////////////////////////////\r
1536 void\r
1537 mlnx_conv_qp_create_attr(\r
1538         IN              const   ib_qp_create_t                          *create_attr_p,\r
1539                 OUT                     HHUL_qp_init_attr_t                     *init_attr_p,\r
1540                 OUT                     VAPI_special_qp_t                       *vapi_qp_type_p)\r
1541 {\r
1542         init_attr_p->ts_type = mlnx_map_ibal_qp_type(create_attr_p->qp_type, vapi_qp_type_p);\r
1543 \r
1544         init_attr_p->qp_cap.max_oust_wr_sq = create_attr_p->sq_depth;\r
1545         init_attr_p->qp_cap.max_oust_wr_rq = create_attr_p->rq_depth;\r
1546         init_attr_p->qp_cap.max_sg_size_sq = create_attr_p->sq_sge;\r
1547         init_attr_p->qp_cap.max_sg_size_rq = create_attr_p->rq_sge;\r
1548 \r
1549         init_attr_p->sq_sig_type = (create_attr_p->sq_signaled) ? VAPI_SIGNAL_ALL_WR : VAPI_SIGNAL_REQ_WR;\r
1550         init_attr_p->rq_sig_type = VAPI_SIGNAL_ALL_WR;\r
1551 \r
1552         init_attr_p->srq = HHUL_INVAL_SRQ_HNDL;\r
1553 }\r
1554 \r
1555 /////////////////////////////////////////////////////////\r
1556 // NOTE: ibal_qp_state is non-linear - so we cannot use a LUT\r
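// (Each ib_qp_state_t value is a distinct bit flag, so the chained tests\r
// below pick the first set bit; a table indexed by state would not work.)\r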
1557 /////////////////////////////////////////////////////////\r
1558 VAPI_qp_state_t\r
1559 mlnx_map_ibal_qp_state(\r
1560         IN                              ib_qp_state_t                           ibal_qp_state)\r
1561 {\r
1562         VAPI_qp_state_t vapi_qp_state = VAPI_RESET;\r
1563 \r
1564         if      (ibal_qp_state & IB_QPS_RESET) vapi_qp_state = VAPI_RESET;\r
1565         else if (ibal_qp_state & IB_QPS_INIT)  vapi_qp_state = VAPI_INIT;\r
1566         else if (ibal_qp_state & IB_QPS_RTR)   vapi_qp_state = VAPI_RTR;\r
1567         else if (ibal_qp_state & IB_QPS_RTS)   vapi_qp_state = VAPI_RTS;\r
1568         else if (ibal_qp_state & IB_QPS_SQD)   vapi_qp_state = VAPI_SQD;\r
1569         else if (ibal_qp_state & IB_QPS_SQERR) vapi_qp_state = VAPI_SQE;\r
1570         else if (ibal_qp_state & IB_QPS_ERROR) vapi_qp_state = VAPI_ERR;\r
1571 \r
1572         return vapi_qp_state;\r
1573 }\r
1574 \r
1575 /////////////////////////////////////////////////////////\r
1576 /////////////////////////////////////////////////////////\r
1577 ib_qp_state_t\r
1578 mlnx_map_vapi_qp_state(\r
1579         IN                              VAPI_qp_state_t                         vapi_qp_state)\r
1580 {\r
1581         switch (vapi_qp_state)\r
1582         {\r
1583         case VAPI_RESET: return IB_QPS_RESET;\r
1584         case VAPI_INIT:  return IB_QPS_INIT;\r
1585         case VAPI_RTR:   return IB_QPS_RTR;\r
1586         case VAPI_RTS:   return IB_QPS_RTS;\r
1587         case VAPI_SQD:   return IB_QPS_SQD;\r
1588         case VAPI_SQE:   return IB_QPS_SQERR;\r
1589         case VAPI_ERR:   return IB_QPS_ERROR;\r
1590                 // TBD: IB_QPS_SQD_DRAINING\r
1591                 // TBD: IB_QPS_SQD_DRAINED\r
1592         default:\r
1593                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_qp_state %d (last known %d) returning %d\n",\r
1594                         vapi_qp_state, VAPI_ERR, IB_QPS_INIT));\r
1595                 return IB_QPS_INIT;\r
1596         }\r
1597 }\r
1598 \r
1599 /////////////////////////////////////////////////////////\r
1600 /////////////////////////////////////////////////////////\r
1601 ib_apm_state_t\r
1602 mlnx_map_vapi_apm_state(\r
1603         IN                              VAPI_mig_state_t                        vapi_apm_state)\r
1604 {\r
1605         switch (vapi_apm_state)\r
1606         {\r
1607         case VAPI_MIGRATED: return IB_APM_MIGRATED;\r
1608         case VAPI_REARM:    return IB_APM_REARM;\r
1609         case VAPI_ARMED:    return IB_APM_ARMED;\r
1610 \r
1611         default:\r
1612                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_apm_state %d (last known %d) returning %d\n",\r
1613                         vapi_apm_state, VAPI_ARMED, 0));\r
1614                 return 0;\r
1615         }\r
1616 }\r
1617 \r
1618 #if 0\r
1619 /////////////////////////////////////////////////////////\r
1620 // UNUSED: IBAL uses same encoding as THH\r
1621 /////////////////////////////////////////////////////////\r
1622 static\r
1623 u_int32_t ibal_mtu_to_vapi(u_int32_t ibal_mtu)\r
1624 {\r
1625         u_int32_t mtu = 0;\r
1626 \r
1627         // MTU256=1, MTU512=2, MTU1024=3\r
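	// e.g. 256 == 1<<8: the loop leaves mtu == 8, and 8 - 7 == 1 (MTU256)\r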
1628         while (ibal_mtu >>= 1) mtu++;\r
1629         return mtu - 7;\r
1630 }\r
1631 \r
1632 /////////////////////////////////////////////////////////\r
1633 /////////////////////////////////////////////////////////\r
1634 static\r
1635 u_int32_t vapi_mtu_to_ibal(u_int32_t vapi_mtu)\r
1636 {\r
1637         return (1 << (vapi_mtu + 7));\r
1638 }\r
1639 #endif\r
1640 \r
1641 /////////////////////////////////////////////////////////\r
1642 /////////////////////////////////////////////////////////\r
1643 void\r
1644 mlnx_conv_vapi_qp_attr(\r
1645         IN                              HH_hca_hndl_t                           hh_hndl,\r
1646         IN                              VAPI_qp_attr_t                          *hh_qp_attr_p,\r
1647                 OUT                     ib_qp_attr_t                            *qp_attr_p)\r
1648 {\r
1649         qp_attr_p->access_ctrl     = map_vapi_qp_acl(hh_qp_attr_p->remote_atomic_flags);\r
1650         qp_attr_p->pkey_index      = (uint16_t)hh_qp_attr_p->pkey_ix;\r
1651         qp_attr_p->sq_depth        = hh_qp_attr_p->cap.max_oust_wr_sq;\r
1652         qp_attr_p->rq_depth        = hh_qp_attr_p->cap.max_oust_wr_rq;\r
1653         qp_attr_p->sq_sge          = hh_qp_attr_p->cap.max_sg_size_sq;\r
1654         qp_attr_p->rq_sge          = hh_qp_attr_p->cap.max_sg_size_rq;\r
1655         qp_attr_p->sq_max_inline   = hh_qp_attr_p->cap.max_inline_data_sq;\r
1656         qp_attr_p->init_depth      = hh_qp_attr_p->ous_dst_rd_atom; // outstanding outgoing\r
1657         qp_attr_p->resp_res        = hh_qp_attr_p->qp_ous_rd_atom;  // outstanding as target (in)\r
1658 \r
1659         qp_attr_p->num             = cl_ntoh32(hh_qp_attr_p->qp_num);\r
1660         CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_qpn 0x%x = hh_qpn 0x%x\n",\r
1661                 qp_attr_p->num,\r
1662                 hh_qp_attr_p->qp_num));\r
1663 \r
1664         qp_attr_p->dest_num        = cl_ntoh32(hh_qp_attr_p->dest_qp_num);\r
1665         CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_dest 0x%x = hh_dest 0x%x\n",\r
1666                 qp_attr_p->dest_num,\r
1667                 hh_qp_attr_p->dest_qp_num));\r
1668         qp_attr_p->qkey            = cl_ntoh32 (hh_qp_attr_p->qkey);\r
1669 \r
1670         qp_attr_p->sq_psn          = cl_ntoh32 (hh_qp_attr_p->sq_psn);\r
1671         qp_attr_p->rq_psn          = cl_ntoh32 (hh_qp_attr_p->rq_psn);\r
1672 \r
1673         qp_attr_p->primary_port    = hh_qp_attr_p->port;\r
1674         qp_attr_p->alternate_port  = hh_qp_attr_p->alt_port;\r
1675 \r
1676         qp_attr_p->state           = mlnx_map_vapi_qp_state(hh_qp_attr_p->qp_state);\r
1677         qp_attr_p->apm_state       = mlnx_map_vapi_apm_state(hh_qp_attr_p->path_mig_state);\r
1678 \r
1679         mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->av, &qp_attr_p->primary_av);\r
1680         qp_attr_p->primary_av.conn.path_mtu          = (u_int8_t)hh_qp_attr_p->path_mtu;\r
1681         qp_attr_p->primary_av.conn.local_ack_timeout = hh_qp_attr_p->timeout; \r
1682         qp_attr_p->primary_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count;\r
1683         qp_attr_p->primary_av.conn.rnr_retry_cnt     = hh_qp_attr_p->rnr_retry;\r
1684 \r
1685         mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->alt_av, &qp_attr_p->alternate_av);\r
1686 	qp_attr_p->alternate_av.conn.path_mtu          = (u_int8_t)hh_qp_attr_p->path_mtu;\r
1687         qp_attr_p->alternate_av.conn.local_ack_timeout = hh_qp_attr_p->timeout;\r
1688         qp_attr_p->alternate_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count;\r
1689         qp_attr_p->alternate_av.conn.rnr_retry_cnt     = hh_qp_attr_p->rnr_retry;\r
1690 }\r
1691 #if 0\r
1692 XXX:\r
1693 QP_ATTR_QP_STATE\r
1694 QP_ATTR_EN_SQD_ASYN_NOTIF\r
1695 QP_ATTR_QP_NUM\r
1696 + QP_ATTR_REMOTE_ATOMIC_FLAGS\r
1697 + QP_ATTR_PKEY_IX\r
1698 + QP_ATTR_PORT\r
1699 + QP_ATTR_QKEY\r
1700 + QP_ATTR_RQ_PSN\r
1701 + QP_ATTR_AV\r
1702 \r
1703 QP_ATTR_PATH_MTU\r
1704 + QP_ATTR_TIMEOUT\r
1705 + QP_ATTR_RETRY_COUNT\r
1706 + QP_ATTR_RNR_RETRY\r
1707 QP_ATTR_QP_OUS_RD_ATOM\r
1708 \r
1709 - QP_ATTR_ALT_PATH\r
1710 \r
1711 + QP_ATTR_MIN_RNR_TIMER\r
1712 QP_ATTR_SQ_PSN\r
1713 QP_ATTR_OUS_DST_RD_ATOM\r
1714 QP_ATTR_PATH_MIG_STATE\r
1715 QP_ATTR_CAP\r
1716 #endif\r
1717 \r
1718 /////////////////////////////////////////////////////////\r
1719 /////////////////////////////////////////////////////////\r
1720 ib_api_status_t\r
1721 mlnx_conv_qp_modify_attr(\r
1722         IN                              HH_hca_hndl_t                                   hh_hndl,\r
1723         IN                              ib_qp_type_t                                    qp_type,\r
1724         IN              const   ib_qp_mod_t                                             *modify_attr_p,\r
1725                 OUT                     VAPI_qp_attr_t                                  *qp_attr_p, \r
1726                 OUT                     VAPI_qp_attr_mask_t                             *attr_mask_p)\r
1727 {\r
1728 \r
1729         qp_attr_p->qp_state = mlnx_map_ibal_qp_state(modify_attr_p->req_state);\r
1730         *attr_mask_p = QP_ATTR_QP_STATE;\r
1731 \r
1732         switch(modify_attr_p->req_state)\r
1733         {\r
1734         case IB_QPS_RESET:\r
1735                 break;\r
1736 \r
1737         case IB_QPS_INIT:\r
1738                 *attr_mask_p |= QP_ATTR_PORT |\r
1739                         QP_ATTR_QKEY |\r
1740                         QP_ATTR_PKEY_IX ;\r
1741 \r
1742                 qp_attr_p->port    = modify_attr_p->state.init.primary_port;\r
1743                 qp_attr_p->qkey    = cl_ntoh32 (modify_attr_p->state.init.qkey);\r
1744                 qp_attr_p->pkey_ix = modify_attr_p->state.init.pkey_index;\r
1745                 if (IB_QPT_RELIABLE_CONN == qp_type)\r
1746                 {\r
1747                         *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;\r
1748                         qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.init.access_ctrl);\r
1749                 } else\r
1750                 {\r
1751                         qp_attr_p->remote_atomic_flags = 0;\r
1752                 }\r
1753                 break;\r
1754 \r
1755         case IB_QPS_RTR:\r
1756                 /* VAPI doesn't support modifying the WQE depth ever. */\r
1757                 if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||\r
1758                         modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH )\r
1759                 {\r
1760                         return IB_UNSUPPORTED;\r
1761                 }\r
1762 \r
1763                 *attr_mask_p |= QP_ATTR_RQ_PSN |\r
1764                         QP_ATTR_DEST_QP_NUM |\r
1765                         QP_ATTR_QP_OUS_RD_ATOM |\r
1766                         QP_ATTR_MIN_RNR_TIMER |\r
1767                         QP_ATTR_AV ;\r
1768 \r
1769                 qp_attr_p->rq_psn          = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);\r
1770                 qp_attr_p->dest_qp_num     = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);\r
1771                 qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rtr.resp_res;\r
1772 \r
1773                 if (modify_attr_p->state.rtr.opts & IB_MOD_QP_RNR_NAK_TIMEOUT)\r
1774                 {\r
1775                         qp_attr_p->min_rnr_timer   = modify_attr_p->state.rtr.rnr_nak_timeout;\r
1776                 } else\r
1777                 {\r
1778                         qp_attr_p->min_rnr_timer   = 0;\r
1779                 }\r
1780 \r
1781 #if 1\r
1782                 CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("modify_qp: hh_dest 0x%x = ibal_dest 0x%x\n",\r
1783                         qp_attr_p->dest_qp_num, modify_attr_p->state.rtr.dest_qp));\r
1784 #endif\r
1785 \r
1786                 // Convert primary RC AV (mandatory)\r
1787                 cl_memclr(&qp_attr_p->av, sizeof(VAPI_ud_av_t));\r
1788                 mlnx_conv_ibal_av(hh_hndl,\r
1789                         &modify_attr_p->state.rtr.primary_av, &qp_attr_p->av);\r
1790 \r
1791                 if (IB_QPT_RELIABLE_CONN == qp_type)\r
1792                 {\r
1793                         *attr_mask_p |= QP_ATTR_PATH_MTU;\r
1794                         qp_attr_p->path_mtu     = modify_attr_p->state.rtr.primary_av.conn.path_mtu; // MTU\r
1795 \r
1796                         qp_attr_p->timeout     = modify_attr_p->state.rtr.primary_av.conn.local_ack_timeout; // XXX: conv\r
1797                         qp_attr_p->retry_count = modify_attr_p->state.rtr.primary_av.conn.seq_err_retry_cnt;\r
1798                         qp_attr_p->rnr_retry   = modify_attr_p->state.rtr.primary_av.conn.rnr_retry_cnt;\r
1799                 }\r
1800 \r
1801                 // Convert Remote Atomic Flags\r
1802                 if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL)\r
1803                 {\r
1804                         *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;\r
1805                         qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rtr.access_ctrl);\r
1806                 }\r
1807 \r
1808                 // Convert alternate RC AV\r
1809                 if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV)\r
1810                 {\r
1811                         *attr_mask_p |= QP_ATTR_ALT_PATH;\r
1812                         cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t));\r
1813                         mlnx_conv_ibal_av(hh_hndl,\r
1814                                 &modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_av);\r
1815 \r
1816                         if (IB_QPT_RELIABLE_CONN == qp_type)\r
1817                         {\r
1818                                 qp_attr_p->alt_timeout     = modify_attr_p->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv\r
1819 #if 0\r
1820                                 /* Incompliant with spec 1.1! Data already set before */\r
1821                                 qp_attr_p->retry_count = modify_attr_p->state.rtr.alternate_av.conn.seq_err_retry_cnt;\r
1822                                 qp_attr_p->rnr_retry   = modify_attr_p->state.rtr.alternate_av.conn.rnr_retry_cnt;\r
1823 #endif\r
1824                         }\r
1825                 }\r
1826                 break;\r
1827 \r
1828         case IB_QPS_RTS:\r
1829                 /* VAPI doesn't support modifying the WQE depth ever. */\r
1830                 if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||\r
1831                         modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH )\r
1832                 {\r
1833                         return IB_UNSUPPORTED;\r
1834                 }\r
1835 \r
1836                 *attr_mask_p |= QP_ATTR_SQ_PSN |\r
1837                         QP_ATTR_RETRY_COUNT |\r
1838                         QP_ATTR_RNR_RETRY |\r
1839                         QP_ATTR_OUS_DST_RD_ATOM |\r
1840                         QP_ATTR_MIN_RNR_TIMER;\r
1841 \r
1842                 qp_attr_p->sq_psn = cl_ntoh32 (modify_attr_p->state.rts.sq_psn);\r
1843 \r
1844                 if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL)\r
1845                 {\r
1846                         *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;\r
1847                         qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rts.access_ctrl);\r
1848                 }\r
1849 \r
1850                 qp_attr_p->timeout     = modify_attr_p->state.rts.local_ack_timeout; // XXX: conv\r
1851                 qp_attr_p->ous_dst_rd_atom = modify_attr_p->state.rts.init_depth;\r
1852                 qp_attr_p->retry_count = modify_attr_p->state.rts.retry_cnt;\r
1853                 qp_attr_p->rnr_retry   = modify_attr_p->state.rts.rnr_retry_cnt;\r
1854                 qp_attr_p->min_rnr_timer   = modify_attr_p->state.rts.rnr_nak_timeout;\r
1855 \r
1856                 // Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)\r
1857                 if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {\r
1858                         *attr_mask_p |= QP_ATTR_QP_OUS_RD_ATOM;\r
1859                         qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rts.resp_res;\r
1860                 }\r
1861 \r
1862                 // Convert alternate RC AV\r
1863                 if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV)\r
1864                 {\r
1865                         *attr_mask_p |= QP_ATTR_ALT_PATH;\r
1866                         cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t));\r
1867                         mlnx_conv_ibal_av(hh_hndl,\r
1868                                 &modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_av);\r
1869                         if (IB_QPT_RELIABLE_CONN == qp_type)\r
1870                         {\r
1871                                 qp_attr_p->alt_timeout     = modify_attr_p->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv\r
1872 #if 0\r
1873                                 /* Incompliant with spec 1.1! Data already set before */\r
1874                                 qp_attr_p->retry_count = modify_attr_p->state.rts.alternate_av.conn.seq_err_retry_cnt;\r
1875                                 qp_attr_p->rnr_retry   = modify_attr_p->state.rts.alternate_av.conn.rnr_retry_cnt;\r
1876 #endif\r
1877                         }\r
1878                 }\r
1879                 break;\r
1880 \r
1881                 // TBD: The following are treated equally (SQ Drain)\r
1882         case IB_QPS_SQD:\r
1883         case IB_QPS_SQD_DRAINING:\r
1884         case IB_QPS_SQD_DRAINED:\r
1885                 *attr_mask_p |= QP_ATTR_EN_SQD_ASYN_NOTIF;\r
1886                 qp_attr_p->en_sqd_asyn_notif = (MT_bool)modify_attr_p->state.sqd.sqd_event;\r
1887                 break;\r
1888 \r
1889         case IB_QPS_SQERR:\r
1890         case IB_QPS_ERROR:\r
1891         case IB_QPS_TIME_WAIT:\r
1892         default:\r
1893                 break;\r
1894         }\r
1895         CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("CL: conv_qp_modify: new state %d attr_mask 0x%x\n", qp_attr_p->qp_state, *attr_mask_p));\r
1896         return IB_SUCCESS;\r
1897 }\r
1898 \r
1899 /////////////////////////////////////////////////////////\r
1900 /////////////////////////////////////////////////////////\r
1901 static VAPI_wr_opcode_t\r
1902 map_ibal_send_opcode(\r
1903         IN                              ib_wr_type_t                            ibal_opcode,\r
1904         IN                              boolean_t                                       imm)\r
1905 {\r
1906         VAPI_wr_opcode_t                vapi_opcode;\r
1907 \r
1908         switch (ibal_opcode)\r
1909         {\r
1910         case WR_SEND:         vapi_opcode = VAPI_SEND;\r
1911                 break;\r
1912         case WR_RDMA_WRITE:   vapi_opcode = VAPI_RDMA_WRITE;\r
1913                 break;\r
1914         case WR_RDMA_READ:    vapi_opcode = VAPI_RDMA_READ;\r
1915                 break;\r
1916         case WR_COMPARE_SWAP: vapi_opcode = VAPI_ATOMIC_CMP_AND_SWP;\r
1917                 break;\r
1918         case WR_FETCH_ADD:    vapi_opcode = VAPI_ATOMIC_FETCH_AND_ADD;\r
1919                 break;\r
1920         default:              vapi_opcode = VAPI_SEND;\r
1921                 break;\r
1922         }\r
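	// The increment below relies on VAPI placing each *_WITH_IMM opcode\r
	// immediately after its base opcode in VAPI_wr_opcode_t.\r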
1923         if (imm && (VAPI_SEND == vapi_opcode || VAPI_RDMA_WRITE == vapi_opcode)) vapi_opcode++;\r
1924         return vapi_opcode;\r
1925 }\r
1926 \r
1927 /////////////////////////////////////////////////////////\r
1928 /////////////////////////////////////////////////////////\r
1929 ib_api_status_t\r
1930 mlnx_conv_send_desc(\r
1931         IN                              IB_ts_t                                         transport,\r
1932         IN              const   ib_send_wr_t                            *ibal_send_wqe_p,\r
1933                 OUT                     VAPI_sr_desc_t                          *vapi_send_desc_p)\r
1934 {\r
1935         boolean_t                                               imm = FALSE;\r
1936         u_int32_t                                               idx;\r
1937         register VAPI_sg_lst_entry_t    *sg_lst_p;\r
1938         register ib_local_ds_t                  *ds_array;\r
1939 \r
1940 \r
1941         switch (transport)\r
1942         {\r
1943         case IB_TS_UD:\r
1944                 CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "UD"));\r
1945                 {\r
1946                         mlnx_avo_t *avo_p = (mlnx_avo_t *)ibal_send_wqe_p->dgrm.ud.h_av;\r
1947 \r
1948                         vapi_send_desc_p->remote_qp  = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qp);\r
1949                         vapi_send_desc_p->remote_qkey = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qkey);\r
1950 \r
1951                         if (!avo_p || avo_p->mark != E_MARK_AV)\r
1952                                 return IB_INVALID_AV_HANDLE;\r
1953 \r
1954                         vapi_send_desc_p->remote_ah = avo_p->h_av; // was ah.hhul\r
1955                         break;\r
1956                 }\r
1957 \r
1958         case IB_TS_RC:\r
1959                 CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "RC"));\r
1960                 // vapi_send_desc_p->remote_qp   = 0;\r
1961                 // vapi_send_desc_p->remote_qkey = 0;\r
1962                 vapi_send_desc_p->remote_addr = ibal_send_wqe_p->remote_ops.vaddr;\r
1963                 vapi_send_desc_p->r_key       = ibal_send_wqe_p->remote_ops.rkey;\r
1964                 vapi_send_desc_p->compare_add = ibal_send_wqe_p->remote_ops.atomic1;\r
1965                 vapi_send_desc_p->swap        = ibal_send_wqe_p->remote_ops.atomic2;\r
1966                 break;\r
1967 \r
1968         default: // TBD: RAW, RD\r
1969                 return IB_UNSUPPORTED;\r
1970         }\r
1971 \r
1972         imm = (0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_IMMEDIATE));\r
1973         vapi_send_desc_p->fence      = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_FENCE));\r
1974         vapi_send_desc_p->set_se     = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SOLICITED));\r
1975 	vapi_send_desc_p->comp_type  = (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SIGNALED) ?\r
1976 		VAPI_SIGNALED : VAPI_UNSIGNALED;\r
1977 \r
1978         vapi_send_desc_p->id = ibal_send_wqe_p->wr_id;\r
1979         vapi_send_desc_p->opcode = map_ibal_send_opcode(ibal_send_wqe_p->wr_type, imm);\r
1980 \r
1981         if (imm)\r
1982                 vapi_send_desc_p->imm_data = cl_ntoh32 (ibal_send_wqe_p->immediate_data);\r
1983 \r
1984         vapi_send_desc_p->sg_lst_len = ibal_send_wqe_p->num_ds;\r
1985 \r
1986         sg_lst_p = vapi_send_desc_p->sg_lst_p;\r
1987         ds_array = ibal_send_wqe_p->ds_array;\r
1988         for (idx = 0; idx < ibal_send_wqe_p->num_ds; idx++)\r
1989         {\r
1990                 sg_lst_p->addr = ds_array->vaddr;\r
1991                 sg_lst_p->len  = ds_array->length;\r
1992                 sg_lst_p->lkey = ds_array->lkey;\r
1993                 // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_send (conv) addr %Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey));\r
1994                 sg_lst_p++;\r
1995                 ds_array++;\r
1996         }\r
1997 	CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("send: rqpn 0x%x rqkey 0x%x\n",\r
1998                 vapi_send_desc_p->remote_qp,\r
1999                 vapi_send_desc_p->remote_qkey));\r
2000         return IB_SUCCESS;\r
2001 }\r
2002 \r
2003 /////////////////////////////////////////////////////////\r
2004 /////////////////////////////////////////////////////////\r
2005 ib_api_status_t\r
2006 mlnx_conv_recv_desc(\r
2007         IN              const   ib_recv_wr_t                            *ibal_recv_wqe_p,\r
2008                 OUT                     VAPI_rr_desc_t                          *vapi_recv_desc_p)\r
2009 {\r
2010         u_int32_t                                               idx;\r
2011         register VAPI_sg_lst_entry_t    *sg_lst_p;\r
2012         register ib_local_ds_t                  *ds_array;\r
2013 \r
2014         vapi_recv_desc_p->id         = ibal_recv_wqe_p->wr_id;\r
2015         vapi_recv_desc_p->sg_lst_len = ibal_recv_wqe_p->num_ds;\r
2016         vapi_recv_desc_p->opcode     = VAPI_RECEIVE;\r
2017         vapi_recv_desc_p->comp_type  = VAPI_SIGNALED;\r
2018 \r
2019         sg_lst_p = vapi_recv_desc_p->sg_lst_p;\r
2020         ds_array = ibal_recv_wqe_p->ds_array;\r
2021         for (idx = 0; idx < ibal_recv_wqe_p->num_ds; idx++)\r
2022         {\r
2023                 sg_lst_p->addr = ds_array->vaddr;\r
2024                 sg_lst_p->len  = ds_array->length;\r
2025                 sg_lst_p->lkey = ds_array->lkey;\r
2026                 // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_recv (conv) addr 0x%Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey));\r
2027                 sg_lst_p++;\r
2028                 ds_array++;\r
2029         }\r
2030 \r
2031         return IB_SUCCESS;\r
2032 }\r
2033 \r
2034 /////////////////////////////////////////////////////////\r
2035 /////////////////////////////////////////////////////////\r
2036 void\r
2037 vapi_port_cap_to_ibal(\r
2038         IN                              IB_port_cap_mask_t                      vapi_port_cap,\r
2039                 OUT                     ib_port_cap_t                           *ibal_port_cap_p)\r
2040 {\r
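	// Only TRUE flags are set here - the caller is expected to zero the\r
	// ib_port_cap_t first (mlnx_conv_vapi_hca_cap does cl_memclr before calling).\r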
2041         if (vapi_port_cap & IB_CAP_MASK_IS_CONN_MGMT_SUP)\r
2042                 ibal_port_cap_p->cm = TRUE;\r
2043         if (vapi_port_cap & IB_CAP_MASK_IS_SNMP_TUNN_SUP)\r
2044                 ibal_port_cap_p->snmp = TRUE;\r
2045         if (vapi_port_cap & IB_CAP_MASK_IS_DEVICE_MGMT_SUP)\r
2046                 ibal_port_cap_p->dev_mgmt = TRUE;\r
2047         if (vapi_port_cap & IB_CAP_MASK_IS_VENDOR_CLS_SUP)\r
2048                 ibal_port_cap_p->vend = TRUE;\r
2049         if (vapi_port_cap & IB_CAP_MASK_IS_SM_DISABLED)\r
2050                 ibal_port_cap_p->sm_disable = TRUE;\r
2051         if (vapi_port_cap & IB_CAP_MASK_IS_SM)\r
2052                 ibal_port_cap_p->sm = TRUE;\r
2053         if (vapi_port_cap & IB_CAP_MASK_IS_CLIENT_REREGISTRATION_SUP)\r
2054 		ibal_port_cap_p->client_reregister = TRUE;\r
2055 }\r
2056 \r
2057 /////////////////////////////////////////////////////////\r
2058 /////////////////////////////////////////////////////////\r
2059 void\r
2060 mlnx_conv_vapi_hca_cap(\r
2061         IN                              HH_hca_dev_t                            *hca_info_p,\r
2062         IN                              VAPI_hca_cap_t                          *vapi_hca_cap_p,\r
2063         IN                              VAPI_hca_port_t                         *vapi_hca_ports,\r
2064                 OUT                     ib_ca_attr_t                            *ca_attr_p)\r
2065 {\r
2066         u_int8_t                        port_num;\r
2067         VAPI_hca_port_t         *vapi_port_p;\r
2068         ib_port_attr_t          *ibal_port_p;\r
2069 \r
2070         ca_attr_p->vend_id  = hca_info_p->vendor_id;\r
2071         ca_attr_p->dev_id   = (uint16_t)hca_info_p->dev_id;\r
2072         ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver;\r
2073 \r
2074         ca_attr_p->ca_guid   = *(UNALIGNED64 u_int64_t *)vapi_hca_cap_p->node_guid;\r
2075         ca_attr_p->num_ports = vapi_hca_cap_p->phys_port_num;\r
2076         ca_attr_p->max_qps   = vapi_hca_cap_p->max_num_qp;\r
2077         ca_attr_p->max_wrs   = vapi_hca_cap_p->max_qp_ous_wr;\r
2078         ca_attr_p->max_sges   = vapi_hca_cap_p->max_num_sg_ent;\r
2079         ca_attr_p->max_rd_sges = vapi_hca_cap_p->max_num_sg_ent_rd;\r
2080         ca_attr_p->max_cqs    = vapi_hca_cap_p->max_num_cq;\r
2081         ca_attr_p->max_cqes  = vapi_hca_cap_p->max_num_ent_cq;\r
2082         ca_attr_p->max_pds    = vapi_hca_cap_p->max_pd_num;\r
2083         ca_attr_p->init_regions = vapi_hca_cap_p->max_num_mr;\r
2084         ca_attr_p->init_windows = vapi_hca_cap_p->max_mw_num;\r
2085         ca_attr_p->init_region_size = vapi_hca_cap_p->max_mr_size;\r
2086         ca_attr_p->max_addr_handles = vapi_hca_cap_p->max_ah_num;\r
2087         ca_attr_p->atomicity     = vapi_hca_cap_p->atomic_cap;\r
2088         ca_attr_p->max_partitions = vapi_hca_cap_p->max_pkeys;\r
2089         ca_attr_p->max_qp_resp_res = vapi_hca_cap_p->max_qp_ous_rd_atom;\r
2090         ca_attr_p->max_resp_res    = vapi_hca_cap_p->max_res_rd_atom;\r
2091         ca_attr_p->max_qp_init_depth = vapi_hca_cap_p->max_qp_init_rd_atom;\r
2092         ca_attr_p->max_ipv6_qps    = vapi_hca_cap_p->max_raw_ipv6_qp;\r
2093         ca_attr_p->max_ether_qps   = vapi_hca_cap_p->max_raw_ethy_qp;\r
2094         ca_attr_p->max_mcast_grps  = vapi_hca_cap_p->max_mcast_grp_num;\r
2095         ca_attr_p->max_mcast_qps   = vapi_hca_cap_p->max_total_mcast_qp_attach_num;\r
2096         ca_attr_p->max_qps_per_mcast_grp = vapi_hca_cap_p->max_mcast_qp_attach_num;\r
2097         ca_attr_p->local_ack_delay = vapi_hca_cap_p->local_ca_ack_delay;\r
2098         ca_attr_p->bad_pkey_ctr_support = vapi_hca_cap_p->flags & VAPI_BAD_PKEY_COUNT_CAP;\r
2099         ca_attr_p->bad_qkey_ctr_support = vapi_hca_cap_p->flags & VAPI_BAD_QKEY_COUNT_CAP;\r
2100         ca_attr_p->raw_mcast_support    = vapi_hca_cap_p->flags & VAPI_RAW_MULTI_CAP;\r
2101         ca_attr_p->apm_support          = vapi_hca_cap_p->flags & VAPI_AUTO_PATH_MIG_CAP;\r
2102         ca_attr_p->av_port_check        = vapi_hca_cap_p->flags & VAPI_UD_AV_PORT_ENFORCE_CAP;\r
2103         ca_attr_p->change_primary_port  = vapi_hca_cap_p->flags & VAPI_CHANGE_PHY_PORT_CAP;\r
2104         ca_attr_p->modify_wr_depth      = vapi_hca_cap_p->flags & VAPI_RESIZE_OUS_WQE_CAP;\r
2105 	ca_attr_p->hw_agents            = FALSE; // in the context of IBAL, the agent is implemented on the host\r
2106 \r
2107         ca_attr_p->num_page_sizes = 1;\r
2108         ca_attr_p->p_page_size[0] = PAGESIZE; // TBD: extract an array of page sizes from HCA cap\r
2109 \r
2110         for (port_num = 0; port_num < vapi_hca_cap_p->phys_port_num; port_num++)\r
2111         {\r
2112                 // Setup port pointers\r
2113                 ibal_port_p = &ca_attr_p->p_port_attr[port_num];\r
2114                 vapi_port_p = &vapi_hca_ports[port_num];\r
2115 \r
2116 		// Port Capabilities\r
2117                 cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));\r
2118                 vapi_port_cap_to_ibal(vapi_port_p->capability_mask, &ibal_port_p->cap);\r
2119 \r
2120 		// Port Attributes\r
2121                 ibal_port_p->port_num   = port_num + 1;\r
2122                 ibal_port_p->port_guid  = ibal_port_p->p_gid_table[0].unicast.interface_id;\r
2123                 ibal_port_p->lid        = cl_ntoh16(vapi_port_p->lid);\r
2124                 ibal_port_p->lmc        = vapi_port_p->lmc;\r
2125                 ibal_port_p->max_vls    = vapi_port_p->max_vl_num;\r
2126                 ibal_port_p->sm_lid     = cl_ntoh16(vapi_port_p->sm_lid);\r
2127                 ibal_port_p->sm_sl      = vapi_port_p->sm_sl;\r
2128                 ibal_port_p->link_state = (vapi_port_p->state != 0) ? (uint8_t)vapi_port_p->state : IB_LINK_DOWN;\r
2129                 ibal_port_p->num_gids   = vapi_port_p->gid_tbl_len;\r
2130                 ibal_port_p->num_pkeys  = vapi_port_p->pkey_tbl_len;\r
2131                 ibal_port_p->pkey_ctr   = (uint16_t)vapi_port_p->bad_pkey_counter;\r
2132                 ibal_port_p->qkey_ctr   = (uint16_t)vapi_port_p->qkey_viol_counter;\r
2133                 ibal_port_p->max_msg_size = vapi_port_p->max_msg_sz;\r
2134                 ibal_port_p->mtu = (u_int8_t)vapi_port_p->max_mtu;\r
2135 \r
2136                 ibal_port_p->subnet_timeout = 5; // TBD: currently 128us\r
2137                 // ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec\r
2138 #if 0\r
2139                 CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("Port %d port_guid 0x%"PRIx64"\n",\r
2140                         ibal_port_p->port_num, ibal_port_p->port_guid));\r
2141 #endif\r
2142         }\r
2143 }\r
2144 \r
2145 /////////////////////////////////////////////////////////\r
2146 /////////////////////////////////////////////////////////\r
2147 ib_api_status_t\r
2148 mlnx_get_hca_pkey_tbl(\r
2149         IN                              HH_hca_hndl_t                           hh_hndl,\r
2150         IN                              u_int8_t                                        port_num,\r
2151         IN                              u_int16_t                                       num_entries,\r
2152                 OUT                     void*                                           table_p)\r
2153 {\r
2154         u_int16_t               size;\r
2155         ib_net16_t              *pkey_p;\r
2156 \r
2157 	if (HH_OK != THH_hob_get_pkey_tbl( hh_hndl, port_num, num_entries, &size, table_p))\r
2158 		return IB_ERROR;\r
2159 \r
2160         pkey_p = (ib_net16_t *)table_p;\r
2161 #if 0\r
2162         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("port %d pkey0 0x%x pkey1 0x%x\n", port_num, pkey_p[0], pkey_p[1]));\r
2163 #endif\r
2164         return IB_SUCCESS;\r
2165 }\r
2166 \r
2167 ib_api_status_t\r
2168 mlnx_get_hca_gid_tbl(\r
2169         IN                              HH_hca_hndl_t                           hh_hndl,\r
2170         IN                              u_int8_t                                        port_num,\r
2171         IN                              u_int16_t                                       num_entries,\r
2172                 OUT                     void*                                           table_p)\r
2173 {\r
2174         u_int16_t               size;\r
2175 \r
2176         if (HH_OK != THH_hob_get_gid_tbl( hh_hndl, port_num, num_entries, &size, table_p))\r
2177                 return IB_ERROR;\r
2178 \r
2179         return IB_SUCCESS;\r
2180 }\r