f7228c92bfc0f12fed0be24a6cbd3ee7042dbe5c
[mirror/winof/.git] / hw / mt23108 / kernel / hca_data.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
4  *\r
5  * This software is available to you under the OpenIB.org BSD license\r
6  * below:\r
7  *\r
8  *     Redistribution and use in source and binary forms, with or\r
9  *     without modification, are permitted provided that the following\r
10  *     conditions are met:\r
11  *\r
12  *      - Redistributions of source code must retain the above\r
13  *        copyright notice, this list of conditions and the following\r
14  *        disclaimer.\r
15  *\r
16  *      - Redistributions in binary form must reproduce the above\r
17  *        copyright notice, this list of conditions and the following\r
18  *        disclaimer in the documentation and/or other materials\r
19  *        provided with the distribution.\r
20  *\r
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
22  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
24  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
25  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
26  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
27  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
28  * SOFTWARE.\r
29  *\r
30  * $Id$\r
31  */\r
32 \r
33 \r
34 #include "hca_data.h"\r
35 #include "hca_debug.h"\r
36 \r
37 static cl_spinlock_t    hob_lock;\r
38 \r
39 #if 1\r
40 u_int32_t               g_mlnx_dbg_lvl = CL_DBG_ERROR ;\r
41 #else\r
42 u_int32_t               g_mlnx_dbg_lvl = CL_DBG_ERROR |\r
43         MLNX_DBG_QPN |\r
44         MLNX_DBG_MEM |\r
45         MLNX_DBG_INFO |\r
46         MLNX_DBG_TRACE |\r
47         // MLNX_DBG_DIRECT |\r
48         0;\r
49 #endif\r
50 \r
51 u_int32_t               g_mlnx_dpc2thread = 0;\r
52 \r
53 #ifdef MODULE_LICENSE\r
54 MODULE_LICENSE("Proprietary");\r
55 #endif\r
56 \r
57 MODULE_PARM(g_mlnx_dbg_lvl, "i");\r
58 MODULE_PARM(g_mlnx_dpc2thread, "i");\r
59 \r
60 cl_qlist_t              mlnx_hca_list;\r
61 //mlnx_hca_t            mlnx_hca_array[MLNX_MAX_HCA];\r
62 //uint32_t              mlnx_num_hca = 0;\r
63 \r
64 mlnx_hob_t              mlnx_hob_array[MLNX_NUM_HOBKL];         // kernel HOB - one per HCA (cmdif access)\r
65 \r
66 mlnx_hobul_t    *mlnx_hobul_array[MLNX_NUM_HOBUL];      // kernel HOBUL - one per HCA (kar access)\r
67 \r
68 /* User verb library name */\r
69 /* TODO: Move to linux osd file.\r
70 char                    mlnx_uvp_lib_name[MAX_LIB_NAME] = {"libmlnx_uvp.so"};\r
71 */\r
72 \r
73 static void\r
74 mlnx_async_dpc(\r
75         IN                              cl_async_proc_item_t            *async_item_p );\r
76 \r
77 #if MLNX_COMP_MODEL\r
78 static void\r
79 mlnx_comp_dpc(\r
80         IN                              PRKDPC                                          p_dpc,\r
81         IN                              void                                            *context,\r
82         IN                              void                                            *pfn_comp_cb,\r
83         IN                              void                                            *unused );\r
84 #else\r
85 static void\r
86 mlnx_comp_dpc(\r
87         IN                              cl_async_proc_item_t            *async_item_p );\r
88 #endif\r
89 \r
90 // ### Callback Interface\r
91 static void\r
92 mlnx_comp_cb(\r
93         IN                              HH_hca_hndl_t                           hh_hndl,\r
94         IN                              HH_cq_hndl_t                            hh_cq,\r
95         IN                              void                                            *private_data);\r
96 \r
97 static void\r
98 mlnx_async_cb(\r
99         IN                              HH_hca_hndl_t                           hh_hndl,\r
100         IN                              HH_event_record_t                       *hh_er_p,\r
101         IN                              void                                            *private_data);\r
102 \r
103 /////////////////////////////////////////////////////////\r
104 // ### HCA\r
105 /////////////////////////////////////////////////////////\r
/*
 * Add an HCA to the global mlnx_hca_list.
 * hob_lock serializes all access to the list; the caller owns p_hca's
 * storage and must keep it alive until mlnx_hca_remove().
 */
void
mlnx_hca_insert(
	IN				mlnx_hca_t					*p_hca )
{
	cl_spinlock_acquire( &hob_lock );
	cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item );
	cl_spinlock_release( &hob_lock );
}
114 \r
/*
 * Unlink an HCA from the global mlnx_hca_list under hob_lock.
 * Does not free p_hca; ownership stays with the caller.
 */
void
mlnx_hca_remove(
	IN				mlnx_hca_t					*p_hca )
{
	cl_spinlock_acquire( &hob_lock );
	cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item );
	cl_spinlock_release( &hob_lock );
}
123 \r
124 mlnx_hca_t*\r
125 mlnx_hca_from_guid(\r
126         IN                              ib_net64_t                                      guid )\r
127 {\r
128         cl_list_item_t  *p_item;\r
129         mlnx_hca_t              *p_hca = NULL;\r
130 \r
131         cl_spinlock_acquire( &hob_lock );\r
132         p_item = cl_qlist_head( &mlnx_hca_list );\r
133         while( p_item != cl_qlist_end( &mlnx_hca_list ) )\r
134         {\r
135                 p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );\r
136                 if( p_hca->guid == guid )\r
137                         break;\r
138                 p_item = cl_qlist_next( p_item );\r
139                 p_hca = NULL;\r
140         }\r
141         cl_spinlock_release( &hob_lock );\r
142         return p_hca;\r
143 }\r
144 \r
145 /*\r
146 void\r
147 mlnx_names_from_guid(\r
148         IN                              ib_net64_t                                      guid,\r
149                 OUT                     char                                            **hca_name_p,\r
150                 OUT                     char                                            **dev_name_p)\r
151 {\r
152         unsigned int idx;\r
153 \r
154         if (!hca_name_p) return;\r
155         if (!dev_name_p) return;\r
156 \r
157         for (idx = 0; idx < mlnx_num_hca; idx++)\r
158         {\r
159                 if (mlnx_hca_array[idx].ifx.guid == guid)\r
160                 {\r
161                         *hca_name_p = mlnx_hca_array[idx].hca_name_p;\r
162                         *dev_name_p = mlnx_hca_array[idx].dev_name_p;\r
163                 }\r
164         }\r
165 }\r
166 */\r
167 \r
168 /////////////////////////////////////////////////////////\r
169 // ### HOB\r
170 /////////////////////////////////////////////////////////\r
171 cl_status_t\r
172 mlnx_hobs_init( void )\r
173 {\r
174         u_int32_t idx;\r
175 \r
176         cl_qlist_init( &mlnx_hca_list );\r
177 \r
178         for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)\r
179         {\r
180                 mlnx_hob_array[idx].hh_hndl = NULL;\r
181                 mlnx_hob_array[idx].comp_cb_p = NULL;\r
182                 mlnx_hob_array[idx].async_cb_p = NULL;\r
183                 mlnx_hob_array[idx].ca_context = NULL;\r
184                 mlnx_hob_array[idx].async_proc_mgr_p = NULL;\r
185                 mlnx_hob_array[idx].cl_device_h = NULL;\r
186                 // mlnx_hob_array[idx].port_lmc_p = NULL;\r
187                 mlnx_hob_array[idx].index = idx;\r
188                 mlnx_hob_array[idx].mark = E_MARK_INVALID;\r
189         }\r
190         return cl_spinlock_init( &hob_lock );\r
191 }\r
192 \r
193 /////////////////////////////////////////////////////////\r
194 /////////////////////////////////////////////////////////\r
195 ib_api_status_t\r
196 mlnx_hobs_insert(\r
197         IN                              mlnx_hca_t                                      *p_hca,\r
198                 OUT                     mlnx_hob_t                                      **hob_pp)\r
199 {\r
200         u_int32_t idx;\r
201         ib_api_status_t status = IB_ERROR;\r
202         mlnx_cache_t    *p_cache;\r
203 \r
204         p_cache = (mlnx_cache_t*)cl_pzalloc( sizeof(mlnx_cache_t) * 2 );\r
205         if( !p_cache )\r
206                 return IB_INSUFFICIENT_MEMORY;\r
207 \r
208         cl_spinlock_acquire(&hob_lock);\r
209         for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)\r
210         {\r
211                 if (!mlnx_hob_array[idx].hh_hndl)\r
212                 {\r
213                         mlnx_hob_array[idx].hh_hndl = p_hca->hh_hndl;\r
214                         mlnx_hob_array[idx].mark = E_MARK_CA;\r
215                         if (hob_pp) *hob_pp = &mlnx_hob_array[idx];\r
216                         status = IB_SUCCESS;\r
217                         break;\r
218                 }\r
219         }\r
220         cl_spinlock_release(&hob_lock);\r
221 \r
222         if (IB_SUCCESS == status)\r
223                 (*hob_pp)->cache = p_cache;\r
224         else\r
225                 cl_free( p_cache );\r
226 \r
227         return status;\r
228 }\r
229 \r
230 /////////////////////////////////////////////////////////\r
231 /////////////////////////////////////////////////////////\r
/*
 * Register IBAL's completion and async-event callbacks on a kernel HOB.
 *
 * On first use, lazily creates the HOB's async processing manager
 * (MLNX_NUM_CB_THR threads named "CBthread") used to dispatch callbacks
 * off the interrupt path.  Then hooks mlnx_async_cb/mlnx_comp_cb into
 * the HH layer, passing &hob_p->index as the HH-side context so the
 * static callbacks can recover the HOB.
 *
 * Returns IB_SUCCESS, IB_INSUFFICIENT_MEMORY / IB_INSUFFICIENT_RESOURCES
 * if the thread pool cannot be created, or IB_ERROR when the HOB has no
 * underlying HH handle.
 */
ib_api_status_t
mlnx_hobs_set_cb(
	IN				mlnx_hob_t					*hob_p, 
	IN				ci_completion_cb_t			comp_cb_p,
	IN				ci_async_event_cb_t			async_cb_p,
	IN		const	void* const					ib_context)
{
	cl_status_t		cl_status;

	// Verify handle
	CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

	// Setup the callbacks
	if (!hob_p->async_proc_mgr_p)
	{
		hob_p->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) );
		if( !hob_p->async_proc_mgr_p )
		{
			return IB_INSUFFICIENT_MEMORY;
		}
		cl_async_proc_construct( hob_p->async_proc_mgr_p );
		cl_status = cl_async_proc_init( hob_p->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" );
		if( cl_status != CL_SUCCESS )
		{
			/* Roll back the partially-created manager so a retry
			 * starts from a clean state. */
			cl_async_proc_destroy( hob_p->async_proc_mgr_p );
			cl_free(hob_p->async_proc_mgr_p);
			hob_p->async_proc_mgr_p = NULL;
			return IB_INSUFFICIENT_RESOURCES;
		}
	}

	if (hob_p->hh_hndl)
	{
		THH_hob_set_async_eventh(hob_p->hh_hndl,
			mlnx_async_cb,
			&hob_p->index); // This is the context our CB wants to receive
		THH_hob_set_comp_eventh( hob_p->hh_hndl,
			mlnx_comp_cb,
			&hob_p->index); // This is the context our CB wants to receive
		hob_p->comp_cb_p  = comp_cb_p;
		hob_p->async_cb_p = async_cb_p;
		hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL
		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hca_idx %d context 0x%p\n", hob_p - mlnx_hob_array, ib_context));
		return IB_SUCCESS;
	}
	/* NOTE(review): if the HOB has no hh_hndl the async proc manager
	 * created above is intentionally left in place for a later retry. */
	return IB_ERROR;
}
279 \r
280 /////////////////////////////////////////////////////////\r
281 /////////////////////////////////////////////////////////\r
282 ib_api_status_t\r
283 mlnx_hobs_get_context(\r
284         IN                              mlnx_hob_t                                      *hob_p,\r
285                 OUT                     void                                            **context_p)\r
286 {\r
287         // Verify handle\r
288         CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
289 \r
290         if (hob_p->hh_hndl)\r
291         {\r
292                 if (context_p) *context_p = &hob_p->index;\r
293                 return IB_SUCCESS;\r
294         }\r
295         return IB_ERROR;\r
296 }\r
297 \r
298 /////////////////////////////////////////////////////////\r
299 /////////////////////////////////////////////////////////\r
300 void\r
301 mlnx_hobs_remove(\r
302         IN                              mlnx_hob_t                                      *hob_p)\r
303 {\r
304         cl_async_proc_t *p_async_proc;\r
305         mlnx_cache_t    *p_cache;\r
306 \r
307         // Verify handle\r
308         CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
309 \r
310         cl_spinlock_acquire( &hob_lock );\r
311 \r
312         hob_p->mark = E_MARK_INVALID;\r
313 \r
314         p_async_proc = hob_p->async_proc_mgr_p;\r
315         hob_p->async_proc_mgr_p = NULL;\r
316 \r
317         p_cache = hob_p->cache;\r
318         hob_p->cache = NULL;\r
319 \r
320         hob_p->hh_hndl = NULL;\r
321         hob_p->comp_cb_p = NULL;\r
322         hob_p->async_cb_p = NULL;\r
323         hob_p->ca_context = NULL;\r
324         hob_p->cl_device_h = NULL;\r
325 \r
326         cl_spinlock_release( &hob_lock );\r
327 \r
328         if( p_async_proc )\r
329         {\r
330                 cl_async_proc_destroy( p_async_proc );\r
331                 cl_free( p_async_proc );\r
332         }\r
333 \r
334         if( p_cache )\r
335                 cl_free( p_cache );\r
336 \r
337         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hobs_remove idx %d hh_hndl 0x%p\n", hob_p - mlnx_hob_array, hob_p->hh_hndl));\r
338 }\r
339 \r
340 /////////////////////////////////////////////////////////\r
341 /////////////////////////////////////////////////////////\r
342 ib_api_status_t\r
343 mlnx_hobs_lookup(\r
344         IN                              HH_hca_hndl_t                           hndl,\r
345                 OUT                     mlnx_hob_t                                      **hca_p)\r
346 {\r
347         u_int32_t idx;\r
348 \r
349         if (!hca_p)\r
350                 return IB_ERROR;\r
351 \r
352         cl_spinlock_acquire( &hob_lock );\r
353         for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)\r
354         {\r
355                 if (hndl == mlnx_hob_array[idx].hh_hndl)\r
356                 {\r
357                         *hca_p = &mlnx_hob_array[idx];\r
358                         cl_spinlock_release( &hob_lock );\r
359                         return IB_SUCCESS;\r
360                 }\r
361         }\r
362         cl_spinlock_release( &hob_lock );\r
363         return IB_ERROR;\r
364 }\r
365 \r
366 /////////////////////////////////////////////////////////\r
367 /////////////////////////////////////////////////////////\r
/*
 * Return the HH HCA handle bound to this HOB (may be NULL if the slot
 * was released).  hndl_p is optional; nothing happens when it is NULL.
 */
void
mlnx_hobs_get_handle(
	IN				mlnx_hob_t					*hob_p,
		OUT			HH_hca_hndl_t				*hndl_p)
{
	// Verify handle
	CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

	if (hndl_p)
		*hndl_p = hob_p->hh_hndl;
}
379 \r
380 /////////////////////////////////////////////////////////\r
381 /////////////////////////////////////////////////////////\r
/*
 * Map a kernel HOB to its user-level companion (HOBUL).
 * Returns NULL for an out-of-range HOB pointer, or the HOBUL slot
 * (which may itself be NULL if mlnx_hobul_new was never called).
 */
mlnx_hobul_t *
mlnx_hobs_get_hobul(
	IN				mlnx_hob_t					*hob_p)
{
	// Verify handle
	if ((hob_p - mlnx_hob_array) >= MLNX_NUM_HOBKL)
		return NULL;

	return mlnx_hobul_array[hob_p->index];
}
392 \r
393 \r
/*
 * Return ceil(log2(n)): the smallest k such that (1 << k) >= n.
 * Returns 0 for n == 0 or n == 1; returns 31 for n with bit 31 set.
 */
static int priv_ceil_log2(u_int32_t n)
{
	int shift;

	/* Locate the highest set bit (index of the floor(log2) position).
	 * BUG FIX: use 1U — evaluating (1 << 31) on a signed int is
	 * undefined behavior in C (C11 6.5.7). */
	for (shift = 31; shift > 0; shift--)
		if (n & (1U << shift)) break;

	/* Round up when n is not an exact power of two. */
	if (((u_int32_t)1 << shift) < n) shift++;

	return shift;
}
405 \r
406 /////////////////////////////////////////////////////////\r
407 // ### HOBUL\r
408 /////////////////////////////////////////////////////////\r
/*
 * Create and register the user-level companion object (HOBUL) for a
 * kernel HOB: creates the THHUL HOB, caches device identifiers and
 * per-object resource sizes, sizes the CQ/QP/PD tables from the HCA
 * capabilities, allocates and mutex-initializes those tables, and
 * finally publishes the HOBUL in mlnx_hobul_array under hob_lock.
 *
 * On any failure everything acquired so far is torn down via the
 * goto-cleanup path and an error status is returned.
 */
ib_api_status_t
mlnx_hobul_new(
	IN				mlnx_hob_t					*hob_p,
	IN				HH_hca_hndl_t				hh_hndl,
	IN				void						*resources_p)
{
	mlnx_hobul_t		*hobul_p;
	HH_hca_dev_t		*hca_ul_info;
	ib_api_status_t		status;
	VAPI_hca_cap_t		hca_caps;
	u_int32_t			i;
#if MLNX_COMP_MODEL == 1
	/* Round-robin processor assignment for per-CQ completion DPCs. */
	static uint32_t		proc_num = 0;
#endif

	// Verify handle
	CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

	if (NULL == (hobul_p = cl_zalloc( sizeof(mlnx_hobul_t))))
		return IB_INSUFFICIENT_MEMORY;

	// The following will NULL all pointers/sizes (used in cleanup)
//	cl_memclr(hobul_p, sizeof (mlnx_hobul_t));

	hobul_p->hh_hndl = hh_hndl;

	if (HH_OK != THHUL_hob_create(resources_p, hh_hndl->dev_id, &hobul_p->hhul_hndl))
	{
		status = IB_INSUFFICIENT_RESOURCES;
		goto cleanup;
	}

	/* The HH handle doubles as the device-info structure. */
	hca_ul_info = (HH_hca_dev_t *)hh_hndl;

	if (hca_ul_info)
	{
		hobul_p->vendor_id = hca_ul_info->vendor_id;
		hobul_p->device_id = hca_ul_info->dev_id;
		hobul_p->hca_ul_resources_p = resources_p;
		hobul_p->cq_ul_resources_sz = hca_ul_info->cq_ul_resources_sz;
		hobul_p->qp_ul_resources_sz = hca_ul_info->qp_ul_resources_sz;
		hobul_p->pd_ul_resources_sz = hca_ul_info->pd_ul_resources_sz;
	}

	if (HH_OK != THH_hob_query(hh_hndl, &hca_caps))
	{
		status = IB_ERROR;
		goto cleanup;
	}

	/* Table sizes are rounded up to the next power of two of the HCA's
	 * capability limits; the masks map handles to table indices. */
	hobul_p->cq_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_cq));
	hobul_p->qp_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_qp)); // Currently mask = 0xFFFF
	hobul_p->max_pd = MASK32(priv_ceil_log2(hca_caps.max_pd_num)) + 1;
	hobul_p->max_cq = hobul_p->cq_idx_mask + 1;
	hobul_p->max_qp = hobul_p->qp_idx_mask + 1;

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: sizes cq 0%x qp 0%x pd 0%x\n", hca_caps.max_num_cq, hca_caps.max_num_qp, hca_caps.max_pd_num));

	/* create and initialize the data stucture for CQs */
	hobul_p->cq_info_tbl = cl_zalloc(hobul_p->max_cq * sizeof (cq_info_t));

	/* create and initialize the data stucture for QPs */
	hobul_p->qp_info_tbl = cl_zalloc(hobul_p->max_qp * sizeof (qp_info_t));

	/* create and initialize the data stucture for PDs */
	hobul_p->pd_info_tbl = cl_zalloc(hobul_p->max_pd * sizeof (pd_info_t));

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: alloc failed?  cq=%d qp=%d pd=%d\n",
		!hobul_p->cq_info_tbl, !hobul_p->qp_info_tbl, !hobul_p->pd_info_tbl));

	if (!hobul_p->pd_info_tbl ||
		!hobul_p->qp_info_tbl ||
		!hobul_p->cq_info_tbl)
	{
		status = IB_INSUFFICIENT_MEMORY;
		goto cleanup;
	}

	/* Initialize all mutexes. */
	for( i = 0; i < hobul_p->max_cq; i++ )
	{
		cl_mutex_construct( &hobul_p->cq_info_tbl[i].mutex );
#if MLNX_COMP_MODEL
		KeInitializeDpc( &hobul_p->cq_info_tbl[i].dpc,
			mlnx_comp_dpc, &hobul_p->cq_info_tbl[i] );
#if MLNX_COMP_MODEL == 1
		/* Model 1: pin each CQ's DPC to a processor, round-robin. */
		KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[i].dpc,
			(CCHAR)(proc_num++ % cl_proc_count()) );
#endif	/* MLNX_COMP_MODEL == 1 */
#endif	/* MLNX_COMP_MODEL */
	}

	for( i = 0; i < hobul_p->max_qp; i++ )
		cl_mutex_construct( &hobul_p->qp_info_tbl[i].mutex );

	for( i = 0; i < hobul_p->max_pd; i++ )
		cl_mutex_construct( &hobul_p->pd_info_tbl[i].mutex );

	/* Construct-all first, then init-all, so the cleanup path can
	 * safely destroy every entry regardless of which init failed. */
	for( i = 0; i < hobul_p->max_cq; i++ )
	{
		if( cl_mutex_init( &hobul_p->cq_info_tbl[i].mutex ) != CL_SUCCESS )
		{
			status = IB_ERROR;
			goto cleanup;
		}
	}

	for( i = 0; i < hobul_p->max_qp; i++ )
	{
		if( cl_mutex_init( &hobul_p->qp_info_tbl[i].mutex ) != CL_SUCCESS )
		{
			status = IB_ERROR;
			goto cleanup;
		}
	}

	for( i = 0; i < hobul_p->max_pd; i++ )
	{
		if( cl_mutex_init( &hobul_p->pd_info_tbl[i].mutex ) != CL_SUCCESS )
		{
			status = IB_ERROR;
			goto cleanup;
		}
	}

	hobul_p->log2_mpt_size = ((THH_hca_ul_resources_t *)resources_p)->log2_mpt_size;
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("log2_mpt_size = %d\n", hobul_p->log2_mpt_size));

	/* Publish the HOBUL; lookups go through mlnx_hobul_array. */
	cl_spinlock_acquire(&hob_lock);
	mlnx_hobul_array[hob_p->index] = hobul_p;
	cl_spinlock_release(&hob_lock);

	return IB_SUCCESS;

cleanup:
	/* NOTE(review): when an allocation above fails, this path destroys
	 * mutexes in tables that were allocated but never constructed —
	 * presumably harmless because cl_zalloc zero-fills them, but worth
	 * confirming against complib's cl_mutex_destroy contract. */
	if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl );
	if (hobul_p->pd_info_tbl)
	{
		for( i = 0; i < hobul_p->max_pd; i++ )
			cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex );
		cl_free(hobul_p->pd_info_tbl);
	}
	if (hobul_p->qp_info_tbl)
	{
		for( i = 0; i < hobul_p->max_qp; i++ )
			cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex );
		cl_free(hobul_p->qp_info_tbl);
	}
	if (hobul_p->cq_info_tbl)
	{
		for( i = 0; i < hobul_p->max_cq; i++ )
			cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex );
		cl_free(hobul_p->cq_info_tbl);
	}
	if (hobul_p) cl_free( hobul_p);
	return status;
}
566 \r
567 /////////////////////////////////////////////////////////\r
568 /////////////////////////////////////////////////////////\r
569 void\r
570 mlnx_hobul_get(\r
571         IN                              mlnx_hob_t                                      *hob_p,\r
572                 OUT                     void                                            **resources_p )\r
573 {\r
574         mlnx_hobul_t            *hobul_p;\r
575 \r
576         // Verify handle\r
577         CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
578 \r
579         hobul_p = mlnx_hobul_array[hob_p->index];\r
580 \r
581         if (hobul_p && resources_p)\r
582         {\r
583                 *resources_p = hobul_p->hca_ul_resources_p;\r
584         }\r
585 }\r
586 \r
587 /////////////////////////////////////////////////////////\r
588 /////////////////////////////////////////////////////////\r
/*
 * Tear down the HOBUL for a kernel HOB: unpublish it from
 * mlnx_hobul_array under hob_lock, then destroy the THHUL HOB and the
 * PD/QP/CQ tables (mutexes destroyed, queued completion DPCs removed).
 * Safe to call when no HOBUL was ever created.
 */
void
mlnx_hobul_delete(
	IN				mlnx_hob_t					*hob_p)
{
	mlnx_hobul_t		*hobul_p;
	u_int32_t			i;

	// Verify handle
	CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);

	/* Swap the slot to NULL under the lock so concurrent lookups can
	 * no longer reach the HOBUL while we free it. */
	cl_spinlock_acquire(&hob_lock);
	hobul_p = mlnx_hobul_array[hob_p->index];
	mlnx_hobul_array[hob_p->index] = NULL;
	cl_spinlock_release(&hob_lock);

	if (!hobul_p) return;

	if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl );
	if (hobul_p->pd_info_tbl)
	{
		for( i = 0; i < hobul_p->max_pd; i++ )
			cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex );
		cl_free(hobul_p->pd_info_tbl);
	}
	if (hobul_p->qp_info_tbl)
	{
		for( i = 0; i < hobul_p->max_qp; i++ )
			cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex );
		cl_free(hobul_p->qp_info_tbl);
	}
	if (hobul_p->cq_info_tbl)
	{
		for( i = 0; i < hobul_p->max_cq; i++ )
		{
			/* NOTE(review): KeRemoveQueueDpc is called unconditionally
			 * here, while the DPC is only initialized under
			 * #if MLNX_COMP_MODEL in mlnx_hobul_new — confirm the dpc
			 * field exists (and is zero-initialized) when the
			 * completion model is disabled. */
			KeRemoveQueueDpc( &hobul_p->cq_info_tbl[i].dpc );
			cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex );
		}
		cl_free(hobul_p->cq_info_tbl);
	}
	if (hobul_p) cl_free( hobul_p);
}
630 \r
631 /////////////////////////////////////////////////////////\r
632 // ### Callbacks\r
633 /////////////////////////////////////////////////////////\r
634 \r
635 ib_async_event_t\r
636 mlnx_map_vapi_event_type(\r
637         IN                              unsigned                                        event_id,\r
638                 OUT                     ENUM_EVENT_CLASS                        *event_class_p)\r
639 {\r
640         switch (event_id)\r
641         {\r
642         case VAPI_QP_PATH_MIGRATED:\r
643                 if (event_class_p) *event_class_p = E_EV_QP;\r
644                 return IB_AE_QP_APM;\r
645 \r
646         case VAPI_QP_COMM_ESTABLISHED:\r
647                 if (event_class_p) *event_class_p = E_EV_QP;\r
648                 return IB_AE_QP_COMM;\r
649 \r
650         case VAPI_SEND_QUEUE_DRAINED:\r
651                 if (event_class_p) *event_class_p = E_EV_QP;\r
652                 return IB_AE_SQ_DRAINED;\r
653 \r
654         case VAPI_CQ_ERROR:\r
655                 if (event_class_p) *event_class_p = E_EV_CQ;\r
656                 return IB_AE_CQ_ERROR;\r
657 \r
658         case VAPI_LOCAL_WQ_INV_REQUEST_ERROR:\r
659                 if (event_class_p) *event_class_p = E_EV_QP;\r
660                 return IB_AE_WQ_REQ_ERROR;\r
661 \r
662         case VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR:\r
663                 if (event_class_p) *event_class_p = E_EV_QP;\r
664                 return IB_AE_WQ_ACCESS_ERROR;\r
665 \r
666         case VAPI_LOCAL_WQ_CATASTROPHIC_ERROR:\r
667                 if (event_class_p) *event_class_p = E_EV_QP;\r
668                 return IB_AE_QP_FATAL;\r
669 \r
670         case VAPI_PATH_MIG_REQ_ERROR:\r
671                 if (event_class_p) *event_class_p = E_EV_QP;\r
672                 return IB_AE_QP_APM_ERROR;\r
673 \r
674         case VAPI_LOCAL_CATASTROPHIC_ERROR:\r
675                 if (event_class_p) *event_class_p = E_EV_CA;\r
676                 return IB_AE_LOCAL_FATAL;\r
677 \r
678         case VAPI_PORT_ERROR:\r
679                 /*\r
680                  * In tavor_hca\src\Hca\hcahal\tavor\eventp\event_irqh.c:\r
681                  * TAVOR_IF_EV_TYPE_PORT_ERR maps one of two port events:\r
682                  *      - TAVOR_IF_SUB_EV_PORT_DOWN\r
683                  *      - TAVOR_IF_SUB_EV_PORT_UP\r
684                  * \r
685                  * These map to (respectively)\r
686                  *      - VAPI_PORT_ERROR\r
687                  *      - VAPI_PORT_ACTIVE\r
688                  */\r
689                 if (event_class_p) *event_class_p = E_EV_CA;\r
690                 return IB_AE_PORT_DOWN; /* INIT, ARMED, DOWN */\r
691 \r
692         case VAPI_PORT_ACTIVE:\r
693                 if (event_class_p) *event_class_p = E_EV_CA;\r
694                 return IB_AE_PORT_ACTIVE; /* ACTIVE STATE */\r
695 \r
696         default:\r
697                 CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n",\r
698                         event_id, VAPI_PORT_ACTIVE, IB_AE_LOCAL_FATAL));\r
699                 if (event_class_p) *event_class_p = E_EV_CA;\r
700                 return IB_AE_LOCAL_FATAL;\r
701         }\r
702 }\r
703 \r
/*
 * Fill an IBAL event record from an HH event record: maps the VAPI event
 * type and reports the event class.  Only the type field is set here;
 * ib_event_p->context is filled in by the caller, and no trap info is
 * produced (no traps currently generated).
 */
void
mlnx_conv_vapi_event(
	IN				HH_event_record_t			*hh_event_p,
	IN				ib_event_rec_t				*ib_event_p,
		OUT			ENUM_EVENT_CLASS			*event_class_p)
{

	// ib_event_p->context is handled by the caller
	//
	ib_event_p->type = mlnx_map_vapi_event_type(hh_event_p->etype, event_class_p);

	// no traps currently generated
	// ib_event_p->trap_info.lid  =  ;
	// ib_event_p->trap_info.port_guid = ;
	// ib_event_p->trap_info.port_num  = hh_er;
}
720 \r
721 void\r
722 mlnx_async_cb(\r
723         IN                              HH_hca_hndl_t                           hh_hndl,\r
724         IN                              HH_event_record_t                       *hh_er_p,\r
725         IN                              void                                            *private_data)\r
726 {\r
727         u_int32_t                       obj_idx;\r
728         mlnx_hob_t                      *hob_p;\r
729 \r
730         mlnx_cb_data_t          cb_data;\r
731         mlnx_cb_data_t          *cb_data_p;\r
732 \r
733         CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC CB %p (0x%x)\n",\r
734                 private_data, (private_data) ? *(u_int32_t *)private_data : 0xB5));\r
735 \r
736         if (!private_data || !hh_er_p) return;\r
737 \r
738         obj_idx =  *(u_int32_t *)private_data;\r
739         if (obj_idx >= MLNX_NUM_HOBKL) return;\r
740 \r
741         hob_p = mlnx_hob_array + obj_idx;\r
742 \r
743         // g_mlnx_dpc2thread will be initialized as a module paramter (default - disabled(0))\r
744         if (g_mlnx_dpc2thread)\r
745         {\r
746                 cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t));\r
747                 if (!cb_data_p) return;\r
748 \r
749                 cb_data_p->hh_hndl      = hh_hndl;\r
750                 cb_data_p->private_data = private_data;\r
751                 cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t));\r
752                 cb_data_p->async_item.pfn_callback = mlnx_async_dpc;\r
753                 cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item );\r
754         } else\r
755         {\r
756                 cb_data_p = &cb_data;\r
757 \r
758                 cb_data_p->hh_hndl      = hh_hndl;\r
759                 cb_data_p->private_data = private_data;\r
760                 cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t));\r
761                 mlnx_async_dpc( &cb_data_p->async_item );\r
762         }\r
763 }\r
764 \r
/*
 * DPC / worker-thread body for asynchronous HCA events.
 *
 * Converts the VAPI event record carried inside the mlnx_cb_data_t into an
 * IBAL ib_event_rec_t, resolves the user context of the event source
 * (CA, QP or CQ) and invokes the async callback registered on the HOB.
 *
 * Ownership: when g_mlnx_dpc2thread is set, cb_data_p was heap-allocated
 * by mlnx_async_cb and is freed here; otherwise it lives on the caller's
 * stack and must not be freed.
 */
static void
mlnx_async_dpc(
	IN				cl_async_proc_item_t		*async_item_p )
{
	HH_event_record_t	*hh_er_p;
	u_int32_t			obj_idx;
	mlnx_hob_t			*hob_p;
	mlnx_hobul_t		*hobul_p;
	mlnx_cb_data_t		*cb_data_p;

	ENUM_EVENT_CLASS	event_class;
	ib_event_rec_t		event_r;

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("ASYNC DPC %p\n", async_item_p));

	// Recover the full callback record from the embedded async item.
	cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item );

	if (!cb_data_p) return;

	// private_data holds the HCA index; it was range-checked by
	// mlnx_async_cb before this routine was queued/called.
	hh_er_p =  &cb_data_p->hh_er;
	obj_idx =  *(u_int32_t *)cb_data_p->private_data;
	hob_p = mlnx_hob_array + obj_idx;
	hobul_p = mlnx_hobul_array[obj_idx];

	// NOTE(review): this trace reads hob_p->ca_context before the validity
	// checks below; hob_p points into the static HOB array so the access
	// itself is safe, but ca_context may be stale for a torn-down HCA.
	CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC type %d ca_context %p\n",
		hh_er_p->etype, hob_p->ca_context));

	// Drop the event if the HCA is not fully opened or no callback is set.
	if (!hob_p ||
		!hobul_p ||
		!hob_p->hh_hndl ||
		!hob_p->async_cb_p)
	{
		goto cleanup;
	}

	cl_memclr(&event_r, sizeof(event_r));
	mlnx_conv_vapi_event(hh_er_p, &event_r, &event_class);

	// Attach the consumer context that matches the event source.
	switch(event_class)
	{
	case E_EV_CA:
		event_r.context = (void *)hob_p->ca_context;
		break;

	case E_EV_QP:
		{
			// event_modifier carries the QP number; validate against table size.
			obj_idx = hh_er_p->event_modifier.qpn;
			if (obj_idx < hobul_p->max_qp)
				event_r.context = (void *)hobul_p->qp_info_tbl[obj_idx].qp_context;
			else
			{
				CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad qpn 0x%x max 0x%x\n", obj_idx, hobul_p->max_qp));
				goto cleanup;
			}
		}
		break;

	case E_EV_CQ:
		{
			// event_modifier carries the CQ number; validate against table size.
			obj_idx = hh_er_p->event_modifier.cq;
			if (obj_idx < hobul_p->max_cq)
				event_r.context = (void *)hobul_p->cq_info_tbl[obj_idx].cq_context;
			else
			{
				CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad cqn 0x%x max 0x%x\n", obj_idx, hobul_p->max_cq));
				goto cleanup;
			}
		}
		break;

	case E_EV_LAST:
	default:
		// CL_ASSERT(0); // This shouldn't happen
		CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC unknown event_class 0x%x\n", event_class));
		break;
	}

	// Call the registered CB
	(*hob_p->async_cb_p)(&event_r);
	// Fall Through
cleanup:
	// Heap-allocated record (thread-dispatch mode only) is released here.
	if (g_mlnx_dpc2thread)
	{
		cl_free(cb_data_p);
	}
}
851 \r
852 /////////////////////////////////////////////////////////\r
853 /////////////////////////////////////////////////////////\r
854 void\r
855 mlnx_comp_cb(\r
856         IN                              HH_hca_hndl_t                           hh_hndl,\r
857         IN                              HH_cq_hndl_t                            hh_cq,\r
858         IN                              void                                            *private_data)\r
859 {\r
860 #if MLNX_COMP_MODEL\r
861         u_int32_t                       cq_num;\r
862         u_int32_t                       hca_idx;\r
863         mlnx_hob_t                      *hob_p;\r
864         mlnx_hobul_t            *hobul_p;\r
865 #if MLNX_COMP_MODEL == 2\r
866         static uint32_t         proc_num = 0;\r
867 #endif\r
868 \r
869         CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data));\r
870 \r
871         UNUSED_PARAM( hh_hndl );\r
872 \r
873         hca_idx = *(u_int32_t *)private_data;\r
874         hob_p   = mlnx_hob_array + hca_idx;\r
875         hobul_p = mlnx_hobul_array[hca_idx];\r
876         cq_num  = hh_cq & hobul_p->cq_idx_mask;\r
877 \r
878         if (NULL != hob_p && NULL != hobul_p &&\r
879                 hob_p->hh_hndl && hob_p->comp_cb_p)\r
880         {\r
881                 if (cq_num < hobul_p->max_cq)\r
882                 {\r
883 #if MLNX_COMP_MODEL == 2\r
884                         KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[cq_num].dpc,\r
885                                 (CCHAR)(proc_num++ % cl_proc_count()) );\r
886 #endif  /* MLNX_COMP_MODEL == 2 */\r
887                         KeInsertQueueDpc( &hobul_p->cq_info_tbl[cq_num].dpc,\r
888                                 hob_p, NULL );\r
889                 }\r
890                 else\r
891                 {\r
892                         HCA_TRACE( HCA_DBG_ERROR, ("CQ index out of range!!!\n") );\r
893                 }\r
894         }\r
895 #else   /* MLNX_COMP_MODEL */\r
896         u_int32_t                       obj_idx;\r
897         mlnx_hob_t                      *hob_p;\r
898 \r
899         mlnx_cb_data_t          cb_data;\r
900         mlnx_cb_data_t          *cb_data_p;\r
901 \r
902         CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data));\r
903 \r
904         if (!private_data) return;\r
905 \r
906         obj_idx =  *(u_int32_t *)private_data;\r
907         hob_p = mlnx_hob_array + obj_idx;\r
908         if (!hob_p) return;\r
909 \r
910         if (g_mlnx_dpc2thread)\r
911         {\r
912                 cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t));\r
913                 if (!cb_data_p) return;\r
914 \r
915                 cb_data_p->hh_hndl      = hh_hndl;\r
916                 cb_data_p->hh_cq        = hh_cq;\r
917                 cb_data_p->private_data = private_data;\r
918 \r
919                 cb_data_p->async_item.pfn_callback = mlnx_comp_dpc;\r
920 \r
921                 // Report completion through async_proc\r
922                 cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item );\r
923 \r
924         } else\r
925         {\r
926                 cb_data_p = &cb_data;\r
927 \r
928                 cb_data_p->hh_hndl      = hh_hndl;\r
929                 cb_data_p->hh_cq        = hh_cq;\r
930                 cb_data_p->private_data = private_data;\r
931 \r
932                 // Report completion directly from DPC (verbs should NOT sleep)\r
933                 mlnx_comp_dpc( &cb_data_p->async_item );\r
934         }\r
935 #endif  /* MLNX_COMP_MODEL */\r
936 }\r
937 \r
938 #if MLNX_COMP_MODEL\r
939 static void\r
940 mlnx_comp_dpc(\r
941         IN                              PRKDPC                                          p_dpc,\r
942         IN                              void                                            *context,\r
943         IN                              void                                            *arg1,\r
944         IN                              void                                            *unused )\r
945 {\r
946         mlnx_hob_t              *hob_p = (mlnx_hob_t*)arg1;\r
947         UNUSED_PARAM( p_dpc );\r
948         UNUSED_PARAM( unused );\r
949 \r
950         hob_p->comp_cb_p( (void*)((cq_info_t*)context)->cq_context );\r
951 }\r
952 #else   /* MLNX_COMP_MODEL */\r
953 static void\r
954 mlnx_comp_dpc(\r
955         IN                              cl_async_proc_item_t            *async_item_p )\r
956 {\r
957         u_int32_t                       cq_num;\r
958         u_int32_t                       hca_idx;\r
959         mlnx_hob_t                      *hob_p;\r
960         mlnx_hobul_t            *hobul_p;\r
961         mlnx_cb_data_t          *cb_data_p;\r
962 \r
963         CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP DPC %p\n", async_item_p));\r
964 \r
965         cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item );\r
966         if (!cb_data_p) return;\r
967 \r
968         hca_idx = *(u_int32_t *)cb_data_p->private_data;\r
969         hob_p   = mlnx_hob_array + hca_idx;\r
970         hobul_p = mlnx_hobul_array[hca_idx];\r
971         cq_num  = (u_int32_t)cb_data_p->hh_cq & hobul_p->cq_idx_mask;\r
972 \r
973         if (NULL != hob_p && NULL != hobul_p &&\r
974                 hob_p->hh_hndl && hob_p->comp_cb_p)\r
975         {\r
976                 if (cq_num < hobul_p->max_cq)\r
977                 {\r
978                         (*hob_p->comp_cb_p)((void *)hobul_p->cq_info_tbl[cq_num].cq_context);\r
979                 }\r
980         }\r
981 \r
982         if (g_mlnx_dpc2thread)\r
983         {\r
984                 cl_free(cb_data_p);\r
985         }\r
986 }\r
987 #endif  /* MLNX_COMP_MODEL */\r
988 \r
989 // ### Conversions\r
990 \r
991 /////////////////////////////////////////////////////////\r
992 /////////////////////////////////////////////////////////\r
993 VAPI_mrw_acl_t\r
994 map_ibal_acl(\r
995         IN                              ib_access_t                                     ibal_acl)\r
996 {\r
997         VAPI_mrw_acl_t          vapi_acl = 0;\r
998 \r
999         if (ibal_acl & IB_AC_RDMA_READ)   vapi_acl |= VAPI_EN_REMOTE_READ;\r
1000         if (ibal_acl & IB_AC_RDMA_WRITE)  vapi_acl |= VAPI_EN_REMOTE_WRITE;\r
1001         if (ibal_acl & IB_AC_ATOMIC)      vapi_acl |= VAPI_EN_REMOTE_ATOM;\r
1002         if (ibal_acl & IB_AC_LOCAL_WRITE) vapi_acl |= VAPI_EN_LOCAL_WRITE;\r
1003         if (ibal_acl & IB_AC_MW_BIND)     vapi_acl |= VAPI_EN_MEMREG_BIND;\r
1004 \r
1005         return vapi_acl;\r
1006 }\r
1007 \r
1008 /////////////////////////////////////////////////////////\r
1009 /////////////////////////////////////////////////////////\r
1010 ib_access_t\r
1011 map_vapi_acl(\r
1012         IN                              VAPI_mrw_acl_t                          vapi_acl)\r
1013 {\r
1014         ib_access_t ibal_acl = 0;\r
1015 \r
1016         if (vapi_acl & VAPI_EN_REMOTE_READ)  ibal_acl |= IB_AC_RDMA_READ;\r
1017         if (vapi_acl & VAPI_EN_REMOTE_WRITE) ibal_acl |= IB_AC_RDMA_WRITE;\r
1018         if (vapi_acl & VAPI_EN_REMOTE_ATOM)  ibal_acl |= IB_AC_ATOMIC;\r
1019         if (vapi_acl & VAPI_EN_LOCAL_WRITE)  ibal_acl |= IB_AC_LOCAL_WRITE;\r
1020         if (vapi_acl & VAPI_EN_MEMREG_BIND)  ibal_acl |= IB_AC_MW_BIND;\r
1021 \r
1022         return ibal_acl;\r
1023 }\r
1024 \r
1025 /////////////////////////////////////////////////////////\r
1026 /////////////////////////////////////////////////////////\r
1027 static VAPI_rdma_atom_acl_t \r
1028 map_ibal_qp_acl(\r
1029         IN                              ib_access_t                                     ibal_acl)\r
1030 {\r
1031         VAPI_rdma_atom_acl_t vapi_qp_acl = 0;\r
1032 \r
1033         if (ibal_acl & IB_AC_RDMA_WRITE) vapi_qp_acl |= VAPI_EN_REM_WRITE;\r
1034         if (ibal_acl & IB_AC_RDMA_READ)  vapi_qp_acl |= VAPI_EN_REM_READ;\r
1035         if (ibal_acl & IB_AC_ATOMIC)     vapi_qp_acl |= VAPI_EN_REM_ATOMIC_OP;\r
1036 \r
1037         return vapi_qp_acl;\r
1038 \r
1039 }\r
1040 \r
1041 /////////////////////////////////////////////////////////\r
1042 /////////////////////////////////////////////////////////\r
1043 static ib_access_t\r
1044 map_vapi_qp_acl(\r
1045         IN                              VAPI_rdma_atom_acl_t            vapi_qp_acl)\r
1046 {\r
1047         ib_access_t     ibal_acl = IB_AC_LOCAL_WRITE;\r
1048 \r
1049         if (vapi_qp_acl & VAPI_EN_REM_WRITE)     ibal_acl |= IB_AC_RDMA_WRITE;\r
1050         if (vapi_qp_acl & VAPI_EN_REM_READ)      ibal_acl |= IB_AC_RDMA_READ;\r
1051         if (vapi_qp_acl & VAPI_EN_REM_ATOMIC_OP) ibal_acl |= IB_AC_ATOMIC;\r
1052 \r
1053         return ibal_acl;\r
1054 }\r
1055 \r
1056 \r
1057 /////////////////////////////////////////////////////////\r
1058 /////////////////////////////////////////////////////////\r
/*
 * Pin the virtual range described by mro_p (mr_start / mr_size) into an
 * IO buffer (mro_p->mr_iobuf) so the HCA can DMA to/from it.
 *
 * um_call selects the protection context used for the lock: the current
 * user process for user-mode callers, the kernel context otherwise.
 *
 * Re-registration safety: the NEW range is registered before any
 * previously held iobuf is deregistered, so at no point are the pages
 * of an in-use region unpinned.
 *
 * Returns IB_SUCCESS, or IB_ERROR if either MOSAL call fails.  On a
 * failed register, mro_p->mr_iobuf still refers to the old registration.
 */
ib_api_status_t
mlnx_lock_region(
	IN				mlnx_mro_t					*mro_p,
	IN				boolean_t					um_call )
{
	MOSAL_iobuf_t	old_iobuf;

	// Find context
	if( um_call )
		mro_p->mr_prot_ctx = MOSAL_get_current_prot_ctx();
	else
		mro_p->mr_prot_ctx = MOSAL_get_kernel_prot_ctx();

	// Save pointer to existing locked region.
	old_iobuf = mro_p->mr_iobuf;

	// Lock Region
	if (MT_OK != MOSAL_iobuf_register((MT_virt_addr_t)mro_p->mr_start,
		(MT_size_t)mro_p->mr_size,
		mro_p->mr_prot_ctx,
		mro_p->mr_mosal_perm,
		&mro_p->mr_iobuf,
		0 ))
	{
		return IB_ERROR;
	}

	// Release the previous registration only after the new one succeeded.
	if( old_iobuf )
	{
		if( MT_OK != MOSAL_iobuf_deregister( old_iobuf ) )
			return IB_ERROR;
	}

	return IB_SUCCESS;
}
1094 \r
1095 \r
1096 /////////////////////////////////////////////////////////\r
1097 /////////////////////////////////////////////////////////\r
/*
 * Translate an IBAL memory-region create/modify request into HH MR
 * properties, locking the region's pages as needed.
 *
 * change_flags selects which parts of mro_p are (re)computed:
 *   VAPI_MR_CHANGE_ACL   - refresh ACL and the derived MOSAL permissions.
 *   VAPI_MR_CHANGE_TRANS - refresh start/size and (re)lock the pages.
 * ACL handling must precede translation because the MOSAL permission
 * mask is consumed by mlnx_lock_region.
 *
 * On success mr_props_p describes the region with an IOBUF-type TPT.
 * Returns IB_SUCCESS, or the error propagated from mlnx_lock_region.
 */
ib_api_status_t
mlnx_conv_ibal_mr_create(
	IN				u_int32_t					pd_idx,
	IN	OUT			mlnx_mro_t					*mro_p,
	IN				VAPI_mr_change_t			change_flags,
	IN				ib_mr_create_t const		*p_mr_create,
	IN				boolean_t					um_call,
		OUT			HH_mr_t						*mr_props_p )
{
	ib_api_status_t		status;

	/* Set ACL information first since it is used to lock the region. */
	if( change_flags & VAPI_MR_CHANGE_ACL )
	{
		mro_p->mr_acl = map_ibal_acl( p_mr_create->access_ctrl );
		// This computation should be externalized by THH
		mro_p->mr_mosal_perm =
			MOSAL_PERM_READ |
			((mro_p->mr_acl & VAPI_EN_LOCAL_WRITE) ? MOSAL_PERM_WRITE : 0);
	}

	if( change_flags & VAPI_MR_CHANGE_TRANS )
	{
		CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("addr 0x%p size %"PRId64"\n", (void *)p_mr_create->vaddr, p_mr_create->length));
		// Build TPT entries
		mro_p->mr_start = (IB_virt_addr_t)p_mr_create->vaddr;
		mro_p->mr_size = p_mr_create->length;
		// Pin the pages; on failure the previous lock state is preserved.
		if (IB_SUCCESS != (status = mlnx_lock_region(mro_p, um_call)))
		{
			return status;
		}
	}

	/* Now fill in the MR properties. */
	mr_props_p->start = mro_p->mr_start;
	mr_props_p->size = mro_p->mr_size;
	mr_props_p->acl = mro_p->mr_acl;
	mr_props_p->pd = pd_idx;

	// Setup MTT info
	mr_props_p->tpt.tpt_type = HH_TPT_IOBUF;
	mr_props_p->tpt.tpt.iobuf = mro_p->mr_iobuf;

	return IB_SUCCESS;
}
1143 \r
1144 /////////////////////////////////////////////////////////\r
1145 // On entry mro_p->mr_start holds the pmr address\r
1146 /////////////////////////////////////////////////////////\r
1147 ib_api_status_t\r
1148 mlnx_conv_ibal_pmr_create(\r
1149         IN                              u_int32_t                                       pd_idx,\r
1150         IN                              mlnx_mro_t                                      *mro_p,\r
1151         IN                              ib_phys_create_t const          *p_pmr_create,\r
1152                 OUT                     HH_mr_t                                         *mr_props_p )\r
1153 {\r
1154         VAPI_phy_addr_t*        buf_lst = NULL;\r
1155         VAPI_size_t*            sz_lst = NULL;\r
1156         u_int32_t                       i;\r
1157         u_int32_t                       page_shift = priv_ceil_log2(p_pmr_create->hca_page_size);\r
1158         u_int64_t                       page_mask = (1 << page_shift) - 1;\r
1159         u_int64_t                       tot_sz = 0;\r
1160 \r
1161         CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, \r
1162                 ("PRE: addr %p size 0x%"PRIx64" shift %d\n",\r
1163                 (void *)(uintn_t)mro_p->mr_start, p_pmr_create->length, page_mask));\r
1164         mro_p->mr_start = (mro_p->mr_start & ~page_mask) | (p_pmr_create->buf_offset & page_mask);\r
1165         CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, \r
1166                 ("POST: addr %p\n", (void *)(uintn_t)mro_p->mr_start));\r
1167 \r
1168         mr_props_p->start = mro_p->mr_start;\r
1169         mr_props_p->size = p_pmr_create->length;\r
1170         mr_props_p->acl = map_ibal_acl(p_pmr_create->access_ctrl);\r
1171         mr_props_p->pd = pd_idx;\r
1172 \r
1173 #ifdef _DEBUG_\r
1174         mro_p->mr_size           = mr_props_p->size;\r
1175 //      mro_p->mr_first_page_addr = 0;\r
1176 //      mro_p->mr_num_pages       = (mro_p->mr_end >> PAGESHIFT) + 1 - (mro_p->mr_start >> PAGESHIFT);\r
1177 //      CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st pg addr 0x%p pages %d\n",\r
1178 //              (void *)mro_p->mr_first_page_addr, p_pmr_create->num_bufs));\r
1179         CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st phys addr 0x%"PRIx64" phys pages %d\n",\r
1180                 p_pmr_create->range_array[0].base_addr, p_pmr_create->num_ranges));\r
1181 #endif\r
1182 \r
1183         // Build TPT entries\r
1184         if (!p_pmr_create->range_array)\r
1185         {\r
1186                 return IB_INVALID_PARAMETER;\r
1187         }\r
1188 \r
1189         if (p_pmr_create->hca_page_size !=\r
1190                 MT_DOWN_ALIGNX_PHYS(p_pmr_create->hca_page_size, page_shift))\r
1191         {\r
1192                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf size is not page aligned\n"));\r
1193                 return IB_INVALID_PARAMETER;\r
1194         }\r
1195 \r
1196         for (i = 0; i < p_pmr_create->num_ranges; i++)\r
1197         {\r
1198                 uint64_t        start_addr = p_pmr_create->range_array[i].base_addr;\r
1199                 uint64_t        end_addr = start_addr + p_pmr_create->range_array[i].size;\r
1200 \r
1201                 if( end_addr < start_addr ) {\r
1202                         CL_TRACE( CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf end < start\n") );\r
1203                         return IB_INVALID_PARAMETER;\r
1204                 }\r
1205 \r
1206                 if (start_addr !=\r
1207                         MT_DOWN_ALIGNX_PHYS(start_addr, page_shift))\r
1208                 {\r
1209                         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf start adrs is not page aligned\n"));\r
1210                         return IB_INVALID_PARAMETER;\r
1211                 }\r
1212 \r
1213                 tot_sz += p_pmr_create->range_array[i].size;\r
1214         }\r
1215 \r
1216         if( tot_sz < p_pmr_create->length + p_pmr_create->buf_offset )\r
1217         {\r
1218                 HCA_TRACE_EXIT( HCA_DBG_ERROR, \r
1219                         ("length(0x"PRIx64") + buf offset(0x"PRIx64") larger than sum "\r
1220                         "of phys ranges(0x"PRIx64")\n",\r
1221                         p_pmr_create->length, p_pmr_create->buf_offset, tot_sz) );\r
1222                 return IB_INVALID_PARAMETER;\r
1223         }\r
1224 \r
1225         if( p_pmr_create->buf_offset > p_pmr_create->range_array[0].size )\r
1226         {\r
1227                 HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
1228                         ("buf offset(0x%x) > than 1st phy range size(0x"PRIx64")\n",\r
1229                         p_pmr_create->buf_offset, p_pmr_create->range_array[0].size) );\r
1230                 return IB_INVALID_PARAMETER;\r
1231         }\r
1232 \r
1233         /* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */\r
1234         buf_lst = (VAPI_phy_addr_t*)cl_pzalloc( sizeof(VAPI_phy_addr_t)*(p_pmr_create->num_ranges));\r
1235         if (!buf_lst)\r
1236         {\r
1237                 HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
1238                         ("Failed to allocate range address list.\n") );\r
1239                 return IB_INSUFFICIENT_MEMORY;\r
1240         }\r
1241 \r
1242 \r
1243         /* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */\r
1244         sz_lst = (VAPI_size_t*)cl_pzalloc( sizeof(VAPI_size_t)*(p_pmr_create->num_ranges));\r
1245         if (!sz_lst)\r
1246         {\r
1247                 cl_free( buf_lst );\r
1248                 HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
1249                         ("Failed to allocate range size list.\n") );\r
1250                 return IB_INSUFFICIENT_MEMORY;\r
1251         }\r
1252 \r
1253         for (i = 0; i < p_pmr_create->num_ranges; i++)\r
1254         {\r
1255                 buf_lst[i] = p_pmr_create->range_array[i].base_addr;\r
1256                 sz_lst[i] = p_pmr_create->range_array[i].size;\r
1257         }\r
1258 \r
1259         mr_props_p->tpt.tpt_type = HH_TPT_BUF;\r
1260         mr_props_p->tpt.num_entries = p_pmr_create->num_ranges;\r
1261         mr_props_p->tpt.tpt.buf_lst.buf_sz_lst = sz_lst;\r
1262         mr_props_p->tpt.tpt.buf_lst.phys_buf_lst = buf_lst; \r
1263         mr_props_p->tpt.tpt.buf_lst.iova_offset = p_pmr_create->buf_offset;\r
1264 \r
1265         return IB_SUCCESS;\r
1266 }\r
1267 \r
1268 \r
1269 u_int8_t\r
1270 mlnx_gid_to_index(\r
1271         IN                              HH_hca_hndl_t                           hh_hndl,\r
1272         IN                              u_int8_t                                        port_num,\r
1273         IN                              u_int8_t                                        *raw_gid)\r
1274 {\r
1275         ib_gid_t        *gid_table_p = NULL;\r
1276         u_int8_t        index = 0; // default return value\r
1277         u_int8_t        i;\r
1278 \r
1279         gid_table_p = cl_zalloc( 64*sizeof(ib_gid_t));\r
1280 \r
1281         mlnx_get_hca_gid_tbl(hh_hndl, port_num, 64, gid_table_p);\r
1282 \r
1283         for (i = 0; i < 64; i++)\r
1284         {\r
1285                 if (!cl_memcmp(raw_gid, gid_table_p[i].raw, sizeof(ib_gid_t)))\r
1286                 {\r
1287                         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("1: found GID at index %d\n", i));\r
1288                         index = i;\r
1289                         break;\r
1290                 }\r
1291         }\r
1292 \r
1293         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("2: found GID at index %d\n", index));\r
1294 \r
1295         cl_free( gid_table_p);\r
1296         return index;\r
1297 }\r
1298 \r
1299 /////////////////////////////////////////////////////////\r
1300 /////////////////////////////////////////////////////////\r
/*
 * Translate an IBAL address vector into a VAPI UD address vector.
 *
 * GRH fields (hop limit, traffic class, flow label, SGID index, DGID)
 * are only filled in when the IBAL AV carries a valid GRH, i.e. for
 * global-routed or multicast destinations.
 */
void
mlnx_conv_ibal_av(
	IN				HH_hca_hndl_t				hh_hndl,
	IN		const	ib_av_attr_t				*ibal_av_p,
		OUT			VAPI_ud_av_t				*vapi_av_p)
{
	vapi_av_p->port = ibal_av_p->port_num;
	vapi_av_p->sl   = ibal_av_p->sl;
	vapi_av_p->dlid = cl_ntoh16 (ibal_av_p->dlid);

	// 10 Gb/s maps to VAPI rate code 0, everything else to 3 —
	// NOTE(review): assumed full-rate vs. 1/4-rate encoding; confirm
	// against the VAPI static-rate definition.
	vapi_av_p->static_rate   =
		(ibal_av_p->static_rate == IB_PATH_RECORD_RATE_10_GBS? 0 : 3);
	// Split ver_class_flow into traffic class + flow label (version discarded).
	ib_grh_get_ver_class_flow( ibal_av_p->grh.ver_class_flow, NULL,
		&vapi_av_p->traffic_class, &vapi_av_p->flow_label );
	vapi_av_p->src_path_bits = ibal_av_p->path_bits; // PATH:
	//vapi_av_p->src_path_bits = 0;

	/* For global destination or Multicast address:*/
	if (ibal_av_p->grh_valid)
	{
		vapi_av_p->grh_flag = TRUE;
		vapi_av_p->hop_limit     = ibal_av_p->grh.hop_limit;
		// CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("raw %p, &raw %p\n", ibal_av_p->grh.src_gid.raw, &ibal_av_p->grh.src_gid.raw));
		// VAPI wants the SGID as a table index, not the raw GID; look it up.
		vapi_av_p->sgid_index    = mlnx_gid_to_index(hh_hndl, ibal_av_p->port_num, (u_int8_t *)ibal_av_p->grh.src_gid.raw);
		cl_memcpy(vapi_av_p->dgid, ibal_av_p->grh.dest_gid.raw, sizeof(vapi_av_p->dgid));
	}
}
1328 \r
1329 /////////////////////////////////////////////////////////\r
1330 /////////////////////////////////////////////////////////\r
/*
 * Translate a VAPI UD address vector back into an IBAL address vector.
 * Inverse of mlnx_conv_ibal_av; the SGID index is resolved back to the
 * raw GID via THH_hob_get_sgid.
 */
void
mlnx_conv_vapi_av(
	IN				HH_hca_hndl_t				hh_hndl,
	IN		const	VAPI_ud_av_t				*vapi_av_p,
		OUT			ib_av_attr_t				*ibal_av_p)
{
	uint8_t		ver;

	ibal_av_p->port_num = vapi_av_p->port;
	ibal_av_p->sl       = vapi_av_p->sl;
	ibal_av_p->dlid     = cl_ntoh16(vapi_av_p->dlid);

	/* For global destination or Multicast address:*/
	ibal_av_p->grh_valid = vapi_av_p->grh_flag;

	// GRH IP version is always 2 in this encoding.
	ver = 2;
	ibal_av_p->grh.ver_class_flow = ib_grh_set_ver_class_flow( ver,
		vapi_av_p->traffic_class,
		vapi_av_p->flow_label);
	ibal_av_p->grh.hop_limit = vapi_av_p->hop_limit;

	// Resolve the SGID table index back to the raw source GID.
	THH_hob_get_sgid(hh_hndl,
		vapi_av_p->port,
		vapi_av_p->sgid_index,
		&ibal_av_p->grh.src_gid.raw);

	cl_memcpy(ibal_av_p->grh.dest_gid.raw, vapi_av_p->dgid, sizeof(vapi_av_p->dgid));

	// Inverse of the encoding in mlnx_conv_ibal_av: nonzero rate code
	// reports 2.5 Gb/s, zero reports 10 Gb/s.
	ibal_av_p->static_rate = (vapi_av_p->static_rate?
		IB_PATH_RECORD_RATE_2_5_GBS : IB_PATH_RECORD_RATE_10_GBS);
	ibal_av_p->path_bits   = vapi_av_p->src_path_bits;
}
1363 \r
1364 /////////////////////////////////////////////////////////\r
1365 /////////////////////////////////////////////////////////\r
1366 int\r
1367 mlnx_map_vapi_cqe_status(\r
1368         IN                              VAPI_wc_status_t                        vapi_status)\r
1369 {\r
1370         switch (vapi_status)\r
1371         {\r
1372         case IB_COMP_SUCCESS:           return IB_WCS_SUCCESS;\r
1373         case IB_COMP_LOC_LEN_ERR:       return IB_WCS_LOCAL_LEN_ERR;\r
1374         case IB_COMP_LOC_QP_OP_ERR:     return IB_WCS_LOCAL_OP_ERR;\r
1375         case IB_COMP_LOC_PROT_ERR:      return IB_WCS_LOCAL_PROTECTION_ERR;\r
1376         case IB_COMP_WR_FLUSH_ERR:      return IB_WCS_WR_FLUSHED_ERR;\r
1377         case IB_COMP_MW_BIND_ERR:       return IB_WCS_MEM_WINDOW_BIND_ERR;\r
1378         case IB_COMP_REM_INV_REQ_ERR:   return IB_WCS_REM_INVALID_REQ_ERR;\r
1379         case IB_COMP_REM_ACCESS_ERR:    return IB_WCS_REM_ACCESS_ERR;\r
1380         case IB_COMP_REM_OP_ERR:        return IB_WCS_REM_OP_ERR;\r
1381         case IB_COMP_RETRY_EXC_ERR:     return IB_WCS_TIMEOUT_RETRY_ERR;\r
1382         case IB_COMP_RNR_RETRY_EXC_ERR: return IB_WCS_RNR_RETRY_ERR;\r
1383         case IB_COMP_REM_ABORT_ERR:     return IB_WCS_REM_ACCESS_ERR; // ???\r
1384         case IB_COMP_FATAL_ERR:         return IB_WCS_REM_ACCESS_ERR; // ???\r
1385         case IB_COMP_GENERAL_ERR:       return IB_WCS_REM_ACCESS_ERR; // ???\r
1386         default:\r
1387                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n",\r
1388                         vapi_status, IB_COMP_GENERAL_ERR, IB_WCS_REM_ACCESS_ERR));\r
1389                 return IB_WCS_REM_ACCESS_ERR;\r
1390         }\r
1391 }\r
1392 \r
1393 /////////////////////////////////////////////////////////\r
1394 /////////////////////////////////////////////////////////\r
1395 int\r
1396 mlnx_map_vapi_cqe_type(\r
1397         IN                              VAPI_cqe_opcode_t                       opcode)\r
1398 {\r
1399         switch (opcode)\r
1400         {\r
1401         case VAPI_CQE_SQ_SEND_DATA:     return IB_WC_SEND;\r
1402         case VAPI_CQE_SQ_RDMA_WRITE:    return IB_WC_RDMA_WRITE;\r
1403         case VAPI_CQE_SQ_RDMA_READ:     return IB_WC_RDMA_READ;\r
1404         case VAPI_CQE_SQ_COMP_SWAP:     return IB_WC_COMPARE_SWAP;\r
1405         case VAPI_CQE_SQ_FETCH_ADD:     return IB_WC_FETCH_ADD;\r
1406         case VAPI_CQE_SQ_BIND_MRW:      return IB_WC_MW_BIND;\r
1407         case VAPI_CQE_RQ_SEND_DATA:     return IB_WC_RECV;\r
1408         case VAPI_CQE_RQ_RDMA_WITH_IMM: return IB_WC_RECV_RDMA_WRITE;\r
1409         default:\r
1410                 return IB_WC_SEND;\r
1411         }\r
1412 }\r
1413 \r
1414 /////////////////////////////////////////////////////////\r
1415 // Map Remote Node Addr Type\r
1416 /////////////////////////////////////////////////////////\r
1417 int\r
1418 mlnx_map_vapi_rna_type(\r
1419         IN                              VAPI_remote_node_addr_type_t    rna)\r
1420 {\r
1421         switch (rna)\r
1422         {\r
1423         case VAPI_RNA_UD:       return IB_QPT_UNRELIABLE_DGRM;\r
1424         case VAPI_RNA_RAW_ETY:  return IB_QPT_RAW_ETHER;\r
1425         case VAPI_RNA_RAW_IPV6: return IB_QPT_RAW_IPV6;\r
1426         default:\r
1427                 return IB_QPT_RELIABLE_CONN;\r
1428         }\r
1429 }\r
1430 \r
1431 //////////////////////////////////////////////////////////////\r
1432 // Convert from VAPI memory-region attributes to IBAL \r
1433 //////////////////////////////////////////////////////////////\r
1434 void\r
1435 mlnx_conv_vapi_mr_attr(\r
1436         IN                              ib_pd_handle_t                          pd_h,\r
1437         IN                              HH_mr_info_t                            *mr_info_p,\r
1438                 OUT                     ib_mr_attr_t                            *mr_query_p)\r
1439 {\r
1440         mr_query_p->h_pd = pd_h;\r
1441         mr_query_p->local_lb  = mr_info_p->local_start;\r
1442         mr_query_p->local_ub  = mr_info_p->local_start + mr_info_p->local_size;\r
1443         mr_query_p->remote_lb = mr_info_p->remote_start;\r
1444         mr_query_p->remote_ub = mr_info_p->remote_start + mr_info_p->remote_size;\r
1445 \r
1446         mr_query_p->access_ctrl = map_vapi_acl(mr_info_p->acl);\r
1447         mr_query_p->lkey = mr_info_p->lkey;\r
1448         mr_query_p->rkey = mr_info_p->rkey;\r
1449 }\r
1450 \r
1451 //////////////////////////////////////////////////////////////\r
1452 // Convert from IBAL memory-window bind request to VAPI \r
1453 //////////////////////////////////////////////////////////////\r
1454 void\r
1455 mlnx_conv_bind_req(\r
1456         IN                              HHUL_qp_hndl_t                          hhul_qp_hndl,\r
1457         IN                              ib_bind_wr_t* const                     p_mw_bind,\r
1458                 OUT                     HHUL_mw_bind_t                          *bind_prop_p)\r
1459 {\r
1460         bind_prop_p->qp = hhul_qp_hndl;\r
1461         bind_prop_p->id  = p_mw_bind->wr_id;\r
1462         bind_prop_p->acl  = map_ibal_acl(p_mw_bind->access_ctrl);\r
1463         bind_prop_p->size  = p_mw_bind->local_ds.length;\r
1464         bind_prop_p->start  = (VAPI_virt_addr_t)(MT_virt_addr_t)p_mw_bind->local_ds.vaddr;\r
1465         bind_prop_p->mr_lkey = p_mw_bind->local_ds.lkey;\r
1466         bind_prop_p->comp_type = \r
1467                 (p_mw_bind->send_opt & IB_SEND_OPT_SIGNALED) ? VAPI_SIGNALED : VAPI_UNSIGNALED;\r
1468 }\r
1469 \r
1470 \r
1471 /////////////////////////////////////////////////////////\r
1472 // Map IBAL qp type to VAPI transport and special qp_type\r
1473 /////////////////////////////////////////////////////////\r
1474 int\r
1475 mlnx_map_ibal_qp_type(\r
1476         IN                              ib_qp_type_t                            ibal_qpt,\r
1477                 OUT                     VAPI_special_qp_t                       *vapi_qp_type_p)\r
1478 {\r
1479         switch (ibal_qpt)\r
1480         {\r
1481         case IB_QPT_RELIABLE_CONN:\r
1482                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;\r
1483                 return IB_TS_RC;\r
1484 \r
1485         case IB_QPT_UNRELIABLE_CONN:\r
1486                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;\r
1487                 return IB_TS_UC;\r
1488 \r
1489         case IB_QPT_UNRELIABLE_DGRM:\r
1490                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;\r
1491                 return IB_TS_UD;\r
1492 \r
1493         case IB_QPT_QP0:\r
1494                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP;\r
1495                 return IB_TS_UD;\r
1496 \r
1497         case IB_QPT_QP1:\r
1498                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;\r
1499                 return IB_TS_UD;\r
1500 \r
1501         case IB_QPT_RAW_IPV6:\r
1502                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_IPV6_QP; // TBD: ??\r
1503                 return IB_TS_RAW;\r
1504 \r
1505         case IB_QPT_RAW_ETHER:\r
1506                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP;  // TBD: ??\r
1507                 return IB_TS_RAW;\r
1508 \r
1509         case IB_QPT_MAD:\r
1510                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;\r
1511                 return IB_TS_UD;\r
1512 \r
1513         case IB_QPT_QP0_ALIAS:\r
1514                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP;\r
1515                 return IB_TS_UD;\r
1516 \r
1517         case IB_QPT_QP1_ALIAS:\r
1518                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;\r
1519                 return IB_TS_UD;\r
1520 \r
1521         default:\r
1522                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map ibal_qp_type %d (last known %d) returning %d\n",\r
1523                         ibal_qpt, IB_QPT_QP1_ALIAS, IB_TS_RAW));\r
1524                 if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP;\r
1525                 return IB_TS_RAW;\r
1526         }\r
1527 }\r
1528 \r
1529 /////////////////////////////////////////////////////////\r
1530 // QP and CQ value must be handled by caller\r
1531 /////////////////////////////////////////////////////////\r
1532 void\r
1533 mlnx_conv_qp_create_attr(\r
1534         IN              const   ib_qp_create_t                          *create_attr_p,\r
1535                 OUT                     HHUL_qp_init_attr_t                     *init_attr_p,\r
1536                 OUT                     VAPI_special_qp_t                       *vapi_qp_type_p)\r
1537 {\r
1538         init_attr_p->ts_type = mlnx_map_ibal_qp_type(create_attr_p->qp_type, vapi_qp_type_p);\r
1539 \r
1540         init_attr_p->qp_cap.max_oust_wr_sq = create_attr_p->sq_depth;\r
1541         init_attr_p->qp_cap.max_oust_wr_rq = create_attr_p->rq_depth;\r
1542         init_attr_p->qp_cap.max_sg_size_sq = create_attr_p->sq_sge;\r
1543         init_attr_p->qp_cap.max_sg_size_rq = create_attr_p->rq_sge;\r
1544 \r
1545         init_attr_p->sq_sig_type = (create_attr_p->sq_signaled) ? VAPI_SIGNAL_ALL_WR : VAPI_SIGNAL_REQ_WR;\r
1546         init_attr_p->rq_sig_type = VAPI_SIGNAL_ALL_WR;\r
1547 \r
1548         init_attr_p->srq = HHUL_INVAL_SRQ_HNDL;\r
1549 }\r
1550 \r
1551 /////////////////////////////////////////////////////////\r
1552 // NOTE: ibal_qp_state is non linear - so we cannot use a LUT\r
1553 /////////////////////////////////////////////////////////\r
1554 VAPI_qp_state_t\r
1555 mlnx_map_ibal_qp_state(\r
1556         IN                              ib_qp_state_t                           ibal_qp_state)\r
1557 {\r
1558         VAPI_qp_state_t vapi_qp_state = VAPI_RESET;\r
1559 \r
1560         if      (ibal_qp_state & IB_QPS_RESET) vapi_qp_state = VAPI_RESET;\r
1561         else if (ibal_qp_state & IB_QPS_INIT)  vapi_qp_state = VAPI_INIT;\r
1562         else if (ibal_qp_state & IB_QPS_RTR)   vapi_qp_state = VAPI_RTR;\r
1563         else if (ibal_qp_state & IB_QPS_RTS)   vapi_qp_state = VAPI_RTS;\r
1564         else if (ibal_qp_state & IB_QPS_SQD)   vapi_qp_state = VAPI_SQD;\r
1565         else if (ibal_qp_state & IB_QPS_SQERR) vapi_qp_state = VAPI_SQE;\r
1566         else if (ibal_qp_state & IB_QPS_ERROR) vapi_qp_state = VAPI_ERR;\r
1567 \r
1568         return vapi_qp_state;\r
1569 }\r
1570 \r
1571 /////////////////////////////////////////////////////////\r
1572 /////////////////////////////////////////////////////////\r
1573 ib_qp_state_t\r
1574 mlnx_map_vapi_qp_state(\r
1575         IN                              VAPI_qp_state_t                         vapi_qp_state)\r
1576 {\r
1577         switch (vapi_qp_state)\r
1578         {\r
1579         case VAPI_RESET: return IB_QPS_RESET;\r
1580         case VAPI_INIT:  return IB_QPS_INIT;\r
1581         case VAPI_RTR:   return IB_QPS_RTR;\r
1582         case VAPI_RTS:   return IB_QPS_RTS;\r
1583         case VAPI_SQD:   return IB_QPS_SQD;\r
1584         case VAPI_SQE:   return IB_QPS_SQERR;\r
1585         case VAPI_ERR:   return IB_QPS_ERROR;\r
1586                 // TBD: IB_QPS_SQD_DRAINING\r
1587                 // TBD: IB_QPS_SQD_DRAINED\r
1588         default:\r
1589                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_qp_state %d (last known %d) returning %d\n",\r
1590                         vapi_qp_state, VAPI_ERR, IB_QPS_INIT));\r
1591                 return IB_QPS_INIT;\r
1592         }\r
1593 }\r
1594 \r
1595 /////////////////////////////////////////////////////////\r
1596 /////////////////////////////////////////////////////////\r
1597 ib_apm_state_t\r
1598 mlnx_map_vapi_apm_state(\r
1599         IN                              VAPI_mig_state_t                        vapi_apm_state)\r
1600 {\r
1601         switch (vapi_apm_state)\r
1602         {\r
1603         case VAPI_MIGRATED: return IB_APM_MIGRATED;\r
1604         case VAPI_REARM:    return IB_APM_REARM;\r
1605         case VAPI_ARMED:    return IB_APM_ARMED;\r
1606 \r
1607         default:\r
1608                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_apm_state %d (last known %d) returning %d\n",\r
1609                         vapi_apm_state, VAPI_ARMED, 0));\r
1610                 return 0;\r
1611         }\r
1612 }\r
1613 \r
#if 0
/////////////////////////////////////////////////////////
// UNUSED: IBAL uses same encoding as THH
// Kept (disabled) as reference for the MTU encoding:
// IBAL carries the MTU as a byte count (256/512/1024/...),
// VAPI encodes it as log2(bytes) - 7 (MTU256=1, MTU512=2, ...).
/////////////////////////////////////////////////////////
static
u_int32_t ibal_mtu_to_vapi(u_int32_t ibal_mtu)
{
	u_int32_t mtu = 0;

	// MTU256=1, MTU512=2, MTU1024=3
	// Count shifts to compute log2(ibal_mtu), then rebase so 256 -> 1.
	while (ibal_mtu >>= 1) mtu++;
	return mtu - 7;
}

/////////////////////////////////////////////////////////
// Inverse of the above: VAPI log-encoded MTU back to bytes.
/////////////////////////////////////////////////////////
static
u_int32_t vapi_mtu_to_ibal(u_int32_t vapi_mtu)
{
	return (1 << (vapi_mtu + 7));
}
#endif
1636 \r
1637 /////////////////////////////////////////////////////////\r
1638 /////////////////////////////////////////////////////////\r
1639 void\r
1640 mlnx_conv_vapi_qp_attr(\r
1641         IN                              HH_hca_hndl_t                           hh_hndl,\r
1642         IN                              VAPI_qp_attr_t                          *hh_qp_attr_p,\r
1643                 OUT                     ib_qp_attr_t                            *qp_attr_p)\r
1644 {\r
1645         qp_attr_p->access_ctrl     = map_vapi_qp_acl(hh_qp_attr_p->remote_atomic_flags);\r
1646         qp_attr_p->pkey_index      = (uint16_t)hh_qp_attr_p->pkey_ix;\r
1647         qp_attr_p->sq_depth        = hh_qp_attr_p->cap.max_oust_wr_sq;\r
1648         qp_attr_p->rq_depth        = hh_qp_attr_p->cap.max_oust_wr_rq;\r
1649         qp_attr_p->sq_sge          = hh_qp_attr_p->cap.max_sg_size_sq;\r
1650         qp_attr_p->rq_sge          = hh_qp_attr_p->cap.max_sg_size_rq;\r
1651         qp_attr_p->sq_max_inline   = hh_qp_attr_p->cap.max_inline_data_sq;\r
1652         qp_attr_p->init_depth      = hh_qp_attr_p->ous_dst_rd_atom; // outstanding outgoing\r
1653         qp_attr_p->resp_res        = hh_qp_attr_p->qp_ous_rd_atom;  // outstanding as target (in)\r
1654 \r
1655         qp_attr_p->num             = cl_ntoh32(hh_qp_attr_p->qp_num);\r
1656         CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_qpn 0x%x = hh_qpn 0x%x\n",\r
1657                 qp_attr_p->num,\r
1658                 hh_qp_attr_p->qp_num));\r
1659 \r
1660         qp_attr_p->dest_num        = cl_ntoh32(hh_qp_attr_p->dest_qp_num);\r
1661         CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_dest 0x%x = hh_dest 0x%x\n",\r
1662                 qp_attr_p->dest_num,\r
1663                 hh_qp_attr_p->dest_qp_num));\r
1664         qp_attr_p->qkey            = cl_ntoh32 (hh_qp_attr_p->qkey);\r
1665 \r
1666         qp_attr_p->sq_psn          = cl_ntoh32 (hh_qp_attr_p->sq_psn);\r
1667         qp_attr_p->rq_psn          = cl_ntoh32 (hh_qp_attr_p->rq_psn);\r
1668 \r
1669         qp_attr_p->primary_port    = hh_qp_attr_p->port;\r
1670         qp_attr_p->alternate_port  = hh_qp_attr_p->alt_port;\r
1671 \r
1672         qp_attr_p->state           = mlnx_map_vapi_qp_state(hh_qp_attr_p->qp_state);\r
1673         qp_attr_p->apm_state       = mlnx_map_vapi_apm_state(hh_qp_attr_p->path_mig_state);\r
1674 \r
1675         mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->av, &qp_attr_p->primary_av);\r
1676         qp_attr_p->primary_av.conn.path_mtu          = (u_int8_t)hh_qp_attr_p->path_mtu;\r
1677         qp_attr_p->primary_av.conn.local_ack_timeout = hh_qp_attr_p->timeout; \r
1678         qp_attr_p->primary_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count;\r
1679         qp_attr_p->primary_av.conn.rnr_retry_cnt     = hh_qp_attr_p->rnr_retry;\r
1680 \r
1681         mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->alt_av, &qp_attr_p->alternate_av);\r
1682         qp_attr_p->alternate_av.conn. path_mtu         = (u_int8_t)hh_qp_attr_p->path_mtu;\r
1683         qp_attr_p->alternate_av.conn.local_ack_timeout = hh_qp_attr_p->timeout;\r
1684         qp_attr_p->alternate_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count;\r
1685         qp_attr_p->alternate_av.conn.rnr_retry_cnt     = hh_qp_attr_p->rnr_retry;\r
1686 }\r
1687 #if 0\r
1688 XXX:\r
1689 QP_ATTR_QP_STATE\r
1690 QP_ATTR_EN_SQD_ASYN_NOTIF\r
1691 QP_ATTR_QP_NUM\r
1692 + QP_ATTR_REMOTE_ATOMIC_FLAGS\r
1693 + QP_ATTR_PKEY_IX\r
1694 + QP_ATTR_PORT\r
1695 + QP_ATTR_QKEY\r
1696 + QP_ATTR_RQ_PSN\r
1697 + QP_ATTR_AV\r
1698 \r
1699 QP_ATTR_PATH_MTU\r
1700 + QP_ATTR_TIMEOUT\r
1701 + QP_ATTR_RETRY_COUNT\r
1702 + QP_ATTR_RNR_RETRY\r
1703 QP_ATTR_QP_OUS_RD_ATOM\r
1704 \r
1705 - QP_ATTR_ALT_PATH\r
1706 \r
1707 + QP_ATTR_MIN_RNR_TIMER\r
1708 QP_ATTR_SQ_PSN\r
1709 QP_ATTR_OUS_DST_RD_ATOM\r
1710 QP_ATTR_PATH_MIG_STATE\r
1711 QP_ATTR_CAP\r
1712 #endif\r
1713 \r
1714 /////////////////////////////////////////////////////////\r
1715 /////////////////////////////////////////////////////////\r
1716 ib_api_status_t\r
1717 mlnx_conv_qp_modify_attr(\r
1718         IN                              HH_hca_hndl_t                                   hh_hndl,\r
1719         IN                              ib_qp_type_t                                    qp_type,\r
1720         IN              const   ib_qp_mod_t                                             *modify_attr_p,\r
1721                 OUT                     VAPI_qp_attr_t                                  *qp_attr_p, \r
1722                 OUT                     VAPI_qp_attr_mask_t                             *attr_mask_p)\r
1723 {\r
1724 \r
1725         qp_attr_p->qp_state = mlnx_map_ibal_qp_state(modify_attr_p->req_state);\r
1726         *attr_mask_p = QP_ATTR_QP_STATE;\r
1727 \r
1728         switch(modify_attr_p->req_state)\r
1729         {\r
1730         case IB_QPS_RESET:\r
1731                 break;\r
1732 \r
1733         case IB_QPS_INIT:\r
1734                 *attr_mask_p |= QP_ATTR_PORT |\r
1735                         QP_ATTR_QKEY |\r
1736                         QP_ATTR_PKEY_IX ;\r
1737 \r
1738                 qp_attr_p->port    = modify_attr_p->state.init.primary_port;\r
1739                 qp_attr_p->qkey    = cl_ntoh32 (modify_attr_p->state.init.qkey);\r
1740                 qp_attr_p->pkey_ix = modify_attr_p->state.init.pkey_index;\r
1741                 if (IB_QPT_RELIABLE_CONN == qp_type)\r
1742                 {\r
1743                         *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;\r
1744                         qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.init.access_ctrl);\r
1745                 } else\r
1746                 {\r
1747                         qp_attr_p->remote_atomic_flags = 0;\r
1748                 }\r
1749                 break;\r
1750 \r
1751         case IB_QPS_RTR:\r
1752                 /* VAPI doesn't support modifying the WQE depth ever. */\r
1753                 if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||\r
1754                         modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH )\r
1755                 {\r
1756                         return IB_UNSUPPORTED;\r
1757                 }\r
1758 \r
1759                 *attr_mask_p |= QP_ATTR_RQ_PSN |\r
1760                         QP_ATTR_DEST_QP_NUM |\r
1761                         QP_ATTR_QP_OUS_RD_ATOM |\r
1762                         QP_ATTR_MIN_RNR_TIMER |\r
1763                         QP_ATTR_AV ;\r
1764 \r
1765                 qp_attr_p->rq_psn          = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);\r
1766                 qp_attr_p->dest_qp_num     = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);\r
1767                 qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rtr.resp_res;\r
1768 \r
1769                 if (modify_attr_p->state.rtr.opts & IB_MOD_QP_RNR_NAK_TIMEOUT)\r
1770                 {\r
1771                         qp_attr_p->min_rnr_timer   = modify_attr_p->state.rtr.rnr_nak_timeout;\r
1772                 } else\r
1773                 {\r
1774                         qp_attr_p->min_rnr_timer   = 0;\r
1775                 }\r
1776 \r
1777 #if 1\r
1778                 CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("modify_qp: hh_dest 0x%x = ibal_dest 0x%x\n",\r
1779                         qp_attr_p->dest_qp_num, modify_attr_p->state.rtr.dest_qp));\r
1780 #endif\r
1781 \r
1782                 // Convert primary RC AV (mandatory)\r
1783                 cl_memclr(&qp_attr_p->av, sizeof(VAPI_ud_av_t));\r
1784                 mlnx_conv_ibal_av(hh_hndl,\r
1785                         &modify_attr_p->state.rtr.primary_av, &qp_attr_p->av);\r
1786 \r
1787                 if (IB_QPT_RELIABLE_CONN == qp_type)\r
1788                 {\r
1789                         *attr_mask_p |= QP_ATTR_PATH_MTU;\r
1790                         qp_attr_p->path_mtu     = modify_attr_p->state.rtr.primary_av.conn.path_mtu; // MTU\r
1791 \r
1792                         qp_attr_p->timeout     = modify_attr_p->state.rtr.primary_av.conn.local_ack_timeout; // XXX: conv\r
1793                         qp_attr_p->retry_count = modify_attr_p->state.rtr.primary_av.conn.seq_err_retry_cnt;\r
1794                         qp_attr_p->rnr_retry   = modify_attr_p->state.rtr.primary_av.conn.rnr_retry_cnt;\r
1795                 }\r
1796 \r
1797                 // Convert Remote Atomic Flags\r
1798                 if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL)\r
1799                 {\r
1800                         *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;\r
1801                         qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rtr.access_ctrl);\r
1802                 }\r
1803 \r
1804                 // Convert alternate RC AV\r
1805                 if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV)\r
1806                 {\r
1807                         *attr_mask_p |= QP_ATTR_ALT_PATH;\r
1808                         cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t));\r
1809                         mlnx_conv_ibal_av(hh_hndl,\r
1810                                 &modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_av);\r
1811 \r
1812                         if (IB_QPT_RELIABLE_CONN == qp_type)\r
1813                         {\r
1814                                 qp_attr_p->alt_timeout     = modify_attr_p->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv\r
1815 #if 0\r
1816                                 /* Incompliant with spec 1.1! Data already set before */\r
1817                                 qp_attr_p->retry_count = modify_attr_p->state.rtr.alternate_av.conn.seq_err_retry_cnt;\r
1818                                 qp_attr_p->rnr_retry   = modify_attr_p->state.rtr.alternate_av.conn.rnr_retry_cnt;\r
1819 #endif\r
1820                         }\r
1821                 }\r
1822                 break;\r
1823 \r
1824         case IB_QPS_RTS:\r
1825                 /* VAPI doesn't support modifying the WQE depth ever. */\r
1826                 if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||\r
1827                         modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH )\r
1828                 {\r
1829                         return IB_UNSUPPORTED;\r
1830                 }\r
1831 \r
1832                 *attr_mask_p |= QP_ATTR_SQ_PSN |\r
1833                         QP_ATTR_RETRY_COUNT |\r
1834                         QP_ATTR_RNR_RETRY |\r
1835                         QP_ATTR_OUS_DST_RD_ATOM |\r
1836                         QP_ATTR_MIN_RNR_TIMER;\r
1837 \r
1838                 qp_attr_p->sq_psn = cl_ntoh32 (modify_attr_p->state.rts.sq_psn);\r
1839 \r
1840                 if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL)\r
1841                 {\r
1842                         *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;\r
1843                         qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rts.access_ctrl);\r
1844                 }\r
1845 \r
1846                 qp_attr_p->timeout     = modify_attr_p->state.rts.local_ack_timeout; // XXX: conv\r
1847                 qp_attr_p->ous_dst_rd_atom = modify_attr_p->state.rts.init_depth;\r
1848                 qp_attr_p->retry_count = modify_attr_p->state.rts.retry_cnt;\r
1849                 qp_attr_p->rnr_retry   = modify_attr_p->state.rts.rnr_retry_cnt;\r
1850                 qp_attr_p->min_rnr_timer   = modify_attr_p->state.rts.rnr_nak_timeout;\r
1851 \r
1852                 // Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)\r
1853                 if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {\r
1854                         *attr_mask_p |= QP_ATTR_QP_OUS_RD_ATOM;\r
1855                         qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rts.resp_res;\r
1856                 }\r
1857 \r
1858                 // Convert alternate RC AV\r
1859                 if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV)\r
1860                 {\r
1861                         *attr_mask_p |= QP_ATTR_ALT_PATH;\r
1862                         cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t));\r
1863                         mlnx_conv_ibal_av(hh_hndl,\r
1864                                 &modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_av);\r
1865                         if (IB_QPT_RELIABLE_CONN == qp_type)\r
1866                         {\r
1867                                 qp_attr_p->alt_timeout     = modify_attr_p->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv\r
1868 #if 0\r
1869                                 /* Incompliant with spec 1.1! Data already set before */\r
1870                                 qp_attr_p->retry_count = modify_attr_p->state.rts.alternate_av.conn.seq_err_retry_cnt;\r
1871                                 qp_attr_p->rnr_retry   = modify_attr_p->state.rts.alternate_av.conn.rnr_retry_cnt;\r
1872 #endif\r
1873                         }\r
1874                 }\r
1875                 break;\r
1876 \r
1877                 // TBD: The following are treated equally (SQ Drain)\r
1878         case IB_QPS_SQD:\r
1879         case IB_QPS_SQD_DRAINING:\r
1880         case IB_QPS_SQD_DRAINED:\r
1881                 *attr_mask_p |= QP_ATTR_EN_SQD_ASYN_NOTIF;\r
1882                 qp_attr_p->en_sqd_asyn_notif = (MT_bool)modify_attr_p->state.sqd.sqd_event;\r
1883                 break;\r
1884 \r
1885         case IB_QPS_SQERR:\r
1886         case IB_QPS_ERROR:\r
1887         case IB_QPS_TIME_WAIT:\r
1888         default:\r
1889                 break;\r
1890         }\r
1891         CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("CL: conv_qp_modify: new state %d attr_mask 0x%x\n", qp_attr_p->qp_state, *attr_mask_p));\r
1892         return IB_SUCCESS;\r
1893 }\r
1894 \r
1895 /////////////////////////////////////////////////////////\r
1896 /////////////////////////////////////////////////////////\r
1897 static VAPI_wr_opcode_t\r
1898 map_ibal_send_opcode(\r
1899         IN                              ib_wr_type_t                            ibal_opcode,\r
1900         IN                              boolean_t                                       imm)\r
1901 {\r
1902         VAPI_wr_opcode_t                vapi_opcode;\r
1903 \r
1904         switch (ibal_opcode)\r
1905         {\r
1906         case WR_SEND:         vapi_opcode = VAPI_SEND;\r
1907                 break;\r
1908         case WR_RDMA_WRITE:   vapi_opcode = VAPI_RDMA_WRITE;\r
1909                 break;\r
1910         case WR_RDMA_READ:    vapi_opcode = VAPI_RDMA_READ;\r
1911                 break;\r
1912         case WR_COMPARE_SWAP: vapi_opcode = VAPI_ATOMIC_CMP_AND_SWP;\r
1913                 break;\r
1914         case WR_FETCH_ADD:    vapi_opcode = VAPI_ATOMIC_FETCH_AND_ADD;\r
1915                 break;\r
1916         default:              vapi_opcode = VAPI_SEND;\r
1917                 break;\r
1918         }\r
1919         if (imm && (VAPI_SEND == vapi_opcode || VAPI_RDMA_WRITE == vapi_opcode)) vapi_opcode++;\r
1920         return vapi_opcode;\r
1921 }\r
1922 \r
1923 /////////////////////////////////////////////////////////\r
1924 /////////////////////////////////////////////////////////\r
1925 ib_api_status_t\r
1926 mlnx_conv_send_desc(\r
1927         IN                              IB_ts_t                                         transport,\r
1928         IN              const   ib_send_wr_t                            *ibal_send_wqe_p,\r
1929                 OUT                     VAPI_sr_desc_t                          *vapi_send_desc_p)\r
1930 {\r
1931         boolean_t                                               imm = FALSE;\r
1932         u_int32_t                                               idx;\r
1933         register VAPI_sg_lst_entry_t    *sg_lst_p;\r
1934         register ib_local_ds_t                  *ds_array;\r
1935 \r
1936 \r
1937         switch (transport)\r
1938         {\r
1939         case IB_TS_UD:\r
1940                 CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "UD"));\r
1941                 {\r
1942                         mlnx_avo_t *avo_p = (mlnx_avo_t *)ibal_send_wqe_p->dgrm.ud.h_av;\r
1943 \r
1944                         vapi_send_desc_p->remote_qp  = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qp);\r
1945                         vapi_send_desc_p->remote_qkey = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qkey);\r
1946 \r
1947                         if (!avo_p || avo_p->mark != E_MARK_AV)\r
1948                                 return IB_INVALID_AV_HANDLE;\r
1949 \r
1950                         vapi_send_desc_p->remote_ah = avo_p->h_av; // was ah.hhul\r
1951                         break;\r
1952                 }\r
1953 \r
1954         case IB_TS_RC:\r
1955                 CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "RC"));\r
1956                 // vapi_send_desc_p->remote_qp   = 0;\r
1957                 // vapi_send_desc_p->remote_qkey = 0;\r
1958                 vapi_send_desc_p->remote_addr = ibal_send_wqe_p->remote_ops.vaddr;\r
1959                 vapi_send_desc_p->r_key       = ibal_send_wqe_p->remote_ops.rkey;\r
1960                 vapi_send_desc_p->compare_add = ibal_send_wqe_p->remote_ops.atomic1;\r
1961                 vapi_send_desc_p->swap        = ibal_send_wqe_p->remote_ops.atomic2;\r
1962                 break;\r
1963 \r
1964         default: // TBD: RAW, RD\r
1965                 return IB_UNSUPPORTED;\r
1966         }\r
1967 \r
1968         imm = (0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_IMMEDIATE));\r
1969         vapi_send_desc_p->fence      = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_FENCE));\r
1970         vapi_send_desc_p->set_se     = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SOLICITED));\r
1971         vapi_send_desc_p->comp_type  = (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SIGNALED) ?\r
1972 VAPI_SIGNALED : VAPI_UNSIGNALED;\r
1973 \r
1974         vapi_send_desc_p->id = ibal_send_wqe_p->wr_id;\r
1975         vapi_send_desc_p->opcode = map_ibal_send_opcode(ibal_send_wqe_p->wr_type, imm);\r
1976 \r
1977         if (imm)\r
1978                 vapi_send_desc_p->imm_data = cl_ntoh32 (ibal_send_wqe_p->immediate_data);\r
1979 \r
1980         vapi_send_desc_p->sg_lst_len = ibal_send_wqe_p->num_ds;\r
1981 \r
1982         sg_lst_p = vapi_send_desc_p->sg_lst_p;\r
1983         ds_array = ibal_send_wqe_p->ds_array;\r
1984         for (idx = 0; idx < ibal_send_wqe_p->num_ds; idx++)\r
1985         {\r
1986                 sg_lst_p->addr = ds_array->vaddr;\r
1987                 sg_lst_p->len  = ds_array->length;\r
1988                 sg_lst_p->lkey = ds_array->lkey;\r
1989                 // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_send (conv) addr %Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey));\r
1990                 sg_lst_p++;\r
1991                 ds_array++;\r
1992         }\r
1993         CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("send: rqpn 0x%x rkey 0x%x\n", \r
1994                 vapi_send_desc_p->remote_qp,\r
1995                 vapi_send_desc_p->remote_qkey));\r
1996         return IB_SUCCESS;\r
1997 }\r
1998 \r
1999 /////////////////////////////////////////////////////////\r
2000 /////////////////////////////////////////////////////////\r
2001 ib_api_status_t\r
2002 mlnx_conv_recv_desc(\r
2003         IN              const   ib_recv_wr_t                            *ibal_recv_wqe_p,\r
2004                 OUT                     VAPI_rr_desc_t                          *vapi_recv_desc_p)\r
2005 {\r
2006         u_int32_t                                               idx;\r
2007         register VAPI_sg_lst_entry_t    *sg_lst_p;\r
2008         register ib_local_ds_t                  *ds_array;\r
2009 \r
2010         vapi_recv_desc_p->id         = ibal_recv_wqe_p->wr_id;\r
2011         vapi_recv_desc_p->sg_lst_len = ibal_recv_wqe_p->num_ds;\r
2012         vapi_recv_desc_p->opcode     = VAPI_RECEIVE;\r
2013         vapi_recv_desc_p->comp_type  = VAPI_SIGNALED;\r
2014 \r
2015         sg_lst_p = vapi_recv_desc_p->sg_lst_p;\r
2016         ds_array = ibal_recv_wqe_p->ds_array;\r
2017         for (idx = 0; idx < ibal_recv_wqe_p->num_ds; idx++)\r
2018         {\r
2019                 sg_lst_p->addr = ds_array->vaddr;\r
2020                 sg_lst_p->len  = ds_array->length;\r
2021                 sg_lst_p->lkey = ds_array->lkey;\r
2022                 // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_recv (conv) addr 0x%Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey));\r
2023                 sg_lst_p++;\r
2024                 ds_array++;\r
2025         }\r
2026 \r
2027         return IB_SUCCESS;\r
2028 }\r
2029 \r
2030 /////////////////////////////////////////////////////////\r
2031 /////////////////////////////////////////////////////////\r
2032 void\r
2033 vapi_port_cap_to_ibal(\r
2034         IN                              IB_port_cap_mask_t                      vapi_port_cap,\r
2035                 OUT                     ib_port_cap_t                           *ibal_port_cap_p)\r
2036 {\r
2037         if (vapi_port_cap & IB_CAP_MASK_IS_CONN_MGMT_SUP)\r
2038                 ibal_port_cap_p->cm = TRUE;\r
2039         if (vapi_port_cap & IB_CAP_MASK_IS_SNMP_TUNN_SUP)\r
2040                 ibal_port_cap_p->snmp = TRUE;\r
2041         if (vapi_port_cap & IB_CAP_MASK_IS_DEVICE_MGMT_SUP)\r
2042                 ibal_port_cap_p->dev_mgmt = TRUE;\r
2043         if (vapi_port_cap & IB_CAP_MASK_IS_VENDOR_CLS_SUP)\r
2044                 ibal_port_cap_p->vend = TRUE;\r
2045         if (vapi_port_cap & IB_CAP_MASK_IS_SM_DISABLED)\r
2046                 ibal_port_cap_p->sm_disable = TRUE;\r
2047         if (vapi_port_cap & IB_CAP_MASK_IS_SM)\r
2048                 ibal_port_cap_p->sm = TRUE;\r
2049 }\r
2050 \r
2051 /////////////////////////////////////////////////////////\r
2052 /////////////////////////////////////////////////////////\r
2053 void\r
2054 mlnx_conv_vapi_hca_cap(\r
2055         IN                              HH_hca_dev_t                            *hca_info_p,\r
2056         IN                              VAPI_hca_cap_t                          *vapi_hca_cap_p,\r
2057         IN                              VAPI_hca_port_t                         *vapi_hca_ports,\r
2058                 OUT                     ib_ca_attr_t                            *ca_attr_p)\r
2059 {\r
2060         u_int8_t                        port_num;\r
2061         VAPI_hca_port_t         *vapi_port_p;\r
2062         ib_port_attr_t          *ibal_port_p;\r
2063 \r
2064         ca_attr_p->vend_id  = hca_info_p->vendor_id;\r
2065         ca_attr_p->dev_id   = (uint16_t)hca_info_p->dev_id;\r
2066         ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver;\r
2067 \r
2068         ca_attr_p->ca_guid   = *(UNALIGNED64 u_int64_t *)vapi_hca_cap_p->node_guid;\r
2069         ca_attr_p->num_ports = vapi_hca_cap_p->phys_port_num;\r
2070         ca_attr_p->max_qps   = vapi_hca_cap_p->max_num_qp;\r
2071         ca_attr_p->max_wrs   = vapi_hca_cap_p->max_qp_ous_wr;\r
2072         ca_attr_p->max_sges   = vapi_hca_cap_p->max_num_sg_ent;\r
2073         ca_attr_p->max_rd_sges = vapi_hca_cap_p->max_num_sg_ent_rd;\r
2074         ca_attr_p->max_cqs    = vapi_hca_cap_p->max_num_cq;\r
2075         ca_attr_p->max_cqes  = vapi_hca_cap_p->max_num_ent_cq;\r
2076         ca_attr_p->max_pds    = vapi_hca_cap_p->max_pd_num;\r
2077         ca_attr_p->init_regions = vapi_hca_cap_p->max_num_mr;\r
2078         ca_attr_p->init_windows = vapi_hca_cap_p->max_mw_num;\r
2079         ca_attr_p->init_region_size = vapi_hca_cap_p->max_mr_size;\r
2080         ca_attr_p->max_addr_handles = vapi_hca_cap_p->max_ah_num;\r
2081         ca_attr_p->atomicity     = vapi_hca_cap_p->atomic_cap;\r
2082         ca_attr_p->max_partitions = vapi_hca_cap_p->max_pkeys;\r
2083         ca_attr_p->max_qp_resp_res = vapi_hca_cap_p->max_qp_ous_rd_atom;\r
2084         ca_attr_p->max_resp_res    = vapi_hca_cap_p->max_res_rd_atom;\r
2085         ca_attr_p->max_qp_init_depth = vapi_hca_cap_p->max_qp_init_rd_atom;\r
2086         ca_attr_p->max_ipv6_qps    = vapi_hca_cap_p->max_raw_ipv6_qp;\r
2087         ca_attr_p->max_ether_qps   = vapi_hca_cap_p->max_raw_ethy_qp;\r
2088         ca_attr_p->max_mcast_grps  = vapi_hca_cap_p->max_mcast_grp_num;\r
2089         ca_attr_p->max_mcast_qps   = vapi_hca_cap_p->max_total_mcast_qp_attach_num;\r
2090         ca_attr_p->max_qps_per_mcast_grp = vapi_hca_cap_p->max_mcast_qp_attach_num;\r
2091         ca_attr_p->local_ack_delay = vapi_hca_cap_p->local_ca_ack_delay;\r
2092         ca_attr_p->bad_pkey_ctr_support = vapi_hca_cap_p->flags & VAPI_BAD_PKEY_COUNT_CAP;\r
2093         ca_attr_p->bad_qkey_ctr_support = vapi_hca_cap_p->flags & VAPI_BAD_QKEY_COUNT_CAP;\r
2094         ca_attr_p->raw_mcast_support    = vapi_hca_cap_p->flags & VAPI_RAW_MULTI_CAP;\r
2095         ca_attr_p->apm_support          = vapi_hca_cap_p->flags & VAPI_AUTO_PATH_MIG_CAP;\r
2096         ca_attr_p->av_port_check        = vapi_hca_cap_p->flags & VAPI_UD_AV_PORT_ENFORCE_CAP;\r
2097         ca_attr_p->change_primary_port  = vapi_hca_cap_p->flags & VAPI_CHANGE_PHY_PORT_CAP;\r
2098         ca_attr_p->modify_wr_depth      = vapi_hca_cap_p->flags & VAPI_RESIZE_OUS_WQE_CAP;\r
2099         ca_attr_p->hw_agents            = FALSE; // in the context of IBAL then agent is implemented on the host\r
2100 \r
2101         ca_attr_p->num_page_sizes = 1;\r
2102         ca_attr_p->p_page_size[0] = PAGESIZE; // TBD: extract an array of page sizes from HCA cap\r
2103 \r
2104         for (port_num = 0; port_num < vapi_hca_cap_p->phys_port_num; port_num++)\r
2105         {\r
2106                 // Setup port pointers\r
2107                 ibal_port_p = &ca_attr_p->p_port_attr[port_num];\r
2108                 vapi_port_p = &vapi_hca_ports[port_num];\r
2109 \r
2110                 // Port Cabapilities\r
2111                 cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));\r
2112                 vapi_port_cap_to_ibal(vapi_port_p->capability_mask, &ibal_port_p->cap);\r
2113 \r
2114                 // Port Atributes\r
2115                 ibal_port_p->port_num   = port_num + 1;\r
2116                 ibal_port_p->port_guid  = ibal_port_p->p_gid_table[0].unicast.interface_id;\r
2117                 ibal_port_p->lid        = cl_ntoh16(vapi_port_p->lid);\r
2118                 ibal_port_p->lmc        = vapi_port_p->lmc;\r
2119                 ibal_port_p->max_vls    = vapi_port_p->max_vl_num;\r
2120                 ibal_port_p->sm_lid     = cl_ntoh16(vapi_port_p->sm_lid);\r
2121                 ibal_port_p->sm_sl      = vapi_port_p->sm_sl;\r
2122                 ibal_port_p->link_state = (vapi_port_p->state != 0) ? (uint8_t)vapi_port_p->state : IB_LINK_DOWN;\r
2123                 ibal_port_p->num_gids   = vapi_port_p->gid_tbl_len;\r
2124                 ibal_port_p->num_pkeys  = vapi_port_p->pkey_tbl_len;\r
2125                 ibal_port_p->pkey_ctr   = (uint16_t)vapi_port_p->bad_pkey_counter;\r
2126                 ibal_port_p->qkey_ctr   = (uint16_t)vapi_port_p->qkey_viol_counter;\r
2127                 ibal_port_p->max_msg_size = vapi_port_p->max_msg_sz;\r
2128                 ibal_port_p->mtu = (u_int8_t)vapi_port_p->max_mtu;\r
2129 \r
2130                 ibal_port_p->subnet_timeout = 5; // TBD: currently 128us\r
2131                 // ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec\r
2132 #if 0\r
2133                 CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("Port %d port_guid 0x%"PRIx64"\n",\r
2134                         ibal_port_p->port_num, ibal_port_p->port_guid));\r
2135 #endif\r
2136         }\r
2137 }\r
2138 \r
2139 /////////////////////////////////////////////////////////\r
2140 /////////////////////////////////////////////////////////\r
2141 ib_api_status_t\r
2142 mlnx_get_hca_pkey_tbl(\r
2143         IN                              HH_hca_hndl_t                           hh_hndl,\r
2144         IN                              u_int8_t                                        port_num,\r
2145         IN                              u_int16_t                                       num_entries,\r
2146                 OUT                     void*                                           table_p)\r
2147 {\r
2148         u_int16_t               size;\r
2149         ib_net16_t              *pkey_p;\r
2150 \r
2151                 if (HH_OK != THH_hob_get_pkey_tbl( hh_hndl, port_num, num_entries, &size, table_p))\r
2152                 return IB_ERROR;\r
2153 \r
2154         pkey_p = (ib_net16_t *)table_p;\r
2155 #if 0\r
2156         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("port %d pkey0 0x%x pkey1 0x%x\n", port_num, pkey_p[0], pkey_p[1]));\r
2157 #endif\r
2158         return IB_SUCCESS;\r
2159 }\r
2160 \r
2161 ib_api_status_t\r
2162 mlnx_get_hca_gid_tbl(\r
2163         IN                              HH_hca_hndl_t                           hh_hndl,\r
2164         IN                              u_int8_t                                        port_num,\r
2165         IN                              u_int16_t                                       num_entries,\r
2166                 OUT                     void*                                           table_p)\r
2167 {\r
2168         u_int16_t               size;\r
2169 \r
2170         if (HH_OK != THH_hob_get_gid_tbl( hh_hndl, port_num, num_entries, &size, table_p))\r
2171                 return IB_ERROR;\r
2172 \r
2173         return IB_SUCCESS;\r
2174 }\r