/* [MT23108] Removed linker dependency on IBAL. */
/* mirror/winof: hw/mt23108/kernel/hca_verbs.c */
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
4  *\r
5  * This software is available to you under the OpenIB.org BSD license\r
6  * below:\r
7  *\r
8  *     Redistribution and use in source and binary forms, with or\r
9  *     without modification, are permitted provided that the following\r
10  *     conditions are met:\r
11  *\r
12  *      - Redistributions of source code must retain the above\r
13  *        copyright notice, this list of conditions and the following\r
14  *        disclaimer.\r
15  *\r
16  *      - Redistributions in binary form must reproduce the above\r
17  *        copyright notice, this list of conditions and the following\r
18  *        disclaimer in the documentation and/or other materials\r
19  *        provided with the distribution.\r
20  *\r
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
22  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
24  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
25  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
26  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
27  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
28  * SOFTWARE.\r
29  *\r
30  * $Id$\r
31  */\r
32 \r
33 \r
34 #include "hca_data.h"\r
35 #include "hca_debug.h"\r
36 \r
37 \r
/* Round size up to the next multiple of sizeof(void*), so structures laid
 * out back-to-back in one buffer stay pointer-aligned. */
#define PTR_ALIGN(size) (((size) + sizeof(void*) - 1) & ~(sizeof(void*) - 1))


/* Matches definition in IbAccess for MaxSMPsWatermark */
/* Upper bound on address vectors reserved for special-QP PDs; passed to
 * THHUL_pdm_alloc_pd_avs_prep() by mlnx_allocate_pd(). */
uint32_t	g_sqp_max_avs = ((4096/sizeof(ib_mad_t))*32*5);
43 \r
44 \r
// Local declarations

/* Forward declaration of mlnx_query_qp so it can be referenced before its
 * definition later in this file. */
ib_api_status_t
mlnx_query_qp (
	IN		const	ib_qp_handle_t				h_qp,
		OUT			ib_qp_attr_t				*p_qp_attr,
	IN	OUT			ci_umv_buf_t				*p_umv_buf );
51 \r
52 /* \r
53 * CA Access Verbs\r
54 */\r
55 ib_api_status_t\r
56 mlnx_open_ca (\r
57         IN              const   ib_net64_t                                      ca_guid, // IN  const char *                ca_name,\r
58         IN              const   ci_completion_cb_t                      pfn_completion_cb,\r
59         IN              const   ci_async_event_cb_t                     pfn_async_event_cb,\r
60         IN              const   void*const                                      ca_context,\r
61                 OUT                     ib_ca_handle_t                          *ph_ca)\r
62 {\r
63 //      char *                                  ca_name = NULL;\r
64 //      char *                                  dev_name = NULL;\r
65         mlnx_hca_t                              *p_hca;\r
66         HH_hca_dev_t *                  hca_ul_info;\r
67         void *                                  hca_ul_resources_p = NULL; // (THH_hca_ul_resources_t *)\r
68         ib_api_status_t                 status;\r
69         mlnx_hob_t                              *new_ca = NULL;\r
70         MOSAL_protection_ctx_t  prot_ctx;\r
71 \r
72         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
73         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("context 0x%p\n", ca_context));\r
74 \r
75         p_hca = mlnx_hca_from_guid( ca_guid );\r
76         if( !p_hca ) {\r
77                 HCA_EXIT( MLNX_DBG_TRACE );\r
78                 return IB_NOT_FOUND;\r
79         }\r
80 \r
81         //// Verify that the device has been discovered (it'd better be)\r
82         //mlnx_names_from_guid(ca_guid, &ca_name, &dev_name);\r
83         //if (!ca_name) {\r
84         //      CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
85         //      return IB_NOT_FOUND;\r
86         //}\r
87 \r
88         //// We have name - lookup device\r
89         //if (HH_OK != HH_lookup_hca(ca_name, &hh_hndl)) {\r
90         //      CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
91         //      return IB_NOT_FOUND;\r
92         //}\r
93 \r
94         hca_ul_info = p_hca->hh_hndl;\r
95 \r
96         {\r
97                 // We are opening the HCA in kernel mode.\r
98                 // if a HOBKL exists for this device (i.e. it is open) - return E_BUSY\r
99                 if (IB_SUCCESS == mlnx_hobs_lookup(p_hca->hh_hndl, &new_ca)) {\r
100                         if (ph_ca) *ph_ca = (ib_ca_handle_t)new_ca;\r
101                         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
102                         return IB_RESOURCE_BUSY;\r
103                 }\r
104 \r
105                 // Create a mapping from hca index to hh_hndl\r
106                 status = mlnx_hobs_insert(p_hca, &new_ca);\r
107                 if (IB_SUCCESS != status) {\r
108                         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
109                         return status;\r
110                 }\r
111 \r
112                 /* save copy of HCA device object */\r
113                 new_ca->p_dev_obj = p_hca->p_dev_obj;\r
114 \r
115                 // Initialize the device driver\r
116                 if (HH_OK != THH_hob_open_hca(p_hca->hh_hndl, NULL, NULL)) {\r
117                         status = IB_ERROR;\r
118                         goto cleanup;\r
119                 }\r
120                 \r
121                 CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("context 0x%p\n", ca_context));\r
122                 status = mlnx_hobs_set_cb(new_ca,\r
123                         pfn_completion_cb,\r
124                         pfn_async_event_cb,\r
125                         ca_context);\r
126                 if (IB_SUCCESS != status) {\r
127                         goto cleanup;\r
128                 }\r
129 \r
130                 CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("ul_resource sizes: hca %d pd %d\n",\r
131                         hca_ul_info->hca_ul_resources_sz,\r
132                         hca_ul_info->pd_ul_resources_sz));\r
133 \r
134                 hca_ul_resources_p = cl_zalloc( hca_ul_info->hca_ul_resources_sz);\r
135 \r
136                 /* get the kernel protection context */ \r
137                 prot_ctx = MOSAL_get_kernel_prot_ctx();\r
138         }\r
139 \r
140         if (!hca_ul_resources_p) {\r
141                 status = IB_INSUFFICIENT_MEMORY;\r
142                 goto cleanup;\r
143         }\r
144 \r
145         if (HH_OK != THH_hob_alloc_ul_res(p_hca->hh_hndl, prot_ctx, hca_ul_resources_p)) {\r
146                 status = IB_ERROR;\r
147                 goto cleanup;\r
148         }\r
149 \r
150         // TBD: !!! in user mode (kernel hobul_idx != hob_idx)\r
151         status = mlnx_hobul_new(new_ca, p_hca->hh_hndl, hca_ul_resources_p);\r
152         if (IB_SUCCESS != status) {\r
153                 goto cleanup;\r
154         }\r
155 \r
156         // Return the HOBUL index\r
157         if (ph_ca) *ph_ca = new_ca;\r
158 \r
159         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
160         return IB_SUCCESS;\r
161 \r
162 cleanup:\r
163         if (hca_ul_resources_p)\r
164                 cl_free( hca_ul_resources_p);\r
165         THH_hob_close_hca(p_hca->hh_hndl);\r
166         mlnx_hobs_remove(new_ca);\r
167 \r
168         // For user mode call - return status to user mode\r
169         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
170         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
171         return status;\r
172 }\r
173 \r
174 ib_api_status_t\r
175 mlnx_query_ca (\r
176         IN              const   ib_ca_handle_t                          h_ca,\r
177                 OUT                     ib_ca_attr_t                            *p_ca_attr,\r
178         IN      OUT                     uint32_t                                        *p_byte_count,\r
179         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
180 {\r
181         ib_api_status_t         status;\r
182 \r
183         mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
184         HH_hca_hndl_t           hh_hndl = NULL;\r
185         HH_hca_dev_t            *hca_ul_info;\r
186         VAPI_hca_cap_t          hca_cap;\r
187         VAPI_hca_port_t         *hca_ports = NULL;\r
188         uint32_t                        size, required_size;\r
189         u_int8_t                        port_num, num_ports;\r
190         u_int32_t                       num_gids, num_pkeys;\r
191         u_int32_t                       num_page_sizes = 1; // TBD: what is actually supported\r
192         uint8_t                         *last_p;\r
193         void                            *hca_ul_resources_p = NULL;\r
194         u_int32_t                       priv_op;\r
195 \r
196         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
197 \r
198         if (NULL == p_byte_count) {\r
199                 status = IB_INVALID_PARAMETER;\r
200                 goto cleanup;\r
201         }\r
202 \r
203         mlnx_hobs_get_handle(hob_p, &hh_hndl);\r
204         if (NULL == hh_hndl) {\r
205                 CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("returning E_NODEV dev\n"));\r
206                 status = IB_INVALID_CA_HANDLE;\r
207                 goto cleanup;\r
208         }\r
209 \r
210         hca_ul_info = (HH_hca_dev_t *)hh_hndl;\r
211 \r
212         if (HH_OK != THH_hob_query(hh_hndl, &hca_cap)) {\r
213                 status = IB_ERROR;\r
214                 goto cleanup;\r
215         }\r
216 \r
217         num_ports = hca_cap.phys_port_num;   /* Number of physical ports of the HCA */             \r
218 \r
219         if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof(VAPI_hca_port_t)))) {\r
220                 CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl,\r
221                         ("Failed to cl_zalloc ports array\n"));\r
222                 status = IB_INSUFFICIENT_MEMORY;\r
223                 goto cleanup;\r
224         }\r
225 \r
226         // Loop on ports and get their properties\r
227         num_gids = 0;\r
228         num_pkeys = 0;\r
229         required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) +\r
230                 PTR_ALIGN(sizeof(u_int32_t) * num_page_sizes) +\r
231                 PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports);\r
232         for (port_num = 0; port_num < num_ports; port_num++) {\r
233                 if (HH_OK != THH_hob_query_port_prop(hh_hndl, port_num+1, &hca_ports[port_num])) {\r
234                         status = IB_ERROR;\r
235                         goto cleanup;\r
236                 }\r
237 \r
238                 num_gids  = hca_ports[port_num].gid_tbl_len;\r
239                 size = PTR_ALIGN(sizeof(ib_gid_t)  * num_gids);\r
240                 required_size += size;\r
241 \r
242                 num_pkeys = hca_ports[port_num].pkey_tbl_len;\r
243                 size = PTR_ALIGN(sizeof(u_int16_t) * num_pkeys);\r
244                 required_size += size;\r
245         }\r
246 \r
247         if( p_umv_buf && p_umv_buf->command )\r
248         {\r
249                 /*\r
250                 * Prepare the buffer with the size including hca_ul_resources_sz\r
251                 * NO ALIGNMENT for this size \r
252                 */\r
253 \r
254                 if (p_umv_buf->p_inout_buf)\r
255                 {\r
256                         cl_memcpy (&priv_op, p_umv_buf->p_inout_buf, sizeof (priv_op));\r
257                         CL_TRACE(MLNX_DBG_TRACE, g_mlnx_dbg_lvl, ("priv_op = %d\n", priv_op));\r
258 \r
259                         /* \r
260                         * Yes, UVP request for hca_ul_info\r
261                         */\r
262                         if (p_umv_buf->input_size != \r
263                                 (sizeof (HH_hca_dev_t) + sizeof (priv_op) ))\r
264                         {\r
265                                 *p_byte_count = required_size;\r
266                                 p_umv_buf->output_size = 0;\r
267                                 status = IB_INVALID_PARAMETER;\r
268                                 goto cleanup;\r
269                         }\r
270                         cl_memcpy( (uint8_t* __ptr64)p_umv_buf->p_inout_buf + sizeof (priv_op), \r
271                                 hca_ul_info, sizeof (HH_hca_dev_t));\r
272                         p_umv_buf->output_size = p_umv_buf->input_size;\r
273                 }\r
274         }\r
275 \r
276         if (NULL == p_ca_attr || *p_byte_count < required_size) {\r
277                 *p_byte_count = required_size;\r
278                 status = IB_INSUFFICIENT_MEMORY;\r
279                 if ( p_ca_attr != NULL) {\r
280                         CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl,\r
281                                 ("Failed *p_byte_count < required_size\n"));\r
282                 }\r
283                 goto cleanup;\r
284         }\r
285 \r
286         // Space is sufficient - setup table pointers\r
287         last_p = (uint8_t*)p_ca_attr;\r
288         last_p += PTR_ALIGN(sizeof(*p_ca_attr));\r
289 \r
290         p_ca_attr->p_page_size = (uint32_t*)last_p;\r
291         last_p += PTR_ALIGN(num_page_sizes * sizeof(u_int32_t));\r
292 \r
293         p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p;\r
294         last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t));\r
295 \r
296         for (port_num = 0; port_num < num_ports; port_num++) {\r
297                 p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p;\r
298                 size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len);\r
299                 last_p += size;\r
300 \r
301                 p_ca_attr->p_port_attr[port_num].p_pkey_table = (u_int16_t *)last_p;\r
302                 size = PTR_ALIGN(sizeof(u_int16_t) * hca_ports[port_num].pkey_tbl_len);\r
303                 last_p += size;\r
304         }\r
305 \r
306         // Separate the loops to ensure that table pointers are always setup\r
307         for (port_num = 0; port_num < num_ports; port_num++) {\r
308                 status = mlnx_get_hca_pkey_tbl(hh_hndl, port_num+1,\r
309                         hca_ports[port_num].pkey_tbl_len,\r
310                         p_ca_attr->p_port_attr[port_num].p_pkey_table);\r
311                 if (IB_SUCCESS != status) {\r
312                         CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl,\r
313                                 ("Failed to mlnx_get_hca_pkey_tbl for port_num:%d\n",port_num));\r
314                         goto cleanup;\r
315                 }\r
316 \r
317                 status = mlnx_get_hca_gid_tbl(hh_hndl, port_num+1,\r
318                         hca_ports[port_num].gid_tbl_len,\r
319                         &p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw);\r
320                 if (IB_SUCCESS != status) {\r
321                         CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl,\r
322                                 ("Failed to mlnx_get_hca_gid_tbl for port_num:%d\n",port_num));\r
323                         goto cleanup;\r
324                 }\r
325 \r
326 #if 0\r
327                 {\r
328                         int i;\r
329 \r
330                         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("port %d gid0:", port_num));\r
331                         for (i = 0; i < 16; i++)\r
332                                 CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, (" 0x%x", p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[i]));\r
333                         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("\n"));\r
334                 }\r
335 #endif\r
336         }\r
337 \r
338         // Convert query result into IBAL structure (no cl_memset())\r
339         if( p_umv_buf && p_umv_buf->command )\r
340         {\r
341                 // p_ca_attr->size = required_size - hca_ul_info->hca_ul_resources_sz;\r
342                 p_ca_attr->size = required_size;\r
343         }\r
344         else\r
345         {\r
346                 p_ca_attr->size = required_size;\r
347         }\r
348 \r
349         // !!! GID/PKEY tables must be queried before this call !!!\r
350         mlnx_conv_vapi_hca_cap(hca_ul_info, &hca_cap, hca_ports, p_ca_attr);\r
351 \r
352         // verify: required space == used space\r
353         CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) );\r
354 \r
355 #if 0\r
356         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("Space required %d used %d\n",\r
357                 required_size,\r
358                 ((uintn_t)last_p) - ((uintn_t)p_ca_attr))));\r
359 #endif\r
360 \r
361         if( p_umv_buf && p_umv_buf->command ) p_umv_buf->status = IB_SUCCESS;\r
362         if (hca_ul_resources_p) cl_free (hca_ul_resources_p);\r
363         if (hca_ports) cl_free( hca_ports );\r
364         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
365         return IB_SUCCESS;\r
366 \r
367 cleanup:\r
368         if( p_umv_buf && p_umv_buf->command ) p_umv_buf->status = status;\r
369         if (hca_ul_resources_p) cl_free (hca_ul_resources_p);\r
370         if (hca_ports) cl_free( hca_ports);\r
371         if( p_ca_attr != NULL || status != IB_INSUFFICIENT_MEMORY )\r
372                 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
373         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
374         return status;\r
375 }\r
376 \r
377 ib_api_status_t\r
378 mlnx_modify_ca (\r
379         IN              const   ib_ca_handle_t                          h_ca,\r
380         IN              const   uint8_t                                         port_num,\r
381         IN              const   ib_ca_mod_t                                     modca_cmd,\r
382         IN              const   ib_port_attr_mod_t                      *p_port_attr)\r
383 {\r
384         ib_api_status_t                 status;\r
385 \r
386         mlnx_hob_t                              *hob_p = (mlnx_hob_t *)h_ca;\r
387         HH_hca_hndl_t                   hh_hndl = NULL;\r
388 \r
389         VAPI_hca_attr_t                 hca_attr;\r
390         VAPI_hca_attr_mask_t    hca_attr_mask = 0;\r
391 \r
392         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
393 \r
394         mlnx_hobs_get_handle(hob_p, &hh_hndl);\r
395         if (NULL == hh_hndl) {\r
396                 status = IB_INVALID_CA_HANDLE;\r
397                 goto cleanup;\r
398         }\r
399 \r
400         cl_memclr(&hca_attr, sizeof(hca_attr));\r
401         if (modca_cmd & IB_CA_MOD_IS_SM) {\r
402                 hca_attr_mask |= HCA_ATTR_IS_SM;\r
403                 hca_attr.is_sm = (MT_bool)p_port_attr->cap.sm;\r
404         }\r
405         if (modca_cmd & IB_CA_MOD_IS_SNMP_SUPPORTED) {\r
406                 hca_attr_mask |= HCA_ATTR_IS_SNMP_TUN_SUP;\r
407                 hca_attr.is_snmp_tun_sup = (MT_bool)p_port_attr->cap.snmp;\r
408         }\r
409         if (modca_cmd & IB_CA_MOD_IS_DEV_MGMT_SUPPORTED) {\r
410                 hca_attr_mask |= HCA_ATTR_IS_DEV_MGT_SUP;\r
411                 hca_attr.is_dev_mgt_sup = (MT_bool)p_port_attr->cap.dev_mgmt;\r
412         }\r
413         if (modca_cmd & IB_CA_MOD_IS_VEND_SUPPORTED) {\r
414                 hca_attr_mask |= HCA_ATTR_IS_VENDOR_CLS_SUP;\r
415                 hca_attr.is_vendor_cls_sup = (MT_bool)p_port_attr->cap.vend;\r
416         }\r
417         if (modca_cmd & IB_CA_MOD_IS_CLIENT_REREGISTER_SUPPORTED) {\r
418                 hca_attr_mask |= HCA_ATTR_IS_CLIENT_REREGISTRATION_SUP;\r
419                 hca_attr.is_client_reregister_sup= (MT_bool)p_port_attr->cap.client_reregister;\r
420         }\r
421         if (modca_cmd & IB_CA_MOD_QKEY_CTR) {\r
422                 if (p_port_attr->qkey_ctr == 0)\r
423                         hca_attr.reset_qkey_counter = TRUE;\r
424         }\r
425 \r
426         if (0 != hca_attr_mask) {\r
427                 if (HH_OK != THH_hob_modify( hh_hndl, port_num, &hca_attr, &hca_attr_mask))\r
428                 {\r
429                         status = IB_ERROR;\r
430                         goto cleanup;\r
431                 }\r
432         }\r
433 \r
434         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
435         return IB_SUCCESS;\r
436 \r
437 cleanup:\r
438         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
439         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
440         return status;\r
441 }\r
442 \r
443 ib_api_status_t\r
444 mlnx_close_ca (\r
445         IN                              ib_ca_handle_t                          h_ca)\r
446 {\r
447         ib_api_status_t status;\r
448 \r
449         HH_hca_hndl_t   hh_hndl = NULL;\r
450         mlnx_hob_t              *hob_p   = (mlnx_hob_t *)h_ca;\r
451         HH_hca_dev_t    *hca_ul_info;\r
452         void                    *hca_ul_resources_p = NULL;\r
453         mlnx_hobul_t    *hobul_p;\r
454 \r
455         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
456 \r
457         hobul_p = mlnx_hobul_array[hob_p->index];\r
458         if( !hobul_p ) {\r
459                 status = IB_INVALID_CA_HANDLE;\r
460                 goto cleanup;\r
461         }\r
462 \r
463         if( hobul_p->count ) {\r
464                 status = IB_RESOURCE_BUSY;\r
465                 goto cleanup;\r
466         }\r
467 \r
468         mlnx_hobs_get_handle(hob_p, &hh_hndl);\r
469         if (NULL == hh_hndl) {\r
470                 status = IB_INVALID_CA_HANDLE;\r
471                 goto cleanup;\r
472         }\r
473 \r
474         hca_ul_info = (HH_hca_dev_t *)hh_hndl;\r
475         mlnx_hobul_get(hob_p, &hca_ul_resources_p);\r
476 \r
477         if (hca_ul_resources_p) {\r
478                 THH_hob_free_ul_res(hh_hndl, hca_ul_resources_p);\r
479                 cl_free( hca_ul_resources_p);\r
480         }\r
481         mlnx_hobul_delete(hob_p);\r
482         THH_hob_close_hca(hh_hndl);\r
483         mlnx_hobs_remove(hob_p);\r
484 \r
485         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
486         return IB_SUCCESS;\r
487 \r
488 cleanup:\r
489         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
490         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
491         return status;\r
492 }\r
493 \r
494 \r
/*
 * Open the CA on behalf of a user-mode caller.
 *
 * Two modes, selected by p_umv_buf->command:
 *  - command == 0: allocate a plain mlnx_um_ca_t holding a copy of the HH
 *    device info, return it as the UM CA handle.
 *  - command != 0 (UVP request): additionally allocate space for the
 *    THH user-level resources, build an MDL over the dev-info+resources
 *    region, map it into the calling process, attach to the HCA via
 *    THH_hob_alloc_ul_res(), and write the user-mode mapped address into
 *    p_umv_buf->p_inout_buf.
 *
 * Returns p_umv_buf->status; on any failure *ph_um_ca is set to NULL.
 *
 * NOTE: the error labels below live inside the final else-block and are
 * reached by goto from earlier failure points - unusual but valid C; each
 * label releases exactly the resources acquired before its goto site.
 */
static ib_api_status_t
mlnx_um_open(
	IN		const	ib_ca_handle_t				h_ca,
	IN	OUT			ci_umv_buf_t* const			p_umv_buf,
		OUT			ib_ca_handle_t* const		ph_um_ca )
{
	ib_api_status_t		status;

	mlnx_hob_t				*hob_p = (mlnx_hob_t *)h_ca;
	HH_hca_hndl_t			hh_hndl = NULL;
	HH_hca_dev_t			*hca_ul_info;
	mlnx_um_ca_t			*p_um_ca;
	MOSAL_protection_ctx_t	prot_ctx;

	HCA_ENTER( MLNX_DBG_TRACE );

	mlnx_hobs_get_handle( hob_p, &hh_hndl );
	if( !hh_hndl )
	{
		HCA_TRACE(MLNX_DBG_INFO, ("returning E_NODEV dev\n"));
		status = IB_INVALID_CA_HANDLE;
		goto mlnx_um_open_err1;
	}

	hca_ul_info = (HH_hca_dev_t *)hh_hndl;

	/* Kernel-originated open (no UVP command): no MDL or mapping needed. */
	if( !p_umv_buf->command )
	{
		p_um_ca = (mlnx_um_ca_t*)cl_zalloc( sizeof(mlnx_um_ca_t) );
		if( !p_um_ca )
		{
			p_umv_buf->status = IB_INSUFFICIENT_MEMORY;
			goto mlnx_um_open_err1;
		}
		/* Copy the dev info. */
		p_um_ca->dev_info = *hca_ul_info;
		p_um_ca->hob_p = hob_p;
		*ph_um_ca = (ib_ca_handle_t)p_um_ca;
		p_umv_buf->status = IB_SUCCESS;
		p_umv_buf->output_size = 0;
		HCA_EXIT( MLNX_DBG_TRACE );
		return IB_SUCCESS;
	}

	/*
	 * Prepare the buffer with the size including hca_ul_resources_sz
	 * NO ALIGNMENT for this size 
	 */
	if( !p_umv_buf->p_inout_buf ||
		p_umv_buf->output_size < sizeof(void*) )
	{
		p_umv_buf->status = IB_INVALID_PARAMETER;
		goto mlnx_um_open_err1;
	}

	HCA_TRACE( MLNX_DBG_TRACE, ("priv_op = %d\n", p_umv_buf->command ));

	/* Yes, UVP request for hca_ul_info.  The -1 accounts for the
	 * 1-byte ul_hca_res placeholder already inside mlnx_um_ca_t. */
	p_um_ca = (mlnx_um_ca_t*)cl_zalloc(
		sizeof(mlnx_um_ca_t) + hca_ul_info->hca_ul_resources_sz - 1 );
	if( !p_um_ca )
	{
		p_umv_buf->status = IB_INSUFFICIENT_MEMORY;
		goto mlnx_um_open_err1;
	}

	/* MDL covers dev_info plus the trailing ul resources area. */
	p_um_ca->p_mdl = IoAllocateMdl( &p_um_ca->dev_info,
		(ULONG)(sizeof(HH_hca_dev_t) + hca_ul_info->hca_ul_resources_sz),
		FALSE, TRUE, NULL );
	if( !p_um_ca->p_mdl )
	{
		p_umv_buf->status = IB_ERROR;
		goto mlnx_um_open_err2;
	}
	/* Build the page list... */
	MmBuildMdlForNonPagedPool( p_um_ca->p_mdl );

	/* Map the memory into the calling process's address space.
	 * MmMapLockedPagesSpecifyCache raises on failure for UserMode,
	 * hence the __try/__except guard. */
	__try
	{
		p_um_ca->p_mapped_addr =
			MmMapLockedPagesSpecifyCache( p_um_ca->p_mdl,
			UserMode, MmCached, NULL, FALSE, NormalPagePriority );
	}
	__except(EXCEPTION_EXECUTE_HANDLER)
	{
		p_umv_buf->status = IB_ERROR;
		goto mlnx_um_open_err3;
	}

	/* Register with THH (attach to the HCA). */
	prot_ctx = MOSAL_get_current_prot_ctx();
	if( THH_hob_alloc_ul_res(hh_hndl, prot_ctx, p_um_ca->ul_hca_res) != HH_OK )
	{
		HCA_TRACE( CL_DBG_ERROR, ("Failed to get ul_res\n"));
		p_umv_buf->status = IB_ERROR;
	}

	if( p_umv_buf->status == IB_SUCCESS )
	{
		/* Copy the dev info. */
		p_um_ca->dev_info = *hca_ul_info;
		p_um_ca->hob_p = hob_p;
		*ph_um_ca = (ib_ca_handle_t)p_um_ca;
		/* Hand the user-mode address of the mapping back to the UVP. */
		(*(void** __ptr64)p_umv_buf->p_inout_buf) = p_um_ca->p_mapped_addr;
		p_umv_buf->status = IB_SUCCESS;
	}
	else
	{
		/* Failure unwind: fall through the labels in reverse order
		 * of acquisition. */
		MmUnmapLockedPages( p_um_ca->p_mapped_addr, p_um_ca->p_mdl );
mlnx_um_open_err3:
		IoFreeMdl( p_um_ca->p_mdl );
mlnx_um_open_err2:
		cl_free( p_um_ca );
mlnx_um_open_err1:
		*ph_um_ca = NULL;
	}

	//*ph_um_ca = NULL;
	p_umv_buf->output_size = sizeof(void*);
	HCA_EXIT( MLNX_DBG_TRACE );
	return p_umv_buf->status;
}
618 \r
619 \r
620 static void\r
621 mlnx_um_close(\r
622         IN                              ib_ca_handle_t                          h_ca,\r
623         IN                              ib_ca_handle_t                          h_um_ca )\r
624 {\r
625         mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
626         HH_hca_hndl_t           hh_hndl = NULL;\r
627         mlnx_um_ca_t            *p_um_ca = (mlnx_um_ca_t*)h_um_ca;\r
628 \r
629         HCA_ENTER( MLNX_DBG_TRACE );\r
630 \r
631         mlnx_hobs_get_handle( hob_p, &hh_hndl );\r
632         if( !hh_hndl )\r
633                 goto mlnx_um_close_cleanup;\r
634 \r
635         if( !p_um_ca )\r
636                 return;\r
637 \r
638         if( !p_um_ca->p_mapped_addr )\r
639                 goto done;\r
640 \r
641         THH_hob_free_ul_res( hh_hndl, p_um_ca->ul_hca_res );\r
642 \r
643 mlnx_um_close_cleanup:\r
644         MmUnmapLockedPages( p_um_ca->p_mapped_addr, p_um_ca->p_mdl );\r
645         IoFreeMdl( p_um_ca->p_mdl );\r
646 done:\r
647         cl_free( p_um_ca );\r
648 \r
649         HCA_EXIT( MLNX_DBG_TRACE );\r
650 }\r
651 \r
652 \r
653 /*\r
654 *    Protection Domain and Reliable Datagram Domain Verbs\r
655 */\r
656 \r
657 ib_api_status_t\r
658 mlnx_allocate_pd (\r
659         IN              const   ib_ca_handle_t                          h_ca,\r
660         IN              const   ib_pd_type_t                            type,\r
661                 OUT                     ib_pd_handle_t                          *ph_pd,\r
662         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
663 {\r
664         mlnx_hob_t                              *hob_p;\r
665         mlnx_hobul_t                    *hobul_p;\r
666         HH_hca_dev_t                    *hca_ul_info;\r
667         HHUL_pd_hndl_t                  hhul_pd_hndl = 0;\r
668         void                                    *pd_ul_resources_p = NULL;\r
669         u_int32_t                               pd_idx;\r
670         ib_api_status_t                 status;\r
671         MOSAL_protection_ctx_t  prot_ctx;\r
672 \r
673         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
674 \r
675         if( p_umv_buf )\r
676                 hob_p = ((mlnx_um_ca_t *)h_ca)->hob_p;\r
677         else\r
678                 hob_p = (mlnx_hob_t *)h_ca;\r
679         \r
680         hobul_p = mlnx_hobs_get_hobul(hob_p);\r
681         if (NULL == hobul_p) {\r
682                 status = IB_INVALID_CA_HANDLE;\r
683                 goto cleanup;\r
684         }\r
685 \r
686         hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;\r
687         if (NULL == hca_ul_info) {\r
688                 status = IB_INVALID_CA_HANDLE;\r
689                 goto cleanup;\r
690         }\r
691 \r
692         if( p_umv_buf && p_umv_buf->command )\r
693         {\r
694                 // For user mode calls - obtain and verify the vendor information\r
695                 if ((p_umv_buf->input_size - sizeof (u_int32_t))  != \r
696                         hca_ul_info->pd_ul_resources_sz ||\r
697                         NULL == p_umv_buf->p_inout_buf) {\r
698                                 status = IB_INVALID_PARAMETER;\r
699                                 goto cleanup;\r
700                         }\r
701                         pd_ul_resources_p = (void *)p_umv_buf->p_inout_buf;\r
702 \r
703                         /* get the current protection context */ \r
704                         prot_ctx = MOSAL_get_current_prot_ctx();\r
705         }\r
706         else\r
707         {\r
708                 // for kernel mode calls - allocate app resources. Use prep->call->done sequence\r
709                 pd_ul_resources_p = cl_zalloc( hca_ul_info->pd_ul_resources_sz);\r
710                 if (NULL == pd_ul_resources_p) {\r
711                         status = IB_INSUFFICIENT_MEMORY;\r
712                         goto cleanup;\r
713                 }\r
714 \r
715                 switch( type )\r
716                 {\r
717                 case IB_PDT_SQP:\r
718                         if (HH_OK != THHUL_pdm_alloc_pd_avs_prep(hobul_p->hhul_hndl,\r
719                                 g_sqp_max_avs, PD_FOR_SQP, &hhul_pd_hndl, pd_ul_resources_p))\r
720                         {\r
721                                 status = IB_ERROR;\r
722                                 goto cleanup;\r
723                         }\r
724                         break;\r
725 \r
726                 case IB_PDT_UD:\r
727                         if (HH_OK != THHUL_pdm_alloc_pd_avs_prep(hobul_p->hhul_hndl,\r
728                                 g_sqp_max_avs, PD_NO_FLAGS, &hhul_pd_hndl, pd_ul_resources_p))\r
729                         {\r
730                                 status = IB_ERROR;\r
731                                 goto cleanup;\r
732                         }\r
733                         break;\r
734 \r
735                 default:\r
736                         if (HH_OK != THHUL_pdm_alloc_pd_prep(hobul_p->hhul_hndl, &hhul_pd_hndl, pd_ul_resources_p)) {\r
737                                 status = IB_ERROR;\r
738                                 goto cleanup;\r
739                         }\r
740                 }\r
741                 /* get the current protection context */ \r
742                 prot_ctx = MOSAL_get_kernel_prot_ctx();\r
743         }\r
744 \r
745         // Allocate the PD (cmdif)\r
746         if (HH_OK != THH_hob_alloc_pd(hobul_p->hh_hndl, prot_ctx, pd_ul_resources_p, &pd_idx)) {\r
747                 status = IB_INSUFFICIENT_RESOURCES;\r
748                 goto cleanup_pd;\r
749         }\r
750 \r
751         if( !(p_umv_buf && p_umv_buf->command) )\r
752         {\r
753                 // Manage user level resources\r
754                 if (HH_OK != THHUL_pdm_alloc_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl, pd_idx, pd_ul_resources_p)) {\r
755                         THH_hob_free_pd(hobul_p->hh_hndl, pd_idx);\r
756                         status = IB_ERROR;\r
757                         goto cleanup_pd;\r
758                 }\r
759         }\r
760 \r
761         VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup_pd);\r
762 \r
763         // Save data refs for future use\r
764         cl_mutex_acquire(&hobul_p->pd_info_tbl[pd_idx].mutex);\r
765         hobul_p->pd_info_tbl[pd_idx].pd_num = pd_idx;\r
766         hobul_p->pd_info_tbl[pd_idx].hca_idx = hob_p->index;\r
767         hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl = hhul_pd_hndl;\r
768         hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p = pd_ul_resources_p;\r
769         hobul_p->pd_info_tbl[pd_idx].count = 0;\r
770         hobul_p->pd_info_tbl[pd_idx].kernel_mode = !(p_umv_buf && p_umv_buf->command);\r
771         hobul_p->pd_info_tbl[pd_idx].mark = E_MARK_PD;\r
772         cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex);\r
773 \r
774         cl_atomic_inc( &hobul_p->count );\r
775 \r
776         if (ph_pd) *ph_pd = (ib_pd_handle_t)PD_HNDL_FROM_PD(pd_idx);\r
777         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca_idx 0x%x pd_idx 0x%x returned 0x%p\n", hob_p->index, pd_idx, *ph_pd));\r
778 \r
779         if( p_umv_buf && p_umv_buf->command )\r
780         {\r
781                 p_umv_buf->output_size = p_umv_buf->input_size;\r
782                 /* \r
783                 * Copy the pd_idx back to user\r
784                 */\r
785                 cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->pd_ul_resources_sz),\r
786                         &pd_idx, sizeof (pd_idx));\r
787                 p_umv_buf->status = IB_SUCCESS;\r
788         }\r
789         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
790         return IB_SUCCESS;\r
791 \r
792 cleanup_pd:\r
793         THHUL_pdm_free_pd_prep(hobul_p->hhul_hndl, hhul_pd_hndl, FALSE);\r
794         THHUL_pdm_free_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl);\r
795 \r
796 cleanup:\r
797         if( !(p_umv_buf && p_umv_buf->command) && pd_ul_resources_p )\r
798                 cl_free( pd_ul_resources_p);\r
799         if( p_umv_buf && p_umv_buf->command )\r
800         {\r
801                 p_umv_buf->output_size = 0;\r
802                 p_umv_buf->status = status;\r
803         }\r
804         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
805         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
806         return status;\r
807 }\r
808 \r
/*
 * Destroy a protection domain.
 *
 * h_pd encodes both the HCA index and the PD index; both are unpacked
 * and validated before any state is touched.  Fails with
 * IB_RESOURCE_BUSY while any objects (QPs, AVs, ...) still reference
 * the PD (tracked in pd_info_tbl[pd_idx].count).
 *
 * Teardown ordering matters and is performed entirely under the
 * per-PD mutex: THHUL free prep (kernel-mode PDs only) -> THH free
 * (cmdif) -> THHUL free done + UL resource release (kernel-mode only).
 * Only after all three succeed is the table slot invalidated.
 */
ib_api_status_t
mlnx_deallocate_pd (
	IN				ib_pd_handle_t					h_pd)
{
	u_int32_t			hca_idx = PD_HCA_FROM_HNDL(h_pd);
	u_int32_t			pd_idx  = PD_NUM_FROM_HNDL(h_pd);
	mlnx_hobul_t		*hobul_p;
	HHUL_pd_hndl_t		hhul_pd_hndl;
	ib_api_status_t		status;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	/* Validate the HCA portion of the handle. */
	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
	hobul_p = mlnx_hobul_array[hca_idx];
	if (NULL == hobul_p) {
		status =  IB_INVALID_PD_HANDLE;
		goto cleanup;
	}
	/* Validate the PD portion and confirm the slot really holds a PD. */
	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);
	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
		status =  IB_INVALID_PD_HANDLE;
		goto cleanup;
	}

	cl_mutex_acquire(&hobul_p->pd_info_tbl[pd_idx].mutex);

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d k_mod %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count, hobul_p->pd_info_tbl[pd_idx].kernel_mode));

	/* Refuse to destroy a PD that still has dependent objects. */
	if (0 != hobul_p->pd_info_tbl[pd_idx].count) {
		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));
		status = IB_RESOURCE_BUSY;
		goto cleanup_locked;
	}

	hhul_pd_hndl = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;

	// PREP:
	/* User-level (THHUL) resources exist only for kernel-mode PDs;
	 * user-mode callers own their UL resources themselves. */
	if (hobul_p->pd_info_tbl[pd_idx].kernel_mode) {
		if (HH_OK != THHUL_pdm_free_pd_prep(hobul_p->hhul_hndl, hhul_pd_hndl, FALSE)) {
			status = IB_ERROR;
			goto cleanup_locked;
		}
	}

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d before free_pd hh_hndl %p\n", 
		pd_idx, hobul_p->hh_hndl));

	/* Release the PD in the HCA (cmdif). */
	if (HH_OK != THH_hob_free_pd(hobul_p->hh_hndl, pd_idx)) {
		status = IB_ERROR;
		goto cleanup_locked;
	}

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d after free_pd\n", pd_idx));

	/* Complete the THHUL free and release kernel-allocated UL resources. */
	if (hobul_p->pd_info_tbl[pd_idx].kernel_mode) {
		if (HH_OK != THHUL_pdm_free_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl)) {
			status = IB_ERROR;
			goto cleanup_locked;
		}
		if (hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p)
			cl_free( hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p);
	}

	/* Invalidate the table slot only after everything succeeded. */
	hobul_p->pd_info_tbl[pd_idx].mark = E_MARK_INVALID;
	hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p = NULL;

	cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex);

	/* One fewer live object on this HCA. */
	cl_atomic_dec( &hobul_p->count );
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup_locked:
	cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex);

cleanup:
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
889 \r
890 /* \r
891 * Address Vector Management Verbs\r
892 */\r
893 ib_api_status_t\r
894 mlnx_create_av (\r
895         IN              const   ib_pd_handle_t                          h_pd,\r
896         IN              const   ib_av_attr_t                            *p_addr_vector,\r
897                 OUT                     ib_av_handle_t                          *ph_av,\r
898         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
899 {\r
900         u_int32_t                       hca_idx = PD_HCA_FROM_HNDL(h_pd);\r
901         u_int32_t                       pd_idx  = PD_NUM_FROM_HNDL(h_pd);\r
902         HHUL_ud_av_hndl_t       av_h;\r
903         mlnx_hobul_t            *hobul_p;\r
904         mlnx_avo_t                      *avo_p = NULL;\r
905         HHUL_pd_hndl_t          hhul_pd_hndl;\r
906         ib_api_status_t         status;\r
907 \r
908         VAPI_ud_av_t            av;\r
909 \r
910         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
911 \r
912         VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
913         hobul_p = mlnx_hobul_array[hca_idx];\r
914         if (NULL == hobul_p) {\r
915                 status =  IB_INVALID_PD_HANDLE;\r
916                 goto cleanup;\r
917         }\r
918         VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);\r
919         if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {\r
920                 status =  IB_INVALID_PD_HANDLE;\r
921                 goto cleanup;\r
922         }\r
923         hhul_pd_hndl = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;\r
924 \r
925         if (NULL == (avo_p = cl_zalloc( sizeof(mlnx_avo_t)))) {\r
926                 status = IB_INSUFFICIENT_MEMORY;\r
927                 goto cleanup;\r
928         }\r
929 \r
930         cl_memclr(&av, sizeof(av));\r
931         mlnx_conv_ibal_av(hobul_p->hh_hndl, p_addr_vector, &av);\r
932         // This creates a non priviledged ud_av.\r
933         // To create a privilged ud_av call THH_hob_create_ud_av()\r
934         if (HH_OK != THHUL_pdm_create_ud_av(hobul_p->hhul_hndl, hhul_pd_hndl, &av, &av_h)) {\r
935                 status = IB_INSUFFICIENT_RESOURCES;\r
936                 goto cleanup;\r
937         }\r
938 \r
939         // update PD object count\r
940         cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count);\r
941         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));\r
942 \r
943 \r
944         avo_p->mark    = E_MARK_AV;\r
945         avo_p->hca_idx = hca_idx;\r
946         avo_p->pd_idx  = pd_idx;\r
947         avo_p->h_av    = av_h;\r
948 \r
949         if (ph_av) *ph_av = (ib_av_handle_t)avo_p;\r
950 \r
951         if( p_umv_buf && p_umv_buf->command )\r
952         {\r
953                 p_umv_buf->status = IB_SUCCESS;\r
954         }\r
955         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
956         return IB_SUCCESS;\r
957 \r
958 cleanup:\r
959         if (avo_p) {\r
960                 avo_p->mark = E_MARK_INVALID;\r
961                 cl_free( avo_p);\r
962         }\r
963         if( p_umv_buf && p_umv_buf->command )\r
964         {\r
965                 p_umv_buf->output_size = 0;\r
966                 p_umv_buf->status = status;\r
967         }\r
968 \r
969         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
970         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
971         return status;\r
972 }\r
973 \r
974 ib_api_status_t\r
975 mlnx_query_av (\r
976         IN              const   ib_av_handle_t                          h_av,\r
977                 OUT                     ib_av_attr_t                            *p_addr_vector,\r
978                 OUT                     ib_pd_handle_t                          *ph_pd,\r
979         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
980 {\r
981         mlnx_avo_t                      *avo_p = (mlnx_avo_t *)h_av;\r
982         mlnx_hobul_t            *hobul_p;\r
983         ib_api_status_t         status;\r
984 \r
985         VAPI_ud_av_t            av;\r
986 \r
987         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
988         if (!avo_p || avo_p->mark != E_MARK_AV) {\r
989                 status = IB_INVALID_AV_HANDLE;\r
990                 goto cleanup;\r
991         }\r
992 \r
993         VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
994         hobul_p = mlnx_hobul_array[avo_p->hca_idx];\r
995         if (NULL == hobul_p) {\r
996                 status =  IB_INVALID_AV_HANDLE;\r
997                 goto cleanup;\r
998         }\r
999         VALIDATE_INDEX(avo_p->pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);\r
1000         if (E_MARK_PD != hobul_p->pd_info_tbl[avo_p->pd_idx].mark) {\r
1001                 status =  IB_INVALID_PD_HANDLE;\r
1002                 goto cleanup;\r
1003         }\r
1004 \r
1005         if (p_addr_vector) {\r
1006                 if (HH_OK != THHUL_pdm_query_ud_av(hobul_p->hhul_hndl, avo_p->h_av, &av)) {\r
1007                         status = IB_ERROR;\r
1008                         goto cleanup;\r
1009                 }\r
1010                 mlnx_conv_vapi_av(hobul_p->hh_hndl, &av, p_addr_vector);\r
1011         }\r
1012 \r
1013         if (ph_pd) *ph_pd = (ib_pd_handle_t)PD_HNDL_FROM_PD(avo_p->pd_idx);\r
1014 \r
1015         if( p_umv_buf && p_umv_buf->command )\r
1016         {\r
1017                 p_umv_buf->output_size = 0;\r
1018                 p_umv_buf->status = IB_SUCCESS;\r
1019         }\r
1020         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1021         return IB_SUCCESS;\r
1022 \r
1023 cleanup:\r
1024         if( p_umv_buf && p_umv_buf->command )\r
1025         {\r
1026                 p_umv_buf->output_size = 0;\r
1027                 p_umv_buf->status = status;\r
1028         }\r
1029         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
1030         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1031         return status;\r
1032 }\r
1033 \r
1034 ib_api_status_t\r
1035 mlnx_modify_av (\r
1036         IN              const   ib_av_handle_t                          h_av,\r
1037         IN              const   ib_av_attr_t                            *p_addr_vector,\r
1038         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1039 {\r
1040         mlnx_avo_t                      *avo_p = (mlnx_avo_t *)h_av;\r
1041         mlnx_hobul_t            *hobul_p;\r
1042         ib_api_status_t         status;\r
1043 \r
1044         VAPI_ud_av_t            av;\r
1045 \r
1046         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1047         if (!avo_p || avo_p->mark != E_MARK_AV) {\r
1048                 status = IB_INVALID_AV_HANDLE;\r
1049                 goto cleanup;\r
1050         }\r
1051 \r
1052         VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
1053         hobul_p = mlnx_hobul_array[avo_p->hca_idx];\r
1054         if (NULL == hobul_p) {\r
1055                 status =  IB_INVALID_AV_HANDLE;\r
1056                 goto cleanup;\r
1057         }\r
1058 \r
1059         cl_memclr(&av, sizeof(av));\r
1060         mlnx_conv_ibal_av(hobul_p->hh_hndl, p_addr_vector, &av);\r
1061         if (HH_OK != THHUL_pdm_modify_ud_av(hobul_p->hhul_hndl, avo_p->h_av, &av)) {\r
1062                 status = IB_ERROR;\r
1063                 goto cleanup;\r
1064         }\r
1065 \r
1066         if( p_umv_buf && p_umv_buf->command )\r
1067         {\r
1068                 p_umv_buf->output_size = 0;\r
1069                 p_umv_buf->status = IB_SUCCESS;\r
1070         }\r
1071         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1072         return IB_SUCCESS;\r
1073 \r
1074 cleanup:\r
1075         if( p_umv_buf && p_umv_buf->command )\r
1076         {\r
1077                 p_umv_buf->output_size = 0;\r
1078                 p_umv_buf->status = status;\r
1079         }\r
1080         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
1081         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1082         return status;\r
1083 }\r
1084 \r
1085 ib_api_status_t\r
1086 mlnx_destroy_av (\r
1087         IN              const   ib_av_handle_t                          h_av)\r
1088 {\r
1089         mlnx_avo_t                      *avo_p = (mlnx_avo_t *)h_av;\r
1090         mlnx_hobul_t            *hobul_p;\r
1091         ib_api_status_t         status;\r
1092 \r
1093         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1094         if (!avo_p || avo_p->mark != E_MARK_AV) {\r
1095                 status = IB_INVALID_AV_HANDLE;\r
1096                 goto cleanup;\r
1097         }\r
1098 \r
1099         VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
1100         hobul_p = mlnx_hobul_array[avo_p->hca_idx];\r
1101         if (NULL == hobul_p) {\r
1102                 status =  IB_INVALID_AV_HANDLE;\r
1103                 goto cleanup;\r
1104         }\r
1105         VALIDATE_INDEX(avo_p->pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);\r
1106         if (E_MARK_PD != hobul_p->pd_info_tbl[avo_p->pd_idx].mark) {\r
1107                 status =  IB_INVALID_PD_HANDLE;\r
1108                 goto cleanup;\r
1109         }\r
1110 \r
1111         // This destroy's a non priviledged ud_av.\r
1112         // To destroy a privilged ud_av call THH_hob_destroy_ud_av()\r
1113         if (HH_OK != THHUL_pdm_destroy_ud_av(hobul_p->hhul_hndl, avo_p->h_av)) {\r
1114                 status = IB_ERROR;\r
1115                 goto cleanup;\r
1116         }\r
1117 \r
1118         // update PD object count\r
1119         cl_atomic_dec(&hobul_p->pd_info_tbl[avo_p->pd_idx].count);\r
1120         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", avo_p->pd_idx, hobul_p->pd_info_tbl[avo_p->pd_idx].count));\r
1121 \r
1122         avo_p->mark = E_MARK_INVALID;\r
1123         cl_free( avo_p);\r
1124         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1125         return IB_SUCCESS;\r
1126 \r
1127 cleanup:\r
1128         if (avo_p) {\r
1129                 avo_p->mark = E_MARK_INVALID;\r
1130                 cl_free( avo_p);\r
1131         }\r
1132         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
1133         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1134         return status;\r
1135 }\r
1136 \r
1137 /*\r
1138 *       Queue Pair Management Verbs\r
1139 */\r
1140 \r
/*
 * Create a regular (non-special) queue pair on the given PD.
 *
 * Supports two callers, distinguished by p_umv_buf->command:
 *  - user mode: the caller's UL resources arrive in
 *    p_umv_buf->p_inout_buf (size-checked against
 *    hca_ul_info->qp_ul_resources_sz); on success the new qp_num is
 *    copied back to the tail of that buffer.
 *  - kernel mode: UL resources are allocated here, the
 *    THHUL prep -> THH create -> THHUL done sequence is driven
 *    locally, and the send/recv scatter-gather scratch arrays are
 *    allocated as well.
 *
 * On success *ph_qp receives the handle, the per-QP info-table entry
 * is filled in under its mutex, and the PD's reference count is
 * incremented.  On failure resources are unwound via cleanup_qp /
 * cleanup.
 */
ib_api_status_t
mlnx_create_qp (
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	void						*qp_context,
	IN		const	ib_qp_create_t				*p_create_attr,
		OUT			ib_qp_attr_t				*p_qp_attr,
		OUT			ib_qp_handle_t				*ph_qp,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t			status;
	ib_qp_handle_t			h_qp;

	u_int32_t				hca_idx = PD_HCA_FROM_HNDL(h_pd);
	u_int32_t				pd_idx  = PD_NUM_FROM_HNDL(h_pd);
	u_int32_t				qp_num;
	u_int32_t				qp_idx;
	u_int32_t				send_cq_num;
	u_int32_t				send_cq_idx;
	u_int32_t				recv_cq_num;
	u_int32_t				recv_cq_idx;
	mlnx_hobul_t			*hobul_p;
	HH_hca_dev_t			*hca_ul_info;
	HH_qp_init_attr_t		hh_qp_init_attr;
	HHUL_qp_init_attr_t		ul_qp_init_attr;
	HHUL_qp_hndl_t			hhul_qp_hndl = NULL;
	VAPI_qp_cap_t			hh_qp_cap;
	void					*qp_ul_resources_p = NULL;
	VAPI_sg_lst_entry_t		*send_sge_p = NULL;
	VAPI_sg_lst_entry_t		*recv_sge_p = NULL;
	u_int32_t				num_sge;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	// Unpack and validate the PD handle.
	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
	hobul_p = mlnx_hobul_array[hca_idx];
	if (NULL == hobul_p) {
		status = IB_INVALID_PD_HANDLE;
		goto cleanup;
	}
	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);
	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
		status =  IB_INVALID_PD_HANDLE;
		goto cleanup;
	}

	hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;
	if (NULL == hca_ul_info) {
		status =  IB_INVALID_PD_HANDLE;
		goto cleanup;
	}

	// The create attributes must be provided
	if (!p_create_attr) {
		status =  IB_INVALID_PARAMETER;
		goto cleanup;
	}

	// convert input parameters
	cl_memclr(&ul_qp_init_attr, sizeof(ul_qp_init_attr));
	mlnx_conv_qp_create_attr(p_create_attr, &ul_qp_init_attr, NULL);
	// Resolve and validate both CQ handles before touching any state.
	send_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_sq_cq);
	recv_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_rq_cq);
	send_cq_idx = send_cq_num & hobul_p->cq_idx_mask;
	recv_cq_idx = recv_cq_num & hobul_p->cq_idx_mask;
	VALIDATE_INDEX(send_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);
	if ( E_MARK_CQ != hobul_p->cq_info_tbl[send_cq_idx].mark) {
		status =  IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}
	VALIDATE_INDEX(recv_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);
	if ( E_MARK_CQ != hobul_p->cq_info_tbl[recv_cq_idx].mark) {
		status =  IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}

	// Wire the UL init attributes to the resolved PD and CQ handles.
	ul_qp_init_attr.pd    = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;
	ul_qp_init_attr.sq_cq = hobul_p->cq_info_tbl[send_cq_idx].hhul_cq_hndl;
	ul_qp_init_attr.rq_cq = hobul_p->cq_info_tbl[recv_cq_idx].hhul_cq_hndl;

	if( p_umv_buf && p_umv_buf->command )
	{
		// For user mode calls - obtain and verify the vendor information
		if ((p_umv_buf->input_size - sizeof (u_int32_t)) != 
			hca_ul_info->qp_ul_resources_sz ||
			NULL == p_umv_buf->p_inout_buf) {
				status = IB_INVALID_PARAMETER;
				goto cleanup;
			}
			qp_ul_resources_p = (void *)p_umv_buf->p_inout_buf;

	} else {
		// for kernel mode calls - allocate app resources. Use prep->call->done sequence
		qp_ul_resources_p = cl_zalloc( hca_ul_info->qp_ul_resources_sz);
		if (!qp_ul_resources_p) {
			status = IB_INSUFFICIENT_MEMORY;
			goto cleanup;
		}

		if (HH_OK != THHUL_qpm_create_qp_prep(hobul_p->hhul_hndl, &ul_qp_init_attr, &hhul_qp_hndl, &hh_qp_cap, qp_ul_resources_p)) {
			status = IB_ERROR;
			goto cleanup;
		}
		// TBD: if not same report error to IBAL
		ul_qp_init_attr.qp_cap = hh_qp_cap;  // struct assign
	}

	// Convert HHUL to HH structure (for HH create_qp)
	hh_qp_init_attr.pd = pd_idx;
	hh_qp_init_attr.rdd = 0; // TBD: RDD
	if( ul_qp_init_attr.srq != HHUL_INVAL_SRQ_HNDL )
	{
		// TBD: HH handle from HHUL handle.
		// NOTE(review): SRQs appear unsupported here - this assert fires
		// on checked builds, but hh_qp_init_attr.srq is left unset on
		// this branch in free builds; confirm intended.
		CL_ASSERT( ul_qp_init_attr.srq == HHUL_INVAL_SRQ_HNDL );
	}
	else
	{
		hh_qp_init_attr.srq = HH_INVAL_SRQ_HNDL;
	}
	hh_qp_init_attr.sq_cq = send_cq_num;
	hh_qp_init_attr.rq_cq = recv_cq_num;
	hh_qp_init_attr.sq_sig_type = ul_qp_init_attr.sq_sig_type;
	hh_qp_init_attr.rq_sig_type = ul_qp_init_attr.rq_sig_type;
	hh_qp_init_attr.ts_type = ul_qp_init_attr.ts_type;
	hh_qp_init_attr.qp_cap  = ul_qp_init_attr.qp_cap; // struct assign

	// Allocate the QP (cmdif)
	if (HH_OK != THH_hob_create_qp(hobul_p->hh_hndl, &hh_qp_init_attr, qp_ul_resources_p, &qp_num)) {
		status = IB_INSUFFICIENT_RESOURCES;
		goto cleanup_qp;
	}

	if( !(p_umv_buf && p_umv_buf->command) )
	{
		// Manage user level resources
		if (HH_OK != THHUL_qpm_create_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, qp_num, qp_ul_resources_p)) {
			THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num);
			status = IB_ERROR;
			goto cleanup_qp;
		}

		// Create SQ and RQ iov
		// NOTE(review): if either allocation below fails, the cleanup_qp
		// path does not call THH_hob_destroy_qp(), so the HW QP created
		// above may leak - confirm.
		num_sge = ul_qp_init_attr.qp_cap.max_sg_size_sq;
		send_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));
		if (!send_sge_p) {
			status = IB_INSUFFICIENT_MEMORY;
			goto cleanup_qp;
		}

		num_sge = ul_qp_init_attr.qp_cap.max_sg_size_rq;
		recv_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));
		if (!recv_sge_p) {
			status = IB_INSUFFICIENT_MEMORY;
			goto cleanup_qp;
		}
	}

	// Save data refs for future use
	qp_idx = qp_num & hobul_p->qp_idx_mask;
	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_ERROR, cleanup_qp);
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hobul_p 0x%p mask 0x%x qp_idx 0x%x qp_num 0x%x\n",
		hobul_p, hobul_p->qp_idx_mask, qp_idx, qp_num));

	// Publish the QP in its info-table slot under the slot's mutex.
	h_qp = (ib_qp_handle_t)QP_HNDL_FROM_QP(qp_idx);
	cl_mutex_acquire(&h_qp->mutex);
	h_qp->pd_num					= pd_idx;
	h_qp->hhul_qp_hndl				= hhul_qp_hndl;
	h_qp->qp_type					= p_create_attr->qp_type;
	h_qp->sq_signaled				= p_create_attr->sq_signaled;
	h_qp->qp_context				= qp_context;
	h_qp->qp_ul_resources_p = qp_ul_resources_p;
	h_qp->sq_size					= ul_qp_init_attr.qp_cap.max_sg_size_sq;
	h_qp->rq_size					= ul_qp_init_attr.qp_cap.max_sg_size_rq;
	h_qp->send_sge_p				= send_sge_p;
	h_qp->recv_sge_p				= recv_sge_p;
	h_qp->qp_num					= qp_num;
	h_qp->h_sq_cq					= &hobul_p->cq_info_tbl[send_cq_idx];
	h_qp->h_rq_cq					= &hobul_p->cq_info_tbl[recv_cq_idx];
	h_qp->kernel_mode				= !(p_umv_buf && p_umv_buf->command);
	h_qp->mark						= E_MARK_QP;
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("qp num 0x%x idx 0x%x cq_s 0x%x cq_r 0x%x\n",
		qp_num, qp_idx, send_cq_idx, recv_cq_idx));
	cl_mutex_release(&h_qp->mutex);
	// Update PD object count
	cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count);
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));

	// Query QP to obtain requested attributes
	if (p_qp_attr) {
		if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf)))
		{
			// User-mode callers own their UL resources, so skip the
			// kernel-side THHUL teardown in cleanup_qp for them.
			if( !(p_umv_buf && p_umv_buf->command) )
				goto cleanup_qp;
			else
				goto cleanup;
		}
	}

	if (ph_qp) *ph_qp = h_qp;
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = p_umv_buf->input_size;
		p_umv_buf->status = IB_SUCCESS;
		/* 
		* Copy the qp_idx back to user
		*/
		cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->qp_ul_resources_sz),
			&qp_num, sizeof (qp_num));
	}
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup_qp:
	// Undo kernel-side allocations; user-mode THHUL state stays with the caller.
	if (send_sge_p) cl_free( send_sge_p);
	if (recv_sge_p) cl_free( recv_sge_p);
	if( !(p_umv_buf && p_umv_buf->command) )
		THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl);

cleanup:
	if( !(p_umv_buf && p_umv_buf->command) && qp_ul_resources_p)
		cl_free( qp_ul_resources_p);
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = 0;
		p_umv_buf->status = status;
	}
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
1370 \r
/*
 * mlnx_create_spl_qp
 *
 * Create a special QP (SMI/GSI) bound to a specific physical port.
 *
 * h_pd          - PD handle; encodes the HCA index and PD number.
 * port_num      - physical port the special QP is attached to.
 * qp_context    - opaque consumer context stored in the QP object.
 * p_create_attr - creation attributes (CQ handles, signaling, capabilities).
 * p_qp_attr     - optional; on success receives the queried QP attributes.
 * ph_qp         - optional; on success receives the new QP handle.
 *
 * Returns IB_SUCCESS, or an IB_* error status on failure.
 *
 * NOTE(review): p_umv_buf is a local fixed at NULL, so every
 * "p_umv_buf && p_umv_buf->command" (user-mode) branch in this function is
 * dead code here; special QPs are effectively kernel-mode only.  Presumably
 * kept for symmetry with mlnx_create_qp -- confirm before removing.
 */
ib_api_status_t
mlnx_create_spl_qp (
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	uint8_t						port_num,
	IN		const	void						*qp_context,
	IN		const	ib_qp_create_t				*p_create_attr,
		OUT			ib_qp_attr_t				*p_qp_attr,
		OUT			ib_qp_handle_t				*ph_qp )
{
	ib_api_status_t			status;
	ib_qp_handle_t			h_qp;
	ci_umv_buf_t			*p_umv_buf = NULL;	// always NULL: kernel-mode only (see header note)

	u_int32_t				hca_idx = PD_HCA_FROM_HNDL(h_pd);
	u_int32_t				pd_idx  = PD_NUM_FROM_HNDL(h_pd);
	u_int32_t				qp_num;
	u_int32_t				qp_idx;
	u_int32_t				send_cq_num;
	u_int32_t				send_cq_idx;
	u_int32_t				recv_cq_num;
	u_int32_t				recv_cq_idx;
	mlnx_hobul_t			*hobul_p;
	HH_hca_dev_t			*hca_ul_info;
	HH_qp_init_attr_t		hh_qp_init_attr;
	HHUL_qp_init_attr_t		ul_qp_init_attr;
	HHUL_qp_hndl_t			hhul_qp_hndl = NULL;
	VAPI_special_qp_t		vapi_qp_type;
	VAPI_qp_cap_t			hh_qp_cap;
	void					*qp_ul_resources_p = NULL;
	VAPI_sg_lst_entry_t		*send_sge_p = NULL;
	VAPI_sg_lst_entry_t		*recv_sge_p = NULL;
	u_int32_t				num_sge;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	// Validate the HCA/PD encoded in the PD handle before touching tables.
	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
	hobul_p = mlnx_hobul_array[hca_idx];
	if (NULL == hobul_p) {
		status = IB_INVALID_PD_HANDLE;
		goto cleanup;
	}
	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);
	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
		status = IB_INVALID_PD_HANDLE;
		goto cleanup;
	}

	hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;
	if (NULL == hca_ul_info) {
		status = IB_INVALID_PD_HANDLE;
		goto cleanup;
	}

	// The create attributes must be provided
	if (!p_create_attr) {
		status = IB_INVALID_PARAMETER;
		goto cleanup;
	}

	// Convert IBAL create attributes into the HHUL form, and resolve the
	// send/receive CQ handles into validated table indices.
	cl_memclr(&ul_qp_init_attr, sizeof(ul_qp_init_attr));
	mlnx_conv_qp_create_attr(p_create_attr, &ul_qp_init_attr, &vapi_qp_type);
	send_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_sq_cq);
	recv_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_rq_cq);
	send_cq_idx = send_cq_num & hobul_p->cq_idx_mask;
	recv_cq_idx = recv_cq_num & hobul_p->cq_idx_mask;
	VALIDATE_INDEX(send_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);
	if ( E_MARK_CQ != hobul_p->cq_info_tbl[send_cq_idx].mark) {
		status = IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}
	VALIDATE_INDEX(recv_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);
	if ( E_MARK_CQ != hobul_p->cq_info_tbl[recv_cq_idx].mark) {
		status = IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}

	ul_qp_init_attr.pd    = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;
	ul_qp_init_attr.sq_cq = hobul_p->cq_info_tbl[send_cq_idx].hhul_cq_hndl;
	ul_qp_init_attr.rq_cq = hobul_p->cq_info_tbl[recv_cq_idx].hhul_cq_hndl;

	if( p_umv_buf && p_umv_buf->command )
	{
		// For user mode calls - obtain and verify the vendor information
		// (dead branch here: p_umv_buf is NULL; see header note).
		if (p_umv_buf->input_size != hca_ul_info->qp_ul_resources_sz ||
			NULL == p_umv_buf->p_inout_buf) {
				status = IB_INVALID_PARAMETER;
				goto cleanup;
			}
			qp_ul_resources_p = (void *)p_umv_buf->p_inout_buf;

	} else {
		// For kernel mode calls - allocate app resources. Use prep->call->done sequence
		qp_ul_resources_p = cl_zalloc( hca_ul_info->qp_ul_resources_sz);
		if (!qp_ul_resources_p) {
			status = IB_INSUFFICIENT_MEMORY;
			goto cleanup;
		}

		if (HH_OK != THHUL_qpm_special_qp_prep(hobul_p->hhul_hndl,
			vapi_qp_type,
			port_num, 
			&ul_qp_init_attr,
			&hhul_qp_hndl,
			&hh_qp_cap,
			qp_ul_resources_p)) {
				status = IB_ERROR;
				goto cleanup;
			}
			// TBD: if not same report error to IBAL
			ul_qp_init_attr.qp_cap = hh_qp_cap;  // struct assign
	}

	// Convert HHUL to HH structure (for HH create_qp)
	hh_qp_init_attr.pd = pd_idx;
	hh_qp_init_attr.rdd = 0; // TBD: RDD
	if( ul_qp_init_attr.srq != HHUL_INVAL_SRQ_HNDL )
	{
		// TBD: HH handle from HHUL handle.  SRQ on a special QP is not
		// supported; the assert below fires on any non-invalid SRQ handle.
		CL_ASSERT( ul_qp_init_attr.srq == HHUL_INVAL_SRQ_HNDL );
	}
	else
	{
		hh_qp_init_attr.srq = HH_INVAL_SRQ_HNDL;
	}
	hh_qp_init_attr.sq_cq = send_cq_num;
	hh_qp_init_attr.rq_cq = recv_cq_num;
	hh_qp_init_attr.sq_sig_type = ul_qp_init_attr.sq_sig_type;
	hh_qp_init_attr.rq_sig_type = ul_qp_init_attr.rq_sig_type;
	hh_qp_init_attr.ts_type = VAPI_TS_UD;	// special QPs are always UD transport
	hh_qp_init_attr.qp_cap  = ul_qp_init_attr.qp_cap; // struct assign

	// Allocate the QP (cmdif)
	if (HH_OK != THH_hob_get_special_qp( hobul_p->hh_hndl,
		vapi_qp_type,
		port_num,
		&hh_qp_init_attr,
		qp_ul_resources_p,
		&qp_num))
	{
		status = IB_ERROR;
		goto cleanup_qp;
	}

	if( !(p_umv_buf && p_umv_buf->command) )
	{
		// Manage user level resources
		if (HH_OK != THHUL_qpm_create_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, qp_num, qp_ul_resources_p)) {
			THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num);
			status = IB_ERROR;
			goto cleanup_qp;
		}

		// Create SQ and RQ iov - scatter/gather scratch arrays used when
		// posting work requests on behalf of the kernel consumer.
		num_sge = ul_qp_init_attr.qp_cap.max_sg_size_sq;
		send_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));
		if (!send_sge_p) {
			status = IB_INSUFFICIENT_MEMORY;
			goto cleanup_qp;
		}

		num_sge = ul_qp_init_attr.qp_cap.max_sg_size_rq;
		recv_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));
		if (!recv_sge_p) {
			status = IB_INSUFFICIENT_MEMORY;
			goto cleanup_qp;
		}
	}

	// Save data refs for future use
	qp_idx = qp_num & hobul_p->qp_idx_mask;
	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_ERROR, cleanup_qp);

	// Populate the driver's QP object under its mutex.
	h_qp = (ib_qp_handle_t)QP_HNDL_FROM_QP(qp_idx);
	cl_mutex_acquire(&h_qp->mutex);
	h_qp->pd_num			= pd_idx;
	h_qp->hhul_qp_hndl		= hhul_qp_hndl;
	h_qp->qp_type			= p_create_attr->qp_type;
	h_qp->sq_signaled		= p_create_attr->sq_signaled;
	h_qp->qp_context		= qp_context;
	h_qp->qp_ul_resources_p	= qp_ul_resources_p;
	h_qp->sq_size			= ul_qp_init_attr.qp_cap.max_sg_size_sq;
	h_qp->rq_size			= ul_qp_init_attr.qp_cap.max_sg_size_rq;
	h_qp->send_sge_p		= send_sge_p;
	h_qp->recv_sge_p		= recv_sge_p;
	h_qp->qp_num			= qp_num;
	h_qp->h_sq_cq			= &hobul_p->cq_info_tbl[send_cq_idx];
	h_qp->h_rq_cq			= &hobul_p->cq_info_tbl[recv_cq_idx];
	h_qp->kernel_mode		= !(p_umv_buf && p_umv_buf->command);
	h_qp->mark				= E_MARK_QP;
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("qp num 0x%x idx 0x%x cq_s 0x%x cq_r 0x%x\n",
		qp_num, qp_idx, send_cq_idx, recv_cq_idx));
	cl_mutex_release(&h_qp->mutex);

	/* Mark the CQ's associated with this special QP as being high priority. */
	cl_atomic_inc( &h_qp->h_sq_cq->spl_qp_cnt );
	KeSetImportanceDpc( &h_qp->h_sq_cq->dpc, HighImportance );
	cl_atomic_inc( &h_qp->h_rq_cq->spl_qp_cnt );
	KeSetImportanceDpc( &h_qp->h_rq_cq->dpc, HighImportance );

	// Update PD object count
	cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count);
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));

	// Query QP to obtain requested attributes
	// NOTE(review): on failure this jumps to 'cleanup', which frees
	// qp_ul_resources_p even though h_qp now owns it, leaks the SGE arrays,
	// and leaves the hardware QP allocated -- verify and consider a
	// dedicated unwind path.
	if (p_qp_attr) {
		if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf))) {
			goto cleanup;
		}
	}

	if (ph_qp) *ph_qp = h_qp;
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = p_umv_buf->input_size;
		p_umv_buf->status = IB_SUCCESS;
	}
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup_qp:
	if (send_sge_p) cl_free( send_sge_p);
	if (recv_sge_p) cl_free( recv_sge_p);
	if( !(p_umv_buf && p_umv_buf->command) )
		THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl);

cleanup:
	if( !(p_umv_buf && p_umv_buf->command) && qp_ul_resources_p )
		cl_free( qp_ul_resources_p);
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = 0;
		p_umv_buf->status = status;
	}
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
1609 \r
/*
 * mlnx_modify_qp
 *
 * Perform a QP state/attribute transition.  The current attributes are
 * queried first and used as defaults; p_modify_attr is then overlaid and
 * the combined set is pushed to the hardware via THH_hob_modify_qp.
 *
 * h_qp          - QP handle; encodes the HCA index and QP number.
 * p_modify_attr - IBAL modify attributes (required state, masks, etc.).
 * p_qp_attr     - optional; on success (kernel callers) receives the
 *                 post-modify attributes via mlnx_query_qp.
 * p_umv_buf     - optional user-mode vendor buffer; for user-mode QPs the
 *                 new VAPI_qp_state_t is copied back through it.
 *
 * Returns IB_SUCCESS, or an IB_* error status on failure.
 */
ib_api_status_t
mlnx_modify_qp (
	IN		const	ib_qp_handle_t				h_qp,
	IN		const	ib_qp_mod_t					*p_modify_attr,
		OUT			ib_qp_attr_t				*p_qp_attr OPTIONAL,
	IN	OUT			ci_umv_buf_t				*p_umv_buf OPTIONAL )
{
	ib_api_status_t		status;

	u_int32_t			hca_idx = QP_HCA_FROM_HNDL(h_qp);
	u_int32_t			qp_num  = QP_NUM_FROM_HNDL(h_qp);
	u_int32_t			qp_idx  = 0;
	mlnx_hobul_t		*hobul_p;
	HHUL_qp_hndl_t		hhul_qp_hndl;
	VAPI_qp_attr_mask_t	hh_qp_attr_mask;
	VAPI_qp_attr_t		hh_qp_attr;
	VAPI_qp_state_t		hh_qp_state;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	// Validate the handle-derived HCA and QP indices.
	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
	hobul_p = mlnx_hobul_array[hca_idx];
	if (NULL == hobul_p) {
		status = IB_INVALID_QP_HANDLE;
		goto cleanup;
	}

	qp_idx = qp_num & hobul_p->qp_idx_mask;
	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);
	if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {
		status = IB_INVALID_QP_HANDLE;
		goto cleanup;
	}

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, 
		("Before acquire mutex to modify qp_idx 0x%x\n", 
		qp_idx));

	// Serialize against concurrent modify/query/destroy on this QP.
	cl_mutex_acquire(&hobul_p->qp_info_tbl[qp_idx].mutex);

	hhul_qp_hndl = hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl;

	// Obtain current state of QP
	if (HH_OK != THH_hob_query_qp(hobul_p->hh_hndl, hobul_p->qp_info_tbl[qp_idx].qp_num, &hh_qp_attr))
	{
		status = IB_ERROR;
		goto cleanup_locked;
	}
	hh_qp_state = hh_qp_attr.qp_state; // The current (pre-modify) state

	// Convert the input parameters. Use query result as default (no cl_memset())
	// cl_memclr(&hh_qp_attr, sizeof(hh_qp_attr));
	status = mlnx_conv_qp_modify_attr(hobul_p->hh_hndl,
		hobul_p->qp_info_tbl[qp_idx].qp_type,
		p_modify_attr, &hh_qp_attr, &hh_qp_attr_mask);
	if( status != IB_SUCCESS )
		goto cleanup_locked;

	if (HH_OK != THH_hob_modify_qp(hobul_p->hh_hndl,
		hobul_p->qp_info_tbl[qp_idx].qp_num,
		hh_qp_state, &hh_qp_attr, &hh_qp_attr_mask))
	{
		status = IB_ERROR;
		goto cleanup_locked;
	}

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, 
		("After hob_modify_qp qp_idx 0x%x k_mod %d\n", 
		qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode));

	// Notify HHUL of the new (post-modify) state. This is done for k-mode calls only;
	// user-mode QPs update their HHUL state in user space from the copied-back state below.
	if (hobul_p->qp_info_tbl[qp_idx].kernel_mode) {
		if (HH_OK != THHUL_qpm_modify_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, hh_qp_attr.qp_state))
		{
			status = IB_ERROR;
			goto cleanup_locked;
		} 
	} 
	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);

	// Kernel callers may ask for the resulting attributes in one call.
	if ((p_qp_attr) && !(p_umv_buf && p_umv_buf->command)) {
		if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf))) {
			goto cleanup;
		}
	}

	// For user-mode QPs, return the new VAPI state through the vendor buffer.
	if ( p_umv_buf && p_umv_buf->command && (! hobul_p->qp_info_tbl[qp_idx].kernel_mode)) {
		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, 
			("mod_qp qp_idx %d umv_buf %p inout_buf %p\n", 
			qp_idx, p_umv_buf, p_umv_buf->p_inout_buf));
		if (p_umv_buf->p_inout_buf) {
			p_umv_buf->output_size = sizeof (VAPI_qp_state_t);
			cl_memcpy (p_umv_buf->p_inout_buf, &(hh_qp_attr.qp_state), 
				(size_t)p_umv_buf->output_size);
			p_umv_buf->status = IB_SUCCESS;
		}
	}
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;


cleanup_locked:
	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);

cleanup:
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = 0;
		p_umv_buf->status = status;
	}
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
1724 \r
1725 ib_api_status_t\r
1726 mlnx_query_qp (\r
1727         IN              const   ib_qp_handle_t                          h_qp,\r
1728                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
1729         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1730 {\r
1731         ib_api_status_t         status;\r
1732 \r
1733         u_int32_t                       hca_idx = QP_HCA_FROM_HNDL(h_qp);\r
1734         u_int32_t                       qp_num  = QP_NUM_FROM_HNDL(h_qp);\r
1735         u_int32_t                       qp_idx  = 0;\r
1736         mlnx_hobul_t            *hobul_p;\r
1737         VAPI_qp_attr_t          hh_qp_attr;\r
1738 \r
1739         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1740 \r
1741         VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
1742         hobul_p = mlnx_hobul_array[hca_idx];\r
1743         if (NULL == hobul_p) {\r
1744                 status = IB_INVALID_QP_HANDLE;\r
1745                 goto cleanup;\r
1746         }\r
1747 \r
1748         qp_idx = qp_num & hobul_p->qp_idx_mask;\r
1749         VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);\r
1750         if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {\r
1751                 status =  IB_INVALID_QP_HANDLE;\r
1752                 goto cleanup;\r
1753         }\r
1754 \r
1755         cl_mutex_acquire(&h_qp->mutex);\r
1756 \r
1757         if (HH_OK != THH_hob_query_qp(hobul_p->hh_hndl, h_qp->qp_num, &hh_qp_attr)) {\r
1758                 status = IB_ERROR;\r
1759                 goto cleanup_locked;\r
1760         }\r
1761 \r
1762         // Convert query result into IBAL structure (no cl_memset())\r
1763         mlnx_conv_vapi_qp_attr(hobul_p->hh_hndl, &hh_qp_attr, p_qp_attr);\r
1764         p_qp_attr->qp_type = h_qp->qp_type;\r
1765         p_qp_attr->h_pd    = (ib_pd_handle_t)PD_HNDL_FROM_PD(h_qp->pd_num);\r
1766         p_qp_attr->h_sq_cq = h_qp->h_sq_cq;\r
1767         p_qp_attr->h_rq_cq = h_qp->h_rq_cq;\r
1768         p_qp_attr->sq_signaled = h_qp->sq_signaled;\r
1769 \r
1770         cl_mutex_release(&h_qp->mutex);\r
1771 \r
1772         if( p_umv_buf && p_umv_buf->command )\r
1773         {\r
1774                 p_umv_buf->output_size = 0;\r
1775                 p_umv_buf->status = IB_SUCCESS;\r
1776         }\r
1777         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1778         return IB_SUCCESS;\r
1779 \r
1780 cleanup_locked:\r
1781         cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);\r
1782 cleanup:\r
1783         if( p_umv_buf && p_umv_buf->command )\r
1784         {\r
1785                 p_umv_buf->output_size = 0;\r
1786                 p_umv_buf->status = status;\r
1787         }\r
1788         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
1789         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1790         return status;\r
1791 }\r
1792 \r
/*
 * mlnx_destroy_qp
 *
 * Destroy a QP: tear down the hardware QP, release kernel-mode scratch
 * resources, restore CQ DPC priority for special QPs, invalidate the QP
 * table slot, and drop the PD reference count.
 *
 * h_qp     - QP handle; encodes the HCA index and QP number.
 * timewait - unused (no timewait handling in this layer).
 *
 * Returns IB_SUCCESS (including when the slot is already freed), or an
 * IB_* error status on failure.
 */
ib_api_status_t
mlnx_destroy_qp (
	IN		const	ib_qp_handle_t				h_qp,
	IN		const	uint64_t					timewait )
{
	ib_api_status_t		status;

	u_int32_t			hca_idx = QP_HCA_FROM_HNDL(h_qp);
	u_int32_t			qp_num  = QP_NUM_FROM_HNDL(h_qp);
	u_int32_t			pd_idx  = 0;
	u_int32_t			qp_idx  = 0;
	mlnx_hobul_t		*hobul_p;
	HHUL_qp_hndl_t		hhul_qp_hndl;

	UNUSED_PARAM( timewait );

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca %d qp 0x%x\n", hca_idx, qp_num));

	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
	hobul_p = mlnx_hobul_array[hca_idx];
	if (NULL == hobul_p) {
		status = IB_INVALID_QP_HANDLE;
		goto cleanup;
	}

	// NOTE(review): this trace dereferences qp_info_tbl[qp_idx] BEFORE
	// VALIDATE_INDEX below has checked qp_idx against max_qp -- a malformed
	// handle could read out of bounds here; verify.
	qp_idx = qp_num & hobul_p->qp_idx_mask;
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hobul_p 0x%p mask 0x%x qp_idx 0x%x mark %d\n",
		hobul_p, hobul_p->qp_idx_mask, qp_idx, hobul_p->qp_info_tbl[qp_idx].mark));

	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);
	if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {
		if (E_MARK_INVALID == hobul_p->qp_info_tbl[qp_idx].mark) {
			// Double-destroy guard: treat an already-freed slot as success.
			// NOTE(review): the trace text says ERROR but IB_SUCCESS is
			// returned -- the message is misleading.
			CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status IB_INVALID_QP_HANDLE\n"));
			return IB_SUCCESS; // Already freed
		}
		status = IB_INVALID_QP_HANDLE;
		goto cleanup;
	}

	cl_mutex_acquire(&hobul_p->qp_info_tbl[qp_idx].mutex);

	hhul_qp_hndl = hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl;
	pd_idx       = hobul_p->qp_info_tbl[qp_idx].pd_num;
	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup_locked);

	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__));
		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd_idx 0x%x mark %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].mark));
		status = IB_INVALID_PD_HANDLE;
		goto cleanup_locked;
	}

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, 
		("Before THH_destroy qp_idx 0x%x k_mod %d pd_idx 0x%x\n",
		qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode, pd_idx));

	// PREP: no PREP required for destroy_qp
	if (HH_OK != THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num)) {
		status = IB_ERROR;
		goto cleanup_locked;
	}

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, 
		("After THH_destroy qp_idx 0x%x k_mod %d pd_idx 0x%x\n",
		qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode, pd_idx));

	// Kernel-mode QPs own their HHUL resources and SGE scratch arrays;
	// user-mode QPs release theirs in user space.
	if (hobul_p->qp_info_tbl[qp_idx].kernel_mode) {
		if (HH_OK != THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl)) {
			status = IB_ERROR;
			goto cleanup_locked;
		}
		if (hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p)
			cl_free( hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p);
		if (hobul_p->qp_info_tbl[qp_idx].send_sge_p)
			cl_free( hobul_p->qp_info_tbl[qp_idx].send_sge_p);
		if (hobul_p->qp_info_tbl[qp_idx].recv_sge_p)
			cl_free( hobul_p->qp_info_tbl[qp_idx].recv_sge_p);
	}

	// Undo the CQ priority boost applied when the special QP was created;
	// drop the DPC back to medium once the last special QP is gone.
	if( h_qp->qp_type == IB_QPT_QP0 || h_qp->qp_type == IB_QPT_QP1 )
	{
		if( !cl_atomic_dec( &h_qp->h_sq_cq->spl_qp_cnt ) )
			KeSetImportanceDpc( &h_qp->h_sq_cq->dpc, MediumImportance );
		if( !cl_atomic_dec( &h_qp->h_rq_cq->spl_qp_cnt ) )
			KeSetImportanceDpc( &h_qp->h_rq_cq->dpc, MediumImportance );
	}

	// Invalidate the slot so a second destroy returns IB_SUCCESS above.
	hobul_p->qp_info_tbl[qp_idx].mark = E_MARK_INVALID;
	hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p = NULL;
	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);

	// Update PD object count
	cl_atomic_dec(&hobul_p->pd_info_tbl[pd_idx].count);
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));

	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup_locked:
	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);
cleanup:
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
1899 \r
/*
* Completion Queue Management Verbs.
*/
1903 \r
/*
 * mlnx_create_cq
 *
 * Create a completion queue on the given CA.
 *
 * h_ca       - CA handle; for user-mode calls (p_umv_buf != NULL) this is
 *              actually a mlnx_um_ca_t wrapper.
 * cq_context - opaque consumer context stored with the CQ.
 * p_size     - in: requested number of entries; out: actual size granted.
 * ph_cq      - optional; on success receives the new CQ handle.
 * p_umv_buf  - optional user-mode vendor buffer carrying the
 *              cq_ul_resources blob plus a trailing u_int32_t that
 *              receives the CQ number.
 *
 * Returns IB_SUCCESS, or an IB_* error status on failure.
 */
ib_api_status_t
mlnx_create_cq (
	IN		const	ib_ca_handle_t				h_ca,
	IN		const	void						*cq_context,
	IN	OUT			uint32_t					*p_size,
		OUT			ib_cq_handle_t				*ph_cq,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t			status;

	mlnx_hob_t				*hob_p;
	u_int32_t				cq_idx;
	u_int32_t				cq_num;
	u_int32_t				cq_size = 0;
	mlnx_hobul_t			*hobul_p;
	HH_hca_dev_t			*hca_ul_info;
	HHUL_cq_hndl_t			hhul_cq_hndl = NULL;
	void					*cq_ul_resources_p = NULL;
	MOSAL_protection_ctx_t	prot_ctx;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	// The CA handle's real type depends on the caller's mode.
	if( p_umv_buf )
		hob_p = ((mlnx_um_ca_t *)h_ca)->hob_p;
	else
		hob_p = (mlnx_hob_t *)h_ca;

	hobul_p = mlnx_hobs_get_hobul(hob_p);
	if (NULL == hobul_p) {
		status = IB_INVALID_CA_HANDLE;
		goto cleanup;
	}

	hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;
	if (NULL == hca_ul_info) {
		status = IB_INVALID_PD_HANDLE;
		goto cleanup;
	}

	// The size must be provided
	if (!p_size) {
		status = IB_INVALID_PARAMETER;
		goto cleanup;
	}
	// TBD: verify that the number requested does not exceed to maximum allowed

	if( p_umv_buf && p_umv_buf->command )
	{
		// For user mode calls - obtain and verify the vendor information.
		// The buffer holds the cq_ul_resources blob plus a trailing
		// u_int32_t used to return the CQ number (copied back below).
		if ((p_umv_buf->input_size - sizeof (u_int32_t))  != 
			hca_ul_info->cq_ul_resources_sz ||
			NULL == p_umv_buf->p_inout_buf) {
				status = IB_INVALID_PARAMETER;
				goto cleanup;
			}
			cq_ul_resources_p = (void *)p_umv_buf->p_inout_buf;

			/* get the current protection context */ 
			prot_ctx = MOSAL_get_current_prot_ctx();
	} else {
		// for kernel mode calls - allocate app resources. Use prep->call->done sequence
		cq_ul_resources_p = cl_zalloc( hca_ul_info->cq_ul_resources_sz);
		if (!cq_ul_resources_p) {
			status = IB_INSUFFICIENT_MEMORY;
			goto cleanup;
		}
		if (HH_OK != THHUL_cqm_create_cq_prep(hobul_p->hhul_hndl, *p_size, &hhul_cq_hndl, &cq_size, cq_ul_resources_p)) {
			status = IB_ERROR;
			goto cleanup;
		}
		/* get the current protection context */ 
		prot_ctx = MOSAL_get_kernel_prot_ctx();
	}

	// Allocate the CQ (cmdif)
	if (HH_OK != THH_hob_create_cq(hobul_p->hh_hndl, prot_ctx, cq_ul_resources_p, &cq_num)) {
		status = IB_INSUFFICIENT_RESOURCES;
		goto cleanup_cq;
	}

	if( !(p_umv_buf && p_umv_buf->command) )
	{
		// Manage user level resources
		if (HH_OK != THHUL_cqm_create_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl, cq_num, cq_ul_resources_p)) {
			THH_hob_destroy_cq(hobul_p->hh_hndl, cq_num);
			status = IB_ERROR;
			goto cleanup_cq;
		}
	}

	// Save data refs for future use
	cq_idx = cq_num & hobul_p->cq_idx_mask;
	VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_ERROR, cleanup_cq);
	cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);
	hobul_p->cq_info_tbl[cq_idx].hca_idx = hob_p->index;
	hobul_p->cq_info_tbl[cq_idx].cq_num = cq_num;
//	hobul_p->cq_info_tbl[cq_idx].pd_num = pd_idx;
	hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl = hhul_cq_hndl;
	hobul_p->cq_info_tbl[cq_idx].cq_context = cq_context;
	hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p = cq_ul_resources_p;
	hobul_p->cq_info_tbl[cq_idx].kernel_mode = !(p_umv_buf && p_umv_buf->command);
	hobul_p->cq_info_tbl[cq_idx].mark = E_MARK_CQ;
	cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);

	// Update CA object count
	cl_atomic_inc(&hobul_p->count);
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("HCA %d count %d\n", h_ca->index, hobul_p->count));

	// NOTE(review): in the user-mode path cq_size was never set (it stays 0
	// from init; sizing is presumably finished in user space) -- confirm
	// the caller ignores *p_size for user-mode CQs.
	*p_size = cq_size;
	if (ph_cq) *ph_cq = (ib_cq_handle_t)CQ_HNDL_FROM_CQ(cq_idx);

	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = p_umv_buf->input_size;
		p_umv_buf->status = IB_SUCCESS;
		/* 
		* Copy the cq_idx back to user
		*/
		cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->cq_ul_resources_sz),
			&cq_num, sizeof (cq_num));
	}
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup_cq:
	// NOTE(review): in the user-mode path hhul_cq_hndl is still NULL here;
	// verify THHUL_cqm_destroy_cq_done tolerates a NULL handle.
	THHUL_cqm_destroy_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl);

cleanup:
	if( !(p_umv_buf && p_umv_buf->command) && cq_ul_resources_p )
		cl_free( cq_ul_resources_p);
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = 0;
		p_umv_buf->status = status;
	}
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
2043 \r
/*
 * mlnx_resize_cq
 *
 * Resize an existing completion queue.
 *
 * Parameters:
 *	h_cq      - CQ handle; encodes both the HCA index (CQ_HCA_FROM_HNDL)
 *	            and the CQ number (CQ_NUM_FROM_HNDL).
 *	p_size    - IN: requested number of CQ entries.
 *	            OUT: actual size granted (kernel-mode path only; the
 *	            user-mode path leaves it to the UVP).
 *	p_umv_buf - Vendor-private buffer.  Non-NULL with a non-zero 'command'
 *	            field marks a user-mode caller that supplies its own
 *	            cq_ul_resources in p_inout_buf; otherwise the resources
 *	            saved at create time are used via the HHUL
 *	            prep -> THH call -> done sequence.
 *
 * Returns IB_SUCCESS, IB_INVALID_PARAMETER, IB_INVALID_CQ_HANDLE or IB_ERROR.
 */
ib_api_status_t
mlnx_resize_cq (
	IN		const	ib_cq_handle_t				h_cq,
	IN	OUT			uint32_t					*p_size,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t		status;

	u_int32_t			hca_idx = CQ_HCA_FROM_HNDL(h_cq);
	u_int32_t			cq_num  = CQ_NUM_FROM_HNDL(h_cq);
	u_int32_t			cq_idx;
	mlnx_hobul_t		*hobul_p;

	HHUL_cq_hndl_t		hhul_cq_hndl;
	void				*cq_ul_resources_p = NULL;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	if (!p_size) {
		status = IB_INVALID_PARAMETER;
		goto cleanup;
	}
	/* VALIDATE_INDEX sets 'status' to the given code and jumps to the
	 * given label when the index is out of range. */
	VALIDATE_INDEX(hca_idx,   MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);
	hobul_p = mlnx_hobul_array[hca_idx];
	if (NULL == hobul_p) {
		status = IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}

	/* Decode the CQ table slot and verify it holds a live CQ. */
	cq_idx = cq_num & hobul_p->cq_idx_mask;
	VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);
	if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) {
		status =  IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}

	/* Serialize against concurrent modify/destroy of this CQ slot.
	 * All error exits from here on must go through cleanup_locked. */
	cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);

	hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;

	if( p_umv_buf && p_umv_buf->command )
	{
		// For user mode calls - obtain and verify the vendor information
		if( p_umv_buf->input_size != hobul_p->cq_ul_resources_sz ||
			NULL == p_umv_buf->p_inout_buf )
		{
			status = IB_INVALID_PARAMETER;
			goto cleanup_locked;
		}
		cq_ul_resources_p = (void *)p_umv_buf->p_inout_buf;

	} else {
		// for kernel mode calls - obtain the saved app resources. Use prep->call->done sequence
		cq_ul_resources_p = hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p;

		/* NOTE(review): the HHUL return value is stored in an
		 * ib_api_status_t and compared against IB_SUCCESS here, while the
		 * sibling THHUL_* calls below compare against HH_OK.  This is only
		 * correct if both success codes are 0 - confirm against the THHUL
		 * headers. */
		status = THHUL_cqm_resize_cq_prep(
			hobul_p->hhul_hndl, hhul_cq_hndl,
			*p_size, p_size, cq_ul_resources_p );
		if( status != IB_SUCCESS )
			goto cleanup_locked;
	}

	/* Perform the actual resize in the HH (hardware) layer. */
	if (HH_OK != THH_hob_resize_cq(hobul_p->hh_hndl, cq_num, cq_ul_resources_p)) {
		status = IB_ERROR;
		goto cleanup_locked;
	}

	// DONE: when called on behalf of kernel module
	if (hobul_p->cq_info_tbl[cq_idx].kernel_mode) {
		if (HH_OK != THHUL_cqm_resize_cq_done( hobul_p->hhul_hndl, hhul_cq_hndl, cq_ul_resources_p))
		{
			status = IB_ERROR;
			goto cleanup_locked;
		}
	}

	cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);

	/* Echo the vendor buffer back to the user-mode caller on success. */
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = p_umv_buf->input_size;
		p_umv_buf->status = IB_SUCCESS;
	}
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup_locked:
	cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);

cleanup:
	/* Report the failure through the vendor buffer for user-mode callers. */
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = 0;
		p_umv_buf->status = status;
	}
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
2143 \r
/*
 * mlnx_query_cq
 *
 * Query the current size (number of entries) of a completion queue.
 * Only valid for kernel-mode CQs: user-mode queries are served entirely
 * by the UVP, so a call that arrives here with a user-mode vendor buffer
 * is rejected.
 *
 * Parameters:
 *	h_cq      - CQ handle; encodes the HCA index and CQ number.
 *	p_size    - OUT: number of entries in the CQ.
 *	p_umv_buf - Vendor-private buffer; a non-zero 'command' marks a
 *	            user-mode caller (rejected, see above).
 *
 * Returns IB_SUCCESS, IB_INVALID_PARAMETER, IB_INVALID_CQ_HANDLE or IB_ERROR.
 */
ib_api_status_t
mlnx_query_cq (
	IN		const	ib_cq_handle_t				h_cq,
		OUT			uint32_t					*p_size,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t		status;

	u_int32_t			hca_idx = CQ_HCA_FROM_HNDL(h_cq);
	u_int32_t			cq_num  = CQ_NUM_FROM_HNDL(h_cq);
	u_int32_t			cq_idx;
	mlnx_hobul_t		*hobul_p;
	HHUL_cq_hndl_t		hhul_cq_hndl;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	if (!p_size) {
		status = IB_INVALID_PARAMETER;
		goto cleanup;
	}

	/* Query is fully handled in user-mode. */
	if( p_umv_buf && p_umv_buf->command )
	{
		status = IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}

	/* VALIDATE_INDEX sets 'status' and jumps to 'cleanup' on a bad index. */
	VALIDATE_INDEX(hca_idx,   MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);
	hobul_p = mlnx_hobul_array[hca_idx];
	if (NULL == hobul_p) {
		status = IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}

	/* Decode the CQ table slot and verify it holds a live CQ. */
	cq_idx = cq_num & hobul_p->cq_idx_mask;
	VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);
	if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) {
		status =  IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}

	/* Hold the slot mutex while touching the HHUL handle. */
	cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);

	hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;
	if (HH_OK != THHUL_cqm_query_cq(hobul_p->hhul_hndl, hhul_cq_hndl, p_size)){
		status = IB_ERROR;
		goto cleanup_locked;
	}

	cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);

	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup_locked:
	cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);

cleanup:
	/* Report the failure through the vendor buffer for user-mode callers. */
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = 0;
		p_umv_buf->status = status;
	}
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
2212 \r
/*
 * mlnx_destroy_cq
 *
 * Destroy a completion queue: tear it down in the HH (hardware) layer,
 * finish the HHUL destroy for kernel-mode CQs (freeing the kernel-saved
 * cq_ul_resources), invalidate the CQ table slot, and decrement the CA's
 * open-object count.
 *
 * Parameters:
 *	h_cq - CQ handle; encodes the HCA index and CQ number.
 *
 * Returns IB_SUCCESS, IB_INVALID_CQ_HANDLE or IB_ERROR.
 */
ib_api_status_t
mlnx_destroy_cq (
	IN		const	ib_cq_handle_t				h_cq)
{
	ib_api_status_t status;

	u_int32_t	 hca_idx = CQ_HCA_FROM_HNDL(h_cq);
	u_int32_t	 cq_num  = CQ_NUM_FROM_HNDL(h_cq);
	u_int32_t		cq_idx;
//	u_int32_t	 pd_idx = 0;
	mlnx_hobul_t	 *hobul_p;
	HHUL_cq_hndl_t	 hhul_cq_hndl;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	/* VALIDATE_INDEX sets 'status' and jumps to 'cleanup' on a bad index. */
	VALIDATE_INDEX(hca_idx,   MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);
	hobul_p = mlnx_hobul_array[hca_idx];
	if (NULL == hobul_p) {
		status = IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}

	/* Decode the CQ table slot and verify it holds a live CQ. */
	cq_idx = cq_num & hobul_p->cq_idx_mask;
	VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);
	if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) {
		status =  IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}

	/* Serialize against concurrent resize/query of this CQ slot.
	 * All error exits from here on must go through cleanup_locked. */
	cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);

	hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;
//	pd_idx       = hobul_p->cq_info_tbl[cq_idx].pd_num;
//	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup);
//	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
//		status =  IB_INVALID_PD_HANDLE;
//		goto cleanup_locked;
//	}

	// PREP: no PREP required for destroy_cq
	if (HH_OK != THH_hob_destroy_cq(hobul_p->hh_hndl, cq_num)) {
		status = IB_ERROR;
		goto cleanup_locked;
	}

	/* For kernel-mode CQs, complete the HHUL-side destroy and release the
	 * cq_ul_resources the kernel allocated at create time.  (User-mode
	 * callers own and free their own resources.) */
	if (hobul_p->cq_info_tbl[cq_idx].kernel_mode) {
		if (HH_OK != THHUL_cqm_destroy_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl)) {
			status = IB_ERROR;
			goto cleanup_locked;
		}
		if (hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p)
			cl_free( hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p);
	}

	/* Mark the slot free for reuse. */
	hobul_p->cq_info_tbl[cq_idx].mark = E_MARK_INVALID;
	hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p = NULL;
	cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);

	// Update CA object count
	cl_atomic_dec(&hobul_p->count);
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CA %d count %d\n", hca_idx, hobul_p->count));


	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup_locked:
	cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);

cleanup:
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
2287 \r
2288 \r
2289 void\r
2290 setup_ci_interface(\r
2291         IN              const   ib_net64_t                                      ca_guid,\r
2292         IN      OUT                     ci_interface_t                          *p_interface )\r
2293 {\r
2294         cl_memclr(p_interface, sizeof(*p_interface));\r
2295 \r
2296         /* Guid of the CA. */\r
2297         p_interface->guid = ca_guid;\r
2298 \r
2299         /* Version of this interface. */\r
2300         p_interface->version = VERBS_VERSION;\r
2301 \r
2302         /* UVP name */\r
2303         cl_memcpy( p_interface->libname, mlnx_uvp_lib_name, MAX_LIB_NAME);\r
2304 \r
2305         CL_TRACE(MLNX_DBG_TRACE, g_mlnx_dbg_lvl, ("UVP filename %s\n", p_interface->libname)); \r
2306 \r
2307         /* The real interface. */\r
2308         p_interface->open_ca = mlnx_open_ca;\r
2309         p_interface->query_ca = mlnx_query_ca;\r
2310         p_interface->modify_ca = mlnx_modify_ca; // ++\r
2311         p_interface->close_ca = mlnx_close_ca;\r
2312         p_interface->um_open_ca = mlnx_um_open;\r
2313         p_interface->um_close_ca = mlnx_um_close;\r
2314 \r
2315         p_interface->allocate_pd = mlnx_allocate_pd;\r
2316         p_interface->deallocate_pd = mlnx_deallocate_pd;\r
2317 \r
2318         p_interface->create_av = mlnx_create_av;\r
2319         p_interface->query_av = mlnx_query_av;\r
2320         p_interface->modify_av = mlnx_modify_av;\r
2321         p_interface->destroy_av = mlnx_destroy_av;\r
2322 \r
2323         p_interface->create_qp = mlnx_create_qp;\r
2324         p_interface->create_spl_qp = mlnx_create_spl_qp;\r
2325         p_interface->modify_qp = mlnx_modify_qp;\r
2326         p_interface->query_qp = mlnx_query_qp;\r
2327         p_interface->destroy_qp = mlnx_destroy_qp;\r
2328 \r
2329         p_interface->create_cq = mlnx_create_cq;\r
2330         p_interface->resize_cq = mlnx_resize_cq;\r
2331         p_interface->query_cq = mlnx_query_cq;\r
2332         p_interface->destroy_cq = mlnx_destroy_cq;\r
2333 \r
2334         p_interface->local_mad = mlnx_local_mad;\r
2335         \r
2336         p_interface->vendor_call = fw_access_ctrl;\r
2337 \r
2338         mlnx_memory_if(p_interface);\r
2339         mlnx_direct_if(p_interface);\r
2340         mlnx_mcast_if(p_interface);\r
2341 \r
2342 \r
2343         return;\r
2344 }\r
2345 \r
2346 #if 0\r
2347 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__));\r
2348 #endif\r