63625b2255a56db9316ace40fe035f54631e4866
[mirror/winof/.git] / hw / mt23108 / kernel / hca_verbs.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
4  *\r
5  * This software is available to you under the OpenIB.org BSD license\r
6  * below:\r
7  *\r
8  *     Redistribution and use in source and binary forms, with or\r
9  *     without modification, are permitted provided that the following\r
10  *     conditions are met:\r
11  *\r
12  *      - Redistributions of source code must retain the above\r
13  *        copyright notice, this list of conditions and the following\r
14  *        disclaimer.\r
15  *\r
16  *      - Redistributions in binary form must reproduce the above\r
17  *        copyright notice, this list of conditions and the following\r
18  *        disclaimer in the documentation and/or other materials\r
19  *        provided with the distribution.\r
20  *\r
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
22  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
24  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
25  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
26  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
27  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
28  * SOFTWARE.\r
29  *\r
30  * $Id$\r
31  */\r
32 \r
33 \r
34 #include "hca_data.h"\r
35 #include "hca_debug.h"\r
36 \r
37 \r
/* Round size up to the next multiple of the pointer size; used when packing
 * variable-length tables (ports, GIDs, PKeys) into one ib_ca_attr_t buffer
 * so every embedded table stays pointer-aligned. */
#define PTR_ALIGN(size) (((size) + sizeof(void*) - 1) & ~(sizeof(void*) - 1))


/* Matches definition in IbAccess for MaxSMPsWatermark */
/* Maximum address vectors reserved for special-QP protection domains. */
uint32_t	g_sqp_max_avs = ((4096/sizeof(ib_mad_t))*32*5);
43 \r
44 \r
// Local declarations

/* Forward declaration: implementation appears later in this file. */
ib_api_status_t
mlnx_query_qp (
	IN		const	ib_qp_handle_t				h_qp,
		OUT			ib_qp_attr_t				*p_qp_attr,
	IN	OUT			ci_umv_buf_t				*p_umv_buf );
51 \r
52 /* \r
53 * CA Access Verbs\r
54 */\r
/*
 * Open the HCA identified by ca_guid in kernel mode.
 *
 * On success *ph_ca receives the new HOB handle.  If the device is already
 * open, *ph_ca receives the EXISTING handle and IB_RESOURCE_BUSY is returned.
 * Other failures return IB_NOT_FOUND (unknown GUID), IB_INSUFFICIENT_MEMORY,
 * or IB_ERROR, with partial state torn down at 'cleanup'.
 */
ib_api_status_t
mlnx_open_ca (
	IN		const	ib_net64_t					ca_guid, // IN  const char *                ca_name,
	IN		const	ci_completion_cb_t			pfn_completion_cb,
	IN		const	ci_async_event_cb_t			pfn_async_event_cb,
	IN		const	void*const					ca_context,
		OUT			ib_ca_handle_t				*ph_ca)
{
//	char *					ca_name = NULL;
//	char *					dev_name = NULL;
	mlnx_hca_t				*p_hca;
	HH_hca_dev_t *			hca_ul_info;
	void *					hca_ul_resources_p = NULL; // (THH_hca_ul_resources_t *)
	ib_api_status_t			status;
	mlnx_hob_t				*new_ca = NULL;
	MOSAL_protection_ctx_t	prot_ctx;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("context 0x%p\n", ca_context));

	/* Resolve the GUID to a previously discovered HCA object. */
	p_hca = mlnx_hca_from_guid( ca_guid );
	if( !p_hca ) {
		HCA_EXIT( MLNX_DBG_TRACE );
		return IB_NOT_FOUND;
	}

	//// Verify that the device has been discovered (it'd better be)
	//mlnx_names_from_guid(ca_guid, &ca_name, &dev_name);
	//if (!ca_name) {
	//	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	//	return IB_NOT_FOUND;
	//}

	//// We have name - lookup device
	//if (HH_OK != HH_lookup_hca(ca_name, &hh_hndl)) {
	//	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	//	return IB_NOT_FOUND;
	//}

	hca_ul_info = p_hca->hh_hndl;

	{
		// We are opening the HCA in kernel mode.
		// if a HOBKL exists for this device (i.e. it is open) - return E_BUSY
		if (IB_SUCCESS == mlnx_hobs_lookup(p_hca->hh_hndl, &new_ca)) {
			if (ph_ca) *ph_ca = (ib_ca_handle_t)new_ca;
			CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
			return IB_RESOURCE_BUSY;
		}

		// Create a mapping from hca index to hh_hndl
		status = mlnx_hobs_insert(p_hca, &new_ca);
		if (IB_SUCCESS != status) {
			/* Nothing acquired yet beyond the lookup; plain return. */
			CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
			return status;
		}

		/* save copy of HCA device object */
		new_ca->p_dev_obj = p_hca->p_dev_obj;

		// Initialize the device driver
		if (HH_OK != THH_hob_open_hca(p_hca->hh_hndl, NULL, NULL)) {
			status = IB_ERROR;
			goto cleanup;
		}
		
		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("context 0x%p\n", ca_context));
		/* Register the caller's completion / async-event callbacks. */
		status = mlnx_hobs_set_cb(new_ca,
			pfn_completion_cb,
			pfn_async_event_cb,
			ca_context);
		if (IB_SUCCESS != status) {
			goto cleanup;
		}

		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("ul_resource sizes: hca %d pd %d\n",
			hca_ul_info->hca_ul_resources_sz,
			hca_ul_info->pd_ul_resources_sz));

		hca_ul_resources_p = cl_zalloc( hca_ul_info->hca_ul_resources_sz);

		/* get the kernel protection context */ 
		prot_ctx = MOSAL_get_kernel_prot_ctx();
	}

	/* Allocation result is deliberately checked after the block above. */
	if (!hca_ul_resources_p) {
		status = IB_INSUFFICIENT_MEMORY;
		goto cleanup;
	}

	if (HH_OK != THH_hob_alloc_ul_res(p_hca->hh_hndl, prot_ctx, hca_ul_resources_p)) {
		status = IB_ERROR;
		goto cleanup;
	}

	// TBD: !!! in user mode (kernel hobul_idx != hob_idx)
	/* On success the HOBUL takes ownership of hca_ul_resources_p —
	 * presumably; confirm against mlnx_hobul_new. */
	status = mlnx_hobul_new(new_ca, p_hca->hh_hndl, hca_ul_resources_p);
	if (IB_SUCCESS != status) {
		goto cleanup;
	}

	// Return the HOBUL index
	if (ph_ca) *ph_ca = new_ca;

	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup:
	/* NOTE(review): THH_hob_close_hca is also reached when THH_hob_open_hca
	 * itself failed — confirm closing an unopened HCA is benign. */
	if (hca_ul_resources_p)
		cl_free( hca_ul_resources_p);
	THH_hob_close_hca(p_hca->hh_hndl);
	mlnx_hobs_remove(new_ca);

	// For user mode call - return status to user mode
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
173 \r
/*
 * Query HCA capabilities and per-port attributes into the caller's buffer.
 *
 * The ib_ca_attr_t and all variable-length tables (page sizes, port
 * attributes, per-port GID and PKEY tables) are packed into the single
 * p_ca_attr allocation; *p_byte_count always reports the space required.
 * If p_ca_attr is NULL or too small, returns IB_INSUFFICIENT_MEMORY with
 * *p_byte_count set so the caller can retry with a larger buffer.
 * For user-mode (UVP) callers, the HH device info is also copied back
 * through p_umv_buf.
 */
ib_api_status_t
mlnx_query_ca (
	IN		const	ib_ca_handle_t				h_ca,
		OUT			ib_ca_attr_t				*p_ca_attr,
	IN	OUT			uint32_t					*p_byte_count,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t		status;

	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	HH_hca_hndl_t		hh_hndl = NULL;
	HH_hca_dev_t		*hca_ul_info;
	VAPI_hca_cap_t		hca_cap;
	VAPI_hca_port_t		*hca_ports = NULL;
	uint32_t			size, required_size;
	u_int8_t			port_num, num_ports;
	u_int32_t			num_gids, num_pkeys;
	u_int32_t			num_page_sizes = 1; // TBD: what is actually supported
	uint8_t				*last_p;
	void				*hca_ul_resources_p = NULL; // never allocated here; the cl_free calls below are defensive no-ops
	u_int32_t			priv_op;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	/* p_byte_count is mandatory: it always reports the required size. */
	if (NULL == p_byte_count) {
		status = IB_INVALID_PARAMETER;
		goto cleanup;
	}

	mlnx_hobs_get_handle(hob_p, &hh_hndl);
	if (NULL == hh_hndl) {
		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("returning E_NODEV dev\n"));
		status = IB_INVALID_CA_HANDLE;
		goto cleanup;
	}

	hca_ul_info = (HH_hca_dev_t *)hh_hndl;

	if (HH_OK != THH_hob_query(hh_hndl, &hca_cap)) {
		status = IB_ERROR;
		goto cleanup;
	}

	num_ports = hca_cap.phys_port_num;   /* Number of physical ports of the HCA */             

	if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof(VAPI_hca_port_t)))) {
		CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl,
			("Failed to cl_zalloc ports array\n"));
		status = IB_INSUFFICIENT_MEMORY;
		goto cleanup;
	}

	// Loop on ports and get their properties
	num_gids = 0;
	num_pkeys = 0;
	/* Base size: the attribute struct plus its fixed tables, each
	 * pointer-aligned so packed tables stay naturally aligned. */
	required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) +
		PTR_ALIGN(sizeof(u_int32_t) * num_page_sizes) +
		PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports);
	for (port_num = 0; port_num < num_ports; port_num++) {
		/* THH ports are numbered from 1. */
		if (HH_OK != THH_hob_query_port_prop(hh_hndl, port_num+1, &hca_ports[port_num])) {
			status = IB_ERROR;
			goto cleanup;
		}

		num_gids  = hca_ports[port_num].gid_tbl_len;
		size = PTR_ALIGN(sizeof(ib_gid_t)  * num_gids);
		required_size += size;

		num_pkeys = hca_ports[port_num].pkey_tbl_len;
		size = PTR_ALIGN(sizeof(u_int16_t) * num_pkeys);
		required_size += size;
	}

	if( p_umv_buf && p_umv_buf->command )
	{
		/*
		* Prepare the buffer with the size including hca_ul_resources_sz
		* NO ALIGNMENT for this size 
		*/

		if (p_umv_buf->p_inout_buf)
		{
			cl_memcpy (&priv_op, p_umv_buf->p_inout_buf, sizeof (priv_op));
			CL_TRACE(MLNX_DBG_TRACE, g_mlnx_dbg_lvl, ("priv_op = %d\n", priv_op));

			/* 
			* Yes, UVP request for hca_ul_info
			*/
			if (p_umv_buf->input_size != 
				(sizeof (HH_hca_dev_t) + sizeof (priv_op) ))
			{
				*p_byte_count = required_size;
				p_umv_buf->output_size = 0;
				status = IB_INVALID_PARAMETER;
				goto cleanup;
			}
			/* Copy the device info just past the priv_op header. */
			cl_memcpy( (uint8_t* __ptr64)p_umv_buf->p_inout_buf + sizeof (priv_op), 
				hca_ul_info, sizeof (HH_hca_dev_t));
			p_umv_buf->output_size = p_umv_buf->input_size;
		}
	}

	/* Caller's buffer absent or too small: report the required size. */
	if (NULL == p_ca_attr || *p_byte_count < required_size) {
		*p_byte_count = required_size;
		status = IB_INSUFFICIENT_MEMORY;
		if ( p_ca_attr != NULL) {
			CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl,
				("Failed *p_byte_count < required_size\n"));
		}
		goto cleanup;
	}

	// Space is sufficient - setup table pointers
	last_p = (uint8_t*)p_ca_attr;
	last_p += PTR_ALIGN(sizeof(*p_ca_attr));

	p_ca_attr->p_page_size = (uint32_t*)last_p;
	last_p += PTR_ALIGN(num_page_sizes * sizeof(u_int32_t));

	p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p;
	last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t));

	/* Carve per-port GID/PKEY tables out of the caller's buffer. */
	for (port_num = 0; port_num < num_ports; port_num++) {
		p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p;
		size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len);
		last_p += size;

		p_ca_attr->p_port_attr[port_num].p_pkey_table = (u_int16_t *)last_p;
		size = PTR_ALIGN(sizeof(u_int16_t) * hca_ports[port_num].pkey_tbl_len);
		last_p += size;
	}

	// Separate the loops to ensure that table pointers are always setup
	for (port_num = 0; port_num < num_ports; port_num++) {
		status = mlnx_get_hca_pkey_tbl(hh_hndl, port_num+1,
			hca_ports[port_num].pkey_tbl_len,
			p_ca_attr->p_port_attr[port_num].p_pkey_table);
		if (IB_SUCCESS != status) {
			CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl,
				("Failed to mlnx_get_hca_pkey_tbl for port_num:%d\n",port_num));
			goto cleanup;
		}

		status = mlnx_get_hca_gid_tbl(hh_hndl, port_num+1,
			hca_ports[port_num].gid_tbl_len,
			&p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw);
		if (IB_SUCCESS != status) {
			CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl,
				("Failed to mlnx_get_hca_gid_tbl for port_num:%d\n",port_num));
			goto cleanup;
		}

#if 0
		{
			int i;

			CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("port %d gid0:", port_num));
			for (i = 0; i < 16; i++)
				CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, (" 0x%x", p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[i]));
			CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("\n"));
		}
#endif
	}

	// Convert query result into IBAL structure (no cl_memset())
	/* NOTE(review): both branches assign the same value; the commented-out
	 * user-mode variant subtracted hca_ul_resources_sz — confirm the
	 * distinction was intentionally dropped. */
	if( p_umv_buf && p_umv_buf->command )
	{
		// p_ca_attr->size = required_size - hca_ul_info->hca_ul_resources_sz;
		p_ca_attr->size = required_size;
	}
	else
	{
		p_ca_attr->size = required_size;
	}

	// !!! GID/PKEY tables must be queried before this call !!!
	mlnx_conv_vapi_hca_cap(hca_ul_info, &hca_cap, hca_ports, p_ca_attr);

	// verify: required space == used space
	CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) );

#if 0
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("Space required %d used %d\n",
		required_size,
		((uintn_t)last_p) - ((uintn_t)p_ca_attr))));
#endif

	if( p_umv_buf && p_umv_buf->command ) p_umv_buf->status = IB_SUCCESS;
	if (hca_ul_resources_p) cl_free (hca_ul_resources_p);
	if (hca_ports) cl_free( hca_ports );
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup:
	if( p_umv_buf && p_umv_buf->command ) p_umv_buf->status = status;
	if (hca_ul_resources_p) cl_free (hca_ul_resources_p);
	if (hca_ports) cl_free( hca_ports);
	/* Buffer-too-small is an expected negotiation step — don't log it. */
	if( p_ca_attr != NULL || status != IB_INSUFFICIENT_MEMORY )
		CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
376 \r
377 ib_api_status_t\r
378 mlnx_modify_ca (\r
379         IN              const   ib_ca_handle_t                          h_ca,\r
380         IN              const   uint8_t                                         port_num,\r
381         IN              const   ib_ca_mod_t                                     modca_cmd,\r
382         IN              const   ib_port_attr_mod_t                      *p_port_attr)\r
383 {\r
384         ib_api_status_t                 status;\r
385 \r
386         mlnx_hob_t                              *hob_p = (mlnx_hob_t *)h_ca;\r
387         HH_hca_hndl_t                   hh_hndl = NULL;\r
388 \r
389         VAPI_hca_attr_t                 hca_attr;\r
390         VAPI_hca_attr_mask_t    hca_attr_mask = 0;\r
391 \r
392         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
393 \r
394         mlnx_hobs_get_handle(hob_p, &hh_hndl);\r
395         if (NULL == hh_hndl) {\r
396                 status = IB_INVALID_CA_HANDLE;\r
397                 goto cleanup;\r
398         }\r
399 \r
400         cl_memclr(&hca_attr, sizeof(hca_attr));\r
401         if (modca_cmd & IB_CA_MOD_IS_SM) {\r
402                 hca_attr_mask |= HCA_ATTR_IS_SM;\r
403                 hca_attr.is_sm = (MT_bool)p_port_attr->cap.sm;\r
404         }\r
405         if (modca_cmd & IB_CA_MOD_IS_SNMP_SUPPORTED) {\r
406                 hca_attr_mask |= HCA_ATTR_IS_SNMP_TUN_SUP;\r
407                 hca_attr.is_snmp_tun_sup = (MT_bool)p_port_attr->cap.snmp;\r
408         }\r
409         if (modca_cmd & IB_CA_MOD_IS_DEV_MGMT_SUPPORTED) {\r
410                 hca_attr_mask |= HCA_ATTR_IS_DEV_MGT_SUP;\r
411                 hca_attr.is_dev_mgt_sup = (MT_bool)p_port_attr->cap.dev_mgmt;\r
412         }\r
413         if (modca_cmd & IB_CA_MOD_IS_VEND_SUPPORTED) {\r
414                 hca_attr_mask |= HCA_ATTR_IS_VENDOR_CLS_SUP;\r
415                 hca_attr.is_vendor_cls_sup = (MT_bool)p_port_attr->cap.vend;\r
416         }\r
417         if (modca_cmd & IB_CA_MOD_IS_CLIENT_REREGISTER_SUPPORTED) {\r
418                 hca_attr_mask |= HCA_ATTR_IS_CLIENT_REREGISTRATION_SUP;\r
419                 hca_attr.is_client_reregister_sup= (MT_bool)p_port_attr->cap.client_reregister;\r
420         }\r
421         if (modca_cmd & IB_CA_MOD_QKEY_CTR) {\r
422                 if (p_port_attr->qkey_ctr == 0)\r
423                         hca_attr.reset_qkey_counter = TRUE;\r
424         }\r
425 \r
426         if (0 != hca_attr_mask) {\r
427                 if (HH_OK != THH_hob_modify( hh_hndl, port_num, &hca_attr, &hca_attr_mask))\r
428                 {\r
429                         status = IB_ERROR;\r
430                         goto cleanup;\r
431                 }\r
432         }\r
433 \r
434         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
435         return IB_SUCCESS;\r
436 \r
437 cleanup:\r
438         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));\r
439         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
440         return status;\r
441 }\r
442 \r
443 ib_api_status_t\r
444 mlnx_close_ca (\r
445         IN                              ib_ca_handle_t                          h_ca)\r
446 {\r
447         ib_api_status_t status;\r
448 \r
449         HH_hca_hndl_t   hh_hndl = NULL;\r
450         mlnx_hob_t              *hob_p   = (mlnx_hob_t *)h_ca;\r
451         HH_hca_dev_t    *hca_ul_info;\r
452         void                    *hca_ul_resources_p = NULL;\r
453         mlnx_hobul_t    *hobul_p;\r
454 \r
455         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
456 \r
457         hobul_p = mlnx_hobul_array[hob_p->index];\r
458         if( !hobul_p ) {\r
459                 status = IB_INVALID_CA_HANDLE;\r
460                 goto cleanup;\r
461         }\r
462 \r
463         if( hobul_p->count ) {\r
464                 status = IB_RESOURCE_BUSY;\r
465                 goto cleanup;\r
466         }\r
467 \r
468         mlnx_hobs_get_handle(hob_p, &hh_hndl);\r
469         if (NULL == hh_hndl) {\r
470                 status = IB_INVALID_CA_HANDLE;\r
471                 goto cleanup;\r
472         }\r
473 \r
474         hca_ul_info = (HH_hca_dev_t *)hh_hndl;\r
475         mlnx_hobul_get(hob_p, &hca_ul_resources_p);\r
476 \r
477         if (hca_ul_resources_p) {\r
478                 THH_hob_free_ul_res(hh_hndl, hca_ul_resources_p);\r
479                 cl_free( hca_ul_resources_p);\r
480         }\r
481         mlnx_hobul_delete(hob_p);\r
482         THH_hob_close_hca(hh_hndl);\r
483         mlnx_hobs_remove(hob_p);\r
484 \r
485         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
486         return IB_SUCCESS;\r
487 \r
488 cleanup:\r
489         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));\r
490         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
491         return status;\r
492 }\r
493 \r
494 \r
/*
 * Open the HCA on behalf of a user-mode (UVP) process.
 *
 * Allocates a mlnx_um_ca_t holding the HH device info plus the THH
 * user-level resource area, maps that region into the calling process's
 * address space via an MDL, and attaches the process to the HCA.  On
 * success the user-mode mapping address is written into
 * p_umv_buf->p_inout_buf and *ph_um_ca receives the kernel object.
 * Returns p_umv_buf->status.
 */
static ib_api_status_t
mlnx_um_open(
	IN		const	ib_ca_handle_t				h_ca,
	IN	OUT			ci_umv_buf_t* const			p_umv_buf,
		OUT			ib_ca_handle_t* const		ph_um_ca )
{
	ib_api_status_t		status;

	mlnx_hob_t				*hob_p = (mlnx_hob_t *)h_ca;
	HH_hca_hndl_t			hh_hndl = NULL;
	HH_hca_dev_t			*hca_ul_info;
	mlnx_um_ca_t			*p_um_ca;
	MOSAL_protection_ctx_t	prot_ctx;

	HCA_ENTER( MLNX_DBG_TRACE );

	mlnx_hobs_get_handle( hob_p, &hh_hndl );
	if( !hh_hndl )
	{
		HCA_TRACE(MLNX_DBG_INFO, ("returning E_NODEV dev\n"));
		/* NOTE(review): 'status' is assigned but the function returns
		 * p_umv_buf->status, which is NOT set on this path — confirm the
		 * caller pre-initializes it. */
		status = IB_INVALID_CA_HANDLE;
		goto mlnx_um_open_err1;
	}

	hca_ul_info = (HH_hca_dev_t *)hh_hndl;

	/* Kernel-only callers (no UVP command) have nothing to map. */
	if( !p_umv_buf->command )
	{
		p_umv_buf->status = IB_SUCCESS;
		goto mlnx_um_open_err1;
	}

	/*
	 * Prepare the buffer with the size including hca_ul_resources_sz
	 * NO ALIGNMENT for this size 
	 */
	if( !p_umv_buf->p_inout_buf ||
		p_umv_buf->output_size < sizeof(void*) )
	{
		p_umv_buf->status = IB_INVALID_PARAMETER;
		goto mlnx_um_open_err1;
	}

	HCA_TRACE( MLNX_DBG_TRACE, ("priv_op = %d\n", p_umv_buf->command ));

	/* Yes, UVP request for hca_ul_info. */
	/* "- 1": presumably mlnx_um_ca_t ends in a 1-byte placeholder for
	 * ul_hca_res that overlaps the variable-length area — confirm against
	 * the struct definition. */
	p_um_ca = (mlnx_um_ca_t*)cl_zalloc(
		sizeof(mlnx_um_ca_t) + hca_ul_info->hca_ul_resources_sz - 1 );
	if( !p_um_ca )
	{
		p_umv_buf->status = IB_INSUFFICIENT_MEMORY;
		goto mlnx_um_open_err1;
	}

	/* Describe dev_info + UL resources so they can be mapped to user mode. */
	p_um_ca->p_mdl = IoAllocateMdl( &p_um_ca->dev_info,
		(ULONG)(sizeof(HH_hca_dev_t) + hca_ul_info->hca_ul_resources_sz),
		FALSE, TRUE, NULL );
	if( !p_um_ca->p_mdl )
	{
		p_umv_buf->status = IB_ERROR;
		goto mlnx_um_open_err2;
	}
	/* Build the page list... */
	MmBuildMdlForNonPagedPool( p_um_ca->p_mdl );

	/* Map the memory into the calling process's address space. */
	__try
	{
		p_um_ca->p_mapped_addr =
			MmMapLockedPagesSpecifyCache( p_um_ca->p_mdl,
			UserMode, MmCached, NULL, FALSE, NormalPagePriority );
	}
	__except(EXCEPTION_EXECUTE_HANDLER)
	{
		p_umv_buf->status = IB_ERROR;
		goto mlnx_um_open_err3;
	}

	/* Register with THH (attach to the HCA). */
	prot_ctx = MOSAL_get_current_prot_ctx();
	if( THH_hob_alloc_ul_res(hh_hndl, prot_ctx, p_um_ca->ul_hca_res) != HH_OK )
	{
		HCA_TRACE( CL_DBG_ERROR, ("Failed to get ul_res\n"));
		p_umv_buf->status = IB_ERROR;
	}

	if( p_umv_buf->status == IB_SUCCESS )
	{
		/* Copy the dev info. */
		p_um_ca->dev_info = *hca_ul_info;
		*ph_um_ca = (ib_ca_handle_t)p_um_ca;
		(*(void** __ptr64)p_umv_buf->p_inout_buf) = p_um_ca->p_mapped_addr;
		p_umv_buf->status = IB_SUCCESS;
	}
	else
	{
		/* The error labels live inside this else so cleanup runs in
		 * reverse order of acquisition; they are reached only via goto. */
		MmUnmapLockedPages( p_um_ca->p_mapped_addr, p_um_ca->p_mdl );
mlnx_um_open_err3:
		IoFreeMdl( p_um_ca->p_mdl );
mlnx_um_open_err2:
		cl_free( p_um_ca );
mlnx_um_open_err1:
		*ph_um_ca = NULL;
	}

	//*ph_um_ca = NULL;
	/* NOTE(review): output_size is set even on error paths — confirm
	 * callers ignore it when status != IB_SUCCESS. */
	p_umv_buf->output_size = sizeof(void*);
	HCA_EXIT( MLNX_DBG_TRACE );
	return p_umv_buf->status;
}
605 \r
606 \r
607 static void\r
608 mlnx_um_close(\r
609         IN                              ib_ca_handle_t                          h_ca,\r
610         IN                              ib_ca_handle_t                          h_um_ca )\r
611 {\r
612         mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
613         HH_hca_hndl_t           hh_hndl = NULL;\r
614         mlnx_um_ca_t            *p_um_ca = (mlnx_um_ca_t*)h_um_ca;\r
615 \r
616         HCA_ENTER( MLNX_DBG_TRACE );\r
617 \r
618         mlnx_hobs_get_handle( hob_p, &hh_hndl );\r
619         if( !hh_hndl )\r
620                 goto mlnx_um_close_cleanup;\r
621 \r
622         if( !p_um_ca )\r
623                 return;\r
624 \r
625         THH_hob_free_ul_res( hh_hndl, p_um_ca->ul_hca_res );\r
626 \r
627 mlnx_um_close_cleanup:\r
628         MmUnmapLockedPages( p_um_ca->p_mapped_addr, p_um_ca->p_mdl );\r
629         IoFreeMdl( p_um_ca->p_mdl );\r
630         cl_free( p_um_ca );\r
631 \r
632         HCA_EXIT( MLNX_DBG_TRACE );\r
633 }\r
634 \r
635 \r
636 /*\r
637 *    Protection Domain and Reliable Datagram Domain Verbs\r
638 */\r
639 \r
640 ib_api_status_t\r
641 mlnx_allocate_pd (\r
642         IN              const   ib_ca_handle_t                          h_ca,\r
643         IN              const   ib_pd_type_t                            type,\r
644                 OUT                     ib_pd_handle_t                          *ph_pd,\r
645         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
646 {\r
647         mlnx_hob_t                              *hob_p = (mlnx_hob_t *)h_ca;\r
648         mlnx_hobul_t                    *hobul_p;\r
649         HH_hca_dev_t                    *hca_ul_info;\r
650         HHUL_pd_hndl_t                  hhul_pd_hndl = 0;\r
651         void                                    *pd_ul_resources_p = NULL;\r
652         u_int32_t                               pd_idx;\r
653         ib_api_status_t                 status;\r
654         MOSAL_protection_ctx_t  prot_ctx;\r
655 \r
656         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
657 \r
658         hobul_p = mlnx_hobs_get_hobul(hob_p);\r
659         if (NULL == hobul_p) {\r
660                 status = IB_INVALID_CA_HANDLE;\r
661                 goto cleanup;\r
662         }\r
663 \r
664         hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;\r
665         if (NULL == hca_ul_info) {\r
666                 status = IB_INVALID_CA_HANDLE;\r
667                 goto cleanup;\r
668         }\r
669 \r
670         if( p_umv_buf && p_umv_buf->command )\r
671         {\r
672                 // For user mode calls - obtain and verify the vendor information\r
673                 if ((p_umv_buf->input_size - sizeof (u_int32_t))  != \r
674                         hca_ul_info->pd_ul_resources_sz ||\r
675                         NULL == p_umv_buf->p_inout_buf) {\r
676                                 status = IB_INVALID_PARAMETER;\r
677                                 goto cleanup;\r
678                         }\r
679                         pd_ul_resources_p = (void *)p_umv_buf->p_inout_buf;\r
680 \r
681                         /* get the current protection context */ \r
682                         prot_ctx = MOSAL_get_current_prot_ctx();\r
683         }\r
684         else\r
685         {\r
686                 // for kernel mode calls - allocate app resources. Use prep->call->done sequence\r
687                 pd_ul_resources_p = cl_zalloc( hca_ul_info->pd_ul_resources_sz);\r
688                 if (NULL == pd_ul_resources_p) {\r
689                         status = IB_INSUFFICIENT_MEMORY;\r
690                         goto cleanup;\r
691                 }\r
692 \r
693                 switch( type )\r
694                 {\r
695                 case IB_PDT_SQP:\r
696                         if (HH_OK != THHUL_pdm_alloc_pd_avs_prep(hobul_p->hhul_hndl,\r
697                                 g_sqp_max_avs, PD_FOR_SQP, &hhul_pd_hndl, pd_ul_resources_p))\r
698                         {\r
699                                 status = IB_ERROR;\r
700                                 goto cleanup;\r
701                         }\r
702                         break;\r
703 \r
704                 case IB_PDT_UD:\r
705                         if (HH_OK != THHUL_pdm_alloc_pd_avs_prep(hobul_p->hhul_hndl,\r
706                                 g_sqp_max_avs, PD_NO_FLAGS, &hhul_pd_hndl, pd_ul_resources_p))\r
707                         {\r
708                                 status = IB_ERROR;\r
709                                 goto cleanup;\r
710                         }\r
711                         break;\r
712 \r
713                 default:\r
714                         if (HH_OK != THHUL_pdm_alloc_pd_prep(hobul_p->hhul_hndl, &hhul_pd_hndl, pd_ul_resources_p)) {\r
715                                 status = IB_ERROR;\r
716                                 goto cleanup;\r
717                         }\r
718                 }\r
719                 /* get the current protection context */ \r
720                 prot_ctx = MOSAL_get_kernel_prot_ctx();\r
721         }\r
722 \r
723         // Allocate the PD (cmdif)\r
724         if (HH_OK != THH_hob_alloc_pd(hobul_p->hh_hndl, prot_ctx, pd_ul_resources_p, &pd_idx)) {\r
725                 status = IB_INSUFFICIENT_RESOURCES;\r
726                 goto cleanup_pd;\r
727         }\r
728 \r
729         if( !(p_umv_buf && p_umv_buf->command) )\r
730         {\r
731                 // Manage user level resources\r
732                 if (HH_OK != THHUL_pdm_alloc_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl, pd_idx, pd_ul_resources_p)) {\r
733                         THH_hob_free_pd(hobul_p->hh_hndl, pd_idx);\r
734                         status = IB_ERROR;\r
735                         goto cleanup_pd;\r
736                 }\r
737         }\r
738 \r
739         VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup_pd);\r
740 \r
741         // Save data refs for future use\r
742         cl_mutex_acquire(&hobul_p->pd_info_tbl[pd_idx].mutex);\r
743         hobul_p->pd_info_tbl[pd_idx].pd_num = pd_idx;\r
744         hobul_p->pd_info_tbl[pd_idx].hca_idx = hob_p->index;\r
745         hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl = hhul_pd_hndl;\r
746         hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p = pd_ul_resources_p;\r
747         hobul_p->pd_info_tbl[pd_idx].count = 0;\r
748         hobul_p->pd_info_tbl[pd_idx].kernel_mode = !(p_umv_buf && p_umv_buf->command);\r
749         hobul_p->pd_info_tbl[pd_idx].mark = E_MARK_PD;\r
750         cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex);\r
751 \r
752         cl_atomic_inc( &hobul_p->count );\r
753 \r
754         if (ph_pd) *ph_pd = (ib_pd_handle_t)PD_HNDL_FROM_PD(pd_idx);\r
755         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca_idx 0x%x pd_idx 0x%x returned 0x%p\n", hob_p->index, pd_idx, *ph_pd));\r
756 \r
757         if( p_umv_buf && p_umv_buf->command )\r
758         {\r
759                 p_umv_buf->output_size = p_umv_buf->input_size;\r
760                 /* \r
761                 * Copy the pd_idx back to user\r
762                 */\r
763                 cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->pd_ul_resources_sz),\r
764                         &pd_idx, sizeof (pd_idx));\r
765                 p_umv_buf->status = IB_SUCCESS;\r
766         }\r
767         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
768         return IB_SUCCESS;\r
769 \r
770 cleanup_pd:\r
771         THHUL_pdm_free_pd_prep(hobul_p->hhul_hndl, hhul_pd_hndl, FALSE);\r
772         THHUL_pdm_free_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl);\r
773 \r
774 cleanup:\r
775         if( !(p_umv_buf && p_umv_buf->command) && pd_ul_resources_p )\r
776                 cl_free( pd_ul_resources_p);\r
777         if( p_umv_buf && p_umv_buf->command )\r
778         {\r
779                 p_umv_buf->output_size = 0;\r
780                 p_umv_buf->status = status;\r
781         }\r
782         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));\r
783         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
784         return status;\r
785 }\r
786 \r
/*
 * Deallocate a protection domain.
 *
 * h_pd encodes both the HCA index and the PD index (see PD_HCA_FROM_HNDL /
 * PD_NUM_FROM_HNDL).  The PD table entry must be marked in use and must have
 * no remaining objects referencing it (count == 0), otherwise the call fails
 * with IB_RESOURCE_BUSY.
 *
 * Returns IB_SUCCESS, IB_INVALID_CA_HANDLE, IB_INVALID_PD_HANDLE,
 * IB_RESOURCE_BUSY, or IB_ERROR on a THH/THHUL failure.
 */
ib_api_status_t
mlnx_deallocate_pd (
	IN				ib_pd_handle_t					h_pd)
{
	u_int32_t			hca_idx = PD_HCA_FROM_HNDL(h_pd);
	u_int32_t			pd_idx  = PD_NUM_FROM_HNDL(h_pd);
	mlnx_hobul_t		*hobul_p;
	HHUL_pd_hndl_t		hhul_pd_hndl;
	ib_api_status_t		status;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	/* Validate the HCA part of the handle and look up the per-HCA object. */
	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
	hobul_p = mlnx_hobul_array[hca_idx];
	if (NULL == hobul_p) {
		status =  IB_INVALID_PD_HANDLE;
		goto cleanup;
	}
	/* Validate the PD part: index in range and table entry marked in use. */
	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);
	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
		status =  IB_INVALID_PD_HANDLE;
		goto cleanup;
	}

	/* Serialize against concurrent operations on this PD table entry. */
	cl_mutex_acquire(&hobul_p->pd_info_tbl[pd_idx].mutex);

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d k_mod %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count, hobul_p->pd_info_tbl[pd_idx].kernel_mode));

	/* The PD may not be destroyed while QPs/AVs/etc. still reference it. */
	if (0 != hobul_p->pd_info_tbl[pd_idx].count) {
		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));
		status = IB_RESOURCE_BUSY;
		goto cleanup_locked;
	}

	hhul_pd_hndl = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;

	// PREP:
	/* For kernel-mode PDs the user-level library state is owned here, so run
	 * the prep half of the prep -> free -> done teardown sequence.  User-mode
	 * PDs keep that state in the caller's address space. */
	if (hobul_p->pd_info_tbl[pd_idx].kernel_mode) {
		if (HH_OK != THHUL_pdm_free_pd_prep(hobul_p->hhul_hndl, hhul_pd_hndl, FALSE)) {
			status = IB_ERROR;
			goto cleanup_locked;
		}
	}

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d before free_pd hh_hndl %p\n", 
		pd_idx, hobul_p->hh_hndl));

	/* Release the PD in the HCA via the command interface. */
	if (HH_OK != THH_hob_free_pd(hobul_p->hh_hndl, pd_idx)) {
		status = IB_ERROR;
		goto cleanup_locked;
	}

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d after free_pd\n", pd_idx));

	/* Complete the user-level teardown and free the resources allocated at
	 * PD creation time (kernel-mode PDs only). */
	if (hobul_p->pd_info_tbl[pd_idx].kernel_mode) {
		if (HH_OK != THHUL_pdm_free_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl)) {
			status = IB_ERROR;
			goto cleanup_locked;
		}
		if (hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p)
			cl_free( hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p);
	}

	/* Mark the table slot free for reuse. */
	hobul_p->pd_info_tbl[pd_idx].mark = E_MARK_INVALID;
	hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p = NULL;

	cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex);

	/* One less PD outstanding on this HCA. */
	cl_atomic_dec( &hobul_p->count );
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup_locked:
	cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex);

cleanup:
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
867 \r
868 /* \r
869 * Address Vector Management Verbs\r
870 */\r
871 ib_api_status_t\r
872 mlnx_create_av (\r
873         IN              const   ib_pd_handle_t                          h_pd,\r
874         IN              const   ib_av_attr_t                            *p_addr_vector,\r
875                 OUT                     ib_av_handle_t                          *ph_av,\r
876         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
877 {\r
878         u_int32_t                       hca_idx = PD_HCA_FROM_HNDL(h_pd);\r
879         u_int32_t                       pd_idx  = PD_NUM_FROM_HNDL(h_pd);\r
880         HHUL_ud_av_hndl_t       av_h;\r
881         mlnx_hobul_t            *hobul_p;\r
882         mlnx_avo_t                      *avo_p = NULL;\r
883         HHUL_pd_hndl_t          hhul_pd_hndl;\r
884         ib_api_status_t         status;\r
885 \r
886         VAPI_ud_av_t            av;\r
887 \r
888         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
889 \r
890         VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
891         hobul_p = mlnx_hobul_array[hca_idx];\r
892         if (NULL == hobul_p) {\r
893                 status =  IB_INVALID_PD_HANDLE;\r
894                 goto cleanup;\r
895         }\r
896         VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);\r
897         if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {\r
898                 status =  IB_INVALID_PD_HANDLE;\r
899                 goto cleanup;\r
900         }\r
901         hhul_pd_hndl = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;\r
902 \r
903         if (NULL == (avo_p = cl_zalloc( sizeof(mlnx_avo_t)))) {\r
904                 status = IB_INSUFFICIENT_MEMORY;\r
905                 goto cleanup;\r
906         }\r
907 \r
908         cl_memclr(&av, sizeof(av));\r
909         mlnx_conv_ibal_av(hobul_p->hh_hndl, p_addr_vector, &av);\r
910         // This creates a non priviledged ud_av.\r
911         // To create a privilged ud_av call THH_hob_create_ud_av()\r
912         if (HH_OK != THHUL_pdm_create_ud_av(hobul_p->hhul_hndl, hhul_pd_hndl, &av, &av_h)) {\r
913                 status = IB_INSUFFICIENT_RESOURCES;\r
914                 goto cleanup;\r
915         }\r
916 \r
917         // update PD object count\r
918         cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count);\r
919         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));\r
920 \r
921 \r
922         avo_p->mark    = E_MARK_AV;\r
923         avo_p->hca_idx = hca_idx;\r
924         avo_p->pd_idx  = pd_idx;\r
925         avo_p->h_av    = av_h;\r
926 \r
927         if (ph_av) *ph_av = (ib_av_handle_t)avo_p;\r
928 \r
929         if( p_umv_buf && p_umv_buf->command )\r
930         {\r
931                 p_umv_buf->status = IB_SUCCESS;\r
932         }\r
933         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
934         return IB_SUCCESS;\r
935 \r
936 cleanup:\r
937         if (avo_p) {\r
938                 avo_p->mark = E_MARK_INVALID;\r
939                 cl_free( avo_p);\r
940         }\r
941         if( p_umv_buf && p_umv_buf->command )\r
942         {\r
943                 p_umv_buf->output_size = 0;\r
944                 p_umv_buf->status = status;\r
945         }\r
946 \r
947         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));\r
948         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
949         return status;\r
950 }\r
951 \r
952 ib_api_status_t\r
953 mlnx_query_av (\r
954         IN              const   ib_av_handle_t                          h_av,\r
955                 OUT                     ib_av_attr_t                            *p_addr_vector,\r
956                 OUT                     ib_pd_handle_t                          *ph_pd,\r
957         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
958 {\r
959         mlnx_avo_t                      *avo_p = (mlnx_avo_t *)h_av;\r
960         mlnx_hobul_t            *hobul_p;\r
961         ib_api_status_t         status;\r
962 \r
963         VAPI_ud_av_t            av;\r
964 \r
965         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
966         if (!avo_p || avo_p->mark != E_MARK_AV) {\r
967                 status = IB_INVALID_AV_HANDLE;\r
968                 goto cleanup;\r
969         }\r
970 \r
971         VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
972         hobul_p = mlnx_hobul_array[avo_p->hca_idx];\r
973         if (NULL == hobul_p) {\r
974                 status =  IB_INVALID_AV_HANDLE;\r
975                 goto cleanup;\r
976         }\r
977         VALIDATE_INDEX(avo_p->pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);\r
978         if (E_MARK_PD != hobul_p->pd_info_tbl[avo_p->pd_idx].mark) {\r
979                 status =  IB_INVALID_PD_HANDLE;\r
980                 goto cleanup;\r
981         }\r
982 \r
983         if (p_addr_vector) {\r
984                 if (HH_OK != THHUL_pdm_query_ud_av(hobul_p->hhul_hndl, avo_p->h_av, &av)) {\r
985                         status = IB_ERROR;\r
986                         goto cleanup;\r
987                 }\r
988                 mlnx_conv_vapi_av(hobul_p->hh_hndl, &av, p_addr_vector);\r
989         }\r
990 \r
991         if (ph_pd) *ph_pd = (ib_pd_handle_t)PD_HNDL_FROM_PD(avo_p->pd_idx);\r
992 \r
993         if( p_umv_buf && p_umv_buf->command )\r
994         {\r
995                 p_umv_buf->output_size = 0;\r
996                 p_umv_buf->status = IB_SUCCESS;\r
997         }\r
998         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
999         return IB_SUCCESS;\r
1000 \r
1001 cleanup:\r
1002         if( p_umv_buf && p_umv_buf->command )\r
1003         {\r
1004                 p_umv_buf->output_size = 0;\r
1005                 p_umv_buf->status = status;\r
1006         }\r
1007         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));\r
1008         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1009         return status;\r
1010 }\r
1011 \r
1012 ib_api_status_t\r
1013 mlnx_modify_av (\r
1014         IN              const   ib_av_handle_t                          h_av,\r
1015         IN              const   ib_av_attr_t                            *p_addr_vector,\r
1016         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1017 {\r
1018         mlnx_avo_t                      *avo_p = (mlnx_avo_t *)h_av;\r
1019         mlnx_hobul_t            *hobul_p;\r
1020         ib_api_status_t         status;\r
1021 \r
1022         VAPI_ud_av_t            av;\r
1023 \r
1024         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1025         if (!avo_p || avo_p->mark != E_MARK_AV) {\r
1026                 status = IB_INVALID_AV_HANDLE;\r
1027                 goto cleanup;\r
1028         }\r
1029 \r
1030         VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
1031         hobul_p = mlnx_hobul_array[avo_p->hca_idx];\r
1032         if (NULL == hobul_p) {\r
1033                 status =  IB_INVALID_AV_HANDLE;\r
1034                 goto cleanup;\r
1035         }\r
1036 \r
1037         cl_memclr(&av, sizeof(av));\r
1038         mlnx_conv_ibal_av(hobul_p->hh_hndl, p_addr_vector, &av);\r
1039         if (HH_OK != THHUL_pdm_modify_ud_av(hobul_p->hhul_hndl, avo_p->h_av, &av)) {\r
1040                 status = IB_ERROR;\r
1041                 goto cleanup;\r
1042         }\r
1043 \r
1044         if( p_umv_buf && p_umv_buf->command )\r
1045         {\r
1046                 p_umv_buf->output_size = 0;\r
1047                 p_umv_buf->status = IB_SUCCESS;\r
1048         }\r
1049         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1050         return IB_SUCCESS;\r
1051 \r
1052 cleanup:\r
1053         if( p_umv_buf && p_umv_buf->command )\r
1054         {\r
1055                 p_umv_buf->output_size = 0;\r
1056                 p_umv_buf->status = status;\r
1057         }\r
1058         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));\r
1059         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1060         return status;\r
1061 }\r
1062 \r
1063 ib_api_status_t\r
1064 mlnx_destroy_av (\r
1065         IN              const   ib_av_handle_t                          h_av)\r
1066 {\r
1067         mlnx_avo_t                      *avo_p = (mlnx_avo_t *)h_av;\r
1068         mlnx_hobul_t            *hobul_p;\r
1069         ib_api_status_t         status;\r
1070 \r
1071         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1072         if (!avo_p || avo_p->mark != E_MARK_AV) {\r
1073                 status = IB_INVALID_AV_HANDLE;\r
1074                 goto cleanup;\r
1075         }\r
1076 \r
1077         VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
1078         hobul_p = mlnx_hobul_array[avo_p->hca_idx];\r
1079         if (NULL == hobul_p) {\r
1080                 status =  IB_INVALID_AV_HANDLE;\r
1081                 goto cleanup;\r
1082         }\r
1083         VALIDATE_INDEX(avo_p->pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);\r
1084         if (E_MARK_PD != hobul_p->pd_info_tbl[avo_p->pd_idx].mark) {\r
1085                 status =  IB_INVALID_PD_HANDLE;\r
1086                 goto cleanup;\r
1087         }\r
1088 \r
1089         // This destroy's a non priviledged ud_av.\r
1090         // To destroy a privilged ud_av call THH_hob_destroy_ud_av()\r
1091         if (HH_OK != THHUL_pdm_destroy_ud_av(hobul_p->hhul_hndl, avo_p->h_av)) {\r
1092                 status = IB_ERROR;\r
1093                 goto cleanup;\r
1094         }\r
1095 \r
1096         // update PD object count\r
1097         cl_atomic_dec(&hobul_p->pd_info_tbl[avo_p->pd_idx].count);\r
1098         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", avo_p->pd_idx, hobul_p->pd_info_tbl[avo_p->pd_idx].count));\r
1099 \r
1100         avo_p->mark = E_MARK_INVALID;\r
1101         cl_free( avo_p);\r
1102         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1103         return IB_SUCCESS;\r
1104 \r
1105 cleanup:\r
1106         if (avo_p) {\r
1107                 avo_p->mark = E_MARK_INVALID;\r
1108                 cl_free( avo_p);\r
1109         }\r
1110         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));\r
1111         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1112         return status;\r
1113 }\r
1114 \r
1115 /*\r
1116 *       Queue Pair Management Verbs\r
1117 */\r
1118 \r
/*
 * Create a queue pair on the given protection domain.
 *
 * Two calling modes:
 *  - user mode (p_umv_buf && p_umv_buf->command): the caller supplies the
 *    THHUL qp resources in p_umv_buf->p_inout_buf (resources area followed
 *    by a u_int32_t slot) and owns the user-level half; the qp number is
 *    copied back into that trailing slot on success.
 *  - kernel mode: this function runs the full THHUL prep -> THH create ->
 *    done sequence itself and allocates the send/receive scatter-gather
 *    scratch arrays.
 *
 * On success *ph_qp receives the per-HCA qp_info_tbl entry for the new QP
 * and the PD's object count is incremented.  On error all partially created
 * resources are released via the cleanup labels.
 */
ib_api_status_t
mlnx_create_qp (
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	void						*qp_context,
	IN		const	ib_qp_create_t				*p_create_attr,
		OUT			ib_qp_attr_t				*p_qp_attr,
		OUT			ib_qp_handle_t				*ph_qp,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t			status;
	ib_qp_handle_t			h_qp;

	u_int32_t				hca_idx = PD_HCA_FROM_HNDL(h_pd);
	u_int32_t				pd_idx  = PD_NUM_FROM_HNDL(h_pd);
	u_int32_t				qp_num;
	u_int32_t				qp_idx;
	u_int32_t				send_cq_num;
	u_int32_t				send_cq_idx;
	u_int32_t				recv_cq_num;
	u_int32_t				recv_cq_idx;
	mlnx_hobul_t			*hobul_p;
	HH_hca_dev_t			*hca_ul_info;
	HH_qp_init_attr_t		hh_qp_init_attr;
	HHUL_qp_init_attr_t		ul_qp_init_attr;
	HHUL_qp_hndl_t			hhul_qp_hndl = NULL;
	VAPI_qp_cap_t			hh_qp_cap;
	void					*qp_ul_resources_p = NULL;
	VAPI_sg_lst_entry_t		*send_sge_p = NULL;
	VAPI_sg_lst_entry_t		*recv_sge_p = NULL;
	u_int32_t				num_sge;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	/* Validate the PD handle: HCA index, per-HCA object, PD index and mark. */
	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
	hobul_p = mlnx_hobul_array[hca_idx];
	if (NULL == hobul_p) {
		status = IB_INVALID_PD_HANDLE;
		goto cleanup;
	}
	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);
	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
		status =  IB_INVALID_PD_HANDLE;
		goto cleanup;
	}

	hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;
	if (NULL == hca_ul_info) {
		status =  IB_INVALID_PD_HANDLE;
		goto cleanup;
	}

	// The create attributes must be provided
	if (!p_create_attr) {
		status =  IB_INVALID_PARAMETER;
		goto cleanup;
	}

	// convert input parameters
	cl_memclr(&ul_qp_init_attr, sizeof(ul_qp_init_attr));
	mlnx_conv_qp_create_attr(p_create_attr, &ul_qp_init_attr, NULL);
	/* Resolve and validate the send and receive CQ handles. */
	send_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_sq_cq);
	recv_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_rq_cq);
	send_cq_idx = send_cq_num & hobul_p->cq_idx_mask;
	recv_cq_idx = recv_cq_num & hobul_p->cq_idx_mask;
	VALIDATE_INDEX(send_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);
	if ( E_MARK_CQ != hobul_p->cq_info_tbl[send_cq_idx].mark) {
		status =  IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}
	VALIDATE_INDEX(recv_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);
	if ( E_MARK_CQ != hobul_p->cq_info_tbl[recv_cq_idx].mark) {
		status =  IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}

	ul_qp_init_attr.pd    = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;
	ul_qp_init_attr.sq_cq = hobul_p->cq_info_tbl[send_cq_idx].hhul_cq_hndl;
	ul_qp_init_attr.rq_cq = hobul_p->cq_info_tbl[recv_cq_idx].hhul_cq_hndl;

	if( p_umv_buf && p_umv_buf->command )
	{
		// For user mode calls - obtain and verify the vendor information
		/* The inout buffer must hold the THHUL resources area plus one
		 * trailing u_int32_t used to return the qp number. */
		if ((p_umv_buf->input_size - sizeof (u_int32_t)) != 
			hca_ul_info->qp_ul_resources_sz ||
			NULL == p_umv_buf->p_inout_buf) {
				status = IB_INVALID_PARAMETER;
				goto cleanup;
			}
			qp_ul_resources_p = (void *)p_umv_buf->p_inout_buf;

	} else {
		// for kernel mode calls - allocate app resources. Use prep->call->done sequence
		qp_ul_resources_p = cl_zalloc( hca_ul_info->qp_ul_resources_sz);
		if (!qp_ul_resources_p) {
			status = IB_INSUFFICIENT_MEMORY;
			goto cleanup;
		}

		if (HH_OK != THHUL_qpm_create_qp_prep(hobul_p->hhul_hndl, &ul_qp_init_attr, &hhul_qp_hndl, &hh_qp_cap, qp_ul_resources_p)) {
			status = IB_ERROR;
			goto cleanup;
		}
		// TBD: if not same report error to IBAL
		/* Adopt the capabilities the library actually granted. */
		ul_qp_init_attr.qp_cap = hh_qp_cap;  // struct assign
	}

	// Convert HHUL to HH structure (for HH create_qp)
	hh_qp_init_attr.pd = pd_idx;
	hh_qp_init_attr.rdd = 0; // TBD: RDD
	if( ul_qp_init_attr.srq != HHUL_INVAL_SRQ_HNDL )
	{
		// TBD: HH handle from HHUL handle.
		/* SRQs are not supported on this path; the assert documents that. */
		CL_ASSERT( ul_qp_init_attr.srq == HHUL_INVAL_SRQ_HNDL );
	}
	else
	{
		hh_qp_init_attr.srq = HH_INVAL_SRQ_HNDL;
	}
	hh_qp_init_attr.sq_cq = send_cq_num;
	hh_qp_init_attr.rq_cq = recv_cq_num;
	hh_qp_init_attr.sq_sig_type = ul_qp_init_attr.sq_sig_type;
	hh_qp_init_attr.rq_sig_type = ul_qp_init_attr.rq_sig_type;
	hh_qp_init_attr.ts_type = ul_qp_init_attr.ts_type;
	hh_qp_init_attr.qp_cap  = ul_qp_init_attr.qp_cap; // struct assign

	// Allocate the QP (cmdif)
	if (HH_OK != THH_hob_create_qp(hobul_p->hh_hndl, &hh_qp_init_attr, qp_ul_resources_p, &qp_num)) {
		status = IB_INSUFFICIENT_RESOURCES;
		goto cleanup_qp;
	}

	if( !(p_umv_buf && p_umv_buf->command) )
	{
		// Manage user level resources
		if (HH_OK != THHUL_qpm_create_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, qp_num, qp_ul_resources_p)) {
			THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num);
			status = IB_ERROR;
			goto cleanup_qp;
		}

		// Create SQ and RQ iov
		/* Scatter-gather scratch arrays used by kernel-mode post_send/post_recv. */
		num_sge = ul_qp_init_attr.qp_cap.max_sg_size_sq;
		send_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));
		if (!send_sge_p) {
			status = IB_INSUFFICIENT_MEMORY;
			goto cleanup_qp;
		}

		num_sge = ul_qp_init_attr.qp_cap.max_sg_size_rq;
		recv_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));
		if (!recv_sge_p) {
			status = IB_INSUFFICIENT_MEMORY;
			goto cleanup_qp;
		}
	}

	// Save data refs for future use
	qp_idx = qp_num & hobul_p->qp_idx_mask;
	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_ERROR, cleanup_qp);
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hobul_p 0x%p mask 0x%x qp_idx 0x%x qp_num 0x%x\n",
		hobul_p, hobul_p->qp_idx_mask, qp_idx, qp_num));

	/* The QP handle is the per-HCA qp_info_tbl entry at qp_idx. */
	h_qp = (ib_qp_handle_t)QP_HNDL_FROM_QP(qp_idx);
	cl_mutex_acquire(&h_qp->mutex);
	h_qp->pd_num			= pd_idx;
	h_qp->hhul_qp_hndl		= hhul_qp_hndl;
	h_qp->qp_type			= p_create_attr->qp_type;
	h_qp->sq_signaled		= p_create_attr->sq_signaled;
	h_qp->qp_context		= qp_context;
	h_qp->qp_ul_resources_p = qp_ul_resources_p;
	h_qp->sq_size			= ul_qp_init_attr.qp_cap.max_sg_size_sq;	/* NOTE(review): set from max SGE count, not WQE depth - confirm intended */
	h_qp->rq_size			= ul_qp_init_attr.qp_cap.max_sg_size_rq;	/* NOTE(review): same as sq_size above */
	h_qp->send_sge_p		= send_sge_p;
	h_qp->recv_sge_p		= recv_sge_p;
	h_qp->qp_num			= qp_num;
	h_qp->h_sq_cq			= &hobul_p->cq_info_tbl[send_cq_idx];
	h_qp->h_rq_cq			= &hobul_p->cq_info_tbl[recv_cq_idx];
	h_qp->kernel_mode		= !(p_umv_buf && p_umv_buf->command);
	h_qp->mark				= E_MARK_QP;
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("qp num 0x%x idx 0x%x cq_s 0x%x cq_r 0x%x\n",
		qp_num, qp_idx, send_cq_idx, recv_cq_idx));
	cl_mutex_release(&h_qp->mutex);
	// Update PD object count
	cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count);
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));

	// Query QP to obtain requested attributes
	if (p_qp_attr) {
		if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf)))
		{
			/* User-mode callers own the user-level teardown, so skip the
			 * cleanup_qp label (which calls destroy_qp_done) for them. */
			if( !(p_umv_buf && p_umv_buf->command) )
				goto cleanup_qp;
			else
				goto cleanup;
		}
	}

	if (ph_qp) *ph_qp = h_qp;
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = p_umv_buf->input_size;
		p_umv_buf->status = IB_SUCCESS;
		/* 
		* Copy the qp_idx back to user
		*/
		cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->qp_ul_resources_sz),
			&qp_num, sizeof (qp_num));
	}
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup_qp:
	/* Undo kernel-mode allocations; cl_free(NULL) checks are explicit here. */
	if (send_sge_p) cl_free( send_sge_p);
	if (recv_sge_p) cl_free( recv_sge_p);
	if( !(p_umv_buf && p_umv_buf->command) )
		THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl);

cleanup:
	if( !(p_umv_buf && p_umv_buf->command) && qp_ul_resources_p)
		cl_free( qp_ul_resources_p);
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = 0;
		p_umv_buf->status = status;
	}
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
1348 \r
1349 ib_api_status_t\r
1350 mlnx_create_spl_qp (\r
1351         IN              const   ib_pd_handle_t                          h_pd,\r
1352         IN              const   uint8_t                                         port_num,\r
1353         IN              const   void                                            *qp_context,\r
1354         IN              const   ib_qp_create_t                          *p_create_attr,\r
1355                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
1356                 OUT                     ib_qp_handle_t                          *ph_qp )\r
1357 {\r
1358         ib_api_status_t                 status;\r
1359         ib_qp_handle_t                  h_qp;\r
1360         ci_umv_buf_t                    *p_umv_buf = NULL;\r
1361 \r
1362         u_int32_t                               hca_idx = PD_HCA_FROM_HNDL(h_pd);\r
1363         u_int32_t                               pd_idx  = PD_NUM_FROM_HNDL(h_pd);\r
1364         u_int32_t                               qp_num;\r
1365         u_int32_t                               qp_idx;\r
1366         u_int32_t                               send_cq_num;\r
1367         u_int32_t                               send_cq_idx;\r
1368         u_int32_t                               recv_cq_num;\r
1369         u_int32_t                               recv_cq_idx;\r
1370         mlnx_hobul_t                    *hobul_p;\r
1371         HH_hca_dev_t                    *hca_ul_info;\r
1372         HH_qp_init_attr_t               hh_qp_init_attr;\r
1373         HHUL_qp_init_attr_t             ul_qp_init_attr;\r
1374         HHUL_qp_hndl_t                  hhul_qp_hndl = NULL;\r
1375         VAPI_special_qp_t               vapi_qp_type;\r
1376         VAPI_qp_cap_t                   hh_qp_cap;\r
1377         void                                    *qp_ul_resources_p = NULL;\r
1378         VAPI_sg_lst_entry_t             *send_sge_p = NULL;\r
1379         VAPI_sg_lst_entry_t             *recv_sge_p = NULL;\r
1380         u_int32_t                               num_sge;\r
1381 \r
1382         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1383 \r
1384         VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
1385         hobul_p = mlnx_hobul_array[hca_idx];\r
1386         if (NULL == hobul_p) {\r
1387                 status = IB_INVALID_PD_HANDLE;\r
1388                 goto cleanup;\r
1389         }\r
1390         VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);\r
1391         if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {\r
1392                 status =  IB_INVALID_PD_HANDLE;\r
1393                 goto cleanup;\r
1394         }\r
1395 \r
1396         hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;\r
1397         if (NULL == hca_ul_info) {\r
1398                 status =  IB_INVALID_PD_HANDLE;\r
1399                 goto cleanup;\r
1400         }\r
1401 \r
1402         // The create attributes must be provided\r
1403         if (!p_create_attr) {\r
1404                 status =  IB_INVALID_PARAMETER;\r
1405                 goto cleanup;\r
1406         }\r
1407 \r
1408         // convert input parameters\r
1409         cl_memclr(&ul_qp_init_attr, sizeof(ul_qp_init_attr));\r
1410         mlnx_conv_qp_create_attr(p_create_attr, &ul_qp_init_attr, &vapi_qp_type);\r
1411         send_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_sq_cq);\r
1412         recv_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_rq_cq);\r
1413         send_cq_idx = send_cq_num & hobul_p->cq_idx_mask;\r
1414         recv_cq_idx = recv_cq_num & hobul_p->cq_idx_mask;\r
1415         VALIDATE_INDEX(send_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);\r
1416         if ( E_MARK_CQ != hobul_p->cq_info_tbl[send_cq_idx].mark) {\r
1417                 status =  IB_INVALID_CQ_HANDLE;\r
1418                 goto cleanup;\r
1419         }\r
1420         VALIDATE_INDEX(recv_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);\r
1421         if ( E_MARK_CQ != hobul_p->cq_info_tbl[recv_cq_idx].mark) {\r
1422                 status =  IB_INVALID_CQ_HANDLE;\r
1423                 goto cleanup;\r
1424         }\r
1425 \r
1426         ul_qp_init_attr.pd    = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;\r
1427         ul_qp_init_attr.sq_cq = hobul_p->cq_info_tbl[send_cq_idx].hhul_cq_hndl;\r
1428         ul_qp_init_attr.rq_cq = hobul_p->cq_info_tbl[recv_cq_idx].hhul_cq_hndl;\r
1429 \r
1430         if( p_umv_buf && p_umv_buf->command )\r
1431         {\r
1432                 // For user mode calls - obtain and verify the vendor information\r
1433                 if (p_umv_buf->input_size != hca_ul_info->qp_ul_resources_sz ||\r
1434                         NULL == p_umv_buf->p_inout_buf) {\r
1435                                 status = IB_INVALID_PARAMETER;\r
1436                                 goto cleanup;\r
1437                         }\r
1438                         qp_ul_resources_p = (void *)p_umv_buf->p_inout_buf;\r
1439 \r
1440         } else {\r
1441                 // For kernel mode calls - allocate app resources. Use prep->call->done sequence\r
1442                 qp_ul_resources_p = cl_zalloc( hca_ul_info->qp_ul_resources_sz);\r
1443                 if (!qp_ul_resources_p) {\r
1444                         status = IB_INSUFFICIENT_MEMORY;\r
1445                         goto cleanup;\r
1446                 }\r
1447 \r
1448                 if (HH_OK != THHUL_qpm_special_qp_prep(hobul_p->hhul_hndl,\r
1449                         vapi_qp_type,\r
1450                         port_num, \r
1451                         &ul_qp_init_attr,\r
1452                         &hhul_qp_hndl,\r
1453                         &hh_qp_cap,\r
1454                         qp_ul_resources_p)) {\r
1455                                 status = IB_ERROR;\r
1456                                 goto cleanup;\r
1457                         }\r
1458                         // TBD: if not same report error to IBAL\r
1459                         ul_qp_init_attr.qp_cap = hh_qp_cap;  // struct assign\r
1460         }\r
1461 \r
1462         // Convert HHUL to HH structure (for HH create_qp)\r
1463         hh_qp_init_attr.pd = pd_idx;\r
1464         hh_qp_init_attr.rdd = 0; // TBD: RDD\r
1465         if( ul_qp_init_attr.srq != HHUL_INVAL_SRQ_HNDL )\r
1466         {\r
1467                 // TBD: HH handle from HHUL handle.\r
1468                 CL_ASSERT( ul_qp_init_attr.srq == HHUL_INVAL_SRQ_HNDL );\r
1469         }\r
1470         else\r
1471         {\r
1472                 hh_qp_init_attr.srq = HH_INVAL_SRQ_HNDL;\r
1473         }\r
1474         hh_qp_init_attr.sq_cq = send_cq_num;\r
1475         hh_qp_init_attr.rq_cq = recv_cq_num;\r
1476         hh_qp_init_attr.sq_sig_type = ul_qp_init_attr.sq_sig_type;\r
1477         hh_qp_init_attr.rq_sig_type = ul_qp_init_attr.rq_sig_type;\r
1478         hh_qp_init_attr.ts_type = VAPI_TS_UD;\r
1479         hh_qp_init_attr.qp_cap  = ul_qp_init_attr.qp_cap; // struct assign\r
1480 \r
1481         // Allocate the QP (cmdif)\r
1482         if (HH_OK != THH_hob_get_special_qp( hobul_p->hh_hndl,\r
1483                 vapi_qp_type,\r
1484                 port_num,\r
1485                 &hh_qp_init_attr,\r
1486                 qp_ul_resources_p,\r
1487                 &qp_num))\r
1488         {\r
1489                 status = IB_ERROR;\r
1490                 goto cleanup_qp;\r
1491         }\r
1492 \r
1493         if( !(p_umv_buf && p_umv_buf->command) )\r
1494         {\r
1495                 // Manage user level resources\r
1496                 if (HH_OK != THHUL_qpm_create_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, qp_num, qp_ul_resources_p)) {\r
1497                         THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num);\r
1498                         status = IB_ERROR;\r
1499                         goto cleanup_qp;\r
1500                 }\r
1501 \r
1502                 // Create SQ and RQ iov\r
1503                 num_sge = ul_qp_init_attr.qp_cap.max_sg_size_sq;\r
1504                 send_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));\r
1505                 if (!send_sge_p) {\r
1506                         status = IB_INSUFFICIENT_MEMORY;\r
1507                         goto cleanup_qp;\r
1508                 }\r
1509 \r
1510                 num_sge = ul_qp_init_attr.qp_cap.max_sg_size_rq;\r
1511                 recv_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));\r
1512                 if (!recv_sge_p) {\r
1513                         status = IB_INSUFFICIENT_MEMORY;\r
1514                         goto cleanup_qp;\r
1515                 }\r
1516         }\r
1517 \r
1518         // Save data refs for future use\r
1519         qp_idx = qp_num & hobul_p->qp_idx_mask;\r
1520         VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_ERROR, cleanup_qp);\r
1521 \r
1522         h_qp = (ib_qp_handle_t)QP_HNDL_FROM_QP(qp_idx);\r
1523         cl_mutex_acquire(&h_qp->mutex);\r
1524         h_qp->pd_num                    = pd_idx;\r
1525         h_qp->hhul_qp_hndl              = hhul_qp_hndl;\r
1526         h_qp->qp_type                   = p_create_attr->qp_type;\r
1527         h_qp->sq_signaled               = p_create_attr->sq_signaled;\r
1528         h_qp->qp_context                = qp_context;\r
1529         h_qp->qp_ul_resources_p = qp_ul_resources_p;\r
1530         h_qp->sq_size                   = ul_qp_init_attr.qp_cap.max_sg_size_sq;\r
1531         h_qp->rq_size                   = ul_qp_init_attr.qp_cap.max_sg_size_rq;\r
1532         h_qp->send_sge_p                = send_sge_p;\r
1533         h_qp->recv_sge_p                = recv_sge_p;\r
1534         h_qp->qp_num                    = qp_num;\r
1535         h_qp->h_sq_cq                   = &hobul_p->cq_info_tbl[send_cq_idx];\r
1536         h_qp->h_rq_cq                   = &hobul_p->cq_info_tbl[recv_cq_idx];\r
1537         h_qp->kernel_mode               = !(p_umv_buf && p_umv_buf->command);\r
1538         h_qp->mark                              = E_MARK_QP;\r
1539         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("qp num 0x%x idx 0x%x cq_s 0x%x cq_r 0x%x\n",\r
1540                 qp_num, qp_idx, send_cq_idx, recv_cq_idx));\r
1541         cl_mutex_release(&h_qp->mutex);\r
1542 \r
1543         /* Mark the CQ's associated with this special QP as being high priority. */\r
1544         cl_atomic_inc( &h_qp->h_sq_cq->spl_qp_cnt );\r
1545         KeSetImportanceDpc( &h_qp->h_sq_cq->dpc, HighImportance );\r
1546         cl_atomic_inc( &h_qp->h_rq_cq->spl_qp_cnt );\r
1547         KeSetImportanceDpc( &h_qp->h_rq_cq->dpc, HighImportance );\r
1548 \r
1549         // Update PD object count\r
1550         cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count);\r
1551         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));\r
1552 \r
1553         // Query QP to obtain requested attributes\r
1554         if (p_qp_attr) {\r
1555                 if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf))) {\r
1556                         goto cleanup;\r
1557                 }\r
1558         }\r
1559 \r
1560         if (ph_qp) *ph_qp = h_qp;\r
1561         if( p_umv_buf && p_umv_buf->command )\r
1562         {\r
1563                 p_umv_buf->output_size = p_umv_buf->input_size;\r
1564                 p_umv_buf->status = IB_SUCCESS;\r
1565         }\r
1566         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1567         return IB_SUCCESS;\r
1568 \r
1569 cleanup_qp:\r
1570         if (send_sge_p) cl_free( send_sge_p);\r
1571         if (recv_sge_p) cl_free( recv_sge_p);\r
1572         if( !(p_umv_buf && p_umv_buf->command) )\r
1573                 THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl);\r
1574 \r
1575 cleanup:\r
1576         if( !(p_umv_buf && p_umv_buf->command) && qp_ul_resources_p )\r
1577                 cl_free( qp_ul_resources_p);\r
1578         if( p_umv_buf && p_umv_buf->command )\r
1579         {\r
1580                 p_umv_buf->output_size = 0;\r
1581                 p_umv_buf->status = status;\r
1582         }\r
1583         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));\r
1584         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1585         return status;\r
1586 }\r
1587 \r
/*
 * mlnx_modify_qp
 *
 * Modify a queue pair's state and/or attributes.
 *
 * Flow (performed under the per-QP mutex):
 *   1. Query the current QP attributes from HW (THH_hob_query_qp) so the
 *      query result serves as the default for fields not being modified.
 *   2. Convert the IBAL modify attributes into VAPI form
 *      (mlnx_conv_qp_modify_attr).
 *   3. Apply the state transition in HW (THH_hob_modify_qp).
 *   4. For kernel-mode QPs only, notify the user-level library of the
 *      new state (THHUL_qpm_modify_qp_done).
 *
 * For user-mode QPs the resulting VAPI_qp_state_t is copied back to the
 * caller through p_umv_buf so the UM library can track the new state.
 *
 * Returns IB_SUCCESS, or an error status; on error, p_umv_buf (if
 * supplied) carries the failure status back to the user-mode proxy.
 */
ib_api_status_t
mlnx_modify_qp (
	IN		const	ib_qp_handle_t				h_qp,
	IN		const	ib_qp_mod_t					*p_modify_attr,
		OUT			ib_qp_attr_t				*p_qp_attr OPTIONAL,
	IN	OUT			ci_umv_buf_t				*p_umv_buf OPTIONAL )
{
	ib_api_status_t		status;

	u_int32_t			hca_idx = QP_HCA_FROM_HNDL(h_qp);
	u_int32_t			qp_num  = QP_NUM_FROM_HNDL(h_qp);
	u_int32_t			qp_idx  = 0;
	mlnx_hobul_t		*hobul_p;
	HHUL_qp_hndl_t		hhul_qp_hndl;
	VAPI_qp_attr_mask_t	hh_qp_attr_mask;
	VAPI_qp_attr_t		hh_qp_attr;
	VAPI_qp_state_t		hh_qp_state;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	// Resolve and validate the HCA object and the QP table entry from the handle.
	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
	hobul_p = mlnx_hobul_array[hca_idx];
	if (NULL == hobul_p) {
		status = IB_INVALID_QP_HANDLE;
		goto cleanup;
	}

	qp_idx = qp_num & hobul_p->qp_idx_mask;
	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);
	if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {
		status =  IB_INVALID_QP_HANDLE;
		goto cleanup;
	}

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, 
		("Before acquire mutex to modify qp_idx 0x%x\n", 
		qp_idx));

	cl_mutex_acquire(&hobul_p->qp_info_tbl[qp_idx].mutex);

	hhul_qp_hndl = hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl;

	// Obtain current state of QP
	if (HH_OK != THH_hob_query_qp(hobul_p->hh_hndl, hobul_p->qp_info_tbl[qp_idx].qp_num, &hh_qp_attr))
	{
		status = IB_ERROR;
		goto cleanup_locked;
	}
	hh_qp_state = hh_qp_attr.qp_state; // The current (pre-modify) state

	// Convert the input parameters. Use query result as default (no cl_memset())
	// cl_memclr(&hh_qp_attr, sizeof(hh_qp_attr));
	status = mlnx_conv_qp_modify_attr(hobul_p->hh_hndl,
		hobul_p->qp_info_tbl[qp_idx].qp_type,
		p_modify_attr, &hh_qp_attr, &hh_qp_attr_mask);
	if( status != IB_SUCCESS )
		goto cleanup_locked;

	if (HH_OK != THH_hob_modify_qp(hobul_p->hh_hndl,
		hobul_p->qp_info_tbl[qp_idx].qp_num,
		hh_qp_state, &hh_qp_attr, &hh_qp_attr_mask))
	{
		status = IB_ERROR;
		goto cleanup_locked;
	}

	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, 
		("After hob_modify_qp qp_idx 0x%x k_mod %d\n", 
		qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode));

	// Notify HHUL of the new (post-modify) state. Needed for kernel-mode QPs only;
	// user-mode QPs learn the new state via the p_umv_buf copy below.
	if (hobul_p->qp_info_tbl[qp_idx].kernel_mode) {
		if (HH_OK != THHUL_qpm_modify_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, hh_qp_attr.qp_state))
		{
			status = IB_ERROR;
			goto cleanup_locked;
		} 
	} 
	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);

	// Optionally return the full post-modify attributes (kernel-mode callers only).
	if ((p_qp_attr) && !(p_umv_buf && p_umv_buf->command)) {
		if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf))) {
			goto cleanup;
		}
	}

	// For user-mode QPs, hand the new VAPI state back to the UM library.
	if ( p_umv_buf && p_umv_buf->command && (! hobul_p->qp_info_tbl[qp_idx].kernel_mode)) {
		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, 
			("mod_qp qp_idx %d umv_buf %p inout_buf %p\n", 
			qp_idx, p_umv_buf, p_umv_buf->p_inout_buf));
		if (p_umv_buf->p_inout_buf) {
			p_umv_buf->output_size = sizeof (VAPI_qp_state_t);
			cl_memcpy (p_umv_buf->p_inout_buf, &(hh_qp_attr.qp_state), 
				(size_t)p_umv_buf->output_size);
			p_umv_buf->status = IB_SUCCESS;
		}
	}
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;


cleanup_locked:
	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);

cleanup:
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = 0;
		p_umv_buf->status = status;
	}
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
1702 \r
/*
 * mlnx_query_qp
 *
 * Query a QP's current attributes from HW and convert them into the
 * IBAL ib_qp_attr_t form. Fields the HW query cannot supply (qp_type,
 * PD/CQ handles, sq_signaled) are filled in from the driver's cached
 * per-QP state.
 *
 * h_qp      - QP handle (encodes HCA index and QP number).
 * p_qp_attr - receives the converted attributes.
 * p_umv_buf - optional user-mode vendor buffer; no payload is returned
 *             through it here (output_size is set to 0 on success).
 *
 * Returns IB_SUCCESS or an error status.
 */
ib_api_status_t
mlnx_query_qp (
	IN		const	ib_qp_handle_t				h_qp,
		OUT			ib_qp_attr_t				*p_qp_attr,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t		status;

	u_int32_t			hca_idx = QP_HCA_FROM_HNDL(h_qp);
	u_int32_t			qp_num  = QP_NUM_FROM_HNDL(h_qp);
	u_int32_t			qp_idx  = 0;
	mlnx_hobul_t		*hobul_p;
	VAPI_qp_attr_t		hh_qp_attr;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	// Resolve and validate the HCA object and the QP table entry from the handle.
	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
	hobul_p = mlnx_hobul_array[hca_idx];
	if (NULL == hobul_p) {
		status = IB_INVALID_QP_HANDLE;
		goto cleanup;
	}

	qp_idx = qp_num & hobul_p->qp_idx_mask;
	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);
	if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {
		status =  IB_INVALID_QP_HANDLE;
		goto cleanup;
	}

	// NOTE(review): the success path locks &h_qp->mutex while the error path
	// below releases &hobul_p->qp_info_tbl[qp_idx].mutex — presumably h_qp
	// IS &qp_info_tbl[qp_idx] (per QP_HNDL_FROM_QP); confirm the handle macros.
	cl_mutex_acquire(&h_qp->mutex);

	if (HH_OK != THH_hob_query_qp(hobul_p->hh_hndl, h_qp->qp_num, &hh_qp_attr)) {
		status = IB_ERROR;
		goto cleanup_locked;
	}

	// Convert query result into IBAL structure (no cl_memset())
	mlnx_conv_vapi_qp_attr(hobul_p->hh_hndl, &hh_qp_attr, p_qp_attr);
	// These fields come from cached driver state, not from the HW query.
	p_qp_attr->qp_type = h_qp->qp_type;
	p_qp_attr->h_pd    = (ib_pd_handle_t)PD_HNDL_FROM_PD(h_qp->pd_num);
	p_qp_attr->h_sq_cq = h_qp->h_sq_cq;
	p_qp_attr->h_rq_cq = h_qp->h_rq_cq;
	p_qp_attr->sq_signaled = h_qp->sq_signaled;

	cl_mutex_release(&h_qp->mutex);

	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = 0;
		p_umv_buf->status = IB_SUCCESS;
	}
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup_locked:
	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);
cleanup:
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = 0;
		p_umv_buf->status = status;
	}
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
1770 \r
1771 ib_api_status_t\r
1772 mlnx_destroy_qp (\r
1773         IN              const   ib_qp_handle_t                          h_qp,\r
1774         IN              const   uint64_t                                        timewait )\r
1775 {\r
1776         ib_api_status_t         status;\r
1777 \r
1778         u_int32_t                       hca_idx = QP_HCA_FROM_HNDL(h_qp);\r
1779         u_int32_t                       qp_num  = QP_NUM_FROM_HNDL(h_qp);\r
1780         u_int32_t                       pd_idx  = 0;\r
1781         u_int32_t                       qp_idx  = 0;\r
1782         mlnx_hobul_t            *hobul_p;\r
1783         HHUL_qp_hndl_t          hhul_qp_hndl;\r
1784 \r
1785         UNUSED_PARAM( timewait );\r
1786 \r
1787         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1788         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca %d qp 0x%x\n", hca_idx, qp_num));\r
1789 \r
1790         VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);\r
1791         hobul_p = mlnx_hobul_array[hca_idx];\r
1792         if (NULL == hobul_p) {\r
1793                 status = IB_INVALID_QP_HANDLE;\r
1794                 goto cleanup;\r
1795         }\r
1796 \r
1797         qp_idx = qp_num & hobul_p->qp_idx_mask;\r
1798         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hobul_p 0x%p mask 0x%x qp_idx 0x%x mark %d\n",\r
1799                 hobul_p, hobul_p->qp_idx_mask, qp_idx, hobul_p->qp_info_tbl[qp_idx].mark));\r
1800 \r
1801         VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);\r
1802         if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {\r
1803                 if (E_MARK_INVALID == hobul_p->qp_info_tbl[qp_idx].mark) {\r
1804                         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(IB_INVALID_QP_HANDLE)));\r
1805                         return IB_SUCCESS; // Already freed\r
1806                 }\r
1807                 status = IB_INVALID_QP_HANDLE;\r
1808                 goto cleanup;\r
1809         }\r
1810 \r
1811         cl_mutex_acquire(&hobul_p->qp_info_tbl[qp_idx].mutex);\r
1812 \r
1813         hhul_qp_hndl = hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl;\r
1814         pd_idx       = hobul_p->qp_info_tbl[qp_idx].pd_num;\r
1815         VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup_locked);\r
1816 \r
1817         if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {\r
1818                 CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__));\r
1819                 CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd_idx 0x%x mark %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].mark));\r
1820                 status =  IB_INVALID_PD_HANDLE;\r
1821                 goto cleanup_locked;\r
1822         }\r
1823 \r
1824         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, \r
1825                 ("Before THH_destroy qp_idx 0x%x k_mod %d pd_idx 0x%x\n",\r
1826                 qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode, pd_idx));\r
1827 \r
1828         // PREP: no PREP required for destroy_qp\r
1829         if (HH_OK != THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num)) {\r
1830                 status = IB_ERROR;\r
1831                 goto cleanup_locked;\r
1832         }\r
1833 \r
1834         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, \r
1835                 ("After THH_destroy qp_idx 0x%x k_mod %d pd_idx 0x%x\n",\r
1836                 qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode, pd_idx));\r
1837 \r
1838         if (hobul_p->qp_info_tbl[qp_idx].kernel_mode) {\r
1839                 if (HH_OK != THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl)) {\r
1840                         status = IB_ERROR;\r
1841                         goto cleanup_locked;\r
1842                 }\r
1843                 if (hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p)\r
1844                         cl_free( hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p);\r
1845                 if (hobul_p->qp_info_tbl[qp_idx].send_sge_p)\r
1846                         cl_free( hobul_p->qp_info_tbl[qp_idx].send_sge_p);\r
1847                 if (hobul_p->qp_info_tbl[qp_idx].recv_sge_p)\r
1848                         cl_free( hobul_p->qp_info_tbl[qp_idx].recv_sge_p);\r
1849         }\r
1850 \r
1851         if( h_qp->qp_type == IB_QPT_QP0 || h_qp->qp_type == IB_QPT_QP1 )\r
1852         {\r
1853                 if( !cl_atomic_dec( &h_qp->h_sq_cq->spl_qp_cnt ) )\r
1854                         KeSetImportanceDpc( &h_qp->h_sq_cq->dpc, MediumImportance );\r
1855                 if( !cl_atomic_dec( &h_qp->h_rq_cq->spl_qp_cnt ) )\r
1856                         KeSetImportanceDpc( &h_qp->h_rq_cq->dpc, MediumImportance );\r
1857         }\r
1858 \r
1859         hobul_p->qp_info_tbl[qp_idx].mark = E_MARK_INVALID;\r
1860         hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p = NULL;\r
1861         cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);\r
1862 \r
1863         // Update PD object count\r
1864         cl_atomic_dec(&hobul_p->pd_info_tbl[pd_idx].count);\r
1865         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));\r
1866 \r
1867         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1868         return IB_SUCCESS;\r
1869 \r
1870 cleanup_locked:\r
1871         cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);\r
1872 cleanup:\r
1873         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));\r
1874         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1875         return status;\r
1876 }\r
1877 \r
/*
* Completion Queue Management Verbs.
*/
1881 \r
1882 ib_api_status_t\r
1883 mlnx_create_cq (\r
1884         IN              const   ib_ca_handle_t                          h_ca,\r
1885         IN              const   void                                            *cq_context,\r
1886         IN      OUT                     uint32_t                                        *p_size,\r
1887                 OUT                     ib_cq_handle_t                          *ph_cq,\r
1888         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1889 {\r
1890         ib_api_status_t                 status;\r
1891 \r
1892         mlnx_hob_t                              *hob_p = (mlnx_hob_t *)h_ca;\r
1893         u_int32_t                               cq_idx;\r
1894         u_int32_t                               cq_num;\r
1895         u_int32_t                               cq_size = 0;\r
1896         mlnx_hobul_t                    *hobul_p;\r
1897         HH_hca_dev_t                    *hca_ul_info;\r
1898         HHUL_cq_hndl_t                  hhul_cq_hndl = NULL;\r
1899         void                                    *cq_ul_resources_p = NULL;\r
1900         MOSAL_protection_ctx_t  prot_ctx;\r
1901 \r
1902         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1903 \r
1904         hobul_p = mlnx_hobs_get_hobul(hob_p);\r
1905         if (NULL == hobul_p) {\r
1906                 status = IB_INVALID_CA_HANDLE;\r
1907                 goto cleanup;\r
1908         }\r
1909 \r
1910         hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;\r
1911         if (NULL == hca_ul_info) {\r
1912                 status =  IB_INVALID_PD_HANDLE;\r
1913                 goto cleanup;\r
1914         }\r
1915 \r
1916         // The size must be provided\r
1917         if (!p_size) {\r
1918                 status =  IB_INVALID_PARAMETER;\r
1919                 goto cleanup;\r
1920         }\r
1921         // TBD: verify that the number requested does not exceed to maximum allowed\r
1922 \r
1923         if( p_umv_buf && p_umv_buf->command )\r
1924         {\r
1925                 // For user mode calls - obtain and verify the vendor information\r
1926                 if ((p_umv_buf->input_size - sizeof (u_int32_t))  != \r
1927                         hca_ul_info->cq_ul_resources_sz ||\r
1928                         NULL == p_umv_buf->p_inout_buf) {\r
1929                                 status = IB_INVALID_PARAMETER;\r
1930                                 goto cleanup;\r
1931                         }\r
1932                         cq_ul_resources_p = (void *)p_umv_buf->p_inout_buf;\r
1933 \r
1934                         /* get the current protection context */ \r
1935                         prot_ctx = MOSAL_get_current_prot_ctx();\r
1936         } else {\r
1937                 // for kernel mode calls - allocate app resources. Use prep->call->done sequence\r
1938                 cq_ul_resources_p = cl_zalloc( hca_ul_info->cq_ul_resources_sz);\r
1939                 if (!cq_ul_resources_p) {\r
1940                         status = IB_INSUFFICIENT_MEMORY;\r
1941                         goto cleanup;\r
1942                 }\r
1943                 if (HH_OK != THHUL_cqm_create_cq_prep(hobul_p->hhul_hndl, *p_size, &hhul_cq_hndl, &cq_size, cq_ul_resources_p)) {\r
1944                         status = IB_ERROR;\r
1945                         goto cleanup;\r
1946                 }\r
1947                 /* get the current protection context */ \r
1948                 prot_ctx = MOSAL_get_kernel_prot_ctx();\r
1949         }\r
1950 \r
1951         // Allocate the CQ (cmdif)\r
1952         if (HH_OK != THH_hob_create_cq(hobul_p->hh_hndl, prot_ctx, cq_ul_resources_p, &cq_num)) {\r
1953                 status = IB_INSUFFICIENT_RESOURCES;\r
1954                 goto cleanup_cq;\r
1955         }\r
1956 \r
1957         if( !(p_umv_buf && p_umv_buf->command) )\r
1958         {\r
1959                 // Manage user level resources\r
1960                 if (HH_OK != THHUL_cqm_create_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl, cq_num, cq_ul_resources_p)) {\r
1961                         THH_hob_destroy_cq(hobul_p->hh_hndl, cq_num);\r
1962                         status = IB_ERROR;\r
1963                         goto cleanup_cq;\r
1964                 }\r
1965         }\r
1966 \r
1967         // Save data refs for future use\r
1968         cq_idx = cq_num & hobul_p->cq_idx_mask;\r
1969         VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_ERROR, cleanup_cq);\r
1970         cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
1971         hobul_p->cq_info_tbl[cq_idx].hca_idx = hob_p->index;\r
1972         hobul_p->cq_info_tbl[cq_idx].cq_num = cq_num;\r
1973 //      hobul_p->cq_info_tbl[cq_idx].pd_num = pd_idx;\r
1974         hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl = hhul_cq_hndl;\r
1975         hobul_p->cq_info_tbl[cq_idx].cq_context = cq_context;\r
1976         hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p = cq_ul_resources_p;\r
1977         hobul_p->cq_info_tbl[cq_idx].kernel_mode = !(p_umv_buf && p_umv_buf->command);\r
1978         hobul_p->cq_info_tbl[cq_idx].mark = E_MARK_CQ;\r
1979         cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
1980 \r
1981         // Update CA object count\r
1982         cl_atomic_inc(&hobul_p->count);\r
1983         CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("HCA %d count %d\n", h_ca->index, hobul_p->count));\r
1984 \r
1985         *p_size = cq_size;\r
1986         if (ph_cq) *ph_cq = (ib_cq_handle_t)CQ_HNDL_FROM_CQ(cq_idx);\r
1987 \r
1988         if( p_umv_buf && p_umv_buf->command )\r
1989         {\r
1990                 p_umv_buf->output_size = p_umv_buf->input_size;\r
1991                 p_umv_buf->status = IB_SUCCESS;\r
1992                 /* \r
1993                 * Copy the cq_idx back to user\r
1994                 */\r
1995                 cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->cq_ul_resources_sz),\r
1996                         &cq_num, sizeof (cq_num));\r
1997         }\r
1998         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
1999         return IB_SUCCESS;\r
2000 \r
2001 cleanup_cq:\r
2002         THHUL_cqm_destroy_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl);\r
2003 \r
2004 cleanup:\r
2005         if( !(p_umv_buf && p_umv_buf->command) && cq_ul_resources_p )\r
2006                 cl_free( cq_ul_resources_p);\r
2007         if( p_umv_buf && p_umv_buf->command )\r
2008         {\r
2009                 p_umv_buf->output_size = 0;\r
2010                 p_umv_buf->status = status;\r
2011         }\r
2012         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));\r
2013         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
2014         return status;\r
2015 }\r
2016 \r
/*
 * mlnx_resize_cq
 *
 * Resize the completion queue identified by h_cq so it can hold at least
 * *p_size entries; on success *p_size is updated to the actual size granted.
 *
 * Parameters:
 *   h_cq       - CQ handle encoding both the HCA index and the CQ number.
 *   p_size     - in: requested entry count; out: actual entry count.
 *   p_umv_buf  - non-NULL with a non-zero 'command' when the call originates
 *                from user mode; then p_inout_buf carries the UVP's
 *                CQ UL-resources blob of size cq_ul_resources_sz.
 *
 * Returns IB_SUCCESS, IB_INVALID_PARAMETER, IB_INVALID_CQ_HANDLE or IB_ERROR.
 *
 * Locking: the per-CQ mutex is held across the prep/resize/done sequence and
 * released on every exit path (cleanup_locked vs. cleanup).
 *
 * NOTE(review): VALIDATE_INDEX is assumed to set 'status' and jump to the
 * supplied label on a bad index - confirm against its definition in hca_data.h.
 */
ib_api_status_t
mlnx_resize_cq (
	IN		const	ib_cq_handle_t				h_cq,
	IN	OUT			uint32_t					*p_size,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t		status;

	/* Decode HCA index and CQ number packed into the opaque handle. */
	u_int32_t			hca_idx = CQ_HCA_FROM_HNDL(h_cq);
	u_int32_t			cq_num  = CQ_NUM_FROM_HNDL(h_cq);
	u_int32_t			cq_idx;
	mlnx_hobul_t		*hobul_p;

	HHUL_cq_hndl_t		hhul_cq_hndl;
	void				*cq_ul_resources_p = NULL;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	/* The size parameter is both input (request) and output (granted). */
	if (!p_size) {
		status = IB_INVALID_PARAMETER;
		goto cleanup;
	}
	VALIDATE_INDEX(hca_idx,   MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);
	hobul_p = mlnx_hobul_array[hca_idx];
	if (NULL == hobul_p) {
		status = IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}

	/* Map CQ number to a slot in the per-HCA CQ table and verify it is live. */
	cq_idx = cq_num & hobul_p->cq_idx_mask;
	VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);
	if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) {
		status =  IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}

	/* Serialize with all other operations on this CQ. */
	cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);

	hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;

	if( p_umv_buf && p_umv_buf->command )
	{
		// For user mode calls - obtain and verify the vendor information
		if( p_umv_buf->input_size != hobul_p->cq_ul_resources_sz ||
			NULL == p_umv_buf->p_inout_buf )
		{
			status = IB_INVALID_PARAMETER;
			goto cleanup_locked;
		}
		cq_ul_resources_p = (void *)p_umv_buf->p_inout_buf;

	} else {
		// for kernel mode calls - obtain the saved app resources. Use prep->call->done sequence
		cq_ul_resources_p = hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p;

		/* NOTE(review): the prep result is stored in an ib_api_status_t and
		 * compared against IB_SUCCESS, while the other HH/THHUL calls in this
		 * file are compared against HH_OK - confirm the return type of
		 * THHUL_cqm_resize_cq_prep; a non-zero HH code would be returned to
		 * the caller as-is rather than mapped to an ib_api_status_t. */
		status = THHUL_cqm_resize_cq_prep(
			hobul_p->hhul_hndl, hhul_cq_hndl,
			*p_size, p_size, cq_ul_resources_p );
		if( status != IB_SUCCESS )
			goto cleanup_locked;
	}

	/* Perform the actual resize in the HH (hardware) layer. */
	if (HH_OK != THH_hob_resize_cq(hobul_p->hh_hndl, cq_num, cq_ul_resources_p)) {
		status = IB_ERROR;
		goto cleanup_locked;
	}

	// DONE: when called on behalf of kernel module
	/* For user-mode CQs the UVP performs the 'done' step itself. */
	if (hobul_p->cq_info_tbl[cq_idx].kernel_mode) {
		if (HH_OK != THHUL_cqm_resize_cq_done( hobul_p->hhul_hndl, hhul_cq_hndl, cq_ul_resources_p))
		{
			status = IB_ERROR;
			goto cleanup_locked;
		}
	}

	cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);

	/* Hand the updated UL-resources blob back to the user-mode caller. */
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = p_umv_buf->input_size;
		p_umv_buf->status = IB_SUCCESS;
	}
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup_locked:
	cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);

cleanup:
	/* Report the failure back through the UMV buffer for user-mode callers. */
	if( p_umv_buf && p_umv_buf->command )
	{
		p_umv_buf->output_size = 0;
		p_umv_buf->status = status;
	}
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
2116 \r
2117 ib_api_status_t\r
2118 mlnx_query_cq (\r
2119         IN              const   ib_cq_handle_t                          h_cq,\r
2120                 OUT                     uint32_t                                        *p_size,\r
2121         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
2122 {\r
2123         ib_api_status_t         status;\r
2124 \r
2125         u_int32_t                       hca_idx = CQ_HCA_FROM_HNDL(h_cq);\r
2126         u_int32_t                       cq_num  = CQ_NUM_FROM_HNDL(h_cq);\r
2127         u_int32_t                       cq_idx;\r
2128         mlnx_hobul_t            *hobul_p;\r
2129         HHUL_cq_hndl_t          hhul_cq_hndl;\r
2130 \r
2131         CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
2132 \r
2133         if (!p_size) {\r
2134                 status = IB_INVALID_PARAMETER;\r
2135                 goto cleanup;\r
2136         }\r
2137 \r
2138         /* Query is fully handled in user-mode. */\r
2139         if( p_umv_buf && p_umv_buf->command )\r
2140         {\r
2141                 status = IB_INVALID_CQ_HANDLE;\r
2142                 goto cleanup;\r
2143         }\r
2144 \r
2145         VALIDATE_INDEX(hca_idx,   MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);\r
2146         hobul_p = mlnx_hobul_array[hca_idx];\r
2147         if (NULL == hobul_p) {\r
2148                 status = IB_INVALID_CQ_HANDLE;\r
2149                 goto cleanup;\r
2150         }\r
2151 \r
2152         cq_idx = cq_num & hobul_p->cq_idx_mask;\r
2153         VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);\r
2154         if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) {\r
2155                 status =  IB_INVALID_CQ_HANDLE;\r
2156                 goto cleanup;\r
2157         }\r
2158 \r
2159         cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
2160 \r
2161         hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;\r
2162         if (HH_OK != THHUL_cqm_query_cq(hobul_p->hhul_hndl, hhul_cq_hndl, p_size)){\r
2163                 status = IB_ERROR;\r
2164                 goto cleanup_locked;\r
2165         }\r
2166 \r
2167         cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
2168 \r
2169         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
2170         return IB_SUCCESS;\r
2171 \r
2172 cleanup_locked:\r
2173         cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);\r
2174 \r
2175 cleanup:\r
2176         if( p_umv_buf && p_umv_buf->command )\r
2177         {\r
2178                 p_umv_buf->output_size = 0;\r
2179                 p_umv_buf->status = status;\r
2180         }\r
2181         CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));\r
2182         CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
2183         return status;\r
2184 }\r
2185 \r
/*
 * mlnx_destroy_cq
 *
 * Destroy the completion queue identified by h_cq and release its resources.
 *
 * Teardown order (order-critical, performed under the per-CQ mutex):
 *   1. destroy the CQ in the HH (hardware) layer,
 *   2. for kernel-mode CQs, complete the UL-layer 'done' step and free the
 *      saved UL-resources blob (user-mode CQs free theirs in the UVP),
 *   3. mark the table slot invalid, then release the mutex,
 *   4. drop the CA's object refcount.
 *
 * Returns IB_SUCCESS, IB_INVALID_CQ_HANDLE or IB_ERROR.
 *
 * NOTE(review): VALIDATE_INDEX is assumed to set 'status' and jump to the
 * supplied label on a bad index - confirm against its definition in hca_data.h.
 */
ib_api_status_t
mlnx_destroy_cq (
	IN		const	ib_cq_handle_t				h_cq)
{
	ib_api_status_t status;

	/* Decode HCA index and CQ number packed into the opaque handle. */
	u_int32_t	 hca_idx = CQ_HCA_FROM_HNDL(h_cq);
	u_int32_t	 cq_num  = CQ_NUM_FROM_HNDL(h_cq);
	u_int32_t		cq_idx;
//	u_int32_t	 pd_idx = 0;
	mlnx_hobul_t	 *hobul_p;
	HHUL_cq_hndl_t	 hhul_cq_hndl;

	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);

	VALIDATE_INDEX(hca_idx,   MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);
	hobul_p = mlnx_hobul_array[hca_idx];
	if (NULL == hobul_p) {
		status = IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}

	/* Map CQ number to a slot in the per-HCA CQ table and verify it is live. */
	cq_idx = cq_num & hobul_p->cq_idx_mask;
	VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);
	if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) {
		status =  IB_INVALID_CQ_HANDLE;
		goto cleanup;
	}

	/* Serialize with all other operations on this CQ. */
	cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);

	hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;
//	pd_idx       = hobul_p->cq_info_tbl[cq_idx].pd_num;
//	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup);
//	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
//		status =  IB_INVALID_PD_HANDLE;
//		goto cleanup_locked;
//	}

	// PREP: no PREP required for destroy_cq
	if (HH_OK != THH_hob_destroy_cq(hobul_p->hh_hndl, cq_num)) {
		status = IB_ERROR;
		goto cleanup_locked;
	}

	/* Kernel-mode CQs own their UL resources; complete the 'done' step and
	 * free the blob here. User-mode CQs are finished by the UVP. */
	if (hobul_p->cq_info_tbl[cq_idx].kernel_mode) {
		if (HH_OK != THHUL_cqm_destroy_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl)) {
			status = IB_ERROR;
			goto cleanup_locked;
		}
		if (hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p)
			cl_free( hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p);
	}

	/* Invalidate the slot before dropping the lock so no one can reuse it. */
	hobul_p->cq_info_tbl[cq_idx].mark = E_MARK_INVALID;
	hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p = NULL;
	cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);

	// Update CA object count
	cl_atomic_dec(&hobul_p->count);
	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CA %d count %d\n", hca_idx, hobul_p->count));


	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return IB_SUCCESS;

cleanup_locked:
	cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex);

cleanup:
	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %s\n", ib_get_err_str(status)));
	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
	return status;
}
2260 \r
2261 \r
2262 void\r
2263 setup_ci_interface(\r
2264         IN              const   ib_net64_t                                      ca_guid,\r
2265         IN      OUT                     ci_interface_t                          *p_interface )\r
2266 {\r
2267         cl_memclr(p_interface, sizeof(*p_interface));\r
2268 \r
2269         /* Guid of the CA. */\r
2270         p_interface->guid = ca_guid;\r
2271 \r
2272         /* Version of this interface. */\r
2273         p_interface->version = VERBS_VERSION;\r
2274 \r
2275         /* UVP name */\r
2276         cl_memcpy( p_interface->libname, mlnx_uvp_lib_name, MAX_LIB_NAME);\r
2277 \r
2278         CL_TRACE(MLNX_DBG_TRACE, g_mlnx_dbg_lvl, ("UVP filename %s\n", p_interface->libname)); \r
2279 \r
2280         /* The real interface. */\r
2281         p_interface->open_ca = mlnx_open_ca;\r
2282         p_interface->query_ca = mlnx_query_ca;\r
2283         p_interface->modify_ca = mlnx_modify_ca; // ++\r
2284         p_interface->close_ca = mlnx_close_ca;\r
2285         p_interface->um_open_ca = mlnx_um_open;\r
2286         p_interface->um_close_ca = mlnx_um_close;\r
2287 \r
2288         p_interface->allocate_pd = mlnx_allocate_pd;\r
2289         p_interface->deallocate_pd = mlnx_deallocate_pd;\r
2290 \r
2291         p_interface->create_av = mlnx_create_av;\r
2292         p_interface->query_av = mlnx_query_av;\r
2293         p_interface->modify_av = mlnx_modify_av;\r
2294         p_interface->destroy_av = mlnx_destroy_av;\r
2295 \r
2296         p_interface->create_qp = mlnx_create_qp;\r
2297         p_interface->create_spl_qp = mlnx_create_spl_qp;\r
2298         p_interface->modify_qp = mlnx_modify_qp;\r
2299         p_interface->query_qp = mlnx_query_qp;\r
2300         p_interface->destroy_qp = mlnx_destroy_qp;\r
2301 \r
2302         p_interface->create_cq = mlnx_create_cq;\r
2303         p_interface->resize_cq = mlnx_resize_cq;\r
2304         p_interface->query_cq = mlnx_query_cq;\r
2305         p_interface->destroy_cq = mlnx_destroy_cq;\r
2306 \r
2307         p_interface->local_mad = mlnx_local_mad;\r
2308         \r
2309         p_interface->vendor_call = fw_access_ctrl;\r
2310 \r
2311         mlnx_memory_if(p_interface);\r
2312         mlnx_direct_if(p_interface);\r
2313         mlnx_mcast_if(p_interface);\r
2314 \r
2315 \r
2316         return;\r
2317 }\r
2318 \r
2319 #if 0\r
2320 CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__));\r
2321 #endif\r