mthca/mlx4: add check to validate output data sizes
[mirror/winof/.git] / hw / mthca / kernel / hca_verbs.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
4  * Portions Copyright (c) 2008 Microsoft Corporation.  All rights reserved.\r
5  *\r
6  * This software is available to you under the OpenIB.org BSD license\r
7  * below:\r
8  *\r
9  *     Redistribution and use in source and binary forms, with or\r
10  *     without modification, are permitted provided that the following\r
11  *     conditions are met:\r
12  *\r
13  *      - Redistributions of source code must retain the above\r
14  *        copyright notice, this list of conditions and the following\r
15  *        disclaimer.\r
16  *\r
17  *      - Redistributions in binary form must reproduce the above\r
18  *        copyright notice, this list of conditions and the following\r
19  *        disclaimer in the documentation and/or other materials\r
20  *        provided with the distribution.\r
21  *\r
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
23  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
24  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
25  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
26  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
27  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
28  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
29  * SOFTWARE.\r
30  *\r
31  * $Id$\r
32  */\r
33 \r
34 \r
35 #include "hca_driver.h"\r
36 #if defined(EVENT_TRACING)\r
37 #ifdef offsetof\r
38 #undef offsetof\r
39 #endif\r
40 #include "hca_verbs.tmh"\r
41 #endif\r
42 #include "mthca_dev.h"\r
43 #include "ib_cache.h"\r
44 #include "mx_abi.h"\r
45 #include "mt_pa_cash.h"\r
46 \r
47 \r
48 \r
// Local declarations

/* Forward declaration: mlnx_query_qp is defined later in this file but is
 * referenced by earlier verb tables/paths. */
ib_api_status_t
mlnx_query_qp (
	IN		const	ib_qp_handle_t				h_qp,
		OUT			ib_qp_attr_t				*p_qp_attr,
	IN	OUT			ci_umv_buf_t				*p_umv_buf );
55 \r
56 /* \r
57 * CA Access Verbs\r
58 */\r
59 ib_api_status_t\r
60 mlnx_open_ca (\r
61         IN              const   ib_net64_t                                      ca_guid, // IN  const char *                ca_name,\r
62         IN              const   ci_async_event_cb_t                     pfn_async_event_cb,\r
63         IN              const   void*const                                      ca_context,\r
64                 OUT                     ib_ca_handle_t                          *ph_ca)\r
65 {\r
66         mlnx_hca_t                              *p_hca;\r
67         ib_api_status_t status = IB_NOT_FOUND;\r
68         struct ib_device *ib_dev;\r
69 \r
70         HCA_ENTER(HCA_DBG_SHIM);\r
71         HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,\r
72                 ("context 0x%p\n", ca_context));\r
73 \r
74         // find CA object\r
75         p_hca = mlnx_hca_from_guid( ca_guid );\r
76         if( !p_hca ) {\r
77                 if (status != IB_SUCCESS) \r
78                 {\r
79                         HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
80                         ("completes with ERROR status IB_NOT_FOUND\n"));\r
81                 }\r
82                 HCA_EXIT(HCA_DBG_SHIM);\r
83                 return IB_NOT_FOUND;\r
84         }\r
85 \r
86         ib_dev = &p_hca->mdev->ib_dev;\r
87 \r
88         if (mthca_is_livefish(p_hca->mdev)) \r
89                 goto done;\r
90 \r
91         HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,\r
92                 ("context 0x%p\n", ca_context));\r
93         status = mlnx_hobs_set_cb(&p_hca->hob,\r
94                 pfn_async_event_cb,\r
95                 ca_context);\r
96         if (IB_SUCCESS != status) {\r
97                 goto err_set_cb;\r
98         }\r
99 \r
100         \r
101         //TODO: do we need something for kernel users ?\r
102 \r
103         // Return pointer to HOB object\r
104 done:   \r
105         if (ph_ca) *ph_ca = &p_hca->hob;\r
106         status =  IB_SUCCESS;\r
107 \r
108 //err_mad_cache:\r
109 err_set_cb:\r
110         if (status != IB_SUCCESS)\r
111         {\r
112                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,\r
113                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
114         }\r
115         HCA_EXIT(HCA_DBG_SHIM);\r
116         return status;\r
117 }\r
118 \r
119 static void\r
120 mlnx_register_event_handler (\r
121         IN              const   ib_ca_handle_t                          h_ca,\r
122         IN                              ci_event_handler_t*                     p_reg)\r
123 {\r
124         mlnx_hob_t *hob_p = (mlnx_hob_t *) h_ca;\r
125         KIRQL irql;\r
126 \r
127         KeAcquireSpinLock(&hob_p->event_list_lock, &irql);\r
128         InsertTailList(&hob_p->event_list, &p_reg->entry);\r
129         KeReleaseSpinLock(&hob_p->event_list_lock, irql);\r
130 }\r
131 \r
132 static void\r
133 mlnx_unregister_event_handler (\r
134         IN              const   ib_ca_handle_t                          h_ca,\r
135         IN                              ci_event_handler_t*                     p_reg)\r
136 {\r
137         mlnx_hob_t *hob_p = (mlnx_hob_t *) h_ca;\r
138         KIRQL irql;\r
139 \r
140         KeAcquireSpinLock(&hob_p->event_list_lock, &irql);\r
141         RemoveEntryList(&p_reg->entry);\r
142         KeReleaseSpinLock(&hob_p->event_list_lock, irql);\r
143 }\r
144 \r
/*
 * Query full HCA attributes into one variable-size caller buffer.
 *
 * The ib_ca_attr_t is laid out as a single contiguous block: the fixed
 * struct, then the page-size array, the per-port attribute array, each
 * port's GID table and PKEY table, the board-id string and the uplink
 * info - each piece PTR_ALIGN'ed.  required_size computed up front MUST
 * exactly match the pointer walk done with last_p below.
 *
 * p_byte_count: in - caller's buffer size; out - required size.
 * Returns IB_INSUFFICIENT_MEMORY (with *p_byte_count set) when the
 * caller's buffer is NULL or too small - the standard two-call pattern.
 * User-mode calls (p_umv_buf->command set) are rejected.
 */
ib_api_status_t
mlnx_query_ca (
	IN		const	ib_ca_handle_t				h_ca,
		OUT			ib_ca_attr_t				*p_ca_attr,
	IN	OUT			uint32_t					*p_byte_count,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t		status;
	uint32_t			size, required_size;
	uint8_t			port_num, num_ports;
	uint32_t			num_gids, num_pkeys;
	uint32_t			num_page_sizes = 1; // TBD: what is actually supported
	uint8_t				*last_p;
	struct ib_device_attr props;
	struct ib_port_attr  *hca_ports = NULL;
	int i;
	
	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );
	int err;
	
	HCA_ENTER(HCA_DBG_SHIM);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM ,("User mode is not supported yet\n"));
			p_umv_buf->status = status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}

	/* the queries below may block, so the caller must be at PASSIVE_LEVEL */
	if( !cl_is_blockable() ) {
			status = IB_UNSUPPORTED;
			goto err_unsupported;
	}

	if (NULL == p_byte_count) {
		status = IB_INVALID_PARAMETER;
		goto err_byte_count;
	}

	// query the device
	err = mthca_query_device(ib_dev, &props );
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, 
			("ib_query_device failed (%d)\n",err));
		status = errno_to_iberr(err);
		goto err_query_device;
	}
	
	// alocate arrary for port properties
	num_ports = ib_dev->phys_port_cnt;   /* Number of physical ports of the HCA */             
	if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof *hca_ports))) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("Failed to cl_zalloc ports array\n"));
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_ports;
	}

	// start calculation of ib_ca_attr_t full size
	num_gids = 0;
	num_pkeys = 0;
	required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) +
		PTR_ALIGN(sizeof(uint32_t) * num_page_sizes) +
		PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports)+
		PTR_ALIGN(MTHCA_BOARD_ID_LEN)+
		PTR_ALIGN(sizeof(uplink_info_t));	/* uplink info */
	
	// get port properties
	/* port_num here is a 0-based offset; the hardware port is
	 * port_num + start_port(ib_dev).  Each port contributes its GID and
	 * PKEY table sizes to the total. */
	for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num) {
		// request
		err = mthca_query_port(ib_dev, port_num + start_port(ib_dev), &hca_ports[port_num]);
		if (err) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("ib_query_port failed(%d) for port %d\n",err, port_num));
			status = errno_to_iberr(err);
			goto err_query_port;
		}

		// calculate GID table size
		num_gids  = hca_ports[port_num].gid_tbl_len;
		size = PTR_ALIGN(sizeof(ib_gid_t)  * num_gids);
		required_size += size;

		// calculate pkeys table size
		num_pkeys = hca_ports[port_num].pkey_tbl_len;
		size = PTR_ALIGN(sizeof(uint16_t) * num_pkeys);
		required_size += size;
	}

	// resource sufficience check
	/* two-call pattern: always report the required size; only log when a
	 * (too small) buffer was actually supplied */
	if (NULL == p_ca_attr || *p_byte_count < required_size) {
		*p_byte_count = required_size;
		status = IB_INSUFFICIENT_MEMORY;
		if ( p_ca_attr != NULL) {
			HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, 
				("Failed *p_byte_count (%d) < required_size (%d)\n", *p_byte_count, required_size ));
		}
		goto err_insuff_mem;
	}
	RtlZeroMemory(p_ca_attr, required_size);

	// Space is sufficient - setup table pointers
	/* walk last_p through the buffer in the exact order used for
	 * required_size above; any divergence breaks the CL_ASSERT below */
	last_p = (uint8_t*)p_ca_attr;
	last_p += PTR_ALIGN(sizeof(*p_ca_attr));

	p_ca_attr->p_page_size = (uint32_t*)last_p;
	last_p += PTR_ALIGN(num_page_sizes * sizeof(uint32_t));

	p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p;
	last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t));

	for (port_num = 0; port_num < num_ports; port_num++) {
		p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p;
		size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len);
		last_p += size;

		p_ca_attr->p_port_attr[port_num].p_pkey_table = (uint16_t *)last_p;
		size = PTR_ALIGN(sizeof(uint16_t) * hca_ports[port_num].pkey_tbl_len);
		last_p += size;
	}
	
	//copy vendor specific data
	cl_memcpy(last_p,to_mdev(ib_dev)->board_id, MTHCA_BOARD_ID_LEN);
	last_p += PTR_ALIGN(MTHCA_BOARD_ID_LEN);
	*(uplink_info_t*)last_p = to_mdev(ib_dev)->uplink_info;
	last_p += PTR_ALIGN(sizeof(uplink_info_t));	/* uplink info */
	
	// Separate the loops to ensure that table pointers are always setup
	for (port_num = 0; port_num < num_ports; port_num++) {

		// get pkeys, using cache
		for (i=0; i < hca_ports[port_num].pkey_tbl_len; ++i) {
			err = ib_get_cached_pkey( ib_dev, port_num + start_port(ib_dev), i,
				&p_ca_attr->p_port_attr[port_num].p_pkey_table[i] );
			if (err) {
				status = errno_to_iberr(err);
				HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, 
					("ib_get_cached_pkey failed (%d) for port_num %d, index %d\n",
					err, port_num + start_port(ib_dev), i));
				goto err_get_pkey;
			}
		}
		
		// get gids, using cache
		for (i=0; i < hca_ports[port_num].gid_tbl_len; ++i) {
			union ib_gid * gid = (union ib_gid *)&p_ca_attr->p_port_attr[port_num].p_gid_table[i];
			err = ib_get_cached_gid( ib_dev, port_num + start_port(ib_dev), i, (union ib_gid *)gid );
			//TODO: do we need to convert gids to little endian
			if (err) {
				status = errno_to_iberr(err);
				HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, 
					("ib_get_cached_gid failed (%d) for port_num %d, index %d\n",
					err, port_num + start_port(ib_dev), i));
				goto err_get_gid;
			}
		}

		/* trace the first GID of each port for debugging */
		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,("port %d gid0:\n", port_num));
		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,
			(" 0x%x%x%x%x%x%x%x%x-0x%x%x%x%x%x%x%x%x\n", 
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[0],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[1],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[2],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[3],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[4],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[5],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[6],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[7],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[8],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[9],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[10],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[11],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[12],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[13],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[14],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[15]));
	}

	// set result size
	p_ca_attr->size = required_size;
	/* layout invariant: the pointer walk must have consumed exactly the
	 * number of bytes computed up front */
	CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) );
	HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM , ("Space required %d used %d\n",
		required_size, (int)((uintn_t)last_p - (uintn_t)p_ca_attr) ));
	
	// !!! GID/PKEY tables must be queried before this call !!!
	mlnx_conv_hca_cap(ib_dev, &props, hca_ports, p_ca_attr);

	status = IB_SUCCESS;

err_get_gid:
err_get_pkey:
err_insuff_mem:
err_query_port:
	cl_free(hca_ports);
err_alloc_ports:
err_query_device:
err_byte_count:	
err_unsupported:
err_user_unsupported:
	/* IB_INSUFFICIENT_MEMORY is an expected size-probe result, not an error */
	if( status != IB_INSUFFICIENT_MEMORY && status != IB_SUCCESS )
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	HCA_EXIT(HCA_DBG_SHIM);
	return status;
}
348 \r
349 ib_api_status_t\r
350 mlnx_modify_ca (\r
351         IN              const   ib_ca_handle_t                          h_ca,\r
352         IN              const   uint8_t                                         port_num,\r
353         IN              const   ib_ca_mod_t                                     modca_cmd,\r
354         IN              const   ib_port_attr_mod_t                      *p_port_attr)\r
355 {\r
356 #define SET_CAP_MOD(al_mask, al_fld, ib)                \\r
357                 if (modca_cmd & al_mask) {      \\r
358                         if (p_port_attr->cap.##al_fld)          \\r
359                                 props.set_port_cap_mask |= ib;  \\r
360                         else            \\r
361                                 props.clr_port_cap_mask |= ib;  \\r
362                 }\r
363 \r
364         ib_api_status_t status;\r
365         int err;\r
366         struct ib_port_modify props;\r
367         int port_modify_mask = 0;\r
368         mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
369         struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );\r
370 \r
371         HCA_ENTER(HCA_DBG_SHIM);\r
372 \r
373         //sanity check\r
374         if( !cl_is_blockable() ) {\r
375                         status = IB_UNSUPPORTED;\r
376                         goto err_unsupported;\r
377         }\r
378         \r
379         if (port_num < start_port(ib_dev) || port_num > end_port(ib_dev)) {\r
380                 status = IB_INVALID_PORT;\r
381                 goto err_port;\r
382         }\r
383 \r
384         // prepare parameters\r
385         RtlZeroMemory(&props, sizeof(props));\r
386         SET_CAP_MOD(IB_CA_MOD_IS_SM, sm, IB_PORT_SM);\r
387         SET_CAP_MOD(IB_CA_MOD_IS_SNMP_SUPPORTED, snmp, IB_PORT_SNMP_TUNNEL_SUP);\r
388         SET_CAP_MOD(IB_CA_MOD_IS_DEV_MGMT_SUPPORTED, dev_mgmt, IB_PORT_DEVICE_MGMT_SUP);\r
389         SET_CAP_MOD(IB_CA_MOD_IS_VEND_SUPPORTED, vend, IB_PORT_VENDOR_CLASS_SUP);\r
390         if ((modca_cmd & IB_CA_MOD_QKEY_CTR) && (p_port_attr->qkey_ctr == 0)) \r
391                 port_modify_mask |= IB_PORT_RESET_QKEY_CNTR;\r
392         \r
393         // modify port\r
394         err = mthca_modify_port(ib_dev, port_num, port_modify_mask, &props );\r
395         if (err) {\r
396                 status = errno_to_iberr(err);\r
397                 HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mthca_modify_port failed (%d) \n",err));\r
398                 goto err_modify_port;\r
399         }\r
400 \r
401         status =        IB_SUCCESS;\r
402 \r
403 err_modify_port:\r
404 err_port:\r
405 err_unsupported:\r
406         if (status != IB_SUCCESS)\r
407         {\r
408                 HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,\r
409                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
410         }\r
411         HCA_EXIT(HCA_DBG_SHIM);\r
412         return status;\r
413 }\r
414 \r
415 ib_api_status_t\r
416 mlnx_close_ca (\r
417         IN                              ib_ca_handle_t                          h_ca)\r
418 {\r
419         mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
420         HCA_ENTER(HCA_DBG_SHIM);\r
421 \r
422         if (mthca_is_livefish(MDEV_FROM_HOB( hob_p ))) \r
423                 goto done;\r
424 \r
425         mlnx_hobs_remove(h_ca);\r
426 \r
427 done:\r
428         HCA_EXIT(HCA_DBG_SHIM);\r
429         \r
430         return IB_SUCCESS;\r
431 }\r
432 \r
433 \r
/*
 * Create a user-mode (or kernel pseudo-user) context for the HCA.
 *
 * Two paths:
 *  - no p_umv_buf->command: a kernel caller - allocate a bare zeroed
 *    ib_ucontext (no PD, no UAR mapping) and skip to common init;
 *  - command set: a real user-mode open - create the ucontext in the
 *    low-level driver, allocate a PD for it, and fill the response
 *    structure the usermode library expects.
 *
 * On success the context is chained to the device extension's context
 * list and returned via ph_um_ca.  On failure resources are unwound in
 * reverse order via the goto ladder.
 */
static ib_api_status_t
mlnx_um_open(
	IN		const	ib_ca_handle_t				h_ca,
	IN	OUT			ci_umv_buf_t* const			p_umv_buf,
		OUT			ib_ca_handle_t* const		ph_um_ca )
{
	int err;
	ib_api_status_t		status;
	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	hca_dev_ext_t *ext_p = EXT_FROM_HOB( hob_p );
	struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );
	struct ib_ucontext *p_context;
	struct ibv_get_context_resp *uresp_p;
	struct ibv_alloc_pd_resp resp;
	ci_umv_buf_t umv_buf;

	HCA_ENTER(HCA_DBG_SHIM);

	// sanity check
	ASSERT( p_umv_buf );
	if( !p_umv_buf->command )
	{
		/* kernel-mode caller: minimal context, freed later with cl_free
		 * (mlnx_um_close distinguishes it by pd == NULL) */
		p_context = cl_zalloc( sizeof(struct ib_ucontext) );
		if( !p_context )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_alloc_ucontext;
		}
		/* Copy the dev info. */
		p_context->device = ib_dev;
		p_umv_buf->output_size = 0;
		goto done;
	}

	// create user context in kernel
	p_context = mthca_alloc_ucontext(ib_dev, p_umv_buf);
	if (IS_ERR(p_context)) {
		err = PTR_ERR(p_context);
		HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM,
			("mthca_alloc_ucontext failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_alloc_ucontext;
	}

	/* allocate pd */
	/* build a private umv buffer so the PD response lands in the local
	 * `resp` rather than in the caller's buffer */
	umv_buf.command = 1;
	umv_buf.input_size = umv_buf.status = 0;
	umv_buf.output_size = sizeof(struct ibv_alloc_pd_resp);
	umv_buf.p_inout_buf = (ULONG_PTR)&resp;
	//NB: Pay attention ! Ucontext parameter is important here:
	// when it is present (i.e. - for user space) - mthca_alloc_pd won't create MR
	p_context->pd = ibv_alloc_pd(ib_dev, p_context, &umv_buf);
	if (IS_ERR(p_context->pd)) {
		err = PTR_ERR(p_context->pd);
		HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
			("ibv_alloc_pd failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_alloc_pd;
	}
	
	// fill more parameters for user (sanity checks are in mthca_alloc_ucontext)
	uresp_p = (struct ibv_get_context_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf;
	uresp_p->uar_addr = (uint64_t)(UINT_PTR)p_context->user_uar;
	uresp_p->pd_handle = resp.pd_handle;
	uresp_p->pdn = resp.pdn;
	uresp_p->vend_id = (uint32_t)ext_p->hcaConfig.VendorID;
	uresp_p->dev_id = (uint16_t)ext_p->hcaConfig.DeviceID;

done:
	// some more inits
	p_context->va = p_context->p_mdl = NULL;
	p_context->fw_if_open = FALSE;
	KeInitializeMutex( &p_context->mutex, 0 );
	// chain user context to the device
	cl_spinlock_acquire( &ext_p->uctx_lock );
	cl_qlist_insert_tail( &ext_p->uctx_list, &p_context->list_item );
	cl_atomic_inc(&ext_p->usecnt);
	cl_spinlock_release( &ext_p->uctx_lock );
	
	// return the result
	if (ph_um_ca) *ph_um_ca = (ib_ca_handle_t)p_context;

	status = IB_SUCCESS;
	goto end;
	
err_alloc_pd:
	mthca_dealloc_ucontext(p_context);
err_alloc_ucontext: 
end:
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	if (status != IB_SUCCESS) 
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_SHIM);
	return status;
}
533 \r
534 static void\r
535 mlnx_um_close(\r
536         IN                              ib_ca_handle_t                          h_ca,\r
537         IN                              ib_ca_handle_t                          h_um_ca )\r
538 {\r
539         struct ib_ucontext *p_ucontext = (struct ib_ucontext *)h_um_ca;\r
540         mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
541         hca_dev_ext_t *ext_p = EXT_FROM_HOB( hob_p );\r
542 \r
543         if (mthca_is_livefish(to_mdev(p_ucontext->device)))\r
544                 goto done;\r
545         unmap_crspace_for_all(p_ucontext);\r
546 done:   \r
547         cl_spinlock_acquire( &ext_p->uctx_lock );\r
548         cl_qlist_remove_item( &ext_p->uctx_list, &p_ucontext->list_item );\r
549         cl_atomic_dec(&ext_p->usecnt);\r
550         cl_spinlock_release( &ext_p->uctx_lock );\r
551         if( !p_ucontext->pd )\r
552                 cl_free( h_um_ca );\r
553         else\r
554                 ibv_um_close(p_ucontext);\r
555         pa_cash_print();\r
556         return;\r
557 }\r
558 \r
559 \r
560 /*\r
561 *    Protection Domain and Reliable Datagram Domain Verbs\r
562 */\r
563 \r
564 ib_api_status_t\r
565 mlnx_allocate_pd (\r
566         IN              const   ib_ca_handle_t                          h_ca,\r
567         IN              const   ib_pd_type_t                            type,\r
568                 OUT                     ib_pd_handle_t                          *ph_pd,\r
569         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
570 {\r
571         ib_api_status_t         status;\r
572         struct ib_device *ib_dev;\r
573         struct ib_ucontext *p_context;\r
574         struct ib_pd *ib_pd_p;\r
575         int err;\r
576 \r
577         //TODO: how are we use it ?\r
578         UNREFERENCED_PARAMETER(type);\r
579         \r
580         HCA_ENTER(HCA_DBG_PD);\r
581 \r
582         if( p_umv_buf ) {\r
583                 p_context = (struct ib_ucontext *)h_ca;\r
584                 ib_dev = p_context->device;\r
585         }\r
586         else {\r
587                 mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
588                 p_context = NULL;\r
589                 ib_dev = IBDEV_FROM_HOB( hob_p );\r
590         }\r
591         \r
592         // create PD\r
593         ib_pd_p = ibv_alloc_pd(ib_dev, p_context, p_umv_buf);\r
594         if (IS_ERR(ib_pd_p)) {\r
595                 err = PTR_ERR(ib_pd_p);\r
596                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD,\r
597                         ("ibv_alloc_pd failed (%d)\n", err));\r
598                 status = errno_to_iberr(err);\r
599                 goto err_alloc_pd;\r
600         }\r
601 \r
602         // return the result\r
603         if (ph_pd) *ph_pd = (ib_pd_handle_t)ib_pd_p;\r
604 \r
605         status = IB_SUCCESS;\r
606         \r
607 err_alloc_pd:   \r
608         if (p_umv_buf && p_umv_buf->command) \r
609                 p_umv_buf->status = status;\r
610         if (status != IB_SUCCESS)\r
611         {\r
612                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD,\r
613                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
614         }\r
615         HCA_EXIT(HCA_DBG_PD);\r
616         return status;\r
617 }\r
618 \r
619 ib_api_status_t\r
620 mlnx_deallocate_pd (\r
621         IN                              ib_pd_handle_t                          h_pd)\r
622 {\r
623         ib_api_status_t         status;\r
624         int err;\r
625         struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
626         PREP_IBDEV_FOR_PRINT(ib_pd_p->device);\r
627 \r
628         HCA_ENTER( HCA_DBG_PD);\r
629 \r
630         HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_PD,\r
631                 ("pcs %p\n", PsGetCurrentProcess()));\r
632         \r
633         // dealloc pd\r
634         err = ibv_dealloc_pd( ib_pd_p );\r
635         if (err) {\r
636                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD\r
637                         ,("ibv_dealloc_pd failed (%d)\n", err));\r
638                 status = errno_to_iberr(err);\r
639                 goto err_dealloc_pd;\r
640         }\r
641         status = IB_SUCCESS;\r
642 \r
643 err_dealloc_pd:\r
644         if (status != IB_SUCCESS) \r
645         {\r
646                         HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_PD\r
647                 ,("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
648         }\r
649         HCA_EXIT(HCA_DBG_PD);\r
650         return status;\r
651 }\r
652 \r
653 /* \r
654 * Address Vector Management Verbs\r
655 */\r
656 ib_api_status_t\r
657 mlnx_create_av (\r
658         IN              const   ib_pd_handle_t                          h_pd,\r
659         IN              const   ib_av_attr_t                            *p_addr_vector,\r
660                 OUT                     ib_av_handle_t                          *ph_av,\r
661         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
662 {\r
663         int err = 0;\r
664         ib_api_status_t         status = IB_SUCCESS;\r
665         struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
666         struct ib_device *ib_dev = ib_pd_p->device;\r
667         struct ib_ah *ib_av_p;\r
668         struct ib_ah_attr ah_attr;\r
669         struct ib_ucontext *p_context = NULL;\r
670 \r
671         HCA_ENTER(HCA_DBG_AV);\r
672 \r
673         if( p_umv_buf && p_umv_buf->command ) {\r
674                 // sanity checks \r
675                 if (p_umv_buf->input_size < sizeof(struct ibv_create_ah) ||\r
676                         p_umv_buf->output_size < sizeof(struct ibv_create_ah_resp) ||\r
677                         !p_umv_buf->p_inout_buf) {\r
678                         status = IB_INVALID_PARAMETER;\r
679                         goto err_inval_params;\r
680                 }\r
681                 p_context = ib_pd_p->ucontext;\r
682         }\r
683         else \r
684                 p_context = NULL;\r
685 \r
686         // fill parameters \r
687         RtlZeroMemory(&ah_attr, sizeof(ah_attr));\r
688         mlnx_conv_ibal_av( ib_dev, p_addr_vector,  &ah_attr );\r
689 \r
690         ib_av_p = ibv_create_ah(ib_pd_p, &ah_attr, p_context, p_umv_buf);\r
691         if (IS_ERR(ib_av_p)) {\r
692                 err = PTR_ERR(ib_av_p);\r
693                 HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_AV,\r
694                         ("ibv_create_ah failed (%d)\n", err));\r
695                 status = errno_to_iberr(err);\r
696                 goto err_alloc_av;\r
697         }\r
698 \r
699         // return the result\r
700         if (ph_av) *ph_av = (ib_av_handle_t)ib_av_p;\r
701 \r
702         status = IB_SUCCESS;\r
703 \r
704 err_alloc_av:   \r
705 err_inval_params:\r
706         if (p_umv_buf && p_umv_buf->command) \r
707                 p_umv_buf->status = status;\r
708         if (status != IB_SUCCESS)\r
709         {\r
710                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV,\r
711                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
712         }\r
713         HCA_EXIT(HCA_DBG_AV);\r
714         return status;\r
715 }\r
716 \r
/*
 * mlnx_query_av - return the attributes of an existing address vector (AV).
 *
 * h_av           - handle of the AV to query (an ib_ah in disguise).
 * p_addr_vector  - receives the IBAL-format address attributes.
 * ph_pd          - receives the handle of the PD the AV was created on.
 * p_umv_buf      - user-mode verbs buffer; user-mode callers are rejected
 *                  with IB_UNSUPPORTED (not implemented for this verb).
 *
 * Returns IB_SUCCESS, IB_UNSUPPORTED, or an error translated from the
 * low-level driver's errno by errno_to_iberr().
 */
ib_api_status_t
mlnx_query_av (
	IN		const	ib_av_handle_t				h_av,
		OUT			ib_av_attr_t				*p_addr_vector,
		OUT			ib_pd_handle_t				*ph_pd,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	ib_api_status_t		status = IB_SUCCESS;
	struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;
	PREP_IBDEV_FOR_PRINT(ib_ah_p->device);

	HCA_ENTER(HCA_DBG_AV);

	// sanity checks: a non-zero umv command means a user-mode caller,
	// which this verb does not support yet
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,
				("User mode is not supported yet\n"));
			status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}

	// query AV
#ifdef WIN_TO_BE_CHANGED
	//TODO: not implemented in low-level driver
	// NOTE(review): this branch is compiled out and contains a missing
	// semicolon; it is kept only as a sketch of the intended final code.
	err = ibv_query_ah(ib_ah_p, &ah_attr)
	if (err) {
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_AV,
			("ibv_query_ah failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_query_ah;
	}
	// convert to IBAL structure: something like that
	mlnx_conv_mthca_av( p_addr_vector,  &ah_attr );
#else

	// read the attributes straight out of the mthca AH and convert
	// them to the IBAL representation in one step
	err = mlnx_conv_mthca_av( ib_ah_p, p_addr_vector );
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,
			("mlnx_conv_mthca_av failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_conv_mthca_av;
	}
#endif

	// results: hand back the PD the AH was allocated on
	*ph_pd = (ib_pd_handle_t)ib_ah_p->pd;
	
err_conv_mthca_av:
err_user_unsupported:
	if (status != IB_SUCCESS)
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_AV);
	return status;
}
775 \r
/*
 * mlnx_modify_av - overwrite the attributes of an existing address vector.
 *
 * h_av           - handle of the AV to modify.
 * p_addr_vector  - new IBAL-format address attributes to apply.
 * p_umv_buf      - user-mode verbs buffer; user-mode callers are rejected
 *                  with IB_UNSUPPORTED (not implemented for this verb).
 *
 * Returns IB_SUCCESS or IB_UNSUPPORTED; the in-kernel path
 * (mlnx_modify_ah) reports no error of its own.
 */
ib_api_status_t
mlnx_modify_av (
	IN		const	ib_av_handle_t				h_av,
	IN		const	ib_av_attr_t				*p_addr_vector,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	struct ib_ah_attr ah_attr;
	ib_api_status_t		status = IB_SUCCESS;
	struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;
	struct ib_device *ib_dev = ib_ah_p->pd->device;

	HCA_ENTER(HCA_DBG_AV);

	// sanity checks: reject user-mode callers, not supported yet
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV,
				("User mode is not supported yet\n"));
			status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}

	// fill parameters: translate the IBAL attributes into the
	// low-level driver's ib_ah_attr representation
	RtlZeroMemory(&ah_attr, sizeof(ah_attr));
	mlnx_conv_ibal_av( ib_dev, p_addr_vector,  &ah_attr );

	// modify AH
#ifdef WIN_TO_BE_CHANGED
	//TODO: not implemented in low-level driver
	// NOTE(review): compiled-out sketch; has a missing semicolon and a
	// copy-pasted "ibv_query_ah" message - fix before ever enabling.
	err = ibv_modify_ah(ib_ah_p, &ah_attr)
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_AV,
			("ibv_query_ah failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_query_ah;
	}
#else

	// in-kernel path: update the AH directly; returns no status
	mlnx_modify_ah( ib_ah_p, &ah_attr );
#endif

err_user_unsupported:
	if (status != IB_SUCCESS)
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_AV);
	return status;
}
825 \r
826 ib_api_status_t\r
827 mlnx_destroy_av (\r
828         IN              const   ib_av_handle_t                          h_av)\r
829 {\r
830         int err;\r
831         ib_api_status_t         status = IB_SUCCESS;\r
832         struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;\r
833         PREP_IBDEV_FOR_PRINT(ib_ah_p->device);\r
834 \r
835         HCA_ENTER(HCA_DBG_AV);\r
836 \r
837         // destroy AV\r
838         err = ibv_destroy_ah( ib_ah_p );\r
839         if (err) {\r
840                 HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_AV,\r
841                         ("ibv_destroy_ah failed (%d)\n", err));\r
842                 status = errno_to_iberr(err);\r
843                 goto err_destroy_ah;\r
844         }\r
845 \r
846 err_destroy_ah:\r
847         if (status != IB_SUCCESS)\r
848         {\r
849                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV,\r
850                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
851         }\r
852         HCA_EXIT(HCA_DBG_AV);\r
853         return status;\r
854 }\r
855 \r
/*
*	Shared Receive Queue (SRQ) Management Verbs
*/
859 \r
860 \r
/*
 * mlnx_create_srq - create a shared receive queue (SRQ) on a PD.
 *
 * h_pd          - protection domain to create the SRQ on.
 * srq_context   - opaque consumer context stored in the SRQ.
 * event_handler - async event callback for SRQ events.
 * p_srq_attr    - requested SRQ attributes (depth, sge, limit).
 * ph_srq        - receives the new SRQ handle on success.
 * p_umv_buf     - user-mode verbs buffer; when command is set, its
 *                 input/output sizes are validated against the ABI
 *                 structures before use.
 *
 * Returns IB_SUCCESS, IB_INVALID_PARAMETER (bad umv buffer sizes), or an
 * error translated from ibv_create_srq() by errno_to_iberr().
 */
ib_api_status_t
mlnx_create_srq (
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	void					*srq_context,
	IN				ci_async_event_cb_t		event_handler,
	IN		const	ib_srq_attr_t * const		p_srq_attr,
		OUT			ib_srq_handle_t				*ph_srq,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	ib_api_status_t		status;
	struct ib_srq *ib_srq_p;
	struct ib_srq_init_attr srq_init_attr;
	struct ib_ucontext *p_context = NULL;
	struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
	struct ib_device *ib_dev = ib_pd_p->device;

	HCA_ENTER(HCA_DBG_SRQ);

	if( p_umv_buf  && p_umv_buf->command) {

		// sanity checks: both directions of the umv buffer must be
		// large enough for the create-SRQ ABI structures
		if (p_umv_buf->input_size < sizeof(struct ibv_create_srq) ||
			p_umv_buf->output_size < sizeof(struct ibv_create_srq_resp) ||
			!p_umv_buf->p_inout_buf) {
			status = IB_INVALID_PARAMETER;
			goto err_inval_params;
		}
		// user-mode call: the PD's ucontext identifies the caller
		p_context = ib_pd_p->ucontext;
	}

	// prepare the parameters
	RtlZeroMemory(&srq_init_attr, sizeof(srq_init_attr));
	srq_init_attr.event_handler = event_handler;
	srq_init_attr.srq_context = (void*)srq_context;
	srq_init_attr.attr = *p_srq_attr;

	// allocate srq 
	ib_srq_p = ibv_create_srq(ib_pd_p, &srq_init_attr, p_context, p_umv_buf );
	if (IS_ERR(ib_srq_p)) {
		err = PTR_ERR(ib_srq_p);
		HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SRQ, ("ibv_create_srq failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_create_srq;
	}

	// return the result
	if (ph_srq) *ph_srq = (ib_srq_handle_t)ib_srq_p;

	status = IB_SUCCESS;
	
err_create_srq:
err_inval_params:
	// propagate the final status back to a user-mode caller
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	if (status != IB_SUCCESS)
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_SRQ);
	return status;
}
924 \r
925 \r
926 ib_api_status_t\r
927 mlnx_modify_srq (\r
928                 IN              const   ib_srq_handle_t                         h_srq,\r
929                 IN              const   ib_srq_attr_t* const                    p_srq_attr,\r
930                 IN              const   ib_srq_attr_mask_t                      srq_attr_mask,\r
931                 IN      OUT             ci_umv_buf_t                            *p_umv_buf OPTIONAL )\r
932 {\r
933         int err;\r
934         ib_api_status_t         status = IB_SUCCESS;\r
935         struct ib_srq *ib_srq = (struct ib_srq *)h_srq;\r
936         struct ib_device *ib_dev = ib_srq->device;\r
937         UNUSED_PARAM(p_umv_buf);\r
938     UNUSED_PARAM_WOWPP(ib_dev);\r
939 \r
940         HCA_ENTER(HCA_DBG_SRQ);\r
941 \r
942         err = ibv_modify_srq(ib_srq, (void*)p_srq_attr, srq_attr_mask);\r
943         if (err) {\r
944                 HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,\r
945                         ("ibv_modify_srq failed (%d)\n", err));\r
946                 status = errno_to_iberr(err);\r
947         }\r
948 \r
949         if (status != IB_SUCCESS)\r
950         {\r
951                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,\r
952                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
953         }\r
954         HCA_EXIT(HCA_DBG_SRQ);\r
955         return status;\r
956 }\r
957 \r
958 ib_api_status_t\r
959 mlnx_query_srq (\r
960         IN              const   ib_srq_handle_t                         h_srq,\r
961                 OUT                     ib_srq_attr_t* const                    p_srq_attr,\r
962         IN      OUT                     ci_umv_buf_t                            *p_umv_buf OPTIONAL )\r
963 {\r
964         int err;\r
965         ib_api_status_t         status = IB_SUCCESS;\r
966         struct ib_srq *ib_srq = (struct ib_srq *)h_srq;\r
967         struct ib_device *ib_dev = ib_srq->device;\r
968         UNUSED_PARAM(p_umv_buf);\r
969         UNUSED_PARAM_WOWPP(ib_dev);\r
970 \r
971         HCA_ENTER(HCA_DBG_SRQ);\r
972 \r
973         err = ibv_query_srq(ib_srq, p_srq_attr);\r
974         if (err) {\r
975                 HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,\r
976                         ("ibv_query_srq failed (%d)\n", err));\r
977                 status = errno_to_iberr(err);\r
978         }\r
979 \r
980         if (status != IB_SUCCESS)\r
981         {\r
982                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,\r
983                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
984         }\r
985         HCA_EXIT(HCA_DBG_SRQ);\r
986         return status;\r
987 }\r
988 \r
989 ib_api_status_t\r
990 mlnx_destroy_srq (\r
991         IN      const   ib_srq_handle_t         h_srq )\r
992 {\r
993         int err;\r
994         ib_api_status_t         status = IB_SUCCESS;\r
995         struct ib_srq *ib_srq = (struct ib_srq *)h_srq;\r
996         struct ib_device *ib_dev = ib_srq->device;\r
997     UNUSED_PARAM_WOWPP(ib_dev);\r
998 \r
999         HCA_ENTER(HCA_DBG_SRQ);\r
1000 \r
1001         err = ibv_destroy_srq(ib_srq);\r
1002         if (err) {\r
1003                 HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,\r
1004                         ("ibv_destroy_srq failed (%d)\n", err));\r
1005                 status = errno_to_iberr(err);\r
1006         }\r
1007 \r
1008         if (status != IB_SUCCESS)\r
1009         {\r
1010                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,\r
1011                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1012         }\r
1013         HCA_EXIT(HCA_DBG_SRQ);\r
1014         return status;\r
1015 }\r
1016 \r
1017 /*\r
1018 *       Queue Pair Management Verbs\r
1019 */\r
1020 \r
1021 \r
/*
 * _create_qp - common worker for QP creation (normal and special QPs).
 *
 * h_pd          - protection domain to create the QP on.
 * port_num      - port to bind the QP to (meaningful for special QPs).
 * qp_context    - opaque consumer context stored in the QP.
 * event_handler - async event callback for QP events.
 * p_create_attr - IBAL creation attributes (CQs, SRQ, depths, sge counts).
 * p_qp_attr     - optional; if non-NULL, filled by querying the new QP.
 * ph_qp         - receives the new QP handle on success.
 * p_umv_buf     - user-mode verbs buffer (NULL for kernel callers); when
 *                 command is set, its sizes are validated against the ABI
 *                 structures before use.
 *
 * On a failed post-create query the QP is destroyed before returning, so
 * no half-constructed QP is leaked.
 *
 * Returns IB_SUCCESS, IB_INVALID_PARAMETER, or an error translated from
 * the low-level driver by errno_to_iberr().
 */
static ib_api_status_t
_create_qp (
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	uint8_t					port_num,
	IN		const	void					*qp_context,
	IN				ci_async_event_cb_t			event_handler,
	IN		const	ib_qp_create_t				*p_create_attr,
		OUT			ib_qp_attr_t				*p_qp_attr,
		OUT			ib_qp_handle_t				*ph_qp,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	ib_api_status_t		status;
	struct ib_qp * ib_qp_p;
	struct mthca_qp *qp_p;
	struct ib_qp_init_attr qp_init_attr;
	struct ib_ucontext *p_context = NULL;
	struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
	struct ib_device *ib_dev = ib_pd_p->device;
	
	HCA_ENTER(HCA_DBG_QP);

	if( p_umv_buf && p_umv_buf->command ) {
		// sanity checks: both directions of the umv buffer must be
		// large enough for the create-QP ABI structures
		if (p_umv_buf->input_size < sizeof(struct ibv_create_qp) ||
			p_umv_buf->output_size < sizeof(struct ibv_create_qp_resp) ||
			!p_umv_buf->p_inout_buf) {
			status = IB_INVALID_PARAMETER;
			goto err_inval_params;
		}
		p_context = ib_pd_p->ucontext;
	}

	// prepare the parameters: translate the IBAL creation attributes
	// into the low-level driver's ib_qp_init_attr
	RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));
	qp_init_attr.qp_type = p_create_attr->qp_type;
	qp_init_attr.event_handler = event_handler;
	qp_init_attr.qp_context = (void*)qp_context;
	qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;
	qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;
	qp_init_attr.srq = (struct ib_srq *)p_create_attr->h_srq;
	qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;
	qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;
	qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;
	qp_init_attr.cap.max_send_wr = p_create_attr->sq_depth;
	qp_init_attr.sq_sig_type = (p_create_attr->sq_signaled) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	qp_init_attr.port_num = port_num;


	// create qp		
	ib_qp_p = ibv_create_qp( ib_pd_p, &qp_init_attr, p_context, p_umv_buf );
	if (IS_ERR(ib_qp_p)) {
		err = PTR_ERR(ib_qp_p);
		HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_QP,
			("ibv_create_qp failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_create_qp;
	}

	// fill the object: remember the creation attributes so a query of
	// a still-RESET QP can be answered without touching the HW
	qp_p = (struct mthca_qp *)ib_qp_p;
	qp_p->qp_init_attr = qp_init_attr;

	// Query QP to obtain requested attributes
	if (p_qp_attr) {
		status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);
		if (status != IB_SUCCESS)
				goto err_query_qp;
	}
	
	// return the results
	if (ph_qp) *ph_qp = (ib_qp_handle_t)ib_qp_p;

	status = IB_SUCCESS;
	goto end;

err_query_qp:
	// undo the creation so the caller never sees a dangling QP
	ibv_destroy_qp( ib_qp_p );
err_create_qp:
err_inval_params:
end:
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	if (status != IB_SUCCESS)
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_QP);
	return status;
}
1113 \r
1114 ib_api_status_t\r
1115 mlnx_create_spl_qp (\r
1116         IN              const   ib_pd_handle_t                          h_pd,\r
1117         IN              const   uint8_t                                         port_num,\r
1118         IN              const   void                                            *qp_context,\r
1119         IN                              ci_async_event_cb_t                     event_handler,\r
1120         IN              const   ib_qp_create_t                          *p_create_attr,\r
1121                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
1122                 OUT                     ib_qp_handle_t                          *ph_qp )\r
1123 {\r
1124         ib_api_status_t         status;\r
1125         PREP_IBDEV_FOR_PRINT(((struct ib_pd*)h_pd)->device);\r
1126 \r
1127         HCA_ENTER(HCA_DBG_SHIM);\r
1128 \r
1129         status =        _create_qp( h_pd, port_num,\r
1130                 qp_context, event_handler, p_create_attr, p_qp_attr, ph_qp, NULL );\r
1131                 \r
1132         if (status != IB_SUCCESS)\r
1133         {\r
1134                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,\r
1135                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1136         }\r
1137         HCA_EXIT(HCA_DBG_QP);\r
1138         return status;\r
1139 }\r
1140 \r
1141 ib_api_status_t\r
1142 mlnx_create_qp (\r
1143         IN              const   ib_pd_handle_t                          h_pd,\r
1144         IN              const   void                                            *qp_context,\r
1145         IN                              ci_async_event_cb_t                     event_handler,\r
1146         IN              const   ib_qp_create_t                          *p_create_attr,\r
1147                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
1148                 OUT                     ib_qp_handle_t                          *ph_qp,\r
1149         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1150 {\r
1151         ib_api_status_t         status;\r
1152         PREP_IBDEV_FOR_PRINT(((struct ib_pd*)h_pd)->device);\r
1153 \r
1154         //NB: algorithm of mthca_alloc_sqp() requires port_num\r
1155         // PRM states, that special pares are created in couples, so\r
1156         // looks like we can put here port_num = 1 always\r
1157         uint8_t port_num = 1;\r
1158 \r
1159         HCA_ENTER(HCA_DBG_QP);\r
1160 \r
1161         status = _create_qp( h_pd, port_num,\r
1162                 qp_context, event_handler, p_create_attr, p_qp_attr, ph_qp, p_umv_buf );\r
1163                 \r
1164         if (status != IB_SUCCESS)\r
1165         {\r
1166                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,\r
1167                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1168         }\r
1169         HCA_EXIT(HCA_DBG_QP);\r
1170         return status;\r
1171 }\r
1172 \r
/*
 * mlnx_modify_qp - transition a QP to a new state / modify its attributes.
 *
 * h_qp          - handle of the QP to modify.
 * p_modify_attr - IBAL modify attributes (target state, masks, etc.).
 * p_qp_attr     - optional; if non-NULL, filled by querying the QP after
 *                 the modify.
 * p_umv_buf     - optional user-mode verbs buffer; when command is set,
 *                 output_size is validated against ibv_modify_qp_resp and
 *                 the resulting state/mask are copied back to user mode.
 *
 * A conversion result of IB_NOT_DONE means no HW modify is required;
 * the function skips straight to the query/copy-back phase.
 *
 * Returns IB_SUCCESS, IB_INVALID_PARAMETER, or an error translated from
 * the low-level driver by errno_to_iberr().
 */
ib_api_status_t
mlnx_modify_qp (
	IN		const	ib_qp_handle_t				h_qp,
	IN		const	ib_qp_mod_t					*p_modify_attr,
		OUT			ib_qp_attr_t				*p_qp_attr OPTIONAL,
	IN	OUT			ci_umv_buf_t				*p_umv_buf OPTIONAL )
{
	ib_api_status_t		status;
	int err;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
	PREP_IBDEV_FOR_PRINT(ib_qp_p->device);

	HCA_ENTER(HCA_DBG_QP);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
		// sanity checks: the response buffer must be able to hold the
		// modify-QP ABI response
		if (p_umv_buf->output_size < sizeof(struct ibv_modify_qp_resp) ||
			!p_umv_buf->p_inout_buf) {
			status = IB_INVALID_PARAMETER;
			goto err_inval_params;
		}
	}
	
	// fill parameters: translate IBAL modify attributes into the
	// low-level (qp_attr, qp_attr_mask) pair
	status = mlnx_conv_qp_modify_attr( ib_qp_p, ib_qp_p->qp_type, 
		p_modify_attr,  &qp_attr, &qp_attr_mask );
	if (status == IB_NOT_DONE)
		goto query_qp;		// nothing to change in HW
	if (status != IB_SUCCESS ) 
		goto err_mode_unsupported;

	// modify QP
	err = ibv_modify_qp(ib_qp_p, &qp_attr, qp_attr_mask);
	if (err) {
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_QP,
			("ibv_modify_qp failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_modify_qp;
	}

	// Query QP to obtain requested attributes
query_qp:	
	if (p_qp_attr) {
		status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);
		if (status != IB_SUCCESS)
				goto err_query_qp;
	}
	
	// copy the new state and applied mask back to a user-mode caller
	if( p_umv_buf && p_umv_buf->command ) {
			struct ibv_modify_qp_resp resp;
			resp.attr_mask = qp_attr_mask;
			resp.qp_state = qp_attr.qp_state;
			err = ib_copy_to_umv_buf(p_umv_buf, &resp, sizeof(struct ibv_modify_qp_resp));
			if (err) {
				HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("ib_copy_to_umv_buf failed (%d)\n", err));
				status = errno_to_iberr(err);
				goto err_copy;
			}
	}

	status = IB_SUCCESS;

err_copy:	
err_query_qp:
err_modify_qp:	
err_mode_unsupported:
err_inval_params:
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	if (status != IB_SUCCESS)
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_QP);
	return status;
}
1253 \r
/*
 * mlnx_ndi_modify_qp - NDI entry point for modifying a QP.
 *
 * h_qp          - handle of the QP to modify.
 * p_modify_attr - IBAL modify attributes.
 * p_qp_attr     - optional; forwarded to mlnx_modify_qp.
 * buf_size      - size of the caller's output buffer; must be able to
 *                 hold at least the qp_state field.
 * p_outbuf      - caller's output buffer; on success it is zeroed and
 *                 its first byte set to the resulting QP state.
 *
 * Builds a fake ci_umv_buf_t around a stack-local ibv_modify_qp_resp so
 * the common mlnx_modify_qp() path performs the copy-back, then extracts
 * just the qp_state for the NDI caller.
 *
 * Returns IB_INVALID_PARAMETER when buf_size is too small, otherwise the
 * status of mlnx_modify_qp().
 */
ib_api_status_t
mlnx_ndi_modify_qp (
	IN		const	ib_qp_handle_t				h_qp,
	IN		const	ib_qp_mod_t					*p_modify_attr,
		OUT			ib_qp_attr_t				*p_qp_attr OPTIONAL,
	IN		const	uint32_t					buf_size,
	IN				uint8_t* const				p_outbuf)
{
	ci_umv_buf_t umv_buf;
	ib_api_status_t status;
	struct ibv_modify_qp_resp resp;
	void *buf = &resp;

	HCA_ENTER(HCA_DBG_QP);

	// validate the caller's output buffer before touching it
	if (buf_size < sizeof(resp.qp_state)) {
		status = IB_INVALID_PARAMETER;
		goto out;
	}

	/* imitate umv_buf */
	umv_buf.command = TRUE; /* special case for NDI. Usually it's TRUE */
	umv_buf.input_size = 0;
	umv_buf.output_size = sizeof(struct ibv_modify_qp_resp);
	umv_buf.p_inout_buf = (ULONG_PTR)buf;

	status = mlnx_modify_qp ( h_qp, p_modify_attr, p_qp_attr, &umv_buf );

	// on success hand back only the new QP state, zero-padding the rest
	if (status == IB_SUCCESS) {
		cl_memclr( p_outbuf, buf_size );
		*p_outbuf = resp.qp_state;
	}

out:
	HCA_EXIT(HCA_DBG_QP);
	return status;
}
1291 \r
1292 \r
1293 \r
/*
 * mlnx_query_qp - return the current attributes of a QP.
 *
 * h_qp      - handle of the QP to query.
 * p_qp_attr - receives the IBAL-format QP attributes; must be non-NULL.
 * p_umv_buf - unused by this function.
 *
 * A QP still in the RESET state does not yet exist in hardware, so its
 * attributes are synthesized from the creation attributes cached in
 * mthca_qp (see _create_qp); otherwise the device's query_qp method is
 * invoked and the result converted to IBAL format.
 *
 * Returns IB_SUCCESS, IB_INVALID_PARAMETER (NULL p_qp_attr), or an error
 * translated from the device query by errno_to_iberr().
 */
ib_api_status_t
mlnx_query_qp (
	IN		const	ib_qp_handle_t				h_qp,
		OUT			ib_qp_attr_t				*p_qp_attr,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	int qp_attr_mask = 0;
	ib_api_status_t		status = IB_SUCCESS;
	struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr qp_init_attr;
	struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;

	UNREFERENCED_PARAMETER(p_umv_buf);
	
	HCA_ENTER( HCA_DBG_QP);

	// sanity checks
	if (!p_qp_attr) {
		status =  IB_INVALID_PARAMETER;
		goto err_parm;
	}

	memset( &qp_attr, 0, sizeof(struct ib_qp_attr) );

	if (qp_p->state == IBQPS_RESET) {
		// the QP doesn't yet exist in HW - fill what we can fill now
		// from the cached creation attributes
		p_qp_attr->h_pd					= (ib_pd_handle_t)qp_p->ibqp.pd;
		p_qp_attr->qp_type				= qp_p->ibqp.qp_type;
		p_qp_attr->sq_max_inline		= qp_p->qp_init_attr.cap.max_inline_data;
		p_qp_attr->sq_depth				= qp_p->qp_init_attr.cap.max_send_wr;
		p_qp_attr->rq_depth				= qp_p->qp_init_attr.cap.max_recv_wr;
		p_qp_attr->sq_sge				= qp_p->qp_init_attr.cap.max_send_sge;
		p_qp_attr->rq_sge				= qp_p->qp_init_attr.cap.max_recv_sge;
		p_qp_attr->resp_res				= qp_p->resp_depth;
		p_qp_attr->h_sq_cq				= (ib_cq_handle_t)qp_p->ibqp.send_cq;
		p_qp_attr->h_rq_cq				= (ib_cq_handle_t)qp_p->ibqp.recv_cq;
		p_qp_attr->sq_signaled			= qp_p->sq_policy == IB_SIGNAL_ALL_WR;
		p_qp_attr->state				= mlnx_qps_to_ibal( qp_p->state );
		p_qp_attr->num					= cl_hton32(qp_p->ibqp.qp_num);
		p_qp_attr->primary_port			= qp_p->qp_init_attr.port_num;
	}
	else {
		//request the info from the card
		err = ib_qp_p->device->query_qp( ib_qp_p, &qp_attr, 
			qp_attr_mask, &qp_init_attr);
		if (err){
			status = errno_to_iberr(err);
			HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD,
				("ib_query_qp failed (%#x)\n", status));
			goto err_query_qp;
		}
		
		// convert the results back to IBAL
		status = mlnx_conv_qp_attr( ib_qp_p, &qp_attr, p_qp_attr );
	}

err_query_qp:
err_parm:
	HCA_EXIT(HCA_DBG_QP);
	return status;
}
1357 \r
1358 ib_api_status_t\r
1359 mlnx_destroy_qp (\r
1360         IN              const   ib_qp_handle_t                          h_qp,\r
1361         IN              const   uint64_t                                        timewait )\r
1362 {\r
1363         ib_api_status_t         status;\r
1364         int err;\r
1365         struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
1366         PREP_IBDEV_FOR_PRINT(ib_qp_p->device);\r
1367 \r
1368         UNUSED_PARAM( timewait );\r
1369 \r
1370         HCA_ENTER( HCA_DBG_QP);\r
1371 \r
1372         HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  ,\r
1373                 ("qpnum %#x, pcs %p\n", ib_qp_p->qp_num, PsGetCurrentProcess()) );\r
1374 \r
1375         err = ibv_destroy_qp( ib_qp_p );\r
1376         if (err) {\r
1377                 HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,\r
1378                         ("ibv_destroy_qp failed (%d)\n", err));\r
1379                 status = errno_to_iberr(err);\r
1380                 goto err_destroy_qp;\r
1381         }\r
1382 \r
1383         status = IB_SUCCESS;\r
1384 \r
1385 err_destroy_qp:\r
1386         if (status != IB_SUCCESS)\r
1387         {\r
1388                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,\r
1389                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1390         }\r
1391         HCA_EXIT(HCA_DBG_QP);\r
1392         return status;\r
1393 }\r
1394 \r
/*
* Completion Queue Management Verbs.
*/
1398 \r
/*
 * mlnx_create_cq - create a completion queue.
 *
 * h_ca            - for a user-mode call (p_umv_buf non-NULL) this is the
 *                   caller's ib_ucontext; for a kernel call it is the
 *                   mlnx_hob_t of the adapter.
 * cq_context      - opaque consumer context stored in the CQ.
 * event_handler   - async event callback for CQ events.
 * cq_comp_handler - completion notification callback.
 * p_size          - in: requested number of CQEs; out: actual number
 *                   allocated (ib_cq_p->cqe).
 * ph_cq           - receives the new CQ handle on success.
 * p_umv_buf       - user-mode verbs buffer; its input/output sizes are
 *                   validated against the create-CQ ABI structures.
 *
 * Returns IB_SUCCESS, IB_INVALID_PARAMETER (bad umv buffer),
 * IB_INVALID_CQ_SIZE (zero or over the device's max_cqes limit), or an
 * error translated from ibv_create_cq() by errno_to_iberr().
 */
ib_api_status_t
mlnx_create_cq (
	IN		const	ib_ca_handle_t				h_ca,
	IN		const	void					*cq_context,
	IN				ci_async_event_cb_t			event_handler,
	IN				ci_completion_cb_t			cq_comp_handler,
	IN	OUT			uint32_t				*p_size,
		OUT			ib_cq_handle_t				*ph_cq,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	ib_api_status_t		status;
	struct ib_cq *ib_cq_p;
	mlnx_hob_t			*hob_p;
	struct ib_device *ib_dev;
	struct ib_ucontext *p_context;

	HCA_ENTER(HCA_DBG_CQ);

	if( p_umv_buf ) {

		// user-mode call: h_ca carries the caller's ucontext
		p_context = (struct ib_ucontext *)h_ca;
		hob_p = HOB_FROM_IBDEV(p_context->device);
		ib_dev = p_context->device;

		// sanity checks: both directions of the umv buffer must be
		// large enough for the create-CQ ABI structures
		if (p_umv_buf->input_size < sizeof(struct ibv_create_cq) ||
			p_umv_buf->output_size < sizeof(struct ibv_create_cq_resp) ||
			!p_umv_buf->p_inout_buf) {
			status = IB_INVALID_PARAMETER;
			goto err_inval_params;
		}
	}
	else {
		// kernel call: h_ca is the HOB itself
		hob_p = (mlnx_hob_t *)h_ca;
		p_context = NULL;
		ib_dev = IBDEV_FROM_HOB( hob_p );
	}

	/* sanity check: requested CQE count must be non-zero and within
	   the device limit */
	if (!*p_size || *p_size > (uint32_t)ib_dev->mdev->limits.max_cqes) {
		status = IB_INVALID_CQ_SIZE;
		goto err_cqe;
	}

	// allocate cq	
	ib_cq_p = ibv_create_cq(ib_dev, 
		cq_comp_handler, event_handler,
		(void*)cq_context, *p_size, p_context, p_umv_buf );
	if (IS_ERR(ib_cq_p)) {
		err = PTR_ERR(ib_cq_p);
		HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_CQ, ("ibv_create_cq failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_create_cq;
	}

	// return the result: the actually-allocated CQE count (may exceed
	// the request)
//	*p_size = *p_size;	// return the same value
	*p_size = ib_cq_p->cqe;

	if (ph_cq) *ph_cq = (ib_cq_handle_t)ib_cq_p;

	status = IB_SUCCESS;
	
err_create_cq:
err_inval_params:
err_cqe:
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	if (status != IB_SUCCESS)
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_CQ);
	return status;
}
1476 \r
1477 ib_api_status_t\r
1478 mlnx_resize_cq (\r
1479         IN              const   ib_cq_handle_t                          h_cq,\r
1480         IN      OUT                     uint32_t                                        *p_size,\r
1481         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1482 {\r
1483         UNREFERENCED_PARAMETER(h_cq);\r
1484         UNREFERENCED_PARAMETER(p_size);\r
1485         if (p_umv_buf && p_umv_buf->command) {\r
1486                 p_umv_buf->status = IB_UNSUPPORTED;\r
1487         }\r
1488         HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_CQ,("mlnx_resize_cq not implemented\n"));\r
1489         return IB_UNSUPPORTED;\r
1490 }\r
1491 \r
1492 ib_api_status_t\r
1493 mlnx_query_cq (\r
1494         IN              const   ib_cq_handle_t                          h_cq,\r
1495                 OUT                     uint32_t                                        *p_size,\r
1496         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1497 {\r
1498         UNREFERENCED_PARAMETER(h_cq);\r
1499         UNREFERENCED_PARAMETER(p_size);\r
1500         if (p_umv_buf && p_umv_buf->command) {\r
1501                 p_umv_buf->status = IB_UNSUPPORTED;\r
1502         }\r
1503         HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ,("mlnx_query_cq not implemented\n"));\r
1504         return IB_UNSUPPORTED;\r
1505 }\r
1506 \r
1507 ib_api_status_t\r
1508 mlnx_destroy_cq (\r
1509         IN              const   ib_cq_handle_t                          h_cq)\r
1510 {\r
1511                                                                                                                                                                 \r
1512         ib_api_status_t         status;\r
1513         int err;\r
1514         struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;\r
1515         PREP_IBDEV_FOR_PRINT(ib_cq_p->device);\r
1516 \r
1517         HCA_ENTER( HCA_DBG_QP);\r
1518 \r
1519         HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_CQ,\r
1520                 ("cqn %#x, pcs %p\n", ((struct mthca_cq*)ib_cq_p)->cqn, PsGetCurrentProcess()) );\r
1521 \r
1522         // destroy CQ\r
1523         err = ibv_destroy_cq( ib_cq_p );\r
1524         if (err) {\r
1525                 HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,\r
1526                         ("ibv_destroy_cq failed (%d)\n", err));\r
1527                 status = errno_to_iberr(err);\r
1528                 goto err_destroy_cq;\r
1529         }\r
1530 \r
1531         status = IB_SUCCESS;\r
1532 \r
1533 err_destroy_cq:\r
1534         if (status != IB_SUCCESS)\r
1535         {\r
1536                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ,\r
1537                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1538         }\r
1539         HCA_EXIT(HCA_DBG_CQ);\r
1540         return status;\r
1541 }\r
1542 \r
1543 \r
1544 ib_api_status_t\r
1545 mlnx_local_mad (\r
1546         IN              const   ib_ca_handle_t                          h_ca,\r
1547         IN              const   uint8_t                                         port_num,\r
1548         IN              const   ib_av_attr_t*                                   p_av_attr,\r
1549         IN              const   ib_mad_t                                        *p_mad_in,\r
1550         OUT             ib_mad_t                                        *p_mad_out )\r
1551 {\r
1552         int err;\r
1553         ib_api_status_t         status = IB_SUCCESS;\r
1554         mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
1555         struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );\r
1556         //TODO: do we need use flags (IB_MAD_IGNORE_MKEY, IB_MAD_IGNORE_BKEY) ?\r
1557         int mad_flags = 0;  \r
1558         struct _ib_wc *wc_p = NULL;\r
1559         //TODO: do we need use grh ?\r
1560         struct _ib_grh *grh_p = NULL;\r
1561 \r
1562         HCA_ENTER(HCA_DBG_MAD);\r
1563 \r
1564         // sanity checks\r
1565         if (port_num > 2) {\r
1566                 status = IB_INVALID_PARAMETER;\r
1567                 goto err_port_num;\r
1568         }\r
1569 \r
1570         if (p_av_attr){\r
1571                 wc_p = cl_zalloc(sizeof(struct _ib_wc));\r
1572                 if(!wc_p){\r
1573                         status =  IB_INSUFFICIENT_MEMORY ;\r
1574                         goto err_wc_alloc;\r
1575                 }\r
1576                 //Copy part of the attributes need to fill the mad extended fields in mellanox devices\r
1577                 wc_p->recv.ud.remote_lid = p_av_attr->dlid;\r
1578                 wc_p->recv.ud.remote_sl  = p_av_attr->sl;\r
1579                 wc_p->recv.ud.path_bits  = p_av_attr->path_bits;\r
1580                 wc_p->recv.ud.recv_opt = p_av_attr->grh_valid?IB_RECV_OPT_GRH_VALID:0;\r
1581 \r
1582                 if(wc_p->recv.ud.recv_opt &IB_RECV_OPT_GRH_VALID){\r
1583                         grh_p = cl_zalloc(sizeof(struct _ib_grh));\r
1584                         if(!grh_p){\r
1585                                 status =  IB_INSUFFICIENT_MEMORY ;\r
1586                                 goto err_grh_alloc;\r
1587                         }\r
1588                         cl_memcpy(grh_p, &p_av_attr->grh, sizeof(ib_grh_t));\r
1589                 }\r
1590                         \r
1591 \r
1592         }\r
1593 \r
1594         HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_MAD, \r
1595                 ("MAD: Class %02x, Method %02x, Attr %02x, HopPtr %d, HopCnt %d, \n",\r
1596                 (uint32_t)((ib_smp_t *)p_mad_in)->mgmt_class, \r
1597                 (uint32_t)((ib_smp_t *)p_mad_in)->method, \r
1598                 (uint32_t)((ib_smp_t *)p_mad_in)->attr_id, \r
1599                 (uint32_t)((ib_smp_t *)p_mad_in)->hop_ptr,\r
1600                 (uint32_t)((ib_smp_t *)p_mad_in)->hop_count));\r
1601 \r
1602         \r
1603         // process mad\r
1604         \r
1605         err = mthca_process_mad(ib_dev, mad_flags, (uint8_t)port_num, \r
1606                 wc_p, grh_p, (struct ib_mad*)p_mad_in, (struct ib_mad*)p_mad_out);\r
1607         if (!err) {\r
1608                 HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_MAD, \r
1609                         ("MAD failed:\n\tClass 0x%x\n\tMethod 0x%x\n\tAttr 0x%x",\r
1610                         p_mad_in->mgmt_class, p_mad_in->method, p_mad_in->attr_id ));\r
1611                 status = IB_ERROR;\r
1612                 goto err_process_mad;\r
1613         }\r
1614         \r
1615         if( (p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR ||\r
1616                 p_mad_in->mgmt_class == IB_MCLASS_SUBN_LID) &&\r
1617                 p_mad_in->attr_id == IB_MAD_ATTR_PORT_INFO )\r
1618         {\r
1619                 ib_port_info_t  *p_pi_in, *p_pi_out;\r
1620 \r
1621                 if( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1622                 {\r
1623                         p_pi_in = (ib_port_info_t*)\r
1624                                 ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in );\r
1625                         p_pi_out = (ib_port_info_t*)\r
1626                                 ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out );\r
1627                 }\r
1628                 else\r
1629                 {\r
1630                         p_pi_in = (ib_port_info_t*)(p_mad_in + 1);\r
1631                         p_pi_out = (ib_port_info_t*)(p_mad_out + 1);\r
1632                 }\r
1633 \r
1634                 /* Work around FW bug 33958 */\r
1635                 p_pi_out->subnet_timeout &= 0x7F;\r
1636                 if( p_mad_in->method == IB_MAD_METHOD_SET )\r
1637                         p_pi_out->subnet_timeout |= (p_pi_in->subnet_timeout & 0x80);\r
1638         }\r
1639 \r
1640         /* Modify direction for Direct MAD */\r
1641         if ( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR )\r
1642                 p_mad_out->status |= IB_SMP_DIRECTION;\r
1643 \r
1644 \r
1645 err_process_mad:\r
1646         if(grh_p)\r
1647                 cl_free(grh_p);\r
1648 err_grh_alloc:\r
1649         if(wc_p)\r
1650                 cl_free(wc_p);\r
1651 err_wc_alloc:\r
1652 err_port_num:   \r
1653         if (status != IB_SUCCESS)\r
1654         {\r
1655                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MAD,\r
1656                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1657         }\r
1658         HCA_EXIT(HCA_DBG_MAD);\r
1659         return status;\r
1660 }\r
1661         \r
1662 \r
1663 void\r
1664 setup_ci_interface(\r
1665         IN              const   ib_net64_t                                      ca_guid,\r
1666         IN              const   int                                                     is_livefish,\r
1667         IN      OUT                     ci_interface_t                          *p_interface )\r
1668 {\r
1669         cl_memclr(p_interface, sizeof(*p_interface));\r
1670 \r
1671         /* Guid of the CA. */\r
1672         p_interface->guid = ca_guid;\r
1673 \r
1674         /* Version of this interface. */\r
1675         p_interface->version = VERBS_VERSION;\r
1676 \r
1677         /* UVP name */\r
1678         cl_memcpy( p_interface->libname, mlnx_uvp_lib_name, MAX_LIB_NAME);\r
1679 \r
1680         HCA_PRINT(TRACE_LEVEL_VERBOSE  , HCA_DBG_SHIM  ,("UVP filename %s\n", p_interface->libname));\r
1681 \r
1682         /* The real interface. */\r
1683         p_interface->open_ca = mlnx_open_ca;\r
1684         p_interface->query_ca = mlnx_query_ca;\r
1685         p_interface->close_ca = mlnx_close_ca;\r
1686         p_interface->um_open_ca = mlnx_um_open;\r
1687         p_interface->um_close_ca = mlnx_um_close;\r
1688         p_interface->register_event_handler = mlnx_register_event_handler;\r
1689         p_interface->unregister_event_handler = mlnx_unregister_event_handler;\r
1690 \r
1691         p_interface->allocate_pd = mlnx_allocate_pd;\r
1692         p_interface->deallocate_pd = mlnx_deallocate_pd;\r
1693         p_interface->vendor_call = fw_access_ctrl;\r
1694 \r
1695         if (is_livefish) {\r
1696                 mlnx_memory_if_livefish(p_interface);\r
1697         }\r
1698         else {  \r
1699                 p_interface->modify_ca = mlnx_modify_ca; \r
1700                 \r
1701                 p_interface->create_av = mlnx_create_av;\r
1702                 p_interface->query_av = mlnx_query_av;\r
1703                 p_interface->modify_av = mlnx_modify_av;\r
1704                 p_interface->destroy_av = mlnx_destroy_av;\r
1705 \r
1706                 p_interface->create_srq = mlnx_create_srq;\r
1707                 p_interface->modify_srq = mlnx_modify_srq;\r
1708                 p_interface->query_srq = mlnx_query_srq;\r
1709                 p_interface->destroy_srq = mlnx_destroy_srq;\r
1710 \r
1711                 p_interface->create_qp = mlnx_create_qp;\r
1712                 p_interface->create_spl_qp = mlnx_create_spl_qp;\r
1713                 p_interface->modify_qp = mlnx_modify_qp;\r
1714                 p_interface->ndi_modify_qp = mlnx_ndi_modify_qp;\r
1715                 p_interface->query_qp = mlnx_query_qp;\r
1716                 p_interface->destroy_qp = mlnx_destroy_qp;\r
1717 \r
1718                 p_interface->create_cq = mlnx_create_cq;\r
1719                 p_interface->resize_cq = mlnx_resize_cq;\r
1720                 p_interface->query_cq = mlnx_query_cq;\r
1721                 p_interface->destroy_cq = mlnx_destroy_cq;\r
1722 \r
1723                 p_interface->local_mad = mlnx_local_mad;\r
1724                 \r
1725 \r
1726                 mlnx_memory_if(p_interface);\r
1727                 mlnx_direct_if(p_interface);\r
1728                 mlnx_mcast_if(p_interface);\r
1729         }\r
1730 \r
1731         return;\r
1732 }\r
1733 \r
1734 \r