[IBAL, HW] Remove pointers from ci_umv_buf_t.
[mirror/winof/.git] / hw / mthca / kernel / hca_verbs.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
4  * Portions Copyright (c) 2008 Microsoft Corporation.  All rights reserved.\r
5  *\r
6  * This software is available to you under the OpenIB.org BSD license\r
7  * below:\r
8  *\r
9  *     Redistribution and use in source and binary forms, with or\r
10  *     without modification, are permitted provided that the following\r
11  *     conditions are met:\r
12  *\r
13  *      - Redistributions of source code must retain the above\r
14  *        copyright notice, this list of conditions and the following\r
15  *        disclaimer.\r
16  *\r
17  *      - Redistributions in binary form must reproduce the above\r
18  *        copyright notice, this list of conditions and the following\r
19  *        disclaimer in the documentation and/or other materials\r
20  *        provided with the distribution.\r
21  *\r
22  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
23  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
24  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
25  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
26  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
27  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
28  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
29  * SOFTWARE.\r
30  *\r
31  * $Id$\r
32  */\r
33 \r
34 \r
35 #include "hca_driver.h"\r
36 #if defined(EVENT_TRACING)\r
37 #ifdef offsetof\r
38 #undef offsetof\r
39 #endif\r
40 #include "hca_verbs.tmh"\r
41 #endif\r
42 #include "mthca_dev.h"\r
43 #include "ib_cache.h"\r
44 #include "mx_abi.h"\r
45 #include "mt_pa_cash.h"\r
46 \r
47 \r
48 \r
49 // Local declarations\r
50 ib_api_status_t\r
51 mlnx_query_qp (\r
52         IN              const   ib_qp_handle_t                          h_qp,\r
53                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
54         IN      OUT                     ci_umv_buf_t                            *p_umv_buf );\r
55 \r
56 /* \r
57 * CA Access Verbs\r
58 */\r
59 ib_api_status_t\r
60 mlnx_open_ca (\r
61         IN              const   ib_net64_t                                      ca_guid, // IN  const char *                ca_name,\r
62         IN              const   ci_completion_cb_t                      pfn_completion_cb,\r
63         IN              const   ci_async_event_cb_t                     pfn_async_event_cb,\r
64         IN              const   void*const                                      ca_context,\r
65                 OUT                     ib_ca_handle_t                          *ph_ca)\r
66 {\r
67         mlnx_hca_t                              *p_hca;\r
68         ib_api_status_t status = IB_NOT_FOUND;\r
69         struct ib_device *ib_dev;\r
70 \r
71         HCA_ENTER(HCA_DBG_SHIM);\r
72         HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,\r
73                 ("context 0x%p\n", ca_context));\r
74 \r
75         // find CA object\r
76         p_hca = mlnx_hca_from_guid( ca_guid );\r
77         if( !p_hca ) {\r
78                 if (status != IB_SUCCESS) \r
79                 {\r
80                         HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
81                         ("completes with ERROR status IB_NOT_FOUND\n"));\r
82                 }\r
83                 HCA_EXIT(HCA_DBG_SHIM);\r
84                 return IB_NOT_FOUND;\r
85         }\r
86 \r
87         ib_dev = &p_hca->mdev->ib_dev;\r
88 \r
89         if (mthca_is_livefish(p_hca->mdev)) \r
90                 goto done;\r
91 \r
92         HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,\r
93                 ("context 0x%p\n", ca_context));\r
94         status = mlnx_hobs_set_cb(&p_hca->hob,\r
95                 pfn_completion_cb,\r
96                 pfn_async_event_cb,\r
97                 ca_context);\r
98         if (IB_SUCCESS != status) {\r
99                 goto err_set_cb;\r
100         }\r
101 \r
102         \r
103         //TODO: do we need something for kernel users ?\r
104 \r
105         // Return pointer to HOB object\r
106 done:   \r
107         if (ph_ca) *ph_ca = &p_hca->hob;\r
108         status =  IB_SUCCESS;\r
109 \r
110 //err_mad_cache:\r
111 err_set_cb:\r
112         if (status != IB_SUCCESS)\r
113         {\r
114                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,\r
115                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
116         }\r
117         HCA_EXIT(HCA_DBG_SHIM);\r
118         return status;\r
119 }\r
120 \r
/*
 * Query all attributes of an opened CA into one caller-supplied buffer.
 *
 * The ib_ca_attr_t result is a variable-size structure: the fixed header is
 * followed (in the same buffer) by the page-size array, the per-port
 * attribute array, each port's GID and PKEY tables, the board-id string and
 * the uplink info.  The function therefore works in two passes:
 *   1) query device/port properties and compute `required_size`;
 *   2) if the caller's buffer (*p_byte_count) is large enough, carve the
 *      buffer into the sub-tables with PTR_ALIGN'ed offsets and fill them.
 *
 * h_ca         - HOB handle returned by mlnx_open_ca.
 * p_ca_attr    - (out, may be NULL) buffer for the full attribute blob.
 * p_byte_count - in: caller's buffer size; out: required size on
 *                IB_INSUFFICIENT_MEMORY (the usual two-call pattern).
 * p_umv_buf    - user-mode verb buffer; user mode is not supported here.
 *
 * Returns IB_SUCCESS, IB_INSUFFICIENT_MEMORY (with *p_byte_count updated),
 * IB_INVALID_PARAMETER, IB_UNSUPPORTED, or an error mapped via
 * errno_to_iberr from the low-level query calls.
 */
ib_api_status_t
mlnx_query_ca (
	IN		const	ib_ca_handle_t				h_ca,
		OUT			ib_ca_attr_t				*p_ca_attr,
	IN	OUT			uint32_t					*p_byte_count,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t		status;
	uint32_t			size, required_size;
	uint8_t			port_num, num_ports;
	uint32_t			num_gids, num_pkeys;
	uint32_t			num_page_sizes = 1; // TBD: what is actually supported
	uint8_t				*last_p;
	struct ib_device_attr props;
	struct ib_port_attr  *hca_ports = NULL;
	int i;
	
	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );
	int err;
	
	HCA_ENTER(HCA_DBG_SHIM);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM ,("User mode is not supported yet\n"));
			p_umv_buf->status = status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}

	// the queries below may block, so reject callers at raised IRQL
	if( !cl_is_blockable() ) {
			status = IB_UNSUPPORTED;
			goto err_unsupported;
	}

	if (NULL == p_byte_count) {
		status = IB_INVALID_PARAMETER;
		goto err_byte_count;
	}

	// query the device
	err = mthca_query_device(ib_dev, &props );
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, 
			("ib_query_device failed (%d)\n",err));
		status = errno_to_iberr(err);
		goto err_query_device;
	}
	
	// alocate arrary for port properties
	num_ports = ib_dev->phys_port_cnt;   /* Number of physical ports of the HCA */             
	if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof *hca_ports))) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("Failed to cl_zalloc ports array\n"));
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_ports;
	}

	// start calculation of ib_ca_attr_t full size
	// (fixed header + page-size array + port array + board id + uplink info;
	//  the per-port GID/PKEY tables are added inside the loop below)
	num_gids = 0;
	num_pkeys = 0;
	required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) +
		PTR_ALIGN(sizeof(uint32_t) * num_page_sizes) +
		PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports)+
		PTR_ALIGN(MTHCA_BOARD_ID_LEN)+
		PTR_ALIGN(sizeof(uplink_info_t));	/* uplink info */
	
	// get port properties
	// NOTE: port_num here is a 0-based index; the low-level API expects
	// start_port(ib_dev)-based numbering, hence the "+ start_port" offsets.
	for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num) {
		// request
		err = mthca_query_port(ib_dev, port_num + start_port(ib_dev), &hca_ports[port_num]);
		if (err) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("ib_query_port failed(%d) for port %d\n",err, port_num));
			status = errno_to_iberr(err);
			goto err_query_port;
		}

		// calculate GID table size
		num_gids  = hca_ports[port_num].gid_tbl_len;
		size = PTR_ALIGN(sizeof(ib_gid_t)  * num_gids);
		required_size += size;

		// calculate pkeys table size
		num_pkeys = hca_ports[port_num].pkey_tbl_len;
		size = PTR_ALIGN(sizeof(uint16_t) * num_pkeys);
		required_size += size;
	}

	// resource sufficience check
	// (p_ca_attr == NULL is the legitimate "size probe" call, so only
	//  print an error when a too-small buffer was actually supplied)
	if (NULL == p_ca_attr || *p_byte_count < required_size) {
		*p_byte_count = required_size;
		status = IB_INSUFFICIENT_MEMORY;
		if ( p_ca_attr != NULL) {
			HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, 
				("Failed *p_byte_count (%d) < required_size (%d)\n", *p_byte_count, required_size ));
		}
		goto err_insuff_mem;
	}

	// Space is sufficient - setup table pointers
	// last_p walks through the caller's buffer; each sub-table pointer is
	// aimed at the current cursor and the cursor advanced by the aligned size.
	last_p = (uint8_t*)p_ca_attr;
	last_p += PTR_ALIGN(sizeof(*p_ca_attr));

	p_ca_attr->p_page_size = (uint32_t*)last_p;
	last_p += PTR_ALIGN(num_page_sizes * sizeof(uint32_t));

	p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p;
	last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t));

	for (port_num = 0; port_num < num_ports; port_num++) {
		p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p;
		size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len);
		last_p += size;

		p_ca_attr->p_port_attr[port_num].p_pkey_table = (uint16_t *)last_p;
		size = PTR_ALIGN(sizeof(uint16_t) * hca_ports[port_num].pkey_tbl_len);
		last_p += size;
	}
	
	//copy vendor specific data
	cl_memcpy(last_p,to_mdev(ib_dev)->board_id, MTHCA_BOARD_ID_LEN);
	last_p += PTR_ALIGN(MTHCA_BOARD_ID_LEN);
	*(uplink_info_t*)last_p = to_mdev(ib_dev)->uplink_info;
	last_p += PTR_ALIGN(sizeof(uplink_info_t));	/* uplink info */
	
	// Separate the loops to ensure that table pointers are always setup
	for (port_num = 0; port_num < num_ports; port_num++) {

		// get pkeys, using cache
		for (i=0; i < hca_ports[port_num].pkey_tbl_len; ++i) {
			err = ib_get_cached_pkey( ib_dev, port_num + start_port(ib_dev), i,
				&p_ca_attr->p_port_attr[port_num].p_pkey_table[i] );
			if (err) {
				status = errno_to_iberr(err);
				HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, 
					("ib_get_cached_pkey failed (%d) for port_num %d, index %d\n",
					err, port_num + start_port(ib_dev), i));
				goto err_get_pkey;
			}
		}
		
		// get gids, using cache
		for (i=0; i < hca_ports[port_num].gid_tbl_len; ++i) {
			union ib_gid * gid = (union ib_gid *)&p_ca_attr->p_port_attr[port_num].p_gid_table[i];
			err = ib_get_cached_gid( ib_dev, port_num + start_port(ib_dev), i, (union ib_gid *)gid );
			//TODO: do we need to convert gids to little endian
			if (err) {
				status = errno_to_iberr(err);
				HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, 
					("ib_get_cached_gid failed (%d) for port_num %d, index %d\n",
					err, port_num + start_port(ib_dev), i));
				goto err_get_gid;
			}
		}

		// dump the first GID of the port, byte by byte
		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,("port %d gid0:\n", port_num));
		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,
			(" 0x%x%x%x%x%x%x%x%x-0x%x%x%x%x%x%x%x%x\n", 
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[0],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[1],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[2],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[3],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[4],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[5],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[6],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[7],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[8],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[9],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[10],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[11],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[12],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[13],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[14],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[15]));
	}

	// set result size
	p_ca_attr->size = required_size;
	CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) );
	HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM , ("Space required %d used %d\n",
		required_size, (int)((uintn_t)last_p - (uintn_t)p_ca_attr) ));
	
	// !!! GID/PKEY tables must be queried before this call !!!
	mlnx_conv_hca_cap(ib_dev, &props, hca_ports, p_ca_attr);

	status = IB_SUCCESS;

// goto-cascade cleanup: labels fall through so later failures free
// everything acquired earlier (only hca_ports needs freeing here)
err_get_gid:
err_get_pkey:
err_insuff_mem:
err_query_port:
	cl_free(hca_ports);
err_alloc_ports:
err_query_device:
err_byte_count:	
err_unsupported:
err_user_unsupported:
	// IB_INSUFFICIENT_MEMORY is the expected size-probe result - not an error
	if( status != IB_INSUFFICIENT_MEMORY && status != IB_SUCCESS )
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	HCA_EXIT(HCA_DBG_SHIM);
	return status;
}
323 \r
324 ib_api_status_t\r
325 mlnx_modify_ca (\r
326         IN              const   ib_ca_handle_t                          h_ca,\r
327         IN              const   uint8_t                                         port_num,\r
328         IN              const   ib_ca_mod_t                                     modca_cmd,\r
329         IN              const   ib_port_attr_mod_t                      *p_port_attr)\r
330 {\r
331 #define SET_CAP_MOD(al_mask, al_fld, ib)                \\r
332                 if (modca_cmd & al_mask) {      \\r
333                         if (p_port_attr->cap.##al_fld)          \\r
334                                 props.set_port_cap_mask |= ib;  \\r
335                         else            \\r
336                                 props.clr_port_cap_mask |= ib;  \\r
337                 }\r
338 \r
339         ib_api_status_t status;\r
340         int err;\r
341         struct ib_port_modify props;\r
342         int port_modify_mask = 0;\r
343         mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
344         struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );\r
345 \r
346         HCA_ENTER(HCA_DBG_SHIM);\r
347 \r
348         //sanity check\r
349         if( !cl_is_blockable() ) {\r
350                         status = IB_UNSUPPORTED;\r
351                         goto err_unsupported;\r
352         }\r
353         \r
354         if (port_num < start_port(ib_dev) || port_num > end_port(ib_dev)) {\r
355                 status = IB_INVALID_PORT;\r
356                 goto err_port;\r
357         }\r
358 \r
359         // prepare parameters\r
360         RtlZeroMemory(&props, sizeof(props));\r
361         SET_CAP_MOD(IB_CA_MOD_IS_SM, sm, IB_PORT_SM);\r
362         SET_CAP_MOD(IB_CA_MOD_IS_SNMP_SUPPORTED, snmp, IB_PORT_SNMP_TUNNEL_SUP);\r
363         SET_CAP_MOD(IB_CA_MOD_IS_DEV_MGMT_SUPPORTED, dev_mgmt, IB_PORT_DEVICE_MGMT_SUP);\r
364         SET_CAP_MOD(IB_CA_MOD_IS_VEND_SUPPORTED, vend, IB_PORT_VENDOR_CLASS_SUP);\r
365         if ((modca_cmd & IB_CA_MOD_QKEY_CTR) && (p_port_attr->qkey_ctr == 0)) \r
366                 port_modify_mask |= IB_PORT_RESET_QKEY_CNTR;\r
367         \r
368         // modify port\r
369         err = mthca_modify_port(ib_dev, port_num, port_modify_mask, &props );\r
370         if (err) {\r
371                 status = errno_to_iberr(err);\r
372                 HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mthca_modify_port failed (%d) \n",err));\r
373                 goto err_modify_port;\r
374         }\r
375 \r
376         status =        IB_SUCCESS;\r
377 \r
378 err_modify_port:\r
379 err_port:\r
380 err_unsupported:\r
381         if (status != IB_SUCCESS)\r
382         {\r
383                 HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,\r
384                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
385         }\r
386         HCA_EXIT(HCA_DBG_SHIM);\r
387         return status;\r
388 }\r
389 \r
390 ib_api_status_t\r
391 mlnx_close_ca (\r
392         IN                              ib_ca_handle_t                          h_ca)\r
393 {\r
394         mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
395         HCA_ENTER(HCA_DBG_SHIM);\r
396 \r
397         if (mthca_is_livefish(MDEV_FROM_HOB( hob_p ))) \r
398                 goto done;\r
399 \r
400         mlnx_hobs_remove(h_ca);\r
401 \r
402 done:\r
403         HCA_EXIT(HCA_DBG_SHIM);\r
404         \r
405         return IB_SUCCESS;\r
406 }\r
407 \r
408 \r
/*
 * Open a per-process (user-mode) CA context.
 *
 * Two paths:
 *  - p_umv_buf->command == 0: a kernel-internal open.  Only a zeroed
 *    ib_ucontext shell is allocated (no HW context, no PD).
 *  - otherwise: a real user-mode open.  A HW ucontext is created, a PD is
 *    allocated on its behalf, and the response (UAR address, PD handle/num,
 *    PCI vendor/device id) is written into the caller's umv response buffer.
 *
 * In both cases the new context is linked into the device extension's
 * uctx_list and the use count is bumped.
 *
 * h_ca      - HOB handle of the CA.
 * p_umv_buf - user-mode verbs buffer (command/response marshalling).
 * ph_um_ca  - (out, optional) receives the new context as a CA handle.
 */
static ib_api_status_t
mlnx_um_open(
	IN		const	ib_ca_handle_t				h_ca,
	IN	OUT			ci_umv_buf_t* const			p_umv_buf,
		OUT			ib_ca_handle_t* const		ph_um_ca )
{
	int err;
	ib_api_status_t		status;
	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	hca_dev_ext_t *ext_p = EXT_FROM_HOB( hob_p );
	struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );
	struct ib_ucontext *p_context;
	struct ibv_get_context_resp *uresp_p;
	struct ibv_alloc_pd_resp resp;
	ci_umv_buf_t umv_buf;

	HCA_ENTER(HCA_DBG_SHIM);

	// sanity check
	ASSERT( p_umv_buf );
	if( !p_umv_buf->command )
	{
		// kernel-internal open: bare context shell, no HW resources
		p_context = cl_zalloc( sizeof(struct ib_ucontext) );
		if( !p_context )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_alloc_ucontext;
		}
		/* Copy the dev info. */
		p_context->device = ib_dev;
		p_umv_buf->output_size = 0;
		goto done;
	}

	// create user context in kernel
	p_context = mthca_alloc_ucontext(ib_dev, p_umv_buf);
	if (IS_ERR(p_context)) {
		err = PTR_ERR(p_context);
		HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM,
			("mthca_alloc_ucontext failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_alloc_ucontext;
	}

	/* allocate pd */
	// build a private kernel-side umv buffer so the PD response lands in
	// the local `resp` instead of the caller's buffer
	umv_buf.command = 1;
	umv_buf.input_size = umv_buf.status = 0;
	umv_buf.output_size = sizeof(struct ibv_alloc_pd_resp);
	umv_buf.p_inout_buf = (ULONG_PTR)&resp;
	//NB: Pay attention ! Ucontext parameter is important here:
	// when it is present (i.e. - for user space) - mthca_alloc_pd won't create MR
	p_context->pd = ibv_alloc_pd(ib_dev, p_context, &umv_buf);
	if (IS_ERR(p_context->pd)) {
		err = PTR_ERR(p_context->pd);
		HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
			("ibv_alloc_pd failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_alloc_pd;
	}
	
	// fill more parameters for user (sanity checks are in mthca_alloc_ucontext)
	uresp_p = (struct ibv_get_context_resp *)(ULONG_PTR)p_umv_buf->p_inout_buf;
	uresp_p->uar_addr = (uint64_t)(UINT_PTR)p_context->user_uar;
	uresp_p->pd_handle = resp.pd_handle;
	uresp_p->pdn = resp.pdn;
	uresp_p->vend_id = (uint32_t)ext_p->hcaConfig.VendorID;
	uresp_p->dev_id = (uint16_t)ext_p->hcaConfig.DeviceID;

done:
	// some more inits (common to both the kernel and user paths)
	p_context->va = p_context->p_mdl = NULL;
	p_context->fw_if_open = FALSE;
	KeInitializeMutex( &p_context->mutex, 0 );
	// chain user context to the device
	cl_spinlock_acquire( &ext_p->uctx_lock );
	cl_qlist_insert_tail( &ext_p->uctx_list, &p_context->list_item );
	cl_atomic_inc(&ext_p->usecnt);
	cl_spinlock_release( &ext_p->uctx_lock );
	
	// return the result
	if (ph_um_ca) *ph_um_ca = (ib_ca_handle_t)p_context;

	status = IB_SUCCESS;
	goto end;
	
err_alloc_pd:
	mthca_dealloc_ucontext(p_context);
err_alloc_ucontext: 
end:
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	if (status != IB_SUCCESS) 
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_SHIM);
	return status;
}
508 \r
/*
 * Close a per-process CA context opened by mlnx_um_open.
 *
 * Unmaps any CR-space mappings (skipped for livefish devices), unlinks the
 * context from the device extension's context list and drops the use count.
 * A context with no PD is the bare kernel-path shell from mlnx_um_open and
 * is simply freed; a full user context is torn down via ibv_um_close.
 *
 * h_ca    - HOB handle of the CA (used only to reach the device extension).
 * h_um_ca - the ib_ucontext to close.
 */
static void
mlnx_um_close(
	IN				ib_ca_handle_t				h_ca,
	IN				ib_ca_handle_t				h_um_ca )
{
	struct ib_ucontext *p_ucontext = (struct ib_ucontext *)h_um_ca;
	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	hca_dev_ext_t *ext_p = EXT_FROM_HOB( hob_p );

	// livefish devices never mapped CR space - nothing to unmap
	if (mthca_is_livefish(to_mdev(p_ucontext->device)))
		goto done;
	unmap_crspace_for_all(p_ucontext);
done:	
	// unlink from the device's context list before destroying
	cl_spinlock_acquire( &ext_p->uctx_lock );
	cl_qlist_remove_item( &ext_p->uctx_list, &p_ucontext->list_item );
	cl_atomic_dec(&ext_p->usecnt);
	cl_spinlock_release( &ext_p->uctx_lock );
	// no PD => bare kernel-path shell (see mlnx_um_open): just free it
	if( !p_ucontext->pd )
		cl_free( h_um_ca );
	else
		ibv_um_close(p_ucontext);
	pa_cash_print();
	return;
}
533 \r
534 \r
535 /*\r
536 *    Protection Domain and Reliable Datagram Domain Verbs\r
537 */\r
538 \r
539 ib_api_status_t\r
540 mlnx_allocate_pd (\r
541         IN              const   ib_ca_handle_t                          h_ca,\r
542         IN              const   ib_pd_type_t                            type,\r
543                 OUT                     ib_pd_handle_t                          *ph_pd,\r
544         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
545 {\r
546         ib_api_status_t         status;\r
547         struct ib_device *ib_dev;\r
548         struct ib_ucontext *p_context;\r
549         struct ib_pd *ib_pd_p;\r
550         int err;\r
551 \r
552         //TODO: how are we use it ?\r
553         UNREFERENCED_PARAMETER(type);\r
554         \r
555         HCA_ENTER(HCA_DBG_PD);\r
556 \r
557         if( p_umv_buf ) {\r
558                 p_context = (struct ib_ucontext *)h_ca;\r
559                 ib_dev = p_context->device;\r
560         }\r
561         else {\r
562                 mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
563                 p_context = NULL;\r
564                 ib_dev = IBDEV_FROM_HOB( hob_p );\r
565         }\r
566         \r
567         // create PD\r
568         ib_pd_p = ibv_alloc_pd(ib_dev, p_context, p_umv_buf);\r
569         if (IS_ERR(ib_pd_p)) {\r
570                 err = PTR_ERR(ib_pd_p);\r
571                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD,\r
572                         ("ibv_alloc_pd failed (%d)\n", err));\r
573                 status = errno_to_iberr(err);\r
574                 goto err_alloc_pd;\r
575         }\r
576 \r
577         // return the result\r
578         if (ph_pd) *ph_pd = (ib_pd_handle_t)ib_pd_p;\r
579 \r
580         status = IB_SUCCESS;\r
581         \r
582 err_alloc_pd:   \r
583         if (p_umv_buf && p_umv_buf->command) \r
584                 p_umv_buf->status = status;\r
585         if (status != IB_SUCCESS)\r
586         {\r
587                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD,\r
588                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
589         }\r
590         HCA_EXIT(HCA_DBG_PD);\r
591         return status;\r
592 }\r
593 \r
594 ib_api_status_t\r
595 mlnx_deallocate_pd (\r
596         IN                              ib_pd_handle_t                          h_pd)\r
597 {\r
598         ib_api_status_t         status;\r
599         int err;\r
600         struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
601         PREP_IBDEV_FOR_PRINT(ib_pd_p->device);\r
602 \r
603         HCA_ENTER( HCA_DBG_PD);\r
604 \r
605         HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_PD,\r
606                 ("pcs %p\n", PsGetCurrentProcess()));\r
607         \r
608         // dealloc pd\r
609         err = ibv_dealloc_pd( ib_pd_p );\r
610         if (err) {\r
611                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD\r
612                         ,("ibv_dealloc_pd failed (%d)\n", err));\r
613                 status = errno_to_iberr(err);\r
614                 goto err_dealloc_pd;\r
615         }\r
616         status = IB_SUCCESS;\r
617 \r
618 err_dealloc_pd:\r
619         if (status != IB_SUCCESS) \r
620         {\r
621                         HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_PD\r
622                 ,("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
623         }\r
624         HCA_EXIT(HCA_DBG_PD);\r
625         return status;\r
626 }\r
627 \r
628 /* \r
629 * Address Vector Management Verbs\r
630 */\r
631 ib_api_status_t\r
632 mlnx_create_av (\r
633         IN              const   ib_pd_handle_t                          h_pd,\r
634         IN              const   ib_av_attr_t                            *p_addr_vector,\r
635                 OUT                     ib_av_handle_t                          *ph_av,\r
636         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
637 {\r
638         int err = 0;\r
639         ib_api_status_t         status = IB_SUCCESS;\r
640         struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
641         struct ib_device *ib_dev = ib_pd_p->device;\r
642         struct ib_ah *ib_av_p;\r
643         struct ib_ah_attr ah_attr;\r
644         struct ib_ucontext *p_context = NULL;\r
645 \r
646         HCA_ENTER(HCA_DBG_AV);\r
647 \r
648         if( p_umv_buf && p_umv_buf->command ) {\r
649                 // sanity checks \r
650                 if (p_umv_buf->input_size < sizeof(struct ibv_create_ah) ||\r
651                         p_umv_buf->output_size < sizeof(struct ibv_create_ah_resp) ||\r
652                         !p_umv_buf->p_inout_buf) {\r
653                         status = IB_INVALID_PARAMETER;\r
654                         goto err_inval_params;\r
655                 }\r
656                 p_context = ib_pd_p->ucontext;\r
657         }\r
658         else \r
659                 p_context = NULL;\r
660 \r
661         // fill parameters \r
662         RtlZeroMemory(&ah_attr, sizeof(ah_attr));\r
663         mlnx_conv_ibal_av( ib_dev, p_addr_vector,  &ah_attr );\r
664 \r
665         ib_av_p = ibv_create_ah(ib_pd_p, &ah_attr, p_context, p_umv_buf);\r
666         if (IS_ERR(ib_av_p)) {\r
667                 err = PTR_ERR(ib_av_p);\r
668                 HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_AV,\r
669                         ("ibv_create_ah failed (%d)\n", err));\r
670                 status = errno_to_iberr(err);\r
671                 goto err_alloc_av;\r
672         }\r
673 \r
674         // return the result\r
675         if (ph_av) *ph_av = (ib_av_handle_t)ib_av_p;\r
676 \r
677         status = IB_SUCCESS;\r
678 \r
679 err_alloc_av:   \r
680 err_inval_params:\r
681         if (p_umv_buf && p_umv_buf->command) \r
682                 p_umv_buf->status = status;\r
683         if (status != IB_SUCCESS)\r
684         {\r
685                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV,\r
686                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
687         }\r
688         HCA_EXIT(HCA_DBG_AV);\r
689         return status;\r
690 }\r
691 \r
/*
 * Query an address vector: convert the stored HW AH back into an IBAL
 * ib_av_attr_t and return the owning PD.
 *
 * h_av          - AV handle created by mlnx_create_av.
 * p_addr_vector - (out) receives the translated address vector.
 * ph_pd         - (out) receives the PD the AV belongs to.  NOTE: written
 *                 unconditionally - callers must pass a valid pointer.
 * p_umv_buf     - user mode is not supported; a user command returns
 *                 IB_UNSUPPORTED.
 *
 * NOTE(review): the #ifdef WIN_TO_BE_CHANGED branch below is dead code and
 * would not compile if enabled - the ibv_query_ah() call is missing its
 * semicolon and `goto err_query_ah` targets a label that does not exist.
 * Left untouched since the #else path is the one actually built.
 */
ib_api_status_t
mlnx_query_av (
	IN		const	ib_av_handle_t				h_av,
		OUT			ib_av_attr_t				*p_addr_vector,
		OUT			ib_pd_handle_t				*ph_pd,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	ib_api_status_t		status = IB_SUCCESS;
	struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;
	PREP_IBDEV_FOR_PRINT(ib_ah_p->device);

	HCA_ENTER(HCA_DBG_AV);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,
				("User mode is not supported yet\n"));
			status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}

	// query AV
#ifdef WIN_TO_BE_CHANGED
	//TODO: not implemented in low-level driver
	err = ibv_query_ah(ib_ah_p, &ah_attr)
	if (err) {
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_AV,
			("ibv_query_ah failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_query_ah;
	}
	// convert to IBAL structure: something like that
	mlnx_conv_mthca_av( p_addr_vector,  &ah_attr );
#else

	// convert the kernel AH directly into the IBAL address vector
	err = mlnx_conv_mthca_av( ib_ah_p, p_addr_vector );
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,
			("mlnx_conv_mthca_av failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_conv_mthca_av;
	}
#endif

	// results
	*ph_pd = (ib_pd_handle_t)ib_ah_p->pd;
	
err_conv_mthca_av:
err_user_unsupported:
	if (status != IB_SUCCESS)
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_AV);
	return status;
}
750 \r
/*
 * mlnx_modify_av
 *
 * Modify an existing address vector (AH) in place with new attributes.
 *
 * h_av           - handle of the AH to modify (cast of struct ib_ah *)
 * p_addr_vector  - new IBAL-format address vector attributes
 * p_umv_buf      - user-mode verb buffer; a user-mode request
 *                  (p_umv_buf->command set) is rejected as unsupported
 *
 * Returns IB_SUCCESS or IB_UNSUPPORTED; the active code path has no
 * failure mode (mlnx_modify_ah returns void here).
 */
ib_api_status_t
mlnx_modify_av (
	IN		const	ib_av_handle_t				h_av,
	IN		const	ib_av_attr_t				*p_addr_vector,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	struct ib_ah_attr ah_attr;
	ib_api_status_t		status = IB_SUCCESS;
	struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;
	struct ib_device *ib_dev = ib_ah_p->pd->device;

	HCA_ENTER(HCA_DBG_AV);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV,
				("User mode is not supported yet\n"));
			status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}

	// fill parameters: convert IBAL attributes to the low-level format
	RtlZeroMemory(&ah_attr, sizeof(ah_attr));
	mlnx_conv_ibal_av( ib_dev, p_addr_vector,  &ah_attr );

	// modify AH
#ifdef WIN_TO_BE_CHANGED
	// NOTE(review): this branch is compiled out; it does not build as-is
	// (missing ';', 'err' undeclared) and its error message wrongly says
	// "ibv_query_ah" for a modify operation.
	//TODO: not implemented in low-level driver
	err = ibv_modify_ah(ib_ah_p, &ah_attr)
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_AV,
			("ibv_query_ah failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_query_ah;
	}
#else

	// active path: apply the new attributes directly
	mlnx_modify_ah( ib_ah_p, &ah_attr );
#endif

err_user_unsupported:
	if (status != IB_SUCCESS)
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_AV);
	return status;
}
800 \r
801 ib_api_status_t\r
802 mlnx_destroy_av (\r
803         IN              const   ib_av_handle_t                          h_av)\r
804 {\r
805         int err;\r
806         ib_api_status_t         status = IB_SUCCESS;\r
807         struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;\r
808         PREP_IBDEV_FOR_PRINT(ib_ah_p->device);\r
809 \r
810         HCA_ENTER(HCA_DBG_AV);\r
811 \r
812         // destroy AV\r
813         err = ibv_destroy_ah( ib_ah_p );\r
814         if (err) {\r
815                 HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_AV,\r
816                         ("ibv_destroy_ah failed (%d)\n", err));\r
817                 status = errno_to_iberr(err);\r
818                 goto err_destroy_ah;\r
819         }\r
820 \r
821 err_destroy_ah:\r
822         if (status != IB_SUCCESS)\r
823         {\r
824                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV,\r
825                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
826         }\r
827         HCA_EXIT(HCA_DBG_AV);\r
828         return status;\r
829 }\r
830 \r
831 /*\r
832 *       Shared Queue Pair Management Verbs\r
833 */\r
834 \r
835 \r
/*
 * mlnx_create_srq
 *
 * Create a shared receive queue (SRQ) on the given protection domain.
 *
 * h_pd        - PD to create the SRQ on (cast of struct ib_pd *)
 * srq_context - opaque consumer context stored in the new SRQ object
 * p_srq_attr  - requested SRQ attributes (depth, SGE limit, ...)
 * ph_srq      - receives the handle of the new SRQ on success
 * p_umv_buf   - user-mode verb buffer; when p_umv_buf->command is set
 *               the SRQ is created on behalf of a user-mode consumer
 *               and the buffer must carry ibv_create_srq in/out structs
 *
 * Returns IB_SUCCESS, IB_INVALID_PARAMETER (malformed umv buffer), or
 * an error mapped from the low-level driver via errno_to_iberr().
 * On any exit the status is echoed into p_umv_buf->status for
 * user-mode callers.
 */
ib_api_status_t
mlnx_create_srq (
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	void						*srq_context,
	IN		const	ib_srq_attr_t * const		p_srq_attr,
		OUT			ib_srq_handle_t				*ph_srq,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	ib_api_status_t		status;
	struct ib_srq *ib_srq_p;
	struct mthca_srq *srq_p;
	struct ib_srq_init_attr srq_init_attr;
	struct ib_ucontext *p_context = NULL;
	struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
	struct ib_device *ib_dev = ib_pd_p->device;
	mlnx_hob_t	 *hob_p = HOB_FROM_IBDEV(ib_dev);

	HCA_ENTER(HCA_DBG_SRQ);

	// user-mode request: validate the marshalled buffer before use
	if( p_umv_buf  && p_umv_buf->command) {

		// sanity checks 
		if (p_umv_buf->input_size < sizeof(struct ibv_create_srq) ||
			p_umv_buf->output_size < sizeof(struct ibv_create_srq_resp) ||
			!p_umv_buf->p_inout_buf) {
			status = IB_INVALID_PARAMETER;
			goto err_inval_params;
		}
		p_context = ib_pd_p->ucontext;
	}

	// prepare the parameters
	RtlZeroMemory(&srq_init_attr, sizeof(srq_init_attr));
	srq_init_attr.event_handler = srq_event_handler;
	// the HOB (not srq_context) is stored as the low-level context so
	// srq_event_handler can locate the HCA object on async events
	srq_init_attr.srq_context = hob_p;
	srq_init_attr.attr = *p_srq_attr;

	// allocate srq 
	ib_srq_p = ibv_create_srq(ib_pd_p, &srq_init_attr, p_context, p_umv_buf );
	if (IS_ERR(ib_srq_p)) {
		err = PTR_ERR(ib_srq_p);
		HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SRQ, ("ibv_create_srq failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_create_srq;
	}

	// fill the object: stash the consumer's context in the mthca SRQ
	srq_p = (struct mthca_srq *)ib_srq_p;
	srq_p->srq_context = (void*)srq_context;
	
	// return the result
	if (ph_srq) *ph_srq = (ib_srq_handle_t)srq_p;

	status = IB_SUCCESS;
	
err_create_srq:
err_inval_params:
	// propagate the final status to the user-mode caller, if any
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	if (status != IB_SUCCESS)
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_SRQ);
	return status;
}
904 \r
905 \r
906 ib_api_status_t\r
907 mlnx_modify_srq (\r
908                 IN              const   ib_srq_handle_t                         h_srq,\r
909                 IN              const   ib_srq_attr_t* const                    p_srq_attr,\r
910                 IN              const   ib_srq_attr_mask_t                      srq_attr_mask,\r
911                 IN      OUT             ci_umv_buf_t                            *p_umv_buf OPTIONAL )\r
912 {\r
913         int err;\r
914         ib_api_status_t         status = IB_SUCCESS;\r
915         struct ib_srq *ib_srq = (struct ib_srq *)h_srq;\r
916         struct ib_device *ib_dev = ib_srq->device;\r
917         UNUSED_PARAM(p_umv_buf);\r
918     UNUSED_PARAM_WOWPP(ib_dev);\r
919 \r
920         HCA_ENTER(HCA_DBG_SRQ);\r
921 \r
922         err = ibv_modify_srq(ib_srq, (void*)p_srq_attr, srq_attr_mask);\r
923         if (err) {\r
924                 HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,\r
925                         ("ibv_modify_srq failed (%d)\n", err));\r
926                 status = errno_to_iberr(err);\r
927         }\r
928 \r
929         if (status != IB_SUCCESS)\r
930         {\r
931                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,\r
932                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
933         }\r
934         HCA_EXIT(HCA_DBG_SRQ);\r
935         return status;\r
936 }\r
937 \r
938 ib_api_status_t\r
939 mlnx_query_srq (\r
940         IN              const   ib_srq_handle_t                         h_srq,\r
941                 OUT                     ib_srq_attr_t* const                    p_srq_attr,\r
942         IN      OUT                     ci_umv_buf_t                            *p_umv_buf OPTIONAL )\r
943 {\r
944         int err;\r
945         ib_api_status_t         status = IB_SUCCESS;\r
946         struct ib_srq *ib_srq = (struct ib_srq *)h_srq;\r
947         struct ib_device *ib_dev = ib_srq->device;\r
948         UNUSED_PARAM(p_umv_buf);\r
949         UNUSED_PARAM_WOWPP(ib_dev);\r
950 \r
951         HCA_ENTER(HCA_DBG_SRQ);\r
952 \r
953         err = ibv_query_srq(ib_srq, p_srq_attr);\r
954         if (err) {\r
955                 HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,\r
956                         ("ibv_query_srq failed (%d)\n", err));\r
957                 status = errno_to_iberr(err);\r
958         }\r
959 \r
960         if (status != IB_SUCCESS)\r
961         {\r
962                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,\r
963                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
964         }\r
965         HCA_EXIT(HCA_DBG_SRQ);\r
966         return status;\r
967 }\r
968 \r
969 ib_api_status_t\r
970 mlnx_destroy_srq (\r
971         IN      const   ib_srq_handle_t         h_srq )\r
972 {\r
973         int err;\r
974         ib_api_status_t         status = IB_SUCCESS;\r
975         struct ib_srq *ib_srq = (struct ib_srq *)h_srq;\r
976         struct ib_device *ib_dev = ib_srq->device;\r
977     UNUSED_PARAM_WOWPP(ib_dev);\r
978 \r
979         HCA_ENTER(HCA_DBG_SRQ);\r
980 \r
981         err = ibv_destroy_srq(ib_srq);\r
982         if (err) {\r
983                 HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,\r
984                         ("ibv_destroy_srq failed (%d)\n", err));\r
985                 status = errno_to_iberr(err);\r
986         }\r
987 \r
988         if (status != IB_SUCCESS)\r
989         {\r
990                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ,\r
991                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
992         }\r
993         HCA_EXIT(HCA_DBG_SRQ);\r
994         return status;\r
995 }\r
996 \r
997 /*\r
998 *       Queue Pair Management Verbs\r
999 */\r
1000 \r
1001 \r
/*
 * _create_qp
 *
 * Common worker for mlnx_create_qp and mlnx_create_spl_qp: create a
 * queue pair on the given PD and optionally report its attributes.
 *
 * h_pd          - PD to create the QP on (cast of struct ib_pd *)
 * port_num      - port to bind the QP to (used by special QPs)
 * qp_context    - opaque consumer context stored in the new QP object
 * p_create_attr - IBAL creation attributes (CQs, SRQ, depths, SGEs, ...)
 * p_qp_attr     - optional; receives the new QP's attributes on success
 * ph_qp         - receives the handle of the new QP on success
 * p_umv_buf     - user-mode verb buffer, or NULL for kernel consumers;
 *                 when p_umv_buf->command is set it must carry
 *                 ibv_create_qp in/out structs
 *
 * Returns IB_SUCCESS, IB_INVALID_PARAMETER (malformed umv buffer), or
 * an error mapped via errno_to_iberr(). If the post-create query fails,
 * the QP is destroyed before returning. The final status is echoed into
 * p_umv_buf->status for user-mode callers.
 */
static ib_api_status_t
_create_qp (
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	uint8_t						port_num,
	IN		const	void						*qp_context,
	IN		const	ib_qp_create_t				*p_create_attr,
		OUT			ib_qp_attr_t				*p_qp_attr,
		OUT			ib_qp_handle_t				*ph_qp,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	ib_api_status_t		status;
	struct ib_qp * ib_qp_p;
	struct mthca_qp *qp_p;
	struct ib_qp_init_attr qp_init_attr;
	struct ib_ucontext *p_context = NULL;
	struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
	struct ib_device *ib_dev = ib_pd_p->device;
	mlnx_hob_t	 *hob_p = HOB_FROM_IBDEV(ib_dev);
	
	HCA_ENTER(HCA_DBG_QP);

	// user-mode request: validate the marshalled buffer before use
	if( p_umv_buf && p_umv_buf->command ) {
		// sanity checks 
		if (p_umv_buf->input_size < sizeof(struct ibv_create_qp) ||
			p_umv_buf->output_size < sizeof(struct ibv_create_qp_resp) ||
			!p_umv_buf->p_inout_buf) {
			status = IB_INVALID_PARAMETER;
			goto err_inval_params;
		}
		p_context = ib_pd_p->ucontext;
	}

	// prepare the parameters: translate IBAL create attrs to ib_qp_init_attr
	RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));
	qp_init_attr.qp_type = p_create_attr->qp_type;
	qp_init_attr.event_handler = qp_event_handler;
	// the HOB (not qp_context) is stored as the low-level context so
	// qp_event_handler can locate the HCA object on async events
	qp_init_attr.qp_context = hob_p;
	qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;
	qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;
	qp_init_attr.srq = (struct ib_srq *)p_create_attr->h_srq;
	qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;
	qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;
	qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;
	qp_init_attr.cap.max_send_wr = p_create_attr->sq_depth;
	qp_init_attr.sq_sig_type = (p_create_attr->sq_signaled) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	qp_init_attr.port_num = port_num;


	// create qp		
	ib_qp_p = ibv_create_qp( ib_pd_p, &qp_init_attr, p_context, p_umv_buf );
	if (IS_ERR(ib_qp_p)) {
		err = PTR_ERR(ib_qp_p);
		HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_QP,
			("ibv_create_qp failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_create_qp;
	}

	// fill the object: keep the consumer context and a copy of the
	// creation attributes (mlnx_query_qp reports from this copy)
	qp_p = (struct mthca_qp *)ib_qp_p;
	qp_p->qp_context = (void*)qp_context;
	qp_p->qp_init_attr = qp_init_attr;

	// Query QP to obtain requested attributes
	if (p_qp_attr) {
		status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);
		if (status != IB_SUCCESS)
				goto err_query_qp;
	}
	
	// return the results
	if (ph_qp) *ph_qp = (ib_qp_handle_t)ib_qp_p;

	status = IB_SUCCESS;
	goto end;

err_query_qp:
	// undo the creation if we could not report the attributes
	ibv_destroy_qp( ib_qp_p );
err_create_qp:
err_inval_params:
end:
	// propagate the final status to the user-mode caller, if any
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	if (status != IB_SUCCESS)
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_QP);
	return status;
}
1094 \r
1095 ib_api_status_t\r
1096 mlnx_create_spl_qp (\r
1097         IN              const   ib_pd_handle_t                          h_pd,\r
1098         IN              const   uint8_t                                         port_num,\r
1099         IN              const   void                                            *qp_context,\r
1100         IN              const   ib_qp_create_t                          *p_create_attr,\r
1101                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
1102                 OUT                     ib_qp_handle_t                          *ph_qp )\r
1103 {\r
1104         ib_api_status_t         status;\r
1105         PREP_IBDEV_FOR_PRINT(((struct ib_pd*)h_pd)->device);\r
1106 \r
1107         HCA_ENTER(HCA_DBG_SHIM);\r
1108 \r
1109         status =        _create_qp( h_pd, port_num,\r
1110                 qp_context, p_create_attr, p_qp_attr, ph_qp, NULL );\r
1111                 \r
1112         if (status != IB_SUCCESS)\r
1113         {\r
1114                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,\r
1115                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1116         }\r
1117         HCA_EXIT(HCA_DBG_QP);\r
1118         return status;\r
1119 }\r
1120 \r
1121 ib_api_status_t\r
1122 mlnx_create_qp (\r
1123         IN              const   ib_pd_handle_t                          h_pd,\r
1124         IN              const   void                                            *qp_context,\r
1125         IN              const   ib_qp_create_t                          *p_create_attr,\r
1126                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
1127                 OUT                     ib_qp_handle_t                          *ph_qp,\r
1128         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1129 {\r
1130         ib_api_status_t         status;\r
1131         PREP_IBDEV_FOR_PRINT(((struct ib_pd*)h_pd)->device);\r
1132 \r
1133         //NB: algorithm of mthca_alloc_sqp() requires port_num\r
1134         // PRM states, that special pares are created in couples, so\r
1135         // looks like we can put here port_num = 1 always\r
1136         uint8_t port_num = 1;\r
1137 \r
1138         HCA_ENTER(HCA_DBG_QP);\r
1139 \r
1140         status = _create_qp( h_pd, port_num,\r
1141                 qp_context, p_create_attr, p_qp_attr, ph_qp, p_umv_buf );\r
1142                 \r
1143         if (status != IB_SUCCESS)\r
1144         {\r
1145                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,\r
1146                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1147         }\r
1148         HCA_EXIT(HCA_DBG_QP);\r
1149         return status;\r
1150 }\r
1151 \r
/*
 * mlnx_modify_qp
 *
 * Transition a QP according to IBAL modify attributes, optionally
 * report the resulting attributes, and (for user-mode callers) copy
 * an ibv_modify_qp_resp back through the umv buffer.
 *
 * h_qp          - QP to modify (cast of struct ib_qp *)
 * p_modify_attr - IBAL modify attributes
 * p_qp_attr     - optional; receives the QP attributes after the modify
 * p_umv_buf     - optional user-mode verb buffer; when present with
 *                 command set it must have room for ibv_modify_qp_resp
 *
 * Returns IB_SUCCESS, IB_INVALID_PARAMETER, or an error mapped via
 * errno_to_iberr(). The final status is echoed into p_umv_buf->status.
 */
ib_api_status_t
mlnx_modify_qp (
	IN		const	ib_qp_handle_t				h_qp,
	IN		const	ib_qp_mod_t					*p_modify_attr,
		OUT			ib_qp_attr_t				*p_qp_attr OPTIONAL,
	IN	OUT			ci_umv_buf_t				*p_umv_buf OPTIONAL )
{
	ib_api_status_t		status;
	int err;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
	PREP_IBDEV_FOR_PRINT(ib_qp_p->device);

	HCA_ENTER(HCA_DBG_QP);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
		// sanity checks 
		if (p_umv_buf->output_size < sizeof(struct ibv_modify_qp_resp) ||
			!p_umv_buf->p_inout_buf) {
			status = IB_INVALID_PARAMETER;
			goto err_inval_params;
		}
	}
	
	// fill parameters: convert IBAL modify attrs to ib_qp_attr + mask.
	// IB_NOT_DONE means "nothing to change at the HW level" - skip the
	// modify itself but still perform the query/response steps below.
	status = mlnx_conv_qp_modify_attr( ib_qp_p, ib_qp_p->qp_type, 
		p_modify_attr,	&qp_attr, &qp_attr_mask );
	if (status == IB_NOT_DONE)
		goto query_qp;
	if (status != IB_SUCCESS ) 
		goto err_mode_unsupported;

	// modify QP
	err = ibv_modify_qp(ib_qp_p, &qp_attr, qp_attr_mask);
	if (err) {
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_QP,
			("ibv_modify_qp failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_modify_qp;
	}

	// Query QP to obtain requested attributes
query_qp:	
	if (p_qp_attr) {
		status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);
		if (status != IB_SUCCESS)
				goto err_query_qp;
	}
	
	// NOTE(review): on the IB_NOT_DONE path, resp is built from
	// qp_attr/qp_attr_mask; this assumes mlnx_conv_qp_modify_attr
	// filled them before returning IB_NOT_DONE - confirm in its source.
	if( p_umv_buf && p_umv_buf->command ) {
			struct ibv_modify_qp_resp resp;
			resp.attr_mask = qp_attr_mask;
			resp.qp_state = qp_attr.qp_state;
			err = ib_copy_to_umv_buf(p_umv_buf, &resp, sizeof(struct ibv_modify_qp_resp));
			if (err) {
				HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("ib_copy_to_umv_buf failed (%d)\n", err));
				status = errno_to_iberr(err);
				goto err_copy;
			}
	}

	status = IB_SUCCESS;

err_copy:	
err_query_qp:
err_modify_qp:	
err_mode_unsupported:
err_inval_params:
	// propagate the final status to the user-mode caller, if any
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	if (status != IB_SUCCESS)
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_QP);
	return status;
}
1232 \r
1233 ib_api_status_t\r
1234 mlnx_ndi_modify_qp (\r
1235         IN              const   ib_qp_handle_t                          h_qp,\r
1236         IN              const   ib_qp_mod_t                                     *p_modify_attr,\r
1237                 OUT                     ib_qp_attr_t                            *p_qp_attr OPTIONAL,\r
1238         IN              const   uint32_t                                        buf_size,\r
1239         IN                              uint8_t* const                          p_outbuf)\r
1240 {\r
1241         ci_umv_buf_t umv_buf;\r
1242         ib_api_status_t status;\r
1243         struct ibv_modify_qp_resp resp;\r
1244         void *buf = &resp;\r
1245 \r
1246         HCA_ENTER(HCA_DBG_QP);\r
1247 \r
1248         /* imitate umv_buf */\r
1249         umv_buf.command = TRUE; /* special case for NDI. Usually it's TRUE */\r
1250         umv_buf.input_size = 0;\r
1251         umv_buf.output_size = sizeof(struct ibv_modify_qp_resp);\r
1252         umv_buf.p_inout_buf = (ULONG_PTR)buf;\r
1253 \r
1254         status = mlnx_modify_qp ( h_qp, p_modify_attr, p_qp_attr, &umv_buf );\r
1255 \r
1256         if (status == IB_SUCCESS) {\r
1257                 cl_memclr( p_outbuf, buf_size );\r
1258                 *p_outbuf = resp.qp_state;\r
1259         }\r
1260 \r
1261         HCA_EXIT(HCA_DBG_QP);\r
1262         return status;\r
1263 }\r
1264 \r
/*
 * mlnx_query_qp
 *
 * Report a QP's attributes in IBAL format. Because query_qp is not
 * implemented in the low-level driver, the values come from the copy
 * of the creation attributes cached in struct mthca_qp plus a few
 * live fields (state, qp_num, resp_depth) - not from the hardware.
 *
 * h_qp      - QP to query (cast of struct ib_qp * / struct mthca_qp *)
 * p_qp_attr - receives the attributes; zeroed first, so fields that
 *             cannot be obtained (qkey, PSNs, ports, AVs, ...) read 0
 * p_umv_buf - unused
 *
 * Always returns IB_SUCCESS.
 */
ib_api_status_t
mlnx_query_qp (
	IN		const	ib_qp_handle_t				h_qp,
		OUT			ib_qp_attr_t				*p_qp_attr,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t		status = IB_SUCCESS;
	struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
	struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;

	UNREFERENCED_PARAMETER(p_umv_buf);
	
	HCA_ENTER( HCA_DBG_QP);
	// sanity checks

	// clean the structure so unreported fields are deterministically zero
	RtlZeroMemory( p_qp_attr, sizeof *p_qp_attr );
	
	// fill the structure
	//TODO: this function is to be implemented via ibv_query_qp, which is not supported now 
	p_qp_attr->h_pd					= (ib_pd_handle_t)qp_p->ibqp.pd;
	p_qp_attr->qp_type				= qp_p->ibqp.qp_type;
	p_qp_attr->sq_max_inline		= qp_p->qp_init_attr.cap.max_inline_data;
	p_qp_attr->sq_depth				= qp_p->qp_init_attr.cap.max_send_wr;
	p_qp_attr->rq_depth				= qp_p->qp_init_attr.cap.max_recv_wr;
	p_qp_attr->sq_sge					= qp_p->qp_init_attr.cap.max_send_sge;
	p_qp_attr->rq_sge					= qp_p->qp_init_attr.cap.max_recv_sge;
	p_qp_attr->resp_res				= qp_p->resp_depth;
	p_qp_attr->h_sq_cq				= (ib_cq_handle_t)qp_p->ibqp.send_cq;
	p_qp_attr->h_rq_cq				= (ib_cq_handle_t)qp_p->ibqp.recv_cq;
	p_qp_attr->sq_signaled			= qp_p->sq_policy == IB_SIGNAL_ALL_WR;
	p_qp_attr->state						= mlnx_qps_to_ibal( qp_p->state );
	p_qp_attr->num						= cl_hton32(qp_p->ibqp.qp_num);

#ifdef WIN_TO_BE_CHANGED
// NOTE(review): dead, syntactically incomplete placeholder - kept only
// as a checklist of fields that need ibv_query_qp support in MTHCA.
//TODO: don't know how to fill the following fields	without support of query_qp in MTHCA	
	p_qp_attr->access_ctrl			= qp_p->
	p_qp_attr->pkey_index			= qp_p->
	p_qp_attr->dest_num				= qp_p-
	p_qp_attr->init_depth			= qp_p-
	p_qp_attr->qkey						= qp_p-
	p_qp_attr->sq_psn					= qp_p-
	p_qp_attr->rq_psn					= qp_p-
	p_qp_attr->primary_port		= qp_p-
	p_qp_attr->alternate_port		= qp_p-
	p_qp_attr->primary_av			= qp_p-
	p_qp_attr->alternate_av			= qp_p-
	p_qp_attr->apm_state			= qp_p-
#endif		

	status = IB_SUCCESS;

	HCA_EXIT(HCA_DBG_QP);
	return status;
}
1320 \r
1321 ib_api_status_t\r
1322 mlnx_destroy_qp (\r
1323         IN              const   ib_qp_handle_t                          h_qp,\r
1324         IN              const   uint64_t                                        timewait )\r
1325 {\r
1326         ib_api_status_t         status;\r
1327         int err;\r
1328         struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
1329         PREP_IBDEV_FOR_PRINT(ib_qp_p->device);\r
1330 \r
1331         UNUSED_PARAM( timewait );\r
1332 \r
1333         HCA_ENTER( HCA_DBG_QP);\r
1334 \r
1335         HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  ,\r
1336                 ("qpnum %#x, pcs %p\n", ib_qp_p->qp_num, PsGetCurrentProcess()) );\r
1337 \r
1338         err = ibv_destroy_qp( ib_qp_p );\r
1339         if (err) {\r
1340                 HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,\r
1341                         ("ibv_destroy_qp failed (%d)\n", err));\r
1342                 status = errno_to_iberr(err);\r
1343                 goto err_destroy_qp;\r
1344         }\r
1345 \r
1346         status = IB_SUCCESS;\r
1347 \r
1348 err_destroy_qp:\r
1349         if (status != IB_SUCCESS)\r
1350         {\r
1351                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,\r
1352                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1353         }\r
1354         HCA_EXIT(HCA_DBG_QP);\r
1355         return status;\r
1356 }\r
1357 \r
/*
* Completion Queue Management Verbs.
*/
1361 \r
/*
 * mlnx_create_cq
 *
 * Create a completion queue.
 *
 * h_ca       - interpreted two ways: when p_umv_buf is non-NULL it is
 *              the user context (struct ib_ucontext *); otherwise it is
 *              the kernel HCA object (mlnx_hob_t *)
 * cq_context - opaque consumer context stored in the new CQ object
 * p_size     - in: requested number of CQEs; out: actual CQEs allocated
 * ph_cq      - receives the handle of the new CQ on success
 * p_umv_buf  - user-mode verb buffer, or NULL for kernel consumers;
 *              with command set it must carry ibv_create_cq in/out structs
 *
 * Returns IB_SUCCESS, IB_INVALID_PARAMETER, IB_INVALID_CQ_SIZE, or an
 * error mapped via errno_to_iberr(). The final status is echoed into
 * p_umv_buf->status for user-mode callers.
 */
ib_api_status_t
mlnx_create_cq (
	IN		const	ib_ca_handle_t				h_ca,
	IN		const	void						*cq_context,
	IN	OUT			uint32_t					*p_size,
		OUT			ib_cq_handle_t				*ph_cq,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	ib_api_status_t		status;
	struct ib_cq *ib_cq_p;
	struct mthca_cq *cq_p;
	mlnx_hob_t			*hob_p;
	struct ib_device *ib_dev;
	struct ib_ucontext *p_context;

	HCA_ENTER(HCA_DBG_CQ);

	if( p_umv_buf ) {

		// user-mode path: h_ca is the user context
		p_context = (struct ib_ucontext *)h_ca;
		hob_p = HOB_FROM_IBDEV(p_context->device);
		ib_dev = p_context->device;

		// sanity checks 
		if (p_umv_buf->input_size < sizeof(struct ibv_create_cq) ||
			p_umv_buf->output_size < sizeof(struct ibv_create_cq_resp) ||
			!p_umv_buf->p_inout_buf) {
			status = IB_INVALID_PARAMETER;
			goto err_inval_params;
		}
	}
	else {
		// kernel path: h_ca is the HCA object itself
		hob_p = (mlnx_hob_t *)h_ca;
		p_context = NULL;
		ib_dev = IBDEV_FROM_HOB( hob_p );
	}

	/* sanity check: requested size must be non-zero and within HW limits */
	if (!*p_size || *p_size > (uint32_t)ib_dev->mdev->limits.max_cqes) {
		status = IB_INVALID_CQ_SIZE;
		goto err_cqe;
	}

	// allocate cq	
	ib_cq_p = ibv_create_cq(ib_dev, 
		cq_comp_handler, cq_event_handler,
		hob_p, *p_size, p_context, p_umv_buf );
	if (IS_ERR(ib_cq_p)) {
		err = PTR_ERR(ib_cq_p);
		HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_CQ, ("ibv_create_cq failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_create_cq;
	}

	// fill the object: stash the consumer's context in the mthca CQ
	cq_p = (struct mthca_cq *)ib_cq_p;
	cq_p->cq_context = (void*)cq_context;
	
	// return the result: report the CQE count actually allocated,
	// which may differ from the requested size
//	*p_size = *p_size;	// return the same value
	*p_size = ib_cq_p->cqe;

	if (ph_cq) *ph_cq = (ib_cq_handle_t)cq_p;

	status = IB_SUCCESS;
	
err_create_cq:
err_inval_params:
err_cqe:
	// propagate the final status to the user-mode caller, if any
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	if (status != IB_SUCCESS)
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_CQ);
	return status;
}
1442 \r
1443 ib_api_status_t\r
1444 mlnx_resize_cq (\r
1445         IN              const   ib_cq_handle_t                          h_cq,\r
1446         IN      OUT                     uint32_t                                        *p_size,\r
1447         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1448 {\r
1449         UNREFERENCED_PARAMETER(h_cq);\r
1450         UNREFERENCED_PARAMETER(p_size);\r
1451         if (p_umv_buf && p_umv_buf->command) {\r
1452                 p_umv_buf->status = IB_UNSUPPORTED;\r
1453         }\r
1454         HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_CQ,("mlnx_resize_cq not implemented\n"));\r
1455         return IB_UNSUPPORTED;\r
1456 }\r
1457 \r
1458 ib_api_status_t\r
1459 mlnx_query_cq (\r
1460         IN              const   ib_cq_handle_t                          h_cq,\r
1461                 OUT                     uint32_t                                        *p_size,\r
1462         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1463 {\r
1464         UNREFERENCED_PARAMETER(h_cq);\r
1465         UNREFERENCED_PARAMETER(p_size);\r
1466         if (p_umv_buf && p_umv_buf->command) {\r
1467                 p_umv_buf->status = IB_UNSUPPORTED;\r
1468         }\r
1469         HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ,("mlnx_query_cq not implemented\n"));\r
1470         return IB_UNSUPPORTED;\r
1471 }\r
1472 \r
1473 ib_api_status_t\r
1474 mlnx_destroy_cq (\r
1475         IN              const   ib_cq_handle_t                          h_cq)\r
1476 {\r
1477                                                                                                                                                                 \r
1478         ib_api_status_t         status;\r
1479         int err;\r
1480         struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;\r
1481         PREP_IBDEV_FOR_PRINT(ib_cq_p->device);\r
1482 \r
1483         HCA_ENTER( HCA_DBG_QP);\r
1484 \r
1485         HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_CQ,\r
1486                 ("cqn %#x, pcs %p\n", ((struct mthca_cq*)ib_cq_p)->cqn, PsGetCurrentProcess()) );\r
1487 \r
1488         // destroy CQ\r
1489         err = ibv_destroy_cq( ib_cq_p );\r
1490         if (err) {\r
1491                 HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,\r
1492                         ("ibv_destroy_cq failed (%d)\n", err));\r
1493                 status = errno_to_iberr(err);\r
1494                 goto err_destroy_cq;\r
1495         }\r
1496 \r
1497         status = IB_SUCCESS;\r
1498 \r
1499 err_destroy_cq:\r
1500         if (status != IB_SUCCESS)\r
1501         {\r
1502                 HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ,\r
1503                         ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1504         }\r
1505         HCA_EXIT(HCA_DBG_CQ);\r
1506         return status;\r
1507 }\r
1508 \r
1509 \r
ib_api_status_t
mlnx_local_mad (
	IN		const	ib_ca_handle_t				h_ca,
	IN		const	uint8_t						port_num,
	IN		const	ib_av_attr_t*				p_av_attr,
	IN		const	ib_mad_t					*p_mad_in,
	OUT		ib_mad_t					*p_mad_out )
{
	/* Process a locally-generated MAD against this HCA's firmware agent.
	 *
	 * h_ca      - HCA handle (mlnx_hob_t pointer).
	 * port_num  - physical port the MAD targets; values > 2 are rejected
	 *             (mthca HCAs have at most 2 ports).
	 * p_av_attr - optional address-vector attributes; when present they are
	 *             repackaged into a synthetic work completion (and GRH) so
	 *             the firmware sees the MAD's source addressing.
	 * p_mad_in  - request MAD.
	 * p_mad_out - response MAD, filled on success.
	 *
	 * Returns IB_SUCCESS, IB_INVALID_PARAMETER, IB_INSUFFICIENT_MEMORY,
	 * or IB_ERROR when firmware processing fails.
	 */
	int err;
	ib_api_status_t		status = IB_SUCCESS;
	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );
	//TODO: do we need use flags (IB_MAD_IGNORE_MKEY, IB_MAD_IGNORE_BKEY) ?
	int mad_flags = 0;  
	struct _ib_wc *wc_p = NULL;
	//TODO: do we need use grh ?
	struct _ib_grh *grh_p = NULL;

	HCA_ENTER(HCA_DBG_MAD);

	// sanity checks
	if (port_num > 2) {
		status = IB_INVALID_PARAMETER;
		goto err_port_num;
	}

	/* Build a synthetic work completion from the AV attributes so the MAD
	 * layer can see the sender's LID/SL/path bits, as if the MAD had been
	 * received on the wire. */
	if (p_av_attr){
		wc_p = cl_zalloc(sizeof(struct _ib_wc));
		if(!wc_p){
			status =  IB_INSUFFICIENT_MEMORY ;
			goto err_wc_alloc;
		}
		//Copy part of the attributes need to fill the mad extended fields in mellanox devices
		wc_p->recv.ud.remote_lid = p_av_attr->dlid;
		wc_p->recv.ud.remote_sl  = p_av_attr->sl;
		wc_p->recv.ud.path_bits  = p_av_attr->path_bits;
		wc_p->recv.ud.recv_opt = p_av_attr->grh_valid?IB_RECV_OPT_GRH_VALID:0;

		/* a GRH is only materialized when the AV says one is present */
		if(wc_p->recv.ud.recv_opt &IB_RECV_OPT_GRH_VALID){
			grh_p = cl_zalloc(sizeof(struct _ib_grh));
			if(!grh_p){
				status =  IB_INSUFFICIENT_MEMORY ;
				goto err_grh_alloc;
			}
			cl_memcpy(grh_p, &p_av_attr->grh, sizeof(ib_grh_t));
		}
			

	}

	HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_MAD, 
		("MAD: Class %02x, Method %02x, Attr %02x, HopPtr %d, HopCnt %d, \n",
		(uint32_t)((ib_smp_t *)p_mad_in)->mgmt_class, 
		(uint32_t)((ib_smp_t *)p_mad_in)->method, 
		(uint32_t)((ib_smp_t *)p_mad_in)->attr_id, 
		(uint32_t)((ib_smp_t *)p_mad_in)->hop_ptr,
		(uint32_t)((ib_smp_t *)p_mad_in)->hop_count));

	
	// process mad
	
	/* NOTE: mthca_process_mad returns 0 on failure (success is reported
	 * via non-zero IB_MAD_RESULT_* flags), hence the inverted test below. */
	err = mthca_process_mad(ib_dev, mad_flags, (uint8_t)port_num, 
		wc_p, grh_p, (struct ib_mad*)p_mad_in, (struct ib_mad*)p_mad_out);
	if (!err) {
		HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_MAD, 
			("MAD failed:\n\tClass 0x%x\n\tMethod 0x%x\n\tAttr 0x%x",
			p_mad_in->mgmt_class, p_mad_in->method, p_mad_in->attr_id ));
		status = IB_ERROR;
		goto err_process_mad;
	}
	
	/* Post-process PortInfo responses (both directed-route and LID-routed
	 * subnet management classes). */
	if( (p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR ||
		p_mad_in->mgmt_class == IB_MCLASS_SUBN_LID) &&
		p_mad_in->attr_id == IB_MAD_ATTR_PORT_INFO )
	{
		ib_port_info_t	*p_pi_in, *p_pi_out;

		/* locate the PortInfo payload: SMP accessor for directed-route,
		 * fixed offset past the MAD header otherwise */
		if( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR )
		{
			p_pi_in = (ib_port_info_t*)
				ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in );
			p_pi_out = (ib_port_info_t*)
				ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out );
		}
		else
		{
			p_pi_in = (ib_port_info_t*)(p_mad_in + 1);
			p_pi_out = (ib_port_info_t*)(p_mad_out + 1);
		}

		/* Work around FW bug 33958 */
		/* clear the top bit the FW may corrupt, then for Set() restore the
		 * caller's original top bit (the mask must happen before the OR) */
		p_pi_out->subnet_timeout &= 0x7F;
		if( p_mad_in->method == IB_MAD_METHOD_SET )
			p_pi_out->subnet_timeout |= (p_pi_in->subnet_timeout & 0x80);
	}

	/* Modify direction for Direct MAD */
	if ( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR )
		p_mad_out->status |= IB_SMP_DIRECTION;


	/* fall-through cleanup chain: later labels free what earlier code
	 * allocated; cl_free is only called on non-NULL pointers */
err_process_mad:
	if(grh_p)
		cl_free(grh_p);
err_grh_alloc:
	if(wc_p)
		cl_free(wc_p);
err_wc_alloc:
err_port_num:	
	if (status != IB_SUCCESS)
	{
		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MAD,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	}
	HCA_EXIT(HCA_DBG_MAD);
	return status;
}
1627         \r
1628 \r
1629 void\r
1630 setup_ci_interface(\r
1631         IN              const   ib_net64_t                                      ca_guid,\r
1632         IN              const   int                                                     is_livefish,\r
1633         IN      OUT                     ci_interface_t                          *p_interface )\r
1634 {\r
1635         cl_memclr(p_interface, sizeof(*p_interface));\r
1636 \r
1637         /* Guid of the CA. */\r
1638         p_interface->guid = ca_guid;\r
1639 \r
1640         /* Version of this interface. */\r
1641         p_interface->version = VERBS_VERSION;\r
1642 \r
1643         /* UVP name */\r
1644         cl_memcpy( p_interface->libname, mlnx_uvp_lib_name, MAX_LIB_NAME);\r
1645 \r
1646         HCA_PRINT(TRACE_LEVEL_VERBOSE  , HCA_DBG_SHIM  ,("UVP filename %s\n", p_interface->libname));\r
1647 \r
1648         /* The real interface. */\r
1649         p_interface->open_ca = mlnx_open_ca;\r
1650         p_interface->query_ca = mlnx_query_ca;\r
1651         p_interface->close_ca = mlnx_close_ca;\r
1652         p_interface->um_open_ca = mlnx_um_open;\r
1653         p_interface->um_close_ca = mlnx_um_close;\r
1654 \r
1655         p_interface->allocate_pd = mlnx_allocate_pd;\r
1656         p_interface->deallocate_pd = mlnx_deallocate_pd;\r
1657         p_interface->vendor_call = fw_access_ctrl;\r
1658 \r
1659         if (is_livefish) {\r
1660                 mlnx_memory_if_livefish(p_interface);\r
1661         }\r
1662         else {  \r
1663                 p_interface->modify_ca = mlnx_modify_ca; \r
1664                 \r
1665                 p_interface->create_av = mlnx_create_av;\r
1666                 p_interface->query_av = mlnx_query_av;\r
1667                 p_interface->modify_av = mlnx_modify_av;\r
1668                 p_interface->destroy_av = mlnx_destroy_av;\r
1669 \r
1670                 p_interface->create_srq = mlnx_create_srq;\r
1671                 p_interface->modify_srq = mlnx_modify_srq;\r
1672                 p_interface->query_srq = mlnx_query_srq;\r
1673                 p_interface->destroy_srq = mlnx_destroy_srq;\r
1674 \r
1675                 p_interface->create_qp = mlnx_create_qp;\r
1676                 p_interface->create_spl_qp = mlnx_create_spl_qp;\r
1677                 p_interface->modify_qp = mlnx_modify_qp;\r
1678                 p_interface->ndi_modify_qp = mlnx_ndi_modify_qp;\r
1679                 p_interface->query_qp = mlnx_query_qp;\r
1680                 p_interface->destroy_qp = mlnx_destroy_qp;\r
1681 \r
1682                 p_interface->create_cq = mlnx_create_cq;\r
1683                 p_interface->resize_cq = mlnx_resize_cq;\r
1684                 p_interface->query_cq = mlnx_query_cq;\r
1685                 p_interface->destroy_cq = mlnx_destroy_cq;\r
1686 \r
1687                 p_interface->local_mad = mlnx_local_mad;\r
1688                 \r
1689 \r
1690                 mlnx_memory_if(p_interface);\r
1691                 mlnx_direct_if(p_interface);\r
1692                 mlnx_mcast_if(p_interface);\r
1693         }\r
1694 \r
1695         return;\r
1696 }\r
1697 \r
1698 \r