[MTHCA] fixed a typo, producing a crash, and changed debug flags
[mirror/winof/.git] / hw / mthca / kernel / hca_verbs.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
4  *\r
5  * This software is available to you under the OpenIB.org BSD license\r
6  * below:\r
7  *\r
8  *     Redistribution and use in source and binary forms, with or\r
9  *     without modification, are permitted provided that the following\r
10  *     conditions are met:\r
11  *\r
12  *      - Redistributions of source code must retain the above\r
13  *        copyright notice, this list of conditions and the following\r
14  *        disclaimer.\r
15  *\r
16  *      - Redistributions in binary form must reproduce the above\r
17  *        copyright notice, this list of conditions and the following\r
18  *        disclaimer in the documentation and/or other materials\r
19  *        provided with the distribution.\r
20  *\r
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
22  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
24  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
25  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
26  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
27  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
28  * SOFTWARE.\r
29  *\r
30  * $Id: hca_verbs.c 148 2005-07-12 07:48:46Z sleybo $\r
31  */\r
32 \r
33 \r
34 #include "hca_driver.h"\r
35 #if defined(EVENT_TRACING)\r
36 #ifdef offsetof\r
37 #undef offsetof\r
38 #endif\r
39 #include "hca_verbs.tmh"\r
40 #endif\r
41 #include "mthca_dev.h"\r
42 #include "ib_cache.h"\r
43 #include "mx_abi.h"\r
44 \r
/* Round size up to the next multiple of sizeof(void*), so that tables packed
 * one after another inside the ib_ca_attr_t blob stay pointer-aligned. */
#define PTR_ALIGN(size) (((size) + sizeof(void*) - 1) & ~(sizeof(void*) - 1))
46 \r
47 \r
48 // Local declarations\r
49 ib_api_status_t\r
50 mlnx_query_qp (\r
51         IN              const   ib_qp_handle_t                          h_qp,\r
52                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
53         IN      OUT                     ci_umv_buf_t                            *p_umv_buf );\r
54 \r
55 /* \r
56 * CA Access Verbs\r
57 */\r
/*
 * Open an HCA by GUID: look up the HCA object, register the caller's
 * completion and async-event callbacks on its HOB, and allocate the MAD
 * cache.
 *
 * Returns IB_NOT_FOUND if no HCA with this GUID is registered,
 * IB_INSUFFICIENT_MEMORY if the MAD cache cannot be allocated, the
 * status of mlnx_hobs_set_cb on callback-registration failure, and
 * IB_SUCCESS otherwise (with *ph_ca set to the HOB handle).
 */
ib_api_status_t
mlnx_open_ca (
	IN		const	ib_net64_t					ca_guid, // IN  const char *		ca_name,
	IN		const	ci_completion_cb_t			pfn_completion_cb,
	IN		const	ci_async_event_cb_t			pfn_async_event_cb,
	IN		const	void*const					ca_context,
		OUT			ib_ca_handle_t				*ph_ca)
{
	mlnx_hca_t				*p_hca;
	ib_api_status_t status = IB_NOT_FOUND;
	mlnx_cache_t	*p_cache;
	struct ib_device *ib_dev;

	HCA_ENTER(HCA_DBG_SHIM);
	HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,
		("context 0x%p\n", ca_context));

	// find CA object
	p_hca = mlnx_hca_from_guid( ca_guid );
	if( !p_hca ) {
		HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
			("completes with ERROR status IB_NOT_FOUND\n"));
		return IB_NOT_FOUND;
	}

	ib_dev = &p_hca->mdev->ib_dev;

	HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,
		("context 0x%p\n", ca_context));
	// register caller's callbacks and context on the HOB
	status = mlnx_hobs_set_cb(&p_hca->hob,
		pfn_completion_cb,
		pfn_async_event_cb,
		ca_context);
	if (IB_SUCCESS != status) {
		goto err_set_cb;
	}

	// MAD cache: two entries (NOTE(review): the "* 2" sizing is unexplained
	// here — presumably one per special QP; confirm against the MAD code)
	p_cache = (mlnx_cache_t*)cl_pzalloc( sizeof(mlnx_cache_t) * 2 );
	if( !p_cache ) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_mad_cache;
	}
	p_hca->hob.cache = p_cache;

	
	//TODO: do we need something for kernel users ?

	// Return pointer to HOB object
	if (ph_ca) *ph_ca = &p_hca->hob;
	status =  IB_SUCCESS;

err_mad_cache:
err_set_cb:
	// NOTE: this exit trace is emitted on the success path as well; the
	// "ERROR" wording in the message is unconditional by design of this file
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
116 \r
/*
 * Query the HCA and populate the caller's variable-sized ib_ca_attr_t,
 * including per-port attributes with their GID and PKEY tables.
 *
 * Two-pass protocol: the routine first computes the total size required
 * (header + page-size table + port array + per-port GID/PKEY tables, each
 * pointer-aligned via PTR_ALIGN).  If p_ca_attr is NULL or *p_byte_count
 * is too small, it stores the required size in *p_byte_count and returns
 * IB_INSUFFICIENT_MEMORY; the caller then retries with a buffer of that
 * size.  User-mode callers (p_umv_buf->command set) are rejected with
 * IB_UNSUPPORTED.
 */
ib_api_status_t
mlnx_query_ca (
	IN		const	ib_ca_handle_t				h_ca,
		OUT			ib_ca_attr_t				*p_ca_attr,
	IN	OUT			uint32_t					*p_byte_count,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t		status;
	uint32_t			size, required_size;
	uint8_t			port_num, num_ports;
	uint32_t			num_gids, num_pkeys;
	uint32_t			num_page_sizes = 1; // TBD: what is actually supported
	uint8_t				*last_p;
	struct ib_device_attr props;
	struct ib_port_attr  *hca_ports = NULL;
	int i;
	
	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );
	int err;
	
	HCA_ENTER(HCA_DBG_SHIM);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM ,("User mode is not supported yet\n"));
			p_umv_buf->status = status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}
	if (NULL == p_byte_count) {
		status = IB_INVALID_PARAMETER;
		goto err_byte_count;
	}

	// query the device
	err = mthca_query_device(ib_dev, &props );
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, 
			("ib_query_device failed (%d)\n",err));
		status = errno_to_iberr(err);
		goto err_query_device;
	}
	
	// allocate array for port properties
	num_ports = ib_dev->phys_port_cnt;   /* Number of physical ports of the HCA */             
	if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof *hca_ports))) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("Failed to cl_zalloc ports array\n"));
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_ports;
	}

	// start calculation of ib_ca_attr_t full size
	num_gids = 0;
	num_pkeys = 0;
	required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) +
		PTR_ALIGN(sizeof(uint32_t) * num_page_sizes) +
		PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports);

	// get port properties; port numbers are 1-based on the wire, hence
	// the start_port() offset when calling down to the hardware
	for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num) {
		// request
		err = mthca_query_port(ib_dev, port_num + start_port(ib_dev), &hca_ports[port_num]);
		if (err) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("ib_query_port failed(%d) for port %d\n",err, port_num));
			status = errno_to_iberr(err);
			goto err_query_port;
		}

		// calculate GID table size
		num_gids  = hca_ports[port_num].gid_tbl_len;
		size = PTR_ALIGN(sizeof(ib_gid_t)  * num_gids);
		required_size += size;

		// calculate pkeys table size
		num_pkeys = hca_ports[port_num].pkey_tbl_len;
		size = PTR_ALIGN(sizeof(uint16_t) * num_pkeys);
		required_size += size;
	}

	// resource sufficiency check
	if (NULL == p_ca_attr || *p_byte_count < required_size) {
		*p_byte_count = required_size;
		status = IB_INSUFFICIENT_MEMORY;
		if ( p_ca_attr != NULL) {
			HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, 
				("Failed *p_byte_count (%d) < required_size (%d)\n", *p_byte_count, required_size ));
		}
		goto err_insuff_mem;
	}

	// Space is sufficient - setup table pointers: all tables live inside
	// the caller's single buffer, carved out sequentially after the header
	last_p = (uint8_t*)p_ca_attr;
	last_p += PTR_ALIGN(sizeof(*p_ca_attr));

	p_ca_attr->p_page_size = (uint32_t*)last_p;
	last_p += PTR_ALIGN(num_page_sizes * sizeof(uint32_t));

	p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p;
	last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t));

	for (port_num = 0; port_num < num_ports; port_num++) {
		p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p;
		size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len);
		last_p += size;

		p_ca_attr->p_port_attr[port_num].p_pkey_table = (uint16_t *)last_p;
		size = PTR_ALIGN(sizeof(uint16_t) * hca_ports[port_num].pkey_tbl_len);
		last_p += size;
	}

	// Separate the loops to ensure that table pointers are always setup
	for (port_num = 0; port_num < num_ports; port_num++) {

		// get pkeys, using cache
		for (i=0; i < hca_ports[port_num].pkey_tbl_len; ++i) {
			err = ib_get_cached_pkey( ib_dev, port_num + start_port(ib_dev), i,
				&p_ca_attr->p_port_attr[port_num].p_pkey_table[i] );
			if (err) {
				status = errno_to_iberr(err);
				HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, 
					("ib_get_cached_pkey failed (%d) for port_num %d, index %d\n",
					err, port_num + start_port(ib_dev), i));
				goto err_get_pkey;
			}
		}
		
		// get gids, using cache
		for (i=0; i < hca_ports[port_num].gid_tbl_len; ++i) {
			union ib_gid * __ptr64	gid = (union ib_gid	*)&p_ca_attr->p_port_attr[port_num].p_gid_table[i];
			err = ib_get_cached_gid( ib_dev, port_num + start_port(ib_dev), i, (union ib_gid *)gid );
			//TODO: do we need to convert gids to little endian
			if (err) {
				status = errno_to_iberr(err);
				HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, 
					("ib_get_cached_gid failed (%d) for port_num %d, index %d\n",
					err, port_num + start_port(ib_dev), i));
				goto err_get_gid;
			}
		}

		// trace the first GID of each port, one raw byte at a time
		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,("port %d gid0:", port_num));
		for (i = 0; i < 16; i++)
			HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,
				(" 0x%x", p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[i]));
		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,("\n"));
	}

	// set result size
	p_ca_attr->size = required_size;
	CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) );
	HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM , ("Space required %d used %d\n",
		required_size, (int)((uintn_t)last_p - (uintn_t)p_ca_attr) ));
	
	// !!! GID/PKEY tables must be queried before this call !!!
	mlnx_conv_hca_cap(ib_dev, &props, hca_ports, p_ca_attr);

	status = IB_SUCCESS;

err_get_gid:
err_get_pkey:
err_insuff_mem:
err_query_port:
	cl_free(hca_ports);
err_alloc_ports:
err_query_device:
err_byte_count: 
err_user_unsupported:
	if( status != IB_INSUFFICIENT_MEMORY && status != IB_SUCCESS )
		HCA_PRINT(TRACE_LEVEL_ERROR	, HCA_DBG_SHIM	,("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	HCA_EXIT(HCA_DBG_SHIM);
	return status;
}
289 \r
/*
 * Modify port capability bits and optionally reset the QKEY violation
 * counter of the given port.  Translates IBAL ib_ca_mod_t flags into the
 * set/clr capability masks consumed by mthca_modify_port.
 */
ib_api_status_t
mlnx_modify_ca (
	IN		const	ib_ca_handle_t				h_ca,
	IN		const	uint8_t					port_num,
	IN		const	ib_ca_mod_t					modca_cmd,
	IN		const	ib_port_attr_mod_t			*p_port_attr)
{
/* If the IBAL flag al_mask is present in modca_cmd, transfer the
 * corresponding capability bit 'ib' into the set- or clr-mask depending on
 * the boolean field p_port_attr->cap.al_fld.
 * NOTE(review): the "cap.##al_fld" paste ('##' adjacent to '.') is not
 * standard C preprocessor usage; it relies on the MSVC preprocessor
 * accepting it — verify if this file is ever built with another compiler. */
#define SET_CAP_MOD(al_mask, al_fld, ib)		\
		if (modca_cmd & al_mask) {	\
			if (p_port_attr->cap.##al_fld)		\
				props.set_port_cap_mask |= ib;	\
			else		\
				props.clr_port_cap_mask |= ib;	\
		}

	ib_api_status_t status;
	int err;
	struct ib_port_modify props;
	int port_modify_mask = 0;
	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );

	HCA_ENTER(HCA_DBG_SHIM);

	// prepare parameters
	RtlZeroMemory(&props, sizeof(props));
	SET_CAP_MOD(IB_CA_MOD_IS_SM, sm, IB_PORT_SM);
	SET_CAP_MOD(IB_CA_MOD_IS_SNMP_SUPPORTED, snmp, IB_PORT_SNMP_TUNNEL_SUP);
	SET_CAP_MOD(IB_CA_MOD_IS_DEV_MGMT_SUPPORTED, dev_mgmt, IB_PORT_DEVICE_MGMT_SUP);
	SET_CAP_MOD(IB_CA_MOD_IS_VEND_SUPPORTED, vend, IB_PORT_VENDOR_CLASS_SUP);
	// a zero counter value requests a reset of the QKEY violation counter
	if ((modca_cmd & IB_CA_MOD_QKEY_CTR) && (p_port_attr->qkey_ctr == 0)) 
		port_modify_mask |= IB_PORT_RESET_QKEY_CNTR;
	
	// modify port
	err = mthca_modify_port(ib_dev, port_num, port_modify_mask, &props );
	if (err) {
		status = errno_to_iberr(err);
		HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mthca_modify_port failed (%d) \n",err));
		goto err_modify_port;
	}
	
	status =	IB_SUCCESS;

err_modify_port:
	// exit trace fires on success as well; the "ERROR" wording is unconditional
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
337 \r
338 ib_api_status_t\r
339 mlnx_close_ca (\r
340         IN                              ib_ca_handle_t                          h_ca)\r
341 {\r
342         HCA_ENTER(HCA_DBG_SHIM);\r
343 \r
344         // release HOB resources\r
345         mlnx_hobs_remove(h_ca);\r
346 \r
347         //TODO: release HOBUL resources\r
348 \r
349         HCA_EXIT(HCA_DBG_SHIM);\r
350         \r
351         return IB_SUCCESS;\r
352 }\r
353 \r
354 \r
/*
 * Open a per-process ("user mode") context on the HCA.
 *
 * Two paths:
 *  - kernel caller (p_umv_buf->command == 0): allocate a bare ib_ucontext,
 *    record the device pointer and skip PD creation;
 *  - user-mode caller: create a real ucontext via mthca_alloc_ucontext,
 *    allocate its PD, and copy UAR address / PD handle / vendor & device
 *    IDs back through the caller's umv buffer.
 *
 * On success *ph_um_ca receives the ucontext cast to an ib_ca_handle_t.
 */
static ib_api_status_t
mlnx_um_open(
	IN		const	ib_ca_handle_t				h_ca,
	IN	OUT			ci_umv_buf_t* const			p_umv_buf,
		OUT			ib_ca_handle_t* const		ph_um_ca )
{
	int err;
	ib_api_status_t		status;
	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	hca_dev_ext_t *ext_p = EXT_FROM_HOB( hob_p );
	struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );
	struct ib_ucontext *p_context;
	struct mthca_alloc_ucontext_resp *uresp_p;
	struct ibv_alloc_pd_resp resp;
	ci_umv_buf_t umv_buf;

	HCA_ENTER(HCA_DBG_SHIM);

	// sanity check
	ASSERT( p_umv_buf );
	if( !p_umv_buf->command )
	{
		// kernel-mode caller: a minimal context with no PD is enough
		p_context = cl_zalloc( sizeof(struct ib_ucontext) );
		if( !p_context )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_alloc_ucontext;
		}
		/* Copy the dev info. */
		p_context->device = ib_dev;
		p_umv_buf->output_size = 0;
		goto done;
	}

	// create user context in kernel
	p_context = mthca_alloc_ucontext(ib_dev, p_umv_buf);
	if (IS_ERR(p_context)) {
		err = PTR_ERR(p_context);
		HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM,
			("mthca_alloc_ucontext failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_alloc_ucontext;
	}

	/* allocate pd */
	// NOTE(review): only output_size and p_inout_buf of the local umv_buf
	// are initialized here; presumably ibv_alloc_pd ignores the remaining
	// fields for a kernel-prepared buffer — confirm
	umv_buf.output_size = sizeof(struct ibv_alloc_pd_resp);
	umv_buf.p_inout_buf = &resp;
	//NB: Pay attention ! Ucontext parameter is important here:
	// when it is present (i.e. - for user space) - mthca_alloc_pd won't create MR
	p_context->pd = ibv_alloc_pd(ib_dev, p_context, &umv_buf);
	if (IS_ERR(p_context->pd)) {
		err = PTR_ERR(p_context->pd);
		HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
			("ibv_alloc_pd failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_alloc_pd;
	}
	
	// fill more parameters for user (sanity checks are in mthca_alloc_ucontext)
	uresp_p = (struct mthca_alloc_ucontext_resp *)(void*)p_umv_buf->p_inout_buf;
	uresp_p->uar_addr = (uint64_t)(UINT_PTR)p_context->user_uar;
	uresp_p->pd_handle = resp.pd_handle;
	uresp_p->pdn = resp.pdn;
	uresp_p->vend_id = (uint32_t)ext_p->hcaConfig.VendorID;
	uresp_p->dev_id = (uint16_t)ext_p->hcaConfig.DeviceID;

done:
	// some more inits, common to both paths
	p_context->va = p_context->p_mdl = NULL;
	p_context->fw_if_open = FALSE;
	KeInitializeMutex( &p_context->mutex, 0 );
	
	// return the result
	if (ph_um_ca) *ph_um_ca = (ib_ca_handle_t)p_context;

	status = IB_SUCCESS;
	goto end;
	
err_alloc_pd:
	mthca_dealloc_ucontext(p_context);
err_alloc_ucontext: 
end:
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	// exit trace fires on success as well; the "ERROR" wording is unconditional
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
443 \r
444 static void\r
445 mlnx_um_close(\r
446         IN                              ib_ca_handle_t                          h_ca,\r
447         IN                              ib_ca_handle_t                          h_um_ca )\r
448 {\r
449         struct ib_ucontext *p_ucontext = (struct ib_ucontext *)h_um_ca;\r
450         UNREFERENCED_PARAMETER(h_ca);\r
451 \r
452         unmap_crspace_for_all(p_ucontext);\r
453         if( !p_ucontext->pd )\r
454                 cl_free( h_um_ca );\r
455         else\r
456                 ibv_um_close(p_ucontext);\r
457         return;\r
458 }\r
459 \r
460 \r
461 /*\r
462 *    Protection Domain and Reliable Datagram Domain Verbs\r
463 */\r
464 \r
/*
 * Allocate a protection domain.
 *
 * For a user-mode caller h_ca is actually an ib_ucontext (set up by
 * mlnx_um_open); for a kernel caller it is the HOB handle.  The resulting
 * ib_pd pointer is returned in *ph_pd cast to an ib_pd_handle_t.
 */
ib_api_status_t
mlnx_allocate_pd (
	IN		const	ib_ca_handle_t				h_ca,
	IN		const	ib_pd_type_t				type,
		OUT			ib_pd_handle_t				*ph_pd,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t		status;
	struct ib_device *ib_dev;
	struct ib_ucontext *p_context;
	struct ib_pd *ib_pd_p;
	int err;

	//TODO: how are we use it ?
	UNREFERENCED_PARAMETER(type);
	
	HCA_ENTER(HCA_DBG_SHIM);

	// NOTE(review): sibling verbs distinguish user mode with
	// "p_umv_buf && p_umv_buf->command"; here p_umv_buf alone selects the
	// ucontext interpretation of h_ca — verify kernel callers always pass
	// p_umv_buf == NULL, otherwise this cast crashes
	if( p_umv_buf ) {
		p_context = (struct ib_ucontext *)h_ca;
		ib_dev = p_context->device;
	}
	else {
		mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
		p_context = NULL;
		ib_dev = IBDEV_FROM_HOB( hob_p );
	}
	
	// create PD
	ib_pd_p = ibv_alloc_pd(ib_dev, p_context, p_umv_buf);
	if (IS_ERR(ib_pd_p)) {
		err = PTR_ERR(ib_pd_p);
		HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
			("ibv_alloc_pd failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_alloc_pd;
	}

	// return the result
	if (ph_pd) *ph_pd = (ib_pd_handle_t)ib_pd_p;

	status = IB_SUCCESS;
	
err_alloc_pd:	
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	// exit trace fires on success as well; the "ERROR" wording is unconditional
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
515 \r
516 ib_api_status_t\r
517 mlnx_deallocate_pd (\r
518         IN                              ib_pd_handle_t                          h_pd)\r
519 {\r
520         ib_api_status_t         status;\r
521         int err;\r
522         struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
523         PREP_IBDEV_FOR_PRINT(ib_pd_p->device)\r
524 \r
525         HCA_ENTER( HCA_DBG_QP);\r
526 \r
527         HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  ,\r
528                 ("pcs %p\n", PsGetCurrentProcess()));\r
529         \r
530         // dealloc pd\r
531         err = ibv_dealloc_pd( ib_pd_p );\r
532         if (err) {\r
533                 HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM\r
534                         ,("ibv_dealloc_pd failed (%d)\n", err));\r
535                 status = errno_to_iberr(err);\r
536                 goto err_dealloc_pd;\r
537         }\r
538         status = IB_SUCCESS;\r
539 \r
540 err_dealloc_pd:\r
541         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM\r
542                 ,("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
543         return status;\r
544 }\r
545 \r
546 /* \r
547 * Address Vector Management Verbs\r
548 */\r
549 ib_api_status_t\r
550 mlnx_create_av (\r
551         IN              const   ib_pd_handle_t                          h_pd,\r
552         IN              const   ib_av_attr_t                            *p_addr_vector,\r
553                 OUT                     ib_av_handle_t                          *ph_av,\r
554         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
555 {\r
556         int err = 0;\r
557         ib_api_status_t         status = IB_SUCCESS;\r
558         struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
559         struct ib_device *ib_dev = ib_pd_p->device;\r
560         struct ib_ah *ib_av_p;\r
561         struct ib_ah_attr ah_attr;\r
562         struct ib_ucontext *p_context = NULL;\r
563 \r
564         HCA_ENTER(HCA_DBG_AV);\r
565 \r
566         if( p_umv_buf && p_umv_buf->command ) {\r
567                 // sanity checks \r
568                 if (p_umv_buf->input_size < sizeof(struct ibv_create_ah) ||\r
569                         p_umv_buf->output_size < sizeof(struct ibv_create_ah_resp) ||\r
570                         !p_umv_buf->p_inout_buf) {\r
571                         status = IB_INVALID_PARAMETER;\r
572                         goto err_inval_params;\r
573                 }\r
574                 p_context = ib_pd_p->ucontext;\r
575         }\r
576         else \r
577                 p_context = NULL;\r
578 \r
579         // fill parameters \r
580         RtlZeroMemory(&ah_attr, sizeof(ah_attr));\r
581         mlnx_conv_ibal_av( ib_dev, p_addr_vector,  &ah_attr );\r
582 \r
583         ib_av_p = ibv_create_ah(ib_pd_p, &ah_attr, p_context, p_umv_buf);\r
584         if (IS_ERR(ib_av_p)) {\r
585                 err = PTR_ERR(ib_av_p);\r
586                 HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_AV,\r
587                         ("ibv_create_ah failed (%d)\n", err));\r
588                 status = errno_to_iberr(err);\r
589                 goto err_alloc_av;\r
590         }\r
591 \r
592         // return the result\r
593         if (ph_av) *ph_av = (ib_av_handle_t)ib_av_p;\r
594 \r
595         if( p_context )\r
596         {\r
597                 struct ibv_create_ah_resp *create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf;\r
598                 cl_memcpy( &create_ah_resp->av_attr, p_addr_vector, sizeof(create_ah_resp->av_attr) );\r
599                 p_umv_buf->status = IB_SUCCESS;\r
600         }\r
601 \r
602         status = IB_SUCCESS;\r
603         \r
604 err_alloc_av:   \r
605 err_inval_params:\r
606         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_AV,\r
607                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
608         return status;\r
609 }\r
610 \r
/*
 * Query an address vector: convert the stored mthca AH back into an IBAL
 * ib_av_attr_t and return the owning PD handle.  User-mode callers are
 * rejected with IB_UNSUPPORTED.
 */
ib_api_status_t
mlnx_query_av (
	IN		const	ib_av_handle_t				h_av,
		OUT			ib_av_attr_t				*p_addr_vector,
		OUT			ib_pd_handle_t				*ph_pd,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	ib_api_status_t		status = IB_SUCCESS;
	struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;
	PREP_IBDEV_FOR_PRINT(ib_ah_p->device)

	HCA_ENTER(HCA_DBG_AV);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,
				("User mode is not supported yet\n"));
			status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}

	// query AV
#ifdef WIN_TO_BE_CHANGED
	//TODO: not implemented in low-level driver
	// NOTE(review): this disabled branch would not compile as written —
	// missing ';' after the call, and 'ah_attr' / label 'err_query_ah'
	// are not declared in this function
	err = ibv_query_ah(ib_ah_p, &ah_attr)
	if (err) {
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_AV,
			("ibv_query_ah failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_query_ah;
	}
	// convert to IBAL structure: something like that
	mlnx_conv_mthca_av( p_addr_vector,  &ah_attr );
#else

	err = mlnx_conv_mthca_av( ib_ah_p, p_addr_vector );
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,
			("mlnx_conv_mthca_av failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_conv_mthca_av;
	}
#endif

	// results
	// NOTE(review): ph_pd is dereferenced unconditionally, unlike the
	// "if (ph_pd)" guards used elsewhere in this file — confirm callers
	// always pass a valid pointer
	*ph_pd = (ib_pd_handle_t)ib_ah_p->pd;
	
err_conv_mthca_av:
err_user_unsupported:
	// exit trace fires on success as well; the "ERROR" wording is unconditional
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_AV,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
665 \r
/*
 * Modify an address vector in place: translate the IBAL attributes to a
 * verbs ib_ah_attr and apply them via mlnx_modify_ah.  User-mode callers
 * are rejected with IB_UNSUPPORTED.
 */
ib_api_status_t
mlnx_modify_av (
	IN		const	ib_av_handle_t				h_av,
	IN		const	ib_av_attr_t				*p_addr_vector,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	struct ib_ah_attr ah_attr;
	ib_api_status_t		status = IB_SUCCESS;
	struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;
	struct ib_device *ib_dev = ib_ah_p->pd->device;

	HCA_ENTER(HCA_DBG_AV);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_AV,
				("User mode is not supported yet\n"));
			status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}

	// fill parameters 
	RtlZeroMemory(&ah_attr, sizeof(ah_attr));
	mlnx_conv_ibal_av( ib_dev, p_addr_vector,  &ah_attr );

	// modify AH
#ifdef WIN_TO_BE_CHANGED
	//TODO: not implemented in low-level driver
	// NOTE(review): this disabled branch would not compile as written —
	// missing ';' after the call, and 'err' / label 'err_query_ah' are
	// not declared in this function
	err = ibv_modify_ah(ib_ah_p, &ah_attr)
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,
			("ibv_query_ah failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_query_ah;
	}
#else

	// active path: mlnx_modify_ah returns no status to check
	mlnx_modify_ah( ib_ah_p, &ah_attr );
#endif

err_user_unsupported:
	// exit trace fires on success as well; the "ERROR" wording is unconditional
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_AV,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
711 \r
712 ib_api_status_t\r
713 mlnx_destroy_av (\r
714         IN              const   ib_av_handle_t                          h_av)\r
715 {\r
716         int err;\r
717         ib_api_status_t         status = IB_SUCCESS;\r
718         struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;\r
719         PREP_IBDEV_FOR_PRINT(ib_ah_p->device)\r
720 \r
721         HCA_ENTER(HCA_DBG_AV);\r
722 \r
723         // destroy AV\r
724         err = ibv_destroy_ah( ib_ah_p );\r
725         if (err) {\r
726                 HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_AV,\r
727                         ("ibv_destroy_ah failed (%d)\n", err));\r
728                 status = errno_to_iberr(err);\r
729                 goto err_destroy_ah;\r
730         }\r
731 \r
732 err_destroy_ah:\r
733         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_AV,\r
734                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
735         return status;\r
736 }\r
737 \r
738 /*\r
739 *       Queue Pair Management Verbs\r
740 */\r
741 \r
742 \r
/*
 * _create_qp
 *
 * Common worker for mlnx_create_qp and mlnx_create_spl_qp: creates a QP on
 * protection domain h_pd via the low-level ibv_create_qp(), caches the
 * caller's context and the creation attributes in the mthca_qp object, and
 * optionally reports the resulting attributes through p_qp_attr.
 *
 * h_pd          - protection domain the QP is created on.
 * port_num      - port for special QPs (forwarded in qp_init_attr).
 * qp_context    - opaque consumer context stored in the mthca_qp.
 * p_create_attr - IBAL creation parameters (type, CQs, depths, SGE counts).
 * p_qp_attr     - optional OUT: filled via mlnx_query_qp on success.
 * ph_qp         - optional OUT: receives the new QP handle.
 * p_umv_buf     - non-NULL with a command set => user-mode path; the PD's
 *                 ucontext is forwarded to ibv_create_qp.
 *
 * Returns IB_SUCCESS, IB_INVALID_PARAMETER on a bad umv buffer, or the
 * translated error from ibv_create_qp / mlnx_query_qp.  On a query failure
 * the freshly created QP is destroyed before returning.
 */
static ib_api_status_t
_create_qp (
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	uint8_t					port_num,
	IN		const	void					*qp_context,
	IN		const	ib_qp_create_t				*p_create_attr,
		OUT		ib_qp_attr_t				*p_qp_attr,
		OUT		ib_qp_handle_t				*ph_qp,
	IN	OUT		ci_umv_buf_t				*p_umv_buf )
{
		int err;
		ib_api_status_t 	status;
		struct ib_qp * ib_qp_p;
		struct mthca_qp *qp_p;
		struct ib_qp_init_attr qp_init_attr;
		struct ib_ucontext *p_context = NULL;
		struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
		struct ib_device *ib_dev = ib_pd_p->device;
		mlnx_hob_t	 *hob_p = HOB_FROM_IBDEV(ib_dev);
		
		HCA_ENTER(HCA_DBG_QP);

	
		if( p_umv_buf && p_umv_buf->command ) {
			// sanity checks 
			if (p_umv_buf->input_size < sizeof(struct ibv_create_qp) ||
				p_umv_buf->output_size < sizeof(struct ibv_create_qp_resp) ||
				!p_umv_buf->p_inout_buf) {
				status = IB_INVALID_PARAMETER;
				goto err_inval_params;
			}
			// user-mode caller: create the QP in the PD owner's context
			p_context = ib_pd_p->ucontext;
		}
		else 
			p_context = NULL;	// kernel-mode caller

		// prepare the parameters
		RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));
		qp_init_attr.qp_type = p_create_attr->qp_type;
		qp_init_attr.event_handler = qp_event_handler;
		// the hob is passed as the event-handler context, not qp_context;
		// the consumer's qp_context is stored in the mthca_qp below
		qp_init_attr.qp_context = hob_p;
		qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;
		qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;
		qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;
		qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;
		qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;
		qp_init_attr.cap.max_send_wr = p_create_attr->sq_depth;
		qp_init_attr.sq_sig_type = (p_create_attr->sq_signaled) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
		qp_init_attr.port_num = port_num;


		// create qp		
		ib_qp_p = ibv_create_qp( ib_pd_p, &qp_init_attr, p_context, p_umv_buf );
		if (IS_ERR(ib_qp_p)) {
			err = PTR_ERR(ib_qp_p);
			HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_QP,
				("ibv_create_qp failed (%d)\n", err));
			status = errno_to_iberr(err);
			goto err_create_qp;
		}
	
		// fill the object
		qp_p = (struct mthca_qp *)ib_qp_p;
		qp_p->qp_context = (void*)qp_context;
		// cache the creation attributes; mlnx_query_qp reports from this copy
		qp_p->qp_init_attr = qp_init_attr;
	
		// Query QP to obtain requested attributes
		if (p_qp_attr) {
			status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);
			if (status != IB_SUCCESS)
					goto err_query_qp;
		}
		
		// return the results
		if (ph_qp) *ph_qp = (ib_qp_handle_t)ib_qp_p;
	
		status = IB_SUCCESS;
		goto end;
	
	err_query_qp:
		// undo the creation: don't hand back a QP we could not report on
		ibv_destroy_qp( ib_qp_p );
	err_create_qp:
	err_inval_params:
	end:
		if (p_umv_buf && p_umv_buf->command) 
			p_umv_buf->status = status;
		HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
		return status;
}
833 \r
834 ib_api_status_t\r
835 mlnx_create_spl_qp (\r
836         IN              const   ib_pd_handle_t                          h_pd,\r
837         IN              const   uint8_t                                         port_num,\r
838         IN              const   void                                            *qp_context,\r
839         IN              const   ib_qp_create_t                          *p_create_attr,\r
840                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
841                 OUT                     ib_qp_handle_t                          *ph_qp )\r
842 {\r
843         ib_api_status_t         status;\r
844         PREP_IBDEV_FOR_PRINT(((struct ib_pd*)h_pd)->device)\r
845 \r
846         HCA_ENTER(HCA_DBG_SHIM);\r
847 \r
848         status =        _create_qp( h_pd, port_num,\r
849                 qp_context, p_create_attr, p_qp_attr, ph_qp, NULL );\r
850                 \r
851         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
852                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
853         return status;\r
854 }\r
855 \r
856 ib_api_status_t\r
857 mlnx_create_qp (\r
858         IN              const   ib_pd_handle_t                          h_pd,\r
859         IN              const   void                                            *qp_context,\r
860         IN              const   ib_qp_create_t                          *p_create_attr,\r
861                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
862                 OUT                     ib_qp_handle_t                          *ph_qp,\r
863         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
864 {\r
865         ib_api_status_t         status;\r
866         PREP_IBDEV_FOR_PRINT(((struct ib_pd*)h_pd)->device)\r
867 \r
868         //NB: algorithm of mthca_alloc_sqp() requires port_num\r
869         // PRM states, that special pares are created in couples, so\r
870         // looks like we can put here port_num = 1 always\r
871         uint8_t port_num = 1;\r
872 \r
873         HCA_ENTER(HCA_DBG_QP);\r
874 \r
875         status = _create_qp( h_pd, port_num,\r
876                 qp_context, p_create_attr, p_qp_attr, ph_qp, p_umv_buf );\r
877                 \r
878         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,\r
879                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
880         return status;\r
881 }\r
882 \r
883 ib_api_status_t\r
884 mlnx_modify_qp (\r
885         IN              const   ib_qp_handle_t                          h_qp,\r
886         IN              const   ib_qp_mod_t                                     *p_modify_attr,\r
887                 OUT                     ib_qp_attr_t                            *p_qp_attr OPTIONAL,\r
888         IN      OUT                     ci_umv_buf_t                            *p_umv_buf OPTIONAL )\r
889 {\r
890         ib_api_status_t         status;\r
891         int err;\r
892         struct ib_qp_attr qp_attr;\r
893         int qp_attr_mask;\r
894         struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
895         PREP_IBDEV_FOR_PRINT(ib_qp_p->device)\r
896 \r
897         HCA_ENTER(HCA_DBG_QP);\r
898 \r
899         // sanity checks\r
900         if( p_umv_buf && p_umv_buf->command ) {\r
901                 // sanity checks \r
902                 if (p_umv_buf->output_size < sizeof(struct ibv_modify_qp_resp) ||\r
903                         !p_umv_buf->p_inout_buf) {\r
904                         status = IB_INVALID_PARAMETER;\r
905                         goto err_inval_params;\r
906                 }\r
907         }\r
908         \r
909         // fill parameters \r
910         status = mlnx_conv_qp_modify_attr( ib_qp_p, ib_qp_p->qp_type, \r
911                 p_modify_attr,  &qp_attr, &qp_attr_mask );\r
912         if (status == IB_NOT_DONE)\r
913                 goto query_qp;\r
914         if (status != IB_SUCCESS ) \r
915                 goto err_mode_unsupported;\r
916 \r
917         // modify QP\r
918         err = ibv_modify_qp(ib_qp_p, &qp_attr, qp_attr_mask);\r
919         if (err) {\r
920                 HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_QP ,("ibv_modify_qp failed (%d)\n", err));\r
921                 status = errno_to_iberr(err);\r
922                 goto err_modify_qp;\r
923         }\r
924 \r
925         // Query QP to obtain requested attributes\r
926 query_qp:       \r
927         if (p_qp_attr) {\r
928                 status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);\r
929                 if (status != IB_SUCCESS)\r
930                                 goto err_query_qp;\r
931         }\r
932         \r
933         if( p_umv_buf && p_umv_buf->command )\r
934         {\r
935                 struct ibv_modify_qp_resp resp;\r
936                 resp.attr_mask = qp_attr_mask;\r
937                 resp.qp_state = qp_attr.qp_state;\r
938                 err = ib_copy_to_umv_buf(p_umv_buf, &resp, sizeof(struct ibv_modify_qp_resp));\r
939                 if (err) {\r
940                         HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("ib_copy_to_umv_buf failed (%d)\n", err));\r
941                         status = errno_to_iberr(err);\r
942                         goto err_copy;\r
943                 }\r
944         }\r
945 \r
946         status = IB_SUCCESS;\r
947 \r
948 err_copy:       \r
949 err_query_qp:\r
950 err_modify_qp:  \r
951 err_mode_unsupported:\r
952 err_inval_params:\r
953         if (p_umv_buf && p_umv_buf->command) \r
954                 p_umv_buf->status = status;\r
955         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
956                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
957         return status;\r
958 }\r
959 \r
960 ib_api_status_t\r
961 mlnx_query_qp (\r
962         IN              const   ib_qp_handle_t                          h_qp,\r
963                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
964         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
965 {\r
966         ib_api_status_t         status = IB_SUCCESS;\r
967         struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
968         struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;\r
969         PREP_IBDEV_FOR_PRINT(ib_qp_p->device)\r
970 \r
971         UNREFERENCED_PARAMETER(p_umv_buf);\r
972         \r
973         HCA_ENTER( HCA_DBG_QP);\r
974         // sanity checks\r
975 \r
976         // clean the structure\r
977         RtlZeroMemory( p_qp_attr, sizeof *p_qp_attr );\r
978         \r
979         // fill the structure\r
980         //TODO: this function is to be implemented via ibv_query_qp, which is not supported now \r
981         p_qp_attr->h_pd                                         = (ib_pd_handle_t)qp_p->ibqp.pd;\r
982         p_qp_attr->qp_type                              = qp_p->ibqp.qp_type;\r
983         p_qp_attr->sq_max_inline                = qp_p->qp_init_attr.cap.max_inline_data;\r
984         p_qp_attr->sq_depth                             = qp_p->qp_init_attr.cap.max_send_wr;\r
985         p_qp_attr->rq_depth                             = qp_p->qp_init_attr.cap.max_recv_wr;\r
986         p_qp_attr->sq_sge                                       = qp_p->qp_init_attr.cap.max_send_sge;\r
987         p_qp_attr->rq_sge                                       = qp_p->qp_init_attr.cap.max_recv_sge;\r
988         p_qp_attr->resp_res                             = qp_p->resp_depth;\r
989         p_qp_attr->h_sq_cq                              = (ib_cq_handle_t)qp_p->ibqp.send_cq;\r
990         p_qp_attr->h_rq_cq                              = (ib_cq_handle_t)qp_p->ibqp.recv_cq;\r
991         p_qp_attr->sq_signaled                  = qp_p->sq_policy == IB_SIGNAL_ALL_WR;\r
992         p_qp_attr->state                                                = mlnx_qps_to_ibal( qp_p->state );\r
993         p_qp_attr->num                                          = cl_hton32(qp_p->ibqp.qp_num);\r
994 \r
995 #ifdef WIN_TO_BE_CHANGED\r
996 //TODO: don't know how to fill the following fields     without support of query_qp in MTHCA    \r
997         p_qp_attr->access_ctrl                  = qp_p->\r
998         p_qp_attr->pkey_index                   = qp_p->\r
999         p_qp_attr->dest_num                             = qp_p-\r
1000         p_qp_attr->init_depth                   = qp_p-\r
1001         p_qp_attr->qkey                                         = qp_p-\r
1002         p_qp_attr->sq_psn                                       = qp_p-\r
1003         p_qp_attr->rq_psn                                       = qp_p-\r
1004         p_qp_attr->primary_port         = qp_p-\r
1005         p_qp_attr->alternate_port               = qp_p-\r
1006         p_qp_attr->primary_av                   = qp_p-\r
1007         p_qp_attr->alternate_av                 = qp_p-\r
1008         p_qp_attr->apm_state                    = qp_p-\r
1009 #endif          \r
1010 \r
1011         status = IB_SUCCESS;\r
1012 \r
1013         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_QP,\r
1014                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1015         return status;\r
1016 }\r
1017 \r
1018 ib_api_status_t\r
1019 mlnx_destroy_qp (\r
1020         IN              const   ib_qp_handle_t                          h_qp,\r
1021         IN              const   uint64_t                                        timewait )\r
1022 {\r
1023         ib_api_status_t         status;\r
1024         int err;\r
1025         struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
1026         PREP_IBDEV_FOR_PRINT(ib_qp_p->device)\r
1027 \r
1028         UNUSED_PARAM( timewait );\r
1029 \r
1030         HCA_ENTER( HCA_DBG_QP);\r
1031 \r
1032         HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  ,\r
1033                 ("qpnum %#x, pcs %p\n", ib_qp_p->qp_num, PsGetCurrentProcess()) );\r
1034 \r
1035         err = ibv_destroy_qp( ib_qp_p );\r
1036         if (err) {\r
1037                 HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,\r
1038                         ("ibv_destroy_qp failed (%d)\n", err));\r
1039                 status = errno_to_iberr(err);\r
1040                 goto err_destroy_qp;\r
1041         }\r
1042 \r
1043         status = IB_SUCCESS;\r
1044 \r
1045 err_destroy_qp:\r
1046         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,\r
1047                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1048         return status;\r
1049 }\r
1050 \r
/*
* Completion Queue Management Verbs.
*/
1054 \r
1055 ib_api_status_t\r
1056 mlnx_create_cq (\r
1057         IN              const   ib_ca_handle_t                          h_ca,\r
1058         IN              const   void                                            *cq_context,\r
1059         IN      OUT                     uint32_t                                        *p_size,\r
1060                 OUT                     ib_cq_handle_t                          *ph_cq,\r
1061         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1062 {\r
1063         int err;\r
1064         ib_api_status_t         status;\r
1065         struct ib_cq *ib_cq_p;\r
1066         struct mthca_cq *cq_p;\r
1067         mlnx_hob_t                      *hob_p;\r
1068         struct ib_device *ib_dev;\r
1069         struct ib_ucontext *p_context;\r
1070         \r
1071         HCA_ENTER(HCA_DBG_CQ);\r
1072 \r
1073         if( p_umv_buf ) {\r
1074 \r
1075                 p_context = (struct ib_ucontext *)h_ca;\r
1076                 hob_p = HOB_FROM_IBDEV(p_context->device);\r
1077                 ib_dev = p_context->device;\r
1078 \r
1079                 // sanity checks \r
1080                 if (p_umv_buf->input_size < sizeof(struct ibv_create_cq) ||\r
1081                         p_umv_buf->output_size < sizeof(struct ibv_create_cq_resp) ||\r
1082                         !p_umv_buf->p_inout_buf) {\r
1083                         status = IB_INVALID_PARAMETER;\r
1084                         goto err_inval_params;\r
1085                 }\r
1086         }\r
1087         else {\r
1088                 hob_p = (mlnx_hob_t *)h_ca;\r
1089                 p_context = NULL;\r
1090                 ib_dev = IBDEV_FROM_HOB( hob_p );\r
1091         }\r
1092 \r
1093         // allocate cq  \r
1094         ib_cq_p = ibv_create_cq(ib_dev, \r
1095                 cq_comp_handler, cq_event_handler,\r
1096                 hob_p, *p_size, p_context, p_umv_buf );\r
1097         if (IS_ERR(ib_cq_p)) {\r
1098                 err = PTR_ERR(ib_cq_p);\r
1099                 HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_CQ, ("ibv_create_cq failed (%d)\n", err));\r
1100                 status = errno_to_iberr(err);\r
1101                 goto err_create_cq;\r
1102         }\r
1103 \r
1104         // fill the object\r
1105         cq_p = (struct mthca_cq *)ib_cq_p;\r
1106         cq_p->cq_context = (void*)cq_context;\r
1107         \r
1108         // return the result\r
1109 //      *p_size = *p_size;      // return the same value\r
1110         *p_size = ib_cq_p->cqe;\r
1111 \r
1112         if (ph_cq) *ph_cq = (ib_cq_handle_t)cq_p;\r
1113 \r
1114         status = IB_SUCCESS;\r
1115         \r
1116 err_create_cq:\r
1117 err_inval_params:\r
1118         if (p_umv_buf && p_umv_buf->command) \r
1119                 p_umv_buf->status = status;\r
1120         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ,\r
1121                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1122         return status;\r
1123 }\r
1124 \r
1125 ib_api_status_t\r
1126 mlnx_resize_cq (\r
1127         IN              const   ib_cq_handle_t                          h_cq,\r
1128         IN      OUT                     uint32_t                                        *p_size,\r
1129         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1130 {\r
1131         UNREFERENCED_PARAMETER(h_cq);\r
1132         UNREFERENCED_PARAMETER(p_size);\r
1133         if (p_umv_buf && p_umv_buf->command) {\r
1134                 p_umv_buf->status = IB_UNSUPPORTED;\r
1135         }\r
1136         HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mlnx_resize_cq not implemented\n"));\r
1137         return IB_UNSUPPORTED;\r
1138 }\r
1139 \r
1140 ib_api_status_t\r
1141 mlnx_query_cq (\r
1142         IN              const   ib_cq_handle_t                          h_cq,\r
1143                 OUT                     uint32_t                                        *p_size,\r
1144         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1145 {\r
1146         UNREFERENCED_PARAMETER(h_cq);\r
1147         UNREFERENCED_PARAMETER(p_size);\r
1148         if (p_umv_buf && p_umv_buf->command) {\r
1149                 p_umv_buf->status = IB_UNSUPPORTED;\r
1150         }\r
1151         HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mlnx_query_cq not implemented\n"));\r
1152         return IB_UNSUPPORTED;\r
1153 }\r
1154 \r
1155 ib_api_status_t\r
1156 mlnx_destroy_cq (\r
1157         IN              const   ib_cq_handle_t                          h_cq)\r
1158 {\r
1159                                                                                                                                                                 \r
1160         ib_api_status_t         status;\r
1161         int err;\r
1162         struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;\r
1163         PREP_IBDEV_FOR_PRINT(ib_cq_p->device)\r
1164 \r
1165         HCA_ENTER( HCA_DBG_QP);\r
1166 \r
1167         HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  ,\r
1168                 ("cqn %#x, pcs %p\n", ((struct mthca_cq*)ib_cq_p)->cqn, PsGetCurrentProcess()) );\r
1169 \r
1170         // destroy CQ\r
1171         err = ibv_destroy_cq( ib_cq_p );\r
1172         if (err) {\r
1173                 HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,\r
1174                         ("ibv_destroy_cq failed (%d)\n", err));\r
1175                 status = errno_to_iberr(err);\r
1176                 goto err_destroy_cq;\r
1177         }\r
1178 \r
1179         status = IB_SUCCESS;\r
1180 \r
1181 err_destroy_cq:\r
1182         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
1183                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1184         return status;\r
1185 }\r
1186 \r
1187 \r
1188 void\r
1189 setup_ci_interface(\r
1190         IN              const   ib_net64_t                                      ca_guid,\r
1191         IN      OUT                     ci_interface_t                          *p_interface )\r
1192 {\r
1193         cl_memclr(p_interface, sizeof(*p_interface));\r
1194 \r
1195         /* Guid of the CA. */\r
1196         p_interface->guid = ca_guid;\r
1197 \r
1198         /* Version of this interface. */\r
1199         p_interface->version = VERBS_VERSION;\r
1200 \r
1201         /* UVP name */\r
1202         cl_memcpy( p_interface->libname, mlnx_uvp_lib_name, MAX_LIB_NAME);\r
1203 \r
1204         HCA_PRINT(TRACE_LEVEL_VERBOSE  , HCA_DBG_SHIM  ,("UVP filename %s\n", p_interface->libname));\r
1205 \r
1206         /* The real interface. */\r
1207         p_interface->open_ca = mlnx_open_ca;\r
1208         p_interface->query_ca = mlnx_query_ca;\r
1209         p_interface->modify_ca = mlnx_modify_ca; \r
1210         p_interface->close_ca = mlnx_close_ca;\r
1211         p_interface->um_open_ca = mlnx_um_open;\r
1212         p_interface->um_close_ca = mlnx_um_close;\r
1213 \r
1214         p_interface->allocate_pd = mlnx_allocate_pd;\r
1215         p_interface->deallocate_pd = mlnx_deallocate_pd;\r
1216 \r
1217         p_interface->create_av = mlnx_create_av;\r
1218         p_interface->query_av = mlnx_query_av;\r
1219         p_interface->modify_av = mlnx_modify_av;\r
1220         p_interface->destroy_av = mlnx_destroy_av;\r
1221 \r
1222         p_interface->create_qp = mlnx_create_qp;\r
1223         p_interface->create_spl_qp = mlnx_create_spl_qp;\r
1224         p_interface->modify_qp = mlnx_modify_qp;\r
1225         p_interface->query_qp = mlnx_query_qp;\r
1226         p_interface->destroy_qp = mlnx_destroy_qp;\r
1227 \r
1228         p_interface->create_cq = mlnx_create_cq;\r
1229         p_interface->resize_cq = mlnx_resize_cq;\r
1230         p_interface->query_cq = mlnx_query_cq;\r
1231         p_interface->destroy_cq = mlnx_destroy_cq;\r
1232 \r
1233         p_interface->local_mad = mlnx_local_mad;\r
1234         \r
1235         p_interface->vendor_call = fw_access_ctrl;\r
1236 \r
1237         mlnx_memory_if(p_interface);\r
1238         mlnx_direct_if(p_interface);\r
1239         mlnx_mcast_if(p_interface);\r
1240 \r
1241 \r
1242         return;\r
1243 }\r
1244 \r