5d6a5d962eaca39469140872321356f725c295fc
[mirror/winof/.git] / hw / mthca / kernel / hca_verbs.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
4  *\r
5  * This software is available to you under the OpenIB.org BSD license\r
6  * below:\r
7  *\r
8  *     Redistribution and use in source and binary forms, with or\r
9  *     without modification, are permitted provided that the following\r
10  *     conditions are met:\r
11  *\r
12  *      - Redistributions of source code must retain the above\r
13  *        copyright notice, this list of conditions and the following\r
14  *        disclaimer.\r
15  *\r
16  *      - Redistributions in binary form must reproduce the above\r
17  *        copyright notice, this list of conditions and the following\r
18  *        disclaimer in the documentation and/or other materials\r
19  *        provided with the distribution.\r
20  *\r
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
22  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
24  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
25  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
26  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
27  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
28  * SOFTWARE.\r
29  *\r
30  * $Id: hca_verbs.c 148 2005-07-12 07:48:46Z sleybo $\r
31  */\r
32 \r
33 \r
34 #include "hca_driver.h"\r
35 #if defined(EVENT_TRACING)\r
36 #ifdef offsetof\r
37 #undef offsetof\r
38 #endif\r
39 #include "hca_verbs.tmh"\r
40 #endif\r
41 #include "mthca_dev.h"\r
42 #include "ib_cache.h"\r
43 #include "mx_abi.h"\r
44 \r
45 #define PTR_ALIGN(size) (((size) + sizeof(void*) - 1) & ~(sizeof(void*) - 1))\r
46 \r
47 \r
48 // Local declarations\r
49 ib_api_status_t\r
50 mlnx_query_qp (\r
51         IN              const   ib_qp_handle_t                          h_qp,\r
52                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
53         IN      OUT                     ci_umv_buf_t                            *p_umv_buf );\r
54 \r
55 /* \r
56 * CA Access Verbs\r
57 */\r
58 ib_api_status_t\r
59 mlnx_open_ca (\r
60         IN              const   ib_net64_t                                      ca_guid, // IN  const char *                ca_name,\r
61         IN              const   ci_completion_cb_t                      pfn_completion_cb,\r
62         IN              const   ci_async_event_cb_t                     pfn_async_event_cb,\r
63         IN              const   void*const                                      ca_context,\r
64                 OUT                     ib_ca_handle_t                          *ph_ca)\r
65 {\r
66         mlnx_hca_t                              *p_hca;\r
67         ib_api_status_t status = IB_NOT_FOUND;\r
68         mlnx_cache_t    *p_cache;\r
69         struct ib_device *ib_dev;\r
70 \r
71         HCA_ENTER(HCA_DBG_SHIM);\r
72         HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,\r
73                 ("context 0x%p\n", ca_context));\r
74 \r
75         // find CA object\r
76         p_hca = mlnx_hca_from_guid( ca_guid );\r
77         if( !p_hca ) {\r
78                 HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
79                         ("completes with ERROR status IB_NOT_FOUND\n"));\r
80                 return IB_NOT_FOUND;\r
81         }\r
82 \r
83         ib_dev = &p_hca->mdev->ib_dev;\r
84 \r
85         if (mthca_is_livefish(p_hca->mdev)) \r
86                 goto done;\r
87 \r
88         HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,\r
89                 ("context 0x%p\n", ca_context));\r
90         status = mlnx_hobs_set_cb(&p_hca->hob,\r
91                 pfn_completion_cb,\r
92                 pfn_async_event_cb,\r
93                 ca_context);\r
94         if (IB_SUCCESS != status) {\r
95                 goto err_set_cb;\r
96         }\r
97 \r
98         // MAD cache\r
99         p_cache = (mlnx_cache_t*)cl_pzalloc( sizeof(mlnx_cache_t) * 2 );\r
100         if( !p_cache ) {\r
101                 status = IB_INSUFFICIENT_MEMORY;\r
102                 goto err_mad_cache;\r
103         }\r
104         p_hca->hob.cache = p_cache;\r
105 \r
106         \r
107         //TODO: do we need something for kernel users ?\r
108 \r
109         // Return pointer to HOB object\r
110 done:   \r
111         if (ph_ca) *ph_ca = &p_hca->hob;\r
112         status =  IB_SUCCESS;\r
113 \r
114 err_mad_cache:\r
115 err_set_cb:\r
116         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
117                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
118         return status;\r
119 }\r
120 \r
/*
 * mlnx_query_ca
 *
 * Fill a caller-supplied ib_ca_attr_t with device and per-port attributes.
 * The attribute block is variable-sized: the fixed struct is followed, in
 * one contiguous caller buffer, by the page-size array, the per-port
 * attribute array, each port's GID and PKEY tables, the board-id string
 * and the uplink info.  All sections are pointer-aligned via PTR_ALIGN.
 *
 * Two-phase contract: if p_ca_attr is NULL or *p_byte_count is smaller than
 * the computed required_size, *p_byte_count is updated to required_size and
 * IB_INSUFFICIENT_MEMORY is returned so the caller can retry with a larger
 * buffer.  User-mode calls (p_umv_buf->command set) are not supported.
 */
ib_api_status_t
mlnx_query_ca (
	IN		const	ib_ca_handle_t				h_ca,
		OUT			ib_ca_attr_t				*p_ca_attr,
	IN	OUT			uint32_t					*p_byte_count,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t		status;
	uint32_t			size, required_size;
	uint8_t				port_num, num_ports;
	uint32_t			num_gids, num_pkeys;
	uint32_t			num_page_sizes = 1; // TBD: what is actually supported
	uint8_t				*last_p;
	struct ib_device_attr props;
	struct ib_port_attr  *hca_ports = NULL;
	int i;

	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );
	int err;

	HCA_ENTER(HCA_DBG_SHIM);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM ,("User mode is not supported yet\n"));
			p_umv_buf->status = status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}
	if (NULL == p_byte_count) {
		status = IB_INVALID_PARAMETER;
		goto err_byte_count;
	}

	// query the device
	err = mthca_query_device(ib_dev, &props );
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
			("ib_query_device failed (%d)\n",err));
		status = errno_to_iberr(err);
		goto err_query_device;
	}

	// alocate arrary for port properties
	num_ports = ib_dev->phys_port_cnt;   /* Number of physical ports of the HCA */
	if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof *hca_ports))) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("Failed to cl_zalloc ports array\n"));
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_ports;
	}

	// start calculation of ib_ca_attr_t full size
	// (fixed part plus the port-independent trailing sections)
	num_gids = 0;
	num_pkeys = 0;
	required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) +
		PTR_ALIGN(sizeof(uint32_t) * num_page_sizes) +
		PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports)+
		PTR_ALIGN(MTHCA_BOARD_ID_LEN)+
		sizeof(uplink_info_t);	/* uplink info */

	// get port properties
	// NOTE: loop bound is inclusive; end_port - start_port is assumed to be
	// num_ports - 1 (ports are numbered from start_port upward)
	for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num) {
		// request
		err = mthca_query_port(ib_dev, port_num + start_port(ib_dev), &hca_ports[port_num]);
		if (err) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("ib_query_port failed(%d) for port %d\n",err, port_num));
			status = errno_to_iberr(err);
			goto err_query_port;
		}

		// calculate GID table size
		num_gids  = hca_ports[port_num].gid_tbl_len;
		size = PTR_ALIGN(sizeof(ib_gid_t)  * num_gids);
		required_size += size;

		// calculate pkeys table size
		num_pkeys = hca_ports[port_num].pkey_tbl_len;
		size = PTR_ALIGN(sizeof(uint16_t) * num_pkeys);
		required_size += size;
	}

	// resource sufficience check
	// (a NULL p_ca_attr is the caller's legitimate "how big?" probe, so the
	// error is only logged when a too-small buffer was actually supplied)
	if (NULL == p_ca_attr || *p_byte_count < required_size) {
		*p_byte_count = required_size;
		status = IB_INSUFFICIENT_MEMORY;
		if ( p_ca_attr != NULL) {
			HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM,
				("Failed *p_byte_count (%d) < required_size (%d)\n", *p_byte_count, required_size ));
		}
		goto err_insuff_mem;
	}

	// Space is sufficient - setup table pointers
	// last_p is a bump pointer carving the caller's buffer into sections in
	// exactly the order accounted for in required_size above
	last_p = (uint8_t*)p_ca_attr;
	last_p += PTR_ALIGN(sizeof(*p_ca_attr));

	p_ca_attr->p_page_size = (uint32_t*)last_p;
	last_p += PTR_ALIGN(num_page_sizes * sizeof(uint32_t));

	p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p;
	last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t));

	// per-port GID and PKEY tables, sized from the query results above
	for (port_num = 0; port_num < num_ports; port_num++) {
		p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p;
		size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len);
		last_p += size;

		p_ca_attr->p_port_attr[port_num].p_pkey_table = (uint16_t *)last_p;
		size = PTR_ALIGN(sizeof(uint16_t) * hca_ports[port_num].pkey_tbl_len);
		last_p += size;
	}

	//copy vendor specific data
	cl_memcpy(last_p,to_mdev(ib_dev)->board_id, MTHCA_BOARD_ID_LEN);
	last_p += PTR_ALIGN(MTHCA_BOARD_ID_LEN);
	*(uplink_info_t*)last_p = to_mdev(ib_dev)->uplink_info;
	last_p += sizeof(uplink_info_t);	/* uplink info */

	// Separate the loops to ensure that table pointers are always setup
	for (port_num = 0; port_num < num_ports; port_num++) {

		// get pkeys, using cache
		for (i=0; i < hca_ports[port_num].pkey_tbl_len; ++i) {
			err = ib_get_cached_pkey( ib_dev, port_num + start_port(ib_dev), i,
				&p_ca_attr->p_port_attr[port_num].p_pkey_table[i] );
			if (err) {
				status = errno_to_iberr(err);
				HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM,
					("ib_get_cached_pkey failed (%d) for port_num %d, index %d\n",
					err, port_num + start_port(ib_dev), i));
				goto err_get_pkey;
			}
		}

		// get gids, using cache
		for (i=0; i < hca_ports[port_num].gid_tbl_len; ++i) {
			union ib_gid * __ptr64	gid = (union ib_gid	*)&p_ca_attr->p_port_attr[port_num].p_gid_table[i];
			err = ib_get_cached_gid( ib_dev, port_num + start_port(ib_dev), i, (union ib_gid *)gid );
			//TODO: do we need to convert gids to little endian
			if (err) {
				status = errno_to_iberr(err);
				HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
					("ib_get_cached_gid failed (%d) for port_num %d, index %d\n",
					err, port_num + start_port(ib_dev), i));
				goto err_get_gid;
			}
		}

		// dump the first GID of each port, byte by byte
		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,("port %d gid0:\n", port_num));
		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,
			(" 0x%x%x%x%x%x%x%x%x-0x%x%x%x%x%x%x%x%x\n",
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[0],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[1],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[2],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[3],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[4],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[5],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[6],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[7],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[8],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[9],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[10],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[11],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[12],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[13],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[14],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[15]));
	}

	// set result size
	p_ca_attr->size = required_size;
	// sanity: the bump pointer must have consumed exactly required_size bytes
	CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) );
	HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM , ("Space required %d used %d\n",
		required_size, (int)((uintn_t)last_p - (uintn_t)p_ca_attr) ));

	// !!! GID/PKEY tables must be queried before this call !!!
	mlnx_conv_hca_cap(ib_dev, &props, hca_ports, p_ca_attr);

	status = IB_SUCCESS;

err_get_gid:
err_get_pkey:
err_insuff_mem:
err_query_port:
	cl_free(hca_ports);
err_alloc_ports:
err_query_device:
err_byte_count:
err_user_unsupported:
	// IB_INSUFFICIENT_MEMORY is an expected probe result, not an error
	if( status != IB_INSUFFICIENT_MEMORY && status != IB_SUCCESS )
		HCA_PRINT(TRACE_LEVEL_ERROR	, HCA_DBG_SHIM	,("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	HCA_EXIT(HCA_DBG_SHIM);
	return status;
}
315 \r
316 ib_api_status_t\r
317 mlnx_modify_ca (\r
318         IN              const   ib_ca_handle_t                          h_ca,\r
319         IN              const   uint8_t                                         port_num,\r
320         IN              const   ib_ca_mod_t                                     modca_cmd,\r
321         IN              const   ib_port_attr_mod_t                      *p_port_attr)\r
322 {\r
323 #define SET_CAP_MOD(al_mask, al_fld, ib)                \\r
324                 if (modca_cmd & al_mask) {      \\r
325                         if (p_port_attr->cap.##al_fld)          \\r
326                                 props.set_port_cap_mask |= ib;  \\r
327                         else            \\r
328                                 props.clr_port_cap_mask |= ib;  \\r
329                 }\r
330 \r
331         ib_api_status_t status;\r
332         int err;\r
333         struct ib_port_modify props;\r
334         int port_modify_mask = 0;\r
335         mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
336         struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );\r
337 \r
338         HCA_ENTER(HCA_DBG_SHIM);\r
339 \r
340         // prepare parameters\r
341         RtlZeroMemory(&props, sizeof(props));\r
342         SET_CAP_MOD(IB_CA_MOD_IS_SM, sm, IB_PORT_SM);\r
343         SET_CAP_MOD(IB_CA_MOD_IS_SNMP_SUPPORTED, snmp, IB_PORT_SNMP_TUNNEL_SUP);\r
344         SET_CAP_MOD(IB_CA_MOD_IS_DEV_MGMT_SUPPORTED, dev_mgmt, IB_PORT_DEVICE_MGMT_SUP);\r
345         SET_CAP_MOD(IB_CA_MOD_IS_VEND_SUPPORTED, vend, IB_PORT_VENDOR_CLASS_SUP);\r
346         if ((modca_cmd & IB_CA_MOD_QKEY_CTR) && (p_port_attr->qkey_ctr == 0)) \r
347                 port_modify_mask |= IB_PORT_RESET_QKEY_CNTR;\r
348         \r
349         // modify port\r
350         err = mthca_modify_port(ib_dev, port_num, port_modify_mask, &props );\r
351         if (err) {\r
352                 status = errno_to_iberr(err);\r
353                 HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mthca_modify_port failed (%d) \n",err));\r
354                 goto err_modify_port;\r
355         }\r
356 \r
357         status =        IB_SUCCESS;\r
358 \r
359 err_modify_port:\r
360         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
361         return status;\r
362 }\r
363 \r
364 ib_api_status_t\r
365 mlnx_close_ca (\r
366         IN                              ib_ca_handle_t                          h_ca)\r
367 {\r
368         mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
369         HCA_ENTER(HCA_DBG_SHIM);\r
370 \r
371         if (mthca_is_livefish(MDEV_FROM_HOB( hob_p ))) \r
372                 goto done;\r
373 \r
374         mlnx_hobs_remove(h_ca);\r
375 \r
376 done:\r
377         HCA_EXIT(HCA_DBG_SHIM);\r
378         \r
379         return IB_SUCCESS;\r
380 }\r
381 \r
382 \r
/*
 * mlnx_um_open
 *
 * Open the CA on behalf of a user-mode caller and return the resulting
 * ib_ucontext (cast to ib_ca_handle_t) in *ph_um_ca.
 *
 * Two paths:
 *  - no user command buffer (p_umv_buf->command == 0): allocate a bare
 *    ib_ucontext stub with no PD (mlnx_um_close frees it with cl_free);
 *  - real user-mode open: create a kernel ucontext via mthca_alloc_ucontext,
 *    allocate a PD for it, and fill the caller's in/out buffer with the UAR
 *    address, PD handle/number and PCI vendor/device IDs.
 *
 * In both cases the context is chained to the device extension's uctx_list
 * under uctx_lock and the device use count is incremented.
 */
static ib_api_status_t
mlnx_um_open(
	IN		const	ib_ca_handle_t				h_ca,
	IN	OUT			ci_umv_buf_t* const			p_umv_buf,
		OUT			ib_ca_handle_t* const		ph_um_ca )
{
	int err;
	ib_api_status_t		status;
	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	hca_dev_ext_t *ext_p = EXT_FROM_HOB( hob_p );
	struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );
	struct ib_ucontext *p_context;
	struct mthca_alloc_ucontext_resp *uresp_p;
	struct ibv_alloc_pd_resp resp;
	ci_umv_buf_t umv_buf;

	HCA_ENTER(HCA_DBG_SHIM);

	// sanity check
	ASSERT( p_umv_buf );
	if( !p_umv_buf->command )
	{
		// stub path: bare context, no PD (p_context->pd stays NULL,
		// which tells mlnx_um_close to cl_free it)
		p_context = cl_zalloc( sizeof(struct ib_ucontext) );
		if( !p_context )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_alloc_ucontext;
		}
		/* Copy the dev info. */
		p_context->device = ib_dev;
		p_umv_buf->output_size = 0;
		goto done;
	}

	// create user context in kernel
	p_context = mthca_alloc_ucontext(ib_dev, p_umv_buf);
	if (IS_ERR(p_context)) {
		err = PTR_ERR(p_context);
		HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM,
			("mthca_alloc_ucontext failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_alloc_ucontext;
	}

	/* allocate pd */
	// a private kernel-side umv buffer is used so the PD response lands in
	// the local 'resp' rather than in the caller's buffer
	umv_buf.command = 1;
	umv_buf.input_size = umv_buf.status = 0;
	umv_buf.output_size = sizeof(struct ibv_alloc_pd_resp);
	umv_buf.p_inout_buf = &resp;
	//NB: Pay attention ! Ucontext parameter is important here:
	// when it is present (i.e. - for user space) - mthca_alloc_pd won't create MR
	p_context->pd = ibv_alloc_pd(ib_dev, p_context, &umv_buf);
	if (IS_ERR(p_context->pd)) {
		err = PTR_ERR(p_context->pd);
		HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
			("ibv_alloc_pd failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_alloc_pd;
	}

	// fill more parameters for user (sanity checks are in mthca_alloc_ucontext)
	uresp_p = (struct mthca_alloc_ucontext_resp *)(void*)p_umv_buf->p_inout_buf;
	uresp_p->uar_addr = (uint64_t)(UINT_PTR)p_context->user_uar;
	uresp_p->pd_handle = resp.pd_handle;
	uresp_p->pdn = resp.pdn;
	uresp_p->vend_id = (uint32_t)ext_p->hcaConfig.VendorID;
	uresp_p->dev_id = (uint16_t)ext_p->hcaConfig.DeviceID;

done:
	// some more inits
	p_context->va = p_context->p_mdl = NULL;
	p_context->fw_if_open = FALSE;
	KeInitializeMutex( &p_context->mutex, 0 );
	// chain user context to the device
	cl_spinlock_acquire( &ext_p->uctx_lock );
	cl_qlist_insert_tail( &ext_p->uctx_list, &p_context->list_item );
	cl_atomic_inc(&ext_p->usecnt);
	cl_spinlock_release( &ext_p->uctx_lock );

	// return the result
	if (ph_um_ca) *ph_um_ca = (ib_ca_handle_t)p_context;

	status = IB_SUCCESS;
	goto end;

err_alloc_pd:
	mthca_dealloc_ucontext(p_context);
err_alloc_ucontext:
end:
	if (p_umv_buf && p_umv_buf->command)
		p_umv_buf->status = status;
	// NOTE(review): this exit trace fires on success too, with status
	// IB_SUCCESS in the "ERROR" message text
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
478 \r
479 static void\r
480 mlnx_um_close(\r
481         IN                              ib_ca_handle_t                          h_ca,\r
482         IN                              ib_ca_handle_t                          h_um_ca )\r
483 {\r
484         struct ib_ucontext *p_ucontext = (struct ib_ucontext *)h_um_ca;\r
485         mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
486         hca_dev_ext_t *ext_p = EXT_FROM_HOB( hob_p );\r
487 \r
488         if (mthca_is_livefish(to_mdev(p_ucontext->device)))\r
489                 goto done;\r
490         unmap_crspace_for_all(p_ucontext);\r
491 done:   \r
492         cl_spinlock_acquire( &ext_p->uctx_lock );\r
493         cl_qlist_remove_item( &ext_p->uctx_list, &p_ucontext->list_item );\r
494         cl_atomic_dec(&ext_p->usecnt);\r
495         cl_spinlock_release( &ext_p->uctx_lock );\r
496         if( !p_ucontext->pd )\r
497                 cl_free( h_um_ca );\r
498         else\r
499                 ibv_um_close(p_ucontext);\r
500         return;\r
501 }\r
502 \r
503 \r
504 /*\r
505 *    Protection Domain and Reliable Datagram Domain Verbs\r
506 */\r
507 \r
508 ib_api_status_t\r
509 mlnx_allocate_pd (\r
510         IN              const   ib_ca_handle_t                          h_ca,\r
511         IN              const   ib_pd_type_t                            type,\r
512                 OUT                     ib_pd_handle_t                          *ph_pd,\r
513         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
514 {\r
515         ib_api_status_t         status;\r
516         struct ib_device *ib_dev;\r
517         struct ib_ucontext *p_context;\r
518         struct ib_pd *ib_pd_p;\r
519         int err;\r
520 \r
521         //TODO: how are we use it ?\r
522         UNREFERENCED_PARAMETER(type);\r
523         \r
524         HCA_ENTER(HCA_DBG_SHIM);\r
525 \r
526         if( p_umv_buf ) {\r
527                 p_context = (struct ib_ucontext *)h_ca;\r
528                 ib_dev = p_context->device;\r
529         }\r
530         else {\r
531                 mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
532                 p_context = NULL;\r
533                 ib_dev = IBDEV_FROM_HOB( hob_p );\r
534         }\r
535         \r
536         // create PD\r
537         ib_pd_p = ibv_alloc_pd(ib_dev, p_context, p_umv_buf);\r
538         if (IS_ERR(ib_pd_p)) {\r
539                 err = PTR_ERR(ib_pd_p);\r
540                 HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
541                         ("ibv_alloc_pd failed (%d)\n", err));\r
542                 status = errno_to_iberr(err);\r
543                 goto err_alloc_pd;\r
544         }\r
545 \r
546         // return the result\r
547         if (ph_pd) *ph_pd = (ib_pd_handle_t)ib_pd_p;\r
548 \r
549         status = IB_SUCCESS;\r
550         \r
551 err_alloc_pd:   \r
552         if (p_umv_buf && p_umv_buf->command) \r
553                 p_umv_buf->status = status;\r
554         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
555                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
556         return status;\r
557 }\r
558 \r
559 ib_api_status_t\r
560 mlnx_deallocate_pd (\r
561         IN                              ib_pd_handle_t                          h_pd)\r
562 {\r
563         ib_api_status_t         status;\r
564         int err;\r
565         struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
566         PREP_IBDEV_FOR_PRINT(ib_pd_p->device)\r
567 \r
568         HCA_ENTER( HCA_DBG_QP);\r
569 \r
570         HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  ,\r
571                 ("pcs %p\n", PsGetCurrentProcess()));\r
572         \r
573         // dealloc pd\r
574         err = ibv_dealloc_pd( ib_pd_p );\r
575         if (err) {\r
576                 HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM\r
577                         ,("ibv_dealloc_pd failed (%d)\n", err));\r
578                 status = errno_to_iberr(err);\r
579                 goto err_dealloc_pd;\r
580         }\r
581         status = IB_SUCCESS;\r
582 \r
583 err_dealloc_pd:\r
584         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM\r
585                 ,("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
586         return status;\r
587 }\r
588 \r
589 /* \r
590 * Address Vector Management Verbs\r
591 */\r
592 ib_api_status_t\r
593 mlnx_create_av (\r
594         IN              const   ib_pd_handle_t                          h_pd,\r
595         IN              const   ib_av_attr_t                            *p_addr_vector,\r
596                 OUT                     ib_av_handle_t                          *ph_av,\r
597         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
598 {\r
599         int err = 0;\r
600         ib_api_status_t         status = IB_SUCCESS;\r
601         struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
602         struct ib_device *ib_dev = ib_pd_p->device;\r
603         struct ib_ah *ib_av_p;\r
604         struct ib_ah_attr ah_attr;\r
605         struct ib_ucontext *p_context = NULL;\r
606 \r
607         HCA_ENTER(HCA_DBG_AV);\r
608 \r
609         if( p_umv_buf && p_umv_buf->command ) {\r
610                 // sanity checks \r
611                 if (p_umv_buf->input_size < sizeof(struct ibv_create_ah) ||\r
612                         p_umv_buf->output_size < sizeof(struct ibv_create_ah_resp) ||\r
613                         !p_umv_buf->p_inout_buf) {\r
614                         status = IB_INVALID_PARAMETER;\r
615                         goto err_inval_params;\r
616                 }\r
617                 p_context = ib_pd_p->ucontext;\r
618         }\r
619         else \r
620                 p_context = NULL;\r
621 \r
622         // fill parameters \r
623         RtlZeroMemory(&ah_attr, sizeof(ah_attr));\r
624         mlnx_conv_ibal_av( ib_dev, p_addr_vector,  &ah_attr );\r
625 \r
626         ib_av_p = ibv_create_ah(ib_pd_p, &ah_attr, p_context, p_umv_buf);\r
627         if (IS_ERR(ib_av_p)) {\r
628                 err = PTR_ERR(ib_av_p);\r
629                 HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_AV,\r
630                         ("ibv_create_ah failed (%d)\n", err));\r
631                 status = errno_to_iberr(err);\r
632                 goto err_alloc_av;\r
633         }\r
634 \r
635         // return the result\r
636         if (ph_av) *ph_av = (ib_av_handle_t)ib_av_p;\r
637 \r
638         status = IB_SUCCESS;\r
639 \r
640 err_alloc_av:   \r
641 err_inval_params:\r
642         if (p_umv_buf && p_umv_buf->command) \r
643                 p_umv_buf->status = status;\r
644         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_AV,\r
645                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
646         return status;\r
647 }\r
648 \r
/*
 * Query an address handle: convert the kernel ib_ah back into an IBAL
 * address vector (*p_addr_vector) and return the owning PD handle in
 * *ph_pd.  User-mode queries are rejected with IB_UNSUPPORTED.
 */
ib_api_status_t
mlnx_query_av (
	IN		const	ib_av_handle_t				h_av,
		OUT			ib_av_attr_t				*p_addr_vector,
		OUT			ib_pd_handle_t				*ph_pd,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	ib_api_status_t		status = IB_SUCCESS;
	struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;
	PREP_IBDEV_FOR_PRINT(ib_ah_p->device)

	HCA_ENTER(HCA_DBG_AV);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,
				("User mode is not supported yet\n"));
			status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}

	// query AV
#ifdef WIN_TO_BE_CHANGED
	// NOTE(review): dead branch (WIN_TO_BE_CHANGED never defined here);
	// it also lacks a ';' after the ibv_query_ah() call and references an
	// undeclared ah_attr, so it would not compile as-is.
	//TODO: not implemented in low-level driver
	err = ibv_query_ah(ib_ah_p, &ah_attr)
	if (err) {
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_AV,
			("ibv_query_ah failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_query_ah;
	}
	// convert to IBAL structure: something like that
	mlnx_conv_mthca_av( p_addr_vector,  &ah_attr );
#else

	// active path: convert the mthca AH directly to the IBAL address vector
	err = mlnx_conv_mthca_av( ib_ah_p, p_addr_vector );
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,
			("mlnx_conv_mthca_av failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_conv_mthca_av;
	}
#endif

	// results
	*ph_pd = (ib_pd_handle_t)ib_ah_p->pd;

err_conv_mthca_av:
err_user_unsupported:
	// NOTE(review): fires on success too, printing "ERROR status" IB_SUCCESS
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_AV,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
703 \r
/*
 * Modify an address handle in place from an IBAL address vector.
 * The vector is converted to an ib_ah_attr and applied via mlnx_modify_ah.
 * User-mode calls are rejected with IB_UNSUPPORTED.  Always returns the
 * final status; the active path itself cannot fail past the sanity check.
 */
ib_api_status_t
mlnx_modify_av (
	IN		const	ib_av_handle_t				h_av,
	IN		const	ib_av_attr_t				*p_addr_vector,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	struct ib_ah_attr ah_attr;
	ib_api_status_t		status = IB_SUCCESS;
	struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;
	struct ib_device *ib_dev = ib_ah_p->pd->device;

	HCA_ENTER(HCA_DBG_AV);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_AV,
				("User mode is not supported yet\n"));
			status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}

	// fill parameters
	RtlZeroMemory(&ah_attr, sizeof(ah_attr));
	mlnx_conv_ibal_av( ib_dev, p_addr_vector,  &ah_attr );

	// modify AH
#ifdef WIN_TO_BE_CHANGED
	// NOTE(review): dead branch (WIN_TO_BE_CHANGED never defined here);
	// it lacks a ';' after ibv_modify_ah(), uses an undeclared err, and
	// targets a non-existent err_query_ah label - kept for reference only.
	//TODO: not implemented in low-level driver
	err = ibv_modify_ah(ib_ah_p, &ah_attr)
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,
			("ibv_query_ah failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_query_ah;
	}
#else

	// active path: apply the new attributes to the mthca AH
	mlnx_modify_ah( ib_ah_p, &ah_attr );
#endif

err_user_unsupported:
	// NOTE(review): fires on success too, printing "ERROR status" IB_SUCCESS
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_AV,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
749 \r
750 ib_api_status_t\r
751 mlnx_destroy_av (\r
752         IN              const   ib_av_handle_t                          h_av)\r
753 {\r
754         int err;\r
755         ib_api_status_t         status = IB_SUCCESS;\r
756         struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;\r
757         PREP_IBDEV_FOR_PRINT(ib_ah_p->device)\r
758 \r
759         HCA_ENTER(HCA_DBG_AV);\r
760 \r
761         // destroy AV\r
762         err = ibv_destroy_ah( ib_ah_p );\r
763         if (err) {\r
764                 HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_AV,\r
765                         ("ibv_destroy_ah failed (%d)\n", err));\r
766                 status = errno_to_iberr(err);\r
767                 goto err_destroy_ah;\r
768         }\r
769 \r
770 err_destroy_ah:\r
771         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_AV,\r
772                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
773         return status;\r
774 }\r
775 \r
776 /*\r
777 *       Queue Pair Management Verbs\r
778 */\r
779 \r
780 \r
/*
 * _create_qp - common worker for QP creation (both regular and special QPs).
 *
 * h_pd           - protection domain to create the QP in (struct ib_pd *)
 * port_num       - port number; only meaningful for special QPs
 * qp_context     - opaque consumer context stored in the mthca QP object
 * p_create_attr  - IBAL creation attributes (type, CQs, depths, SGE counts)
 * p_qp_attr      - optional; on success filled via mlnx_query_qp
 * ph_qp          - optional; receives the new QP handle on success
 * p_umv_buf      - user-mode verbs buffer; non-zero command => user-mode call
 *
 * On user-mode calls the PD's ucontext is passed down so the low-level
 * driver can map resources for the process; kernel calls pass NULL.
 * On failure after creation the QP is destroyed before returning.
 */
static ib_api_status_t
_create_qp (
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	uint8_t					port_num,
	IN		const	void					*qp_context,
	IN		const	ib_qp_create_t				*p_create_attr,
		OUT			ib_qp_attr_t				*p_qp_attr,
		OUT			ib_qp_handle_t				*ph_qp,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	ib_api_status_t		status;
	struct ib_qp * ib_qp_p;
	struct mthca_qp *qp_p;
	struct ib_qp_init_attr qp_init_attr;
	struct ib_ucontext *p_context = NULL;
	struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
	struct ib_device *ib_dev = ib_pd_p->device;
	mlnx_hob_t	 *hob_p = HOB_FROM_IBDEV(ib_dev);
	
	HCA_ENTER(HCA_DBG_QP);

	if( p_umv_buf && p_umv_buf->command ) {
		// sanity checks: user-mode exchange buffers must be big enough
		// for the ibv_create_qp request/response ABI structures
		if (p_umv_buf->input_size < sizeof(struct ibv_create_qp) ||
			p_umv_buf->output_size < sizeof(struct ibv_create_qp_resp) ||
			!p_umv_buf->p_inout_buf) {
			status = IB_INVALID_PARAMETER;
			goto err_inval_params;
		}
		p_context = ib_pd_p->ucontext;
	}
	else 
		p_context = NULL;

	// prepare the parameters: translate IBAL create attributes into
	// the low-level ib_qp_init_attr form
	RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));
	qp_init_attr.qp_type = p_create_attr->qp_type;
	qp_init_attr.event_handler = qp_event_handler;
	qp_init_attr.qp_context = hob_p;
	qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;
	qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;
	qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;
	qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;
	qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;
	qp_init_attr.cap.max_send_wr = p_create_attr->sq_depth;
	qp_init_attr.sq_sig_type = (p_create_attr->sq_signaled) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	qp_init_attr.port_num = port_num;


	// create qp		
	ib_qp_p = ibv_create_qp( ib_pd_p, &qp_init_attr, p_context, p_umv_buf );
	if (IS_ERR(ib_qp_p)) {
		err = PTR_ERR(ib_qp_p);
		HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_QP,
			("ibv_create_qp failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_create_qp;
	}

	// fill the object: stash consumer context and the creation attributes
	// (mlnx_query_qp later reports capabilities from qp_init_attr)
	qp_p = (struct mthca_qp *)ib_qp_p;
	qp_p->qp_context = (void*)qp_context;
	qp_p->qp_init_attr = qp_init_attr;

	// Query QP to obtain requested attributes
	if (p_qp_attr) {
		status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);
		if (status != IB_SUCCESS)
				goto err_query_qp;
	}
	
	// return the results
	if (ph_qp) *ph_qp = (ib_qp_handle_t)ib_qp_p;

	status = IB_SUCCESS;
	goto end;

err_query_qp:
	// undo the creation so no half-initialized QP leaks to the caller
	ibv_destroy_qp( ib_qp_p );
err_create_qp:
err_inval_params:
end:
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
870 \r
871 ib_api_status_t\r
872 mlnx_create_spl_qp (\r
873         IN              const   ib_pd_handle_t                          h_pd,\r
874         IN              const   uint8_t                                         port_num,\r
875         IN              const   void                                            *qp_context,\r
876         IN              const   ib_qp_create_t                          *p_create_attr,\r
877                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
878                 OUT                     ib_qp_handle_t                          *ph_qp )\r
879 {\r
880         ib_api_status_t         status;\r
881         PREP_IBDEV_FOR_PRINT(((struct ib_pd*)h_pd)->device)\r
882 \r
883         HCA_ENTER(HCA_DBG_SHIM);\r
884 \r
885         status =        _create_qp( h_pd, port_num,\r
886                 qp_context, p_create_attr, p_qp_attr, ph_qp, NULL );\r
887                 \r
888         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
889                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
890         return status;\r
891 }\r
892 \r
893 ib_api_status_t\r
894 mlnx_create_qp (\r
895         IN              const   ib_pd_handle_t                          h_pd,\r
896         IN              const   void                                            *qp_context,\r
897         IN              const   ib_qp_create_t                          *p_create_attr,\r
898                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
899                 OUT                     ib_qp_handle_t                          *ph_qp,\r
900         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
901 {\r
902         ib_api_status_t         status;\r
903         PREP_IBDEV_FOR_PRINT(((struct ib_pd*)h_pd)->device)\r
904 \r
905         //NB: algorithm of mthca_alloc_sqp() requires port_num\r
906         // PRM states, that special pares are created in couples, so\r
907         // looks like we can put here port_num = 1 always\r
908         uint8_t port_num = 1;\r
909 \r
910         HCA_ENTER(HCA_DBG_QP);\r
911 \r
912         status = _create_qp( h_pd, port_num,\r
913                 qp_context, p_create_attr, p_qp_attr, ph_qp, p_umv_buf );\r
914                 \r
915         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,\r
916                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
917         return status;\r
918 }\r
919 \r
920 ib_api_status_t\r
921 mlnx_modify_qp (\r
922         IN              const   ib_qp_handle_t                          h_qp,\r
923         IN              const   ib_qp_mod_t                                     *p_modify_attr,\r
924                 OUT                     ib_qp_attr_t                            *p_qp_attr OPTIONAL,\r
925         IN      OUT                     ci_umv_buf_t                            *p_umv_buf OPTIONAL )\r
926 {\r
927         ib_api_status_t         status;\r
928         int err;\r
929         struct ib_qp_attr qp_attr;\r
930         int qp_attr_mask;\r
931         struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
932         PREP_IBDEV_FOR_PRINT(ib_qp_p->device)\r
933 \r
934         HCA_ENTER(HCA_DBG_QP);\r
935 \r
936         // sanity checks\r
937         if( p_umv_buf && p_umv_buf->command ) {\r
938                 // sanity checks \r
939                 if (p_umv_buf->output_size < sizeof(struct ibv_modify_qp_resp) ||\r
940                         !p_umv_buf->p_inout_buf) {\r
941                         status = IB_INVALID_PARAMETER;\r
942                         goto err_inval_params;\r
943                 }\r
944         }\r
945         \r
946         // fill parameters \r
947         status = mlnx_conv_qp_modify_attr( ib_qp_p, ib_qp_p->qp_type, \r
948                 p_modify_attr,  &qp_attr, &qp_attr_mask );\r
949         if (status == IB_NOT_DONE)\r
950                 goto query_qp;\r
951         if (status != IB_SUCCESS ) \r
952                 goto err_mode_unsupported;\r
953 \r
954         // modify QP\r
955         err = ibv_modify_qp(ib_qp_p, &qp_attr, qp_attr_mask);\r
956         if (err) {\r
957                 HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_QP ,("ibv_modify_qp failed (%d)\n", err));\r
958                 status = errno_to_iberr(err);\r
959                 goto err_modify_qp;\r
960         }\r
961 \r
962         // Query QP to obtain requested attributes\r
963 query_qp:       \r
964         if (p_qp_attr) {\r
965                 status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);\r
966                 if (status != IB_SUCCESS)\r
967                                 goto err_query_qp;\r
968         }\r
969         \r
970         if( p_umv_buf && p_umv_buf->command )\r
971         {\r
972                 struct ibv_modify_qp_resp resp;\r
973                 resp.attr_mask = qp_attr_mask;\r
974                 resp.qp_state = qp_attr.qp_state;\r
975                 err = ib_copy_to_umv_buf(p_umv_buf, &resp, sizeof(struct ibv_modify_qp_resp));\r
976                 if (err) {\r
977                         HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("ib_copy_to_umv_buf failed (%d)\n", err));\r
978                         status = errno_to_iberr(err);\r
979                         goto err_copy;\r
980                 }\r
981         }\r
982 \r
983         status = IB_SUCCESS;\r
984 \r
985 err_copy:       \r
986 err_query_qp:\r
987 err_modify_qp:  \r
988 err_mode_unsupported:\r
989 err_inval_params:\r
990         if (p_umv_buf && p_umv_buf->command) \r
991                 p_umv_buf->status = status;\r
992         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
993                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
994         return status;\r
995 }\r
996 \r
/*
 * mlnx_query_qp - report QP attributes to the IBAL caller.
 *
 * h_qp       - handle of the QP to query (struct ib_qp *)
 * p_qp_attr  - receives the attributes; zeroed first, then filled
 * p_umv_buf  - unused (no user-mode exchange needed for this verb)
 *
 * The low-level driver has no query_qp, so the values are taken from the
 * cached creation attributes (qp_init_attr) and the mthca QP object rather
 * than from hardware; fields that would require a hardware query stay zero.
 * Always returns IB_SUCCESS.
 */
ib_api_status_t
mlnx_query_qp (
	IN		const	ib_qp_handle_t				h_qp,
		OUT			ib_qp_attr_t				*p_qp_attr,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t		status = IB_SUCCESS;
	struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
	struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;
	PREP_IBDEV_FOR_PRINT(ib_qp_p->device)

	UNREFERENCED_PARAMETER(p_umv_buf);
	
	HCA_ENTER( HCA_DBG_QP);
	// sanity checks

	// clean the structure
	RtlZeroMemory( p_qp_attr, sizeof *p_qp_attr );
	
	// fill the structure
	//TODO: this function is to be implemented via ibv_query_qp, which is not supported now 
	p_qp_attr->h_pd						= (ib_pd_handle_t)qp_p->ibqp.pd;
	p_qp_attr->qp_type				= qp_p->ibqp.qp_type;
	p_qp_attr->sq_max_inline		= qp_p->qp_init_attr.cap.max_inline_data;
	p_qp_attr->sq_depth				= qp_p->qp_init_attr.cap.max_send_wr;
	p_qp_attr->rq_depth				= qp_p->qp_init_attr.cap.max_recv_wr;
	p_qp_attr->sq_sge					= qp_p->qp_init_attr.cap.max_send_sge;
	p_qp_attr->rq_sge					= qp_p->qp_init_attr.cap.max_recv_sge;
	p_qp_attr->resp_res				= qp_p->resp_depth;
	p_qp_attr->h_sq_cq				= (ib_cq_handle_t)qp_p->ibqp.send_cq;
	p_qp_attr->h_rq_cq				= (ib_cq_handle_t)qp_p->ibqp.recv_cq;
	p_qp_attr->sq_signaled			= qp_p->sq_policy == IB_SIGNAL_ALL_WR;
	p_qp_attr->state						= mlnx_qps_to_ibal( qp_p->state );
	// qp_num is converted to network byte order for the IBAL consumer
	p_qp_attr->num						= cl_hton32(qp_p->ibqp.qp_num);

#ifdef WIN_TO_BE_CHANGED
//TODO: don't know how to fill the following fields	without support of query_qp in MTHCA	
	p_qp_attr->access_ctrl			= qp_p->
	p_qp_attr->pkey_index			= qp_p->
	p_qp_attr->dest_num				= qp_p-
	p_qp_attr->init_depth			= qp_p-
	p_qp_attr->qkey					= qp_p-
	p_qp_attr->sq_psn				= qp_p-
	p_qp_attr->rq_psn				= qp_p-
	p_qp_attr->primary_port		= qp_p-
	p_qp_attr->alternate_port		= qp_p-
	p_qp_attr->primary_av			= qp_p-
	p_qp_attr->alternate_av			= qp_p-
	p_qp_attr->apm_state			= qp_p-
#endif		

	status = IB_SUCCESS;

	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_QP,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
1054 \r
1055 ib_api_status_t\r
1056 mlnx_destroy_qp (\r
1057         IN              const   ib_qp_handle_t                          h_qp,\r
1058         IN              const   uint64_t                                        timewait )\r
1059 {\r
1060         ib_api_status_t         status;\r
1061         int err;\r
1062         struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
1063         PREP_IBDEV_FOR_PRINT(ib_qp_p->device)\r
1064 \r
1065         UNUSED_PARAM( timewait );\r
1066 \r
1067         HCA_ENTER( HCA_DBG_QP);\r
1068 \r
1069         HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  ,\r
1070                 ("qpnum %#x, pcs %p\n", ib_qp_p->qp_num, PsGetCurrentProcess()) );\r
1071 \r
1072         err = ibv_destroy_qp( ib_qp_p );\r
1073         if (err) {\r
1074                 HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,\r
1075                         ("ibv_destroy_qp failed (%d)\n", err));\r
1076                 status = errno_to_iberr(err);\r
1077                 goto err_destroy_qp;\r
1078         }\r
1079 \r
1080         status = IB_SUCCESS;\r
1081 \r
1082 err_destroy_qp:\r
1083         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,\r
1084                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1085         return status;\r
1086 }\r
1087 \r
1088 /*\r
1089 * Completion Queue Managment Verbs.\r
1090 */\r
1091 \r
1092 ib_api_status_t\r
1093 mlnx_create_cq (\r
1094         IN              const   ib_ca_handle_t                          h_ca,\r
1095         IN              const   void                                            *cq_context,\r
1096         IN      OUT                     uint32_t                                        *p_size,\r
1097                 OUT                     ib_cq_handle_t                          *ph_cq,\r
1098         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1099 {\r
1100         int err;\r
1101         ib_api_status_t         status;\r
1102         struct ib_cq *ib_cq_p;\r
1103         struct mthca_cq *cq_p;\r
1104         mlnx_hob_t                      *hob_p;\r
1105         struct ib_device *ib_dev;\r
1106         struct ib_ucontext *p_context;\r
1107 \r
1108         HCA_ENTER(HCA_DBG_CQ);\r
1109 \r
1110         if( p_umv_buf ) {\r
1111 \r
1112                 p_context = (struct ib_ucontext *)h_ca;\r
1113                 hob_p = HOB_FROM_IBDEV(p_context->device);\r
1114                 ib_dev = p_context->device;\r
1115 \r
1116                 // sanity checks \r
1117                 if (p_umv_buf->input_size < sizeof(struct ibv_create_cq) ||\r
1118                         p_umv_buf->output_size < sizeof(struct ibv_create_cq_resp) ||\r
1119                         !p_umv_buf->p_inout_buf) {\r
1120                         status = IB_INVALID_PARAMETER;\r
1121                         goto err_inval_params;\r
1122                 }\r
1123         }\r
1124         else {\r
1125                 hob_p = (mlnx_hob_t *)h_ca;\r
1126                 p_context = NULL;\r
1127                 ib_dev = IBDEV_FROM_HOB( hob_p );\r
1128         }\r
1129 \r
1130         /* sanity check */\r
1131         if (*p_size > (uint32_t)ib_dev->mdev->limits.max_cqes) {\r
1132                 status = IB_INVALID_CQ_SIZE;\r
1133                 goto err_cqe;\r
1134         }\r
1135 \r
1136         // allocate cq  \r
1137         ib_cq_p = ibv_create_cq(ib_dev, \r
1138                 cq_comp_handler, cq_event_handler,\r
1139                 hob_p, *p_size, p_context, p_umv_buf );\r
1140         if (IS_ERR(ib_cq_p)) {\r
1141                 err = PTR_ERR(ib_cq_p);\r
1142                 HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_CQ, ("ibv_create_cq failed (%d)\n", err));\r
1143                 status = errno_to_iberr(err);\r
1144                 goto err_create_cq;\r
1145         }\r
1146 \r
1147         // fill the object\r
1148         cq_p = (struct mthca_cq *)ib_cq_p;\r
1149         cq_p->cq_context = (void*)cq_context;\r
1150         \r
1151         // return the result\r
1152 //      *p_size = *p_size;      // return the same value\r
1153         *p_size = ib_cq_p->cqe;\r
1154 \r
1155         if (ph_cq) *ph_cq = (ib_cq_handle_t)cq_p;\r
1156 \r
1157         status = IB_SUCCESS;\r
1158         \r
1159 err_create_cq:\r
1160 err_inval_params:\r
1161 err_cqe:\r
1162         if (p_umv_buf && p_umv_buf->command) \r
1163                 p_umv_buf->status = status;\r
1164         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ,\r
1165                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1166         return status;\r
1167 }\r
1168 \r
1169 ib_api_status_t\r
1170 mlnx_resize_cq (\r
1171         IN              const   ib_cq_handle_t                          h_cq,\r
1172         IN      OUT                     uint32_t                                        *p_size,\r
1173         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1174 {\r
1175         UNREFERENCED_PARAMETER(h_cq);\r
1176         UNREFERENCED_PARAMETER(p_size);\r
1177         if (p_umv_buf && p_umv_buf->command) {\r
1178                 p_umv_buf->status = IB_UNSUPPORTED;\r
1179         }\r
1180         HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mlnx_resize_cq not implemented\n"));\r
1181         return IB_UNSUPPORTED;\r
1182 }\r
1183 \r
1184 ib_api_status_t\r
1185 mlnx_query_cq (\r
1186         IN              const   ib_cq_handle_t                          h_cq,\r
1187                 OUT                     uint32_t                                        *p_size,\r
1188         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1189 {\r
1190         UNREFERENCED_PARAMETER(h_cq);\r
1191         UNREFERENCED_PARAMETER(p_size);\r
1192         if (p_umv_buf && p_umv_buf->command) {\r
1193                 p_umv_buf->status = IB_UNSUPPORTED;\r
1194         }\r
1195         HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mlnx_query_cq not implemented\n"));\r
1196         return IB_UNSUPPORTED;\r
1197 }\r
1198 \r
1199 ib_api_status_t\r
1200 mlnx_destroy_cq (\r
1201         IN              const   ib_cq_handle_t                          h_cq)\r
1202 {\r
1203                                                                                                                                                                 \r
1204         ib_api_status_t         status;\r
1205         int err;\r
1206         struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;\r
1207         PREP_IBDEV_FOR_PRINT(ib_cq_p->device)\r
1208 \r
1209         HCA_ENTER( HCA_DBG_QP);\r
1210 \r
1211         HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  ,\r
1212                 ("cqn %#x, pcs %p\n", ((struct mthca_cq*)ib_cq_p)->cqn, PsGetCurrentProcess()) );\r
1213 \r
1214         // destroy CQ\r
1215         err = ibv_destroy_cq( ib_cq_p );\r
1216         if (err) {\r
1217                 HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,\r
1218                         ("ibv_destroy_cq failed (%d)\n", err));\r
1219                 status = errno_to_iberr(err);\r
1220                 goto err_destroy_cq;\r
1221         }\r
1222 \r
1223         status = IB_SUCCESS;\r
1224 \r
1225 err_destroy_cq:\r
1226         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
1227                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1228         return status;\r
1229 }\r
1230 \r
1231 \r
/*
 * setup_ci_interface - populate the IBAL channel-interface (CI) dispatch
 * table with this driver's verb implementations.
 *
 * ca_guid     - GUID of the CA, stored in the interface for IBAL lookup
 * is_livefish - non-zero when the HCA is in "livefish" (maintenance/flash
 *               recovery) mode: only the memory-access subset is exposed
 * p_interface - table to fill; zeroed first
 */
void
setup_ci_interface(
	IN		const	ib_net64_t					ca_guid,
	IN		const	int						is_livefish,
	IN	OUT			ci_interface_t				*p_interface )
{
	cl_memclr(p_interface, sizeof(*p_interface));

	/* Guid of the CA. */
	p_interface->guid = ca_guid;

	/* Version of this interface. */
	p_interface->version = VERBS_VERSION;

	/* UVP name */
	cl_memcpy( p_interface->libname, mlnx_uvp_lib_name, MAX_LIB_NAME);

	HCA_PRINT(TRACE_LEVEL_VERBOSE  , HCA_DBG_SHIM  ,("UVP filename %s\n", p_interface->libname));

	/* The real interface. */
	/* these verbs are available in every mode */
	p_interface->open_ca = mlnx_open_ca;
	p_interface->query_ca = mlnx_query_ca;
	p_interface->close_ca = mlnx_close_ca;
	p_interface->um_open_ca = mlnx_um_open;
	p_interface->um_close_ca = mlnx_um_close;

	p_interface->allocate_pd = mlnx_allocate_pd;
	p_interface->deallocate_pd = mlnx_deallocate_pd;
	p_interface->vendor_call = fw_access_ctrl;

	if (is_livefish) {
		/* maintenance mode: only memory registration verbs are wired up */
		mlnx_memory_if_livefish(p_interface);
	}
	else {	
		/* normal mode: full verb set */
		p_interface->modify_ca = mlnx_modify_ca; 
		
		p_interface->create_av = mlnx_create_av;
		p_interface->query_av = mlnx_query_av;
		p_interface->modify_av = mlnx_modify_av;
		p_interface->destroy_av = mlnx_destroy_av;

		p_interface->create_qp = mlnx_create_qp;
		p_interface->create_spl_qp = mlnx_create_spl_qp;
		p_interface->modify_qp = mlnx_modify_qp;
		p_interface->query_qp = mlnx_query_qp;
		p_interface->destroy_qp = mlnx_destroy_qp;

		p_interface->create_cq = mlnx_create_cq;
		p_interface->resize_cq = mlnx_resize_cq;
		p_interface->query_cq = mlnx_query_cq;
		p_interface->destroy_cq = mlnx_destroy_cq;

		p_interface->local_mad = mlnx_local_mad;
		

		/* fill in the memory, direct-access and multicast verb groups */
		mlnx_memory_if(p_interface);
		mlnx_direct_if(p_interface);
		mlnx_mcast_if(p_interface);
	}

	return;
}
1294 \r