[MTHCA] 1. bugfix: gid lookup used the wrong port number;
[mirror/winof/.git] / hw / mthca / kernel / hca_verbs.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
4  *\r
5  * This software is available to you under the OpenIB.org BSD license\r
6  * below:\r
7  *\r
8  *     Redistribution and use in source and binary forms, with or\r
9  *     without modification, are permitted provided that the following\r
10  *     conditions are met:\r
11  *\r
12  *      - Redistributions of source code must retain the above\r
13  *        copyright notice, this list of conditions and the following\r
14  *        disclaimer.\r
15  *\r
16  *      - Redistributions in binary form must reproduce the above\r
17  *        copyright notice, this list of conditions and the following\r
18  *        disclaimer in the documentation and/or other materials\r
19  *        provided with the distribution.\r
20  *\r
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
22  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
24  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
25  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
26  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
27  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
28  * SOFTWARE.\r
29  *\r
30  * $Id: hca_verbs.c 148 2005-07-12 07:48:46Z sleybo $\r
31  */\r
32 \r
33 \r
34 #include "hca_driver.h"\r
35 #if defined(EVENT_TRACING)\r
36 #ifdef offsetof\r
37 #undef offsetof\r
38 #endif\r
39 #include "hca_verbs.tmh"\r
40 #endif\r
41 #include "mthca_dev.h"\r
42 #include "ib_cache.h"\r
43 #include "mx_abi.h"\r
44 \r
45 #define PTR_ALIGN(size) (((size) + sizeof(void*) - 1) & ~(sizeof(void*) - 1))\r
46 \r
47 \r
48 // Local declarations\r
49 ib_api_status_t\r
50 mlnx_query_qp (\r
51         IN              const   ib_qp_handle_t                          h_qp,\r
52                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
53         IN      OUT                     ci_umv_buf_t                            *p_umv_buf );\r
54 \r
55 /* \r
56 * CA Access Verbs\r
57 */\r
/*
 * Opens the HCA identified by ca_guid: registers the caller's completion
 * and async-event callbacks on the HCA object (HOB) and allocates the
 * per-HCA MAD cache.  Returns the HOB through *ph_ca.
 *
 * Returns IB_NOT_FOUND if no HCA matches the GUID,
 * IB_INSUFFICIENT_MEMORY if the MAD cache cannot be allocated,
 * or the status of mlnx_hobs_set_cb().
 */
ib_api_status_t
mlnx_open_ca (
	IN		const	ib_net64_t					ca_guid, // IN  const char *		ca_name,
	IN		const	ci_completion_cb_t			pfn_completion_cb,
	IN		const	ci_async_event_cb_t			pfn_async_event_cb,
	IN		const	void*const					ca_context,
		OUT			ib_ca_handle_t				*ph_ca)
{
	mlnx_hca_t				*p_hca;
	ib_api_status_t status = IB_NOT_FOUND;
	mlnx_cache_t	*p_cache;
	struct ib_device *ib_dev;

	HCA_ENTER(HCA_DBG_SHIM);
	HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,
		("context 0x%p\n", ca_context));

	// find CA object by GUID
	p_hca = mlnx_hca_from_guid( ca_guid );
	if( !p_hca ) {
		HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
			("completes with ERROR status IB_NOT_FOUND\n"));
		return IB_NOT_FOUND;
	}

	ib_dev = &p_hca->mdev->ib_dev;

	HCA_PRINT(TRACE_LEVEL_INFORMATION  ,HCA_DBG_SHIM,
		("context 0x%p\n", ca_context));
	// register the client's callbacks and context on the HOB
	status = mlnx_hobs_set_cb(&p_hca->hob,
		pfn_completion_cb,
		pfn_async_event_cb,
		ca_context);
	if (IB_SUCCESS != status) {
		goto err_set_cb;
	}

	// MAD cache
	// NOTE(review): the hard-coded factor 2 looks like a per-port cache
	// for a 2-port HCA -- confirm against the mlnx_cache_t users.
	p_cache = (mlnx_cache_t*)cl_pzalloc( sizeof(mlnx_cache_t) * 2 );
	if( !p_cache ) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_mad_cache;
	}
	p_hca->hob.cache = p_cache;

	
	//TODO: do we need something for kernel users ?

	// Return pointer to HOB object
	if (ph_ca) *ph_ca = &p_hca->hob;
	status =  IB_SUCCESS;

	// success falls through the error labels: nothing to undo here
err_mad_cache:
err_set_cb:
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
116 \r
/*
 * Queries the full CA attribute set (device caps, per-port attributes,
 * GID and PKEY tables, vendor board id) into a single caller-supplied
 * buffer, using the usual two-call idiom:
 *
 *   1st call: p_ca_attr == NULL (or *p_byte_count too small) -- the
 *             required size is returned in *p_byte_count together with
 *             IB_INSUFFICIENT_MEMORY.
 *   2nd call: buffer big enough -- tables are laid out inside the buffer
 *             right after the ib_ca_attr_t header and filled in.
 *
 * All internal table pointers of *p_ca_attr point INTO the same buffer,
 * so the whole attribute block can be copied/serialized as one chunk.
 */
ib_api_status_t
mlnx_query_ca (
	IN		const	ib_ca_handle_t				h_ca,
		OUT			ib_ca_attr_t				*p_ca_attr,
	IN	OUT			uint32_t					*p_byte_count,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t 	status;
	uint32_t			size, required_size;
	uint8_t 		port_num, num_ports;
	uint32_t			num_gids, num_pkeys;
	uint32_t			num_page_sizes = 1; // TBD: what is actually supported
	uint8_t 			*last_p;
	struct ib_device_attr props;
	struct ib_port_attr  *hca_ports = NULL;
	int i;
	
	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );
	int err;
	
	HCA_ENTER(HCA_DBG_SHIM);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM ,("User mode is not supported yet\n"));
			p_umv_buf->status = status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}
	if (NULL == p_byte_count) {
		status = IB_INVALID_PARAMETER;
		goto err_byte_count;
	}

	// query the device capabilities
	err = mthca_query_device(ib_dev, &props );
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, 
			("ib_query_device failed (%d)\n",err));
		status = errno_to_iberr(err);
		goto err_query_device;
	}
	
	// allocate a scratch array for the per-port properties
	num_ports = ib_dev->phys_port_cnt;   /* Number of physical ports of the HCA */             
	if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof *hca_ports))) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("Failed to cl_zalloc ports array\n"));
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_ports;
	}

	// start calculation of ib_ca_attr_t full size
	// (header + page-size table + port table + board-id, each pointer-aligned)
	num_gids = 0;
	num_pkeys = 0;
	required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) +
		PTR_ALIGN(sizeof(uint32_t) * num_page_sizes) +
		PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports)+
		PTR_ALIGN(MTHCA_BOARD_ID_LEN);
	// get port properties and add the per-port GID/PKEY table sizes.
	// NB: port_num is 0-based here; the low-level API numbers ports
	// from start_port(ib_dev), hence the "+ start_port(ib_dev)" offset.
	for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num) {
		// request
		err = mthca_query_port(ib_dev, port_num + start_port(ib_dev), &hca_ports[port_num]);
		if (err) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("ib_query_port failed(%d) for port %d\n",err, port_num));
			status = errno_to_iberr(err);
			goto err_query_port;
		}

		// calculate GID table size
		num_gids  = hca_ports[port_num].gid_tbl_len;
		size = PTR_ALIGN(sizeof(ib_gid_t)  * num_gids);
		required_size += size;

		// calculate pkeys table size
		num_pkeys = hca_ports[port_num].pkey_tbl_len;
		size = PTR_ALIGN(sizeof(uint16_t) * num_pkeys);
		required_size += size;
	}

	// buffer sufficiency check (two-call idiom: report the required size)
	if (NULL == p_ca_attr || *p_byte_count < required_size) {
		*p_byte_count = required_size;
		status = IB_INSUFFICIENT_MEMORY;
		if ( p_ca_attr != NULL) {
			HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, 
				("Failed *p_byte_count (%d) < required_size (%d)\n", *p_byte_count, required_size ));
		}
		goto err_insuff_mem;
	}

	// Space is sufficient - carve the tables out of the caller's buffer
	last_p = (uint8_t*)p_ca_attr;
	last_p += PTR_ALIGN(sizeof(*p_ca_attr));

	p_ca_attr->p_page_size = (uint32_t*)last_p;
	last_p += PTR_ALIGN(num_page_sizes * sizeof(uint32_t));

	p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p;
	last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t));

	for (port_num = 0; port_num < num_ports; port_num++) {
		p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p;
		size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len);
		last_p += size;

		p_ca_attr->p_port_attr[port_num].p_pkey_table = (uint16_t *)last_p;
		size = PTR_ALIGN(sizeof(uint16_t) * hca_ports[port_num].pkey_tbl_len);
		last_p += size;
	}
	
	//copy vendor specific data (board id) into the buffer tail
	cl_memcpy(last_p,to_mdev(ib_dev)->board_id, MTHCA_BOARD_ID_LEN);
	last_p += PTR_ALIGN(MTHCA_BOARD_ID_LEN);
	
	// Separate the loops to ensure that table pointers are always setup
	for (port_num = 0; port_num < num_ports; port_num++) {

		// get pkeys, using cache
		for (i=0; i < hca_ports[port_num].pkey_tbl_len; ++i) {
			err = ib_get_cached_pkey( ib_dev, port_num + start_port(ib_dev), i,
				&p_ca_attr->p_port_attr[port_num].p_pkey_table[i] );
			if (err) {
				status = errno_to_iberr(err);
				HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, 
					("ib_get_cached_pkey failed (%d) for port_num %d, index %d\n",
					err, port_num + start_port(ib_dev), i));
				goto err_get_pkey;
			}
		}
		
		// get gids, using cache.
		// NB: the "+ start_port(ib_dev)" here is the gid-lookup
		// port-number fix mentioned in the commit message.
		for (i=0; i < hca_ports[port_num].gid_tbl_len; ++i) {
			union ib_gid * __ptr64	gid = (union ib_gid	*)&p_ca_attr->p_port_attr[port_num].p_gid_table[i];
			err = ib_get_cached_gid( ib_dev, port_num + start_port(ib_dev), i, (union ib_gid *)gid );
			//TODO: do we need to convert gids to little endian
			if (err) {
				status = errno_to_iberr(err);
				HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, 
					("ib_get_cached_gid failed (%d) for port_num %d, index %d\n",
					err, port_num + start_port(ib_dev), i));
				goto err_get_gid;
			}
		}

		// trace the first GID of each port for debugging
		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,("port %d gid0:\n", port_num));
		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,
			(" 0x%x%x%x%x%x%x%x%x-0x%x%x%x%x%x%x%x%x\n", 
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[0],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[1],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[2],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[3],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[4],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[5],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[6],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[7],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[8],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[9],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[10],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[11],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[12],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[13],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[14],
			p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[15]));
	}

	// set result size
	p_ca_attr->size = required_size;
	CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) );
	HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM , ("Space required %d used %d\n",
		required_size, (int)((uintn_t)last_p - (uintn_t)p_ca_attr) ));
	
	// !!! GID/PKEY tables must be queried before this call !!!
	mlnx_conv_hca_cap(ib_dev, &props, hca_ports, p_ca_attr);

	status = IB_SUCCESS;

	// success falls through: only the scratch port array needs freeing
err_get_gid:
err_get_pkey:
err_insuff_mem:
err_query_port:
	cl_free(hca_ports);
err_alloc_ports:
err_query_device:
err_byte_count:	
err_user_unsupported:
	if( status != IB_INSUFFICIENT_MEMORY && status != IB_SUCCESS )
		HCA_PRINT(TRACE_LEVEL_ERROR 	, HCA_DBG_SHIM	,("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	HCA_EXIT(HCA_DBG_SHIM);
	return status;
}
307 \r
/*
 * Modifies port attributes of an open CA: sets or clears port capability
 * bits (SM, SNMP tunneling, device management, vendor class) and
 * optionally resets the QKey violation counter.
 */
ib_api_status_t
mlnx_modify_ca (
	IN		const	ib_ca_handle_t				h_ca,
	IN		const	uint8_t 					port_num,
	IN		const	ib_ca_mod_t 				modca_cmd,
	IN		const	ib_port_attr_mod_t			*p_port_attr)
{
// Maps one IBAL modify flag to its IB capability bit: when al_mask is
// requested, set or clear bit 'ib' according to p_port_attr->cap.al_fld.
// NOTE(review): "cap.##al_fld" is MSVC-tolerated token pasting; standard
// C would write "cap.al_fld" -- confirm before porting to another compiler.
#define SET_CAP_MOD(al_mask, al_fld, ib)		\
		if (modca_cmd & al_mask) {	\
			if (p_port_attr->cap.##al_fld)		\
				props.set_port_cap_mask |= ib;	\
			else		\
				props.clr_port_cap_mask |= ib;	\
		}

	ib_api_status_t status;
	int err;
	struct ib_port_modify props;
	int port_modify_mask = 0;
	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );

	HCA_ENTER(HCA_DBG_SHIM);

	// prepare parameters
	RtlZeroMemory(&props, sizeof(props));
	SET_CAP_MOD(IB_CA_MOD_IS_SM, sm, IB_PORT_SM);
	SET_CAP_MOD(IB_CA_MOD_IS_SNMP_SUPPORTED, snmp, IB_PORT_SNMP_TUNNEL_SUP);
	SET_CAP_MOD(IB_CA_MOD_IS_DEV_MGMT_SUPPORTED, dev_mgmt, IB_PORT_DEVICE_MGMT_SUP);
	SET_CAP_MOD(IB_CA_MOD_IS_VEND_SUPPORTED, vend, IB_PORT_VENDOR_CLASS_SUP);
	// a zero counter value requests a reset of the QKey violation counter
	if ((modca_cmd & IB_CA_MOD_QKEY_CTR) && (p_port_attr->qkey_ctr == 0)) 
		port_modify_mask |= IB_PORT_RESET_QKEY_CNTR;
	
	// modify port
	err = mthca_modify_port(ib_dev, port_num, port_modify_mask, &props );
	if (err) {
		status = errno_to_iberr(err);
		HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mthca_modify_port failed (%d) \n",err));
		goto err_modify_port;
	}
	
	status =	IB_SUCCESS;

err_modify_port:
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
355 \r
356 ib_api_status_t\r
357 mlnx_close_ca (\r
358         IN                              ib_ca_handle_t                          h_ca)\r
359 {\r
360         HCA_ENTER(HCA_DBG_SHIM);\r
361 \r
362         // release HOB resources\r
363         mlnx_hobs_remove(h_ca);\r
364 \r
365         //TODO: release HOBUL resources\r
366 \r
367         HCA_EXIT(HCA_DBG_SHIM);\r
368         \r
369         return IB_SUCCESS;\r
370 }\r
371 \r
372 \r
/*
 * Opens a per-process context on the HCA.
 *
 * Kernel callers (p_umv_buf->command == 0) get a bare, zeroed ib_ucontext
 * carrying only the device pointer.  User-mode callers get a real
 * ucontext from the low-level driver plus a PD allocated on their behalf;
 * the UAR address, PD handle/number and PCI vendor/device ids are copied
 * back through the UMV buffer for the user-mode library.
 *
 * The resulting context is returned through *ph_um_ca as an
 * ib_ca_handle_t and is released by mlnx_um_close().
 */
static ib_api_status_t
mlnx_um_open(
	IN		const	ib_ca_handle_t				h_ca,
	IN	OUT			ci_umv_buf_t* const 		p_umv_buf,
		OUT			ib_ca_handle_t* const		ph_um_ca )
{
	int err;
	ib_api_status_t 	status;
	mlnx_hob_t			*hob_p = (mlnx_hob_t *)h_ca;
	hca_dev_ext_t *ext_p = EXT_FROM_HOB( hob_p );
	struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );
	struct ib_ucontext *p_context;
	struct mthca_alloc_ucontext_resp *uresp_p;
	struct ibv_alloc_pd_resp resp;
	ci_umv_buf_t umv_buf;

	HCA_ENTER(HCA_DBG_SHIM);

	// sanity check
	ASSERT( p_umv_buf );
	if( !p_umv_buf->command )
	{
		// kernel-mode caller: a minimal context is enough
		p_context = cl_zalloc( sizeof(struct ib_ucontext) );
		if( !p_context )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_alloc_ucontext;
		}
		/* Copy the dev info. */
		p_context->device = ib_dev;
		p_umv_buf->output_size = 0;
		goto done;
	}

	// create user context in kernel
	p_context = mthca_alloc_ucontext(ib_dev, p_umv_buf);
	if (IS_ERR(p_context)) {
		err = PTR_ERR(p_context);
		HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM,
			("mthca_alloc_ucontext failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_alloc_ucontext;
	}

	/* allocate a pd on behalf of the user process */
	umv_buf.command = 1;
	umv_buf.input_size = umv_buf.status = 0;
	umv_buf.output_size = sizeof(struct ibv_alloc_pd_resp);
	umv_buf.p_inout_buf = &resp;
	//NB: Pay attention ! Ucontext parameter is important here:
	// when it is present (i.e. - for user space) - mthca_alloc_pd won't create MR
	p_context->pd = ibv_alloc_pd(ib_dev, p_context, &umv_buf);
	if (IS_ERR(p_context->pd)) {
		err = PTR_ERR(p_context->pd);
		HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
			("ibv_alloc_pd failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_alloc_pd;
	}
	
	// fill more parameters for user (sanity checks are in mthca_alloc_ucontext)
	uresp_p = (struct mthca_alloc_ucontext_resp *)(void*)p_umv_buf->p_inout_buf;
	uresp_p->uar_addr = (uint64_t)(UINT_PTR)p_context->user_uar;
	uresp_p->pd_handle = resp.pd_handle;
	uresp_p->pdn = resp.pdn;
	uresp_p->vend_id = (uint32_t)ext_p->hcaConfig.VendorID;
	uresp_p->dev_id = (uint16_t)ext_p->hcaConfig.DeviceID;

done:
	// common initialization for both kernel and user contexts
	p_context->va = p_context->p_mdl = NULL;
	p_context->fw_if_open = FALSE;
	KeInitializeMutex( &p_context->mutex, 0 );
	
	// return the result
	if (ph_um_ca) *ph_um_ca = (ib_ca_handle_t)p_context;

	status = IB_SUCCESS;
	goto end;
	
err_alloc_pd:
	mthca_dealloc_ucontext(p_context);
err_alloc_ucontext: 
end:
	// propagate the status back to the user-mode library, if any
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
463 \r
464 static void\r
465 mlnx_um_close(\r
466         IN                              ib_ca_handle_t                          h_ca,\r
467         IN                              ib_ca_handle_t                          h_um_ca )\r
468 {\r
469         struct ib_ucontext *p_ucontext = (struct ib_ucontext *)h_um_ca;\r
470         UNREFERENCED_PARAMETER(h_ca);\r
471 \r
472         unmap_crspace_for_all(p_ucontext);\r
473         if( !p_ucontext->pd )\r
474                 cl_free( h_um_ca );\r
475         else\r
476                 ibv_um_close(p_ucontext);\r
477         return;\r
478 }\r
479 \r
480 \r
481 /*\r
482 *    Protection Domain and Reliable Datagram Domain Verbs\r
483 */\r
484 \r
485 ib_api_status_t\r
486 mlnx_allocate_pd (\r
487         IN              const   ib_ca_handle_t                          h_ca,\r
488         IN              const   ib_pd_type_t                            type,\r
489                 OUT                     ib_pd_handle_t                          *ph_pd,\r
490         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
491 {\r
492         ib_api_status_t         status;\r
493         struct ib_device *ib_dev;\r
494         struct ib_ucontext *p_context;\r
495         struct ib_pd *ib_pd_p;\r
496         int err;\r
497 \r
498         //TODO: how are we use it ?\r
499         UNREFERENCED_PARAMETER(type);\r
500         \r
501         HCA_ENTER(HCA_DBG_SHIM);\r
502 \r
503         if( p_umv_buf ) {\r
504                 p_context = (struct ib_ucontext *)h_ca;\r
505                 ib_dev = p_context->device;\r
506         }\r
507         else {\r
508                 mlnx_hob_t                      *hob_p = (mlnx_hob_t *)h_ca;\r
509                 p_context = NULL;\r
510                 ib_dev = IBDEV_FROM_HOB( hob_p );\r
511         }\r
512         \r
513         // create PD\r
514         ib_pd_p = ibv_alloc_pd(ib_dev, p_context, p_umv_buf);\r
515         if (IS_ERR(ib_pd_p)) {\r
516                 err = PTR_ERR(ib_pd_p);\r
517                 HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
518                         ("ibv_alloc_pd failed (%d)\n", err));\r
519                 status = errno_to_iberr(err);\r
520                 goto err_alloc_pd;\r
521         }\r
522 \r
523         // return the result\r
524         if (ph_pd) *ph_pd = (ib_pd_handle_t)ib_pd_p;\r
525 \r
526         status = IB_SUCCESS;\r
527         \r
528 err_alloc_pd:   \r
529         if (p_umv_buf && p_umv_buf->command) \r
530                 p_umv_buf->status = status;\r
531         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
532                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
533         return status;\r
534 }\r
535 \r
536 ib_api_status_t\r
537 mlnx_deallocate_pd (\r
538         IN                              ib_pd_handle_t                          h_pd)\r
539 {\r
540         ib_api_status_t         status;\r
541         int err;\r
542         struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
543         PREP_IBDEV_FOR_PRINT(ib_pd_p->device)\r
544 \r
545         HCA_ENTER( HCA_DBG_QP);\r
546 \r
547         HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  ,\r
548                 ("pcs %p\n", PsGetCurrentProcess()));\r
549         \r
550         // dealloc pd\r
551         err = ibv_dealloc_pd( ib_pd_p );\r
552         if (err) {\r
553                 HCA_PRINT(TRACE_LEVEL_ERROR  ,HCA_DBG_SHIM\r
554                         ,("ibv_dealloc_pd failed (%d)\n", err));\r
555                 status = errno_to_iberr(err);\r
556                 goto err_dealloc_pd;\r
557         }\r
558         status = IB_SUCCESS;\r
559 \r
560 err_dealloc_pd:\r
561         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM\r
562                 ,("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
563         return status;\r
564 }\r
565 \r
566 /* \r
567 * Address Vector Management Verbs\r
568 */\r
569 ib_api_status_t\r
570 mlnx_create_av (\r
571         IN              const   ib_pd_handle_t                          h_pd,\r
572         IN              const   ib_av_attr_t                            *p_addr_vector,\r
573                 OUT                     ib_av_handle_t                          *ph_av,\r
574         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
575 {\r
576         int err = 0;\r
577         ib_api_status_t         status = IB_SUCCESS;\r
578         struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
579         struct ib_device *ib_dev = ib_pd_p->device;\r
580         struct ib_ah *ib_av_p;\r
581         struct ib_ah_attr ah_attr;\r
582         struct ib_ucontext *p_context = NULL;\r
583 \r
584         HCA_ENTER(HCA_DBG_AV);\r
585 \r
586         if( p_umv_buf && p_umv_buf->command ) {\r
587                 // sanity checks \r
588                 if (p_umv_buf->input_size < sizeof(struct ibv_create_ah) ||\r
589                         p_umv_buf->output_size < sizeof(struct ibv_create_ah_resp) ||\r
590                         !p_umv_buf->p_inout_buf) {\r
591                         status = IB_INVALID_PARAMETER;\r
592                         goto err_inval_params;\r
593                 }\r
594                 p_context = ib_pd_p->ucontext;\r
595         }\r
596         else \r
597                 p_context = NULL;\r
598 \r
599         // fill parameters \r
600         RtlZeroMemory(&ah_attr, sizeof(ah_attr));\r
601         mlnx_conv_ibal_av( ib_dev, p_addr_vector,  &ah_attr );\r
602 \r
603         ib_av_p = ibv_create_ah(ib_pd_p, &ah_attr, p_context, p_umv_buf);\r
604         if (IS_ERR(ib_av_p)) {\r
605                 err = PTR_ERR(ib_av_p);\r
606                 HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_AV,\r
607                         ("ibv_create_ah failed (%d)\n", err));\r
608                 status = errno_to_iberr(err);\r
609                 goto err_alloc_av;\r
610         }\r
611 \r
612         // return the result\r
613         if (ph_av) *ph_av = (ib_av_handle_t)ib_av_p;\r
614 \r
615         status = IB_SUCCESS;\r
616         \r
617 err_alloc_av:   \r
618 err_inval_params:\r
619         if (p_umv_buf && p_umv_buf->command) \r
620                 p_umv_buf->status = status;\r
621         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_AV,\r
622                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
623         return status;\r
624 }\r
625 \r
/*
 * Queries an address vector: converts the low-level address handle back
 * into the IBAL ib_av_attr_t form and returns the owning PD handle.
 * User mode is not supported.
 */
ib_api_status_t
mlnx_query_av (
	IN		const	ib_av_handle_t				h_av,
		OUT			ib_av_attr_t				*p_addr_vector,
		OUT			ib_pd_handle_t				*ph_pd,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	ib_api_status_t 	status = IB_SUCCESS;
	struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;
	PREP_IBDEV_FOR_PRINT(ib_ah_p->device)

	HCA_ENTER(HCA_DBG_AV);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,
				("User mode is not supported yet\n"));
			status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}

	// query AV
	// NOTE(review): the WIN_TO_BE_CHANGED branch below is dead code and
	// would not compile as-is (missing ';', ah_attr undeclared, no
	// err_query_ah label) -- kept only as a sketch of the intended path.
#ifdef WIN_TO_BE_CHANGED
	//TODO: not implemented in low-level driver
	err = ibv_query_ah(ib_ah_p, &ah_attr)
	if (err) {
		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_AV,
			("ibv_query_ah failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_query_ah;
	}
	// convert to IBAL structure: something like that
	mlnx_conv_mthca_av( p_addr_vector,  &ah_attr );
#else

	// active path: convert directly from the mthca AH
	err = mlnx_conv_mthca_av( ib_ah_p, p_addr_vector );
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,
			("mlnx_conv_mthca_av failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_conv_mthca_av;
	}
#endif

	// results
	*ph_pd = (ib_pd_handle_t)ib_ah_p->pd;
	
err_conv_mthca_av:
err_user_unsupported:
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_AV,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
680 \r
/*
 * Modifies an existing address vector in place from the IBAL
 * ib_av_attr_t description.  User mode is not supported.
 */
ib_api_status_t
mlnx_modify_av (
	IN		const	ib_av_handle_t				h_av,
	IN		const	ib_av_attr_t				*p_addr_vector,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	struct ib_ah_attr ah_attr;
	ib_api_status_t 	status = IB_SUCCESS;
	struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;
	struct ib_device *ib_dev = ib_ah_p->pd->device;

	HCA_ENTER(HCA_DBG_AV);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
			HCA_PRINT(TRACE_LEVEL_ERROR   ,HCA_DBG_AV,
				("User mode is not supported yet\n"));
			status = IB_UNSUPPORTED;
			goto err_user_unsupported;
	}

	// translate the IBAL address vector into the low-level ah_attr
	RtlZeroMemory(&ah_attr, sizeof(ah_attr));
	mlnx_conv_ibal_av( ib_dev, p_addr_vector,  &ah_attr );

	// modify AH
	// NOTE(review): the WIN_TO_BE_CHANGED branch below is dead code and
	// would not compile as-is (missing ';', 'err' undeclared, no
	// err_query_ah label) -- kept only as a sketch of the intended path.
#ifdef WIN_TO_BE_CHANGED
	//TODO: not implemented in low-level driver
	err = ibv_modify_ah(ib_ah_p, &ah_attr)
	if (err) {
		HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV,
			("ibv_query_ah failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_query_ah;
	}
#else

	// active path: apply the new attributes to the mthca AH
	mlnx_modify_ah( ib_ah_p, &ah_attr );
#endif

err_user_unsupported:
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_AV,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
726 \r
727 ib_api_status_t\r
728 mlnx_destroy_av (\r
729         IN              const   ib_av_handle_t                          h_av)\r
730 {\r
731         int err;\r
732         ib_api_status_t         status = IB_SUCCESS;\r
733         struct ib_ah *ib_ah_p = (struct ib_ah *)h_av;\r
734         PREP_IBDEV_FOR_PRINT(ib_ah_p->device)\r
735 \r
736         HCA_ENTER(HCA_DBG_AV);\r
737 \r
738         // destroy AV\r
739         err = ibv_destroy_ah( ib_ah_p );\r
740         if (err) {\r
741                 HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_AV,\r
742                         ("ibv_destroy_ah failed (%d)\n", err));\r
743                 status = errno_to_iberr(err);\r
744                 goto err_destroy_ah;\r
745         }\r
746 \r
747 err_destroy_ah:\r
748         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_AV,\r
749                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
750         return status;\r
751 }\r
752 \r
753 /*\r
754 *       Queue Pair Management Verbs\r
755 */\r
756 \r
757 \r
/*
 * _create_qp
 *
 * Common worker for mlnx_create_qp (ordinary QPs) and mlnx_create_spl_qp
 * (special QPs on a given port).  Builds an ib_qp_init_attr from the IBAL
 * creation attributes, creates the QP via ibv_create_qp, caches the
 * consumer context and the init attributes in the mthca_qp, then
 * optionally queries the new QP back into *p_qp_attr.
 *
 * h_pd          - protection domain the QP is created on
 * port_num      - port number (meaningful for special QPs; see callers)
 * qp_context    - opaque consumer context stored in the QP object
 * p_create_attr - IBAL attributes: CQ handles, WR depths, SGE counts,
 *                 signaling mode, QP type
 * p_qp_attr     - optional OUT: attributes of the newly created QP
 * ph_qp         - optional OUT: handle of the new QP
 * p_umv_buf     - user-mode verbs buffer; NULL or command==0 for kernel
 *                 callers.  When set, input/output sizes are validated
 *                 and p_umv_buf->status receives the final status.
 */
static ib_api_status_t
_create_qp (
	IN		const	ib_pd_handle_t				h_pd,
	IN		const	uint8_t						port_num,
	IN		const	void						*qp_context,
	IN		const	ib_qp_create_t				*p_create_attr,
		OUT			ib_qp_attr_t				*p_qp_attr,
		OUT			ib_qp_handle_t				*ph_qp,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
		int err;
		ib_api_status_t		status;
		struct ib_qp * ib_qp_p;
		struct mthca_qp *qp_p;
		struct ib_qp_init_attr qp_init_attr;
		struct ib_ucontext *p_context = NULL;
		struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;
		struct ib_device *ib_dev = ib_pd_p->device;
		mlnx_hob_t	 *hob_p = HOB_FROM_IBDEV(ib_dev);
		
		HCA_ENTER(HCA_DBG_QP);

	
		if( p_umv_buf && p_umv_buf->command ) {
			// sanity checks 
			if (p_umv_buf->input_size < sizeof(struct ibv_create_qp) ||
				p_umv_buf->output_size < sizeof(struct ibv_create_qp_resp) ||
				!p_umv_buf->p_inout_buf) {
				status = IB_INVALID_PARAMETER;
				goto err_inval_params;
			}
			// user-mode request: create on behalf of the PD's ucontext
			p_context = ib_pd_p->ucontext;
		}
		else 
			p_context = NULL;

		// prepare the parameters
		RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr));
		qp_init_attr.qp_type = p_create_attr->qp_type;
		// QP events are delivered to qp_event_handler with the HOB as context
		qp_init_attr.event_handler = qp_event_handler;
		qp_init_attr.qp_context = hob_p;
		qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq;
		qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq;
		qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge;
		qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge;
		qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth;
		qp_init_attr.cap.max_send_wr = p_create_attr->sq_depth;
		qp_init_attr.sq_sig_type = (p_create_attr->sq_signaled) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
		qp_init_attr.port_num = port_num;


		// create qp		
		ib_qp_p = ibv_create_qp( ib_pd_p, &qp_init_attr, p_context, p_umv_buf );
		if (IS_ERR(ib_qp_p)) {
			err = PTR_ERR(ib_qp_p);
			HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_QP,
				("ibv_create_qp failed (%d)\n", err));
			status = errno_to_iberr(err);
			goto err_create_qp;
		}
	
		// fill the object
		// cache the consumer context and the init attributes
		// (mlnx_query_qp reads them back, since MTHCA has no query_qp yet)
		qp_p = (struct mthca_qp *)ib_qp_p;
		qp_p->qp_context = (void*)qp_context;
		qp_p->qp_init_attr = qp_init_attr;
	
		// Query QP to obtain requested attributes
		if (p_qp_attr) {
			status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);
			if (status != IB_SUCCESS)
					goto err_query_qp;
		}
		
		// return the results
		if (ph_qp) *ph_qp = (ib_qp_handle_t)ib_qp_p;
	
		status = IB_SUCCESS;
		goto end;
	
	err_query_qp:
		// undo the creation if the post-create query failed
		ibv_destroy_qp( ib_qp_p );
	err_create_qp:
	err_inval_params:
	end:
		if (p_umv_buf && p_umv_buf->command) 
			p_umv_buf->status = status;
		HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,
			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
		return status;
}
848 \r
849 ib_api_status_t\r
850 mlnx_create_spl_qp (\r
851         IN              const   ib_pd_handle_t                          h_pd,\r
852         IN              const   uint8_t                                         port_num,\r
853         IN              const   void                                            *qp_context,\r
854         IN              const   ib_qp_create_t                          *p_create_attr,\r
855                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
856                 OUT                     ib_qp_handle_t                          *ph_qp )\r
857 {\r
858         ib_api_status_t         status;\r
859         PREP_IBDEV_FOR_PRINT(((struct ib_pd*)h_pd)->device)\r
860 \r
861         HCA_ENTER(HCA_DBG_SHIM);\r
862 \r
863         status =        _create_qp( h_pd, port_num,\r
864                 qp_context, p_create_attr, p_qp_attr, ph_qp, NULL );\r
865                 \r
866         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
867                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
868         return status;\r
869 }\r
870 \r
871 ib_api_status_t\r
872 mlnx_create_qp (\r
873         IN              const   ib_pd_handle_t                          h_pd,\r
874         IN              const   void                                            *qp_context,\r
875         IN              const   ib_qp_create_t                          *p_create_attr,\r
876                 OUT                     ib_qp_attr_t                            *p_qp_attr,\r
877                 OUT                     ib_qp_handle_t                          *ph_qp,\r
878         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
879 {\r
880         ib_api_status_t         status;\r
881         PREP_IBDEV_FOR_PRINT(((struct ib_pd*)h_pd)->device)\r
882 \r
883         //NB: algorithm of mthca_alloc_sqp() requires port_num\r
884         // PRM states, that special pares are created in couples, so\r
885         // looks like we can put here port_num = 1 always\r
886         uint8_t port_num = 1;\r
887 \r
888         HCA_ENTER(HCA_DBG_QP);\r
889 \r
890         status = _create_qp( h_pd, port_num,\r
891                 qp_context, p_create_attr, p_qp_attr, ph_qp, p_umv_buf );\r
892                 \r
893         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,\r
894                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
895         return status;\r
896 }\r
897 \r
/*
 * mlnx_modify_qp
 *
 * Transitions QP h_qp according to the IBAL modify attributes.  Converts
 * p_modify_attr to an ib_qp_attr/mask pair, applies it via ibv_modify_qp,
 * optionally re-queries the QP into *p_qp_attr, and for user-mode callers
 * copies an ibv_modify_qp_resp back through p_umv_buf.
 */
ib_api_status_t
mlnx_modify_qp (
	IN		const	ib_qp_handle_t				h_qp,
	IN		const	ib_qp_mod_t					*p_modify_attr,
		OUT			ib_qp_attr_t				*p_qp_attr OPTIONAL,
	IN	OUT			ci_umv_buf_t				*p_umv_buf OPTIONAL )
{
	ib_api_status_t		status;
	int err;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
	PREP_IBDEV_FOR_PRINT(ib_qp_p->device)

	HCA_ENTER(HCA_DBG_QP);

	// sanity checks
	if( p_umv_buf && p_umv_buf->command ) {
		// sanity checks 
		if (p_umv_buf->output_size < sizeof(struct ibv_modify_qp_resp) ||
			!p_umv_buf->p_inout_buf) {
			status = IB_INVALID_PARAMETER;
			goto err_inval_params;
		}
	}
	
	// fill parameters 
	status = mlnx_conv_qp_modify_attr( ib_qp_p, ib_qp_p->qp_type, 
		p_modify_attr,  &qp_attr, &qp_attr_mask );
	// IB_NOT_DONE: nothing to change in hardware — skip the modify call
	// but still honor the optional query / user-mode response below.
	if (status == IB_NOT_DONE)
		goto query_qp;
	if (status != IB_SUCCESS ) 
		goto err_mode_unsupported;

	// modify QP
	err = ibv_modify_qp(ib_qp_p, &qp_attr, qp_attr_mask);
	if (err) {
		HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_QP ,("ibv_modify_qp failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_modify_qp;
	}

	// Query QP to obtain requested attributes
query_qp:	
	if (p_qp_attr) {
		status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);
		if (status != IB_SUCCESS)
				goto err_query_qp;
	}
	
	if( p_umv_buf && p_umv_buf->command )
	{
		// NOTE(review): on the IB_NOT_DONE path qp_attr/qp_attr_mask are
		// read here — assumes mlnx_conv_qp_modify_attr fills them even
		// when it returns IB_NOT_DONE; confirm against its implementation.
		struct ibv_modify_qp_resp resp;
		resp.attr_mask = qp_attr_mask;
		resp.qp_state = qp_attr.qp_state;
		err = ib_copy_to_umv_buf(p_umv_buf, &resp, sizeof(struct ibv_modify_qp_resp));
		if (err) {
			HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("ib_copy_to_umv_buf failed (%d)\n", err));
			status = errno_to_iberr(err);
			goto err_copy;
		}
	}

	status = IB_SUCCESS;

err_copy:	
err_query_qp:
err_modify_qp:	
err_mode_unsupported:
err_inval_params:
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
974 \r
/*
 * mlnx_query_qp
 *
 * Fills *p_qp_attr with the attributes of QP h_qp.  Since MTHCA does not
 * support ibv_query_qp yet, the values come from the QP object itself and
 * from the init attributes cached at creation time (qp_p->qp_init_attr),
 * not from the hardware; fields that would require a real hardware query
 * are left zeroed (see the WIN_TO_BE_CHANGED stub below).
 * Always returns IB_SUCCESS.  p_umv_buf is unused.
 */
ib_api_status_t
mlnx_query_qp (
	IN		const	ib_qp_handle_t				h_qp,
		OUT			ib_qp_attr_t				*p_qp_attr,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	ib_api_status_t		status = IB_SUCCESS;
	struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
	struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;
	PREP_IBDEV_FOR_PRINT(ib_qp_p->device)

	UNREFERENCED_PARAMETER(p_umv_buf);
	
	HCA_ENTER( HCA_DBG_QP);
	// sanity checks

	// clean the structure
	RtlZeroMemory( p_qp_attr, sizeof *p_qp_attr );
	
	// fill the structure
	//TODO: this function is to be implemented via ibv_query_qp, which is not supported now 
	p_qp_attr->h_pd						= (ib_pd_handle_t)qp_p->ibqp.pd;
	p_qp_attr->qp_type				= qp_p->ibqp.qp_type;
	// capacities reflect what was requested at creation, not HW-adjusted values
	p_qp_attr->sq_max_inline		= qp_p->qp_init_attr.cap.max_inline_data;
	p_qp_attr->sq_depth				= qp_p->qp_init_attr.cap.max_send_wr;
	p_qp_attr->rq_depth				= qp_p->qp_init_attr.cap.max_recv_wr;
	p_qp_attr->sq_sge					= qp_p->qp_init_attr.cap.max_send_sge;
	p_qp_attr->rq_sge					= qp_p->qp_init_attr.cap.max_recv_sge;
	p_qp_attr->resp_res				= qp_p->resp_depth;
	p_qp_attr->h_sq_cq				= (ib_cq_handle_t)qp_p->ibqp.send_cq;
	p_qp_attr->h_rq_cq				= (ib_cq_handle_t)qp_p->ibqp.recv_cq;
	p_qp_attr->sq_signaled			= qp_p->sq_policy == IB_SIGNAL_ALL_WR;
	p_qp_attr->state						= mlnx_qps_to_ibal( qp_p->state );
	// QP number is returned in network byte order
	p_qp_attr->num						= cl_hton32(qp_p->ibqp.qp_num);

#ifdef WIN_TO_BE_CHANGED
//TODO: don't know how to fill the following fields	without support of query_qp in MTHCA	
	p_qp_attr->access_ctrl			= qp_p->
	p_qp_attr->pkey_index			= qp_p->
	p_qp_attr->dest_num				= qp_p-
	p_qp_attr->init_depth			= qp_p-
	p_qp_attr->qkey						= qp_p-
	p_qp_attr->sq_psn					= qp_p-
	p_qp_attr->rq_psn					= qp_p-
	p_qp_attr->primary_port		= qp_p-
	p_qp_attr->alternate_port		= qp_p-
	p_qp_attr->primary_av			= qp_p-
	p_qp_attr->alternate_av			= qp_p-
	p_qp_attr->apm_state			= qp_p-
#endif		

	status = IB_SUCCESS;

	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_QP,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
1032 \r
1033 ib_api_status_t\r
1034 mlnx_destroy_qp (\r
1035         IN              const   ib_qp_handle_t                          h_qp,\r
1036         IN              const   uint64_t                                        timewait )\r
1037 {\r
1038         ib_api_status_t         status;\r
1039         int err;\r
1040         struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
1041         PREP_IBDEV_FOR_PRINT(ib_qp_p->device)\r
1042 \r
1043         UNUSED_PARAM( timewait );\r
1044 \r
1045         HCA_ENTER( HCA_DBG_QP);\r
1046 \r
1047         HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  ,\r
1048                 ("qpnum %#x, pcs %p\n", ib_qp_p->qp_num, PsGetCurrentProcess()) );\r
1049 \r
1050         err = ibv_destroy_qp( ib_qp_p );\r
1051         if (err) {\r
1052                 HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,\r
1053                         ("ibv_destroy_qp failed (%d)\n", err));\r
1054                 status = errno_to_iberr(err);\r
1055                 goto err_destroy_qp;\r
1056         }\r
1057 \r
1058         status = IB_SUCCESS;\r
1059 \r
1060 err_destroy_qp:\r
1061         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,\r
1062                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1063         return status;\r
1064 }\r
1065 \r
1066 /*\r
1067 * Completion Queue Managment Verbs.\r
1068 */\r
1069 \r
/*
 * mlnx_create_cq
 *
 * Creates a completion queue with at least *p_size entries.  On return
 * *p_size is updated to the actual number of CQEs allocated.  Completion
 * and asynchronous CQ events are routed to cq_comp_handler and
 * cq_event_handler with the HOB as context.
 *
 * h_ca       - for user-mode calls this is actually the ib_ucontext;
 *              for kernel calls it is the mlnx_hob_t
 * cq_context - opaque consumer context stored in the CQ object
 * p_size     - IN: requested CQE count; OUT: actual CQE count
 * ph_cq      - optional OUT: handle of the new CQ
 * p_umv_buf  - user-mode verbs buffer, or NULL for kernel callers
 */
ib_api_status_t
mlnx_create_cq (
	IN		const	ib_ca_handle_t				h_ca,
	IN		const	void						*cq_context,
	IN	OUT			uint32_t					*p_size,
		OUT			ib_cq_handle_t				*ph_cq,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	int err;
	ib_api_status_t		status;
	struct ib_cq *ib_cq_p;
	struct mthca_cq *cq_p;
	mlnx_hob_t			*hob_p;
	struct ib_device *ib_dev;
	struct ib_ucontext *p_context;
	
	HCA_ENTER(HCA_DBG_CQ);

	// NOTE(review): sibling verbs gate user-mode handling on
	// p_umv_buf && p_umv_buf->command; here only p_umv_buf is tested —
	// assumes kernel callers always pass NULL; confirm against callers.
	if( p_umv_buf ) {

		// user-mode call: h_ca carries the user context
		p_context = (struct ib_ucontext *)h_ca;
		hob_p = HOB_FROM_IBDEV(p_context->device);
		ib_dev = p_context->device;

		// sanity checks 
		if (p_umv_buf->input_size < sizeof(struct ibv_create_cq) ||
			p_umv_buf->output_size < sizeof(struct ibv_create_cq_resp) ||
			!p_umv_buf->p_inout_buf) {
			status = IB_INVALID_PARAMETER;
			goto err_inval_params;
		}
	}
	else {
		// kernel call: h_ca is the HOB itself
		hob_p = (mlnx_hob_t *)h_ca;
		p_context = NULL;
		ib_dev = IBDEV_FROM_HOB( hob_p );
	}

	// allocate cq	
	ib_cq_p = ibv_create_cq(ib_dev, 
		cq_comp_handler, cq_event_handler,
		hob_p, *p_size, p_context, p_umv_buf );
	if (IS_ERR(ib_cq_p)) {
		err = PTR_ERR(ib_cq_p);
		HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_CQ, ("ibv_create_cq failed (%d)\n", err));
		status = errno_to_iberr(err);
		goto err_create_cq;
	}

	// fill the object
	cq_p = (struct mthca_cq *)ib_cq_p;
	cq_p->cq_context = (void*)cq_context;
	
	// return the result
//	*p_size = *p_size;	// return the same value
	// report the CQE count actually allocated by the low-level driver
	*p_size = ib_cq_p->cqe;

	if (ph_cq) *ph_cq = (ib_cq_handle_t)cq_p;

	status = IB_SUCCESS;
	
err_create_cq:
err_inval_params:
	if (p_umv_buf && p_umv_buf->command) 
		p_umv_buf->status = status;
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ,
		("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
	return status;
}
1139 \r
1140 ib_api_status_t\r
1141 mlnx_resize_cq (\r
1142         IN              const   ib_cq_handle_t                          h_cq,\r
1143         IN      OUT                     uint32_t                                        *p_size,\r
1144         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1145 {\r
1146         UNREFERENCED_PARAMETER(h_cq);\r
1147         UNREFERENCED_PARAMETER(p_size);\r
1148         if (p_umv_buf && p_umv_buf->command) {\r
1149                 p_umv_buf->status = IB_UNSUPPORTED;\r
1150         }\r
1151         HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mlnx_resize_cq not implemented\n"));\r
1152         return IB_UNSUPPORTED;\r
1153 }\r
1154 \r
1155 ib_api_status_t\r
1156 mlnx_query_cq (\r
1157         IN              const   ib_cq_handle_t                          h_cq,\r
1158                 OUT                     uint32_t                                        *p_size,\r
1159         IN      OUT                     ci_umv_buf_t                            *p_umv_buf )\r
1160 {\r
1161         UNREFERENCED_PARAMETER(h_cq);\r
1162         UNREFERENCED_PARAMETER(p_size);\r
1163         if (p_umv_buf && p_umv_buf->command) {\r
1164                 p_umv_buf->status = IB_UNSUPPORTED;\r
1165         }\r
1166         HCA_PRINT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM  ,("mlnx_query_cq not implemented\n"));\r
1167         return IB_UNSUPPORTED;\r
1168 }\r
1169 \r
1170 ib_api_status_t\r
1171 mlnx_destroy_cq (\r
1172         IN              const   ib_cq_handle_t                          h_cq)\r
1173 {\r
1174                                                                                                                                                                 \r
1175         ib_api_status_t         status;\r
1176         int err;\r
1177         struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;\r
1178         PREP_IBDEV_FOR_PRINT(ib_cq_p->device)\r
1179 \r
1180         HCA_ENTER( HCA_DBG_QP);\r
1181 \r
1182         HCA_PRINT(TRACE_LEVEL_INFORMATION       ,HCA_DBG_SHIM  ,\r
1183                 ("cqn %#x, pcs %p\n", ((struct mthca_cq*)ib_cq_p)->cqn, PsGetCurrentProcess()) );\r
1184 \r
1185         // destroy CQ\r
1186         err = ibv_destroy_cq( ib_cq_p );\r
1187         if (err) {\r
1188                 HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SHIM,\r
1189                         ("ibv_destroy_cq failed (%d)\n", err));\r
1190                 status = errno_to_iberr(err);\r
1191                 goto err_destroy_cq;\r
1192         }\r
1193 \r
1194         status = IB_SUCCESS;\r
1195 \r
1196 err_destroy_cq:\r
1197         HCA_PRINT_EXIT(TRACE_LEVEL_ERROR  , HCA_DBG_SHIM,\r
1198                 ("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));\r
1199         return status;\r
1200 }\r
1201 \r
1202 \r
/*
 * setup_ci_interface
 *
 * Populates the IBAL channel-interface dispatch table (*p_interface) for
 * the CA identified by ca_guid: GUID, verbs version, the user-mode
 * provider library name, and pointers to all verb entry points defined in
 * this file.  Memory, direct-access and multicast verbs are filled in by
 * mlnx_memory_if / mlnx_direct_if / mlnx_mcast_if respectively.
 */
void
setup_ci_interface(
	IN		const	ib_net64_t					ca_guid,
	IN	OUT			ci_interface_t				*p_interface )
{
	cl_memclr(p_interface, sizeof(*p_interface));

	/* Guid of the CA. */
	p_interface->guid = ca_guid;

	/* Version of this interface. */
	p_interface->version = VERBS_VERSION;

	/* UVP name */
	cl_memcpy( p_interface->libname, mlnx_uvp_lib_name, MAX_LIB_NAME);

	HCA_PRINT(TRACE_LEVEL_VERBOSE  , HCA_DBG_SHIM  ,("UVP filename %s\n", p_interface->libname));

	/* The real interface. */
	// CA management
	p_interface->open_ca = mlnx_open_ca;
	p_interface->query_ca = mlnx_query_ca;
	p_interface->modify_ca = mlnx_modify_ca; 
	p_interface->close_ca = mlnx_close_ca;
	p_interface->um_open_ca = mlnx_um_open;
	p_interface->um_close_ca = mlnx_um_close;

	// protection domains
	p_interface->allocate_pd = mlnx_allocate_pd;
	p_interface->deallocate_pd = mlnx_deallocate_pd;

	// address vectors
	p_interface->create_av = mlnx_create_av;
	p_interface->query_av = mlnx_query_av;
	p_interface->modify_av = mlnx_modify_av;
	p_interface->destroy_av = mlnx_destroy_av;

	// queue pairs
	p_interface->create_qp = mlnx_create_qp;
	p_interface->create_spl_qp = mlnx_create_spl_qp;
	p_interface->modify_qp = mlnx_modify_qp;
	p_interface->query_qp = mlnx_query_qp;
	p_interface->destroy_qp = mlnx_destroy_qp;

	// completion queues (resize/query are IB_UNSUPPORTED stubs)
	p_interface->create_cq = mlnx_create_cq;
	p_interface->resize_cq = mlnx_resize_cq;
	p_interface->query_cq = mlnx_query_cq;
	p_interface->destroy_cq = mlnx_destroy_cq;

	// MAD and vendor-specific access
	p_interface->local_mad = mlnx_local_mad;
	
	p_interface->vendor_call = fw_access_ctrl;

	// remaining verb groups are filled by their own modules
	mlnx_memory_if(p_interface);
	mlnx_direct_if(p_interface);
	mlnx_mcast_if(p_interface);


	return;
}
1259 \r