/* [IBAL, HW] Remove pointers from ci_umv_buf_t. */
/* hw/mlx4/user/hca/verbs.c */
/*
 * Copyright (c) 2007 Cisco, Inc.  All rights reserved.
 * Portions Copyright (c) 2008 Microsoft Corporation.  All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "mlx4.h"
#include "verbs.h"
#include "mx_abi.h"
#include "wqe.h"
#include "mlx4_debug.h"

#if defined(EVENT_TRACING)
#include "verbs.tmh"
#endif

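/*
 * Note: ci_umv_buf_t.p_inout_buf carries the private ioctl buffer as a
 * ULONG_PTR rather than a raw pointer (see the change title above), so
 * every access below goes through a (ULONG_PTR) cast.
 */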
ib_api_status_t
mlx4_pre_open_ca (
	IN		const	ib_net64_t			ca_guid,
	IN	OUT		ci_umv_buf_t			*p_umv_buf,
		OUT		ib_ca_handle_t			*ph_uvp_ca )
{
	struct ibv_context	*context;
	ib_api_status_t status = IB_SUCCESS;

	UNREFERENCED_PARAMETER(ca_guid);

	context = mlx4_alloc_context();
	if (!context) {
		status = IB_INSUFFICIENT_MEMORY;
		goto end;
	}

	if( p_umv_buf )
	{
		if( !p_umv_buf->p_inout_buf )
		{
			p_umv_buf->p_inout_buf =
				(ULONG_PTR)cl_zalloc( sizeof(struct ibv_get_context_resp) );
			if( !p_umv_buf->p_inout_buf )
			{
				/* don't leak the context allocated above */
				mlx4_free_context(context);
				status = IB_INSUFFICIENT_MEMORY;
				goto end;
			}
		}
		p_umv_buf->input_size = 0;
		p_umv_buf->output_size = sizeof(struct ibv_get_context_resp);
		p_umv_buf->command = TRUE;
	}

	*ph_uvp_ca = (ib_ca_handle_t)context;

end:
	return status;
}

ib_api_status_t
mlx4_post_open_ca (
	IN		const	ib_net64_t			ca_guid,
	IN			ib_api_status_t			ioctl_status,
	IN	OUT		ib_ca_handle_t			*ph_uvp_ca,
	IN			ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_get_context_resp *p_resp;
	struct ibv_context *context = (struct ibv_context *)*ph_uvp_ca;
	ib_api_status_t status = IB_SUCCESS;

	UNREFERENCED_PARAMETER(ca_guid);

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = (struct ibv_get_context_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		if (!mlx4_fill_context(context, p_resp))
		{
			status = IB_INSUFFICIENT_RESOURCES;
			goto end;
		}
	}

end:
	cl_free(p_resp);
	return status;
}

ib_api_status_t
mlx4_pre_query_ca (
	IN			ib_ca_handle_t			h_uvp_ca,
	IN			ib_ca_attr_t			*p_ca_attr,
	IN			size_t				byte_count,
	IN			ci_umv_buf_t			*p_umv_buf )
{
	ib_api_status_t status = IB_SUCCESS;

	UNREFERENCED_PARAMETER(h_uvp_ca);

	/* Note that query_ca calls *always* get their attributes from the kernel.
	 *
	 * Assume that if the user buffer is valid, byte_count is valid too,
	 * so we can preallocate the CA attribute buffer for saving the data
	 * returned by the ioctl.
	 *
	 * Note that we squirrel the buffer away into the umv_buf and only
	 * set it into the HCA if the query is successful.
	 */
	if ( p_ca_attr != NULL )
	{
		p_umv_buf->p_inout_buf = (ULONG_PTR)cl_malloc(byte_count);
		if ( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_RESOURCES;
			goto end;
		}
	}

end:
	return status;
}

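/*
 * The kernel returns a ca_attr blob whose internal pointers refer to the
 * source buffer; after copying the blob, rebase each embedded pointer by
 * the distance between the two buffers so it points into the copy.
 */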
void
__fixup_ca_attr(
	IN			ib_ca_attr_t* const		p_dest,
	IN		const	ib_ca_attr_t* const		p_src )
{
	uint8_t		i;
	uintn_t		offset = (uintn_t)p_dest - (uintn_t)p_src;
	ib_port_attr_t		*p_tmp_port_attr = NULL;

	CL_ASSERT( p_dest );
	CL_ASSERT( p_src );

	/* Fix up the pointers to point within the destination buffer. */
	p_dest->p_page_size =
		(uint32_t*)(((uint8_t*)p_dest->p_page_size) + offset);

	p_tmp_port_attr =
		(ib_port_attr_t*)(((uint8_t*)p_dest->p_port_attr) + offset);

	/* Fix up each port attribute's gid and pkey table pointers. */
	for( i = 0; i < p_dest->num_ports; i++ )
	{
		p_tmp_port_attr[i].p_gid_table = (ib_gid_t*)
			(((uint8_t*)p_tmp_port_attr[i].p_gid_table) + offset);

		p_tmp_port_attr[i].p_pkey_table = (ib_net16_t*)
			(((uint8_t*)p_tmp_port_attr[i].p_pkey_table) + offset);
	}
	p_dest->p_port_attr = p_tmp_port_attr;
}

void
mlx4_post_query_ca (
	IN			ib_ca_handle_t			h_uvp_ca,
	IN			ib_api_status_t			ioctl_status,
	IN			ib_ca_attr_t			*p_ca_attr,
	IN			size_t				byte_count,
	IN			ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_context *context = (struct ibv_context *)h_uvp_ca;

	CL_ASSERT(context && p_umv_buf);

	if ( ioctl_status == IB_SUCCESS && p_ca_attr && byte_count )
	{
		CL_ASSERT( byte_count >= p_ca_attr->size );

		pthread_mutex_lock(&context->mutex);

		if (context->p_hca_attr)
			cl_free(context->p_hca_attr);
		context->p_hca_attr = (ib_ca_attr_t*)(ULONG_PTR)p_umv_buf->p_inout_buf;
		cl_memcpy( context->p_hca_attr, p_ca_attr, p_ca_attr->size );
		__fixup_ca_attr( context->p_hca_attr, p_ca_attr );

		pthread_mutex_unlock(&context->mutex);
	}
	else if (p_umv_buf->p_inout_buf)
	{
		cl_free( (void*)(ULONG_PTR)p_umv_buf->p_inout_buf );
	}
}

ib_api_status_t
mlx4_post_close_ca (
	IN			ib_ca_handle_t			h_uvp_ca,
	IN			ib_api_status_t			ioctl_status )
{
	struct ibv_context *context = (struct ibv_context *)h_uvp_ca;

	CL_ASSERT(context);

	if (IB_SUCCESS == ioctl_status)
		mlx4_free_context(context);

	return IB_SUCCESS;
}

ib_api_status_t
mlx4_pre_alloc_pd (
	IN		const	ib_ca_handle_t			h_uvp_ca,
	IN	OUT		ci_umv_buf_t			*p_umv_buf,
		OUT		ib_pd_handle_t			*ph_uvp_pd )
{
	struct mlx4_pd *pd;
	struct ibv_context *context = (struct ibv_context *)h_uvp_ca;
	ib_api_status_t status = IB_SUCCESS;

	CL_ASSERT(context && p_umv_buf);

	if( !p_umv_buf->p_inout_buf )
	{
		p_umv_buf->p_inout_buf = (ULONG_PTR)cl_malloc( sizeof(struct ibv_alloc_pd_resp) );
		if( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto end;
		}
	}
	p_umv_buf->input_size = 0;
	p_umv_buf->output_size = sizeof(struct ibv_alloc_pd_resp);
	p_umv_buf->command = TRUE;

	// Mlx4 code:

	pd = cl_malloc(sizeof *pd);
	if (!pd) {
		status = IB_INSUFFICIENT_MEMORY;
		/* release the ioctl buffer, as the other pre-verb error paths do */
		cl_free( (void*)(ULONG_PTR)p_umv_buf->p_inout_buf );
		goto end;
	}

	pd->ibv_pd.context = context;

	*ph_uvp_pd = (ib_pd_handle_t)&pd->ibv_pd;

end:
	return status;
}

void
mlx4_post_alloc_pd (
	IN			ib_ca_handle_t			h_uvp_ca,
	IN			ib_api_status_t			ioctl_status,
	IN	OUT		ib_pd_handle_t			*ph_uvp_pd,
	IN			ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_pd			*pd = (struct ibv_pd *)*ph_uvp_pd;
	struct ibv_alloc_pd_resp	*p_resp;

	UNREFERENCED_PARAMETER(h_uvp_ca);

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = (struct ibv_alloc_pd_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		// Mlx4 code:

		pd->handle = p_resp->pd_handle;
		to_mpd(pd)->pdn = p_resp->pdn;
	}
	else
	{
		cl_free(to_mpd(pd));
	}

	cl_free(p_resp);
	return;
}

void
mlx4_post_free_pd (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN			ib_api_status_t			ioctl_status )
{
	struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;

	CL_ASSERT(pd);

	if (IB_SUCCESS == ioctl_status)
		cl_free(to_mpd(pd));
}

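/*
 * Round a requested queue size up to the next power of two, as the
 * hardware expects.  e.g. __align_queue_size(100) == 128; a request of
 * 0 or 1 yields 1.
 */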
static int __align_queue_size(int req)
{
	int nent;

	for (nent = 1; nent < req; nent <<= 1)
		; /* nothing */

	return nent;
}

ib_api_status_t
mlx4_pre_create_cq (
	IN		const	ib_ca_handle_t			h_uvp_ca,
	IN	OUT		uint32_t* const			p_size,
	IN	OUT		ci_umv_buf_t			*p_umv_buf,
		OUT		ib_cq_handle_t			*ph_uvp_cq )
{
	struct mlx4_cq		*cq;
	struct ibv_create_cq	*p_create_cq;
	struct ibv_context		*context = (struct ibv_context *)h_uvp_ca;
	ib_api_status_t		status = IB_SUCCESS;
	int size = max( sizeof(struct ibv_create_cq), sizeof(struct ibv_create_cq_resp) );

	CL_ASSERT(h_uvp_ca && p_umv_buf);

	if( !p_umv_buf->p_inout_buf )
	{
		p_umv_buf->p_inout_buf = (ULONG_PTR)cl_malloc( size );
		if( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_umv_buf;
		}
	}
	p_umv_buf->input_size = sizeof(struct ibv_create_cq);
	p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp);
	p_umv_buf->command = TRUE;

	p_create_cq = (struct ibv_create_cq*)(ULONG_PTR)p_umv_buf->p_inout_buf;

	// Mlx4 code:

	/* Sanity check CQ size before proceeding */
	if (*p_size > 0x3fffff) {
		status = IB_INVALID_CQ_SIZE;
		goto err_cqe_size;
	}

	cq = cl_malloc(sizeof *cq);
	if (!cq) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_cq;
	}

	if (cl_spinlock_init(&cq->lock)) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_lock;
	}

	*p_size = __align_queue_size(*p_size + 1);

	if (mlx4_alloc_buf(&cq->buf, *p_size * MLX4_CQ_ENTRY_SIZE,
						context->page_size))
	{
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_buf;
	}

	cq->ibv_cq.context = context;
	cq->cons_index = 0;

	cq->set_ci_db  = mlx4_alloc_db(to_mctx(context), MLX4_DB_TYPE_CQ);
	if (!cq->set_ci_db)
	{
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_db;
	}

	cq->arm_db = cq->set_ci_db + 1;
	*cq->arm_db = 0;
	cq->arm_sn = 1;
	*cq->set_ci_db = 0;

	p_create_cq->buf_addr = (uintptr_t) cq->buf.buf;
	p_create_cq->db_addr  = (uintptr_t) cq->set_ci_db;
	p_create_cq->arm_sn_addr  = (uintptr_t) &cq->arm_sn;
	p_create_cq->cqe = --(*p_size);

	*ph_uvp_cq = (ib_cq_handle_t)&cq->ibv_cq;
	goto end;

err_alloc_db:
	mlx4_free_buf(&cq->buf);
err_alloc_buf:
	cl_spinlock_destroy(&cq->lock);
err_lock:
	cl_free(cq);
err_cq:
err_cqe_size:
	cl_free( (void*)(ULONG_PTR)p_umv_buf->p_inout_buf );
err_umv_buf:
end:
	return status;
}

void
mlx4_post_create_cq (
	IN		const	ib_ca_handle_t			h_uvp_ca,
	IN			ib_api_status_t			ioctl_status,
	IN		const	uint32_t			size,
	IN	OUT		ib_cq_handle_t			*ph_uvp_cq,
	IN			ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_cq				*cq = (struct ibv_cq *)*ph_uvp_cq;
	struct ibv_create_cq_resp	*p_resp;

	UNREFERENCED_PARAMETER(h_uvp_ca);
	UNREFERENCED_PARAMETER(size);

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = (struct ibv_create_cq_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		// Mlx4 code:

		to_mcq(cq)->cqn	= p_resp->cqn;
		cq->cqe		= p_resp->cqe;
		cq->handle	= p_resp->cq_handle;
	}
	else
	{
		mlx4_post_destroy_cq (*ph_uvp_cq, IB_SUCCESS);
	}

	cl_free(p_resp);
	return;
}

ib_api_status_t
mlx4_pre_query_cq (
	IN		const	ib_cq_handle_t			h_uvp_cq,
		OUT		uint32_t* const			p_size,
	IN	OUT		ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_cq *cq = (struct ibv_cq *)h_uvp_cq;

	UNREFERENCED_PARAMETER(p_umv_buf);

	*p_size = cq->cqe;

	return IB_VERBS_PROCESSING_DONE;
}

void
mlx4_post_destroy_cq (
	IN		const	ib_cq_handle_t			h_uvp_cq,
	IN			ib_api_status_t			ioctl_status )
{
	struct ibv_cq *cq = (struct ibv_cq *)h_uvp_cq;

	CL_ASSERT(cq);

	if (IB_SUCCESS == ioctl_status) {
		mlx4_free_db(to_mctx(cq->context), MLX4_DB_TYPE_CQ, to_mcq(cq)->set_ci_db);
		mlx4_free_buf(&to_mcq(cq)->buf);

		cl_spinlock_destroy(&to_mcq(cq)->lock);
		cl_free(to_mcq(cq));
	}
}

ib_api_status_t
mlx4_pre_create_srq (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN		const	ib_srq_attr_t			*p_srq_attr,
	IN	OUT		ci_umv_buf_t			*p_umv_buf,
		OUT		ib_srq_handle_t			*ph_uvp_srq )
{
	struct mlx4_srq *srq;
	struct ibv_create_srq *p_create_srq;
	struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;
	ib_api_status_t status = IB_SUCCESS;
	size_t size = max( sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp) );

	CL_ASSERT(p_umv_buf);

	if( !p_umv_buf->p_inout_buf )
	{
		p_umv_buf->p_inout_buf = (ULONG_PTR)cl_malloc( size );
		if( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_memory;
		}
	}
	p_umv_buf->input_size = sizeof(struct ibv_create_srq);
	p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);
	p_umv_buf->command = TRUE;

	p_create_srq = (struct ibv_create_srq*)(ULONG_PTR)p_umv_buf->p_inout_buf;

	// Mlx4 code:

	/* Sanity check SRQ size before proceeding */
	if (p_srq_attr->max_wr > 1 << 16 || p_srq_attr->max_sge > 64)
	{
		status = IB_INVALID_PARAMETER;
		goto err_params;
	}

	srq = cl_malloc(sizeof *srq);
	if (!srq) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_srq;
	}

	if (cl_spinlock_init(&srq->lock)) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_lock;
	}

	srq->ibv_srq.pd		= pd;
	srq->ibv_srq.context	= pd->context;

	srq->max	= __align_queue_size(p_srq_attr->max_wr + 1);
	srq->max_gs	= p_srq_attr->max_sge;
	srq->counter	= 0;

	if (mlx4_alloc_srq_buf(pd, (struct ibv_srq_attr *)p_srq_attr, srq))
	{
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_buf;
	}

	srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);
	if (!srq->db)
	{
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_db;
	}

	*srq->db = 0;

	// fill the parameters for ioctl
	p_create_srq->buf_addr = (uintptr_t) srq->buf.buf;
	p_create_srq->db_addr  = (uintptr_t) srq->db;
	p_create_srq->pd_handle = pd->handle;
	p_create_srq->max_wr = p_srq_attr->max_wr;
	p_create_srq->max_sge = p_srq_attr->max_sge;
	p_create_srq->srq_limit = p_srq_attr->srq_limit;

	*ph_uvp_srq = (ib_srq_handle_t)&srq->ibv_srq;
	goto end;

err_alloc_db:
	cl_free(srq->wrid);
	mlx4_free_buf(&srq->buf);
err_alloc_buf:
	cl_spinlock_destroy(&srq->lock);
err_lock:
	cl_free(srq);
err_alloc_srq:
	cl_free( (void*)(ULONG_PTR)p_umv_buf->p_inout_buf );
err_params: err_memory:
end:
	return status;
}

void
mlx4_post_create_srq (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN			ib_api_status_t			ioctl_status,
	IN	OUT		ib_srq_handle_t			*ph_uvp_srq,
	IN			ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_srq *ibsrq = (struct ibv_srq *)*ph_uvp_srq;
	struct mlx4_srq *srq = to_msrq(ibsrq);
	struct ibv_create_srq_resp *p_resp;

	UNREFERENCED_PARAMETER(h_uvp_pd);

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = (struct ibv_create_srq_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		// Mlx4 code:

		srq->srqn	= p_resp->srqn;
		ibsrq->handle	= p_resp->srq_handle;

		srq->max	= p_resp->max_wr;
		srq->max_gs	= p_resp->max_sge;
	}
	else
	{
		mlx4_post_destroy_srq (*ph_uvp_srq, IB_SUCCESS);
	}

	cl_free(p_resp);
	return;
}

ib_api_status_t
mlx4_pre_destroy_srq (
	IN		const	ib_srq_handle_t			h_uvp_srq )
{
#ifdef XRC_SUPPORT
	struct ibv_srq *ibsrq = (struct ibv_srq *)h_uvp_srq;
	struct mlx4_srq *srq = to_msrq(ibsrq);
	struct mlx4_cq *mcq = NULL;

	if (ibsrq->xrc_cq)
	{
		/* is an xrc_srq */
		mcq = to_mcq(ibsrq->xrc_cq);
		mlx4_cq_clean(mcq, 0, srq);
		cl_spinlock_acquire(&mcq->lock);
		mlx4_clear_xrc_srq(to_mctx(ibsrq->context), srq->srqn);
		cl_spinlock_release(&mcq->lock);
	}
#else
	UNUSED_PARAM(h_uvp_srq);
#endif
	return IB_SUCCESS;
}

void
mlx4_post_destroy_srq (
	IN		const	ib_srq_handle_t			h_uvp_srq,
	IN			ib_api_status_t			ioctl_status )
{
	struct ibv_srq		*ibsrq = (struct ibv_srq *)h_uvp_srq;
	struct mlx4_srq	*srq = to_msrq(ibsrq);

	CL_ASSERT(srq);

	if (IB_SUCCESS == ioctl_status)
	{
		mlx4_free_db(to_mctx(ibsrq->context), MLX4_DB_TYPE_RQ, srq->db);
		cl_free(srq->wrid);
		mlx4_free_buf(&srq->buf);
		cl_spinlock_destroy(&srq->lock);
		cl_free(srq);
	}
	else
	{
#ifdef XRC_SUPPORT
		if (ibsrq->xrc_cq) {
			/* is an xrc_srq */
			struct mlx4_cq	*mcq = to_mcq(ibsrq->xrc_cq);
			cl_spinlock_acquire(&mcq->lock);
			mlx4_store_xrc_srq(to_mctx(ibsrq->context), srq->srqn, srq);
			cl_spinlock_release(&mcq->lock);
		}
#endif
	}
}

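/* Map IBAL QP types onto libibverbs QP types; anything unrecognized falls back to RC. */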
static enum ibv_qp_type
__to_qp_type(ib_qp_type_t type)
{
	switch (type) {
	case IB_QPT_RELIABLE_CONN: return IBV_QPT_RC;
	case IB_QPT_UNRELIABLE_CONN: return IBV_QPT_UC;
	case IB_QPT_UNRELIABLE_DGRM: return IBV_QPT_UD;
#ifdef XRC_SUPPORT
	//case IB_QPT_XRC_CONN: return IBV_QPT_XRC;
#endif
	default: return IBV_QPT_RC;
	}
}

ib_api_status_t
mlx4_pre_create_qp (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN		const	ib_qp_create_t			*p_create_attr,
	IN	OUT		ci_umv_buf_t			*p_umv_buf,
		OUT		ib_qp_handle_t			*ph_uvp_qp )
{
	struct ibv_pd			*pd = (struct ibv_pd *)h_uvp_pd;
	struct mlx4_context	*context = to_mctx(pd->context);
	struct mlx4_qp		*qp;
	struct ibv_create_qp	*p_create_qp;
	struct ibv_qp_init_attr	attr;
	ib_api_status_t		status = IB_SUCCESS;
	int size = max( sizeof(struct ibv_create_qp), sizeof(struct ibv_create_qp_resp) );

	CL_ASSERT(p_umv_buf);

	if( !p_umv_buf->p_inout_buf )
	{
		p_umv_buf->p_inout_buf = (ULONG_PTR)cl_malloc(size);
		if( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_memory;
		}
	}
	p_umv_buf->input_size = sizeof(struct ibv_create_qp);
	p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp);
	p_umv_buf->command = TRUE;

	p_create_qp = (struct ibv_create_qp*)(ULONG_PTR)p_umv_buf->p_inout_buf;

	/* convert attributes */
	memset( &attr, 0, sizeof(attr) );
	attr.send_cq			= (struct ibv_cq *)p_create_attr->h_sq_cq;
	attr.recv_cq			= (struct ibv_cq *)p_create_attr->h_rq_cq;
	attr.srq			= (struct ibv_srq*)p_create_attr->h_srq;
	attr.cap.max_send_wr		= p_create_attr->sq_depth;
	attr.cap.max_recv_wr		= p_create_attr->rq_depth;
	attr.cap.max_send_sge		= p_create_attr->sq_sge;
	attr.cap.max_recv_sge		= p_create_attr->rq_sge;
	attr.cap.max_inline_data	= p_create_attr->sq_max_inline;
	attr.qp_type			= __to_qp_type(p_create_attr->qp_type);
	attr.sq_sig_all			= p_create_attr->sq_signaled;

	// Mlx4 code:

	/* Sanity check QP size before proceeding */
	if (attr.cap.max_send_wr    > (uint32_t) context->max_qp_wr ||
	    attr.cap.max_recv_wr    > (uint32_t) context->max_qp_wr ||
	    attr.cap.max_send_sge   > (uint32_t) context->max_sge   ||
	    attr.cap.max_recv_sge   > (uint32_t) context->max_sge   ||
	    attr.cap.max_inline_data > 1024)
	{
		status = IB_INVALID_PARAMETER;
		goto end;
	}

	qp = cl_malloc(sizeof *qp);
	if (!qp) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_qp;
	}

	mlx4_calc_sq_wqe_size(&attr.cap, attr.qp_type, qp);

	/*
	 * We need to leave 2 KB + 1 WQE of headroom in the SQ to
	 * allow HW to prefetch.
	 */
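	/* e.g. with 64-byte send WQEs (wqe_shift == 6), this reserves (2048 >> 6) + 1 == 33 spare WQEs */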
	qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
	qp->sq.wqe_cnt = __align_queue_size(attr.cap.max_send_wr + qp->sq_spare_wqes);
	qp->rq.wqe_cnt = __align_queue_size(attr.cap.max_recv_wr);

	if (attr.srq || attr.qp_type == IBV_QPT_XRC)
		attr.cap.max_recv_wr = qp->rq.wqe_cnt = 0;
	else
	{
		if (attr.cap.max_recv_sge < 1)
			attr.cap.max_recv_sge = 1;
		if (attr.cap.max_recv_wr < 1)
			attr.cap.max_recv_wr = 1;
	}

	if (mlx4_alloc_qp_buf(pd, &attr.cap, attr.qp_type, qp))
	{
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_qp_buff;
	}

	mlx4_init_qp_indices(qp);

	if (cl_spinlock_init(&qp->sq.lock)) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_spinlock_sq;
	}
	if (cl_spinlock_init(&qp->rq.lock)) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_spinlock_rq;
	}

	// fill qp fields
	if (!attr.srq && attr.qp_type != IBV_QPT_XRC) {
		qp->db = mlx4_alloc_db(context, MLX4_DB_TYPE_RQ);
		if (!qp->db) {
			status = IB_INSUFFICIENT_MEMORY;
			goto err_db;
		}

		*qp->db = 0;
	}
	if (attr.sq_sig_all)
		qp->sq_signal_bits = cl_hton32(MLX4_WQE_CTRL_CQ_UPDATE);
	else
		qp->sq_signal_bits = 0;

	// fill the rest of qp fields
	qp->ibv_qp.pd = pd;
	qp->ibv_qp.context = pd->context;
	qp->ibv_qp.send_cq = attr.send_cq;
	qp->ibv_qp.recv_cq = attr.recv_cq;
	qp->ibv_qp.srq = attr.srq;
	qp->ibv_qp.state = IBV_QPS_RESET;
	qp->ibv_qp.qp_type = attr.qp_type;

	// fill request fields
	p_create_qp->buf_addr = (uintptr_t) qp->buf.buf;
	if (!attr.srq && attr.qp_type != IBV_QPT_XRC)
		p_create_qp->db_addr = (uintptr_t) qp->db;
	else
		p_create_qp->db_addr = 0;

	p_create_qp->pd_handle = pd->handle;
	p_create_qp->send_cq_handle = attr.send_cq->handle;
	p_create_qp->recv_cq_handle = attr.recv_cq->handle;
	p_create_qp->srq_handle = attr.qp_type == IBV_QPT_XRC ?
		(attr.xrc_domain ? attr.xrc_domain->handle : 0) :
		(attr.srq ? attr.srq->handle : 0);

	p_create_qp->max_send_wr = attr.cap.max_send_wr;
	p_create_qp->max_recv_wr = attr.cap.max_recv_wr;
	p_create_qp->max_send_sge = attr.cap.max_send_sge;
	p_create_qp->max_recv_sge = attr.cap.max_recv_sge;
	p_create_qp->max_inline_data = attr.cap.max_inline_data;
	p_create_qp->sq_sig_all = (uint8_t)attr.sq_sig_all;
	p_create_qp->qp_type = attr.qp_type;
	p_create_qp->is_srq = (uint8_t)(attr.qp_type == IBV_QPT_XRC ?
									!!attr.xrc_domain : !!attr.srq);

	p_create_qp->log_sq_stride   = (uint8_t)qp->sq.wqe_shift;
	for (p_create_qp->log_sq_bb_count = 0;
	     qp->sq.wqe_cnt > 1 << p_create_qp->log_sq_bb_count;
	     ++p_create_qp->log_sq_bb_count)
		; /* nothing */
	p_create_qp->sq_no_prefetch = 0;

	*ph_uvp_qp = (ib_qp_handle_t)&qp->ibv_qp;
	goto end;

err_db:
	cl_spinlock_destroy(&qp->rq.lock);
err_spinlock_rq:
	cl_spinlock_destroy(&qp->sq.lock);
err_spinlock_sq:
	cl_free(qp->sq.wrid);
	if (qp->rq.wqe_cnt)
		cl_free(qp->rq.wrid);
	mlx4_free_buf(&qp->buf);
err_alloc_qp_buff:
	cl_free(qp);
err_alloc_qp:
	cl_free( (void*)(ULONG_PTR)p_umv_buf->p_inout_buf );
err_memory:
end:
	return status;
}

ib_api_status_t
mlx4_post_create_qp (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN			ib_api_status_t			ioctl_status,
	IN	OUT		ib_qp_handle_t			*ph_uvp_qp,
	IN			ci_umv_buf_t			*p_umv_buf )
{
	struct mlx4_qp			*qp = (struct mlx4_qp *)*ph_uvp_qp;
	struct ibv_pd			*pd = (struct ibv_pd *)h_uvp_pd;
	struct ibv_context			*context = pd->context;
	struct ibv_create_qp_resp	*p_resp;
	ib_api_status_t status = IB_SUCCESS;

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = (struct ibv_create_qp_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		// Mlx4 code:

		struct ibv_qp_cap	cap;

		cap.max_recv_sge	= p_resp->max_recv_sge;
		cap.max_send_sge	= p_resp->max_send_sge;
		cap.max_recv_wr		= p_resp->max_recv_wr;
		cap.max_send_wr		= p_resp->max_send_wr;
		cap.max_inline_data	= p_resp->max_inline_data;

		qp->ibv_qp.handle	= p_resp->qp_handle;
		qp->ibv_qp.qp_num	= p_resp->qpn;

		qp->rq.wqe_cnt	= cap.max_recv_wr;
		qp->rq.max_gs	= cap.max_recv_sge;

		/* adjust rq maxima to not exceed reported device maxima */
		cap.max_recv_wr = min((uint32_t) to_mctx(context)->max_qp_wr, cap.max_recv_wr);
		cap.max_recv_sge = min((uint32_t) to_mctx(context)->max_sge, cap.max_recv_sge);

		qp->rq.max_post = cap.max_recv_wr;
		//qp->rq.max_gs = cap.max_recv_sge;  - RIB : add this ?
		mlx4_set_sq_sizes(qp, &cap, qp->ibv_qp.qp_type);

		qp->doorbell_qpn    = cl_hton32(qp->ibv_qp.qp_num << 8);

		if (mlx4_store_qp(to_mctx(context), qp->ibv_qp.qp_num, qp))
		{
			/* the QP is freed here, so it must not be touched again below */
			mlx4_post_destroy_qp(*ph_uvp_qp, IB_SUCCESS);
			status = IB_INSUFFICIENT_MEMORY;
			goto end;
		}
		MLX4_PRINT( TRACE_LEVEL_INFORMATION, MLX4_DBG_QP,
			("qpn %#x, buf %p, db_rec %p, sq %d:%d, rq %d:%d\n",
			qp->ibv_qp.qp_num, qp->buf.buf, qp->db,
			qp->sq.head, qp->sq.tail, qp->rq.head, qp->rq.tail ));
	}
	else
	{
		mlx4_post_destroy_qp(*ph_uvp_qp, IB_SUCCESS);
	}

end:
	cl_free(p_resp);
	return status;
}

ib_api_status_t
mlx4_pre_modify_qp (
	IN		const	ib_qp_handle_t			h_uvp_qp,
	IN		const	ib_qp_mod_t			*p_modify_attr,
	IN	OUT		ci_umv_buf_t			*p_umv_buf )
{
	ib_api_status_t status = IB_SUCCESS;

	UNREFERENCED_PARAMETER(h_uvp_qp);
	UNREFERENCED_PARAMETER(p_modify_attr);

	CL_ASSERT(p_umv_buf);

	if( !p_umv_buf->p_inout_buf )
	{
		p_umv_buf->p_inout_buf = (ULONG_PTR)cl_malloc(sizeof(struct ibv_modify_qp_resp));
		if( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_memory;
		}
	}
	p_umv_buf->input_size = 0;
	p_umv_buf->output_size = sizeof(struct ibv_modify_qp_resp);
	p_umv_buf->command = TRUE;

err_memory:
	return status;
}

void
mlx4_post_query_qp (
	IN			ib_qp_handle_t			h_uvp_qp,
	IN			ib_api_status_t			ioctl_status,
	IN	OUT		ib_qp_attr_t			*p_query_attr,
	IN	OUT		ci_umv_buf_t			*p_umv_buf )
{
	struct mlx4_qp *qp = (struct mlx4_qp *)h_uvp_qp;

	UNREFERENCED_PARAMETER(p_umv_buf);

	if(IB_SUCCESS == ioctl_status)
	{
		p_query_attr->sq_max_inline	= qp->max_inline_data;
		p_query_attr->sq_sge		= qp->sq.max_gs;
		p_query_attr->sq_depth		= qp->sq.max_post;
		p_query_attr->rq_sge		= qp->rq.max_gs;
		p_query_attr->rq_depth		= qp->rq.max_post;
	}
}

void
mlx4_post_modify_qp (
	IN		const	ib_qp_handle_t			h_uvp_qp,
	IN			ib_api_status_t			ioctl_status,
	IN	OUT		ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_qp				*qp = (struct ibv_qp *)h_uvp_qp;
	struct ibv_modify_qp_resp	*p_resp;

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = (struct ibv_modify_qp_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		// Mlx4 code:

		if (qp->state == IBV_QPS_RESET &&
		    p_resp->attr_mask & IBV_QP_STATE &&
		    p_resp->qp_state == IBV_QPS_INIT)
		{
			mlx4_qp_init_sq_ownership(to_mqp(qp));
		}

		if (p_resp->attr_mask & IBV_QP_STATE) {
			qp->state = p_resp->qp_state;
		}

		if (p_resp->attr_mask & IBV_QP_STATE &&
		    p_resp->qp_state == IBV_QPS_RESET)
		{
			mlx4_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
						qp->srq ? to_msrq(qp->srq) : NULL);
			if (qp->send_cq != qp->recv_cq)
				mlx4_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);

			mlx4_init_qp_indices(to_mqp(qp));
			if (!qp->srq && qp->qp_type != IBV_QPT_XRC)
				*to_mqp(qp)->db = 0;
		}
	}

	cl_free (p_resp);
	return;
}

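/*
 * Lock/unlock both CQs of a QP in a consistent order (by CQ number) so
 * that two threads taking the pair concurrently cannot deadlock.
 */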
static void
__mlx4_lock_cqs(struct ibv_qp *qp)
{
	struct mlx4_cq *send_cq = to_mcq(qp->send_cq);
	struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq);

	if (send_cq == recv_cq)
		cl_spinlock_acquire(&send_cq->lock);
	else if (send_cq->cqn < recv_cq->cqn) {
		cl_spinlock_acquire(&send_cq->lock);
		cl_spinlock_acquire(&recv_cq->lock);
	} else {
		cl_spinlock_acquire(&recv_cq->lock);
		cl_spinlock_acquire(&send_cq->lock);
	}
}

static void
__mlx4_unlock_cqs(struct ibv_qp *qp)
{
	struct mlx4_cq *send_cq = to_mcq(qp->send_cq);
	struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq);

	if (send_cq == recv_cq)
		cl_spinlock_release(&send_cq->lock);
	else if (send_cq->cqn < recv_cq->cqn) {
		cl_spinlock_release(&recv_cq->lock);
		cl_spinlock_release(&send_cq->lock);
	} else {
		cl_spinlock_release(&send_cq->lock);
		cl_spinlock_release(&recv_cq->lock);
	}
}

ib_api_status_t
mlx4_pre_destroy_qp (
	IN		const	ib_qp_handle_t			h_uvp_qp )
{
	struct ibv_qp *qp = (struct ibv_qp*)h_uvp_qp;

	mlx4_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
				qp->srq ? to_msrq(qp->srq) : NULL);
	if (qp->send_cq != qp->recv_cq)
		mlx4_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);

	__mlx4_lock_cqs(qp);
	mlx4_clear_qp(to_mctx(qp->context), qp->qp_num);
	__mlx4_unlock_cqs(qp);

	return IB_SUCCESS;
}

void
mlx4_post_destroy_qp (
	IN		const	ib_qp_handle_t			h_uvp_qp,
	IN			ib_api_status_t			ioctl_status )
{
	struct ibv_qp* ibqp = (struct ibv_qp *)h_uvp_qp;
	struct mlx4_qp* qp = to_mqp(ibqp);

	CL_ASSERT(h_uvp_qp);

	if (IB_SUCCESS == ioctl_status)
	{
		if (!ibqp->srq && ibqp->qp_type != IBV_QPT_XRC)
			mlx4_free_db(to_mctx(ibqp->context), MLX4_DB_TYPE_RQ, qp->db);

		cl_spinlock_destroy(&qp->sq.lock);
		cl_spinlock_destroy(&qp->rq.lock);

		MLX4_PRINT( TRACE_LEVEL_INFORMATION, MLX4_DBG_QP,
			("qpn %#x, buf %p, sq %d:%d, rq %d:%d\n", qp->ibv_qp.qp_num, qp->buf.buf,
			qp->sq.head, qp->sq.tail, qp->rq.head, qp->rq.tail ));
		cl_free(qp->sq.wrid);
		if (qp->rq.wqe_cnt)
			cl_free(qp->rq.wrid);
		mlx4_free_buf(&qp->buf);
		cl_free(qp);
	}
	else
	{
		__mlx4_lock_cqs(ibqp);
		mlx4_store_qp(to_mctx(ibqp->context), ibqp->qp_num, qp);
		__mlx4_unlock_cqs(ibqp);
	}
}

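/*
 * Give the ND layer the address and size of the cached QP state, so the
 * output of a modify-QP operation can be written straight into it.
 */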
void
mlx4_nd_modify_qp (
	IN		const	ib_qp_handle_t			h_uvp_qp,
		OUT		void**				pp_outbuf,
		OUT		DWORD*				p_size )
{
	struct ibv_qp *ibv_qp = (struct ibv_qp *)h_uvp_qp;

	*(uint32_t**)pp_outbuf = (uint32_t*)&ibv_qp->state;
	*p_size = sizeof(ibv_qp->state);
}

static ib_qp_state_t __from_qp_state(enum ibv_qp_state state)
{
	switch (state) {
		case IBV_QPS_RESET: return IB_QPS_RESET;
		case IBV_QPS_INIT: return IB_QPS_INIT;
		case IBV_QPS_RTR: return IB_QPS_RTR;
		case IBV_QPS_RTS: return IB_QPS_RTS;
		case IBV_QPS_SQD: return IB_QPS_SQD;
		case IBV_QPS_SQE: return IB_QPS_SQERR;
		case IBV_QPS_ERR: return IB_QPS_ERROR;
		default: return IB_QPS_TIME_WAIT;
	}
}

uint32_t
mlx4_nd_get_qp_state (
	IN		const	ib_qp_handle_t			h_uvp_qp )
{
	struct ibv_qp *ibv_qp = (struct ibv_qp *)h_uvp_qp;

	return __from_qp_state(ibv_qp->state);
}

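/*
 * Linear scan of the cached GID table for the given port; returns the
 * index of the matching GID, or 0 if no entry matches.
 */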
static uint8_t
__gid_to_index_lookup (
	IN			ib_ca_attr_t			*p_ca_attr,
	IN			uint8_t				port_num,
	IN			uint8_t				*raw_gid )
{
	ib_gid_t *p_gid_table = NULL;
	uint8_t i, index = 0;
	uint16_t num_gids;

	p_gid_table = p_ca_attr->p_port_attr[port_num-1].p_gid_table;
	CL_ASSERT (p_gid_table);

	num_gids = p_ca_attr->p_port_attr[port_num-1].num_gids;

	for (i = 0; i < num_gids; i++)
	{
		/* cl_memcmp returns zero when the GIDs match */
		if (!cl_memcmp (raw_gid, p_gid_table[i].raw, 16))
		{
			index = i;
			break;
		}
	}
	return index;
}

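/* Map IB path-record static rate encodings to the ibv_rate enum. */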
static enum ibv_rate __to_rate(uint8_t rate)
{
	if (rate == IB_PATH_RECORD_RATE_2_5_GBS) return IBV_RATE_2_5_GBPS;
	if (rate == IB_PATH_RECORD_RATE_5_GBS) return IBV_RATE_5_GBPS;
	if (rate == IB_PATH_RECORD_RATE_10_GBS) return IBV_RATE_10_GBPS;
	if (rate == IB_PATH_RECORD_RATE_20_GBS) return IBV_RATE_20_GBPS;
	if (rate == IB_PATH_RECORD_RATE_30_GBS) return IBV_RATE_30_GBPS;
	if (rate == IB_PATH_RECORD_RATE_40_GBS) return IBV_RATE_40_GBPS;
	if (rate == IB_PATH_RECORD_RATE_60_GBS) return IBV_RATE_60_GBPS;
	if (rate == IB_PATH_RECORD_RATE_80_GBS) return IBV_RATE_80_GBPS;
	if (rate == IB_PATH_RECORD_RATE_120_GBS) return IBV_RATE_120_GBPS;
	return IBV_RATE_MAX;
}

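/* Split the GRH version/traffic-class/flow-label dword into its fields:
 * version in bits 31-28, traffic class in bits 27-20, flow label in the
 * low 20 bits. */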
inline void
__grh_get_ver_class_flow(
	IN		const	ib_net32_t			ver_class_flow,
		OUT		uint8_t* const			p_ver OPTIONAL,
		OUT		uint8_t* const			p_tclass OPTIONAL,
		OUT		net32_t* const			p_flow_lbl OPTIONAL )
{
	ib_net32_t tmp_ver_class_flow;

	tmp_ver_class_flow = cl_ntoh32( ver_class_flow );

	if (p_ver)
		*p_ver = (uint8_t)(tmp_ver_class_flow >> 28);

	if (p_tclass)
		*p_tclass = (uint8_t)(tmp_ver_class_flow >> 20);

	if (p_flow_lbl)
		*p_flow_lbl = (ver_class_flow & CL_HTON32( 0x000FFFFF ));
}

static ib_api_status_t
__to_ah (
	IN			ib_ca_attr_t			*p_ca_attr,
	IN		const	ib_av_attr_t			*p_av_attr,
		OUT		struct ibv_ah_attr		*p_attr )
{
	if (p_av_attr->port_num == 0 ||
		p_av_attr->port_num > p_ca_attr->num_ports) {
		MLX4_PRINT(TRACE_LEVEL_WARNING ,MLX4_DBG_AV ,
			(" invalid port number specified (%d)\n",p_av_attr->port_num));
		return IB_INVALID_PORT;
	}

	p_attr->port_num = p_av_attr->port_num;
	p_attr->sl = p_av_attr->sl;
	p_attr->dlid = cl_ntoh16 (p_av_attr->dlid);
	p_attr->static_rate = __to_rate(p_av_attr->static_rate);
	p_attr->src_path_bits = p_av_attr->path_bits;

	/* For global destination or Multicast address: */
	if (p_av_attr->grh_valid)
	{
		p_attr->is_global	= TRUE;
		p_attr->grh.hop_limit	= p_av_attr->grh.hop_limit;
		__grh_get_ver_class_flow( p_av_attr->grh.ver_class_flow, NULL,
							&p_attr->grh.traffic_class, &p_attr->grh.flow_label );
		p_attr->grh.sgid_index	= __gid_to_index_lookup (p_ca_attr, p_av_attr->port_num,
											(uint8_t *) p_av_attr->grh.src_gid.raw);
		cl_memcpy (p_attr->grh.dgid.raw, p_av_attr->grh.dest_gid.raw, 16);
	}
	else
	{
		p_attr->is_global = FALSE;
	}
	return IB_SUCCESS;
}

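/*
 * Pack an ibv_ah_attr into the hardware UD address-vector layout
 * (mlx4_ah.av); the 0x80 bit of g_slid marks the address as global.
 */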
static void
__set_av_params(struct mlx4_ah *ah, struct ibv_pd *pd, struct ibv_ah_attr *attr)
{
	ah->av.port_pd = cl_hton32(to_mpd(pd)->pdn | (attr->port_num << 24));
	ah->av.g_slid  = attr->src_path_bits;
	ah->av.dlid    = cl_hton16(attr->dlid);
	if (attr->static_rate) {
		ah->av.stat_rate = (uint8_t)(attr->static_rate + MLX4_STAT_RATE_OFFSET);
		/* XXX check rate cap? */
	}
	ah->av.sl_tclass_flowlabel = cl_hton32(attr->sl << 28);
	if (attr->is_global)
	{
		ah->av.g_slid |= 0x80;
		ah->av.gid_index = attr->grh.sgid_index;
		ah->av.hop_limit = attr->grh.hop_limit;
		ah->av.sl_tclass_flowlabel |=
			cl_hton32((attr->grh.traffic_class << 20) |
				    attr->grh.flow_label);
		cl_memcpy(ah->av.dgid, attr->grh.dgid.raw, 16);
	}
}

ib_api_status_t
mlx4_pre_create_ah (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN		const	ib_av_attr_t			*p_av_attr,
	IN	OUT		ci_umv_buf_t			*p_umv_buf,
		OUT		ib_av_handle_t			*ph_uvp_av )
{
	struct mlx4_ah *ah;
	struct ibv_ah_attr attr;
	struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;
	ib_api_status_t status = IB_SUCCESS;

	UNREFERENCED_PARAMETER(p_umv_buf);

	if (pd->context->p_hca_attr == NULL) {
		status = IB_ERROR;
		goto end;
	}

	// sanity check before allocating anything
	if (p_av_attr->port_num == 0 ||
		p_av_attr->port_num > pd->context->p_hca_attr->num_ports)
	{
		status = IB_INVALID_PORT;
		goto end;
	}

	ah = cl_malloc(sizeof *ah);
	if (!ah) {
		status = IB_INSUFFICIENT_MEMORY;
		goto end;
	}

	// convert parameters
	cl_memset(&attr, 0, sizeof(attr));
	status = __to_ah(pd->context->p_hca_attr, p_av_attr, &attr);
	if (status)
	{
		/* don't leak the AH on conversion failure */
		cl_free(ah);
		goto end;
	}

	ah->ibv_ah.pd = pd;
	ah->ibv_ah.context = pd->context;
	cl_memcpy(&ah->ibv_ah.av_attr, p_av_attr, sizeof (ib_av_attr_t));

	cl_memset(&ah->av, 0, sizeof ah->av);
	__set_av_params(ah, pd, &attr);

	*ph_uvp_av = (ib_av_handle_t)&ah->ibv_ah;
	status = IB_VERBS_PROCESSING_DONE;

end:
	return status;
}

ib_api_status_t
mlx4_pre_query_ah (
	IN		const	ib_av_handle_t			h_uvp_av,
	IN	OUT		ci_umv_buf_t			*p_umv_buf )
{
	UNREFERENCED_PARAMETER(h_uvp_av);
	UNREFERENCED_PARAMETER(p_umv_buf);

	return IB_VERBS_PROCESSING_DONE;
}

void
mlx4_post_query_ah (
	IN		const	ib_av_handle_t			h_uvp_av,
	IN			ib_api_status_t			ioctl_status,
	IN	OUT		ib_av_attr_t			*p_addr_vector,
	IN	OUT		ib_pd_handle_t			*ph_pd,
	IN	OUT		ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av;

	UNREFERENCED_PARAMETER(p_umv_buf);

	CL_ASSERT(h_uvp_av && p_addr_vector);

	if (ioctl_status == IB_SUCCESS)
	{
		cl_memcpy(p_addr_vector, &ah->av_attr, sizeof(ib_av_attr_t));
		if (ph_pd)
			*ph_pd = (ib_pd_handle_t)ah->pd;
	}
}

ib_api_status_t
mlx4_pre_modify_ah (
	IN		const	ib_av_handle_t			h_uvp_av,
	IN		const	ib_av_attr_t			*p_addr_vector,
	IN	OUT		ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av;
	struct ibv_ah_attr attr;
	ib_api_status_t status;

	UNREFERENCED_PARAMETER(p_umv_buf);

	CL_ASSERT (h_uvp_av);

	status = __to_ah(ah->context->p_hca_attr, p_addr_vector, &attr);
	if (status)
		return status;

	__set_av_params(to_mah(ah), ah->pd, &attr);
	cl_memcpy(&ah->av_attr, p_addr_vector, sizeof(ib_av_attr_t));

	return IB_VERBS_PROCESSING_DONE;
}

ib_api_status_t
mlx4_pre_destroy_ah (
	IN		const	ib_av_handle_t			h_uvp_av )
{
	struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av;

	CL_ASSERT(ah);

	cl_free(to_mah(ah));

	return IB_VERBS_PROCESSING_DONE;
}

#ifdef XRC_SUPPORT
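/*
 * The XRC verbs follow the same pre/post split as the rest of this file:
 * the pre verb stages the command in the umv buffer and allocates the
 * user-mode state, the proxy ioctl runs in the kernel, and the post verb
 * completes or unwinds according to the ioctl status.
 */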
ib_api_status_t
mlx4_pre_create_xrc_srq (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN		const	ib_xrcd_handle_t		h_uvp_xrcd,
	IN		const	ib_srq_attr_t			*p_srq_attr,
	IN	OUT			ci_umv_buf_t			*p_umv_buf,
		OUT			ib_srq_handle_t			*ph_uvp_srq )
{
	struct mlx4_srq *srq;
	struct ibv_create_srq *p_create_srq;
	struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;
	struct ibv_xrc_domain *xrc_domain = (struct ibv_xrc_domain *)h_uvp_xrcd;
	ib_api_status_t status = IB_SUCCESS;
	size_t size = max( sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp) );

	CL_ASSERT(p_umv_buf);

	if( !p_umv_buf->p_inout_buf )
	{
		p_umv_buf->p_inout_buf = (ULONG_PTR)cl_malloc( size );
		if( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_memory;
		}
	}
	p_umv_buf->input_size = sizeof(struct ibv_create_srq);
	p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);
	p_umv_buf->command = TRUE;

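	// The same umv buffer carries the ibv_create_srq command down and the
	// ibv_create_srq_resp back up, hence the max() sizing above.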
	p_create_srq = (struct ibv_create_srq*)(ULONG_PTR)p_umv_buf->p_inout_buf;

	// Mlx4 code:

	/* Sanity check SRQ size before proceeding */
	if (p_srq_attr->max_wr > (1 << 16) || p_srq_attr->max_sge > 64)
	{
		status = IB_INVALID_PARAMETER;
		goto err_params;
	}

	srq = cl_malloc(sizeof *srq);
	if (!srq) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_srq;
	}

	if (cl_spinlock_init(&srq->lock)) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_lock;
	}

	srq->ibv_srq.pd      = pd;
	srq->ibv_srq.context = pd->context;

	srq->max     = __align_queue_size(p_srq_attr->max_wr + 1);
	srq->max_gs  = p_srq_attr->max_sge;
	srq->counter = 0;

	if (mlx4_alloc_srq_buf(pd, (struct ibv_srq_attr *)p_srq_attr, srq))
	{
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_buf;
	}

	srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);
	if (!srq->db) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_db;
	}

	*srq->db = 0;
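	// A zeroed doorbell record indicates that no receive WQEs have been
	// posted to the SRQ yet.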

	// fill the parameters for ioctl
	p_create_srq->buf_addr = (uintptr_t) srq->buf.buf;
	p_create_srq->db_addr  = (uintptr_t) srq->db;
	p_create_srq->pd_handle = pd->handle;
	p_create_srq->max_wr = p_srq_attr->max_wr;
	p_create_srq->max_sge = p_srq_attr->max_sge;
	p_create_srq->srq_limit = p_srq_attr->srq_limit;

	*ph_uvp_srq = (ib_srq_handle_t)&srq->ibv_srq;
	goto end;

err_alloc_db:
	cl_free(srq->wrid);
	mlx4_free_buf(&srq->buf);
err_alloc_buf:
	cl_spinlock_destroy(&srq->lock);
err_lock:
	cl_free(srq);
err_alloc_srq:
err_params:
	cl_free( (void*)(ULONG_PTR)p_umv_buf->p_inout_buf );
err_memory:
end:
	return status;
}

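/*
 * On success the kernel-assigned SRQ number and handle are recorded and
 * the SRQ is registered in the context's XRC SRQ table; on any failure
 * the user-mode resources allocated by the pre verb are released.
 */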
ib_api_status_t
mlx4_post_create_xrc_srq (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN				ib_api_status_t			ioctl_status,
	IN	OUT			ib_srq_handle_t			*ph_uvp_srq,
	IN				ci_umv_buf_t			*p_umv_buf )
{
	struct mlx4_srq *srq = (struct mlx4_srq *)*ph_uvp_srq;
	struct ibv_create_srq_resp *p_resp;
	ib_api_status_t status = IB_SUCCESS;

	UNREFERENCED_PARAMETER(h_uvp_pd);

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = (struct ibv_create_srq_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		// Mlx4 code:

		srq->ibv_srq.xrc_srq_num = srq->srqn = p_resp->srqn;
		srq->ibv_srq.handle      = p_resp->srq_handle;

		srq->max    = p_resp->max_wr;
		srq->max_gs = p_resp->max_sge;

		if (mlx4_store_xrc_srq(to_mctx(srq->ibv_srq.context),
			srq->ibv_srq.xrc_srq_num, srq))
		{
			mlx4_post_destroy_srq(*ph_uvp_srq, IB_SUCCESS);
			status = IB_INSUFFICIENT_MEMORY;
		}
	}
	else
	{
		mlx4_post_destroy_srq(*ph_uvp_srq, IB_SUCCESS);
	}

	cl_free( p_resp );
	return status;
}

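/*
 * Opening an XRC domain uses the same two-step pattern: the pre verb
 * allocates the user-mode object and stages the open flags in the umv
 * buffer; the post verb fills in the kernel-assigned handle and xrcdn.
 */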
ib_api_status_t
mlx4_pre_open_xrc_domain (
	IN		const	ib_ca_handle_t			h_uvp_ca,
	IN		const	uint32_t				oflag,
	IN	OUT			ci_umv_buf_t			*p_umv_buf,
		OUT			ib_xrcd_handle_t		*ph_uvp_xrcd )
{
	struct mlx4_xrc_domain *xrcd;
	struct ibv_context *context = (struct ibv_context *)h_uvp_ca;
	struct ibv_open_xrc_domain *p_open_xrcd;
	ib_api_status_t status = IB_SUCCESS;
	size_t size = max( sizeof(struct ibv_open_xrc_domain), sizeof(struct ibv_open_xrc_domain_resp) );

	CL_ASSERT(h_uvp_ca && p_umv_buf);

	if( !p_umv_buf->p_inout_buf )
	{
		p_umv_buf->p_inout_buf = (ULONG_PTR)cl_malloc( size );
		if( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_umv_buf;
		}
	}
	p_umv_buf->input_size = sizeof(struct ibv_open_xrc_domain);
	p_umv_buf->output_size = sizeof(struct ibv_open_xrc_domain_resp);
	p_umv_buf->command = TRUE;

	p_open_xrcd = (struct ibv_open_xrc_domain*)(ULONG_PTR)p_umv_buf->p_inout_buf;

	// Mlx4 code:

	xrcd = cl_malloc(sizeof *xrcd);
	if (!xrcd) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_xrc;
	}

	xrcd->ibv_xrcd.context = context;

	p_open_xrcd->oflags = oflag;

	*ph_uvp_xrcd = (ib_xrcd_handle_t)&xrcd->ibv_xrcd;
	goto end;

err_xrc:
	cl_free( (void*)(ULONG_PTR)p_umv_buf->p_inout_buf );
err_umv_buf:
end:
	return status;
}

void
mlx4_post_open_xrc_domain (
	IN		const	ib_ca_handle_t			h_uvp_ca,
	IN				ib_api_status_t			ioctl_status,
	IN	OUT			ib_xrcd_handle_t		*ph_uvp_xrcd,
	IN				ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_xrc_domain *xrcd = (struct ibv_xrc_domain *)*ph_uvp_xrcd;
	struct ibv_open_xrc_domain_resp *p_resp;

	UNREFERENCED_PARAMETER(h_uvp_ca);

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = (struct ibv_open_xrc_domain_resp*)(ULONG_PTR)p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		// Mlx4 code:

		xrcd->handle = p_resp->xrcd_handle;
		to_mxrcd(xrcd)->xrcdn = p_resp->xrcdn;
	}
	else
	{
		cl_free(to_mxrcd(xrcd));
	}

	cl_free(p_resp);
}

void
mlx4_post_close_xrc_domain (
	IN		const	ib_xrcd_handle_t		h_uvp_xrcd,
	IN				ib_api_status_t			ioctl_status )
{
	struct ibv_xrc_domain *xrcd = (struct ibv_xrc_domain *)h_uvp_xrcd;

	CL_ASSERT(xrcd);

	if (IB_SUCCESS == ioctl_status) {
		cl_free(to_mxrcd(xrcd));
	}
}
#endif