/*
 * Copyright (c) 2007 Cisco, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "mlx4.h"
#include "verbs.h"
#include "mx_abi.h"
#include "wqe.h"
#include "mlx4_debug.h"

#if defined(EVENT_TRACING)
#include "verbs.tmh"
#endif

ib_api_status_t
mlx4_pre_open_ca (
	IN		const	ib_net64_t			ca_guid,
	IN	OUT			ci_umv_buf_t			*p_umv_buf,
		OUT			ib_ca_handle_t			*ph_uvp_ca )
{
	struct ibv_context	*context;
	ib_api_status_t status = IB_SUCCESS;

	UNREFERENCED_PARAMETER(ca_guid);

	context = mlx4_alloc_context();
	if (!context) {
		status = IB_INSUFFICIENT_MEMORY;
		goto end;
	}

	if( p_umv_buf )
	{
		if( !p_umv_buf->p_inout_buf )
		{
			p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_get_context_resp) );
			if( !p_umv_buf->p_inout_buf )
			{
				/* don't leak the context allocated above */
				mlx4_free_context(context);
				status = IB_INSUFFICIENT_MEMORY;
				goto end;
			}
		}
		p_umv_buf->input_size = 0;
		p_umv_buf->output_size = sizeof(struct ibv_get_context_resp);
		p_umv_buf->command = TRUE;
	}

	*ph_uvp_ca = (ib_ca_handle_t)context;

end:
	return status;
}

ib_api_status_t
mlx4_post_open_ca (
	IN		const	ib_net64_t			ca_guid,
	IN				ib_api_status_t			ioctl_status,
	IN	OUT			ib_ca_handle_t			*ph_uvp_ca,
	IN				ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_get_context_resp *p_resp;
	struct ibv_context *context = (struct ibv_context *)*ph_uvp_ca;
	ib_api_status_t status = IB_SUCCESS;

	UNREFERENCED_PARAMETER(ca_guid);

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		if (!mlx4_fill_context(context, p_resp))
		{
			status = IB_INSUFFICIENT_RESOURCES;
			goto end;
		}
	}

end:
	cl_free(p_resp);
	return status;
}
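
/*
 * Note on the pre/post verb pairs in this file: the pre-call runs before
 * the ioctl to the kernel driver and stages its request/response buffer
 * in p_umv_buf; the matching post-call runs after the ioctl returns and
 * consumes (and frees) that buffer.  A pre-call that fails must therefore
 * release anything it allocated, since its post-call will not run.
 */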

ib_api_status_t
mlx4_pre_query_ca (
	IN				ib_ca_handle_t			h_uvp_ca,
	IN				ib_ca_attr_t			*p_ca_attr,
	IN				size_t				byte_count,
	IN				ci_umv_buf_t			*p_umv_buf )
{
	ib_api_status_t status = IB_SUCCESS;

	UNREFERENCED_PARAMETER(h_uvp_ca);

	/* Note that query_ca calls *always* get their attributes from the kernel.
	 *
	 * Assume that if the user buffer is valid then byte_count is valid too,
	 * so we can preallocate the CA attribute buffer for saving the data
	 * returned by the post ioctl.
	 *
	 * Note that we squirrel the buffer away into the umv_buf and only
	 * set it into the HCA if the query is successful.
	 */
	if ( p_ca_attr != NULL )
	{
		p_umv_buf->p_inout_buf = cl_malloc(byte_count);
		if ( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_RESOURCES;
			goto end;
		}
	}

end:
	return status;
}

void
__fixup_ca_attr(
	IN				ib_ca_attr_t* const		p_dest,
	IN		const	ib_ca_attr_t* const		p_src )
{
	uint8_t		i;
	uintn_t		offset = (uintn_t)p_dest - (uintn_t)p_src;
	ib_port_attr_t	*p_tmp_port_attr = NULL;

	CL_ASSERT( p_dest );
	CL_ASSERT( p_src );

	/* Fix up the pointers to point within the destination buffer. */
	p_dest->p_page_size =
		(uint32_t*)(((uint8_t*)p_dest->p_page_size) + offset);

	p_tmp_port_attr =
		(ib_port_attr_t*)(((uint8_t*)p_dest->p_port_attr) + offset);

	/* Fix up each port attribute's gid and pkey table pointers. */
	for( i = 0; i < p_dest->num_ports; i++ )
	{
		p_tmp_port_attr[i].p_gid_table = (ib_gid_t*)
			(((uint8_t*)p_tmp_port_attr[i].p_gid_table) + offset);

		p_tmp_port_attr[i].p_pkey_table = (ib_net16_t*)
			(((uint8_t*)p_tmp_port_attr[i].p_pkey_table) + offset);
	}
	p_dest->p_port_attr = p_tmp_port_attr;
}
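
/*
 * Worked example of the fixup above: if the source attribute block sits
 * at 0x1000 with an embedded pointer p_page_size == 0x1040, and the copy
 * sits at 0x2000, then offset == 0x1000 and the rebased pointer becomes
 * 0x2040 -- the same position relative to the start of the destination.
 */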

void
mlx4_post_query_ca (
	IN				ib_ca_handle_t			h_uvp_ca,
	IN				ib_api_status_t			ioctl_status,
	IN				ib_ca_attr_t			*p_ca_attr,
	IN				size_t				byte_count,
	IN				ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_context *context = (struct ibv_context *)h_uvp_ca;

	CL_ASSERT(context && p_umv_buf);

	if ( ioctl_status == IB_SUCCESS && p_ca_attr && byte_count )
	{
		CL_ASSERT( byte_count >= p_ca_attr->size );

		pthread_mutex_lock(&context->mutex);

		if (context->p_hca_attr)
			cl_free(context->p_hca_attr);
		context->p_hca_attr = p_umv_buf->p_inout_buf;
		cl_memcpy( context->p_hca_attr, p_ca_attr, p_ca_attr->size );
		__fixup_ca_attr( context->p_hca_attr, p_ca_attr );

		pthread_mutex_unlock(&context->mutex);
	}
	else if (p_umv_buf->p_inout_buf)
	{
		cl_free(p_umv_buf->p_inout_buf);
	}
}

ib_api_status_t
mlx4_post_close_ca (
	IN			ib_ca_handle_t				h_uvp_ca,
	IN			ib_api_status_t				ioctl_status )
{
	struct ibv_context *context = (struct ibv_context *)h_uvp_ca;

	CL_ASSERT(context);

	if (IB_SUCCESS == ioctl_status)
		mlx4_free_context(context);

	return IB_SUCCESS;
}

ib_api_status_t
mlx4_pre_alloc_pd (
	IN		const	ib_ca_handle_t			h_uvp_ca,
	IN	OUT			ci_umv_buf_t			*p_umv_buf,
		OUT			ib_pd_handle_t			*ph_uvp_pd )
{
	struct mlx4_pd *pd;
	struct ibv_context *context = (struct ibv_context *)h_uvp_ca;
	ib_api_status_t status = IB_SUCCESS;

	CL_ASSERT(context && p_umv_buf);

	if( !p_umv_buf->p_inout_buf )
	{
		p_umv_buf->p_inout_buf = cl_malloc( sizeof(struct ibv_alloc_pd_resp) );
		if( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto end;
		}
	}
	p_umv_buf->input_size = 0;
	p_umv_buf->output_size = sizeof(struct ibv_alloc_pd_resp);
	p_umv_buf->command = TRUE;

	// Mlx4 code:

	pd = cl_malloc(sizeof *pd);
	if (!pd) {
		status = IB_INSUFFICIENT_MEMORY;
		goto end;
	}

	pd->ibv_pd.context = context;

	*ph_uvp_pd = (ib_pd_handle_t)&pd->ibv_pd;

end:
	return status;
}

void
mlx4_post_alloc_pd (
	IN				ib_ca_handle_t			h_uvp_ca,
	IN				ib_api_status_t			ioctl_status,
	IN	OUT			ib_pd_handle_t			*ph_uvp_pd,
	IN				ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_pd			*pd = (struct ibv_pd *)*ph_uvp_pd;
	struct ibv_alloc_pd_resp	*p_resp;

	UNREFERENCED_PARAMETER(h_uvp_ca);

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		// Mlx4 code:

		pd->handle = p_resp->pd_handle;
		to_mpd(pd)->pdn = p_resp->pdn;
	}
	else
	{
		cl_free(to_mpd(pd));
	}

	cl_free(p_resp);
	return;
}

void
mlx4_post_free_pd (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN				ib_api_status_t			ioctl_status )
{
	struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;

	CL_ASSERT(pd);

	if (IB_SUCCESS == ioctl_status)
		cl_free(to_mpd(pd));
}

static int __align_queue_size(int req)
{
	int nent;

	for (nent = 1; nent < req; nent <<= 1)
		; /* nothing */

	return nent;
}
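
/*
 * __align_queue_size rounds a requested depth up to the next power of
 * two (e.g. a request of 5 becomes 8, 8 stays 8, 9 becomes 16);
 * power-of-two depths let ring indices wrap with a simple mask.
 */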

ib_api_status_t
mlx4_pre_create_cq (
	IN		const	ib_ca_handle_t			h_uvp_ca,
	IN	OUT			uint32_t* const			p_size,
	IN	OUT			ci_umv_buf_t			*p_umv_buf,
		OUT			ib_cq_handle_t			*ph_uvp_cq )
{
	struct mlx4_cq		*cq;
	struct ibv_create_cq	*p_create_cq;
	struct ibv_context	*context = (struct ibv_context *)h_uvp_ca;
	ib_api_status_t		status = IB_SUCCESS;
	int size = max( sizeof(struct ibv_create_cq), sizeof(struct ibv_create_cq_resp) );

	CL_ASSERT(h_uvp_ca && p_umv_buf);

	if( !p_umv_buf->p_inout_buf )
	{
		p_umv_buf->p_inout_buf = cl_malloc( size );
		if( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_umv_buf;
		}
	}
	p_umv_buf->input_size = sizeof(struct ibv_create_cq);
	p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp);
	p_umv_buf->command = TRUE;

	p_create_cq = p_umv_buf->p_inout_buf;

	// Mlx4 code:

	/* Sanity check CQ size before proceeding */
	if (*p_size > 0x3fffff) {
		status = IB_INVALID_CQ_SIZE;
		goto err_cqe_size;
	}

	cq = cl_malloc(sizeof *cq);
	if (!cq) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_cq;
	}

	if (cl_spinlock_init(&cq->lock)) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_lock;
	}

	*p_size = __align_queue_size(*p_size + 1);

	if (mlx4_alloc_buf(&cq->buf, *p_size * MLX4_CQ_ENTRY_SIZE,
						context->page_size))
	{
		/* set a failure status; otherwise IB_SUCCESS would be returned */
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_buf;
	}

	cq->ibv_cq.context = context;
	cq->cons_index = 0;

	cq->set_ci_db  = mlx4_alloc_db(to_mctx(context), MLX4_DB_TYPE_CQ);
	if (!cq->set_ci_db)
	{
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_db;
	}

	cq->arm_db = cq->set_ci_db + 1;
	*cq->arm_db = 0;
	cq->arm_sn = 1;
	*cq->set_ci_db = 0;

	p_create_cq->buf_addr = (uintptr_t) cq->buf.buf;
	p_create_cq->db_addr  = (uintptr_t) cq->set_ci_db;
	p_create_cq->arm_sn_addr  = (uintptr_t) &cq->arm_sn;
	p_create_cq->cqe = --(*p_size);

	*ph_uvp_cq = (ib_cq_handle_t)&cq->ibv_cq;
	goto end;

err_alloc_db:
	mlx4_free_buf(&cq->buf);
err_alloc_buf:
	cl_spinlock_destroy(&cq->lock);
err_lock:
	cl_free(cq);
err_cq:
err_cqe_size:
	cl_free(p_umv_buf->p_inout_buf);
err_umv_buf:
end:
	return status;
}

void
mlx4_post_create_cq (
	IN		const	ib_ca_handle_t			h_uvp_ca,
	IN				ib_api_status_t			ioctl_status,
	IN		const	uint32_t			size,
	IN	OUT			ib_cq_handle_t			*ph_uvp_cq,
	IN				ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_cq			*cq = (struct ibv_cq *)*ph_uvp_cq;
	struct ibv_create_cq_resp	*p_resp;

	UNREFERENCED_PARAMETER(h_uvp_ca);
	UNREFERENCED_PARAMETER(size);

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		// Mlx4 code:

		to_mcq(cq)->cqn	= p_resp->cqn;
		cq->cqe		= p_resp->cqe;
		cq->handle	= p_resp->cq_handle;
	}
	else
	{
		mlx4_post_destroy_cq (*ph_uvp_cq, IB_SUCCESS);
	}

	cl_free(p_resp);
	return;
}

ib_api_status_t
mlx4_pre_query_cq (
	IN		const	ib_cq_handle_t			h_uvp_cq,
		OUT			uint32_t* const			p_size,
	IN	OUT			ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_cq *cq = (struct ibv_cq *)h_uvp_cq;

	UNREFERENCED_PARAMETER(p_umv_buf);

	*p_size = cq->cqe;

	return IB_VERBS_PROCESSING_DONE;
}
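
/*
 * IB_VERBS_PROCESSING_DONE tells the access layer that the verb was
 * satisfied entirely in user mode (here, from the cached cq->cqe), so
 * no ioctl needs to be issued to the kernel driver.
 */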

void
mlx4_post_destroy_cq (
	IN		const	ib_cq_handle_t			h_uvp_cq,
	IN				ib_api_status_t			ioctl_status )
{
	struct ibv_cq *cq = (struct ibv_cq *)h_uvp_cq;

	CL_ASSERT(cq);

	if (IB_SUCCESS == ioctl_status) {
		mlx4_free_db(to_mctx(cq->context), MLX4_DB_TYPE_CQ, to_mcq(cq)->set_ci_db);
		mlx4_free_buf(&to_mcq(cq)->buf);

		cl_spinlock_destroy(&to_mcq(cq)->lock);
		cl_free(to_mcq(cq));
	}
}

ib_api_status_t
mlx4_pre_create_srq (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN		const	ib_srq_attr_t			*p_srq_attr,
	IN	OUT			ci_umv_buf_t			*p_umv_buf,
		OUT			ib_srq_handle_t			*ph_uvp_srq )
{
	struct mlx4_srq *srq;
	struct ibv_create_srq *p_create_srq;
	struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;
	ib_api_status_t status = IB_SUCCESS;
	size_t size = max( sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp) );

	CL_ASSERT(p_umv_buf);

	if( !p_umv_buf->p_inout_buf )
	{
		p_umv_buf->p_inout_buf = cl_malloc( size );
		if( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_memory;
		}
	}
	p_umv_buf->input_size = sizeof(struct ibv_create_srq);
	p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);
	p_umv_buf->command = TRUE;

	p_create_srq = p_umv_buf->p_inout_buf;

	// Mlx4 code:

	/* Sanity check SRQ size before proceeding */
	if (p_srq_attr->max_wr > 1 << 16 || p_srq_attr->max_sge > 64)
	{
		status = IB_INVALID_PARAMETER;
		goto err_params;
	}

	srq = cl_malloc(sizeof *srq);
	if (!srq) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_srq;
	}

	if (cl_spinlock_init(&srq->lock)) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_lock;
	}

	srq->ibv_srq.pd		= pd;
	srq->ibv_srq.context	= pd->context;

	srq->max	= __align_queue_size(p_srq_attr->max_wr + 1);
	srq->max_gs	= p_srq_attr->max_sge;
	srq->counter	= 0;

	if (mlx4_alloc_srq_buf(pd, (struct ibv_srq_attr *)p_srq_attr, srq))
	{
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_buf;
	}

	srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);
	if (!srq->db)
	{
		/* set a failure status; otherwise IB_SUCCESS would be returned */
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_db;
	}

	*srq->db = 0;

	// fill the parameters for ioctl
	p_create_srq->buf_addr = (uintptr_t) srq->buf.buf;
	p_create_srq->db_addr  = (uintptr_t) srq->db;
	p_create_srq->pd_handle = pd->handle;
	p_create_srq->max_wr = p_srq_attr->max_wr;
	p_create_srq->max_sge = p_srq_attr->max_sge;
	p_create_srq->srq_limit = p_srq_attr->srq_limit;

	*ph_uvp_srq = (ib_srq_handle_t)&srq->ibv_srq;
	goto end;

err_alloc_db:
	cl_free(srq->wrid);
	mlx4_free_buf(&srq->buf);
err_alloc_buf:
	cl_spinlock_destroy(&srq->lock);
err_lock:
	cl_free(srq);
err_alloc_srq:
	cl_free(p_umv_buf->p_inout_buf);
err_params: err_memory:
end:
	return status;
}

void
mlx4_post_create_srq (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN				ib_api_status_t			ioctl_status,
	IN	OUT			ib_srq_handle_t			*ph_uvp_srq,
	IN				ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_srq *ibsrq = (struct ibv_srq *)*ph_uvp_srq;
	struct mlx4_srq *srq = to_msrq(ibsrq);
	struct ibv_create_srq_resp *p_resp;

	UNREFERENCED_PARAMETER(h_uvp_pd);

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		// Mlx4 code:

		srq->srqn	= p_resp->srqn;
		ibsrq->handle	= p_resp->srq_handle;

		srq->max	= p_resp->max_wr;
		srq->max_gs	= p_resp->max_sge;
	}
	else
	{
		mlx4_post_destroy_srq (*ph_uvp_srq, IB_SUCCESS);
	}

	cl_free(p_resp);
	return;
}

ib_api_status_t
mlx4_pre_destroy_srq (
	IN		const	ib_srq_handle_t			h_uvp_srq )
{
#ifdef XRC_SUPPORT
	struct ibv_srq *ibsrq = (struct ibv_srq *)h_uvp_srq;
	struct mlx4_srq *srq = to_msrq(ibsrq);
	struct mlx4_cq *mcq = NULL;

	if (ibsrq->xrc_cq)
	{
		/* is an xrc_srq */
		mcq = to_mcq(ibsrq->xrc_cq);
		mlx4_cq_clean(mcq, 0, srq);
		cl_spinlock_acquire(&mcq->lock);
		mlx4_clear_xrc_srq(to_mctx(ibsrq->context), srq->srqn);
		cl_spinlock_release(&mcq->lock);
	}
#else
	UNUSED_PARAM(h_uvp_srq);
#endif
	return IB_SUCCESS;
}

void
mlx4_post_destroy_srq (
	IN		const	ib_srq_handle_t			h_uvp_srq,
	IN				ib_api_status_t			ioctl_status )
{
	struct ibv_srq	*ibsrq = (struct ibv_srq *)h_uvp_srq;
	struct mlx4_srq	*srq = to_msrq(ibsrq);

	CL_ASSERT(srq);

	if (IB_SUCCESS == ioctl_status)
	{
		mlx4_free_db(to_mctx(ibsrq->context), MLX4_DB_TYPE_RQ, srq->db);
		cl_free(srq->wrid);
		mlx4_free_buf(&srq->buf);
		cl_spinlock_destroy(&srq->lock);
		cl_free(srq);
	}
	else
	{
#ifdef XRC_SUPPORT
		if (ibsrq->xrc_cq) {
			/* is an xrc_srq */
			struct mlx4_cq	*mcq = to_mcq(ibsrq->xrc_cq);
			cl_spinlock_acquire(&mcq->lock);
			mlx4_store_xrc_srq(to_mctx(ibsrq->context), srq->srqn, srq);
			cl_spinlock_release(&mcq->lock);
		}
#endif
	}
}

static enum ibv_qp_type
__to_qp_type(ib_qp_type_t type)
{
	switch (type) {
	case IB_QPT_RELIABLE_CONN: return IBV_QPT_RC;
	case IB_QPT_UNRELIABLE_CONN: return IBV_QPT_UC;
	case IB_QPT_UNRELIABLE_DGRM: return IBV_QPT_UD;
#ifdef XRC_SUPPORT
	//case IB_QPT_XRC_CONN: return IBV_QPT_XRC;
#endif
	default: return IBV_QPT_RC;
	}
}

ib_api_status_t
mlx4_pre_create_qp (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN		const	ib_qp_create_t			*p_create_attr,
	IN	OUT			ci_umv_buf_t			*p_umv_buf,
		OUT			ib_qp_handle_t			*ph_uvp_qp )
{
	struct ibv_pd		*pd = (struct ibv_pd *)h_uvp_pd;
	struct mlx4_context	*context = to_mctx(pd->context);
	struct mlx4_qp		*qp;
	struct ibv_create_qp	*p_create_qp;
	struct ibv_qp_init_attr	attr;
	ib_api_status_t		status = IB_SUCCESS;
	int size = max( sizeof(struct ibv_create_qp), sizeof(struct ibv_create_qp_resp) );

	CL_ASSERT(p_umv_buf);

	if( !p_umv_buf->p_inout_buf )
	{
		p_umv_buf->p_inout_buf = cl_malloc(size);
		if( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_memory;
		}
	}
	p_umv_buf->input_size = sizeof(struct ibv_create_qp);
	p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp);
	p_umv_buf->command = TRUE;

	p_create_qp = p_umv_buf->p_inout_buf;

	/* convert attributes */
	memset( &attr, 0, sizeof(attr) );
	attr.send_cq			= (struct ibv_cq *)p_create_attr->h_sq_cq;
	attr.recv_cq			= (struct ibv_cq *)p_create_attr->h_rq_cq;
	attr.srq			= (struct ibv_srq*)p_create_attr->h_srq;
	attr.cap.max_send_wr		= p_create_attr->sq_depth;
	attr.cap.max_recv_wr		= p_create_attr->rq_depth;
	attr.cap.max_send_sge		= p_create_attr->sq_sge;
	attr.cap.max_recv_sge		= p_create_attr->rq_sge;
	attr.cap.max_inline_data	= p_create_attr->sq_max_inline;
	attr.qp_type			= __to_qp_type(p_create_attr->qp_type);
	attr.sq_sig_all			= p_create_attr->sq_signaled;

	// Mlx4 code:

	/* Sanity check QP size before proceeding */
	if (attr.cap.max_send_wr     > (uint32_t) context->max_qp_wr ||
	    attr.cap.max_recv_wr     > (uint32_t) context->max_qp_wr ||
	    attr.cap.max_send_sge    > (uint32_t) context->max_sge   ||
	    attr.cap.max_recv_sge    > (uint32_t) context->max_sge   ||
	    attr.cap.max_inline_data > 1024)
	{
		status = IB_INVALID_PARAMETER;
		goto end;
	}

	qp = cl_malloc(sizeof *qp);
	if (!qp) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_qp;
	}

	mlx4_calc_sq_wqe_size(&attr.cap, attr.qp_type, qp);

	/*
	 * We need to leave 2 KB + 1 WQE of headroom in the SQ to
	 * allow HW to prefetch.
	 */
	qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
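	/*
	 * Example: with 64-byte send WQEs (sq.wqe_shift == 6) this reserves
	 * (2048 >> 6) + 1 == 33 spare WQEs on top of the requested depth.
	 */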
	qp->sq.wqe_cnt = __align_queue_size(attr.cap.max_send_wr + qp->sq_spare_wqes);
	qp->rq.wqe_cnt = __align_queue_size(attr.cap.max_recv_wr);

	if (attr.srq || attr.qp_type == IBV_QPT_XRC)
		attr.cap.max_recv_wr = qp->rq.wqe_cnt = 0;
	else
	{
		if (attr.cap.max_recv_sge < 1)
			attr.cap.max_recv_sge = 1;
		if (attr.cap.max_recv_wr < 1)
			attr.cap.max_recv_wr = 1;
	}

	if (mlx4_alloc_qp_buf(pd, &attr.cap, attr.qp_type, qp))
	{
		/* set a failure status; otherwise IB_SUCCESS would be returned */
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_qp_buff;
	}

	mlx4_init_qp_indices(qp);

	if (cl_spinlock_init(&qp->sq.lock)) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_spinlock_sq;
	}
	if (cl_spinlock_init(&qp->rq.lock)) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_spinlock_rq;
	}

	// fill qp fields
	if (!attr.srq && attr.qp_type != IBV_QPT_XRC) {
		qp->db = mlx4_alloc_db(context, MLX4_DB_TYPE_RQ);
		if (!qp->db) {
			status = IB_INSUFFICIENT_MEMORY;
			goto err_db;
		}

		*qp->db = 0;
	}
	if (attr.sq_sig_all)
		qp->sq_signal_bits = cl_hton32(MLX4_WQE_CTRL_CQ_UPDATE);
	else
		qp->sq_signal_bits = 0;

	// fill the rest of qp fields
	qp->ibv_qp.pd = pd;
	qp->ibv_qp.context = pd->context;
	qp->ibv_qp.send_cq = attr.send_cq;
	qp->ibv_qp.recv_cq = attr.recv_cq;
	qp->ibv_qp.srq = attr.srq;
	qp->ibv_qp.state = IBV_QPS_RESET;
	qp->ibv_qp.qp_type = attr.qp_type;

	// fill request fields
	p_create_qp->buf_addr = (uintptr_t) qp->buf.buf;
	if (!attr.srq && attr.qp_type != IBV_QPT_XRC)
		p_create_qp->db_addr = (uintptr_t) qp->db;
	else
		p_create_qp->db_addr = 0;

	p_create_qp->pd_handle = pd->handle;
	p_create_qp->send_cq_handle = attr.send_cq->handle;
	p_create_qp->recv_cq_handle = attr.recv_cq->handle;
	p_create_qp->srq_handle = attr.qp_type == IBV_QPT_XRC ?
		(attr.xrc_domain ? attr.xrc_domain->handle : 0) :
		(attr.srq ? attr.srq->handle : 0);

	p_create_qp->max_send_wr = attr.cap.max_send_wr;
	p_create_qp->max_recv_wr = attr.cap.max_recv_wr;
	p_create_qp->max_send_sge = attr.cap.max_send_sge;
	p_create_qp->max_recv_sge = attr.cap.max_recv_sge;
	p_create_qp->max_inline_data = attr.cap.max_inline_data;
	p_create_qp->sq_sig_all = (uint8_t)attr.sq_sig_all;
	p_create_qp->qp_type = attr.qp_type;
	p_create_qp->is_srq = (uint8_t)(attr.qp_type == IBV_QPT_XRC ?
									!!attr.xrc_domain : !!attr.srq);

	p_create_qp->log_sq_stride   = (uint8_t)qp->sq.wqe_shift;
	for (p_create_qp->log_sq_bb_count = 0;
	     qp->sq.wqe_cnt > 1 << p_create_qp->log_sq_bb_count;
	     ++p_create_qp->log_sq_bb_count)
		; /* nothing */
	p_create_qp->sq_no_prefetch = 0;

	*ph_uvp_qp = (ib_qp_handle_t)&qp->ibv_qp;
	goto end;

err_db:
	cl_spinlock_destroy(&qp->rq.lock);
err_spinlock_rq:
	cl_spinlock_destroy(&qp->sq.lock);
err_spinlock_sq:
	cl_free(qp->sq.wrid);
	if (qp->rq.wqe_cnt)
		cl_free(qp->rq.wrid);
	mlx4_free_buf(&qp->buf);
err_alloc_qp_buff:
	cl_free(qp);
err_alloc_qp:
	cl_free(p_umv_buf->p_inout_buf);
err_memory:
end:
	return status;
}

ib_api_status_t
mlx4_post_create_qp (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN				ib_api_status_t			ioctl_status,
	IN	OUT			ib_qp_handle_t			*ph_uvp_qp,
	IN				ci_umv_buf_t			*p_umv_buf )
{
	struct mlx4_qp			*qp = (struct mlx4_qp *)*ph_uvp_qp;
	struct ibv_pd			*pd = (struct ibv_pd *)h_uvp_pd;
	struct ibv_context		*context = pd->context;
	struct ibv_create_qp_resp	*p_resp;
	ib_api_status_t status = IB_SUCCESS;

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		// Mlx4 code:

		struct ibv_qp_cap	cap;

		cap.max_recv_sge	= p_resp->max_recv_sge;
		cap.max_send_sge	= p_resp->max_send_sge;
		cap.max_recv_wr		= p_resp->max_recv_wr;
		cap.max_send_wr		= p_resp->max_send_wr;
		cap.max_inline_data	= p_resp->max_inline_data;

		qp->ibv_qp.handle	= p_resp->qp_handle;
		qp->ibv_qp.qp_num	= p_resp->qpn;

		qp->rq.wqe_cnt	= cap.max_recv_wr;
		qp->rq.max_gs	= cap.max_recv_sge;

		/* adjust rq maxima to not exceed reported device maxima */
		cap.max_recv_wr = min((uint32_t) to_mctx(context)->max_qp_wr, cap.max_recv_wr);
		cap.max_recv_sge = min((uint32_t) to_mctx(context)->max_sge, cap.max_recv_sge);

		qp->rq.max_post = cap.max_recv_wr;
		//qp->rq.max_gs = cap.max_recv_sge;  - RIB : add this ?
		mlx4_set_sq_sizes(qp, &cap, qp->ibv_qp.qp_type);

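		/*
		 * Cache the QPN pre-shifted and byte-swapped into the form the
		 * send doorbell expects, so the post-send hot path can write
		 * it without further conversion.
		 */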
		qp->doorbell_qpn    = cl_hton32(qp->ibv_qp.qp_num << 8);

		if (mlx4_store_qp(to_mctx(context), qp->ibv_qp.qp_num, qp))
		{
			mlx4_post_destroy_qp(*ph_uvp_qp, IB_SUCCESS);
			status = IB_INSUFFICIENT_MEMORY;
			/* qp was just freed; skip the trace that dereferences it */
			goto end;
		}
		MLX4_PRINT( TRACE_LEVEL_INFORMATION, MLX4_DBG_QP,
			("qpn %#x, buf %p, db_rec %p, sq %d:%d, rq %d:%d\n",
			qp->ibv_qp.qp_num, qp->buf.buf, qp->db,
			qp->sq.head, qp->sq.tail, qp->rq.head, qp->rq.tail ));
	}
	else
	{
		mlx4_post_destroy_qp(*ph_uvp_qp, IB_SUCCESS);
	}

end:
	cl_free(p_resp);
	return status;
}

ib_api_status_t
mlx4_pre_modify_qp (
	IN		const	ib_qp_handle_t			h_uvp_qp,
	IN		const	ib_qp_mod_t			*p_modify_attr,
	IN	OUT			ci_umv_buf_t			*p_umv_buf )
{
	ib_api_status_t status = IB_SUCCESS;

	UNREFERENCED_PARAMETER(h_uvp_qp);
	UNREFERENCED_PARAMETER(p_modify_attr);

	CL_ASSERT(p_umv_buf);

	if( !p_umv_buf->p_inout_buf )
	{
		p_umv_buf->p_inout_buf = cl_malloc(sizeof(struct ibv_modify_qp_resp));
		if( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_memory;
		}
	}
	p_umv_buf->input_size = 0;
	p_umv_buf->output_size = sizeof(struct ibv_modify_qp_resp);
	p_umv_buf->command = TRUE;

err_memory:
	return status;
}

void
mlx4_post_query_qp (
	IN				ib_qp_handle_t			h_uvp_qp,
	IN				ib_api_status_t			ioctl_status,
	IN	OUT			ib_qp_attr_t			*p_query_attr,
	IN	OUT			ci_umv_buf_t			*p_umv_buf )
{
	struct mlx4_qp *qp = (struct mlx4_qp *)h_uvp_qp;

	UNREFERENCED_PARAMETER(p_umv_buf);

	if (IB_SUCCESS == ioctl_status)
	{
		p_query_attr->sq_max_inline	= qp->max_inline_data;
		p_query_attr->sq_sge		= qp->sq.max_gs;
		p_query_attr->sq_depth		= qp->sq.max_post;
		p_query_attr->rq_sge		= qp->rq.max_gs;
		p_query_attr->rq_depth		= qp->rq.max_post;
	}
}

void
mlx4_post_modify_qp (
	IN		const	ib_qp_handle_t			h_uvp_qp,
	IN				ib_api_status_t			ioctl_status,
	IN	OUT			ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_qp			*qp = (struct ibv_qp *)h_uvp_qp;
	struct ibv_modify_qp_resp	*p_resp;

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		// Mlx4 code:

		if (qp->state == IBV_QPS_RESET &&
		    p_resp->attr_mask & IBV_QP_STATE &&
		    p_resp->qp_state == IBV_QPS_INIT)
		{
			mlx4_qp_init_sq_ownership(to_mqp(qp));
		}

		if (p_resp->attr_mask & IBV_QP_STATE) {
			qp->state = p_resp->qp_state;
		}

		if (p_resp->attr_mask & IBV_QP_STATE &&
		    p_resp->qp_state == IBV_QPS_RESET)
		{
			mlx4_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
						qp->srq ? to_msrq(qp->srq) : NULL);
			if (qp->send_cq != qp->recv_cq)
				mlx4_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);

			mlx4_init_qp_indices(to_mqp(qp));
			if (!qp->srq && qp->qp_type != IBV_QPT_XRC)
				*to_mqp(qp)->db = 0;
		}
	}

	cl_free (p_resp);
	return;
}
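
/*
 * On a successful transition back to RESET, the code above purges this
 * QP's completions from both CQs, rewinds the queue indices, and clears
 * the receive doorbell record, leaving the QP as if freshly created.
 */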

static void
__mlx4_lock_cqs(struct ibv_qp *qp)
{
	struct mlx4_cq *send_cq = to_mcq(qp->send_cq);
	struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq);

	if (send_cq == recv_cq)
		cl_spinlock_acquire(&send_cq->lock);
	else if (send_cq->cqn < recv_cq->cqn) {
		cl_spinlock_acquire(&send_cq->lock);
		cl_spinlock_acquire(&recv_cq->lock);
	} else {
		cl_spinlock_acquire(&recv_cq->lock);
		cl_spinlock_acquire(&send_cq->lock);
	}
}

static void
__mlx4_unlock_cqs(struct ibv_qp *qp)
{
	struct mlx4_cq *send_cq = to_mcq(qp->send_cq);
	struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq);

	if (send_cq == recv_cq)
		cl_spinlock_release(&send_cq->lock);
	else if (send_cq->cqn < recv_cq->cqn) {
		cl_spinlock_release(&recv_cq->lock);
		cl_spinlock_release(&send_cq->lock);
	} else {
		cl_spinlock_release(&send_cq->lock);
		cl_spinlock_release(&recv_cq->lock);
	}
}
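
/*
 * Both helpers take the two CQ locks in a fixed order -- ascending CQN --
 * so that two threads tearing down QPs whose send/recv CQs are swapped
 * cannot deadlock in an AB/BA acquire.
 */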

ib_api_status_t
mlx4_pre_destroy_qp (
	IN		const	ib_qp_handle_t			h_uvp_qp )
{
	struct ibv_qp *qp = (struct ibv_qp*)h_uvp_qp;

	mlx4_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
				qp->srq ? to_msrq(qp->srq) : NULL);
	if (qp->send_cq != qp->recv_cq)
		mlx4_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);

	__mlx4_lock_cqs(qp);
	mlx4_clear_qp(to_mctx(qp->context), qp->qp_num);
	__mlx4_unlock_cqs(qp);

	return IB_SUCCESS;
}

void
mlx4_post_destroy_qp (
	IN		const	ib_qp_handle_t			h_uvp_qp,
	IN				ib_api_status_t			ioctl_status )
{
	struct ibv_qp *ibqp = (struct ibv_qp *)h_uvp_qp;
	struct mlx4_qp *qp = to_mqp(ibqp);

	CL_ASSERT(h_uvp_qp);

	if (IB_SUCCESS == ioctl_status)
	{
		if (!ibqp->srq && ibqp->qp_type != IBV_QPT_XRC)
			mlx4_free_db(to_mctx(ibqp->context), MLX4_DB_TYPE_RQ, qp->db);

		cl_spinlock_destroy(&qp->sq.lock);
		cl_spinlock_destroy(&qp->rq.lock);

		MLX4_PRINT( TRACE_LEVEL_INFORMATION, MLX4_DBG_QP,
			("qpn %#x, buf %p, sq %d:%d, rq %d:%d\n", qp->ibv_qp.qp_num, qp->buf.buf,
			qp->sq.head, qp->sq.tail, qp->rq.head, qp->rq.tail ));
		cl_free(qp->sq.wrid);
		if (qp->rq.wqe_cnt)
			cl_free(qp->rq.wrid);
		mlx4_free_buf(&qp->buf);
		cl_free(qp);
	}
	else
	{
		/* the kernel failed to destroy the QP: re-register it */
		__mlx4_lock_cqs(ibqp);
		mlx4_store_qp(to_mctx(ibqp->context), ibqp->qp_num, qp);
		__mlx4_unlock_cqs(ibqp);
	}
}

void
mlx4_nd_modify_qp (
	IN		const	ib_qp_handle_t			h_uvp_qp,
		OUT			void**				pp_outbuf,
		OUT			DWORD*				p_size )
{
	struct ibv_qp *ibv_qp = (struct ibv_qp *)h_uvp_qp;

	*(uint32_t**)pp_outbuf = (uint32_t*)&ibv_qp->state;
	*p_size = sizeof(ibv_qp->state);
}

static ib_qp_state_t __from_qp_state(enum ibv_qp_state state)
{
	switch (state) {
		case IBV_QPS_RESET: return IB_QPS_RESET;
		case IBV_QPS_INIT: return IB_QPS_INIT;
		case IBV_QPS_RTR: return IB_QPS_RTR;
		case IBV_QPS_RTS: return IB_QPS_RTS;
		case IBV_QPS_SQD: return IB_QPS_SQD;
		case IBV_QPS_SQE: return IB_QPS_SQERR;
		case IBV_QPS_ERR: return IB_QPS_ERROR;
		default: return IB_QPS_TIME_WAIT;
	}
}

uint32_t
mlx4_nd_get_qp_state (
	IN		const	ib_qp_handle_t			h_uvp_qp )
{
	struct ibv_qp *ibv_qp = (struct ibv_qp *)h_uvp_qp;

	return __from_qp_state(ibv_qp->state);
}

static uint8_t
__gid_to_index_lookup (
	IN			ib_ca_attr_t				*p_ca_attr,
	IN			uint8_t					port_num,
	IN			uint8_t					*raw_gid )
{
	ib_gid_t *p_gid_table = NULL;
	uint8_t i, index = 0;
	uint16_t num_gids;

	p_gid_table = p_ca_attr->p_port_attr[port_num-1].p_gid_table;
	CL_ASSERT (p_gid_table);

	num_gids = p_ca_attr->p_port_attr[port_num-1].num_gids;

	for (i = 0; i < num_gids; i++)
	{
		/* cl_memcmp follows memcmp semantics: 0 means the GIDs match */
		if (!cl_memcmp (raw_gid, p_gid_table[i].raw, 16))
		{
			index = i;
			break;
		}
	}
	return index;
}

static enum ibv_rate __to_rate(uint8_t rate)
{
	if (rate == IB_PATH_RECORD_RATE_2_5_GBS) return IBV_RATE_2_5_GBPS;
	if (rate == IB_PATH_RECORD_RATE_5_GBS) return IBV_RATE_5_GBPS;
	if (rate == IB_PATH_RECORD_RATE_10_GBS) return IBV_RATE_10_GBPS;
	if (rate == IB_PATH_RECORD_RATE_20_GBS) return IBV_RATE_20_GBPS;
	if (rate == IB_PATH_RECORD_RATE_30_GBS) return IBV_RATE_30_GBPS;
	if (rate == IB_PATH_RECORD_RATE_40_GBS) return IBV_RATE_40_GBPS;
	if (rate == IB_PATH_RECORD_RATE_60_GBS) return IBV_RATE_60_GBPS;
	if (rate == IB_PATH_RECORD_RATE_80_GBS) return IBV_RATE_80_GBPS;
	if (rate == IB_PATH_RECORD_RATE_120_GBS) return IBV_RATE_120_GBPS;
	return IBV_RATE_MAX;
}

inline void
__grh_get_ver_class_flow(
	IN		const	ib_net32_t			ver_class_flow,
		OUT			uint8_t* const			p_ver OPTIONAL,
		OUT			uint8_t* const			p_tclass OPTIONAL,
		OUT			net32_t* const			p_flow_lbl OPTIONAL )
{
	ib_net32_t tmp_ver_class_flow;

	tmp_ver_class_flow = cl_ntoh32( ver_class_flow );

	if (p_ver)
		*p_ver = (uint8_t)(tmp_ver_class_flow >> 28);

	if (p_tclass)
		*p_tclass = (uint8_t)(tmp_ver_class_flow >> 20);

	if (p_flow_lbl)
		*p_flow_lbl = (ver_class_flow & CL_HTON32( 0x000FFFFF ));
}
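
/*
 * The GRH's first dword packs version into bits 31..28, traffic class
 * into bits 27..20 and flow label into bits 19..0.  For a host-order
 * value 0x61234567: ver == 0x6, tclass == 0x12, flow label == 0x34567
 * (the flow label is handed back still in network byte order).
 */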

static ib_api_status_t
__to_ah (
	IN				ib_ca_attr_t			*p_ca_attr,
	IN		const	ib_av_attr_t			*p_av_attr,
		OUT			struct ibv_ah_attr		*p_attr )
{
	if (p_av_attr->port_num == 0 ||
		p_av_attr->port_num > p_ca_attr->num_ports) {
		MLX4_PRINT(TRACE_LEVEL_WARNING ,MLX4_DBG_AV ,
			(" invalid port number specified (%d)\n",p_av_attr->port_num));
		return IB_INVALID_PORT;
	}

	p_attr->port_num = p_av_attr->port_num;
	p_attr->sl = p_av_attr->sl;
	p_attr->dlid = cl_ntoh16 (p_av_attr->dlid);
	p_attr->static_rate = __to_rate(p_av_attr->static_rate);
	p_attr->src_path_bits = p_av_attr->path_bits;

	/* For a global destination or multicast address: */
	if (p_av_attr->grh_valid)
	{
		p_attr->is_global	= TRUE;
		p_attr->grh.hop_limit	= p_av_attr->grh.hop_limit;
		__grh_get_ver_class_flow( p_av_attr->grh.ver_class_flow, NULL,
							&p_attr->grh.traffic_class, &p_attr->grh.flow_label );
		p_attr->grh.sgid_index	= __gid_to_index_lookup (p_ca_attr, p_av_attr->port_num,
												(uint8_t *) p_av_attr->grh.src_gid.raw);
		cl_memcpy (p_attr->grh.dgid.raw, p_av_attr->grh.dest_gid.raw, 16);
	}
	else
	{
		p_attr->is_global = FALSE;
	}
	return IB_SUCCESS;
}

static void
__set_av_params(struct mlx4_ah *ah, struct ibv_pd *pd, struct ibv_ah_attr *attr)
{
	ah->av.port_pd = cl_hton32(to_mpd(pd)->pdn | (attr->port_num << 24));
	ah->av.g_slid  = attr->src_path_bits;
	ah->av.dlid    = cl_hton16(attr->dlid);
	if (attr->static_rate) {
		ah->av.stat_rate = (uint8_t)(attr->static_rate + MLX4_STAT_RATE_OFFSET);
		/* XXX check rate cap? */
	}
	ah->av.sl_tclass_flowlabel = cl_hton32(attr->sl << 28);
	if (attr->is_global)
	{
		ah->av.g_slid |= 0x80;
		ah->av.gid_index = attr->grh.sgid_index;
		ah->av.hop_limit = attr->grh.hop_limit;
		ah->av.sl_tclass_flowlabel |=
			cl_hton32((attr->grh.traffic_class << 20) |
				    attr->grh.flow_label);
		cl_memcpy(ah->av.dgid, attr->grh.dgid.raw, 16);
	}
}
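
/*
 * AV field packing, as visible in the code above: port_pd carries the PD
 * number in its low 24 bits and the port number in the top byte; bit 7
 * of g_slid flags the presence of a GRH while its low bits hold the
 * source path bits; and sl_tclass_flowlabel packs SL[31:28], traffic
 * class[27:20] and flow label[19:0], mirroring the GRH dword layout.
 */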

ib_api_status_t
mlx4_pre_create_ah (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN		const	ib_av_attr_t			*p_av_attr,
	IN	OUT			ci_umv_buf_t			*p_umv_buf,
		OUT			ib_av_handle_t			*ph_uvp_av )
{
	struct mlx4_ah *ah;
	struct ibv_ah_attr attr;
	struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;
	ib_api_status_t status = IB_SUCCESS;

	UNREFERENCED_PARAMETER(p_umv_buf);

	if (pd->context->p_hca_attr == NULL) {
		status = IB_ERROR;
		goto end;
	}

	ah = cl_malloc(sizeof *ah);
	if (!ah) {
		status = IB_INSUFFICIENT_MEMORY;
		goto end;
	}

	// sanity check
	if (p_av_attr->port_num == 0 ||
		p_av_attr->port_num > pd->context->p_hca_attr->num_ports)
	{
		status = IB_INVALID_PORT;
		goto err_av;
	}

	// convert parameters
	cl_memset(&attr, 0, sizeof(attr));
	status = __to_ah(pd->context->p_hca_attr, p_av_attr, &attr);
	if (status)
		goto err_av;

	ah->ibv_ah.pd = pd;
	ah->ibv_ah.context = pd->context;
	cl_memcpy(&ah->ibv_ah.av_attr, p_av_attr, sizeof (ib_av_attr_t));

	cl_memset(&ah->av, 0, sizeof ah->av);
	__set_av_params(ah, pd, &attr);

	*ph_uvp_av = (ib_av_handle_t)&ah->ibv_ah;
	status = IB_VERBS_PROCESSING_DONE;
	goto end;

err_av:
	/* don't leak the AH on the error paths above */
	cl_free(ah);
end:
	return status;
}

ib_api_status_t
mlx4_pre_query_ah (
	IN		const	ib_av_handle_t			h_uvp_av,
	IN	OUT			ci_umv_buf_t			*p_umv_buf )
{
	UNREFERENCED_PARAMETER(h_uvp_av);
	UNREFERENCED_PARAMETER(p_umv_buf);

	return IB_VERBS_PROCESSING_DONE;
}

void
mlx4_post_query_ah (
	IN		const	ib_av_handle_t			h_uvp_av,
	IN				ib_api_status_t			ioctl_status,
	IN	OUT			ib_av_attr_t			*p_addr_vector,
	IN	OUT			ib_pd_handle_t			*ph_pd,
	IN	OUT			ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av;

	UNREFERENCED_PARAMETER(p_umv_buf);

	CL_ASSERT(h_uvp_av && p_addr_vector);

	if (ioctl_status == IB_SUCCESS)
	{
		cl_memcpy(p_addr_vector, &ah->av_attr, sizeof(ib_av_attr_t));
		if (ph_pd)
			*ph_pd = (ib_pd_handle_t)ah->pd;
	}
}

ib_api_status_t
mlx4_pre_modify_ah (
	IN		const	ib_av_handle_t			h_uvp_av,
	IN		const	ib_av_attr_t			*p_addr_vector,
	IN	OUT			ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av;
	struct ibv_ah_attr attr;
	ib_api_status_t status;

	UNREFERENCED_PARAMETER(p_umv_buf);

	CL_ASSERT (h_uvp_av);

	status = __to_ah(ah->context->p_hca_attr, p_addr_vector, &attr);
	if (status)
		return status;

	__set_av_params(to_mah(ah), ah->pd, &attr);
	cl_memcpy(&ah->av_attr, p_addr_vector, sizeof(ib_av_attr_t));

	return IB_VERBS_PROCESSING_DONE;
}

ib_api_status_t
mlx4_pre_destroy_ah (
	IN		const	ib_av_handle_t			h_uvp_av )
{
	struct ibv_ah *ah = (struct ibv_ah *)h_uvp_av;

	CL_ASSERT(ah);

	cl_free(to_mah(ah));

	return IB_VERBS_PROCESSING_DONE;
}
1378 \r
#ifdef XRC_SUPPORT
ib_api_status_t
mlx4_pre_create_xrc_srq (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN		const	ib_xrcd_handle_t		h_uvp_xrcd,
	IN		const	ib_srq_attr_t			*p_srq_attr,
	IN	OUT			ci_umv_buf_t			*p_umv_buf,
		OUT			ib_srq_handle_t			*ph_uvp_srq )
{
	struct mlx4_srq *srq;
	struct ibv_create_srq *p_create_srq;
	struct ibv_pd *pd = (struct ibv_pd *)h_uvp_pd;
	struct ibv_xrc_domain *xrc_domain = (struct ibv_xrc_domain *)h_uvp_xrcd;
	ib_api_status_t status = IB_SUCCESS;
	size_t size = max( sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp) );

	CL_ASSERT(p_umv_buf);

	if( !p_umv_buf->p_inout_buf )
	{
		p_umv_buf->p_inout_buf = cl_malloc( size );
		if( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_memory;
		}
	}
	p_umv_buf->input_size = sizeof(struct ibv_create_srq);
	p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp);
	p_umv_buf->command = TRUE;

	p_create_srq = p_umv_buf->p_inout_buf;

	// Mlx4 code:

	/* Sanity check SRQ size before proceeding */
	if (p_srq_attr->max_wr > (1 << 16) || p_srq_attr->max_sge > 64)
	{
		status = IB_INVALID_PARAMETER;
		goto err_params;
	}

	srq = cl_malloc(sizeof *srq);
	if (!srq) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_srq;
	}

	if (cl_spinlock_init(&srq->lock)) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_lock;
	}

	srq->ibv_srq.pd = pd;
	srq->ibv_srq.context = pd->context;

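	/*
	 * Size the queue.  The extra entry (max_wr + 1) is the usual ring-buffer
	 * reservation so a full queue can be distinguished from an empty one;
	 * __align_queue_size then rounds the count up (to a power of two in the
	 * stock mlx4 library this code is derived from).
	 */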
	srq->max = __align_queue_size(p_srq_attr->max_wr + 1);
	srq->max_gs = p_srq_attr->max_sge;
	srq->counter = 0;

	if (mlx4_alloc_srq_buf(pd, (struct ibv_srq_attr *)p_srq_attr, srq))
	{
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_buf;
	}

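	/*
	 * Allocate and zero the SRQ doorbell record; the HCA reads the receive
	 * counter from this shared location, so it must start out cleared.
	 */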
	srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);
	if (!srq->db)
	{
		status = IB_INSUFFICIENT_MEMORY;
		goto err_alloc_db;
	}

	*srq->db = 0;

	// fill the parameters for ioctl
	p_create_srq->buf_addr = (uintptr_t) srq->buf.buf;
	p_create_srq->db_addr  = (uintptr_t) srq->db;
	p_create_srq->pd_handle = pd->handle;
	p_create_srq->max_wr = p_srq_attr->max_wr;
	p_create_srq->max_sge = p_srq_attr->max_sge;
	p_create_srq->srq_limit = p_srq_attr->srq_limit;

	*ph_uvp_srq = (ib_srq_handle_t)&srq->ibv_srq;
	goto end;

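	/*
	 * Failure unwind: labels run in reverse order of the allocations above,
	 * each falling through to release the resources acquired before it.
	 */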
err_alloc_db:
	cl_free(srq->wrid);
	mlx4_free_buf(&srq->buf);
err_alloc_buf:
	cl_spinlock_destroy(&srq->lock);
err_lock:
	cl_free(srq);
err_alloc_srq:
err_params:
	/* the function owns the command buffer on failure, so release it
	   and clear the stale pointer */
	cl_free(p_umv_buf->p_inout_buf);
	p_umv_buf->p_inout_buf = NULL;
err_memory:
end:
	return status;
}

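/*
 * Post-verb, called once the create-SRQ ioctl has returned.  On success the
 * kernel-assigned SRQ number and handle plus the final queue sizes are taken
 * from the response; on failure the user-space SRQ state is torn down.
 */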
ib_api_status_t
mlx4_post_create_xrc_srq (
	IN		const	ib_pd_handle_t			h_uvp_pd,
	IN				ib_api_status_t			ioctl_status,
	IN	OUT			ib_srq_handle_t			*ph_uvp_srq,
	IN				ci_umv_buf_t			*p_umv_buf )
{
	struct mlx4_srq *srq = (struct mlx4_srq *)*ph_uvp_srq;
	struct ibv_create_srq_resp *p_resp;
	ib_api_status_t status = IB_SUCCESS;

	UNREFERENCED_PARAMETER(h_uvp_pd);

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		// Mlx4 code:

		srq->ibv_srq.xrc_srq_num = srq->srqn = p_resp->srqn;
		srq->ibv_srq.handle = p_resp->srq_handle;

		srq->max = p_resp->max_wr;
		srq->max_gs = p_resp->max_sge;

		if (mlx4_store_xrc_srq(to_mctx(srq->ibv_srq.context), srq->ibv_srq.xrc_srq_num, srq))
		{
			mlx4_post_destroy_srq(*ph_uvp_srq, IB_SUCCESS);
			status = IB_INSUFFICIENT_MEMORY;
		}
	}
	else
	{
		mlx4_post_destroy_srq(*ph_uvp_srq, IB_SUCCESS);
	}

	cl_free( p_resp );
	return status;
}

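/*
 * Opening an XRC domain follows the same two-step pattern: the pre-verb
 * allocates the user-space object and the ioctl command block, and the
 * post-verb below records the kernel handle and XRC domain number.
 */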
ib_api_status_t
mlx4_pre_open_xrc_domain (
	IN		const	ib_ca_handle_t			h_uvp_ca,
	IN		const	uint32_t				oflag,
	IN	OUT			ci_umv_buf_t			*p_umv_buf,
		OUT			ib_xrcd_handle_t		*ph_uvp_xrcd )
{
	struct mlx4_xrc_domain *xrcd;
	struct ibv_context *context = (struct ibv_context *)h_uvp_ca;
	struct ibv_open_xrc_domain *p_open_xrcd;
	ib_api_status_t status = IB_SUCCESS;
	size_t size = max( sizeof(struct ibv_open_xrc_domain), sizeof(struct ibv_open_xrc_domain_resp) );

	CL_ASSERT(h_uvp_ca && p_umv_buf);

	if( !p_umv_buf->p_inout_buf )
	{
		p_umv_buf->p_inout_buf = cl_malloc( size );
		if( !p_umv_buf->p_inout_buf )
		{
			status = IB_INSUFFICIENT_MEMORY;
			goto err_umv_buf;
		}
	}
	p_umv_buf->input_size = sizeof(struct ibv_open_xrc_domain);
	p_umv_buf->output_size = sizeof(struct ibv_open_xrc_domain_resp);
	p_umv_buf->command = TRUE;

	p_open_xrcd = p_umv_buf->p_inout_buf;

	// Mlx4 code:

	xrcd = cl_malloc(sizeof *xrcd);
	if (!xrcd) {
		status = IB_INSUFFICIENT_MEMORY;
		goto err_xrc;
	}

	xrcd->ibv_xrcd.context = context;

	p_open_xrcd->oflags = oflag;

	*ph_uvp_xrcd = (ib_xrcd_handle_t)&xrcd->ibv_xrcd;
	goto end;

err_xrc:
	cl_free(p_umv_buf->p_inout_buf);
	p_umv_buf->p_inout_buf = NULL;
err_umv_buf:
end:
	return status;
}

void
mlx4_post_open_xrc_domain (
	IN		const	ib_ca_handle_t			h_uvp_ca,
	IN				ib_api_status_t			ioctl_status,
	IN	OUT			ib_xrcd_handle_t		*ph_uvp_xrcd,
	IN				ci_umv_buf_t			*p_umv_buf )
{
	struct ibv_xrc_domain *xrcd = (struct ibv_xrc_domain *)*ph_uvp_xrcd;
	struct ibv_open_xrc_domain_resp *p_resp;

	UNREFERENCED_PARAMETER(h_uvp_ca);

	CL_ASSERT(p_umv_buf && p_umv_buf->p_inout_buf);

	p_resp = p_umv_buf->p_inout_buf;

	if (IB_SUCCESS == ioctl_status)
	{
		// Mlx4 code:

		xrcd->handle = p_resp->xrcd_handle;
		to_mxrcd(xrcd)->xrcdn = p_resp->xrcdn;
	}
	else
	{
		cl_free(to_mxrcd(xrcd));
	}

	cl_free(p_resp);
}

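/*
 * The user-space mirror of the XRC domain is freed only when the kernel
 * reports a successful close; otherwise the object remains valid.
 */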
void
mlx4_post_close_xrc_domain (
	IN		const	ib_xrcd_handle_t		h_uvp_xrcd,
	IN				ib_api_status_t			ioctl_status )
{
	struct ibv_xrc_domain *xrcd = (struct ibv_xrc_domain *)h_uvp_xrcd;

	CL_ASSERT(xrcd);

	if (IB_SUCCESS == ioctl_status) {
		cl_free(to_mxrcd(xrcd));
	}
}
#endif