/*
 * Copyright (c) 2002, Network Appliance, Inc. All rights reserved.
 *
 * This Software is licensed under the terms of the "Common Public
 * License" a copy of which is in the file LICENSE.txt in the root
 * directory. The license is also available from the Open Source
 * Initiative, see http://www.opensource.org/licenses/cpl.php.
 */

/**********************************************************************
 *
 * MODULE: dapl_ibal_util.c
 *
 * PURPOSE: Utility routines for access to IBAL APIs
 *
 **********************************************************************/

#include "dapl_adapter_util.h"
#include "dapl_evd_util.h"
#include "dapl_cr_util.h"
#include "dapl_lmr_util.h"
#include "dapl_rmr_util.h"
#include "dapl_cookie.h"
#include "dapl_ring_buffer_util.h"

#ifndef NO_NAME_SERVICE
#include "dapl_name_service.h"
#endif /* NO_NAME_SERVICE */

#define DAPL_IBAL_MAX_CA    4

#define DAT_ADAPTER_NAME    "InfiniHost (Tavor)"
#define DAT_VENDOR_NAME     "Mellanox Technology Inc."

/*
 * Root data structure for DAPL_IBAL.
 */
dapl_ibal_root_t    dapl_ibal_root;
DAPL_HCA_NAME       dapl_ibal_hca_name_array [DAPL_IBAL_MAX_CA] =
                        {"IbalHca0", "IbalHca1", "IbalHca2", "IbalHca3"};
ib_net64_t          *gp_ibal_ca_guid_tbl = NULL;

/*
 * DAT spec does not tie max_mtu_size with IB MTU
 */
static ib_net32_t dapl_ibal_mtu_table[6] = {0, 256, 512, 1024, 2048, 4096};
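
/*
 * Illustrative note (not part of the original source): IBAL reports a
 * port's MTU as the IB MTU enum (1..5), so the byte size is recovered by
 * indexing the table above; e.g. an attribute value of 4 (the IB encoding
 * for a 2K MTU):
 *
 *     ib_net32_t mtu_bytes = dapl_ibal_mtu_table[4];   // 2048
 */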

int g_loopback_connection = 0;

static cl_status_t
dapli_init_root_ca_list(
    IN  dapl_ibal_root_t    *root )
{
    cl_status_t status;

    cl_qlist_init (&root->ca_head);
    status = cl_spinlock_init (&root->ca_lock);
    if (status == CL_SUCCESS)
    {
        /*
         * Get the time ready to go, but don't start here
         */
        root->shutdown = FALSE;
        root->initialized = TRUE;
    }
    else
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DiIRCL: cl_spinlock_init returned %d\n", status);
        root->initialized = FALSE;
    }
    return status;
}

static void
dapli_destroy_root_ca_list(
    IN  dapl_ibal_root_t    *root )
{
    root->initialized = FALSE;

    /*
     * At this point the lock should not be necessary
     */
    if (!cl_is_qlist_empty (&root->ca_head) )
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> Destroying nonempty ca list (%s)\n",
                       "DiDRCL");
    }
    cl_spinlock_destroy (&root->ca_lock);
}

static void
dapli_shutdown_port_access(
    IN  dapl_ibal_ca_t  *ca )
{
    dapl_ibal_port_t    *p_port;

    TAKE_LOCK( ca->port_lock );

    while ( ! cl_is_qlist_empty( &ca->port_head ) )
    {
        p_port = (dapl_ibal_port_t *)cl_qlist_remove_head( &ca->port_head );
        RELEASE_LOCK( ca->port_lock );

        REMOVE_REFERENCE( &p_port->refs );
        REMOVE_REFERENCE( &p_port->ca->refs );

        dapl_os_free (p_port, sizeof (dapl_ibal_port_t));

        TAKE_LOCK( ca->port_lock );
    }

    RELEASE_LOCK( ca->port_lock );
}

static void dapli_shutdown_ca_access (void)
{
    dapl_ibal_ca_t  *ca;

    if ( dapl_ibal_root.initialized == FALSE )
    {
        goto destroy_root;
    }

    TAKE_LOCK (dapl_ibal_root.ca_lock);

    while ( ! cl_is_qlist_empty (&dapl_ibal_root.ca_head) )
    {
        ca = (dapl_ibal_ca_t *) cl_qlist_remove_head (&dapl_ibal_root.ca_head);

        if (ca->p_ca_attr)
        {
            dapl_os_free (ca->p_ca_attr, sizeof (ib_ca_attr_t));
        }

        RELEASE_LOCK (dapl_ibal_root.ca_lock);

        dapli_shutdown_port_access (ca);
        REMOVE_REFERENCE (&ca->refs);

        TAKE_LOCK (dapl_ibal_root.ca_lock);
    }

    RELEASE_LOCK (dapl_ibal_root.ca_lock);

destroy_root:
    /*
     * Destroy the root CA list and list lock
     */
    dapli_destroy_root_ca_list (&dapl_ibal_root);

    /*
     * Signal we're all done and wake any waiter
     */
    dapl_ibal_root.shutdown = FALSE;
}

dapl_ibal_evd_cb_t *
dapli_find_evd_cb_by_context(
    IN  void            *context,
    IN  dapl_ibal_ca_t  *ca)
{
    dapl_ibal_evd_cb_t *evd_cb = NULL;

    TAKE_LOCK( ca->evd_cb_lock );

    evd_cb = (dapl_ibal_evd_cb_t *) cl_qlist_head( &ca->evd_cb_head );
    while ( &evd_cb->next != cl_qlist_end( &ca->evd_cb_head ) )
    {
        if ( context == evd_cb->context)
        {
            break;
        }
        evd_cb = (dapl_ibal_evd_cb_t *) cl_qlist_next( &evd_cb->next );
    }

    /* not found: normalize the end-of-list sentinel to NULL */
    if ( &evd_cb->next == cl_qlist_end( &ca->evd_cb_head ) )
    {
        evd_cb = NULL;
    }

    RELEASE_LOCK( ca->evd_cb_lock );

    return evd_cb;
}

static cl_status_t
dapli_init_ca_evd_cb_list(
    IN  dapl_ibal_ca_t  *ca )
{
    cl_status_t status;

    cl_qlist_init( &ca->evd_cb_head );
    status = cl_spinlock_init( &ca->evd_cb_lock );
    if ( status != CL_SUCCESS )
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DiICECL: cl_spinlock_init returned %d\n", status);
    }
    return status;
}

static cl_status_t
dapli_init_ca_port_list(
    IN  dapl_ibal_ca_t  *ca )
{
    cl_status_t status;

    cl_qlist_init( &ca->port_head );
    status = cl_spinlock_init( &ca->port_lock );
    if ( status != CL_SUCCESS )
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DiICPL: cl_spinlock_init returned %d\n", status);
    }
    return status;
}

dapl_ibal_port_t *
dapli_ibal_get_port (
    IN  dapl_ibal_ca_t  *p_ca,
    IN  uint8_t         port_num)
{
    cl_list_item_t  *p_active_port = NULL;

    TAKE_LOCK (p_ca->port_lock);
    for ( p_active_port = cl_qlist_head( &p_ca->port_head );
          p_active_port != cl_qlist_end ( &p_ca->port_head);
          p_active_port = cl_qlist_next ( p_active_port ) )
    {
        if (((dapl_ibal_port_t *)p_active_port)->p_attr->port_num == port_num)
        {
            break;
        }
    }

    /* not found: return NULL instead of the list-end sentinel */
    if ( p_active_port == cl_qlist_end ( &p_ca->port_head ) )
    {
        p_active_port = NULL;
    }
    RELEASE_LOCK (p_ca->port_lock);

    return (dapl_ibal_port_t *)p_active_port;
}

static void
dapli_ibal_cq_async_error_callback(
    IN  ib_async_event_rec_t*   p_err_rec )
{
    DAPL_EVD            *evd_ptr = (DAPL_EVD*)((void *)p_err_rec->context);
    DAPL_EVD            *async_evd_ptr;
    DAPL_IA             *ia_ptr;
    dapl_ibal_ca_t      *p_ca;
    dapl_ibal_evd_cb_t  *evd_cb;

    dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCqAEC: CQ error %d for EVD context %p\n",
                  p_err_rec->code, p_err_rec->context);

    if (DAPL_BAD_HANDLE (evd_ptr, DAPL_MAGIC_EVD))
    {
        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCqAEC: invalid EVD %p\n", evd_ptr);
        return;
    }

    ia_ptr = evd_ptr->header.owner_ia;
    async_evd_ptr = ia_ptr->async_error_evd;
    if (async_evd_ptr == NULL)
    {
        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCqAEC: can't find async_error_evd on %s HCA\n",
                      (ia_ptr->header.provider)->device_name );
        return;
    }

    p_ca = (dapl_ibal_ca_t *) ia_ptr->hca_ptr->ib_hca_handle;
    if (p_ca == NULL)
    {
        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCqAEC: can't find %s HCA\n",
                      (ia_ptr->header.provider)->device_name);
        return;
    }

    /* find CQ error callback using ia_ptr for context */
    evd_cb = dapli_find_evd_cb_by_context ( async_evd_ptr, p_ca );
    if ((evd_cb == NULL) || (evd_cb->pfn_async_cq_err_cb == NULL))
    {
        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCqAEC: no ERROR cb on %p found\n", p_ca);
        return;
    }

    /* maps to dapl_evd_cq_async_error_callback(), context is EVD */
    evd_cb->pfn_async_cq_err_cb( (ib_hca_handle_t)p_ca,
                                 (ib_error_record_t*)&p_err_rec->code, evd_ptr);
}

static void
dapli_ibal_ca_async_error_callback(
    IN  ib_async_event_rec_t*   p_err_rec )
{
    dapl_ibal_ca_t      *p_ca = (dapl_ibal_ca_t*)((void *)p_err_rec->context);
    dapl_ibal_evd_cb_t  *evd_cb;
    DAPL_IA             *ia_ptr;

    dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCaAEC: CA error %d for context %p\n",
                  p_err_rec->code, p_err_rec->context);

    if (p_ca == NULL)
    {
        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCaAEC: invalid p_ca(%p) in async event rec\n",p_ca);
        return;
    }

    ia_ptr = (DAPL_IA*)p_ca->ia_ptr;
    if (ia_ptr == NULL)
    {
        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCaAEC: invalid ia_ptr in %p ca\n", p_ca );
        return;
    }

    if (ia_ptr->async_error_evd == NULL)
    {
        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCaAEC: can't find async_error_evd on %s HCA\n",
                      (ia_ptr->header.provider)->device_name );
        return;
    }

    /* find QP error callback using p_ca for context */
    evd_cb = dapli_find_evd_cb_by_context (ia_ptr->async_error_evd, p_ca);
    if ((evd_cb == NULL) || (evd_cb->pfn_async_err_cb == NULL))
    {
        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCaAEC: no ERROR cb on %p found\n", p_ca);
        return;
    }

    /* maps to dapl_evd_un_async_error_callback(), context is async_evd */
    evd_cb->pfn_async_err_cb( (ib_hca_handle_t)p_ca,
                              (ib_error_record_t*)&p_err_rec->code,
                              ia_ptr->async_error_evd);
}

static dapl_ibal_port_t *
dapli_alloc_port(
    IN  dapl_ibal_ca_t  *ca,
    IN  ib_port_attr_t  *ib_port )
{
    dapl_ibal_port_t    *p_port = NULL;

    if (ca->h_ca == NULL )
    {
        return NULL;
    }

    /*
     * Allocate the port structure memory. This will also deal with
     * copying ib_port_attr_t including GID and P_Key tables
     */
    p_port = dapl_os_alloc ( sizeof(dapl_ibal_port_t ) );
    if ( p_port != NULL )
    {
        dapl_os_memzero (p_port, sizeof(dapl_ibal_port_t ) );

        /*
         * We're good to go after initializing the reference.
         */
        INIT_REFERENCE( &p_port->refs, 1, p_port, NULL /* pfn_destructor */ );

        p_port->p_attr = ib_port;
    }
    return p_port;
}

static void
dapli_add_active_port( IN dapl_ibal_ca_t *ca)
{
    dapl_ibal_port_t    *p_port;
    ib_port_attr_t      *p_port_attr;
    ib_ca_attr_t        *p_ca_attr;
    uint8_t             i;

    p_ca_attr = ca->p_ca_attr;

    dapl_os_assert (p_ca_attr != NULL);

    for (i = 0; i < p_ca_attr->num_ports; i++)
    {
        p_port_attr = &p_ca_attr->p_port_attr[i];

        p_port = dapli_alloc_port( ca, p_port_attr );
        if ( p_port != NULL )
        {
            TAKE_REFERENCE (&ca->refs);

            /*
             * Record / update attributes
             */
            p_port->p_attr = p_port_attr;

            /*
             * Remember the parent CA, keeping the reference we took above
             */
            p_port->ca = ca;

            /*
             * We're good to go - add the new port to the list on the CA
             */
            LOCK_INSERT_TAIL( ca->port_lock, ca->port_head, p_port->next );
        }
        else
        {
            dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: Could not allocate "
                           "dapl_ibal_port_t\n", "DiAAP");
        }

        dapl_dbg_log( DAPL_DBG_TYPE_UTIL, "--> DiAAP: Port %d logical link is %s lid = %#x\n",
                      p_port_attr->port_num,
                      ( p_port_attr->link_state != IB_LINK_ACTIVE ? "DOWN": "UP" ),
                      CL_HTON16(p_port_attr->lid) );
    }
}

static dapl_ibal_ca_t *
dapli_alloc_ca(
    IN  ib_al_handle_t  h_al,
    IN  ib_net64_t      ca_guid)
{
    dapl_ibal_ca_t      *p_ca;
    ib_api_status_t     status;
    uint32_t            attr_size;

    /*
     * Allocate the CA structure
     */
    p_ca = dapl_os_alloc( sizeof(dapl_ibal_ca_t) );
    if ( p_ca == NULL )
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: Error allocating CA structure\n","DiAC");
        return NULL;
    }
    dapl_os_memzero (p_ca, sizeof(dapl_ibal_ca_t) );

    /*
     * Pass dapli_ibal_ca_async_error_callback as the
     * async error callback
     */
    status = ib_open_ca( h_al,
                         ca_guid,
                         dapli_ibal_ca_async_error_callback,
                         p_ca /* context */,
                         &p_ca->h_ca );
    if ( status != IB_SUCCESS )
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DiAC: ib_open_ca returned %d\n", status);
        dapl_os_free (p_ca, sizeof (dapl_ibal_ca_t));
        return NULL;
    }

    /*
     * Get port list lock and list head initialized
     */
    if (( dapli_init_ca_port_list( p_ca ) != CL_SUCCESS ) ||
        ( dapli_init_ca_evd_cb_list( p_ca ) != CL_SUCCESS ))
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: dapli_init_ca_port_list failed\n",
                       "DiAC");
        goto close_and_free_ca;
    }

    /* first query with a NULL buffer returns the required attribute size */
    status = ib_query_ca (p_ca->h_ca, NULL, &attr_size);
    if (status != IB_INSUFFICIENT_MEMORY)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR,
                       "--> DiAC: ib_query_ca returned failed status = %d\n",
                       status);
        goto close_and_free_ca;
    }

    p_ca->p_ca_attr = dapl_os_alloc ((int)attr_size);
    if (p_ca->p_ca_attr == NULL)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR,
                       "--> %s: dapli_alloc_ca failed to alloc memory\n",
                       "DiAC");
        goto close_and_free_ca;
    }

    status = ib_query_ca ( p_ca->h_ca,
                           p_ca->p_ca_attr,
                           &attr_size );
    if (status != IB_SUCCESS)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR,
                       "--> ib_query_ca returned failed status = %d\n",
                       status);
        dapl_os_free (p_ca->p_ca_attr, (int)attr_size);
        goto close_and_free_ca;
    }

    p_ca->ca_attr_size = attr_size;

    INIT_REFERENCE( &p_ca->refs, 1, p_ca, NULL /* pfn_destructor */ );

    dapli_add_active_port (p_ca);

    return p_ca;

close_and_free_ca:
    (void) ib_close_ca ( p_ca->h_ca, NULL /* callback */);
    dapl_os_free (p_ca, sizeof (dapl_ibal_ca_t));

    /*
     * If we get here, there was an initialization failure
     */
    return NULL;
}

static dapl_ibal_ca_t *
dapli_add_ca (
    IN  ib_al_handle_t  h_al,
    IN  ib_net64_t      ca_guid)
{
    dapl_ibal_ca_t  *p_ca;

    /*
     * Allocate a CA structure
     */
    p_ca = dapli_alloc_ca( h_al, ca_guid );
    if ( p_ca != NULL )
    {
        /*
         * Add the new CA to the list
         */
        LOCK_INSERT_TAIL( dapl_ibal_root.ca_lock,
                          dapl_ibal_root.ca_head, p_ca->next );
    }
    else
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: Could not allocate dapl_ibal_ca_t\n","DiAA");
    }
    return p_ca;
}

int32_t dapls_ib_init (void)
{
    ib_api_status_t status;
    size_t          guid_count;

    /*
     * Initialize the root structure
     */
    if (dapli_init_root_ca_list (&dapl_ibal_root) == CL_SUCCESS)
    {
        /*
         * Register with the access layer
         */
        status = ib_open_al (&dapl_ibal_root.h_al);
        if (status == IB_SUCCESS)
        {
            /* first call with a NULL table returns the GUID count */
            status = ib_get_ca_guids (dapl_ibal_root.h_al, NULL, &guid_count);
            if (status != IB_INSUFFICIENT_MEMORY)
            {
                dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsII: ib_get_ca_guids failed = %d\n", status);
                return -1;
            }

            if (guid_count == 0)
            {
                dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: found NO HCA in the system\n",
                               "DsII");
                return -1;
            }

            if (guid_count > DAPL_IBAL_MAX_CA)
            {
                guid_count = DAPL_IBAL_MAX_CA;
            }

            gp_ibal_ca_guid_tbl = (ib_net64_t*)dapl_os_alloc ((int)(guid_count *
                                                              sizeof (ib_net64_t)) );
            if (gp_ibal_ca_guid_tbl == NULL)
            {
                dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: can not alloc gp_ibal_ca_guid_tbl\n",
                               "DsII");
                return -1;
            }

            status = ib_get_ca_guids ( dapl_ibal_root.h_al,
                                       gp_ibal_ca_guid_tbl,
                                       &guid_count );
            if ( status != IB_SUCCESS )
            {
                dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsII: ib_get_ca_guids failed = %s\n",
                               ib_get_err_str(status) );
                return -1;
            }

            dapl_dbg_log ( DAPL_DBG_TYPE_UTIL,
                           "--> DsII: Success open AL & found %d HCA avail\n",
                           (int)guid_count);
            return 0;
        }

        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsII: ib_open_al returned %s\n", ib_get_err_str(status));

        dapli_destroy_root_ca_list (&dapl_ibal_root);
    }
    return -1;
}
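
/*
 * Illustrative usage sketch (not part of the original source), assuming
 * the 0-on-success convention used above:
 *
 *     if (dapls_ib_init () == 0)
 *     {
 *         // ... dapls_ib_enum_hcas / dapls_ib_open_hca / data transfer ...
 *         dapls_ib_release ();
 *     }
 */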

int32_t dapls_ib_release (void)
{
    dapl_ibal_root.shutdown = TRUE;

    dapli_shutdown_ca_access();

    /*
     * If shutdown is not complete, wait for it
     */
    if (dapl_ibal_root.shutdown)
    {
        dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
                      "--> DsIR: timeout waiting for completion\n");
    }

    if ( dapl_ibal_root.h_al != NULL )
    {
        dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
                      "--> DsIR: ib_close_al called\n");
        ib_close_al (dapl_ibal_root.h_al);
        dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
                      "--> DsIR: ib_close_al return\n");
        dapl_ibal_root.h_al = NULL;
    }
    return 0;
}

/*
 * dapls_ib_enum_hcas
 *
 * Enumerate all HCAs on the system
 *
 * Output:
 *      hca_names           Array of hca names
 *      total_hca_count     Number of HCAs returned
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_enum_hcas (
    OUT  DAPL_HCA_NAME  **hca_names,
    OUT  DAT_COUNT      *total_hca_count,
    IN   const char     *vendor )
{
    ib_api_status_t ib_status;
    size_t          guid_count;
    size_t          i;

    UNREFERENCED_PARAMETER(vendor);

    /* first call with a NULL table returns the GUID count */
    ib_status = ib_get_ca_guids (dapl_ibal_root.h_al, NULL, &guid_count);
    if (ib_status != IB_INSUFFICIENT_MEMORY)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIEH: ib_get_ca_guids failed status = %d\n", ib_status);
        return dapl_ib_status_convert (ib_status);
    }

    if (guid_count == 0)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: ib_get_ca_guids found no HCA in the system\n",
                       "DsIEH");
        return (DAT_PROVIDER_NOT_FOUND);
    }

    if (guid_count > DAPL_IBAL_MAX_CA)
    {
        guid_count = DAPL_IBAL_MAX_CA;
    }

    gp_ibal_ca_guid_tbl = (ib_net64_t *)dapl_os_alloc ((int)(guid_count * sizeof (ib_net64_t)) );
    if (gp_ibal_ca_guid_tbl == NULL)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: can not alloc resources @line%d\n",
                       "DsIEH", __LINE__);
        return (DAT_INSUFFICIENT_RESOURCES);
    }

    ib_status = ib_get_ca_guids (
                        dapl_ibal_root.h_al, gp_ibal_ca_guid_tbl, &guid_count);
    if (ib_status != IB_SUCCESS)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIEH: ib_get_ca_guids failed status = %s\n",
                       ib_get_err_str(ib_status));
        return dapl_ib_status_convert (ib_status);
    }

    *hca_names = (DAPL_HCA_NAME*)dapl_os_alloc ((int)(guid_count * sizeof (DAPL_HCA_NAME)) );
    if (*hca_names == NULL)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: can not alloc resources @line%d\n",
                       "DsIEH", __LINE__);
        return (DAT_INSUFFICIENT_RESOURCES);
    }

    dapl_os_memcpy (*hca_names,
                    dapl_ibal_hca_name_array,
                    (int)(guid_count * sizeof (DAPL_HCA_NAME)) );

    *total_hca_count = (DAT_COUNT)guid_count;

    for (i = 0; i < guid_count; i++)
    {
        dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
                      "--> DsIEH: %d) hca_names = %s\n",
                      (int)i, dapl_ibal_hca_name_array[i]);
    }

    return (DAT_SUCCESS);
}
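
/*
 * Illustrative usage sketch (not part of the original source); the name
 * array is allocated above on behalf of the caller, so the caller is
 * assumed to free it:
 *
 *     DAPL_HCA_NAME *names;
 *     DAT_COUNT      count;
 *
 *     if (dapls_ib_enum_hcas (&names, &count, NULL) == DAT_SUCCESS)
 *     {
 *         // e.g. convert names[0] via dapl_ib_convert_name ()
 *         dapl_os_free (names, count * sizeof (DAPL_HCA_NAME));
 *     }
 */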

ib_net64_t
dapl_ib_convert_name(
    IN  char    *name)
{
    int i;

    if (gp_ibal_ca_guid_tbl == NULL)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DICN: found no HCA with name %s\n", name);
        return 0;
    }

    for (i = 0; i < DAPL_IBAL_MAX_CA; i++)
    {
        if (strcmp (name, dapl_ibal_hca_name_array[i]) == 0)
        {
            break;
        }
    }

    if (i >= DAPL_IBAL_MAX_CA)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DICN: can't find any HCA with name %s\n", name);
        return 0;
    }

    return (gp_ibal_ca_guid_tbl[i]);
}

/*
 * dapls_ib_open_hca
 *
 * Input:
 *      hca_name            provider device name (CA GUID)
 *      p_ib_hca_handle     pointer to provider HCA handle
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_PROVIDER_NOT_FOUND
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN dapls_ib_open_hca (
    IN   IB_HCA_NAME        hca_name,
    OUT  ib_hca_handle_t    *p_ib_hca_handle)
{
    dapl_ibal_ca_t  *p_ca;

    if (gp_ibal_ca_guid_tbl == NULL)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIOH: found no HCA with ca_guid" F64x "\n", hca_name);
        return (DAT_PROVIDER_NOT_FOUND);
    }

    p_ca = dapli_add_ca (dapl_ibal_root.h_al, hca_name);
    if (p_ca == NULL)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIOH: can not create ca with ca_guid" F64x "\n", hca_name);
        return (DAT_INSUFFICIENT_RESOURCES);
    }

    *p_ib_hca_handle = (ib_hca_handle_t) p_ca;

    return (DAT_SUCCESS);
}
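
/*
 * Illustrative usage sketch (not part of the original source), assuming
 * IB_HCA_NAME is the 64-bit GUID type implied by the F64x format above:
 *
 *     ib_hca_handle_t h_hca;
 *     ib_net64_t      guid = dapl_ib_convert_name ("IbalHca0");
 *
 *     if (guid != 0 &&
 *         dapls_ib_open_hca ((IB_HCA_NAME)guid, &h_hca) == DAT_SUCCESS)
 *     {
 *         // ... use the HCA ...
 *         dapls_ib_close_hca (h_hca);
 *     }
 */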

/*
 * dapls_ib_close_hca
 *
 * Input:
 *      ib_hca_handle       provider HCA handle
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN dapls_ib_close_hca (
    IN  ib_hca_handle_t     ib_hca_handle)
{
    dapl_ibal_ca_t  *p_ca;

    p_ca = (dapl_ibal_ca_t *) ib_hca_handle;

    /*
     * Remove it from the list
     */
    TAKE_LOCK (dapl_ibal_root.ca_lock);

    cl_qlist_remove_item (&dapl_ibal_root.ca_head, &p_ca->next);

    RELEASE_LOCK (dapl_ibal_root.ca_lock);

    dapli_shutdown_port_access (p_ca);

    /*
     * Remove the constructor reference
     */
    REMOVE_REFERENCE (&p_ca->refs);

    cl_spinlock_destroy (&p_ca->port_lock);
    cl_spinlock_destroy (&p_ca->evd_cb_lock);

    if (p_ca->p_ca_attr)
    {
        dapl_os_free (p_ca->p_ca_attr, sizeof (ib_ca_attr_t));
    }

    (void) ib_close_ca (p_ca->h_ca, NULL /* close_callback */);

    dapl_os_free (p_ca, sizeof (dapl_ibal_ca_t));

    return (DAT_SUCCESS);
}

/*
 * dapli_ib_cq_completion_cb
 *
 * Completion callback for a CQ
 *
 * Input:
 *      h_cq            CQ handle
 *      cq_context      User context (the owning EVD)
 */
static void
dapli_ib_cq_completion_cb (
    IN  const ib_cq_handle_t    h_cq,
    IN  void                    *cq_context )
{
    DAPL_EVD        *evd_ptr;
    dapl_ibal_ca_t  *p_ca;

    evd_ptr = (DAPL_EVD *) cq_context;

    dapl_dbg_log (DAPL_DBG_TYPE_CALLBACK,
                  "--> DiICCC: cq_completion_cb evd %p CQ %p\n",
                  evd_ptr, evd_ptr->ib_cq_handle);

    dapl_os_assert (evd_ptr != DAT_HANDLE_NULL);

    p_ca = (dapl_ibal_ca_t *) evd_ptr->header.owner_ia->hca_ptr->ib_hca_handle;

    dapl_os_assert( h_cq == evd_ptr->ib_cq_handle );

    dapl_evd_dto_callback (
                (ib_hca_handle_t) p_ca,
                h_cq,
                (void *) evd_ptr );
}

/*
 * dapls_ib_cq_late_alloc
 *
 * Input:
 *      pd_handle       PD handle
 *      evd_ptr         pointer to EVD struct
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_cq_late_alloc (
    IN  ib_pd_handle_t  pd_handle,
    IN  DAPL_EVD        *evd_ptr)
{
    ib_cq_create_t  cq_create;
    ib_api_status_t ib_status;
    DAT_RETURN      dat_status;
    dapl_ibal_ca_t  *ibal_ca = (dapl_ibal_ca_t *)evd_ptr->header.owner_ia->hca_ptr->ib_hca_handle;

    dat_status = DAT_SUCCESS;
    cq_create.size = evd_ptr->qlen;

    /* a wait object and a completion callback are mutually exclusive */
    if (evd_ptr->cq_wait_obj_handle)
    {
        cq_create.h_wait_obj  = evd_ptr->cq_wait_obj_handle;
        cq_create.pfn_comp_cb = NULL;
    }
    else
    {
        cq_create.h_wait_obj  = NULL;
        cq_create.pfn_comp_cb = dapli_ib_cq_completion_cb;
    }

    ib_status = ib_create_cq (
                        (ib_ca_handle_t)ibal_ca->h_ca,
                        &cq_create,
                        evd_ptr /* context */,
                        dapli_ibal_cq_async_error_callback,
                        &evd_ptr->ib_cq_handle);

    dat_status = dapl_ib_status_convert (ib_status);
    if ( dat_status != DAT_SUCCESS )
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsICLA: failed to create CQ for EVD %p\n", evd_ptr);
        return dat_status;
    }

    dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
                  "--> DsCQ_alloc: pd=%p cq=%p Csz=%d Qln=%d\n",
                  pd_handle, evd_ptr->ib_cq_handle,
                  cq_create.size, evd_ptr->qlen );

    /* IBAL may round the CQ size up; grow the EVD queues to match */
    if ( cq_create.size > (uint32_t)evd_ptr->qlen )
    {
        DAT_COUNT   pending_cnt, free_cnt;
        DAT_EVENT   *event_ptr;
        int         i;

        dapl_os_lock ( &evd_ptr->header.lock );

        pending_cnt = dapls_rbuf_count ( &evd_ptr->pending_event_queue );
        free_cnt    = dapls_rbuf_count ( &evd_ptr->free_event_queue );

        if ( pending_cnt == 0 )
        {
            dat_status = dapls_rbuf_realloc ( &evd_ptr->pending_event_queue,
                                              cq_create.size );
            if ( dat_status != DAT_SUCCESS )
            {
                dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsICLA: failed to resize EVD pending_event_queue "
                               "from %d to %d\n",
                               evd_ptr->qlen, cq_create.size );
                dat_status = DAT_SUCCESS;
            }
        }

        /* drain the free event queue before resizing it */
        for (i = 0; i < free_cnt; i++)
        {
            event_ptr = (DAT_EVENT *)
                        dapls_rbuf_remove ( &evd_ptr->free_event_queue );
        }

        dat_status = dapls_rbuf_realloc ( &evd_ptr->free_event_queue,
                                          cq_create.size );
        if ( dat_status != DAT_SUCCESS )
        {
            dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsICLA: failed to resize EVD free_event_queue "
                           "from %d to %d\n",
                           evd_ptr->qlen, cq_create.size );
            dapl_os_unlock ( &evd_ptr->header.lock );
            dapls_ib_cq_free ( evd_ptr->header.owner_ia, evd_ptr);
            return dat_status;
        }

        /* grow (or allocate) the event buffer to the new CQ size */
        if (evd_ptr->events)
        {
            evd_ptr->events = (void *)
                              dapl_os_realloc (
                                    evd_ptr->events,
                                    cq_create.size * sizeof (DAT_EVENT));
        }
        else
        {
            evd_ptr->events = (void *)
                              dapl_os_alloc (
                                    cq_create.size * sizeof (DAT_EVENT));
        }

        if ( evd_ptr->events == NULL )
        {
            dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsICLA: failed to resize EVD events buffers "
                           "from %d to %d\n",
                           evd_ptr->qlen, cq_create.size );
            dat_status = DAT_INSUFFICIENT_RESOURCES;
            dapl_os_unlock ( &evd_ptr->header.lock );
            dapls_ib_cq_free ( evd_ptr->header.owner_ia, evd_ptr);
            return dat_status;
        }

        event_ptr = evd_ptr->events;

        /* add events to free event queue */
        for (i = 0; (uint32_t)i < cq_create.size; i++)
        {
            dapls_rbuf_add (&evd_ptr->free_event_queue, (void *)event_ptr);
            event_ptr++;
        }

        dapl_dbg_log (DAPL_DBG_TYPE_EVD,
                      "--> DsICLA: resized EVD events buffers from %d to %d\n",
                      evd_ptr->qlen, cq_create.size);

        evd_ptr->qlen = cq_create.size;

        dapl_os_unlock ( &evd_ptr->header.lock );
    }

    return dat_status;
}
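
/*
 * Illustrative sketch (not part of the original source): why the EVD ring
 * buffers above track cq_create.size rather than the requested qlen,
 * assuming a verbs provider that rounds CQ sizes up:
 *
 *     cq_create.size = 1000;              // requested
 *     ib_create_cq (...);                 // provider may round up
 *     // cq_create.size may now read 1024; the pending/free event queues
 *     // and evd_ptr->qlen are resized above to match it.
 */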

/*
 * dapls_ib_cq_free
 *
 * Input:
 *      ia_ptr          IA handle
 *      evd_ptr         pointer to EVD struct
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INVALID_HANDLE
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_cq_free (
    IN  DAPL_IA     *ia_ptr,
    IN  DAPL_EVD    *evd_ptr)
{
    ib_api_status_t ib_status;

    if ( ia_ptr == NULL || ia_ptr->header.magic != DAPL_MAGIC_IA )
    {
        return DAT_INVALID_HANDLE;
    }

    ib_status = ib_destroy_cq (evd_ptr->ib_cq_handle,
                               /* destroy_callback */ NULL);

    return dapl_ib_status_convert (ib_status);
}

/*
 * dapls_ib_cq_resize
 *
 * Resize a CQ
 *
 * Input:
 *      ia_ptr          IA handle
 *      evd_ptr         pointer to EVD struct
 *      cqlen           minimum QLen
 *
 * Output:
 *      cqlen           may be rounded up for optimal memory boundaries
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INVALID_HANDLE
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_cq_resize (
    IN  DAPL_IA     *ia_ptr,
    IN  DAPL_EVD    *evd_ptr,
    IN  DAT_COUNT   *cqlen )
{
    ib_api_status_t ib_status = IB_SUCCESS;

    if ( ia_ptr == NULL || ia_ptr->header.magic != DAPL_MAGIC_IA )
    {
        return DAT_INVALID_HANDLE;
    }

    /*
     * Resize the CQ only if the CQ handle is valid; CQ creation may be
     * delayed waiting for PZ allocation with IBAL.
     */
#if defined(_VENDOR_IBAL_)
    if ( evd_ptr->ib_cq_handle != IB_INVALID_HANDLE )
#endif /* _VENDOR_IBAL_ */
    {
        ib_status = ib_modify_cq ( evd_ptr->ib_cq_handle,
                                   (uint32_t *)cqlen );
        dapl_dbg_log (DAPL_DBG_TYPE_EVD,
                      "ib_modify_cq ( new cqlen = %d, status=%d )\n",
                      *cqlen, ib_status );
    }

    return dapl_ib_status_convert (ib_status);
}

/*
 * dapls_set_cq_notify
 *
 * Set up CQ completion notifications
 *
 * Input:
 *      ia_ptr          IA handle
 *      evd_ptr         pointer to EVD struct
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INVALID_HANDLE
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_set_cq_notify (
    IN  DAPL_IA     *ia_ptr,
    IN  DAPL_EVD    *evd_ptr)
{
    ib_api_status_t ib_status;

    if ( ia_ptr == NULL || ia_ptr->header.magic != DAPL_MAGIC_IA )
    {
        return DAT_INVALID_HANDLE;
    }

    ib_status = ib_rearm_cq (
                        evd_ptr->ib_cq_handle,
                        FALSE /* next event, not just solicited events */ );

    return dapl_ib_status_convert (ib_status);
}
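
/*
 * Illustrative note (not part of the original source): the second argument
 * of ib_rearm_cq() selects the trigger, matching the IBAL semantics
 * assumed above:
 *
 *     ib_rearm_cq (evd_ptr->ib_cq_handle, FALSE);  // any next completion
 *     ib_rearm_cq (evd_ptr->ib_cq_handle, TRUE);   // solicited only
 */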

/*
 * dapls_ib_cqd_create
 *
 * Set up CQ notification event thread
 *
 * Input:
 *      hca_ptr         HCA handle
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INVALID_HANDLE
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_cqd_create (
    IN  DAPL_HCA    *hca_ptr)
{
    /*
     * IBAL has no CQD concept
     */
    hca_ptr->ib_cqd_handle = IB_INVALID_HANDLE;

    return DAT_SUCCESS;
}

/*
 * dapls_ib_cqd_destroy
 *
 * Destroy CQ notification event thread
 *
 * Input:
 *      hca_ptr         HCA handle
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INVALID_HANDLE
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_cqd_destroy (
    IN  DAPL_HCA    *hca_ptr)
{
    hca_ptr->ib_cqd_handle = IB_INVALID_HANDLE;
    return (DAT_SUCCESS);
}

/*
 * dapls_ib_pd_alloc
 *
 * Input:
 *      ia              IA handle
 *      pz              pointer to PZ struct
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_pd_alloc (
    IN  DAPL_IA     *ia,
    IN  DAPL_PZ     *pz)
{
    ib_api_status_t ib_status;
    dapl_ibal_ca_t  *p_ca;

    p_ca = (dapl_ibal_ca_t *) ia->hca_ptr->ib_hca_handle;
    ib_status = ib_alloc_pd (
                        p_ca->h_ca,
                        IB_PDT_NORMAL,
                        ia /* context */,
                        &pz->pd_handle );

    return dapl_ib_status_convert (ib_status);
}

/*
 * dapls_ib_pd_free
 *
 * Input:
 *      pz              pointer to PZ struct
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_pd_free (
    IN  DAPL_PZ     *pz)
{
    ib_api_status_t ib_status;

    ib_status = ib_dealloc_pd (pz->pd_handle, /* destroy_callback */ NULL);

    pz->pd_handle = IB_INVALID_HANDLE;

    return dapl_ib_status_convert (ib_status);
}

/*
 * dapls_ib_mr_register
 *
 * Register a virtual memory region
 *
 * Input:
 *      ia              IA handle
 *      lmr             pointer to dapl_lmr struct
 *      virt_addr       virtual address of beginning of mem region
 *      length          length of memory region
 *      privileges      DAT memory privileges
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_mr_register (
    IN  DAPL_IA             *ia,
    IN  DAPL_LMR            *lmr,
    IN  DAT_PVOID           virt_addr,
    IN  DAT_VLEN            length,
    IN  DAT_MEM_PRIV_FLAGS  privileges)
{
    ib_api_status_t     ib_status;
    ib_mr_handle_t      mr_handle;
    ib_mr_create_t      mr_create;
    uint32_t            l_key, r_key;

    if ( ia == NULL || ia->header.magic != DAPL_MAGIC_IA )
    {
        return DAT_INVALID_HANDLE;
    }

    mr_create.vaddr        = (void *) virt_addr;
    mr_create.length       = (size_t)length;
    mr_create.access_ctrl  = dapl_lmr_convert_privileges (privileges);
    mr_create.access_ctrl |= IB_AC_MW_BIND;

    if (lmr->param.mem_type == DAT_MEM_TYPE_SHARED_VIRTUAL)
    {
        ib_status = ib_reg_shmid (
                        ((DAPL_PZ *)lmr->param.pz_handle)->pd_handle,
                        (const uint8_t*)&lmr->ib_shmid,
                        &mr_create,
                        (uint64_t *)&virt_addr,
                        &l_key,
                        &r_key,
                        &mr_handle );
    }
    else
    {
        ib_status = ib_reg_mem (
                        ((DAPL_PZ *)lmr->param.pz_handle)->pd_handle,
                        &mr_create,
                        &l_key,
                        &r_key,
                        &mr_handle );
    }

    if (ib_status != IB_SUCCESS)
    {
        return (dapl_ib_status_convert (ib_status));
    }

    dapl_dbg_log (DAPL_DBG_TYPE_UTIL, "--> DsIMR: lmr (%p) lkey 0x%x r_key %#x "
                  "mr_handle %p vaddr 0x%LX len 0x%LX\n",
                  lmr, l_key, r_key, mr_handle, virt_addr, length);

    lmr->param.lmr_context     = l_key;
    lmr->param.rmr_context     = r_key;
    lmr->param.registered_size = length;

    /*
     * Cast through uintptr_t to avoid MS compiler warning C4826
     * (conversion from 'DAT_PVOID' to 'DAT_VADDR' is sign-extended,
     * which may cause unexpected runtime behavior).
     */
    lmr->param.registered_address = (DAT_VADDR) (uintptr_t) virt_addr;

    lmr->mr_handle = mr_handle;

    return (DAT_SUCCESS);
}
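
/*
 * Illustrative usage sketch (not part of the original source): register a
 * local buffer through an lmr that already carries a pz_handle, then tear
 * the registration down:
 *
 *     char buf[4096];
 *
 *     if (dapls_ib_mr_register (ia, lmr, buf, sizeof(buf),
 *                               DAT_MEM_PRIV_LOCAL_READ_FLAG |
 *                               DAT_MEM_PRIV_LOCAL_WRITE_FLAG) == DAT_SUCCESS)
 *     {
 *         // ... post DTOs using lmr->param.lmr_context as the lkey ...
 *         (void) dapls_ib_mr_deregister (lmr);
 *     }
 */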

/*
 * dapls_ib_mr_deregister
 *
 * Free a memory region
 *
 * Input:
 *      lmr             pointer to dapl_lmr struct
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_mr_deregister (
    IN  DAPL_LMR    *lmr)
{
    ib_api_status_t ib_status;

    ib_status = ib_dereg_mr (lmr->mr_handle);

    if (ib_status != IB_SUCCESS)
    {
        return dapl_ib_status_convert (ib_status);
    }

    lmr->param.lmr_context = 0;
    lmr->mr_handle         = IB_INVALID_HANDLE;

    return (DAT_SUCCESS);
}

/*
 * dapls_ib_mr_register_shared
 *
 * Register a shared virtual memory region
 *
 * Input:
 *      ia              IA handle
 *      lmr             pointer to dapl_lmr struct
 *      privileges      DAT memory privileges
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_mr_register_shared (
    IN  DAPL_IA             *ia,
    IN  DAPL_LMR            *lmr,
    IN  DAT_MEM_PRIV_FLAGS  privileges)
{
    DAT_VADDR           virt_addr;
    ib_mr_handle_t      mr_handle;
    ib_api_status_t     ib_status;
    ib_mr_handle_t      new_mr_handle;
    ib_access_t         access_ctrl;
    uint32_t            l_key, r_key;
    ib_mr_create_t      mr_create;

    if ( ia == NULL || ia->header.magic != DAPL_MAGIC_IA )
    {
        return DAT_INVALID_HANDLE;
    }

    virt_addr = dapl_mr_get_address (lmr->param.region_desc,
                                     lmr->param.mem_type);

    access_ctrl  = dapl_lmr_convert_privileges (privileges);
    access_ctrl |= IB_AC_MW_BIND;

    mr_create.vaddr       = (void *) virt_addr;
    mr_create.access_ctrl = access_ctrl;
    mr_handle             = (ib_mr_handle_t) lmr->mr_handle;

    dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
                  "--> DsIMRS: orig mr_handle %p vaddr %p\n",
                  mr_handle, virt_addr);

    if (lmr->param.mem_type == DAT_MEM_TYPE_SHARED_VIRTUAL)
    {
        ib_status = ib_reg_shmid (
                        ((DAPL_PZ *)lmr->param.pz_handle)->pd_handle,
                        (const uint8_t*)&lmr->ib_shmid,
                        &mr_create,
                        (uint64_t *)&virt_addr,
                        &l_key,
                        &r_key,
                        &new_mr_handle );
    }
    else
    {
        ib_status = ib_reg_shared (
                        mr_handle,
                        ((DAPL_PZ *)lmr->param.pz_handle)->pd_handle,
                        access_ctrl,
                        /* in/out */ (DAT_UINT64 *)&virt_addr,
                        &l_key,
                        &r_key,
                        &new_mr_handle );
    }

    if (ib_status != IB_SUCCESS)
    {
        return dapl_ib_status_convert (ib_status);
    }

    /*
     * Note: virt_addr is IN/OUT above; on return it holds the actual
     * virtual address assigned to the registered region.
     */
    dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
                  "--> DsIMRS: lmr (%p) lkey = 0x%x new mr_handle %p vaddr %p\n",
                  lmr, l_key, new_mr_handle, virt_addr);

    lmr->param.lmr_context        = l_key;
    lmr->param.rmr_context        = r_key;
    lmr->param.registered_address = (DAT_VADDR) (uintptr_t) virt_addr;
    lmr->mr_handle                = new_mr_handle;

    return (DAT_SUCCESS);
}

/*
 * dapls_ib_mw_alloc
 *
 * Bind a protection domain to a memory window
 *
 * Input:
 *      rmr             Initialized rmr to hold binding handles
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_mw_alloc (
    IN  DAPL_RMR    *rmr)
{
    ib_api_status_t ib_status;
    uint32_t        r_key;
    ib_mw_handle_t  mw_handle;

    ib_status = ib_create_mw (
                        ((DAPL_PZ *)rmr->param.pz_handle)->pd_handle,
                        &r_key,
                        &mw_handle );

    if (ib_status != IB_SUCCESS)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIMA: create MW failed = %s\n", ib_get_err_str(ib_status));
        return dapl_ib_status_convert (ib_status);
    }

    rmr->mw_handle         = mw_handle;
    rmr->param.rmr_context = (DAT_RMR_CONTEXT) r_key;

    dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
                  "--> DsIMA: mw_handle %p r_key = 0x%x\n",
                  mw_handle, r_key);

    return (DAT_SUCCESS);
}

/*
 * dapls_ib_mw_free
 *
 * Release bindings of a protection domain to a memory window
 *
 * Input:
 *      rmr             Initialized rmr to hold binding handles
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_mw_free (
    IN  DAPL_RMR    *rmr)
{
    ib_api_status_t ib_status;

    dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
                  "--> DsIMF: mw_handle %p\n", rmr->mw_handle);

    ib_status = ib_destroy_mw (rmr->mw_handle);

    if (ib_status != IB_SUCCESS)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIMF: Free MW failed = %s\n", ib_get_err_str(ib_status));
        return dapl_ib_status_convert (ib_status);
    }

    rmr->param.rmr_context = 0;
    rmr->mw_handle         = IB_INVALID_HANDLE;

    return (DAT_SUCCESS);
}

/*
 * dapls_ib_mw_bind
 *
 * Bind a protection domain to a memory window
 *
 * Input:
 *      rmr             Initialized rmr to hold binding handles
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_mw_bind (
    IN  DAPL_RMR            *rmr,
    IN  DAPL_LMR            *lmr,
    IN  DAPL_EP             *ep,
    IN  DAPL_COOKIE         *cookie,
    IN  DAT_VADDR           virtual_address,
    IN  DAT_VLEN            length,
    IN  DAT_MEM_PRIV_FLAGS  mem_priv,
    IN  ib_bool_t           is_signaled)
{
    ib_api_status_t ib_status;
    ib_bind_wr_t    bind_wr_prop;
    uint32_t        new_rkey;

    bind_wr_prop.local_ds.vaddr  = virtual_address;
    bind_wr_prop.local_ds.length = (uint32_t)length;
    bind_wr_prop.local_ds.lkey   = lmr->param.lmr_context;
    bind_wr_prop.current_rkey    = rmr->param.rmr_context;
    bind_wr_prop.access_ctrl     = dapl_rmr_convert_privileges (mem_priv);
    bind_wr_prop.send_opt        = (is_signaled == TRUE) ?
                                   IB_SEND_OPT_SIGNALED : 0;
    bind_wr_prop.wr_id           = (uint64_t) ((uintptr_t) cookie);
    bind_wr_prop.h_mr            = lmr->mr_handle;

    dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
                  "--> DsIMB: mr_handle %p, mw_handle %p vaddr %#I64x length %#I64x\n",
                  lmr->mr_handle, rmr->mw_handle, virtual_address, length);

    ib_status = ib_bind_mw (
                        rmr->mw_handle,
                        ep->qp_handle,
                        &bind_wr_prop,
                        &new_rkey );

    if (ib_status != IB_SUCCESS)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIMB: Bind MW failed = %s\n",
                       ib_get_err_str(ib_status));
        return (dapl_ib_status_convert (ib_status));
    }

    rmr->param.rmr_context = (DAT_RMR_CONTEXT) new_rkey;

    dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
                  "--> DsIMB: new_rkey = 0x%x\n", new_rkey);

    return (DAT_SUCCESS);
}
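
/*
 * Illustrative usage sketch (not part of the original source): bind a
 * window over a registered region on an assumed connected EP, advertise
 * the new rkey, then unbind:
 *
 *     if (dapls_ib_mw_bind (rmr, lmr, ep, cookie,
 *                           lmr->param.registered_address,
 *                           lmr->param.registered_size,
 *                           DAT_MEM_PRIV_ALL_FLAG, TRUE) == DAT_SUCCESS)
 *     {
 *         // ... send rmr->param.rmr_context to the peer ...
 *         (void) dapls_ib_mw_unbind (rmr, ep, cookie, TRUE);
 *     }
 */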

/*
 * dapls_ib_mw_unbind
 *
 * Unbind a memory window
 *
 * Input:
 *      rmr             Initialized rmr to hold binding handles
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INSUFFICIENT_RESOURCES
 */
DAT_RETURN
dapls_ib_mw_unbind (
    IN  DAPL_RMR    *rmr,
    IN  DAPL_EP     *ep,
    IN  DAPL_COOKIE *cookie,
    IN  ib_bool_t   is_signaled)
{
    ib_api_status_t ib_status;
    ib_bind_wr_t    bind_wr_prop;
    uint32_t        new_rkey;

    /* a zero-length bind serves as the unbind */
    bind_wr_prop.local_ds.vaddr  = 0;
    bind_wr_prop.local_ds.length = 0;
    bind_wr_prop.local_ds.lkey   = 0;
    bind_wr_prop.access_ctrl     = 0;
    bind_wr_prop.send_opt        = (is_signaled == TRUE) ?
                                   IB_SEND_OPT_SIGNALED : 0;
    bind_wr_prop.wr_id           = (uint64_t) ((uintptr_t) cookie);

    dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
                  "--> DsIMU: mw_handle = %p\n", rmr->mw_handle);

    ib_status = ib_bind_mw (
                        rmr->mw_handle,
                        ep->qp_handle,
                        &bind_wr_prop,
                        &new_rkey );

    if (ib_status != IB_SUCCESS)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIMU: Unbind MW failed = %s\n",
                       ib_get_err_str(ib_status));
        return (dapl_ib_status_convert (ib_status));
    }

    rmr->param.rmr_context = (DAT_RMR_CONTEXT) new_rkey;

    dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
                  "--> DsIMU: unbind new_rkey = 0x%x\n", new_rkey);

    return (DAT_SUCCESS);
}

/*
 * dapls_ib_setup_async_callback
 *
 * Set up asynchronous callbacks of various kinds
 *
 * Input:
 *      ia_ptr              IA handle
 *      handler_type        type of handler to set up
 *      callback_handle     handle param for completion callbacks
 *      callback            callback routine pointer
 *      context             argument for callback routine
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INSUFFICIENT_RESOURCES
 *      DAT_INVALID_PARAMETER
 */
DAT_RETURN
dapls_ib_setup_async_callback (
    IN  DAPL_IA                 *ia_ptr,
    IN  DAPL_ASYNC_HANDLER_TYPE handler_type,
    IN  unsigned int            *callback_handle,
    IN  ib_async_handler_t      callback,
    IN  void                    *context )
{
    dapl_ibal_ca_t      *p_ca;
    dapl_ibal_evd_cb_t  *evd_cb;

    UNREFERENCED_PARAMETER(callback_handle);

    p_ca = (dapl_ibal_ca_t *) ia_ptr->hca_ptr->ib_hca_handle;
    if (p_ca == NULL)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsISAC: can't find %s HCA\n",
                       (ia_ptr->header.provider)->device_name);
        return (DAT_INVALID_HANDLE);
    }

    if (handler_type != DAPL_ASYNC_CQ_COMPLETION)
    {
        evd_cb = dapli_find_evd_cb_by_context (context, p_ca);
        if (evd_cb == NULL)
        {
            /*
             * No record for this EVD; allocate one
             */
            evd_cb = dapl_os_alloc (sizeof (dapl_ibal_evd_cb_t));
            if (evd_cb == NULL)
            {
                dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: can't alloc res\n","DsISAC");
                return (DAT_INSUFFICIENT_RESOURCES);
            }
            dapl_os_memzero (evd_cb, sizeof(dapl_ibal_evd_cb_t));

            evd_cb->context = context;

            /*
             * Add the new EVD CB to the list
             */
            LOCK_INSERT_TAIL( p_ca->evd_cb_lock,
                              p_ca->evd_cb_head,
                              evd_cb->next );
        }

        switch (handler_type)
        {
            case DAPL_ASYNC_UNAFILIATED:
                evd_cb->pfn_async_err_cb = callback;
                break;
            case DAPL_ASYNC_CQ_ERROR:
                evd_cb->pfn_async_cq_err_cb = callback;
                break;
            case DAPL_ASYNC_QP_ERROR:
                evd_cb->pfn_async_qp_err_cb = callback;
                break;
            default:
                break;
        }
    }

    return DAT_SUCCESS;
}
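
/*
 * Illustrative usage sketch (not part of the original source): register an
 * unaffiliated-error handler on the async EVD; callback_handle is unused
 * by this implementation:
 *
 *     dapls_ib_setup_async_callback (ia_ptr,
 *                                    DAPL_ASYNC_UNAFILIATED,
 *                                    NULL,
 *                                    (ib_async_handler_t)
 *                                        dapl_evd_un_async_error_callback,
 *                                    async_evd_ptr);   // context
 */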

/*
 * dapls_ib_query_hca
 *
 * Query the HCA attributes
 *
 * Input:
 *      hca_ptr         HCA handle
 *
 * Output:
 *      ia_attr         attributes of the IA
 *      ep_attr         attributes of the EP
 *      ip_addr         local IP address of the HCA
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_INVALID_PARAMETER
 */
DAT_RETURN dapls_ib_query_hca (
    IN  DAPL_HCA        *hca_ptr,
    OUT DAT_IA_ATTR     *ia_attr,
    OUT DAT_EP_ATTR     *ep_attr,
    OUT DAT_SOCK_ADDR6  *ip_addr)
{
    ib_ca_attr_t    *p_hca_attr;
    dapl_ibal_ca_t  *p_ca;
    ib_api_status_t ib_status;
    ib_hca_port_t   port_num;
    GID             gid;
    DAT_SOCK_ADDR6  *p_sock_addr;
    DAT_RETURN      dat_status = DAT_SUCCESS;

    port_num = hca_ptr->port_num;

    p_ca = (dapl_ibal_ca_t *) hca_ptr->ib_hca_handle;
    if (p_ca == NULL)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIQH: invalid handle %p",
                       hca_ptr);
        return (DAT_INVALID_HANDLE);
    }

    ib_status = ib_query_ca ( p_ca->h_ca,
                              p_ca->p_ca_attr,
                              &p_ca->ca_attr_size );
    if (ib_status != IB_SUCCESS)
    {
        dapl_dbg_log ( DAPL_DBG_TYPE_ERR,
                       "--> DsIQH: ib_query_ca returned failed status = %s\n",
                       ib_get_err_str(ib_status));
        return (dapl_ib_status_convert (ib_status));
    }

    p_hca_attr = p_ca->p_ca_attr;

    if (ip_addr != NULL)
    {
        p_sock_addr = dapl_os_alloc(sizeof(DAT_SOCK_ADDR6));
        if ( !p_sock_addr )
        {
            dat_status = DAT_INSUFFICIENT_RESOURCES;
            dapl_dbg_log (DAPL_DBG_TYPE_ERR,
                          " Query Hca alloc Err: status %d\n", dat_status);
            return dat_status;
        }
        dapl_os_memzero(p_sock_addr, sizeof(DAT_SOCK_ADDR6));

        gid.gid_prefix =
            p_hca_attr->p_port_attr[port_num-1].p_gid_table->unicast.prefix;
        gid.guid =
            p_hca_attr->p_port_attr[port_num-1].p_gid_table->unicast.interface_id;

        dat_status = dapls_ns_map_ipaddr(hca_ptr, gid,
                                         (DAT_IA_ADDRESS_PTR)p_sock_addr);
        if ( dat_status != DAT_SUCCESS )
        {
            dapl_dbg_log (DAPL_DBG_TYPE_ERR,
                          " SA Query for local IP failed= %d\n",
                          dat_status );
            /* what to do next ? */
        }

        dapl_dbg_log (DAPL_DBG_TYPE_CM, "SA query GID for IP: ");
        dapl_dbg_log ( DAPL_DBG_TYPE_CM, "%d:%d:%d:%d\n",
            (uint8_t)((DAT_IA_ADDRESS_PTR )p_sock_addr)->sa_data[2]&0xff,
            (uint8_t)((DAT_IA_ADDRESS_PTR )p_sock_addr)->sa_data[3]&0xff,
            (uint8_t)((DAT_IA_ADDRESS_PTR )p_sock_addr)->sa_data[4]&0xff,
            (uint8_t)((DAT_IA_ADDRESS_PTR )p_sock_addr)->sa_data[5]&0xff);

        hca_ptr->hca_address = *p_sock_addr;

        /* if the structure address is not from our hca_ptr */
        if ( ip_addr != &hca_ptr->hca_address )
        {
            *ip_addr = *p_sock_addr;
        }

        dapl_os_free (p_sock_addr, sizeof(DAT_SOCK_ADDR6));
    } /* ip_addr != NULL */

    if ( ia_attr != NULL )
    {
        dapl_os_memzero (ia_attr->adapter_name,
                         (int)sizeof(ia_attr->adapter_name ));
        dapl_os_memcpy (ia_attr->adapter_name,
                        DAT_ADAPTER_NAME,
                        min ( (int)dapl_os_strlen(DAT_ADAPTER_NAME),
                              (int)(DAT_NAME_MAX_LENGTH)-1 ) );

        dapl_os_memzero (ia_attr->vendor_name,
                         (int)sizeof(ia_attr->vendor_name));
        dapl_os_memcpy(ia_attr->vendor_name,
                       DAT_VENDOR_NAME,
                       min ((int)dapl_os_strlen(DAT_VENDOR_NAME),
                            (int)(DAT_NAME_MAX_LENGTH)-1 ));

        /*
         * This value should be revisited; it can be set by DAT consumers
         */
        ia_attr->ia_address_ptr           = (DAT_PVOID)&hca_ptr->hca_address;
        ia_attr->hardware_version_major   = p_hca_attr->dev_id;
        ia_attr->hardware_version_minor   = p_hca_attr->revision;
        ia_attr->max_eps                  = p_hca_attr->max_qps;
        ia_attr->max_dto_per_ep           = p_hca_attr->max_wrs;
        ia_attr->max_rdma_read_per_ep     = p_hca_attr->max_qp_resp_res;
        ia_attr->max_evds                 = p_hca_attr->max_cqs;
        ia_attr->max_evd_qlen             = p_hca_attr->max_cqes;
        ia_attr->max_iov_segments_per_dto = p_hca_attr->max_sges;
        ia_attr->max_lmrs                 = p_hca_attr->init_regions;
        ia_attr->max_lmr_block_size       = p_hca_attr->init_region_size;
        ia_attr->max_rmrs                 = p_hca_attr->init_windows;
        ia_attr->max_lmr_virtual_address  = p_hca_attr->max_addr_handles;
        ia_attr->max_rmr_target_address   = p_hca_attr->max_addr_handles;
        ia_attr->max_pzs                  = p_hca_attr->max_pds;
        /*
         * DAT spec does not tie max_mtu_size with IB MTU
         *
         *  ia_attr->max_mtu_size =
         *          dapl_ibal_mtu_table[p_hca_attr->p_port_attr->mtu];
         */
        ia_attr->max_mtu_size =
                p_hca_attr->p_port_attr->max_msg_size;
        ia_attr->max_rdma_size =
                p_hca_attr->p_port_attr->max_msg_size;
        ia_attr->num_transport_attr = 0;
        ia_attr->transport_attr     = NULL;
        ia_attr->num_vendor_attr    = 0;
        ia_attr->vendor_attr        = NULL;

        dapl_dbg_log(DAPL_DBG_TYPE_UTIL, " --> DsIMU_qHCA: (ver=%x) ep %d "
                     "ep_q %d evd %d evd_q %d\n",
                     ia_attr->hardware_version_major,
                     ia_attr->max_eps, ia_attr->max_dto_per_ep,
                     ia_attr->max_evds, ia_attr->max_evd_qlen );
        dapl_dbg_log(DAPL_DBG_TYPE_UTIL,
                     " --> DsIMU_qHCA: mtu %llu rdma %llu iov %d lmr %d rmr %d"
                     " rdma_read %d\n",
                     ia_attr->max_mtu_size, ia_attr->max_rdma_size,
                     ia_attr->max_iov_segments_per_dto, ia_attr->max_lmrs,
                     ia_attr->max_rmrs, ia_attr->max_rdma_read_per_ep );
    }

    if ( ep_attr != NULL )
    {
        /*
         * DAT spec does not tie max_mtu_size with IB MTU
         *
         *  ep_attr->max_mtu_size =
         *          dapl_ibal_mtu_table[p_hca_attr->p_port_attr->mtu];
         */
        ep_attr->max_mtu_size     = p_hca_attr->p_port_attr->max_msg_size;
        ep_attr->max_rdma_size    = p_hca_attr->p_port_attr->max_msg_size;
        ep_attr->max_recv_dtos    = p_hca_attr->max_wrs;
        ep_attr->max_request_dtos = p_hca_attr->max_wrs;
        ep_attr->max_recv_iov     = p_hca_attr->max_sges;
        ep_attr->max_request_iov  = p_hca_attr->max_sges;
        ep_attr->max_rdma_read_in = p_hca_attr->max_qp_resp_res;
        ep_attr->max_rdma_read_out= p_hca_attr->max_qp_resp_res;

        dapl_dbg_log(DAPL_DBG_TYPE_UTIL,
                     " --> DsIMU_qHCA: msg %llu dto %d iov %d rdma i%d,o%d\n",
                     ep_attr->max_mtu_size,
                     ep_attr->max_recv_dtos, ep_attr->max_recv_iov,
                     ep_attr->max_rdma_read_in, ep_attr->max_rdma_read_out);
    }

    return DAT_SUCCESS;
}

DAT_RETURN
dapls_ib_completion_poll (
    IN  ib_hca_handle_t         hca_handle,
    IN  ib_cq_handle_t          cq_handle,
    IN  ib_work_completion_t*   cqe_ptr)
{
    ib_api_status_t         ib_status;
    ib_work_completion_t    *cqe_filled;

    /*
     * For now we poll for a single CQE. Polling for more completions
     * per call would be more efficient, but requires changing the logic
     * in dapl_evd_dto_callback() to process more than one completion.
     */
    cqe_ptr->p_next = NULL;
    cqe_filled      = NULL;

    if ( !hca_handle )
    {
        return DAT_INVALID_HANDLE;
    }

    ib_status = ib_poll_cq (cq_handle, &cqe_ptr, &cqe_filled);

    if ( ib_status == IB_INVALID_CQ_HANDLE )
    {
        ib_status = IB_NOT_FOUND;
    }

    return dapl_ib_status_convert (ib_status);
}

DAT_RETURN
dapls_ib_completion_notify (
    IN  ib_hca_handle_t         hca_handle,
    IN  ib_cq_handle_t          cq_handle,
    IN  ib_notification_type_t  type)
{
    ib_api_status_t ib_status;
    DAT_BOOLEAN     solic_notify;

    if ( !hca_handle )
    {
        return DAT_INVALID_HANDLE;
    }

    solic_notify = (type == IB_NOTIFY_ON_SOLIC_COMP) ? DAT_TRUE : DAT_FALSE;
    ib_status = ib_rearm_cq ( cq_handle, solic_notify );

    return dapl_ib_status_convert (ib_status);
}

DAT_RETURN
dapls_ib_n_completions_notify (
    IN  ib_hca_handle_t hca_handle,
    IN  ib_cq_handle_t  cq_handle,
    IN  uint32_t        n_cqes)
{
    ib_api_status_t ib_status;

    UNREFERENCED_PARAMETER(hca_handle);

    ib_status = ib_rearm_n_cq (
                        cq_handle,
                        n_cqes );

    return dapl_ib_status_convert (ib_status);
}

DAT_RETURN
dapls_ib_peek_cq (
    IN  ib_cq_handle_t  cq_handle,
    OUT uint32_t*       p_n_cqes)
{
    ib_api_status_t ib_status;

    ib_status = ib_peek_cq (
                        cq_handle,
                        p_n_cqes );

    return dapl_ib_status_convert (ib_status);
}

DAT_RETURN
dapls_ib_wait_object_create (
    IN  cl_waitobj_handle_t*    p_cq_wait_obj_handle)
{
    cl_status_t cl_status;

    cl_status = cl_waitobj_create (FALSE /* auto_reset */, p_cq_wait_obj_handle);

    if (cl_status == CL_SUCCESS)
        return DAT_SUCCESS;

    return DAT_INTERNAL_ERROR;
}

DAT_RETURN
dapls_ib_wait_object_destroy (
    IN  cl_waitobj_handle_t cq_wait_obj_handle)
{
    cl_status_t cl_status;

    cl_status = cl_waitobj_destroy (cq_wait_obj_handle);

    if (cl_status == CL_SUCCESS)
        return DAT_SUCCESS;

    return DAT_INTERNAL_ERROR;
}

DAT_RETURN
dapls_ib_wait_object_wakeup (
    IN  cl_waitobj_handle_t cq_wait_obj_handle)
{
    cl_status_t cl_status;

    cl_status = cl_waitobj_signal (cq_wait_obj_handle);

    if (cl_status == CL_SUCCESS)
        return DAT_SUCCESS;

    return DAT_INTERNAL_ERROR;
}

DAT_RETURN
dapls_ib_wait_object_wait (
    IN  cl_waitobj_handle_t cq_wait_obj_handle,
    IN  uint32_t            timeout)
{
    cl_status_t cl_status;

    cl_status = cl_waitobj_wait_on (cq_wait_obj_handle, timeout, TRUE );

    switch (cl_status)
    {
        case CL_SUCCESS:
            return DAT_SUCCESS;
        case CL_TIMEOUT:
            dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> wait_object_wait: cl_timeout: %d\n", timeout);
            return DAT_TIMEOUT_EXPIRED;
        case CL_NOT_DONE:
            return DAT_SUCCESS;
        default:
            dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> wait_object_wait: cl_error: %d\n", cl_status);
            return DAT_INTERNAL_ERROR;
    }
}
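
/*
 * Illustrative lifecycle sketch (not part of the original source),
 * assuming complib's microsecond timeout for cl_waitobj_wait_on():
 *
 *     cl_waitobj_handle_t wait_obj;
 *
 *     if (dapls_ib_wait_object_create (&wait_obj) == DAT_SUCCESS)
 *     {
 *         // hand wait_obj to the EVD as cq_wait_obj_handle, then:
 *         dapls_ib_wait_object_wait (wait_obj, 1000000);  // 1 second
 *         // another thread may call dapls_ib_wait_object_wakeup (wait_obj)
 *         dapls_ib_wait_object_destroy (wait_obj);
 *     }
 */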

/*
 * dapls_ib_get_async_event
 *
 * Translate an asynchronous event type to the DAT event.
 * Note that different providers have different sets of errors.
 *
 * Input:
 *      cause_ptr       provider event cause
 *
 * Output:
 *      async_event     DAT mapping of error
 *
 * Returns:
 *      DAT_SUCCESS
 *      DAT_NOT_IMPLEMENTED     Caller is not interested in this event
 */
DAT_RETURN dapls_ib_get_async_event(
    IN  ib_async_event_rec_t    *cause_ptr,
    OUT DAT_EVENT_NUMBER        *async_event)
{
    ib_async_event_t    event_id;
    DAT_RETURN          dat_status;

    dat_status = DAT_SUCCESS;
    event_id   = cause_ptr->code;

    dapl_dbg_log (DAPL_DBG_TYPE_WARN,
                  "--> DsAE: event_id = %d\n", event_id);

    switch (event_id )
    {
        case IB_AE_SQ_ERROR:
        case IB_AE_SQ_DRAINED:
        case IB_AE_RQ_ERROR:
        {
            *async_event = DAT_ASYNC_ERROR_EP_BROKEN;
            break;
        }

        /* INTERNAL errors */
        case IB_AE_QP_FATAL:
        case IB_AE_CQ_ERROR:
        case IB_AE_LOCAL_FATAL:
        case IB_AE_WQ_REQ_ERROR:
        case IB_AE_WQ_ACCESS_ERROR:
        {
            *async_event = DAT_ASYNC_ERROR_PROVIDER_INTERNAL_ERROR;
            break;
        }

        /* CATASTROPHIC errors */
        case IB_AE_FLOW_CTRL_ERROR:
        case IB_AE_BUF_OVERRUN:
        {
            *async_event = DAT_ASYNC_ERROR_IA_CATASTROPHIC;
            break;
        }

        default:
        {
            /*
             * Errors we are not interested in reporting, e.g.:
             * IB_AE_QP_APM_ERROR
             * IB_AE_PORT_ACTIVE
             */
            dat_status = DAT_NOT_IMPLEMENTED;
        }
    }

    return dat_status;
}

/*
 * dapls_ib_get_dto_status
 *
 * Return the DAT status of a DTO operation
 *
 * Input:
 *      cqe_ptr         pointer to completion queue entry
 *
 * Returns:
 *      Value from the IB-to-DAT status mapping below
 */
DAT_DTO_COMPLETION_STATUS
dapls_ib_get_dto_status(
    IN  ib_work_completion_t    *cqe_ptr)
{
    ib_uint32_t ib_status;

    ib_status = DAPL_GET_CQE_STATUS (cqe_ptr);

    switch (ib_status)
    {
        case IB_COMP_ST_SUCCESS :
            return DAT_DTO_SUCCESS;
        case IB_COMP_ST_LOCAL_LEN_ERR:
            return DAT_DTO_ERR_LOCAL_LENGTH;
        case IB_COMP_ST_LOCAL_OP_ERR:
            return DAT_DTO_ERR_LOCAL_EP;
        case IB_COMP_ST_LOCAL_PROTECT_ERR:
            return DAT_DTO_ERR_LOCAL_PROTECTION;
        case IB_COMP_ST_WR_FLUSHED_ERR:
            return DAT_DTO_ERR_FLUSHED;
        case IB_COMP_ST_MW_BIND_ERR:
            return DAT_RMR_OPERATION_FAILED;
        case IB_COMP_ST_REM_ACC_ERR:
            return DAT_DTO_ERR_REMOTE_ACCESS;
        case IB_COMP_ST_REM_OP_ERR:
            return DAT_DTO_ERR_REMOTE_RESPONDER;
        case IB_COMP_ST_RNR_COUNTER:
            return DAT_DTO_ERR_RECEIVER_NOT_READY;
        case IB_COMP_ST_TRANSP_COUNTER:
            return DAT_DTO_ERR_TRANSPORT;
        case IB_COMP_ST_REM_REQ_ERR:
            return DAT_DTO_ERR_REMOTE_RESPONDER;
        case IB_COMP_ST_BAD_RESPONSE_ERR:
            return DAT_DTO_ERR_BAD_RESPONSE;
        case IB_COMP_ST_EE_STATE_ERR:
        case IB_COMP_ST_EE_CTX_NO_ERR:
            return DAT_DTO_ERR_TRANSPORT;
        default:
            return DAT_DTO_FAILURE;
    }
}
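
/*
 * Illustrative usage sketch (not part of the original source): translate
 * the status of a polled completion:
 *
 *     ib_work_completion_t cqe;
 *
 *     if (dapls_ib_completion_poll (hca_handle, cq_handle, &cqe) ==
 *         DAT_SUCCESS)
 *     {
 *         DAT_DTO_COMPLETION_STATUS st = dapls_ib_get_dto_status (&cqe);
 *         // st == DAT_DTO_SUCCESS on a good completion
 *     }
 */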

/*
 * Map all IBAPI DTO completion codes to the DAT equivalent.
 *
 * dapls_ib_get_dat_event
 *
 * Return a DAT connection event given a provider CM event.
 *
 * N.B. Some architectures combine async and CM events into a
 *      generic async event. In that case, dapls_ib_get_dat_event()
 *      and dapls_ib_get_async_event() should be entry points that
 *      call into a common routine.
 *
 * Input:
 *      ib_cm_event     event provided to the dapl callback routine
 *      active          switch indicating active or passive connection
 *
 * Returns:
 *      DAT_EVENT_NUMBER of translated provider value
 */
DAT_EVENT_NUMBER
dapls_ib_get_dat_event (
    IN  const ib_cm_events_t    ib_cm_event,
    IN  DAT_BOOLEAN             active)
{
    DAT_EVENT_NUMBER    dat_event_num = 0;

    UNREFERENCED_PARAMETER (active);

    switch ( ib_cm_event)
    {
        case IB_CME_CONNECTED:
            dat_event_num = DAT_CONNECTION_EVENT_ESTABLISHED;
            break;
        case IB_CME_DISCONNECTED:
            dat_event_num = DAT_CONNECTION_EVENT_DISCONNECTED;
            break;
        case IB_CME_DISCONNECTED_ON_LINK_DOWN:
            dat_event_num = DAT_CONNECTION_EVENT_DISCONNECTED;
            break;
        case IB_CME_CONNECTION_REQUEST_PENDING:
            dat_event_num = DAT_CONNECTION_REQUEST_EVENT;
            break;
        case IB_CME_CONNECTION_REQUEST_PENDING_PRIVATE_DATA:
            dat_event_num = DAT_CONNECTION_REQUEST_EVENT;
            break;
        case IB_CME_DESTINATION_REJECT:
            dat_event_num = DAT_CONNECTION_EVENT_NON_PEER_REJECTED;
            break;
        case IB_CME_DESTINATION_REJECT_PRIVATE_DATA:
            dat_event_num = DAT_CONNECTION_EVENT_PEER_REJECTED;
            break;
        case IB_CME_DESTINATION_UNREACHABLE:
            dat_event_num = DAT_CONNECTION_EVENT_UNREACHABLE;
            break;
        case IB_CME_TOO_MANY_CONNECTION_REQUESTS:
            dat_event_num = DAT_CONNECTION_EVENT_NON_PEER_REJECTED;
            break;
        case IB_CME_LOCAL_FAILURE:
            dat_event_num = DAT_CONNECTION_EVENT_BROKEN;
            break;
        case IB_CME_REPLY_RECEIVED:
        case IB_CME_REPLY_RECEIVED_PRIVATE_DATA:
        default:
            break;
    }

    dapl_dbg_log (DAPL_DBG_TYPE_CM,
                  " dapls_ib_get_dat_event: event translation: (%s) "
                  "ib_event 0x%x dat_event 0x%x\n",
                  active ? "active" : "passive",
                  ib_cm_event,
                  dat_event_num);

    return dat_event_num;
}

/*
 * dapls_ib_get_cm_event
 *
 * Return a provider CM event given a DAT connection event.
 *
 * N.B. Some architectures combine async and CM events into a
 *      generic async event. In that case, dapls_ib_get_cm_event()
 *      and dapls_ib_get_async_event() should be entry points that
 *      call into a common routine.
 *
 *      WARNING: In this implementation, there are multiple CM
 *      events that map to a single DAT event. Be very careful
 *      with provider routines that depend on this reverse mapping;
 *      they may have to accommodate more CM events than they
 *      'naturally' would.
 *
 * Input:
 *      dat_event_num   DAT event we need an equivalent CM event for
 *
 * Returns:
 *      ib_cm_event of translated DAPL value
 */
ib_cm_events_t
dapls_ib_get_cm_event (
    IN  DAT_EVENT_NUMBER    dat_event_num)
{
    ib_cm_events_t  ib_cm_event = 0;

    switch (dat_event_num)
    {
        case DAT_CONNECTION_EVENT_ESTABLISHED:
            ib_cm_event = IB_CME_CONNECTED;
            break;
        case DAT_CONNECTION_EVENT_DISCONNECTED:
            ib_cm_event = IB_CME_DISCONNECTED;
            break;
        case DAT_CONNECTION_REQUEST_EVENT:
            ib_cm_event = IB_CME_CONNECTION_REQUEST_PENDING;
            break;
        case DAT_CONNECTION_EVENT_NON_PEER_REJECTED:
            ib_cm_event = IB_CME_DESTINATION_REJECT;
            break;
        case DAT_CONNECTION_EVENT_PEER_REJECTED:
            ib_cm_event = IB_CME_DESTINATION_REJECT_PRIVATE_DATA;
            break;
        case DAT_CONNECTION_EVENT_UNREACHABLE:
            ib_cm_event = IB_CME_DESTINATION_UNREACHABLE;
            break;
        case DAT_CONNECTION_EVENT_BROKEN:
            ib_cm_event = IB_CME_LOCAL_FAILURE;
            break;
        default:
            break;
    }

    return ib_cm_event;
}

/*
 * Local variables:
 *  c-indent-level: 4
 *  c-basic-offset: 4
 * End:
 */