\r
#include "al.h"\r
#include "al_ca.h"\r
-#include "al_cm_shared.h"\r
+#include "al_cm_cep.h"\r
#include "al_common.h"\r
#include "al_debug.h"\r
#include "al_mad_pool.h"\r
}\r
\r
cl_spinlock_release( &p_obj->lock );\r
+\r
+ /* Cleanup any left-over connections. */\r
+ al_cep_cleanup_al( h_al );\r
}\r
\r
\r
}\r
\r
\r
-\r
-static void\r
-__free_conns(\r
- IN const ib_al_handle_t h_al )\r
-{\r
- cl_list_item_t *p_list_item;\r
- ib_cm_handle_t h_conn;\r
-\r
- /*\r
- * Report any outstanding connections left lying around. We should\r
- * never enter the loop below if the code is written correctly.\r
- */\r
- for( p_list_item = cl_qlist_head( &h_al->conn_list );\r
- p_list_item != cl_qlist_end( &h_al->conn_list );\r
- p_list_item = cl_qlist_head( &h_al->conn_list ) )\r
- {\r
- CL_ASSERT( !p_list_item );\r
-\r
- h_conn = PARENT_STRUCT( p_list_item, al_conn_t, al_item );\r
-\r
- /* Release the connection object, so the CM can clean-up properly. */\r
- cm_cleanup_conn( h_conn );\r
- }\r
-}\r
-\r
-\r
-\r
void\r
free_al(\r
IN al_obj_t *p_obj )\r
/* Free any MADs not returned by the user. */\r
__free_mads( h_al );\r
\r
- /* Cleanup any left-over connections. */\r
- __free_conns( h_al );\r
-\r
#ifdef CL_KERNEL\r
cl_vector_destroy( &h_al->hdl_vector );\r
#endif\r
}\r
\r
\r
-\r
-\r
ib_api_status_t\r
ib_query_ca_by_guid(\r
IN const ib_al_handle_t h_al,\r
\r
\r
\r
-void\r
-al_insert_conn(\r
- IN const ib_al_handle_t h_al,\r
- IN const ib_cm_handle_t h_conn )\r
-{\r
- ref_al_obj( &h_al->obj );\r
- cl_spinlock_acquire( &h_al->obj.lock );\r
-\r
- h_conn->h_al = h_al;\r
- cl_qlist_insert_tail( &h_al->conn_list, &h_conn->al_item );\r
-#ifdef CL_KERNEL\r
- h_conn->hdl = al_hdl_insert( h_al, h_conn, AL_OBJ_TYPE_H_CONN );\r
-#endif\r
-\r
- cl_spinlock_release( &h_al->obj.lock );\r
-}\r
-\r
-\r
-\r
-void\r
-al_remove_conn(\r
- IN const ib_cm_handle_t h_conn )\r
-{\r
- cl_spinlock_acquire( &h_conn->h_al->obj.lock );\r
- cl_qlist_remove_item( &h_conn->h_al->conn_list, &h_conn->al_item );\r
-#ifdef CL_KERNEL\r
- al_hdl_free( h_conn->h_al, h_conn->hdl );\r
-#endif\r
- cl_spinlock_release( &h_conn->h_al->obj.lock );\r
-\r
- deref_al_obj( &h_conn->h_al->obj );\r
-\r
- h_conn->h_al = NULL;\r
-#ifdef CL_KERNEL\r
- h_conn->hdl = AL_INVALID_HANDLE;\r
-#endif\r
-}\r
-\r
-\r
-\r
void\r
al_insert_mad(\r
IN const ib_al_handle_t h_al,\r
\r
cl_qlist_t key_list;\r
cl_qlist_t query_list;\r
- cl_qlist_t conn_list;\r
+ cl_qlist_t cep_list;\r
\r
#ifdef CL_KERNEL\r
/* Handle manager is only needed in the kernel. */\r
#include "al_mgr.h"\r
#include "al_pnp.h"\r
#include "al_qp.h"\r
-\r
-#if defined(CL_KERNEL)\r
-#include "al_cm.h"\r
-#endif\r
#include "ib_common.h"\r
\r
\r
case IB_AE_QP_COMM:\r
case IB_AE_QP_APM:\r
case IB_AE_QP_APM_ERROR:\r
-#if defined(CL_KERNEL)\r
- cm_async_event_cb( &p_event_item->event_rec );\r
-#endif\r
- /* Fall through next case. */\r
-\r
case IB_AE_QP_FATAL:\r
case IB_AE_RQ_ERROR:\r
case IB_AE_SQ_ERROR:\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+\r
+#pragma once\r
+\r
+#ifndef _AL_CM_CEP_H_\r
+#define _AL_CM_CEP_H_\r
+\r
+\r
+#include <iba/ib_al.h>\r
+#include "al_common.h"\r
+\r
+\r
+#define CEP_EVENT_TIMEOUT 0x80000000\r
+#define CEP_EVENT_RECV 0x40000000\r
+#define CEP_EVENT_REQ 0x00000001\r
+#define CEP_EVENT_REP 0x00000002\r
+#define CEP_EVENT_RTU 0x00000004\r
+#define CEP_EVENT_DREQ 0x00000008\r
+#define CEP_EVENT_DREP 0x00000010\r
+#define CEP_EVENT_MRA 0x00000020\r
+#define CEP_EVENT_REJ 0x00000040\r
+#define CEP_EVENT_LAP 0x00000080\r
+#define CEP_EVENT_APR 0x00000100\r
+#define CEP_EVENT_SIDR 0x00800000\r
+\r
+\r
+#define AL_INVALID_CID 0xFFFFFFFF\r
+\r
+\r
+typedef void\r
+(*al_pfn_cep_cb_t)(\r
+ IN const ib_al_handle_t h_al,\r
+ IN ib_cep_t* const p_cep );\r
+/* PARAMETERS\r
+* h_al\r
+* [in] Handle to the AL instance to pass into the al_cep_poll call.\r
+*\r
+* p_cep\r
+* [in] Pointer to an ib_cep_t structure containing the CID and context\r
+*	for the CEP on which the event occurred. The CID should be passed\r
+* into the al_cep_poll call.\r
+*\r
+* RETURN VALUES:\r
+* This function does not return a value.\r
+*\r
+* NOTES\r
+* The callback is invoked at DISPATCH_LEVEL.\r
+*\r
+* Recipients of the callback are expected to call al_cep_poll to retrieve\r
+* event specific details until al_cep_poll returns IB_NOT_DONE. This may\r
+* be done in a different thread context.\r
+*********/\r
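+\r
+/*\r
+ * EXAMPLE\r
+ *	A minimal sketch of a conforming callback, for illustration only.\r
+ *	__process_cep_event is a hypothetical helper, not part of this\r
+ *	header:\r
+ *\r
+ *	static void\r
+ *	__my_cep_cb(\r
+ *		IN	const	ib_al_handle_t	h_al,\r
+ *		IN		ib_cep_t* const	p_cep )\r
+ *	{\r
+ *		ib_cep_t			new_cep;\r
+ *		ib_mad_element_t	*p_mad;\r
+ *\r
+ *		while( al_cep_poll( h_al, p_cep->cid, &new_cep, &p_mad ) ==\r
+ *			IB_SUCCESS )\r
+ *		{\r
+ *			__process_cep_event( h_al, p_cep, &new_cep, p_mad );\r
+ *			ib_put_mad( p_mad );\r
+ *		}\r
+ *	}\r
+ *********/\r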
+\r
+\r
+ib_api_status_t\r
+create_cep_mgr(\r
+ IN al_obj_t* const p_parent_obj );\r
+\r
+\r
+void\r
+al_cep_cleanup_al(\r
+ IN const ib_al_handle_t h_al );\r
+\r
+\r
+ib_api_status_t\r
+al_create_cep(\r
+ IN ib_al_handle_t h_al,\r
+ IN al_pfn_cep_cb_t pfn_cb,\r
+ IN void *context,\r
+ OUT net32_t* const p_cid );\r
+/*\r
+* NOTES\r
+*	This function may be invoked at DISPATCH_LEVEL.\r
+*\r
+* The pfn_cb parameter may be NULL in the kernel if using IRPs for\r
+* event notification.\r
+*********/\r
+\r
+\r
+/* Destruction is asynchronous. */\r
+ib_api_status_t\r
+al_destroy_cep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN ib_pfn_destroy_cb_t pfn_destroy_cb );\r
+/*\r
+* NOTES\r
+*	Destruction is asynchronous; the destroy callback, if provided, is\r
+*	invoked once all references on the CEP have been released.\r
+*	Clients must not invoke this function from a CEP callback, but should\r
+*	instead return IB_CANCELLED or another appropriate value.\r
+*********/\r
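+\r
+/*\r
+ * EXAMPLE\r
+ *	Typical create/destroy pairing, for illustration only.  __my_cep_cb\r
+ *	and my_context are caller-supplied placeholders:\r
+ *\r
+ *	net32_t			cid;\r
+ *	ib_api_status_t	status;\r
+ *\r
+ *	status = al_create_cep( h_al, __my_cep_cb, my_context, &cid );\r
+ *	if( status != IB_SUCCESS )\r
+ *		return status;\r
+ *	...\r
+ *	al_destroy_cep( h_al, cid, NULL );\r
+ *********/\r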
+\r
+ib_api_status_t\r
+al_cep_listen(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN ib_cep_listen_t* const p_listen_info );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_pre_req(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_req_t* const p_cm_req,\r
+ OUT ib_qp_mod_t* const p_init );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_send_req(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_pre_rep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN void *context,\r
+ IN const ib_cm_rep_t* const p_cm_rep,\r
+ OUT ib_qp_mod_t* const p_init );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_send_rep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_get_rtr_attr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT ib_qp_mod_t* const p_rtr );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_get_rts_attr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT ib_qp_mod_t* const p_rts );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_rtu(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const uint8_t* p_pdata OPTIONAL,\r
+ IN uint8_t pdata_len );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_rej(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN ib_rej_status_t rej_status,\r
+ IN const uint8_t* const p_ari,\r
+ IN uint8_t ari_len,\r
+ IN const uint8_t* const p_pdata,\r
+ IN uint8_t pdata_len );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_mra(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_mra_t* const p_cm_mra );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_lap(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_lap_t* const p_cm_lap );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_pre_apr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_apr_t* const p_cm_apr,\r
+ OUT ib_qp_mod_t* const p_apr );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_send_apr(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_dreq(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const uint8_t* const p_pdata OPTIONAL,\r
+ IN const uint8_t pdata_len );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_drep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const ib_cm_drep_t* const p_cm_drep );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_get_timewait(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ OUT uint64_t* const p_timewait_us );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_migrate(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_established(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid );\r
+\r
+\r
+ib_api_status_t\r
+al_cep_poll(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN OUT ib_cep_t* const p_new_cep,\r
+ OUT ib_mad_element_t** const pp_mad );\r
+\r
+\r
+#ifdef CL_KERNEL\r
+NTSTATUS\r
+al_cep_queue_irp(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN IRP* const p_irp );\r
+#endif /* CL_KERNEL */\r
+\r
+\r
+/****s* Access Layer/al_cep_sreq_t\r
+* NAME\r
+* al_cep_sreq_t\r
+*\r
+* DESCRIPTION\r
+* Connection request information used to establish a new connection.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _al_cep_sreq\r
+{\r
+ ib_net64_t svc_id;\r
+\r
+ ib_path_rec_t* __ptr64 p_path;\r
+\r
+ const uint8_t* __ptr64 p_pdata;\r
+ uint8_t pdata_len;\r
+\r
+ uint8_t max_cm_retries;\r
+ ib_net16_t pkey;\r
+ uint32_t timeout_ms;\r
+\r
+} al_cep_sreq_t;\r
+/*\r
+* FIELDS\r
+* svc_id\r
+* The ID of the remote service to which the SIDR request is\r
+* being made.\r
+*\r
+* p_path\r
+* Path information over which to send the request.\r
+*\r
+* p_pdata\r
+* Optional user-defined private data sent as part of the SIDR request.\r
+*\r
+* pdata_len\r
+* Defines the size of the user-defined private data.\r
+*\r
+* max_cm_retries\r
+* The maximum number of times that either CM should\r
+* resend a SIDR message.\r
+*\r
+* timeout_ms\r
+*	Timeout value in milliseconds for the SIDR REQ to expire. The CM will\r
+* add twice packet lifetime to this value to determine the actual timeout\r
+* value used.\r
+*\r
+* pkey\r
+* pkey to be used as part of the request.\r
+*\r
+* SEE ALSO\r
+* al_cep_sreq\r
+*****/\r
+\r
+ib_api_status_t\r
+al_cep_sreq(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+ IN const al_cep_sreq_t* const p_sreq );\r
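+\r
+/*\r
+ * EXAMPLE\r
+ *	Sending a SIDR REQ, for illustration only.  my_svc_id and p_path_rec\r
+ *	are placeholders:\r
+ *\r
+ *	al_cep_sreq_t	sreq;\r
+ *\r
+ *	cl_memclr( &sreq, sizeof(al_cep_sreq_t) );\r
+ *	sreq.svc_id = my_svc_id;\r
+ *	sreq.p_path = p_path_rec;\r
+ *	sreq.max_cm_retries = 4;\r
+ *	sreq.pkey = IB_DEFAULT_PKEY;\r
+ *	sreq.timeout_ms = 1000;\r
+ *	status = al_cep_sreq( h_al, cid, &sreq );\r
+ *********/\r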
+\r
+\r
+/****s* Access Layer/al_cep_srep_t\r
+* NAME\r
+* al_cep_srep_t\r
+*\r
+* DESCRIPTION\r
+* SIDR reply information.\r
+*\r
+* SYNOPSIS\r
+*/\r
+typedef struct _al_cep_srep\r
+{\r
+ net32_t qp_num;\r
+ net32_t qkey;\r
+\r
+ const uint8_t* __ptr64 p_pdata;\r
+ const void* __ptr64 p_info;\r
+\r
+ uint8_t pdata_len;\r
+ uint8_t info_len;\r
+\r
+ ib_sidr_status_t status;\r
+\r
+} al_cep_srep_t;\r
+/*\r
+* FIELDS\r
+* qp_num\r
+* The number of the queue pair on which the requested service\r
+* is supported.\r
+*\r
+*	qkey\r
+* The QKEY of the returned queue pair.\r
+*\r
+* p_pdata\r
+* Optional user-defined private data sent as part of the SIDR reply.\r
+*\r
+* p_info\r
+*	Optional "additional information" sent as part of the SIDR reply.\r
+*\r
+* pdata_len\r
+* Size of the user-defined private data.\r
+*\r
+* info_len\r
+* Size of the "additional information".\r
+*\r
+* status\r
+*	SIDR status value returned in response to a previously received REQ.\r
+*\r
+* SEE ALSO\r
+* al_cep_srep\r
+*****/\r
+\r
+ib_api_status_t\r
+al_cep_srep(\r
+ IN ib_al_handle_t h_al,\r
+ IN net32_t cid,\r
+	IN		const	al_cep_srep_t* const		p_srep );\r
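+\r
+/*\r
+ * EXAMPLE\r
+ *	Sending a SIDR REP, for illustration only.  my_qp_num and my_qkey\r
+ *	are placeholders:\r
+ *\r
+ *	al_cep_srep_t	srep;\r
+ *\r
+ *	cl_memclr( &srep, sizeof(al_cep_srep_t) );\r
+ *	srep.qp_num = my_qp_num;\r
+ *	srep.qkey = my_qkey;\r
+ *	srep.status = IB_SIDR_SUCCESS;\r
+ *	status = al_cep_srep( h_al, cid, &srep );\r
+ *********/\r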
+\r
+\r
+\r
+\r
+/*\r
+ * Return the local ACK timeout value based on the given packet lifetime\r
+ * and target ACK delay. Both input values are assumed to be in the form\r
+ * 4.096 x 2 ^ input.\r
+ */\r
+#define MAX_LOCAL_ACK_TIMEOUT 0x1F /* limited to 5 bits */\r
+\r
+inline uint8_t\r
+calc_lcl_ack_timeout(\r
+ IN const uint8_t round_trip_time,\r
+ IN const uint8_t target_ack_delay )\r
+{\r
+ uint64_t timeout;\r
+ uint8_t local_ack_timeout;\r
+\r
+ if( !target_ack_delay )\r
+ {\r
+ if( round_trip_time > MAX_LOCAL_ACK_TIMEOUT )\r
+ return MAX_LOCAL_ACK_TIMEOUT;\r
+ else\r
+ return round_trip_time;\r
+ }\r
+\r
+	/*\r
+	 * Since both the input and output values are in the same form, we\r
+	 * can ignore the 4.096 portion by dividing it out.\r
+	 */\r
+\r
+	/* The input parameter is the round trip time. */\r
+	timeout = (uint64_t)1 << round_trip_time;\r
+\r
+	/* Add in the target ACK delay (known to be non-zero here). */\r
+	timeout += (uint64_t)1 << target_ack_delay;\r
+\r
+ /* Calculate the local ACK timeout. */\r
+ local_ack_timeout = 1;\r
+ while( (1ui64 << local_ack_timeout) <= timeout )\r
+ {\r
+ local_ack_timeout++;\r
+\r
+ /* Only 5-bits are valid. */\r
+ if( local_ack_timeout > MAX_LOCAL_ACK_TIMEOUT )\r
+ return MAX_LOCAL_ACK_TIMEOUT;\r
+ }\r
+\r
+ return local_ack_timeout;\r
+}\r
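+\r
+/*\r
+ * EXAMPLE\r
+ *	With round_trip_time = 16 and target_ack_delay = 14, the summed\r
+ *	timeout is 2^16 + 2^14 = 81920 (in 4.096us units).  The smallest\r
+ *	exponent x with 2^x > 81920 is 17, so calc_lcl_ack_timeout( 16, 14 )\r
+ *	returns 17.\r
+ */\r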
+\r
+#endif /* _AL_CM_CEP_H_ */\r
uint8_t pdata[IB_REQ_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_req_t;\r
+C_ASSERT( sizeof(mad_cm_req_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
p_req->offset51 = (retries << 4);\r
}\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_req_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
IN const uint8_t data_len,\r
\r
if( p_data )\r
{\r
+ if( data_len > IB_REQ_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
cl_memcpy( p_req->pdata, p_data, data_len );\r
- cl_memclr( p_req->pdata + data_len,\r
- IB_REQ_PDATA_SIZE - data_len );\r
+ cl_memclr( p_req->pdata + data_len, IB_REQ_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_req->pdata, IB_REQ_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
static inline void\r
IN OUT req_path_info_t* const p_path )\r
{\r
if( subn_lcl )\r
- p_path->offset42 = (p_path->offset42 & 0xF0);\r
- else\r
p_path->offset42 = ((p_path->offset42 & 0xF0) | 0x08);\r
+ else\r
+ p_path->offset42 = (p_path->offset42 & 0xF0);\r
}\r
\r
static inline uint8_t\r
uint8_t pdata[IB_MRA_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_mra_t;\r
+C_ASSERT( sizeof(mad_cm_mra_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
IN const uint8_t data_len,\r
IN OUT mad_cm_mra_t* const p_mra )\r
{\r
- if( p_data && data_len > IB_MRA_PDATA_SIZE )\r
- return IB_INVALID_SETTING;\r
-\r
if( p_data )\r
{\r
+ if( data_len > IB_MRA_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
cl_memcpy( p_mra->pdata, p_data, data_len );\r
cl_memclr( p_mra->pdata + data_len, IB_MRA_PDATA_SIZE - data_len );\r
}\r
uint8_t pdata[IB_REJ_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_rej_t;\r
+C_ASSERT( sizeof(mad_cm_rej_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
IN const uint8_t data_len,\r
IN OUT mad_cm_rej_t* const p_rej )\r
{\r
- if( p_data && data_len > IB_REJ_PDATA_SIZE )\r
- return IB_INVALID_PARAMETER;\r
-\r
if( p_data )\r
{\r
+ if( data_len > IB_REJ_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
cl_memcpy( p_rej->pdata, p_data, data_len );\r
- cl_memclr( p_rej->pdata + data_len,\r
- IB_REJ_PDATA_SIZE - data_len );\r
+ cl_memclr( p_rej->pdata + data_len, IB_REJ_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
uint8_t pdata[IB_REP_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_rep_t;\r
+C_ASSERT( sizeof(mad_cm_rep_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
p_rep->offset27 = (rnr_retry_cnt << 5);\r
}\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_rep_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
- IN const uint8_t rep_len,\r
+ IN const uint8_t data_len,\r
IN OUT mad_cm_rep_t* const p_rep )\r
{\r
CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 );\r
+\r
if( p_data )\r
{\r
- cl_memcpy( p_rep->pdata, p_data, rep_len );\r
- cl_memclr( p_rep->pdata + rep_len,\r
- IB_REP_PDATA_SIZE - rep_len );\r
+ if( data_len > IB_REP_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
+ cl_memcpy( p_rep->pdata, p_data, data_len );\r
+ cl_memclr( p_rep->pdata + data_len, IB_REP_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_rep->pdata, IB_REP_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
static inline void\r
uint8_t pdata[IB_RTU_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_rtu_t;\r
+C_ASSERT( sizeof(mad_cm_rtu_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_rtu_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
- IN const uint8_t rtu_len,\r
+ IN const uint8_t data_len,\r
IN OUT mad_cm_rtu_t* const p_rtu )\r
{\r
if( p_data )\r
{\r
- cl_memcpy( p_rtu->pdata, p_data, rtu_len );\r
- cl_memclr( p_rtu->pdata + rtu_len, IB_RTU_PDATA_SIZE - rtu_len );\r
+ if( data_len > IB_RTU_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
+ cl_memcpy( p_rtu->pdata, p_data, data_len );\r
+ cl_memclr( p_rtu->pdata + data_len, IB_RTU_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_rtu->pdata, IB_RTU_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
/* DREQ */\r
uint8_t pdata[IB_DREQ_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_dreq_t;\r
+C_ASSERT( sizeof(mad_cm_dreq_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
__set_low24( &p_dreq->offset8, qpn );\r
}\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_dreq_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
- IN const uint8_t dreq_len,\r
+ IN const uint8_t data_len,\r
IN OUT mad_cm_dreq_t* const p_dreq )\r
{\r
if( p_data )\r
{\r
- cl_memcpy( p_dreq->pdata, p_data, dreq_len );\r
- cl_memclr( p_dreq->pdata + dreq_len,\r
- IB_DREQ_PDATA_SIZE - dreq_len );\r
+ if( data_len > IB_DREQ_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
+ cl_memcpy( p_dreq->pdata, p_data, data_len );\r
+ cl_memclr( p_dreq->pdata + data_len, IB_DREQ_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_dreq->pdata, IB_DREQ_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
static inline void\r
uint8_t pdata[IB_DREP_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_drep_t;\r
+C_ASSERT( sizeof(mad_cm_drep_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
-static inline void\r
+static inline ib_api_status_t\r
conn_drep_set_pdata(\r
IN const uint8_t* const p_data OPTIONAL,\r
- IN const uint8_t drep_len,\r
+ IN const uint8_t data_len,\r
IN OUT mad_cm_drep_t* const p_drep )\r
{\r
if( p_data )\r
{\r
- cl_memcpy( p_drep->pdata, p_data, drep_len );\r
- cl_memclr( p_drep->pdata + drep_len,\r
- IB_DREP_PDATA_SIZE - drep_len );\r
+ if( data_len > IB_DREP_PDATA_SIZE )\r
+ return IB_INVALID_SETTING;\r
+\r
+ cl_memcpy( p_drep->pdata, p_data, data_len );\r
+ cl_memclr( p_drep->pdata + data_len, IB_DREP_PDATA_SIZE - data_len );\r
}\r
else\r
{\r
cl_memclr( p_drep->pdata, IB_DREP_PDATA_SIZE );\r
}\r
+ return IB_SUCCESS;\r
}\r
\r
\r
uint8_t pdata[IB_LAP_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_lap_t;\r
+C_ASSERT( sizeof(mad_cm_lap_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
IN OUT mad_cm_lap_t* const p_lap )\r
{\r
CL_ASSERT( p_lap->hdr.class_ver == IB_MCLASS_CM_VER_2 );\r
- if( p_data && data_len > IB_LAP_PDATA_SIZE )\r
- return IB_INVALID_PARAMETER;\r
\r
cl_memclr( p_lap->pdata, IB_LAP_PDATA_SIZE );\r
if( p_data )\r
{\r
+ if( data_len > IB_LAP_PDATA_SIZE )\r
+			return IB_INVALID_SETTING;\r
+\r
cl_memcpy( p_lap->pdata, p_data, data_len );\r
cl_memclr( p_lap->pdata + data_len,\r
IB_LAP_PDATA_SIZE - data_len );\r
uint8_t pdata[IB_APR_PDATA_SIZE];\r
\r
} PACK_SUFFIX mad_cm_apr_t;\r
+C_ASSERT( sizeof(mad_cm_apr_t) == MAD_BLOCK_SIZE );\r
\r
#include <complib/cl_packoff.h>\r
\r
IN OUT mad_cm_apr_t* const p_apr )\r
{\r
CL_ASSERT( p_apr->hdr.class_ver == IB_MCLASS_CM_VER_2 );\r
- if( p_data && ( data_len > IB_APR_PDATA_SIZE ) )\r
- return IB_INVALID_PARAMETER;\r
-\r
if( p_data )\r
{\r
+ if( data_len > IB_APR_PDATA_SIZE )\r
+			return IB_INVALID_SETTING;\r
+\r
cl_memcpy( p_apr->pdata, p_data, data_len );\r
cl_memclr( p_apr->pdata + data_len,\r
IB_APR_PDATA_SIZE - data_len );\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+\r
+#include <iba/ib_al.h>\r
+#include "al.h"\r
+#include "al_qp.h"\r
+#include "al_cm_cep.h"\r
+#include "al_cm_conn.h"\r
+#include "al_cm_sidr.h"\r
+#include "al_mgr.h"\r
+#include "al_debug.h"\r
+\r
+\r
+typedef struct _al_listen\r
+{\r
+ al_obj_t obj;\r
+ net32_t cid;\r
+\r
+ ib_pfn_cm_req_cb_t pfn_cm_req_cb;\r
+\r
+ /* valid for ud qp_type only */\r
+ const void* __ptr64 sidr_context;\r
+\r
+} al_listen_t;\r
+\r
+\r
+#ifdef CL_KERNEL\r
+\r
+/*\r
+ * Structure for queuing received MADs to the asynchronous processing\r
+ * manager.\r
+ */\r
+typedef struct _cep_async_mad\r
+{\r
+ cl_async_proc_item_t item;\r
+ ib_al_handle_t h_al;\r
+ ib_cep_t cep;\r
+\r
+} cep_async_mad_t;\r
+\r
+#endif /* CL_KERNEL */\r
+\r
+\r
+/*\r
+ * Transition the QP to the error state to flush all outstanding work\r
+ * requests and set the timewait time.  This function may be called\r
+ * when destroying the QP in order to flush all work requests, so we\r
+ * cannot call through the main API, or the call will fail since the\r
+ * QP is no longer in the initialized state.\r
+ */\r
+static void\r
+__cep_timewait_qp(\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ uint64_t timewait = 0;\r
+ ib_qp_mod_t qp_mod;\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_qp );\r
+\r
+ /*\r
+ * The CM should have set the proper timewait time-out value. Reset\r
+ * the QP and let it enter the timewait state.\r
+ */\r
+ if( al_cep_get_timewait( h_qp->obj.h_al,\r
+ ((al_conn_qp_t*)h_qp)->cid, &timewait ) == IB_SUCCESS )\r
+ {\r
+ /* Special checks on the QP state for error handling - see above. */\r
+ if( !h_qp || !AL_OBJ_IS_TYPE( h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+ ( (h_qp->obj.state != CL_INITIALIZED) && \r
+ (h_qp->obj.state != CL_DESTROYING) ) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_CM, ("IB_INVALID_QP_HANDLE\n") );\r
+ return;\r
+ }\r
+\r
+ cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) );\r
+ qp_mod.req_state = IB_QPS_ERROR;\r
+\r
+ /* Modify to error state using function pointers - see above. */\r
+ status = h_qp->pfn_modify_qp( h_qp, &qp_mod, NULL );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("pfn_modify_qp to IB_QPS_ERROR returned %s\n",\r
+ ib_get_err_str( status )) );\r
+ return;\r
+ }\r
+\r
+#ifdef CL_KERNEL\r
+ /* Store the timestamp after which the QP exits timewait. */\r
+ h_qp->timewait = cl_get_time_stamp() + timewait;\r
+#endif /* CL_KERNEL */\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__format_req_path_rec(\r
+ IN const mad_cm_req_t* const p_req,\r
+ IN const req_path_info_t* const p_path,\r
+ OUT ib_path_rec_t* const p_path_rec )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_req );\r
+ CL_ASSERT( p_path );\r
+ CL_ASSERT( p_path_rec );\r
+\r
+ /*\r
+ * Format a local path record. The local ack timeout specified in the\r
+ * REQ is twice the packet life plus the sender's CA ACK delay. When\r
+ * reporting the packet life, we divide the local ack timeout by 2 to\r
+ * approach the path's packet lifetime. Since local ack timeout is\r
+ * expressed as 4.096 * 2^x, subtracting 1 is equivalent to dividing the\r
+ * time in half.\r
+ */\r
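+	/* For example, a local ack timeout of 18 (4.096us * 2^18) is\r
+	 * reported as a packet lifetime of 17 (4.096us * 2^17). */\r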
+ ib_path_rec_init_local( p_path_rec,\r
+ &p_path->local_gid,\r
+ &p_path->remote_gid,\r
+ p_path->local_lid,\r
+ p_path->remote_lid,\r
+ 1, p_req->pkey,\r
+ conn_req_path_get_svc_lvl( p_path ),\r
+ IB_PATH_SELECTOR_EXACTLY, conn_req_get_mtu( p_req ),\r
+ IB_PATH_SELECTOR_EXACTLY,\r
+ conn_req_path_get_pkt_rate( p_path ),\r
+ IB_PATH_SELECTOR_EXACTLY,\r
+ (uint8_t)( conn_req_path_get_lcl_ack_timeout( p_path ) - 1 ),\r
+ 0 );\r
+\r
+ p_path_rec->hop_flow_raw.val = 0;\r
+ /* Add global routing info as necessary. */\r
+ if( !conn_req_path_get_subn_lcl( p_path ) )\r
+ {\r
+ ib_path_rec_set_hop_flow_raw( p_path_rec, p_path->hop_limit,\r
+ conn_req_path_get_flow_lbl( p_path ), FALSE );\r
+ p_path_rec->tclass = p_path->traffic_class;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__format_req_rec(\r
+ IN const mad_cm_req_t* const p_req,\r
+ OUT ib_cm_req_rec_t *p_req_rec )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_req );\r
+ CL_ASSERT( p_req_rec );\r
+\r
+ cl_memclr( p_req_rec, sizeof(ib_cm_req_rec_t) );\r
+\r
+ /* format version specific data */\r
+ p_req_rec->p_req_pdata = p_req->pdata;\r
+\r
+ p_req_rec->qp_type = conn_req_get_qp_type( p_req );\r
+\r
+ p_req_rec->resp_res = conn_req_get_resp_res( p_req );\r
+ p_req_rec->flow_ctrl = conn_req_get_flow_ctrl( p_req );\r
+ p_req_rec->rnr_retry_cnt = conn_req_get_rnr_retry_cnt( p_req );\r
+\r
+ __format_req_path_rec( p_req, &p_req->primary_path,\r
+ &p_req_rec->primary_path );\r
+ __format_req_path_rec( p_req, &p_req->alternate_path,\r
+ &p_req_rec->alt_path );\r
+\r
+ /* These values are filled in later based on listen or peer connections\r
+ p_req_rec->context = ;\r
+ p_req_rec->h_cm_req = ;\r
+ p_req_rec->h_cm_listen = ;\r
+ */\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/******************************************************************************\r
+* Functions that handle incoming REQs that matched to an outstanding listen.\r
+*\r
+*/\r
+\r
+\r
+static void\r
+__listen_req(\r
+ IN al_listen_t* const p_listen,\r
+ IN const ib_cep_t* const p_new_cep,\r
+ IN const mad_cm_req_t* const p_req )\r
+{\r
+ ib_cm_req_rec_t req_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_listen );\r
+ CL_ASSERT( p_new_cep );\r
+ CL_ASSERT( p_req );\r
+\r
+ /* Format the callback record. */\r
+ __format_req_rec( p_req, &req_rec );\r
+\r
+ /* update listen based rec */\r
+ req_rec.context = p_listen->obj.context;\r
+\r
+ req_rec.h_cm_req.cid = p_new_cep->cid;\r
+ req_rec.h_cm_req.h_al = p_listen->obj.h_al;\r
+ req_rec.h_cm_req.h_qp = p_new_cep->context;\r
+\r
+ req_rec.h_cm_listen = p_listen;\r
+\r
+ /* Invoke the user's callback. */\r
+ p_listen->pfn_cm_req_cb( &req_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__proc_listen(\r
+ IN al_listen_t* const p_listen,\r
+ IN ib_cep_t* const p_new_cep,\r
+ IN const ib_mad_t* const p_mad )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* Context is a listen - MAD must be a REQ or SIDR REQ */\r
+ switch( p_mad->attr_id )\r
+ {\r
+ case CM_REQ_ATTR_ID:\r
+ __listen_req(\r
+ p_listen, p_new_cep, (mad_cm_req_t*)p_mad );\r
+ break;\r
+\r
+ case CM_SIDR_REQ_ATTR_ID:\r
+ /* TODO - implement SIDR. */\r
+ default:\r
+ CL_ASSERT( p_mad->attr_id == CM_REQ_ATTR_ID ||\r
+ p_mad->attr_id == CM_SIDR_REQ_ATTR_ID );\r
+ /* Destroy the new CEP as it won't ever be reported to the user. */\r
+ al_destroy_cep( p_listen->obj.h_al, p_new_cep->cid, NULL );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/******************************************************************************\r
+* Functions that handle send timeouts:\r
+*\r
+*/\r
+\r
+/*\r
+ * Callback to process a connection establishment timeout due to a reply\r
+ * not being received.  The connection object has a reference taken when\r
+ * the timer is set or when the MAD is sent.\r
+ */\r
+static void\r
+__proc_conn_timeout(\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ ib_cm_rej_rec_t rej_rec;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_qp );\r
+\r
+ /*\r
+ * Format the reject record before aborting the connection since\r
+ * we need the QP context.\r
+ */\r
+ cl_memclr( &rej_rec, sizeof(ib_cm_rej_rec_t) );\r
+ rej_rec.h_qp = h_qp;\r
+ rej_rec.qp_context = h_qp->obj.context;\r
+ rej_rec.rej_status = IB_REJ_TIMEOUT;\r
+\r
+ ref_al_obj( &h_qp->obj );\r
+\r
+ /* Unbind the QP from the CEP. */\r
+ __cep_timewait_qp( h_qp );\r
+\r
+ cid = cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );\r
+\r
+ /* Invoke the callback. */\r
+ ((al_conn_qp_t*)h_qp)->pfn_cm_rej_cb( &rej_rec );\r
+\r
+ if( cid == AL_INVALID_CID ||\r
+ al_destroy_cep( h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &h_qp->obj );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * callback to process a LAP timeout due to APR not being received.\r
+ */\r
+static void\r
+__proc_lap_timeout(\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ ib_cm_apr_rec_t apr_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( h_qp );\r
+\r
+ /* Report the timeout. */\r
+ cl_memclr( &apr_rec, sizeof(ib_cm_apr_rec_t) );\r
+ apr_rec.h_qp = h_qp;\r
+ apr_rec.qp_context = h_qp->obj.context;\r
+ apr_rec.cm_status = IB_TIMEOUT;\r
+ apr_rec.apr_status = IB_AP_REJECT;\r
+\r
+ /* Notify the user that the LAP failed. */\r
+ ((al_conn_qp_t*)h_qp)->pfn_cm_apr_cb( &apr_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * Callback to process a disconnection timeout due to not receiving the DREP\r
+ * within allowable time.\r
+ */\r
+static void\r
+__proc_dconn_timeout(\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ ib_cm_drep_rec_t drep_rec;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* No response. We're done. Deliver a DREP callback. */\r
+ cl_memclr( &drep_rec, sizeof(ib_cm_drep_rec_t) );\r
+ drep_rec.h_qp = h_qp;\r
+ drep_rec.qp_context = h_qp->obj.context;\r
+ drep_rec.cm_status = IB_TIMEOUT;\r
+\r
+ ref_al_obj( &h_qp->obj );\r
+\r
+ __cep_timewait_qp( h_qp );\r
+\r
+ cid = cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );\r
+\r
+ /* Call the user back. */\r
+ ((al_conn_qp_t*)h_qp)->pfn_cm_drep_cb( &drep_rec );\r
+\r
+ if( cid == AL_INVALID_CID ||\r
+ al_destroy_cep( h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &h_qp->obj );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__proc_failed_send(\r
+ IN ib_qp_handle_t h_qp,\r
+ IN const ib_mad_t* const p_mad )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* Failure indicates a send. */\r
+ switch( p_mad->attr_id )\r
+ {\r
+ case CM_REQ_ATTR_ID:\r
+ case CM_REP_ATTR_ID:\r
+ __proc_conn_timeout( h_qp );\r
+ break;\r
+ case CM_LAP_ATTR_ID:\r
+ __proc_lap_timeout( h_qp );\r
+ break;\r
+ case CM_DREQ_ATTR_ID:\r
+ __proc_dconn_timeout( h_qp );\r
+ break;\r
+ default:\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("Invalid CM send MAD attribute ID %d.\n", p_mad->attr_id) );\r
+ break;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/******************************************************************************\r
+* Functions that handle received MADs on a connection (not listen)\r
+*\r
+*/\r
+\r
+\r
+void\r
+__proc_peer_req(\r
+ IN const ib_cm_handle_t* const p_cm,\r
+ IN const mad_cm_req_t* const p_req )\r
+{\r
+ ib_cm_req_rec_t req_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_cm );\r
+ CL_ASSERT( p_cm->h_qp );\r
+ /* Must be peer-to-peer. */\r
+ CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_req_cb );\r
+ CL_ASSERT( p_req );\r
+\r
+ /* Format the callback record. */\r
+ __format_req_rec( p_req, &req_rec );\r
+\r
+ /* update peer based rec handles and context values */\r
+ req_rec.context = p_cm->h_qp->obj.context;\r
+ req_rec.h_cm_req = *p_cm;\r
+ req_rec.h_cm_listen = NULL;\r
+\r
+ /* Invoke the user's callback. User must call ib_cm_rep or ib_cm_rej. */\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_req_cb( &req_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+void\r
+__proc_mra(\r
+ IN const ib_cm_handle_t* const p_cm,\r
+ IN const mad_cm_mra_t* const p_mra )\r
+{\r
+ ib_cm_mra_rec_t mra_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_cm->h_qp );\r
+ CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_mra_cb );\r
+\r
+ /* Format the MRA callback record. */\r
+ cl_memclr( &mra_rec, sizeof(ib_cm_mra_rec_t) );\r
+\r
+ mra_rec.h_qp = p_cm->h_qp;\r
+ mra_rec.qp_context = p_cm->h_qp->obj.context;\r
+ mra_rec.p_mra_pdata = p_mra->pdata;\r
+\r
+ /*\r
+ * Call the user back. Note that users will get a callback only\r
+ * for the first MRA received in response to a REQ, REP, or LAP.\r
+ */\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_mra_cb( &mra_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+void\r
+__proc_rej(\r
+ IN const ib_cm_handle_t* const p_cm,\r
+ IN const mad_cm_rej_t* const p_rej )\r
+{\r
+ ib_cm_rej_rec_t rej_rec;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( p_cm->h_qp )\r
+ {\r
+ /* Format the REJ callback record. */\r
+ cl_memclr( &rej_rec, sizeof(ib_cm_rej_rec_t) );\r
+\r
+ rej_rec.h_qp = p_cm->h_qp;\r
+ rej_rec.qp_context = p_cm->h_qp->obj.context;\r
+\r
+ rej_rec.p_rej_pdata = p_rej->pdata;\r
+ rej_rec.p_ari = p_rej->ari;\r
+ rej_rec.ari_length = conn_rej_get_ari_len( p_rej );\r
+ rej_rec.rej_status = p_rej->reason;\r
+\r
+ ref_al_obj( &p_cm->h_qp->obj );\r
+\r
+ /*\r
+ * Unbind the QP from the connection object. This allows the QP to\r
+ * be immediately reused in another connection request.\r
+ */\r
+ __cep_timewait_qp( p_cm->h_qp );\r
+\r
+ cid = cl_atomic_xchg( &((al_conn_qp_t*)p_cm->h_qp)->cid, AL_INVALID_CID );\r
+ CL_ASSERT( cid == p_cm->cid || cid == AL_INVALID_CID );\r
+ if( cid == AL_INVALID_CID ||\r
+ al_destroy_cep( p_cm->h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &p_cm->h_qp->obj );\r
+ }\r
+\r
+ /* Call the user back. */\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rej_cb( &rej_rec );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__proc_rep(\r
+ IN ib_cm_handle_t* const p_cm,\r
+ IN mad_cm_rep_t* const p_rep )\r
+{\r
+ ib_cm_rep_rec_t rep_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ cl_memclr( &rep_rec, sizeof(ib_cm_rep_rec_t) );\r
+\r
+ /* fill the rec callback data */\r
+ rep_rec.p_rep_pdata = p_rep->pdata;\r
+ rep_rec.qp_type = p_cm->h_qp->type;\r
+\r
+ rep_rec.h_cm_rep = *p_cm;\r
+ rep_rec.qp_context = p_cm->h_qp->obj.context;\r
+ rep_rec.resp_res = p_rep->resp_resources;\r
+ rep_rec.flow_ctrl = conn_rep_get_e2e_flow_ctl( p_rep );\r
+ rep_rec.apr_status = conn_rep_get_failover( p_rep );\r
+\r
+ /* Notify the user of the reply. */\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rep_cb( &rep_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__proc_rtu(\r
+ IN ib_cm_handle_t* const p_cm,\r
+ IN mad_cm_rtu_t* const p_rtu )\r
+{\r
+ ib_cm_rtu_rec_t rtu_rec;\r
+\r
+	AL_ENTER( AL_DBG_CM );\r
+\r
+	/* Clear the record for consistency with the other handlers. */\r
+	cl_memclr( &rtu_rec, sizeof(ib_cm_rtu_rec_t) );\r
+\r
+	rtu_rec.p_rtu_pdata = p_rtu->pdata;\r
+ rtu_rec.h_qp = p_cm->h_qp;\r
+ rtu_rec.qp_context = p_cm->h_qp->obj.context;\r
+\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rtu_cb( &rtu_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__proc_dreq(\r
+ IN ib_cm_handle_t* const p_cm,\r
+ IN mad_cm_dreq_t* const p_dreq )\r
+{\r
+ ib_cm_dreq_rec_t dreq_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ cl_memclr( &dreq_rec, sizeof(ib_cm_dreq_rec_t) );\r
+\r
+ dreq_rec.h_cm_dreq = *p_cm;\r
+ dreq_rec.p_dreq_pdata = p_dreq->pdata;\r
+\r
+ dreq_rec.qp_context = p_cm->h_qp->obj.context;\r
+\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_dreq_cb( &dreq_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+void\r
+__proc_drep(\r
+ IN ib_cm_handle_t* const p_cm,\r
+ IN mad_cm_drep_t* const p_drep )\r
+{\r
+ ib_cm_drep_rec_t drep_rec;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ cl_memclr( &drep_rec, sizeof(ib_cm_drep_rec_t) );\r
+\r
+ /* Copy qp context before the connection is released */\r
+ drep_rec.cm_status = IB_SUCCESS;\r
+ drep_rec.p_drep_pdata = p_drep->pdata;\r
+ drep_rec.h_qp = p_cm->h_qp;\r
+ drep_rec.qp_context = p_cm->h_qp->obj.context;\r
+\r
+ ref_al_obj( &p_cm->h_qp->obj );\r
+\r
+ __cep_timewait_qp( p_cm->h_qp );\r
+\r
+ cid = cl_atomic_xchg( &((al_conn_qp_t*)p_cm->h_qp)->cid, AL_INVALID_CID );\r
+ if( cid != AL_INVALID_CID )\r
+ {\r
+ CL_ASSERT( cid == p_cm->cid );\r
+\r
+ if( al_destroy_cep(\r
+ p_cm->h_al, p_cm->cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &p_cm->h_qp->obj );\r
+ }\r
+ }\r
+ else\r
+ {\r
+ deref_al_obj( &p_cm->h_qp->obj );\r
+ }\r
+\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_drep_cb( &drep_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+void\r
+__proc_lap(\r
+ IN ib_cm_handle_t* const p_cm,\r
+ IN const mad_cm_lap_t* const p_lap )\r
+{\r
+ ib_cm_lap_rec_t lap_rec;\r
+ const lap_path_info_t* const p_path = &p_lap->alternate_path;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_cm );\r
+ CL_ASSERT( p_cm->h_qp );\r
+ CL_ASSERT( p_lap );\r
+\r
+ cl_memclr( &lap_rec, sizeof(ib_cm_lap_rec_t) );\r
+ lap_rec.qp_context = p_cm->h_qp->obj.context;\r
+ lap_rec.h_cm_lap = *p_cm;\r
+\r
+ /*\r
+ * Format the path record. The local ack timeout specified in the\r
+ * LAP is twice the packet life plus the sender's CA ACK delay. When\r
+ * reporting the packet life, we divide the local ack timeout by 2 to\r
+ * approach the path's packet lifetime. Since local ack timeout is\r
+ * expressed as 4.096 * 2^x, subtracting 1 is equivalent to dividing the\r
+ * time in half.\r
+ */\r
+ ib_path_rec_init_local( &lap_rec.alt_path,\r
+ &p_lap->alternate_path.local_gid,\r
+ &p_lap->alternate_path.remote_gid,\r
+ p_lap->alternate_path.local_lid,\r
+ p_lap->alternate_path.remote_lid,\r
+ 1, IB_DEFAULT_PKEY,\r
+ conn_lap_path_get_svc_lvl( &p_lap->alternate_path ),\r
+ IB_PATH_SELECTOR_EXACTLY,\r
+ IB_MTU_2048,\r
+ IB_PATH_SELECTOR_EXACTLY,\r
+ conn_lap_path_get_pkt_rate( p_path ),\r
+ IB_PATH_SELECTOR_EXACTLY,\r
+ (uint8_t)( conn_lap_path_get_lcl_ack_timeout( p_path ) - 1 ),\r
+ 0 );\r
+\r
+ lap_rec.alt_path.hop_flow_raw.val = 0;\r
+ /* Add global routing info as necessary. */\r
+ if( !conn_lap_path_get_subn_lcl( &p_lap->alternate_path ) )\r
+ {\r
+ ib_path_rec_set_hop_flow_raw( &lap_rec.alt_path,\r
+ p_lap->alternate_path.hop_limit,\r
+ conn_lap_path_get_flow_lbl( &p_lap->alternate_path ),\r
+ FALSE );\r
+ lap_rec.alt_path.tclass =\r
+ conn_lap_path_get_tclass( &p_lap->alternate_path );\r
+ }\r
+\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_lap_cb( &lap_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static ib_api_status_t\r
+__cep_lap_qp(\r
+ IN ib_cm_handle_t* const p_cm )\r
+{\r
+ ib_api_status_t status;\r
+ ib_qp_mod_t qp_mod;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ status = al_cep_get_rts_attr( p_cm->h_al, p_cm->cid, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("al_cep_get_rts_attr returned %s.\n", ib_get_err_str(status)) );\r
+ goto done;\r
+ }\r
+\r
+ status = ib_modify_qp( p_cm->h_qp, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("ib_modify_qp for LAP returned %s.\n", ib_get_err_str(status)) );\r
+ }\r
+\r
+done:\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+static void\r
+__proc_apr(\r
+ IN ib_cm_handle_t* const p_cm,\r
+ IN mad_cm_apr_t* const p_apr )\r
+{\r
+ ib_cm_apr_rec_t apr_rec;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ apr_rec.h_qp = p_cm->h_qp;\r
+ apr_rec.qp_context = p_cm->h_qp->obj.context;\r
+ apr_rec.p_info = (const uint8_t*)&p_apr->info;\r
+ apr_rec.info_length = p_apr->info_len;\r
+ apr_rec.p_apr_pdata = p_apr->pdata;\r
+ apr_rec.apr_status = p_apr->status;\r
+\r
+ if( apr_rec.apr_status == IB_AP_SUCCESS )\r
+ {\r
+ apr_rec.cm_status = __cep_lap_qp( p_cm );\r
+ }\r
+ else\r
+ {\r
+ apr_rec.cm_status = IB_ERROR;\r
+ }\r
+\r
+ ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_apr_cb( &apr_rec );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__proc_conn(\r
+ IN ib_cm_handle_t* const p_cm,\r
+ IN ib_mad_t* const p_mad )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* Success indicates a receive. */\r
+ switch( p_mad->attr_id )\r
+ {\r
+ case CM_REQ_ATTR_ID:\r
+ __proc_peer_req( p_cm, (mad_cm_req_t*)p_mad );\r
+ break;\r
+\r
+ case CM_MRA_ATTR_ID:\r
+ __proc_mra( p_cm, (mad_cm_mra_t*)p_mad );\r
+ break;\r
+\r
+ case CM_REJ_ATTR_ID:\r
+ __proc_rej( p_cm, (mad_cm_rej_t*)p_mad );\r
+ break;\r
+\r
+ case CM_REP_ATTR_ID:\r
+ __proc_rep( p_cm, (mad_cm_rep_t*)p_mad );\r
+ break;\r
+\r
+ case CM_RTU_ATTR_ID:\r
+ __proc_rtu( p_cm, (mad_cm_rtu_t*)p_mad );\r
+ break;\r
+\r
+ case CM_DREQ_ATTR_ID:\r
+ __proc_dreq( p_cm, (mad_cm_dreq_t*)p_mad );\r
+ break;\r
+\r
+ case CM_DREP_ATTR_ID:\r
+ __proc_drep( p_cm, (mad_cm_drep_t*)p_mad );\r
+ break;\r
+\r
+ case CM_LAP_ATTR_ID:\r
+ __proc_lap( p_cm, (mad_cm_lap_t*)p_mad );\r
+ break;\r
+\r
+ case CM_APR_ATTR_ID:\r
+ __proc_apr( p_cm, (mad_cm_apr_t*)p_mad );\r
+ break;\r
+\r
+ //case CM_SIDR_REQ_ATTR_ID:\r
+ // p_async_mad->item.pfn_callback = __process_cm_sidr_req;\r
+ // break;\r
+\r
+ //case CM_SIDR_REP_ATTR_ID:\r
+ // p_async_mad->item.pfn_callback = __process_cm_sidr_rep;\r
+ // break;\r
+\r
+ default:\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("Invalid CM recv MAD attribute ID %d.\n", p_mad->attr_id) );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+/******************************************************************************\r
+* CEP callback handler.\r
+*\r
+*/\r
+\r
+#ifdef CL_KERNEL\r
+static void\r
+__process_cep_cb(\r
+#else\r
+static void\r
+__cm_handler(\r
+#endif\r
+ IN const ib_al_handle_t h_al,\r
+ IN ib_cep_t* const p_cep )\r
+{\r
+ ib_api_status_t status;\r
+ ib_cep_t new_cep;\r
+ ib_mad_element_t *p_mad;\r
+ ib_cm_handle_t h_cm;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ for( status = al_cep_poll( h_al, p_cep->cid, &new_cep, &p_mad );\r
+ status == IB_SUCCESS;\r
+ status = al_cep_poll( h_al, p_cep->cid, &new_cep, &p_mad ) )\r
+ {\r
+ /* Something to do - WOOT!!! */\r
+ if( new_cep.cid != AL_INVALID_CID )\r
+ {\r
+ __proc_listen( (al_listen_t*)p_cep->context,\r
+ &new_cep, ib_get_mad_buf( p_mad ) );\r
+ }\r
+ else if( p_mad->status != IB_SUCCESS )\r
+ {\r
+ /* Context is a QP handle, and a sent MAD timed out. */\r
+ __proc_failed_send(\r
+ (ib_qp_handle_t)p_cep->context, ib_get_mad_buf( p_mad ) );\r
+ }\r
+ else\r
+ {\r
+ h_cm.h_al = h_al;\r
+ h_cm.cid = p_cep->cid;\r
+ h_cm.h_qp = (ib_qp_handle_t)p_cep->context;\r
+ __proc_conn( &h_cm, ib_get_mad_buf( p_mad ) );\r
+ }\r
+ ib_put_mad( p_mad );\r
+ }\r
+}\r
+\r
+\r
+#ifdef CL_KERNEL\r
+\r
+static void\r
+__process_cep_async(\r
+ IN cl_async_proc_item_t *p_item )\r
+{\r
+ cep_async_mad_t *p_async_mad;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_async_mad = PARENT_STRUCT( p_item, cep_async_mad_t, item );\r
+\r
+ __process_cep_cb( p_async_mad->h_al, &p_async_mad->cep );\r
+\r
+ cl_free( p_async_mad );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * The handler is invoked at DISPATCH_LEVEL in kernel mode. We need to switch\r
+ * to a passive level thread context to perform QP modify and invoke user\r
+ * callbacks.\r
+ */\r
+static void\r
+__cm_handler(\r
+ IN const ib_al_handle_t h_al,\r
+ IN ib_cep_t* const p_cep )\r
+{\r
+ cep_async_mad_t *p_async_mad;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_async_mad = (cep_async_mad_t*)cl_zalloc( sizeof(cep_async_mad_t) );\r
+ if( !p_async_mad )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("failed to cl_zalloc cep_async_mad_t (%d bytes)\n",\r
+ sizeof(cep_async_mad_t)) );\r
+ return;\r
+ }\r
+\r
+ p_async_mad->h_al = h_al;\r
+ p_async_mad->cep = *p_cep;\r
+ p_async_mad->item.pfn_callback = __process_cep_async;\r
+\r
+ /* Queue the MAD for asynchronous processing. */\r
+ cl_async_proc_queue( gp_async_proc_mgr, &p_async_mad->item );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+#endif /* CL_KERNEL */\r
+\r
+\r
+/*\r
+ * Transition the QP to the INIT state, if it is not already in the\r
+ * INIT state.\r
+ */\r
+ib_api_status_t\r
+__cep_init_qp(\r
+ IN const ib_qp_handle_t h_qp,\r
+ IN ib_qp_mod_t* const p_init )\r
+{\r
+ ib_qp_mod_t qp_mod;\r
+ ib_api_status_t status;\r
+\r
+ /*\r
+ * Move to the init state to allow posting of receive buffers.\r
+	 * Check the current state of the QP.  The user may have already\r
+ * transitioned it and posted some receives to the QP, so we\r
+ * should not reset the QP if it is already in the INIT state.\r
+ */\r
+ if( h_qp->state != IB_QPS_INIT )\r
+ {\r
+ /* Reset the QP. */\r
+ cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) );\r
+ qp_mod.req_state = IB_QPS_RESET;\r
+\r
+ status = ib_modify_qp( h_qp, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("ib_modify_qp to IB_QPS_RESET returned %s\n",\r
+ ib_get_err_str(status) ) );\r
+ }\r
+\r
+ /* Initialize the QP. */\r
+ status = ib_modify_qp( h_qp, p_init );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("ib_modify_qp returned %s.\n", ib_get_err_str(status) ) );\r
+ return status;\r
+ }\r
+ }\r
+\r
+ return IB_SUCCESS;\r
+}\r
+\r
+static ib_api_status_t\r
+__cep_pre_req(\r
+ IN const ib_cm_req_t* const p_cm_req )\r
+{\r
+ ib_api_status_t status;\r
+ ib_qp_mod_t qp_mod;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ status = al_cep_pre_req( qp_get_al( p_cm_req->h_qp ),\r
+ ((al_conn_qp_t*)p_cm_req->h_qp)->cid, p_cm_req, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_cep_pre_req returned %s.\n", ib_get_err_str( status )) );\r
+ return status;\r
+ }\r
+\r
+ /* Transition QP through state machine */\r
+ /*\r
+ * Warning! Using all access rights. We need to modify\r
+ * the ib_cm_req_t to include this.\r
+ */\r
+ qp_mod.state.init.access_ctrl |=\r
+ IB_AC_RDMA_READ | IB_AC_RDMA_WRITE | IB_AC_ATOMIC;\r
+ status = __cep_init_qp( p_cm_req->h_qp, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("__cep_init_qp returned %s\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+static ib_api_status_t\r
+__cep_conn_req(\r
+ IN const ib_al_handle_t h_al,\r
+ IN const ib_cm_req_t* const p_cm_req )\r
+{\r
+ ib_api_status_t status;\r
+ //cl_status_t cl_status;\r
+ //cl_event_t sync_event;\r
+ //cl_event_t *p_sync_event = NULL;\r
+ al_conn_qp_t *p_qp;\r
+ net32_t cid, old_cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* event based mechanism */\r
+ if( p_cm_req->flags & IB_FLAGS_SYNC )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_UNSUPPORTED;\r
+ //cl_event_construct( &sync_event );\r
+ //cl_status = cl_event_init( &sync_event, FALSE );\r
+ //if( cl_status != CL_SUCCESS )\r
+ //{\r
+ // __deref_conn( p_conn );\r
+ // return ib_convert_cl_status( cl_status );\r
+ //}\r
+ //p_conn->p_sync_event = p_sync_event = &sync_event;\r
+ }\r
+\r
+ p_qp = (al_conn_qp_t*)p_cm_req->h_qp;\r
+\r
+ /* Get a CEP and bind it to the QP. */\r
+ status = al_create_cep( h_al, __cm_handler, p_cm_req->h_qp, &cid );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("al_create_cep returned %s.\n", ib_get_err_str( status )) );\r
+ goto done;\r
+ }\r
+\r
+ /* See if this QP has already been connected. */\r
+ old_cid = cl_atomic_comp_xchg( &p_qp->cid, AL_INVALID_CID, cid );\r
+ if( old_cid != AL_INVALID_CID )\r
+ {\r
+ al_destroy_cep( h_al, cid, NULL );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INVALID_STATE;\r
+ }\r
+\r
+ status = __cep_pre_req( p_cm_req );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("__cep_pre_req returned %s.\n", ib_get_err_str( status )) );\r
+ goto err;\r
+ }\r
+\r
+ /* Store callback pointers. */\r
+ p_qp->pfn_cm_req_cb = p_cm_req->pfn_cm_req_cb;\r
+ p_qp->pfn_cm_rep_cb = p_cm_req->pfn_cm_rep_cb;\r
+ p_qp->pfn_cm_mra_cb = p_cm_req->pfn_cm_mra_cb;\r
+ p_qp->pfn_cm_rej_cb = p_cm_req->pfn_cm_rej_cb;\r
+\r
+ /* Send the REQ. */\r
+ status = al_cep_send_req( h_al, p_qp->cid );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ //if( p_sync_event )\r
+ // cl_event_destroy( p_sync_event );\r
+\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("al_cep_send_req returned %s.\n", ib_get_err_str(status)) );\r
+err:\r
+ ref_al_obj( &p_qp->qp.obj );\r
+ cl_atomic_xchg( &p_qp->cid, AL_INVALID_CID );\r
+ if( al_destroy_cep( h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+ deref_al_obj( &p_qp->qp.obj );\r
+ }\r
+\r
+ /* wait on event if synchronous operation */\r
+ //if( p_sync_event )\r
+ //{\r
+ // CL_TRACE( AL_DBG_CM, g_al_dbg_lvl,\r
+ // ("event blocked on REQ...\n") );\r
+ // cl_event_wait_on( p_sync_event, EVENT_NO_TIMEOUT, FALSE );\r
+\r
+ // cl_event_destroy( p_sync_event );\r
+ //}\r
+\r
+done:\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_req(\r
+ IN const ib_cm_req_t* const p_cm_req )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_req )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Only supported qp types allowed */\r
+ switch( p_cm_req->qp_type )\r
+ {\r
+ default:\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") );\r
+ return IB_INVALID_SETTING;\r
+\r
+ case IB_QPT_RELIABLE_CONN:\r
+ case IB_QPT_UNRELIABLE_CONN:\r
+ if( AL_OBJ_INVALID_HANDLE( p_cm_req->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+ (p_cm_req->h_qp->type != p_cm_req->qp_type) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ return IB_INVALID_QP_HANDLE;\r
+ }\r
+\r
+ status = __cep_conn_req( qp_get_al( p_cm_req->h_qp ), p_cm_req );\r
+ break;\r
+\r
+ case IB_QPT_UNRELIABLE_DGRM:\r
+ if( AL_OBJ_INVALID_HANDLE( p_cm_req->h_al, AL_OBJ_TYPE_H_AL ) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") );\r
+ return IB_INVALID_AL_HANDLE;\r
+ }\r
+ status = IB_UNSUPPORTED;\r
+// status = cm_sidr_req( p_cm_req->h_al, p_cm_req );\r
+ break;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
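+\r
+/*\r
+ * EXAMPLE\r
+ *	Issuing a connection request, for illustration only.  Only fields\r
+ *	referenced above are shown; __my_rep_cb and __my_rej_cb are\r
+ *	caller-supplied placeholders:\r
+ *\r
+ *	ib_cm_req_t	req;\r
+ *\r
+ *	cl_memclr( &req, sizeof(ib_cm_req_t) );\r
+ *	req.qp_type = IB_QPT_RELIABLE_CONN;\r
+ *	req.h_qp = h_qp;\r
+ *	req.pfn_cm_rep_cb = __my_rep_cb;\r
+ *	req.pfn_cm_rej_cb = __my_rej_cb;\r
+ *	status = ib_cm_req( &req );\r
+ */\r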
+\r
+\r
+/*\r
+ * Note: we pass in the QP handle separately because it comes from different\r
+ * sources. It comes from the ib_cm_rep_t structure in the ib_cm_rep path, and\r
+ * from the ib_cm_handle_t structure in the ib_cm_rtu path.\r
+ */\r
+static ib_api_status_t\r
+__cep_rts_qp(\r
+ IN const ib_cm_handle_t h_cm,\r
+ IN const ib_qp_handle_t h_qp,\r
+ IN const ib_access_t access_ctrl,\r
+ IN const uint32_t sq_depth,\r
+ IN const uint32_t rq_depth )\r
+{\r
+ ib_api_status_t status;\r
+ ib_qp_mod_t qp_mod;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* Set the QP to RTR. */\r
+ status = al_cep_get_rtr_attr( h_cm.h_al, h_cm.cid, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_cep_get_rtr_attr returned %s\n", ib_get_err_str( status )) );\r
+ return status;\r
+ }\r
+\r
+ if( access_ctrl )\r
+ {\r
+ qp_mod.state.rtr.access_ctrl = access_ctrl;\r
+ qp_mod.state.rtr.opts |= IB_MOD_QP_ACCESS_CTRL;\r
+ }\r
+\r
+ if( sq_depth )\r
+ {\r
+ qp_mod.state.rtr.sq_depth = sq_depth;\r
+ qp_mod.state.rtr.opts |= IB_MOD_QP_SQ_DEPTH;\r
+ }\r
+\r
+ if( rq_depth )\r
+ {\r
+ qp_mod.state.rtr.rq_depth = rq_depth;\r
+ qp_mod.state.rtr.opts |= IB_MOD_QP_RQ_DEPTH;\r
+ }\r
+\r
+ status = ib_modify_qp( h_qp, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ib_modify_qp to RTR returned %s.\n", ib_get_err_str(status) ) );\r
+ return status;\r
+ }\r
+\r
+ /* Set the QP to RTS. */\r
+ status = al_cep_get_rts_attr( h_cm.h_al, h_cm.cid, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_cep_get_rts_attr returned %s\n", ib_get_err_str( status )) );\r
+ return status;\r
+ }\r
+\r
+ status = ib_modify_qp( h_qp, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ib_modify_qp to RTS returned %s.\n", ib_get_err_str(status) ) );\r
+ return status;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+static ib_api_status_t\r
+__cep_pre_rep(\r
+ IN const ib_cm_handle_t h_cm,\r
+ IN const ib_cm_rep_t* const p_cm_rep )\r
+{\r
+ ib_api_status_t status;\r
+ al_conn_qp_t *p_qp;\r
+ ib_qp_mod_t qp_mod;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_qp = (al_conn_qp_t*)p_cm_rep->h_qp;\r
+\r
+ status = al_cep_pre_rep(\r
+ h_cm.h_al, h_cm.cid, p_cm_rep->h_qp, p_cm_rep, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_cep_pre_rep returned %s.\n", ib_get_err_str( status )) );\r
+ return status;\r
+ }\r
+\r
+ /* Transition the QP to the INIT state. */\r
+ qp_mod.state.init.access_ctrl = p_cm_rep->access_ctrl;\r
+ status = __cep_init_qp( p_cm_rep->h_qp, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("__cep_init_qp returned %s.\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+\r
+ /* Prepost receives. */\r
+ if( p_cm_rep->p_recv_wr )\r
+ {\r
+ status = ib_post_recv( p_cm_rep->h_qp, p_cm_rep->p_recv_wr,\r
+ (ib_recv_wr_t** __ptr64)p_cm_rep->pp_recv_failure );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ib_post_recv returned %s.\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+ }\r
+\r
+ /* Transition the QP to the RTR and RTS states. */\r
+ status = __cep_rts_qp( h_cm, p_cm_rep->h_qp,\r
+ p_cm_rep->access_ctrl, p_cm_rep->sq_depth, p_cm_rep->rq_depth );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("__cep_rts_qp returned %s.\n", ib_get_err_str(status)) );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+static ib_api_status_t\r
+__cep_conn_rep(\r
+ IN ib_cm_handle_t h_cm,\r
+ IN const ib_cm_rep_t* const p_cm_rep )\r
+{\r
+ ib_api_status_t status;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ cid = cl_atomic_comp_xchg(\r
+ &((al_conn_qp_t*)p_cm_rep->h_qp)->cid, AL_INVALID_CID, h_cm.cid );\r
+\r
+ if( cid != AL_INVALID_CID )\r
+ {\r
+ /* We don't destroy the CEP to allow the user to retry accepting. */\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("QP already connected.\n") );\r
+ return IB_INVALID_QP_HANDLE;\r
+ }\r
+\r
+ /* Store the CM callbacks. */\r
+ ((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_rej_cb = p_cm_rep->pfn_cm_rej_cb;\r
+ ((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_mra_cb = p_cm_rep->pfn_cm_mra_cb;\r
+ ((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_rtu_cb = p_cm_rep->pfn_cm_rtu_cb;\r
+ ((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_lap_cb = p_cm_rep->pfn_cm_lap_cb;\r
+ ((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_dreq_cb = p_cm_rep->pfn_cm_dreq_cb;\r
+\r
+ /* Transition QP through state machine */\r
+ status = __cep_pre_rep( h_cm, p_cm_rep );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+			("__cep_pre_rep returned %s\n", ib_get_err_str(status)) );\r
+ goto err;\r
+ }\r
+\r
+ status = al_cep_send_rep( h_cm.h_al, h_cm.cid );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_cep_send_rep returned %s\n", ib_get_err_str(status)) );\r
+err:\r
+ cl_atomic_xchg(\r
+ &((al_conn_qp_t*)p_cm_rep->h_qp)->cid, AL_INVALID_CID );\r
+\r
+ ref_al_obj( &p_cm_rep->h_qp->obj );\r
+\r
+ /* Reject and abort the connection. */\r
+ al_cep_rej( h_cm.h_al, h_cm.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 );\r
+\r
+ if( al_destroy_cep( h_cm.h_al, h_cm.cid, deref_al_obj ) != IB_SUCCESS )\r
+ deref_al_obj( &p_cm_rep->h_qp->obj );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_rep(\r
+ IN const ib_cm_handle_t h_cm_req,\r
+ IN const ib_cm_rep_t* const p_cm_rep )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_rep )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Only supported qp types allowed */\r
+ status = IB_SUCCESS;\r
+ switch( p_cm_rep->qp_type )\r
+ {\r
+ default:\r
+ AL_TRACE( AL_DBG_ERROR, ("Invalid qp_type.\n") );\r
+ status = IB_INVALID_SETTING;\r
+ break;\r
+\r
+ case IB_QPT_RELIABLE_CONN:\r
+ case IB_QPT_UNRELIABLE_CONN:\r
+ if( AL_OBJ_INVALID_HANDLE( p_cm_rep->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+ (p_cm_rep->h_qp->type != p_cm_rep->qp_type) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ status = IB_INVALID_QP_HANDLE;\r
+ }\r
+		else if( p_cm_rep->h_qp->obj.h_al != h_cm_req.h_al )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ status = IB_INVALID_QP_HANDLE;\r
+ }\r
+ break;\r
+\r
+ case IB_QPT_UNRELIABLE_DGRM:\r
+ if( ( p_cm_rep->status == IB_SIDR_SUCCESS ) &&\r
+ (AL_OBJ_INVALID_HANDLE( p_cm_rep->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+ (p_cm_rep->h_qp->type != p_cm_rep->qp_type) ) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ status = IB_INVALID_QP_HANDLE;\r
+ }\r
+ break;\r
+ }\r
+\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ al_cep_rej(\r
+ h_cm_req.h_al, h_cm_req.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 );\r
+ al_destroy_cep( h_cm_req.h_al, h_cm_req.cid, NULL );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+ }\r
+\r
+ if( p_cm_rep->qp_type == IB_QPT_UNRELIABLE_DGRM )\r
+		/* TODO: SIDR support - formerly cm_sidr_rep( p_conn, p_cm_rep ). */\r
+		status = IB_UNSUPPORTED;\r
+ else\r
+ status = __cep_conn_rep( h_cm_req, p_cm_rep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_rtu(\r
+ IN const ib_cm_handle_t h_cm_rep,\r
+ IN const ib_cm_rtu_t* const p_cm_rtu )\r
+{\r
+ ib_api_status_t status;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_rtu )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ ((al_conn_qp_t*)h_cm_rep.h_qp)->pfn_cm_apr_cb = p_cm_rtu->pfn_cm_apr_cb;\r
+ ((al_conn_qp_t*)h_cm_rep.h_qp)->pfn_cm_dreq_cb = p_cm_rtu->pfn_cm_dreq_cb;\r
+\r
+ /* Transition QP through state machine */\r
+ status = __cep_rts_qp( h_cm_rep, h_cm_rep.h_qp,\r
+ p_cm_rtu->access_ctrl, p_cm_rtu->sq_depth, p_cm_rtu->rq_depth );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("__cep_rts_qp returned %s.\n", ib_get_err_str( status )) );\r
+ goto err;\r
+ }\r
+\r
+ status = al_cep_rtu( h_cm_rep.h_al, h_cm_rep.cid,\r
+ p_cm_rtu->p_rtu_pdata, p_cm_rtu->rtu_length );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+err:\r
+ /* Reject and abort the connection. */\r
+ al_cep_rej(\r
+ h_cm_rep.h_al, h_cm_rep.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 );\r
+\r
+ __cep_timewait_qp( h_cm_rep.h_qp );\r
+\r
+ cid = cl_atomic_xchg(\r
+ &((al_conn_qp_t*)h_cm_rep.h_qp)->cid, AL_INVALID_CID );\r
+\r
+ CL_ASSERT( cid == h_cm_rep.cid );\r
+\r
+ ref_al_obj( &h_cm_rep.h_qp->obj );\r
+ if( al_destroy_cep(\r
+ h_cm_rep.h_al, h_cm_rep.cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &h_cm_rep.h_qp->obj );\r
+ }\r
+\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_cep_rtu returned %s.\n", ib_get_err_str( status )) );\r
+ return status;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_mra(\r
+ IN const ib_cm_handle_t h_cm,\r
+ IN const ib_cm_mra_t* const p_cm_mra )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_mra )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ status = al_cep_mra( h_cm.h_al, h_cm.cid, p_cm_mra );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("al_cep_mra returned %s\n", ib_get_err_str( status )) );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_rej(\r
+ IN const ib_cm_handle_t h_cm,\r
+ IN const ib_cm_rej_t* const p_cm_rej )\r
+{\r
+ ib_api_status_t status;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_rej )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ status = al_cep_rej( h_cm.h_al, h_cm.cid, p_cm_rej->rej_status,\r
+ p_cm_rej->p_ari->data, p_cm_rej->ari_length,\r
+ p_cm_rej->p_rej_pdata, p_cm_rej->rej_length );\r
+\r
+ if( h_cm.h_qp )\r
+ {\r
+ __cep_timewait_qp( h_cm.h_qp );\r
+\r
+ cid = cl_atomic_xchg(\r
+ &((al_conn_qp_t*)h_cm.h_qp)->cid, AL_INVALID_CID );\r
+ if( cid != AL_INVALID_CID )\r
+ {\r
+ ref_al_obj( &h_cm.h_qp->obj );\r
+ if( al_destroy_cep( h_cm.h_al, h_cm.cid, deref_al_obj ) != IB_SUCCESS )\r
+ deref_al_obj( &h_cm.h_qp->obj );\r
+ }\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_dreq(\r
+ IN const ib_cm_dreq_t* const p_cm_dreq )\r
+{\r
+ ib_api_status_t status;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_dreq )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Only supported qp types allowed */\r
+ switch( p_cm_dreq->qp_type )\r
+ {\r
+ default:\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") );\r
+ return IB_INVALID_SETTING;\r
+\r
+ case IB_QPT_RELIABLE_CONN:\r
+ case IB_QPT_UNRELIABLE_CONN:\r
+ if( AL_OBJ_INVALID_HANDLE( p_cm_dreq->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+ (p_cm_dreq->h_qp->type != p_cm_dreq->qp_type) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ return IB_INVALID_QP_HANDLE;\r
+ }\r
+ break;\r
+ }\r
+\r
+ /* Store the callback pointers. */\r
+ ((al_conn_qp_t*)p_cm_dreq->h_qp)->pfn_cm_drep_cb =\r
+ p_cm_dreq->pfn_cm_drep_cb;\r
+\r
+ status = al_cep_dreq( p_cm_dreq->h_qp->obj.h_al,\r
+ ((al_conn_qp_t*)p_cm_dreq->h_qp)->cid,\r
+ p_cm_dreq->p_dreq_pdata, p_cm_dreq->dreq_length );\r
+ switch( status )\r
+ {\r
+ case IB_INVALID_STATE:\r
+ case IB_INVALID_HANDLE:\r
+ case IB_INVALID_PARAMETER:\r
+ case IB_INVALID_SETTING:\r
+ /* Bad call - don't touch the QP. */\r
+ break;\r
+\r
+ case IB_SUCCESS:\r
+ /* Wait for the DREP or timeout. */\r
+ break;\r
+\r
+ default:\r
+ /*\r
+ * If we failed to send the DREQ, just release the connection. It's\r
+		 * unreliable anyway.  The local port may be down.  Note that even\r
+		 * though we could not send a DREQ, we could still have received\r
+		 * one; a received DREQ holds a reference on the connection until\r
+		 * the user calls ib_cm_drep.\r
+ */\r
+ __cep_timewait_qp( p_cm_dreq->h_qp );\r
+\r
+ cid = cl_atomic_xchg(\r
+ &((al_conn_qp_t*)p_cm_dreq->h_qp)->cid, AL_INVALID_CID );\r
+ ref_al_obj( &p_cm_dreq->h_qp->obj );\r
+ if( cid == AL_INVALID_CID || al_destroy_cep(\r
+ p_cm_dreq->h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &p_cm_dreq->h_qp->obj );\r
+ }\r
+ status = IB_SUCCESS;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_drep(\r
+ IN const ib_cm_handle_t h_cm_dreq,\r
+ IN const ib_cm_drep_t* const p_cm_drep )\r
+{\r
+ ib_api_status_t status;\r
+ net32_t cid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_drep )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ status = al_cep_drep( h_cm_dreq.h_al, h_cm_dreq.cid, p_cm_drep );\r
+ switch( status )\r
+ {\r
+ case IB_INVALID_SETTING:\r
+ case IB_INVALID_HANDLE:\r
+ case IB_INVALID_PARAMETER:\r
+ case IB_INVALID_STATE:\r
+ /* Bad call - don't touch the QP. */\r
+ break;\r
+\r
+ default:\r
+ /*\r
+ * Some other out-of-resource error - continue as if we succeeded in\r
+ * sending the DREP.\r
+ */\r
+ status = IB_SUCCESS;\r
+ /* Fall through */\r
+ case IB_SUCCESS:\r
+ __cep_timewait_qp( h_cm_dreq.h_qp );\r
+\r
+ cid = cl_atomic_xchg(\r
+ &((al_conn_qp_t*)h_cm_dreq.h_qp)->cid, AL_INVALID_CID );\r
+ if( cid != AL_INVALID_CID )\r
+ {\r
+ CL_ASSERT( cid == h_cm_dreq.cid );\r
+ ref_al_obj( &h_cm_dreq.h_qp->obj );\r
+ if( al_destroy_cep(\r
+ h_cm_dreq.h_al, h_cm_dreq.cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &h_cm_dreq.h_qp->obj );\r
+ }\r
+ }\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_lap(\r
+ IN const ib_cm_lap_t* const p_cm_lap )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_lap )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Only supported qp types allowed */\r
+ switch( p_cm_lap->qp_type )\r
+ {\r
+ default:\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") );\r
+ return IB_INVALID_SETTING;\r
+\r
+ case IB_QPT_RELIABLE_CONN:\r
+ case IB_QPT_UNRELIABLE_CONN:\r
+ if( AL_OBJ_INVALID_HANDLE( p_cm_lap->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+ (p_cm_lap->h_qp->type != p_cm_lap->qp_type) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ return IB_INVALID_QP_HANDLE;\r
+ }\r
+ break;\r
+ }\r
+\r
+ status = al_cep_lap( p_cm_lap->h_qp->obj.h_al,\r
+ ((al_conn_qp_t*)p_cm_lap->h_qp)->cid, p_cm_lap );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("al_cep_lap returned %s.\n", ib_get_err_str( status )) );\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_apr(\r
+ IN const ib_cm_handle_t h_cm_lap,\r
+ IN const ib_cm_apr_t* const p_cm_apr )\r
+{\r
+ ib_api_status_t status;\r
+ ib_qp_mod_t qp_mod;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( !p_cm_apr )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ /* Only supported qp types allowed */\r
+ switch( p_cm_apr->qp_type )\r
+ {\r
+ default:\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("Invalid qp_type.\n") );\r
+ return IB_INVALID_SETTING;\r
+\r
+ case IB_QPT_RELIABLE_CONN:\r
+ case IB_QPT_UNRELIABLE_CONN:\r
+ if( AL_OBJ_INVALID_HANDLE( p_cm_apr->h_qp, AL_OBJ_TYPE_H_QP ) ||\r
+ (p_cm_apr->h_qp->type != p_cm_apr->qp_type) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ return IB_INVALID_QP_HANDLE;\r
+ }\r
+ break;\r
+ }\r
+\r
+ status = al_cep_pre_apr( h_cm_lap.h_al, h_cm_lap.cid, p_cm_apr, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_cep_pre_apr returned %s.\n", ib_get_err_str( status )) );\r
+ return status;\r
+ }\r
+\r
+ /* Load alt path into QP */\r
+ status = ib_modify_qp( h_cm_lap.h_qp, &qp_mod );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ib_modify_qp for LAP returned %s.\n",\r
+ ib_get_err_str( status )) );\r
+ return status;\r
+ }\r
+ \r
+ status = al_cep_send_apr( h_cm_lap.h_al, h_cm_lap.cid );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_force_apm(\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ ib_api_status_t status;\r
+ al_conn_qp_t *p_conn_qp;\r
+ ib_qp_mod_t qp_mod;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );\r
+ return IB_INVALID_QP_HANDLE;\r
+ }\r
+\r
+ p_conn_qp = PARENT_STRUCT( h_qp, al_conn_qp_t, qp );\r
+ cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) );\r
+ qp_mod.req_state = IB_QPS_RTS;\r
+ qp_mod.state.rts.apm_state = IB_APM_MIGRATED;\r
+ qp_mod.state.rts.opts = IB_MOD_QP_APM_STATE;\r
+\r
+	/* Request path migration by updating the APM state of the RTS QP. */\r
+ status = ib_modify_qp( h_qp, &qp_mod );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
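+\r
+/*\r
+ * Illustrative use (sketch): once a connection with a loaded alternate\r
+ * path is established (e.g. after a successful LAP/APR exchange), a\r
+ * client can force migration to that path without waiting for a fabric\r
+ * event:\r
+ *\r
+ *	status = ib_force_apm( h_qp );\r
+ */\r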
+\r
+\r
+static void\r
+__destroying_listen(\r
+ IN al_obj_t* p_obj )\r
+{\r
+ ib_api_status_t status;\r
+ al_listen_t *p_listen;\r
+\r
+ p_listen = PARENT_STRUCT( p_obj, al_listen_t, obj );\r
+\r
+ /* Destroy the listen's CEP. */\r
+ status = al_destroy_cep(\r
+ p_obj->h_al, p_listen->cid, deref_al_obj );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE( AL_DBG_ERROR,\r
+ ("al_destroy_cep returned %s.\n", ib_get_err_str( status )) );\r
+ deref_al_obj( p_obj );\r
+ }\r
+}\r
+\r
+\r
+\r
+static void\r
+__free_listen(\r
+ IN al_obj_t* p_obj )\r
+{\r
+ destroy_al_obj( p_obj );\r
+ cl_free( PARENT_STRUCT( p_obj, al_listen_t, obj ) );\r
+}\r
+\r
+\r
+static ib_api_status_t\r
+__cep_listen(\r
+ IN const ib_al_handle_t h_al,\r
+ IN const ib_cm_listen_t* const p_cm_listen,\r
+ IN const ib_pfn_listen_err_cb_t pfn_listen_err_cb,\r
+ IN const void* const listen_context,\r
+ OUT ib_listen_handle_t* const ph_cm_listen )\r
+{\r
+ ib_api_status_t status;\r
+ al_listen_t *p_listen;\r
+ ib_cep_listen_t cep_listen;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ UNUSED_PARAM( pfn_listen_err_cb );\r
+\r
+ /* Allocate the listen object. */\r
+ p_listen = (al_listen_t*)cl_zalloc( sizeof(al_listen_t) );\r
+ if( !p_listen )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_INSUFFICIENT_MEMORY;\r
+ }\r
+\r
+ /* Copy the listen request information for matching incoming requests. */\r
+ p_listen->pfn_cm_req_cb = p_cm_listen->pfn_cm_req_cb;\r
+\r
+ /* valid for ud qp_type only */\r
+ p_listen->sidr_context = p_cm_listen->sidr_context;\r
+\r
+ construct_al_obj( &p_listen->obj, AL_OBJ_TYPE_H_LISTEN );\r
+ status = init_al_obj( &p_listen->obj, listen_context, TRUE,\r
+ __destroying_listen, NULL, __free_listen );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ __free_listen( &p_listen->obj );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+ }\r
+\r
+ /* Add the listen to the AL instance's object list. */\r
+ status = attach_al_obj( &h_al->obj, &p_listen->obj );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ p_listen->obj.pfn_destroy( &p_listen->obj, NULL );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+\r
+ /* Create a CEP to listen on. */\r
+ status = al_create_cep( h_al, __cm_handler, p_listen, &p_listen->cid );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ p_listen->obj.pfn_destroy( &p_listen->obj, NULL );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_create_cep returned %s.\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+\r
+ cep_listen.cmp_len = p_cm_listen->compare_length;\r
+ cep_listen.cmp_offset = p_cm_listen->compare_offset;\r
+ cep_listen.p_cmp_buf = p_cm_listen->p_compare_buffer;\r
+ cep_listen.port_guid = p_cm_listen->port_guid;\r
+ cep_listen.svc_id = p_cm_listen->svc_id;\r
+\r
+ status = al_cep_listen( h_al, p_listen->cid, &cep_listen );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ p_listen->obj.pfn_destroy( &p_listen->obj, NULL );\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_cep_listen returned %s.\n", ib_get_err_str(status)) );\r
+ return status;\r
+ }\r
+\r
+ *ph_cm_listen = p_listen;\r
+\r
+ /* Note that we keep the reference held on behalf of the CEP. */\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_listen(\r
+ IN const ib_al_handle_t h_al,\r
+ IN const ib_cm_listen_t* const p_cm_listen,\r
+ IN const ib_pfn_listen_err_cb_t pfn_listen_err_cb,\r
+ IN const void* const listen_context,\r
+ OUT ib_listen_handle_t* const ph_cm_listen )\r
+{\r
+ ib_api_status_t status;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) )\r
+ {\r
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_AL_HANDLE\n") );\r
+ return IB_INVALID_AL_HANDLE;\r
+ }\r
+ if( !p_cm_listen || !pfn_listen_err_cb || !ph_cm_listen )\r
+ {\r
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+ status = __cep_listen(h_al, p_cm_listen, pfn_listen_err_cb, listen_context,\r
+ ph_cm_listen );\r
+\r
	AL_EXIT( AL_DBG_CM );\r
+ return status;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_cancel(\r
+ IN const ib_listen_handle_t h_cm_listen,\r
+ IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ if( AL_OBJ_INVALID_HANDLE( h_cm_listen, AL_OBJ_TYPE_H_LISTEN ) )\r
+ {\r
+ CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_HANDLE\n") );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+\r
+ ref_al_obj( &h_cm_listen->obj );\r
+ h_cm_listen->obj.pfn_destroy( &h_cm_listen->obj, pfn_destroy_cb );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return IB_SUCCESS;\r
+}\r
+\r
+\r
+ib_api_status_t\r
+ib_cm_handoff(\r
+ IN const ib_cm_handle_t h_cm_req,\r
+ IN const ib_net64_t svc_id )\r
+{\r
+ UNUSED_PARAM( h_cm_req );\r
+ UNUSED_PARAM( svc_id );\r
+ return IB_UNSUPPORTED;\r
+}\r
#define IS_CM_IOCTL(cmd) \\r
((cmd) > AL_CM_OPS_START && (cmd) < AL_CM_MAXOPS)\r
\r
+\r
+typedef enum _ual_cep_ops\r
+{\r
+ al_cep_ops_start = al_ioc_maxops,\r
+ ual_create_cep,\r
+ ual_destroy_cep,\r
+ ual_cep_listen,\r
+ ual_cep_pre_req,\r
+ ual_cep_send_req,\r
+ ual_cep_pre_rep,\r
+ ual_cep_send_rep,\r
+ ual_cep_get_rtr,\r
+ ual_cep_get_rts,\r
+ ual_cep_rtu,\r
+ ual_cep_rej,\r
+ ual_cep_mra,\r
+ ual_cep_lap,\r
+ ual_cep_pre_apr,\r
+ ual_cep_send_apr,\r
+ ual_cep_dreq,\r
+ ual_cep_drep,\r
+ ual_cep_get_timewait,\r
+ ual_cep_get_event,\r
+ ual_cep_poll,\r
+\r
+ al_cep_maxops\r
+\r
+} ual_cep_ops_t;\r
+\r
+#define UAL_CEP_OPS_START IOCTL_CODE(ALDEV_KEY, al_cep_ops_start)\r
+#define UAL_CEP_MAXOPS IOCTL_CODE(ALDEV_KEY, al_cep_maxops)\r
+#define IS_CEP_IOCTL(cmd) \\r
+ ((cmd) > UAL_CEP_OPS_START && (cmd) < UAL_CEP_MAXOPS)\r
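+\r
+/*\r
+ * Illustrative dispatch (sketch, not part of this change): a device\r
+ * IOCTL handler can use the range macros above to route commands, e.g.:\r
+ *\r
+ *	if( IS_CEP_IOCTL( cl_ioctl_ctl_code( h_ioctl ) ) )\r
+ *		cl_status = cep_ioctl( h_ioctl, &ret_bytes );\r
+ *\r
+ * cep_ioctl is the CEP handler declared elsewhere in this change; the\r
+ * surrounding dispatcher lives in the proxy code.\r
+ */\r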
+\r
+\r
/* AL ioctls */\r
\r
typedef enum _al_dev_ops\r
{\r
- al_ops_start = al_cm_maxops,\r
+ al_ops_start = al_cep_maxops,\r
\r
ual_reg_shmid_cmd,\r
ual_get_ca_attr,\r
#define UAL_CM_APR IOCTL_CODE(ALDEV_KEY, ual_cm_apr_cmd)\r
#define UAL_CM_FORCE_APM IOCTL_CODE(ALDEV_KEY, ual_force_apm_cmd)\r
\r
+/* CEP Related IOCTL commands */\r
+#define UAL_CREATE_CEP IOCTL_CODE(ALDEV_KEY, ual_create_cep)\r
+#define UAL_DESTROY_CEP IOCTL_CODE(ALDEV_KEY, ual_destroy_cep)\r
+#define UAL_CEP_LISTEN IOCTL_CODE(ALDEV_KEY, ual_cep_listen)\r
+#define UAL_CEP_PRE_REQ IOCTL_CODE(ALDEV_KEY, ual_cep_pre_req)\r
+#define UAL_CEP_SEND_REQ IOCTL_CODE(ALDEV_KEY, ual_cep_send_req)\r
+#define UAL_CEP_PRE_REP IOCTL_CODE(ALDEV_KEY, ual_cep_pre_rep)\r
+#define UAL_CEP_SEND_REP IOCTL_CODE(ALDEV_KEY, ual_cep_send_rep)\r
+#define UAL_CEP_GET_RTR IOCTL_CODE(ALDEV_KEY, ual_cep_get_rtr)\r
+#define UAL_CEP_GET_RTS IOCTL_CODE(ALDEV_KEY, ual_cep_get_rts)\r
+#define UAL_CEP_RTU IOCTL_CODE(ALDEV_KEY, ual_cep_rtu)\r
+#define UAL_CEP_REJ IOCTL_CODE(ALDEV_KEY, ual_cep_rej)\r
+#define UAL_CEP_MRA IOCTL_CODE(ALDEV_KEY, ual_cep_mra)\r
+#define UAL_CEP_LAP IOCTL_CODE(ALDEV_KEY, ual_cep_lap)\r
+#define UAL_CEP_PRE_APR IOCTL_CODE(ALDEV_KEY, ual_cep_pre_apr)\r
+#define UAL_CEP_SEND_APR IOCTL_CODE(ALDEV_KEY, ual_cep_send_apr)\r
+#define UAL_CEP_DREQ IOCTL_CODE(ALDEV_KEY, ual_cep_dreq)\r
+#define UAL_CEP_DREP IOCTL_CODE(ALDEV_KEY, ual_cep_drep)\r
+#define UAL_CEP_GET_TIMEWAIT IOCTL_CODE(ALDEV_KEY, ual_cep_get_timewait)\r
+#define UAL_CEP_GET_EVENT IOCTL_CODE(ALDEV_KEY, ual_cep_get_event)\r
+#define UAL_CEP_POLL IOCTL_CODE(ALDEV_KEY, ual_cep_poll)\r
+\r
#define UAL_GET_CA_ATTR_INFO IOCTL_CODE(ALDEV_KEY, ual_get_ca_attr)\r
\r
/* PnP related ioctl commands. */\r
CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_CA_HANDLE\n") );\r
return IB_INVALID_CA_HANDLE;\r
}\r
- if( !p_ioc_profile || ph_ioc )\r
+ if( !p_ioc_profile || !ph_ioc )\r
{\r
CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );\r
return IB_INVALID_PARAMETER;\r
} ioc_state_t;\r
\r
\r
+#pragma warning(disable:4324)\r
typedef struct _al_ioc\r
{\r
al_obj_t obj; /* Child of ib_ca_t */\r
atomic32_t in_use_cnt;\r
\r
} al_ioc_t;\r
+#pragma warning(default:4324)\r
\r
\r
typedef struct _al_svc_entry\r
__send_timer_cb(\r
IN void *context );\r
\r
-static void\r
-__send_async_proc_cb(\r
- IN cl_async_proc_item_t *p_send_async_item );\r
-\r
static void\r
__check_send_queue(\r
IN ib_mad_svc_handle_t h_mad_svc );\r
\r
/* Construct the MAD service. */\r
construct_al_obj( &h_mad_svc->obj, AL_OBJ_TYPE_H_MAD_SVC );\r
- cl_async_proc_construct( &h_mad_svc->send_async_proc );\r
cl_timer_construct( &h_mad_svc->send_timer );\r
cl_timer_construct( &h_mad_svc->recv_timer );\r
cl_qlist_init( &h_mad_svc->send_list );\r
return ib_convert_cl_status( cl_status );\r
}\r
\r
- cl_status = cl_async_proc_init( &h_mad_svc->send_async_proc,\r
- 1, "MAD svc send timeout" );\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );\r
- return ib_convert_cl_status( cl_status );\r
- }\r
-\r
*ph_mad_svc = h_mad_svc;\r
\r
CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
ib_mad_send_handle_t h_send;\r
cl_list_item_t *p_list_item;\r
int32_t timeout_ms;\r
+#ifdef CL_KERNEL\r
+ KIRQL old_irql;\r
+#endif\r
\r
CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
CL_ASSERT( p_obj );\r
timeout_ms -= 10;\r
}\r
\r
- /*\r
- * Cancel all outstanding send requests. Stop the send timer to avoid\r
- * synchronizing with it.\r
- */\r
- cl_timer_stop( &h_mad_svc->send_timer );\r
- cl_async_proc_destroy( &h_mad_svc->send_async_proc );\r
- cl_timer_destroy( &h_mad_svc->send_timer );\r
-\r
/*\r
* Deregister from the MAD dispatcher. The MAD dispatcher holds\r
* a reference on the MAD service when invoking callbacks. Since we\r
if( h_mad_svc->h_mad_reg )\r
__mad_disp_dereg( h_mad_svc->h_mad_reg );\r
\r
+ /* Cancel all outstanding send requests. */\r
+ cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );\r
p_list_item != cl_qlist_end( &h_mad_svc->send_list );\r
p_list_item = cl_qlist_next( p_list_item ) )\r
h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
h_send->canceled = TRUE;\r
}\r
+ cl_spinlock_release( &h_mad_svc->obj.lock );\r
\r
/*\r
* Invoke the timer callback to return the canceled MADs to the user.\r
* Since the MAD service is being destroyed, the user cannot be issuing\r
* sends.\r
*/\r
+#ifdef CL_KERNEL\r
+ old_irql = KeRaiseIrqlToDpcLevel();\r
+#endif\r
__check_send_queue( h_mad_svc );\r
+#ifdef CL_KERNEL\r
+ KeLowerIrql( old_irql );\r
+#endif\r
+\r
+ cl_timer_destroy( &h_mad_svc->send_timer );\r
\r
#ifdef CL_KERNEL\r
/*\r
if( h_mad_svc->obj.h_al->p_context )\r
{\r
cl_qlist_t *p_cblist;\r
- cl_list_item_t *p_list_item;\r
al_proxy_cb_info_t *p_cb_info;\r
\r
cl_spinlock_acquire( &h_mad_svc->obj.h_al->p_context->cb_lock );\r
}\r
\r
\r
+ib_api_status_t\r
+ib_delay_mad(\r
+ IN const ib_mad_svc_handle_t h_mad_svc,\r
+ IN ib_mad_element_t* const p_mad_element,\r
+ IN const uint32_t delay_ms )\r
+{\r
+#ifdef CL_KERNEL\r
+ cl_list_item_t *p_list_item;\r
+ ib_mad_send_handle_t h_send;\r
+#endif\r
+\r
+ AL_ENTER( AL_DBG_MAD_SVC );\r
+\r
+ if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );\r
+ return IB_INVALID_HANDLE;\r
+ }\r
+ if( !p_mad_element )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );\r
+ return IB_INVALID_PARAMETER;\r
+ }\r
+\r
+#ifndef CL_KERNEL\r
+ UNUSED_PARAM( p_mad_element );\r
+ UNUSED_PARAM( delay_ms );\r
+ /* TODO: support for user-mode MAD QP's. */\r
+ AL_EXIT( AL_DBG_MAD_SVC );\r
+ return IB_UNSUPPORTED;\r
+#else\r
+ /* Search for the MAD in our MAD list. It may have already completed. */\r
+ cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
+ p_list_item = cl_qlist_find_from_head( &h_mad_svc->send_list,\r
+ __mad_svc_find_send, p_mad_element );\r
+\r
+	if( p_list_item == cl_qlist_end( &h_mad_svc->send_list ) )\r
+	{\r
+		cl_spinlock_release( &h_mad_svc->obj.lock );\r
+		AL_TRACE_EXIT( AL_DBG_MAD_SVC, ("MAD not found\n") );\r
+ return IB_NOT_FOUND;\r
+ }\r
+\r
+	/* Found the send - adjust its timing to delay the next retry. */\r
+ h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );\r
+\r
+ if( h_send->retry_time == MAX_TIME )\r
+ h_send->delay = delay_ms;\r
+ else\r
+ h_send->retry_time += ((uint64_t)delay_ms * 1000ULL);\r
+\r
+ cl_spinlock_release( &h_mad_svc->obj.lock );\r
+ AL_EXIT( AL_DBG_MAD_SVC );\r
+ return IB_SUCCESS;\r
+#endif\r
+}\r
+\r
\r
/*\r
* Process a send completion.\r
__set_retry_time(\r
IN ib_mad_send_handle_t h_send )\r
{\r
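+	/*\r
+	 * Fold any delay requested via ib_delay_mad into the next retry\r
+	 * window, then clear it so the delay applies only once.\r
+	 */\r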
- h_send->retry_time = h_send->p_send_mad->timeout_ms * 1000 +\r
+ h_send->retry_time =\r
+ (uint64_t)(h_send->p_send_mad->timeout_ms + h_send->delay) * 1000ULL +\r
cl_get_time_stamp();\r
+ h_send->delay = 0;\r
}\r
\r
\r
__send_timer_cb(\r
IN void *context )\r
{\r
- ib_mad_svc_handle_t h_mad_svc;\r
-\r
CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
\r
- /*\r
- If we haven't already queued the asynchronous processing item to\r
- check the send queue, do so now.\r
- */\r
- h_mad_svc = (ib_mad_svc_handle_t)context;\r
- cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
-\r
- /*\r
- See if the asynchronous processing item is in use. If it is already\r
- in use, it means that we're about to check the send queue anyway, so\r
- just ignore the timer. Also, don't bother scheduling if the object\r
- state is not CL_INITIALIZED; we may be destroying the MAD service.\r
- */\r
- if( !h_mad_svc->send_async_item.pfn_callback &&\r
- ( h_mad_svc->obj.state == CL_INITIALIZED ) )\r
- {\r
- /* Not in use, reference the service and queue the callback. */\r
- cl_atomic_inc( &h_mad_svc->ref_cnt );\r
- h_mad_svc->send_async_item.pfn_callback = __send_async_proc_cb;\r
- cl_async_proc_queue( &h_mad_svc->send_async_proc,\r
- &h_mad_svc->send_async_item );\r
- }\r
-\r
- cl_spinlock_release( &h_mad_svc->obj.lock );\r
-\r
- CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
-}\r
-\r
-\r
-\r
-static void\r
-__send_async_proc_cb(\r
- IN cl_async_proc_item_t *p_send_async_item )\r
-{\r
- ib_mad_svc_handle_t h_mad_svc;\r
-\r
- CL_ENTER( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
-\r
- h_mad_svc = PARENT_STRUCT( p_send_async_item, al_mad_svc_t,\r
- send_async_item );\r
-\r
- cl_spinlock_acquire( &h_mad_svc->obj.lock );\r
-\r
- /*\r
- * Don't bother processing if the object state is not\r
- * CL_INITIALIZED; we may be destroying the MAD service.\r
- */\r
- if( h_mad_svc->obj.state != CL_INITIALIZED )\r
- {\r
- cl_spinlock_release( &h_mad_svc->obj.lock );\r
- cl_atomic_dec( &h_mad_svc->ref_cnt );\r
- return;\r
- }\r
-\r
- /* The send_async_item is available for use again. */\r
- h_mad_svc->send_async_item.pfn_callback = NULL;\r
-\r
- cl_spinlock_release( &h_mad_svc->obj.lock );\r
-\r
- __check_send_queue( h_mad_svc );\r
+ __check_send_queue( (ib_mad_svc_handle_t)context );\r
\r
- /* Release the reference held during async processing. */\r
- cl_atomic_dec( &h_mad_svc->ref_cnt );\r
CL_EXIT( AL_DBG_MAD_SVC, g_al_dbg_lvl );\r
}\r
\r
ib_pfn_mad_comp_cb_t pfn_user_send_cb;\r
ib_pfn_mad_comp_cb_t pfn_user_recv_cb;\r
\r
- cl_async_proc_t send_async_proc;\r
- cl_async_proc_item_t send_async_item;\r
cl_qlist_t send_list;\r
cl_timer_t send_timer;\r
\r
IN al_mad_wr_t* const p_mad_wr );\r
\r
\r
+ib_api_status_t\r
+ib_delay_mad(\r
+ IN const ib_mad_svc_handle_t h_mad_svc,\r
+ IN ib_mad_element_t* const p_mad_element,\r
+ IN const uint32_t delay_ms );\r
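+\r
+/*\r
+ * Illustrative use (sketch): push out the next retransmission of an\r
+ * outstanding request MAD by one second; h_svc and p_mad stand in for\r
+ * a MAD service handle and one of its unanswered send elements.\r
+ *\r
+ *	status = ib_delay_mad( h_svc, p_mad, 1000 );\r
+ *\r
+ * IB_NOT_FOUND indicates the send already completed, so there is\r
+ * nothing left to delay.\r
+ */\r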
+\r
\r
#endif /* __IB_AL_MAD_H__ */\r
/* Absolute time that the request should be retried. */\r
uint64_t retry_time;\r
\r
+ /* Delay, in milliseconds, to add before the next retry. */\r
+ uint32_t delay;\r
+\r
/* Number of times that the request can be retried. */\r
uint32_t retry_cnt;\r
boolean_t canceled; /* indicates if send was canceled */\r
\r
ref_al_obj( &h_mcast->obj );\r
status = al_send_sa_req(\r
- &h_mcast->sa_dereg_req, h_mcast->port_guid, 500, 0, &sa_mad_data );\r
+ &h_mcast->sa_dereg_req, h_mcast->port_guid, 500, 0, &sa_mad_data, 0 );\r
if( status != IB_SUCCESS )\r
deref_al_obj( &h_mcast->obj );\r
\r
\r
p_mcast->state = SA_REG_STARTING;\r
status = al_send_sa_req( &p_mcast->sa_reg_req, p_mcast->port_guid,\r
- p_mcast_req->timeout_ms, p_mcast_req->retry_cnt, &sa_mad_data );\r
+ p_mcast_req->timeout_ms, p_mcast_req->retry_cnt, &sa_mad_data, 0 );\r
\r
CL_EXIT( AL_DBG_MCAST, g_al_dbg_lvl );\r
return status;\r
IN cl_ioctl_handle_t h_ioctl,\r
OUT size_t *p_ret_bytes );\r
\r
+cl_status_t cep_ioctl(\r
+ IN cl_ioctl_handle_t h_ioctl,\r
+ OUT size_t *p_ret_bytes );\r
+\r
cl_status_t ioc_ioctl(\r
IN cl_ioctl_handle_t h_ioctl,\r
OUT size_t *p_ret_bytes );\r
#include "al.h"\r
#include "al_av.h"\r
#include "al_ca.h"\r
-#include "al_cm_shared.h"\r
+#include "al_cm_cep.h"\r
#include "al_cq.h"\r
#include "al_debug.h"\r
#include "al_mad.h"\r
}\r
\r
\r
-/*\r
-static ib_api_status_t\r
-al_bad_leave_mcast(\r
- IN const ib_mcast_handle_t h_mcast )\r
-{\r
- UNUSED_PARAM( h_mcast );\r
- return IB_INVALID_PARAMETER;\r
-}\r
-*/\r
-\r
-\r
-static ib_api_status_t\r
-al_bad_cm_call(\r
- IN OUT al_conn_t* const p_conn )\r
-{\r
- UNUSED_PARAM( p_conn );\r
- return IB_INVALID_PARAMETER;\r
-}\r
-\r
-\r
-static ib_api_status_t\r
-al_bad_cm_pre_rep(\r
- IN OUT al_conn_t* const p_conn,\r
- IN OUT const ib_cm_rep_t* p_cm_rep )\r
-{\r
- UNUSED_PARAM( p_conn );\r
- UNUSED_PARAM( p_cm_rep );\r
- return IB_INVALID_PARAMETER;\r
-}\r
-\r
-\r
ib_api_status_t\r
init_base_qp(\r
IN ib_qp_t* const p_qp,\r
cq_attach_qp( h_qp->h_recv_cq, &h_qp->recv_cq_rel );\r
cq_attach_qp( h_qp->h_send_cq, &h_qp->send_cq_rel );\r
\r
- /*\r
- * Get the QP attributes. This works around a bug with create QP calls\r
- * above not reporting the correct attributes.\r
- */\r
-// ib_query_qp( h_qp, &qp_attr );\r
h_qp->num = qp_attr.num;\r
\r
return IB_SUCCESS;\r
/* Initialize the inherited QP first. */\r
status = init_raw_qp( &p_conn_qp->qp, h_pd, UNBOUND_PORT_GUID,\r
p_qp_create, p_umv_buf );\r
- if( status != IB_SUCCESS )\r
- {\r
- return status;\r
- }\r
\r
- return IB_SUCCESS;\r
+ p_conn_qp->cid = AL_INVALID_CID;\r
+\r
+ return status;\r
}\r
\r
\r
ib_qp_handle_t h_qp;\r
al_mad_qp_t *p_mad_qp;\r
al_qp_alias_t *p_qp_alias;\r
- al_conn_qp_t *p_conn_qp;\r
+ net32_t cid;\r
\r
CL_ASSERT( p_obj );\r
h_qp = PARENT_STRUCT( p_obj, ib_qp_t, obj );\r
\r
case IB_QPT_RELIABLE_CONN:\r
case IB_QPT_UNRELIABLE_CONN:\r
- p_conn_qp = PARENT_STRUCT( h_qp, al_conn_qp_t, qp);\r
+ cid = cl_atomic_xchg(\r
+ &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID );\r
+ if( cid != AL_INVALID_CID )\r
+ {\r
+ ref_al_obj( &h_qp->obj );\r
+ if( al_destroy_cep(\r
+ h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS )\r
+ {\r
+ deref_al_obj( &h_qp->obj );\r
+ }\r
+ }\r
\r
- /* Disconnect the QP. */\r
- cm_conn_destroy( p_conn_qp );\r
/* Fall through. */\r
-\r
case IB_QPT_UNRELIABLE_DGRM:\r
default:\r
/* Multicast membership gets cleaned up by object hierarchy. */\r
CL_ASSERT( p_event_rec );\r
h_qp = (ib_qp_handle_t)p_event_rec->context;\r
\r
+#if defined(CL_KERNEL)\r
+ switch( p_event_rec->code )\r
+ {\r
+ case IB_AE_QP_COMM:\r
+ al_cep_established( h_qp->obj.h_al, ((al_conn_qp_t*)h_qp)->cid );\r
+ break;\r
+\r
+ case IB_AE_QP_APM:\r
+ al_cep_migrate( h_qp->obj.h_al, ((al_conn_qp_t*)h_qp)->cid );\r
+ break;\r
+\r
+ case IB_AE_QP_APM_ERROR:\r
+ //***TODO: Figure out how to handle these errors.\r
+ break;\r
+\r
+ default:\r
+ break;\r
+ }\r
+#endif\r
+\r
p_event_rec->context = (void*)h_qp->obj.context;\r
p_event_rec->handle.h_qp = h_qp;\r
\r
CL_EXIT( AL_DBG_MW, g_al_dbg_lvl );\r
return status;\r
}\r
-\r
-\r
-ib_al_handle_t\r
-qp_get_al(\r
- IN const ib_qp_handle_t h_qp )\r
-{\r
- /* AL the is great-grandparent of the QP. */\r
- return (ib_al_handle_t)\r
- h_qp->obj.p_parent_obj->p_parent_obj->p_parent_obj;\r
-}\r
\r
ib_cm_handle_t p_conn;\r
\r
+ atomic32_t cid;\r
+\r
+ /* Callback table. */\r
+ ib_pfn_cm_req_cb_t pfn_cm_req_cb;\r
+ ib_pfn_cm_rep_cb_t pfn_cm_rep_cb;\r
+ ib_pfn_cm_mra_cb_t pfn_cm_mra_cb;\r
+ ib_pfn_cm_rtu_cb_t pfn_cm_rtu_cb;\r
+ ib_pfn_cm_lap_cb_t pfn_cm_lap_cb;\r
+ ib_pfn_cm_apr_cb_t pfn_cm_apr_cb;\r
+ ib_pfn_cm_dreq_cb_t pfn_cm_dreq_cb;\r
+ ib_pfn_cm_drep_cb_t pfn_cm_drep_cb;\r
+ ib_pfn_cm_rej_cb_t pfn_cm_rej_cb; /* If RTU times out */\r
+\r
+\r
} al_conn_qp_t;\r
\r
\r
\r
\r
/* Return the AL instance associated with this QP. */\r
-ib_al_handle_t\r
+static inline ib_al_handle_t\r
qp_get_al(\r
- IN const ib_qp_handle_t h_qp );\r
+ IN const ib_qp_handle_t h_qp )\r
+{\r
+ return h_qp->obj.h_al;\r
+}\r
\r
\r
#endif /* __AL_QP_H__ */\r
static ib_api_status_t\r
query_sa(\r
IN al_query_t *p_query,\r
- IN const ib_query_req_t* const p_query_req );\r
+ IN const ib_query_req_t* const p_query_req,\r
+ IN const ib_al_flags_t flags );\r
\r
void\r
query_req_cb(\r
IN al_sa_req_t *p_sa_req,\r
IN ib_mad_element_t *p_mad_response );\r
\r
-static void\r
-__free_query(\r
- IN OUT al_query_t *p_query );\r
-\r
-\r
\r
ib_api_status_t\r
ib_query(\r
{\r
al_query_t *p_query;\r
ib_api_status_t status;\r
- cl_status_t cl_status;\r
- boolean_t sync;\r
\r
CL_ENTER( AL_DBG_QUERY, g_al_dbg_lvl );\r
\r
return IB_INSUFFICIENT_MEMORY;\r
}\r
\r
- /* Check for synchronous operation. */\r
- p_query->flags = p_query_req->flags;\r
- cl_event_construct( &p_query->event );\r
- sync = ( (p_query->flags & IB_FLAGS_SYNC) == IB_FLAGS_SYNC );\r
- if( sync )\r
- {\r
- cl_status = cl_event_init( &p_query->event, TRUE );\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- status = ib_convert_cl_status( cl_status );\r
- CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
- ("cl_init_event failed: %s\n", ib_get_err_str(status) ) );\r
- __free_query( p_query );\r
- return status;\r
- }\r
- }\r
-\r
/* Copy the query context information. */\r
p_query->sa_req.pfn_sa_req_cb = query_req_cb;\r
p_query->sa_req.user_context = p_query_req->query_context;\r
/* Track the query with the AL instance. */\r
al_insert_query( h_al, p_query );\r
\r
+ /*\r
+ * Set the query handle now so that users that do sync queries\r
+ * can also cancel the queries.\r
+ */\r
+ if( ph_query )\r
+ *ph_query = p_query;\r
+\r
/* Issue the MAD to the SA. */\r
- status = query_sa( p_query, (ib_query_req_t*)p_query_req );\r
- if( status == IB_SUCCESS )\r
- {\r
- /*\r
- * Set the query handle now so that users that do sync queries\r
- * can also cancel the queries.\r
- */\r
- if( ph_query )\r
- *ph_query = p_query;\r
- /* If synchronous, wait for the completion. */\r
- if( sync )\r
- {\r
- do\r
- {\r
- cl_status = cl_event_wait_on(\r
- &p_query->event, EVENT_NO_TIMEOUT, AL_WAIT_ALERTABLE );\r
- } while( cl_status == CL_NOT_DONE );\r
- CL_ASSERT( cl_status == CL_SUCCESS );\r
- }\r
- }\r
- else if( status != IB_INVALID_GUID )\r
+ status = query_sa( p_query, p_query_req, p_query_req->flags );\r
+ if( status != IB_SUCCESS && status != IB_INVALID_GUID )\r
{\r
CL_TRACE( AL_DBG_ERROR, g_al_dbg_lvl,\r
("query_sa failed: %s\n", ib_get_err_str(status) ) );\r
}\r
\r
/* Cleanup from issuing the query if it failed or was synchronous. */\r
- if( ( status != IB_SUCCESS ) || sync )\r
+ if( status != IB_SUCCESS )\r
{\r
al_remove_query( p_query );\r
- __free_query( p_query );\r
+ cl_free( p_query );\r
}\r
\r
CL_EXIT( AL_DBG_QUERY, g_al_dbg_lvl );\r
static ib_api_status_t\r
query_sa(\r
IN al_query_t *p_query,\r
- IN const ib_query_req_t* const p_query_req )\r
+ IN const ib_query_req_t* const p_query_req,\r
+ IN const ib_al_flags_t flags )\r
{\r
ib_user_query_t sa_req, *p_sa_req;\r
union _query_sa_recs\r
\r
status = al_send_sa_req(\r
&p_query->sa_req, p_query_req->port_guid, p_query_req->timeout_ms,\r
- p_query_req->retry_cnt, p_sa_req );\r
+ p_query_req->retry_cnt, p_sa_req, flags );\r
CL_EXIT( AL_DBG_QUERY, g_al_dbg_lvl );\r
return status;\r
}\r
/* Notify the user of the result. */\r
p_query->pfn_query_cb( &query_rec );\r
\r
- /* Check for synchronous operation. */\r
- if( (p_query->flags & IB_FLAGS_SYNC) == IB_FLAGS_SYNC )\r
- {\r
- cl_event_signal( &p_query->event );\r
- }\r
- else\r
- {\r
- /* Cleanup from issuing the query. */\r
- al_remove_query( p_query );\r
- __free_query( p_query );\r
- }\r
+ /* Cleanup from issuing the query. */\r
+ al_remove_query( p_query );\r
+ cl_free( p_query );\r
\r
CL_EXIT( AL_DBG_QUERY, g_al_dbg_lvl );\r
}\r
-\r
-\r
-\r
-static void\r
-__free_query(\r
- IN OUT al_query_t *p_query )\r
-{\r
- CL_ASSERT( p_query );\r
-\r
- cl_event_destroy( &p_query->event );\r
- cl_free( p_query );\r
-}\r
sa_req_svc_t *p_sa_req_svc; /* For cancellation */\r
ib_mad_element_t *p_mad_response;\r
ib_mad_element_t *p_mad_request; /* For cancellation */\r
+ KEVENT *p_sync_event;\r
#else /* defined( CL_KERNEL ) */\r
uint64_t hdl;\r
ual_send_sa_req_ioctl_t ioctl;\r
{\r
al_sa_req_t sa_req; /* Must be first. */\r
\r
- /* Used to perform synchronous requests. */\r
- ib_al_flags_t flags;\r
- cl_event_t event;\r
-\r
ib_al_handle_t h_al;\r
ib_pfn_query_cb_t pfn_query_cb;\r
ib_query_type_t query_type;\r
IN const net64_t port_guid,\r
IN const uint32_t timeout_ms,\r
IN const uint32_t retry_cnt,\r
- IN const ib_user_query_t* const p_sa_req_data );\r
+ IN const ib_user_query_t* const p_sa_req_data,\r
+ IN const ib_al_flags_t flags );\r
\r
#if defined( CL_KERNEL )\r
static __inline void\r
{\r
ib_reg_svc_handle_t h_reg_svc;\r
\r
- h_reg_svc = PARENT_STRUCT ( p_sa_req, al_reg_svc_t, sa_req );\r
+ /*\r
+ * Note that we come into this callback with a reference\r
+ * on the registration object.\r
+ */\r
+ h_reg_svc = PARENT_STRUCT( p_sa_req, al_reg_svc_t, sa_req );\r
\r
if( p_mad_response )\r
ib_put_mad( p_mad_response );\r
\r
- deref_al_obj( &h_reg_svc->obj );\r
+ h_reg_svc->obj.pfn_destroy( &h_reg_svc->obj, NULL );\r
}\r
\r
\r
sa_mad_data.comp_mask = ~CL_CONST64(0);\r
\r
if( al_send_sa_req( &h_reg_svc->sa_req, h_reg_svc->port_guid,\r
- 500, 0, &sa_mad_data ) != IB_SUCCESS )\r
+ 500, 0, &sa_mad_data, 0 ) != IB_SUCCESS )\r
{\r
/* Cleanup from the registration. */\r
deref_al_obj( &h_reg_svc->obj );\r
\r
	h_reg_svc->pfn_reg_svc_cb( &reg_svc_rec );\r
\r
- /* Check for synchronous operation. */\r
- if( (h_reg_svc->flags & IB_FLAGS_SYNC) == IB_FLAGS_SYNC )\r
- cl_event_signal( &h_reg_svc->event );\r
-\r
- /* Release the reference taken when issuing the request. */\r
- deref_al_obj( &h_reg_svc->obj );\r
+ if( p_sa_req->status != IB_SUCCESS )\r
+ {\r
+ h_reg_svc->obj.pfn_destroy( &h_reg_svc->obj, NULL );\r
+ }\r
+ else\r
+ {\r
+ /* Release the reference taken when issuing the request. */\r
+ deref_al_obj( &h_reg_svc->obj );\r
+ }\r
}\r
\r
\r
h_sa_reg = PARENT_STRUCT( p_obj, al_reg_svc_t, obj );\r
\r
destroy_al_obj( p_obj );\r
- cl_event_destroy( &h_sa_reg->event );\r
cl_free( h_sa_reg );\r
\r
AL_EXIT( AL_DBG_SA_REQ );\r
IN const ib_reg_svc_req_t* const p_reg_svc_req )\r
{\r
ib_user_query_t sa_mad_data;\r
- ib_api_status_t status;\r
\r
/* Set the request information. */\r
h_reg_svc->sa_req.pfn_sa_req_cb = reg_svc_req_cb;\r
sa_mad_data.comp_mask = p_reg_svc_req->svc_data_mask;\r
sa_mad_data.p_attr = &h_reg_svc->svc_rec;\r
\r
- status = al_send_sa_req( &h_reg_svc->sa_req, h_reg_svc->port_guid,\r
- p_reg_svc_req->timeout_ms, p_reg_svc_req->retry_cnt, &sa_mad_data );\r
- return status;\r
+ return al_send_sa_req( &h_reg_svc->sa_req, h_reg_svc->port_guid,\r
+ p_reg_svc_req->timeout_ms, p_reg_svc_req->retry_cnt, &sa_mad_data,\r
+ p_reg_svc_req->flags );\r
}\r
\r
\r
{\r
ib_reg_svc_handle_t h_sa_reg = NULL;\r
ib_api_status_t status;\r
- cl_status_t cl_status;\r
\r
AL_ENTER( AL_DBG_SA_REQ );\r
\r
return IB_INSUFFICIENT_MEMORY;\r
}\r
\r
- h_sa_reg->flags = p_reg_svc_req->flags;\r
- cl_event_construct( &h_sa_reg->event );\r
construct_al_obj( &h_sa_reg->obj, AL_OBJ_TYPE_H_SA_REG );\r
\r
status = init_al_obj( &h_sa_reg->obj, p_reg_svc_req->svc_context, TRUE,\r
return status;\r
}\r
\r
- /* Check for synchronous operation. */\r
- if( h_sa_reg->flags & IB_FLAGS_SYNC )\r
- {\r
- cl_status = cl_event_init( &h_sa_reg->event, TRUE );\r
- if( cl_status != CL_SUCCESS )\r
- {\r
- status = ib_convert_cl_status( cl_status );\r
- AL_TRACE_EXIT( AL_DBG_ERROR,\r
- ("cl_init_event failed: %s\n", ib_get_err_str(status)) );\r
- h_sa_reg->obj.pfn_destroy( &h_sa_reg->obj, NULL );\r
- return status;\r
- }\r
- }\r
-\r
/* Store the port GUID on which to issue the request. */\r
h_sa_reg->port_guid = p_reg_svc_req->port_guid;\r
\r
\r
/* Issue the MAD to the SA. */\r
status = sa_reg_svc( h_sa_reg, p_reg_svc_req );\r
- if( status == IB_SUCCESS )\r
- {\r
- /* If synchronous, wait for the completion. */\r
- if( h_sa_reg->flags & IB_FLAGS_SYNC )\r
- {\r
- do\r
- {\r
- cl_status = cl_event_wait_on(\r
- &h_sa_reg->event, EVENT_NO_TIMEOUT, AL_WAIT_ALERTABLE );\r
- } while( cl_status == CL_NOT_DONE );\r
- CL_ASSERT( cl_status == CL_SUCCESS );\r
-\r
- /* Cleanup from issuing the request if it failed. */\r
- if( h_sa_reg->state == SA_REG_ERROR )\r
- {\r
- status = h_sa_reg->req_status;\r
- /* The callback released the reference from init_al_obj. */\r
- ref_al_obj( &h_sa_reg->obj );\r
- }\r
- }\r
- }\r
- else\r
+ if( status != IB_SUCCESS )\r
{\r
AL_TRACE( AL_DBG_ERROR,\r
("sa_reg_svc failed: %s\n", ib_get_err_str(status) ) );\r
h_sa_reg->state = SA_REG_ERROR;\r
- }\r
\r
- if( status != IB_SUCCESS )\r
h_sa_reg->obj.pfn_destroy( &h_sa_reg->obj, NULL );\r
+ }\r
else\r
+ {\r
*ph_reg_svc = h_sa_reg;\r
+ }\r
\r
AL_EXIT( AL_DBG_SA_REQ );\r
return status;\r
/* Additional status information returned in the registration response. */\r
ib_net16_t resp_status;\r
\r
- /* Used to perform synchronous requests. */\r
- ib_al_flags_t flags;\r
- cl_event_t event;\r
-\r
al_sa_reg_state_t state;\r
ib_pfn_reg_svc_cb_t pfn_reg_svc_cb;\r
\r
SOURCES= ibal.rc \\r
al_ca_pnp.c \\r
al_ci_ca.c \\r
- al_cm.c \\r
- al_cm_conn.c \\r
- al_cm_sidr.c \\r
+ al_cm_cep.c \\r
al_dev.c \\r
al_driver.c \\r
al_ioc_pnp.c \\r
al_mr.c \\r
al_pnp.c \\r
al_proxy.c \\r
- al_proxy_cm.c \\r
+ al_proxy_cep.c \\r
al_proxy_ioc.c \\r
al_proxy_subnet.c \\r
al_proxy_verbs.c \\r
..\al_av.c \\r
..\al_ca.c \\r
..\al_ci_ca_shared.c \\r
- ..\al_cm_shared.c \\r
+ ..\al_cm_qp.c \\r
..\al_common.c \\r
..\al_cq.c \\r
..\al_dm.c \\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+\r
+#include <iba/ib_al.h>\r
+#include <complib/cl_vector.h>\r
+#include <complib/cl_rbmap.h>\r
+#include <complib/cl_qmap.h>\r
+#include <complib/cl_spinlock.h>\r
+#include "al_common.h"\r
+#include "al_cm_cep.h"\r
+#include "al_cm_conn.h"\r
+#include "al_cm_sidr.h"\r
+#include "al_debug.h"\r
+#include "ib_common.h"\r
+#include "al_mgr.h"\r
+#include "al_ca.h"\r
+#include "al.h"\r
+#include "al_mad.h"\r
+#include "al_qp.h"\r
+\r
+\r
+/*\r
+ * The vector object uses a list item at the front of the buffers\r
+ * it allocates. Take the list item into account so that allocations\r
+ * are for full page sizes.\r
+ */\r
+#define CEP_CID_MIN \\r
+ ((PAGE_SIZE - sizeof(cl_list_item_t)) / sizeof(cep_cid_t))\r
+#define CEP_CID_GROW \\r
+ ((PAGE_SIZE - sizeof(cl_list_item_t)) / sizeof(cep_cid_t))\r
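+\r
+/*\r
+ * For example, with a 4 KB page and illustrative (platform-dependent)\r
+ * sizes of 16 bytes for cl_list_item_t and 24 bytes for cep_cid_t,\r
+ * both values work out to (4096 - 16) / 24 = 170 entries.\r
+ */\r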
+\r
+/*\r
+ * We reserve the upper byte of the connection ID as a revolving counter so\r
+ * that connections that are retried by the client change connection ID.\r
+ * This counter is never zero, so it is OK to use all CIDs since we will never\r
+ * have a full CID (base + counter) that is zero.\r
+ * See the IB spec, section 12.9.8.7 for details about REJ retry.\r
+ */\r
+#define CEP_MAX_CID (0x00FFFFFF)\r
+#define CEP_MAX_CID_MASK (0x00FFFFFF)\r
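+\r
+/*\r
+ * A full communication ID is thus composed as (sketch):\r
+ *\r
+ *	comm_id = (modifier << 24) | (cid & CEP_MAX_CID_MASK)\r
+ *\r
+ * where cid indexes the CID vector and modifier is the revolving\r
+ * counter kept in the CID entry, so a REJ retry yields a new comm_id\r
+ * for the same CEP.\r
+ */\r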
+\r
+#define CEP_MAD_SQ_DEPTH (128)\r
+#define CEP_MAD_RQ_DEPTH (1) /* ignored. */\r
+#define CEP_MAD_SQ_SGE (1)\r
+#define CEP_MAD_RQ_SGE (1) /* ignored. */\r
+\r
+\r
+/* Global connection manager object. */\r
+typedef struct _al_cep_mgr\r
+{\r
+ al_obj_t obj;\r
+\r
+ cl_qmap_t port_map;\r
+\r
+ KSPIN_LOCK lock;\r
+\r
+	/* Table of CID entries (cep_cid_t), indexed by CID. */\r
+ cl_vector_t cid_vector;\r
+ uint32_t free_cid;\r
+\r
+ /* List of active listens. */\r
+ cl_rbmap_t listen_map;\r
+\r
+ /* Map of CEP by remote CID and CA GUID. */\r
+ cl_rbmap_t conn_id_map;\r
+ /* Map of CEP by remote QPN, used for stale connection matching. */\r
+ cl_rbmap_t conn_qp_map;\r
+\r
+ NPAGED_LOOKASIDE_LIST cep_pool;\r
+ NPAGED_LOOKASIDE_LIST req_pool;\r
+\r
+ /*\r
+ * Periodically walk the list of connections in the time wait state\r
+ * and flush them as appropriate.\r
+ */\r
+ cl_timer_t timewait_timer;\r
+ cl_qlist_t timewait_list;\r
+\r
+ ib_pnp_handle_t h_pnp;\r
+\r
+} al_cep_mgr_t;\r
+\r
+\r
+/* Per-port CM object. */\r
+typedef struct _cep_port_agent\r
+{\r
+ al_obj_t obj;\r
+\r
+ cl_map_item_t item;\r
+\r
+ ib_ca_handle_t h_ca;\r
+ ib_pd_handle_t h_pd;\r
+ ib_qp_handle_t h_qp;\r
+ ib_pool_key_t pool_key;\r
+ ib_mad_svc_handle_t h_mad_svc;\r
+\r
+ net64_t port_guid;\r
+ uint8_t port_num;\r
+ net16_t base_lid;\r
+\r
+} cep_agent_t;\r
+\r
+\r
+/*\r
+ * Note: the REQ, REP, and LAP values must be 1, 2, and 4 respectively.\r
+ * This allows shifting (1 << msg_mraed) from an MRA to determine which\r
+ * message the MRA was sent for.\r
+ */\r
+#define CEP_STATE_RCVD 0x10000000\r
+#define CEP_STATE_SENT 0x20000000\r
+#define CEP_STATE_MRA 0x01000000\r
+#define CEP_STATE_REQ 0x00000001\r
+#define CEP_STATE_REP 0x00000002\r
+#define CEP_STATE_LAP 0x00000004\r
+#define CEP_STATE_RTU 0x00000008\r
+#define CEP_STATE_DREQ 0x00000010\r
+#define CEP_STATE_DREP 0x00000020\r
+#define CEP_STATE_DESTROYING 0x00010000\r
+#define CEP_STATE_USER 0x00020000\r
+\r
+#define CEP_MSG_MASK 0x000000FF\r
+#define CEP_OP_MASK 0xF0000000\r
+\r
+#define CEP_STATE_PREP 0x00100000\r
+\r
+/* States match CM state transition diagrams from spec. */\r
+typedef enum _cep_state\r
+{\r
+ CEP_STATE_IDLE,\r
+ CEP_STATE_LISTEN,\r
+ CEP_STATE_ESTABLISHED,\r
+ CEP_STATE_TIMEWAIT,\r
+ CEP_STATE_SREQ_SENT,\r
+ CEP_STATE_SREQ_RCVD,\r
+ CEP_STATE_ERROR,\r
+ CEP_STATE_DESTROY = CEP_STATE_DESTROYING,\r
+ CEP_STATE_PRE_REQ = CEP_STATE_IDLE | CEP_STATE_PREP,\r
+ CEP_STATE_REQ_RCVD = CEP_STATE_REQ | CEP_STATE_RCVD,\r
+ CEP_STATE_PRE_REP = CEP_STATE_REQ_RCVD | CEP_STATE_PREP,\r
+ CEP_STATE_REQ_SENT = CEP_STATE_REQ | CEP_STATE_SENT,\r
+ CEP_STATE_REQ_MRA_RCVD = CEP_STATE_REQ_SENT | CEP_STATE_MRA,\r
+ CEP_STATE_REQ_MRA_SENT = CEP_STATE_REQ_RCVD | CEP_STATE_MRA,\r
+ CEP_STATE_PRE_REP_MRA_SENT = CEP_STATE_REQ_MRA_SENT | CEP_STATE_PREP,\r
+ CEP_STATE_REP_RCVD = CEP_STATE_REP | CEP_STATE_RCVD,\r
+ CEP_STATE_REP_SENT = CEP_STATE_REP | CEP_STATE_SENT,\r
+ CEP_STATE_REP_MRA_RCVD = CEP_STATE_REP_SENT | CEP_STATE_MRA,\r
+ CEP_STATE_REP_MRA_SENT = CEP_STATE_REP_RCVD | CEP_STATE_MRA,\r
+ CEP_STATE_LAP_RCVD = CEP_STATE_LAP | CEP_STATE_RCVD,\r
+ CEP_STATE_PRE_APR = CEP_STATE_LAP_RCVD | CEP_STATE_PREP,\r
+ CEP_STATE_LAP_SENT = CEP_STATE_LAP | CEP_STATE_SENT,\r
+ CEP_STATE_LAP_MRA_RCVD = CEP_STATE_LAP_SENT | CEP_STATE_MRA,\r
+ CEP_STATE_LAP_MRA_SENT = CEP_STATE_LAP_RCVD | CEP_STATE_MRA,\r
+ CEP_STATE_PRE_APR_MRA_SENT = CEP_STATE_LAP_MRA_SENT | CEP_STATE_PREP,\r
+ CEP_STATE_DREQ_SENT = CEP_STATE_DREQ | CEP_STATE_SENT,\r
+ CEP_STATE_DREQ_RCVD = CEP_STATE_DREQ | CEP_STATE_RCVD,\r
+ CEP_STATE_DREQ_DESTROY = CEP_STATE_DREQ_SENT | CEP_STATE_DESTROYING\r
+\r
+} cep_state_t;\r
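+\r
+/*\r
+ * The composite states above decompose into the flag bits, for example:\r
+ *\r
+ *	CEP_STATE_REQ_MRA_SENT == CEP_STATE_REQ | CEP_STATE_RCVD | CEP_STATE_MRA\r
+ *\r
+ * and since REQ, REP, and LAP are 1, 2, and 4, (1 << msg_mraed) from a\r
+ * received MRA maps msg_mraed values 0, 1, and 2 back to the bit of the\r
+ * message being acknowledged.\r
+ */\r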
+\r
+\r
+/* Active side CEP state transitions:\r
+* al_create_cep -> IDLE\r
+* al_cep_pre_req -> PRE_REQ\r
+* al_cep_send_req -> REQ_SENT\r
+* Recv REQ MRA -> REQ_MRA_RCVD\r
+* Recv REP -> REP_RCVD\r
+* al_cep_mra -> REP_MRA_SENT\r
+* al_cep_rtu -> ESTABLISHED\r
+*\r
+* Passive side CEP state transitions:\r
+* al_create_cep -> IDLE\r
+* Recv REQ -> REQ_RCVD\r
+* al_cep_mra* -> REQ_MRA_SENT\r
+* al_cep_pre_rep -> PRE_REP\r
+* al_cep_mra* -> PRE_REP_MRA_SENT\r
+* al_cep_send_rep -> REP_SENT\r
+* Recv RTU -> ESTABLISHED\r
+*\r
+* *al_cep_mra can only be called once - either before or after PRE_REP.\r
+*/\r
+\r
+typedef struct _al_kcep_av\r
+{\r
+ ib_av_attr_t attr;\r
+ net64_t port_guid;\r
+ uint16_t pkey_index;\r
+\r
+} kcep_av_t;\r
+\r
+\r
+typedef struct _al_kcep\r
+{\r
+ ib_cep_t cep;\r
+\r
+ struct _cep_cid *p_cid;\r
+\r
+ net64_t sid;\r
+\r
+ /* Port guid for filtering incoming requests. */\r
+ net64_t port_guid;\r
+\r
+ uint8_t* __ptr64 p_cmp_buf;\r
+ uint8_t cmp_offset;\r
+ uint8_t cmp_len;\r
+\r
+ boolean_t p2p;\r
+\r
+ /* Used to store connection structure with owning AL instance. */\r
+ cl_list_item_t al_item;\r
+\r
+ /* Flag to indicate whether a user is processing events. */\r
+ boolean_t signalled;\r
+\r
+ /* Destroy callback. */\r
+ ib_pfn_destroy_cb_t pfn_destroy_cb;\r
+\r
+ ib_mad_element_t *p_mad_head;\r
+ ib_mad_element_t *p_mad_tail;\r
+ al_pfn_cep_cb_t pfn_cb;\r
+\r
+ IRP *p_irp;\r
+\r
+ /* MAP item for finding listen CEPs. */\r
+ cl_rbmap_item_t listen_item;\r
+\r
+ /* Map item for finding CEPs based on remote comm ID & CA GUID. */\r
+ cl_rbmap_item_t rem_id_item;\r
+\r
+ /* Map item for finding CEPs based on remote QP number. */\r
+ cl_rbmap_item_t rem_qp_item;\r
+\r
+ /* Communication ID's for the connection. */\r
+ net32_t local_comm_id;\r
+ net32_t remote_comm_id;\r
+\r
+ net64_t local_ca_guid;\r
+ net64_t remote_ca_guid;\r
+\r
+ /* Remote QP, used for stale connection checking. */\r
+ net32_t remote_qpn;\r
+\r
+ /* Parameters to format QP modification structure. */\r
+ net32_t sq_psn;\r
+ net32_t rq_psn;\r
+ uint8_t resp_res;\r
+ uint8_t init_depth;\r
+ uint8_t rnr_nak_timeout;\r
+\r
+ /*\r
+ * Local QP number, used for the "additional check" required\r
+ * of the DREQ.\r
+ */\r
+ net32_t local_qpn;\r
+\r
+ /* PKEY to make sure a LAP is on the same partition. */\r
+ net16_t pkey;\r
+\r
+ /* Initiator depth as received in the REQ. */\r
+ uint8_t req_init_depth;\r
+\r
+ /*\r
+ * Primary and alternate path info, used to create the address vectors for\r
+ * sending MADs, to locate the port CM agent to use for outgoing sends,\r
+ * and for creating the address vectors for transitioning QPs.\r
+ */\r
+ kcep_av_t av[2];\r
+ uint8_t idx_primary;\r
+\r
+ /* Temporary AV and CEP port GUID used when processing LAP. */\r
+ kcep_av_t alt_av;\r
+ uint8_t alt_2pkt_life;\r
+\r
+	/* Maximum packet lifetime * 2 of any path used on a connection. */\r
+ uint8_t max_2pkt_life;\r
+ /* Given by the REP, used for alternate path setup. */\r
+ uint8_t target_ack_delay;\r
+ /* Stored to help calculate the local ACK delay in the LAP. */\r
+ uint8_t local_ack_delay;\r
+\r
+ /* Volatile to allow using atomic operations for state checks. */\r
+ cep_state_t state;\r
+\r
+ /*\r
+ * Flag that indicates whether a connection took the active role during\r
+ * establishment. \r
+ */\r
+ boolean_t was_active;\r
+\r
+ /*\r
+ * Handle to the sent MAD, used for cancelling. We store the handle to\r
+ * the mad service so that we can properly cancel. This should not be a\r
+ * problem since all outstanding sends should be completed before the\r
+ * mad service completes its destruction and the handle becomes invalid.\r
+ */\r
+ ib_mad_svc_handle_t h_mad_svc;\r
+ ib_mad_element_t *p_send_mad;\r
+\r
+	/* Number of outstanding MADs.  Delays destruction of the CEP. */\r
+ atomic32_t ref_cnt;\r
+\r
+ /* MAD transaction ID to use when sending MADs. */\r
+ uint64_t tid;\r
+\r
+ /* Maximum retries per MAD. Set at REQ time, stored to retry LAP. */\r
+ uint8_t max_cm_retries;\r
+ /* Timeout value, in milliseconds. Set at REQ time, stored to retry LAP. */\r
+ uint32_t retry_timeout;\r
+\r
+ /* Timer that will be signalled when the CEP exits timewait. */\r
+ KTIMER timewait_timer;\r
+ LARGE_INTEGER timewait_time;\r
+ cl_list_item_t timewait_item;\r
+\r
+ /*\r
+ * Pointer to a formatted MAD. The pre_req, pre_rep and pre_apr calls\r
+ * allocate and format the MAD, and the send_req, send_rep and send_apr\r
+ * calls send it.\r
+ */\r
+ ib_mad_element_t *p_mad;\r
+\r
+ /* Cache the last MAD sent for retransmission. */\r
+ union _mads\r
+ {\r
+ ib_mad_t hdr;\r
+ mad_cm_mra_t mra;\r
+ mad_cm_rtu_t rtu;\r
+ mad_cm_drep_t drep;\r
+\r
+ } mads;\r
+\r
+} kcep_t;\r
+\r
+\r
+/* Structures stored in the CID vector. */\r
+typedef struct _cep_cid\r
+{\r
+ /* Owning AL handle. NULL if invalid. */\r
+ ib_al_handle_t h_al;\r
+ /* Pointer to CEP, or index of next free entry if h_al is NULL. */\r
+ kcep_t *p_cep;\r
+ /* For REJ Retry support */\r
+ uint8_t modifier;\r
+\r
+} cep_cid_t;\r
+\r
+\r
+/* Global instance of the CM agent. */\r
+al_cep_mgr_t *gp_cep_mgr = NULL;\r
+\r
+\r
+static ib_api_status_t\r
+__format_drep(\r
+ IN kcep_t* const p_cep,\r
+ IN const uint8_t* p_pdata OPTIONAL,\r
+ IN uint8_t pdata_len,\r
+ IN OUT mad_cm_drep_t* const p_drep );\r
+\r
+static ib_api_status_t\r
+__cep_queue_mad(\r
+ IN kcep_t* const p_cep,\r
+ IN ib_mad_element_t* p_mad );\r
+\r
+static inline void\r
+__process_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static inline uint32_t\r
+__calc_mad_timeout(\r
+ IN const uint8_t pkt_life );\r
+\r
+static inline void\r
+__calc_timewait(\r
+ IN kcep_t* const p_cep );\r
+\r
+static kcep_t*\r
+__create_cep( void );\r
+\r
+static int32_t\r
+__cleanup_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static void\r
+__destroy_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static inline void\r
+__bind_cep(\r
+ IN kcep_t* const p_cep,\r
+ IN ib_al_handle_t h_al,\r
+ IN al_pfn_cep_cb_t pfn_cb,\r
+ IN void *context );\r
+\r
+static inline void\r
+__unbind_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static void\r
+__pre_destroy_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static kcep_t*\r
+__lookup_by_id(\r
+ IN net32_t remote_comm_id,\r
+ IN net64_t remote_ca_guid );\r
+\r
+static kcep_t*\r
+__lookup_listen(\r
+ IN net64_t sid,\r
+ IN net64_t port_guid,\r
+ IN void *p_pdata );\r
+\r
+static inline kcep_t*\r
+__lookup_cep(\r
+ IN ib_al_handle_t h_al OPTIONAL,\r
+ IN net32_t cid );\r
+\r
+static inline kcep_t*\r
+__insert_cep(\r
+ IN kcep_t* const p_new_cep );\r
+\r
+static inline void\r
+__remove_cep(\r
+ IN kcep_t* const p_cep );\r
+\r
+static inline void\r
+__insert_timewait(\r
+ IN kcep_t* const p_cep );\r
+\r
+static ib_api_status_t\r
+__cep_send_mad(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN ib_mad_element_t* const p_mad );\r
+\r
+/* Returns the CEP agent for the port with the given GID and LID, along with the index of the given PKEY. */\r
+static cep_agent_t*\r
+__find_port_cep(\r
+ IN const ib_gid_t* const p_gid,\r
+ IN const net16_t lid,\r
+ IN const net16_t pkey,\r
+ OUT uint16_t* const p_pkey_index );\r
+\r
+static cep_cid_t*\r
+__get_lcid(\r
+ OUT net32_t* const p_cid );\r
+\r
+static void\r
+__process_cep_send_comp(\r
+ IN cl_async_proc_item_t *p_item );\r
+\r
+\r
+/******************************************************************************\r
+* Per-port CEP agent\r
+******************************************************************************/\r
+\r
+\r
+static inline void\r
+__format_mad_hdr(\r
+ IN ib_mad_t* const p_mad,\r
+ IN const kcep_t* const p_cep,\r
+ IN net16_t attr_id )\r
+{\r
+ p_mad->base_ver = 1;\r
+ p_mad->mgmt_class = IB_MCLASS_COMM_MGMT;\r
+ p_mad->class_ver = IB_MCLASS_CM_VER_2;\r
+ p_mad->method = IB_MAD_METHOD_SEND;\r
+ p_mad->status = 0;\r
+ p_mad->class_spec = 0;\r
+ p_mad->trans_id = p_cep->tid;\r
+ p_mad->attr_id = attr_id;\r
+ p_mad->resv = 0;\r
+ p_mad->attr_mod = 0;\r
+}\r
+\r
+\r
+/* Consumes the input MAD. */\r
+static void\r
+__reject_mad(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN kcep_t* const p_cep,\r
+ IN ib_mad_element_t* const p_mad,\r
+ IN ib_rej_status_t reason )\r
+{\r
+ mad_cm_rej_t *p_rej;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf;\r
+\r
+ __format_mad_hdr( p_mad->p_mad_buf, p_cep, CM_REJ_ATTR_ID );\r
+\r
+ p_rej->local_comm_id = p_cep->local_comm_id;\r
+ p_rej->remote_comm_id = p_cep->remote_comm_id;\r
+ p_rej->reason = reason;\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_RCVD:\r
+ case CEP_STATE_REQ_MRA_SENT:\r
+ case CEP_STATE_PRE_REP:\r
+ case CEP_STATE_PRE_REP_MRA_SENT:\r
+ conn_rej_set_msg_rejected( 0, p_rej );\r
+ break;\r
+\r
+ case CEP_STATE_REP_RCVD:\r
+ case CEP_STATE_REP_MRA_SENT:\r
+ conn_rej_set_msg_rejected( 1, p_rej );\r
+ break;\r
+\r
+ default:\r
+ CL_ASSERT( reason == IB_REJ_TIMEOUT );\r
+ conn_rej_set_msg_rejected( 2, p_rej );\r
+ break;\r
+ }\r
+\r
+ conn_rej_clr_rsvd_fields( p_rej );\r
+ __cep_send_mad( p_port_cep, p_mad );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__reject_timeout(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN kcep_t* const p_cep,\r
+ IN const ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ ib_mad_element_t *p_rej_mad;\r
+ ib_mad_t *p_mad_buf;\r
+ ib_grh_t *p_grh;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_rej_mad );\r
+ if( status != IB_SUCCESS )\r
+ {\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("ib_get_mad returned %s\n", ib_get_err_str( status )) );\r
+ return;\r
+ }\r
+\r
+ /* Save the buffer pointers from the new element. */\r
+ p_mad_buf = p_rej_mad->p_mad_buf;\r
+ p_grh = p_rej_mad->p_grh;\r
+\r
+ /*\r
+ * Copy the input MAD element to the reject - this gives us\r
+ * all appropriate addressing information.\r
+ */\r
+ cl_memcpy( p_rej_mad, p_mad, sizeof(ib_mad_element_t) );\r
+ cl_memcpy( p_grh, p_mad->p_grh, sizeof(ib_grh_t) );\r
+\r
+ /* Restore the buffer pointers now that the copy is complete. */\r
+ p_rej_mad->p_mad_buf = p_mad_buf;\r
+ p_rej_mad->p_grh = p_grh;\r
+\r
+ status = conn_rej_set_pdata( NULL, 0, (mad_cm_rej_t*)p_mad_buf );\r
+ CL_ASSERT( status == IB_SUCCESS );\r
+\r
+ /* Copy the local CA GUID into the ARI. */\r
+ switch( p_mad->p_mad_buf->attr_id )\r
+ {\r
+	case CM_REQ_ATTR_ID:\r
+	case CM_REP_ATTR_ID:\r
+		status = conn_rej_set_ari(\r
+			(uint8_t*)&p_cep->local_ca_guid,\r
+			sizeof(p_cep->local_ca_guid), (mad_cm_rej_t*)p_mad_buf );\r
+		CL_ASSERT( status == IB_SUCCESS );\r
+		__reject_mad( p_port_cep, p_cep, p_rej_mad, IB_REJ_TIMEOUT );\r
+		break;\r
+\r
+ default:\r
+ CL_ASSERT( p_mad->p_mad_buf->attr_id == CM_REQ_ATTR_ID ||\r
+ p_mad->p_mad_buf->attr_id == CM_REP_ATTR_ID );\r
+ ib_put_mad( p_rej_mad );\r
+ return;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__reject_req(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN ib_mad_element_t* const p_mad,\r
+ IN const ib_rej_status_t reason )\r
+{\r
+ mad_cm_req_t *p_req;\r
+ mad_cm_rej_t *p_rej;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_port_cep );\r
+ CL_ASSERT( p_mad );\r
+ CL_ASSERT( reason != 0 );\r
+\r
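+ /* Both pointers alias the same MAD buffer - the REJ is formatted in place over the REQ. */\r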
+ p_req = (mad_cm_req_t*)p_mad->p_mad_buf;\r
+ p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf;\r
+\r
+ /*\r
+ * Format the reject information, overwriting the REQ data, and send\r
+ * the response.\r
+ */\r
+ p_rej->hdr.attr_id = CM_REJ_ATTR_ID;\r
+ p_rej->remote_comm_id = p_req->local_comm_id;\r
+ p_rej->local_comm_id = 0;\r
+ conn_rej_set_msg_rejected( 0, p_rej );\r
+ p_rej->reason = reason;\r
+ conn_rej_set_ari( NULL, 0, p_rej );\r
+ conn_rej_set_pdata( NULL, 0, p_rej );\r
+ conn_rej_clr_rsvd_fields( p_rej );\r
+\r
+ p_mad->retry_cnt = 0;\r
+ p_mad->send_opt = 0;\r
+ p_mad->timeout_ms = 0;\r
+ p_mad->resp_expected = FALSE;\r
+\r
+ __cep_send_mad( p_port_cep, p_mad );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__format_req_av(\r
+ IN kcep_t* const p_cep,\r
+ IN const mad_cm_req_t* const p_req,\r
+ IN const uint8_t idx )\r
+{\r
+ cep_agent_t *p_port_cep;\r
+ const req_path_info_t *p_path;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_cep );\r
+ CL_ASSERT( p_req );\r
+\r
+ cl_memclr( &p_cep->av[idx], sizeof(kcep_av_t) );\r
+\r
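+ /*\r
+ * The primary and alternate paths are contiguous in the REQ, so\r
+ * indexing off of the primary path selects either one.\r
+ */\r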
+ p_path = &((&p_req->primary_path)[idx]);\r
+\r
+ p_port_cep = __find_port_cep( &p_path->remote_gid,\r
+ p_path->remote_lid, p_req->pkey, &p_cep->av[idx].pkey_index );\r
+ if( !p_port_cep )\r
+ {\r
+ if( !idx )\r
+ p_cep->local_ca_guid = 0;\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+ }\r
+\r
+ if( !idx )\r
+ p_cep->local_ca_guid = p_port_cep->h_ca->obj.p_ci_ca->verbs.guid;\r
+\r
+ /* Check that CA GUIDs match if formatting the alternate path. */\r
+ if( idx &&\r
+ p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+ }\r
+\r
+ /*\r
+ * PKey indices must match if formatting the alternate path - the QP\r
+ * modify structure only allows for a single PKey index to be specified.\r
+ */\r
+ if( idx &&\r
+ p_cep->av[0].pkey_index != p_cep->av[1].pkey_index )\r
+ {\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+ }\r
+\r
+ p_cep->av[idx].port_guid = p_port_cep->port_guid;\r
+ p_cep->av[idx].attr.port_num = p_port_cep->port_num;\r
+\r
+ p_cep->av[idx].attr.sl = conn_req_path_get_svc_lvl( p_path );\r
+ p_cep->av[idx].attr.dlid = p_path->local_lid;\r
+\r
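+ /*\r
+ * A GRH is required when the path is not on the local subnet. The\r
+ * path is from the sender's perspective, so the sender's local GID\r
+ * is our destination GID.\r
+ */\r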
+ if( !conn_req_path_get_subn_lcl( p_path ) )\r
+ {\r
+ p_cep->av[idx].attr.grh_valid = TRUE;\r
+ p_cep->av[idx].attr.grh.ver_class_flow = ib_grh_set_ver_class_flow(\r
+ 1, p_path->traffic_class, conn_req_path_get_flow_lbl( p_path ) );\r
+ p_cep->av[idx].attr.grh.hop_limit = p_path->hop_limit;\r
+ p_cep->av[idx].attr.grh.dest_gid = p_path->local_gid;\r
+ p_cep->av[idx].attr.grh.src_gid = p_path->remote_gid;\r
+ }\r
+ else\r
+ {\r
+ p_cep->av[idx].attr.grh_valid = FALSE;\r
+ }\r
+ p_cep->av[idx].attr.static_rate = conn_req_path_get_pkt_rate( p_path );\r
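+ /*\r
+ * With LMC, a port answers to a range of LIDs starting at its base\r
+ * LID; the offset's low-order bits select the source path bits.\r
+ */\r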
+ p_cep->av[idx].attr.path_bits =\r
+ (uint8_t)(p_path->remote_lid - p_port_cep->base_lid);\r
+\r
+ /*\r
+ * Note that while we never use the connected AV attributes internally,\r
+ * we store them so we can pass them back to users.\r
+ */\r
+ p_cep->av[idx].attr.conn.path_mtu = conn_req_get_mtu( p_req );\r
+ p_cep->av[idx].attr.conn.local_ack_timeout =\r
+ conn_req_path_get_lcl_ack_timeout( p_path );\r
+ p_cep->av[idx].attr.conn.seq_err_retry_cnt =\r
+ conn_req_get_retry_cnt( p_req );\r
+ p_cep->av[idx].attr.conn.rnr_retry_cnt =\r
+ conn_req_get_rnr_retry_cnt( p_req );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/*\r
+ * + Validates the path information provided in the REQ and stores the\r
+ * associated CA attributes and port indices.\r
+ * + Transitions a connection object from active to passive in the peer case.\r
+ * + Sets the path information in the connection and sets the CA GUID\r
+ * in the REQ callback record.\r
+ */\r
+static void\r
+__save_wire_req(\r
+ IN OUT kcep_t* const p_cep,\r
+ IN OUT mad_cm_req_t* const p_req )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ p_cep->state = CEP_STATE_REQ_RCVD;\r
+ p_cep->was_active = FALSE;\r
+\r
+ p_cep->sid = p_req->sid;\r
+\r
+ /*\r
+ * Store pertinent information in the connection. Note that the\r
+ * REQ's "local" identifiers are our remote identifiers.\r
+ */\r
+ p_cep->remote_comm_id = p_req->local_comm_id;\r
+ p_cep->remote_ca_guid = p_req->local_ca_guid;\r
+\r
+ p_cep->remote_qpn = conn_req_get_lcl_qpn( p_req );\r
+ p_cep->local_qpn = 0;\r
+\r
+ p_cep->retry_timeout =\r
+ __calc_mad_timeout( conn_req_get_lcl_resp_timeout( p_req ) );\r
+\r
+ /* Store the retry count. */\r
+ p_cep->max_cm_retries = conn_req_get_max_cm_retries( p_req );\r
+\r
+ /*\r
+ * Copy the paths from the REQ into the connection for\r
+ * future use. Note that if the primary path is invalid,\r
+ * the REP will fail.\r
+ */\r
+ __format_req_av( p_cep, p_req, 0 );\r
+\r
+ if( p_req->alternate_path.local_lid )\r
+ __format_req_av( p_cep, p_req, 1 );\r
+ else\r
+ cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) );\r
+\r
+ p_cep->idx_primary = 0;\r
+\r
+ /* Store the maximum packet lifetime, used to calculate timewait. */\r
+ p_cep->max_2pkt_life = conn_req_path_get_lcl_ack_timeout( &p_req->primary_path );\r
+ p_cep->max_2pkt_life = max( p_cep->max_2pkt_life,\r
+ conn_req_path_get_lcl_ack_timeout( &p_req->alternate_path ) );\r
+\r
+ /*\r
+ * Make sure the target ack delay is cleared - the above\r
+ * "packet life" includes it.\r
+ */\r
+ p_cep->target_ack_delay = 0;\r
+\r
+ /* Store the requested initiator depth. */\r
+ p_cep->req_init_depth = conn_req_get_init_depth( p_req );\r
+\r
+ /*\r
+ * Store the provided responder resources. These turn into the local\r
+ * QP's initiator depth.\r
+ */\r
+ p_cep->init_depth = conn_req_get_resp_res( p_req );\r
+\r
+ p_cep->sq_psn = conn_req_get_starting_psn( p_req );\r
+\r
+ p_cep->tid = p_req->hdr.trans_id;\r
+ /* Copy the MAD info for CM handoff. */\r
+ /* TODO: Do we need to support CM handoff? */\r
+ //p_cep->mads.req = *p_req;\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+/* Must be called with the CEP lock held. */\r
+static void\r
+__repeat_mad(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN kcep_t* const p_cep,\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( p_port_cep );\r
+ CL_ASSERT( p_cep );\r
+ CL_ASSERT( p_mad );\r
+\r
+ /* Repeat the last mad sent for the connection. */\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_MRA_SENT: /* resend MRA(REQ) */\r
+ case CEP_STATE_REP_MRA_SENT: /* resend MRA(REP) */\r
+ case CEP_STATE_LAP_MRA_SENT: /* resend MRA(LAP) */\r
+ case CEP_STATE_ESTABLISHED: /* resend RTU */\r
+ case CEP_STATE_TIMEWAIT: /* resend the DREP */\r
+ cl_memcpy( p_mad->p_mad_buf, &p_cep->mads, MAD_BLOCK_SIZE );\r
+ p_mad->send_context1 = NULL;\r
+ p_mad->send_context2 = NULL;\r
+ __cep_send_mad( p_port_cep, p_mad );\r
+ break;\r
+\r
+ default:\r
+ /* Return the MAD to the mad pool */\r
+ ib_put_mad( p_mad );\r
+ break;\r
+ }\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__process_req(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_req_t *p_req;\r
+ kcep_t *p_cep, *p_new_cep, *p_stale_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ ib_rej_status_t reason;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ p_req = (mad_cm_req_t*)p_mad->p_mad_buf;\r
+\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("REQ: comm_id (x%x) qpn (x%x) received\n",\r
+ p_req->local_comm_id, conn_req_get_lcl_qpn( p_req )) );\r
+\r
+ KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
+\r
+ if( conn_req_get_qp_type( p_req ) > IB_QPT_UNRELIABLE_CONN )\r
+ {\r
+ /* Reserved value. Reject. */\r
+ AL_TRACE( AL_DBG_ERROR, ("Invalid transport type received.\n") );\r
+ reason = IB_REJ_INVALID_XPORT;\r
+ goto reject;\r
+ }\r
+\r
+ /* Match against pending connections using remote comm ID and CA GUID. */\r
+ p_cep = __lookup_by_id( p_req->local_comm_id, p_req->local_ca_guid );\r
+ if( p_cep )\r
+ {\r
+ /* Already received the REQ. */\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_MRA_SENT:\r
+ __repeat_mad( p_port_cep, p_cep, p_mad );\r
+ break;\r
+\r
+ case CEP_STATE_TIMEWAIT:\r
+ case CEP_STATE_DESTROY:\r
+ /* Send a reject. */\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("REQ received for connection in TIME_WAIT state.\n") );\r
+ __reject_req( p_port_cep, p_mad, IB_REJ_STALE_CONN );\r
+ break;\r
+\r
+ default:\r
+ /*\r
+ * Let regular retries repeat the MAD. If our last message was\r
+ * dropped, resending only adds to the congestion. If it wasn't\r
+ * dropped, then the remote CM will eventually process it, and\r
+ * we'd just be adding traffic.\r
+ */\r
+ AL_TRACE( AL_DBG_CM, ("Duplicate REQ received.\n") );\r
+ ib_put_mad( p_mad );\r
+ }\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+ }\r
+\r
+ /*\r
+ * Allocate a new CEP for the new request. This will\r
+ * prevent multiple identical REQs from queueing up for processing.\r
+ */\r
+ p_new_cep = __create_cep();\r
+ if( !p_new_cep )\r
+ {\r
+ /* Reject the request for insufficient resources. */\r
+ reason = IB_REJ_INSUF_RESOURCES;\r
+ AL_TRACE_EXIT( AL_DBG_ERROR,\r
+ ("al_create_cep failed\nREJ sent for insufficient resources.\n") );\r
+ goto reject;\r
+ }\r
+\r
+ __save_wire_req( p_new_cep, p_req );\r
+\r
+ /*\r
+ * Match against listens using the SID and compare data. Also provide\r
+ * the receiving MAD service's port GUID so we can filter properly.\r
+ */\r
+ p_cep = __lookup_listen( p_req->sid, p_port_cep->port_guid, p_req->pdata );\r
+ if( p_cep )\r
+ {\r
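+ /* Bind the new CEP to the listening client's AL instance and callback. */\r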
+ __bind_cep( p_new_cep, p_cep->p_cid->h_al, p_cep->pfn_cb, NULL );\r
+\r
+ /* Add the new CEP to the map so that repeated REQs match up. */\r
+ p_stale_cep = __insert_cep( p_new_cep );\r
+ if( p_stale_cep != p_new_cep )\r
+ {\r
+ /* Duplicate - must be a stale connection. */\r
+ /* TODO: Fail the CEP in p_stale_cep */\r
+ reason = IB_REJ_STALE_CONN;\r
+ goto unbind;\r
+ }\r
+\r
+ /*\r
+ * Queue the mad - the return value indicates whether we should\r
+ * invoke the callback.\r
+ */\r
+ status = __cep_queue_mad( p_cep, p_mad );\r
+ switch( status )\r
+ {\r
+ case IB_SUCCESS:\r
+ case IB_PENDING:\r
+ p_mad->send_context1 = p_new_cep;\r
+ break;\r
+\r
+ default:\r
+ reason = IB_REJ_INSUF_RESOURCES;\r
+ goto unbind;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ AL_TRACE( AL_DBG_CM, ("No listens active!\n") );\r
+\r
+ /* Match against peer-to-peer requests using SID and compare data. */\r
+ //p_cep = __lookup_peer();\r
+ //if( p_cep )\r
+ //{\r
+ // p_mad->send_context2 = NULL;\r
+ // p_list_item = cl_qlist_find_from_head( &gp_cep_mgr->pending_list,\r
+ // __match_peer, p_req );\r
+ // if( p_list_item != cl_qlist_end( &gp_cep_mgr->pending_list ) )\r
+ // {\r
+ // KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ // p_conn = PARENT_STRUCT( p_list_item, kcep_t, map_item );\r
+ // __peer_req( p_port_cep, p_conn, p_async_mad->p_mad );\r
+ // cl_free( p_async_mad );\r
+ // CL_TRACE_EXIT( AL_DBG_CM, g_al_dbg_lvl,\r
+ // ("REQ matched a peer-to-peer request.\n") );\r
+ // return;\r
+ // }\r
+ // reason = IB_REJ_INVALID_SID;\r
+ // goto free;\r
+ //}\r
+ //else\r
+ {\r
+ /* No match found. Reject. */\r
+ reason = IB_REJ_INVALID_SID;\r
+ AL_TRACE( AL_DBG_CM, ("REQ received but no match found.\n") );\r
+ goto cleanup;\r
+ }\r
+ }\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+\r
+ /* Process any queued MADs for the CEP. */\r
+ if( status == IB_SUCCESS )\r
+ __process_cep( p_cep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+\r
+unbind:\r
+ __unbind_cep( p_new_cep );\r
+\r
+cleanup:\r
+ /*\r
+ * Move the CEP to the idle state so that we don't send a reject\r
+ * for it when cleaning up. Also clear the RQPN and RCID so that\r
+ * we don't try to remove it from our maps (since it isn't inserted).\r
+ */\r
+ p_new_cep->state = CEP_STATE_IDLE;\r
+ p_new_cep->remote_comm_id = 0;\r
+ p_new_cep->remote_qpn = 0;\r
+ __cleanup_cep( p_new_cep );\r
+\r
+reject:\r
+ __reject_req( p_port_cep, p_mad, reason );\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__save_wire_rep(\r
+ IN OUT kcep_t* const p_cep,\r
+ IN const mad_cm_rep_t* const p_rep )\r
+{\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ /* The send should have been cancelled during MRA processing. */\r
+ p_cep->state = CEP_STATE_REP_RCVD;\r
+\r
+ /* Store pertinent information in the connection. */\r
+ p_cep->remote_comm_id = p_rep->local_comm_id;\r
+ p_cep->remote_ca_guid = p_rep->local_ca_guid;\r
+\r
+ p_cep->remote_qpn = conn_rep_get_lcl_qpn( p_rep );\r
+\r
+ /* Store the remote endpoint's target ACK delay. */\r
+ p_cep->target_ack_delay = conn_rep_get_target_ack_delay( p_rep );\r
+\r
+ /* Update the local ACK delay stored in the AV's. */\r
+ p_cep->av[0].attr.conn.local_ack_timeout = calc_lcl_ack_timeout(\r
+ p_cep->av[0].attr.conn.local_ack_timeout, p_cep->target_ack_delay );\r
+ p_cep->av[0].attr.conn.rnr_retry_cnt = conn_rep_get_rnr_retry_cnt( p_rep );\r
+\r
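+ /* A non-zero port GUID on the alternate AV means an alternate path was given. */\r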
+ if( p_cep->av[1].port_guid )\r
+ {\r
+ p_cep->av[1].attr.conn.local_ack_timeout = calc_lcl_ack_timeout(\r
+ p_cep->av[1].attr.conn.local_ack_timeout,\r
+ p_cep->target_ack_delay );\r
+ p_cep->av[1].attr.conn.rnr_retry_cnt =\r
+ p_cep->av[0].attr.conn.rnr_retry_cnt;\r
+ }\r
+\r
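+ /*\r
+ * The REP's fields are from the remote's perspective: its responder\r
+ * resources cap our initiator depth, and vice versa.\r
+ */\r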
+ p_cep->init_depth = p_rep->resp_resources;\r
+ p_cep->resp_res = p_rep->initiator_depth;\r
+\r
+ p_cep->sq_psn = conn_rep_get_starting_psn( p_rep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__process_mra(\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_mra_t *p_mra;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ p_mra = (mad_cm_mra_t*)p_mad->p_mad_buf;\r
+\r
+ KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( NULL, p_mra->remote_comm_id );\r
+ if( !p_cep )\r
+ {\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("MRA received that could not be matched.\n") );\r
+ goto err;\r
+ }\r
+\r
+ if( p_cep->remote_comm_id )\r
+ {\r
+ if( p_cep->remote_comm_id != p_mra->local_comm_id )\r
+ {\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("MRA received that could not be matched.\n") );\r
+ goto err;\r
+ }\r
+ }\r
+ /*\r
+ * Note that we don't update the CEP's remote comm ID - it messes up REP\r
+ * processing since a non-zero RCID implies the connection is in the RCID\r
+ * map. Adding it here requires checking there and conditionally adding\r
+ * it. Ignoring it is a valid thing to do.\r
+ */\r
+\r
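+ /*\r
+ * The MRAed-message field selects a bit that must match the message\r
+ * bit recorded in our state - an MRA for anything other than our\r
+ * last sent message is invalid.\r
+ */\r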
+ if( !(p_cep->state & CEP_STATE_SENT) ||\r
+ ((1 << conn_mra_get_msg_mraed( p_mra )) !=\r
+ (p_cep->state & CEP_MSG_MASK)) )\r
+ {\r
+ /* Invalid state. */\r
+ AL_TRACE( AL_DBG_CM, ("MRA received in invalid state.\n") );\r
+ goto err;\r
+ }\r
+\r
+ /* Delay the outstanding send by the MRA's service timeout plus roughly one packet lifetime. */\r
+ CL_ASSERT( p_cep->p_send_mad );\r
+ ib_delay_mad( p_cep->h_mad_svc, p_cep->p_send_mad,\r
+ __calc_mad_timeout( conn_mra_get_svc_timeout( p_mra ) ) +\r
+ __calc_mad_timeout( p_cep->max_2pkt_life - 1 ) );\r
+\r
+ /* We only invoke a single callback for MRA. */\r
+ if( p_cep->state & CEP_STATE_MRA )\r
+ {\r
+ /* Invalid state. */\r
+ AL_TRACE( AL_DBG_CM, ("Already received MRA.\n") );\r
+ goto err;\r
+ }\r
+\r
+ p_cep->state |= CEP_STATE_MRA;\r
+\r
+ status = __cep_queue_mad( p_cep, p_mad );\r
+ CL_ASSERT( status != IB_INVALID_STATE );\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+\r
+ if( status == IB_SUCCESS )\r
+ __process_cep( p_cep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+\r
+err:\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ ib_put_mad( p_mad );\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__process_rej(\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_rej_t *p_rej;\r
+ kcep_t *p_cep = NULL;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ net64_t ca_guid;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf;\r
+\r
+ /* Either one of the communication IDs must be set. */\r
+ if( !p_rej->remote_comm_id && !p_rej->local_comm_id )\r
+ goto err1;\r
+\r
+ /* Check the pending list by the remote CA GUID and connection ID. */\r
+ KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
+ if( p_rej->remote_comm_id )\r
+ {\r
+ p_cep = __lookup_cep( NULL, p_rej->remote_comm_id );\r
+ }\r
+ else if( p_rej->reason == IB_REJ_TIMEOUT &&\r
+ conn_rej_get_ari_len( p_rej ) == sizeof(net64_t) )\r
+ {\r
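+ /*\r
+ * A timeout REJ carries the sender's CA GUID in the ARI (see\r
+ * __reject_timeout), so we can match by comm ID and CA GUID\r
+ * the same way we match a REQ.\r
+ */\r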
+ cl_memcpy( &ca_guid, p_rej->ari, sizeof(net64_t) );\r
+ p_cep = __lookup_by_id( p_rej->local_comm_id, ca_guid );\r
+ }\r
+\r
+ if( !p_cep )\r
+ {\r
+ goto err2;\r
+ }\r
+\r
+ if( p_cep->remote_comm_id &&\r
+ p_cep->remote_comm_id != p_rej->local_comm_id )\r
+ {\r
+ goto err2;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_SENT:\r
+ /*\r
+ * Ignore rejects with the status set to IB_REJ_INVALID_SID. We will\r
+ * continue to retry (up to max_cm_retries) to connect to the remote\r
+ * side. This is required to support peer-to-peer connections and\r
+ * clients that try to connect before the server comes up.\r
+ */\r
+ if( p_rej->reason == IB_REJ_INVALID_SID )\r
+ {\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("Request rejected (invalid SID) - retrying.\n") );\r
+ goto err2;\r
+ }\r
+\r
+ /* Fall through */\r
+ case CEP_STATE_REP_SENT:\r
+ case CEP_STATE_REQ_MRA_RCVD:\r
+ case CEP_STATE_REP_MRA_RCVD:\r
+ /* Cancel any outstanding MAD. */\r
+ if( p_cep->p_send_mad )\r
+ {\r
+ ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
+ p_cep->p_send_mad = NULL;\r
+ }\r
+\r
+ /* Fall through */\r
+ case CEP_STATE_REQ_RCVD:\r
+ case CEP_STATE_REP_RCVD:\r
+ case CEP_STATE_REQ_MRA_SENT:\r
+ case CEP_STATE_REP_MRA_SENT:\r
+ case CEP_STATE_PRE_REP:\r
+ case CEP_STATE_PRE_REP_MRA_SENT:\r
+ if( p_cep->state & CEP_STATE_PREP )\r
+ {\r
+ CL_ASSERT( p_cep->p_mad );\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ }\r
+ /* Abort connection establishment. No transition to timewait. */\r
+ __remove_cep( p_cep );\r
+ p_cep->state = CEP_STATE_IDLE;\r
+ break;\r
+\r
+ case CEP_STATE_ESTABLISHED:\r
+ case CEP_STATE_LAP_RCVD:\r
+ case CEP_STATE_LAP_SENT:\r
+ case CEP_STATE_LAP_MRA_RCVD:\r
+ case CEP_STATE_LAP_MRA_SENT:\r
+ case CEP_STATE_PRE_APR:\r
+ case CEP_STATE_PRE_APR_MRA_SENT:\r
+ if( p_cep->state & CEP_STATE_PREP )\r
+ {\r
+ CL_ASSERT( p_cep->p_mad );\r
+ ib_put_mad( p_cep->p_mad );\r
+ p_cep->p_mad = NULL;\r
+ }\r
+ p_cep->state = CEP_STATE_TIMEWAIT;\r
+ __insert_timewait( p_cep );\r
+ break;\r
+\r
+ default:\r
+ /* Ignore the REJ. */\r
+ AL_TRACE( AL_DBG_CM, ("REJ received in invalid state.\n") );\r
+ goto err2;\r
+ }\r
+\r
+ status = __cep_queue_mad( p_cep, p_mad );\r
+ CL_ASSERT( status != IB_INVALID_STATE );\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+\r
+ if( status == IB_SUCCESS )\r
+ __process_cep( p_cep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+\r
+err2:\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+err1:\r
+ ib_put_mad( p_mad );\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__process_rep(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_rep_t *p_rep;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+ cep_state_t old_state;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ p_rep = (mad_cm_rep_t*)p_mad->p_mad_buf;\r
+\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("REP: comm_id (x%x) received\n", p_rep->local_comm_id ) );\r
+\r
+ KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( NULL, p_rep->remote_comm_id );\r
+ if( !p_cep )\r
+ {\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ ib_put_mad( p_mad );\r
+ AL_TRACE_EXIT( AL_DBG_CM,\r
+ ("REP received that could not be matched.\n") );\r
+ return;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REQ_MRA_RCVD:\r
+ case CEP_STATE_REQ_SENT:\r
+ old_state = p_cep->state;\r
+ /* Save pertinent information and change state. */\r
+ __save_wire_rep( p_cep, p_rep );\r
+\r
+ if( __insert_cep( p_cep ) != p_cep )\r
+ {\r
+ /* Roll back the state change. */\r
+ p_cep->state = old_state;\r
+ __reject_mad( p_port_cep, p_cep, p_mad, IB_REJ_STALE_CONN );\r
+ /* TODO: Handle stale connection. */\r
+ break;\r
+ }\r
+\r
+ /*\r
+ * Cancel any outstanding send. Note that we do this only after\r
+ * inserting the CEP - if that failed, the send will time out\r
+ * and we'll finish our way through the state machine.\r
+ */\r
+ if( p_cep->p_send_mad )\r
+ {\r
+ ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
+ p_cep->p_send_mad = NULL;\r
+ }\r
+\r
+ status = __cep_queue_mad( p_cep, p_mad );\r
+ CL_ASSERT( status != IB_INVALID_STATE );\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+\r
+ if( status == IB_SUCCESS )\r
+ __process_cep( p_cep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+\r
+ case CEP_STATE_ESTABLISHED:\r
+ case CEP_STATE_LAP_RCVD:\r
+ case CEP_STATE_LAP_SENT:\r
+ case CEP_STATE_LAP_MRA_RCVD:\r
+ case CEP_STATE_LAP_MRA_SENT:\r
+ case CEP_STATE_REP_MRA_SENT:\r
+ /* Repeat the MRA or RTU. */\r
+ __repeat_mad( p_port_cep, p_cep, p_mad );\r
+ break;\r
+\r
+ default:\r
+ ib_put_mad( p_mad );\r
+ AL_TRACE( AL_DBG_CM, ("REP received in invalid state.\n") );\r
+ break;\r
+ }\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__process_rtu(\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_rtu_t *p_rtu;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ p_rtu = (mad_cm_rtu_t*)p_mad->p_mad_buf;\r
+\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("RTU: comm_id (x%x) received\n", p_rtu->local_comm_id) );\r
+\r
+ /* Find the connection by local connection ID. */\r
+ KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( NULL, p_rtu->remote_comm_id );\r
+ if( !p_cep || p_cep->remote_comm_id != p_rtu->local_comm_id )\r
+ {\r
+ AL_TRACE( AL_DBG_CM, ("RTU received that could not be matched.\n") );\r
+ goto done;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REP_SENT:\r
+ case CEP_STATE_REP_MRA_RCVD:\r
+ /* Cancel any outstanding send. */\r
+ if( p_cep->p_send_mad )\r
+ {\r
+ ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
+ p_cep->p_send_mad = NULL;\r
+ }\r
+\r
+ p_cep->state = CEP_STATE_ESTABLISHED;\r
+\r
+ status = __cep_queue_mad( p_cep, p_mad );\r
+ CL_ASSERT( status != IB_INVALID_STATE );\r
+\r
+ /* Update timewait time. */\r
+ __calc_timewait( p_cep );\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+\r
+ if( status == IB_SUCCESS )\r
+ __process_cep( p_cep );\r
+\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+\r
+ default:\r
+ AL_TRACE( AL_DBG_CM, ("RTU received in invalid state.\n") );\r
+ break;\r
+ }\r
+\r
+done:\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ ib_put_mad( p_mad );\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__process_dreq(\r
+ IN cep_agent_t* const p_port_cep,\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_dreq_t *p_dreq;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ p_dreq = (mad_cm_dreq_t*)p_mad->p_mad_buf;\r
+\r
+ AL_TRACE( AL_DBG_CM,\r
+ ("DREQ: comm_id (x%x) qpn (x%x) received\n",\r
+ p_dreq->local_comm_id, conn_dreq_get_remote_qpn( p_dreq )) );\r
+\r
+ /* Find the connection by connection IDs. */\r
+ KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );\r
+ p_cep = __lookup_cep( NULL, p_dreq->remote_comm_id );\r
+ if( !p_cep ||\r
+ p_cep->remote_comm_id != p_dreq->local_comm_id ||\r
+ p_cep->local_qpn != conn_dreq_get_remote_qpn( p_dreq ) )\r
+ {\r
+ AL_TRACE( AL_DBG_CM, ("DREQ received that could not be matched.\n") );\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ ib_put_mad( p_mad );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+ }\r
+\r
+ switch( p_cep->state )\r
+ {\r
+ case CEP_STATE_REP_SENT:\r
+ case CEP_STATE_REP_MRA_RCVD:\r
+ case CEP_STATE_DREQ_SENT:\r
+ /* Cancel the outstanding MAD. */\r
+ if( p_cep->p_send_mad )\r
+ {\r
+ ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );\r
+ p_cep->p_send_mad = NULL;\r
+ }\r
+\r
+ /* Fall through and process as DREQ received case. */\r
+ case CEP_STATE_ESTABLISHED:\r
+ case CEP_STATE_LAP_RCVD:\r
+ case CEP_STATE_LAP_SENT:\r
+ case CEP_STATE_LAP_MRA_RCVD:\r
+ case CEP_STATE_LAP_MRA_SENT:\r
+ p_cep->state = CEP_STATE_DREQ_RCVD;\r
+\r
+ status = __cep_queue_mad( p_cep, p_mad );\r
+ CL_ASSERT( status != IB_INVALID_STATE );\r
+\r
+ /* Store the TID for use in the reply DREP. */\r
+ p_cep->tid = p_dreq->hdr.trans_id;\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+\r
+ if( status == IB_SUCCESS )\r
+ __process_cep( p_cep );\r
+ AL_EXIT( AL_DBG_CM );\r
+ return;\r
+\r
+ case CEP_STATE_TIMEWAIT:\r
+ case CEP_STATE_DESTROY:\r
+ /* Repeat the DREP. */\r
+ __repeat_mad( p_port_cep, p_cep, p_mad );\r
+ break;\r
+\r
+ default:\r
+ AL_TRACE( AL_DBG_CM, ("DREQ received in invalid state.\n") );\r
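+ /* Fall through - the MAD is dropped in either case. */\r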
+ case CEP_STATE_DREQ_RCVD:\r
+ ib_put_mad( p_mad );\r
+ break;\r
+ }\r
+\r
+ KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );\r
+ AL_EXIT( AL_DBG_CM );\r
+}\r
+\r
+\r
+static void\r
+__process_drep(\r
+ IN ib_mad_element_t* const p_mad )\r
+{\r
+ ib_api_status_t status;\r
+ mad_cm_drep_t *p_drep;\r
+ kcep_t *p_cep;\r
+ KLOCK_QUEUE_HANDLE hdl;\r
+\r
+ AL_ENTER( AL_DBG_CM );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );\r
+\r
+ p_drep = (mad_cm_drep_t*)p_mad->p_mad_buf;\r
+\r
+ /* Find the connection by local connection