uint8_t mad_buf[MAD_BLOCK_GRH_SIZE];\r
#endif\r
\r
- TO_LONG_PTR(ib_mad_element_t*, h_proxy_element) ; /* For user-mode support */\r
+ uint64_t h_proxy_element; /* For user-mode support */\r
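+	/* The handle above is a kernel pointer value round-tripped through\r
+	 * user mode as a fixed-width uint64_t, so 32- and 64-bit clients\r
+	 * share one structure layout. */\r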
\r
} al_mad_element_t;\r
\r
/* Multicast record */\r
struct _mcast_cb_ioctl_rec\r
{\r
- TO_LONG_PTR(const void* , mcast_context) ;\r
+ uint64_t mcast_context;\r
ib_api_status_t status;\r
ib_net16_t error_status;\r
-	TO_LONG_PTR( ib_mcast_handle_t , h_mcast) ;\r
+ uint64_t h_mcast;\r
ib_member_rec_t member_rec;\r
\r
} mcast_cb_ioctl_rec;\r
/* Mad send */\r
struct _mad_send_cb_ioctl_rec\r
{\r
- TO_LONG_PTR(ib_mad_element_t* , p_um_mad) ;\r
+ uint64_t p_um_mad;\r
ib_wc_status_t wc_status;\r
- TO_LONG_PTR(void* , mad_svc_context) ;\r
+ uint64_t mad_svc_context;\r
\r
} mad_send_cb_ioctl_rec;\r
\r
{\r
uint64_t h_mad;\r
uint32_t elem_size;\r
- TO_LONG_PTR(void* , mad_svc_context) ;\r
- TO_LONG_PTR(ib_mad_element_t* , p_send_mad) ;\r
+ uint64_t mad_svc_context;\r
+ uint64_t p_send_mad;\r
\r
} mad_recv_cb_ioctl_rec;\r
\r
\r
typedef struct _comp_cb_ioctl_info\r
{\r
- TO_LONG_PTR(void* , cq_context) ;\r
+ uint64_t cq_context;\r
\r
} comp_cb_ioctl_info_t;\r
\r
pnp_req.pnp_context = p_open_context;\r
pnp_req.pfn_pnp_cb = __proxy_pnp_cb;\r
\r
- p_user_status = p_ioctl->p_status;\r
- p_user_hdl = p_ioctl->p_hdl;\r
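+	/* The IOCTL carries user-mode addresses as uint64_t; casting through\r
+	 * ULONG_PTR converts them to native kernel pointers. */\r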
+ p_user_status = (ib_api_status_t*)(ULONG_PTR)p_ioctl->p_status;\r
+ p_user_hdl = (uint64_t*)(ULONG_PTR)p_ioctl->p_hdl;\r
\r
if( pnp_get_flag( p_ioctl->pnp_class ) & IB_PNP_FLAG_REG_SYNC )\r
{\r
p_context->h_al, p_ioctl->last_evt_hdl, AL_OBJ_TYPE_H_PNP_EVENT );\r
if( p_evt )\r
{\r
- p_evt->evt_context = p_ioctl->last_evt_context;\r
+ p_evt->evt_context = (void*)(ULONG_PTR)p_ioctl->last_evt_context;\r
p_evt->evt_status = p_ioctl->last_evt_status;\r
cl_event_signal( &p_evt->event );\r
}\r
\r
cid = AL_INVALID_CID;\r
p_ioctl->out.status = al_cep_pre_rep( p_context->h_al, p_ioctl->in.cid,\r
- p_ioctl->in.context, NULL, &p_ioctl->in.cm_rep, &cid, &p_ioctl->out.init );\r
+ (void*)(ULONG_PTR)p_ioctl->in.context, NULL, &p_ioctl->in.cm_rep,\r
+ &cid, &p_ioctl->out.init );\r
\r
deref_al_obj( &h_qp->obj );\r
\r
*p_ret_bytes = sizeof(ual_cep_poll_ioctl_t);\r
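+	/* al_cep_poll writes a native void* through the (void**) cast below;\r
+	 * on the 64-bit kernels this patch targets, it fills the uint64_t\r
+	 * context field completely. */\r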
\r
p_ioctl->status = al_cep_poll( p_context->h_al,\r
-		*(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->context,\r
+		*(net32_t*)cl_ioctl_in_buf( h_ioctl ),\r
+		(void**)&p_ioctl->context,\r
&p_ioctl->new_cid, &p_mad );\r
\r
if( p_ioctl->status == IB_SUCCESS )\r
if( status != IB_SUCCESS )\r
goto proxy_create_cq_err2;\r
\r
- status = create_cq( h_ca, &cq_create, p_ioctl->in.context,\r
- pfn_ev, &h_cq, p_umv_buf );\r
+ status = create_cq( h_ca, &cq_create,\r
+ (void*)(ULONG_PTR)p_ioctl->in.context, pfn_ev, &h_cq, p_umv_buf );\r
\r
if( status != IB_SUCCESS )\r
goto proxy_create_cq_err2;\r
CL_ASSERT( p_ioctl );\r
\r
/* Must save user's pointers in case req completes before call returns. */\r
- p_usr_status = p_ioctl->in.p_status;\r
- p_usr_hdl = p_ioctl->in.ph_sa_req;\r
+ p_usr_status = (ib_api_status_t*)(ULONG_PTR)p_ioctl->in.p_status;\r
+ p_usr_hdl = (uint64_t*)(ULONG_PTR)p_ioctl->in.ph_sa_req;\r
\r
if( p_ioctl->in.sa_req.attr_size > IB_SA_DATA_SIZE )\r
{\r
\r
/* Now copy the mad element with all info */\r
status = ib_convert_cl_status( cl_copy_from_user( p_mad_el,\r
- p_ioctl->in.p_mad_element, sizeof(ib_mad_element_t) ) );\r
+ (void*)(ULONG_PTR)p_ioctl->in.p_mad_element,\r
+ sizeof(ib_mad_element_t) ) );\r
if( status != IB_SUCCESS )\r
goto proxy_send_mad_err2;\r
\r
* MAD receive completion could fail to be delivered to the app even though\r
* the response was properly received in the kernel.\r
*/\r
- p_mad_el->context1 = p_ioctl->in.p_mad_element;\r
+ p_mad_el->context1 = (void*)(ULONG_PTR)p_ioctl->in.p_mad_element;\r
\r
/* Set the kernel AV handle. This is either NULL or a valid KM handle. */\r
p_mad_el->h_av = h_av;\r
\r
/* Copy the handle to UM to allow cancelling. */\r
status = ib_convert_cl_status( cl_copy_to_user(\r
- p_ioctl->in.ph_proxy, p_mad_el, sizeof(ib_mad_element_t*) ) );\r
+ (void*)(ULONG_PTR)p_ioctl->in.ph_proxy,\r
+ &p_mad_el, sizeof(ib_mad_element_t*) ) );\r
if( status != IB_SUCCESS )\r
goto proxy_send_mad_err2;\r
\r
* for correcting all pointers.\r
*/\r
status = ib_convert_cl_status( cl_copy_to_user(\r
- p_ioctl->in.p_user_mad, p_mad, sizeof(ib_mad_element_t) ) );\r
+ (void*)(ULONG_PTR)p_ioctl->in.p_user_mad,\r
+ p_mad, sizeof(ib_mad_element_t) ) );\r
if( status != IB_SUCCESS )\r
{\r
AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
\r
/* Copy the MAD buffer. */\r
status = ib_convert_cl_status( cl_copy_to_user(\r
- p_ioctl->in.p_mad_buf, p_mad->p_mad_buf, p_mad->size ) );\r
+ (void*)(ULONG_PTR)p_ioctl->in.p_mad_buf, p_mad->p_mad_buf, p_mad->size ) );\r
if( status != IB_SUCCESS )\r
{\r
AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
if( p_mad->grh_valid )\r
{\r
status = ib_convert_cl_status( cl_copy_to_user(\r
- p_ioctl->in.p_grh, p_mad->p_grh, sizeof(ib_grh_t) ) );\r
+ (void*)(ULONG_PTR)p_ioctl->in.p_grh, p_mad->p_grh, sizeof(ib_grh_t) ) );\r
if( status != IB_SUCCESS )\r
{\r
AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
cb_info.ioctl_rec.mad_send_cb_ioctl_rec.p_um_mad =\r
p_al_el->h_proxy_element;\r
cb_info.ioctl_rec.mad_send_cb_ioctl_rec.mad_svc_context =\r
- mad_svc_context;\r
+ (ULONG_PTR)mad_svc_context;\r
\r
/* Queue this mad completion notification for the user. */\r
proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &cb_info,\r
\r
/* Set up context and callback record type appropriate for UAL */\r
cb_info.rec_type = MAD_RECV_REC;\r
-	cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.mad_svc_context = mad_svc_context;\r
+	cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.mad_svc_context =\r
+		(ULONG_PTR)mad_svc_context;\r
cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.elem_size = p_mad_element->size;\r
cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.p_send_mad =\r
- (ib_mad_element_t*)p_mad_element->send_context1;\r
+ (ULONG_PTR)p_mad_element->send_context1;\r
\r
/*\r
* If we're already closing the device - do not queue a callback, since\r
return CL_SUCCESS;\r
}\r
\r
- p_ioctl->out.status =\r
- ib_cancel_mad( h_mad_svc, p_ioctl->in.h_proxy_element );\r
+ p_ioctl->out.status = ib_cancel_mad( h_mad_svc,\r
+ (ib_mad_element_t*)(ULONG_PTR)p_ioctl->in.h_proxy_element );\r
\r
/*\r
* The clean up of resources allocated for the sent mad will\r
goto proxy_open_ca_err;\r
\r
status = open_ca( p_context->h_al, p_ioctl->in.guid, proxy_ca_err_cb,\r
- p_ioctl->in.context, &h_ca, p_umv_buf );\r
+ (void*)(ULONG_PTR)p_ioctl->in.context, &h_ca, p_umv_buf );\r
if( status != IB_SUCCESS )\r
goto proxy_open_ca_err;\r
\r
{\r
__try\r
{\r
- ProbeForWrite( p_ioctl->in.p_ca_attr, byte_cnt, sizeof(void*) );\r
- ib_copy_ca_attr( p_ioctl->in.p_ca_attr, p_ca_attr );\r
+ ProbeForWrite( (void*)(ULONG_PTR)p_ioctl->in.p_ca_attr,\r
+ byte_cnt, sizeof(void*) );\r
+ ib_copy_ca_attr( (void*)(ULONG_PTR)p_ioctl->in.p_ca_attr,\r
+ p_ca_attr );\r
}\r
__except(EXCEPTION_EXECUTE_HANDLER)\r
{\r
AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,\r
("Failed to copy CA attributes to user buffer %016I64x\n",\r
- (LONG64)p_ioctl->in.p_ca_attr) );\r
+ p_ioctl->in.p_ca_attr) );\r
status = IB_INVALID_PERMISSION;\r
}\r
}\r
if( status != IB_SUCCESS )\r
goto proxy_alloc_pd_err;\r
\r
- status = alloc_pd( h_ca, p_ioctl->in.type, p_ioctl->in.context,\r
- &h_pd, p_umv_buf );\r
+ status = alloc_pd( h_ca, p_ioctl->in.type,\r
+ (void*)(ULONG_PTR)p_ioctl->in.context, &h_pd, p_umv_buf );\r
\r
if( status != IB_SUCCESS )\r
goto proxy_alloc_pd_err;\r
else\r
pfn_ev = NULL;\r
\r
- status = create_srq( h_pd, &p_ioctl->in.srq_attr, p_ioctl->in.context,\r
- pfn_ev, &h_srq, p_umv_buf );\r
+ status = create_srq( h_pd, &p_ioctl->in.srq_attr,\r
+ (void*)(ULONG_PTR)p_ioctl->in.context, pfn_ev, &h_srq, p_umv_buf );\r
if( status != IB_SUCCESS )\r
goto proxy_create_srq_err1;\r
\r
else\r
pfn_ev = NULL;\r
\r
- status = create_qp( h_pd, &p_ioctl->in.qp_create, p_ioctl->in.context,\r
- pfn_ev, &h_qp, p_umv_buf );\r
+ status = create_qp( h_pd, &p_ioctl->in.qp_create,\r
+ (void*)(ULONG_PTR)p_ioctl->in.context, pfn_ev, &h_qp, p_umv_buf );\r
/* TODO: The create_qp call should return the attributes... */\r
if( status != IB_SUCCESS )\r
goto proxy_create_qp_err1;\r
}\r
\r
/* Set up context and callback record type appropriate for UAL */\r
- cb_info.cq_context = cq_context;\r
+ cb_info.cq_context = (ULONG_PTR)cq_context;\r
\r
/* The proxy handle must be valid now. */\r
if( !h_cq->obj.hdl_valid )\r
else\r
pfn_ev = NULL;\r
\r
- status = create_cq( h_ca, &cq_create, p_ioctl->in.context,\r
- pfn_ev, &h_cq, p_umv_buf );\r
+ status = create_cq( h_ca, &cq_create,\r
+ (void*)(ULONG_PTR)p_ioctl->in.context, pfn_ev, &h_cq, p_umv_buf );\r
\r
if( status != IB_SUCCESS )\r
goto proxy_create_cq_err2;\r
\r
/* We obtain the pool_key separately from the special QP. */\r
status = get_spl_qp( h_pd, p_ioctl->in.port_guid,\r
- &p_ioctl->in.qp_create, p_ioctl->in.context, proxy_qp_err_cb, NULL, &h_qp, p_umv_buf );\r
+ &p_ioctl->in.qp_create, (void*)(ULONG_PTR)p_ioctl->in.context,\r
+ proxy_qp_err_cb, NULL, &h_qp, p_umv_buf );\r
if( status != IB_SUCCESS )\r
goto proxy_get_spl_qp_err;\r
\r
}\r
\r
ca_ioctl.in.guid = ca_guid;\r
- ca_ioctl.in.context = p_ci_ca;\r
+ ca_ioctl.in.context = (ULONG_PTR)p_ci_ca;\r
\r
cl_status = do_al_dev_ioctl( UAL_OPEN_CA,\r
&ca_ioctl.in, sizeof(ca_ioctl.in), &ca_ioctl.out, sizeof(ca_ioctl.out),\r
cl_memclr( &ca_ioctl, sizeof(ca_ioctl) );\r
\r
ca_ioctl.in.h_ca = h_ca->obj.p_ci_ca->obj.hdl;\r
- ca_ioctl.in.p_ca_attr = p_ca_attr;\r
+ ca_ioctl.in.p_ca_attr = (ULONG_PTR)p_ca_attr;\r
ca_ioctl.in.byte_cnt = *p_size;\r
\r
/* Call the uvp pre call if the vendor library provided a valid ca handle */\r
uvp_interface_t uvp_intf = h_ca->obj.p_ci_ca->verbs.user_verbs;\r
\r
AL_ENTER( AL_DBG_CA );\r
- cl_memclr(&ca_ioctl, sizeof(ca_ioctl));\r
+\r
/* Call the uvp pre call if the vendor library provided a valid ca handle */\r
if( h_ca->obj.p_ci_ca->h_ci_ca && uvp_intf.pre_modify_ca )\r
{\r
\r
/* Store user parameters. */\r
p_cep->pfn_cb = pfn_cb;\r
- p_cep->destroy_context = context; //TODO: context will no be longer __ptr64\r
+ p_cep->destroy_context = context;\r
\r
/* Create a kernel CEP only if we don't already have a CID. */\r
if( cid == AL_INVALID_CID )\r
{\r
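+		/* Pass the CEP context as a fixed-width value; the kernel returns\r
+		 * it unchanged later through ual_cep_poll's context field. */\r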
- if( !DeviceIoControl( g_al_device, UAL_CREATE_CEP, &context, \r
- sizeof(context), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) ||\r
+ uint64_t cep_context = (ULONG_PTR)context;\r
+ if( !DeviceIoControl( g_al_device, UAL_CREATE_CEP, &cep_context,\r
+ sizeof(cep_context), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) ||\r
bytes_ret != sizeof(ioctl) )\r
{\r
__destroy_ucep( p_cep );\r
p_cep->destroy_context = context;\r
cl_spinlock_release( &gp_cep_mgr->obj.lock );\r
\r
- ioctl.in.context = context;\r
+ ioctl.in.context = (ULONG_PTR)context;\r
ioctl.in.cid = cid;\r
ioctl.in.cm_rep = *p_cm_rep;\r
ioctl.in.cm_rep.h_qp = (ib_qp_handle_t)HDL_TO_PTR(p_cm_rep->h_qp->obj.hdl);\r
\r
cl_memcpy( p_mad->p_mad_buf, ioctl.mad_buf, MAD_BLOCK_SIZE );\r
\r
- *p_context = ioctl.context;\r
+ *p_context = (void*)(ULONG_PTR)ioctl.context;\r
*p_new_cid = ioctl.new_cid;\r
*pp_mad = p_mad;\r
}\r
\r
cq_ioctl.in.h_ca = p_ci_ca->obj.hdl;\r
cq_ioctl.in.size = p_cq_create->size;\r
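+	/* HandleToHandle64 (basetsd.h) widens the wait-object HANDLE into the\r
+	 * fixed 64-bit IOCTL field shared by 32- and 64-bit clients. */\r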
- cq_ioctl.in.h_wait_obj = p_cq_create->h_wait_obj;\r
- cq_ioctl.in.context = h_cq;\r
+ cq_ioctl.in.h_wait_obj = HandleToHandle64( p_cq_create->h_wait_obj );\r
+ cq_ioctl.in.context = (ULONG_PTR)h_cq;\r
cq_ioctl.in.ev_notify = (h_cq->pfn_event_cb != NULL) ? TRUE : FALSE;\r
\r
cl_status = do_al_dev_ioctl( UAL_CREATE_CQ,\r
AL_ENTER( AL_DBG_MAD );\r
\r
CL_ASSERT( p_mad_element );\r
- cl_memclr(&ioctl_buf, sizeof(ioctl_buf));\r
+\r
p_al_element = PARENT_STRUCT(\r
p_mad_element, al_mad_element_t, element );\r
\r
else\r
ioctl_buf.in.h_av = AL_INVALID_HANDLE;\r
\r
- ioctl_buf.in.p_mad_element = p_mad_element;\r
+ ioctl_buf.in.p_mad_element = (ULONG_PTR)p_mad_element;\r
ioctl_buf.in.size = p_mad_element->size;\r
- ioctl_buf.in.ph_proxy = &p_al_element->h_proxy_element;\r
+ ioctl_buf.in.ph_proxy = (ULONG_PTR)&p_al_element->h_proxy_element;\r
\r
cl_status = do_al_dev_ioctl( UAL_MAD_SEND,\r
&ioctl_buf.in, sizeof(ioctl_buf.in),\r
* Note that we issue the IOCTL regardless of failure of ib_get_mad.\r
* This is done in order to release the kernel-mode MAD.\r
*/\r
- ioctl_buf.in.p_user_mad = p_mad;\r
+ ioctl_buf.in.p_user_mad = (ULONG_PTR)p_mad;\r
\r
if( p_mad )\r
{\r
p_mad_buf = p_mad->p_mad_buf;\r
p_grh = p_mad->p_grh;\r
\r
- ioctl_buf.in.p_mad_buf = p_mad_buf;\r
- ioctl_buf.in.p_grh = p_grh;\r
+ ioctl_buf.in.p_mad_buf = (ULONG_PTR)p_mad_buf;\r
+ ioctl_buf.in.p_grh = (ULONG_PTR)p_grh;\r
}\r
ioctl_buf.in.h_mad = h_mad;\r
\r
ib_api_status_t status = IB_SUCCESS;\r
\r
AL_ENTER( AL_DBG_CA );\r
- cl_memclr(&local_mad_ioctl, sizeof (local_mad_ioctl));\r
+\r
local_mad_ioctl.in.h_ca = h_ca->obj.p_ci_ca->obj.hdl;\r
local_mad_ioctl.in.port_num = port_num;\r
cl_memcpy( local_mad_ioctl.in.mad_in, p_mad_in,\r
status = ioctl_buf.out.status;\r
if( status == IB_SUCCESS ){\r
h_mcast->obj.hdl = ioctl_buf.out.h_attach;\r
- h_mcast->h_ci_mcast = (ib_mcast_handle_t) HDL_TO_PTR(ioctl_buf.out.h_attach);\r
+ h_mcast->h_ci_mcast = (ib_mcast_handle_t)(ULONG_PTR)ioctl_buf.out.h_attach;\r
}\r
}\r
\r
uintn_t bytes_ret;\r
\r
AL_ENTER( AL_DBG_MGR );\r
- cl_memclr(&ioctl, sizeof (ual_bind_file_ioctl_t));\r
+\r
/* Create a file object on which to issue all SA requests. */\r
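+	/* Widen the returned HANDLE with HandleToHandle64 so 32- and 64-bit\r
+	 * processes present the kernel with the same ual_bind_file_ioctl_t. */\r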
- ioctl.h_file = CreateFileW( L"\\\\.\\ibal",\r
+ ioctl.h_file = HandleToHandle64( CreateFileW( L"\\\\.\\ibal",\r
GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE,\r
- NULL, OPEN_EXISTING, FILE_FLAG_OVERLAPPED, NULL );\r
+ NULL, OPEN_EXISTING, FILE_FLAG_OVERLAPPED, NULL ) );\r
if( ioctl.h_file == INVALID_HANDLE_VALUE )\r
{\r
AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,\r
/* We got a send completion. */\r
ib_mad_element_t *p_element;\r
\r
- ib_mad_svc_handle_t h_mad_svc = (ib_mad_svc_handle_t)\r
+ ib_mad_svc_handle_t h_mad_svc = (ib_mad_svc_handle_t)(ULONG_PTR)\r
p_misc_cb_info->ioctl_rec.mad_send_cb_ioctl_rec.mad_svc_context;\r
\r
/* Copy the data to the user's element. */\r
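+	/* p_um_mad carries back the pointer value this process passed down in\r
+	 * the send IOCTL; ULONG_PTR narrows it to a usable element pointer. */\r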
- p_element = p_misc_cb_info->ioctl_rec.mad_send_cb_ioctl_rec.p_um_mad;\r
+ p_element = (ib_mad_element_t*)(ULONG_PTR)\r
+ p_misc_cb_info->ioctl_rec.mad_send_cb_ioctl_rec.p_um_mad;\r
/* Only update the status if a receive wasn't failed. */\r
if( p_element->status != IB_WCS_TIMEOUT_RETRY_ERR )\r
{\r
ib_mad_t *p_mad_buf = NULL;\r
ib_grh_t *p_grh = NULL;\r
\r
- h_mad_svc = (ib_mad_svc_handle_t)\r
+ h_mad_svc = (ib_mad_svc_handle_t)(ULONG_PTR)\r
p_misc_cb_info->ioctl_rec.mad_recv_cb_ioctl_rec.mad_svc_context;\r
\r
- p_send_mad =\r
+ p_send_mad = (ib_mad_element_t*)(ULONG_PTR)\r
p_misc_cb_info->ioctl_rec.mad_recv_cb_ioctl_rec.p_send_mad;\r
\r
cl_memclr( &ioctl_buf, sizeof(ioctl_buf) );\r
else if( p_send_mad )\r
p_send_mad->status = IB_WCS_TIMEOUT_RETRY_ERR;\r
\r
- ioctl_buf.in.p_user_mad = p_mad;\r
+ ioctl_buf.in.p_user_mad = (ULONG_PTR)p_mad;\r
\r
if( p_mad )\r
{\r
p_mad_buf = p_mad->p_mad_buf;\r
p_grh = p_mad->p_grh;\r
\r
- ioctl_buf.in.p_mad_buf = p_mad_buf;\r
- ioctl_buf.in.p_grh = p_grh;\r
+ ioctl_buf.in.p_mad_buf = (ULONG_PTR)p_mad_buf;\r
+ ioctl_buf.in.p_grh = (ULONG_PTR)p_grh;\r
}\r
ioctl_buf.in.h_mad = p_misc_cb_info->ioctl_rec.mad_recv_cb_ioctl_rec.h_mad;\r
\r
\r
pd_ioctl.in.h_ca = h_ca->obj.p_ci_ca->obj.hdl;\r
pd_ioctl.in.type = pd_type;\r
- pd_ioctl.in.context = h_pd;\r
+ pd_ioctl.in.context = (ULONG_PTR)h_pd;\r
\r
cl_status = do_al_dev_ioctl( UAL_ALLOC_PD,\r
&pd_ioctl.in, sizeof(pd_ioctl.in), &pd_ioctl.out, sizeof(pd_ioctl.out),\r
\r
/* Copy the request information. */\r
p_reg->pfn_pnp_cb = p_pnp_req->pfn_pnp_cb;\r
- cl_memclr(&in, sizeof(in));\r
+\r
in.pnp_class = p_pnp_req->pnp_class;\r
-	in.p_status = &status;\r
-	in.p_hdl = &p_reg->obj.hdl;\r
+	in.p_status = (ULONG_PTR)&status;\r
+	in.p_hdl = (ULONG_PTR)&p_reg->obj.hdl;\r
+	/* cl_memclr is no longer called above, so initialize sync_event\r
+	 * explicitly; the kernel reads it only when IB_PNP_FLAG_REG_SYNC is set. */\r
+	in.sync_event = NULL;\r
\r
if( pnp_get_flag( p_pnp_req->pnp_class ) & IB_PNP_FLAG_REG_SYNC )\r
{\r
- in.sync_event = CreateEvent( NULL, FALSE, FALSE, NULL );\r
+ in.sync_event = HandleToHandle64( CreateEvent( NULL, FALSE, FALSE, NULL ) );\r
if( !in.sync_event )\r
{\r
p_reg->obj.pfn_destroy( &p_reg->obj, NULL );\r
AL_ENTER( AL_DBG_PNP );\r
\r
p_reg = PARENT_STRUCT( p_item, al_pnp_t, async_item );\r
- cl_memclr(&in, sizeof(in));\r
+\r
in.pnp_hdl = p_reg->obj.hdl;\r
in.last_evt_hdl = p_reg->rearm.evt_hdl;\r
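+	/* No cl_memclr above; give last_evt_context a defined value up front. */\r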
+ in.last_evt_context = 0;\r
\r
if( p_reg->rearm.evt_size )\r
{\r
}\r
p_pnp_rec->pnp_context = (void*)p_reg->obj.context;\r
in.last_evt_status = p_reg->pfn_pnp_cb( p_pnp_rec );\r
- in.last_evt_context = p_pnp_rec->context;\r
+ in.last_evt_context = (ULONG_PTR)p_pnp_rec->context;\r
}\r
else\r
{\r
in.last_evt_status = IB_SUCCESS;\r
- in.last_evt_context = NULL;\r
}\r
\r
if( p_pnp_rec )\r
else\r
{\r
in.last_evt_status = IB_SUCCESS;\r
- in.last_evt_context = NULL;\r
}\r
}\r
else\r
{\r
in.last_evt_status = IB_SUCCESS;\r
- in.last_evt_context = NULL;\r
}\r
\r
/* Request the next PnP event. */\r
if (p_qp_create->h_srq)\r
qp_ioctl.in.qp_create.h_srq =\r
(ib_srq_handle_t)HDL_TO_PTR(p_qp_create->h_srq->obj.hdl);\r
- qp_ioctl.in.context = h_qp;\r
+ qp_ioctl.in.context = (ULONG_PTR)h_qp;\r
qp_ioctl.in.ev_notify = (h_qp->pfn_event_cb != NULL) ? TRUE : FALSE;\r
\r
cl_status = do_al_dev_ioctl( UAL_CREATE_QP,\r
qp_ioctl.in.h_pd = h_pd->obj.hdl;\r
qp_ioctl.in.port_guid = port_guid;\r
qp_ioctl.in.qp_create = *p_qp_create;\r
- qp_ioctl.in.context = &p_qp_alias->qp;\r
+ qp_ioctl.in.context = (ULONG_PTR)&p_qp_alias->qp;\r
\r
cl_status = do_al_dev_ioctl( UAL_GET_SPL_QP_ALIAS,\r
&qp_ioctl.in, sizeof(qp_ioctl.in),\r
p_sa_req->ioctl.in.sa_req = *p_sa_req_data;\r
cl_memcpy( p_sa_req->ioctl.in.attr,\r
p_sa_req_data->p_attr, p_sa_req_data->attr_size );\r
- p_sa_req->ioctl.in.ph_sa_req = &p_sa_req->hdl;\r
- p_sa_req->ioctl.in.p_status = &p_sa_req->status;\r
+ p_sa_req->ioctl.in.ph_sa_req = (ULONG_PTR)&p_sa_req->hdl;\r
+ p_sa_req->ioctl.in.p_status = (ULONG_PTR)&p_sa_req->status;\r
\r
if( flags & IB_FLAGS_SYNC )\r
h_dev = g_al_device;\r
size_t bytes_ret;\r
\r
AL_ENTER( AL_DBG_SA_REQ );\r
- cl_memclr(&ioctl, sizeof(ioctl));\r
+\r
ioctl.h_sa_req = p_sa_req->hdl;\r
\r
do_al_dev_ioctl(\r
*/\r
srq_ioctl.in.h_pd = h_pd->obj.hdl;\r
srq_ioctl.in.srq_attr = *p_srq_attr;\r
- srq_ioctl.in.context = h_srq;\r
+ srq_ioctl.in.context = (ULONG_PTR)h_srq;\r
srq_ioctl.in.ev_notify = (h_srq->pfn_event_cb != NULL) ? TRUE : FALSE;\r
\r
cl_status = do_al_dev_ioctl( UAL_CREATE_SRQ,\r
#include <iba/ib_al.h>\r
\r
\r
-\r
/*\r
* Typedefs\r
*\r
*/\r
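+\r
+/*\r
+ * All pointers embedded in these IOCTL structures are declared as plain\r
+ * uint64_t (or as void* __ptr64 for HANDLEs) so 32-bit and 64-bit user\r
+ * processes present the kernel with a single structure layout.\r
+ */\r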
typedef struct _ual_bind_file_ioctl\r
{\r
- TO_LONG_PTR(void* , h_file) ;\r
+ void* __ptr64 h_file; /* __ptr64 is correct for HANDLE types. */\r
\r
} ual_bind_file_ioctl_t;\r
/*\r
{\r
ci_umv_buf_t umv_buf;\r
ib_net64_t guid;\r
- TO_LONG_PTR(void* , context) ;\r
+ uint64_t context;\r
\r
} in;\r
\r
ci_umv_buf_t umv_buf;\r
uint64_t h_ca;\r
uint32_t byte_cnt;\r
- TO_LONG_PTR(ib_ca_attr_t* , p_ca_attr) ;\r
+ uint64_t p_ca_attr;\r
\r
} in;\r
struct _ual_query_ca_ioctl_out\r
ci_umv_buf_t umv_buf;\r
uint64_t h_ca;\r
ib_pd_type_t type;\r
- TO_LONG_PTR(void* , context) ;\r
+ uint64_t context;\r
\r
} in;\r
struct _ual_alloc_pd_ioctl_out\r
ci_umv_buf_t umv_buf;\r
uint64_t h_pd;\r
ib_srq_attr_t srq_attr;\r
- TO_LONG_PTR(void* , context) ;\r
- boolean_t ev_notify;\r
+ uint64_t context;\r
+ boolean_t ev_notify;\r
\r
} in;\r
struct _ual_create_srq_ioctl_out\r
{\r
ci_umv_buf_t umv_buf;\r
- ib_api_status_t status;\r
+ ib_api_status_t status;\r
uint64_t h_srq;\r
\r
} out;\r
ci_umv_buf_t umv_buf;\r
uint64_t h_pd;\r
ib_qp_create_t qp_create;\r
- TO_LONG_PTR(void* , context) ;\r
+ uint64_t context;\r
boolean_t ev_notify;\r
\r
} in;\r
{\r
ci_umv_buf_t umv_buf;\r
uint64_t h_ca;\r
- TO_LONG_PTR(void* , h_wait_obj) ;\r
- TO_LONG_PTR(void* , context) ;\r
+ void* __ptr64 h_wait_obj; /* __ptr64 is correct for HANDLE types. */\r
+ uint64_t context;\r
uint32_t size;\r
boolean_t ev_notify;\r
\r
uint64_t h_mad_svc;\r
uint64_t pool_key;\r
uint64_t h_av;\r
- TO_LONG_PTR(ib_mad_element_t* , p_mad_element) ;\r
+ uint64_t p_mad_element;\r
uint32_t size;\r
- TO_LONG_PTR(void* *, /*__ptr64*/ ph_proxy) ;\r
+ uint64_t ph_proxy;\r
\r
} in;\r
struct _ual_send_mad_ioctl_out\r
struct _ual_cancel_mad_ioctl_in\r
{\r
uint64_t h_mad_svc;\r
- TO_LONG_PTR(void* , h_proxy_element) ;\r
+ uint64_t h_proxy_element;\r
\r
} in;\r
struct _ual_cancel_mad_ioctl_out\r
uint64_t h_pd;\r
ib_net64_t port_guid;\r
ib_qp_create_t qp_create;\r
- TO_LONG_PTR(void* , context) ;\r
+ uint64_t context;\r
\r
} in;\r
struct _ual_spl_qp_ioctl_out\r
struct _ual_mad_recv_comp_ioctl_in\r
{\r
uint64_t h_mad;\r
- TO_LONG_PTR(ib_mad_element_t* , p_user_mad) ;\r
- TO_LONG_PTR(ib_mad_t* , p_mad_buf) ;\r
- TO_LONG_PTR(ib_grh_t* , p_grh) ;\r
+ uint64_t p_user_mad;\r
+ uint64_t p_mad_buf;\r
+ uint64_t p_grh;\r
\r
} in;\r
struct _ual_mad_recv_comp_ioctl_out\r
{\r
uint64_t h_ca;\r
__declspec(align(8)) uint8_t mad_in[MAD_BLOCK_SIZE];\r
- uint8_t port_num;\r
+ uint8_t port_num;\r
\r
\r
} in;\r
struct _ual_local_mad_ioctl_out\r
{\r
- ib_api_status_t status;\r
+ ib_api_status_t status;\r
uint32_t _pad; /* 8-byte alignment needed for ia64 */\r
__declspec(align(8)) uint8_t mad_out[MAD_BLOCK_SIZE];\r
\r
* CID of the created CEP.\r
*****/\r
\r
-/****s* User-mode Access Layer/ual_create_cep_ioctl_in\r
-* NAME\r
-* ual_create_cep_ioctl_in\r
-*\r
-* DESCRIPTION\r
-* IOCTL structure containing the input parameters to\r
-* create a CEP.\r
-*\r
-* SYNOPSIS\r
-*/\r
-\r
-typedef struct _ual_create_cep_ioctl_in\r
-{\r
- TO_LONG_PTR(void* , context);\r
-} ual_create_cep_ioctl_in;\r
-\r
-/*\r
-* FIELDS\r
-* context\r
-* \r
-* \r
-*****/\r
-\r
\r
/****s* User-mode Access Layer/ual_cep_listen_ioctl_t\r
* NAME\r
{\r
struct _ual_cep_rep_ioctl_in\r
{\r
- TO_LONG_PTR(void* , context) ;\r
+ uint64_t context;\r
net32_t cid;\r
ib_cm_rep_t cm_rep;\r
uint8_t pdata[IB_REP_PDATA_SIZE];\r
typedef struct _ual_cep_poll_ioctl\r
{\r
ib_api_status_t status;\r
- TO_LONG_PTR(void* , context) ;\r
+ uint64_t context;\r
net32_t new_cid;\r
ib_mad_element_t element;\r
ib_grh_t grh;\r
uint32_t retry_cnt;\r
ib_user_query_t sa_req;\r
uint8_t attr[IB_SA_DATA_SIZE];\r
- TO_LONG_PTR(uint64_t* , ph_sa_req) ;\r
- TO_LONG_PTR(ib_api_status_t* , p_status) ;\r
+ uint64_t ph_sa_req;\r
+ uint64_t p_status;\r
\r
} in;\r
struct _ual_send_sa_req_ioctl_out\r
typedef struct _ual_reg_pnp_ioctl_in\r
{\r
ib_pnp_class_t pnp_class;\r
- TO_LONG_PTR(void* , sync_event) ;\r
- TO_LONG_PTR(ib_api_status_t* , p_status) ;\r
- TO_LONG_PTR(uint64_t* , p_hdl) ;\r
+ void* __ptr64 sync_event; /* __ptr64 is correct for HANDLE types. */\r
+ uint64_t p_status;\r
+ uint64_t p_hdl;\r
\r
} ual_reg_pnp_ioctl_in_t;\r
/*\r
{\r
uint64_t pnp_hdl;\r
uint64_t last_evt_hdl;\r
- TO_LONG_PTR(void* , last_evt_context) ;\r
+ uint64_t last_evt_context;\r
ib_api_status_t last_evt_status;\r
\r
} ual_rearm_pnp_ioctl_in_t;\r
uint64_t h_qp;\r
net64_t guid;\r
uint16_t dst_port;\r
+ uint16_t pkey;\r
uint8_t resp_res;\r
uint8_t init_depth;\r
uint8_t prot;\r
* dst_port\r
* Destination port number.\r
*\r
+* pkey\r
+* Partition key.\r
+*\r
* resp_res\r
* Responder resources for the QP.\r
*\r
* resp_res\r
* The maximum number of RDMA read/atomic operations from the recipient.\r
*\r
-* pdata_size\r
-* The size of following private data\r
+* pdata_size\r
+* The size of the following private data.\r
*\r
* pdata\r
* cid\r
* Connection ID.\r
*\r
-* pdata_size\r
-* The size of following private data\r
+* pdata_size\r
+* The size of the following private data.\r
*\r
* pdata\r