/*
 * Copyright (c) 2005-2009 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <windows.h>
#include <winsock2.h>
#include <ws2tcpip.h>
#include <iphlpapi.h>

#include <rdma/rdma_cma.h>
#include <infiniband/verbs.h>
#include <comp_channel.h>
#include <iba/ibat.h>
#include "cma.h"
#include "..\..\..\etc\user\comp_channel.cpp"
#include "..\..\..\etc\user\dlist.c"
static struct ibvw_windata windata;

enum cma_state
{
	cma_idle,
	cma_listening,
	cma_get_request,
	cma_addr_bind,
	cma_addr_resolve,
	cma_route_resolve,
	cma_passive_connect,
	cma_active_connect,
	cma_accepting,
	cma_connected,
	cma_active_disconnect,
	cma_passive_disconnect,
	cma_disconnected,
	cma_destroying
};

#define CMA_DEFAULT_BACKLOG		16

struct cma_id_private
{
	struct rdma_cm_id		id;
	enum cma_state			state;
	struct cma_device		*cma_dev;
	int						backlog;
	int						index;
	volatile LONG			refcnt;
	struct rdma_cm_id		**req_list;
};

struct cma_device
{
	struct ibv_context	*verbs;
	uint64_t			guid;
	int					port_cnt;
	uint8_t				max_initiator_depth;
	uint8_t				max_responder_resources;
};

struct cma_event
{
	struct rdma_cm_event	event;
	uint8_t					private_data[56];
	struct cma_id_private	*id_priv;
};

static struct cma_device *cma_dev_array;
static int cma_dev_cnt;
static CRITICAL_SECTION lock;
static void ucma_cleanup(void)
{
	if (cma_dev_cnt > 0) {
		while (cma_dev_cnt > 0) {
			ibv_close_device(cma_dev_array[--cma_dev_cnt].verbs);
		}
		delete [] cma_dev_array;
		cma_dev_array = NULL;
	}

	if (windata.prov != NULL) {
		ibvw_release_windata(&windata, IBVW_WINDATA_VERSION);
		windata.prov = NULL;
	}
}
static int ucma_init(void)
{
	struct ibv_device **dev_list = NULL;
	struct cma_device *cma_dev;
	struct ibv_device_attr attr;
	int i, ret;

	EnterCriticalSection(&lock);
	if (cma_dev_cnt > 0) {
		goto unlock;
	}

	ret = ibvw_get_windata(&windata, IBVW_WINDATA_VERSION);
	if (ret) {
		goto err;
	}

	dev_list = ibv_get_device_list(&cma_dev_cnt);
	if (dev_list == NULL) {
		ret = -1;
		goto err;
	}

	cma_dev_array = new struct cma_device[cma_dev_cnt];
	if (cma_dev_array == NULL) {
		ret = -1;
		goto err;
	}

	for (i = 0; dev_list[i]; ++i) {
		cma_dev = &cma_dev_array[i];

		cma_dev->guid = ibv_get_device_guid(dev_list[i]);
		cma_dev->verbs = ibv_open_device(dev_list[i]);
		if (cma_dev->verbs == NULL) {
			ret = -1;
			cma_dev_cnt = i;	/* only the opened devices need closing */
			goto err;
		}

		ret = ibv_query_device(cma_dev->verbs, &attr);
		if (ret) {
			cma_dev_cnt = i + 1;
			goto err;
		}

		cma_dev->port_cnt = attr.phys_port_cnt;
		cma_dev->max_initiator_depth = (uint8_t) attr.max_qp_init_rd_atom;
		cma_dev->max_responder_resources = (uint8_t) attr.max_qp_rd_atom;
	}
	ibv_free_device_list(dev_list);
unlock:
	LeaveCriticalSection(&lock);
	return 0;

err:
	ucma_cleanup();
	LeaveCriticalSection(&lock);
	if (dev_list != NULL) {
		ibv_free_device_list(dev_list);
	}
	return ret;
}
__declspec(dllexport)
struct ibv_context **rdma_get_devices(int *num_devices)
{
	struct ibv_context **devs = NULL;
	int i;

	if (!cma_dev_cnt && ucma_init()) {
		goto out;
	}

	devs = new struct ibv_context *[cma_dev_cnt + 1];
	if (devs == NULL) {
		goto out;
	}

	for (i = 0; i < cma_dev_cnt; i++) {
		devs[i] = cma_dev_array[i].verbs;
	}
	devs[i] = NULL;
out:
	if (num_devices != NULL) {
		*num_devices = devs ? cma_dev_cnt : 0;
	}
	return devs;
}

__declspec(dllexport)
void rdma_free_devices(struct ibv_context **list)
{
	delete [] list;
}
__declspec(dllexport)
struct rdma_event_channel *rdma_create_event_channel(void)
{
	struct rdma_event_channel *channel;

	if (!cma_dev_cnt && ucma_init()) {
		return NULL;
	}

	channel = new struct rdma_event_channel;
	if (channel == NULL) {
		return NULL;
	}

	CompChannelInit(windata.comp_mgr, &channel->channel, INFINITE);
	return channel;
}

__declspec(dllexport)
void rdma_destroy_event_channel(struct rdma_event_channel *channel)
{
	CompChannelCleanup(&channel->channel);
	delete channel;
}
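/*
 * Illustrative sketch only, not part of the library: the minimal
 * lifecycle of an event channel as exported above.  CMA_USAGE_EXAMPLES
 * is a hypothetical guard that keeps the sample out of the build.
 */
#ifdef CMA_USAGE_EXAMPLES
static int example_channel_lifecycle(void)
{
	struct rdma_event_channel *channel;

	/* triggers ucma_init() on first use, binding to the WinVerbs provider */
	channel = rdma_create_event_channel();
	if (channel == NULL) {
		return -1;
	}

	/* ... create ids against the channel, poll rdma_get_cm_event() ... */

	rdma_destroy_event_channel(channel);
	return 0;
}
#endif /* CMA_USAGE_EXAMPLES */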
__declspec(dllexport)
int rdma_create_id(struct rdma_event_channel *channel,
				   struct rdma_cm_id **id, void *context,
				   enum rdma_port_space ps)
{
	struct cma_id_private *id_priv;
	HRESULT hr;

	hr = cma_dev_cnt ? 0 : ucma_init();
	if (hr) {
		return hr;
	}

	id_priv = new struct cma_id_private;
	if (id_priv == NULL) {
		return WV_NO_MEMORY;
	}

	RtlZeroMemory(id_priv, sizeof(struct cma_id_private));
	id_priv->refcnt = 1;
	id_priv->id.context = context;
	id_priv->id.channel = channel;
	id_priv->id.ps = ps;
	CompEntryInit(&channel->channel, &id_priv->id.comp_entry);

	if (ps == RDMA_PS_TCP) {
		hr = windata.prov->CreateConnectEndpoint(&id_priv->id.ep.connect);
	} else {
		hr = windata.prov->CreateDatagramEndpoint(&id_priv->id.ep.datagram);
	}
	if (FAILED(hr)) {
		delete id_priv;
		return hr;
	}

	*id = &id_priv->id;
	return 0;
}
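/*
 * Illustrative sketch only (hypothetical CMA_USAGE_EXAMPLES guard):
 * RDMA_PS_TCP ids are backed by a WinVerbs connect endpoint, any other
 * port space by a datagram endpoint.
 */
#ifdef CMA_USAGE_EXAMPLES
static int example_create_id(struct rdma_event_channel *channel)
{
	struct rdma_cm_id *id;
	int ret;

	ret = rdma_create_id(channel, &id, NULL /* context */, RDMA_PS_TCP);
	if (ret) {
		return ret;
	}

	rdma_destroy_id(id);
	return 0;
}
#endif /* CMA_USAGE_EXAMPLES */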
static void ucma_destroy_listen(struct cma_id_private *id_priv)
{
	while (--id_priv->backlog >= 0) {
		if (id_priv->req_list[id_priv->backlog] != NULL) {
			InterlockedDecrement(&id_priv->refcnt);
			rdma_destroy_id(id_priv->req_list[id_priv->backlog]);
		}
	}

	delete [] id_priv->req_list;
}
__declspec(dllexport)
int rdma_destroy_id(struct rdma_cm_id *id)
{
	struct cma_id_private *id_priv;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);

	EnterCriticalSection(&lock);
	id_priv->state = cma_destroying;
	LeaveCriticalSection(&lock);

	if (id->ps == RDMA_PS_TCP) {
		id->ep.connect->CancelOverlappedRequests();
	} else {
		id->ep.datagram->CancelOverlappedRequests();
	}

	if (CompEntryCancel(&id->comp_entry) != NULL) {
		InterlockedDecrement(&id_priv->refcnt);
	}

	if (id_priv->backlog > 0) {
		ucma_destroy_listen(id_priv);
	}

	if (id_priv->id.ps == RDMA_PS_TCP) {
		id_priv->id.ep.connect->Release();
	} else {
		id_priv->id.ep.datagram->Release();
	}

	/* wait for any in-flight event references to drain */
	InterlockedDecrement(&id_priv->refcnt);
	while (id_priv->refcnt) {
		Sleep(0);
	}
	delete id_priv;
	return 0;
}
static int ucma_addrlen(struct sockaddr *addr)
{
	if (addr->sa_family == PF_INET) {
		return sizeof(struct sockaddr_in);
	} else {
		return sizeof(struct sockaddr_in6);
	}
}

static int ucma_get_device(struct cma_id_private *id_priv, uint64_t guid)
{
	struct cma_device *cma_dev;
	int i;

	for (i = 0; i < cma_dev_cnt; i++) {
		cma_dev = &cma_dev_array[i];
		if (cma_dev->guid == guid) {
			id_priv->cma_dev = cma_dev;
			id_priv->id.verbs = cma_dev->verbs;
			return 0;
		}
	}
	return -1;
}
static int ucma_query_connect(struct rdma_cm_id *id, struct rdma_conn_param *param)
{
	struct cma_id_private *id_priv;
	WV_CONNECT_ATTRIBUTES attr;
	HRESULT hr;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	hr = id->ep.connect->Query(&attr);
	if (FAILED(hr)) {
		return hr;
	}

	RtlCopyMemory(&id->route.addr.src_addr, &attr.LocalAddress,
				  sizeof attr.LocalAddress);
	RtlCopyMemory(&id->route.addr.dst_addr, &attr.PeerAddress,
				  sizeof attr.PeerAddress);

	if (param != NULL) {
		RtlCopyMemory((void *) param->private_data, attr.Param.Data,
					  attr.Param.DataLength);
		param->private_data_len = (uint8_t) attr.Param.DataLength;
		param->responder_resources = (uint8_t) attr.Param.ResponderResources;
		param->initiator_depth = (uint8_t) attr.Param.InitiatorDepth;
		param->flow_control = 1;
		param->retry_count = attr.Param.RetryCount;
		param->rnr_retry_count = attr.Param.RnrRetryCount;
	}

	if (id_priv->cma_dev == NULL && attr.Device.DeviceGuid != 0) {
		hr = ucma_get_device(id_priv, attr.Device.DeviceGuid);
		if (FAILED(hr)) {
			return hr;
		}

		id->route.addr.addr.ibaddr.pkey = attr.Device.Pkey;
		id_priv->id.port_num = attr.Device.PortNumber;
	}

	return 0;
}

static int ucma_query_datagram(struct rdma_cm_id *id, struct rdma_ud_param *param)
{
	struct cma_id_private *id_priv;
	WV_DATAGRAM_ATTRIBUTES attr;
	HRESULT hr;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	hr = id->ep.datagram->Query(&attr);
	if (FAILED(hr)) {
		return hr;
	}

	RtlCopyMemory(&id->route.addr.src_addr, &attr.LocalAddress,
				  sizeof attr.LocalAddress);
	RtlCopyMemory(&id->route.addr.dst_addr, &attr.PeerAddress,
				  sizeof attr.PeerAddress);

	if (param != NULL) {
		RtlCopyMemory((void *) param->private_data, attr.Param.Data,
					  attr.Param.DataLength);
		param->private_data_len = (uint8_t) attr.Param.DataLength;
		// ucma_convert_av(&attr.Param.AddressVector, param->ah_attr)
		param->qp_num = attr.Param.Qpn;
		param->qkey = attr.Param.Qkey;
	}

	if (id_priv->cma_dev == NULL && attr.Device.DeviceGuid != 0) {
		hr = ucma_get_device(id_priv, attr.Device.DeviceGuid);
		if (FAILED(hr)) {
			return hr;
		}

		id->route.addr.addr.ibaddr.pkey = attr.Device.Pkey;
		id_priv->id.port_num = attr.Device.PortNumber;
	}

	return 0;
}
__declspec(dllexport)
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct cma_id_private *id_priv;
	HRESULT hr;

	if (id->ps == RDMA_PS_TCP) {
		hr = id->ep.connect->BindAddress(addr);
		if (SUCCEEDED(hr)) {
			hr = ucma_query_connect(id, NULL);
		}
	} else {
		hr = id->ep.datagram->BindAddress(addr);
		if (SUCCEEDED(hr)) {
			hr = ucma_query_datagram(id, NULL);
		}
	}

	if (SUCCEEDED(hr)) {
		id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
		id_priv->state = cma_addr_bind;
	}
	return hr;
}
__declspec(dllexport)
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
					  struct sockaddr *dst_addr, int timeout_ms)
{
	struct cma_id_private *id_priv;
	WV_SOCKADDR addr;
	SOCKET s;
	DWORD size;
	HRESULT hr;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	if (id_priv->state == cma_idle) {
		if (src_addr == NULL) {
			if (id->ps == RDMA_PS_TCP) {
				s = socket(dst_addr->sa_family, SOCK_STREAM, IPPROTO_TCP);
			} else {
				s = socket(dst_addr->sa_family, SOCK_DGRAM, IPPROTO_UDP);
			}
			if (s == INVALID_SOCKET) {
				return WSAGetLastError();
			}

			/* let the IP stack choose the local interface for dst_addr */
			hr = WSAIoctl(s, SIO_ROUTING_INTERFACE_QUERY, dst_addr, ucma_addrlen(dst_addr),
						  &addr, sizeof addr, &size, NULL, NULL);
			closesocket(s);
			if (hr) {
				return WSAGetLastError();
			}
			src_addr = &addr.Sa;
		}

		hr = rdma_bind_addr(id, src_addr);
		if (FAILED(hr)) {
			return hr;
		}
	}

	RtlCopyMemory(&id->route.addr.dst_addr, dst_addr, ucma_addrlen(dst_addr));
	id_priv->state = cma_addr_resolve;

	InterlockedIncrement(&id_priv->refcnt);
	id->comp_entry.Busy = 1;
	CompEntryPost(&id->comp_entry);
	return 0;
}
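/*
 * Illustrative sketch only (hypothetical CMA_USAGE_EXAMPLES guard):
 * with src_addr == NULL, rdma_resolve_addr() picks the outgoing
 * interface itself via SIO_ROUTING_INTERFACE_QUERY and binds to it.
 */
#ifdef CMA_USAGE_EXAMPLES
static int example_resolve(struct rdma_cm_id *id, struct sockaddr *dst_addr)
{
	int ret;

	ret = rdma_resolve_addr(id, NULL, dst_addr, 2000 /* timeout_ms */);
	if (ret) {
		return ret;
	}

	/* RDMA_CM_EVENT_ADDR_RESOLVED arrives on the id's channel; follow
	 * with rdma_resolve_route() before calling rdma_connect(). */
	return 0;
}
#endif /* CMA_USAGE_EXAMPLES */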
__declspec(dllexport)
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct cma_id_private *id_priv;
	IBAT_PATH_BLOB path;
	HRESULT hr;

	hr = IBAT::Resolve(&id->route.addr.src_addr, &id->route.addr.dst_addr, &path);
	if (FAILED(hr)) {
		return hr;
	}

	hr = (id->ps == RDMA_PS_TCP) ?
		 id->ep.connect->Modify(WV_EP_OPTION_ROUTE, &path, sizeof path) :
		 id->ep.datagram->Modify(WV_EP_OPTION_ROUTE, &path, sizeof path);
	if (FAILED(hr)) {
		return hr;
	}

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	id_priv->state = cma_route_resolve;

	InterlockedIncrement(&id_priv->refcnt);
	id->comp_entry.Busy = 1;
	CompEntryPost(&id->comp_entry);
	return 0;
}
static int ucma_modify_qp_init(struct cma_id_private *id_priv, struct ibv_qp *qp)
{
	struct ibv_qp_attr qp_attr;
	UINT16 index;
	HRESULT hr;

	RtlZeroMemory(&qp_attr, sizeof qp_attr);
	qp_attr.qp_state = IBV_QPS_INIT;
	qp_attr.port_num = id_priv->id.port_num;
	hr = qp->context->cmd_if->FindPkey(id_priv->id.port_num,
									   id_priv->id.route.addr.addr.ibaddr.pkey,
									   &index);
	if (FAILED(hr)) {
		return hr;
	}

	qp_attr.pkey_index = index;
	return ibv_modify_qp(qp, &qp_attr, (enum ibv_qp_attr_mask)
						 (IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT));
}

static int ucma_init_ud_qp(struct cma_id_private *id_priv, struct ibv_qp *qp)
{
	struct ibv_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ucma_modify_qp_init(id_priv, qp);
	if (ret) {
		return ret;
	}

	qp_attr.qp_state = IBV_QPS_RTR;
	ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE);
	if (ret) {
		return ret;
	}

	qp_attr.qp_state = IBV_QPS_RTS;
	qp_attr.sq_psn = 0;
	return ibv_modify_qp(qp, &qp_attr, (enum ibv_qp_attr_mask)
						 (IBV_QP_STATE | IBV_QP_SQ_PSN));
}
__declspec(dllexport)
int rdma_create_qp(struct rdma_cm_id *id, struct ibv_pd *pd,
				   struct ibv_qp_init_attr *qp_init_attr)
{
	struct cma_id_private *id_priv;
	struct ibv_qp *qp;
	int ret;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	if (id->verbs != pd->context) {
		return WV_INVALID_PARAMETER;
	}

	qp = ibv_create_qp(pd, qp_init_attr);
	if (qp == NULL) {
		return WV_NO_MEMORY;
	}

	if (id->ps == RDMA_PS_TCP) {
		ret = ucma_modify_qp_init(id_priv, qp);
	} else {
		ret = ucma_init_ud_qp(id_priv, qp);
	}
	if (ret) {
		goto err;
	}

	id->qp = qp;
	return 0;

err:
	ibv_destroy_qp(qp);
	return ret;
}
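/*
 * Illustrative sketch only (hypothetical CMA_USAGE_EXAMPLES guard):
 * the protection domain must come from the verbs context the id
 * resolved to (id->verbs), or rdma_create_qp() rejects the call.
 * The capability values below are arbitrary sample sizes.
 */
#ifdef CMA_USAGE_EXAMPLES
static int example_create_qp(struct rdma_cm_id *id, struct ibv_pd *pd,
							 struct ibv_cq *cq)
{
	struct ibv_qp_init_attr attr;

	RtlZeroMemory(&attr, sizeof attr);
	attr.send_cq = cq;
	attr.recv_cq = cq;
	attr.cap.max_send_wr = 4;
	attr.cap.max_recv_wr = 4;
	attr.cap.max_send_sge = 1;
	attr.cap.max_recv_sge = 1;
	attr.qp_type = IBV_QPT_RC;

	/* for RDMA_PS_TCP the QP is moved to INIT here; UD QPs go to RTS */
	return rdma_create_qp(id, pd, &attr);
}
#endif /* CMA_USAGE_EXAMPLES */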
__declspec(dllexport)
void rdma_destroy_qp(struct rdma_cm_id *id)
{
	ibv_destroy_qp(id->qp);
}

static int ucma_valid_param(struct cma_id_private *id_priv,
							struct rdma_conn_param *conn_param)
{
	if (id_priv->id.ps != RDMA_PS_TCP) {
		return WV_INVALID_PARAMETER;
	}

	if ((conn_param->responder_resources > id_priv->cma_dev->max_responder_resources) ||
		(conn_param->initiator_depth > id_priv->cma_dev->max_initiator_depth)) {
		return WV_INVALID_PARAMETER;
	}

	return 0;
}
__declspec(dllexport)
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct cma_id_private *id_priv;
	WV_CONNECT_PARAM attr;
	HRESULT hr;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	hr = ucma_valid_param(id_priv, conn_param);
	if (FAILED(hr)) {
		return hr;
	}

	RtlZeroMemory(&attr, sizeof attr);
	attr.ResponderResources = conn_param->responder_resources;
	attr.InitiatorDepth = conn_param->initiator_depth;
	attr.RetryCount = conn_param->retry_count;
	attr.RnrRetryCount = conn_param->rnr_retry_count;
	if ((attr.DataLength = conn_param->private_data_len)) {
		RtlCopyMemory(attr.Data, conn_param->private_data, attr.DataLength);
	}

	id_priv->state = cma_active_connect;
	InterlockedIncrement(&id_priv->refcnt);
	id->comp_entry.Busy = 1;
	hr = id->ep.connect->Connect(id->qp->conn_handle, &id->route.addr.dst_addr,
								 &attr, &id->comp_entry.Overlap);
	if (FAILED(hr) && hr != WV_IO_PENDING) {
		InterlockedDecrement(&id_priv->refcnt);
		id->comp_entry.Busy = 0;
		id_priv->state = cma_route_resolve;
		return hr;
	}

	return 0;
}
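/*
 * Illustrative sketch only (hypothetical CMA_USAGE_EXAMPLES guard):
 * responder_resources/initiator_depth are validated against the device
 * limits cached by ucma_init(); the result arrives asynchronously as
 * RDMA_CM_EVENT_ESTABLISHED, _REJECTED, or _CONNECT_ERROR.
 */
#ifdef CMA_USAGE_EXAMPLES
static int example_connect(struct rdma_cm_id *id)
{
	struct rdma_conn_param param;

	RtlZeroMemory(&param, sizeof param);
	param.responder_resources = 1;
	param.initiator_depth = 1;
	param.retry_count = 7;
	param.rnr_retry_count = 7;

	return rdma_connect(id, &param);
}
#endif /* CMA_USAGE_EXAMPLES */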
static int ucma_get_request(struct cma_id_private *listen, int index)
{
	struct cma_id_private *id_priv = NULL;
	HRESULT hr;

	EnterCriticalSection(&lock);
	if (listen->state != cma_listening) {
		hr = WV_INVALID_PARAMETER;
		goto err1;
	}

	InterlockedIncrement(&listen->refcnt);
	hr = rdma_create_id(listen->id.channel, &listen->req_list[index],
						listen, listen->id.ps);
	if (FAILED(hr)) {
		goto err2;
	}

	id_priv = CONTAINING_RECORD(listen->req_list[index], struct cma_id_private, id);
	id_priv->index = index;
	id_priv->state = cma_get_request;

	InterlockedIncrement(&id_priv->refcnt);
	id_priv->id.comp_entry.Busy = 1;
	if (listen->id.ps == RDMA_PS_TCP) {
		hr = listen->id.ep.connect->GetRequest(id_priv->id.ep.connect,
											   &id_priv->id.comp_entry.Overlap);
	} else {
		hr = listen->id.ep.datagram->GetRequest(id_priv->id.ep.datagram,
												&id_priv->id.comp_entry.Overlap);
	}
	if (FAILED(hr) && hr != WV_IO_PENDING) {
		id_priv->id.comp_entry.Busy = 0;
		InterlockedDecrement(&id_priv->refcnt);
		goto err2;
	}
	LeaveCriticalSection(&lock);
	return 0;

err2:
	InterlockedDecrement(&listen->refcnt);
err1:
	LeaveCriticalSection(&lock);
	if (id_priv != NULL) {
		rdma_destroy_id(&id_priv->id);
	}
	return hr;
}
__declspec(dllexport)
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct cma_id_private *id_priv, *req_id;
	HRESULT hr;
	int i;

	if (backlog <= 0) {
		backlog = CMA_DEFAULT_BACKLOG;
	}

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	id_priv->req_list = new struct rdma_cm_id*[backlog];
	if (id_priv->req_list == NULL) {
		return WV_NO_MEMORY;
	}

	RtlZeroMemory(id_priv->req_list, sizeof(struct rdma_cm_id *) * backlog);
	id_priv->backlog = backlog;

	id_priv->state = cma_listening;
	hr = (id->ps == RDMA_PS_TCP) ?
		 id->ep.connect->Listen(backlog) : id->ep.datagram->Listen(backlog);
	if (FAILED(hr)) {
		return hr;
	}

	/* pre-post one asynchronous GetRequest per backlog slot */
	for (i = 0; i < backlog; i++) {
		hr = ucma_get_request(id_priv, i);
		if (FAILED(hr)) {
			return hr;
		}
	}

	return 0;
}
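/*
 * Illustrative passive-side sketch (hypothetical CMA_USAGE_EXAMPLES
 * guard): bind, then listen; backlog <= 0 falls back to
 * CMA_DEFAULT_BACKLOG.
 */
#ifdef CMA_USAGE_EXAMPLES
static int example_listen(struct rdma_event_channel *channel,
						  struct sockaddr *addr)
{
	struct rdma_cm_id *listen_id;
	int ret;

	ret = rdma_create_id(channel, &listen_id, NULL, RDMA_PS_TCP);
	if (ret) {
		return ret;
	}

	ret = rdma_bind_addr(listen_id, addr);
	if (!ret) {
		ret = rdma_listen(listen_id, 0);
	}
	if (ret) {
		rdma_destroy_id(listen_id);
	}
	return ret;
}
#endif /* CMA_USAGE_EXAMPLES */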
__declspec(dllexport)
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct cma_id_private *id_priv;
	WV_CONNECT_PARAM attr;
	HRESULT hr;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	hr = ucma_valid_param(id_priv, conn_param);
	if (FAILED(hr)) {
		return hr;
	}

	RtlZeroMemory(&attr, sizeof attr);
	attr.ResponderResources = conn_param->responder_resources;
	attr.InitiatorDepth = conn_param->initiator_depth;
	attr.RetryCount = conn_param->retry_count;
	attr.RnrRetryCount = conn_param->rnr_retry_count;
	if ((attr.DataLength = conn_param->private_data_len)) {
		RtlCopyMemory(attr.Data, conn_param->private_data, attr.DataLength);
	}

	id_priv->state = cma_accepting;
	InterlockedIncrement(&id_priv->refcnt);
	id->comp_entry.Busy = 1;
	hr = id->ep.connect->Accept(id->qp->conn_handle, &attr,
								&id->comp_entry.Overlap);
	if (FAILED(hr) && hr != WV_IO_PENDING) {
		InterlockedDecrement(&id_priv->refcnt);
		id->comp_entry.Busy = 0;
		id_priv->state = cma_disconnected;
		return hr;
	}

	return 0;
}
__declspec(dllexport)
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
				uint8_t private_data_len)
{
	struct cma_id_private *id_priv;
	HRESULT hr;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	id_priv->state = cma_disconnected;
	hr = id->ep.connect->Reject(private_data, private_data_len);
	if (FAILED(hr)) {
		return hr;
	}

	return 0;
}

__declspec(dllexport)
int rdma_notify(struct rdma_cm_id *id, enum ibv_event_type event)
{
	return 0;
}
__declspec(dllexport)
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct cma_id_private *id_priv;
	HRESULT hr;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	if (id_priv->state == cma_connected) {
		id_priv->state = cma_active_disconnect;
	} else {
		id_priv->state = cma_disconnected;
	}

	hr = id->ep.connect->Disconnect(id->qp->conn_handle, NULL);
	if (FAILED(hr)) {
		return hr;
	}

	return 0;
}
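/*
 * Illustrative teardown order (hypothetical CMA_USAGE_EXAMPLES guard):
 * disconnect first so the DISCONNECTED event can surface, then release
 * the QP and the id.
 */
#ifdef CMA_USAGE_EXAMPLES
static void example_teardown(struct rdma_cm_id *id)
{
	(void) rdma_disconnect(id);
	rdma_destroy_qp(id);
	rdma_destroy_id(id);
}
#endif /* CMA_USAGE_EXAMPLES */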
__declspec(dllexport)
int rdma_ack_cm_event(struct rdma_cm_event *event)
{
	struct cma_event *evt;
	struct cma_id_private *listen;

	evt = CONTAINING_RECORD(event, struct cma_event, event);
	InterlockedDecrement(&evt->id_priv->refcnt);
	if (evt->event.listen_id) {
		listen = CONTAINING_RECORD(evt->event.listen_id, struct cma_id_private, id);
		InterlockedDecrement(&listen->refcnt);
	}

	delete evt;
	return 0;
}
static int ucma_process_conn_req(struct cma_event *event)
{
	struct cma_id_private *listen, *id_priv;
	struct cma_event_channel *chan;

	listen = (struct cma_id_private *) event->id_priv->id.context;
	id_priv = event->id_priv;

	/* replace the consumed backlog slot with a fresh GetRequest */
	ucma_get_request(listen, id_priv->index);

	if (SUCCEEDED(event->event.status)) {
		event->event.status = ucma_query_connect(&id_priv->id,
												 &event->event.param.conn);
	}

	if (SUCCEEDED(event->event.status)) {
		event->event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
		id_priv->state = cma_passive_connect;
		event->event.listen_id = &listen->id;
	} else {
		InterlockedDecrement(&listen->refcnt);
		InterlockedDecrement(&id_priv->refcnt);
		rdma_destroy_id(&id_priv->id);
	}

	return event->event.status;
}
static int ucma_process_conn_resp(struct cma_event *event)
{
	struct rdma_cm_id *id;
	WV_CONNECT_PARAM attr;
	HRESULT hr;

	if (FAILED(event->event.status)) {
		goto err;
	}

	RtlZeroMemory(&attr, sizeof(attr));
	event->id_priv->state = cma_accepting;

	id = &event->id_priv->id;
	id->comp_entry.Busy = 1;
	hr = id->ep.connect->Accept(id->qp->conn_handle, &attr,
								&id->comp_entry.Overlap);
	if (FAILED(hr) && hr != WV_IO_PENDING) {
		id->comp_entry.Busy = 0;
		event->event.status = hr;
		goto err;
	}

	return WV_IO_PENDING;

err:
	event->event.event = (event->event.status == WV_REJECTED) ?
						 RDMA_CM_EVENT_REJECTED :
						 RDMA_CM_EVENT_CONNECT_ERROR;
	event->id_priv->state = cma_disconnected;
	return 0;
}
static void ucma_process_establish(struct cma_event *event)
{
	struct cma_id_private *id_priv = event->id_priv;

	if (SUCCEEDED(event->event.status)) {
		event->event.status = ucma_query_connect(&id_priv->id,
												 &event->event.param.conn);
	}

	if (SUCCEEDED(event->event.status)) {
		event->event.event = RDMA_CM_EVENT_ESTABLISHED;

		id_priv->state = cma_connected;
		InterlockedIncrement(&id_priv->refcnt);
		id_priv->id.comp_entry.Busy = 1;
		id_priv->id.ep.connect->NotifyDisconnect(&id_priv->id.comp_entry.Overlap);
	} else {
		event->event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		event->id_priv->state = cma_disconnected;
	}
}
static int ucma_process_event(struct cma_event *event)
{
	struct cma_id_private *listen, *id_priv;
	WV_CONNECT_ATTRIBUTES attr;
	HRESULT hr = 0;

	id_priv = event->id_priv;

	EnterCriticalSection(&lock);
	switch (id_priv->state) {
	case cma_get_request:
		listen = (struct cma_id_private *) id_priv->id.context;
		if (listen->state != cma_listening) {
			InterlockedDecrement(&id_priv->refcnt);
			hr = WV_CANCELLED;
			break;
		}

		listen->req_list[id_priv->index] = NULL;
		LeaveCriticalSection(&lock);
		return ucma_process_conn_req(event);
	case cma_addr_resolve:
		event->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
		break;
	case cma_route_resolve:
		event->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
		break;
	case cma_active_connect:
		hr = ucma_process_conn_resp(event);
		break;
	case cma_accepting:
		ucma_process_establish(event);
		break;
	case cma_connected:
		event->event.event = RDMA_CM_EVENT_DISCONNECTED;
		id_priv->state = cma_passive_disconnect;
		break;
	case cma_active_disconnect:
		event->event.event = RDMA_CM_EVENT_DISCONNECTED;
		id_priv->state = cma_disconnected;
		break;
	default:
		InterlockedDecrement(&id_priv->refcnt);
		hr = WV_CANCELLED;
		break;
	}
	LeaveCriticalSection(&lock);

	return hr;
}
__declspec(dllexport)
int rdma_get_cm_event(struct rdma_event_channel *channel,
					  struct rdma_cm_event **event)
{
	struct cma_event *evt;
	struct rdma_cm_id *id;
	COMP_ENTRY *entry;
	DWORD bytes;
	int ret;

	evt = new struct cma_event;
	if (evt == NULL) {
		return WV_NO_MEMORY;
	}

	do {
		RtlZeroMemory(evt, sizeof(struct cma_event));

		ret = CompChannelPoll(&channel->channel, &entry);
		if (ret) {
			break;
		}

		id = CONTAINING_RECORD(entry, struct rdma_cm_id, comp_entry);
		evt->id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
		evt->event.id = id;
		evt->event.param.conn.private_data = evt->private_data;
		evt->event.status = id->ep.connect->
							GetOverlappedResult(&entry->Overlap, &bytes, FALSE);

		ret = ucma_process_event(evt);
	} while (ret);	/* skip completions that were canceled or re-armed */

	if (ret) {
		delete evt;
	} else {
		*event = &evt->event;
	}
	return ret;
}
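/*
 * Illustrative event loop (hypothetical CMA_USAGE_EXAMPLES guard):
 * every delivered event holds references on its ids, so each one must
 * be released with rdma_ack_cm_event().
 */
#ifdef CMA_USAGE_EXAMPLES
static int example_event_loop(struct rdma_event_channel *channel)
{
	struct rdma_cm_event *event;
	int ret;

	while ((ret = rdma_get_cm_event(channel, &event)) == 0) {
		switch (event->event) {
		case RDMA_CM_EVENT_CONNECT_REQUEST:
			/* accept on event->id; event->listen_id names the listener */
			break;
		case RDMA_CM_EVENT_ESTABLISHED:
		case RDMA_CM_EVENT_DISCONNECTED:
		default:
			break;
		}
		rdma_ack_cm_event(event);
	}
	return ret;
}
#endif /* CMA_USAGE_EXAMPLES */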
__declspec(dllexport)
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
						void *context)
{
	return WV_NOT_SUPPORTED;
}

__declspec(dllexport)
int rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	return WV_NOT_SUPPORTED;
}
__declspec(dllexport)
const char *rdma_event_str(enum rdma_cm_event_type event)
{
	switch (event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		return "RDMA_CM_EVENT_ADDR_RESOLVED";
	case RDMA_CM_EVENT_ADDR_ERROR:
		return "RDMA_CM_EVENT_ADDR_ERROR";
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		return "RDMA_CM_EVENT_ROUTE_RESOLVED";
	case RDMA_CM_EVENT_ROUTE_ERROR:
		return "RDMA_CM_EVENT_ROUTE_ERROR";
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		return "RDMA_CM_EVENT_CONNECT_REQUEST";
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
		return "RDMA_CM_EVENT_CONNECT_RESPONSE";
	case RDMA_CM_EVENT_CONNECT_ERROR:
		return "RDMA_CM_EVENT_CONNECT_ERROR";
	case RDMA_CM_EVENT_UNREACHABLE:
		return "RDMA_CM_EVENT_UNREACHABLE";
	case RDMA_CM_EVENT_REJECTED:
		return "RDMA_CM_EVENT_REJECTED";
	case RDMA_CM_EVENT_ESTABLISHED:
		return "RDMA_CM_EVENT_ESTABLISHED";
	case RDMA_CM_EVENT_DISCONNECTED:
		return "RDMA_CM_EVENT_DISCONNECTED";
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		return "RDMA_CM_EVENT_DEVICE_REMOVAL";
	case RDMA_CM_EVENT_MULTICAST_JOIN:
		return "RDMA_CM_EVENT_MULTICAST_JOIN";
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		return "RDMA_CM_EVENT_MULTICAST_ERROR";
	case RDMA_CM_EVENT_ADDR_CHANGE:
		return "RDMA_CM_EVENT_ADDR_CHANGE";
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		return "RDMA_CM_EVENT_TIMEWAIT_EXIT";
	default:
		return "UNKNOWN EVENT";
	}
}
__declspec(dllexport)
int rdma_set_option(struct rdma_cm_id *id, int level, int optname,
					void *optval, size_t optlen)
{
	return WV_NOT_SUPPORTED;
}

__declspec(dllexport)
int rdma_migrate_id(struct rdma_cm_id *id, struct rdma_event_channel *channel)
{
	id->channel = channel;
	return 0;
}