2 * Copyright (c) 2005-2009 Intel Corporation. All rights reserved.
\r
4 * This software is available to you under the OpenIB.org BSD license
\r
7 * Redistribution and use in source and binary forms, with or
\r
8 * without modification, are permitted provided that the following
\r
9 * conditions are met:
\r
11 * - Redistributions of source code must retain the above
\r
12 * copyright notice, this list of conditions and the following
\r
15 * - Redistributions in binary form must reproduce the above
\r
16 * copyright notice, this list of conditions and the following
\r
17 * disclaimer in the documentation and/or other materials
\r
18 * provided with the distribution.
\r
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
\r
23 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
\r
24 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
\r
25 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
\r
26 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
\r
30 #include <windows.h>
\r
31 #include <winsock2.h>
\r
33 #include <iphlpapi.h>
\r
35 #include <rdma/rdma_cma.h>
\r
36 #include <infiniband/verbs.h>
\r
37 #include <comp_channel.h>
\r
38 #include <iba/ibat.h>
\r
40 #include "..\..\..\etc\user\comp_channel.cpp"
\r
41 #include "..\..\..\etc\user\dlist.c"
\r
43 static struct ibvw_windata windata;
\r
53 cma_passive_connect,
\r
58 cma_active_disconnect,
\r
59 cma_passive_disconnect,
\r
63 #define CMA_DEFAULT_BACKLOG 16
\r
65 struct cma_id_private
\r
67 struct rdma_cm_id id;
\r
68 enum cma_state state;
\r
69 struct cma_device *cma_dev;
\r
72 struct rdma_cm_id **req_list;
\r
77 struct ibv_context *verbs;
\r
80 uint8_t max_initiator_depth;
\r
81 uint8_t max_responder_resources;
\r
85 struct rdma_cm_event event;
\r
86 uint8_t private_data[56];
\r
87 struct cma_id_private *id_priv;
\r
90 static struct cma_device *cma_dev_array;
\r
91 static int cma_dev_cnt;
\r
93 static void ucma_cleanup(void)
\r
95 if (cma_dev_cnt > 0) {
\r
96 while (cma_dev_cnt > 0) {
\r
97 ibv_close_device(cma_dev_array[--cma_dev_cnt].verbs);
\r
99 delete cma_dev_array;
\r
102 if (windata.prov != NULL) {
\r
103 ibvw_release_windata(&windata, IBVW_WINDATA_VERSION);
\r
104 windata.prov = NULL;
\r
/*
 * One-time initialization: under the global lock, acquire the winverbs
 * provider, enumerate and open every verbs device, and cache each device's
 * GUID, port count, and max read-atomic depths in cma_dev_array.
 * NOTE(review): this extract is missing interleaved lines (braces and
 * error paths); only the visible statements are annotated.
 */
108 static int ucma_init(void)
\r
110 struct ibv_device **dev_list = NULL;
\r
111 struct cma_device *cma_dev;
\r
112 struct ibv_device_attr attr;
\r
115 EnterCriticalSection(&lock);
\r
/* Another thread may have finished initialization while we waited. */
116 if (cma_dev_cnt > 0) {
\r
120 ret = ibvw_get_windata(&windata, IBVW_WINDATA_VERSION);
\r
/* cma_dev_cnt receives the number of devices found. */
125 dev_list = ibv_get_device_list(&cma_dev_cnt);
\r
126 if (dev_list == NULL) {
\r
/* Allocated with new[]; must be released with delete[] (see ucma_cleanup). */
131 cma_dev_array = new struct cma_device[cma_dev_cnt];
\r
132 if (cma_dev_array == NULL) {
\r
137 for (i = 0; dev_list[i]; ++i) {
\r
138 cma_dev = &cma_dev_array[i];
\r
140 cma_dev->guid = ibv_get_device_guid(dev_list[i]);
\r
141 cma_dev->verbs = ibv_open_device(dev_list[i]);
\r
142 if (cma_dev->verbs == NULL) {
\r
147 ret = ibv_query_device(cma_dev->verbs, &attr);
\r
/* Limits cached here are consulted later by ucma_valid_param(). */
152 cma_dev->port_cnt = attr.phys_port_cnt;
\r
153 cma_dev->max_initiator_depth = (uint8_t) attr.max_qp_init_rd_atom;
\r
154 cma_dev->max_responder_resources = (uint8_t) attr.max_qp_rd_atom;
\r
156 ibv_free_device_list(dev_list);
\r
158 LeaveCriticalSection(&lock);
\r
/* Error path: unlock, then release the device list. */
163 LeaveCriticalSection(&lock);
\r
165 ibv_free_device_list(dev_list);
\r
170 __declspec(dllexport)
\r
171 struct ibv_context **rdma_get_devices(int *num_devices)
\r
173 struct ibv_context **devs = NULL;
\r
176 if (!cma_dev_cnt && ucma_init()) {
\r
180 devs = new struct ibv_context *[cma_dev_cnt + 1];
\r
181 if (devs == NULL) {
\r
185 for (i = 0; i < cma_dev_cnt; i++) {
\r
186 devs[i] = cma_dev_array[i].verbs;
\r
190 if (num_devices != NULL) {
\r
191 *num_devices = devs ? cma_dev_cnt : 0;
\r
196 __declspec(dllexport)
\r
197 void rdma_free_devices(struct ibv_context **list)
\r
202 __declspec(dllexport)
\r
203 struct rdma_event_channel *rdma_create_event_channel(void)
\r
205 struct rdma_event_channel *channel;
\r
207 if (!cma_dev_cnt && ucma_init()) {
\r
211 channel = new struct rdma_event_channel;
\r
212 if (channel == NULL) {
\r
216 CompChannelInit(windata.comp_mgr, &channel->channel, INFINITE);
\r
220 __declspec(dllexport)
\r
221 void rdma_destroy_event_channel(struct rdma_event_channel *channel)
\r
223 CompChannelCleanup(&channel->channel);
\r
/*
 * Allocate a communication identifier bound to the given event channel.
 * A TCP port space id gets a winverbs connect endpoint; any other port
 * space gets a datagram endpoint.  On success *id receives the new id.
 * NOTE(review): the failure-cleanup lines of this function were dropped
 * from this extract; only visible statements are annotated.
 */
227 __declspec(dllexport)
\r
228 int rdma_create_id(struct rdma_event_channel *channel,
\r
229 struct rdma_cm_id **id, void *context,
\r
230 enum rdma_port_space ps)
\r
232 struct cma_id_private *id_priv;
\r
/* Lazily initialize the device table on first id creation. */
235 hr = cma_dev_cnt ? 0 : ucma_init();
\r
240 id_priv = new struct cma_id_private;
\r
241 if (id_priv == NULL) {
\r
245 RtlZeroMemory(id_priv, sizeof(struct cma_id_private));
\r
246 id_priv->id.context = context;
\r
247 id_priv->id.channel = channel;
\r
248 id_priv->id.ps = ps;
\r
/* Register this id's completion entry with the channel. */
249 CompEntryInit(&channel->channel, &id_priv->id.comp_entry);
\r
251 if (ps == RDMA_PS_TCP) {
\r
252 hr = windata.prov->CreateConnectEndpoint(&id_priv->id.ep.connect);
\r
254 hr = windata.prov->CreateDatagramEndpoint(&id_priv->id.ep.datagram);
\r
/* Hand the embedded public id back to the caller. */
260 *id = &id_priv->id;
\r
268 static void ucma_destroy_listen(struct cma_id_private *id_priv)
\r
270 while (--id_priv->backlog >= 0) {
\r
271 if (id_priv->req_list[id_priv->backlog] != NULL) {
\r
272 rdma_destroy_id(id_priv->req_list[id_priv->backlog]);
\r
276 delete id_priv->req_list;
\r
/*
 * Destroy a communication identifier: cancel outstanding overlapped I/O
 * and the pending completion entry, tear down any listen backlog, then
 * release the underlying winverbs endpoint.
 * NOTE(review): the final free of id_priv is on lines dropped from this
 * extract; only visible statements are annotated.
 */
279 __declspec(dllexport)
\r
280 int rdma_destroy_id(struct rdma_cm_id *id)
\r
282 struct cma_id_private *id_priv;
\r
284 id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
\r
285 if (id->ps == RDMA_PS_TCP) {
\r
286 id->ep.connect->CancelOverlappedRequests();
\r
288 id->ep.datagram->CancelOverlappedRequests();
\r
/* Remove any completion still queued on the event channel. */
291 CompEntryCancel(&id->comp_entry);
\r
/* A positive backlog means this id was listening. */
293 if (id_priv->backlog > 0) {
\r
294 ucma_destroy_listen(id_priv);
\r
297 if (id_priv->id.ps == RDMA_PS_TCP) {
\r
298 id_priv->id.ep.connect->Release();
\r
300 id_priv->id.ep.datagram->Release();
\r
/*
 * Size in bytes of the sockaddr for the address family in use:
 * sockaddr_in for PF_INET, sockaddr_in6 for everything else.
 */
static int ucma_addrlen(struct sockaddr *addr)
{
	return (addr->sa_family == PF_INET) ?
		sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);
}
\r
/*
 * Bind id_priv to the cached cma_device whose GUID matches, and publish
 * that device's verbs context through the public id.
 * NOTE(review): the success/failure return statements were dropped from
 * this extract; only visible statements are annotated.
 */
316 static int ucma_get_device(struct cma_id_private *id_priv, uint64_t guid)
\r
318 struct cma_device *cma_dev;
\r
321 for (i = 0; i < cma_dev_cnt; i++) {
\r
322 cma_dev = &cma_dev_array[i];
\r
323 if (cma_dev->guid == guid) {
\r
324 id_priv->cma_dev = cma_dev;
\r
325 id_priv->id.verbs = cma_dev->verbs;
\r
/*
 * Refresh the id's cached route and (optionally) connection parameters
 * from the connect endpoint, and bind the id to its device once the
 * endpoint reports a device GUID.
 * param may be NULL when only the addresses/device are wanted.
 * NOTE(review): interleaved error-check lines were dropped from this
 * extract; only visible statements are annotated.
 */
332 static int ucma_query_connect(struct rdma_cm_id *id, struct rdma_conn_param *param)
\r
334 struct cma_id_private *id_priv;
\r
335 WV_CONNECT_ATTRIBUTES attr;
\r
338 id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
\r
339 hr = id->ep.connect->Query(&attr);
\r
344 RtlCopyMemory(&id->route.addr.src_addr, &attr.LocalAddress,
\r
345 sizeof attr.LocalAddress);
\r
346 RtlCopyMemory(&id->route.addr.dst_addr, &attr.PeerAddress,
\r
347 sizeof attr.PeerAddress);
\r
349 if (param != NULL) {
\r
/* Copies into the caller-provided private_data buffer. */
350 RtlCopyMemory((void *) param->private_data, attr.Param.Data,
\r
351 attr.Param.DataLength);
\r
352 param->private_data_len = (uint8_t) attr.Param.DataLength;
\r
353 param->responder_resources = (uint8_t) attr.Param.ResponderResources;
\r
354 param->initiator_depth = (uint8_t) attr.Param.InitiatorDepth;
\r
355 param->flow_control = 1;
\r
356 param->retry_count = attr.Param.RetryCount;
\r
357 param->rnr_retry_count = attr.Param.RnrRetryCount;
\r
/* First time the endpoint reports a device: bind the id to it. */
360 if (id_priv->cma_dev == NULL && attr.Device.DeviceGuid != 0) {
\r
361 hr = ucma_get_device(id_priv, attr.Device.DeviceGuid);
\r
366 id->route.addr.addr.ibaddr.pkey = attr.Device.Pkey;
\r
367 id_priv->id.port_num = attr.Device.PortNumber;
\r
/*
 * Datagram counterpart of ucma_query_connect(): refresh the id's cached
 * route and (optionally) UD parameters from the datagram endpoint, and
 * bind the id to its device once the endpoint reports a device GUID.
 * NOTE(review): interleaved error-check lines were dropped from this
 * extract; only visible statements are annotated.
 */
373 static int ucma_query_datagram(struct rdma_cm_id *id, struct rdma_ud_param *param)
\r
375 struct cma_id_private *id_priv;
\r
376 WV_DATAGRAM_ATTRIBUTES attr;
\r
379 id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
\r
380 hr = id->ep.datagram->Query(&attr);
\r
385 RtlCopyMemory(&id->route.addr.src_addr, &attr.LocalAddress,
\r
386 sizeof attr.LocalAddress);
\r
387 RtlCopyMemory(&id->route.addr.dst_addr, &attr.PeerAddress,
\r
388 sizeof attr.PeerAddress);
\r
390 if (param != NULL) {
\r
391 RtlCopyMemory((void *) param->private_data, attr.Param.Data,
\r
392 attr.Param.DataLength);
\r
393 param->private_data_len = (uint8_t) attr.Param.DataLength;
\r
/* AV conversion intentionally left unimplemented here. */
394 // ucma_convert_av(&attr.Param.AddressVector, param->ah_attr)
\r
395 param->qp_num = attr.Param.Qpn;
\r
396 param->qkey = attr.Param.Qkey;
\r
399 if (id_priv->cma_dev == NULL && attr.Device.DeviceGuid != 0) {
\r
400 hr = ucma_get_device(id_priv, attr.Device.DeviceGuid);
\r
403 id->route.addr.addr.ibaddr.pkey = attr.Device.Pkey;
\r
404 id_priv->id.port_num = attr.Device.PortNumber;
\r
409 __declspec(dllexport)
\r
410 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
\r
412 struct cma_id_private *id_priv;
\r
415 if (id->ps == RDMA_PS_TCP) {
\r
416 hr = id->ep.connect->BindAddress(addr);
\r
417 if (SUCCEEDED(hr)) {
\r
418 hr = ucma_query_connect(id, NULL);
\r
421 hr = id->ep.datagram->BindAddress(addr);
\r
422 if (SUCCEEDED(hr)) {
\r
423 hr = ucma_query_datagram(id, NULL);
\r
427 if (SUCCEEDED(hr)) {
\r
428 id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
\r
429 id_priv->state = cma_addr_bind;
\r
/*
 * Resolve (and bind) a local source address for the given destination.
 * When src_addr is NULL, a throw-away socket plus
 * SIO_ROUTING_INTERFACE_QUERY picks the local interface that routes to
 * dst_addr.  The destination is then recorded on the id and completion
 * is reported asynchronously through the event channel.
 * NOTE(review): lines dropped from this extract include the expected
 * closesocket() of the helper socket — confirm against the original;
 * as visible, the early returns would leak it.
 */
434 __declspec(dllexport)
\r
435 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
\r
436 struct sockaddr *dst_addr, int timeout_ms)
\r
438 struct cma_id_private *id_priv;
\r
444 id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
\r
/* Only an idle id needs a source address selected and bound. */
445 if (id_priv->state == cma_idle) {
\r
446 if (src_addr == NULL) {
\r
447 if (id->ps == RDMA_PS_TCP) {
\r
448 s = socket(dst_addr->sa_family, SOCK_STREAM, IPPROTO_TCP);
\r
450 s = socket(dst_addr->sa_family, SOCK_DGRAM, IPPROTO_UDP);
\r
452 if (s == INVALID_SOCKET) {
\r
453 return WSAGetLastError();
\r
/* Ask the stack which local interface routes to dst_addr. */
456 hr = WSAIoctl(s, SIO_ROUTING_INTERFACE_QUERY, dst_addr, ucma_addrlen(dst_addr),
\r
457 &addr, sizeof addr, &size, NULL, NULL);
\r
460 return WSAGetLastError();
\r
462 src_addr = &addr.Sa;
\r
465 hr = rdma_bind_addr(id, src_addr);
\r
471 RtlCopyMemory(&id->route.addr.dst_addr, dst_addr, ucma_addrlen(dst_addr));
\r
472 id_priv->state = cma_addr_resolve;
\r
/* Address resolution completes immediately; post the event now. */
474 CompEntryPost(&id->comp_entry);
\r
478 __declspec(dllexport)
\r
479 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
\r
481 struct cma_id_private *id_priv;
\r
482 IBAT_PATH_BLOB path;
\r
485 hr = IBAT::Resolve(&id->route.addr.src_addr, &id->route.addr.dst_addr, &path);
\r
490 hr = (id->ps == RDMA_PS_TCP) ?
\r
491 id->ep.connect->Modify(WV_EP_OPTION_ROUTE, &path, sizeof path) :
\r
492 id->ep.datagram->Modify(WV_EP_OPTION_ROUTE, &path, sizeof path);
\r
497 id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
\r
498 id_priv->state = cma_route_resolve;
\r
500 CompEntryPost(&id->comp_entry);
\r
/*
 * Move a QP to the INIT state on the id's port, using the pkey index
 * looked up from the id's cached pkey.
 * NOTE(review): the FindPkey output argument (&index) and its error
 * check sit on lines dropped from this extract.
 */
504 static int ucma_modify_qp_init(struct cma_id_private *id_priv, struct ibv_qp *qp)
\r
506 struct ibv_qp_attr qp_attr;
\r
510 RtlZeroMemory(&qp_attr, sizeof qp_attr);
\r
511 qp_attr.qp_state = IBV_QPS_INIT;
\r
512 qp_attr.port_num = id_priv->id.port_num;
\r
/* Translate the pkey value into the port's pkey table index. */
513 hr = qp->context->cmd_if->FindPkey(id_priv->id.port_num,
\r
514 id_priv->id.route.addr.addr.ibaddr.pkey,
\r
520 qp_attr.pkey_index = index;
\r
521 return ibv_modify_qp(qp, &qp_attr, (enum ibv_qp_attr_mask)
\r
522 (IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT));
\r
525 static int ucma_init_ud_qp(struct cma_id_private *id_priv, struct ibv_qp *qp)
\r
527 struct ibv_qp_attr qp_attr;
\r
528 int qp_attr_mask, ret;
\r
530 ret = ucma_modify_qp_init(id_priv, qp);
\r
535 qp_attr.qp_state = IBV_QPS_RTR;
\r
536 ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE);
\r
541 qp_attr.qp_state = IBV_QPS_RTS;
\r
542 qp_attr.sq_psn = 0;
\r
543 return ibv_modify_qp(qp, &qp_attr, (enum ibv_qp_attr_mask)
\r
544 (IBV_QP_STATE | IBV_QP_SQ_PSN));
\r
/*
 * Create a QP on the given pd for this id and initialize it: TCP-style
 * ids get an INIT transition only (connect/accept finish the job); other
 * port spaces get the full UD INIT->RTR->RTS bring-up.  The pd must
 * belong to the same device the id resolved to.
 * NOTE(review): error-path lines were dropped from this extract; the
 * visible ibv_destroy_qp() is the failure cleanup.
 */
547 __declspec(dllexport)
\r
548 int rdma_create_qp(struct rdma_cm_id *id, struct ibv_pd *pd,
\r
549 struct ibv_qp_init_attr *qp_init_attr)
\r
551 struct cma_id_private *id_priv;
\r
555 id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
\r
/* The pd must come from the device this id is bound to. */
556 if (id->verbs != pd->context) {
\r
560 qp = ibv_create_qp(pd, qp_init_attr);
\r
565 if (id->ps == RDMA_PS_TCP) {
\r
566 ret = ucma_modify_qp_init(id_priv, qp);
\r
568 ret = ucma_init_ud_qp(id_priv, qp);
\r
/* Failure cleanup: tear the QP back down. */
577 ibv_destroy_qp(qp);
\r
581 __declspec(dllexport)
\r
582 void rdma_destroy_qp(struct rdma_cm_id *id)
\r
584 ibv_destroy_qp(id->qp);
\r
/*
 * Validate connection parameters: only TCP port-space ids may connect,
 * and the requested read-atomic depths must not exceed the device limits
 * cached at ucma_init() time.
 * NOTE(review): the return statements sit on lines dropped from this
 * extract, so the exact error codes are not visible here.
 */
587 static int ucma_valid_param(struct cma_id_private *id_priv,
\r
588 struct rdma_conn_param *conn_param)
\r
590 if (id_priv->id.ps != RDMA_PS_TCP) {
\r
594 if ((conn_param->responder_resources > id_priv->cma_dev->max_responder_resources) ||
\r
595 (conn_param->initiator_depth > id_priv->cma_dev->max_initiator_depth)) {
\r
/*
 * Start an active-side connection: validate conn_param against device
 * limits, marshal it into a WV_CONNECT_PARAM, and issue an asynchronous
 * Connect on the endpoint.  Completion is delivered through the event
 * channel (comp_entry); on immediate failure the state rolls back to
 * route-resolved.
 */
602 __declspec(dllexport)
\r
603 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
\r
605 struct cma_id_private *id_priv;
\r
606 WV_CONNECT_PARAM attr;
\r
609 id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
\r
610 hr = ucma_valid_param(id_priv, conn_param);
\r
615 RtlZeroMemory(&attr, sizeof attr);
\r
616 attr.ResponderResources = conn_param->responder_resources;
\r
617 attr.InitiatorDepth = conn_param->initiator_depth;
\r
618 attr.RetryCount = conn_param->retry_count;
\r
619 attr.RnrRetryCount = conn_param->rnr_retry_count;
\r
/* Intentional assignment-in-condition: copy only when data present. */
620 if ((attr.DataLength = conn_param->private_data_len)) {
\r
621 RtlCopyMemory(attr.Data, conn_param->private_data, attr.DataLength);
\r
624 id_priv->state = cma_active_connect;
\r
/* Mark the completion entry in-flight before issuing the request. */
625 id->comp_entry.Busy = 1;
\r
626 hr = id->ep.connect->Connect(id->qp->conn_handle, &id->route.addr.dst_addr,
\r
627 &attr, &id->comp_entry.Overlap);
\r
/* WV_IO_PENDING means the request was queued successfully. */
628 if (FAILED(hr) && hr != WV_IO_PENDING) {
\r
629 id->comp_entry.Busy = 0;
\r
630 id_priv->state = cma_route_resolve;
\r
/*
 * Pre-post one asynchronous GetRequest slot for a listening id: create a
 * child id in req_list[index] and ask the listen endpoint to fill it with
 * the next incoming connection request.  Completion arrives on the event
 * channel as a cma_get_request-state event.
 */
637 static int ucma_get_request(struct cma_id_private *listen, int index)
\r
639 struct cma_id_private *id_priv;
\r
/* The child id stores the listener in its context pointer. */
642 hr = rdma_create_id(listen->id.channel, &listen->req_list[index],
\r
643 listen, listen->id.ps);
\r
648 id_priv = CONTAINING_RECORD(listen->req_list[index], struct cma_id_private, id);
\r
/* Remember the slot so the request can be re-armed after it fires. */
649 id_priv->index = index;
\r
650 id_priv->state = cma_get_request;
\r
652 id_priv->id.comp_entry.Busy = 1;
\r
653 if (listen->id.ps == RDMA_PS_TCP) {
\r
654 hr = listen->id.ep.connect->GetRequest(id_priv->id.ep.connect,
\r
655 &id_priv->id.comp_entry.Overlap);
\r
657 hr = listen->id.ep.datagram->GetRequest(id_priv->id.ep.datagram,
\r
658 &id_priv->id.comp_entry.Overlap);
\r
660 if (FAILED(hr) && hr != WV_IO_PENDING) {
\r
661 id_priv->id.comp_entry.Busy = 0;
\r
/*
 * Put the id into the listening state: allocate the pending-request
 * table, start the endpoint listen, then pre-post one GetRequest per
 * backlog slot (see ucma_get_request).
 * NOTE(review): error-path lines were dropped from this extract.
 */
668 __declspec(dllexport)
\r
669 int rdma_listen(struct rdma_cm_id *id, int backlog)
\r
671 struct cma_id_private *id_priv, *req_id;
\r
675 if (backlog <= 0) {
\r
676 backlog = CMA_DEFAULT_BACKLOG;
\r
679 id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
\r
/* new[] array — released with delete in ucma_destroy_listen; see the
 * delete[] note there. */
680 id_priv->req_list = new struct rdma_cm_id*[backlog];
\r
681 if (id_priv->req_list == NULL) {
\r
685 RtlZeroMemory(id_priv->req_list, sizeof(struct rdma_cm_id *) * backlog);
\r
686 id_priv->backlog = backlog;
\r
688 id_priv->state = cma_listening;
\r
689 hr = (id->ps == RDMA_PS_TCP) ?
\r
690 id->ep.connect->Listen(backlog) : id->ep.datagram->Listen(backlog);
\r
/* Arm one asynchronous request slot per backlog entry. */
695 for (i = 0; i < backlog; i++) {
\r
696 hr = ucma_get_request(id_priv, i);
\r
/*
 * Accept an incoming connection request on a passive-side id: validate
 * conn_param, marshal it into a WV_CONNECT_PARAM, and issue an
 * asynchronous Accept.  Completion is delivered through the event
 * channel; on immediate failure the id is marked disconnected.
 */
705 __declspec(dllexport)
\r
706 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
\r
708 struct cma_id_private *id_priv;
\r
709 WV_CONNECT_PARAM attr;
\r
712 id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
\r
713 hr = ucma_valid_param(id_priv, conn_param);
\r
718 RtlZeroMemory(&attr, sizeof attr);
\r
719 attr.ResponderResources = conn_param->responder_resources;
\r
720 attr.InitiatorDepth = conn_param->initiator_depth;
\r
721 attr.RetryCount = conn_param->retry_count;
\r
722 attr.RnrRetryCount = conn_param->rnr_retry_count;
\r
/* Intentional assignment-in-condition: copy only when data present. */
723 if ((attr.DataLength = conn_param->private_data_len)) {
\r
724 RtlCopyMemory(attr.Data, conn_param->private_data, attr.DataLength);
\r
727 id_priv->state = cma_accepting;
\r
728 id->comp_entry.Busy = 1;
\r
729 hr = id->ep.connect->Accept(id->qp->conn_handle, &attr,
\r
730 &id->comp_entry.Overlap);
\r
/* WV_IO_PENDING means the request was queued successfully. */
731 if (FAILED(hr) && hr != WV_IO_PENDING) {
\r
732 id->comp_entry.Busy = 0;
\r
733 id_priv->state = cma_disconnected;
\r
740 __declspec(dllexport)
\r
741 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
\r
742 uint8_t private_data_len)
\r
744 struct cma_id_private *id_priv;
\r
747 id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
\r
748 id_priv->state = cma_disconnected;
\r
749 hr = id->ep.connect->Reject(private_data, private_data_len);
\r
/*
 * Apply an externally observed QP event to the id.
 * NOTE(review): the body of this function sits on lines dropped from
 * this extract — presumably a no-op returning success; confirm against
 * the original source.
 */
756 __declspec(dllexport)
\r
757 int rdma_notify(struct rdma_cm_id *id, enum ibv_event_type event)
\r
/*
 * Tear down an established connection.  A connected id moves to
 * active-disconnect (it initiated the teardown); any other state goes
 * straight to disconnected.  The endpoint-level Disconnect then informs
 * the peer.
 */
762 __declspec(dllexport)
\r
763 int rdma_disconnect(struct rdma_cm_id *id)
\r
765 struct cma_id_private *id_priv;
\r
768 id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
\r
769 if (id_priv->state == cma_connected) {
\r
770 id_priv->state = cma_active_disconnect;
\r
772 id_priv->state = cma_disconnected;
\r
774 hr = id->ep.connect->Disconnect();
\r
782 __declspec(dllexport)
\r
783 int rdma_ack_cm_event(struct rdma_cm_event *event)
\r
785 struct cma_event *evt;
\r
787 evt = CONTAINING_RECORD(event, struct cma_event, event);
\r
/*
 * Handle a completed GetRequest on a listening id: re-arm the consumed
 * backlog slot, query the new connection's parameters, and surface a
 * CONNECT_REQUEST event.  On failure the child id is destroyed.
 */
792 static int ucma_process_conn_req(struct cma_event *event)
\r
794 struct cma_id_private *listen;
\r
795 struct cma_event_channel *chan;
\r
/* The listener was stashed in the child id's context by ucma_get_request. */
797 listen = (struct cma_id_private *) event->id_priv->id.context;
\r
/* Re-arm this backlog slot for the next incoming request. */
798 ucma_get_request(listen, event->id_priv->index);
\r
800 if (SUCCEEDED(event->event.status)) {
\r
801 event->event.status = ucma_query_connect(&event->id_priv->id,
\r
802 &event->event.param.conn);
\r
805 if (SUCCEEDED(event->event.status)) {
\r
806 event->event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
\r
807 event->id_priv->state = cma_passive_connect;
\r
808 event->event.listen_id = &listen->id;
\r
/* Failure: discard the half-constructed child id. */
810 rdma_destroy_id(&event->id_priv->id);
\r
813 return event->event.status;
\r
/*
 * Handle completion of an active-side Connect: on success, immediately
 * issue the asynchronous Accept that finishes the handshake and return
 * WV_IO_PENDING (the final event comes later); on failure, surface
 * REJECTED or CONNECT_ERROR and mark the id disconnected.
 */
816 static int ucma_process_conn_resp(struct cma_event *event)
\r
818 struct rdma_cm_id *id;
\r
819 WV_CONNECT_PARAM attr;
\r
822 if (FAILED(event->event.status)) {
\r
/* Empty parameter block: the peer's reply carried everything needed. */
826 RtlZeroMemory(&attr, sizeof(attr));
\r
827 event->id_priv->state = cma_accepting;
\r
829 id = &event->id_priv->id;
\r
830 id->comp_entry.Busy = 1;
\r
831 hr = id->ep.connect->Accept(id->qp->conn_handle, &attr,
\r
832 &id->comp_entry.Overlap);
\r
833 if (FAILED(hr) && hr != WV_IO_PENDING) {
\r
834 id->comp_entry.Busy = 0;
\r
835 event->event.status = hr;
\r
/* Accept queued: suppress this event, wait for the establish one. */
839 return WV_IO_PENDING;
\r
842 event->event.event = (event->event.status == WV_REJECTED) ?
\r
843 RDMA_CM_EVENT_REJECTED :
\r
844 RDMA_CM_EVENT_CONNECT_ERROR;
\r
845 event->id_priv->state = cma_disconnected;
\r
/*
 * Handle completion of the accept phase: on success surface ESTABLISHED,
 * mark the id connected, and arm an asynchronous disconnect notification;
 * on failure surface CONNECT_ERROR and mark the id disconnected.
 */
849 static void ucma_process_establish(struct cma_event *event)
\r
851 struct cma_id_private *id_priv = event->id_priv;
\r
853 if (SUCCEEDED(event->event.status)) {
\r
854 event->event.status = ucma_query_connect(&id_priv->id,
\r
855 &event->event.param.conn);
\r
858 if (SUCCEEDED(event->event.status)) {
\r
859 event->event.event = RDMA_CM_EVENT_ESTABLISHED;
\r
861 id_priv->state = cma_connected;
\r
/* Arm the peer-disconnect notification on the same completion entry. */
862 id_priv->id.comp_entry.Busy = 1;
\r
863 id_priv->id.ep.connect->NotifyDisconnect(&id_priv->id.comp_entry.Overlap);
\r
865 event->event.event = RDMA_CM_EVENT_CONNECT_ERROR;
\r
866 event->id_priv->state = cma_disconnected;
\r
/*
 * Translate a raw completion into an rdma_cm event based on the id's
 * current state, delegating the multi-step cases to the helpers above.
 * NOTE(review): break statements and the default case sit on lines
 * dropped from this extract.
 */
870 static int ucma_process_event(struct cma_event *event)
\r
872 WV_CONNECT_ATTRIBUTES attr;
\r
875 switch (event->id_priv->state) {
\r
876 case cma_get_request:
\r
877 hr = ucma_process_conn_req(event);
\r
879 case cma_addr_resolve:
\r
880 event->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
\r
882 case cma_route_resolve:
\r
883 event->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
\r
885 case cma_active_connect:
\r
886 hr = ucma_process_conn_resp(event);
\r
888 case cma_accepting:
\r
889 ucma_process_establish(event);
\r
/* Completion while connected == the peer disconnected. */
891 case cma_connected:
\r
892 event->event.event = RDMA_CM_EVENT_DISCONNECTED;
\r
893 event->id_priv->state = cma_passive_disconnect;
\r
895 case cma_active_disconnect:
\r
896 event->event.event = RDMA_CM_EVENT_DISCONNECTED;
\r
897 event->id_priv->state = cma_disconnected;
\r
/*
 * Block on the event channel for the next completion, wrap it in a
 * freshly allocated cma_event, and hand the embedded rdma_cm_event to
 * the caller.  The caller releases it with rdma_ack_cm_event().
 * NOTE(review): allocation-failure and poll-error lines were dropped
 * from this extract.
 */
906 __declspec(dllexport)
\r
907 int rdma_get_cm_event(struct rdma_event_channel *channel,
\r
908 struct rdma_cm_event **event)
\r
910 struct cma_event *evt;
\r
911 struct rdma_cm_id *id;
\r
915 evt = new struct cma_event;
\r
921 RtlZeroMemory(evt, sizeof(struct cma_event));
\r
/* Wait for the next completion entry posted to this channel. */
923 ret = CompChannelPoll(&channel->channel, &entry);
\r
/* Recover the id, then its private wrapper, from the completion entry. */
928 id = CONTAINING_RECORD(entry, struct rdma_cm_id, comp_entry);
\r
929 evt->id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
\r
930 evt->event.id = id;
\r
/* Point the public event at the buffer embedded in cma_event. */
931 evt->event.param.conn.private_data = evt->private_data;
\r
932 evt->event.status = id->ep.connect->
\r
933 GetOverlappedResult(&entry->Overlap, &bytes, FALSE);
\r
935 ret = ucma_process_event(evt);
\r
938 *event = &evt->event;
\r
943 __declspec(dllexport)
\r
944 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
\r
947 return WV_NOT_SUPPORTED;
\r
950 __declspec(dllexport)
\r
951 int rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
\r
953 return WV_NOT_SUPPORTED;
\r
956 __declspec(dllexport)
\r
957 const char *rdma_event_str(enum rdma_cm_event_type event)
\r
960 case RDMA_CM_EVENT_ADDR_RESOLVED:
\r
961 return "RDMA_CM_EVENT_ADDR_RESOLVED";
\r
962 case RDMA_CM_EVENT_ADDR_ERROR:
\r
963 return "RDMA_CM_EVENT_ADDR_ERROR";
\r
964 case RDMA_CM_EVENT_ROUTE_RESOLVED:
\r
965 return "RDMA_CM_EVENT_ROUTE_RESOLVED";
\r
966 case RDMA_CM_EVENT_ROUTE_ERROR:
\r
967 return "RDMA_CM_EVENT_ROUTE_ERROR";
\r
968 case RDMA_CM_EVENT_CONNECT_REQUEST:
\r
969 return "RDMA_CM_EVENT_CONNECT_REQUEST";
\r
970 case RDMA_CM_EVENT_CONNECT_RESPONSE:
\r
971 return "RDMA_CM_EVENT_CONNECT_RESPONSE";
\r
972 case RDMA_CM_EVENT_CONNECT_ERROR:
\r
973 return "RDMA_CM_EVENT_CONNECT_ERROR";
\r
974 case RDMA_CM_EVENT_UNREACHABLE:
\r
975 return "RDMA_CM_EVENT_UNREACHABLE";
\r
976 case RDMA_CM_EVENT_REJECTED:
\r
977 return "RDMA_CM_EVENT_REJECTED";
\r
978 case RDMA_CM_EVENT_ESTABLISHED:
\r
979 return "RDMA_CM_EVENT_ESTABLISHED";
\r
980 case RDMA_CM_EVENT_DISCONNECTED:
\r
981 return "RDMA_CM_EVENT_DISCONNECTED";
\r
982 case RDMA_CM_EVENT_DEVICE_REMOVAL:
\r
983 return "RDMA_CM_EVENT_DEVICE_REMOVAL";
\r
984 case RDMA_CM_EVENT_MULTICAST_JOIN:
\r
985 return "RDMA_CM_EVENT_MULTICAST_JOIN";
\r
986 case RDMA_CM_EVENT_MULTICAST_ERROR:
\r
987 return "RDMA_CM_EVENT_MULTICAST_ERROR";
\r
988 case RDMA_CM_EVENT_ADDR_CHANGE:
\r
989 return "RDMA_CM_EVENT_ADDR_CHANGE";
\r
990 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
\r
991 return "RDMA_CM_EVENT_TIMEWAIT_EXIT";
\r
993 return "UNKNOWN EVENT";
\r
997 __declspec(dllexport)
\r
998 int rdma_set_option(struct rdma_cm_id *id, int level, int optname,
\r
999 void *optval, size_t optlen)
\r
1001 return WV_NOT_SUPPORTED;
\r
1004 __declspec(dllexport)
\r
1005 int rdma_migrate_id(struct rdma_cm_id *id, struct rdma_event_channel *channel)
\r
1007 id->channel = channel;
\r