/*
 * Copyright (c) 2005-2009 Intel Corporation. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <windows.h>
#include <winsock2.h>
#include <iphlpapi.h>

#include <rdma/rdma_cma.h>
#include <infiniband/verbs.h>
#include <comp_channel.h>
#include <iba/ibat.h>

#include "..\..\..\etc\user\comp_channel.cpp"
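
/*
 * librdmacm compatibility layer for Windows.  The RDMA CM calls below are
 * implemented on top of the WinVerbs provider (windata.prov): each
 * rdma_cm_id wraps a WinVerbs connect or datagram endpoint, and event
 * delivery is built on the completion-channel helpers compiled in above.
 */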

static struct ibvw_windata windata;

/* id states; the set below is reconstructed from the uses in this file */
enum cma_state
{
	cma_idle,
	cma_listening,
	cma_get_request,
	cma_addr_bind,
	cma_addr_resolve,
	cma_route_resolve,
	cma_passive_connect,
	cma_active_connect,
	cma_accepting,
	cma_connected,
	cma_active_disconnect,
	cma_passive_disconnect,
	cma_disconnected,
	cma_destroying
};

#define CMA_DEFAULT_BACKLOG	16

struct cma_id_private
{
	struct rdma_cm_id	id;
	enum cma_state		state;
	struct cma_device	*cma_dev;
	int					backlog;
	int					index;
	volatile LONG		refcnt;
	struct rdma_cm_id	**req_list;
};

struct cma_device
{
	struct ibv_context	*verbs;
	uint64_t			guid;
	int					port_cnt;
	uint8_t				max_initiator_depth;
	uint8_t				max_responder_resources;
};

struct cma_event
{
	struct rdma_cm_event	event;
	uint8_t					private_data[56];
	struct cma_id_private	*id_priv;
};
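
/*
 * Global device table shared by every id.  ucma_init() fills it lazily on
 * the first CM call, under the global lock; ucma_cleanup() tears it down.
 */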
static struct cma_device *cma_dev_array;
static int cma_dev_cnt;
static CRITICAL_SECTION lock;	/* assumed initialized at DLL load (not shown here) */

static void ucma_cleanup(void)
{
	if (cma_dev_cnt > 0) {
		while (cma_dev_cnt > 0) {
			ibv_close_device(cma_dev_array[--cma_dev_cnt].verbs);
		}
		delete [] cma_dev_array;	/* array form: allocated with new[] */
		cma_dev_array = NULL;
	}

	if (windata.prov != NULL) {
		ibvw_release_windata(&windata, IBVW_WINDATA_VERSION);
		windata.prov = NULL;
	}
}

static int ucma_init(void)
{
	struct ibv_device **dev_list = NULL;
	struct cma_device *cma_dev;
	struct ibv_device_attr attr;
	int i = 0, ret;

	EnterCriticalSection(&lock);
	if (cma_dev_cnt > 0) {
		goto out;	/* already initialized */
	}

	ret = ibvw_get_windata(&windata, IBVW_WINDATA_VERSION);
	if (ret) {
		goto err;
	}

	dev_list = ibv_get_device_list(&cma_dev_cnt);
	if (dev_list == NULL) {
		ret = -1;
		goto err;
	}

	cma_dev_array = new struct cma_device[cma_dev_cnt];
	if (cma_dev_array == NULL) {
		ret = -1;
		goto err;
	}

	for (i = 0; dev_list[i]; ++i) {
		cma_dev = &cma_dev_array[i];

		cma_dev->guid = ibv_get_device_guid(dev_list[i]);
		cma_dev->verbs = ibv_open_device(dev_list[i]);
		if (cma_dev->verbs == NULL) {
			ret = -1;
			goto err;
		}

		ret = ibv_query_device(cma_dev->verbs, &attr);
		if (ret) {
			ibv_close_device(cma_dev->verbs);
			goto err;
		}

		cma_dev->port_cnt = attr.phys_port_cnt;
		cma_dev->max_initiator_depth = (uint8_t) attr.max_qp_init_rd_atom;
		cma_dev->max_responder_resources = (uint8_t) attr.max_qp_rd_atom;
	}
	ibv_free_device_list(dev_list);
out:
	LeaveCriticalSection(&lock);
	return 0;

err:
	/* unwind the devices opened so far; error paths reconstructed */
	while (i-- > 0) {
		ibv_close_device(cma_dev_array[i].verbs);
	}
	delete [] cma_dev_array;
	cma_dev_array = NULL;
	cma_dev_cnt = 0;
	LeaveCriticalSection(&lock);
	if (dev_list) {
		ibv_free_device_list(dev_list);
	}
	return ret;
}

__declspec(dllexport)
struct ibv_context **rdma_get_devices(int *num_devices)
{
	struct ibv_context **devs = NULL;
	int i;

	if (!cma_dev_cnt && ucma_init()) {
		goto out;
	}

	devs = new struct ibv_context *[cma_dev_cnt + 1];
	if (devs == NULL) {
		goto out;
	}

	for (i = 0; i < cma_dev_cnt; i++) {
		devs[i] = cma_dev_array[i].verbs;
	}
	devs[i] = NULL;		/* NULL-terminate the returned list */
out:
	if (num_devices != NULL) {
		*num_devices = devs ? cma_dev_cnt : 0;
	}
	return devs;
}

__declspec(dllexport)
void rdma_free_devices(struct ibv_context **list)
{
	delete [] list;
}

__declspec(dllexport)
struct rdma_event_channel *rdma_create_event_channel(void)
{
	struct rdma_event_channel *channel;

	if (!cma_dev_cnt && ucma_init()) {
		return NULL;
	}

	channel = new struct rdma_event_channel;
	if (channel == NULL) {
		return NULL;
	}

	CompChannelInit(windata.comp_mgr, &channel->channel, INFINITE);
	return channel;
}

__declspec(dllexport)
void rdma_destroy_event_channel(struct rdma_event_channel *channel)
{
	CompChannelCleanup(&channel->channel);
	delete channel;
}

__declspec(dllexport)
int rdma_create_id(struct rdma_event_channel *channel,
				   struct rdma_cm_id **id, void *context,
				   enum rdma_port_space ps)
{
	struct cma_id_private *id_priv;
	HRESULT hr;

	hr = cma_dev_cnt ? 0 : ucma_init();
	if (hr) {
		return hr;
	}

	id_priv = new struct cma_id_private;
	if (id_priv == NULL) {
		return -1;	/* out of memory; original error code not preserved */
	}

	RtlZeroMemory(id_priv, sizeof(struct cma_id_private));
	id_priv->refcnt = 1;
	id_priv->id.context = context;
	id_priv->id.channel = channel;
	id_priv->id.ps = ps;
	CompEntryInit(&channel->channel, &id_priv->id.comp_entry);

	if (ps == RDMA_PS_TCP) {
		hr = windata.prov->CreateConnectEndpoint(&id_priv->id.ep.connect);
	} else {
		hr = windata.prov->CreateDatagramEndpoint(&id_priv->id.ep.datagram);
	}
	if (FAILED(hr)) {
		delete id_priv;
		return hr;
	}

	*id = &id_priv->id;
	return 0;
}

static void ucma_destroy_listen(struct cma_id_private *id_priv)
{
	while (--id_priv->backlog >= 0) {
		if (id_priv->req_list[id_priv->backlog] != NULL) {
			InterlockedDecrement(&id_priv->refcnt);
			rdma_destroy_id(id_priv->req_list[id_priv->backlog]);
		}
	}

	delete [] id_priv->req_list;	/* array form: allocated with new[] */
}

__declspec(dllexport)
int rdma_destroy_id(struct rdma_cm_id *id)
{
	struct cma_id_private *id_priv;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);

	EnterCriticalSection(&lock);
	id_priv->state = cma_destroying;
	LeaveCriticalSection(&lock);

	if (id->ps == RDMA_PS_TCP) {
		id->ep.connect->CancelOverlappedRequests();
	} else {
		id->ep.datagram->CancelOverlappedRequests();
	}

	/* a canceled completion releases the reference its pending op held */
	if (CompEntryCancel(&id->comp_entry) != NULL) {
		InterlockedDecrement(&id_priv->refcnt);
	}

	if (id_priv->backlog > 0) {
		ucma_destroy_listen(id_priv);
	}

	if (id_priv->id.ps == RDMA_PS_TCP) {
		id_priv->id.ep.connect->Release();
	} else {
		id_priv->id.ep.datagram->Release();
	}

	InterlockedDecrement(&id_priv->refcnt);
	while (id_priv->refcnt) {
		Sleep(0);	/* wait for in-flight event processing to release the id */
	}
	delete id_priv;
	return 0;
}

static int ucma_addrlen(struct sockaddr *addr)
{
	if (addr->sa_family == PF_INET) {
		return sizeof(struct sockaddr_in);
	} else {
		return sizeof(struct sockaddr_in6);
	}
}

static int ucma_get_device(struct cma_id_private *id_priv, uint64_t guid)
{
	struct cma_device *cma_dev;
	int i;

	for (i = 0; i < cma_dev_cnt; i++) {
		cma_dev = &cma_dev_array[i];
		if (cma_dev->guid == guid) {
			id_priv->cma_dev = cma_dev;
			id_priv->id.verbs = cma_dev->verbs;
			return 0;
		}
	}

	return -1;	/* no local device matches the endpoint's GUID */
}

static int ucma_query_connect(struct rdma_cm_id *id, struct rdma_conn_param *param)
{
	struct cma_id_private *id_priv;
	WV_CONNECT_ATTRIBUTES attr;
	HRESULT hr;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	hr = id->ep.connect->Query(&attr);
	if (FAILED(hr)) {
		return hr;
	}

	RtlCopyMemory(&id->route.addr.src_addr, &attr.LocalAddress,
				  sizeof attr.LocalAddress);
	RtlCopyMemory(&id->route.addr.dst_addr, &attr.PeerAddress,
				  sizeof attr.PeerAddress);

	if (param != NULL) {
		RtlCopyMemory((void *) param->private_data, attr.Param.Data,
					  attr.Param.DataLength);
		param->private_data_len = (uint8_t) attr.Param.DataLength;
		param->responder_resources = (uint8_t) attr.Param.ResponderResources;
		param->initiator_depth = (uint8_t) attr.Param.InitiatorDepth;
		param->flow_control = 1;
		param->retry_count = attr.Param.RetryCount;
		param->rnr_retry_count = attr.Param.RnrRetryCount;
	}

	if (id_priv->cma_dev == NULL && attr.Device.DeviceGuid != 0) {
		hr = ucma_get_device(id_priv, attr.Device.DeviceGuid);
		if (hr) {
			return hr;
		}

		id->route.addr.addr.ibaddr.pkey = attr.Device.Pkey;
		id_priv->id.port_num = attr.Device.PortNumber;
	}

	return 0;
}

static int ucma_query_datagram(struct rdma_cm_id *id, struct rdma_ud_param *param)
{
	struct cma_id_private *id_priv;
	WV_DATAGRAM_ATTRIBUTES attr;
	HRESULT hr;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	hr = id->ep.datagram->Query(&attr);
	if (FAILED(hr)) {
		return hr;
	}

	RtlCopyMemory(&id->route.addr.src_addr, &attr.LocalAddress,
				  sizeof attr.LocalAddress);
	RtlCopyMemory(&id->route.addr.dst_addr, &attr.PeerAddress,
				  sizeof attr.PeerAddress);

	if (param != NULL) {
		RtlCopyMemory((void *) param->private_data, attr.Param.Data,
					  attr.Param.DataLength);
		param->private_data_len = (uint8_t) attr.Param.DataLength;
		// ucma_convert_av(&attr.Param.AddressVector, param->ah_attr)
		param->qp_num = attr.Param.Qpn;
		param->qkey = attr.Param.Qkey;
	}

	if (id_priv->cma_dev == NULL && attr.Device.DeviceGuid != 0) {
		hr = ucma_get_device(id_priv, attr.Device.DeviceGuid);
		if (hr) {
			return hr;
		}

		id->route.addr.addr.ibaddr.pkey = attr.Device.Pkey;
		id_priv->id.port_num = attr.Device.PortNumber;
	}

	return 0;
}

__declspec(dllexport)
int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
{
	struct cma_id_private *id_priv;
	HRESULT hr;

	if (id->ps == RDMA_PS_TCP) {
		hr = id->ep.connect->BindAddress(addr);
		if (SUCCEEDED(hr)) {
			hr = ucma_query_connect(id, NULL);
		}
	} else {
		hr = id->ep.datagram->BindAddress(addr);
		if (SUCCEEDED(hr)) {
			hr = ucma_query_datagram(id, NULL);
		}
	}

	if (SUCCEEDED(hr)) {
		id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
		id_priv->state = cma_addr_bind;
	}
	return hr;
}

__declspec(dllexport)
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
					  struct sockaddr *dst_addr, int timeout_ms)
{
	struct cma_id_private *id_priv;
	WV_SOCKADDR addr;	/* sockaddr union from winverbs.h, provides .Sa */
	SOCKET s;
	DWORD size;
	HRESULT hr;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	if (id_priv->state == cma_idle) {
		if (src_addr == NULL) {
			/* let the IP stack pick a source: query the route to dst */
			if (id->ps == RDMA_PS_TCP) {
				s = socket(dst_addr->sa_family, SOCK_STREAM, IPPROTO_TCP);
			} else {
				s = socket(dst_addr->sa_family, SOCK_DGRAM, IPPROTO_UDP);
			}
			if (s == INVALID_SOCKET) {
				return WSAGetLastError();
			}

			hr = WSAIoctl(s, SIO_ROUTING_INTERFACE_QUERY, dst_addr, ucma_addrlen(dst_addr),
						  &addr, sizeof addr, &size, NULL, NULL);
			closesocket(s);
			if (hr) {
				return WSAGetLastError();
			}
			src_addr = &addr.Sa;
		}

		hr = rdma_bind_addr(id, src_addr);
		if (FAILED(hr)) {
			return hr;
		}
	}

	RtlCopyMemory(&id->route.addr.dst_addr, dst_addr, ucma_addrlen(dst_addr));
	id_priv->state = cma_addr_resolve;

	/* resolution is synchronous here; post the completion directly */
	InterlockedIncrement(&id_priv->refcnt);
	CompEntryPost(&id->comp_entry);
	return 0;
}

__declspec(dllexport)
int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
{
	struct cma_id_private *id_priv;
	IBAT_PATH_BLOB path;
	HRESULT hr;

	hr = IBAT::Resolve(&id->route.addr.src_addr, &id->route.addr.dst_addr, &path);
	if (FAILED(hr)) {
		return hr;
	}

	hr = (id->ps == RDMA_PS_TCP) ?
		 id->ep.connect->Modify(WV_EP_OPTION_ROUTE, &path, sizeof path) :
		 id->ep.datagram->Modify(WV_EP_OPTION_ROUTE, &path, sizeof path);
	if (FAILED(hr)) {
		return hr;
	}

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	id_priv->state = cma_route_resolve;

	InterlockedIncrement(&id_priv->refcnt);
	CompEntryPost(&id->comp_entry);
	return 0;
}

static int ucma_modify_qp_init(struct cma_id_private *id_priv, struct ibv_qp *qp)
{
	struct ibv_qp_attr qp_attr;
	UINT16 index;
	HRESULT hr;

	RtlZeroMemory(&qp_attr, sizeof qp_attr);
	qp_attr.qp_state = IBV_QPS_INIT;
	qp_attr.port_num = id_priv->id.port_num;
	hr = qp->context->cmd_if->FindPkey(id_priv->id.port_num,
									   id_priv->id.route.addr.addr.ibaddr.pkey,
									   &index);
	if (FAILED(hr)) {
		return hr;
	}

	qp_attr.pkey_index = index;
	return ibv_modify_qp(qp, &qp_attr, (enum ibv_qp_attr_mask)
						 (IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT));
}

static int ucma_init_ud_qp(struct cma_id_private *id_priv, struct ibv_qp *qp)
{
	struct ibv_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ucma_modify_qp_init(id_priv, qp);
	if (ret) {
		return ret;
	}

	qp_attr.qp_state = IBV_QPS_RTR;
	ret = ibv_modify_qp(qp, &qp_attr, IBV_QP_STATE);
	if (ret) {
		return ret;
	}

	qp_attr.qp_state = IBV_QPS_RTS;
	qp_attr.sq_psn = 0;
	return ibv_modify_qp(qp, &qp_attr, (enum ibv_qp_attr_mask)
						 (IBV_QP_STATE | IBV_QP_SQ_PSN));
}
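
/*
 * rdma_create_qp() below moves a new connected QP to INIT only; the
 * remaining RTR/RTS transitions are driven by the CM exchange.  UD QPs
 * have no CM handshake, so ucma_init_ud_qp() takes them straight to RTS.
 */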
__declspec(dllexport)
int rdma_create_qp(struct rdma_cm_id *id, struct ibv_pd *pd,
				   struct ibv_qp_init_attr *qp_init_attr)
{
	struct cma_id_private *id_priv;
	struct ibv_qp *qp;
	int ret;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	if (id->verbs != pd->context) {
		return WV_INVALID_PARAMETER;
	}

	qp = ibv_create_qp(pd, qp_init_attr);
	if (qp == NULL) {
		return -1;
	}

	if (id->ps == RDMA_PS_TCP) {
		ret = ucma_modify_qp_init(id_priv, qp);
	} else {
		ret = ucma_init_ud_qp(id_priv, qp);
	}
	if (ret) {
		goto err;
	}

	id->qp = qp;
	return 0;
err:
	ibv_destroy_qp(qp);
	return ret;
}

__declspec(dllexport)
void rdma_destroy_qp(struct rdma_cm_id *id)
{
	ibv_destroy_qp(id->qp);
}

static int ucma_valid_param(struct cma_id_private *id_priv,
							struct rdma_conn_param *conn_param)
{
	if (id_priv->id.ps != RDMA_PS_TCP) {
		return WV_INVALID_PARAMETER;
	}

	if ((conn_param->responder_resources > id_priv->cma_dev->max_responder_resources) ||
		(conn_param->initiator_depth > id_priv->cma_dev->max_initiator_depth)) {
		return WV_INVALID_PARAMETER;
	}

	return 0;
}

__declspec(dllexport)
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct cma_id_private *id_priv;
	WV_CONNECT_PARAM attr;
	HRESULT hr;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	hr = ucma_valid_param(id_priv, conn_param);
	if (hr) {
		return hr;
	}

	RtlZeroMemory(&attr, sizeof attr);
	attr.ResponderResources = conn_param->responder_resources;
	attr.InitiatorDepth = conn_param->initiator_depth;
	attr.RetryCount = conn_param->retry_count;
	attr.RnrRetryCount = conn_param->rnr_retry_count;
	if ((attr.DataLength = conn_param->private_data_len)) {
		RtlCopyMemory(attr.Data, conn_param->private_data, attr.DataLength);
	}

	id_priv->state = cma_active_connect;

	/* the pending overlapped request holds a reference on the id */
	InterlockedIncrement(&id_priv->refcnt);
	id->comp_entry.Busy = 1;
	hr = id->ep.connect->Connect(id->qp->conn_handle, &id->route.addr.dst_addr,
								 &attr, &id->comp_entry.Overlap);
	if (FAILED(hr) && hr != WV_IO_PENDING) {
		InterlockedDecrement(&id_priv->refcnt);
		id->comp_entry.Busy = 0;
		id_priv->state = cma_route_resolve;
		return hr;
	}

	return 0;
}

static int ucma_get_request(struct cma_id_private *listen, int index)
{
	struct cma_id_private *id_priv = NULL;
	HRESULT hr;

	EnterCriticalSection(&lock);
	if (listen->state != cma_listening) {
		hr = WV_INVALID_PARAMETER;
		goto err1;
	}

	InterlockedIncrement(&listen->refcnt);
	hr = rdma_create_id(listen->id.channel, &listen->req_list[index],
						listen, listen->id.ps);
	if (hr) {
		goto err2;
	}

	id_priv = CONTAINING_RECORD(listen->req_list[index], struct cma_id_private, id);
	id_priv->index = index;
	id_priv->state = cma_get_request;

	InterlockedIncrement(&id_priv->refcnt);
	id_priv->id.comp_entry.Busy = 1;
	if (listen->id.ps == RDMA_PS_TCP) {
		hr = listen->id.ep.connect->GetRequest(id_priv->id.ep.connect,
											   &id_priv->id.comp_entry.Overlap);
	} else {
		hr = listen->id.ep.datagram->GetRequest(id_priv->id.ep.datagram,
												&id_priv->id.comp_entry.Overlap);
	}
	if (FAILED(hr) && hr != WV_IO_PENDING) {
		id_priv->id.comp_entry.Busy = 0;
		InterlockedDecrement(&id_priv->refcnt);
		goto err2;
	}
	LeaveCriticalSection(&lock);
	return 0;

err2:
	InterlockedDecrement(&listen->refcnt);
err1:
	LeaveCriticalSection(&lock);
	if (id_priv != NULL) {
		rdma_destroy_id(&id_priv->id);
	}
	return hr;
}
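
/*
 * Listen model: rdma_listen() below pre-creates one hidden id per backlog
 * slot and posts a GetRequest on each (ucma_get_request above).  When a
 * request completes, ucma_process_conn_req() surfaces it to the caller as
 * a CONNECT_REQUEST event and immediately re-arms the slot.
 */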
__declspec(dllexport)
int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct cma_id_private *id_priv, *req_id;
	HRESULT hr;
	int i;

	if (backlog <= 0) {
		backlog = CMA_DEFAULT_BACKLOG;
	}

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	id_priv->req_list = new struct rdma_cm_id*[backlog];
	if (id_priv->req_list == NULL) {
		return -1;
	}

	RtlZeroMemory(id_priv->req_list, sizeof(struct rdma_cm_id *) * backlog);
	id_priv->backlog = backlog;

	id_priv->state = cma_listening;
	hr = (id->ps == RDMA_PS_TCP) ?
		 id->ep.connect->Listen(backlog) : id->ep.datagram->Listen(backlog);
	if (FAILED(hr)) {
		return hr;
	}

	for (i = 0; i < backlog; i++) {
		hr = ucma_get_request(id_priv, i);
		if (FAILED(hr)) {
			return hr;
		}
	}

	return 0;
}

__declspec(dllexport)
int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
{
	struct cma_id_private *id_priv;
	WV_CONNECT_PARAM attr;
	HRESULT hr;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	hr = ucma_valid_param(id_priv, conn_param);
	if (hr) {
		return hr;
	}

	RtlZeroMemory(&attr, sizeof attr);
	attr.ResponderResources = conn_param->responder_resources;
	attr.InitiatorDepth = conn_param->initiator_depth;
	attr.RetryCount = conn_param->retry_count;
	attr.RnrRetryCount = conn_param->rnr_retry_count;
	if ((attr.DataLength = conn_param->private_data_len)) {
		RtlCopyMemory(attr.Data, conn_param->private_data, attr.DataLength);
	}

	id_priv->state = cma_accepting;

	InterlockedIncrement(&id_priv->refcnt);
	id->comp_entry.Busy = 1;
	hr = id->ep.connect->Accept(id->qp->conn_handle, &attr,
								&id->comp_entry.Overlap);
	if (FAILED(hr) && hr != WV_IO_PENDING) {
		InterlockedDecrement(&id_priv->refcnt);
		id->comp_entry.Busy = 0;
		id_priv->state = cma_disconnected;
		return hr;
	}

	return 0;
}

__declspec(dllexport)
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
				uint8_t private_data_len)
{
	struct cma_id_private *id_priv;
	HRESULT hr;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	id_priv->state = cma_disconnected;
	hr = id->ep.connect->Reject(private_data, private_data_len);
	if (FAILED(hr)) {
		return hr;
	}
	return 0;
}

__declspec(dllexport)
int rdma_notify(struct rdma_cm_id *id, enum ibv_event_type event)
{
	/* no-op on this port; connection progress is driven by WinVerbs */
	return 0;
}

__declspec(dllexport)
int rdma_disconnect(struct rdma_cm_id *id)
{
	struct cma_id_private *id_priv;
	HRESULT hr;

	id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
	if (id_priv->state == cma_connected) {
		id_priv->state = cma_active_disconnect;
	} else {
		id_priv->state = cma_disconnected;
	}
	hr = id->ep.connect->Disconnect(NULL);
	if (FAILED(hr)) {
		return hr;
	}

	return 0;
}

__declspec(dllexport)
int rdma_ack_cm_event(struct rdma_cm_event *event)
{
	struct cma_event *evt;
	struct cma_id_private *listen;

	evt = CONTAINING_RECORD(event, struct cma_event, event);
	InterlockedDecrement(&evt->id_priv->refcnt);
	if (evt->event.listen_id) {
		listen = CONTAINING_RECORD(evt->event.listen_id, struct cma_id_private, id);
		InterlockedDecrement(&listen->refcnt);
	}
	delete evt;
	return 0;
}
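
/*
 * The ucma_process_* helpers below run in the context of
 * rdma_get_cm_event(): they translate a completed WinVerbs overlapped
 * operation into an rdma_cm event, advance the id state machine, and
 * return nonzero when the completion should not be surfaced to the
 * caller (for example the internal Accept posted for an active connect).
 */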
static int ucma_process_conn_req(struct cma_event *event)
{
	struct cma_id_private *listen, *id_priv;

	listen = (struct cma_id_private *) event->id_priv->id.context;
	id_priv = event->id_priv;

	/* re-arm the listen slot this request consumed */
	ucma_get_request(listen, id_priv->index);

	if (SUCCEEDED(event->event.status)) {
		event->event.status = ucma_query_connect(&id_priv->id,
												 &event->event.param.conn);
	}

	if (SUCCEEDED(event->event.status)) {
		event->event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
		id_priv->state = cma_passive_connect;
		event->event.listen_id = &listen->id;
	} else {
		InterlockedDecrement(&listen->refcnt);
		InterlockedDecrement(&id_priv->refcnt);
		rdma_destroy_id(&id_priv->id);
	}

	return event->event.status;
}

static int ucma_process_conn_resp(struct cma_event *event)
{
	struct rdma_cm_id *id;
	WV_CONNECT_PARAM attr;
	HRESULT hr;

	if (FAILED(event->event.status)) {
		goto err;
	}

	RtlZeroMemory(&attr, sizeof(attr));
	event->id_priv->state = cma_accepting;

	id = &event->id_priv->id;
	id->comp_entry.Busy = 1;
	hr = id->ep.connect->Accept(id->qp->conn_handle, &attr,
								&id->comp_entry.Overlap);
	if (FAILED(hr) && hr != WV_IO_PENDING) {
		id->comp_entry.Busy = 0;
		event->event.status = hr;
		goto err;
	}

	/* swallow this completion; the Accept above completes the handshake */
	return WV_IO_PENDING;

err:
	event->event.event = (event->event.status == WV_REJECTED) ?
						 RDMA_CM_EVENT_REJECTED :
						 RDMA_CM_EVENT_CONNECT_ERROR;
	event->id_priv->state = cma_disconnected;
	return 0;
}

static void ucma_process_establish(struct cma_event *event)
{
	struct cma_id_private *id_priv = event->id_priv;

	if (SUCCEEDED(event->event.status)) {
		event->event.status = ucma_query_connect(&id_priv->id,
												 &event->event.param.conn);
	}

	if (SUCCEEDED(event->event.status)) {
		event->event.event = RDMA_CM_EVENT_ESTABLISHED;

		id_priv->state = cma_connected;
		InterlockedIncrement(&id_priv->refcnt);
		id_priv->id.comp_entry.Busy = 1;
		id_priv->id.ep.connect->NotifyDisconnect(&id_priv->id.comp_entry.Overlap);
	} else {
		event->event.event = RDMA_CM_EVENT_CONNECT_ERROR;
		event->id_priv->state = cma_disconnected;
	}
}

static int ucma_process_event(struct cma_event *event)
{
	struct cma_id_private *listen, *id_priv;
	WV_CONNECT_ATTRIBUTES attr;
	HRESULT hr = 0;

	id_priv = event->id_priv;

	EnterCriticalSection(&lock);
	switch (id_priv->state) {
	case cma_get_request:
		listen = (struct cma_id_private *) id_priv->id.context;
		if (listen->state != cma_listening) {
			InterlockedDecrement(&id_priv->refcnt);
			hr = WV_CANCELLED;	/* listen torn down; drop the request */
			break;
		}

		listen->req_list[id_priv->index] = NULL;
		LeaveCriticalSection(&lock);
		return ucma_process_conn_req(event);
	case cma_addr_resolve:
		event->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
		break;
	case cma_route_resolve:
		event->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
		break;
	case cma_active_connect:
		hr = ucma_process_conn_resp(event);
		break;
	case cma_accepting:
		ucma_process_establish(event);
		break;
	case cma_connected:
		event->event.event = RDMA_CM_EVENT_DISCONNECTED;
		id_priv->state = cma_passive_disconnect;
		break;
	case cma_active_disconnect:
		event->event.event = RDMA_CM_EVENT_DISCONNECTED;
		id_priv->state = cma_disconnected;
		break;
	default:
		InterlockedDecrement(&id_priv->refcnt);
		hr = WV_CANCELLED;
		break;
	}
	LeaveCriticalSection(&lock);

	return hr;
}

__declspec(dllexport)
int rdma_get_cm_event(struct rdma_event_channel *channel,
					  struct rdma_cm_event **event)
{
	struct cma_event *evt;
	struct rdma_cm_id *id;
	COMP_ENTRY *entry;
	DWORD bytes;
	int ret;

	evt = new struct cma_event;
	if (evt == NULL) {
		return -1;
	}

	do {
		RtlZeroMemory(evt, sizeof(struct cma_event));

		ret = CompChannelPoll(&channel->channel, &entry);
		if (ret) {
			delete evt;
			return ret;
		}

		id = CONTAINING_RECORD(entry, struct rdma_cm_id, comp_entry);
		evt->id_priv = CONTAINING_RECORD(id, struct cma_id_private, id);
		evt->event.id = id;
		evt->event.param.conn.private_data = evt->private_data;
		/* both endpoint types share the overlapped interface, so the
		   connect view of the union works for datagram ids as well */
		evt->event.status = id->ep.connect->
							GetOverlappedResult(&entry->Overlap, &bytes, FALSE);

		ret = ucma_process_event(evt);
	} while (ret);	/* loop while completions are swallowed internally */

	*event = &evt->event;
	return 0;
}
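
/*
 * Illustrative active-side calling sequence for this library; a sketch
 * only (not exported).  Error handling and CQ/QP attribute setup are
 * elided, and dst_addr, pd, and qp_init_attr are placeholders:
 *
 *	struct rdma_event_channel *ch = rdma_create_event_channel();
 *	struct rdma_cm_id *id;
 *	struct rdma_cm_event *event;
 *	struct rdma_conn_param param;
 *
 *	rdma_create_id(ch, &id, NULL, RDMA_PS_TCP);
 *	rdma_resolve_addr(id, NULL, dst_addr, 2000);	// -> ADDR_RESOLVED
 *	rdma_get_cm_event(ch, &event); rdma_ack_cm_event(event);
 *	rdma_resolve_route(id, 2000);			// -> ROUTE_RESOLVED
 *	rdma_get_cm_event(ch, &event); rdma_ack_cm_event(event);
 *	rdma_create_qp(id, pd, &qp_init_attr);		// QP moves to INIT
 *	RtlZeroMemory(&param, sizeof param);
 *	rdma_connect(id, &param);			// -> ESTABLISHED
 *	rdma_get_cm_event(ch, &event); rdma_ack_cm_event(event);
 */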
__declspec(dllexport)
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
						void *context)
{
	return WV_NOT_SUPPORTED;
}

__declspec(dllexport)
int rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
{
	return WV_NOT_SUPPORTED;
}

__declspec(dllexport)
const char *rdma_event_str(enum rdma_cm_event_type event)
{
	switch (event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		return "RDMA_CM_EVENT_ADDR_RESOLVED";
	case RDMA_CM_EVENT_ADDR_ERROR:
		return "RDMA_CM_EVENT_ADDR_ERROR";
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		return "RDMA_CM_EVENT_ROUTE_RESOLVED";
	case RDMA_CM_EVENT_ROUTE_ERROR:
		return "RDMA_CM_EVENT_ROUTE_ERROR";
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		return "RDMA_CM_EVENT_CONNECT_REQUEST";
	case RDMA_CM_EVENT_CONNECT_RESPONSE:
		return "RDMA_CM_EVENT_CONNECT_RESPONSE";
	case RDMA_CM_EVENT_CONNECT_ERROR:
		return "RDMA_CM_EVENT_CONNECT_ERROR";
	case RDMA_CM_EVENT_UNREACHABLE:
		return "RDMA_CM_EVENT_UNREACHABLE";
	case RDMA_CM_EVENT_REJECTED:
		return "RDMA_CM_EVENT_REJECTED";
	case RDMA_CM_EVENT_ESTABLISHED:
		return "RDMA_CM_EVENT_ESTABLISHED";
	case RDMA_CM_EVENT_DISCONNECTED:
		return "RDMA_CM_EVENT_DISCONNECTED";
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		return "RDMA_CM_EVENT_DEVICE_REMOVAL";
	case RDMA_CM_EVENT_MULTICAST_JOIN:
		return "RDMA_CM_EVENT_MULTICAST_JOIN";
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		return "RDMA_CM_EVENT_MULTICAST_ERROR";
	case RDMA_CM_EVENT_ADDR_CHANGE:
		return "RDMA_CM_EVENT_ADDR_CHANGE";
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		return "RDMA_CM_EVENT_TIMEWAIT_EXIT";
	default:
		return "UNKNOWN EVENT";
	}
}

__declspec(dllexport)
int rdma_set_option(struct rdma_cm_id *id, int level, int optname,
					void *optval, size_t optlen)
{
	return WV_NOT_SUPPORTED;
}

__declspec(dllexport)
int rdma_migrate_id(struct rdma_cm_id *id, struct rdma_event_channel *channel)
{
	id->channel = channel;
	return 0;
}