/*
 * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
32 #include "ibspdll.h"
\r
35 static void ib_destroy_cq_tinfo( struct cq_thread_info *cq_tinfo );
\r
38 typedef struct _io_comp_info
\r
41 LPWSAOVERLAPPED p_ov;
\r
42 atomic32_t *p_io_cnt;
\r
47 /* Work queue entry completion routine. */
\r
50 IN const ib_wc_t *wc,
\r
51 OUT io_comp_info_t *p_io_info )
\r
53 struct _wr *wr = NULL;
\r
54 struct _recv_wr *p_recv_wr = NULL;
\r
55 LPWSAOVERLAPPED lpOverlapped = NULL;
\r
56 struct ibsp_socket_info *socket_info = NULL;
\r
58 IBSP_ENTER( IBSP_DBG_IO );
\r
60 wr = (struct _wr * __ptr64)wc->wr_id;
\r
61 p_recv_wr = (struct _recv_wr * __ptr64)wc->wr_id;
\r
65 socket_info = wr->socket_info;
\r
66 p_io_info->socket = socket_info->switch_socket;
\r
68 lpOverlapped = wr->lpOverlapped;
\r
70 IBSP_TRACE4( IBSP_DBG_IO,
\r
71 ("socket %p, ov %p, work completion status=%s, wc_type=%s\n",
\r
72 socket_info, lpOverlapped, ib_get_wc_status_str( wc->status ),
\r
73 ib_get_wc_type_str( wc->wc_type )) );
\r
75 /* Set the windows error code. It's not easy to find an easy
\r
76 * correspondence between the IBAL error codes and windows error
\r
77 * codes; but it probably does not matter, as long as it returns an
\r
79 switch( wc->status )
\r
81 case IB_WCS_SUCCESS:
\r
83 * Set the length of the operation. Under Infiniband, the work
\r
84 * completion length is only valid for a receive
\r
85 * operation. Fortunately we had already set the length during the
\r
88 * lpWPUCompleteOverlappedRequest is supposed to store the length
\r
89 * into InternalHigh, however it will not be called if the low
\r
90 * order bit of lpOverlapped->hEvent is set. So we do it and hope
\r
93 * NOTE: Without a valid length, the switch doesn't seem to call
\r
94 * GetOverlappedResult() even if we call lpWPUCompleteOverlappedRequest()
\r
96 if( wc->wc_type == IB_WC_RECV )
\r
97 lpOverlapped->InternalHigh = wc->length;
\r
99 lpOverlapped->OffsetHigh = 0;
\r
102 case IB_WCS_WR_FLUSHED_ERR:
\r
103 cl_spinlock_acquire( &socket_info->mutex );
\r
105 if( socket_info->socket_state == IBSP_DUPLICATING_REMOTE &&
\r
106 wc->wc_type == IB_WC_RECV )
\r
109 * Take the wr off the wr_list, and place onto the
\r
110 * dup_wr_list. We will post them later on the new QP.
\r
112 cl_spinlock_acquire( &socket_info->recv_lock );
\r
114 /* Copy to the duplicate WR array. */
\r
115 socket_info->dup_wr[socket_info->dup_idx] = *p_recv_wr;
\r
117 #if QP_ATTRIB_RQ_DEPTH == 256 || QP_ATTRIB_RQ_DEPTH == 128 || \
\r
118 QP_ATTRIB_RQ_DEPTH == 64 || QP_ATTRIB_RQ_DEPTH == 32 || \
\r
119 QP_ATTRIB_RQ_DEPTH == 16 || QP_ATTRIB_RQ_DEPTH == 8
\r
120 socket_info->dup_idx++;
\r
121 socket_info->dup_idx &= (QP_ATTRIB_RQ_DEPTH - 1);
\r
123 if( ++socket_info->dup_idx == QP_ATTRIB_RQ_DEPTH )
\r
124 socket_info->dup_idx = 0;
\r
127 cl_atomic_inc( &socket_info->dup_cnt );
\r
128 /* ib_cq_comp will decrement the receive count. */
\r
129 p_io_info->p_io_cnt = &socket_info->recv_cnt;
\r
131 cl_spinlock_release( &socket_info->recv_lock );
\r
133 cl_spinlock_release( &socket_info->mutex );
\r
134 IBSP_EXIT( IBSP_DBG_IO );
\r
138 /* Check for flushing the receive buffers on purpose. */
\r
139 if( socket_info->socket_state == IBSP_DUPLICATING_OLD )
\r
140 wr->lpOverlapped->OffsetHigh = 0;
\r
142 wr->lpOverlapped->OffsetHigh = WSA_OPERATION_ABORTED;
\r
144 cl_spinlock_release( &socket_info->mutex );
\r
146 /* Override the length, as per the WSD specs. */
\r
147 wr->lpOverlapped->InternalHigh = 0;
\r
150 case IB_WCS_LOCAL_LEN_ERR:
\r
151 case IB_WCS_LOCAL_OP_ERR:
\r
152 case IB_WCS_LOCAL_PROTECTION_ERR:
\r
153 case IB_WCS_MEM_WINDOW_BIND_ERR:
\r
154 case IB_WCS_REM_ACCESS_ERR:
\r
155 case IB_WCS_REM_OP_ERR:
\r
156 case IB_WCS_RNR_RETRY_ERR:
\r
157 case IB_WCS_TIMEOUT_RETRY_ERR:
\r
158 case IB_WCS_REM_INVALID_REQ_ERR:
\r
160 IBSP_ERROR( ("%s error: %s\n",
\r
161 ib_get_wc_type_str( wc->wc_type ),
\r
162 ib_get_wc_status_str( wc->status )) );
\r
163 lpOverlapped->OffsetHigh = WSAECONNABORTED;
\r
164 wr->lpOverlapped->InternalHigh = 0;
\r
165 socket_info->qp_error = WSAECONNABORTED;
\r
170 if( wc->wc_type == IB_WC_RECV )
\r
172 // This code requires the recv count to be decremented here, but it needs
\r
173 // to be decremented after any callbacks are invoked so socket destruction
\r
174 // gets delayed until all callbacks have been invoked.
\r
178 // cl_spinlock_acquire( &socket_info->recv_lock );
\r
179 // idx = socket_info->recv_idx - (uint8_t)socket_info->recv_cnt;
\r
180 // if( idx >= QP_ATTRIB_RQ_DEPTH )
\r
181 // idx += QP_ATTRIB_RQ_DEPTH;
\r
183 // CL_ASSERT( wc->wr_id == (uint64_t)(void* __ptr64)&socket_info->recv_wr[idx] );
\r
184 // cl_atomic_dec( &socket_info->recv_cnt );
\r
185 // cl_spinlock_release( &socket_info->recv_lock );
\r
188 if( wc->status == IB_SUCCESS && p_recv_wr->ds_array[0].length >= 40 )
\r
190 debug_dump_buffer( IBSP_DBG_WQ | IBSP_DBG_LEVEL4, "RECV",
\r
191 (void * __ptr64)p_recv_wr->ds_array[0].vaddr, 40 );
\r
194 cl_atomic_dec( &g_ibsp.recv_count );
\r
195 cl_atomic_inc( &socket_info->recv_comp );
\r
197 memset( p_recv_wr, 0x33, sizeof(struct _recv_wr) );
\r
201 // This code requires the send count to be decremented here, but it needs
\r
202 // to be decremented after any callbacks are invoked so socket destruction
\r
203 // gets delayed until all callbacks have been invoked.
\r
207 // cl_spinlock_acquire( &socket_info->send_lock );
\r
208 // idx = socket_info->send_idx - (uint8_t)socket_info->send_cnt;
\r
209 // if( idx >= QP_ATTRIB_SQ_DEPTH )
\r
210 // idx += QP_ATTRIB_SQ_DEPTH;
\r
211 // CL_ASSERT( wc->wr_id == (uint64_t)(void* __ptr64)&socket_info->send_wr[idx] );
\r
212 // cl_atomic_dec( &socket_info->send_cnt );
\r
213 // cl_spinlock_release( &socket_info->send_lock );
\r
216 if( wc->wc_type == IB_WC_SEND )
\r
218 cl_atomic_dec( &g_ibsp.send_count );
\r
219 cl_atomic_inc( &socket_info->send_comp );
\r
221 fzprint(("%s():%d:0x%x:0x%x: send_count=%d\n",
\r
223 __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), g_ibsp.send_count));
\r
226 memset( wr, 0x33, sizeof(struct _wr) );
\r
230 IBSP_TRACE4( IBSP_DBG_IO,
\r
231 ("overlapped=%p, InternalHigh=%d, hEvent=%x\n",
\r
232 lpOverlapped, lpOverlapped->InternalHigh,
\r
233 (uintptr_t) lpOverlapped->hEvent) );
\r
235 /* Don't notify the switch for that completion only if:
\r
236 * - the switch don't want a notification
\r
237 * - the wq completed with success
\r
238 * - the socket is still connected
\r
240 if( ((uintptr_t) lpOverlapped->hEvent) & 0x00000001 )
\r
242 /* Indicate this operation is complete. The switch will poll
\r
243 * with calls to WSPGetOverlappedResult(). */
\r
246 cl_atomic_dec( &g_ibsp.overlap_h1_comp_count );
\r
248 fzprint(("%s():%d:0x%x:0x%x: ov=0x%p h0=%d h1=%d h1_c=%d send=%d recv=%d\n",
\r
249 __FUNCTION__, __LINE__, GetCurrentProcessId(),
\r
250 GetCurrentThreadId(), lpOverlapped,
\r
251 g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count,
\r
252 g_ibsp.overlap_h1_comp_count, g_ibsp.send_count, g_ibsp.recv_count));
\r
255 IBSP_TRACE1( IBSP_DBG_IO,
\r
256 ("Not calling lpWPUCompleteOverlappedRequest: "
\r
257 "socket=%p, ov=%p OffsetHigh=%d, InternalHigh=%d hEvent=%p\n",
\r
258 socket_info, lpOverlapped, lpOverlapped->OffsetHigh,
\r
259 lpOverlapped->InternalHigh, lpOverlapped->hEvent) );
\r
261 lpOverlapped->Internal = 0;
\r
262 p_io_info->p_ov = NULL;
\r
267 cl_atomic_dec( &g_ibsp.overlap_h0_count );
\r
269 fzprint(("%s():%d:0x%x:0x%x: ov=0x%p h0=%d h1=%d h1_c=%d send=%d recv=%d\n",
\r
270 __FUNCTION__, __LINE__, GetCurrentProcessId(),
\r
271 GetCurrentThreadId(), lpOverlapped,
\r
272 g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count,
\r
273 g_ibsp.overlap_h1_comp_count, g_ibsp.send_count, g_ibsp.recv_count));
\r
276 IBSP_TRACE1( IBSP_DBG_IO,
\r
277 ("Calling lpWPUCompleteOverlappedRequest: "
\r
278 "socket=%p, ov=%p OffsetHigh=%d InternalHigh=%d hEvent=%p\n",
\r
279 socket_info, lpOverlapped, lpOverlapped->OffsetHigh,
\r
280 lpOverlapped->InternalHigh, lpOverlapped->hEvent) );
\r
282 p_io_info->p_ov = lpOverlapped;
\r
285 if( wc->wc_type == IB_WC_RECV )
\r
286 p_io_info->p_io_cnt = &socket_info->recv_cnt;
\r
288 p_io_info->p_io_cnt = &socket_info->send_cnt;
\r
290 IBSP_EXIT( IBSP_DBG_IO );
\r
294 /* CQ completion handler. */
\r
299 struct cq_thread_info *cq_tinfo = cq_context;
\r
300 ib_api_status_t status;
\r
301 ib_wc_t wclist[WC_LIST_SIZE];
\r
302 ib_wc_t *free_wclist;
\r
303 ib_wc_t *done_wclist;
\r
304 io_comp_info_t info[WC_LIST_SIZE];
\r
311 CL_ENTER( IBSP_DBG_WQ, gdbg_lvl );
\r
313 CL_ASSERT( WC_LIST_SIZE >= 1 );
\r
317 /* Try to retrieve up to WC_LIST_SIZE completions at a time. */
\r
318 for( i = 0; i < (WC_LIST_SIZE - 1); i++ )
\r
320 wclist[i].p_next = &wclist[i + 1];
\r
322 wclist[(WC_LIST_SIZE - 1)].p_next = NULL;
\r
324 free_wclist = &wclist[0];
\r
325 done_wclist = NULL;
\r
327 status = ib_poll_cq( cq_tinfo->cq, &free_wclist, &done_wclist );
\r
329 CL_TRACE( IBSP_DBG_WQ, gdbg_lvl,
\r
330 ("%s():%d:0x%x:0x%x: poll CQ got status %d, free=%p, done=%p\n",
\r
331 __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(),
\r
332 status, free_wclist, done_wclist) );
\r
340 case IB_INVALID_CQ_HANDLE:
\r
341 /* This happens when the switch closes the socket while the
\r
342 * execution thread was calling lpWPUCompleteOverlappedRequest. */
\r
343 CL_ERROR( IBSP_DBG_WQ, gdbg_lvl,
\r
344 ("ib_poll_cq returned IB_INVLALID_CQ_HANDLE\n") );
\r
348 CL_ERROR( IBSP_DBG_WQ, gdbg_lvl,
\r
349 ("ib_poll_cq failed returned %s\n", ib_get_err_str( status )) );
\r
357 /* We have some completions. */
\r
359 while( done_wclist )
\r
364 complete_wq( done_wclist, &info[cb_idx++] );
\r
366 done_wclist = done_wclist->p_next;
\r
374 if( info[cb_idx].p_ov )
\r
376 ret = g_ibsp.up_call_table.lpWPUCompleteOverlappedRequest(
\r
377 info[cb_idx].socket, info[cb_idx].p_ov,
\r
378 info[cb_idx].p_ov->OffsetHigh,
\r
379 (DWORD)info[cb_idx].p_ov->InternalHigh, &error );
\r
382 IBSP_ERROR( ("WPUCompleteOverlappedRequest for ov=%p "
\r
383 "returned %d err %d\n", info[cb_idx].p_ov, ret, error) );
\r
387 cl_atomic_dec( info[cb_idx].p_io_cnt );
\r
391 if( comp_count > g_ibsp.max_comp_count )
\r
393 g_ibsp.max_comp_count = comp_count;
\r
396 } while( !free_wclist );
\r
398 status = ib_rearm_cq( cq_tinfo->cq, FALSE );
\r
399 if( status != IB_SUCCESS )
\r
401 CL_ERROR( IBSP_DBG_WQ, gdbg_lvl,
\r
402 ("ib_rearm_cq returned %s)\n", ib_get_err_str( status )) );
\r
408 fzprint(("%s():%d:0x%x:0x%x: overlap_h0_count=%d overlap_h1_count=%d\n",
\r
410 __LINE__, GetCurrentProcessId(),
\r
411 GetCurrentThreadId(), g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count));
\r
414 CL_EXIT( IBSP_DBG_WQ, gdbg_lvl );
\r
418 /* IB completion thread */
\r
419 static DWORD WINAPI
\r
421 LPVOID lpParameter )
\r
423 struct cq_thread_info *cq_tinfo = (struct cq_thread_info *)lpParameter;
\r
424 cl_status_t cl_status;
\r
426 CL_ENTER( IBSP_DBG_HW, gdbg_lvl );
\r
429 fzprint(("%s():%d:0x%x:0x%x: cq_tinfo=0x%p\n", __FUNCTION__,
\r
430 __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), cq_tinfo));
\r
434 cl_status = cl_waitobj_wait_on( cq_tinfo->cq_waitobj, EVENT_NO_TIMEOUT, TRUE );
\r
435 if( cl_status != CL_SUCCESS )
\r
437 CL_ERROR( IBSP_DBG_EP, gdbg_lvl,
\r
438 ("cl_waitobj_wait_on() (%d)\n", cl_status) );
\r
442 * TODO: By rearanging thread creation and cq creation, this check
\r
443 * may be eliminated.
\r
445 if( cq_tinfo->cq != NULL )
\r
447 fzprint(("%s():%d:0x%x:0x%x: Calling ib_cq_comp().\n", __FUNCTION__,
\r
448 __LINE__, GetCurrentProcessId(), GetCurrentThreadId()));
\r
450 ib_cq_comp( cq_tinfo );
\r
451 fzprint(("%s():%d:0x%x:0x%x: Done calling ib_cq_comp().\n", __FUNCTION__,
\r
452 __LINE__, GetCurrentProcessId(), GetCurrentThreadId()));
\r
455 } while( (cq_tinfo->ib_cq_thread_exit_wanted != TRUE) ||
\r
456 cl_qlist_count( &cq_tinfo->done_wr_list ) );
\r
458 cl_status = cl_waitobj_destroy( cq_tinfo->cq_waitobj );
\r
459 if( cl_status != CL_SUCCESS )
\r
461 CL_ERROR( IBSP_DBG_EP, gdbg_lvl, ("cl_waitobj_destroy() (%d)\n", cl_status) );
\r
463 HeapFree( g_ibsp.heap, 0, cq_tinfo );
\r
465 /* No special exit code, even on errors. */
\r
466 CL_EXIT( IBSP_DBG_HW, gdbg_lvl );
\r
471 static struct cq_thread_info *
\r
473 struct ibsp_hca *hca )
\r
475 struct cq_thread_info *cq_tinfo = NULL;
\r
476 ib_cq_create_t cq_create;
\r
477 ib_api_status_t status;
\r
478 cl_status_t cl_status;
\r
481 CL_ENTER( IBSP_DBG_HW, gdbg_lvl );
\r
483 cq_tinfo = HeapAlloc( g_ibsp.heap, HEAP_ZERO_MEMORY, sizeof(struct cq_thread_info) );
\r
485 if( cq_tinfo == NULL )
\r
487 CL_ERROR( IBSP_DBG_EP, gdbg_lvl, ("HeapAlloc() Failed.\n") );
\r
492 cl_status = cl_waitobj_create( FALSE, &cq_tinfo->cq_waitobj );
\r
493 if( cl_status != CL_SUCCESS )
\r
495 cq_tinfo->cq_waitobj = NULL;
\r
496 CL_ERROR( IBSP_DBG_EP, gdbg_lvl, ("cl_waitobj_create() (%d)\n", cl_status) );
\r
501 cq_tinfo->hca = hca;
\r
502 cq_tinfo->ib_cq_thread_exit_wanted = FALSE;
\r
504 /* Create a cleanup thread */
\r
505 cq_tinfo->ib_cq_thread = CreateThread( NULL, 0, ib_cq_thread, cq_tinfo, 0, (LPDWORD)&cq_tinfo->ib_cq_thread_id );
\r
507 if( cq_tinfo->ib_cq_thread == NULL )
\r
509 CL_ERROR( IBSP_DBG_HW, gdbg_lvl, ("CreateThread failed.") );
\r
514 STAT_INC( thread_num );
\r
516 /* Completion queue */
\r
517 cq_create.size = IB_CQ_SIZE;
\r
519 cq_create.pfn_comp_cb = NULL;
\r
520 cq_create.h_wait_obj = cq_tinfo->cq_waitobj;
\r
522 status = ib_create_cq( hca->hca_handle, &cq_create, cq_tinfo, /* context */
\r
523 NULL, /* async handler */
\r
527 CL_ERROR( IBSP_DBG_EP, gdbg_lvl, ("ib_create_cq failed (%d)\n", status) );
\r
532 STAT_INC( cq_num );
\r
534 status = ib_rearm_cq( cq_tinfo->cq, FALSE );
\r
537 CL_ERROR( IBSP_DBG_EP, gdbg_lvl, ("ib_rearm_cq failed (%d)\n", status) );
\r
542 cl_spinlock_init( &cq_tinfo->wr_mutex );
\r
543 cl_qlist_init( &cq_tinfo->done_wr_list );
\r
544 cq_tinfo->cqe_size = IB_CQ_SIZE;
\r
546 /* Only one CQ per HCA now */
\r
547 hca->cq_tinfo = cq_tinfo;
\r
552 if( error == TRUE )
\r
554 ib_destroy_cq_tinfo( cq_tinfo );
\r
558 CL_EXIT( IBSP_DBG_HW, gdbg_lvl );
\r
565 ib_destroy_cq_tinfo(
\r
566 struct cq_thread_info *cq_tinfo )
\r
569 ib_wc_t *free_wclist;
\r
570 ib_wc_t *done_wclist;
\r
571 ib_api_status_t status;
\r
572 HANDLE h_cq_thread;
\r
574 CL_ENTER( IBSP_DBG_HW, gdbg_lvl );
\r
576 if( cq_tinfo == NULL )
\r
583 wclist.p_next = NULL;
\r
584 free_wclist = &wclist;
\r
586 while( ib_poll_cq( cq_tinfo->cq, &free_wclist, &done_wclist ) == IB_SUCCESS )
\r
588 CL_TRACE( IBSP_DBG_WQ, gdbg_lvl, ("%s():%d:0x%x:0x%x: free=%p, done=%p\n",
\r
590 __LINE__, GetCurrentProcessId(),
\r
591 GetCurrentThreadId(),
\r
592 free_wclist, done_wclist) );
\r
595 CL_TRACE( IBSP_DBG_WQ, gdbg_lvl, ("%s():%d:0x%x:0x%x: ib_destroy_cq() start..\n",
\r
597 __LINE__, GetCurrentProcessId(),
\r
598 GetCurrentThreadId()) );
\r
601 * Called from cleanup thread, okay to block.
\r
603 status = ib_destroy_cq( cq_tinfo->cq, ib_sync_destroy );
\r
606 CL_ERROR( IBSP_DBG_EP, gdbg_lvl, ("ib_destroy_cq failed (%d)\n", status) );
\r
610 CL_TRACE( IBSP_DBG_WQ, gdbg_lvl,
\r
611 ("%s():%d:0x%x:0x%x: ib_destroy_cq() finished.\n", __FUNCTION__,
\r
612 __LINE__, GetCurrentProcessId(), GetCurrentThreadId()) );
\r
614 cq_tinfo->cq = NULL;
\r
616 STAT_DEC( cq_num );
\r
620 /* Currently only 1 CQ per HCA */
\r
621 cq_tinfo->hca = NULL;
\r
623 if( cq_tinfo->ib_cq_thread )
\r
625 /* ib_cq_thread() will release the cq_tinfo before exit. Don't
\r
626 reference cq_tinfo after signaling */
\r
627 h_cq_thread = cq_tinfo->ib_cq_thread;
\r
628 cq_tinfo->ib_cq_thread = NULL;
\r
630 cq_tinfo->ib_cq_thread_exit_wanted = TRUE;
\r
631 cl_waitobj_signal( cq_tinfo->cq_waitobj );
\r
633 /* Wait for ib_cq_thread to die, if we are not running on it */
\r
634 if( GetCurrentThreadId() != cq_tinfo->ib_cq_thread_id )
\r
636 fzprint(("%s():%d:0x%x:0x%x: Waiting for ib_cq_thread=0x%x to die\n",
\r
637 __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(),
\r
638 cq_tinfo->ib_cq_thread_id ));
\r
639 if( WaitForSingleObject( h_cq_thread, INFINITE ) != WAIT_OBJECT_0 )
\r
641 CL_ERROR( IBSP_DBG_CM, gdbg_lvl, ("WaitForSingleObject failed\n") );
\r
645 STAT_DEC( thread_num );
\r
650 fzprint(("%s():%d:0x%x:0x%x: Currently on ib_cq_thread.\n", __FUNCTION__,
\r
651 __LINE__, GetCurrentProcessId(), GetCurrentThreadId()));
\r
652 STAT_DEC( thread_num );
\r
654 CloseHandle( h_cq_thread );
\r
658 /* There was no thread created, destroy cq_waitobj and
\r
660 if( cq_tinfo->cq_waitobj )
\r
662 cl_waitobj_destroy( cq_tinfo->cq_waitobj );
\r
663 cq_tinfo->cq_waitobj = NULL;
\r
665 HeapFree( g_ibsp.heap, 0, cq_tinfo );
\r
668 CL_EXIT( IBSP_DBG_HW, gdbg_lvl );
\r
672 static struct cq_thread_info *
\r
673 ib_acquire_cq_tinfo(
\r
674 struct ibsp_hca *hca )
\r
676 struct cq_thread_info *cq_tinfo = NULL;
\r
677 uint32_t current_cqe_size;
\r
679 CL_ENTER( IBSP_DBG_HW, gdbg_lvl );
\r
682 * TODO: If future implementations require more than 1 cq_tinfo per HCA, then
\r
683 * search HCA cq_tinfo list for optimal cq_tinfo
\r
685 if( hca->cq_tinfo == NULL )
\r
687 cq_tinfo = ib_alloc_cq_tinfo( hca );
\r
688 if( cq_tinfo == NULL )
\r
690 CL_ERROR( IBSP_DBG_CM, gdbg_lvl, ("ib_alloc_cq_tinfo() failed\n") );
\r
696 cq_tinfo = hca->cq_tinfo;
\r
699 CL_ASSERT( cq_tinfo != NULL );
\r
701 current_cqe_size = cq_tinfo->qp_count * IB_CQ_SIZE;
\r
703 cl_atomic_inc( &cq_tinfo->qp_count );
\r
705 if( cq_tinfo->cqe_size < current_cqe_size )
\r
707 ib_api_status_t status;
\r
708 status = ib_modify_cq( cq_tinfo->cq, ¤t_cqe_size );
\r
712 * TODO: This could mean we are out of cqe and need to have
\r
713 * more than one cq per HCA in the future.
\r
715 cl_atomic_dec( &cq_tinfo->qp_count );
\r
716 CL_EXIT_ERROR( IBSP_DBG_EP, gdbg_lvl,
\r
717 ("ib_modify_cq() failed. (%d)\n", status) );
\r
722 cq_tinfo->cqe_size = current_cqe_size;
\r
723 fzprint(("%s():%d:0x%x:0x%x: New cq size=%d.\n",
\r
725 __LINE__, GetCurrentProcessId(),
\r
726 GetCurrentThreadId(), cq_tinfo->cqe_size));
\r
731 CL_EXIT( IBSP_DBG_HW, gdbg_lvl );
\r
737 ib_release_cq_tinfo(
\r
738 struct cq_thread_info *cq_tinfo )
\r
740 CL_ENTER( IBSP_DBG_HW, gdbg_lvl );
\r
742 cl_atomic_dec( &cq_tinfo->qp_count );
\r
744 /* TODO: downsize the cq */
\r
746 CL_EXIT( IBSP_DBG_HW, gdbg_lvl );
\r
750 /* Release IB ressources. */
\r
754 cl_fmap_item_t *p_item;
\r
756 CL_ENTER( IBSP_DBG_HW, gdbg_lvl );
\r
758 if( g_ibsp.al_handle )
\r
760 cl_list_item_t *item;
\r
761 ib_api_status_t status;
\r
765 if( g_ibsp.ib_cleanup_thread )
\r
767 /* Let thread know it's okay to exit after resources are freed */
\r
768 g_ibsp.ib_cleanup_thread_exit_wanted = TRUE;
\r
769 SetEvent( g_ibsp.ib_cleanup_event );
\r
771 fzprint(("%s():%d:0x%x:0x%x: Waiting for ib_cleanup_thread to die.\n",
\r
772 __FUNCTION__, __LINE__, GetCurrentProcessId(),
\r
773 GetCurrentThreadId()));
\r
775 /* Wait for ib_cleanup_thread to die */
\r
776 if( WaitForSingleObject( g_ibsp.ib_cleanup_thread, INFINITE ) != WAIT_OBJECT_0 )
\r
778 CL_ERROR( IBSP_DBG_CM, gdbg_lvl, ("WaitForSingleObject failed\n") );
\r
782 STAT_DEC( thread_num );
\r
785 fzprint(("%s():%d:0x%x:0x%x: ib_cleanup_thread exited.\n", __FUNCTION__,
\r
786 __LINE__, GetCurrentProcessId(), GetCurrentThreadId()));
\r
787 CloseHandle( g_ibsp.ib_cleanup_thread );
\r
788 g_ibsp.ib_cleanup_thread = NULL;
\r
791 if( g_ibsp.ib_cleanup_event )
\r
793 CloseHandle( g_ibsp.ib_cleanup_event );
\r
794 g_ibsp.ib_cleanup_event = NULL;
\r
797 while( (item = cl_qlist_head( &g_ibsp.hca_list )) != cl_qlist_end( &g_ibsp.hca_list ) )
\r
799 struct ibsp_hca *hca = PARENT_STRUCT(item, struct ibsp_hca, item);
\r
801 if( hca->cq_tinfo )
\r
803 CL_ASSERT( hca->cq_tinfo->qp_count == 0 );
\r
804 ib_destroy_cq_tinfo( hca->cq_tinfo );
\r
807 pnp_ca_remove( hca );
\r
810 fzprint(("%s():%d:0x%x:0x%x: Calling ib_close_al...\n", __FUNCTION__,
\r
811 __LINE__, GetCurrentProcessId(), GetCurrentThreadId()));
\r
813 status = ib_close_al( g_ibsp.al_handle );
\r
815 fzprint(("%s():%d:0x%x:0x%x: Done calling ib_close_al, status=%d.\n",
\r
816 __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(),
\r
818 if( status != IB_SUCCESS )
\r
820 CL_ERROR( IBSP_DBG_HW, gdbg_lvl, ("ib_close_al failed (%d)\n", status) );
\r
824 CL_TRACE( IBSP_DBG_HW, gdbg_lvl, ("ib_close_al success\n") );
\r
825 STAT_DEC( al_num );
\r
827 g_ibsp.al_handle = NULL;
\r
830 for( p_item = cl_fmap_head( &g_ibsp.ip_map );
\r
831 p_item != cl_fmap_end( &g_ibsp.ip_map );
\r
832 p_item = cl_fmap_head( &g_ibsp.ip_map ) )
\r
834 cl_fmap_remove_item( &g_ibsp.ip_map, p_item );
\r
836 HeapFree( g_ibsp.heap, 0,
\r
837 PARENT_STRUCT(p_item, struct ibsp_ip_addr, item) );
\r
840 CL_EXIT( IBSP_DBG_HW, gdbg_lvl );
\r
844 /* IP notify thread */
\r
845 static DWORD WINAPI
\r
847 LPVOID lpParameter )
\r
849 cl_list_item_t *socket_item = NULL;
\r
851 CL_ENTER( IBSP_DBG_HW, gdbg_lvl );
\r
853 UNUSED_PARAM( lpParameter );
\r
855 while( !g_ibsp.ib_cleanup_thread_exit_wanted ||
\r
856 cl_qlist_count( &g_ibsp.socket_info_list ) )
\r
858 if( g_ibsp.ib_cleanup_thread_exit_wanted == FALSE )
\r
860 if( WaitForSingleObject( g_ibsp.ib_cleanup_event, INFINITE ) != WAIT_OBJECT_0 )
\r
862 CL_ERROR( IBSP_DBG_CM, gdbg_lvl, ("WaitForSingleObject failed\n") );
\r
864 ResetEvent( g_ibsp.ib_cleanup_event );
\r
868 fzprint(("%s():%d:0x%x:0x%x: socket_info_list cnt=%d\n", __FUNCTION__,
\r
869 __LINE__, GetCurrentProcessId(),
\r
870 GetCurrentThreadId(),
\r
871 cl_qlist_count( &g_ibsp.socket_info_list) == 0));
\r
875 CL_TRACE( IBSP_DBG_WQ, gdbg_lvl, ("%s():%d:0x%x:0x%x: Wakeup\n",
\r
877 __LINE__, GetCurrentProcessId(),
\r
878 GetCurrentThreadId()));
\r
880 cl_spinlock_acquire( &g_ibsp.closed_socket_info_mutex );
\r
881 while( (socket_item = cl_qlist_remove_head( &g_ibsp.closed_socket_info_list )) !=
\r
882 cl_qlist_end( &g_ibsp.closed_socket_info_list ) )
\r
884 struct ibsp_socket_info *socket_info = NULL;
\r
886 cl_spinlock_release( &g_ibsp.closed_socket_info_mutex );
\r
888 socket_info = PARENT_STRUCT(socket_item, struct ibsp_socket_info, item);
\r
893 LPOVERLAPPED lpOverlapped;
\r
895 idx = socket_info->send_idx - (uint8_t)socket_info->send_cnt;
\r
896 if( idx >= QP_ATTRIB_SQ_DEPTH )
\r
897 idx += QP_ATTRIB_SQ_DEPTH;
\r
899 for( i = 0; i < socket_info->send_cnt; i++ )
\r
901 lpOverlapped = socket_info->send_wr[idx].lpOverlapped;
\r
902 fzprint(("%s():%d:0x%x:0x%x: socket=0x%p wr=0x%p overlapped=0x%p Internal=%d InternalHigh=%d hEvent=%d\n",
\r
903 __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), socket_info, &socket_info->send_wr[idx], lpOverlapped, lpOverlapped->Internal, lpOverlapped->InternalHigh, lpOverlapped->hEvent));
\r
905 if( ++idx == QP_ATTRIB_SQ_DEPTH )
\r
909 idx = socket_info->recv_idx - (uint8_t)socket_info->recv_cnt;
\r
910 if( idx >= QP_ATTRIB_RQ_DEPTH )
\r
911 idx += QP_ATTRIB_RQ_DEPTH;
\r
913 for( i = 0; i < socket_info->recv_cnt; i++ )
\r
915 lpOverlapped = socket_info->recv_wr[idx].wr.lpOverlapped;
\r
916 fzprint(("%s():%d:0x%x:0x%x: socket=0x%p wr=0x%p overlapped=0x%p Internal=%d InternalHigh=%d hEvent=%d\n",
\r
917 __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), socket_info, &socket_info->recv_wr[idx], lpOverlapped, lpOverlapped->Internal, lpOverlapped->InternalHigh, lpOverlapped->hEvent));
\r
919 if( ++idx == QP_ATTRIB_RQ_DEPTH )
\r
924 fzprint(("%s():%d:0x%x:0x%x: socket=0x%p\n",
\r
926 __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), socket_info));
\r
928 wait_cq_drain( socket_info );
\r
930 if( socket_info->dup_cnt )
\r
931 ibsp_dup_overlap_abort( socket_info );
\r
933 /* Destroy the switch socket. */
\r
934 if( socket_info->switch_socket != INVALID_SOCKET )
\r
939 fzprint(("%s():%d:0x%x:0x%x: socket=0x%p calling lpWPUCloseSocketHandle=0x%p\n", __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), socket_info, socket_info->switch_socket));
\r
941 ret = g_ibsp.up_call_table.lpWPUCloseSocketHandle(
\r
942 socket_info->switch_socket, &error );
\r
943 if( ret == SOCKET_ERROR )
\r
945 CL_ERROR( IBSP_DBG_EP, gdbg_lvl,
\r
946 ("WPUCloseSocketHandle failed: %d\n", error) );
\r
950 STAT_DEC( wpusocket_num );
\r
953 socket_info->switch_socket = INVALID_SOCKET;
\r
956 ib_destroy_socket( socket_info );
\r
958 ib_deregister_all_mr( &socket_info->buf_mem_list );
\r
959 free_socket_info( socket_info );
\r
960 cl_spinlock_acquire( &g_ibsp.closed_socket_info_mutex );
\r
962 cl_spinlock_release( &g_ibsp.closed_socket_info_mutex );
\r
965 /* No special exit code, even on errors. */
\r
966 CL_EXIT( IBSP_DBG_HW, gdbg_lvl );
\r
972 /* Initialize IB ressources. */
\r
974 ibsp_initialize(void)
\r
976 ib_api_status_t status;
\r
979 CL_ENTER( IBSP_DBG_HW, gdbg_lvl );
\r
981 CL_ASSERT( g_ibsp.al_handle == NULL );
\r
982 CL_ASSERT( cl_qlist_count( &g_ibsp.hca_list ) == 0 );
\r
984 /* Open the IB library */
\r
985 status = ib_open_al( &g_ibsp.al_handle );
\r
987 CL_TRACE( IBSP_DBG_HW, gdbg_lvl, ("open is %d %p\n", status, g_ibsp.al_handle) );
\r
989 if( status != IB_SUCCESS )
\r
991 CL_ERROR( IBSP_DBG_HW, gdbg_lvl, ("ib_open_al failed (%d)\n", status) );
\r
992 ret = WSAEPROVIDERFAILEDINIT;
\r
996 STAT_INC( al_num );
\r
998 /* Register for PNP events */
\r
999 status = register_pnp();
\r
1002 CL_ERROR( IBSP_DBG_HW, gdbg_lvl, ("register_pnp failed (%d)\n", status) );
\r
1003 ret = WSAEPROVIDERFAILEDINIT;
\r
1007 /* Populate IP list. */
\r
1008 update_all_ip_addrs();
\r
1010 /* Create a cleanup event */
\r
1011 g_ibsp.ib_cleanup_event = CreateEvent( NULL, TRUE, FALSE, NULL );
\r
1012 if( g_ibsp.ib_cleanup_event == NULL )
\r
1014 CL_ERROR( IBSP_DBG_HW, gdbg_lvl, ("CreateEvent failed."));
\r
1015 ret = WSAEPROVIDERFAILEDINIT;
\r
1019 /* Create a cleanup thread */
\r
1020 g_ibsp.ib_cleanup_thread = CreateThread( NULL, 0, ib_cleanup_thread, NULL, 0, NULL );
\r
1022 if( g_ibsp.ib_cleanup_thread == NULL )
\r
1024 CL_ERROR( IBSP_DBG_HW, gdbg_lvl, ("CreateThread failed.") );
\r
1025 ret = WSAEPROVIDERFAILEDINIT;
\r
1029 STAT_INC( thread_num );
\r
1035 /* Free up resources. */
\r
1039 CL_EXIT( IBSP_DBG_HW, gdbg_lvl );
\r
1045 /* Destroys the infiniband ressources of a socket. */
\r
1047 ib_destroy_socket(
\r
1048 IN OUT struct ibsp_socket_info *socket_info )
\r
1050 ib_api_status_t status;
\r
1052 CL_ENTER( IBSP_DBG_EP, gdbg_lvl );
\r
1054 if( socket_info->qp )
\r
1056 status = ib_destroy_qp( socket_info->qp, ib_sync_destroy );
\r
1059 CL_ERROR( IBSP_DBG_EP, gdbg_lvl, ("ib_destroy_qp failed (%d)\n", status) );
\r
1063 CL_TRACE( IBSP_DBG_WQ, gdbg_lvl,
\r
1064 ("%s():%d:0x%x:0x%x: ib_destroy_qp() finished\n", __FUNCTION__,
\r
1065 __LINE__, GetCurrentProcessId(), GetCurrentThreadId()) );
\r
1067 socket_info->qp = NULL;
\r
1069 STAT_DEC( qp_num );
\r
1071 ib_release_cq_tinfo( socket_info->cq_tinfo );
\r
1075 CL_EXIT( IBSP_DBG_EP, gdbg_lvl );
\r
1080 * Creates the necessary IB ressources for a socket
\r
1084 IN OUT struct ibsp_socket_info *socket_info)
\r
1086 struct cq_thread_info *cq_tinfo;
\r
1087 ib_qp_create_t qp_create;
\r
1088 ib_api_status_t status;
\r
1090 struct ibsp_hca *hca;
\r
1091 ib_qp_attr_t qp_attr;
\r
1093 CL_ENTER( IBSP_DBG_EP, gdbg_lvl );
\r
1095 CL_ASSERT( socket_info != NULL );
\r
1096 CL_ASSERT( socket_info->port != NULL );
\r
1097 CL_ASSERT( socket_info->qp == NULL );
\r
1099 hca = socket_info->port->hca;
\r
1100 socket_info->hca_pd = hca->pd;
\r
1102 /* Get the completion queue and thread info for this socket */
\r
1103 cq_tinfo = ib_acquire_cq_tinfo( hca );
\r
1104 if( cq_tinfo == NULL )
\r
1106 CL_ERROR( IBSP_DBG_EP, gdbg_lvl, ("ib_acquire_cq_tinfo failed\n") );
\r
1107 ret = WSAEPROVIDERFAILEDINIT;
\r
1110 socket_info->cq_tinfo = cq_tinfo;
\r
1113 qp_create.qp_type = IB_QPT_RELIABLE_CONN;
\r
1114 qp_create.sq_depth = QP_ATTRIB_SQ_DEPTH;
\r
1115 qp_create.rq_depth = QP_ATTRIB_RQ_DEPTH;
\r
1116 qp_create.sq_sge = QP_ATTRIB_SQ_SGE;
\r
1117 qp_create.rq_sge = 1;
\r
1118 qp_create.h_rq_cq = cq_tinfo->cq;
\r
1119 qp_create.h_sq_cq = cq_tinfo->cq;
\r
1120 qp_create.sq_signaled = TRUE;
\r
1122 status = ib_create_qp( socket_info->hca_pd, &qp_create, socket_info, /* context */
\r
1123 NULL, /* async handler */
\r
1124 &socket_info->qp );
\r
1127 CL_ERROR( IBSP_DBG_EP, gdbg_lvl, ("ib_create_qp failed (%d)\n", status));
\r
1128 ret = WSAEPROVIDERFAILEDINIT;
\r
1132 status = ib_query_qp( socket_info->qp, &qp_attr );
\r
1133 if( status == IB_SUCCESS )
\r
1135 socket_info->max_inline = min( g_max_inline, qp_attr.sq_max_inline );
\r
1139 CL_ERROR( IBSP_DBG_EP, gdbg_lvl,
\r
1140 ("ib_query_qp returned %s\n", ib_get_err_str( status )) );
\r
1141 socket_info->max_inline = 0;
\r
1144 STAT_INC( qp_num );
\r
1151 ib_destroy_socket( socket_info );
\r
1154 CL_EXIT( IBSP_DBG_EP, gdbg_lvl );
\r
1162 IN OUT struct ibsp_socket_info *socket_info )
\r
1164 CL_ENTER( IBSP_DBG_EP, gdbg_lvl );
\r
1166 if( socket_info->cq_tinfo == NULL )
\r
1168 CL_EXIT( IBSP_DBG_EP, gdbg_lvl );
\r
1172 /* Wait for the QP to be drained. */
\r
1173 while( socket_info->send_cnt || socket_info->recv_cnt )
\r
1175 fzprint(("%s():%d:0x%x:0x%x: socket=0x%p wr_list_count=%d qp state=%d\n",
\r
1176 __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(),
\r
1177 socket_info, cl_qlist_count(&socket_info->wr_list)));
\r
1182 CL_EXIT( IBSP_DBG_EP, gdbg_lvl );
\r
/*
 * NOTE(review): this listing is a partial, line-numbered dump; the return
 * type, braces, the else-branch opener, and the ring-index advance at the
 * bottom of the loop are among the original lines missing from view.  The
 * comments below describe only what the visible lines establish.
 *
 * ibsp_dup_overlap_abort: completes, with error WSAECONNABORTED and zero
 * bytes transferred, every overlapped request still pending in
 * socket_info->dup_wr[] (requests queued while the socket was being
 * duplicated to another process).
 */
1187 ibsp_dup_overlap_abort(

1188 IN OUT struct ibsp_socket_info *socket_info )

1190 LPWSAOVERLAPPED lpOverlapped = NULL;

1195 CL_ENTER( IBSP_DBG_EP, gdbg_lvl );

/* Only legal once all send/recv work requests on this socket have drained. */
1196 CL_ASSERT( !socket_info->send_cnt && !socket_info->recv_cnt );

1198 /* Browse the list of all posted overlapped structures

1199 * to mark them as aborted. */

/* dup_wr[] is used as a ring of QP_ATTRIB_RQ_DEPTH entries: back up
 * dup_cnt slots from dup_idx to reach the oldest pending entry.  The
 * wrap correction below presumably relies on idx being an unsigned
 * narrow ring index that wrapped during the subtraction -- TODO confirm
 * idx's declared type (its declaration is not visible in this dump). */
1200 idx = socket_info->dup_idx - (uint8_t)socket_info->dup_cnt;

1201 if( idx >= QP_ATTRIB_RQ_DEPTH )

1202 idx += QP_ATTRIB_RQ_DEPTH;

/* Walk every pending duplicate WR; dup_cnt is decremented at the bottom
 * of the loop (original line 1264). */
1204 while( socket_info->dup_cnt )

1206 lpOverlapped = socket_info->dup_wr[idx].wr.lpOverlapped;

1208 fzprint(("%s():%d:0x%x:0x%x: socket=0x%p wr=0x%p overlapped=0x%p Internal=%d InternalHigh=%d hEvent=%d\n",

1209 __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), socket_info, &socket_info->dup_wr[idx], lpOverlapped, lpOverlapped->Internal, lpOverlapped->InternalHigh, lpOverlapped->hEvent));

/* Mark the request aborted: the error goes in OffsetHigh, the byte
 * count (zero) in InternalHigh, per the Winsock SPI overlapped
 * completion convention. */
1211 lpOverlapped->OffsetHigh = WSAECONNABORTED;

1212 lpOverlapped->InternalHigh = 0;

/* Low bit set in hEvent means the switch polls for completion via
 * WSPGetOverlappedResult() rather than an IOCP upcall (see the original
 * comment at lines 1216-1217 below). */
1214 if( ((uintptr_t) lpOverlapped->hEvent) & 0x00000001 )

1216 /* Indicate this operation is complete. The switch will poll

1217 * with calls to WSPGetOverlappedResult(). */

/* Debug-only bookkeeping counter, paired with the fzprint below. */
1219 cl_atomic_dec(&g_ibsp.overlap_h1_comp_count);

1221 fzprint(("%s():%d:0x%x:0x%x: ov=0x%p h0=%d h1=%d h1_c=%d send=%d recv=%d\n",

1222 __FUNCTION__, __LINE__, GetCurrentProcessId(),

1223 GetCurrentThreadId(), lpOverlapped,

1224 g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count,

1225 g_ibsp.overlap_h1_comp_count, g_ibsp.send_count, g_ibsp.recv_count));

1228 CL_TRACE(IBSP_DBG_WQ, gdbg_lvl,

1229 ("%s: set internal overlapped=0x%p Internal=%d OffsetHigh=%d\n",

1230 __FUNCTION__, lpOverlapped, lpOverlapped->Internal,

1231 lpOverlapped->OffsetHigh));

/* Internal = 0 signals "no longer pending" to the polling switch. */
1233 lpOverlapped->Internal = 0;

/* Other branch (the else line is not visible in this dump): complete
 * the request through the switch's completion upcall instead. */
1238 cl_atomic_dec(&g_ibsp.overlap_h0_count);

1241 fzprint(("%s():%d:0x%x:0x%x: ov=0x%p h0=%d h1=%d h1_c=%d send=%d recv=%d\n",

1242 __FUNCTION__, __LINE__, GetCurrentProcessId(),

1243 GetCurrentThreadId(), lpOverlapped,

1244 g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count,

1245 g_ibsp.overlap_h1_comp_count, g_ibsp.send_count, g_ibsp.recv_count));

1247 CL_TRACE(IBSP_DBG_WQ, gdbg_lvl,

1248 ("%s: calls lpWPUCompleteOverlappedRequest, overlapped=0x%p OffsetHigh=%d InternalHigh=%d hEvent=%d\n",

1249 __FUNCTION__, lpOverlapped, lpOverlapped->OffsetHigh,

1250 lpOverlapped->InternalHigh, lpOverlapped->hEvent));

/* Hand the aborted request back to the switch via the upcall table.
 * NOTE(review): the lpOverlapped argument itself (original line 1254)
 * is not visible in this dump. */
1252 ret = g_ibsp.up_call_table.lpWPUCompleteOverlappedRequest

1253 (socket_info->switch_socket,

1255 lpOverlapped->OffsetHigh, (DWORD) lpOverlapped->InternalHigh, &error);

/* Failure is only logged; nothing more can be done at abort time. */
1259 CL_ERROR(IBSP_DBG_EP, gdbg_lvl,

1260 ("lpWPUCompleteOverlappedRequest failed with %d/%d\n", ret,

/* One fewer duplicated WR outstanding; the loop exits when dup_cnt
 * reaches zero.  The ring-index advance (original lines ~1265-1266)
 * is not visible here. */
1264 cl_atomic_dec( &socket_info->dup_cnt );

1267 CL_EXIT( IBSP_DBG_EP, gdbg_lvl );
\r
1271 /* Closes a connection and releases its resources. */
\r
/*
 * NOTE(review): partial, line-numbered dump; the return type, braces,
 * `break` statements and the switch's remaining cases/default are among
 * the original lines missing from view.
 *
 * shutdown_and_destroy_socket_info: tears down a socket's connection
 * resources based on the state it was in (old_state) when closing began:
 * releases the duplication file-mapping handle, cancels any pending
 * listen, then disconnects if a connection was in progress/established.
 */
1273 shutdown_and_destroy_socket_info(

1274 IN OUT struct ibsp_socket_info *socket_info,

1275 IN int old_state )

1277 CL_ENTER( IBSP_DBG_EP, gdbg_lvl );

/* Release the shared-memory mapping used for WSPDuplicateSocket hand-off. */
1279 if( socket_info->duplicate.mmap_handle )

1281 CloseHandle( socket_info->duplicate.mmap_handle );

1282 socket_info->duplicate.mmap_handle = NULL;

/* A listening socket has no connection to tear down, but its listen
 * must be cancelled so queued connection requests are rejected. */
1285 if( socket_info->info.listen.handle )

1287 /* Stop listening and reject queued connections. */

1288 ib_listen_cancel( socket_info );

/* Per-state teardown.  NOTE(review): case bodies' braces/breaks and any
 * intermediate cases (original lines 1297-1314, 1321-1323, 1326-1328)
 * are not visible in this dump. */
1291 switch( old_state )

/* Double-close: this function must run at most once per socket. */
1293 case IBSP_CLOSING:

1294 /* This function has already been called. Should not happen. */

1295 CL_ERROR( IBSP_DBG_EP, gdbg_lvl,

1296 ("shutdown_and_destroy_socket_info already in closing socket_state\n") );

1301 /* Nothing to do. */

/* Connection in progress or established: issue an orderly disconnect
 * with reason DISC_SHUTDOWN. */
1304 case IBSP_CONNECT:

1315 case IBSP_CONNECTED:

1317 struct disconnect_reason reason;

1318 memset( &reason, 0, sizeof(reason) );

1319 reason.type = DISC_SHUTDOWN;

1320 ib_disconnect( socket_info, &reason );

/* Already disconnected: no connection-level cleanup remains. */
1324 case IBSP_DISCONNECTED:

1325 /* Nothing to do. */

1329 CL_EXIT( IBSP_DBG_EP, gdbg_lvl );
\r
/*
 * NOTE(review): the function's name and return-type lines (original
 * ~1333-1334), the loop braces, and the duplicate-key exit path
 * (original lines 1374-1377) are missing from this dump -- presumably
 * this is the connection-map insert routine (ibsp_conn_insert);
 * verify against the full source.
 *
 * Inserts socket `s` into the global connection red-black map, keyed
 * lexicographically on (local family, local addr, local port,
 * peer family, peer addr, peer port).  Returns TRUE when the traversal
 * reached end-of-map, i.e. no socket with an identical key was found
 * and the insert took effect.
 */
1335 IN struct ibsp_socket_info *s )

1337 struct ibsp_socket_info *p_sock;

1338 cl_rbmap_item_t *p_item, *p_insert_at;

1339 boolean_t left = TRUE;

/* NOTE(review): the map root is read here, before the spinlock below
 * is taken (original lines 1341-1342 vs 1344) -- a concurrent insert
 * could rotate the root in between.  Confirm callers serialize access,
 * or move these two reads under the lock. */
1341 p_item = cl_rbmap_root( &g_ibsp.conn_map );

1342 p_insert_at = p_item;

1344 cl_spinlock_acquire( &g_ibsp.socket_info_mutex );

/* The socket must not already be in any map. */
1345 CL_ASSERT( !s->conn_item.p_map );

/* Standard binary-search descent; p_insert_at tracks the last real
 * node so cl_rbmap_insert() can attach the new item as its child on
 * the side recorded in `left`. */
1346 while( p_item != cl_rbmap_end( &g_ibsp.conn_map ) )

1348 p_insert_at = p_item;

1349 p_sock = PARENT_STRUCT( p_item, struct ibsp_socket_info, conn_item );

1350 if( p_sock->local_addr.sin_family < s->local_addr.sin_family )

1351 p_item = cl_rbmap_left( p_item ), left = TRUE;

1352 else if( p_sock->local_addr.sin_family > s->local_addr.sin_family )

1353 p_item = cl_rbmap_right( p_item ), left = FALSE;

1354 else if( p_sock->local_addr.sin_addr.S_un.S_addr < s->local_addr.sin_addr.S_un.S_addr )

1355 p_item = cl_rbmap_left( p_item ), left = TRUE;

1356 else if( p_sock->local_addr.sin_addr.S_un.S_addr > s->local_addr.sin_addr.S_un.S_addr )

1357 p_item = cl_rbmap_right( p_item ), left = FALSE;

1358 else if( p_sock->local_addr.sin_port < s->local_addr.sin_port )

1359 p_item = cl_rbmap_left( p_item ), left = TRUE;

1360 else if( p_sock->local_addr.sin_port > s->local_addr.sin_port )

1361 p_item = cl_rbmap_right( p_item ), left = FALSE;

1362 else if( p_sock->peer_addr.sin_family < s->peer_addr.sin_family )

1363 p_item = cl_rbmap_left( p_item ), left = TRUE;

1364 else if( p_sock->peer_addr.sin_family > s->peer_addr.sin_family )

1365 p_item = cl_rbmap_right( p_item ), left = FALSE;

1366 else if( p_sock->peer_addr.sin_addr.S_un.S_addr < s->peer_addr.sin_addr.S_un.S_addr )

1367 p_item = cl_rbmap_left( p_item ), left = TRUE;

1368 else if( p_sock->peer_addr.sin_addr.S_un.S_addr > s->peer_addr.sin_addr.S_un.S_addr )

1369 p_item = cl_rbmap_right( p_item ), left = FALSE;

1370 else if( p_sock->peer_addr.sin_port < s->peer_addr.sin_port )

1371 p_item = cl_rbmap_left( p_item ), left = TRUE;

1372 else if( p_sock->peer_addr.sin_port > s->peer_addr.sin_port )

1373 p_item = cl_rbmap_right( p_item ), left = FALSE;

/* NOTE(review): the final else (exact key match -> skip the insert,
 * original lines 1374-1377) is not visible in this dump. */
1378 cl_rbmap_insert( &g_ibsp.conn_map, p_insert_at, &s->conn_item, left );

1381 cl_spinlock_release( &g_ibsp.socket_info_mutex );

/* TRUE iff the descent fell off the tree (no duplicate key found). */
1382 return p_item == cl_rbmap_end( &g_ibsp.conn_map );
\r
/*
 * NOTE(review): the function's name/return-type lines and closing brace
 * are missing from this dump -- presumably the connection-map removal
 * counterpart (ibsp_conn_remove); verify against the full source.
 *
 * Removes socket `s` from the global connection red-black map under the
 * socket-info spinlock.  The socket must currently be in the map.
 */
1388 IN struct ibsp_socket_info *s )

1390 cl_spinlock_acquire( &g_ibsp.socket_info_mutex );

/* conn_item.p_map is non-NULL only while the item is linked into a map. */
1391 CL_ASSERT( s->conn_item.p_map );

1392 cl_rbmap_remove_item( &g_ibsp.conn_map, &s->conn_item );

1393 cl_spinlock_release( &g_ibsp.socket_info_mutex );
\r