[IBAL] Fix race in UAL between CQ callbacks and CQ destruction.
[mirror/winof/.git] / core / al / user / ual_mgr.c
1 /*\r
2  * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
3  * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. \r
4  *\r
5  * This software is available to you under the OpenIB.org BSD license\r
6  * below:\r
7  *\r
8  *     Redistribution and use in source and binary forms, with or\r
9  *     without modification, are permitted provided that the following\r
10  *     conditions are met:\r
11  *\r
12  *      - Redistributions of source code must retain the above\r
13  *        copyright notice, this list of conditions and the following\r
14  *        disclaimer.\r
15  *\r
16  *      - Redistributions in binary form must reproduce the above\r
17  *        copyright notice, this list of conditions and the following\r
18  *        disclaimer in the documentation and/or other materials\r
19  *        provided with the distribution.\r
20  *\r
21  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
22  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
23  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
24  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
25  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
26  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
27  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
28  * SOFTWARE.\r
29  *\r
30  * $Id$\r
31  */\r
32 \r
33 \r
34 #include "ual_support.h"\r
35 #include "al_debug.h"\r
36 #include "al_mgr.h"\r
37 #include "al_init.h"\r
38 #include "al_res_mgr.h"\r
39 #include "al_proxy_ioctl.h"\r
40 #include "al.h"\r
41 #include "al_ci_ca.h"\r
42 #include "al_pnp.h"\r
43 #include "al_ioc_pnp.h"\r
44 #include "al_cq.h"\r
45 #include "ual_ca.h"\r
46 #include "ual_qp.h"\r
47 #include "ual_mad.h"\r
48 #include "ib_common.h"\r
49 #include "al_cm_cep.h"\r
50 \r
51 \r
/* Global AL manager handle is defined in al_mgr_shared.c */
extern	ib_al_handle_t			gh_al;
extern	al_mgr_t*				gp_al_mgr;
extern	ib_pool_handle_t		gh_mad_pool;


/*
 * Process-wide open count for the AL device.
 * NOTE(review): not referenced within this file chunk - presumably
 * tracks the number of outstanding ib_open_al() calls; confirm against
 * the rest of the module before relying on its semantics.
 */
atomic32_t						g_open_cnt = 0;

/* Define the thread names to handle various notifications */
#define CM_THREAD_NAME			"CM_Thread"
#define COMP_THREAD_NAME		"Comp_Thread"
#define MISC_THREAD_NAME		"Misc_Thread"

/* Worker routine run by each callback thread created in
 * ual_create_cb_threads(); services the I/O completion port. */
static DWORD WINAPI
__cb_thread_routine(
	IN				void						*context );

//static void
//__process_cm_cb(
//	IN		cm_cb_ioctl_info_t*			p_cm_cb_info);

/* Dispatches miscellaneous (non-CQ) asynchronous event records
 * received from the kernel proxy. */
static void
__process_misc_cb(
	IN		misc_cb_ioctl_info_t*		p_misc_cb_info );
76 \r
77 \r
/*
 * Cleanup callback for the UAL manager's AL object.
 *
 * Runs during manager destruction, before __free_ual_mgr.  Signals the
 * callback threads to exit and closes the per-callback-type file handles;
 * closing a file handle cancels any I/O still pending on it, which in turn
 * unblocks the threads waiting on the completion port.
 */
static void
__cleanup_ual_mgr(
	IN				al_obj_t					*p_obj )
{
	CL_ENTER( AL_DBG_MGR, g_al_dbg_lvl );

	UNUSED_PARAM( p_obj );

	/* Set the callback thread state to exit. */
	gp_al_mgr->ual_mgr.exit_thread = TRUE;

	/* Closing the file handles cancels any pending I/O requests. */
	//CloseHandle( gp_al_mgr->ual_mgr.h_cm_file );
	CloseHandle( gp_al_mgr->ual_mgr.h_cq_file );
	CloseHandle( gp_al_mgr->ual_mgr.h_misc_file );
	/* Close the main device handle last and mark it invalid so a later
	 * create_al_mgr() can assert on a clean state. */
	CloseHandle( g_al_device );
	g_al_device = INVALID_HANDLE_VALUE;
}
96 \r
97 \r
/*
 * Free callback for the UAL manager's AL object.
 *
 * Final stage of manager teardown: destroys the AL object, wakes and joins
 * every callback thread via the I/O completion port, then releases the
 * thread vector, the lock, and the manager memory itself.
 */
static void
__free_ual_mgr(
	IN				al_obj_t					*p_obj )
{
	size_t			i;
	HANDLE			h_thread;

	UNUSED_PARAM( p_obj );

	/*
	 * We need to destroy the AL object before the spinlock, since
	 * destroying the AL object will try to acquire the spinlock.
	 */
	destroy_al_obj( &gp_al_mgr->obj );

	/* Verify that the object list is empty. */
	print_al_objs( NULL );

	if( gp_al_mgr->ual_mgr.h_cb_port )
	{
		/* Post a notification to the completion port to make threads exit.
		 * One zero-byte completion packet per thread: each thread dequeues
		 * exactly one and exits, so all of them are released. */
		for( i = 0;
			i < cl_ptr_vector_get_size( &gp_al_mgr->ual_mgr.cb_threads );
			i++ )
		{
			if( !PostQueuedCompletionStatus( gp_al_mgr->ual_mgr.h_cb_port,
				0, 0, NULL ) )
			{
				AL_TRACE( AL_DBG_ERROR,
					("PostQueuedCompletionStatus returned %d\n",
					GetLastError()) );
			}
		}

		/* Join each thread before closing its handle; remove from the
		 * vector as we go so the loop terminates when all have exited. */
		while( cl_ptr_vector_get_size( &gp_al_mgr->ual_mgr.cb_threads ) )
		{
			h_thread = cl_ptr_vector_get( &gp_al_mgr->ual_mgr.cb_threads, 0 );
			WaitForSingleObject( h_thread, INFINITE );
			CloseHandle( h_thread );
			cl_ptr_vector_remove( &gp_al_mgr->ual_mgr.cb_threads, 0 );
		}

		/* Safe to close the port only after all threads are gone. */
		CloseHandle( gp_al_mgr->ual_mgr.h_cb_port );
	}

	cl_ptr_vector_destroy( &gp_al_mgr->ual_mgr.cb_threads );
	cl_spinlock_destroy( &gp_al_mgr->lock );

	cl_free( gp_al_mgr );
	gp_al_mgr = NULL;
}
149 \r
150 \r
151 HANDLE\r
152 ual_create_async_file(\r
153         IN                              uint32_t                                        type )\r
154 {\r
155         cl_status_t                             cl_status;\r
156         ual_bind_file_ioctl_t   ioctl;\r
157         uintn_t                                 bytes_ret;\r
158 \r
159         AL_ENTER( AL_DBG_MGR );\r
160 \r
161         /* Create a file object on which to issue all SA requests. */\r
162         ioctl.h_file = CreateFileW( L"\\\\.\\ibal",\r
163                 GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE,\r
164                 NULL, OPEN_EXISTING, FILE_FLAG_OVERLAPPED, NULL );\r
165         if( ioctl.h_file == INVALID_HANDLE_VALUE )\r
166         {\r
167                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
168                         ("CreateFile returned %d.\n", GetLastError()) );\r
169                 return INVALID_HANDLE_VALUE;\r
170         }\r
171 \r
172         /* Bind this file object to the completion port. */\r
173         if( !CreateIoCompletionPort(\r
174                 ioctl.h_file, gp_al_mgr->ual_mgr.h_cb_port, type, 0 ) )\r
175         {\r
176                 CloseHandle( ioctl.h_file );\r
177                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
178                         ("CreateIoCompletionPort for file of type %d returned %d.\n",\r
179                         type, GetLastError()) );\r
180                 return INVALID_HANDLE_VALUE;\r
181         }\r
182 \r
183         /*\r
184          * Send an IOCTL down on the main file handle to bind this file\r
185          * handle with our proxy context.\r
186          */\r
187         cl_status = do_al_dev_ioctl(\r
188                 type, &ioctl, sizeof(ioctl), NULL, 0, &bytes_ret );\r
189         if( cl_status != CL_SUCCESS )\r
190         {\r
191                 CloseHandle( ioctl.h_file );\r
192                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
193                         ("Bind IOCTL for type %d returned %s.\n",\r
194                         CL_STATUS_MSG(cl_status)) );\r
195                 return INVALID_HANDLE_VALUE;\r
196         }\r
197 \r
198         AL_EXIT( AL_DBG_MGR );\r
199         return ioctl.h_file;\r
200 }\r
201 \r
202 \r
203 ib_api_status_t\r
204 ual_create_cb_threads( void )\r
205 {\r
206         cl_status_t             cl_status;\r
207         uint32_t                i;\r
208         HANDLE                  h_thread;\r
209 \r
210         AL_ENTER( AL_DBG_MGR );\r
211 \r
212         cl_status = cl_ptr_vector_init(\r
213                 &gp_al_mgr->ual_mgr.cb_threads, cl_proc_count(), 0 );\r
214         if( cl_status != CL_SUCCESS )\r
215         {\r
216                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
217                         ("cl_ptr_vector_init returned %s.\n", CL_STATUS_MSG( cl_status )) );\r
218                 return IB_ERROR;\r
219         }\r
220 \r
221         for( i = 0; i < cl_proc_count(); i++ )\r
222         {\r
223                 h_thread = CreateThread( NULL, 0, __cb_thread_routine, NULL, 0, NULL );\r
224                 if( !h_thread )\r
225                 {\r
226                         AL_TRACE_EXIT( AL_DBG_ERROR,\r
227                                 ("CreateThread returned %d.\n", GetLastError()) );\r
228                         return IB_ERROR;\r
229                 }\r
230 \r
231                 /* We already sized the vector, so insertion should work. */\r
232                 cl_status = cl_ptr_vector_insert( &gp_al_mgr->ual_mgr.cb_threads,\r
233                         h_thread, NULL );\r
234                 CL_ASSERT( cl_status == CL_SUCCESS );\r
235         }\r
236 \r
237         AL_EXIT( AL_DBG_MGR );\r
238         return IB_SUCCESS;\r
239 }\r
240 \r
241 \r
242 /*\r
243  * Create the ual manager for the process\r
244  */\r
245 ib_api_status_t\r
246 create_al_mgr()\r
247 {\r
248         ib_api_status_t                 ib_status;\r
249         cl_status_t                             cl_status;\r
250         uintn_t                                 bytes_ret;\r
251 \r
252         CL_ENTER( AL_DBG_MGR, g_al_dbg_lvl );\r
253 \r
254         CL_ASSERT( !gp_al_mgr );\r
255 \r
256         /* First open the kernel device. */\r
257         CL_ASSERT( g_al_device == INVALID_HANDLE_VALUE );\r
258         g_al_device = CreateFileW( L"\\\\.\\ibal",\r
259                 GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE,\r
260                 NULL, OPEN_EXISTING, 0, NULL );\r
261         if( g_al_device == INVALID_HANDLE_VALUE )\r
262                 return IB_ERROR;\r
263 \r
264         cl_status = do_al_dev_ioctl( UAL_BIND, NULL, 0, NULL, 0, &bytes_ret );\r
265         if( cl_status != CL_SUCCESS )\r
266                 return IB_ERROR;\r
267 \r
268         gp_al_mgr = cl_zalloc( sizeof( al_mgr_t ) );\r
269         if( !gp_al_mgr )\r
270         {\r
271                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
272                         ("Failed to cl_zalloc ual_mgr_t.\n") );\r
273                 return IB_INSUFFICIENT_MEMORY;\r
274         }\r
275 \r
276         /* Construct the AL manager. */\r
277         cl_event_construct( &gp_al_mgr->ual_mgr.sync_event );\r
278         cl_ptr_vector_construct( &gp_al_mgr->ual_mgr.cb_threads );\r
279         cl_qlist_init( &gp_al_mgr->al_obj_list );\r
280         cl_qlist_init( &gp_al_mgr->ci_ca_list );\r
281         cl_spinlock_construct( &gp_al_mgr->lock );\r
282         gp_al_mgr->ual_mgr.h_cb_port = NULL;\r
283 \r
284         /* Init the al object in the ual manager */\r
285         construct_al_obj(&gp_al_mgr->obj, AL_OBJ_TYPE_AL_MGR);\r
286         ib_status = init_al_obj( &gp_al_mgr->obj, gp_al_mgr, FALSE,\r
287                 NULL, __cleanup_ual_mgr, __free_ual_mgr );\r
288         if( ib_status != IB_SUCCESS )\r
289         {\r
290                 __free_ual_mgr( &gp_al_mgr->obj );\r
291                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
292                         ("init_al_obj failed, status = 0x%x.\n", ib_status) );\r
293                 return ib_status;\r
294         }\r
295 \r
296         /* Allocate the I/O completion port for async operations. */\r
297         gp_al_mgr->ual_mgr.h_cb_port = CreateIoCompletionPort(\r
298                 INVALID_HANDLE_VALUE, NULL, 0, 0 );\r
299         if( !gp_al_mgr->ual_mgr.h_cb_port )\r
300         {\r
301                 gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );\r
302                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
303                         ("Failed to create I/O completion port.\n") );\r
304                 return IB_ERROR;\r
305         }\r
306 \r
307         /* Create the threads to process completion callbacks. */\r
308         ib_status = ual_create_cb_threads();\r
309         if( ib_status != IB_SUCCESS )\r
310         {\r
311                 gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );\r
312                 AL_TRACE_EXIT( AL_DBG_ERROR, ("ual_create_cb_threads failed.\n") );\r
313                 return ib_status;\r
314         }\r
315 \r
316         /* Create CM callback file handle. */\r
317         //gp_al_mgr->ual_mgr.h_cm_file = ual_create_async_file( UAL_BIND_CM );\r
318         //if( gp_al_mgr->ual_mgr.h_cq_file == INVALID_HANDLE_VALUE )\r
319         //{\r
320         //      gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );\r
321         //      AL_TRACE_EXIT( AL_DBG_ERROR,\r
322         //              ("ual_create_async_file for UAL_BIND_CM returned %d.\n",\r
323         //              GetLastError()) );\r
324         //      return IB_ERROR;\r
325         //}\r
326 \r
327         /* Create the CQ completion callback file handle. */\r
328         gp_al_mgr->ual_mgr.h_cq_file = ual_create_async_file( UAL_BIND_CQ );\r
329         if( gp_al_mgr->ual_mgr.h_cq_file == INVALID_HANDLE_VALUE )\r
330         {\r
331                 gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );\r
332                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
333                         ("ual_create_async_file for UAL_BIND_CQ returned %d.\n",\r
334                         GetLastError()) );\r
335                 return IB_ERROR;\r
336         }\r
337 \r
338         /* Create the miscelaneous callback file handle. */\r
339         gp_al_mgr->ual_mgr.h_misc_file = ual_create_async_file( UAL_BIND_MISC );\r
340         if( gp_al_mgr->ual_mgr.h_misc_file == INVALID_HANDLE_VALUE )\r
341         {\r
342                 gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );\r
343                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
344                         ("ual_create_async_file for UAL_BIND_CQ returned %d.\n",\r
345                         GetLastError()) );\r
346                 return IB_ERROR;\r
347         }\r
348 \r
349         cl_status = cl_spinlock_init( &gp_al_mgr->lock );\r
350         if( cl_status != CL_SUCCESS )\r
351         {\r
352                 gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
353                 return ib_convert_cl_status( cl_status );\r
354         }\r
355 \r
356         /* With PnP support, open the AL instance before the threads\r
357          * get a chance to process async events\r
358          */\r
359 \r
360         /* Open an implicit al instance for UAL's internal usage.  This call will\r
361          * automatically create the gh_al.\r
362          */\r
363         gh_al = NULL;\r
364         if ((ib_status = do_open_al(&gp_al_mgr->ual_mgr.h_al)) != IB_SUCCESS)\r
365         {\r
366                 gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
367                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
368                         ("do_open_al() failed, status = 0x%x.\n", ib_status) );\r
369                 return ( ib_status );\r
370         }\r
371 \r
372         /* Create the global AL MAD pool. */\r
373         ib_status = ib_create_mad_pool( gh_al, 0, 0, 64, &gh_mad_pool );\r
374         if( ib_status != IB_SUCCESS )\r
375         {\r
376                 gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
377                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
378                         ("ib_create_mad_pool failed with %s.\n", ib_get_err_str(ib_status)) );\r
379                 return ib_status;\r
380         }\r
381 \r
382         /*\r
383          * Create a global pool key for internal MADs - they are never\r
384          * registered on any CA.\r
385          */\r
386         ib_status = ual_reg_global_mad_pool( gh_mad_pool, &g_pool_key );\r
387         if( ib_status != IB_SUCCESS )\r
388         {\r
389                 gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
390                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
391                         ("ual_reg_global_mad_pool failed with %s.\n", ib_get_err_str(ib_status)) );\r
392                 return ib_status;\r
393         }\r
394 \r
395         /* Create the pnp manager before the thread initialize.  This makes\r
396          * sure that the pnp manager is ready to process pnp callbacks as\r
397          * soon as the callback threads start running\r
398          */\r
399         ib_status = create_pnp( &gp_al_mgr->obj );\r
400         if( ib_status != IB_SUCCESS )\r
401         {\r
402                 gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
403                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
404                         ("al_pnp_create failed with %s.\n", ib_get_err_str(ib_status)) );\r
405                 return ib_status;\r
406         }\r
407 \r
408         /* Initialize the AL resource manager. */\r
409         ib_status = create_res_mgr( &gp_al_mgr->obj );\r
410         if( ib_status != IB_SUCCESS )\r
411         {\r
412                 gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
413                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
414                         ("create_res_mgr failed with %s.\n", ib_get_err_str(ib_status)) );\r
415                 return ib_status;\r
416         }\r
417 \r
418         /* Initialize the AL SA request manager. */\r
419         ib_status = create_sa_req_mgr( &gp_al_mgr->obj );\r
420         if( ib_status != IB_SUCCESS )\r
421         {\r
422                 gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
423                 CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,\r
424                         ("create_sa_req_mgr failed with %s.\n", ib_get_err_str(ib_status)) );\r
425                 return ib_status;\r
426         }\r
427 \r
428         /* Initialize CM */\r
429         ib_status = create_cep_mgr( &gp_al_mgr->obj );\r
430         if( ib_status != IB_SUCCESS )\r
431         {\r
432                 gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );\r
433                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
434                         ("create_cm_mgr failed, status = 0x%x.\n", ib_status) );\r
435                 return ib_status;\r
436         }\r
437 \r
438         cl_status = cl_event_init( &gp_al_mgr->ual_mgr.sync_event, FALSE );\r
439         if( cl_status != CL_SUCCESS )\r
440         {\r
441                 gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
442                 return ib_convert_cl_status( cl_status );\r
443         }\r
444 \r
445         /* Everything is ready now.  Issue the first callback requests. */\r
446         if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_misc_file, UAL_GET_MISC_CB_INFO,\r
447                 NULL, 0,\r
448                 &gp_al_mgr->ual_mgr.misc_cb_info, sizeof(misc_cb_ioctl_info_t),\r
449                 NULL, &gp_al_mgr->ual_mgr.misc_ov ) )\r
450         {\r
451                 if( GetLastError() != ERROR_IO_PENDING )\r
452                 {\r
453                         AL_TRACE_EXIT( AL_DBG_ERROR,\r
454                                 ("DeviceIoControl for misc callback request returned %d.\n",\r
455                                 GetLastError()) );\r
456                         gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
457                         return IB_ERROR;\r
458                 }\r
459         }\r
460 \r
461         //if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO,\r
462         //      NULL, 0,\r
463         //      &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t),\r
464         //      NULL, &gp_al_mgr->ual_mgr.cm_ov ) )\r
465         //{\r
466         //      if( GetLastError() != ERROR_IO_PENDING )\r
467         //      {\r
468         //              AL_TRACE_EXIT( AL_DBG_ERROR,\r
469         //                      ("DeviceIoControl for CM callback request returned %d.\n",\r
470         //                      GetLastError()) );\r
471         //              gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
472         //              return IB_ERROR;\r
473         //      }\r
474         //}\r
475 \r
476         if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cq_file, UAL_GET_COMP_CB_INFO,\r
477                 NULL, 0,\r
478                 &gp_al_mgr->ual_mgr.comp_cb_info, sizeof(comp_cb_ioctl_info_t),\r
479                 NULL, &gp_al_mgr->ual_mgr.cq_ov ) )\r
480         {\r
481                 if( GetLastError() != ERROR_IO_PENDING )\r
482                 {\r
483                         AL_TRACE_EXIT( AL_DBG_ERROR,\r
484                                 ("DeviceIoControl for CM callback request returned %d.\n",\r
485                                 GetLastError()) );\r
486                         gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
487                         return IB_ERROR;\r
488                 }\r
489         }\r
490 \r
491         /*\r
492          * Wait until the associated kernel PnP registration completes.  This\r
493          * indicates that all known CAs have been reported to user-space\r
494          * and are being processed by the PnP manager.\r
495          */\r
496 #ifdef _DEBUG_\r
497         cl_status = cl_event_wait_on( &gp_al_mgr->ual_mgr.sync_event,\r
498                 EVENT_NO_TIMEOUT, TRUE );\r
499         CL_ASSERT ( cl_status == CL_SUCCESS);\r
500 #else\r
501         cl_status = cl_event_wait_on( &gp_al_mgr->ual_mgr.sync_event,\r
502                 EVENT_NO_TIMEOUT, TRUE );\r
503 #endif\r
504         \r
505         if( cl_status != CL_SUCCESS )\r
506         {\r
507                 gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);\r
508                 return ib_convert_cl_status( cl_status );\r
509         }\r
510         /* Release the reference taken in init_al_obj. */\r
511         deref_al_obj( &gp_al_mgr->obj );\r
512         \r
513         CL_EXIT( AL_DBG_MGR, g_al_dbg_lvl );\r
514         return IB_SUCCESS;\r
515 }\r
516 \r
517 \r
518 \r
519 /*\r
520  * UAL thread start routines.\r
521  */\r
522 //\r
523 //\r
524 ///* Thread to process the asynchronous CM notifications */\r
525 //void\r
526 //cm_cb(\r
527 //      IN                              DWORD                                           error_code,\r
528 //      IN                              DWORD                                           ret_bytes,\r
529 //      IN                              LPOVERLAPPED                            p_ov )\r
530 //{\r
531 //      AL_ENTER( AL_DBG_CM );\r
532 //\r
533 //      UNUSED_PARAM( p_ov );\r
534 //\r
535 //      if( !error_code && ret_bytes )\r
536 //      {\r
537 //              /* Check the record type and adjust the pointers */\r
538 //              /*      TBD     */\r
539 //              __process_cm_cb( &gp_al_mgr->ual_mgr.cm_cb_info );\r
540 //      }\r
541 //      \r
542 //      if( error_code != ERROR_OPERATION_ABORTED )\r
543 //      {\r
544 //              if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO,\r
545 //                      NULL, 0,\r
546 //                      &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t),\r
547 //                      NULL, &gp_al_mgr->ual_mgr.cm_ov ) )\r
548 //              {\r
549 //                      if( GetLastError() != ERROR_IO_PENDING )\r
550 //                      {\r
551 //                              AL_TRACE_EXIT( AL_DBG_ERROR,\r
552 //                                      ("DeviceIoControl for CM callback request returned %d.\n",\r
553 //                                      GetLastError()) );\r
554 //                      }\r
555 //              }\r
556 //      }\r
557 //\r
558 //      AL_EXIT( AL_DBG_CM );\r
559 //}\r
560 \r
561 \r
562 \r
563 //static void\r
564 //__process_cm_cb(\r
565 //      IN              cm_cb_ioctl_info_t*                     p_cm_cb_info)\r
566 //{\r
567 //      switch( p_cm_cb_info->rec_type)\r
568 //      {\r
569 //      case CM_REQ_REC:\r
570 //      {\r
571 //              struct _cm_req_cb_ioctl_rec *p_ioctl_rec =\r
572 //                              &p_cm_cb_info->ioctl_rec.cm_req_cb_ioctl_rec;\r
573 //\r
574 //              if (p_ioctl_rec->req_rec.qp_type == IB_QPT_UNRELIABLE_DGRM)\r
575 //              {\r
576 //                      p_ioctl_rec->req_rec.p_req_pdata =\r
577 //                              (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.sidr_req_pdata;\r
578 //              }\r
579 //              else\r
580 //              {\r
581 //                      p_ioctl_rec->req_rec.p_req_pdata =\r
582 //                              (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.req_pdata;\r
583 //              }\r
584 //              ual_cm_req_cb( &p_ioctl_rec->req_rec, &p_ioctl_rec->qp_mod_rtr,\r
585 //                      &p_ioctl_rec->qp_mod_rts, p_ioctl_rec->timeout_ms );\r
586 //              break;\r
587 //      }\r
588 //      case CM_REP_REC:\r
589 //      {\r
590 //              struct _cm_rep_cb_ioctl_rec *p_ioctl_rec =\r
591 //                              &p_cm_cb_info->ioctl_rec.cm_rep_cb_ioctl_rec;\r
592 //\r
593 //              if (p_ioctl_rec->rep_rec.qp_type == IB_QPT_UNRELIABLE_DGRM)\r
594 //              {\r
595 //                      p_ioctl_rec->rep_rec.p_rep_pdata =\r
596 //                              (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.sidr_rep_pdata;\r
597 //              }\r
598 //              else\r
599 //              {\r
600 //                      p_ioctl_rec->rep_rec.p_rep_pdata =\r
601 //                              (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.rep_pdata;\r
602 //              }\r
603 //              ual_cm_rep_cb( &p_ioctl_rec->rep_rec, &p_ioctl_rec->qp_mod_rtr,\r
604 //                      &p_ioctl_rec->qp_mod_rts );\r
605 //              break;\r
606 //      }\r
607 //      case CM_RTU_REC:\r
608 //      {\r
609 //              struct _cm_rtu_cb_ioctl_rec *p_ioctl_rec =\r
610 //                              &p_cm_cb_info->ioctl_rec.cm_rtu_cb_ioctl_rec;\r
611 //\r
612 //              p_ioctl_rec->rtu_rec.p_rtu_pdata = (uint8_t *)&p_ioctl_rec->rtu_pdata;\r
613 //              ual_cm_rtu_cb( &p_ioctl_rec->rtu_rec );\r
614 //              break;\r
615 //      }\r
616 //      case CM_REJ_REC:\r
617 //      {\r
618 //              struct _cm_rej_cb_ioctl_rec *p_ioctl_rec =\r
619 //                              &p_cm_cb_info->ioctl_rec.cm_rej_cb_ioctl_rec;\r
620 //\r
621 //              p_ioctl_rec->rej_rec.p_rej_pdata = \r
622 //                      (uint8_t*)&p_ioctl_rec->rej_pdata;\r
623 //              p_ioctl_rec->rej_rec.p_ari =\r
624 //                      (uint8_t*)&p_ioctl_rec->ari_pdata;\r
625 //              ual_cm_rej_cb( &p_ioctl_rec->rej_rec );\r
626 //              break;\r
627 //      }\r
628 //      case CM_MRA_REC:\r
629 //      {\r
630 //              struct _cm_mra_cb_ioctl_rec *p_ioctl_rec =\r
631 //                              &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec;\r
632 //\r
633 //              p_ioctl_rec->mra_rec.p_mra_pdata =\r
634 //                      (uint8_t*)&p_ioctl_rec->mra_pdata;\r
635 //              ual_cm_mra_cb( &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec.mra_rec );\r
636 //              break;\r
637 //      }\r
638 //      case CM_LAP_REC:\r
639 //      {\r
640 //              struct _cm_lap_cb_ioctl_rec *p_ioctl_rec =\r
641 //                              &p_cm_cb_info->ioctl_rec.cm_lap_cb_ioctl_rec;\r
642 //\r
643 //              p_ioctl_rec->lap_rec.p_lap_pdata =\r
644 //                      (uint8_t *)&p_ioctl_rec->lap_pdata;\r
645 //              ual_cm_lap_cb( &p_ioctl_rec->lap_rec );\r
646 //              break;\r
647 //      }\r
648 //      case CM_APR_REC:\r
649 //      {\r
650 //              struct _cm_apr_cb_ioctl_rec *p_ioctl_rec =\r
651 //                              &p_cm_cb_info->ioctl_rec.cm_apr_cb_ioctl_rec;\r
652 //\r
653 //              p_ioctl_rec->apr_rec.p_apr_pdata =\r
654 //                      (uint8_t*)&p_ioctl_rec->apr_pdata;\r
655 //              p_ioctl_rec->apr_rec.p_info =\r
656 //                      (uint8_t*)&p_ioctl_rec->apr_info;\r
657 //              ual_cm_apr_cb( &p_ioctl_rec->apr_rec );\r
658 //              break;\r
659 //      }\r
660 //      case CM_DREQ_REC:\r
661 //      {\r
662 //              struct _cm_dreq_cb_ioctl_rec *p_ioctl_rec =\r
663 //                              &p_cm_cb_info->ioctl_rec.cm_dreq_cb_ioctl_rec;\r
664 //\r
665 //              p_ioctl_rec->dreq_rec.p_dreq_pdata =\r
666 //                      (uint8_t*)&p_ioctl_rec->dreq_pdata;\r
667 //              ual_cm_dreq_cb( &p_ioctl_rec->dreq_rec );\r
668 //              break;\r
669 //      }\r
670 //      case CM_DREP_REC:\r
671 //      {\r
672 //              struct _cm_drep_cb_ioctl_rec *p_ioctl_rec =\r
673 //                              &p_cm_cb_info->ioctl_rec.cm_drep_cb_ioctl_rec;\r
674 //\r
675 //              p_ioctl_rec->drep_rec.p_drep_pdata =\r
676 //                      (uint8_t*)&p_ioctl_rec->drep_pdata;\r
677 //              ual_cm_drep_cb( &p_ioctl_rec->drep_rec );\r
678 //              break;\r
679 //      }\r
680 //      default:\r
681 //              /* Unknown record type - just return */\r
682 //              break;\r
683 //      }\r
684 //}\r
685 //\r
686 //\r
687 //\r
/*
 * Dispatch one CQ completion notification from the kernel proxy to the
 * owning user-mode CQ.
 *
 * Race guard (the fix this file exists for): take a reference on the CQ's
 * AL object before invoking the user callback.  ref_al_obj returns the
 * resulting reference count; a value of 1 means our reference is the only
 * one left - i.e. the CQ is being (or has been) destroyed concurrently -
 * so the user callback is skipped.  The matching deref drops our reference
 * either way, allowing a racing destroy to proceed.
 */
static void
__process_comp_cb(
	IN		comp_cb_ioctl_info_t*			p_comp_cb_info )
{
	ib_cq_handle_t	h_cq;
	/* The kernel stores our user-mode CQ handle as the CQ context. */
	CL_ASSERT( p_comp_cb_info->cq_context );
	h_cq = (ib_cq_handle_t)(p_comp_cb_info->cq_context);

	if( ref_al_obj( &h_cq->obj ) > 1 )
	{
		CL_ASSERT( h_cq->pfn_user_comp_cb );
		h_cq->pfn_user_comp_cb( h_cq, (void*)h_cq->obj.context );
	}
	deref_al_obj( &h_cq->obj );
}
703 \r
704 \r
705 \r
706 /* Thread to process the asynchronous completion notifications */\r
707 void\r
708 cq_cb(\r
709         IN                              DWORD                                           error_code,\r
710         IN                              DWORD                                           ret_bytes,\r
711         IN                              LPOVERLAPPED                            p_ov )\r
712 {\r
713         AL_ENTER( AL_DBG_CQ );\r
714 \r
715         UNUSED_PARAM( p_ov );\r
716 \r
717         if( !error_code && ret_bytes )\r
718         {\r
719                 /* Check the record type and adjust the pointers */\r
720                 /*      TBD     */\r
721                 __process_comp_cb( &gp_al_mgr->ual_mgr.comp_cb_info );\r
722         }\r
723         \r
724         if( error_code != ERROR_OPERATION_ABORTED )\r
725         {\r
726                 if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cq_file, UAL_GET_COMP_CB_INFO,\r
727                         NULL, 0,\r
728                         &gp_al_mgr->ual_mgr.comp_cb_info, sizeof(comp_cb_ioctl_info_t),\r
729                         NULL, &gp_al_mgr->ual_mgr.cq_ov ) )\r
730                 {\r
731                         if( GetLastError() != ERROR_IO_PENDING )\r
732                         {\r
733                                 AL_TRACE_EXIT( AL_DBG_ERROR,\r
734                                         ("DeviceIoControl for CM callback request returned %d.\n",\r
735                                         GetLastError()) );\r
736                         }\r
737                 }\r
738         }\r
739 \r
740         AL_EXIT( AL_DBG_CQ );\r
741 }\r
742 \r
743 \r
744 \r
/*
 * Completion-port callback for miscellaneous asynchronous events
 * (CA/QP/CQ errors and other non-completion notifications).
 *
 * error_code - Win32 error for the completed overlapped request (0 = OK).
 * ret_bytes  - bytes returned; non-zero means misc_cb_info is valid.
 * p_ov       - the overlapped structure (unused; a single request is
 *              outstanding at a time).
 *
 * Processes the received record, then re-issues the UAL_GET_MISC_CB_INFO
 * request unless the previous one was aborted (handle closed at shutdown).
 */
void
misc_cb(
	IN				DWORD						error_code,
	IN				DWORD						ret_bytes,
	IN				LPOVERLAPPED				p_ov )
{
	AL_ENTER( AL_DBG_MGR );

	UNUSED_PARAM( p_ov );

	if( !error_code && ret_bytes )
	{
		/* Check the record type and adjust the pointers */
		/*	TBD	*/
		__process_misc_cb( &gp_al_mgr->ual_mgr.misc_cb_info );
	}

	if( error_code != ERROR_OPERATION_ABORTED )
	{
		/* Issue the next request. */
		if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_misc_file, UAL_GET_MISC_CB_INFO,
			NULL, 0,
			&gp_al_mgr->ual_mgr.misc_cb_info, sizeof(misc_cb_ioctl_info_t),
			NULL, &gp_al_mgr->ual_mgr.misc_ov ) )
		{
			/* ERROR_IO_PENDING just means the request is in flight. */
			if( GetLastError() != ERROR_IO_PENDING )
			{
				AL_TRACE_EXIT( AL_DBG_ERROR,
					("DeviceIoControl for misc callback request returned %d.\n",
					GetLastError()) );
			}
		}
	}

	AL_EXIT( AL_DBG_MGR );
}
782 \r
783 \r
784 \r
/*
 * Demultiplex one miscellaneous callback record returned by the kernel
 * proxy (via UAL_GET_MISC_CB_INFO) and perform the matching user-mode
 * processing: async error events, multicast/report records, MAD send and
 * receive completions, and PnP events.
 *
 * p_misc_cb_info - the record delivered by the kernel; pointers embedded
 *                  in it are kernel-side values and must be fixed up
 *                  before any user callback sees them.
 */
void
__process_misc_cb(
	IN		misc_cb_ioctl_info_t*		p_misc_cb_info )
{
	switch( p_misc_cb_info->rec_type )
	{
	case CA_ERROR_REC:
	case QP_ERROR_REC:
	case CQ_ERROR_REC:
	{
		/* Initiate user-mode asynchronous event processing. */
		ci_ca_async_event( &p_misc_cb_info->ioctl_rec.event_rec );
		break;
	}
	case MCAST_REC:
	{
		ib_mcast_rec_t			mcast_rec;
		/* Copy the record, then repoint p_member_rec at the copy embedded
		 * in the IOCTL record — the original pointer is not valid here. */
		cl_memcpy((void *)&mcast_rec,
					(void*)&p_misc_cb_info->ioctl_rec.mcast_cb_ioctl_rec,
					sizeof(ib_mcast_rec_t));
		mcast_rec.p_member_rec = 
			&p_misc_cb_info->ioctl_rec.mcast_cb_ioctl_rec.member_rec;
		/******* Call the cb function for app callback *****/
		break;
	}
	case MAD_SEND_REC:
	{
		/* We got a send completion. */
		ib_mad_element_t			*p_element;

		/* The service context round-trips through the kernel as the
		 * MAD service handle. */
		ib_mad_svc_handle_t			h_mad_svc = (ib_mad_svc_handle_t)
			p_misc_cb_info->ioctl_rec.mad_send_cb_ioctl_rec.mad_svc_context;

		/* Copy the data to the user's element. */
		p_element = p_misc_cb_info->ioctl_rec.mad_send_cb_ioctl_rec.p_um_mad;
		/* Only update the status if a receive wasn't failed. */
		if( p_element->status != IB_WCS_TIMEOUT_RETRY_ERR )
		{
			p_element->status =
				p_misc_cb_info->ioctl_rec.mad_send_cb_ioctl_rec.wc_status;
		}
		p_element->p_next = NULL;

		/* Now the user mad_elements should have the right data
		 * Make the client callback
		 */
		h_mad_svc->pfn_user_send_cb( h_mad_svc,
			(void*)h_mad_svc->obj.context, p_element );
		break;
	}
	case MAD_RECV_REC:
	{
		/*
		 * We've receive a MAD.  We need to get a user-mode MAD of the
		 * correct size, then send it down to retrieve the received MAD.
		 */
		ual_mad_recv_ioctl_t	ioctl_buf;
		uintn_t					bytes_ret;
		cl_status_t				cl_status;
		ib_api_status_t			status;
		ib_mad_svc_handle_t		h_mad_svc;
		ib_mad_element_t		*p_mad = NULL;
		ib_mad_element_t		*p_send_mad;
		ib_mad_t				*p_mad_buf = NULL;
		ib_grh_t				*p_grh = NULL;

		h_mad_svc = (ib_mad_svc_handle_t)
			p_misc_cb_info->ioctl_rec.mad_recv_cb_ioctl_rec.mad_svc_context;

		/* Send MAD this receive matches, if any (request/response pairing). */
		p_send_mad =
			p_misc_cb_info->ioctl_rec.mad_recv_cb_ioctl_rec.p_send_mad;

		cl_memclr( &ioctl_buf, sizeof(ioctl_buf) );

		/*
		 * Get a MAD large enough to receive the MAD.  If we can't get a
		 * MAD, we still perform the IOCTL so that the kernel will return
		 * the MAD to its pool, resulting in a dropped MAD.
		 */
		status = ib_get_mad( h_mad_svc->obj.p_ci_ca->pool_key,
			p_misc_cb_info->ioctl_rec.mad_recv_cb_ioctl_rec.elem_size,
			&p_mad );

		/*
		 * Note that we set any associated send MAD's status here
		 * in case of failure.
		 */
		if( status == IB_SUCCESS )
			al_handoff_mad( (ib_al_handle_t)h_mad_svc->obj.h_al, p_mad );
		else if( p_send_mad )
			p_send_mad->status = IB_WCS_TIMEOUT_RETRY_ERR;

		ioctl_buf.in.p_user_mad = p_mad;

		if( p_mad )
		{
			/* Save off the pointers since the proxy overwrites the element. */
			p_mad_buf = p_mad->p_mad_buf;
			p_grh = p_mad->p_grh;

			ioctl_buf.in.p_mad_buf = p_mad_buf;
			ioctl_buf.in.p_grh = p_grh;
		}
		ioctl_buf.in.h_mad = p_misc_cb_info->ioctl_rec.mad_recv_cb_ioctl_rec.h_mad;

		/* Retrieve the received MAD contents from the kernel (or have the
		 * kernel free its copy if we could not allocate one). */
		cl_status = do_al_dev_ioctl( UAL_MAD_RECV_COMP,
			&ioctl_buf.in, sizeof(ioctl_buf.in),
			&ioctl_buf.out, sizeof(ioctl_buf.out),
			&bytes_ret );
		if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) )
		{
			AL_TRACE( AL_DBG_ERROR,
				("UAL_MAD_RECV_COMP IOCTL returned %s.\n",
				CL_STATUS_MSG(cl_status)) );
			status = IB_ERROR;
		}
		else
		{
			status = ioctl_buf.out.status;
		}
		if( p_mad )
		{
			if( status == IB_SUCCESS )
			{
				/* We need to reset MAD data pointers. */
				p_mad->p_mad_buf = p_mad_buf;
				p_mad->p_grh = p_grh;
				/* Restore the client's send context1 */
				if( p_send_mad )
					p_mad->send_context1 = (void* __ptr64)p_send_mad->context1;
		
				h_mad_svc->pfn_user_recv_cb( h_mad_svc,
					(void*)h_mad_svc->obj.context, p_mad );
			}
			else
			{
				/* Retrieval failed — return the user-mode MAD to its pool. */
				ib_put_mad( p_mad );
			}
		}
		break;
	}
	case SVC_REG_REC:
	{
		/* No user-mode processing required. */
		break;
	}
	case QUERY_REC:
	{
		/* No user-mode processing required. */
		break;
	}
	case PNP_REC:
	{
		ib_pnp_event_t					pnp_event;
		ib_net64_t						ca_guid;
		al_ci_ca_t						*p_ci_ca;
		ual_ca_attr_info_ioctl_t		attr_ioctl;
		uintn_t							bytes_ret;
		cl_status_t						cl_status;
		ib_ca_attr_t					*p_old_ca_attr;

		pnp_event = p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_event;
		ca_guid = p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_info.ca.ca_guid;

		switch( pnp_event )
		{
		case IB_PNP_CA_ADD:
			/* Create a new CI CA. */
			create_ci_ca( gh_al, &gp_al_mgr->obj,
				p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_info.ca.ca_guid );
			break;

		case IB_PNP_CA_REMOVE:
			/* Destroy the CI CA.  Take a reference under the manager lock
			 * so the object can't vanish before pfn_destroy runs. */
			cl_spinlock_acquire( &gp_al_mgr->obj.lock );
			p_ci_ca = find_ci_ca( ca_guid );
			if( !p_ci_ca )
			{
				cl_spinlock_release( &gp_al_mgr->obj.lock );
				break;
			}
			ref_al_obj( &p_ci_ca->obj );
			cl_spinlock_release( &gp_al_mgr->obj.lock );

			/* pfn_destroy consumes the reference taken above. */
			p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );
			break;

		case IB_PNP_PORT_ADD:
		case IB_PNP_PORT_REMOVE:
			/* Should never get these. */
			break;

		case IB_PNP_REG_COMPLETE:
			/*
			 * Signal that the kernel PnP registration is done, indicating
			 * that the current system state has been reported to the user.
			 */
			cl_event_signal( &gp_al_mgr->ual_mgr.sync_event );
			break;

		default:
			/* Process the PnP event - most likely a port change event. */
			cl_spinlock_acquire( &gp_al_mgr->obj.lock );
			p_ci_ca = find_ci_ca( ca_guid );
			if( !p_ci_ca )
			{
				cl_spinlock_release( &gp_al_mgr->obj.lock );
				break;
			}
			ref_al_obj( &p_ci_ca->obj );
			cl_spinlock_release( &gp_al_mgr->obj.lock );

			/* Refresh the cached CA attributes; free the superseded copy. */
			ci_ca_update_attr( p_ci_ca, &p_old_ca_attr );
			if( p_old_ca_attr )
				cl_free( p_old_ca_attr );

			/*
			 * We need to fetch the cached CA attributes from the proxy.  We
			 * always send down the IOCTL to free the cached attributes.
			 */
			//p_ca_attr = (ib_ca_attr_t*)cl_zalloc(
			//	p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_info.ca.size );
			attr_ioctl.in.p_ca_attr = NULL;

			/* Get the cached attributes from the kernel. */
			attr_ioctl.in.h_ca_attr =
				p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_info.ca.h_ca_attr;
			cl_status = do_al_dev_ioctl( UAL_GET_CA_ATTR_INFO,
				&attr_ioctl, sizeof(attr_ioctl.in),
				&attr_ioctl, sizeof(attr_ioctl.out),
				&bytes_ret );

			///* Notify PnP manager of the changes if we have them. */
			//if( p_ca_attr )
			//{
			//	if( cl_status == CL_SUCCESS &&
			//		attr_ioctl.out.status == IB_SUCCESS )
			//	{
			//		pnp_ca_change( p_ci_ca, p_ca_attr );
			//	}
			//	else
			//	{
			//	}

			//	cl_free( p_ca_attr );
			//}
			/* Dereference the CA now. */
			deref_al_obj( &p_ci_ca->obj );
			break;
		}

		break;	/* For PNP_EVENT_REC */
	}
	case SUB_REC:
	{
		/******* TBD *******/
		/* No adjustment needed */
		break;
	}
	case REPORT_REC:
	{
		ib_report_rec_t			report_rec;
		/* Copy the record and repoint p_notice at the embedded notice. */
		cl_memcpy((void *)&report_rec,
					(void*)&p_misc_cb_info->ioctl_rec.report_cb_ioctl_rec,
					sizeof(ib_report_rec_t));
		report_rec.p_notice = &p_misc_cb_info->ioctl_rec.report_cb_ioctl_rec.notice;
		/******* Call the cb function for app callback *****/
		break;
	}
	default:
		CL_ASSERT (0);
		break;
	}
}
1057 \r
1058 \r
1059 \r
1060 /*\r
1061  * Create a new instance of the access layer.\r
1062  */\r
1063 ib_api_status_t\r
1064 ib_open_al(\r
1065                 OUT                     ib_al_handle_t* const           ph_al )\r
1066 {\r
1067         ib_api_status_t         status;\r
1068 \r
1069         cl_mutex_acquire( &g_open_close_mutex );\r
1070         status = do_open_al( ph_al );\r
1071         if( status == IB_SUCCESS )\r
1072         {\r
1073                 /*\r
1074                  * Bump the open count.  Note that we only do this for external\r
1075                  * calls, not the internal ib_open_al call.\r
1076                  */\r
1077                 cl_atomic_inc( &g_open_cnt );\r
1078         }\r
1079         cl_mutex_release( &g_open_close_mutex );\r
1080         return status;\r
1081 }\r
1082 \r
1083 \r
1084 ib_api_status_t\r
1085 ib_close_al(\r
1086         IN              const   ib_al_handle_t                          h_al )\r
1087 {\r
1088         ib_api_status_t         status;\r
1089 \r
1090         cl_mutex_acquire( &g_open_close_mutex );\r
1091         status = do_close_al( h_al );\r
1092         if( status == IB_SUCCESS && !cl_atomic_dec( &g_open_cnt ) )\r
1093                 al_cleanup();\r
1094         cl_mutex_release( &g_open_close_mutex );\r
1095         return status;\r
1096 }\r
1097 \r
1098 \r
/*
 * Worker for ib_open_al: perform one-time AL initialization if needed,
 * then allocate, construct, and register a new AL instance object.
 *
 * ph_al - receives the new AL handle on IB_SUCCESS.
 *
 * Returns IB_INVALID_PARAMETER, IB_INSUFFICIENT_MEMORY, or the status
 * of al_initialize/init_al_obj on failure.
 */
ib_api_status_t
do_open_al(
		OUT			ib_al_handle_t* const		ph_al )
{
	ib_al_handle_t			h_al;
	ib_api_status_t			status;

	CL_ENTER( AL_DBG_MGR, g_al_dbg_lvl );

	if( !ph_al )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	/*
	 * Initialize AL if needed.
	 * This should only occur on the first ib_open_al call.
	 */
	if( !gp_al_mgr )
	{
		status = al_initialize();
		if( status != IB_SUCCESS )
		{
			/* Undo any partial initialization before reporting failure. */
			al_cleanup();
			CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
				("ual_init failed, status = %s\n", ib_get_err_str(status) ) );
			return status;
		}
		/*
		* Wait for 50ms before returning. This ensures the pnp events are
		* delivered before any special qp services are invoked.
		*/
		cl_thread_suspend( 50 );
	}

	/* Allocate an access layer instance. */
	h_al = (ib_al_handle_t)cl_zalloc( sizeof( ib_al_t ) );
	if( !h_al )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("cl_malloc failed\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	/* Construct the instance. */
	construct_al_obj( &h_al->obj, AL_OBJ_TYPE_H_AL );
	cl_spinlock_construct( &h_al->mad_lock );
	cl_qlist_init( &h_al->mad_list );
	cl_qlist_init( &h_al->key_list );
	cl_qlist_init( &h_al->query_list );
	cl_qlist_init( &h_al->cep_list );

	if( cl_spinlock_init( &h_al->mad_lock ) != CL_SUCCESS )
	{
		/* Object is only constructed, not initialized: free_al directly. */
		free_al( &h_al->obj );
		AL_EXIT( AL_DBG_ERROR );
		return IB_ERROR;
	}

	/* Initialize the base object. */
	status = init_al_obj( &h_al->obj, NULL, FALSE,
		destroying_al, NULL, free_al );
	if( status != IB_SUCCESS )
	{
		free_al( &h_al->obj );
		CL_EXIT( AL_DBG_MGR, g_al_dbg_lvl );
		return status;
	}
	attach_al_obj( &gp_al_mgr->obj, &h_al->obj );

	/*
	 * Self reference the AL instance so that all attached objects
	 * insert themselve in the instance's handle manager automatically.
	 */
	h_al->obj.h_al = h_al;

	/*
	 * We only maintain a single AL instance in the kernel.  It is created
	 * automatically when the device is opened.
	 */
	if( !gh_al )
	{
		/* Save a copy of the implicit al handle in a global */
		gh_al = h_al;
	}

	/* Return UAL's handle to caller */
	*ph_al = (ib_al_handle_t)h_al;

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &h_al->obj );

	CL_EXIT( AL_DBG_MGR, g_al_dbg_lvl );
	return IB_SUCCESS;
}
1194 \r
1195 \r
/*
 * Dedicated callback thread for UAL.
 *
 * Blocks on the AL completion port and demultiplexes kernel
 * notifications to the per-channel handlers (CM, CQ, misc, PnP, SA)
 * keyed by the completion key each file handle was bound with.
 *
 * Exit condition: a successful dequeue with a NULL overlapped pointer
 * (posted explicitly at shutdown) breaks the loop; the thread then
 * terminates itself via ExitThread.
 */
static DWORD WINAPI
__cb_thread_routine(
	IN				void						*context )
{
	DWORD		ret_bytes, err;
	OVERLAPPED	*p_ov;
	ULONG_PTR	key;
	BOOL		ret;

	AL_ENTER( AL_DBG_MGR );

	UNUSED_PARAM( context );

	do
	{
		ret = GetQueuedCompletionStatus( gp_al_mgr->ual_mgr.h_cb_port,
			&ret_bytes, &key, &p_ov, INFINITE );

		/* Successful dequeue with no overlapped = shutdown signal. */
		if( ret && !p_ov )
			break;

		/* On failure with a non-NULL p_ov, the I/O itself failed;
		 * pass the error through to the handler. */
		if( !ret )
			err = GetLastError();
		else
			err = 0;

		CL_ASSERT( p_ov );
		switch( key )
		{
		case UAL_BIND_CM:
			//DebugBreak();
			/* CM callback. */
			cm_cb( err, ret_bytes, p_ov );
			break;

		case UAL_BIND_CQ:
			/* CQ completion callback. */
			cq_cb( err, ret_bytes, p_ov );
			break;

		case UAL_BIND_MISC:
			/* Misc callback. */
			misc_cb( err, ret_bytes, p_ov );
			break;

		case UAL_BIND_PNP:
			/* PnP callback. */
			pnp_cb( err, ret_bytes, p_ov );
			break;

		case UAL_BIND_SA:
			/* SA callback. */
			sa_req_cb( err, ret_bytes, p_ov );
			break;

		case UAL_BIND_DESTROY:
			/* Release the reference held for a deferred object destroy.
			 * NOTE(review): p_ov was already asserted non-NULL above, so
			 * this guard is purely defensive. */
			if( p_ov )
				deref_al_obj( (al_obj_t*)p_ov->Pointer );
			break;

		default:
			CL_ASSERT( key == UAL_BIND_CM || key == UAL_BIND_CQ ||
				key == UAL_BIND_MISC || key == UAL_BIND_PNP ||
				key == UAL_BIND_SA || key == UAL_BIND_DESTROY );
			break;
		}
	} while( !ret || p_ov );

	AL_EXIT( AL_DBG_MGR );
	/* ExitThread never returns, hence no return statement. */
	ExitThread( 0 );
}