/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <iba/ib_al.h>
\r
34 #include <complib/cl_qmap.h>
\r
35 #include <complib/cl_memory.h>
\r
36 #include <complib/cl_qpool.h>
\r
37 #include <complib/cl_passivelock.h>
\r
38 #include <complib/cl_vector.h>
\r
39 #include <complib/cl_spinlock.h>
\r
43 #include "al_common.h"
\r
45 #include "al_debug.h"
\r
46 #if defined(EVENT_TRACING)
\r
50 #include "al_dev.tmh"
\r
55 #include "al_proxy.h"
\r
61 IN al_dev_open_context_t *p_context );
\r
64 __proxy_cancel_cblists(
\r
65 IN al_dev_open_context_t *p_context );
\r
71 __construct_open_context(
\r
72 IN al_dev_open_context_t *p_context )
\r
74 cl_event_construct( &p_context->close_event );
\r
76 cl_qpool_construct( &p_context->cb_pool );
\r
77 cl_spinlock_construct( &p_context->cb_pool_lock );
\r
79 cl_qlist_init( &p_context->cm_cb_list );
\r
80 cl_qlist_init( &p_context->comp_cb_list );
\r
81 cl_qlist_init( &p_context->misc_cb_list );
\r
82 cl_spinlock_construct( &p_context->cb_lock );
\r
83 cl_mutex_construct( &p_context->pnp_mutex );
\r
89 * Initialize all objects used by the per client open context.
\r
92 __init_open_context(
\r
93 IN al_dev_open_context_t *p_context )
\r
95 cl_status_t cl_status;
\r
97 cl_status = cl_event_init( &p_context->close_event, FALSE );
\r
98 if( cl_status != CL_SUCCESS )
\r
101 /* Allocate pool for storing callback info or requests. */
\r
102 cl_status = cl_qpool_init( &p_context->cb_pool,
\r
103 AL_CB_POOL_START_SIZE, 0, AL_CB_POOL_GROW_SIZE,
\r
104 sizeof(al_proxy_cb_info_t), NULL, NULL, NULL );
\r
105 if( cl_status != CL_SUCCESS )
\r
108 cl_status = cl_spinlock_init( &p_context->cb_pool_lock );
\r
109 if( cl_status != CL_SUCCESS )
\r
112 cl_status = cl_spinlock_init( &p_context->cb_lock );
\r
113 if( cl_status != CL_SUCCESS )
\r
116 cl_status = cl_mutex_init( &p_context->pnp_mutex );
\r
117 if( cl_status != CL_SUCCESS )
\r
126 __destroy_open_context(
\r
127 IN al_dev_open_context_t *p_context )
\r
129 cl_event_destroy( &p_context->close_event );
\r
131 cl_qpool_destroy( &p_context->cb_pool );
\r
132 cl_spinlock_destroy( &p_context->cb_pool_lock );
\r
133 cl_spinlock_destroy( &p_context->cb_lock );
\r
134 cl_mutex_destroy( &p_context->pnp_mutex );
\r
141 IN cl_ioctl_handle_t h_ioctl )
\r
143 al_dev_open_context_t *p_context;
\r
144 ib_api_status_t status;
\r
145 cl_status_t cl_status;
\r
146 IO_STACK_LOCATION *p_io_stack;
\r
149 AL_ENTER( AL_DBG_DEV );
\r
151 p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
\r
153 p_ver = cl_ioctl_in_buf( h_ioctl );
\r
155 if( p_io_stack->FileObject->FsContext ||
\r
156 cl_ioctl_in_size( h_ioctl ) != sizeof(ULONG) ||
\r
158 cl_ioctl_out_size( h_ioctl ) )
\r
160 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
161 ("context already exists or bad parameters.\n") );
\r
162 return CL_INVALID_PARAMETER;
\r
165 if( *p_ver != AL_IOCTL_VERSION )
\r
167 AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
168 ("Unsupported client version: %d\n", *p_ver) );
\r
169 return CL_INVALID_PARAMETER;
\r
172 /* Allocate the client's context structure. */
\r
173 p_context = (al_dev_open_context_t*)
\r
174 cl_zalloc( sizeof(al_dev_open_context_t) );
\r
177 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
178 ("cl_malloc( %d ) failed.\n", sizeof(al_dev_open_context_t)) );
\r
179 return CL_INSUFFICIENT_MEMORY;
\r
182 /* Construct the open context to allow destruction. */
\r
183 __construct_open_context( p_context );
\r
185 /* Initialize the open context elements. */
\r
186 cl_status = __init_open_context( p_context );
\r
187 if( cl_status != CL_SUCCESS )
\r
189 __destroy_open_context( p_context );
\r
193 /* Open an internal AL instance for this process. */
\r
194 status = ib_open_al( &p_context->h_al );
\r
195 if( status == IB_SUCCESS )
\r
197 /* Register for PnP events. */
\r
198 status = __proxy_reg_pnp( p_context );
\r
201 /* Make sure that we were able to open AL and register for PnP. */
\r
202 if( status == IB_SUCCESS )
\r
205 * Store the reference from the AL instance back to this
\r
206 * open context. This allows using the user-mode context
\r
207 * for resource creation.
\r
209 p_context->h_al->p_context = p_context;
\r
210 /* We successfully opened the device. */
\r
211 p_io_stack->FileObject->FsContext = p_context;
\r
215 __destroy_open_context( p_context );
\r
216 cl_status = CL_INSUFFICIENT_RESOURCES;
\r
219 AL_EXIT( AL_DBG_DEV );
\r
226 * To be called by al_dev_open(). This will register for PnP events
\r
227 * on behalf of user process (UAL). It uses the implicit global
\r
228 * al instance created by AL manager. PnP events are propagated
\r
229 * to UAL automatically from the time AL device is open till the
\r
232 static ib_api_status_t
\r
234 IN al_dev_open_context_t *p_context )
\r
236 ib_pnp_req_t pnp_req;
\r
237 ib_pnp_handle_t h_pnp;
\r
238 ib_api_status_t status;
\r
240 /* Register for PnP events. */
\r
241 cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
\r
242 pnp_req.pnp_class = IB_PNP_CA | IB_PNP_FLAG_REG_COMPLETE;
\r
243 pnp_req.pnp_context = p_context;
\r
244 pnp_req.pfn_pnp_cb = proxy_pnp_ca_cb;
\r
246 /* No need to track the registration. We'll deregister when closing AL. */
\r
247 status = ib_reg_pnp( p_context->h_al, &pnp_req, &h_pnp );
\r
248 if( status != IB_SUCCESS )
\r
251 /* Register for port events. */
\r
252 pnp_req.pfn_pnp_cb = proxy_pnp_port_cb;
\r
253 pnp_req.pnp_class = IB_PNP_PORT | IB_PNP_FLAG_REG_COMPLETE;
\r
254 status = ib_reg_pnp( p_context->h_al, &pnp_req, &h_pnp );
\r
262 * Cleanup the handle map. Remove all mappings. Perform all necessary
\r
266 __proxy_cleanup_map(
\r
267 IN al_dev_open_context_t *p_context )
\r
272 AL_ENTER( AL_DBG_DEV );
\r
274 cl_spinlock_acquire( &p_context->h_al->obj.lock );
\r
275 for( i = 0; i < cl_vector_get_size( &p_context->h_al->hdl_vector ); i++ )
\r
277 p_h = (al_handle_t*)
\r
278 cl_vector_get_ptr( &p_context->h_al->hdl_vector, i );
\r
280 switch( AL_BASE_TYPE( p_h->type ) )
\r
282 /* Return any MADs not reported to the user. */
\r
283 case AL_OBJ_TYPE_H_MAD:
\r
284 proxy_put_mad( (ib_mad_element_t*)p_h->p_obj );
\r
285 al_hdl_free( p_context->h_al, i );
\r
288 case AL_OBJ_TYPE_H_CA_ATTR:
\r
289 /* Release a saved CA attribute. */
\r
290 cl_free( p_h->p_obj );
\r
291 al_hdl_free( p_context->h_al, i );
\r
294 case AL_OBJ_TYPE_H_SA_REQ:
\r
295 al_cancel_sa_req( (al_sa_req_t*)p_h->p_obj );
\r
298 case AL_OBJ_TYPE_H_PNP_EVENT:
\r
299 cl_event_signal( &((proxy_pnp_evt_t*)p_h->p_obj)->event );
\r
303 /* Nothing else to do for other handle types. */
\r
307 cl_spinlock_release( &p_context->h_al->obj.lock );
\r
309 AL_EXIT( AL_DBG_DEV );
\r
315 IN cl_ioctl_handle_t h_ioctl )
\r
317 al_dev_open_context_t *p_context;
\r
318 IO_STACK_LOCATION *p_io_stack;
\r
320 AL_ENTER( AL_DBG_DEV );
\r
322 p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
\r
324 /* Determine if the client closed the al_handle. */
\r
325 p_context = (al_dev_open_context_t*)p_io_stack->FileObject->FsContext;
\r
328 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
\r
329 ("Client closed with a null open context .\n") );
\r
332 if( p_io_stack->FileObject->FsContext2 )
\r
334 /* Not the main file object - ignore. */
\r
335 AL_EXIT( AL_DBG_DEV );
\r
339 /* Mark that we're closing this device. */
\r
340 p_context->closing = TRUE;
\r
342 /* Flush any pending IOCTLs in case user-mode threads died on us. */
\r
343 if( p_context->h_cm_ioctl )
\r
344 al_dev_cancel_ioctl( p_context->h_cm_ioctl );
\r
345 if( p_context->h_comp_ioctl )
\r
346 al_dev_cancel_ioctl( p_context->h_comp_ioctl );
\r
347 if( p_context->h_misc_ioctl )
\r
348 al_dev_cancel_ioctl( p_context->h_misc_ioctl );
\r
350 while( p_context->ref_cnt )
\r
353 cl_status_t cl_status;
\r
355 cl_status = cl_event_wait_on( &p_context->close_event, 1000, FALSE );
\r
356 ASSERT( cl_status == IB_SUCCESS );
\r
357 if( cl_status != IB_SUCCESS )
\r
359 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Waiting on ref_cnt timed out!\n") );
\r
363 cl_event_wait_on( &p_context->close_event, EVENT_NO_TIMEOUT, FALSE );
\r
367 /* Cleanup any leftover callback resources. */
\r
368 __proxy_cancel_cblists( p_context );
\r
370 /* Close the AL instance for this process. */
\r
371 if( p_context->h_al )
\r
373 /* Cleanup all user to kernel handle mappings. */
\r
374 __proxy_cleanup_map( p_context );
\r
376 ib_close_al( p_context->h_al );
\r
377 p_context->h_al = NULL;
\r
380 /* Destroy the open context now. */
\r
381 __destroy_open_context( p_context );
\r
382 cl_free( p_context );
\r
384 AL_EXIT( AL_DBG_DEV );
\r
391 * Remove all callbacks on the given callback queue and return them to
\r
392 * the callback pool.
\r
396 IN al_dev_open_context_t *p_context,
\r
397 IN cl_qlist_t *p_cblist )
\r
399 cl_list_item_t *p_list_item;
\r
400 al_proxy_cb_info_t *p_cb_info;
\r
402 cl_spinlock_acquire( &p_context->cb_lock );
\r
403 for( p_list_item = cl_qlist_remove_head( p_cblist );
\r
404 p_list_item != cl_qlist_end( p_cblist );
\r
405 p_list_item = cl_qlist_remove_head( p_cblist ) )
\r
407 p_cb_info = (al_proxy_cb_info_t*)p_list_item;
\r
408 if( p_cb_info->p_al_obj )
\r
409 deref_al_obj( p_cb_info->p_al_obj );
\r
410 proxy_cb_put( p_cb_info );
\r
412 cl_spinlock_release( &p_context->cb_lock );
\r
418 * Remove all queued callbacks from all callback lists.
\r
421 __proxy_cancel_cblists(
\r
422 IN al_dev_open_context_t *p_context )
\r
424 __proxy_dq_cblist( p_context, &p_context->cm_cb_list );
\r
425 __proxy_dq_cblist( p_context, &p_context->comp_cb_list );
\r
426 __proxy_dq_cblist( p_context, &p_context->misc_cb_list );
\r
432 IN cl_ioctl_handle_t h_ioctl )
\r
434 cl_status_t cl_status;
\r
435 size_t ret_bytes = 0;
\r
436 void *p_open_context;
\r
437 IO_STACK_LOCATION *p_io_stack;
\r
439 AL_ENTER( AL_DBG_DEV );
\r
441 p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
\r
442 p_open_context = p_io_stack->FileObject->FsContext;
\r
444 AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_DEV,
\r
445 ("al_dev_ioctl: buf_size (%d) p_buf (%016I64x).\n",
\r
446 cl_ioctl_in_size( h_ioctl ), (LONG_PTR)cl_ioctl_in_buf( h_ioctl )) );
\r
448 /* Process the ioctl command. */
\r
449 if( IS_AL_PROXY_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )
\r
450 cl_status = proxy_ioctl( h_ioctl, &ret_bytes );
\r
451 else if( IS_VERBS_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )
\r
452 cl_status = verbs_ioctl( h_ioctl, &ret_bytes );
\r
453 //else if( IS_CM_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )
\r
454 // cl_status = cm_ioctl( h_ioctl, &ret_bytes );
\r
455 else if( IS_CEP_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )
\r
456 cl_status = cep_ioctl( h_ioctl, &ret_bytes );
\r
457 else if( IS_AL_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )
\r
458 cl_status = al_ioctl( h_ioctl, &ret_bytes );
\r
459 else if( IS_SUBNET_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )
\r
460 cl_status = subnet_ioctl( h_ioctl, &ret_bytes );
\r
461 else if( IS_IOC_IOCTL(cl_ioctl_ctl_code( h_ioctl )) )
\r
462 cl_status = ioc_ioctl( h_ioctl, &ret_bytes );
\r
464 cl_status = CL_INVALID_REQUEST;
\r
466 switch( cl_status )
\r
469 /* Flip the status since the IOCTL was completed. */
\r
470 cl_status = CL_SUCCESS;
\r
474 case CL_INVALID_REQUEST:
\r
476 * In Windows, Driver Verifier sends bogus IOCTLs to the device.
\r
477 * These must be passed down the device stack, and so cannot be
\r
478 * completed in the IOCTL handler. They are properly cleaned up,
\r
479 * though no data is returned to the user.
\r
483 cl_ioctl_complete( h_ioctl, cl_status, ret_bytes );
\r
486 AL_EXIT( AL_DBG_DEV );
\r
493 * Cancel any pending IOCTL calls for the specified type.
\r
494 * This routine is also called when closing the device.
\r
497 al_dev_cancel_ioctl(
\r
498 IN cl_ioctl_handle_t h_ioctl )
\r
500 al_dev_open_context_t *p_context;
\r
501 cl_ioctl_handle_t *ph_ioctl;
\r
502 PIO_STACK_LOCATION p_io_stack;
\r
505 * Search the ioctl buffer in the process specific queue
\r
506 * Dequeue it, if found
\r
508 AL_ENTER( AL_DBG_DEV );
\r
510 /* Get the stack location. */
\r
511 p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
\r
513 p_context = (al_dev_open_context_t *)p_io_stack->FileObject->FsContext;
\r
514 ASSERT( p_context );
\r
516 /* Clear the IOCTL. */
\r
517 cl_spinlock_acquire( &p_context->cb_lock );
\r
518 switch( cl_ioctl_ctl_code( h_ioctl ) )
\r
520 case UAL_GET_CM_CB_INFO:
\r
521 ph_ioctl = &p_context->h_cm_ioctl;
\r
523 case UAL_GET_COMP_CB_INFO:
\r
524 ph_ioctl = &p_context->h_comp_ioctl;
\r
526 case UAL_GET_MISC_CB_INFO:
\r
527 ph_ioctl = &p_context->h_misc_ioctl;
\r
530 AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid CB type\n") );
\r
535 if( ph_ioctl && *ph_ioctl == h_ioctl )
\r
538 #pragma warning(push, 3)
\r
539 IoSetCancelRoutine( h_ioctl, NULL );
\r
540 #pragma warning(pop)
\r
542 /* Complete the IOCTL. */
\r
543 cl_ioctl_complete( h_ioctl, CL_CANCELED, 0 );
\r
544 proxy_context_deref( p_context );
\r
546 cl_spinlock_release( &p_context->cb_lock );
\r
548 AL_EXIT( AL_DBG_DEV );
\r
554 IN DEVICE_OBJECT *p_dev_obj,
\r
557 AL_ENTER( AL_DBG_DEV );
\r
559 UNUSED_PARAM( p_dev_obj );
\r
561 al_dev_cancel_ioctl( p_irp );
\r
563 IoReleaseCancelSpinLock( p_irp->CancelIrql );
\r
565 AL_EXIT( AL_DBG_DEV );
\r