/*
 * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "al_ci_ca.h"
#include "al_debug.h"
#include "al_mad_pool.h"
#include "ib_common.h"
#define EVENT_POOL_MIN		4
#define EVENT_POOL_MAX		0
#define EVENT_POOL_GROW		1
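/*
 * These constants parameterize cl_qpool_init below: EVENT_POOL_MIN items
 * are preallocated, the pool grows EVENT_POOL_GROW items at a time, and a
 * maximum of zero indicates the pool may grow without limit.
 */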
static void
destroying_ci_ca(
	IN al_obj_t* p_obj );

static void
cleanup_ci_ca(
	IN al_obj_t* p_obj );

static void
free_ci_ca(
	IN al_obj_t* p_obj );

static void
ci_ca_comp_cb(
	IN void *cq_context );

static void
ci_ca_async_proc_cb(
	IN struct _cl_async_proc_item *p_item );

static void
ci_ca_async_event_cb(
	IN const ib_event_rec_t* const p_event_record );
ib_api_status_t
create_ci_ca(
	IN al_obj_t *p_parent_obj,
	IN const ci_interface_t* p_ci )
{
	ib_api_status_t		status;
	cl_status_t		cl_status;
	al_ci_ca_t		*p_ci_ca;

	CL_ENTER( AL_DBG_CA, g_al_dbg_lvl );

	CL_ASSERT( p_ci );
	/* Allocate the CI CA. */
	p_ci_ca = (al_ci_ca_t*)cl_zalloc( sizeof( al_ci_ca_t ) );
	if( !p_ci_ca )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("cl_zalloc failed\n") );
		return IB_INSUFFICIENT_MEMORY;
	}
	/* Construct the CI CA. */
	construct_al_obj( &p_ci_ca->obj, AL_OBJ_TYPE_CI_CA );
	cl_spinlock_construct( &p_ci_ca->attr_lock );
	cl_qlist_init( &p_ci_ca->ca_list );
	cl_qlist_init( &p_ci_ca->shmid_list );
	cl_qpool_construct( &p_ci_ca->event_pool );
	p_ci_ca->verbs = *p_ci;
	cl_status = cl_spinlock_init( &p_ci_ca->attr_lock );
	if( cl_status != CL_SUCCESS )
	{
		free_ci_ca( &p_ci_ca->obj );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("cl_spinlock_init failed, status = 0x%x.\n",
			ib_convert_cl_status( cl_status )) );
		return ib_convert_cl_status( cl_status );
	}
	/* Create a pool of items to report asynchronous events. */
	cl_status = cl_qpool_init( &p_ci_ca->event_pool, EVENT_POOL_MIN,
		EVENT_POOL_MAX, EVENT_POOL_GROW, sizeof( event_item_t ), NULL,
		NULL, NULL );
	if( cl_status != CL_SUCCESS )
	{
		free_ci_ca( &p_ci_ca->obj );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("cl_qpool_init failed, status = 0x%x.\n",
			ib_convert_cl_status( cl_status )) );
		return ib_convert_cl_status( cl_status );
	}
	status = init_al_obj( &p_ci_ca->obj, p_ci_ca, FALSE,
		destroying_ci_ca, cleanup_ci_ca, free_ci_ca );
	if( status != IB_SUCCESS )
	{
		free_ci_ca( &p_ci_ca->obj );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("init_al_obj failed, status = 0x%x.\n", status) );
		return status;
	}
	status = attach_al_obj( p_parent_obj, &p_ci_ca->obj );
	if( status != IB_SUCCESS )
	{
		p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );
		AL_TRACE_EXIT( AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}
	p_ci_ca->dereg_async_item.pfn_callback = ci_ca_async_proc_cb;
	/* Open the CI CA. */
	status = p_ci_ca->verbs.open_ca( p_ci_ca->verbs.guid, ci_ca_comp_cb,
		ci_ca_async_event_cb, p_ci_ca, &p_ci_ca->h_ci_ca );
	if( status != IB_SUCCESS )
	{
		p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("open_ca failed, status = 0x%x.\n", status) );
		return status;
	}

	/* Increase the max timeout for the CI CA to handle driver unload. */
	set_al_obj_timeout( &p_ci_ca->obj, AL_MAX_TIMEOUT_MS );
	/*
	 * Register ourselves with the AL manager, so that the open call below
	 * will succeed.
	 */
	add_ci_ca( p_ci_ca );
	/* Open the AL CA. */
	status = ib_open_ca( gh_al, p_ci_ca->verbs.guid, ca_event_cb, p_ci_ca,
		&p_ci_ca->h_ca );
	if( status != IB_SUCCESS )
	{
		p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_open_ca failed, status = 0x%x.\n", status) );
		return status;
	}
	/* Get a list of the port GUIDs on this CI CA. */
	status = get_port_info( p_ci_ca );
	if( status != IB_SUCCESS )
	{
		p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("get_port_info failed, status = 0x%x.\n", status) );
		return status;
	}
	/* Allocate a PD for use by AL itself. */
	status = ib_alloc_pd( p_ci_ca->h_ca, IB_PDT_SQP, p_ci_ca,
		&p_ci_ca->h_pd );
	if( status != IB_SUCCESS )
	{
		p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_alloc_pd failed, status = 0x%x.\n", status) );
		return status;
	}
	/* Allocate a PD alias for use by AL itself. */
	status = ib_alloc_pd( p_ci_ca->h_ca, IB_PDT_ALIAS, p_ci_ca,
		&p_ci_ca->h_pd_alias );
	if( status != IB_SUCCESS )
	{
		p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_alloc_pd alias failed, status = 0x%x.\n", status) );
		return status;
	}
	/* Register the global MAD pool on this CA. */
	status = ib_reg_mad_pool( gh_mad_pool, p_ci_ca->h_pd, &p_ci_ca->pool_key );
	if( status != IB_SUCCESS )
	{
		p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("ib_reg_mad_pool failed, status = 0x%x.\n", status) );
		return status;
	}
	/*
	 * Notify the PnP manager that a CA has been added.
	 * NOTE: PnP Manager must increment the CA reference count.
	 */
	status = pnp_ca_event( p_ci_ca, IB_PNP_CA_ADD );
	if( status != IB_SUCCESS )
	{
		/* Destroy the CA. */
		p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL );
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl,
			("pnp_ca_event failed, status = 0x%x.\n", status) );
		return status;
	}
	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &p_ci_ca->obj );

	CL_EXIT( AL_DBG_CA, g_al_dbg_lvl );
	return IB_SUCCESS;
}
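/*
 * CA destruction runs through the al_obj callbacks registered in
 * init_al_obj above: destroying_ci_ca starts the teardown and queues the
 * asynchronous close, ci_ca_async_proc_cb closes the AL CA once the PnP
 * remove event has been delivered, and cleanup_ci_ca closes the CI CA.
 */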
static void
destroying_ci_ca(
	IN al_obj_t* p_obj )
{
	al_ci_ca_t	*p_ci_ca;

	CL_ASSERT( p_obj );
	p_ci_ca = PARENT_STRUCT( p_obj, al_ci_ca_t, obj );

	/*
	 * Notify the PnP manager that this CA is being removed.
	 * NOTE: PnP Manager must decrement the CA reference count.
	 */
	pnp_ca_event( p_ci_ca, IB_PNP_CA_REMOVE );

	/*
	 * Queue a request to the asynchronous processing manager to close
	 * the CA after the PnP remove CA event has been delivered.  This
	 * prevents the ib_close_ca() call from immediately removing resources
	 * (PDs, QPs) that are in use by clients waiting on the remove CA event.
	 */
	if( p_ci_ca->h_ca )
		cl_async_proc_queue( gp_async_pnp_mgr, &p_ci_ca->dereg_async_item );
}
static void
ci_ca_async_proc_cb(
	IN struct _cl_async_proc_item *p_item )
{
	al_ci_ca_t	*p_ci_ca;

	p_ci_ca = PARENT_STRUCT( p_item, al_ci_ca_t, dereg_async_item );

	/* Release all AL resources acquired by the CI CA. */
	ib_close_ca( p_ci_ca->h_ca, NULL );
}
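/*
 * Final cleanup: detach the CI CA from the AL manager and close it
 * through the verbs interface.
 */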
static void
cleanup_ci_ca(
	IN al_obj_t* p_obj )
{
	ib_api_status_t	status;
	al_ci_ca_t	*p_ci_ca;

	CL_ENTER( AL_DBG_CA, g_al_dbg_lvl );

	CL_ASSERT( p_obj );
	p_ci_ca = PARENT_STRUCT( p_obj, al_ci_ca_t, obj );

	CL_ASSERT( cl_is_qlist_empty( &p_ci_ca->shmid_list ) );

	if( p_ci_ca->h_ci_ca )
	{
		remove_ci_ca( p_ci_ca );
		status = p_ci_ca->verbs.close_ca( p_ci_ca->h_ci_ca );
		CL_ASSERT( status == IB_SUCCESS );
	}

	CL_EXIT( AL_DBG_CA, g_al_dbg_lvl );
}
/*
 * CI CA asynchronous event callback.
 */
static void
ci_ca_async_event_cb(
	IN const ib_event_rec_t* const p_event_record )
{
	ib_async_event_rec_t	event_rec;

	CL_ENTER( AL_DBG_CA, g_al_dbg_lvl );

	CL_ASSERT( p_event_record );

	event_rec.code = p_event_record->type;
	event_rec.context = p_event_record->context;
	event_rec.vendor_specific = p_event_record->vendor_specific;

	ci_ca_async_event( &event_rec );

	CL_EXIT( AL_DBG_CA, g_al_dbg_lvl );
}
/*
 * Insert a new shmid tracking structure into the CI CA's list.
 */
void
add_shmid(
	IN al_ci_ca_t* const p_ci_ca,
	IN struct _al_shmid *p_shmid )
{
	CL_ASSERT( p_ci_ca && p_shmid );

	p_shmid->obj.p_ci_ca = p_ci_ca;

	/* Insert the shmid structure into the shmid list. */
	cl_spinlock_acquire( &p_ci_ca->obj.lock );
	cl_qlist_insert_head( &p_ci_ca->shmid_list, &p_shmid->list_item );
	cl_spinlock_release( &p_ci_ca->obj.lock );
}
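/*
 * Find a shmid structure by ID.  On success, the shmid is returned with a
 * reference held; callers release it via release_shmid.
 */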
ib_api_status_t
acquire_shmid(
	IN al_ci_ca_t* const p_ci_ca,
	IN int shmid,
	OUT struct _al_shmid **pp_shmid )
{
	al_shmid_t	*p_shmid;
	cl_list_item_t	*p_list_item;

	/* Try to find the shmid. */
	cl_spinlock_acquire( &p_ci_ca->obj.lock );
	for( p_list_item = cl_qlist_head( &p_ci_ca->shmid_list );
		p_list_item != cl_qlist_end( &p_ci_ca->shmid_list );
		p_list_item = cl_qlist_next( p_list_item ) )
	{
		p_shmid = PARENT_STRUCT( p_list_item, al_shmid_t, list_item );
		if( p_shmid->id == shmid )
		{
			ref_al_obj( &p_shmid->obj );
			*pp_shmid = p_shmid;
			break;
		}
	}
	cl_spinlock_release( &p_ci_ca->obj.lock );

	if( p_list_item == cl_qlist_end( &p_ci_ca->shmid_list ) )
		return IB_NOT_FOUND;

	return IB_SUCCESS;
}
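/*
 * Release a reference on a shmid.  A remaining count of one means only the
 * list's own reference is left, so the shmid is unlinked and destroyed; the
 * preceding ref_al_obj provides the reference that pfn_destroy consumes.
 */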
void
release_shmid(
	IN struct _al_shmid *p_shmid )
{
	al_ci_ca_t	*p_ci_ca;
	int32_t		ref_cnt;

	CL_ASSERT( p_shmid );

	p_ci_ca = p_shmid->obj.p_ci_ca;

	cl_spinlock_acquire( &p_ci_ca->obj.lock );

	/* Dereference the shmid. */
	ref_cnt = deref_al_obj( &p_shmid->obj );

	/* If the shmid is no longer in active use, remove it. */
	if( ref_cnt == 1 )
		cl_qlist_remove_item( &p_ci_ca->shmid_list, &p_shmid->list_item );

	cl_spinlock_release( &p_ci_ca->obj.lock );

	/* Destroy the shmid if it is not needed. */
	if( ref_cnt == 1 )
	{
		ref_al_obj( &p_shmid->obj );
		p_shmid->obj.pfn_destroy( &p_shmid->obj, NULL );
	}
}
ib_api_status_t
ib_ci_call(
	IN ib_ca_handle_t h_ca,
	IN const void* __ptr64 * const handle_array OPTIONAL,
	IN uint32_t num_handles,
	IN ib_ci_op_t* const p_ci_op )
{
	return ci_call( h_ca, handle_array, num_handles, p_ci_op, NULL );
}
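/*
 * ci_call does the real work for ib_ci_call, additionally accepting the
 * UMV buffer supplied when a request originates from user-mode verbs.
 */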
ib_api_status_t
ci_call(
	IN ib_ca_handle_t h_ca,
	IN const void* __ptr64 * const handle_array OPTIONAL,
	IN uint32_t num_handles,
	IN ib_ci_op_t* const p_ci_op,
	IN ci_umv_buf_t* const p_umv_buf OPTIONAL )
{
	void* __ptr64	*p_handle_array;
	ib_api_status_t	status;

	CL_ENTER( AL_DBG_CA, g_al_dbg_lvl );

	if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_CA_HANDLE\n") );
		return IB_INVALID_CA_HANDLE;
	}
	if( !p_ci_op )
	{
		CL_TRACE_EXIT( AL_DBG_ERROR, g_al_dbg_lvl, ("IB_INVALID_PARAMETER\n") );
		return IB_INVALID_PARAMETER;
	}

	p_handle_array = NULL;
	if( num_handles )
	{
		p_handle_array = cl_zalloc( sizeof(void* __ptr64) * num_handles );
		if( !p_handle_array )
			return IB_INSUFFICIENT_MEMORY;

		/* Convert the AL handles to CI handles for the vendor call. */
		status = al_convert_to_ci_handles( p_handle_array, handle_array,
			num_handles );
		if( status != IB_SUCCESS )
		{
			cl_free( p_handle_array );
			return status;
		}
	}

	if( h_ca->obj.p_ci_ca->verbs.vendor_call )
	{
		status = h_ca->obj.p_ci_ca->verbs.vendor_call(
			h_ca->obj.p_ci_ca->h_ci_ca, p_handle_array, num_handles,
			p_ci_op, p_umv_buf );
	}
	else
	{
		status = IB_UNSUPPORTED;
	}

	if( num_handles )
		cl_free( p_handle_array );

	CL_EXIT( AL_DBG_CA, g_al_dbg_lvl );
	return status;
}