/*
 * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
 * Portions Copyright (c) 2008 Microsoft Corporation. All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "al_debug.h"

#if defined(EVENT_TRACING)
#include "al_dm.tmh"
#endif

#include "ib_common.h"
/*
 * This code implements a minimal device management agent.
 */

static dm_agent_t* gp_dm_agent = NULL;

#define SVC_REG_TIMEOUT 2000 // Milliseconds
#define SVC_REG_RETRY_CNT 3
#define DM_CLASS_RESP_TIME_VALUE 20
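/*
 * Overview (summarized from the code below): a single global agent
 * (gp_dm_agent) listens for CA and port PnP events.  Each CA gets an
 * IO unit object (al_iou_t) and each port an IO unit port object
 * (al_iou_port_t) that opens a QP1 alias and registers a device
 * management MAD service.  IOCs (al_ioc_t) and their service entries
 * (al_svc_entry_t) are created by clients through ib_create_ioc,
 * ib_add_svc_entry, and ib_reg_ioc, and are attached beneath the IO
 * unit so they can be reported through device management MADs.
 */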
#define SET_NIBBLE( nibble_array, nibble_num, value ) \
{ \
    ((uint8_t*)(nibble_array))[(nibble_num) >> 1] = (uint8_t) \
        ((((nibble_num) & 1) == 0) ? \
        ((uint8_t*)(nibble_array))[(nibble_num) >> 1] & 0x0f : \
        ((uint8_t*)(nibble_array))[(nibble_num) >> 1] & 0xf0); \
    ((uint8_t*)(nibble_array))[(nibble_num) >> 1] |= \
        ( ((nibble_num) & 1) == 0) ? ((value) << 4) : ((value) & 0x0f); \
}
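/*
 * SET_NIBBLE packs two 4-bit slot states per byte: even nibble numbers
 * land in the high-order nibble of the byte, odd nibble numbers in the
 * low-order nibble.  For example (illustrative only):
 *
 *    uint8_t list[2] = { 0, 0 };
 *    SET_NIBBLE( list, 0, 0x1 );    // list[0] == 0x10
 *    SET_NIBBLE( list, 1, 0x2 );    // list[0] == 0x12
 *    SET_NIBBLE( list, 2, 0x3 );    // list[1] == 0x30
 *
 * get_io_unit_info() below uses this to build the ControllerList
 * nibble array reported in the IOUnitInfo attribute.
 */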
static void free_ioc(
    IN al_obj_t* p_obj );

static void free_svc_entry(
    IN al_obj_t* p_obj );

static al_iou_t* acquire_iou(
    IN const ib_net64_t ca_guid );

static al_iou_t* get_iou(
    IN const ib_ioc_handle_t h_ioc );

static ib_ioc_handle_t get_ioc(
    IN const ib_ca_handle_t h_ca );

static ib_api_status_t add_ioc(
    IN al_iou_t* p_iou,
    IN ib_ioc_handle_t h_ioc );

static void ioc_change(
    IN ib_ioc_handle_t h_ioc );

static void iou_change(
    IN al_iou_t* p_iou );

static ib_api_status_t set_port_dm_attr(
    IN al_iou_port_t* p_iou_port );

static void iou_port_svc_reg_cb(
    IN ib_reg_svc_rec_t* p_reg_svc_rec );

static void destroying_dm_agent(
    IN al_obj_t* p_obj );

static void free_dm_agent(
    IN al_obj_t* p_obj );

static ib_api_status_t dm_agent_reg_pnp(
    IN ib_pnp_class_t pnp_class,
    IN ib_pnp_handle_t * ph_pnp );

static ib_api_status_t dm_agent_pnp_cb(
    IN ib_pnp_rec_t* p_pnp_rec );

static ib_api_status_t create_iou(
    IN ib_pnp_rec_t* p_pnp_rec );

static void cleanup_iou(
    IN al_obj_t* p_obj );

static void free_iou(
    IN al_obj_t* p_obj );

static ib_api_status_t create_iou_port(
    IN ib_pnp_port_rec_t* p_pnp_rec );

static void destroying_iou_port(
    IN al_obj_t* p_obj );

static void free_iou_port(
    IN al_obj_t* p_obj );

static void iou_port_event_cb(
    IN ib_async_event_rec_t *p_event_rec );

static void dm_agent_send_cb(
    IN ib_mad_svc_handle_t h_mad_svc,
    IN void* mad_svc_context,
    IN ib_mad_element_t* p_mad_response );

static void dm_agent_recv_cb(
    IN ib_mad_svc_handle_t h_mad_svc,
    IN void* mad_svc_context,
    IN ib_mad_element_t* p_mad_request );

static void dm_agent_get(
    IN al_iou_port_t* p_iou_port,
    IN ib_mad_t* p_mad_req,
    IN ib_mad_t* p_mad_rsp );

static void dm_agent_set(
    IN al_iou_port_t* p_iou_port,
    IN ib_mad_t* p_mad_req,
    IN ib_mad_t* p_mad_rsp );

static void get_class_port_info(
    IN al_iou_t* p_iou,
    IN ib_dm_mad_t* p_dm_mad );

static void get_io_unit_info(
    IN al_iou_t* p_iou,
    IN ib_dm_mad_t* p_dm_mad );

static void get_ioc_profile(
    IN al_iou_t* p_iou,
    IN uint8_t slot,
    IN ib_dm_mad_t* p_dm_mad );

static void get_svc_entries(
    IN al_iou_t* p_iou,
    IN uint8_t slot,
    IN uint8_t svc_num_lo,
    IN uint8_t svc_num_hi,
    IN ib_dm_mad_t* p_dm_mad );
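/*
 * Typical client usage of the exported IOC interface defined in this
 * file (a sketch only; error handling and the client's own CA/profile
 * setup are omitted):
 *
 *    ib_ioc_handle_t h_ioc;
 *    ib_svc_handle_t h_svc;
 *
 *    ib_create_ioc( h_ca, &ioc_profile, &h_ioc );
 *    ib_add_svc_entry( h_ioc, &svc_entry, &h_svc );
 *    ib_reg_ioc( h_ioc );
 *
 * ib_reg_ioc attaches the IOC to the CA's IO unit so that it is
 * reported to device management queries on every port of that CA.
 */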
ib_api_status_t
ib_create_ioc(
    IN const ib_ca_handle_t h_ca,
    IN const ib_ioc_profile_t* const p_ioc_profile,
    OUT ib_ioc_handle_t* const ph_ioc )
    ib_ioc_handle_t h_ioc;

    AL_ENTER( AL_DBG_IOC );

    if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) )
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") );
        return IB_INVALID_CA_HANDLE;

    if( !p_ioc_profile || !ph_ioc )
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
        return IB_INVALID_PARAMETER;

    h_ioc = get_ioc( h_ca );
        return IB_INSUFFICIENT_MEMORY;

    /* Save the IOC profile. */
    cl_memcpy( &h_ioc->ioc_profile, p_ioc_profile, sizeof(ib_ioc_profile_t) );

    /* Clear the service entry count. */
    h_ioc->ioc_profile.num_svc_entries = 0;

    /* Return the IOC handle to the user. */

    AL_EXIT( AL_DBG_IOC );


ib_api_status_t
ib_destroy_ioc(
    IN const ib_ioc_handle_t h_ioc )
    AL_ENTER( AL_DBG_IOC );

    if( AL_OBJ_INVALID_HANDLE( h_ioc, AL_OBJ_TYPE_H_IOC ) )
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
        return IB_INVALID_HANDLE;

    ref_al_obj( &h_ioc->obj );
    h_ioc->obj.pfn_destroy( &h_ioc->obj, NULL );

    AL_EXIT( AL_DBG_IOC );


static void
free_ioc(
    IN al_obj_t* p_obj )
    ib_ioc_handle_t h_ioc;

    CL_ASSERT( p_obj );

    h_ioc = PARENT_STRUCT( p_obj, al_ioc_t, obj );

    /*
     * To maintain slot ordering, IOCs attached to an IO unit are freed when
     * the IO unit is destroyed.  Otherwise, unattached IOCs may be freed now.
     */
        /* Mark the IOC slot as empty. */
        h_ioc->state = EMPTY_SLOT;
        reset_al_obj( p_obj );
        deref_al_obj( &h_ioc->p_iou->obj );

        /* Report that a change occurred on the IOC. */
        ioc_change( h_ioc );

        /* Unattached IOCs can be destroyed. */
        destroy_al_obj( p_obj );
ib_api_status_t
ib_reg_ioc(
    IN const ib_ioc_handle_t h_ioc )
    ib_api_status_t status;

    AL_ENTER( AL_DBG_IOC );

    if( AL_OBJ_INVALID_HANDLE( h_ioc, AL_OBJ_TYPE_H_IOC ) )
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
        return IB_INVALID_HANDLE;

    /* Get an IO unit for this IOC. */
    p_iou = get_iou( h_ioc );
        return IB_INSUFFICIENT_MEMORY;

    /* Register the IOC with the IO unit. */
    status = add_ioc( p_iou, h_ioc );

    AL_EXIT( AL_DBG_IOC );


ib_api_status_t
ib_add_svc_entry(
    IN const ib_ioc_handle_t h_ioc,
    IN const ib_svc_entry_t* const p_svc_entry,
    OUT ib_svc_handle_t* const ph_svc )
    ib_svc_handle_t h_svc;
    ib_api_status_t status;

    AL_ENTER( AL_DBG_IOC );

    if( AL_OBJ_INVALID_HANDLE( h_ioc, AL_OBJ_TYPE_H_IOC ) )
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
        return IB_INVALID_HANDLE;

    if( !p_svc_entry || !ph_svc )
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
        return IB_INVALID_PARAMETER;

    /*
     * Synchronize the addition of a service entry with the removal.
     * Cannot hold a lock on the IOC when attaching a service entry
     * object.  Wait here until the IOC is no longer in use.
     */
    cl_spinlock_acquire( &h_ioc->obj.lock );
    while( h_ioc->in_use_cnt )
        cl_spinlock_release( &h_ioc->obj.lock );
        cl_thread_suspend( 0 );
        cl_spinlock_acquire( &h_ioc->obj.lock );

    /* Flag the IOC as in use by this thread. */
    cl_atomic_inc( &h_ioc->in_use_cnt );
    cl_spinlock_release( &h_ioc->obj.lock );

    /* Check the current service entry count. */
    if( h_ioc->ioc_profile.num_svc_entries == MAX_NUM_SVC_ENTRIES )
        cl_spinlock_release( &h_ioc->obj.lock );
        AL_EXIT( AL_DBG_IOC );
        return IB_INSUFFICIENT_RESOURCES;

    h_svc = cl_zalloc( sizeof( ib_svc_handle_t ) );
        AL_EXIT( AL_DBG_IOC );
        return IB_INSUFFICIENT_MEMORY;

    /* Construct the service entry. */
    construct_al_obj( &h_svc->obj, AL_OBJ_TYPE_H_SVC_ENTRY );

    /* Save the service entry. */
    cl_memcpy( &h_svc->svc_entry, p_svc_entry, sizeof( ib_svc_entry_t ) );

    /* Initialize the service entry object. */
    status = init_al_obj( &h_svc->obj, h_svc, FALSE, NULL, NULL,
        free_svc_entry );
    if( status != IB_SUCCESS )
        free_svc_entry( &h_svc->obj );
        AL_EXIT( AL_DBG_IOC );

    /* Attach the service entry to the IOC. */
    status = attach_al_obj( &h_ioc->obj, &h_svc->obj );
    if( status != IB_SUCCESS )
        h_svc->obj.pfn_destroy( &h_svc->obj, NULL );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );

    h_ioc->ioc_profile.num_svc_entries++;

    /* Indicate that a change occurred on the IOC. */
    ioc_change( h_ioc );

    /* No longer in use by this thread. */
    cl_atomic_dec( &h_ioc->in_use_cnt );

    /* Return the service entry handle to the user. */

    /* Release the reference taken in init_al_obj. */
    deref_al_obj( &h_svc->obj );

    AL_EXIT( AL_DBG_IOC );
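/*
 * Note on the in_use_cnt handshake used by ib_add_svc_entry above and
 * ib_remove_svc_entry below: the IOC lock cannot be held across the
 * attach or detach of a service entry object, so each caller spins,
 * releasing the lock and yielding with cl_thread_suspend( 0 ), until
 * in_use_cnt drops to zero, then publishes its own use with
 * cl_atomic_inc before dropping the lock.  This serializes service
 * entry addition and removal without holding the IOC lock during the
 * attach or detach itself.
 */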
ib_api_status_t
ib_remove_svc_entry(
    IN const ib_svc_handle_t h_svc )
    ib_ioc_handle_t h_ioc;

    AL_ENTER( AL_DBG_IOC );

    if( AL_OBJ_INVALID_HANDLE( h_svc, AL_OBJ_TYPE_H_SVC_ENTRY ) )
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
        return IB_INVALID_HANDLE;

    h_ioc = PARENT_STRUCT( h_svc->obj.p_parent_obj, al_ioc_t, obj );

    /*
     * Synchronize the removal of a service entry with the addition.
     * Cannot hold a lock on the IOC when detaching a service entry
     * object.  Wait here until the IOC is no longer in use.
     */
    cl_spinlock_acquire( &h_ioc->obj.lock );
    while( h_ioc->in_use_cnt )
        cl_spinlock_release( &h_ioc->obj.lock );
        cl_thread_suspend( 0 );
        cl_spinlock_acquire( &h_ioc->obj.lock );

    /* Flag the IOC as in use by this thread. */
    cl_atomic_inc( &h_ioc->in_use_cnt );
    cl_spinlock_release( &h_ioc->obj.lock );

    /*
     * Synchronously destroy the service entry.
     * The service handle is invalid when this call returns.
     */
    ref_al_obj( &h_svc->obj );
    h_svc->obj.pfn_destroy( &h_svc->obj, NULL );

    /* Decrement the service entry count. */
    h_ioc->ioc_profile.num_svc_entries--;

    /* Indicate that a change occurred on the IOC. */
    ioc_change( h_ioc );

    /* No longer in use by this thread. */
    cl_atomic_dec( &h_ioc->in_use_cnt );

    AL_EXIT( AL_DBG_IOC );


/*
 * Free a service entry.
 */
static void
free_svc_entry(
    IN al_obj_t* p_obj )
    ib_svc_handle_t h_svc;

    CL_ASSERT( p_obj );
    h_svc = PARENT_STRUCT( p_obj, al_svc_entry_t, obj );

    destroy_al_obj( &h_svc->obj );
/*
 * Acquire the IO unit matching the given CA GUID.
 */
static al_iou_t*
acquire_iou(
    IN const ib_net64_t ca_guid )
    cl_list_item_t* p_iou_item;

    /* Search for an existing IO unit matching the CA GUID. */
    cl_spinlock_acquire( &gp_dm_agent->obj.lock );
    for( p_iou_item = cl_qlist_head( &gp_dm_agent->obj.obj_list );
        p_iou_item != cl_qlist_end( &gp_dm_agent->obj.obj_list );
        p_iou_item = cl_qlist_next( p_iou_item ) )
        p_obj = PARENT_STRUCT( p_iou_item, al_obj_t, pool_item );
        p_iou = PARENT_STRUCT( p_obj, al_iou_t, obj );

        /* Check for a GUID match. */
        if( p_iou->obj.p_ci_ca->verbs.guid == ca_guid )
            /* Reference the IO unit on behalf of the client. */
            ref_al_obj( &p_iou->obj );

            cl_spinlock_release( &gp_dm_agent->obj.lock );

    cl_spinlock_release( &gp_dm_agent->obj.lock );
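/*
 * acquire_iou() returns the IO unit with a reference held on behalf of
 * the caller; callers are expected to release it with deref_al_obj()
 * when they are done with the IO unit (see add_ioc and create_iou_port
 * below).
 */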
/*
 * Get the IO unit for the given IOC.
 */
static al_iou_t*
get_iou(
    IN const ib_ioc_handle_t h_ioc )
    CL_ASSERT( h_ioc );

    /* Check if the IOC is already attached to an IO unit. */
        return h_ioc->p_iou;

    /* The IOC is a new slot.  Acquire the IO unit. */
    return acquire_iou( h_ioc->obj.p_ci_ca->verbs.guid );


static ib_ioc_handle_t
get_ioc(
    IN const ib_ca_handle_t h_ca )
    cl_list_item_t* p_ioc_item;
    ib_ioc_handle_t h_ioc;
    ib_api_status_t status;

    /* Acquire the IO unit. */
    p_iou = acquire_iou( h_ca->obj.p_ci_ca->verbs.guid );

    /* Search for an empty IOC slot in the IO unit. */
    cl_spinlock_acquire( &p_iou->obj.lock );
    for( p_ioc_item = cl_qlist_head( &p_iou->ioc_list );
        (p_ioc_item != cl_qlist_end( &p_iou->ioc_list )) && !found;
        p_ioc_item = cl_qlist_next( p_ioc_item ) )
        h_ioc = PARENT_STRUCT( p_ioc_item, al_ioc_t, iou_item );

        if( h_ioc->state == EMPTY_SLOT )
            /*
             * An empty slot was found.
             * Change the state to indicate that the slot is in use.
             */
            h_ioc->state = SLOT_IN_USE;

    cl_spinlock_release( &p_iou->obj.lock );

    /* Allocate a new IOC if one was not found. */
        h_ioc = cl_zalloc( sizeof( al_ioc_t ) );

        /* Construct the IOC. */
        construct_al_obj( &h_ioc->obj, AL_OBJ_TYPE_H_IOC );

        /* Initialize the IOC object. */
        status =
            init_al_obj( &h_ioc->obj, h_ioc, FALSE, NULL, NULL, free_ioc );
        if( status != IB_SUCCESS )
            free_ioc( &h_ioc->obj );

        /* Attach the IOC to the CA. */
        status = attach_al_obj( &h_ca->obj, &h_ioc->obj );
        if( status != IB_SUCCESS )
            h_ioc->obj.pfn_destroy( &h_ioc->obj, NULL );
            AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
                ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );

        /* Release the reference taken in init_al_obj. */
        deref_al_obj( &h_ioc->obj );
static ib_api_status_t
add_ioc(
    IN al_iou_t* p_iou,
    IN ib_ioc_handle_t h_ioc )
    cl_list_item_t* p_list_item;
    al_iou_port_t* p_iou_port;
    ib_api_status_t status;

    CL_ASSERT( p_iou );
    CL_ASSERT( h_ioc );

    /* Attach the IOC to the IO unit. */
    if( !h_ioc->p_iou )
        cl_spinlock_acquire( &p_iou->obj.lock );

        /* Make sure the IO unit can support the new IOC slot. */
        if( cl_qlist_count( &p_iou->ioc_list ) >=
            ( sizeof( ((ib_iou_info_t*)0)->controller_list ) - 1) )
            cl_spinlock_release( &p_iou->obj.lock );
            deref_al_obj( &p_iou->obj );
            return IB_INSUFFICIENT_RESOURCES;

        /* Add a new IOC slot to the IO unit. */
        cl_qlist_insert_tail( &p_iou->ioc_list, &h_ioc->iou_item );
        h_ioc->p_iou = p_iou;

        cl_spinlock_release( &p_iou->obj.lock );

        /* The IOC is being added to an empty IO unit slot. */
        CL_ASSERT( h_ioc->p_iou == p_iou );
        CL_ASSERT( h_ioc->state == SLOT_IN_USE );

    /* Enable the IOC. */
    h_ioc->state = IOC_ACTIVE;

    /* Indicate that a change occurred on the IO unit. */
    iou_change( p_iou );

    /* Flag each port on the IO unit CA as supporting device management. */
    status = IB_SUCCESS;
    cl_spinlock_acquire( &p_iou->obj.lock );
    for( p_list_item = cl_qlist_head( &p_iou->obj.obj_list );
        p_list_item != cl_qlist_end( &p_iou->obj.obj_list );
        p_list_item = cl_qlist_next( p_list_item ) )
        p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );
        p_iou_port = PARENT_STRUCT( p_obj, al_iou_port_t, obj );

        status = set_port_dm_attr( p_iou_port );
        if( status != IB_SUCCESS ) break;

    cl_spinlock_release( &p_iou->obj.lock );

    if( status != IB_SUCCESS )
        h_ioc->state = SLOT_IN_USE;
static void
ioc_change(
    IN ib_ioc_handle_t h_ioc )
    CL_ASSERT( h_ioc );

    /* Report a change to the IO unit to which the IOC is attached. */
    if( h_ioc->p_iou ) iou_change( h_ioc->p_iou );


static void
iou_change(
    IN al_iou_t* p_iou )
    CL_ASSERT( p_iou );

    /* Increment the IO unit change counter. */
    cl_spinlock_acquire( &p_iou->obj.lock );
    p_iou->change_id++;
    cl_spinlock_release( &p_iou->obj.lock );
static ib_api_status_t
set_port_dm_attr(
    IN al_iou_port_t* p_iou_port )
    ib_port_attr_mod_t port_attr_mod;
    ib_reg_svc_req_t reg_svc_req;
    ib_api_status_t status;

    CL_ASSERT( p_iou_port );

    /* Initialize a port attribute modification structure. */
    cl_memclr( &port_attr_mod, sizeof( ib_port_attr_mod_t ) );
    port_attr_mod.cap.dev_mgmt = TRUE;

    /* Flag each port on the IO unit CA as supporting device management. */
    status = ib_modify_ca( p_iou_port->obj.p_ci_ca->h_ca, p_iou_port->port_num,
        IB_CA_MOD_IS_DEV_MGMT_SUPPORTED, &port_attr_mod );

    if( status != IB_SUCCESS )

    /* Then register a service with the SA if one is needed. */
    if( !p_iou_port->svc_handle )
        /* Build the service registration request. */
        cl_memclr( &reg_svc_req, sizeof( ib_reg_svc_req_t ) );

        reg_svc_req.svc_rec.service_lease = 0xffffffff;
        strncpy( (char*)reg_svc_req.svc_rec.service_name, DM_SVC_NAME,
            sizeof( reg_svc_req.svc_rec.service_name ) );
        reg_svc_req.svc_rec.service_gid = p_iou_port->port_gid;
        reg_svc_req.port_guid = p_iou_port->port_guid;

        reg_svc_req.timeout_ms = SVC_REG_TIMEOUT;
        reg_svc_req.retry_cnt = SVC_REG_RETRY_CNT;
        reg_svc_req.svc_context = p_iou_port;
        reg_svc_req.pfn_reg_svc_cb = iou_port_svc_reg_cb;
        reg_svc_req.svc_data_mask = IB_SR_COMPMASK_SGID |
            IB_SR_COMPMASK_SPKEY |
            IB_SR_COMPMASK_SLEASE |
            IB_SR_COMPMASK_SNAME;

        /* Reference the IO unit port on behalf of the ib_reg_svc call. */
        ref_al_obj( &p_iou_port->obj );

        status = ib_reg_svc( gh_al, &reg_svc_req, &p_iou_port->svc_handle );

        if( status != IB_SUCCESS )
            deref_al_obj( &p_iou_port->obj );

            /* Ignore this error - the SM will sweep port attribute changes. */
            status = IB_SUCCESS;
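/*
 * Note: a service_lease of 0xffffffff requests an indefinite lease from
 * the SA, so the registration above does not need periodic renewal.  If
 * the registration fails, the code deliberately falls back to relying on
 * the DevMgmt port capability bit set via ib_modify_ca, which the SM
 * picks up on its next sweep.
 */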
static void
iou_port_svc_reg_cb(
    IN ib_reg_svc_rec_t* p_reg_svc_rec )
    al_iou_port_t* p_iou_port;

    CL_ASSERT( p_reg_svc_rec );

    p_iou_port = (al_iou_port_t*)p_reg_svc_rec->svc_context;

    if( p_reg_svc_rec->req_status != IB_SUCCESS )
        deref_al_obj( &p_iou_port->obj );


/*
 * Device Management Agent
 */

/*
 * Create the device management agent.
 */
ib_api_status_t
create_dm_agent(
    IN al_obj_t* const p_parent_obj )
    cl_status_t cl_status;
    ib_api_status_t status;

    CL_ASSERT( p_parent_obj );
    CL_ASSERT( !gp_dm_agent );

    gp_dm_agent = cl_zalloc( sizeof( dm_agent_t ) );
        return IB_INSUFFICIENT_MEMORY;

    /* Construct the device management agent. */
    construct_al_obj( &gp_dm_agent->obj, AL_OBJ_TYPE_DM );
    cl_spinlock_construct( &gp_dm_agent->lock );

    cl_status = cl_spinlock_init( &gp_dm_agent->lock );
    if( cl_status != CL_SUCCESS )
        free_dm_agent( &gp_dm_agent->obj );
        return ib_convert_cl_status( cl_status );

    /* Initialize the device management agent object. */
    status = init_al_obj( &gp_dm_agent->obj, gp_dm_agent, TRUE,
        destroying_dm_agent, NULL, free_dm_agent );
    if( status != IB_SUCCESS )
        free_dm_agent( &gp_dm_agent->obj );

    /* Attach the device management agent to the parent object. */
    status = attach_al_obj( p_parent_obj, &gp_dm_agent->obj );
    if( status != IB_SUCCESS )
        gp_dm_agent->obj.pfn_destroy( &gp_dm_agent->obj, NULL );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );

    /* Register for CA PnP events. */
    status = dm_agent_reg_pnp( IB_PNP_CA, &gp_dm_agent->h_ca_pnp );
    if (status != IB_SUCCESS)
        gp_dm_agent->obj.pfn_destroy( &gp_dm_agent->obj, NULL );

    /* Register for port PnP events. */
    status = dm_agent_reg_pnp( IB_PNP_PORT, &gp_dm_agent->h_port_pnp );
    if (status != IB_SUCCESS)
        gp_dm_agent->obj.pfn_destroy( &gp_dm_agent->obj, NULL );

    /* Release the reference taken in init_al_obj. */
    deref_al_obj( &gp_dm_agent->obj );
/*
 * Pre-destroy the device management agent.
 */
static void
destroying_dm_agent(
    IN al_obj_t* p_obj )
    ib_api_status_t status;

    CL_ASSERT( p_obj );
    CL_ASSERT( gp_dm_agent == PARENT_STRUCT( p_obj, dm_agent_t, obj ) );
    UNUSED_PARAM( p_obj );

    /* Mark that we're destroying the agent. */
    cl_spinlock_acquire( &gp_dm_agent->lock );
    gp_dm_agent->destroying = TRUE;
    cl_spinlock_release( &gp_dm_agent->lock );

    /* Deregister for port PnP events. */
    if( gp_dm_agent->h_port_pnp )
        status = ib_dereg_pnp( gp_dm_agent->h_port_pnp,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );

    /* Deregister for CA PnP events. */
    if( gp_dm_agent->h_ca_pnp )
        status = ib_dereg_pnp( gp_dm_agent->h_ca_pnp,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );


/*
 * Free the device management agent.
 */
static void
free_dm_agent(
    IN al_obj_t* p_obj )
    CL_ASSERT( p_obj );
    CL_ASSERT( gp_dm_agent == PARENT_STRUCT( p_obj, dm_agent_t, obj ) );
    UNUSED_PARAM( p_obj );

    destroy_al_obj( &gp_dm_agent->obj );
    cl_free( gp_dm_agent );
    gp_dm_agent = NULL;


/*
 * Register the device management agent for the given PnP class events.
 */
static ib_api_status_t
dm_agent_reg_pnp(
    IN ib_pnp_class_t pnp_class,
    IN ib_pnp_handle_t * ph_pnp )
    ib_api_status_t status;
    ib_pnp_req_t pnp_req;

    CL_ASSERT( ph_pnp );

    cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
    pnp_req.pnp_class = pnp_class;
    pnp_req.pnp_context = gp_dm_agent;
    pnp_req.pfn_pnp_cb = dm_agent_pnp_cb;

    status = ib_reg_pnp( gh_al, &pnp_req, ph_pnp );

    /* Reference the DM agent on behalf of the ib_reg_pnp call. */
    if( status == IB_SUCCESS )
        ref_al_obj( &gp_dm_agent->obj );
/*
 * Device management agent PnP event callback.
 */
static ib_api_status_t
dm_agent_pnp_cb(
    IN ib_pnp_rec_t* p_pnp_rec )
    ib_api_status_t status;
    al_iou_port_t* p_iou_port;

    CL_ASSERT( p_pnp_rec );
    CL_ASSERT( p_pnp_rec->pnp_context == gp_dm_agent );

    /* Dispatch based on the PnP event type. */
    switch( p_pnp_rec->pnp_event )
    case IB_PNP_CA_ADD:
        status = create_iou( p_pnp_rec );

    case IB_PNP_CA_REMOVE:
        CL_ASSERT( p_pnp_rec->context );
        p_iou = p_pnp_rec->context;
        ref_al_obj( &p_iou->obj );
        p_iou->obj.pfn_destroy( &p_iou->obj, NULL );
        status = IB_SUCCESS;

    case IB_PNP_PORT_ADD:
        CL_ASSERT( !p_pnp_rec->context );
        status = create_iou_port( (ib_pnp_port_rec_t*)p_pnp_rec );

    case IB_PNP_PORT_REMOVE:
        CL_ASSERT( p_pnp_rec->context );
        p_iou_port = p_pnp_rec->context;
        ref_al_obj( &p_iou_port->obj );
        p_iou_port->obj.pfn_destroy( &p_iou_port->obj, NULL );

        /* All other events are ignored. */
        status = IB_SUCCESS;
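/*
 * The context pointer carried in the PnP record ties these events
 * together: create_iou and create_iou_port store their new objects in
 * p_pnp_rec->context on the ADD events, and the matching REMOVE events
 * above retrieve that same pointer to destroy the object.
 */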
/*
 * Create an IO unit.
 */
static ib_api_status_t
create_iou(
    IN ib_pnp_rec_t* p_pnp_rec )
    ib_ca_handle_t h_ca;
    ib_api_status_t status;

    CL_ASSERT( p_pnp_rec );

    p_iou = cl_zalloc( sizeof( al_iou_t ) );
        return IB_INSUFFICIENT_MEMORY;

    /* Construct the IO unit object. */
    construct_al_obj( &p_iou->obj, AL_OBJ_TYPE_IOU );

    /* Initialize the IO unit object. */
    status =
        init_al_obj( &p_iou->obj, p_iou, TRUE, NULL, cleanup_iou, free_iou );
    if( status != IB_SUCCESS )
        free_iou( &p_iou->obj );

    /*
     * Attach the IO unit to the device management agent.  Lock and
     * check to synchronize the destruction of the user-mode device
     * management agent with the creation of the IO unit through a
     * PnP callback.
     */
    cl_spinlock_acquire( &gp_dm_agent->lock );
    if( gp_dm_agent->destroying )
        p_iou->obj.pfn_destroy( &p_iou->obj, NULL );
        cl_spinlock_release( &gp_dm_agent->lock );
        return IB_INVALID_STATE;

    status = attach_al_obj( &gp_dm_agent->obj, &p_iou->obj );
    if( status != IB_SUCCESS )
        p_iou->obj.pfn_destroy( &p_iou->obj, NULL );
        cl_spinlock_release( &gp_dm_agent->lock );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );

    cl_spinlock_release( &gp_dm_agent->lock );

    /* It is now safe to acquire the CA and initialize the p_ci_ca pointer. */
    h_ca = acquire_ca( p_pnp_rec->guid );
        p_iou->obj.pfn_destroy( &p_iou->obj, NULL );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("acquire_ca for GUID %016I64x failed.\n", p_pnp_rec->guid) );
        return IB_INVALID_CA_HANDLE;

    p_iou->obj.p_ci_ca = h_ca->obj.p_ci_ca;

    /* Initialize the IO unit IOC list. */
    cl_qlist_init( &p_iou->ioc_list );

    /* Set the context of the PnP event to this child object. */
    p_pnp_rec->context = p_iou;

    /* Release the reference taken in init_al_obj. */
    deref_al_obj( &p_iou->obj );

    return IB_SUCCESS;


/*
 * Cleanup an IO unit.
 */
static void
cleanup_iou(
    IN al_obj_t* p_obj )
    cl_list_item_t* p_ioc_item;
    ib_ioc_handle_t h_ioc;

    CL_ASSERT( p_obj );
    p_iou = PARENT_STRUCT( p_obj, al_iou_t, obj );

    /* No need to lock during cleanup. */
    for( p_ioc_item = cl_qlist_remove_head( &p_iou->ioc_list );
        p_ioc_item != cl_qlist_end( &p_iou->ioc_list );
        p_ioc_item = cl_qlist_remove_head( &p_iou->ioc_list ) )
        h_ioc = PARENT_STRUCT( p_ioc_item, al_ioc_t, iou_item );

        CL_ASSERT( h_ioc->state == EMPTY_SLOT );

        /* Detach the IOC from the IO unit. */
        CL_ASSERT( h_ioc->p_iou == p_iou );
        h_ioc->p_iou = NULL;

        /* Destroy the IOC. */
        ref_al_obj( &h_ioc->obj );
        h_ioc->obj.pfn_destroy( &h_ioc->obj, NULL );


/*
 * Free an IO unit.
 */
static void
free_iou(
    IN al_obj_t* p_obj )
    CL_ASSERT( p_obj );

    p_iou = PARENT_STRUCT( p_obj, al_iou_t, obj );

    /* Dereference the CA. */
    if( p_iou->obj.p_ci_ca )
        deref_al_obj( &p_iou->obj.p_ci_ca->h_ca->obj );

    destroy_al_obj( &p_iou->obj );
/*
 * Create an IO unit port.
 */
static ib_api_status_t
create_iou_port(
    IN ib_pnp_port_rec_t* p_pnp_rec )
    al_iou_port_t* p_iou_port;
    ib_qp_create_t qp_create;
    ib_mad_svc_t mad_svc;
    ib_api_status_t status;

    CL_ASSERT( p_pnp_rec );
    CL_ASSERT( p_pnp_rec->p_ca_attr );
    CL_ASSERT( p_pnp_rec->p_port_attr );

    p_iou_port = cl_zalloc( sizeof( al_iou_port_t ) );
        return IB_INSUFFICIENT_MEMORY;

    /* Construct the IO unit port object. */
    construct_al_obj( &p_iou_port->obj, AL_OBJ_TYPE_IOU );

    /* Initialize the IO unit port object. */
    status = init_al_obj( &p_iou_port->obj, p_iou_port, TRUE,
        destroying_iou_port, NULL, free_iou_port );
    if( status != IB_SUCCESS )
        free_iou_port( &p_iou_port->obj );

    /* Acquire the IO unit. */
    p_iou = acquire_iou( p_pnp_rec->p_ca_attr->ca_guid );
        p_iou_port->obj.pfn_destroy( &p_iou_port->obj, NULL );
        return IB_INVALID_GUID;

    /* Attach the IO unit port to the IO unit. */
    status = attach_al_obj( &p_iou->obj, &p_iou_port->obj );
    if( status != IB_SUCCESS )
        p_iou_port->obj.pfn_destroy( &p_iou_port->obj, NULL );
        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
            ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );

    deref_al_obj( &p_iou->obj );

    /* Save the port number. */
    p_iou_port->port_num = p_pnp_rec->p_port_attr->port_num;

    /* Save the port GUID - used in service registration. */
    p_iou_port->port_guid = p_pnp_rec->pnp_rec.guid;

    /* Save the default port GID and PKey. */
    p_iou_port->port_gid = p_pnp_rec->p_port_attr->p_gid_table[0];
    p_iou_port->port_pkey = p_pnp_rec->p_port_attr->p_pkey_table[0];

    /* Create a QP alias. */
    cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );
    qp_create.qp_type = IB_QPT_QP1_ALIAS;
    qp_create.sq_depth = 1;
    qp_create.sq_sge = 1;
    qp_create.sq_signaled = TRUE;

    status = ib_get_spl_qp( p_iou_port->obj.p_ci_ca->h_pd_alias,
        p_pnp_rec->p_port_attr->port_guid, &qp_create,
        p_iou_port, iou_port_event_cb, &p_iou_port->pool_key,
        &p_iou_port->h_qp_alias );

    if (status != IB_SUCCESS)
        p_iou_port->obj.pfn_destroy( &p_iou_port->obj, NULL );

    /* Reference the IO unit port on behalf of ib_get_spl_qp. */
    ref_al_obj( &p_iou_port->obj );

    /* Register the MAD service for device management. */
    cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) );
    mad_svc.mad_svc_context = p_iou_port;
    mad_svc.pfn_mad_send_cb = dm_agent_send_cb;
    mad_svc.pfn_mad_recv_cb = dm_agent_recv_cb;
    mad_svc.support_unsol = TRUE;
    mad_svc.mgmt_class = IB_MCLASS_DEV_MGMT;
    mad_svc.mgmt_version = 1;
    mad_svc.method_array[ IB_MAD_METHOD_GET ] = TRUE;
    mad_svc.method_array[ IB_MAD_METHOD_SET ] = TRUE;

    status = ib_reg_mad_svc( p_iou_port->h_qp_alias, &mad_svc,
        &p_iou_port->h_mad_svc );
    if( status != IB_SUCCESS )
        p_iou_port->obj.pfn_destroy( &p_iou_port->obj, NULL );

    /* Determine if any IOCs are attached to this IO unit. */
    cl_spinlock_acquire( &p_iou->obj.lock );
    if( !cl_is_qlist_empty( &p_iou->ioc_list ) )
        /* Set the device management port attribute. */
        status = set_port_dm_attr( p_iou_port );
        CL_ASSERT( status == IB_SUCCESS );

    cl_spinlock_release( &p_iou->obj.lock );

    /* Set the context of the PnP event to this child object. */
    p_pnp_rec->pnp_rec.context = p_iou_port;

    /* Release the reference taken in init_al_obj. */
    deref_al_obj( &p_iou_port->obj );

    return IB_SUCCESS;
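/*
 * Each IO unit port answers device management queries itself: the QP1
 * alias created above shares the port's GSI queue pair, and the MAD
 * service registered on it claims unsolicited Get/Set MADs of the
 * device management class (IB_MCLASS_DEV_MGMT), which are dispatched
 * to dm_agent_recv_cb below.
 */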
/*
 * Pre-destroy an IO unit port.
 */
static void
destroying_iou_port(
    IN al_obj_t* p_obj )
    al_iou_port_t* p_iou_port;
    ib_api_status_t status;

    CL_ASSERT( p_obj );
    p_iou_port = PARENT_STRUCT( p_obj, al_iou_port_t, obj );

    /* Deregister the device management service. */
    if( p_iou_port->svc_handle )
        status = ib_dereg_svc( p_iou_port->svc_handle,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );

    /* Destroy the QP alias. */
    if( p_iou_port->h_qp_alias )
        status = ib_destroy_qp( p_iou_port->h_qp_alias,
            (ib_pfn_destroy_cb_t)deref_al_obj );
        CL_ASSERT( status == IB_SUCCESS );


/*
 * Free an IO unit port.
 */
static void
free_iou_port(
    IN al_obj_t* p_obj )
    al_iou_port_t* p_iou_port;

    CL_ASSERT( p_obj );

    p_iou_port = PARENT_STRUCT( p_obj, al_iou_port_t, obj );

    destroy_al_obj( &p_iou_port->obj );
    cl_free( p_iou_port );


/*
 * IO unit port asynchronous event callback.
 */
static void
iou_port_event_cb(
    IN ib_async_event_rec_t *p_event_rec )
    UNUSED_PARAM( p_event_rec );

    /* The QP is an alias, so if we've received an error, it is unusable. */


/*
 * Device management agent send completion callback.
 */
static void
dm_agent_send_cb(
    IN ib_mad_svc_handle_t h_mad_svc,
    IN void* mad_svc_context,
    IN ib_mad_element_t* p_mad_response )
    ib_api_status_t status;

    CL_ASSERT( mad_svc_context );
    CL_ASSERT( p_mad_response );
    UNUSED_PARAM( h_mad_svc );
    UNUSED_PARAM( mad_svc_context );

    /* Return the MAD. */
    status = ib_destroy_av( p_mad_response->h_av );
    CL_ASSERT( status == IB_SUCCESS );
    status = ib_put_mad( p_mad_response );
    CL_ASSERT( status == IB_SUCCESS );
/*
 * Device management agent receive completion callback.
 */
static void
dm_agent_recv_cb(
    IN ib_mad_svc_handle_t h_mad_svc,
    IN void* mad_svc_context,
    IN ib_mad_element_t* p_mad_request )
    al_iou_port_t* p_iou_port;
    ib_mad_element_t* p_mad_response;
    ib_mad_t* p_mad_req;
    ib_mad_t* p_mad_rsp;
    ib_av_attr_t av_attr;
    ib_api_status_t status;

    CL_ASSERT( mad_svc_context );
    CL_ASSERT( p_mad_request );

    p_iou_port = mad_svc_context;
    p_mad_req = ib_get_mad_buf( p_mad_request );

    /* Get a MAD element for the response. */
    status = ib_get_mad( p_iou_port->pool_key, MAD_BLOCK_SIZE,
        &p_mad_response );
    if( status != IB_SUCCESS )
        status = ib_put_mad( p_mad_request );
        CL_ASSERT( status == IB_SUCCESS );

    /* Initialize the response MAD element. */
    p_mad_response->remote_qp = p_mad_request->remote_qp;
    p_mad_response->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY;
    p_mad_rsp = ib_get_mad_buf( p_mad_response );

    /* Create an address vector for the response. */
    cl_memclr( &av_attr, sizeof( ib_av_attr_t ) );
    av_attr.port_num = p_iou_port->port_num;
    av_attr.sl = p_mad_request->remote_sl;
    av_attr.dlid = p_mad_request->remote_lid;
    av_attr.path_bits = p_mad_request->path_bits;
    av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;
    if( p_mad_request->grh_valid )
        av_attr.grh_valid = TRUE;
        av_attr.grh = *p_mad_request->p_grh;

    status = ib_create_av( p_iou_port->obj.p_ci_ca->h_pd_alias, &av_attr,
        &p_mad_response->h_av );
    if( status != IB_SUCCESS )
        status = ib_put_mad( p_mad_request );
        CL_ASSERT( status == IB_SUCCESS );
        status = ib_put_mad( p_mad_response );
        CL_ASSERT( status == IB_SUCCESS );

    /* Initialize the response header. */
    ib_mad_init_response( p_mad_req, p_mad_rsp, 0 );

    /* Process the MAD request. */
    switch( p_mad_req->method )
    case IB_MAD_METHOD_GET:
        dm_agent_get( p_iou_port, p_mad_req, p_mad_rsp );

    case IB_MAD_METHOD_SET:
        dm_agent_set( p_iou_port, p_mad_req, p_mad_rsp );

        p_mad_rsp->status = IB_MAD_STATUS_UNSUP_METHOD;

    /* Return the request to the pool. */
    status = ib_put_mad( p_mad_request );
    CL_ASSERT( status == IB_SUCCESS );

    /* Send the response. */
    status = ib_send_mad( h_mad_svc, p_mad_response, NULL );
    if( status != IB_SUCCESS )
        status = ib_destroy_av( p_mad_response->h_av );
        CL_ASSERT( status == IB_SUCCESS );
        status = ib_put_mad( p_mad_response );
        CL_ASSERT( status == IB_SUCCESS );
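/*
 * Ownership summary for the exchange above: the request element is
 * always returned to the pool with ib_put_mad once processing is done,
 * and the response element (plus the address vector created for it) is
 * released either by dm_agent_send_cb on send completion or inline
 * here if ib_send_mad fails.
 */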
/*
 * Device management agent get method MAD.
 */
static void
dm_agent_get(
    IN al_iou_port_t* p_iou_port,
    IN ib_mad_t* p_mad_req,
    IN ib_mad_t* p_mad_rsp )
    ib_dm_mad_t* p_dm_mad;

    CL_ASSERT( p_iou_port );
    CL_ASSERT( p_mad_req );
    CL_ASSERT( p_mad_rsp );

    p_iou = PARENT_STRUCT( p_iou_port->obj.p_parent_obj, al_iou_t, obj );

    p_dm_mad = (ib_dm_mad_t*)p_mad_rsp;

    switch( p_mad_req->attr_id )
    case IB_MAD_ATTR_CLASS_PORT_INFO:
        get_class_port_info( p_iou, p_dm_mad );

    case IB_MAD_ATTR_IO_UNIT_INFO:
        get_io_unit_info( p_iou, p_dm_mad );

    case IB_MAD_ATTR_IO_CONTROLLER_PROFILE:
        slot = (uint8_t)CL_NTOH32( p_dm_mad->hdr.attr_mod );
        get_ioc_profile( p_iou, slot, p_dm_mad );

    case IB_MAD_ATTR_SERVICE_ENTRIES:
        uint8_t svc_num_hi;
        uint8_t svc_num_lo;

        ib_dm_get_slot_lo_hi( p_dm_mad->hdr.attr_mod, &slot,
            &svc_num_hi, &svc_num_lo );
        get_svc_entries( p_iou, slot, svc_num_lo, svc_num_hi, p_dm_mad );

    case IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT:
    case IB_MAD_ATTR_PREPARE_TO_TEST:
    case IB_MAD_ATTR_DIAG_CODE:
        p_mad_rsp->status = IB_MAD_STATUS_UNSUP_METHOD_ATTR;


/*
 * Device management agent set method MAD.
 */
static void
dm_agent_set(
    IN al_iou_port_t* p_iou_port,
    IN ib_mad_t* p_mad_req,
    IN ib_mad_t* p_mad_rsp )
    ib_dm_mad_t* p_dm_mad;

    CL_ASSERT( p_iou_port );
    CL_ASSERT( p_mad_req );
    CL_ASSERT( p_mad_rsp );
    UNUSED_PARAM( p_iou_port );

    p_dm_mad = (ib_dm_mad_t*)p_mad_rsp;

    switch( p_mad_req->attr_id )
    case IB_MAD_ATTR_CLASS_PORT_INFO:

    case IB_MAD_ATTR_PREPARE_TO_TEST:
    case IB_MAD_ATTR_TEST_DEVICE_ONCE:
    case IB_MAD_ATTR_TEST_DEVICE_LOOP:
        p_mad_rsp->status = IB_MAD_STATUS_UNSUP_METHOD_ATTR;
static void
get_class_port_info(
    IN al_iou_t* p_iou,
    IN ib_dm_mad_t* p_dm_mad )
    ib_class_port_info_t* p_class_port_info;

    CL_ASSERT( p_iou );
    CL_ASSERT( p_dm_mad );
    UNUSED_PARAM( p_iou );

    p_class_port_info = (ib_class_port_info_t*)&p_dm_mad->data;

    p_class_port_info->base_ver = 1;
    p_class_port_info->class_ver = 1;
    p_class_port_info->cap_mask2_resp_time = CL_HTON32( DM_CLASS_RESP_TIME_VALUE );
static void
get_io_unit_info(
    IN al_iou_t* p_iou,
    IN ib_dm_mad_t* p_dm_mad )
    ib_iou_info_t* p_iou_info;
    cl_list_item_t* p_ioc_item;
    ib_ioc_handle_t h_ioc;

    CL_ASSERT( p_iou );
    CL_ASSERT( p_dm_mad );

    p_iou_info = (ib_iou_info_t*)&p_dm_mad->data;

    cl_spinlock_acquire( &p_iou->obj.lock );

    p_iou_info->change_id = p_iou->change_id;

    /* Mark all slots as non-existent. */
    SET_NIBBLE( &slot, 0, SLOT_DOES_NOT_EXIST );
    SET_NIBBLE( &slot, 1, SLOT_DOES_NOT_EXIST );
    cl_memset( p_iou_info->controller_list, slot, sizeof( p_iou->ioc_list ) );

    /* Now mark the existing slots. */
    for( p_ioc_item = cl_qlist_head( &p_iou->ioc_list );
        p_ioc_item != cl_qlist_end( &p_iou->ioc_list );
        p_ioc_item = cl_qlist_next( p_ioc_item ) )
        h_ioc = PARENT_STRUCT( p_ioc_item, al_ioc_t, iou_item );

        switch( h_ioc->state )
            SET_NIBBLE( p_iou_info->controller_list, slot, IOC_NOT_INSTALLED );

            SET_NIBBLE( p_iou_info->controller_list, slot, IOC_INSTALLED );

    p_iou_info->max_controllers = slot;

    cl_spinlock_release( &p_iou->obj.lock );
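/*
 * The IOUnitInfo ControllerList built above is the nibble array that
 * SET_NIBBLE was defined for: each slot is reported as a 4-bit value
 * (slot does not exist, IOC not installed, or IOC installed), two
 * slots per byte, and max_controllers reports how many slots were
 * walked.
 */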
static void
get_ioc_profile(
    IN al_iou_t* p_iou,
    IN uint8_t slot,
    IN ib_dm_mad_t* p_dm_mad )
    ib_ioc_profile_t* p_ioc_profile;
    cl_list_item_t* p_ioc_item;
    ib_ioc_handle_t h_ioc;

    CL_ASSERT( p_iou );
    CL_ASSERT( p_dm_mad );

    p_ioc_profile = (ib_ioc_profile_t*)&p_dm_mad->data;

    cl_spinlock_acquire( &p_iou->obj.lock );

    /* Verify that the slot number is within range. */
    if( ( slot == 0 ) ||
        ( slot > cl_qlist_count( &p_iou->ioc_list ) ) )
        cl_spinlock_release( &p_iou->obj.lock );
        p_dm_mad->hdr.status = IB_MAD_STATUS_INVALID_FIELD;

    /* The remaining code assumes the slot number starts at zero. */
    for( p_ioc_item = cl_qlist_head( &p_iou->ioc_list );
        p_ioc_item != cl_qlist_end( &p_iou->ioc_list ) && slot;
        p_ioc_item = cl_qlist_next( p_ioc_item ) )

    h_ioc = PARENT_STRUCT( p_ioc_item, al_ioc_t, iou_item );

    cl_spinlock_acquire( &h_ioc->obj.lock );

    /* Verify the IOC state. */
    if( h_ioc->state != IOC_ACTIVE )
        cl_spinlock_release( &h_ioc->obj.lock );
        cl_spinlock_release( &p_iou->obj.lock );
        p_dm_mad->hdr.status = IB_DM_MAD_STATUS_NO_IOC_RESP;

    /* Copy the IOC profile. */
    *p_ioc_profile = h_ioc->ioc_profile;

    cl_spinlock_release( &h_ioc->obj.lock );
    cl_spinlock_release( &p_iou->obj.lock );
static void
get_svc_entries(
    IN al_iou_t* p_iou,
    IN uint8_t slot,
    IN uint8_t svc_num_lo,
    IN uint8_t svc_num_hi,
    IN ib_dm_mad_t* p_dm_mad )
    ib_svc_entries_t* p_svc_entries;
    cl_list_item_t* p_ioc_item;
    cl_list_item_t* p_list_item;
    ib_ioc_handle_t h_ioc;
    al_svc_entry_t* p_svc_entry;

    CL_ASSERT( p_iou );
    CL_ASSERT( p_dm_mad );

    p_svc_entries = (ib_svc_entries_t*)&p_dm_mad->data;

    cl_spinlock_acquire( &p_iou->obj.lock );

    /*
     * Verify that the slot number is within range and
     * a maximum of SVC_ENTRY_COUNT entries is requested.
     */
    if( ( slot == 0 ) ||
        ( slot > cl_qlist_count( &p_iou->ioc_list ) ) ||
        ( ( svc_num_hi - svc_num_lo + 1) > SVC_ENTRY_COUNT ) )
        cl_spinlock_release( &p_iou->obj.lock );
        p_dm_mad->hdr.status = IB_MAD_STATUS_INVALID_FIELD;

    /* The remaining code assumes the slot number starts at zero. */
    for( p_ioc_item = cl_qlist_head( &p_iou->ioc_list );
        p_ioc_item != cl_qlist_end( &p_iou->ioc_list ) && slot;
        p_ioc_item = cl_qlist_next( p_ioc_item ) )

    h_ioc = PARENT_STRUCT( p_ioc_item, al_ioc_t, iou_item );

    cl_spinlock_acquire( &h_ioc->obj.lock );

    /* Verify the IOC state. */
    if( h_ioc->state != IOC_ACTIVE )
        cl_spinlock_release( &h_ioc->obj.lock );
        cl_spinlock_release( &p_iou->obj.lock );
        p_dm_mad->hdr.status = IB_DM_MAD_STATUS_NO_IOC_RESP;

    /* Verify the service entry range. */
    if( ( svc_num_lo > h_ioc->ioc_profile.num_svc_entries ) ||
        ( svc_num_hi >= h_ioc->ioc_profile.num_svc_entries ) )
        cl_spinlock_release( &h_ioc->obj.lock );
        cl_spinlock_release( &p_iou->obj.lock );
        p_dm_mad->hdr.status = IB_MAD_STATUS_INVALID_FIELD;

    for( i = svc_num_lo, j = 0; j < ( svc_num_hi - svc_num_lo + 1 ); i++, j++ )
        /* Locate the service entry.  Traverse until k=0. */
        for( p_list_item = cl_qlist_head( &h_ioc->obj.obj_list );
            k && ( p_list_item != cl_qlist_end( &h_ioc->obj.obj_list ) );
            p_list_item = cl_qlist_next( p_list_item ) )

        if( p_list_item == cl_qlist_end( &h_ioc->obj.obj_list ) )
            /* The service entry list was empty or the end was reached. */
            cl_spinlock_release( &h_ioc->obj.lock );
            cl_spinlock_release( &p_iou->obj.lock );
            p_dm_mad->hdr.status = IB_DM_MAD_STATUS_NO_SVC_ENTRIES;

        p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );
        p_svc_entry = PARENT_STRUCT( p_obj, al_svc_entry_t, obj );

        /* Copy the service entry. */
        p_svc_entries->service_entry[ j ] = p_svc_entry->svc_entry;

    cl_spinlock_release( &h_ioc->obj.lock );
    cl_spinlock_release( &p_iou->obj.lock );