/*
 * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
 * Portions Copyright (c) 2008 Microsoft Corporation.  All rights reserved.
 *
 * This software is available to you under the OpenIB.org BSD license
 * below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <iba/ib_al.h>
#include "al_ioc_pnp.h"
#include "al_debug.h"

#if defined(EVENT_TRACING)
#include "al_ioc_pnp.tmh"
#endif

#include "ib_common.h"
#include <complib/cl_timer.h>
#include <complib/cl_qpool.h>
#include <complib/cl_qmap.h>
#include <complib/cl_fleximap.h>
#include <complib/cl_math.h>
/* Basic sweep operation flow:
 *
 * NOTE: Empty lines indicate asynchronous decoupling.
 *
 * 1. Sweep timer expires.
 * 2. Issue SA query for all CA nodes.
 * 3. Issue SA query for all paths.
 *
 * 4. Query callback for first query - store results.
 * 5. Query callback for second query - process results.
 * 6. Associate paths to nodes.
 * 7. For each node, use the first path to send an IOU Info query.
 *
 * 8a. Recv callback (success) - record IOU info, decrement ref count.
 * 8b. Recv callback (failure) - decrement ref count.
 * 8c. Send failure - decrement ref count.
 * 8d. Send timeout - pick next path and repeat the IOU Info query.
 * 9. Queue results to async proc thread once ref count hits zero.
 *
 * 10. Discard any nodes that failed the IOU Info query or reported no IOCs.
 * 11. For each node scanned that is already known, compare change IDs.
 * 12a. Change ID identical - report any path changes.
 * 12b. Change ID different - for each active IOC slot, query the IOC profile.
 *
 * 13a. Recv callback (success) - associate IOC with IOU, decrement ref count.
 * 13b. Recv callback (failure) - decrement ref count.
 * 13c. Send failure - decrement ref count.
 * 14. Queue results to async proc thread once ref count hits zero.
 *
 * 15. Discard any nodes that have no IOCs.
 * 16. For each IOC of each node, query all service entries.
 *
 * 17a. Recv callback (success) - copy service entries, decrement ref count.
 * 17b. Recv callback (failure) - remove IOC from IOU, decrement ref count.
 * 17c. Send failure - remove IOC from IOU, decrement ref count.
 * 18. Queue results to async proc thread once ref count hits zero.
 *
 * 19. Discard any nodes that have no IOCs.
 * 20. Compare new node map to known nodes and report changes.
 * 21. Compare IOCs for any duplicates and report changes.
 * 22. Compare paths for any duplicates and report changes.
 * 23. Reset sweep timer.
 *
 * Note: the sweep timer is reset at any point where there can be no further
 * progress on the current sweep.
 */
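/*
 * A minimal sketch (not part of the flow above) of the ref-count decoupling
 * used at each asynchronous boundary.  The count is pre-charged so that the
 * completion work is queued exactly once no matter how the individual sends
 * and receives interleave (issue_query below is a stand-in for the actual
 * ib_query/ib_send_mad calls):
 *
 *	cl_atomic_inc( &query_cnt );			// pre-charge
 *	for( each target )
 *	{
 *		if( issue_query() == IB_SUCCESS )
 *			cl_atomic_inc( &query_cnt );	// one ref per outstanding query
 *	}
 *	if( !cl_atomic_dec( &query_cnt ) )		// drop the pre-charge
 *		cl_async_proc_queue( ... );			// all queries already finished
 *
 * Every send/recv completion performs the same dec-and-queue-if-zero step,
 * so the last completion (or the pre-charge release) queues the async item.
 */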
/* Number of entries in the various pools to grow by. */
#define IOC_PNP_POOL_GROW	(10)


/* IOC PnP Manager structure. */
typedef struct _ioc_pnp_mgr
{
	al_obj_t				obj;

	cl_qlist_t				iou_reg_list;
	cl_qlist_t				ioc_reg_list;

	ib_pnp_handle_t			h_pnp;

	cl_async_proc_item_t	async_item;
	boolean_t				async_item_is_busy;

	cl_spinlock_t			iou_pool_lock;
	cl_qpool_t				iou_pool;
	cl_spinlock_t			ioc_pool_lock;
	cl_qpool_t				ioc_pool;
	cl_spinlock_t			path_pool_lock;
	cl_qpool_t				path_pool;

	cl_fmap_t				iou_map;	/* Map of currently known IOUs. */
	cl_fmap_t				sweep_map;	/* Map of IOUs from sweep results. */
	cl_timer_t				sweep_timer;	/* Timer to trigger sweeps. */
	atomic32_t				query_cnt;	/* Number of sweep results outstanding. */

}	ioc_pnp_mgr_t;
/* Per-port IOC PnP agent. */
typedef struct _ioc_pnp_svc
{
	al_obj_t				obj;

	net64_t					ca_guid;
	net64_t					port_guid;

	ib_qp_handle_t			h_qp;
	ib_pool_key_t			pool_key;
	ib_mad_svc_handle_t		h_mad_svc;

	atomic32_t				query_cnt;
	ib_query_handle_t		h_node_query;
	ib_query_handle_t		h_path_query;
	ib_mad_element_t		*p_node_element;
	ib_mad_element_t		*p_path_element;
	uint32_t				num_nodes;
	uint32_t				num_paths;

}	ioc_pnp_svc_t;
/****d* Access Layer:IOC PnP/iou_path_t
* DESCRIPTION
*	Describes a path to an IOU node.
*
* SYNOPSIS
*/
typedef struct _iou_path
{
	cl_fmap_item_t			map_item;

	net64_t					ca_guid;
	net64_t					port_guid;
	ib_path_rec_t			rec;

}	iou_path_t;
/*
* FIELDS
*	map_item
*		Map item for storing paths in a map.
*********/
/****d* Access Layer:IOC PnP/iou_node_t
* DESCRIPTION
*	Describes an IOU node on the fabric.
*
* SYNOPSIS
*/
typedef struct _iou_node
{
	cl_fmap_item_t			map_item;
	cl_fmap_t				path_map;
	cl_qmap_t				ioc_map;
	cl_spinlock_t			lock;

	iou_path_t				*p_config_path;

	net64_t					ca_guid;
	net64_t					guid;
	net64_t					chassis_guid;
	uint8_t					slot;
	net32_t					vend_id;
	net16_t					dev_id;
	net32_t					revision;
	ib_iou_info_t			info;

	char					desc[IB_NODE_DESCRIPTION_SIZE + 1];

}	iou_node_t;
/*
* FIELDS
*	map_item
*		Map item for storing IOUs in a map.
*
*	path_map
*		Map of paths to the IOU.
*
*	ioc_map
*		Map of IOCs on the IOU.
*
*	p_config_path
*		Path used to get configuration information from the IOU.
*
*	ca_guid
*		CA GUID through which the IOU is accessible.
*
*	guid
*		Node GUID used as key when storing IOUs in the map.
*
*	chassis_guid
*		GUID of the chassis in which the IOU is installed.
*
*	slot
*		Slot number in the chassis in which the IOU is installed.
*
*	vend_id
*		Vendor ID of the IOU.
*
*	dev_id
*		Device ID of the IOU.
*
*	revision
*		Device revision of the IOU.
*
*	info
*		I/O unit info structure.
*
*	desc
*		Node description as provided in ib_node_record_t, along with space
*		for a terminating NULL.
*
* NOTES
*	The guid member must follow the ca_guid member to allow both GUIDs to
*	be compared in a single call to cl_memcmp.
*********/
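/*
 * The pragma below silences warning C4324 ("structure was padded due to
 * alignment specifier") for iou_ioc_t; presumably the atomic32_t member
 * carries an alignment requirement that pads the structure.
 */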
#pragma warning(disable:4324)
typedef struct _iou_ioc
{
	cl_map_item_t			map_item;

	iou_node_t				*p_iou;
	uint8_t					slot;

	ib_ioc_profile_t		profile;
	uint8_t					num_valid_entries;
	ib_svc_entry_t			*p_svc_entries;
	atomic32_t				ref_cnt;

}	iou_ioc_t;
#pragma warning(default:4324)
typedef enum _sweep_state
{
	SWEEP_IOU_INFO,
	SWEEP_IOC_PROFILE,
	SWEEP_SVC_ENTRIES,
	SWEEP_COMPLETE

}	sweep_state_t;


typedef struct _ioc_sweep_results
{
	cl_async_proc_item_t	async_item;
	sweep_state_t			state;
	ioc_pnp_svc_t			*p_svc;
	atomic32_t				query_cnt;
	cl_fmap_t				iou_map;

}	ioc_sweep_results_t;


typedef struct _al_pnp_ioc_event
{
	cl_async_proc_item_t	async_item;
	ib_pnp_rec_t			*p_rec;
	ib_pnp_rec_t			*p_user_rec;

}	al_pnp_ioc_event_t;
/* Global instance of the IOC PnP manager. */
ioc_pnp_mgr_t	*gp_ioc_pnp = NULL;
uint32_t		g_ioc_query_timeout = 250;
uint32_t		g_ioc_query_retries = 4;
uint32_t		g_ioc_poll_interval = 30000;
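/*
 * Tunables: g_ioc_query_timeout is the per-query timeout in milliseconds
 * (it feeds both ib_query_req_t.timeout_ms and ib_mad_element_t.timeout_ms),
 * g_ioc_query_retries is the matching retry count, and g_ioc_poll_interval
 * is presumably the sweep period in milliseconds, gating whether the sweep
 * timer is armed at all.
 */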
/******************************************************************************
*
* IOC PnP Manager functions - global object.
*
******************************************************************************/
static void
__construct_ioc_pnp(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr );

static ib_api_status_t
__init_ioc_pnp(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr );

static void
__destroying_ioc_pnp(
	IN				al_obj_t					*p_obj );

static void
__free_ioc_pnp(
	IN				al_obj_t					*p_obj );

static ib_api_status_t
__ioc_pnp_cb(
	IN				ib_pnp_rec_t				*p_pnp_rec );

static cl_status_t
__init_iou(
	IN				void* const					p_obj,
	IN				void*						context,
		OUT			cl_pool_item_t** const		pp_pool_item );
/******************************************************************************
*
* IOC PnP manager sweep-related functions.
*
******************************************************************************/
static iou_node_t*
__get_iou(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN		const	net64_t						ca_guid,
	IN		const	ib_node_record_t* const		p_node_rec );

static void
__put_iou(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN				iou_node_t* const			p_iou );

static void
__put_iou_map(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN				cl_fmap_t* const			p_iou_map );

static iou_path_t*
__get_path(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN		const	net64_t						ca_guid,
	IN		const	net64_t						port_guid,
	IN		const	ib_path_rec_t* const		p_path_rec );

static void
__put_path(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN				iou_path_t* const			p_path );

static void
__put_path_map(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN				cl_fmap_t* const			p_path_map );

static iou_ioc_t*
__get_ioc(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN		const	uint32_t					ioc_slot,
	IN		const	ib_ioc_profile_t* const		p_profile );

static void
__put_ioc(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN				iou_ioc_t* const			p_ioc );

static void
__put_ioc_map(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN				cl_qmap_t* const			p_ioc_map );

static intn_t
__iou_cmp(
	IN		const	void* const					p_key1,
	IN		const	void* const					p_key2 );

static intn_t
__path_cmp(
	IN		const	void* const					p_key1,
	IN		const	void* const					p_key2 );

static void
__ioc_pnp_timer_cb(
	IN				void						*context );

static void
__ioc_async_cb(
	IN				cl_async_proc_item_t		*p_async_item );
/******************************************************************************
*
* IOC PnP service - per local port child of the IOC PnP manager.
*
******************************************************************************/
static ib_api_status_t
__create_ioc_pnp_svc(
	IN				ib_pnp_rec_t				*p_pnp_rec );

static ib_api_status_t
__init_ioc_pnp_svc(
	IN				ioc_pnp_svc_t* const		p_ioc_pnp_svc,
	IN		const	ib_pnp_rec_t* const			p_pnp_rec );

static void
__destroying_ioc_pnp_svc(
	IN				al_obj_t					*p_obj );

static void
__free_ioc_pnp_svc(
	IN				al_obj_t					*p_obj );
/******************************************************************************
*
* IOC PnP service sweep functions.
*
******************************************************************************/
static void
__ioc_pnp_send_cb(
	IN		const	ib_mad_svc_handle_t			h_mad_svc,
	IN				void						*mad_svc_context,
	IN				ib_mad_element_t			*p_request_mad );

static void
__ioc_pnp_recv_cb(
	IN		const	ib_mad_svc_handle_t			h_mad_svc,
	IN				void						*mad_svc_context,
	IN				ib_mad_element_t			*p_mad_response );

static void
__node_rec_cb(
	IN				ib_query_rec_t				*p_query_rec );

static void
__path_rec_cb(
	IN				ib_query_rec_t				*p_query_rec );

static void
__process_sweep(
	IN				cl_async_proc_item_t		*p_async_item );

static void
__process_query(
	IN				ioc_pnp_svc_t* const		p_svc );

static void
__process_nodes(
	IN				ioc_pnp_svc_t* const		p_svc,
	IN				cl_qmap_t* const			p_iou_map );

static void
__process_paths(
	IN				ioc_pnp_svc_t* const		p_svc,
	IN				cl_qmap_t* const			p_iou_map );

static void
__build_iou_map(
	IN				cl_qmap_t* const			p_port_map,
	IN	OUT			cl_fmap_t* const			p_iou_map );

static void
__format_dm_get(
	IN		const	void* const					context1,
	IN		const	void* const					context2,
	IN		const	iou_path_t* const			p_path,
	IN		const	net16_t						attr_id,
	IN		const	net32_t						attr_mod,
	IN	OUT			ib_mad_element_t* const		p_mad_element );

static ib_api_status_t
__ioc_query_sa(
	IN				ioc_pnp_svc_t* const		p_svc );

static ib_api_status_t
__query_ious(
	IN				ioc_sweep_results_t* const	p_results );

static ib_api_status_t
__query_ioc_profiles(
	IN				ioc_sweep_results_t* const	p_results );

static ib_api_status_t
__query_svc_entries(
	IN				ioc_sweep_results_t* const	p_results );

static void
__update_results(
	IN				ioc_sweep_results_t* const	p_results );

static void
__iou_info_resp(
	IN	OUT			iou_node_t* const			p_iou,
	IN		const	ib_dm_mad_t* const			p_mad );

static void
__ioc_profile_resp(
	IN	OUT			iou_node_t* const			p_iou,
	IN		const	ib_dm_mad_t* const			p_mad );

static void
__svc_entry_resp(
	IN	OUT			iou_ioc_t* const			p_ioc,
	IN		const	ib_dm_mad_t* const			p_mad );
/******************************************************************************
*
* Client registration and notification management
*
******************************************************************************/
static void
__change_ious(
	IN				cl_fmap_t* const			p_cur_ious,
	IN				cl_fmap_t* const			p_dup_ious );

static void
__add_ious(
	IN				cl_fmap_t* const			p_cur_ious,
	IN				cl_fmap_t* const			p_new_ious,
	IN				al_pnp_t* const				p_reg OPTIONAL );

static void
__remove_ious(
	IN				cl_fmap_t* const			p_old_ious );

static void
__add_iocs(
	IN				iou_node_t* const			p_iou,
	IN				cl_qmap_t* const			p_new_iocs,
	IN				al_pnp_t* const				p_reg OPTIONAL );

static void
__remove_iocs(
	IN				iou_node_t* const			p_iou,
	IN				cl_qmap_t* const			p_old_iocs );

static void
__add_paths(
	IN				iou_node_t* const			p_iou,
	IN				cl_qmap_t* const			p_ioc_map,
	IN				cl_fmap_t* const			p_new_paths,
	IN				al_pnp_t* const				p_reg OPTIONAL );

static void
__add_ioc_paths(
	IN				iou_ioc_t* const			p_ioc,
	IN				cl_fmap_t* const			p_new_paths,
	IN				al_pnp_t* const				p_reg OPTIONAL );

static void
__remove_paths(
	IN				cl_qmap_t* const			p_ioc_map,
	IN				cl_fmap_t* const			p_old_paths );

static void
__report_iou_add(
	IN				iou_node_t* const			p_iou,
	IN				al_pnp_t* const				p_reg OPTIONAL );

static void
__report_iou_remove(
	IN				iou_node_t* const			p_iou );

static void
__report_ioc_add(
	IN				iou_node_t* const			p_iou,
	IN				iou_ioc_t* const			p_ioc,
	IN				al_pnp_t* const				p_reg OPTIONAL );

static void
__report_ioc_remove(
	IN				iou_node_t* const			p_iou,
	IN				iou_ioc_t* const			p_ioc );

static void
__report_path(
	IN				iou_ioc_t* const			p_ioc,
	IN				iou_path_t* const			p_path,
	IN				ib_pnp_event_t				pnp_event,
	IN				al_pnp_t* const				p_reg OPTIONAL );
/******************************************************************************
*
* Implementation
*
******************************************************************************/
ib_api_status_t
create_ioc_pnp(
	IN				al_obj_t* const				p_parent_obj )
{
	ib_api_status_t		status;
	ib_pnp_req_t		pnp_req;

	AL_ENTER( AL_DBG_PNP );

	CL_ASSERT( !gp_ioc_pnp );

	gp_ioc_pnp = (ioc_pnp_mgr_t*)cl_zalloc( sizeof(ioc_pnp_mgr_t) );
	if( !gp_ioc_pnp )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("Failed to allocate IOC PnP manager.\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	__construct_ioc_pnp( gp_ioc_pnp );

	status = __init_ioc_pnp( gp_ioc_pnp );
	if( status != IB_SUCCESS )
	{
		__free_ioc_pnp( &gp_ioc_pnp->obj );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("__init_ioc_pnp returned %s\n", ib_get_err_str( status )) );
		return status;
	}

	/* Attach to the parent object. */
	status = attach_al_obj( p_parent_obj, &gp_ioc_pnp->obj );
	if( status != IB_SUCCESS )
	{
		gp_ioc_pnp->obj.pfn_destroy( &gp_ioc_pnp->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	/* Register for port PnP notifications. */
	cl_memclr( &pnp_req, sizeof(pnp_req) );
	pnp_req.pnp_class = IB_PNP_PORT;
	pnp_req.pnp_context = gp_ioc_pnp;
	pnp_req.pfn_pnp_cb = __ioc_pnp_cb;
	status = ib_reg_pnp( gh_al, &pnp_req, &gp_ioc_pnp->h_pnp );
	if( status != IB_SUCCESS )
	{
		gp_ioc_pnp->obj.pfn_destroy( &gp_ioc_pnp->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_pnp failed with status %s.\n",
			ib_get_err_str( status )) );
		return status;
	}

	/*
	 * We don't release the reference taken in init_al_obj
	 * since PnP deregistration is asynchronous.
	 */
	AL_EXIT( AL_DBG_PNP );
	return IB_SUCCESS;
}
static void
__construct_ioc_pnp(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr )
{
	AL_ENTER( AL_DBG_PNP );

	cl_qlist_init( &p_ioc_mgr->iou_reg_list );
	cl_qlist_init( &p_ioc_mgr->ioc_reg_list );
	cl_fmap_init( &p_ioc_mgr->iou_map, __iou_cmp );
	construct_al_obj( &p_ioc_mgr->obj, AL_OBJ_TYPE_IOC_PNP_MGR );
	cl_spinlock_construct( &p_ioc_mgr->iou_pool_lock );
	cl_spinlock_construct( &p_ioc_mgr->path_pool_lock );
	cl_spinlock_construct( &p_ioc_mgr->ioc_pool_lock );
	cl_qpool_construct( &p_ioc_mgr->iou_pool );
	cl_qpool_construct( &p_ioc_mgr->path_pool );
	cl_qpool_construct( &p_ioc_mgr->ioc_pool );
	cl_fmap_init( &p_ioc_mgr->sweep_map, __iou_cmp );
	cl_timer_construct( &p_ioc_mgr->sweep_timer );
	p_ioc_mgr->async_item.pfn_callback = __ioc_async_cb;

	AL_EXIT( AL_DBG_PNP );
}
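/*
 * Design note: the construct routine above performs only operations that
 * cannot fail (list/map/lock/pool/timer construction), which is what makes
 * it safe for create_ioc_pnp to call __free_ioc_pnp no matter how far
 * __init_ioc_pnp got before failing.
 */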
static ib_api_status_t
__init_ioc_pnp(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr )
{
	ib_api_status_t		status;
	cl_status_t			cl_status;

	AL_ENTER( AL_DBG_PNP );

	/* Initialize the pool locks. */
	cl_status = cl_spinlock_init( &p_ioc_mgr->iou_pool_lock );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_spinlock_init returned %#x\n", cl_status) );
		return ib_convert_cl_status( cl_status );
	}

	cl_status = cl_spinlock_init( &p_ioc_mgr->path_pool_lock );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_spinlock_init returned %#x\n", cl_status) );
		return ib_convert_cl_status( cl_status );
	}

	cl_status = cl_spinlock_init( &p_ioc_mgr->ioc_pool_lock );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_spinlock_init returned %#x\n", cl_status) );
		return ib_convert_cl_status( cl_status );
	}

	/* Initialize the pools. */
	cl_status = cl_qpool_init( &p_ioc_mgr->iou_pool, 0, 0, IOC_PNP_POOL_GROW,
		sizeof(iou_node_t), __init_iou, NULL, p_ioc_mgr );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_qpool_init returned %#x\n", cl_status) );
		return ib_convert_cl_status( cl_status );
	}

	cl_status = cl_qpool_init( &p_ioc_mgr->path_pool, 0, 0, IOC_PNP_POOL_GROW,
		sizeof(iou_path_t), NULL, NULL, NULL );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_qpool_init returned %#x\n", cl_status) );
		return ib_convert_cl_status( cl_status );
	}

	cl_status = cl_qpool_init( &p_ioc_mgr->ioc_pool, 0, 0, IOC_PNP_POOL_GROW,
		sizeof(iou_ioc_t), NULL, NULL, NULL );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_qpool_init returned %#x\n", cl_status) );
		return ib_convert_cl_status( cl_status );
	}

	/* Initialize the sweep timer. */
	cl_status = cl_timer_init( &p_ioc_mgr->sweep_timer,
		__ioc_pnp_timer_cb, p_ioc_mgr );
	if( cl_status != CL_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("cl_timer_init failed with %#x\n", cl_status) );
		return ib_convert_cl_status( cl_status );
	}

	status = init_al_obj( &p_ioc_mgr->obj, p_ioc_mgr, TRUE,
		__destroying_ioc_pnp, NULL, __free_ioc_pnp );
	if( status != IB_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("init_al_obj returned %s\n", ib_get_err_str( status )) );
		return status;
	}

	AL_EXIT( AL_DBG_PNP );
	return IB_SUCCESS;
}
static void
__destroying_ioc_pnp(
	IN				al_obj_t					*p_obj )
{
	ib_api_status_t		status;

	AL_ENTER( AL_DBG_PNP );

	UNUSED_PARAM( p_obj );
	CL_ASSERT( &gp_ioc_pnp->obj == p_obj );

	/* Stop the timer. */
	cl_timer_stop( &gp_ioc_pnp->sweep_timer );

	if( gp_ioc_pnp->h_pnp )
	{
		status = ib_dereg_pnp( gp_ioc_pnp->h_pnp,
			(ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}

	AL_EXIT( AL_DBG_PNP );
}
static void
__free_ioc_pnp(
	IN				al_obj_t					*p_obj )
{
	AL_ENTER( AL_DBG_PNP );

	CL_ASSERT( &gp_ioc_pnp->obj == p_obj );

	/*
	 * Return all items from the maps to their pools before
	 * destroying the pools.
	 */
	__put_iou_map( gp_ioc_pnp, &gp_ioc_pnp->iou_map );
	cl_timer_destroy( &gp_ioc_pnp->sweep_timer );
	cl_qpool_destroy( &gp_ioc_pnp->ioc_pool );
	cl_qpool_destroy( &gp_ioc_pnp->path_pool );
	cl_qpool_destroy( &gp_ioc_pnp->iou_pool );
	cl_spinlock_destroy( &gp_ioc_pnp->ioc_pool_lock );
	cl_spinlock_destroy( &gp_ioc_pnp->path_pool_lock );
	cl_spinlock_destroy( &gp_ioc_pnp->iou_pool_lock );
	destroy_al_obj( p_obj );
	cl_free( gp_ioc_pnp );
	gp_ioc_pnp = NULL;

	AL_EXIT( AL_DBG_PNP );
}
static cl_status_t
__init_iou(
	IN				void* const					p_obj,
	IN				void*						context,
		OUT			cl_pool_item_t** const		pp_pool_item )
{
	iou_node_t	*p_iou;

	UNUSED_PARAM( context );

	p_iou = (iou_node_t*)p_obj;

	cl_spinlock_construct( &p_iou->lock );
	cl_qmap_init( &p_iou->ioc_map );
	cl_fmap_init( &p_iou->path_map, __path_cmp );

	*pp_pool_item = &p_iou->map_item.pool_item;
	return cl_spinlock_init( &p_iou->lock );
}
static iou_node_t*
__get_iou(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN		const	net64_t						ca_guid,
	IN		const	ib_node_record_t* const		p_node_rec )
{
	iou_node_t		*p_iou;
	cl_pool_item_t	*p_item;

	cl_spinlock_acquire( &p_ioc_mgr->iou_pool_lock );
	p_item = cl_qpool_get( &p_ioc_mgr->iou_pool );
	cl_spinlock_release( &p_ioc_mgr->iou_pool_lock );
	if( !p_item )
		return NULL;

	p_iou = PARENT_STRUCT( PARENT_STRUCT( p_item, cl_map_item_t, pool_item ),
		iou_node_t, map_item );

	p_iou->ca_guid = ca_guid;
	p_iou->guid = p_node_rec->node_info.node_guid;
	p_iou->chassis_guid = p_node_rec->node_info.sys_guid;
	p_iou->vend_id = ib_node_info_get_vendor_id( &p_node_rec->node_info );
	p_iou->dev_id = p_node_rec->node_info.device_id;
	p_iou->revision = p_node_rec->node_info.revision;

	cl_memclr( &p_iou->info, sizeof(ib_iou_info_t) );

	cl_memcpy( p_iou->desc, p_node_rec->node_desc.description,
		IB_NODE_DESCRIPTION_SIZE );

	/* The terminating NULL should never get overwritten. */
	CL_ASSERT( p_iou->desc[IB_NODE_DESCRIPTION_SIZE] == '\0' );

	return p_iou;
}
static void
__put_iou(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN				iou_node_t* const			p_iou )
{
	__put_path_map( p_ioc_mgr, &p_iou->path_map );
	__put_ioc_map( p_ioc_mgr, &p_iou->ioc_map );

	cl_spinlock_acquire( &p_ioc_mgr->iou_pool_lock );
	cl_qpool_put( &p_ioc_mgr->iou_pool, &p_iou->map_item.pool_item );
	cl_spinlock_release( &p_ioc_mgr->iou_pool_lock );
}
static void
__put_iou_map(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN				cl_fmap_t* const			p_iou_map )
{
	cl_qlist_t		list;
	cl_fmap_item_t	*p_item;
	iou_node_t		*p_iou;

	cl_qlist_init( &list );

	p_item = cl_fmap_head( p_iou_map );
	while( p_item != cl_fmap_end( p_iou_map ) )
	{
		cl_fmap_remove_item( p_iou_map, p_item );

		p_iou = PARENT_STRUCT(
			PARENT_STRUCT( p_item, cl_map_item_t, pool_item ),
			iou_node_t, map_item );

		__put_path_map( p_ioc_mgr, &p_iou->path_map );
		__put_ioc_map( p_ioc_mgr, &p_iou->ioc_map );
		cl_qlist_insert_head( &list, &p_item->pool_item.list_item );
		p_item = cl_fmap_head( p_iou_map );
	}

	cl_spinlock_acquire( &p_ioc_mgr->iou_pool_lock );
	cl_qpool_put_list( &p_ioc_mgr->iou_pool, &list );
	cl_spinlock_release( &p_ioc_mgr->iou_pool_lock );
}
static iou_path_t*
__get_path(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN		const	net64_t						ca_guid,
	IN		const	net64_t						port_guid,
	IN		const	ib_path_rec_t* const		p_path_rec )
{
	cl_pool_item_t	*p_item;
	iou_path_t		*p_path;

	cl_spinlock_acquire( &p_ioc_mgr->path_pool_lock );
	p_item = cl_qpool_get( &p_ioc_mgr->path_pool );
	cl_spinlock_release( &p_ioc_mgr->path_pool_lock );
	if( !p_item )
		return NULL;

	p_path = PARENT_STRUCT( PARENT_STRUCT( p_item, cl_fmap_item_t, pool_item ),
		iou_path_t, map_item );

	/*
	 * Store the local CA and port GUID for this path to let recipients
	 * of a PATH_ADD event avoid a CA lookup based on GID.
	 */
	p_path->ca_guid = ca_guid;
	p_path->port_guid = port_guid;

	p_path->rec = *p_path_rec;
	/* Clear the num_path field since it is just "undefined". */
	p_path->rec.num_path = 0;
	/*
	 * Clear reserved fields in case they were set to prevent undue path
	 * comparison mismatches.
	 */
	p_path->rec.resv1 = 0;
	p_path->rec.resv2 = 0;

	return p_path;
}
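/*
 * The normalization above matters because __path_cmp (below) compares
 * entire ib_path_rec_t structures with cl_memcmp: two records describing
 * the same path would otherwise compare as different if they disagreed
 * only in num_path or in the reserved bytes.
 */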
static void
__put_path(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN				iou_path_t* const			p_path )
{
	cl_spinlock_acquire( &p_ioc_mgr->path_pool_lock );
	cl_qpool_put( &p_ioc_mgr->path_pool, &p_path->map_item.pool_item );
	cl_spinlock_release( &p_ioc_mgr->path_pool_lock );
}
static void
__put_path_map(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN				cl_fmap_t* const			p_path_map )
{
	cl_qlist_t		list;
	cl_fmap_item_t	*p_item;
	iou_path_t		*p_path;

	cl_qlist_init( &list );

	p_item = cl_fmap_head( p_path_map );
	while( p_item != cl_fmap_end( p_path_map ) )
	{
		cl_fmap_remove_item( p_path_map, p_item );

		p_path = PARENT_STRUCT( PARENT_STRUCT( p_item, cl_fmap_item_t, pool_item ),
			iou_path_t, map_item );

		cl_qlist_insert_head( &list, &p_item->pool_item.list_item );
		p_item = cl_fmap_head( p_path_map );
	}

	cl_spinlock_acquire( &p_ioc_mgr->path_pool_lock );
	cl_qpool_put_list( &p_ioc_mgr->path_pool, &list );
	cl_spinlock_release( &p_ioc_mgr->path_pool_lock );
}
static iou_ioc_t*
__get_ioc(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN		const	uint32_t					ioc_slot,
	IN		const	ib_ioc_profile_t* const		p_profile )
{
	cl_pool_item_t	*p_item;
	iou_ioc_t		*p_ioc;
	ib_svc_entry_t	*p_svc_entries;

	if( !p_profile->num_svc_entries )
		return NULL;

	p_svc_entries =
		cl_zalloc( sizeof(ib_svc_entry_t) * p_profile->num_svc_entries );
	if( !p_svc_entries )
		return NULL;

	cl_spinlock_acquire( &p_ioc_mgr->ioc_pool_lock );
	p_item = cl_qpool_get( &p_ioc_mgr->ioc_pool );
	cl_spinlock_release( &p_ioc_mgr->ioc_pool_lock );
	if( !p_item )
	{
		cl_free( p_svc_entries );
		return NULL;
	}

	p_ioc = PARENT_STRUCT( PARENT_STRUCT( p_item, cl_map_item_t, pool_item ),
		iou_ioc_t, map_item );

	CL_ASSERT( !p_ioc->ref_cnt );

	CL_ASSERT( !(ioc_slot >> 8) );
	p_ioc->slot = (uint8_t)ioc_slot;
	p_ioc->profile = *p_profile;
	p_ioc->num_valid_entries = 0;
	p_ioc->p_svc_entries = p_svc_entries;
	cl_atomic_inc( &p_ioc->ref_cnt );

	return p_ioc;
}
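/*
 * Reference-count note: a new IOC leaves __get_ioc holding a single
 * reference.  __put_ioc (below) drops one reference and only frees the
 * service-entry array and returns the IOC to its pool at zero, while
 * __svc_entry_resp drops one reference per service-entry response it
 * consumes, presumably balancing a reference taken when the query was sent.
 */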
static void
__put_ioc(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN				iou_ioc_t* const			p_ioc )
{
	if( cl_atomic_dec( &p_ioc->ref_cnt ) == 0 )
	{
		cl_free( p_ioc->p_svc_entries );

		cl_spinlock_acquire( &p_ioc_mgr->ioc_pool_lock );
		cl_qpool_put( &p_ioc_mgr->ioc_pool, &p_ioc->map_item.pool_item );
		cl_spinlock_release( &p_ioc_mgr->ioc_pool_lock );
	}
}
static void
__put_ioc_map(
	IN				ioc_pnp_mgr_t* const		p_ioc_mgr,
	IN				cl_qmap_t* const			p_ioc_map )
{
	cl_qlist_t		list;
	cl_map_item_t	*p_item;
	iou_ioc_t		*p_ioc;

	cl_qlist_init( &list );

	p_item = cl_qmap_head( p_ioc_map );
	while( p_item != cl_qmap_end( p_ioc_map ) )
	{
		cl_qmap_remove_item( p_ioc_map, p_item );

		p_ioc = PARENT_STRUCT(
			PARENT_STRUCT( p_item, cl_map_item_t, pool_item ),
			iou_ioc_t, map_item );

		if( cl_atomic_dec( &p_ioc->ref_cnt ) == 0 )
		{
			cl_free( p_ioc->p_svc_entries );
			cl_qlist_insert_head( &list, &p_item->pool_item.list_item );
		}
		p_item = cl_qmap_head( p_ioc_map );
	}

	cl_spinlock_acquire( &p_ioc_mgr->ioc_pool_lock );
	cl_qpool_put_list( &p_ioc_mgr->ioc_pool, &list );
	cl_spinlock_release( &p_ioc_mgr->ioc_pool_lock );
}
/*
 * Compares two IOUs for inserts/lookups in a flexi map.  Keys are the
 * address of the ca_guid, which is adjacent to the node GUID of the IOU.
 * This allows for a single call to cl_memcmp.
 */
static intn_t
__iou_cmp(
	IN		const	void* const					p_key1,
	IN		const	void* const					p_key2 )
{
	return cl_memcmp( p_key1, p_key2, sizeof(uint64_t) * 2 );
}
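/*
 * Illustrative layout of the 16-byte key compared above (see the NOTES on
 * iou_node_t): the key passed to the map is &p_iou->ca_guid, and since guid
 * immediately follows ca_guid, a single cl_memcmp of 2 * sizeof(uint64_t)
 * bytes covers both:
 *
 *	net64_t		ca_guid;	// key starts here
 *	net64_t		guid;		// covered by the same compare
 *
 * Two IOUs are therefore equal only when both the local CA GUID and the
 * IOU node GUID match.
 */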
/*
 * Compares two paths for inserts/lookups in a flexi map.
 */
static intn_t
__path_cmp(
	IN		const	void* const					p_key1,
	IN		const	void* const					p_key2 )
{
	return cl_memcmp( p_key1, p_key2, sizeof(ib_path_rec_t) );
}
/*
 * Removes all paths and orphaned IOC/IOUs upon a port DOWN event.
 */
static void
__process_port_down(
	IN		const	net64_t						port_guid )
{
	cl_fmap_item_t	*p_path_item;
	cl_fmap_item_t	*p_iou_item;
	iou_node_t		*p_iou;
	iou_path_t		*p_path;
	cl_fmap_t		old_paths;
	cl_fmap_t		old_ious;

	AL_ENTER( AL_DBG_PNP );

	cl_fmap_init( &old_paths, __path_cmp );
	cl_fmap_init( &old_ious, __iou_cmp );

	p_iou_item = cl_fmap_head( &gp_ioc_pnp->iou_map );
	while( p_iou_item != cl_fmap_end( &gp_ioc_pnp->iou_map ) )
	{
		p_iou = PARENT_STRUCT( p_iou_item, iou_node_t, map_item );
		/*
		 * Note that it is safe to move to the next item even if we remove
		 * the IOU from the map since the map effectively maintains an
		 * ordered list of its contents.
		 */
		p_iou_item = cl_fmap_next( p_iou_item );

		p_path_item = cl_fmap_head( &p_iou->path_map );
		while( p_path_item != cl_fmap_end( &p_iou->path_map ) )
		{
			p_path = PARENT_STRUCT( p_path_item, iou_path_t, map_item );
			p_path_item = cl_fmap_next( p_path_item );
			if( p_path->rec.sgid.unicast.interface_id == port_guid )
			{
				cl_fmap_remove_item( &p_iou->path_map, &p_path->map_item );
				cl_fmap_insert( &old_paths, &p_path->rec, &p_path->map_item );
			}
		}

		if( !cl_fmap_count( &p_iou->path_map ) )
		{
			/* Move the paths back to the IOU so that they get freed. */
			cl_fmap_merge( &p_iou->path_map, &old_paths );
			cl_fmap_remove_item( &gp_ioc_pnp->iou_map, &p_iou->map_item );
			cl_fmap_insert( &old_ious, &p_iou->ca_guid, &p_iou->map_item );
		}
		else
		{
			/* Report the removed paths. */
			__remove_paths( &p_iou->ioc_map, &old_paths );
		}
	}

	/* Report any removed IOUs. */
	__remove_ious( &old_ious );

	AL_EXIT( AL_DBG_PNP );
}
/*
 * PnP callback for port event notifications.
 */
static ib_api_status_t
__ioc_pnp_cb(
	IN				ib_pnp_rec_t				*p_pnp_rec )
{
	ib_api_status_t	status = IB_SUCCESS;
	cl_status_t		cl_status;

	AL_ENTER( AL_DBG_PNP );

	AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,
		("p_pnp_rec->pnp_event = 0x%x (%s)\n",
		p_pnp_rec->pnp_event, ib_get_pnp_event_str( p_pnp_rec->pnp_event )) );

	switch( p_pnp_rec->pnp_event )
	{
	case IB_PNP_PORT_ADD:
		/* Create the port service. */
		CL_ASSERT( !p_pnp_rec->context );
		status = __create_ioc_pnp_svc( p_pnp_rec );
		break;

	case IB_PNP_SM_CHANGE:
	case IB_PNP_PORT_ACTIVE:
		/* Initiate a sweep - delay a bit to allow the ports to come up. */
		if( g_ioc_poll_interval && !gp_ioc_pnp->query_cnt )
		{
			cl_status = cl_timer_start( &gp_ioc_pnp->sweep_timer, 250 );
			CL_ASSERT( cl_status == CL_SUCCESS );
		}
		break;

	case IB_PNP_PORT_DOWN:
	case IB_PNP_PORT_INIT:
	case IB_PNP_PORT_ARMED:
		CL_ASSERT( p_pnp_rec->context );

		/*
		 * Report IOC and IOU remove events for any IOU/IOCs that only have
		 * paths through this port.  Note, no need to synchronize with a
		 * sweep since synchronization is provided by the PnP thread.
		 */
		__process_port_down( p_pnp_rec->guid );
		break;

	case IB_PNP_PORT_REMOVE:
		/* Destroy the port service. */
		ref_al_obj( &((ioc_pnp_svc_t*)p_pnp_rec->context)->obj );
		((ioc_pnp_svc_t*)p_pnp_rec->context)->obj.pfn_destroy(
			&((ioc_pnp_svc_t*)p_pnp_rec->context)->obj, NULL );
		p_pnp_rec->context = NULL;
		break;

	default:
		break;	/* Ignore other PnP events. */
	}

	AL_EXIT( AL_DBG_PNP );
	return status;
}
static ib_api_status_t
__init_ioc_pnp_svc(
	IN				ioc_pnp_svc_t* const		p_ioc_pnp_svc,
	IN		const	ib_pnp_rec_t* const			p_pnp_rec )
{
	ib_api_status_t		status;
	ib_ca_handle_t		h_ca;
	ib_qp_create_t		qp_create;
	ib_mad_svc_t		mad_svc;
	ib_pnp_port_rec_t	*p_pnp_port_rec;

	AL_ENTER( AL_DBG_PNP );

	p_pnp_port_rec = PARENT_STRUCT( p_pnp_rec, ib_pnp_port_rec_t, pnp_rec );

	/* Store the CA and port GUID so we can issue SA queries. */
	p_ioc_pnp_svc->ca_guid = p_pnp_port_rec->p_ca_attr->ca_guid;
	p_ioc_pnp_svc->port_guid = p_pnp_rec->guid;

	/* Acquire the correct CI CA for this port. */
	h_ca = acquire_ca( p_pnp_port_rec->p_ca_attr->ca_guid );
	if( !h_ca )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );
		return IB_INVALID_GUID;
	}
	p_ioc_pnp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca;

	/* Create the MAD QP. */
	cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );
	qp_create.qp_type = IB_QPT_QP1_ALIAS;
	qp_create.sq_depth = p_pnp_port_rec->p_ca_attr->max_wrs;
	qp_create.sq_sge = 1;
	qp_create.sq_signaled = TRUE;
	/*
	 * We use the IOC PnP service's al_obj_t as the context to allow using
	 * deref_al_obj as the destroy callback.
	 */
	status = ib_get_spl_qp( h_ca->obj.p_ci_ca->h_pd_alias,
		p_pnp_port_rec->p_port_attr->port_guid, &qp_create,
		&p_ioc_pnp_svc->obj, NULL, &p_ioc_pnp_svc->pool_key,
		&p_ioc_pnp_svc->h_qp );

	/*
	 * Release the CI CA once we've allocated the QP.  The CI CA will not
	 * go away while we hold the QP.
	 */
	deref_al_obj( &h_ca->obj );

	/* Check for failure allocating the QP. */
	if( status != IB_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_get_spl_qp failed with status %s\n",
			ib_get_err_str( status )) );
		return status;
	}

	/* Reference the port object on behalf of the QP. */
	ref_al_obj( &p_ioc_pnp_svc->obj );

	/* Create the MAD service. */
	cl_memclr( &mad_svc, sizeof(ib_mad_svc_t) );
	mad_svc.mad_svc_context = p_ioc_pnp_svc;
	mad_svc.pfn_mad_recv_cb = __ioc_pnp_recv_cb;
	mad_svc.pfn_mad_send_cb = __ioc_pnp_send_cb;
	status = ib_reg_mad_svc( p_ioc_pnp_svc->h_qp, &mad_svc,
		&p_ioc_pnp_svc->h_mad_svc );
	if( status != IB_SUCCESS )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_reg_mad_svc failed with status %s\n",
			ib_get_err_str( status )) );
		return status;
	}

	AL_EXIT( AL_DBG_PNP );
	return IB_SUCCESS;
}
/*
 * Create a port agent for a given port.
 */
static ib_api_status_t
__create_ioc_pnp_svc(
	IN				ib_pnp_rec_t				*p_pnp_rec )
{
	ioc_pnp_svc_t		*p_ioc_pnp_svc;
	ib_api_status_t		status;

	AL_ENTER( AL_DBG_PNP );

	/* Allocate the per-port IOC PnP service. */
	p_ioc_pnp_svc = (ioc_pnp_svc_t*)cl_zalloc( sizeof(ioc_pnp_svc_t) );
	if( !p_ioc_pnp_svc )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("Failed to cl_zalloc the IOC PnP service.\n") );
		return IB_INSUFFICIENT_MEMORY;
	}

	construct_al_obj( &p_ioc_pnp_svc->obj, AL_OBJ_TYPE_IOC_PNP_SVC );

	status = init_al_obj( &p_ioc_pnp_svc->obj, p_ioc_pnp_svc, TRUE,
		__destroying_ioc_pnp_svc, NULL, __free_ioc_pnp_svc );
	if( status != IB_SUCCESS )
	{
		__free_ioc_pnp_svc( &p_ioc_pnp_svc->obj );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("init_al_obj failed with status %s.\n",
			ib_get_err_str( status )) );
		return status;
	}

	/* Attach to the global IOC PnP manager object. */
	status = attach_al_obj( &gp_ioc_pnp->obj, &p_ioc_pnp_svc->obj );
	if( status != IB_SUCCESS )
	{
		p_ioc_pnp_svc->obj.pfn_destroy( &p_ioc_pnp_svc->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
		return status;
	}

	status = __init_ioc_pnp_svc( p_ioc_pnp_svc, p_pnp_rec );
	if( status != IB_SUCCESS )
	{
		p_ioc_pnp_svc->obj.pfn_destroy( &p_ioc_pnp_svc->obj, NULL );
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("__init_ioc_pnp_svc failed with status %s.\n",
			ib_get_err_str( status )) );
		return status;
	}

	/* Set the PnP context to reference this service. */
	p_pnp_rec->context = p_ioc_pnp_svc;

	/* Release the reference taken in init_al_obj. */
	deref_al_obj( &p_ioc_pnp_svc->obj );

	AL_EXIT( AL_DBG_PNP );
	return IB_SUCCESS;
}
static void
__destroying_ioc_pnp_svc(
	IN				al_obj_t					*p_obj )
{
	ib_api_status_t		status;
	ioc_pnp_svc_t		*p_svc;

	CL_ASSERT( p_obj );
	p_svc = PARENT_STRUCT( p_obj, ioc_pnp_svc_t, obj );

	if( p_svc->h_node_query )
		ib_cancel_query( gh_al, p_svc->h_node_query );

	if( p_svc->h_path_query )
		ib_cancel_query( gh_al, p_svc->h_path_query );

	/* Destroy the QP. */
	if( p_svc->h_qp )
	{
		status =
			ib_destroy_qp( p_svc->h_qp, (ib_pfn_destroy_cb_t)deref_al_obj );
		CL_ASSERT( status == IB_SUCCESS );
	}
}
static void
__free_ioc_pnp_svc(
	IN				al_obj_t					*p_obj )
{
	ioc_pnp_svc_t	*p_svc;

	CL_ASSERT( p_obj );
	p_svc = PARENT_STRUCT( p_obj, ioc_pnp_svc_t, obj );

	CL_ASSERT( !p_svc->query_cnt );

	destroy_al_obj( p_obj );
	cl_free( p_svc );
}
static void
__ioc_pnp_timer_cb(
	IN				void						*context )
{
	ib_api_status_t	status;
	ioc_pnp_mgr_t	*p_mgr;
	cl_list_item_t	*p_item;
	ioc_pnp_svc_t	*p_svc;

	AL_ENTER( AL_DBG_PNP );

	p_mgr = (ioc_pnp_mgr_t*)context;

	cl_spinlock_acquire( &p_mgr->obj.lock );
	if( p_mgr->obj.state == CL_DESTROYING )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,
			("Destroying - not resetting timer.\n") );
		cl_spinlock_release( &p_mgr->obj.lock );
		return;
	}

	CL_ASSERT( !cl_fmap_count( &p_mgr->sweep_map ) );

	/* Pre-charge the ref count so that we don't toggle between 0 and 1. */
	cl_atomic_inc( &p_mgr->query_cnt );
	/* Take a reference on the object for the duration of the sweep process. */
	ref_al_obj( &p_mgr->obj );
	for( p_item = cl_qlist_head( &p_mgr->obj.obj_list );
		p_item != cl_qlist_end( &p_mgr->obj.obj_list );
		p_item = cl_qlist_next( p_item ) )
	{
		p_svc = PARENT_STRUCT( PARENT_STRUCT( p_item, al_obj_t, pool_item ),
			ioc_pnp_svc_t, obj );
		cl_atomic_inc( &p_mgr->query_cnt );
		status = __ioc_query_sa( p_svc );
		if( status != IB_SUCCESS )
			cl_atomic_dec( &p_mgr->query_cnt );
	}
	/* Release the reference we took and see if we're done sweeping. */
	if( !cl_atomic_dec( &p_mgr->query_cnt ) )
		cl_async_proc_queue( gp_async_pnp_mgr, &p_mgr->async_item );
	cl_spinlock_release( &p_mgr->obj.lock );

	AL_EXIT( AL_DBG_PNP );
}
static ib_api_status_t
__ioc_query_sa(
	IN				ioc_pnp_svc_t* const		p_svc )
{
	ib_api_status_t		status = IB_NOT_DONE;
	ib_query_req_t		query;
	ib_user_query_t		info;
	union _ioc_pnp_timer_cb_u
	{
		ib_node_record_t	node_rec;
		ib_path_rec_t		path_rec;

	}	u;

	AL_ENTER( AL_DBG_PNP );

	if( p_svc->h_node_query )
		return IB_NOT_DONE;
	if( p_svc->h_path_query )
		return IB_NOT_DONE;

	if( p_svc->obj.state == CL_DESTROYING )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP,
			("Destroying - not resetting timer.\n") );
		return IB_NOT_DONE;
	}

	/* Setup the node record query. */
	info.method = IB_MAD_METHOD_GETTABLE;
	info.attr_id = IB_MAD_ATTR_NODE_RECORD;
	info.attr_size = sizeof(ib_node_record_t);
	info.comp_mask = IB_NR_COMPMASK_NODETYPE;
	info.p_attr = &u.node_rec;

	cl_memclr( &u.node_rec, sizeof(ib_node_record_t) );
	u.node_rec.node_info.node_type = IB_NODE_TYPE_CA;

	cl_memclr( &query, sizeof(ib_query_req_t) );
	query.query_type = IB_QUERY_USER_DEFINED;
	query.p_query_input = &info;
	query.port_guid = p_svc->port_guid;
	query.timeout_ms = g_ioc_query_timeout;
	query.retry_cnt = g_ioc_query_retries;
	query.query_context = p_svc;
	query.pfn_query_cb = __node_rec_cb;

	/* Reference the service for the node record query. */
	ref_al_obj( &p_svc->obj );
	cl_atomic_inc( &p_svc->query_cnt );

	status = ib_query( gh_al, &query, &p_svc->h_node_query );
	if( status != IB_SUCCESS )
	{
		cl_atomic_dec( &p_svc->query_cnt );
		deref_al_obj( &p_svc->obj );
		AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_PNP,
			("ib_query returned %s\n", ib_get_err_str( status )) );
		return status;
	}

	/* Setup the path query. */
	info.method = IB_MAD_METHOD_GETTABLE;
	info.attr_id = IB_MAD_ATTR_PATH_RECORD;
	info.attr_size = sizeof(ib_path_rec_t);
	info.comp_mask = IB_PR_COMPMASK_SGID | IB_PR_COMPMASK_NUMBPATH |
		IB_PR_COMPMASK_PKEY;
	info.p_attr = &u.path_rec;

	cl_memclr( &u.path_rec, sizeof(ib_path_rec_t) );
	ib_gid_set_default( &u.path_rec.sgid, p_svc->port_guid );
	/* Request all the paths available, setting the reversible bit. */
	u.path_rec.num_path = 0xFF;
	/* Request only paths from the default partition. */
	u.path_rec.pkey = cl_hton16(IB_DEFAULT_PKEY);

	query.pfn_query_cb = __path_rec_cb;

	/* Reference the service for the path record query. */
	ref_al_obj( &p_svc->obj );
	cl_atomic_inc( &p_svc->query_cnt );

	status = ib_query( gh_al, &query, &p_svc->h_path_query );
	if( status != IB_SUCCESS )
	{
		cl_atomic_dec( &p_svc->query_cnt );
		deref_al_obj( &p_svc->obj );
		AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_PNP,
			("ib_query returned %s\n", ib_get_err_str( status )) );
		return status;
	}

	AL_EXIT( AL_DBG_PNP );
	return IB_SUCCESS;
}
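/*
 * The two SA queries issued above complete independently: __node_rec_cb and
 * __path_rec_cb (below) each stash their result MAD and drop one query_cnt
 * reference, and whichever callback finishes last - query_cnt reaching
 * zero - calls __process_query to join the node and path results.
 */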
static void
__node_rec_cb(
	IN				ib_query_rec_t				*p_query_rec )
{
	ioc_pnp_svc_t	*p_svc;

	AL_ENTER( AL_DBG_PNP );

	p_svc = (ioc_pnp_svc_t*)p_query_rec->query_context;

	if( p_svc->obj.state != CL_DESTROYING &&
		p_query_rec->status == IB_SUCCESS && p_query_rec->result_cnt )
	{
		CL_ASSERT( p_query_rec->p_result_mad );
		CL_ASSERT( !p_svc->p_node_element );
		CL_ASSERT( p_query_rec->p_result_mad->p_next == NULL );
		p_svc->p_node_element = p_query_rec->p_result_mad;
		p_svc->num_nodes = p_query_rec->result_cnt;
	}
	else if( p_query_rec->p_result_mad )
	{
		ib_put_mad( p_query_rec->p_result_mad );
	}

	p_svc->h_node_query = NULL;
	if( !cl_atomic_dec( &p_svc->query_cnt ) )
	{
		/* The path query has already completed.  Process the results. */
		__process_query( p_svc );
	}

	/* Release the reference taken for the query. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_PNP );
}
static void
__path_rec_cb(
	IN				ib_query_rec_t				*p_query_rec )
{
	ioc_pnp_svc_t	*p_svc;

	AL_ENTER( AL_DBG_PNP );

	p_svc = (ioc_pnp_svc_t*)p_query_rec->query_context;

	if( p_svc->obj.state != CL_DESTROYING &&
		p_query_rec->status == IB_SUCCESS && p_query_rec->result_cnt )
	{
		CL_ASSERT( p_query_rec->p_result_mad );
		CL_ASSERT( !p_svc->p_path_element );
		CL_ASSERT( p_query_rec->p_result_mad->p_next == NULL );
		p_svc->p_path_element = p_query_rec->p_result_mad;
		p_svc->num_paths = p_query_rec->result_cnt;
	}
	else if( p_query_rec->p_result_mad )
	{
		ib_put_mad( p_query_rec->p_result_mad );
	}

	p_svc->h_path_query = NULL;
	if( !cl_atomic_dec( &p_svc->query_cnt ) )
	{
		/* The node query has already completed.  Process the results. */
		__process_query( p_svc );
	}

	/* Release the reference taken for the query. */
	deref_al_obj( &p_svc->obj );

	AL_EXIT( AL_DBG_PNP );
}
static void
__process_query(
	IN				ioc_pnp_svc_t* const		p_svc )
{
	ib_api_status_t		status;
	ioc_sweep_results_t	*p_results;
	cl_qmap_t			port_map;

	AL_ENTER( AL_DBG_PNP );

	cl_qmap_init( &port_map );

	if( !p_svc->p_node_element || !p_svc->p_path_element )
	{
		/* One of the queries failed.  Release the MADs and reset the timer. */
		if( p_svc->p_node_element )
		{
			ib_put_mad( p_svc->p_node_element );
			p_svc->p_node_element = NULL;
		}

		if( p_svc->p_path_element )
		{
			ib_put_mad( p_svc->p_path_element );
			p_svc->p_path_element = NULL;
		}

		/* Decrement the IOC PnP manager's query count. */
		if( !cl_atomic_dec( &gp_ioc_pnp->query_cnt ) )
			cl_async_proc_queue( gp_async_pnp_mgr, &gp_ioc_pnp->async_item );
		AL_EXIT( AL_DBG_PNP );
		return;
	}

	/*
	 * Allocate the sweep results structure to allow processing
	 * asynchronously.
	 */
	p_results = cl_zalloc( sizeof(ioc_sweep_results_t) );
	if( p_results )
	{
		p_results->async_item.pfn_callback = __process_sweep;
		p_results->p_svc = p_svc;
		cl_fmap_init( &p_results->iou_map, __iou_cmp );

		/* Build the map of nodes by port GUID. */
		__process_nodes( p_svc, &port_map );

		/* Build the map of paths for each node. */
		__process_paths( p_svc, &port_map );

		/* Collapse the map of nodes to be keyed by node GUID. */
		__build_iou_map( &port_map, &p_results->iou_map );

		/* Send the IOU Info queries to the nodes. */
		status = __query_ious( p_results );
	}
	else
	{
		status = IB_INSUFFICIENT_MEMORY;
	}

	/* Release the query result MADs now that we're done with them. */
	ib_put_mad( p_svc->p_node_element );
	ib_put_mad( p_svc->p_path_element );
	p_svc->p_node_element = NULL;
	p_svc->p_path_element = NULL;

	switch( status )
	{
	case IB_SUCCESS:
		break;

	default:
		CL_ASSERT( p_results );
		cl_free( p_results );
		/* Fall through. */
	case IB_INSUFFICIENT_MEMORY:
		/* Decrement the IOC PnP manager's query count. */
		if( !cl_atomic_dec( &gp_ioc_pnp->query_cnt ) )
			cl_async_proc_queue( gp_async_pnp_mgr, &gp_ioc_pnp->async_item );
		break;
	}

	AL_EXIT( AL_DBG_PNP );
}
static void
__process_nodes(
	IN				ioc_pnp_svc_t* const		p_svc,
	IN				cl_qmap_t* const			p_port_map )
{
	iou_node_t			*p_iou;
	ib_node_record_t	*p_node_rec;
	cl_map_item_t		*p_item;
	uint32_t			i;

	AL_ENTER( AL_DBG_PNP );

	CL_ASSERT( p_svc );
	CL_ASSERT( p_svc->p_node_element );
	CL_ASSERT( p_port_map );

	for( i = 0; i < p_svc->num_nodes; i++ )
	{
		p_node_rec = ib_get_query_node_rec( p_svc->p_node_element, i );

		p_iou = __get_iou( gp_ioc_pnp, p_svc->ca_guid, p_node_rec );
		if( !p_iou )
			break;

		/*
		 * We insert by port GUID, not node GUID so that we can match
		 * to paths using DGID.  Note that it is safe to cast between
		 * a flexi-map item and a map item since the pointer to the key
		 * in a flexi-map item is always a 64-bit pointer.
		 */
		p_item = cl_qmap_insert(
			p_port_map, p_node_rec->node_info.port_guid,
			(cl_map_item_t*)&p_iou->map_item );
		if( p_item != (cl_map_item_t*)&p_iou->map_item )
		{
			/* Duplicate node - discard. */
			__put_iou( gp_ioc_pnp, p_iou );
		}
	}

	AL_EXIT( AL_DBG_PNP );
}
static void
__process_paths(
	IN				ioc_pnp_svc_t* const		p_svc,
	IN				cl_qmap_t* const			p_port_map )
{
	iou_node_t		*p_iou;
	iou_path_t		*p_path;
	ib_path_rec_t	*p_path_rec;
	uint32_t		i;
	cl_map_item_t	*p_iou_item;
	cl_fmap_item_t	*p_item;

	AL_ENTER( AL_DBG_PNP );

	CL_ASSERT( p_svc );
	CL_ASSERT( p_svc->p_node_element );
	CL_ASSERT( p_port_map );

	for( i = 0; i < p_svc->num_paths; i++ )
	{
		p_path_rec = ib_get_query_path_rec( p_svc->p_path_element, i );

		p_iou_item =
			cl_qmap_get( p_port_map, p_path_rec->dgid.unicast.interface_id );
		if( p_iou_item == cl_qmap_end( p_port_map ) )
			continue;

		p_iou = PARENT_STRUCT( p_iou_item, iou_node_t, map_item );

		p_path = __get_path( gp_ioc_pnp, p_svc->ca_guid,
			p_svc->port_guid, p_path_rec );
		if( !p_path )
			break;

		p_item = cl_fmap_insert( &p_iou->path_map, &p_path->rec,
			&p_path->map_item );
		if( p_item != &p_path->map_item )
		{
			/* Duplicate path - discard. */
			__put_path( gp_ioc_pnp, p_path );
		}
	}

	AL_EXIT( AL_DBG_PNP );
}
static void
__build_iou_map(
	IN				cl_qmap_t* const			p_port_map,
	IN	OUT			cl_fmap_t* const			p_iou_map )
{
	cl_fmap_t		map1, map2;
	cl_fmap_item_t	*p_item;
	iou_node_t		*p_iou, *p_dup;

	AL_ENTER( AL_DBG_PNP );

	CL_ASSERT( !cl_fmap_count( p_iou_map ) );

	cl_fmap_init( &map1, __path_cmp );
	cl_fmap_init( &map2, __path_cmp );

	/*
	 * Now collapse the map so that IOUs aren't repeated.
	 * This is needed because the IOU map is keyed by port GUID, and thus
	 * a multi-port IOU could be listed twice.
	 */
	/* Merge the port map into a map of IOUs. */
	for( p_item = cl_qmap_head( p_port_map );
		p_item != cl_qmap_end( p_port_map );
		p_item = cl_qmap_head( p_port_map ) )
	{
		cl_qmap_remove_item( p_port_map, (cl_map_item_t*)p_item );
		p_iou = PARENT_STRUCT( p_item, iou_node_t, map_item );

		p_item = cl_fmap_insert( p_iou_map, &p_iou->ca_guid, p_item );
		if( p_item != &p_iou->map_item )
		{
			/* Duplicate IOU information - merge the paths. */
			p_dup = PARENT_STRUCT( p_item, iou_node_t, map_item );
			CL_ASSERT( p_dup != p_iou );
			cl_fmap_delta( &p_dup->path_map, &p_iou->path_map, &map1, &map2 );
			/*
			 * p_iou->path_map now contains only duplicate paths.  map1
			 * contains paths unique to p_iou->path_map, map2 contains
			 * paths unique to p_dup->path_map.  Add the unique paths back
			 * to p_dup->path_map since that IOU is already in the IOU map.
			 * Note that we are keeping the p_dup IOU node.
			 */
			cl_fmap_merge( &p_dup->path_map, &map1 );
			cl_fmap_merge( &p_dup->path_map, &map2 );
			/* All unique items should have merged without duplicates. */
			CL_ASSERT( !cl_fmap_count( &map1 ) );
			CL_ASSERT( !cl_fmap_count( &map2 ) );

			__put_iou( gp_ioc_pnp, p_iou );
		}
	}

	AL_EXIT( AL_DBG_PNP );
}
static void
__format_dm_get(
	IN		const	void* const					context1,
	IN		const	void* const					context2,
	IN		const	iou_path_t* const			p_path,
	IN		const	net16_t						attr_id,
	IN		const	net32_t						attr_mod,
	IN	OUT			ib_mad_element_t* const		p_mad_element )
{
	static uint64_t	tid = 0;

	AL_ENTER( AL_DBG_PNP );

	/*
	 * Context information so that we can continue processing when
	 * the query completes.
	 */
	p_mad_element->context1 = context1;
	p_mad_element->context2 = context2;

	/*
	 * Set the addressing bits necessary for the MAD service to
	 * create the address vector.
	 */
	p_mad_element->h_av = NULL;
	p_mad_element->remote_sl = ib_path_rec_sl( &p_path->rec );
	p_mad_element->remote_lid = p_path->rec.dlid;
	p_mad_element->grh_valid = FALSE;
	p_mad_element->path_bits = p_path->rec.num_path;

	/* Request response processing. */
	p_mad_element->resp_expected = TRUE;
	p_mad_element->retry_cnt = g_ioc_query_retries;
	p_mad_element->timeout_ms = g_ioc_query_timeout;

	/* Set the destination information for the send. */
	p_mad_element->remote_qp = IB_QP1;
	p_mad_element->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY;

	/* Format the MAD payload. */
	cl_memclr( p_mad_element->p_mad_buf, sizeof(ib_dm_mad_t) );
	ib_mad_init_new( p_mad_element->p_mad_buf, IB_MCLASS_DEV_MGMT, 1,
		IB_MAD_METHOD_GET, cl_ntoh64( tid++ ), attr_id, attr_mod );

	AL_EXIT( AL_DBG_PNP );
}
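/*
 * Note on the transaction ID above: tid is a function-local static that is
 * incremented for every DM GET, so each outstanding request carries a
 * distinct TID for request/response matching.  Calls to __format_dm_get
 * appear to be serialized on the PnP and MAD callback threads, which is
 * presumably why a plain (non-atomic) increment suffices here.
 */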
static ib_api_status_t
__query_ious(
	IN				ioc_sweep_results_t* const	p_results )
{
	ib_api_status_t		status;
	iou_node_t			*p_iou;
	iou_path_t			*p_path;
	cl_fmap_item_t		*p_iou_item;
	cl_fmap_item_t		*p_path_item;
	ib_mad_element_t	*p_mad, *p_mad_list = NULL;

	AL_ENTER( AL_DBG_PNP );

	p_results->state = SWEEP_IOU_INFO;

	/* Send an IOU Info query on the first path to every IOU. */
	p_iou_item = cl_fmap_head( &p_results->iou_map );
	while( p_iou_item != cl_fmap_end( &p_results->iou_map ) )
	{
		p_iou = PARENT_STRUCT( p_iou_item, iou_node_t, map_item );
		p_iou_item = cl_fmap_next( p_iou_item );
		if( !cl_fmap_count( &p_iou->path_map ) )
		{
			/* No paths for this node.  Discard it. */
			cl_fmap_remove_item( &p_results->iou_map, &p_iou->map_item );
			__put_iou( gp_ioc_pnp, p_iou );
			continue;
		}

		p_path_item = cl_fmap_head( &p_iou->path_map );

		p_path = PARENT_STRUCT( p_path_item, iou_path_t, map_item );

		status = ib_get_mad( p_results->p_svc->pool_key,
			MAD_BLOCK_SIZE, &p_mad );
		if( status != IB_SUCCESS )
		{
			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
				("ib_get_mad for IOU Info query returned %s.\n",
				ib_get_err_str( status )) );
			break;
		}

		p_iou->p_config_path = p_path;
		__format_dm_get( p_results, p_iou, p_path,
			IB_MAD_ATTR_IO_UNIT_INFO, 0, p_mad );

		/* Link the elements together. */
		p_mad->p_next = p_mad_list;
		p_mad_list = p_mad;

		cl_atomic_inc( &p_results->p_svc->query_cnt );
	}

	if( !p_mad_list )
	{
		AL_EXIT( AL_DBG_PNP );
		return IB_ERROR;
	}

	status = ib_send_mad( p_results->p_svc->h_mad_svc, p_mad_list, &p_mad );
	if( status != IB_SUCCESS )
	{
		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("ib_send_mad returned %s\n", ib_get_err_str( status )) );

		/* If some sends succeeded, change the status. */
		if( p_mad_list != p_mad )
			status = IB_SUCCESS;

		while( p_mad )
		{
			p_mad_list = p_mad->p_next;
			p_mad->p_next = NULL;
			ib_put_mad( p_mad );
			if( !cl_atomic_dec( &p_results->p_svc->query_cnt ) &&
				status == IB_SUCCESS )
			{
				cl_async_proc_queue( gp_async_pnp_mgr,
					&p_results->async_item );
			}
			p_mad = p_mad_list;
		}
	}

	AL_EXIT( AL_DBG_PNP );
	return status;
}
static void
__ioc_pnp_recv_cb(
	IN		const	ib_mad_svc_handle_t			h_mad_svc,
	IN				void						*mad_svc_context,
	IN				ib_mad_element_t			*p_mad_response )
{
	ioc_sweep_results_t	*p_results;
	iou_node_t			*p_iou;
	iou_ioc_t			*p_ioc;

	AL_ENTER( AL_DBG_PNP );

	UNUSED_PARAM( h_mad_svc );
	UNUSED_PARAM( mad_svc_context );
	CL_ASSERT( !p_mad_response->p_next );

	p_results = (ioc_sweep_results_t*)p_mad_response->send_context1;
	if( !p_mad_response->p_mad_buf->status )
	{
		/* Query was successful. */
		switch( p_mad_response->p_mad_buf->attr_id )
		{
		case IB_MAD_ATTR_IO_UNIT_INFO:
			p_iou = (iou_node_t*)p_mad_response->send_context2;
			__iou_info_resp( p_iou,
				(ib_dm_mad_t*)p_mad_response->p_mad_buf );
			break;

		case IB_MAD_ATTR_IO_CONTROLLER_PROFILE:
			p_iou = (iou_node_t*)p_mad_response->send_context2;
			__ioc_profile_resp( p_iou,
				(ib_dm_mad_t*)p_mad_response->p_mad_buf );
			break;

		case IB_MAD_ATTR_SERVICE_ENTRIES:
			p_ioc = (iou_ioc_t*)p_mad_response->send_context2;
			__svc_entry_resp( p_ioc,
				(ib_dm_mad_t*)p_mad_response->p_mad_buf );
			break;

		default:
			break;
		}
	}

	ib_put_mad( p_mad_response );
	AL_EXIT( AL_DBG_PNP );
}
static void
__iou_info_resp(
	IN	OUT			iou_node_t* const			p_iou,
	IN		const	ib_dm_mad_t* const			p_mad )
{
	AL_ENTER( AL_DBG_PNP );
	/* Copy the IOU info for post-processing. */
	p_iou->info = *((ib_iou_info_t*)p_mad->data);
	AL_EXIT( AL_DBG_PNP );
}
static void
__ioc_profile_resp(
	IN	OUT			iou_node_t* const			p_iou,
	IN		const	ib_dm_mad_t* const			p_mad )
{
	iou_ioc_t		*p_ioc;
	cl_map_item_t	*p_item;

	AL_ENTER( AL_DBG_PNP );
	p_ioc = __get_ioc( gp_ioc_pnp, cl_ntoh32(p_mad->hdr.attr_mod),
		(ib_ioc_profile_t*)p_mad->data );
	if( p_ioc )
	{
		/* Need back link to process service entry failures. */
		p_ioc->p_iou = p_iou;
		cl_spinlock_acquire( &p_iou->lock );
		p_item = cl_qmap_insert( &p_iou->ioc_map,
			p_ioc->profile.ioc_guid, &p_ioc->map_item );
		cl_spinlock_release( &p_iou->lock );
		/* Return the IOC if it's a duplicate. */
		if( p_item != &p_ioc->map_item )
			__put_ioc( gp_ioc_pnp, p_ioc );
	}
	AL_EXIT( AL_DBG_PNP );
}
static void
__svc_entry_resp(
	IN	OUT			iou_ioc_t* const			p_ioc,
	IN		const	ib_dm_mad_t* const			p_mad )
{
	uint8_t				idx, lo, hi;
	ib_svc_entries_t	*p_svc_entries;

	AL_ENTER( AL_DBG_PNP );

	ib_dm_get_slot_lo_hi( p_mad->hdr.attr_mod, NULL, &lo, &hi );
	CL_ASSERT( (hi - lo) < SVC_ENTRY_COUNT );
	p_svc_entries = (ib_svc_entries_t*)p_mad->data;

	/* Copy the entries. */
	for( idx = lo; idx <= hi; idx++ )
		p_ioc->p_svc_entries[idx] = p_svc_entries->service_entry[idx - lo];

	/* Update the number of entries received so far. */
	p_ioc->num_valid_entries += (hi - lo) + 1;
	cl_atomic_dec( &p_ioc->ref_cnt );
	AL_EXIT( AL_DBG_PNP );
}
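/*
 * For reference: the ServiceEntries attribute modifier encodes the IOC slot
 * together with the first/last service-entry indices carried by one MAD
 * (decoded above via ib_dm_get_slot_lo_hi).  A single response holds at
 * most SVC_ENTRY_COUNT entries, so an IOC advertising more entries takes
 * several queries, and num_valid_entries only reaches
 * profile.num_svc_entries once every range has arrived - the condition
 * __flush_duds later uses to discard incomplete IOCs.
 */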
static void
__ioc_pnp_send_cb(
	IN		const	ib_mad_svc_handle_t			h_mad_svc,
	IN				void						*mad_svc_context,
	IN				ib_mad_element_t			*p_request_mad )
{
	ib_api_status_t		status;
	ioc_sweep_results_t	*p_results;
	iou_node_t			*p_iou;
	iou_ioc_t			*p_ioc;
	cl_fmap_item_t		*p_item;

	AL_ENTER( AL_DBG_PNP );

	UNUSED_PARAM( h_mad_svc );
	UNUSED_PARAM( mad_svc_context );

	CL_ASSERT( p_request_mad->p_next == NULL );

	p_results = (ioc_sweep_results_t*)p_request_mad->context1;

	if( p_request_mad->status != IB_WCS_SUCCESS )
	{
		switch( p_request_mad->p_mad_buf->attr_id )
		{
		case IB_MAD_ATTR_IO_UNIT_INFO:
			p_iou = (iou_node_t*)p_request_mad->context2;
			if( p_request_mad->status == IB_WCS_TIMEOUT_RETRY_ERR )
			{
				/* Move to the next path for the node and try the query again. */
				p_item = cl_fmap_next( &p_iou->p_config_path->map_item );
				if( p_item != cl_fmap_end( &p_iou->path_map ) )
				{
					p_iou->p_config_path =
						PARENT_STRUCT( p_item, iou_path_t, map_item );
					__format_dm_get( p_results, p_iou, p_iou->p_config_path,
						IB_MAD_ATTR_IO_UNIT_INFO, 0, p_request_mad );

					status = ib_send_mad( p_results->p_svc->h_mad_svc,
						p_request_mad, &p_request_mad );
					if( status == IB_SUCCESS )
					{
						AL_EXIT( AL_DBG_PNP );
						return;
					}
				}
			}
			break;

		case IB_MAD_ATTR_SERVICE_ENTRIES:
			p_ioc = (iou_ioc_t*)p_request_mad->context2;
			cl_spinlock_acquire( &p_ioc->p_iou->lock );
			cl_qmap_remove_item( &p_ioc->p_iou->ioc_map, &p_ioc->map_item );
			cl_spinlock_release( &p_ioc->p_iou->lock );
			__put_ioc( gp_ioc_pnp, p_ioc );
			break;

		default:
			break;
		}
	}

	ib_put_mad( p_request_mad );

	/*
	 * If this is the last MAD, finish processing the IOU queries
	 * in the PnP thread.
	 */
	if( !cl_atomic_dec( &p_results->p_svc->query_cnt ) )
		cl_async_proc_queue( gp_async_pnp_mgr, &p_results->async_item );

	AL_EXIT( AL_DBG_PNP );
}
static void
__flush_duds(
	IN OUT			ioc_sweep_results_t			*p_results )
{
	cl_fmap_item_t	*p_item;
	cl_map_item_t	*p_ioc_item;
	iou_node_t		*p_iou;
	iou_ioc_t		*p_ioc;

	AL_ENTER( AL_DBG_PNP );

	/* Walk the map of IOUs and discard any that didn't respond to IOU info. */
	p_item = cl_fmap_head( &p_results->iou_map );
	/*
	 * No locking required since we're protected by the serialization of the
	 * PnP thread.
	 */
	while( p_item != cl_fmap_end( &p_results->iou_map ) )
	{
		p_iou = PARENT_STRUCT( p_item, iou_node_t, map_item );

		p_item = cl_fmap_next( p_item );
		switch( p_results->state )
		{
		case SWEEP_IOU_INFO:
			if( p_iou->info.max_controllers )
				continue;
			break;

		case SWEEP_SVC_ENTRIES:
			CL_ASSERT( cl_qmap_count( &p_iou->ioc_map ) );
			p_ioc_item = cl_qmap_head( &p_iou->ioc_map );
			while( p_ioc_item != cl_qmap_end( &p_iou->ioc_map ) )
			{
				p_ioc = PARENT_STRUCT( p_ioc_item, iou_ioc_t, map_item );
				p_ioc_item = cl_qmap_next( p_ioc_item );

				if( !p_ioc->num_valid_entries ||
					p_ioc->num_valid_entries != p_ioc->profile.num_svc_entries )
				{
					cl_qmap_remove_item( &p_iou->ioc_map, &p_ioc->map_item );
					__put_ioc( gp_ioc_pnp, p_ioc );
				}
			}
			/* Fall through. */
		case SWEEP_IOC_PROFILE:
			if( cl_qmap_count( &p_iou->ioc_map ) )
				continue;
			break;

		default:
			CL_ASSERT( p_results->state != SWEEP_COMPLETE );
			break;
		}

		/* The node is no good - return it to the pool. */
		cl_fmap_remove_item( &p_results->iou_map, &p_iou->map_item );
		__put_iou( gp_ioc_pnp, p_iou );
	}

	AL_EXIT( AL_DBG_PNP );
}
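
/*
 * Each sweep stage prunes differently: after the IOU info stage an IOU
 * with no controller slots has nothing to offer; after the IOC profile
 * and service entry stages an IOU whose ioc_map has emptied out has
 * nothing left to report.  Discarded nodes simply go back to their pools.
 */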
\r
static void
__process_sweep(
	IN				cl_async_proc_item_t		*p_async_item )
{
	ib_api_status_t		status;
	ioc_sweep_results_t	*p_results;

	AL_ENTER( AL_DBG_PNP );

	p_results = PARENT_STRUCT( p_async_item, ioc_sweep_results_t, async_item );
	CL_ASSERT( !p_results->p_svc->query_cnt );

	if( p_results->p_svc->obj.state == CL_DESTROYING )
	{
		__put_iou_map( gp_ioc_pnp, &p_results->iou_map );
		goto done;
	}

	/* Walk the map of IOUs and discard any that didn't respond to IOU info. */
	__flush_duds( p_results );
	switch( p_results->state )
	{
	case SWEEP_IOU_INFO:
		/* Next step: query IOC profiles for all IOUs. */
		p_results->state = SWEEP_IOC_PROFILE;
		status = __query_ioc_profiles( p_results );
		break;

	case SWEEP_IOC_PROFILE:
		/* Next step: query service entries for all IOCs. */
		p_results->state = SWEEP_SVC_ENTRIES;
		status = __query_svc_entries( p_results );
		break;

	case SWEEP_SVC_ENTRIES:
		/* Filter results and report changes. */
		p_results->state = SWEEP_COMPLETE;
		__update_results( p_results );
		status = IB_SUCCESS;
		break;

	default:
		CL_ASSERT( p_results->state == SWEEP_IOU_INFO ||
			p_results->state == SWEEP_IOC_PROFILE ||
			p_results->state == SWEEP_SVC_ENTRIES );
		status = IB_ERROR;
		break;
	}

	if( p_results->state == SWEEP_COMPLETE || status != IB_SUCCESS )
	{
done:
		if( !cl_atomic_dec( &gp_ioc_pnp->query_cnt ) )
			cl_async_proc_queue( gp_async_pnp_mgr, &gp_ioc_pnp->async_item );
		cl_free( p_results );
	}

	AL_EXIT( AL_DBG_PNP );
}
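
/*
 * Sketch of the per-sweep state progression driven above:
 *
 *   SWEEP_IOU_INFO    -> __query_ioc_profiles() -> SWEEP_IOC_PROFILE
 *   SWEEP_IOC_PROFILE -> __query_svc_entries()  -> SWEEP_SVC_ENTRIES
 *   SWEEP_SVC_ENTRIES -> __update_results()     -> SWEEP_COMPLETE
 *
 * Each transition is re-queued to the PnP thread only after the last
 * outstanding MAD of the previous step completes.
 */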
\r
static ib_api_status_t
__query_ioc_profiles(
	IN				ioc_sweep_results_t* const	p_results )
{
	ib_api_status_t		status;
	cl_fmap_item_t		*p_item;
	iou_node_t			*p_iou;
	uint8_t				slot;
	ib_mad_element_t	*p_mad, *p_mad_list = NULL;

	AL_ENTER( AL_DBG_PNP );

	p_item = cl_fmap_head( &p_results->iou_map );
	while( p_item != cl_fmap_end( &p_results->iou_map ) )
	{
		p_iou = PARENT_STRUCT( p_item, iou_node_t, map_item );
		CL_ASSERT( p_iou->info.max_controllers );
		CL_ASSERT( cl_fmap_count( &p_iou->path_map ) );
		CL_ASSERT( p_iou->p_config_path );
		p_item = cl_fmap_next( p_item );

		status = IB_NOT_DONE;
		for( slot = 1; slot <= p_iou->info.max_controllers; slot++ )
		{
			if( ioc_at_slot( &p_iou->info, slot ) == IOC_INSTALLED )
			{
				status = ib_get_mad( p_results->p_svc->pool_key,
					MAD_BLOCK_SIZE, &p_mad );
				if( status != IB_SUCCESS )
					break;

				__format_dm_get( p_results, p_iou, p_iou->p_config_path,
					IB_MAD_ATTR_IO_CONTROLLER_PROFILE, cl_hton32( slot ), p_mad );

				/* Chain the MAD up. */
				p_mad->p_next = p_mad_list;
				p_mad_list = p_mad;

				cl_atomic_inc( &p_results->p_svc->query_cnt );
			}
		}
		if( status != IB_SUCCESS )
		{
			/* No IOCs installed in this IOU, or failed to get a MAD. */
			cl_fmap_remove_item( &p_results->iou_map, &p_iou->map_item );
			__put_iou( gp_ioc_pnp, p_iou );
		}
	}

	/* Trap the case where there are no queries to send. */
	if( !p_mad_list )
	{
		AL_EXIT( AL_DBG_PNP );
		return IB_NOT_DONE;
	}

	status = ib_send_mad( p_results->p_svc->h_mad_svc, p_mad_list, &p_mad );
	if( status != IB_SUCCESS )
	{
		/* If some of the MADs were sent, wait for their completion. */
		if( p_mad_list != p_mad )
			status = IB_SUCCESS;

		while( p_mad )
		{
			p_mad_list = p_mad->p_next;
			p_mad->p_next = NULL;
			ib_put_mad( p_mad );
			if( !cl_atomic_dec( &p_results->p_svc->query_cnt ) &&
				status == IB_SUCCESS )
			{
				cl_async_proc_queue( gp_async_pnp_mgr,
					&p_results->async_item );
			}
			p_mad = p_mad_list;
		}
	}
	AL_EXIT( AL_DBG_PNP );
	return status;
}
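
/*
 * ib_send_mad() consumes the chain up to the point of failure and hands
 * back the remaining elements through its last argument, so the unwind
 * loop above only releases MADs that never went on the wire.  If at
 * least one MAD was sent, the sweep must still wait for those
 * completions, so the function reports success and lets the send
 * callback finish the accounting.
 */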
\r
static ib_api_status_t
__query_svc_entries(
	IN				ioc_sweep_results_t* const	p_results )
{
	ib_api_status_t		status;
	cl_fmap_item_t		*p_iou_item;
	cl_map_item_t		*p_ioc_item;
	iou_node_t			*p_iou;
	iou_ioc_t			*p_ioc;
	uint32_t			i;
	uint32_t			attr_mod;
	ib_mad_element_t	*p_mad, *p_mad_list = NULL;

	AL_ENTER( AL_DBG_PNP );

	for( p_iou_item = cl_fmap_head( &p_results->iou_map );
		p_iou_item != cl_fmap_end( &p_results->iou_map );
		p_iou_item = cl_fmap_next( p_iou_item ) )
	{
		p_iou = PARENT_STRUCT( p_iou_item, iou_node_t, map_item );
		CL_ASSERT( cl_qmap_count( &p_iou->ioc_map ) );
		CL_ASSERT( cl_fmap_count( &p_iou->path_map ) );
		CL_ASSERT( p_iou->p_config_path );

		for( p_ioc_item = cl_qmap_head( &p_iou->ioc_map );
			p_ioc_item != cl_qmap_end( &p_iou->ioc_map );
			p_ioc_item = cl_qmap_next( p_ioc_item ) )
		{
			p_ioc = PARENT_STRUCT( p_ioc_item, iou_ioc_t, map_item );
			CL_ASSERT( p_ioc->p_iou == p_iou );

			/* Service entries are fetched in groups of four per MAD. */
			for( i = 0; i < p_ioc->profile.num_svc_entries; i += 4 )
			{
				status = ib_get_mad( p_results->p_svc->pool_key,
					MAD_BLOCK_SIZE, &p_mad );
				if( status != IB_SUCCESS )
					break;

				attr_mod = (((uint32_t)p_ioc->slot) << 16) | i;
				/* Clamp the last index to the final service entry. */
				if( (i + 3) >= p_ioc->profile.num_svc_entries )
					attr_mod |= ((p_ioc->profile.num_svc_entries - 1) << 8);
				else
					attr_mod |= ((i + 3) << 8);
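				/*
				 * The attribute modifier built above packs the request as:
				 * bits 31-16 hold the controller slot, bits 15-8 the index
				 * of the last entry wanted, and bits 7-0 the index of the
				 * first.  For example, slot 2 requesting entries 4-7 yields
				 * 0x00020704 (host order; swapped below).
				 */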
\r
				__format_dm_get( p_results, p_ioc, p_iou->p_config_path,
					IB_MAD_ATTR_SERVICE_ENTRIES, cl_hton32( attr_mod ),
					p_mad );

				/* Chain the MAD up. */
				p_mad->p_next = p_mad_list;
				p_mad_list = p_mad;

				cl_atomic_inc( &p_ioc->ref_cnt );
				cl_atomic_inc( &p_results->p_svc->query_cnt );
			}
		}
	}

	/* Trap the case where there are no queries to send. */
	if( !p_mad_list )
	{
		AL_EXIT( AL_DBG_PNP );
		return IB_NOT_DONE;
	}

	status = ib_send_mad( p_results->p_svc->h_mad_svc, p_mad_list, &p_mad );
	if( status != IB_SUCCESS )
	{
		/* If some of the MADs were sent, wait for their completion. */
		if( p_mad_list != p_mad )
			status = IB_SUCCESS;

		while( p_mad )
		{
			p_mad_list = p_mad->p_next;
			p_mad->p_next = NULL;
			p_ioc = (iou_ioc_t*)p_mad->context2;
			cl_atomic_dec( &p_ioc->ref_cnt );
			ib_put_mad( p_mad );
			if( !cl_atomic_dec( &p_results->p_svc->query_cnt ) &&
				status == IB_SUCCESS )
			{
				cl_async_proc_queue( gp_async_pnp_mgr,
					&p_results->async_item );
			}
			p_mad = p_mad_list;
		}
	}
	AL_EXIT( AL_DBG_PNP );
	return status;
}
\r
static void
__update_results(
	IN				ioc_sweep_results_t* const	p_results )
{
	cl_fmap_t		iou_map1, iou_map2;
	cl_fmap_item_t	*p_item1, *p_item2;
	iou_node_t		*p_iou1, *p_iou2;

	AL_ENTER( AL_DBG_PNP );

	cl_fmap_init( &iou_map1, __iou_cmp );
	cl_fmap_init( &iou_map2, __iou_cmp );

	/*
	 * No need to lock on the sweep map since all accesses are serialized
	 * by the PnP thread.
	 */
	cl_fmap_delta( &gp_ioc_pnp->sweep_map, &p_results->iou_map,
		&iou_map1, &iou_map2 );
	/* sweep_map and iou_map now contain exactly the same items. */
	p_item1 = cl_fmap_head( &gp_ioc_pnp->sweep_map );
	p_item2 = cl_fmap_head( &p_results->iou_map );
	while( p_item1 != cl_fmap_end( &gp_ioc_pnp->sweep_map ) )
	{
		CL_ASSERT( p_item2 != cl_fmap_end( &p_results->iou_map ) );
		p_iou1 = PARENT_STRUCT( p_item1, iou_node_t, map_item );
		p_iou2 = PARENT_STRUCT( p_item2, iou_node_t, map_item );
		CL_ASSERT( p_iou1->guid == p_iou2->guid );

		/*
		 * Merge the IOC maps - this leaves all duplicates in
		 * p_iou2->ioc_map.
		 */
		cl_qmap_merge( &p_iou1->ioc_map, &p_iou2->ioc_map );
		/*
		 * Merge the path maps - this leaves all duplicates in
		 * p_iou2->path_map.
		 */
		cl_fmap_merge( &p_iou1->path_map, &p_iou2->path_map );

		/* Return the duplicate IOU (along with any duplicate paths and IOCs). */
		cl_fmap_remove_item( &p_results->iou_map, p_item2 );
		__put_iou( gp_ioc_pnp, p_iou2 );

		p_item1 = cl_fmap_next( p_item1 );
		p_item2 = cl_fmap_head( &p_results->iou_map );
	}
	CL_ASSERT( !cl_fmap_count( &p_results->iou_map ) );

	/* Merge in the unique items. */
	cl_fmap_merge( &gp_ioc_pnp->sweep_map, &iou_map1 );
	CL_ASSERT( !cl_fmap_count( &iou_map1 ) );
	cl_fmap_merge( &gp_ioc_pnp->sweep_map, &iou_map2 );
	CL_ASSERT( !cl_fmap_count( &iou_map2 ) );

	AL_EXIT( AL_DBG_PNP );
}
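
/*
 * This relies on the complib map semantics as used here: cl_fmap_delta()
 * moves the items unique to each input map into the two output maps,
 * leaving only the common items behind, and cl_fmap_merge() /
 * cl_qmap_merge() move everything from the source map into the
 * destination, leaving any duplicate keys in the source.  That is why
 * the duplicates can be handed straight back to the object pools.
 */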
\r
static void
__process_sweeps(
	IN				cl_async_proc_item_t		*p_item )
{
	cl_status_t		status;
	cl_fmap_t		old_ious, new_ious;

	AL_ENTER( AL_DBG_PNP );

	CL_ASSERT( p_item == &gp_ioc_pnp->async_item );
	UNUSED_PARAM( p_item );

	CL_ASSERT( !gp_ioc_pnp->query_cnt );

	cl_fmap_init( &old_ious, __iou_cmp );
	cl_fmap_init( &new_ious, __iou_cmp );

	cl_fmap_delta(
		&gp_ioc_pnp->iou_map, &gp_ioc_pnp->sweep_map, &new_ious, &old_ious );

	/* For each duplicate IOU, report changes in IOCs or paths. */
	__change_ious( &gp_ioc_pnp->iou_map, &gp_ioc_pnp->sweep_map );

	/* Report all new IOUs. */
	__add_ious( &gp_ioc_pnp->iou_map, &new_ious, NULL );
	CL_ASSERT( !cl_fmap_count( &new_ious ) );

	/* Report all removed IOUs. */
	__remove_ious( &old_ious );
	CL_ASSERT( !cl_fmap_count( &old_ious ) );

	/* Reset the sweep timer. */
	if( g_ioc_poll_interval )
	{
		status = cl_timer_start(
			&gp_ioc_pnp->sweep_timer, g_ioc_poll_interval );
		CL_ASSERT( status == CL_SUCCESS );
	}

	/* Release the reference we took in the timer callback. */
	deref_al_obj( &gp_ioc_pnp->obj );

	AL_EXIT( AL_DBG_PNP );
}
\r
static void
__change_ious(
	IN				cl_fmap_t* const			p_cur_ious,
	IN				cl_fmap_t* const			p_dup_ious )
{
	cl_fmap_t		new_paths, old_paths;
	cl_qmap_t		new_iocs, old_iocs;
	cl_fmap_item_t	*p_item1, *p_item2;
	iou_node_t		*p_iou1, *p_iou2;

	AL_ENTER( AL_DBG_PNP );

	cl_fmap_init( &new_paths, __path_cmp );
	cl_fmap_init( &old_paths, __path_cmp );
	cl_qmap_init( &new_iocs );
	cl_qmap_init( &old_iocs );

	p_item1 = cl_fmap_head( p_cur_ious );
	p_item2 = cl_fmap_head( p_dup_ious );
	while( p_item1 != cl_fmap_end( p_cur_ious ) )
	{
		p_iou1 = PARENT_STRUCT( p_item1, iou_node_t, map_item );
		p_iou2 = PARENT_STRUCT( p_item2, iou_node_t, map_item );
		CL_ASSERT( p_iou1->guid == p_iou2->guid );

		/* Figure out what changed. */
		cl_fmap_delta(
			&p_iou1->path_map, &p_iou2->path_map, &new_paths, &old_paths );
		cl_qmap_delta(
			&p_iou1->ioc_map, &p_iou2->ioc_map, &new_iocs, &old_iocs );

		/*
		 * Report path changes before IOC changes so that new IOCs
		 * report up-to-date paths.  Report new paths before removing
		 * old ones to minimize the chance of disrupting service,
		 * i.e. the last path being removed before an alternate is available.
		 */
		__add_paths( p_iou1, &p_iou1->ioc_map, &new_paths, NULL );
		CL_ASSERT( !cl_fmap_count( &new_paths ) );

		__remove_paths( &p_iou1->ioc_map, &old_paths );
		CL_ASSERT( !cl_fmap_count( &old_paths ) );

		/* Report IOCs. */
		__add_iocs( p_iou1, &new_iocs, NULL );
		CL_ASSERT( !cl_qmap_count( &new_iocs ) );

		__remove_iocs( p_iou1, &old_iocs );
		CL_ASSERT( !cl_qmap_count( &old_iocs ) );

		/* Done with the duplicate IOU.  Return it to the pool. */
		cl_fmap_remove_item( p_dup_ious, p_item2 );
		__put_iou( gp_ioc_pnp, p_iou2 );

		p_item1 = cl_fmap_next( p_item1 );
		p_item2 = cl_fmap_head( p_dup_ious );
	}
	CL_ASSERT( !cl_fmap_count( p_dup_ious ) );

	AL_EXIT( AL_DBG_PNP );
}
\r
static void
__add_ious(
	IN				cl_fmap_t* const			p_cur_ious,
	IN				cl_fmap_t* const			p_new_ious,
	IN				al_pnp_t* const				p_reg OPTIONAL )
{
	cl_fmap_item_t	*p_item;
	iou_node_t		*p_iou;

	AL_ENTER( AL_DBG_PNP );

	p_item = cl_fmap_head( p_new_ious );
	while( p_item != cl_fmap_end( p_new_ious ) )
	{
		p_iou = PARENT_STRUCT( p_item, iou_node_t, map_item );

		/* Report the IOU addition. */
		__report_iou_add( p_iou, p_reg );

		p_item = cl_fmap_next( p_item );
	}

	if( p_cur_ious != p_new_ious )
	{
		cl_fmap_merge( p_cur_ious, p_new_ious );
		CL_ASSERT( !cl_fmap_count( p_new_ious ) );
	}

	AL_EXIT( AL_DBG_PNP );
}
\r
static void
__remove_ious(
	IN				cl_fmap_t* const			p_old_ious )
{
	cl_fmap_item_t	*p_item;
	iou_node_t		*p_iou;

	AL_ENTER( AL_DBG_PNP );

	p_item = cl_fmap_head( p_old_ious );
	while( p_item != cl_fmap_end( p_old_ious ) )
	{
		p_iou = PARENT_STRUCT( p_item, iou_node_t, map_item );

		/* Report the IOU removal. */
		__report_iou_remove( p_iou );

		cl_fmap_remove_item( p_old_ious, p_item );
		__put_iou( gp_ioc_pnp, p_iou );
		p_item = cl_fmap_head( p_old_ious );
	}
	CL_ASSERT( !cl_fmap_count( p_old_ious ) );

	AL_EXIT( AL_DBG_PNP );
}
\r
static void
__add_iocs(
	IN				iou_node_t* const			p_iou,
	IN				cl_qmap_t* const			p_new_iocs,
	IN				al_pnp_t* const				p_reg OPTIONAL )
{
	cl_map_item_t	*p_item;
	iou_ioc_t		*p_ioc;

	AL_ENTER( AL_DBG_PNP );

	p_item = cl_qmap_head( p_new_iocs );
	while( p_item != cl_qmap_end( p_new_iocs ) )
	{
		p_ioc = PARENT_STRUCT( p_item, iou_ioc_t, map_item );

		/* Report the IOC addition. */
		__report_ioc_add( p_iou, p_ioc, p_reg );

		p_item = cl_qmap_next( p_item );
	}

	if( p_new_iocs != &p_iou->ioc_map )
	{
		cl_qmap_merge( &p_iou->ioc_map, p_new_iocs );
		CL_ASSERT( !cl_qmap_count( p_new_iocs ) );
	}
	AL_EXIT( AL_DBG_PNP );
}
\r
static void
__remove_iocs(
	IN				iou_node_t* const			p_iou,
	IN				cl_qmap_t* const			p_old_iocs )
{
	cl_map_item_t	*p_item;
	iou_ioc_t		*p_ioc;

	AL_ENTER( AL_DBG_PNP );

	p_item = cl_qmap_tail( p_old_iocs );
	while( p_item != cl_qmap_end( p_old_iocs ) )
	{
		p_ioc = PARENT_STRUCT( p_item, iou_ioc_t, map_item );

		/* Report the IOC removal. */
		__report_ioc_remove( p_iou, p_ioc );

		cl_qmap_remove_item( p_old_iocs, p_item );
		__put_ioc( gp_ioc_pnp, p_ioc );
		p_item = cl_qmap_tail( p_old_iocs );
	}
	CL_ASSERT( !cl_qmap_count( p_old_iocs ) );

	AL_EXIT( AL_DBG_PNP );
}
\r
static void
__add_paths(
	IN				iou_node_t* const			p_iou,
	IN				cl_qmap_t* const			p_ioc_map,
	IN				cl_fmap_t* const			p_new_paths,
	IN				al_pnp_t* const				p_reg OPTIONAL )
{
	cl_map_item_t	*p_ioc_item;
	cl_fmap_item_t	*p_item;
	iou_ioc_t		*p_ioc;
	iou_path_t		*p_path;

	AL_ENTER( AL_DBG_PNP );

	p_item = cl_fmap_head( p_new_paths );
	while( p_item != cl_fmap_end( p_new_paths ) )
	{
		p_path = PARENT_STRUCT( p_item, iou_path_t, map_item );

		/* Report the path to all IOCs. */
		for( p_ioc_item = cl_qmap_head( p_ioc_map );
			p_ioc_item != cl_qmap_end( p_ioc_map );
			p_ioc_item = cl_qmap_next( p_ioc_item ) )
		{
			p_ioc = PARENT_STRUCT( p_ioc_item, iou_ioc_t, map_item );
			__report_path( p_ioc, p_path, IB_PNP_IOC_PATH_ADD, p_reg );
		}

		p_item = cl_fmap_next( p_item );
	}

	CL_ASSERT( &p_iou->path_map != p_new_paths );
	/* Merge the new paths into the IOU's path map. */
	cl_fmap_merge( &p_iou->path_map, p_new_paths );
	CL_ASSERT( !cl_fmap_count( p_new_paths ) );

	AL_EXIT( AL_DBG_PNP );
}
\r
static void
__add_ioc_paths(
	IN				iou_ioc_t* const			p_ioc,
	IN				cl_fmap_t* const			p_new_paths,
	IN				al_pnp_t* const				p_reg OPTIONAL )
{
	cl_fmap_item_t	*p_item;
	iou_path_t		*p_path;

	AL_ENTER( AL_DBG_PNP );

	p_item = cl_fmap_head( p_new_paths );
	while( p_item != cl_fmap_end( p_new_paths ) )
	{
		p_path = PARENT_STRUCT( p_item, iou_path_t, map_item );

		__report_path( p_ioc, p_path, IB_PNP_IOC_PATH_ADD, p_reg );

		p_item = cl_fmap_next( p_item );
	}

	AL_EXIT( AL_DBG_PNP );
}
\r
static void
__remove_paths(
	IN				cl_qmap_t* const			p_ioc_map,
	IN				cl_fmap_t* const			p_old_paths )
{
	cl_map_item_t	*p_ioc_item;
	cl_fmap_item_t	*p_item;
	iou_ioc_t		*p_ioc;
	iou_path_t		*p_path;

	AL_ENTER( AL_DBG_PNP );

	p_item = cl_fmap_tail( p_old_paths );
	while( p_item != cl_fmap_end( p_old_paths ) )
	{
		p_path = PARENT_STRUCT( p_item, iou_path_t, map_item );

		/* Report the path removal to all IOCs. */
		for( p_ioc_item = cl_qmap_tail( p_ioc_map );
			p_ioc_item != cl_qmap_end( p_ioc_map );
			p_ioc_item = cl_qmap_prev( p_ioc_item ) )
		{
			p_ioc = PARENT_STRUCT( p_ioc_item, iou_ioc_t, map_item );
			__report_path( p_ioc, p_path, IB_PNP_IOC_PATH_REMOVE, NULL );
		}

		cl_fmap_remove_item( p_old_paths, p_item );
		__put_path( gp_ioc_pnp, p_path );
		p_item = cl_fmap_tail( p_old_paths );
	}
	CL_ASSERT( !cl_fmap_count( p_old_paths ) );

	AL_EXIT( AL_DBG_PNP );
}
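
/*
 * Removal notifications deliberately walk both the path map and the IOC
 * map from the tail, mirroring the head-first walks used for the ADD
 * reporting above, so clients see removals in the reverse of the order
 * in which they saw additions.
 */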
\r
static cl_status_t
__notify_users(
	IN		const	cl_list_item_t* const		p_item,
	IN				al_pnp_ioc_event_t* const	p_event )
{
	ib_api_status_t		status;
	al_pnp_t			*p_reg;
	al_pnp_context_t	*p_context;

	AL_ENTER( AL_DBG_PNP );

	p_reg = PARENT_STRUCT( p_item, al_pnp_t, list_item );

	/* Copy the source record into the user's record. */
	cl_memcpy( p_event->p_user_rec, p_event->p_rec, p_event->rec_size );
	p_event->p_user_rec->h_pnp = p_reg;
	p_event->p_user_rec->pnp_context = (void*)p_reg->obj.context;

	switch( p_event->p_rec->pnp_event )
	{
	case IB_PNP_IOU_ADD:
		CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOU );
		p_context = pnp_create_context( p_reg, &p_event->p_rec->guid );
		break;

	case IB_PNP_IOU_REMOVE:
		CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOU );
		/* Lookup the context for this IOU. */
		p_context = pnp_get_context( p_reg, &p_event->p_rec->guid );
		break;

	case IB_PNP_IOC_ADD:
		CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOC );
		p_context = pnp_create_context( p_reg, &p_event->p_rec->guid );
		break;

	case IB_PNP_IOC_REMOVE:
	case IB_PNP_IOC_PATH_ADD:
	case IB_PNP_IOC_PATH_REMOVE:
		CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOC );
		p_context = pnp_get_context( p_reg, &p_event->p_rec->guid );
		break;

	default:
		AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_PNP,
			("Invalid PnP event %#x\n", p_event->p_rec->pnp_event) );
		return CL_NOT_DONE;
	}

	if( !p_context )
		return CL_NOT_FOUND;

	p_event->p_user_rec->context = (void*)p_context->context;

	/* Notify the user. */
	status = p_reg->pfn_pnp_cb( p_event->p_user_rec );

	/* Update the context. */
	if( status != IB_SUCCESS ||
		p_event->p_rec->pnp_event == IB_PNP_IOU_REMOVE ||
		p_event->p_rec->pnp_event == IB_PNP_IOC_REMOVE )
	{
		cl_fmap_remove_item( &p_reg->context_map, &p_context->map_item );
		cl_free( p_context );
	}
	else
	{
		p_context->context = p_event->p_user_rec->context;
	}

	AL_EXIT( AL_DBG_PNP );
	return CL_NOT_FOUND;
}
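
/*
 * __notify_users() is written as a cl_qlist find callback but never
 * returns CL_SUCCESS, so cl_qlist_find_from_head()/_from_tail() keep
 * invoking it for every registrant instead of stopping at a match;
 * the "find" machinery is simply reused as a list iterator here.
 */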
\r
static void
__report_iou_add(
	IN				iou_node_t* const			p_iou,
	IN				al_pnp_t* const				p_reg OPTIONAL )
{
	al_pnp_ioc_event_t	event;
	ib_pnp_iou_rec_t	*p_rec, *p_user_rec;

	AL_ENTER( AL_DBG_PNP );

	event.rec_size = sizeof(ib_pnp_iou_rec_t);
	event.rec_size = ROUNDUP( event.rec_size, sizeof(void*) );

	p_rec = cl_zalloc( event.rec_size * 2 );
	if( !p_rec )
	{
		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
			("Failed to allocate user record.\n") );
		return;
	}
	p_rec->pnp_rec.pnp_event = IB_PNP_IOU_ADD;
	p_rec->pnp_rec.guid = p_iou->guid;
	p_rec->pnp_rec.ca_guid = p_iou->ca_guid;

	p_rec->ca_guid = p_iou->ca_guid;
	p_rec->guid = p_iou->guid;
	p_rec->chassis_guid = p_iou->chassis_guid;
	p_rec->vend_id = p_iou->vend_id;
	p_rec->dev_id = p_iou->dev_id;
	p_rec->revision = p_iou->revision;
	cl_memcpy( p_rec->desc, p_iou->desc, sizeof(p_rec->desc) );
	p_user_rec = (ib_pnp_iou_rec_t*)(((uint8_t*)p_rec) + event.rec_size);

	event.p_rec = (ib_pnp_rec_t*)p_rec;
	event.p_user_rec = (ib_pnp_rec_t*)p_user_rec;

	if( p_reg )
	{
		if( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOU )
			__notify_users( &p_reg->list_item, &event );
		else
			__add_iocs( p_iou, &p_iou->ioc_map, p_reg );
	}
	else
	{
		/* Report the IOU to all clients registered for IOU events. */
		cl_qlist_find_from_head( &gp_ioc_pnp->iou_reg_list,
			__notify_users, &event );

		/* Report IOCs - this will in turn report the paths. */
		__add_iocs( p_iou, &p_iou->ioc_map, NULL );
	}

	cl_free( p_rec );
	AL_EXIT( AL_DBG_PNP );
}
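
/*
 * Both report helpers allocate rec_size * 2: the first half is the
 * master record filled in above, and the second half is scratch space
 * that __notify_users() copies into for each registrant.  One allocation
 * thus serves every callback without letting any client modify the
 * master copy.
 */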
\r
static void
__report_iou_remove(
	IN				iou_node_t* const			p_iou )
{
	al_pnp_ioc_event_t	event;
	ib_pnp_iou_rec_t	rec, user_rec;

	AL_ENTER( AL_DBG_PNP );

	/* Report the IOC removals first - this will in turn report the paths. */
	__remove_iocs( p_iou, &p_iou->ioc_map );

	cl_memclr( &rec, sizeof(ib_pnp_iou_rec_t) );
	rec.pnp_rec.pnp_event = IB_PNP_IOU_REMOVE;
	rec.pnp_rec.guid = p_iou->guid;
	rec.pnp_rec.ca_guid = p_iou->ca_guid;

	event.rec_size = sizeof(ib_pnp_iou_rec_t);
	event.p_rec = (ib_pnp_rec_t*)&rec;
	event.p_user_rec = (ib_pnp_rec_t*)&user_rec;

	/*
	 * Report the IOU removal to all clients registered for IOU events,
	 * in the reverse order of the ADD notifications.
	 */
	cl_qlist_find_from_tail( &gp_ioc_pnp->iou_reg_list,
		__notify_users, &event );

	AL_EXIT( AL_DBG_PNP );
}
\r
static void
__report_ioc_add(
	IN				iou_node_t* const			p_iou,
	IN				iou_ioc_t* const			p_ioc,
	IN				al_pnp_t* const				p_reg OPTIONAL )
{
	al_pnp_ioc_event_t	event;
	ib_pnp_ioc_rec_t	*p_rec;

	AL_ENTER( AL_DBG_PNP );

	event.rec_size = sizeof(ib_pnp_ioc_rec_t) +
		(sizeof(ib_svc_entry_t) * (p_ioc->profile.num_svc_entries - 1));
	event.rec_size = ROUNDUP( event.rec_size, sizeof(void*) );

	/*
	 * The layout of the pnp record is as follows:
	 *	ib_pnp_ioc_rec_t, including the first service entry
	 *	remaining service entries
	 *	user's copy of the above
	 *
	 * This is needed to keep the service entries contiguous to the first
	 * entry in the pnp record.
	 */
	p_rec = (ib_pnp_ioc_rec_t*)cl_zalloc( event.rec_size * 2 );
	if( !p_rec )
		return;

	p_rec->pnp_rec.pnp_event = IB_PNP_IOC_ADD;
	p_rec->pnp_rec.guid = p_ioc->profile.ioc_guid;
	p_rec->pnp_rec.ca_guid = p_ioc->p_iou->ca_guid;

	p_rec->ca_guid = p_ioc->p_iou->ca_guid;
	cl_memcpy( p_rec->svc_entry_array, p_ioc->p_svc_entries,
		p_ioc->profile.num_svc_entries * sizeof(ib_svc_entry_t) );
	p_rec->info.chassis_guid = p_iou->chassis_guid;
	p_rec->info.chassis_slot = p_iou->slot;
	p_rec->info.iou_guid = p_iou->guid;
	p_rec->info.iou_slot = p_ioc->slot;
	p_rec->info.profile = p_ioc->profile;

	event.p_rec = (ib_pnp_rec_t*)p_rec;
	event.p_user_rec = (ib_pnp_rec_t*)(((uint8_t*)p_rec) + event.rec_size);

	if( p_reg )
	{
		__notify_users( &p_reg->list_item, &event );
	}
	else
	{
		/* Report the IOC to all clients registered for IOC events. */
		cl_qlist_find_from_head( &gp_ioc_pnp->ioc_reg_list,
			__notify_users, &event );
	}

	/* Report the paths for this IOC only. */
	__add_ioc_paths( p_ioc, &p_iou->path_map, p_reg );

	cl_free( p_rec );
	AL_EXIT( AL_DBG_PNP );
}
\r
static void
__report_ioc_remove(
	IN				iou_node_t* const			p_iou,
	IN				iou_ioc_t* const			p_ioc )
{
	al_pnp_ioc_event_t	event;
	ib_pnp_ioc_rec_t	rec, user_rec;

	AL_ENTER( AL_DBG_PNP );

	UNUSED_PARAM( p_iou );