[HW] removed MT23108 driver by emptying files.
author leonidk <leonidk@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Thu, 28 Feb 2008 13:03:17 +0000 (13:03 +0000)
committer leonidk <leonidk@ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86>
Thu, 28 Feb 2008 13:03:17 +0000 (13:03 +0000)
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
git-svn-id: svn://openib.tc.cornell.edu/gen1/trunk@951 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86

292 files changed:
hw/dirs
hw/mt23108/dirs
hw/mt23108/kernel/Makefile
hw/mt23108/kernel/SOURCES
hw/mt23108/kernel/hca.rc
hw/mt23108/kernel/hca_data.c
hw/mt23108/kernel/hca_data.h
hw/mt23108/kernel/hca_debug.h
hw/mt23108/kernel/hca_direct.c
hw/mt23108/kernel/hca_driver.c
hw/mt23108/kernel/hca_driver.h
hw/mt23108/kernel/hca_mcast.c
hw/mt23108/kernel/hca_memory.c
hw/mt23108/kernel/hca_smp.c
hw/mt23108/kernel/hca_verbs.c
hw/mt23108/kernel/infinihost.inf
hw/mt23108/user/Makefile
hw/mt23108/user/SOURCES
hw/mt23108/user/hca_data.h
hw/mt23108/user/mlnx_ual_av.c
hw/mt23108/user/mlnx_ual_ca.c
hw/mt23108/user/mlnx_ual_cq.c
hw/mt23108/user/mlnx_ual_main.c
hw/mt23108/user/mlnx_ual_main.h
hw/mt23108/user/mlnx_ual_mcast.c
hw/mt23108/user/mlnx_ual_mrw.c
hw/mt23108/user/mlnx_ual_osbypass.c
hw/mt23108/user/mlnx_ual_pd.c
hw/mt23108/user/mlnx_ual_qp.c
hw/mt23108/user/uvpd.rc
hw/mt23108/user/uvpd_exports.src
hw/mt23108/vapi/Hca/hcahal/hh.c
hw/mt23108/vapi/Hca/hcahal/hh.h
hw/mt23108/vapi/Hca/hcahal/hh_common.c
hw/mt23108/vapi/Hca/hcahal/hh_common.h
hw/mt23108/vapi/Hca/hcahal/hh_init.h
hw/mt23108/vapi/Hca/hcahal/hh_rx_stub.c
hw/mt23108/vapi/Hca/hcahal/hh_stub_defines.h
hw/mt23108/vapi/Hca/hcahal/hh_tx_stub.c
hw/mt23108/vapi/Hca/hcahal/hh_tx_stub_defines.h
hw/mt23108/vapi/Hca/hcahal/hhenosys.ic
hw/mt23108/vapi/Hca/hcahal/hhul.c
hw/mt23108/vapi/Hca/hcahal/hhul.h
hw/mt23108/vapi/Hca/hcahal/hhul_obj.h
hw/mt23108/vapi/Hca/hcahal/hhul_stub.c
hw/mt23108/vapi/Hca/hcahal/hhulenosys.ic
hw/mt23108/vapi/Hca/hcahal/invalid.ic
hw/mt23108/vapi/Hca/hcahal/rx_stub.c
hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmd_types.h
hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif.c
hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif.h
hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif_priv.h
hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmds_wrap.c
hw/mt23108/vapi/Hca/hcahal/tavor/ddrmm/tddrmm.c
hw/mt23108/vapi/Hca/hcahal/tavor/ddrmm/tddrmm.h
hw/mt23108/vapi/Hca/hcahal/tavor/eventp/event_irqh.c
hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp.c
hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp.h
hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp_priv.h
hw/mt23108/vapi/Hca/hcahal/tavor/mcgm/mcgm.c
hw/mt23108/vapi/Hca/hcahal/tavor/mcgm/mcgm.h
hw/mt23108/vapi/Hca/hcahal/tavor/mrwm/tmrwm.c
hw/mt23108/vapi/Hca/hcahal/tavor/mrwm/tmrwm.h
hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_kl.def
hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_mod_obj.c
hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_mod_obj.h
hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thhul_kl.def
hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thhul_mod_obj.c
hw/mt23108/vapi/Hca/hcahal/tavor/thh.h
hw/mt23108/vapi/Hca/hcahal/tavor/thh_common.h
hw/mt23108/vapi/Hca/hcahal/tavor/thh_cqm/tcqm.c
hw/mt23108/vapi/Hca/hcahal/tavor/thh_cqm/tcqm.h
hw/mt23108/vapi/Hca/hcahal/tavor/thh_default_profile.h
hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob.c
hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob.h
hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob_priv.h
hw/mt23108/vapi/Hca/hcahal/tavor/thh_init.c
hw/mt23108/vapi/Hca/hcahal/tavor/thh_init.h
hw/mt23108/vapi/Hca/hcahal/tavor/thh_qpm/tqpm.c
hw/mt23108/vapi/Hca/hcahal/tavor/thh_qpm/tqpm.h
hw/mt23108/vapi/Hca/hcahal/tavor/thh_requested_profile.h
hw/mt23108/vapi/Hca/hcahal/tavor/thh_srqm/thh_srqm.c
hw/mt23108/vapi/Hca/hcahal/tavor/thh_srqm/thh_srqm.h
hw/mt23108/vapi/Hca/hcahal/tavor/thhul.h
hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.c
hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.h
hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.c
hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.h
hw/mt23108/vapi/Hca/hcahal/tavor/thhul_mwm/thhul_mwm.c
hw/mt23108/vapi/Hca/hcahal/tavor/thhul_mwm/thhul_mwm.h
hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm.c
hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm.h
hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm_priv.h
hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm.c
hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm.h
hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm_ibal.h
hw/mt23108/vapi/Hca/hcahal/tavor/thhul_srqm/thhul_srqm.c
hw/mt23108/vapi/Hca/hcahal/tavor/thhul_srqm/thhul_srqm.h
hw/mt23108/vapi/Hca/hcahal/tavor/uar/uar.c
hw/mt23108/vapi/Hca/hcahal/tavor/uar/uar.h
hw/mt23108/vapi/Hca/hcahal/tavor/udavm/udavm.c
hw/mt23108/vapi/Hca/hcahal/tavor/udavm/udavm.h
hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm.c
hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm.h
hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm_priv.h
hw/mt23108/vapi/Hca/hcahal/tavor/util/epool.c
hw/mt23108/vapi/Hca/hcahal/tavor/util/epool.h
hw/mt23108/vapi/Hca/hcahal/tavor/util/extbuddy.c
hw/mt23108/vapi/Hca/hcahal/tavor/util/extbuddy.h
hw/mt23108/vapi/Hca/hcahal/tavor/util/sm_mad.c
hw/mt23108/vapi/Hca/hcahal/tavor/util/sm_mad.h
hw/mt23108/vapi/Hca/hcahal/tavor/util/tlog2.c
hw/mt23108/vapi/Hca/hcahal/tavor/util/tlog2.h
hw/mt23108/vapi/Hca/hcahal/zombie.ic
hw/mt23108/vapi/Hca/verbs/common/allocator.h
hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_common.def
hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_common_kl.def
hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_mod_obj.c
hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vip_imp.h
hw/mt23108/vapi/Hca/verbs/common/vapi_common.c
hw/mt23108/vapi/Hca/verbs/common/vapi_common.h
hw/mt23108/vapi/Hca/verbs/common/vip_array.c
hw/mt23108/vapi/Hca/verbs/common/vip_array.h
hw/mt23108/vapi/Hca/verbs/common/vip_cirq.c
hw/mt23108/vapi/Hca/verbs/common/vip_cirq.h
hw/mt23108/vapi/Hca/verbs/common/vip_common.h
hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock.c
hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock.h
hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock_priv.h
hw/mt23108/vapi/Hca/verbs/common/vip_hash.c
hw/mt23108/vapi/Hca/verbs/common/vip_hash.h
hw/mt23108/vapi/Hca/verbs/common/vip_hash.ic
hw/mt23108/vapi/Hca/verbs/common/vip_hash.ih
hw/mt23108/vapi/Hca/verbs/common/vip_hash64p.h
hw/mt23108/vapi/Hca/verbs/common/vip_hashp.h
hw/mt23108/vapi/Hca/verbs/common/vip_hashp2p.h
hw/mt23108/vapi/Hca/verbs/common/vip_hashv4p.h
hw/mt23108/vapi/Hca/verbs/evapi.h
hw/mt23108/vapi/Hca/verbs/vapi.h
hw/mt23108/vapi/Hca/verbs/vapi_features.h
hw/mt23108/vapi/Hca/verbs/vapi_types.h
hw/mt23108/vapi/dirs
hw/mt23108/vapi/kernel/Makefile
hw/mt23108/vapi/kernel/SOURCES
hw/mt23108/vapi/kernel/hh_kl_sources.c
hw/mt23108/vapi/kernel/mdmsg.h
hw/mt23108/vapi/kernel/mosal_kl_sources.c
hw/mt23108/vapi/kernel/mpga_kl_sources.c
hw/mt23108/vapi/kernel/mt23108.def
hw/mt23108/vapi/kernel/mt23108.rc
hw/mt23108/vapi/kernel/mtl_common_kl_sources.c
hw/mt23108/vapi/kernel/tdriver_sources.c
hw/mt23108/vapi/kernel/thh_kl_sources.c
hw/mt23108/vapi/kernel/thhul_kl_sources.c
hw/mt23108/vapi/kernel/vapi_common_kl_sources.c
hw/mt23108/vapi/mlxsys/mosal/mosal.h
hw/mt23108/vapi/mlxsys/mosal/mosal_gen.h
hw/mt23108/vapi/mlxsys/mosal/mosal_gen_nos.c
hw/mt23108/vapi/mlxsys/mosal/mosal_i2c.h
hw/mt23108/vapi/mlxsys/mosal/mosal_iobuf.h
hw/mt23108/vapi/mlxsys/mosal/mosal_k2u_cbk.h
hw/mt23108/vapi/mlxsys/mosal/mosal_k2u_cbk_priv.h
hw/mt23108/vapi/mlxsys/mosal/mosal_mem.h
hw/mt23108/vapi/mlxsys/mosal/mosal_mlock.h
hw/mt23108/vapi/mlxsys/mosal/mosal_prot_ctx.h
hw/mt23108/vapi/mlxsys/mosal/mosal_que.h
hw/mt23108/vapi/mlxsys/mosal/mosal_sync.h
hw/mt23108/vapi/mlxsys/mosal/mosal_thread.h
hw/mt23108/vapi/mlxsys/mosal/mosal_timer.h
hw/mt23108/vapi/mlxsys/mosal/mosalu_socket.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal.def
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_arch.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_bus.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_bus.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_driver.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_gen.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_gen_priv.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_iobuf.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_iobuf_imp.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_k2u_cbk.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_kl.def
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem_imp.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem_priv.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mlock.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mlock_priv.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_ntddk.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_ntddk.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_priv.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_prot_ctx_imp.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_que.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_que_priv.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync_imp.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync_priv.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_thread.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_thread_imp.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer_imp.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer_priv.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_types.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_util.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_util.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_driver.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_k2u_cbk.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_k2u_cbk.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_mem.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_socket.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_socket_imp.h
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_sync.c
hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_thread.c
hw/mt23108/vapi/mlxsys/mpga/MPGA_headers.h
hw/mt23108/vapi/mlxsys/mpga/ib_opcodes.h
hw/mt23108/vapi/mlxsys/mpga/internal_functions.c
hw/mt23108/vapi/mlxsys/mpga/internal_functions.h
hw/mt23108/vapi/mlxsys/mpga/mpga.c
hw/mt23108/vapi/mlxsys/mpga/mpga.h
hw/mt23108/vapi/mlxsys/mpga/mpga_sv.c
hw/mt23108/vapi/mlxsys/mpga/mpga_sv.h
hw/mt23108/vapi/mlxsys/mpga/nMPGA.c
hw/mt23108/vapi/mlxsys/mpga/nMPGA.h
hw/mt23108/vapi/mlxsys/mpga/nMPGA_packet_append.c
hw/mt23108/vapi/mlxsys/mpga/nMPGA_packet_append.h
hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga.def
hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga_driver.c
hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga_kl.def
hw/mt23108/vapi/mlxsys/mpga/packet_append.c
hw/mt23108/vapi/mlxsys/mpga/packet_append.h
hw/mt23108/vapi/mlxsys/mpga/packet_utilities.c
hw/mt23108/vapi/mlxsys/mpga/packet_utilities.h
hw/mt23108/vapi/mlxsys/mpga/ud_pack_fmt.h
hw/mt23108/vapi/mlxsys/mtl_common/mtl_common.c
hw/mt23108/vapi/mlxsys/mtl_common/mtl_common.h
hw/mt23108/vapi/mlxsys/mtl_common/mtl_log.h
hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_common.def
hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_common_kl.def
hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_log_win.h
hw/mt23108/vapi/mlxsys/mtl_types/bit_ops.h
hw/mt23108/vapi/mlxsys/mtl_types/ib_defs.h
hw/mt23108/vapi/mlxsys/mtl_types/mtl_errno.h
hw/mt23108/vapi/mlxsys/mtl_types/mtl_pci_types.h
hw/mt23108/vapi/mlxsys/mtl_types/mtl_types.h
hw/mt23108/vapi/mlxsys/mtl_types/win/MdIoctl.h
hw/mt23108/vapi/mlxsys/mtl_types/win/MdIoctlSpec.h
hw/mt23108/vapi/mlxsys/mtl_types/win/endian.h
hw/mt23108/vapi/mlxsys/mtl_types/win/mtl_sys_defs.h
hw/mt23108/vapi/mlxsys/mtl_types/win/mtl_sys_types.h
hw/mt23108/vapi/mlxsys/mtl_types/win/unistd.h
hw/mt23108/vapi/mlxsys/mtl_types/win/win/mtl_arch_types.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.c
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.rc
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCard.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConf.c
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConf.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConfPriv.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCtl.c
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCtl.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdDbg.c
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdDbg.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdGen.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdGuid.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdIoctl.c
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPci.c
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPci.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPnp.c
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPwr.c
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdRdWr.c
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdUtil.c
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdUtil.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MddLib.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/infinihost.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/mdmsg/MdMsg.c
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/mdmsg/MdMsg.mc
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/resource.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/tavor_csp.h
hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/version.h
hw/mt23108/vapi/mlxsys/tools/mtperf.h
hw/mt23108/vapi/tavor_arch_db/MT23108.h
hw/mt23108/vapi/tavor_arch_db/MT23108_PRM.h
hw/mt23108/vapi/tavor_arch_db/MT23108_PRM_append.h
hw/mt23108/vapi/tavor_arch_db/cr_types.h
hw/mt23108/vapi/tavor_arch_db/tavor_dev_defs.h
hw/mt23108/vapi/tavor_arch_db/tavor_if_defs.h
hw/mt23108/vapi/user/Makefile
hw/mt23108/vapi/user/SOURCES
hw/mt23108/vapi/user/hh_ul_sources.c
hw/mt23108/vapi/user/mosal_ul_sources.c
hw/mt23108/vapi/user/mpga_ul_sources.c
hw/mt23108/vapi/user/mtl_common_ul_sources.c
hw/mt23108/vapi/user/thhul_ul_sources.c
hw/mt23108/vapi/user/vapi_common_ul_sources.c

diff --git a/hw/dirs b/hw/dirs
index f65d556..5905f6c 100644 (file)
--- a/hw/dirs
+++ b/hw/dirs
@@ -1,3 +1,2 @@
 DIRS=\\r
-       mt23108 \\r
        mthca\r
diff --git a/hw/mt23108/dirs b/hw/mt23108/dirs
index 9adac80..e69de29 100644 (file)
--- a/hw/mt23108/dirs
+++ b/hw/mt23108/dirs
@@ -1,4 +0,0 @@
-DIRS=\\r
-       vapi    \\r
-       kernel  \\r
-       user\r
diff --git a/hw/mt23108/kernel/Makefile b/hw/mt23108/kernel/Makefile
index bffacaa..e69de29 100644 (file)
--- a/hw/mt23108/kernel/Makefile
+++ b/hw/mt23108/kernel/Makefile
@@ -1,7 +0,0 @@
-#\r
-# DO NOT EDIT THIS FILE!!!  Edit .\sources. if you want to add a new source\r
-# file to this component.  This file merely indirects to the real make file\r
-# that is shared by all the driver components of the OpenIB Windows project.\r
-#\r
-\r
-!INCLUDE ..\..\..\inc\openib.def\r
diff --git a/hw/mt23108/kernel/SOURCES b/hw/mt23108/kernel/SOURCES
index cdef483..e69de29 100644 (file)
--- a/hw/mt23108/kernel/SOURCES
+++ b/hw/mt23108/kernel/SOURCES
@@ -1,58 +0,0 @@
-TARGETNAME=thca\r
-TARGETPATH=..\..\..\bin\kernel\obj$(BUILD_ALT_DIR)\r
-TARGETTYPE=DRIVER\r
-\r
-SOURCES= hca_driver.c  \\r
-       hca_data.c                      \\r
-       hca_direct.c            \\r
-       hca_mcast.c                     \\r
-       hca_memory.c            \\r
-       hca_verbs.c                     \\r
-       hca_smp.c                       \\r
-       hca.rc\r
-\r
-MT_HOME=..\vapi\r
-\r
-INCLUDES=\\r
-       ..\..\..\inc;..\..\..\inc\kernel; \\r
-       $(MT_HOME)\tavor_arch_db; \\r
-       $(MT_HOME)\Hca\verbs; \\r
-       $(MT_HOME)\Hca\verbs\common; \\r
-       $(MT_HOME)\mlxsys\mtl_types; \\r
-       $(MT_HOME)\mlxsys\mtl_types\win; \\r
-       $(MT_HOME)\mlxsys\mtl_types\win\win; \\r
-       $(MT_HOME)\mlxsys\mtl_common; \\r
-       $(MT_HOME)\mlxsys\mtl_common\os_dep\win; \\r
-       $(MT_HOME)\mlxsys\mosal; \\r
-       $(MT_HOME)\mlxsys\mosal\os_dep\win; \\r
-       $(MT_HOME)\Hca\hcahal; \\r
-       $(MT_HOME)\Hca\hcahal\tavor; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\os_dep\win; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\thhul_hob; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\thhul_pdm; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\thhul_cqm; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\thhul_qpm; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\thhul_mwm; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\util; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\thh_hob; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\cmdif; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\eventp; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\uar; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\mrwm; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\udavm; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\mcgm; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\ddrmm; \\r
-       $(MT_HOME)\Hca\hcahal\tavor\uldm; \\r
-       $(MT_HOME)\mlxsys\os_dep\win\tdriver;\r
-\r
-C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS -D__MSC__ \\r
-       -D__KERNEL__ -D__WIN__ -D__LITTLE_ENDIAN -DMT_LITTLE_ENDIAN \\r
-       -DUSE_RELAY_MOD_NAME -DMAX_ERROR=4 -DIVAPI_THH \\r
-       -DMTL_MODULE=HCA\r
-\r
-TARGETLIBS= \\r
-       $(TARGETPATH)\*\complib.lib     \\r
-       $(TARGETPATH)\*\mt23108.lib     \\r
-       $(DDK_LIB_PATH)\wdmguid.lib\r
-\r
-MSC_WARNING_LEVEL= /W4\r
diff --git a/hw/mt23108/kernel/hca.rc b/hw/mt23108/kernel/hca.rc
index 9dfda9e..e69de29 100644 (file)
--- a/hw/mt23108/kernel/hca.rc
+++ b/hw/mt23108/kernel/hca.rc
@@ -1,47 +0,0 @@
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- *     Redistribution and use in source and binary forms, with or\r
- *     without modification, are permitted provided that the following\r
- *     conditions are met:\r
- *\r
- *      - Redistributions of source code must retain the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer.\r
- *\r
- *      - Redistributions in binary form must reproduce the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer in the documentation and/or other materials\r
- *        provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id$\r
- */\r
-\r
-\r
-#include <oib_ver.h>\r
-\r
-#define VER_FILETYPE                           VFT_DRV\r
-#define VER_FILESUBTYPE                                VFT2_UNKNOWN\r
-\r
-#ifdef _DEBUG_\r
-#define VER_FILEDESCRIPTION_STR                "Tavor HCA Filter Driver (Debug)"\r
-#else\r
-#define VER_FILEDESCRIPTION_STR                "Tavor HCA Filter Driver"\r
-#endif\r
-\r
-#define VER_INTERNALNAME_STR           "thca.sys"\r
-#define VER_ORIGINALFILENAME_STR       "thca.sys"\r
-\r
-#include <common.ver>\r
diff --git a/hw/mt23108/kernel/hca_data.c b/hw/mt23108/kernel/hca_data.c
index 0a64566..e69de29 100644 (file)
--- a/hw/mt23108/kernel/hca_data.c
+++ b/hw/mt23108/kernel/hca_data.c
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
- * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- *     Redistribution and use in source and binary forms, with or\r
- *     without modification, are permitted provided that the following\r
- *     conditions are met:\r
- *\r
- *      - Redistributions of source code must retain the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer.\r
- *\r
- *      - Redistributions in binary form must reproduce the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer in the documentation and/or other materials\r
- *        provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id$\r
- */\r
-\r
-\r
-#include "hca_data.h"\r
-#include "hca_debug.h"\r
-\r
-static cl_spinlock_t   hob_lock;\r
-\r
-#if 1\r
-u_int32_t              g_mlnx_dbg_lvl = CL_DBG_ERROR ;\r
-#else\r
-u_int32_t              g_mlnx_dbg_lvl = CL_DBG_ERROR |\r
-       MLNX_DBG_QPN |\r
-       MLNX_DBG_MEM |\r
-       MLNX_DBG_INFO |\r
-       MLNX_DBG_TRACE |\r
-       // MLNX_DBG_DIRECT |\r
-       0;\r
-#endif\r
-\r
-u_int32_t              g_mlnx_dpc2thread = 0;\r
-\r
-#ifdef MODULE_LICENSE\r
-MODULE_LICENSE("Proprietary");\r
-#endif\r
-\r
-MODULE_PARM(g_mlnx_dbg_lvl, "i");\r
-MODULE_PARM(g_mlnx_dpc2thread, "i");\r
-\r
-cl_qlist_t             mlnx_hca_list;\r
-//mlnx_hca_t           mlnx_hca_array[MLNX_MAX_HCA];\r
-//uint32_t             mlnx_num_hca = 0;\r
-\r
-mlnx_hob_t             mlnx_hob_array[MLNX_NUM_HOBKL];         // kernel HOB - one per HCA (cmdif access)\r
-\r
-mlnx_hobul_t   *mlnx_hobul_array[MLNX_NUM_HOBUL];      // kernel HOBUL - one per HCA (kar access)\r
-\r
-/* User verb library name */\r
-/* TODO: Move to linux osd file.\r
-char                   mlnx_uvp_lib_name[MAX_LIB_NAME] = {"libmlnx_uvp.so"};\r
-*/\r
-\r
-static void\r
-mlnx_async_dpc(\r
-       IN                              cl_async_proc_item_t            *async_item_p );\r
-\r
-#if MLNX_COMP_MODEL\r
-static void\r
-mlnx_comp_dpc(\r
-       IN                              PRKDPC                                          p_dpc,\r
-       IN                              void                                            *context,\r
-       IN                              void                                            *pfn_comp_cb,\r
-       IN                              void                                            *unused );\r
-#else\r
-static void\r
-mlnx_comp_dpc(\r
-       IN                              cl_async_proc_item_t            *async_item_p );\r
-#endif\r
-\r
-// ### Callback Interface\r
-static void\r
-mlnx_comp_cb(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN                              HH_cq_hndl_t                            hh_cq,\r
-       IN                              void                                            *private_data);\r
-\r
-static void\r
-mlnx_async_cb(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN                              HH_event_record_t                       *hh_er_p,\r
-       IN                              void                                            *private_data);\r
-\r
-/////////////////////////////////////////////////////////\r
-// ### HCA\r
-/////////////////////////////////////////////////////////\r
-void\r
-mlnx_hca_insert(\r
-       IN                              mlnx_hca_t                                      *p_hca )\r
-{\r
-       cl_spinlock_acquire( &hob_lock );\r
-       cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item );\r
-       cl_spinlock_release( &hob_lock );\r
-}\r
-\r
-void\r
-mlnx_hca_remove(\r
-       IN                              mlnx_hca_t                                      *p_hca )\r
-{\r
-       cl_spinlock_acquire( &hob_lock );\r
-       cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item );\r
-       cl_spinlock_release( &hob_lock );\r
-}\r
-\r
-mlnx_hca_t*\r
-mlnx_hca_from_guid(\r
-       IN                              ib_net64_t                                      guid )\r
-{\r
-       cl_list_item_t  *p_item;\r
-       mlnx_hca_t              *p_hca = NULL;\r
-\r
-       cl_spinlock_acquire( &hob_lock );\r
-       p_item = cl_qlist_head( &mlnx_hca_list );\r
-       while( p_item != cl_qlist_end( &mlnx_hca_list ) )\r
-       {\r
-               p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );\r
-               if( p_hca->guid == guid )\r
-                       break;\r
-               p_item = cl_qlist_next( p_item );\r
-               p_hca = NULL;\r
-       }\r
-       cl_spinlock_release( &hob_lock );\r
-       return p_hca;\r
-}\r
-\r
-mlnx_hca_t*\r
-mlnx_hca_from_hh_hndl(\r
-       IN                              HH_hca_hndl_t                                   hh_hndl )\r
-{\r
-       cl_list_item_t  *p_item;\r
-       mlnx_hca_t              *p_hca = NULL;\r
-\r
-       cl_spinlock_acquire( &hob_lock );\r
-       p_item = cl_qlist_head( &mlnx_hca_list );\r
-       while( p_item != cl_qlist_end( &mlnx_hca_list ) )\r
-       {\r
-               p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );\r
-               if( p_hca->hh_hndl == hh_hndl )\r
-                       break;\r
-               p_item = cl_qlist_next( p_item );\r
-               p_hca = NULL;\r
-       }\r
-       cl_spinlock_release( &hob_lock );\r
-       return p_hca;\r
-}\r
-\r
-\r
-/*\r
-void\r
-mlnx_names_from_guid(\r
-       IN                              ib_net64_t                                      guid,\r
-               OUT                     char                                            **hca_name_p,\r
-               OUT                     char                                            **dev_name_p)\r
-{\r
-       unsigned int idx;\r
-\r
-       if (!hca_name_p) return;\r
-       if (!dev_name_p) return;\r
-\r
-       for (idx = 0; idx < mlnx_num_hca; idx++)\r
-       {\r
-               if (mlnx_hca_array[idx].ifx.guid == guid)\r
-               {\r
-                       *hca_name_p = mlnx_hca_array[idx].hca_name_p;\r
-                       *dev_name_p = mlnx_hca_array[idx].dev_name_p;\r
-               }\r
-       }\r
-}\r
-*/\r
-\r
-/////////////////////////////////////////////////////////\r
-// ### HOB\r
-/////////////////////////////////////////////////////////\r
-cl_status_t\r
-mlnx_hobs_init( void )\r
-{\r
-       u_int32_t idx;\r
-\r
-       cl_qlist_init( &mlnx_hca_list );\r
-\r
-       for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)\r
-       {\r
-               mlnx_hob_array[idx].hh_hndl = NULL;\r
-               mlnx_hob_array[idx].comp_cb_p = NULL;\r
-               mlnx_hob_array[idx].async_cb_p = NULL;\r
-               mlnx_hob_array[idx].ca_context = NULL;\r
-               mlnx_hob_array[idx].async_proc_mgr_p = NULL;\r
-               mlnx_hob_array[idx].cl_device_h = NULL;\r
-               // mlnx_hob_array[idx].port_lmc_p = NULL;\r
-               mlnx_hob_array[idx].index = idx;\r
-               mlnx_hob_array[idx].mark = E_MARK_INVALID;\r
-       }\r
-       return cl_spinlock_init( &hob_lock );\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-ib_api_status_t\r
-mlnx_hobs_insert(\r
-       IN                              mlnx_hca_t                                      *p_hca,\r
-               OUT                     mlnx_hob_t                                      **hob_pp)\r
-{\r
-       u_int32_t idx;\r
-       ib_api_status_t status = IB_ERROR;\r
-       mlnx_cache_t    *p_cache;\r
-\r
-       p_cache = (mlnx_cache_t*)cl_pzalloc( sizeof(mlnx_cache_t) * 2 );\r
-       if( !p_cache )\r
-               return IB_INSUFFICIENT_MEMORY;\r
-\r
-       cl_spinlock_acquire(&hob_lock);\r
-       for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)\r
-       {\r
-               if (!mlnx_hob_array[idx].hh_hndl)\r
-               {\r
-                       mlnx_hob_array[idx].hh_hndl = p_hca->hh_hndl;\r
-                       mlnx_hob_array[idx].mark = E_MARK_CA;\r
-                       if (hob_pp) *hob_pp = &mlnx_hob_array[idx];\r
-                       status = IB_SUCCESS;\r
-                       break;\r
-               }\r
-       }\r
-       cl_spinlock_release(&hob_lock);\r
-\r
-       if (IB_SUCCESS == status)\r
-               (*hob_pp)->cache = p_cache;\r
-       else\r
-               cl_free( p_cache );\r
-\r
-       return status;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-ib_api_status_t\r
-mlnx_hobs_set_cb(\r
-       IN                              mlnx_hob_t                                      *hob_p, \r
-       IN                              ci_completion_cb_t                      comp_cb_p,\r
-       IN                              ci_async_event_cb_t                     async_cb_p,\r
-       IN              const   void* const                                     ib_context)\r
-{\r
-       cl_status_t             cl_status;\r
-\r
-       // Verify handle\r
-       CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
-\r
-       // Setup the callbacks\r
-       if (!hob_p->async_proc_mgr_p)\r
-       {\r
-               hob_p->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) );\r
-               if( !hob_p->async_proc_mgr_p )\r
-               {\r
-                       return IB_INSUFFICIENT_MEMORY;\r
-               }\r
-               cl_async_proc_construct( hob_p->async_proc_mgr_p );\r
-               cl_status = cl_async_proc_init( hob_p->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" );\r
-               if( cl_status != CL_SUCCESS )\r
-               {\r
-                       cl_async_proc_destroy( hob_p->async_proc_mgr_p );\r
-                       cl_free(hob_p->async_proc_mgr_p);\r
-                       hob_p->async_proc_mgr_p = NULL;\r
-                       return IB_INSUFFICIENT_RESOURCES;\r
-               }\r
-       }\r
-\r
-       if (hob_p->hh_hndl)\r
-       {\r
-               THH_hob_set_async_eventh(hob_p->hh_hndl,\r
-                       mlnx_async_cb,\r
-                       &hob_p->index); // This is the context our CB wants to receive\r
-               THH_hob_set_comp_eventh( hob_p->hh_hndl,\r
-                       mlnx_comp_cb,\r
-                       &hob_p->index); // This is the context our CB wants to receive\r
-               hob_p->comp_cb_p  = comp_cb_p;\r
-               hob_p->async_cb_p = async_cb_p;\r
-               hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL\r
-               CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hca_idx %d context 0x%p\n", hob_p - mlnx_hob_array, ib_context));\r
-               return IB_SUCCESS;\r
-       }\r
-       return IB_ERROR;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-ib_api_status_t\r
-mlnx_hobs_get_context(\r
-       IN                              mlnx_hob_t                                      *hob_p,\r
-               OUT                     void                                            **context_p)\r
-{\r
-       // Verify handle\r
-       CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
-\r
-       if (hob_p->hh_hndl)\r
-       {\r
-               if (context_p) *context_p = &hob_p->index;\r
-               return IB_SUCCESS;\r
-       }\r
-       return IB_ERROR;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-void\r
-mlnx_hobs_remove(\r
-       IN                              mlnx_hob_t                                      *hob_p)\r
-{\r
-       cl_async_proc_t *p_async_proc;\r
-       mlnx_cache_t    *p_cache;\r
-\r
-       // Verify handle\r
-       CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
-\r
-       cl_spinlock_acquire( &hob_lock );\r
-\r
-       hob_p->mark = E_MARK_INVALID;\r
-\r
-       p_async_proc = hob_p->async_proc_mgr_p;\r
-       hob_p->async_proc_mgr_p = NULL;\r
-\r
-       p_cache = hob_p->cache;\r
-       hob_p->cache = NULL;\r
-\r
-       hob_p->hh_hndl = NULL;\r
-       hob_p->comp_cb_p = NULL;\r
-       hob_p->async_cb_p = NULL;\r
-       hob_p->ca_context = NULL;\r
-       hob_p->cl_device_h = NULL;\r
-\r
-       cl_spinlock_release( &hob_lock );\r
-\r
-       if( p_async_proc )\r
-       {\r
-               cl_async_proc_destroy( p_async_proc );\r
-               cl_free( p_async_proc );\r
-       }\r
-\r
-       if( p_cache )\r
-               cl_free( p_cache );\r
-\r
-       CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hobs_remove idx %d hh_hndl 0x%p\n", hob_p - mlnx_hob_array, hob_p->hh_hndl));\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-ib_api_status_t\r
-mlnx_hobs_lookup(\r
-       IN                              HH_hca_hndl_t                           hndl,\r
-               OUT                     mlnx_hob_t                                      **hca_p)\r
-{\r
-       u_int32_t idx;\r
-\r
-       if (!hca_p)\r
-               return IB_ERROR;\r
-\r
-       cl_spinlock_acquire( &hob_lock );\r
-       for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)\r
-       {\r
-               if (hndl == mlnx_hob_array[idx].hh_hndl)\r
-               {\r
-                       *hca_p = &mlnx_hob_array[idx];\r
-                       cl_spinlock_release( &hob_lock );\r
-                       return IB_SUCCESS;\r
-               }\r
-       }\r
-       cl_spinlock_release( &hob_lock );\r
-       return IB_ERROR;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-void\r
-mlnx_hobs_get_handle(\r
-       IN                              mlnx_hob_t                                      *hob_p,\r
-               OUT                     HH_hca_hndl_t                           *hndl_p)\r
-{\r
-       // Verify handle\r
-       CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
-\r
-       if (hndl_p)\r
-               *hndl_p = hob_p->hh_hndl;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-mlnx_hobul_t *\r
-mlnx_hobs_get_hobul(\r
-       IN                              mlnx_hob_t                                      *hob_p)\r
-{\r
-       // Verify handle\r
-       if ((hob_p - mlnx_hob_array) >= MLNX_NUM_HOBKL)\r
-               return NULL;\r
-\r
-       return mlnx_hobul_array[hob_p->index];\r
-}\r
-\r
-\r
-static int priv_ceil_log2(u_int32_t n)\r
-{\r
-       int shift;\r
-\r
-       for (shift = 31; shift >0; shift--)\r
-               if (n & (1 << shift)) break;\r
-\r
-       if (((unsigned)1 << shift) < n) shift++;\r
-\r
-       return shift;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-// ### HOBUL\r
-/////////////////////////////////////////////////////////\r
-ib_api_status_t\r
-mlnx_hobul_new(\r
-       IN                              mlnx_hob_t                                      *hob_p,\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN                              void                                            *resources_p)\r
-{\r
-       mlnx_hobul_t            *hobul_p;\r
-       HH_hca_dev_t            *hca_ul_info;\r
-       ib_api_status_t         status;\r
-       VAPI_hca_cap_t          hca_caps;\r
-       u_int32_t                       i;\r
-#if MLNX_COMP_MODEL == 1\r
-       static uint32_t         proc_num = 0;\r
-#endif\r
-\r
-       // Verify handle\r
-       CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
-\r
-       if (NULL == (hobul_p = cl_zalloc( sizeof(mlnx_hobul_t))))\r
-               return IB_INSUFFICIENT_MEMORY;\r
-\r
-       // The following will NULL all pointers/sizes (used in cleanup)\r
-//     cl_memclr(hobul_p, sizeof (mlnx_hobul_t));\r
-\r
-       hobul_p->hh_hndl = hh_hndl;\r
-\r
-       if (HH_OK != THHUL_hob_create(resources_p, hh_hndl->dev_id, &hobul_p->hhul_hndl))\r
-       {\r
-               status = IB_INSUFFICIENT_RESOURCES;\r
-               goto cleanup;\r
-       }\r
-\r
-       hca_ul_info = (HH_hca_dev_t *)hh_hndl;\r
-\r
-       if (hca_ul_info)\r
-       {\r
-               hobul_p->vendor_id = hca_ul_info->vendor_id;\r
-               hobul_p->device_id = hca_ul_info->dev_id;\r
-               hobul_p->hca_ul_resources_p = resources_p;\r
-               hobul_p->cq_ul_resources_sz = hca_ul_info->cq_ul_resources_sz;\r
-               hobul_p->qp_ul_resources_sz = hca_ul_info->qp_ul_resources_sz;\r
-               hobul_p->pd_ul_resources_sz = hca_ul_info->pd_ul_resources_sz;\r
-       }\r
-\r
-       if (HH_OK != THH_hob_query(hh_hndl, &hca_caps))\r
-       {\r
-               status = IB_ERROR;\r
-               goto cleanup;\r
-       }\r
-\r
-       hobul_p->cq_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_cq));\r
-       hobul_p->qp_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_qp)); // Currently mask = 0xFFFF\r
-       hobul_p->max_pd = MASK32(priv_ceil_log2(hca_caps.max_pd_num)) + 1;\r
-       hobul_p->max_cq = hobul_p->cq_idx_mask + 1;\r
-       hobul_p->max_qp = hobul_p->qp_idx_mask + 1;\r
-\r
-       CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: sizes cq 0%x qp 0%x pd 0%x\n", hca_caps.max_num_cq, hca_caps.max_num_qp, hca_caps.max_pd_num));\r
-\r
-       /* create and initialize the data stucture for CQs */\r
-       hobul_p->cq_info_tbl = cl_zalloc(hobul_p->max_cq * sizeof (cq_info_t));\r
-\r
-       /* create and initialize the data stucture for QPs */\r
-       hobul_p->qp_info_tbl = cl_zalloc(hobul_p->max_qp * sizeof (qp_info_t));\r
-\r
-       /* create and initialize the data stucture for PDs */\r
-       hobul_p->pd_info_tbl = cl_zalloc(hobul_p->max_pd * sizeof (pd_info_t));\r
-\r
-       CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: alloc failed?  cq=%d qp=%d pd=%d\n",\r
-               !hobul_p->cq_info_tbl, !hobul_p->qp_info_tbl, !hobul_p->pd_info_tbl));\r
-\r
-       if (!hobul_p->pd_info_tbl ||\r
-               !hobul_p->qp_info_tbl ||\r
-               !hobul_p->cq_info_tbl)\r
-       {\r
-               status = IB_INSUFFICIENT_MEMORY;\r
-               goto cleanup;\r
-       }\r
-\r
-       /* Initialize all mutexes. */\r
-       for( i = 0; i < hobul_p->max_cq; i++ )\r
-       {\r
-               cl_mutex_construct( &hobul_p->cq_info_tbl[i].mutex );\r
-#if MLNX_COMP_MODEL\r
-               KeInitializeDpc( &hobul_p->cq_info_tbl[i].dpc,\r
-                       mlnx_comp_dpc, &hobul_p->cq_info_tbl[i] );\r
-#if MLNX_COMP_MODEL == 1\r
-               KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[i].dpc,\r
-                       (CCHAR)(proc_num++ % cl_proc_count()) );\r
-#endif /* MLNX_COMP_MODEL == 1 */\r
-#endif /* MLNX_COMP_MODEL */\r
-       }\r
-\r
-       for( i = 0; i < hobul_p->max_qp; i++ )\r
-               cl_mutex_construct( &hobul_p->qp_info_tbl[i].mutex );\r
-\r
-       for( i = 0; i < hobul_p->max_pd; i++ )\r
-               cl_mutex_construct( &hobul_p->pd_info_tbl[i].mutex );\r
-\r
-       for( i = 0; i < hobul_p->max_cq; i++ )\r
-       {\r
-               if( cl_mutex_init( &hobul_p->cq_info_tbl[i].mutex ) != CL_SUCCESS )\r
-               {\r
-                       status = IB_ERROR;\r
-                       goto cleanup;\r
-               }\r
-       }\r
-\r
-       for( i = 0; i < hobul_p->max_qp; i++ )\r
-       {\r
-               if( cl_mutex_init( &hobul_p->qp_info_tbl[i].mutex ) != CL_SUCCESS )\r
-               {\r
-                       status = IB_ERROR;\r
-                       goto cleanup;\r
-               }\r
-       }\r
-\r
-       for( i = 0; i < hobul_p->max_pd; i++ )\r
-       {\r
-               if( cl_mutex_init( &hobul_p->pd_info_tbl[i].mutex ) != CL_SUCCESS )\r
-               {\r
-                       status = IB_ERROR;\r
-                       goto cleanup;\r
-               }\r
-       }\r
-\r
-       hobul_p->log2_mpt_size = ((THH_hca_ul_resources_t *)resources_p)->log2_mpt_size;\r
-       CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("log2_mpt_size = %d\n", hobul_p->log2_mpt_size));\r
-\r
-       cl_spinlock_acquire(&hob_lock);\r
-       mlnx_hobul_array[hob_p->index] = hobul_p;\r
-       cl_spinlock_release(&hob_lock);\r
-\r
-       return IB_SUCCESS;\r
-\r
-cleanup:\r
-       if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl );\r
-       if (hobul_p->pd_info_tbl)\r
-       {\r
-               for( i = 0; i < hobul_p->max_pd; i++ )\r
-                       cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex );\r
-               cl_free(hobul_p->pd_info_tbl);\r
-       }\r
-       if (hobul_p->qp_info_tbl)\r
-       {\r
-               for( i = 0; i < hobul_p->max_qp; i++ )\r
-                       cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex );\r
-               cl_free(hobul_p->qp_info_tbl);\r
-       }\r
-       if (hobul_p->cq_info_tbl)\r
-       {\r
-               for( i = 0; i < hobul_p->max_cq; i++ )\r
-                       cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex );\r
-               cl_free(hobul_p->cq_info_tbl);\r
-       }\r
-       if (hobul_p) cl_free( hobul_p);\r
-       return status;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-void\r
-mlnx_hobul_get(\r
-       IN                              mlnx_hob_t                                      *hob_p,\r
-               OUT                     void                                            **resources_p )\r
-{\r
-       mlnx_hobul_t            *hobul_p;\r
-\r
-       // Verify handle\r
-       CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
-\r
-       hobul_p = mlnx_hobul_array[hob_p->index];\r
-\r
-       if (hobul_p && resources_p)\r
-       {\r
-               *resources_p = hobul_p->hca_ul_resources_p;\r
-       }\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-void\r
-mlnx_hobul_delete(\r
-       IN                              mlnx_hob_t                                      *hob_p)\r
-{\r
-       mlnx_hobul_t            *hobul_p;\r
-       u_int32_t                       i;\r
-\r
-       // Verify handle\r
-       CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);\r
-\r
-       cl_spinlock_acquire(&hob_lock);\r
-       hobul_p = mlnx_hobul_array[hob_p->index];\r
-       mlnx_hobul_array[hob_p->index] = NULL;\r
-       cl_spinlock_release(&hob_lock);\r
-\r
-       if (!hobul_p) return;\r
-\r
-       if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl );\r
-       if (hobul_p->pd_info_tbl)\r
-       {\r
-               for( i = 0; i < hobul_p->max_pd; i++ )\r
-                       cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex );\r
-               cl_free(hobul_p->pd_info_tbl);\r
-       }\r
-       if (hobul_p->qp_info_tbl)\r
-       {\r
-               for( i = 0; i < hobul_p->max_qp; i++ )\r
-                       cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex );\r
-               cl_free(hobul_p->qp_info_tbl);\r
-       }\r
-       if (hobul_p->cq_info_tbl)\r
-       {\r
-               for( i = 0; i < hobul_p->max_cq; i++ )\r
-               {\r
-                       KeRemoveQueueDpc( &hobul_p->cq_info_tbl[i].dpc );\r
-                       cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex );\r
-               }\r
-               cl_free(hobul_p->cq_info_tbl);\r
-       }\r
-       if (hobul_p) cl_free( hobul_p);\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-// ### Callbacks\r
-/////////////////////////////////////////////////////////\r
-\r
-ib_async_event_t\r
-mlnx_map_vapi_event_type(\r
-       IN                              unsigned                                        event_id,\r
-               OUT                     ENUM_EVENT_CLASS                        *event_class_p)\r
-{\r
-       switch (event_id)\r
-       {\r
-       case VAPI_QP_PATH_MIGRATED:\r
-               if (event_class_p) *event_class_p = E_EV_QP;\r
-               return IB_AE_QP_APM;\r
-\r
-       case VAPI_QP_COMM_ESTABLISHED:\r
-               if (event_class_p) *event_class_p = E_EV_QP;\r
-               return IB_AE_QP_COMM;\r
-\r
-       case VAPI_SEND_QUEUE_DRAINED:\r
-               if (event_class_p) *event_class_p = E_EV_QP;\r
-               return IB_AE_SQ_DRAINED;\r
-\r
-       case VAPI_CQ_ERROR:\r
-               if (event_class_p) *event_class_p = E_EV_CQ;\r
-               return IB_AE_CQ_ERROR;\r
-\r
-       case VAPI_LOCAL_WQ_INV_REQUEST_ERROR:\r
-               if (event_class_p) *event_class_p = E_EV_QP;\r
-               return IB_AE_WQ_REQ_ERROR;\r
-\r
-       case VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR:\r
-               if (event_class_p) *event_class_p = E_EV_QP;\r
-               return IB_AE_WQ_ACCESS_ERROR;\r
-\r
-       case VAPI_LOCAL_WQ_CATASTROPHIC_ERROR:\r
-               if (event_class_p) *event_class_p = E_EV_QP;\r
-               return IB_AE_QP_FATAL;\r
-\r
-       case VAPI_PATH_MIG_REQ_ERROR:\r
-               if (event_class_p) *event_class_p = E_EV_QP;\r
-               return IB_AE_QP_APM_ERROR;\r
-\r
-       case VAPI_LOCAL_CATASTROPHIC_ERROR:\r
-               if (event_class_p) *event_class_p = E_EV_CA;\r
-               return IB_AE_LOCAL_FATAL;\r
-\r
-       case VAPI_PORT_ERROR:\r
-               /*\r
-                * In tavor_hca\src\Hca\hcahal\tavor\eventp\event_irqh.c:\r
-                * TAVOR_IF_EV_TYPE_PORT_ERR maps one of two port events:\r
-                *      - TAVOR_IF_SUB_EV_PORT_DOWN\r
-                *      - TAVOR_IF_SUB_EV_PORT_UP\r
-                * \r
-                * These map to (respectively)\r
-                *      - VAPI_PORT_ERROR\r
-                *      - VAPI_PORT_ACTIVE\r
-                */\r
-               if (event_class_p) *event_class_p = E_EV_CA;\r
-               return IB_AE_PORT_DOWN; /* INIT, ARMED, DOWN */\r
-\r
-       case VAPI_PORT_ACTIVE:\r
-               if (event_class_p) *event_class_p = E_EV_CA;\r
-               return IB_AE_PORT_ACTIVE; /* ACTIVE STATE */\r
-\r
-       case VAPI_CLIENT_REREGISTER:\r
-               if (event_class_p) *event_class_p = E_EV_CA;\r
-               return IB_AE_CLIENT_REREGISTER; /* ACTIVE STATE */\r
-\r
-       default:\r
-               CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n",\r
-                       event_id, VAPI_PORT_ACTIVE, IB_AE_LOCAL_FATAL));\r
-               if (event_class_p) *event_class_p = E_EV_CA;\r
-               return IB_AE_LOCAL_FATAL;\r
-       }\r
-}\r
-\r
-void\r
-mlnx_conv_vapi_event(\r
-       IN                              HH_event_record_t                       *hh_event_p,\r
-       IN                              ib_event_rec_t                          *ib_event_p,\r
-               OUT                     ENUM_EVENT_CLASS                        *event_class_p)\r
-{\r
-\r
-       // ib_event_p->context is handled by the caller\r
-       //\r
-       ib_event_p->type = mlnx_map_vapi_event_type(hh_event_p->etype, event_class_p);\r
-\r
-       // no traps currently generated\r
-       // ib_event_p->trap_info.lid  =  ;\r
-       // ib_event_p->trap_info.port_guid = ;\r
-       // ib_event_p->trap_info.port_num  = hh_er;\r
-}\r
-\r
-void\r
-mlnx_async_cb(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN                              HH_event_record_t                       *hh_er_p,\r
-       IN                              void                                            *private_data)\r
-{\r
-       u_int32_t                       obj_idx;\r
-       mlnx_hob_t                      *hob_p;\r
-\r
-       mlnx_cb_data_t          cb_data;\r
-       mlnx_cb_data_t          *cb_data_p;\r
-\r
-       CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC CB %p (0x%x)\n",\r
-               private_data, (private_data) ? *(u_int32_t *)private_data : 0xB5));\r
-\r
-       if (!private_data || !hh_er_p) return;\r
-\r
-       obj_idx =  *(u_int32_t *)private_data;\r
-       if (obj_idx >= MLNX_NUM_HOBKL) return;\r
-\r
-       hob_p = mlnx_hob_array + obj_idx;\r
-\r
-       // g_mlnx_dpc2thread will be initialized as a module paramter (default - disabled(0))\r
-       if (g_mlnx_dpc2thread)\r
-       {\r
-               cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t));\r
-               if (!cb_data_p) return;\r
-\r
-               cb_data_p->hh_hndl      = hh_hndl;\r
-               cb_data_p->private_data = private_data;\r
-               cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t));\r
-               cb_data_p->async_item.pfn_callback = mlnx_async_dpc;\r
-               cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item );\r
-       } else\r
-       {\r
-               cb_data_p = &cb_data;\r
-\r
-               cb_data_p->hh_hndl      = hh_hndl;\r
-               cb_data_p->private_data = private_data;\r
-               cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t));\r
-               mlnx_async_dpc( &cb_data_p->async_item );\r
-       }\r
-}\r
-\r
-static void\r
-mlnx_async_dpc(\r
-       IN                              cl_async_proc_item_t            *async_item_p )\r
-{\r
-       HH_event_record_t       *hh_er_p;\r
-       u_int32_t                       obj_idx;\r
-       mlnx_hob_t                      *hob_p;\r
-       mlnx_hobul_t            *hobul_p;\r
-       mlnx_cb_data_t          *cb_data_p;\r
-\r
-       ENUM_EVENT_CLASS        event_class;\r
-       ib_event_rec_t          event_r;\r
-\r
-       CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("ASYNC DPC %p\n", async_item_p));\r
-\r
-       cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item );\r
-\r
-       if (!cb_data_p) return;\r
-\r
-       hh_er_p =  &cb_data_p->hh_er;\r
-       obj_idx =  *(u_int32_t *)cb_data_p->private_data;\r
-       hob_p = mlnx_hob_array + obj_idx;\r
-       hobul_p = mlnx_hobul_array[obj_idx];\r
-\r
-       CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC type %d ca_context %p\n",\r
-               hh_er_p->etype, hob_p->ca_context));\r
-\r
-       if (!hob_p ||\r
-               !hobul_p ||\r
-               !hob_p->hh_hndl ||\r
-               !hob_p->async_cb_p)\r
-       {\r
-               goto cleanup;\r
-       }\r
-\r
-       cl_memclr(&event_r, sizeof(event_r));\r
-       mlnx_conv_vapi_event(hh_er_p, &event_r, &event_class);\r
-\r
-       switch(event_class)\r
-       {\r
-       case E_EV_CA:\r
-               event_r.context = (void *)hob_p->ca_context;\r
-               break;\r
-\r
-       case E_EV_QP:\r
-               {\r
-                       obj_idx = hh_er_p->event_modifier.qpn & hobul_p->qp_idx_mask;\r
-                       if (obj_idx < hobul_p->max_qp)\r
-                               event_r.context = (void *)hobul_p->qp_info_tbl[obj_idx].qp_context;\r
-                       else\r
-                       {\r
-                               CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad qpn 0x%x max 0x%x\n", obj_idx, hobul_p->max_qp));\r
-                               goto cleanup;\r
-                       }\r
-               }\r
-               break;\r
-\r
-       case E_EV_CQ:\r
-               {\r
-                       obj_idx = hh_er_p->event_modifier.cq & hobul_p->cq_idx_mask;\r
-                       if (obj_idx < hobul_p->max_cq)\r
-                               event_r.context = (void *)hobul_p->cq_info_tbl[obj_idx].cq_context;\r
-                       else\r
-                       {\r
-                               CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad cqn 0x%x max 0x%x\n", obj_idx, hobul_p->max_cq));\r
-                               goto cleanup;\r
-                       }\r
-               }\r
-               break;\r
-\r
-       case E_EV_LAST:\r
-       default:\r
-               // CL_ASSERT(0); // This shouldn't happen\r
-               CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC unknown event_class 0x%x\n", event_class));\r
-               break;\r
-       }\r
-\r
-       // Call the registered CB\r
-       (*hob_p->async_cb_p)(&event_r);\r
-       // Fall Through\r
-cleanup:\r
-       if (g_mlnx_dpc2thread)\r
-       {\r
-               cl_free(cb_data_p);\r
-       }\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-void\r
-mlnx_comp_cb(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN                              HH_cq_hndl_t                            hh_cq,\r
-       IN                              void                                            *private_data)\r
-{\r
-#if MLNX_COMP_MODEL\r
-       u_int32_t                       cq_num;\r
-       u_int32_t                       hca_idx;\r
-       mlnx_hob_t                      *hob_p;\r
-       mlnx_hobul_t            *hobul_p;\r
-#if MLNX_COMP_MODEL == 2\r
-       static uint32_t         proc_num = 0;\r
-#endif\r
-\r
-       CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data));\r
-\r
-       UNUSED_PARAM( hh_hndl );\r
-\r
-       hca_idx = *(u_int32_t *)private_data;\r
-       hob_p   = mlnx_hob_array + hca_idx;\r
-       hobul_p = mlnx_hobul_array[hca_idx];\r
-       cq_num  = hh_cq & hobul_p->cq_idx_mask;\r
-\r
-       if (NULL != hob_p && NULL != hobul_p &&\r
-               hob_p->hh_hndl && hob_p->comp_cb_p)\r
-       {\r
-               if (cq_num < hobul_p->max_cq)\r
-               {\r
-#if MLNX_COMP_MODEL == 2\r
-                       KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[cq_num].dpc,\r
-                               (CCHAR)(proc_num++ % cl_proc_count()) );\r
-#endif /* MLNX_COMP_MODEL == 2 */\r
-                       KeInsertQueueDpc( &hobul_p->cq_info_tbl[cq_num].dpc,\r
-                               hob_p, NULL );\r
-               }\r
-               else\r
-               {\r
-                       HCA_TRACE( HCA_DBG_ERROR, ("CQ index out of range!!!\n") );\r
-               }\r
-       }\r
-#else  /* MLNX_COMP_MODEL */\r
-       u_int32_t                       obj_idx;\r
-       mlnx_hob_t                      *hob_p;\r
-\r
-       mlnx_cb_data_t          cb_data;\r
-       mlnx_cb_data_t          *cb_data_p;\r
-\r
-       CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data));\r
-\r
-       if (!private_data) return;\r
-\r
-       obj_idx =  *(u_int32_t *)private_data;\r
-       hob_p = mlnx_hob_array + obj_idx;\r
-       if (!hob_p) return;\r
-\r
-       if (g_mlnx_dpc2thread)\r
-       {\r
-               cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t));\r
-               if (!cb_data_p) return;\r
-\r
-               cb_data_p->hh_hndl      = hh_hndl;\r
-               cb_data_p->hh_cq        = hh_cq;\r
-               cb_data_p->private_data = private_data;\r
-\r
-               cb_data_p->async_item.pfn_callback = mlnx_comp_dpc;\r
-\r
-               // Report completion through async_proc\r
-               cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item );\r
-\r
-       } else\r
-       {\r
-               cb_data_p = &cb_data;\r
-\r
-               cb_data_p->hh_hndl      = hh_hndl;\r
-               cb_data_p->hh_cq        = hh_cq;\r
-               cb_data_p->private_data = private_data;\r
-\r
-               // Report completion directly from DPC (verbs should NOT sleep)\r
-               mlnx_comp_dpc( &cb_data_p->async_item );\r
-       }\r
-#endif /* MLNX_COMP_MODEL */\r
-}\r
-\r
-#if MLNX_COMP_MODEL\r
-static void\r
-mlnx_comp_dpc(\r
-       IN                              PRKDPC                                          p_dpc,\r
-       IN                              void                                            *context,\r
-       IN                              void                                            *arg1,\r
-       IN                              void                                            *unused )\r
-{\r
-       mlnx_hob_t              *hob_p = (mlnx_hob_t*)arg1;\r
-       UNUSED_PARAM( p_dpc );\r
-       UNUSED_PARAM( unused );\r
-\r
-       hob_p->comp_cb_p( (void*)((cq_info_t*)context)->cq_context );\r
-}\r
-#else  /* MLNX_COMP_MODEL */\r
-static void\r
-mlnx_comp_dpc(\r
-       IN                              cl_async_proc_item_t            *async_item_p )\r
-{\r
-       u_int32_t                       cq_num;\r
-       u_int32_t                       hca_idx;\r
-       mlnx_hob_t                      *hob_p;\r
-       mlnx_hobul_t            *hobul_p;\r
-       mlnx_cb_data_t          *cb_data_p;\r
-\r
-       CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP DPC %p\n", async_item_p));\r
-\r
-       cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item );\r
-       if (!cb_data_p) return;\r
-\r
-       hca_idx = *(u_int32_t *)cb_data_p->private_data;\r
-       hob_p   = mlnx_hob_array + hca_idx;\r
-       hobul_p = mlnx_hobul_array[hca_idx];\r
-       cq_num  = (u_int32_t)cb_data_p->hh_cq & hobul_p->cq_idx_mask;\r
-\r
-       if (NULL != hob_p && NULL != hobul_p &&\r
-               hob_p->hh_hndl && hob_p->comp_cb_p)\r
-       {\r
-               if (cq_num < hobul_p->max_cq)\r
-               {\r
-                       (*hob_p->comp_cb_p)((void *)hobul_p->cq_info_tbl[cq_num].cq_context);\r
-               }\r
-       }\r
-\r
-       if (g_mlnx_dpc2thread)\r
-       {\r
-               cl_free(cb_data_p);\r
-       }\r
-}\r
-#endif /* MLNX_COMP_MODEL */\r
-\r
-// ### Conversions\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-VAPI_mrw_acl_t\r
-map_ibal_acl(\r
-       IN                              ib_access_t                                     ibal_acl)\r
-{\r
-       VAPI_mrw_acl_t          vapi_acl = 0;\r
-\r
-       if (ibal_acl & IB_AC_RDMA_READ)   vapi_acl |= VAPI_EN_REMOTE_READ;\r
-       if (ibal_acl & IB_AC_RDMA_WRITE)  vapi_acl |= VAPI_EN_REMOTE_WRITE;\r
-       if (ibal_acl & IB_AC_ATOMIC)      vapi_acl |= VAPI_EN_REMOTE_ATOM;\r
-       if (ibal_acl & IB_AC_LOCAL_WRITE) vapi_acl |= VAPI_EN_LOCAL_WRITE;\r
-       if (ibal_acl & IB_AC_MW_BIND)     vapi_acl |= VAPI_EN_MEMREG_BIND;\r
-\r
-       return vapi_acl;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-ib_access_t\r
-map_vapi_acl(\r
-       IN                              VAPI_mrw_acl_t                          vapi_acl)\r
-{\r
-       ib_access_t ibal_acl = 0;\r
-\r
-       if (vapi_acl & VAPI_EN_REMOTE_READ)  ibal_acl |= IB_AC_RDMA_READ;\r
-       if (vapi_acl & VAPI_EN_REMOTE_WRITE) ibal_acl |= IB_AC_RDMA_WRITE;\r
-       if (vapi_acl & VAPI_EN_REMOTE_ATOM)  ibal_acl |= IB_AC_ATOMIC;\r
-       if (vapi_acl & VAPI_EN_LOCAL_WRITE)  ibal_acl |= IB_AC_LOCAL_WRITE;\r
-       if (vapi_acl & VAPI_EN_MEMREG_BIND)  ibal_acl |= IB_AC_MW_BIND;\r
-\r
-       return ibal_acl;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-static VAPI_rdma_atom_acl_t \r
-map_ibal_qp_acl(\r
-       IN                              ib_access_t                                     ibal_acl)\r
-{\r
-       VAPI_rdma_atom_acl_t vapi_qp_acl = 0;\r
-\r
-       if (ibal_acl & IB_AC_RDMA_WRITE) vapi_qp_acl |= VAPI_EN_REM_WRITE;\r
-       if (ibal_acl & IB_AC_RDMA_READ)  vapi_qp_acl |= VAPI_EN_REM_READ;\r
-       if (ibal_acl & IB_AC_ATOMIC)     vapi_qp_acl |= VAPI_EN_REM_ATOMIC_OP;\r
-\r
-       return vapi_qp_acl;\r
-\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-static ib_access_t\r
-map_vapi_qp_acl(\r
-       IN                              VAPI_rdma_atom_acl_t            vapi_qp_acl)\r
-{\r
-       ib_access_t     ibal_acl = IB_AC_LOCAL_WRITE;\r
-\r
-       if (vapi_qp_acl & VAPI_EN_REM_WRITE)     ibal_acl |= IB_AC_RDMA_WRITE;\r
-       if (vapi_qp_acl & VAPI_EN_REM_READ)      ibal_acl |= IB_AC_RDMA_READ;\r
-       if (vapi_qp_acl & VAPI_EN_REM_ATOMIC_OP) ibal_acl |= IB_AC_ATOMIC;\r
-\r
-       return ibal_acl;\r
-}\r
-\r
-\r
-/////////////////////////////////////////////////////////\r
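-// Locks the region described by mro_p in the caller's (user or kernel) context.\r
-// The new iobuf is registered before any previously held iobuf is released, so\r
-// a re-registration never leaves the region unpinned.\r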
-/////////////////////////////////////////////////////////\r
-ib_api_status_t\r
-mlnx_lock_region(\r
-       IN                              mlnx_mro_t                                      *mro_p,\r
-       IN                              boolean_t                                       um_call )\r
-{\r
-       MOSAL_iobuf_t   old_iobuf;\r
-\r
-       // Find context\r
-       if( um_call )\r
-               mro_p->mr_prot_ctx = MOSAL_get_current_prot_ctx();\r
-       else\r
-               mro_p->mr_prot_ctx = MOSAL_get_kernel_prot_ctx();\r
-\r
-       // Save pointer to existing locked region.\r
-       old_iobuf = mro_p->mr_iobuf;\r
-\r
-       // Lock Region\r
-       if (MT_OK != MOSAL_iobuf_register((MT_virt_addr_t)mro_p->mr_start,\r
-               (MT_size_t)mro_p->mr_size,\r
-               mro_p->mr_prot_ctx,\r
-               mro_p->mr_mosal_perm,\r
-               &mro_p->mr_iobuf,\r
-               0 ))\r
-       {\r
-               return IB_ERROR;\r
-       }\r
-\r
-       if( old_iobuf )\r
-       {\r
-               if( MT_OK != MOSAL_iobuf_deregister( old_iobuf ) )\r
-                       return IB_ERROR;\r
-       }\r
-\r
-       return IB_SUCCESS;\r
-}\r
-\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-ib_api_status_t\r
-mlnx_conv_ibal_mr_create(\r
-       IN                              u_int32_t                                       pd_idx,\r
-       IN      OUT                     mlnx_mro_t                                      *mro_p,\r
-       IN                              VAPI_mr_change_t                        change_flags,\r
-       IN                              ib_mr_create_t const            *p_mr_create,\r
-       IN                              boolean_t                                       um_call,\r
-               OUT                     HH_mr_t                                         *mr_props_p )\r
-{\r
-       ib_api_status_t         status;\r
-\r
-       /* Set ACL information first since it is used to lock the region. */\r
-       if( change_flags & VAPI_MR_CHANGE_ACL )\r
-       {\r
-               mro_p->mr_acl = map_ibal_acl( p_mr_create->access_ctrl );\r
-               // This computation should be externalized by THH\r
-               mro_p->mr_mosal_perm =\r
-                       MOSAL_PERM_READ |\r
-                       ((mro_p->mr_acl & VAPI_EN_LOCAL_WRITE) ? MOSAL_PERM_WRITE : 0);\r
-       }\r
-\r
-       if( change_flags & VAPI_MR_CHANGE_TRANS )\r
-       {\r
-               CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("addr 0x%p size %"PRId64"\n", (void *)p_mr_create->vaddr, p_mr_create->length));\r
-               // Build TPT entries\r
-               mro_p->mr_start = (IB_virt_addr_t)p_mr_create->vaddr;\r
-               mro_p->mr_size = p_mr_create->length;\r
-               if (IB_SUCCESS != (status = mlnx_lock_region(mro_p, um_call)))\r
-               {\r
-                       return status;\r
-               }\r
-       }\r
-\r
-       /* Now fill in the MR properties. */\r
-       mr_props_p->start = mro_p->mr_start;\r
-       mr_props_p->size = mro_p->mr_size;\r
-       mr_props_p->acl = mro_p->mr_acl;\r
-       mr_props_p->pd = pd_idx;\r
-\r
-       // Setup MTT info\r
-       mr_props_p->tpt.tpt_type = HH_TPT_IOBUF;\r
-       mr_props_p->tpt.tpt.iobuf = mro_p->mr_iobuf;\r
-\r
-       return IB_SUCCESS;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-// On entry mro_p->mr_start holds the pmr address\r
-/////////////////////////////////////////////////////////\r
-ib_api_status_t\r
-mlnx_conv_ibal_pmr_create(\r
-       IN                              u_int32_t                                       pd_idx,\r
-       IN                              mlnx_mro_t                                      *mro_p,\r
-       IN                              ib_phys_create_t const          *p_pmr_create,\r
-               OUT                     HH_mr_t                                         *mr_props_p )\r
-{\r
-       VAPI_phy_addr_t*        buf_lst = NULL;\r
-       VAPI_size_t*            sz_lst = NULL;\r
-       u_int32_t                       i;\r
-       u_int32_t                       page_shift = priv_ceil_log2(p_pmr_create->hca_page_size);\r
-       u_int64_t                       page_mask = ((u_int64_t)1 << page_shift) - 1;\r
-       u_int64_t                       tot_sz = 0;\r
-\r
-       CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, \r
-               ("PRE: addr %p size 0x%"PRIx64" shift %d\n",\r
-               (void *)(uintn_t)mro_p->mr_start, p_pmr_create->length, page_shift));\r
-       mro_p->mr_start = (mro_p->mr_start & ~page_mask) | (p_pmr_create->buf_offset & page_mask);\r
-       CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, \r
-               ("POST: addr %p\n", (void *)(uintn_t)mro_p->mr_start));\r
-\r
-       mr_props_p->start = mro_p->mr_start;\r
-       mr_props_p->size = p_pmr_create->length;\r
-       mr_props_p->acl = map_ibal_acl(p_pmr_create->access_ctrl);\r
-       mr_props_p->pd = pd_idx;\r
-\r
-#ifdef _DEBUG_\r
-       mro_p->mr_size           = mr_props_p->size;\r
-//     mro_p->mr_first_page_addr = 0;\r
-//     mro_p->mr_num_pages       = (mro_p->mr_end >> PAGESHIFT) + 1 - (mro_p->mr_start >> PAGESHIFT);\r
-//     CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st pg addr 0x%p pages %d\n",\r
-//             (void *)mro_p->mr_first_page_addr, p_pmr_create->num_bufs));\r
-       CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st phys addr 0x%"PRIx64" phys pages %d\n",\r
-               p_pmr_create->range_array[0].base_addr, p_pmr_create->num_ranges));\r
-#endif\r
-\r
-       // Build TPT entries\r
-       if (!p_pmr_create->range_array)\r
-       {\r
-               return IB_INVALID_PARAMETER;\r
-       }\r
-\r
-       if (p_pmr_create->hca_page_size !=\r
-               MT_DOWN_ALIGNX_PHYS(p_pmr_create->hca_page_size, page_shift))\r
-       {\r
-               CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf size is not page aligned\n"));\r
-               return IB_INVALID_PARAMETER;\r
-       }\r
-\r
-       for (i = 0; i < p_pmr_create->num_ranges; i++)\r
-       {\r
-               uint64_t        start_addr = p_pmr_create->range_array[i].base_addr;\r
-               uint64_t        end_addr = start_addr + p_pmr_create->range_array[i].size;\r
-\r
-               if( end_addr < start_addr ) {\r
-                       CL_TRACE( CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf end < start\n") );\r
-                       return IB_INVALID_PARAMETER;\r
-               }\r
-\r
-               if (start_addr !=\r
-                       MT_DOWN_ALIGNX_PHYS(start_addr, page_shift))\r
-               {\r
-                       CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf start adrs is not page aligned\n"));\r
-                       return IB_INVALID_PARAMETER;\r
-               }\r
-\r
-               tot_sz += p_pmr_create->range_array[i].size;\r
-       }\r
-\r
-       if( tot_sz < p_pmr_create->length + p_pmr_create->buf_offset )\r
-       {\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR, \r
-                       ("length(0x%"PRIx64") + buf offset(0x%"PRIx64") larger than sum "\r
-                       "of phys ranges(0x%"PRIx64")\n",\r
-                       p_pmr_create->length, p_pmr_create->buf_offset, tot_sz) );\r
-               return IB_INVALID_PARAMETER;\r
-       }\r
-\r
-       if( p_pmr_create->buf_offset > p_pmr_create->range_array[0].size )\r
-       {\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                       ("buf offset(0x%x) > 1st phys range size(0x%"PRIx64")\n",\r
-                       p_pmr_create->buf_offset, p_pmr_create->range_array[0].size) );\r
-               return IB_INVALID_PARAMETER;\r
-       }\r
-\r
-       /* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */\r
-       buf_lst = (VAPI_phy_addr_t*)cl_pzalloc( sizeof(VAPI_phy_addr_t)*(p_pmr_create->num_ranges));\r
-       if (!buf_lst)\r
-       {\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                       ("Failed to allocate range address list.\n") );\r
-               return IB_INSUFFICIENT_MEMORY;\r
-       }\r
-\r
-\r
-       /* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */\r
-       sz_lst = (VAPI_size_t*)cl_pzalloc( sizeof(VAPI_size_t)*(p_pmr_create->num_ranges));\r
-       if (!sz_lst)\r
-       {\r
-               cl_free( buf_lst );\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                       ("Failed to allocate range size list.\n") );\r
-               return IB_INSUFFICIENT_MEMORY;\r
-       }\r
-\r
-       for (i = 0; i < p_pmr_create->num_ranges; i++)\r
-       {\r
-               buf_lst[i] = p_pmr_create->range_array[i].base_addr;\r
-               sz_lst[i] = p_pmr_create->range_array[i].size;\r
-       }\r
-\r
-       mr_props_p->tpt.tpt_type = HH_TPT_BUF;\r
-       mr_props_p->tpt.num_entries = p_pmr_create->num_ranges;\r
-       mr_props_p->tpt.tpt.buf_lst.buf_sz_lst = sz_lst;\r
-       mr_props_p->tpt.tpt.buf_lst.phys_buf_lst = buf_lst; \r
-       mr_props_p->tpt.tpt.buf_lst.iova_offset = p_pmr_create->buf_offset;\r
-\r
-       return IB_SUCCESS;\r
-}\r
-\r
-\r
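-// Linearly scans the port's GID table (up to 64 entries) for raw_gid and\r
-// returns its index; returns 0 if the GID is not found.\r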
-u_int8_t\r
-mlnx_gid_to_index(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN                              u_int8_t                                        port_num,\r
-       IN                              u_int8_t                                        *raw_gid)\r
-{\r
-       ib_gid_t        *gid_table_p = NULL;\r
-       u_int8_t        index = 0; // default return value\r
-       u_int8_t        i;\r
-\r
-       gid_table_p = cl_zalloc( 64*sizeof(ib_gid_t));\r
-       if (!gid_table_p)\r
-               return index;\r
-\r
-       mlnx_get_hca_gid_tbl(hh_hndl, port_num, 64, gid_table_p);\r
-\r
-       for (i = 0; i < 64; i++)\r
-       {\r
-               if (!cl_memcmp(raw_gid, gid_table_p[i].raw, sizeof(ib_gid_t)))\r
-               {\r
-                       CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("1: found GID at index %d\n", i));\r
-                       index = i;\r
-                       break;\r
-               }\r
-       }\r
-\r
-       CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("2: found GID at index %d\n", index));\r
-\r
-       cl_free( gid_table_p);\r
-       return index;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-void\r
-mlnx_conv_ibal_av(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN              const   ib_av_attr_t                            *ibal_av_p,\r
-               OUT                     VAPI_ud_av_t                            *vapi_av_p)\r
-{\r
-       vapi_av_p->port = ibal_av_p->port_num;\r
-       vapi_av_p->sl   = ibal_av_p->sl;\r
-       vapi_av_p->dlid = cl_ntoh16 (ibal_av_p->dlid);\r
-\r
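-       // static_rate is an inter-packet delay code: 0 (full rate) for 10 Gb/s\r
-       // paths, 3 (quarter rate) for anything slower.\r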
-       vapi_av_p->static_rate   =\r
-               (ibal_av_p->static_rate == IB_PATH_RECORD_RATE_10_GBS? 0 : 3);\r
-       ib_grh_get_ver_class_flow( ibal_av_p->grh.ver_class_flow, NULL,\r
-               &vapi_av_p->traffic_class, &vapi_av_p->flow_label );\r
-       vapi_av_p->src_path_bits = ibal_av_p->path_bits; // PATH:\r
-       //vapi_av_p->src_path_bits = 0;\r
-\r
-       /* For global destination or Multicast address:*/\r
-       if (ibal_av_p->grh_valid)\r
-       {\r
-               vapi_av_p->grh_flag = TRUE;\r
-               vapi_av_p->hop_limit     = ibal_av_p->grh.hop_limit;\r
-               // CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("raw %p, &raw %p\n", ibal_av_p->grh.src_gid.raw, &ibal_av_p->grh.src_gid.raw));\r
-               vapi_av_p->sgid_index    = mlnx_gid_to_index(hh_hndl, ibal_av_p->port_num, (u_int8_t *)ibal_av_p->grh.src_gid.raw);\r
-               cl_memcpy(vapi_av_p->dgid, ibal_av_p->grh.dest_gid.raw, sizeof(vapi_av_p->dgid));\r
-       }\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-void\r
-mlnx_conv_vapi_av(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN              const   VAPI_ud_av_t                            *vapi_av_p,\r
-               OUT                     ib_av_attr_t                            *ibal_av_p)\r
-{\r
-       uint8_t         ver;\r
-\r
-       ibal_av_p->port_num = vapi_av_p->port;\r
-       ibal_av_p->sl       = vapi_av_p->sl;\r
-       ibal_av_p->dlid     = cl_ntoh16(vapi_av_p->dlid);\r
-\r
-       /* For global destination or Multicast address:*/\r
-       ibal_av_p->grh_valid = vapi_av_p->grh_flag;\r
-\r
-       ver = 2;\r
-       ibal_av_p->grh.ver_class_flow = ib_grh_set_ver_class_flow( ver,\r
-               vapi_av_p->traffic_class,\r
-               vapi_av_p->flow_label);\r
-       ibal_av_p->grh.hop_limit = vapi_av_p->hop_limit;\r
-\r
-       THH_hob_get_sgid(hh_hndl,\r
-               vapi_av_p->port,\r
-               vapi_av_p->sgid_index,\r
-               &ibal_av_p->grh.src_gid.raw);\r
-\r
-       cl_memcpy(ibal_av_p->grh.dest_gid.raw, vapi_av_p->dgid, sizeof(vapi_av_p->dgid));\r
-\r
-       ibal_av_p->static_rate = (vapi_av_p->static_rate?\r
-               IB_PATH_RECORD_RATE_2_5_GBS : IB_PATH_RECORD_RATE_10_GBS);\r
-       ibal_av_p->path_bits   = vapi_av_p->src_path_bits;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-int\r
-mlnx_map_vapi_cqe_status(\r
-       IN                              VAPI_wc_status_t                        vapi_status)\r
-{\r
-       switch (vapi_status)\r
-       {\r
-       case IB_COMP_SUCCESS:           return IB_WCS_SUCCESS;\r
-       case IB_COMP_LOC_LEN_ERR:       return IB_WCS_LOCAL_LEN_ERR;\r
-       case IB_COMP_LOC_QP_OP_ERR:     return IB_WCS_LOCAL_OP_ERR;\r
-       case IB_COMP_LOC_PROT_ERR:      return IB_WCS_LOCAL_PROTECTION_ERR;\r
-       case IB_COMP_WR_FLUSH_ERR:      return IB_WCS_WR_FLUSHED_ERR;\r
-       case IB_COMP_MW_BIND_ERR:       return IB_WCS_MEM_WINDOW_BIND_ERR;\r
-       case IB_COMP_REM_INV_REQ_ERR:   return IB_WCS_REM_INVALID_REQ_ERR;\r
-       case IB_COMP_REM_ACCESS_ERR:    return IB_WCS_REM_ACCESS_ERR;\r
-       case IB_COMP_REM_OP_ERR:        return IB_WCS_REM_OP_ERR;\r
-       case IB_COMP_RETRY_EXC_ERR:     return IB_WCS_TIMEOUT_RETRY_ERR;\r
-       case IB_COMP_RNR_RETRY_EXC_ERR: return IB_WCS_RNR_RETRY_ERR;\r
-       case IB_COMP_REM_ABORT_ERR:     return IB_WCS_REM_ACCESS_ERR; // ???\r
-       case IB_COMP_FATAL_ERR:         return IB_WCS_REM_ACCESS_ERR; // ???\r
-       case IB_COMP_GENERAL_ERR:       return IB_WCS_REM_ACCESS_ERR; // ???\r
-       default:\r
-               CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n",\r
-                       vapi_status, IB_COMP_GENERAL_ERR, IB_WCS_REM_ACCESS_ERR));\r
-               return IB_WCS_REM_ACCESS_ERR;\r
-       }\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-int\r
-mlnx_map_vapi_cqe_type(\r
-       IN                              VAPI_cqe_opcode_t                       opcode)\r
-{\r
-       switch (opcode)\r
-       {\r
-       case VAPI_CQE_SQ_SEND_DATA:     return IB_WC_SEND;\r
-       case VAPI_CQE_SQ_RDMA_WRITE:    return IB_WC_RDMA_WRITE;\r
-       case VAPI_CQE_SQ_RDMA_READ:     return IB_WC_RDMA_READ;\r
-       case VAPI_CQE_SQ_COMP_SWAP:     return IB_WC_COMPARE_SWAP;\r
-       case VAPI_CQE_SQ_FETCH_ADD:     return IB_WC_FETCH_ADD;\r
-       case VAPI_CQE_SQ_BIND_MRW:      return IB_WC_MW_BIND;\r
-       case VAPI_CQE_RQ_SEND_DATA:     return IB_WC_RECV;\r
-       case VAPI_CQE_RQ_RDMA_WITH_IMM: return IB_WC_RECV_RDMA_WRITE;\r
-       default:\r
-               return IB_WC_SEND;\r
-       }\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-// Map Remote Node Addr Type\r
-/////////////////////////////////////////////////////////\r
-int\r
-mlnx_map_vapi_rna_type(\r
-       IN                              VAPI_remote_node_addr_type_t    rna)\r
-{\r
-       switch (rna)\r
-       {\r
-       case VAPI_RNA_UD:       return IB_QPT_UNRELIABLE_DGRM;\r
-       case VAPI_RNA_RAW_ETY:  return IB_QPT_RAW_ETHER;\r
-       case VAPI_RNA_RAW_IPV6: return IB_QPT_RAW_IPV6;\r
-       default:\r
-               return IB_QPT_RELIABLE_CONN;\r
-       }\r
-}\r
-\r
-//////////////////////////////////////////////////////////////\r
-// Convert from VAPI memory-region attributes to IBAL \r
-//////////////////////////////////////////////////////////////\r
-void\r
-mlnx_conv_vapi_mr_attr(\r
-       IN                              ib_pd_handle_t                          pd_h,\r
-       IN                              HH_mr_info_t                            *mr_info_p,\r
-               OUT                     ib_mr_attr_t                            *mr_query_p)\r
-{\r
-       mr_query_p->h_pd = pd_h;\r
-       mr_query_p->local_lb  = mr_info_p->local_start;\r
-       mr_query_p->local_ub  = mr_info_p->local_start + mr_info_p->local_size;\r
-       mr_query_p->remote_lb = mr_info_p->remote_start;\r
-       mr_query_p->remote_ub = mr_info_p->remote_start + mr_info_p->remote_size;\r
-\r
-       mr_query_p->access_ctrl = map_vapi_acl(mr_info_p->acl);\r
-       mr_query_p->lkey = mr_info_p->lkey;\r
-       mr_query_p->rkey = cl_hton32(mr_info_p->rkey);\r
-}\r
-\r
-//////////////////////////////////////////////////////////////\r
-// Convert from IBAL memory-window bind request to VAPI \r
-//////////////////////////////////////////////////////////////\r
-void\r
-mlnx_conv_bind_req(\r
-       IN                              HHUL_qp_hndl_t                          hhul_qp_hndl,\r
-       IN                              ib_bind_wr_t* const                     p_mw_bind,\r
-               OUT                     HHUL_mw_bind_t                          *bind_prop_p)\r
-{\r
-       bind_prop_p->qp = hhul_qp_hndl;\r
-       bind_prop_p->id  = p_mw_bind->wr_id;\r
-       bind_prop_p->acl  = map_ibal_acl(p_mw_bind->access_ctrl);\r
-       bind_prop_p->size  = p_mw_bind->local_ds.length;\r
-       bind_prop_p->start  = (VAPI_virt_addr_t)(MT_virt_addr_t)p_mw_bind->local_ds.vaddr;\r
-       bind_prop_p->mr_lkey = p_mw_bind->local_ds.lkey;\r
-       bind_prop_p->comp_type = \r
-               (p_mw_bind->send_opt & IB_SEND_OPT_SIGNALED) ? VAPI_SIGNALED : VAPI_UNSIGNALED;\r
-}\r
-\r
-\r
-/////////////////////////////////////////////////////////\r
-// Map IBAL qp type to VAPI transport and special qp_type\r
-/////////////////////////////////////////////////////////\r
-int\r
-mlnx_map_ibal_qp_type(\r
-       IN                              ib_qp_type_t                            ibal_qpt,\r
-               OUT                     VAPI_special_qp_t                       *vapi_qp_type_p)\r
-{\r
-       switch (ibal_qpt)\r
-       {\r
-       case IB_QPT_RELIABLE_CONN:\r
-               if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;\r
-               return IB_TS_RC;\r
-\r
-       case IB_QPT_UNRELIABLE_CONN:\r
-               if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;\r
-               return IB_TS_UC;\r
-\r
-       case IB_QPT_UNRELIABLE_DGRM:\r
-               if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP;\r
-               return IB_TS_UD;\r
-\r
-       case IB_QPT_QP0:\r
-               if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP;\r
-               return IB_TS_UD;\r
-\r
-       case IB_QPT_QP1:\r
-               if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;\r
-               return IB_TS_UD;\r
-\r
-       case IB_QPT_RAW_IPV6:\r
-               if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_IPV6_QP; // TBD: ??\r
-               return IB_TS_RAW;\r
-\r
-       case IB_QPT_RAW_ETHER:\r
-               if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP;  // TBD: ??\r
-               return IB_TS_RAW;\r
-\r
-       case IB_QPT_MAD:\r
-               if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;\r
-               return IB_TS_UD;\r
-\r
-       case IB_QPT_QP0_ALIAS:\r
-               if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP;\r
-               return IB_TS_UD;\r
-\r
-       case IB_QPT_QP1_ALIAS:\r
-               if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP;\r
-               return IB_TS_UD;\r
-\r
-       default:\r
-               CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map ibal_qp_type %d (last known %d) returning %d\n",\r
-                       ibal_qpt, IB_QPT_QP1_ALIAS, IB_TS_RAW));\r
-               if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP;\r
-               return IB_TS_RAW;\r
-       }\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-// QP and CQ value must be handled by caller\r
-/////////////////////////////////////////////////////////\r
-void\r
-mlnx_conv_qp_create_attr(\r
-       IN              const   ib_qp_create_t                          *create_attr_p,\r
-               OUT                     HHUL_qp_init_attr_t                     *init_attr_p,\r
-               OUT                     VAPI_special_qp_t                       *vapi_qp_type_p)\r
-{\r
-       init_attr_p->ts_type = mlnx_map_ibal_qp_type(create_attr_p->qp_type, vapi_qp_type_p);\r
-\r
-       init_attr_p->qp_cap.max_oust_wr_sq = create_attr_p->sq_depth;\r
-       init_attr_p->qp_cap.max_oust_wr_rq = create_attr_p->rq_depth;\r
-       init_attr_p->qp_cap.max_sg_size_sq = create_attr_p->sq_sge;\r
-       init_attr_p->qp_cap.max_sg_size_rq = create_attr_p->rq_sge;\r
-\r
-       init_attr_p->sq_sig_type = (create_attr_p->sq_signaled) ? VAPI_SIGNAL_ALL_WR : VAPI_SIGNAL_REQ_WR;\r
-       init_attr_p->rq_sig_type = VAPI_SIGNAL_ALL_WR;\r
-\r
-       init_attr_p->srq = HHUL_INVAL_SRQ_HNDL;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-// NOTE: ibal_qp_state values are bit flags (non-linear), so we cannot use a LUT\r
-/////////////////////////////////////////////////////////\r
-VAPI_qp_state_t\r
-mlnx_map_ibal_qp_state(\r
-       IN                              ib_qp_state_t                           ibal_qp_state)\r
-{\r
-       VAPI_qp_state_t vapi_qp_state = VAPI_RESET;\r
-\r
-       if      (ibal_qp_state & IB_QPS_RESET) vapi_qp_state = VAPI_RESET;\r
-       else if (ibal_qp_state & IB_QPS_INIT)  vapi_qp_state = VAPI_INIT;\r
-       else if (ibal_qp_state & IB_QPS_RTR)   vapi_qp_state = VAPI_RTR;\r
-       else if (ibal_qp_state & IB_QPS_RTS)   vapi_qp_state = VAPI_RTS;\r
-       else if (ibal_qp_state & IB_QPS_SQD)   vapi_qp_state = VAPI_SQD;\r
-       else if (ibal_qp_state & IB_QPS_SQERR) vapi_qp_state = VAPI_SQE;\r
-       else if (ibal_qp_state & IB_QPS_ERROR) vapi_qp_state = VAPI_ERR;\r
-\r
-       return vapi_qp_state;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-ib_qp_state_t\r
-mlnx_map_vapi_qp_state(\r
-       IN                              VAPI_qp_state_t                         vapi_qp_state)\r
-{\r
-       switch (vapi_qp_state)\r
-       {\r
-       case VAPI_RESET: return IB_QPS_RESET;\r
-       case VAPI_INIT:  return IB_QPS_INIT;\r
-       case VAPI_RTR:   return IB_QPS_RTR;\r
-       case VAPI_RTS:   return IB_QPS_RTS;\r
-       case VAPI_SQD:   return IB_QPS_SQD;\r
-       case VAPI_SQE:   return IB_QPS_SQERR;\r
-       case VAPI_ERR:   return IB_QPS_ERROR;\r
-               // TBD: IB_QPS_SQD_DRAINING\r
-               // TBD: IB_QPS_SQD_DRAINED\r
-       default:\r
-               CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_qp_state %d (last known %d) returning %d\n",\r
-                       vapi_qp_state, VAPI_ERR, IB_QPS_INIT));\r
-               return IB_QPS_INIT;\r
-       }\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-ib_apm_state_t\r
-mlnx_map_vapi_apm_state(\r
-       IN                              VAPI_mig_state_t                        vapi_apm_state)\r
-{\r
-       switch (vapi_apm_state)\r
-       {\r
-       case VAPI_MIGRATED: return IB_APM_MIGRATED;\r
-       case VAPI_REARM:    return IB_APM_REARM;\r
-       case VAPI_ARMED:    return IB_APM_ARMED;\r
-\r
-       default:\r
-               CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_apm_state %d (last known %d) returning %d\n",\r
-                       vapi_apm_state, VAPI_ARMED, 0));\r
-               return 0;\r
-       }\r
-}\r
-\r
-#if 0\r
-/////////////////////////////////////////////////////////\r
-// UNUSED: IBAL uses same encoding as THH\r
-/////////////////////////////////////////////////////////\r
-static\r
-u_int32_t ibal_mtu_to_vapi(u_int32_t ibal_mtu)\r
-{\r
-       u_int32_t mtu = 0;\r
-\r
-       // MTU256=1, MTU512=2, MTU1024=3\r
-       while (ibal_mtu >>= 1) mtu++;\r
-       return mtu - 7;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-static\r
-u_int32_t vapi_mtu_to_ibal(u_int32_t vapi_mtu)\r
-{\r
-       return (1 << (vapi_mtu + 7));\r
-}\r
-#endif\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-void\r
-mlnx_conv_vapi_qp_attr(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN                              VAPI_qp_attr_t                          *hh_qp_attr_p,\r
-               OUT                     ib_qp_attr_t                            *qp_attr_p)\r
-{\r
-       qp_attr_p->access_ctrl     = map_vapi_qp_acl(hh_qp_attr_p->remote_atomic_flags);\r
-       qp_attr_p->pkey_index      = (uint16_t)hh_qp_attr_p->pkey_ix;\r
-       qp_attr_p->sq_depth        = hh_qp_attr_p->cap.max_oust_wr_sq;\r
-       qp_attr_p->rq_depth        = hh_qp_attr_p->cap.max_oust_wr_rq;\r
-       qp_attr_p->sq_sge          = hh_qp_attr_p->cap.max_sg_size_sq;\r
-       qp_attr_p->rq_sge          = hh_qp_attr_p->cap.max_sg_size_rq;\r
-       qp_attr_p->sq_max_inline   = hh_qp_attr_p->cap.max_inline_data_sq;\r
-       qp_attr_p->init_depth      = hh_qp_attr_p->ous_dst_rd_atom; // outstanding outgoing\r
-       qp_attr_p->resp_res        = hh_qp_attr_p->qp_ous_rd_atom;  // outstanding as target (in)\r
-\r
-       qp_attr_p->num             = cl_ntoh32(hh_qp_attr_p->qp_num);\r
-       CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_qpn 0x%x = hh_qpn 0x%x\n",\r
-               qp_attr_p->num,\r
-               hh_qp_attr_p->qp_num));\r
-\r
-       qp_attr_p->dest_num        = cl_ntoh32(hh_qp_attr_p->dest_qp_num);\r
-       CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_dest 0x%x = hh_dest 0x%x\n",\r
-               qp_attr_p->dest_num,\r
-               hh_qp_attr_p->dest_qp_num));\r
-       qp_attr_p->qkey            = cl_ntoh32 (hh_qp_attr_p->qkey);\r
-\r
-       qp_attr_p->sq_psn          = cl_ntoh32 (hh_qp_attr_p->sq_psn);\r
-       qp_attr_p->rq_psn          = cl_ntoh32 (hh_qp_attr_p->rq_psn);\r
-\r
-       qp_attr_p->primary_port    = hh_qp_attr_p->port;\r
-       qp_attr_p->alternate_port  = hh_qp_attr_p->alt_port;\r
-\r
-       qp_attr_p->state           = mlnx_map_vapi_qp_state(hh_qp_attr_p->qp_state);\r
-       qp_attr_p->apm_state       = mlnx_map_vapi_apm_state(hh_qp_attr_p->path_mig_state);\r
-\r
-       mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->av, &qp_attr_p->primary_av);\r
-       qp_attr_p->primary_av.conn.path_mtu          = (u_int8_t)hh_qp_attr_p->path_mtu;\r
-       qp_attr_p->primary_av.conn.local_ack_timeout = hh_qp_attr_p->timeout; \r
-       qp_attr_p->primary_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count;\r
-       qp_attr_p->primary_av.conn.rnr_retry_cnt     = hh_qp_attr_p->rnr_retry;\r
-\r
-       mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->alt_av, &qp_attr_p->alternate_av);\r
-       qp_attr_p->alternate_av.conn.path_mtu          = (u_int8_t)hh_qp_attr_p->path_mtu;\r
-       qp_attr_p->alternate_av.conn.local_ack_timeout = hh_qp_attr_p->timeout;\r
-       qp_attr_p->alternate_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count;\r
-       qp_attr_p->alternate_av.conn.rnr_retry_cnt     = hh_qp_attr_p->rnr_retry;\r
-}\r
-#if 0\r
-XXX:\r
-QP_ATTR_QP_STATE\r
-QP_ATTR_EN_SQD_ASYN_NOTIF\r
-QP_ATTR_QP_NUM\r
-+ QP_ATTR_REMOTE_ATOMIC_FLAGS\r
-+ QP_ATTR_PKEY_IX\r
-+ QP_ATTR_PORT\r
-+ QP_ATTR_QKEY\r
-+ QP_ATTR_RQ_PSN\r
-+ QP_ATTR_AV\r
-\r
-QP_ATTR_PATH_MTU\r
-+ QP_ATTR_TIMEOUT\r
-+ QP_ATTR_RETRY_COUNT\r
-+ QP_ATTR_RNR_RETRY\r
-QP_ATTR_QP_OUS_RD_ATOM\r
-\r
-- QP_ATTR_ALT_PATH\r
-\r
-+ QP_ATTR_MIN_RNR_TIMER\r
-QP_ATTR_SQ_PSN\r
-QP_ATTR_OUS_DST_RD_ATOM\r
-QP_ATTR_PATH_MIG_STATE\r
-QP_ATTR_CAP\r
-#endif\r
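-/*\r
- * The #if 0 block above appears to be a developer checklist of the\r
- * VAPI_qp_attr_mask_t bits that mlnx_conv_qp_modify_attr below may set;\r
- * it is kept for reference only and is not compiled.\r
- */\r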
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-ib_api_status_t\r
-mlnx_conv_qp_modify_attr(\r
-       IN                              HH_hca_hndl_t                                   hh_hndl,\r
-       IN                              ib_qp_type_t                                    qp_type,\r
-       IN              const   ib_qp_mod_t                                             *modify_attr_p,\r
-               OUT                     VAPI_qp_attr_t                                  *qp_attr_p, \r
-               OUT                     VAPI_qp_attr_mask_t                             *attr_mask_p)\r
-{\r
-\r
-       qp_attr_p->qp_state = mlnx_map_ibal_qp_state(modify_attr_p->req_state);\r
-       *attr_mask_p = QP_ATTR_QP_STATE;\r
-\r
-       switch(modify_attr_p->req_state)\r
-       {\r
-       case IB_QPS_RESET:\r
-               break;\r
-\r
-       case IB_QPS_INIT:\r
-               *attr_mask_p |= QP_ATTR_PORT |\r
-                       QP_ATTR_QKEY |\r
-                       QP_ATTR_PKEY_IX ;\r
-\r
-               qp_attr_p->port    = modify_attr_p->state.init.primary_port;\r
-               qp_attr_p->qkey    = cl_ntoh32 (modify_attr_p->state.init.qkey);\r
-               qp_attr_p->pkey_ix = modify_attr_p->state.init.pkey_index;\r
-               if (IB_QPT_RELIABLE_CONN == qp_type)\r
-               {\r
-                       *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;\r
-                       qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.init.access_ctrl);\r
-               } else\r
-               {\r
-                       qp_attr_p->remote_atomic_flags = 0;\r
-               }\r
-               break;\r
-\r
-       case IB_QPS_RTR:\r
-               /* VAPI doesn't support modifying the WQE depth ever. */\r
-               if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||\r
-                       modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH )\r
-               {\r
-                       return IB_UNSUPPORTED;\r
-               }\r
-\r
-               *attr_mask_p |= QP_ATTR_RQ_PSN |\r
-                       QP_ATTR_DEST_QP_NUM |\r
-                       QP_ATTR_QP_OUS_RD_ATOM |\r
-                       QP_ATTR_MIN_RNR_TIMER |\r
-                       QP_ATTR_AV ;\r
-\r
-               qp_attr_p->rq_psn          = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);\r
-               qp_attr_p->dest_qp_num     = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);\r
-               qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rtr.resp_res;\r
-\r
-               qp_attr_p->min_rnr_timer   = modify_attr_p->state.rtr.rnr_nak_timeout;\r
-\r
-#if 1\r
-               CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("modify_qp: hh_dest 0x%x = ibal_dest 0x%x\n",\r
-                       qp_attr_p->dest_qp_num, modify_attr_p->state.rtr.dest_qp));\r
-#endif\r
-\r
-               // Convert primary RC AV (mandatory)\r
-               cl_memclr(&qp_attr_p->av, sizeof(VAPI_ud_av_t));\r
-               mlnx_conv_ibal_av(hh_hndl,\r
-                       &modify_attr_p->state.rtr.primary_av, &qp_attr_p->av);\r
-\r
-               if (IB_QPT_RELIABLE_CONN == qp_type)\r
-               {\r
-                       *attr_mask_p |= QP_ATTR_PATH_MTU;\r
-                       qp_attr_p->path_mtu     = modify_attr_p->state.rtr.primary_av.conn.path_mtu; // MTU\r
-                       *attr_mask_p |= QP_ATTR_TIMEOUT;\r
-                       qp_attr_p->timeout     = modify_attr_p->state.rtr.primary_av.conn.local_ack_timeout; // XXX: conv\r
-                       *attr_mask_p |= QP_ATTR_RETRY_COUNT;\r
-                       qp_attr_p->retry_count = modify_attr_p->state.rtr.primary_av.conn.seq_err_retry_cnt;\r
-                       *attr_mask_p |= QP_ATTR_RNR_RETRY;\r
-                       qp_attr_p->rnr_retry   = modify_attr_p->state.rtr.primary_av.conn.rnr_retry_cnt;\r
-               }\r
-\r
-               // Convert Remote Atomic Flags\r
-               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL)\r
-               {\r
-                       *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;\r
-                       qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rtr.access_ctrl);\r
-               }\r
-\r
-               // Convert alternate RC AV\r
-               if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV)\r
-               {\r
-                       *attr_mask_p |= QP_ATTR_ALT_PATH;\r
-                       cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t));\r
-                       mlnx_conv_ibal_av(hh_hndl,\r
-                               &modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_av);\r
-\r
-                       if (IB_QPT_RELIABLE_CONN == qp_type)\r
-                       {\r
-                               qp_attr_p->alt_timeout     = modify_attr_p->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv\r
-#if 0\r
-                               /* Incompliant with spec 1.1! Data already set before */\r
-                               qp_attr_p->retry_count = modify_attr_p->state.rtr.alternate_av.conn.seq_err_retry_cnt;\r
-                               qp_attr_p->rnr_retry   = modify_attr_p->state.rtr.alternate_av.conn.rnr_retry_cnt;\r
-#endif\r
-                       }\r
-               }\r
-               break;\r
-\r
-       case IB_QPS_RTS:\r
-               /* VAPI doesn't support modifying the WQE depth ever. */\r
-               if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||\r
-                       modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH )\r
-               {\r
-                       return IB_UNSUPPORTED;\r
-               }\r
-\r
-               *attr_mask_p |= QP_ATTR_SQ_PSN |\r
-                       QP_ATTR_RETRY_COUNT |\r
-                       QP_ATTR_RNR_RETRY |\r
-                       QP_ATTR_TIMEOUT|\r
-                       QP_ATTR_OUS_DST_RD_ATOM |\r
-                       QP_ATTR_MIN_RNR_TIMER;\r
-\r
-               qp_attr_p->sq_psn = cl_ntoh32 (modify_attr_p->state.rts.sq_psn);\r
-\r
-               if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL)\r
-               {\r
-                       *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS;\r
-                       qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rts.access_ctrl);\r
-               }\r
-\r
-               qp_attr_p->timeout     = modify_attr_p->state.rts.local_ack_timeout; // XXX: conv\r
-               qp_attr_p->ous_dst_rd_atom = modify_attr_p->state.rts.init_depth;\r
-               qp_attr_p->retry_count = modify_attr_p->state.rts.retry_cnt;\r
-               qp_attr_p->rnr_retry   = modify_attr_p->state.rts.rnr_retry_cnt;\r
-               qp_attr_p->min_rnr_timer   = modify_attr_p->state.rts.rnr_nak_timeout;\r
-\r
-               // Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)\r
-               if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {\r
-                       *attr_mask_p |= QP_ATTR_QP_OUS_RD_ATOM;\r
-                       qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rts.resp_res;\r
-               }\r
-\r
-               // Convert alternate RC AV\r
-               if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV)\r
-               {\r
-                       *attr_mask_p |= QP_ATTR_ALT_PATH;\r
-                       cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t));\r
-                       mlnx_conv_ibal_av(hh_hndl,\r
-                               &modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_av);\r
-                       if (IB_QPT_RELIABLE_CONN == qp_type)\r
-                       {\r
-                               qp_attr_p->alt_timeout     = modify_attr_p->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv\r
-#if 0\r
-                               /* Incompliant with spec 1.1! Data already set before */\r
-                               qp_attr_p->retry_count = modify_attr_p->state.rts.alternate_av.conn.seq_err_retry_cnt;\r
-                               qp_attr_p->rnr_retry   = modify_attr_p->state.rts.alternate_av.conn.rnr_retry_cnt;\r
-#endif\r
-                       }\r
-               }\r
-               break;\r
-\r
-               // TBD: The following are treated equally (SQ Drain)\r
-       case IB_QPS_SQD:\r
-       case IB_QPS_SQD_DRAINING:\r
-       case IB_QPS_SQD_DRAINED:\r
-               *attr_mask_p |= QP_ATTR_EN_SQD_ASYN_NOTIF;\r
-               qp_attr_p->en_sqd_asyn_notif = (MT_bool)modify_attr_p->state.sqd.sqd_event;\r
-               break;\r
-\r
-       case IB_QPS_SQERR:\r
-       case IB_QPS_ERROR:\r
-       case IB_QPS_TIME_WAIT:\r
-       default:\r
-               break;\r
-       }\r
-       CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("CL: conv_qp_modify: new state %d attr_mask 0x%x\n", qp_attr_p->qp_state, *attr_mask_p));\r
-       return IB_SUCCESS;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-static VAPI_wr_opcode_t\r
-map_ibal_send_opcode(\r
-       IN                              ib_wr_type_t                            ibal_opcode,\r
-       IN                              boolean_t                                       imm)\r
-{\r
-       VAPI_wr_opcode_t                vapi_opcode;\r
-\r
-       switch (ibal_opcode)\r
-       {\r
-       case WR_SEND:         vapi_opcode = VAPI_SEND;\r
-               break;\r
-       case WR_RDMA_WRITE:   vapi_opcode = VAPI_RDMA_WRITE;\r
-               break;\r
-       case WR_RDMA_READ:    vapi_opcode = VAPI_RDMA_READ;\r
-               break;\r
-       case WR_COMPARE_SWAP: vapi_opcode = VAPI_ATOMIC_CMP_AND_SWP;\r
-               break;\r
-       case WR_FETCH_ADD:    vapi_opcode = VAPI_ATOMIC_FETCH_AND_ADD;\r
-               break;\r
-       default:              vapi_opcode = VAPI_SEND;\r
-               break;\r
-       }\r
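-       // The increment below relies on each *_WITH_IMM opcode immediately\r
-       // following its plain opcode in the VAPI_wr_opcode_t enumeration.\r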
-       if (imm && (VAPI_SEND == vapi_opcode || VAPI_RDMA_WRITE == vapi_opcode)) vapi_opcode++;\r
-       return vapi_opcode;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-ib_api_status_t\r
-mlnx_conv_send_desc(\r
-       IN                              IB_ts_t                                         transport,\r
-       IN              const   ib_send_wr_t                            *ibal_send_wqe_p,\r
-               OUT                     VAPI_sr_desc_t                          *vapi_send_desc_p)\r
-{\r
-       boolean_t                                               imm = FALSE;\r
-       u_int32_t                                               idx;\r
-       register VAPI_sg_lst_entry_t    *sg_lst_p;\r
-       register ib_local_ds_t                  *ds_array;\r
-\r
-\r
-       switch (transport)\r
-       {\r
-       case IB_TS_UD:\r
-               CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "UD"));\r
-               {\r
-                       mlnx_avo_t *avo_p = (mlnx_avo_t *)ibal_send_wqe_p->dgrm.ud.h_av;\r
-\r
-                       vapi_send_desc_p->remote_qp  = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qp);\r
-                       vapi_send_desc_p->remote_qkey = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qkey);\r
-\r
-                       if (!avo_p || avo_p->mark != E_MARK_AV)\r
-                               return IB_INVALID_AV_HANDLE;\r
-\r
-                       vapi_send_desc_p->remote_ah = avo_p->h_av; // was ah.hhul\r
-                       break;\r
-               }\r
-\r
-       case IB_TS_RC:\r
-               CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "RC"));\r
-               // vapi_send_desc_p->remote_qp   = 0;\r
-               // vapi_send_desc_p->remote_qkey = 0;\r
-               vapi_send_desc_p->remote_addr = ibal_send_wqe_p->remote_ops.vaddr;\r
-               vapi_send_desc_p->r_key       = ibal_send_wqe_p->remote_ops.rkey;\r
-               vapi_send_desc_p->compare_add = ibal_send_wqe_p->remote_ops.atomic1;\r
-               vapi_send_desc_p->swap        = ibal_send_wqe_p->remote_ops.atomic2;\r
-               break;\r
-\r
-       default: // TBD: RAW, RD\r
-               return IB_UNSUPPORTED;\r
-       }\r
-\r
-       imm = (0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_IMMEDIATE));\r
-       vapi_send_desc_p->fence      = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_FENCE));\r
-       vapi_send_desc_p->set_se     = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SOLICITED));\r
-       vapi_send_desc_p->comp_type  = (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SIGNALED) ?\r
-               VAPI_SIGNALED : VAPI_UNSIGNALED;\r
-\r
-       vapi_send_desc_p->id = ibal_send_wqe_p->wr_id;\r
-       vapi_send_desc_p->opcode = map_ibal_send_opcode(ibal_send_wqe_p->wr_type, imm);\r
-\r
-       if (imm)\r
-               vapi_send_desc_p->imm_data = cl_ntoh32 (ibal_send_wqe_p->immediate_data);\r
-\r
-       vapi_send_desc_p->sg_lst_len = ibal_send_wqe_p->num_ds;\r
-\r
-       sg_lst_p = vapi_send_desc_p->sg_lst_p;\r
-       ds_array = ibal_send_wqe_p->ds_array;\r
-       for (idx = 0; idx < ibal_send_wqe_p->num_ds; idx++)\r
-       {\r
-               sg_lst_p->addr = ds_array->vaddr;\r
-               sg_lst_p->len  = ds_array->length;\r
-               sg_lst_p->lkey = ds_array->lkey;\r
-               // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_send (conv) addr %Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey));\r
-               sg_lst_p++;\r
-               ds_array++;\r
-       }\r
-       CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("send: rqpn 0x%x rqkey 0x%x\n", \r
-               vapi_send_desc_p->remote_qp,\r
-               vapi_send_desc_p->remote_qkey));\r
-       return IB_SUCCESS;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-ib_api_status_t\r
-mlnx_conv_recv_desc(\r
-       IN              const   ib_recv_wr_t                            *ibal_recv_wqe_p,\r
-               OUT                     VAPI_rr_desc_t                          *vapi_recv_desc_p)\r
-{\r
-       u_int32_t                                               idx;\r
-       register VAPI_sg_lst_entry_t    *sg_lst_p;\r
-       register ib_local_ds_t                  *ds_array;\r
-\r
-       vapi_recv_desc_p->id         = ibal_recv_wqe_p->wr_id;\r
-       vapi_recv_desc_p->sg_lst_len = ibal_recv_wqe_p->num_ds;\r
-       vapi_recv_desc_p->opcode     = VAPI_RECEIVE;\r
-       vapi_recv_desc_p->comp_type  = VAPI_SIGNALED;\r
-\r
-       sg_lst_p = vapi_recv_desc_p->sg_lst_p;\r
-       ds_array = ibal_recv_wqe_p->ds_array;\r
-       for (idx = 0; idx < ibal_recv_wqe_p->num_ds; idx++)\r
-       {\r
-               sg_lst_p->addr = ds_array->vaddr;\r
-               sg_lst_p->len  = ds_array->length;\r
-               sg_lst_p->lkey = ds_array->lkey;\r
-               // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_recv (conv) addr 0x%Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey));\r
-               sg_lst_p++;\r
-               ds_array++;\r
-       }\r
-\r
-       return IB_SUCCESS;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-void\r
-vapi_port_cap_to_ibal(\r
-       IN                              IB_port_cap_mask_t                      vapi_port_cap,\r
-               OUT                     ib_port_cap_t                           *ibal_port_cap_p)\r
-{\r
-       if (vapi_port_cap & IB_CAP_MASK_IS_CONN_MGMT_SUP)\r
-               ibal_port_cap_p->cm = TRUE;\r
-       if (vapi_port_cap & IB_CAP_MASK_IS_SNMP_TUNN_SUP)\r
-               ibal_port_cap_p->snmp = TRUE;\r
-       if (vapi_port_cap & IB_CAP_MASK_IS_DEVICE_MGMT_SUP)\r
-               ibal_port_cap_p->dev_mgmt = TRUE;\r
-       if (vapi_port_cap & IB_CAP_MASK_IS_VENDOR_CLS_SUP)\r
-               ibal_port_cap_p->vend = TRUE;\r
-       if (vapi_port_cap & IB_CAP_MASK_IS_SM_DISABLED)\r
-               ibal_port_cap_p->sm_disable = TRUE;\r
-       if (vapi_port_cap & IB_CAP_MASK_IS_SM)\r
-               ibal_port_cap_p->sm = TRUE;\r
-       if (vapi_port_cap & IB_CAP_MASK_IS_CLIENT_REREGISTRATION_SUP)\r
-               ibal_port_cap_p->client_reregister = TRUE;\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-void\r
-mlnx_conv_vapi_hca_cap(\r
-       IN                              HH_hca_dev_t                            *hca_info_p,\r
-       IN                              VAPI_hca_cap_t                          *vapi_hca_cap_p,\r
-       IN                              VAPI_hca_port_t                         *vapi_hca_ports,\r
-               OUT                     ib_ca_attr_t                            *ca_attr_p)\r
-{\r
-       u_int8_t                        port_num;\r
-       VAPI_hca_port_t         *vapi_port_p;\r
-       ib_port_attr_t          *ibal_port_p;\r
-\r
-       ca_attr_p->vend_id  = hca_info_p->vendor_id;\r
-       ca_attr_p->dev_id   = (uint16_t)hca_info_p->dev_id;\r
-       ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver;\r
-       ca_attr_p->fw_ver = hca_info_p->fw_ver;\r
-\r
-       ca_attr_p->ca_guid   = *(UNALIGNED64 u_int64_t *)vapi_hca_cap_p->node_guid;\r
-       ca_attr_p->num_ports = vapi_hca_cap_p->phys_port_num;\r
-       ca_attr_p->max_qps   = vapi_hca_cap_p->max_num_qp;\r
-       ca_attr_p->max_wrs   = vapi_hca_cap_p->max_qp_ous_wr;\r
-       ca_attr_p->max_sges   = vapi_hca_cap_p->max_num_sg_ent;\r
-       ca_attr_p->max_rd_sges = vapi_hca_cap_p->max_num_sg_ent_rd;\r
-       ca_attr_p->max_cqs    = vapi_hca_cap_p->max_num_cq;\r
-       ca_attr_p->max_cqes  = vapi_hca_cap_p->max_num_ent_cq;\r
-       ca_attr_p->max_pds    = vapi_hca_cap_p->max_pd_num;\r
-       ca_attr_p->init_regions = vapi_hca_cap_p->max_num_mr;\r
-       ca_attr_p->init_windows = vapi_hca_cap_p->max_mw_num;\r
-       ca_attr_p->init_region_size = vapi_hca_cap_p->max_mr_size;\r
-       ca_attr_p->max_addr_handles = vapi_hca_cap_p->max_ah_num;\r
-       ca_attr_p->atomicity     = vapi_hca_cap_p->atomic_cap;\r
-       ca_attr_p->max_partitions = vapi_hca_cap_p->max_pkeys;\r
-       ca_attr_p->max_qp_resp_res = vapi_hca_cap_p->max_qp_ous_rd_atom;\r
-       ca_attr_p->max_resp_res    = vapi_hca_cap_p->max_res_rd_atom;\r
-       ca_attr_p->max_qp_init_depth = vapi_hca_cap_p->max_qp_init_rd_atom;\r
-       ca_attr_p->max_ipv6_qps    = vapi_hca_cap_p->max_raw_ipv6_qp;\r
-       ca_attr_p->max_ether_qps   = vapi_hca_cap_p->max_raw_ethy_qp;\r
-       ca_attr_p->max_mcast_grps  = vapi_hca_cap_p->max_mcast_grp_num;\r
-       ca_attr_p->max_mcast_qps   = vapi_hca_cap_p->max_total_mcast_qp_attach_num;\r
-       ca_attr_p->max_qps_per_mcast_grp = vapi_hca_cap_p->max_mcast_qp_attach_num;\r
-       ca_attr_p->local_ack_delay = vapi_hca_cap_p->local_ca_ack_delay;\r
-       ca_attr_p->bad_pkey_ctr_support = vapi_hca_cap_p->flags & VAPI_BAD_PKEY_COUNT_CAP;\r
-       ca_attr_p->bad_qkey_ctr_support = vapi_hca_cap_p->flags & VAPI_BAD_QKEY_COUNT_CAP;\r
-       ca_attr_p->raw_mcast_support    = vapi_hca_cap_p->flags & VAPI_RAW_MULTI_CAP;\r
-       ca_attr_p->apm_support          = vapi_hca_cap_p->flags & VAPI_AUTO_PATH_MIG_CAP;\r
-       ca_attr_p->av_port_check        = vapi_hca_cap_p->flags & VAPI_UD_AV_PORT_ENFORCE_CAP;\r
-       ca_attr_p->change_primary_port  = vapi_hca_cap_p->flags & VAPI_CHANGE_PHY_PORT_CAP;\r
-       ca_attr_p->modify_wr_depth      = vapi_hca_cap_p->flags & VAPI_RESIZE_OUS_WQE_CAP;\r
-       ca_attr_p->hw_agents            = FALSE; // in the context of IBAL the agents are implemented on the host\r
-\r
-       ca_attr_p->num_page_sizes = 1;\r
-       ca_attr_p->p_page_size[0] = PAGESIZE; // TBD: extract an array of page sizes from HCA cap\r
-\r
-       for (port_num = 0; port_num < vapi_hca_cap_p->phys_port_num; port_num++)\r
-       {\r
-               // Setup port pointers\r
-               ibal_port_p = &ca_attr_p->p_port_attr[port_num];\r
-               vapi_port_p = &vapi_hca_ports[port_num];\r
-\r
-               // Port Capabilities\r
-               cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));\r
-               vapi_port_cap_to_ibal(vapi_port_p->capability_mask, &ibal_port_p->cap);\r
-\r
-               // Port Attributes\r
-               ibal_port_p->port_num   = port_num + 1;\r
-               ibal_port_p->port_guid  = ibal_port_p->p_gid_table[0].unicast.interface_id;\r
-               ibal_port_p->lid        = cl_ntoh16(vapi_port_p->lid);\r
-               ibal_port_p->lmc        = vapi_port_p->lmc;\r
-               ibal_port_p->max_vls    = vapi_port_p->max_vl_num;\r
-               ibal_port_p->sm_lid     = cl_ntoh16(vapi_port_p->sm_lid);\r
-               ibal_port_p->sm_sl      = vapi_port_p->sm_sl;\r
-               ibal_port_p->link_state = (vapi_port_p->state != 0) ? (uint8_t)vapi_port_p->state : IB_LINK_DOWN;\r
-               ibal_port_p->num_gids   = vapi_port_p->gid_tbl_len;\r
-               ibal_port_p->num_pkeys  = vapi_port_p->pkey_tbl_len;\r
-               ibal_port_p->pkey_ctr   = (uint16_t)vapi_port_p->bad_pkey_counter;\r
-               ibal_port_p->qkey_ctr   = (uint16_t)vapi_port_p->qkey_viol_counter;\r
-               ibal_port_p->max_msg_size = vapi_port_p->max_msg_sz;\r
-               ibal_port_p->mtu = (u_int8_t)vapi_port_p->max_mtu;\r
-\r
-               ibal_port_p->subnet_timeout = 5; // TBD: currently 128us\r
-               // ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec\r
-#if 0\r
-               CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("Port %d port_guid 0x%"PRIx64"\n",\r
-                       ibal_port_p->port_num, ibal_port_p->port_guid));\r
-#endif\r
-       }\r
-}\r
-\r
-/////////////////////////////////////////////////////////\r
-/////////////////////////////////////////////////////////\r
-ib_api_status_t\r
-mlnx_get_hca_pkey_tbl(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN                              u_int8_t                                        port_num,\r
-       IN                              u_int16_t                                       num_entries,\r
-               OUT                     void*                                           table_p)\r
-{\r
-       u_int16_t               size;\r
-       ib_net16_t              *pkey_p;\r
-\r
-       if (HH_OK != THH_hob_get_pkey_tbl( hh_hndl, port_num, num_entries, &size, table_p))\r
-               return IB_ERROR;\r
-\r
-       pkey_p = (ib_net16_t *)table_p;\r
-#if 0\r
-       CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("port %d pkey0 0x%x pkey1 0x%x\n", port_num, pkey_p[0], pkey_p[1]));\r
-#endif\r
-       return IB_SUCCESS;\r
-}\r
-\r
-ib_api_status_t\r
-mlnx_get_hca_gid_tbl(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN                              u_int8_t                                        port_num,\r
-       IN                              u_int16_t                                       num_entries,\r
-               OUT                     void*                                           table_p)\r
-{\r
-       u_int16_t               size;\r
-\r
-       if (HH_OK != THH_hob_get_gid_tbl( hh_hndl, port_num, num_entries, &size, table_p))\r
-               return IB_ERROR;\r
-\r
-       return IB_SUCCESS;\r
-}\r
index e524897..e69de29 100644 (file)
@@ -1,608 +0,0 @@
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
- * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- *     Redistribution and use in source and binary forms, with or\r
- *     without modification, are permitted provided that the following\r
- *     conditions are met:\r
- *\r
- *      - Redistributions of source code must retain the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer.\r
- *\r
- *      - Redistributions in binary form must reproduce the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer in the documentation and/or other materials\r
- *        provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id$\r
- */\r
-\r
-#ifndef __HCA_DATA_H__\r
-#define __HCA_DATA_H__\r
-\r
-\r
-#include <iba/ib_ci.h>\r
-#include <complib/comp_lib.h>\r
-\r
-#include <vapi.h>\r
-#include <evapi.h>\r
-#include <hh.h>\r
-#include <thh.h>\r
-#include <thh_hob.h>\r
-#include <tavor_dev_defs.h>\r
-#include <thh_init.h>\r
-#include <hhul.h>\r
-#include <thhul_hob.h>\r
-#include <thhul_pdm.h>\r
-#include <thhul_cqm.h>\r
-#include <thhul_qpm.h>\r
-\r
-extern u_int32_t               g_mlnx_dbg_lvl;\r
-extern uint32_t                        g_sqp_max_avs;\r
-extern char                            mlnx_uvp_lib_name[];\r
-\r
-#define MLNX_DBG_INFO    (1<<1)\r
-#define MLNX_DBG_TRACE   (1<<2)\r
-#define MLNX_DBG_VERBOSE (1<<3)\r
-// for data path debugging\r
-#define MLNX_DBG_DIRECT  (1<<4)\r
-#define MLNX_DBG_QPN     (1<<5)\r
-#define MLNX_DBG_MEM     (1<<6)\r
-\r
-#define MLNX_MAX_HCA   4\r
-#define MLNX_NUM_HOBKL MLNX_MAX_HCA\r
-#define MLNX_NUM_HOBUL MLNX_MAX_HCA\r
-#define MLNX_NUM_CB_THR     1\r
-#define MLNX_SIZE_CB_POOL 256\r
-#define MLNX_UAL_ALLOC_HCA_UL_RES 1\r
-#define MLNX_UAL_FREE_HCA_UL_RES 2\r
-\r
-\r
-// Defines for QP ops\r
-#define        MLNX_MAX_NUM_SGE 8\r
-#define        MLNX_MAX_WRS_PER_CHAIN 4\r
-\r
-#define MLNX_NUM_RESERVED_QPS 16\r
-\r
-/*\r
- * Completion model.\r
- *     0: No DPC processor assignment\r
- *     1: DPCs per-CQ, processor affinity set at CQ initialization time.\r
- *     2: DPCs per-CQ, processor affinity set at runtime.\r
- *     3: DPCs per-CQ, no processor affinity set.\r
- */\r
-#define MLNX_COMP_MODEL                3\r
-\r
-#define PD_HCA_FROM_HNDL(hndl) (((pd_info_t *)hndl)->hca_idx)\r
-#define PD_NUM_FROM_HNDL(hndl) (((pd_info_t *)hndl)->pd_num)\r
-#define CQ_HCA_FROM_HNDL(hndl) (((cq_info_t *)hndl)->hca_idx)\r
-#define CQ_NUM_FROM_HNDL(hndl) (((cq_info_t *)hndl)->cq_num)\r
-#define QP_HCA_FROM_HNDL(hndl) (((qp_info_t *)hndl)->hca_idx)\r
-#define QP_NUM_FROM_HNDL(hndl) (((qp_info_t *)hndl)->qp_num)\r
-\r
-#define PD_HNDL_FROM_PD(pd_num) (&hobul_p->pd_info_tbl[pd_num])\r
-#define CQ_HNDL_FROM_CQ(cq_num) (&hobul_p->cq_info_tbl[cq_num])\r
-#define QP_HNDL_FROM_QP(qp_num) (&hobul_p->qp_info_tbl[qp_num])\r
-\r
-#ifdef _DEBUG_\r
-#define VALIDATE_INDEX(index, limit, error, label) \\r
-       {                  \\r
-               if (index >= limit) \\r
-               {                   \\r
-                       status = error;   \\r
-                       CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__)); \\r
-                       goto label;       \\r
-               }                   \\r
-       }\r
-#else\r
-#define VALIDATE_INDEX(index, limit, error, label)\r
-#endif\r
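Together with the *_FROM_HNDL macros above, VALIDATE_INDEX forms the validation pattern every verb in this driver repeats: extract the HCA index and resource number from the handle, mask the number into a table slot, and range-check the slot before touching the table. A stand-alone sketch of that pattern, assuming illustrative limits rather than the driver's real ones:

	#include <stdint.h>
	#include <stdio.h>

	#define MAX_QP      64              /* stand-in for hobul_p->max_qp      */
	#define QP_IDX_MASK (MAX_QP - 1)    /* stand-in for hobul_p->qp_idx_mask */

	/* Same shape as: qp_idx = qp_num & qp_idx_mask; VALIDATE_INDEX(qp_idx, max_qp, ...) */
	static int lookup_qp(uint32_t qp_num, uint32_t *qp_idx_out)
	{
		uint32_t qp_idx = qp_num & QP_IDX_MASK; /* mask the number into a table slot      */
		if (qp_idx >= MAX_QP)                   /* the range check VALIDATE_INDEX performs */
			return -1;                          /* the driver would 'goto cleanup' here    */
		*qp_idx_out = qp_idx;
		return 0;
	}

	int main(void)
	{
		uint32_t idx;
		if (lookup_qp(0x1234, &idx) == 0)
			printf("qp 0x1234 -> table slot %u\n", idx);
		return 0;
	}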
-\r
-\r
-\r
-// Typedefs\r
-\r
-typedef enum {\r
-       E_EV_CA=1,\r
-       E_EV_QP,\r
-       E_EV_CQ,\r
-       E_EV_LAST\r
-} ENUM_EVENT_CLASS;\r
-\r
-typedef enum {\r
-       E_MARK_CA=1, // Channel Adapter\r
-       E_MARK_PD, // Protection Domain\r
-       E_MARK_CQ, // Completion Queue\r
-       E_MARK_QP, // Queue Pair\r
-       E_MARK_AV, // Address Vector (UD)\r
-       E_MARK_MG, // Multicast Group\r
-       E_MARK_MR, // Memory Region\r
-       E_MARK_MW, // Memory Window\r
-       E_MARK_INVALID,\r
-} ENUM_MARK;\r
-\r
-typedef enum {\r
-       E_MR_PHYS=1,\r
-       E_MR_SHARED,\r
-       E_MR_ANY,\r
-       E_MR_INVALID\r
-} ENUM_MR_TYPE;\r
-\r
-/*\r
- * Attribute cache for port info, kept to expedite local MAD processing.\r
- * Note that the cache accounts for the worst-case GID and PKEY table sizes\r
- * but is allocated from paged pool, so its footprint is not a concern.\r
- */\r
-\r
-typedef struct _guid_block\r
-{\r
-       boolean_t                               valid;\r
-       ib_guid_info_t                  tbl;\r
-\r
-}      mlnx_guid_block_t;\r
-\r
-typedef struct _port_info_cache\r
-{\r
-       boolean_t                               valid;\r
-       ib_port_info_t                  info;\r
-\r
-}      mlnx_port_info_cache_t;\r
-\r
-typedef struct _pkey_block\r
-{\r
-       boolean_t                               valid;\r
-       ib_pkey_table_t tbl;\r
-\r
-}      mlnx_pkey_block_t;\r
-\r
-typedef struct _sl_vl_cache\r
-{\r
-       boolean_t                               valid;\r
-       ib_slvl_table_t                 tbl;\r
-\r
-}      mlnx_sl_vl_cache_t;\r
-\r
-typedef struct _vl_arb_block\r
-{\r
-       boolean_t                               valid;\r
-       ib_vl_arb_table_t               tbl;\r
-\r
-}      mlnx_vl_arb_block_t;\r
-\r
-typedef struct _attr_cache\r
-{\r
-       mlnx_guid_block_t               guid_block[32];\r
-       mlnx_port_info_cache_t  port_info;\r
-       mlnx_pkey_block_t               pkey_tbl[2048];\r
-       mlnx_sl_vl_cache_t              sl_vl;\r
-       mlnx_vl_arb_block_t             vl_arb[4];\r
-\r
-}      mlnx_cache_t;\r
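The valid flags are what make the cache usable: a block can satisfy a local MAD query only after it has been filled in. A hypothetical lookup helper, reusing the struct names above (the helper itself is not part of the driver):

	/* Hypothetical illustration only. */
	static boolean_t
	try_cached_pkey_block(
		IN	const	mlnx_cache_t		*cache_p,
		IN			uint16_t			block_idx,
			OUT		ib_pkey_table_t		*tbl_p )
	{
		if( block_idx >= 2048 || !cache_p->pkey_tbl[block_idx].valid )
			return FALSE;	/* not cached: the caller must query the HCA */
		*tbl_p = cache_p->pkey_tbl[block_idx].tbl;
		return TRUE;
	}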
-\r
-typedef struct _ib_ca {\r
-       ENUM_MARK           mark;\r
-       HH_hca_hndl_t       hh_hndl;\r
-       ci_completion_cb_t  comp_cb_p;\r
-       ci_async_event_cb_t async_cb_p;\r
-       const void          *ca_context;\r
-       void                *cl_device_h;\r
-       u_int32_t           index;\r
-       cl_async_proc_t     *async_proc_mgr_p;\r
-       mlnx_cache_t            *cache; // Cached port attributes.\r
-       const void * __ptr64    p_dev_obj; // store underlying device object\r
-} mlnx_hob_t;\r
-\r
-typedef struct _ib_um_ca\r
-{\r
-       MDL                                     *p_mdl;\r
-       void                            *p_mapped_addr;\r
-       HH_hca_hndl_t           hh_hndl;\r
-       mlnx_hob_t                      *hob_p;\r
-       /* The next two fields must be grouped together as they are mapped to UM. */\r
-       HH_hca_dev_t            dev_info;\r
-       uint8_t                         ul_hca_res[1];  // Beginning of UL resource buffer.\r
-}      mlnx_um_ca_t;\r
-\r
-typedef struct {\r
-       cl_async_proc_item_t async_item;\r
-       HH_hca_hndl_t        hh_hndl;\r
-       HH_cq_hndl_t         hh_cq; // for completion\r
-       HH_event_record_t    hh_er; // for async events\r
-       void                 *private_data;\r
-} mlnx_cb_data_t;\r
-\r
-typedef struct {\r
-       cl_list_item_t  list_item;\r
-       HH_hca_hndl_t hh_hndl;\r
-       struct _hca_if {\r
-               HH_hca_hndl_t hh_hndl;\r
-               void *          kernel_crspace_addr;\r
-               ULONG           kernel_crspace_size;\r
-       } s;\r
-//     char                    *hca_name_p;\r
-       net64_t                 guid;\r
-       const void* __ptr64     p_dev_obj;              // hca device object\r
-//     ci_interface_t ifx;\r
-} mlnx_hca_t;\r
-\r
-typedef struct _ib_pd {        /* struct of PD related resources */\r
-       ENUM_MARK         mark;\r
-       cl_mutex_t        mutex;\r
-       u_int32_t         kernel_mode;\r
-       atomic32_t        count;\r
-       u_int32_t         hca_idx;\r
-       // mlnx_hob_t        *hob_p;\r
-       HH_hca_hndl_t     hh_hndl;        /* For HH direct access */\r
-       HH_pd_hndl_t      pd_num;         /* For HH-UL direct access */\r
-       HHUL_pd_hndl_t    hhul_pd_hndl;\r
-       void              *pd_ul_resources_p;\r
-} pd_info_t;\r
-\r
-typedef struct _ib_cq {        /* struct of CQ related resources */\r
-       ENUM_MARK         mark;\r
-       cl_mutex_t        mutex;\r
-       u_int32_t         hca_idx;\r
-       u_int32_t         kernel_mode;\r
-       // mlnx_hob_t        *hob_p;\r
-       HH_hca_hndl_t     hh_hndl;        /* For HH direct access */\r
-       HH_cq_hndl_t      cq_num;         /* For HH-UL direct access */\r
-//     HH_pd_hndl_t      pd_num;         /* For HH-UL direct access */\r
-       HHUL_cq_hndl_t    hhul_cq_hndl;\r
-       void              *cq_ul_resources_p;\r
-       const void        *cq_context;\r
-       KDPC                            dpc;\r
-       atomic32_t                      spl_qp_cnt;\r
-\r
-} cq_info_t;\r
-\r
-typedef struct _ib_qp {\r
-       ENUM_MARK         mark;\r
-       cl_mutex_t        mutex;\r
-       u_int32_t         hca_idx;\r
-       u_int32_t         kernel_mode;\r
-       // mlnx_hob_t        *hob_p;\r
-       HH_hca_hndl_t     hh_hndl;      // For HH direct access\r
-       HHUL_qp_hndl_t    hhul_qp_hndl;\r
-       IB_wqpn_t         qp_num;       // For direct HH-UL access\r
-       HH_pd_hndl_t      pd_num;       // For HH-UL direct access\r
-       IB_port_t         port;         // Valid for special QPs only\r
-       ib_qp_type_t      qp_type;      // Required for qp_query\r
-       u_int32_t         sq_signaled;  // Required for qp_query\r
-       ib_cq_handle_t          h_sq_cq;\r
-       ib_cq_handle_t          h_rq_cq;\r
-       u_int32_t         sq_size;\r
-       u_int32_t         rq_size;\r
-       VAPI_sr_desc_t    *send_desc_p;\r
-       VAPI_rr_desc_t    *recv_desc_p;\r
-       VAPI_sg_lst_entry_t *send_sge_p;\r
-       VAPI_sg_lst_entry_t *recv_sge_p;\r
-       void              *qp_ul_resources_p;\r
-       const void        *qp_context;\r
-} qp_info_t;\r
-\r
-typedef struct HOBUL_t {\r
-       HH_hca_hndl_t     hh_hndl;                /* For HH direct access */\r
-       HHUL_hca_hndl_t   hhul_hndl;              /* user level HCA resources handle for HH */\r
-       u_int32_t         cq_idx_mask;            /*                                                */\r
-       u_int32_t         qp_idx_mask;            /*                                                */\r
-       u_int32_t         vendor_id;              /* \                                              */\r
-       u_int32_t         device_id;              /*  >  3 items needed for initializing user level */\r
-       void              *hca_ul_resources_p;    /* /                                              */\r
-       MT_size_t         cq_ul_resources_sz;     /* Needed for allocating user resources for CQs  */\r
-       MT_size_t         qp_ul_resources_sz;     /* Needed for allocating user resources for QPs  */\r
-       MT_size_t         pd_ul_resources_sz;     /* Needed for allocating user resources for PDs  */\r
-       u_int32_t         max_cq;                 /* Max num. of CQs - size of following table */\r
-       cq_info_t         *cq_info_tbl;\r
-       u_int32_t         max_qp;                 /* Max num. of QPs - size of following table */\r
-       qp_info_t         *qp_info_tbl;\r
-       u_int32_t         max_pd;                 /* Max num. of PDs - size of following table */\r
-       pd_info_t         *pd_info_tbl;\r
-       u_int32_t         log2_mpt_size;\r
-       atomic32_t        count;\r
-} mlnx_hobul_t, *mlnx_hobul_hndl_t;\r
-\r
-typedef struct _ib_mr {\r
-       ENUM_MARK                               mark;\r
-       ENUM_MR_TYPE                    mr_type;\r
-       u_int64_t                               mr_start;       // TBD: IA64\r
-       u_int64_t                               mr_size;                // TBD: IA64\r
-//     u_int64_t                               mr_first_page_addr; // TBD : IA64\r
-//     u_int32_t                               mr_num_pages;\r
-       ib_pd_handle_t                  mr_pd_handle;\r
-       MOSAL_iobuf_t                   mr_iobuf;\r
-       VAPI_mrw_acl_t                  mr_acl;\r
-       VAPI_lkey_t                             mr_lkey;\r
-       MOSAL_protection_ctx_t  mr_prot_ctx;\r
-       MOSAL_mem_perm_t                mr_mosal_perm;\r
-} mlnx_mro_t;\r
-\r
-typedef struct _ib_mw {\r
-       ENUM_MARK         mark;\r
-       u_int32_t         hca_idx;\r
-       u_int32_t         pd_idx;\r
-       u_int32_t         mw_rkey;\r
-} mlnx_mwo_t;\r
-\r
-typedef struct _ib_mcast {\r
-       ENUM_MARK         mark;\r
-       IB_gid_t          mcast_gid;\r
-       u_int32_t         hca_idx;\r
-       u_int32_t         qp_num;\r
-       u_int32_t         kernel_mode;\r
-} mlnx_mcast_t;\r
-\r
-typedef struct _ib_av {\r
-       ENUM_MARK         mark;\r
-       u_int32_t         hca_idx;\r
-       u_int32_t         pd_idx;\r
-       u_int32_t         user_mode;\r
-       HHUL_ud_av_hndl_t h_av;\r
-} mlnx_avo_t;\r
-\r
-typedef mlnx_hob_t *mlnx_hca_h;\r
-\r
-// Global Variables\r
-//extern mlnx_hca_t       mlnx_hca_array[];\r
-//extern uint32_t         mlnx_num_hca;\r
-\r
-extern mlnx_hob_t   mlnx_hob_array[];\r
-extern mlnx_hobul_t *mlnx_hobul_array[];\r
-\r
-// Functions\r
-void\r
-setup_ci_interface(\r
-       IN              const   ib_net64_t                                      ca_guid,\r
-               OUT                     ci_interface_t                          *p_interface );\r
-\r
-void\r
-mlnx_hca_insert(\r
-       IN                              mlnx_hca_t                                      *p_hca );\r
-\r
-void\r
-mlnx_hca_remove(\r
-       IN                              mlnx_hca_t                                      *p_hca );\r
-\r
-mlnx_hca_t*\r
-mlnx_hca_from_guid(\r
-       IN                              ib_net64_t                                      guid );\r
-\r
-mlnx_hca_t*\r
-mlnx_hca_from_hh_hndl(\r
-       IN                              HH_hca_hndl_t                   hh_hndl );\r
-\r
-/*\r
-void\r
-mlnx_names_from_guid(\r
-       IN                              ib_net64_t                                      guid,\r
-               OUT                     char                                            **hca_name_p,\r
-               OUT                     char                                            **dev_name_p);\r
-*/\r
-\r
-cl_status_t\r
-mlnx_hobs_init( void );\r
-\r
-ib_api_status_t\r
-mlnx_hobs_insert(\r
-       IN                              mlnx_hca_t                                      *p_hca,\r
-               OUT                     mlnx_hob_t                                      **hob_p);\r
-\r
-void\r
-mlnx_hobs_get_handle(\r
-       IN                              mlnx_hob_t                                      *hob_p,\r
-               OUT                     HH_hca_hndl_t                           *hndl_p);\r
-\r
-ib_api_status_t\r
-mlnx_hobs_set_cb(\r
-       IN                              mlnx_hob_t                                      *hob_p, \r
-       IN                              ci_completion_cb_t                      comp_cb_p,\r
-       IN                              ci_async_event_cb_t                     async_cb_p,\r
-       IN              const   void* const                                     ib_context);\r
-\r
-ib_api_status_t\r
-mlnx_hobs_get_context(\r
-       IN                              mlnx_hob_t                                      *hob_p,\r
-               OUT                     void                                            **context_p);\r
-\r
-ib_api_status_t\r
-mlnx_hobs_create_device(\r
-       IN                              mlnx_hob_t                                      *hob_p,\r
-               OUT                     char*                                           dev_name);\r
-\r
-void\r
-mlnx_hobs_remove(\r
-       IN                              mlnx_hob_t                                      *hob_p);\r
-\r
-ib_api_status_t\r
-mlnx_hobs_lookup(\r
-       IN                              HH_hca_hndl_t                           hndl,\r
-               OUT                     mlnx_hob_t                                      **hca_p);\r
-\r
-mlnx_hobul_t *\r
-mlnx_hobs_get_hobul(\r
-       IN                              mlnx_hob_t                                      *hob_p);\r
-\r
-ib_api_status_t\r
-mlnx_hobul_new(\r
-       IN                              mlnx_hob_t                                      *hob_p,\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN                              void                                            *resources_p);\r
-\r
-void\r
-mlnx_hobul_get(\r
-       IN                              mlnx_hob_t                                      *hob_p,\r
-               OUT                     void                                            **resources_p );\r
-\r
-void\r
-mlnx_hobul_delete(\r
-       IN                              mlnx_hob_t                                      *hob_p);\r
-\r
-// Conversion Functions\r
-\r
-VAPI_mrw_acl_t\r
-map_ibal_acl(\r
-       IN                              ib_access_t                                     ibal_acl);\r
-\r
-ib_access_t\r
-map_vapi_acl(\r
-       IN                              VAPI_mrw_acl_t                          vapi_acl);\r
-\r
-ib_api_status_t\r
-mlnx_lock_region(\r
-       IN                              mlnx_mro_t                                      *mro_p,\r
-       IN                              boolean_t                                       um_call );\r
-\r
-ib_api_status_t\r
-mlnx_conv_ibal_mr_create(\r
-       IN                              u_int32_t                                       pd_idx,\r
-       IN      OUT                     mlnx_mro_t                                      *mro_p,\r
-       IN                              VAPI_mr_change_t                        change_flags,\r
-       IN                              ib_mr_create_t const            *p_mr_create,\r
-       IN                              boolean_t                                       um_call,\r
-               OUT                     HH_mr_t                                         *mr_props_p );\r
-\r
-ib_api_status_t\r
-mlnx_conv_ibal_pmr_create(\r
-       IN                              u_int32_t                                       pd_idx,\r
-       IN                              mlnx_mro_t                                      *mro_p,\r
-       IN                              ib_phys_create_t const          *p_pmr_create,\r
-               OUT                     HH_mr_t                                         *mr_props_p );\r
-\r
-void\r
-mlnx_conv_ibal_av(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN              const   ib_av_attr_t                            *ibal_av_p,\r
-               OUT                     VAPI_ud_av_t                            *vapi_av_p);\r
-\r
-void\r
-mlnx_conv_vapi_av(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN              const   VAPI_ud_av_t                            *vapi_av_p,\r
-               OUT                     ib_av_attr_t                            *ibal_av_p);\r
-\r
-int\r
-mlnx_map_vapi_cqe_status(\r
-       IN                              VAPI_wc_status_t                        vapi_status);\r
-\r
-int\r
-mlnx_map_vapi_cqe_type(\r
-       IN                              VAPI_cqe_opcode_t                       opcode);\r
-\r
-int\r
-mlnx_map_vapi_rna_type(\r
-       IN                              VAPI_remote_node_addr_type_t    rna);\r
-\r
-void\r
-mlnx_conv_vapi_mr_attr(\r
-       IN                              ib_pd_handle_t                          pd_h,\r
-       IN                              HH_mr_info_t                            *mr_info_p,\r
-               OUT                     ib_mr_attr_t                            *mr_query_p);\r
-\r
-void\r
-mlnx_conv_bind_req(\r
-       IN                              HHUL_qp_hndl_t                          hhul_qp_hndl,\r
-       IN                              ib_bind_wr_t* const                     p_mw_bind,\r
-               OUT                     HHUL_mw_bind_t                          *bind_prop_p);\r
-\r
-int\r
-mlnx_map_ibal_qp_type(\r
-       IN                              ib_qp_type_t                            ibal_qpt,\r
-               OUT                     VAPI_special_qp_t                       *vapi_qp_type_p);\r
-\r
-void\r
-mlnx_conv_qp_create_attr(\r
-       IN              const   ib_qp_create_t                          *create_attr_p,\r
-       IN                              HHUL_qp_init_attr_t                     *init_attr_p,\r
-               OUT                     VAPI_special_qp_t                       *vapi_qp_type_p);\r
-\r
-void\r
-mlnx_conv_vapi_qp_attr(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN                              VAPI_qp_attr_t                          *hh_qp_attr_p,\r
-               OUT                     ib_qp_attr_t                            *qp_attr_p);\r
-\r
-ib_api_status_t\r
-mlnx_conv_qp_modify_attr(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN                              ib_qp_type_t                            qp_type,\r
-       IN              const   ib_qp_mod_t                                     *modify_attr_p,\r
-               OUT                     VAPI_qp_attr_t                          *qp_attr_p, \r
-               OUT                     VAPI_qp_attr_mask_t                     *attr_mask_p);\r
-\r
-ib_api_status_t\r
-mlnx_conv_send_desc(\r
-       IN                              IB_ts_t                                         transport,\r
-       IN              const   ib_send_wr_t                            *ibal_send_wqe_p,\r
-               OUT                     VAPI_sr_desc_t                          *vapi_send_desc_p);\r
-\r
-ib_api_status_t\r
-mlnx_conv_recv_desc(\r
-       IN              const   ib_recv_wr_t                            *ibal_recv_wqe_p,\r
-               OUT                     VAPI_rr_desc_t                          *vapi_recv_desc_p);\r
-\r
-void\r
-mlnx_conv_vapi_hca_cap(\r
-       IN                              HH_hca_dev_t                            *hca_info_p,\r
-       IN                              VAPI_hca_cap_t                          *vapi_hca_cap_p,\r
-       IN                              VAPI_hca_port_t                         *vapi_hca_ports,\r
-               OUT                     ib_ca_attr_t                            *ca_attr_p);\r
-\r
-ib_api_status_t\r
-mlnx_get_hca_pkey_tbl(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN                              u_int8_t                                        port_num,\r
-       IN                              u_int16_t                                       num_entries,\r
-               OUT                     void*                                           table);\r
-\r
-ib_api_status_t\r
-mlnx_get_hca_gid_tbl(\r
-       IN                              HH_hca_hndl_t                           hh_hndl,\r
-       IN                              u_int8_t                                        port_num,\r
-       IN                              u_int16_t                                       num_entries,\r
-               OUT                     void*                                           table);\r
-\r
-ib_api_status_t\r
-mlnx_local_mad (\r
-       IN              const   ib_ca_handle_t                          h_ca,\r
-       IN              const   uint8_t                                         port_num,\r
-       IN              const   ib_av_attr_t                                    *p_av_src_attr, \r
-       IN              const   ib_mad_t                                        *p_mad_in,\r
-               OUT                     ib_mad_t                                        *p_mad_out );\r
-\r
-void\r
-mlnx_memory_if(\r
-       IN      OUT                     ci_interface_t                          *p_interface );\r
-\r
-void\r
-mlnx_ecc_if(\r
-       IN      OUT                     ci_interface_t                          *p_interface );\r
-\r
-void\r
-mlnx_direct_if(\r
-       IN      OUT                     ci_interface_t                          *p_interface );\r
-\r
-void\r
-mlnx_mcast_if(\r
-       IN      OUT                     ci_interface_t                          *p_interface );\r
-\r
-ib_api_status_t\r
-fw_access_ctrl(\r
-       IN              const   void* __ptr64                           context,\r
-       IN              const   void* __ptr64* const            handle_array    OPTIONAL,\r
-       IN                              uint32_t                                        num_handles,\r
-       IN                              ib_ci_op_t* const                       p_ci_op,\r
-       IN      OUT                     ci_umv_buf_t                            *p_umv_buf              OPTIONAL);\r
-\r
-#endif\r
index ea46a54..d3f5a12 100644 (file)
@@ -1,67 +1 @@
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- *     Redistribution and use in source and binary forms, with or\r
- *     without modification, are permitted provided that the following\r
- *     conditions are met:\r
- *\r
- *      - Redistributions of source code must retain the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer.\r
- *\r
- *      - Redistributions in binary form must reproduce the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer in the documentation and/or other materials\r
- *        provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id$\r
- */\r
-\r
-\r
-#if !defined( _HCA_DEBUG_H_ )\r
-#define _HCA_DEBUG_H_\r
-\r
-#include <complib/cl_debug.h>\r
-\r
-\r
-#define HCA_DBG_DEV                    (1 << 0)\r
-#define HCA_DBG_PNP                    (1 << 1)\r
-#define HCA_DBG_PO                     (1 << 2)\r
-\r
-#define HCA_DBG_ERROR          CL_DBG_ERROR\r
-#define HCA_DBG_FULL           CL_DBG_ALL\r
-\r
-\r
-extern uint32_t                g_mlnx_dbg_lvl;\r
-\r
-\r
-#define HCA_ENTER( msg_lvl )                   \\r
-       CL_ENTER( msg_lvl, g_mlnx_dbg_lvl )\r
-\r
-#define HCA_EXIT( msg_lvl )                            \\r
-       CL_EXIT( msg_lvl, g_mlnx_dbg_lvl )\r
-\r
-#define HCA_TRACE( msg_lvl, msg )              \\r
-       CL_TRACE( msg_lvl, g_mlnx_dbg_lvl, msg )\r
-\r
-#define HCA_TRACE_EXIT( msg_lvl, msg ) \\r
-       CL_TRACE_EXIT( msg_lvl, g_mlnx_dbg_lvl, msg )\r
-\r
-#define HCA_PRINT( msg_lvl, msg )              \\r
-       CL_PRINT( msg_lvl, g_mlnx_dbg_lvl, msg )\r
-\r
-#endif /* !defined( _HCA_DEBUG_H_ ) */\r
-\r
 \r
index 7bbdc3b..e69de29 100644 (file)
@@ -1,598 +0,0 @@
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
- * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- *     Redistribution and use in source and binary forms, with or\r
- *     without modification, are permitted provided that the following\r
- *     conditions are met:\r
- *\r
- *      - Redistributions of source code must retain the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer.\r
- *\r
- *      - Redistributions in binary form must reproduce the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer in the documentation and/or other materials\r
- *        provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id$\r
- */\r
-\r
-\r
-#include "hca_data.h"\r
-\r
-\r
-/* Controls whether to use the VAPI entrypoints in THH, or the IBAL native ones. */\r
-#define MLNX_SEND_NATIVE       1\r
-#define MLNX_RECV_NATIVE       1\r
-#define MLNX_POLL_NATIVE       1\r
-\r
-\r
-/*\r
-* Work Request Processing Verbs.\r
-*/\r
-ib_api_status_t\r
-mlnx_post_send (\r
-       IN      const   ib_qp_handle_t                                  h_qp,\r
-       IN                      ib_send_wr_t                                    *p_send_wr,\r
-               OUT             ib_send_wr_t                                    **pp_failed )\r
-{\r
-       ib_api_status_t         status = IB_SUCCESS;\r
-       qp_info_t                       *qp_info_p = (qp_info_t *)h_qp;\r
-       u_int32_t                       qp_idx  = 0;\r
-       mlnx_hobul_t            *hobul_p;\r
-#if !MLNX_SEND_NATIVE\r
-       HH_ret_t                        ret;\r
-       VAPI_sr_desc_t          send_desc;\r
-       VAPI_special_qp_t       vapi_qp_type;\r
-       IB_ts_t                         transport;\r
-       ib_send_wr_t            *wqe_p;\r
-#endif\r
-\r
-       // CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca %x qp %x\n", qp_info_p->hca_idx, qp_info_p->qp_num));\r
-       if( !p_send_wr )\r
-       {\r
-               status = IB_INVALID_PARAMETER;\r
-               goto cleanup;\r
-       }\r
-\r
-       if (!qp_info_p || E_MARK_QP != qp_info_p->mark) {\r
-               status = IB_INVALID_QP_HANDLE;\r
-               goto cleanup;\r
-       }\r
-\r
-       VALIDATE_INDEX(qp_info_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_QP_HANDLE, cleanup);\r
-       hobul_p = mlnx_hobul_array[qp_info_p->hca_idx];\r
-\r
-       CL_ASSERT(hobul_p);\r
-       CL_ASSERT(hobul_p->qp_info_tbl);\r
-\r
-       qp_idx = qp_info_p->qp_num & hobul_p->qp_idx_mask;\r
-       VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);\r
-\r
-#if MLNX_SEND_NATIVE\r
-       return THHUL_qpm_post_send_wrs( hobul_p->hhul_hndl,\r
-               qp_info_p->hhul_qp_hndl, p_send_wr, pp_failed );\r
-#else\r
-       // Assuming that posting all WQEs will succeed. Errors are handled below.\r
-       *pp_failed = NULL;\r
-\r
-       // Loop and post all descriptors in list, bail out on failure.\r
-       transport = mlnx_map_ibal_qp_type(qp_info_p->qp_type, &vapi_qp_type);\r
-\r
-       if (VAPI_REGULAR_QP != vapi_qp_type)\r
-       {\r
-               memset(&send_desc, 0, sizeof(send_desc));\r
-               // send_desc.sg_lst_p = &sg_lst_a[0];\r
-               send_desc.sg_lst_p = hobul_p->qp_info_tbl[qp_idx].send_sge_p;\r
-               if (!send_desc.sg_lst_p) {\r
-                       CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca %x qp %x\n", qp_info_p->hca_idx, qp_idx));\r
-               }\r
-               CL_ASSERT(send_desc.sg_lst_p);\r
-               for (wqe_p = p_send_wr; wqe_p; wqe_p = wqe_p->p_next)\r
-               {\r
-                       // sq_size is a misnomer; it is really max_sge\r
-                       CL_ASSERT( hobul_p->qp_info_tbl[qp_idx].sq_size >= wqe_p->num_ds);\r
-\r
-                       status = mlnx_conv_send_desc( transport, wqe_p, &send_desc);\r
-                       if (IB_SUCCESS != status) break;\r
-\r
-                       if (HH_OK != (ret = THHUL_qpm_post_send_req(hobul_p->hhul_hndl,\r
-                               qp_info_p->hhul_qp_hndl,\r
-                               &send_desc)))\r
-                       {\r
-                               status = (HH_EAGAIN        == ret) ? IB_INSUFFICIENT_RESOURCES :\r
-                                       (HH_EINVAL_SG_NUM == ret) ? IB_INVALID_MAX_SGE :\r
-                                       IB_ERROR;\r
-                               *pp_failed    =  wqe_p;\r
-                               // wqe_p->p_next = NULL;\r
-                               goto cleanup;\r
-                       }\r
-               }\r
-       }\r
-       else {\r
-               // For a regular QP, post the whole chain with the multi-WR send call\r
-               VAPI_sr_desc_t      desc_list[MLNX_MAX_WRS_PER_CHAIN];\r
-               VAPI_sg_lst_entry_t sg_list[MLNX_MAX_WRS_PER_CHAIN][MLNX_MAX_NUM_SGE];\r
-               u_int32_t           num_wrs;\r
-\r
-               wqe_p = p_send_wr;\r
-               while (wqe_p) {\r
-                       for (num_wrs = 0; (num_wrs < MLNX_MAX_WRS_PER_CHAIN) && wqe_p; \r
-                               wqe_p = wqe_p->p_next, num_wrs++)\r
-                       {\r
-                               desc_list[num_wrs].sg_lst_p = &sg_list[num_wrs][0];\r
-                               status = mlnx_conv_send_desc(transport, wqe_p, &desc_list[num_wrs]);\r
-                               if (status != IB_SUCCESS) {\r
-                                       CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl,\r
-                                               ("FAILED to map the send_desc %d\n", num_wrs));\r
-                                       break;\r
-                               }\r
-                       }\r
-                       if (num_wrs > 0) {\r
-                               if (num_wrs > 1) {\r
-                                       ret = THHUL_qpm_post_send_reqs(hobul_p->hhul_hndl,\r
-                                               qp_info_p->hhul_qp_hndl,\r
-                                               num_wrs, desc_list);\r
-                               } else {\r
-                                       ret = THHUL_qpm_post_send_req(hobul_p->hhul_hndl,\r
-                                               qp_info_p->hhul_qp_hndl,\r
-                                               desc_list);\r
-                               }\r
-                               if (HH_OK != ret) {\r
-                                       status = (HH_EAGAIN        == ret) ? IB_INSUFFICIENT_RESOURCES :\r
-                                               (HH_EINVAL_SG_NUM == ret) ? IB_INVALID_MAX_SGE :\r
-                                               IB_ERROR;\r
-                                       *pp_failed    =  wqe_p;\r
-                                       // wqe_p->p_next = NULL;\r
-                                       goto cleanup;\r
-                               }\r
-                       } else {\r
-                               /* no work requests this round */\r
-                               CL_TRACE (MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("NO WRs\n"));\r
-                               *pp_failed = wqe_p;\r
-                               break;\r
-                       }\r
-               }\r
-       }\r
-\r
-       return status;\r
-#endif\r
-\r
-cleanup:\r
-       CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
-       return status;\r
-}\r
-\r
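The non-native path above chains work requests into fixed-size batches of MLNX_MAX_WRS_PER_CHAIN before posting. The same chunking pattern, reduced to a stand-alone sketch (the wr struct and post_fn callback below are stand-ins for the driver's descriptor conversion and THHUL post calls):

	#include <stddef.h>

	#define MAX_PER_CHAIN 4   /* mirrors MLNX_MAX_WRS_PER_CHAIN */

	struct wr { struct wr *p_next; /* ...payload... */ };

	/* Walk a singly linked list and hand it to post_fn() in batches of at
	 * most MAX_PER_CHAIN, stopping at the first failed chain. */
	static int post_in_chains(struct wr *head,
	                          int (*post_fn)(struct wr **batch, size_t n))
	{
		while (head) {
			struct wr *batch[MAX_PER_CHAIN];
			size_t n = 0;
			while (head && n < MAX_PER_CHAIN) {
				batch[n++] = head;
				head = head->p_next;
			}
			int rc = post_fn(batch, n);
			if (rc)
				return rc;  /* the driver also reports the failed WR via *pp_failed */
		}
		return 0;
	}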
-ib_api_status_t \r
-mlnx_post_recv (\r
-       IN              const   ib_qp_handle_t                          h_qp,\r
-       IN                              ib_recv_wr_t                            *p_recv_wr,\r
-               OUT                     ib_recv_wr_t                            **pp_failed OPTIONAL )\r
-{\r
-       ib_api_status_t         status = IB_SUCCESS;\r
-       qp_info_t                       *qp_info_p = (qp_info_t *)h_qp;\r
-\r
-       u_int32_t                       qp_idx  = 0;\r
-       mlnx_hobul_t            *hobul_p;\r
-#if !MLNX_RECV_NATIVE\r
-       HH_ret_t                        ret;\r
-       ib_recv_wr_t            *wqe_p;\r
-       IB_ts_t                         transport;\r
-       VAPI_rr_desc_t          recv_desc;\r
-       VAPI_special_qp_t       vapi_qp_type;\r
-#endif\r
-\r
-       // CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca %x qp %x\n",\r
-       //                                           qp_info_p->hca_idx, qp_info_p->qp_num));\r
-       if( !p_recv_wr )\r
-       {\r
-               status = IB_INVALID_PARAMETER;\r
-               goto cleanup;\r
-       }\r
-\r
-       if (!qp_info_p || E_MARK_QP != qp_info_p->mark) {\r
-               status = IB_INVALID_QP_HANDLE;\r
-               goto cleanup;\r
-       }\r
-\r
-       VALIDATE_INDEX(qp_info_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_QP_HANDLE, cleanup);\r
-       hobul_p = mlnx_hobul_array[qp_info_p->hca_idx];\r
-\r
-       CL_ASSERT(hobul_p);\r
-       CL_ASSERT(hobul_p->qp_info_tbl);\r
-\r
-       qp_idx = qp_info_p->qp_num & hobul_p->qp_idx_mask;\r
-       VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);\r
-\r
-#if MLNX_RECV_NATIVE\r
-       return THHUL_qpm_post_recv_wrs( hobul_p->hhul_hndl, qp_info_p->hhul_qp_hndl,\r
-               p_recv_wr, pp_failed );\r
-#else\r
-       // Assuming that posting all WQEs will succeed. Errors are handled below.\r
-       *pp_failed = NULL;\r
-\r
-       // Loop and post all descriptors in list, bail out on failure.\r
-       transport = mlnx_map_ibal_qp_type(qp_info_p->qp_type, &vapi_qp_type);\r
-\r
-       if (VAPI_REGULAR_QP != vapi_qp_type)\r
-       {\r
-               memset(&recv_desc, 0, sizeof(recv_desc));\r
-               recv_desc.sg_lst_p = hobul_p->qp_info_tbl[qp_idx].recv_sge_p;\r
-               for (wqe_p = p_recv_wr; wqe_p; wqe_p = wqe_p->p_next)\r
-               {\r
-                       // rq_size is a misnomer; it is really max_sge\r
-                       CL_ASSERT( hobul_p->qp_info_tbl[qp_idx].rq_size >= wqe_p->num_ds);\r
-\r
-                       mlnx_conv_recv_desc(wqe_p, &recv_desc);\r
-                       if (HH_OK != (ret = THHUL_qpm_post_recv_req(hobul_p->hhul_hndl, qp_info_p->hhul_qp_hndl, &recv_desc)))\r
-                       {\r
-                               status = (HH_EAGAIN == ret) ? IB_INSUFFICIENT_RESOURCES :\r
-                                       (HH_EINVAL_SG_NUM == ret) ? IB_INVALID_MAX_SGE :\r
-                                       IB_ERROR;\r
-\r
-                               *pp_failed    =  wqe_p;\r
-                               // wqe_p->p_next = NULL;\r
-                               goto cleanup;\r
-                       }\r
-               }\r
-       }\r
-       else {\r
-               // For a regular QP, post the whole chain with the multi-WR receive call\r
-               VAPI_rr_desc_t      desc_list[MLNX_MAX_WRS_PER_CHAIN];\r
-               VAPI_sg_lst_entry_t sg_list[MLNX_MAX_WRS_PER_CHAIN][MLNX_MAX_NUM_SGE];\r
-               u_int32_t           num_wrs;\r
-\r
-               wqe_p = p_recv_wr;\r
-               while (wqe_p) {\r
-                       for (num_wrs = 0; (num_wrs < MLNX_MAX_WRS_PER_CHAIN) && wqe_p; \r
-                               wqe_p = wqe_p->p_next, num_wrs++)\r
-                       {\r
-                               desc_list [num_wrs].sg_lst_p = &sg_list [num_wrs][0];\r
-                               status = mlnx_conv_recv_desc(wqe_p, &desc_list[num_wrs]);\r
-                               if (status != IB_SUCCESS) {\r
-                                       CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl,\r
-                                               ("FAILED to map the recv_desc %d\n", num_wrs));\r
-                                       break;\r
-                               }\r
-                       }\r
-                       // CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("num_wrs %d\n", num_wrs));\r
-                       if (num_wrs > 0) {\r
-                               if (num_wrs > 1) {\r
-                                       ret = THHUL_qpm_post_recv_reqs (hobul_p->hhul_hndl,\r
-                                               qp_info_p->hhul_qp_hndl,\r
-                                               num_wrs, desc_list);\r
-                               } else {\r
-                                       ret = THHUL_qpm_post_recv_req (hobul_p->hhul_hndl,\r
-                                               qp_info_p->hhul_qp_hndl,\r
-                                               desc_list);\r
-                               }\r
-                               if (HH_OK != ret) {\r
-                                       status = (HH_EAGAIN        == ret) ? IB_INSUFFICIENT_RESOURCES :\r
-                                               (HH_EINVAL_SG_NUM == ret) ? IB_INVALID_MAX_SGE :\r
-                                               IB_ERROR;\r
-                                       *pp_failed    =  wqe_p;\r
-                                       // wqe_p->p_next = NULL;\r
-                                       goto cleanup;\r
-                               }\r
-                       } else {\r
-                               /* no work requests this round */\r
-                               CL_TRACE (MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("NO WRs\n"));\r
-                               *pp_failed = wqe_p;\r
-                               break;\r
-                       }\r
-               }\r
-       }\r
-\r
-       return status;\r
-#endif\r
-\r
-cleanup:\r
-       CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
-       return status;\r
-}\r
-\r
-/*\r
-* Completion Processing and Completion Notification Request Verbs.\r
-*/\r
-\r
-ib_api_status_t\r
-mlnx_peek_cq(\r
-       IN              const   ib_cq_handle_t                          h_cq,\r
-       OUT                             uint32_t* const                         p_n_cqes )\r
-{\r
-       ib_api_status_t                 status = IB_UNKNOWN_ERROR;\r
-\r
-       u_int32_t                               hca_idx = CQ_HCA_FROM_HNDL(h_cq);\r
-       u_int32_t                               cq_num  = CQ_NUM_FROM_HNDL(h_cq);\r
-       u_int32_t                               cq_idx;\r
-       mlnx_hobul_t                    *hobul_p;\r
-       HHUL_cq_hndl_t                  hhul_cq_hndl;\r
-\r
-       VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);\r
-       hobul_p = mlnx_hobul_array[hca_idx];\r
-       if (NULL == hobul_p || NULL == hobul_p->cq_info_tbl || NULL == hobul_p->pd_info_tbl) {\r
-               status =  IB_INVALID_CQ_HANDLE;\r
-               goto cleanup;\r
-       }\r
-\r
-       cq_idx = cq_num & hobul_p->cq_idx_mask;\r
-       VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);\r
-       if (hobul_p->cq_info_tbl[cq_idx].cq_num != cq_num ||\r
-               E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark)\r
-       {\r
-               status =  IB_INVALID_CQ_HANDLE;\r
-               goto cleanup;\r
-       }\r
-\r
-       hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;\r
-\r
-       status = THHUL_cqm_count_cqe( \r
-               hobul_p->hhul_hndl, hhul_cq_hndl, p_n_cqes );\r
-       if( status != IB_SUCCESS )\r
-               goto cleanup;\r
-\r
-       return IB_SUCCESS;\r
-\r
-cleanup:\r
-       CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
-       return status;\r
-}\r
-\r
-ib_api_status_t\r
-mlnx_poll_cq (\r
-       IN              const   ib_cq_handle_t                          h_cq,\r
-       IN      OUT                     ib_wc_t** const                         pp_free_wclist,\r
-               OUT                     ib_wc_t** const                         pp_done_wclist )\r
-{\r
-       ib_api_status_t         status = IB_UNKNOWN_ERROR;\r
-\r
-       u_int32_t                       hca_idx = CQ_HCA_FROM_HNDL(h_cq);\r
-       u_int32_t                       cq_num  = CQ_NUM_FROM_HNDL(h_cq);\r
-       u_int32_t                       cq_idx;\r
-       mlnx_hobul_t            *hobul_p;\r
-       HHUL_cq_hndl_t          hhul_cq_hndl;\r
-#if !MLNX_POLL_NATIVE\r
-       HH_ret_t                        ret;\r
-       VAPI_wc_desc_t          comp_desc;\r
-       ib_wc_t                         *wc_p;\r
-#endif\r
-\r
-       if (!pp_free_wclist || !pp_done_wclist || !*pp_free_wclist) {\r
-               status = IB_INVALID_PARAMETER;\r
-               goto cleanup;\r
-       }\r
-\r
-       VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);\r
-       hobul_p = mlnx_hobul_array[hca_idx];\r
-       if (NULL == hobul_p || NULL == hobul_p->cq_info_tbl || NULL == hobul_p->pd_info_tbl) {\r
-               status =  IB_INVALID_CQ_HANDLE;\r
-               goto cleanup;\r
-       }\r
-\r
-       cq_idx = cq_num & hobul_p->cq_idx_mask;\r
-       VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);\r
-       if (hobul_p->cq_info_tbl[cq_idx].cq_num != cq_num ||\r
-               E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) {\r
-                       status =  IB_INVALID_CQ_HANDLE;\r
-                       goto cleanup;\r
-               }\r
-\r
-       hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;\r
-\r
-       return THHUL_cqm_poll4wc(hobul_p->hhul_hndl, hhul_cq_hndl,\r
-               pp_free_wclist, pp_done_wclist );\r
-\r
-cleanup:\r
-       CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
-       return status;\r
-}\r
-\r
-ib_api_status_t\r
-mlnx_enable_cq_notify (\r
-       IN              const   ib_cq_handle_t                          h_cq,\r
-       IN              const   boolean_t                                       solicited )\r
-{\r
-       ib_api_status_t                 status = IB_UNKNOWN_ERROR;\r
-\r
-       u_int32_t                               hca_idx = CQ_HCA_FROM_HNDL(h_cq);\r
-       u_int32_t                               cq_num  = CQ_NUM_FROM_HNDL(h_cq);\r
-       u_int32_t                               cq_idx;\r
-       mlnx_hobul_t                    *hobul_p;\r
-       HHUL_cq_hndl_t                  hhul_cq_hndl;\r
-       VAPI_cq_notif_type_t    hh_request;\r
-\r
-       hh_request = (solicited) ?\r
-               VAPI_SOLIC_COMP: /* Notify on solicited completion event only */\r
-               VAPI_NEXT_COMP;  /* Notify on next completion */\r
-\r
-       VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);\r
-       hobul_p = mlnx_hobul_array[hca_idx];\r
-       if (NULL == hobul_p || NULL == hobul_p->cq_info_tbl || NULL == hobul_p->pd_info_tbl) {\r
-               status =  IB_INVALID_CQ_HANDLE;\r
-               goto cleanup;\r
-       }\r
-\r
-       cq_idx = cq_num & hobul_p->cq_idx_mask;\r
-       VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);\r
-       if (hobul_p->cq_info_tbl[cq_idx].cq_num != cq_num ||\r
-               E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark)\r
-       {\r
-               status =  IB_INVALID_CQ_HANDLE;\r
-               goto cleanup;\r
-       }\r
-\r
-       hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;\r
-\r
-       if (HH_OK != THHUL_cqm_req_comp_notif(hobul_p->hhul_hndl, hhul_cq_hndl, hh_request))\r
-       {\r
-               status = IB_ERROR;\r
-               goto cleanup;\r
-       }\r
-\r
-       return IB_SUCCESS;\r
-\r
-cleanup:\r
-       CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
-       return status;\r
-}\r
-\r
-ib_api_status_t\r
-mlnx_enable_ncomp_cq_notify (\r
-       IN              const   ib_cq_handle_t                          h_cq,\r
-       IN              const   uint32_t                                        n_cqes )\r
-{\r
-       ib_api_status_t                 status = IB_UNKNOWN_ERROR;\r
-\r
-       u_int32_t                               hca_idx = CQ_HCA_FROM_HNDL(h_cq);\r
-       u_int32_t                               cq_num  = CQ_NUM_FROM_HNDL(h_cq);\r
-       u_int32_t                               cq_idx;\r
-       mlnx_hobul_t                    *hobul_p;\r
-       HHUL_cq_hndl_t                  hhul_cq_hndl;\r
-\r
-       VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);\r
-       hobul_p = mlnx_hobul_array[hca_idx];\r
-       if (NULL == hobul_p || NULL == hobul_p->cq_info_tbl || NULL == hobul_p->pd_info_tbl) {\r
-               status =  IB_INVALID_CQ_HANDLE;\r
-               goto cleanup;\r
-       }\r
-\r
-       cq_idx = cq_num & hobul_p->cq_idx_mask;\r
-       VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);\r
-       if (hobul_p->cq_info_tbl[cq_idx].cq_num != cq_num ||\r
-               E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark)\r
-       {\r
-               status =  IB_INVALID_CQ_HANDLE;\r
-               goto cleanup;\r
-       }\r
-\r
-       hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;\r
-\r
-       if (HH_OK != THHUL_cqm_req_ncomp_notif(\r
-               hobul_p->hhul_hndl, hhul_cq_hndl, n_cqes ))\r
-       {\r
-               status = IB_ERROR;\r
-               goto cleanup;\r
-       }\r
-\r
-       return IB_SUCCESS;\r
-\r
-cleanup:\r
-       CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
-       return status;\r
-}\r
-\r
-ib_api_status_t\r
-mlnx_bind_mw (\r
-       IN              const   ib_mw_handle_t                          h_mw,\r
-       IN              const   ib_qp_handle_t                          h_qp,\r
-       IN                              ib_bind_wr_t* const                     p_mw_bind,\r
-               OUT                     net32_t* const                          p_rkey )\r
-{\r
-       ib_api_status_t         status = IB_UNKNOWN_ERROR;\r
-\r
-       u_int32_t                       hca_idx = QP_HCA_FROM_HNDL(h_qp);\r
-       u_int32_t                       qp_num  = QP_NUM_FROM_HNDL(h_qp);\r
-       u_int32_t                       qp_idx  = 0;\r
-       u_int32_t                       new_key;\r
-       mlnx_hobul_t            *hobul_p;\r
-       mlnx_mwo_t                      *mwo_p;\r
-       HHUL_qp_hndl_t          hhul_qp_hndl;\r
-       HHUL_mw_bind_t          bind_props;\r
-\r
-       // CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
-       mwo_p = (mlnx_mwo_t *)h_mw;\r
-       if (!mwo_p || mwo_p->mark != E_MARK_MW) {\r
-               status = IB_INVALID_MW_HANDLE;\r
-               goto cleanup;\r
-       }\r
-\r
-       if (!p_rkey) {\r
-               status = IB_INVALID_PARAMETER;\r
-               goto cleanup;\r
-       }\r
-\r
-       VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_QP_HANDLE, cleanup);\r
-       hobul_p = mlnx_hobul_array[hca_idx];\r
-       if (NULL == hobul_p || NULL == hobul_p->qp_info_tbl || NULL == hobul_p->pd_info_tbl) {\r
-               status =  IB_INVALID_QP_HANDLE;\r
-               goto cleanup;\r
-       }\r
-\r
-       qp_idx = qp_num & hobul_p->qp_idx_mask;\r
-       // CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("line %d - qp_idx 0x%x\n", __LINE__, qp_idx));\r
-\r
-       VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);\r
-\r
-#if 0\r
-       CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("line %d - qp_num 0x%x valid %d\n",\r
-               __LINE__,\r
-               hobul_p->qp_info_tbl[qp_idx].qp_num,\r
-               E_MARK_QP == hobul_p->qp_info_tbl[qp_idx].mark));\r
-#endif\r
-       if (hobul_p->qp_info_tbl[qp_idx].qp_num != qp_num ||\r
-               E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {\r
-                       status =  IB_INVALID_QP_HANDLE;\r
-                       goto cleanup;\r
-               }\r
-\r
-       /* Reject the bind if the RKEY passed in does not match the window's current RKEY. */\r
-       if ( cl_ntoh32( p_mw_bind->current_rkey ) != mwo_p->mw_rkey ) {\r
-               status = IB_INVALID_PARAMETER;\r
-               goto cleanup;\r
-       }\r
-\r
-       hhul_qp_hndl = hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl;\r
-\r
-       mlnx_conv_bind_req(hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl, p_mw_bind, &bind_props);\r
-\r
-       // Binding a window to zero length is in fact an unbind.\r
-       // If unbinding, the window rkey remains the same.\r
-       // If binding, the new rkey tag is the previous tag incremented by 1:\r
-       new_key = mwo_p->mw_rkey;\r
-       if( bind_props.size > 0 ) {\r
-               new_key += (1 << hobul_p->log2_mpt_size);\r
-       }\r
-\r
-       if (HH_OK != THHUL_qpm_post_bind_req(&bind_props, new_key))\r
-       {\r
-               status = IB_ERROR;\r
-               goto cleanup;\r
-       }\r
-\r
-       *p_rkey = cl_hton32( new_key );\r
-       // CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
-       return IB_SUCCESS;\r
-\r
-cleanup:\r
-       CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));\r
-       // CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);\r
-       return status;\r
-}\r
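A worked example of the rkey arithmetic above, under the illustrative assumption that log2_mpt_size is 17: the low 17 bits of the rkey hold the MPT index, and the bits above them hold the tag that is bumped on every successful bind.

	uint32_t old_rkey = 0x00060005;              /* index 0x5, tag 3             */
	uint32_t new_rkey = old_rkey + (1u << 17);   /* -> 0x00080005, tag becomes 4 */
	/* An unbind (bind_props.size == 0) leaves the rkey unchanged.               */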
-\r
-void\r
-mlnx_direct_if(\r
-       IN      OUT                     ci_interface_t                          *p_interface )\r
-{\r
-       p_interface->post_send = mlnx_post_send;\r
-       p_interface->post_recv = mlnx_post_recv;\r
-\r
-       p_interface->enable_ncomp_cq_notify = mlnx_enable_ncomp_cq_notify;\r
-       p_interface->peek_cq = mlnx_peek_cq;\r
-       p_interface->poll_cq = mlnx_poll_cq;\r
-       p_interface->enable_cq_notify = mlnx_enable_cq_notify;\r
-\r
-       p_interface->bind_mw = mlnx_bind_mw;\r
-}\r
index c16d71e..e69de29 100644 (file)
-/*\r
- * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.\r
- *\r
- * This software is available to you under the OpenIB.org BSD license\r
- * below:\r
- *\r
- *     Redistribution and use in source and binary forms, with or\r
- *     without modification, are permitted provided that the following\r
- *     conditions are met:\r
- *\r
- *      - Redistributions of source code must retain the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer.\r
- *\r
- *      - Redistributions in binary form must reproduce the above\r
- *        copyright notice, this list of conditions and the following\r
- *        disclaimer in the documentation and/or other materials\r
- *        provided with the distribution.\r
- *\r
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
- * SOFTWARE.\r
- *\r
- * $Id$\r
- */\r
-\r
-\r
-/*\r
- * Provides the driver entry points for the Tavor VPD.\r
- */\r
-\r
-\r
-#include "hca_driver.h"\r
-#include <wdmguid.h>\r
-#include <initguid.h>\r
-#pragma warning( push, 3 )\r
-#include "MdCard.h"\r
-#pragma warning( pop )\r
-#include <iba/ib_ci_ifc.h>\r
-#include <complib/cl_init.h>\r
-\r
-\r
-/*\r
- * UVP name does not include file extension.  For debug builds, UAL\r
- * will append "d.dll".  For release builds, UAL will append ".dll"\r
- */\r
-char                   mlnx_uvp_lib_name[MAX_LIB_NAME] = {"mt23108u"};\r
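A user-mode sketch of the naming convention described above; the buffer size, helper name and debug flag below are stand-ins, not part of the driver or of UAL:

	#include <stdio.h>

	#define MAX_LIB_NAME 32   /* stand-in for the driver's MAX_LIB_NAME */

	/* Append "d.dll" for debug builds and ".dll" for release builds. */
	static void make_uvp_file_name(const char *lib_name, int is_debug_build,
	                               char out[MAX_LIB_NAME + 8])
	{
		snprintf(out, MAX_LIB_NAME + 8, "%s%s",
		         lib_name, is_debug_build ? "d.dll" : ".dll");
	}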
-\r
-\r
-NTSTATUS\r
-DriverEntry(\r
-       IN                              PDRIVER_OBJECT                          p_driver_obj,\r
-       IN                              PUNICODE_STRING                         p_registry_path );\r
-\r
-static NTSTATUS\r
-__read_registry(\r
-       IN                              UNICODE_STRING* const           p_Param_Path );\r
-\r
-static void\r
-hca_drv_unload(\r
-       IN                              PDRIVER_OBJECT                          p_driver_obj );\r
-\r
-static NTSTATUS\r
-hca_sysctl(\r
-       IN                              PDEVICE_OBJECT                          p_dev_obj,\r
-       IN                              PIRP                                            p_irp );\r
-\r
-static NTSTATUS\r
-hca_add_device(\r
-       IN                              PDRIVER_OBJECT                          p_driver_obj,\r
-       IN                              PDEVICE_OBJECT                          p_pdo );\r
-//\r
-//static NTSTATUS\r
-//hca_enable(\r
-//     IN                              DEVICE_OBJECT* const            p_dev_obj );\r
-//\r
-//static NTSTATUS\r
-//hca_disable(\r
-//     IN                              DEVICE_OBJECT* const            p_dev_obj );\r
-\r
-static NTSTATUS\r
-hca_start(\r
-       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
-       IN                              IRP* const                                      p_irp, \r
-               OUT                     cl_irp_action_t* const          p_action );\r
-\r
-static void\r
-hca_release_resources(\r
-       IN                              DEVICE_OBJECT* const            p_dev_obj );\r
-\r
-//static NTSTATUS\r
-//hca_deactivate(\r
-//     IN                              DEVICE_OBJECT* const            p_dev_obj,\r
-//     IN                              IRP* const                                      p_irp, \r
-//             OUT                     cl_irp_action_t* const          p_action );\r
-//\r
-static NTSTATUS\r
-hca_query_bus_relations(\r
-       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
-       IN                              IRP* const                                      p_irp, \r
-               OUT                     cl_irp_action_t* const          p_action );\r
-\r
-static NTSTATUS\r
-hca_set_power(\r
-       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
-       IN                              IRP* const                                      p_irp, \r
-               OUT                     cl_irp_action_t* const          p_action );\r
-\r
-static ci_interface_t*\r
-__alloc_hca_ifc(\r
-       IN                              hca_dev_ext_t* const            p_ext );\r
-\r
-static NTSTATUS\r
-__get_ci_interface(\r
-       IN                                      DEVICE_OBJECT* const    p_dev_obj );\r
-\r
-static NTSTATUS\r
-__get_hca_handle(\r
-       IN                                      hca_dev_ext_t* const    p_ext );\r
-\r
-static NTSTATUS\r
-__hca_register(\r
-       IN                              DEVICE_OBJECT                           *p_dev_obj );\r
-\r
-//static void\r
-//__work_item_pnp_cb(\r
-//     IN                              DEVICE_OBJECT                           *p_dev_obj,\r
-//     IN                              hca_work_item_context_t         *p_context );\r
-\r
-static NTSTATUS\r
-__pnp_notify_target(\r
-       IN                              TARGET_DEVICE_REMOVAL_NOTIFICATION      *p_notify,\r
-       IN                              void                                            *context );\r
-\r
-static NTSTATUS\r
-__pnp_notify_ifc(\r
-       IN                              DEVICE_INTERFACE_CHANGE_NOTIFICATION    *p_notify,\r
-       IN                              void                                            *context );\r
-\r
-static NTSTATUS\r
-fw_access_pciconf (\r
-               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface,\r
-               IN              ULONG                                                   op_flag,\r
-               IN              PVOID                                                   p_buffer,\r
-               IN              ULONG                                                   offset,\r
-               IN              ULONG POINTER_ALIGNMENT                 length );\r
-\r
-static NTSTATUS\r
-fw_get_pci_bus_interface(\r
-       IN              DEVICE_OBJECT                           *p_dev_obj,\r
-       OUT             BUS_INTERFACE_STANDARD          *p_BusInterface );\r
-\r
-static NTSTATUS\r
-fw_flash_write_data (\r
-               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface,\r
-               IN              PVOID                                                   p_buffer,\r
-               IN              ULONG                                                   offset,\r
-               IN              ULONG POINTER_ALIGNMENT                 length );\r
-\r
-static NTSTATUS\r
-fw_flash_read_data (\r
-               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface,\r
-               IN              PVOID                                                   p_buffer,\r
-               IN              ULONG                                                   offset,\r
-               IN              ULONG POINTER_ALIGNMENT                 length );\r
-\r
-static NTSTATUS\r
-fw_flash_get_ca_guid(\r
-       IN              DEVICE_OBJECT           *p_dev_obj,\r
-       OUT             uint64_t                        *ca_guid );\r
-\r
-static NTSTATUS\r
-fw_flash_read4( \r
-       IN                      BUS_INTERFACE_STANDARD  *p_BusInterface,\r
-       IN                      uint32_t                                addr, \r
-       IN      OUT             uint32_t                                *p_data);\r
-\r
-static NTSTATUS\r
-fw_flash_readbuf(\r
-       IN              BUS_INTERFACE_STANDARD  *p_BusInterface,\r
-       IN              uint32_t                                offset,\r
-       IN OUT  void                                    *p_data,\r
-       IN              uint32_t                                len);\r
-       IN              uint32_t                                len);\r
-\r
-fw_set_bank(\r
-       IN              BUS_INTERFACE_STANDARD  *p_BusInterface,\r
-       IN              uint32_t                                bank );\r
-\r
-static NTSTATUS\r
-fw_flash_init(\r
-               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface  );\r
-\r
-static NTSTATUS\r
-fw_flash_deinit(\r
-               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface  );\r
-\r
-#ifdef ALLOC_PRAGMA\r
-#pragma alloc_text (INIT, DriverEntry)\r
-#pragma alloc_text (INIT, __read_registry)\r
-#pragma alloc_text (PAGE, hca_drv_unload)\r
-#pragma alloc_text (PAGE, hca_sysctl)\r
-#pragma alloc_text (PAGE, hca_add_device)\r
-#pragma alloc_text (PAGE, hca_start)\r
-//#pragma alloc_text (PAGE, hca_deactivate)\r
-//#pragma alloc_text (PAGE, hca_enable)\r
-//#pragma alloc_text (PAGE, hca_disable)\r
-#pragma alloc_text (PAGE, hca_release_resources)\r
-#pragma alloc_text (PAGE, hca_query_bus_relations)\r
-#pragma alloc_text (PAGE, hca_set_power)\r
-#pragma alloc_text (PAGE, __alloc_hca_ifc)\r
-#pragma alloc_text (PAGE, __get_ci_interface)\r
-#pragma alloc_text (PAGE, __get_hca_handle)\r
-#pragma alloc_text (PAGE, __hca_register)\r
-//#pragma alloc_text (PAGE, __work_item_pnp_cb)\r
-#pragma alloc_text (PAGE, __pnp_notify_target)\r
-#pragma alloc_text (PAGE, __pnp_notify_ifc)\r
-#pragma alloc_text (PAGE, fw_flash_get_ca_guid)\r
-#endif\r
-\r
-\r
-static const cl_vfptr_pnp_po_t hca_vfptr_pnp = {\r
-       "Tavor HCA VPD",\r
-       hca_start,                              /* StartDevice */\r
-       cl_irp_skip,\r
-       cl_irp_skip,\r
-       cl_do_sync_pnp,\r
-       cl_irp_skip,                    /* QueryRemove */\r
-       hca_release_resources,\r
-       cl_do_remove,                   /* Remove */\r
-       cl_irp_skip,                    /* CancelRemove */\r
-       cl_irp_skip,                    /* SurpriseRemove */\r
-       cl_irp_skip,            \r
-       cl_irp_skip,\r
-       cl_irp_skip,\r
-       cl_do_sync_pnp,\r
-       hca_query_bus_relations,\r
-       cl_irp_ignore,\r
-       cl_irp_skip,\r
-       cl_irp_ignore,\r
-       cl_irp_ignore,\r
-       cl_irp_ignore,\r
-       cl_irp_ignore,\r
-       cl_irp_ignore,\r
-       cl_irp_ignore,\r
-       cl_irp_ignore,\r
-       cl_irp_ignore,\r
-       cl_irp_ignore,\r
-       cl_irp_ignore,\r
-       cl_irp_ignore,                  /* QueryPower */\r
-       hca_set_power,                  /* SetPower */\r
-       cl_irp_ignore,                  /* PowerSequence */\r
-       cl_irp_ignore                   /* WaitWake */\r
-};\r
-\r
-\r
-NTSTATUS\r
-DriverEntry(\r
-       IN                              PDRIVER_OBJECT                  p_driver_obj,\r
-       IN                              PUNICODE_STRING                 p_registry_path )\r
-{\r
-       NTSTATUS                        status;\r
-       cl_status_t                     cl_status;\r
-\r
-       HCA_ENTER( HCA_DBG_DEV );\r
-\r
-       status = CL_INIT;\r
-       if( !NT_SUCCESS(status) )\r
-       {\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                       ("cl_init returned %08X.\n", status) );\r
-               return status;\r
-       }\r
-\r
-       status = __read_registry( p_registry_path );\r
-       if( !NT_SUCCESS( status ) )\r
-       {\r
-               CL_DEINIT;\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                       ("__read_registry returned 0x%X.\n", status) );\r
-               return status;\r
-       }\r
-\r
-       /* Initialize Adapter DB */\r
-       cl_status = mlnx_hobs_init();\r
-       if( cl_status != CL_SUCCESS )\r
-       {\r
-               CL_DEINIT;\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                       ("mlnx_hobs_init returned %#x.\n", cl_status) );\r
-               return cl_to_ntstatus( cl_status );\r
-       }\r
-//     cl_memclr( mlnx_hca_array, MLNX_MAX_HCA * sizeof(ci_interface_t) );\r
-\r
-       p_driver_obj->MajorFunction[IRP_MJ_PNP] = cl_pnp;\r
-       p_driver_obj->MajorFunction[IRP_MJ_POWER] = cl_power;\r
-       p_driver_obj->MajorFunction[IRP_MJ_SYSTEM_CONTROL] = hca_sysctl;\r
-       p_driver_obj->DriverUnload = hca_drv_unload;\r
-       p_driver_obj->DriverExtension->AddDevice = hca_add_device;\r
-\r
-       HCA_EXIT( HCA_DBG_DEV );\r
-       return STATUS_SUCCESS;\r
-}\r
-\r
-\r
-static NTSTATUS\r
-__read_registry(\r
-       IN                              UNICODE_STRING* const   p_registry_path )\r
-{\r
-       NTSTATUS                                        status;\r
-       /* Leave room for the terminating entry in the table below. */\r
-       RTL_QUERY_REGISTRY_TABLE        table[2];\r
-       UNICODE_STRING                          param_path;\r
-\r
-       HCA_ENTER( HCA_DBG_DEV );\r
-\r
-       RtlInitUnicodeString( &param_path, NULL );\r
-       param_path.MaximumLength = p_registry_path->Length + \r
-               sizeof(L"\\Parameters");\r
-       param_path.Buffer = cl_zalloc( param_path.MaximumLength );\r
-       if( !param_path.Buffer )\r
-       {\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR, \r
-                       ("Failed to allocate parameters path buffer.\n") );\r
-               return STATUS_INSUFFICIENT_RESOURCES;\r
-       }\r
-\r
-       RtlAppendUnicodeStringToString( &param_path, p_registry_path );\r
-       RtlAppendUnicodeToString( &param_path, L"\\Parameters" );\r
-\r
-       /*\r
-        * Clear the table.  This clears all the query callback pointers,\r
-        * and sets up the terminating table entry.\r
-        */\r
-       cl_memclr( table, sizeof(table) );\r
-\r
-       /* Setup the table entries. */\r
-       table[0].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
-       table[0].Name = L"DebugFlags";\r
-       table[0].EntryContext = &g_mlnx_dbg_lvl;\r
-       table[0].DefaultType = REG_DWORD;\r
-       table[0].DefaultData = &g_mlnx_dbg_lvl;\r
-       table[0].DefaultLength = sizeof(ULONG);\r
-\r
-       /* Have at it! */\r
-       status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE, \r
-               param_path.Buffer, table, NULL, NULL );\r
-\r
-       cl_free( param_path.Buffer );\r
-       HCA_EXIT( HCA_DBG_DEV );\r
-       return status;\r
-}\r
-\r
-\r
-static void\r
-hca_drv_unload(\r
-       IN                              PDRIVER_OBJECT                  p_driver_obj )\r
-{\r
-       HCA_ENTER( HCA_DBG_DEV );\r
-\r
-       UNUSED_PARAM( p_driver_obj );\r
-\r
-       CL_DEINIT;\r
-\r
-       HCA_EXIT( HCA_DBG_DEV );\r
-}\r
-\r
-\r
-static NTSTATUS\r
-hca_sysctl(\r
-       IN                              PDEVICE_OBJECT                          p_dev_obj,\r
-       IN                              PIRP                                            p_irp )\r
-{\r
-       NTSTATUS                status;\r
-       hca_dev_ext_t   *p_ext;\r
-\r
-       HCA_ENTER( HCA_DBG_DEV );\r
-\r
-       p_ext = p_dev_obj->DeviceExtension;\r
-\r
-       IoSkipCurrentIrpStackLocation( p_irp );\r
-       status = IoCallDriver( p_ext->cl_ext.p_next_do, p_irp );\r
-\r
-       HCA_EXIT( HCA_DBG_DEV );\r
-       return status;\r
-}\r
-\r
-\r
-static NTSTATUS\r
-hca_add_device(\r
-       IN                              PDRIVER_OBJECT                          p_driver_obj,\r
-       IN                              PDEVICE_OBJECT                          p_pdo )\r
-{\r
-       NTSTATUS                        status;\r
-       DEVICE_OBJECT           *p_dev_obj, *p_next_do;\r
-       hca_dev_ext_t           *p_ext;\r
-       //cl_status_t                   cl_status;\r
-\r
-       HCA_ENTER( HCA_DBG_PNP );\r
-\r
-       /*\r
-        * Create the device so that we have a device extension to store stuff in.\r
-        */\r
-       status = IoCreateDevice( p_driver_obj, sizeof(hca_dev_ext_t),\r
-               NULL, FILE_DEVICE_INFINIBAND, FILE_DEVICE_SECURE_OPEN,\r
-               FALSE, &p_dev_obj );\r
-       if( !NT_SUCCESS( status ) )\r
-       {\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                       ("IoCreateDevice returned 0x%08X.\n", status) );\r
-               return status;\r
-       }\r
-\r
-       p_ext = p_dev_obj->DeviceExtension;\r
-\r
-       //cl_status = cl_event_init( &p_ext->mutex, FALSE );\r
-       //if( cl_status != CL_SUCCESS )\r
-       //{\r
-       //      IoDeleteDevice( p_dev_obj );\r
-       //      HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-       //              ("cl_event_init returned %#x.\n", cl_status) );\r
-       //      return cl_to_ntstatus( cl_status );\r
-       //}\r
-       //cl_event_signal( &p_ext->mutex );\r
-\r
-       /* Attach to the device stack. */\r
-       p_next_do = IoAttachDeviceToDeviceStack( p_dev_obj, p_pdo );\r
-       if( !p_next_do )\r
-       {\r
-               //cl_event_destroy( &p_ext->mutex );\r
-               IoDeleteDevice( p_dev_obj );\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                       ("IoAttachDeviceToDeviceStack failed.\n") );\r
-               return STATUS_NO_SUCH_DEVICE;\r
-       }\r
-\r
-       /* Initialize the complib extension. */\r
-       cl_init_pnp_po_ext( p_dev_obj, p_next_do, p_pdo, g_mlnx_dbg_lvl,\r
-               &hca_vfptr_pnp, NULL );\r
-\r
-       p_ext->state = HCA_ADDED;\r
-\r
-       HCA_EXIT( HCA_DBG_PNP );\r
-       return status;\r
-}\r
-\r
-\r
-static NTSTATUS\r
-__get_ci_interface(\r
-       IN                                      DEVICE_OBJECT* const    p_dev_obj )\r
-{\r
-       NTSTATUS                        status;\r
-       IRP                                     *p_irp;\r
-       hca_dev_ext_t           *p_ext;\r
-       IO_STATUS_BLOCK         io_status;\r
-       IO_STACK_LOCATION       *p_io_stack;\r
-       KEVENT                          event;\r
-\r
-       HCA_ENTER( HCA_DBG_PNP );\r
-\r
-       p_ext = p_dev_obj->DeviceExtension;\r
-\r
-       KeInitializeEvent( &event, NotificationEvent, FALSE );\r
-\r
-       /* Query for the verbs interface. */\r
-       p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, p_ext->p_al_dev,\r
-               NULL, 0, NULL, &event, &io_status );\r
-       if( !p_irp )\r
-       {\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                       ("IoBuildSynchronousFsdRequest failed.\n") );\r
-               return STATUS_INSUFFICIENT_RESOURCES;\r
-       }\r
-\r
-       /* Format the IRP. */\r
-       p_io_stack = IoGetNextIrpStackLocation( p_irp );\r
-       p_io_stack->MinorFunction = IRP_MN_QUERY_INTERFACE;\r
-       p_io_stack->Parameters.QueryInterface.Version = IB_CI_INTERFACE_VERSION;\r
-       p_io_stack->Parameters.QueryInterface.Size = sizeof(ib_ci_ifc_t);\r
-       p_io_stack->Parameters.QueryInterface.Interface = \r
-               (INTERFACE*)&p_ext->ci_ifc;\r
-       p_io_stack->Parameters.QueryInterface.InterfaceSpecificData = NULL;\r
-       p_io_stack->Parameters.QueryInterface.InterfaceType = \r
-               &GUID_IB_CI_INTERFACE;\r
-       p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED;\r
-\r
-       /* Send the IRP. */\r
-       status = IoCallDriver( p_ext->p_al_dev, p_irp );\r
-       if( status == STATUS_PENDING )\r
-       {\r
-               KeWaitForSingleObject( &event, Executive, KernelMode, \r
-                       FALSE, NULL );\r
-\r
-               status = io_status.Status;\r
-       }\r
-\r
-       if( !NT_SUCCESS( status ) )\r
-       {\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                       ("Query interface for verbs returned %08x.\n", status) );\r
-               return status;\r
-       }\r
-\r
-       HCA_EXIT( HCA_DBG_PNP );\r
-       return status;\r
-}\r
-\r
-\r
-static NTSTATUS\r
-__get_hca_handle(\r
-       IN                                      hca_dev_ext_t* const    p_ext )\r
-{\r
-       NTSTATUS                        status;\r
-       IRP                                     *p_irp;\r
-       IO_STATUS_BLOCK         io_status;\r
-       IO_STACK_LOCATION       *p_io_stack;\r
-       KEVENT                          event;\r
-\r
-       HCA_ENTER( HCA_DBG_PNP );\r
-\r
-       KeInitializeEvent( &event, NotificationEvent, FALSE );\r
-\r
-       /* Query for the HCA handle (GUID_MD_INTERFACE). */\r
-       p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, p_ext->cl_ext.p_next_do,\r
-               NULL, 0, NULL, &event, &io_status );\r
-       if( !p_irp )\r
-       {\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                       ("IoBuildSynchronousFsdRequest failed.\n") );\r
-               return STATUS_INSUFFICIENT_RESOURCES;\r
-       }\r
-\r
-       /* Format the IRP. */\r
-       p_io_stack = IoGetNextIrpStackLocation( p_irp );\r
-       p_io_stack->MinorFunction = IRP_MN_QUERY_INTERFACE;\r
-       p_io_stack->Parameters.QueryInterface.Version = 1;\r
-       p_io_stack->Parameters.QueryInterface.Size = 0;\r
-       p_io_stack->Parameters.QueryInterface.Interface = NULL;\r
-       { \r
-               void *p = &p_ext->hca.s;\r
-               memset( p, 0, sizeof(p_ext->hca.s) );\r
-               p_io_stack->Parameters.QueryInterface.InterfaceSpecificData = p;\r
-       }\r
-       p_io_stack->Parameters.QueryInterface.InterfaceType = \r
-               &GUID_MD_INTERFACE;\r
-       p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED;\r
-\r
-       /* Send the IRP. */\r
-       status = IoCallDriver( p_ext->cl_ext.p_next_do, p_irp );\r
-       if( status == STATUS_PENDING )\r
-       {\r
-               KeWaitForSingleObject( &event, Executive, KernelMode, \r
-                       FALSE, NULL );\r
-\r
-               status = io_status.Status;\r
-       }\r
-\r
-       if( !NT_SUCCESS( status ) )\r
-       {\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                       ("Query interface for HCA handle returned %08x.\n", status) );\r
-               return status;\r
-       }\r
-       p_ext->hca.hh_hndl = p_ext->hca.s.hh_hndl;\r
-       \r
-       HCA_EXIT( HCA_DBG_PNP );\r
-       return status;\r
-}\r
-\r
-\r
-static NTSTATUS\r
-__pnp_notify_target(\r
-       IN                              TARGET_DEVICE_REMOVAL_NOTIFICATION      *p_notify,\r
-       IN                              void                                            *context )\r
-{\r
-       NTSTATUS                                                        status = STATUS_SUCCESS;\r
-       DEVICE_OBJECT                                           *p_dev_obj;\r
-       hca_dev_ext_t                                           *p_ext;\r
-\r
-       HCA_ENTER( HCA_DBG_PNP );\r
-\r
-       p_dev_obj = context;\r
-       p_ext = p_dev_obj->DeviceExtension;\r
-\r
-       if( IsEqualGUID( &p_notify->Event, &GUID_TARGET_DEVICE_QUERY_REMOVE ) )\r
-       {\r
-               if( p_ext->state == HCA_REGISTERED )\r
-               {\r
-                       /* Release AL's CI interface. */\r
-                       p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );\r
-                       p_ext->state = HCA_STARTED;\r
-               }\r
-\r
-               /* Release AL's file object so that it can unload. */\r
-               CL_ASSERT( p_ext->p_al_file_obj );\r
-               CL_ASSERT( p_ext->p_al_file_obj == p_notify->FileObject );\r
-               ObDereferenceObject( p_ext->p_al_file_obj );\r
-               p_ext->p_al_file_obj = NULL;\r
-               p_ext->p_al_dev = NULL;\r
-       }\r
-       else if( IsEqualGUID( &p_notify->Event, \r
-               &GUID_TARGET_DEVICE_REMOVE_COMPLETE ) )\r
-       {\r
-               if( p_ext->state == HCA_REGISTERED )\r
-               {\r
-                       /* Release AL's CI interface. */\r
-                       p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );\r
-                       p_ext->state = HCA_STARTED;\r
-               }\r
-\r
-               /* Release AL's file object so that it can unload. */\r
-               if( p_ext->p_al_file_obj )\r
-               {\r
-                       ObDereferenceObject( p_ext->p_al_file_obj );\r
-                       p_ext->p_al_file_obj = NULL;\r
-                       p_ext->p_al_dev = NULL;\r
-               }\r
-\r
-               /* Cancel our target device change registration. */\r
-               IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );\r
-               p_ext->pnp_target_entry = NULL;\r
-       }\r
-       else if( IsEqualGUID( &p_notify->Event, \r
-               &GUID_TARGET_DEVICE_REMOVE_CANCELLED ) )\r
-       {\r
-               /* Cancel our target device change registration. */\r
-               IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );\r
-               p_ext->pnp_target_entry = NULL;\r
-\r
-               /* Get the device object pointer for the AL. */\r
-               CL_ASSERT( !p_ext->p_al_file_obj );\r
-               CL_ASSERT( !p_ext->p_al_dev );\r
-               p_ext->p_al_file_obj = p_notify->FileObject;\r
-               p_ext->p_al_dev = IoGetRelatedDeviceObject( p_ext->p_al_file_obj );\r
-\r
-               status = IoRegisterPlugPlayNotification( \r
-                       EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj, \r
-                       p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj, \r
-                       &p_ext->pnp_target_entry );\r
-               if( !NT_SUCCESS( status ) )\r
-               {\r
-                       HCA_TRACE_EXIT( HCA_DBG_ERROR, \r
-                               ("IoRegisterPlugPlayNotification returned %08x.\n", status) );\r
-                       return status;\r
-               }\r
-\r
-               __hca_register( p_dev_obj );\r
-       }\r
-\r
-       HCA_EXIT( HCA_DBG_PNP );\r
-       return status;\r
-}\r
-\r
-\r
-static ci_interface_t*\r
-__alloc_hca_ifc(\r
-       IN                              hca_dev_ext_t* const            p_ext )\r
-{\r
-       ci_interface_t  *p_ifc;\r
-\r
-       HCA_ENTER( HCA_DBG_PNP );\r
-\r
-       p_ifc = ExAllocatePoolWithTag( PagedPool, sizeof(ci_interface_t), 'fiha' );\r
-       if( !p_ifc )\r
-       {\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                       ("Failed to allocate ci_interface_t (%d bytes).\n",\r
-                       sizeof(ci_interface_t)) );\r
-               return NULL;\r
-       }\r
-\r
-       setup_ci_interface( p_ext->hca.guid, p_ifc );\r
-\r
-       p_ifc->p_hca_dev = p_ext->cl_ext.p_pdo;\r
-       p_ifc->vend_id = p_ext->hca.hh_hndl->vendor_id;\r
-       p_ifc->dev_id = (uint16_t)p_ext->hca.hh_hndl->dev_id;\r
-       p_ifc->dev_revision = (uint16_t)p_ext->hca.hh_hndl->hw_ver;\r
-\r
-       HCA_EXIT( HCA_DBG_PNP );\r
-       return p_ifc;\r
-}\r
-\r
-\r
-static NTSTATUS\r
-__hca_register(\r
-       IN                              DEVICE_OBJECT                           *p_dev_obj )\r
-{\r
-       hca_dev_ext_t                   *p_ext;\r
-       NTSTATUS                                status;\r
-       ib_api_status_t                 ib_status;\r
-       ci_interface_t                  *p_hca_ifc;\r
-\r
-       HCA_ENTER( HCA_DBG_PNP );\r
-       \r
-       p_ext = p_dev_obj->DeviceExtension;\r
-\r
-       ASSERT( p_ext->state == HCA_STARTED );\r
-       ASSERT( p_ext->p_al_dev );\r
-\r
-       /* Get the AL's lower interface. */\r
-       status = __get_ci_interface( p_dev_obj );\r
-       if( !NT_SUCCESS( status ) )\r
-       {\r
-               HCA_TRACE( HCA_DBG_ERROR,\r
-                       ("__get_ci_interface returned %08x.\n", status) );\r
-               return status;\r
-       }\r
-\r
-       /* Allocate and populate our HCA interface structure. */\r
-       p_hca_ifc = __alloc_hca_ifc( p_ext );\r
-       if( !p_hca_ifc )\r
-       {\r
-               HCA_TRACE( HCA_DBG_ERROR, ("__alloc_hca_ifc failed.\n") );\r
-               return STATUS_NO_MEMORY;\r
-       }\r
-\r
-       /* Notify AL that we're available... */\r
-       ib_status = p_ext->ci_ifc.register_ca( p_hca_ifc );\r
-       ExFreePool( p_hca_ifc );\r
-       if( ib_status != IB_SUCCESS )\r
-       {\r
-               p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );\r
-               return STATUS_INSUFFICIENT_RESOURCES;\r
-       }\r
-\r
-       p_ext->state = HCA_REGISTERED;\r
-       return STATUS_SUCCESS;\r
-}\r
-\r
-\r
-//static void\r
-//__work_item_pnp_cb(\r
-//     IN                              DEVICE_OBJECT                           *p_dev_obj,\r
-//     IN                              hca_work_item_context_t         *p_context )\r
-//{\r
-//     hca_dev_ext_t                   *p_ext;\r
-//     NTSTATUS                                status;\r
-//\r
-//     HCA_ENTER( HCA_DBG_PNP );\r
-//     \r
-//     p_ext = p_dev_obj->DeviceExtension;\r
-//\r
-//     cl_event_wait_on( &p_ext->mutex, EVENT_NO_TIMEOUT, FALSE );\r
-//     do\r
-//     {\r
-//             /* Check the state under protection of the mutex. */\r
-//             if( p_ext->state != HCA_ADDED &&\r
-//                     p_ext->state != HCA_STARTED )\r
-//             {\r
-//                     HCA_TRACE( HCA_DBG_ERROR, ("Invalid state.\n") );\r
-//                     break;\r
-//             }\r
-//\r
-//             ASSERT( !p_ext->p_al_dev );\r
-//\r
-//             /* Get the AL device object. */\r
-//             HCA_TRACE( HCA_DBG_PNP, ("Calling IoGetDeviceObjectPointer.\n") );\r
-//             status = IoGetDeviceObjectPointer( &p_context->sym_link_name,\r
-//                     FILE_ALL_ACCESS, &p_ext->p_al_file_obj, &p_ext->p_al_dev );\r
-//             if( !NT_SUCCESS( status ) )\r
-//             {\r
-//                     HCA_TRACE( HCA_DBG_ERROR,\r
-//                             ("IoGetDeviceObjectPointer returned %08x.\n", status) );\r
-//                     break;\r
-//             }\r
-//\r
-//             cl_event_signal( &p_ext->mutex );\r
-//             /* Register for removal notification of the IB Fabric root device. */\r
-//             HCA_TRACE( HCA_DBG_PNP, \r
-//                     ("Registering for target notifications.\n") );\r
-//             status = IoRegisterPlugPlayNotification( \r
-//                     EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj, \r
-//                     p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj, \r
-//                     &p_ext->pnp_target_entry );\r
-//             cl_event_wait_on( &p_ext->mutex, EVENT_NO_TIMEOUT, FALSE );\r
-//             if( !NT_SUCCESS( status ) )\r
-//             {\r
-//                     ObDereferenceObject( p_ext->p_al_file_obj );\r
-//                     HCA_TRACE( HCA_DBG_ERROR, \r
-//                             ("IoRegisterPlugPlayNotification returned %08x.\n", status) );\r
-//                     break;\r
-//             }\r
-//\r
-//             if( p_ext->state == HCA_STARTED )\r
-//             {\r
-//                     /* Queue the work item again to complete the registration. */\r
-//                     IoQueueWorkItem( p_context->p_item, __work_item_started_cb, \r
-//                             DelayedWorkQueue, p_context->p_item );\r
-//             }\r
-//             else\r
-//             {\r
-//                     /* Free the work item. */\r
-//                     IoFreeWorkItem( p_context->p_item );\r
-//             }\r
-//     } while( !p_ext );\r
-//\r
-//     cl_event_signal( &p_ext->mutex );\r
-//     cl_free( p_context );\r
-//     return;\r
-//}\r
-\r
-\r
-static NTSTATUS\r
-__pnp_notify_ifc(\r
-       IN                              DEVICE_INTERFACE_CHANGE_NOTIFICATION    *p_notify,\r
-       IN                              void                                            *context )\r
-{\r
-       NTSTATUS                                status;\r
-       DEVICE_OBJECT                   *p_dev_obj;\r
-       hca_dev_ext_t                   *p_ext;\r
-\r
-       HCA_ENTER( HCA_DBG_PNP );\r
-\r
-       p_dev_obj = context;\r
-       p_ext = p_dev_obj->DeviceExtension;\r
-\r
-       if( !IsEqualGUID( &p_notify->Event, &GUID_DEVICE_INTERFACE_ARRIVAL ) )\r
-       {\r
-               HCA_EXIT( HCA_DBG_PNP );\r
-               return STATUS_SUCCESS;\r
-       }\r
-\r
-       /*\r
-        * Sanity check.  We should only be getting notifications of the \r
-        * CI interface exported by AL.\r
-        */\r
-       ASSERT( \r
-               IsEqualGUID( &p_notify->InterfaceClassGuid, &GUID_IB_CI_INTERFACE ) );\r
-\r
-       if( p_ext->state != HCA_STARTED )\r
-       {\r
-               HCA_TRACE( HCA_DBG_ERROR, ("Invalid state: %d\n", p_ext->state) );\r
-               return STATUS_SUCCESS;\r
-       }\r
-\r
-       ASSERT( !p_ext->p_al_dev );\r
-       ASSERT( !p_ext->p_al_file_obj );\r
-\r
-       /* Get the AL device object. */\r
-       HCA_TRACE( HCA_DBG_PNP, ("Calling IoGetDeviceObjectPointer.\n") );\r
-       status = IoGetDeviceObjectPointer( p_notify->SymbolicLinkName,\r
-               FILE_ALL_ACCESS, &p_ext->p_al_file_obj, &p_ext->p_al_dev );\r
-       if( !NT_SUCCESS( status ) )\r
-       {\r
-               HCA_TRACE( HCA_DBG_ERROR,\r
-                       ("IoGetDeviceObjectPointer returned %08x.\n", status) );\r
-               return STATUS_SUCCESS;\r
-       }\r
-\r
-       /* Register for removal notification of the IB Fabric root device. */\r
-       HCA_TRACE( HCA_DBG_PNP, \r
-               ("Registering for target notifications.\n") );\r
-       status = IoRegisterPlugPlayNotification( \r
-               EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj, \r
-               p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj, \r
-               &p_ext->pnp_target_entry );\r
-       if( !NT_SUCCESS( status ) )\r
-       {\r
-               ObDereferenceObject( p_ext->p_al_file_obj );\r
-               p_ext->p_al_file_obj = NULL;\r
-               p_ext->p_al_dev = NULL;\r
-               HCA_TRACE( HCA_DBG_ERROR, \r
-                       ("IoRegisterPlugPlayNotification returned %08x.\n", status) );\r
-               return STATUS_SUCCESS;\r
-       }\r
-\r
-       status = __hca_register( p_dev_obj );\r
-       if( !NT_SUCCESS( status ) )\r
-       {\r
-               IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );\r
-               p_ext->pnp_target_entry = NULL;\r
-               ObDereferenceObject( p_ext->p_al_file_obj );\r
-               p_ext->p_al_file_obj = NULL;\r
-               p_ext->p_al_dev = NULL;\r
-               HCA_TRACE( HCA_DBG_ERROR,\r
-                       ("__hca_register returned %08x.\n", status) );\r
-               return STATUS_SUCCESS;\r
-       }\r
-\r
-       HCA_EXIT( HCA_DBG_PNP );\r
-       return STATUS_SUCCESS;\r
-}\r
-//\r
-//\r
-//static NTSTATUS\r
-//hca_enable(\r
-//     IN                              DEVICE_OBJECT* const            p_dev_obj )\r
-//{\r
-//     PIO_WORKITEM    p_item;\r
-//     hca_dev_ext_t   *p_ext;\r
-//\r
-//     HCA_ENTER( HCA_DBG_PNP );\r
-//\r
-//     p_ext = p_dev_obj->DeviceExtension;\r
-//\r
-//     /* Check for the AL device reference. */\r
-//     if( p_ext->p_al_dev )\r
-//     {\r
-//             __hca_register( p_dev_obj );\r
-//     }\r
-//     p_ext->state = HCA_STARTED;\r
-//\r
-//     HCA_EXIT( HCA_DBG_PNP );\r
-//     return STATUS_SUCCESS;\r
-//}\r
-\r
-\r
-static NTSTATUS\r
-hca_start(\r
-       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
-       IN                              IRP* const                                      p_irp, \r
-               OUT                     cl_irp_action_t* const          p_action )\r
-{\r
-       NTSTATUS                        status;\r
-       hca_dev_ext_t           *p_ext;\r
-\r
-       HCA_ENTER( HCA_DBG_PNP );\r
-\r
-       /* Handled on the way up. */\r
-       status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action );\r
-       if( !NT_SUCCESS( status ) )\r
-       {\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR, \r
-                       ("Lower drivers failed IRP_MN_START_DEVICE.\n") );\r
-               return status;\r
-       }\r
-\r
-       p_ext = p_dev_obj->DeviceExtension;\r
-\r
-       /* Get the HH HCA handle for this instance. */\r
-       status = __get_hca_handle( p_ext );\r
-       if( !NT_SUCCESS( status ) )\r
-       {\r
-               HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                       ("Failed to get HH HCA handle.\n") );\r
-               return status;\r
-       }\r
-\r
-       {\r
-               VAPI_hca_cap_t hca_cap;\r
-               int rc;\r
-\r
-               if (HH_OK != THH_hob_open_hca(p_ext->hca.hh_hndl, NULL, NULL)) {\r
-                       status = STATUS_UNSUCCESSFUL;\r
-                       return status;\r
-               }\r
-               \r
-               rc = THH_hob_query(p_ext->hca.hh_hndl, &hca_cap);\r
-               if (rc != HH_OK) {\r
-                       HCA_TRACE( HCA_DBG_ERROR, ("Error on getting guid (%#x).\n", rc) );\r
-                       status = STATUS_UNSUCCESSFUL;\r
-                       return status;\r
-               }\r
-               p_ext->hca.guid = *(uint64_t *)hca_cap.node_guid;\r
-               p_ext->hca.p_dev_obj = p_ext->cl_ext.p_pdo;\r
-\r
-               THH_hob_close_hca(p_ext->hca.hh_hndl);\r
-       }\r
-\r
-       mlnx_hca_insert( &p_ext->hca );\r
-\r
-       /*\r
-        * Change the state before registering for notifications, since the\r
-        * interface-arrival callback can be invoked before the registration\r
-        * call below returns.\r
-        */\r
-       p_ext->state = HCA_STARTED;\r
-       /* Register for interface arrival of the IB_AL device. */\r
-       status = IoRegisterPlugPlayNotification(\r
-               EventCategoryDeviceInterfaceChange,\r
-               PNPNOTIFY_DEVICE_INTERFACE_INCLUDE_EXISTING_INTERFACES,\r
-               (void*)&GUID_IB_CI_INTERFACE, p_dev_obj->DriverObject,\r
-               __pnp_notify_ifc, p_dev_obj, &p_ext->pnp_ifc_entry );\r
-       if( !NT_SUCCESS( status ) )\r
-       {\r
-               p_ext->state = HCA_ADDED;\r
-               HCA_TRACE( HCA_DBG_ERROR, \r
-                       ("IoRegisterPlugPlayNotification returned %08x.\n", status) );\r
-       }\r
-\r
-       HCA_EXIT( HCA_DBG_PNP );\r
-       return status;\r
-}\r
-\r
-\r
-static void\r
-hca_release_resources(\r
-       IN                              DEVICE_OBJECT* const            p_dev_obj )\r
-{\r
-       hca_dev_ext_t           *p_ext;\r
-\r
-       HCA_ENTER( HCA_DBG_PNP );\r
-\r
-       p_ext = p_dev_obj->DeviceExtension;\r
-\r
-       if( p_ext->state == HCA_REGISTERED )\r
-       {\r
-               CL_ASSERT( p_ext->ci_ifc.deregister_ca );\r
-               CL_ASSERT( p_ext->p_al_dev );\r
-               CL_ASSERT( p_ext->p_al_file_obj );\r
-               /* Notify AL that the CA is being removed. */\r
-               p_ext->ci_ifc.deregister_ca( p_ext->hca.guid );\r
-               /* Release AL's CI interface. */\r
-               p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );\r
-       }\r
-\r
-       if( p_ext->pnp_target_entry )\r
-       {\r
-               ASSERT( p_ext->pnp_ifc_entry );\r
-               IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );\r
-       }\r
-\r
-       if( p_ext->pnp_ifc_entry )\r
-               IoUnregisterPlugPlayNotification( p_ext->pnp_ifc_entry );\r
-\r
-       if( p_ext->p_al_file_obj )\r
-               ObDereferenceObject( p_ext->p_al_file_obj );\r
-\r
-       //cl_event_destroy( &p_ext->mutex );\r
-\r
-       HCA_EXIT( HCA_DBG_PNP );\r
-}\r
-//\r
-//\r
-//static NTSTATUS\r
-//hca_disable(\r
-//     IN                                      DEVICE_OBJECT* const    p_dev_obj )\r
-//{\r
-//     hca_dev_ext_t   *p_ext;\r
-//\r
-//     HCA_ENTER( HCA_DBG_PNP );\r
-//\r
-//     p_ext = p_dev_obj->DeviceExtension;\r
-//\r
-//     ASSERT( p_ext->state == HCA_STARTED );\r
-//\r
-//     if( p_ext->state == HCA_REGISTERED )\r
-//     {\r
-//             /* Notify AL that the CA is being removed. */\r
-//             p_ext->ci_ifc.deregister_ca( p_ext->hca.guid );\r
-//             /* Release AL's CI interface. */\r
-//             p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );\r
-//\r
-//             p_ext->state = HCA_STARTED;\r
-//     }\r
-//\r
-//     HCA_EXIT( HCA_DBG_PNP );\r
-//     return STATUS_SUCCESS;\r
-//}\r
-//\r
-//\r
-//static NTSTATUS\r
-//hca_deactivate(\r
-//     IN                                      DEVICE_OBJECT* const    p_dev_obj,\r
-//     IN                                      IRP* const                              p_irp, \r
-//             OUT                             cl_irp_action_t* const  p_action )\r
-//{\r
-//     NTSTATUS        status;\r
-//     hca_dev_ext_t   *p_ext;\r
-//\r
-//     HCA_ENTER( HCA_DBG_PNP );\r
-//\r
-//     UNUSED_PARAM( p_irp );\r
-//\r
-//     p_ext = p_dev_obj->DeviceExtension;\r
-//\r
-//     *p_action = IrpSkip;\r
-//\r
-//     status = hca_disable( p_dev_obj );\r
-//\r
-//     mlnx_hca_remove( &p_ext->hca );\r
-//\r
-//     p_ext->hca.hh_hndl = NULL;\r
-//\r
-//     p_ext->state = HCA_ADDED;\r
-//\r
-//     HCA_EXIT( HCA_DBG_PNP );\r
-//     return status;\r
-//}\r
-\r
-\r
-static NTSTATUS\r
-hca_query_bus_relations(\r
-       IN                                      DEVICE_OBJECT* const    p_dev_obj,\r
-       IN                                      IRP* const                              p_irp, \r
-               OUT                             cl_irp_action_t* const  p_action )\r
-{\r
-       NTSTATUS                        status;\r
-       DEVICE_RELATIONS        *p_rel;\r
-       hca_dev_ext_t           *p_ext;\r
-\r
-       HCA_ENTER( HCA_DBG_PNP );\r
-\r
-       p_ext = p_dev_obj->DeviceExtension;\r
-\r
-       //cl_event_wait_on( &p_ext->mutex, EVENT_NO_TIMEOUT, FALSE );\r
-       if( p_ext->state == HCA_REGISTERED )\r
-       {\r
-               status = p_ext->ci_ifc.get_relations( p_ext->hca.guid, p_irp );\r
-               if( !NT_SUCCESS( status ) )\r
-               {\r
-                       //cl_event_signal( &p_ext->mutex );\r
-                       *p_action = IrpComplete;\r
-                       HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                               ("AL get_relations returned %08x.\n", status) );\r
-                       return status;\r
-               }\r
-       }\r
-       else\r
-       {\r
-               status = cl_alloc_relations( p_irp, 1 );\r
-               if( !NT_SUCCESS( status ) )\r
-               {\r
-                       HCA_TRACE_EXIT( HCA_DBG_ERROR,\r
-                               ("cl_alloc_relations returned %08x.\n", status) );\r
-                       return status;\r
-               }\r
-\r
-               p_rel = (DEVICE_RELATIONS*)p_irp->IoStatus.Information;\r
-               p_rel->Count = 0;\r
-               p_rel->Objects[0] = NULL;\r
-       }\r
-\r
-       //cl_event_signal( &p_ext->mutex );\r
-\r
-       *p_action = IrpPassDown;\r
-       HCA_EXIT( HCA_DBG_PNP );\r
-       return STATUS_SUCCESS;\r
-}\r
-\r
-\r
-static NTSTATUS\r
-hca_set_power(\r
-       IN                              DEVICE_OBJECT* const            p_dev_obj,\r
-       IN                              IRP* const                                      p_irp, \r
-               OUT                     cl_irp_action_t* const          p_action )\r
-{\r
-       NTSTATUS                        status;\r
-       hca_dev_ext_t           *p_ext;\r
-       IO_STACK_LOCATION       *p_io_stack;\r
-\r
-       HCA_ENTER( HCA_DBG_PNP );\r
-\r
-       p_ext = p_dev_obj->DeviceExtension;\r
-\r
-       *p_action = IrpSkip;\r
-\r
-       p_io_stack = IoGetCurrentIrpStackLocation( p_irp );\r
-\r
-       if( p_io_stack->Parameters.Power.Type != DevicePowerState )\r
-               return STATUS_SUCCESS;\r
-\r
-       switch( p_io_stack->Parameters.Power.State.DeviceState )\r
-       {\r
-       case PowerDeviceD0:\r
-               if( p_ext->p_al_dev )\r
-                       status = __hca_register( p_dev_obj );\r
-               else\r
-                       status = STATUS_SUCCESS;\r
-               break;\r
-\r
-       default:\r
-               if( p_ext->state == HCA_REGISTERED )\r
-               {\r
-                       /* Notify AL that the CA is being removed. */\r
-                       p_ext->ci_ifc.deregister_ca( p_ext->hca.guid );\r
-                       /* Release AL's CI interface. */\r
-                       p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );\r
-\r
-                       p_ext->state = HCA_STARTED;\r
-               }\r
-               status = STATUS_SUCCESS;\r
-               break;\r
-       }\r
-\r
-       if( !NT_SUCCESS( status ) )\r
-               *p_action = IrpComplete;\r
-\r
-       HCA_EXIT( HCA_DBG_PNP );\r
-       return status;\r
-}\r
-\r
-typedef struct Primary_Sector{\r
-       uint32_t fi_addr;\r
-       uint32_t fi_size;\r
-       uint32_t signature;\r
-       uint32_t fw_reserved[5];\r
-       uint32_t vsd[56];\r
-       uint32_t branch_to;\r
-       uint32_t crc016;\r
-} primary_sector_t;\r
-\r
-static uint32_t old_dir;\r
-static uint32_t old_pol;\r
-static uint32_t old_mod;\r
-static uint32_t old_dat;\r
-\r
-static NTSTATUS\r
-fw_access_pciconf (\r
-               IN              BUS_INTERFACE_STANDARD                  *p_BusInterface,\r
-               IN              ULONG                                                   op_flag,\r
-               IN              PVOID                                                   p_buffer,\r
-               IN              ULONG                                                   offset,\r
-               IN              ULONG POINTER_ALIGNMENT                 length )\r
-{\r
-\r
-       ULONG                           bytes;  \r
-       NTSTATUS                        status = STATUS_SUCCESS;\r
-\r
-       PAGED_CODE();\r
-\r
-       if (p_BusInterface)\r
-       {\r
-\r
-               bytes = p_BusInterface->SetBusData(\r
-                                               p_BusInterface->Context,\r
-                                               PCI_WHICHSPACE_CONFIG,\r
-                                               (PVOID)&offset,\r
-                                               PCI_CONF_ADDR,\r
-                                               sizeof(ULONG) );\r
-\r
-               if( op_flag == 0 )\r
-               {\r
-                       if ( bytes )\r
-                               bytes = p_BusInterface->GetBusData(\r
-                                                       p_BusInterface->Context,\r
-                                                       PCI_WHICHSPACE_CONFIG,\r
-                                                       p_buffer,\r
-                                                       PCI_CONF_DATA,\r
-                                                       length );\r
-                       if ( !bytes )\r
-                               status = STATUS_NOT_SUPPORTED;\r
-               }\r
-\r
-               else\r
-               {\r
-                       if ( bytes )\r
-                               bytes = p_BusInterface->SetBusData(\r
-                                                       p_BusInterface->Context,\r
-                                                       PCI_WHICHSPACE_CONFIG,\r
-                                                       p_buffer,\r
-                                                       PCI_CONF_DATA,\r
-                                                       length);\r
-\r
-                       if ( !bytes )\r
-                               status = STATUS_NOT_SUPPORTED;\r
-               }\r
-       }\r
-       return status;\r
-}\r
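-\r
-/*\r
- * Illustration only (not the driver's actual fw_flash_read4, declared\r
- * above): a minimal sketch of how a dword read through the\r
- * PCI_CONF_ADDR/PCI_CONF_DATA window could be built on fw_access_pciconf,\r
- * assuming op_flag 0 selects a read as in the function above.\r
- */\r
-#if 0\r
-static NTSTATUS\r
-__example_read4(\r
-       IN              BUS_INTERFACE_STANDARD  *p_BusInterface,\r
-       IN              uint32_t                                addr,\r
-               OUT     uint32_t                                *p_data )\r
-{\r
-       /* The offset is latched via PCI_CONF_ADDR, then the dword is read\r
-        * back through PCI_CONF_DATA by fw_access_pciconf. */\r
-       return fw_access_pciconf( p_BusInterface, 0, p_data, addr,\r
-               sizeof(uint32_t) );\r
-}\r
-#endif\r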
-\r
-static NTSTATUS\r
-fw_get_pci_bus_interface(\r
-       IN              DEVICE_OBJECT                           *p_dev_obj,\r
-       OUT             BUS_INTERFACE_STANDARD          *p_BusInterface )\r
-{\r
-       KEVENT event;\r
-       NTSTATUS status;\r
-       PIRP p_irp;\r
-       IO_STATUS_BLOCK ioStatus;\r
-       PIO_STACK_LOCATION p_irpStack;\r
-       PDEVICE_OBJECT p_target_obj;\r
-\r
-       KeInitializeEvent( &event, NotificationEvent, FALSE );\r
-\r
-       p_target_obj = IoGetAttachedDeviceReference( p_dev_obj );\r
-\r
-       p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP,\r
-                                        &nbs