--- /dev/null
+DIRS=\\r
+ kernel \\r
+ user\r
--- /dev/null
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: hca_data.c 148 2005-07-12 07:48:46Z sleybo $
+ */
+
+
+#include "mthca_dev.h"
+
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "hca_data.tmh"
+#endif
+
+
+/*
+ * Translate an IBAL access-control mask (ib_access_t) into the
+ * equivalent MTHCA QP access mask.  Only the five flags listed
+ * below are carried over; any other bits are dropped.
+ */
+mthca_qp_access_t
+map_qp_ibal_acl(
+	IN				ib_access_t					ibal_acl)
+{
+	mthca_qp_access_t mthca_acl = 0;
+
+	if( ibal_acl & IB_AC_RDMA_READ )
+		mthca_acl |= MTHCA_ACCESS_REMOTE_READ;
+	if( ibal_acl & IB_AC_RDMA_WRITE )
+		mthca_acl |= MTHCA_ACCESS_REMOTE_WRITE;
+	if( ibal_acl & IB_AC_ATOMIC )
+		mthca_acl |= MTHCA_ACCESS_REMOTE_ATOMIC;
+	if( ibal_acl & IB_AC_LOCAL_WRITE )
+		mthca_acl |= MTHCA_ACCESS_LOCAL_WRITE;
+	if( ibal_acl & IB_AC_MW_BIND )
+		mthca_acl |= MTHCA_ACCESS_MW_BIND;
+
+	return mthca_acl;
+}
+
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+/*
+ * Inverse of map_qp_ibal_acl(): translate an MTHCA QP access mask
+ * back into the IBAL ib_access_t encoding.  Unlisted bits are dropped.
+ */
+ib_access_t
+map_qp_mthca_acl(
+	IN				mthca_qp_access_t			mthca_acl)
+{
+	ib_access_t ibal_acl = 0;
+
+	if( mthca_acl & MTHCA_ACCESS_REMOTE_READ )
+		ibal_acl |= IB_AC_RDMA_READ;
+	if( mthca_acl & MTHCA_ACCESS_REMOTE_WRITE )
+		ibal_acl |= IB_AC_RDMA_WRITE;
+	if( mthca_acl & MTHCA_ACCESS_REMOTE_ATOMIC )
+		ibal_acl |= IB_AC_ATOMIC;
+	if( mthca_acl & MTHCA_ACCESS_LOCAL_WRITE )
+		ibal_acl |= IB_AC_LOCAL_WRITE;
+	if( mthca_acl & MTHCA_ACCESS_MW_BIND )
+		ibal_acl |= IB_AC_MW_BIND;
+
+	return ibal_acl;
+}
+
+
--- /dev/null
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: hca_data.h 148 2005-07-12 07:48:46Z sleybo $
+ */
+
+/*
+ * Prototypes for the IBAL <-> MTHCA access-flag mapping helpers
+ * implemented in hca_utils.c.
+ *
+ * Fix: the include guard __HCA_UTILS_H__ used a reserved identifier
+ * (leading double underscore is reserved for the implementation,
+ * C11 7.1.3 / CERT DCL37-C); renamed to HCA_UTILS_H_.
+ */
+#ifndef HCA_UTILS_H_
+#define HCA_UTILS_H_
+
+#include <iba\ib_types.h>
+#include <ib_verbs.h>
+
+/* Map IBAL access flags to the MTHCA QP access mask. */
+mthca_qp_access_t
+map_qp_ibal_acl(
+	IN				ib_access_t					ibal_acl);
+
+/* Map an MTHCA QP access mask back to IBAL access flags. */
+ib_access_t
+map_qp_mthca_acl(
+	IN				mthca_qp_access_t			mthca_acl);
+
+#endif	/* HCA_UTILS_H_ */
+
--- /dev/null
+# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source\r
+# file to this component. This file merely indirects to the real make file\r
+# that is shared by all the driver components of the Windows NT DDK\r
+#\r
+\r
+!INCLUDE ..\..\..\inc\openib.def
--- /dev/null
+TRUNK=..\..\..\r
+\r
+TARGETNAME=mthca\r
+TARGETPATH=$(TRUNK)\bin\kernel\obj$(BUILD_ALT_DIR)\r
+TARGETTYPE=DRIVER\r
+\r
+#ENABLE_EVENT_TRACING=1\r
+\r
+SOURCES= \\r
+ mthca_log.mc \\r
+ mthca_log.rc \\r
+ hca.rc \\r
+ ..\hca_utils.c \\r
+ hca_data.c \\r
+ hca_mcast.c \\r
+ hca_verbs.c \\r
+ hca_pnp.c \\r
+ hca_pci.c \\r
+ hca_driver.c \\r
+ hca_direct.c \\r
+ hca_memory.c \\r
+ hca_smp.c \\r
+ \\r
+ mt_l2w.c \\r
+ mt_memory.c \\r
+ mt_cache.c \\r
+ mt_packer.c \\r
+ mt_ud_header.c \\r
+ mt_device.c \\r
+ mt_verbs.c \\r
+ mt_reset_tavor.c \\r
+ mt_uverbs.c \\r
+ mt_uverbsmem.c \\r
+ \\r
+ mthca_allocator.c \\r
+ mthca_av.c \\r
+ mthca_cmd.c \\r
+ mthca_cq.c \\r
+ mthca_eq.c \\r
+ mthca_main.c \\r
+ mthca_memfree.c \\r
+ mthca_mr.c \\r
+ mthca_mcg.c \\r
+ mthca_mad.c \\r
+ mthca_pd.c \\r
+ mthca_profile.c \\r
+ mthca_provider.c \\r
+ mthca_qp.c \\r
+ mthca_srq.c \\r
+ mthca_uar.c \\r
+ mthca_log.c \\r
+ mthca_catas.c \r
+\r
+\r
+INCLUDES=\\r
+ ..; \\r
+ $(TRUNK)\inc; \\r
+ $(TRUNK)\inc\kernel; \\r
+ $(TRUNK)\inc\complib; \\r
+ $(TRUNK)\inc\kernel\complib; \\r
+ \r
+C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS -D__LITTLE_ENDIAN\r
+\r
+TARGETLIBS= \\r
+ $(TARGETPATH)\*\complib.lib \\r
+ $(TARGETPATH)\*\ibal.lib \\r
+ $(DDK_LIB_PATH)\wdmguid.lib\r
+\r
+\r
+\r
+!IFDEF ENABLE_EVENT_TRACING\r
+\r
+C_DEFINES = $(C_DEFINES) -DEVENT_TRACING\r
+\r
+RUN_WPP= -ext:.c.h $(SOURCES) -km \\r
+ -scan:hca_debug.h \\r
+ -func:HCA_PRINT(LEVEL,FLAGS,(MSG,...)) \\r
+ -func:HCA_PRINT_EV(LEVEL,FLAGS,(MSG,...)) \\r
+ -func:HCA_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) \r
+!ENDIF\r
+\r
+\r
+MSC_WARNING_LEVEL= /W4\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id$\r
+ */\r
+\r
+\r
+#include <oib_ver.h>\r
+\r
+#define VER_FILETYPE VFT_DRV\r
+#define VER_FILESUBTYPE VFT2_UNKNOWN\r
+#ifdef DBG\r
+#define VER_FILEDESCRIPTION_STR "HCA Driver (checked)"\r
+#else\r
+#define VER_FILEDESCRIPTION_STR "HCA Driver"\r
+#endif\r
+#define VER_INTERNALNAME_STR "mthca.sys"\r
+#define VER_ORIGINALFILENAME_STR "mthca.sys"\r
+#include <common.ver>\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_data.c 148 2005-07-12 07:48:46Z sleybo $\r
+ */\r
+\r
+\r
+#include "hca_driver.h"\r
+#include "hca_utils.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_data.tmh"\r
+#endif\r
+\r
+#include "mthca_dev.h"\r
+#include <ib_cache.h>\r
+\r
+// Guards mlnx_hca_list and the HOB field teardown in mlnx_hobs_remove().
+static cl_spinlock_t	hob_lock;
+
+
+
+// NOTE(review): not referenced in this file; semantics are defined by its
+// readers elsewhere in the driver — confirm before relying on it.
+uint32_t g_mlnx_dpc2thread = 0;
+
+
+// Global list of registered HCAs; always manipulated under hob_lock.
+cl_qlist_t	mlnx_hca_list;
+
+mlnx_hob_t	mlnx_hob_array[MLNX_NUM_HOBKL];		// kernel HOB - one per HCA (cmdif access)
+mlnx_hobul_t	*mlnx_hobul_array[MLNX_NUM_HOBUL];	// kernel HOBUL - one per HCA (kar access)
+\r
+/////////////////////////////////////////////////////////\r
+// ### HCA\r
+/////////////////////////////////////////////////////////\r
+// Append an HCA to the global HCA list (thread-safe via hob_lock).
+void
+mlnx_hca_insert(
+	IN				mlnx_hca_t					*p_hca )
+{
+	cl_spinlock_acquire( &hob_lock );
+	cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item );
+	cl_spinlock_release( &hob_lock );
+}
+\r
+// Unlink an HCA from the global HCA list (thread-safe via hob_lock).
+// The caller retains ownership of *p_hca; nothing is freed here.
+void
+mlnx_hca_remove(
+	IN				mlnx_hca_t					*p_hca )
+{
+	cl_spinlock_acquire( &hob_lock );
+	cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item );
+	cl_spinlock_release( &hob_lock );
+}
+\r
+/*
+ * Look up a registered HCA by its node GUID.
+ * The list is scanned under hob_lock; returns NULL when no entry
+ * in mlnx_hca_list matches.
+ */
+mlnx_hca_t*
+mlnx_hca_from_guid(
+	IN				ib_net64_t					guid )
+{
+	cl_list_item_t	*p_item;
+	mlnx_hca_t		*p_found = NULL;
+
+	cl_spinlock_acquire( &hob_lock );
+	for( p_item = cl_qlist_head( &mlnx_hca_list );
+		p_item != cl_qlist_end( &mlnx_hca_list );
+		p_item = cl_qlist_next( p_item ) )
+	{
+		mlnx_hca_t *p_cur = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );
+		if( p_cur->guid == guid )
+		{
+			p_found = p_cur;
+			break;
+		}
+	}
+	cl_spinlock_release( &hob_lock );
+	return p_found;
+}
+\r
+/*\r
+void\r
+mlnx_names_from_guid(\r
+ IN ib_net64_t guid,\r
+ OUT char **hca_name_p,\r
+ OUT char **dev_name_p)\r
+{\r
+ unsigned int idx;\r
+\r
+ if (!hca_name_p) return;\r
+ if (!dev_name_p) return;\r
+\r
+ for (idx = 0; idx < mlnx_num_hca; idx++)\r
+ {\r
+ if (mlnx_hca_array[idx].ifx.guid == guid)\r
+ {\r
+ *hca_name_p = mlnx_hca_array[idx].hca_name_p;\r
+ *dev_name_p = mlnx_hca_array[idx].dev_name_p;\r
+ }\r
+ }\r
+}\r
+*/\r
+\r
+/////////////////////////////////////////////////////////\r
+// ### HCA\r
+/////////////////////////////////////////////////////////\r
+// One-time initialization of the global HCA bookkeeping: constructs
+// the HCA list and the spinlock that guards it.  Returns the spinlock
+// init status (CL_SUCCESS on success).
+cl_status_t
+mlnx_hcas_init( void )
+{
+	cl_qlist_init( &mlnx_hca_list );
+	return cl_spinlock_init( &hob_lock );
+}
+\r
+\r
+/////////////////////////////////////////////////////////\r
+/////////////////////////////////////////////////////////\r
+// Register the IBAL completion / async-event callbacks and consumer
+// context on a HOB.  Lazily creates the per-HOB async processing
+// manager (MLNX_NUM_CB_THR worker threads) on first call.
+// Returns IB_SUCCESS, IB_INSUFFICIENT_MEMORY (allocation failed) or
+// IB_INSUFFICIENT_RESOURCES (thread-pool init failed).
+ib_api_status_t
+mlnx_hobs_set_cb(
+	IN				mlnx_hob_t					*hob_p, 
+	IN				ci_completion_cb_t			comp_cb_p,
+	IN				ci_async_event_cb_t			async_cb_p,
+	IN		const	void* const					ib_context)
+{
+	cl_status_t		cl_status;
+
+	// Setup the callbacks
+	if (!hob_p->async_proc_mgr_p)
+	{
+		hob_p->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) );
+		if( !hob_p->async_proc_mgr_p )
+		{
+			return IB_INSUFFICIENT_MEMORY;
+		}
+		cl_async_proc_construct( hob_p->async_proc_mgr_p );
+		cl_status = cl_async_proc_init( hob_p->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" );
+		if( cl_status != CL_SUCCESS )
+		{
+			// Roll back the partially-initialized manager before failing.
+			cl_async_proc_destroy( hob_p->async_proc_mgr_p );
+			cl_free(hob_p->async_proc_mgr_p);
+			hob_p->async_proc_mgr_p = NULL;
+			return IB_INSUFFICIENT_RESOURCES;
+		}
+	}
+
+	hob_p->comp_cb_p	= comp_cb_p;
+	hob_p->async_cb_p = async_cb_p;
+	hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL
+	HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("CL: hca_idx %d context 0x%p\n", (int)(hob_p - mlnx_hob_array), ib_context));
+	return IB_SUCCESS;
+}
+\r
+/////////////////////////////////////////////////////////\r
+/////////////////////////////////////////////////////////\r
+// Tear down a HOB: invalidate it and detach its resources while
+// holding hob_lock, then destroy/free the detached async-proc manager
+// and cache outside the lock (cl_async_proc_destroy may block, so it
+// must not run under the spinlock).
+void
+mlnx_hobs_remove(
+	IN				mlnx_hob_t					*hob_p)
+{
+	cl_async_proc_t	*p_async_proc;
+	mlnx_cache_t	*p_cache;
+
+	cl_spinlock_acquire( &hob_lock );
+
+	hob_p->mark = E_MARK_INVALID;
+
+	// Detach (don't free) under the lock; freed below after release.
+	p_async_proc = hob_p->async_proc_mgr_p;
+	hob_p->async_proc_mgr_p = NULL;
+
+	p_cache = hob_p->cache;
+	hob_p->cache = NULL;
+
+	hob_p->comp_cb_p = NULL;
+	hob_p->async_cb_p = NULL;
+	hob_p->ca_context = NULL;
+	hob_p->cl_device_h = NULL;
+
+	cl_spinlock_release( &hob_lock );
+
+	if( p_async_proc )
+	{
+		cl_async_proc_destroy( p_async_proc );
+		cl_free( p_async_proc );
+	}
+
+	if( p_cache )
+		cl_free( p_cache );
+
+	HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("CL: hobs_remove idx %d \n", (int)(hob_p - mlnx_hob_array)));
+}
+\r
+/////////////////////////////////////////////////////////\r
+/////////////////////////////////////////////////////////\r
+// Translate low-level port capability bits into the IBAL ib_port_cap_t
+// bitfield form.  Note: this only SETS flags for bits that are present;
+// it never clears — the caller is expected to zero *ibal_port_cap_p
+// first (mlnx_conv_hca_cap does so via cl_memclr).
+void
+mthca_port_cap_to_ibal(
+	IN				u32			mthca_port_cap,
+	OUT			ib_port_cap_t			*ibal_port_cap_p)
+{
+	if (mthca_port_cap & IB_PORT_CM_SUP)
+		ibal_port_cap_p->cm = TRUE;
+	if (mthca_port_cap & IB_PORT_SNMP_TUNNEL_SUP)
+		ibal_port_cap_p->snmp = TRUE;
+	if (mthca_port_cap & IB_PORT_DEVICE_MGMT_SUP)
+		ibal_port_cap_p->dev_mgmt = TRUE;
+	if (mthca_port_cap & IB_PORT_VENDOR_CLASS_SUP)
+		ibal_port_cap_p->vend = TRUE;
+	if (mthca_port_cap & IB_PORT_SM_DISABLED)
+		ibal_port_cap_p->sm_disable = TRUE;
+	if (mthca_port_cap & IB_PORT_SM)
+		ibal_port_cap_p->sm = TRUE;
+}
+\r
+\r
+/////////////////////////////////////////////////////////\r
+// Fill an IBAL ib_ca_attr_t from the verbs device attributes and the
+// per-port attribute array.  Pure field-by-field translation; assumes
+// ca_attr_p->p_port_attr has room for all physical ports and that each
+// port's p_gid_table[0] is already populated (the port GUID is taken
+// from it) — TODO(review): confirm the caller fills the GID table first.
+void
+mlnx_conv_hca_cap(
+	IN				struct ib_device *ib_dev,
+	IN				struct ib_device_attr *hca_info_p,
+	IN				struct ib_port_attr  *hca_ports,
+	OUT			ib_ca_attr_t 				*ca_attr_p)
+{
+	uint8_t			port_num;
+	ib_port_attr_t  *ibal_port_p;
+	struct ib_port_attr  *mthca_port_p;
+
+	ca_attr_p->vend_id  = hca_info_p->vendor_id;
+	ca_attr_p->dev_id   = (uint16_t)hca_info_p->vendor_part_id;
+	ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver;
+	ca_attr_p->fw_ver = hca_info_p->fw_ver;
+	ca_attr_p->ca_guid   = *(UNALIGNED64 uint64_t *)&ib_dev->node_guid;
+	ca_attr_p->num_ports = ib_dev->phys_port_cnt;
+	ca_attr_p->max_qps   = hca_info_p->max_qp;
+	ca_attr_p->max_wrs   = hca_info_p->max_qp_wr;
+	ca_attr_p->max_sges   = hca_info_p->max_sge;
+	ca_attr_p->max_rd_sges = hca_info_p->max_sge_rd;
+	ca_attr_p->max_cqs    = hca_info_p->max_cq;
+	ca_attr_p->max_cqes  = hca_info_p->max_cqe;
+	ca_attr_p->max_pds    = hca_info_p->max_pd;
+	ca_attr_p->init_regions = hca_info_p->max_mr;
+	ca_attr_p->init_windows = hca_info_p->max_mw;
+	ca_attr_p->init_region_size = hca_info_p->max_mr_size;
+	ca_attr_p->max_addr_handles = hca_info_p->max_ah;
+	ca_attr_p->atomicity     = hca_info_p->atomic_cap;
+	ca_attr_p->max_partitions = hca_info_p->max_pkeys;
+	ca_attr_p->max_qp_resp_res =(uint8_t) hca_info_p->max_qp_rd_atom;
+	ca_attr_p->max_resp_res   = (uint8_t)hca_info_p->max_res_rd_atom;
+	ca_attr_p->max_qp_init_depth = (uint8_t)hca_info_p->max_qp_init_rd_atom;
+	ca_attr_p->max_ipv6_qps    = hca_info_p->max_raw_ipv6_qp;
+	ca_attr_p->max_ether_qps   = hca_info_p->max_raw_ethy_qp;
+	ca_attr_p->max_mcast_grps  = hca_info_p->max_mcast_grp;
+	ca_attr_p->max_mcast_qps   = hca_info_p->max_total_mcast_qp_attach;
+	ca_attr_p->max_qps_per_mcast_grp = hca_info_p->max_mcast_qp_attach;
+	ca_attr_p->local_ack_delay = hca_info_p->local_ca_ack_delay;
+	// device_cap_flags tests yield nonzero-as-TRUE boolean capabilities
+	ca_attr_p->bad_pkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_PKEY_CNTR;
+	ca_attr_p->bad_qkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_QKEY_CNTR;
+	ca_attr_p->raw_mcast_support    = hca_info_p->device_cap_flags & IB_DEVICE_RAW_MULTI;
+	ca_attr_p->apm_support          = hca_info_p->device_cap_flags & IB_DEVICE_AUTO_PATH_MIG;
+	ca_attr_p->av_port_check        = hca_info_p->device_cap_flags & IB_DEVICE_UD_AV_PORT_ENFORCE;
+	ca_attr_p->change_primary_port  = hca_info_p->device_cap_flags & IB_DEVICE_CHANGE_PHY_PORT;
+	ca_attr_p->modify_wr_depth      = hca_info_p->device_cap_flags & IB_DEVICE_RESIZE_MAX_WR;
+	ca_attr_p->hw_agents            = FALSE; // in the context of IBAL then agent is implemented on the host
+
+	ca_attr_p->num_page_sizes = 1;
+	ca_attr_p->p_page_size[0] = PAGE_SIZE; // TBD: extract an array of page sizes from HCA cap
+
+	for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num)
+	{
+		// Setup port pointers
+		ibal_port_p = &ca_attr_p->p_port_attr[port_num];
+		mthca_port_p = &hca_ports[port_num];
+
+		// Port Cabapilities
+		cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));
+		mthca_port_cap_to_ibal(mthca_port_p->port_cap_flags, &ibal_port_p->cap);
+
+		// Port Atributes
+		ibal_port_p->port_num   = port_num + start_port(ib_dev);
+		ibal_port_p->port_guid  = ibal_port_p->p_gid_table[0].unicast.interface_id;
+		ibal_port_p->lid        = cl_ntoh16(mthca_port_p->lid);
+		ibal_port_p->lmc        = mthca_port_p->lmc;
+		ibal_port_p->max_vls    = mthca_port_p->max_vl_num;
+		ibal_port_p->sm_lid     = cl_ntoh16(mthca_port_p->sm_lid);
+		ibal_port_p->sm_sl      = mthca_port_p->sm_sl;
+		// Report IB_LINK_DOWN when the verbs state is the zero/"no state" value.
+		ibal_port_p->link_state = (mthca_port_p->state != 0) ? (uint8_t)mthca_port_p->state : IB_LINK_DOWN;
+		ibal_port_p->num_gids   = (uint16_t)mthca_port_p->gid_tbl_len;
+		ibal_port_p->num_pkeys  = mthca_port_p->pkey_tbl_len;
+		ibal_port_p->pkey_ctr   = (uint16_t)mthca_port_p->bad_pkey_cntr;
+		ibal_port_p->qkey_ctr   = (uint16_t)mthca_port_p->qkey_viol_cntr;
+		ibal_port_p->max_msg_size = mthca_port_p->max_msg_sz;
+		ibal_port_p->mtu = (uint8_t)mthca_port_p->max_mtu;
+
+		ibal_port_p->subnet_timeout = mthca_port_p->subnet_timeout;
+		// ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec
+#if 0
+		HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM ,("Port %d port_guid 0x%I64x\n",
+			ibal_port_p->port_num, ibal_port_p->port_guid));
+#endif
+	}
+}
+\r
+// CQ completion dispatch: called by the low-level driver when a CQ has
+// completions.  Forwards to the IBAL completion callback registered in
+// mlnx_hobs_set_cb, passing the CQ consumer context; logs and drops the
+// event when no HOB context is attached.
+void cq_comp_handler(struct ib_cq *cq, void *context)
+{
+	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
+	struct mthca_cq *mcq =(struct mthca_cq *)cq; 
+	HCA_ENTER(HCA_DBG_CQ);
+	if (hob_p) {
+		HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("Invoking completion callback\n"));
+		(hob_p->comp_cb_p)(mcq->cq_context);
+	}
+	else {
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("Incorrect context. Completion callback was not invoked\n"));
+	}
+	HCA_EXIT(HCA_DBG_CQ);
+}
+\r
+/*
+ * HCA-level async event dispatch: translate a verbs ib_event into an
+ * IBAL ib_event_rec_t and forward it to the registered async callback.
+ * Unknown event codes are downgraded to IB_AE_LOCAL_FATAL.
+ *
+ * Fix: the original dereferenced hob_p (reading ca_context) BEFORE the
+ * NULL check near the bottom, so a missing context crashed instead of
+ * being logged.  Guard first, then build the event record.
+ */
+void ca_event_handler(struct ib_event *ev, void *context)
+{
+	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
+	ib_event_rec_t event_rec;
+
+	if (!hob_p) {
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
+		return;
+	}
+
+	// prepare parameters
+	event_rec.context = (void *)hob_p->ca_context;
+	event_rec.trap.info.port_num = ev->element.port_num;
+	event_rec.type = ev->event;
+	if (event_rec.type > IB_AE_UNKNOWN) {
+		// CL_ASSERT(0); // This shouldn't happen
+		HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,("Unmapped E_EV_CA event of type 0x%x. Replaced by 0x%x (IB_AE_LOCAL_FATAL)\n", 
+			event_rec.type, IB_AE_LOCAL_FATAL));
+		event_rec.type = IB_AE_LOCAL_FATAL;
+	}
+
+	// call the user callback
+	(hob_p->async_cb_p)(&event_rec);
+}
+\r
+// QP async event dispatch: translate a verbs QP event into an IBAL
+// event record (context = the QP consumer context) and forward it to
+// the registered async callback; logs and drops when no HOB context.
+void qp_event_handler(struct ib_event *ev, void *context)
+{
+	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
+	ib_event_rec_t event_rec;
+	struct mthca_qp *qp_p;
+
+	// prepare parameters
+	event_rec.type = ev->event;
+	qp_p = (struct mthca_qp *)ev->element.qp;
+	event_rec.context = qp_p->qp_context;
+
+	// call the user callback
+	if (hob_p)
+		(hob_p->async_cb_p)(&event_rec);
+	else {
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
+	}
+}
+\r
+// CQ async (error) event dispatch: translate a verbs CQ event into an
+// IBAL event record (context = the CQ consumer context) and forward it
+// to the registered async callback; logs and drops when no HOB context.
+void cq_event_handler(struct ib_event *ev, void *context)
+{
+	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
+	ib_event_rec_t event_rec;
+	struct mthca_cq *cq_p;
+
+	// prepare parameters
+	event_rec.type = ev->event;
+	cq_p = (struct mthca_cq *)ev->element.cq;
+	event_rec.context = cq_p->cq_context;
+
+	// call the user callback
+	if (hob_p)
+		(hob_p->async_cb_p)(&event_rec);
+	else {
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n"));
+	}
+}
+\r
+/*
+ * Convert an MTHCA QP state to the IBAL ib_qp_state_t encoding.
+ * Unknown values are traced and mapped to 0xffffffff.
+ */
+ib_qp_state_t mlnx_qps_to_ibal(enum ib_qp_state qps)
+{
+	ib_qp_state_t ib_qps;
+
+	switch (qps) {
+	case IBQPS_RESET:
+		ib_qps = IB_QPS_RESET;
+		break;
+	case IBQPS_INIT:
+		ib_qps = IB_QPS_INIT;
+		break;
+	case IBQPS_RTR:
+		ib_qps = IB_QPS_RTR;
+		break;
+	case IBQPS_RTS:
+		ib_qps = IB_QPS_RTS;
+		break;
+	case IBQPS_SQD:
+		ib_qps = IB_QPS_SQD;
+		break;
+	case IBQPS_SQE:
+		ib_qps = IB_QPS_SQERR;
+		break;
+	case IBQPS_ERR:
+		ib_qps = IB_QPS_ERROR;
+		break;
+	default:
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped MTHCA qp_state %d\n", qps));
+		ib_qps = 0xffffffff;
+	}
+	return ib_qps;
+}
+\r
+/*
+ * Convert an IBAL ib_qp_state_t to the MTHCA QP state encoding.
+ * All three SQD variants collapse to IBQPS_SQD; unknown values are
+ * traced and mapped to 0xffffffff.
+ */
+enum ib_qp_state mlnx_qps_from_ibal(ib_qp_state_t ib_qps)
+{
+	enum ib_qp_state qps;
+
+	switch (ib_qps) {
+	case IB_QPS_RESET:
+		qps = IBQPS_RESET;
+		break;
+	case IB_QPS_INIT:
+		qps = IBQPS_INIT;
+		break;
+	case IB_QPS_RTR:
+		qps = IBQPS_RTR;
+		break;
+	case IB_QPS_RTS:
+		qps = IBQPS_RTS;
+		break;
+	case IB_QPS_SQD:
+	case IB_QPS_SQD_DRAINING:
+	case IB_QPS_SQD_DRAINED:
+		qps = IBQPS_SQD;
+		break;
+	case IB_QPS_SQERR:
+		qps = IBQPS_SQE;
+		break;
+	case IB_QPS_ERROR:
+		qps = IBQPS_ERR;
+		break;
+	default:
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped IBAL qp_state %d\n", ib_qps));
+		qps = 0xffffffff;
+	}
+	return qps;
+}
+\r
+ib_api_status_t\r
+mlnx_conv_qp_modify_attr(\r
+ IN const struct ib_qp *ib_qp_p,\r
+ IN ib_qp_type_t qp_type,\r
+ IN const ib_qp_mod_t *modify_attr_p, \r
+ OUT struct ib_qp_attr *qp_attr_p,\r
+ OUT int *qp_attr_mask_p\r
+ )\r
+{\r
+ int err;\r
+ ib_api_status_t status = IB_SUCCESS;\r
+ struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;\r
+\r
+ RtlZeroMemory( qp_attr_p, sizeof *qp_attr_p );\r
+ *qp_attr_mask_p = IB_QP_STATE;\r
+ qp_attr_p->qp_state = mlnx_qps_from_ibal( modify_attr_p->req_state ); \r
+\r
+ // skipped cases\r
+ if (qp_p->state == IBQPS_RESET && modify_attr_p->req_state != IB_QPS_INIT)\r
+ return IB_NOT_DONE;\r
+ \r
+ switch (modify_attr_p->req_state) {\r
+ case IB_QPS_RESET:\r
+ case IB_QPS_ERROR:\r
+ case IB_QPS_SQERR:\r
+ case IB_QPS_TIME_WAIT:\r
+ break;\r
+\r
+ case IB_QPS_INIT:\r
+ \r
+ switch (qp_type) {\r
+ case IB_QPT_RELIABLE_CONN:\r
+ case IB_QPT_UNRELIABLE_CONN:\r
+ *qp_attr_mask_p |= IB_QP_PORT | IB_QP_PKEY_INDEX |IB_QP_ACCESS_FLAGS;\r
+ qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.init.access_ctrl);\r
+ break;\r
+ case IB_QPT_UNRELIABLE_DGRM:\r
+ case IB_QPT_QP0:\r
+ case IB_QPT_QP1:\r
+ default: \r
+ *qp_attr_mask_p |= IB_QP_PORT | IB_QP_QKEY | IB_QP_PKEY_INDEX ;\r
+ qp_attr_p->qkey = cl_ntoh32 (modify_attr_p->state.init.qkey);\r
+ break;\r
+ } \r
+ \r
+ // IB_QP_PORT\r
+ qp_attr_p->port_num = modify_attr_p->state.init.primary_port;\r
+\r
+ // IB_QP_PKEY_INDEX\r
+ qp_attr_p->pkey_index = modify_attr_p->state.init.pkey_index;\r
+\r
+ break;\r
+ \r
+ case IB_QPS_RTR:\r
+ /* modifying the WQE depth is not supported */\r
+ if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH ||\r
+ modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH ) {\r
+ status = IB_UNSUPPORTED;\r
+ break;\r
+ }\r
+\r
+ switch (qp_type) {\r
+ case IB_QPT_RELIABLE_CONN:\r
+ *qp_attr_mask_p |= /* required flags */\r
+ IB_QP_DEST_QPN |IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC |\r
+ IB_QP_AV |IB_QP_PATH_MTU | IB_QP_MIN_RNR_TIMER;\r
+\r
+ // IB_QP_DEST_QPN\r
+ qp_attr_p->dest_qp_num = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);\r
+\r
+ // IB_QP_RQ_PSN\r
+ qp_attr_p->rq_psn = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);\r
+ \r
+ // IB_QP_MAX_DEST_RD_ATOMIC\r
+ qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rtr.resp_res;\r
+\r
+ // IB_QP_AV, IB_QP_PATH_MTU: Convert primary RC AV (mandatory)\r
+ err = mlnx_conv_ibal_av(ib_qp_p->device,\r
+ &modify_attr_p->state.rtr.primary_av, &qp_attr_p->ah_attr);\r
+ if (err) {\r
+ status = IB_ERROR;\r
+ break;\r
+ }\r
+ qp_attr_p->path_mtu = modify_attr_p->state.rtr.primary_av.conn.path_mtu; // MTU\r
+ qp_attr_p->timeout = modify_attr_p->state.rtr.primary_av.conn.local_ack_timeout; // MTU\r
+ qp_attr_p->retry_cnt = modify_attr_p->state.rtr.primary_av.conn.seq_err_retry_cnt; // MTU\r
+ qp_attr_p->rnr_retry = modify_attr_p->state.rtr.primary_av.conn.rnr_retry_cnt; // MTU\r
+\r
+ // IB_QP_MIN_RNR_TIMER\r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_RNR_NAK_TIMEOUT) {\r
+ qp_attr_p->min_rnr_timer = modify_attr_p->state.rtr.rnr_nak_timeout;\r
+ }\r
+\r
+ // IB_QP_ACCESS_FLAGS: Convert Remote Atomic Flags\r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {\r
+ *qp_attr_mask_p |= IB_QP_ACCESS_FLAGS; /* optional flag */\r
+ qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.init.access_ctrl);\r
+ }\r
+\r
+ // IB_QP_ALT_PATH: Convert alternate RC AV\r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {\r
+ *qp_attr_mask_p |= IB_QP_ALT_PATH; /* required flag */\r
+ err = mlnx_conv_ibal_av(ib_qp_p->device,\r
+ &modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_ah_attr);\r
+ if (err) {\r
+ status = IB_ERROR;\r
+ break;\r
+ }\r
+ qp_attr_p->alt_timeout = modify_attr_p->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv\r
+ }\r
+\r
+ // IB_QP_PKEY_INDEX \r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {\r
+ *qp_attr_mask_p |= IB_QP_PKEY_INDEX; \r
+ qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;\r
+ }\r
+ break;\r
+ \r
+ case IB_QPT_UNRELIABLE_CONN:\r
+ *qp_attr_mask_p |= /* required flags */\r
+ IB_QP_DEST_QPN |IB_QP_RQ_PSN | IB_QP_AV | IB_QP_PATH_MTU;\r
+\r
+ // IB_QP_DEST_QPN\r
+ qp_attr_p->dest_qp_num = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);\r
+\r
+ // IB_QP_RQ_PSN\r
+ qp_attr_p->rq_psn = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);\r
+\r
+ // IB_QP_PATH_MTU\r
+ qp_attr_p->path_mtu = modify_attr_p->state.rtr.primary_av.conn.path_mtu;\r
+\r
+ // IB_QP_AV: Convert primary AV (mandatory)\r
+ err = mlnx_conv_ibal_av(ib_qp_p->device,\r
+ &modify_attr_p->state.rtr.primary_av, &qp_attr_p->ah_attr);\r
+ if (err) {\r
+ status = IB_ERROR;\r
+ break;\r
+ }\r
+\r
+ // IB_QP_ACCESS_FLAGS: Convert Remote Atomic Flags\r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {\r
+ *qp_attr_mask_p |= IB_QP_ACCESS_FLAGS; /* optional flag */\r
+ qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.init.access_ctrl);\r
+ }\r
+\r
+ // IB_QP_ALT_PATH: Convert alternate RC AV\r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {\r
+ *qp_attr_mask_p |= IB_QP_ALT_PATH; /* required flag */\r
+ err = mlnx_conv_ibal_av(ib_qp_p->device,\r
+ &modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_ah_attr);\r
+ if (err) {\r
+ status = IB_ERROR;\r
+ break;\r
+ }\r
+ }\r
+\r
+ // IB_QP_PKEY_INDEX \r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {\r
+ *qp_attr_mask_p |= IB_QP_PKEY_INDEX; \r
+ qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;\r
+ }\r
+ break;\r
+ \r
+ case IB_QPT_UNRELIABLE_DGRM:\r
+ case IB_QPT_QP0:\r
+ case IB_QPT_QP1:\r
+ default: \r
+ // IB_QP_PKEY_INDEX \r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {\r
+ *qp_attr_mask_p |= IB_QP_PKEY_INDEX; \r
+ qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;\r
+ }\r
+\r
+ // IB_QP_QKEY\r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_QKEY) {\r
+ *qp_attr_mask_p |= IB_QP_QKEY; \r
+ qp_attr_p->qkey = cl_ntoh32 (modify_attr_p->state.rtr.qkey);\r
+ }\r
+ break;\r
+ \r
+ }\r
+ break;\r
+ \r
+ case IB_QPS_RTS:\r
+ /* modifying the WQE depth is not supported */\r
+ if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||\r
+ modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH )\r
+ {\r
+ status = IB_UNSUPPORTED;\r
+ break;\r
+ }\r
+\r
+ switch (qp_type) {\r
+ case IB_QPT_RELIABLE_CONN:\r
+ *qp_attr_mask_p |= /* required flags */\r
+ IB_QP_SQ_PSN |IB_QP_MAX_QP_RD_ATOMIC | IB_QP_TIMEOUT |\r
+ IB_QP_RETRY_CNT |IB_QP_RNR_RETRY;\r
+\r
+ // IB_QP_MAX_QP_RD_ATOMIC\r
+ qp_attr_p->max_rd_atomic = modify_attr_p->state.rts.init_depth;\r
+\r
+ // IB_QP_TIMEOUT\r
+ qp_attr_p->timeout = modify_attr_p->state.rts.local_ack_timeout; // XXX: conv\r
+ \r
+ // IB_QP_RETRY_CNT\r
+ qp_attr_p->retry_cnt = modify_attr_p->state.rts.retry_cnt;\r
+ \r
+ // IB_QP_RNR_RETRY\r
+ qp_attr_p->rnr_retry = modify_attr_p->state.rts.rnr_retry_cnt;\r
+\r
+ // IB_QP_MAX_DEST_RD_ATOMIC: Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)\r
+ if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {\r
+ *qp_attr_mask_p |= IB_QP_MAX_DEST_RD_ATOMIC; \r
+ qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rts.resp_res;\r
+ }\r
+\r
+#if 0\r
+ // Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition.\r
+\r
+ // IB_QP_PKEY_INDEX \r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {\r
+ *qp_attr_mask_p |= IB_QP_PKEY_INDEX; \r
+ qp_attr_p->pkey_index = modify_attr_p->state.rts.pkey_index;\r
+ }\r
+#endif \r
+\r
+ // IB_QP_MIN_RNR_TIMER\r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_RNR_NAK_TIMEOUT) {\r
+ *qp_attr_mask_p |= IB_QP_MIN_RNR_TIMER; \r
+ qp_attr_p->min_rnr_timer = modify_attr_p->state.rts.rnr_nak_timeout;\r
+ }\r
+\r
+ // IB_QP_PATH_MIG_STATE\r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_APM_STATE) {\r
+ *qp_attr_mask_p |= IB_QP_PATH_MIG_STATE; \r
+ qp_attr_p->path_mig_state = modify_attr_p->state.rts.apm_state;\r
+ }\r
+\r
+ // IB_QP_ACCESS_FLAGS\r
+ if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {\r
+ *qp_attr_mask_p |= IB_QP_ACCESS_FLAGS; /* optional flags */\r
+ qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.init.access_ctrl);\r
+ }\r
+\r
+ // IB_QP_ALT_PATH: Convert alternate RC AV\r
+ if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {\r
+ *qp_attr_mask_p |= IB_QP_ALT_PATH; /* optional flag */\r
+ err = mlnx_conv_ibal_av(ib_qp_p->device,\r
+ &modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_ah_attr);\r
+ if (err) {\r
+ status = IB_ERROR;\r
+ break;\r
+ }\r
+ qp_attr_p->alt_timeout = modify_attr_p->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv\r
+ }\r
+ break;\r
+ \r
+ case IB_QPT_UNRELIABLE_CONN:\r
+ *qp_attr_mask_p |= /* required flags */\r
+ IB_QP_SQ_PSN;\r
+\r
+ // IB_QP_MAX_DEST_RD_ATOMIC: Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS)\r
+ if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) {\r
+ *qp_attr_mask_p |= IB_QP_MAX_DEST_RD_ATOMIC; \r
+ qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rts.resp_res;\r
+ }\r
+\r
+#if 0\r
+ // Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition.\r
+\r
+ // IB_QP_PKEY_INDEX \r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {\r
+ *qp_attr_mask_p |= IB_QP_PKEY_INDEX; \r
+ qp_attr_p->pkey_index = modify_attr_p->state.rts.pkey_index;\r
+ }\r
+#endif \r
+\r
+ // IB_QP_PATH_MIG_STATE\r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_APM_STATE) {\r
+ *qp_attr_mask_p |= IB_QP_PATH_MIG_STATE; \r
+ qp_attr_p->path_mig_state = modify_attr_p->state.rts.apm_state;\r
+ }\r
+\r
+ // IB_QP_ACCESS_FLAGS\r
+ if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {\r
+ *qp_attr_mask_p |= IB_QP_ACCESS_FLAGS; /* optional flags */\r
+ qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.init.access_ctrl);\r
+ }\r
+\r
+ // IB_QP_ALT_PATH: Convert alternate RC AV\r
+ if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {\r
+ *qp_attr_mask_p |= IB_QP_ALT_PATH; /* optional flag */\r
+ err = mlnx_conv_ibal_av(ib_qp_p->device,\r
+ &modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_ah_attr);\r
+ if (err) {\r
+ status = IB_ERROR;\r
+ break;\r
+ }\r
+ }\r
+ break;\r
+ \r
+ case IB_QPT_UNRELIABLE_DGRM:\r
+ case IB_QPT_QP0:\r
+ case IB_QPT_QP1:\r
+ default: \r
+ *qp_attr_mask_p |= /* required flags */\r
+ IB_QP_SQ_PSN;\r
+\r
+ // IB_QP_QKEY\r
+ if (modify_attr_p->state.rtr.opts & IB_MOD_QP_QKEY) {\r
+ *qp_attr_mask_p |= IB_QP_QKEY; \r
+ qp_attr_p->qkey = cl_ntoh32 (modify_attr_p->state.rtr.qkey);\r
+ }\r
+ break;\r
+ \r
+ break;\r
+ \r
+ }\r
+\r
+ // IB_QP_SQ_PSN: common for all\r
+ qp_attr_p->sq_psn = cl_ntoh32 (modify_attr_p->state.rts.sq_psn);\r
+ //NB: IB_QP_CUR_STATE flag is not provisioned by IBAL\r
+ break;\r
+ \r
+ case IB_QPS_SQD:\r
+ case IB_QPS_SQD_DRAINING:\r
+ case IB_QPS_SQD_DRAINED:\r
+ *qp_attr_mask_p |= IB_QP_EN_SQD_ASYNC_NOTIFY;\r
+ qp_attr_p->en_sqd_async_notify = (u8)modify_attr_p->state.sqd.sqd_event;\r
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SHIM ,("IB_QP_EN_SQD_ASYNC_NOTIFY seems like unsupported\n"));\r
+ break;\r
+ \r
+ default: \r
+ //NB: is this an error case and we need this message ? What about returning an error ?\r
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped qp_state %d\n", modify_attr_p->req_state));\r
+ break;\r
+ \r
+ }\r
+\r
+ return status;\r
+} \r
+\r
+int\r
+mlnx_conv_ibal_av(\r
+ IN const struct ib_device *ib_dev_p,\r
+ IN const ib_av_attr_t *ibal_av_p,\r
+ OUT struct ib_ah_attr *ah_attr_p)\r
+{\r
+ int err = 0;\r
+ u8 port_num;\r
+ u16 gid_index;\r
+ \r
+ ah_attr_p->port_num = ibal_av_p->port_num;\r
+ ah_attr_p->sl = ibal_av_p->sl;\r
+ ah_attr_p->dlid = cl_ntoh16(ibal_av_p->dlid);\r
+ //TODO: how static_rate is coded ?\r
+ ah_attr_p->static_rate =\r
+ (ibal_av_p->static_rate == IB_PATH_RECORD_RATE_10_GBS ? 0 : 3);\r
+ ah_attr_p->src_path_bits = ibal_av_p->path_bits; // PATH:\r
+\r
+ /* For global destination or Multicast address:*/\r
+ if (ibal_av_p->grh_valid)\r
+ {\r
+ ah_attr_p->ah_flags |= IB_AH_GRH;\r
+ ah_attr_p->grh.hop_limit = ibal_av_p->grh.hop_limit;\r
+ ib_grh_get_ver_class_flow( ibal_av_p->grh.ver_class_flow, NULL,\r
+ &ah_attr_p->grh.traffic_class, &ah_attr_p->grh.flow_label );\r
+ err = ib_find_cached_gid((struct ib_device *)ib_dev_p, \r
+ (union ib_gid *)ibal_av_p->grh.src_gid.raw, &port_num, &gid_index);\r
+ if (err) {\r\r
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_find_cached_gid failed %d (%#x). Using default: sgid_index = 0\n", err, err));\r
+ gid_index = 0;\r
+ }\r
+ else if (port_num != ah_attr_p->port_num) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_find_cached_gid returned wrong port_num %u (Expected - %u). Using the expected.\n", \r
+ (u32)port_num, (u32)ah_attr_p->port_num));\r
+ }\r
+ ah_attr_p->grh.sgid_index = (u8)gid_index;\r
+ RtlCopyMemory(ah_attr_p->grh.dgid.raw, ibal_av_p->grh.dest_gid.raw, sizeof(ah_attr_p->grh.dgid));\r
+ }\r
+\r
+ return err;\r
+}\r
+\r
+int\r
+mlnx_conv_mthca_av(\r
+ IN const struct ib_ah *ib_ah_p,\r
+ OUT ib_av_attr_t *ibal_av_p)\r
+{\r
+ int err = 0;\r
+ struct ib_ud_header header;\r
+ struct mthca_ah *ah_p = (struct mthca_ah *)ib_ah_p;\r
+ struct ib_device *ib_dev_p = ib_ah_p->pd->device;\r
+ struct mthca_dev *dev_p = (struct mthca_dev *)ib_dev_p;\r
+\r
+ err = mthca_read_ah( dev_p, ah_p, &header);\r
+ if (err)\r
+ goto err_read_ah;\r
+\r
+ // common part\r
+ ibal_av_p->sl = header.lrh.service_level;\r
+ mthca_get_av_params(ah_p, &ibal_av_p->port_num,\r
+ &ibal_av_p->dlid, &ibal_av_p->static_rate, &ibal_av_p->path_bits );\r
+\r
+ // GRH\r
+ ibal_av_p->grh_valid = header.grh_present;\r
+ if (ibal_av_p->grh_valid) {\r
+ ibal_av_p->grh.ver_class_flow = ib_grh_set_ver_class_flow(\r
+ header.grh.ip_version, header.grh.traffic_class, header.grh.flow_label );\r
+ ibal_av_p->grh.hop_limit = header.grh.hop_limit;\r
+ RtlCopyMemory(ibal_av_p->grh.src_gid.raw, \r
+ header.grh.source_gid.raw, sizeof(ibal_av_p->grh.src_gid));\r
+ RtlCopyMemory(ibal_av_p->grh.src_gid.raw, \r
+ header.grh.destination_gid.raw, sizeof(ibal_av_p->grh.dest_gid));\r
+ }\r
+\r
+ //TODO: don't know, how to fill conn. Note, that previous version didn't fill it also.\r
+\r
+err_read_ah:\r
+ return err;\r
+}\r
+\r
+void\r
+mlnx_modify_ah(\r
+ IN const struct ib_ah *ib_ah_p,\r
+ IN const struct ib_ah_attr *ah_attr_p)\r
+{\r
+ struct ib_device *ib_dev_p = ib_ah_p->pd->device;\r
+ struct mthca_dev *dev_p = (struct mthca_dev *)ib_dev_p;\r
+ \r
+ mthca_set_av_params(dev_p, (struct mthca_ah *)ib_ah_p, (struct ib_ah_attr *)ah_attr_p );\r
+}\r
+\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_data.h 148 2005-07-12 07:48:46Z sleybo $\r
+ */\r
+\r
+#ifndef __HCA_DATA_H__\r
+#define __HCA_DATA_H__\r
+\r
+\r
+#include <iba/ib_ci.h>\r
+#include <complib/comp_lib.h>\r
+#include <mt_l2w.h>\r
+#include <mthca_provider.h>\r
+\r
+extern uint32_t g_sqp_max_avs;\r
+extern char mlnx_uvp_lib_name[];\r
+\r
+\r
+#define MLNX_MAX_HCA 4\r
+#define MLNX_NUM_HOBKL MLNX_MAX_HCA\r
+#define MLNX_NUM_HOBUL MLNX_MAX_HCA\r
+#define MLNX_NUM_CB_THR 1\r
+#define MLNX_SIZE_CB_POOL 256\r
+#define MLNX_UAL_ALLOC_HCA_UL_RES 1\r
+#define MLNX_UAL_FREE_HCA_UL_RES 2\r
+\r
+\r
+// Defines for QP ops\r
+#define MLNX_MAX_NUM_SGE 8\r
+#define MLNX_MAX_WRS_PER_CHAIN 4\r
+\r
+#define MLNX_NUM_RESERVED_QPS 16\r
+\r
+/*\r
+ * Completion model.\r
+ * 0: No DPC processor assignment\r
+ * 1: DPCs per-CQ, processor affinity set at CQ initialization time.\r
+ * 2: DPCs per-CQ, processor affinity set at runtime.\r
+ * 3: DPCs per-CQ, no processor affinity set.\r
+ */\r
+#define MLNX_COMP_MODEL 3\r
+\r
#ifdef DBG
/* Bounds-check 'index' against 'limit'.  On failure: set the local
 * 'status' variable to 'error', log file/line, and 'goto label'.
 * Expects 'status' and 'label' to exist at every expansion site.
 * Compiles to nothing in free builds. */
#define VALIDATE_INDEX(index, limit, error, label) \
	{ \
		if (index >= limit) \
		{ \
			status = error; \
			HCA_PRINT(TRACE_LEVEL_ERROR , g_mlnx_dbg_lvl ,("file %s line %d\n", __FILE__, __LINE__));\
			goto label; \
		} \
	}
#else
#define VALIDATE_INDEX(index, limit, error, label)
#endif
+\r
+\r
+\r
+// Typedefs\r
+\r
// Classes of async events dispatched to IBAL callbacks.
// Values start at 1 so that zero-initialized memory is never a valid class.
typedef enum {
	E_EV_CA=1,	// HCA-level event
	E_EV_QP,	// Queue Pair event
	E_EV_CQ,	// Completion Queue event
	E_EV_LAST	// sentinel - not a real class
} ENUM_EVENT_CLASS;
+\r
// Object-type tags used to mark/validate driver resource structures.
// Values start at 1 so that zeroed memory reads as no valid mark.
typedef enum {
	E_MARK_CA=1, // Channel Adaptor
	E_MARK_PD, // Protection Domain
	E_MARK_CQ, // Completion Queue
	E_MARK_QP, // Queue Pair
	E_MARK_AV, // Address Vector (UD)
	E_MARK_MG, // Multicast Group
	E_MARK_MR, // Memory Region
	E_MARK_MW, // Memory Windows
	E_MARK_INVALID,	// explicit "not a valid object" tag
} ENUM_MARK;
+\r
// Memory-region registration flavors.
typedef enum {
	E_MR_PHYS=1,	// physical (page-list) registration
	E_MR_SHARED,	// shared memory region
	E_MR_ANY,	// wildcard - any MR type accepted
	E_MR_INVALID	// sentinel / invalid type
} ENUM_MR_TYPE;
+\r
+/*\r
+ * Attribute cache for port info saved to expedite local MAD processing.\r
+ * Note that the cache accounts for the worst case GID and PKEY table size\r
+ * but is allocated from paged pool, so it's nothing to worry about.\r
+ */\r
+\r
// One cached GUID-info block; 'valid' gates whether 'tbl' holds real data.
typedef struct _guid_block
{
	boolean_t valid;
	ib_guid_info_t tbl;

} mlnx_guid_block_t;

// Cached PortInfo attribute for a port.
typedef struct _port_info_cache
{
	boolean_t valid;
	ib_port_info_t info;

} mlnx_port_info_cache_t;

// One cached PKey-table block.
typedef struct _pkey_block
{
	boolean_t valid;
	ib_pkey_table_info_t tbl;

} mlnx_pkey_block_t;

// Cached SL-to-VL mapping table.
typedef struct _sl_vl_cache
{
	boolean_t valid;
	ib_slvl_table_t tbl;

} mlnx_sl_vl_cache_t;

// One cached VL-arbitration table block.
typedef struct _vl_arb_block
{
	boolean_t valid;
	ib_vl_arb_table_t tbl;

} mlnx_vl_arb_block_t;

// Per-port attribute cache (see the comment above: sized for the worst-case
// GID/PKEY tables, allocated from paged pool).
typedef struct _attr_cache
{
	mlnx_guid_block_t guid_block[32];	// GUID info, 32 blocks max
	mlnx_port_info_cache_t port_info;
	mlnx_pkey_block_t pkey_tbl[2048];	// worst-case PKey table size
	mlnx_sl_vl_cache_t sl_vl;
	mlnx_vl_arb_block_t vl_arb[4];	// 4 VL-arbitration blocks

} mlnx_cache_t;
+\r
// HOB: per-HCA object holding the IBAL-facing kernel state
// (callbacks, consumer context, async work manager, attribute cache).
typedef struct _ib_ca {
	ENUM_MARK mark;	// object-type tag; E_MARK_CA when valid
	ci_completion_cb_t comp_cb_p;	// IBAL completion callback
	ci_async_event_cb_t async_cb_p;	// IBAL async-event callback
	const void *ca_context;	// opaque consumer (IBAL) context
	void *cl_device_h;	// complib device handle
	uint32_t index;	// index of this HOB in mlnx_hob_array
	cl_async_proc_t *async_proc_mgr_p;	// async processing manager
	mlnx_cache_t *cache; // Cached port attributes.
	const void * __ptr64 p_dev_obj; // store underlying device object
} mlnx_hob_t;
+\r
// HOBUL: user-level HCA resource block.  The real contents are legacy
// (guarded by WIN_TO_BE_REMOVED); only 'dummy' remains in current builds.
typedef struct HOBUL_t {
	int dummy;	// placeholder so the struct is non-empty
#ifdef WIN_TO_BE_REMOVED 
	pd_info_t *pd_info_tbl;
	HH_hca_hndl_t hh_hndl; /* For HH direct access */
	HHUL_hca_hndl_t hhul_hndl; /* user level HCA resources handle for HH */
	uint32_t cq_idx_mask; /* */
	uint32_t qp_idx_mask; /* */
	uint32_t vendor_id; /* \ */
	uint32_t device_id; /* > 3 items needed for initializing user level */
	void *hca_ul_resources_p; /* / */
	MT_size_t cq_ul_resources_sz; /* Needed for allocating user resources for CQs */
	MT_size_t qp_ul_resources_sz; /* Needed for allocating user resources for QPs */
	MT_size_t pd_ul_resources_sz; /* Needed for allocating user resources for PDs */
	uint32_t max_cq; /* Max num. of CQs - size of following table */
	cq_info_t *cq_info_tbl;
	uint32_t max_qp; /* Max num. of QPs - size of following table */
	qp_info_t *qp_info_tbl;
	uint32_t max_pd; /* Max num. of PDs - size of following table */
	uint32_t log2_mpt_size;
	atomic32_t count;
#endif 
} mlnx_hobul_t, *mlnx_hobul_hndl_t;
+\r
// Multicast group attachment record: which QP joined which group.
typedef struct _ib_mcast {
	ib_gid_t mcast_gid;	// multicast group GID
	struct ib_qp *ib_qp_p;	// QP attached to the group
	uint16_t mcast_lid;	// multicast LID
} mlnx_mcast_t;
+\r
// Top-level per-HCA record, chained into the global HCA list.
// Aggregates the low-level device, the kernel HOB and the user-level HOBUL.
typedef struct _mlnx_hca_t {
	cl_list_item_t list_item; // to include in the HCA chain
	net64_t guid; // HCA node Guid
	struct mthca_dev *mdev; // VP Driver device
	uint32_t hw_ver; // HCA HW version
	mlnx_hob_t hob; // HOB - IBAL-related HCA resources
	mlnx_hobul_t hobul; // HOBUL - IBAL-related kernel client resources

#ifdef WIN_TO_BE_REMOVED 
	// removed as it is found in p_ext->cl_ext.p_pdo
	const void* __ptr64 p_dev_obj; // Driver PDO
#endif 
} mlnx_hca_t;
+\r
+\r
+typedef mlnx_hob_t *mlnx_hca_h;\r
+\r
+// Global Variables\r
+//extern mlnx_hca_t mlnx_hca_array[];\r
+//extern uint32_t mlnx_num_hca;\r
+\r
+extern mlnx_hob_t mlnx_hob_array[];\r
+extern mlnx_hobul_t *mlnx_hobul_array[];\r
+\r
+// Functions\r
+void\r
+setup_ci_interface(\r
+ IN const ib_net64_t ca_guid,\r
+ OUT ci_interface_t *p_interface );\r
+\r
+void\r
+mlnx_hca_insert(\r
+ IN mlnx_hca_t *p_hca );\r
+\r
+void\r
+mlnx_hca_remove(\r
+ IN mlnx_hca_t *p_hca );\r
+\r
+mlnx_hca_t*\r
+mlnx_hca_from_guid(\r
+ IN ib_net64_t guid );\r
+\r
+/*\r
+void\r
+mlnx_names_from_guid(\r
+ IN ib_net64_t guid,\r
+ OUT char **hca_name_p,\r
+ OUT char **dev_name_p);\r
+*/\r
+\r
+cl_status_t\r
+mlnx_hcas_init( void );\r
+\r
+cl_status_t\r
+mlnx_hobs_init( void );\r
+\r
+ib_api_status_t\r
+mlnx_hobs_insert(\r
+ IN mlnx_hca_t *p_hca,\r
+ OUT mlnx_hob_t **hob_p);\r
+\r
+\r
+ib_api_status_t\r
+mlnx_hobs_set_cb(\r
+ IN mlnx_hob_t *hob_p, \r
+ IN ci_completion_cb_t comp_cb_p,\r
+ IN ci_async_event_cb_t async_cb_p,\r
+ IN const void* const ib_context);\r
+\r
+ib_api_status_t\r
+mlnx_hobs_get_context(\r
+ IN mlnx_hob_t *hob_p,\r
+ OUT void **context_p);\r
+\r
+ib_api_status_t\r
+mlnx_hobs_create_device(\r
+ IN mlnx_hob_t *hob_p,\r
+ OUT char* dev_name);\r
+\r
+void\r
+mlnx_hobs_remove(\r
+ IN mlnx_hob_t *hob_p);\r
+\r
+mlnx_hobul_t *\r
+mlnx_hobs_get_hobul(\r
+ IN mlnx_hob_t *hob_p);\r
+\r
+void\r
+mlnx_hobul_get(\r
+ IN mlnx_hob_t *hob_p,\r
+ OUT void **resources_p );\r
+\r
+void\r
+mlnx_hobul_delete(\r
+ IN mlnx_hob_t *hob_p);\r
+\r
+void\r
+mlnx_conv_hca_cap(\r
+ IN struct ib_device *ib_dev,\r
+ IN struct ib_device_attr *hca_info_p,\r
+ IN struct ib_port_attr *hca_ports,\r
+ OUT ib_ca_attr_t *ca_attr_p);\r
+\r
+ib_api_status_t\r
+mlnx_local_mad (\r
+ IN const ib_ca_handle_t h_ca,\r
+ IN const uint8_t port_num,\r
+ IN const ib_mad_t *p_mad_in,\r
+ OUT ib_mad_t *p_mad_out );\r
+\r
+void\r
+mlnx_memory_if(\r
+ IN OUT ci_interface_t *p_interface );\r
+\r
+void\r
+mlnx_ecc_if(\r
+ IN OUT ci_interface_t *p_interface );\r
+\r
+void\r
+mlnx_direct_if(\r
+ IN OUT ci_interface_t *p_interface );\r
+\r
+void\r
+mlnx_mcast_if(\r
+ IN OUT ci_interface_t *p_interface );\r
+\r
+ib_api_status_t\r
+fw_access_ctrl(\r
+ IN const void* __ptr64 context,\r
+ IN const void* __ptr64* const handle_array OPTIONAL,\r
+ IN uint32_t num_handles,\r
+ IN ib_ci_op_t* const p_ci_op,\r
+ IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL);\r
+\r
+\r
+void cq_comp_handler(struct ib_cq *cq, void *context);\r
+\r
+void ca_event_handler(struct ib_event *ev, void *context);\r
+\r
+void qp_event_handler(struct ib_event *ev, void *context);\r
+\r
+void cq_event_handler(struct ib_event *ev, void *context);\r
+\r
+ib_qp_state_t mlnx_qps_to_ibal(enum ib_qp_state qps);\r
+\r
+enum ib_qp_state mlnx_qps_from_ibal(ib_qp_state_t ib_qps);\r
+\r
+ib_api_status_t\r
+mlnx_conv_qp_modify_attr(\r
+ IN const struct ib_qp *ib_qp_p,\r
+ IN ib_qp_type_t qp_type,\r
+ IN const ib_qp_mod_t *modify_attr_p, \r
+ OUT struct ib_qp_attr *qp_attr_p,\r
+ OUT int *qp_attr_mask_p\r
+ );\r
+\r
+int\r
+mlnx_conv_ibal_av(\r
+ IN const struct ib_device *ib_dev_p,\r
+ IN const ib_av_attr_t *ibal_av_p,\r
+ OUT struct ib_ah_attr *ah_attr_p);\r
+\r
+int\r
+mlnx_conv_mthca_av(\r
+ IN const struct ib_ah *ib_ah_p,\r
+ OUT ib_av_attr_t *ibal_av_p);\r
+\r
+void\r
+mlnx_modify_ah(\r
+ IN const struct ib_ah *ib_ah_p,\r
+ IN const struct ib_ah_attr *ah_attr_p);\r
+\r
+\r
+#endif\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_debug.h 46 2005-05-30 17:55:53Z sleybo $\r
+ */\r
+\r
+\r
+#ifndef _HCA_DEBUG_H_\r
+#define _HCA_DEBUG_H_\r
+\r
+\r
+extern uint32_t g_mthca_dbg_level;\r
+extern uint32_t g_mthca_dbg_flags;\r
+#define MAX_LOG_BUF_LEN 512\r
+extern WCHAR g_wlog_buf[ MAX_LOG_BUF_LEN ]; \r
+extern UCHAR g_slog_buf[ MAX_LOG_BUF_LEN ]; \r
+\r
+\r
+#if defined(EVENT_TRACING)\r
+//\r
+// Software Tracing Definitions \r
+//\r
+\r
+#define WPP_CONTROL_GUIDS \\r
+ WPP_DEFINE_CONTROL_GUID(HCACtlGuid,(8BF1F640,63FE,4743,B9EF,FA38C695BFDE), \\r
+ WPP_DEFINE_BIT( HCA_DBG_DEV) \\r
+ WPP_DEFINE_BIT( HCA_DBG_INIT) \\r
+ WPP_DEFINE_BIT( HCA_DBG_PNP) \\r
+ WPP_DEFINE_BIT( HCA_DBG_MAD) \\r
+ WPP_DEFINE_BIT( HCA_DBG_PO) \\r
+ WPP_DEFINE_BIT( HCA_DBG_CQ) \\r
+ WPP_DEFINE_BIT( HCA_DBG_QP) \\r
+ WPP_DEFINE_BIT( HCA_DBG_MEMORY) \\r
+ WPP_DEFINE_BIT( HCA_DBG_AV) \\r
+ WPP_DEFINE_BIT( HCA_DBG_LOW) \\r
+ WPP_DEFINE_BIT( HCA_DBG_SHIM))\r
+\r
+#define WPP_GLOBALLOGGER\r
+\r
+\r
+#define WPP_LEVEL_FLAGS_ENABLED(lvl, flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= lvl)\r
+#define WPP_LEVEL_FLAGS_LOGGER(lvl,flags) WPP_LEVEL_LOGGER(flags)\r
+#define WPP_FLAG_ENABLED(flags)(WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= TRACE_LEVEL_VERBOSE)\r
+#define WPP_FLAG_LOGGER(flags) WPP_LEVEL_LOGGER(flags)\r
+\r
+\r
+// begin_wpp config\r
+// HCA_ENTER(FLAG);\r
+// HCA_EXIT(FLAG);\r
+// USEPREFIX(HCA_PRINT, "%!STDPREFIX! %!FUNC!() :");\r
+// USESUFFIX(HCA_ENTER, " %!FUNC!()===>");\r
+// USESUFFIX(HCA_EXIT, " %!FUNC!()<===");\r
+// end_wpp\r
+\r
+\r
+\r
+#else\r
+\r
+\r
+#include <evntrace.h>\r
+\r
+/*\r
+ * Debug macros\r
+ */\r
+\r
+\r
+#define HCA_DBG_DEV (1 << 0)\r
+#define HCA_DBG_INIT (1<<1)\r
+#define HCA_DBG_PNP (1 << 2)\r
+#define HCA_DBG_MAD (1 << 3)\r
+#define HCA_DBG_PO (1 << 4)\r
+#define HCA_DBG_QP (1 << 5)\r
+#define HCA_DBG_CQ (1 << 6)\r
+#define HCA_DBG_MEMORY (1 << 7)\r
+#define HCA_DBG_AV (1<<8)\r
+#define HCA_DBG_LOW (1 << 9)\r
+#define HCA_DBG_SHIM (1 << 10)\r
+\r
+static void _build_str( const char * format, ... )\r
+{\r
+ va_list p_arg;\r
+ va_start(p_arg, format);\r
+ vsprintf((char *)g_slog_buf , format , p_arg);\r
+ swprintf(g_wlog_buf, L"%S", g_slog_buf);\r
+ va_end(p_arg);\r
+}\r
+\r
+#define HCA_PRINT_TO_EVENT_LOG(_obj_,_level_,_flag_,_msg_) \\r
+ { \\r
+ NTSTATUS event_id; \\r
+ switch (_level_) { \\r
+ case TRACE_LEVEL_FATAL: case TRACE_LEVEL_ERROR: event_id = EVENT_MTHCA_ANY_ERROR; break; \\r
+ case TRACE_LEVEL_WARNING: event_id = EVENT_MTHCA_ANY_WARN; break; \\r
+ default: event_id = EVENT_MTHCA_ANY_INFO; break; \\r
+ } \\r
+ _build_str _msg_; \\r
+ WriteEventLogEntryStr( _obj_, (ULONG)event_id, 0, 0, g_wlog_buf, 0, 0 ); \\r
+ }\r
+\r
+#define HCA_PRINT_EV_MDEV(_level_,_flag_,_msg_) \\r
+ HCA_PRINT_TO_EVENT_LOG(mdev->ext->cl_ext.p_self_do,_level_,_flag_,_msg_)\r
+\r
+\r
#if DBG

// Debug-print gated on global level and flag masks.  '_msg_' is a
// parenthesized printf argument list.  The assignment of _level_ to a
// local is needed to overcome warning C4127 (conditional expression is
// constant) when _level_ is a literal.
#define HCA_PRINT(_level_,_flag_,_msg_) \
	{ \
		int __lvl = _level_; \
		if (g_mthca_dbg_level >= (_level_) && \
			(g_mthca_dbg_flags & (_flag_))) { \
				DbgPrint ("[MTHCA] %s() :", __FUNCTION__); \
				if(__lvl == TRACE_LEVEL_ERROR) DbgPrint ("***ERROR*** "); \
				DbgPrint _msg_; \
		} \
	}

#else

// Free build: all debug prints compile to nothing.
#define HCA_PRINT(lvl ,flags, msg) 

#endif
+\r
// Print to the debugger AND the NT event log in one shot.
#define HCA_PRINT_EV(_level_,_flag_,_msg_) \
	{ \
		HCA_PRINT(_level_,_flag_,_msg_) \
		HCA_PRINT_EV_MDEV(_level_,_flag_,_msg_) \
	}

// Function entry trace (verbose level).
#define HCA_ENTER(flags)\
	HCA_PRINT(TRACE_LEVEL_VERBOSE, flags,("===>\n"));

// Function exit trace (verbose level).
#define HCA_EXIT(flags)\
	HCA_PRINT(TRACE_LEVEL_VERBOSE, flags, ("<===\n" ));


// Exit trace that also prints '_msg_' when the local 'status' variable
// is not IB_SUCCESS.  Expects 'status' (ib_api_status_t) in scope.
#define HCA_PRINT_EXIT(_level_,_flag_,_msg_) \
	{\
		if (status != IB_SUCCESS) {\
			HCA_PRINT(_level_,_flag_,_msg_);\
		}\
		HCA_EXIT(_flag_);\
	}
+\r
+#endif //EVENT_TRACING\r
+\r
+\r
+\r
+\r
+#endif /*_HCA_DEBUG_H_ */\r
+\r
+\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_direct.c 148 2005-07-12 07:48:46Z sleybo $\r
+ */\r
+\r
+\r
+#include "hca_driver.h"\r
+#include "hca_debug.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_direct.tmh"\r
+#endif\r
+#include "mthca_dev.h"\r
+\r
+\r
+/* Controls whether to use the VAPI entrypoints in THH, or the IBAL native ones. */\r
+#define MLNX_SEND_NATIVE 1\r
+#define MLNX_RECV_NATIVE 1\r
+#define MLNX_POLL_NATIVE 1\r
+\r
+\r
+/*\r
+* Work Request Processing Verbs.\r
+*/\r
+ib_api_status_t\r
+mlnx_post_send (\r
+ IN const ib_qp_handle_t h_qp,\r
+ IN ib_send_wr_t *p_send_wr,\r
+ OUT ib_send_wr_t **pp_failed )\r
+{\r
+ int err;\r
+ ib_api_status_t status;\r
+ struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
+ struct ib_device *ib_dev_p = ib_qp_p->device;\r
+ \r
+ HCA_ENTER(HCA_DBG_QP);\r
+ \r
+ // sanity checks\r
+\r
+ // create CQ\r
+ err = ib_dev_p->post_send(ib_qp_p, p_send_wr, pp_failed );\r
+ if (err) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP ,("post_send failed (%d)\n", err));\r
+ if (err == -ENOMEM)\r
+ status = IB_INSUFFICIENT_RESOURCES;\r
+ else\r
+ status = errno_to_iberr(err);\r
+ goto err_post_send;\r
+ }\r
+\r
+ status = IB_SUCCESS;\r
+ \r
+err_post_send: \r
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_QP ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+ return status;\r
+ \r
+}\r
+\r
+\r
+ib_api_status_t \r
+mlnx_post_recv (\r
+ IN const ib_qp_handle_t h_qp,\r
+ IN ib_recv_wr_t *p_recv_wr,\r
+ OUT ib_recv_wr_t **pp_failed OPTIONAL )\r
+{\r
+ int err;\r
+ ib_api_status_t status;\r
+ struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
+ struct ib_device *ib_dev_p = ib_qp_p->device;\r
+ \r
+ HCA_ENTER(HCA_DBG_QP);\r
+\r
+ // sanity checks\r
+ \r
+ // create CQ\r
+ err = ib_dev_p->post_recv(ib_qp_p, p_recv_wr, pp_failed );\r
+ if (err) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("post_recv failed (%d)\n", err));\r
+ if (err == -ENOMEM)\r
+ status = IB_INSUFFICIENT_RESOURCES;\r
+ else\r
+ status = errno_to_iberr(err);\r
+ goto err_post_recv;\r
+ }\r
+\r
+ status = IB_SUCCESS;\r
+ \r
+err_post_recv: \r
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_QP ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+ return status;\r
+ \r
+}\r
+\r
+/*\r
+* Completion Processing and Completion Notification Request Verbs.\r
+*/\r
+\r
+ib_api_status_t\r
+mlnx_peek_cq(\r
+ IN const ib_cq_handle_t h_cq,\r
+ OUT uint32_t* const p_n_cqes )\r
+{\r
+ UNREFERENCED_PARAMETER(h_cq);\r
+ UNREFERENCED_PARAMETER(p_n_cqes);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("mlnx_peek_cq not implemented\n"));\r
+ return IB_INVALID_CA_HANDLE;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_poll_cq (\r
+ IN const ib_cq_handle_t h_cq,\r
+ IN OUT ib_wc_t** const pp_free_wclist,\r
+ OUT ib_wc_t** const pp_done_wclist )\r
+{\r
+ int err;\r
+ ib_api_status_t status = IB_SUCCESS;\r
+ struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;\r
+ \r
+ HCA_ENTER(HCA_DBG_CQ);\r
+\r
+ // sanity checks\r
+ if (!pp_free_wclist || !pp_done_wclist || !*pp_free_wclist) {\r
+ status = IB_INVALID_PARAMETER;\r
+ goto err_invalid_params;\r
+ }\r
+\r
+ // poll CQ\r
+ err = mthca_poll_cq_list(ib_cq_p, pp_free_wclist, pp_done_wclist );\r
+ if (err) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("mthca_poll_cq_list failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ }else if (!*pp_done_wclist)\r
+ status = IB_NOT_FOUND;\r
+ \r
+err_invalid_params: \r
+ if (status != IB_NOT_FOUND){\r
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_CQ ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+ }else\r
+ HCA_EXIT(HCA_DBG_CQ);\r
+ return status;\r
+ \r
+}\r
+\r
+ib_api_status_t\r
+mlnx_enable_cq_notify (\r
+ IN const ib_cq_handle_t h_cq,\r
+ IN const boolean_t solicited )\r
+{\r
+ int err;\r
+ ib_api_status_t status = IB_SUCCESS;\r
+ struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;\r
+ \r
+ HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+ // REARM CQ\r
+ err = ib_req_notify_cq(ib_cq_p, (solicited) ? IB_CQ_SOLICITED : IB_CQ_NEXT_COMP );\r
+ if (err) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_req_notify_cq failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ }\r
+ \r
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+ return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_enable_ncomp_cq_notify (\r
+ IN const ib_cq_handle_t h_cq,\r
+ IN const uint32_t n_cqes )\r
+{\r
+ int err;\r
+ ib_api_status_t status = IB_SUCCESS;\r
+ struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;\r
+\r
+ HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+ err = ib_req_ncomp_notif(ib_cq_p, n_cqes );\r
+ if (err) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_req_ncomp_notif failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ }\r
+ \r
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+ return status;\r
+}\r
+\r
+ib_api_status_t\r
+mlnx_bind_mw (\r
+ IN const ib_mw_handle_t h_mw,\r
+ IN const ib_qp_handle_t h_qp,\r
+ IN ib_bind_wr_t* const p_mw_bind,\r
+ OUT net32_t* const p_rkey )\r
+{\r
+ UNREFERENCED_PARAMETER(h_mw);\r
+ UNREFERENCED_PARAMETER(h_qp);\r
+ UNREFERENCED_PARAMETER(p_mw_bind);\r
+ UNREFERENCED_PARAMETER(p_rkey);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("mlnx_bind_mw not implemented\n"));\r
+ return IB_INVALID_CA_HANDLE;\r
+}\r
+\r
+\r
+void\r
+mlnx_direct_if(\r
+ IN OUT ci_interface_t *p_interface )\r
+{\r
+ p_interface->post_send = mlnx_post_send;\r
+ p_interface->post_recv = mlnx_post_recv;\r
+\r
+ p_interface->enable_ncomp_cq_notify = mlnx_enable_ncomp_cq_notify;\r
+ p_interface->peek_cq = NULL; /* mlnx_peek_cq: Not implemented */\r
+ p_interface->poll_cq = mlnx_poll_cq;\r
+ p_interface->enable_cq_notify = mlnx_enable_cq_notify;\r
+\r
+ p_interface->bind_mw = mlnx_bind_mw;\r
+}\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_driver.c 46 2005-05-30 17:55:53Z sleybo $\r
+ */\r
+\r
+\r
+/*\r
+ * Provides the driver entry points for the Tavor VPD.\r
+ */\r
+\r
+#include "hca_driver.h"\r
+#include "hca_debug.h"\r
+\r
+#include "mthca_log.h"\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_driver.tmh"\r
+#endif\r
+#include "mthca_dev.h"\r
+#include <wdmguid.h>\r
+#include <initguid.h>\r
+#pragma warning( push, 3 )\r
+//#include "MdCard.h"\r
+#pragma warning( pop )\r
+#include <iba/ib_ci_ifc.h>\r
+\r
+/* from \inc\platform\evntrace.h\r
+#define TRACE_LEVEL_NONE 0 // Tracing is not on\r
+#define TRACE_LEVEL_FATAL 1 // Abnormal exit or termination\r
+#define TRACE_LEVEL_ERROR 2 // Severe errors that need logging\r
+#define TRACE_LEVEL_WARNING 3 // Warnings such as allocation failure\r
+#define TRACE_LEVEL_INFORMATION 4 // Includes non-error cases(e.g.,Entry-Exit)\r
+#define TRACE_LEVEL_VERBOSE 5 // Detailed traces from intermediate steps\r
+*/\r
+uint32_t g_mthca_dbg_level = TRACE_LEVEL_INFORMATION;\r
+uint32_t g_mthca_dbg_flags= 0xffff;\r
+WCHAR g_wlog_buf[ MAX_LOG_BUF_LEN ]; \r
+UCHAR g_slog_buf[ MAX_LOG_BUF_LEN ]; \r
+\r
+/*\r
+ * UVP name does not include file extension. For debug builds, UAL\r
+ * will append "d.dll". For release builds, UAL will append ".dll"\r
+ */\r
+char mlnx_uvp_lib_name[MAX_LIB_NAME] = {"mthcau"};\r
+\r
+\r
+NTSTATUS\r
+DriverEntry(\r
+ IN PDRIVER_OBJECT p_driver_obj,\r
+ IN PUNICODE_STRING p_registry_path );\r
+\r
+static NTSTATUS\r
+__read_registry(\r
+ IN UNICODE_STRING* const p_Param_Path );\r
+\r
+static void\r
+hca_drv_unload(\r
+ IN PDRIVER_OBJECT p_driver_obj );\r
+\r
+static NTSTATUS\r
+hca_sysctl(\r
+ IN PDEVICE_OBJECT p_dev_obj,\r
+ IN PIRP p_irp );\r
+\r
+static NTSTATUS\r
+__pnp_notify_target(\r
+ IN TARGET_DEVICE_REMOVAL_NOTIFICATION *p_notify,\r
+ IN void *context );\r
+\r
+static NTSTATUS\r
+__pnp_notify_ifc(\r
+ IN DEVICE_INTERFACE_CHANGE_NOTIFICATION *p_notify,\r
+ IN void *context );\r
+\r
+static NTSTATUS\r
+fw_access_pciconf (\r
+ IN BUS_INTERFACE_STANDARD *p_BusInterface,\r
+ IN ULONG op_flag,\r
+ IN PVOID p_buffer,\r
+ IN ULONG offset,\r
+ IN ULONG POINTER_ALIGNMENT length );\r
+\r
+static NTSTATUS\r
+fw_get_pci_bus_interface(\r
+ IN DEVICE_OBJECT *p_dev_obj,\r
+ OUT BUS_INTERFACE_STANDARD *p_BusInterface );\r
+\r
+static NTSTATUS\r
+fw_flash_write_data (\r
+ IN BUS_INTERFACE_STANDARD *p_BusInterface,\r
+ IN PVOID p_buffer,\r
+ IN ULONG offset,\r
+ IN ULONG POINTER_ALIGNMENT length );\r
+\r
+static NTSTATUS\r
+fw_flash_read_data (\r
+ IN BUS_INTERFACE_STANDARD *p_BusInterface,\r
+ IN PVOID p_buffer,\r
+ IN ULONG offset,\r
+ IN ULONG POINTER_ALIGNMENT length );\r
+\r
+static NTSTATUS\r
+fw_flash_get_ca_guid(\r
+ IN DEVICE_OBJECT *p_dev_obj,\r
+ OUT uint64_t *ca_guid );\r
+\r
+static NTSTATUS\r
+fw_flash_read4( \r
+ IN BUS_INTERFACE_STANDARD *p_BusInterface,\r
+ IN uint32_t addr, \r
+ IN OUT uint32_t *p_data);\r
+\r
+static NTSTATUS\r
+fw_flash_readbuf(\r
+ IN BUS_INTERFACE_STANDARD *p_BusInterface,\r
+ IN uint32_t offset,\r
+ IN OUT void *p_data,\r
+ IN uint32_t len);\r
+static NTSTATUS\r
+fw_set_bank(\r
+ IN BUS_INTERFACE_STANDARD *p_BusInterface,\r
+ IN uint32_t bank );\r
+\r
+static NTSTATUS\r
+fw_flash_init(\r
+ IN BUS_INTERFACE_STANDARD *p_BusInterface );\r
+\r
+static NTSTATUS\r
+fw_flash_deinit(\r
+ IN BUS_INTERFACE_STANDARD *p_BusInterface );\r
+\r
+#ifdef ALLOC_PRAGMA\r
+#pragma alloc_text (INIT, DriverEntry)\r
+#pragma alloc_text (INIT, __read_registry)\r
+#pragma alloc_text (PAGE, hca_drv_unload)\r
+#pragma alloc_text (PAGE, hca_sysctl)\r
+#pragma alloc_text (PAGE, fw_flash_get_ca_guid)\r
+#endif\r
+\r
/*
 * Driver entry point: read registry parameters, initialize the adapter
 * database, hook PnP/power/WMI dispatch and AddDevice, then bring up
 * the IB core and uverbs modules.
 *
 * NOTE(review): the early-failure returns below leave WPP tracing
 * initialized (no WPP_CLEANUP) and, on ib_uverbs_init failure, leave
 * ib_core initialized - confirm whether teardown is required on these
 * paths or deferred to hca_drv_unload.
 */
NTSTATUS
DriverEntry(
	IN PDRIVER_OBJECT p_driver_obj,
	IN PUNICODE_STRING p_registry_path )
{
	NTSTATUS status;
	cl_status_t cl_status;
#if defined(EVENT_TRACING)
	WPP_INIT_TRACING(p_driver_obj ,p_registry_path);
#endif
	HCA_ENTER( HCA_DBG_DEV );

	/* Pull driver parameters from HKLM\...\Parameters. */
	status = __read_registry( p_registry_path );
	if( !NT_SUCCESS( status ) )
	{
		HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_INIT, 
			("__read_registry_path returned 0x%X.\n", status));
		return status;
	}

	/* Initialize Adapter DB */
	cl_status = mlnx_hcas_init();
	if( cl_status != CL_SUCCESS )
	{
		HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,
			("mlnx_hcas_init returned %s.\n", cl_status_text[cl_status]));
		return cl_to_ntstatus( cl_status );
	}
//	cl_memclr( mlnx_hca_array, MLNX_MAX_HCA * sizeof(ci_interface_t) );

	/*leo: init function table */
	hca_init_vfptr();

	/*leo: calibrate CPU */
	MT_time_calibrate();

	/* Route PnP/power to complib; keep WMI and unload local. */
	p_driver_obj->MajorFunction[IRP_MJ_PNP] = cl_pnp;
	p_driver_obj->MajorFunction[IRP_MJ_POWER] = cl_power;
	p_driver_obj->MajorFunction[IRP_MJ_SYSTEM_CONTROL] = hca_sysctl;
	p_driver_obj->DriverUnload = hca_drv_unload;
	p_driver_obj->DriverExtension->AddDevice = hca_add_device;

	/* init core */
	if (ib_core_init()) {
		HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("Failed to init core, aborting.\n"));
		return STATUS_UNSUCCESSFUL;
	}

	/* init uverbs module */
	if (ib_uverbs_init()) {
		HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("Failed ib_uverbs_init, aborting.\n"));
		return STATUS_UNSUCCESSFUL;
	}
	HCA_EXIT( HCA_DBG_DEV );
	return STATUS_SUCCESS;
}
+\r
+\r
+static NTSTATUS\r
+__read_registry(\r
+ IN UNICODE_STRING* const p_registry_path )\r
+{\r
+ NTSTATUS status;\r
+ /* Remember the terminating entry in the table below. */\r
+ RTL_QUERY_REGISTRY_TABLE table[3];\r
+ UNICODE_STRING param_path;\r
+\r
+ HCA_ENTER( HCA_DBG_DEV );\r
+\r
+ RtlInitUnicodeString( ¶m_path, NULL );\r
+ param_path.MaximumLength = p_registry_path->Length + \r
+ sizeof(L"\\Parameters");\r
+ param_path.Buffer = cl_zalloc( param_path.MaximumLength );\r
+ if( !param_path.Buffer )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_DEV, \r
+ ("Failed to allocate parameters path buffer.\n"));\r
+ return STATUS_INSUFFICIENT_RESOURCES;\r
+ }\r
+\r
+ RtlAppendUnicodeStringToString( ¶m_path, p_registry_path );\r
+ RtlAppendUnicodeToString( ¶m_path, L"\\Parameters" );\r
+\r
+ /*\r
+ * Clear the table. This clears all the query callback pointers,\r
+ * and sets up the terminating table entry.\r
+ */\r
+ cl_memclr( table, sizeof(table) );\r
+\r
+ /* Setup the table entries. */\r
+ table[0].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
+ table[0].Name = L"DebugLevel";\r
+ table[0].EntryContext = &g_mthca_dbg_level;\r
+ table[0].DefaultType = REG_DWORD;\r
+ table[0].DefaultData = &g_mthca_dbg_level;\r
+ table[0].DefaultLength = sizeof(ULONG);\r
+\r
+ \r
+ table[1].Flags = RTL_QUERY_REGISTRY_DIRECT;\r
+ table[1].Name = L"DebugFlags";\r
+ table[1].EntryContext = &g_mthca_dbg_flags;\r
+ table[1].DefaultType = REG_DWORD;\r
+ table[1].DefaultData = &g_mthca_dbg_flags;\r
+ table[1].DefaultLength = sizeof(ULONG);\r
+\r
+ /* Have at it! */\r
+ status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE, \r
+ param_path.Buffer, table, NULL, NULL );\r
+\r
+ HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_INIT, \r
+ ("debug level %d debug flags 0x%.8x\n",\r
+ g_mthca_dbg_level ,\r
+ g_mthca_dbg_flags));\r
+\r
+\r
+ cl_free( param_path.Buffer );\r
+ HCA_EXIT( HCA_DBG_DEV );\r
+ return status;\r
+}\r
+\r
+\r
/*
 * Driver unload routine: tears down the uverbs module and then the IB core
 * (reverse of the DriverEntry bring-up order), then stops WPP tracing.
 */
static void
hca_drv_unload(
	IN				PDRIVER_OBJECT			p_driver_obj )
{
	HCA_ENTER( HCA_DBG_DEV );

	UNUSED_PARAM( p_driver_obj );

	/* cleanup in reverse order of initialization */
	ib_uverbs_cleanup();
	ib_core_cleanup();

	HCA_EXIT( HCA_DBG_DEV );
#if defined(EVENT_TRACING)
	/* must be last: tracing is used by the HCA_* macros above */
	WPP_CLEANUP(p_driver_obj);
#endif

}
+\r
+\r
+static NTSTATUS\r
+hca_sysctl(\r
+ IN PDEVICE_OBJECT p_dev_obj,\r
+ IN PIRP p_irp )\r
+{\r
+ NTSTATUS status;\r
+ hca_dev_ext_t *p_ext;\r
+\r
+ HCA_ENTER( HCA_DBG_DEV );\r
+\r
+ p_ext = p_dev_obj->DeviceExtension;\r
+\r
+ IoSkipCurrentIrpStackLocation( p_irp );\r
+ status = IoCallDriver( p_ext->cl_ext.p_next_do, p_irp );\r
+\r
+ HCA_EXIT( HCA_DBG_DEV );\r
+ return status;\r
+}\r
+\r
/* Layout of a firmware image's primary sector, as read from flash by
 * fw_flash_get_ca_guid() (fields assumed from the fail-safe image format —
 * TODO confirm against the firmware image specification). */
typedef struct Primary_Sector{
	uint32_t fi_addr;		/* firmware image address; added to pointers read from it */
	uint32_t fi_size;
	uint32_t signature;
	uint32_t fw_reserved[5];
	uint32_t vsd[56];
	uint32_t branch_to;
	uint32_t crc016;
} primary_sector_t;
+\r
/* GPIO register values saved by fw_flash_init() and restored by
 * fw_flash_deinit().  NOTE(review): file-scope state — assumes flash access
 * is serialized for a single device at a time. */
static uint32_t old_dir;
static uint32_t old_pol;
static uint32_t old_mod;
static uint32_t old_dat;
+\r
/*
 * Read from or write to the HCA through PCI configuration space.
 *
 * The device exposes an indirect access pair: the target offset is first
 * written to the address register (PCI_CONF_ADDR), then the payload is
 * transferred through the data register (PCI_CONF_DATA) using the bus
 * interface's Get/SetBusData routines.
 *
 * op_flag	0 = read into p_buffer, non-zero = write from p_buffer.
 * Returns STATUS_NOT_SUPPORTED if the bus interface transfers 0 bytes,
 * STATUS_SUCCESS otherwise (including when p_BusInterface is NULL).
 */
static NTSTATUS
fw_access_pciconf (
		IN		BUS_INTERFACE_STANDARD		*p_BusInterface,
		IN		ULONG						op_flag,
		IN		PVOID						p_buffer,
		IN		ULONG						offset,
		IN		ULONG POINTER_ALIGNMENT		length )
{

	ULONG				bytes;
	NTSTATUS			status = STATUS_SUCCESS;

	PAGED_CODE();

	if (p_BusInterface)
	{

		/* latch the target offset into the indirect address register */
		bytes = p_BusInterface->SetBusData(
						p_BusInterface->Context,
						PCI_WHICHSPACE_CONFIG,
						(PVOID)&offset,
						PCI_CONF_ADDR,
						sizeof(ULONG) );

		if( op_flag == 0 )
		{
			/* read path: only attempt the data read if the address write
			 * transferred something */
			if ( bytes )
				bytes = p_BusInterface->GetBusData(
							p_BusInterface->Context,
							PCI_WHICHSPACE_CONFIG,
							p_buffer,
							PCI_CONF_DATA,
							length );
			if ( !bytes )
				status = STATUS_NOT_SUPPORTED;
		}

		else
		{
			/* write path */
			if ( bytes )
				bytes = p_BusInterface->SetBusData(
							p_BusInterface->Context,
							PCI_WHICHSPACE_CONFIG,
							p_buffer,
							PCI_CONF_DATA,
							length);

			if ( !bytes )
				status = STATUS_NOT_SUPPORTED;
		}
	}
	return status;
}
+\r
/*
 * Query the PDO stack for the standard PCI bus interface
 * (GUID_BUS_INTERFACE_STANDARD) by sending a synchronous
 * IRP_MN_QUERY_INTERFACE PnP IRP to the top of the device stack.
 *
 * On success *p_BusInterface is filled in and referenced by the bus
 * driver; the caller must call InterfaceDereference when done with it.
 */
static NTSTATUS
fw_get_pci_bus_interface(
	IN		DEVICE_OBJECT				*p_dev_obj,
	OUT		BUS_INTERFACE_STANDARD		*p_BusInterface )
{
	KEVENT event;
	NTSTATUS status;
	PIRP p_irp;
	IO_STATUS_BLOCK ioStatus;
	PIO_STACK_LOCATION p_irpStack;
	PDEVICE_OBJECT p_target_obj;

	KeInitializeEvent( &event, NotificationEvent, FALSE );

	/* takes a reference on the topmost device object; released below */
	p_target_obj = IoGetAttachedDeviceReference( p_dev_obj );

	p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP,
										  p_target_obj,
										  NULL,
										  0,
										  NULL,
										  &event,
										  &ioStatus );
	if (p_irp == NULL) {
		status = STATUS_INSUFFICIENT_RESOURCES;
		goto End;
	}
	p_irpStack = IoGetNextIrpStackLocation( p_irp );
	p_irpStack->MinorFunction = IRP_MN_QUERY_INTERFACE;
	p_irpStack->Parameters.QueryInterface.InterfaceType = (LPGUID) &GUID_BUS_INTERFACE_STANDARD;
	p_irpStack->Parameters.QueryInterface.Size = sizeof(BUS_INTERFACE_STANDARD);
	p_irpStack->Parameters.QueryInterface.Version = 1;
	p_irpStack->Parameters.QueryInterface.Interface = (PINTERFACE) p_BusInterface;
	p_irpStack->Parameters.QueryInterface.InterfaceSpecificData = NULL;

	/* PnP IRPs must be initialized to STATUS_NOT_SUPPORTED before being
	 * sent down */
	p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED;

	status = IoCallDriver( p_target_obj, p_irp );

	if ( status == STATUS_PENDING )
	{
		/* IoBuildSynchronousFsdRequest signals 'event' on completion */
		KeWaitForSingleObject( &event, Executive, KernelMode, FALSE, NULL );
		status = ioStatus.Status;
	}
End:
	// Done with reference
	ObDereferenceObject( p_target_obj );
	return status;
}
+\r
/*
 * Vendor-specific firmware access dispatcher.
 *
 * Executes the command in p_ci_op->command against the HCA's flash /
 * config space: FW_OPEN_IF acquires the PCI bus interface, FW_READ /
 * FW_WRITE transfer flash data, FW_READ_CMD / FW_WRITE_CMD access a single
 * config-space dword, and FW_CLOSE_IF releases the interface.  On any
 * failure the interface is also released.
 *
 * NOTE(review): BusInterface/if_ready are function-static, so only one
 * device and one client can use this path at a time — confirm callers
 * serialize access.
 * NOTE(review): despite the ib_api_status_t return type, this function
 * returns NTSTATUS codes (e.g. STATUS_INVALID_DEVICE_REQUEST) — callers
 * apparently treat the value as NTSTATUS; left as-is to avoid breaking
 * them.
 */
ib_api_status_t
fw_access_ctrl(
	IN		const	void* __ptr64				p_context,
	IN		const	void* __ptr64*	const		handle_array	OPTIONAL,
	IN				uint32_t					num_handles,
	IN				ib_ci_op_t*	const			p_ci_op,
	IN	OUT			ci_umv_buf_t				*p_umv_buf )
{
	DEVICE_OBJECT				*p_dev_obj;
	static BUS_INTERFACE_STANDARD	BusInterface;	/* opened by FW_OPEN_IF */
	static uint32_t				if_ready;			/* non-zero once opened */
	NTSTATUS					status;
	PVOID						p_data;
	ULONG						offset;
	ULONG POINTER_ALIGNMENT		length;
	ib_ci_op_t					*p_ci;
	mlnx_hob_t					*p_hob;

	UNREFERENCED_PARAMETER(handle_array);
	UNREFERENCED_PARAMETER(num_handles);
	UNREFERENCED_PARAMETER(p_umv_buf);

	status =  STATUS_SUCCESS;
	p_hob = (mlnx_hob_t *)(const void *)p_context;

	p_dev_obj =  (DEVICE_OBJECT *)(const void *)p_hob->p_dev_obj;
	p_ci = p_ci_op;

	/* sanity: a command buffer with a non-zero size is required */
	if ( !p_ci )
		return STATUS_INVALID_DEVICE_REQUEST;
	if ( !p_ci->buf_size )
		return STATUS_INVALID_DEVICE_REQUEST;

	length = p_ci->buf_size;
	offset = p_ci->buf_info;
	p_data = p_ci->p_buf;

	switch ( p_ci->command )
	{
	case FW_READ: // read data from flash
		if ( if_ready )
		{
			status = fw_flash_read_data(&BusInterface, p_data, offset, length);
		}
		break;
	case FW_WRITE: // write data to flash
		if ( if_ready )
		{

			status = fw_flash_write_data(&BusInterface, p_data, offset, length);
		}
		break;
	case FW_READ_CMD:
		/* single config-space dword read at 'offset' */
		if ( if_ready )
		{
			status = fw_access_pciconf(&BusInterface, 0 , p_data, offset, 4);
		}
		break;
	case FW_WRITE_CMD:
		/* single config-space dword write at 'offset' */
		if ( if_ready )
		{
			status = fw_access_pciconf(&BusInterface, 1 , p_data, offset, 4);
		}
		break;
	case FW_CLOSE_IF: // close BusInterface
		if (if_ready )
		{
			if_ready = 0;
			BusInterface.InterfaceDereference((PVOID)BusInterface.Context);
		}
		return status;
	case FW_OPEN_IF: // open BusInterface
		if ( !if_ready )
		{
			status = fw_get_pci_bus_interface(p_dev_obj, &BusInterface);

			if ( NT_SUCCESS( status ) )
			{
				if_ready = 1;
				status = STATUS_SUCCESS;
			}
		}
		return status;
	default:
		status = STATUS_NOT_SUPPORTED;
	}

	if ( status != STATUS_SUCCESS )
	{
		/* on any failure drop the interface so the next FW_OPEN_IF starts
		 * from a clean state */
		if ( if_ready )
		{
			if_ready = 0;
			BusInterface.InterfaceDereference((PVOID)BusInterface.Context);
		}
		HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_INIT,
			("fw_access_ctrl failed returns %08x.\n", status));
	}
	return status;
}
+\r
/*
 * Write a flash word through the indirect flash command registers.
 *
 * Sequence: place the data in the flash data register (FLASH_OFFSET+4),
 * issue WRITE_BIT | address to the command register (FLASH_OFFSET), then
 * poll the command register until the command bits (CMD_MASK) clear.
 *
 * NOTE(review): the value is shifted into bits 31:24 before the write and
 * the caller-supplied 'length' is forwarded to the data-register write —
 * this looks tailored to single-byte writes (fw_flash_writebuf passes
 * length==1); confirm semantics before calling with other lengths.
 *
 * Returns STATUS_DEVICE_NOT_READY if the command does not complete within
 * 5000 polls.
 */
static NTSTATUS
fw_flash_write_data (
		IN		BUS_INTERFACE_STANDARD			*p_BusInterface,
		IN		PVOID							p_buffer,
		IN		ULONG							offset,
		IN		ULONG POINTER_ALIGNMENT			length )
{
	NTSTATUS		status;
	uint32_t		cnt = 0;
	uint32_t		lcl_data;

	/* data byte goes into bits 31:24 of the flash data register */
	lcl_data = (*((uint32_t*)p_buffer) << 24);

	status = fw_access_pciconf(p_BusInterface, FW_WRITE , &lcl_data, FLASH_OFFSET+4, length );
	if ( status != STATUS_SUCCESS )
		return status;
	/* issue the write command for the target address */
	lcl_data = ( WRITE_BIT | (offset & ADDR_MSK));

	status = fw_access_pciconf(p_BusInterface, FW_WRITE , &lcl_data, FLASH_OFFSET, 4 );
	if ( status != STATUS_SUCCESS )
		return status;

	lcl_data = 0;

	/* poll for command completion (bounded busy-wait) */
	do
	{
		if (++cnt > 5000)
		{
			return STATUS_DEVICE_NOT_READY;
		}

		status = fw_access_pciconf(p_BusInterface, FW_READ , &lcl_data, FLASH_OFFSET, 4 );
		if ( status != STATUS_SUCCESS )
			return status;

	} while(lcl_data & CMD_MASK);

	return status;
}
+\r
/*
 * Read flash data through the indirect flash command registers.
 *
 * Sequence: issue READ_BIT | address to the command register
 * (FLASH_OFFSET), poll until the command bits (CMD_MASK) clear, then read
 * 'length' bytes from the flash data register (FLASH_OFFSET+4) into
 * p_buffer.
 *
 * Returns STATUS_DEVICE_NOT_READY if the command does not complete within
 * 5000 polls.
 */
static NTSTATUS
fw_flash_read_data (
		IN		BUS_INTERFACE_STANDARD			*p_BusInterface,
		IN		PVOID							p_buffer,
		IN		ULONG							offset,
		IN		ULONG POINTER_ALIGNMENT			length )
{
	NTSTATUS	status = STATUS_SUCCESS;
	uint32_t	cnt = 0;
	uint32_t	lcl_data = ( READ_BIT | (offset & ADDR_MSK));

	/* issue the read command for the target address */
	status = fw_access_pciconf(p_BusInterface, FW_WRITE, &lcl_data, FLASH_OFFSET, 4 );
	if ( status != STATUS_SUCCESS )
		return status;

	lcl_data = 0;
	do
	{
		// Timeout checks
		if (++cnt > 5000 )
		{
			return STATUS_DEVICE_NOT_READY;
		}

		status = fw_access_pciconf(p_BusInterface, FW_READ, &lcl_data, FLASH_OFFSET, 4 );

		if ( status != STATUS_SUCCESS )
			return status;

	} while(lcl_data & CMD_MASK);

	/* command completed: fetch the data word(s) */
	status = fw_access_pciconf(p_BusInterface, FW_READ, p_buffer, FLASH_OFFSET+4, length );
	return status;
}
+\r
+static NTSTATUS\r
+fw_flash_get_ca_guid(\r
+ IN DEVICE_OBJECT *p_dev_obj,\r
+ OUT net64_t *ca_guid )\r
+{\r
+ NTSTATUS status = STATUS_SUCCESS;\r
+ BUS_INTERFACE_STANDARD BusInterface;\r
+\r
+ uint32_t NODE_GUIDH, NODE_GUIDL;\r
+ uint32_t prim_ptr = 0;\r
+ uint32_t signature;\r
+\r
+ primary_sector_t ps;\r
+ cl_memset( &ps, 0, sizeof(primary_sector_t));\r
+\r
+ status = fw_get_pci_bus_interface(p_dev_obj, &BusInterface);\r
+\r
+ if ( !NT_SUCCESS( status ) )\r
+ return status;\r
+ \r
+ status = fw_flash_init (&BusInterface);\r
+ if (status != STATUS_SUCCESS )\r
+ return status;\r
+ status = fw_flash_read_data(&BusInterface, &signature, 0x24, 4); \r
+ if (status != STATUS_SUCCESS )\r
+ return status;\r
+ //signature = cl_ntoh32(signature);\r
+\r
+ if (signature == FW_SIGNATURE)\r
+ {\r
+ //Fail Safe image\r
+ \r
+ // Assume flash has been verified, and both images have the same guids, therefore,\r
+ // we only need to read the primary image's guids\r
+ status = fw_flash_readbuf(&BusInterface, FW_SECT_SIZE, &ps, sizeof(ps));\r
+ if ( status == STATUS_SUCCESS )\r
+ {\r
+ status = fw_flash_read_data(&BusInterface, &prim_ptr, ps.fi_addr+0x24, 4);\r
+ if (status == STATUS_SUCCESS )\r
+ prim_ptr = prim_ptr + ps.fi_addr;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ // Short image\r
+ prim_ptr = signature; \r
+ }\r
+\r
+ if ( signature == FW_SIGNATURE || prim_ptr < MAX_FLASH_SIZE )\r
+ {\r
+ /* now we can read ca guid\r
+ * since we read it in host mode fw_flash_read4() \r
+ * swaps it back in BE - how it was stored in FW\r
+ */\r
+ if (( status = fw_flash_read4(&BusInterface, prim_ptr, &NODE_GUIDL)) == STATUS_SUCCESS )\r
+ if (( status = fw_flash_read4(&BusInterface, prim_ptr+4, &NODE_GUIDH)) == STATUS_SUCCESS )\r
+ {\r
+ *ca_guid = NODE_GUIDH;\r
+ *ca_guid = (*ca_guid << 32) | NODE_GUIDL;\r
+ }\r
+ }\r
+ else \r
+ {\r
+ //invalid GUID pointer\r
+ return STATUS_NO_SUCH_DEVICE;\r
+ }\r
+ fw_flash_deinit(&BusInterface);\r
+ BusInterface.InterfaceDereference((PVOID)BusInterface.Context);\r
+ return status;\r
+}\r
+\r
+static NTSTATUS\r
+fw_flash_read4( \r
+ IN BUS_INTERFACE_STANDARD *p_BusInterface,\r
+ IN uint32_t addr, \r
+ IN OUT uint32_t *p_data)\r
+{\r
+ NTSTATUS status = STATUS_SUCCESS;\r
+ uint32_t lcl_data = 0;\r
+ uint32_t bank;\r
+ static uint32_t curr_bank = 0xffffffff;\r
+\r
+ if (addr & 0x3)\r
+ return STATUS_INVALID_PARAMETER;\r
+\r
+ bank = addr & BANK_MASK;\r
+ if (bank != curr_bank)\r
+ {\r
+ curr_bank = bank;\r
+ if ((status = fw_set_bank(p_BusInterface, bank)) != STATUS_SUCCESS )\r
+ return STATUS_INVALID_PARAMETER;\r
+ }\r
+ status = fw_flash_read_data(p_BusInterface, &lcl_data, addr, 4);\r
+ *p_data = cl_ntoh32(lcl_data);\r
+ return STATUS_SUCCESS;\r
+}\r
+\r
+static NTSTATUS\r
+fw_flash_readbuf(\r
+ IN BUS_INTERFACE_STANDARD *p_BusInterface,\r
+ IN uint32_t offset,\r
+ IN OUT void *p_data,\r
+ IN uint32_t len)\r
+{\r
+ NTSTATUS status = STATUS_SUCCESS;\r
+ uint32_t *p_lcl_data;\r
+ uint32_t i;\r
+\r
+ if (offset & 0x3)\r
+ {\r
+ //Address should be 4-bytes aligned\r
+ return STATUS_INVALID_PARAMETER;\r
+ }\r
+ if (len & 0x3)\r
+ {\r
+ //Length should be 4-bytes aligned\r
+ return STATUS_INVALID_PARAMETER;\r
+ }\r
+ p_lcl_data = (uint32_t *)p_data;\r
+ \r
+ for ( i=0; i < (len >> 2); i++)\r
+ { \r
+ if ( (status = fw_flash_read_data( p_BusInterface, p_lcl_data, offset, sizeof(uint32_t) )) != STATUS_SUCCESS )\r
+ return status;\r
+ offset += 4;\r
+ p_lcl_data++;\r
+ }\r
+ return STATUS_SUCCESS;\r
+} // Flash::flash_read\r
+\r
+static NTSTATUS\r
+fw_flash_writebuf(\r
+ IN BUS_INTERFACE_STANDARD *p_BusInterface,\r
+ IN PVOID p_buffer,\r
+ IN ULONG offset,\r
+ IN ULONG POINTER_ALIGNMENT length )\r
+{\r
+ NTSTATUS status = STATUS_SUCCESS;\r
+ uint32_t i;\r
+ uint8_t *p_data = (uint8_t *)p_buffer;\r
+\r
+ for ( i = 0; i < length; i++ )\r
+ {\r
+ status = fw_flash_write_data (p_BusInterface, p_data, offset, 1 );\r
+ if (status != STATUS_SUCCESS )\r
+ return status;\r
+ p_data++;\r
+ offset++;\r
+ }\r
+ return status;\r
+}\r
/*
 * Prepare the HCA for flash access.
 *
 * Steps: (1) acquire the GPIO semaphore (SEMAP63 reads 0 when free);
 * (2) save the current GPIO direction/polarity/mode/data registers so
 * fw_flash_deinit() can restore them; (3) drive GPIO bits 4-6 as outputs;
 * (4) set the CPUMODE field; (5) reset the flash with command 0xf0.
 *
 * NOTE(review): the semaphore loop is a bounded busy-wait (5000 polls,
 * no delay between reads) — confirm this is acceptable at the IRQL this
 * runs at.
 */
static NTSTATUS
fw_flash_init(
		IN		BUS_INTERFACE_STANDARD			*p_BusInterface )
{
	uint32_t dir;
	uint32_t pol;
	uint32_t mod;

	uint32_t cnt=0;
	uint32_t data;
	NTSTATUS status = STATUS_SUCCESS;
	uint32_t semaphore = 0;

	/* try to grab the GPIO semaphore: a read of 0 means it is now ours */
	while ( !semaphore )
	{
		status = fw_access_pciconf(p_BusInterface, FW_READ , &data, SEMAP63, 4);
		if ( status != STATUS_SUCCESS )
			break;
		if( !data )
		{
			semaphore = 1;
			break;
		}
		if (++cnt > 5000 )
		{
			break;
		}
	}

	if ( !semaphore )
	{
		return STATUS_NOT_SUPPORTED;
	}

	// Save old values (restored by fw_flash_deinit)

	status = fw_access_pciconf(p_BusInterface, FW_READ , &old_dir,GPIO_DIR_L , 4);
	if ( status == STATUS_SUCCESS )
		status = fw_access_pciconf(p_BusInterface, FW_READ , &old_pol,GPIO_POL_L , 4);
	if ( status == STATUS_SUCCESS )
		status = fw_access_pciconf(p_BusInterface, FW_READ , &old_mod,GPIO_MOD_L , 4);
	if ( status == STATUS_SUCCESS )
		status = fw_access_pciconf(p_BusInterface, FW_READ , &old_dat,GPIO_DAT_L , 4);

	// Set Direction=1, Polarity=0, Mode=0 for GPIO bits 4-6 (mask 0x70)
	dir = old_dir | 0x70;
	pol = old_pol & ~0x70;
	mod = old_mod & ~0x70;

	status = fw_access_pciconf(p_BusInterface, FW_WRITE , &dir,GPIO_DIR_L , 4);
	if ( status == STATUS_SUCCESS )
		status = fw_access_pciconf(p_BusInterface, FW_WRITE , &pol,GPIO_POL_L , 4);
	if ( status == STATUS_SUCCESS )
		status = fw_access_pciconf(p_BusInterface, FW_WRITE , &mod,GPIO_MOD_L , 4);
	if ( status == STATUS_SUCCESS )
		// Set CPUMODE
		status = fw_access_pciconf(p_BusInterface, FW_READ , &data, CPUMODE, 4);
	if ( status == STATUS_SUCCESS )
	{
		/* read-modify-write the CPUMODE field to 1 */
		data &= ~CPUMODE_MSK;
		data |= 1 << CPUMODE_SHIFT;
		status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, CPUMODE, 4);
	}
	if ( status == STATUS_SUCCESS )
	{
		// Reset flash
		data = 0xf0;
		status = fw_flash_write_data(p_BusInterface, &data, 0x0, 4);
	}
	return status;
}
+\r
/*
 * Undo fw_flash_init(): reselect bank 0, restore the GPIO registers saved
 * in old_dir/old_pol/old_mod/old_dat, and release the GPIO semaphore by
 * writing 0 to SEMAP63.  Each step runs only if all previous ones
 * succeeded; returns the first failing status.
 */
static NTSTATUS
fw_flash_deinit(
	IN		BUS_INTERFACE_STANDARD	*p_BusInterface )
{
	uint32_t data = 0;
	NTSTATUS status = STATUS_SUCCESS;

	status = fw_set_bank(p_BusInterface, 0);
	if ( status == STATUS_SUCCESS )
		// Restore origin values
		status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_dir,GPIO_DIR_L , 4);
	if ( status == STATUS_SUCCESS )
		status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_pol,GPIO_POL_L , 4);
	if ( status == STATUS_SUCCESS )
		status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_mod,GPIO_MOD_L , 4);
	if ( status == STATUS_SUCCESS )
		status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_dat,GPIO_DAT_L , 4);
	if ( status == STATUS_SUCCESS )
		// Free GPIO Semaphore
		status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, SEMAP63, 4);
	return status;
}
+\r
/*
 * Select a flash bank by driving GPIO bits 4-6: first clear all three
 * bank-select pins via GPIO_DATACLEAR_L, then set the pins encoding the
 * requested bank via GPIO_DATASET_L.  The pin values live in bits 28-30
 * of the GPIO data registers, hence the <<24 shifts.
 *
 * NOTE(review): the "A1" comment marks chip-stepping-specific behavior
 * (A0 would use "data |= mask" instead) — confirm against the HW spec.
 */
static NTSTATUS
fw_set_bank(
	IN		BUS_INTERFACE_STANDARD	*p_BusInterface,
	IN		uint32_t				bank )
{
	NTSTATUS status = STATUS_SUCCESS;
	uint32_t data = ( (uint32_t)0x70 << 24 );	/* all 3 bank pins */
	uint32_t mask = ((bank >> (BANK_SHIFT-4)) << 24 );

	status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, GPIO_DATACLEAR_L, 4);
	if (status == STATUS_SUCCESS)
	{
		// A1
		data &= mask;
		//data |= mask; // for A0
		status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, GPIO_DATASET_L, 4);
	}
	return status;
}
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_driver.h 46 2005-05-30 17:55:53Z sleybo $\r
+ */\r
+\r
+\r
+#if !defined( _HCA_DRIVER_H_ )\r
+#define _HCA_DRIVER_H_\r
+\r
+\r
+#include <complib/cl_types.h>\r
+#include <complib/cl_pnp_po.h>\r
+#include <complib/cl_mutex.h>\r
+#include <iba/ib_ci_ifc.h>\r
+#include "hca_data.h"\r
+#include "mt_l2w.h"\r
+#include "hca_debug.h"\r
+\r
+\r
+#include "hca_pnp.h"\r
+#include "hca_pci.h"\r
+\r
+#if !defined(FILE_DEVICE_INFINIBAND) // Not defined in WXP DDK\r
+#define FILE_DEVICE_INFINIBAND 0x0000003B\r
+#endif\r
+\r
+/****s* HCA/hca_reg_state_t\r
+* NAME\r
+* hca_reg_state_t\r
+*\r
+* DESCRIPTION\r
+* State for tracking registration with AL. This state is independent of the\r
+* device PnP state, and both are used to properly register with AL.\r
+*\r
+* SYNOPSIS\r
+*/\r
typedef enum _hca_reg_state
{
	HCA_SHUTDOWN,		/* cleaning up */
	HCA_ADDED,			/* AddDevice ran; registered for ifc notifications */
	HCA_STARTED,		/* IRP_MN_START_DEVICE processed; HCA functional */
	HCA_REGISTERED		/* fully functional and registered with the bus root */

} hca_reg_state_t;
+/*\r
+* VALUES\r
+* HCA_SHUTDOWN\r
+* Cleaning up.\r
+*\r
+* HCA_ADDED\r
+* AddDevice was called and successfully registered for interface\r
+* notifications.\r
+*\r
+* HCA_STARTED\r
+* IRP_MN_START_DEVICE was called. The HCA is fully functional.\r
+*\r
+* HCA_REGISTERED\r
+* Fully functional and registered with the bus root.\r
+*********/\r
+\r
+\r
/* Memory BARs exposed by the HCA; used as indices into
 * hca_dev_ext_t.bar[] (HCA_BAR_TYPE_MAX is the array size). */
typedef enum _hca_bar_type
{
	HCA_BAR_TYPE_HCR,
	HCA_BAR_TYPE_UAR,
	HCA_BAR_TYPE_DDR,
	HCA_BAR_TYPE_MAX

} hca_bar_type_t;
+\r
+\r
/* A single HCA memory BAR: physical base, kernel virtual mapping, size. */
typedef struct _hca_bar
{
	uint64_t			phys;	/* physical base address */
	void				*virt;	/* kernel-mode mapping */
	SIZE_T				size;	/* length in bytes */

}	hca_bar_t;
+\r
+\r
/* Per-device extension for the HCA FDO.  cl_ext MUST stay the first
 * member: complib's PnP/power dispatch casts DeviceExtension to
 * cl_pnp_po_ext_t (see hca_sysctl's use of cl_ext.p_next_do). */
typedef struct _hca_dev_ext
{
	/* -------------------------------------------------
	*		PNP DATA 	 
	* ------------------------------------------------ */
	cl_pnp_po_ext_t 			cl_ext; 		/* COMPLIB PnP object */
	void					*	pnp_ifc_entry;		/* Notification entry for PnP interface events. */
	void					*	pnp_target_entry;	/* Notification entry for PnP target events. */
	PNP_DEVICE_STATE			pnpState; /* state for PnP Manager */

	/* -------------------------------------------------
	*		POWER MANAGER DATA 	 
	* ------------------------------------------------ */
	/* Cache of the system to device power states. */
	DEVICE_POWER_STATE			DevicePower[PowerSystemMaximum];
	DEVICE_POWER_STATE			PowerState; 	/* state for Power Manager */
	PIO_WORKITEM				pPoWorkItem;

	/* -------------------------------------------------
	*		IB_AL DATA 	 
	* ------------------------------------------------ */
	ib_ci_ifc_t 				ci_ifc; 		/* Interface for the lower edge of the IB_AL device. */
	hca_reg_state_t 			state;			/* State for tracking registration with AL */
	DEVICE_OBJECT				*	p_al_dev;	/* IB_AL FDO */
	FILE_OBJECT 				*	p_al_file_obj;	/* IB_AL file object */

	/* -------------------------------------------------
	*		LOW LEVEL DRIVER' DATA 	 
	* ------------------------------------------------ */
	mlnx_hca_t					hca;

	/* -------------------------------------------------
	*		OS DATA 	 
	* ------------------------------------------------ */
	hca_bar_t					bar[HCA_BAR_TYPE_MAX];	/* HCA memory bars */
	CM_PARTIAL_RESOURCE_DESCRIPTOR	interruptInfo;	/* HCA interrupt resources */
	PKINTERRUPT 				int_obj;		/* HCA interrupt object */
	spinlock_t					isr_lock;		/* lock for the ISR */
	ULONG						bus_number;		/* HCA's bus number */

	/* -------------------------------------------------
	*		VARIABLES 	 
	* ------------------------------------------------ */
	DMA_ADAPTER 			*	p_dma_adapter;		/* HCA adapter object */
	ULONG						n_map_regs; 		/* num of allocated adapter map registers */
	PCI_COMMON_CONFIG			hcaConfig;			/* saved HCA PCI configuration header */
	int 						hca_hidden; 		/* flag: when set - no attached DDR memory */

}	hca_dev_ext_t;
+\r
+#define EXT_FROM_HOB(hob_p) (container_of(hob_p, hca_dev_ext_t, hca.hob))\r
+#define IBDEV_FROM_HOB(hob_p) (&EXT_FROM_HOB(hob_p)->hca.mdev->ib_dev)\r
+#define HOBUL_FROM_HOB(hob_p) (&EXT_FROM_HOB(hob_p)->hca.hobul)\r
+#define HOB_FROM_IBDEV(dev_p) (mlnx_hob_t *)&dev_p->mdev->ext->hca.hob\r
+\r
+\r
+\r
+\r
+/***********************************\r
+Firmware Update definitions\r
+***********************************/\r
/* Indirect flash-access registers in PCI config space and the command-word
 * encoding used by fw_access_pciconf / fw_flash_read_data /
 * fw_flash_write_data. */
#define PCI_CONF_ADDR				(0x00000058)
#define PCI_CONF_DATA				(0x0000005c)
#define FLASH_OFFSET				(0x000f01a4)
#define READ_BIT					(1<<29)
#define WRITE_BIT					(2<<29)
#define ADDR_MSK					(0x0007ffff)
#define CMD_MASK					(0xe0000000)
#define BANK_SHIFT					(19)
#define BANK_MASK					(0xfff80000)
#define MAX_FLASH_SIZE				(0x80000) // 512K

/* GPIO registers used for flash bank selection and the flash semaphore
 * (see fw_flash_init / fw_flash_deinit / fw_set_bank). */
#define SEMAP63 					(0xf03fc)
#define GPIO_DIR_L					(0xf008c)
#define GPIO_POL_L					(0xf0094)
#define GPIO_MOD_L					(0xf009c)
#define GPIO_DAT_L					(0xf0084)
#define GPIO_DATACLEAR_L			(0xf00d4)
#define GPIO_DATASET_L				(0xf00dc)

#define CPUMODE 					(0xf0150)
#define CPUMODE_MSK 				(0xc0000000UL)
#define CPUMODE_SHIFT				(30)

/* Definitions intended to become shared with UM.  Later...
 * Command codes accepted by fw_access_ctrl(). */
#define FW_READ 			0x00
#define FW_WRITE			0x01
#define FW_READ_CMD 		0x08
#define FW_WRITE_CMD		0x09
#define FW_OPEN_IF			0xe7
#define FW_CLOSE_IF 		0x7e

#define FW_SIGNATURE		(0x5a445a44)	/* fail-safe image marker at offset 0x24 */
#define FW_SECT_SIZE		(0x10000)
+\r
+static inline errno_to_iberr(int err)\r
+{\r
+#define MAP_ERR(err,ibstatus) case err: ib_status = ibstatus; break\r
+ ib_api_status_t ib_status = IB_UNKNOWN_ERROR;\r
+ if (err < 0)\r
+ err = -err;\r
+ switch (err) {\r
+ MAP_ERR( ENOENT, IB_NOT_FOUND );\r
+ MAP_ERR( EINTR, IB_INTERRUPTED );\r
+ MAP_ERR( EAGAIN, IB_RESOURCE_BUSY );\r
+ MAP_ERR( ENOMEM, IB_INSUFFICIENT_MEMORY );\r
+ MAP_ERR( EACCES, IB_INVALID_PERMISSION );\r
+ MAP_ERR( EFAULT, IB_ERROR );\r
+ MAP_ERR( EBUSY, IB_RESOURCE_BUSY );\r
+ MAP_ERR( ENODEV, IB_UNSUPPORTED );\r
+ MAP_ERR( EINVAL, IB_INVALID_PARAMETER );\r
+ MAP_ERR( ENOSYS, IB_UNSUPPORTED );\r
+ default:\r
+ //HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,\r
+ // "Unmapped errno (%d)\n", err);\r
+ break;\r
+ }\r
+ return ib_status;\r
+}\r
+\r
+#endif /* !defined( _HCA_DRIVER_H_ ) */\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_mcast.c 46 2005-05-30 17:55:53Z sleybo $\r
+ */\r
+\r
+\r
+#include <iba/ib_ci.h>\r
+#include <complib/comp_lib.h>\r
+\r
+#include "hca_driver.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_mcast.tmh"\r
+#endif\r
+#include "mthca_dev.h"\r
+\r
+/*\r
+* Multicast Support Verbs.\r
+*/\r
+ib_api_status_t\r
+mlnx_attach_mcast (\r
+ IN const ib_qp_handle_t h_qp,\r
+ IN const ib_gid_t *p_mcast_gid,\r
+ IN const uint16_t mcast_lid,\r
+ OUT ib_mcast_handle_t *ph_mcast,\r
+ IN OUT ci_umv_buf_t *p_umv_buf )\r
+{\r
+ int err;\r
+ ib_api_status_t status;\r
+ struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;\r
+ mlnx_mcast_t *mcast_p;\r
+\r
+ HCA_ENTER(HCA_DBG_SHIM);\r
+\r
+ // sanity checks\r
+ if( p_umv_buf && p_umv_buf->command ) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("User mode is not supported yet\n"));\r
+ HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM ,("User mode is not supported yet\n"));\r
+ status = IB_UNSUPPORTED;\r
+ goto err_user_unsupported;\r
+ }\r
+ if (!p_mcast_gid || !ph_mcast) {\r
+ status = IB_INVALID_PARAMETER;\r
+ goto err_invalid_param;\r
+ }\r
+\r
+ // allocate structure\r
+ mcast_p = (mlnx_mcast_t*)kmalloc(sizeof *mcast_p, GFP_ATOMIC );\r
+ if (mcast_p == NULL) {\r
+ status = IB_INSUFFICIENT_MEMORY;\r
+ goto err_no_mem;\r
+ }\r
+ \r
+ // attach to mcast group\r
+ if( p_umv_buf && p_umv_buf->command ) {\r
+ //TODO: call uverbs\r
+ }\r
+ else {\r
+ err = ibv_attach_mcast(ib_qp_p, (union ib_gid *)p_mcast_gid, (u16)mcast_lid);\r
+ if (err) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ibv_attach_mcast failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ goto err_attach;\r
+ }\r
+ }\r
+\r
+ // fill the structure\r
+ mcast_p->ib_qp_p = ib_qp_p;\r
+ mcast_p->mcast_lid = mcast_lid;\r
+ RtlCopyMemory(mcast_p->mcast_gid.raw, p_mcast_gid->raw, sizeof *p_mcast_gid);\r
+ HCA_PRINT(TRACE_LEVEL_WARNING, HCA_DBG_SHIM, ("mcasth %p, qp_p %p, mlid %hx, mgid %I64x`%I64x\n", \r
+ mcast_p, mcast_p->ib_qp_p, mcast_p->mcast_lid,\r
+ *(uint64_t*)&mcast_p->mcast_gid.raw[0],\r
+ *(uint64_t*)&mcast_p->mcast_gid.raw[8] ));\r
+ \r
+ // return the result\r
+ if (ph_mcast) *ph_mcast = (ib_mcast_handle_t)mcast_p;\r
+\r
+ status = IB_SUCCESS;\r
+ goto end;\r
+ \r
+err_attach: \r
+ kfree(mcast_p);\r
+err_no_mem: \r
+err_invalid_param:\r
+err_user_unsupported:\r
+end: \r
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+ return status;\r
+}\r
+\r
/*
 * Detach a QP from a multicast group and free the handle allocated by
 * mlnx_attach_mcast().
 *
 * NOTE(review): on ibv_detach_mcast failure the flow falls through the
 * err_detach_mcast label and still kfree's the handle while the QP may
 * remain attached — a retry by the caller would use freed memory; confirm
 * this is the intended contract.
 */
ib_api_status_t
mlnx_detach_mcast (
	IN		const	ib_mcast_handle_t			h_mcast)
{
	ib_api_status_t 	status;
	int err;
	mlnx_mcast_t *mcast_p = (mlnx_mcast_t*)h_mcast;

	// sanity check
	if (!mcast_p || !mcast_p->ib_qp_p) {
		status = IB_INVALID_PARAMETER;
		goto err_invalid_param;
	}
	HCA_PRINT(TRACE_LEVEL_WARNING, HCA_DBG_SHIM,("mcasth %p, qp_p %p, mlid %hx, mgid %I64x`%I64x\n",
		mcast_p, mcast_p->ib_qp_p, mcast_p->mcast_lid,
		*(uint64_t*)&mcast_p->mcast_gid.raw[0],
		*(uint64_t*)&mcast_p->mcast_gid.raw[8] ));

	// detach (user-mode QPs have a ucontext; that path is not implemented)
	if( mcast_p->ib_qp_p->ucontext) {
		//TODO: call uverbs
	}
	else {
		err = ibv_detach_mcast( mcast_p->ib_qp_p,
			(union ib_gid *)&mcast_p->mcast_gid, mcast_p->mcast_lid );
		if (err) {
			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ibv_detach_mcast failed (%d)\n", err));
			status = errno_to_iberr(err);
			goto err_detach_mcast;
		}
	}

	status = IB_SUCCESS;

err_detach_mcast:
	/* handle is freed on both the success and the detach-failure path */
	kfree(mcast_p);
err_invalid_param:
	HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("completes with ERROR status %s\n", ib_get_err_str(status)));
	return status;
}
+\r
+\r
+void\r
+mlnx_mcast_if(\r
+ IN OUT ci_interface_t *p_interface )\r
+{\r
+ p_interface->attach_mcast = mlnx_attach_mcast;\r
+ p_interface->detach_mcast = mlnx_detach_mcast;\r
+}\r
--- /dev/null
+/*\r
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.\r
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. \r
+ *\r
+ * This software is available to you under the OpenIB.org BSD license\r
+ * below:\r
+ *\r
+ * Redistribution and use in source and binary forms, with or\r
+ * without modification, are permitted provided that the following\r
+ * conditions are met:\r
+ *\r
+ * - Redistributions of source code must retain the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer.\r
+ *\r
+ * - Redistributions in binary form must reproduce the above\r
+ * copyright notice, this list of conditions and the following\r
+ * disclaimer in the documentation and/or other materials\r
+ * provided with the distribution.\r
+ *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,\r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\r
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\r
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\r
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\r
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r
+ * SOFTWARE.\r
+ *\r
+ * $Id: hca_memory.c 46 2005-05-30 17:55:53Z sleybo $\r
+ */\r
+\r
+\r
+#include "hca_driver.h"\r
+#include "hca_utils.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_memory.tmh"\r
+#endif\r
+\r
+/*\r
+ * Memory Management Verbs.\r
+ */\r
+\r
+/*\r
+ * mlnx_register_mr\r
+ *\r
+ * Registers a virtual memory region with the HCA (wraps ibv_reg_mr) and\r
+ * returns its local/remote keys.\r
+ *\r
+ * h_pd        - protection domain the region belongs to\r
+ * p_mr_create - region description: vaddr, length, access rights\r
+ * p_lkey      - OUT: local key of the new region\r
+ * p_rkey      - OUT: remote key of the new region\r
+ * ph_mr       - OUT (optional): handle of the new memory region\r
+ * um_call     - TRUE when invoked on behalf of a user-mode client\r
+ *\r
+ * Returns IB_SUCCESS or a mapped IB error.  Requires a blockable IRQL.\r
+ */\r
+ib_api_status_t\r
+mlnx_register_mr (\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN const ib_mr_create_t *p_mr_create,\r
+ OUT net32_t* const p_lkey,\r
+ OUT net32_t* const p_rkey,\r
+ OUT ib_mr_handle_t *ph_mr,\r
+ IN boolean_t um_call )\r
+{\r
+ ib_api_status_t status;\r
+ int err;\r
+ struct ib_mr *mr_p;\r
+ struct mthca_mr *mro_p;\r
+ struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
+ \r
+ HCA_ENTER(HCA_DBG_MEMORY);\r
+\r
+ // sanity checks\r
+ if( !cl_is_blockable() ) {\r
+ status = IB_UNSUPPORTED;\r
+ goto err_unsupported;\r
+ } \r
+ if (!p_mr_create || 0 == p_mr_create->length) {\r
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("invalid attributes"));\r
+ status = IB_INVALID_PARAMETER;\r
+ goto err_invalid_parm; \r
+ }\r
+ /*\r
+ * Local write permission is required if remote write or\r
+ * remote atomic permission is also requested.\r
+ */\r
+ if (p_mr_create->access_ctrl & (IB_AC_RDMA_WRITE | IB_AC_ATOMIC) &&\r
+ !(p_mr_create->access_ctrl & IB_AC_LOCAL_WRITE)) {\r
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("invalid access rights"));\r
+ status = IB_INVALID_PERMISSION;\r
+ goto err_invalid_access; \r
+ } \r
+\r
+ // NOTE(review): dead code kept for reference; it references locals\r
+ // ('iobuf', 'region', 'umv_buf') that are not declared in this function,\r
+ // so it would not compile if re-enabled.\r
+#ifdef WIN_TO_BE_REMOVED\r
+ // lock buffer for user\r
+ if (um_call) {\r
+ err = iobuf_register(\r
+ (UINT_PTR)p_mr_create->vaddr,\r
+ p_mr_create->length,\r
+ um_call,\r
+ (int)p_mr_create->access_ctrl,\r
+ &iobuf );\r
+ if (err) {\r
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY ,("iobuf_register failed(%d)",err));\r
+ status = errno_to_iberr(err);\r
+ goto err_lock;\r
+ }\r
+ }\r
+ \r
+ // prepare parameters\r
+ RtlZeroMemory(&region, sizeof(region));\r
+ RtlZeroMemory(&umv_buf, sizeof(umv_buf));\r
+ region.user_base = (u64)p_mr_create->vaddr;\r
+ region.virt_base = (u64)p_mr_create->vaddr;\r
+ region.page_size = PAGE_SIZE;\r
+ region.length = p_mr_create->length;\r
+ region.offset = p_mr_create->vaddr & (PAGE_SIZE - 1);\r
+ //TODO: end filling region (add list of chunks)\r
+ //TODO: fill umv_buf\r
+#endif \r
+\r
+ // register mr \r
+ // The region's iova is its own start address.\r
+ mr_p = ibv_reg_mr(ib_pd_p, map_qp_ibal_acl(p_mr_create->access_ctrl), \r
+ p_mr_create->vaddr, p_mr_create->length, \r
+ (uint64_t)(ULONG_PTR)(void*)p_mr_create->vaddr, um_call );\r
+ if (IS_ERR(mr_p)) {\r
+ err = PTR_ERR(mr_p);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,\r
+ ("ibv_reg_mr failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ goto err_reg_mr;\r
+ }\r
+\r
+ // results\r
+ mro_p = (struct mthca_mr *)mr_p;\r
+#ifdef WIN_TO_BE_REMOVED\r
+ mro_p->iobuf = iobuf;\r
+#endif\r
+ *p_lkey = mr_p->lkey;\r
+ *p_rkey = mr_p->rkey;\r
+ if (ph_mr) *ph_mr = (ib_mr_handle_t)mr_p;\r
+ status = IB_SUCCESS;\r
+\r
+ // success falls through the error labels below (no cleanup runs:\r
+ // the iobuf deregistration is compiled out).\r
+err_reg_mr:\r
+#ifdef WIN_TO_BE_REMOVED\r
+ if (um_call) \r
+ iobuf_deregister(&iobuf );\r
+err_lock:\r
+#endif \r
+err_invalid_access: \r
+err_invalid_parm:\r
+err_unsupported:\r
+ // NOTE(review): this exit trace says "ERROR" even when status == IB_SUCCESS.\r
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+ return status;\r
+}\r
+\r
+/*\r
+ * mlnx_register_pmr\r
+ *\r
+ * Registers a physical memory region.  A length of (uint64_t)-1 requests\r
+ * a DMA MR covering all physical memory (ibv_get_dma_mr); otherwise the\r
+ * supplied range array is registered via ibv_reg_phys_mr.\r
+ *\r
+ * h_pd         - protection domain the region belongs to\r
+ * p_pmr_create - physical region description (ranges, length, access)\r
+ * p_vaddr      - IN/OUT requested/actual virtual address (left unchanged)\r
+ * p_lkey       - OUT: local key of the new region\r
+ * p_rkey       - OUT: remote key of the new region\r
+ * ph_mr        - OUT (optional): handle of the new memory region\r
+ * um_call      - unused\r
+ *\r
+ * Returns IB_SUCCESS or a mapped IB error.  Requires a blockable IRQL.\r
+ */\r
+ib_api_status_t\r
+mlnx_register_pmr (\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN const ib_phys_create_t* const p_pmr_create,\r
+ IN OUT uint64_t* const p_vaddr,\r
+ OUT net32_t* const p_lkey,\r
+ OUT net32_t* const p_rkey,\r
+ OUT ib_mr_handle_t* const ph_mr,\r
+ IN boolean_t um_call )\r
+{\r
+ ib_api_status_t status;\r
+ int err;\r
+ struct ib_mr *mr_p;\r
+ struct ib_phys_buf *buffer_list;\r
+ struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd;\r
+ \r
+ UNUSED_PARAM( um_call );\r
+\r
+ // was HCA_DBG_SHIM; use HCA_DBG_MEMORY like the other memory verbs\r
+ // (and like the HCA_PRINT_EXIT below), so tracing can be filtered\r
+ // consistently.\r
+ HCA_ENTER(HCA_DBG_MEMORY);\r
+\r
+ // sanity checks\r
+ if( !cl_is_blockable() ) {\r
+ status = IB_UNSUPPORTED;\r
+ goto err_unsupported;\r
+ } \r
+ if (!p_vaddr || !p_pmr_create ||\r
+ 0 == p_pmr_create->length ) {\r
+ status = IB_INVALID_PARAMETER;\r
+ goto err_invalid_parm; \r
+ }\r
+\r
+ // prepare parameters\r
+ buffer_list = (void*)p_pmr_create->range_array;\r
+ //NB: p_pmr_create->buf_offset is not used, i.e. supposed that region is page-aligned\r
+ //NB: p_pmr_create->hca_page_size is not used, i.e. supposed it is always the same\r
+ \r
+ // register pmr \r
+ if (p_pmr_create->length == (uint64_t)-1LL) \r
+ mr_p = ibv_get_dma_mr(ib_pd_p, map_qp_ibal_acl(p_pmr_create->access_ctrl) );\r
+ else\r
+ mr_p = ibv_reg_phys_mr(ib_pd_p, buffer_list, p_pmr_create->num_ranges, \r
+ map_qp_ibal_acl(p_pmr_create->access_ctrl), p_vaddr );\r
+ if (IS_ERR(mr_p)) {\r
+ err = PTR_ERR(mr_p);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,\r
+ ("mthca_reg_phys_mr failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ goto err_reg_phys_mr;\r
+ }\r
+\r
+ // results\r
+ if (ph_mr) *ph_mr = (ib_mr_handle_t)mr_p;\r
+ *p_lkey = mr_p->lkey;\r
+ *p_rkey = mr_p->rkey;\r
+ //NB: p_vaddr was not changed\r
+ status = IB_SUCCESS;\r
+\r
+err_reg_phys_mr:\r
+err_invalid_parm:\r
+err_unsupported:\r
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+ return status;\r
+ \r
+}\r
+\r
+/* Stub: MR attribute query is not implemented; always returns IB_UNSUPPORTED. */\r
+ib_api_status_t\r
+mlnx_query_mr (\r
+ IN const ib_mr_handle_t h_mr,\r
+ OUT ib_mr_attr_t *p_mr_query )\r
+{\r
+ UNREFERENCED_PARAMETER(h_mr);\r
+ UNREFERENCED_PARAMETER(p_mr_query);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("mlnx_query_mr not implemented\n"));\r
+ return IB_UNSUPPORTED;\r
+}\r
+\r
+\r
+/* Stub: MR modification is not implemented; always returns IB_UNSUPPORTED. */\r
+ib_api_status_t\r
+mlnx_modify_mr (\r
+ IN const ib_mr_handle_t h_mr,\r
+ IN const ib_mr_mod_t mem_modify_req,\r
+ IN const ib_mr_create_t *p_mr_create,\r
+ OUT uint32_t *p_lkey,\r
+ OUT uint32_t *p_rkey,\r
+ IN const ib_pd_handle_t h_pd OPTIONAL,\r
+ IN boolean_t um_call )\r
+{\r
+ UNREFERENCED_PARAMETER(h_mr);\r
+ UNREFERENCED_PARAMETER(mem_modify_req);\r
+ UNREFERENCED_PARAMETER(p_mr_create);\r
+ UNREFERENCED_PARAMETER(p_lkey);\r
+ UNREFERENCED_PARAMETER(p_rkey);\r
+ UNREFERENCED_PARAMETER(h_pd);\r
+ UNREFERENCED_PARAMETER(um_call);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("mlnx_modify_mr not implemented\n"));\r
+ return IB_UNSUPPORTED;\r
+}\r
+\r
+\r
+/* Stub: physical-MR modification is not implemented; always returns IB_UNSUPPORTED. */\r
+ib_api_status_t\r
+mlnx_modify_pmr (\r
+ IN const ib_mr_handle_t h_mr,\r
+ IN const ib_mr_mod_t mem_modify_req,\r
+ IN const ib_phys_create_t* const p_pmr_create,\r
+ IN OUT uint64_t* const p_vaddr,\r
+ OUT uint32_t* const p_lkey,\r
+ OUT uint32_t* const p_rkey,\r
+ IN const ib_pd_handle_t h_pd OPTIONAL,\r
+ IN boolean_t um_call )\r
+{\r
+ UNREFERENCED_PARAMETER(h_mr);\r
+ UNREFERENCED_PARAMETER(mem_modify_req);\r
+ UNREFERENCED_PARAMETER(p_pmr_create);\r
+ UNREFERENCED_PARAMETER(p_vaddr);\r
+ UNREFERENCED_PARAMETER(p_lkey);\r
+ UNREFERENCED_PARAMETER(p_rkey);\r
+ UNREFERENCED_PARAMETER(h_pd);\r
+ UNREFERENCED_PARAMETER(um_call);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("mlnx_modify_pmr not implemented\n"));\r
+ return IB_UNSUPPORTED;\r
+}\r
+\r
+/* Stub: shared-MR registration is not implemented; always returns IB_UNSUPPORTED. */\r
+ib_api_status_t\r
+mlnx_register_smr (\r
+ IN const ib_mr_handle_t h_mr,\r
+ IN const ib_pd_handle_t h_pd,\r
+ IN const ib_access_t access_ctrl,\r
+ IN OUT uint64_t* const p_vaddr,\r
+ OUT net32_t* const p_lkey,\r
+ OUT net32_t* const p_rkey,\r
+ OUT ib_mr_handle_t* const ph_mr,\r
+ IN boolean_t um_call )\r
+{\r
+ UNREFERENCED_PARAMETER(h_mr);\r
+ UNREFERENCED_PARAMETER(h_pd);\r
+ UNREFERENCED_PARAMETER(access_ctrl);\r
+ UNREFERENCED_PARAMETER(p_vaddr);\r
+ UNREFERENCED_PARAMETER(p_lkey);\r
+ UNREFERENCED_PARAMETER(p_rkey);\r
+ UNREFERENCED_PARAMETER(ph_mr);\r
+ UNREFERENCED_PARAMETER(um_call);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("mlnx_register_smr not implemented\n"));\r
+ return IB_UNSUPPORTED;\r
+}\r
+\r
+/*\r
+ * mlnx_deregister_mr\r
+ *\r
+ * Deregisters a memory region previously created by mlnx_register_mr /\r
+ * mlnx_register_pmr (wraps ibv_dereg_mr).\r
+ *\r
+ * h_mr - handle of the region to destroy\r
+ *\r
+ * Returns IB_SUCCESS or a mapped IB error.  Requires a blockable IRQL.\r
+ */\r
+ib_api_status_t\r
+mlnx_deregister_mr (\r
+ IN const ib_mr_handle_t h_mr)\r
+{\r
+ ib_api_status_t status;\r
+ int err;\r
+\r
+ // was HCA_DBG_SHIM; use HCA_DBG_MEMORY like the other memory verbs\r
+ // (and like the HCA_PRINT_EXIT below).\r
+ HCA_ENTER(HCA_DBG_MEMORY);\r
+\r
+ // sanity checks\r
+ if( !cl_is_blockable() ) {\r
+ status = IB_UNSUPPORTED;\r
+ goto err_unsupported;\r
+ } \r
+\r
+#ifdef WIN_TO_BE_REMOVED \r
+ // unlock user buffer\r
+ {\r
+ struct mthca_mr *mro_p = (struct mthca_mr *)h_mr;\r
+ if (mro_p->ibmr.uobject) \r
+ iobuf_deregister( &mro_p->iobuf );\r
+ }\r
+#endif \r
+\r
+ // deregister \r
+ err = ibv_dereg_mr((struct ib_mr *)h_mr);\r
+ if (err) {\r
+ // log the raw errno before mapping it, as the sibling verbs do;\r
+ // the old message named mthca_dereg_mr and printed the mapped status\r
+ HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MEMORY, \r
+ ("ibv_dereg_mr failed (%d)\n", err));\r
+ status = errno_to_iberr(err);\r
+ goto err_dereg_mr;\r
+ }\r
+\r
+ status = IB_SUCCESS;\r
+ \r
+err_dereg_mr:\r
+err_unsupported:\r
+ HCA_PRINT_EXIT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("completes with ERROR status %s\n", ib_get_err_str(status)));\r
+ return status;\r
+ \r
+}\r
+\r
+/*\r
+* Memory Window Verbs.\r
+*/\r
+\r
+/* Stub: memory-window creation is not implemented; always returns IB_UNSUPPORTED. */\r
+ib_api_status_t\r
+mlnx_create_mw (\r
+ IN const ib_pd_handle_t h_pd,\r
+ OUT net32_t* const p_rkey,\r
+ OUT ib_mw_handle_t *ph_mw,\r
+ IN OUT ci_umv_buf_t *p_umv_buf )\r
+{\r
+ UNREFERENCED_PARAMETER(h_pd);\r
+ UNREFERENCED_PARAMETER(p_rkey);\r
+ UNREFERENCED_PARAMETER(ph_mw);\r
+ UNREFERENCED_PARAMETER(p_umv_buf);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("mlnx_create_mw not implemented\n"));\r
+ return IB_UNSUPPORTED;\r
+}\r
+\r
+/* Stub: memory-window query is not implemented; always returns IB_UNSUPPORTED. */\r
+ib_api_status_t\r
+mlnx_query_mw (\r
+ IN const ib_mw_handle_t h_mw,\r
+ OUT ib_pd_handle_t *ph_pd,\r
+ OUT net32_t* const p_rkey,\r
+ IN OUT ci_umv_buf_t *p_umv_buf )\r
+{\r
+ UNREFERENCED_PARAMETER(h_mw);\r
+ UNREFERENCED_PARAMETER(ph_pd);\r
+ UNREFERENCED_PARAMETER(p_rkey);\r
+ UNREFERENCED_PARAMETER(p_umv_buf);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("mlnx_query_mw not implemented\n"));\r
+ return IB_UNSUPPORTED;\r
+}\r
+\r
+/* Stub: memory-window destruction is not implemented; always returns IB_UNSUPPORTED. */\r
+ib_api_status_t\r
+mlnx_destroy_mw (\r
+ IN const ib_mw_handle_t h_mw)\r
+{\r
+ UNREFERENCED_PARAMETER(h_mw);\r
+ HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("mlnx_destroy_mw not implemented\n"));\r
+ return IB_UNSUPPORTED;\r
+}\r
+\r
+\r
+/* Populates the CI interface table with this driver's memory-management\r
+ * verbs (MR and MW entry points defined above). */\r
+void\r
+mlnx_memory_if(\r
+ IN OUT ci_interface_t *p_interface )\r
+{\r
+ p_interface->register_mr = mlnx_register_mr;\r
+ p_interface->register_pmr = mlnx_register_pmr;\r
+ p_interface->query_mr = mlnx_query_mr;\r
+ p_interface->modify_mr = mlnx_modify_mr;\r
+ p_interface->modify_pmr = mlnx_modify_pmr;\r
+ p_interface->register_smr = mlnx_register_smr;\r
+ p_interface->deregister_mr = mlnx_deregister_mr;\r
+\r
+ p_interface->create_mw = mlnx_create_mw;\r
+ p_interface->query_mw = mlnx_query_mw;\r
+ p_interface->destroy_mw = mlnx_destroy_mw;\r
+}\r
+\r
--- /dev/null
+\r
+#include "hca_driver.h"\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_pci.tmh"\r
+#endif\r
+#include <complib/cl_thread.h>\r
+#include <initguid.h>\r
+#include <wdmguid.h>\r
+#ifdef WIN_TO_BE_CHANGED\r
+#include <iba/hca_br_ifc.h>\r
+#endif\r
+\r
+#define HCA_RESET_HCR_OFFSET 0x000F0010\r
+#define HCA_RESET_TOKEN CL_HTON32(0x00000001)\r
+\r
+#define PCI_CAPABILITY_ID_VPD 0x03\r
+#define PCI_CAPABILITY_ID_PCIX 0x07\r
+#define PCI_CAPABILITY_ID_PCIEXP 0x10\r
+\r
+boolean_t\r
+FindBridgeIf(\r
+ IN hca_dev_ext_t *pi_ext,\r
+ IN PBUS_INTERFACE_STANDARD pi_pInterface\r
+ );\r
+\r
+\r
+/*\r
+ * Vital Product Data Capability\r
+ */\r
+typedef struct _PCI_VPD_CAPABILITY {\r
+\r
+ PCI_CAPABILITIES_HEADER Header;\r
+\r
+ USHORT Flags; // VPD address/flag word; cleared before a config restore\r
+               // elsewhere in this file to avoid triggering a VPD write\r
+ ULONG Data;   // VPD data window\r
+\r
+} PCI_VPD_CAPABILITY, *PPCI_VPD_CAPABILITY;\r
+\r
+\r
+/*\r
+ * PCI-X Capability\r
+ */\r
+typedef struct _PCI_PCIX_CAPABILITY {\r
+\r
+ PCI_CAPABILITIES_HEADER Header;\r
+\r
+ USHORT Command; // PCI-X command register (see PCI_X_CMD_MAX_READ below)\r
+ ULONG Status;   // PCI-X status register\r
+\r
+/* for Command: */\r
+} PCI_PCIX_CAPABILITY, *PPCI_PCIX_CAPABILITY;\r
+\r
+#define PCI_X_CMD_MAX_READ 0x000c /* Max Memory Read Byte Count */\r
+\r
+/*\r
+ * PCI-Express Capability\r
+ */\r
+/* Layout mirrors the PCI Express capability structure register block;\r
+ * only DevControl (Max_Read_Request_Size) is modified by this driver. */\r
+typedef struct _PCI_PCIEXP_CAPABILITY {\r
+\r
+ PCI_CAPABILITIES_HEADER Header;\r
+\r
+ USHORT Flags;\r
+ ULONG DevCapabilities;\r
+ USHORT DevControl;\r
+ USHORT DevStatus;\r
+ ULONG LinkCapabilities;\r
+ USHORT LinkControl;\r
+ USHORT LinkStatus;\r
+ ULONG SlotCapabilities;\r
+ USHORT SlotControl;\r
+ USHORT SlotStatus;\r
+ USHORT RootControl;\r
+ USHORT RootCapabilities;\r
+ USHORT RootStatus;\r
+} PCI_PCIEXP_CAPABILITY, *PPCI_PCIEXP_CAPABILITY;\r
+\r
+/* for DevControl: */\r
+#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */\r
+\r
+static NTSTATUS\r
+__get_bus_ifc(\r
+ IN DEVICE_OBJECT* const pDevObj,\r
+ IN const GUID* const pGuid,\r
+ OUT BUS_INTERFACE_STANDARD *pBusIfc );\r
+\r
+static void\r
+__fixup_pci_capabilities(\r
+ IN PCI_COMMON_CONFIG* const pConfig );\r
+\r
+static NTSTATUS\r
+__save_pci_config(\r
+ IN BUS_INTERFACE_STANDARD *pBusIfc,\r
+ OUT PCI_COMMON_CONFIG* const pConfig );\r
+\r
+static NTSTATUS\r
+__restore_pci_config(\r
+ IN BUS_INTERFACE_STANDARD *pBusIfc,\r
+ IN PCI_COMMON_CONFIG* const pConfig );\r
+\r
+\r
+#ifdef ALLOC_PRAGMA\r
+#pragma alloc_text (PAGE, __get_bus_ifc)\r
+#pragma alloc_text (PAGE, __fixup_pci_capabilities)\r
+#pragma alloc_text (PAGE, __save_pci_config)\r
+#pragma alloc_text (PAGE, __restore_pci_config)\r
+#endif\r
+\r
+/* Queries a bus interface (e.g. BUS_INTERFACE_STANDARD) from the device\r
+ * stack by sending IRP_MN_QUERY_INTERFACE to the HCA's PDO.  On success\r
+ * the interface in *pBusIfc is referenced and must be dereferenced by\r
+ * the caller.  Must run below DISPATCH_LEVEL. */\r
+static NTSTATUS\r
+__get_bus_ifc(\r
+ IN DEVICE_OBJECT* const pDevObj,\r
+ IN const GUID* const pGuid,\r
+ OUT BUS_INTERFACE_STANDARD *pBusIfc )\r
+{\r
+ NTSTATUS status;\r
+ IRP *pIrp;\r
+ IO_STATUS_BLOCK ioStatus;\r
+ IO_STACK_LOCATION *pIoStack;\r
+ DEVICE_OBJECT *pDev;\r
+ KEVENT event;\r
+\r
+ HCA_ENTER( HCA_DBG_PNP );\r
+\r
+ CL_ASSERT( KeGetCurrentIrql() < DISPATCH_LEVEL );\r
+\r
+ /* Takes a reference on the top of the stack; released below. */\r
+ pDev = IoGetAttachedDeviceReference( pDevObj );\r
+\r
+ KeInitializeEvent( &event, NotificationEvent, FALSE );\r
+\r
+ /* Build the IRP for the HCA. */\r
+ pIrp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, pDev,\r
+ NULL, 0, NULL, &event, &ioStatus );\r
+ if( !pIrp )\r
+ {\r
+ ObDereferenceObject( pDev );\r
+ HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, \r
+ ("IoBuildSynchronousFsdRequest failed.\n"));\r
+ return STATUS_INSUFFICIENT_RESOURCES;\r
+ }\r
+\r
+ /* Copy the request query parameters. */\r
+ pIoStack = IoGetNextIrpStackLocation( pIrp );\r
+ pIoStack->MinorFunction = IRP_MN_QUERY_INTERFACE;\r
+ pIoStack->Parameters.QueryInterface.Size = sizeof(BUS_INTERFACE_STANDARD);\r
+ pIoStack->Parameters.QueryInterface.Version = 1;\r
+ pIoStack->Parameters.QueryInterface.InterfaceType = pGuid;\r
+ pIoStack->Parameters.QueryInterface.Interface = (INTERFACE*)pBusIfc;\r
+ pIoStack->Parameters.QueryInterface.InterfaceSpecificData = NULL;\r
+\r
+ /* PnP IRPs must be initialized to STATUS_NOT_SUPPORTED before sending. */\r
+ pIrp->IoStatus.Status = STATUS_NOT_SUPPORTED;\r
+\r
+ /* Send the IRP and wait for it synchronously if it pends. */\r
+ status = IoCallDriver( pDev, pIrp );\r
+ if( status == STATUS_PENDING )\r
+ {\r
+ KeWaitForSingleObject( &event, Executive, KernelMode,\r
+ FALSE, NULL );\r
+\r
+ status = ioStatus.Status;\r
+ }\r
+ ObDereferenceObject( pDev );\r
+\r
+ HCA_EXIT( HCA_DBG_PNP );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Reads and saves the PCI configuration of the device accessible\r
+ * through the provided bus interface. Does not read registers 22 or 23\r
+ * as directed in Tavor PRM 1.0.1, Appendix A. InfiniHost Software Reset.\r
+ */\r
+static NTSTATUS\r
+__save_pci_config(\r
+ IN BUS_INTERFACE_STANDARD *pBusIfc,\r
+ OUT PCI_COMMON_CONFIG* const pConfig )\r
+{\r
+ ULONG len;\r
+ UINT32 *pBuf;\r
+\r
+ HCA_ENTER( HCA_DBG_PNP );\r
+ \r
+ /* View the 256-byte config structure as an array of 32-bit registers. */\r
+ pBuf = (UINT32*)pConfig;\r
+\r
+ /*\r
+ * Read the lower portion of the configuration, up to but excluding\r
+ * register 22.  (88 bytes = dword registers 0..21.)\r
+ */\r
+ len = pBusIfc->GetBusData(\r
+ pBusIfc->Context, PCI_WHICHSPACE_CONFIG, &pBuf[0], 0, 88 );\r
+ if( len != 88 )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR , HCA_DBG_PNP ,("Failed to read HCA config.\n"));\r
+ return STATUS_DEVICE_NOT_READY;\r
+ }\r
+\r
+ /* Read the upper portion of the configuration, from register 24\r
+ * (byte offset 96) to the end of the 256-byte space.  Registers 22\r
+ * and 23 are intentionally skipped and left uninitialized in pConfig. */\r
+ len = pBusIfc->GetBusData(\r
+ pBusIfc->Context, PCI_WHICHSPACE_CONFIG, &pBuf[24], 96, 160 );\r
+ if( len != 160 )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to read HCA config.\n"));\r
+ return STATUS_DEVICE_NOT_READY;\r
+ }\r
+\r
+ HCA_EXIT( HCA_DBG_PNP );\r
+ return STATUS_SUCCESS;\r
+}\r
+\r
+\r
+/* Walks the saved config's capability list and neutralizes fields whose\r
+ * restoration would have side effects (currently: the VPD flags word,\r
+ * which would otherwise start a VPD write cycle). */\r
+static void\r
+__fixup_pci_capabilities(\r
+ IN PCI_COMMON_CONFIG* const pConfig )\r
+{\r
+ UCHAR *pBuf;\r
+ PCI_CAPABILITIES_HEADER *pHdr, *pNextHdr;\r
+\r
+ HCA_ENTER( HCA_DBG_PNP );\r
+\r
+ pBuf = (UCHAR*)pConfig;\r
+\r
+ /* Locate the head of the capability list; its offset lives in a\r
+ * different field for device vs. bridge headers. */\r
+ if( pConfig->HeaderType == PCI_DEVICE_TYPE )\r
+ {\r
+ if( pConfig->u.type0.CapabilitiesPtr )\r
+ {\r
+ pNextHdr = (PCI_CAPABILITIES_HEADER*)\r
+ (pBuf + pConfig->u.type0.CapabilitiesPtr);\r
+ }\r
+ else\r
+ {\r
+ pNextHdr = NULL;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ ASSERT( pConfig->HeaderType == PCI_BRIDGE_TYPE );\r
+ if( pConfig->u.type1.CapabilitiesPtr )\r
+ {\r
+ pNextHdr = (PCI_CAPABILITIES_HEADER*)\r
+ (pBuf + pConfig->u.type1.CapabilitiesPtr);\r
+ }\r
+ else\r
+ {\r
+ pNextHdr = NULL;\r
+ }\r
+ }\r
+\r
+ /*\r
+ * Fix up any fields that might cause changes to the\r
+ * device - like writing VPD data.\r
+ * NOTE(review): no guard against a malformed/circular capability\r
+ * chain; the walk trusts the saved config data.\r
+ */\r
+ while( pNextHdr )\r
+ {\r
+ pHdr = pNextHdr;\r
+ if( pNextHdr->Next )\r
+ pNextHdr = (PCI_CAPABILITIES_HEADER*)(pBuf + pHdr->Next);\r
+ else\r
+ pNextHdr = NULL;\r
+\r
+ switch( pHdr->CapabilityID )\r
+ {\r
+ case PCI_CAPABILITY_ID_VPD:\r
+ /* Clear the flags field so we don't cause a write. */\r
+ ((PCI_VPD_CAPABILITY*)pHdr)->Flags = 0;\r
+ break;\r
+\r
+ default:\r
+ break;\r
+ }\r
+ }\r
+\r
+ HCA_EXIT( HCA_DBG_PNP );\r
+}\r
+\r
+\r
+/* Byte offset of a field within PCI_COMMON_CONFIG. */\r
+#define PCI_CONFIG_OFFSET( field ) \\r
+ offsetof( PCI_COMMON_CONFIG, field )\r
+\r
+/* Byte length of the span from fromField through the end of toField.\r
+ * NOTE(review): the '##' paste before the field name is nonstandard\r
+ * preprocessor usage tolerated by MSVC. */\r
+#define PCI_CONFIG_LEN( fromField, toField ) \\r
+ offsetof( PCI_COMMON_CONFIG, toField ) - \\r
+ offsetof( PCI_COMMON_CONFIG, fromField ) + \\r
+ sizeof( ((PCI_COMMON_CONFIG*)NULL)->##toField )\r
+\r
+/* Writes the [fromField, toField] span of the local 'pConfig' back to\r
+ * config space via the local 'pBusIfc'; expands to the byte count written.\r
+ * Relies on 'pBusIfc' and 'pConfig' being in scope at the expansion site. */\r
+#define PCI_CONFIG_WRITE( fromField, toField ) \\r
+ pBusIfc->SetBusData( pBusIfc->Context, PCI_WHICHSPACE_CONFIG, \\r
+ &pConfig->##fromField, PCI_CONFIG_OFFSET( fromField ), \\r
+ PCI_CONFIG_LEN( fromField, toField ) )\r
+\r
+/*\r
+ * Restore saved PCI configuration, skipping registers 22 and 23, as well\r
+ * as any registers where writing will have side effects such as the flags\r
+ * field of the VPD and vendor specific capabilities. The function also delays\r
+ * writing the command register, bridge control register (if applicable), and\r
+ * PCIX command register (if present).\r
+ */\r
+static NTSTATUS\r
+__restore_pci_config(\r
+ IN BUS_INTERFACE_STANDARD *pBusIfc,\r
+ IN PCI_COMMON_CONFIG* const pConfig )\r
+{\r
+ ULONG len;\r
+ UCHAR *pBuf;\r
+\r
+ HCA_ENTER( HCA_DBG_PNP );\r
+\r
+ pBuf = (UCHAR*)pConfig;\r
+\r
+ /* Fixup the capabilities as needed (e.g. clear VPD flags so the\r
+ * restore cannot trigger a VPD write). */\r
+ __fixup_pci_capabilities( pConfig );\r
+\r
+ /* Restore the vendor/device IDs */\r
+ len = PCI_CONFIG_WRITE( VendorID, DeviceID );\r
+ if( len != PCI_CONFIG_LEN( VendorID, DeviceID ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to write vendor/device IDs.\n"));\r
+ return STATUS_DEVICE_NOT_READY;\r
+ }\r
+\r
+ /*\r
+ * Skip the command register and write the rest (except the bridge\r
+ * control if this is a bridge).  The command register is restored\r
+ * last, below, so the device is re-enabled only once the rest of\r
+ * the header is back in place.\r
+ */\r
+ if( pConfig->HeaderType == PCI_DEVICE_TYPE )\r
+ {\r
+ len = PCI_CONFIG_WRITE( Status, u.type0.MaximumLatency );\r
+ if( len != PCI_CONFIG_LEN( Status, u.type0.MaximumLatency ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to write type 0 common header.\n"));\r
+ return STATUS_DEVICE_NOT_READY;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ ASSERT( pConfig->HeaderType == PCI_BRIDGE_TYPE );\r
+ len = PCI_CONFIG_WRITE( Status, u.type1.InterruptPin );\r
+ if( len != PCI_CONFIG_LEN( Status, u.type1.InterruptPin ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to write type 1 common header.\n"));\r
+ return STATUS_DEVICE_NOT_READY;\r
+ }\r
+ }\r
+\r
+ /* Write the capabilities back (device-specific area: 192 bytes\r
+ * starting at the DeviceSpecific field, i.e. up to offset 256). */\r
+ len = pBusIfc->SetBusData( pBusIfc->Context, PCI_WHICHSPACE_CONFIG,\r
+ pConfig->DeviceSpecific, PCI_CONFIG_OFFSET( DeviceSpecific ), 192 );\r
+ if( len != 192 )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to write capabilites.\n"));\r
+ return STATUS_DEVICE_NOT_READY;\r
+ }\r
+\r
+ /* Write the command register. */\r
+ len = PCI_CONFIG_WRITE( Command, Command );\r
+ if( len != PCI_CONFIG_LEN( Command, Command ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Failed to write command register.\n"));\r
+ return STATUS_DEVICE_NOT_READY;\r
+ }\r
+\r
+ /* Write the bridge control register if a bridge. */\r
+ if( pConfig->HeaderType == PCI_BRIDGE_TYPE )\r
+ {\r
+ len =\r
+ PCI_CONFIG_WRITE( u.type1.BridgeControl, u.type1.BridgeControl );\r
+ if( len !=\r
+ PCI_CONFIG_LEN( u.type1.BridgeControl, u.type1.BridgeControl ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+ ("Failed to write bridge control register.\n"));\r
+ return STATUS_DEVICE_NOT_READY;\r
+ }\r
+ }\r
+\r
+ HCA_EXIT( HCA_DBG_PNP );\r
+ return STATUS_SUCCESS;\r
+}\r
+\r
+\r
+/*\r
+ * hca_reset\r
+ *\r
+ * Performs a software reset of the HCA chip: saves the HCA's (and, for\r
+ * Tavor, its PCI bridge's) config space, pokes the reset register, waits\r
+ * for the device to come back, then restores the saved configuration.\r
+ *\r
+ * pDevObj  - the HCA's FDO\r
+ * is_tavor - nonzero for Tavor boards, which sit behind a dedicated\r
+ *            PCI bridge that must also be saved/restored\r
+ *\r
+ * NOTE(review): the static 'skip' flag is initialized to 1, so the whole\r
+ * reset sequence is currently bypassed and the function returns\r
+ * STATUS_SUCCESS immediately — presumably a debug switch; confirm intent.\r
+ */\r
+NTSTATUS\r
+hca_reset( DEVICE_OBJECT* const pDevObj, int is_tavor )\r
+{\r
+ NTSTATUS status = STATUS_SUCCESS;\r
+ PCI_COMMON_CONFIG hcaConfig, brConfig;\r
+ BUS_INTERFACE_STANDARD hcaBusIfc;\r
+ BUS_INTERFACE_STANDARD brBusIfc = {0}; // to bypass C4701\r
+ hca_dev_ext_t *pExt = (hca_dev_ext_t*)pDevObj->DeviceExtension;\r
+ ULONG data, i;\r
+ PULONG reset_p;\r
+ PHYSICAL_ADDRESS pa;\r
+ static int skip = 1;\r
+\r
+ HCA_ENTER( HCA_DBG_PNP );\r
+ if (skip) goto resetErr1;\r
+\r
+ /* Get the HCA's bus interface. */\r
+ status = __get_bus_ifc( pDevObj, &GUID_BUS_INTERFACE_STANDARD, &hcaBusIfc );\r
+ if( !NT_SUCCESS( status ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Failed to get HCA bus interface.\n"));\r
+ goto resetErr1;\r
+ }\r
+\r
+ if (is_tavor) {\r
+#if 0 \r
+ /* Get the HCA Bridge's bus interface. */\r
+ status = __get_bus_ifc( pDevObj, &GUID_HCA_BRIDGE_INTERFACE, &brBusIfc );\r
+ if( !NT_SUCCESS( status ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+ ("Failed to get HCA bridge bus interface.\n"));\r
+ goto resetErr2;\r
+ }\r
+#else\r
+ if (!FindBridgeIf( pExt, &brBusIfc )) {\r
+ // BUGFIX: previously fell through with status still\r
+ // STATUS_SUCCESS, so the caller saw success although the\r
+ // bridge interface was never obtained.\r
+ status = STATUS_UNSUCCESSFUL;\r
+ goto resetErr2;\r
+ }\r
+#endif\r
+ }\r
+\r
+ /* Save the HCA's configuration. */\r
+ status = __save_pci_config( &hcaBusIfc, &hcaConfig );\r
+ if( !NT_SUCCESS( status ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP,\r
+ ("Failed to save HCA config.\n"));\r
+ goto resetErr3;\r
+ }\r
+\r
+ if (is_tavor) {\r
+ /* Save the HCA bridge's configuration. */\r
+ status = __save_pci_config( &brBusIfc, &brConfig );\r
+ if( !NT_SUCCESS( status ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP,\r
+ ("Failed to save bridge config.\n"));\r
+ goto resetErr3;\r
+ }\r
+ }\r
+ \r
+ /* map reset register */\r
+ pa.QuadPart = pExt->bar[HCA_BAR_TYPE_HCR].phys + HCA_RESET_HCR_OFFSET;\r
+ HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP ,("Mapping reset register with address 0x%I64x\n", pa.QuadPart));\r
+ reset_p = MmMapIoSpace( pa, 4, MmNonCached );\r
+ if( !reset_p )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to map reset register with address 0x%I64x\n", pa.QuadPart));\r
+ status = STATUS_UNSUCCESSFUL;\r
+ goto resetErr3;\r
+ }\r
+ \r
+ /* Issue the reset. */\r
+ HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP ,("Resetting the chip ...\n"));\r
+ WRITE_REGISTER_ULONG( reset_p, HCA_RESET_TOKEN );\r
+\r
+ /* Wait a second. */\r
+ cl_thread_suspend( 1000 );\r
+\r
+ /* unmap the reset register */\r
+ HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP ,("Unmapping reset register \n"));\r
+ MmUnmapIoSpace( reset_p, 4 );\r
+\r
+ \r
+ if (is_tavor) {\r
+ /*\r
+ * Now read the bridge's configuration register until it doesn't\r
+ * return 0xFFFFFFFF. Give it 10 seconds (10 x 1000ms polls) for\r
+ * good measure.\r
+ */\r
+ HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP ,("Read the Bridge's configuration register \n"));\r
+ for( i = 0; i < 10; i++ )\r
+ {\r
+ if( brBusIfc.GetBusData( brBusIfc.Context, PCI_WHICHSPACE_CONFIG,\r
+ &data, 0, sizeof(ULONG) ) != sizeof(ULONG) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+ ("Failed to read bridge configuration data.\n"));\r
+ status = STATUS_UNSUCCESSFUL;\r
+ goto resetErr3;\r
+ }\r
+ /* See if we got valid data. */\r
+ if( data != 0xFFFFFFFF )\r
+ break;\r
+\r
+ cl_thread_suspend( 1000 );\r
+ } \r
+ if( i == 10 )\r
+ {\r
+ /* Darn, timed out. :( */\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+ ("Doh! HCA Bridge never came back from reset!\n"));\r
+ status = STATUS_UNSUCCESSFUL;\r
+ goto resetErr3;\r
+ }\r
+ } \r
+\r
+ else {\r
+ /*\r
+ * Now read the HCA's configuration register until it doesn't\r
+ * return 0xFFFFFFFF. Give it 10 seconds (100 x 100ms polls) for\r
+ * good measure.\r
+ */\r
+ HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP ,("Read the HCA's configuration register \n"));\r
+ for( i = 0; i < 100; i++ )\r
+ {\r
+ if( hcaBusIfc.GetBusData( hcaBusIfc.Context, PCI_WHICHSPACE_CONFIG,\r
+ &data, 0, sizeof(ULONG) ) != sizeof(ULONG) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+ ("Failed to read HCA configuration data.\n"));\r
+ status = STATUS_UNSUCCESSFUL;\r
+ goto resetErr3;\r
+ }\r
+ /* See if we got valid data. */\r
+ if( data != 0xFFFFFFFF )\r
+ break;\r
+\r
+ cl_thread_suspend( 100 );\r
+ } \r
+ if( i >= 100 )\r
+ {\r
+ /* Darn, timed out. :( */\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+ ("Doh! HCA Bridge never came back from reset!\n"));\r
+ status = STATUS_UNSUCCESSFUL;\r
+ goto resetErr3;\r
+ }\r
+ }\r
+ \r
+ if (is_tavor) {\r
+ /* Restore the HCA's bridge configuration. */\r
+ HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP ,("Restoring bridge PCI configuration \n"));\r
+ status = __restore_pci_config( &brBusIfc, &brConfig );\r
+ if( !NT_SUCCESS( status ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+ ("Failed to restore bridge config.\n"));\r
+ goto resetErr3;\r
+ }\r
+ }\r
+ \r
+ /* Restore the HCA's configuration. */\r
+ HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP ,("Restoring HCA PCI configuration \n"));\r
+ status = __restore_pci_config( &hcaBusIfc, &hcaConfig );\r
+ if( !NT_SUCCESS( status ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+ ("Failed to restore HCA config.\n"));\r
+ }\r
+\r
+resetErr3:\r
+ /* brBusIfc is only valid (referenced) on the Tavor path. */\r
+ if (is_tavor) \r
+ brBusIfc.InterfaceDereference( brBusIfc.Context );\r
+\r
+resetErr2:\r
+ hcaBusIfc.InterfaceDereference( hcaBusIfc.Context );\r
+\r
+resetErr1:\r
+ HCA_EXIT( HCA_DBG_PNP );\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * Returns the byte offset in configuration space of the capability whose\r
+ * ID matches cap_id, or 0 if the capability is not present.  pConfig must\r
+ * be a saved type-0 (device) header.\r
+ */\r
+static ULONG\r
+__FindCapability(\r
+ IN PCI_COMMON_CONFIG* const pConfig, \r
+ IN char cap_id\r
+ )\r
+{\r
+ ULONG offset = 0;\r
+ UCHAR *pBuf;\r
+ PCI_CAPABILITIES_HEADER *pHdr;\r
+\r
+ HCA_ENTER( HCA_DBG_PNP );\r
+\r
+ pBuf = (UCHAR*)pConfig;\r
+\r
+ ASSERT( pConfig->HeaderType == PCI_DEVICE_TYPE );\r
+\r
+ /* Locate the head of the capability list. */\r
+ if( pConfig->u.type0.CapabilitiesPtr )\r
+ {\r
+ pHdr = (PCI_CAPABILITIES_HEADER*)\r
+ (pBuf + pConfig->u.type0.CapabilitiesPtr);\r
+ }\r
+ else\r
+ {\r
+ pHdr = NULL;\r
+ }\r
+\r
+ /*\r
+ * Walk the capability list until the requested ID is found or the\r
+ * list ends.  (Config-space offsets fit in a byte, hence the UCHAR\r
+ * cast below.)\r
+ */\r
+ while( pHdr )\r
+ {\r
+ if( pHdr->CapabilityID == cap_id )\r
+ {\r
+ offset = (UCHAR)(((ULONG_PTR)pHdr) - ((ULONG_PTR)pConfig));\r
+ break;\r
+ }\r
+\r
+ if( pHdr->Next )\r
+ pHdr = (PCI_CAPABILITIES_HEADER*)(pBuf + pHdr->Next);\r
+ else\r
+ pHdr = NULL;\r
+ }\r
+\r
+ HCA_EXIT( HCA_DBG_PNP );\r
+ return offset;\r
+}\r
+\r
+\r
+/*\r
+ * Tunes PCI configuration as described in 13.3.2 in the Tavor PRM.\r
+ */\r
+NTSTATUS\r
+hca_tune_pci(\r
+ IN DEVICE_OBJECT* const pDevObj )\r
+{\r
+ NTSTATUS status;\r
+ PCI_COMMON_CONFIG hcaConfig;\r
+ BUS_INTERFACE_STANDARD hcaBusIfc;\r
+ ULONG len;\r
+ ULONG capOffset;\r
+ PCI_PCIX_CAPABILITY *pPciXCap;\r
+ PCI_PCIEXP_CAPABILITY *pPciExpCap;\r
+\r
+ HCA_ENTER( HCA_DBG_PNP );\r
+\r
+ /* Get the HCA's bus interface. */\r
+ status = __get_bus_ifc( pDevObj, &GUID_BUS_INTERFACE_STANDARD, &hcaBusIfc );\r
+ if( !NT_SUCCESS( status ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to get HCA bus interface.\n"));\r
+ return status;\r
+ }\r
+\r
+ /* Save the HCA's configuration. */\r
+ status = __save_pci_config( &hcaBusIfc, &hcaConfig );\r
+ if( !NT_SUCCESS( status ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+ ("Failed to save HCA config.\n"));\r
+ status = STATUS_UNSUCCESSFUL;\r
+ goto tweakErr;\r
+ }\r
+ status = 0;\r
+\r
+ /*\r
+ * PCIX Capability\r
+ */\r
+ capOffset = __FindCapability( &hcaConfig, PCI_CAPABILITY_ID_PCIX );\r
+ if( capOffset )\r
+ {\r
+ pPciXCap = (PCI_PCIX_CAPABILITY*)(((UCHAR*)&hcaConfig) + capOffset);\r
+ /* Update the command field to max the read byte count if needed. */\r
+ if( (pPciXCap->Command & 0x000C) != 0x000C )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_WARNING, HCA_DBG_PNP,\r
+ ("Updating max recv byte count of PCI-X capability.\n"));\r
+ pPciXCap->Command = (pPciXCap->Command & ~PCI_X_CMD_MAX_READ) | (3 << 2);\r
+ len = hcaBusIfc.SetBusData( hcaBusIfc.Context, PCI_WHICHSPACE_CONFIG,\r
+ &pPciXCap->Command,\r
+ capOffset + offsetof( PCI_PCIX_CAPABILITY, Command),\r
+ sizeof( pPciXCap->Command ) );\r
+ if( len != sizeof( pPciXCap->Command ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+ ("Failed to update PCI-X maximum read byte count.\n"));\r
+ status = STATUS_UNSUCCESSFUL;\r
+ goto tweakErr;\r
+ }\r
+ }\r
+ }\r
+\r
+\r
+ /*\r
+ * PCI Express Capability\r
+ */\r
+ capOffset = __FindCapability( &hcaConfig, PCI_CAPABILITY_ID_PCIEXP );\r
+ if( capOffset )\r
+ {\r
+ pPciExpCap = (PCI_PCIEXP_CAPABILITY*)(((UCHAR*)&hcaConfig) + capOffset);\r
+ \r
+ /* Update Max_Read_Request_Size. */\r
+ HCA_PRINT( TRACE_LEVEL_WARNING ,HCA_DBG_PNP,\r
+ ("Updating max recv byte count of PCI-X capability.\n"));\r
+ pPciExpCap->DevControl = (pPciExpCap->DevControl & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12);\r
+ len = hcaBusIfc.SetBusData( hcaBusIfc.Context, PCI_WHICHSPACE_CONFIG,\r
+ &pPciExpCap->DevControl,\r
+ capOffset + offsetof( PCI_PCIEXP_CAPABILITY, DevControl),\r
+ sizeof( pPciExpCap->DevControl ) );\r
+ if( len != sizeof( pPciExpCap->DevControl ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+ ("Failed to update PCI-Exp maximum read byte count.\n"));\r
+ goto tweakErr;\r
+ }\r
+ }\r
+\r
+\r
+tweakErr:\r
+ hcaBusIfc.InterfaceDereference( hcaBusIfc.Context );\r
+\r
+ HCA_EXIT( HCA_DBG_PNP );\r
+ return status;\r
+}\r
+\r
+\r
+/* leo */\r
+\r
+/*\r
+ * hca_enable_pci\r
+ *\r
+ * Saves the HCA's PCI configuration into *pHcaConfig and enables the\r
+ * device by setting I/O, Memory and Bus-Master bits (0x7) in the command\r
+ * register.  The saved copy reflects the modified command value.\r
+ *\r
+ * Returns STATUS_SUCCESS, or STATUS_DEVICE_NOT_READY on a config-space\r
+ * access failure.\r
+ */\r
+NTSTATUS\r
+hca_enable_pci(\r
+ IN DEVICE_OBJECT* const pDevObj,\r
+ OUT PCI_COMMON_CONFIG* pHcaConfig\r
+ )\r
+{\r
+ NTSTATUS status;\r
+ BUS_INTERFACE_STANDARD hcaBusIfc;\r
+ ULONG len;\r
+ \r
+ HCA_ENTER( HCA_DBG_PNP );\r
+ \r
+ /* Get the HCA's bus interface. */\r
+ status = __get_bus_ifc( pDevObj, &GUID_BUS_INTERFACE_STANDARD, &hcaBusIfc );\r
+ if( !NT_SUCCESS( status ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR , HCA_DBG_PNP ,("Failed to get HCA bus interface.\n"));\r
+ return STATUS_DEVICE_NOT_READY;\r
+ }\r
+ \r
+ /* Save the HCA's configuration. */\r
+ status = __save_pci_config( &hcaBusIfc, pHcaConfig );\r
+ if( !NT_SUCCESS( status ) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP,\r
+ ("Failed to save HCA config.\n"));\r
+ goto pciErr;\r
+ }\r
+\r
+ /* fix command register (set PCI Master bit) */\r
+ // NOTE: we change here the saved value of the command register\r
+ // NOTE(review): the write below is sizeof(ULONG) = 4 bytes at the\r
+ // hard-coded offset 4, so it covers both the Command register and\r
+ // the adjacent Status register (whose bits are write-1-to-clear) --\r
+ // confirm this is intentional rather than a 2-byte Command write.\r
+ pHcaConfig->Command |= 7;\r
+ len = hcaBusIfc.SetBusData( hcaBusIfc.Context, PCI_WHICHSPACE_CONFIG,\r
+ (PVOID)&pHcaConfig->Command , 4, sizeof(ULONG) ); \r
+ if( len != sizeof(ULONG) )\r
+ {\r
+ HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to write command register.\n"));\r
+ status = STATUS_DEVICE_NOT_READY;\r
+ goto pciErr;\r
+ }\r
+\r
+ pciErr:\r
+ hcaBusIfc.InterfaceDereference( hcaBusIfc.Context );\r
+ \r
+ HCA_EXIT( HCA_DBG_PNP );\r
+ return status;\r
+}\r
--- /dev/null
+#ifndef HCI_PCI_H
+#define HCI_PCI_H
+
+
+/* Resets the HCA chip (and, for Tavor, its PCI bridge), preserving and
+ * restoring PCI configuration space around the reset. */
+NTSTATUS
+hca_reset(
+ IN DEVICE_OBJECT* const pDevObj, int is_tavor );
+
+/* Enables the HCA in PCI config space (I/O, Memory, Bus-Master) and
+ * returns the saved configuration in *pHcaConfig. */
+NTSTATUS
+hca_enable_pci(
+ IN DEVICE_OBJECT* const pDevObj,
+ OUT PCI_COMMON_CONFIG* pHcaConfig
+ );
+
+/* Tunes PCI-X / PCI-Express read request sizes per the Tavor PRM. */
+NTSTATUS
+hca_tune_pci(
+ IN DEVICE_OBJECT* const pDevObj );
+
+#endif
--- /dev/null
+/* BEGIN_ICS_COPYRIGHT ****************************************\r
+** END_ICS_COPYRIGHT ****************************************/\r
+\r
+/*\r
+ $Revision: 1.1 $\r
+*/\r
+\r
+\r
+/*\r
+ * Provides the driver entry points for the Tavor VPD.\r
+ */\r
+\r
+#include "hca_driver.h"\r
+#include "mthca_dev.h"\r
+\r
+#if defined(EVENT_TRACING)\r
+#ifdef offsetof\r
+#undef offsetof\r
+#endif\r
+#include "hca_pnp.tmh"\r
+#endif\r
+#include "mthca.h"\r
+#include <initguid.h>\r
+#include <wdmguid.h>\r
+\r
+extern const char *mthca_version;\r
+\r
+\r
+static NTSTATUS\r
+hca_start(\r
+ IN DEVICE_OBJECT* const p_dev_obj,\r
+ IN IRP* const p_irp, \r
+ OUT cl_irp_action_t* const p_action );\r
+\r
+static NTSTATUS\r
+hca_query_stop(\r
+ IN DEVICE_OBJECT* const p_dev_obj,\r
+ IN IRP* const p_irp, \r
+ OUT cl_irp_action_t* const p_action );\r
+\r
+static NTSTATUS\r
+hca_stop(\r
+ IN DEVICE_OBJECT* const p_dev_obj,\r
+ IN IRP* const p_irp, \r
+ OUT cl_irp_action_t* const p_action );\r
+\r
+static NTSTATUS\r
+hca_cancel_stop(\r
+ IN DEVICE_OBJECT* const p_dev_obj,\r
+ IN IRP* const p_irp, \r
+ OUT cl_irp_action_t* const p_action );\r
+\r
+static NTSTATUS\r
+hca_query_remove(\r
+ IN DEVICE_OBJECT* const p_dev_obj,\r
+ IN IRP* const p_irp, \r
+ OUT cl_irp_action_t* const p_action );\r
+\r
+static void\r
+hca_release_resources(\r
+ IN DEVICE_OBJECT* const p_dev_obj );\r
+\r
+static NTSTATUS\r
+hca_cancel_remove(\r
+ IN DEVICE_OBJECT* const p_dev_obj,\r
+ IN IRP* const p_irp, \r
+ OUT cl_irp_action_t* const p_action );\r
+\r
+static NTSTATUS\r
+hca_surprise_remove(\r
+ IN DEVICE_OBJECT* const p_dev_obj,\r
+ IN IRP* const p_irp, \r
+ OUT cl_irp_action_t* const p_action );\r
+\r
+static NTSTATUS\r
+hca_query_capabilities(\r
+ IN DEVICE_OBJECT* const p_dev_obj,\r
+ IN IRP* const p_irp, \r
+ OUT cl_irp_action_t* const p_action );\r
+\r
+static NTSTATUS\r
+hca_query_pnp_state(\r
+ IN DEVICE_OBJECT* const p_dev_obj,\r
+ IN IRP* const p_irp, \r
+ OUT cl_irp_action_t* const p_action );\r
+\r
+static NTSTATUS\r
+hca_query_bus_relations(\r
+ IN DEVICE_OBJECT* const p_dev_obj,\r
+ IN IRP* const p_irp, \r
+ OUT cl_irp_action_t* const p_action );\r
+\r
+static NTSTATUS\r
+hca_query_removal_relations(\r
+ IN DEVICE_OBJECT* const p_dev_obj,\r
+ IN IRP* const p_irp, \r
+ OUT cl_irp_action_t* const p_action );\r
+\r
+static NTSTATUS\r
+hca_query_power(\r
+ IN DEVICE_OBJECT* const p_dev_obj,\r
+ IN IRP* const p_irp,\r
+ OUT cl_irp_action_t* const p_action );\r
+\r
+static NTSTATUS\r
+hca_set_power(\r
+ IN DEVICE_OBJECT* const p_dev_obj,\r
+ IN IRP* const p_irp, \r
+ OUT cl_irp_action_t* const p_action );\r
+\r
+static ci_interface_t*\r
+__alloc_hca_ifc(\r
+ IN hca_dev_ext_t* const p_ext );\r
+\r
+static NTSTATUS\r
+__get_ci_interface(\r
+ IN DEVICE_OBJECT* const p_dev_obj );\r
+\r
+static NTSTATUS\r
+__hca_register(\r
+ IN DEVICE_OBJECT *p_dev_obj );\r
+\r
+static NTSTATUS\r
+__pnp_notify_target(\r
+ IN void *pNotifyStruct,\r
+ IN void *context );\r
+\r
+static NTSTATUS\r
+__pnp_notify_ifc(\r
+ IN void *pNotifyStruct,\r
+ IN void *context );\r
+\r
+\r
+#ifdef ALLOC_PRAGMA\r
+#pragma alloc_text (PAGE, hca_add_device)\r
+#pragma alloc_text (PAGE, hca_start)\r
+#pragma alloc_text (PAGE, hca_query_stop)\r
+#pragma alloc_text (PAGE, hca_stop)\r
+#pragma alloc_text (PAGE, hca_cancel_stop)\r
+#pragma alloc_text (PAGE, hca_query_remove)\r
+#pragma alloc_text (PAGE, hca_release_resources)\r
+#pragma alloc_text (PAGE, hca_cancel_remove)\r
+#pragma alloc_text (PAGE, hca_surprise_remove)\r
+#pragma alloc_text (PAGE, hca_query_capabilities)\r
+#pragma alloc_text (PAGE, hca_query_pnp_state)\r
+#pragma alloc_text (PAGE, hca_query_bus_relations)\r
+#pragma alloc_text (PAGE, hca_query_removal_relations)\r
+#pragma alloc_text (PAGE, hca_set_power)\r
+#pragma alloc_text (PAGE, __alloc_hca_ifc)\r
+#pragma alloc_text (PAGE, __get_ci_interface)\r
+#pragma alloc_text (PAGE, __hca_register)\r
+#pragma alloc_text (PAGE, __pnp_notify_target)\r
+#pragma alloc_text (PAGE, __pnp_notify_ifc)\r
+#endif\r
+\r
+\r
+static cl_vfptr_pnp_po_t vfptrHcaPnp;\r
+\r
+\r
+/*
+ * Populate the static PnP/power dispatch table used by the complib
+ * framework for every HCA device object. Handlers that need real work
+ * point at the hca_* routines in this file; everything else is either
+ * skipped or ignored. Must run before the first hca_add_device call.
+ */
+void
+hca_init_vfptr( void )
+{
+ cl_vfptr_pnp_po_t *p_tbl = &vfptrHcaPnp;
+
+ p_tbl->identity = "HCA driver";
+
+ /* Start/stop/remove handling. */
+ p_tbl->pfn_start = hca_start;
+ p_tbl->pfn_query_stop = hca_query_stop;
+ p_tbl->pfn_stop = hca_stop;
+ p_tbl->pfn_cancel_stop = hca_cancel_stop;
+ p_tbl->pfn_query_remove = hca_query_remove;
+ p_tbl->pfn_release_resources = hca_release_resources;
+ p_tbl->pfn_remove = cl_do_remove;
+ p_tbl->pfn_cancel_remove = hca_cancel_remove;
+ p_tbl->pfn_surprise_remove = hca_surprise_remove;
+
+ /* Queries. */
+ p_tbl->pfn_query_capabilities = hca_query_capabilities;
+ p_tbl->pfn_query_pnp_state = hca_query_pnp_state;
+ p_tbl->pfn_filter_res_req = cl_irp_skip;
+ p_tbl->pfn_dev_usage_notification = cl_do_sync_pnp;
+ p_tbl->pfn_query_bus_relations = hca_query_bus_relations;
+ p_tbl->pfn_query_ejection_relations = cl_irp_ignore;
+ p_tbl->pfn_query_removal_relations = hca_query_removal_relations;
+ p_tbl->pfn_query_target_relations = cl_irp_ignore;
+ p_tbl->pfn_unknown = cl_irp_ignore;
+ p_tbl->pfn_query_resources = cl_irp_ignore;
+ p_tbl->pfn_query_res_req = cl_irp_ignore;
+ p_tbl->pfn_query_bus_info = cl_irp_ignore;
+ p_tbl->pfn_query_interface = cl_irp_ignore;
+ p_tbl->pfn_read_config = cl_irp_ignore;
+ p_tbl->pfn_write_config = cl_irp_ignore;
+ p_tbl->pfn_eject = cl_irp_ignore;
+ p_tbl->pfn_set_lock = cl_irp_ignore;
+
+ /* Power management. */
+ p_tbl->pfn_query_power = hca_query_power;
+ p_tbl->pfn_set_power = hca_set_power;
+ p_tbl->pfn_power_sequence = cl_irp_ignore;
+ p_tbl->pfn_wait_wake = cl_irp_ignore;
+}
+\r
+\r
+/*
+ * hca_add_device
+ *
+ * AddDevice routine: creates the functional device object (FDO) for an
+ * HCA, attaches it to the PDO's stack, and initializes the complib
+ * PnP/PO extension with the dispatch table built by hca_init_vfptr.
+ *
+ * Returns the IoCreateDevice status, or STATUS_NO_SUCH_DEVICE if the
+ * stack attach fails (the FDO is deleted in that case).
+ */
+NTSTATUS
+hca_add_device(
+ IN PDRIVER_OBJECT pDriverObj,
+ IN PDEVICE_OBJECT pPdo )
+{
+ NTSTATUS status;
+ DEVICE_OBJECT *p_dev_obj, *pNextDevObj;
+ hca_dev_ext_t *p_ext;
+
+ HCA_ENTER(HCA_DBG_PNP);
+
+ /*
+ * Create the device so that we have a device extension to store stuff in.
+ */
+ status = IoCreateDevice( pDriverObj, sizeof(hca_dev_ext_t),
+ NULL, FILE_DEVICE_INFINIBAND, FILE_DEVICE_SECURE_OPEN,
+ FALSE, &p_dev_obj );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("IoCreateDevice returned 0x%08X.\n", status));
+ return status;
+ }
+
+ /* Zero the extension before anything can reference it. */
+ p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
+ cl_memclr( p_ext, sizeof(hca_dev_ext_t) );
+
+ /* Attach to the device stack. */
+ pNextDevObj = IoAttachDeviceToDeviceStack( p_dev_obj, pPdo );
+ if( !pNextDevObj )
+ {
+ //cl_event_destroy( &p_ext->mutex );
+ IoDeleteDevice( p_dev_obj );
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("IoAttachDeviceToDeviceStack failed.\n"));
+ return STATUS_NO_SUCH_DEVICE;
+ }
+
+ /* Inititalize the complib extension. */
+ cl_init_pnp_po_ext( p_dev_obj, pNextDevObj, pPdo, 0,
+ &vfptrHcaPnp, NULL );
+
+ /* Device exists but has not yet been started (no resources). */
+ p_ext->state = HCA_ADDED;
+
+ HCA_EXIT(HCA_DBG_PNP);
+ return status;
+}
+\r
+\r
+/*
+ * __get_ci_interface
+ *
+ * Synchronously sends IRP_MN_QUERY_INTERFACE to the AL device
+ * (p_ext->p_al_dev) to obtain its CI (verbs) interface, stored into
+ * p_ext->ci_ifc. On success the interface is referenced and must later
+ * be released with InterfaceDereference.
+ *
+ * Uses the standard build-synchronous-PnP-IRP pattern: event + 
+ * IoBuildSynchronousFsdRequest, default status STATUS_NOT_SUPPORTED,
+ * wait if the lower driver returns STATUS_PENDING.
+ */
+static NTSTATUS
+__get_ci_interface(
+ IN DEVICE_OBJECT* const p_dev_obj )
+{
+ NTSTATUS status;
+ IRP *p_irp;
+ hca_dev_ext_t *p_ext;
+ IO_STATUS_BLOCK ioStatus;
+ IO_STACK_LOCATION *pIoStack;
+ KEVENT event;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
+
+ KeInitializeEvent( &event, NotificationEvent, FALSE );
+
+ /* Query for the verbs interface. */
+ p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, p_ext->p_al_dev,
+ NULL, 0, NULL, &event, &ioStatus );
+ if( !p_irp )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("IoBuildSynchronousFsdRequest failed.\n"));
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ /* Format the IRP. */
+ pIoStack = IoGetNextIrpStackLocation( p_irp );
+ pIoStack->MinorFunction = IRP_MN_QUERY_INTERFACE;
+ pIoStack->Parameters.QueryInterface.Version = IB_CI_INTERFACE_VERSION;
+ pIoStack->Parameters.QueryInterface.Size = sizeof(ib_ci_ifc_t);
+ pIoStack->Parameters.QueryInterface.Interface = 
+ (INTERFACE*)&p_ext->ci_ifc;
+ pIoStack->Parameters.QueryInterface.InterfaceSpecificData = NULL;
+ pIoStack->Parameters.QueryInterface.InterfaceType = 
+ &GUID_IB_CI_INTERFACE;
+ /* PnP IRPs must start out failed with STATUS_NOT_SUPPORTED. */
+ p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED;
+
+ /* Send the IRP. */
+ status = IoCallDriver( p_ext->p_al_dev, p_irp );
+ if( status == STATUS_PENDING )
+ {
+ /* Wait for completion; final status is in the caller's IOSB. */
+ KeWaitForSingleObject( &event, Executive, KernelMode, 
+ FALSE, NULL );
+
+ status = ioStatus.Status;
+ }
+
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, 
+ ("Query interface for verbs returned %08x.\n", status));
+ return status;
+ }
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return status;
+}
+\r
+\r
+/*
+ * __pnp_notify_target
+ *
+ * Target-device-change notification callback registered against the AL
+ * device's file object. Handles three events:
+ *  - QUERY_REMOVE:     drop our CI interface and AL file-object
+ *                      references so AL can unload.
+ *  - REMOVE_COMPLETE:  same cleanup (references may already be gone),
+ *                      then unregister this notification.
+ *  - REMOVE_CANCELLED: re-acquire the AL device and re-register both
+ *                      the notification and the CA.
+ *
+ * context is the HCA's FDO (passed at registration time).
+ */
+static NTSTATUS
+__pnp_notify_target(
+ IN void *pNotifyStruct,
+ IN void *context )
+{
+ NTSTATUS status = STATUS_SUCCESS;
+ DEVICE_OBJECT *p_dev_obj;
+ hca_dev_ext_t *p_ext;
+ TARGET_DEVICE_REMOVAL_NOTIFICATION *pNotify;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ pNotify = (TARGET_DEVICE_REMOVAL_NOTIFICATION*)pNotifyStruct;
+ p_dev_obj = (DEVICE_OBJECT*)context;
+ p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
+
+ if( IsEqualGUID( &pNotify->Event, &GUID_TARGET_DEVICE_QUERY_REMOVE ) )
+ {
+ if( p_ext->state == HCA_REGISTERED )
+ {
+ /* Release AL's CI interface. */
+ p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );
+ p_ext->state = HCA_STARTED;
+ }
+
+ /* Release AL's file object so that it can unload. */
+ CL_ASSERT( p_ext->p_al_file_obj );
+ CL_ASSERT( p_ext->p_al_file_obj == pNotify->FileObject );
+ ObDereferenceObject( p_ext->p_al_file_obj );
+ p_ext->p_al_file_obj = NULL;
+ p_ext->p_al_dev = NULL;
+ }
+ else if( IsEqualGUID( &pNotify->Event, 
+ &GUID_TARGET_DEVICE_REMOVE_COMPLETE ) )
+ {
+ if( p_ext->state == HCA_REGISTERED )
+ {
+ /* Release AL's CI interface. */
+ p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );
+ p_ext->state = HCA_STARTED;
+ }
+
+ /* Release AL's file object so that it can unload. */
+ /* May be NULL if QUERY_REMOVE already released it. */
+ if( p_ext->p_al_file_obj )
+ {
+ ObDereferenceObject( p_ext->p_al_file_obj );
+ p_ext->p_al_file_obj = NULL;
+ p_ext->p_al_dev = NULL;
+ }
+
+ /* Cancel our target device change registration. */
+ IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );
+ p_ext->pnp_target_entry = NULL;
+ }
+ else if( IsEqualGUID( &pNotify->Event, 
+ &GUID_TARGET_DEVICE_REMOVE_CANCELLED ) )
+ {
+ /* Cancel our target device change registration. */
+ /* The old registration is stale; a fresh one is made below. */
+ IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );
+ p_ext->pnp_target_entry = NULL;
+
+ /* Get the device object pointer for the AL. */
+ CL_ASSERT( !p_ext->p_al_file_obj );
+ CL_ASSERT( !p_ext->p_al_dev );
+ /* PnP holds a reference on FileObject; we take ownership here. */
+ p_ext->p_al_file_obj = pNotify->FileObject;
+ p_ext->p_al_dev = IoGetRelatedDeviceObject( p_ext->p_al_file_obj );
+
+ status = IoRegisterPlugPlayNotification( 
+ EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj, 
+ p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj, 
+ &p_ext->pnp_target_entry );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("IoRegisterPlugPlayNotification returned %08x.\n", status));
+ return status;
+ }
+
+ /* Re-register the CA with AL; failure is ignored here. */
+ __hca_register( p_dev_obj );
+ }
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return status;
+}
+\r
+\r
+/*
+ * Allocate and populate a ci_interface_t describing this HCA for AL.
+ * Returns NULL if the pool allocation fails; on success the caller owns
+ * the buffer and releases it with ExFreePool.
+ */
+static ci_interface_t*
+__alloc_hca_ifc(
+ IN hca_dev_ext_t* const p_ext )
+{
+ ci_interface_t *p_ifc;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ p_ifc = (ci_interface_t*)ExAllocatePool( PagedPool, sizeof(ci_interface_t) );
+ if( p_ifc == NULL )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, 
+ ("Failed to allocate ci_interface_t (%d bytes).\n",
+ sizeof(ci_interface_t)));
+ return NULL;
+ }
+
+ /* Fill in the common verb entry points for this HCA GUID. */
+ setup_ci_interface( p_ext->hca.guid, p_ifc );
+
+ /* Identify the specific device instance. */
+ p_ifc->p_hca_dev = p_ext->cl_ext.p_pdo;
+ p_ifc->vend_id = (uint32_t)p_ext->hcaConfig.VendorID;
+ p_ifc->dev_id = (uint16_t)p_ext->hcaConfig.DeviceID;
+ p_ifc->dev_revision = (uint16_t)p_ext->hca.hw_ver;
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return p_ifc;
+}
+\r
+\r
+/*
+ * __hca_register
+ *
+ * Registers this HCA with the access layer (AL):
+ *  1. queries AL's CI interface (takes a reference on it),
+ *  2. builds a temporary ci_interface_t describing the HCA,
+ *  3. calls AL's register_ca with it.
+ * On success the state advances to HCA_REGISTERED; on register_ca
+ * failure the CI interface reference is dropped again.
+ *
+ * Requires state == HCA_STARTED and a valid p_al_dev.
+ * NOTE(review): returns without HCA_EXIT tracing on all paths - other
+ * routines in this file are also inconsistent about this.
+ */
+static NTSTATUS
+__hca_register(
+ IN DEVICE_OBJECT *p_dev_obj )
+{
+ hca_dev_ext_t *p_ext;
+ NTSTATUS status;
+ ib_api_status_t ib_status;
+ ci_interface_t *p_hca_ifc;
+
+ HCA_ENTER( HCA_DBG_PNP );
+ 
+ p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
+
+ ASSERT( p_ext->state == HCA_STARTED );
+ ASSERT( p_ext->p_al_dev );
+
+ /* Get the AL's lower interface. */
+ status = __get_ci_interface( p_dev_obj );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_SHIM, 
+ ("__get_ci_interface returned %08x.\n", status));
+ return status;
+ }
+
+ /* Allocate and populate our HCA interface structure. */
+ p_hca_ifc = __alloc_hca_ifc( p_ext );
+ if( !p_hca_ifc )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("__alloc_hca_ifc failed.\n"));
+ return STATUS_NO_MEMORY;
+ }
+
+ /* Notify AL that we're available... */
+ /* AL copies what it needs; the temporary buffer is freed either way. */
+ ib_status = p_ext->ci_ifc.register_ca( p_hca_ifc );
+ ExFreePool( p_hca_ifc );
+ if( ib_status != IB_SUCCESS )
+ {
+ /* Undo the CI interface reference taken by __get_ci_interface. */
+ p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ p_ext->state = HCA_REGISTERED;
+ return STATUS_SUCCESS;
+}
+\r
+\r
+/*
+ * __pnp_notify_ifc
+ *
+ * Device-interface-change callback for GUID_IB_CI_INTERFACE arrival
+ * (AL becoming available). On arrival: opens the AL device, registers
+ * for its removal notifications, and registers this CA with AL.
+ * Failures are logged and swallowed (always returns STATUS_SUCCESS),
+ * with any partially-acquired references rolled back.
+ *
+ * context is the HCA's FDO (passed at registration time).
+ */
+static NTSTATUS
+__pnp_notify_ifc(
+ IN void *pNotifyStruct,
+ IN void *context )
+{
+ NTSTATUS status;
+ DEVICE_OBJECT *p_dev_obj;
+ hca_dev_ext_t *p_ext;
+ DEVICE_INTERFACE_CHANGE_NOTIFICATION *pNotify;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ pNotify = (DEVICE_INTERFACE_CHANGE_NOTIFICATION*)pNotifyStruct;
+ p_dev_obj = (DEVICE_OBJECT*)context;
+ p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
+
+ /* Only interface arrival is interesting; removal is handled via
+ * the target-device notification registered below. */
+ if( !IsEqualGUID( &pNotify->Event, &GUID_DEVICE_INTERFACE_ARRIVAL ) )
+ {
+ HCA_EXIT( HCA_DBG_PNP );
+ return STATUS_SUCCESS;
+ }
+
+ /*
+ * Sanity check. We should only be getting notifications of the 
+ * CI interface exported by AL.
+ */
+ ASSERT( 
+ IsEqualGUID( &pNotify->InterfaceClassGuid, &GUID_IB_CI_INTERFACE ) );
+
+ if( p_ext->state != HCA_STARTED )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Invalid state: %d\n", p_ext->state));
+ return STATUS_SUCCESS;
+ }
+
+ ASSERT( !p_ext->p_al_dev );
+ ASSERT( !p_ext->p_al_file_obj );
+
+ /* Get the AL device object. */
+ /* Takes a reference on the file object; released on any failure. */
+ HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("Calling IoGetDeviceObjectPointer.\n"));
+ status = IoGetDeviceObjectPointer( pNotify->SymbolicLinkName,
+ FILE_ALL_ACCESS, &p_ext->p_al_file_obj, &p_ext->p_al_dev );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_SHIM, 
+ ("IoGetDeviceObjectPointer returned %08x.\n", status ));
+ return STATUS_SUCCESS;
+ }
+
+ /* Register for removal notification of the IB Fabric root device. */
+ HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PNP, 
+ ("Registering for target notifications.\n"));
+ status = IoRegisterPlugPlayNotification( 
+ EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj, 
+ p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj, 
+ &p_ext->pnp_target_entry );
+ if( !NT_SUCCESS( status ) )
+ {
+ /* Roll back the file-object reference taken above. */
+ ObDereferenceObject( p_ext->p_al_file_obj );
+ p_ext->p_al_file_obj = NULL;
+ p_ext->p_al_dev = NULL;
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("IoRegisterPlugPlayNotification returned %08x.\n", status));
+ return STATUS_SUCCESS;
+ }
+
+ status = __hca_register( p_dev_obj );
+ if( !NT_SUCCESS( status ) )
+ {
+ /* Roll back both the notification and the file-object reference. */
+ IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );
+ p_ext->pnp_target_entry = NULL;
+ ObDereferenceObject( p_ext->p_al_file_obj );
+ p_ext->p_al_file_obj = NULL;
+ p_ext->p_al_dev = NULL;
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_SHIM, 
+ ("__get_ci_interface returned %08x.\n", status));
+ return STATUS_SUCCESS;
+ }
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return STATUS_SUCCESS;
+}
+\r
+\r
+/*\r
+ * Walk the resource lists and store the information. The write-only\r
+ * flag is not set for the UAR region, so it is indistinguishable from the\r
+ * DDR region since both are prefetchable. The code here assumes that the\r
+ * resources get handed in order - HCR, UAR, DDR.\r
+ * - Configuration Space: not prefetchable, read/write\r
+ * - UAR space: prefetchable, write only.\r
+ * - DDR: prefetchable, read/write.\r
+ */\r
+/*
+ * __SetupHcaResources
+ *
+ * Stores the interrupt descriptor and the (up to three) memory BARs
+ * from the start-device resource lists into the device extension.
+ *
+ * pHcaResList is the raw list (physical addresses saved in bar[].phys),
+ * pHostResList is the translated list (used for the interrupt and, when
+ * MAP_ALL_HCA_MEMORY is defined, for MmMapIoSpace).
+ *
+ * Assumes both lists describe the same resources in the same order
+ * (indexed by the host list's count), and that memory resources arrive
+ * in HCR, UAR, DDR order - see the sanity check below. If only two
+ * memory BARs are present the DDR is taken to be hidden.
+ */
+static NTSTATUS
+__SetupHcaResources(
+ IN DEVICE_OBJECT* const p_dev_obj,
+ IN CM_RESOURCE_LIST* const pHcaResList,
+ IN CM_RESOURCE_LIST* const pHostResList )
+{
+ NTSTATUS status = STATUS_SUCCESS;
+ hca_dev_ext_t *p_ext;
+ USHORT i;
+ hca_bar_type_t type = HCA_BAR_TYPE_HCR;
+
+ CM_PARTIAL_RESOURCE_DESCRIPTOR *pHcaRes, *pHostRes;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
+
+ ASSERT( pHostResList->List[0].PartialResourceList.Version == 1 );
+ ASSERT( pHostResList->List[0].PartialResourceList.Revision == 1 );
+
+ // store the bus number for reset of Tavor
+ p_ext->bus_number = pHostResList->List[0].BusNumber;
+ 
+ for( i = 0; i < pHostResList->List[0].PartialResourceList.Count; i++ )
+ {
+ /* Same index into both lists - assumes identical ordering. */
+ pHcaRes =
+ &pHcaResList->List[0].PartialResourceList.PartialDescriptors[i];
+ pHostRes = 
+ &pHostResList->List[0].PartialResourceList.PartialDescriptors[i];
+
+
+ /*
+ * Save the interrupt information so that we can power the device
+ * up and down. Since the device will lose state when powered down
+ * we have to fully disable it. Note that we can leave memory mapped
+ * resources in place when powered down as the resource assignments
+ * won't change. However, we must disconnect our interrupt, and
+ * reconnect it when powering up.
+ */
+ if( pHcaRes->Type == CmResourceTypeInterrupt )
+ {
+ p_ext->interruptInfo = *pHostRes;
+ continue;
+ }
+ 
+ if( pHcaRes->Type != CmResourceTypeMemory )
+ continue;
+
+ /*
+ * Sanity check that our assumption on how resources
+ * are reported hold.
+ */
+ if( type == HCA_BAR_TYPE_HCR &&
+ (pHcaRes->Flags & CM_RESOURCE_MEMORY_PREFETCHABLE) )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("First memory resource is prefetchable - expected HCR.\n"));
+ status = STATUS_UNSUCCESSFUL;
+ break;
+ }
+
+ /* Record the raw (untranslated) physical range for this BAR. */
+ p_ext->bar[type].phys = pHcaRes->u.Memory.Start.QuadPart;
+ p_ext->bar[type].size = pHcaRes->u.Memory.Length;
+#ifdef MAP_ALL_HCA_MEMORY 
+ /*leo: no need to map all the resources */
+ p_ext->bar[type].virt = MmMapIoSpace( pHostRes->u.Memory.Start,
+ pHostRes->u.Memory.Length, MmNonCached );
+ if( !p_ext->bar[type].virt )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("Failed to map memory resource type %d\n", type));
+ status = STATUS_UNSUCCESSFUL;
+ break;
+ }
+#else 
+ p_ext->bar[type].virt = NULL;
+#endif 
+
+ /* Advance to the next expected BAR (HCR -> UAR -> DDR). */
+ type++;
+ }
+
+ /* Only HCR and UAR seen: the DDR BAR is hidden by firmware. */
+ if( type == HCA_BAR_TYPE_DDR)
+ {
+ p_ext->hca_hidden = 1;
+ }
+ else 
+ if( type != HCA_BAR_TYPE_MAX )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Failed to map all memory resources.\n"));
+ status = STATUS_UNSUCCESSFUL;
+ }
+
+ if( p_ext->interruptInfo.Type != CmResourceTypeInterrupt )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("No interrupt resource.\n"));
+ status = STATUS_UNSUCCESSFUL;
+ }
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return status;
+}
+\r
+\r
+/*
+ * Undo every MmMapIoSpace mapping recorded in the extension's BAR
+ * table and clear the corresponding entries. Entries that were never
+ * mapped (virt == NULL) are left untouched.
+ */
+static void
+__UnmapHcaMemoryResources(
+ IN DEVICE_OBJECT* const p_dev_obj )
+{
+ hca_dev_ext_t *p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
+ USHORT bar_idx;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ for( bar_idx = 0; bar_idx < HCA_BAR_TYPE_MAX; bar_idx++ )
+ {
+ if( !p_ext->bar[bar_idx].virt )
+ continue;
+
+ MmUnmapIoSpace( p_ext->bar[bar_idx].virt, p_ext->bar[bar_idx].size );
+ cl_memclr( &p_ext->bar[bar_idx], sizeof(hca_bar_t) );
+ }
+
+ HCA_EXIT( HCA_DBG_PNP );
+}
+\r
+\r
+/*
+ * hca_start - IRP_MN_START_DEVICE handler.
+ *
+ * Forwards the IRP down the stack synchronously, then brings the HCA
+ * up: parses the assigned resources, saves/enables the PCI config,
+ * obtains a DMA adapter, initializes the HCA (mthca_init_one), reads
+ * the node GUID, queues the HCA and registers for IB_AL interface
+ * arrival. Finishes by declaring the device fully powered (D0).
+ *
+ * Fixes vs. previous revision:
+ *  - the return status of hca_enable_pci was silently ignored;
+ *  - duplicate "p_ext->state = HCA_STARTED;" removed;
+ *  - trace messages named functions that are not actually called.
+ *
+ * TODO: several error paths still leak resources acquired earlier in
+ * this routine (DMA adapter, mthca state); cleanup currently relies on
+ * hca_release_resources.
+ */
+static NTSTATUS
+hca_start(
+ IN DEVICE_OBJECT* const p_dev_obj,
+ IN IRP* const p_irp, 
+ OUT cl_irp_action_t* const p_action )
+{
+ NTSTATUS status;
+ hca_dev_ext_t *p_ext;
+ IO_STACK_LOCATION *pIoStack;
+ POWER_STATE powerState;
+ DEVICE_DESCRIPTION devDesc;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
+
+ /* Handled on the way up. */
+ status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("Lower drivers failed IRP_MN_START_DEVICE.\n"));
+ return status;
+ }
+
+ pIoStack = IoGetCurrentIrpStackLocation( p_irp );
+
+ /*
+ * Walk the resource lists and store the information. The write-only
+ * flag is not set for the UAR region, so it is indistinguishable from the
+ * DDR region since both are prefetchable. The code here assumes that the
+ * resources get handed in order - HCR, UAR, DDR.
+ * - Configuration Space: not prefetchable, read/write
+ * - UAR space: prefetchable, write only.
+ * - DDR: prefetchable, read/write.
+ */
+ status = __SetupHcaResources( p_dev_obj,
+ pIoStack->Parameters.StartDevice.AllocatedResources,
+ pIoStack->Parameters.StartDevice.AllocatedResourcesTranslated );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("__SetupHcaResources returned %08X.\n", status));
+ return status;
+ }
+ 
+ /* Save PCI configuration info and enable the device.
+ * BUGFIX: the returned status used to be ignored. */
+ status = hca_enable_pci( p_dev_obj, &p_ext->hcaConfig );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("hca_enable_pci returned %08X.\n", status));
+ return status;
+ }
+
+ /*
+ * Get the DMA adapter representing the HCA so we can
+ * allocate common buffers.
+ */
+ RtlZeroMemory( &devDesc, sizeof(devDesc) );
+ devDesc.Version = DEVICE_DESCRIPTION_VERSION2;
+ devDesc.Master = TRUE;
+ devDesc.ScatterGather = TRUE;
+ devDesc.Dma32BitAddresses = TRUE;
+ devDesc.Dma64BitAddresses = TRUE;
+ devDesc.InterfaceType = PCIBus;
+
+ // get the adapter object
+ // 0x80000000 is a threshold, that's why - 1
+ devDesc.MaximumLength = 0x80000000 - 1;
+ p_ext->p_dma_adapter = IoGetDmaAdapter(
+ p_ext->cl_ext.p_pdo, &devDesc, &p_ext->n_map_regs );
+ if( !p_ext->p_dma_adapter )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("Failed to get DMA_ADAPTER for HCA.\n"));
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ /* Initialize the HCA now. */
+ status = mthca_init_one( p_ext );
+ if( !NT_SUCCESS( status ) )
+ {
+ //TODO: no cleanup on error
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("mthca_init_one returned %08X\n", status));
+ return status;
+ }
+
+ /*
+ * Change the state since the PnP callback can happen
+ * before the callback returns.
+ */
+ p_ext->state = HCA_STARTED;
+
+ /*leo: get node GUID */
+ {
+ int err = mthca_get_dev_info( p_ext->hca.mdev, &p_ext->hca.guid, &p_ext->hca.hw_ver );
+ if (err) {
+ //TODO: no cleanup on error
+ HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, 
+ ("can't get guid - mthca_get_dev_info()"));
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+ }
+
+ /* queue HCA */
+ mlnx_hca_insert( &p_ext->hca );
+ 
+ /* Register for interface arrival of the IB_AL device. */
+ status = IoRegisterPlugPlayNotification(
+ EventCategoryDeviceInterfaceChange,
+ PNPNOTIFY_DEVICE_INTERFACE_INCLUDE_EXISTING_INTERFACES,
+ (void*)&GUID_IB_CI_INTERFACE, p_dev_obj->DriverObject,
+ __pnp_notify_ifc, p_dev_obj, &p_ext->pnp_ifc_entry );
+ if( !NT_SUCCESS( status ) )
+ {
+ /* Fall back to the pre-start state; the IRP still succeeds. */
+ p_ext->state = HCA_ADDED;
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP,
+ ("IoRegisterPlugPlayNotification returned %08x.\n", status));
+ }
+
+ /* We get started fully powered. */
+ p_ext->PowerState = PowerDeviceD0;
+ powerState.DeviceState = PowerDeviceD0;
+ PoSetPowerState ( p_ext->cl_ext.p_self_do, DevicePowerState, powerState );
+
+ {
+ struct mthca_dev *mdev = p_ext->hca.mdev;
+ HCA_PRINT_EV(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,
+ ("Ven %d Dev %d Hw %x Fw %d.%d.%d Drv %s (%s) Flg %s%s%s\n", 
+ (unsigned)p_ext->hcaConfig.VendorID, (unsigned)p_ext->hcaConfig.DeviceID,
+ p_ext->hca.hw_ver, (int) (mdev->fw_ver >> 32),
+ (int) (mdev->fw_ver >> 16) & 0xffff, (int) (mdev->fw_ver & 0xffff),
+ DRV_VERSION, DRV_RELDATE,
+ (mdev->mthca_flags & MTHCA_FLAG_MEMFREE) ? "M:" : "",
+ (mdev->mthca_flags & MTHCA_FLAG_PCIE) ? "E:" : "",
+ (mdev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN) ? "H" : ""
+ ));
+ }
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return status;
+}
+\r
+\r
+/*
+ * hca_release_resources
+ *
+ * Complib release-resources callback: tears down everything hca_start
+ * acquired, in reverse order - deregister from AL, drop PnP
+ * notification registrations and the AL file-object reference, shut the
+ * HCA down (mthca_remove_one), release the DMA adapter, unmap BARs, and
+ * finally report the device as powered off (D3).
+ */
+static void
+hca_release_resources(
+ IN DEVICE_OBJECT* const p_dev_obj )
+{
+ hca_dev_ext_t *p_ext;
+ POWER_STATE powerState;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
+
+ if( p_ext->state == HCA_REGISTERED )
+ {
+ CL_ASSERT( p_ext->ci_ifc.deregister_ca );
+ CL_ASSERT( p_ext->p_al_dev );
+ CL_ASSERT( p_ext->p_al_file_obj );
+ /* Notify AL that the CA is being removed. */
+ p_ext->ci_ifc.deregister_ca( p_ext->hca.guid );
+ /* Release AL's CI interface. */
+ p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );
+ }
+
+ /* Target-change registration only exists alongside the ifc one. */
+ if( p_ext->pnp_target_entry )
+ {
+ ASSERT( p_ext->pnp_ifc_entry );
+ IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );
+ }
+
+ if( p_ext->pnp_ifc_entry )
+ IoUnregisterPlugPlayNotification( p_ext->pnp_ifc_entry );
+
+ if( p_ext->p_al_file_obj )
+ ObDereferenceObject( p_ext->p_al_file_obj );
+
+ /* Shut down the HCA hardware/software state. */
+ mthca_remove_one( p_ext );
+
+ if( p_ext->p_dma_adapter )
+ p_ext->p_dma_adapter->DmaOperations->PutDmaAdapter( p_ext->p_dma_adapter );
+
+ //cl_event_destroy( &p_ext->mutex );
+ __UnmapHcaMemoryResources( p_dev_obj );
+
+ /* Notify the power manager that the device is powered down. */
+ powerState.DeviceState = PowerDeviceD3;
+ PoSetPowerState ( p_ext->cl_ext.p_self_do, DevicePowerState, powerState );
+
+ /* Clear the PnP state in case we get restarted. */
+ p_ext->pnpState = 0;
+
+ HCA_EXIT( HCA_DBG_PNP );
+}
+\r
+\r
+/*
+ * IRP_MN_QUERY_DEVICE_RELATIONS(RemovalRelations) handler.
+ *
+ * While registered with AL, lets AL append its dependent devices to the
+ * relations in the IRP; a failure there completes the IRP with that
+ * status. In every other case the IRP is passed down the stack.
+ */
+static NTSTATUS
+hca_query_removal_relations(
+ IN DEVICE_OBJECT* const p_dev_obj,
+ IN IRP* const p_irp, 
+ OUT cl_irp_action_t* const p_action )
+{
+ hca_dev_ext_t *p_ext;
+ NTSTATUS status;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
+
+ if( p_ext->state != HCA_REGISTERED )
+ {
+ /* Not registered with AL - nothing to contribute. */
+ *p_action = IrpPassDown;
+ HCA_EXIT( HCA_DBG_PNP );
+ return STATUS_SUCCESS;
+ }
+
+ status = p_ext->ci_ifc.get_relations( p_ext->hca.guid, p_irp );
+ if( !NT_SUCCESS( status ) )
+ {
+ *p_action = IrpComplete;
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("AL get_relations returned %08x.\n", status));
+ return status;
+ }
+
+ *p_action = IrpPassDown;
+ HCA_EXIT( HCA_DBG_PNP );
+ return STATUS_SUCCESS;
+}
+\r
+\r
+/*
+ * hca_query_bus_relations
+ *
+ * IRP_MN_QUERY_DEVICE_RELATIONS(BusRelations) handler. While the CA is
+ * registered, AL fills in its child devices via get_relations; before
+ * registration an empty (count 0) relations list is allocated so the
+ * IRP can still be passed down successfully.
+ */
+static NTSTATUS
+hca_query_bus_relations(
+ IN DEVICE_OBJECT* const p_dev_obj,
+ IN IRP* const p_irp, 
+ OUT cl_irp_action_t* const p_action )
+{
+#ifndef WIN_TO_BE_CHANGED
+
+ NTSTATUS status;
+ DEVICE_RELATIONS *p_rel;
+ hca_dev_ext_t *p_ext;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ p_ext = p_dev_obj->DeviceExtension;
+
+ //cl_event_wait_on( &p_ext->mutex, EVENT_NO_TIMEOUT, FALSE );
+ if( p_ext->state == HCA_REGISTERED )
+ {
+ /* Let AL append its child devices to the IRP's relations. */
+ status = p_ext->ci_ifc.get_relations( p_ext->hca.guid, p_irp );
+ if( !NT_SUCCESS( status ) )
+ {
+ //cl_event_signal( &p_ext->mutex );
+ *p_action = IrpComplete;
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("AL get_relations returned %08x.\n", status));
+ return status;
+ }
+ }
+ else
+ {
+ /* Not registered: report an explicitly empty relations list. */
+ status = cl_alloc_relations( p_irp, 1 );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("cl_alloc_relations returned %08x.\n", status));
+ return status;
+ }
+
+ p_rel = (DEVICE_RELATIONS*)p_irp->IoStatus.Information;
+ p_rel->Count = 0;
+ p_rel->Objects[0] = NULL;
+ }
+
+ //cl_event_signal( &p_ext->mutex );
+
+ *p_action = IrpPassDown;
+ HCA_EXIT( HCA_DBG_PNP );
+ return STATUS_SUCCESS;
+
+#else
+ return cl_irp_skip( p_dev_obj, p_irp, p_action );
+ //NTSTATUS status;
+ //hca_dev_ext_t *p_ext;
+
+ //HCA_ENTER( HCA_DBG_PNP );
+
+ //p_ext = p_dev_obj->DeviceExtension;
+
+ //if( p_ext->state == HCA_REGISTERED )
+ //{
+ // status = p_ext->ci_ifc.get_relations( p_ext->hca.guid, p_irp );
+ // if( !NT_SUCCESS( status ) )
+ // {
+ // *p_action = IrpComplete;
+ // HCA_PRINT( TRACE_LEVEL_ERROR,
+ // "AL get_relations returned %08x.\n", status);
+ // return status;
+ // }
+ //}
+
+ //*p_action = IrpPassDown;
+ //HCA_EXIT( HCA_DBG_PNP );
+ //return STATUS_SUCCESS;
+#endif 
+}
+\r
+\r
+/*
+ * IRP_MN_QUERY_STOP_DEVICE handler.
+ *
+ * The stop is always allowed: kernel clients learn about it through
+ * the device hierarchy, so there is nothing to veto here.
+ * TODO: set a flag to fail creation of any new IB resources.
+ */
+static NTSTATUS
+hca_query_stop(
+ IN DEVICE_OBJECT* const p_dev_obj,
+ IN IRP* const p_irp, 
+ OUT cl_irp_action_t* const p_action )
+{
+ return cl_irp_skip( p_dev_obj, p_irp, p_action );
+}
+\r
+\r
+/*
+ * IRP_MN_STOP_DEVICE handler.
+ *
+ * Simply lets the IRP continue down the stack; the complib framework
+ * invokes our ReleaseResources handler to disable everything.
+ */
+static NTSTATUS
+hca_stop(
+ IN DEVICE_OBJECT* const p_dev_obj,
+ IN IRP* const p_irp, 
+ OUT cl_irp_action_t* const p_action )
+{
+ return cl_irp_skip( p_dev_obj, p_irp, p_action );
+}
+\r
+\r
+/*
+ * IRP_MN_CANCEL_STOP_DEVICE handler.
+ *
+ * Processed on the way back up the stack after the lower drivers have
+ * seen it, via the synchronous-forward helper.
+ */
+static NTSTATUS
+hca_cancel_stop(
+ IN DEVICE_OBJECT* const p_dev_obj,
+ IN IRP* const p_irp, 
+ OUT cl_irp_action_t* const p_action )
+{
+ return cl_do_sync_pnp( p_dev_obj, p_irp, p_action );
+}
+\r
+\r
+/*
+ * IRP_MN_QUERY_REMOVE_DEVICE handler.
+ *
+ * Removal queries always succeed for this driver.
+ * TODO: set a flag to fail creation of any new IB resources.
+ */
+static NTSTATUS
+hca_query_remove(
+ IN DEVICE_OBJECT* const p_dev_obj,
+ IN IRP* const p_irp, 
+ OUT cl_irp_action_t* const p_action )
+{
+ return cl_irp_skip( p_dev_obj, p_irp, p_action );
+}
+\r
+\r
+/*
+ * IRP_MN_CANCEL_REMOVE_DEVICE handler.
+ *
+ * Processed on the way back up the stack after the lower drivers have
+ * seen it, via the synchronous-forward helper.
+ */
+static NTSTATUS
+hca_cancel_remove(
+ IN DEVICE_OBJECT* const p_dev_obj,
+ IN IRP* const p_irp, 
+ OUT cl_irp_action_t* const p_action )
+{
+ return cl_do_sync_pnp( p_dev_obj, p_irp, p_action );
+}
+\r
+\r
+/*
+ * IRP_MN_SURPRISE_REMOVAL handler.
+ *
+ * Currently just forwards the IRP down the stack.
+ * TODO: record the surprise-removed state so that all further requests
+ * automatically succeed/fail as appropriate.
+ */
+static NTSTATUS
+hca_surprise_remove(
+ IN DEVICE_OBJECT* const p_dev_obj,
+ IN IRP* const p_irp, 
+ OUT cl_irp_action_t* const p_action )
+{
+ return cl_irp_skip( p_dev_obj, p_irp, p_action );
+}
+\r
+\r
+/*
+ * hca_query_capabilities - IRP_MN_QUERY_CAPABILITIES handler.
+ *
+ * Forwards the IRP down synchronously, snapshots the system-to-device
+ * power mapping for later IRP_MN_SET_POWER handling, and scrubs
+ * capabilities this driver does not support (D1/D2 states, system and
+ * device wake).
+ *
+ * Fix: "WARINING" typo corrected to "WARNING" in three trace strings.
+ */
+static NTSTATUS
+hca_query_capabilities(
+ IN DEVICE_OBJECT* const p_dev_obj,
+ IN IRP* const p_irp, 
+ OUT cl_irp_action_t* const p_action )
+{
+ NTSTATUS status;
+ hca_dev_ext_t *p_ext;
+ IO_STACK_LOCATION *pIoStack;
+ DEVICE_CAPABILITIES *pCaps;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
+
+ /* Process on the way up. */
+ status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, 
+ ("cl_do_sync_pnp returned %08X.\n", status));
+ return status;
+ }
+
+ pIoStack = IoGetCurrentIrpStackLocation( p_irp );
+ pCaps = pIoStack->Parameters.DeviceCapabilities.Capabilities;
+
+ /*
+ * Store the device power mapping into our extension since we're
+ * the power policy owner. The mapping is used when handling
+ * IRP_MN_SET_POWER IRPs.
+ */
+ cl_memcpy(
+ p_ext->DevicePower, pCaps->DeviceState, sizeof(p_ext->DevicePower) );
+
+ if( pCaps->DeviceD1 )
+ {
+ HCA_PRINT( TRACE_LEVEL_WARNING ,HCA_DBG_PNP,
+ ("WARNING: Device reports support for DeviceD1 power state.\n"));
+ pCaps->DeviceD1 = FALSE;
+ }
+
+ if( pCaps->DeviceD2 )
+ {
+ HCA_PRINT( TRACE_LEVEL_WARNING,HCA_DBG_PNP,
+ ("WARNING: Device reports support for DeviceD2 power state.\n"));
+ pCaps->DeviceD2 = FALSE;
+ }
+
+ if( pCaps->SystemWake != PowerSystemUnspecified )
+ {
+ HCA_PRINT( TRACE_LEVEL_WARNING ,HCA_DBG_PNP,
+ ("WARNING: Device reports support for system wake.\n"));
+ pCaps->SystemWake = PowerSystemUnspecified;
+ }
+
+ if( pCaps->DeviceWake != PowerDeviceUnspecified )
+ {
+ HCA_PRINT( TRACE_LEVEL_WARNING, HCA_DBG_PNP,
+ ("WARNING: Device reports support for device wake.\n"));
+ pCaps->DeviceWake = PowerDeviceUnspecified;
+ }
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return status;
+}
+\r
+\r
+/*
+ * IRP_MN_QUERY_PNP_DEVICE_STATE handler.  ORs our cached PnP state flags
+ * (e.g. PNP_DEVICE_FAILED set by a failed power-up) into the IRP's
+ * Information field and lets the IRP continue down the stack.
+ */
+static NTSTATUS
+hca_query_pnp_state(
+	IN	DEVICE_OBJECT* const	p_dev_obj,
+	IN	IRP* const	p_irp, 
+	OUT	cl_irp_action_t* const	p_action )
+{
+	hca_dev_ext_t		*p_ext;
+
+	HCA_ENTER( HCA_DBG_PNP );
+
+	p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
+
+	/* Merge rather than overwrite: preserve flags set by upper drivers. */
+	p_irp->IoStatus.Information |= p_ext->pnpState;
+
+	*p_action = IrpSkip;
+
+	HCA_EXIT( HCA_DBG_PNP );
+	/* Fix: removed stray double semicolon. */
+	return STATUS_SUCCESS;
+}
+\r
+\r
+/*
+ * IRP_MN_QUERY_POWER handler.
+ *
+ * Vetoes transitions this driver cannot support: for system power, only
+ * fully working and shutdown states are allowed (no sleep/hibernate); for
+ * device power, only D0 and D3.  Unsupported queries are completed here
+ * with STATUS_NOT_SUPPORTED; supported ones are passed down.
+ */
+static NTSTATUS
+hca_query_power(
+	IN	DEVICE_OBJECT* const	p_dev_obj,
+	IN	IRP* const	p_irp,
+	OUT	cl_irp_action_t* const	p_action )
+{
+	NTSTATUS			status = STATUS_SUCCESS;
+	IO_STACK_LOCATION	*pIoStack;
+
+	HCA_ENTER( HCA_DBG_PO );
+
+	UNUSED_PARAM( p_dev_obj );
+
+	pIoStack = IoGetCurrentIrpStackLocation( p_irp );
+
+	switch( pIoStack->Parameters.Power.Type )
+	{
+	case SystemPowerState:
+		/* Fail any requests to hibernate or sleep the system. */
+		switch( pIoStack->Parameters.Power.State.SystemState )
+		{
+		case PowerSystemWorking:
+		case PowerSystemShutdown:
+			/* We only support fully working and shutdown system states. */
+			break;
+
+		default:
+			status = STATUS_NOT_SUPPORTED;
+		}
+		break;
+
+	case DevicePowerState:
+		/* Fail any query for low power states. */
+		switch( pIoStack->Parameters.Power.State.DeviceState )
+		{
+		case PowerDeviceD0:
+		case PowerDeviceD3:
+			/* We only support fully powered or off power states. */
+			break;
+
+		default:
+			status = STATUS_NOT_SUPPORTED;
+		}
+		break;
+
+	default:
+		/* Unknown power type: pass the IRP down unmodified. */
+		break;
+	}
+
+	if( status == STATUS_NOT_SUPPORTED )
+		*p_action = IrpComplete;
+	else
+		*p_action = IrpSkip;
+
+	HCA_EXIT( HCA_DBG_PO );
+	return status;
+}
+\r
+\r
+/*
+ * Completion callback for the device-power IRP requested from
+ * __SystemPowerCompletion via PoRequestPowerIrp.  The context is the
+ * original system SET_POWER IRP, whose completion was held off with
+ * STATUS_MORE_PROCESSING_REQUIRED; resume it here with the device
+ * IRP's final status.  NOTE: the statement order below (start next power
+ * IRP, complete, release remove lock) is required and must not change.
+ */
+static void
+__RequestPowerCompletion(
+	IN	DEVICE_OBJECT	*p_dev_obj,
+	IN	UCHAR	minorFunction,
+	IN	POWER_STATE	powerState,
+	IN	void	*context,
+	IN	IO_STATUS_BLOCK	*pIoStatus )
+{
+	IRP				*p_sys_irp;
+	cl_pnp_po_ext_t	*p_pnp_ext;
+
+	HCA_ENTER( HCA_DBG_PO );
+
+	UNUSED_PARAM( minorFunction );
+	UNUSED_PARAM( powerState );
+
+	p_sys_irp = (IRP*)context;
+	p_pnp_ext = (cl_pnp_po_ext_t*)p_dev_obj->DeviceExtension;
+
+	/* Propagate the device IRP status to the system IRP status. */
+	p_sys_irp->IoStatus.Status = pIoStatus->Status;
+
+	/* Continue Power IRP processing. */
+	PoStartNextPowerIrp( p_sys_irp );
+	IoCompleteRequest( p_sys_irp, IO_NO_INCREMENT );
+	IoReleaseRemoveLock( &p_pnp_ext->remove_lock, p_sys_irp );
+	HCA_EXIT( HCA_DBG_PO );
+}
+\r
+\r
+/*NOTE: Completion routines must NEVER be pageable. */\r
+static NTSTATUS\r
+__SystemPowerCompletion(\r
+ IN DEVICE_OBJECT *p_dev_obj,\r
+ IN IRP *p_irp,\r
+ IN void *context )\r
+{\r
+ NTSTATUS status;\r
+ POWER_STATE state;\r
+ hca_dev_ext_t *p_ext;\r
+ IO_STACK_LOCATION *pIoStack;\r
+\r
+ HCA_ENTER( HCA_DBG_PNP );\r
+\r
+ UNUSED_PARAM( context );\r
+\r
+ p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+ pIoStack = IoGetCurrentIrpStackLocation( p_irp );\r
+\r
+ if( !NT_SUCCESS( p_irp->IoStatus.Status ) )\r
+ {\r
+ PoStartNextPowerIrp( p_irp );\r
+ IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp );\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, \r
+ ("IRP_MN_SET_POWER for system failed by lower driver with %08x.\n",\r
+ p_irp->IoStatus.Status));\r
+ return STATUS_SUCCESS;\r
+ }\r
+\r
+ state.DeviceState = \r
+ p_ext->DevicePower[pIoStack->Parameters.Power.State.SystemState];\r
+\r
+ /*\r
+ * Send a device power IRP to our devnode. Using our device object will\r
+ * only work on win2k and other NT based systems.\r
+ */\r
+ status = PoRequestPowerIrp( p_dev_obj, IRP_MN_SET_POWER, state,\r
+ __RequestPowerCompletion, p_irp, NULL );\r
+\r
+ if( !NT_SUCCESS( p_irp->IoStatus.Status ) )\r
+ {\r
+ PoStartNextPowerIrp( p_irp );\r
+ /* Propagate the failure. */\r
+ p_irp->IoStatus.Status = status;\r
+ IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
+ IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp );\r
+ HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP,\r
+ ("PoRequestPowerIrp returned %08x.\n", status));\r
+ }\r
+\r
+ HCA_EXIT( HCA_DBG_PNP );\r
+ return STATUS_MORE_PROCESSING_REQUIRED;\r
+}\r
+\r
+\r
+/* Work item callback to handle DevicePowerD0 IRPs at passive level. */\r
+static void\r
+__PowerUpCb(\r
+ IN DEVICE_OBJECT* p_dev_obj,\r
+ IN void* context )\r
+{\r
+ NTSTATUS status;\r
+ IO_STACK_LOCATION *pIoStack;\r
+ hca_dev_ext_t *p_ext;\r
+ IRP *p_irp;\r
+\r
+ HCA_ENTER( HCA_DBG_PO );\r
+\r
+ p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;\r
+ p_irp = (IRP*)context;\r
+ pIoStack = IoGetCurrentIrpStackLocation( p_irp );\r
+\r
+ IoFreeWorkItem( p_ext->pPoWorkItem );\r
+ p_ext->pPoWorkItem = NULL;\r
+\r
+ status = mthca_init_one( p_ext );\r
+ if( !NT_SUCCESS( status ) )\r
+ goto done;\r
+\r
+ if( p_ext->p_al_dev )\r
+ status = __hca_register( p_dev_obj );\r
+\r
+done:\r
+ if( !NT_SUCCESS( status ) )\r
+ {\r
+ /* Flag device as having failed. */\r
+ p_ext->pnpState |= PNP_DEVICE_FAILED;\r
+ IoInvalidateDeviceState( p_ext->cl_ext.p_pdo );\r
+ }\r
+\r
+ PoStartNextPowerIrp( p_irp );\r
+ IoCompleteRequest( p_irp, IO_NO_INCREMENT );\r
+ IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp );\r
+\r
+ &nb